diff --git a/bindata/cert-manager-deployment/network-policy/operator-allow-egress-to-api-server.yaml b/bindata/cert-manager-deployment/network-policy/operator-allow-egress-to-api-server.yaml
new file mode 100644
index 000000000..8c531e1eb
--- /dev/null
+++ b/bindata/cert-manager-deployment/network-policy/operator-allow-egress-to-api-server.yaml
@@ -0,0 +1,14 @@
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+  name: operator-allow-egress-to-api-server
+spec:
+  podSelector:
+    matchLabels:
+      name: cert-manager-operator
+  policyTypes:
+  - Egress
+  egress:
+  - ports:
+    - protocol: TCP
+      port: 6443
diff --git a/bindata/cert-manager-deployment/network-policy/operator-allow-ingress-to-metrics.yaml b/bindata/cert-manager-deployment/network-policy/operator-allow-ingress-to-metrics.yaml
new file mode 100644
index 000000000..01c13e6b8
--- /dev/null
+++ b/bindata/cert-manager-deployment/network-policy/operator-allow-ingress-to-metrics.yaml
@@ -0,0 +1,14 @@
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+  name: operator-allow-ingress-to-metrics
+spec:
+  podSelector:
+    matchLabels:
+      name: cert-manager-operator
+  policyTypes:
+  - Ingress
+  ingress:
+  - ports:
+    - protocol: TCP
+      port: 8443
diff --git a/bindata/cert-manager-deployment/network-policy/operator-deny-all-pod-selector.yaml b/bindata/cert-manager-deployment/network-policy/operator-deny-all-pod-selector.yaml
new file mode 100644
index 000000000..8ea5d0e8a
--- /dev/null
+++ b/bindata/cert-manager-deployment/network-policy/operator-deny-all-pod-selector.yaml
@@ -0,0 +1,11 @@
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+  name: operator-deny-all-traffic
+spec:
+  podSelector:
+    matchLabels:
+      name: cert-manager-operator
+  policyTypes:
+  - Ingress
+  - Egress
diff --git a/bundle/manifests/cert-manager-operator.clusterserviceversion.yaml b/bundle/manifests/cert-manager-operator.clusterserviceversion.yaml
index 85dd8412e..89459b5cf 100644
--- a/bundle/manifests/cert-manager-operator.clusterserviceversion.yaml
+++ b/bundle/manifests/cert-manager-operator.clusterserviceversion.yaml
@@ -504,6 +504,7 @@ spec:
         resources:
         - ingresses
         - ingresses/finalizers
+        - networkpolicies
         verbs:
         - create
         - delete
diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml
index 2c3335fe8..5ac6a1dc2 100644
--- a/config/rbac/role.yaml
+++ b/config/rbac/role.yaml
@@ -194,6 +194,7 @@ rules:
   resources:
   - ingresses
   - ingresses/finalizers
+  - networkpolicies
   verbs:
   - create
   - delete
diff --git a/go.mod b/go.mod
index 5aea2dd20..35e5ef8fe 100644
--- a/go.mod
+++ b/go.mod
@@ -9,40 +9,41 @@ require (
 	github.com/go-bindata/go-bindata v3.1.2+incompatible
 	github.com/go-logr/logr v1.4.2
 	github.com/golangci/golangci-lint v1.50.1
-	github.com/google/go-cmp v0.6.0
+	github.com/google/go-cmp v0.7.0
 	github.com/google/go-jsonnet v0.17.0
 	github.com/maxbrunsfeld/counterfeiter/v6 v6.8.1
 	github.com/mogensen/kubernetes-split-yaml v0.3.0
-	github.com/onsi/ginkgo/v2 v2.19.0
-	github.com/onsi/gomega v1.34.0
-	github.com/openshift/api v0.0.0-20241101202457-04eb3fd119d2
-	github.com/openshift/build-machinery-go v0.0.0-20240613134303-8359781da660
-	github.com/openshift/client-go v0.0.0-20241001162912-da6d55e4611f
-	github.com/openshift/library-go v0.0.0-20250403134058-7c43fdf96c62
+	github.com/onsi/ginkgo/v2 v2.21.0
+	github.com/onsi/gomega v1.35.1
+	github.com/openshift/api v0.0.0-20250320170726-75d64d71980b
+	github.com/openshift/build-machinery-go v0.0.0-20250102153059-e85a1a7ecb5c
+	github.com/openshift/client-go
v0.0.0-20250125113824-8e1f0b8fa9a7 + github.com/openshift/library-go v0.0.0-20250609093359-ccdcf648dd95 github.com/operator-framework/operator-lib v0.11.0 github.com/spf13/cobra v1.8.1 github.com/spf13/pflag v1.0.5 github.com/stretchr/testify v1.10.0 - golang.org/x/tools v0.24.0 - k8s.io/api v0.31.7 - k8s.io/apiextensions-apiserver v0.31.1 - k8s.io/apimachinery v0.31.7 - k8s.io/client-go v0.31.7 - k8s.io/code-generator v0.31.1 - k8s.io/component-base v0.31.7 + golang.org/x/tools v0.26.0 + k8s.io/api v0.32.5 + k8s.io/apiextensions-apiserver v0.32.1 + k8s.io/apimachinery v0.32.5 + k8s.io/client-go v0.32.5 + k8s.io/code-generator v0.32.1 + k8s.io/component-base v0.32.5 k8s.io/klog/v2 v2.130.1 - k8s.io/kubernetes v1.31.7 - k8s.io/utils v0.0.0-20240921022957-49e7df575cb6 + k8s.io/kubernetes v1.32.5 + k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 sigs.k8s.io/controller-runtime v0.19.0 sigs.k8s.io/controller-runtime/tools/setup-envtest v0.0.0-20230912183013-811757733433 sigs.k8s.io/controller-tools v0.15.0 - sigs.k8s.io/kustomize/kustomize/v5 v5.4.2 - sigs.k8s.io/structured-merge-diff/v4 v4.4.1 + sigs.k8s.io/kustomize/kustomize/v5 v5.5.0 + sigs.k8s.io/structured-merge-diff/v4 v4.6.0 sigs.k8s.io/yaml v1.4.0 ) require ( 4d63.com/gochecknoglobals v0.1.0 // indirect + cel.dev/expr v0.19.1 // indirect github.com/Abirdcfly/dupword v0.0.7 // indirect github.com/Antonboom/errname v0.1.7 // indirect github.com/Antonboom/nilnil v0.1.1 // indirect @@ -77,11 +78,10 @@ require ( github.com/daixiang0/gci v0.8.1 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/denis-tingaikin/go-header v0.4.3 // indirect - github.com/distribution/reference v0.5.0 // indirect + github.com/distribution/reference v0.6.0 // indirect github.com/emicklei/go-restful/v3 v3.12.1 // indirect github.com/esimonov/ifshort v1.0.4 // indirect github.com/ettle/strcase v0.1.1 // indirect - github.com/evanphx/json-patch v5.9.0+incompatible // indirect github.com/evanphx/json-patch/v5 v5.9.0 // indirect github.com/fatih/color v1.17.0 // indirect github.com/fatih/structtag v1.2.0 // indirect @@ -111,7 +111,6 @@ require ( github.com/gobwas/glob v0.2.3 // indirect github.com/gofrs/flock v0.8.1 // indirect github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.4 // indirect github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2 // indirect github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a // indirect @@ -122,10 +121,11 @@ require ( github.com/golangci/misspell v0.3.5 // indirect github.com/golangci/revgrep v0.0.0-20220804021717-745bb2f7c2e6 // indirect github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4 // indirect - github.com/google/cel-go v0.20.1 // indirect - github.com/google/gnostic-models v0.6.8 // indirect + github.com/google/btree v1.1.3 // indirect + github.com/google/cel-go v0.22.0 // indirect + github.com/google/gnostic-models v0.6.9 // indirect github.com/google/gofuzz v1.2.0 // indirect - github.com/google/pprof v0.0.0-20240727154555-813a5fbdbec8 // indirect + github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db // indirect github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect github.com/google/uuid v1.6.0 // indirect github.com/gordonklaus/ineffassign v0.0.0-20210914165742-4cc7213b9bc8 // indirect @@ -134,13 +134,12 @@ require ( github.com/gostaticanalysis/forcetypeassert v0.1.0 // indirect github.com/gostaticanalysis/nilerr v0.1.1 // 
indirect github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect - github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/go-multierror v1.1.1 // indirect github.com/hashicorp/go-version v1.6.0 // indirect github.com/hashicorp/hcl v1.0.1-vault-5 // indirect github.com/hexops/gotextdiff v1.0.3 // indirect - github.com/imdario/mergo v0.3.16 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/jgautheron/goconst v1.5.1 // indirect github.com/jingyugao/rowserrcheck v1.1.1 // indirect @@ -151,7 +150,6 @@ require ( github.com/kisielk/errcheck v1.6.2 // indirect github.com/kisielk/gotool v1.0.0 // indirect github.com/kkHAIKE/contextcheck v1.1.3 // indirect - github.com/klauspost/compress v1.17.9 // indirect github.com/kulti/thelper v0.6.3 // indirect github.com/kunwardeep/paralleltest v1.0.6 // indirect github.com/kylelemons/godebug v1.1.0 // indirect @@ -190,9 +188,9 @@ require ( github.com/pkg/profile v1.7.0 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/polyfloyd/go-errorlint v1.0.5 // indirect - github.com/prometheus/client_golang v1.20.4 // indirect + github.com/prometheus/client_golang v1.22.0 // indirect github.com/prometheus/client_model v0.6.1 // indirect - github.com/prometheus/common v0.55.0 // indirect + github.com/prometheus/common v0.62.0 // indirect github.com/prometheus/procfs v0.15.1 // indirect github.com/quasilyte/go-ruleguard v0.3.18 // indirect github.com/quasilyte/gogrep v0.0.0-20220828223005-86e4605de09f // indirect @@ -207,6 +205,7 @@ require ( github.com/sashamelentyev/interfacebloat v1.1.0 // indirect github.com/sashamelentyev/usestdlibvars v1.20.0 // indirect github.com/securego/gosec/v2 v2.13.1 // indirect + github.com/sergi/go-diff v1.2.0 // indirect github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c // indirect github.com/sirupsen/logrus v1.9.3 // indirect github.com/sivchari/containedctx v1.0.2 // indirect @@ -238,37 +237,37 @@ require ( github.com/yagipy/maintidx v1.0.0 // indirect github.com/yeya24/promlinter v0.2.0 // indirect gitlab.com/bosi/decorder v0.2.3 // indirect - go.etcd.io/etcd/api/v3 v3.5.14 // indirect - go.etcd.io/etcd/client/pkg/v3 v3.5.14 // indirect - go.etcd.io/etcd/client/v3 v3.5.14 // indirect - go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 // indirect - go.opentelemetry.io/otel v1.29.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0 // indirect - go.opentelemetry.io/otel/metric v1.29.0 // indirect - go.opentelemetry.io/otel/sdk v1.28.0 // indirect - go.opentelemetry.io/otel/trace v1.29.0 // indirect - go.opentelemetry.io/proto/otlp v1.3.1 // indirect - go.starlark.net v0.0.0-20230525235612-a134d8f9ddca // indirect + go.etcd.io/etcd/api/v3 v3.5.21 // indirect + go.etcd.io/etcd/client/pkg/v3 v3.5.21 // indirect + go.etcd.io/etcd/client/v3 v3.5.21 // indirect + go.opentelemetry.io/auto/sdk v1.1.0 // indirect + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.58.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 // indirect + go.opentelemetry.io/otel v1.33.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace 
v1.33.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.33.0 // indirect + go.opentelemetry.io/otel/metric v1.33.0 // indirect + go.opentelemetry.io/otel/sdk v1.33.0 // indirect + go.opentelemetry.io/otel/trace v1.33.0 // indirect + go.opentelemetry.io/proto/otlp v1.4.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect - golang.org/x/crypto v0.32.0 // indirect + golang.org/x/crypto v0.36.0 // indirect golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect golang.org/x/exp/typeparams v0.0.0-20220827204233-334a2380cb91 // indirect - golang.org/x/mod v0.20.0 // indirect - golang.org/x/net v0.34.0 // indirect - golang.org/x/oauth2 v0.23.0 // indirect - golang.org/x/sync v0.10.0 // indirect - golang.org/x/sys v0.29.0 // indirect - golang.org/x/term v0.28.0 // indirect - golang.org/x/text v0.21.0 // indirect - golang.org/x/time v0.6.0 // indirect + golang.org/x/mod v0.21.0 // indirect + golang.org/x/net v0.38.0 // indirect + golang.org/x/oauth2 v0.27.0 // indirect + golang.org/x/sync v0.12.0 // indirect + golang.org/x/sys v0.31.0 // indirect + golang.org/x/term v0.30.0 // indirect + golang.org/x/text v0.23.0 // indirect + golang.org/x/time v0.9.0 // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20240827150818-7e3bb234dfed // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 // indirect - google.golang.org/grpc v1.66.2 // indirect - google.golang.org/protobuf v1.34.2 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20241209162323-e6fa225c2576 // indirect + google.golang.org/grpc v1.68.1 // indirect + google.golang.org/protobuf v1.36.5 // indirect gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/ini.v1 v1.67.0 // indirect @@ -276,26 +275,27 @@ require ( gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect honnef.co/go/tools v0.3.3 // indirect - k8s.io/apiserver v0.31.7 // indirect + k8s.io/apiserver v0.32.5 // indirect k8s.io/cloud-provider v0.30.1 // indirect - k8s.io/component-helpers v0.30.1 // indirect - k8s.io/controller-manager v0.30.1 // indirect - k8s.io/gengo/v2 v2.0.0-20240826214909-a7b603a56eb7 // indirect - k8s.io/kms v0.31.7 // indirect - k8s.io/kube-aggregator v0.31.1 // indirect - k8s.io/kube-openapi v0.0.0-20240903163716-9e1beecbcb38 // indirect + k8s.io/component-helpers v0.32.1 // indirect + k8s.io/controller-manager v0.32.5 // indirect + k8s.io/gengo/v2 v2.0.0-20240911193312-2b36238f13e9 // indirect + k8s.io/kms v0.32.5 // indirect + k8s.io/kube-aggregator v0.32.1 // indirect + k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff // indirect k8s.io/kubelet v0.30.1 // indirect mvdan.cc/gofumpt v0.4.0 // indirect mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed // indirect mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b // indirect mvdan.cc/unparam v0.0.0-20220706161116-678bad134442 // indirect - sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.30.3 // indirect + sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2 // indirect sigs.k8s.io/gateway-api v1.1.0 // indirect - sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect + sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect sigs.k8s.io/kube-storage-version-migrator v0.0.6-0.20230721195810-5c8923c5ff96 // indirect - 
sigs.k8s.io/kustomize/api v0.17.2 // indirect - sigs.k8s.io/kustomize/cmd/config v0.14.1 // indirect - sigs.k8s.io/kustomize/kyaml v0.17.1 // indirect + sigs.k8s.io/kustomize/api v0.18.0 // indirect + sigs.k8s.io/kustomize/cmd/config v0.15.0 // indirect + sigs.k8s.io/kustomize/kyaml v0.18.1 // indirect + sigs.k8s.io/randfill v1.0.0 // indirect ) replace github.com/cert-manager/cert-manager => github.com/openshift/jetstack-cert-manager v1.16.4 diff --git a/go.sum b/go.sum index b3f28eaa8..0078042e4 100644 --- a/go.sum +++ b/go.sum @@ -1,5 +1,7 @@ 4d63.com/gochecknoglobals v0.1.0 h1:zeZSRqj5yCg28tCkIV/z/lWbwvNm5qnKVS15PI8nhD0= 4d63.com/gochecknoglobals v0.1.0/go.mod h1:wfdC5ZjKSPr7CybKEcgJhUOgeAQW1+7WcyK8OvUilfo= +cel.dev/expr v0.19.1 h1:NciYrtDRIR0lNCnH1LFJegdjspNx9fI59O7TWcua/W4= +cel.dev/expr v0.19.1/go.mod h1:MrpN08Q+lEBs+bGYdLxxHkZoUSsCp0nSKTs0nTymJgw= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= @@ -137,8 +139,8 @@ github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/denis-tingaikin/go-header v0.4.3 h1:tEaZKAlqql6SKCY++utLmkPLd6K8IBM20Ha7UVm+mtU= github.com/denis-tingaikin/go-header v0.4.3/go.mod h1:0wOCWuN71D5qIgE2nz9KrKmuYBAC2Mra5RassOIQ2/c= -github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0= -github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= +github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= +github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= github.com/emicklei/go-restful/v3 v3.12.1 h1:PJMDIM/ak7btuL8Ex0iYET9hxM3CI2sjZtzpL63nKAU= @@ -248,8 +250,6 @@ github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfU github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= -github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= @@ -297,12 +297,12 @@ github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4 h1:zwtduBRr5SSW github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4/go.mod h1:Izgrg8RkN3rCIMLGE9CyYmU9pY2Jer6DgANEnZ/L/cQ= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= 
-github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4= -github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= -github.com/google/cel-go v0.20.1 h1:nDx9r8S3L4pE61eDdt8igGj8rf5kjYR3ILxWIpWNi84= -github.com/google/cel-go v0.20.1/go.mod h1:kWcIzTsPX0zmQ+H3TirHstLLf9ep5QTsZBN9u4dOYLg= -github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= -github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= +github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg= +github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= +github.com/google/cel-go v0.22.0 h1:b3FJZxpiv1vTMo2/5RDUqAHPxkT8mmMfJIrq1llbf7g= +github.com/google/cel-go v0.22.0/go.mod h1:BuznPXXfQDpXKWQ9sPW3TzlAJN5zzFe+i9tIs0yC4s8= +github.com/google/gnostic-models v0.6.9 h1:MU/8wDLif2qCXZmzncUQ/BOfxWfthHi63KqpoNbWqVw= +github.com/google/gnostic-models v0.6.9/go.mod h1:CiWsm0s6BSQd1hRn8/QmxqB6BesYcbSZxsz9b0KuDBw= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= @@ -315,8 +315,8 @@ github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= -github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= +github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/google/go-jsonnet v0.17.0 h1:/9NIEfhK1NQRKl3sP2536b2+x5HnZMdql7x3yK/l8JY= github.com/google/go-jsonnet v0.17.0/go.mod h1:sOcuej3UW1vpPTZOr8L7RQimqai1a57bt5j22LzGZCw= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -337,8 +337,8 @@ github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLe github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20211214055906-6f57359322fd/go.mod h1:KgnwoLYCZ8IQu3XUZ8Nc/bM9CCZFOyjUNOSygVozoDg= github.com/google/pprof v0.0.0-20240227163752-401108e1b7e7/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik= -github.com/google/pprof v0.0.0-20240727154555-813a5fbdbec8 h1:FKHo8hFI3A+7w0aUQuYXQ+6EN5stWmeY/AZqtM8xk9k= -github.com/google/pprof v0.0.0-20240727154555-813a5fbdbec8/go.mod h1:K1liHPHnj73Fdn/EKuT8nrFqBihUSKXoLYU0BuatOYo= +github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db h1:097atOisP2aRj7vFgYQBbFN4U4JNXUNYpxael3UzMyo= +github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= @@ -374,8 +374,8 @@ github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 
h1:Ovs26xHkKqVztRpIrF/92Bcuy github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 h1:bkypFPDjIYGfCYD5mRBvpqxfYX1YCS1PXdKYWi8FsN0= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod h1:P+Lt/0by1T8bfcF3z737NnSbmxQAppXMRziHUxPOC8k= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0 h1:TmHmbvxPmaegwhDubVz0lICL0J5Ka2vwTzhoePEXsGE= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0/go.mod h1:qztMSjm835F2bXf+5HKAPIS5qsmQDqZna/PgVt4rWtI= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= @@ -394,8 +394,6 @@ github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1: github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20210905161508-09a460cdf81d/go.mod h1:aYm2/VgdVmcIU8iMfdMvDMsRAQjcfZSKFby6HOFvi/w= github.com/ianlancetaylor/demangle v0.0.0-20230524184225-eabc099b10ab/go.mod h1:gx7rwoVhcfuVKG5uya9Hs3Sxj7EIvldVofAWIUtGouw= -github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4= -github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/jgautheron/goconst v1.5.1 h1:HxVbL1MhydKs8R8n/HE5NPvzfaYmQJA3o879lE4+WcM= @@ -405,8 +403,8 @@ github.com/jingyugao/rowserrcheck v1.1.1/go.mod h1:4yvlZSDb3IyDTUZJUmpZfm2Hwok+D github.com/jirfag/go-printf-func-name v0.0.0-20200119135958-7558a9eaa5af h1:KA9BjwUk7KlCh6S9EAGWBt1oExIUv9WyNCiRz5amv48= github.com/jirfag/go-printf-func-name v0.0.0-20200119135958-7558a9eaa5af/go.mod h1:HEWGJkRDzjJY2sqdDwxccsGicWEf9BQOZsq2tV+xzM0= github.com/jmoiron/sqlx v1.2.0/go.mod h1:1FEQNm3xlJgrMD+FBdI9+xvCksHtbpVBBw5dYhBSsks= -github.com/jonboulle/clockwork v0.2.2 h1:UOGuzwb1PwsrDAObMuhUnj0p5ULPj8V/xJ7Kx9qUBdQ= -github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8= +github.com/jonboulle/clockwork v0.4.0 h1:p4Cf1aMWXnXAUh8lVfewRBx1zaTSYKrKMF2g3ST4RZ4= +github.com/jonboulle/clockwork v0.4.0/go.mod h1:xgRqUGwRcjKCO1vbZUEtSLrqKoPSsUpK7fnezOII0kc= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= @@ -428,8 +426,8 @@ github.com/kisielk/gotool v1.0.0 h1:AV2c/EiW3KqPNT9ZKl07ehoAGi4C5/01Cfbblndcapg= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/kkHAIKE/contextcheck v1.1.3 h1:l4pNvrb8JSwRd51ojtcOxOeHJzHek+MtOyXbaR0uvmw= github.com/kkHAIKE/contextcheck v1.1.3/go.mod h1:PG/cwd6c0705/LM0KTr1acO2gORUxkSVWyLJOFW5qoo= -github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA= -github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= 
+github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= +github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= @@ -527,24 +525,24 @@ github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= -github.com/onsi/ginkgo/v2 v2.19.0 h1:9Cnnf7UHo57Hy3k6/m5k3dRfGTMXGvxhHFvkDTCTpvA= -github.com/onsi/ginkgo/v2 v2.19.0/go.mod h1:rlwLi9PilAFJ8jCg9UE1QP6VBpd6/xj3SRC0d6TU0To= -github.com/onsi/gomega v1.34.0 h1:eSSPsPNp6ZpsG8X1OVmOTxig+CblTc4AxpPBykhe2Os= -github.com/onsi/gomega v1.34.0/go.mod h1:MIKI8c+f+QLWk+hxbePD4i0LMJSExPaZOVfkoex4cAo= +github.com/onsi/ginkgo/v2 v2.21.0 h1:7rg/4f3rB88pb5obDgNZrNHrQ4e6WpjonchcpuBRnZM= +github.com/onsi/ginkgo/v2 v2.21.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= +github.com/onsi/gomega v1.35.1 h1:Cwbd75ZBPxFSuZ6T+rN/WCb/gOc6YgFBXLlZLhC7Ds4= +github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= -github.com/openshift/api v0.0.0-20241101202457-04eb3fd119d2 h1:CguNy+2KzhJ3a3i7e4Bgm/ByfQpSSSPYmF9NLZskoUs= -github.com/openshift/api v0.0.0-20241101202457-04eb3fd119d2/go.mod h1:Shkl4HanLwDiiBzakv+con/aMGnVE2MAGvoKp5oyYUo= -github.com/openshift/build-machinery-go v0.0.0-20240613134303-8359781da660 h1:F0zE2bmdVvaEd18VXuGYQdJJ1FYJu4MIDW9PYZWc9No= -github.com/openshift/build-machinery-go v0.0.0-20240613134303-8359781da660/go.mod h1:8jcm8UPtg2mCAsxfqKil1xrmRMI3a+XU2TZ9fF8A7TE= -github.com/openshift/client-go v0.0.0-20241001162912-da6d55e4611f h1:FRc0bVNWprihWS0GqQWzb3dY4dkCwpOP3mDw5NwSoR4= -github.com/openshift/client-go v0.0.0-20241001162912-da6d55e4611f/go.mod h1:KiZi2mJRH1TOJ3FtBDYS6YvUL30s/iIXaGSUrSa36mo= +github.com/openshift/api v0.0.0-20250320170726-75d64d71980b h1:GGuFSHESP0BSOu70AqV4u9IVrjYdaeu4Id+HXRIOvkw= +github.com/openshift/api v0.0.0-20250320170726-75d64d71980b/go.mod h1:yk60tHAmHhtVpJQo3TwVYq2zpuP70iJIFDCmeKMIzPw= +github.com/openshift/build-machinery-go v0.0.0-20250102153059-e85a1a7ecb5c h1:6XcszPFZpan4qll5XbdLll7n1So3IsPn28aw2j1obMo= +github.com/openshift/build-machinery-go v0.0.0-20250102153059-e85a1a7ecb5c/go.mod h1:8jcm8UPtg2mCAsxfqKil1xrmRMI3a+XU2TZ9fF8A7TE= +github.com/openshift/client-go v0.0.0-20250125113824-8e1f0b8fa9a7 h1:4iliLcvr1P9EUMZgIaSNEKNQQzBn+L6PSequlFOuB6Q= +github.com/openshift/client-go v0.0.0-20250125113824-8e1f0b8fa9a7/go.mod h1:2tcufBE4Cu6RNgDCxcUJepa530kGo5GFVfR9BSnndhI= github.com/openshift/jetstack-cert-manager v1.16.4 h1:WbvhOyZOV2kSPsSaTS+QmyyxGE2uGfWwJOgf6jFcZlw= github.com/openshift/jetstack-cert-manager v1.16.4/go.mod h1:6JQ/GAZ6dH+erqS1BbaqorPy8idJzCtWFUmJQBTjo6Q= github.com/openshift/kubernetes-sigs-controller-tools v0.12.1-0.20250220141355-6d2c85031fbc h1:kZVhTvUPWGX+K5hA1LPjxXqQH+mwW3ZthsNKzw6tEgk= 
github.com/openshift/kubernetes-sigs-controller-tools v0.12.1-0.20250220141355-6d2c85031fbc/go.mod h1:80xsUppuf2iNgiThH2bzDIN5204p5E93z+YtNnAJlHA= -github.com/openshift/library-go v0.0.0-20250403134058-7c43fdf96c62 h1:SSb9MKF7czLU0O3HZtMoOXuk/FYfau8ILDnzdciGjEo= -github.com/openshift/library-go v0.0.0-20250403134058-7c43fdf96c62/go.mod h1:l/3SegTa9x+ry2J213bh7+DBofXOOvdrqU4JC9ktJa0= +github.com/openshift/library-go v0.0.0-20250609093359-ccdcf648dd95 h1:h6G+60Frk+tlX3MAcp5dznWAXGIwR2BeqXqttc9w9l0= +github.com/openshift/library-go v0.0.0-20250609093359-ccdcf648dd95/go.mod h1:DAa3BGl0CFtkfJn/g5rU8kDDTErfMVA/QlFm4cvU+MI= github.com/operator-framework/operator-lib v0.11.0 h1:eYzqpiOfq9WBI4Trddisiq/X9BwCisZd3rIzmHRC9Z8= github.com/operator-framework/operator-lib v0.11.0/go.mod h1:RpyKhFAoG6DmKTDIwMuO6pI3LRc8IE9rxEYWy476o6g= github.com/orisano/pixelmatch v0.0.0-20220722002657-fb0b55479cde/go.mod h1:nZgzbfBr3hhjoZnS66nKrHmduYNpc34ny7RK4z5/HM0= @@ -579,8 +577,8 @@ github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5Fsn github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= -github.com/prometheus/client_golang v1.20.4 h1:Tgh3Yr67PaOv/uTqloMsCEdeuFTatm5zIq5+qNN23vI= -github.com/prometheus/client_golang v1.20.4/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= +github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q= +github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= @@ -591,8 +589,8 @@ github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y8 github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= -github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc= -github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8= +github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io= +github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= @@ -618,8 +616,8 @@ github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJ github.com/robfig/cron v1.2.0 h1:ZjScXvvxeQ63Dbyxy76Fj3AT3Ut0aKsyd2/tl3DTMuQ= github.com/robfig/cron v1.2.0/go.mod h1:JGuDeoQd7Z6yL4zQhZ3OPEVHB7fL6Ka6skscFHfmt2k= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rogpeppe/go-internal v1.12.0 
h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= -github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= +github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= +github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= @@ -729,8 +727,8 @@ github.com/uudashr/gocognit v1.0.6 h1:2Cgi6MweCsdB6kpcVQp7EW4U23iBFQWfTXiWlyp842 github.com/uudashr/gocognit v1.0.6/go.mod h1:nAIUuVBnYU7pcninia3BHOvQkpQCeO76Uscky5BOwcY= github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= -github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8= -github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= +github.com/xiang90/probing v0.0.0-20221125231312-a49e3df8f510 h1:S2dVYn90KE98chqDkyE9Z4N61UnQd+KOfgp5Iu53llk= +github.com/xiang90/probing v0.0.0-20221125231312-a49e3df8f510/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/xlab/treeprint v1.2.0 h1:HzHnuAF1plUN2zGlAFHbSQP2qJ0ZAD3XF5XD7OesXRQ= github.com/xlab/treeprint v1.2.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0= github.com/yagipy/maintidx v1.0.0 h1:h5NvIsCz+nRDapQ0exNv4aJ0yXSI0420omVANTv3GJM= @@ -746,48 +744,48 @@ github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1 github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= gitlab.com/bosi/decorder v0.2.3 h1:gX4/RgK16ijY8V+BRQHAySfQAb354T7/xQpDB2n10P0= gitlab.com/bosi/decorder v0.2.3/go.mod h1:9K1RB5+VPNQYtXtTDAzd2OEftsZb1oV0IrJrzChSdGE= -go.etcd.io/bbolt v1.3.9 h1:8x7aARPEXiXbHmtUwAIv7eV2fQFHrLLavdiJ3uzJXoI= -go.etcd.io/bbolt v1.3.9/go.mod h1:zaO32+Ti0PK1ivdPtgMESzuzL2VPoIG1PCQNvOdo/dE= -go.etcd.io/etcd/api/v3 v3.5.14 h1:vHObSCxyB9zlF60w7qzAdTcGaglbJOpSj1Xj9+WGxq0= -go.etcd.io/etcd/api/v3 v3.5.14/go.mod h1:BmtWcRlQvwa1h3G2jvKYwIQy4PkHlDej5t7uLMUdJUU= -go.etcd.io/etcd/client/pkg/v3 v3.5.14 h1:SaNH6Y+rVEdxfpA2Jr5wkEvN6Zykme5+YnbCkxvuWxQ= -go.etcd.io/etcd/client/pkg/v3 v3.5.14/go.mod h1:8uMgAokyG1czCtIdsq+AGyYQMvpIKnSvPjFMunkgeZI= -go.etcd.io/etcd/client/v2 v2.305.13 h1:RWfV1SX5jTU0lbCvpVQe3iPQeAHETWdOTb6pxhd77C8= -go.etcd.io/etcd/client/v2 v2.305.13/go.mod h1:iQnL7fepbiomdXMb3om1rHq96htNNGv2sJkEcZGDRRg= -go.etcd.io/etcd/client/v3 v3.5.14 h1:CWfRs4FDaDoSz81giL7zPpZH2Z35tbOrAJkkjMqOupg= -go.etcd.io/etcd/client/v3 v3.5.14/go.mod h1:k3XfdV/VIHy/97rqWjoUzrj9tk7GgJGH9J8L4dNXmAk= -go.etcd.io/etcd/pkg/v3 v3.5.13 h1:st9bDWNsKkBNpP4PR1MvM/9NqUPfvYZx/YXegsYEH8M= -go.etcd.io/etcd/pkg/v3 v3.5.13/go.mod h1:N+4PLrp7agI/Viy+dUYpX7iRtSPvKq+w8Y14d1vX+m0= -go.etcd.io/etcd/raft/v3 v3.5.13 h1:7r/NKAOups1YnKcfro2RvGGo2PTuizF/xh26Z2CTAzA= -go.etcd.io/etcd/raft/v3 v3.5.13/go.mod h1:uUFibGLn2Ksm2URMxN1fICGhk8Wu96EfDQyuLhAcAmw= -go.etcd.io/etcd/server/v3 v3.5.13 h1:V6KG+yMfMSqWt+lGnhFpP5z5dRUj1BDRJ5k1fQ9DFok= -go.etcd.io/etcd/server/v3 v3.5.13/go.mod h1:K/8nbsGupHqmr5MkgaZpLlH1QdX1pcNQLAkODy44XcQ= +go.etcd.io/bbolt v1.3.11 h1:yGEzV1wPz2yVCLsD8ZAiGHhHVlczyC9d1rP43/VCRJ0= +go.etcd.io/bbolt v1.3.11/go.mod h1:dksAq7YMXoljX0xu6VF5DMZGbhYYoLUalEiSySYAS4I= 
+go.etcd.io/etcd/api/v3 v3.5.21 h1:A6O2/JDb3tvHhiIz3xf9nJ7REHvtEFJJ3veW3FbCnS8= +go.etcd.io/etcd/api/v3 v3.5.21/go.mod h1:c3aH5wcvXv/9dqIw2Y810LDXJfhSYdHQ0vxmP3CCHVY= +go.etcd.io/etcd/client/pkg/v3 v3.5.21 h1:lPBu71Y7osQmzlflM9OfeIV2JlmpBjqBNlLtcoBqUTc= +go.etcd.io/etcd/client/pkg/v3 v3.5.21/go.mod h1:BgqT/IXPjK9NkeSDjbzwsHySX3yIle2+ndz28nVsjUs= +go.etcd.io/etcd/client/v2 v2.305.16 h1:kQrn9o5czVNaukf2A2At43cE9ZtWauOtf9vRZuiKXow= +go.etcd.io/etcd/client/v2 v2.305.16/go.mod h1:h9YxWCzcdvZENbfzBTFCnoNumr2ax3F19sKMqHFmXHE= +go.etcd.io/etcd/client/v3 v3.5.21 h1:T6b1Ow6fNjOLOtM0xSoKNQt1ASPCLWrF9XMHcH9pEyY= +go.etcd.io/etcd/client/v3 v3.5.21/go.mod h1:mFYy67IOqmbRf/kRUvsHixzo3iG+1OF2W2+jVIQRAnU= +go.etcd.io/etcd/pkg/v3 v3.5.16 h1:cnavs5WSPWeK4TYwPYfmcr3Joz9BH+TZ6qoUtz6/+mc= +go.etcd.io/etcd/pkg/v3 v3.5.16/go.mod h1:+lutCZHG5MBBFI/U4eYT5yL7sJfnexsoM20Y0t2uNuY= +go.etcd.io/etcd/raft/v3 v3.5.16 h1:zBXA3ZUpYs1AwiLGPafYAKKl/CORn/uaxYDwlNwndAk= +go.etcd.io/etcd/raft/v3 v3.5.16/go.mod h1:P4UP14AxofMJ/54boWilabqqWoW9eLodl6I5GdGzazI= +go.etcd.io/etcd/server/v3 v3.5.16 h1:d0/SAdJ3vVsZvF8IFVb1k8zqMZ+heGcNfft71ul9GWE= +go.etcd.io/etcd/server/v3 v3.5.16/go.mod h1:ynhyZZpdDp1Gq49jkUg5mfkDWZwXnn3eIqCqtJnrD/s= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0 h1:r6I7RJCN86bpD/FQwedZ0vSixDpwuWREjW9oRMsmqDc= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0/go.mod h1:B9yO6b04uB80CzjedvewuqDhxJxi11s7/GtiGa8bAjI= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 h1:TT4fX+nBOA/+LUkobKGW1ydGcn+G3vRw9+g5HwCphpk= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0/go.mod h1:L7UH0GbB0p47T4Rri3uHjbpCFYrVrwc1I25QhNPiGK8= -go.opentelemetry.io/otel v1.29.0 h1:PdomN/Al4q/lN6iBJEN3AwPvUiHPMlt93c8bqTG5Llw= -go.opentelemetry.io/otel v1.29.0/go.mod h1:N/WtXPs1CNCUEx+Agz5uouwCba+i+bJGFicT8SR4NP8= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 h1:3Q/xZUyC1BBkualc9ROb4G8qkH90LXEIICcs5zv1OYY= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0/go.mod h1:s75jGIWA9OfCMzF0xr+ZgfrB5FEbbV7UuYo32ahUiFI= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0 h1:qFffATk0X+HD+f1Z8lswGiOQYKHRlzfmdJm0wEaVrFA= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0/go.mod h1:MOiCmryaYtc+V0Ei+Tx9o5S1ZjA7kzLucuVuyzBZloQ= -go.opentelemetry.io/otel/metric v1.29.0 h1:vPf/HFWTNkPu1aYeIsc98l4ktOQaL6LeSoeV2g+8YLc= -go.opentelemetry.io/otel/metric v1.29.0/go.mod h1:auu/QWieFVWx+DmQOUMgj0F8LHWdgalxXqvp7BII/W8= -go.opentelemetry.io/otel/sdk v1.28.0 h1:b9d7hIry8yZsgtbmM0DKyPWMMUMlK9NEKuIG4aBqWyE= -go.opentelemetry.io/otel/sdk v1.28.0/go.mod h1:oYj7ClPUA7Iw3m+r7GeEjz0qckQRJK2B8zjcZEfu7Pg= -go.opentelemetry.io/otel/trace v1.29.0 h1:J/8ZNK4XgR7a21DZUAsbF8pZ5Jcw1VhACmnYt39JTi4= -go.opentelemetry.io/otel/trace v1.29.0/go.mod h1:eHl3w0sp3paPkYstJOmAimxhiFXPg+MMTlEh3nsQgWQ= -go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0= -go.opentelemetry.io/proto/otlp v1.3.1/go.mod 
h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8= -go.starlark.net v0.0.0-20230525235612-a134d8f9ddca h1:VdD38733bfYv5tUZwEIskMM93VanwNIi5bIKnDrJdEY= -go.starlark.net v0.0.0-20230525235612-a134d8f9ddca/go.mod h1:jxU+3+j+71eXOW14274+SmmuW82qJzl6iZSeqEtTGds= +go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= +go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.58.0 h1:PS8wXpbyaDJQ2VDHHncMe9Vct0Zn1fEjpsjrLxGJoSc= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.58.0/go.mod h1:HDBUsEjOuRC0EzKZ1bSaRGZWUBAzo+MhAcUUORSr4D0= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 h1:yd02MEjBdJkG3uabWP9apV+OuWRIXGDuJEUJbOHmCFU= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0/go.mod h1:umTcuxiv1n/s/S6/c2AT/g2CQ7u5C59sHDNmfSwgz7Q= +go.opentelemetry.io/otel v1.33.0 h1:/FerN9bax5LoK51X/sI0SVYrjSE0/yUL7DpxW4K3FWw= +go.opentelemetry.io/otel v1.33.0/go.mod h1:SUUkR6csvUQl+yjReHu5uM3EtVV7MBm5FHKRlNx4I8I= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0 h1:Vh5HayB/0HHfOQA7Ctx69E/Y/DcQSMPpKANYVMQ7fBA= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0/go.mod h1:cpgtDBaqD/6ok/UG0jT15/uKjAY8mRA53diogHBg3UI= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.33.0 h1:5pojmb1U1AogINhN3SurB+zm/nIcusopeBNp42f45QM= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.33.0/go.mod h1:57gTHJSE5S1tqg+EKsLPlTWhpHMsWlVmer+LA926XiA= +go.opentelemetry.io/otel/metric v1.33.0 h1:r+JOocAyeRVXD8lZpjdQjzMadVZp2M4WmQ+5WtEnklQ= +go.opentelemetry.io/otel/metric v1.33.0/go.mod h1:L9+Fyctbp6HFTddIxClbQkjtubW6O9QS3Ann/M82u6M= +go.opentelemetry.io/otel/sdk v1.33.0 h1:iax7M131HuAm9QkZotNHEfstof92xM+N8sr3uHXc2IM= +go.opentelemetry.io/otel/sdk v1.33.0/go.mod h1:A1Q5oi7/9XaMlIWzPSxLRWOI8nG3FnzHJNbiENQuihM= +go.opentelemetry.io/otel/trace v1.33.0 h1:cCJuF7LRjUFso9LPnEAHJDB2pqzp+hbO8eu1qqW2d/s= +go.opentelemetry.io/otel/trace v1.33.0/go.mod h1:uIcdVUZMpTAmz0tI1z04GoVSezK37CbGV4fr1f2nBck= +go.opentelemetry.io/proto/otlp v1.4.0 h1:TA9WRvW6zMwP+Ssb6fLoUIuirti1gGbP28GcKG1jgeg= +go.opentelemetry.io/proto/otlp v1.4.0/go.mod h1:PPBWZIP98o2ElSqI35IHfu7hIhSwvc5N38Jw8pXuGFY= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= @@ -803,8 +801,8 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.32.0 h1:euUpcYgM8WcP71gNpTqQCn6rC2t6ULUPiOzfWaXVVfc= -golang.org/x/crypto v0.32.0/go.mod h1:ZnnJkOaASj8g0AjIduWNlq2NRxL0PlBrbKVyZ6V/Ugc= +golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34= +golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp 
v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -847,8 +845,8 @@ golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/mod v0.20.0 h1:utOm6MM3R3dnawAiJgn0y+xvuYRsm1RKM/4giyfDgV0= -golang.org/x/mod v0.20.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.21.0 h1:vvrHzRwRfVKSiLrG+d4FMl/Qi4ukBCE6kZlTUkDYRT0= +golang.org/x/mod v0.21.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -886,8 +884,8 @@ golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96b golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.34.0 h1:Mb7Mrk043xzHgnRM88suvJFwzVrRfHEHJEl5/71CKw0= -golang.org/x/net v0.34.0/go.mod h1:di0qlW3YNM5oh6GqDGQr92MyTozJPmybPK4Ev/Gm31k= +golang.org/x/net v0.38.0 h1:vRMAPTMaeGqVhG5QyLJHqNDwecKTomGeqbnfZyKlBI8= +golang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -898,8 +896,8 @@ golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs= -golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/oauth2 v0.27.0 h1:da9Vo7/tDv5RH/7nZDz1eMGS/q1Vv1N/7FCrBhI9I3M= +golang.org/x/oauth2 v0.27.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -912,8 +910,8 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ= -golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw= +golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -972,13 +970,12 @@ golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU= -golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik= +golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.0.0-20220526004731-065cf7ba2467/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.28.0 h1:/Ts8HFuMR2E6IP/jlo7QVLZHggjKQbhu/7H0LJFr3Gg= -golang.org/x/term v0.28.0/go.mod h1:Sw/lC2IAUZ92udQNf3WodGtn4k/XoLyZoh8v/8uiwek= +golang.org/x/term v0.30.0 h1:PQ39fJZ+mfadBm0y5WlL4vlM7Sx1Hgf13sMIY2+QS9Y= +golang.org/x/term v0.30.0/go.mod h1:NYYFdzHoI5wRh/h5tDMdMqCqPJZEuNqVR5xJLd/n67g= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -987,13 +984,13 @@ golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= -golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= +golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY= +golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.6.0 h1:eTDhh4ZXt5Qf0augr54TN6suAUudPcawVZeIAPU7D4U= -golang.org/x/time v0.6.0/go.mod 
h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/time v0.9.0 h1:EsRrnYcQiGH+5FfbgvV4AP7qEZstoyrHB0DzarOQ4ZY= +golang.org/x/time v0.9.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -1075,8 +1072,8 @@ golang.org/x/tools v0.1.9/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E= golang.org/x/tools v0.1.11/go.mod h1:SgwaegtQh8clINPpECJMqnxLv9I09HLqnW3RMqW0CA4= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.24.0 h1:J1shsA93PJUEVaUSaay7UXAyE8aimq3GW0pjlolpa24= -golang.org/x/tools v0.24.0/go.mod h1:YhNqVBIfWHdzvTLs0d8LCuMhkKUgSUKldakyV7W/WDQ= +golang.org/x/tools v0.26.0 h1:v/60pFQmzmT9ExmjDv2gGIfi3OqfKoEP6I5+umXlbnQ= +golang.org/x/tools v0.26.0/go.mod h1:TPVVj70c7JJ3WCazhD8OdXcZg/og+b9+tH/KxylGwH0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -1145,12 +1142,12 @@ google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d h1:VBu5YqKPv6XiJ199exd8Br+Aetz+o08F+PLMnwJQHAY= -google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d/go.mod h1:yZTlhN0tQnXo3h00fuXNCxJdLdIdnVFVBaRJ5LWBbw4= -google.golang.org/genproto/googleapis/api v0.0.0-20240827150818-7e3bb234dfed h1:3RgNmBoI9MZhsj3QxC+AP/qQhNwpCLOvYDYYsFrhFt0= -google.golang.org/genproto/googleapis/api v0.0.0-20240827150818-7e3bb234dfed/go.mod h1:OCdP9MfskevB/rbYvHTsXTtKC+3bHWajPdoKgjcYkfo= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 h1:pPJltXNxVzT4pK9yD8vR9X75DaWYYmLGMsEvBfFQZzQ= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= +google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80 h1:KAeGQVN3M9nD0/bQXnr/ClcEMJ968gUXJQ9pwfSynuQ= +google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80/go.mod h1:cc8bqMqtv9gMOr0zHg2Vzff5ULhhL2IXP4sbcn32Dro= +google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576 h1:CkkIfIt50+lT6NHAVoRYEyAvQGFM7xEwXUUywFvEb3Q= +google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576/go.mod h1:1R3kvZ1dtP3+4p4d3G8uJ8rFk/fWlScl38vanWACI08= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241209162323-e6fa225c2576 h1:8ZmaLZE4XWrtU3MyClkYqqtl6Oegr3235h7jxsDyqCY= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241209162323-e6fa225c2576/go.mod h1:5uTbfoYQed2U9p3KIj2/Zzm02PYhndfdmML0qC3q3FU= google.golang.org/grpc v1.19.0/go.mod 
h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= @@ -1167,8 +1164,8 @@ google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.66.2 h1:3QdXkuq3Bkh7w+ywLdLvM56cmGvQHUMZpiCzt6Rqaoo= -google.golang.org/grpc v1.66.2/go.mod h1:s3/l6xSSCURdVfAnL+TqCNMyTDAGN6+lZeVxnZR128Y= +google.golang.org/grpc v1.68.1 h1:oI5oTa11+ng8r8XMMN7jAOmWfPZWbYpCFaMUTACxkM0= +google.golang.org/grpc v1.68.1/go.mod h1:+q1XYFJjShcqn0QZHvCyeR4CXPA+llXIeUIfIe00waw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -1181,8 +1178,8 @@ google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= -google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= +google.golang.org/protobuf v1.36.5 h1:tPhr+woSbjfYvY6/GPufUoYizxw1cF/yFoxJ2fmpwlM= +google.golang.org/protobuf v1.36.5/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -1222,42 +1219,42 @@ honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9 honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.3.3 h1:oDx7VAwstgpYpb3wv0oxiZlxY+foCpRAwY7Vk6XpAgA= honnef.co/go/tools v0.3.3/go.mod h1:jzwdWgg7Jdq75wlfblQxO4neNaFFSvgc1tD5Wv8U0Yw= -k8s.io/api v0.31.7 h1:wSo59nXpVXmaB6hgNVJCrdnKtyYoutIgpNNBbROBd2U= -k8s.io/api v0.31.7/go.mod h1:vLUha4nXRUGtQdayzsmjur0lQApK/sJSxyR/fwuujcU= -k8s.io/apiextensions-apiserver v0.31.1 h1:L+hwULvXx+nvTYX/MKM3kKMZyei+UiSXQWciX/N6E40= -k8s.io/apiextensions-apiserver v0.31.1/go.mod h1:tWMPR3sgW+jsl2xm9v7lAyRF1rYEK71i9G5dRtkknoQ= -k8s.io/apimachinery v0.31.7 h1:fpV8yLerIZFAkj0of66+i1ArPv/Btf9KO6Aulng7RRw= -k8s.io/apimachinery v0.31.7/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo= -k8s.io/apiserver v0.31.7 h1:L1JQgfdQfvSZNBhXJljv9ekc/JirAZAmR3B2Np5mSKk= -k8s.io/apiserver v0.31.7/go.mod h1:IAHYtQWod93amFVHuFlPJAZkYF+aC5qblgDNsSvP9mQ= -k8s.io/client-go v0.31.7 h1:2+LFJc6Xw6rhmpDbN1NSmhoFLWBh62cPG/P+IfaTSGY= -k8s.io/client-go v0.31.7/go.mod h1:hrrMorBQ17LqzoKIxKg5cSWvmWl94EwA/MUF0Mkf+Zw= +k8s.io/api v0.32.5 h1:uqjjsYo1kTJr5NIcoIaP9F+TgXgADH7nKQx91FDAhtk= +k8s.io/api v0.32.5/go.mod 
h1:bXXFU3fGCZ/eFMZvfHZC69PeGbXEL4zzjuPVzOxHF64= +k8s.io/apiextensions-apiserver v0.32.1 h1:hjkALhRUeCariC8DiVmb5jj0VjIc1N0DREP32+6UXZw= +k8s.io/apiextensions-apiserver v0.32.1/go.mod h1:sxWIGuGiYov7Io1fAS2X06NjMIk5CbRHc2StSmbaQto= +k8s.io/apimachinery v0.32.5 h1:6We3aJ6crC0ap8EhsEXcgX3LpI6SEjubpiOMXLROwPM= +k8s.io/apimachinery v0.32.5/go.mod h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE= +k8s.io/apiserver v0.32.5 h1:phmm2EOUVFI+cLiq8Grtuh166fTt/qgvkGPkpgzp5uY= +k8s.io/apiserver v0.32.5/go.mod h1:5bfueS1tgARVWVXRJBMI5mHoCmev0jOvbxebai/kiqc= +k8s.io/client-go v0.32.5 h1:huFmQMzgWu0z4kbWsuZci+Gt4Fo72I4CcrvhToZ/Qp0= +k8s.io/client-go v0.32.5/go.mod h1:Qchw6f9WIVrur7DKojAHpRgGLcANT0RLIvF39Jz58xA= k8s.io/cloud-provider v0.30.1 h1:OslHpog97zG9Kr7/vV1ki8nLKq8xTPUkN/kepCxBqKI= k8s.io/cloud-provider v0.30.1/go.mod h1:1uZp+FSskXQoeAAIU91/XCO8X/9N1U3z5usYeSLT4MI= -k8s.io/code-generator v0.31.1 h1:GvkRZEP2g2UnB2QKT2Dgc/kYxIkDxCHENv2Q1itioVs= -k8s.io/code-generator v0.31.1/go.mod h1:oL2ky46L48osNqqZAeOcWWy0S5BXj50vVdwOtTefqIs= -k8s.io/component-base v0.31.7 h1:HLGy3757F1xfidwYzrsrL6gqAML4jKiT1rfNTRv1vhw= -k8s.io/component-base v0.31.7/go.mod h1:OzaImRFsLuWLA/KyahAjR/pmG0dhxciGB9Rb1vTgdJ0= -k8s.io/component-helpers v0.30.1 h1:/UcxSLzZ0owluTE2WMDrFfZl2L+WVXKdYYYm68qnH7U= -k8s.io/component-helpers v0.30.1/go.mod h1:b1Xk27UJ3p/AmPqDx7khrnSxrdwQy9gTP7O1y6MZ6rg= -k8s.io/controller-manager v0.30.1 h1:vrpfinHQWGf40U08Zmrt+QxK/2yTgjJl/9DKtjaB1gI= -k8s.io/controller-manager v0.30.1/go.mod h1:8rTEPbn8LRKC/vS+If+JAKBfsftCfTMaF8/n4SJC+PQ= -k8s.io/gengo/v2 v2.0.0-20240826214909-a7b603a56eb7 h1:cErOOTkQ3JW19o4lo91fFurouhP8NcoBvb7CkvhZZpk= -k8s.io/gengo/v2 v2.0.0-20240826214909-a7b603a56eb7/go.mod h1:EJykeLsmFC60UQbYJezXkEsG2FLrt0GPNkU5iK5GWxU= +k8s.io/code-generator v0.32.1 h1:4lw1kFNDuFYXquTkB7Sl5EwPMUP2yyW9hh6BnFfRZFY= +k8s.io/code-generator v0.32.1/go.mod h1:zaILfm00CVyP/6/pJMJ3zxRepXkxyDfUV5SNG4CjZI4= +k8s.io/component-base v0.32.5 h1:2HiX+m3s9Iz5CMqdCVDH2V942UqzQvjuhcXb4W+KCsg= +k8s.io/component-base v0.32.5/go.mod h1:jDsPNFFElv9m27TcYxlpEX7TZ3vdgx2g4PaqMUHpV/Y= +k8s.io/component-helpers v0.32.1 h1:TwdsSM1vW9GjnfX18lkrZbwE5G9psCIS2/rhenTDXd8= +k8s.io/component-helpers v0.32.1/go.mod h1:1JT1Ei3FD29yFQ18F3laj1WyvxYdHIhyxx6adKMFQXI= +k8s.io/controller-manager v0.32.5 h1:XeFdbhnpvSMr4WI1xASgYj4Eqt9OTcPh4lmJV88NGAk= +k8s.io/controller-manager v0.32.5/go.mod h1:NDWmzWlHAUBLDwtavRsF5O48ZGuLJezT8m82ehI7s+Y= +k8s.io/gengo/v2 v2.0.0-20240911193312-2b36238f13e9 h1:si3PfKm8dDYxgfbeA6orqrtLkvvIeH8UqffFJDl0bz4= +k8s.io/gengo/v2 v2.0.0-20240911193312-2b36238f13e9/go.mod h1:EJykeLsmFC60UQbYJezXkEsG2FLrt0GPNkU5iK5GWxU= k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= -k8s.io/kms v0.31.7 h1:HSlveij+SIcCcDKugh/lFrnaooVgTIX5G6ircyK+e3Q= -k8s.io/kms v0.31.7/go.mod h1:OZKwl1fan3n3N5FFxnW5C4V3ygrah/3YXeJWS3O6+94= -k8s.io/kube-aggregator v0.31.1 h1:vrYBTTs3xMrpiEsmBjsLETZE9uuX67oQ8B3i1BFfMPw= -k8s.io/kube-aggregator v0.31.1/go.mod h1:+aW4NX50uneozN+BtoCxI4g7ND922p8Wy3tWKFDiWVk= -k8s.io/kube-openapi v0.0.0-20240903163716-9e1beecbcb38 h1:1dWzkmJrrprYvjGwh9kEUxmcUV/CtNU8QM7h1FLWQOo= -k8s.io/kube-openapi v0.0.0-20240903163716-9e1beecbcb38/go.mod h1:coRQXBK9NxO98XUv3ZD6AK3xzHCxV6+b7lrquKwaKzA= +k8s.io/kms v0.32.5 h1:wnvdGfSniCyrXa3ukM7hTNlZPO05U8IJHa2scqYRYm4= +k8s.io/kms v0.32.5/go.mod h1:Bk2evz/Yvk0oVrvm4MvZbgq8BD34Ksxs2SRHn4/UiOM= +k8s.io/kube-aggregator v0.32.1 h1:cztPyIHbo6tgrhYHDqmdmvxUufJKuxgAC/vog7yeWek= +k8s.io/kube-aggregator 
v0.32.1/go.mod h1:sXjL5T8FO/rlBzTbBhahw9V5Nnr1UtzZHKTj9WxQCOU= +k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff h1:/usPimJzUKKu+m+TE36gUyGcf03XZEP0ZIKgKj35LS4= +k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff/go.mod h1:5jIi+8yX4RIb8wk3XwBo5Pq2ccx4FP10ohkbSKCZoK8= k8s.io/kubelet v0.30.1 h1:6gS1gWjrefUGfC/9n0ITOzxnKyt89FfkIhom70Bola4= k8s.io/kubelet v0.30.1/go.mod h1:5IUeAt3YlIfLNdT/YfRuCCONfEefm7qfcqz81b002Z8= -k8s.io/kubernetes v1.31.7 h1:3uCu7kNQxNBVbbqqe0pPjbW+h8L9aT01rD0bg+NdS5I= -k8s.io/kubernetes v1.31.7/go.mod h1:9xmT2buyTYj8TRKwRae7FcuY8k5+xlxv7VivvO0KKfs= -k8s.io/utils v0.0.0-20240921022957-49e7df575cb6 h1:MDF6h2H/h4tbzmtIKTuctcwZmY0tY9mD9fNT47QO6HI= -k8s.io/utils v0.0.0-20240921022957-49e7df575cb6/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +k8s.io/kubernetes v1.32.5 h1:PmYLXqBh09l3ez5xA8OqzVTYmjsF12+b8DSQel6phtQ= +k8s.io/kubernetes v1.32.5/go.mod h1:GvhiBeolvSRzBpFlgM0z/Bbu3Oxs9w3P6XfEgYaMi8k= +k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 h1:M3sRQVHv7vB20Xc2ybTt7ODCeFj6JSWYFzOFnYeS6Ro= +k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= mvdan.cc/gofumpt v0.4.0 h1:JVf4NN1mIpHogBj7ABpgOyZc65/UUOkKQFkoURsz4MM= mvdan.cc/gofumpt v0.4.0/go.mod h1:PljLOHDeZqgS8opHRKLzp2It2VBuSdteAgqUfzMTxlQ= mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed h1:WX1yoOaKQfddO/mLzdV4wptyWgoH/6hwLs7QHTixo0I= @@ -1269,27 +1266,30 @@ mvdan.cc/unparam v0.0.0-20220706161116-678bad134442/go.mod h1:F/Cxw/6mVrNKqrR2Yj rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.30.3 h1:2770sDpzrjjsAtVhSeUFseziht227YAWYHLGNM8QPwY= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.30.3/go.mod h1:Ve9uj1L+deCXFrPOk1LpFXqTg7LCFzFso6PA48q/XZw= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2 h1:jpcvIRr3GLoUoEKRkHKSmGjxb6lWwrBlJsXc+eUYQHM= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2/go.mod h1:Ve9uj1L+deCXFrPOk1LpFXqTg7LCFzFso6PA48q/XZw= sigs.k8s.io/controller-runtime v0.19.0 h1:nWVM7aq+Il2ABxwiCizrVDSlmDcshi9llbaFbC0ji/Q= sigs.k8s.io/controller-runtime v0.19.0/go.mod h1:iRmWllt8IlaLjvTTDLhRBXIEtkCK6hwVBJJsYS9Ajf4= sigs.k8s.io/controller-runtime/tools/setup-envtest v0.0.0-20230912183013-811757733433 h1:FXcZd0UUR/JUn1Lo3LuKWQuGK2xg4M9FKvJ7AAOA3JU= sigs.k8s.io/controller-runtime/tools/setup-envtest v0.0.0-20230912183013-811757733433/go.mod h1:B6HLcvOy2S1qq2eWOFm9xepiKPMIc8Z9OXSPsnUDaR4= sigs.k8s.io/gateway-api v1.1.0 h1:DsLDXCi6jR+Xz8/xd0Z1PYl2Pn0TyaFMOPPZIj4inDM= sigs.k8s.io/gateway-api v1.1.0/go.mod h1:ZH4lHrL2sDi0FHZ9jjneb8kKnGzFWyrTya35sWUTrRs= -sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= -sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= +sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8= +sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo= sigs.k8s.io/kube-storage-version-migrator v0.0.6-0.20230721195810-5c8923c5ff96 h1:PFWFSkpArPNJxFX4ZKWAk9NSeRoZaXschn+ULa4xVek= sigs.k8s.io/kube-storage-version-migrator v0.0.6-0.20230721195810-5c8923c5ff96/go.mod h1:EOBQyBowOUsd7U4CJnMHNE0ri+zCXyouGdLwC/jZU+I= -sigs.k8s.io/kustomize/api v0.17.2 
h1:E7/Fjk7V5fboiuijoZHgs4aHuexi5Y2loXlVOAVAG5g= -sigs.k8s.io/kustomize/api v0.17.2/go.mod h1:UWTz9Ct+MvoeQsHcJ5e+vziRRkwimm3HytpZgIYqye0= -sigs.k8s.io/kustomize/cmd/config v0.14.1 h1:r1gRhgfPmnt7VYf4uxO8M27GX406n9kOOeScOH9IQds= -sigs.k8s.io/kustomize/cmd/config v0.14.1/go.mod h1:Sw1cPsFqh4uYczCWKlidPgMrsffLPCAB+7ytYLlauY4= -sigs.k8s.io/kustomize/kustomize/v5 v5.4.2 h1:9Zl5Gqg3XMdBEvkR54pVLCBj7FVO7W+VPNDDEzD6AyE= -sigs.k8s.io/kustomize/kustomize/v5 v5.4.2/go.mod h1:5ypfJVYlPb2MKKeoGknVLxvHemDlQT+szI4+KOhnD6k= -sigs.k8s.io/kustomize/kyaml v0.17.1 h1:TnxYQxFXzbmNG6gOINgGWQt09GghzgTP6mIurOgrLCQ= -sigs.k8s.io/kustomize/kyaml v0.17.1/go.mod h1:9V0mCjIEYjlXuCdYsSXvyoy2BTsLESH7TlGV81S282U= -sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= -sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= +sigs.k8s.io/kustomize/api v0.18.0 h1:hTzp67k+3NEVInwz5BHyzc9rGxIauoXferXyjv5lWPo= +sigs.k8s.io/kustomize/api v0.18.0/go.mod h1:f8isXnX+8b+SGLHQ6yO4JG1rdkZlvhaCf/uZbLVMb0U= +sigs.k8s.io/kustomize/cmd/config v0.15.0 h1:WkdY8V2+8J+W00YbImXa2ke9oegfrHH79e+kywW7EdU= +sigs.k8s.io/kustomize/cmd/config v0.15.0/go.mod h1:Jq57b0nPaoYUlOqg//0JtAh6iibboqMcfbtCYoWPM00= +sigs.k8s.io/kustomize/kustomize/v5 v5.5.0 h1:o1mtt6vpxsxDYaZKrw3BnEtc+pAjLz7UffnIvHNbvW0= +sigs.k8s.io/kustomize/kustomize/v5 v5.5.0/go.mod h1:AeFCmgCrXzmvjWWaeZCyBp6XzG1Y0w1svYus8GhJEOE= +sigs.k8s.io/kustomize/kyaml v0.18.1 h1:WvBo56Wzw3fjS+7vBjN6TeivvpbW9GmRaWZ9CIVmt4E= +sigs.k8s.io/kustomize/kyaml v0.18.1/go.mod h1:C3L2BFVU1jgcddNBE1TxuVLgS46TjObMwW5FT9FcjYo= +sigs.k8s.io/randfill v0.0.0-20250304075658-069ef1bbf016/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= +sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU= +sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= +sigs.k8s.io/structured-merge-diff/v4 v4.6.0 h1:IUA9nvMmnKWcj5jl84xn+T5MnlZKThmUW1TdblaLVAc= +sigs.k8s.io/structured-merge-diff/v4 v4.6.0/go.mod h1:dDy58f92j70zLsuZVuUX5Wp9vtxXpaZnkPGWeqDfCps= sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= diff --git a/pkg/controller/deployment/cert_manager_controller_set.go b/pkg/controller/deployment/cert_manager_controller_set.go index c8b449b01..1e892f479 100644 --- a/pkg/controller/deployment/cert_manager_controller_set.go +++ b/pkg/controller/deployment/cert_manager_controller_set.go @@ -21,6 +21,7 @@ type CertManagerControllerSet struct { certManagerWebhookDeploymentController factory.Controller certManagerCAInjectorStaticResourcesController factory.Controller certManagerCAInjectorDeploymentController factory.Controller + certManagerOperatorStaticResourcesController factory.Controller } func NewCertManagerControllerSet( @@ -44,6 +45,7 @@ func NewCertManagerControllerSet( certManagerWebhookDeploymentController: NewCertManagerWebhookDeploymentController(operatorClient, certManagerOperatorInformers, infraInformers, kubeClient, kubeInformersForTargetNamespace, eventRecorder, targetVersion, versionRecorder, trustedCAConfigmapName, cloudCredentialsSecretName), certManagerCAInjectorStaticResourcesController: NewCertManagerCAInjectorStaticResourcesController(operatorClient, kubeClientContainer, kubeInformersForNamespaces, eventRecorder), certManagerCAInjectorDeploymentController: NewCertManagerCAInjectorDeploymentController(operatorClient, certManagerOperatorInformers, infraInformers, kubeClient, 
kubeInformersForTargetNamespace, eventRecorder, targetVersion, versionRecorder, trustedCAConfigmapName, cloudCredentialsSecretName),
+		certManagerOperatorStaticResourcesController: NewCertManagerOperatorStaticResourcesController(operatorClient, kubeClientContainer, kubeInformersForNamespaces, eventRecorder),
 	}
 }
@@ -55,5 +57,6 @@ func (c *CertManagerControllerSet) ToArray() []factory.Controller {
 		c.certManagerWebhookDeploymentController,
 		c.certManagerCAInjectorStaticResourcesController,
 		c.certManagerCAInjectorDeploymentController,
+		c.certManagerOperatorStaticResourcesController,
 	}
 }
diff --git a/pkg/controller/deployment/cert_manager_operator_static_resource.go b/pkg/controller/deployment/cert_manager_operator_static_resource.go
new file mode 100644
index 000000000..c199a5753
--- /dev/null
+++ b/pkg/controller/deployment/cert_manager_operator_static_resource.go
@@ -0,0 +1,55 @@
+package deployment
+
+import (
+	"github.com/openshift/cert-manager-operator/pkg/operator/operatorclient"
+	"github.com/openshift/library-go/pkg/controller/factory"
+	"github.com/openshift/library-go/pkg/operator/events"
+	"github.com/openshift/library-go/pkg/operator/resource/resourceapply"
+	"github.com/openshift/library-go/pkg/operator/staticresourcecontroller"
+	"github.com/openshift/library-go/pkg/operator/v1helpers"
+
+	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+	"sigs.k8s.io/yaml"
+
+	"github.com/openshift/cert-manager-operator/pkg/operator/assets"
+)
+
+const (
+	certManagerOperatorStaticResourcesControllerName = operatorName + "-operator-static-resources-"
+)
+
+var (
+	certManagerOperatorAssetFiles = []string{
+		"cert-manager-deployment/network-policy/operator-allow-egress-to-api-server.yaml",
+		"cert-manager-deployment/network-policy/operator-allow-ingress-to-metrics.yaml",
+		"cert-manager-deployment/network-policy/operator-deny-all-pod-selector.yaml",
+	}
+)
+
+func NewCertManagerOperatorStaticResourcesController(operatorClient v1helpers.OperatorClient,
+	kubeClientContainer *resourceapply.ClientHolder,
+	kubeInformersForNamespaces v1helpers.KubeInformersForNamespaces,
+	eventsRecorder events.Recorder,
+) factory.Controller {
+	return staticresourcecontroller.NewStaticResourceController(
+		certManagerOperatorStaticResourcesControllerName,
+		injectNamespace(operatorclient.OperatorNamespace),
+		certManagerOperatorAssetFiles,
+		kubeClientContainer,
+		operatorClient,
+		eventsRecorder,
+	).AddKubeInformers(kubeInformersForNamespaces)
+}
+
+func injectNamespace(namespace string) resourceapply.AssetFunc {
+	return func(name string) ([]byte, error) {
+		content := assets.MustAsset(name)
+		var obj unstructured.Unstructured
+		err := yaml.Unmarshal(content, &obj)
+		if err != nil {
+			return nil, err
+		}
+		obj.SetNamespace(namespace)
+		return yaml.Marshal(&obj)
+	}
+}
diff --git a/pkg/controller/deployment/certmanager_controller.go b/pkg/controller/deployment/certmanager_controller.go
index cce21be95..70dbbcf30 100644
--- a/pkg/controller/deployment/certmanager_controller.go
+++ b/pkg/controller/deployment/certmanager_controller.go
@@ -54,7 +54,7 @@ type CertManagerReconciler struct {
 //+kubebuilder:rbac:groups="certificates.k8s.io",resources=signers,verbs=get;list;watch;create;update;patch;delete;sign
 //+kubebuilder:rbac:groups="cert-manager.io",resources=signers,resourceNames=clusterissuers.cert-manager.io/*;issuers.cert-manager.io/*,verbs=approve
//+kubebuilder:rbac:groups="gateway.networking.k8s.io",resources=gateways;gateways/finalizers;httproutes;httproutes/finalizers,verbs=get;list;watch;create;update;patch;delete -//+kubebuilder:rbac:groups="networking.k8s.io",resources=ingresses;ingresses/finalizers,verbs=get;list;watch;create;update;patch;delete +//+kubebuilder:rbac:groups="networking.k8s.io",resources=ingresses;ingresses/finalizers;networkpolicies,verbs=get;list;watch;create;update;patch;delete //+kubebuilder:rbac:groups="coordination.k8s.io",resources=leases,verbs=get;list;watch;create;update;patch;delete //+kubebuilder:rbac:groups="apiregistration.k8s.io",resources=apiservices,verbs=get;list;watch;create;update;patch;delete //+kubebuilder:rbac:groups="acme.cert-manager.io",resources=challenges;challenges/finalizers;challenges/status,verbs=get;list;watch;create;update;patch;delete diff --git a/pkg/controller/istiocsr/core_validation_helpers_duplication.go b/pkg/controller/istiocsr/core_validation_helpers_duplication.go index 5c7d09a4b..166286920 100644 --- a/pkg/controller/istiocsr/core_validation_helpers_duplication.go +++ b/pkg/controller/istiocsr/core_validation_helpers_duplication.go @@ -42,7 +42,8 @@ func validateNodeAffinity(na *core.NodeAffinity, fldPath *field.Path) field.Erro // allErrs = append(allErrs, ValidateNodeSelector(na.RequiredDuringSchedulingRequiredDuringExecution, fldPath.Child("requiredDuringSchedulingRequiredDuringExecution"))...) // } if na.RequiredDuringSchedulingIgnoredDuringExecution != nil { - allErrs = append(allErrs, corevalidation.ValidateNodeSelector(na.RequiredDuringSchedulingIgnoredDuringExecution, fldPath.Child("requiredDuringSchedulingIgnoredDuringExecution"))...) + allErrs = append(allErrs, corevalidation.ValidateNodeSelector(na.RequiredDuringSchedulingIgnoredDuringExecution, true, fldPath.Child("requiredDuringSchedulingIgnoredDuringExecution"))...) + } if len(na.PreferredDuringSchedulingIgnoredDuringExecution) > 0 { allErrs = append(allErrs, corevalidation.ValidatePreferredSchedulingTerms(na.PreferredDuringSchedulingIgnoredDuringExecution, fldPath.Child("preferredDuringSchedulingIgnoredDuringExecution"))...) diff --git a/pkg/controller/istiocsr/deployments.go b/pkg/controller/istiocsr/deployments.go index 8682531bf..640e875a9 100644 --- a/pkg/controller/istiocsr/deployments.go +++ b/pkg/controller/istiocsr/deployments.go @@ -433,7 +433,7 @@ func validateTolerationsConfig(tolerations []corev1.Toleration, fldPath *field.P func validateResourceRequirements(requirements corev1.ResourceRequirements, fldPath *field.Path) error { // convert corev1.ResourceRequirements to core.ResourceRequirements, required for validation. 
convRequirements := *(*core.ResourceRequirements)(unsafe.Pointer(&requirements)) - return corevalidation.ValidateResourceRequirements(&convRequirements, nil, fldPath.Child("resources"), corevalidation.PodValidationOptions{}).ToAggregate() + return corevalidation.ValidateContainerResourceRequirements(&convRequirements, nil, fldPath.Child("resources"), corevalidation.PodValidationOptions{}).ToAggregate() } func validateAffinityRules(affinity *corev1.Affinity, fldPath *field.Path) error { diff --git a/pkg/controller/istiocsr/deployments_test.go b/pkg/controller/istiocsr/deployments_test.go index c45df0a1b..07a3cc936 100644 --- a/pkg/controller/istiocsr/deployments_test.go +++ b/pkg/controller/istiocsr/deployments_test.go @@ -560,7 +560,7 @@ func TestCreateOrApplyDeployments(t *testing.T) { }, } }, - wantErr: "failed to generate deployment resource for creation in istiocsr-test-ns: failed to update pod tolerations: spec.istioCSRConfig.tolerations[0].operator: Invalid value: core.Toleration{Key:\"\", Operator:\"Exists\", Value:\"test\", Effect:\"NoSchedule\", TolerationSeconds:(*int64)(nil)}: value must be empty when `operator` is 'Exists'", + wantErr: "failed to generate deployment resource for creation in istiocsr-test-ns: failed to update pod tolerations: spec.istioCSRConfig.tolerations[0].operator: Invalid value: \"test\": value must be empty when `operator` is 'Exists'", }, { name: "deployment reconciliation with invalid nodeSelector configuration", diff --git a/pkg/operator/applyconfigurations/internal/internal.go b/pkg/operator/applyconfigurations/internal/internal.go index 0d25a588c..ab48e360d 100644 --- a/pkg/operator/applyconfigurations/internal/internal.go +++ b/pkg/operator/applyconfigurations/internal/internal.go @@ -3,8 +3,8 @@ package internal import ( - "fmt" - "sync" + fmt "fmt" + sync "sync" typed "sigs.k8s.io/structured-merge-diff/v4/typed" ) diff --git a/pkg/operator/applyconfigurations/operator/v1alpha1/certmanager.go b/pkg/operator/applyconfigurations/operator/v1alpha1/certmanager.go index d49312327..104cb32f8 100644 --- a/pkg/operator/applyconfigurations/operator/v1alpha1/certmanager.go +++ b/pkg/operator/applyconfigurations/operator/v1alpha1/certmanager.go @@ -69,7 +69,7 @@ func extractCertManager(certManager *operatorv1alpha1.CertManager, fieldManager // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *CertManagerApplyConfiguration) WithKind(value string) *CertManagerApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -77,7 +77,7 @@ func (b *CertManagerApplyConfiguration) WithKind(value string) *CertManagerApply // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *CertManagerApplyConfiguration) WithAPIVersion(value string) *CertManagerApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -86,7 +86,7 @@ func (b *CertManagerApplyConfiguration) WithAPIVersion(value string) *CertManage // If called multiple times, the Name field is set to the value of the last call. 
func (b *CertManagerApplyConfiguration) WithName(value string) *CertManagerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -95,7 +95,7 @@ func (b *CertManagerApplyConfiguration) WithName(value string) *CertManagerApply // If called multiple times, the GenerateName field is set to the value of the last call. func (b *CertManagerApplyConfiguration) WithGenerateName(value string) *CertManagerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -104,7 +104,7 @@ func (b *CertManagerApplyConfiguration) WithGenerateName(value string) *CertMana // If called multiple times, the Namespace field is set to the value of the last call. func (b *CertManagerApplyConfiguration) WithNamespace(value string) *CertManagerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -113,7 +113,7 @@ func (b *CertManagerApplyConfiguration) WithNamespace(value string) *CertManager // If called multiple times, the UID field is set to the value of the last call. func (b *CertManagerApplyConfiguration) WithUID(value types.UID) *CertManagerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -122,7 +122,7 @@ func (b *CertManagerApplyConfiguration) WithUID(value types.UID) *CertManagerApp // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *CertManagerApplyConfiguration) WithResourceVersion(value string) *CertManagerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -131,7 +131,7 @@ func (b *CertManagerApplyConfiguration) WithResourceVersion(value string) *CertM // If called multiple times, the Generation field is set to the value of the last call. func (b *CertManagerApplyConfiguration) WithGeneration(value int64) *CertManagerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } @@ -140,7 +140,7 @@ func (b *CertManagerApplyConfiguration) WithGeneration(value int64) *CertManager // If called multiple times, the CreationTimestamp field is set to the value of the last call. func (b *CertManagerApplyConfiguration) WithCreationTimestamp(value metav1.Time) *CertManagerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } @@ -149,7 +149,7 @@ func (b *CertManagerApplyConfiguration) WithCreationTimestamp(value metav1.Time) // If called multiple times, the DeletionTimestamp field is set to the value of the last call. func (b *CertManagerApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *CertManagerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -158,7 +158,7 @@ func (b *CertManagerApplyConfiguration) WithDeletionTimestamp(value metav1.Time) // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. 
func (b *CertManagerApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *CertManagerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -168,11 +168,11 @@ func (b *CertManagerApplyConfiguration) WithDeletionGracePeriodSeconds(value int // overwriting an existing map entries in Labels field with the same key. func (b *CertManagerApplyConfiguration) WithLabels(entries map[string]string) *CertManagerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -183,11 +183,11 @@ func (b *CertManagerApplyConfiguration) WithLabels(entries map[string]string) *C // overwriting an existing map entries in Annotations field with the same key. func (b *CertManagerApplyConfiguration) WithAnnotations(entries map[string]string) *CertManagerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -201,7 +201,7 @@ func (b *CertManagerApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerR if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -212,7 +212,7 @@ func (b *CertManagerApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerR func (b *CertManagerApplyConfiguration) WithFinalizers(values ...string) *CertManagerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } @@ -242,5 +242,5 @@ func (b *CertManagerApplyConfiguration) WithStatus(value *CertManagerStatusApply // GetName retrieves the value of the Name field in the declarative configuration. func (b *CertManagerApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/pkg/operator/applyconfigurations/operator/v1alpha1/certmanagerspec.go b/pkg/operator/applyconfigurations/operator/v1alpha1/certmanagerspec.go index b69903681..d6097f56c 100644 --- a/pkg/operator/applyconfigurations/operator/v1alpha1/certmanagerspec.go +++ b/pkg/operator/applyconfigurations/operator/v1alpha1/certmanagerspec.go @@ -27,7 +27,7 @@ func CertManagerSpec() *CertManagerSpecApplyConfiguration { // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the ManagementState field is set to the value of the last call. 
func (b *CertManagerSpecApplyConfiguration) WithManagementState(value operatorv1.ManagementState) *CertManagerSpecApplyConfiguration { - b.ManagementState = &value + b.OperatorSpecApplyConfiguration.ManagementState = &value return b } @@ -35,7 +35,7 @@ func (b *CertManagerSpecApplyConfiguration) WithManagementState(value operatorv1 // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the LogLevel field is set to the value of the last call. func (b *CertManagerSpecApplyConfiguration) WithLogLevel(value operatorv1.LogLevel) *CertManagerSpecApplyConfiguration { - b.LogLevel = &value + b.OperatorSpecApplyConfiguration.LogLevel = &value return b } @@ -43,7 +43,7 @@ func (b *CertManagerSpecApplyConfiguration) WithLogLevel(value operatorv1.LogLev // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the OperatorLogLevel field is set to the value of the last call. func (b *CertManagerSpecApplyConfiguration) WithOperatorLogLevel(value operatorv1.LogLevel) *CertManagerSpecApplyConfiguration { - b.OperatorLogLevel = &value + b.OperatorSpecApplyConfiguration.OperatorLogLevel = &value return b } @@ -51,7 +51,7 @@ func (b *CertManagerSpecApplyConfiguration) WithOperatorLogLevel(value operatorv // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the UnsupportedConfigOverrides field is set to the value of the last call. func (b *CertManagerSpecApplyConfiguration) WithUnsupportedConfigOverrides(value runtime.RawExtension) *CertManagerSpecApplyConfiguration { - b.UnsupportedConfigOverrides = &value + b.OperatorSpecApplyConfiguration.UnsupportedConfigOverrides = &value return b } @@ -59,7 +59,7 @@ func (b *CertManagerSpecApplyConfiguration) WithUnsupportedConfigOverrides(value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the ObservedConfig field is set to the value of the last call. func (b *CertManagerSpecApplyConfiguration) WithObservedConfig(value runtime.RawExtension) *CertManagerSpecApplyConfiguration { - b.ObservedConfig = &value + b.OperatorSpecApplyConfiguration.ObservedConfig = &value return b } diff --git a/pkg/operator/applyconfigurations/operator/v1alpha1/certmanagerstatus.go b/pkg/operator/applyconfigurations/operator/v1alpha1/certmanagerstatus.go index a74acc1ca..e92b6ba89 100644 --- a/pkg/operator/applyconfigurations/operator/v1alpha1/certmanagerstatus.go +++ b/pkg/operator/applyconfigurations/operator/v1alpha1/certmanagerstatus.go @@ -22,7 +22,7 @@ func CertManagerStatus() *CertManagerStatusApplyConfiguration { // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the ObservedGeneration field is set to the value of the last call. 
func (b *CertManagerStatusApplyConfiguration) WithObservedGeneration(value int64) *CertManagerStatusApplyConfiguration { - b.ObservedGeneration = &value + b.OperatorStatusApplyConfiguration.ObservedGeneration = &value return b } @@ -34,7 +34,7 @@ func (b *CertManagerStatusApplyConfiguration) WithConditions(values ...*v1.Opera if values[i] == nil { panic("nil value passed to WithConditions") } - b.Conditions = append(b.Conditions, *values[i]) + b.OperatorStatusApplyConfiguration.Conditions = append(b.OperatorStatusApplyConfiguration.Conditions, *values[i]) } return b } @@ -43,7 +43,7 @@ func (b *CertManagerStatusApplyConfiguration) WithConditions(values ...*v1.Opera // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Version field is set to the value of the last call. func (b *CertManagerStatusApplyConfiguration) WithVersion(value string) *CertManagerStatusApplyConfiguration { - b.Version = &value + b.OperatorStatusApplyConfiguration.Version = &value return b } @@ -51,7 +51,7 @@ func (b *CertManagerStatusApplyConfiguration) WithVersion(value string) *CertMan // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the ReadyReplicas field is set to the value of the last call. func (b *CertManagerStatusApplyConfiguration) WithReadyReplicas(value int32) *CertManagerStatusApplyConfiguration { - b.ReadyReplicas = &value + b.OperatorStatusApplyConfiguration.ReadyReplicas = &value return b } @@ -59,7 +59,7 @@ func (b *CertManagerStatusApplyConfiguration) WithReadyReplicas(value int32) *Ce // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the LatestAvailableRevision field is set to the value of the last call. func (b *CertManagerStatusApplyConfiguration) WithLatestAvailableRevision(value int32) *CertManagerStatusApplyConfiguration { - b.LatestAvailableRevision = &value + b.OperatorStatusApplyConfiguration.LatestAvailableRevision = &value return b } @@ -71,7 +71,7 @@ func (b *CertManagerStatusApplyConfiguration) WithGenerations(values ...*v1.Gene if values[i] == nil { panic("nil value passed to WithGenerations") } - b.Generations = append(b.Generations, *values[i]) + b.OperatorStatusApplyConfiguration.Generations = append(b.OperatorStatusApplyConfiguration.Generations, *values[i]) } return b } diff --git a/pkg/operator/applyconfigurations/operator/v1alpha1/istiocsr.go b/pkg/operator/applyconfigurations/operator/v1alpha1/istiocsr.go index 02c32a769..4c2dfb726 100644 --- a/pkg/operator/applyconfigurations/operator/v1alpha1/istiocsr.go +++ b/pkg/operator/applyconfigurations/operator/v1alpha1/istiocsr.go @@ -71,7 +71,7 @@ func extractIstioCSR(istioCSR *operatorv1alpha1.IstioCSR, fieldManager string, s // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *IstioCSRApplyConfiguration) WithKind(value string) *IstioCSRApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -79,7 +79,7 @@ func (b *IstioCSRApplyConfiguration) WithKind(value string) *IstioCSRApplyConfig // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. 
func (b *IstioCSRApplyConfiguration) WithAPIVersion(value string) *IstioCSRApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -88,7 +88,7 @@ func (b *IstioCSRApplyConfiguration) WithAPIVersion(value string) *IstioCSRApply // If called multiple times, the Name field is set to the value of the last call. func (b *IstioCSRApplyConfiguration) WithName(value string) *IstioCSRApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -97,7 +97,7 @@ func (b *IstioCSRApplyConfiguration) WithName(value string) *IstioCSRApplyConfig // If called multiple times, the GenerateName field is set to the value of the last call. func (b *IstioCSRApplyConfiguration) WithGenerateName(value string) *IstioCSRApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -106,7 +106,7 @@ func (b *IstioCSRApplyConfiguration) WithGenerateName(value string) *IstioCSRApp // If called multiple times, the Namespace field is set to the value of the last call. func (b *IstioCSRApplyConfiguration) WithNamespace(value string) *IstioCSRApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -115,7 +115,7 @@ func (b *IstioCSRApplyConfiguration) WithNamespace(value string) *IstioCSRApplyC // If called multiple times, the UID field is set to the value of the last call. func (b *IstioCSRApplyConfiguration) WithUID(value types.UID) *IstioCSRApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -124,7 +124,7 @@ func (b *IstioCSRApplyConfiguration) WithUID(value types.UID) *IstioCSRApplyConf // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *IstioCSRApplyConfiguration) WithResourceVersion(value string) *IstioCSRApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -133,7 +133,7 @@ func (b *IstioCSRApplyConfiguration) WithResourceVersion(value string) *IstioCSR // If called multiple times, the Generation field is set to the value of the last call. func (b *IstioCSRApplyConfiguration) WithGeneration(value int64) *IstioCSRApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } @@ -142,7 +142,7 @@ func (b *IstioCSRApplyConfiguration) WithGeneration(value int64) *IstioCSRApplyC // If called multiple times, the CreationTimestamp field is set to the value of the last call. func (b *IstioCSRApplyConfiguration) WithCreationTimestamp(value metav1.Time) *IstioCSRApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } @@ -151,7 +151,7 @@ func (b *IstioCSRApplyConfiguration) WithCreationTimestamp(value metav1.Time) *I // If called multiple times, the DeletionTimestamp field is set to the value of the last call. 
func (b *IstioCSRApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *IstioCSRApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -160,7 +160,7 @@ func (b *IstioCSRApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *I // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *IstioCSRApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *IstioCSRApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -170,11 +170,11 @@ func (b *IstioCSRApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) // overwriting an existing map entries in Labels field with the same key. func (b *IstioCSRApplyConfiguration) WithLabels(entries map[string]string) *IstioCSRApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -185,11 +185,11 @@ func (b *IstioCSRApplyConfiguration) WithLabels(entries map[string]string) *Isti // overwriting an existing map entries in Annotations field with the same key. func (b *IstioCSRApplyConfiguration) WithAnnotations(entries map[string]string) *IstioCSRApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -203,7 +203,7 @@ func (b *IstioCSRApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerRefe if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -214,7 +214,7 @@ func (b *IstioCSRApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerRefe func (b *IstioCSRApplyConfiguration) WithFinalizers(values ...string) *IstioCSRApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } @@ -244,5 +244,5 @@ func (b *IstioCSRApplyConfiguration) WithStatus(value *IstioCSRStatusApplyConfig // GetName retrieves the value of the Name field in the declarative configuration. 
func (b *IstioCSRApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/pkg/operator/applyconfigurations/operator/v1alpha1/istiocsrstatus.go b/pkg/operator/applyconfigurations/operator/v1alpha1/istiocsrstatus.go index 453350d71..f53bc0ca6 100644 --- a/pkg/operator/applyconfigurations/operator/v1alpha1/istiocsrstatus.go +++ b/pkg/operator/applyconfigurations/operator/v1alpha1/istiocsrstatus.go @@ -31,7 +31,7 @@ func (b *IstioCSRStatusApplyConfiguration) WithConditions(values ...*v1.Conditio if values[i] == nil { panic("nil value passed to WithConditions") } - b.Conditions = append(b.Conditions, *values[i]) + b.ConditionalStatusApplyConfiguration.Conditions = append(b.ConditionalStatusApplyConfiguration.Conditions, *values[i]) } return b } diff --git a/pkg/operator/assets/bindata.go b/pkg/operator/assets/bindata.go index e6d5d7ee2..d8c134896 100644 --- a/pkg/operator/assets/bindata.go +++ b/pkg/operator/assets/bindata.go @@ -34,6 +34,9 @@ // bindata/cert-manager-deployment/controller/cert-manager-svc.yaml // bindata/cert-manager-deployment/controller/cert-manager-tokenrequest-role.yaml // bindata/cert-manager-deployment/controller/cert-manager-view-cr.yaml +// bindata/cert-manager-deployment/network-policy/operator-allow-egress-to-api-server.yaml +// bindata/cert-manager-deployment/network-policy/operator-allow-ingress-to-metrics.yaml +// bindata/cert-manager-deployment/network-policy/operator-deny-all-pod-selector.yaml // bindata/cert-manager-deployment/webhook/cert-manager-webhook-deployment.yaml // bindata/cert-manager-deployment/webhook/cert-manager-webhook-dynamic-serving-rb.yaml // bindata/cert-manager-deployment/webhook/cert-manager-webhook-dynamic-serving-role.yaml @@ -1815,6 +1818,96 @@ func certManagerDeploymentControllerCertManagerViewCrYaml() (*asset, error) { return a, nil } +var _certManagerDeploymentNetworkPolicyOperatorAllowEgressToApiServerYaml = []byte(`apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: operator-allow-egress-to-api-server +spec: + podSelector: + matchLabels: + name: cert-manager-operator + policyTypes: + - Egress + egress: + - ports: + - protocol: TCP + port: 6443 +`) + +func certManagerDeploymentNetworkPolicyOperatorAllowEgressToApiServerYamlBytes() ([]byte, error) { + return _certManagerDeploymentNetworkPolicyOperatorAllowEgressToApiServerYaml, nil +} + +func certManagerDeploymentNetworkPolicyOperatorAllowEgressToApiServerYaml() (*asset, error) { + bytes, err := certManagerDeploymentNetworkPolicyOperatorAllowEgressToApiServerYamlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "cert-manager-deployment/network-policy/operator-allow-egress-to-api-server.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _certManagerDeploymentNetworkPolicyOperatorAllowIngressToMetricsYaml = []byte(`apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: operator-allow-ingress-to-metrics +spec: + podSelector: + matchLabels: + name: cert-manager-operator + policyTypes: + - Ingress + ingress: + - ports: + - protocol: TCP + port: 8443 +`) + +func certManagerDeploymentNetworkPolicyOperatorAllowIngressToMetricsYamlBytes() ([]byte, error) { + return _certManagerDeploymentNetworkPolicyOperatorAllowIngressToMetricsYaml, nil +} + +func certManagerDeploymentNetworkPolicyOperatorAllowIngressToMetricsYaml() (*asset, error) { + bytes, 
err := certManagerDeploymentNetworkPolicyOperatorAllowIngressToMetricsYamlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "cert-manager-deployment/network-policy/operator-allow-ingress-to-metrics.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _certManagerDeploymentNetworkPolicyOperatorDenyAllPodSelectorYaml = []byte(`apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: operator-deny-all-traffic +spec: + podSelector: + matchLabels: + name: cert-manager-operator + policyTypes: + - Ingress + - Egress +`) + +func certManagerDeploymentNetworkPolicyOperatorDenyAllPodSelectorYamlBytes() ([]byte, error) { + return _certManagerDeploymentNetworkPolicyOperatorDenyAllPodSelectorYaml, nil +} + +func certManagerDeploymentNetworkPolicyOperatorDenyAllPodSelectorYaml() (*asset, error) { + bytes, err := certManagerDeploymentNetworkPolicyOperatorDenyAllPodSelectorYamlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "cert-manager-deployment/network-policy/operator-deny-all-pod-selector.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + var _certManagerDeploymentWebhookCertManagerWebhookDeploymentYaml = []byte(`apiVersion: apps/v1 kind: Deployment metadata: @@ -2795,6 +2888,9 @@ var _bindata = map[string]func() (*asset, error){ "cert-manager-deployment/controller/cert-manager-svc.yaml": certManagerDeploymentControllerCertManagerSvcYaml, "cert-manager-deployment/controller/cert-manager-tokenrequest-role.yaml": certManagerDeploymentControllerCertManagerTokenrequestRoleYaml, "cert-manager-deployment/controller/cert-manager-view-cr.yaml": certManagerDeploymentControllerCertManagerViewCrYaml, + "cert-manager-deployment/network-policy/operator-allow-egress-to-api-server.yaml": certManagerDeploymentNetworkPolicyOperatorAllowEgressToApiServerYaml, + "cert-manager-deployment/network-policy/operator-allow-ingress-to-metrics.yaml": certManagerDeploymentNetworkPolicyOperatorAllowIngressToMetricsYaml, + "cert-manager-deployment/network-policy/operator-deny-all-pod-selector.yaml": certManagerDeploymentNetworkPolicyOperatorDenyAllPodSelectorYaml, "cert-manager-deployment/webhook/cert-manager-webhook-deployment.yaml": certManagerDeploymentWebhookCertManagerWebhookDeploymentYaml, "cert-manager-deployment/webhook/cert-manager-webhook-dynamic-serving-rb.yaml": certManagerDeploymentWebhookCertManagerWebhookDynamicServingRbYaml, "cert-manager-deployment/webhook/cert-manager-webhook-dynamic-serving-role.yaml": certManagerDeploymentWebhookCertManagerWebhookDynamicServingRoleYaml, @@ -2900,6 +2996,11 @@ var _bintree = &bintree{nil, map[string]*bintree{ "cert-manager-tokenrequest-role.yaml": {certManagerDeploymentControllerCertManagerTokenrequestRoleYaml, map[string]*bintree{}}, "cert-manager-view-cr.yaml": {certManagerDeploymentControllerCertManagerViewCrYaml, map[string]*bintree{}}, }}, + "network-policy": {nil, map[string]*bintree{ + "operator-allow-egress-to-api-server.yaml": {certManagerDeploymentNetworkPolicyOperatorAllowEgressToApiServerYaml, map[string]*bintree{}}, + "operator-allow-ingress-to-metrics.yaml": {certManagerDeploymentNetworkPolicyOperatorAllowIngressToMetricsYaml, map[string]*bintree{}}, + "operator-deny-all-pod-selector.yaml": {certManagerDeploymentNetworkPolicyOperatorDenyAllPodSelectorYaml, map[string]*bintree{}}, + }}, "webhook": {nil, map[string]*bintree{ 
"cert-manager-webhook-deployment.yaml": {certManagerDeploymentWebhookCertManagerWebhookDeploymentYaml, map[string]*bintree{}}, "cert-manager-webhook-dynamic-serving-rb.yaml": {certManagerDeploymentWebhookCertManagerWebhookDynamicServingRbYaml, map[string]*bintree{}}, diff --git a/pkg/operator/clientset/versioned/clientset.go b/pkg/operator/clientset/versioned/clientset.go index 8b73539fa..c7dbb340c 100644 --- a/pkg/operator/clientset/versioned/clientset.go +++ b/pkg/operator/clientset/versioned/clientset.go @@ -3,8 +3,8 @@ package versioned import ( - "fmt" - "net/http" + fmt "fmt" + http "net/http" operatorv1alpha1 "github.com/openshift/cert-manager-operator/pkg/operator/clientset/versioned/typed/operator/v1alpha1" discovery "k8s.io/client-go/discovery" diff --git a/pkg/operator/clientset/versioned/typed/operator/v1alpha1/certmanager.go b/pkg/operator/clientset/versioned/typed/operator/v1alpha1/certmanager.go index b92f8f59e..06ff49169 100644 --- a/pkg/operator/clientset/versioned/typed/operator/v1alpha1/certmanager.go +++ b/pkg/operator/clientset/versioned/typed/operator/v1alpha1/certmanager.go @@ -3,10 +3,10 @@ package v1alpha1 import ( - "context" + context "context" - v1alpha1 "github.com/openshift/cert-manager-operator/api/operator/v1alpha1" - operatorv1alpha1 "github.com/openshift/cert-manager-operator/pkg/operator/applyconfigurations/operator/v1alpha1" + operatorv1alpha1 "github.com/openshift/cert-manager-operator/api/operator/v1alpha1" + applyconfigurationsoperatorv1alpha1 "github.com/openshift/cert-manager-operator/pkg/operator/applyconfigurations/operator/v1alpha1" scheme "github.com/openshift/cert-manager-operator/pkg/operator/clientset/versioned/scheme" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -22,36 +22,37 @@ type CertManagersGetter interface { // CertManagerInterface has methods to work with CertManager resources. type CertManagerInterface interface { - Create(ctx context.Context, certManager *v1alpha1.CertManager, opts v1.CreateOptions) (*v1alpha1.CertManager, error) - Update(ctx context.Context, certManager *v1alpha1.CertManager, opts v1.UpdateOptions) (*v1alpha1.CertManager, error) + Create(ctx context.Context, certManager *operatorv1alpha1.CertManager, opts v1.CreateOptions) (*operatorv1alpha1.CertManager, error) + Update(ctx context.Context, certManager *operatorv1alpha1.CertManager, opts v1.UpdateOptions) (*operatorv1alpha1.CertManager, error) // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
- UpdateStatus(ctx context.Context, certManager *v1alpha1.CertManager, opts v1.UpdateOptions) (*v1alpha1.CertManager, error) + UpdateStatus(ctx context.Context, certManager *operatorv1alpha1.CertManager, opts v1.UpdateOptions) (*operatorv1alpha1.CertManager, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.CertManager, error) - List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.CertManagerList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*operatorv1alpha1.CertManager, error) + List(ctx context.Context, opts v1.ListOptions) (*operatorv1alpha1.CertManagerList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.CertManager, err error) - Apply(ctx context.Context, certManager *operatorv1alpha1.CertManagerApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.CertManager, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *operatorv1alpha1.CertManager, err error) + Apply(ctx context.Context, certManager *applyconfigurationsoperatorv1alpha1.CertManagerApplyConfiguration, opts v1.ApplyOptions) (result *operatorv1alpha1.CertManager, err error) // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). - ApplyStatus(ctx context.Context, certManager *operatorv1alpha1.CertManagerApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.CertManager, err error) + ApplyStatus(ctx context.Context, certManager *applyconfigurationsoperatorv1alpha1.CertManagerApplyConfiguration, opts v1.ApplyOptions) (result *operatorv1alpha1.CertManager, err error) CertManagerExpansion } // certManagers implements CertManagerInterface type certManagers struct { - *gentype.ClientWithListAndApply[*v1alpha1.CertManager, *v1alpha1.CertManagerList, *operatorv1alpha1.CertManagerApplyConfiguration] + *gentype.ClientWithListAndApply[*operatorv1alpha1.CertManager, *operatorv1alpha1.CertManagerList, *applyconfigurationsoperatorv1alpha1.CertManagerApplyConfiguration] } // newCertManagers returns a CertManagers func newCertManagers(c *OperatorV1alpha1Client) *certManagers { return &certManagers{ - gentype.NewClientWithListAndApply[*v1alpha1.CertManager, *v1alpha1.CertManagerList, *operatorv1alpha1.CertManagerApplyConfiguration]( + gentype.NewClientWithListAndApply[*operatorv1alpha1.CertManager, *operatorv1alpha1.CertManagerList, *applyconfigurationsoperatorv1alpha1.CertManagerApplyConfiguration]( "certmanagers", c.RESTClient(), scheme.ParameterCodec, "", - func() *v1alpha1.CertManager { return &v1alpha1.CertManager{} }, - func() *v1alpha1.CertManagerList { return &v1alpha1.CertManagerList{} }), + func() *operatorv1alpha1.CertManager { return &operatorv1alpha1.CertManager{} }, + func() *operatorv1alpha1.CertManagerList { return &operatorv1alpha1.CertManagerList{} }, + ), } } diff --git a/pkg/operator/clientset/versioned/typed/operator/v1alpha1/fake/fake_certmanager.go b/pkg/operator/clientset/versioned/typed/operator/v1alpha1/fake/fake_certmanager.go index 5b0668d83..882e1e032 100644 --- a/pkg/operator/clientset/versioned/typed/operator/v1alpha1/fake/fake_certmanager.go +++ 
b/pkg/operator/clientset/versioned/typed/operator/v1alpha1/fake/fake_certmanager.go @@ -3,168 +3,35 @@ package fake import ( - "context" - json "encoding/json" - "fmt" - v1alpha1 "github.com/openshift/cert-manager-operator/api/operator/v1alpha1" operatorv1alpha1 "github.com/openshift/cert-manager-operator/pkg/operator/applyconfigurations/operator/v1alpha1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - testing "k8s.io/client-go/testing" + typedoperatorv1alpha1 "github.com/openshift/cert-manager-operator/pkg/operator/clientset/versioned/typed/operator/v1alpha1" + gentype "k8s.io/client-go/gentype" ) -// FakeCertManagers implements CertManagerInterface -type FakeCertManagers struct { +// fakeCertManagers implements CertManagerInterface +type fakeCertManagers struct { + *gentype.FakeClientWithListAndApply[*v1alpha1.CertManager, *v1alpha1.CertManagerList, *operatorv1alpha1.CertManagerApplyConfiguration] Fake *FakeOperatorV1alpha1 } -var certmanagersResource = v1alpha1.SchemeGroupVersion.WithResource("certmanagers") - -var certmanagersKind = v1alpha1.SchemeGroupVersion.WithKind("CertManager") - -// Get takes name of the certManager, and returns the corresponding certManager object, and an error if there is any. -func (c *FakeCertManagers) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.CertManager, err error) { - emptyResult := &v1alpha1.CertManager{} - obj, err := c.Fake. - Invokes(testing.NewRootGetActionWithOptions(certmanagersResource, name, options), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1alpha1.CertManager), err -} - -// List takes label and field selectors, and returns the list of CertManagers that match those selectors. -func (c *FakeCertManagers) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.CertManagerList, err error) { - emptyResult := &v1alpha1.CertManagerList{} - obj, err := c.Fake. - Invokes(testing.NewRootListActionWithOptions(certmanagersResource, certmanagersKind, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1alpha1.CertManagerList{ListMeta: obj.(*v1alpha1.CertManagerList).ListMeta} - for _, item := range obj.(*v1alpha1.CertManagerList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested certManagers. -func (c *FakeCertManagers) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewRootWatchActionWithOptions(certmanagersResource, opts)) -} - -// Create takes the representation of a certManager and creates it. Returns the server's representation of the certManager, and an error, if there is any. -func (c *FakeCertManagers) Create(ctx context.Context, certManager *v1alpha1.CertManager, opts v1.CreateOptions) (result *v1alpha1.CertManager, err error) { - emptyResult := &v1alpha1.CertManager{} - obj, err := c.Fake. - Invokes(testing.NewRootCreateActionWithOptions(certmanagersResource, certManager, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1alpha1.CertManager), err -} - -// Update takes the representation of a certManager and updates it. 
Returns the server's representation of the certManager, and an error, if there is any. -func (c *FakeCertManagers) Update(ctx context.Context, certManager *v1alpha1.CertManager, opts v1.UpdateOptions) (result *v1alpha1.CertManager, err error) { - emptyResult := &v1alpha1.CertManager{} - obj, err := c.Fake. - Invokes(testing.NewRootUpdateActionWithOptions(certmanagersResource, certManager, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1alpha1.CertManager), err -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeCertManagers) UpdateStatus(ctx context.Context, certManager *v1alpha1.CertManager, opts v1.UpdateOptions) (result *v1alpha1.CertManager, err error) { - emptyResult := &v1alpha1.CertManager{} - obj, err := c.Fake. - Invokes(testing.NewRootUpdateSubresourceActionWithOptions(certmanagersResource, "status", certManager, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1alpha1.CertManager), err -} - -// Delete takes name of the certManager and deletes it. Returns an error if one occurs. -func (c *FakeCertManagers) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewRootDeleteActionWithOptions(certmanagersResource, name, opts), &v1alpha1.CertManager{}) - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeCertManagers) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewRootDeleteCollectionActionWithOptions(certmanagersResource, opts, listOpts) - - _, err := c.Fake.Invokes(action, &v1alpha1.CertManagerList{}) - return err -} - -// Patch applies the patch and returns the patched certManager. -func (c *FakeCertManagers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.CertManager, err error) { - emptyResult := &v1alpha1.CertManager{} - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceActionWithOptions(certmanagersResource, name, pt, data, opts, subresources...), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1alpha1.CertManager), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied certManager. -func (c *FakeCertManagers) Apply(ctx context.Context, certManager *operatorv1alpha1.CertManagerApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.CertManager, err error) { - if certManager == nil { - return nil, fmt.Errorf("certManager provided to Apply must not be nil") - } - data, err := json.Marshal(certManager) - if err != nil { - return nil, err - } - name := certManager.Name - if name == nil { - return nil, fmt.Errorf("certManager.Name must be provided to Apply") - } - emptyResult := &v1alpha1.CertManager{} - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceActionWithOptions(certmanagersResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1alpha1.CertManager), err -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
-func (c *FakeCertManagers) ApplyStatus(ctx context.Context, certManager *operatorv1alpha1.CertManagerApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.CertManager, err error) { - if certManager == nil { - return nil, fmt.Errorf("certManager provided to Apply must not be nil") - } - data, err := json.Marshal(certManager) - if err != nil { - return nil, err - } - name := certManager.Name - if name == nil { - return nil, fmt.Errorf("certManager.Name must be provided to Apply") - } - emptyResult := &v1alpha1.CertManager{} - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceActionWithOptions(certmanagersResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) - if obj == nil { - return emptyResult, err +func newFakeCertManagers(fake *FakeOperatorV1alpha1) typedoperatorv1alpha1.CertManagerInterface { + return &fakeCertManagers{ + gentype.NewFakeClientWithListAndApply[*v1alpha1.CertManager, *v1alpha1.CertManagerList, *operatorv1alpha1.CertManagerApplyConfiguration]( + fake.Fake, + "", + v1alpha1.SchemeGroupVersion.WithResource("certmanagers"), + v1alpha1.SchemeGroupVersion.WithKind("CertManager"), + func() *v1alpha1.CertManager { return &v1alpha1.CertManager{} }, + func() *v1alpha1.CertManagerList { return &v1alpha1.CertManagerList{} }, + func(dst, src *v1alpha1.CertManagerList) { dst.ListMeta = src.ListMeta }, + func(list *v1alpha1.CertManagerList) []*v1alpha1.CertManager { + return gentype.ToPointerSlice(list.Items) + }, + func(list *v1alpha1.CertManagerList, items []*v1alpha1.CertManager) { + list.Items = gentype.FromPointerSlice(items) + }, + ), + fake, } - return obj.(*v1alpha1.CertManager), err } diff --git a/pkg/operator/clientset/versioned/typed/operator/v1alpha1/fake/fake_istiocsr.go b/pkg/operator/clientset/versioned/typed/operator/v1alpha1/fake/fake_istiocsr.go index 50930ed91..647e525c7 100644 --- a/pkg/operator/clientset/versioned/typed/operator/v1alpha1/fake/fake_istiocsr.go +++ b/pkg/operator/clientset/versioned/typed/operator/v1alpha1/fake/fake_istiocsr.go @@ -3,179 +3,33 @@ package fake import ( - "context" - json "encoding/json" - "fmt" - v1alpha1 "github.com/openshift/cert-manager-operator/api/operator/v1alpha1" operatorv1alpha1 "github.com/openshift/cert-manager-operator/pkg/operator/applyconfigurations/operator/v1alpha1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - testing "k8s.io/client-go/testing" + typedoperatorv1alpha1 "github.com/openshift/cert-manager-operator/pkg/operator/clientset/versioned/typed/operator/v1alpha1" + gentype "k8s.io/client-go/gentype" ) -// FakeIstioCSRs implements IstioCSRInterface -type FakeIstioCSRs struct { +// fakeIstioCSRs implements IstioCSRInterface +type fakeIstioCSRs struct { + *gentype.FakeClientWithListAndApply[*v1alpha1.IstioCSR, *v1alpha1.IstioCSRList, *operatorv1alpha1.IstioCSRApplyConfiguration] Fake *FakeOperatorV1alpha1 - ns string -} - -var istiocsrsResource = v1alpha1.SchemeGroupVersion.WithResource("istiocsrs") - -var istiocsrsKind = v1alpha1.SchemeGroupVersion.WithKind("IstioCSR") - -// Get takes name of the istioCSR, and returns the corresponding istioCSR object, and an error if there is any. -func (c *FakeIstioCSRs) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.IstioCSR, err error) { - emptyResult := &v1alpha1.IstioCSR{} - obj, err := c.Fake. 
- Invokes(testing.NewGetActionWithOptions(istiocsrsResource, c.ns, name, options), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1alpha1.IstioCSR), err -} - -// List takes label and field selectors, and returns the list of IstioCSRs that match those selectors. -func (c *FakeIstioCSRs) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.IstioCSRList, err error) { - emptyResult := &v1alpha1.IstioCSRList{} - obj, err := c.Fake. - Invokes(testing.NewListActionWithOptions(istiocsrsResource, istiocsrsKind, c.ns, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1alpha1.IstioCSRList{ListMeta: obj.(*v1alpha1.IstioCSRList).ListMeta} - for _, item := range obj.(*v1alpha1.IstioCSRList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested istioCSRs. -func (c *FakeIstioCSRs) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchActionWithOptions(istiocsrsResource, c.ns, opts)) - -} - -// Create takes the representation of a istioCSR and creates it. Returns the server's representation of the istioCSR, and an error, if there is any. -func (c *FakeIstioCSRs) Create(ctx context.Context, istioCSR *v1alpha1.IstioCSR, opts v1.CreateOptions) (result *v1alpha1.IstioCSR, err error) { - emptyResult := &v1alpha1.IstioCSR{} - obj, err := c.Fake. - Invokes(testing.NewCreateActionWithOptions(istiocsrsResource, c.ns, istioCSR, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1alpha1.IstioCSR), err -} - -// Update takes the representation of a istioCSR and updates it. Returns the server's representation of the istioCSR, and an error, if there is any. -func (c *FakeIstioCSRs) Update(ctx context.Context, istioCSR *v1alpha1.IstioCSR, opts v1.UpdateOptions) (result *v1alpha1.IstioCSR, err error) { - emptyResult := &v1alpha1.IstioCSR{} - obj, err := c.Fake. - Invokes(testing.NewUpdateActionWithOptions(istiocsrsResource, c.ns, istioCSR, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1alpha1.IstioCSR), err } -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeIstioCSRs) UpdateStatus(ctx context.Context, istioCSR *v1alpha1.IstioCSR, opts v1.UpdateOptions) (result *v1alpha1.IstioCSR, err error) { - emptyResult := &v1alpha1.IstioCSR{} - obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceActionWithOptions(istiocsrsResource, "status", c.ns, istioCSR, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1alpha1.IstioCSR), err -} - -// Delete takes name of the istioCSR and deletes it. Returns an error if one occurs. -func (c *FakeIstioCSRs) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(istiocsrsResource, c.ns, name, opts), &v1alpha1.IstioCSR{}) - - return err -} - -// DeleteCollection deletes a collection of objects. 
-func (c *FakeIstioCSRs) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionActionWithOptions(istiocsrsResource, c.ns, opts, listOpts) - - _, err := c.Fake.Invokes(action, &v1alpha1.IstioCSRList{}) - return err -} - -// Patch applies the patch and returns the patched istioCSR. -func (c *FakeIstioCSRs) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.IstioCSR, err error) { - emptyResult := &v1alpha1.IstioCSR{} - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceActionWithOptions(istiocsrsResource, c.ns, name, pt, data, opts, subresources...), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1alpha1.IstioCSR), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied istioCSR. -func (c *FakeIstioCSRs) Apply(ctx context.Context, istioCSR *operatorv1alpha1.IstioCSRApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.IstioCSR, err error) { - if istioCSR == nil { - return nil, fmt.Errorf("istioCSR provided to Apply must not be nil") - } - data, err := json.Marshal(istioCSR) - if err != nil { - return nil, err - } - name := istioCSR.Name - if name == nil { - return nil, fmt.Errorf("istioCSR.Name must be provided to Apply") - } - emptyResult := &v1alpha1.IstioCSR{} - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceActionWithOptions(istiocsrsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1alpha1.IstioCSR), err -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *FakeIstioCSRs) ApplyStatus(ctx context.Context, istioCSR *operatorv1alpha1.IstioCSRApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.IstioCSR, err error) { - if istioCSR == nil { - return nil, fmt.Errorf("istioCSR provided to Apply must not be nil") - } - data, err := json.Marshal(istioCSR) - if err != nil { - return nil, err - } - name := istioCSR.Name - if name == nil { - return nil, fmt.Errorf("istioCSR.Name must be provided to Apply") - } - emptyResult := &v1alpha1.IstioCSR{} - obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceActionWithOptions(istiocsrsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) - - if obj == nil { - return emptyResult, err +func newFakeIstioCSRs(fake *FakeOperatorV1alpha1, namespace string) typedoperatorv1alpha1.IstioCSRInterface { + return &fakeIstioCSRs{ + gentype.NewFakeClientWithListAndApply[*v1alpha1.IstioCSR, *v1alpha1.IstioCSRList, *operatorv1alpha1.IstioCSRApplyConfiguration]( + fake.Fake, + namespace, + v1alpha1.SchemeGroupVersion.WithResource("istiocsrs"), + v1alpha1.SchemeGroupVersion.WithKind("IstioCSR"), + func() *v1alpha1.IstioCSR { return &v1alpha1.IstioCSR{} }, + func() *v1alpha1.IstioCSRList { return &v1alpha1.IstioCSRList{} }, + func(dst, src *v1alpha1.IstioCSRList) { dst.ListMeta = src.ListMeta }, + func(list *v1alpha1.IstioCSRList) []*v1alpha1.IstioCSR { return gentype.ToPointerSlice(list.Items) }, + func(list *v1alpha1.IstioCSRList, items []*v1alpha1.IstioCSR) { + list.Items = gentype.FromPointerSlice(items) + }, + ), + fake, } - return obj.(*v1alpha1.IstioCSR), err } diff --git a/pkg/operator/clientset/versioned/typed/operator/v1alpha1/fake/fake_operator_client.go b/pkg/operator/clientset/versioned/typed/operator/v1alpha1/fake/fake_operator_client.go index a3dc68137..cc50d82f9 100644 --- a/pkg/operator/clientset/versioned/typed/operator/v1alpha1/fake/fake_operator_client.go +++ b/pkg/operator/clientset/versioned/typed/operator/v1alpha1/fake/fake_operator_client.go @@ -13,11 +13,11 @@ type FakeOperatorV1alpha1 struct { } func (c *FakeOperatorV1alpha1) CertManagers() v1alpha1.CertManagerInterface { - return &FakeCertManagers{c} + return newFakeCertManagers(c) } func (c *FakeOperatorV1alpha1) IstioCSRs(namespace string) v1alpha1.IstioCSRInterface { - return &FakeIstioCSRs{c, namespace} + return newFakeIstioCSRs(c, namespace) } // RESTClient returns a RESTClient that is used to communicate diff --git a/pkg/operator/clientset/versioned/typed/operator/v1alpha1/istiocsr.go b/pkg/operator/clientset/versioned/typed/operator/v1alpha1/istiocsr.go index 484ec747c..3d0ebbbef 100644 --- a/pkg/operator/clientset/versioned/typed/operator/v1alpha1/istiocsr.go +++ b/pkg/operator/clientset/versioned/typed/operator/v1alpha1/istiocsr.go @@ -3,10 +3,10 @@ package v1alpha1 import ( - "context" + context "context" - v1alpha1 "github.com/openshift/cert-manager-operator/api/operator/v1alpha1" - operatorv1alpha1 "github.com/openshift/cert-manager-operator/pkg/operator/applyconfigurations/operator/v1alpha1" + operatorv1alpha1 "github.com/openshift/cert-manager-operator/api/operator/v1alpha1" + applyconfigurationsoperatorv1alpha1 "github.com/openshift/cert-manager-operator/pkg/operator/applyconfigurations/operator/v1alpha1" scheme "github.com/openshift/cert-manager-operator/pkg/operator/clientset/versioned/scheme" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -22,36 +22,37 @@ type IstioCSRsGetter interface { // IstioCSRInterface has methods to work with IstioCSR resources. 
type IstioCSRInterface interface { - Create(ctx context.Context, istioCSR *v1alpha1.IstioCSR, opts v1.CreateOptions) (*v1alpha1.IstioCSR, error) - Update(ctx context.Context, istioCSR *v1alpha1.IstioCSR, opts v1.UpdateOptions) (*v1alpha1.IstioCSR, error) + Create(ctx context.Context, istioCSR *operatorv1alpha1.IstioCSR, opts v1.CreateOptions) (*operatorv1alpha1.IstioCSR, error) + Update(ctx context.Context, istioCSR *operatorv1alpha1.IstioCSR, opts v1.UpdateOptions) (*operatorv1alpha1.IstioCSR, error) // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - UpdateStatus(ctx context.Context, istioCSR *v1alpha1.IstioCSR, opts v1.UpdateOptions) (*v1alpha1.IstioCSR, error) + UpdateStatus(ctx context.Context, istioCSR *operatorv1alpha1.IstioCSR, opts v1.UpdateOptions) (*operatorv1alpha1.IstioCSR, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.IstioCSR, error) - List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.IstioCSRList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*operatorv1alpha1.IstioCSR, error) + List(ctx context.Context, opts v1.ListOptions) (*operatorv1alpha1.IstioCSRList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.IstioCSR, err error) - Apply(ctx context.Context, istioCSR *operatorv1alpha1.IstioCSRApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.IstioCSR, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *operatorv1alpha1.IstioCSR, err error) + Apply(ctx context.Context, istioCSR *applyconfigurationsoperatorv1alpha1.IstioCSRApplyConfiguration, opts v1.ApplyOptions) (result *operatorv1alpha1.IstioCSR, err error) // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
- ApplyStatus(ctx context.Context, istioCSR *operatorv1alpha1.IstioCSRApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.IstioCSR, err error) + ApplyStatus(ctx context.Context, istioCSR *applyconfigurationsoperatorv1alpha1.IstioCSRApplyConfiguration, opts v1.ApplyOptions) (result *operatorv1alpha1.IstioCSR, err error) IstioCSRExpansion } // istioCSRs implements IstioCSRInterface type istioCSRs struct { - *gentype.ClientWithListAndApply[*v1alpha1.IstioCSR, *v1alpha1.IstioCSRList, *operatorv1alpha1.IstioCSRApplyConfiguration] + *gentype.ClientWithListAndApply[*operatorv1alpha1.IstioCSR, *operatorv1alpha1.IstioCSRList, *applyconfigurationsoperatorv1alpha1.IstioCSRApplyConfiguration] } // newIstioCSRs returns a IstioCSRs func newIstioCSRs(c *OperatorV1alpha1Client, namespace string) *istioCSRs { return &istioCSRs{ - gentype.NewClientWithListAndApply[*v1alpha1.IstioCSR, *v1alpha1.IstioCSRList, *operatorv1alpha1.IstioCSRApplyConfiguration]( + gentype.NewClientWithListAndApply[*operatorv1alpha1.IstioCSR, *operatorv1alpha1.IstioCSRList, *applyconfigurationsoperatorv1alpha1.IstioCSRApplyConfiguration]( "istiocsrs", c.RESTClient(), scheme.ParameterCodec, namespace, - func() *v1alpha1.IstioCSR { return &v1alpha1.IstioCSR{} }, - func() *v1alpha1.IstioCSRList { return &v1alpha1.IstioCSRList{} }), + func() *operatorv1alpha1.IstioCSR { return &operatorv1alpha1.IstioCSR{} }, + func() *operatorv1alpha1.IstioCSRList { return &operatorv1alpha1.IstioCSRList{} }, + ), } } diff --git a/pkg/operator/clientset/versioned/typed/operator/v1alpha1/operator_client.go b/pkg/operator/clientset/versioned/typed/operator/v1alpha1/operator_client.go index 9999c0b01..29d98139d 100644 --- a/pkg/operator/clientset/versioned/typed/operator/v1alpha1/operator_client.go +++ b/pkg/operator/clientset/versioned/typed/operator/v1alpha1/operator_client.go @@ -3,10 +3,10 @@ package v1alpha1 import ( - "net/http" + http "net/http" - v1alpha1 "github.com/openshift/cert-manager-operator/api/operator/v1alpha1" - "github.com/openshift/cert-manager-operator/pkg/operator/clientset/versioned/scheme" + operatorv1alpha1 "github.com/openshift/cert-manager-operator/api/operator/v1alpha1" + scheme "github.com/openshift/cert-manager-operator/pkg/operator/clientset/versioned/scheme" rest "k8s.io/client-go/rest" ) @@ -74,10 +74,10 @@ func New(c rest.Interface) *OperatorV1alpha1Client { } func setConfigDefaults(config *rest.Config) error { - gv := v1alpha1.SchemeGroupVersion + gv := operatorv1alpha1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/pkg/operator/informers/externalversions/generic.go b/pkg/operator/informers/externalversions/generic.go index 4b5bae949..0c542fe66 100644 --- a/pkg/operator/informers/externalversions/generic.go +++ b/pkg/operator/informers/externalversions/generic.go @@ -3,7 +3,7 @@ package externalversions import ( - "fmt" + fmt "fmt" v1alpha1 "github.com/openshift/cert-manager-operator/api/operator/v1alpha1" schema "k8s.io/apimachinery/pkg/runtime/schema" diff --git a/pkg/operator/informers/externalversions/operator/v1alpha1/certmanager.go b/pkg/operator/informers/externalversions/operator/v1alpha1/certmanager.go index 2ea471969..a2e05ecdf 100644 --- 
a/pkg/operator/informers/externalversions/operator/v1alpha1/certmanager.go +++ b/pkg/operator/informers/externalversions/operator/v1alpha1/certmanager.go @@ -3,13 +3,13 @@ package v1alpha1 import ( - "context" + context "context" time "time" - operatorv1alpha1 "github.com/openshift/cert-manager-operator/api/operator/v1alpha1" + apioperatorv1alpha1 "github.com/openshift/cert-manager-operator/api/operator/v1alpha1" versioned "github.com/openshift/cert-manager-operator/pkg/operator/clientset/versioned" internalinterfaces "github.com/openshift/cert-manager-operator/pkg/operator/informers/externalversions/internalinterfaces" - v1alpha1 "github.com/openshift/cert-manager-operator/pkg/operator/listers/operator/v1alpha1" + operatorv1alpha1 "github.com/openshift/cert-manager-operator/pkg/operator/listers/operator/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" @@ -20,7 +20,7 @@ import ( // CertManagers. type CertManagerInformer interface { Informer() cache.SharedIndexInformer - Lister() v1alpha1.CertManagerLister + Lister() operatorv1alpha1.CertManagerLister } type certManagerInformer struct { @@ -54,7 +54,7 @@ func NewFilteredCertManagerInformer(client versioned.Interface, resyncPeriod tim return client.OperatorV1alpha1().CertManagers().Watch(context.TODO(), options) }, }, - &operatorv1alpha1.CertManager{}, + &apioperatorv1alpha1.CertManager{}, resyncPeriod, indexers, ) @@ -65,9 +65,9 @@ func (f *certManagerInformer) defaultInformer(client versioned.Interface, resync } func (f *certManagerInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&operatorv1alpha1.CertManager{}, f.defaultInformer) + return f.factory.InformerFor(&apioperatorv1alpha1.CertManager{}, f.defaultInformer) } -func (f *certManagerInformer) Lister() v1alpha1.CertManagerLister { - return v1alpha1.NewCertManagerLister(f.Informer().GetIndexer()) +func (f *certManagerInformer) Lister() operatorv1alpha1.CertManagerLister { + return operatorv1alpha1.NewCertManagerLister(f.Informer().GetIndexer()) } diff --git a/pkg/operator/informers/externalversions/operator/v1alpha1/istiocsr.go b/pkg/operator/informers/externalversions/operator/v1alpha1/istiocsr.go index 2461fa349..d36550da5 100644 --- a/pkg/operator/informers/externalversions/operator/v1alpha1/istiocsr.go +++ b/pkg/operator/informers/externalversions/operator/v1alpha1/istiocsr.go @@ -3,13 +3,13 @@ package v1alpha1 import ( - "context" + context "context" time "time" - operatorv1alpha1 "github.com/openshift/cert-manager-operator/api/operator/v1alpha1" + apioperatorv1alpha1 "github.com/openshift/cert-manager-operator/api/operator/v1alpha1" versioned "github.com/openshift/cert-manager-operator/pkg/operator/clientset/versioned" internalinterfaces "github.com/openshift/cert-manager-operator/pkg/operator/informers/externalversions/internalinterfaces" - v1alpha1 "github.com/openshift/cert-manager-operator/pkg/operator/listers/operator/v1alpha1" + operatorv1alpha1 "github.com/openshift/cert-manager-operator/pkg/operator/listers/operator/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" @@ -20,7 +20,7 @@ import ( // IstioCSRs. 
type IstioCSRInformer interface { Informer() cache.SharedIndexInformer - Lister() v1alpha1.IstioCSRLister + Lister() operatorv1alpha1.IstioCSRLister } type istioCSRInformer struct { @@ -55,7 +55,7 @@ func NewFilteredIstioCSRInformer(client versioned.Interface, namespace string, r return client.OperatorV1alpha1().IstioCSRs(namespace).Watch(context.TODO(), options) }, }, - &operatorv1alpha1.IstioCSR{}, + &apioperatorv1alpha1.IstioCSR{}, resyncPeriod, indexers, ) @@ -66,9 +66,9 @@ func (f *istioCSRInformer) defaultInformer(client versioned.Interface, resyncPer } func (f *istioCSRInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&operatorv1alpha1.IstioCSR{}, f.defaultInformer) + return f.factory.InformerFor(&apioperatorv1alpha1.IstioCSR{}, f.defaultInformer) } -func (f *istioCSRInformer) Lister() v1alpha1.IstioCSRLister { - return v1alpha1.NewIstioCSRLister(f.Informer().GetIndexer()) +func (f *istioCSRInformer) Lister() operatorv1alpha1.IstioCSRLister { + return operatorv1alpha1.NewIstioCSRLister(f.Informer().GetIndexer()) } diff --git a/pkg/operator/listers/operator/v1alpha1/certmanager.go b/pkg/operator/listers/operator/v1alpha1/certmanager.go index d9e7fbd4d..6d86096c8 100644 --- a/pkg/operator/listers/operator/v1alpha1/certmanager.go +++ b/pkg/operator/listers/operator/v1alpha1/certmanager.go @@ -3,10 +3,10 @@ package v1alpha1 import ( - v1alpha1 "github.com/openshift/cert-manager-operator/api/operator/v1alpha1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/listers" - "k8s.io/client-go/tools/cache" + operatorv1alpha1 "github.com/openshift/cert-manager-operator/api/operator/v1alpha1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // CertManagerLister helps list CertManagers. @@ -14,19 +14,19 @@ import ( type CertManagerLister interface { // List lists all CertManagers in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1alpha1.CertManager, err error) + List(selector labels.Selector) (ret []*operatorv1alpha1.CertManager, err error) // Get retrieves the CertManager from the index for a given name. // Objects returned here must be treated as read-only. - Get(name string) (*v1alpha1.CertManager, error) + Get(name string) (*operatorv1alpha1.CertManager, error) CertManagerListerExpansion } // certManagerLister implements the CertManagerLister interface. type certManagerLister struct { - listers.ResourceIndexer[*v1alpha1.CertManager] + listers.ResourceIndexer[*operatorv1alpha1.CertManager] } // NewCertManagerLister returns a new CertManagerLister. 
func NewCertManagerLister(indexer cache.Indexer) CertManagerLister { - return &certManagerLister{listers.New[*v1alpha1.CertManager](indexer, v1alpha1.Resource("certmanager"))} + return &certManagerLister{listers.New[*operatorv1alpha1.CertManager](indexer, operatorv1alpha1.Resource("certmanager"))} } diff --git a/pkg/operator/listers/operator/v1alpha1/istiocsr.go b/pkg/operator/listers/operator/v1alpha1/istiocsr.go index 1bd74c55f..596dc93f3 100644 --- a/pkg/operator/listers/operator/v1alpha1/istiocsr.go +++ b/pkg/operator/listers/operator/v1alpha1/istiocsr.go @@ -3,10 +3,10 @@ package v1alpha1 import ( - v1alpha1 "github.com/openshift/cert-manager-operator/api/operator/v1alpha1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/listers" - "k8s.io/client-go/tools/cache" + operatorv1alpha1 "github.com/openshift/cert-manager-operator/api/operator/v1alpha1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // IstioCSRLister helps list IstioCSRs. @@ -14,7 +14,7 @@ import ( type IstioCSRLister interface { // List lists all IstioCSRs in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1alpha1.IstioCSR, err error) + List(selector labels.Selector) (ret []*operatorv1alpha1.IstioCSR, err error) // IstioCSRs returns an object that can list and get IstioCSRs. IstioCSRs(namespace string) IstioCSRNamespaceLister IstioCSRListerExpansion @@ -22,17 +22,17 @@ type IstioCSRLister interface { // istioCSRLister implements the IstioCSRLister interface. type istioCSRLister struct { - listers.ResourceIndexer[*v1alpha1.IstioCSR] + listers.ResourceIndexer[*operatorv1alpha1.IstioCSR] } // NewIstioCSRLister returns a new IstioCSRLister. func NewIstioCSRLister(indexer cache.Indexer) IstioCSRLister { - return &istioCSRLister{listers.New[*v1alpha1.IstioCSR](indexer, v1alpha1.Resource("istiocsr"))} + return &istioCSRLister{listers.New[*operatorv1alpha1.IstioCSR](indexer, operatorv1alpha1.Resource("istiocsr"))} } // IstioCSRs returns an object that can list and get IstioCSRs. func (s *istioCSRLister) IstioCSRs(namespace string) IstioCSRNamespaceLister { - return istioCSRNamespaceLister{listers.NewNamespaced[*v1alpha1.IstioCSR](s.ResourceIndexer, namespace)} + return istioCSRNamespaceLister{listers.NewNamespaced[*operatorv1alpha1.IstioCSR](s.ResourceIndexer, namespace)} } // IstioCSRNamespaceLister helps list and get IstioCSRs. @@ -40,15 +40,15 @@ func (s *istioCSRLister) IstioCSRs(namespace string) IstioCSRNamespaceLister { type IstioCSRNamespaceLister interface { // List lists all IstioCSRs in the indexer for a given namespace. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1alpha1.IstioCSR, err error) + List(selector labels.Selector) (ret []*operatorv1alpha1.IstioCSR, err error) // Get retrieves the IstioCSR from the indexer for a given namespace and name. // Objects returned here must be treated as read-only. - Get(name string) (*v1alpha1.IstioCSR, error) + Get(name string) (*operatorv1alpha1.IstioCSR, error) IstioCSRNamespaceListerExpansion } // istioCSRNamespaceLister implements the IstioCSRNamespaceLister // interface. 
type istioCSRNamespaceLister struct { - listers.ResourceIndexer[*v1alpha1.IstioCSR] + listers.ResourceIndexer[*operatorv1alpha1.IstioCSR] } diff --git a/pkg/operator/operatorclient/interfaces.go b/pkg/operator/operatorclient/interfaces.go index cf5fe9234..6c634a441 100644 --- a/pkg/operator/operatorclient/interfaces.go +++ b/pkg/operator/operatorclient/interfaces.go @@ -1,6 +1,9 @@ package operatorclient const ( - TargetNamespace = "cert-manager" + TargetNamespace = "cert-manager" +) + +var ( OperatorNamespace = "cert-manager-operator" ) diff --git a/pkg/operator/starter.go b/pkg/operator/starter.go index 03889a071..3b64d0214 100644 --- a/pkg/operator/starter.go +++ b/pkg/operator/starter.go @@ -71,10 +71,13 @@ func RunOperator(ctx context.Context, cc *controllercmd.ControllerContext) error versionRecorder := status.NewVersionGetter() versionRecorder.SetVersion("operator", status.VersionForOperatorFromEnv()) + operatorclient.OperatorNamespace = cc.OperatorNamespace + kubeInformersForNamespaces := v1helpers.NewKubeInformersForNamespaces(kubeClient, "", "kube-system", operatorclient.TargetNamespace, + operatorclient.OperatorNamespace, ) configClient, err := configv1client.NewForConfig(cc.KubeConfig) diff --git a/vendor/cel.dev/expr/.bazelversion b/vendor/cel.dev/expr/.bazelversion new file mode 100644 index 000000000..26bc914a3 --- /dev/null +++ b/vendor/cel.dev/expr/.bazelversion @@ -0,0 +1,2 @@ +7.0.1 +# Keep this pinned version in parity with cel-go diff --git a/vendor/cel.dev/expr/.gitattributes b/vendor/cel.dev/expr/.gitattributes new file mode 100644 index 000000000..3de1ec213 --- /dev/null +++ b/vendor/cel.dev/expr/.gitattributes @@ -0,0 +1,2 @@ +*.pb.go linguist-generated=true +*.pb.go -diff -merge diff --git a/vendor/cel.dev/expr/.gitignore b/vendor/cel.dev/expr/.gitignore new file mode 100644 index 000000000..0d4fed27c --- /dev/null +++ b/vendor/cel.dev/expr/.gitignore @@ -0,0 +1,2 @@ +bazel-* +MODULE.bazel.lock diff --git a/vendor/cel.dev/expr/BUILD.bazel b/vendor/cel.dev/expr/BUILD.bazel new file mode 100644 index 000000000..37d8adc95 --- /dev/null +++ b/vendor/cel.dev/expr/BUILD.bazel @@ -0,0 +1,34 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +package(default_visibility = ["//visibility:public"]) + +licenses(["notice"]) # Apache 2.0 + +go_library( + name = "expr", + srcs = [ + "checked.pb.go", + "eval.pb.go", + "explain.pb.go", + "syntax.pb.go", + "value.pb.go", + ], + importpath = "cel.dev/expr", + visibility = ["//visibility:public"], + deps = [ + "@org_golang_google_genproto_googleapis_rpc//status:go_default_library", + "@org_golang_google_protobuf//reflect/protoreflect", + "@org_golang_google_protobuf//runtime/protoimpl", + "@org_golang_google_protobuf//types/known/anypb", + "@org_golang_google_protobuf//types/known/durationpb", + "@org_golang_google_protobuf//types/known/emptypb", + "@org_golang_google_protobuf//types/known/structpb", + "@org_golang_google_protobuf//types/known/timestamppb", + ], +) + +alias( + name = "go_default_library", + actual = ":expr", + visibility = ["//visibility:public"], +) diff --git a/vendor/cel.dev/expr/CODE_OF_CONDUCT.md b/vendor/cel.dev/expr/CODE_OF_CONDUCT.md new file mode 100644 index 000000000..59908e2d8 --- /dev/null +++ b/vendor/cel.dev/expr/CODE_OF_CONDUCT.md @@ -0,0 +1,25 @@ +# Contributor Code of Conduct +## Version 0.1.1 (adapted from 0.3b-angular) + +As contributors and maintainers of the Common Expression Language +(CEL) project, we pledge to respect everyone who contributes by +posting issues, updating 
documentation, submitting pull requests, +providing feedback in comments, and any other activities. + +Communication through any of CEL's channels (GitHub, Gitter, IRC, +mailing lists, Google+, Twitter, etc.) must be constructive and never +resort to personal attacks, trolling, public or private harassment, +insults, or other unprofessional conduct. + +We promise to extend courtesy and respect to everyone involved in this +project regardless of gender, gender identity, sexual orientation, +disability, age, race, ethnicity, religion, or level of experience. We +expect anyone contributing to the project to do the same. + +If any member of the community violates this code of conduct, the +maintainers of the CEL project may take action, removing issues, +comments, and PRs or blocking accounts as deemed appropriate. + +If you are subject to or witness unacceptable behavior, or have any +other concerns, please email us at +[cel-conduct@google.com](mailto:cel-conduct@google.com). diff --git a/vendor/cel.dev/expr/CONTRIBUTING.md b/vendor/cel.dev/expr/CONTRIBUTING.md new file mode 100644 index 000000000..8f5fd5c31 --- /dev/null +++ b/vendor/cel.dev/expr/CONTRIBUTING.md @@ -0,0 +1,32 @@ +# How to Contribute + +We'd love to accept your patches and contributions to this project. There are a +few guidelines you need to follow. + +## Contributor License Agreement + +Contributions to this project must be accompanied by a Contributor License +Agreement. You (or your employer) retain the copyright to your contribution, +this simply gives us permission to use and redistribute your contributions as +part of the project. Head over to to see +your current agreements on file or to sign a new one. + +You generally only need to submit a CLA once, so if you've already submitted one +(even if it was for a different project), you probably don't need to do it +again. + +## Code reviews + +All submissions, including submissions by project members, require review. We +use GitHub pull requests for this purpose. Consult +[GitHub Help](https://help.github.com/articles/about-pull-requests/) for more +information on using pull requests. + +## What to expect from maintainers + +Expect maintainers to respond to new issues or pull requests within a week. +For outstanding and ongoing issues and particularly for long-running +pull requests, expect the maintainers to review within a week of a +contributor asking for a new review. There is no commitment to resolution -- +merging or closing a pull request, or fixing or closing an issue -- because some +issues will require more discussion than others. diff --git a/vendor/cel.dev/expr/GOVERNANCE.md b/vendor/cel.dev/expr/GOVERNANCE.md new file mode 100644 index 000000000..0a525bc17 --- /dev/null +++ b/vendor/cel.dev/expr/GOVERNANCE.md @@ -0,0 +1,43 @@ +# Project Governance + +This document defines the governance process for the CEL language. CEL is +Google-developed, but openly governed. Major contributors to the CEL +specification and its corresponding implementations constitute the CEL +Language Council. New members may be added by a unanimous vote of the +Council. + +The MAINTAINERS.md file lists the members of the CEL Language Council, and +unofficially indicates the "areas of expertise" of each member with respect +to the publicly available CEL repos. + +## Code Changes + +Code changes must follow the standard pull request (PR) model documented in the +CONTRIBUTING.md for each CEL repo. All fixes and features must be reviewed by a +maintainer. 
The maintainer reserves the right to request that any feature +request (FR) or PR be reviewed by the language council. + +## Syntax and Semantic Changes + +Syntactic and semantic changes must be reviewed by the CEL Language Council. +Maintainers may also request language council review at their discretion. + +The review process is as follows: + +- Create a Feature Request in the CEL-Spec repo. The feature description will + serve as an abstract for the detailed design document. +- Co-develop a design document with the Language Council. +- Once the proposer gives the design document approval, the document will be + linked to the FR in the CEL-Spec repo and opened for comments to members of + the cel-lang-discuss@googlegroups.com. +- The Language Council will review the design doc at the next council meeting + (once every three weeks) and the council decision included in the document. + +If the proposal is approved, the spec will be updated by a maintainer (if +applicable) and a rationale will be included in the CEL-Spec wiki to ensure +future developers may follow CEL's growth and direction over time. + +Approved proposals may be implemented by the proposer or by the maintainers as +the parties see fit. At the discretion of the maintainer, changes from the +approved design are permitted during implementation if they improve the user +experience and clarity of the feature. diff --git a/vendor/github.com/klauspost/compress/LICENSE b/vendor/cel.dev/expr/LICENSE similarity index 67% rename from vendor/github.com/klauspost/compress/LICENSE rename to vendor/cel.dev/expr/LICENSE index 87d557477..d64569567 100644 --- a/vendor/github.com/klauspost/compress/LICENSE +++ b/vendor/cel.dev/expr/LICENSE @@ -1,35 +1,3 @@ -Copyright (c) 2012 The Go Authors. All rights reserved. -Copyright (c) 2019 Klaus Post. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ------------------- - -Files: gzhttp/* Apache License Version 2.0, January 2004 @@ -219,7 +187,7 @@ Files: gzhttp/* same "printed page" as the copyright notice for easier identification within third-party archives. 
- Copyright 2016-2017 The New York Times Company + Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -232,73 +200,3 @@ Files: gzhttp/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. - ------------------- - -Files: s2/cmd/internal/readahead/* - -The MIT License (MIT) - -Copyright (c) 2015 Klaus Post - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - ---------------------- -Files: snappy/* -Files: internal/snapref/* - -Copyright (c) 2011 The Snappy-Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- ------------------ - -Files: s2/cmd/internal/filepathx/* - -Copyright 2016 The filepathx Authors - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/cel.dev/expr/MAINTAINERS.md b/vendor/cel.dev/expr/MAINTAINERS.md new file mode 100644 index 000000000..1ed2eb8ab --- /dev/null +++ b/vendor/cel.dev/expr/MAINTAINERS.md @@ -0,0 +1,13 @@ +# CEL Language Council + +| Name | Company | Area of Expertise | +|-----------------|--------------|-------------------| +| Alfred Fuller | Facebook | cel-cpp, cel-spec | +| Jim Larson | Google | cel-go, cel-spec | +| Matthais Blume | Google | cel-spec | +| Tristan Swadell | Google | cel-go, cel-spec | + +## Emeritus + +* Sanjay Ghemawat (Google) +* Wolfgang Grieskamp (Facebook) diff --git a/vendor/cel.dev/expr/MODULE.bazel b/vendor/cel.dev/expr/MODULE.bazel new file mode 100644 index 000000000..9794266f5 --- /dev/null +++ b/vendor/cel.dev/expr/MODULE.bazel @@ -0,0 +1,70 @@ +module( + name = "cel-spec", +) + +bazel_dep( + name = "bazel_skylib", + version = "1.7.1", +) +bazel_dep( + name = "gazelle", + version = "0.36.0", + repo_name = "bazel_gazelle", +) +bazel_dep( + name = "googleapis", + version = "0.0.0-20240819-fe8ba054a", + repo_name = "com_google_googleapis", +) +bazel_dep( + name = "protobuf", + version = "26.0", + repo_name = "com_google_protobuf", +) +bazel_dep( + name = "rules_cc", + version = "0.0.9", +) +bazel_dep( + name = "rules_go", + version = "0.49.0", + repo_name = "io_bazel_rules_go", +) +bazel_dep( + name = "rules_java", + version = "7.6.5", +) +bazel_dep( + name = "rules_proto", + version = "6.0.0", +) +bazel_dep( + name = "rules_python", + version = "0.35.0", +) + +### PYTHON ### +python = use_extension("@rules_python//python/extensions:python.bzl", "python") +python.toolchain( + ignore_root_user_error = True, + python_version = "3.11", +) + +switched_rules = use_extension("@com_google_googleapis//:extensions.bzl", "switched_rules") +switched_rules.use_languages( + cc = True, + go = True, + java = True, +) +use_repo(switched_rules, "com_google_googleapis_imports") + +go_sdk = use_extension("@io_bazel_rules_go//go:extensions.bzl", "go_sdk") +go_sdk.download(version = "1.21.1") + +go_deps = use_extension("@bazel_gazelle//:extensions.bzl", "go_deps") +go_deps.from_file(go_mod = "//:go.mod") +use_repo( + go_deps, + "org_golang_google_genproto_googleapis_rpc", + "org_golang_google_protobuf", +) diff --git a/vendor/cel.dev/expr/README.md b/vendor/cel.dev/expr/README.md new file mode 100644 index 000000000..7930c0b75 --- /dev/null +++ 
b/vendor/cel.dev/expr/README.md @@ -0,0 +1,73 @@ +# Common Expression Language + +The Common Expression Language (CEL) implements common semantics for expression +evaluation, enabling different applications to more easily interoperate. + +Key Applications + +* Security policy: organizations have complex infrastructure and need common + tooling to reason about the system as a whole +* Protocols: expressions are a useful data type and require interoperability + across programming languages and platforms. + + +Guiding philosophy: + +1. Keep it small & fast. + * CEL evaluates in linear time, is mutation free, and not Turing-complete. + This limitation is a feature of the language design, which allows the + implementation to evaluate orders of magnitude faster than equivalently + sandboxed JavaScript. +2. Make it extensible. + * CEL is designed to be embedded in applications, and allows for + extensibility via its context which allows for functions and data to be + provided by the software that embeds it. +3. Developer-friendly. + * The language is approachable to developers. The initial spec was based + on the experience of developing Firebase Rules and usability testing + many prior iterations. + * The library itself and accompanying toolings should be easy to adopt by + teams that seek to integrate CEL into their platforms. + +The required components of a system that supports CEL are: + +* The textual representation of an expression as written by a developer. It is + of similar syntax to expressions in C/C++/Java/JavaScript +* A representation of the program's abstract syntax tree (AST). +* A compiler library that converts the textual representation to the binary + representation. This can be done ahead of time (in the control plane) or + just before evaluation (in the data plane). +* A context containing one or more typed variables, often protobuf messages. + Most use-cases will use `attribute_context.proto` +* An evaluator library that takes the binary format in the context and + produces a result, usually a Boolean. + +For use cases which require persistence or cross-process communcation, it is +highly recommended to serialize the type-checked expression as a protocol +buffer. The CEL team will maintains canonical protocol buffers for ASTs and +will keep these versions identical and wire-compatible in perpetuity: + +* [CEL canonical](https://github.com/google/cel-spec/tree/master/proto/cel/expr) +* [CEL v1alpha1](https://github.com/googleapis/googleapis/tree/master/google/api/expr/v1alpha1) + + +Example of boolean conditions and object construction: + +``` c +// Condition +account.balance >= transaction.withdrawal + || (account.overdraftProtection + && account.overdraftLimit >= transaction.withdrawal - account.balance) + +// Object construction +common.GeoPoint{ latitude: 10.0, longitude: -5.5 } +``` + +For more detail, see: + +* [Introduction](doc/intro.md) +* [Language Definition](doc/langdef.md) + +Released under the [Apache License](LICENSE). + +Disclaimer: This is not an official Google product. 
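For orientation on how an embedding Go program consumes expressions like the condition in the README above, here is a minimal sketch using the google/cel-go evaluator library (a dependency pulled in alongside the vendored cel.dev/expr protos). This block is an illustrative aside, not part of the patch or of any vendored file; the dyn-typed `account` and `transaction` bindings and the simplified single-clause condition are assumptions made only for the example.

```go
// Minimal sketch (assumption: github.com/google/cel-go is available as a module
// dependency): compile, program, and evaluate a CEL condition.
package main

import (
	"fmt"
	"log"

	"github.com/google/cel-go/cel"
)

func main() {
	// Declare the variables referenced by the expression as dynamically typed
	// maps so the example does not depend on any protobuf message definitions.
	env, err := cel.NewEnv(
		cel.Variable("account", cel.MapType(cel.StringType, cel.DynType)),
		cel.Variable("transaction", cel.MapType(cel.StringType, cel.DynType)),
	)
	if err != nil {
		log.Fatal(err)
	}

	// Compile (parse and type-check) a simplified form of the README condition.
	ast, iss := env.Compile(`account.balance >= transaction.withdrawal`)
	if iss != nil && iss.Err() != nil {
		log.Fatal(iss.Err())
	}

	// Turn the checked AST into an evaluable program.
	prg, err := env.Program(ast)
	if err != nil {
		log.Fatal(err)
	}

	// Evaluate against concrete bindings; the result is a CEL boolean value.
	out, _, err := prg.Eval(map[string]any{
		"account":     map[string]any{"balance": 500},
		"transaction": map[string]any{"withdrawal": 50},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(out) // true
}
```

The compile step can run ahead of time and the checked AST can be serialized with the cel.dev/expr protos vendored here, which is the persistence path the README recommends for cross-process use.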
diff --git a/vendor/cel.dev/expr/WORKSPACE b/vendor/cel.dev/expr/WORKSPACE new file mode 100644 index 000000000..b6dc9ed67 --- /dev/null +++ b/vendor/cel.dev/expr/WORKSPACE @@ -0,0 +1,145 @@ +load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") + +http_archive( + name = "io_bazel_rules_go", + sha256 = "099a9fb96a376ccbbb7d291ed4ecbdfd42f6bc822ab77ae6f1b5cb9e914e94fa", + urls = [ + "https://mirror.bazel.build/github.com/bazelbuild/rules_go/releases/download/v0.35.0/rules_go-v0.35.0.zip", + "https://github.com/bazelbuild/rules_go/releases/download/v0.35.0/rules_go-v0.35.0.zip", + ], +) + +http_archive( + name = "bazel_gazelle", + sha256 = "ecba0f04f96b4960a5b250c8e8eeec42281035970aa8852dda73098274d14a1d", + urls = [ + "https://mirror.bazel.build/github.com/bazelbuild/bazel-gazelle/releases/download/v0.29.0/bazel-gazelle-v0.29.0.tar.gz", + "https://github.com/bazelbuild/bazel-gazelle/releases/download/v0.29.0/bazel-gazelle-v0.29.0.tar.gz", + ], +) + +http_archive( + name = "rules_proto", + sha256 = "e017528fd1c91c5a33f15493e3a398181a9e821a804eb7ff5acdd1d2d6c2b18d", + strip_prefix = "rules_proto-4.0.0-3.20.0", + urls = [ + "https://github.com/bazelbuild/rules_proto/archive/refs/tags/4.0.0-3.20.0.tar.gz", + ], +) + +# googleapis as of 09/16/2024 +http_archive( + name = "com_google_googleapis", + strip_prefix = "googleapis-4082d5e51e8481f6ccc384cacd896f4e78f19dee", + sha256 = "57319889d47578b3c89bf1b3f34888d796a8913d63b32d750a4cd12ed303c4e8", + urls = [ + "https://github.com/googleapis/googleapis/archive/4082d5e51e8481f6ccc384cacd896f4e78f19dee.tar.gz", + ], +) + +# protobuf +http_archive( + name = "com_google_protobuf", + sha256 = "8242327e5df8c80ba49e4165250b8f79a76bd11765facefaaecfca7747dc8da2", + strip_prefix = "protobuf-3.21.5", + urls = ["https://github.com/protocolbuffers/protobuf/archive/v3.21.5.zip"], +) + +# googletest +http_archive( + name = "com_google_googletest", + urls = ["https://github.com/google/googletest/archive/master.zip"], + strip_prefix = "googletest-master", +) + +# gflags +http_archive( + name = "com_github_gflags_gflags", + sha256 = "6e16c8bc91b1310a44f3965e616383dbda48f83e8c1eaa2370a215057b00cabe", + strip_prefix = "gflags-77592648e3f3be87d6c7123eb81cbad75f9aef5a", + urls = [ + "https://mirror.bazel.build/github.com/gflags/gflags/archive/77592648e3f3be87d6c7123eb81cbad75f9aef5a.tar.gz", + "https://github.com/gflags/gflags/archive/77592648e3f3be87d6c7123eb81cbad75f9aef5a.tar.gz", + ], +) + +# glog +http_archive( + name = "com_google_glog", + sha256 = "1ee310e5d0a19b9d584a855000434bb724aa744745d5b8ab1855c85bff8a8e21", + strip_prefix = "glog-028d37889a1e80e8a07da1b8945ac706259e5fd8", + urls = [ + "https://mirror.bazel.build/github.com/google/glog/archive/028d37889a1e80e8a07da1b8945ac706259e5fd8.tar.gz", + "https://github.com/google/glog/archive/028d37889a1e80e8a07da1b8945ac706259e5fd8.tar.gz", + ], +) + +# absl +http_archive( + name = "com_google_absl", + strip_prefix = "abseil-cpp-master", + urls = ["https://github.com/abseil/abseil-cpp/archive/master.zip"], +) + +load("@io_bazel_rules_go//go:deps.bzl", "go_rules_dependencies", "go_register_toolchains") +load("@bazel_gazelle//:deps.bzl", "gazelle_dependencies", "go_repository") +load("@com_google_googleapis//:repository_rules.bzl", "switched_rules_by_language") +load("@rules_proto//proto:repositories.bzl", "rules_proto_dependencies", "rules_proto_toolchains") +load("@com_google_protobuf//:protobuf_deps.bzl", "protobuf_deps") + +switched_rules_by_language( + name = "com_google_googleapis_imports", + cc = 
True, +) + +# Do *not* call *_dependencies(), etc, yet. See comment at the end. + +# Generated Google APIs protos for Golang +# Generated Google APIs protos for Golang 08/26/2024 +go_repository( + name = "org_golang_google_genproto_googleapis_api", + build_file_proto_mode = "disable_global", + importpath = "google.golang.org/genproto/googleapis/api", + sum = "h1:YcyjlL1PRr2Q17/I0dPk2JmYS5CDXfcdb2Z3YRioEbw=", + version = "v0.0.0-20240826202546-f6391c0de4c7", +) + +# Generated Google APIs protos for Golang 08/26/2024 +go_repository( + name = "org_golang_google_genproto_googleapis_rpc", + build_file_proto_mode = "disable_global", + importpath = "google.golang.org/genproto/googleapis/rpc", + sum = "h1:2035KHhUv+EpyB+hWgJnaWKJOdX1E95w2S8Rr4uWKTs=", + version = "v0.0.0-20240826202546-f6391c0de4c7", +) + +# gRPC deps +go_repository( + name = "org_golang_google_grpc", + build_file_proto_mode = "disable_global", + importpath = "google.golang.org/grpc", + tag = "v1.49.0", +) + +go_repository( + name = "org_golang_x_net", + importpath = "golang.org/x/net", + sum = "h1:oWX7TPOiFAMXLq8o0ikBYfCJVlRHBcsciT5bXOrH628=", + version = "v0.0.0-20190311183353-d8887717615a", +) + +go_repository( + name = "org_golang_x_text", + importpath = "golang.org/x/text", + sum = "h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=", + version = "v0.3.2", +) + +# Run the dependencies at the end. These will silently try to import some +# of the above repositories but at different versions, so ours must come first. +go_rules_dependencies() +go_register_toolchains(version = "1.19.1") +gazelle_dependencies() +rules_proto_dependencies() +rules_proto_toolchains() +protobuf_deps() diff --git a/vendor/github.com/klauspost/compress/s2sx.sum b/vendor/cel.dev/expr/WORKSPACE.bzlmod similarity index 100% rename from vendor/github.com/klauspost/compress/s2sx.sum rename to vendor/cel.dev/expr/WORKSPACE.bzlmod diff --git a/vendor/cel.dev/expr/checked.pb.go b/vendor/cel.dev/expr/checked.pb.go new file mode 100644 index 000000000..bb225c8ab --- /dev/null +++ b/vendor/cel.dev/expr/checked.pb.go @@ -0,0 +1,1432 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.28.1 +// protoc v3.21.5 +// source: cel/expr/checked.proto + +package expr + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + emptypb "google.golang.org/protobuf/types/known/emptypb" + structpb "google.golang.org/protobuf/types/known/structpb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type Type_PrimitiveType int32 + +const ( + Type_PRIMITIVE_TYPE_UNSPECIFIED Type_PrimitiveType = 0 + Type_BOOL Type_PrimitiveType = 1 + Type_INT64 Type_PrimitiveType = 2 + Type_UINT64 Type_PrimitiveType = 3 + Type_DOUBLE Type_PrimitiveType = 4 + Type_STRING Type_PrimitiveType = 5 + Type_BYTES Type_PrimitiveType = 6 +) + +// Enum value maps for Type_PrimitiveType. 
+var ( + Type_PrimitiveType_name = map[int32]string{ + 0: "PRIMITIVE_TYPE_UNSPECIFIED", + 1: "BOOL", + 2: "INT64", + 3: "UINT64", + 4: "DOUBLE", + 5: "STRING", + 6: "BYTES", + } + Type_PrimitiveType_value = map[string]int32{ + "PRIMITIVE_TYPE_UNSPECIFIED": 0, + "BOOL": 1, + "INT64": 2, + "UINT64": 3, + "DOUBLE": 4, + "STRING": 5, + "BYTES": 6, + } +) + +func (x Type_PrimitiveType) Enum() *Type_PrimitiveType { + p := new(Type_PrimitiveType) + *p = x + return p +} + +func (x Type_PrimitiveType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Type_PrimitiveType) Descriptor() protoreflect.EnumDescriptor { + return file_cel_expr_checked_proto_enumTypes[0].Descriptor() +} + +func (Type_PrimitiveType) Type() protoreflect.EnumType { + return &file_cel_expr_checked_proto_enumTypes[0] +} + +func (x Type_PrimitiveType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use Type_PrimitiveType.Descriptor instead. +func (Type_PrimitiveType) EnumDescriptor() ([]byte, []int) { + return file_cel_expr_checked_proto_rawDescGZIP(), []int{1, 0} +} + +type Type_WellKnownType int32 + +const ( + Type_WELL_KNOWN_TYPE_UNSPECIFIED Type_WellKnownType = 0 + Type_ANY Type_WellKnownType = 1 + Type_TIMESTAMP Type_WellKnownType = 2 + Type_DURATION Type_WellKnownType = 3 +) + +// Enum value maps for Type_WellKnownType. +var ( + Type_WellKnownType_name = map[int32]string{ + 0: "WELL_KNOWN_TYPE_UNSPECIFIED", + 1: "ANY", + 2: "TIMESTAMP", + 3: "DURATION", + } + Type_WellKnownType_value = map[string]int32{ + "WELL_KNOWN_TYPE_UNSPECIFIED": 0, + "ANY": 1, + "TIMESTAMP": 2, + "DURATION": 3, + } +) + +func (x Type_WellKnownType) Enum() *Type_WellKnownType { + p := new(Type_WellKnownType) + *p = x + return p +} + +func (x Type_WellKnownType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Type_WellKnownType) Descriptor() protoreflect.EnumDescriptor { + return file_cel_expr_checked_proto_enumTypes[1].Descriptor() +} + +func (Type_WellKnownType) Type() protoreflect.EnumType { + return &file_cel_expr_checked_proto_enumTypes[1] +} + +func (x Type_WellKnownType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use Type_WellKnownType.Descriptor instead. 
+func (Type_WellKnownType) EnumDescriptor() ([]byte, []int) { + return file_cel_expr_checked_proto_rawDescGZIP(), []int{1, 1} +} + +type CheckedExpr struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ReferenceMap map[int64]*Reference `protobuf:"bytes,2,rep,name=reference_map,json=referenceMap,proto3" json:"reference_map,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + TypeMap map[int64]*Type `protobuf:"bytes,3,rep,name=type_map,json=typeMap,proto3" json:"type_map,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + SourceInfo *SourceInfo `protobuf:"bytes,5,opt,name=source_info,json=sourceInfo,proto3" json:"source_info,omitempty"` + ExprVersion string `protobuf:"bytes,6,opt,name=expr_version,json=exprVersion,proto3" json:"expr_version,omitempty"` + Expr *Expr `protobuf:"bytes,4,opt,name=expr,proto3" json:"expr,omitempty"` +} + +func (x *CheckedExpr) Reset() { + *x = CheckedExpr{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_checked_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CheckedExpr) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CheckedExpr) ProtoMessage() {} + +func (x *CheckedExpr) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_checked_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CheckedExpr.ProtoReflect.Descriptor instead. +func (*CheckedExpr) Descriptor() ([]byte, []int) { + return file_cel_expr_checked_proto_rawDescGZIP(), []int{0} +} + +func (x *CheckedExpr) GetReferenceMap() map[int64]*Reference { + if x != nil { + return x.ReferenceMap + } + return nil +} + +func (x *CheckedExpr) GetTypeMap() map[int64]*Type { + if x != nil { + return x.TypeMap + } + return nil +} + +func (x *CheckedExpr) GetSourceInfo() *SourceInfo { + if x != nil { + return x.SourceInfo + } + return nil +} + +func (x *CheckedExpr) GetExprVersion() string { + if x != nil { + return x.ExprVersion + } + return "" +} + +func (x *CheckedExpr) GetExpr() *Expr { + if x != nil { + return x.Expr + } + return nil +} + +type Type struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to TypeKind: + // + // *Type_Dyn + // *Type_Null + // *Type_Primitive + // *Type_Wrapper + // *Type_WellKnown + // *Type_ListType_ + // *Type_MapType_ + // *Type_Function + // *Type_MessageType + // *Type_TypeParam + // *Type_Type + // *Type_Error + // *Type_AbstractType_ + TypeKind isType_TypeKind `protobuf_oneof:"type_kind"` +} + +func (x *Type) Reset() { + *x = Type{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_checked_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Type) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Type) ProtoMessage() {} + +func (x *Type) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_checked_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// 
Deprecated: Use Type.ProtoReflect.Descriptor instead. +func (*Type) Descriptor() ([]byte, []int) { + return file_cel_expr_checked_proto_rawDescGZIP(), []int{1} +} + +func (m *Type) GetTypeKind() isType_TypeKind { + if m != nil { + return m.TypeKind + } + return nil +} + +func (x *Type) GetDyn() *emptypb.Empty { + if x, ok := x.GetTypeKind().(*Type_Dyn); ok { + return x.Dyn + } + return nil +} + +func (x *Type) GetNull() structpb.NullValue { + if x, ok := x.GetTypeKind().(*Type_Null); ok { + return x.Null + } + return structpb.NullValue(0) +} + +func (x *Type) GetPrimitive() Type_PrimitiveType { + if x, ok := x.GetTypeKind().(*Type_Primitive); ok { + return x.Primitive + } + return Type_PRIMITIVE_TYPE_UNSPECIFIED +} + +func (x *Type) GetWrapper() Type_PrimitiveType { + if x, ok := x.GetTypeKind().(*Type_Wrapper); ok { + return x.Wrapper + } + return Type_PRIMITIVE_TYPE_UNSPECIFIED +} + +func (x *Type) GetWellKnown() Type_WellKnownType { + if x, ok := x.GetTypeKind().(*Type_WellKnown); ok { + return x.WellKnown + } + return Type_WELL_KNOWN_TYPE_UNSPECIFIED +} + +func (x *Type) GetListType() *Type_ListType { + if x, ok := x.GetTypeKind().(*Type_ListType_); ok { + return x.ListType + } + return nil +} + +func (x *Type) GetMapType() *Type_MapType { + if x, ok := x.GetTypeKind().(*Type_MapType_); ok { + return x.MapType + } + return nil +} + +func (x *Type) GetFunction() *Type_FunctionType { + if x, ok := x.GetTypeKind().(*Type_Function); ok { + return x.Function + } + return nil +} + +func (x *Type) GetMessageType() string { + if x, ok := x.GetTypeKind().(*Type_MessageType); ok { + return x.MessageType + } + return "" +} + +func (x *Type) GetTypeParam() string { + if x, ok := x.GetTypeKind().(*Type_TypeParam); ok { + return x.TypeParam + } + return "" +} + +func (x *Type) GetType() *Type { + if x, ok := x.GetTypeKind().(*Type_Type); ok { + return x.Type + } + return nil +} + +func (x *Type) GetError() *emptypb.Empty { + if x, ok := x.GetTypeKind().(*Type_Error); ok { + return x.Error + } + return nil +} + +func (x *Type) GetAbstractType() *Type_AbstractType { + if x, ok := x.GetTypeKind().(*Type_AbstractType_); ok { + return x.AbstractType + } + return nil +} + +type isType_TypeKind interface { + isType_TypeKind() +} + +type Type_Dyn struct { + Dyn *emptypb.Empty `protobuf:"bytes,1,opt,name=dyn,proto3,oneof"` +} + +type Type_Null struct { + Null structpb.NullValue `protobuf:"varint,2,opt,name=null,proto3,enum=google.protobuf.NullValue,oneof"` +} + +type Type_Primitive struct { + Primitive Type_PrimitiveType `protobuf:"varint,3,opt,name=primitive,proto3,enum=cel.expr.Type_PrimitiveType,oneof"` +} + +type Type_Wrapper struct { + Wrapper Type_PrimitiveType `protobuf:"varint,4,opt,name=wrapper,proto3,enum=cel.expr.Type_PrimitiveType,oneof"` +} + +type Type_WellKnown struct { + WellKnown Type_WellKnownType `protobuf:"varint,5,opt,name=well_known,json=wellKnown,proto3,enum=cel.expr.Type_WellKnownType,oneof"` +} + +type Type_ListType_ struct { + ListType *Type_ListType `protobuf:"bytes,6,opt,name=list_type,json=listType,proto3,oneof"` +} + +type Type_MapType_ struct { + MapType *Type_MapType `protobuf:"bytes,7,opt,name=map_type,json=mapType,proto3,oneof"` +} + +type Type_Function struct { + Function *Type_FunctionType `protobuf:"bytes,8,opt,name=function,proto3,oneof"` +} + +type Type_MessageType struct { + MessageType string `protobuf:"bytes,9,opt,name=message_type,json=messageType,proto3,oneof"` +} + +type Type_TypeParam struct { + TypeParam string 
`protobuf:"bytes,10,opt,name=type_param,json=typeParam,proto3,oneof"` +} + +type Type_Type struct { + Type *Type `protobuf:"bytes,11,opt,name=type,proto3,oneof"` +} + +type Type_Error struct { + Error *emptypb.Empty `protobuf:"bytes,12,opt,name=error,proto3,oneof"` +} + +type Type_AbstractType_ struct { + AbstractType *Type_AbstractType `protobuf:"bytes,14,opt,name=abstract_type,json=abstractType,proto3,oneof"` +} + +func (*Type_Dyn) isType_TypeKind() {} + +func (*Type_Null) isType_TypeKind() {} + +func (*Type_Primitive) isType_TypeKind() {} + +func (*Type_Wrapper) isType_TypeKind() {} + +func (*Type_WellKnown) isType_TypeKind() {} + +func (*Type_ListType_) isType_TypeKind() {} + +func (*Type_MapType_) isType_TypeKind() {} + +func (*Type_Function) isType_TypeKind() {} + +func (*Type_MessageType) isType_TypeKind() {} + +func (*Type_TypeParam) isType_TypeKind() {} + +func (*Type_Type) isType_TypeKind() {} + +func (*Type_Error) isType_TypeKind() {} + +func (*Type_AbstractType_) isType_TypeKind() {} + +type Decl struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Types that are assignable to DeclKind: + // + // *Decl_Ident + // *Decl_Function + DeclKind isDecl_DeclKind `protobuf_oneof:"decl_kind"` +} + +func (x *Decl) Reset() { + *x = Decl{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_checked_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Decl) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Decl) ProtoMessage() {} + +func (x *Decl) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_checked_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Decl.ProtoReflect.Descriptor instead. 
+func (*Decl) Descriptor() ([]byte, []int) { + return file_cel_expr_checked_proto_rawDescGZIP(), []int{2} +} + +func (x *Decl) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (m *Decl) GetDeclKind() isDecl_DeclKind { + if m != nil { + return m.DeclKind + } + return nil +} + +func (x *Decl) GetIdent() *Decl_IdentDecl { + if x, ok := x.GetDeclKind().(*Decl_Ident); ok { + return x.Ident + } + return nil +} + +func (x *Decl) GetFunction() *Decl_FunctionDecl { + if x, ok := x.GetDeclKind().(*Decl_Function); ok { + return x.Function + } + return nil +} + +type isDecl_DeclKind interface { + isDecl_DeclKind() +} + +type Decl_Ident struct { + Ident *Decl_IdentDecl `protobuf:"bytes,2,opt,name=ident,proto3,oneof"` +} + +type Decl_Function struct { + Function *Decl_FunctionDecl `protobuf:"bytes,3,opt,name=function,proto3,oneof"` +} + +func (*Decl_Ident) isDecl_DeclKind() {} + +func (*Decl_Function) isDecl_DeclKind() {} + +type Reference struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + OverloadId []string `protobuf:"bytes,3,rep,name=overload_id,json=overloadId,proto3" json:"overload_id,omitempty"` + Value *Constant `protobuf:"bytes,4,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *Reference) Reset() { + *x = Reference{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_checked_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Reference) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Reference) ProtoMessage() {} + +func (x *Reference) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_checked_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Reference.ProtoReflect.Descriptor instead. +func (*Reference) Descriptor() ([]byte, []int) { + return file_cel_expr_checked_proto_rawDescGZIP(), []int{3} +} + +func (x *Reference) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Reference) GetOverloadId() []string { + if x != nil { + return x.OverloadId + } + return nil +} + +func (x *Reference) GetValue() *Constant { + if x != nil { + return x.Value + } + return nil +} + +type Type_ListType struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ElemType *Type `protobuf:"bytes,1,opt,name=elem_type,json=elemType,proto3" json:"elem_type,omitempty"` +} + +func (x *Type_ListType) Reset() { + *x = Type_ListType{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_checked_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Type_ListType) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Type_ListType) ProtoMessage() {} + +func (x *Type_ListType) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_checked_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Type_ListType.ProtoReflect.Descriptor instead. 
+func (*Type_ListType) Descriptor() ([]byte, []int) { + return file_cel_expr_checked_proto_rawDescGZIP(), []int{1, 0} +} + +func (x *Type_ListType) GetElemType() *Type { + if x != nil { + return x.ElemType + } + return nil +} + +type Type_MapType struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + KeyType *Type `protobuf:"bytes,1,opt,name=key_type,json=keyType,proto3" json:"key_type,omitempty"` + ValueType *Type `protobuf:"bytes,2,opt,name=value_type,json=valueType,proto3" json:"value_type,omitempty"` +} + +func (x *Type_MapType) Reset() { + *x = Type_MapType{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_checked_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Type_MapType) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Type_MapType) ProtoMessage() {} + +func (x *Type_MapType) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_checked_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Type_MapType.ProtoReflect.Descriptor instead. +func (*Type_MapType) Descriptor() ([]byte, []int) { + return file_cel_expr_checked_proto_rawDescGZIP(), []int{1, 1} +} + +func (x *Type_MapType) GetKeyType() *Type { + if x != nil { + return x.KeyType + } + return nil +} + +func (x *Type_MapType) GetValueType() *Type { + if x != nil { + return x.ValueType + } + return nil +} + +type Type_FunctionType struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ResultType *Type `protobuf:"bytes,1,opt,name=result_type,json=resultType,proto3" json:"result_type,omitempty"` + ArgTypes []*Type `protobuf:"bytes,2,rep,name=arg_types,json=argTypes,proto3" json:"arg_types,omitempty"` +} + +func (x *Type_FunctionType) Reset() { + *x = Type_FunctionType{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_checked_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Type_FunctionType) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Type_FunctionType) ProtoMessage() {} + +func (x *Type_FunctionType) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_checked_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Type_FunctionType.ProtoReflect.Descriptor instead. 
+func (*Type_FunctionType) Descriptor() ([]byte, []int) { + return file_cel_expr_checked_proto_rawDescGZIP(), []int{1, 2} +} + +func (x *Type_FunctionType) GetResultType() *Type { + if x != nil { + return x.ResultType + } + return nil +} + +func (x *Type_FunctionType) GetArgTypes() []*Type { + if x != nil { + return x.ArgTypes + } + return nil +} + +type Type_AbstractType struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + ParameterTypes []*Type `protobuf:"bytes,2,rep,name=parameter_types,json=parameterTypes,proto3" json:"parameter_types,omitempty"` +} + +func (x *Type_AbstractType) Reset() { + *x = Type_AbstractType{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_checked_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Type_AbstractType) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Type_AbstractType) ProtoMessage() {} + +func (x *Type_AbstractType) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_checked_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Type_AbstractType.ProtoReflect.Descriptor instead. +func (*Type_AbstractType) Descriptor() ([]byte, []int) { + return file_cel_expr_checked_proto_rawDescGZIP(), []int{1, 3} +} + +func (x *Type_AbstractType) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Type_AbstractType) GetParameterTypes() []*Type { + if x != nil { + return x.ParameterTypes + } + return nil +} + +type Decl_IdentDecl struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Type *Type `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + Value *Constant `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + Doc string `protobuf:"bytes,3,opt,name=doc,proto3" json:"doc,omitempty"` +} + +func (x *Decl_IdentDecl) Reset() { + *x = Decl_IdentDecl{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_checked_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Decl_IdentDecl) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Decl_IdentDecl) ProtoMessage() {} + +func (x *Decl_IdentDecl) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_checked_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Decl_IdentDecl.ProtoReflect.Descriptor instead. 
+func (*Decl_IdentDecl) Descriptor() ([]byte, []int) { + return file_cel_expr_checked_proto_rawDescGZIP(), []int{2, 0} +} + +func (x *Decl_IdentDecl) GetType() *Type { + if x != nil { + return x.Type + } + return nil +} + +func (x *Decl_IdentDecl) GetValue() *Constant { + if x != nil { + return x.Value + } + return nil +} + +func (x *Decl_IdentDecl) GetDoc() string { + if x != nil { + return x.Doc + } + return "" +} + +type Decl_FunctionDecl struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Overloads []*Decl_FunctionDecl_Overload `protobuf:"bytes,1,rep,name=overloads,proto3" json:"overloads,omitempty"` +} + +func (x *Decl_FunctionDecl) Reset() { + *x = Decl_FunctionDecl{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_checked_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Decl_FunctionDecl) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Decl_FunctionDecl) ProtoMessage() {} + +func (x *Decl_FunctionDecl) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_checked_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Decl_FunctionDecl.ProtoReflect.Descriptor instead. +func (*Decl_FunctionDecl) Descriptor() ([]byte, []int) { + return file_cel_expr_checked_proto_rawDescGZIP(), []int{2, 1} +} + +func (x *Decl_FunctionDecl) GetOverloads() []*Decl_FunctionDecl_Overload { + if x != nil { + return x.Overloads + } + return nil +} + +type Decl_FunctionDecl_Overload struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + OverloadId string `protobuf:"bytes,1,opt,name=overload_id,json=overloadId,proto3" json:"overload_id,omitempty"` + Params []*Type `protobuf:"bytes,2,rep,name=params,proto3" json:"params,omitempty"` + TypeParams []string `protobuf:"bytes,3,rep,name=type_params,json=typeParams,proto3" json:"type_params,omitempty"` + ResultType *Type `protobuf:"bytes,4,opt,name=result_type,json=resultType,proto3" json:"result_type,omitempty"` + IsInstanceFunction bool `protobuf:"varint,5,opt,name=is_instance_function,json=isInstanceFunction,proto3" json:"is_instance_function,omitempty"` + Doc string `protobuf:"bytes,6,opt,name=doc,proto3" json:"doc,omitempty"` +} + +func (x *Decl_FunctionDecl_Overload) Reset() { + *x = Decl_FunctionDecl_Overload{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_checked_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Decl_FunctionDecl_Overload) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Decl_FunctionDecl_Overload) ProtoMessage() {} + +func (x *Decl_FunctionDecl_Overload) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_checked_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Decl_FunctionDecl_Overload.ProtoReflect.Descriptor instead. 
+func (*Decl_FunctionDecl_Overload) Descriptor() ([]byte, []int) { + return file_cel_expr_checked_proto_rawDescGZIP(), []int{2, 1, 0} +} + +func (x *Decl_FunctionDecl_Overload) GetOverloadId() string { + if x != nil { + return x.OverloadId + } + return "" +} + +func (x *Decl_FunctionDecl_Overload) GetParams() []*Type { + if x != nil { + return x.Params + } + return nil +} + +func (x *Decl_FunctionDecl_Overload) GetTypeParams() []string { + if x != nil { + return x.TypeParams + } + return nil +} + +func (x *Decl_FunctionDecl_Overload) GetResultType() *Type { + if x != nil { + return x.ResultType + } + return nil +} + +func (x *Decl_FunctionDecl_Overload) GetIsInstanceFunction() bool { + if x != nil { + return x.IsInstanceFunction + } + return false +} + +func (x *Decl_FunctionDecl_Overload) GetDoc() string { + if x != nil { + return x.Doc + } + return "" +} + +var File_cel_expr_checked_proto protoreflect.FileDescriptor + +var file_cel_expr_checked_proto_rawDesc = []byte{ + 0x0a, 0x16, 0x63, 0x65, 0x6c, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x63, 0x68, 0x65, 0x63, 0x6b, + 0x65, 0x64, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x08, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, + 0x70, 0x72, 0x1a, 0x15, 0x63, 0x65, 0x6c, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x73, 0x79, 0x6e, + 0x74, 0x61, 0x78, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xba, 0x03, 0x0a, 0x0b, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x64, + 0x45, 0x78, 0x70, 0x72, 0x12, 0x4c, 0x0a, 0x0d, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, + 0x65, 0x5f, 0x6d, 0x61, 0x70, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x63, 0x65, + 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x64, 0x45, 0x78, + 0x70, 0x72, 0x2e, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x4d, 0x61, 0x70, 0x45, + 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0c, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x4d, + 0x61, 0x70, 0x12, 0x3d, 0x0a, 0x08, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6d, 0x61, 0x70, 0x18, 0x03, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, + 0x43, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x64, 0x45, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, + 0x4d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x74, 0x79, 0x70, 0x65, 0x4d, 0x61, + 0x70, 0x12, 0x35, 0x0a, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x69, 0x6e, 0x66, 0x6f, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, + 0x72, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x0a, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x21, 0x0a, 0x0c, 0x65, 0x78, 0x70, 0x72, + 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, + 0x65, 0x78, 0x70, 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x22, 0x0a, 0x04, 0x65, + 0x78, 0x70, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, + 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x04, 0x65, 0x78, 0x70, 0x72, 0x1a, + 0x54, 0x0a, 0x11, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x4d, 0x61, 0x70, 0x45, + 0x6e, 0x74, 0x72, 
0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x03, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x29, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, + 0x2e, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x4a, 0x0a, 0x0c, 0x54, 0x79, 0x70, 0x65, 0x4d, 0x61, 0x70, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x03, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x24, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, + 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, + 0x01, 0x22, 0xe6, 0x09, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x2a, 0x0a, 0x03, 0x64, 0x79, + 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x48, + 0x00, 0x52, 0x03, 0x64, 0x79, 0x6e, 0x12, 0x30, 0x0a, 0x04, 0x6e, 0x75, 0x6c, 0x6c, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4e, 0x75, 0x6c, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x48, 0x00, 0x52, 0x04, 0x6e, 0x75, 0x6c, 0x6c, 0x12, 0x3c, 0x0a, 0x09, 0x70, 0x72, 0x69, 0x6d, + 0x69, 0x74, 0x69, 0x76, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1c, 0x2e, 0x63, 0x65, + 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x2e, 0x50, 0x72, 0x69, 0x6d, + 0x69, 0x74, 0x69, 0x76, 0x65, 0x54, 0x79, 0x70, 0x65, 0x48, 0x00, 0x52, 0x09, 0x70, 0x72, 0x69, + 0x6d, 0x69, 0x74, 0x69, 0x76, 0x65, 0x12, 0x38, 0x0a, 0x07, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, + 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1c, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, + 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x2e, 0x50, 0x72, 0x69, 0x6d, 0x69, 0x74, 0x69, 0x76, + 0x65, 0x54, 0x79, 0x70, 0x65, 0x48, 0x00, 0x52, 0x07, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, + 0x12, 0x3d, 0x0a, 0x0a, 0x77, 0x65, 0x6c, 0x6c, 0x5f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1c, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, + 0x54, 0x79, 0x70, 0x65, 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, + 0x70, 0x65, 0x48, 0x00, 0x52, 0x09, 0x77, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x12, + 0x36, 0x0a, 0x09, 0x6c, 0x69, 0x73, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x06, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, + 0x70, 0x65, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x79, 0x70, 0x65, 0x48, 0x00, 0x52, 0x08, 0x6c, + 0x69, 0x73, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x33, 0x0a, 0x08, 0x6d, 0x61, 0x70, 0x5f, 0x74, + 0x79, 0x70, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x63, 0x65, 0x6c, 0x2e, + 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x2e, 0x4d, 0x61, 0x70, 0x54, 0x79, 0x70, + 0x65, 0x48, 0x00, 0x52, 0x07, 0x6d, 0x61, 0x70, 0x54, 0x79, 0x70, 0x65, 0x12, 0x39, 0x0a, 0x08, + 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, + 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x2e, 0x46, + 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 
0x54, 0x79, 0x70, 0x65, 0x48, 0x00, 0x52, 0x08, 0x66, + 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x23, 0x0a, 0x0c, 0x6d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, + 0x0b, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1f, 0x0a, 0x0a, + 0x74, 0x79, 0x70, 0x65, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, + 0x48, 0x00, 0x52, 0x09, 0x74, 0x79, 0x70, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x12, 0x24, 0x0a, + 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, + 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x48, 0x00, 0x52, 0x04, 0x74, + 0x79, 0x70, 0x65, 0x12, 0x2e, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x0c, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x48, 0x00, 0x52, 0x05, 0x65, 0x72, + 0x72, 0x6f, 0x72, 0x12, 0x42, 0x0a, 0x0d, 0x61, 0x62, 0x73, 0x74, 0x72, 0x61, 0x63, 0x74, 0x5f, + 0x74, 0x79, 0x70, 0x65, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x63, 0x65, 0x6c, + 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x2e, 0x41, 0x62, 0x73, 0x74, 0x72, + 0x61, 0x63, 0x74, 0x54, 0x79, 0x70, 0x65, 0x48, 0x00, 0x52, 0x0c, 0x61, 0x62, 0x73, 0x74, 0x72, + 0x61, 0x63, 0x74, 0x54, 0x79, 0x70, 0x65, 0x1a, 0x37, 0x0a, 0x08, 0x4c, 0x69, 0x73, 0x74, 0x54, + 0x79, 0x70, 0x65, 0x12, 0x2b, 0x0a, 0x09, 0x65, 0x6c, 0x65, 0x6d, 0x5f, 0x74, 0x79, 0x70, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, + 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x08, 0x65, 0x6c, 0x65, 0x6d, 0x54, 0x79, 0x70, 0x65, + 0x1a, 0x63, 0x0a, 0x07, 0x4d, 0x61, 0x70, 0x54, 0x79, 0x70, 0x65, 0x12, 0x29, 0x0a, 0x08, 0x6b, + 0x65, 0x79, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, + 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x07, 0x6b, + 0x65, 0x79, 0x54, 0x79, 0x70, 0x65, 0x12, 0x2d, 0x0a, 0x0a, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x5f, + 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, + 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x09, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x54, 0x79, 0x70, 0x65, 0x1a, 0x6c, 0x0a, 0x0c, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, 0x2f, 0x0a, 0x0b, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x5f, + 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, + 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0a, 0x72, 0x65, 0x73, 0x75, + 0x6c, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x2b, 0x0a, 0x09, 0x61, 0x72, 0x67, 0x5f, 0x74, 0x79, + 0x70, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, + 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x08, 0x61, 0x72, 0x67, 0x54, 0x79, + 0x70, 0x65, 0x73, 0x1a, 0x5b, 0x0a, 0x0c, 0x41, 0x62, 0x73, 0x74, 0x72, 0x61, 0x63, 0x74, 0x54, + 0x79, 0x70, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x0f, 0x70, 0x61, 0x72, 0x61, 0x6d, + 0x65, 0x74, 0x65, 0x72, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 
0x2e, 0x54, 0x79, 0x70, 0x65, + 0x52, 0x0e, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x54, 0x79, 0x70, 0x65, 0x73, + 0x22, 0x73, 0x0a, 0x0d, 0x50, 0x72, 0x69, 0x6d, 0x69, 0x74, 0x69, 0x76, 0x65, 0x54, 0x79, 0x70, + 0x65, 0x12, 0x1e, 0x0a, 0x1a, 0x50, 0x52, 0x49, 0x4d, 0x49, 0x54, 0x49, 0x56, 0x45, 0x5f, 0x54, + 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, + 0x00, 0x12, 0x08, 0x0a, 0x04, 0x42, 0x4f, 0x4f, 0x4c, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x49, + 0x4e, 0x54, 0x36, 0x34, 0x10, 0x02, 0x12, 0x0a, 0x0a, 0x06, 0x55, 0x49, 0x4e, 0x54, 0x36, 0x34, + 0x10, 0x03, 0x12, 0x0a, 0x0a, 0x06, 0x44, 0x4f, 0x55, 0x42, 0x4c, 0x45, 0x10, 0x04, 0x12, 0x0a, + 0x0a, 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x05, 0x12, 0x09, 0x0a, 0x05, 0x42, 0x59, + 0x54, 0x45, 0x53, 0x10, 0x06, 0x22, 0x56, 0x0a, 0x0d, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, + 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1f, 0x0a, 0x1b, 0x57, 0x45, 0x4c, 0x4c, 0x5f, 0x4b, + 0x4e, 0x4f, 0x57, 0x4e, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, + 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x41, 0x4e, 0x59, 0x10, 0x01, + 0x12, 0x0d, 0x0a, 0x09, 0x54, 0x49, 0x4d, 0x45, 0x53, 0x54, 0x41, 0x4d, 0x50, 0x10, 0x02, 0x12, + 0x0c, 0x0a, 0x08, 0x44, 0x55, 0x52, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x03, 0x42, 0x0b, 0x0a, + 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6b, 0x69, 0x6e, 0x64, 0x22, 0xc2, 0x04, 0x0a, 0x04, 0x44, + 0x65, 0x63, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x30, 0x0a, 0x05, 0x69, 0x64, 0x65, 0x6e, 0x74, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, + 0x72, 0x2e, 0x44, 0x65, 0x63, 0x6c, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x44, 0x65, 0x63, 0x6c, + 0x48, 0x00, 0x52, 0x05, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x12, 0x39, 0x0a, 0x08, 0x66, 0x75, 0x6e, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x63, 0x65, + 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x44, 0x65, 0x63, 0x6c, 0x2e, 0x46, 0x75, 0x6e, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x63, 0x6c, 0x48, 0x00, 0x52, 0x08, 0x66, 0x75, 0x6e, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x6b, 0x0a, 0x09, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x44, 0x65, 0x63, + 0x6c, 0x12, 0x22, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, + 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x28, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, + 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, + 0x10, 0x0a, 0x03, 0x64, 0x6f, 0x63, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x64, 0x6f, + 0x63, 0x1a, 0xbe, 0x02, 0x0a, 0x0c, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, + 0x63, 0x6c, 0x12, 0x42, 0x0a, 0x09, 0x6f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x73, 0x18, + 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, + 0x2e, 0x44, 0x65, 0x63, 0x6c, 0x2e, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, + 0x63, 0x6c, 0x2e, 0x4f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x09, 0x6f, 0x76, 0x65, + 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x73, 0x1a, 0xe9, 0x01, 0x0a, 0x08, 0x4f, 0x76, 0x65, 0x72, 
0x6c, + 0x6f, 0x61, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x5f, + 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x6f, 0x76, 0x65, 0x72, 0x6c, 0x6f, + 0x61, 0x64, 0x49, 0x64, 0x12, 0x26, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x02, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, + 0x54, 0x79, 0x70, 0x65, 0x52, 0x06, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x1f, 0x0a, 0x0b, + 0x74, 0x79, 0x70, 0x65, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, + 0x09, 0x52, 0x0a, 0x74, 0x79, 0x70, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x2f, 0x0a, + 0x0b, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, + 0x70, 0x65, 0x52, 0x0a, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x30, + 0x0a, 0x14, 0x69, 0x73, 0x5f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x66, 0x75, + 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, 0x69, 0x73, + 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x10, 0x0a, 0x03, 0x64, 0x6f, 0x63, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x64, + 0x6f, 0x63, 0x42, 0x0b, 0x0a, 0x09, 0x64, 0x65, 0x63, 0x6c, 0x5f, 0x6b, 0x69, 0x6e, 0x64, 0x22, + 0x6a, 0x0a, 0x09, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x18, + 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x6f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x49, + 0x64, 0x12, 0x28, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x12, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x43, 0x6f, 0x6e, 0x73, + 0x74, 0x61, 0x6e, 0x74, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x2c, 0x0a, 0x0c, 0x64, + 0x65, 0x76, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x42, 0x09, 0x44, 0x65, 0x63, + 0x6c, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x0c, 0x63, 0x65, 0x6c, 0x2e, 0x64, 0x65, + 0x76, 0x2f, 0x65, 0x78, 0x70, 0x72, 0xf8, 0x01, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x33, +} + +var ( + file_cel_expr_checked_proto_rawDescOnce sync.Once + file_cel_expr_checked_proto_rawDescData = file_cel_expr_checked_proto_rawDesc +) + +func file_cel_expr_checked_proto_rawDescGZIP() []byte { + file_cel_expr_checked_proto_rawDescOnce.Do(func() { + file_cel_expr_checked_proto_rawDescData = protoimpl.X.CompressGZIP(file_cel_expr_checked_proto_rawDescData) + }) + return file_cel_expr_checked_proto_rawDescData +} + +var file_cel_expr_checked_proto_enumTypes = make([]protoimpl.EnumInfo, 2) +var file_cel_expr_checked_proto_msgTypes = make([]protoimpl.MessageInfo, 13) +var file_cel_expr_checked_proto_goTypes = []interface{}{ + (Type_PrimitiveType)(0), // 0: cel.expr.Type.PrimitiveType + (Type_WellKnownType)(0), // 1: cel.expr.Type.WellKnownType + (*CheckedExpr)(nil), // 2: cel.expr.CheckedExpr + (*Type)(nil), // 3: cel.expr.Type + (*Decl)(nil), // 4: cel.expr.Decl + (*Reference)(nil), // 5: cel.expr.Reference + nil, // 6: cel.expr.CheckedExpr.ReferenceMapEntry + nil, // 7: cel.expr.CheckedExpr.TypeMapEntry + (*Type_ListType)(nil), // 8: cel.expr.Type.ListType + 
(*Type_MapType)(nil), // 9: cel.expr.Type.MapType + (*Type_FunctionType)(nil), // 10: cel.expr.Type.FunctionType + (*Type_AbstractType)(nil), // 11: cel.expr.Type.AbstractType + (*Decl_IdentDecl)(nil), // 12: cel.expr.Decl.IdentDecl + (*Decl_FunctionDecl)(nil), // 13: cel.expr.Decl.FunctionDecl + (*Decl_FunctionDecl_Overload)(nil), // 14: cel.expr.Decl.FunctionDecl.Overload + (*SourceInfo)(nil), // 15: cel.expr.SourceInfo + (*Expr)(nil), // 16: cel.expr.Expr + (*emptypb.Empty)(nil), // 17: google.protobuf.Empty + (structpb.NullValue)(0), // 18: google.protobuf.NullValue + (*Constant)(nil), // 19: cel.expr.Constant +} +var file_cel_expr_checked_proto_depIdxs = []int32{ + 6, // 0: cel.expr.CheckedExpr.reference_map:type_name -> cel.expr.CheckedExpr.ReferenceMapEntry + 7, // 1: cel.expr.CheckedExpr.type_map:type_name -> cel.expr.CheckedExpr.TypeMapEntry + 15, // 2: cel.expr.CheckedExpr.source_info:type_name -> cel.expr.SourceInfo + 16, // 3: cel.expr.CheckedExpr.expr:type_name -> cel.expr.Expr + 17, // 4: cel.expr.Type.dyn:type_name -> google.protobuf.Empty + 18, // 5: cel.expr.Type.null:type_name -> google.protobuf.NullValue + 0, // 6: cel.expr.Type.primitive:type_name -> cel.expr.Type.PrimitiveType + 0, // 7: cel.expr.Type.wrapper:type_name -> cel.expr.Type.PrimitiveType + 1, // 8: cel.expr.Type.well_known:type_name -> cel.expr.Type.WellKnownType + 8, // 9: cel.expr.Type.list_type:type_name -> cel.expr.Type.ListType + 9, // 10: cel.expr.Type.map_type:type_name -> cel.expr.Type.MapType + 10, // 11: cel.expr.Type.function:type_name -> cel.expr.Type.FunctionType + 3, // 12: cel.expr.Type.type:type_name -> cel.expr.Type + 17, // 13: cel.expr.Type.error:type_name -> google.protobuf.Empty + 11, // 14: cel.expr.Type.abstract_type:type_name -> cel.expr.Type.AbstractType + 12, // 15: cel.expr.Decl.ident:type_name -> cel.expr.Decl.IdentDecl + 13, // 16: cel.expr.Decl.function:type_name -> cel.expr.Decl.FunctionDecl + 19, // 17: cel.expr.Reference.value:type_name -> cel.expr.Constant + 5, // 18: cel.expr.CheckedExpr.ReferenceMapEntry.value:type_name -> cel.expr.Reference + 3, // 19: cel.expr.CheckedExpr.TypeMapEntry.value:type_name -> cel.expr.Type + 3, // 20: cel.expr.Type.ListType.elem_type:type_name -> cel.expr.Type + 3, // 21: cel.expr.Type.MapType.key_type:type_name -> cel.expr.Type + 3, // 22: cel.expr.Type.MapType.value_type:type_name -> cel.expr.Type + 3, // 23: cel.expr.Type.FunctionType.result_type:type_name -> cel.expr.Type + 3, // 24: cel.expr.Type.FunctionType.arg_types:type_name -> cel.expr.Type + 3, // 25: cel.expr.Type.AbstractType.parameter_types:type_name -> cel.expr.Type + 3, // 26: cel.expr.Decl.IdentDecl.type:type_name -> cel.expr.Type + 19, // 27: cel.expr.Decl.IdentDecl.value:type_name -> cel.expr.Constant + 14, // 28: cel.expr.Decl.FunctionDecl.overloads:type_name -> cel.expr.Decl.FunctionDecl.Overload + 3, // 29: cel.expr.Decl.FunctionDecl.Overload.params:type_name -> cel.expr.Type + 3, // 30: cel.expr.Decl.FunctionDecl.Overload.result_type:type_name -> cel.expr.Type + 31, // [31:31] is the sub-list for method output_type + 31, // [31:31] is the sub-list for method input_type + 31, // [31:31] is the sub-list for extension type_name + 31, // [31:31] is the sub-list for extension extendee + 0, // [0:31] is the sub-list for field type_name +} + +func init() { file_cel_expr_checked_proto_init() } +func file_cel_expr_checked_proto_init() { + if File_cel_expr_checked_proto != nil { + return + } + file_cel_expr_syntax_proto_init() + if !protoimpl.UnsafeEnabled { + 
file_cel_expr_checked_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CheckedExpr); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_checked_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Type); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_checked_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Decl); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_checked_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Reference); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_checked_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Type_ListType); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_checked_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Type_MapType); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_checked_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Type_FunctionType); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_checked_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Type_AbstractType); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_checked_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Decl_IdentDecl); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_checked_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Decl_FunctionDecl); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_checked_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Decl_FunctionDecl_Overload); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_cel_expr_checked_proto_msgTypes[1].OneofWrappers = []interface{}{ + (*Type_Dyn)(nil), + (*Type_Null)(nil), + (*Type_Primitive)(nil), + (*Type_Wrapper)(nil), + (*Type_WellKnown)(nil), + (*Type_ListType_)(nil), + (*Type_MapType_)(nil), + (*Type_Function)(nil), + (*Type_MessageType)(nil), + (*Type_TypeParam)(nil), + (*Type_Type)(nil), + (*Type_Error)(nil), + (*Type_AbstractType_)(nil), + } + file_cel_expr_checked_proto_msgTypes[2].OneofWrappers = []interface{}{ + (*Decl_Ident)(nil), + (*Decl_Function)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: 
file_cel_expr_checked_proto_rawDesc, + NumEnums: 2, + NumMessages: 13, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_cel_expr_checked_proto_goTypes, + DependencyIndexes: file_cel_expr_checked_proto_depIdxs, + EnumInfos: file_cel_expr_checked_proto_enumTypes, + MessageInfos: file_cel_expr_checked_proto_msgTypes, + }.Build() + File_cel_expr_checked_proto = out.File + file_cel_expr_checked_proto_rawDesc = nil + file_cel_expr_checked_proto_goTypes = nil + file_cel_expr_checked_proto_depIdxs = nil +} diff --git a/vendor/cel.dev/expr/cloudbuild.yaml b/vendor/cel.dev/expr/cloudbuild.yaml new file mode 100644 index 000000000..c40881f12 --- /dev/null +++ b/vendor/cel.dev/expr/cloudbuild.yaml @@ -0,0 +1,9 @@ +steps: +- name: 'gcr.io/cloud-builders/bazel:7.0.1' + entrypoint: bazel + args: ['build', '...'] + id: bazel-build + waitFor: ['-'] +timeout: 15m +options: + machineType: 'N1_HIGHCPU_32' diff --git a/vendor/cel.dev/expr/eval.pb.go b/vendor/cel.dev/expr/eval.pb.go new file mode 100644 index 000000000..8f651f9cc --- /dev/null +++ b/vendor/cel.dev/expr/eval.pb.go @@ -0,0 +1,490 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.28.1 +// protoc v3.21.5 +// source: cel/expr/eval.proto + +package expr + +import ( + status "google.golang.org/genproto/googleapis/rpc/status" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type EvalState struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Values []*ExprValue `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"` + Results []*EvalState_Result `protobuf:"bytes,3,rep,name=results,proto3" json:"results,omitempty"` +} + +func (x *EvalState) Reset() { + *x = EvalState{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_eval_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *EvalState) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EvalState) ProtoMessage() {} + +func (x *EvalState) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_eval_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use EvalState.ProtoReflect.Descriptor instead. 
+func (*EvalState) Descriptor() ([]byte, []int) { + return file_cel_expr_eval_proto_rawDescGZIP(), []int{0} +} + +func (x *EvalState) GetValues() []*ExprValue { + if x != nil { + return x.Values + } + return nil +} + +func (x *EvalState) GetResults() []*EvalState_Result { + if x != nil { + return x.Results + } + return nil +} + +type ExprValue struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Kind: + // + // *ExprValue_Value + // *ExprValue_Error + // *ExprValue_Unknown + Kind isExprValue_Kind `protobuf_oneof:"kind"` +} + +func (x *ExprValue) Reset() { + *x = ExprValue{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_eval_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ExprValue) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ExprValue) ProtoMessage() {} + +func (x *ExprValue) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_eval_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ExprValue.ProtoReflect.Descriptor instead. +func (*ExprValue) Descriptor() ([]byte, []int) { + return file_cel_expr_eval_proto_rawDescGZIP(), []int{1} +} + +func (m *ExprValue) GetKind() isExprValue_Kind { + if m != nil { + return m.Kind + } + return nil +} + +func (x *ExprValue) GetValue() *Value { + if x, ok := x.GetKind().(*ExprValue_Value); ok { + return x.Value + } + return nil +} + +func (x *ExprValue) GetError() *ErrorSet { + if x, ok := x.GetKind().(*ExprValue_Error); ok { + return x.Error + } + return nil +} + +func (x *ExprValue) GetUnknown() *UnknownSet { + if x, ok := x.GetKind().(*ExprValue_Unknown); ok { + return x.Unknown + } + return nil +} + +type isExprValue_Kind interface { + isExprValue_Kind() +} + +type ExprValue_Value struct { + Value *Value `protobuf:"bytes,1,opt,name=value,proto3,oneof"` +} + +type ExprValue_Error struct { + Error *ErrorSet `protobuf:"bytes,2,opt,name=error,proto3,oneof"` +} + +type ExprValue_Unknown struct { + Unknown *UnknownSet `protobuf:"bytes,3,opt,name=unknown,proto3,oneof"` +} + +func (*ExprValue_Value) isExprValue_Kind() {} + +func (*ExprValue_Error) isExprValue_Kind() {} + +func (*ExprValue_Unknown) isExprValue_Kind() {} + +type ErrorSet struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Errors []*status.Status `protobuf:"bytes,1,rep,name=errors,proto3" json:"errors,omitempty"` +} + +func (x *ErrorSet) Reset() { + *x = ErrorSet{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_eval_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ErrorSet) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ErrorSet) ProtoMessage() {} + +func (x *ErrorSet) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_eval_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ErrorSet.ProtoReflect.Descriptor instead. 
+func (*ErrorSet) Descriptor() ([]byte, []int) { + return file_cel_expr_eval_proto_rawDescGZIP(), []int{2} +} + +func (x *ErrorSet) GetErrors() []*status.Status { + if x != nil { + return x.Errors + } + return nil +} + +type UnknownSet struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Exprs []int64 `protobuf:"varint,1,rep,packed,name=exprs,proto3" json:"exprs,omitempty"` +} + +func (x *UnknownSet) Reset() { + *x = UnknownSet{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_eval_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UnknownSet) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UnknownSet) ProtoMessage() {} + +func (x *UnknownSet) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_eval_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UnknownSet.ProtoReflect.Descriptor instead. +func (*UnknownSet) Descriptor() ([]byte, []int) { + return file_cel_expr_eval_proto_rawDescGZIP(), []int{3} +} + +func (x *UnknownSet) GetExprs() []int64 { + if x != nil { + return x.Exprs + } + return nil +} + +type EvalState_Result struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Expr int64 `protobuf:"varint,1,opt,name=expr,proto3" json:"expr,omitempty"` + Value int64 `protobuf:"varint,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *EvalState_Result) Reset() { + *x = EvalState_Result{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_eval_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *EvalState_Result) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EvalState_Result) ProtoMessage() {} + +func (x *EvalState_Result) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_eval_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use EvalState_Result.ProtoReflect.Descriptor instead. 
+func (*EvalState_Result) Descriptor() ([]byte, []int) { + return file_cel_expr_eval_proto_rawDescGZIP(), []int{0, 0} +} + +func (x *EvalState_Result) GetExpr() int64 { + if x != nil { + return x.Expr + } + return 0 +} + +func (x *EvalState_Result) GetValue() int64 { + if x != nil { + return x.Value + } + return 0 +} + +var File_cel_expr_eval_proto protoreflect.FileDescriptor + +var file_cel_expr_eval_proto_rawDesc = []byte{ + 0x0a, 0x13, 0x63, 0x65, 0x6c, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x65, 0x76, 0x61, 0x6c, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x08, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x1a, + 0x14, 0x63, 0x65, 0x6c, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x72, 0x70, + 0x63, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xa2, + 0x01, 0x0a, 0x09, 0x45, 0x76, 0x61, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x2b, 0x0a, 0x06, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x63, + 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x52, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x12, 0x34, 0x0a, 0x07, 0x72, 0x65, 0x73, + 0x75, 0x6c, 0x74, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x63, 0x65, 0x6c, + 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x76, 0x61, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x65, 0x2e, + 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x1a, + 0x32, 0x0a, 0x06, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x65, 0x78, 0x70, + 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x04, 0x65, 0x78, 0x70, 0x72, 0x12, 0x14, 0x0a, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x22, 0x9a, 0x01, 0x0a, 0x09, 0x45, 0x78, 0x70, 0x72, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x12, 0x27, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x0f, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x48, 0x00, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x2a, 0x0a, 0x05, 0x65, 0x72, + 0x72, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x65, 0x6c, 0x2e, + 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x53, 0x65, 0x74, 0x48, 0x00, 0x52, + 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x30, 0x0a, 0x07, 0x75, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, + 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, + 0x70, 0x72, 0x2e, 0x55, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x53, 0x65, 0x74, 0x48, 0x00, 0x52, + 0x07, 0x75, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x42, 0x06, 0x0a, 0x04, 0x6b, 0x69, 0x6e, 0x64, + 0x22, 0x36, 0x0a, 0x08, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x53, 0x65, 0x74, 0x12, 0x2a, 0x0a, 0x06, + 0x65, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x52, 0x06, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x22, 0x22, 0x0a, 0x0a, 0x55, 0x6e, 0x6b, 0x6e, + 0x6f, 0x77, 0x6e, 0x53, 0x65, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x78, 0x70, 0x72, 0x73, 0x18, + 0x01, 0x20, 0x03, 0x28, 0x03, 0x52, 0x05, 0x65, 0x78, 0x70, 0x72, 0x73, 0x42, 0x2c, 0x0a, 0x0c, + 0x64, 0x65, 0x76, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x42, 0x09, 
0x45, 0x76, + 0x61, 0x6c, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x0c, 0x63, 0x65, 0x6c, 0x2e, 0x64, + 0x65, 0x76, 0x2f, 0x65, 0x78, 0x70, 0x72, 0xf8, 0x01, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, +} + +var ( + file_cel_expr_eval_proto_rawDescOnce sync.Once + file_cel_expr_eval_proto_rawDescData = file_cel_expr_eval_proto_rawDesc +) + +func file_cel_expr_eval_proto_rawDescGZIP() []byte { + file_cel_expr_eval_proto_rawDescOnce.Do(func() { + file_cel_expr_eval_proto_rawDescData = protoimpl.X.CompressGZIP(file_cel_expr_eval_proto_rawDescData) + }) + return file_cel_expr_eval_proto_rawDescData +} + +var file_cel_expr_eval_proto_msgTypes = make([]protoimpl.MessageInfo, 5) +var file_cel_expr_eval_proto_goTypes = []interface{}{ + (*EvalState)(nil), // 0: cel.expr.EvalState + (*ExprValue)(nil), // 1: cel.expr.ExprValue + (*ErrorSet)(nil), // 2: cel.expr.ErrorSet + (*UnknownSet)(nil), // 3: cel.expr.UnknownSet + (*EvalState_Result)(nil), // 4: cel.expr.EvalState.Result + (*Value)(nil), // 5: cel.expr.Value + (*status.Status)(nil), // 6: google.rpc.Status +} +var file_cel_expr_eval_proto_depIdxs = []int32{ + 1, // 0: cel.expr.EvalState.values:type_name -> cel.expr.ExprValue + 4, // 1: cel.expr.EvalState.results:type_name -> cel.expr.EvalState.Result + 5, // 2: cel.expr.ExprValue.value:type_name -> cel.expr.Value + 2, // 3: cel.expr.ExprValue.error:type_name -> cel.expr.ErrorSet + 3, // 4: cel.expr.ExprValue.unknown:type_name -> cel.expr.UnknownSet + 6, // 5: cel.expr.ErrorSet.errors:type_name -> google.rpc.Status + 6, // [6:6] is the sub-list for method output_type + 6, // [6:6] is the sub-list for method input_type + 6, // [6:6] is the sub-list for extension type_name + 6, // [6:6] is the sub-list for extension extendee + 0, // [0:6] is the sub-list for field type_name +} + +func init() { file_cel_expr_eval_proto_init() } +func file_cel_expr_eval_proto_init() { + if File_cel_expr_eval_proto != nil { + return + } + file_cel_expr_value_proto_init() + if !protoimpl.UnsafeEnabled { + file_cel_expr_eval_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*EvalState); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_eval_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ExprValue); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_eval_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ErrorSet); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_eval_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UnknownSet); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_eval_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*EvalState_Result); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_cel_expr_eval_proto_msgTypes[1].OneofWrappers = []interface{}{ + (*ExprValue_Value)(nil), + (*ExprValue_Error)(nil), + (*ExprValue_Unknown)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + 
GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_cel_expr_eval_proto_rawDesc, + NumEnums: 0, + NumMessages: 5, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_cel_expr_eval_proto_goTypes, + DependencyIndexes: file_cel_expr_eval_proto_depIdxs, + MessageInfos: file_cel_expr_eval_proto_msgTypes, + }.Build() + File_cel_expr_eval_proto = out.File + file_cel_expr_eval_proto_rawDesc = nil + file_cel_expr_eval_proto_goTypes = nil + file_cel_expr_eval_proto_depIdxs = nil +} diff --git a/vendor/cel.dev/expr/explain.pb.go b/vendor/cel.dev/expr/explain.pb.go new file mode 100644 index 000000000..79fd5443b --- /dev/null +++ b/vendor/cel.dev/expr/explain.pb.go @@ -0,0 +1,236 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.28.1 +// protoc v3.21.5 +// source: cel/expr/explain.proto + +package expr + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Deprecated: Do not use. +type Explain struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Values []*Value `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"` + ExprSteps []*Explain_ExprStep `protobuf:"bytes,2,rep,name=expr_steps,json=exprSteps,proto3" json:"expr_steps,omitempty"` +} + +func (x *Explain) Reset() { + *x = Explain{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_explain_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Explain) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Explain) ProtoMessage() {} + +func (x *Explain) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_explain_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Explain.ProtoReflect.Descriptor instead. 
+func (*Explain) Descriptor() ([]byte, []int) { + return file_cel_expr_explain_proto_rawDescGZIP(), []int{0} +} + +func (x *Explain) GetValues() []*Value { + if x != nil { + return x.Values + } + return nil +} + +func (x *Explain) GetExprSteps() []*Explain_ExprStep { + if x != nil { + return x.ExprSteps + } + return nil +} + +type Explain_ExprStep struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` + ValueIndex int32 `protobuf:"varint,2,opt,name=value_index,json=valueIndex,proto3" json:"value_index,omitempty"` +} + +func (x *Explain_ExprStep) Reset() { + *x = Explain_ExprStep{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_explain_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Explain_ExprStep) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Explain_ExprStep) ProtoMessage() {} + +func (x *Explain_ExprStep) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_explain_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Explain_ExprStep.ProtoReflect.Descriptor instead. +func (*Explain_ExprStep) Descriptor() ([]byte, []int) { + return file_cel_expr_explain_proto_rawDescGZIP(), []int{0, 0} +} + +func (x *Explain_ExprStep) GetId() int64 { + if x != nil { + return x.Id + } + return 0 +} + +func (x *Explain_ExprStep) GetValueIndex() int32 { + if x != nil { + return x.ValueIndex + } + return 0 +} + +var File_cel_expr_explain_proto protoreflect.FileDescriptor + +var file_cel_expr_explain_proto_rawDesc = []byte{ + 0x0a, 0x16, 0x63, 0x65, 0x6c, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x65, 0x78, 0x70, 0x6c, 0x61, + 0x69, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x08, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, + 0x70, 0x72, 0x1a, 0x14, 0x63, 0x65, 0x6c, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xae, 0x01, 0x0a, 0x07, 0x45, 0x78, 0x70, + 0x6c, 0x61, 0x69, 0x6e, 0x12, 0x27, 0x0a, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x12, 0x39, 0x0a, + 0x0a, 0x65, 0x78, 0x70, 0x72, 0x5f, 0x73, 0x74, 0x65, 0x70, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x1a, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, + 0x6c, 0x61, 0x69, 0x6e, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x53, 0x74, 0x65, 0x70, 0x52, 0x09, 0x65, + 0x78, 0x70, 0x72, 0x53, 0x74, 0x65, 0x70, 0x73, 0x1a, 0x3b, 0x0a, 0x08, 0x45, 0x78, 0x70, 0x72, + 0x53, 0x74, 0x65, 0x70, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, + 0x52, 0x02, 0x69, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x5f, 0x69, 0x6e, + 0x64, 0x65, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x49, 0x6e, 0x64, 0x65, 0x78, 0x3a, 0x02, 0x18, 0x01, 0x42, 0x2f, 0x0a, 0x0c, 0x64, 0x65, 0x76, + 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x42, 0x0c, 0x45, 0x78, 0x70, 0x6c, 0x61, + 0x69, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x0c, 0x63, 0x65, 0x6c, 0x2e, 0x64, + 0x65, 0x76, 0x2f, 
0x65, 0x78, 0x70, 0x72, 0xf8, 0x01, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, +} + +var ( + file_cel_expr_explain_proto_rawDescOnce sync.Once + file_cel_expr_explain_proto_rawDescData = file_cel_expr_explain_proto_rawDesc +) + +func file_cel_expr_explain_proto_rawDescGZIP() []byte { + file_cel_expr_explain_proto_rawDescOnce.Do(func() { + file_cel_expr_explain_proto_rawDescData = protoimpl.X.CompressGZIP(file_cel_expr_explain_proto_rawDescData) + }) + return file_cel_expr_explain_proto_rawDescData +} + +var file_cel_expr_explain_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_cel_expr_explain_proto_goTypes = []interface{}{ + (*Explain)(nil), // 0: cel.expr.Explain + (*Explain_ExprStep)(nil), // 1: cel.expr.Explain.ExprStep + (*Value)(nil), // 2: cel.expr.Value +} +var file_cel_expr_explain_proto_depIdxs = []int32{ + 2, // 0: cel.expr.Explain.values:type_name -> cel.expr.Value + 1, // 1: cel.expr.Explain.expr_steps:type_name -> cel.expr.Explain.ExprStep + 2, // [2:2] is the sub-list for method output_type + 2, // [2:2] is the sub-list for method input_type + 2, // [2:2] is the sub-list for extension type_name + 2, // [2:2] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name +} + +func init() { file_cel_expr_explain_proto_init() } +func file_cel_expr_explain_proto_init() { + if File_cel_expr_explain_proto != nil { + return + } + file_cel_expr_value_proto_init() + if !protoimpl.UnsafeEnabled { + file_cel_expr_explain_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Explain); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_explain_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Explain_ExprStep); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_cel_expr_explain_proto_rawDesc, + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_cel_expr_explain_proto_goTypes, + DependencyIndexes: file_cel_expr_explain_proto_depIdxs, + MessageInfos: file_cel_expr_explain_proto_msgTypes, + }.Build() + File_cel_expr_explain_proto = out.File + file_cel_expr_explain_proto_rawDesc = nil + file_cel_expr_explain_proto_goTypes = nil + file_cel_expr_explain_proto_depIdxs = nil +} diff --git a/vendor/cel.dev/expr/regen_go_proto.sh b/vendor/cel.dev/expr/regen_go_proto.sh new file mode 100644 index 000000000..fdcbb3ce2 --- /dev/null +++ b/vendor/cel.dev/expr/regen_go_proto.sh @@ -0,0 +1,9 @@ +#!/bin/sh +bazel build //proto/cel/expr/conformance/... 
+files=($(bazel aquery 'kind(proto, //proto/cel/expr/conformance/...)' | grep Outputs | grep "[.]pb[.]go" | sed 's/Outputs: \[//' | sed 's/\]//' | tr "," "\n")) +for src in ${files[@]}; +do + dst=$(echo $src | sed 's/\(.*\/cel.dev\/expr\/\(.*\)\)/\2/') + echo "copying $dst" + $(cp $src $dst) +done diff --git a/vendor/cel.dev/expr/regen_go_proto_canonical_protos.sh b/vendor/cel.dev/expr/regen_go_proto_canonical_protos.sh new file mode 100644 index 000000000..9a13479e4 --- /dev/null +++ b/vendor/cel.dev/expr/regen_go_proto_canonical_protos.sh @@ -0,0 +1,10 @@ +#!/usr/bin/env bash +bazel build //proto/cel/expr:all + +rm -vf ./*.pb.go + +files=( $(bazel cquery //proto/cel/expr:expr_go_proto --output=starlark --starlark:expr="'\n'.join([f.path for f in target.output_groups.go_generated_srcs.to_list()])") ) +for src in "${files[@]}"; +do + cp -v "${src}" ./ +done diff --git a/vendor/cel.dev/expr/syntax.pb.go b/vendor/cel.dev/expr/syntax.pb.go new file mode 100644 index 000000000..48a952872 --- /dev/null +++ b/vendor/cel.dev/expr/syntax.pb.go @@ -0,0 +1,1633 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.28.1 +// protoc v3.21.5 +// source: cel/expr/syntax.proto + +package expr + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + durationpb "google.golang.org/protobuf/types/known/durationpb" + structpb "google.golang.org/protobuf/types/known/structpb" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type SourceInfo_Extension_Component int32 + +const ( + SourceInfo_Extension_COMPONENT_UNSPECIFIED SourceInfo_Extension_Component = 0 + SourceInfo_Extension_COMPONENT_PARSER SourceInfo_Extension_Component = 1 + SourceInfo_Extension_COMPONENT_TYPE_CHECKER SourceInfo_Extension_Component = 2 + SourceInfo_Extension_COMPONENT_RUNTIME SourceInfo_Extension_Component = 3 +) + +// Enum value maps for SourceInfo_Extension_Component. +var ( + SourceInfo_Extension_Component_name = map[int32]string{ + 0: "COMPONENT_UNSPECIFIED", + 1: "COMPONENT_PARSER", + 2: "COMPONENT_TYPE_CHECKER", + 3: "COMPONENT_RUNTIME", + } + SourceInfo_Extension_Component_value = map[string]int32{ + "COMPONENT_UNSPECIFIED": 0, + "COMPONENT_PARSER": 1, + "COMPONENT_TYPE_CHECKER": 2, + "COMPONENT_RUNTIME": 3, + } +) + +func (x SourceInfo_Extension_Component) Enum() *SourceInfo_Extension_Component { + p := new(SourceInfo_Extension_Component) + *p = x + return p +} + +func (x SourceInfo_Extension_Component) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (SourceInfo_Extension_Component) Descriptor() protoreflect.EnumDescriptor { + return file_cel_expr_syntax_proto_enumTypes[0].Descriptor() +} + +func (SourceInfo_Extension_Component) Type() protoreflect.EnumType { + return &file_cel_expr_syntax_proto_enumTypes[0] +} + +func (x SourceInfo_Extension_Component) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use SourceInfo_Extension_Component.Descriptor instead. 
+func (SourceInfo_Extension_Component) EnumDescriptor() ([]byte, []int) { + return file_cel_expr_syntax_proto_rawDescGZIP(), []int{3, 2, 0} +} + +type ParsedExpr struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Expr *Expr `protobuf:"bytes,2,opt,name=expr,proto3" json:"expr,omitempty"` + SourceInfo *SourceInfo `protobuf:"bytes,3,opt,name=source_info,json=sourceInfo,proto3" json:"source_info,omitempty"` +} + +func (x *ParsedExpr) Reset() { + *x = ParsedExpr{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_syntax_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ParsedExpr) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ParsedExpr) ProtoMessage() {} + +func (x *ParsedExpr) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_syntax_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ParsedExpr.ProtoReflect.Descriptor instead. +func (*ParsedExpr) Descriptor() ([]byte, []int) { + return file_cel_expr_syntax_proto_rawDescGZIP(), []int{0} +} + +func (x *ParsedExpr) GetExpr() *Expr { + if x != nil { + return x.Expr + } + return nil +} + +func (x *ParsedExpr) GetSourceInfo() *SourceInfo { + if x != nil { + return x.SourceInfo + } + return nil +} + +type Expr struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Id int64 `protobuf:"varint,2,opt,name=id,proto3" json:"id,omitempty"` + // Types that are assignable to ExprKind: + // + // *Expr_ConstExpr + // *Expr_IdentExpr + // *Expr_SelectExpr + // *Expr_CallExpr + // *Expr_ListExpr + // *Expr_StructExpr + // *Expr_ComprehensionExpr + ExprKind isExpr_ExprKind `protobuf_oneof:"expr_kind"` +} + +func (x *Expr) Reset() { + *x = Expr{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_syntax_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Expr) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Expr) ProtoMessage() {} + +func (x *Expr) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_syntax_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Expr.ProtoReflect.Descriptor instead. 
+func (*Expr) Descriptor() ([]byte, []int) { + return file_cel_expr_syntax_proto_rawDescGZIP(), []int{1} +} + +func (x *Expr) GetId() int64 { + if x != nil { + return x.Id + } + return 0 +} + +func (m *Expr) GetExprKind() isExpr_ExprKind { + if m != nil { + return m.ExprKind + } + return nil +} + +func (x *Expr) GetConstExpr() *Constant { + if x, ok := x.GetExprKind().(*Expr_ConstExpr); ok { + return x.ConstExpr + } + return nil +} + +func (x *Expr) GetIdentExpr() *Expr_Ident { + if x, ok := x.GetExprKind().(*Expr_IdentExpr); ok { + return x.IdentExpr + } + return nil +} + +func (x *Expr) GetSelectExpr() *Expr_Select { + if x, ok := x.GetExprKind().(*Expr_SelectExpr); ok { + return x.SelectExpr + } + return nil +} + +func (x *Expr) GetCallExpr() *Expr_Call { + if x, ok := x.GetExprKind().(*Expr_CallExpr); ok { + return x.CallExpr + } + return nil +} + +func (x *Expr) GetListExpr() *Expr_CreateList { + if x, ok := x.GetExprKind().(*Expr_ListExpr); ok { + return x.ListExpr + } + return nil +} + +func (x *Expr) GetStructExpr() *Expr_CreateStruct { + if x, ok := x.GetExprKind().(*Expr_StructExpr); ok { + return x.StructExpr + } + return nil +} + +func (x *Expr) GetComprehensionExpr() *Expr_Comprehension { + if x, ok := x.GetExprKind().(*Expr_ComprehensionExpr); ok { + return x.ComprehensionExpr + } + return nil +} + +type isExpr_ExprKind interface { + isExpr_ExprKind() +} + +type Expr_ConstExpr struct { + ConstExpr *Constant `protobuf:"bytes,3,opt,name=const_expr,json=constExpr,proto3,oneof"` +} + +type Expr_IdentExpr struct { + IdentExpr *Expr_Ident `protobuf:"bytes,4,opt,name=ident_expr,json=identExpr,proto3,oneof"` +} + +type Expr_SelectExpr struct { + SelectExpr *Expr_Select `protobuf:"bytes,5,opt,name=select_expr,json=selectExpr,proto3,oneof"` +} + +type Expr_CallExpr struct { + CallExpr *Expr_Call `protobuf:"bytes,6,opt,name=call_expr,json=callExpr,proto3,oneof"` +} + +type Expr_ListExpr struct { + ListExpr *Expr_CreateList `protobuf:"bytes,7,opt,name=list_expr,json=listExpr,proto3,oneof"` +} + +type Expr_StructExpr struct { + StructExpr *Expr_CreateStruct `protobuf:"bytes,8,opt,name=struct_expr,json=structExpr,proto3,oneof"` +} + +type Expr_ComprehensionExpr struct { + ComprehensionExpr *Expr_Comprehension `protobuf:"bytes,9,opt,name=comprehension_expr,json=comprehensionExpr,proto3,oneof"` +} + +func (*Expr_ConstExpr) isExpr_ExprKind() {} + +func (*Expr_IdentExpr) isExpr_ExprKind() {} + +func (*Expr_SelectExpr) isExpr_ExprKind() {} + +func (*Expr_CallExpr) isExpr_ExprKind() {} + +func (*Expr_ListExpr) isExpr_ExprKind() {} + +func (*Expr_StructExpr) isExpr_ExprKind() {} + +func (*Expr_ComprehensionExpr) isExpr_ExprKind() {} + +type Constant struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to ConstantKind: + // + // *Constant_NullValue + // *Constant_BoolValue + // *Constant_Int64Value + // *Constant_Uint64Value + // *Constant_DoubleValue + // *Constant_StringValue + // *Constant_BytesValue + // *Constant_DurationValue + // *Constant_TimestampValue + ConstantKind isConstant_ConstantKind `protobuf_oneof:"constant_kind"` +} + +func (x *Constant) Reset() { + *x = Constant{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_syntax_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Constant) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Constant) ProtoMessage() {} + +func (x *Constant) ProtoReflect() 
protoreflect.Message { + mi := &file_cel_expr_syntax_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Constant.ProtoReflect.Descriptor instead. +func (*Constant) Descriptor() ([]byte, []int) { + return file_cel_expr_syntax_proto_rawDescGZIP(), []int{2} +} + +func (m *Constant) GetConstantKind() isConstant_ConstantKind { + if m != nil { + return m.ConstantKind + } + return nil +} + +func (x *Constant) GetNullValue() structpb.NullValue { + if x, ok := x.GetConstantKind().(*Constant_NullValue); ok { + return x.NullValue + } + return structpb.NullValue(0) +} + +func (x *Constant) GetBoolValue() bool { + if x, ok := x.GetConstantKind().(*Constant_BoolValue); ok { + return x.BoolValue + } + return false +} + +func (x *Constant) GetInt64Value() int64 { + if x, ok := x.GetConstantKind().(*Constant_Int64Value); ok { + return x.Int64Value + } + return 0 +} + +func (x *Constant) GetUint64Value() uint64 { + if x, ok := x.GetConstantKind().(*Constant_Uint64Value); ok { + return x.Uint64Value + } + return 0 +} + +func (x *Constant) GetDoubleValue() float64 { + if x, ok := x.GetConstantKind().(*Constant_DoubleValue); ok { + return x.DoubleValue + } + return 0 +} + +func (x *Constant) GetStringValue() string { + if x, ok := x.GetConstantKind().(*Constant_StringValue); ok { + return x.StringValue + } + return "" +} + +func (x *Constant) GetBytesValue() []byte { + if x, ok := x.GetConstantKind().(*Constant_BytesValue); ok { + return x.BytesValue + } + return nil +} + +// Deprecated: Do not use. +func (x *Constant) GetDurationValue() *durationpb.Duration { + if x, ok := x.GetConstantKind().(*Constant_DurationValue); ok { + return x.DurationValue + } + return nil +} + +// Deprecated: Do not use. +func (x *Constant) GetTimestampValue() *timestamppb.Timestamp { + if x, ok := x.GetConstantKind().(*Constant_TimestampValue); ok { + return x.TimestampValue + } + return nil +} + +type isConstant_ConstantKind interface { + isConstant_ConstantKind() +} + +type Constant_NullValue struct { + NullValue structpb.NullValue `protobuf:"varint,1,opt,name=null_value,json=nullValue,proto3,enum=google.protobuf.NullValue,oneof"` +} + +type Constant_BoolValue struct { + BoolValue bool `protobuf:"varint,2,opt,name=bool_value,json=boolValue,proto3,oneof"` +} + +type Constant_Int64Value struct { + Int64Value int64 `protobuf:"varint,3,opt,name=int64_value,json=int64Value,proto3,oneof"` +} + +type Constant_Uint64Value struct { + Uint64Value uint64 `protobuf:"varint,4,opt,name=uint64_value,json=uint64Value,proto3,oneof"` +} + +type Constant_DoubleValue struct { + DoubleValue float64 `protobuf:"fixed64,5,opt,name=double_value,json=doubleValue,proto3,oneof"` +} + +type Constant_StringValue struct { + StringValue string `protobuf:"bytes,6,opt,name=string_value,json=stringValue,proto3,oneof"` +} + +type Constant_BytesValue struct { + BytesValue []byte `protobuf:"bytes,7,opt,name=bytes_value,json=bytesValue,proto3,oneof"` +} + +type Constant_DurationValue struct { + // Deprecated: Do not use. + DurationValue *durationpb.Duration `protobuf:"bytes,8,opt,name=duration_value,json=durationValue,proto3,oneof"` +} + +type Constant_TimestampValue struct { + // Deprecated: Do not use. 
+ TimestampValue *timestamppb.Timestamp `protobuf:"bytes,9,opt,name=timestamp_value,json=timestampValue,proto3,oneof"` +} + +func (*Constant_NullValue) isConstant_ConstantKind() {} + +func (*Constant_BoolValue) isConstant_ConstantKind() {} + +func (*Constant_Int64Value) isConstant_ConstantKind() {} + +func (*Constant_Uint64Value) isConstant_ConstantKind() {} + +func (*Constant_DoubleValue) isConstant_ConstantKind() {} + +func (*Constant_StringValue) isConstant_ConstantKind() {} + +func (*Constant_BytesValue) isConstant_ConstantKind() {} + +func (*Constant_DurationValue) isConstant_ConstantKind() {} + +func (*Constant_TimestampValue) isConstant_ConstantKind() {} + +type SourceInfo struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + SyntaxVersion string `protobuf:"bytes,1,opt,name=syntax_version,json=syntaxVersion,proto3" json:"syntax_version,omitempty"` + Location string `protobuf:"bytes,2,opt,name=location,proto3" json:"location,omitempty"` + LineOffsets []int32 `protobuf:"varint,3,rep,packed,name=line_offsets,json=lineOffsets,proto3" json:"line_offsets,omitempty"` + Positions map[int64]int32 `protobuf:"bytes,4,rep,name=positions,proto3" json:"positions,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"` + MacroCalls map[int64]*Expr `protobuf:"bytes,5,rep,name=macro_calls,json=macroCalls,proto3" json:"macro_calls,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Extensions []*SourceInfo_Extension `protobuf:"bytes,6,rep,name=extensions,proto3" json:"extensions,omitempty"` +} + +func (x *SourceInfo) Reset() { + *x = SourceInfo{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_syntax_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SourceInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SourceInfo) ProtoMessage() {} + +func (x *SourceInfo) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_syntax_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SourceInfo.ProtoReflect.Descriptor instead. 
+func (*SourceInfo) Descriptor() ([]byte, []int) { + return file_cel_expr_syntax_proto_rawDescGZIP(), []int{3} +} + +func (x *SourceInfo) GetSyntaxVersion() string { + if x != nil { + return x.SyntaxVersion + } + return "" +} + +func (x *SourceInfo) GetLocation() string { + if x != nil { + return x.Location + } + return "" +} + +func (x *SourceInfo) GetLineOffsets() []int32 { + if x != nil { + return x.LineOffsets + } + return nil +} + +func (x *SourceInfo) GetPositions() map[int64]int32 { + if x != nil { + return x.Positions + } + return nil +} + +func (x *SourceInfo) GetMacroCalls() map[int64]*Expr { + if x != nil { + return x.MacroCalls + } + return nil +} + +func (x *SourceInfo) GetExtensions() []*SourceInfo_Extension { + if x != nil { + return x.Extensions + } + return nil +} + +type Expr_Ident struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` +} + +func (x *Expr_Ident) Reset() { + *x = Expr_Ident{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_syntax_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Expr_Ident) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Expr_Ident) ProtoMessage() {} + +func (x *Expr_Ident) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_syntax_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Expr_Ident.ProtoReflect.Descriptor instead. +func (*Expr_Ident) Descriptor() ([]byte, []int) { + return file_cel_expr_syntax_proto_rawDescGZIP(), []int{1, 0} +} + +func (x *Expr_Ident) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +type Expr_Select struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Operand *Expr `protobuf:"bytes,1,opt,name=operand,proto3" json:"operand,omitempty"` + Field string `protobuf:"bytes,2,opt,name=field,proto3" json:"field,omitempty"` + TestOnly bool `protobuf:"varint,3,opt,name=test_only,json=testOnly,proto3" json:"test_only,omitempty"` +} + +func (x *Expr_Select) Reset() { + *x = Expr_Select{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_syntax_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Expr_Select) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Expr_Select) ProtoMessage() {} + +func (x *Expr_Select) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_syntax_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Expr_Select.ProtoReflect.Descriptor instead. 
+func (*Expr_Select) Descriptor() ([]byte, []int) { + return file_cel_expr_syntax_proto_rawDescGZIP(), []int{1, 1} +} + +func (x *Expr_Select) GetOperand() *Expr { + if x != nil { + return x.Operand + } + return nil +} + +func (x *Expr_Select) GetField() string { + if x != nil { + return x.Field + } + return "" +} + +func (x *Expr_Select) GetTestOnly() bool { + if x != nil { + return x.TestOnly + } + return false +} + +type Expr_Call struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Target *Expr `protobuf:"bytes,1,opt,name=target,proto3" json:"target,omitempty"` + Function string `protobuf:"bytes,2,opt,name=function,proto3" json:"function,omitempty"` + Args []*Expr `protobuf:"bytes,3,rep,name=args,proto3" json:"args,omitempty"` +} + +func (x *Expr_Call) Reset() { + *x = Expr_Call{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_syntax_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Expr_Call) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Expr_Call) ProtoMessage() {} + +func (x *Expr_Call) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_syntax_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Expr_Call.ProtoReflect.Descriptor instead. +func (*Expr_Call) Descriptor() ([]byte, []int) { + return file_cel_expr_syntax_proto_rawDescGZIP(), []int{1, 2} +} + +func (x *Expr_Call) GetTarget() *Expr { + if x != nil { + return x.Target + } + return nil +} + +func (x *Expr_Call) GetFunction() string { + if x != nil { + return x.Function + } + return "" +} + +func (x *Expr_Call) GetArgs() []*Expr { + if x != nil { + return x.Args + } + return nil +} + +type Expr_CreateList struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Elements []*Expr `protobuf:"bytes,1,rep,name=elements,proto3" json:"elements,omitempty"` + OptionalIndices []int32 `protobuf:"varint,2,rep,packed,name=optional_indices,json=optionalIndices,proto3" json:"optional_indices,omitempty"` +} + +func (x *Expr_CreateList) Reset() { + *x = Expr_CreateList{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_syntax_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Expr_CreateList) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Expr_CreateList) ProtoMessage() {} + +func (x *Expr_CreateList) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_syntax_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Expr_CreateList.ProtoReflect.Descriptor instead. 
+func (*Expr_CreateList) Descriptor() ([]byte, []int) { + return file_cel_expr_syntax_proto_rawDescGZIP(), []int{1, 3} +} + +func (x *Expr_CreateList) GetElements() []*Expr { + if x != nil { + return x.Elements + } + return nil +} + +func (x *Expr_CreateList) GetOptionalIndices() []int32 { + if x != nil { + return x.OptionalIndices + } + return nil +} + +type Expr_CreateStruct struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + MessageName string `protobuf:"bytes,1,opt,name=message_name,json=messageName,proto3" json:"message_name,omitempty"` + Entries []*Expr_CreateStruct_Entry `protobuf:"bytes,2,rep,name=entries,proto3" json:"entries,omitempty"` +} + +func (x *Expr_CreateStruct) Reset() { + *x = Expr_CreateStruct{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_syntax_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Expr_CreateStruct) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Expr_CreateStruct) ProtoMessage() {} + +func (x *Expr_CreateStruct) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_syntax_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Expr_CreateStruct.ProtoReflect.Descriptor instead. +func (*Expr_CreateStruct) Descriptor() ([]byte, []int) { + return file_cel_expr_syntax_proto_rawDescGZIP(), []int{1, 4} +} + +func (x *Expr_CreateStruct) GetMessageName() string { + if x != nil { + return x.MessageName + } + return "" +} + +func (x *Expr_CreateStruct) GetEntries() []*Expr_CreateStruct_Entry { + if x != nil { + return x.Entries + } + return nil +} + +type Expr_Comprehension struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + IterVar string `protobuf:"bytes,1,opt,name=iter_var,json=iterVar,proto3" json:"iter_var,omitempty"` + IterRange *Expr `protobuf:"bytes,2,opt,name=iter_range,json=iterRange,proto3" json:"iter_range,omitempty"` + AccuVar string `protobuf:"bytes,3,opt,name=accu_var,json=accuVar,proto3" json:"accu_var,omitempty"` + AccuInit *Expr `protobuf:"bytes,4,opt,name=accu_init,json=accuInit,proto3" json:"accu_init,omitempty"` + LoopCondition *Expr `protobuf:"bytes,5,opt,name=loop_condition,json=loopCondition,proto3" json:"loop_condition,omitempty"` + LoopStep *Expr `protobuf:"bytes,6,opt,name=loop_step,json=loopStep,proto3" json:"loop_step,omitempty"` + Result *Expr `protobuf:"bytes,7,opt,name=result,proto3" json:"result,omitempty"` +} + +func (x *Expr_Comprehension) Reset() { + *x = Expr_Comprehension{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_syntax_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Expr_Comprehension) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Expr_Comprehension) ProtoMessage() {} + +func (x *Expr_Comprehension) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_syntax_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Expr_Comprehension.ProtoReflect.Descriptor instead. 
+func (*Expr_Comprehension) Descriptor() ([]byte, []int) { + return file_cel_expr_syntax_proto_rawDescGZIP(), []int{1, 5} +} + +func (x *Expr_Comprehension) GetIterVar() string { + if x != nil { + return x.IterVar + } + return "" +} + +func (x *Expr_Comprehension) GetIterRange() *Expr { + if x != nil { + return x.IterRange + } + return nil +} + +func (x *Expr_Comprehension) GetAccuVar() string { + if x != nil { + return x.AccuVar + } + return "" +} + +func (x *Expr_Comprehension) GetAccuInit() *Expr { + if x != nil { + return x.AccuInit + } + return nil +} + +func (x *Expr_Comprehension) GetLoopCondition() *Expr { + if x != nil { + return x.LoopCondition + } + return nil +} + +func (x *Expr_Comprehension) GetLoopStep() *Expr { + if x != nil { + return x.LoopStep + } + return nil +} + +func (x *Expr_Comprehension) GetResult() *Expr { + if x != nil { + return x.Result + } + return nil +} + +type Expr_CreateStruct_Entry struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` + // Types that are assignable to KeyKind: + // + // *Expr_CreateStruct_Entry_FieldKey + // *Expr_CreateStruct_Entry_MapKey + KeyKind isExpr_CreateStruct_Entry_KeyKind `protobuf_oneof:"key_kind"` + Value *Expr `protobuf:"bytes,4,opt,name=value,proto3" json:"value,omitempty"` + OptionalEntry bool `protobuf:"varint,5,opt,name=optional_entry,json=optionalEntry,proto3" json:"optional_entry,omitempty"` +} + +func (x *Expr_CreateStruct_Entry) Reset() { + *x = Expr_CreateStruct_Entry{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_syntax_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Expr_CreateStruct_Entry) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Expr_CreateStruct_Entry) ProtoMessage() {} + +func (x *Expr_CreateStruct_Entry) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_syntax_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Expr_CreateStruct_Entry.ProtoReflect.Descriptor instead. 
+func (*Expr_CreateStruct_Entry) Descriptor() ([]byte, []int) { + return file_cel_expr_syntax_proto_rawDescGZIP(), []int{1, 4, 0} +} + +func (x *Expr_CreateStruct_Entry) GetId() int64 { + if x != nil { + return x.Id + } + return 0 +} + +func (m *Expr_CreateStruct_Entry) GetKeyKind() isExpr_CreateStruct_Entry_KeyKind { + if m != nil { + return m.KeyKind + } + return nil +} + +func (x *Expr_CreateStruct_Entry) GetFieldKey() string { + if x, ok := x.GetKeyKind().(*Expr_CreateStruct_Entry_FieldKey); ok { + return x.FieldKey + } + return "" +} + +func (x *Expr_CreateStruct_Entry) GetMapKey() *Expr { + if x, ok := x.GetKeyKind().(*Expr_CreateStruct_Entry_MapKey); ok { + return x.MapKey + } + return nil +} + +func (x *Expr_CreateStruct_Entry) GetValue() *Expr { + if x != nil { + return x.Value + } + return nil +} + +func (x *Expr_CreateStruct_Entry) GetOptionalEntry() bool { + if x != nil { + return x.OptionalEntry + } + return false +} + +type isExpr_CreateStruct_Entry_KeyKind interface { + isExpr_CreateStruct_Entry_KeyKind() +} + +type Expr_CreateStruct_Entry_FieldKey struct { + FieldKey string `protobuf:"bytes,2,opt,name=field_key,json=fieldKey,proto3,oneof"` +} + +type Expr_CreateStruct_Entry_MapKey struct { + MapKey *Expr `protobuf:"bytes,3,opt,name=map_key,json=mapKey,proto3,oneof"` +} + +func (*Expr_CreateStruct_Entry_FieldKey) isExpr_CreateStruct_Entry_KeyKind() {} + +func (*Expr_CreateStruct_Entry_MapKey) isExpr_CreateStruct_Entry_KeyKind() {} + +type SourceInfo_Extension struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + AffectedComponents []SourceInfo_Extension_Component `protobuf:"varint,2,rep,packed,name=affected_components,json=affectedComponents,proto3,enum=cel.expr.SourceInfo_Extension_Component" json:"affected_components,omitempty"` + Version *SourceInfo_Extension_Version `protobuf:"bytes,3,opt,name=version,proto3" json:"version,omitempty"` +} + +func (x *SourceInfo_Extension) Reset() { + *x = SourceInfo_Extension{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_syntax_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SourceInfo_Extension) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SourceInfo_Extension) ProtoMessage() {} + +func (x *SourceInfo_Extension) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_syntax_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SourceInfo_Extension.ProtoReflect.Descriptor instead. 
+func (*SourceInfo_Extension) Descriptor() ([]byte, []int) { + return file_cel_expr_syntax_proto_rawDescGZIP(), []int{3, 2} +} + +func (x *SourceInfo_Extension) GetId() string { + if x != nil { + return x.Id + } + return "" +} + +func (x *SourceInfo_Extension) GetAffectedComponents() []SourceInfo_Extension_Component { + if x != nil { + return x.AffectedComponents + } + return nil +} + +func (x *SourceInfo_Extension) GetVersion() *SourceInfo_Extension_Version { + if x != nil { + return x.Version + } + return nil +} + +type SourceInfo_Extension_Version struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Major int64 `protobuf:"varint,1,opt,name=major,proto3" json:"major,omitempty"` + Minor int64 `protobuf:"varint,2,opt,name=minor,proto3" json:"minor,omitempty"` +} + +func (x *SourceInfo_Extension_Version) Reset() { + *x = SourceInfo_Extension_Version{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_syntax_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SourceInfo_Extension_Version) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SourceInfo_Extension_Version) ProtoMessage() {} + +func (x *SourceInfo_Extension_Version) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_syntax_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SourceInfo_Extension_Version.ProtoReflect.Descriptor instead. +func (*SourceInfo_Extension_Version) Descriptor() ([]byte, []int) { + return file_cel_expr_syntax_proto_rawDescGZIP(), []int{3, 2, 0} +} + +func (x *SourceInfo_Extension_Version) GetMajor() int64 { + if x != nil { + return x.Major + } + return 0 +} + +func (x *SourceInfo_Extension_Version) GetMinor() int64 { + if x != nil { + return x.Minor + } + return 0 +} + +var File_cel_expr_syntax_proto protoreflect.FileDescriptor + +var file_cel_expr_syntax_proto_rawDesc = []byte{ + 0x0a, 0x15, 0x63, 0x65, 0x6c, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x73, 0x79, 0x6e, 0x74, 0x61, + 0x78, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x08, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, + 0x72, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, + 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x22, 0x67, 0x0a, 0x0a, 0x50, 0x61, 0x72, 0x73, 0x65, 0x64, 0x45, 0x78, 0x70, 0x72, 0x12, 0x22, + 0x0a, 0x04, 0x65, 0x78, 0x70, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, + 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x04, 0x65, 0x78, + 0x70, 0x72, 0x12, 0x35, 0x0a, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x69, 0x6e, 0x66, + 0x6f, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, + 0x70, 0x72, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x0a, 0x73, + 0x6f, 0x75, 0x72, 0x63, 
0x65, 0x49, 0x6e, 0x66, 0x6f, 0x22, 0xfd, 0x0a, 0x0a, 0x04, 0x45, 0x78, + 0x70, 0x72, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x02, + 0x69, 0x64, 0x12, 0x33, 0x0a, 0x0a, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x5f, 0x65, 0x78, 0x70, 0x72, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, + 0x72, 0x2e, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x48, 0x00, 0x52, 0x09, 0x63, 0x6f, + 0x6e, 0x73, 0x74, 0x45, 0x78, 0x70, 0x72, 0x12, 0x35, 0x0a, 0x0a, 0x69, 0x64, 0x65, 0x6e, 0x74, + 0x5f, 0x65, 0x78, 0x70, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x63, 0x65, + 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x2e, 0x49, 0x64, 0x65, 0x6e, + 0x74, 0x48, 0x00, 0x52, 0x09, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x45, 0x78, 0x70, 0x72, 0x12, 0x38, + 0x0a, 0x0b, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x5f, 0x65, 0x78, 0x70, 0x72, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, + 0x78, 0x70, 0x72, 0x2e, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x48, 0x00, 0x52, 0x0a, 0x73, 0x65, + 0x6c, 0x65, 0x63, 0x74, 0x45, 0x78, 0x70, 0x72, 0x12, 0x32, 0x0a, 0x09, 0x63, 0x61, 0x6c, 0x6c, + 0x5f, 0x65, 0x78, 0x70, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x63, 0x65, + 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x2e, 0x43, 0x61, 0x6c, 0x6c, + 0x48, 0x00, 0x52, 0x08, 0x63, 0x61, 0x6c, 0x6c, 0x45, 0x78, 0x70, 0x72, 0x12, 0x38, 0x0a, 0x09, + 0x6c, 0x69, 0x73, 0x74, 0x5f, 0x65, 0x78, 0x70, 0x72, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x19, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x2e, + 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x48, 0x00, 0x52, 0x08, 0x6c, 0x69, + 0x73, 0x74, 0x45, 0x78, 0x70, 0x72, 0x12, 0x3e, 0x0a, 0x0b, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, + 0x5f, 0x65, 0x78, 0x70, 0x72, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x63, 0x65, + 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x2e, 0x43, 0x72, 0x65, 0x61, + 0x74, 0x65, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x48, 0x00, 0x52, 0x0a, 0x73, 0x74, 0x72, 0x75, + 0x63, 0x74, 0x45, 0x78, 0x70, 0x72, 0x12, 0x4d, 0x0a, 0x12, 0x63, 0x6f, 0x6d, 0x70, 0x72, 0x65, + 0x68, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x78, 0x70, 0x72, 0x18, 0x09, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, + 0x70, 0x72, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x68, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x48, 0x00, 0x52, 0x11, 0x63, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x68, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x45, 0x78, 0x70, 0x72, 0x1a, 0x1b, 0x0a, 0x05, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x12, 0x12, + 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x1a, 0x65, 0x0a, 0x06, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x12, 0x28, 0x0a, 0x07, + 0x6f, 0x70, 0x65, 0x72, 0x61, 0x6e, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, + 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x07, 0x6f, + 0x70, 0x65, 0x72, 0x61, 0x6e, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x12, 0x1b, 0x0a, 0x09, + 0x74, 0x65, 0x73, 0x74, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x08, 0x74, 0x65, 0x73, 0x74, 0x4f, 0x6e, 0x6c, 
0x79, 0x1a, 0x6e, 0x0a, 0x04, 0x43, 0x61, 0x6c, + 0x6c, 0x12, 0x26, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, + 0x72, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x66, 0x75, 0x6e, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x66, 0x75, 0x6e, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x22, 0x0a, 0x04, 0x61, 0x72, 0x67, 0x73, 0x18, 0x03, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, + 0x78, 0x70, 0x72, 0x52, 0x04, 0x61, 0x72, 0x67, 0x73, 0x1a, 0x63, 0x0a, 0x0a, 0x43, 0x72, 0x65, + 0x61, 0x74, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x2a, 0x0a, 0x08, 0x65, 0x6c, 0x65, 0x6d, 0x65, + 0x6e, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, + 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x08, 0x65, 0x6c, 0x65, 0x6d, 0x65, + 0x6e, 0x74, 0x73, 0x12, 0x29, 0x0a, 0x10, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, + 0x69, 0x6e, 0x64, 0x69, 0x63, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x05, 0x52, 0x0f, 0x6f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x49, 0x6e, 0x64, 0x69, 0x63, 0x65, 0x73, 0x1a, 0xab, + 0x02, 0x0a, 0x0c, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x12, + 0x21, 0x0a, 0x0c, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4e, 0x61, + 0x6d, 0x65, 0x12, 0x3b, 0x0a, 0x07, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x18, 0x02, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, + 0x78, 0x70, 0x72, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, + 0x2e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x1a, + 0xba, 0x01, 0x0a, 0x05, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x02, 0x69, 0x64, 0x12, 0x1d, 0x0a, 0x09, 0x66, 0x69, 0x65, + 0x6c, 0x64, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x08, + 0x66, 0x69, 0x65, 0x6c, 0x64, 0x4b, 0x65, 0x79, 0x12, 0x29, 0x0a, 0x07, 0x6d, 0x61, 0x70, 0x5f, + 0x6b, 0x65, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, + 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x48, 0x00, 0x52, 0x06, 0x6d, 0x61, 0x70, + 0x4b, 0x65, 0x79, 0x12, 0x24, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, + 0x70, 0x72, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x6f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x0d, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x42, 0x0a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x5f, 0x6b, 0x69, 0x6e, 0x64, 0x1a, 0xad, 0x02, 0x0a, + 0x0d, 0x43, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x68, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x19, + 0x0a, 0x08, 0x69, 0x74, 0x65, 0x72, 0x5f, 0x76, 0x61, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x07, 0x69, 0x74, 0x65, 0x72, 0x56, 0x61, 0x72, 0x12, 0x2d, 0x0a, 0x0a, 0x69, 0x74, 0x65, + 0x72, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 
0x0b, 0x32, 0x0e, 0x2e, + 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x09, 0x69, + 0x74, 0x65, 0x72, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x61, 0x63, 0x63, 0x75, + 0x5f, 0x76, 0x61, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x61, 0x63, 0x63, 0x75, + 0x56, 0x61, 0x72, 0x12, 0x2b, 0x0a, 0x09, 0x61, 0x63, 0x63, 0x75, 0x5f, 0x69, 0x6e, 0x69, 0x74, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, + 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x08, 0x61, 0x63, 0x63, 0x75, 0x49, 0x6e, 0x69, 0x74, + 0x12, 0x35, 0x0a, 0x0e, 0x6c, 0x6f, 0x6f, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, + 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, + 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x0d, 0x6c, 0x6f, 0x6f, 0x70, 0x43, 0x6f, + 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2b, 0x0a, 0x09, 0x6c, 0x6f, 0x6f, 0x70, 0x5f, + 0x73, 0x74, 0x65, 0x70, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, + 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x08, 0x6c, 0x6f, 0x6f, 0x70, + 0x53, 0x74, 0x65, 0x70, 0x12, 0x26, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x07, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, + 0x45, 0x78, 0x70, 0x72, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x42, 0x0b, 0x0a, 0x09, + 0x65, 0x78, 0x70, 0x72, 0x5f, 0x6b, 0x69, 0x6e, 0x64, 0x22, 0xc1, 0x03, 0x0a, 0x08, 0x43, 0x6f, + 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x12, 0x3b, 0x0a, 0x0a, 0x6e, 0x75, 0x6c, 0x6c, 0x5f, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4e, 0x75, 0x6c, + 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x48, 0x00, 0x52, 0x09, 0x6e, 0x75, 0x6c, 0x6c, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x12, 0x1f, 0x0a, 0x0a, 0x62, 0x6f, 0x6f, 0x6c, 0x5f, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x09, 0x62, 0x6f, 0x6f, 0x6c, 0x56, + 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0b, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x5f, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x0a, 0x69, 0x6e, 0x74, + 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x75, 0x69, 0x6e, 0x74, 0x36, + 0x34, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x48, 0x00, 0x52, + 0x0b, 0x75, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, 0x0a, 0x0c, + 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x01, 0x48, 0x00, 0x52, 0x0b, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, + 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0b, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x0a, 0x62, + 0x79, 0x74, 0x65, 0x73, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x46, 0x0a, 0x0e, 0x64, 0x75, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 
0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x02, 0x18, 0x01, + 0x48, 0x00, 0x52, 0x0d, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x12, 0x49, 0x0a, 0x0f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x5f, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, + 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x02, 0x18, 0x01, 0x48, 0x00, 0x52, 0x0e, 0x74, 0x69, + 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x0f, 0x0a, 0x0d, + 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x5f, 0x6b, 0x69, 0x6e, 0x64, 0x22, 0xac, 0x06, + 0x0a, 0x0a, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x25, 0x0a, 0x0e, + 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x56, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x12, 0x1a, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, + 0x21, 0x0a, 0x0c, 0x6c, 0x69, 0x6e, 0x65, 0x5f, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x73, 0x18, + 0x03, 0x20, 0x03, 0x28, 0x05, 0x52, 0x0b, 0x6c, 0x69, 0x6e, 0x65, 0x4f, 0x66, 0x66, 0x73, 0x65, + 0x74, 0x73, 0x12, 0x41, 0x0a, 0x09, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, + 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, + 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x50, 0x6f, 0x73, 0x69, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x09, 0x70, 0x6f, 0x73, 0x69, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x45, 0x0a, 0x0b, 0x6d, 0x61, 0x63, 0x72, 0x6f, 0x5f, 0x63, + 0x61, 0x6c, 0x6c, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x63, 0x65, 0x6c, + 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, + 0x2e, 0x4d, 0x61, 0x63, 0x72, 0x6f, 0x43, 0x61, 0x6c, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x52, 0x0a, 0x6d, 0x61, 0x63, 0x72, 0x6f, 0x43, 0x61, 0x6c, 0x6c, 0x73, 0x12, 0x3e, 0x0a, 0x0a, + 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x1e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x53, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x52, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x3c, 0x0a, 0x0e, + 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, + 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x03, 0x6b, 0x65, 0x79, + 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x4d, 0x0a, 0x0f, 0x4d, 0x61, + 0x63, 0x72, 0x6f, 0x43, 0x61, 0x6c, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, + 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, + 0x24, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, + 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x05, + 0x76, 0x61, 0x6c, 0x75, 
0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0xe0, 0x02, 0x0a, 0x09, 0x45, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x59, 0x0a, 0x13, 0x61, 0x66, 0x66, 0x65, 0x63, + 0x74, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x02, + 0x20, 0x03, 0x28, 0x0e, 0x32, 0x28, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, + 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, + 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x52, 0x12, + 0x61, 0x66, 0x66, 0x65, 0x63, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, + 0x74, 0x73, 0x12, 0x40, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x53, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, + 0x69, 0x6f, 0x6e, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x76, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x1a, 0x35, 0x0a, 0x07, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, + 0x14, 0x0a, 0x05, 0x6d, 0x61, 0x6a, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, + 0x6d, 0x61, 0x6a, 0x6f, 0x72, 0x12, 0x14, 0x0a, 0x05, 0x6d, 0x69, 0x6e, 0x6f, 0x72, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x6d, 0x69, 0x6e, 0x6f, 0x72, 0x22, 0x6f, 0x0a, 0x09, 0x43, + 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x12, 0x19, 0x0a, 0x15, 0x43, 0x4f, 0x4d, 0x50, + 0x4f, 0x4e, 0x45, 0x4e, 0x54, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, + 0x44, 0x10, 0x00, 0x12, 0x14, 0x0a, 0x10, 0x43, 0x4f, 0x4d, 0x50, 0x4f, 0x4e, 0x45, 0x4e, 0x54, + 0x5f, 0x50, 0x41, 0x52, 0x53, 0x45, 0x52, 0x10, 0x01, 0x12, 0x1a, 0x0a, 0x16, 0x43, 0x4f, 0x4d, + 0x50, 0x4f, 0x4e, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x43, 0x48, 0x45, 0x43, + 0x4b, 0x45, 0x52, 0x10, 0x02, 0x12, 0x15, 0x0a, 0x11, 0x43, 0x4f, 0x4d, 0x50, 0x4f, 0x4e, 0x45, + 0x4e, 0x54, 0x5f, 0x52, 0x55, 0x4e, 0x54, 0x49, 0x4d, 0x45, 0x10, 0x03, 0x42, 0x2e, 0x0a, 0x0c, + 0x64, 0x65, 0x76, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x42, 0x0b, 0x53, 0x79, + 0x6e, 0x74, 0x61, 0x78, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x0c, 0x63, 0x65, 0x6c, + 0x2e, 0x64, 0x65, 0x76, 0x2f, 0x65, 0x78, 0x70, 0x72, 0xf8, 0x01, 0x01, 0x62, 0x06, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_cel_expr_syntax_proto_rawDescOnce sync.Once + file_cel_expr_syntax_proto_rawDescData = file_cel_expr_syntax_proto_rawDesc +) + +func file_cel_expr_syntax_proto_rawDescGZIP() []byte { + file_cel_expr_syntax_proto_rawDescOnce.Do(func() { + file_cel_expr_syntax_proto_rawDescData = protoimpl.X.CompressGZIP(file_cel_expr_syntax_proto_rawDescData) + }) + return file_cel_expr_syntax_proto_rawDescData +} + +var file_cel_expr_syntax_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_cel_expr_syntax_proto_msgTypes = make([]protoimpl.MessageInfo, 15) +var file_cel_expr_syntax_proto_goTypes = []interface{}{ + (SourceInfo_Extension_Component)(0), // 0: cel.expr.SourceInfo.Extension.Component + (*ParsedExpr)(nil), // 1: cel.expr.ParsedExpr + (*Expr)(nil), // 2: cel.expr.Expr + (*Constant)(nil), // 3: cel.expr.Constant + (*SourceInfo)(nil), // 4: cel.expr.SourceInfo + (*Expr_Ident)(nil), // 5: cel.expr.Expr.Ident + (*Expr_Select)(nil), // 6: cel.expr.Expr.Select + 
(*Expr_Call)(nil), // 7: cel.expr.Expr.Call + (*Expr_CreateList)(nil), // 8: cel.expr.Expr.CreateList + (*Expr_CreateStruct)(nil), // 9: cel.expr.Expr.CreateStruct + (*Expr_Comprehension)(nil), // 10: cel.expr.Expr.Comprehension + (*Expr_CreateStruct_Entry)(nil), // 11: cel.expr.Expr.CreateStruct.Entry + nil, // 12: cel.expr.SourceInfo.PositionsEntry + nil, // 13: cel.expr.SourceInfo.MacroCallsEntry + (*SourceInfo_Extension)(nil), // 14: cel.expr.SourceInfo.Extension + (*SourceInfo_Extension_Version)(nil), // 15: cel.expr.SourceInfo.Extension.Version + (structpb.NullValue)(0), // 16: google.protobuf.NullValue + (*durationpb.Duration)(nil), // 17: google.protobuf.Duration + (*timestamppb.Timestamp)(nil), // 18: google.protobuf.Timestamp +} +var file_cel_expr_syntax_proto_depIdxs = []int32{ + 2, // 0: cel.expr.ParsedExpr.expr:type_name -> cel.expr.Expr + 4, // 1: cel.expr.ParsedExpr.source_info:type_name -> cel.expr.SourceInfo + 3, // 2: cel.expr.Expr.const_expr:type_name -> cel.expr.Constant + 5, // 3: cel.expr.Expr.ident_expr:type_name -> cel.expr.Expr.Ident + 6, // 4: cel.expr.Expr.select_expr:type_name -> cel.expr.Expr.Select + 7, // 5: cel.expr.Expr.call_expr:type_name -> cel.expr.Expr.Call + 8, // 6: cel.expr.Expr.list_expr:type_name -> cel.expr.Expr.CreateList + 9, // 7: cel.expr.Expr.struct_expr:type_name -> cel.expr.Expr.CreateStruct + 10, // 8: cel.expr.Expr.comprehension_expr:type_name -> cel.expr.Expr.Comprehension + 16, // 9: cel.expr.Constant.null_value:type_name -> google.protobuf.NullValue + 17, // 10: cel.expr.Constant.duration_value:type_name -> google.protobuf.Duration + 18, // 11: cel.expr.Constant.timestamp_value:type_name -> google.protobuf.Timestamp + 12, // 12: cel.expr.SourceInfo.positions:type_name -> cel.expr.SourceInfo.PositionsEntry + 13, // 13: cel.expr.SourceInfo.macro_calls:type_name -> cel.expr.SourceInfo.MacroCallsEntry + 14, // 14: cel.expr.SourceInfo.extensions:type_name -> cel.expr.SourceInfo.Extension + 2, // 15: cel.expr.Expr.Select.operand:type_name -> cel.expr.Expr + 2, // 16: cel.expr.Expr.Call.target:type_name -> cel.expr.Expr + 2, // 17: cel.expr.Expr.Call.args:type_name -> cel.expr.Expr + 2, // 18: cel.expr.Expr.CreateList.elements:type_name -> cel.expr.Expr + 11, // 19: cel.expr.Expr.CreateStruct.entries:type_name -> cel.expr.Expr.CreateStruct.Entry + 2, // 20: cel.expr.Expr.Comprehension.iter_range:type_name -> cel.expr.Expr + 2, // 21: cel.expr.Expr.Comprehension.accu_init:type_name -> cel.expr.Expr + 2, // 22: cel.expr.Expr.Comprehension.loop_condition:type_name -> cel.expr.Expr + 2, // 23: cel.expr.Expr.Comprehension.loop_step:type_name -> cel.expr.Expr + 2, // 24: cel.expr.Expr.Comprehension.result:type_name -> cel.expr.Expr + 2, // 25: cel.expr.Expr.CreateStruct.Entry.map_key:type_name -> cel.expr.Expr + 2, // 26: cel.expr.Expr.CreateStruct.Entry.value:type_name -> cel.expr.Expr + 2, // 27: cel.expr.SourceInfo.MacroCallsEntry.value:type_name -> cel.expr.Expr + 0, // 28: cel.expr.SourceInfo.Extension.affected_components:type_name -> cel.expr.SourceInfo.Extension.Component + 15, // 29: cel.expr.SourceInfo.Extension.version:type_name -> cel.expr.SourceInfo.Extension.Version + 30, // [30:30] is the sub-list for method output_type + 30, // [30:30] is the sub-list for method input_type + 30, // [30:30] is the sub-list for extension type_name + 30, // [30:30] is the sub-list for extension extendee + 0, // [0:30] is the sub-list for field type_name +} + +func init() { file_cel_expr_syntax_proto_init() } +func file_cel_expr_syntax_proto_init() { + 
if File_cel_expr_syntax_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_cel_expr_syntax_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ParsedExpr); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_syntax_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Expr); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_syntax_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Constant); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_syntax_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SourceInfo); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_syntax_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Expr_Ident); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_syntax_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Expr_Select); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_syntax_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Expr_Call); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_syntax_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Expr_CreateList); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_syntax_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Expr_CreateStruct); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_syntax_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Expr_Comprehension); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_syntax_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Expr_CreateStruct_Entry); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_syntax_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SourceInfo_Extension); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_syntax_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SourceInfo_Extension_Version); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_cel_expr_syntax_proto_msgTypes[1].OneofWrappers = []interface{}{ + 
(*Expr_ConstExpr)(nil), + (*Expr_IdentExpr)(nil), + (*Expr_SelectExpr)(nil), + (*Expr_CallExpr)(nil), + (*Expr_ListExpr)(nil), + (*Expr_StructExpr)(nil), + (*Expr_ComprehensionExpr)(nil), + } + file_cel_expr_syntax_proto_msgTypes[2].OneofWrappers = []interface{}{ + (*Constant_NullValue)(nil), + (*Constant_BoolValue)(nil), + (*Constant_Int64Value)(nil), + (*Constant_Uint64Value)(nil), + (*Constant_DoubleValue)(nil), + (*Constant_StringValue)(nil), + (*Constant_BytesValue)(nil), + (*Constant_DurationValue)(nil), + (*Constant_TimestampValue)(nil), + } + file_cel_expr_syntax_proto_msgTypes[10].OneofWrappers = []interface{}{ + (*Expr_CreateStruct_Entry_FieldKey)(nil), + (*Expr_CreateStruct_Entry_MapKey)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_cel_expr_syntax_proto_rawDesc, + NumEnums: 1, + NumMessages: 15, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_cel_expr_syntax_proto_goTypes, + DependencyIndexes: file_cel_expr_syntax_proto_depIdxs, + EnumInfos: file_cel_expr_syntax_proto_enumTypes, + MessageInfos: file_cel_expr_syntax_proto_msgTypes, + }.Build() + File_cel_expr_syntax_proto = out.File + file_cel_expr_syntax_proto_rawDesc = nil + file_cel_expr_syntax_proto_goTypes = nil + file_cel_expr_syntax_proto_depIdxs = nil +} diff --git a/vendor/cel.dev/expr/value.pb.go b/vendor/cel.dev/expr/value.pb.go new file mode 100644 index 000000000..e5e29228c --- /dev/null +++ b/vendor/cel.dev/expr/value.pb.go @@ -0,0 +1,653 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.28.1 +// protoc v3.21.5 +// source: cel/expr/value.proto + +package expr + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" + structpb "google.golang.org/protobuf/types/known/structpb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type Value struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Kind: + // + // *Value_NullValue + // *Value_BoolValue + // *Value_Int64Value + // *Value_Uint64Value + // *Value_DoubleValue + // *Value_StringValue + // *Value_BytesValue + // *Value_EnumValue + // *Value_ObjectValue + // *Value_MapValue + // *Value_ListValue + // *Value_TypeValue + Kind isValue_Kind `protobuf_oneof:"kind"` +} + +func (x *Value) Reset() { + *x = Value{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_value_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Value) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Value) ProtoMessage() {} + +func (x *Value) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_value_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Value.ProtoReflect.Descriptor instead. 
+func (*Value) Descriptor() ([]byte, []int) { + return file_cel_expr_value_proto_rawDescGZIP(), []int{0} +} + +func (m *Value) GetKind() isValue_Kind { + if m != nil { + return m.Kind + } + return nil +} + +func (x *Value) GetNullValue() structpb.NullValue { + if x, ok := x.GetKind().(*Value_NullValue); ok { + return x.NullValue + } + return structpb.NullValue(0) +} + +func (x *Value) GetBoolValue() bool { + if x, ok := x.GetKind().(*Value_BoolValue); ok { + return x.BoolValue + } + return false +} + +func (x *Value) GetInt64Value() int64 { + if x, ok := x.GetKind().(*Value_Int64Value); ok { + return x.Int64Value + } + return 0 +} + +func (x *Value) GetUint64Value() uint64 { + if x, ok := x.GetKind().(*Value_Uint64Value); ok { + return x.Uint64Value + } + return 0 +} + +func (x *Value) GetDoubleValue() float64 { + if x, ok := x.GetKind().(*Value_DoubleValue); ok { + return x.DoubleValue + } + return 0 +} + +func (x *Value) GetStringValue() string { + if x, ok := x.GetKind().(*Value_StringValue); ok { + return x.StringValue + } + return "" +} + +func (x *Value) GetBytesValue() []byte { + if x, ok := x.GetKind().(*Value_BytesValue); ok { + return x.BytesValue + } + return nil +} + +func (x *Value) GetEnumValue() *EnumValue { + if x, ok := x.GetKind().(*Value_EnumValue); ok { + return x.EnumValue + } + return nil +} + +func (x *Value) GetObjectValue() *anypb.Any { + if x, ok := x.GetKind().(*Value_ObjectValue); ok { + return x.ObjectValue + } + return nil +} + +func (x *Value) GetMapValue() *MapValue { + if x, ok := x.GetKind().(*Value_MapValue); ok { + return x.MapValue + } + return nil +} + +func (x *Value) GetListValue() *ListValue { + if x, ok := x.GetKind().(*Value_ListValue); ok { + return x.ListValue + } + return nil +} + +func (x *Value) GetTypeValue() string { + if x, ok := x.GetKind().(*Value_TypeValue); ok { + return x.TypeValue + } + return "" +} + +type isValue_Kind interface { + isValue_Kind() +} + +type Value_NullValue struct { + NullValue structpb.NullValue `protobuf:"varint,1,opt,name=null_value,json=nullValue,proto3,enum=google.protobuf.NullValue,oneof"` +} + +type Value_BoolValue struct { + BoolValue bool `protobuf:"varint,2,opt,name=bool_value,json=boolValue,proto3,oneof"` +} + +type Value_Int64Value struct { + Int64Value int64 `protobuf:"varint,3,opt,name=int64_value,json=int64Value,proto3,oneof"` +} + +type Value_Uint64Value struct { + Uint64Value uint64 `protobuf:"varint,4,opt,name=uint64_value,json=uint64Value,proto3,oneof"` +} + +type Value_DoubleValue struct { + DoubleValue float64 `protobuf:"fixed64,5,opt,name=double_value,json=doubleValue,proto3,oneof"` +} + +type Value_StringValue struct { + StringValue string `protobuf:"bytes,6,opt,name=string_value,json=stringValue,proto3,oneof"` +} + +type Value_BytesValue struct { + BytesValue []byte `protobuf:"bytes,7,opt,name=bytes_value,json=bytesValue,proto3,oneof"` +} + +type Value_EnumValue struct { + EnumValue *EnumValue `protobuf:"bytes,9,opt,name=enum_value,json=enumValue,proto3,oneof"` +} + +type Value_ObjectValue struct { + ObjectValue *anypb.Any `protobuf:"bytes,10,opt,name=object_value,json=objectValue,proto3,oneof"` +} + +type Value_MapValue struct { + MapValue *MapValue `protobuf:"bytes,11,opt,name=map_value,json=mapValue,proto3,oneof"` +} + +type Value_ListValue struct { + ListValue *ListValue `protobuf:"bytes,12,opt,name=list_value,json=listValue,proto3,oneof"` +} + +type Value_TypeValue struct { + TypeValue string `protobuf:"bytes,15,opt,name=type_value,json=typeValue,proto3,oneof"` +} + +func 
(*Value_NullValue) isValue_Kind() {} + +func (*Value_BoolValue) isValue_Kind() {} + +func (*Value_Int64Value) isValue_Kind() {} + +func (*Value_Uint64Value) isValue_Kind() {} + +func (*Value_DoubleValue) isValue_Kind() {} + +func (*Value_StringValue) isValue_Kind() {} + +func (*Value_BytesValue) isValue_Kind() {} + +func (*Value_EnumValue) isValue_Kind() {} + +func (*Value_ObjectValue) isValue_Kind() {} + +func (*Value_MapValue) isValue_Kind() {} + +func (*Value_ListValue) isValue_Kind() {} + +func (*Value_TypeValue) isValue_Kind() {} + +type EnumValue struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + Value int32 `protobuf:"varint,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *EnumValue) Reset() { + *x = EnumValue{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_value_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *EnumValue) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EnumValue) ProtoMessage() {} + +func (x *EnumValue) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_value_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use EnumValue.ProtoReflect.Descriptor instead. +func (*EnumValue) Descriptor() ([]byte, []int) { + return file_cel_expr_value_proto_rawDescGZIP(), []int{1} +} + +func (x *EnumValue) GetType() string { + if x != nil { + return x.Type + } + return "" +} + +func (x *EnumValue) GetValue() int32 { + if x != nil { + return x.Value + } + return 0 +} + +type ListValue struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Values []*Value `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"` +} + +func (x *ListValue) Reset() { + *x = ListValue{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_value_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListValue) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListValue) ProtoMessage() {} + +func (x *ListValue) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_value_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListValue.ProtoReflect.Descriptor instead. 
+func (*ListValue) Descriptor() ([]byte, []int) { + return file_cel_expr_value_proto_rawDescGZIP(), []int{2} +} + +func (x *ListValue) GetValues() []*Value { + if x != nil { + return x.Values + } + return nil +} + +type MapValue struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Entries []*MapValue_Entry `protobuf:"bytes,1,rep,name=entries,proto3" json:"entries,omitempty"` +} + +func (x *MapValue) Reset() { + *x = MapValue{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_value_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MapValue) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MapValue) ProtoMessage() {} + +func (x *MapValue) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_value_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MapValue.ProtoReflect.Descriptor instead. +func (*MapValue) Descriptor() ([]byte, []int) { + return file_cel_expr_value_proto_rawDescGZIP(), []int{3} +} + +func (x *MapValue) GetEntries() []*MapValue_Entry { + if x != nil { + return x.Entries + } + return nil +} + +type MapValue_Entry struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Key *Value `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + Value *Value `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *MapValue_Entry) Reset() { + *x = MapValue_Entry{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_value_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MapValue_Entry) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MapValue_Entry) ProtoMessage() {} + +func (x *MapValue_Entry) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_value_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MapValue_Entry.ProtoReflect.Descriptor instead. 
+func (*MapValue_Entry) Descriptor() ([]byte, []int) { + return file_cel_expr_value_proto_rawDescGZIP(), []int{3, 0} +} + +func (x *MapValue_Entry) GetKey() *Value { + if x != nil { + return x.Key + } + return nil +} + +func (x *MapValue_Entry) GetValue() *Value { + if x != nil { + return x.Value + } + return nil +} + +var File_cel_expr_value_proto protoreflect.FileDescriptor + +var file_cel_expr_value_proto_rawDesc = []byte{ + 0x0a, 0x14, 0x63, 0x65, 0x6c, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x08, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, + 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x73, 0x74, 0x72, + 0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x9d, 0x04, 0x0a, 0x05, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x12, 0x3b, 0x0a, 0x0a, 0x6e, 0x75, 0x6c, 0x6c, 0x5f, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4e, 0x75, 0x6c, 0x6c, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x48, 0x00, 0x52, 0x09, 0x6e, 0x75, 0x6c, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x12, 0x1f, 0x0a, 0x0a, 0x62, 0x6f, 0x6f, 0x6c, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x09, 0x62, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x12, 0x21, 0x0a, 0x0b, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x0a, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x56, + 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x75, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x5f, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x48, 0x00, 0x52, 0x0b, 0x75, 0x69, + 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x64, 0x6f, 0x75, + 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x01, 0x48, + 0x00, 0x52, 0x0b, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, + 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x06, + 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0b, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x0a, 0x62, 0x79, 0x74, 0x65, + 0x73, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x34, 0x0a, 0x0a, 0x65, 0x6e, 0x75, 0x6d, 0x5f, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x63, 0x65, 0x6c, + 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x48, + 0x00, 0x52, 0x09, 0x65, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x39, 0x0a, 0x0c, + 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x0a, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x48, 0x00, 0x52, 0x0b, 0x6f, 0x62, 0x6a, 0x65, + 0x63, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x31, 0x0a, 0x09, 0x6d, 0x61, 0x70, 0x5f, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 
0x65, 0x6c, + 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x4d, 0x61, 0x70, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x48, 0x00, + 0x52, 0x08, 0x6d, 0x61, 0x70, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x34, 0x0a, 0x0a, 0x6c, 0x69, + 0x73, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, + 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x48, 0x00, 0x52, 0x09, 0x6c, 0x69, 0x73, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x12, 0x1f, 0x0a, 0x0a, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x0f, + 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x09, 0x74, 0x79, 0x70, 0x65, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x42, 0x06, 0x0a, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x22, 0x35, 0x0a, 0x09, 0x45, 0x6e, 0x75, + 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x22, 0x34, 0x0a, 0x09, 0x4c, 0x69, 0x73, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x27, 0x0a, + 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, + 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x22, 0x91, 0x01, 0x0a, 0x08, 0x4d, 0x61, 0x70, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x12, 0x32, 0x0a, 0x07, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, + 0x4d, 0x61, 0x70, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, + 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x1a, 0x51, 0x0a, 0x05, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x12, 0x21, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, + 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x03, + 0x6b, 0x65, 0x79, 0x12, 0x25, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x2d, 0x0a, 0x0c, 0x64, 0x65, + 0x76, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x42, 0x0a, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x0c, 0x63, 0x65, 0x6c, 0x2e, 0x64, 0x65, + 0x76, 0x2f, 0x65, 0x78, 0x70, 0x72, 0xf8, 0x01, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x33, +} + +var ( + file_cel_expr_value_proto_rawDescOnce sync.Once + file_cel_expr_value_proto_rawDescData = file_cel_expr_value_proto_rawDesc +) + +func file_cel_expr_value_proto_rawDescGZIP() []byte { + file_cel_expr_value_proto_rawDescOnce.Do(func() { + file_cel_expr_value_proto_rawDescData = protoimpl.X.CompressGZIP(file_cel_expr_value_proto_rawDescData) + }) + return file_cel_expr_value_proto_rawDescData +} + +var file_cel_expr_value_proto_msgTypes = make([]protoimpl.MessageInfo, 5) +var file_cel_expr_value_proto_goTypes = []interface{}{ + (*Value)(nil), // 0: cel.expr.Value + (*EnumValue)(nil), // 1: cel.expr.EnumValue + (*ListValue)(nil), // 2: cel.expr.ListValue + (*MapValue)(nil), // 3: cel.expr.MapValue + (*MapValue_Entry)(nil), // 4: cel.expr.MapValue.Entry + (structpb.NullValue)(0), // 5: google.protobuf.NullValue + (*anypb.Any)(nil), // 6: google.protobuf.Any +} +var 
file_cel_expr_value_proto_depIdxs = []int32{ + 5, // 0: cel.expr.Value.null_value:type_name -> google.protobuf.NullValue + 1, // 1: cel.expr.Value.enum_value:type_name -> cel.expr.EnumValue + 6, // 2: cel.expr.Value.object_value:type_name -> google.protobuf.Any + 3, // 3: cel.expr.Value.map_value:type_name -> cel.expr.MapValue + 2, // 4: cel.expr.Value.list_value:type_name -> cel.expr.ListValue + 0, // 5: cel.expr.ListValue.values:type_name -> cel.expr.Value + 4, // 6: cel.expr.MapValue.entries:type_name -> cel.expr.MapValue.Entry + 0, // 7: cel.expr.MapValue.Entry.key:type_name -> cel.expr.Value + 0, // 8: cel.expr.MapValue.Entry.value:type_name -> cel.expr.Value + 9, // [9:9] is the sub-list for method output_type + 9, // [9:9] is the sub-list for method input_type + 9, // [9:9] is the sub-list for extension type_name + 9, // [9:9] is the sub-list for extension extendee + 0, // [0:9] is the sub-list for field type_name +} + +func init() { file_cel_expr_value_proto_init() } +func file_cel_expr_value_proto_init() { + if File_cel_expr_value_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_cel_expr_value_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Value); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_value_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*EnumValue); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_value_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListValue); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_value_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MapValue); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_value_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MapValue_Entry); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_cel_expr_value_proto_msgTypes[0].OneofWrappers = []interface{}{ + (*Value_NullValue)(nil), + (*Value_BoolValue)(nil), + (*Value_Int64Value)(nil), + (*Value_Uint64Value)(nil), + (*Value_DoubleValue)(nil), + (*Value_StringValue)(nil), + (*Value_BytesValue)(nil), + (*Value_EnumValue)(nil), + (*Value_ObjectValue)(nil), + (*Value_MapValue)(nil), + (*Value_ListValue)(nil), + (*Value_TypeValue)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_cel_expr_value_proto_rawDesc, + NumEnums: 0, + NumMessages: 5, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_cel_expr_value_proto_goTypes, + DependencyIndexes: file_cel_expr_value_proto_depIdxs, + MessageInfos: file_cel_expr_value_proto_msgTypes, + }.Build() + File_cel_expr_value_proto = out.File + file_cel_expr_value_proto_rawDesc = nil + file_cel_expr_value_proto_goTypes = nil + file_cel_expr_value_proto_depIdxs = nil +} diff --git a/vendor/github.com/distribution/reference/README.md b/vendor/github.com/distribution/reference/README.md index e2531e49c..172a02e0b 100644 --- 
a/vendor/github.com/distribution/reference/README.md +++ b/vendor/github.com/distribution/reference/README.md @@ -10,7 +10,7 @@ Go library to handle references to container images. [![codecov](https://codecov.io/gh/distribution/reference/branch/main/graph/badge.svg)](https://codecov.io/gh/distribution/reference) [![FOSSA Status](https://app.fossa.com/api/projects/custom%2B162%2Fgithub.com%2Fdistribution%2Freference.svg?type=shield)](https://app.fossa.com/projects/custom%2B162%2Fgithub.com%2Fdistribution%2Freference?ref=badge_shield) -This repository contains a library for handling refrences to container images held in container registries. Please see [godoc](https://pkg.go.dev/github.com/distribution/reference) for details. +This repository contains a library for handling references to container images held in container registries. Please see [godoc](https://pkg.go.dev/github.com/distribution/reference) for details. ## Contribution diff --git a/vendor/github.com/distribution/reference/normalize.go b/vendor/github.com/distribution/reference/normalize.go index a30229d01..f4128314c 100644 --- a/vendor/github.com/distribution/reference/normalize.go +++ b/vendor/github.com/distribution/reference/normalize.go @@ -123,20 +123,51 @@ func ParseDockerRef(ref string) (Named, error) { // splitDockerDomain splits a repository name to domain and remote-name. // If no valid domain is found, the default domain is used. Repository name // needs to be already validated before. -func splitDockerDomain(name string) (domain, remainder string) { - i := strings.IndexRune(name, '/') - if i == -1 || (!strings.ContainsAny(name[:i], ".:") && name[:i] != localhost && strings.ToLower(name[:i]) == name[:i]) { - domain, remainder = defaultDomain, name - } else { - domain, remainder = name[:i], name[i+1:] - } - if domain == legacyDefaultDomain { - domain = defaultDomain - } - if domain == defaultDomain && !strings.ContainsRune(remainder, '/') { - remainder = officialRepoPrefix + remainder - } - return +func splitDockerDomain(name string) (domain, remoteName string) { + maybeDomain, maybeRemoteName, ok := strings.Cut(name, "/") + if !ok { + // Fast-path for single element ("familiar" names), such as "ubuntu" + // or "ubuntu:latest". Familiar names must be handled separately, to + // prevent them from being handled as "hostname:port". + // + // Canonicalize them as "docker.io/library/name[:tag]" + + // FIXME(thaJeztah): account for bare "localhost" or "example.com" names, which SHOULD be considered a domain. + return defaultDomain, officialRepoPrefix + name + } + + switch { + case maybeDomain == localhost: + // localhost is a reserved namespace and always considered a domain. + domain, remoteName = maybeDomain, maybeRemoteName + case maybeDomain == legacyDefaultDomain: + // canonicalize the Docker Hub and legacy "Docker Index" domains. + domain, remoteName = defaultDomain, maybeRemoteName + case strings.ContainsAny(maybeDomain, ".:"): + // Likely a domain or IP-address: + // + // - contains a "." (e.g., "example.com" or "127.0.0.1") + // - contains a ":" (e.g., "example:5000", "::1", or "[::1]:5000") + domain, remoteName = maybeDomain, maybeRemoteName + case strings.ToLower(maybeDomain) != maybeDomain: + // Uppercase namespaces are not allowed, so if the first element + // is not lowercase, we assume it to be a domain-name. + domain, remoteName = maybeDomain, maybeRemoteName + default: + // None of the above: it's not a domain, so use the default, and + // use the name input the remote-name. 
+ domain, remoteName = defaultDomain, name + } + + if domain == defaultDomain && !strings.ContainsRune(remoteName, '/') { + // Canonicalize "familiar" names, but only on Docker Hub, not + // on other domains: + // + // "docker.io/ubuntu[:tag]" => "docker.io/library/ubuntu[:tag]" + remoteName = officialRepoPrefix + remoteName + } + + return domain, remoteName } // familiarizeName returns a shortened version of the name familiar diff --git a/vendor/github.com/distribution/reference/reference.go b/vendor/github.com/distribution/reference/reference.go index e98c44daa..900398bde 100644 --- a/vendor/github.com/distribution/reference/reference.go +++ b/vendor/github.com/distribution/reference/reference.go @@ -35,8 +35,13 @@ import ( ) const ( + // RepositoryNameTotalLengthMax is the maximum total number of characters in a repository name. + RepositoryNameTotalLengthMax = 255 + // NameTotalLengthMax is the maximum total number of characters in a repository name. - NameTotalLengthMax = 255 + // + // Deprecated: use [RepositoryNameTotalLengthMax] instead. + NameTotalLengthMax = RepositoryNameTotalLengthMax ) var ( @@ -55,8 +60,8 @@ var ( // ErrNameEmpty is returned for empty, invalid repository names. ErrNameEmpty = errors.New("repository name must have at least one component") - // ErrNameTooLong is returned when a repository name is longer than NameTotalLengthMax. - ErrNameTooLong = fmt.Errorf("repository name must not be more than %v characters", NameTotalLengthMax) + // ErrNameTooLong is returned when a repository name is longer than RepositoryNameTotalLengthMax. + ErrNameTooLong = fmt.Errorf("repository name must not be more than %v characters", RepositoryNameTotalLengthMax) // ErrNameNotCanonical is returned when a name is not canonical. ErrNameNotCanonical = errors.New("repository name must be canonical") @@ -165,6 +170,9 @@ func Path(named Named) (name string) { return path } +// splitDomain splits a named reference into a hostname and path string. +// If no valid hostname is found, the hostname is empty and the full value +// is returned as name func splitDomain(name string) (string, string) { match := anchoredNameRegexp.FindStringSubmatch(name) if len(match) != 3 { @@ -173,19 +181,6 @@ func splitDomain(name string) (string, string) { return match[1], match[2] } -// SplitHostname splits a named reference into a -// hostname and name string. If no valid hostname is -// found, the hostname is empty and the full value -// is returned as name -// -// Deprecated: Use [Domain] or [Path]. -func SplitHostname(named Named) (string, string) { - if r, ok := named.(namedRepository); ok { - return r.Domain(), r.Path() - } - return splitDomain(named.Name()) -} - // Parse parses s and returns a syntactically valid Reference. // If an error was encountered it is returned, along with a nil Reference. func Parse(s string) (Reference, error) { @@ -200,10 +195,6 @@ func Parse(s string) (Reference, error) { return nil, ErrReferenceInvalidFormat } - if len(matches[1]) > NameTotalLengthMax { - return nil, ErrNameTooLong - } - var repo repository nameMatch := anchoredNameRegexp.FindStringSubmatch(matches[1]) @@ -215,6 +206,10 @@ func Parse(s string) (Reference, error) { repo.path = matches[1] } + if len(repo.path) > RepositoryNameTotalLengthMax { + return nil, ErrNameTooLong + } + ref := reference{ namedRepository: repo, tag: matches[2], @@ -253,14 +248,15 @@ func ParseNamed(s string) (Named, error) { // WithName returns a named object representing the given string. 
If the input // is invalid ErrReferenceInvalidFormat will be returned. func WithName(name string) (Named, error) { - if len(name) > NameTotalLengthMax { - return nil, ErrNameTooLong - } - match := anchoredNameRegexp.FindStringSubmatch(name) if match == nil || len(match) != 3 { return nil, ErrReferenceInvalidFormat } + + if len(match[2]) > RepositoryNameTotalLengthMax { + return nil, ErrNameTooLong + } + return repository{ domain: match[1], path: match[2], diff --git a/vendor/github.com/evanphx/json-patch/.gitignore b/vendor/github.com/evanphx/json-patch/.gitignore deleted file mode 100644 index b7ed7f956..000000000 --- a/vendor/github.com/evanphx/json-patch/.gitignore +++ /dev/null @@ -1,6 +0,0 @@ -# editor and IDE paraphernalia -.idea -.vscode - -# macOS paraphernalia -.DS_Store diff --git a/vendor/github.com/evanphx/json-patch/LICENSE b/vendor/github.com/evanphx/json-patch/LICENSE deleted file mode 100644 index df76d7d77..000000000 --- a/vendor/github.com/evanphx/json-patch/LICENSE +++ /dev/null @@ -1,25 +0,0 @@ -Copyright (c) 2014, Evan Phoenix -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -* Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. -* Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. -* Neither the name of the Evan Phoenix nor the names of its contributors - may be used to endorse or promote products derived from this software - without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/evanphx/json-patch/README.md b/vendor/github.com/evanphx/json-patch/README.md deleted file mode 100644 index 97e319b21..000000000 --- a/vendor/github.com/evanphx/json-patch/README.md +++ /dev/null @@ -1,317 +0,0 @@ -# JSON-Patch -`jsonpatch` is a library which provides functionality for both applying -[RFC6902 JSON patches](http://tools.ietf.org/html/rfc6902) against documents, as -well as for calculating & applying [RFC7396 JSON merge patches](https://tools.ietf.org/html/rfc7396). - -[![GoDoc](https://godoc.org/github.com/evanphx/json-patch?status.svg)](http://godoc.org/github.com/evanphx/json-patch) -[![Build Status](https://github.com/evanphx/json-patch/actions/workflows/go.yml/badge.svg)](https://github.com/evanphx/json-patch/actions/workflows/go.yml) -[![Report Card](https://goreportcard.com/badge/github.com/evanphx/json-patch)](https://goreportcard.com/report/github.com/evanphx/json-patch) - -# Get It! 
- -**Latest and greatest**: -```bash -go get -u github.com/evanphx/json-patch/v5 -``` - -**Stable Versions**: -* Version 5: `go get -u gopkg.in/evanphx/json-patch.v5` -* Version 4: `go get -u gopkg.in/evanphx/json-patch.v4` - -(previous versions below `v3` are unavailable) - -# Use It! -* [Create and apply a merge patch](#create-and-apply-a-merge-patch) -* [Create and apply a JSON Patch](#create-and-apply-a-json-patch) -* [Comparing JSON documents](#comparing-json-documents) -* [Combine merge patches](#combine-merge-patches) - - -# Configuration - -* There is a global configuration variable `jsonpatch.SupportNegativeIndices`. - This defaults to `true` and enables the non-standard practice of allowing - negative indices to mean indices starting at the end of an array. This - functionality can be disabled by setting `jsonpatch.SupportNegativeIndices = - false`. - -* There is a global configuration variable `jsonpatch.AccumulatedCopySizeLimit`, - which limits the total size increase in bytes caused by "copy" operations in a - patch. It defaults to 0, which means there is no limit. - -These global variables control the behavior of `jsonpatch.Apply`. - -An alternative to `jsonpatch.Apply` is `jsonpatch.ApplyWithOptions` whose behavior -is controlled by an `options` parameter of type `*jsonpatch.ApplyOptions`. - -Structure `jsonpatch.ApplyOptions` includes the configuration options above -and adds two new options: `AllowMissingPathOnRemove` and `EnsurePathExistsOnAdd`. - -When `AllowMissingPathOnRemove` is set to `true`, `jsonpatch.ApplyWithOptions` will ignore -`remove` operations whose `path` points to a non-existent location in the JSON document. -`AllowMissingPathOnRemove` defaults to `false` which will lead to `jsonpatch.ApplyWithOptions` -returning an error when hitting a missing `path` on `remove`. - -When `EnsurePathExistsOnAdd` is set to `true`, `jsonpatch.ApplyWithOptions` will make sure -that `add` operations produce all the `path` elements that are missing from the target object. - -Use `jsonpatch.NewApplyOptions` to create an instance of `jsonpatch.ApplyOptions` -whose values are populated from the global configuration variables. - -## Create and apply a merge patch -Given both an original JSON document and a modified JSON document, you can create -a [Merge Patch](https://tools.ietf.org/html/rfc7396) document. - -It can describe the changes needed to convert from the original to the -modified JSON document. - -Once you have a merge patch, you can apply it to other JSON documents using the -`jsonpatch.MergePatch(document, patch)` function. - -```go -package main - -import ( - "fmt" - - jsonpatch "github.com/evanphx/json-patch" -) - -func main() { - // Let's create a merge patch from these two documents... - original := []byte(`{"name": "John", "age": 24, "height": 3.21}`) - target := []byte(`{"name": "Jane", "age": 24}`) - - patch, err := jsonpatch.CreateMergePatch(original, target) - if err != nil { - panic(err) - } - - // Now lets apply the patch against a different JSON document... 
- - alternative := []byte(`{"name": "Tina", "age": 28, "height": 3.75}`) - modifiedAlternative, err := jsonpatch.MergePatch(alternative, patch) - - fmt.Printf("patch document: %s\n", patch) - fmt.Printf("updated alternative doc: %s\n", modifiedAlternative) -} -``` - -When ran, you get the following output: - -```bash -$ go run main.go -patch document: {"height":null,"name":"Jane"} -updated alternative doc: {"age":28,"name":"Jane"} -``` - -## Create and apply a JSON Patch -You can create patch objects using `DecodePatch([]byte)`, which can then -be applied against JSON documents. - -The following is an example of creating a patch from two operations, and -applying it against a JSON document. - -```go -package main - -import ( - "fmt" - - jsonpatch "github.com/evanphx/json-patch" -) - -func main() { - original := []byte(`{"name": "John", "age": 24, "height": 3.21}`) - patchJSON := []byte(`[ - {"op": "replace", "path": "/name", "value": "Jane"}, - {"op": "remove", "path": "/height"} - ]`) - - patch, err := jsonpatch.DecodePatch(patchJSON) - if err != nil { - panic(err) - } - - modified, err := patch.Apply(original) - if err != nil { - panic(err) - } - - fmt.Printf("Original document: %s\n", original) - fmt.Printf("Modified document: %s\n", modified) -} -``` - -When ran, you get the following output: - -```bash -$ go run main.go -Original document: {"name": "John", "age": 24, "height": 3.21} -Modified document: {"age":24,"name":"Jane"} -``` - -## Comparing JSON documents -Due to potential whitespace and ordering differences, one cannot simply compare -JSON strings or byte-arrays directly. - -As such, you can instead use `jsonpatch.Equal(document1, document2)` to -determine if two JSON documents are _structurally_ equal. This ignores -whitespace differences, and key-value ordering. - -```go -package main - -import ( - "fmt" - - jsonpatch "github.com/evanphx/json-patch" -) - -func main() { - original := []byte(`{"name": "John", "age": 24, "height": 3.21}`) - similar := []byte(` - { - "age": 24, - "height": 3.21, - "name": "John" - } - `) - different := []byte(`{"name": "Jane", "age": 20, "height": 3.37}`) - - if jsonpatch.Equal(original, similar) { - fmt.Println(`"original" is structurally equal to "similar"`) - } - - if !jsonpatch.Equal(original, different) { - fmt.Println(`"original" is _not_ structurally equal to "different"`) - } -} -``` - -When ran, you get the following output: -```bash -$ go run main.go -"original" is structurally equal to "similar" -"original" is _not_ structurally equal to "different" -``` - -## Combine merge patches -Given two JSON merge patch documents, it is possible to combine them into a -single merge patch which can describe both set of changes. - -The resulting merge patch can be used such that applying it results in a -document structurally similar as merging each merge patch to the document -in succession. - -```go -package main - -import ( - "fmt" - - jsonpatch "github.com/evanphx/json-patch" -) - -func main() { - original := []byte(`{"name": "John", "age": 24, "height": 3.21}`) - - nameAndHeight := []byte(`{"height":null,"name":"Jane"}`) - ageAndEyes := []byte(`{"age":4.23,"eyes":"blue"}`) - - // Let's combine these merge patch documents... 
- combinedPatch, err := jsonpatch.MergeMergePatches(nameAndHeight, ageAndEyes) - if err != nil { - panic(err) - } - - // Apply each patch individual against the original document - withoutCombinedPatch, err := jsonpatch.MergePatch(original, nameAndHeight) - if err != nil { - panic(err) - } - - withoutCombinedPatch, err = jsonpatch.MergePatch(withoutCombinedPatch, ageAndEyes) - if err != nil { - panic(err) - } - - // Apply the combined patch against the original document - - withCombinedPatch, err := jsonpatch.MergePatch(original, combinedPatch) - if err != nil { - panic(err) - } - - // Do both result in the same thing? They should! - if jsonpatch.Equal(withCombinedPatch, withoutCombinedPatch) { - fmt.Println("Both JSON documents are structurally the same!") - } - - fmt.Printf("combined merge patch: %s", combinedPatch) -} -``` - -When ran, you get the following output: -```bash -$ go run main.go -Both JSON documents are structurally the same! -combined merge patch: {"age":4.23,"eyes":"blue","height":null,"name":"Jane"} -``` - -# CLI for comparing JSON documents -You can install the commandline program `json-patch`. - -This program can take multiple JSON patch documents as arguments, -and fed a JSON document from `stdin`. It will apply the patch(es) against -the document and output the modified doc. - -**patch.1.json** -```json -[ - {"op": "replace", "path": "/name", "value": "Jane"}, - {"op": "remove", "path": "/height"} -] -``` - -**patch.2.json** -```json -[ - {"op": "add", "path": "/address", "value": "123 Main St"}, - {"op": "replace", "path": "/age", "value": "21"} -] -``` - -**document.json** -```json -{ - "name": "John", - "age": 24, - "height": 3.21 -} -``` - -You can then run: - -```bash -$ go install github.com/evanphx/json-patch/cmd/json-patch -$ cat document.json | json-patch -p patch.1.json -p patch.2.json -{"address":"123 Main St","age":"21","name":"Jane"} -``` - -# Help It! -Contributions are welcomed! Leave [an issue](https://github.com/evanphx/json-patch/issues) -or [create a PR](https://github.com/evanphx/json-patch/compare). - - -Before creating a pull request, we'd ask that you make sure tests are passing -and that you have added new tests when applicable. - -Contributors can run tests using: - -```bash -go test -cover ./... -``` - -Builds for pull requests are tested automatically -using [GitHub Actions](https://github.com/evanphx/json-patch/actions/workflows/go.yml). diff --git a/vendor/github.com/evanphx/json-patch/errors.go b/vendor/github.com/evanphx/json-patch/errors.go deleted file mode 100644 index 75304b443..000000000 --- a/vendor/github.com/evanphx/json-patch/errors.go +++ /dev/null @@ -1,38 +0,0 @@ -package jsonpatch - -import "fmt" - -// AccumulatedCopySizeError is an error type returned when the accumulated size -// increase caused by copy operations in a patch operation has exceeded the -// limit. -type AccumulatedCopySizeError struct { - limit int64 - accumulated int64 -} - -// NewAccumulatedCopySizeError returns an AccumulatedCopySizeError. -func NewAccumulatedCopySizeError(l, a int64) *AccumulatedCopySizeError { - return &AccumulatedCopySizeError{limit: l, accumulated: a} -} - -// Error implements the error interface. -func (a *AccumulatedCopySizeError) Error() string { - return fmt.Sprintf("Unable to complete the copy, the accumulated size increase of copy is %d, exceeding the limit %d", a.accumulated, a.limit) -} - -// ArraySizeError is an error type returned when the array size has exceeded -// the limit. 
-type ArraySizeError struct { - limit int - size int -} - -// NewArraySizeError returns an ArraySizeError. -func NewArraySizeError(l, s int) *ArraySizeError { - return &ArraySizeError{limit: l, size: s} -} - -// Error implements the error interface. -func (a *ArraySizeError) Error() string { - return fmt.Sprintf("Unable to create array of size %d, limit is %d", a.size, a.limit) -} diff --git a/vendor/github.com/evanphx/json-patch/merge.go b/vendor/github.com/evanphx/json-patch/merge.go deleted file mode 100644 index ad88d4018..000000000 --- a/vendor/github.com/evanphx/json-patch/merge.go +++ /dev/null @@ -1,389 +0,0 @@ -package jsonpatch - -import ( - "bytes" - "encoding/json" - "fmt" - "reflect" -) - -func merge(cur, patch *lazyNode, mergeMerge bool) *lazyNode { - curDoc, err := cur.intoDoc() - - if err != nil { - pruneNulls(patch) - return patch - } - - patchDoc, err := patch.intoDoc() - - if err != nil { - return patch - } - - mergeDocs(curDoc, patchDoc, mergeMerge) - - return cur -} - -func mergeDocs(doc, patch *partialDoc, mergeMerge bool) { - for k, v := range *patch { - if v == nil { - if mergeMerge { - (*doc)[k] = nil - } else { - delete(*doc, k) - } - } else { - cur, ok := (*doc)[k] - - if !ok || cur == nil { - if !mergeMerge { - pruneNulls(v) - } - - (*doc)[k] = v - } else { - (*doc)[k] = merge(cur, v, mergeMerge) - } - } - } -} - -func pruneNulls(n *lazyNode) { - sub, err := n.intoDoc() - - if err == nil { - pruneDocNulls(sub) - } else { - ary, err := n.intoAry() - - if err == nil { - pruneAryNulls(ary) - } - } -} - -func pruneDocNulls(doc *partialDoc) *partialDoc { - for k, v := range *doc { - if v == nil { - delete(*doc, k) - } else { - pruneNulls(v) - } - } - - return doc -} - -func pruneAryNulls(ary *partialArray) *partialArray { - newAry := []*lazyNode{} - - for _, v := range *ary { - if v != nil { - pruneNulls(v) - } - newAry = append(newAry, v) - } - - *ary = newAry - - return ary -} - -var ErrBadJSONDoc = fmt.Errorf("Invalid JSON Document") -var ErrBadJSONPatch = fmt.Errorf("Invalid JSON Patch") -var errBadMergeTypes = fmt.Errorf("Mismatched JSON Documents") - -// MergeMergePatches merges two merge patches together, such that -// applying this resulting merged merge patch to a document yields the same -// as merging each merge patch to the document in succession. -func MergeMergePatches(patch1Data, patch2Data []byte) ([]byte, error) { - return doMergePatch(patch1Data, patch2Data, true) -} - -// MergePatch merges the patchData into the docData. 
-func MergePatch(docData, patchData []byte) ([]byte, error) { - return doMergePatch(docData, patchData, false) -} - -func doMergePatch(docData, patchData []byte, mergeMerge bool) ([]byte, error) { - doc := &partialDoc{} - - docErr := json.Unmarshal(docData, doc) - - patch := &partialDoc{} - - patchErr := json.Unmarshal(patchData, patch) - - if _, ok := docErr.(*json.SyntaxError); ok { - return nil, ErrBadJSONDoc - } - - if _, ok := patchErr.(*json.SyntaxError); ok { - return nil, ErrBadJSONPatch - } - - if docErr == nil && *doc == nil { - return nil, ErrBadJSONDoc - } - - if patchErr == nil && *patch == nil { - return nil, ErrBadJSONPatch - } - - if docErr != nil || patchErr != nil { - // Not an error, just not a doc, so we turn straight into the patch - if patchErr == nil { - if mergeMerge { - doc = patch - } else { - doc = pruneDocNulls(patch) - } - } else { - patchAry := &partialArray{} - patchErr = json.Unmarshal(patchData, patchAry) - - if patchErr != nil { - return nil, ErrBadJSONPatch - } - - pruneAryNulls(patchAry) - - out, patchErr := json.Marshal(patchAry) - - if patchErr != nil { - return nil, ErrBadJSONPatch - } - - return out, nil - } - } else { - mergeDocs(doc, patch, mergeMerge) - } - - return json.Marshal(doc) -} - -// resemblesJSONArray indicates whether the byte-slice "appears" to be -// a JSON array or not. -// False-positives are possible, as this function does not check the internal -// structure of the array. It only checks that the outer syntax is present and -// correct. -func resemblesJSONArray(input []byte) bool { - input = bytes.TrimSpace(input) - - hasPrefix := bytes.HasPrefix(input, []byte("[")) - hasSuffix := bytes.HasSuffix(input, []byte("]")) - - return hasPrefix && hasSuffix -} - -// CreateMergePatch will return a merge patch document capable of converting -// the original document(s) to the modified document(s). -// The parameters can be bytes of either two JSON Documents, or two arrays of -// JSON documents. -// The merge patch returned follows the specification defined at http://tools.ietf.org/html/draft-ietf-appsawg-json-merge-patch-07 -func CreateMergePatch(originalJSON, modifiedJSON []byte) ([]byte, error) { - originalResemblesArray := resemblesJSONArray(originalJSON) - modifiedResemblesArray := resemblesJSONArray(modifiedJSON) - - // Do both byte-slices seem like JSON arrays? - if originalResemblesArray && modifiedResemblesArray { - return createArrayMergePatch(originalJSON, modifiedJSON) - } - - // Are both byte-slices are not arrays? Then they are likely JSON objects... - if !originalResemblesArray && !modifiedResemblesArray { - return createObjectMergePatch(originalJSON, modifiedJSON) - } - - // None of the above? Then return an error because of mismatched types. - return nil, errBadMergeTypes -} - -// createObjectMergePatch will return a merge-patch document capable of -// converting the original document to the modified document. 
-func createObjectMergePatch(originalJSON, modifiedJSON []byte) ([]byte, error) { - originalDoc := map[string]interface{}{} - modifiedDoc := map[string]interface{}{} - - err := json.Unmarshal(originalJSON, &originalDoc) - if err != nil { - return nil, ErrBadJSONDoc - } - - err = json.Unmarshal(modifiedJSON, &modifiedDoc) - if err != nil { - return nil, ErrBadJSONDoc - } - - dest, err := getDiff(originalDoc, modifiedDoc) - if err != nil { - return nil, err - } - - return json.Marshal(dest) -} - -// createArrayMergePatch will return an array of merge-patch documents capable -// of converting the original document to the modified document for each -// pair of JSON documents provided in the arrays. -// Arrays of mismatched sizes will result in an error. -func createArrayMergePatch(originalJSON, modifiedJSON []byte) ([]byte, error) { - originalDocs := []json.RawMessage{} - modifiedDocs := []json.RawMessage{} - - err := json.Unmarshal(originalJSON, &originalDocs) - if err != nil { - return nil, ErrBadJSONDoc - } - - err = json.Unmarshal(modifiedJSON, &modifiedDocs) - if err != nil { - return nil, ErrBadJSONDoc - } - - total := len(originalDocs) - if len(modifiedDocs) != total { - return nil, ErrBadJSONDoc - } - - result := []json.RawMessage{} - for i := 0; i < len(originalDocs); i++ { - original := originalDocs[i] - modified := modifiedDocs[i] - - patch, err := createObjectMergePatch(original, modified) - if err != nil { - return nil, err - } - - result = append(result, json.RawMessage(patch)) - } - - return json.Marshal(result) -} - -// Returns true if the array matches (must be json types). -// As is idiomatic for go, an empty array is not the same as a nil array. -func matchesArray(a, b []interface{}) bool { - if len(a) != len(b) { - return false - } - if (a == nil && b != nil) || (a != nil && b == nil) { - return false - } - for i := range a { - if !matchesValue(a[i], b[i]) { - return false - } - } - return true -} - -// Returns true if the values matches (must be json types) -// The types of the values must match, otherwise it will always return false -// If two map[string]interface{} are given, all elements must match. -func matchesValue(av, bv interface{}) bool { - if reflect.TypeOf(av) != reflect.TypeOf(bv) { - return false - } - switch at := av.(type) { - case string: - bt := bv.(string) - if bt == at { - return true - } - case float64: - bt := bv.(float64) - if bt == at { - return true - } - case bool: - bt := bv.(bool) - if bt == at { - return true - } - case nil: - // Both nil, fine. - return true - case map[string]interface{}: - bt := bv.(map[string]interface{}) - if len(bt) != len(at) { - return false - } - for key := range bt { - av, aOK := at[key] - bv, bOK := bt[key] - if aOK != bOK { - return false - } - if !matchesValue(av, bv) { - return false - } - } - return true - case []interface{}: - bt := bv.([]interface{}) - return matchesArray(at, bt) - } - return false -} - -// getDiff returns the (recursive) difference between a and b as a map[string]interface{}. 
-func getDiff(a, b map[string]interface{}) (map[string]interface{}, error) { - into := map[string]interface{}{} - for key, bv := range b { - av, ok := a[key] - // value was added - if !ok { - into[key] = bv - continue - } - // If types have changed, replace completely - if reflect.TypeOf(av) != reflect.TypeOf(bv) { - into[key] = bv - continue - } - // Types are the same, compare values - switch at := av.(type) { - case map[string]interface{}: - bt := bv.(map[string]interface{}) - dst := make(map[string]interface{}, len(bt)) - dst, err := getDiff(at, bt) - if err != nil { - return nil, err - } - if len(dst) > 0 { - into[key] = dst - } - case string, float64, bool: - if !matchesValue(av, bv) { - into[key] = bv - } - case []interface{}: - bt := bv.([]interface{}) - if !matchesArray(at, bt) { - into[key] = bv - } - case nil: - switch bv.(type) { - case nil: - // Both nil, fine. - default: - into[key] = bv - } - default: - panic(fmt.Sprintf("Unknown type:%T in key %s", av, key)) - } - } - // Now add all deleted values as nil - for key := range a { - _, found := b[key] - if !found { - into[key] = nil - } - } - return into, nil -} diff --git a/vendor/github.com/evanphx/json-patch/patch.go b/vendor/github.com/evanphx/json-patch/patch.go deleted file mode 100644 index cd0274e1e..000000000 --- a/vendor/github.com/evanphx/json-patch/patch.go +++ /dev/null @@ -1,851 +0,0 @@ -package jsonpatch - -import ( - "bytes" - "encoding/json" - "fmt" - "strconv" - "strings" - - "github.com/pkg/errors" -) - -const ( - eRaw = iota - eDoc - eAry -) - -var ( - // SupportNegativeIndices decides whether to support non-standard practice of - // allowing negative indices to mean indices starting at the end of an array. - // Default to true. - SupportNegativeIndices bool = true - // AccumulatedCopySizeLimit limits the total size increase in bytes caused by - // "copy" operations in a patch. - AccumulatedCopySizeLimit int64 = 0 -) - -var ( - ErrTestFailed = errors.New("test failed") - ErrMissing = errors.New("missing value") - ErrUnknownType = errors.New("unknown object type") - ErrInvalid = errors.New("invalid state detected") - ErrInvalidIndex = errors.New("invalid index referenced") -) - -type lazyNode struct { - raw *json.RawMessage - doc partialDoc - ary partialArray - which int -} - -// Operation is a single JSON-Patch step, such as a single 'add' operation. -type Operation map[string]*json.RawMessage - -// Patch is an ordered collection of Operations. 
-type Patch []Operation - -type partialDoc map[string]*lazyNode -type partialArray []*lazyNode - -type container interface { - get(key string) (*lazyNode, error) - set(key string, val *lazyNode) error - add(key string, val *lazyNode) error - remove(key string) error -} - -func newLazyNode(raw *json.RawMessage) *lazyNode { - return &lazyNode{raw: raw, doc: nil, ary: nil, which: eRaw} -} - -func (n *lazyNode) MarshalJSON() ([]byte, error) { - switch n.which { - case eRaw: - return json.Marshal(n.raw) - case eDoc: - return json.Marshal(n.doc) - case eAry: - return json.Marshal(n.ary) - default: - return nil, ErrUnknownType - } -} - -func (n *lazyNode) UnmarshalJSON(data []byte) error { - dest := make(json.RawMessage, len(data)) - copy(dest, data) - n.raw = &dest - n.which = eRaw - return nil -} - -func deepCopy(src *lazyNode) (*lazyNode, int, error) { - if src == nil { - return nil, 0, nil - } - a, err := src.MarshalJSON() - if err != nil { - return nil, 0, err - } - sz := len(a) - ra := make(json.RawMessage, sz) - copy(ra, a) - return newLazyNode(&ra), sz, nil -} - -func (n *lazyNode) intoDoc() (*partialDoc, error) { - if n.which == eDoc { - return &n.doc, nil - } - - if n.raw == nil { - return nil, ErrInvalid - } - - err := json.Unmarshal(*n.raw, &n.doc) - - if err != nil { - return nil, err - } - - n.which = eDoc - return &n.doc, nil -} - -func (n *lazyNode) intoAry() (*partialArray, error) { - if n.which == eAry { - return &n.ary, nil - } - - if n.raw == nil { - return nil, ErrInvalid - } - - err := json.Unmarshal(*n.raw, &n.ary) - - if err != nil { - return nil, err - } - - n.which = eAry - return &n.ary, nil -} - -func (n *lazyNode) compact() []byte { - buf := &bytes.Buffer{} - - if n.raw == nil { - return nil - } - - err := json.Compact(buf, *n.raw) - - if err != nil { - return *n.raw - } - - return buf.Bytes() -} - -func (n *lazyNode) tryDoc() bool { - if n.raw == nil { - return false - } - - err := json.Unmarshal(*n.raw, &n.doc) - - if err != nil { - return false - } - - n.which = eDoc - return true -} - -func (n *lazyNode) tryAry() bool { - if n.raw == nil { - return false - } - - err := json.Unmarshal(*n.raw, &n.ary) - - if err != nil { - return false - } - - n.which = eAry - return true -} - -func (n *lazyNode) equal(o *lazyNode) bool { - if n.which == eRaw { - if !n.tryDoc() && !n.tryAry() { - if o.which != eRaw { - return false - } - - return bytes.Equal(n.compact(), o.compact()) - } - } - - if n.which == eDoc { - if o.which == eRaw { - if !o.tryDoc() { - return false - } - } - - if o.which != eDoc { - return false - } - - if len(n.doc) != len(o.doc) { - return false - } - - for k, v := range n.doc { - ov, ok := o.doc[k] - - if !ok { - return false - } - - if (v == nil) != (ov == nil) { - return false - } - - if v == nil && ov == nil { - continue - } - - if !v.equal(ov) { - return false - } - } - - return true - } - - if o.which != eAry && !o.tryAry() { - return false - } - - if len(n.ary) != len(o.ary) { - return false - } - - for idx, val := range n.ary { - if !val.equal(o.ary[idx]) { - return false - } - } - - return true -} - -// Kind reads the "op" field of the Operation. -func (o Operation) Kind() string { - if obj, ok := o["op"]; ok && obj != nil { - var op string - - err := json.Unmarshal(*obj, &op) - - if err != nil { - return "unknown" - } - - return op - } - - return "unknown" -} - -// Path reads the "path" field of the Operation. 
-func (o Operation) Path() (string, error) { - if obj, ok := o["path"]; ok && obj != nil { - var op string - - err := json.Unmarshal(*obj, &op) - - if err != nil { - return "unknown", err - } - - return op, nil - } - - return "unknown", errors.Wrapf(ErrMissing, "operation missing path field") -} - -// From reads the "from" field of the Operation. -func (o Operation) From() (string, error) { - if obj, ok := o["from"]; ok && obj != nil { - var op string - - err := json.Unmarshal(*obj, &op) - - if err != nil { - return "unknown", err - } - - return op, nil - } - - return "unknown", errors.Wrapf(ErrMissing, "operation, missing from field") -} - -func (o Operation) value() *lazyNode { - if obj, ok := o["value"]; ok { - return newLazyNode(obj) - } - - return nil -} - -// ValueInterface decodes the operation value into an interface. -func (o Operation) ValueInterface() (interface{}, error) { - if obj, ok := o["value"]; ok && obj != nil { - var v interface{} - - err := json.Unmarshal(*obj, &v) - - if err != nil { - return nil, err - } - - return v, nil - } - - return nil, errors.Wrapf(ErrMissing, "operation, missing value field") -} - -func isArray(buf []byte) bool { -Loop: - for _, c := range buf { - switch c { - case ' ': - case '\n': - case '\t': - continue - case '[': - return true - default: - break Loop - } - } - - return false -} - -func findObject(pd *container, path string) (container, string) { - doc := *pd - - split := strings.Split(path, "/") - - if len(split) < 2 { - return nil, "" - } - - parts := split[1 : len(split)-1] - - key := split[len(split)-1] - - var err error - - for _, part := range parts { - - next, ok := doc.get(decodePatchKey(part)) - - if next == nil || ok != nil || next.raw == nil { - return nil, "" - } - - if isArray(*next.raw) { - doc, err = next.intoAry() - - if err != nil { - return nil, "" - } - } else { - doc, err = next.intoDoc() - - if err != nil { - return nil, "" - } - } - } - - return doc, decodePatchKey(key) -} - -func (d *partialDoc) set(key string, val *lazyNode) error { - (*d)[key] = val - return nil -} - -func (d *partialDoc) add(key string, val *lazyNode) error { - (*d)[key] = val - return nil -} - -func (d *partialDoc) get(key string) (*lazyNode, error) { - return (*d)[key], nil -} - -func (d *partialDoc) remove(key string) error { - _, ok := (*d)[key] - if !ok { - return errors.Wrapf(ErrMissing, "Unable to remove nonexistent key: %s", key) - } - - delete(*d, key) - return nil -} - -// set should only be used to implement the "replace" operation, so "key" must -// be an already existing index in "d". 
-func (d *partialArray) set(key string, val *lazyNode) error { - idx, err := strconv.Atoi(key) - if err != nil { - return err - } - - if idx < 0 { - if !SupportNegativeIndices { - return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) - } - if idx < -len(*d) { - return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) - } - idx += len(*d) - } - - (*d)[idx] = val - return nil -} - -func (d *partialArray) add(key string, val *lazyNode) error { - if key == "-" { - *d = append(*d, val) - return nil - } - - idx, err := strconv.Atoi(key) - if err != nil { - return errors.Wrapf(err, "value was not a proper array index: '%s'", key) - } - - sz := len(*d) + 1 - - ary := make([]*lazyNode, sz) - - cur := *d - - if idx >= len(ary) { - return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) - } - - if idx < 0 { - if !SupportNegativeIndices { - return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) - } - if idx < -len(ary) { - return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) - } - idx += len(ary) - } - - copy(ary[0:idx], cur[0:idx]) - ary[idx] = val - copy(ary[idx+1:], cur[idx:]) - - *d = ary - return nil -} - -func (d *partialArray) get(key string) (*lazyNode, error) { - idx, err := strconv.Atoi(key) - - if err != nil { - return nil, err - } - - if idx < 0 { - if !SupportNegativeIndices { - return nil, errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) - } - if idx < -len(*d) { - return nil, errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) - } - idx += len(*d) - } - - if idx >= len(*d) { - return nil, errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) - } - - return (*d)[idx], nil -} - -func (d *partialArray) remove(key string) error { - idx, err := strconv.Atoi(key) - if err != nil { - return err - } - - cur := *d - - if idx >= len(cur) { - return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) - } - - if idx < 0 { - if !SupportNegativeIndices { - return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) - } - if idx < -len(cur) { - return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) - } - idx += len(cur) - } - - ary := make([]*lazyNode, len(cur)-1) - - copy(ary[0:idx], cur[0:idx]) - copy(ary[idx:], cur[idx+1:]) - - *d = ary - return nil - -} - -func (p Patch) add(doc *container, op Operation) error { - path, err := op.Path() - if err != nil { - return errors.Wrapf(ErrMissing, "add operation failed to decode path") - } - - con, key := findObject(doc, path) - - if con == nil { - return errors.Wrapf(ErrMissing, "add operation does not apply: doc is missing path: \"%s\"", path) - } - - err = con.add(key, op.value()) - if err != nil { - return errors.Wrapf(err, "error in add for path: '%s'", path) - } - - return nil -} - -func (p Patch) remove(doc *container, op Operation) error { - path, err := op.Path() - if err != nil { - return errors.Wrapf(ErrMissing, "remove operation failed to decode path") - } - - con, key := findObject(doc, path) - - if con == nil { - return errors.Wrapf(ErrMissing, "remove operation does not apply: doc is missing path: \"%s\"", path) - } - - err = con.remove(key) - if err != nil { - return errors.Wrapf(err, "error in remove for path: '%s'", path) - } - - return nil -} - -func (p Patch) replace(doc *container, op Operation) error { - path, err := op.Path() - if err != nil { - return errors.Wrapf(err, "replace 
operation failed to decode path") - } - - if path == "" { - val := op.value() - - if val.which == eRaw { - if !val.tryDoc() { - if !val.tryAry() { - return errors.Wrapf(err, "replace operation value must be object or array") - } - } - } - - switch val.which { - case eAry: - *doc = &val.ary - case eDoc: - *doc = &val.doc - case eRaw: - return errors.Wrapf(err, "replace operation hit impossible case") - } - - return nil - } - - con, key := findObject(doc, path) - - if con == nil { - return errors.Wrapf(ErrMissing, "replace operation does not apply: doc is missing path: %s", path) - } - - _, ok := con.get(key) - if ok != nil { - return errors.Wrapf(ErrMissing, "replace operation does not apply: doc is missing key: %s", path) - } - - err = con.set(key, op.value()) - if err != nil { - return errors.Wrapf(err, "error in remove for path: '%s'", path) - } - - return nil -} - -func (p Patch) move(doc *container, op Operation) error { - from, err := op.From() - if err != nil { - return errors.Wrapf(err, "move operation failed to decode from") - } - - con, key := findObject(doc, from) - - if con == nil { - return errors.Wrapf(ErrMissing, "move operation does not apply: doc is missing from path: %s", from) - } - - val, err := con.get(key) - if err != nil { - return errors.Wrapf(err, "error in move for path: '%s'", key) - } - - err = con.remove(key) - if err != nil { - return errors.Wrapf(err, "error in move for path: '%s'", key) - } - - path, err := op.Path() - if err != nil { - return errors.Wrapf(err, "move operation failed to decode path") - } - - con, key = findObject(doc, path) - - if con == nil { - return errors.Wrapf(ErrMissing, "move operation does not apply: doc is missing destination path: %s", path) - } - - err = con.add(key, val) - if err != nil { - return errors.Wrapf(err, "error in move for path: '%s'", path) - } - - return nil -} - -func (p Patch) test(doc *container, op Operation) error { - path, err := op.Path() - if err != nil { - return errors.Wrapf(err, "test operation failed to decode path") - } - - if path == "" { - var self lazyNode - - switch sv := (*doc).(type) { - case *partialDoc: - self.doc = *sv - self.which = eDoc - case *partialArray: - self.ary = *sv - self.which = eAry - } - - if self.equal(op.value()) { - return nil - } - - return errors.Wrapf(ErrTestFailed, "testing value %s failed", path) - } - - con, key := findObject(doc, path) - - if con == nil { - return errors.Wrapf(ErrMissing, "test operation does not apply: is missing path: %s", path) - } - - val, err := con.get(key) - if err != nil { - return errors.Wrapf(err, "error in test for path: '%s'", path) - } - - if val == nil { - if op.value() == nil || op.value().raw == nil { - return nil - } - return errors.Wrapf(ErrTestFailed, "testing value %s failed", path) - } else if op.value() == nil { - return errors.Wrapf(ErrTestFailed, "testing value %s failed", path) - } - - if val.equal(op.value()) { - return nil - } - - return errors.Wrapf(ErrTestFailed, "testing value %s failed", path) -} - -func (p Patch) copy(doc *container, op Operation, accumulatedCopySize *int64) error { - from, err := op.From() - if err != nil { - return errors.Wrapf(err, "copy operation failed to decode from") - } - - con, key := findObject(doc, from) - - if con == nil { - return errors.Wrapf(ErrMissing, "copy operation does not apply: doc is missing from path: %s", from) - } - - val, err := con.get(key) - if err != nil { - return errors.Wrapf(err, "error in copy for from: '%s'", from) - } - - path, err := op.Path() - if err != nil { - return 
errors.Wrapf(ErrMissing, "copy operation failed to decode path") - } - - con, key = findObject(doc, path) - - if con == nil { - return errors.Wrapf(ErrMissing, "copy operation does not apply: doc is missing destination path: %s", path) - } - - valCopy, sz, err := deepCopy(val) - if err != nil { - return errors.Wrapf(err, "error while performing deep copy") - } - - (*accumulatedCopySize) += int64(sz) - if AccumulatedCopySizeLimit > 0 && *accumulatedCopySize > AccumulatedCopySizeLimit { - return NewAccumulatedCopySizeError(AccumulatedCopySizeLimit, *accumulatedCopySize) - } - - err = con.add(key, valCopy) - if err != nil { - return errors.Wrapf(err, "error while adding value during copy") - } - - return nil -} - -// Equal indicates if 2 JSON documents have the same structural equality. -func Equal(a, b []byte) bool { - ra := make(json.RawMessage, len(a)) - copy(ra, a) - la := newLazyNode(&ra) - - rb := make(json.RawMessage, len(b)) - copy(rb, b) - lb := newLazyNode(&rb) - - return la.equal(lb) -} - -// DecodePatch decodes the passed JSON document as an RFC 6902 patch. -func DecodePatch(buf []byte) (Patch, error) { - var p Patch - - err := json.Unmarshal(buf, &p) - - if err != nil { - return nil, err - } - - return p, nil -} - -// Apply mutates a JSON document according to the patch, and returns the new -// document. -func (p Patch) Apply(doc []byte) ([]byte, error) { - return p.ApplyIndent(doc, "") -} - -// ApplyIndent mutates a JSON document according to the patch, and returns the new -// document indented. -func (p Patch) ApplyIndent(doc []byte, indent string) ([]byte, error) { - if len(doc) == 0 { - return doc, nil - } - - var pd container - if doc[0] == '[' { - pd = &partialArray{} - } else { - pd = &partialDoc{} - } - - err := json.Unmarshal(doc, pd) - - if err != nil { - return nil, err - } - - err = nil - - var accumulatedCopySize int64 - - for _, op := range p { - switch op.Kind() { - case "add": - err = p.add(&pd, op) - case "remove": - err = p.remove(&pd, op) - case "replace": - err = p.replace(&pd, op) - case "move": - err = p.move(&pd, op) - case "test": - err = p.test(&pd, op) - case "copy": - err = p.copy(&pd, op, &accumulatedCopySize) - default: - err = fmt.Errorf("Unexpected kind: %s", op.Kind()) - } - - if err != nil { - return nil, err - } - } - - if indent != "" { - return json.MarshalIndent(pd, "", indent) - } - - return json.Marshal(pd) -} - -// From http://tools.ietf.org/html/rfc6901#section-4 : -// -// Evaluation of each reference token begins by decoding any escaped -// character sequence. This is performed by first transforming any -// occurrence of the sequence '~1' to '/', and then transforming any -// occurrence of the sequence '~0' to '~'. - -var ( - rfc6901Decoder = strings.NewReplacer("~1", "/", "~0", "~") -) - -func decodePatchKey(k string) string { - return rfc6901Decoder.Replace(k) -} diff --git a/vendor/github.com/golang/groupcache/LICENSE b/vendor/github.com/golang/groupcache/LICENSE deleted file mode 100644 index 37ec93a14..000000000 --- a/vendor/github.com/golang/groupcache/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ -Apache License -Version 2.0, January 2004 -http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - -"License" shall mean the terms and conditions for use, reproduction, and -distribution as defined by Sections 1 through 9 of this document. - -"Licensor" shall mean the copyright owner or entity authorized by the copyright -owner that is granting the License. 
- -"Legal Entity" shall mean the union of the acting entity and all other entities -that control, are controlled by, or are under common control with that entity. -For the purposes of this definition, "control" means (i) the power, direct or -indirect, to cause the direction or management of such entity, whether by -contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the -outstanding shares, or (iii) beneficial ownership of such entity. - -"You" (or "Your") shall mean an individual or Legal Entity exercising -permissions granted by this License. - -"Source" form shall mean the preferred form for making modifications, including -but not limited to software source code, documentation source, and configuration -files. - -"Object" form shall mean any form resulting from mechanical transformation or -translation of a Source form, including but not limited to compiled object code, -generated documentation, and conversions to other media types. - -"Work" shall mean the work of authorship, whether in Source or Object form, made -available under the License, as indicated by a copyright notice that is included -in or attached to the work (an example is provided in the Appendix below). - -"Derivative Works" shall mean any work, whether in Source or Object form, that -is based on (or derived from) the Work and for which the editorial revisions, -annotations, elaborations, or other modifications represent, as a whole, an -original work of authorship. For the purposes of this License, Derivative Works -shall not include works that remain separable from, or merely link (or bind by -name) to the interfaces of, the Work and Derivative Works thereof. - -"Contribution" shall mean any work of authorship, including the original version -of the Work and any modifications or additions to that Work or Derivative Works -thereof, that is intentionally submitted to Licensor for inclusion in the Work -by the copyright owner or by an individual or Legal Entity authorized to submit -on behalf of the copyright owner. For the purposes of this definition, -"submitted" means any form of electronic, verbal, or written communication sent -to the Licensor or its representatives, including but not limited to -communication on electronic mailing lists, source code control systems, and -issue tracking systems that are managed by, or on behalf of, the Licensor for -the purpose of discussing and improving the Work, but excluding communication -that is conspicuously marked or otherwise designated in writing by the copyright -owner as "Not a Contribution." - -"Contributor" shall mean Licensor and any individual or Legal Entity on behalf -of whom a Contribution has been received by Licensor and subsequently -incorporated within the Work. - -2. Grant of Copyright License. - -Subject to the terms and conditions of this License, each Contributor hereby -grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, -irrevocable copyright license to reproduce, prepare Derivative Works of, -publicly display, publicly perform, sublicense, and distribute the Work and such -Derivative Works in Source or Object form. - -3. Grant of Patent License. 
- -Subject to the terms and conditions of this License, each Contributor hereby -grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, -irrevocable (except as stated in this section) patent license to make, have -made, use, offer to sell, sell, import, and otherwise transfer the Work, where -such license applies only to those patent claims licensable by such Contributor -that are necessarily infringed by their Contribution(s) alone or by combination -of their Contribution(s) with the Work to which such Contribution(s) was -submitted. If You institute patent litigation against any entity (including a -cross-claim or counterclaim in a lawsuit) alleging that the Work or a -Contribution incorporated within the Work constitutes direct or contributory -patent infringement, then any patent licenses granted to You under this License -for that Work shall terminate as of the date such litigation is filed. - -4. Redistribution. - -You may reproduce and distribute copies of the Work or Derivative Works thereof -in any medium, with or without modifications, and in Source or Object form, -provided that You meet the following conditions: - -You must give any other recipients of the Work or Derivative Works a copy of -this License; and -You must cause any modified files to carry prominent notices stating that You -changed the files; and -You must retain, in the Source form of any Derivative Works that You distribute, -all copyright, patent, trademark, and attribution notices from the Source form -of the Work, excluding those notices that do not pertain to any part of the -Derivative Works; and -If the Work includes a "NOTICE" text file as part of its distribution, then any -Derivative Works that You distribute must include a readable copy of the -attribution notices contained within such NOTICE file, excluding those notices -that do not pertain to any part of the Derivative Works, in at least one of the -following places: within a NOTICE text file distributed as part of the -Derivative Works; within the Source form or documentation, if provided along -with the Derivative Works; or, within a display generated by the Derivative -Works, if and wherever such third-party notices normally appear. The contents of -the NOTICE file are for informational purposes only and do not modify the -License. You may add Your own attribution notices within Derivative Works that -You distribute, alongside or as an addendum to the NOTICE text from the Work, -provided that such additional attribution notices cannot be construed as -modifying the License. -You may add Your own copyright statement to Your modifications and may provide -additional or different license terms and conditions for use, reproduction, or -distribution of Your modifications, or for any such Derivative Works as a whole, -provided Your use, reproduction, and distribution of the Work otherwise complies -with the conditions stated in this License. - -5. Submission of Contributions. - -Unless You explicitly state otherwise, any Contribution intentionally submitted -for inclusion in the Work by You to the Licensor shall be under the terms and -conditions of this License, without any additional terms or conditions. -Notwithstanding the above, nothing herein shall supersede or modify the terms of -any separate license agreement you may have executed with Licensor regarding -such Contributions. - -6. Trademarks. 
- -This License does not grant permission to use the trade names, trademarks, -service marks, or product names of the Licensor, except as required for -reasonable and customary use in describing the origin of the Work and -reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. - -Unless required by applicable law or agreed to in writing, Licensor provides the -Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, -including, without limitation, any warranties or conditions of TITLE, -NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are -solely responsible for determining the appropriateness of using or -redistributing the Work and assume any risks associated with Your exercise of -permissions under this License. - -8. Limitation of Liability. - -In no event and under no legal theory, whether in tort (including negligence), -contract, or otherwise, unless required by applicable law (such as deliberate -and grossly negligent acts) or agreed to in writing, shall any Contributor be -liable to You for damages, including any direct, indirect, special, incidental, -or consequential damages of any character arising as a result of this License or -out of the use or inability to use the Work (including but not limited to -damages for loss of goodwill, work stoppage, computer failure or malfunction, or -any and all other commercial damages or losses), even if such Contributor has -been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. - -While redistributing the Work or Derivative Works thereof, You may choose to -offer, and charge a fee for, acceptance of support, warranty, indemnity, or -other liability obligations and/or rights consistent with this License. However, -in accepting such obligations, You may act only on Your own behalf and on Your -sole responsibility, not on behalf of any other Contributor, and only if You -agree to indemnify, defend, and hold each Contributor harmless for any liability -incurred by, or claims asserted against, such Contributor by reason of your -accepting any such warranty or additional liability. - -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work - -To apply the Apache License to your work, attach the following boilerplate -notice, with the fields enclosed by brackets "[]" replaced with your own -identifying information. (Don't include the brackets!) The text should be -enclosed in the appropriate comment syntax for the file format. We also -recommend that a file or class name and description of purpose be included on -the same "printed page" as the copyright notice for easier identification within -third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/vendor/github.com/golang/groupcache/lru/lru.go b/vendor/github.com/golang/groupcache/lru/lru.go deleted file mode 100644 index eac1c7664..000000000 --- a/vendor/github.com/golang/groupcache/lru/lru.go +++ /dev/null @@ -1,133 +0,0 @@ -/* -Copyright 2013 Google Inc. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package lru implements an LRU cache. -package lru - -import "container/list" - -// Cache is an LRU cache. It is not safe for concurrent access. -type Cache struct { - // MaxEntries is the maximum number of cache entries before - // an item is evicted. Zero means no limit. - MaxEntries int - - // OnEvicted optionally specifies a callback function to be - // executed when an entry is purged from the cache. - OnEvicted func(key Key, value interface{}) - - ll *list.List - cache map[interface{}]*list.Element -} - -// A Key may be any value that is comparable. See http://golang.org/ref/spec#Comparison_operators -type Key interface{} - -type entry struct { - key Key - value interface{} -} - -// New creates a new Cache. -// If maxEntries is zero, the cache has no limit and it's assumed -// that eviction is done by the caller. -func New(maxEntries int) *Cache { - return &Cache{ - MaxEntries: maxEntries, - ll: list.New(), - cache: make(map[interface{}]*list.Element), - } -} - -// Add adds a value to the cache. -func (c *Cache) Add(key Key, value interface{}) { - if c.cache == nil { - c.cache = make(map[interface{}]*list.Element) - c.ll = list.New() - } - if ee, ok := c.cache[key]; ok { - c.ll.MoveToFront(ee) - ee.Value.(*entry).value = value - return - } - ele := c.ll.PushFront(&entry{key, value}) - c.cache[key] = ele - if c.MaxEntries != 0 && c.ll.Len() > c.MaxEntries { - c.RemoveOldest() - } -} - -// Get looks up a key's value from the cache. -func (c *Cache) Get(key Key) (value interface{}, ok bool) { - if c.cache == nil { - return - } - if ele, hit := c.cache[key]; hit { - c.ll.MoveToFront(ele) - return ele.Value.(*entry).value, true - } - return -} - -// Remove removes the provided key from the cache. -func (c *Cache) Remove(key Key) { - if c.cache == nil { - return - } - if ele, hit := c.cache[key]; hit { - c.removeElement(ele) - } -} - -// RemoveOldest removes the oldest item from the cache. -func (c *Cache) RemoveOldest() { - if c.cache == nil { - return - } - ele := c.ll.Back() - if ele != nil { - c.removeElement(ele) - } -} - -func (c *Cache) removeElement(e *list.Element) { - c.ll.Remove(e) - kv := e.Value.(*entry) - delete(c.cache, kv.key) - if c.OnEvicted != nil { - c.OnEvicted(kv.key, kv.value) - } -} - -// Len returns the number of items in the cache. -func (c *Cache) Len() int { - if c.cache == nil { - return 0 - } - return c.ll.Len() -} - -// Clear purges all stored items from the cache. 
-func (c *Cache) Clear() { - if c.OnEvicted != nil { - for _, e := range c.cache { - kv := e.Value.(*entry) - c.OnEvicted(kv.key, kv.value) - } - } - c.ll = nil - c.cache = nil -} diff --git a/vendor/github.com/golang/protobuf/ptypes/any.go b/vendor/github.com/golang/protobuf/ptypes/any.go deleted file mode 100644 index fdff3fdb4..000000000 --- a/vendor/github.com/golang/protobuf/ptypes/any.go +++ /dev/null @@ -1,180 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ptypes - -import ( - "fmt" - "strings" - - "github.com/golang/protobuf/proto" - "google.golang.org/protobuf/reflect/protoreflect" - "google.golang.org/protobuf/reflect/protoregistry" - - anypb "github.com/golang/protobuf/ptypes/any" -) - -const urlPrefix = "type.googleapis.com/" - -// AnyMessageName returns the message name contained in an anypb.Any message. -// Most type assertions should use the Is function instead. -// -// Deprecated: Call the any.MessageName method instead. -func AnyMessageName(any *anypb.Any) (string, error) { - name, err := anyMessageName(any) - return string(name), err -} -func anyMessageName(any *anypb.Any) (protoreflect.FullName, error) { - if any == nil { - return "", fmt.Errorf("message is nil") - } - name := protoreflect.FullName(any.TypeUrl) - if i := strings.LastIndex(any.TypeUrl, "/"); i >= 0 { - name = name[i+len("/"):] - } - if !name.IsValid() { - return "", fmt.Errorf("message type url %q is invalid", any.TypeUrl) - } - return name, nil -} - -// MarshalAny marshals the given message m into an anypb.Any message. -// -// Deprecated: Call the anypb.New function instead. -func MarshalAny(m proto.Message) (*anypb.Any, error) { - switch dm := m.(type) { - case DynamicAny: - m = dm.Message - case *DynamicAny: - if dm == nil { - return nil, proto.ErrNil - } - m = dm.Message - } - b, err := proto.Marshal(m) - if err != nil { - return nil, err - } - return &anypb.Any{TypeUrl: urlPrefix + proto.MessageName(m), Value: b}, nil -} - -// Empty returns a new message of the type specified in an anypb.Any message. -// It returns protoregistry.NotFound if the corresponding message type could not -// be resolved in the global registry. -// -// Deprecated: Use protoregistry.GlobalTypes.FindMessageByName instead -// to resolve the message name and create a new instance of it. -func Empty(any *anypb.Any) (proto.Message, error) { - name, err := anyMessageName(any) - if err != nil { - return nil, err - } - mt, err := protoregistry.GlobalTypes.FindMessageByName(name) - if err != nil { - return nil, err - } - return proto.MessageV1(mt.New().Interface()), nil -} - -// UnmarshalAny unmarshals the encoded value contained in the anypb.Any message -// into the provided message m. It returns an error if the target message -// does not match the type in the Any message or if an unmarshal error occurs. -// -// The target message m may be a *DynamicAny message. If the underlying message -// type could not be resolved, then this returns protoregistry.NotFound. -// -// Deprecated: Call the any.UnmarshalTo method instead. 
-func UnmarshalAny(any *anypb.Any, m proto.Message) error { - if dm, ok := m.(*DynamicAny); ok { - if dm.Message == nil { - var err error - dm.Message, err = Empty(any) - if err != nil { - return err - } - } - m = dm.Message - } - - anyName, err := AnyMessageName(any) - if err != nil { - return err - } - msgName := proto.MessageName(m) - if anyName != msgName { - return fmt.Errorf("mismatched message type: got %q want %q", anyName, msgName) - } - return proto.Unmarshal(any.Value, m) -} - -// Is reports whether the Any message contains a message of the specified type. -// -// Deprecated: Call the any.MessageIs method instead. -func Is(any *anypb.Any, m proto.Message) bool { - if any == nil || m == nil { - return false - } - name := proto.MessageName(m) - if !strings.HasSuffix(any.TypeUrl, name) { - return false - } - return len(any.TypeUrl) == len(name) || any.TypeUrl[len(any.TypeUrl)-len(name)-1] == '/' -} - -// DynamicAny is a value that can be passed to UnmarshalAny to automatically -// allocate a proto.Message for the type specified in an anypb.Any message. -// The allocated message is stored in the embedded proto.Message. -// -// Example: -// -// var x ptypes.DynamicAny -// if err := ptypes.UnmarshalAny(a, &x); err != nil { ... } -// fmt.Printf("unmarshaled message: %v", x.Message) -// -// Deprecated: Use the any.UnmarshalNew method instead to unmarshal -// the any message contents into a new instance of the underlying message. -type DynamicAny struct{ proto.Message } - -func (m DynamicAny) String() string { - if m.Message == nil { - return "" - } - return m.Message.String() -} -func (m DynamicAny) Reset() { - if m.Message == nil { - return - } - m.Message.Reset() -} -func (m DynamicAny) ProtoMessage() { - return -} -func (m DynamicAny) ProtoReflect() protoreflect.Message { - if m.Message == nil { - return nil - } - return dynamicAny{proto.MessageReflect(m.Message)} -} - -type dynamicAny struct{ protoreflect.Message } - -func (m dynamicAny) Type() protoreflect.MessageType { - return dynamicAnyType{m.Message.Type()} -} -func (m dynamicAny) New() protoreflect.Message { - return dynamicAnyType{m.Message.Type()}.New() -} -func (m dynamicAny) Interface() protoreflect.ProtoMessage { - return DynamicAny{proto.MessageV1(m.Message.Interface())} -} - -type dynamicAnyType struct{ protoreflect.MessageType } - -func (t dynamicAnyType) New() protoreflect.Message { - return dynamicAny{t.MessageType.New()} -} -func (t dynamicAnyType) Zero() protoreflect.Message { - return dynamicAny{t.MessageType.Zero()} -} diff --git a/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go b/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go deleted file mode 100644 index 0ef27d33d..000000000 --- a/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go +++ /dev/null @@ -1,62 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: github.com/golang/protobuf/ptypes/any/any.proto - -package any - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - anypb "google.golang.org/protobuf/types/known/anypb" - reflect "reflect" -) - -// Symbols defined in public import of google/protobuf/any.proto. 
- -type Any = anypb.Any - -var File_github_com_golang_protobuf_ptypes_any_any_proto protoreflect.FileDescriptor - -var file_github_com_golang_protobuf_ptypes_any_any_proto_rawDesc = []byte{ - 0x0a, 0x2f, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, - 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, - 0x70, 0x65, 0x73, 0x2f, 0x61, 0x6e, 0x79, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x2b, 0x5a, 0x29, - 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, - 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65, - 0x73, 0x2f, 0x61, 0x6e, 0x79, 0x3b, 0x61, 0x6e, 0x79, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x33, -} - -var file_github_com_golang_protobuf_ptypes_any_any_proto_goTypes = []interface{}{} -var file_github_com_golang_protobuf_ptypes_any_any_proto_depIdxs = []int32{ - 0, // [0:0] is the sub-list for method output_type - 0, // [0:0] is the sub-list for method input_type - 0, // [0:0] is the sub-list for extension type_name - 0, // [0:0] is the sub-list for extension extendee - 0, // [0:0] is the sub-list for field type_name -} - -func init() { file_github_com_golang_protobuf_ptypes_any_any_proto_init() } -func file_github_com_golang_protobuf_ptypes_any_any_proto_init() { - if File_github_com_golang_protobuf_ptypes_any_any_proto != nil { - return - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_github_com_golang_protobuf_ptypes_any_any_proto_rawDesc, - NumEnums: 0, - NumMessages: 0, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_github_com_golang_protobuf_ptypes_any_any_proto_goTypes, - DependencyIndexes: file_github_com_golang_protobuf_ptypes_any_any_proto_depIdxs, - }.Build() - File_github_com_golang_protobuf_ptypes_any_any_proto = out.File - file_github_com_golang_protobuf_ptypes_any_any_proto_rawDesc = nil - file_github_com_golang_protobuf_ptypes_any_any_proto_goTypes = nil - file_github_com_golang_protobuf_ptypes_any_any_proto_depIdxs = nil -} diff --git a/vendor/github.com/golang/protobuf/ptypes/doc.go b/vendor/github.com/golang/protobuf/ptypes/doc.go deleted file mode 100644 index d3c33259d..000000000 --- a/vendor/github.com/golang/protobuf/ptypes/doc.go +++ /dev/null @@ -1,10 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package ptypes provides functionality for interacting with well-known types. -// -// Deprecated: Well-known types have specialized functionality directly -// injected into the generated packages for each message type. -// See the deprecation notice for each function for the suggested alternative. -package ptypes diff --git a/vendor/github.com/golang/protobuf/ptypes/duration.go b/vendor/github.com/golang/protobuf/ptypes/duration.go deleted file mode 100644 index b2b55dd85..000000000 --- a/vendor/github.com/golang/protobuf/ptypes/duration.go +++ /dev/null @@ -1,76 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package ptypes - -import ( - "errors" - "fmt" - "time" - - durationpb "github.com/golang/protobuf/ptypes/duration" -) - -// Range of google.protobuf.Duration as specified in duration.proto. -// This is about 10,000 years in seconds. -const ( - maxSeconds = int64(10000 * 365.25 * 24 * 60 * 60) - minSeconds = -maxSeconds -) - -// Duration converts a durationpb.Duration to a time.Duration. -// Duration returns an error if dur is invalid or overflows a time.Duration. -// -// Deprecated: Call the dur.AsDuration and dur.CheckValid methods instead. -func Duration(dur *durationpb.Duration) (time.Duration, error) { - if err := validateDuration(dur); err != nil { - return 0, err - } - d := time.Duration(dur.Seconds) * time.Second - if int64(d/time.Second) != dur.Seconds { - return 0, fmt.Errorf("duration: %v is out of range for time.Duration", dur) - } - if dur.Nanos != 0 { - d += time.Duration(dur.Nanos) * time.Nanosecond - if (d < 0) != (dur.Nanos < 0) { - return 0, fmt.Errorf("duration: %v is out of range for time.Duration", dur) - } - } - return d, nil -} - -// DurationProto converts a time.Duration to a durationpb.Duration. -// -// Deprecated: Call the durationpb.New function instead. -func DurationProto(d time.Duration) *durationpb.Duration { - nanos := d.Nanoseconds() - secs := nanos / 1e9 - nanos -= secs * 1e9 - return &durationpb.Duration{ - Seconds: int64(secs), - Nanos: int32(nanos), - } -} - -// validateDuration determines whether the durationpb.Duration is valid -// according to the definition in google/protobuf/duration.proto. -// A valid durpb.Duration may still be too large to fit into a time.Duration -// Note that the range of durationpb.Duration is about 10,000 years, -// while the range of time.Duration is about 290 years. -func validateDuration(dur *durationpb.Duration) error { - if dur == nil { - return errors.New("duration: nil Duration") - } - if dur.Seconds < minSeconds || dur.Seconds > maxSeconds { - return fmt.Errorf("duration: %v: seconds out of range", dur) - } - if dur.Nanos <= -1e9 || dur.Nanos >= 1e9 { - return fmt.Errorf("duration: %v: nanos out of range", dur) - } - // Seconds and Nanos must have the same sign, unless d.Nanos is zero. - if (dur.Seconds < 0 && dur.Nanos > 0) || (dur.Seconds > 0 && dur.Nanos < 0) { - return fmt.Errorf("duration: %v: seconds and nanos have different signs", dur) - } - return nil -} diff --git a/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go b/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go deleted file mode 100644 index d0079ee3e..000000000 --- a/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go +++ /dev/null @@ -1,63 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: github.com/golang/protobuf/ptypes/duration/duration.proto - -package duration - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - durationpb "google.golang.org/protobuf/types/known/durationpb" - reflect "reflect" -) - -// Symbols defined in public import of google/protobuf/duration.proto. 
- -type Duration = durationpb.Duration - -var File_github_com_golang_protobuf_ptypes_duration_duration_proto protoreflect.FileDescriptor - -var file_github_com_golang_protobuf_ptypes_duration_duration_proto_rawDesc = []byte{ - 0x0a, 0x39, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, - 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, - 0x70, 0x65, 0x73, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x64, 0x75, 0x72, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x35, 0x5a, 0x33, 0x67, - 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, - 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65, 0x73, - 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x3b, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var file_github_com_golang_protobuf_ptypes_duration_duration_proto_goTypes = []interface{}{} -var file_github_com_golang_protobuf_ptypes_duration_duration_proto_depIdxs = []int32{ - 0, // [0:0] is the sub-list for method output_type - 0, // [0:0] is the sub-list for method input_type - 0, // [0:0] is the sub-list for extension type_name - 0, // [0:0] is the sub-list for extension extendee - 0, // [0:0] is the sub-list for field type_name -} - -func init() { file_github_com_golang_protobuf_ptypes_duration_duration_proto_init() } -func file_github_com_golang_protobuf_ptypes_duration_duration_proto_init() { - if File_github_com_golang_protobuf_ptypes_duration_duration_proto != nil { - return - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_github_com_golang_protobuf_ptypes_duration_duration_proto_rawDesc, - NumEnums: 0, - NumMessages: 0, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_github_com_golang_protobuf_ptypes_duration_duration_proto_goTypes, - DependencyIndexes: file_github_com_golang_protobuf_ptypes_duration_duration_proto_depIdxs, - }.Build() - File_github_com_golang_protobuf_ptypes_duration_duration_proto = out.File - file_github_com_golang_protobuf_ptypes_duration_duration_proto_rawDesc = nil - file_github_com_golang_protobuf_ptypes_duration_duration_proto_goTypes = nil - file_github_com_golang_protobuf_ptypes_duration_duration_proto_depIdxs = nil -} diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp.go b/vendor/github.com/golang/protobuf/ptypes/timestamp.go deleted file mode 100644 index 8368a3f70..000000000 --- a/vendor/github.com/golang/protobuf/ptypes/timestamp.go +++ /dev/null @@ -1,112 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ptypes - -import ( - "errors" - "fmt" - "time" - - timestamppb "github.com/golang/protobuf/ptypes/timestamp" -) - -// Range of google.protobuf.Duration as specified in timestamp.proto. -const ( - // Seconds field of the earliest valid Timestamp. - // This is time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix(). - minValidSeconds = -62135596800 - // Seconds field just after the latest valid Timestamp. 
- // This is time.Date(10000, 1, 1, 0, 0, 0, 0, time.UTC).Unix(). - maxValidSeconds = 253402300800 -) - -// Timestamp converts a timestamppb.Timestamp to a time.Time. -// It returns an error if the argument is invalid. -// -// Unlike most Go functions, if Timestamp returns an error, the first return -// value is not the zero time.Time. Instead, it is the value obtained from the -// time.Unix function when passed the contents of the Timestamp, in the UTC -// locale. This may or may not be a meaningful time; many invalid Timestamps -// do map to valid time.Times. -// -// A nil Timestamp returns an error. The first return value in that case is -// undefined. -// -// Deprecated: Call the ts.AsTime and ts.CheckValid methods instead. -func Timestamp(ts *timestamppb.Timestamp) (time.Time, error) { - // Don't return the zero value on error, because corresponds to a valid - // timestamp. Instead return whatever time.Unix gives us. - var t time.Time - if ts == nil { - t = time.Unix(0, 0).UTC() // treat nil like the empty Timestamp - } else { - t = time.Unix(ts.Seconds, int64(ts.Nanos)).UTC() - } - return t, validateTimestamp(ts) -} - -// TimestampNow returns a google.protobuf.Timestamp for the current time. -// -// Deprecated: Call the timestamppb.Now function instead. -func TimestampNow() *timestamppb.Timestamp { - ts, err := TimestampProto(time.Now()) - if err != nil { - panic("ptypes: time.Now() out of Timestamp range") - } - return ts -} - -// TimestampProto converts the time.Time to a google.protobuf.Timestamp proto. -// It returns an error if the resulting Timestamp is invalid. -// -// Deprecated: Call the timestamppb.New function instead. -func TimestampProto(t time.Time) (*timestamppb.Timestamp, error) { - ts := &timestamppb.Timestamp{ - Seconds: t.Unix(), - Nanos: int32(t.Nanosecond()), - } - if err := validateTimestamp(ts); err != nil { - return nil, err - } - return ts, nil -} - -// TimestampString returns the RFC 3339 string for valid Timestamps. -// For invalid Timestamps, it returns an error message in parentheses. -// -// Deprecated: Call the ts.AsTime method instead, -// followed by a call to the Format method on the time.Time value. -func TimestampString(ts *timestamppb.Timestamp) string { - t, err := Timestamp(ts) - if err != nil { - return fmt.Sprintf("(%v)", err) - } - return t.Format(time.RFC3339Nano) -} - -// validateTimestamp determines whether a Timestamp is valid. -// A valid timestamp represents a time in the range [0001-01-01, 10000-01-01) -// and has a Nanos field in the range [0, 1e9). -// -// If the Timestamp is valid, validateTimestamp returns nil. -// Otherwise, it returns an error that describes the problem. -// -// Every valid Timestamp can be represented by a time.Time, -// but the converse is not true.
-func validateTimestamp(ts *timestamppb.Timestamp) error { - if ts == nil { - return errors.New("timestamp: nil Timestamp") - } - if ts.Seconds < minValidSeconds { - return fmt.Errorf("timestamp: %v before 0001-01-01", ts) - } - if ts.Seconds >= maxValidSeconds { - return fmt.Errorf("timestamp: %v after 10000-01-01", ts) - } - if ts.Nanos < 0 || ts.Nanos >= 1e9 { - return fmt.Errorf("timestamp: %v: nanos not in range [0, 1e9)", ts) - } - return nil -} diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go deleted file mode 100644 index a76f80760..000000000 --- a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go +++ /dev/null @@ -1,64 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: github.com/golang/protobuf/ptypes/timestamp/timestamp.proto - -package timestamp - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - timestamppb "google.golang.org/protobuf/types/known/timestamppb" - reflect "reflect" -) - -// Symbols defined in public import of google/protobuf/timestamp.proto. - -type Timestamp = timestamppb.Timestamp - -var File_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto protoreflect.FileDescriptor - -var file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_rawDesc = []byte{ - 0x0a, 0x3b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, - 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, - 0x70, 0x65, 0x73, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2f, 0x74, 0x69, - 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, - 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x37, - 0x5a, 0x35, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, - 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, - 0x70, 0x65, 0x73, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x3b, 0x74, 0x69, - 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x33, -} - -var file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_goTypes = []interface{}{} -var file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_depIdxs = []int32{ - 0, // [0:0] is the sub-list for method output_type - 0, // [0:0] is the sub-list for method input_type - 0, // [0:0] is the sub-list for extension type_name - 0, // [0:0] is the sub-list for extension extendee - 0, // [0:0] is the sub-list for field type_name -} - -func init() { file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_init() } -func file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_init() { - if File_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto != nil { - return - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_rawDesc, - NumEnums: 0, - NumMessages: 0, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_goTypes, - DependencyIndexes: 
file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_depIdxs, - }.Build() - File_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto = out.File - file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_rawDesc = nil - file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_goTypes = nil - file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_depIdxs = nil -} diff --git a/vendor/github.com/google/btree/LICENSE b/vendor/github.com/google/btree/LICENSE new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/vendor/github.com/google/btree/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/google/btree/README.md b/vendor/github.com/google/btree/README.md new file mode 100644 index 000000000..eab5dbf7b --- /dev/null +++ b/vendor/github.com/google/btree/README.md @@ -0,0 +1,10 @@ +# BTree implementation for Go + +This package provides an in-memory B-Tree implementation for Go, useful as +an ordered, mutable data structure. + +The API is based off of the wonderful +http://godoc.org/github.com/petar/GoLLRB/llrb, and is meant to allow btree to +act as a drop-in replacement for gollrb trees. + +See http://godoc.org/github.com/google/btree for documentation. diff --git a/vendor/github.com/google/btree/btree.go b/vendor/github.com/google/btree/btree.go new file mode 100644 index 000000000..6f5184fef --- /dev/null +++ b/vendor/github.com/google/btree/btree.go @@ -0,0 +1,893 @@ +// Copyright 2014 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build !go1.18 +// +build !go1.18 + +// Package btree implements in-memory B-Trees of arbitrary degree. +// +// btree implements an in-memory B-Tree for use as an ordered data structure. +// It is not meant for persistent storage solutions. +// +// It has a flatter structure than an equivalent red-black or other binary tree, +// which in some cases yields better memory usage and/or performance. +// See some discussion on the matter here: +// http://google-opensource.blogspot.com/2013/01/c-containers-that-save-memory-and-time.html +// Note, though, that this project is in no way related to the C++ B-Tree +// implementation written about there. +// +// Within this tree, each node contains a slice of items and a (possibly nil) +// slice of children. 
For basic numeric values or raw structs, this can cause +// efficiency differences when compared to equivalent C++ template code that +// stores values in arrays within the node: +// * Due to the overhead of storing values as interfaces (each +// value needs to be stored as the value itself, then 2 words for the +// interface pointing to that value and its type), resulting in higher +// memory use. +// * Since interfaces can point to values anywhere in memory, values are +// most likely not stored in contiguous blocks, resulting in a higher +// number of cache misses. +// These issues don't tend to matter, though, when working with strings or other +// heap-allocated structures, since C++-equivalent structures also must store +// pointers and also distribute their values across the heap. +// +// This implementation is designed to be a drop-in replacement to gollrb.LLRB +// trees, (http://github.com/petar/gollrb), an excellent and probably the most +// widely used ordered tree implementation in the Go ecosystem currently. +// Its functions, therefore, exactly mirror those of +// llrb.LLRB where possible. Unlike gollrb, though, we currently don't +// support storing multiple equivalent values. +package btree + +import ( + "fmt" + "io" + "sort" + "strings" + "sync" +) + +// Item represents a single object in the tree. +type Item interface { + // Less tests whether the current item is less than the given argument. + // + // This must provide a strict weak ordering. + // If !a.Less(b) && !b.Less(a), we treat this to mean a == b (i.e. we can only + // hold one of either a or b in the tree). + Less(than Item) bool +} + +const ( + DefaultFreeListSize = 32 +) + +var ( + nilItems = make(items, 16) + nilChildren = make(children, 16) +) + +// FreeList represents a free list of btree nodes. By default each +// BTree has its own FreeList, but multiple BTrees can share the same +// FreeList. +// Two Btrees using the same freelist are safe for concurrent write access. +type FreeList struct { + mu sync.Mutex + freelist []*node +} + +// NewFreeList creates a new free list. +// size is the maximum size of the returned free list. +func NewFreeList(size int) *FreeList { + return &FreeList{freelist: make([]*node, 0, size)} +} + +func (f *FreeList) newNode() (n *node) { + f.mu.Lock() + index := len(f.freelist) - 1 + if index < 0 { + f.mu.Unlock() + return new(node) + } + n = f.freelist[index] + f.freelist[index] = nil + f.freelist = f.freelist[:index] + f.mu.Unlock() + return +} + +// freeNode adds the given node to the list, returning true if it was added +// and false if it was discarded. +func (f *FreeList) freeNode(n *node) (out bool) { + f.mu.Lock() + if len(f.freelist) < cap(f.freelist) { + f.freelist = append(f.freelist, n) + out = true + } + f.mu.Unlock() + return +} + +// ItemIterator allows callers of Ascend* to iterate in-order over portions of +// the tree. When this function returns false, iteration will stop and the +// associated Ascend* function will immediately return. +type ItemIterator func(i Item) bool + +// New creates a new B-Tree with the given degree. +// +// New(2), for example, will create a 2-3-4 tree (each node contains 1-3 items +// and 2-4 children). +func New(degree int) *BTree { + return NewWithFreeList(degree, NewFreeList(DefaultFreeListSize)) +} + +// NewWithFreeList creates a new B-Tree that uses the given node free list. 
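Illustrative aside (not part of the vendored file): a minimal sketch of how the non-generic constructor documented above is typically used, relying only on exported names that appear later in this same file (ReplaceOrInsert, Get, Has, Len and the Int helper type).

package main

import (
	"fmt"

	"github.com/google/btree"
)

func main() {
	// Degree 2 gives a 2-3-4 tree: each node holds 1-3 items and 2-4 children.
	tr := btree.New(2)
	for i := 0; i < 10; i++ {
		tr.ReplaceOrInsert(btree.Int(i)) // btree.Int satisfies Item via its Less method
	}
	fmt.Println(tr.Len())              // 10
	fmt.Println(tr.Get(btree.Int(3)))  // 3
	fmt.Println(tr.Has(btree.Int(42))) // false
}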
+func NewWithFreeList(degree int, f *FreeList) *BTree { + if degree <= 1 { + panic("bad degree") + } + return &BTree{ + degree: degree, + cow: &copyOnWriteContext{freelist: f}, + } +} + +// items stores items in a node. +type items []Item + +// insertAt inserts a value into the given index, pushing all subsequent values +// forward. +func (s *items) insertAt(index int, item Item) { + *s = append(*s, nil) + if index < len(*s) { + copy((*s)[index+1:], (*s)[index:]) + } + (*s)[index] = item +} + +// removeAt removes a value at a given index, pulling all subsequent values +// back. +func (s *items) removeAt(index int) Item { + item := (*s)[index] + copy((*s)[index:], (*s)[index+1:]) + (*s)[len(*s)-1] = nil + *s = (*s)[:len(*s)-1] + return item +} + +// pop removes and returns the last element in the list. +func (s *items) pop() (out Item) { + index := len(*s) - 1 + out = (*s)[index] + (*s)[index] = nil + *s = (*s)[:index] + return +} + +// truncate truncates this instance at index so that it contains only the +// first index items. index must be less than or equal to length. +func (s *items) truncate(index int) { + var toClear items + *s, toClear = (*s)[:index], (*s)[index:] + for len(toClear) > 0 { + toClear = toClear[copy(toClear, nilItems):] + } +} + +// find returns the index where the given item should be inserted into this +// list. 'found' is true if the item already exists in the list at the given +// index. +func (s items) find(item Item) (index int, found bool) { + i := sort.Search(len(s), func(i int) bool { + return item.Less(s[i]) + }) + if i > 0 && !s[i-1].Less(item) { + return i - 1, true + } + return i, false +} + +// children stores child nodes in a node. +type children []*node + +// insertAt inserts a value into the given index, pushing all subsequent values +// forward. +func (s *children) insertAt(index int, n *node) { + *s = append(*s, nil) + if index < len(*s) { + copy((*s)[index+1:], (*s)[index:]) + } + (*s)[index] = n +} + +// removeAt removes a value at a given index, pulling all subsequent values +// back. +func (s *children) removeAt(index int) *node { + n := (*s)[index] + copy((*s)[index:], (*s)[index+1:]) + (*s)[len(*s)-1] = nil + *s = (*s)[:len(*s)-1] + return n +} + +// pop removes and returns the last element in the list. +func (s *children) pop() (out *node) { + index := len(*s) - 1 + out = (*s)[index] + (*s)[index] = nil + *s = (*s)[:index] + return +} + +// truncate truncates this instance at index so that it contains only the +// first index children. index must be less than or equal to length. +func (s *children) truncate(index int) { + var toClear children + *s, toClear = (*s)[:index], (*s)[index:] + for len(toClear) > 0 { + toClear = toClear[copy(toClear, nilChildren):] + } +} + +// node is an internal node in a tree. 
+// +// It must at all times maintain the invariant that either +// * len(children) == 0, len(items) unconstrained +// * len(children) == len(items) + 1 +type node struct { + items items + children children + cow *copyOnWriteContext +} + +func (n *node) mutableFor(cow *copyOnWriteContext) *node { + if n.cow == cow { + return n + } + out := cow.newNode() + if cap(out.items) >= len(n.items) { + out.items = out.items[:len(n.items)] + } else { + out.items = make(items, len(n.items), cap(n.items)) + } + copy(out.items, n.items) + // Copy children + if cap(out.children) >= len(n.children) { + out.children = out.children[:len(n.children)] + } else { + out.children = make(children, len(n.children), cap(n.children)) + } + copy(out.children, n.children) + return out +} + +func (n *node) mutableChild(i int) *node { + c := n.children[i].mutableFor(n.cow) + n.children[i] = c + return c +} + +// split splits the given node at the given index. The current node shrinks, +// and this function returns the item that existed at that index and a new node +// containing all items/children after it. +func (n *node) split(i int) (Item, *node) { + item := n.items[i] + next := n.cow.newNode() + next.items = append(next.items, n.items[i+1:]...) + n.items.truncate(i) + if len(n.children) > 0 { + next.children = append(next.children, n.children[i+1:]...) + n.children.truncate(i + 1) + } + return item, next +} + +// maybeSplitChild checks if a child should be split, and if so splits it. +// Returns whether or not a split occurred. +func (n *node) maybeSplitChild(i, maxItems int) bool { + if len(n.children[i].items) < maxItems { + return false + } + first := n.mutableChild(i) + item, second := first.split(maxItems / 2) + n.items.insertAt(i, item) + n.children.insertAt(i+1, second) + return true +} + +// insert inserts an item into the subtree rooted at this node, making sure +// no nodes in the subtree exceed maxItems items. Should an equivalent item be +// be found/replaced by insert, it will be returned. +func (n *node) insert(item Item, maxItems int) Item { + i, found := n.items.find(item) + if found { + out := n.items[i] + n.items[i] = item + return out + } + if len(n.children) == 0 { + n.items.insertAt(i, item) + return nil + } + if n.maybeSplitChild(i, maxItems) { + inTree := n.items[i] + switch { + case item.Less(inTree): + // no change, we want first split node + case inTree.Less(item): + i++ // we want second split node + default: + out := n.items[i] + n.items[i] = item + return out + } + } + return n.mutableChild(i).insert(item, maxItems) +} + +// get finds the given key in the subtree and returns it. +func (n *node) get(key Item) Item { + i, found := n.items.find(key) + if found { + return n.items[i] + } else if len(n.children) > 0 { + return n.children[i].get(key) + } + return nil +} + +// min returns the first item in the subtree. +func min(n *node) Item { + if n == nil { + return nil + } + for len(n.children) > 0 { + n = n.children[0] + } + if len(n.items) == 0 { + return nil + } + return n.items[0] +} + +// max returns the last item in the subtree. +func max(n *node) Item { + if n == nil { + return nil + } + for len(n.children) > 0 { + n = n.children[len(n.children)-1] + } + if len(n.items) == 0 { + return nil + } + return n.items[len(n.items)-1] +} + +// toRemove details what item to remove in a node.remove call. 
+type toRemove int + +const ( + removeItem toRemove = iota // removes the given item + removeMin // removes smallest item in the subtree + removeMax // removes largest item in the subtree +) + +// remove removes an item from the subtree rooted at this node. +func (n *node) remove(item Item, minItems int, typ toRemove) Item { + var i int + var found bool + switch typ { + case removeMax: + if len(n.children) == 0 { + return n.items.pop() + } + i = len(n.items) + case removeMin: + if len(n.children) == 0 { + return n.items.removeAt(0) + } + i = 0 + case removeItem: + i, found = n.items.find(item) + if len(n.children) == 0 { + if found { + return n.items.removeAt(i) + } + return nil + } + default: + panic("invalid type") + } + // If we get to here, we have children. + if len(n.children[i].items) <= minItems { + return n.growChildAndRemove(i, item, minItems, typ) + } + child := n.mutableChild(i) + // Either we had enough items to begin with, or we've done some + // merging/stealing, because we've got enough now and we're ready to return + // stuff. + if found { + // The item exists at index 'i', and the child we've selected can give us a + // predecessor, since if we've gotten here it's got > minItems items in it. + out := n.items[i] + // We use our special-case 'remove' call with typ=maxItem to pull the + // predecessor of item i (the rightmost leaf of our immediate left child) + // and set it into where we pulled the item from. + n.items[i] = child.remove(nil, minItems, removeMax) + return out + } + // Final recursive call. Once we're here, we know that the item isn't in this + // node and that the child is big enough to remove from. + return child.remove(item, minItems, typ) +} + +// growChildAndRemove grows child 'i' to make sure it's possible to remove an +// item from it while keeping it at minItems, then calls remove to actually +// remove it. +// +// Most documentation says we have to do two sets of special casing: +// 1) item is in this node +// 2) item is in child +// In both cases, we need to handle the two subcases: +// A) node has enough values that it can spare one +// B) node doesn't have enough values +// For the latter, we have to check: +// a) left sibling has node to spare +// b) right sibling has node to spare +// c) we must merge +// To simplify our code here, we handle cases #1 and #2 the same: +// If a node doesn't have enough items, we make sure it does (using a,b,c). +// We then simply redo our remove call, and the second time (regardless of +// whether we're in case 1 or 2), we'll have enough items and can guarantee +// that we hit case A. 
+func (n *node) growChildAndRemove(i int, item Item, minItems int, typ toRemove) Item { + if i > 0 && len(n.children[i-1].items) > minItems { + // Steal from left child + child := n.mutableChild(i) + stealFrom := n.mutableChild(i - 1) + stolenItem := stealFrom.items.pop() + child.items.insertAt(0, n.items[i-1]) + n.items[i-1] = stolenItem + if len(stealFrom.children) > 0 { + child.children.insertAt(0, stealFrom.children.pop()) + } + } else if i < len(n.items) && len(n.children[i+1].items) > minItems { + // steal from right child + child := n.mutableChild(i) + stealFrom := n.mutableChild(i + 1) + stolenItem := stealFrom.items.removeAt(0) + child.items = append(child.items, n.items[i]) + n.items[i] = stolenItem + if len(stealFrom.children) > 0 { + child.children = append(child.children, stealFrom.children.removeAt(0)) + } + } else { + if i >= len(n.items) { + i-- + } + child := n.mutableChild(i) + // merge with right child + mergeItem := n.items.removeAt(i) + mergeChild := n.children.removeAt(i + 1).mutableFor(n.cow) + child.items = append(child.items, mergeItem) + child.items = append(child.items, mergeChild.items...) + child.children = append(child.children, mergeChild.children...) + n.cow.freeNode(mergeChild) + } + return n.remove(item, minItems, typ) +} + +type direction int + +const ( + descend = direction(-1) + ascend = direction(+1) +) + +// iterate provides a simple method for iterating over elements in the tree. +// +// When ascending, the 'start' should be less than 'stop' and when descending, +// the 'start' should be greater than 'stop'. Setting 'includeStart' to true +// will force the iterator to include the first item when it equals 'start', +// thus creating a "greaterOrEqual" or "lessThanEqual" rather than just a +// "greaterThan" or "lessThan" queries. +func (n *node) iterate(dir direction, start, stop Item, includeStart bool, hit bool, iter ItemIterator) (bool, bool) { + var ok, found bool + var index int + switch dir { + case ascend: + if start != nil { + index, _ = n.items.find(start) + } + for i := index; i < len(n.items); i++ { + if len(n.children) > 0 { + if hit, ok = n.children[i].iterate(dir, start, stop, includeStart, hit, iter); !ok { + return hit, false + } + } + if !includeStart && !hit && start != nil && !start.Less(n.items[i]) { + hit = true + continue + } + hit = true + if stop != nil && !n.items[i].Less(stop) { + return hit, false + } + if !iter(n.items[i]) { + return hit, false + } + } + if len(n.children) > 0 { + if hit, ok = n.children[len(n.children)-1].iterate(dir, start, stop, includeStart, hit, iter); !ok { + return hit, false + } + } + case descend: + if start != nil { + index, found = n.items.find(start) + if !found { + index = index - 1 + } + } else { + index = len(n.items) - 1 + } + for i := index; i >= 0; i-- { + if start != nil && !n.items[i].Less(start) { + if !includeStart || hit || start.Less(n.items[i]) { + continue + } + } + if len(n.children) > 0 { + if hit, ok = n.children[i+1].iterate(dir, start, stop, includeStart, hit, iter); !ok { + return hit, false + } + } + if stop != nil && !stop.Less(n.items[i]) { + return hit, false // continue + } + hit = true + if !iter(n.items[i]) { + return hit, false + } + } + if len(n.children) > 0 { + if hit, ok = n.children[0].iterate(dir, start, stop, includeStart, hit, iter); !ok { + return hit, false + } + } + } + return hit, true +} + +// Used for testing/debugging purposes. 
+func (n *node) print(w io.Writer, level int) { + fmt.Fprintf(w, "%sNODE:%v\n", strings.Repeat(" ", level), n.items) + for _, c := range n.children { + c.print(w, level+1) + } +} + +// BTree is an implementation of a B-Tree. +// +// BTree stores Item instances in an ordered structure, allowing easy insertion, +// removal, and iteration. +// +// Write operations are not safe for concurrent mutation by multiple +// goroutines, but Read operations are. +type BTree struct { + degree int + length int + root *node + cow *copyOnWriteContext +} + +// copyOnWriteContext pointers determine node ownership... a tree with a write +// context equivalent to a node's write context is allowed to modify that node. +// A tree whose write context does not match a node's is not allowed to modify +// it, and must create a new, writable copy (IE: it's a Clone). +// +// When doing any write operation, we maintain the invariant that the current +// node's context is equal to the context of the tree that requested the write. +// We do this by, before we descend into any node, creating a copy with the +// correct context if the contexts don't match. +// +// Since the node we're currently visiting on any write has the requesting +// tree's context, that node is modifiable in place. Children of that node may +// not share context, but before we descend into them, we'll make a mutable +// copy. +type copyOnWriteContext struct { + freelist *FreeList +} + +// Clone clones the btree, lazily. Clone should not be called concurrently, +// but the original tree (t) and the new tree (t2) can be used concurrently +// once the Clone call completes. +// +// The internal tree structure of b is marked read-only and shared between t and +// t2. Writes to both t and t2 use copy-on-write logic, creating new nodes +// whenever one of b's original nodes would have been modified. Read operations +// should have no performance degredation. Write operations for both t and t2 +// will initially experience minor slow-downs caused by additional allocs and +// copies due to the aforementioned copy-on-write logic, but should converge to +// the original performance characteristics of the original tree. +func (t *BTree) Clone() (t2 *BTree) { + // Create two entirely new copy-on-write contexts. + // This operation effectively creates three trees: + // the original, shared nodes (old b.cow) + // the new b.cow nodes + // the new out.cow nodes + cow1, cow2 := *t.cow, *t.cow + out := *t + t.cow = &cow1 + out.cow = &cow2 + return &out +} + +// maxItems returns the max number of items to allow per node. +func (t *BTree) maxItems() int { + return t.degree*2 - 1 +} + +// minItems returns the min number of items to allow per node (ignored for the +// root node). +func (t *BTree) minItems() int { + return t.degree - 1 +} + +func (c *copyOnWriteContext) newNode() (n *node) { + n = c.freelist.newNode() + n.cow = c + return +} + +type freeType int + +const ( + ftFreelistFull freeType = iota // node was freed (available for GC, not stored in freelist) + ftStored // node was stored in the freelist for later use + ftNotOwned // node was ignored by COW, since it's owned by another one +) + +// freeNode frees a node within a given COW context, if it's owned by that +// context. It returns what happened to the node (see freeType const +// documentation). 
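Illustrative aside (not part of the vendored file): a short sketch of the lazy, copy-on-write Clone behaviour described above. After Clone, the two trees share nodes read-only until one of them writes, so mutations on the clone are not visible in the original.

package main

import (
	"fmt"

	"github.com/google/btree"
)

func main() {
	a := btree.New(2)
	a.ReplaceOrInsert(btree.Int(1))
	a.ReplaceOrInsert(btree.Int(2))

	b := a.Clone()                  // shares a's nodes, read-only
	b.ReplaceOrInsert(btree.Int(3)) // copy-on-write: a is left untouched

	fmt.Println(a.Len(), b.Len())                         // 2 3
	fmt.Println(a.Has(btree.Int(3)), b.Has(btree.Int(3))) // false true
}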
+func (c *copyOnWriteContext) freeNode(n *node) freeType { + if n.cow == c { + // clear to allow GC + n.items.truncate(0) + n.children.truncate(0) + n.cow = nil + if c.freelist.freeNode(n) { + return ftStored + } else { + return ftFreelistFull + } + } else { + return ftNotOwned + } +} + +// ReplaceOrInsert adds the given item to the tree. If an item in the tree +// already equals the given one, it is removed from the tree and returned. +// Otherwise, nil is returned. +// +// nil cannot be added to the tree (will panic). +func (t *BTree) ReplaceOrInsert(item Item) Item { + if item == nil { + panic("nil item being added to BTree") + } + if t.root == nil { + t.root = t.cow.newNode() + t.root.items = append(t.root.items, item) + t.length++ + return nil + } else { + t.root = t.root.mutableFor(t.cow) + if len(t.root.items) >= t.maxItems() { + item2, second := t.root.split(t.maxItems() / 2) + oldroot := t.root + t.root = t.cow.newNode() + t.root.items = append(t.root.items, item2) + t.root.children = append(t.root.children, oldroot, second) + } + } + out := t.root.insert(item, t.maxItems()) + if out == nil { + t.length++ + } + return out +} + +// Delete removes an item equal to the passed in item from the tree, returning +// it. If no such item exists, returns nil. +func (t *BTree) Delete(item Item) Item { + return t.deleteItem(item, removeItem) +} + +// DeleteMin removes the smallest item in the tree and returns it. +// If no such item exists, returns nil. +func (t *BTree) DeleteMin() Item { + return t.deleteItem(nil, removeMin) +} + +// DeleteMax removes the largest item in the tree and returns it. +// If no such item exists, returns nil. +func (t *BTree) DeleteMax() Item { + return t.deleteItem(nil, removeMax) +} + +func (t *BTree) deleteItem(item Item, typ toRemove) Item { + if t.root == nil || len(t.root.items) == 0 { + return nil + } + t.root = t.root.mutableFor(t.cow) + out := t.root.remove(item, t.minItems(), typ) + if len(t.root.items) == 0 && len(t.root.children) > 0 { + oldroot := t.root + t.root = t.root.children[0] + t.cow.freeNode(oldroot) + } + if out != nil { + t.length-- + } + return out +} + +// AscendRange calls the iterator for every value in the tree within the range +// [greaterOrEqual, lessThan), until iterator returns false. +func (t *BTree) AscendRange(greaterOrEqual, lessThan Item, iterator ItemIterator) { + if t.root == nil { + return + } + t.root.iterate(ascend, greaterOrEqual, lessThan, true, false, iterator) +} + +// AscendLessThan calls the iterator for every value in the tree within the range +// [first, pivot), until iterator returns false. +func (t *BTree) AscendLessThan(pivot Item, iterator ItemIterator) { + if t.root == nil { + return + } + t.root.iterate(ascend, nil, pivot, false, false, iterator) +} + +// AscendGreaterOrEqual calls the iterator for every value in the tree within +// the range [pivot, last], until iterator returns false. +func (t *BTree) AscendGreaterOrEqual(pivot Item, iterator ItemIterator) { + if t.root == nil { + return + } + t.root.iterate(ascend, pivot, nil, true, false, iterator) +} + +// Ascend calls the iterator for every value in the tree within the range +// [first, last], until iterator returns false. +func (t *BTree) Ascend(iterator ItemIterator) { + if t.root == nil { + return + } + t.root.iterate(ascend, nil, nil, false, false, iterator) +} + +// DescendRange calls the iterator for every value in the tree within the range +// [lessOrEqual, greaterThan), until iterator returns false. 
+func (t *BTree) DescendRange(lessOrEqual, greaterThan Item, iterator ItemIterator) { + if t.root == nil { + return + } + t.root.iterate(descend, lessOrEqual, greaterThan, true, false, iterator) +} + +// DescendLessOrEqual calls the iterator for every value in the tree within the range +// [pivot, first], until iterator returns false. +func (t *BTree) DescendLessOrEqual(pivot Item, iterator ItemIterator) { + if t.root == nil { + return + } + t.root.iterate(descend, pivot, nil, true, false, iterator) +} + +// DescendGreaterThan calls the iterator for every value in the tree within +// the range [last, pivot), until iterator returns false. +func (t *BTree) DescendGreaterThan(pivot Item, iterator ItemIterator) { + if t.root == nil { + return + } + t.root.iterate(descend, nil, pivot, false, false, iterator) +} + +// Descend calls the iterator for every value in the tree within the range +// [last, first], until iterator returns false. +func (t *BTree) Descend(iterator ItemIterator) { + if t.root == nil { + return + } + t.root.iterate(descend, nil, nil, false, false, iterator) +} + +// Get looks for the key item in the tree, returning it. It returns nil if +// unable to find that item. +func (t *BTree) Get(key Item) Item { + if t.root == nil { + return nil + } + return t.root.get(key) +} + +// Min returns the smallest item in the tree, or nil if the tree is empty. +func (t *BTree) Min() Item { + return min(t.root) +} + +// Max returns the largest item in the tree, or nil if the tree is empty. +func (t *BTree) Max() Item { + return max(t.root) +} + +// Has returns true if the given key is in the tree. +func (t *BTree) Has(key Item) bool { + return t.Get(key) != nil +} + +// Len returns the number of items currently in the tree. +func (t *BTree) Len() int { + return t.length +} + +// Clear removes all items from the btree. If addNodesToFreelist is true, +// t's nodes are added to its freelist as part of this call, until the freelist +// is full. Otherwise, the root node is simply dereferenced and the subtree +// left to Go's normal GC processes. +// +// This can be much faster +// than calling Delete on all elements, because that requires finding/removing +// each element in the tree and updating the tree accordingly. It also is +// somewhat faster than creating a new tree to replace the old one, because +// nodes from the old tree are reclaimed into the freelist for use by the new +// one, instead of being lost to the garbage collector. +// +// This call takes: +// O(1): when addNodesToFreelist is false, this is a single operation. +// O(1): when the freelist is already full, it breaks out immediately +// O(freelist size): when the freelist is empty and the nodes are all owned +// by this tree, nodes are added to the freelist until full. +// O(tree size): when all nodes are owned by another tree, all nodes are +// iterated over looking for nodes to add to the freelist, and due to +// ownership, none are. +func (t *BTree) Clear(addNodesToFreelist bool) { + if t.root != nil && addNodesToFreelist { + t.root.reset(t.cow) + } + t.root, t.length = nil, 0 +} + +// reset returns a subtree to the freelist. It breaks out immediately if the +// freelist is full, since the only benefit of iterating is to fill that +// freelist up. Returns true if parent reset call should continue. 
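Illustrative aside (not part of the vendored file): a sketch of the range-iteration and Clear helpers documented above. AscendRange walks the half-open range [greaterOrEqual, lessThan) in order and stops early when the iterator returns false, while Clear(true) recycles nodes into the freelist.

package main

import (
	"fmt"

	"github.com/google/btree"
)

func main() {
	tr := btree.New(2)
	for i := 0; i < 10; i++ {
		tr.ReplaceOrInsert(btree.Int(i))
	}

	// Prints 3, 4, 5, 6: the upper bound is exclusive.
	tr.AscendRange(btree.Int(3), btree.Int(7), func(i btree.Item) bool {
		fmt.Println(i)
		return true // returning false would stop the walk early
	})

	// Prints 4, 3, 2, 1, 0, walking downwards from the pivot.
	tr.DescendLessOrEqual(btree.Int(4), func(i btree.Item) bool {
		fmt.Println(i)
		return true
	})

	tr.Clear(true)        // recycle nodes into the freelist instead of leaving them to the GC
	fmt.Println(tr.Len()) // 0
}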
+func (n *node) reset(c *copyOnWriteContext) bool { + for _, child := range n.children { + if !child.reset(c) { + return false + } + } + return c.freeNode(n) != ftFreelistFull +} + +// Int implements the Item interface for integers. +type Int int + +// Less returns true if int(a) < int(b). +func (a Int) Less(b Item) bool { + return a < b.(Int) +} diff --git a/vendor/github.com/google/btree/btree_generic.go b/vendor/github.com/google/btree/btree_generic.go new file mode 100644 index 000000000..e44a0f488 --- /dev/null +++ b/vendor/github.com/google/btree/btree_generic.go @@ -0,0 +1,1083 @@ +// Copyright 2014-2022 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build go1.18 +// +build go1.18 + +// In Go 1.18 and beyond, a BTreeG generic is created, and BTree is a specific +// instantiation of that generic for the Item interface, with a backwards- +// compatible API. Before go1.18, generics are not supported, +// and BTree is just an implementation based around the Item interface. + +// Package btree implements in-memory B-Trees of arbitrary degree. +// +// btree implements an in-memory B-Tree for use as an ordered data structure. +// It is not meant for persistent storage solutions. +// +// It has a flatter structure than an equivalent red-black or other binary tree, +// which in some cases yields better memory usage and/or performance. +// See some discussion on the matter here: +// http://google-opensource.blogspot.com/2013/01/c-containers-that-save-memory-and-time.html +// Note, though, that this project is in no way related to the C++ B-Tree +// implementation written about there. +// +// Within this tree, each node contains a slice of items and a (possibly nil) +// slice of children. For basic numeric values or raw structs, this can cause +// efficiency differences when compared to equivalent C++ template code that +// stores values in arrays within the node: +// * Due to the overhead of storing values as interfaces (each +// value needs to be stored as the value itself, then 2 words for the +// interface pointing to that value and its type), resulting in higher +// memory use. +// * Since interfaces can point to values anywhere in memory, values are +// most likely not stored in contiguous blocks, resulting in a higher +// number of cache misses. +// These issues don't tend to matter, though, when working with strings or other +// heap-allocated structures, since C++-equivalent structures also must store +// pointers and also distribute their values across the heap. +// +// This implementation is designed to be a drop-in replacement to gollrb.LLRB +// trees, (http://github.com/petar/gollrb), an excellent and probably the most +// widely used ordered tree implementation in the Go ecosystem currently. +// Its functions, therefore, exactly mirror those of +// llrb.LLRB where possible. Unlike gollrb, though, we currently don't +// support storing multiple equivalent values. 
+// +// There are two implementations; those suffixed with 'G' are generics, usable +// for any type, and require a passed-in "less" function to define their ordering. +// Those without this prefix are specific to the 'Item' interface, and use +// its 'Less' function for ordering. +package btree + +import ( + "fmt" + "io" + "sort" + "strings" + "sync" +) + +// Item represents a single object in the tree. +type Item interface { + // Less tests whether the current item is less than the given argument. + // + // This must provide a strict weak ordering. + // If !a.Less(b) && !b.Less(a), we treat this to mean a == b (i.e. we can only + // hold one of either a or b in the tree). + Less(than Item) bool +} + +const ( + DefaultFreeListSize = 32 +) + +// FreeListG represents a free list of btree nodes. By default each +// BTree has its own FreeList, but multiple BTrees can share the same +// FreeList, in particular when they're created with Clone. +// Two Btrees using the same freelist are safe for concurrent write access. +type FreeListG[T any] struct { + mu sync.Mutex + freelist []*node[T] +} + +// NewFreeListG creates a new free list. +// size is the maximum size of the returned free list. +func NewFreeListG[T any](size int) *FreeListG[T] { + return &FreeListG[T]{freelist: make([]*node[T], 0, size)} +} + +func (f *FreeListG[T]) newNode() (n *node[T]) { + f.mu.Lock() + index := len(f.freelist) - 1 + if index < 0 { + f.mu.Unlock() + return new(node[T]) + } + n = f.freelist[index] + f.freelist[index] = nil + f.freelist = f.freelist[:index] + f.mu.Unlock() + return +} + +func (f *FreeListG[T]) freeNode(n *node[T]) (out bool) { + f.mu.Lock() + if len(f.freelist) < cap(f.freelist) { + f.freelist = append(f.freelist, n) + out = true + } + f.mu.Unlock() + return +} + +// ItemIteratorG allows callers of {A/De}scend* to iterate in-order over portions of +// the tree. When this function returns false, iteration will stop and the +// associated Ascend* function will immediately return. +type ItemIteratorG[T any] func(item T) bool + +// Ordered represents the set of types for which the '<' operator work. +type Ordered interface { + ~int | ~int8 | ~int16 | ~int32 | ~int64 | ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~float32 | ~float64 | ~string +} + +// Less[T] returns a default LessFunc that uses the '<' operator for types that support it. +func Less[T Ordered]() LessFunc[T] { + return func(a, b T) bool { return a < b } +} + +// NewOrderedG creates a new B-Tree for ordered types. +func NewOrderedG[T Ordered](degree int) *BTreeG[T] { + return NewG[T](degree, Less[T]()) +} + +// NewG creates a new B-Tree with the given degree. +// +// NewG(2), for example, will create a 2-3-4 tree (each node contains 1-3 items +// and 2-4 children). +// +// The passed-in LessFunc determines how objects of type T are ordered. +func NewG[T any](degree int, less LessFunc[T]) *BTreeG[T] { + return NewWithFreeListG(degree, less, NewFreeListG[T](DefaultFreeListSize)) +} + +// NewWithFreeListG creates a new B-Tree that uses the given node free list. +func NewWithFreeListG[T any](degree int, less LessFunc[T], f *FreeListG[T]) *BTreeG[T] { + if degree <= 1 { + panic("bad degree") + } + return &BTreeG[T]{ + degree: degree, + cow: &copyOnWriteContext[T]{freelist: f, less: less}, + } +} + +// items stores items in a node. +type items[T any] []T + +// insertAt inserts a value into the given index, pushing all subsequent values +// forward. 
+func (s *items[T]) insertAt(index int, item T) { + var zero T + *s = append(*s, zero) + if index < len(*s) { + copy((*s)[index+1:], (*s)[index:]) + } + (*s)[index] = item +} + +// removeAt removes a value at a given index, pulling all subsequent values +// back. +func (s *items[T]) removeAt(index int) T { + item := (*s)[index] + copy((*s)[index:], (*s)[index+1:]) + var zero T + (*s)[len(*s)-1] = zero + *s = (*s)[:len(*s)-1] + return item +} + +// pop removes and returns the last element in the list. +func (s *items[T]) pop() (out T) { + index := len(*s) - 1 + out = (*s)[index] + var zero T + (*s)[index] = zero + *s = (*s)[:index] + return +} + +// truncate truncates this instance at index so that it contains only the +// first index items. index must be less than or equal to length. +func (s *items[T]) truncate(index int) { + var toClear items[T] + *s, toClear = (*s)[:index], (*s)[index:] + var zero T + for i := 0; i < len(toClear); i++ { + toClear[i] = zero + } +} + +// find returns the index where the given item should be inserted into this +// list. 'found' is true if the item already exists in the list at the given +// index. +func (s items[T]) find(item T, less func(T, T) bool) (index int, found bool) { + i := sort.Search(len(s), func(i int) bool { + return less(item, s[i]) + }) + if i > 0 && !less(s[i-1], item) { + return i - 1, true + } + return i, false +} + +// node is an internal node in a tree. +// +// It must at all times maintain the invariant that either +// * len(children) == 0, len(items) unconstrained +// * len(children) == len(items) + 1 +type node[T any] struct { + items items[T] + children items[*node[T]] + cow *copyOnWriteContext[T] +} + +func (n *node[T]) mutableFor(cow *copyOnWriteContext[T]) *node[T] { + if n.cow == cow { + return n + } + out := cow.newNode() + if cap(out.items) >= len(n.items) { + out.items = out.items[:len(n.items)] + } else { + out.items = make(items[T], len(n.items), cap(n.items)) + } + copy(out.items, n.items) + // Copy children + if cap(out.children) >= len(n.children) { + out.children = out.children[:len(n.children)] + } else { + out.children = make(items[*node[T]], len(n.children), cap(n.children)) + } + copy(out.children, n.children) + return out +} + +func (n *node[T]) mutableChild(i int) *node[T] { + c := n.children[i].mutableFor(n.cow) + n.children[i] = c + return c +} + +// split splits the given node at the given index. The current node shrinks, +// and this function returns the item that existed at that index and a new node +// containing all items/children after it. +func (n *node[T]) split(i int) (T, *node[T]) { + item := n.items[i] + next := n.cow.newNode() + next.items = append(next.items, n.items[i+1:]...) + n.items.truncate(i) + if len(n.children) > 0 { + next.children = append(next.children, n.children[i+1:]...) + n.children.truncate(i + 1) + } + return item, next +} + +// maybeSplitChild checks if a child should be split, and if so splits it. +// Returns whether or not a split occurred. +func (n *node[T]) maybeSplitChild(i, maxItems int) bool { + if len(n.children[i].items) < maxItems { + return false + } + first := n.mutableChild(i) + item, second := first.split(maxItems / 2) + n.items.insertAt(i, item) + n.children.insertAt(i+1, second) + return true +} + +// insert inserts an item into the subtree rooted at this node, making sure +// no nodes in the subtree exceed maxItems items. Should an equivalent item be +// be found/replaced by insert, it will be returned. 
+func (n *node[T]) insert(item T, maxItems int) (_ T, _ bool) { + i, found := n.items.find(item, n.cow.less) + if found { + out := n.items[i] + n.items[i] = item + return out, true + } + if len(n.children) == 0 { + n.items.insertAt(i, item) + return + } + if n.maybeSplitChild(i, maxItems) { + inTree := n.items[i] + switch { + case n.cow.less(item, inTree): + // no change, we want first split node + case n.cow.less(inTree, item): + i++ // we want second split node + default: + out := n.items[i] + n.items[i] = item + return out, true + } + } + return n.mutableChild(i).insert(item, maxItems) +} + +// get finds the given key in the subtree and returns it. +func (n *node[T]) get(key T) (_ T, _ bool) { + i, found := n.items.find(key, n.cow.less) + if found { + return n.items[i], true + } else if len(n.children) > 0 { + return n.children[i].get(key) + } + return +} + +// min returns the first item in the subtree. +func min[T any](n *node[T]) (_ T, found bool) { + if n == nil { + return + } + for len(n.children) > 0 { + n = n.children[0] + } + if len(n.items) == 0 { + return + } + return n.items[0], true +} + +// max returns the last item in the subtree. +func max[T any](n *node[T]) (_ T, found bool) { + if n == nil { + return + } + for len(n.children) > 0 { + n = n.children[len(n.children)-1] + } + if len(n.items) == 0 { + return + } + return n.items[len(n.items)-1], true +} + +// toRemove details what item to remove in a node.remove call. +type toRemove int + +const ( + removeItem toRemove = iota // removes the given item + removeMin // removes smallest item in the subtree + removeMax // removes largest item in the subtree +) + +// remove removes an item from the subtree rooted at this node. +func (n *node[T]) remove(item T, minItems int, typ toRemove) (_ T, _ bool) { + var i int + var found bool + switch typ { + case removeMax: + if len(n.children) == 0 { + return n.items.pop(), true + } + i = len(n.items) + case removeMin: + if len(n.children) == 0 { + return n.items.removeAt(0), true + } + i = 0 + case removeItem: + i, found = n.items.find(item, n.cow.less) + if len(n.children) == 0 { + if found { + return n.items.removeAt(i), true + } + return + } + default: + panic("invalid type") + } + // If we get to here, we have children. + if len(n.children[i].items) <= minItems { + return n.growChildAndRemove(i, item, minItems, typ) + } + child := n.mutableChild(i) + // Either we had enough items to begin with, or we've done some + // merging/stealing, because we've got enough now and we're ready to return + // stuff. + if found { + // The item exists at index 'i', and the child we've selected can give us a + // predecessor, since if we've gotten here it's got > minItems items in it. + out := n.items[i] + // We use our special-case 'remove' call with typ=maxItem to pull the + // predecessor of item i (the rightmost leaf of our immediate left child) + // and set it into where we pulled the item from. + var zero T + n.items[i], _ = child.remove(zero, minItems, removeMax) + return out, true + } + // Final recursive call. Once we're here, we know that the item isn't in this + // node and that the child is big enough to remove from. + return child.remove(item, minItems, typ) +} + +// growChildAndRemove grows child 'i' to make sure it's possible to remove an +// item from it while keeping it at minItems, then calls remove to actually +// remove it. 
+// +// Most documentation says we have to do two sets of special casing: +// 1) item is in this node +// 2) item is in child +// In both cases, we need to handle the two subcases: +// A) node has enough values that it can spare one +// B) node doesn't have enough values +// For the latter, we have to check: +// a) left sibling has node to spare +// b) right sibling has node to spare +// c) we must merge +// To simplify our code here, we handle cases #1 and #2 the same: +// If a node doesn't have enough items, we make sure it does (using a,b,c). +// We then simply redo our remove call, and the second time (regardless of +// whether we're in case 1 or 2), we'll have enough items and can guarantee +// that we hit case A. +func (n *node[T]) growChildAndRemove(i int, item T, minItems int, typ toRemove) (T, bool) { + if i > 0 && len(n.children[i-1].items) > minItems { + // Steal from left child + child := n.mutableChild(i) + stealFrom := n.mutableChild(i - 1) + stolenItem := stealFrom.items.pop() + child.items.insertAt(0, n.items[i-1]) + n.items[i-1] = stolenItem + if len(stealFrom.children) > 0 { + child.children.insertAt(0, stealFrom.children.pop()) + } + } else if i < len(n.items) && len(n.children[i+1].items) > minItems { + // steal from right child + child := n.mutableChild(i) + stealFrom := n.mutableChild(i + 1) + stolenItem := stealFrom.items.removeAt(0) + child.items = append(child.items, n.items[i]) + n.items[i] = stolenItem + if len(stealFrom.children) > 0 { + child.children = append(child.children, stealFrom.children.removeAt(0)) + } + } else { + if i >= len(n.items) { + i-- + } + child := n.mutableChild(i) + // merge with right child + mergeItem := n.items.removeAt(i) + mergeChild := n.children.removeAt(i + 1) + child.items = append(child.items, mergeItem) + child.items = append(child.items, mergeChild.items...) + child.children = append(child.children, mergeChild.children...) + n.cow.freeNode(mergeChild) + } + return n.remove(item, minItems, typ) +} + +type direction int + +const ( + descend = direction(-1) + ascend = direction(+1) +) + +type optionalItem[T any] struct { + item T + valid bool +} + +func optional[T any](item T) optionalItem[T] { + return optionalItem[T]{item: item, valid: true} +} +func empty[T any]() optionalItem[T] { + return optionalItem[T]{} +} + +// iterate provides a simple method for iterating over elements in the tree. +// +// When ascending, the 'start' should be less than 'stop' and when descending, +// the 'start' should be greater than 'stop'. Setting 'includeStart' to true +// will force the iterator to include the first item when it equals 'start', +// thus creating a "greaterOrEqual" or "lessThanEqual" rather than just a +// "greaterThan" or "lessThan" queries. 
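Illustrative aside (not part of the vendored file): a sketch of the generic (Go 1.18+) API introduced in this file, using the NewOrderedG and NewG constructors shown above and the (value, ok) return convention of BTreeG.

package main

import (
	"fmt"

	"github.com/google/btree"
)

type user struct{ id int }

func main() {
	// Built-in ordered types can use the default '<' ordering.
	ints := btree.NewOrderedG[int](2)
	for _, v := range []int{5, 1, 3, 2, 4} {
		ints.ReplaceOrInsert(v)
	}
	if v, ok := ints.Min(); ok {
		fmt.Println(v) // 1
	}

	// Arbitrary types take an explicit LessFunc.
	users := btree.NewG[user](2, func(a, b user) bool { return a.id < b.id })
	old, replaced := users.ReplaceOrInsert(user{id: 7})
	fmt.Println(old, replaced) // {0} false: nothing was replaced

	users.AscendGreaterOrEqual(user{id: 0}, func(u user) bool {
		fmt.Println(u.id) // 7
		return true
	})
}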
+func (n *node[T]) iterate(dir direction, start, stop optionalItem[T], includeStart bool, hit bool, iter ItemIteratorG[T]) (bool, bool) { + var ok, found bool + var index int + switch dir { + case ascend: + if start.valid { + index, _ = n.items.find(start.item, n.cow.less) + } + for i := index; i < len(n.items); i++ { + if len(n.children) > 0 { + if hit, ok = n.children[i].iterate(dir, start, stop, includeStart, hit, iter); !ok { + return hit, false + } + } + if !includeStart && !hit && start.valid && !n.cow.less(start.item, n.items[i]) { + hit = true + continue + } + hit = true + if stop.valid && !n.cow.less(n.items[i], stop.item) { + return hit, false + } + if !iter(n.items[i]) { + return hit, false + } + } + if len(n.children) > 0 { + if hit, ok = n.children[len(n.children)-1].iterate(dir, start, stop, includeStart, hit, iter); !ok { + return hit, false + } + } + case descend: + if start.valid { + index, found = n.items.find(start.item, n.cow.less) + if !found { + index = index - 1 + } + } else { + index = len(n.items) - 1 + } + for i := index; i >= 0; i-- { + if start.valid && !n.cow.less(n.items[i], start.item) { + if !includeStart || hit || n.cow.less(start.item, n.items[i]) { + continue + } + } + if len(n.children) > 0 { + if hit, ok = n.children[i+1].iterate(dir, start, stop, includeStart, hit, iter); !ok { + return hit, false + } + } + if stop.valid && !n.cow.less(stop.item, n.items[i]) { + return hit, false // continue + } + hit = true + if !iter(n.items[i]) { + return hit, false + } + } + if len(n.children) > 0 { + if hit, ok = n.children[0].iterate(dir, start, stop, includeStart, hit, iter); !ok { + return hit, false + } + } + } + return hit, true +} + +// print is used for testing/debugging purposes. +func (n *node[T]) print(w io.Writer, level int) { + fmt.Fprintf(w, "%sNODE:%v\n", strings.Repeat(" ", level), n.items) + for _, c := range n.children { + c.print(w, level+1) + } +} + +// BTreeG is a generic implementation of a B-Tree. +// +// BTreeG stores items of type T in an ordered structure, allowing easy insertion, +// removal, and iteration. +// +// Write operations are not safe for concurrent mutation by multiple +// goroutines, but Read operations are. +type BTreeG[T any] struct { + degree int + length int + root *node[T] + cow *copyOnWriteContext[T] +} + +// LessFunc[T] determines how to order a type 'T'. It should implement a strict +// ordering, and should return true if within that ordering, 'a' < 'b'. +type LessFunc[T any] func(a, b T) bool + +// copyOnWriteContext pointers determine node ownership... a tree with a write +// context equivalent to a node's write context is allowed to modify that node. +// A tree whose write context does not match a node's is not allowed to modify +// it, and must create a new, writable copy (IE: it's a Clone). +// +// When doing any write operation, we maintain the invariant that the current +// node's context is equal to the context of the tree that requested the write. +// We do this by, before we descend into any node, creating a copy with the +// correct context if the contexts don't match. +// +// Since the node we're currently visiting on any write has the requesting +// tree's context, that node is modifiable in place. Children of that node may +// not share context, but before we descend into them, we'll make a mutable +// copy. +type copyOnWriteContext[T any] struct { + freelist *FreeListG[T] + less LessFunc[T] +} + +// Clone clones the btree, lazily. 
Clone should not be called concurrently, +// but the original tree (t) and the new tree (t2) can be used concurrently +// once the Clone call completes. +// +// The internal tree structure of b is marked read-only and shared between t and +// t2. Writes to both t and t2 use copy-on-write logic, creating new nodes +// whenever one of b's original nodes would have been modified. Read operations +// should have no performance degredation. Write operations for both t and t2 +// will initially experience minor slow-downs caused by additional allocs and +// copies due to the aforementioned copy-on-write logic, but should converge to +// the original performance characteristics of the original tree. +func (t *BTreeG[T]) Clone() (t2 *BTreeG[T]) { + // Create two entirely new copy-on-write contexts. + // This operation effectively creates three trees: + // the original, shared nodes (old b.cow) + // the new b.cow nodes + // the new out.cow nodes + cow1, cow2 := *t.cow, *t.cow + out := *t + t.cow = &cow1 + out.cow = &cow2 + return &out +} + +// maxItems returns the max number of items to allow per node. +func (t *BTreeG[T]) maxItems() int { + return t.degree*2 - 1 +} + +// minItems returns the min number of items to allow per node (ignored for the +// root node). +func (t *BTreeG[T]) minItems() int { + return t.degree - 1 +} + +func (c *copyOnWriteContext[T]) newNode() (n *node[T]) { + n = c.freelist.newNode() + n.cow = c + return +} + +type freeType int + +const ( + ftFreelistFull freeType = iota // node was freed (available for GC, not stored in freelist) + ftStored // node was stored in the freelist for later use + ftNotOwned // node was ignored by COW, since it's owned by another one +) + +// freeNode frees a node within a given COW context, if it's owned by that +// context. It returns what happened to the node (see freeType const +// documentation). +func (c *copyOnWriteContext[T]) freeNode(n *node[T]) freeType { + if n.cow == c { + // clear to allow GC + n.items.truncate(0) + n.children.truncate(0) + n.cow = nil + if c.freelist.freeNode(n) { + return ftStored + } else { + return ftFreelistFull + } + } else { + return ftNotOwned + } +} + +// ReplaceOrInsert adds the given item to the tree. If an item in the tree +// already equals the given one, it is removed from the tree and returned, +// and the second return value is true. Otherwise, (zeroValue, false) +// +// nil cannot be added to the tree (will panic). +func (t *BTreeG[T]) ReplaceOrInsert(item T) (_ T, _ bool) { + if t.root == nil { + t.root = t.cow.newNode() + t.root.items = append(t.root.items, item) + t.length++ + return + } else { + t.root = t.root.mutableFor(t.cow) + if len(t.root.items) >= t.maxItems() { + item2, second := t.root.split(t.maxItems() / 2) + oldroot := t.root + t.root = t.cow.newNode() + t.root.items = append(t.root.items, item2) + t.root.children = append(t.root.children, oldroot, second) + } + } + out, outb := t.root.insert(item, t.maxItems()) + if !outb { + t.length++ + } + return out, outb +} + +// Delete removes an item equal to the passed in item from the tree, returning +// it. If no such item exists, returns (zeroValue, false). +func (t *BTreeG[T]) Delete(item T) (T, bool) { + return t.deleteItem(item, removeItem) +} + +// DeleteMin removes the smallest item in the tree and returns it. +// If no such item exists, returns (zeroValue, false). +func (t *BTreeG[T]) DeleteMin() (T, bool) { + var zero T + return t.deleteItem(zero, removeMin) +} + +// DeleteMax removes the largest item in the tree and returns it. 
+// If no such item exists, returns (zeroValue, false). +func (t *BTreeG[T]) DeleteMax() (T, bool) { + var zero T + return t.deleteItem(zero, removeMax) +} + +func (t *BTreeG[T]) deleteItem(item T, typ toRemove) (_ T, _ bool) { + if t.root == nil || len(t.root.items) == 0 { + return + } + t.root = t.root.mutableFor(t.cow) + out, outb := t.root.remove(item, t.minItems(), typ) + if len(t.root.items) == 0 && len(t.root.children) > 0 { + oldroot := t.root + t.root = t.root.children[0] + t.cow.freeNode(oldroot) + } + if outb { + t.length-- + } + return out, outb +} + +// AscendRange calls the iterator for every value in the tree within the range +// [greaterOrEqual, lessThan), until iterator returns false. +func (t *BTreeG[T]) AscendRange(greaterOrEqual, lessThan T, iterator ItemIteratorG[T]) { + if t.root == nil { + return + } + t.root.iterate(ascend, optional[T](greaterOrEqual), optional[T](lessThan), true, false, iterator) +} + +// AscendLessThan calls the iterator for every value in the tree within the range +// [first, pivot), until iterator returns false. +func (t *BTreeG[T]) AscendLessThan(pivot T, iterator ItemIteratorG[T]) { + if t.root == nil { + return + } + t.root.iterate(ascend, empty[T](), optional(pivot), false, false, iterator) +} + +// AscendGreaterOrEqual calls the iterator for every value in the tree within +// the range [pivot, last], until iterator returns false. +func (t *BTreeG[T]) AscendGreaterOrEqual(pivot T, iterator ItemIteratorG[T]) { + if t.root == nil { + return + } + t.root.iterate(ascend, optional[T](pivot), empty[T](), true, false, iterator) +} + +// Ascend calls the iterator for every value in the tree within the range +// [first, last], until iterator returns false. +func (t *BTreeG[T]) Ascend(iterator ItemIteratorG[T]) { + if t.root == nil { + return + } + t.root.iterate(ascend, empty[T](), empty[T](), false, false, iterator) +} + +// DescendRange calls the iterator for every value in the tree within the range +// [lessOrEqual, greaterThan), until iterator returns false. +func (t *BTreeG[T]) DescendRange(lessOrEqual, greaterThan T, iterator ItemIteratorG[T]) { + if t.root == nil { + return + } + t.root.iterate(descend, optional[T](lessOrEqual), optional[T](greaterThan), true, false, iterator) +} + +// DescendLessOrEqual calls the iterator for every value in the tree within the range +// [pivot, first], until iterator returns false. +func (t *BTreeG[T]) DescendLessOrEqual(pivot T, iterator ItemIteratorG[T]) { + if t.root == nil { + return + } + t.root.iterate(descend, optional[T](pivot), empty[T](), true, false, iterator) +} + +// DescendGreaterThan calls the iterator for every value in the tree within +// the range [last, pivot), until iterator returns false. +func (t *BTreeG[T]) DescendGreaterThan(pivot T, iterator ItemIteratorG[T]) { + if t.root == nil { + return + } + t.root.iterate(descend, empty[T](), optional[T](pivot), false, false, iterator) +} + +// Descend calls the iterator for every value in the tree within the range +// [last, first], until iterator returns false. +func (t *BTreeG[T]) Descend(iterator ItemIteratorG[T]) { + if t.root == nil { + return + } + t.root.iterate(descend, empty[T](), empty[T](), false, false, iterator) +} + +// Get looks for the key item in the tree, returning it. It returns +// (zeroValue, false) if unable to find that item. 
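// A minimal usage sketch of the generic API (illustrative only; it assumes the
// NewG constructor referenced later in this file):
//
//	tr := NewG[int](2, func(a, b int) bool { return a < b })
//	tr.ReplaceOrInsert(42)
//	if v, ok := tr.Get(42); ok {
//		fmt.Println(v) // prints 42
//	}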
+func (t *BTreeG[T]) Get(key T) (_ T, _ bool) { + if t.root == nil { + return + } + return t.root.get(key) +} + +// Min returns the smallest item in the tree, or (zeroValue, false) if the tree is empty. +func (t *BTreeG[T]) Min() (_ T, _ bool) { + return min(t.root) +} + +// Max returns the largest item in the tree, or (zeroValue, false) if the tree is empty. +func (t *BTreeG[T]) Max() (_ T, _ bool) { + return max(t.root) +} + +// Has returns true if the given key is in the tree. +func (t *BTreeG[T]) Has(key T) bool { + _, ok := t.Get(key) + return ok +} + +// Len returns the number of items currently in the tree. +func (t *BTreeG[T]) Len() int { + return t.length +} + +// Clear removes all items from the btree. If addNodesToFreelist is true, +// t's nodes are added to its freelist as part of this call, until the freelist +// is full. Otherwise, the root node is simply dereferenced and the subtree +// left to Go's normal GC processes. +// +// This can be much faster +// than calling Delete on all elements, because that requires finding/removing +// each element in the tree and updating the tree accordingly. It also is +// somewhat faster than creating a new tree to replace the old one, because +// nodes from the old tree are reclaimed into the freelist for use by the new +// one, instead of being lost to the garbage collector. +// +// This call takes: +// O(1): when addNodesToFreelist is false, this is a single operation. +// O(1): when the freelist is already full, it breaks out immediately +// O(freelist size): when the freelist is empty and the nodes are all owned +// by this tree, nodes are added to the freelist until full. +// O(tree size): when all nodes are owned by another tree, all nodes are +// iterated over looking for nodes to add to the freelist, and due to +// ownership, none are. +func (t *BTreeG[T]) Clear(addNodesToFreelist bool) { + if t.root != nil && addNodesToFreelist { + t.root.reset(t.cow) + } + t.root, t.length = nil, 0 +} + +// reset returns a subtree to the freelist. It breaks out immediately if the +// freelist is full, since the only benefit of iterating is to fill that +// freelist up. Returns true if parent reset call should continue. +func (n *node[T]) reset(c *copyOnWriteContext[T]) bool { + for _, child := range n.children { + if !child.reset(c) { + return false + } + } + return c.freeNode(n) != ftFreelistFull +} + +// Int implements the Item interface for integers. +type Int int + +// Less returns true if int(a) < int(b). +func (a Int) Less(b Item) bool { + return a < b.(Int) +} + +// BTree is an implementation of a B-Tree. +// +// BTree stores Item instances in an ordered structure, allowing easy insertion, +// removal, and iteration. +// +// Write operations are not safe for concurrent mutation by multiple +// goroutines, but Read operations are. +type BTree BTreeG[Item] + +var itemLess LessFunc[Item] = func(a, b Item) bool { + return a.Less(b) +} + +// New creates a new B-Tree with the given degree. +// +// New(2), for example, will create a 2-3-4 tree (each node contains 1-3 items +// and 2-4 children). +func New(degree int) *BTree { + return (*BTree)(NewG[Item](degree, itemLess)) +} + +// FreeList represents a free list of btree nodes. By default each +// BTree has its own FreeList, but multiple BTrees can share the same +// FreeList. +// Two Btrees using the same freelist are safe for concurrent write access. +type FreeList FreeListG[Item] + +// NewFreeList creates a new free list. +// size is the maximum size of the returned free list. 
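// A usage sketch (illustrative only): two trees sharing one free list, so that
// nodes released by either tree can be reused by the other:
//
//	fl := NewFreeList(32)
//	t1 := NewWithFreeList(4, fl)
//	t2 := NewWithFreeList(4, fl)
//	t1.ReplaceOrInsert(Int(1))
//	t2.ReplaceOrInsert(Int(2))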
+func NewFreeList(size int) *FreeList { + return (*FreeList)(NewFreeListG[Item](size)) +} + +// NewWithFreeList creates a new B-Tree that uses the given node free list. +func NewWithFreeList(degree int, f *FreeList) *BTree { + return (*BTree)(NewWithFreeListG[Item](degree, itemLess, (*FreeListG[Item])(f))) +} + +// ItemIterator allows callers of Ascend* to iterate in-order over portions of +// the tree. When this function returns false, iteration will stop and the +// associated Ascend* function will immediately return. +type ItemIterator ItemIteratorG[Item] + +// Clone clones the btree, lazily. Clone should not be called concurrently, +// but the original tree (t) and the new tree (t2) can be used concurrently +// once the Clone call completes. +// +// The internal tree structure of b is marked read-only and shared between t and +// t2. Writes to both t and t2 use copy-on-write logic, creating new nodes +// whenever one of b's original nodes would have been modified. Read operations +// should have no performance degredation. Write operations for both t and t2 +// will initially experience minor slow-downs caused by additional allocs and +// copies due to the aforementioned copy-on-write logic, but should converge to +// the original performance characteristics of the original tree. +func (t *BTree) Clone() (t2 *BTree) { + return (*BTree)((*BTreeG[Item])(t).Clone()) +} + +// Delete removes an item equal to the passed in item from the tree, returning +// it. If no such item exists, returns nil. +func (t *BTree) Delete(item Item) Item { + i, _ := (*BTreeG[Item])(t).Delete(item) + return i +} + +// DeleteMax removes the largest item in the tree and returns it. +// If no such item exists, returns nil. +func (t *BTree) DeleteMax() Item { + i, _ := (*BTreeG[Item])(t).DeleteMax() + return i +} + +// DeleteMin removes the smallest item in the tree and returns it. +// If no such item exists, returns nil. +func (t *BTree) DeleteMin() Item { + i, _ := (*BTreeG[Item])(t).DeleteMin() + return i +} + +// Get looks for the key item in the tree, returning it. It returns nil if +// unable to find that item. +func (t *BTree) Get(key Item) Item { + i, _ := (*BTreeG[Item])(t).Get(key) + return i +} + +// Max returns the largest item in the tree, or nil if the tree is empty. +func (t *BTree) Max() Item { + i, _ := (*BTreeG[Item])(t).Max() + return i +} + +// Min returns the smallest item in the tree, or nil if the tree is empty. +func (t *BTree) Min() Item { + i, _ := (*BTreeG[Item])(t).Min() + return i +} + +// Has returns true if the given key is in the tree. +func (t *BTree) Has(key Item) bool { + return (*BTreeG[Item])(t).Has(key) +} + +// ReplaceOrInsert adds the given item to the tree. If an item in the tree +// already equals the given one, it is removed from the tree and returned. +// Otherwise, nil is returned. +// +// nil cannot be added to the tree (will panic). +func (t *BTree) ReplaceOrInsert(item Item) Item { + i, _ := (*BTreeG[Item])(t).ReplaceOrInsert(item) + return i +} + +// AscendRange calls the iterator for every value in the tree within the range +// [greaterOrEqual, lessThan), until iterator returns false. +func (t *BTree) AscendRange(greaterOrEqual, lessThan Item, iterator ItemIterator) { + (*BTreeG[Item])(t).AscendRange(greaterOrEqual, lessThan, (ItemIteratorG[Item])(iterator)) +} + +// AscendLessThan calls the iterator for every value in the tree within the range +// [first, pivot), until iterator returns false. 
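// A usage sketch (illustrative only), visiting the items strictly less than
// Int(3) in ascending order:
//
//	tr := New(2)
//	for i := 0; i < 5; i++ {
//		tr.ReplaceOrInsert(Int(i))
//	}
//	tr.AscendLessThan(Int(3), func(i Item) bool {
//		fmt.Println(i) // 0, 1, 2
//		return true    // return false to stop the iteration early
//	})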
+func (t *BTree) AscendLessThan(pivot Item, iterator ItemIterator) { + (*BTreeG[Item])(t).AscendLessThan(pivot, (ItemIteratorG[Item])(iterator)) +} + +// AscendGreaterOrEqual calls the iterator for every value in the tree within +// the range [pivot, last], until iterator returns false. +func (t *BTree) AscendGreaterOrEqual(pivot Item, iterator ItemIterator) { + (*BTreeG[Item])(t).AscendGreaterOrEqual(pivot, (ItemIteratorG[Item])(iterator)) +} + +// Ascend calls the iterator for every value in the tree within the range +// [first, last], until iterator returns false. +func (t *BTree) Ascend(iterator ItemIterator) { + (*BTreeG[Item])(t).Ascend((ItemIteratorG[Item])(iterator)) +} + +// DescendRange calls the iterator for every value in the tree within the range +// [lessOrEqual, greaterThan), until iterator returns false. +func (t *BTree) DescendRange(lessOrEqual, greaterThan Item, iterator ItemIterator) { + (*BTreeG[Item])(t).DescendRange(lessOrEqual, greaterThan, (ItemIteratorG[Item])(iterator)) +} + +// DescendLessOrEqual calls the iterator for every value in the tree within the range +// [pivot, first], until iterator returns false. +func (t *BTree) DescendLessOrEqual(pivot Item, iterator ItemIterator) { + (*BTreeG[Item])(t).DescendLessOrEqual(pivot, (ItemIteratorG[Item])(iterator)) +} + +// DescendGreaterThan calls the iterator for every value in the tree within +// the range [last, pivot), until iterator returns false. +func (t *BTree) DescendGreaterThan(pivot Item, iterator ItemIterator) { + (*BTreeG[Item])(t).DescendGreaterThan(pivot, (ItemIteratorG[Item])(iterator)) +} + +// Descend calls the iterator for every value in the tree within the range +// [last, first], until iterator returns false. +func (t *BTree) Descend(iterator ItemIterator) { + (*BTreeG[Item])(t).Descend((ItemIteratorG[Item])(iterator)) +} + +// Len returns the number of items currently in the tree. +func (t *BTree) Len() int { + return (*BTreeG[Item])(t).Len() +} + +// Clear removes all items from the btree. If addNodesToFreelist is true, +// t's nodes are added to its freelist as part of this call, until the freelist +// is full. Otherwise, the root node is simply dereferenced and the subtree +// left to Go's normal GC processes. +// +// This can be much faster +// than calling Delete on all elements, because that requires finding/removing +// each element in the tree and updating the tree accordingly. It also is +// somewhat faster than creating a new tree to replace the old one, because +// nodes from the old tree are reclaimed into the freelist for use by the new +// one, instead of being lost to the garbage collector. +// +// This call takes: +// O(1): when addNodesToFreelist is false, this is a single operation. +// O(1): when the freelist is already full, it breaks out immediately +// O(freelist size): when the freelist is empty and the nodes are all owned +// by this tree, nodes are added to the freelist until full. +// O(tree size): when all nodes are owned by another tree, all nodes are +// iterated over looking for nodes to add to the freelist, and due to +// ownership, none are. 
+func (t *BTree) Clear(addNodesToFreelist bool) { + (*BTreeG[Item])(t).Clear(addNodesToFreelist) +} diff --git a/vendor/github.com/google/cel-go/cel/BUILD.bazel b/vendor/github.com/google/cel-go/cel/BUILD.bazel index 6e2fc073d..81549fb4c 100644 --- a/vendor/github.com/google/cel-go/cel/BUILD.bazel +++ b/vendor/github.com/google/cel-go/cel/BUILD.bazel @@ -39,6 +39,7 @@ go_library( "//common/types/traits:go_default_library", "//interpreter:go_default_library", "//parser:go_default_library", + "@dev_cel_expr//:expr", "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library", "@org_golang_google_protobuf//proto:go_default_library", "@org_golang_google_protobuf//reflect/protodesc:go_default_library", @@ -81,7 +82,6 @@ go_test( "//test:go_default_library", "//test/proto2pb:go_default_library", "//test/proto3pb:go_default_library", - "@io_bazel_rules_go//proto/wkt:descriptor_go_proto", "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library", "@org_golang_google_protobuf//proto:go_default_library", "@org_golang_google_protobuf//encoding/prototext:go_default_library", diff --git a/vendor/github.com/google/cel-go/cel/decls.go b/vendor/github.com/google/cel-go/cel/decls.go index b59e3708d..418806021 100644 --- a/vendor/github.com/google/cel-go/cel/decls.go +++ b/vendor/github.com/google/cel-go/cel/decls.go @@ -23,6 +23,7 @@ import ( "github.com/google/cel-go/common/types" "github.com/google/cel-go/common/types/ref" + celpb "cel.dev/expr" exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" ) @@ -312,20 +313,34 @@ func ExprTypeToType(t *exprpb.Type) (*Type, error) { // ExprDeclToDeclaration converts a protobuf CEL declaration to a CEL-native declaration, either a Variable or Function. func ExprDeclToDeclaration(d *exprpb.Decl) (EnvOption, error) { + return AlphaProtoAsDeclaration(d) +} + +// AlphaProtoAsDeclaration converts a v1alpha1.Decl value describing a variable or function into an EnvOption. +func AlphaProtoAsDeclaration(d *exprpb.Decl) (EnvOption, error) { + canonical := &celpb.Decl{} + if err := convertProto(d, canonical); err != nil { + return nil, err + } + return ProtoAsDeclaration(canonical) +} + +// ProtoAsDeclaration converts a canonical celpb.Decl value describing a variable or function into an EnvOption. 
+func ProtoAsDeclaration(d *celpb.Decl) (EnvOption, error) { switch d.GetDeclKind().(type) { - case *exprpb.Decl_Function: + case *celpb.Decl_Function: overloads := d.GetFunction().GetOverloads() opts := make([]FunctionOpt, len(overloads)) for i, o := range overloads { args := make([]*Type, len(o.GetParams())) for j, p := range o.GetParams() { - a, err := types.ExprTypeToType(p) + a, err := types.ProtoAsType(p) if err != nil { return nil, err } args[j] = a } - res, err := types.ExprTypeToType(o.GetResultType()) + res, err := types.ProtoAsType(o.GetResultType()) if err != nil { return nil, err } @@ -336,15 +351,15 @@ func ExprDeclToDeclaration(d *exprpb.Decl) (EnvOption, error) { } } return Function(d.GetName(), opts...), nil - case *exprpb.Decl_Ident: - t, err := types.ExprTypeToType(d.GetIdent().GetType()) + case *celpb.Decl_Ident: + t, err := types.ProtoAsType(d.GetIdent().GetType()) if err != nil { return nil, err } if d.GetIdent().GetValue() == nil { return Variable(d.GetName(), t), nil } - val, err := ast.ConstantToVal(d.GetIdent().GetValue()) + val, err := ast.ProtoConstantAsVal(d.GetIdent().GetValue()) if err != nil { return nil, err } diff --git a/vendor/github.com/google/cel-go/cel/env.go b/vendor/github.com/google/cel-go/cel/env.go index 6568a8b80..ab736b776 100644 --- a/vendor/github.com/google/cel-go/cel/env.go +++ b/vendor/github.com/google/cel-go/cel/env.go @@ -44,6 +44,9 @@ type Ast struct { // NativeRep converts the AST to a Go-native representation. func (ast *Ast) NativeRep() *celast.AST { + if ast == nil { + return nil + } return ast.impl } @@ -55,16 +58,13 @@ func (ast *Ast) Expr() *exprpb.Expr { if ast == nil { return nil } - pbExpr, _ := celast.ExprToProto(ast.impl.Expr()) + pbExpr, _ := celast.ExprToProto(ast.NativeRep().Expr()) return pbExpr } // IsChecked returns whether the Ast value has been successfully type-checked. func (ast *Ast) IsChecked() bool { - if ast == nil { - return false - } - return ast.impl.IsChecked() + return ast.NativeRep().IsChecked() } // SourceInfo returns character offset and newline position information about expression elements. @@ -72,7 +72,7 @@ func (ast *Ast) SourceInfo() *exprpb.SourceInfo { if ast == nil { return nil } - pbInfo, _ := celast.SourceInfoToProto(ast.impl.SourceInfo()) + pbInfo, _ := celast.SourceInfoToProto(ast.NativeRep().SourceInfo()) return pbInfo } @@ -95,7 +95,7 @@ func (ast *Ast) OutputType() *Type { if ast == nil { return types.ErrorType } - return ast.impl.GetType(ast.impl.Expr().ID()) + return ast.NativeRep().GetType(ast.NativeRep().Expr().ID()) } // Source returns a view of the input used to create the Ast. This source may be complete or @@ -218,12 +218,12 @@ func (e *Env) Check(ast *Ast) (*Ast, *Issues) { if err != nil { errs := common.NewErrors(ast.Source()) errs.ReportError(common.NoLocation, err.Error()) - return nil, NewIssuesWithSourceInfo(errs, ast.impl.SourceInfo()) + return nil, NewIssuesWithSourceInfo(errs, ast.NativeRep().SourceInfo()) } - checked, errs := checker.Check(ast.impl, ast.Source(), chk) + checked, errs := checker.Check(ast.NativeRep(), ast.Source(), chk) if len(errs.GetErrors()) > 0 { - return nil, NewIssuesWithSourceInfo(errs, ast.impl.SourceInfo()) + return nil, NewIssuesWithSourceInfo(errs, ast.NativeRep().SourceInfo()) } // Manually create the Ast to ensure that the Ast source information (which may be more // detailed than the information provided by Check), is returned to the caller. 
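As a point of reference for the Check and NativeRep changes in this file, the following is a minimal, self-contained sketch of the parse, check, and program path that these hunks touch; the variable name and the expression are illustrative only.

    package main

    import (
        "fmt"

        "github.com/google/cel-go/cel"
    )

    func main() {
        env, err := cel.NewEnv(cel.Variable("x", cel.IntType))
        if err != nil {
            panic(err)
        }
        parsed, iss := env.Parse("x + 1")
        if iss.Err() != nil {
            panic(iss.Err())
        }
        // Check type-checks the parsed AST; any issues carry source info.
        checked, iss := env.Check(parsed)
        if iss.Err() != nil {
            panic(iss.Err())
        }
        // Program plans an evaluable form of the checked AST.
        prg, err := env.Program(checked)
        if err != nil {
            panic(err)
        }
        out, _, err := prg.Eval(map[string]any{"x": 41})
        fmt.Println(out, err) // 42 <nil>
    }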
@@ -244,7 +244,7 @@ func (e *Env) Check(ast *Ast) (*Ast, *Issues) { } } // Apply additional validators on the type-checked result. - iss := NewIssuesWithSourceInfo(errs, ast.impl.SourceInfo()) + iss := NewIssuesWithSourceInfo(errs, ast.NativeRep().SourceInfo()) for _, v := range e.validators { v.Validate(e, vConfig, checked, iss) } @@ -309,17 +309,13 @@ func (e *Env) Extend(opts ...EnvOption) (*Env, error) { copy(chkOptsCopy, e.chkOpts) // Copy the declarations if needed. - varsCopy := []*decls.VariableDecl{} if chk != nil { // If the type-checker has already been instantiated, then the e.declarations have been // validated within the chk instance. chkOptsCopy = append(chkOptsCopy, checker.ValidatedDeclarations(chk)) - } else { - // If the type-checker has not been instantiated, ensure the unvalidated declarations are - // provided to the extended Env instance. - varsCopy = make([]*decls.VariableDecl, len(e.variables)) - copy(varsCopy, e.variables) } + varsCopy := make([]*decls.VariableDecl, len(e.variables)) + copy(varsCopy, e.variables) // Copy macros and program options macsCopy := make([]parser.Macro, len(e.macros)) @@ -416,6 +412,17 @@ func (e *Env) Libraries() []string { return libraries } +// HasFunction returns whether a specific function has been configured in the environment +func (e *Env) HasFunction(functionName string) bool { + _, ok := e.functions[functionName] + return ok +} + +// Functions returns map of Functions, keyed by function name, that have been configured in the environment. +func (e *Env) Functions() map[string]*decls.FunctionDecl { + return e.functions +} + // HasValidator returns whether a specific ASTValidator has been configured in the environment. func (e *Env) HasValidator(name string) bool { for _, v := range e.validators { @@ -452,6 +459,12 @@ func (e *Env) ParseSource(src Source) (*Ast, *Issues) { // Program generates an evaluable instance of the Ast within the environment (Env). func (e *Env) Program(ast *Ast, opts ...ProgramOption) (Program, error) { + return e.PlanProgram(ast.NativeRep(), opts...) +} + +// PlanProgram generates an evaluable instance of the AST in the go-native representation within +// the environment (Env). +func (e *Env) PlanProgram(a *celast.AST, opts ...ProgramOption) (Program, error) { optSet := e.progOpts if len(opts) != 0 { mergedOpts := []ProgramOption{} @@ -459,7 +472,7 @@ func (e *Env) Program(ast *Ast, opts ...ProgramOption) (Program, error) { mergedOpts = append(mergedOpts, opts...) optSet = mergedOpts } - return newProgram(e, ast, optSet) + return newProgram(e, a, optSet) } // CELTypeAdapter returns the `types.Adapter` configured for the environment. @@ -753,10 +766,10 @@ func (i *Issues) Append(other *Issues) *Issues { if i == nil { return other } - if other == nil { + if other == nil || i == other { return i } - return NewIssues(i.errs.Append(other.errs.GetErrors())) + return NewIssuesWithSourceInfo(i.errs.Append(other.errs.GetErrors()), i.info) } // String converts the issues to a suitable display string. @@ -790,7 +803,7 @@ type interopCELTypeProvider struct { // FindStructType returns a types.Type instance for the given fully-qualified typeName if one exists. // -// This method proxies to the underyling ref.TypeProvider's FindType method and converts protobuf type +// This method proxies to the underlying ref.TypeProvider's FindType method and converts protobuf type // into a native type representation. If the conversion fails, the type is listed as not found. 
func (p *interopCELTypeProvider) FindStructType(typeName string) (*types.Type, bool) { if et, found := p.FindType(typeName); found { @@ -813,7 +826,7 @@ func (p *interopCELTypeProvider) FindStructFieldNames(typeName string) ([]string // FindStructFieldType returns a types.FieldType instance for the given fully-qualified typeName and field // name, if one exists. // -// This method proxies to the underyling ref.TypeProvider's FindFieldType method and converts protobuf type +// This method proxies to the underlying ref.TypeProvider's FindFieldType method and converts protobuf type // into a native type representation. If the conversion fails, the type is listed as not found. func (p *interopCELTypeProvider) FindStructFieldType(structType, fieldName string) (*types.FieldType, bool) { if ft, found := p.FindFieldType(structType, fieldName); found { diff --git a/vendor/github.com/google/cel-go/cel/io.go b/vendor/github.com/google/cel-go/cel/io.go index 3133fb9d7..7d08d1c81 100644 --- a/vendor/github.com/google/cel-go/cel/io.go +++ b/vendor/github.com/google/cel-go/cel/io.go @@ -28,6 +28,7 @@ import ( "github.com/google/cel-go/common/types/traits" "github.com/google/cel-go/parser" + celpb "cel.dev/expr" exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" anypb "google.golang.org/protobuf/types/known/anypb" ) @@ -104,72 +105,86 @@ func AstToString(a *Ast) (string, error) { // RefValueToValue converts between ref.Val and api.expr.Value. // The result Value is the serialized proto form. The ref.Val must not be error or unknown. func RefValueToValue(res ref.Val) (*exprpb.Value, error) { + return ValueAsAlphaProto(res) +} + +func ValueAsAlphaProto(res ref.Val) (*exprpb.Value, error) { + canonical, err := ValueAsProto(res) + if err != nil { + return nil, err + } + alpha := &exprpb.Value{} + err = convertProto(canonical, alpha) + return alpha, err +} + +func ValueAsProto(res ref.Val) (*celpb.Value, error) { switch res.Type() { case types.BoolType: - return &exprpb.Value{ - Kind: &exprpb.Value_BoolValue{BoolValue: res.Value().(bool)}}, nil + return &celpb.Value{ + Kind: &celpb.Value_BoolValue{BoolValue: res.Value().(bool)}}, nil case types.BytesType: - return &exprpb.Value{ - Kind: &exprpb.Value_BytesValue{BytesValue: res.Value().([]byte)}}, nil + return &celpb.Value{ + Kind: &celpb.Value_BytesValue{BytesValue: res.Value().([]byte)}}, nil case types.DoubleType: - return &exprpb.Value{ - Kind: &exprpb.Value_DoubleValue{DoubleValue: res.Value().(float64)}}, nil + return &celpb.Value{ + Kind: &celpb.Value_DoubleValue{DoubleValue: res.Value().(float64)}}, nil case types.IntType: - return &exprpb.Value{ - Kind: &exprpb.Value_Int64Value{Int64Value: res.Value().(int64)}}, nil + return &celpb.Value{ + Kind: &celpb.Value_Int64Value{Int64Value: res.Value().(int64)}}, nil case types.ListType: l := res.(traits.Lister) sz := l.Size().(types.Int) - elts := make([]*exprpb.Value, 0, int64(sz)) + elts := make([]*celpb.Value, 0, int64(sz)) for i := types.Int(0); i < sz; i++ { - v, err := RefValueToValue(l.Get(i)) + v, err := ValueAsProto(l.Get(i)) if err != nil { return nil, err } elts = append(elts, v) } - return &exprpb.Value{ - Kind: &exprpb.Value_ListValue{ - ListValue: &exprpb.ListValue{Values: elts}}}, nil + return &celpb.Value{ + Kind: &celpb.Value_ListValue{ + ListValue: &celpb.ListValue{Values: elts}}}, nil case types.MapType: mapper := res.(traits.Mapper) sz := mapper.Size().(types.Int) - entries := make([]*exprpb.MapValue_Entry, 0, int64(sz)) + entries := make([]*celpb.MapValue_Entry, 0, int64(sz)) 
for it := mapper.Iterator(); it.HasNext().(types.Bool); { k := it.Next() v := mapper.Get(k) - kv, err := RefValueToValue(k) + kv, err := ValueAsProto(k) if err != nil { return nil, err } - vv, err := RefValueToValue(v) + vv, err := ValueAsProto(v) if err != nil { return nil, err } - entries = append(entries, &exprpb.MapValue_Entry{Key: kv, Value: vv}) + entries = append(entries, &celpb.MapValue_Entry{Key: kv, Value: vv}) } - return &exprpb.Value{ - Kind: &exprpb.Value_MapValue{ - MapValue: &exprpb.MapValue{Entries: entries}}}, nil + return &celpb.Value{ + Kind: &celpb.Value_MapValue{ + MapValue: &celpb.MapValue{Entries: entries}}}, nil case types.NullType: - return &exprpb.Value{ - Kind: &exprpb.Value_NullValue{}}, nil + return &celpb.Value{ + Kind: &celpb.Value_NullValue{}}, nil case types.StringType: - return &exprpb.Value{ - Kind: &exprpb.Value_StringValue{StringValue: res.Value().(string)}}, nil + return &celpb.Value{ + Kind: &celpb.Value_StringValue{StringValue: res.Value().(string)}}, nil case types.TypeType: typeName := res.(ref.Type).TypeName() - return &exprpb.Value{Kind: &exprpb.Value_TypeValue{TypeValue: typeName}}, nil + return &celpb.Value{Kind: &celpb.Value_TypeValue{TypeValue: typeName}}, nil case types.UintType: - return &exprpb.Value{ - Kind: &exprpb.Value_Uint64Value{Uint64Value: res.Value().(uint64)}}, nil + return &celpb.Value{ + Kind: &celpb.Value_Uint64Value{Uint64Value: res.Value().(uint64)}}, nil default: any, err := res.ConvertToNative(anyPbType) if err != nil { return nil, err } - return &exprpb.Value{ - Kind: &exprpb.Value_ObjectValue{ObjectValue: any.(*anypb.Any)}}, nil + return &celpb.Value{ + Kind: &celpb.Value_ObjectValue{ObjectValue: any.(*anypb.Any)}}, nil } } @@ -192,55 +207,67 @@ var ( // ValueToRefValue converts between exprpb.Value and ref.Val. 
func ValueToRefValue(adapter types.Adapter, v *exprpb.Value) (ref.Val, error) { + return AlphaProtoAsValue(adapter, v) +} + +func AlphaProtoAsValue(adapter types.Adapter, v *exprpb.Value) (ref.Val, error) { + canonical := &celpb.Value{} + if err := convertProto(v, canonical); err != nil { + return nil, err + } + return ProtoAsValue(adapter, canonical) +} + +func ProtoAsValue(adapter types.Adapter, v *celpb.Value) (ref.Val, error) { switch v.Kind.(type) { - case *exprpb.Value_NullValue: + case *celpb.Value_NullValue: return types.NullValue, nil - case *exprpb.Value_BoolValue: + case *celpb.Value_BoolValue: return types.Bool(v.GetBoolValue()), nil - case *exprpb.Value_Int64Value: + case *celpb.Value_Int64Value: return types.Int(v.GetInt64Value()), nil - case *exprpb.Value_Uint64Value: + case *celpb.Value_Uint64Value: return types.Uint(v.GetUint64Value()), nil - case *exprpb.Value_DoubleValue: + case *celpb.Value_DoubleValue: return types.Double(v.GetDoubleValue()), nil - case *exprpb.Value_StringValue: + case *celpb.Value_StringValue: return types.String(v.GetStringValue()), nil - case *exprpb.Value_BytesValue: + case *celpb.Value_BytesValue: return types.Bytes(v.GetBytesValue()), nil - case *exprpb.Value_ObjectValue: + case *celpb.Value_ObjectValue: any := v.GetObjectValue() msg, err := anypb.UnmarshalNew(any, proto.UnmarshalOptions{DiscardUnknown: true}) if err != nil { return nil, err } return adapter.NativeToValue(msg), nil - case *exprpb.Value_MapValue: + case *celpb.Value_MapValue: m := v.GetMapValue() entries := make(map[ref.Val]ref.Val) for _, entry := range m.Entries { - key, err := ValueToRefValue(adapter, entry.Key) + key, err := ProtoAsValue(adapter, entry.Key) if err != nil { return nil, err } - pb, err := ValueToRefValue(adapter, entry.Value) + pb, err := ProtoAsValue(adapter, entry.Value) if err != nil { return nil, err } entries[key] = pb } return adapter.NativeToValue(entries), nil - case *exprpb.Value_ListValue: + case *celpb.Value_ListValue: l := v.GetListValue() elts := make([]ref.Val, len(l.Values)) for i, e := range l.Values { - rv, err := ValueToRefValue(adapter, e) + rv, err := ProtoAsValue(adapter, e) if err != nil { return nil, err } elts[i] = rv } return adapter.NativeToValue(elts), nil - case *exprpb.Value_TypeValue: + case *celpb.Value_TypeValue: typeName := v.GetTypeValue() tv, ok := typeNameToTypeValue[typeName] if ok { @@ -250,3 +277,12 @@ func ValueToRefValue(adapter types.Adapter, v *exprpb.Value) (ref.Val, error) { } return nil, errors.New("unknown value") } + +func convertProto(src, dst proto.Message) error { + pb, err := proto.Marshal(src) + if err != nil { + return err + } + err = proto.Unmarshal(pb, dst) + return err +} diff --git a/vendor/github.com/google/cel-go/cel/library.go b/vendor/github.com/google/cel-go/cel/library.go index deddc14e5..be59f1b02 100644 --- a/vendor/github.com/google/cel-go/cel/library.go +++ b/vendor/github.com/google/cel-go/cel/library.go @@ -403,7 +403,7 @@ func optMap(meh MacroExprFactory, target ast.Expr, args []ast.Expr) (ast.Expr, * meh.NewList(), unusedIterVar, varName, - meh.NewMemberCall(valueFunc, target), + meh.NewMemberCall(valueFunc, meh.Copy(target)), meh.NewLiteral(types.False), meh.NewIdent(varName), mapExpr, @@ -430,7 +430,7 @@ func optFlatMap(meh MacroExprFactory, target ast.Expr, args []ast.Expr) (ast.Exp meh.NewList(), unusedIterVar, varName, - meh.NewMemberCall(valueFunc, target), + meh.NewMemberCall(valueFunc, meh.Copy(target)), meh.NewLiteral(types.False), meh.NewIdent(varName), mapExpr, @@ -446,6 +446,12 
@@ func enableOptionalSyntax() EnvOption { } } +// EnableErrorOnBadPresenceTest enables error generation when a presence test or optional field +// selection is performed on a primitive type. +func EnableErrorOnBadPresenceTest(value bool) EnvOption { + return features(featureEnableErrorOnBadPresenceTest, value) +} + func decorateOptionalOr(i interpreter.Interpretable) (interpreter.Interpretable, error) { call, ok := i.(interpreter.InterpretableCall) if !ok { diff --git a/vendor/github.com/google/cel-go/cel/optimizer.go b/vendor/github.com/google/cel-go/cel/optimizer.go index f26df4623..c149abb70 100644 --- a/vendor/github.com/google/cel-go/cel/optimizer.go +++ b/vendor/github.com/google/cel-go/cel/optimizer.go @@ -15,6 +15,8 @@ package cel import ( + "sort" + "github.com/google/cel-go/common" "github.com/google/cel-go/common/ast" "github.com/google/cel-go/common/types" @@ -98,14 +100,21 @@ func (opt *StaticOptimizer) Optimize(env *Env, a *Ast) (*Ast, *Issues) { // that the ids within the expression correspond to the ids within macros. func normalizeIDs(idGen ast.IDGenerator, optimized ast.Expr, info *ast.SourceInfo) { optimized.RenumberIDs(idGen) - if len(info.MacroCalls()) == 0 { return } + // Sort the macro ids to make sure that the renumbering of macro-specific variables + // is stable across normalization calls. + sortedMacroIDs := []int64{} + for id := range info.MacroCalls() { + sortedMacroIDs = append(sortedMacroIDs, id) + } + sort.Slice(sortedMacroIDs, func(i, j int) bool { return sortedMacroIDs[i] < sortedMacroIDs[j] }) + // First, update the macro call ids themselves. callIDMap := map[int64]int64{} - for id := range info.MacroCalls() { + for _, id := range sortedMacroIDs { callIDMap[id] = idGen(id) } // Then update the macro call definitions which refer to these ids, but @@ -116,7 +125,8 @@ func normalizeIDs(idGen ast.IDGenerator, optimized ast.Expr, info *ast.SourceInf call ast.Expr } macroUpdates := []macroUpdate{} - for oldID, newID := range callIDMap { + for _, oldID := range sortedMacroIDs { + newID := callIDMap[oldID] call, found := info.GetMacroCall(oldID) if !found { continue @@ -134,6 +144,7 @@ func cleanupMacroRefs(expr ast.Expr, info *ast.SourceInfo) { if len(info.MacroCalls()) == 0 { return } + // Sanitize the macro call references once the optimized expression has been computed // and the ids normalized between the expression and the macros. exprRefMap := make(map[int64]struct{}) @@ -200,6 +211,16 @@ type OptimizerContext struct { *Issues } +// ExtendEnv auguments the context's environment with the additional options. +func (opt *OptimizerContext) ExtendEnv(opts ...EnvOption) error { + e, err := opt.Env.Extend(opts...) + if err != nil { + return err + } + opt.Env = e + return nil +} + // ASTOptimizer applies an optimization over an AST and returns the optimized result. type ASTOptimizer interface { // Optimize optimizes a type-checked AST within an Environment and accumulates any issues. @@ -253,6 +274,11 @@ func (opt *optimizerExprFactory) SetMacroCall(id int64, expr ast.Expr) { opt.sourceInfo.SetMacroCall(id, expr) } +// MacroCalls returns the map of macro calls currently in the context. +func (opt *optimizerExprFactory) MacroCalls() map[int64]ast.Expr { + return opt.sourceInfo.MacroCalls() +} + // NewBindMacro creates an AST expression representing the expanded bind() macro, and a macro expression // representing the unexpanded call signature to be inserted into the source info macro call metadata. 
func (opt *optimizerExprFactory) NewBindMacro(macroID int64, varName string, varInit, remaining ast.Expr) (astExpr, macroExpr ast.Expr) { diff --git a/vendor/github.com/google/cel-go/cel/options.go b/vendor/github.com/google/cel-go/cel/options.go index 3c53e21af..69c694263 100644 --- a/vendor/github.com/google/cel-go/cel/options.go +++ b/vendor/github.com/google/cel-go/cel/options.go @@ -61,6 +61,10 @@ const ( // compressing the logic graph to a single call when multiple like-operator // expressions occur: e.g. a && b && c && d -> call(_&&_, [a, b, c, d]) featureVariadicLogicalASTs + + // Enable error generation when a presence test or optional field selection is + // performed on a primitive type. + featureEnableErrorOnBadPresenceTest ) // EnvOption is a functional interface for configuring the environment. @@ -243,6 +247,13 @@ func Abbrevs(qualifiedNames ...string) EnvOption { } } +// customTypeRegistry is an internal-only interface containing the minimum methods required to support +// custom types. It is a subset of methods from ref.TypeRegistry. +type customTypeRegistry interface { + RegisterDescriptor(protoreflect.FileDescriptor) error + RegisterType(...ref.Type) error +} + // Types adds one or more type declarations to the environment, allowing for construction of // type-literals whose definitions are included in the common expression built-in set. // @@ -255,12 +266,7 @@ func Abbrevs(qualifiedNames ...string) EnvOption { // Note: This option must be specified after the CustomTypeProvider option when used together. func Types(addTypes ...any) EnvOption { return func(e *Env) (*Env, error) { - var reg ref.TypeRegistry - var isReg bool - reg, isReg = e.provider.(*types.Registry) - if !isReg { - reg, isReg = e.provider.(ref.TypeRegistry) - } + reg, isReg := e.provider.(customTypeRegistry) if !isReg { return nil, fmt.Errorf("custom types not supported by provider: %T", e.provider) } @@ -297,7 +303,7 @@ func Types(addTypes ...any) EnvOption { // extension or by re-using the same EnvOption with another NewEnv() call. 
func TypeDescs(descs ...any) EnvOption { return func(e *Env) (*Env, error) { - reg, isReg := e.provider.(ref.TypeRegistry) + reg, isReg := e.provider.(customTypeRegistry) if !isReg { return nil, fmt.Errorf("custom types not supported by provider: %T", e.provider) } @@ -345,7 +351,7 @@ func TypeDescs(descs ...any) EnvOption { } } -func registerFileSet(reg ref.TypeRegistry, fileSet *descpb.FileDescriptorSet) error { +func registerFileSet(reg customTypeRegistry, fileSet *descpb.FileDescriptorSet) error { files, err := protodesc.NewFiles(fileSet) if err != nil { return fmt.Errorf("protodesc.NewFiles(%v) failed: %v", fileSet, err) @@ -353,7 +359,7 @@ func registerFileSet(reg ref.TypeRegistry, fileSet *descpb.FileDescriptorSet) er return registerFiles(reg, files) } -func registerFiles(reg ref.TypeRegistry, files *protoregistry.Files) error { +func registerFiles(reg customTypeRegistry, files *protoregistry.Files) error { var err error files.RangeFiles(func(fd protoreflect.FileDescriptor) bool { err = reg.RegisterDescriptor(fd) diff --git a/vendor/github.com/google/cel-go/cel/program.go b/vendor/github.com/google/cel-go/cel/program.go index ece9fbdaf..6f477afc9 100644 --- a/vendor/github.com/google/cel-go/cel/program.go +++ b/vendor/github.com/google/cel-go/cel/program.go @@ -19,6 +19,7 @@ import ( "fmt" "sync" + "github.com/google/cel-go/common/ast" "github.com/google/cel-go/common/types" "github.com/google/cel-go/common/types/ref" "github.com/google/cel-go/interpreter" @@ -151,7 +152,7 @@ func (p *prog) clone() *prog { // ProgramOption values. // // If the program cannot be configured the prog will be nil, with a non-nil error response. -func newProgram(e *Env, a *Ast, opts []ProgramOption) (Program, error) { +func newProgram(e *Env, a *ast.AST, opts []ProgramOption) (Program, error) { // Build the dispatcher, interpreter, and default program value. disp := interpreter.NewDispatcher() @@ -187,10 +188,13 @@ func newProgram(e *Env, a *Ast, opts []ProgramOption) (Program, error) { // Set the attribute factory after the options have been set. var attrFactory interpreter.AttributeFactory + attrFactorOpts := []interpreter.AttrFactoryOption{ + interpreter.EnableErrorOnBadPresenceTest(p.HasFeature(featureEnableErrorOnBadPresenceTest)), + } if p.evalOpts&OptPartialEval == OptPartialEval { - attrFactory = interpreter.NewPartialAttributeFactory(e.Container, e.adapter, e.provider) + attrFactory = interpreter.NewPartialAttributeFactory(e.Container, e.adapter, e.provider, attrFactorOpts...) } else { - attrFactory = interpreter.NewAttributeFactory(e.Container, e.adapter, e.provider) + attrFactory = interpreter.NewAttributeFactory(e.Container, e.adapter, e.provider, attrFactorOpts...) } interp := interpreter.NewInterpreter(disp, e.Container, e.provider, e.adapter, attrFactory) p.interpreter = interp @@ -252,9 +256,9 @@ func newProgram(e *Env, a *Ast, opts []ProgramOption) (Program, error) { return p.initInterpretable(a, decorators) } -func (p *prog) initInterpretable(a *Ast, decs []interpreter.InterpretableDecorator) (*prog, error) { +func (p *prog) initInterpretable(a *ast.AST, decs []interpreter.InterpretableDecorator) (*prog, error) { // When the AST has been exprAST it contains metadata that can be used to speed up program execution. - interpretable, err := p.interpreter.NewInterpretable(a.impl, decs...) + interpretable, err := p.interpreter.NewInterpretable(a, decs...) 
if err != nil { return nil, err } diff --git a/vendor/github.com/google/cel-go/checker/BUILD.bazel b/vendor/github.com/google/cel-go/checker/BUILD.bazel index 997fa91d1..678b412a9 100644 --- a/vendor/github.com/google/cel-go/checker/BUILD.bazel +++ b/vendor/github.com/google/cel-go/checker/BUILD.bazel @@ -16,7 +16,6 @@ go_library( "options.go", "printer.go", "scopes.go", - "standard.go", "types.go", ], importpath = "github.com/google/cel-go/checker", diff --git a/vendor/github.com/google/cel-go/checker/checker.go b/vendor/github.com/google/cel-go/checker/checker.go index 57fb3ce5e..0603cfa30 100644 --- a/vendor/github.com/google/cel-go/checker/checker.go +++ b/vendor/github.com/google/cel-go/checker/checker.go @@ -496,16 +496,32 @@ func (c *checker) checkComprehension(e ast.Expr) { comp := e.AsComprehension() c.check(comp.IterRange()) c.check(comp.AccuInit()) - accuType := c.getType(comp.AccuInit()) rangeType := substitute(c.mappings, c.getType(comp.IterRange()), false) - var varType *types.Type + // Create a scope for the comprehension since it has a local accumulation variable. + // This scope will contain the accumulation variable used to compute the result. + accuType := c.getType(comp.AccuInit()) + c.env = c.env.enterScope() + c.env.AddIdents(decls.NewVariable(comp.AccuVar(), accuType)) + + var varType, var2Type *types.Type switch rangeType.Kind() { case types.ListKind: + // varType represents the list element type for one-variable comprehensions. varType = rangeType.Parameters()[0] + if comp.HasIterVar2() { + // varType represents the list index (int) for two-variable comprehensions, + // and var2Type represents the list element type. + var2Type = varType + varType = types.IntType + } case types.MapKind: - // Ranges over the keys. + // varType represents the map entry key for all comprehension types. varType = rangeType.Parameters()[0] + if comp.HasIterVar2() { + // var2Type represents the map entry value for two-variable comprehensions. + var2Type = rangeType.Parameters()[1] + } case types.DynKind, types.ErrorKind, types.TypeParamKind: // Set the range type to DYN to prevent assignment to a potentially incorrect type // at a later point in type-checking. The isAssignable call will update the type @@ -518,13 +534,12 @@ func (c *checker) checkComprehension(e ast.Expr) { varType = types.ErrorType } - // Create a scope for the comprehension since it has a local accumulation variable. - // This scope will contain the accumulation variable used to compute the result. - c.env = c.env.enterScope() - c.env.AddIdents(decls.NewVariable(comp.AccuVar(), accuType)) // Create a block scope for the loop. c.env = c.env.enterScope() c.env.AddIdents(decls.NewVariable(comp.IterVar(), varType)) + if comp.HasIterVar2() { + c.env.AddIdents(decls.NewVariable(comp.IterVar2(), var2Type)) + } // Check the variable references in the condition and step. c.check(comp.LoopCondition()) c.assertType(comp.LoopCondition(), types.BoolType) diff --git a/vendor/github.com/google/cel-go/checker/standard.go b/vendor/github.com/google/cel-go/checker/standard.go deleted file mode 100644 index 11b35b80e..000000000 --- a/vendor/github.com/google/cel-go/checker/standard.go +++ /dev/null @@ -1,35 +0,0 @@ -// Copyright 2018 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package checker - -import ( - "github.com/google/cel-go/common/stdlib" - - exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" -) - -// StandardFunctions returns the Decls for all functions in the evaluator. -// -// Deprecated: prefer stdlib.FunctionExprDecls() -func StandardFunctions() []*exprpb.Decl { - return stdlib.FunctionExprDecls() -} - -// StandardTypes returns the set of type identifiers for standard library types. -// -// Deprecated: prefer stdlib.TypeExprDecls() -func StandardTypes() []*exprpb.Decl { - return stdlib.TypeExprDecls() -} diff --git a/vendor/github.com/google/cel-go/common/BUILD.bazel b/vendor/github.com/google/cel-go/common/BUILD.bazel index d6165b13a..eef7f281b 100644 --- a/vendor/github.com/google/cel-go/common/BUILD.bazel +++ b/vendor/github.com/google/cel-go/common/BUILD.bazel @@ -18,7 +18,6 @@ go_library( deps = [ "//common/runes:go_default_library", "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library", - "@org_golang_x_text//width:go_default_library", ], ) diff --git a/vendor/github.com/google/cel-go/common/ast/BUILD.bazel b/vendor/github.com/google/cel-go/common/ast/BUILD.bazel index 5c40c3781..9824f57a9 100644 --- a/vendor/github.com/google/cel-go/common/ast/BUILD.bazel +++ b/vendor/github.com/google/cel-go/common/ast/BUILD.bazel @@ -15,11 +15,13 @@ go_library( "navigable.go", ], importpath = "github.com/google/cel-go/common/ast", - deps = [ + deps = [ "//common:go_default_library", "//common/types:go_default_library", "//common/types/ref:go_default_library", + "@dev_cel_expr//:expr", "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library", + "@org_golang_google_protobuf//proto:go_default_library", "@org_golang_google_protobuf//types/known/structpb:go_default_library", ], ) @@ -35,12 +37,13 @@ go_test( embed = [ ":go_default_library", ], - deps = [ + deps = [ "//checker:go_default_library", "//checker/decls:go_default_library", "//common:go_default_library", "//common/containers:go_default_library", "//common/decls:go_default_library", + "//common/operators:go_default_library", "//common/overloads:go_default_library", "//common/stdlib:go_default_library", "//common/types:go_default_library", diff --git a/vendor/github.com/google/cel-go/common/ast/ast.go b/vendor/github.com/google/cel-go/common/ast/ast.go index 355ddd49a..b807669d4 100644 --- a/vendor/github.com/google/cel-go/common/ast/ast.go +++ b/vendor/github.com/google/cel-go/common/ast/ast.go @@ -310,21 +310,18 @@ func (s *SourceInfo) SetOffsetRange(id int64, o OffsetRange) { s.offsetRanges[id] = o } +// ClearOffsetRange removes the OffsetRange for the given expression id. +func (s *SourceInfo) ClearOffsetRange(id int64) { + if s != nil { + delete(s.offsetRanges, id) + } +} + // GetStartLocation calculates the human-readable 1-based line and 0-based column of the first character // of the expression node at the id. 
func (s *SourceInfo) GetStartLocation(id int64) common.Location { if o, found := s.GetOffsetRange(id); found { - line := 1 - col := int(o.Start) - for _, lineOffset := range s.LineOffsets() { - if lineOffset < o.Start { - line++ - col = int(o.Start - lineOffset) - } else { - break - } - } - return common.NewLocation(line, col) + return s.GetLocationByOffset(o.Start) } return common.NoLocation } @@ -336,21 +333,25 @@ func (s *SourceInfo) GetStartLocation(id int64) common.Location { // be identical to the start location for the expression. func (s *SourceInfo) GetStopLocation(id int64) common.Location { if o, found := s.GetOffsetRange(id); found { - line := 1 - col := int(o.Stop) - for _, lineOffset := range s.LineOffsets() { - if lineOffset < o.Stop { - line++ - col = int(o.Stop - lineOffset) - } else { - break - } - } - return common.NewLocation(line, col) + return s.GetLocationByOffset(o.Stop) } return common.NoLocation } +// GetLocationByOffset returns the line and column information for a given character offset. +func (s *SourceInfo) GetLocationByOffset(offset int32) common.Location { + line := 1 + col := int(offset) + for _, lineOffset := range s.LineOffsets() { + if lineOffset > offset { + break + } + line++ + col = int(offset - lineOffset) + } + return common.NewLocation(line, col) +} + // ComputeOffset calculates the 0-based character offset from a 1-based line and 0-based column. func (s *SourceInfo) ComputeOffset(line, col int32) int32 { if s != nil { diff --git a/vendor/github.com/google/cel-go/common/ast/conversion.go b/vendor/github.com/google/cel-go/common/ast/conversion.go index 8f2c4bd1e..435d8f654 100644 --- a/vendor/github.com/google/cel-go/common/ast/conversion.go +++ b/vendor/github.com/google/cel-go/common/ast/conversion.go @@ -17,12 +17,14 @@ package ast import ( "fmt" + "google.golang.org/protobuf/proto" + "github.com/google/cel-go/common/types" "github.com/google/cel-go/common/types/ref" - structpb "google.golang.org/protobuf/types/known/structpb" - + celpb "cel.dev/expr" exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" + structpb "google.golang.org/protobuf/types/known/structpb" ) // ToProto converts an AST to a CheckedExpr protobouf. @@ -173,9 +175,10 @@ func exprComprehension(factory ExprFactory, id int64, comp *exprpb.Expr_Comprehe if err != nil { return nil, err } - return factory.NewComprehension(id, + return factory.NewComprehensionTwoVar(id, iterRange, comp.GetIterVar(), + comp.GetIterVar2(), comp.GetAccuVar(), accuInit, loopCond, @@ -363,6 +366,7 @@ func protoComprehension(id int64, comp ComprehensionExpr) (*exprpb.Expr, error) ExprKind: &exprpb.Expr_ComprehensionExpr{ ComprehensionExpr: &exprpb.Expr_Comprehension{ IterVar: comp.IterVar(), + IterVar2: comp.IterVar2(), IterRange: iterRange, AccuVar: comp.AccuVar(), AccuInit: accuInit, @@ -609,24 +613,47 @@ func ValToConstant(v ref.Val) (*exprpb.Constant, error) { // ConstantToVal converts a protobuf Constant to a CEL-native ref.Val. func ConstantToVal(c *exprpb.Constant) (ref.Val, error) { + return AlphaProtoConstantAsVal(c) +} + +// AlphaProtoConstantAsVal converts a v1alpha1.Constant protobuf to a CEL-native ref.Val. +func AlphaProtoConstantAsVal(c *exprpb.Constant) (ref.Val, error) { if c == nil { return nil, nil } + canonical := &celpb.Constant{} + if err := convertProto(c, canonical); err != nil { + return nil, err + } + return ProtoConstantAsVal(canonical) +} + +// ProtoConstantAsVal converts a canonical celpb.Constant protobuf to a CEL-native ref.Val. 
+func ProtoConstantAsVal(c *celpb.Constant) (ref.Val, error) { switch c.GetConstantKind().(type) { - case *exprpb.Constant_BoolValue: + case *celpb.Constant_BoolValue: return types.Bool(c.GetBoolValue()), nil - case *exprpb.Constant_BytesValue: + case *celpb.Constant_BytesValue: return types.Bytes(c.GetBytesValue()), nil - case *exprpb.Constant_DoubleValue: + case *celpb.Constant_DoubleValue: return types.Double(c.GetDoubleValue()), nil - case *exprpb.Constant_Int64Value: + case *celpb.Constant_Int64Value: return types.Int(c.GetInt64Value()), nil - case *exprpb.Constant_NullValue: + case *celpb.Constant_NullValue: return types.NullValue, nil - case *exprpb.Constant_StringValue: + case *celpb.Constant_StringValue: return types.String(c.GetStringValue()), nil - case *exprpb.Constant_Uint64Value: + case *celpb.Constant_Uint64Value: return types.Uint(c.GetUint64Value()), nil } return nil, fmt.Errorf("unsupported constant kind: %v", c.GetConstantKind()) } + +func convertProto(src, dst proto.Message) error { + pb, err := proto.Marshal(src) + if err != nil { + return err + } + err = proto.Unmarshal(pb, dst) + return err +} diff --git a/vendor/github.com/google/cel-go/common/ast/expr.go b/vendor/github.com/google/cel-go/common/ast/expr.go index c9d88bbaa..9f55cb3b9 100644 --- a/vendor/github.com/google/cel-go/common/ast/expr.go +++ b/vendor/github.com/google/cel-go/common/ast/expr.go @@ -158,7 +158,7 @@ type EntryExpr interface { // IDGenerator produces unique ids suitable for tagging expression nodes type IDGenerator func(originalID int64) int64 -// CallExpr defines an interface for inspecting a function call and its arugments. +// CallExpr defines an interface for inspecting a function call and its arguments. type CallExpr interface { // FunctionName returns the name of the function. FunctionName() string @@ -269,8 +269,22 @@ type ComprehensionExpr interface { IterRange() Expr // IterVar returns the iteration variable name. + // + // For one-variable comprehensions, the iter var refers to the element value + // when iterating over a list, or the map key when iterating over a map. + // + // For two-variable comprehneions, the iter var refers to the list index or the + // map key. IterVar() string + // IterVar2 returns the second iteration variable name. + // + // When the value is non-empty, the comprehension is a two-variable comprehension. + IterVar2() string + + // HasIterVar2 returns true if the second iteration variable is non-empty. + HasIterVar2() bool + // AccuVar returns the accumulation variable name. 
AccuVar() string @@ -397,6 +411,7 @@ func (e *expr) SetKindCase(other Expr) { e.exprKindCase = &baseComprehensionExpr{ iterRange: c.IterRange(), iterVar: c.IterVar(), + iterVar2: c.IterVar2(), accuVar: c.AccuVar(), accuInit: c.AccuInit(), loopCond: c.LoopCondition(), @@ -505,6 +520,7 @@ var _ ComprehensionExpr = &baseComprehensionExpr{} type baseComprehensionExpr struct { iterRange Expr iterVar string + iterVar2 string accuVar string accuInit Expr loopCond Expr @@ -527,6 +543,14 @@ func (e *baseComprehensionExpr) IterVar() string { return e.iterVar } +func (e *baseComprehensionExpr) IterVar2() string { + return e.iterVar2 +} + +func (e *baseComprehensionExpr) HasIterVar2() bool { + return e.iterVar2 != "" +} + func (e *baseComprehensionExpr) AccuVar() string { return e.accuVar } diff --git a/vendor/github.com/google/cel-go/common/ast/factory.go b/vendor/github.com/google/cel-go/common/ast/factory.go index b7f36e72a..994806b79 100644 --- a/vendor/github.com/google/cel-go/common/ast/factory.go +++ b/vendor/github.com/google/cel-go/common/ast/factory.go @@ -27,9 +27,12 @@ type ExprFactory interface { // NewCall creates an Expr value representing a global function call. NewCall(id int64, function string, args ...Expr) Expr - // NewComprehension creates an Expr value representing a comprehension over a value range. + // NewComprehension creates an Expr value representing a one-variable comprehension over a value range. NewComprehension(id int64, iterRange Expr, iterVar, accuVar string, accuInit, loopCondition, loopStep, result Expr) Expr + // NewComprehensionTwoVar creates an Expr value representing a two-variable comprehension over a value range. + NewComprehensionTwoVar(id int64, iterRange Expr, iterVar, iterVar2, accuVar string, accuInit, loopCondition, loopStep, result Expr) Expr + // NewMemberCall creates an Expr value representing a member function call. NewMemberCall(id int64, function string, receiver Expr, args ...Expr) Expr @@ -111,11 +114,17 @@ func (fac *baseExprFactory) NewMemberCall(id int64, function string, target Expr } func (fac *baseExprFactory) NewComprehension(id int64, iterRange Expr, iterVar, accuVar string, accuInit, loopCond, loopStep, result Expr) Expr { + // Set the iter_var2 to empty string to indicate the second variable is omitted + return fac.NewComprehensionTwoVar(id, iterRange, iterVar, "", accuVar, accuInit, loopCond, loopStep, result) +} + +func (fac *baseExprFactory) NewComprehensionTwoVar(id int64, iterRange Expr, iterVar, iterVar2, accuVar string, accuInit, loopCond, loopStep, result Expr) Expr { return fac.newExpr( id, &baseComprehensionExpr{ iterRange: iterRange, iterVar: iterVar, + iterVar2: iterVar2, accuVar: accuVar, accuInit: accuInit, loopCond: loopCond, @@ -223,9 +232,10 @@ func (fac *baseExprFactory) CopyExpr(e Expr) Expr { return fac.NewMemberCall(e.ID(), c.FunctionName(), fac.CopyExpr(c.Target()), argsCopy...) 
case ComprehensionKind: compre := e.AsComprehension() - return fac.NewComprehension(e.ID(), + return fac.NewComprehensionTwoVar(e.ID(), fac.CopyExpr(compre.IterRange()), compre.IterVar(), + compre.IterVar2(), compre.AccuVar(), fac.CopyExpr(compre.AccuInit()), fac.CopyExpr(compre.LoopCondition()), diff --git a/vendor/github.com/google/cel-go/common/ast/navigable.go b/vendor/github.com/google/cel-go/common/ast/navigable.go index f5ddf6aac..d7a90fb7c 100644 --- a/vendor/github.com/google/cel-go/common/ast/navigable.go +++ b/vendor/github.com/google/cel-go/common/ast/navigable.go @@ -390,6 +390,14 @@ func (comp navigableComprehensionImpl) IterVar() string { return comp.Expr.AsComprehension().IterVar() } +func (comp navigableComprehensionImpl) IterVar2() string { + return comp.Expr.AsComprehension().IterVar2() +} + +func (comp navigableComprehensionImpl) HasIterVar2() bool { + return comp.Expr.AsComprehension().HasIterVar2() +} + func (comp navigableComprehensionImpl) AccuVar() string { return comp.Expr.AsComprehension().AccuVar() } diff --git a/vendor/github.com/google/cel-go/common/containers/container.go b/vendor/github.com/google/cel-go/common/containers/container.go index 52153d4cd..3097a3f78 100644 --- a/vendor/github.com/google/cel-go/common/containers/container.go +++ b/vendor/github.com/google/cel-go/common/containers/container.go @@ -19,6 +19,7 @@ package containers import ( "fmt" "strings" + "unicode" "github.com/google/cel-go/common/ast" ) @@ -212,6 +213,13 @@ type ContainerOption func(*Container) (*Container, error) func Abbrevs(qualifiedNames ...string) ContainerOption { return func(c *Container) (*Container, error) { for _, qn := range qualifiedNames { + qn = strings.TrimSpace(qn) + for _, r := range qn { + if !isIdentifierChar(r) { + return nil, fmt.Errorf( + "invalid qualified name: %s, wanted name of the form 'qualified.name'", qn) + } + } ind := strings.LastIndex(qn, ".") if ind <= 0 || ind >= len(qn)-1 { return nil, fmt.Errorf( @@ -278,6 +286,10 @@ func aliasAs(kind, qualifiedName, alias string) ContainerOption { } } +func isIdentifierChar(r rune) bool { + return r <= unicode.MaxASCII && (r == '.' || r == '_' || unicode.IsLetter(r) || unicode.IsNumber(r)) +} + // Name sets the fully-qualified name of the Container. func Name(name string) ContainerOption { return func(c *Container) (*Container, error) { diff --git a/vendor/github.com/google/cel-go/common/debug/debug.go b/vendor/github.com/google/cel-go/common/debug/debug.go index e4c01ac6e..25d2e3d71 100644 --- a/vendor/github.com/google/cel-go/common/debug/debug.go +++ b/vendor/github.com/google/cel-go/common/debug/debug.go @@ -215,6 +215,11 @@ func (w *debugWriter) appendComprehension(comprehension ast.ComprehensionExpr) { w.append(comprehension.IterVar()) w.append(",") w.appendLine() + if comprehension.HasIterVar2() { + w.append(comprehension.IterVar2()) + w.append(",") + w.appendLine() + } w.append("// Target") w.appendLine() w.Buffer(comprehension.IterRange()) diff --git a/vendor/github.com/google/cel-go/common/decls/decls.go b/vendor/github.com/google/cel-go/common/decls/decls.go index 734ebe57e..f67808feb 100644 --- a/vendor/github.com/google/cel-go/common/decls/decls.go +++ b/vendor/github.com/google/cel-go/common/decls/decls.go @@ -162,7 +162,9 @@ func (f *FunctionDecl) AddOverload(overload *OverloadDecl) error { if oID == overload.ID() { if o.SignatureEquals(overload) && o.IsNonStrict() == overload.IsNonStrict() { // Allow redefinition of an overload implementation so long as the signatures match. 
- f.overloads[oID] = overload + if overload.hasBinding() { + f.overloads[oID] = overload + } return nil } return fmt.Errorf("overload redefinition in function. %s: %s has multiple definitions", f.Name(), oID) @@ -249,15 +251,15 @@ func (f *FunctionDecl) Bindings() ([]*functions.Overload, error) { // are preserved in order to assist with the function resolution step. switch len(args) { case 1: - if o.unaryOp != nil && o.matchesRuntimeSignature( /* disableTypeGuards=*/ false, args...) { + if o.unaryOp != nil && o.matchesRuntimeSignature(f.disableTypeGuards, args...) { return o.unaryOp(args[0]) } case 2: - if o.binaryOp != nil && o.matchesRuntimeSignature( /* disableTypeGuards=*/ false, args...) { + if o.binaryOp != nil && o.matchesRuntimeSignature(f.disableTypeGuards, args...) { return o.binaryOp(args[0], args[1]) } } - if o.functionOp != nil && o.matchesRuntimeSignature( /* disableTypeGuards=*/ false, args...) { + if o.functionOp != nil && o.matchesRuntimeSignature(f.disableTypeGuards, args...) { return o.functionOp(args...) } // eventually this will fall through to the noSuchOverload below. @@ -775,8 +777,13 @@ func (v *VariableDecl) DeclarationIsEquivalent(other *VariableDecl) bool { return v.Name() == other.Name() && v.Type().IsEquivalentType(other.Type()) } -// VariableDeclToExprDecl converts a go-native variable declaration into a protobuf-type variable declaration. -func VariableDeclToExprDecl(v *VariableDecl) (*exprpb.Decl, error) { +// TypeVariable creates a new type identifier for use within a types.Provider +func TypeVariable(t *types.Type) *VariableDecl { + return NewVariable(t.TypeName(), types.NewTypeTypeWithParam(t)) +} + +// variableDeclToExprDecl converts a go-native variable declaration into a protobuf-type variable declaration. +func variableDeclToExprDecl(v *VariableDecl) (*exprpb.Decl, error) { varType, err := types.TypeToExprType(v.Type()) if err != nil { return nil, err @@ -784,13 +791,8 @@ func VariableDeclToExprDecl(v *VariableDecl) (*exprpb.Decl, error) { return chkdecls.NewVar(v.Name(), varType), nil } -// TypeVariable creates a new type identifier for use within a types.Provider -func TypeVariable(t *types.Type) *VariableDecl { - return NewVariable(t.TypeName(), types.NewTypeTypeWithParam(t)) -} - -// FunctionDeclToExprDecl converts a go-native function declaration into a protobuf-typed function declaration. -func FunctionDeclToExprDecl(f *FunctionDecl) (*exprpb.Decl, error) { +// functionDeclToExprDecl converts a go-native function declaration into a protobuf-typed function declaration. +func functionDeclToExprDecl(f *FunctionDecl) (*exprpb.Decl, error) { overloads := make([]*exprpb.Decl_FunctionDecl_Overload, len(f.overloads)) for i, oID := range f.overloadOrdinals { o := f.overloads[oID] diff --git a/vendor/github.com/google/cel-go/common/error.go b/vendor/github.com/google/cel-go/common/error.go index 774dcb5b4..0cf21345e 100644 --- a/vendor/github.com/google/cel-go/common/error.go +++ b/vendor/github.com/google/cel-go/common/error.go @@ -18,8 +18,6 @@ import ( "fmt" "strings" "unicode/utf8" - - "golang.org/x/text/width" ) // NewError creates an error associated with an expression id with the given message at the given location. @@ -35,18 +33,15 @@ type Error struct { } const ( - dot = "." - ind = "^" + dot = "." + ind = "^" + wideDot = "\uff0e" + wideInd = "\uff3e" // maxSnippetLength is the largest number of characters which can be rendered in an error message snippet. 
maxSnippetLength = 16384 ) -var ( - wideDot = width.Widen.String(dot) - wideInd = width.Widen.String(ind) -) - // ToDisplayString decorates the error message with the source location. func (e *Error) ToDisplayString(source Source) string { var result = fmt.Sprintf("ERROR: %s:%d:%d: %s", diff --git a/vendor/github.com/google/cel-go/common/runes/buffer.go b/vendor/github.com/google/cel-go/common/runes/buffer.go index 50aac0b27..021198224 100644 --- a/vendor/github.com/google/cel-go/common/runes/buffer.go +++ b/vendor/github.com/google/cel-go/common/runes/buffer.go @@ -127,20 +127,48 @@ var nilBuffer = &emptyBuffer{} // elements of the byte or uint16 array, and continue. The underlying storage is an rune array // containing any Unicode character. func NewBuffer(data string) Buffer { + buf, _ := newBuffer(data, false) + return buf +} + +// NewBufferAndLineOffsets returns an efficient implementation of Buffer for the given text based on +// the ranges of the encoded code points contained within, as well as returning the line offsets. +// +// Code points are represented as an array of byte, uint16, or rune. This approach ensures that +// each index represents a code point by itself without needing to use an array of rune. At first +// we assume all code points are less than or equal to '\u007f'. If this holds true, the +// underlying storage is a byte array containing only ASCII characters. If we encountered a code +// point above this range but less than or equal to '\uffff' we allocate a uint16 array, copy the +// elements of previous byte array to the uint16 array, and continue. If this holds true, the +// underlying storage is a uint16 array containing only Unicode characters in the Basic Multilingual +// Plane. If we encounter a code point above '\uffff' we allocate an rune array, copy the previous +// elements of the byte or uint16 array, and continue. The underlying storage is an rune array +// containing any Unicode character. 
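For context, a minimal sketch of the new entry point, assuming only the NewBufferAndLineOffsets signature added in this hunk; the trailing offset is one past the end of the text:

    package main

    import (
    	"fmt"

    	"github.com/google/cel-go/common/runes"
    )

    func main() {
    	// One pass over the text yields both the rune buffer and the line offsets;
    	// each offset points at the first code point following a newline.
    	_, offsets := runes.NewBufferAndLineOffsets("cel\nexpr")
    	fmt.Println(offsets) // [4 9]
    }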
+func NewBufferAndLineOffsets(data string) (Buffer, []int32) { + return newBuffer(data, true) +} + +func newBuffer(data string, lines bool) (Buffer, []int32) { if len(data) == 0 { - return nilBuffer + return nilBuffer, []int32{0} } var ( - idx = 0 - buf8 = make([]byte, 0, len(data)) + idx = 0 + off int32 = 0 + buf8 = make([]byte, 0, len(data)) buf16 []uint16 buf32 []rune + offs []int32 ) for idx < len(data) { r, s := utf8.DecodeRuneInString(data[idx:]) idx += s + if lines && r == '\n' { + offs = append(offs, off+1) + } if r < utf8.RuneSelf { buf8 = append(buf8, byte(r)) + off++ continue } if r <= 0xffff { @@ -150,6 +178,7 @@ func NewBuffer(data string) Buffer { } buf8 = nil buf16 = append(buf16, uint16(r)) + off++ goto copy16 } buf32 = make([]rune, len(buf8), len(data)) @@ -158,17 +187,25 @@ func NewBuffer(data string) Buffer { } buf8 = nil buf32 = append(buf32, r) + off++ goto copy32 } + if lines { + offs = append(offs, off+1) + } return &asciiBuffer{ arr: buf8, - } + }, offs copy16: for idx < len(data) { r, s := utf8.DecodeRuneInString(data[idx:]) idx += s + if lines && r == '\n' { + offs = append(offs, off+1) + } if r <= 0xffff { buf16 = append(buf16, uint16(r)) + off++ continue } buf32 = make([]rune, len(buf16), len(data)) @@ -177,18 +214,29 @@ copy16: } buf16 = nil buf32 = append(buf32, r) + off++ goto copy32 } + if lines { + offs = append(offs, off+1) + } return &basicBuffer{ arr: buf16, - } + }, offs copy32: for idx < len(data) { r, s := utf8.DecodeRuneInString(data[idx:]) idx += s + if lines && r == '\n' { + offs = append(offs, off+1) + } buf32 = append(buf32, r) + off++ + } + if lines { + offs = append(offs, off+1) } return &supplementalBuffer{ arr: buf32, - } + }, offs } diff --git a/vendor/github.com/google/cel-go/common/source.go b/vendor/github.com/google/cel-go/common/source.go index acf22bdf1..ec79cb545 100644 --- a/vendor/github.com/google/cel-go/common/source.go +++ b/vendor/github.com/google/cel-go/common/source.go @@ -15,9 +15,6 @@ package common import ( - "strings" - "unicode/utf8" - "github.com/google/cel-go/common/runes" exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" @@ -80,17 +77,11 @@ func NewTextSource(text string) Source { // NewStringSource creates a new Source from the given contents and description. func NewStringSource(contents string, description string) Source { // Compute line offsets up front as they are referred to frequently. 
- lines := strings.Split(contents, "\n") - offsets := make([]int32, len(lines)) - var offset int32 - for i, line := range lines { - offset = offset + int32(utf8.RuneCountInString(line)) + 1 - offsets[int32(i)] = offset - } + buf, offs := runes.NewBufferAndLineOffsets(contents) return &sourceImpl{ - Buffer: runes.NewBuffer(contents), + Buffer: buf, description: description, - lineOffsets: offsets, + lineOffsets: offs, } } @@ -172,9 +163,8 @@ func (s *sourceImpl) findLine(characterOffset int32) (int32, int32) { for _, lineOffset := range s.lineOffsets { if lineOffset > characterOffset { break - } else { - line++ } + line++ } if line == 1 { return line, 0 diff --git a/vendor/github.com/google/cel-go/common/stdlib/BUILD.bazel b/vendor/github.com/google/cel-go/common/stdlib/BUILD.bazel index c130a93f6..b55f45215 100644 --- a/vendor/github.com/google/cel-go/common/stdlib/BUILD.bazel +++ b/vendor/github.com/google/cel-go/common/stdlib/BUILD.bazel @@ -12,7 +12,6 @@ go_library( ], importpath = "github.com/google/cel-go/common/stdlib", deps = [ - "//checker/decls:go_default_library", "//common/decls:go_default_library", "//common/functions:go_default_library", "//common/operators:go_default_library", @@ -20,6 +19,5 @@ go_library( "//common/types:go_default_library", "//common/types/ref:go_default_library", "//common/types/traits:go_default_library", - "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library", ], ) \ No newline at end of file diff --git a/vendor/github.com/google/cel-go/common/stdlib/standard.go b/vendor/github.com/google/cel-go/common/stdlib/standard.go index d02cb64bf..1550c1786 100644 --- a/vendor/github.com/google/cel-go/common/stdlib/standard.go +++ b/vendor/github.com/google/cel-go/common/stdlib/standard.go @@ -23,15 +23,11 @@ import ( "github.com/google/cel-go/common/types" "github.com/google/cel-go/common/types/ref" "github.com/google/cel-go/common/types/traits" - - exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" ) var ( stdFunctions []*decls.FunctionDecl - stdFnDecls []*exprpb.Decl stdTypes []*decls.VariableDecl - stdTypeDecls []*exprpb.Decl ) func init() { @@ -55,15 +51,6 @@ func init() { decls.TypeVariable(types.UintType), } - stdTypeDecls = make([]*exprpb.Decl, 0, len(stdTypes)) - for _, stdType := range stdTypes { - typeVar, err := decls.VariableDeclToExprDecl(stdType) - if err != nil { - panic(err) - } - stdTypeDecls = append(stdTypeDecls, typeVar) - } - stdFunctions = []*decls.FunctionDecl{ // Logical operators. Special-cased within the interpreter. // Note, the singleton binding prevents extensions from overriding the operator behavior. @@ -576,18 +563,6 @@ func init() { decls.MemberOverload(overloads.DurationToMilliseconds, argTypes(types.DurationType), types.IntType)), } - - stdFnDecls = make([]*exprpb.Decl, 0, len(stdFunctions)) - for _, fn := range stdFunctions { - if fn.IsDeclarationDisabled() { - continue - } - ed, err := decls.FunctionDeclToExprDecl(fn) - if err != nil { - panic(err) - } - stdFnDecls = append(stdFnDecls, ed) - } } // Functions returns the set of standard library function declarations and definitions for CEL. @@ -595,27 +570,11 @@ func Functions() []*decls.FunctionDecl { return stdFunctions } -// FunctionExprDecls returns the legacy style protobuf-typed declarations for all functions and overloads -// in the CEL standard environment. -// -// Deprecated: use Functions -func FunctionExprDecls() []*exprpb.Decl { - return stdFnDecls -} - // Types returns the set of standard library types for CEL. 
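A short illustrative sketch of the decls-native accessors that remain after this cleanup, assuming only the exported stdlib.Functions and stdlib.Types shown here:

    package main

    import (
    	"fmt"

    	"github.com/google/cel-go/common/stdlib"
    )

    func main() {
    	// Enumerate the standard library through the decls-native API; the
    	// protobuf-typed FunctionExprDecls/TypeExprDecls accessors were removed.
    	for _, fn := range stdlib.Functions() {
    		fmt.Println(fn.Name())
    	}
    	fmt.Println(len(stdlib.Types()), "standard type identifiers")
    }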
func Types() []*decls.VariableDecl { return stdTypes } -// TypeExprDecls returns the legacy style protobuf-typed declarations for all types in the CEL -// standard environment. -// -// Deprecated: use Types -func TypeExprDecls() []*exprpb.Decl { - return stdTypeDecls -} - func notStrictlyFalse(value ref.Val) ref.Val { if types.IsBool(value) { return value diff --git a/vendor/github.com/google/cel-go/common/types/BUILD.bazel b/vendor/github.com/google/cel-go/common/types/BUILD.bazel index b5e44ffbf..8f010fae4 100644 --- a/vendor/github.com/google/cel-go/common/types/BUILD.bazel +++ b/vendor/github.com/google/cel-go/common/types/BUILD.bazel @@ -40,10 +40,12 @@ go_library( "//common/types/ref:go_default_library", "//common/types/traits:go_default_library", "@com_github_stoewer_go_strcase//:go_default_library", + "@dev_cel_expr//:expr", "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library", "@org_golang_google_protobuf//encoding/protojson:go_default_library", "@org_golang_google_protobuf//proto:go_default_library", "@org_golang_google_protobuf//reflect/protoreflect:go_default_library", + "@org_golang_google_protobuf//types/dynamicpb:go_default_library", "@org_golang_google_protobuf//types/known/anypb:go_default_library", "@org_golang_google_protobuf//types/known/durationpb:go_default_library", "@org_golang_google_protobuf//types/known/structpb:go_default_library", diff --git a/vendor/github.com/google/cel-go/common/types/bytes.go b/vendor/github.com/google/cel-go/common/types/bytes.go index 5838755f8..7e813e291 100644 --- a/vendor/github.com/google/cel-go/common/types/bytes.go +++ b/vendor/github.com/google/cel-go/common/types/bytes.go @@ -58,7 +58,17 @@ func (b Bytes) Compare(other ref.Val) ref.Val { // ConvertToNative implements the ref.Val interface method. func (b Bytes) ConvertToNative(typeDesc reflect.Type) (any, error) { switch typeDesc.Kind() { - case reflect.Array, reflect.Slice: + case reflect.Array: + if len(b) != typeDesc.Len() { + return nil, fmt.Errorf("[%d]byte not assignable to [%d]byte array", len(b), typeDesc.Len()) + } + refArrPtr := reflect.New(reflect.ArrayOf(len(b), typeDesc.Elem())) + refArr := refArrPtr.Elem() + for i, byt := range b { + refArr.Index(i).Set(reflect.ValueOf(byt).Convert(typeDesc.Elem())) + } + return refArr.Interface(), nil + case reflect.Slice: return reflect.ValueOf(b).Convert(typeDesc).Interface(), nil case reflect.Ptr: switch typeDesc { diff --git a/vendor/github.com/google/cel-go/common/types/list.go b/vendor/github.com/google/cel-go/common/types/list.go index 06f48dde7..ca47d39fe 100644 --- a/vendor/github.com/google/cel-go/common/types/list.go +++ b/vendor/github.com/google/cel-go/common/types/list.go @@ -256,6 +256,15 @@ func (l *baseList) IsZeroValue() bool { return l.size == 0 } +// Fold calls the FoldEntry method for each (index, value) pair in the list. +func (l *baseList) Fold(f traits.Folder) { + for i := 0; i < l.size; i++ { + if !f.FoldEntry(i, l.get(i)) { + break + } + } +} + // Iterator implements the traits.Iterable interface method. func (l *baseList) Iterator() traits.Iterator { return newListIterator(l) @@ -433,6 +442,15 @@ func (l *concatList) IsZeroValue() bool { return l.Size().(Int) == 0 } +// Fold calls the FoldEntry method for each (index, value) pair in the list. +func (l *concatList) Fold(f traits.Folder) { + for i := Int(0); i < l.Size().(Int); i++ { + if !f.FoldEntry(i, l.Get(i)) { + break + } + } +} + // Iterator implements the traits.Iterable interface method. 
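A hypothetical Folder, sketched against the Fold methods added in this hunk together with the traits.Folder and types.ToFoldableList helpers introduced elsewhere in this bump:

    package main

    import (
    	"fmt"

    	"github.com/google/cel-go/common/types"
    	"github.com/google/cel-go/common/types/traits"
    )

    // collectFolder gathers every value visited by Fold.
    type collectFolder struct {
    	vals []any
    }

    // Compile-time check that collectFolder satisfies the new traits.Folder interface.
    var _ traits.Folder = (*collectFolder)(nil)

    // FoldEntry records the value and returns true so the fold keeps going;
    // returning false would terminate the fold early, which is what lets the
    // short-circuiting macros avoid a full pass over the range.
    func (c *collectFolder) FoldEntry(key, val any) bool {
    	c.vals = append(c.vals, val)
    	return true
    }

    func main() {
    	l := types.NewDynamicList(types.DefaultTypeAdapter, []string{"a", "b", "c"})
    	f := &collectFolder{}
    	types.ToFoldableList(l).Fold(f)
    	fmt.Println(f.vals) // [a b c]
    }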
func (l *concatList) Iterator() traits.Iterator { return newListIterator(l) @@ -527,3 +545,30 @@ func IndexOrError(index ref.Val) (int, error) { return -1, fmt.Errorf("unsupported index type '%s' in list", index.Type()) } } + +// ToFoldableList will create a Foldable version of a list suitable for key-value pair iteration. +// +// For values which are already Foldable, this call is a no-op. For all other values, the fold is +// driven via the Size() and Get() calls which means that the folding will function, but take a +// performance hit. +func ToFoldableList(l traits.Lister) traits.Foldable { + if f, ok := l.(traits.Foldable); ok { + return f + } + return interopFoldableList{Lister: l} +} + +type interopFoldableList struct { + traits.Lister +} + +// Fold implements the traits.Foldable interface method and performs an iteration over the +// range of elements of the list. +func (l interopFoldableList) Fold(f traits.Folder) { + sz := l.Size().(Int) + for i := Int(0); i < sz; i++ { + if !f.FoldEntry(i, l.Get(i)) { + break + } + } +} diff --git a/vendor/github.com/google/cel-go/common/types/map.go b/vendor/github.com/google/cel-go/common/types/map.go index 739b7aab0..cb6cce78b 100644 --- a/vendor/github.com/google/cel-go/common/types/map.go +++ b/vendor/github.com/google/cel-go/common/types/map.go @@ -94,6 +94,24 @@ func NewProtoMap(adapter Adapter, value *pb.Map) traits.Mapper { } } +// NewMutableMap constructs a mutable map from an adapter and a set of map values. +func NewMutableMap(adapter Adapter, mutableValues map[ref.Val]ref.Val) traits.MutableMapper { + mutableCopy := make(map[ref.Val]ref.Val, len(mutableValues)) + for k, v := range mutableValues { + mutableCopy[k] = v + } + m := &mutableMap{ + baseMap: &baseMap{ + Adapter: adapter, + mapAccessor: newRefValMapAccessor(mutableCopy), + value: mutableCopy, + size: len(mutableCopy), + }, + mutableValues: mutableCopy, + } + return m +} + // mapAccessor is a private interface for finding values within a map and iterating over the keys. // This interface implements portions of the API surface area required by the traits.Mapper // interface. @@ -105,6 +123,9 @@ type mapAccessor interface { // Iterator returns an Iterator over the map key set. Iterator() traits.Iterator + + // Fold calls the FoldEntry method for each (key, value) pair in the map. + Fold(traits.Folder) } // baseMap is a reflection based map implementation designed to handle a variety of map-like types. @@ -307,6 +328,28 @@ func (m *baseMap) Value() any { return m.value } +// mutableMap holds onto a set of mutable values which are used for intermediate computations. +type mutableMap struct { + *baseMap + mutableValues map[ref.Val]ref.Val +} + +// Insert implements the traits.MutableMapper interface method, returning true if the key insertion +// succeeds. +func (m *mutableMap) Insert(k, v ref.Val) ref.Val { + if _, found := m.Find(k); found { + return NewErr("insert failed: key %v already exists", k) + } + m.mutableValues[k] = v + return m +} + +// ToImmutableMap implements the traits.MutableMapper interface method, converting a mutable map +// an immutable map implementation. +func (m *mutableMap) ToImmutableMap() traits.Mapper { + return NewRefValMap(m.Adapter, m.mutableValues) +} + func newJSONStructAccessor(adapter Adapter, st map[string]*structpb.Value) mapAccessor { return &jsonStructAccessor{ Adapter: adapter, @@ -350,6 +393,15 @@ func (a *jsonStructAccessor) Iterator() traits.Iterator { } } +// Fold calls the FoldEntry method for each (key, value) pair in the map. 
+func (a *jsonStructAccessor) Fold(f traits.Folder) { + for k, v := range a.st { + if !f.FoldEntry(k, v) { + break + } + } +} + func newReflectMapAccessor(adapter Adapter, value reflect.Value) mapAccessor { keyType := value.Type().Key() return &reflectMapAccessor{ @@ -424,6 +476,16 @@ func (m *reflectMapAccessor) Iterator() traits.Iterator { } } +// Fold calls the FoldEntry method for each (key, value) pair in the map. +func (m *reflectMapAccessor) Fold(f traits.Folder) { + mapRange := m.refValue.MapRange() + for mapRange.Next() { + if !f.FoldEntry(mapRange.Key().Interface(), mapRange.Value().Interface()) { + break + } + } +} + func newRefValMapAccessor(mapVal map[ref.Val]ref.Val) mapAccessor { return &refValMapAccessor{mapVal: mapVal} } @@ -477,6 +539,15 @@ func (a *refValMapAccessor) Iterator() traits.Iterator { } } +// Fold calls the FoldEntry method for each (key, value) pair in the map. +func (a *refValMapAccessor) Fold(f traits.Folder) { + for k, v := range a.mapVal { + if !f.FoldEntry(k, v) { + break + } + } +} + func newStringMapAccessor(strMap map[string]string) mapAccessor { return &stringMapAccessor{mapVal: strMap} } @@ -515,6 +586,15 @@ func (a *stringMapAccessor) Iterator() traits.Iterator { } } +// Fold calls the FoldEntry method for each (key, value) pair in the map. +func (a *stringMapAccessor) Fold(f traits.Folder) { + for k, v := range a.mapVal { + if !f.FoldEntry(k, v) { + break + } + } +} + func newStringIfaceMapAccessor(adapter Adapter, mapVal map[string]any) mapAccessor { return &stringIfaceMapAccessor{ Adapter: adapter, @@ -557,6 +637,15 @@ func (a *stringIfaceMapAccessor) Iterator() traits.Iterator { } } +// Fold calls the FoldEntry method for each (key, value) pair in the map. +func (a *stringIfaceMapAccessor) Fold(f traits.Folder) { + for k, v := range a.mapVal { + if !f.FoldEntry(k, v) { + break + } + } +} + // protoMap is a specialized, separate implementation of the traits.Mapper interfaces tailored to // accessing protoreflect.Map values. type protoMap struct { @@ -769,6 +858,13 @@ func (m *protoMap) Iterator() traits.Iterator { } } +// Fold calls the FoldEntry method for each (key, value) pair in the map. +func (m *protoMap) Fold(f traits.Folder) { + m.value.Range(func(k protoreflect.MapKey, v protoreflect.Value) bool { + return f.FoldEntry(k.Interface(), v.Interface()) + }) +} + // Size returns the number of entries in the protoreflect.Map. func (m *protoMap) Size() ref.Val { return Int(m.value.Len()) @@ -852,3 +948,55 @@ func (it *stringKeyIterator) Next() ref.Val { } return nil } + +// ToFoldableMap will create a Foldable version of a map suitable for key-value pair iteration. +// +// For values which are already Foldable, this call is a no-op. For all other values, the fold +// is driven via the Iterator HasNext() and Next() calls as well as the map's Get() method +// which means that the folding will function, but take a performance hit. +func ToFoldableMap(m traits.Mapper) traits.Foldable { + if f, ok := m.(traits.Foldable); ok { + return f + } + return interopFoldableMap{Mapper: m} +} + +type interopFoldableMap struct { + traits.Mapper +} + +func (m interopFoldableMap) Fold(f traits.Folder) { + it := m.Iterator() + for it.HasNext() == True { + k := it.Next() + if !f.FoldEntry(k, m.Get(k)) { + break + } + } +} + +// InsertMapKeyValue inserts a key, value pair into the target map if the target map does not +// already contain the given key. +// +// If the map is mutable, it is modified in-place per the MutableMapper contract. 
+// If the map is not mutable, a copy containing the new key, value pair is made. +func InsertMapKeyValue(m traits.Mapper, k, v ref.Val) ref.Val { + if mutable, ok := m.(traits.MutableMapper); ok { + return mutable.Insert(k, v) + } + + // Otherwise perform the slow version of the insertion which makes a copy of the incoming map. + if _, found := m.Find(k); !found { + size := m.Size().(Int) + copy := make(map[ref.Val]ref.Val, size+1) + copy[k] = v + it := m.Iterator() + for it.HasNext() == True { + nextK := it.Next() + nextV := m.Get(nextK) + copy[nextK] = nextV + } + return DefaultTypeAdapter.NativeToValue(copy) + } + return NewErr("insert failed: key %v already exists", k) +} diff --git a/vendor/github.com/google/cel-go/common/types/null.go b/vendor/github.com/google/cel-go/common/types/null.go index 926ca3dc9..36514ff20 100644 --- a/vendor/github.com/google/cel-go/common/types/null.go +++ b/vendor/github.com/google/cel-go/common/types/null.go @@ -35,6 +35,8 @@ var ( // golang reflect type for Null values. nullReflectType = reflect.TypeOf(NullValue) + + protoIfaceType = reflect.TypeOf((*proto.Message)(nil)).Elem() ) // ConvertToNative implements ref.Val.ConvertToNative. @@ -61,8 +63,14 @@ func (n Null) ConvertToNative(typeDesc reflect.Type) (any, error) { return structpb.NewNullValue(), nil case boolWrapperType, byteWrapperType, doubleWrapperType, floatWrapperType, int32WrapperType, int64WrapperType, stringWrapperType, uint32WrapperType, - uint64WrapperType: + uint64WrapperType, durationValueType, timestampValueType, protoIfaceType: return nil, nil + case jsonListValueType, jsonStructType: + // skip handling + default: + if typeDesc.Implements(protoIfaceType) { + return nil, nil + } } case reflect.Interface: nv := n.Value() diff --git a/vendor/github.com/google/cel-go/common/types/pb/type.go b/vendor/github.com/google/cel-go/common/types/pb/type.go index 6cc95c276..bdd474c95 100644 --- a/vendor/github.com/google/cel-go/common/types/pb/type.go +++ b/vendor/github.com/google/cel-go/common/types/pb/type.go @@ -427,22 +427,49 @@ func unwrap(desc description, msg proto.Message) (any, bool, error) { return structpb.NullValue_NULL_VALUE, true, nil } case *wrapperspb.BoolValue: + if v == nil { + return nil, true, nil + } return v.GetValue(), true, nil case *wrapperspb.BytesValue: + if v == nil { + return nil, true, nil + } return v.GetValue(), true, nil case *wrapperspb.DoubleValue: + if v == nil { + return nil, true, nil + } return v.GetValue(), true, nil case *wrapperspb.FloatValue: + if v == nil { + return nil, true, nil + } return float64(v.GetValue()), true, nil case *wrapperspb.Int32Value: + if v == nil { + return nil, true, nil + } return int64(v.GetValue()), true, nil case *wrapperspb.Int64Value: + if v == nil { + return nil, true, nil + } return v.GetValue(), true, nil case *wrapperspb.StringValue: + if v == nil { + return nil, true, nil + } return v.GetValue(), true, nil case *wrapperspb.UInt32Value: + if v == nil { + return nil, true, nil + } return uint64(v.GetValue()), true, nil case *wrapperspb.UInt64Value: + if v == nil { + return nil, true, nil + } return v.GetValue(), true, nil } return msg, false, nil diff --git a/vendor/github.com/google/cel-go/common/types/provider.go b/vendor/github.com/google/cel-go/common/types/provider.go index c5ff05fdb..936a4e28b 100644 --- a/vendor/github.com/google/cel-go/common/types/provider.go +++ b/vendor/github.com/google/cel-go/common/types/provider.go @@ -585,6 +585,14 @@ func nativeToValue(a Adapter, value any) (ref.Val, bool) { refKind := 
refValue.Kind() switch refKind { case reflect.Array, reflect.Slice: + if refValue.Type().Elem() == reflect.TypeOf(byte(0)) { + if refValue.CanAddr() { + return Bytes(refValue.Bytes()), true + } + tmp := reflect.New(refValue.Type()) + tmp.Elem().Set(refValue) + return Bytes(tmp.Elem().Bytes()), true + } return NewDynamicList(a, v), true case reflect.Map: return NewDynamicMap(a, v), true diff --git a/vendor/github.com/google/cel-go/common/types/traits/iterator.go b/vendor/github.com/google/cel-go/common/types/traits/iterator.go index 42dd371aa..91c10f08f 100644 --- a/vendor/github.com/google/cel-go/common/types/traits/iterator.go +++ b/vendor/github.com/google/cel-go/common/types/traits/iterator.go @@ -34,3 +34,16 @@ type Iterator interface { // Next returns the next element. Next() ref.Val } + +// Foldable aggregate types support iteration over (key, value) or (index, value) pairs. +type Foldable interface { + // Fold invokes the Folder.FoldEntry for all entries in the type + Fold(Folder) +} + +// Folder performs a fold on a given entry and indicates whether to continue folding. +type Folder interface { + // FoldEntry indicates the key, value pair associated with the entry. + // If the output is true, continue folding. Otherwise, terminate the fold. + FoldEntry(key, val any) bool +} diff --git a/vendor/github.com/google/cel-go/common/types/traits/lister.go b/vendor/github.com/google/cel-go/common/types/traits/lister.go index 5cf2593f3..e54781a60 100644 --- a/vendor/github.com/google/cel-go/common/types/traits/lister.go +++ b/vendor/github.com/google/cel-go/common/types/traits/lister.go @@ -27,6 +27,9 @@ type Lister interface { } // MutableLister interface which emits an immutable result after an intermediate computation. +// +// Note, this interface is intended only to be used within Comprehensions where the mutable +// value is not directly observable within the user-authored CEL expression. type MutableLister interface { Lister ToImmutableList() Lister diff --git a/vendor/github.com/google/cel-go/common/types/traits/mapper.go b/vendor/github.com/google/cel-go/common/types/traits/mapper.go index 2f7c919a8..d13333f3f 100644 --- a/vendor/github.com/google/cel-go/common/types/traits/mapper.go +++ b/vendor/github.com/google/cel-go/common/types/traits/mapper.go @@ -31,3 +31,18 @@ type Mapper interface { // (Unknown|Err, false). Find(key ref.Val) (ref.Val, bool) } + +// MutableMapper interface which emits an immutable result after an intermediate computation. +// +// Note, this interface is intended only to be used within Comprehensions where the mutable +// value is not directly observable within the user-authored CEL expression. +type MutableMapper interface { + Mapper + + // Insert a key, value pair into the map, returning the map if the insert is successful + // and an error if key already exists in the mutable map. + Insert(k, v ref.Val) ref.Val + + // ToImmutableMap converts a mutable map into an immutable map. + ToImmutableMap() Mapper +} diff --git a/vendor/github.com/google/cel-go/common/types/traits/traits.go b/vendor/github.com/google/cel-go/common/types/traits/traits.go index 6da3e6a3e..51a09df56 100644 --- a/vendor/github.com/google/cel-go/common/types/traits/traits.go +++ b/vendor/github.com/google/cel-go/common/types/traits/traits.go @@ -59,6 +59,21 @@ const ( // SizerType types support the size() method. SizerType - // SubtractorType type support '-' operations. + // SubtractorType types support '-' operations. 
SubtractorType + + // FoldableType types support comprehensions v2 macros which iterate over (key, value) pairs. + FoldableType +) + +const ( + // ListerType supports a set of traits necessary for list operations. + // + // The ListerType is syntactic sugar and not intended to be a perfect reflection of all List operators. + ListerType = AdderType | ContainerType | IndexerType | IterableType | SizerType + + // MapperType supports a set of traits necessary for map operations. + // + // The MapperType is syntactic sugar and not intended to be a perfect reflection of all Map operators. + MapperType = ContainerType | IndexerType | IterableType | SizerType ) diff --git a/vendor/github.com/google/cel-go/common/types/types.go b/vendor/github.com/google/cel-go/common/types/types.go index 6c3d5f719..1c5b6c40c 100644 --- a/vendor/github.com/google/cel-go/common/types/types.go +++ b/vendor/github.com/google/cel-go/common/types/types.go @@ -19,10 +19,13 @@ import ( "reflect" "strings" + "google.golang.org/protobuf/proto" + chkdecls "github.com/google/cel-go/checker/decls" "github.com/google/cel-go/common/types/ref" "github.com/google/cel-go/common/types/traits" + celpb "cel.dev/expr" exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" ) @@ -666,85 +669,99 @@ func TypeToExprType(t *Type) (*exprpb.Type, error) { // ExprTypeToType converts a protobuf CEL type representation to a CEL-native type representation. func ExprTypeToType(t *exprpb.Type) (*Type, error) { + return AlphaProtoAsType(t) +} + +// AlphaProtoAsType converts a CEL v1alpha1.Type protobuf type to a CEL-native type representation. +func AlphaProtoAsType(t *exprpb.Type) (*Type, error) { + canonical := &celpb.Type{} + if err := convertProto(t, canonical); err != nil { + return nil, err + } + return ProtoAsType(canonical) +} + +// ProtoAsType converts a canonical CEL celpb.Type protobuf type to a CEL-native type representation. 
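A sketch of the unchanged public round trip, assuming only the exported TypeToExprType and ExprTypeToType entry points; the latter now funnels through AlphaProtoAsType and ProtoAsType:

    package main

    import (
    	"fmt"

    	"github.com/google/cel-go/common/types"
    )

    func main() {
    	listOfString := types.NewListType(types.StringType)
    	// CEL-native type to its v1alpha1 proto form.
    	pbType, err := types.TypeToExprType(listOfString)
    	if err != nil {
    		panic(err)
    	}
    	// Proto form back to CEL-native; internally converted to the canonical
    	// cel.dev/expr representation first.
    	roundTripped, err := types.ExprTypeToType(pbType)
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println(roundTripped.IsEquivalentType(listOfString)) // true
    }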
+func ProtoAsType(t *celpb.Type) (*Type, error) { switch t.GetTypeKind().(type) { - case *exprpb.Type_Dyn: + case *celpb.Type_Dyn: return DynType, nil - case *exprpb.Type_AbstractType_: + case *celpb.Type_AbstractType_: paramTypes := make([]*Type, len(t.GetAbstractType().GetParameterTypes())) for i, p := range t.GetAbstractType().GetParameterTypes() { - pt, err := ExprTypeToType(p) + pt, err := ProtoAsType(p) if err != nil { return nil, err } paramTypes[i] = pt } return NewOpaqueType(t.GetAbstractType().GetName(), paramTypes...), nil - case *exprpb.Type_ListType_: - et, err := ExprTypeToType(t.GetListType().GetElemType()) + case *celpb.Type_ListType_: + et, err := ProtoAsType(t.GetListType().GetElemType()) if err != nil { return nil, err } return NewListType(et), nil - case *exprpb.Type_MapType_: - kt, err := ExprTypeToType(t.GetMapType().GetKeyType()) + case *celpb.Type_MapType_: + kt, err := ProtoAsType(t.GetMapType().GetKeyType()) if err != nil { return nil, err } - vt, err := ExprTypeToType(t.GetMapType().GetValueType()) + vt, err := ProtoAsType(t.GetMapType().GetValueType()) if err != nil { return nil, err } return NewMapType(kt, vt), nil - case *exprpb.Type_MessageType: + case *celpb.Type_MessageType: return NewObjectType(t.GetMessageType()), nil - case *exprpb.Type_Null: + case *celpb.Type_Null: return NullType, nil - case *exprpb.Type_Primitive: + case *celpb.Type_Primitive: switch t.GetPrimitive() { - case exprpb.Type_BOOL: + case celpb.Type_BOOL: return BoolType, nil - case exprpb.Type_BYTES: + case celpb.Type_BYTES: return BytesType, nil - case exprpb.Type_DOUBLE: + case celpb.Type_DOUBLE: return DoubleType, nil - case exprpb.Type_INT64: + case celpb.Type_INT64: return IntType, nil - case exprpb.Type_STRING: + case celpb.Type_STRING: return StringType, nil - case exprpb.Type_UINT64: + case celpb.Type_UINT64: return UintType, nil default: return nil, fmt.Errorf("unsupported primitive type: %v", t) } - case *exprpb.Type_TypeParam: + case *celpb.Type_TypeParam: return NewTypeParamType(t.GetTypeParam()), nil - case *exprpb.Type_Type: + case *celpb.Type_Type: if t.GetType().GetTypeKind() != nil { - p, err := ExprTypeToType(t.GetType()) + p, err := ProtoAsType(t.GetType()) if err != nil { return nil, err } return NewTypeTypeWithParam(p), nil } return TypeType, nil - case *exprpb.Type_WellKnown: + case *celpb.Type_WellKnown: switch t.GetWellKnown() { - case exprpb.Type_ANY: + case celpb.Type_ANY: return AnyType, nil - case exprpb.Type_DURATION: + case celpb.Type_DURATION: return DurationType, nil - case exprpb.Type_TIMESTAMP: + case celpb.Type_TIMESTAMP: return TimestampType, nil default: return nil, fmt.Errorf("unsupported well-known type: %v", t) } - case *exprpb.Type_Wrapper: - t, err := ExprTypeToType(&exprpb.Type{TypeKind: &exprpb.Type_Primitive{Primitive: t.GetWrapper()}}) + case *celpb.Type_Wrapper: + t, err := ProtoAsType(&celpb.Type{TypeKind: &celpb.Type_Primitive{Primitive: t.GetWrapper()}}) if err != nil { return nil, err } return NewNullableType(t), nil - case *exprpb.Type_Error: + case *celpb.Type_Error: return ErrorType, nil default: return nil, fmt.Errorf("unsupported type: %v", t) @@ -776,6 +793,23 @@ func maybeForeignType(t ref.Type) *Type { return NewObjectType(t.TypeName(), traitMask) } +func convertProto(src, dst proto.Message) error { + pb, err := proto.Marshal(src) + if err != nil { + return err + } + err = proto.Unmarshal(pb, dst) + return err +} + +func primitiveType(primitive celpb.Type_PrimitiveType) *celpb.Type { + return &celpb.Type{ + TypeKind: 
&celpb.Type_Primitive{ + Primitive: primitive, + }, + } +} + var ( checkedWellKnowns = map[string]*Type{ // Wrapper types. @@ -820,4 +854,11 @@ var ( } structTypeTraitMask = traits.FieldTesterType | traits.IndexerType + + boolType = primitiveType(celpb.Type_BOOL) + bytesType = primitiveType(celpb.Type_BYTES) + doubleType = primitiveType(celpb.Type_DOUBLE) + intType = primitiveType(celpb.Type_INT64) + stringType = primitiveType(celpb.Type_STRING) + uintType = primitiveType(celpb.Type_UINT64) ) diff --git a/vendor/github.com/google/cel-go/ext/BUILD.bazel b/vendor/github.com/google/cel-go/ext/BUILD.bazel index db223da2f..1fece7006 100644 --- a/vendor/github.com/google/cel-go/ext/BUILD.bazel +++ b/vendor/github.com/google/cel-go/ext/BUILD.bazel @@ -24,6 +24,7 @@ go_library( "//cel:go_default_library", "//checker:go_default_library", "//common/ast:go_default_library", + "//common/decls:go_default_library", "//common/overloads:go_default_library", "//common/operators:go_default_library", "//common/types:go_default_library", @@ -31,6 +32,7 @@ go_library( "//common/types/ref:go_default_library", "//common/types/traits:go_default_library", "//interpreter:go_default_library", + "//parser:go_default_library", "@org_golang_google_protobuf//proto:go_default_library", "@org_golang_google_protobuf//reflect/protoreflect:go_default_library", "@org_golang_google_protobuf//types/known/structpb", @@ -61,8 +63,8 @@ go_test( "//common/types/ref:go_default_library", "//common/types/traits:go_default_library", "//test:go_default_library", - "//test/proto2pb:go_default_library", - "//test/proto3pb:go_default_library", + "//test/proto2pb:go_default_library", + "//test/proto3pb:go_default_library", "@org_golang_google_protobuf//proto:go_default_library", "@org_golang_google_protobuf//types/known/wrapperspb:go_default_library", "@org_golang_google_protobuf//encoding/protojson:go_default_library", diff --git a/vendor/github.com/google/cel-go/ext/README.md b/vendor/github.com/google/cel-go/ext/README.md index 2fac0cb22..07e544d0d 100644 --- a/vendor/github.com/google/cel-go/ext/README.md +++ b/vendor/github.com/google/cel-go/ext/README.md @@ -3,12 +3,12 @@ CEL extensions are a related set of constants, functions, macros, or other features which may not be covered by the core CEL spec. -## Bindings +## Bindings Returns a cel.EnvOption to configure support for local variable bindings in expressions. -# Cel.Bind +### Cel.Bind Binds a simple identifier to an initialization expression which may be used in a subsequenct result expression. Bindings may also be nested within each @@ -19,11 +19,11 @@ other. Examples: cel.bind(a, 'hello', - cel.bind(b, 'world', a + b + b + a)) // "helloworldworldhello" + cel.bind(b, 'world', a + b + b + a)) // "helloworldworldhello" // Avoid a list allocation within the exists comprehension. cel.bind(valid_values, [a, b, c], - [d, e, f].exists(elem, elem in valid_values)) + [d, e, f].exists(elem, elem in valid_values)) Local bindings are not guaranteed to be evaluated before use. @@ -100,7 +100,8 @@ argument. Simple numeric and list literals are supported as valid argument types; however, other literals will be flagged as errors during macro expansion. If the argument expression does not resolve to a numeric or list(numeric) type during type-checking, or during runtime then an error -will be produced. If a list argument is empty, this too will produce an error. +will be produced. If a list argument is empty, this too will produce an +error. math.least(, ...) 
-> @@ -117,6 +118,244 @@ Examples: math.least(a, b) // check-time error if a or b is non-numeric math.least(dyn('string')) // runtime error +### Math.BitOr + +Introduced at version: 1 + +Performs a bitwise-OR operation over two int or uint values. + + math.bitOr(, ) -> + math.bitOr(, ) -> + +Examples: + + math.bitOr(1u, 2u) // returns 3u + math.bitOr(-2, -4) // returns -2 + +### Math.BitAnd + +Introduced at version: 1 + +Performs a bitwise-AND operation over two int or uint values. + + math.bitAnd(, ) -> + math.bitAnd(, ) -> + +Examples: + + math.bitAnd(3u, 2u) // return 2u + math.bitAnd(3, 5) // returns 3 + math.bitAnd(-3, -5) // returns -7 + +### Math.BitXor + +Introduced at version: 1 + + math.bitXor(, ) -> + math.bitXor(, ) -> + +Performs a bitwise-XOR operation over two int or uint values. + +Examples: + + math.bitXor(3u, 5u) // returns 6u + math.bitXor(1, 3) // returns 2 + +### Math.BitNot + +Introduced at version: 1 + +Function which accepts a single int or uint and performs a bitwise-NOT +ones-complement of the given binary value. + + math.bitNot() -> + math.bitNot() -> + +Examples + + math.bitNot(1) // returns -1 + math.bitNot(-1) // return 0 + math.bitNot(0u) // returns 18446744073709551615u + +### Math.BitShiftLeft + +Introduced at version: 1 + +Perform a left shift of bits on the first parameter, by the amount of bits +specified in the second parameter. The first parameter is either a uint or +an int. The second parameter must be an int. + +When the second parameter is 64 or greater, 0 will be always be returned +since the number of bits shifted is greater than or equal to the total bit +length of the number being shifted. Negative valued bit shifts will result +in a runtime error. + + math.bitShiftLeft(, ) -> + math.bitShiftLeft(, ) -> + +Examples + + math.bitShiftLeft(1, 2) // returns 4 + math.bitShiftLeft(-1, 2) // returns -4 + math.bitShiftLeft(1u, 2) // return 4u + math.bitShiftLeft(1u, 200) // returns 0u + +### Math.BitShiftRight + +Introduced at version: 1 + +Perform a right shift of bits on the first parameter, by the amount of bits +specified in the second parameter. The first parameter is either a uint or +an int. The second parameter must be an int. + +When the second parameter is 64 or greater, 0 will always be returned since +the number of bits shifted is greater than or equal to the total bit length +of the number being shifted. Negative valued bit shifts will result in a +runtime error. + +The sign bit extension will not be preserved for this operation: vacant bits +on the left are filled with 0. + + math.bitShiftRight(, ) -> + math.bitShiftRight(, ) -> + +Examples + + math.bitShiftRight(1024, 2) // returns 256 + math.bitShiftRight(1024u, 2) // returns 256u + math.bitShiftRight(1024u, 64) // returns 0u + +### Math.Ceil + +Introduced at version: 1 + +Compute the ceiling of a double value. + + math.ceil() -> + +Examples: + + math.ceil(1.2) // returns 2.0 + math.ceil(-1.2) // returns -1.0 + +### Math.Floor + +Introduced at version: 1 + +Compute the floor of a double value. + + math.floor() -> + +Examples: + + math.floor(1.2) // returns 1.0 + math.floor(-1.2) // returns -2.0 + +### Math.Round + +Introduced at version: 1 + +Rounds the double value to the nearest whole number with ties rounding away +from zero, e.g. 1.5 -> 2.0, -1.5 -> -2.0. 
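As a rough usage sketch, with the wiring assumed and the expressions drawn from the examples in this document, the version-1 math helpers can be evaluated through cel-go as follows:

    package main

    import (
    	"fmt"

    	"github.com/google/cel-go/cel"
    	"github.com/google/cel-go/ext"
    )

    func main() {
    	env, err := cel.NewEnv(ext.Math())
    	if err != nil {
    		panic(err)
    	}
    	ast, iss := env.Compile(`math.bitOr(1u, 2u) == 3u && math.round(1.5) == 2.0`)
    	if iss.Err() != nil {
    		panic(iss.Err())
    	}
    	prg, err := env.Program(ast)
    	if err != nil {
    		panic(err)
    	}
    	out, _, err := prg.Eval(map[string]any{})
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println(out) // true
    }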
+ + math.round() -> + +Examples: + + math.round(1.2) // returns 1.0 + math.round(1.5) // returns 2.0 + math.round(-1.5) // returns -2.0 + +### Math.Trunc + +Introduced at version: 1 + +Truncates the fractional portion of the double value. + + math.trunc() -> + +Examples: + + math.trunc(-1.3) // returns -1.0 + math.trunc(1.3) // returns 1.0 + +### Math.Abs + +Introduced at version: 1 + +Returns the absolute value of the numeric type provided as input. If the +value is NaN, the output is NaN. If the input is int64 min, the function +will result in an overflow error. + + math.abs() -> + math.abs() -> + math.abs() -> + +Examples: + + math.abs(-1) // returns 1 + math.abs(1) // returns 1 + math.abs(-9223372036854775808) // overlflow error + +### Math.Sign + +Introduced at version: 1 + +Returns the sign of the numeric type, either -1, 0, 1 as an int, double, or +uint depending on the overload. For floating point values, if NaN is +provided as input, the output is also NaN. The implementation does not +differentiate between positive and negative zero. + + math.sign() -> + math.sign() -> + math.sign() -> + +Examples: + + math.sign(-42) // returns -1 + math.sign(0) // returns 0 + math.sign(42) // returns 1 + +### Math.IsInf + +Introduced at version: 1 + +Returns true if the input double value is -Inf or +Inf. + + math.isInf() -> + +Examples: + + math.isInf(1.0/0.0) // returns true + math.isInf(1.2) // returns false + +### Math.IsNaN + +Introduced at version: 1 + +Returns true if the input double value is NaN, false otherwise. + + math.isNaN() -> + +Examples: + + math.isNaN(0.0/0.0) // returns true + math.isNaN(1.2) // returns false + +### Math.IsFinite + +Introduced at version: 1 + +Returns true if the value is a finite number. Equivalent in behavior to: +!math.isNaN(double) && !math.isInf(double) + + math.isFinite() -> + +Examples: + + math.isFinite(0.0/0.0) // returns false + math.isFinite(1.2) // returns true + ## Protos Protos configure extended macros and functions for proto manipulation. @@ -154,6 +393,65 @@ Example: Extended functions for list manipulation. As a general note, all indices are zero-based. +### Distinct + +**Introduced in version 2** + +Returns the distinct elements of a list. + + .distinct() -> + +Examples: + + [1, 2, 2, 3, 3, 3].distinct() // return [1, 2, 3] + ["b", "b", "c", "a", "c"].distinct() // return ["b", "c", "a"] + [1, "b", 2, "b"].distinct() // return [1, "b", 2] + +### Flatten + +**Introduced in version 1** + +Flattens a list recursively. +If an optional depth is provided, the list is flattened to a the specificied level. +A negative depth value will result in an error. + + .flatten() -> + .flatten(, ) -> + +Examples: + + [1,[2,3],[4]].flatten() // return [1, 2, 3, 4] + [1,[2,[3,4]]].flatten() // return [1, 2, [3, 4]] + [1,2,[],[],[3,4]].flatten() // return [1, 2, 3, 4] + [1,[2,[3,[4]]]].flatten(2) // return [1, 2, 3, [4]] + [1,[2,[3,[4]]]].flatten(-1) // error + +### Range + +**Introduced in version 2** + +Returns a list of integers from 0 to n-1. + + lists.range() -> + +Examples: + + lists.range(5) -> [0, 1, 2, 3, 4] + + +### Reverse + +**Introduced in version 2** + +Returns the elements of a list in reverse order. + + .reverse() -> + +Examples: + + [5, 3, 1, 2].reverse() // return [2, 1, 3, 5] + + ### Slice @@ -164,7 +462,43 @@ Returns a new sub-list using the indexes provided. 
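A similar usage sketch for the list helpers, again with the wiring assumed and the expressions taken from the examples above:

    package main

    import (
    	"fmt"

    	"github.com/google/cel-go/cel"
    	"github.com/google/cel-go/ext"
    )

    func main() {
    	env, err := cel.NewEnv(ext.Lists())
    	if err != nil {
    		panic(err)
    	}
    	ast, iss := env.Compile(`[1, [2, 3], [4]].flatten() == [1, 2, 3, 4] && [3, 2, 1].sort() == [1, 2, 3]`)
    	if iss.Err() != nil {
    		panic(iss.Err())
    	}
    	prg, err := env.Program(ast)
    	if err != nil {
    		panic(err)
    	}
    	out, _, err := prg.Eval(map[string]any{})
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println(out) // true
    }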
Examples: [1,2,3,4].slice(1, 3) // return [2, 3] - [1,2,3,4].slice(2, 4) // return [3 ,4] + [1,2,3,4].slice(2, 4) // return [3, 4] + +### Sort + +**Introduced in version 2** + +Sorts a list with comparable elements. If the element type is not comparable +or the element types are not the same, the function will produce an error. + + .sort() -> + T in {int, uint, double, bool, duration, timestamp, string, bytes} + +Examples: + + [3, 2, 1].sort() // return [1, 2, 3] + ["b", "c", "a"].sort() // return ["a", "b", "c"] + [1, "b"].sort() // error + [[1, 2, 3]].sort() // error + +### SortBy + +**Introduced in version 2** + +Sorts a list by a key value, i.e., the order is determined by the result of +an expression applied to each element of the list. + + .sortBy(, ) -> + keyExpr returns a value in {int, uint, double, bool, duration, timestamp, string, bytes} + +Examples: + + [ + Player { name: "foo", score: 0 }, + Player { name: "bar", score: -10 }, + Player { name: "baz", score: 1000 }, + ].sortBy(e, e.score).map(e, e.name) + == ["bar", "foo", "baz"] ## Sets @@ -259,7 +593,8 @@ Examples: 'hello mellow'.indexOf('jello') // returns -1 'hello mellow'.indexOf('', 2) // returns 2 'hello mellow'.indexOf('ello', 2) // returns 7 - 'hello mellow'.indexOf('ello', 20) // error + 'hello mellow'.indexOf('ello', 20) // returns -1 + 'hello mellow'.indexOf('ello', -1) // error ### Join @@ -273,10 +608,10 @@ elements in the resulting string. Examples: - ['hello', 'mellow'].join() // returns 'hellomellow' - ['hello', 'mellow'].join(' ') // returns 'hello mellow' - [].join() // returns '' - [].join('/') // returns '' + ['hello', 'mellow'].join() // returns 'hellomellow' + ['hello', 'mellow'].join(' ') // returns 'hello mellow' + [].join() // returns '' + [].join('/') // returns '' ### LastIndexOf @@ -297,6 +632,7 @@ Examples: 'hello mellow'.lastIndexOf('ello') // returns 7 'hello mellow'.lastIndexOf('jello') // returns -1 'hello mellow'.lastIndexOf('ello', 6) // returns 1 + 'hello mellow'.lastIndexOf('ello', 20) // returns -1 'hello mellow'.lastIndexOf('ello', -1) // error ### LowerAscii @@ -427,4 +763,137 @@ It can be located in Version 3 of strings. Examples: 'gums'.reverse() // returns 'smug' - 'John Smith'.reverse() // returns 'htimS nhoJ' \ No newline at end of file + 'John Smith'.reverse() // returns 'htimS nhoJ' + +## TwoVarComprehensions + +TwoVarComprehensions introduces support for two-variable comprehensions. + +The two-variable form of comprehensions looks similar to the one-variable +counterparts. Where possible, the same macro names were used and additional +macro signatures added. The notable distinction for two-variable comprehensions +is the introduction of `transformList`, `transformMap`, and `transformMapEntry` +support for list and map types rather than the more traditional `map` and +`filter` macros. + +### All + +Comprehension which tests whether all elements in the list or map satisfy a +given predicate. The `all` macro evaluates in a manner consistent with logical +AND and will short-circuit when encountering a `false` value. 
+ + .all(indexVar, valueVar, ) -> bool + .all(keyVar, valueVar, ) -> bool + +Examples: + + [1, 2, 3].all(i, j, i < j) // returns true + {'hello': 'world', 'taco': 'taco'}.all(k, v, k != v) // returns false + + // Combines two-variable comprehension with single variable + {'h': ['hello', 'hi'], 'j': ['joke', 'jog']} + .all(k, vals, vals.all(v, v.startsWith(k))) // returns true + +### Exists + +Comprehension which tests whether any element in a list or map exists which +satisfies a given predicate. The `exists` macro evaluates in a manner consistent +with logical OR and will short-circuit when encountering a `true` value. + + .exists(indexVar, valueVar, ) -> bool + .exists(keyVar, valueVar, ) -> bool + +Examples: + + {'greeting': 'hello', 'farewell': 'goodbye'} + .exists(k, v, k.startsWith('good') || v.endsWith('bye')) // returns true + [1, 2, 4, 8, 16].exists(i, v, v == 1024 && i == 10) // returns false + +### ExistsOne + +Comprehension which tests whether exactly one element in a list or map exists +which satisfies a given predicate expression. This comprehension does not +short-circuit in keeping with the one-variable exists one macro semantics. + + .existsOne(indexVar, valueVar, ) + .existsOne(keyVar, valueVar, ) + +This macro may also be used with the `exists_one` function name, for +compatibility with the one-variable macro of the same name. + +Examples: + + [1, 2, 1, 3, 1, 4].existsOne(i, v, i == 1 || v == 1) // returns false + [1, 1, 2, 2, 3, 3].existsOne(i, v, i == 2 && v == 2) // returns true + {'i': 0, 'j': 1, 'k': 2}.existsOne(i, v, i == 'l' || v == 1) // returns true + +### TransformList + +Comprehension which converts a map or a list into a list value. The output +expression of the comprehension determines the contents of the output list. +Elements in the list may optionally be filtered according to a predicate +expression, where elements that satisfy the predicate are transformed. + + .transformList(indexVar, valueVar, ) + .transformList(indexVar, valueVar, , ) + .transformList(keyVar, valueVar, ) + .transformList(keyVar, valueVar, , ) + +Examples: + + [1, 2, 3].transformList(indexVar, valueVar, + (indexVar * valueVar) + valueVar) // returns [1, 4, 9] + [1, 2, 3].transformList(indexVar, valueVar, indexVar % 2 == 0 + (indexVar * valueVar) + valueVar) // returns [1, 9] + {'greeting': 'hello', 'farewell': 'goodbye'} + .transformList(k, _, k) // returns ['greeting', 'farewell'] + {'greeting': 'hello', 'farewell': 'goodbye'} + .transformList(_, v, v) // returns ['hello', 'goodbye'] + +### TransformMap + +Comprehension which converts a map or a list into a map value. The output +expression of the comprehension determines the value of the output map entry; +however, the key remains fixed. Elements in the map may optionally be filtered +according to a predicate expression, where elements that satisfy the predicate +are transformed. + + .transformMap(indexVar, valueVar, ) + .transformMap(indexVar, valueVar, , ) + .transformMap(keyVar, valueVar, ) + .transformMap(keyVar, valueVar, , ) + +Examples: + + [1, 2, 3].transformMap(indexVar, valueVar, + (indexVar * valueVar) + valueVar) // returns {0: 1, 1: 4, 2: 9} + [1, 2, 3].transformMap(indexVar, valueVar, indexVar % 2 == 0 + (indexVar * valueVar) + valueVar) // returns {0: 1, 2: 9} + {'greeting': 'hello'}.transformMap(k, v, v + '!') // returns {'greeting': 'hello!'} + +### TransformMapEntry + +Comprehension which converts a map or a list into a map value; however, this +transform expects the entry expression be a map literal. 
If the transform +produces an entry which duplicates a key in the target map, the comprehension +will error. Note, that key equality is determined using CEL equality which +asserts that numeric values which are equal, even if they don't have the same +type will cause a key collision. + +Elements in the map may optionally be filtered according to a predicate +expression, where elements that satisfy the predicate are transformed. + + .transformMap(indexVar, valueVar, ) + .transformMap(indexVar, valueVar, , ) + .transformMap(keyVar, valueVar, ) + .transformMap(keyVar, valueVar, , ) + +Examples: + + // returns {'hello': 'greeting'} + {'greeting': 'hello'}.transformMapEntry(keyVar, valueVar, {valueVar: keyVar}) + // reverse lookup, require all values in list be unique + [1, 2, 3].transformMapEntry(indexVar, valueVar, {valueVar: indexVar}) + + {'greeting': 'aloha', 'farewell': 'aloha'} + .transformMapEntry(keyVar, valueVar, {valueVar: keyVar}) // error, duplicate key diff --git a/vendor/github.com/google/cel-go/ext/bindings.go b/vendor/github.com/google/cel-go/ext/bindings.go index 2c6cc627f..50cf4fb3d 100644 --- a/vendor/github.com/google/cel-go/ext/bindings.go +++ b/vendor/github.com/google/cel-go/ext/bindings.go @@ -15,9 +15,19 @@ package ext import ( + "errors" + "fmt" + "math" + "strconv" + "strings" + "sync" + "github.com/google/cel-go/cel" "github.com/google/cel-go/common/ast" "github.com/google/cel-go/common/types" + "github.com/google/cel-go/common/types/ref" + "github.com/google/cel-go/common/types/traits" + "github.com/google/cel-go/interpreter" ) // Bindings returns a cel.EnvOption to configure support for local variable @@ -41,35 +51,120 @@ import ( // [d, e, f].exists(elem, elem in valid_values)) // // Local bindings are not guaranteed to be evaluated before use. -func Bindings() cel.EnvOption { - return cel.Lib(celBindings{}) +func Bindings(options ...BindingsOption) cel.EnvOption { + b := &celBindings{version: math.MaxUint32} + for _, o := range options { + b = o(b) + } + return cel.Lib(b) } const ( celNamespace = "cel" bindMacro = "bind" + blockFunc = "@block" unusedIterVar = "#unused" ) -type celBindings struct{} +// BindingsOption declares a functional operator for configuring the Bindings library behavior. +type BindingsOption func(*celBindings) *celBindings + +// BindingsVersion sets the version of the bindings library to an explicit version. +func BindingsVersion(version uint32) BindingsOption { + return func(lib *celBindings) *celBindings { + lib.version = version + return lib + } +} + +type celBindings struct { + version uint32 +} -func (celBindings) LibraryName() string { +func (*celBindings) LibraryName() string { return "cel.lib.ext.cel.bindings" } -func (celBindings) CompileOptions() []cel.EnvOption { - return []cel.EnvOption{ +func (lib *celBindings) CompileOptions() []cel.EnvOption { + opts := []cel.EnvOption{ cel.Macros( // cel.bind(var, , ) cel.ReceiverMacro(bindMacro, 3, celBind), ), } + if lib.version >= 1 { + // The cel.@block signature takes a list of subexpressions and a typed expression which is + // used as the output type. 
+ paramType := cel.TypeParamType("T") + opts = append(opts, + cel.Function("cel.@block", + cel.Overload("cel_block_list", + []*cel.Type{cel.ListType(cel.DynType), paramType}, paramType)), + ) + opts = append(opts, cel.ASTValidators(blockValidationExemption{})) + } + return opts } -func (celBindings) ProgramOptions() []cel.ProgramOption { +func (lib *celBindings) ProgramOptions() []cel.ProgramOption { + if lib.version >= 1 { + celBlockPlan := func(i interpreter.Interpretable) (interpreter.Interpretable, error) { + call, ok := i.(interpreter.InterpretableCall) + if !ok { + return i, nil + } + switch call.Function() { + case "cel.@block": + args := call.Args() + if len(args) != 2 { + return nil, fmt.Errorf("cel.@block expects two arguments, but got %d", len(args)) + } + expr := args[1] + // Non-empty block + if block, ok := args[0].(interpreter.InterpretableConstructor); ok { + slotExprs := block.InitVals() + return newDynamicBlock(slotExprs, expr), nil + } + // Constant valued block which can happen during runtime optimization. + if cons, ok := args[0].(interpreter.InterpretableConst); ok { + if cons.Value().Type() == types.ListType { + l := cons.Value().(traits.Lister) + if l.Size().Equal(types.IntZero) == types.True { + return args[1], nil + } + return newConstantBlock(l, expr), nil + } + } + return nil, errors.New("cel.@block expects a list constructor as the first argument") + default: + return i, nil + } + } + return []cel.ProgramOption{cel.CustomDecorator(celBlockPlan)} + } return []cel.ProgramOption{} } +type blockValidationExemption struct{} + +// Name returns the name of the validator. +func (blockValidationExemption) Name() string { + return "cel.lib.ext.validate.functions.cel.block" +} + +// Configure implements the ASTValidatorConfigurer interface and augments the list of functions to skip +// during homogeneous aggregate literal type-checks. +func (blockValidationExemption) Configure(config cel.MutableValidatorConfig) error { + functions := config.GetOrDefault(cel.HomogeneousAggregateLiteralExemptFunctions, []string{}).([]string) + functions = append(functions, "cel.@block") + return config.Set(cel.HomogeneousAggregateLiteralExemptFunctions, functions) +} + +// Validate is a no-op as the intent is to simply disable strong type-checks for list literals during +// when they occur within cel.@block calls as the arg types have already been validated. +func (blockValidationExemption) Validate(env *cel.Env, _ cel.ValidatorConfig, a *ast.AST, iss *cel.Issues) { +} + func celBind(mef cel.MacroExprFactory, target ast.Expr, args []ast.Expr) (ast.Expr, *cel.Error) { if !macroTargetMatchesNamespace(celNamespace, target) { return nil, nil @@ -94,3 +189,148 @@ func celBind(mef cel.MacroExprFactory, target ast.Expr, args []ast.Expr) (ast.Ex resultExpr, ), nil } + +func newDynamicBlock(slotExprs []interpreter.Interpretable, expr interpreter.Interpretable) interpreter.Interpretable { + bs := &dynamicBlock{ + slotExprs: slotExprs, + expr: expr, + } + bs.slotActivationPool = &sync.Pool{ + New: func() any { + slotCount := len(slotExprs) + sa := &dynamicSlotActivation{ + slotExprs: slotExprs, + slotCount: slotCount, + slotVals: make([]*slotVal, slotCount), + } + for i := 0; i < slotCount; i++ { + sa.slotVals[i] = &slotVal{} + } + return sa + }, + } + return bs +} + +type dynamicBlock struct { + slotExprs []interpreter.Interpretable + expr interpreter.Interpretable + slotActivationPool *sync.Pool +} + +// ID implements the Interpretable interface method. 
+func (b *dynamicBlock) ID() int64 { + return b.expr.ID() +} + +// Eval implements the Interpretable interface method. +func (b *dynamicBlock) Eval(activation interpreter.Activation) ref.Val { + sa := b.slotActivationPool.Get().(*dynamicSlotActivation) + sa.Activation = activation + defer b.clearSlots(sa) + return b.expr.Eval(sa) +} + +func (b *dynamicBlock) clearSlots(sa *dynamicSlotActivation) { + sa.reset() + b.slotActivationPool.Put(sa) +} + +type slotVal struct { + value *ref.Val + visited bool +} + +type dynamicSlotActivation struct { + interpreter.Activation + slotExprs []interpreter.Interpretable + slotCount int + slotVals []*slotVal +} + +// ResolveName implements the Activation interface method but handles variables prefixed with `@index` +// as special variables which exist within the slot-based memory of the cel.@block() where each slot +// refers to an expression which must be computed only once. +func (sa *dynamicSlotActivation) ResolveName(name string) (any, bool) { + if idx, found := matchSlot(name, sa.slotCount); found { + v := sa.slotVals[idx] + if v.visited { + // Return not found if the index expression refers to itself + if v.value == nil { + return nil, false + } + return *v.value, true + } + v.visited = true + val := sa.slotExprs[idx].Eval(sa) + v.value = &val + return val, true + } + return sa.Activation.ResolveName(name) +} + +func (sa *dynamicSlotActivation) reset() { + sa.Activation = nil + for _, sv := range sa.slotVals { + sv.visited = false + sv.value = nil + } +} + +func newConstantBlock(slots traits.Lister, expr interpreter.Interpretable) interpreter.Interpretable { + count := slots.Size().(types.Int) + return &constantBlock{slots: slots, slotCount: int(count), expr: expr} +} + +type constantBlock struct { + slots traits.Lister + slotCount int + expr interpreter.Interpretable +} + +// ID implements the interpreter.Interpretable interface method. +func (b *constantBlock) ID() int64 { + return b.expr.ID() +} + +// Eval implements the interpreter.Interpretable interface method, and will proxy @index prefixed variable +// lookups into a set of constant slots determined from the plan step. +func (b *constantBlock) Eval(activation interpreter.Activation) ref.Val { + vars := constantSlotActivation{Activation: activation, slots: b.slots, slotCount: b.slotCount} + return b.expr.Eval(vars) +} + +type constantSlotActivation struct { + interpreter.Activation + slots traits.Lister + slotCount int +} + +// ResolveName implements Activation interface method and proxies @index prefixed lookups into the slot +// activation associated with the block scope. 
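The slot activation above evaluates each @index expression at most once and caches the result. The following standalone sketch illustrates that compute-once pattern in plain Go; it is a simplification for illustration only and deliberately omits the pool reuse and self-reference handling in dynamicSlotActivation:

```go
package main

import "fmt"

// slot lazily evaluates an expression on first access and caches the result,
// mirroring the visited/value bookkeeping used by the block slots above.
type slot struct {
	eval    func() int
	visited bool
	value   int
}

func (s *slot) get() int {
	if !s.visited {
		s.visited = true
		s.value = s.eval()
	}
	return s.value
}

func main() {
	calls := 0
	s := &slot{eval: func() int { calls++; return 42 }}
	fmt.Println(s.get(), s.get(), calls) // 42 42 1: the expression ran once
}
```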
+func (sa constantSlotActivation) ResolveName(name string) (any, bool) { + if idx, found := matchSlot(name, sa.slotCount); found { + return sa.slots.Get(types.Int(idx)), true + } + return sa.Activation.ResolveName(name) +} + +func matchSlot(name string, slotCount int) (int, bool) { + if idx, found := strings.CutPrefix(name, indexPrefix); found { + idx, err := strconv.Atoi(idx) + // Return not found if the index is not numeric + if err != nil { + return -1, false + } + // Return not found if the index is not a valid slot + if idx < 0 || idx >= slotCount { + return -1, false + } + return idx, true + } + return -1, false +} + +var ( + indexPrefix = "@index" +) diff --git a/vendor/github.com/google/cel-go/ext/comprehensions.go b/vendor/github.com/google/cel-go/ext/comprehensions.go new file mode 100644 index 000000000..1428558d8 --- /dev/null +++ b/vendor/github.com/google/cel-go/ext/comprehensions.go @@ -0,0 +1,410 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ext + +import ( + "fmt" + + "github.com/google/cel-go/cel" + "github.com/google/cel-go/common/ast" + "github.com/google/cel-go/common/operators" + "github.com/google/cel-go/common/types" + "github.com/google/cel-go/common/types/ref" + "github.com/google/cel-go/common/types/traits" + "github.com/google/cel-go/parser" +) + +const ( + mapInsert = "cel.@mapInsert" + mapInsertOverloadMap = "@mapInsert_map_map" + mapInsertOverloadKeyValue = "@mapInsert_map_key_value" +) + +// TwoVarComprehensions introduces support for two-variable comprehensions. +// +// The two-variable form of comprehensions looks similar to the one-variable counterparts. +// Where possible, the same macro names were used and additional macro signatures added. +// The notable distinction for two-variable comprehensions is the introduction of +// `transformList`, `transformMap`, and `transformMapEntry` support for list and map types +// rather than the more traditional `map` and `filter` macros. +// +// # All +// +// Comprehension which tests whether all elements in the list or map satisfy a given +// predicate. The `all` macro evaluates in a manner consistent with logical AND and will +// short-circuit when encountering a `false` value. +// +// .all(indexVar, valueVar, ) -> bool +// .all(keyVar, valueVar, ) -> bool +// +// Examples: +// +// [1, 2, 3].all(i, j, i < j) // returns true +// {'hello': 'world', 'taco': 'taco'}.all(k, v, k != v) // returns false +// +// // Combines two-variable comprehension with single variable +// {'h': ['hello', 'hi'], 'j': ['joke', 'jog']} +// .all(k, vals, vals.all(v, v.startsWith(k))) // returns true +// +// # Exists +// +// Comprehension which tests whether any element in a list or map exists which satisfies +// a given predicate. The `exists` macro evaluates in a manner consistent with logical OR +// and will short-circuit when encountering a `true` value. 
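A minimal sketch of the two-variable quantifier macros described here, using the expressions from this comment; it assumes an environment built with the TwoVarComprehensions() option defined later in this file:

```go
package main

import (
	"fmt"

	"github.com/google/cel-go/cel"
	"github.com/google/cel-go/ext"
)

func main() {
	env, err := cel.NewEnv(ext.TwoVarComprehensions())
	if err != nil {
		panic(err)
	}
	for _, expr := range []string{
		`[1, 2, 3].all(i, j, i < j)`,                           // true
		`{'hello': 'world', 'taco': 'taco'}.all(k, v, k != v)`, // false
		`{'greeting': 'hello', 'farewell': 'goodbye'}.exists(k, v, k.startsWith('good') || v.endsWith('bye'))`, // true
	} {
		ast, iss := env.Compile(expr)
		if iss.Err() != nil {
			panic(iss.Err())
		}
		prg, err := env.Program(ast)
		if err != nil {
			panic(err)
		}
		out, _, err := prg.Eval(cel.NoVars())
		if err != nil {
			panic(err)
		}
		fmt.Printf("%v => %v\n", expr, out.Value())
	}
}
```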
+// +// .exists(indexVar, valueVar, ) -> bool +// .exists(keyVar, valueVar, ) -> bool +// +// Examples: +// +// {'greeting': 'hello', 'farewell': 'goodbye'} +// .exists(k, v, k.startsWith('good') || v.endsWith('bye')) // returns true +// [1, 2, 4, 8, 16].exists(i, v, v == 1024 && i == 10) // returns false +// +// # ExistsOne +// +// Comprehension which tests whether exactly one element in a list or map exists which +// satisfies a given predicate expression. This comprehension does not short-circuit in +// keeping with the one-variable exists one macro semantics. +// +// .existsOne(indexVar, valueVar, ) +// .existsOne(keyVar, valueVar, ) +// +// This macro may also be used with the `exists_one` function name, for compatibility +// with the one-variable macro of the same name. +// +// Examples: +// +// [1, 2, 1, 3, 1, 4].existsOne(i, v, i == 1 || v == 1) // returns false +// [1, 1, 2, 2, 3, 3].existsOne(i, v, i == 2 && v == 2) // returns true +// {'i': 0, 'j': 1, 'k': 2}.existsOne(i, v, i == 'l' || v == 1) // returns true +// +// # TransformList +// +// Comprehension which converts a map or a list into a list value. The output expression +// of the comprehension determines the contents of the output list. Elements in the list +// may optionally be filtered according to a predicate expression, where elements that +// satisfy the predicate are transformed. +// +// .transformList(indexVar, valueVar, ) +// .transformList(indexVar, valueVar, , ) +// .transformList(keyVar, valueVar, ) +// .transformList(keyVar, valueVar, , ) +// +// Examples: +// +// [1, 2, 3].transformList(indexVar, valueVar, +// (indexVar * valueVar) + valueVar) // returns [1, 4, 9] +// [1, 2, 3].transformList(indexVar, valueVar, indexVar % 2 == 0 +// (indexVar * valueVar) + valueVar) // returns [1, 9] +// {'greeting': 'hello', 'farewell': 'goodbye'} +// .transformList(k, _, k) // returns ['greeting', 'farewell'] +// {'greeting': 'hello', 'farewell': 'goodbye'} +// .transformList(_, v, v) // returns ['hello', 'goodbye'] +// +// # TransformMap +// +// Comprehension which converts a map or a list into a map value. The output expression +// of the comprehension determines the value of the output map entry; however, the key +// remains fixed. Elements in the map may optionally be filtered according to a predicate +// expression, where elements that satisfy the predicate are transformed. +// +// .transformMap(indexVar, valueVar, ) +// .transformMap(indexVar, valueVar, , ) +// .transformMap(keyVar, valueVar, ) +// .transformMap(keyVar, valueVar, , ) +// +// Examples: +// +// [1, 2, 3].transformMap(indexVar, valueVar, +// (indexVar * valueVar) + valueVar) // returns {0: 1, 1: 4, 2: 9} +// [1, 2, 3].transformMap(indexVar, valueVar, indexVar % 2 == 0 +// (indexVar * valueVar) + valueVar) // returns {0: 1, 2: 9} +// {'greeting': 'hello'}.transformMap(k, v, v + '!') // returns {'greeting': 'hello!'} +// +// # TransformMapEntry +// +// Comprehension which converts a map or a list into a map value; however, this transform +// expects the entry expression be a map literal. If the tranform produces an entry which +// duplicates a key in the target map, the comprehension will error. Note, that key +// equality is determined using CEL equality which asserts that numeric values which are +// equal, even if they don't have the same type will cause a key collision. +// +// Elements in the map may optionally be filtered according to a predicate expression, where +// elements that satisfy the predicate are transformed. 
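A short, hedged sketch of the entry transform in action; the expressions mirror the examples given for transformMapEntry, including the duplicate-key case, which surfaces as an evaluation error:

```go
package main

import (
	"fmt"

	"github.com/google/cel-go/cel"
	"github.com/google/cel-go/ext"
)

func main() {
	env, err := cel.NewEnv(ext.TwoVarComprehensions())
	if err != nil {
		panic(err)
	}
	eval := func(expr string) (any, error) {
		ast, iss := env.Compile(expr)
		if iss.Err() != nil {
			return nil, iss.Err()
		}
		prg, err := env.Program(ast)
		if err != nil {
			return nil, err
		}
		out, _, err := prg.Eval(cel.NoVars())
		if err != nil {
			return nil, err
		}
		return out.Value(), nil
	}

	// Swap keys and values: {'hello': 'greeting'}.
	swapped, err := eval(`{'greeting': 'hello'}.transformMapEntry(k, v, {v: k})`)
	fmt.Println(swapped, err)

	// Two source entries map to the same output key, so evaluation errors.
	_, err = eval(`{'greeting': 'aloha', 'farewell': 'aloha'}.transformMapEntry(k, v, {v: k})`)
	fmt.Println(err)
}
```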
+// +// .transformMap(indexVar, valueVar, ) +// .transformMap(indexVar, valueVar, , ) +// .transformMap(keyVar, valueVar, ) +// .transformMap(keyVar, valueVar, , ) +// +// Examples: +// +// // returns {'hello': 'greeting'} +// {'greeting': 'hello'}.transformMapEntry(keyVar, valueVar, {valueVar: keyVar}) +// // reverse lookup, require all values in list be unique +// [1, 2, 3].transformMapEntry(indexVar, valueVar, {valueVar: indexVar}) +// +// {'greeting': 'aloha', 'farewell': 'aloha'} +// .transformMapEntry(keyVar, valueVar, {valueVar: keyVar}) // error, duplicate key +func TwoVarComprehensions() cel.EnvOption { + return cel.Lib(compreV2Lib{}) +} + +type compreV2Lib struct{} + +// LibraryName implements that SingletonLibrary interface method. +func (compreV2Lib) LibraryName() string { + return "cel.lib.ext.comprev2" +} + +// CompileOptions implements the cel.Library interface method. +func (compreV2Lib) CompileOptions() []cel.EnvOption { + kType := cel.TypeParamType("K") + vType := cel.TypeParamType("V") + mapKVType := cel.MapType(kType, vType) + opts := []cel.EnvOption{ + cel.Macros( + cel.ReceiverMacro("all", 3, quantifierAll), + cel.ReceiverMacro("exists", 3, quantifierExists), + cel.ReceiverMacro("existsOne", 3, quantifierExistsOne), + cel.ReceiverMacro("exists_one", 3, quantifierExistsOne), + cel.ReceiverMacro("transformList", 3, transformList), + cel.ReceiverMacro("transformList", 4, transformList), + cel.ReceiverMacro("transformMap", 3, transformMap), + cel.ReceiverMacro("transformMap", 4, transformMap), + cel.ReceiverMacro("transformMapEntry", 3, transformMapEntry), + cel.ReceiverMacro("transformMapEntry", 4, transformMapEntry), + ), + cel.Function(mapInsert, + cel.Overload(mapInsertOverloadKeyValue, []*cel.Type{mapKVType, kType, vType}, mapKVType, + cel.FunctionBinding(func(args ...ref.Val) ref.Val { + m := args[0].(traits.Mapper) + k := args[1] + v := args[2] + return types.InsertMapKeyValue(m, k, v) + })), + cel.Overload(mapInsertOverloadMap, []*cel.Type{mapKVType, mapKVType}, mapKVType, + cel.BinaryBinding(func(targetMap, updateMap ref.Val) ref.Val { + tm := targetMap.(traits.Mapper) + um := updateMap.(traits.Mapper) + umIt := um.Iterator() + for umIt.HasNext() == types.True { + k := umIt.Next() + updateOrErr := types.InsertMapKeyValue(tm, k, um.Get(k)) + if types.IsError(updateOrErr) { + return updateOrErr + } + tm = updateOrErr.(traits.Mapper) + } + return tm + })), + ), + } + return opts +} + +// ProgramOptions implements the cel.Library interface method +func (compreV2Lib) ProgramOptions() []cel.ProgramOption { + return []cel.ProgramOption{} +} + +func quantifierAll(mef cel.MacroExprFactory, target ast.Expr, args []ast.Expr) (ast.Expr, *cel.Error) { + iterVar1, iterVar2, err := extractIterVars(mef, args[0], args[1]) + if err != nil { + return nil, err + } + + return mef.NewComprehensionTwoVar( + target, + iterVar1, + iterVar2, + parser.AccumulatorName, + /*accuInit=*/ mef.NewLiteral(types.True), + /*condition=*/ mef.NewCall(operators.NotStrictlyFalse, mef.NewAccuIdent()), + /*step=*/ mef.NewCall(operators.LogicalAnd, mef.NewAccuIdent(), args[2]), + /*result=*/ mef.NewAccuIdent(), + ), nil +} + +func quantifierExists(mef cel.MacroExprFactory, target ast.Expr, args []ast.Expr) (ast.Expr, *cel.Error) { + iterVar1, iterVar2, err := extractIterVars(mef, args[0], args[1]) + if err != nil { + return nil, err + } + + return mef.NewComprehensionTwoVar( + target, + iterVar1, + iterVar2, + parser.AccumulatorName, + /*accuInit=*/ mef.NewLiteral(types.False), + /*condition=*/ 
mef.NewCall(operators.NotStrictlyFalse, mef.NewCall(operators.LogicalNot, mef.NewAccuIdent())), + /*step=*/ mef.NewCall(operators.LogicalOr, mef.NewAccuIdent(), args[2]), + /*result=*/ mef.NewAccuIdent(), + ), nil +} + +func quantifierExistsOne(mef cel.MacroExprFactory, target ast.Expr, args []ast.Expr) (ast.Expr, *cel.Error) { + iterVar1, iterVar2, err := extractIterVars(mef, args[0], args[1]) + if err != nil { + return nil, err + } + + return mef.NewComprehensionTwoVar( + target, + iterVar1, + iterVar2, + parser.AccumulatorName, + /*accuInit=*/ mef.NewLiteral(types.Int(0)), + /*condition=*/ mef.NewLiteral(types.True), + /*step=*/ mef.NewCall(operators.Conditional, args[2], + mef.NewCall(operators.Add, mef.NewAccuIdent(), mef.NewLiteral(types.Int(1))), + mef.NewAccuIdent()), + /*result=*/ mef.NewCall(operators.Equals, mef.NewAccuIdent(), mef.NewLiteral(types.Int(1))), + ), nil +} + +func transformList(mef cel.MacroExprFactory, target ast.Expr, args []ast.Expr) (ast.Expr, *cel.Error) { + iterVar1, iterVar2, err := extractIterVars(mef, args[0], args[1]) + if err != nil { + return nil, err + } + + var transform ast.Expr + var filter ast.Expr + if len(args) == 4 { + filter = args[2] + transform = args[3] + } else { + filter = nil + transform = args[2] + } + + // __result__ = __result__ + [transform] + step := mef.NewCall(operators.Add, mef.NewAccuIdent(), mef.NewList(transform)) + if filter != nil { + // __result__ = (filter) ? __result__ + [transform] : __result__ + step = mef.NewCall(operators.Conditional, filter, step, mef.NewAccuIdent()) + } + + return mef.NewComprehensionTwoVar( + target, + iterVar1, + iterVar2, + parser.AccumulatorName, + /*accuInit=*/ mef.NewList(), + /*condition=*/ mef.NewLiteral(types.True), + step, + /*result=*/ mef.NewAccuIdent(), + ), nil +} + +func transformMap(mef cel.MacroExprFactory, target ast.Expr, args []ast.Expr) (ast.Expr, *cel.Error) { + iterVar1, iterVar2, err := extractIterVars(mef, args[0], args[1]) + if err != nil { + return nil, err + } + + var transform ast.Expr + var filter ast.Expr + if len(args) == 4 { + filter = args[2] + transform = args[3] + } else { + filter = nil + transform = args[2] + } + + // __result__ = cel.@mapInsert(__result__, iterVar1, transform) + step := mef.NewCall(mapInsert, mef.NewAccuIdent(), mef.NewIdent(iterVar1), transform) + if filter != nil { + // __result__ = (filter) ? cel.@mapInsert(__result__, iterVar1, transform) : __result__ + step = mef.NewCall(operators.Conditional, filter, step, mef.NewAccuIdent()) + } + return mef.NewComprehensionTwoVar( + target, + iterVar1, + iterVar2, + parser.AccumulatorName, + /*accuInit=*/ mef.NewMap(), + /*condition=*/ mef.NewLiteral(types.True), + step, + /*result=*/ mef.NewAccuIdent(), + ), nil +} + +func transformMapEntry(mef cel.MacroExprFactory, target ast.Expr, args []ast.Expr) (ast.Expr, *cel.Error) { + iterVar1, iterVar2, err := extractIterVars(mef, args[0], args[1]) + if err != nil { + return nil, err + } + + var transform ast.Expr + var filter ast.Expr + if len(args) == 4 { + filter = args[2] + transform = args[3] + } else { + filter = nil + transform = args[2] + } + + // __result__ = cel.@mapInsert(__result__, transform) + step := mef.NewCall(mapInsert, mef.NewAccuIdent(), transform) + if filter != nil { + // __result__ = (filter) ? 
cel.@mapInsert(__result__, transform) : __result__ + step = mef.NewCall(operators.Conditional, filter, step, mef.NewAccuIdent()) + } + return mef.NewComprehensionTwoVar( + target, + iterVar1, + iterVar2, + parser.AccumulatorName, + /*accuInit=*/ mef.NewMap(), + /*condition=*/ mef.NewLiteral(types.True), + step, + /*result=*/ mef.NewAccuIdent(), + ), nil +} + +func extractIterVars(mef cel.MacroExprFactory, arg0, arg1 ast.Expr) (string, string, *cel.Error) { + iterVar1, err := extractIterVar(mef, arg0) + if err != nil { + return "", "", err + } + iterVar2, err := extractIterVar(mef, arg1) + if err != nil { + return "", "", err + } + if iterVar1 == iterVar2 { + return "", "", mef.NewError(arg1.ID(), fmt.Sprintf("duplicate variable name: %s", iterVar1)) + } + if iterVar1 == parser.AccumulatorName { + return "", "", mef.NewError(arg0.ID(), "iteration variable overwrites accumulator variable") + } + if iterVar2 == parser.AccumulatorName { + return "", "", mef.NewError(arg1.ID(), "iteration variable overwrites accumulator variable") + } + return iterVar1, iterVar2, nil +} + +func extractIterVar(mef cel.MacroExprFactory, target ast.Expr) (string, *cel.Error) { + iterVar, found := extractIdent(target) + if !found { + return "", mef.NewError(target.ID(), "argument must be a simple name") + } + return iterVar, nil +} diff --git a/vendor/github.com/google/cel-go/ext/encoders.go b/vendor/github.com/google/cel-go/ext/encoders.go index 61ac0b777..ac04b1a7b 100644 --- a/vendor/github.com/google/cel-go/ext/encoders.go +++ b/vendor/github.com/google/cel-go/ext/encoders.go @@ -36,7 +36,7 @@ import ( // Examples: // // base64.decode('aGVsbG8=') // return b'hello' -// base64.decode('aGVsbG8') // error +// base64.decode('aGVsbG8') // return b'hello' // // # Base64.Encode // @@ -79,7 +79,14 @@ func (encoderLib) ProgramOptions() []cel.ProgramOption { } func base64DecodeString(str string) ([]byte, error) { - return base64.StdEncoding.DecodeString(str) + b, err := base64.StdEncoding.DecodeString(str) + if err == nil { + return b, nil + } + if _, tryAltEncoding := err.(base64.CorruptInputError); tryAltEncoding { + return base64.RawStdEncoding.DecodeString(str) + } + return nil, err } func base64EncodeBytes(bytes []byte) (string, error) { diff --git a/vendor/github.com/google/cel-go/ext/formatting.go b/vendor/github.com/google/cel-go/ext/formatting.go index 2f35b996c..dbff613b2 100644 --- a/vendor/github.com/google/cel-go/ext/formatting.go +++ b/vendor/github.com/google/cel-go/ext/formatting.go @@ -484,7 +484,7 @@ func matchConstantFormatStringWithListLiteralArgs(a *ast.AST) ast.ExprMatcher { } } formatString := call.Target() - if formatString.Kind() != ast.LiteralKind && formatString.AsLiteral().Type() != cel.StringType { + if formatString.Kind() != ast.LiteralKind || formatString.AsLiteral().Type() != cel.StringType { return false } args := call.Args() diff --git a/vendor/github.com/google/cel-go/ext/guards.go b/vendor/github.com/google/cel-go/ext/guards.go index 2c00bfe3a..ccede289f 100644 --- a/vendor/github.com/google/cel-go/ext/guards.go +++ b/vendor/github.com/google/cel-go/ext/guards.go @@ -50,14 +50,18 @@ func listStringOrError(strs []string, err error) ref.Val { return types.DefaultTypeAdapter.NativeToValue(strs) } -func macroTargetMatchesNamespace(ns string, target ast.Expr) bool { +func extractIdent(target ast.Expr) (string, bool) { switch target.Kind() { case ast.IdentKind: - if target.AsIdent() != ns { - return false - } - return true + return target.AsIdent(), true default: - return false + return "", 
false + } +} + +func macroTargetMatchesNamespace(ns string, target ast.Expr) bool { + if id, found := extractIdent(target); found { + return id == ns } + return false } diff --git a/vendor/github.com/google/cel-go/ext/lists.go b/vendor/github.com/google/cel-go/ext/lists.go index 08751d08a..d0b90ea92 100644 --- a/vendor/github.com/google/cel-go/ext/lists.go +++ b/vendor/github.com/google/cel-go/ext/lists.go @@ -16,15 +16,70 @@ package ext import ( "fmt" + "math" + "sort" "github.com/google/cel-go/cel" + "github.com/google/cel-go/common/ast" + "github.com/google/cel-go/common/decls" "github.com/google/cel-go/common/types" "github.com/google/cel-go/common/types/ref" "github.com/google/cel-go/common/types/traits" + "github.com/google/cel-go/parser" ) +var comparableTypes = []*cel.Type{ + cel.IntType, + cel.UintType, + cel.DoubleType, + cel.BoolType, + cel.DurationType, + cel.TimestampType, + cel.StringType, + cel.BytesType, +} + // Lists returns a cel.EnvOption to configure extended functions for list manipulation. // As a general note, all indices are zero-based. +// +// # Distinct +// +// Introduced in version: 2 +// +// Returns the distinct elements of a list. +// +// .distinct() -> +// +// Examples: +// +// [1, 2, 2, 3, 3, 3].distinct() // return [1, 2, 3] +// ["b", "b", "c", "a", "c"].distinct() // return ["b", "c", "a"] +// [1, "b", 2, "b"].distinct() // return [1, "b", 2] +// +// # Range +// +// Introduced in version: 2 +// +// Returns a list of integers from 0 to n-1. +// +// lists.range() -> +// +// Examples: +// +// lists.range(5) -> [0, 1, 2, 3, 4] +// +// # Reverse +// +// Introduced in version: 2 +// +// Returns the elements of a list in reverse order. +// +// .reverse() -> +// +// Examples: +// +// [5, 3, 1, 2].reverse() // return [2, 1, 3, 5] +// // # Slice // // Returns a new sub-list using the indexes provided. @@ -35,21 +90,105 @@ import ( // // [1,2,3,4].slice(1, 3) // return [2, 3] // [1,2,3,4].slice(2, 4) // return [3 ,4] -func Lists() cel.EnvOption { - return cel.Lib(listsLib{}) +// +// # Flatten +// +// Flattens a list recursively. +// If an optional depth is provided, the list is flattened to a the specificied level. +// A negative depth value will result in an error. +// +// .flatten() -> +// .flatten(, ) -> +// +// Examples: +// +// [1,[2,3],[4]].flatten() // return [1, 2, 3, 4] +// [1,[2,[3,4]]].flatten() // return [1, 2, [3, 4]] +// [1,2,[],[],[3,4]].flatten() // return [1, 2, 3, 4] +// [1,[2,[3,[4]]]].flatten(2) // return [1, 2, 3, [4]] +// [1,[2,[3,[4]]]].flatten(-1) // error +// +// # Sort +// +// Introduced in version: 2 +// +// Sorts a list with comparable elements. If the element type is not comparable +// or the element types are not the same, the function will produce an error. +// +// .sort() -> +// T in {int, uint, double, bool, duration, timestamp, string, bytes} +// +// Examples: +// +// [3, 2, 1].sort() // return [1, 2, 3] +// ["b", "c", "a"].sort() // return ["a", "b", "c"] +// [1, "b"].sort() // error +// [[1, 2, 3]].sort() // error +// +// # SortBy +// +// Sorts a list by a key value, i.e., the order is determined by the result of +// an expression applied to each element of the list. +// The output of the key expression must be a comparable type, otherwise the +// function will return an error. 
+// +// .sortBy(, ) -> +// keyExpr returns a value in {int, uint, double, bool, duration, timestamp, string, bytes} + +// Examples: +// +// [ +// Player { name: "foo", score: 0 }, +// Player { name: "bar", score: -10 }, +// Player { name: "baz", score: 1000 }, +// ].sortBy(e, e.score).map(e, e.name) +// == ["bar", "foo", "baz"] + +func Lists(options ...ListsOption) cel.EnvOption { + l := &listsLib{ + version: math.MaxUint32, + } + for _, o := range options { + l = o(l) + } + + return cel.Lib(l) } -type listsLib struct{} +type listsLib struct { + version uint32 +} // LibraryName implements the SingletonLibrary interface method. func (listsLib) LibraryName() string { return "cel.lib.ext.lists" } +// ListsOption is a functional interface for configuring the strings library. +type ListsOption func(*listsLib) *listsLib + +// ListsVersion configures the version of the string library. +// +// The version limits which functions are available. Only functions introduced +// below or equal to the given version included in the library. If this option +// is not set, all functions are available. +// +// See the library documentation to determine which version a function was introduced. +// If the documentation does not state which version a function was introduced, it can +// be assumed to be introduced at version 0, when the library was first created. +func ListsVersion(version uint32) ListsOption { + return func(lib *listsLib) *listsLib { + lib.version = version + return lib + } +} + // CompileOptions implements the Library interface method. -func (listsLib) CompileOptions() []cel.EnvOption { +func (lib listsLib) CompileOptions() []cel.EnvOption { listType := cel.ListType(cel.TypeParamType("T")) - return []cel.EnvOption{ + listListType := cel.ListType(listType) + listDyn := cel.ListType(cel.DynType) + opts := []cel.EnvOption{ cel.Function("slice", cel.MemberOverload("list_slice", []*cel.Type{listType, cel.IntType, cel.IntType}, listType, @@ -66,6 +205,151 @@ func (listsLib) CompileOptions() []cel.EnvOption { ), ), } + if lib.version >= 1 { + opts = append(opts, + cel.Function("flatten", + cel.MemberOverload("list_flatten", + []*cel.Type{listListType}, listType, + cel.UnaryBinding(func(arg ref.Val) ref.Val { + list, ok := arg.(traits.Lister) + if !ok { + return types.MaybeNoSuchOverloadErr(arg) + } + flatList, err := flatten(list, 1) + if err != nil { + return types.WrapErr(err) + } + + return types.DefaultTypeAdapter.NativeToValue(flatList) + }), + ), + cel.MemberOverload("list_flatten_int", + []*cel.Type{listDyn, types.IntType}, listDyn, + cel.BinaryBinding(func(arg1, arg2 ref.Val) ref.Val { + list, ok := arg1.(traits.Lister) + if !ok { + return types.MaybeNoSuchOverloadErr(arg1) + } + depth, ok := arg2.(types.Int) + if !ok { + return types.MaybeNoSuchOverloadErr(arg2) + } + flatList, err := flatten(list, int64(depth)) + if err != nil { + return types.WrapErr(err) + } + + return types.DefaultTypeAdapter.NativeToValue(flatList) + }), + ), + // To handle the case where a variable of just `list(T)` is provided at runtime + // with a graceful failure more, disable the type guards since the implementation + // can handle lists which are already flat. 
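The flatten examples documented above can be exercised as shown in the following sketch; it assumes the default Lists() configuration (all versions enabled) and reuses the list literals from this comment:

```go
package main

import (
	"fmt"

	"github.com/google/cel-go/cel"
	"github.com/google/cel-go/ext"
)

func main() {
	env, err := cel.NewEnv(ext.Lists())
	if err != nil {
		panic(err)
	}
	for _, expr := range []string{
		`[1, [2, 3], [4]].flatten()`,    // [1, 2, 3, 4]
		`[1, [2, [3, 4]]].flatten()`,    // [1, 2, [3, 4]] (one level by default)
		`[1, [2, [3, [4]]]].flatten(2)`, // [1, 2, 3, [4]]
	} {
		ast, iss := env.Compile(expr)
		if iss.Err() != nil {
			panic(iss.Err())
		}
		prg, err := env.Program(ast)
		if err != nil {
			panic(err)
		}
		out, _, err := prg.Eval(cel.NoVars())
		if err != nil {
			panic(err)
		}
		fmt.Println(out.Value())
	}
}
```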
+ decls.DisableTypeGuards(true), + ), + ) + } + if lib.version >= 2 { + sortDecl := cel.Function("sort", + append( + templatedOverloads(comparableTypes, func(t *cel.Type) cel.FunctionOpt { + return cel.MemberOverload( + fmt.Sprintf("list_%s_sort", t.TypeName()), + []*cel.Type{cel.ListType(t)}, cel.ListType(t), + ) + }), + cel.SingletonUnaryBinding( + func(arg ref.Val) ref.Val { + list, ok := arg.(traits.Lister) + if !ok { + return types.MaybeNoSuchOverloadErr(arg) + } + sorted, err := sortList(list) + if err != nil { + return types.WrapErr(err) + } + + return sorted + }, + // List traits + traits.ListerType, + ), + )..., + ) + opts = append(opts, sortDecl) + opts = append(opts, cel.Macros(cel.ReceiverMacro("sortBy", 2, sortByMacro))) + opts = append(opts, cel.Function("@sortByAssociatedKeys", + append( + templatedOverloads(comparableTypes, func(u *cel.Type) cel.FunctionOpt { + return cel.MemberOverload( + fmt.Sprintf("list_%s_sortByAssociatedKeys", u.TypeName()), + []*cel.Type{listType, cel.ListType(u)}, listType, + ) + }), + cel.SingletonBinaryBinding( + func(arg1 ref.Val, arg2 ref.Val) ref.Val { + list, ok := arg1.(traits.Lister) + if !ok { + return types.MaybeNoSuchOverloadErr(arg1) + } + keys, ok := arg2.(traits.Lister) + if !ok { + return types.MaybeNoSuchOverloadErr(arg2) + } + sorted, err := sortListByAssociatedKeys(list, keys) + if err != nil { + return types.WrapErr(err) + } + + return sorted + }, + // List traits + traits.ListerType, + ), + )..., + )) + + opts = append(opts, cel.Function("lists.range", + cel.Overload("lists_range", + []*cel.Type{cel.IntType}, cel.ListType(cel.IntType), + cel.FunctionBinding(func(args ...ref.Val) ref.Val { + n := args[0].(types.Int) + result, err := genRange(n) + if err != nil { + return types.WrapErr(err) + } + return result + }), + ), + )) + opts = append(opts, cel.Function("reverse", + cel.MemberOverload("list_reverse", + []*cel.Type{listType}, listType, + cel.FunctionBinding(func(args ...ref.Val) ref.Val { + list := args[0].(traits.Lister) + result, err := reverseList(list) + if err != nil { + return types.WrapErr(err) + } + return result + }), + ), + )) + opts = append(opts, cel.Function("distinct", + cel.MemberOverload("list_distinct", + []*cel.Type{listType}, listType, + cel.UnaryBinding(func(list ref.Val) ref.Val { + result, err := distinctList(list.(traits.Lister)) + if err != nil { + return types.WrapErr(err) + } + return result + }), + ), + )) + } + + return opts } // ProgramOptions implements the Library interface method. 
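A minimal sketch of the version-2 list helpers registered above (sort, sortBy, lists.range, reverse, distinct), assuming ListsVersion(2); the expected results follow the documented examples:

```go
package main

import (
	"fmt"

	"github.com/google/cel-go/cel"
	"github.com/google/cel-go/ext"
)

func main() {
	// ListsVersion(2) enables sort, sortBy, lists.range, reverse and distinct.
	env, err := cel.NewEnv(ext.Lists(ext.ListsVersion(2)))
	if err != nil {
		panic(err)
	}
	for _, expr := range []string{
		`[3, 2, 1].sort()`,                     // [1, 2, 3]
		`["b", "b", "c", "a", "c"].distinct()`, // ["b", "c", "a"]
		`lists.range(5)`,                       // [0, 1, 2, 3, 4]
		`[5, 3, 1, 2].reverse()`,               // [2, 1, 3, 5]
		`['b', 'c', 'a'].sortBy(s, s)`,         // ['a', 'b', 'c'] (identity key)
	} {
		ast, iss := env.Compile(expr)
		if iss.Err() != nil {
			panic(iss.Err())
		}
		prg, err := env.Program(ast)
		if err != nil {
			panic(err)
		}
		out, _, err := prg.Eval(cel.NoVars())
		if err != nil {
			panic(err)
		}
		fmt.Printf("%-36s => %v\n", expr, out.Value())
	}
}
```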
@@ -73,6 +357,24 @@ func (listsLib) ProgramOptions() []cel.ProgramOption { return []cel.ProgramOption{} } +func genRange(n types.Int) (ref.Val, error) { + var newList []ref.Val + for i := types.Int(0); i < n; i++ { + newList = append(newList, i) + } + return types.DefaultTypeAdapter.NativeToValue(newList), nil +} + +func reverseList(list traits.Lister) (ref.Val, error) { + var newList []ref.Val + listLength := list.Size().(types.Int) + for i := types.Int(0); i < listLength; i++ { + val := list.Get(listLength - i - 1) + newList = append(newList, val) + } + return types.DefaultTypeAdapter.NativeToValue(newList), nil +} + func slice(list traits.Lister, start, end types.Int) (ref.Val, error) { listLength := list.Size().(types.Int) if start < 0 || end < 0 { @@ -92,3 +394,167 @@ func slice(list traits.Lister, start, end types.Int) (ref.Val, error) { } return types.DefaultTypeAdapter.NativeToValue(newList), nil } + +func flatten(list traits.Lister, depth int64) ([]ref.Val, error) { + if depth < 0 { + return nil, fmt.Errorf("level must be non-negative") + } + + var newList []ref.Val + iter := list.Iterator() + + for iter.HasNext() == types.True { + val := iter.Next() + nestedList, isList := val.(traits.Lister) + + if !isList || depth == 0 { + newList = append(newList, val) + continue + } else { + flattenedList, err := flatten(nestedList, depth-1) + if err != nil { + return nil, err + } + + newList = append(newList, flattenedList...) + } + } + + return newList, nil +} + +func sortList(list traits.Lister) (ref.Val, error) { + return sortListByAssociatedKeys(list, list) +} + +// Internal function used for the implementation of sort() and sortBy(). +// +// Sorts a list of arbitrary elements, according to the order produced by sorting +// another list of comparable elements. If the element type of the keys is not +// comparable or the element types are not the same, the function will produce an error. 
+// +// .@sortByAssociatedKeys() -> +// U in {int, uint, double, bool, duration, timestamp, string, bytes} +// +// Example: +// +// ["foo", "bar", "baz"].@sortByAssociatedKeys([3, 1, 2]) // return ["bar", "baz", "foo"] +func sortListByAssociatedKeys(list, keys traits.Lister) (ref.Val, error) { + listLength := list.Size().(types.Int) + keysLength := keys.Size().(types.Int) + if listLength != keysLength { + return nil, fmt.Errorf( + "@sortByAssociatedKeys() expected a list of the same size as the associated keys list, but got %d and %d elements respectively", + listLength, + keysLength, + ) + } + if listLength == 0 { + return list, nil + } + elem := keys.Get(types.IntZero) + if _, ok := elem.(traits.Comparer); !ok { + return nil, fmt.Errorf("list elements must be comparable") + } + + sortedIndices := make([]ref.Val, 0, listLength) + for i := types.IntZero; i < listLength; i++ { + if keys.Get(i).Type() != elem.Type() { + return nil, fmt.Errorf("list elements must have the same type") + } + sortedIndices = append(sortedIndices, i) + } + + sort.Slice(sortedIndices, func(i, j int) bool { + iKey := keys.Get(sortedIndices[i]) + jKey := keys.Get(sortedIndices[j]) + return iKey.(traits.Comparer).Compare(jKey) == types.IntNegOne + }) + + sorted := make([]ref.Val, 0, listLength) + + for _, sortedIdx := range sortedIndices { + sorted = append(sorted, list.Get(sortedIdx)) + } + return types.DefaultTypeAdapter.NativeToValue(sorted), nil +} + +// sortByMacro transforms an expression like: +// +// mylistExpr.sortBy(e, -math.abs(e)) +// +// into something equivalent to: +// +// cel.bind( +// __sortBy_input__, +// myListExpr, +// __sortBy_input__.@sortByAssociatedKeys(__sortBy_input__.map(e, -math.abs(e)) +// ) +func sortByMacro(meh cel.MacroExprFactory, target ast.Expr, args []ast.Expr) (ast.Expr, *cel.Error) { + varIdent := meh.NewIdent("@__sortBy_input__") + varName := varIdent.AsIdent() + + targetKind := target.Kind() + if targetKind != ast.ListKind && + targetKind != ast.SelectKind && + targetKind != ast.IdentKind && + targetKind != ast.ComprehensionKind && targetKind != ast.CallKind { + return nil, meh.NewError(target.ID(), fmt.Sprintf("sortBy can only be applied to a list, identifier, comprehension, call or select expression")) + } + + mapCompr, err := parser.MakeMap(meh, meh.Copy(varIdent), args) + if err != nil { + return nil, err + } + callExpr := meh.NewMemberCall("@sortByAssociatedKeys", + meh.Copy(varIdent), + mapCompr, + ) + + bindExpr := meh.NewComprehension( + meh.NewList(), + "#unused", + varName, + target, + meh.NewLiteral(types.False), + varIdent, + callExpr, + ) + + return bindExpr, nil +} + +func distinctList(list traits.Lister) (ref.Val, error) { + listLength := list.Size().(types.Int) + if listLength == 0 { + return list, nil + } + uniqueList := make([]ref.Val, 0, listLength) + for i := types.IntZero; i < listLength; i++ { + val := list.Get(i) + seen := false + for j := types.IntZero; j < types.Int(len(uniqueList)); j++ { + if i == j { + continue + } + other := uniqueList[j] + if val.Equal(other) == types.True { + seen = true + break + } + } + if !seen { + uniqueList = append(uniqueList, val) + } + } + + return types.DefaultTypeAdapter.NativeToValue(uniqueList), nil +} + +func templatedOverloads(types []*cel.Type, template func(t *cel.Type) cel.FunctionOpt) []cel.FunctionOpt { + overloads := make([]cel.FunctionOpt, len(types)) + for i, t := range types { + overloads[i] = template(t) + } + return overloads +} diff --git a/vendor/github.com/google/cel-go/ext/math.go 
b/vendor/github.com/google/cel-go/ext/math.go index 65d7e2eb0..250246db1 100644 --- a/vendor/github.com/google/cel-go/ext/math.go +++ b/vendor/github.com/google/cel-go/ext/math.go @@ -16,6 +16,7 @@ package ext import ( "fmt" + "math" "strings" "github.com/google/cel-go/cel" @@ -86,28 +87,312 @@ import ( // math.least('string') // parse error // math.least(a, b) // check-time error if a or b is non-numeric // math.least(dyn('string')) // runtime error -func Math() cel.EnvOption { - return cel.Lib(mathLib{}) +// +// # Math.BitOr +// +// Introduced at version: 1 +// +// Performs a bitwise-OR operation over two int or uint values. +// +// math.bitOr(, ) -> +// math.bitOr(, ) -> +// +// Examples: +// +// math.bitOr(1u, 2u) // returns 3u +// math.bitOr(-2, -4) // returns -2 +// +// # Math.BitAnd +// +// Introduced at version: 1 +// +// Performs a bitwise-AND operation over two int or uint values. +// +// math.bitAnd(, ) -> +// math.bitAnd(, ) -> +// +// Examples: +// +// math.bitAnd(3u, 2u) // return 2u +// math.bitAnd(3, 5) // returns 3 +// math.bitAnd(-3, -5) // returns -7 +// +// # Math.BitXor +// +// Introduced at version: 1 +// +// math.bitXor(, ) -> +// math.bitXor(, ) -> +// +// Performs a bitwise-XOR operation over two int or uint values. +// +// Examples: +// +// math.bitXor(3u, 5u) // returns 6u +// math.bitXor(1, 3) // returns 2 +// +// # Math.BitNot +// +// Introduced at version: 1 +// +// Function which accepts a single int or uint and performs a bitwise-NOT +// ones-complement of the given binary value. +// +// math.bitNot() -> +// math.bitNot() -> +// +// Examples +// +// math.bitNot(1) // returns -1 +// math.bitNot(-1) // return 0 +// math.bitNot(0u) // returns 18446744073709551615u +// +// # Math.BitShiftLeft +// +// Introduced at version: 1 +// +// Perform a left shift of bits on the first parameter, by the amount of bits +// specified in the second parameter. The first parameter is either a uint or +// an int. The second parameter must be an int. +// +// When the second parameter is 64 or greater, 0 will be always be returned +// since the number of bits shifted is greater than or equal to the total bit +// length of the number being shifted. Negative valued bit shifts will result +// in a runtime error. +// +// math.bitShiftLeft(, ) -> +// math.bitShiftLeft(, ) -> +// +// Examples +// +// math.bitShiftLeft(1, 2) // returns 4 +// math.bitShiftLeft(-1, 2) // returns -4 +// math.bitShiftLeft(1u, 2) // return 4u +// math.bitShiftLeft(1u, 200) // returns 0u +// +// # Math.BitShiftRight +// +// Introduced at version: 1 +// +// Perform a right shift of bits on the first parameter, by the amount of bits +// specified in the second parameter. The first parameter is either a uint or +// an int. The second parameter must be an int. +// +// When the second parameter is 64 or greater, 0 will always be returned since +// the number of bits shifted is greater than or equal to the total bit length +// of the number being shifted. Negative valued bit shifts will result in a +// runtime error. +// +// The sign bit extension will not be preserved for this operation: vacant bits +// on the left are filled with 0. +// +// math.bitShiftRight(, ) -> +// math.bitShiftRight(, ) -> +// +// Examples +// +// math.bitShiftRight(1024, 2) // returns 256 +// math.bitShiftRight(1024u, 2) // returns 256u +// math.bitShiftRight(1024u, 64) // returns 0u +// +// # Math.Ceil +// +// Introduced at version: 1 +// +// Compute the ceiling of a double value. 
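These version-1 math helpers are enabled through the same options pattern as the other extension libraries. A minimal sketch, using the bitwise and rounding examples from this comment (the rounding functions are documented just below):

```go
package main

import (
	"fmt"

	"github.com/google/cel-go/cel"
	"github.com/google/cel-go/ext"
)

func main() {
	// MathVersion(1), or the default, enables the bitwise, rounding and
	// sign/abs helpers introduced in this change.
	env, err := cel.NewEnv(ext.Math(ext.MathVersion(1)))
	if err != nil {
		panic(err)
	}
	for _, expr := range []string{
		`math.bitOr(1u, 2u)`,       // 3u
		`math.bitShiftLeft(1u, 2)`, // 4u
		`math.ceil(1.2)`,           // 2.0
		`math.round(-1.5)`,         // -2.0
		`math.isNaN(0.0 / 0.0)`,    // true
	} {
		ast, iss := env.Compile(expr)
		if iss.Err() != nil {
			panic(iss.Err())
		}
		prg, err := env.Program(ast)
		if err != nil {
			panic(err)
		}
		out, _, err := prg.Eval(cel.NoVars())
		if err != nil {
			panic(err)
		}
		fmt.Printf("%-26s => %v\n", expr, out.Value())
	}
}
```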
+// +// math.ceil() -> +// +// Examples: +// +// math.ceil(1.2) // returns 2.0 +// math.ceil(-1.2) // returns -1.0 +// +// # Math.Floor +// +// Introduced at version: 1 +// +// Compute the floor of a double value. +// +// math.floor() -> +// +// Examples: +// +// math.floor(1.2) // returns 1.0 +// math.floor(-1.2) // returns -2.0 +// +// # Math.Round +// +// Introduced at version: 1 +// +// Rounds the double value to the nearest whole number with ties rounding away +// from zero, e.g. 1.5 -> 2.0, -1.5 -> -2.0. +// +// math.round() -> +// +// Examples: +// +// math.round(1.2) // returns 1.0 +// math.round(1.5) // returns 2.0 +// math.round(-1.5) // returns -2.0 +// +// # Math.Trunc +// +// Introduced at version: 1 +// +// Truncates the fractional portion of the double value. +// +// math.trunc() -> +// +// Examples: +// +// math.trunc(-1.3) // returns -1.0 +// math.trunc(1.3) // returns 1.0 +// +// # Math.Abs +// +// Introduced at version: 1 +// +// Returns the absolute value of the numeric type provided as input. If the +// value is NaN, the output is NaN. If the input is int64 min, the function +// will result in an overflow error. +// +// math.abs() -> +// math.abs() -> +// math.abs() -> +// +// Examples: +// +// math.abs(-1) // returns 1 +// math.abs(1) // returns 1 +// math.abs(-9223372036854775808) // overflow error +// +// # Math.Sign +// +// Introduced at version: 1 +// +// Returns the sign of the numeric type, either -1, 0, 1 as an int, double, or +// uint depending on the overload. For floating point values, if NaN is +// provided as input, the output is also NaN. The implementation does not +// differentiate between positive and negative zero. +// +// math.sign() -> +// math.sign() -> +// math.sign() -> +// +// Examples: +// +// math.sign(-42) // returns -1 +// math.sign(0) // returns 0 +// math.sign(42) // returns 1 +// +// # Math.IsInf +// +// Introduced at version: 1 +// +// Returns true if the input double value is -Inf or +Inf. +// +// math.isInf() -> +// +// Examples: +// +// math.isInf(1.0/0.0) // returns true +// math.isInf(1.2) // returns false +// +// # Math.IsNaN +// +// Introduced at version: 1 +// +// Returns true if the input double value is NaN, false otherwise. +// +// math.isNaN() -> +// +// Examples: +// +// math.isNaN(0.0/0.0) // returns true +// math.isNaN(1.2) // returns false +// +// # Math.IsFinite +// +// Introduced at version: 1 +// +// Returns true if the value is a finite number. 
Equivalent in behavior to: +// !math.isNaN(double) && !math.isInf(double) +// +// math.isFinite() -> +// +// Examples: +// +// math.isFinite(0.0/0.0) // returns false +// math.isFinite(1.2) // returns true +func Math(options ...MathOption) cel.EnvOption { + m := &mathLib{version: math.MaxUint32} + for _, o := range options { + m = o(m) + } + return cel.Lib(m) } const ( mathNamespace = "math" leastMacro = "least" greatestMacro = "greatest" - minFunc = "math.@min" - maxFunc = "math.@max" + + // Min-max functions + minFunc = "math.@min" + maxFunc = "math.@max" + + // Rounding functions + ceilFunc = "math.ceil" + floorFunc = "math.floor" + roundFunc = "math.round" + truncFunc = "math.trunc" + + // Floating point helper functions + isInfFunc = "math.isInf" + isNanFunc = "math.isNaN" + isFiniteFunc = "math.isFinite" + + // Signedness functions + absFunc = "math.abs" + signFunc = "math.sign" + + // Bitwise functions + bitAndFunc = "math.bitAnd" + bitOrFunc = "math.bitOr" + bitXorFunc = "math.bitXor" + bitNotFunc = "math.bitNot" + bitShiftLeftFunc = "math.bitShiftLeft" + bitShiftRightFunc = "math.bitShiftRight" ) -type mathLib struct{} +var ( + errIntOverflow = types.NewErr("integer overflow") +) + +// MathOption declares a functional operator for configuring math extensions. +type MathOption func(*mathLib) *mathLib + +// MathVersion sets the library version for math extensions. +func MathVersion(version uint32) MathOption { + return func(lib *mathLib) *mathLib { + lib.version = version + return lib + } +} + +type mathLib struct { + version uint32 +} // LibraryName implements the SingletonLibrary interface method. -func (mathLib) LibraryName() string { +func (*mathLib) LibraryName() string { return "cel.lib.ext.math" } // CompileOptions implements the Library interface method. -func (mathLib) CompileOptions() []cel.EnvOption { - return []cel.EnvOption{ +func (lib *mathLib) CompileOptions() []cel.EnvOption { + opts := []cel.EnvOption{ cel.Macros( // math.least(num, ...) 
cel.ReceiverVarArgMacro(leastMacro, mathLeast), @@ -179,10 +464,95 @@ func (mathLib) CompileOptions() []cel.EnvOption { cel.UnaryBinding(maxList)), ), } + if lib.version >= 1 { + opts = append(opts, + // Rounding function declarations + cel.Function(ceilFunc, + cel.Overload("math_ceil_double", []*cel.Type{cel.DoubleType}, cel.DoubleType, + cel.UnaryBinding(ceil))), + cel.Function(floorFunc, + cel.Overload("math_floor_double", []*cel.Type{cel.DoubleType}, cel.DoubleType, + cel.UnaryBinding(floor))), + cel.Function(roundFunc, + cel.Overload("math_round_double", []*cel.Type{cel.DoubleType}, cel.DoubleType, + cel.UnaryBinding(round))), + cel.Function(truncFunc, + cel.Overload("math_trunc_double", []*cel.Type{cel.DoubleType}, cel.DoubleType, + cel.UnaryBinding(trunc))), + + // Floating point helpers + cel.Function(isInfFunc, + cel.Overload("math_isInf_double", []*cel.Type{cel.DoubleType}, cel.BoolType, + cel.UnaryBinding(isInf))), + cel.Function(isNanFunc, + cel.Overload("math_isNaN_double", []*cel.Type{cel.DoubleType}, cel.BoolType, + cel.UnaryBinding(isNaN))), + cel.Function(isFiniteFunc, + cel.Overload("math_isFinite_double", []*cel.Type{cel.DoubleType}, cel.BoolType, + cel.UnaryBinding(isFinite))), + + // Signedness functions + cel.Function(absFunc, + cel.Overload("math_abs_double", []*cel.Type{cel.DoubleType}, cel.DoubleType, + cel.UnaryBinding(absDouble)), + cel.Overload("math_abs_int", []*cel.Type{cel.IntType}, cel.IntType, + cel.UnaryBinding(absInt)), + cel.Overload("math_abs_uint", []*cel.Type{cel.UintType}, cel.UintType, + cel.UnaryBinding(identity)), + ), + cel.Function(signFunc, + cel.Overload("math_sign_double", []*cel.Type{cel.DoubleType}, cel.DoubleType, + cel.UnaryBinding(sign)), + cel.Overload("math_sign_int", []*cel.Type{cel.IntType}, cel.IntType, + cel.UnaryBinding(sign)), + cel.Overload("math_sign_uint", []*cel.Type{cel.UintType}, cel.UintType, + cel.UnaryBinding(sign)), + ), + + // Bitwise operator declarations + cel.Function(bitAndFunc, + cel.Overload("math_bitAnd_int_int", []*cel.Type{cel.IntType, cel.IntType}, cel.IntType, + cel.BinaryBinding(bitAndPairInt)), + cel.Overload("math_bitAnd_uint_uint", []*cel.Type{cel.UintType, cel.UintType}, cel.UintType, + cel.BinaryBinding(bitAndPairUint)), + ), + cel.Function(bitOrFunc, + cel.Overload("math_bitOr_int_int", []*cel.Type{cel.IntType, cel.IntType}, cel.IntType, + cel.BinaryBinding(bitOrPairInt)), + cel.Overload("math_bitOr_uint_uint", []*cel.Type{cel.UintType, cel.UintType}, cel.UintType, + cel.BinaryBinding(bitOrPairUint)), + ), + cel.Function(bitXorFunc, + cel.Overload("math_bitXor_int_int", []*cel.Type{cel.IntType, cel.IntType}, cel.IntType, + cel.BinaryBinding(bitXorPairInt)), + cel.Overload("math_bitXor_uint_uint", []*cel.Type{cel.UintType, cel.UintType}, cel.UintType, + cel.BinaryBinding(bitXorPairUint)), + ), + cel.Function(bitNotFunc, + cel.Overload("math_bitNot_int_int", []*cel.Type{cel.IntType}, cel.IntType, + cel.UnaryBinding(bitNotInt)), + cel.Overload("math_bitNot_uint_uint", []*cel.Type{cel.UintType}, cel.UintType, + cel.UnaryBinding(bitNotUint)), + ), + cel.Function(bitShiftLeftFunc, + cel.Overload("math_bitShiftLeft_int_int", []*cel.Type{cel.IntType, cel.IntType}, cel.IntType, + cel.BinaryBinding(bitShiftLeftIntInt)), + cel.Overload("math_bitShiftLeft_uint_int", []*cel.Type{cel.UintType, cel.IntType}, cel.UintType, + cel.BinaryBinding(bitShiftLeftUintInt)), + ), + cel.Function(bitShiftRightFunc, + cel.Overload("math_bitShiftRight_int_int", []*cel.Type{cel.IntType, cel.IntType}, cel.IntType, + 
cel.BinaryBinding(bitShiftRightIntInt)), + cel.Overload("math_bitShiftRight_uint_int", []*cel.Type{cel.UintType, cel.IntType}, cel.UintType, + cel.BinaryBinding(bitShiftRightUintInt)), + ), + ) + } + return opts } // ProgramOptions implements the Library interface method. -func (mathLib) ProgramOptions() []cel.ProgramOption { +func (*mathLib) ProgramOptions() []cel.ProgramOption { return []cel.ProgramOption{} } @@ -194,7 +564,7 @@ func mathLeast(meh cel.MacroExprFactory, target ast.Expr, args []ast.Expr) (ast. case 0: return nil, meh.NewError(target.ID(), "math.least() requires at least one argument") case 1: - if isListLiteralWithValidArgs(args[0]) || isValidArgType(args[0]) { + if isListLiteralWithNumericArgs(args[0]) || isNumericArgType(args[0]) { return meh.NewCall(minFunc, args[0]), nil } return nil, meh.NewError(args[0].ID(), "math.least() invalid single argument value") @@ -221,7 +591,7 @@ func mathGreatest(mef cel.MacroExprFactory, target ast.Expr, args []ast.Expr) (a case 0: return nil, mef.NewError(target.ID(), "math.greatest() requires at least one argument") case 1: - if isListLiteralWithValidArgs(args[0]) || isValidArgType(args[0]) { + if isListLiteralWithNumericArgs(args[0]) || isNumericArgType(args[0]) { return mef.NewCall(maxFunc, args[0]), nil } return nil, mef.NewError(args[0].ID(), "math.greatest() invalid single argument value") @@ -244,6 +614,165 @@ func identity(val ref.Val) ref.Val { return val } +func ceil(val ref.Val) ref.Val { + v := val.(types.Double) + return types.Double(math.Ceil(float64(v))) +} + +func floor(val ref.Val) ref.Val { + v := val.(types.Double) + return types.Double(math.Floor(float64(v))) +} + +func round(val ref.Val) ref.Val { + v := val.(types.Double) + return types.Double(math.Round(float64(v))) +} + +func trunc(val ref.Val) ref.Val { + v := val.(types.Double) + return types.Double(math.Trunc(float64(v))) +} + +func isInf(val ref.Val) ref.Val { + v := val.(types.Double) + return types.Bool(math.IsInf(float64(v), 0)) +} + +func isFinite(val ref.Val) ref.Val { + v := float64(val.(types.Double)) + return types.Bool(!math.IsInf(v, 0) && !math.IsNaN(v)) +} + +func isNaN(val ref.Val) ref.Val { + v := val.(types.Double) + return types.Bool(math.IsNaN(float64(v))) +} + +func absDouble(val ref.Val) ref.Val { + v := float64(val.(types.Double)) + return types.Double(math.Abs(v)) +} + +func absInt(val ref.Val) ref.Val { + v := int64(val.(types.Int)) + if v == math.MinInt64 { + return errIntOverflow + } + if v >= 0 { + return val + } + return -types.Int(v) +} + +func sign(val ref.Val) ref.Val { + switch v := val.(type) { + case types.Double: + if isNaN(v) == types.True { + return v + } + zero := types.Double(0) + if v > zero { + return types.Double(1) + } + if v < zero { + return types.Double(-1) + } + return zero + case types.Int: + return v.Compare(types.IntZero) + case types.Uint: + if v == types.Uint(0) { + return types.Uint(0) + } + return types.Uint(1) + default: + return maybeSuffixError(val, "math.sign") + } +} + +func bitAndPairInt(first, second ref.Val) ref.Val { + l := first.(types.Int) + r := second.(types.Int) + return l & r +} + +func bitAndPairUint(first, second ref.Val) ref.Val { + l := first.(types.Uint) + r := second.(types.Uint) + return l & r +} + +func bitOrPairInt(first, second ref.Val) ref.Val { + l := first.(types.Int) + r := second.(types.Int) + return l | r +} + +func bitOrPairUint(first, second ref.Val) ref.Val { + l := first.(types.Uint) + r := second.(types.Uint) + return l | r +} + +func bitXorPairInt(first, second ref.Val) 
ref.Val { + l := first.(types.Int) + r := second.(types.Int) + return l ^ r +} + +func bitXorPairUint(first, second ref.Val) ref.Val { + l := first.(types.Uint) + r := second.(types.Uint) + return l ^ r +} + +func bitNotInt(value ref.Val) ref.Val { + v := value.(types.Int) + return ^v +} + +func bitNotUint(value ref.Val) ref.Val { + v := value.(types.Uint) + return ^v +} + +func bitShiftLeftIntInt(value, bits ref.Val) ref.Val { + v := value.(types.Int) + bs := bits.(types.Int) + if bs < types.IntZero { + return types.NewErr("math.bitShiftLeft() negative offset: %d", bs) + } + return v << bs +} + +func bitShiftLeftUintInt(value, bits ref.Val) ref.Val { + v := value.(types.Uint) + bs := bits.(types.Int) + if bs < types.IntZero { + return types.NewErr("math.bitShiftLeft() negative offset: %d", bs) + } + return v << bs +} + +func bitShiftRightIntInt(value, bits ref.Val) ref.Val { + v := value.(types.Int) + bs := bits.(types.Int) + if bs < types.IntZero { + return types.NewErr("math.bitShiftRight() negative offset: %d", bs) + } + return types.Int(types.Uint(v) >> bs) +} + +func bitShiftRightUintInt(value, bits ref.Val) ref.Val { + v := value.(types.Uint) + bs := bits.(types.Int) + if bs < types.IntZero { + return types.NewErr("math.bitShiftRight() negative offset: %d", bs) + } + return v >> bs +} + func minPair(first, second ref.Val) ref.Val { cmp, ok := first.(traits.Comparer) if !ok { @@ -321,13 +850,13 @@ func checkInvalidArgs(meh cel.MacroExprFactory, funcName string, args []ast.Expr } func checkInvalidArgLiteral(funcName string, arg ast.Expr) error { - if !isValidArgType(arg) { + if !isNumericArgType(arg) { return fmt.Errorf("%s simple literal arguments must be numeric", funcName) } return nil } -func isValidArgType(arg ast.Expr) bool { +func isNumericArgType(arg ast.Expr) bool { switch arg.Kind() { case ast.LiteralKind: c := ref.Val(arg.AsLiteral()) @@ -344,7 +873,7 @@ func isValidArgType(arg ast.Expr) bool { } } -func isListLiteralWithValidArgs(arg ast.Expr) bool { +func isListLiteralWithNumericArgs(arg ast.Expr) bool { switch arg.Kind() { case ast.ListKind: list := arg.AsList() @@ -352,7 +881,7 @@ func isListLiteralWithValidArgs(arg ast.Expr) bool { return false } for _, e := range list.Elements() { - if !isValidArgType(e) { + if !isNumericArgType(e) { return false } } diff --git a/vendor/github.com/google/cel-go/ext/native.go b/vendor/github.com/google/cel-go/ext/native.go index d1b787775..36ab4a7ae 100644 --- a/vendor/github.com/google/cel-go/ext/native.go +++ b/vendor/github.com/google/cel-go/ext/native.go @@ -15,6 +15,7 @@ package ext import ( + "errors" "fmt" "reflect" "strings" @@ -77,12 +78,45 @@ var ( // same advice holds if you are using custom type adapters and type providers. The native type // provider composes over whichever type adapter and provider is configured in the cel.Env at // the time that it is invoked. -func NativeTypes(refTypes ...any) cel.EnvOption { +// +// There is also the possibility to rename the fields of native structs by setting the `cel` tag +// for fields you want to override. In order to enable this feature, pass in the `EnableStructTag` +// option. Here is an example to see it in action: +// +// ```go +// package identity +// +// type Account struct { +// ID int +// OwnerName string `cel:"owner"` +// } +// +// ``` +// +// The `OwnerName` field is now accessible in CEL via `owner`, e.g. `identity.Account{owner: 'bob'}`. +// In case there are duplicated field names in the struct, an error will be returned. 
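A hedged sketch of the struct-tag renaming described above. The Account type, the variable name, and the qualified type name ("main.Account", derived from the Go package path, here package main) are assumptions for illustration; ParseStructTags is the option defined further down in this file:

```go
package main

import (
	"fmt"
	"reflect"

	"github.com/google/cel-go/cel"
	"github.com/google/cel-go/ext"
)

// Account mirrors the doc example: the `cel` tag renames OwnerName to "owner",
// while untagged fields keep their Go names.
type Account struct {
	ID        int
	OwnerName string `cel:"owner"`
}

func main() {
	env, err := cel.NewEnv(
		// ParseStructTags(true) turns on `cel` tag handling for native types.
		ext.NativeTypes(ext.ParseStructTags(true), reflect.TypeOf(Account{})),
		cel.Variable("account", cel.ObjectType("main.Account")),
	)
	if err != nil {
		panic(err)
	}
	ast, iss := env.Compile(`account.owner == 'bob' && account.ID == 1`)
	if iss.Err() != nil {
		panic(iss.Err())
	}
	prg, err := env.Program(ast)
	if err != nil {
		panic(err)
	}
	out, _, err := prg.Eval(map[string]any{
		"account": Account{ID: 1, OwnerName: "bob"},
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(out.Value()) // true
}
```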
+func NativeTypes(args ...any) cel.EnvOption { return func(env *cel.Env) (*cel.Env, error) { - tp, err := newNativeTypeProvider(env.CELTypeAdapter(), env.CELTypeProvider(), refTypes...) + nativeTypes := make([]any, 0, len(args)) + tpOptions := nativeTypeOptions{} + + for _, v := range args { + switch v := v.(type) { + case NativeTypesOption: + err := v(&tpOptions) + if err != nil { + return nil, err + } + default: + nativeTypes = append(nativeTypes, v) + } + } + + tp, err := newNativeTypeProvider(tpOptions, env.CELTypeAdapter(), env.CELTypeProvider(), nativeTypes...) if err != nil { return nil, err } + env, err = cel.CustomTypeAdapter(tp)(env) if err != nil { return nil, err @@ -91,12 +125,79 @@ func NativeTypes(refTypes ...any) cel.EnvOption { } } -func newNativeTypeProvider(adapter types.Adapter, provider types.Provider, refTypes ...any) (*nativeTypeProvider, error) { +// NativeTypesOption is a functional interface for configuring handling of native types. +type NativeTypesOption func(*nativeTypeOptions) error + +// NativeTypesFieldNameHandler is a handler for mapping a reflect.StructField to a CEL field name. +// This can be used to override the default Go struct field to CEL field name mapping. +type NativeTypesFieldNameHandler = func(field reflect.StructField) string + +func fieldNameByTag(structTagToParse string) func(field reflect.StructField) string { + return func(field reflect.StructField) string { + tag, found := field.Tag.Lookup(structTagToParse) + if found { + splits := strings.Split(tag, ",") + if len(splits) > 0 { + // We make the assumption that the leftmost entry in the tag is the name. + // This seems to be true for most tags that have the concept of a name/key, such as: + // https://pkg.go.dev/encoding/xml#Marshal + // https://pkg.go.dev/encoding/json#Marshal + // https://pkg.go.dev/go.mongodb.org/mongo-driver/bson#hdr-Structs + // https://pkg.go.dev/gopkg.in/yaml.v2#Marshal + name := splits[0] + return name + } + } + + return field.Name + } +} + +type nativeTypeOptions struct { + // fieldNameHandler controls how CEL should perform struct field renames. + // This is most commonly used for switching to parsing based off the struct field tag, + // such as "cel" or "json". + fieldNameHandler NativeTypesFieldNameHandler +} + +// ParseStructTags configures if native types field names should be overridable by CEL struct tags. +// This is equivalent to ParseStructTag("cel") +func ParseStructTags(enabled bool) NativeTypesOption { + return func(ntp *nativeTypeOptions) error { + if enabled { + ntp.fieldNameHandler = fieldNameByTag("cel") + } else { + ntp.fieldNameHandler = nil + } + return nil + } +} + +// ParseStructTag configures the struct tag to parse. The 0th item in the tag is used as the name of the CEL field. +// For example: +// If the tag to parse is "cel" and the struct field has tag cel:"foo", the CEL struct field will be "foo". +// If the tag to parse is "json" and the struct field has tag json:"foo,omitempty", the CEL struct field will be "foo". +func ParseStructTag(tag string) NativeTypesOption { + return func(ntp *nativeTypeOptions) error { + ntp.fieldNameHandler = fieldNameByTag(tag) + return nil + } +} + +// ParseStructField configures how to parse Go struct fields. It can be used to customize struct field parsing. 
+func ParseStructField(handler NativeTypesFieldNameHandler) NativeTypesOption { + return func(ntp *nativeTypeOptions) error { + ntp.fieldNameHandler = handler + return nil + } +} + +func newNativeTypeProvider(tpOptions nativeTypeOptions, adapter types.Adapter, provider types.Provider, refTypes ...any) (*nativeTypeProvider, error) { nativeTypes := make(map[string]*nativeType, len(refTypes)) for _, refType := range refTypes { switch rt := refType.(type) { case reflect.Type: - result, err := newNativeTypes(rt) + result, err := newNativeTypes(tpOptions.fieldNameHandler, rt) if err != nil { return nil, err } @@ -104,7 +205,7 @@ func newNativeTypeProvider(adapter types.Adapter, provider types.Provider, refTy nativeTypes[result[idx].TypeName()] = result[idx] } case reflect.Value: - result, err := newNativeTypes(rt.Type()) + result, err := newNativeTypes(tpOptions.fieldNameHandler, rt.Type()) if err != nil { return nil, err } @@ -119,6 +220,7 @@ func newNativeTypeProvider(adapter types.Adapter, provider types.Provider, refTy nativeTypes: nativeTypes, baseAdapter: adapter, baseProvider: provider, + options: tpOptions, }, nil } @@ -126,6 +228,7 @@ type nativeTypeProvider struct { nativeTypes map[string]*nativeType baseAdapter types.Adapter baseProvider types.Provider + options nativeTypeOptions } // EnumValue proxies to the types.Provider configured at the times the NativeTypes @@ -155,6 +258,14 @@ func (tp *nativeTypeProvider) FindStructType(typeName string) (*types.Type, bool return tp.baseProvider.FindStructType(typeName) } +func toFieldName(fieldNameHandler NativeTypesFieldNameHandler, f reflect.StructField) string { + if fieldNameHandler == nil { + return f.Name + } + + return fieldNameHandler(f) +} + // FindStructFieldNames looks up the type definition first from the native types, then from // the backing provider type set. If found, a set of field names corresponding to the type // will be returned. 
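Beyond tag parsing, the field-name handler can be supplied directly. A hypothetical sketch using ParseStructField with a lowercasing handler; the Server type, variable name, and "main.Server" type name are illustrative assumptions:

```go
package main

import (
	"fmt"
	"reflect"
	"strings"

	"github.com/google/cel-go/cel"
	"github.com/google/cel-go/ext"
)

// Server is an illustrative native type whose fields will be exposed to CEL
// under lowercased names via a custom NativeTypesFieldNameHandler.
type Server struct {
	Addr string
	Port int
}

func main() {
	lower := func(field reflect.StructField) string {
		return strings.ToLower(field.Name)
	}
	env, err := cel.NewEnv(
		ext.NativeTypes(ext.ParseStructField(lower), reflect.TypeOf(Server{})),
		cel.Variable("srv", cel.ObjectType("main.Server")),
	)
	if err != nil {
		panic(err)
	}
	ast, iss := env.Compile(`srv.addr != '' && srv.port > 0`)
	if iss.Err() != nil {
		panic(iss.Err())
	}
	prg, err := env.Program(ast)
	if err != nil {
		panic(err)
	}
	out, _, err := prg.Eval(map[string]any{"srv": Server{Addr: "10.0.0.1", Port: 6443}})
	if err != nil {
		panic(err)
	}
	fmt.Println(out.Value()) // true
}
```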
@@ -163,7 +274,7 @@ func (tp *nativeTypeProvider) FindStructFieldNames(typeName string) ([]string, b fieldCount := t.refType.NumField() fields := make([]string, fieldCount) for i := 0; i < fieldCount; i++ { - fields[i] = t.refType.Field(i).Name + fields[i] = toFieldName(tp.options.fieldNameHandler, t.refType.Field(i)) } return fields, true } @@ -192,13 +303,13 @@ func (tp *nativeTypeProvider) FindStructFieldType(typeName, fieldName string) (* Type: celType, IsSet: func(obj any) bool { refVal := reflect.Indirect(reflect.ValueOf(obj)) - refField := refVal.FieldByName(fieldName) + refField := refVal.FieldByName(refField.Name) return !refField.IsZero() }, GetFrom: func(obj any) (any, error) { refVal := reflect.Indirect(reflect.ValueOf(obj)) - refField := refVal.FieldByName(fieldName) - return getFieldValue(tp, refField), nil + refField := refVal.FieldByName(refField.Name) + return getFieldValue(refField), nil }, }, true } @@ -249,6 +360,9 @@ func (tp *nativeTypeProvider) NativeToValue(val any) ref.Val { case []byte: return tp.baseAdapter.NativeToValue(val) default: + if refVal.Type().Elem() == reflect.TypeOf(byte(0)) { + return tp.baseAdapter.NativeToValue(val) + } return types.NewDynamicList(tp, val) } case reflect.Map: @@ -259,7 +373,7 @@ func (tp *nativeTypeProvider) NativeToValue(val any) ref.Val { time.Time: return tp.baseAdapter.NativeToValue(val) default: - return newNativeObject(tp, val, rawVal) + return tp.newNativeObject(val, rawVal) } default: return tp.baseAdapter.NativeToValue(val) @@ -319,13 +433,13 @@ func convertToCelType(refType reflect.Type) (*cel.Type, bool) { return nil, false } -func newNativeObject(adapter types.Adapter, val any, refValue reflect.Value) ref.Val { - valType, err := newNativeType(refValue.Type()) +func (tp *nativeTypeProvider) newNativeObject(val any, refValue reflect.Value) ref.Val { + valType, err := newNativeType(tp.options.fieldNameHandler, refValue.Type()) if err != nil { return types.NewErr(err.Error()) } return &nativeObj{ - Adapter: adapter, + Adapter: tp, val: val, valType: valType, refValue: refValue, @@ -372,12 +486,13 @@ func (o *nativeObj) ConvertToNative(typeDesc reflect.Type) (any, error) { if !fieldValue.IsValid() || fieldValue.IsZero() { continue } + fieldName := toFieldName(o.valType.fieldNameHandler, fieldType) fieldCELVal := o.NativeToValue(fieldValue.Interface()) fieldJSONVal, err := fieldCELVal.ConvertToNative(jsonValueType) if err != nil { return nil, err } - fields[fieldType.Name] = fieldJSONVal.(*structpb.Value) + fields[fieldName] = fieldJSONVal.(*structpb.Value) } return &structpb.Struct{Fields: fields}, nil } @@ -469,8 +584,8 @@ func (o *nativeObj) Value() any { return o.val } -func newNativeTypes(rawType reflect.Type) ([]*nativeType, error) { - nt, err := newNativeType(rawType) +func newNativeTypes(fieldNameHandler NativeTypesFieldNameHandler, rawType reflect.Type) ([]*nativeType, error) { + nt, err := newNativeType(fieldNameHandler, rawType) if err != nil { return nil, err } @@ -489,7 +604,7 @@ func newNativeTypes(rawType reflect.Type) ([]*nativeType, error) { return } alreadySeen[t.String()] = struct{}{} - nt, ntErr := newNativeType(t) + nt, ntErr := newNativeType(fieldNameHandler, t) if ntErr != nil { err = ntErr return @@ -505,7 +620,11 @@ func newNativeTypes(rawType reflect.Type) ([]*nativeType, error) { return result, err } -func newNativeType(rawType reflect.Type) (*nativeType, error) { +var ( + errDuplicatedFieldName = errors.New("field name already exists in struct") +) + +func newNativeType(fieldNameHandler 
NativeTypesFieldNameHandler, rawType reflect.Type) (*nativeType, error) { refType := rawType if refType.Kind() == reflect.Pointer { refType = refType.Elem() @@ -513,15 +632,34 @@ func newNativeType(rawType reflect.Type) (*nativeType, error) { if !isValidObjectType(refType) { return nil, fmt.Errorf("unsupported reflect.Type %v, must be reflect.Struct", rawType) } + + // Since naming collisions can only happen with struct tag parsing, we only check for them if it is enabled. + if fieldNameHandler != nil { + fieldNames := make(map[string]struct{}) + + for idx := 0; idx < refType.NumField(); idx++ { + field := refType.Field(idx) + fieldName := toFieldName(fieldNameHandler, field) + + if _, found := fieldNames[fieldName]; found { + return nil, fmt.Errorf("invalid field name `%s` in struct `%s`: %w", fieldName, refType.Name(), errDuplicatedFieldName) + } else { + fieldNames[fieldName] = struct{}{} + } + } + } + return &nativeType{ - typeName: fmt.Sprintf("%s.%s", simplePkgAlias(refType.PkgPath()), refType.Name()), - refType: refType, + typeName: fmt.Sprintf("%s.%s", simplePkgAlias(refType.PkgPath()), refType.Name()), + refType: refType, + fieldNameHandler: fieldNameHandler, }, nil } type nativeType struct { - typeName string - refType reflect.Type + typeName string + refType reflect.Type + fieldNameHandler NativeTypesFieldNameHandler } // ConvertToNative implements ref.Val.ConvertToNative. @@ -569,9 +707,26 @@ func (t *nativeType) Value() any { return t.typeName } +// fieldByName returns the corresponding reflect.StructField for the give name either by matching +// field tag or field name. +func (t *nativeType) fieldByName(fieldName string) (reflect.StructField, bool) { + if t.fieldNameHandler == nil { + return t.refType.FieldByName(fieldName) + } + + for i := 0; i < t.refType.NumField(); i++ { + f := t.refType.Field(i) + if toFieldName(t.fieldNameHandler, f) == fieldName { + return f, true + } + } + + return reflect.StructField{}, false +} + // hasField returns whether a field name has a corresponding Golang reflect.StructField func (t *nativeType) hasField(fieldName string) (reflect.StructField, bool) { - f, found := t.refType.FieldByName(fieldName) + f, found := t.fieldByName(fieldName) if !found || !f.IsExported() || !isSupportedType(f.Type) { return reflect.StructField{}, false } @@ -579,21 +734,16 @@ func (t *nativeType) hasField(fieldName string) (reflect.StructField, bool) { } func adaptFieldValue(adapter types.Adapter, refField reflect.Value) ref.Val { - return adapter.NativeToValue(getFieldValue(adapter, refField)) + return adapter.NativeToValue(getFieldValue(refField)) } -func getFieldValue(adapter types.Adapter, refField reflect.Value) any { +func getFieldValue(refField reflect.Value) any { if refField.IsZero() { switch refField.Kind() { - case reflect.Array, reflect.Slice: - return types.NewDynamicList(adapter, []ref.Val{}) - case reflect.Map: - return types.NewDynamicMap(adapter, map[ref.Val]ref.Val{}) case reflect.Struct: if refField.Type() == timestampType { - return types.Timestamp{Time: time.Unix(0, 0)} + return time.Unix(0, 0) } - return reflect.New(refField.Type()).Elem().Interface() case reflect.Pointer: return reflect.New(refField.Type().Elem()).Interface() } diff --git a/vendor/github.com/google/cel-go/ext/strings.go b/vendor/github.com/google/cel-go/ext/strings.go index 2e20f1e4c..2e590a4c5 100644 --- a/vendor/github.com/google/cel-go/ext/strings.go +++ b/vendor/github.com/google/cel-go/ext/strings.go @@ -119,7 +119,8 @@ const ( // 'hello mellow'.indexOf('jello') // 
returns -1 // 'hello mellow'.indexOf('', 2) // returns 2 // 'hello mellow'.indexOf('ello', 2) // returns 7 -// 'hello mellow'.indexOf('ello', 20) // error +// 'hello mellow'.indexOf('ello', 20) // returns -1 +// 'hello mellow'.indexOf('ello', -1) // error // // # Join // @@ -155,6 +156,7 @@ const ( // 'hello mellow'.lastIndexOf('ello') // returns 7 // 'hello mellow'.lastIndexOf('jello') // returns -1 // 'hello mellow'.lastIndexOf('ello', 6) // returns 1 +// 'hello mellow'.lastIndexOf('ello', 20) // returns -1 // 'hello mellow'.lastIndexOf('ello', -1) // error // // # LowerAscii @@ -520,7 +522,7 @@ func (lib *stringLib) CompileOptions() []cel.EnvOption { if lib.version >= 3 { opts = append(opts, cel.Function("reverse", - cel.MemberOverload("reverse", []*cel.Type{cel.StringType}, cel.StringType, + cel.MemberOverload("string_reverse", []*cel.Type{cel.StringType}, cel.StringType, cel.UnaryBinding(func(str ref.Val) ref.Val { s := str.(types.String) return stringOrError(reverse(string(s))) @@ -561,9 +563,13 @@ func indexOfOffset(str, substr string, offset int64) (int64, error) { off := int(offset) runes := []rune(str) subrunes := []rune(substr) - if off < 0 || off >= len(runes) { + if off < 0 { return -1, fmt.Errorf("index out of range: %d", off) } + // If the offset exceeds the length, return -1 rather than error. + if off >= len(runes) { + return -1, nil + } for i := off; i < len(runes)-(len(subrunes)-1); i++ { found := true for j := 0; j < len(subrunes); j++ { @@ -594,9 +600,13 @@ func lastIndexOfOffset(str, substr string, offset int64) (int64, error) { off := int(offset) runes := []rune(str) subrunes := []rune(substr) - if off < 0 || off >= len(runes) { + if off < 0 { return -1, fmt.Errorf("index out of range: %d", off) } + // If the offset is far greater than the length return -1 + if off >= len(runes) { + return -1, nil + } if off > len(runes)-len(subrunes) { off = len(runes) - len(subrunes) } diff --git a/vendor/github.com/google/cel-go/interpreter/activation.go b/vendor/github.com/google/cel-go/interpreter/activation.go index a80264451..1577f3590 100644 --- a/vendor/github.com/google/cel-go/interpreter/activation.go +++ b/vendor/github.com/google/cel-go/interpreter/activation.go @@ -17,7 +17,6 @@ package interpreter import ( "errors" "fmt" - "sync" "github.com/google/cel-go/common/types/ref" ) @@ -167,35 +166,3 @@ type partActivation struct { func (a *partActivation) UnknownAttributePatterns() []*AttributePattern { return a.unknowns } - -// varActivation represents a single mutable variable binding. -// -// This activation type should only be used within folds as the fold loop controls the object -// life-cycle. -type varActivation struct { - parent Activation - name string - val ref.Val -} - -// Parent implements the Activation interface method. -func (v *varActivation) Parent() Activation { - return v.parent -} - -// ResolveName implements the Activation interface method. -func (v *varActivation) ResolveName(name string) (any, bool) { - if name == v.name { - return v.val, true - } - return v.parent.ResolveName(name) -} - -var ( - // pool of var activations to reduce allocations during folds. 
- varActivationPool = &sync.Pool{ - New: func() any { - return &varActivation{} - }, - } -) diff --git a/vendor/github.com/google/cel-go/interpreter/attribute_patterns.go b/vendor/github.com/google/cel-go/interpreter/attribute_patterns.go index 1fbaaf17e..8f19bde7e 100644 --- a/vendor/github.com/google/cel-go/interpreter/attribute_patterns.go +++ b/vendor/github.com/google/cel-go/interpreter/attribute_patterns.go @@ -178,10 +178,8 @@ func numericValueEquals(value any, celValue ref.Val) bool { // NewPartialAttributeFactory returns an AttributeFactory implementation capable of performing // AttributePattern matches with PartialActivation inputs. -func NewPartialAttributeFactory(container *containers.Container, - adapter types.Adapter, - provider types.Provider) AttributeFactory { - fac := NewAttributeFactory(container, adapter, provider) +func NewPartialAttributeFactory(container *containers.Container, adapter types.Adapter, provider types.Provider, opts ...AttrFactoryOption) AttributeFactory { + fac := NewAttributeFactory(container, adapter, provider, opts...) return &partialAttributeFactory{ AttributeFactory: fac, container: container, diff --git a/vendor/github.com/google/cel-go/interpreter/attributes.go b/vendor/github.com/google/cel-go/interpreter/attributes.go index 0098750dd..b1b3aacc8 100644 --- a/vendor/github.com/google/cel-go/interpreter/attributes.go +++ b/vendor/github.com/google/cel-go/interpreter/attributes.go @@ -126,21 +126,39 @@ type NamespacedAttribute interface { Qualifiers() []Qualifier } +// AttrFactoryOption specifies a functional option for configuring an attribute factory. +type AttrFactoryOption func(*attrFactory) *attrFactory + +// EnableErrorOnBadPresenceTest error generation when a presence test or optional field selection +// is performed on a primitive type. +func EnableErrorOnBadPresenceTest(value bool) AttrFactoryOption { + return func(fac *attrFactory) *attrFactory { + fac.errorOnBadPresenceTest = value + return fac + } +} + // NewAttributeFactory returns a default AttributeFactory which is produces Attribute values // capable of resolving types by simple names and qualify the values using the supported qualifier // types: bool, int, string, and uint. -func NewAttributeFactory(cont *containers.Container, a types.Adapter, p types.Provider) AttributeFactory { - return &attrFactory{ +func NewAttributeFactory(cont *containers.Container, a types.Adapter, p types.Provider, opts ...AttrFactoryOption) AttributeFactory { + fac := &attrFactory{ container: cont, adapter: a, provider: p, } + for _, o := range opts { + fac = o(fac) + } + return fac } type attrFactory struct { container *containers.Container adapter types.Adapter provider types.Provider + + errorOnBadPresenceTest bool } // AbsoluteAttribute refers to a variable value and an optional qualifier path. @@ -149,12 +167,13 @@ type attrFactory struct { // resolution rules. func (r *attrFactory) AbsoluteAttribute(id int64, names ...string) NamespacedAttribute { return &absoluteAttribute{ - id: id, - namespaceNames: names, - qualifiers: []Qualifier{}, - adapter: r.adapter, - provider: r.provider, - fac: r, + id: id, + namespaceNames: names, + qualifiers: []Qualifier{}, + adapter: r.adapter, + provider: r.provider, + fac: r, + errorOnBadPresenceTest: r.errorOnBadPresenceTest, } } @@ -188,11 +207,12 @@ func (r *attrFactory) MaybeAttribute(id int64, name string) Attribute { // RelativeAttribute refers to an expression and an optional qualifier path. 
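// Illustrative wiring sketch (not part of the vendored change): the new
// AttrFactoryOption is threaded through NewAttributeFactory and, per the hunk
// above, NewPartialAttributeFactory as well. Package paths assume the same
// cel-go release vendored here; the variable name is hypothetical.
package main

import (
	"fmt"

	"github.com/google/cel-go/common/containers"
	"github.com/google/cel-go/common/types"
	"github.com/google/cel-go/interpreter"
)

func main() {
	reg, err := types.NewRegistry()
	if err != nil {
		panic(err)
	}
	// With EnableErrorOnBadPresenceTest(true), qualifiers built by this factory
	// surface an error for presence tests against values the adapter cannot
	// qualify, instead of reporting the field as absent (see the refQualify
	// change further down).
	fac := interpreter.NewAttributeFactory(
		containers.DefaultContainer, reg, reg,
		interpreter.EnableErrorOnBadPresenceTest(true),
	)
	attr := fac.AbsoluteAttribute(1, "request")
	fmt.Println(attr.ID()) // 1
}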
func (r *attrFactory) RelativeAttribute(id int64, operand Interpretable) Attribute { return &relativeAttribute{ - id: id, - operand: operand, - qualifiers: []Qualifier{}, - adapter: r.adapter, - fac: r, + id: id, + operand: operand, + qualifiers: []Qualifier{}, + adapter: r.adapter, + fac: r, + errorOnBadPresenceTest: r.errorOnBadPresenceTest, } } @@ -214,7 +234,7 @@ func (r *attrFactory) NewQualifier(objType *types.Type, qualID int64, val any, o }, nil } } - return newQualifier(r.adapter, qualID, val, opt) + return newQualifier(r.adapter, qualID, val, opt, r.errorOnBadPresenceTest) } type absoluteAttribute struct { @@ -226,6 +246,8 @@ type absoluteAttribute struct { adapter types.Adapter provider types.Provider fac AttributeFactory + + errorOnBadPresenceTest bool } // ID implements the Attribute interface method. @@ -514,6 +536,8 @@ type relativeAttribute struct { qualifiers []Qualifier adapter types.Adapter fac AttributeFactory + + errorOnBadPresenceTest bool } // ID is an implementation of the Attribute interface method. @@ -577,7 +601,7 @@ func (a *relativeAttribute) String() string { return fmt.Sprintf("id: %v, operand: %v", a.id, a.operand) } -func newQualifier(adapter types.Adapter, id int64, v any, opt bool) (Qualifier, error) { +func newQualifier(adapter types.Adapter, id int64, v any, opt, errorOnBadPresenceTest bool) (Qualifier, error) { var qual Qualifier switch val := v.(type) { case Attribute: @@ -592,71 +616,138 @@ func newQualifier(adapter types.Adapter, id int64, v any, opt bool) (Qualifier, }, nil case string: qual = &stringQualifier{ - id: id, - value: val, - celValue: types.String(val), - adapter: adapter, - optional: opt, + id: id, + value: val, + celValue: types.String(val), + adapter: adapter, + optional: opt, + errorOnBadPresenceTest: errorOnBadPresenceTest, } case int: qual = &intQualifier{ - id: id, value: int64(val), celValue: types.Int(val), adapter: adapter, optional: opt, + id: id, + value: int64(val), + celValue: types.Int(val), + adapter: adapter, + optional: opt, + errorOnBadPresenceTest: errorOnBadPresenceTest, } case int32: qual = &intQualifier{ - id: id, value: int64(val), celValue: types.Int(val), adapter: adapter, optional: opt, + id: id, + value: int64(val), + celValue: types.Int(val), + adapter: adapter, + optional: opt, + errorOnBadPresenceTest: errorOnBadPresenceTest, } case int64: qual = &intQualifier{ - id: id, value: val, celValue: types.Int(val), adapter: adapter, optional: opt, + id: id, + value: val, + celValue: types.Int(val), + adapter: adapter, + optional: opt, + errorOnBadPresenceTest: errorOnBadPresenceTest, } case uint: qual = &uintQualifier{ - id: id, value: uint64(val), celValue: types.Uint(val), adapter: adapter, optional: opt, + id: id, + value: uint64(val), + celValue: types.Uint(val), + adapter: adapter, + optional: opt, + errorOnBadPresenceTest: errorOnBadPresenceTest, } case uint32: qual = &uintQualifier{ - id: id, value: uint64(val), celValue: types.Uint(val), adapter: adapter, optional: opt, + id: id, + value: uint64(val), + celValue: types.Uint(val), + adapter: adapter, + optional: opt, + errorOnBadPresenceTest: errorOnBadPresenceTest, } case uint64: qual = &uintQualifier{ - id: id, value: val, celValue: types.Uint(val), adapter: adapter, optional: opt, + id: id, + value: val, + celValue: types.Uint(val), + adapter: adapter, + optional: opt, + errorOnBadPresenceTest: errorOnBadPresenceTest, } case bool: qual = &boolQualifier{ - id: id, value: val, celValue: types.Bool(val), adapter: adapter, optional: opt, + id: id, + value: 
val, + celValue: types.Bool(val), + adapter: adapter, + optional: opt, + errorOnBadPresenceTest: errorOnBadPresenceTest, } case float32: qual = &doubleQualifier{ - id: id, - value: float64(val), - celValue: types.Double(val), - adapter: adapter, - optional: opt, + id: id, + value: float64(val), + celValue: types.Double(val), + adapter: adapter, + optional: opt, + errorOnBadPresenceTest: errorOnBadPresenceTest, } case float64: qual = &doubleQualifier{ - id: id, value: val, celValue: types.Double(val), adapter: adapter, optional: opt, + id: id, + value: val, + celValue: types.Double(val), + adapter: adapter, + optional: opt, + errorOnBadPresenceTest: errorOnBadPresenceTest, } case types.String: qual = &stringQualifier{ - id: id, value: string(val), celValue: val, adapter: adapter, optional: opt, + id: id, + value: string(val), + celValue: val, + adapter: adapter, + optional: opt, + errorOnBadPresenceTest: errorOnBadPresenceTest, } case types.Int: qual = &intQualifier{ - id: id, value: int64(val), celValue: val, adapter: adapter, optional: opt, + id: id, + value: int64(val), + celValue: val, + adapter: adapter, + optional: opt, + errorOnBadPresenceTest: errorOnBadPresenceTest, } case types.Uint: qual = &uintQualifier{ - id: id, value: uint64(val), celValue: val, adapter: adapter, optional: opt, + id: id, + value: uint64(val), + celValue: val, + adapter: adapter, + optional: opt, + errorOnBadPresenceTest: errorOnBadPresenceTest, } case types.Bool: qual = &boolQualifier{ - id: id, value: bool(val), celValue: val, adapter: adapter, optional: opt, + id: id, + value: bool(val), + celValue: val, + adapter: adapter, + optional: opt, + errorOnBadPresenceTest: errorOnBadPresenceTest, } case types.Double: qual = &doubleQualifier{ - id: id, value: float64(val), celValue: val, adapter: adapter, optional: opt, + id: id, + value: float64(val), + celValue: val, + adapter: adapter, + optional: opt, + errorOnBadPresenceTest: errorOnBadPresenceTest, } case *types.Unknown: qual = &unknownQualifier{id: id, value: val} @@ -687,11 +778,12 @@ func (q *attrQualifier) IsOptional() bool { } type stringQualifier struct { - id int64 - value string - celValue ref.Val - adapter types.Adapter - optional bool + id int64 + value string + celValue ref.Val + adapter types.Adapter + optional bool + errorOnBadPresenceTest bool } // ID is an implementation of the Qualifier interface method. @@ -774,7 +866,7 @@ func (q *stringQualifier) qualifyInternal(vars Activation, obj any, presenceTest return obj, true, nil } default: - return refQualify(q.adapter, obj, q.celValue, presenceTest, presenceOnly) + return refQualify(q.adapter, obj, q.celValue, presenceTest, presenceOnly, q.errorOnBadPresenceTest) } if presenceTest { return nil, false, nil @@ -788,11 +880,12 @@ func (q *stringQualifier) Value() ref.Val { } type intQualifier struct { - id int64 - value int64 - celValue ref.Val - adapter types.Adapter - optional bool + id int64 + value int64 + celValue ref.Val + adapter types.Adapter + optional bool + errorOnBadPresenceTest bool } // ID is an implementation of the Qualifier interface method. 
@@ -898,7 +991,7 @@ func (q *intQualifier) qualifyInternal(vars Activation, obj any, presenceTest, p return o[i], true, nil } default: - return refQualify(q.adapter, obj, q.celValue, presenceTest, presenceOnly) + return refQualify(q.adapter, obj, q.celValue, presenceTest, presenceOnly, q.errorOnBadPresenceTest) } if presenceTest { return nil, false, nil @@ -915,11 +1008,12 @@ func (q *intQualifier) Value() ref.Val { } type uintQualifier struct { - id int64 - value uint64 - celValue ref.Val - adapter types.Adapter - optional bool + id int64 + value uint64 + celValue ref.Val + adapter types.Adapter + optional bool + errorOnBadPresenceTest bool } // ID is an implementation of the Qualifier interface method. @@ -966,7 +1060,7 @@ func (q *uintQualifier) qualifyInternal(vars Activation, obj any, presenceTest, return obj, true, nil } default: - return refQualify(q.adapter, obj, q.celValue, presenceTest, presenceOnly) + return refQualify(q.adapter, obj, q.celValue, presenceTest, presenceOnly, q.errorOnBadPresenceTest) } if presenceTest { return nil, false, nil @@ -980,11 +1074,12 @@ func (q *uintQualifier) Value() ref.Val { } type boolQualifier struct { - id int64 - value bool - celValue ref.Val - adapter types.Adapter - optional bool + id int64 + value bool + celValue ref.Val + adapter types.Adapter + optional bool + errorOnBadPresenceTest bool } // ID is an implementation of the Qualifier interface method. @@ -1017,7 +1112,7 @@ func (q *boolQualifier) qualifyInternal(vars Activation, obj any, presenceTest, return obj, true, nil } default: - return refQualify(q.adapter, obj, q.celValue, presenceTest, presenceOnly) + return refQualify(q.adapter, obj, q.celValue, presenceTest, presenceOnly, q.errorOnBadPresenceTest) } if presenceTest { return nil, false, nil @@ -1092,11 +1187,12 @@ func (q *fieldQualifier) Value() ref.Val { // type may not be known ahead of time and may not conform to the standard types supported as valid // protobuf map key types. type doubleQualifier struct { - id int64 - value float64 - celValue ref.Val - adapter types.Adapter - optional bool + id int64 + value float64 + celValue ref.Val + adapter types.Adapter + optional bool + errorOnBadPresenceTest bool } // ID is an implementation of the Qualifier interface method. @@ -1120,7 +1216,7 @@ func (q *doubleQualifier) QualifyIfPresent(vars Activation, obj any, presenceOnl } func (q *doubleQualifier) qualifyInternal(vars Activation, obj any, presenceTest, presenceOnly bool) (any, bool, error) { - return refQualify(q.adapter, obj, q.celValue, presenceTest, presenceOnly) + return refQualify(q.adapter, obj, q.celValue, presenceTest, presenceOnly, q.errorOnBadPresenceTest) } // Value implements the ConstantQualifier interface @@ -1226,7 +1322,7 @@ func attrQualifyIfPresent(fac AttributeFactory, vars Activation, obj any, qualAt // refQualify attempts to convert the value to a CEL value and then uses reflection methods to try and // apply the qualifier with the option to presence test field accesses before retrieving field values. 
-func refQualify(adapter types.Adapter, obj any, idx ref.Val, presenceTest, presenceOnly bool) (ref.Val, bool, error) { +func refQualify(adapter types.Adapter, obj any, idx ref.Val, presenceTest, presenceOnly, errorOnBadPresenceTest bool) (ref.Val, bool, error) { celVal := adapter.NativeToValue(obj) switch v := celVal.(type) { case *types.Unknown: @@ -1283,7 +1379,7 @@ func refQualify(adapter types.Adapter, obj any, idx ref.Val, presenceTest, prese } return val, true, nil default: - if presenceTest { + if presenceTest && !errorOnBadPresenceTest { return nil, false, nil } return nil, false, missingKey(idx) diff --git a/vendor/github.com/google/cel-go/interpreter/interpretable.go b/vendor/github.com/google/cel-go/interpreter/interpretable.go index 561238407..ebc432e9d 100644 --- a/vendor/github.com/google/cel-go/interpreter/interpretable.go +++ b/vendor/github.com/google/cel-go/interpreter/interpretable.go @@ -16,6 +16,7 @@ package interpreter import ( "fmt" + "sync" "github.com/google/cel-go/common/functions" "github.com/google/cel-go/common/operators" @@ -96,7 +97,7 @@ type InterpretableCall interface { Args() []Interpretable } -// InterpretableConstructor interface for inspecting Interpretable instructions that initialize a list, map +// InterpretableConstructor interface for inspecting Interpretable instructions that initialize a list, map // or struct. type InterpretableConstructor interface { Interpretable @@ -720,24 +721,31 @@ func (o *evalObj) Eval(ctx Activation) ref.Val { return types.LabelErrNode(o.id, o.provider.NewValue(o.typeName, fieldVals)) } +// InitVals implements the InterpretableConstructor interface method. func (o *evalObj) InitVals() []Interpretable { return o.vals } +// Type implements the InterpretableConstructor interface method. func (o *evalObj) Type() ref.Type { - return types.NewObjectTypeValue(o.typeName) + return types.NewObjectType(o.typeName) } type evalFold struct { - id int64 - accuVar string - iterVar string - iterRange Interpretable - accu Interpretable - cond Interpretable - step Interpretable - result Interpretable - adapter types.Adapter + id int64 + accuVar string + iterVar string + iterVar2 string + iterRange Interpretable + accu Interpretable + cond Interpretable + step Interpretable + result Interpretable + adapter types.Adapter + + // note an exhaustive fold will ensure that all branches are evaluated + // when using mutable values, these branches will mutate the final result + // rather than make a throw-away computation. exhaustive bool interruptable bool } @@ -749,64 +757,30 @@ func (fold *evalFold) ID() int64 { // Eval implements the Interpretable interface method. func (fold *evalFold) Eval(ctx Activation) ref.Val { - foldRange := fold.iterRange.Eval(ctx) - if !foldRange.Type().HasTrait(traits.IterableType) { - return types.ValOrErr(foldRange, "got '%T', expected iterable type", foldRange) - } - // Configure the fold activation with the accumulator initial value. - accuCtx := varActivationPool.Get().(*varActivation) - accuCtx.parent = ctx - accuCtx.name = fold.accuVar - accuCtx.val = fold.accu.Eval(ctx) - // If the accumulator starts as an empty list, then the comprehension will build a list - // so create a mutable list to optimize the cost of the inner loop. 
- l, ok := accuCtx.val.(traits.Lister) - buildingList := false - if !fold.exhaustive && ok && l.Size() == types.IntZero { - buildingList = true - accuCtx.val = types.NewMutableList(fold.adapter) - } - iterCtx := varActivationPool.Get().(*varActivation) - iterCtx.parent = accuCtx - iterCtx.name = fold.iterVar - - interrupted := false - it := foldRange.(traits.Iterable).Iterator() - for it.HasNext() == types.True { - // Modify the iter var in the fold activation. - iterCtx.val = it.Next() + // Initialize the folder interface + f := newFolder(fold, ctx) + defer releaseFolder(f) - // Evaluate the condition, terminate the loop if false. - cond := fold.cond.Eval(iterCtx) - condBool, ok := cond.(types.Bool) - if !fold.exhaustive && ok && condBool != types.True { - break - } - // Evaluate the evaluation step into accu var. - accuCtx.val = fold.step.Eval(iterCtx) - if fold.interruptable { - if stop, found := ctx.ResolveName("#interrupted"); found && stop == true { - interrupted = true - break - } + foldRange := fold.iterRange.Eval(ctx) + if fold.iterVar2 != "" { + var foldable traits.Foldable + switch r := foldRange.(type) { + case traits.Mapper: + foldable = types.ToFoldableMap(r) + case traits.Lister: + foldable = types.ToFoldableList(r) + default: + return types.NewErrWithNodeID(fold.ID(), "unsupported comprehension range type: %T", foldRange) } - } - varActivationPool.Put(iterCtx) - if interrupted { - varActivationPool.Put(accuCtx) - return types.NewErr("operation interrupted") + foldable.Fold(f) + return f.evalResult() } - // Compute the result. - res := fold.result.Eval(accuCtx) - varActivationPool.Put(accuCtx) - // Convert a mutable list to an immutable one, if the comprehension has generated a list as a result. - if !types.IsUnknownOrError(res) && buildingList { - if _, ok := res.(traits.MutableLister); ok { - res = res.(traits.MutableLister).ToImmutableList() - } + if !foldRange.Type().HasTrait(traits.IterableType) { + return types.ValOrErr(foldRange, "got '%T', expected iterable type", foldRange) } - return res + iterable := foldRange.(traits.Iterable) + return f.foldIterable(iterable) } // Optional Interpretable implementations that specialize, subsume, or extend the core evaluation @@ -1262,3 +1236,172 @@ func invalidOptionalEntryInit(field any, value ref.Val) ref.Val { func invalidOptionalElementInit(value ref.Val) ref.Val { return types.NewErr("cannot initialize optional list element from non-optional value %v", value) } + +// newFolder creates or initializes a pooled folder instance. +func newFolder(eval *evalFold, ctx Activation) *folder { + f := folderPool.Get().(*folder) + f.evalFold = eval + f.Activation = ctx + return f +} + +// releaseFolder resets and releases a pooled folder instance. +func releaseFolder(f *folder) { + f.reset() + folderPool.Put(f) +} + +// folder tracks the state associated with folding a list or map with a comprehension v2 style macro. +// +// The folder embeds an interpreter.Activation and Interpretable evalFold value as well as implements +// the traits.Folder interface methods. +// +// Instances of a folder are intended to be pooled to minimize allocation overhead with this temporary +// bookkeeping object which supports lazy evaluation of the accumulator init expression which is useful +// in preserving evaluation order semantics which might otherwise be disrupted through the use of +// cel.bind or cel.@block. +type folder struct { + *evalFold + Activation + + // fold state objects. 
+ accuVal ref.Val + iterVar1Val any + iterVar2Val any + + // bookkeeping flags to modify Activation and fold behaviors. + initialized bool + mutableValue bool + interrupted bool + computeResult bool +} + +func (f *folder) foldIterable(iterable traits.Iterable) ref.Val { + it := iterable.Iterator() + for it.HasNext() == types.True { + f.iterVar1Val = it.Next() + + cond := f.cond.Eval(f) + condBool, ok := cond.(types.Bool) + if f.interrupted || (!f.exhaustive && ok && condBool != types.True) { + return f.evalResult() + } + + // Update the accumulation value and check for eval interuption. + f.accuVal = f.step.Eval(f) + f.initialized = true + if f.interruptable && checkInterrupt(f.Activation) { + f.interrupted = true + return f.evalResult() + } + } + return f.evalResult() +} + +// FoldEntry will either fold comprehension v1 style macros if iterVar2 is unset, or comprehension v2 style +// macros if both the iterVar and iterVar2 are set to non-empty strings. +func (f *folder) FoldEntry(key, val any) bool { + // Default to referencing both values. + f.iterVar1Val = key + f.iterVar2Val = val + + // Terminate evaluation if evaluation is interrupted or the condition is not true and exhaustive + // eval is not enabled. + cond := f.cond.Eval(f) + condBool, ok := cond.(types.Bool) + if f.interrupted || (!f.exhaustive && ok && condBool != types.True) { + return false + } + + // Update the accumulation value and check for eval interuption. + f.accuVal = f.step.Eval(f) + f.initialized = true + if f.interruptable && checkInterrupt(f.Activation) { + f.interrupted = true + return false + } + return true +} + +// ResolveName overrides the default Activation lookup to perform lazy initialization of the accumulator +// and specialized lookups of iteration values with consideration for whether the final result is being +// computed and the iteration variables should be ignored. +func (f *folder) ResolveName(name string) (any, bool) { + if name == f.accuVar { + if !f.initialized { + f.initialized = true + initVal := f.accu.Eval(f.Activation) + if !f.exhaustive { + if l, isList := initVal.(traits.Lister); isList && l.Size() == types.IntZero { + initVal = types.NewMutableList(f.adapter) + f.mutableValue = true + } + if m, isMap := initVal.(traits.Mapper); isMap && m.Size() == types.IntZero { + initVal = types.NewMutableMap(f.adapter, map[ref.Val]ref.Val{}) + f.mutableValue = true + } + } + f.accuVal = initVal + } + return f.accuVal, true + } + if !f.computeResult { + if name == f.iterVar { + f.iterVar1Val = f.adapter.NativeToValue(f.iterVar1Val) + return f.iterVar1Val, true + } + if name == f.iterVar2 { + f.iterVar2Val = f.adapter.NativeToValue(f.iterVar2Val) + return f.iterVar2Val, true + } + } + return f.Activation.ResolveName(name) +} + +// evalResult computes the final result of the fold after all entries have been folded and accumulated. +func (f *folder) evalResult() ref.Val { + f.computeResult = true + if f.interrupted { + return types.NewErr("operation interrupted") + } + res := f.result.Eval(f) + // Convert a mutable list or map to an immutable one if the comprehension has generated a list or + // map as a result. + if !types.IsUnknownOrError(res) && f.mutableValue { + if _, ok := res.(traits.MutableLister); ok { + res = res.(traits.MutableLister).ToImmutableList() + } + if _, ok := res.(traits.MutableMapper); ok { + res = res.(traits.MutableMapper).ToImmutableMap() + } + } + return res +} + +// reset clears any state associated with folder evaluation. 
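// Illustrative sketch (not part of the vendored change): the folder above backs
// two-variable comprehensions, assuming the companion ext.TwoVarComprehensions()
// macro set shipped in the same cel-go release.
package main

import (
	"fmt"

	"github.com/google/cel-go/cel"
	"github.com/google/cel-go/ext"
)

func main() {
	env, err := cel.NewEnv(ext.TwoVarComprehensions())
	if err != nil {
		panic(err)
	}
	// transformMap(k, v, ...) binds both iteration variables, so the planner sets
	// iterVar2 and evaluation goes through folder.FoldEntry rather than the
	// single-variable iterator path.
	ast, iss := env.Compile(`{'a': 1, 'b': 2}.transformMap(k, v, v * 2)`)
	if iss.Err() != nil {
		panic(iss.Err())
	}
	prg, err := env.Program(ast)
	if err != nil {
		panic(err)
	}
	out, _, err := prg.Eval(cel.NoVars())
	if err != nil {
		panic(err)
	}
	fmt.Println(out) // map with each value doubled: {a: 2, b: 4}
}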
+func (f *folder) reset() { + f.evalFold = nil + f.Activation = nil + f.accuVal = nil + f.iterVar1Val = nil + f.iterVar2Val = nil + + f.initialized = false + f.mutableValue = false + f.interrupted = false + f.computeResult = false +} + +func checkInterrupt(a Activation) bool { + stop, found := a.ResolveName("#interrupted") + return found && stop == true +} + +var ( + // pool of var folders to reduce allocations during folds. + folderPool = &sync.Pool{ + New: func() any { + return &folder{} + }, + } +) diff --git a/vendor/github.com/google/cel-go/interpreter/planner.go b/vendor/github.com/google/cel-go/interpreter/planner.go index cf371f95d..3d918ce87 100644 --- a/vendor/github.com/google/cel-go/interpreter/planner.go +++ b/vendor/github.com/google/cel-go/interpreter/planner.go @@ -603,6 +603,7 @@ func (p *planner) planComprehension(expr ast.Expr) (Interpretable, error) { accuVar: fold.AccuVar(), accu: accu, iterVar: fold.IterVar(), + iterVar2: fold.IterVar2(), iterRange: iterRange, cond: cond, step: step, diff --git a/vendor/github.com/google/cel-go/parser/gen/BUILD.bazel b/vendor/github.com/google/cel-go/parser/gen/BUILD.bazel index e70433483..3efed87b7 100644 --- a/vendor/github.com/google/cel-go/parser/gen/BUILD.bazel +++ b/vendor/github.com/google/cel-go/parser/gen/BUILD.bazel @@ -1,7 +1,7 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library") package( - default_visibility = ["//parser:__subpackages__"], + default_visibility = ["//:__subpackages__"], licenses = ["notice"], # Apache 2.0 ) diff --git a/vendor/github.com/google/cel-go/parser/helper.go b/vendor/github.com/google/cel-go/parser/helper.go index 182ff034c..9f09ead0e 100644 --- a/vendor/github.com/google/cel-go/parser/helper.go +++ b/vendor/github.com/google/cel-go/parser/helper.go @@ -115,7 +115,7 @@ func (p *parserHelper) newObjectField(fieldID int64, field string, value ast.Exp func (p *parserHelper) newComprehension(ctx any, iterRange ast.Expr, - iterVar string, + iterVar, accuVar string, accuInit ast.Expr, condition ast.Expr, @@ -125,6 +125,18 @@ func (p *parserHelper) newComprehension(ctx any, p.newID(ctx), iterRange, iterVar, accuVar, accuInit, condition, step, result) } +func (p *parserHelper) newComprehensionTwoVar(ctx any, + iterRange ast.Expr, + iterVar, iterVar2, + accuVar string, + accuInit ast.Expr, + condition ast.Expr, + step ast.Expr, + result ast.Expr) ast.Expr { + return p.exprFactory.NewComprehensionTwoVar( + p.newID(ctx), iterRange, iterVar, iterVar2, accuVar, accuInit, condition, step, result) +} + func (p *parserHelper) newID(ctx any) int64 { if id, isID := ctx.(int64); isID { return id @@ -140,15 +152,12 @@ func (p *parserHelper) id(ctx any) int64 { var offset ast.OffsetRange switch c := ctx.(type) { case antlr.ParserRuleContext: - start, stop := c.GetStart(), c.GetStop() - if stop == nil { - stop = start - } + start := c.GetStart() offset.Start = p.sourceInfo.ComputeOffset(int32(start.GetLine()), int32(start.GetColumn())) - offset.Stop = p.sourceInfo.ComputeOffset(int32(stop.GetLine()), int32(stop.GetColumn())) + offset.Stop = offset.Start + int32(len(c.GetText())) case antlr.Token: offset.Start = p.sourceInfo.ComputeOffset(int32(c.GetLine()), int32(c.GetColumn())) - offset.Stop = offset.Start + offset.Stop = offset.Start + int32(len(c.GetText())) case common.Location: offset.Start = p.sourceInfo.ComputeOffset(int32(c.Line()), int32(c.Column())) offset.Stop = offset.Start @@ -164,10 +173,21 @@ func (p *parserHelper) id(ctx any) int64 { return id } +func (p *parserHelper) deleteID(id int64) { + 
p.sourceInfo.ClearOffsetRange(id) + if id == p.nextID-1 { + p.nextID-- + } +} + func (p *parserHelper) getLocation(id int64) common.Location { return p.sourceInfo.GetStartLocation(id) } +func (p *parserHelper) getLocationByOffset(offset int32) common.Location { + return p.getSourceInfo().GetLocationByOffset(offset) +} + // buildMacroCallArg iterates the expression and returns a new expression // where all macros have been replaced by their IDs in MacroCalls func (p *parserHelper) buildMacroCallArg(expr ast.Expr) ast.Expr { @@ -375,8 +395,10 @@ func (e *exprHelper) Copy(expr ast.Expr) ast.Expr { cond := e.Copy(compre.LoopCondition()) step := e.Copy(compre.LoopStep()) result := e.Copy(compre.Result()) - return e.exprFactory.NewComprehension(copyID, - iterRange, compre.IterVar(), compre.AccuVar(), accuInit, cond, step, result) + // All comprehensions can be represented by the two-variable comprehension since the + // differentiation between one and two-variable is whether the iterVar2 value is non-empty. + return e.exprFactory.NewComprehensionTwoVar(copyID, + iterRange, compre.IterVar(), compre.IterVar2(), compre.AccuVar(), accuInit, cond, step, result) } return e.exprFactory.NewUnspecifiedExpr(copyID) } @@ -424,6 +446,20 @@ func (e *exprHelper) NewComprehension( e.nextMacroID(), iterRange, iterVar, accuVar, accuInit, condition, step, result) } +// NewComprehensionTwoVar implements the ExprHelper interface method. +func (e *exprHelper) NewComprehensionTwoVar( + iterRange ast.Expr, + iterVar, + iterVar2, + accuVar string, + accuInit, + condition, + step, + result ast.Expr) ast.Expr { + return e.exprFactory.NewComprehensionTwoVar( + e.nextMacroID(), iterRange, iterVar, iterVar2, accuVar, accuInit, condition, step, result) +} + // NewIdent implements the ExprHelper interface method. func (e *exprHelper) NewIdent(name string) ast.Expr { return e.exprFactory.NewIdent(e.nextMacroID(), name) diff --git a/vendor/github.com/google/cel-go/parser/macro.go b/vendor/github.com/google/cel-go/parser/macro.go index 1f4c847e0..dc47b4203 100644 --- a/vendor/github.com/google/cel-go/parser/macro.go +++ b/vendor/github.com/google/cel-go/parser/macro.go @@ -170,11 +170,12 @@ type ExprHelper interface { // NewStructField creates a new struct field initializer from the field name and value. NewStructField(field string, init ast.Expr, optional bool) ast.EntryExpr - // NewComprehension creates a new comprehension instruction. + // NewComprehension creates a new one-variable comprehension instruction. // // - iterRange represents the expression that resolves to a list or map where the elements or // keys (respectively) will be iterated over. - // - iterVar is the iteration variable name. + // - iterVar is the variable name for the list element value, or the map key, depending on the + // range type. // - accuVar is the accumulation variable name, typically parser.AccumulatorName. // - accuInit is the initial expression whose value will be set for the accuVar prior to // folding. @@ -186,11 +187,36 @@ type ExprHelper interface { // environment in the step and condition expressions. Presently, the name __result__ is commonly // used by built-in macros but this may change in the future. NewComprehension(iterRange ast.Expr, - iterVar string, + iterVar, accuVar string, - accuInit ast.Expr, - condition ast.Expr, - step ast.Expr, + accuInit, + condition, + step, + result ast.Expr) ast.Expr + + // NewComprehensionTwoVar creates a new two-variable comprehension instruction. 
+ // + // - iterRange represents the expression that resolves to a list or map where the elements or + // keys (respectively) will be iterated over. + // - iterVar is the iteration variable assigned to the list index or the map key. + // - iterVar2 is the iteration variable assigned to the list element value or the map key value. + // - accuVar is the accumulation variable name, typically parser.AccumulatorName. + // - accuInit is the initial expression whose value will be set for the accuVar prior to + // folding. + // - condition is the expression to test to determine whether to continue folding. + // - step is the expression to evaluation at the conclusion of a single fold iteration. + // - result is the computation to evaluate at the conclusion of the fold. + // + // The accuVar should not shadow variable names that you would like to reference within the + // environment in the step and condition expressions. Presently, the name __result__ is commonly + // used by built-in macros but this may change in the future. + NewComprehensionTwoVar(iterRange ast.Expr, + iterVar, + iterVar2, + accuVar string, + accuInit, + condition, + step, result ast.Expr) ast.Expr // NewIdent creates an identifier Expr value. @@ -382,13 +408,11 @@ func makeQuantifier(kind quantifierKind, eh ExprHelper, target ast.Expr, args [] step = eh.NewCall(operators.LogicalOr, eh.NewAccuIdent(), args[1]) result = eh.NewAccuIdent() case quantifierExistsOne: - zeroExpr := eh.NewLiteral(types.Int(0)) - oneExpr := eh.NewLiteral(types.Int(1)) - init = zeroExpr + init = eh.NewLiteral(types.Int(0)) condition = eh.NewLiteral(types.True) step = eh.NewCall(operators.Conditional, args[1], - eh.NewCall(operators.Add, eh.NewAccuIdent(), oneExpr), eh.NewAccuIdent()) - result = eh.NewCall(operators.Equals, eh.NewAccuIdent(), oneExpr) + eh.NewCall(operators.Add, eh.NewAccuIdent(), eh.NewLiteral(types.Int(1))), eh.NewAccuIdent()) + result = eh.NewCall(operators.Equals, eh.NewAccuIdent(), eh.NewLiteral(types.Int(1))) default: return nil, eh.NewError(args[0].ID(), fmt.Sprintf("unrecognized quantifier '%v'", kind)) } diff --git a/vendor/github.com/google/cel-go/parser/parser.go b/vendor/github.com/google/cel-go/parser/parser.go index cb753df73..5cbb17672 100644 --- a/vendor/github.com/google/cel-go/parser/parser.go +++ b/vendor/github.com/google/cel-go/parser/parser.go @@ -856,7 +856,8 @@ func (p *parser) reportError(ctx any, format string, args ...any) ast.Expr { // ANTLR Parse listener implementations func (p *parser) SyntaxError(recognizer antlr.Recognizer, offendingSymbol any, line, column int, msg string, e antlr.RecognitionException) { - l := p.helper.source.NewLocation(line, column) + offset := p.helper.sourceInfo.ComputeOffset(int32(line), int32(column)) + l := p.helper.getLocationByOffset(offset) // Hack to keep existing error messages consistent with previous versions of CEL when a reserved word // is used as an identifier. This behavior needs to be overhauled to provide consistent, normalized error // messages out of ANTLR to prevent future breaking changes related to error message content. @@ -916,10 +917,12 @@ func (p *parser) expandMacro(exprID int64, function string, target ast.Expr, arg expr, err := macro.Expander()(eh, target, args) // An error indicates that the macro was matched, but the arguments were not well-formed. 
if err != nil { - if err.Location != nil { - return p.reportError(err.Location, err.Message), true + loc := err.Location + if loc == nil { + loc = p.helper.getLocation(exprID) } - return p.reportError(p.helper.getLocation(exprID), err.Message), true + p.helper.deleteID(exprID) + return p.reportError(loc, err.Message), true } // A nil value from the macro indicates that the macro implementation decided that // an expansion should not be performed. @@ -929,6 +932,7 @@ func (p *parser) expandMacro(exprID int64, function string, target ast.Expr, arg if p.populateMacroCalls { p.helper.addMacroCall(expr.ID(), function, target, args...) } + p.helper.deleteID(exprID) return expr, true } diff --git a/vendor/github.com/google/gnostic-models/compiler/extensions.go b/vendor/github.com/google/gnostic-models/compiler/extensions.go index 250c81e8c..16ae66faa 100644 --- a/vendor/github.com/google/gnostic-models/compiler/extensions.go +++ b/vendor/github.com/google/gnostic-models/compiler/extensions.go @@ -20,8 +20,8 @@ import ( "os/exec" "strings" - "github.com/golang/protobuf/proto" - "github.com/golang/protobuf/ptypes/any" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/known/anypb" yaml "gopkg.in/yaml.v3" extensions "github.com/google/gnostic-models/extensions" @@ -33,7 +33,7 @@ type ExtensionHandler struct { } // CallExtension calls a binary extension handler. -func CallExtension(context *Context, in *yaml.Node, extensionName string) (handled bool, response *any.Any, err error) { +func CallExtension(context *Context, in *yaml.Node, extensionName string) (handled bool, response *anypb.Any, err error) { if context == nil || context.ExtensionHandlers == nil { return false, nil, nil } @@ -50,7 +50,7 @@ func CallExtension(context *Context, in *yaml.Node, extensionName string) (handl return handled, response, err } -func (extensionHandlers *ExtensionHandler) handle(in *yaml.Node, extensionName string) (*any.Any, error) { +func (extensionHandlers *ExtensionHandler) handle(in *yaml.Node, extensionName string) (*anypb.Any, error) { if extensionHandlers.Name != "" { yamlData, _ := yaml.Marshal(in) request := &extensions.ExtensionHandlerRequest{ diff --git a/vendor/github.com/google/gnostic-models/extensions/extension.pb.go b/vendor/github.com/google/gnostic-models/extensions/extension.pb.go index a71df8abe..16c40d985 100644 --- a/vendor/github.com/google/gnostic-models/extensions/extension.pb.go +++ b/vendor/github.com/google/gnostic-models/extensions/extension.pb.go @@ -14,8 +14,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.27.1 -// protoc v3.19.3 +// protoc-gen-go v1.35.1 +// protoc v4.23.4 // source: extensions/extension.proto package gnostic_extension_v1 @@ -51,11 +51,9 @@ type Version struct { func (x *Version) Reset() { *x = Version{} - if protoimpl.UnsafeEnabled { - mi := &file_extensions_extension_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_extensions_extension_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Version) String() string { @@ -66,7 +64,7 @@ func (*Version) ProtoMessage() {} func (x *Version) ProtoReflect() protoreflect.Message { mi := &file_extensions_extension_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -123,11 +121,9 @@ type ExtensionHandlerRequest struct { func (x *ExtensionHandlerRequest) Reset() { *x = ExtensionHandlerRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_extensions_extension_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_extensions_extension_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ExtensionHandlerRequest) String() string { @@ -138,7 +134,7 @@ func (*ExtensionHandlerRequest) ProtoMessage() {} func (x *ExtensionHandlerRequest) ProtoReflect() protoreflect.Message { mi := &file_extensions_extension_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -191,11 +187,9 @@ type ExtensionHandlerResponse struct { func (x *ExtensionHandlerResponse) Reset() { *x = ExtensionHandlerResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_extensions_extension_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_extensions_extension_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ExtensionHandlerResponse) String() string { @@ -206,7 +200,7 @@ func (*ExtensionHandlerResponse) ProtoMessage() {} func (x *ExtensionHandlerResponse) ProtoReflect() protoreflect.Message { mi := &file_extensions_extension_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -257,11 +251,9 @@ type Wrapper struct { func (x *Wrapper) Reset() { *x = Wrapper{} - if protoimpl.UnsafeEnabled { - mi := &file_extensions_extension_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_extensions_extension_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Wrapper) String() string { @@ -272,7 +264,7 @@ func (*Wrapper) ProtoMessage() {} func (x *Wrapper) ProtoReflect() protoreflect.Message { mi := &file_extensions_extension_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -367,7 +359,7 @@ func file_extensions_extension_proto_rawDescGZIP() []byte { } var file_extensions_extension_proto_msgTypes = 
make([]protoimpl.MessageInfo, 4) -var file_extensions_extension_proto_goTypes = []interface{}{ +var file_extensions_extension_proto_goTypes = []any{ (*Version)(nil), // 0: gnostic.extension.v1.Version (*ExtensionHandlerRequest)(nil), // 1: gnostic.extension.v1.ExtensionHandlerRequest (*ExtensionHandlerResponse)(nil), // 2: gnostic.extension.v1.ExtensionHandlerResponse @@ -390,56 +382,6 @@ func file_extensions_extension_proto_init() { if File_extensions_extension_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_extensions_extension_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Version); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_extensions_extension_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ExtensionHandlerRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_extensions_extension_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ExtensionHandlerResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_extensions_extension_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Wrapper); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ diff --git a/vendor/github.com/google/gnostic-models/extensions/extensions.go b/vendor/github.com/google/gnostic-models/extensions/extensions.go index ec8afd009..0768163e5 100644 --- a/vendor/github.com/google/gnostic-models/extensions/extensions.go +++ b/vendor/github.com/google/gnostic-models/extensions/extensions.go @@ -19,8 +19,8 @@ import ( "log" "os" - "github.com/golang/protobuf/proto" - "github.com/golang/protobuf/ptypes" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/known/anypb" ) type extensionHandler func(name string, yamlInput string) (bool, proto.Message, error) @@ -54,7 +54,7 @@ func Main(handler extensionHandler) { response.Errors = append(response.Errors, err.Error()) } else if handled { response.Handled = true - response.Value, err = ptypes.MarshalAny(output) + response.Value, err = anypb.New(output) if err != nil { response.Errors = append(response.Errors, err.Error()) } diff --git a/vendor/github.com/google/gnostic-models/openapiv2/OpenAPIv2.pb.go b/vendor/github.com/google/gnostic-models/openapiv2/OpenAPIv2.pb.go index 65c4c913c..3b930b3de 100644 --- a/vendor/github.com/google/gnostic-models/openapiv2/OpenAPIv2.pb.go +++ b/vendor/github.com/google/gnostic-models/openapiv2/OpenAPIv2.pb.go @@ -16,8 +16,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.27.1 -// protoc v3.19.3 +// protoc-gen-go v1.35.1 +// protoc v4.23.4 // source: openapiv2/OpenAPIv2.proto package openapi_v2 @@ -43,6 +43,7 @@ type AdditionalPropertiesItem struct { unknownFields protoimpl.UnknownFields // Types that are assignable to Oneof: + // // *AdditionalPropertiesItem_Schema // *AdditionalPropertiesItem_Boolean Oneof isAdditionalPropertiesItem_Oneof `protobuf_oneof:"oneof"` @@ -50,11 +51,9 @@ type AdditionalPropertiesItem struct { func (x *AdditionalPropertiesItem) Reset() { *x = AdditionalPropertiesItem{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *AdditionalPropertiesItem) String() string { @@ -65,7 +64,7 @@ func (*AdditionalPropertiesItem) ProtoMessage() {} func (x *AdditionalPropertiesItem) ProtoReflect() protoreflect.Message { mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -128,11 +127,9 @@ type Any struct { func (x *Any) Reset() { *x = Any{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Any) String() string { @@ -143,7 +140,7 @@ func (*Any) ProtoMessage() {} func (x *Any) ProtoReflect() protoreflect.Message { mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -186,11 +183,9 @@ type ApiKeySecurity struct { func (x *ApiKeySecurity) Reset() { *x = ApiKeySecurity{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ApiKeySecurity) String() string { @@ -201,7 +196,7 @@ func (*ApiKeySecurity) ProtoMessage() {} func (x *ApiKeySecurity) ProtoReflect() protoreflect.Message { mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -263,11 +258,9 @@ type BasicAuthenticationSecurity struct { func (x *BasicAuthenticationSecurity) Reset() { *x = BasicAuthenticationSecurity{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *BasicAuthenticationSecurity) String() string { @@ -278,7 +271,7 @@ func (*BasicAuthenticationSecurity) ProtoMessage() {} func (x *BasicAuthenticationSecurity) ProtoReflect() protoreflect.Message { mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[3] - if 
protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -333,11 +326,9 @@ type BodyParameter struct { func (x *BodyParameter) Reset() { *x = BodyParameter{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *BodyParameter) String() string { @@ -348,7 +339,7 @@ func (*BodyParameter) ProtoMessage() {} func (x *BodyParameter) ProtoReflect() protoreflect.Message { mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -422,11 +413,9 @@ type Contact struct { func (x *Contact) Reset() { *x = Contact{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Contact) String() string { @@ -437,7 +426,7 @@ func (*Contact) ProtoMessage() {} func (x *Contact) ProtoReflect() protoreflect.Message { mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -490,11 +479,9 @@ type Default struct { func (x *Default) Reset() { *x = Default{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Default) String() string { @@ -505,7 +492,7 @@ func (*Default) ProtoMessage() {} func (x *Default) ProtoReflect() protoreflect.Message { mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[6] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -538,11 +525,9 @@ type Definitions struct { func (x *Definitions) Reset() { *x = Definitions{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Definitions) String() string { @@ -553,7 +538,7 @@ func (*Definitions) ProtoMessage() {} func (x *Definitions) ProtoReflect() protoreflect.Message { mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[7] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -606,11 +591,9 @@ type Document struct { func (x *Document) Reset() { *x = Document{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[8] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := 
&file_openapiv2_OpenAPIv2_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Document) String() string { @@ -621,7 +604,7 @@ func (*Document) ProtoMessage() {} func (x *Document) ProtoReflect() protoreflect.Message { mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[8] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -758,11 +741,9 @@ type Examples struct { func (x *Examples) Reset() { *x = Examples{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[9] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Examples) String() string { @@ -773,7 +754,7 @@ func (*Examples) ProtoMessage() {} func (x *Examples) ProtoReflect() protoreflect.Message { mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[9] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -808,11 +789,9 @@ type ExternalDocs struct { func (x *ExternalDocs) Reset() { *x = ExternalDocs{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[10] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ExternalDocs) String() string { @@ -823,7 +802,7 @@ func (*ExternalDocs) ProtoMessage() {} func (x *ExternalDocs) ProtoReflect() protoreflect.Message { mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[10] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -879,11 +858,9 @@ type FileSchema struct { func (x *FileSchema) Reset() { *x = FileSchema{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[11] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FileSchema) String() string { @@ -894,7 +871,7 @@ func (*FileSchema) ProtoMessage() {} func (x *FileSchema) ProtoReflect() protoreflect.Message { mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[11] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1016,11 +993,9 @@ type FormDataParameterSubSchema struct { func (x *FormDataParameterSubSchema) Reset() { *x = FormDataParameterSubSchema{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[12] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FormDataParameterSubSchema) String() string { @@ -1031,7 +1006,7 @@ func (*FormDataParameterSubSchema) ProtoMessage() {} func (x *FormDataParameterSubSchema) ProtoReflect() protoreflect.Message { mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[12] - if 
protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1235,11 +1210,9 @@ type Header struct { func (x *Header) Reset() { *x = Header{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[13] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Header) String() string { @@ -1250,7 +1223,7 @@ func (*Header) ProtoMessage() {} func (x *Header) ProtoReflect() protoreflect.Message { mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[13] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1433,11 +1406,9 @@ type HeaderParameterSubSchema struct { func (x *HeaderParameterSubSchema) Reset() { *x = HeaderParameterSubSchema{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[14] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *HeaderParameterSubSchema) String() string { @@ -1448,7 +1419,7 @@ func (*HeaderParameterSubSchema) ProtoMessage() {} func (x *HeaderParameterSubSchema) ProtoReflect() protoreflect.Message { mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[14] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1627,11 +1598,9 @@ type Headers struct { func (x *Headers) Reset() { *x = Headers{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[15] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Headers) String() string { @@ -1642,7 +1611,7 @@ func (*Headers) ProtoMessage() {} func (x *Headers) ProtoReflect() protoreflect.Message { mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[15] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1685,11 +1654,9 @@ type Info struct { func (x *Info) Reset() { *x = Info{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[16] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Info) String() string { @@ -1700,7 +1667,7 @@ func (*Info) ProtoMessage() {} func (x *Info) ProtoReflect() protoreflect.Message { mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[16] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1774,11 +1741,9 @@ type ItemsItem struct { func (x *ItemsItem) Reset() { *x = ItemsItem{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[17] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - 
ms.StoreMessageInfo(mi) - } + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ItemsItem) String() string { @@ -1789,7 +1754,7 @@ func (*ItemsItem) ProtoMessage() {} func (x *ItemsItem) ProtoReflect() protoreflect.Message { mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[17] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1822,11 +1787,9 @@ type JsonReference struct { func (x *JsonReference) Reset() { *x = JsonReference{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[18] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *JsonReference) String() string { @@ -1837,7 +1800,7 @@ func (*JsonReference) ProtoMessage() {} func (x *JsonReference) ProtoReflect() protoreflect.Message { mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[18] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1880,11 +1843,9 @@ type License struct { func (x *License) Reset() { *x = License{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[19] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[19] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *License) String() string { @@ -1895,7 +1856,7 @@ func (*License) ProtoMessage() {} func (x *License) ProtoReflect() protoreflect.Message { mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[19] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1945,11 +1906,9 @@ type NamedAny struct { func (x *NamedAny) Reset() { *x = NamedAny{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[20] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[20] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *NamedAny) String() string { @@ -1960,7 +1919,7 @@ func (*NamedAny) ProtoMessage() {} func (x *NamedAny) ProtoReflect() protoreflect.Message { mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[20] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2003,11 +1962,9 @@ type NamedHeader struct { func (x *NamedHeader) Reset() { *x = NamedHeader{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[21] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[21] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *NamedHeader) String() string { @@ -2018,7 +1975,7 @@ func (*NamedHeader) ProtoMessage() {} func (x *NamedHeader) ProtoReflect() protoreflect.Message { mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[21] - if protoimpl.UnsafeEnabled && x != nil { + if x 
!= nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2061,11 +2018,9 @@ type NamedParameter struct { func (x *NamedParameter) Reset() { *x = NamedParameter{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[22] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[22] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *NamedParameter) String() string { @@ -2076,7 +2031,7 @@ func (*NamedParameter) ProtoMessage() {} func (x *NamedParameter) ProtoReflect() protoreflect.Message { mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[22] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2119,11 +2074,9 @@ type NamedPathItem struct { func (x *NamedPathItem) Reset() { *x = NamedPathItem{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[23] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[23] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *NamedPathItem) String() string { @@ -2134,7 +2087,7 @@ func (*NamedPathItem) ProtoMessage() {} func (x *NamedPathItem) ProtoReflect() protoreflect.Message { mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[23] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2177,11 +2130,9 @@ type NamedResponse struct { func (x *NamedResponse) Reset() { *x = NamedResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[24] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[24] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *NamedResponse) String() string { @@ -2192,7 +2143,7 @@ func (*NamedResponse) ProtoMessage() {} func (x *NamedResponse) ProtoReflect() protoreflect.Message { mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[24] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2235,11 +2186,9 @@ type NamedResponseValue struct { func (x *NamedResponseValue) Reset() { *x = NamedResponseValue{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[25] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[25] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *NamedResponseValue) String() string { @@ -2250,7 +2199,7 @@ func (*NamedResponseValue) ProtoMessage() {} func (x *NamedResponseValue) ProtoReflect() protoreflect.Message { mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[25] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2293,11 +2242,9 @@ type NamedSchema struct { func (x *NamedSchema) Reset() { *x = NamedSchema{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[26] - 
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[26] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *NamedSchema) String() string { @@ -2308,7 +2255,7 @@ func (*NamedSchema) ProtoMessage() {} func (x *NamedSchema) ProtoReflect() protoreflect.Message { mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[26] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2351,11 +2298,9 @@ type NamedSecurityDefinitionsItem struct { func (x *NamedSecurityDefinitionsItem) Reset() { *x = NamedSecurityDefinitionsItem{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[27] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[27] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *NamedSecurityDefinitionsItem) String() string { @@ -2366,7 +2311,7 @@ func (*NamedSecurityDefinitionsItem) ProtoMessage() {} func (x *NamedSecurityDefinitionsItem) ProtoReflect() protoreflect.Message { mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[27] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2409,11 +2354,9 @@ type NamedString struct { func (x *NamedString) Reset() { *x = NamedString{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[28] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[28] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *NamedString) String() string { @@ -2424,7 +2367,7 @@ func (*NamedString) ProtoMessage() {} func (x *NamedString) ProtoReflect() protoreflect.Message { mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[28] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2467,11 +2410,9 @@ type NamedStringArray struct { func (x *NamedStringArray) Reset() { *x = NamedStringArray{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[29] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[29] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *NamedStringArray) String() string { @@ -2482,7 +2423,7 @@ func (*NamedStringArray) ProtoMessage() {} func (x *NamedStringArray) ProtoReflect() protoreflect.Message { mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[29] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2517,6 +2458,7 @@ type NonBodyParameter struct { unknownFields protoimpl.UnknownFields // Types that are assignable to Oneof: + // // *NonBodyParameter_HeaderParameterSubSchema // *NonBodyParameter_FormDataParameterSubSchema // *NonBodyParameter_QueryParameterSubSchema @@ -2526,11 +2468,9 @@ type NonBodyParameter struct { func (x *NonBodyParameter) Reset() { *x = NonBodyParameter{} - if protoimpl.UnsafeEnabled { - mi := 
&file_openapiv2_OpenAPIv2_proto_msgTypes[30] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[30] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *NonBodyParameter) String() string { @@ -2541,7 +2481,7 @@ func (*NonBodyParameter) ProtoMessage() {} func (x *NonBodyParameter) ProtoReflect() protoreflect.Message { mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[30] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2635,11 +2575,9 @@ type Oauth2AccessCodeSecurity struct { func (x *Oauth2AccessCodeSecurity) Reset() { *x = Oauth2AccessCodeSecurity{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[31] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[31] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Oauth2AccessCodeSecurity) String() string { @@ -2650,7 +2588,7 @@ func (*Oauth2AccessCodeSecurity) ProtoMessage() {} func (x *Oauth2AccessCodeSecurity) ProtoReflect() protoreflect.Message { mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[31] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2729,11 +2667,9 @@ type Oauth2ApplicationSecurity struct { func (x *Oauth2ApplicationSecurity) Reset() { *x = Oauth2ApplicationSecurity{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[32] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[32] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Oauth2ApplicationSecurity) String() string { @@ -2744,7 +2680,7 @@ func (*Oauth2ApplicationSecurity) ProtoMessage() {} func (x *Oauth2ApplicationSecurity) ProtoReflect() protoreflect.Message { mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[32] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2816,11 +2752,9 @@ type Oauth2ImplicitSecurity struct { func (x *Oauth2ImplicitSecurity) Reset() { *x = Oauth2ImplicitSecurity{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[33] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[33] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Oauth2ImplicitSecurity) String() string { @@ -2831,7 +2765,7 @@ func (*Oauth2ImplicitSecurity) ProtoMessage() {} func (x *Oauth2ImplicitSecurity) ProtoReflect() protoreflect.Message { mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[33] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2903,11 +2837,9 @@ type Oauth2PasswordSecurity struct { func (x *Oauth2PasswordSecurity) Reset() { *x = Oauth2PasswordSecurity{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[34] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - 
ms.StoreMessageInfo(mi) - } + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[34] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Oauth2PasswordSecurity) String() string { @@ -2918,7 +2850,7 @@ func (*Oauth2PasswordSecurity) ProtoMessage() {} func (x *Oauth2PasswordSecurity) ProtoReflect() protoreflect.Message { mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[34] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2985,11 +2917,9 @@ type Oauth2Scopes struct { func (x *Oauth2Scopes) Reset() { *x = Oauth2Scopes{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[35] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[35] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Oauth2Scopes) String() string { @@ -3000,7 +2930,7 @@ func (*Oauth2Scopes) ProtoMessage() {} func (x *Oauth2Scopes) ProtoReflect() protoreflect.Message { mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[35] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3051,11 +2981,9 @@ type Operation struct { func (x *Operation) Reset() { *x = Operation{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[36] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[36] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Operation) String() string { @@ -3066,7 +2994,7 @@ func (*Operation) ProtoMessage() {} func (x *Operation) ProtoReflect() protoreflect.Message { mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[36] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3178,6 +3106,7 @@ type Parameter struct { unknownFields protoimpl.UnknownFields // Types that are assignable to Oneof: + // // *Parameter_BodyParameter // *Parameter_NonBodyParameter Oneof isParameter_Oneof `protobuf_oneof:"oneof"` @@ -3185,11 +3114,9 @@ type Parameter struct { func (x *Parameter) Reset() { *x = Parameter{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[37] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[37] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Parameter) String() string { @@ -3200,7 +3127,7 @@ func (*Parameter) ProtoMessage() {} func (x *Parameter) ProtoReflect() protoreflect.Message { mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[37] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3263,11 +3190,9 @@ type ParameterDefinitions struct { func (x *ParameterDefinitions) Reset() { *x = ParameterDefinitions{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[38] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[38] + ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ParameterDefinitions) String() string { @@ -3278,7 +3203,7 @@ func (*ParameterDefinitions) ProtoMessage() {} func (x *ParameterDefinitions) ProtoReflect() protoreflect.Message { mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[38] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3306,6 +3231,7 @@ type ParametersItem struct { unknownFields protoimpl.UnknownFields // Types that are assignable to Oneof: + // // *ParametersItem_Parameter // *ParametersItem_JsonReference Oneof isParametersItem_Oneof `protobuf_oneof:"oneof"` @@ -3313,11 +3239,9 @@ type ParametersItem struct { func (x *ParametersItem) Reset() { *x = ParametersItem{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[39] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[39] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ParametersItem) String() string { @@ -3328,7 +3252,7 @@ func (*ParametersItem) ProtoMessage() {} func (x *ParametersItem) ProtoReflect() protoreflect.Message { mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[39] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3400,11 +3324,9 @@ type PathItem struct { func (x *PathItem) Reset() { *x = PathItem{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[40] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[40] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *PathItem) String() string { @@ -3415,7 +3337,7 @@ func (*PathItem) ProtoMessage() {} func (x *PathItem) ProtoReflect() protoreflect.Message { mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[40] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3535,11 +3457,9 @@ type PathParameterSubSchema struct { func (x *PathParameterSubSchema) Reset() { *x = PathParameterSubSchema{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[41] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[41] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *PathParameterSubSchema) String() string { @@ -3550,7 +3470,7 @@ func (*PathParameterSubSchema) ProtoMessage() {} func (x *PathParameterSubSchema) ProtoReflect() protoreflect.Message { mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[41] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3731,11 +3651,9 @@ type Paths struct { func (x *Paths) Reset() { *x = Paths{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[42] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[42] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + 
ms.StoreMessageInfo(mi) } func (x *Paths) String() string { @@ -3746,7 +3664,7 @@ func (*Paths) ProtoMessage() {} func (x *Paths) ProtoReflect() protoreflect.Message { mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[42] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3802,11 +3720,9 @@ type PrimitivesItems struct { func (x *PrimitivesItems) Reset() { *x = PrimitivesItems{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[43] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[43] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *PrimitivesItems) String() string { @@ -3817,7 +3733,7 @@ func (*PrimitivesItems) ProtoMessage() {} func (x *PrimitivesItems) ProtoReflect() protoreflect.Message { mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[43] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3968,11 +3884,9 @@ type Properties struct { func (x *Properties) Reset() { *x = Properties{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[44] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[44] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Properties) String() string { @@ -3983,7 +3897,7 @@ func (*Properties) ProtoMessage() {} func (x *Properties) ProtoReflect() protoreflect.Message { mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[44] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4042,11 +3956,9 @@ type QueryParameterSubSchema struct { func (x *QueryParameterSubSchema) Reset() { *x = QueryParameterSubSchema{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[45] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[45] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *QueryParameterSubSchema) String() string { @@ -4057,7 +3969,7 @@ func (*QueryParameterSubSchema) ProtoMessage() {} func (x *QueryParameterSubSchema) ProtoReflect() protoreflect.Message { mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[45] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4247,11 +4159,9 @@ type Response struct { func (x *Response) Reset() { *x = Response{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[46] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[46] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Response) String() string { @@ -4262,7 +4172,7 @@ func (*Response) ProtoMessage() {} func (x *Response) ProtoReflect() protoreflect.Message { mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[46] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4323,11 +4233,9 @@ type ResponseDefinitions struct { func (x *ResponseDefinitions) Reset() { *x = ResponseDefinitions{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[47] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[47] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ResponseDefinitions) String() string { @@ -4338,7 +4246,7 @@ func (*ResponseDefinitions) ProtoMessage() {} func (x *ResponseDefinitions) ProtoReflect() protoreflect.Message { mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[47] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4366,6 +4274,7 @@ type ResponseValue struct { unknownFields protoimpl.UnknownFields // Types that are assignable to Oneof: + // // *ResponseValue_Response // *ResponseValue_JsonReference Oneof isResponseValue_Oneof `protobuf_oneof:"oneof"` @@ -4373,11 +4282,9 @@ type ResponseValue struct { func (x *ResponseValue) Reset() { *x = ResponseValue{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[48] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[48] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ResponseValue) String() string { @@ -4388,7 +4295,7 @@ func (*ResponseValue) ProtoMessage() {} func (x *ResponseValue) ProtoReflect() protoreflect.Message { mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[48] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4452,11 +4359,9 @@ type Responses struct { func (x *Responses) Reset() { *x = Responses{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[49] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[49] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Responses) String() string { @@ -4467,7 +4372,7 @@ func (*Responses) ProtoMessage() {} func (x *Responses) ProtoReflect() protoreflect.Message { mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[49] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4537,11 +4442,9 @@ type Schema struct { func (x *Schema) Reset() { *x = Schema{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[50] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[50] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Schema) String() string { @@ -4552,7 +4455,7 @@ func (*Schema) ProtoMessage() {} func (x *Schema) ProtoReflect() protoreflect.Message { mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[50] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4790,6 +4693,7 @@ 
type SchemaItem struct { unknownFields protoimpl.UnknownFields // Types that are assignable to Oneof: + // // *SchemaItem_Schema // *SchemaItem_FileSchema Oneof isSchemaItem_Oneof `protobuf_oneof:"oneof"` @@ -4797,11 +4701,9 @@ type SchemaItem struct { func (x *SchemaItem) Reset() { *x = SchemaItem{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[51] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[51] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *SchemaItem) String() string { @@ -4812,7 +4714,7 @@ func (*SchemaItem) ProtoMessage() {} func (x *SchemaItem) ProtoReflect() protoreflect.Message { mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[51] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4874,11 +4776,9 @@ type SecurityDefinitions struct { func (x *SecurityDefinitions) Reset() { *x = SecurityDefinitions{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[52] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[52] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *SecurityDefinitions) String() string { @@ -4889,7 +4789,7 @@ func (*SecurityDefinitions) ProtoMessage() {} func (x *SecurityDefinitions) ProtoReflect() protoreflect.Message { mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[52] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4917,6 +4817,7 @@ type SecurityDefinitionsItem struct { unknownFields protoimpl.UnknownFields // Types that are assignable to Oneof: + // // *SecurityDefinitionsItem_BasicAuthenticationSecurity // *SecurityDefinitionsItem_ApiKeySecurity // *SecurityDefinitionsItem_Oauth2ImplicitSecurity @@ -4928,11 +4829,9 @@ type SecurityDefinitionsItem struct { func (x *SecurityDefinitionsItem) Reset() { *x = SecurityDefinitionsItem{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[53] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[53] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *SecurityDefinitionsItem) String() string { @@ -4943,7 +4842,7 @@ func (*SecurityDefinitionsItem) ProtoMessage() {} func (x *SecurityDefinitionsItem) ProtoReflect() protoreflect.Message { mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[53] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -5057,11 +4956,9 @@ type SecurityRequirement struct { func (x *SecurityRequirement) Reset() { *x = SecurityRequirement{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[54] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[54] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *SecurityRequirement) String() string { @@ -5072,7 +4969,7 @@ func (*SecurityRequirement) ProtoMessage() {} func (x 
*SecurityRequirement) ProtoReflect() protoreflect.Message { mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[54] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -5104,11 +5001,9 @@ type StringArray struct { func (x *StringArray) Reset() { *x = StringArray{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[55] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[55] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *StringArray) String() string { @@ -5119,7 +5014,7 @@ func (*StringArray) ProtoMessage() {} func (x *StringArray) ProtoReflect() protoreflect.Message { mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[55] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -5154,11 +5049,9 @@ type Tag struct { func (x *Tag) Reset() { *x = Tag{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[56] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[56] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Tag) String() string { @@ -5169,7 +5062,7 @@ func (*Tag) ProtoMessage() {} func (x *Tag) ProtoReflect() protoreflect.Message { mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[56] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -5222,11 +5115,9 @@ type TypeItem struct { func (x *TypeItem) Reset() { *x = TypeItem{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[57] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[57] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *TypeItem) String() string { @@ -5237,7 +5128,7 @@ func (*TypeItem) ProtoMessage() {} func (x *TypeItem) ProtoReflect() protoreflect.Message { mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[57] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -5270,11 +5161,9 @@ type VendorExtension struct { func (x *VendorExtension) Reset() { *x = VendorExtension{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[58] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[58] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *VendorExtension) String() string { @@ -5285,7 +5174,7 @@ func (*VendorExtension) ProtoMessage() {} func (x *VendorExtension) ProtoReflect() protoreflect.Message { mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[58] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -5322,11 +5211,9 @@ type Xml struct { func (x *Xml) Reset() { *x = Xml{} - if protoimpl.UnsafeEnabled { - mi := 
&file_openapiv2_OpenAPIv2_proto_msgTypes[59] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[59] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Xml) String() string { @@ -5337,7 +5224,7 @@ func (*Xml) ProtoMessage() {} func (x *Xml) ProtoReflect() protoreflect.Message { mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[59] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -6356,7 +6243,7 @@ func file_openapiv2_OpenAPIv2_proto_rawDescGZIP() []byte { } var file_openapiv2_OpenAPIv2_proto_msgTypes = make([]protoimpl.MessageInfo, 60) -var file_openapiv2_OpenAPIv2_proto_goTypes = []interface{}{ +var file_openapiv2_OpenAPIv2_proto_goTypes = []any{ (*AdditionalPropertiesItem)(nil), // 0: openapi.v2.AdditionalPropertiesItem (*Any)(nil), // 1: openapi.v2.Any (*ApiKeySecurity)(nil), // 2: openapi.v2.ApiKeySecurity @@ -6565,755 +6452,33 @@ func file_openapiv2_OpenAPIv2_proto_init() { if File_openapiv2_OpenAPIv2_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_openapiv2_OpenAPIv2_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AdditionalPropertiesItem); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Any); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ApiKeySecurity); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*BasicAuthenticationSecurity); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*BodyParameter); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Contact); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Default); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Definitions); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Document); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return 
&v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Examples); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ExternalDocs); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*FileSchema); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*FormDataParameterSubSchema); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Header); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*HeaderParameterSubSchema); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Headers); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Info); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ItemsItem); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*JsonReference); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*License); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*NamedAny); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*NamedHeader); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { 
- switch v := v.(*NamedParameter); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*NamedPathItem); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*NamedResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*NamedResponseValue); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*NamedSchema); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*NamedSecurityDefinitionsItem); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*NamedString); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*NamedStringArray); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*NonBodyParameter); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Oauth2AccessCodeSecurity); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[32].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Oauth2ApplicationSecurity); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[33].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Oauth2ImplicitSecurity); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[34].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Oauth2PasswordSecurity); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[35].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Oauth2Scopes); i { - case 0: - return 
&v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[36].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Operation); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[37].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Parameter); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[38].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ParameterDefinitions); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[39].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ParametersItem); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[40].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PathItem); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[41].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PathParameterSubSchema); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[42].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Paths); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[43].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PrimitivesItems); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[44].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Properties); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[45].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*QueryParameterSubSchema); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[46].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Response); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[47].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ResponseDefinitions); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[48].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ResponseValue); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - 
file_openapiv2_OpenAPIv2_proto_msgTypes[49].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Responses); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[50].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Schema); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[51].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SchemaItem); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[52].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SecurityDefinitions); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[53].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SecurityDefinitionsItem); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[54].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SecurityRequirement); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[55].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*StringArray); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[56].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Tag); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[57].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*TypeItem); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[58].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*VendorExtension); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[59].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Xml); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[0].OneofWrappers = []interface{}{ + file_openapiv2_OpenAPIv2_proto_msgTypes[0].OneofWrappers = []any{ (*AdditionalPropertiesItem_Schema)(nil), (*AdditionalPropertiesItem_Boolean)(nil), } - file_openapiv2_OpenAPIv2_proto_msgTypes[30].OneofWrappers = []interface{}{ + file_openapiv2_OpenAPIv2_proto_msgTypes[30].OneofWrappers = []any{ (*NonBodyParameter_HeaderParameterSubSchema)(nil), (*NonBodyParameter_FormDataParameterSubSchema)(nil), (*NonBodyParameter_QueryParameterSubSchema)(nil), (*NonBodyParameter_PathParameterSubSchema)(nil), } - file_openapiv2_OpenAPIv2_proto_msgTypes[37].OneofWrappers = []interface{}{ + 
file_openapiv2_OpenAPIv2_proto_msgTypes[37].OneofWrappers = []any{ (*Parameter_BodyParameter)(nil), (*Parameter_NonBodyParameter)(nil), } - file_openapiv2_OpenAPIv2_proto_msgTypes[39].OneofWrappers = []interface{}{ + file_openapiv2_OpenAPIv2_proto_msgTypes[39].OneofWrappers = []any{ (*ParametersItem_Parameter)(nil), (*ParametersItem_JsonReference)(nil), } - file_openapiv2_OpenAPIv2_proto_msgTypes[48].OneofWrappers = []interface{}{ + file_openapiv2_OpenAPIv2_proto_msgTypes[48].OneofWrappers = []any{ (*ResponseValue_Response)(nil), (*ResponseValue_JsonReference)(nil), } - file_openapiv2_OpenAPIv2_proto_msgTypes[51].OneofWrappers = []interface{}{ + file_openapiv2_OpenAPIv2_proto_msgTypes[51].OneofWrappers = []any{ (*SchemaItem_Schema)(nil), (*SchemaItem_FileSchema)(nil), } - file_openapiv2_OpenAPIv2_proto_msgTypes[53].OneofWrappers = []interface{}{ + file_openapiv2_OpenAPIv2_proto_msgTypes[53].OneofWrappers = []any{ (*SecurityDefinitionsItem_BasicAuthenticationSecurity)(nil), (*SecurityDefinitionsItem_ApiKeySecurity)(nil), (*SecurityDefinitionsItem_Oauth2ImplicitSecurity)(nil), diff --git a/vendor/github.com/google/gnostic-models/openapiv3/OpenAPIv3.pb.go b/vendor/github.com/google/gnostic-models/openapiv3/OpenAPIv3.pb.go index 945b8d11f..b9df95a37 100644 --- a/vendor/github.com/google/gnostic-models/openapiv3/OpenAPIv3.pb.go +++ b/vendor/github.com/google/gnostic-models/openapiv3/OpenAPIv3.pb.go @@ -16,8 +16,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.27.1 -// protoc v3.19.3 +// protoc-gen-go v1.35.1 +// protoc v4.23.4 // source: openapiv3/OpenAPIv3.proto package openapi_v3 @@ -43,6 +43,7 @@ type AdditionalPropertiesItem struct { unknownFields protoimpl.UnknownFields // Types that are assignable to Oneof: + // // *AdditionalPropertiesItem_SchemaOrReference // *AdditionalPropertiesItem_Boolean Oneof isAdditionalPropertiesItem_Oneof `protobuf_oneof:"oneof"` @@ -50,11 +51,9 @@ type AdditionalPropertiesItem struct { func (x *AdditionalPropertiesItem) Reset() { *x = AdditionalPropertiesItem{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *AdditionalPropertiesItem) String() string { @@ -65,7 +64,7 @@ func (*AdditionalPropertiesItem) ProtoMessage() {} func (x *AdditionalPropertiesItem) ProtoReflect() protoreflect.Message { mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -128,11 +127,9 @@ type Any struct { func (x *Any) Reset() { *x = Any{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Any) String() string { @@ -143,7 +140,7 @@ func (*Any) ProtoMessage() {} func (x *Any) ProtoReflect() protoreflect.Message { mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -178,6 +175,7 @@ type 
AnyOrExpression struct { unknownFields protoimpl.UnknownFields // Types that are assignable to Oneof: + // // *AnyOrExpression_Any // *AnyOrExpression_Expression Oneof isAnyOrExpression_Oneof `protobuf_oneof:"oneof"` @@ -185,11 +183,9 @@ type AnyOrExpression struct { func (x *AnyOrExpression) Reset() { *x = AnyOrExpression{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *AnyOrExpression) String() string { @@ -200,7 +196,7 @@ func (*AnyOrExpression) ProtoMessage() {} func (x *AnyOrExpression) ProtoReflect() protoreflect.Message { mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -264,11 +260,9 @@ type Callback struct { func (x *Callback) Reset() { *x = Callback{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Callback) String() string { @@ -279,7 +273,7 @@ func (*Callback) ProtoMessage() {} func (x *Callback) ProtoReflect() protoreflect.Message { mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -314,6 +308,7 @@ type CallbackOrReference struct { unknownFields protoimpl.UnknownFields // Types that are assignable to Oneof: + // // *CallbackOrReference_Callback // *CallbackOrReference_Reference Oneof isCallbackOrReference_Oneof `protobuf_oneof:"oneof"` @@ -321,11 +316,9 @@ type CallbackOrReference struct { func (x *CallbackOrReference) Reset() { *x = CallbackOrReference{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *CallbackOrReference) String() string { @@ -336,7 +329,7 @@ func (*CallbackOrReference) ProtoMessage() {} func (x *CallbackOrReference) ProtoReflect() protoreflect.Message { mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -398,11 +391,9 @@ type CallbacksOrReferences struct { func (x *CallbacksOrReferences) Reset() { *x = CallbacksOrReferences{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *CallbacksOrReferences) String() string { @@ -413,7 +404,7 @@ func (*CallbacksOrReferences) ProtoMessage() {} func (x *CallbacksOrReferences) ProtoReflect() protoreflect.Message { mi := 
&file_openapiv3_OpenAPIv3_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -455,11 +446,9 @@ type Components struct { func (x *Components) Reset() { *x = Components{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Components) String() string { @@ -470,7 +459,7 @@ func (*Components) ProtoMessage() {} func (x *Components) ProtoReflect() protoreflect.Message { mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[6] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -569,11 +558,9 @@ type Contact struct { func (x *Contact) Reset() { *x = Contact{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Contact) String() string { @@ -584,7 +571,7 @@ func (*Contact) ProtoMessage() {} func (x *Contact) ProtoReflect() protoreflect.Message { mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[7] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -633,6 +620,7 @@ type DefaultType struct { unknownFields protoimpl.UnknownFields // Types that are assignable to Oneof: + // // *DefaultType_Number // *DefaultType_Boolean // *DefaultType_String_ @@ -641,11 +629,9 @@ type DefaultType struct { func (x *DefaultType) Reset() { *x = DefaultType{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[8] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *DefaultType) String() string { @@ -656,7 +642,7 @@ func (*DefaultType) ProtoMessage() {} func (x *DefaultType) ProtoReflect() protoreflect.Message { mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[8] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -734,11 +720,9 @@ type Discriminator struct { func (x *Discriminator) Reset() { *x = Discriminator{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[9] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Discriminator) String() string { @@ -749,7 +733,7 @@ func (*Discriminator) ProtoMessage() {} func (x *Discriminator) ProtoReflect() protoreflect.Message { mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[9] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -803,11 +787,9 @@ 
type Document struct { func (x *Document) Reset() { *x = Document{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[10] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Document) String() string { @@ -818,7 +800,7 @@ func (*Document) ProtoMessage() {} func (x *Document) ProtoReflect() protoreflect.Message { mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[10] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -912,11 +894,9 @@ type Encoding struct { func (x *Encoding) Reset() { *x = Encoding{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[11] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Encoding) String() string { @@ -927,7 +907,7 @@ func (*Encoding) ProtoMessage() {} func (x *Encoding) ProtoReflect() protoreflect.Message { mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[11] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -994,11 +974,9 @@ type Encodings struct { func (x *Encodings) Reset() { *x = Encodings{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[12] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Encodings) String() string { @@ -1009,7 +987,7 @@ func (*Encodings) ProtoMessage() {} func (x *Encodings) ProtoReflect() protoreflect.Message { mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[12] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1045,11 +1023,9 @@ type Example struct { func (x *Example) Reset() { *x = Example{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[13] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Example) String() string { @@ -1060,7 +1036,7 @@ func (*Example) ProtoMessage() {} func (x *Example) ProtoReflect() protoreflect.Message { mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[13] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1116,6 +1092,7 @@ type ExampleOrReference struct { unknownFields protoimpl.UnknownFields // Types that are assignable to Oneof: + // // *ExampleOrReference_Example // *ExampleOrReference_Reference Oneof isExampleOrReference_Oneof `protobuf_oneof:"oneof"` @@ -1123,11 +1100,9 @@ type ExampleOrReference struct { func (x *ExampleOrReference) Reset() { *x = ExampleOrReference{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[14] - ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ExampleOrReference) String() string { @@ -1138,7 +1113,7 @@ func (*ExampleOrReference) ProtoMessage() {} func (x *ExampleOrReference) ProtoReflect() protoreflect.Message { mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[14] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1200,11 +1175,9 @@ type ExamplesOrReferences struct { func (x *ExamplesOrReferences) Reset() { *x = ExamplesOrReferences{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[15] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ExamplesOrReferences) String() string { @@ -1215,7 +1188,7 @@ func (*ExamplesOrReferences) ProtoMessage() {} func (x *ExamplesOrReferences) ProtoReflect() protoreflect.Message { mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[15] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1247,11 +1220,9 @@ type Expression struct { func (x *Expression) Reset() { *x = Expression{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[16] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Expression) String() string { @@ -1262,7 +1233,7 @@ func (*Expression) ProtoMessage() {} func (x *Expression) ProtoReflect() protoreflect.Message { mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[16] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1297,11 +1268,9 @@ type ExternalDocs struct { func (x *ExternalDocs) Reset() { *x = ExternalDocs{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[17] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ExternalDocs) String() string { @@ -1312,7 +1281,7 @@ func (*ExternalDocs) ProtoMessage() {} func (x *ExternalDocs) ProtoReflect() protoreflect.Message { mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[17] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1370,11 +1339,9 @@ type Header struct { func (x *Header) Reset() { *x = Header{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[18] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Header) String() string { @@ -1385,7 +1352,7 @@ func (*Header) ProtoMessage() {} func (x *Header) 
ProtoReflect() protoreflect.Message { mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[18] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1490,6 +1457,7 @@ type HeaderOrReference struct { unknownFields protoimpl.UnknownFields // Types that are assignable to Oneof: + // // *HeaderOrReference_Header // *HeaderOrReference_Reference Oneof isHeaderOrReference_Oneof `protobuf_oneof:"oneof"` @@ -1497,11 +1465,9 @@ type HeaderOrReference struct { func (x *HeaderOrReference) Reset() { *x = HeaderOrReference{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[19] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[19] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *HeaderOrReference) String() string { @@ -1512,7 +1478,7 @@ func (*HeaderOrReference) ProtoMessage() {} func (x *HeaderOrReference) ProtoReflect() protoreflect.Message { mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[19] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1574,11 +1540,9 @@ type HeadersOrReferences struct { func (x *HeadersOrReferences) Reset() { *x = HeadersOrReferences{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[20] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[20] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *HeadersOrReferences) String() string { @@ -1589,7 +1553,7 @@ func (*HeadersOrReferences) ProtoMessage() {} func (x *HeadersOrReferences) ProtoReflect() protoreflect.Message { mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[20] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1629,11 +1593,9 @@ type Info struct { func (x *Info) Reset() { *x = Info{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[21] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[21] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Info) String() string { @@ -1644,7 +1606,7 @@ func (*Info) ProtoMessage() {} func (x *Info) ProtoReflect() protoreflect.Message { mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[21] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1725,11 +1687,9 @@ type ItemsItem struct { func (x *ItemsItem) Reset() { *x = ItemsItem{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[22] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[22] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ItemsItem) String() string { @@ -1740,7 +1700,7 @@ func (*ItemsItem) ProtoMessage() {} func (x *ItemsItem) ProtoReflect() protoreflect.Message { mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[22] - if 
protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1775,11 +1735,9 @@ type License struct { func (x *License) Reset() { *x = License{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[23] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[23] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *License) String() string { @@ -1790,7 +1748,7 @@ func (*License) ProtoMessage() {} func (x *License) ProtoReflect() protoreflect.Message { mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[23] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1843,11 +1801,9 @@ type Link struct { func (x *Link) Reset() { *x = Link{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[24] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[24] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Link) String() string { @@ -1858,7 +1814,7 @@ func (*Link) ProtoMessage() {} func (x *Link) ProtoReflect() protoreflect.Message { mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[24] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1928,6 +1884,7 @@ type LinkOrReference struct { unknownFields protoimpl.UnknownFields // Types that are assignable to Oneof: + // // *LinkOrReference_Link // *LinkOrReference_Reference Oneof isLinkOrReference_Oneof `protobuf_oneof:"oneof"` @@ -1935,11 +1892,9 @@ type LinkOrReference struct { func (x *LinkOrReference) Reset() { *x = LinkOrReference{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[25] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[25] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *LinkOrReference) String() string { @@ -1950,7 +1905,7 @@ func (*LinkOrReference) ProtoMessage() {} func (x *LinkOrReference) ProtoReflect() protoreflect.Message { mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[25] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2012,11 +1967,9 @@ type LinksOrReferences struct { func (x *LinksOrReferences) Reset() { *x = LinksOrReferences{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[26] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[26] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *LinksOrReferences) String() string { @@ -2027,7 +1980,7 @@ func (*LinksOrReferences) ProtoMessage() {} func (x *LinksOrReferences) ProtoReflect() protoreflect.Message { mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[26] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { 
ms.StoreMessageInfo(mi) @@ -2064,11 +2017,9 @@ type MediaType struct { func (x *MediaType) Reset() { *x = MediaType{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[27] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[27] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *MediaType) String() string { @@ -2079,7 +2030,7 @@ func (*MediaType) ProtoMessage() {} func (x *MediaType) ProtoReflect() protoreflect.Message { mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[27] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2139,11 +2090,9 @@ type MediaTypes struct { func (x *MediaTypes) Reset() { *x = MediaTypes{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[28] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[28] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *MediaTypes) String() string { @@ -2154,7 +2103,7 @@ func (*MediaTypes) ProtoMessage() {} func (x *MediaTypes) ProtoReflect() protoreflect.Message { mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[28] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2190,11 +2139,9 @@ type NamedAny struct { func (x *NamedAny) Reset() { *x = NamedAny{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[29] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[29] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *NamedAny) String() string { @@ -2205,7 +2152,7 @@ func (*NamedAny) ProtoMessage() {} func (x *NamedAny) ProtoReflect() protoreflect.Message { mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[29] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2248,11 +2195,9 @@ type NamedCallbackOrReference struct { func (x *NamedCallbackOrReference) Reset() { *x = NamedCallbackOrReference{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[30] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[30] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *NamedCallbackOrReference) String() string { @@ -2263,7 +2208,7 @@ func (*NamedCallbackOrReference) ProtoMessage() {} func (x *NamedCallbackOrReference) ProtoReflect() protoreflect.Message { mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[30] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2306,11 +2251,9 @@ type NamedEncoding struct { func (x *NamedEncoding) Reset() { *x = NamedEncoding{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[31] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := 
&file_openapiv3_OpenAPIv3_proto_msgTypes[31] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *NamedEncoding) String() string { @@ -2321,7 +2264,7 @@ func (*NamedEncoding) ProtoMessage() {} func (x *NamedEncoding) ProtoReflect() protoreflect.Message { mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[31] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2364,11 +2307,9 @@ type NamedExampleOrReference struct { func (x *NamedExampleOrReference) Reset() { *x = NamedExampleOrReference{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[32] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[32] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *NamedExampleOrReference) String() string { @@ -2379,7 +2320,7 @@ func (*NamedExampleOrReference) ProtoMessage() {} func (x *NamedExampleOrReference) ProtoReflect() protoreflect.Message { mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[32] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2422,11 +2363,9 @@ type NamedHeaderOrReference struct { func (x *NamedHeaderOrReference) Reset() { *x = NamedHeaderOrReference{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[33] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[33] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *NamedHeaderOrReference) String() string { @@ -2437,7 +2376,7 @@ func (*NamedHeaderOrReference) ProtoMessage() {} func (x *NamedHeaderOrReference) ProtoReflect() protoreflect.Message { mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[33] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2480,11 +2419,9 @@ type NamedLinkOrReference struct { func (x *NamedLinkOrReference) Reset() { *x = NamedLinkOrReference{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[34] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[34] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *NamedLinkOrReference) String() string { @@ -2495,7 +2432,7 @@ func (*NamedLinkOrReference) ProtoMessage() {} func (x *NamedLinkOrReference) ProtoReflect() protoreflect.Message { mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[34] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2538,11 +2475,9 @@ type NamedMediaType struct { func (x *NamedMediaType) Reset() { *x = NamedMediaType{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[35] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[35] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *NamedMediaType) String() string { @@ -2553,7 
+2488,7 @@ func (*NamedMediaType) ProtoMessage() {} func (x *NamedMediaType) ProtoReflect() protoreflect.Message { mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[35] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2596,11 +2531,9 @@ type NamedParameterOrReference struct { func (x *NamedParameterOrReference) Reset() { *x = NamedParameterOrReference{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[36] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[36] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *NamedParameterOrReference) String() string { @@ -2611,7 +2544,7 @@ func (*NamedParameterOrReference) ProtoMessage() {} func (x *NamedParameterOrReference) ProtoReflect() protoreflect.Message { mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[36] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2654,11 +2587,9 @@ type NamedPathItem struct { func (x *NamedPathItem) Reset() { *x = NamedPathItem{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[37] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[37] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *NamedPathItem) String() string { @@ -2669,7 +2600,7 @@ func (*NamedPathItem) ProtoMessage() {} func (x *NamedPathItem) ProtoReflect() protoreflect.Message { mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[37] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2712,11 +2643,9 @@ type NamedRequestBodyOrReference struct { func (x *NamedRequestBodyOrReference) Reset() { *x = NamedRequestBodyOrReference{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[38] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[38] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *NamedRequestBodyOrReference) String() string { @@ -2727,7 +2656,7 @@ func (*NamedRequestBodyOrReference) ProtoMessage() {} func (x *NamedRequestBodyOrReference) ProtoReflect() protoreflect.Message { mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[38] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2770,11 +2699,9 @@ type NamedResponseOrReference struct { func (x *NamedResponseOrReference) Reset() { *x = NamedResponseOrReference{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[39] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[39] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *NamedResponseOrReference) String() string { @@ -2785,7 +2712,7 @@ func (*NamedResponseOrReference) ProtoMessage() {} func (x *NamedResponseOrReference) ProtoReflect() protoreflect.Message { mi := 
&file_openapiv3_OpenAPIv3_proto_msgTypes[39] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2828,11 +2755,9 @@ type NamedSchemaOrReference struct { func (x *NamedSchemaOrReference) Reset() { *x = NamedSchemaOrReference{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[40] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[40] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *NamedSchemaOrReference) String() string { @@ -2843,7 +2768,7 @@ func (*NamedSchemaOrReference) ProtoMessage() {} func (x *NamedSchemaOrReference) ProtoReflect() protoreflect.Message { mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[40] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2886,11 +2811,9 @@ type NamedSecuritySchemeOrReference struct { func (x *NamedSecuritySchemeOrReference) Reset() { *x = NamedSecuritySchemeOrReference{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[41] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[41] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *NamedSecuritySchemeOrReference) String() string { @@ -2901,7 +2824,7 @@ func (*NamedSecuritySchemeOrReference) ProtoMessage() {} func (x *NamedSecuritySchemeOrReference) ProtoReflect() protoreflect.Message { mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[41] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2944,11 +2867,9 @@ type NamedServerVariable struct { func (x *NamedServerVariable) Reset() { *x = NamedServerVariable{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[42] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[42] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *NamedServerVariable) String() string { @@ -2959,7 +2880,7 @@ func (*NamedServerVariable) ProtoMessage() {} func (x *NamedServerVariable) ProtoReflect() protoreflect.Message { mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[42] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3002,11 +2923,9 @@ type NamedString struct { func (x *NamedString) Reset() { *x = NamedString{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[43] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[43] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *NamedString) String() string { @@ -3017,7 +2936,7 @@ func (*NamedString) ProtoMessage() {} func (x *NamedString) ProtoReflect() protoreflect.Message { mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[43] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if 
ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3060,11 +2979,9 @@ type NamedStringArray struct { func (x *NamedStringArray) Reset() { *x = NamedStringArray{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[44] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[44] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *NamedStringArray) String() string { @@ -3075,7 +2992,7 @@ func (*NamedStringArray) ProtoMessage() {} func (x *NamedStringArray) ProtoReflect() protoreflect.Message { mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[44] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3119,11 +3036,9 @@ type OauthFlow struct { func (x *OauthFlow) Reset() { *x = OauthFlow{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[45] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[45] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *OauthFlow) String() string { @@ -3134,7 +3049,7 @@ func (*OauthFlow) ProtoMessage() {} func (x *OauthFlow) ProtoReflect() protoreflect.Message { mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[45] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3199,11 +3114,9 @@ type OauthFlows struct { func (x *OauthFlows) Reset() { *x = OauthFlows{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[46] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[46] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *OauthFlows) String() string { @@ -3214,7 +3127,7 @@ func (*OauthFlows) ProtoMessage() {} func (x *OauthFlows) ProtoReflect() protoreflect.Message { mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[46] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3274,11 +3187,9 @@ type Object struct { func (x *Object) Reset() { *x = Object{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[47] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[47] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Object) String() string { @@ -3289,7 +3200,7 @@ func (*Object) ProtoMessage() {} func (x *Object) ProtoReflect() protoreflect.Message { mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[47] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3334,11 +3245,9 @@ type Operation struct { func (x *Operation) Reset() { *x = Operation{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[48] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[48] + ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Operation) String() string { @@ -3349,7 +3258,7 @@ func (*Operation) ProtoMessage() {} func (x *Operation) ProtoReflect() protoreflect.Message { mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[48] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3479,11 +3388,9 @@ type Parameter struct { func (x *Parameter) Reset() { *x = Parameter{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[49] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[49] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Parameter) String() string { @@ -3494,7 +3401,7 @@ func (*Parameter) ProtoMessage() {} func (x *Parameter) ProtoReflect() protoreflect.Message { mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[49] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3613,6 +3520,7 @@ type ParameterOrReference struct { unknownFields protoimpl.UnknownFields // Types that are assignable to Oneof: + // // *ParameterOrReference_Parameter // *ParameterOrReference_Reference Oneof isParameterOrReference_Oneof `protobuf_oneof:"oneof"` @@ -3620,11 +3528,9 @@ type ParameterOrReference struct { func (x *ParameterOrReference) Reset() { *x = ParameterOrReference{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[50] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[50] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ParameterOrReference) String() string { @@ -3635,7 +3541,7 @@ func (*ParameterOrReference) ProtoMessage() {} func (x *ParameterOrReference) ProtoReflect() protoreflect.Message { mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[50] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3697,11 +3603,9 @@ type ParametersOrReferences struct { func (x *ParametersOrReferences) Reset() { *x = ParametersOrReferences{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[51] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[51] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ParametersOrReferences) String() string { @@ -3712,7 +3616,7 @@ func (*ParametersOrReferences) ProtoMessage() {} func (x *ParametersOrReferences) ProtoReflect() protoreflect.Message { mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[51] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3758,11 +3662,9 @@ type PathItem struct { func (x *PathItem) Reset() { *x = PathItem{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[52] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[52] + ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *PathItem) String() string { @@ -3773,7 +3675,7 @@ func (*PathItem) ProtoMessage() {} func (x *PathItem) ProtoReflect() protoreflect.Message { mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[52] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3898,11 +3800,9 @@ type Paths struct { func (x *Paths) Reset() { *x = Paths{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[53] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[53] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Paths) String() string { @@ -3913,7 +3813,7 @@ func (*Paths) ProtoMessage() {} func (x *Paths) ProtoReflect() protoreflect.Message { mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[53] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3952,11 +3852,9 @@ type Properties struct { func (x *Properties) Reset() { *x = Properties{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[54] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[54] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Properties) String() string { @@ -3967,7 +3865,7 @@ func (*Properties) ProtoMessage() {} func (x *Properties) ProtoReflect() protoreflect.Message { mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[54] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4002,11 +3900,9 @@ type Reference struct { func (x *Reference) Reset() { *x = Reference{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[55] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[55] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Reference) String() string { @@ -4017,7 +3913,7 @@ func (*Reference) ProtoMessage() {} func (x *Reference) ProtoReflect() protoreflect.Message { mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[55] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4063,11 +3959,9 @@ type RequestBodiesOrReferences struct { func (x *RequestBodiesOrReferences) Reset() { *x = RequestBodiesOrReferences{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[56] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[56] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *RequestBodiesOrReferences) String() string { @@ -4078,7 +3972,7 @@ func (*RequestBodiesOrReferences) ProtoMessage() {} func (x *RequestBodiesOrReferences) ProtoReflect() protoreflect.Message { mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[56] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4114,11 +4008,9 @@ type RequestBody struct { func (x *RequestBody) Reset() { *x = RequestBody{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[57] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[57] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *RequestBody) String() string { @@ -4129,7 +4021,7 @@ func (*RequestBody) ProtoMessage() {} func (x *RequestBody) ProtoReflect() protoreflect.Message { mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[57] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4178,6 +4070,7 @@ type RequestBodyOrReference struct { unknownFields protoimpl.UnknownFields // Types that are assignable to Oneof: + // // *RequestBodyOrReference_RequestBody // *RequestBodyOrReference_Reference Oneof isRequestBodyOrReference_Oneof `protobuf_oneof:"oneof"` @@ -4185,11 +4078,9 @@ type RequestBodyOrReference struct { func (x *RequestBodyOrReference) Reset() { *x = RequestBodyOrReference{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[58] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[58] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *RequestBodyOrReference) String() string { @@ -4200,7 +4091,7 @@ func (*RequestBodyOrReference) ProtoMessage() {} func (x *RequestBodyOrReference) ProtoReflect() protoreflect.Message { mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[58] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4267,11 +4158,9 @@ type Response struct { func (x *Response) Reset() { *x = Response{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[59] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[59] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Response) String() string { @@ -4282,7 +4171,7 @@ func (*Response) ProtoMessage() {} func (x *Response) ProtoReflect() protoreflect.Message { mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[59] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4338,6 +4227,7 @@ type ResponseOrReference struct { unknownFields protoimpl.UnknownFields // Types that are assignable to Oneof: + // // *ResponseOrReference_Response // *ResponseOrReference_Reference Oneof isResponseOrReference_Oneof `protobuf_oneof:"oneof"` @@ -4345,11 +4235,9 @@ type ResponseOrReference struct { func (x *ResponseOrReference) Reset() { *x = ResponseOrReference{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[60] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[60] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ResponseOrReference) String() string { @@ 
-4360,7 +4248,7 @@ func (*ResponseOrReference) ProtoMessage() {} func (x *ResponseOrReference) ProtoReflect() protoreflect.Message { mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[60] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4425,11 +4313,9 @@ type Responses struct { func (x *Responses) Reset() { *x = Responses{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[61] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[61] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Responses) String() string { @@ -4440,7 +4326,7 @@ func (*Responses) ProtoMessage() {} func (x *Responses) ProtoReflect() protoreflect.Message { mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[61] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4486,11 +4372,9 @@ type ResponsesOrReferences struct { func (x *ResponsesOrReferences) Reset() { *x = ResponsesOrReferences{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[62] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[62] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ResponsesOrReferences) String() string { @@ -4501,7 +4385,7 @@ func (*ResponsesOrReferences) ProtoMessage() {} func (x *ResponsesOrReferences) ProtoReflect() protoreflect.Message { mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[62] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4569,11 +4453,9 @@ type Schema struct { func (x *Schema) Reset() { *x = Schema{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[63] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[63] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Schema) String() string { @@ -4584,7 +4466,7 @@ func (*Schema) ProtoMessage() {} func (x *Schema) ProtoReflect() protoreflect.Message { mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[63] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4857,6 +4739,7 @@ type SchemaOrReference struct { unknownFields protoimpl.UnknownFields // Types that are assignable to Oneof: + // // *SchemaOrReference_Schema // *SchemaOrReference_Reference Oneof isSchemaOrReference_Oneof `protobuf_oneof:"oneof"` @@ -4864,11 +4747,9 @@ type SchemaOrReference struct { func (x *SchemaOrReference) Reset() { *x = SchemaOrReference{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[64] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[64] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *SchemaOrReference) String() string { @@ -4879,7 +4760,7 @@ func (*SchemaOrReference) ProtoMessage() {} func (x 
*SchemaOrReference) ProtoReflect() protoreflect.Message { mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[64] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4941,11 +4822,9 @@ type SchemasOrReferences struct { func (x *SchemasOrReferences) Reset() { *x = SchemasOrReferences{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[65] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[65] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *SchemasOrReferences) String() string { @@ -4956,7 +4835,7 @@ func (*SchemasOrReferences) ProtoMessage() {} func (x *SchemasOrReferences) ProtoReflect() protoreflect.Message { mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[65] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4989,11 +4868,9 @@ type SecurityRequirement struct { func (x *SecurityRequirement) Reset() { *x = SecurityRequirement{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[66] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[66] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *SecurityRequirement) String() string { @@ -5004,7 +4881,7 @@ func (*SecurityRequirement) ProtoMessage() {} func (x *SecurityRequirement) ProtoReflect() protoreflect.Message { mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[66] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -5045,11 +4922,9 @@ type SecurityScheme struct { func (x *SecurityScheme) Reset() { *x = SecurityScheme{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[67] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[67] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *SecurityScheme) String() string { @@ -5060,7 +4935,7 @@ func (*SecurityScheme) ProtoMessage() {} func (x *SecurityScheme) ProtoReflect() protoreflect.Message { mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[67] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -5144,6 +5019,7 @@ type SecuritySchemeOrReference struct { unknownFields protoimpl.UnknownFields // Types that are assignable to Oneof: + // // *SecuritySchemeOrReference_SecurityScheme // *SecuritySchemeOrReference_Reference Oneof isSecuritySchemeOrReference_Oneof `protobuf_oneof:"oneof"` @@ -5151,11 +5027,9 @@ type SecuritySchemeOrReference struct { func (x *SecuritySchemeOrReference) Reset() { *x = SecuritySchemeOrReference{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[68] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[68] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *SecuritySchemeOrReference) 
String() string { @@ -5166,7 +5040,7 @@ func (*SecuritySchemeOrReference) ProtoMessage() {} func (x *SecuritySchemeOrReference) ProtoReflect() protoreflect.Message { mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[68] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -5228,11 +5102,9 @@ type SecuritySchemesOrReferences struct { func (x *SecuritySchemesOrReferences) Reset() { *x = SecuritySchemesOrReferences{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[69] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[69] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *SecuritySchemesOrReferences) String() string { @@ -5243,7 +5115,7 @@ func (*SecuritySchemesOrReferences) ProtoMessage() {} func (x *SecuritySchemesOrReferences) ProtoReflect() protoreflect.Message { mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[69] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -5279,11 +5151,9 @@ type Server struct { func (x *Server) Reset() { *x = Server{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[70] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[70] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Server) String() string { @@ -5294,7 +5164,7 @@ func (*Server) ProtoMessage() {} func (x *Server) ProtoReflect() protoreflect.Message { mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[70] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -5351,11 +5221,9 @@ type ServerVariable struct { func (x *ServerVariable) Reset() { *x = ServerVariable{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[71] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[71] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ServerVariable) String() string { @@ -5366,7 +5234,7 @@ func (*ServerVariable) ProtoMessage() {} func (x *ServerVariable) ProtoReflect() protoreflect.Message { mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[71] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -5419,11 +5287,9 @@ type ServerVariables struct { func (x *ServerVariables) Reset() { *x = ServerVariables{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[72] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[72] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ServerVariables) String() string { @@ -5434,7 +5300,7 @@ func (*ServerVariables) ProtoMessage() {} func (x *ServerVariables) ProtoReflect() protoreflect.Message { mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[72] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -5463,6 +5329,7 @@ type SpecificationExtension struct { unknownFields protoimpl.UnknownFields // Types that are assignable to Oneof: + // // *SpecificationExtension_Number // *SpecificationExtension_Boolean // *SpecificationExtension_String_ @@ -5471,11 +5338,9 @@ type SpecificationExtension struct { func (x *SpecificationExtension) Reset() { *x = SpecificationExtension{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[73] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[73] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *SpecificationExtension) String() string { @@ -5486,7 +5351,7 @@ func (*SpecificationExtension) ProtoMessage() {} func (x *SpecificationExtension) ProtoReflect() protoreflect.Message { mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[73] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -5561,11 +5426,9 @@ type StringArray struct { func (x *StringArray) Reset() { *x = StringArray{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[74] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[74] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *StringArray) String() string { @@ -5576,7 +5439,7 @@ func (*StringArray) ProtoMessage() {} func (x *StringArray) ProtoReflect() protoreflect.Message { mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[74] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -5608,11 +5471,9 @@ type Strings struct { func (x *Strings) Reset() { *x = Strings{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[75] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[75] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Strings) String() string { @@ -5623,7 +5484,7 @@ func (*Strings) ProtoMessage() {} func (x *Strings) ProtoReflect() protoreflect.Message { mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[75] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -5659,11 +5520,9 @@ type Tag struct { func (x *Tag) Reset() { *x = Tag{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[76] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[76] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Tag) String() string { @@ -5674,7 +5533,7 @@ func (*Tag) ProtoMessage() {} func (x *Tag) ProtoReflect() protoreflect.Message { mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[76] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -5733,11 +5592,9 @@ type Xml struct { func 
(x *Xml) Reset() { *x = Xml{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[77] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[77] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Xml) String() string { @@ -5748,7 +5605,7 @@ func (*Xml) ProtoMessage() {} func (x *Xml) ProtoReflect() protoreflect.Message { mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[77] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -6781,7 +6638,7 @@ func file_openapiv3_OpenAPIv3_proto_rawDescGZIP() []byte { } var file_openapiv3_OpenAPIv3_proto_msgTypes = make([]protoimpl.MessageInfo, 78) -var file_openapiv3_OpenAPIv3_proto_goTypes = []interface{}{ +var file_openapiv3_OpenAPIv3_proto_goTypes = []any{ (*AdditionalPropertiesItem)(nil), // 0: openapi.v3.AdditionalPropertiesItem (*Any)(nil), // 1: openapi.v3.Any (*AnyOrExpression)(nil), // 2: openapi.v3.AnyOrExpression @@ -7040,994 +6897,56 @@ func file_openapiv3_OpenAPIv3_proto_init() { if File_openapiv3_OpenAPIv3_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_openapiv3_OpenAPIv3_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AdditionalPropertiesItem); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Any); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AnyOrExpression); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Callback); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CallbackOrReference); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CallbacksOrReferences); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Components); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Contact); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DefaultType); i { - case 0: - return 
&v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Discriminator); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Document); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Encoding); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Encodings); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Example); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ExampleOrReference); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ExamplesOrReferences); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Expression); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ExternalDocs); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Header); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*HeaderOrReference); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*HeadersOrReferences); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Info); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - 
file_openapiv3_OpenAPIv3_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ItemsItem); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*License); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Link); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*LinkOrReference); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*LinksOrReferences); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*MediaType); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*MediaTypes); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*NamedAny); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*NamedCallbackOrReference); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*NamedEncoding); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[32].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*NamedExampleOrReference); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[33].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*NamedHeaderOrReference); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[34].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*NamedLinkOrReference); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[35].Exporter = func(v interface{}, i int) interface{} { - switch v := 
v.(*NamedMediaType); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[36].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*NamedParameterOrReference); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[37].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*NamedPathItem); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[38].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*NamedRequestBodyOrReference); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[39].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*NamedResponseOrReference); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[40].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*NamedSchemaOrReference); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[41].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*NamedSecuritySchemeOrReference); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[42].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*NamedServerVariable); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[43].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*NamedString); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[44].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*NamedStringArray); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[45].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*OauthFlow); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[46].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*OauthFlows); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[47].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Object); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[48].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Operation); i { - case 0: - return &v.state - case 1: - 
return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[49].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Parameter); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[50].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ParameterOrReference); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[51].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ParametersOrReferences); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[52].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PathItem); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[53].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Paths); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[54].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Properties); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[55].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Reference); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[56].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RequestBodiesOrReferences); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[57].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RequestBody); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[58].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RequestBodyOrReference); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[59].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Response); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[60].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ResponseOrReference); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[61].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Responses); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - 
file_openapiv3_OpenAPIv3_proto_msgTypes[62].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ResponsesOrReferences); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[63].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Schema); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[64].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SchemaOrReference); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[65].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SchemasOrReferences); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[66].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SecurityRequirement); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[67].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SecurityScheme); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[68].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SecuritySchemeOrReference); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[69].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SecuritySchemesOrReferences); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[70].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Server); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[71].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ServerVariable); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[72].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ServerVariables); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[73].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SpecificationExtension); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[74].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*StringArray); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[75].Exporter = func(v interface{}, 
i int) interface{} { - switch v := v.(*Strings); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[76].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Tag); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[77].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Xml); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[0].OneofWrappers = []interface{}{ + file_openapiv3_OpenAPIv3_proto_msgTypes[0].OneofWrappers = []any{ (*AdditionalPropertiesItem_SchemaOrReference)(nil), (*AdditionalPropertiesItem_Boolean)(nil), } - file_openapiv3_OpenAPIv3_proto_msgTypes[2].OneofWrappers = []interface{}{ + file_openapiv3_OpenAPIv3_proto_msgTypes[2].OneofWrappers = []any{ (*AnyOrExpression_Any)(nil), (*AnyOrExpression_Expression)(nil), } - file_openapiv3_OpenAPIv3_proto_msgTypes[4].OneofWrappers = []interface{}{ + file_openapiv3_OpenAPIv3_proto_msgTypes[4].OneofWrappers = []any{ (*CallbackOrReference_Callback)(nil), (*CallbackOrReference_Reference)(nil), } - file_openapiv3_OpenAPIv3_proto_msgTypes[8].OneofWrappers = []interface{}{ + file_openapiv3_OpenAPIv3_proto_msgTypes[8].OneofWrappers = []any{ (*DefaultType_Number)(nil), (*DefaultType_Boolean)(nil), (*DefaultType_String_)(nil), } - file_openapiv3_OpenAPIv3_proto_msgTypes[14].OneofWrappers = []interface{}{ + file_openapiv3_OpenAPIv3_proto_msgTypes[14].OneofWrappers = []any{ (*ExampleOrReference_Example)(nil), (*ExampleOrReference_Reference)(nil), } - file_openapiv3_OpenAPIv3_proto_msgTypes[19].OneofWrappers = []interface{}{ + file_openapiv3_OpenAPIv3_proto_msgTypes[19].OneofWrappers = []any{ (*HeaderOrReference_Header)(nil), (*HeaderOrReference_Reference)(nil), } - file_openapiv3_OpenAPIv3_proto_msgTypes[25].OneofWrappers = []interface{}{ + file_openapiv3_OpenAPIv3_proto_msgTypes[25].OneofWrappers = []any{ (*LinkOrReference_Link)(nil), (*LinkOrReference_Reference)(nil), } - file_openapiv3_OpenAPIv3_proto_msgTypes[50].OneofWrappers = []interface{}{ + file_openapiv3_OpenAPIv3_proto_msgTypes[50].OneofWrappers = []any{ (*ParameterOrReference_Parameter)(nil), (*ParameterOrReference_Reference)(nil), } - file_openapiv3_OpenAPIv3_proto_msgTypes[58].OneofWrappers = []interface{}{ + file_openapiv3_OpenAPIv3_proto_msgTypes[58].OneofWrappers = []any{ (*RequestBodyOrReference_RequestBody)(nil), (*RequestBodyOrReference_Reference)(nil), } - file_openapiv3_OpenAPIv3_proto_msgTypes[60].OneofWrappers = []interface{}{ + file_openapiv3_OpenAPIv3_proto_msgTypes[60].OneofWrappers = []any{ (*ResponseOrReference_Response)(nil), (*ResponseOrReference_Reference)(nil), } - file_openapiv3_OpenAPIv3_proto_msgTypes[64].OneofWrappers = []interface{}{ + file_openapiv3_OpenAPIv3_proto_msgTypes[64].OneofWrappers = []any{ (*SchemaOrReference_Schema)(nil), (*SchemaOrReference_Reference)(nil), } - file_openapiv3_OpenAPIv3_proto_msgTypes[68].OneofWrappers = []interface{}{ + file_openapiv3_OpenAPIv3_proto_msgTypes[68].OneofWrappers = []any{ (*SecuritySchemeOrReference_SecurityScheme)(nil), (*SecuritySchemeOrReference_Reference)(nil), } - file_openapiv3_OpenAPIv3_proto_msgTypes[73].OneofWrappers = []interface{}{ + file_openapiv3_OpenAPIv3_proto_msgTypes[73].OneofWrappers = []any{ 
(*SpecificationExtension_Number)(nil), (*SpecificationExtension_Boolean)(nil), (*SpecificationExtension_String_)(nil), diff --git a/vendor/github.com/google/gnostic-models/openapiv3/annotations.pb.go b/vendor/github.com/google/gnostic-models/openapiv3/annotations.pb.go new file mode 100644 index 000000000..f9f1bd265 --- /dev/null +++ b/vendor/github.com/google/gnostic-models/openapiv3/annotations.pb.go @@ -0,0 +1,182 @@ +// Copyright 2022 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.35.1 +// protoc v4.23.4 +// source: openapiv3/annotations.proto + +package openapi_v3 + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + descriptorpb "google.golang.org/protobuf/types/descriptorpb" + reflect "reflect" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +var file_openapiv3_annotations_proto_extTypes = []protoimpl.ExtensionInfo{ + { + ExtendedType: (*descriptorpb.FileOptions)(nil), + ExtensionType: (*Document)(nil), + Field: 1143, + Name: "openapi.v3.document", + Tag: "bytes,1143,opt,name=document", + Filename: "openapiv3/annotations.proto", + }, + { + ExtendedType: (*descriptorpb.MethodOptions)(nil), + ExtensionType: (*Operation)(nil), + Field: 1143, + Name: "openapi.v3.operation", + Tag: "bytes,1143,opt,name=operation", + Filename: "openapiv3/annotations.proto", + }, + { + ExtendedType: (*descriptorpb.MessageOptions)(nil), + ExtensionType: (*Schema)(nil), + Field: 1143, + Name: "openapi.v3.schema", + Tag: "bytes,1143,opt,name=schema", + Filename: "openapiv3/annotations.proto", + }, + { + ExtendedType: (*descriptorpb.FieldOptions)(nil), + ExtensionType: (*Schema)(nil), + Field: 1143, + Name: "openapi.v3.property", + Tag: "bytes,1143,opt,name=property", + Filename: "openapiv3/annotations.proto", + }, +} + +// Extension fields to descriptorpb.FileOptions. +var ( + // optional openapi.v3.Document document = 1143; + E_Document = &file_openapiv3_annotations_proto_extTypes[0] +) + +// Extension fields to descriptorpb.MethodOptions. +var ( + // optional openapi.v3.Operation operation = 1143; + E_Operation = &file_openapiv3_annotations_proto_extTypes[1] +) + +// Extension fields to descriptorpb.MessageOptions. +var ( + // optional openapi.v3.Schema schema = 1143; + E_Schema = &file_openapiv3_annotations_proto_extTypes[2] +) + +// Extension fields to descriptorpb.FieldOptions. 
+var ( + // optional openapi.v3.Schema property = 1143; + E_Property = &file_openapiv3_annotations_proto_extTypes[3] +) + +var File_openapiv3_annotations_proto protoreflect.FileDescriptor + +var file_openapiv3_annotations_proto_rawDesc = []byte{ + 0x0a, 0x1b, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x33, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, + 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x6f, + 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x6f, 0x70, 0x65, + 0x6e, 0x61, 0x70, 0x69, 0x76, 0x33, 0x2f, 0x4f, 0x70, 0x65, 0x6e, 0x41, 0x50, 0x49, 0x76, 0x33, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x3a, 0x4f, 0x0a, 0x08, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65, + 0x6e, 0x74, 0x12, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x18, 0xf7, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, + 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x52, 0x08, 0x64, + 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x3a, 0x54, 0x0a, 0x09, 0x6f, 0x70, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xf7, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x6f, 0x70, + 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x09, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x3a, 0x4c, 0x0a, + 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, + 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xf7, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x12, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x63, 0x68, + 0x65, 0x6d, 0x61, 0x52, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x3a, 0x4e, 0x0a, 0x08, 0x70, + 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x79, 0x12, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xf7, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, + 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, + 0x61, 0x52, 0x08, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x79, 0x42, 0x42, 0x0a, 0x0e, 0x6f, + 0x72, 0x67, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x5f, 0x76, 0x33, 0x42, 0x10, 0x41, + 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, + 0x01, 0x5a, 0x16, 0x2e, 0x2f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x33, 0x3b, 0x6f, + 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x5f, 0x76, 0x33, 0xa2, 0x02, 0x03, 0x4f, 0x41, 0x53, 0x62, + 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var file_openapiv3_annotations_proto_goTypes = []any{ + (*descriptorpb.FileOptions)(nil), // 0: google.protobuf.FileOptions + (*descriptorpb.MethodOptions)(nil), // 1: google.protobuf.MethodOptions + 
(*descriptorpb.MessageOptions)(nil), // 2: google.protobuf.MessageOptions + (*descriptorpb.FieldOptions)(nil), // 3: google.protobuf.FieldOptions + (*Document)(nil), // 4: openapi.v3.Document + (*Operation)(nil), // 5: openapi.v3.Operation + (*Schema)(nil), // 6: openapi.v3.Schema +} +var file_openapiv3_annotations_proto_depIdxs = []int32{ + 0, // 0: openapi.v3.document:extendee -> google.protobuf.FileOptions + 1, // 1: openapi.v3.operation:extendee -> google.protobuf.MethodOptions + 2, // 2: openapi.v3.schema:extendee -> google.protobuf.MessageOptions + 3, // 3: openapi.v3.property:extendee -> google.protobuf.FieldOptions + 4, // 4: openapi.v3.document:type_name -> openapi.v3.Document + 5, // 5: openapi.v3.operation:type_name -> openapi.v3.Operation + 6, // 6: openapi.v3.schema:type_name -> openapi.v3.Schema + 6, // 7: openapi.v3.property:type_name -> openapi.v3.Schema + 8, // [8:8] is the sub-list for method output_type + 8, // [8:8] is the sub-list for method input_type + 4, // [4:8] is the sub-list for extension type_name + 0, // [0:4] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_openapiv3_annotations_proto_init() } +func file_openapiv3_annotations_proto_init() { + if File_openapiv3_annotations_proto != nil { + return + } + file_openapiv3_OpenAPIv3_proto_init() + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_openapiv3_annotations_proto_rawDesc, + NumEnums: 0, + NumMessages: 0, + NumExtensions: 4, + NumServices: 0, + }, + GoTypes: file_openapiv3_annotations_proto_goTypes, + DependencyIndexes: file_openapiv3_annotations_proto_depIdxs, + ExtensionInfos: file_openapiv3_annotations_proto_extTypes, + }.Build() + File_openapiv3_annotations_proto = out.File + file_openapiv3_annotations_proto_rawDesc = nil + file_openapiv3_annotations_proto_goTypes = nil + file_openapiv3_annotations_proto_depIdxs = nil +} diff --git a/vendor/github.com/google/gnostic-models/openapiv3/annotations.proto b/vendor/github.com/google/gnostic-models/openapiv3/annotations.proto new file mode 100644 index 000000000..09ee0aac5 --- /dev/null +++ b/vendor/github.com/google/gnostic-models/openapiv3/annotations.proto @@ -0,0 +1,56 @@ +// Copyright 2022 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package openapi.v3; + +import "google/protobuf/descriptor.proto"; +import "openapiv3/OpenAPIv3.proto"; + +// The Go package name. +option go_package = "./openapiv3;openapi_v3"; +// This option lets the proto compiler generate Java code inside the package +// name (see below) instead of inside an outer class. It creates a simpler +// developer experience by reducing one-level of name nesting and be +// consistent with most programming languages that don't support outer classes. +option java_multiple_files = true; +// The Java outer classname should be the filename in UpperCamelCase. 
This +// class is only used to hold proto descriptor, so developers don't need to +// work with it directly. +option java_outer_classname = "AnnotationsProto"; +// The Java package name must be proto package name with proper prefix. +option java_package = "org.openapi_v3"; +// A reasonable prefix for the Objective-C symbols generated from the package. +// It should at a minimum be 3 characters long, all uppercase, and convention +// is to use an abbreviation of the package name. Something short, but +// hopefully unique enough to not conflict with things that may come along in +// the future. 'GPB' is reserved for the protocol buffer implementation itself. +option objc_class_prefix = "OAS"; + +extend google.protobuf.FileOptions { + Document document = 1143; +} + +extend google.protobuf.MethodOptions { + Operation operation = 1143; +} + +extend google.protobuf.MessageOptions { + Schema schema = 1143; +} + +extend google.protobuf.FieldOptions { + Schema property = 1143; +} diff --git a/vendor/github.com/google/go-cmp/cmp/cmpopts/sort.go b/vendor/github.com/google/go-cmp/cmp/cmpopts/sort.go index c6d09dae4..720f3cdf5 100644 --- a/vendor/github.com/google/go-cmp/cmp/cmpopts/sort.go +++ b/vendor/github.com/google/go-cmp/cmp/cmpopts/sort.go @@ -14,22 +14,29 @@ import ( ) // SortSlices returns a [cmp.Transformer] option that sorts all []V. -// The less function must be of the form "func(T, T) bool" which is used to -// sort any slice with element type V that is assignable to T. +// The lessOrCompareFunc function must be either +// a less function of the form "func(T, T) bool" or +// a compare function of the format "func(T, T) int" +// which is used to sort any slice with element type V that is assignable to T. // -// The less function must be: +// A less function must be: // - Deterministic: less(x, y) == less(x, y) // - Irreflexive: !less(x, x) // - Transitive: if !less(x, y) and !less(y, z), then !less(x, z) // -// The less function does not have to be "total". That is, if !less(x, y) and -// !less(y, x) for two elements x and y, their relative order is maintained. +// A compare function must be: +// - Deterministic: compare(x, y) == compare(x, y) +// - Irreflexive: compare(x, x) == 0 +// - Transitive: if !less(x, y) and !less(y, z), then !less(x, z) +// +// The function does not have to be "total". That is, if x != y, but +// less or compare report inequality, their relative order is maintained. // // SortSlices can be used in conjunction with [EquateEmpty]. 
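For reference, the annotations.pb.go and annotations.proto files added above register four proto extensions (openapi.v3.document, operation, schema and property, all field number 1143) on the standard descriptor options, so OpenAPI metadata can be attached directly to .proto definitions. A minimal Go sketch of reading one of them back; the annotated message type is hypothetical, and only the gnostic-models and protobuf-go APIs visible in the new file are assumed:

package main

import (
	"fmt"

	openapi_v3 "github.com/google/gnostic-models/openapiv3"
	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/reflect/protoreflect"
	"google.golang.org/protobuf/types/descriptorpb"
)

// schemaAnnotation returns the openapi.v3.schema extension attached to a message
// descriptor's options, if the .proto source carried one.
func schemaAnnotation(md protoreflect.MessageDescriptor) (*openapi_v3.Schema, bool) {
	if md == nil {
		return nil, false
	}
	opts, ok := md.Options().(*descriptorpb.MessageOptions)
	if !ok || opts == nil || !proto.HasExtension(opts, openapi_v3.E_Schema) {
		return nil, false
	}
	s, _ := proto.GetExtension(opts, openapi_v3.E_Schema).(*openapi_v3.Schema)
	return s, s != nil
}

func main() {
	// In real use md would come from a generated, annotated type, e.g.
	// (&pb.MyRequest{}).ProtoReflect().Descriptor(); pb.MyRequest is hypothetical.
	var md protoreflect.MessageDescriptor
	if s, ok := schemaAnnotation(md); ok {
		fmt.Println("openapi schema description:", s.GetDescription())
	}
}
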
-func SortSlices(lessFunc interface{}) cmp.Option { - vf := reflect.ValueOf(lessFunc) - if !function.IsType(vf.Type(), function.Less) || vf.IsNil() { - panic(fmt.Sprintf("invalid less function: %T", lessFunc)) +func SortSlices(lessOrCompareFunc interface{}) cmp.Option { + vf := reflect.ValueOf(lessOrCompareFunc) + if (!function.IsType(vf.Type(), function.Less) && !function.IsType(vf.Type(), function.Compare)) || vf.IsNil() { + panic(fmt.Sprintf("invalid less or compare function: %T", lessOrCompareFunc)) } ss := sliceSorter{vf.Type().In(0), vf} return cmp.FilterValues(ss.filter, cmp.Transformer("cmpopts.SortSlices", ss.sort)) @@ -79,28 +86,40 @@ func (ss sliceSorter) checkSort(v reflect.Value) { } func (ss sliceSorter) less(v reflect.Value, i, j int) bool { vx, vy := v.Index(i), v.Index(j) - return ss.fnc.Call([]reflect.Value{vx, vy})[0].Bool() + vo := ss.fnc.Call([]reflect.Value{vx, vy})[0] + if vo.Kind() == reflect.Bool { + return vo.Bool() + } else { + return vo.Int() < 0 + } } -// SortMaps returns a [cmp.Transformer] option that flattens map[K]V types to be a -// sorted []struct{K, V}. The less function must be of the form -// "func(T, T) bool" which is used to sort any map with key K that is -// assignable to T. +// SortMaps returns a [cmp.Transformer] option that flattens map[K]V types to be +// a sorted []struct{K, V}. The lessOrCompareFunc function must be either +// a less function of the form "func(T, T) bool" or +// a compare function of the format "func(T, T) int" +// which is used to sort any map with key K that is assignable to T. // // Flattening the map into a slice has the property that [cmp.Equal] is able to // use [cmp.Comparer] options on K or the K.Equal method if it exists. // -// The less function must be: +// A less function must be: // - Deterministic: less(x, y) == less(x, y) // - Irreflexive: !less(x, x) // - Transitive: if !less(x, y) and !less(y, z), then !less(x, z) // - Total: if x != y, then either less(x, y) or less(y, x) // +// A compare function must be: +// - Deterministic: compare(x, y) == compare(x, y) +// - Irreflexive: compare(x, x) == 0 +// - Transitive: if compare(x, y) < 0 and compare(y, z) < 0, then compare(x, z) < 0 +// - Total: if x != y, then compare(x, y) != 0 +// // SortMaps can be used in conjunction with [EquateEmpty]. 
-func SortMaps(lessFunc interface{}) cmp.Option { - vf := reflect.ValueOf(lessFunc) - if !function.IsType(vf.Type(), function.Less) || vf.IsNil() { - panic(fmt.Sprintf("invalid less function: %T", lessFunc)) +func SortMaps(lessOrCompareFunc interface{}) cmp.Option { + vf := reflect.ValueOf(lessOrCompareFunc) + if (!function.IsType(vf.Type(), function.Less) && !function.IsType(vf.Type(), function.Compare)) || vf.IsNil() { + panic(fmt.Sprintf("invalid less or compare function: %T", lessOrCompareFunc)) } ms := mapSorter{vf.Type().In(0), vf} return cmp.FilterValues(ms.filter, cmp.Transformer("cmpopts.SortMaps", ms.sort)) @@ -143,5 +162,10 @@ func (ms mapSorter) checkSort(v reflect.Value) { } func (ms mapSorter) less(v reflect.Value, i, j int) bool { vx, vy := v.Index(i).Field(0), v.Index(j).Field(0) - return ms.fnc.Call([]reflect.Value{vx, vy})[0].Bool() + vo := ms.fnc.Call([]reflect.Value{vx, vy})[0] + if vo.Kind() == reflect.Bool { + return vo.Bool() + } else { + return vo.Int() < 0 + } } diff --git a/vendor/github.com/google/go-cmp/cmp/internal/function/func.go b/vendor/github.com/google/go-cmp/cmp/internal/function/func.go index d127d4362..def01a6be 100644 --- a/vendor/github.com/google/go-cmp/cmp/internal/function/func.go +++ b/vendor/github.com/google/go-cmp/cmp/internal/function/func.go @@ -19,6 +19,7 @@ const ( tbFunc // func(T) bool ttbFunc // func(T, T) bool + ttiFunc // func(T, T) int trbFunc // func(T, R) bool tibFunc // func(T, I) bool trFunc // func(T) R @@ -28,11 +29,13 @@ const ( Transformer = trFunc // func(T) R ValueFilter = ttbFunc // func(T, T) bool Less = ttbFunc // func(T, T) bool + Compare = ttiFunc // func(T, T) int ValuePredicate = tbFunc // func(T) bool KeyValuePredicate = trbFunc // func(T, R) bool ) var boolType = reflect.TypeOf(true) +var intType = reflect.TypeOf(0) // IsType reports whether the reflect.Type is of the specified function type. func IsType(t reflect.Type, ft funcType) bool { @@ -49,6 +52,10 @@ func IsType(t reflect.Type, ft funcType) bool { if ni == 2 && no == 1 && t.In(0) == t.In(1) && t.Out(0) == boolType { return true } + case ttiFunc: // func(T, T) int + if ni == 2 && no == 1 && t.In(0) == t.In(1) && t.Out(0) == intType { + return true + } case trbFunc: // func(T, R) bool if ni == 2 && no == 1 && t.Out(0) == boolType { return true diff --git a/vendor/github.com/google/go-cmp/cmp/options.go b/vendor/github.com/google/go-cmp/cmp/options.go index 754496f3b..ba3fce81f 100644 --- a/vendor/github.com/google/go-cmp/cmp/options.go +++ b/vendor/github.com/google/go-cmp/cmp/options.go @@ -232,7 +232,15 @@ func (validator) apply(s *state, vx, vy reflect.Value) { if t := s.curPath.Index(-2).Type(); t.Name() != "" { // Named type with unexported fields. 
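The cmpopts changes above extend SortSlices and SortMaps so the sorting function may be either a less function func(T, T) bool or a compare function func(T, T) int (the new function.Compare kind), with a negative compare result treated as less-than. A short illustrative sketch, not taken from this repository:

package main

import (
	"fmt"
	"strings"

	"github.com/google/go-cmp/cmp"
	"github.com/google/go-cmp/cmp/cmpopts"
)

func main() {
	want := []string{"a", "b", "c"}
	got := []string{"b", "c", "a"}

	// strings.Compare is a func(string, string) int; the updated SortSlices
	// accepts it in addition to the older func(T, T) bool less functions.
	ignoreOrder := cmpopts.SortSlices(strings.Compare)
	fmt.Println(cmp.Diff(want, got, ignoreOrder) == "") // true: element order is ignored

	// SortMaps likewise accepts a compare function for the key type.
	m1 := map[int]string{1: "x", 2: "y"}
	m2 := map[int]string{2: "y", 1: "x"}
	fmt.Println(cmp.Equal(m1, m2, cmpopts.SortMaps(func(a, b int) int { return a - b })))
}
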
name = fmt.Sprintf("%q.%v", t.PkgPath(), t.Name()) // e.g., "path/to/package".MyType - if _, ok := reflect.New(t).Interface().(error); ok { + isProtoMessage := func(t reflect.Type) bool { + m, ok := reflect.PointerTo(t).MethodByName("ProtoReflect") + return ok && m.Type.NumIn() == 1 && m.Type.NumOut() == 1 && + m.Type.Out(0).PkgPath() == "google.golang.org/protobuf/reflect/protoreflect" && + m.Type.Out(0).Name() == "Message" + } + if isProtoMessage(t) { + help = `consider using "google.golang.org/protobuf/testing/protocmp".Transform to compare proto.Message types` + } else if _, ok := reflect.New(t).Interface().(error); ok { help = "consider using cmpopts.EquateErrors to compare error values" } else if t.Comparable() { help = "consider using cmpopts.EquateComparable to compare comparable Go types" diff --git a/vendor/github.com/google/pprof/profile/encode.go b/vendor/github.com/google/pprof/profile/encode.go index 860bb304c..8ce9d3cf3 100644 --- a/vendor/github.com/google/pprof/profile/encode.go +++ b/vendor/github.com/google/pprof/profile/encode.go @@ -122,6 +122,7 @@ func (p *Profile) preEncode() { } p.defaultSampleTypeX = addString(strings, p.DefaultSampleType) + p.docURLX = addString(strings, p.DocURL) p.stringTable = make([]string, len(strings)) for s, i := range strings { @@ -156,6 +157,7 @@ func (p *Profile) encode(b *buffer) { encodeInt64Opt(b, 12, p.Period) encodeInt64s(b, 13, p.commentX) encodeInt64(b, 14, p.defaultSampleTypeX) + encodeInt64Opt(b, 15, p.docURLX) } var profileDecoder = []decoder{ @@ -237,6 +239,8 @@ var profileDecoder = []decoder{ func(b *buffer, m message) error { return decodeInt64s(b, &m.(*Profile).commentX) }, // int64 defaultSampleType = 14 func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).defaultSampleTypeX) }, + // string doc_link = 15; + func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).docURLX) }, } // postDecode takes the unexported fields populated by decode (with @@ -384,6 +388,7 @@ func (p *Profile) postDecode() error { p.commentX = nil p.DefaultSampleType, err = getString(p.stringTable, &p.defaultSampleTypeX, err) + p.DocURL, err = getString(p.stringTable, &p.docURLX, err) p.stringTable = nil return err } diff --git a/vendor/github.com/google/pprof/profile/merge.go b/vendor/github.com/google/pprof/profile/merge.go index eee0132e7..ba4d74640 100644 --- a/vendor/github.com/google/pprof/profile/merge.go +++ b/vendor/github.com/google/pprof/profile/merge.go @@ -476,6 +476,7 @@ func combineHeaders(srcs []*Profile) (*Profile, error) { var timeNanos, durationNanos, period int64 var comments []string seenComments := map[string]bool{} + var docURL string var defaultSampleType string for _, s := range srcs { if timeNanos == 0 || s.TimeNanos < timeNanos { @@ -494,6 +495,9 @@ func combineHeaders(srcs []*Profile) (*Profile, error) { if defaultSampleType == "" { defaultSampleType = s.DefaultSampleType } + if docURL == "" { + docURL = s.DocURL + } } p := &Profile{ @@ -509,6 +513,7 @@ func combineHeaders(srcs []*Profile) (*Profile, error) { Comments: comments, DefaultSampleType: defaultSampleType, + DocURL: docURL, } copy(p.SampleType, srcs[0].SampleType) return p, nil diff --git a/vendor/github.com/google/pprof/profile/profile.go b/vendor/github.com/google/pprof/profile/profile.go index 5551eb0bf..f47a24390 100644 --- a/vendor/github.com/google/pprof/profile/profile.go +++ b/vendor/github.com/google/pprof/profile/profile.go @@ -39,6 +39,7 @@ type Profile struct { Location []*Location Function []*Function Comments []string + 
DocURL string DropFrames string KeepFrames string @@ -53,6 +54,7 @@ type Profile struct { encodeMu sync.Mutex commentX []int64 + docURLX int64 dropFramesX int64 keepFramesX int64 stringTable []string @@ -555,6 +557,9 @@ func (p *Profile) String() string { for _, c := range p.Comments { ss = append(ss, "Comment: "+c) } + if url := p.DocURL; url != "" { + ss = append(ss, fmt.Sprintf("Doc: %s", url)) + } if pt := p.PeriodType; pt != nil { ss = append(ss, fmt.Sprintf("PeriodType: %s %s", pt.Type, pt.Unit)) } @@ -844,7 +849,7 @@ func (p *Profile) HasFileLines() bool { // Unsymbolizable returns true if a mapping points to a binary for which // locations can't be symbolized in principle, at least now. Examples are -// "[vdso]", [vsyscall]" and some others, see the code. +// "[vdso]", "[vsyscall]" and some others, see the code. func (m *Mapping) Unsymbolizable() bool { name := filepath.Base(m.File) return strings.HasPrefix(name, "[") || strings.HasPrefix(name, "linux-vdso") || strings.HasPrefix(m.File, "/dev/dri/") || m.File == "//anon" diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/BUILD.bazel b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/BUILD.bazel index 78d7c9f5c..a65d88eb8 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/BUILD.bazel +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/BUILD.bazel @@ -73,7 +73,7 @@ go_test( "@org_golang_google_genproto_googleapis_api//httpbody", "@org_golang_google_genproto_googleapis_rpc//errdetails", "@org_golang_google_genproto_googleapis_rpc//status", - "@org_golang_google_grpc//:go_default_library", + "@org_golang_google_grpc//:grpc", "@org_golang_google_grpc//codes", "@org_golang_google_grpc//health/grpc_health_v1", "@org_golang_google_grpc//metadata", diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/context.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/context.go index 5dd4e4478..2f2b34243 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/context.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/context.go @@ -49,6 +49,7 @@ var malformedHTTPHeaders = map[string]struct{}{ type ( rpcMethodKey struct{} httpPathPatternKey struct{} + httpPatternKey struct{} AnnotateContextOption func(ctx context.Context) context.Context ) @@ -404,3 +405,13 @@ func HTTPPathPattern(ctx context.Context) (string, bool) { func withHTTPPathPattern(ctx context.Context, httpPathPattern string) context.Context { return context.WithValue(ctx, httpPathPatternKey{}, httpPathPattern) } + +// HTTPPattern returns the HTTP path pattern struct relating to the HTTP handler, if one exists. +func HTTPPattern(ctx context.Context) (Pattern, bool) { + v, ok := ctx.Value(httpPatternKey{}).(Pattern) + return v, ok +} + +func withHTTPPattern(ctx context.Context, httpPattern Pattern) context.Context { + return context.WithValue(ctx, httpPatternKey{}, httpPattern) +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/convert.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/convert.go index d7b15fcfb..2e50082ad 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/convert.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/convert.go @@ -94,7 +94,7 @@ func Int64(val string) (int64, error) { } // Int64Slice converts 'val' where individual integers are separated by -// 'sep' into a int64 slice. +// 'sep' into an int64 slice. 
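The context.go hunk above adds the exported runtime.HTTPPattern accessor alongside the existing HTTPPathPattern, returning the matched runtime.Pattern when the generated gateway stubs have stored it on the request context. A hedged sketch of reading both from a metadata annotator; whether the pattern value is populated depends on the generated handlers, and the header names are illustrative only:

package main

import (
	"context"
	"fmt"
	"net/http"

	"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
	"google.golang.org/grpc/metadata"
)

func main() {
	// Attach the matched HTTP route to the metadata forwarded to the gRPC backend.
	mux := runtime.NewServeMux(
		runtime.WithMetadata(func(ctx context.Context, r *http.Request) metadata.MD {
			md := metadata.MD{}
			if p, ok := runtime.HTTPPathPattern(ctx); ok {
				md.Set("x-http-path-pattern", p)
			}
			if p, ok := runtime.HTTPPattern(ctx); ok {
				md.Set("x-http-pattern", fmt.Sprintf("%v", p))
			}
			return md
		}),
	)
	_ = mux // register the generated *Handler functions on mux in a real gateway
}
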
func Int64Slice(val, sep string) ([]int64, error) { s := strings.Split(val, sep) values := make([]int64, len(s)) @@ -118,7 +118,7 @@ func Int32(val string) (int32, error) { } // Int32Slice converts 'val' where individual integers are separated by -// 'sep' into a int32 slice. +// 'sep' into an int32 slice. func Int32Slice(val, sep string) ([]int32, error) { s := strings.Split(val, sep) values := make([]int32, len(s)) @@ -190,7 +190,7 @@ func Bytes(val string) ([]byte, error) { } // BytesSlice converts 'val' where individual bytes sequences, encoded in URL-safe -// base64 without padding, are separated by 'sep' into a slice of bytes slices slice. +// base64 without padding, are separated by 'sep' into a slice of byte slices. func BytesSlice(val, sep string) ([][]byte, error) { s := strings.Split(val, sep) values := make([][]byte, len(s)) diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/errors.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/errors.go index 568299869..41cd4f503 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/errors.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/errors.go @@ -81,6 +81,21 @@ func HTTPError(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.R mux.errorHandler(ctx, mux, marshaler, w, r, err) } +// HTTPStreamError uses the mux-configured stream error handler to notify error to the client without closing the connection. +func HTTPStreamError(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, r *http.Request, err error) { + st := mux.streamErrorHandler(ctx, err) + msg := errorChunk(st) + buf, err := marshaler.Marshal(msg) + if err != nil { + grpclog.Errorf("Failed to marshal an error: %v", err) + return + } + if _, err := w.Write(buf); err != nil { + grpclog.Errorf("Failed to notify error to client: %v", err) + return + } +} + // DefaultHTTPErrorHandler is the default error handler. // If "err" is a gRPC Status, the function replies with the status code mapped by HTTPStatusFromCode. // If "err" is a HTTPStatusError, the function replies with the status code provide by that struct. 
This is @@ -93,6 +108,7 @@ func HTTPError(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.R func DefaultHTTPErrorHandler(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, r *http.Request, err error) { // return Internal when Marshal failed const fallback = `{"code": 13, "message": "failed to marshal error message"}` + const fallbackRewriter = `{"code": 13, "message": "failed to rewrite error message"}` var customStatus *HTTPStatusError if errors.As(err, &customStatus) { @@ -100,19 +116,28 @@ func DefaultHTTPErrorHandler(ctx context.Context, mux *ServeMux, marshaler Marsh } s := status.Convert(err) - pb := s.Proto() w.Header().Del("Trailer") w.Header().Del("Transfer-Encoding") - contentType := marshaler.ContentType(pb) + respRw, err := mux.forwardResponseRewriter(ctx, s.Proto()) + if err != nil { + grpclog.Errorf("Failed to rewrite error message %q: %v", s, err) + w.WriteHeader(http.StatusInternalServerError) + if _, err := io.WriteString(w, fallbackRewriter); err != nil { + grpclog.Errorf("Failed to write response: %v", err) + } + return + } + + contentType := marshaler.ContentType(respRw) w.Header().Set("Content-Type", contentType) if s.Code() == codes.Unauthenticated { w.Header().Set("WWW-Authenticate", s.Message()) } - buf, merr := marshaler.Marshal(pb) + buf, merr := marshaler.Marshal(respRw) if merr != nil { grpclog.Errorf("Failed to marshal error message %q: %v", s, merr) w.WriteHeader(http.StatusInternalServerError) diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/fieldmask.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/fieldmask.go index 9005d6a0b..2fcd7af3c 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/fieldmask.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/fieldmask.go @@ -155,7 +155,7 @@ func buildPathsBlindly(name string, in interface{}) []string { return paths } -// fieldMaskPathItem stores a in-progress deconstruction of a path for a fieldmask +// fieldMaskPathItem stores an in-progress deconstruction of a path for a fieldmask type fieldMaskPathItem struct { // the list of prior fields leading up to node connected by dots path string diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/handler.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/handler.go index de1eef1f4..0fa907656 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/handler.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/handler.go @@ -3,6 +3,7 @@ package runtime import ( "context" "errors" + "fmt" "io" "net/http" "net/textproto" @@ -55,20 +56,33 @@ func ForwardResponseStream(ctx context.Context, mux *ServeMux, marshaler Marshal return } + respRw, err := mux.forwardResponseRewriter(ctx, resp) + if err != nil { + grpclog.Errorf("Rewrite error: %v", err) + handleForwardResponseStreamError(ctx, wroteHeader, marshaler, w, req, mux, err, delimiter) + return + } + if !wroteHeader { - w.Header().Set("Content-Type", marshaler.ContentType(resp)) + var contentType string + if sct, ok := marshaler.(StreamContentType); ok { + contentType = sct.StreamContentType(respRw) + } else { + contentType = marshaler.ContentType(respRw) + } + w.Header().Set("Content-Type", contentType) } var buf []byte - httpBody, isHTTPBody := resp.(*httpbody.HttpBody) + httpBody, isHTTPBody := respRw.(*httpbody.HttpBody) switch { - case resp == nil: + case respRw == nil: buf, err = marshaler.Marshal(errorChunk(status.New(codes.Internal, "empty response"))) case 
isHTTPBody: buf = httpBody.GetData() default: - result := map[string]interface{}{"result": resp} - if rb, ok := resp.(responseBody); ok { + result := map[string]interface{}{"result": respRw} + if rb, ok := respRw.(responseBody); ok { result["result"] = rb.XXX_ResponseBody() } @@ -164,12 +178,17 @@ func ForwardResponseMessage(ctx context.Context, mux *ServeMux, marshaler Marsha HTTPError(ctx, mux, marshaler, w, req, err) return } + respRw, err := mux.forwardResponseRewriter(ctx, resp) + if err != nil { + grpclog.Errorf("Rewrite error: %v", err) + HTTPError(ctx, mux, marshaler, w, req, err) + return + } var buf []byte - var err error - if rb, ok := resp.(responseBody); ok { + if rb, ok := respRw.(responseBody); ok { buf, err = marshaler.Marshal(rb.XXX_ResponseBody()) } else { - buf, err = marshaler.Marshal(resp) + buf, err = marshaler.Marshal(respRw) } if err != nil { grpclog.Errorf("Marshal error: %v", err) @@ -181,7 +200,7 @@ func ForwardResponseMessage(ctx context.Context, mux *ServeMux, marshaler Marsha w.Header().Set("Content-Length", strconv.Itoa(len(buf))) } - if _, err = w.Write(buf); err != nil { + if _, err = w.Write(buf); err != nil && !errors.Is(err, http.ErrBodyNotAllowed) { grpclog.Errorf("Failed to write response: %v", err) } @@ -201,8 +220,7 @@ func handleForwardResponseOptions(ctx context.Context, w http.ResponseWriter, re } for _, opt := range opts { if err := opt(ctx, w, resp); err != nil { - grpclog.Errorf("Error handling ForwardResponseOptions: %v", err) - return err + return fmt.Errorf("error handling ForwardResponseOptions: %w", err) } } return nil diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler.go index 2c0d25ff4..b1dfc37af 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler.go @@ -48,3 +48,11 @@ type Delimited interface { // Delimiter returns the record separator for the stream. Delimiter() []byte } + +// StreamContentType defines the streaming content type. +type StreamContentType interface { + // StreamContentType returns the content type for a stream. This shares the + // same behaviour as for `Marshaler.ContentType`, but is called, if present, + // in the case of a streamed response. + StreamContentType(v interface{}) string +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler_registry.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler_registry.go index 0b051e6e8..07c28112c 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler_registry.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler_registry.go @@ -86,8 +86,8 @@ func (m marshalerRegistry) add(mime string, marshaler Marshaler) error { // It allows for a mapping of case-sensitive Content-Type MIME type string to runtime.Marshaler interfaces. // // For example, you could allow the client to specify the use of the runtime.JSONPb marshaler -// with a "application/jsonpb" Content-Type and the use of the runtime.JSONBuiltin marshaler -// with a "application/json" Content-Type. +// with an "application/jsonpb" Content-Type and the use of the runtime.JSONBuiltin marshaler +// with an "application/json" Content-Type. // "*" can be used to match any Content-Type. // This can be attached to a ServerMux with the marshaler option. 
func makeMarshalerMIMERegistry() marshalerRegistry { diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/mux.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/mux.go index ed9a7e438..60c2065dd 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/mux.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/mux.go @@ -48,12 +48,19 @@ var encodedPathSplitter = regexp.MustCompile("(/|%2F)") // A HandlerFunc handles a specific pair of path pattern and HTTP method. type HandlerFunc func(w http.ResponseWriter, r *http.Request, pathParams map[string]string) +// A Middleware handler wraps another HandlerFunc to do some pre- and/or post-processing of the request. This is used as an alternative to gRPC interceptors when using the direct-to-implementation +// registration methods. It is generally recommended to use gRPC client or server interceptors instead +// where possible. +type Middleware func(HandlerFunc) HandlerFunc + // ServeMux is a request multiplexer for grpc-gateway. // It matches http requests to patterns and invokes the corresponding handler. type ServeMux struct { // handlers maps HTTP method to a list of handlers. handlers map[string][]handler + middlewares []Middleware forwardResponseOptions []func(context.Context, http.ResponseWriter, proto.Message) error + forwardResponseRewriter ForwardResponseRewriter marshalers marshalerRegistry incomingHeaderMatcher HeaderMatcherFunc outgoingHeaderMatcher HeaderMatcherFunc @@ -69,6 +76,24 @@ type ServeMux struct { // ServeMuxOption is an option that can be given to a ServeMux on construction. type ServeMuxOption func(*ServeMux) +// ForwardResponseRewriter is the signature of a function that is capable of rewriting messages +// before they are forwarded in a unary, stream, or error response. +type ForwardResponseRewriter func(ctx context.Context, response proto.Message) (any, error) + +// WithForwardResponseRewriter returns a ServeMuxOption that allows for implementers to insert logic +// that can rewrite the final response before it is forwarded. +// +// The response rewriter function is called during unary message forwarding, stream message +// forwarding and when errors are being forwarded. +// +// NOTE: Using this option will likely make what is generated by `protoc-gen-openapiv2` incorrect. +// Since this option involves making runtime changes to the response shape or type. +func WithForwardResponseRewriter(fwdResponseRewriter ForwardResponseRewriter) ServeMuxOption { + return func(sm *ServeMux) { + sm.forwardResponseRewriter = fwdResponseRewriter + } +} + // WithForwardResponseOption returns a ServeMuxOption representing the forwardResponseOption. // // forwardResponseOption is an option that will be called on the relevant context.Context, @@ -89,6 +114,15 @@ func WithUnescapingMode(mode UnescapingMode) ServeMuxOption { } } +// WithMiddlewares sets server middleware for all handlers. This is useful as an alternative to gRPC +// interceptors when using the direct-to-implementation registration methods and cannot rely +// on gRPC interceptors. It's recommended to use gRPC interceptors instead if possible. +func WithMiddlewares(middlewares ...Middleware) ServeMuxOption { + return func(serveMux *ServeMux) { + serveMux.middlewares = append(serveMux.middlewares, middlewares...) + } +} + // SetQueryParameterParser sets the query parameter parser, used to populate message from query parameters. 
// Configuring this will mean the generated OpenAPI output is no longer correct, and it should be // done with careful consideration. @@ -277,13 +311,14 @@ func WithHealthzEndpoint(healthCheckClient grpc_health_v1.HealthClient) ServeMux // NewServeMux returns a new ServeMux whose internal mapping is empty. func NewServeMux(opts ...ServeMuxOption) *ServeMux { serveMux := &ServeMux{ - handlers: make(map[string][]handler), - forwardResponseOptions: make([]func(context.Context, http.ResponseWriter, proto.Message) error, 0), - marshalers: makeMarshalerMIMERegistry(), - errorHandler: DefaultHTTPErrorHandler, - streamErrorHandler: DefaultStreamErrorHandler, - routingErrorHandler: DefaultRoutingErrorHandler, - unescapingMode: UnescapingModeDefault, + handlers: make(map[string][]handler), + forwardResponseOptions: make([]func(context.Context, http.ResponseWriter, proto.Message) error, 0), + forwardResponseRewriter: func(ctx context.Context, response proto.Message) (any, error) { return response, nil }, + marshalers: makeMarshalerMIMERegistry(), + errorHandler: DefaultHTTPErrorHandler, + streamErrorHandler: DefaultStreamErrorHandler, + routingErrorHandler: DefaultRoutingErrorHandler, + unescapingMode: UnescapingModeDefault, } for _, opt := range opts { @@ -305,6 +340,9 @@ func NewServeMux(opts ...ServeMuxOption) *ServeMux { // Handle associates "h" to the pair of HTTP method and path pattern. func (s *ServeMux) Handle(meth string, pat Pattern, h HandlerFunc) { + if len(s.middlewares) > 0 { + h = chainMiddlewares(s.middlewares)(h) + } s.handlers[meth] = append([]handler{{pat: pat, h: h}}, s.handlers[meth]...) } @@ -405,7 +443,7 @@ func (s *ServeMux) ServeHTTP(w http.ResponseWriter, r *http.Request) { } continue } - h.h(w, r, pathParams) + s.handleHandler(h, w, r, pathParams) return } @@ -458,7 +496,7 @@ func (s *ServeMux) ServeHTTP(w http.ResponseWriter, r *http.Request) { s.errorHandler(ctx, s, outboundMarshaler, w, r, sterr) return } - h.h(w, r, pathParams) + s.handleHandler(h, w, r, pathParams) return } _, outboundMarshaler := MarshalerForRequest(s, r) @@ -484,3 +522,16 @@ type handler struct { pat Pattern h HandlerFunc } + +func (s *ServeMux) handleHandler(h handler, w http.ResponseWriter, r *http.Request, pathParams map[string]string) { + h.h(w, r.WithContext(withHTTPPattern(r.Context(), h.pat)), pathParams) +} + +func chainMiddlewares(mws []Middleware) Middleware { + return func(next HandlerFunc) HandlerFunc { + for i := len(mws); i > 0; i-- { + next = mws[i-1](next) + } + return next + } +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/proto2_convert.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/proto2_convert.go index d549407f2..f710036b3 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/proto2_convert.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/proto2_convert.go @@ -40,7 +40,7 @@ func Float32P(val string) (*float32, error) { } // Int64P parses the given string representation of an integer -// and returns a pointer to a int64 whose value is same as the parsed integer. +// and returns a pointer to an int64 whose value is same as the parsed integer. func Int64P(val string) (*int64, error) { i, err := Int64(val) if err != nil { @@ -50,7 +50,7 @@ func Int64P(val string) (*int64, error) { } // Int32P parses the given string representation of an integer -// and returns a pointer to a int32 whose value is same as the parsed integer. +// and returns a pointer to an int32 whose value is same as the parsed integer. 
func Int32P(val string) (*int32, error) { i, err := Int32(val) if err != nil { diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/query.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/query.go index fe634174b..93fb09922 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/query.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/query.go @@ -291,7 +291,11 @@ func parseMessage(msgDescriptor protoreflect.MessageDescriptor, value string) (p if err != nil { return protoreflect.Value{}, err } - msg = timestamppb.New(t) + timestamp := timestamppb.New(t) + if ok := timestamp.IsValid(); !ok { + return protoreflect.Value{}, fmt.Errorf("%s before 0001-01-01", value) + } + msg = timestamp case "google.protobuf.Duration": d, err := time.ParseDuration(value) if err != nil { diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/pattern.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/pattern.go index dfe7de486..38ca39cc5 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/pattern.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/pattern.go @@ -1,6 +1,6 @@ package utilities -// An OpCode is a opcode of compiled path patterns. +// OpCode is an opcode of compiled path patterns. type OpCode int // These constants are the valid values of OpCode. diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/string_array_flag.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/string_array_flag.go index d224ab776..66aa5f2dc 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/string_array_flag.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/string_array_flag.go @@ -5,7 +5,7 @@ import ( "strings" ) -// flagInterface is an cut down interface to `flag` +// flagInterface is a cut down interface to `flag` type flagInterface interface { Var(value flag.Value, name string, usage string) } diff --git a/vendor/github.com/imdario/mergo/.deepsource.toml b/vendor/github.com/imdario/mergo/.deepsource.toml deleted file mode 100644 index 8a0681af8..000000000 --- a/vendor/github.com/imdario/mergo/.deepsource.toml +++ /dev/null @@ -1,12 +0,0 @@ -version = 1 - -test_patterns = [ - "*_test.go" -] - -[[analyzers]] -name = "go" -enabled = true - - [analyzers.meta] - import_path = "github.com/imdario/mergo" \ No newline at end of file diff --git a/vendor/github.com/imdario/mergo/.gitignore b/vendor/github.com/imdario/mergo/.gitignore deleted file mode 100644 index 529c3412b..000000000 --- a/vendor/github.com/imdario/mergo/.gitignore +++ /dev/null @@ -1,33 +0,0 @@ -#### joe made this: http://goel.io/joe - -#### go #### -# Binaries for programs and plugins -*.exe -*.dll -*.so -*.dylib - -# Test binary, build with `go test -c` -*.test - -# Output of the go coverage tool, specifically when used with LiteIDE -*.out - -# Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736 -.glide/ - -#### vim #### -# Swap -[._]*.s[a-v][a-z] -[._]*.sw[a-p] -[._]s[a-v][a-z] -[._]sw[a-p] - -# Session -Session.vim - -# Temporary -.netrwhist -*~ -# Auto-generated tag files -tags diff --git a/vendor/github.com/imdario/mergo/.travis.yml b/vendor/github.com/imdario/mergo/.travis.yml deleted file mode 100644 index d324c43ba..000000000 --- a/vendor/github.com/imdario/mergo/.travis.yml +++ /dev/null @@ -1,12 +0,0 @@ -language: go -arch: - - amd64 - - ppc64le -install: - - go get -t - - go get golang.org/x/tools/cmd/cover - - go get 
github.com/mattn/goveralls -script: - - go test -race -v ./... -after_script: - - $HOME/gopath/bin/goveralls -service=travis-ci -repotoken $COVERALLS_TOKEN diff --git a/vendor/github.com/imdario/mergo/CODE_OF_CONDUCT.md b/vendor/github.com/imdario/mergo/CODE_OF_CONDUCT.md deleted file mode 100644 index 469b44907..000000000 --- a/vendor/github.com/imdario/mergo/CODE_OF_CONDUCT.md +++ /dev/null @@ -1,46 +0,0 @@ -# Contributor Covenant Code of Conduct - -## Our Pledge - -In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation. - -## Our Standards - -Examples of behavior that contributes to creating a positive environment include: - -* Using welcoming and inclusive language -* Being respectful of differing viewpoints and experiences -* Gracefully accepting constructive criticism -* Focusing on what is best for the community -* Showing empathy towards other community members - -Examples of unacceptable behavior by participants include: - -* The use of sexualized language or imagery and unwelcome sexual attention or advances -* Trolling, insulting/derogatory comments, and personal or political attacks -* Public or private harassment -* Publishing others' private information, such as a physical or electronic address, without explicit permission -* Other conduct which could reasonably be considered inappropriate in a professional setting - -## Our Responsibilities - -Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior. - -Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful. - -## Scope - -This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers. - -## Enforcement - -Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at i@dario.im. The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately. - -Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership. 
- -## Attribution - -This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [http://contributor-covenant.org/version/1/4][version] - -[homepage]: http://contributor-covenant.org -[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/github.com/imdario/mergo/CONTRIBUTING.md b/vendor/github.com/imdario/mergo/CONTRIBUTING.md deleted file mode 100644 index 0a1ff9f94..000000000 --- a/vendor/github.com/imdario/mergo/CONTRIBUTING.md +++ /dev/null @@ -1,112 +0,0 @@ - -# Contributing to mergo - -First off, thanks for taking the time to contribute! ❤️ - -All types of contributions are encouraged and valued. See the [Table of Contents](#table-of-contents) for different ways to help and details about how this project handles them. Please make sure to read the relevant section before making your contribution. It will make it a lot easier for us maintainers and smooth out the experience for all involved. The community looks forward to your contributions. 🎉 - -> And if you like the project, but just don't have time to contribute, that's fine. There are other easy ways to support the project and show your appreciation, which we would also be very happy about: -> - Star the project -> - Tweet about it -> - Refer this project in your project's readme -> - Mention the project at local meetups and tell your friends/colleagues - - -## Table of Contents - -- [Code of Conduct](#code-of-conduct) -- [I Have a Question](#i-have-a-question) -- [I Want To Contribute](#i-want-to-contribute) -- [Reporting Bugs](#reporting-bugs) -- [Suggesting Enhancements](#suggesting-enhancements) - -## Code of Conduct - -This project and everyone participating in it is governed by the -[mergo Code of Conduct](https://github.com/imdario/mergoblob/master/CODE_OF_CONDUCT.md). -By participating, you are expected to uphold this code. Please report unacceptable behavior -to <>. - - -## I Have a Question - -> If you want to ask a question, we assume that you have read the available [Documentation](https://pkg.go.dev/github.com/imdario/mergo). - -Before you ask a question, it is best to search for existing [Issues](https://github.com/imdario/mergo/issues) that might help you. In case you have found a suitable issue and still need clarification, you can write your question in this issue. It is also advisable to search the internet for answers first. - -If you then still feel the need to ask a question and need clarification, we recommend the following: - -- Open an [Issue](https://github.com/imdario/mergo/issues/new). -- Provide as much context as you can about what you're running into. -- Provide project and platform versions (nodejs, npm, etc), depending on what seems relevant. - -We will then take care of the issue as soon as possible. - -## I Want To Contribute - -> ### Legal Notice -> When contributing to this project, you must agree that you have authored 100% of the content, that you have the necessary rights to the content and that the content you contribute may be provided under the project license. - -### Reporting Bugs - - -#### Before Submitting a Bug Report - -A good bug report shouldn't leave others needing to chase you up for more information. Therefore, we ask you to investigate carefully, collect information and describe the issue in detail in your report. Please complete the following steps in advance to help us fix any potential bug as fast as possible. - -- Make sure that you are using the latest version. 
-- Determine if your bug is really a bug and not an error on your side e.g. using incompatible environment components/versions (Make sure that you have read the [documentation](). If you are looking for support, you might want to check [this section](#i-have-a-question)). -- To see if other users have experienced (and potentially already solved) the same issue you are having, check if there is not already a bug report existing for your bug or error in the [bug tracker](https://github.com/imdario/mergoissues?q=label%3Abug). -- Also make sure to search the internet (including Stack Overflow) to see if users outside of the GitHub community have discussed the issue. -- Collect information about the bug: -- Stack trace (Traceback) -- OS, Platform and Version (Windows, Linux, macOS, x86, ARM) -- Version of the interpreter, compiler, SDK, runtime environment, package manager, depending on what seems relevant. -- Possibly your input and the output -- Can you reliably reproduce the issue? And can you also reproduce it with older versions? - - -#### How Do I Submit a Good Bug Report? - -> You must never report security related issues, vulnerabilities or bugs including sensitive information to the issue tracker, or elsewhere in public. Instead sensitive bugs must be sent by email to . - - -We use GitHub issues to track bugs and errors. If you run into an issue with the project: - -- Open an [Issue](https://github.com/imdario/mergo/issues/new). (Since we can't be sure at this point whether it is a bug or not, we ask you not to talk about a bug yet and not to label the issue.) -- Explain the behavior you would expect and the actual behavior. -- Please provide as much context as possible and describe the *reproduction steps* that someone else can follow to recreate the issue on their own. This usually includes your code. For good bug reports you should isolate the problem and create a reduced test case. -- Provide the information you collected in the previous section. - -Once it's filed: - -- The project team will label the issue accordingly. -- A team member will try to reproduce the issue with your provided steps. If there are no reproduction steps or no obvious way to reproduce the issue, the team will ask you for those steps and mark the issue as `needs-repro`. Bugs with the `needs-repro` tag will not be addressed until they are reproduced. -- If the team is able to reproduce the issue, it will be marked `needs-fix`, as well as possibly other tags (such as `critical`), and the issue will be left to be implemented by someone. - -### Suggesting Enhancements - -This section guides you through submitting an enhancement suggestion for mergo, **including completely new features and minor improvements to existing functionality**. Following these guidelines will help maintainers and the community to understand your suggestion and find related suggestions. - - -#### Before Submitting an Enhancement - -- Make sure that you are using the latest version. -- Read the [documentation]() carefully and find out if the functionality is already covered, maybe by an individual configuration. -- Perform a [search](https://github.com/imdario/mergo/issues) to see if the enhancement has already been suggested. If it has, add a comment to the existing issue instead of opening a new one. -- Find out whether your idea fits with the scope and aims of the project. It's up to you to make a strong case to convince the project's developers of the merits of this feature. 
Keep in mind that we want features that will be useful to the majority of our users and not just a small subset. If you're just targeting a minority of users, consider writing an add-on/plugin library. - - -#### How Do I Submit a Good Enhancement Suggestion? - -Enhancement suggestions are tracked as [GitHub issues](https://github.com/imdario/mergo/issues). - -- Use a **clear and descriptive title** for the issue to identify the suggestion. -- Provide a **step-by-step description of the suggested enhancement** in as many details as possible. -- **Describe the current behavior** and **explain which behavior you expected to see instead** and why. At this point you can also tell which alternatives do not work for you. -- You may want to **include screenshots and animated GIFs** which help you demonstrate the steps or point out the part which the suggestion is related to. You can use [this tool](https://www.cockos.com/licecap/) to record GIFs on macOS and Windows, and [this tool](https://github.com/colinkeenan/silentcast) or [this tool](https://github.com/GNOME/byzanz) on Linux. -- **Explain why this enhancement would be useful** to most mergo users. You may also want to point out the other projects that solved it better and which could serve as inspiration. - - -## Attribution -This guide is based on the **contributing-gen**. [Make your own](https://github.com/bttger/contributing-gen)! diff --git a/vendor/github.com/imdario/mergo/LICENSE b/vendor/github.com/imdario/mergo/LICENSE deleted file mode 100644 index 686680298..000000000 --- a/vendor/github.com/imdario/mergo/LICENSE +++ /dev/null @@ -1,28 +0,0 @@ -Copyright (c) 2013 Dario Castañé. All rights reserved. -Copyright (c) 2012 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
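For context on the grpc-gateway runtime bump vendored above: the new ServeMux options `WithMiddlewares` and `WithForwardResponseRewriter`, plus the `HTTPPattern` context helper, are the main behavioral additions in this diff. Below is a minimal, hypothetical sketch (not taken from this repository) of how a consumer might wire them into `runtime.NewServeMux`. The logging middleware and the pass-through rewriter are illustrative assumptions, and the generated handler registration (`pb.RegisterFooHandlerFromEndpoint`) is a placeholder name.

```go
package main

import (
	"context"
	"log"
	"net/http"

	"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
	"google.golang.org/protobuf/proto"
)

func main() {
	// Illustrative middleware: log the matched route pattern using the new
	// runtime.HTTPPattern helper, then delegate to the wrapped handler.
	logPattern := func(next runtime.HandlerFunc) runtime.HandlerFunc {
		return func(w http.ResponseWriter, r *http.Request, pathParams map[string]string) {
			if pat, ok := runtime.HTTPPattern(r.Context()); ok {
				log.Printf("%s %s matched %s", r.Method, r.URL.Path, pat.String())
			}
			next(w, r, pathParams)
		}
	}

	mux := runtime.NewServeMux(
		// Applied to every handler registered on this mux.
		runtime.WithMiddlewares(logPattern),
		// Pass-through rewriter shown only as a placeholder; a real one could
		// wrap or redact the response before marshaling. Per the vendored code,
		// it is also invoked for stream messages and forwarded errors.
		runtime.WithForwardResponseRewriter(func(ctx context.Context, resp proto.Message) (any, error) {
			return resp, nil
		}),
	)

	// Hypothetical generated gateway registration would go here, e.g.:
	//   pb.RegisterFooHandlerFromEndpoint(ctx, mux, grpcAddr, dialOpts)
	log.Fatal(http.ListenAndServe(":8080", mux))
}
```

As the vendored doc comment itself notes, using `WithForwardResponseRewriter` can make `protoc-gen-openapiv2` output inaccurate, since the response shape emitted at runtime may no longer match the generated schema.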
diff --git a/vendor/github.com/imdario/mergo/README.md b/vendor/github.com/imdario/mergo/README.md deleted file mode 100644 index ffbbb62c7..000000000 --- a/vendor/github.com/imdario/mergo/README.md +++ /dev/null @@ -1,242 +0,0 @@ -# Mergo - -[![GitHub release][5]][6] -[![GoCard][7]][8] -[![Test status][1]][2] -[![OpenSSF Scorecard][21]][22] -[![OpenSSF Best Practices][19]][20] -[![Coverage status][9]][10] -[![Sourcegraph][11]][12] -[![FOSSA status][13]][14] - -[![GoDoc][3]][4] -[![Become my sponsor][15]][16] -[![Tidelift][17]][18] - -[1]: https://github.com/imdario/mergo/workflows/tests/badge.svg?branch=master -[2]: https://github.com/imdario/mergo/actions/workflows/tests.yml -[3]: https://godoc.org/github.com/imdario/mergo?status.svg -[4]: https://godoc.org/github.com/imdario/mergo -[5]: https://img.shields.io/github/release/imdario/mergo.svg -[6]: https://github.com/imdario/mergo/releases -[7]: https://goreportcard.com/badge/imdario/mergo -[8]: https://goreportcard.com/report/github.com/imdario/mergo -[9]: https://coveralls.io/repos/github/imdario/mergo/badge.svg?branch=master -[10]: https://coveralls.io/github/imdario/mergo?branch=master -[11]: https://sourcegraph.com/github.com/imdario/mergo/-/badge.svg -[12]: https://sourcegraph.com/github.com/imdario/mergo?badge -[13]: https://app.fossa.io/api/projects/git%2Bgithub.com%2Fimdario%2Fmergo.svg?type=shield -[14]: https://app.fossa.io/projects/git%2Bgithub.com%2Fimdario%2Fmergo?ref=badge_shield -[15]: https://img.shields.io/github/sponsors/imdario -[16]: https://github.com/sponsors/imdario -[17]: https://tidelift.com/badges/package/go/github.com%2Fimdario%2Fmergo -[18]: https://tidelift.com/subscription/pkg/go-github.com-imdario-mergo -[19]: https://bestpractices.coreinfrastructure.org/projects/7177/badge -[20]: https://bestpractices.coreinfrastructure.org/projects/7177 -[21]: https://api.securityscorecards.dev/projects/github.com/imdario/mergo/badge -[22]: https://api.securityscorecards.dev/projects/github.com/imdario/mergo - -A helper to merge structs and maps in Golang. Useful for configuration default values, avoiding messy if-statements. - -Mergo merges same-type structs and maps by setting default values in zero-value fields. Mergo won't merge unexported (private) fields. It will do recursively any exported one. It also won't merge structs inside maps (because they are not addressable using Go reflection). - -Also a lovely [comune](http://en.wikipedia.org/wiki/Mergo) (municipality) in the Province of Ancona in the Italian region of Marche. - -## Status - -It is ready for production use. [It is used in several projects by Docker, Google, The Linux Foundation, VMWare, Shopify, Microsoft, etc](https://github.com/imdario/mergo#mergo-in-the-wild). - -### Important note - -Please keep in mind that a problematic PR broke [0.3.9](//github.com/imdario/mergo/releases/tag/0.3.9). I reverted it in [0.3.10](//github.com/imdario/mergo/releases/tag/0.3.10), and I consider it stable but not bug-free. Also, this version adds support for go modules. - -Keep in mind that in [0.3.2](//github.com/imdario/mergo/releases/tag/0.3.2), Mergo changed `Merge()`and `Map()` signatures to support [transformers](#transformers). I added an optional/variadic argument so that it won't break the existing code. - -If you were using Mergo before April 6th, 2015, please check your project works as intended after updating your local copy with ```go get -u github.com/imdario/mergo```. 
I apologize for any issue caused by its previous behavior and any future bug that Mergo could cause in existing projects after the change (release 0.2.0). - -### Donations - -If Mergo is useful to you, consider buying me a coffee, a beer, or making a monthly donation to allow me to keep building great free software. :heart_eyes: - -Buy Me a Coffee at ko-fi.com -Donate using Liberapay -Become my sponsor - -### Mergo in the wild - -- [moby/moby](https://github.com/moby/moby) -- [kubernetes/kubernetes](https://github.com/kubernetes/kubernetes) -- [vmware/dispatch](https://github.com/vmware/dispatch) -- [Shopify/themekit](https://github.com/Shopify/themekit) -- [imdario/zas](https://github.com/imdario/zas) -- [matcornic/hermes](https://github.com/matcornic/hermes) -- [OpenBazaar/openbazaar-go](https://github.com/OpenBazaar/openbazaar-go) -- [kataras/iris](https://github.com/kataras/iris) -- [michaelsauter/crane](https://github.com/michaelsauter/crane) -- [go-task/task](https://github.com/go-task/task) -- [sensu/uchiwa](https://github.com/sensu/uchiwa) -- [ory/hydra](https://github.com/ory/hydra) -- [sisatech/vcli](https://github.com/sisatech/vcli) -- [dairycart/dairycart](https://github.com/dairycart/dairycart) -- [projectcalico/felix](https://github.com/projectcalico/felix) -- [resin-os/balena](https://github.com/resin-os/balena) -- [go-kivik/kivik](https://github.com/go-kivik/kivik) -- [Telefonica/govice](https://github.com/Telefonica/govice) -- [supergiant/supergiant](supergiant/supergiant) -- [SergeyTsalkov/brooce](https://github.com/SergeyTsalkov/brooce) -- [soniah/dnsmadeeasy](https://github.com/soniah/dnsmadeeasy) -- [ohsu-comp-bio/funnel](https://github.com/ohsu-comp-bio/funnel) -- [EagerIO/Stout](https://github.com/EagerIO/Stout) -- [lynndylanhurley/defsynth-api](https://github.com/lynndylanhurley/defsynth-api) -- [russross/canvasassignments](https://github.com/russross/canvasassignments) -- [rdegges/cryptly-api](https://github.com/rdegges/cryptly-api) -- [casualjim/exeggutor](https://github.com/casualjim/exeggutor) -- [divshot/gitling](https://github.com/divshot/gitling) -- [RWJMurphy/gorl](https://github.com/RWJMurphy/gorl) -- [andrerocker/deploy42](https://github.com/andrerocker/deploy42) -- [elwinar/rambler](https://github.com/elwinar/rambler) -- [tmaiaroto/gopartman](https://github.com/tmaiaroto/gopartman) -- [jfbus/impressionist](https://github.com/jfbus/impressionist) -- [Jmeyering/zealot](https://github.com/Jmeyering/zealot) -- [godep-migrator/rigger-host](https://github.com/godep-migrator/rigger-host) -- [Dronevery/MultiwaySwitch-Go](https://github.com/Dronevery/MultiwaySwitch-Go) -- [thoas/picfit](https://github.com/thoas/picfit) -- [mantasmatelis/whooplist-server](https://github.com/mantasmatelis/whooplist-server) -- [jnuthong/item_search](https://github.com/jnuthong/item_search) -- [bukalapak/snowboard](https://github.com/bukalapak/snowboard) -- [containerssh/containerssh](https://github.com/containerssh/containerssh) -- [goreleaser/goreleaser](https://github.com/goreleaser/goreleaser) -- [tjpnz/structbot](https://github.com/tjpnz/structbot) - -## Install - - go get github.com/imdario/mergo - - // use in your .go code - import ( - "github.com/imdario/mergo" - ) - -## Usage - -You can only merge same-type structs with exported fields initialized as zero value of their type and same-types maps. Mergo won't merge unexported (private) fields but will do recursively any exported one. 
It won't merge empty structs value as [they are zero values](https://golang.org/ref/spec#The_zero_value) too. Also, maps will be merged recursively except for structs inside maps (because they are not addressable using Go reflection). - -```go -if err := mergo.Merge(&dst, src); err != nil { - // ... -} -``` - -Also, you can merge overwriting values using the transformer `WithOverride`. - -```go -if err := mergo.Merge(&dst, src, mergo.WithOverride); err != nil { - // ... -} -``` - -Additionally, you can map a `map[string]interface{}` to a struct (and otherwise, from struct to map), following the same restrictions as in `Merge()`. Keys are capitalized to find each corresponding exported field. - -```go -if err := mergo.Map(&dst, srcMap); err != nil { - // ... -} -``` - -Warning: if you map a struct to map, it won't do it recursively. Don't expect Mergo to map struct members of your struct as `map[string]interface{}`. They will be just assigned as values. - -Here is a nice example: - -```go -package main - -import ( - "fmt" - "github.com/imdario/mergo" -) - -type Foo struct { - A string - B int64 -} - -func main() { - src := Foo{ - A: "one", - B: 2, - } - dest := Foo{ - A: "two", - } - mergo.Merge(&dest, src) - fmt.Println(dest) - // Will print - // {two 2} -} -``` - -Note: if test are failing due missing package, please execute: - - go get gopkg.in/yaml.v3 - -### Transformers - -Transformers allow to merge specific types differently than in the default behavior. In other words, now you can customize how some types are merged. For example, `time.Time` is a struct; it doesn't have zero value but IsZero can return true because it has fields with zero value. How can we merge a non-zero `time.Time`? - -```go -package main - -import ( - "fmt" - "github.com/imdario/mergo" - "reflect" - "time" -) - -type timeTransformer struct { -} - -func (t timeTransformer) Transformer(typ reflect.Type) func(dst, src reflect.Value) error { - if typ == reflect.TypeOf(time.Time{}) { - return func(dst, src reflect.Value) error { - if dst.CanSet() { - isZero := dst.MethodByName("IsZero") - result := isZero.Call([]reflect.Value{}) - if result[0].Bool() { - dst.Set(src) - } - } - return nil - } - } - return nil -} - -type Snapshot struct { - Time time.Time - // ... -} - -func main() { - src := Snapshot{time.Now()} - dest := Snapshot{} - mergo.Merge(&dest, src, mergo.WithTransformers(timeTransformer{})) - fmt.Println(dest) - // Will print - // { 2018-01-12 01:15:00 +0000 UTC m=+0.000000001 } -} -``` - -## Contact me - -If I can help you, you have an idea or you are using Mergo in your projects, don't hesitate to drop me a line (or a pull request): [@im_dario](https://twitter.com/im_dario) - -## About - -Written by [Dario Castañé](http://dario.im). - -## License - -[BSD 3-Clause](http://opensource.org/licenses/BSD-3-Clause) license, as [Go language](http://golang.org/LICENSE). 
- -[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fimdario%2Fmergo.svg?type=large)](https://app.fossa.io/projects/git%2Bgithub.com%2Fimdario%2Fmergo?ref=badge_large) diff --git a/vendor/github.com/imdario/mergo/SECURITY.md b/vendor/github.com/imdario/mergo/SECURITY.md deleted file mode 100644 index a5de61f77..000000000 --- a/vendor/github.com/imdario/mergo/SECURITY.md +++ /dev/null @@ -1,14 +0,0 @@ -# Security Policy - -## Supported Versions - -| Version | Supported | -| ------- | ------------------ | -| 0.3.x | :white_check_mark: | -| < 0.3 | :x: | - -## Security contact information - -To report a security vulnerability, please use the -[Tidelift security contact](https://tidelift.com/security). -Tidelift will coordinate the fix and disclosure. diff --git a/vendor/github.com/imdario/mergo/doc.go b/vendor/github.com/imdario/mergo/doc.go deleted file mode 100644 index fcd985f99..000000000 --- a/vendor/github.com/imdario/mergo/doc.go +++ /dev/null @@ -1,143 +0,0 @@ -// Copyright 2013 Dario Castañé. All rights reserved. -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -/* -A helper to merge structs and maps in Golang. Useful for configuration default values, avoiding messy if-statements. - -Mergo merges same-type structs and maps by setting default values in zero-value fields. Mergo won't merge unexported (private) fields. It will do recursively any exported one. It also won't merge structs inside maps (because they are not addressable using Go reflection). - -Status - -It is ready for production use. It is used in several projects by Docker, Google, The Linux Foundation, VMWare, Shopify, etc. - -Important note - -Please keep in mind that a problematic PR broke 0.3.9. We reverted it in 0.3.10. We consider 0.3.10 as stable but not bug-free. . Also, this version adds suppot for go modules. - -Keep in mind that in 0.3.2, Mergo changed Merge() and Map() signatures to support transformers. We added an optional/variadic argument so that it won't break the existing code. - -If you were using Mergo before April 6th, 2015, please check your project works as intended after updating your local copy with go get -u github.com/imdario/mergo. I apologize for any issue caused by its previous behavior and any future bug that Mergo could cause in existing projects after the change (release 0.2.0). - -Install - -Do your usual installation procedure: - - go get github.com/imdario/mergo - - // use in your .go code - import ( - "github.com/imdario/mergo" - ) - -Usage - -You can only merge same-type structs with exported fields initialized as zero value of their type and same-types maps. Mergo won't merge unexported (private) fields but will do recursively any exported one. It won't merge empty structs value as they are zero values too. Also, maps will be merged recursively except for structs inside maps (because they are not addressable using Go reflection). - - if err := mergo.Merge(&dst, src); err != nil { - // ... - } - -Also, you can merge overwriting values using the transformer WithOverride. - - if err := mergo.Merge(&dst, src, mergo.WithOverride); err != nil { - // ... - } - -Additionally, you can map a map[string]interface{} to a struct (and otherwise, from struct to map), following the same restrictions as in Merge(). Keys are capitalized to find each corresponding exported field. - - if err := mergo.Map(&dst, srcMap); err != nil { - // ... 
- } - -Warning: if you map a struct to map, it won't do it recursively. Don't expect Mergo to map struct members of your struct as map[string]interface{}. They will be just assigned as values. - -Here is a nice example: - - package main - - import ( - "fmt" - "github.com/imdario/mergo" - ) - - type Foo struct { - A string - B int64 - } - - func main() { - src := Foo{ - A: "one", - B: 2, - } - dest := Foo{ - A: "two", - } - mergo.Merge(&dest, src) - fmt.Println(dest) - // Will print - // {two 2} - } - -Transformers - -Transformers allow to merge specific types differently than in the default behavior. In other words, now you can customize how some types are merged. For example, time.Time is a struct; it doesn't have zero value but IsZero can return true because it has fields with zero value. How can we merge a non-zero time.Time? - - package main - - import ( - "fmt" - "github.com/imdario/mergo" - "reflect" - "time" - ) - - type timeTransformer struct { - } - - func (t timeTransformer) Transformer(typ reflect.Type) func(dst, src reflect.Value) error { - if typ == reflect.TypeOf(time.Time{}) { - return func(dst, src reflect.Value) error { - if dst.CanSet() { - isZero := dst.MethodByName("IsZero") - result := isZero.Call([]reflect.Value{}) - if result[0].Bool() { - dst.Set(src) - } - } - return nil - } - } - return nil - } - - type Snapshot struct { - Time time.Time - // ... - } - - func main() { - src := Snapshot{time.Now()} - dest := Snapshot{} - mergo.Merge(&dest, src, mergo.WithTransformers(timeTransformer{})) - fmt.Println(dest) - // Will print - // { 2018-01-12 01:15:00 +0000 UTC m=+0.000000001 } - } - -Contact me - -If I can help you, you have an idea or you are using Mergo in your projects, don't hesitate to drop me a line (or a pull request): https://twitter.com/im_dario - -About - -Written by Dario Castañé: https://da.rio.hn - -License - -BSD 3-Clause license, as Go language. - -*/ -package mergo diff --git a/vendor/github.com/imdario/mergo/map.go b/vendor/github.com/imdario/mergo/map.go deleted file mode 100644 index b50d5c2a4..000000000 --- a/vendor/github.com/imdario/mergo/map.go +++ /dev/null @@ -1,178 +0,0 @@ -// Copyright 2014 Dario Castañé. All rights reserved. -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Based on src/pkg/reflect/deepequal.go from official -// golang's stdlib. - -package mergo - -import ( - "fmt" - "reflect" - "unicode" - "unicode/utf8" -) - -func changeInitialCase(s string, mapper func(rune) rune) string { - if s == "" { - return s - } - r, n := utf8.DecodeRuneInString(s) - return string(mapper(r)) + s[n:] -} - -func isExported(field reflect.StructField) bool { - r, _ := utf8.DecodeRuneInString(field.Name) - return r >= 'A' && r <= 'Z' -} - -// Traverses recursively both values, assigning src's fields values to dst. -// The map argument tracks comparisons that have already been seen, which allows -// short circuiting on recursive types. -func deepMap(dst, src reflect.Value, visited map[uintptr]*visit, depth int, config *Config) (err error) { - overwrite := config.Overwrite - if dst.CanAddr() { - addr := dst.UnsafeAddr() - h := 17 * addr - seen := visited[h] - typ := dst.Type() - for p := seen; p != nil; p = p.next { - if p.ptr == addr && p.typ == typ { - return nil - } - } - // Remember, remember... 
- visited[h] = &visit{typ, seen, addr} - } - zeroValue := reflect.Value{} - switch dst.Kind() { - case reflect.Map: - dstMap := dst.Interface().(map[string]interface{}) - for i, n := 0, src.NumField(); i < n; i++ { - srcType := src.Type() - field := srcType.Field(i) - if !isExported(field) { - continue - } - fieldName := field.Name - fieldName = changeInitialCase(fieldName, unicode.ToLower) - if v, ok := dstMap[fieldName]; !ok || (isEmptyValue(reflect.ValueOf(v), !config.ShouldNotDereference) || overwrite) { - dstMap[fieldName] = src.Field(i).Interface() - } - } - case reflect.Ptr: - if dst.IsNil() { - v := reflect.New(dst.Type().Elem()) - dst.Set(v) - } - dst = dst.Elem() - fallthrough - case reflect.Struct: - srcMap := src.Interface().(map[string]interface{}) - for key := range srcMap { - config.overwriteWithEmptyValue = true - srcValue := srcMap[key] - fieldName := changeInitialCase(key, unicode.ToUpper) - dstElement := dst.FieldByName(fieldName) - if dstElement == zeroValue { - // We discard it because the field doesn't exist. - continue - } - srcElement := reflect.ValueOf(srcValue) - dstKind := dstElement.Kind() - srcKind := srcElement.Kind() - if srcKind == reflect.Ptr && dstKind != reflect.Ptr { - srcElement = srcElement.Elem() - srcKind = reflect.TypeOf(srcElement.Interface()).Kind() - } else if dstKind == reflect.Ptr { - // Can this work? I guess it can't. - if srcKind != reflect.Ptr && srcElement.CanAddr() { - srcPtr := srcElement.Addr() - srcElement = reflect.ValueOf(srcPtr) - srcKind = reflect.Ptr - } - } - - if !srcElement.IsValid() { - continue - } - if srcKind == dstKind { - if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil { - return - } - } else if dstKind == reflect.Interface && dstElement.Kind() == reflect.Interface { - if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil { - return - } - } else if srcKind == reflect.Map { - if err = deepMap(dstElement, srcElement, visited, depth+1, config); err != nil { - return - } - } else { - return fmt.Errorf("type mismatch on %s field: found %v, expected %v", fieldName, srcKind, dstKind) - } - } - } - return -} - -// Map sets fields' values in dst from src. -// src can be a map with string keys or a struct. dst must be the opposite: -// if src is a map, dst must be a valid pointer to struct. If src is a struct, -// dst must be map[string]interface{}. -// It won't merge unexported (private) fields and will do recursively -// any exported field. -// If dst is a map, keys will be src fields' names in lower camel case. -// Missing key in src that doesn't match a field in dst will be skipped. This -// doesn't apply if dst is a map. -// This is separated method from Merge because it is cleaner and it keeps sane -// semantics: merging equal types, mapping different (restricted) types. -func Map(dst, src interface{}, opts ...func(*Config)) error { - return _map(dst, src, opts...) -} - -// MapWithOverwrite will do the same as Map except that non-empty dst attributes will be overridden by -// non-empty src attribute values. -// Deprecated: Use Map(…) with WithOverride -func MapWithOverwrite(dst, src interface{}, opts ...func(*Config)) error { - return _map(dst, src, append(opts, WithOverride)...) 
-} - -func _map(dst, src interface{}, opts ...func(*Config)) error { - if dst != nil && reflect.ValueOf(dst).Kind() != reflect.Ptr { - return ErrNonPointerArgument - } - var ( - vDst, vSrc reflect.Value - err error - ) - config := &Config{} - - for _, opt := range opts { - opt(config) - } - - if vDst, vSrc, err = resolveValues(dst, src); err != nil { - return err - } - // To be friction-less, we redirect equal-type arguments - // to deepMerge. Only because arguments can be anything. - if vSrc.Kind() == vDst.Kind() { - return deepMerge(vDst, vSrc, make(map[uintptr]*visit), 0, config) - } - switch vSrc.Kind() { - case reflect.Struct: - if vDst.Kind() != reflect.Map { - return ErrExpectedMapAsDestination - } - case reflect.Map: - if vDst.Kind() != reflect.Struct { - return ErrExpectedStructAsDestination - } - default: - return ErrNotSupported - } - return deepMap(vDst, vSrc, make(map[uintptr]*visit), 0, config) -} diff --git a/vendor/github.com/imdario/mergo/merge.go b/vendor/github.com/imdario/mergo/merge.go deleted file mode 100644 index 0ef9b2138..000000000 --- a/vendor/github.com/imdario/mergo/merge.go +++ /dev/null @@ -1,409 +0,0 @@ -// Copyright 2013 Dario Castañé. All rights reserved. -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Based on src/pkg/reflect/deepequal.go from official -// golang's stdlib. - -package mergo - -import ( - "fmt" - "reflect" -) - -func hasMergeableFields(dst reflect.Value) (exported bool) { - for i, n := 0, dst.NumField(); i < n; i++ { - field := dst.Type().Field(i) - if field.Anonymous && dst.Field(i).Kind() == reflect.Struct { - exported = exported || hasMergeableFields(dst.Field(i)) - } else if isExportedComponent(&field) { - exported = exported || len(field.PkgPath) == 0 - } - } - return -} - -func isExportedComponent(field *reflect.StructField) bool { - pkgPath := field.PkgPath - if len(pkgPath) > 0 { - return false - } - c := field.Name[0] - if 'a' <= c && c <= 'z' || c == '_' { - return false - } - return true -} - -type Config struct { - Transformers Transformers - Overwrite bool - ShouldNotDereference bool - AppendSlice bool - TypeCheck bool - overwriteWithEmptyValue bool - overwriteSliceWithEmptyValue bool - sliceDeepCopy bool - debug bool -} - -type Transformers interface { - Transformer(reflect.Type) func(dst, src reflect.Value) error -} - -// Traverses recursively both values, assigning src's fields values to dst. -// The map argument tracks comparisons that have already been seen, which allows -// short circuiting on recursive types. -func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, config *Config) (err error) { - overwrite := config.Overwrite - typeCheck := config.TypeCheck - overwriteWithEmptySrc := config.overwriteWithEmptyValue - overwriteSliceWithEmptySrc := config.overwriteSliceWithEmptyValue - sliceDeepCopy := config.sliceDeepCopy - - if !src.IsValid() { - return - } - if dst.CanAddr() { - addr := dst.UnsafeAddr() - h := 17 * addr - seen := visited[h] - typ := dst.Type() - for p := seen; p != nil; p = p.next { - if p.ptr == addr && p.typ == typ { - return nil - } - } - // Remember, remember... 
- visited[h] = &visit{typ, seen, addr} - } - - if config.Transformers != nil && !isReflectNil(dst) && dst.IsValid() { - if fn := config.Transformers.Transformer(dst.Type()); fn != nil { - err = fn(dst, src) - return - } - } - - switch dst.Kind() { - case reflect.Struct: - if hasMergeableFields(dst) { - for i, n := 0, dst.NumField(); i < n; i++ { - if err = deepMerge(dst.Field(i), src.Field(i), visited, depth+1, config); err != nil { - return - } - } - } else { - if dst.CanSet() && (isReflectNil(dst) || overwrite) && (!isEmptyValue(src, !config.ShouldNotDereference) || overwriteWithEmptySrc) { - dst.Set(src) - } - } - case reflect.Map: - if dst.IsNil() && !src.IsNil() { - if dst.CanSet() { - dst.Set(reflect.MakeMap(dst.Type())) - } else { - dst = src - return - } - } - - if src.Kind() != reflect.Map { - if overwrite && dst.CanSet() { - dst.Set(src) - } - return - } - - for _, key := range src.MapKeys() { - srcElement := src.MapIndex(key) - if !srcElement.IsValid() { - continue - } - dstElement := dst.MapIndex(key) - switch srcElement.Kind() { - case reflect.Chan, reflect.Func, reflect.Map, reflect.Interface, reflect.Slice: - if srcElement.IsNil() { - if overwrite { - dst.SetMapIndex(key, srcElement) - } - continue - } - fallthrough - default: - if !srcElement.CanInterface() { - continue - } - switch reflect.TypeOf(srcElement.Interface()).Kind() { - case reflect.Struct: - fallthrough - case reflect.Ptr: - fallthrough - case reflect.Map: - srcMapElm := srcElement - dstMapElm := dstElement - if srcMapElm.CanInterface() { - srcMapElm = reflect.ValueOf(srcMapElm.Interface()) - if dstMapElm.IsValid() { - dstMapElm = reflect.ValueOf(dstMapElm.Interface()) - } - } - if err = deepMerge(dstMapElm, srcMapElm, visited, depth+1, config); err != nil { - return - } - case reflect.Slice: - srcSlice := reflect.ValueOf(srcElement.Interface()) - - var dstSlice reflect.Value - if !dstElement.IsValid() || dstElement.IsNil() { - dstSlice = reflect.MakeSlice(srcSlice.Type(), 0, srcSlice.Len()) - } else { - dstSlice = reflect.ValueOf(dstElement.Interface()) - } - - if (!isEmptyValue(src, !config.ShouldNotDereference) || overwriteWithEmptySrc || overwriteSliceWithEmptySrc) && (overwrite || isEmptyValue(dst, !config.ShouldNotDereference)) && !config.AppendSlice && !sliceDeepCopy { - if typeCheck && srcSlice.Type() != dstSlice.Type() { - return fmt.Errorf("cannot override two slices with different type (%s, %s)", srcSlice.Type(), dstSlice.Type()) - } - dstSlice = srcSlice - } else if config.AppendSlice { - if srcSlice.Type() != dstSlice.Type() { - return fmt.Errorf("cannot append two slices with different type (%s, %s)", srcSlice.Type(), dstSlice.Type()) - } - dstSlice = reflect.AppendSlice(dstSlice, srcSlice) - } else if sliceDeepCopy { - i := 0 - for ; i < srcSlice.Len() && i < dstSlice.Len(); i++ { - srcElement := srcSlice.Index(i) - dstElement := dstSlice.Index(i) - - if srcElement.CanInterface() { - srcElement = reflect.ValueOf(srcElement.Interface()) - } - if dstElement.CanInterface() { - dstElement = reflect.ValueOf(dstElement.Interface()) - } - - if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil { - return - } - } - - } - dst.SetMapIndex(key, dstSlice) - } - } - - if dstElement.IsValid() && !isEmptyValue(dstElement, !config.ShouldNotDereference) { - if reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Slice { - continue - } - if reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Map && reflect.TypeOf(dstElement.Interface()).Kind() == reflect.Map { - continue - } - } 
- - if srcElement.IsValid() && ((srcElement.Kind() != reflect.Ptr && overwrite) || !dstElement.IsValid() || isEmptyValue(dstElement, !config.ShouldNotDereference)) { - if dst.IsNil() { - dst.Set(reflect.MakeMap(dst.Type())) - } - dst.SetMapIndex(key, srcElement) - } - } - - // Ensure that all keys in dst are deleted if they are not in src. - if overwriteWithEmptySrc { - for _, key := range dst.MapKeys() { - srcElement := src.MapIndex(key) - if !srcElement.IsValid() { - dst.SetMapIndex(key, reflect.Value{}) - } - } - } - case reflect.Slice: - if !dst.CanSet() { - break - } - if (!isEmptyValue(src, !config.ShouldNotDereference) || overwriteWithEmptySrc || overwriteSliceWithEmptySrc) && (overwrite || isEmptyValue(dst, !config.ShouldNotDereference)) && !config.AppendSlice && !sliceDeepCopy { - dst.Set(src) - } else if config.AppendSlice { - if src.Type() != dst.Type() { - return fmt.Errorf("cannot append two slice with different type (%s, %s)", src.Type(), dst.Type()) - } - dst.Set(reflect.AppendSlice(dst, src)) - } else if sliceDeepCopy { - for i := 0; i < src.Len() && i < dst.Len(); i++ { - srcElement := src.Index(i) - dstElement := dst.Index(i) - if srcElement.CanInterface() { - srcElement = reflect.ValueOf(srcElement.Interface()) - } - if dstElement.CanInterface() { - dstElement = reflect.ValueOf(dstElement.Interface()) - } - - if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil { - return - } - } - } - case reflect.Ptr: - fallthrough - case reflect.Interface: - if isReflectNil(src) { - if overwriteWithEmptySrc && dst.CanSet() && src.Type().AssignableTo(dst.Type()) { - dst.Set(src) - } - break - } - - if src.Kind() != reflect.Interface { - if dst.IsNil() || (src.Kind() != reflect.Ptr && overwrite) { - if dst.CanSet() && (overwrite || isEmptyValue(dst, !config.ShouldNotDereference)) { - dst.Set(src) - } - } else if src.Kind() == reflect.Ptr { - if !config.ShouldNotDereference { - if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil { - return - } - } else { - if overwriteWithEmptySrc || (overwrite && !src.IsNil()) || dst.IsNil() { - dst.Set(src) - } - } - } else if dst.Elem().Type() == src.Type() { - if err = deepMerge(dst.Elem(), src, visited, depth+1, config); err != nil { - return - } - } else { - return ErrDifferentArgumentsTypes - } - break - } - - if dst.IsNil() || overwrite { - if dst.CanSet() && (overwrite || isEmptyValue(dst, !config.ShouldNotDereference)) { - dst.Set(src) - } - break - } - - if dst.Elem().Kind() == src.Elem().Kind() { - if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil { - return - } - break - } - default: - mustSet := (isEmptyValue(dst, !config.ShouldNotDereference) || overwrite) && (!isEmptyValue(src, !config.ShouldNotDereference) || overwriteWithEmptySrc) - if mustSet { - if dst.CanSet() { - dst.Set(src) - } else { - dst = src - } - } - } - - return -} - -// Merge will fill any empty for value type attributes on the dst struct using corresponding -// src attributes if they themselves are not empty. dst and src must be valid same-type structs -// and dst must be a pointer to struct. -// It won't merge unexported (private) fields and will do recursively any exported field. -func Merge(dst, src interface{}, opts ...func(*Config)) error { - return merge(dst, src, opts...) -} - -// MergeWithOverwrite will do the same as Merge except that non-empty dst attributes will be overridden by -// non-empty src attribute values. 
-// Deprecated: use Merge(…) with WithOverride -func MergeWithOverwrite(dst, src interface{}, opts ...func(*Config)) error { - return merge(dst, src, append(opts, WithOverride)...) -} - -// WithTransformers adds transformers to merge, allowing to customize the merging of some types. -func WithTransformers(transformers Transformers) func(*Config) { - return func(config *Config) { - config.Transformers = transformers - } -} - -// WithOverride will make merge override non-empty dst attributes with non-empty src attributes values. -func WithOverride(config *Config) { - config.Overwrite = true -} - -// WithOverwriteWithEmptyValue will make merge override non empty dst attributes with empty src attributes values. -func WithOverwriteWithEmptyValue(config *Config) { - config.Overwrite = true - config.overwriteWithEmptyValue = true -} - -// WithOverrideEmptySlice will make merge override empty dst slice with empty src slice. -func WithOverrideEmptySlice(config *Config) { - config.overwriteSliceWithEmptyValue = true -} - -// WithoutDereference prevents dereferencing pointers when evaluating whether they are empty -// (i.e. a non-nil pointer is never considered empty). -func WithoutDereference(config *Config) { - config.ShouldNotDereference = true -} - -// WithAppendSlice will make merge append slices instead of overwriting it. -func WithAppendSlice(config *Config) { - config.AppendSlice = true -} - -// WithTypeCheck will make merge check types while overwriting it (must be used with WithOverride). -func WithTypeCheck(config *Config) { - config.TypeCheck = true -} - -// WithSliceDeepCopy will merge slice element one by one with Overwrite flag. -func WithSliceDeepCopy(config *Config) { - config.sliceDeepCopy = true - config.Overwrite = true -} - -func merge(dst, src interface{}, opts ...func(*Config)) error { - if dst != nil && reflect.ValueOf(dst).Kind() != reflect.Ptr { - return ErrNonPointerArgument - } - var ( - vDst, vSrc reflect.Value - err error - ) - - config := &Config{} - - for _, opt := range opts { - opt(config) - } - - if vDst, vSrc, err = resolveValues(dst, src); err != nil { - return err - } - if vDst.Type() != vSrc.Type() { - return ErrDifferentArgumentsTypes - } - return deepMerge(vDst, vSrc, make(map[uintptr]*visit), 0, config) -} - -// IsReflectNil is the reflect value provided nil -func isReflectNil(v reflect.Value) bool { - k := v.Kind() - switch k { - case reflect.Interface, reflect.Slice, reflect.Chan, reflect.Func, reflect.Map, reflect.Ptr: - // Both interface and slice are nil if first word is 0. - // Both are always bigger than a word; assume flagIndir. - return v.IsNil() - default: - return false - } -} diff --git a/vendor/github.com/imdario/mergo/mergo.go b/vendor/github.com/imdario/mergo/mergo.go deleted file mode 100644 index 0a721e2d8..000000000 --- a/vendor/github.com/imdario/mergo/mergo.go +++ /dev/null @@ -1,81 +0,0 @@ -// Copyright 2013 Dario Castañé. All rights reserved. -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Based on src/pkg/reflect/deepequal.go from official -// golang's stdlib. - -package mergo - -import ( - "errors" - "reflect" -) - -// Errors reported by Mergo when it finds invalid arguments. 
-var ( - ErrNilArguments = errors.New("src and dst must not be nil") - ErrDifferentArgumentsTypes = errors.New("src and dst must be of same type") - ErrNotSupported = errors.New("only structs, maps, and slices are supported") - ErrExpectedMapAsDestination = errors.New("dst was expected to be a map") - ErrExpectedStructAsDestination = errors.New("dst was expected to be a struct") - ErrNonPointerArgument = errors.New("dst must be a pointer") -) - -// During deepMerge, must keep track of checks that are -// in progress. The comparison algorithm assumes that all -// checks in progress are true when it reencounters them. -// Visited are stored in a map indexed by 17 * a1 + a2; -type visit struct { - typ reflect.Type - next *visit - ptr uintptr -} - -// From src/pkg/encoding/json/encode.go. -func isEmptyValue(v reflect.Value, shouldDereference bool) bool { - switch v.Kind() { - case reflect.Array, reflect.Map, reflect.Slice, reflect.String: - return v.Len() == 0 - case reflect.Bool: - return !v.Bool() - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return v.Int() == 0 - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return v.Uint() == 0 - case reflect.Float32, reflect.Float64: - return v.Float() == 0 - case reflect.Interface, reflect.Ptr: - if v.IsNil() { - return true - } - if shouldDereference { - return isEmptyValue(v.Elem(), shouldDereference) - } - return false - case reflect.Func: - return v.IsNil() - case reflect.Invalid: - return true - } - return false -} - -func resolveValues(dst, src interface{}) (vDst, vSrc reflect.Value, err error) { - if dst == nil || src == nil { - err = ErrNilArguments - return - } - vDst = reflect.ValueOf(dst).Elem() - if vDst.Kind() != reflect.Struct && vDst.Kind() != reflect.Map && vDst.Kind() != reflect.Slice { - err = ErrNotSupported - return - } - vSrc = reflect.ValueOf(src) - // We check if vSrc is a pointer to dereference it. - if vSrc.Kind() == reflect.Ptr { - vSrc = vSrc.Elem() - } - return -} diff --git a/vendor/github.com/klauspost/compress/.gitattributes b/vendor/github.com/klauspost/compress/.gitattributes deleted file mode 100644 index 402433593..000000000 --- a/vendor/github.com/klauspost/compress/.gitattributes +++ /dev/null @@ -1,2 +0,0 @@ -* -text -*.bin -text -diff diff --git a/vendor/github.com/klauspost/compress/.gitignore b/vendor/github.com/klauspost/compress/.gitignore deleted file mode 100644 index d31b37815..000000000 --- a/vendor/github.com/klauspost/compress/.gitignore +++ /dev/null @@ -1,32 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe -*.test -*.prof -/s2/cmd/_s2sx/sfx-exe - -# Linux perf files -perf.data -perf.data.old - -# gdb history -.gdb_history diff --git a/vendor/github.com/klauspost/compress/.goreleaser.yml b/vendor/github.com/klauspost/compress/.goreleaser.yml deleted file mode 100644 index a22953805..000000000 --- a/vendor/github.com/klauspost/compress/.goreleaser.yml +++ /dev/null @@ -1,123 +0,0 @@ -# This is an example goreleaser.yaml file with some sane defaults. 
-# Make sure to check the documentation at http://goreleaser.com -before: - hooks: - - ./gen.sh - -builds: - - - id: "s2c" - binary: s2c - main: ./s2/cmd/s2c/main.go - flags: - - -trimpath - env: - - CGO_ENABLED=0 - goos: - - aix - - linux - - freebsd - - netbsd - - windows - - darwin - goarch: - - 386 - - amd64 - - arm - - arm64 - - ppc64 - - ppc64le - - mips64 - - mips64le - goarm: - - 7 - - - id: "s2d" - binary: s2d - main: ./s2/cmd/s2d/main.go - flags: - - -trimpath - env: - - CGO_ENABLED=0 - goos: - - aix - - linux - - freebsd - - netbsd - - windows - - darwin - goarch: - - 386 - - amd64 - - arm - - arm64 - - ppc64 - - ppc64le - - mips64 - - mips64le - goarm: - - 7 - - - id: "s2sx" - binary: s2sx - main: ./s2/cmd/_s2sx/main.go - flags: - - -modfile=s2sx.mod - - -trimpath - env: - - CGO_ENABLED=0 - goos: - - aix - - linux - - freebsd - - netbsd - - windows - - darwin - goarch: - - 386 - - amd64 - - arm - - arm64 - - ppc64 - - ppc64le - - mips64 - - mips64le - goarm: - - 7 - -archives: - - - id: s2-binaries - name_template: "s2-{{ .Os }}_{{ .Arch }}{{ if .Arm }}v{{ .Arm }}{{ end }}" - format_overrides: - - goos: windows - format: zip - files: - - unpack/* - - s2/LICENSE - - s2/README.md -checksum: - name_template: 'checksums.txt' -snapshot: - name_template: "{{ .Tag }}-next" -changelog: - sort: asc - filters: - exclude: - - '^doc:' - - '^docs:' - - '^test:' - - '^tests:' - - '^Update\sREADME.md' - -nfpms: - - - file_name_template: "s2_package__{{ .Os }}_{{ .Arch }}{{ if .Arm }}v{{ .Arm }}{{ end }}" - vendor: Klaus Post - homepage: https://github.com/klauspost/compress - maintainer: Klaus Post - description: S2 Compression Tool - license: BSD 3-Clause - formats: - - deb - - rpm diff --git a/vendor/github.com/klauspost/compress/README.md b/vendor/github.com/klauspost/compress/README.md deleted file mode 100644 index 05c7359e4..000000000 --- a/vendor/github.com/klauspost/compress/README.md +++ /dev/null @@ -1,700 +0,0 @@ -# compress - -This package provides various compression algorithms. - -* [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression and decompression in pure Go. -* [S2](https://github.com/klauspost/compress/tree/master/s2#s2-compression) is a high performance replacement for Snappy. -* Optimized [deflate](https://godoc.org/github.com/klauspost/compress/flate) packages which can be used as a dropin replacement for [gzip](https://godoc.org/github.com/klauspost/compress/gzip), [zip](https://godoc.org/github.com/klauspost/compress/zip) and [zlib](https://godoc.org/github.com/klauspost/compress/zlib). -* [snappy](https://github.com/klauspost/compress/tree/master/snappy) is a drop-in replacement for `github.com/golang/snappy` offering better compression and concurrent streams. -* [huff0](https://github.com/klauspost/compress/tree/master/huff0) and [FSE](https://github.com/klauspost/compress/tree/master/fse) implementations for raw entropy encoding. -* [gzhttp](https://github.com/klauspost/compress/tree/master/gzhttp) Provides client and server wrappers for handling gzipped requests efficiently. -* [pgzip](https://github.com/klauspost/pgzip) is a separate package that provides a very fast parallel gzip implementation. 
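For orientation, a minimal sketch of the zstd round-trip mentioned in the first bullet above, assuming the `zstd` sub-package's documented `NewWriter`/`NewReader` constructors and `EncodeAll`/`DecodeAll` buffer helpers; the sample data and printed sizes are illustrative only:

```go
package main

import (
	"fmt"

	"github.com/klauspost/compress/zstd"
)

func main() {
	// Passing nil as the writer/reader is allowed when the encoder and
	// decoder are only used through EncodeAll/DecodeAll.
	enc, err := zstd.NewWriter(nil)
	if err != nil {
		panic(err)
	}
	defer enc.Close()

	dec, err := zstd.NewReader(nil)
	if err != nil {
		panic(err)
	}
	defer dec.Close()

	// Arbitrary, repetitive sample input.
	src := []byte("hello, hello, hello compression")

	compressed := enc.EncodeAll(src, nil)
	out, err := dec.DecodeAll(compressed, nil)
	if err != nil {
		panic(err)
	}

	fmt.Printf("%d -> %d bytes, round-trip ok: %v\n",
		len(src), len(compressed), string(out) == string(src))
}
```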
- -[![Go Reference](https://pkg.go.dev/badge/klauspost/compress.svg)](https://pkg.go.dev/github.com/klauspost/compress?tab=subdirectories) -[![Go](https://github.com/klauspost/compress/actions/workflows/go.yml/badge.svg)](https://github.com/klauspost/compress/actions/workflows/go.yml) -[![Sourcegraph Badge](https://sourcegraph.com/github.com/klauspost/compress/-/badge.svg)](https://sourcegraph.com/github.com/klauspost/compress?badge) - -# changelog - -* Feb 5th, 2024 - [1.17.6](https://github.com/klauspost/compress/releases/tag/v1.17.6) - * zstd: Fix incorrect repeat coding in best mode https://github.com/klauspost/compress/pull/923 - * s2: Fix DecodeConcurrent deadlock on errors https://github.com/klauspost/compress/pull/925 - -* Jan 26th, 2024 - [v1.17.5](https://github.com/klauspost/compress/releases/tag/v1.17.5) - * flate: Fix reset with dictionary on custom window encodes https://github.com/klauspost/compress/pull/912 - * zstd: Add Frame header encoding and stripping https://github.com/klauspost/compress/pull/908 - * zstd: Limit better/best default window to 8MB https://github.com/klauspost/compress/pull/913 - * zstd: Speed improvements by @greatroar in https://github.com/klauspost/compress/pull/896 https://github.com/klauspost/compress/pull/910 - * s2: Fix callbacks for skippable blocks and disallow 0xfe (Padding) by @Jille in https://github.com/klauspost/compress/pull/916 https://github.com/klauspost/compress/pull/917 -https://github.com/klauspost/compress/pull/919 https://github.com/klauspost/compress/pull/918 - -* Dec 1st, 2023 - [v1.17.4](https://github.com/klauspost/compress/releases/tag/v1.17.4) - * huff0: Speed up symbol counting by @greatroar in https://github.com/klauspost/compress/pull/887 - * huff0: Remove byteReader by @greatroar in https://github.com/klauspost/compress/pull/886 - * gzhttp: Allow overriding decompression on transport https://github.com/klauspost/compress/pull/892 - * gzhttp: Clamp compression level https://github.com/klauspost/compress/pull/890 - * gzip: Error out if reserved bits are set https://github.com/klauspost/compress/pull/891 - -* Nov 15th, 2023 - [v1.17.3](https://github.com/klauspost/compress/releases/tag/v1.17.3) - * fse: Fix max header size https://github.com/klauspost/compress/pull/881 - * zstd: Improve better/best compression https://github.com/klauspost/compress/pull/877 - * gzhttp: Fix missing content type on Close https://github.com/klauspost/compress/pull/883 - -* Oct 22nd, 2023 - [v1.17.2](https://github.com/klauspost/compress/releases/tag/v1.17.2) - * zstd: Fix rare *CORRUPTION* output in "best" mode. 
See https://github.com/klauspost/compress/pull/876 - -* Oct 14th, 2023 - [v1.17.1](https://github.com/klauspost/compress/releases/tag/v1.17.1) - * s2: Fix S2 "best" dictionary wrong encoding by @klauspost in https://github.com/klauspost/compress/pull/871 - * flate: Reduce allocations in decompressor and minor code improvements by @fakefloordiv in https://github.com/klauspost/compress/pull/869 - * s2: Fix EstimateBlockSize on 6&7 length input by @klauspost in https://github.com/klauspost/compress/pull/867 - -* Sept 19th, 2023 - [v1.17.0](https://github.com/klauspost/compress/releases/tag/v1.17.0) - * Add experimental dictionary builder https://github.com/klauspost/compress/pull/853 - * Add xerial snappy read/writer https://github.com/klauspost/compress/pull/838 - * flate: Add limited window compression https://github.com/klauspost/compress/pull/843 - * s2: Do 2 overlapping match checks https://github.com/klauspost/compress/pull/839 - * flate: Add amd64 assembly matchlen https://github.com/klauspost/compress/pull/837 - * gzip: Copy bufio.Reader on Reset by @thatguystone in https://github.com/klauspost/compress/pull/860 - -
- See changes to v1.16.x - - -* July 1st, 2023 - [v1.16.7](https://github.com/klauspost/compress/releases/tag/v1.16.7) - * zstd: Fix default level first dictionary encode https://github.com/klauspost/compress/pull/829 - * s2: add GetBufferCapacity() method by @GiedriusS in https://github.com/klauspost/compress/pull/832 - -* June 13, 2023 - [v1.16.6](https://github.com/klauspost/compress/releases/tag/v1.16.6) - * zstd: correctly ignore WithEncoderPadding(1) by @ianlancetaylor in https://github.com/klauspost/compress/pull/806 - * zstd: Add amd64 match length assembly https://github.com/klauspost/compress/pull/824 - * gzhttp: Handle informational headers by @rtribotte in https://github.com/klauspost/compress/pull/815 - * s2: Improve Better compression slightly https://github.com/klauspost/compress/pull/663 - -* Apr 16, 2023 - [v1.16.5](https://github.com/klauspost/compress/releases/tag/v1.16.5) - * zstd: readByte needs to use io.ReadFull by @jnoxon in https://github.com/klauspost/compress/pull/802 - * gzip: Fix WriterTo after initial read https://github.com/klauspost/compress/pull/804 - -* Apr 5, 2023 - [v1.16.4](https://github.com/klauspost/compress/releases/tag/v1.16.4) - * zstd: Improve zstd best efficiency by @greatroar and @klauspost in https://github.com/klauspost/compress/pull/784 - * zstd: Respect WithAllLitEntropyCompression https://github.com/klauspost/compress/pull/792 - * zstd: Fix amd64 not always detecting corrupt data https://github.com/klauspost/compress/pull/785 - * zstd: Various minor improvements by @greatroar in https://github.com/klauspost/compress/pull/788 https://github.com/klauspost/compress/pull/794 https://github.com/klauspost/compress/pull/795 - * s2: Fix huge block overflow https://github.com/klauspost/compress/pull/779 - * s2: Allow CustomEncoder fallback https://github.com/klauspost/compress/pull/780 - * gzhttp: Suppport ResponseWriter Unwrap() in gzhttp handler by @jgimenez in https://github.com/klauspost/compress/pull/799 - -* Mar 13, 2023 - [v1.16.1](https://github.com/klauspost/compress/releases/tag/v1.16.1) - * zstd: Speed up + improve best encoder by @greatroar in https://github.com/klauspost/compress/pull/776 - * gzhttp: Add optional [BREACH mitigation](https://github.com/klauspost/compress/tree/master/gzhttp#breach-mitigation). https://github.com/klauspost/compress/pull/762 https://github.com/klauspost/compress/pull/768 https://github.com/klauspost/compress/pull/769 https://github.com/klauspost/compress/pull/770 https://github.com/klauspost/compress/pull/767 - * s2: Add Intel LZ4s converter https://github.com/klauspost/compress/pull/766 - * zstd: Minor bug fixes https://github.com/klauspost/compress/pull/771 https://github.com/klauspost/compress/pull/772 https://github.com/klauspost/compress/pull/773 - * huff0: Speed up compress1xDo by @greatroar in https://github.com/klauspost/compress/pull/774 - -* Feb 26, 2023 - [v1.16.0](https://github.com/klauspost/compress/releases/tag/v1.16.0) - * s2: Add [Dictionary](https://github.com/klauspost/compress/tree/master/s2#dictionaries) support. https://github.com/klauspost/compress/pull/685 - * s2: Add Compression Size Estimate. https://github.com/klauspost/compress/pull/752 - * s2: Add support for custom stream encoder. https://github.com/klauspost/compress/pull/755 - * s2: Add LZ4 block converter. https://github.com/klauspost/compress/pull/748 - * s2: Support io.ReaderAt in ReadSeeker. https://github.com/klauspost/compress/pull/747 - * s2c/s2sx: Use concurrent decoding. 
https://github.com/klauspost/compress/pull/746 -
- -
- See changes to v1.15.x - -* Jan 21st, 2023 (v1.15.15) - * deflate: Improve level 7-9 by @klauspost in https://github.com/klauspost/compress/pull/739 - * zstd: Add delta encoding support by @greatroar in https://github.com/klauspost/compress/pull/728 - * zstd: Various speed improvements by @greatroar https://github.com/klauspost/compress/pull/741 https://github.com/klauspost/compress/pull/734 https://github.com/klauspost/compress/pull/736 https://github.com/klauspost/compress/pull/744 https://github.com/klauspost/compress/pull/743 https://github.com/klauspost/compress/pull/745 - * gzhttp: Add SuffixETag() and DropETag() options to prevent ETag collisions on compressed responses by @willbicks in https://github.com/klauspost/compress/pull/740 - -* Jan 3rd, 2023 (v1.15.14) - - * flate: Improve speed in big stateless blocks https://github.com/klauspost/compress/pull/718 - * zstd: Minor speed tweaks by @greatroar in https://github.com/klauspost/compress/pull/716 https://github.com/klauspost/compress/pull/720 - * export NoGzipResponseWriter for custom ResponseWriter wrappers by @harshavardhana in https://github.com/klauspost/compress/pull/722 - * s2: Add example for indexing and existing stream https://github.com/klauspost/compress/pull/723 - -* Dec 11, 2022 (v1.15.13) - * zstd: Add [MaxEncodedSize](https://pkg.go.dev/github.com/klauspost/compress@v1.15.13/zstd#Encoder.MaxEncodedSize) to encoder https://github.com/klauspost/compress/pull/691 - * zstd: Various tweaks and improvements https://github.com/klauspost/compress/pull/693 https://github.com/klauspost/compress/pull/695 https://github.com/klauspost/compress/pull/696 https://github.com/klauspost/compress/pull/701 https://github.com/klauspost/compress/pull/702 https://github.com/klauspost/compress/pull/703 https://github.com/klauspost/compress/pull/704 https://github.com/klauspost/compress/pull/705 https://github.com/klauspost/compress/pull/706 https://github.com/klauspost/compress/pull/707 https://github.com/klauspost/compress/pull/708 - -* Oct 26, 2022 (v1.15.12) - - * zstd: Tweak decoder allocs. https://github.com/klauspost/compress/pull/680 - * gzhttp: Always delete `HeaderNoCompression` https://github.com/klauspost/compress/pull/683 - -* Sept 26, 2022 (v1.15.11) - - * flate: Improve level 1-3 compression https://github.com/klauspost/compress/pull/678 - * zstd: Improve "best" compression by @nightwolfz in https://github.com/klauspost/compress/pull/677 - * zstd: Fix+reduce decompression allocations https://github.com/klauspost/compress/pull/668 - * zstd: Fix non-effective noescape tag https://github.com/klauspost/compress/pull/667 - -* Sept 16, 2022 (v1.15.10) - - * zstd: Add [WithDecodeAllCapLimit](https://pkg.go.dev/github.com/klauspost/compress@v1.15.10/zstd#WithDecodeAllCapLimit) https://github.com/klauspost/compress/pull/649 - * Add Go 1.19 - deprecate Go 1.16 https://github.com/klauspost/compress/pull/651 - * flate: Improve level 5+6 compression https://github.com/klauspost/compress/pull/656 - * zstd: Improve "better" compresssion https://github.com/klauspost/compress/pull/657 - * s2: Improve "best" compression https://github.com/klauspost/compress/pull/658 - * s2: Improve "better" compression. 
https://github.com/klauspost/compress/pull/635 - * s2: Slightly faster non-assembly decompression https://github.com/klauspost/compress/pull/646 - * Use arrays for constant size copies https://github.com/klauspost/compress/pull/659 - -* July 21, 2022 (v1.15.9) - - * zstd: Fix decoder crash on amd64 (no BMI) on invalid input https://github.com/klauspost/compress/pull/645 - * zstd: Disable decoder extended memory copies (amd64) due to possible crashes https://github.com/klauspost/compress/pull/644 - * zstd: Allow single segments up to "max decoded size" by @klauspost in https://github.com/klauspost/compress/pull/643 - -* July 13, 2022 (v1.15.8) - - * gzip: fix stack exhaustion bug in Reader.Read https://github.com/klauspost/compress/pull/641 - * s2: Add Index header trim/restore https://github.com/klauspost/compress/pull/638 - * zstd: Optimize seqdeq amd64 asm by @greatroar in https://github.com/klauspost/compress/pull/636 - * zstd: Improve decoder memcopy https://github.com/klauspost/compress/pull/637 - * huff0: Pass a single bitReader pointer to asm by @greatroar in https://github.com/klauspost/compress/pull/634 - * zstd: Branchless getBits for amd64 w/o BMI2 by @greatroar in https://github.com/klauspost/compress/pull/640 - * gzhttp: Remove header before writing https://github.com/klauspost/compress/pull/639 - -* June 29, 2022 (v1.15.7) - - * s2: Fix absolute forward seeks https://github.com/klauspost/compress/pull/633 - * zip: Merge upstream https://github.com/klauspost/compress/pull/631 - * zip: Re-add zip64 fix https://github.com/klauspost/compress/pull/624 - * zstd: translate fseDecoder.buildDtable into asm by @WojciechMula in https://github.com/klauspost/compress/pull/598 - * flate: Faster histograms https://github.com/klauspost/compress/pull/620 - * deflate: Use compound hcode https://github.com/klauspost/compress/pull/622 - -* June 3, 2022 (v1.15.6) - * s2: Improve coding for long, close matches https://github.com/klauspost/compress/pull/613 - * s2c: Add Snappy/S2 stream recompression https://github.com/klauspost/compress/pull/611 - * zstd: Always use configured block size https://github.com/klauspost/compress/pull/605 - * zstd: Fix incorrect hash table placement for dict encoding in default https://github.com/klauspost/compress/pull/606 - * zstd: Apply default config to ZipDecompressor without options https://github.com/klauspost/compress/pull/608 - * gzhttp: Exclude more common archive formats https://github.com/klauspost/compress/pull/612 - * s2: Add ReaderIgnoreCRC https://github.com/klauspost/compress/pull/609 - * s2: Remove sanity load on index creation https://github.com/klauspost/compress/pull/607 - * snappy: Use dedicated function for scoring https://github.com/klauspost/compress/pull/614 - * s2c+s2d: Use official snappy framed extension https://github.com/klauspost/compress/pull/610 - -* May 25, 2022 (v1.15.5) - * s2: Add concurrent stream decompression https://github.com/klauspost/compress/pull/602 - * s2: Fix final emit oob read crash on amd64 https://github.com/klauspost/compress/pull/601 - * huff0: asm implementation of Decompress1X by @WojciechMula https://github.com/klauspost/compress/pull/596 - * zstd: Use 1 less goroutine for stream decoding https://github.com/klauspost/compress/pull/588 - * zstd: Copy literal in 16 byte blocks when possible https://github.com/klauspost/compress/pull/592 - * zstd: Speed up when WithDecoderLowmem(false) https://github.com/klauspost/compress/pull/599 - * zstd: faster next state update in BMI2 version of decode by @WojciechMula in 
https://github.com/klauspost/compress/pull/593 - * huff0: Do not check max size when reading table. https://github.com/klauspost/compress/pull/586 - * flate: Inplace hashing for level 7-9 by @klauspost in https://github.com/klauspost/compress/pull/590 - - -* May 11, 2022 (v1.15.4) - * huff0: decompress directly into output by @WojciechMula in [#577](https://github.com/klauspost/compress/pull/577) - * inflate: Keep dict on stack [#581](https://github.com/klauspost/compress/pull/581) - * zstd: Faster decoding memcopy in asm [#583](https://github.com/klauspost/compress/pull/583) - * zstd: Fix ignored crc [#580](https://github.com/klauspost/compress/pull/580) - -* May 5, 2022 (v1.15.3) - * zstd: Allow to ignore checksum checking by @WojciechMula [#572](https://github.com/klauspost/compress/pull/572) - * s2: Fix incorrect seek for io.SeekEnd in [#575](https://github.com/klauspost/compress/pull/575) - -* Apr 26, 2022 (v1.15.2) - * zstd: Add x86-64 assembly for decompression on streams and blocks. Contributed by [@WojciechMula](https://github.com/WojciechMula). Typically 2x faster. [#528](https://github.com/klauspost/compress/pull/528) [#531](https://github.com/klauspost/compress/pull/531) [#545](https://github.com/klauspost/compress/pull/545) [#537](https://github.com/klauspost/compress/pull/537) - * zstd: Add options to ZipDecompressor and fixes [#539](https://github.com/klauspost/compress/pull/539) - * s2: Use sorted search for index [#555](https://github.com/klauspost/compress/pull/555) - * Minimum version is Go 1.16, added CI test on 1.18. - -* Mar 11, 2022 (v1.15.1) - * huff0: Add x86 assembly of Decode4X by @WojciechMula in [#512](https://github.com/klauspost/compress/pull/512) - * zstd: Reuse zip decoders in [#514](https://github.com/klauspost/compress/pull/514) - * zstd: Detect extra block data and report as corrupted in [#520](https://github.com/klauspost/compress/pull/520) - * zstd: Handle zero sized frame content size stricter in [#521](https://github.com/klauspost/compress/pull/521) - * zstd: Add stricter block size checks in [#523](https://github.com/klauspost/compress/pull/523) - -* Mar 3, 2022 (v1.15.0) - * zstd: Refactor decoder by @klauspost in [#498](https://github.com/klauspost/compress/pull/498) - * zstd: Add stream encoding without goroutines by @klauspost in [#505](https://github.com/klauspost/compress/pull/505) - * huff0: Prevent single blocks exceeding 16 bits by @klauspost in[#507](https://github.com/klauspost/compress/pull/507) - * flate: Inline literal emission by @klauspost in [#509](https://github.com/klauspost/compress/pull/509) - * gzhttp: Add zstd to transport by @klauspost in [#400](https://github.com/klauspost/compress/pull/400) - * gzhttp: Make content-type optional by @klauspost in [#510](https://github.com/klauspost/compress/pull/510) - -Both compression and decompression now supports "synchronous" stream operations. This means that whenever "concurrency" is set to 1, they will operate without spawning goroutines. - -Stream decompression is now faster on asynchronous, since the goroutine allocation much more effectively splits the workload. On typical streams this will typically use 2 cores fully for decompression. When a stream has finished decoding no goroutines will be left over, so decoders can now safely be pooled and still be garbage collected. - -While the release has been extensively tested, it is recommended to testing when upgrading. - -
- -
- See changes to v1.14.x - -* Feb 22, 2022 (v1.14.4) - * flate: Fix rare huffman only (-2) corruption. [#503](https://github.com/klauspost/compress/pull/503) - * zip: Update deprecated CreateHeaderRaw to correctly call CreateRaw by @saracen in [#502](https://github.com/klauspost/compress/pull/502) - * zip: don't read data descriptor early by @saracen in [#501](https://github.com/klauspost/compress/pull/501) #501 - * huff0: Use static decompression buffer up to 30% faster by @klauspost in [#499](https://github.com/klauspost/compress/pull/499) [#500](https://github.com/klauspost/compress/pull/500) - -* Feb 17, 2022 (v1.14.3) - * flate: Improve fastest levels compression speed ~10% more throughput. [#482](https://github.com/klauspost/compress/pull/482) [#489](https://github.com/klauspost/compress/pull/489) [#490](https://github.com/klauspost/compress/pull/490) [#491](https://github.com/klauspost/compress/pull/491) [#494](https://github.com/klauspost/compress/pull/494) [#478](https://github.com/klauspost/compress/pull/478) - * flate: Faster decompression speed, ~5-10%. [#483](https://github.com/klauspost/compress/pull/483) - * s2: Faster compression with Go v1.18 and amd64 microarch level 3+. [#484](https://github.com/klauspost/compress/pull/484) [#486](https://github.com/klauspost/compress/pull/486) - -* Jan 25, 2022 (v1.14.2) - * zstd: improve header decoder by @dsnet [#476](https://github.com/klauspost/compress/pull/476) - * zstd: Add bigger default blocks [#469](https://github.com/klauspost/compress/pull/469) - * zstd: Remove unused decompression buffer [#470](https://github.com/klauspost/compress/pull/470) - * zstd: Fix logically dead code by @ningmingxiao [#472](https://github.com/klauspost/compress/pull/472) - * flate: Improve level 7-9 [#471](https://github.com/klauspost/compress/pull/471) [#473](https://github.com/klauspost/compress/pull/473) - * zstd: Add noasm tag for xxhash [#475](https://github.com/klauspost/compress/pull/475) - -* Jan 11, 2022 (v1.14.1) - * s2: Add stream index in [#462](https://github.com/klauspost/compress/pull/462) - * flate: Speed and efficiency improvements in [#439](https://github.com/klauspost/compress/pull/439) [#461](https://github.com/klauspost/compress/pull/461) [#455](https://github.com/klauspost/compress/pull/455) [#452](https://github.com/klauspost/compress/pull/452) [#458](https://github.com/klauspost/compress/pull/458) - * zstd: Performance improvement in [#420]( https://github.com/klauspost/compress/pull/420) [#456](https://github.com/klauspost/compress/pull/456) [#437](https://github.com/klauspost/compress/pull/437) [#467](https://github.com/klauspost/compress/pull/467) [#468](https://github.com/klauspost/compress/pull/468) - * zstd: add arm64 xxhash assembly in [#464](https://github.com/klauspost/compress/pull/464) - * Add garbled for binaries for s2 in [#445](https://github.com/klauspost/compress/pull/445) -
- -
- See changes to v1.13.x - -* Aug 30, 2021 (v1.13.5) - * gz/zlib/flate: Alias stdlib errors [#425](https://github.com/klauspost/compress/pull/425) - * s2: Add block support to commandline tools [#413](https://github.com/klauspost/compress/pull/413) - * zstd: pooledZipWriter should return Writers to the same pool [#426](https://github.com/klauspost/compress/pull/426) - * Removed golang/snappy as external dependency for tests [#421](https://github.com/klauspost/compress/pull/421) - -* Aug 12, 2021 (v1.13.4) - * Add [snappy replacement package](https://github.com/klauspost/compress/tree/master/snappy). - * zstd: Fix incorrect encoding in "best" mode [#415](https://github.com/klauspost/compress/pull/415) - -* Aug 3, 2021 (v1.13.3) - * zstd: Improve Best compression [#404](https://github.com/klauspost/compress/pull/404) - * zstd: Fix WriteTo error forwarding [#411](https://github.com/klauspost/compress/pull/411) - * gzhttp: Return http.HandlerFunc instead of http.Handler. Unlikely breaking change. [#406](https://github.com/klauspost/compress/pull/406) - * s2sx: Fix max size error [#399](https://github.com/klauspost/compress/pull/399) - * zstd: Add optional stream content size on reset [#401](https://github.com/klauspost/compress/pull/401) - * zstd: use SpeedBestCompression for level >= 10 [#410](https://github.com/klauspost/compress/pull/410) - -* Jun 14, 2021 (v1.13.1) - * s2: Add full Snappy output support [#396](https://github.com/klauspost/compress/pull/396) - * zstd: Add configurable [Decoder window](https://pkg.go.dev/github.com/klauspost/compress/zstd#WithDecoderMaxWindow) size [#394](https://github.com/klauspost/compress/pull/394) - * gzhttp: Add header to skip compression [#389](https://github.com/klauspost/compress/pull/389) - * s2: Improve speed with bigger output margin [#395](https://github.com/klauspost/compress/pull/395) - -* Jun 3, 2021 (v1.13.0) - * Added [gzhttp](https://github.com/klauspost/compress/tree/master/gzhttp#gzip-handler) which allows wrapping HTTP servers and clients with GZIP compressors. - * zstd: Detect short invalid signatures [#382](https://github.com/klauspost/compress/pull/382) - * zstd: Spawn decoder goroutine only if needed. [#380](https://github.com/klauspost/compress/pull/380) -
- - -
- See changes to v1.12.x - -* May 25, 2021 (v1.12.3) - * deflate: Better/faster Huffman encoding [#374](https://github.com/klauspost/compress/pull/374) - * deflate: Allocate less for history. [#375](https://github.com/klauspost/compress/pull/375) - * zstd: Forward read errors [#373](https://github.com/klauspost/compress/pull/373) - -* Apr 27, 2021 (v1.12.2) - * zstd: Improve better/best compression [#360](https://github.com/klauspost/compress/pull/360) [#364](https://github.com/klauspost/compress/pull/364) [#365](https://github.com/klauspost/compress/pull/365) - * zstd: Add helpers to compress/decompress zstd inside zip files [#363](https://github.com/klauspost/compress/pull/363) - * deflate: Improve level 5+6 compression [#367](https://github.com/klauspost/compress/pull/367) - * s2: Improve better/best compression [#358](https://github.com/klauspost/compress/pull/358) [#359](https://github.com/klauspost/compress/pull/358) - * s2: Load after checking src limit on amd64. [#362](https://github.com/klauspost/compress/pull/362) - * s2sx: Limit max executable size [#368](https://github.com/klauspost/compress/pull/368) - -* Apr 14, 2021 (v1.12.1) - * snappy package removed. Upstream added as dependency. - * s2: Better compression in "best" mode [#353](https://github.com/klauspost/compress/pull/353) - * s2sx: Add stdin input and detect pre-compressed from signature [#352](https://github.com/klauspost/compress/pull/352) - * s2c/s2d: Add http as possible input [#348](https://github.com/klauspost/compress/pull/348) - * s2c/s2d/s2sx: Always truncate when writing files [#352](https://github.com/klauspost/compress/pull/352) - * zstd: Reduce memory usage further when using [WithLowerEncoderMem](https://pkg.go.dev/github.com/klauspost/compress/zstd#WithLowerEncoderMem) [#346](https://github.com/klauspost/compress/pull/346) - * s2: Fix potential problem with amd64 assembly and profilers [#349](https://github.com/klauspost/compress/pull/349) -
- -
- See changes to v1.11.x - -* Mar 26, 2021 (v1.11.13) - * zstd: Big speedup on small dictionary encodes [#344](https://github.com/klauspost/compress/pull/344) [#345](https://github.com/klauspost/compress/pull/345) - * zstd: Add [WithLowerEncoderMem](https://pkg.go.dev/github.com/klauspost/compress/zstd#WithLowerEncoderMem) encoder option [#336](https://github.com/klauspost/compress/pull/336) - * deflate: Improve entropy compression [#338](https://github.com/klauspost/compress/pull/338) - * s2: Clean up and minor performance improvement in best [#341](https://github.com/klauspost/compress/pull/341) - -* Mar 5, 2021 (v1.11.12) - * s2: Add `s2sx` binary that creates [self extracting archives](https://github.com/klauspost/compress/tree/master/s2#s2sx-self-extracting-archives). - * s2: Speed up decompression on non-assembly platforms [#328](https://github.com/klauspost/compress/pull/328) - -* Mar 1, 2021 (v1.11.9) - * s2: Add ARM64 decompression assembly. Around 2x output speed. [#324](https://github.com/klauspost/compress/pull/324) - * s2: Improve "better" speed and efficiency. [#325](https://github.com/klauspost/compress/pull/325) - * s2: Fix binaries. - -* Feb 25, 2021 (v1.11.8) - * s2: Fixed occational out-of-bounds write on amd64. Upgrade recommended. - * s2: Add AMD64 assembly for better mode. 25-50% faster. [#315](https://github.com/klauspost/compress/pull/315) - * s2: Less upfront decoder allocation. [#322](https://github.com/klauspost/compress/pull/322) - * zstd: Faster "compression" of incompressible data. [#314](https://github.com/klauspost/compress/pull/314) - * zip: Fix zip64 headers. [#313](https://github.com/klauspost/compress/pull/313) - -* Jan 14, 2021 (v1.11.7) - * Use Bytes() interface to get bytes across packages. [#309](https://github.com/klauspost/compress/pull/309) - * s2: Add 'best' compression option. [#310](https://github.com/klauspost/compress/pull/310) - * s2: Add ReaderMaxBlockSize, changes `s2.NewReader` signature to include varargs. [#311](https://github.com/klauspost/compress/pull/311) - * s2: Fix crash on small better buffers. [#308](https://github.com/klauspost/compress/pull/308) - * s2: Clean up decoder. [#312](https://github.com/klauspost/compress/pull/312) - -* Jan 7, 2021 (v1.11.6) - * zstd: Make decoder allocations smaller [#306](https://github.com/klauspost/compress/pull/306) - * zstd: Free Decoder resources when Reset is called with a nil io.Reader [#305](https://github.com/klauspost/compress/pull/305) - -* Dec 20, 2020 (v1.11.4) - * zstd: Add Best compression mode [#304](https://github.com/klauspost/compress/pull/304) - * Add header decoder [#299](https://github.com/klauspost/compress/pull/299) - * s2: Add uncompressed stream option [#297](https://github.com/klauspost/compress/pull/297) - * Simplify/speed up small blocks with known max size. 
[#300](https://github.com/klauspost/compress/pull/300) - * zstd: Always reset literal dict encoder [#303](https://github.com/klauspost/compress/pull/303) - -* Nov 15, 2020 (v1.11.3) - * inflate: 10-15% faster decompression [#293](https://github.com/klauspost/compress/pull/293) - * zstd: Tweak DecodeAll default allocation [#295](https://github.com/klauspost/compress/pull/295) - -* Oct 11, 2020 (v1.11.2) - * s2: Fix out of bounds read in "better" block compression [#291](https://github.com/klauspost/compress/pull/291) - -* Oct 1, 2020 (v1.11.1) - * zstd: Set allLitEntropy true in default configuration [#286](https://github.com/klauspost/compress/pull/286) - -* Sept 8, 2020 (v1.11.0) - * zstd: Add experimental compression [dictionaries](https://github.com/klauspost/compress/tree/master/zstd#dictionaries) [#281](https://github.com/klauspost/compress/pull/281) - * zstd: Fix mixed Write and ReadFrom calls [#282](https://github.com/klauspost/compress/pull/282) - * inflate/gz: Limit variable shifts, ~5% faster decompression [#274](https://github.com/klauspost/compress/pull/274) -
- -
- See changes to v1.10.x - -* July 8, 2020 (v1.10.11) - * zstd: Fix extra block when compressing with ReadFrom. [#278](https://github.com/klauspost/compress/pull/278) - * huff0: Also populate compression table when reading decoding table. [#275](https://github.com/klauspost/compress/pull/275) - -* June 23, 2020 (v1.10.10) - * zstd: Skip entropy compression in fastest mode when no matches. [#270](https://github.com/klauspost/compress/pull/270) - -* June 16, 2020 (v1.10.9): - * zstd: API change for specifying dictionaries. See [#268](https://github.com/klauspost/compress/pull/268) - * zip: update CreateHeaderRaw to handle zip64 fields. [#266](https://github.com/klauspost/compress/pull/266) - * Fuzzit tests removed. The service has been purchased and is no longer available. - -* June 5, 2020 (v1.10.8): - * 1.15x faster zstd block decompression. [#265](https://github.com/klauspost/compress/pull/265) - -* June 1, 2020 (v1.10.7): - * Added zstd decompression [dictionary support](https://github.com/klauspost/compress/tree/master/zstd#dictionaries) - * Increase zstd decompression speed up to 1.19x. [#259](https://github.com/klauspost/compress/pull/259) - * Remove internal reset call in zstd compression and reduce allocations. [#263](https://github.com/klauspost/compress/pull/263) - -* May 21, 2020: (v1.10.6) - * zstd: Reduce allocations while decoding. [#258](https://github.com/klauspost/compress/pull/258), [#252](https://github.com/klauspost/compress/pull/252) - * zstd: Stricter decompression checks. - -* April 12, 2020: (v1.10.5) - * s2-commands: Flush output when receiving SIGINT. [#239](https://github.com/klauspost/compress/pull/239) - -* Apr 8, 2020: (v1.10.4) - * zstd: Minor/special case optimizations. [#251](https://github.com/klauspost/compress/pull/251), [#250](https://github.com/klauspost/compress/pull/250), [#249](https://github.com/klauspost/compress/pull/249), [#247](https://github.com/klauspost/compress/pull/247) -* Mar 11, 2020: (v1.10.3) - * s2: Use S2 encoder in pure Go mode for Snappy output as well. [#245](https://github.com/klauspost/compress/pull/245) - * s2: Fix pure Go block encoder. [#244](https://github.com/klauspost/compress/pull/244) - * zstd: Added "better compression" mode. [#240](https://github.com/klauspost/compress/pull/240) - * zstd: Improve speed of fastest compression mode by 5-10% [#241](https://github.com/klauspost/compress/pull/241) - * zstd: Skip creating encoders when not needed. [#238](https://github.com/klauspost/compress/pull/238) - -* Feb 27, 2020: (v1.10.2) - * Close to 50% speedup in inflate (gzip/zip decompression). [#236](https://github.com/klauspost/compress/pull/236) [#234](https://github.com/klauspost/compress/pull/234) [#232](https://github.com/klauspost/compress/pull/232) - * Reduce deflate level 1-6 memory usage up to 59%. [#227](https://github.com/klauspost/compress/pull/227) - -* Feb 18, 2020: (v1.10.1) - * Fix zstd crash when resetting multiple times without sending data. [#226](https://github.com/klauspost/compress/pull/226) - * deflate: Fix dictionary use on level 1-6. [#224](https://github.com/klauspost/compress/pull/224) - * Remove deflate writer reference when closing. [#224](https://github.com/klauspost/compress/pull/224) - -* Feb 4, 2020: (v1.10.0) - * Add optional dictionary to [stateless deflate](https://pkg.go.dev/github.com/klauspost/compress/flate?tab=doc#StatelessDeflate). Breaking change, send `nil` for previous behaviour. 
[#216](https://github.com/klauspost/compress/pull/216) - * Fix buffer overflow on repeated small block deflate. [#218](https://github.com/klauspost/compress/pull/218) - * Allow copying content from an existing ZIP file without decompressing+compressing. [#214](https://github.com/klauspost/compress/pull/214) - * Added [S2](https://github.com/klauspost/compress/tree/master/s2#s2-compression) AMD64 assembler and various optimizations. Stream speed >10GB/s. [#186](https://github.com/klauspost/compress/pull/186) - -
- -
- See changes prior to v1.10.0 - -* Jan 20,2020 (v1.9.8) Optimize gzip/deflate with better size estimates and faster table generation. [#207](https://github.com/klauspost/compress/pull/207) by [luyu6056](https://github.com/luyu6056), [#206](https://github.com/klauspost/compress/pull/206). -* Jan 11, 2020: S2 Encode/Decode will use provided buffer if capacity is big enough. [#204](https://github.com/klauspost/compress/pull/204) -* Jan 5, 2020: (v1.9.7) Fix another zstd regression in v1.9.5 - v1.9.6 removed. -* Jan 4, 2020: (v1.9.6) Regression in v1.9.5 fixed causing corrupt zstd encodes in rare cases. -* Jan 4, 2020: Faster IO in [s2c + s2d commandline tools](https://github.com/klauspost/compress/tree/master/s2#commandline-tools) compression/decompression. [#192](https://github.com/klauspost/compress/pull/192) -* Dec 29, 2019: Removed v1.9.5 since fuzz tests showed a compatibility problem with the reference zstandard decoder. -* Dec 29, 2019: (v1.9.5) zstd: 10-20% faster block compression. [#199](https://github.com/klauspost/compress/pull/199) -* Dec 29, 2019: [zip](https://godoc.org/github.com/klauspost/compress/zip) package updated with latest Go features -* Dec 29, 2019: zstd: Single segment flag condintions tweaked. [#197](https://github.com/klauspost/compress/pull/197) -* Dec 18, 2019: s2: Faster compression when ReadFrom is used. [#198](https://github.com/klauspost/compress/pull/198) -* Dec 10, 2019: s2: Fix repeat length output when just above at 16MB limit. -* Dec 10, 2019: zstd: Add function to get decoder as io.ReadCloser. [#191](https://github.com/klauspost/compress/pull/191) -* Dec 3, 2019: (v1.9.4) S2: limit max repeat length. [#188](https://github.com/klauspost/compress/pull/188) -* Dec 3, 2019: Add [WithNoEntropyCompression](https://godoc.org/github.com/klauspost/compress/zstd#WithNoEntropyCompression) to zstd [#187](https://github.com/klauspost/compress/pull/187) -* Dec 3, 2019: Reduce memory use for tests. Check for leaked goroutines. -* Nov 28, 2019 (v1.9.3) Less allocations in stateless deflate. -* Nov 28, 2019: 5-20% Faster huff0 decode. Impacts zstd as well. [#184](https://github.com/klauspost/compress/pull/184) -* Nov 12, 2019 (v1.9.2) Added [Stateless Compression](#stateless-compression) for gzip/deflate. -* Nov 12, 2019: Fixed zstd decompression of large single blocks. [#180](https://github.com/klauspost/compress/pull/180) -* Nov 11, 2019: Set default [s2c](https://github.com/klauspost/compress/tree/master/s2#commandline-tools) block size to 4MB. -* Nov 11, 2019: Reduce inflate memory use by 1KB. -* Nov 10, 2019: Less allocations in deflate bit writer. -* Nov 10, 2019: Fix inconsistent error returned by zstd decoder. -* Oct 28, 2019 (v1.9.1) ztsd: Fix crash when compressing blocks. [#174](https://github.com/klauspost/compress/pull/174) -* Oct 24, 2019 (v1.9.0) zstd: Fix rare data corruption [#173](https://github.com/klauspost/compress/pull/173) -* Oct 24, 2019 zstd: Fix huff0 out of buffer write [#171](https://github.com/klauspost/compress/pull/171) and always return errors [#172](https://github.com/klauspost/compress/pull/172) -* Oct 10, 2019: Big deflate rewrite, 30-40% faster with better compression [#105](https://github.com/klauspost/compress/pull/105) - -
- -
- See changes prior to v1.9.0 - -* Oct 10, 2019: (v1.8.6) zstd: Allow partial reads to get flushed data. [#169](https://github.com/klauspost/compress/pull/169) -* Oct 3, 2019: Fix inconsistent results on broken zstd streams. -* Sep 25, 2019: Added `-rm` (remove source files) and `-q` (no output except errors) to `s2c` and `s2d` [commands](https://github.com/klauspost/compress/tree/master/s2#commandline-tools) -* Sep 16, 2019: (v1.8.4) Add `s2c` and `s2d` [commandline tools](https://github.com/klauspost/compress/tree/master/s2#commandline-tools). -* Sep 10, 2019: (v1.8.3) Fix s2 decoder [Skip](https://godoc.org/github.com/klauspost/compress/s2#Reader.Skip). -* Sep 7, 2019: zstd: Added [WithWindowSize](https://godoc.org/github.com/klauspost/compress/zstd#WithWindowSize), contributed by [ianwilkes](https://github.com/ianwilkes). -* Sep 5, 2019: (v1.8.2) Add [WithZeroFrames](https://godoc.org/github.com/klauspost/compress/zstd#WithZeroFrames) which adds full zero payload block encoding option. -* Sep 5, 2019: Lazy initialization of zstandard predefined en/decoder tables. -* Aug 26, 2019: (v1.8.1) S2: 1-2% compression increase in "better" compression mode. -* Aug 26, 2019: zstd: Check maximum size of Huffman 1X compressed literals while decoding. -* Aug 24, 2019: (v1.8.0) Added [S2 compression](https://github.com/klauspost/compress/tree/master/s2#s2-compression), a high performance replacement for Snappy. -* Aug 21, 2019: (v1.7.6) Fixed minor issues found by fuzzer. One could lead to zstd not decompressing. -* Aug 18, 2019: Add [fuzzit](https://fuzzit.dev/) continuous fuzzing. -* Aug 14, 2019: zstd: Skip incompressible data 2x faster. [#147](https://github.com/klauspost/compress/pull/147) -* Aug 4, 2019 (v1.7.5): Better literal compression. [#146](https://github.com/klauspost/compress/pull/146) -* Aug 4, 2019: Faster zstd compression. [#143](https://github.com/klauspost/compress/pull/143) [#144](https://github.com/klauspost/compress/pull/144) -* Aug 4, 2019: Faster zstd decompression. [#145](https://github.com/klauspost/compress/pull/145) [#143](https://github.com/klauspost/compress/pull/143) [#142](https://github.com/klauspost/compress/pull/142) -* July 15, 2019 (v1.7.4): Fix double EOF block in rare cases on zstd encoder. -* July 15, 2019 (v1.7.3): Minor speedup/compression increase in default zstd encoder. -* July 14, 2019: zstd decoder: Fix decompression error on multiple uses with mixed content. -* July 7, 2019 (v1.7.2): Snappy update, zstd decoder potential race fix. -* June 17, 2019: zstd decompression bugfix. -* June 17, 2019: fix 32 bit builds. -* June 17, 2019: Easier use in modules (less dependencies). -* June 9, 2019: New stronger "default" [zstd](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression mode. Matches zstd default compression ratio. -* June 5, 2019: 20-40% throughput in [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression and better compression. -* June 5, 2019: deflate/gzip compression: Reduce memory usage of lower compression levels. -* June 2, 2019: Added [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression! -* May 25, 2019: deflate/gzip: 10% faster bit writer, mostly visible in lower levels. -* Apr 22, 2019: [zstd](https://github.com/klauspost/compress/tree/master/zstd#zstd) decompression added. -* Aug 1, 2018: Added [huff0 README](https://github.com/klauspost/compress/tree/master/huff0#huff0-entropy-compression). 
-* Jul 8, 2018: Added [Performance Update 2018](#performance-update-2018) below. -* Jun 23, 2018: Merged [Go 1.11 inflate optimizations](https://go-review.googlesource.com/c/go/+/102235). Go 1.9 is now required. Backwards compatible version tagged with [v1.3.0](https://github.com/klauspost/compress/releases/tag/v1.3.0). -* Apr 2, 2018: Added [huff0](https://godoc.org/github.com/klauspost/compress/huff0) en/decoder. Experimental for now, API may change. -* Mar 4, 2018: Added [FSE Entropy](https://godoc.org/github.com/klauspost/compress/fse) en/decoder. Experimental for now, API may change. -* Nov 3, 2017: Add compression [Estimate](https://godoc.org/github.com/klauspost/compress#Estimate) function. -* May 28, 2017: Reduce allocations when resetting decoder. -* Apr 02, 2017: Change back to official crc32, since changes were merged in Go 1.7. -* Jan 14, 2017: Reduce stack pressure due to array copies. See [Issue #18625](https://github.com/golang/go/issues/18625). -* Oct 25, 2016: Level 2-4 have been rewritten and now offers significantly better performance than before. -* Oct 20, 2016: Port zlib changes from Go 1.7 to fix zlib writer issue. Please update. -* Oct 16, 2016: Go 1.7 changes merged. Apples to apples this package is a few percent faster, but has a significantly better balance between speed and compression per level. -* Mar 24, 2016: Always attempt Huffman encoding on level 4-7. This improves base 64 encoded data compression. -* Mar 24, 2016: Small speedup for level 1-3. -* Feb 19, 2016: Faster bit writer, level -2 is 15% faster, level 1 is 4% faster. -* Feb 19, 2016: Handle small payloads faster in level 1-3. -* Feb 19, 2016: Added faster level 2 + 3 compression modes. -* Feb 19, 2016: [Rebalanced compression levels](https://blog.klauspost.com/rebalancing-deflate-compression-levels/), so there is a more even progresssion in terms of compression. New default level is 5. -* Feb 14, 2016: Snappy: Merge upstream changes. -* Feb 14, 2016: Snappy: Fix aggressive skipping. -* Feb 14, 2016: Snappy: Update benchmark. -* Feb 13, 2016: Deflate: Fixed assembler problem that could lead to sub-optimal compression. -* Feb 12, 2016: Snappy: Added AMD64 SSE 4.2 optimizations to matching, which makes easy to compress material run faster. Typical speedup is around 25%. -* Feb 9, 2016: Added Snappy package fork. This version is 5-7% faster, much more on hard to compress content. -* Jan 30, 2016: Optimize level 1 to 3 by not considering static dictionary or storing uncompressed. ~4-5% speedup. -* Jan 16, 2016: Optimization on deflate level 1,2,3 compression. -* Jan 8 2016: Merge [CL 18317](https://go-review.googlesource.com/#/c/18317): fix reading, writing of zip64 archives. -* Dec 8 2015: Make level 1 and -2 deterministic even if write size differs. -* Dec 8 2015: Split encoding functions, so hashing and matching can potentially be inlined. 1-3% faster on AMD64. 5% faster on other platforms. -* Dec 8 2015: Fixed rare [one byte out-of bounds read](https://github.com/klauspost/compress/issues/20). Please update! -* Nov 23 2015: Optimization on token writer. ~2-4% faster. Contributed by [@dsnet](https://github.com/dsnet). -* Nov 20 2015: Small optimization to bit writer on 64 bit systems. -* Nov 17 2015: Fixed out-of-bound errors if the underlying Writer returned an error. See [#15](https://github.com/klauspost/compress/issues/15). -* Nov 12 2015: Added [io.WriterTo](https://golang.org/pkg/io/#WriterTo) support to gzip/inflate. 
-* Nov 11 2015: Merged [CL 16669](https://go-review.googlesource.com/#/c/16669/4): archive/zip: enable overriding (de)compressors per file -* Oct 15 2015: Added skipping on uncompressible data. Random data speed up >5x. - -
- -# deflate usage - -The packages are drop-in replacements for standard libraries. Simply replace the import path to use them: - -| old import | new import | Documentation -|--------------------|-----------------------------------------|--------------------| -| `compress/gzip` | `github.com/klauspost/compress/gzip` | [gzip](https://pkg.go.dev/github.com/klauspost/compress/gzip?tab=doc) -| `compress/zlib` | `github.com/klauspost/compress/zlib` | [zlib](https://pkg.go.dev/github.com/klauspost/compress/zlib?tab=doc) -| `archive/zip` | `github.com/klauspost/compress/zip` | [zip](https://pkg.go.dev/github.com/klauspost/compress/zip?tab=doc) -| `compress/flate` | `github.com/klauspost/compress/flate` | [flate](https://pkg.go.dev/github.com/klauspost/compress/flate?tab=doc) - -* Optimized [deflate](https://godoc.org/github.com/klauspost/compress/flate) packages which can be used as a dropin replacement for [gzip](https://godoc.org/github.com/klauspost/compress/gzip), [zip](https://godoc.org/github.com/klauspost/compress/zip) and [zlib](https://godoc.org/github.com/klauspost/compress/zlib). - -You may also be interested in [pgzip](https://github.com/klauspost/pgzip), which is a drop in replacement for gzip, which support multithreaded compression on big files and the optimized [crc32](https://github.com/klauspost/crc32) package used by these packages. - -The packages contains the same as the standard library, so you can use the godoc for that: [gzip](http://golang.org/pkg/compress/gzip/), [zip](http://golang.org/pkg/archive/zip/), [zlib](http://golang.org/pkg/compress/zlib/), [flate](http://golang.org/pkg/compress/flate/). - -Currently there is only minor speedup on decompression (mostly CRC32 calculation). - -Memory usage is typically 1MB for a Writer. stdlib is in the same range. -If you expect to have a lot of concurrently allocated Writers consider using -the stateless compress described below. - -For compression performance, see: [this spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing). - -To disable all assembly add `-tags=noasm`. This works across all packages. - -# Stateless compression - -This package offers stateless compression as a special option for gzip/deflate. -It will do compression but without maintaining any state between Write calls. - -This means there will be no memory kept between Write calls, but compression and speed will be suboptimal. - -This is only relevant in cases where you expect to run many thousands of compressors concurrently, -but with very little activity. This is *not* intended for regular web servers serving individual requests. - -Because of this, the size of actual Write calls will affect output size. - -In gzip, specify level `-3` / `gzip.StatelessCompression` to enable. - -For direct deflate use, NewStatelessWriter and StatelessDeflate are available. See [documentation](https://godoc.org/github.com/klauspost/compress/flate#NewStatelessWriter) - -A `bufio.Writer` can of course be used to control write sizes. For example, to use a 4KB buffer: - -```go - // replace 'ioutil.Discard' with your output. - gzw, err := gzip.NewWriterLevel(ioutil.Discard, gzip.StatelessCompression) - if err != nil { - return err - } - defer gzw.Close() - - w := bufio.NewWriterSize(gzw, 4096) - defer w.Flush() - - // Write to 'w' -``` - -This will only use up to 4KB in memory when the writer is idle. 
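As a complement to the gzip example above, a small sketch of the direct-deflate route this section mentions, assuming `flate.NewStatelessWriter` returns an `io.WriteCloser` as the linked documentation describes; the buffer size and sample data here are arbitrary:

```go
package main

import (
	"bufio"
	"bytes"
	"fmt"

	"github.com/klauspost/compress/flate"
)

func main() {
	// Arbitrary repetitive sample input.
	src := bytes.Repeat([]byte("stateless deflate "), 256)

	var dst bytes.Buffer
	fw := flate.NewStatelessWriter(&dst)

	// Batch small writes so each independently compressed block is a
	// reasonable size; no state is kept between Write calls.
	bw := bufio.NewWriterSize(fw, 4096)
	if _, err := bw.Write(src); err != nil {
		panic(err)
	}
	if err := bw.Flush(); err != nil {
		panic(err)
	}
	if err := fw.Close(); err != nil {
		panic(err)
	}

	fmt.Printf("raw %d bytes -> deflate %d bytes\n", len(src), dst.Len())
}
```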
- -Compression is almost always worse than the fastest compression level -and each write will allocate (a little) memory. - -# Performance Update 2018 - -It has been a while since we have been looking at the speed of this package compared to the standard library, so I thought I would re-do my tests and give some overall recommendations based on the current state. All benchmarks have been performed with Go 1.10 on my Desktop Intel(R) Core(TM) i7-2600 CPU @3.40GHz. Since I last ran the tests, I have gotten more RAM, which means tests with big files are no longer limited by my SSD. - -The raw results are in my [updated spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing). Due to cgo changes and upstream updates I could not get the cgo version of gzip to compile. Instead I included the [zstd](https://github.com/datadog/zstd) cgo implementation. If I get cgo gzip to work again, I might replace the results in the sheet. - -The columns to take note of are: *MB/s* - the throughput. *Reduction* - the data size reduction in percent of the original. *Rel Speed* - relative speed compared to the standard library at the same level. *Smaller* - how many percent smaller is the compressed output compared to stdlib. Negative means the output was bigger. *Loss* means the loss (or gain) in compression as a percentage difference of the input. - -The `gzstd` (standard library gzip) and `gzkp` (this package gzip) only use one CPU core. [`pgzip`](https://github.com/klauspost/pgzip), [`bgzf`](https://github.com/biogo/hts/tree/master/bgzf) use all 4 cores. [`zstd`](https://github.com/DataDog/zstd) uses one core, and is a beast (but not Go, yet). - - -## Overall differences. - -There appears to be a roughly 5-10% speed advantage over the standard library when comparing at similar compression levels. - -The biggest difference you will see is the result of [re-balancing](https://blog.klauspost.com/rebalancing-deflate-compression-levels/) the compression levels. I wanted my library to give a smoother transition between the compression levels than the standard library. - -This package attempts to provide a smoother transition, where "1" is taking a lot of shortcuts, "5" is the reasonable trade-off and "9" is the "give me the best compression", and the values in between give something reasonable in between. The standard library has big differences in levels 1-4, while levels 5-9 have no significant gains - often spending a lot more time than can be justified by the achieved compression. - -There are links to all the test data in the [spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing) in the top left field on each tab. - -## Web Content - -This test set aims to emulate typical use in a web server. The test set is 4GB of data in 53k files, and is a mixture of (mostly) HTML, JS, CSS. - -Since levels 1 and 9 are close to being the same code, their results are quite close. But looking at the levels in-between, the differences are quite big. - -Looking at level 6, this package is 88% faster, but will output about 6% more data. For a web server, this means you can serve 88% more data, but have to pay for 6% more bandwidth. You can draw your own conclusions on what would be the most expensive for your case. - -## Object files - -This test is for typical data files stored on a server. In this case it is a collection of Go precompiled objects. They are very compressible. 
- -The picture is similar to the web content, but with small differences since this is very compressible. Levels 2-3 offer good speed, but sacrifice quite a bit of compression. - -The standard library seems suboptimal on levels 3 and 4 - offering both worse compression and speed than levels 6 & 7 of this package, respectively. - -## Highly Compressible File - -This is a JSON file with very high redundancy. The reduction starts at 95% on level 1, so in real-life terms we are dealing with something like a highly redundant stream of data, etc. - -It is clearly visible that we are dealing with specialized content here, so the results are very scattered. This package does not do very well at levels 1-4, but picks up significantly at level 5, and levels 7 and 8 offer great speed for the achieved compression. - -So if you know your content is extremely compressible, you might want to go slightly higher than the defaults. The standard library has a huge gap between levels 3 and 4 in terms of speed (2.75x slowdown), so it offers little "middle ground". - -## Medium-High Compressible - -This is a pretty common test corpus: [enwik9](http://mattmahoney.net/dc/textdata.html). It contains the first 10^9 bytes of the English Wikipedia dump on Mar. 3, 2006. This is a very good test of typical text-based compression and more data-heavy streams. - -We see a similar picture here as in "Web Content". On equal levels some compression is sacrificed for more speed. Level 5 seems to be the best trade-off between speed and size, beating stdlib level 3 in both. - -## Medium Compressible - -I will combine two test sets, one [10GB file set](http://mattmahoney.net/dc/10gb.html) and a VM disk image (~8GB). Both contain different data types and represent a typical backup scenario. - -The most notable thing is how quickly the standard library drops to very low compression speeds around levels 5-6 without any big gains in compression. Since this type of data is fairly common, this does not seem like good behavior. - - -## Un-compressible Content - -This is mainly a test of how good the algorithms are at detecting un-compressible input. The standard library only offers this feature with very conservative settings at level 1. Obviously there is no reason for the algorithms to try to compress input that cannot be compressed. The only downside is that it might skip some compressible data on false detections. - - -## Huffman only compression - -This compression library adds a special compression level, named `HuffmanOnly`, which allows near-linear time compression. This is done by completely disabling matching of previous data, and only reducing the number of bits used to represent each character. - -This means that often-used characters, like 'e' and ' ' (space) in text, use the fewest bits, and rare characters like '¤' take more bits to represent. For more information see [wikipedia](https://en.wikipedia.org/wiki/Huffman_coding) or this nice [video](https://youtu.be/ZdooBTdW5bM). - -Since this type of compression has much less variance, the compression speed is mostly unaffected by the input data, and is usually more than *180MB/s* for a single core. - -The downside is that the compression ratio is usually considerably worse than even the fastest conventional compression. The compression ratio can never be better than 8:1 (12.5%). - -The linear time compression can be used as a "better than nothing" mode, where you cannot risk the encoder slowing down on some content. 
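To make the Huffman-only mode concrete, here is a small illustrative sketch (not from the original README) that assumes the `HuffmanOnly` level constant exposed by this package's gzip, mirroring `compress/flate`:

```go
package main

import (
	"io"
	"log"
	"os"

	"github.com/klauspost/compress/gzip"
)

func main() {
	// HuffmanOnly disables matching against previous data entirely;
	// only entropy coding is applied, so speed is largely independent
	// of the input, at the cost of a much worse ratio (at best 8:1).
	zw, err := gzip.NewWriterLevel(os.Stdout, gzip.HuffmanOnly)
	if err != nil {
		log.Fatal(err)
	}
	if _, err := io.Copy(zw, os.Stdin); err != nil {
		log.Fatal(err)
	}
	if err := zw.Close(); err != nil {
		log.Fatal(err)
	}
}
```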
For comparison, the size of the "Twain" text is *233460 bytes* (+29% vs. level 1) and encode speed is 144MB/s (4.5x level 1). So in this case you trade a roughly 30% size increase for a 4x speedup. - -For more information see my blog post on [Fast Linear Time Compression](http://blog.klauspost.com/constant-time-gzipzip-compression/). - -This is implemented in Go 1.7 as "Huffman Only" mode, though not exposed for gzip. - -# Other packages - -Here are other packages of good quality and pure Go (no cgo wrappers or autoconverted code): - -* [github.com/pierrec/lz4](https://github.com/pierrec/lz4) - strong multithreaded LZ4 compression. -* [github.com/cosnicolaou/pbzip2](https://github.com/cosnicolaou/pbzip2) - multithreaded bzip2 decompression. -* [github.com/dsnet/compress](https://github.com/dsnet/compress) - brotli decompression, bzip2 writer. -* [github.com/ronanh/intcomp](https://github.com/ronanh/intcomp) - Integer compression. -* [github.com/spenczar/fpc](https://github.com/spenczar/fpc) - Float compression. -* [github.com/minio/zipindex](https://github.com/minio/zipindex) - External ZIP directory index. -* [github.com/ybirader/pzip](https://github.com/ybirader/pzip) - Fast concurrent zip archiver and extractor. - -# license - -This code is licensed under the same conditions as the original Go code. See LICENSE file. diff --git a/vendor/github.com/klauspost/compress/SECURITY.md b/vendor/github.com/klauspost/compress/SECURITY.md deleted file mode 100644 index ca6685e2b..000000000 --- a/vendor/github.com/klauspost/compress/SECURITY.md +++ /dev/null @@ -1,25 +0,0 @@ -# Security Policy - -## Supported Versions - -Security updates are applied only to the latest release. - -## Vulnerability Definition - -A security vulnerability is a bug that, with certain input, triggers a crash or an infinite loop. Most calls will have varying execution time and only in rare cases will slow operation be considered a security vulnerability. - -Corrupted output generally is not considered a security vulnerability, unless independent operations are able to affect each other. Note that not all functionality is re-entrant and safe to use concurrently. - -Out-of-memory crashes only apply if the en/decoder uses an abnormal amount of memory, with appropriate options applied to limit maximum window size, concurrency, etc. However, if you are in doubt you are welcome to file a security issue. - -It is assumed that all callers are trusted, meaning internal data exposed through reflection or inspection of returned data structures is not considered a vulnerability. - -Vulnerabilities resulting from compiler/assembler errors should be reported upstream. Depending on the severity this package may or may not implement a workaround. - -## Reporting a Vulnerability - -If you have discovered a security vulnerability in this project, please report it privately. **Do not disclose it as a public issue.** This gives us time to work with you to fix the issue before public exposure, reducing the chance that the exploit will be used before a patch is released. - -Please disclose it at [security advisory](https://github.com/klauspost/compress/security/advisories/new). If possible please provide a minimal reproducer. If the issue only applies to a single platform, it would be helpful to provide access to that. - -This project is maintained by a team of volunteers on a reasonable-effort basis. As such, vulnerabilities will be disclosed on a best-effort basis. 
diff --git a/vendor/github.com/klauspost/compress/compressible.go b/vendor/github.com/klauspost/compress/compressible.go deleted file mode 100644 index ea5a692d5..000000000 --- a/vendor/github.com/klauspost/compress/compressible.go +++ /dev/null @@ -1,85 +0,0 @@ -package compress - -import "math" - -// Estimate returns a normalized compressibility estimate of block b. -// Values close to zero are likely uncompressible. -// Values above 0.1 are likely to be compressible. -// Values above 0.5 are very compressible. -// Very small lengths will return 0. -func Estimate(b []byte) float64 { - if len(b) < 16 { - return 0 - } - - // Correctly predicted order 1 - hits := 0 - lastMatch := false - var o1 [256]byte - var hist [256]int - c1 := byte(0) - for _, c := range b { - if c == o1[c1] { - // We only count a hit if there was two correct predictions in a row. - if lastMatch { - hits++ - } - lastMatch = true - } else { - lastMatch = false - } - o1[c1] = c - c1 = c - hist[c]++ - } - - // Use x^0.6 to give better spread - prediction := math.Pow(float64(hits)/float64(len(b)), 0.6) - - // Calculate histogram distribution - variance := float64(0) - avg := float64(len(b)) / 256 - - for _, v := range hist { - Δ := float64(v) - avg - variance += Δ * Δ - } - - stddev := math.Sqrt(float64(variance)) / float64(len(b)) - exp := math.Sqrt(1 / float64(len(b))) - - // Subtract expected stddev - stddev -= exp - if stddev < 0 { - stddev = 0 - } - stddev *= 1 + exp - - // Use x^0.4 to give better spread - entropy := math.Pow(stddev, 0.4) - - // 50/50 weight between prediction and histogram distribution - return math.Pow((prediction+entropy)/2, 0.9) -} - -// ShannonEntropyBits returns the number of bits minimum required to represent -// an entropy encoding of the input bytes. -// https://en.wiktionary.org/wiki/Shannon_entropy -func ShannonEntropyBits(b []byte) int { - if len(b) == 0 { - return 0 - } - var hist [256]int - for _, c := range b { - hist[c]++ - } - shannon := float64(0) - invTotal := 1.0 / float64(len(b)) - for _, v := range hist[:] { - if v > 0 { - n := float64(v) - shannon += math.Ceil(-math.Log2(n*invTotal) * n) - } - } - return int(math.Ceil(shannon)) -} diff --git a/vendor/github.com/klauspost/compress/fse/README.md b/vendor/github.com/klauspost/compress/fse/README.md deleted file mode 100644 index ea7324da6..000000000 --- a/vendor/github.com/klauspost/compress/fse/README.md +++ /dev/null @@ -1,79 +0,0 @@ -# Finite State Entropy - -This package provides Finite State Entropy encoding and decoding. - -Finite State Entropy (also referenced as [tANS](https://en.wikipedia.org/wiki/Asymmetric_numeral_systems#tANS)) -encoding provides a fast near-optimal symbol encoding/decoding -for byte blocks as implemented in [zstandard](https://github.com/facebook/zstd). - -This can be used for compressing input with a lot of similar input values to the smallest number of bytes. -This does not perform any multi-byte [dictionary coding](https://en.wikipedia.org/wiki/Dictionary_coder) as LZ coders, -but it can be used as a secondary step to compressors (like Snappy) that does not do entropy encoding. - -* [Godoc documentation](https://godoc.org/github.com/klauspost/compress/fse) - -## News - - * Feb 2018: First implementation released. Consider this beta software for now. - -# Usage - -This package provides a low level interface that allows to compress single independent blocks. - -Each block is separate, and there is no built in integrity checks. 
-This means that the caller should keep track of block sizes and also do checksums if needed. - -Compressing a block is done via the [`Compress`](https://godoc.org/github.com/klauspost/compress/fse#Compress) function. -You must provide input and will receive the output and possibly an error. - -These error values can be returned: - -| Error | Description | -|---------------------|-----------------------------------------------------------------------------| -| `` | Everything ok, output is returned | -| `ErrIncompressible` | Returned when input is judged to be too hard to compress | -| `ErrUseRLE` | Returned from the compressor when the input is a single byte value repeated | -| `(error)` | An internal error occurred. | - -As can be seen above, some errors will be returned even under normal operation, so it is important to handle these. - -To reduce allocations you can provide a [`Scratch`](https://godoc.org/github.com/klauspost/compress/fse#Scratch) object -that can be re-used for successive calls. Both compression and decompression accept a `Scratch` object, and the same -object can be used for both. - -Be aware that when re-using a `Scratch` object, the *output* buffer is also re-used, so if you are still using this -you must set the `Out` field in the scratch to nil. The same buffer is used for compression and decompression output. - -Decompressing is done by calling the [`Decompress`](https://godoc.org/github.com/klauspost/compress/fse#Decompress) function. -You must provide the output from the compression stage, at exactly the size you got back. If you receive an error back, -your input was likely corrupted. - -It is important to note that a successful decoding does *not* mean your output matches your original input. -There are no integrity checks, so relying on errors from the decompressor does not assure your data is valid. - -For more detailed usage, see examples in the [godoc documentation](https://godoc.org/github.com/klauspost/compress/fse#pkg-examples). - -# Performance - -A lot of factors affect speed. Block sizes and compressibility of the material are primary factors. -All compression functions currently run only on the calling goroutine, so only one core will be used per block. - -The compressor is significantly faster if symbols are kept as small as possible. The highest byte value of the input -is used to reduce some of the processing, so if all your input is above byte value 64 for instance, it may be -beneficial to transpose all your input values down by 64. - -With moderate block sizes around 64k, speeds are typically 200MB/s per core for compression and -around 300MB/s for decompression. - -The same hardware typically does Huffman (deflate) encoding at 125MB/s and decompression at 100MB/s. - -# Plans - -At one point, more internals will be exposed to facilitate more "expert" usage of the components. - -A streaming interface is also likely to be implemented, likely compatible with the [FSE stream format](https://github.com/Cyan4973/FiniteStateEntropy/blob/dev/programs/fileio.c#L261). - -# Contributing - -Contributions are always welcome. Be aware that adding public functions will require good justification and breaking -changes will likely not be accepted. If in doubt, open an issue before writing the PR. 
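The compress/decompress flow described in the fse README above can be summarized in a short round-trip sketch. This is not part of the original document; it assumes the documented `fse.Compress(in []byte, s *Scratch)` and `fse.Decompress(b []byte, s *Scratch)` signatures together with the `ErrIncompressible`/`ErrUseRLE` sentinels, and it detaches the output from the `Scratch` before re-use, as the README recommends.

```go
package main

import (
	"bytes"
	"errors"
	"fmt"
	"log"

	"github.com/klauspost/compress/fse"
)

func main() {
	in := bytes.Repeat([]byte("entropy coding works on byte frequencies "), 100)

	var s fse.Scratch
	comp, err := fse.Compress(in, &s)
	switch {
	case errors.Is(err, fse.ErrIncompressible):
		// Expected under normal operation: store the block uncompressed.
		log.Println("input judged incompressible, storing raw")
		return
	case errors.Is(err, fse.ErrUseRLE):
		// Single repeated byte value: store as RLE instead.
		log.Println("input is a single repeated byte, use RLE")
		return
	case err != nil:
		log.Fatal(err)
	}

	// The output buffer lives in the Scratch; detach it before re-use.
	comp = append([]byte(nil), comp...)
	s.Out = nil

	// Decompress expects exactly the bytes returned by Compress.
	// Note: success does not guarantee the output matches the input,
	// since there are no built-in integrity checks.
	dec, err := fse.Decompress(comp, &s)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("round trip ok:", bytes.Equal(dec, in))
}
```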
\ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/fse/bitreader.go b/vendor/github.com/klauspost/compress/fse/bitreader.go deleted file mode 100644 index f65eb3909..000000000 --- a/vendor/github.com/klauspost/compress/fse/bitreader.go +++ /dev/null @@ -1,122 +0,0 @@ -// Copyright 2018 Klaus Post. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. -// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. - -package fse - -import ( - "encoding/binary" - "errors" - "io" -) - -// bitReader reads a bitstream in reverse. -// The last set bit indicates the start of the stream and is used -// for aligning the input. -type bitReader struct { - in []byte - off uint // next byte to read is at in[off - 1] - value uint64 - bitsRead uint8 -} - -// init initializes and resets the bit reader. -func (b *bitReader) init(in []byte) error { - if len(in) < 1 { - return errors.New("corrupt stream: too short") - } - b.in = in - b.off = uint(len(in)) - // The highest bit of the last byte indicates where to start - v := in[len(in)-1] - if v == 0 { - return errors.New("corrupt stream, did not find end of stream") - } - b.bitsRead = 64 - b.value = 0 - if len(in) >= 8 { - b.fillFastStart() - } else { - b.fill() - b.fill() - } - b.bitsRead += 8 - uint8(highBits(uint32(v))) - return nil -} - -// getBits will return n bits. n can be 0. -func (b *bitReader) getBits(n uint8) uint16 { - if n == 0 || b.bitsRead >= 64 { - return 0 - } - return b.getBitsFast(n) -} - -// getBitsFast requires that at least one bit is requested every time. -// There are no checks if the buffer is filled. -func (b *bitReader) getBitsFast(n uint8) uint16 { - const regMask = 64 - 1 - v := uint16((b.value << (b.bitsRead & regMask)) >> ((regMask + 1 - n) & regMask)) - b.bitsRead += n - return v -} - -// fillFast() will make sure at least 32 bits are available. -// There must be at least 4 bytes available. -func (b *bitReader) fillFast() { - if b.bitsRead < 32 { - return - } - // 2 bounds checks. - v := b.in[b.off-4:] - v = v[:4] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) - b.value = (b.value << 32) | uint64(low) - b.bitsRead -= 32 - b.off -= 4 -} - -// fill() will make sure at least 32 bits are available. -func (b *bitReader) fill() { - if b.bitsRead < 32 { - return - } - if b.off > 4 { - v := b.in[b.off-4:] - v = v[:4] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) - b.value = (b.value << 32) | uint64(low) - b.bitsRead -= 32 - b.off -= 4 - return - } - for b.off > 0 { - b.value = (b.value << 8) | uint64(b.in[b.off-1]) - b.bitsRead -= 8 - b.off-- - } -} - -// fillFastStart() assumes the bitreader is empty and there is at least 8 bytes to read. -func (b *bitReader) fillFastStart() { - // Do single re-slice to avoid bounds checks. - b.value = binary.LittleEndian.Uint64(b.in[b.off-8:]) - b.bitsRead = 0 - b.off -= 8 -} - -// finished returns true if all bits have been read from the bit stream. -func (b *bitReader) finished() bool { - return b.bitsRead >= 64 && b.off == 0 -} - -// close the bitstream and returns an error if out-of-buffer reads occurred. -func (b *bitReader) close() error { - // Release reference. 
- b.in = nil - if b.bitsRead > 64 { - return io.ErrUnexpectedEOF - } - return nil -} diff --git a/vendor/github.com/klauspost/compress/fse/bitwriter.go b/vendor/github.com/klauspost/compress/fse/bitwriter.go deleted file mode 100644 index e82fa3bb7..000000000 --- a/vendor/github.com/klauspost/compress/fse/bitwriter.go +++ /dev/null @@ -1,167 +0,0 @@ -// Copyright 2018 Klaus Post. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. -// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. - -package fse - -import "fmt" - -// bitWriter will write bits. -// First bit will be LSB of the first byte of output. -type bitWriter struct { - bitContainer uint64 - nBits uint8 - out []byte -} - -// bitMask16 is bitmasks. Has extra to avoid bounds check. -var bitMask16 = [32]uint16{ - 0, 1, 3, 7, 0xF, 0x1F, - 0x3F, 0x7F, 0xFF, 0x1FF, 0x3FF, 0x7FF, - 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, 0xFFFF, - 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, - 0xFFFF, 0xFFFF} /* up to 16 bits */ - -// addBits16NC will add up to 16 bits. -// It will not check if there is space for them, -// so the caller must ensure that it has flushed recently. -func (b *bitWriter) addBits16NC(value uint16, bits uint8) { - b.bitContainer |= uint64(value&bitMask16[bits&31]) << (b.nBits & 63) - b.nBits += bits -} - -// addBits16Clean will add up to 16 bits. value may not contain more set bits than indicated. -// It will not check if there is space for them, so the caller must ensure that it has flushed recently. -func (b *bitWriter) addBits16Clean(value uint16, bits uint8) { - b.bitContainer |= uint64(value) << (b.nBits & 63) - b.nBits += bits -} - -// addBits16ZeroNC will add up to 16 bits. -// It will not check if there is space for them, -// so the caller must ensure that it has flushed recently. -// This is fastest if bits can be zero. -func (b *bitWriter) addBits16ZeroNC(value uint16, bits uint8) { - if bits == 0 { - return - } - value <<= (16 - bits) & 15 - value >>= (16 - bits) & 15 - b.bitContainer |= uint64(value) << (b.nBits & 63) - b.nBits += bits -} - -// flush will flush all pending full bytes. -// There will be at least 56 bits available for writing when this has been called. -// Using flush32 is faster, but leaves less space for writing. 
-func (b *bitWriter) flush() { - v := b.nBits >> 3 - switch v { - case 0: - case 1: - b.out = append(b.out, - byte(b.bitContainer), - ) - case 2: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - ) - case 3: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - ) - case 4: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - byte(b.bitContainer>>24), - ) - case 5: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - byte(b.bitContainer>>24), - byte(b.bitContainer>>32), - ) - case 6: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - byte(b.bitContainer>>24), - byte(b.bitContainer>>32), - byte(b.bitContainer>>40), - ) - case 7: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - byte(b.bitContainer>>24), - byte(b.bitContainer>>32), - byte(b.bitContainer>>40), - byte(b.bitContainer>>48), - ) - case 8: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - byte(b.bitContainer>>24), - byte(b.bitContainer>>32), - byte(b.bitContainer>>40), - byte(b.bitContainer>>48), - byte(b.bitContainer>>56), - ) - default: - panic(fmt.Errorf("bits (%d) > 64", b.nBits)) - } - b.bitContainer >>= v << 3 - b.nBits &= 7 -} - -// flush32 will flush out, so there are at least 32 bits available for writing. -func (b *bitWriter) flush32() { - if b.nBits < 32 { - return - } - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - byte(b.bitContainer>>24)) - b.nBits -= 32 - b.bitContainer >>= 32 -} - -// flushAlign will flush remaining full bytes and align to next byte boundary. -func (b *bitWriter) flushAlign() { - nbBytes := (b.nBits + 7) >> 3 - for i := uint8(0); i < nbBytes; i++ { - b.out = append(b.out, byte(b.bitContainer>>(i*8))) - } - b.nBits = 0 - b.bitContainer = 0 -} - -// close will write the alignment bit and write the final byte(s) -// to the output. -func (b *bitWriter) close() { - // End mark - b.addBits16Clean(1, 1) - // flush until next byte. - b.flushAlign() -} - -// reset and continue writing by appending to out. -func (b *bitWriter) reset(out []byte) { - b.bitContainer = 0 - b.nBits = 0 - b.out = out -} diff --git a/vendor/github.com/klauspost/compress/fse/bytereader.go b/vendor/github.com/klauspost/compress/fse/bytereader.go deleted file mode 100644 index abade2d60..000000000 --- a/vendor/github.com/klauspost/compress/fse/bytereader.go +++ /dev/null @@ -1,47 +0,0 @@ -// Copyright 2018 Klaus Post. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. -// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. - -package fse - -// byteReader provides a byte reader that reads -// little endian values from a byte stream. -// The input stream is manually advanced. -// The reader performs no bounds checks. -type byteReader struct { - b []byte - off int -} - -// init will initialize the reader and set the input. -func (b *byteReader) init(in []byte) { - b.b = in - b.off = 0 -} - -// advance the stream b n bytes. -func (b *byteReader) advance(n uint) { - b.off += int(n) -} - -// Uint32 returns a little endian uint32 starting at current offset. 
-func (b byteReader) Uint32() uint32 { - b2 := b.b[b.off:] - b2 = b2[:4] - v3 := uint32(b2[3]) - v2 := uint32(b2[2]) - v1 := uint32(b2[1]) - v0 := uint32(b2[0]) - return v0 | (v1 << 8) | (v2 << 16) | (v3 << 24) -} - -// unread returns the unread portion of the input. -func (b byteReader) unread() []byte { - return b.b[b.off:] -} - -// remain will return the number of bytes remaining. -func (b byteReader) remain() int { - return len(b.b) - b.off -} diff --git a/vendor/github.com/klauspost/compress/fse/compress.go b/vendor/github.com/klauspost/compress/fse/compress.go deleted file mode 100644 index 074018d8f..000000000 --- a/vendor/github.com/klauspost/compress/fse/compress.go +++ /dev/null @@ -1,683 +0,0 @@ -// Copyright 2018 Klaus Post. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. -// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. - -package fse - -import ( - "errors" - "fmt" -) - -// Compress the input bytes. Input must be < 2GB. -// Provide a Scratch buffer to avoid memory allocations. -// Note that the output is also kept in the scratch buffer. -// If input is too hard to compress, ErrIncompressible is returned. -// If input is a single byte value repeated ErrUseRLE is returned. -func Compress(in []byte, s *Scratch) ([]byte, error) { - if len(in) <= 1 { - return nil, ErrIncompressible - } - if len(in) > (2<<30)-1 { - return nil, errors.New("input too big, must be < 2GB") - } - s, err := s.prepare(in) - if err != nil { - return nil, err - } - - // Create histogram, if none was provided. - maxCount := s.maxCount - if maxCount == 0 { - maxCount = s.countSimple(in) - } - // Reset for next run. - s.clearCount = true - s.maxCount = 0 - if maxCount == len(in) { - // One symbol, use RLE - return nil, ErrUseRLE - } - if maxCount == 1 || maxCount < (len(in)>>7) { - // Each symbol present maximum once or too well distributed. - return nil, ErrIncompressible - } - s.optimalTableLog() - err = s.normalizeCount() - if err != nil { - return nil, err - } - err = s.writeCount() - if err != nil { - return nil, err - } - - if false { - err = s.validateNorm() - if err != nil { - return nil, err - } - } - - err = s.buildCTable() - if err != nil { - return nil, err - } - err = s.compress(in) - if err != nil { - return nil, err - } - s.Out = s.bw.out - // Check if we compressed. - if len(s.Out) >= len(in) { - return nil, ErrIncompressible - } - return s.Out, nil -} - -// cState contains the compression state of a stream. -type cState struct { - bw *bitWriter - stateTable []uint16 - state uint16 -} - -// init will initialize the compression state to the first symbol of the stream. -func (c *cState) init(bw *bitWriter, ct *cTable, tableLog uint8, first symbolTransform) { - c.bw = bw - c.stateTable = ct.stateTable - - nbBitsOut := (first.deltaNbBits + (1 << 15)) >> 16 - im := int32((nbBitsOut << 16) - first.deltaNbBits) - lu := (im >> nbBitsOut) + first.deltaFindState - c.state = c.stateTable[lu] -} - -// encode the output symbol provided and write it to the bitstream. -func (c *cState) encode(symbolTT symbolTransform) { - nbBitsOut := (uint32(c.state) + symbolTT.deltaNbBits) >> 16 - dstState := int32(c.state>>(nbBitsOut&15)) + symbolTT.deltaFindState - c.bw.addBits16NC(c.state, uint8(nbBitsOut)) - c.state = c.stateTable[dstState] -} - -// encode the output symbol provided and write it to the bitstream. 
-func (c *cState) encodeZero(symbolTT symbolTransform) { - nbBitsOut := (uint32(c.state) + symbolTT.deltaNbBits) >> 16 - dstState := int32(c.state>>(nbBitsOut&15)) + symbolTT.deltaFindState - c.bw.addBits16ZeroNC(c.state, uint8(nbBitsOut)) - c.state = c.stateTable[dstState] -} - -// flush will write the tablelog to the output and flush the remaining full bytes. -func (c *cState) flush(tableLog uint8) { - c.bw.flush32() - c.bw.addBits16NC(c.state, tableLog) - c.bw.flush() -} - -// compress is the main compression loop that will encode the input from the last byte to the first. -func (s *Scratch) compress(src []byte) error { - if len(src) <= 2 { - return errors.New("compress: src too small") - } - tt := s.ct.symbolTT[:256] - s.bw.reset(s.Out) - - // Our two states each encodes every second byte. - // Last byte encoded (first byte decoded) will always be encoded by c1. - var c1, c2 cState - - // Encode so remaining size is divisible by 4. - ip := len(src) - if ip&1 == 1 { - c1.init(&s.bw, &s.ct, s.actualTableLog, tt[src[ip-1]]) - c2.init(&s.bw, &s.ct, s.actualTableLog, tt[src[ip-2]]) - c1.encodeZero(tt[src[ip-3]]) - ip -= 3 - } else { - c2.init(&s.bw, &s.ct, s.actualTableLog, tt[src[ip-1]]) - c1.init(&s.bw, &s.ct, s.actualTableLog, tt[src[ip-2]]) - ip -= 2 - } - if ip&2 != 0 { - c2.encodeZero(tt[src[ip-1]]) - c1.encodeZero(tt[src[ip-2]]) - ip -= 2 - } - src = src[:ip] - - // Main compression loop. - switch { - case !s.zeroBits && s.actualTableLog <= 8: - // We can encode 4 symbols without requiring a flush. - // We do not need to check if any output is 0 bits. - for ; len(src) >= 4; src = src[:len(src)-4] { - s.bw.flush32() - v3, v2, v1, v0 := src[len(src)-4], src[len(src)-3], src[len(src)-2], src[len(src)-1] - c2.encode(tt[v0]) - c1.encode(tt[v1]) - c2.encode(tt[v2]) - c1.encode(tt[v3]) - } - case !s.zeroBits: - // We do not need to check if any output is 0 bits. - for ; len(src) >= 4; src = src[:len(src)-4] { - s.bw.flush32() - v3, v2, v1, v0 := src[len(src)-4], src[len(src)-3], src[len(src)-2], src[len(src)-1] - c2.encode(tt[v0]) - c1.encode(tt[v1]) - s.bw.flush32() - c2.encode(tt[v2]) - c1.encode(tt[v3]) - } - case s.actualTableLog <= 8: - // We can encode 4 symbols without requiring a flush - for ; len(src) >= 4; src = src[:len(src)-4] { - s.bw.flush32() - v3, v2, v1, v0 := src[len(src)-4], src[len(src)-3], src[len(src)-2], src[len(src)-1] - c2.encodeZero(tt[v0]) - c1.encodeZero(tt[v1]) - c2.encodeZero(tt[v2]) - c1.encodeZero(tt[v3]) - } - default: - for ; len(src) >= 4; src = src[:len(src)-4] { - s.bw.flush32() - v3, v2, v1, v0 := src[len(src)-4], src[len(src)-3], src[len(src)-2], src[len(src)-1] - c2.encodeZero(tt[v0]) - c1.encodeZero(tt[v1]) - s.bw.flush32() - c2.encodeZero(tt[v2]) - c1.encodeZero(tt[v3]) - } - } - - // Flush final state. - // Used to initialize state when decoding. - c2.flush(s.actualTableLog) - c1.flush(s.actualTableLog) - - s.bw.close() - return nil -} - -// writeCount will write the normalized histogram count to header. -// This is read back by readNCount. 
-func (s *Scratch) writeCount() error { - var ( - tableLog = s.actualTableLog - tableSize = 1 << tableLog - previous0 bool - charnum uint16 - - maxHeaderSize = ((int(s.symbolLen)*int(tableLog) + 4 + 2) >> 3) + 3 - - // Write Table Size - bitStream = uint32(tableLog - minTablelog) - bitCount = uint(4) - remaining = int16(tableSize + 1) /* +1 for extra accuracy */ - threshold = int16(tableSize) - nbBits = uint(tableLog + 1) - ) - if cap(s.Out) < maxHeaderSize { - s.Out = make([]byte, 0, s.br.remain()+maxHeaderSize) - } - outP := uint(0) - out := s.Out[:maxHeaderSize] - - // stops at 1 - for remaining > 1 { - if previous0 { - start := charnum - for s.norm[charnum] == 0 { - charnum++ - } - for charnum >= start+24 { - start += 24 - bitStream += uint32(0xFFFF) << bitCount - out[outP] = byte(bitStream) - out[outP+1] = byte(bitStream >> 8) - outP += 2 - bitStream >>= 16 - } - for charnum >= start+3 { - start += 3 - bitStream += 3 << bitCount - bitCount += 2 - } - bitStream += uint32(charnum-start) << bitCount - bitCount += 2 - if bitCount > 16 { - out[outP] = byte(bitStream) - out[outP+1] = byte(bitStream >> 8) - outP += 2 - bitStream >>= 16 - bitCount -= 16 - } - } - - count := s.norm[charnum] - charnum++ - max := (2*threshold - 1) - remaining - if count < 0 { - remaining += count - } else { - remaining -= count - } - count++ // +1 for extra accuracy - if count >= threshold { - count += max // [0..max[ [max..threshold[ (...) [threshold+max 2*threshold[ - } - bitStream += uint32(count) << bitCount - bitCount += nbBits - if count < max { - bitCount-- - } - - previous0 = count == 1 - if remaining < 1 { - return errors.New("internal error: remaining<1") - } - for remaining < threshold { - nbBits-- - threshold >>= 1 - } - - if bitCount > 16 { - out[outP] = byte(bitStream) - out[outP+1] = byte(bitStream >> 8) - outP += 2 - bitStream >>= 16 - bitCount -= 16 - } - } - - out[outP] = byte(bitStream) - out[outP+1] = byte(bitStream >> 8) - outP += (bitCount + 7) / 8 - - if charnum > s.symbolLen { - return errors.New("internal error: charnum > s.symbolLen") - } - s.Out = out[:outP] - return nil -} - -// symbolTransform contains the state transform for a symbol. -type symbolTransform struct { - deltaFindState int32 - deltaNbBits uint32 -} - -// String prints values as a human readable string. -func (s symbolTransform) String() string { - return fmt.Sprintf("dnbits: %08x, fs:%d", s.deltaNbBits, s.deltaFindState) -} - -// cTable contains tables used for compression. -type cTable struct { - tableSymbol []byte - stateTable []uint16 - symbolTT []symbolTransform -} - -// allocCtable will allocate tables needed for compression. -// If existing tables a re big enough, they are simply re-used. -func (s *Scratch) allocCtable() { - tableSize := 1 << s.actualTableLog - // get tableSymbol that is big enough. - if cap(s.ct.tableSymbol) < tableSize { - s.ct.tableSymbol = make([]byte, tableSize) - } - s.ct.tableSymbol = s.ct.tableSymbol[:tableSize] - - ctSize := tableSize - if cap(s.ct.stateTable) < ctSize { - s.ct.stateTable = make([]uint16, ctSize) - } - s.ct.stateTable = s.ct.stateTable[:ctSize] - - if cap(s.ct.symbolTT) < 256 { - s.ct.symbolTT = make([]symbolTransform, 256) - } - s.ct.symbolTT = s.ct.symbolTT[:256] -} - -// buildCTable will populate the compression table so it is ready to be used. 
-func (s *Scratch) buildCTable() error { - tableSize := uint32(1 << s.actualTableLog) - highThreshold := tableSize - 1 - var cumul [maxSymbolValue + 2]int16 - - s.allocCtable() - tableSymbol := s.ct.tableSymbol[:tableSize] - // symbol start positions - { - cumul[0] = 0 - for ui, v := range s.norm[:s.symbolLen-1] { - u := byte(ui) // one less than reference - if v == -1 { - // Low proba symbol - cumul[u+1] = cumul[u] + 1 - tableSymbol[highThreshold] = u - highThreshold-- - } else { - cumul[u+1] = cumul[u] + v - } - } - // Encode last symbol separately to avoid overflowing u - u := int(s.symbolLen - 1) - v := s.norm[s.symbolLen-1] - if v == -1 { - // Low proba symbol - cumul[u+1] = cumul[u] + 1 - tableSymbol[highThreshold] = byte(u) - highThreshold-- - } else { - cumul[u+1] = cumul[u] + v - } - if uint32(cumul[s.symbolLen]) != tableSize { - return fmt.Errorf("internal error: expected cumul[s.symbolLen] (%d) == tableSize (%d)", cumul[s.symbolLen], tableSize) - } - cumul[s.symbolLen] = int16(tableSize) + 1 - } - // Spread symbols - s.zeroBits = false - { - step := tableStep(tableSize) - tableMask := tableSize - 1 - var position uint32 - // if any symbol > largeLimit, we may have 0 bits output. - largeLimit := int16(1 << (s.actualTableLog - 1)) - for ui, v := range s.norm[:s.symbolLen] { - symbol := byte(ui) - if v > largeLimit { - s.zeroBits = true - } - for nbOccurrences := int16(0); nbOccurrences < v; nbOccurrences++ { - tableSymbol[position] = symbol - position = (position + step) & tableMask - for position > highThreshold { - position = (position + step) & tableMask - } /* Low proba area */ - } - } - - // Check if we have gone through all positions - if position != 0 { - return errors.New("position!=0") - } - } - - // Build table - table := s.ct.stateTable - { - tsi := int(tableSize) - for u, v := range tableSymbol { - // TableU16 : sorted by symbol order; gives next state value - table[cumul[v]] = uint16(tsi + u) - cumul[v]++ - } - } - - // Build Symbol Transformation Table - { - total := int16(0) - symbolTT := s.ct.symbolTT[:s.symbolLen] - tableLog := s.actualTableLog - tl := (uint32(tableLog) << 16) - (1 << tableLog) - for i, v := range s.norm[:s.symbolLen] { - switch v { - case 0: - case -1, 1: - symbolTT[i].deltaNbBits = tl - symbolTT[i].deltaFindState = int32(total - 1) - total++ - default: - maxBitsOut := uint32(tableLog) - highBits(uint32(v-1)) - minStatePlus := uint32(v) << maxBitsOut - symbolTT[i].deltaNbBits = (maxBitsOut << 16) - minStatePlus - symbolTT[i].deltaFindState = int32(total - v) - total += v - } - } - if total != int16(tableSize) { - return fmt.Errorf("total mismatch %d (got) != %d (want)", total, tableSize) - } - } - return nil -} - -// countSimple will create a simple histogram in s.count. -// Returns the biggest count. -// Does not update s.clearCount. -func (s *Scratch) countSimple(in []byte) (max int) { - for _, v := range in { - s.count[v]++ - } - m, symlen := uint32(0), s.symbolLen - for i, v := range s.count[:] { - if v == 0 { - continue - } - if v > m { - m = v - } - symlen = uint16(i) + 1 - } - s.symbolLen = symlen - return int(m) -} - -// minTableLog provides the minimum logSize to safely represent a distribution. 
-func (s *Scratch) minTableLog() uint8 { - minBitsSrc := highBits(uint32(s.br.remain()-1)) + 1 - minBitsSymbols := highBits(uint32(s.symbolLen-1)) + 2 - if minBitsSrc < minBitsSymbols { - return uint8(minBitsSrc) - } - return uint8(minBitsSymbols) -} - -// optimalTableLog calculates and sets the optimal tableLog in s.actualTableLog -func (s *Scratch) optimalTableLog() { - tableLog := s.TableLog - minBits := s.minTableLog() - maxBitsSrc := uint8(highBits(uint32(s.br.remain()-1))) - 2 - if maxBitsSrc < tableLog { - // Accuracy can be reduced - tableLog = maxBitsSrc - } - if minBits > tableLog { - tableLog = minBits - } - // Need a minimum to safely represent all symbol values - if tableLog < minTablelog { - tableLog = minTablelog - } - if tableLog > maxTableLog { - tableLog = maxTableLog - } - s.actualTableLog = tableLog -} - -var rtbTable = [...]uint32{0, 473195, 504333, 520860, 550000, 700000, 750000, 830000} - -// normalizeCount will normalize the count of the symbols so -// the total is equal to the table size. -func (s *Scratch) normalizeCount() error { - var ( - tableLog = s.actualTableLog - scale = 62 - uint64(tableLog) - step = (1 << 62) / uint64(s.br.remain()) - vStep = uint64(1) << (scale - 20) - stillToDistribute = int16(1 << tableLog) - largest int - largestP int16 - lowThreshold = (uint32)(s.br.remain() >> tableLog) - ) - - for i, cnt := range s.count[:s.symbolLen] { - // already handled - // if (count[s] == s.length) return 0; /* rle special case */ - - if cnt == 0 { - s.norm[i] = 0 - continue - } - if cnt <= lowThreshold { - s.norm[i] = -1 - stillToDistribute-- - } else { - proba := (int16)((uint64(cnt) * step) >> scale) - if proba < 8 { - restToBeat := vStep * uint64(rtbTable[proba]) - v := uint64(cnt)*step - (uint64(proba) << scale) - if v > restToBeat { - proba++ - } - } - if proba > largestP { - largestP = proba - largest = i - } - s.norm[i] = proba - stillToDistribute -= proba - } - } - - if -stillToDistribute >= (s.norm[largest] >> 1) { - // corner case, need another normalization method - return s.normalizeCount2() - } - s.norm[largest] += stillToDistribute - return nil -} - -// Secondary normalization method. -// To be used when primary method fails. 
-func (s *Scratch) normalizeCount2() error { - const notYetAssigned = -2 - var ( - distributed uint32 - total = uint32(s.br.remain()) - tableLog = s.actualTableLog - lowThreshold = total >> tableLog - lowOne = (total * 3) >> (tableLog + 1) - ) - for i, cnt := range s.count[:s.symbolLen] { - if cnt == 0 { - s.norm[i] = 0 - continue - } - if cnt <= lowThreshold { - s.norm[i] = -1 - distributed++ - total -= cnt - continue - } - if cnt <= lowOne { - s.norm[i] = 1 - distributed++ - total -= cnt - continue - } - s.norm[i] = notYetAssigned - } - toDistribute := (1 << tableLog) - distributed - - if (total / toDistribute) > lowOne { - // risk of rounding to zero - lowOne = (total * 3) / (toDistribute * 2) - for i, cnt := range s.count[:s.symbolLen] { - if (s.norm[i] == notYetAssigned) && (cnt <= lowOne) { - s.norm[i] = 1 - distributed++ - total -= cnt - continue - } - } - toDistribute = (1 << tableLog) - distributed - } - if distributed == uint32(s.symbolLen)+1 { - // all values are pretty poor; - // probably incompressible data (should have already been detected); - // find max, then give all remaining points to max - var maxV int - var maxC uint32 - for i, cnt := range s.count[:s.symbolLen] { - if cnt > maxC { - maxV = i - maxC = cnt - } - } - s.norm[maxV] += int16(toDistribute) - return nil - } - - if total == 0 { - // all of the symbols were low enough for the lowOne or lowThreshold - for i := uint32(0); toDistribute > 0; i = (i + 1) % (uint32(s.symbolLen)) { - if s.norm[i] > 0 { - toDistribute-- - s.norm[i]++ - } - } - return nil - } - - var ( - vStepLog = 62 - uint64(tableLog) - mid = uint64((1 << (vStepLog - 1)) - 1) - rStep = (((1 << vStepLog) * uint64(toDistribute)) + mid) / uint64(total) // scale on remaining - tmpTotal = mid - ) - for i, cnt := range s.count[:s.symbolLen] { - if s.norm[i] == notYetAssigned { - var ( - end = tmpTotal + uint64(cnt)*rStep - sStart = uint32(tmpTotal >> vStepLog) - sEnd = uint32(end >> vStepLog) - weight = sEnd - sStart - ) - if weight < 1 { - return errors.New("weight < 1") - } - s.norm[i] = int16(weight) - tmpTotal = end - } - } - return nil -} - -// validateNorm validates the normalized histogram table. 
-func (s *Scratch) validateNorm() (err error) { - var total int - for _, v := range s.norm[:s.symbolLen] { - if v >= 0 { - total += int(v) - } else { - total -= int(v) - } - } - defer func() { - if err == nil { - return - } - fmt.Printf("selected TableLog: %d, Symbol length: %d\n", s.actualTableLog, s.symbolLen) - for i, v := range s.norm[:s.symbolLen] { - fmt.Printf("%3d: %5d -> %4d \n", i, s.count[i], v) - } - }() - if total != (1 << s.actualTableLog) { - return fmt.Errorf("warning: Total == %d != %d", total, 1< tablelogAbsoluteMax { - return errors.New("tableLog too large") - } - bitStream >>= 4 - bitCount := uint(4) - - s.actualTableLog = uint8(nbBits) - remaining := int32((1 << nbBits) + 1) - threshold := int32(1 << nbBits) - gotTotal := int32(0) - nbBits++ - - for remaining > 1 { - if previous0 { - n0 := charnum - for (bitStream & 0xFFFF) == 0xFFFF { - n0 += 24 - if b.off < iend-5 { - b.advance(2) - bitStream = b.Uint32() >> bitCount - } else { - bitStream >>= 16 - bitCount += 16 - } - } - for (bitStream & 3) == 3 { - n0 += 3 - bitStream >>= 2 - bitCount += 2 - } - n0 += uint16(bitStream & 3) - bitCount += 2 - if n0 > maxSymbolValue { - return errors.New("maxSymbolValue too small") - } - for charnum < n0 { - s.norm[charnum&0xff] = 0 - charnum++ - } - - if b.off <= iend-7 || b.off+int(bitCount>>3) <= iend-4 { - b.advance(bitCount >> 3) - bitCount &= 7 - bitStream = b.Uint32() >> bitCount - } else { - bitStream >>= 2 - } - } - - max := (2*(threshold) - 1) - (remaining) - var count int32 - - if (int32(bitStream) & (threshold - 1)) < max { - count = int32(bitStream) & (threshold - 1) - bitCount += nbBits - 1 - } else { - count = int32(bitStream) & (2*threshold - 1) - if count >= threshold { - count -= max - } - bitCount += nbBits - } - - count-- // extra accuracy - if count < 0 { - // -1 means +1 - remaining += count - gotTotal -= count - } else { - remaining -= count - gotTotal += count - } - s.norm[charnum&0xff] = int16(count) - charnum++ - previous0 = count == 0 - for remaining < threshold { - nbBits-- - threshold >>= 1 - } - if b.off <= iend-7 || b.off+int(bitCount>>3) <= iend-4 { - b.advance(bitCount >> 3) - bitCount &= 7 - } else { - bitCount -= (uint)(8 * (len(b.b) - 4 - b.off)) - b.off = len(b.b) - 4 - } - bitStream = b.Uint32() >> (bitCount & 31) - } - s.symbolLen = charnum - - if s.symbolLen <= 1 { - return fmt.Errorf("symbolLen (%d) too small", s.symbolLen) - } - if s.symbolLen > maxSymbolValue+1 { - return fmt.Errorf("symbolLen (%d) too big", s.symbolLen) - } - if remaining != 1 { - return fmt.Errorf("corruption detected (remaining %d != 1)", remaining) - } - if bitCount > 32 { - return fmt.Errorf("corruption detected (bitCount %d > 32)", bitCount) - } - if gotTotal != 1<> 3) - return nil -} - -// decSymbol contains information about a state entry, -// Including the state offset base, the output symbol and -// the number of bits to read for the low part of the destination state. -type decSymbol struct { - newState uint16 - symbol uint8 - nbBits uint8 -} - -// allocDtable will allocate decoding tables if they are not big enough. 
-func (s *Scratch) allocDtable() { - tableSize := 1 << s.actualTableLog - if cap(s.decTable) < tableSize { - s.decTable = make([]decSymbol, tableSize) - } - s.decTable = s.decTable[:tableSize] - - if cap(s.ct.tableSymbol) < 256 { - s.ct.tableSymbol = make([]byte, 256) - } - s.ct.tableSymbol = s.ct.tableSymbol[:256] - - if cap(s.ct.stateTable) < 256 { - s.ct.stateTable = make([]uint16, 256) - } - s.ct.stateTable = s.ct.stateTable[:256] -} - -// buildDtable will build the decoding table. -func (s *Scratch) buildDtable() error { - tableSize := uint32(1 << s.actualTableLog) - highThreshold := tableSize - 1 - s.allocDtable() - symbolNext := s.ct.stateTable[:256] - - // Init, lay down lowprob symbols - s.zeroBits = false - { - largeLimit := int16(1 << (s.actualTableLog - 1)) - for i, v := range s.norm[:s.symbolLen] { - if v == -1 { - s.decTable[highThreshold].symbol = uint8(i) - highThreshold-- - symbolNext[i] = 1 - } else { - if v >= largeLimit { - s.zeroBits = true - } - symbolNext[i] = uint16(v) - } - } - } - // Spread symbols - { - tableMask := tableSize - 1 - step := tableStep(tableSize) - position := uint32(0) - for ss, v := range s.norm[:s.symbolLen] { - for i := 0; i < int(v); i++ { - s.decTable[position].symbol = uint8(ss) - position = (position + step) & tableMask - for position > highThreshold { - // lowprob area - position = (position + step) & tableMask - } - } - } - if position != 0 { - // position must reach all cells once, otherwise normalizedCounter is incorrect - return errors.New("corrupted input (position != 0)") - } - } - - // Build Decoding table - { - tableSize := uint16(1 << s.actualTableLog) - for u, v := range s.decTable { - symbol := v.symbol - nextState := symbolNext[symbol] - symbolNext[symbol] = nextState + 1 - nBits := s.actualTableLog - byte(highBits(uint32(nextState))) - s.decTable[u].nbBits = nBits - newState := (nextState << nBits) - tableSize - if newState >= tableSize { - return fmt.Errorf("newState (%d) outside table size (%d)", newState, tableSize) - } - if newState == uint16(u) && nBits == 0 { - // Seems weird that this is possible with nbits > 0. - return fmt.Errorf("newState (%d) == oldState (%d) and no bits", newState, u) - } - s.decTable[u].newState = newState - } - } - return nil -} - -// decompress will decompress the bitstream. -// If the buffer is over-read an error is returned. -func (s *Scratch) decompress() error { - br := &s.bits - if err := br.init(s.br.unread()); err != nil { - return err - } - - var s1, s2 decoder - // Initialize and decode first state and symbol. - s1.init(br, s.decTable, s.actualTableLog) - s2.init(br, s.decTable, s.actualTableLog) - - // Use temp table to avoid bound checks/append penalty. - var tmp = s.ct.tableSymbol[:256] - var off uint8 - - // Main part - if !s.zeroBits { - for br.off >= 8 { - br.fillFast() - tmp[off+0] = s1.nextFast() - tmp[off+1] = s2.nextFast() - br.fillFast() - tmp[off+2] = s1.nextFast() - tmp[off+3] = s2.nextFast() - off += 4 - // When off is 0, we have overflowed and should write. - if off == 0 { - s.Out = append(s.Out, tmp...) - if len(s.Out) >= s.DecompressLimit { - return fmt.Errorf("output size (%d) > DecompressLimit (%d)", len(s.Out), s.DecompressLimit) - } - } - } - } else { - for br.off >= 8 { - br.fillFast() - tmp[off+0] = s1.next() - tmp[off+1] = s2.next() - br.fillFast() - tmp[off+2] = s1.next() - tmp[off+3] = s2.next() - off += 4 - if off == 0 { - s.Out = append(s.Out, tmp...) - // When off is 0, we have overflowed and should write. 
- if len(s.Out) >= s.DecompressLimit { - return fmt.Errorf("output size (%d) > DecompressLimit (%d)", len(s.Out), s.DecompressLimit) - } - } - } - } - s.Out = append(s.Out, tmp[:off]...) - - // Final bits, a bit more expensive check - for { - if s1.finished() { - s.Out = append(s.Out, s1.final(), s2.final()) - break - } - br.fill() - s.Out = append(s.Out, s1.next()) - if s2.finished() { - s.Out = append(s.Out, s2.final(), s1.final()) - break - } - s.Out = append(s.Out, s2.next()) - if len(s.Out) >= s.DecompressLimit { - return fmt.Errorf("output size (%d) > DecompressLimit (%d)", len(s.Out), s.DecompressLimit) - } - } - return br.close() -} - -// decoder keeps track of the current state and updates it from the bitstream. -type decoder struct { - state uint16 - br *bitReader - dt []decSymbol -} - -// init will initialize the decoder and read the first state from the stream. -func (d *decoder) init(in *bitReader, dt []decSymbol, tableLog uint8) { - d.dt = dt - d.br = in - d.state = in.getBits(tableLog) -} - -// next returns the next symbol and sets the next state. -// At least tablelog bits must be available in the bit reader. -func (d *decoder) next() uint8 { - n := &d.dt[d.state] - lowBits := d.br.getBits(n.nbBits) - d.state = n.newState + lowBits - return n.symbol -} - -// finished returns true if all bits have been read from the bitstream -// and the next state would require reading bits from the input. -func (d *decoder) finished() bool { - return d.br.finished() && d.dt[d.state].nbBits > 0 -} - -// final returns the current state symbol without decoding the next. -func (d *decoder) final() uint8 { - return d.dt[d.state].symbol -} - -// nextFast returns the next symbol and sets the next state. -// This can only be used if no symbols are 0 bits. -// At least tablelog bits must be available in the bit reader. -func (d *decoder) nextFast() uint8 { - n := d.dt[d.state] - lowBits := d.br.getBitsFast(n.nbBits) - d.state = n.newState + lowBits - return n.symbol -} diff --git a/vendor/github.com/klauspost/compress/fse/fse.go b/vendor/github.com/klauspost/compress/fse/fse.go deleted file mode 100644 index 535cbadfd..000000000 --- a/vendor/github.com/klauspost/compress/fse/fse.go +++ /dev/null @@ -1,144 +0,0 @@ -// Copyright 2018 Klaus Post. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. -// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. - -// Package fse provides Finite State Entropy encoding and decoding. -// -// Finite State Entropy encoding provides a fast near-optimal symbol encoding/decoding -// for byte blocks as implemented in zstd. -// -// See https://github.com/klauspost/compress/tree/master/fse for more information. -package fse - -import ( - "errors" - "fmt" - "math/bits" -) - -const ( - /*!MEMORY_USAGE : - * Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.) - * Increasing memory usage improves compression ratio - * Reduced memory usage can improve speed, due to cache effect - * Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */ - maxMemoryUsage = 14 - defaultMemoryUsage = 13 - - maxTableLog = maxMemoryUsage - 2 - maxTablesize = 1 << maxTableLog - defaultTablelog = defaultMemoryUsage - 2 - minTablelog = 5 - maxSymbolValue = 255 -) - -var ( - // ErrIncompressible is returned when input is judged to be too hard to compress. 
- ErrIncompressible = errors.New("input is not compressible") - - // ErrUseRLE is returned from the compressor when the input is a single byte value repeated. - ErrUseRLE = errors.New("input is single value repeated") -) - -// Scratch provides temporary storage for compression and decompression. -type Scratch struct { - // Private - count [maxSymbolValue + 1]uint32 - norm [maxSymbolValue + 1]int16 - br byteReader - bits bitReader - bw bitWriter - ct cTable // Compression tables. - decTable []decSymbol // Decompression table. - maxCount int // count of the most probable symbol - - // Per block parameters. - // These can be used to override compression parameters of the block. - // Do not touch, unless you know what you are doing. - - // Out is output buffer. - // If the scratch is re-used before the caller is done processing the output, - // set this field to nil. - // Otherwise the output buffer will be re-used for next Compression/Decompression step - // and allocation will be avoided. - Out []byte - - // DecompressLimit limits the maximum decoded size acceptable. - // If > 0 decompression will stop when approximately this many bytes - // has been decoded. - // If 0, maximum size will be 2GB. - DecompressLimit int - - symbolLen uint16 // Length of active part of the symbol table. - actualTableLog uint8 // Selected tablelog. - zeroBits bool // no bits has prob > 50%. - clearCount bool // clear count - - // MaxSymbolValue will override the maximum symbol value of the next block. - MaxSymbolValue uint8 - - // TableLog will attempt to override the tablelog for the next block. - TableLog uint8 -} - -// Histogram allows to populate the histogram and skip that step in the compression, -// It otherwise allows to inspect the histogram when compression is done. -// To indicate that you have populated the histogram call HistogramFinished -// with the value of the highest populated symbol, as well as the number of entries -// in the most populated entry. These are accepted at face value. -// The returned slice will always be length 256. -func (s *Scratch) Histogram() []uint32 { - return s.count[:] -} - -// HistogramFinished can be called to indicate that the histogram has been populated. -// maxSymbol is the index of the highest set symbol of the next data segment. -// maxCount is the number of entries in the most populated entry. -// These are accepted at face value. -func (s *Scratch) HistogramFinished(maxSymbol uint8, maxCount int) { - s.maxCount = maxCount - s.symbolLen = uint16(maxSymbol) + 1 - s.clearCount = maxCount != 0 -} - -// prepare will prepare and allocate scratch tables used for both compression and decompression. -func (s *Scratch) prepare(in []byte) (*Scratch, error) { - if s == nil { - s = &Scratch{} - } - if s.MaxSymbolValue == 0 { - s.MaxSymbolValue = 255 - } - if s.TableLog == 0 { - s.TableLog = defaultTablelog - } - if s.TableLog > maxTableLog { - return nil, fmt.Errorf("tableLog (%d) > maxTableLog (%d)", s.TableLog, maxTableLog) - } - if cap(s.Out) == 0 { - s.Out = make([]byte, 0, len(in)) - } - if s.clearCount && s.maxCount == 0 { - for i := range s.count { - s.count[i] = 0 - } - s.clearCount = false - } - s.br.init(in) - if s.DecompressLimit == 0 { - // Max size 2GB. - s.DecompressLimit = (2 << 30) - 1 - } - - return s, nil -} - -// tableStep returns the next table index. 
-func tableStep(tableSize uint32) uint32 { - return (tableSize >> 1) + (tableSize >> 3) + 3 -} - -func highBits(val uint32) (n uint32) { - return uint32(bits.Len32(val) - 1) -} diff --git a/vendor/github.com/klauspost/compress/gen.sh b/vendor/github.com/klauspost/compress/gen.sh deleted file mode 100644 index aff942205..000000000 --- a/vendor/github.com/klauspost/compress/gen.sh +++ /dev/null @@ -1,4 +0,0 @@ -#!/bin/sh - -cd s2/cmd/_s2sx/ || exit 1 -go generate . diff --git a/vendor/github.com/klauspost/compress/huff0/.gitignore b/vendor/github.com/klauspost/compress/huff0/.gitignore deleted file mode 100644 index b3d262958..000000000 --- a/vendor/github.com/klauspost/compress/huff0/.gitignore +++ /dev/null @@ -1 +0,0 @@ -/huff0-fuzz.zip diff --git a/vendor/github.com/klauspost/compress/huff0/README.md b/vendor/github.com/klauspost/compress/huff0/README.md deleted file mode 100644 index 8b6e5c663..000000000 --- a/vendor/github.com/klauspost/compress/huff0/README.md +++ /dev/null @@ -1,89 +0,0 @@ -# Huff0 entropy compression - -This package provides Huff0 encoding and decoding as used in zstd. - -[Huff0](https://github.com/Cyan4973/FiniteStateEntropy#new-generation-entropy-coders), -a Huffman codec designed for modern CPU, featuring OoO (Out of Order) operations on multiple ALU -(Arithmetic Logic Unit), achieving extremely fast compression and decompression speeds. - -This can be used for compressing input with a lot of similar input values to the smallest number of bytes. -This does not perform any multi-byte [dictionary coding](https://en.wikipedia.org/wiki/Dictionary_coder) as LZ coders, -but it can be used as a secondary step to compressors (like Snappy) that does not do entropy encoding. - -* [Godoc documentation](https://godoc.org/github.com/klauspost/compress/huff0) - -## News - -This is used as part of the [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression and decompression package. - -This ensures that most functionality is well tested. - -# Usage - -This package provides a low level interface that allows to compress single independent blocks. - -Each block is separate, and there is no built in integrity checks. -This means that the caller should keep track of block sizes and also do checksums if needed. - -Compressing a block is done via the [`Compress1X`](https://godoc.org/github.com/klauspost/compress/huff0#Compress1X) and -[`Compress4X`](https://godoc.org/github.com/klauspost/compress/huff0#Compress4X) functions. -You must provide input and will receive the output and maybe an error. - -These error values can be returned: - -| Error | Description | -|---------------------|-----------------------------------------------------------------------------| -| `` | Everything ok, output is returned | -| `ErrIncompressible` | Returned when input is judged to be too hard to compress | -| `ErrUseRLE` | Returned from the compressor when the input is a single byte value repeated | -| `ErrTooBig` | Returned if the input block exceeds the maximum allowed size (128 Kib) | -| `(error)` | An internal error occurred. | - - -As can be seen above some of there are errors that will be returned even under normal operation so it is important to handle these. - -To reduce allocations you can provide a [`Scratch`](https://godoc.org/github.com/klauspost/compress/huff0#Scratch) object -that can be re-used for successive calls. Both compression and decompression accepts a `Scratch` object, and the same -object can be used for both. 
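A minimal sketch of the block-compression flow described above, assuming only the exported names that appear in these sources (huff0.Compress1X, huff0.Scratch, ErrIncompressible, ErrUseRLE); compressBlock is a hypothetical helper, and how uncompressed or RLE blocks are framed is left entirely to the caller.

package example

import (
	"fmt"

	"github.com/klauspost/compress/huff0"
)

// compressBlock entropy-codes one independent block, re-using the caller's Scratch.
// ErrIncompressible and ErrUseRLE are expected outcomes rather than failures.
func compressBlock(in []byte, s *huff0.Scratch) ([]byte, error) {
	out, reused, err := huff0.Compress1X(in, s)
	switch err {
	case nil:
		fmt.Printf("compressed %d -> %d bytes (table reused: %v)\n", len(in), len(out), reused)
		return out, nil
	case huff0.ErrIncompressible:
		// Entropy coding would not shrink the block; store it verbatim in the caller's framing.
		return in, nil
	case huff0.ErrUseRLE:
		// Input is a single repeated byte; the caller should RLE-encode it in its own framing.
		return in, nil
	default:
		return nil, err
	}
}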
- -Be aware, that when re-using a `Scratch` object that the *output* buffer is also re-used, so if you are still using this -you must set the `Out` field in the scratch to nil. The same buffer is used for compression and decompression output. - -The `Scratch` object will retain state that allows to re-use previous tables for encoding and decoding. - -## Tables and re-use - -Huff0 allows for reusing tables from the previous block to save space if that is expected to give better/faster results. - -The Scratch object allows you to set a [`ReusePolicy`](https://godoc.org/github.com/klauspost/compress/huff0#ReusePolicy) -that controls this behaviour. See the documentation for details. This can be altered between each block. - -Do however note that this information is *not* stored in the output block and it is up to the users of the package to -record whether [`ReadTable`](https://godoc.org/github.com/klauspost/compress/huff0#ReadTable) should be called, -based on the boolean reported back from the CompressXX call. - -If you want to store the table separate from the data, you can access them as `OutData` and `OutTable` on the -[`Scratch`](https://godoc.org/github.com/klauspost/compress/huff0#Scratch) object. - -## Decompressing - -The first part of decoding is to initialize the decoding table through [`ReadTable`](https://godoc.org/github.com/klauspost/compress/huff0#ReadTable). -This will initialize the decoding tables. -You can supply the complete block to `ReadTable` and it will return the data part of the block -which can be given to the decompressor. - -Decompressing is done by calling the [`Decompress1X`](https://godoc.org/github.com/klauspost/compress/huff0#Scratch.Decompress1X) -or [`Decompress4X`](https://godoc.org/github.com/klauspost/compress/huff0#Scratch.Decompress4X) function. - -For concurrently decompressing content with a fixed table a stateless [`Decoder`](https://godoc.org/github.com/klauspost/compress/huff0#Decoder) can be requested which will remain correct as long as the scratch is unchanged. The capacity of the provided slice indicates the expected output size. - -You must provide the output from the compression stage, at exactly the size you got back. If you receive an error back -your input was likely corrupted. - -It is important to note that a successful decoding does *not* mean your output matches your original input. -There are no integrity checks, so relying on errors from the decompressor does not assure your data is valid. - -# Contributing - -Contributions are always welcome. Be aware that adding public functions will require good justification and breaking -changes will likely not be accepted. If in doubt open an issue before writing the PR. diff --git a/vendor/github.com/klauspost/compress/huff0/bitreader.go b/vendor/github.com/klauspost/compress/huff0/bitreader.go deleted file mode 100644 index e36d9742f..000000000 --- a/vendor/github.com/klauspost/compress/huff0/bitreader.go +++ /dev/null @@ -1,229 +0,0 @@ -// Copyright 2018 Klaus Post. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. -// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. - -package huff0 - -import ( - "encoding/binary" - "errors" - "fmt" - "io" -) - -// bitReader reads a bitstream in reverse. -// The last set bit indicates the start of the stream and is used -// for aligning the input. 
-type bitReaderBytes struct { - in []byte - off uint // next byte to read is at in[off - 1] - value uint64 - bitsRead uint8 -} - -// init initializes and resets the bit reader. -func (b *bitReaderBytes) init(in []byte) error { - if len(in) < 1 { - return errors.New("corrupt stream: too short") - } - b.in = in - b.off = uint(len(in)) - // The highest bit of the last byte indicates where to start - v := in[len(in)-1] - if v == 0 { - return errors.New("corrupt stream, did not find end of stream") - } - b.bitsRead = 64 - b.value = 0 - if len(in) >= 8 { - b.fillFastStart() - } else { - b.fill() - b.fill() - } - b.advance(8 - uint8(highBit32(uint32(v)))) - return nil -} - -// peekBitsFast requires that at least one bit is requested every time. -// There are no checks if the buffer is filled. -func (b *bitReaderBytes) peekByteFast() uint8 { - got := uint8(b.value >> 56) - return got -} - -func (b *bitReaderBytes) advance(n uint8) { - b.bitsRead += n - b.value <<= n & 63 -} - -// fillFast() will make sure at least 32 bits are available. -// There must be at least 4 bytes available. -func (b *bitReaderBytes) fillFast() { - if b.bitsRead < 32 { - return - } - - // 2 bounds checks. - v := b.in[b.off-4 : b.off] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) - b.value |= uint64(low) << (b.bitsRead - 32) - b.bitsRead -= 32 - b.off -= 4 -} - -// fillFastStart() assumes the bitReaderBytes is empty and there is at least 8 bytes to read. -func (b *bitReaderBytes) fillFastStart() { - // Do single re-slice to avoid bounds checks. - b.value = binary.LittleEndian.Uint64(b.in[b.off-8:]) - b.bitsRead = 0 - b.off -= 8 -} - -// fill() will make sure at least 32 bits are available. -func (b *bitReaderBytes) fill() { - if b.bitsRead < 32 { - return - } - if b.off > 4 { - v := b.in[b.off-4 : b.off] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) - b.value |= uint64(low) << (b.bitsRead - 32) - b.bitsRead -= 32 - b.off -= 4 - return - } - for b.off > 0 { - b.value |= uint64(b.in[b.off-1]) << (b.bitsRead - 8) - b.bitsRead -= 8 - b.off-- - } -} - -// finished returns true if all bits have been read from the bit stream. -func (b *bitReaderBytes) finished() bool { - return b.off == 0 && b.bitsRead >= 64 -} - -func (b *bitReaderBytes) remaining() uint { - return b.off*8 + uint(64-b.bitsRead) -} - -// close the bitstream and returns an error if out-of-buffer reads occurred. -func (b *bitReaderBytes) close() error { - // Release reference. - b.in = nil - if b.remaining() > 0 { - return fmt.Errorf("corrupt input: %d bits remain on stream", b.remaining()) - } - if b.bitsRead > 64 { - return io.ErrUnexpectedEOF - } - return nil -} - -// bitReaderShifted reads a bitstream in reverse. -// The last set bit indicates the start of the stream and is used -// for aligning the input. -type bitReaderShifted struct { - in []byte - off uint // next byte to read is at in[off - 1] - value uint64 - bitsRead uint8 -} - -// init initializes and resets the bit reader. 
-func (b *bitReaderShifted) init(in []byte) error { - if len(in) < 1 { - return errors.New("corrupt stream: too short") - } - b.in = in - b.off = uint(len(in)) - // The highest bit of the last byte indicates where to start - v := in[len(in)-1] - if v == 0 { - return errors.New("corrupt stream, did not find end of stream") - } - b.bitsRead = 64 - b.value = 0 - if len(in) >= 8 { - b.fillFastStart() - } else { - b.fill() - b.fill() - } - b.advance(8 - uint8(highBit32(uint32(v)))) - return nil -} - -// peekBitsFast requires that at least one bit is requested every time. -// There are no checks if the buffer is filled. -func (b *bitReaderShifted) peekBitsFast(n uint8) uint16 { - return uint16(b.value >> ((64 - n) & 63)) -} - -func (b *bitReaderShifted) advance(n uint8) { - b.bitsRead += n - b.value <<= n & 63 -} - -// fillFast() will make sure at least 32 bits are available. -// There must be at least 4 bytes available. -func (b *bitReaderShifted) fillFast() { - if b.bitsRead < 32 { - return - } - - // 2 bounds checks. - v := b.in[b.off-4 : b.off] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) - b.value |= uint64(low) << ((b.bitsRead - 32) & 63) - b.bitsRead -= 32 - b.off -= 4 -} - -// fillFastStart() assumes the bitReaderShifted is empty and there is at least 8 bytes to read. -func (b *bitReaderShifted) fillFastStart() { - // Do single re-slice to avoid bounds checks. - b.value = binary.LittleEndian.Uint64(b.in[b.off-8:]) - b.bitsRead = 0 - b.off -= 8 -} - -// fill() will make sure at least 32 bits are available. -func (b *bitReaderShifted) fill() { - if b.bitsRead < 32 { - return - } - if b.off > 4 { - v := b.in[b.off-4 : b.off] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) - b.value |= uint64(low) << ((b.bitsRead - 32) & 63) - b.bitsRead -= 32 - b.off -= 4 - return - } - for b.off > 0 { - b.value |= uint64(b.in[b.off-1]) << ((b.bitsRead - 8) & 63) - b.bitsRead -= 8 - b.off-- - } -} - -func (b *bitReaderShifted) remaining() uint { - return b.off*8 + uint(64-b.bitsRead) -} - -// close the bitstream and returns an error if out-of-buffer reads occurred. -func (b *bitReaderShifted) close() error { - // Release reference. - b.in = nil - if b.remaining() > 0 { - return fmt.Errorf("corrupt input: %d bits remain on stream", b.remaining()) - } - if b.bitsRead > 64 { - return io.ErrUnexpectedEOF - } - return nil -} diff --git a/vendor/github.com/klauspost/compress/huff0/bitwriter.go b/vendor/github.com/klauspost/compress/huff0/bitwriter.go deleted file mode 100644 index 0ebc9aaac..000000000 --- a/vendor/github.com/klauspost/compress/huff0/bitwriter.go +++ /dev/null @@ -1,102 +0,0 @@ -// Copyright 2018 Klaus Post. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. -// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. - -package huff0 - -// bitWriter will write bits. -// First bit will be LSB of the first byte of output. -type bitWriter struct { - bitContainer uint64 - nBits uint8 - out []byte -} - -// addBits16Clean will add up to 16 bits. value may not contain more set bits than indicated. -// It will not check if there is space for them, so the caller must ensure that it has flushed recently. -func (b *bitWriter) addBits16Clean(value uint16, bits uint8) { - b.bitContainer |= uint64(value) << (b.nBits & 63) - b.nBits += bits -} - -// encSymbol will add up to 16 bits. 
value may not contain more set bits than indicated. -// It will not check if there is space for them, so the caller must ensure that it has flushed recently. -func (b *bitWriter) encSymbol(ct cTable, symbol byte) { - enc := ct[symbol] - b.bitContainer |= uint64(enc.val) << (b.nBits & 63) - if false { - if enc.nBits == 0 { - panic("nbits 0") - } - } - b.nBits += enc.nBits -} - -// encTwoSymbols will add up to 32 bits. value may not contain more set bits than indicated. -// It will not check if there is space for them, so the caller must ensure that it has flushed recently. -func (b *bitWriter) encTwoSymbols(ct cTable, av, bv byte) { - encA := ct[av] - encB := ct[bv] - sh := b.nBits & 63 - combined := uint64(encA.val) | (uint64(encB.val) << (encA.nBits & 63)) - b.bitContainer |= combined << sh - if false { - if encA.nBits == 0 { - panic("nbitsA 0") - } - if encB.nBits == 0 { - panic("nbitsB 0") - } - } - b.nBits += encA.nBits + encB.nBits -} - -// encFourSymbols adds up to 32 bits from four symbols. -// It will not check if there is space for them, -// so the caller must ensure that b has been flushed recently. -func (b *bitWriter) encFourSymbols(encA, encB, encC, encD cTableEntry) { - bitsA := encA.nBits - bitsB := bitsA + encB.nBits - bitsC := bitsB + encC.nBits - bitsD := bitsC + encD.nBits - combined := uint64(encA.val) | - (uint64(encB.val) << (bitsA & 63)) | - (uint64(encC.val) << (bitsB & 63)) | - (uint64(encD.val) << (bitsC & 63)) - b.bitContainer |= combined << (b.nBits & 63) - b.nBits += bitsD -} - -// flush32 will flush out, so there are at least 32 bits available for writing. -func (b *bitWriter) flush32() { - if b.nBits < 32 { - return - } - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - byte(b.bitContainer>>24)) - b.nBits -= 32 - b.bitContainer >>= 32 -} - -// flushAlign will flush remaining full bytes and align to next byte boundary. -func (b *bitWriter) flushAlign() { - nbBytes := (b.nBits + 7) >> 3 - for i := uint8(0); i < nbBytes; i++ { - b.out = append(b.out, byte(b.bitContainer>>(i*8))) - } - b.nBits = 0 - b.bitContainer = 0 -} - -// close will write the alignment bit and write the final byte(s) -// to the output. -func (b *bitWriter) close() { - // End mark - b.addBits16Clean(1, 1) - // flush until next byte. - b.flushAlign() -} diff --git a/vendor/github.com/klauspost/compress/huff0/compress.go b/vendor/github.com/klauspost/compress/huff0/compress.go deleted file mode 100644 index 84aa3d12f..000000000 --- a/vendor/github.com/klauspost/compress/huff0/compress.go +++ /dev/null @@ -1,742 +0,0 @@ -package huff0 - -import ( - "fmt" - "math" - "runtime" - "sync" -) - -// Compress1X will compress the input. -// The output can be decoded using Decompress1X. -// Supply a Scratch object. The scratch object contains state about re-use, -// So when sharing across independent encodes, be sure to set the re-use policy. -func Compress1X(in []byte, s *Scratch) (out []byte, reUsed bool, err error) { - s, err = s.prepare(in) - if err != nil { - return nil, false, err - } - return compress(in, s, s.compress1X) -} - -// Compress4X will compress the input. The input is split into 4 independent blocks -// and compressed similar to Compress1X. -// The output can be decoded using Decompress4X. -// Supply a Scratch object. The scratch object contains state about re-use, -// So when sharing across independent encodes, be sure to set the re-use policy. 
-func Compress4X(in []byte, s *Scratch) (out []byte, reUsed bool, err error) { - s, err = s.prepare(in) - if err != nil { - return nil, false, err - } - if false { - // TODO: compress4Xp only slightly faster. - const parallelThreshold = 8 << 10 - if len(in) < parallelThreshold || runtime.GOMAXPROCS(0) == 1 { - return compress(in, s, s.compress4X) - } - return compress(in, s, s.compress4Xp) - } - return compress(in, s, s.compress4X) -} - -func compress(in []byte, s *Scratch, compressor func(src []byte) ([]byte, error)) (out []byte, reUsed bool, err error) { - // Nuke previous table if we cannot reuse anyway. - if s.Reuse == ReusePolicyNone { - s.prevTable = s.prevTable[:0] - } - - // Create histogram, if none was provided. - maxCount := s.maxCount - var canReuse = false - if maxCount == 0 { - maxCount, canReuse = s.countSimple(in) - } else { - canReuse = s.canUseTable(s.prevTable) - } - - // We want the output size to be less than this: - wantSize := len(in) - if s.WantLogLess > 0 { - wantSize -= wantSize >> s.WantLogLess - } - - // Reset for next run. - s.clearCount = true - s.maxCount = 0 - if maxCount >= len(in) { - if maxCount > len(in) { - return nil, false, fmt.Errorf("maxCount (%d) > length (%d)", maxCount, len(in)) - } - if len(in) == 1 { - return nil, false, ErrIncompressible - } - // One symbol, use RLE - return nil, false, ErrUseRLE - } - if maxCount == 1 || maxCount < (len(in)>>7) { - // Each symbol present maximum once or too well distributed. - return nil, false, ErrIncompressible - } - if s.Reuse == ReusePolicyMust && !canReuse { - // We must reuse, but we can't. - return nil, false, ErrIncompressible - } - if (s.Reuse == ReusePolicyPrefer || s.Reuse == ReusePolicyMust) && canReuse { - keepTable := s.cTable - keepTL := s.actualTableLog - s.cTable = s.prevTable - s.actualTableLog = s.prevTableLog - s.Out, err = compressor(in) - s.cTable = keepTable - s.actualTableLog = keepTL - if err == nil && len(s.Out) < wantSize { - s.OutData = s.Out - return s.Out, true, nil - } - if s.Reuse == ReusePolicyMust { - return nil, false, ErrIncompressible - } - // Do not attempt to re-use later. - s.prevTable = s.prevTable[:0] - } - - // Calculate new table. - err = s.buildCTable() - if err != nil { - return nil, false, err - } - - if false && !s.canUseTable(s.cTable) { - panic("invalid table generated") - } - - if s.Reuse == ReusePolicyAllow && canReuse { - hSize := len(s.Out) - oldSize := s.prevTable.estimateSize(s.count[:s.symbolLen]) - newSize := s.cTable.estimateSize(s.count[:s.symbolLen]) - if oldSize <= hSize+newSize || hSize+12 >= wantSize { - // Retain cTable even if we re-use. - keepTable := s.cTable - keepTL := s.actualTableLog - - s.cTable = s.prevTable - s.actualTableLog = s.prevTableLog - s.Out, err = compressor(in) - - // Restore ctable. - s.cTable = keepTable - s.actualTableLog = keepTL - if err != nil { - return nil, false, err - } - if len(s.Out) >= wantSize { - return nil, false, ErrIncompressible - } - s.OutData = s.Out - return s.Out, true, nil - } - } - - // Use new table - err = s.cTable.write(s) - if err != nil { - s.OutTable = nil - return nil, false, err - } - s.OutTable = s.Out - - // Compress using new table - s.Out, err = compressor(in) - if err != nil { - s.OutTable = nil - return nil, false, err - } - if len(s.Out) >= wantSize { - s.OutTable = nil - return nil, false, ErrIncompressible - } - // Move current table into previous. 
- s.prevTable, s.prevTableLog, s.cTable = s.cTable, s.actualTableLog, s.prevTable[:0] - s.OutData = s.Out[len(s.OutTable):] - return s.Out, false, nil -} - -// EstimateSizes will estimate the data sizes -func EstimateSizes(in []byte, s *Scratch) (tableSz, dataSz, reuseSz int, err error) { - s, err = s.prepare(in) - if err != nil { - return 0, 0, 0, err - } - - // Create histogram, if none was provided. - tableSz, dataSz, reuseSz = -1, -1, -1 - maxCount := s.maxCount - var canReuse = false - if maxCount == 0 { - maxCount, canReuse = s.countSimple(in) - } else { - canReuse = s.canUseTable(s.prevTable) - } - - // We want the output size to be less than this: - wantSize := len(in) - if s.WantLogLess > 0 { - wantSize -= wantSize >> s.WantLogLess - } - - // Reset for next run. - s.clearCount = true - s.maxCount = 0 - if maxCount >= len(in) { - if maxCount > len(in) { - return 0, 0, 0, fmt.Errorf("maxCount (%d) > length (%d)", maxCount, len(in)) - } - if len(in) == 1 { - return 0, 0, 0, ErrIncompressible - } - // One symbol, use RLE - return 0, 0, 0, ErrUseRLE - } - if maxCount == 1 || maxCount < (len(in)>>7) { - // Each symbol present maximum once or too well distributed. - return 0, 0, 0, ErrIncompressible - } - - // Calculate new table. - err = s.buildCTable() - if err != nil { - return 0, 0, 0, err - } - - if false && !s.canUseTable(s.cTable) { - panic("invalid table generated") - } - - tableSz, err = s.cTable.estTableSize(s) - if err != nil { - return 0, 0, 0, err - } - if canReuse { - reuseSz = s.prevTable.estimateSize(s.count[:s.symbolLen]) - } - dataSz = s.cTable.estimateSize(s.count[:s.symbolLen]) - - // Restore - return tableSz, dataSz, reuseSz, nil -} - -func (s *Scratch) compress1X(src []byte) ([]byte, error) { - return s.compress1xDo(s.Out, src), nil -} - -func (s *Scratch) compress1xDo(dst, src []byte) []byte { - var bw = bitWriter{out: dst} - - // N is length divisible by 4. - n := len(src) - n -= n & 3 - cTable := s.cTable[:256] - - // Encode last bytes. - for i := len(src) & 3; i > 0; i-- { - bw.encSymbol(cTable, src[n+i-1]) - } - n -= 4 - if s.actualTableLog <= 8 { - for ; n >= 0; n -= 4 { - tmp := src[n : n+4] - // tmp should be len 4 - bw.flush32() - bw.encFourSymbols(cTable[tmp[3]], cTable[tmp[2]], cTable[tmp[1]], cTable[tmp[0]]) - } - } else { - for ; n >= 0; n -= 4 { - tmp := src[n : n+4] - // tmp should be len 4 - bw.flush32() - bw.encTwoSymbols(cTable, tmp[3], tmp[2]) - bw.flush32() - bw.encTwoSymbols(cTable, tmp[1], tmp[0]) - } - } - bw.close() - return bw.out -} - -var sixZeros [6]byte - -func (s *Scratch) compress4X(src []byte) ([]byte, error) { - if len(src) < 12 { - return nil, ErrIncompressible - } - segmentSize := (len(src) + 3) / 4 - - // Add placeholder for output length - offsetIdx := len(s.Out) - s.Out = append(s.Out, sixZeros[:]...) - - for i := 0; i < 4; i++ { - toDo := src - if len(toDo) > segmentSize { - toDo = toDo[:segmentSize] - } - src = src[len(toDo):] - - idx := len(s.Out) - s.Out = s.compress1xDo(s.Out, toDo) - if len(s.Out)-idx > math.MaxUint16 { - // We cannot store the size in the jump table - return nil, ErrIncompressible - } - // Write compressed length as little endian before block. - if i < 3 { - // Last length is not written. - length := len(s.Out) - idx - s.Out[i*2+offsetIdx] = byte(length) - s.Out[i*2+offsetIdx+1] = byte(length >> 8) - } - } - - return s.Out, nil -} - -// compress4Xp will compress 4 streams using separate goroutines. 
-func (s *Scratch) compress4Xp(src []byte) ([]byte, error) { - if len(src) < 12 { - return nil, ErrIncompressible - } - // Add placeholder for output length - s.Out = s.Out[:6] - - segmentSize := (len(src) + 3) / 4 - var wg sync.WaitGroup - wg.Add(4) - for i := 0; i < 4; i++ { - toDo := src - if len(toDo) > segmentSize { - toDo = toDo[:segmentSize] - } - src = src[len(toDo):] - - // Separate goroutine for each block. - go func(i int) { - s.tmpOut[i] = s.compress1xDo(s.tmpOut[i][:0], toDo) - wg.Done() - }(i) - } - wg.Wait() - for i := 0; i < 4; i++ { - o := s.tmpOut[i] - if len(o) > math.MaxUint16 { - // We cannot store the size in the jump table - return nil, ErrIncompressible - } - // Write compressed length as little endian before block. - if i < 3 { - // Last length is not written. - s.Out[i*2] = byte(len(o)) - s.Out[i*2+1] = byte(len(o) >> 8) - } - - // Write output. - s.Out = append(s.Out, o...) - } - return s.Out, nil -} - -// countSimple will create a simple histogram in s.count. -// Returns the biggest count. -// Does not update s.clearCount. -func (s *Scratch) countSimple(in []byte) (max int, reuse bool) { - reuse = true - _ = s.count // Assert that s != nil to speed up the following loop. - for _, v := range in { - s.count[v]++ - } - m := uint32(0) - if len(s.prevTable) > 0 { - for i, v := range s.count[:] { - if v == 0 { - continue - } - if v > m { - m = v - } - s.symbolLen = uint16(i) + 1 - if i >= len(s.prevTable) { - reuse = false - } else if s.prevTable[i].nBits == 0 { - reuse = false - } - } - return int(m), reuse - } - for i, v := range s.count[:] { - if v == 0 { - continue - } - if v > m { - m = v - } - s.symbolLen = uint16(i) + 1 - } - return int(m), false -} - -func (s *Scratch) canUseTable(c cTable) bool { - if len(c) < int(s.symbolLen) { - return false - } - for i, v := range s.count[:s.symbolLen] { - if v != 0 && c[i].nBits == 0 { - return false - } - } - return true -} - -//lint:ignore U1000 used for debugging -func (s *Scratch) validateTable(c cTable) bool { - if len(c) < int(s.symbolLen) { - return false - } - for i, v := range s.count[:s.symbolLen] { - if v != 0 { - if c[i].nBits == 0 { - return false - } - if c[i].nBits > s.actualTableLog { - return false - } - } - } - return true -} - -// minTableLog provides the minimum logSize to safely represent a distribution. 
-func (s *Scratch) minTableLog() uint8 { - minBitsSrc := highBit32(uint32(s.srcLen)) + 1 - minBitsSymbols := highBit32(uint32(s.symbolLen-1)) + 2 - if minBitsSrc < minBitsSymbols { - return uint8(minBitsSrc) - } - return uint8(minBitsSymbols) -} - -// optimalTableLog calculates and sets the optimal tableLog in s.actualTableLog -func (s *Scratch) optimalTableLog() { - tableLog := s.TableLog - minBits := s.minTableLog() - maxBitsSrc := uint8(highBit32(uint32(s.srcLen-1))) - 1 - if maxBitsSrc < tableLog { - // Accuracy can be reduced - tableLog = maxBitsSrc - } - if minBits > tableLog { - tableLog = minBits - } - // Need a minimum to safely represent all symbol values - if tableLog < minTablelog { - tableLog = minTablelog - } - if tableLog > tableLogMax { - tableLog = tableLogMax - } - s.actualTableLog = tableLog -} - -type cTableEntry struct { - val uint16 - nBits uint8 - // We have 8 bits extra -} - -const huffNodesMask = huffNodesLen - 1 - -func (s *Scratch) buildCTable() error { - s.optimalTableLog() - s.huffSort() - if cap(s.cTable) < maxSymbolValue+1 { - s.cTable = make([]cTableEntry, s.symbolLen, maxSymbolValue+1) - } else { - s.cTable = s.cTable[:s.symbolLen] - for i := range s.cTable { - s.cTable[i] = cTableEntry{} - } - } - - var startNode = int16(s.symbolLen) - nonNullRank := s.symbolLen - 1 - - nodeNb := startNode - huffNode := s.nodes[1 : huffNodesLen+1] - - // This overlays the slice above, but allows "-1" index lookups. - // Different from reference implementation. - huffNode0 := s.nodes[0 : huffNodesLen+1] - - for huffNode[nonNullRank].count() == 0 { - nonNullRank-- - } - - lowS := int16(nonNullRank) - nodeRoot := nodeNb + lowS - 1 - lowN := nodeNb - huffNode[nodeNb].setCount(huffNode[lowS].count() + huffNode[lowS-1].count()) - huffNode[lowS].setParent(nodeNb) - huffNode[lowS-1].setParent(nodeNb) - nodeNb++ - lowS -= 2 - for n := nodeNb; n <= nodeRoot; n++ { - huffNode[n].setCount(1 << 30) - } - // fake entry, strong barrier - huffNode0[0].setCount(1 << 31) - - // create parents - for nodeNb <= nodeRoot { - var n1, n2 int16 - if huffNode0[lowS+1].count() < huffNode0[lowN+1].count() { - n1 = lowS - lowS-- - } else { - n1 = lowN - lowN++ - } - if huffNode0[lowS+1].count() < huffNode0[lowN+1].count() { - n2 = lowS - lowS-- - } else { - n2 = lowN - lowN++ - } - - huffNode[nodeNb].setCount(huffNode0[n1+1].count() + huffNode0[n2+1].count()) - huffNode0[n1+1].setParent(nodeNb) - huffNode0[n2+1].setParent(nodeNb) - nodeNb++ - } - - // distribute weights (unlimited tree height) - huffNode[nodeRoot].setNbBits(0) - for n := nodeRoot - 1; n >= startNode; n-- { - huffNode[n].setNbBits(huffNode[huffNode[n].parent()].nbBits() + 1) - } - for n := uint16(0); n <= nonNullRank; n++ { - huffNode[n].setNbBits(huffNode[huffNode[n].parent()].nbBits() + 1) - } - s.actualTableLog = s.setMaxHeight(int(nonNullRank)) - maxNbBits := s.actualTableLog - - // fill result into tree (val, nbBits) - if maxNbBits > tableLogMax { - return fmt.Errorf("internal error: maxNbBits (%d) > tableLogMax (%d)", maxNbBits, tableLogMax) - } - var nbPerRank [tableLogMax + 1]uint16 - var valPerRank [16]uint16 - for _, v := range huffNode[:nonNullRank+1] { - nbPerRank[v.nbBits()]++ - } - // determine stating value per rank - { - min := uint16(0) - for n := maxNbBits; n > 0; n-- { - // get starting value within each rank - valPerRank[n] = min - min += nbPerRank[n] - min >>= 1 - } - } - - // push nbBits per symbol, symbol order - for _, v := range huffNode[:nonNullRank+1] { - s.cTable[v.symbol()].nBits = v.nbBits() - } - - // 
assign value within rank, symbol order - t := s.cTable[:s.symbolLen] - for n, val := range t { - nbits := val.nBits & 15 - v := valPerRank[nbits] - t[n].val = v - valPerRank[nbits] = v + 1 - } - - return nil -} - -// huffSort will sort symbols, decreasing order. -func (s *Scratch) huffSort() { - type rankPos struct { - base uint32 - current uint32 - } - - // Clear nodes - nodes := s.nodes[:huffNodesLen+1] - s.nodes = nodes - nodes = nodes[1 : huffNodesLen+1] - - // Sort into buckets based on length of symbol count. - var rank [32]rankPos - for _, v := range s.count[:s.symbolLen] { - r := highBit32(v+1) & 31 - rank[r].base++ - } - // maxBitLength is log2(BlockSizeMax) + 1 - const maxBitLength = 18 + 1 - for n := maxBitLength; n > 0; n-- { - rank[n-1].base += rank[n].base - } - for n := range rank[:maxBitLength] { - rank[n].current = rank[n].base - } - for n, c := range s.count[:s.symbolLen] { - r := (highBit32(c+1) + 1) & 31 - pos := rank[r].current - rank[r].current++ - prev := nodes[(pos-1)&huffNodesMask] - for pos > rank[r].base && c > prev.count() { - nodes[pos&huffNodesMask] = prev - pos-- - prev = nodes[(pos-1)&huffNodesMask] - } - nodes[pos&huffNodesMask] = makeNodeElt(c, byte(n)) - } -} - -func (s *Scratch) setMaxHeight(lastNonNull int) uint8 { - maxNbBits := s.actualTableLog - huffNode := s.nodes[1 : huffNodesLen+1] - //huffNode = huffNode[: huffNodesLen] - - largestBits := huffNode[lastNonNull].nbBits() - - // early exit : no elt > maxNbBits - if largestBits <= maxNbBits { - return largestBits - } - totalCost := int(0) - baseCost := int(1) << (largestBits - maxNbBits) - n := uint32(lastNonNull) - - for huffNode[n].nbBits() > maxNbBits { - totalCost += baseCost - (1 << (largestBits - huffNode[n].nbBits())) - huffNode[n].setNbBits(maxNbBits) - n-- - } - // n stops at huffNode[n].nbBits <= maxNbBits - - for huffNode[n].nbBits() == maxNbBits { - n-- - } - // n end at index of smallest symbol using < maxNbBits - - // renorm totalCost - totalCost >>= largestBits - maxNbBits /* note : totalCost is necessarily a multiple of baseCost */ - - // repay normalized cost - { - const noSymbol = 0xF0F0F0F0 - var rankLast [tableLogMax + 2]uint32 - - for i := range rankLast[:] { - rankLast[i] = noSymbol - } - - // Get pos of last (smallest) symbol per rank - { - currentNbBits := maxNbBits - for pos := int(n); pos >= 0; pos-- { - if huffNode[pos].nbBits() >= currentNbBits { - continue - } - currentNbBits = huffNode[pos].nbBits() // < maxNbBits - rankLast[maxNbBits-currentNbBits] = uint32(pos) - } - } - - for totalCost > 0 { - nBitsToDecrease := uint8(highBit32(uint32(totalCost))) + 1 - - for ; nBitsToDecrease > 1; nBitsToDecrease-- { - highPos := rankLast[nBitsToDecrease] - lowPos := rankLast[nBitsToDecrease-1] - if highPos == noSymbol { - continue - } - if lowPos == noSymbol { - break - } - highTotal := huffNode[highPos].count() - lowTotal := 2 * huffNode[lowPos].count() - if highTotal <= lowTotal { - break - } - } - // only triggered when no more rank 1 symbol left => find closest one (note : there is necessarily at least one !) 
- // HUF_MAX_TABLELOG test just to please gcc 5+; but it should not be necessary - // FIXME: try to remove - for (nBitsToDecrease <= tableLogMax) && (rankLast[nBitsToDecrease] == noSymbol) { - nBitsToDecrease++ - } - totalCost -= 1 << (nBitsToDecrease - 1) - if rankLast[nBitsToDecrease-1] == noSymbol { - // this rank is no longer empty - rankLast[nBitsToDecrease-1] = rankLast[nBitsToDecrease] - } - huffNode[rankLast[nBitsToDecrease]].setNbBits(1 + - huffNode[rankLast[nBitsToDecrease]].nbBits()) - if rankLast[nBitsToDecrease] == 0 { - /* special case, reached largest symbol */ - rankLast[nBitsToDecrease] = noSymbol - } else { - rankLast[nBitsToDecrease]-- - if huffNode[rankLast[nBitsToDecrease]].nbBits() != maxNbBits-nBitsToDecrease { - rankLast[nBitsToDecrease] = noSymbol /* this rank is now empty */ - } - } - } - - for totalCost < 0 { /* Sometimes, cost correction overshoot */ - if rankLast[1] == noSymbol { /* special case : no rank 1 symbol (using maxNbBits-1); let's create one from largest rank 0 (using maxNbBits) */ - for huffNode[n].nbBits() == maxNbBits { - n-- - } - huffNode[n+1].setNbBits(huffNode[n+1].nbBits() - 1) - rankLast[1] = n + 1 - totalCost++ - continue - } - huffNode[rankLast[1]+1].setNbBits(huffNode[rankLast[1]+1].nbBits() - 1) - rankLast[1]++ - totalCost++ - } - } - return maxNbBits -} - -// A nodeElt is the fields -// -// count uint32 -// parent uint16 -// symbol byte -// nbBits uint8 -// -// in some order, all squashed into an integer so that the compiler -// always loads and stores entire nodeElts instead of separate fields. -type nodeElt uint64 - -func makeNodeElt(count uint32, symbol byte) nodeElt { - return nodeElt(count) | nodeElt(symbol)<<48 -} - -func (e *nodeElt) count() uint32 { return uint32(*e) } -func (e *nodeElt) parent() uint16 { return uint16(*e >> 32) } -func (e *nodeElt) symbol() byte { return byte(*e >> 48) } -func (e *nodeElt) nbBits() uint8 { return uint8(*e >> 56) } - -func (e *nodeElt) setCount(c uint32) { *e = (*e)&0xffffffff00000000 | nodeElt(c) } -func (e *nodeElt) setParent(p int16) { *e = (*e)&0xffff0000ffffffff | nodeElt(uint16(p))<<32 } -func (e *nodeElt) setNbBits(n uint8) { *e = (*e)&0x00ffffffffffffff | nodeElt(n)<<56 } diff --git a/vendor/github.com/klauspost/compress/huff0/decompress.go b/vendor/github.com/klauspost/compress/huff0/decompress.go deleted file mode 100644 index 54bd08b25..000000000 --- a/vendor/github.com/klauspost/compress/huff0/decompress.go +++ /dev/null @@ -1,1167 +0,0 @@ -package huff0 - -import ( - "errors" - "fmt" - "io" - "sync" - - "github.com/klauspost/compress/fse" -) - -type dTable struct { - single []dEntrySingle -} - -// single-symbols decoding -type dEntrySingle struct { - entry uint16 -} - -// Uses special code for all tables that are < 8 bits. -const use8BitTables = true - -// ReadTable will read a table from the input. -// The size of the input may be larger than the table definition. -// Any content remaining after the table definition will be returned. -// If no Scratch is provided a new one is allocated. -// The returned Scratch can be used for encoding or decoding input using this table. 
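A minimal sketch of the decode path that ReadTable enables, assuming only the exported API shown in these sources (huff0.ReadTable, Scratch.Decoder, Decoder.Decompress1X) and that the caller tracks each block's decompressed size itself; decompressBlock is a hypothetical helper.

package example

import (
	"fmt"

	"github.com/klauspost/compress/huff0"
)

// decompressBlock decodes a single 1X-encoded block whose table definition is
// stored at the front of the block, as emitted by the compression side.
func decompressBlock(block []byte, decodedSize int, s *huff0.Scratch) ([]byte, error) {
	// ReadTable initializes the decoding tables and returns the data portion.
	s, data, err := huff0.ReadTable(block, s)
	if err != nil {
		return nil, err
	}
	// The capacity of dst tells the stateless decoder the expected output size.
	dst := make([]byte, 0, decodedSize)
	out, err := s.Decoder().Decompress1X(dst, data)
	if err != nil {
		return nil, fmt.Errorf("huff0 decompress: %w", err)
	}
	return out, nil
}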
-func ReadTable(in []byte, s *Scratch) (s2 *Scratch, remain []byte, err error) { - s, err = s.prepare(nil) - if err != nil { - return s, nil, err - } - if len(in) <= 1 { - return s, nil, errors.New("input too small for table") - } - iSize := in[0] - in = in[1:] - if iSize >= 128 { - // Uncompressed - oSize := iSize - 127 - iSize = (oSize + 1) / 2 - if int(iSize) > len(in) { - return s, nil, errors.New("input too small for table") - } - for n := uint8(0); n < oSize; n += 2 { - v := in[n/2] - s.huffWeight[n] = v >> 4 - s.huffWeight[n+1] = v & 15 - } - s.symbolLen = uint16(oSize) - in = in[iSize:] - } else { - if len(in) < int(iSize) { - return s, nil, fmt.Errorf("input too small for table, want %d bytes, have %d", iSize, len(in)) - } - // FSE compressed weights - s.fse.DecompressLimit = 255 - hw := s.huffWeight[:] - s.fse.Out = hw - b, err := fse.Decompress(in[:iSize], s.fse) - s.fse.Out = nil - if err != nil { - return s, nil, fmt.Errorf("fse decompress returned: %w", err) - } - if len(b) > 255 { - return s, nil, errors.New("corrupt input: output table too large") - } - s.symbolLen = uint16(len(b)) - in = in[iSize:] - } - - // collect weight stats - var rankStats [16]uint32 - weightTotal := uint32(0) - for _, v := range s.huffWeight[:s.symbolLen] { - if v > tableLogMax { - return s, nil, errors.New("corrupt input: weight too large") - } - v2 := v & 15 - rankStats[v2]++ - // (1 << (v2-1)) is slower since the compiler cannot prove that v2 isn't 0. - weightTotal += (1 << v2) >> 1 - } - if weightTotal == 0 { - return s, nil, errors.New("corrupt input: weights zero") - } - - // get last non-null symbol weight (implied, total must be 2^n) - { - tableLog := highBit32(weightTotal) + 1 - if tableLog > tableLogMax { - return s, nil, errors.New("corrupt input: tableLog too big") - } - s.actualTableLog = uint8(tableLog) - // determine last weight - { - total := uint32(1) << tableLog - rest := total - weightTotal - verif := uint32(1) << highBit32(rest) - lastWeight := highBit32(rest) + 1 - if verif != rest { - // last value must be a clean power of 2 - return s, nil, errors.New("corrupt input: last value not power of two") - } - s.huffWeight[s.symbolLen] = uint8(lastWeight) - s.symbolLen++ - rankStats[lastWeight]++ - } - } - - if (rankStats[1] < 2) || (rankStats[1]&1 != 0) { - // by construction : at least 2 elts of rank 1, must be even - return s, nil, errors.New("corrupt input: min elt size, even check failed ") - } - - // TODO: Choose between single/double symbol decoding - - // Calculate starting value for each rank - { - var nextRankStart uint32 - for n := uint8(1); n < s.actualTableLog+1; n++ { - current := nextRankStart - nextRankStart += rankStats[n] << (n - 1) - rankStats[n] = current - } - } - - // fill DTable (always full size) - tSize := 1 << tableLogMax - if len(s.dt.single) != tSize { - s.dt.single = make([]dEntrySingle, tSize) - } - cTable := s.prevTable - if cap(cTable) < maxSymbolValue+1 { - cTable = make([]cTableEntry, 0, maxSymbolValue+1) - } - cTable = cTable[:maxSymbolValue+1] - s.prevTable = cTable[:s.symbolLen] - s.prevTableLog = s.actualTableLog - - for n, w := range s.huffWeight[:s.symbolLen] { - if w == 0 { - cTable[n] = cTableEntry{ - val: 0, - nBits: 0, - } - continue - } - length := (uint32(1) << w) >> 1 - d := dEntrySingle{ - entry: uint16(s.actualTableLog+1-w) | (uint16(n) << 8), - } - - rank := &rankStats[w] - cTable[n] = cTableEntry{ - val: uint16(*rank >> (w - 1)), - nBits: uint8(d.entry), - } - - single := s.dt.single[*rank : *rank+length] - for i := range single { - 
single[i] = d - } - *rank += length - } - - return s, in, nil -} - -// Decompress1X will decompress a 1X encoded stream. -// The length of the supplied input must match the end of a block exactly. -// Before this is called, the table must be initialized with ReadTable unless -// the encoder re-used the table. -// deprecated: Use the stateless Decoder() to get a concurrent version. -func (s *Scratch) Decompress1X(in []byte) (out []byte, err error) { - if cap(s.Out) < s.MaxDecodedSize { - s.Out = make([]byte, s.MaxDecodedSize) - } - s.Out = s.Out[:0:s.MaxDecodedSize] - s.Out, err = s.Decoder().Decompress1X(s.Out, in) - return s.Out, err -} - -// Decompress4X will decompress a 4X encoded stream. -// Before this is called, the table must be initialized with ReadTable unless -// the encoder re-used the table. -// The length of the supplied input must match the end of a block exactly. -// The destination size of the uncompressed data must be known and provided. -// deprecated: Use the stateless Decoder() to get a concurrent version. -func (s *Scratch) Decompress4X(in []byte, dstSize int) (out []byte, err error) { - if dstSize > s.MaxDecodedSize { - return nil, ErrMaxDecodedSizeExceeded - } - if cap(s.Out) < dstSize { - s.Out = make([]byte, s.MaxDecodedSize) - } - s.Out = s.Out[:0:dstSize] - s.Out, err = s.Decoder().Decompress4X(s.Out, in) - return s.Out, err -} - -// Decoder will return a stateless decoder that can be used by multiple -// decompressors concurrently. -// Before this is called, the table must be initialized with ReadTable. -// The Decoder is still linked to the scratch buffer so that cannot be reused. -// However, it is safe to discard the scratch. -func (s *Scratch) Decoder() *Decoder { - return &Decoder{ - dt: s.dt, - actualTableLog: s.actualTableLog, - bufs: &s.decPool, - } -} - -// Decoder provides stateless decoding. -type Decoder struct { - dt dTable - actualTableLog uint8 - bufs *sync.Pool -} - -func (d *Decoder) buffer() *[4][256]byte { - buf, ok := d.bufs.Get().(*[4][256]byte) - if ok { - return buf - } - return &[4][256]byte{} -} - -// decompress1X8Bit will decompress a 1X encoded stream with tablelog <= 8. -// The cap of the output buffer will be the maximum decompressed size. -// The length of the supplied input must match the end of a block exactly. -func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) { - if d.actualTableLog == 8 { - return d.decompress1X8BitExactly(dst, src) - } - var br bitReaderBytes - err := br.init(src) - if err != nil { - return dst, err - } - maxDecodedSize := cap(dst) - dst = dst[:0] - - // Avoid bounds check by always having full sized table. - dt := d.dt.single[:256] - - // Use temp table to avoid bound checks/append penalty. - bufs := d.buffer() - buf := &bufs[0] - var off uint8 - - switch d.actualTableLog { - case 8: - const shift = 0 - for br.off >= 4 { - br.fillFast() - v := dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+0] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+1] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+2] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+3] = uint8(v.entry >> 8) - - off += 4 - if off == 0 { - if len(dst)+256 > maxDecodedSize { - br.close() - d.bufs.Put(bufs) - return nil, ErrMaxDecodedSizeExceeded - } - dst = append(dst, buf[:]...) 
- } - } - case 7: - const shift = 8 - 7 - for br.off >= 4 { - br.fillFast() - v := dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+0] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+1] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+2] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+3] = uint8(v.entry >> 8) - - off += 4 - if off == 0 { - if len(dst)+256 > maxDecodedSize { - br.close() - d.bufs.Put(bufs) - return nil, ErrMaxDecodedSizeExceeded - } - dst = append(dst, buf[:]...) - } - } - case 6: - const shift = 8 - 6 - for br.off >= 4 { - br.fillFast() - v := dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+0] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+1] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+2] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+3] = uint8(v.entry >> 8) - - off += 4 - if off == 0 { - if len(dst)+256 > maxDecodedSize { - d.bufs.Put(bufs) - br.close() - return nil, ErrMaxDecodedSizeExceeded - } - dst = append(dst, buf[:]...) - } - } - case 5: - const shift = 8 - 5 - for br.off >= 4 { - br.fillFast() - v := dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+0] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+1] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+2] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+3] = uint8(v.entry >> 8) - - off += 4 - if off == 0 { - if len(dst)+256 > maxDecodedSize { - d.bufs.Put(bufs) - br.close() - return nil, ErrMaxDecodedSizeExceeded - } - dst = append(dst, buf[:]...) - } - } - case 4: - const shift = 8 - 4 - for br.off >= 4 { - br.fillFast() - v := dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+0] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+1] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+2] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+3] = uint8(v.entry >> 8) - - off += 4 - if off == 0 { - if len(dst)+256 > maxDecodedSize { - d.bufs.Put(bufs) - br.close() - return nil, ErrMaxDecodedSizeExceeded - } - dst = append(dst, buf[:]...) - } - } - case 3: - const shift = 8 - 3 - for br.off >= 4 { - br.fillFast() - v := dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+0] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+1] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+2] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+3] = uint8(v.entry >> 8) - - off += 4 - if off == 0 { - if len(dst)+256 > maxDecodedSize { - d.bufs.Put(bufs) - br.close() - return nil, ErrMaxDecodedSizeExceeded - } - dst = append(dst, buf[:]...) 
- } - } - case 2: - const shift = 8 - 2 - for br.off >= 4 { - br.fillFast() - v := dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+0] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+1] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+2] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+3] = uint8(v.entry >> 8) - - off += 4 - if off == 0 { - if len(dst)+256 > maxDecodedSize { - d.bufs.Put(bufs) - br.close() - return nil, ErrMaxDecodedSizeExceeded - } - dst = append(dst, buf[:]...) - } - } - case 1: - const shift = 8 - 1 - for br.off >= 4 { - br.fillFast() - v := dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+0] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+1] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+2] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+3] = uint8(v.entry >> 8) - - off += 4 - if off == 0 { - if len(dst)+256 > maxDecodedSize { - d.bufs.Put(bufs) - br.close() - return nil, ErrMaxDecodedSizeExceeded - } - dst = append(dst, buf[:]...) - } - } - default: - d.bufs.Put(bufs) - return nil, fmt.Errorf("invalid tablelog: %d", d.actualTableLog) - } - - if len(dst)+int(off) > maxDecodedSize { - d.bufs.Put(bufs) - br.close() - return nil, ErrMaxDecodedSizeExceeded - } - dst = append(dst, buf[:off]...) - - // br < 4, so uint8 is fine - bitsLeft := int8(uint8(br.off)*8 + (64 - br.bitsRead)) - shift := (8 - d.actualTableLog) & 7 - - for bitsLeft > 0 { - if br.bitsRead >= 64-8 { - for br.off > 0 { - br.value |= uint64(br.in[br.off-1]) << (br.bitsRead - 8) - br.bitsRead -= 8 - br.off-- - } - } - if len(dst) >= maxDecodedSize { - br.close() - d.bufs.Put(bufs) - return nil, ErrMaxDecodedSizeExceeded - } - v := dt[br.peekByteFast()>>shift] - nBits := uint8(v.entry) - br.advance(nBits) - bitsLeft -= int8(nBits) - dst = append(dst, uint8(v.entry>>8)) - } - d.bufs.Put(bufs) - return dst, br.close() -} - -// decompress1X8Bit will decompress a 1X encoded stream with tablelog <= 8. -// The cap of the output buffer will be the maximum decompressed size. -// The length of the supplied input must match the end of a block exactly. -func (d *Decoder) decompress1X8BitExactly(dst, src []byte) ([]byte, error) { - var br bitReaderBytes - err := br.init(src) - if err != nil { - return dst, err - } - maxDecodedSize := cap(dst) - dst = dst[:0] - - // Avoid bounds check by always having full sized table. - dt := d.dt.single[:256] - - // Use temp table to avoid bound checks/append penalty. - bufs := d.buffer() - buf := &bufs[0] - var off uint8 - - const shift = 56 - - //fmt.Printf("mask: %b, tl:%d\n", mask, d.actualTableLog) - for br.off >= 4 { - br.fillFast() - v := dt[uint8(br.value>>shift)] - br.advance(uint8(v.entry)) - buf[off+0] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>shift)] - br.advance(uint8(v.entry)) - buf[off+1] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>shift)] - br.advance(uint8(v.entry)) - buf[off+2] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>shift)] - br.advance(uint8(v.entry)) - buf[off+3] = uint8(v.entry >> 8) - - off += 4 - if off == 0 { - if len(dst)+256 > maxDecodedSize { - d.bufs.Put(bufs) - br.close() - return nil, ErrMaxDecodedSizeExceeded - } - dst = append(dst, buf[:]...) 
- } - } - - if len(dst)+int(off) > maxDecodedSize { - d.bufs.Put(bufs) - br.close() - return nil, ErrMaxDecodedSizeExceeded - } - dst = append(dst, buf[:off]...) - - // br < 4, so uint8 is fine - bitsLeft := int8(uint8(br.off)*8 + (64 - br.bitsRead)) - for bitsLeft > 0 { - if br.bitsRead >= 64-8 { - for br.off > 0 { - br.value |= uint64(br.in[br.off-1]) << (br.bitsRead - 8) - br.bitsRead -= 8 - br.off-- - } - } - if len(dst) >= maxDecodedSize { - d.bufs.Put(bufs) - br.close() - return nil, ErrMaxDecodedSizeExceeded - } - v := dt[br.peekByteFast()] - nBits := uint8(v.entry) - br.advance(nBits) - bitsLeft -= int8(nBits) - dst = append(dst, uint8(v.entry>>8)) - } - d.bufs.Put(bufs) - return dst, br.close() -} - -// Decompress4X will decompress a 4X encoded stream. -// The length of the supplied input must match the end of a block exactly. -// The *capacity* of the dst slice must match the destination size of -// the uncompressed data exactly. -func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) { - if d.actualTableLog == 8 { - return d.decompress4X8bitExactly(dst, src) - } - - var br [4]bitReaderBytes - start := 6 - for i := 0; i < 3; i++ { - length := int(src[i*2]) | (int(src[i*2+1]) << 8) - if start+length >= len(src) { - return nil, errors.New("truncated input (or invalid offset)") - } - err := br[i].init(src[start : start+length]) - if err != nil { - return nil, err - } - start += length - } - err := br[3].init(src[start:]) - if err != nil { - return nil, err - } - - // destination, offset to match first output - dstSize := cap(dst) - dst = dst[:dstSize] - out := dst - dstEvery := (dstSize + 3) / 4 - - shift := (56 + (8 - d.actualTableLog)) & 63 - - const tlSize = 1 << 8 - single := d.dt.single[:tlSize] - - // Use temp table to avoid bound checks/append penalty. - buf := d.buffer() - var off uint8 - var decoded int - - // Decode 4 values from each decoder/loop. - const bufoff = 256 - for { - if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 { - break - } - - { - // Interleave 2 decodes. 
- const stream = 0 - const stream2 = 1 - br1 := &br[stream] - br2 := &br[stream2] - br1.fillFast() - br2.fillFast() - - v := single[uint8(br1.value>>shift)].entry - v2 := single[uint8(br2.value>>shift)].entry - br1.bitsRead += uint8(v) - br1.value <<= v & 63 - br2.bitsRead += uint8(v2) - br2.value <<= v2 & 63 - buf[stream][off] = uint8(v >> 8) - buf[stream2][off] = uint8(v2 >> 8) - - v = single[uint8(br1.value>>shift)].entry - v2 = single[uint8(br2.value>>shift)].entry - br1.bitsRead += uint8(v) - br1.value <<= v & 63 - br2.bitsRead += uint8(v2) - br2.value <<= v2 & 63 - buf[stream][off+1] = uint8(v >> 8) - buf[stream2][off+1] = uint8(v2 >> 8) - - v = single[uint8(br1.value>>shift)].entry - v2 = single[uint8(br2.value>>shift)].entry - br1.bitsRead += uint8(v) - br1.value <<= v & 63 - br2.bitsRead += uint8(v2) - br2.value <<= v2 & 63 - buf[stream][off+2] = uint8(v >> 8) - buf[stream2][off+2] = uint8(v2 >> 8) - - v = single[uint8(br1.value>>shift)].entry - v2 = single[uint8(br2.value>>shift)].entry - br1.bitsRead += uint8(v) - br1.value <<= v & 63 - br2.bitsRead += uint8(v2) - br2.value <<= v2 & 63 - buf[stream][off+3] = uint8(v >> 8) - buf[stream2][off+3] = uint8(v2 >> 8) - } - - { - const stream = 2 - const stream2 = 3 - br1 := &br[stream] - br2 := &br[stream2] - br1.fillFast() - br2.fillFast() - - v := single[uint8(br1.value>>shift)].entry - v2 := single[uint8(br2.value>>shift)].entry - br1.bitsRead += uint8(v) - br1.value <<= v & 63 - br2.bitsRead += uint8(v2) - br2.value <<= v2 & 63 - buf[stream][off] = uint8(v >> 8) - buf[stream2][off] = uint8(v2 >> 8) - - v = single[uint8(br1.value>>shift)].entry - v2 = single[uint8(br2.value>>shift)].entry - br1.bitsRead += uint8(v) - br1.value <<= v & 63 - br2.bitsRead += uint8(v2) - br2.value <<= v2 & 63 - buf[stream][off+1] = uint8(v >> 8) - buf[stream2][off+1] = uint8(v2 >> 8) - - v = single[uint8(br1.value>>shift)].entry - v2 = single[uint8(br2.value>>shift)].entry - br1.bitsRead += uint8(v) - br1.value <<= v & 63 - br2.bitsRead += uint8(v2) - br2.value <<= v2 & 63 - buf[stream][off+2] = uint8(v >> 8) - buf[stream2][off+2] = uint8(v2 >> 8) - - v = single[uint8(br1.value>>shift)].entry - v2 = single[uint8(br2.value>>shift)].entry - br1.bitsRead += uint8(v) - br1.value <<= v & 63 - br2.bitsRead += uint8(v2) - br2.value <<= v2 & 63 - buf[stream][off+3] = uint8(v >> 8) - buf[stream2][off+3] = uint8(v2 >> 8) - } - - off += 4 - - if off == 0 { - if bufoff > dstEvery { - d.bufs.Put(buf) - return nil, errors.New("corruption detected: stream overrun 1") - } - // There must at least be 3 buffers left. - if len(out)-bufoff < dstEvery*3 { - d.bufs.Put(buf) - return nil, errors.New("corruption detected: stream overrun 2") - } - //copy(out, buf[0][:]) - //copy(out[dstEvery:], buf[1][:]) - //copy(out[dstEvery*2:], buf[2][:]) - *(*[bufoff]byte)(out) = buf[0] - *(*[bufoff]byte)(out[dstEvery:]) = buf[1] - *(*[bufoff]byte)(out[dstEvery*2:]) = buf[2] - *(*[bufoff]byte)(out[dstEvery*3:]) = buf[3] - out = out[bufoff:] - decoded += bufoff * 4 - } - } - if off > 0 { - ioff := int(off) - if len(out) < dstEvery*3+ioff { - d.bufs.Put(buf) - return nil, errors.New("corruption detected: stream overrun 3") - } - copy(out, buf[0][:off]) - copy(out[dstEvery:], buf[1][:off]) - copy(out[dstEvery*2:], buf[2][:off]) - copy(out[dstEvery*3:], buf[3][:off]) - decoded += int(off) * 4 - out = out[off:] - } - - // Decode remaining. - // Decode remaining. 
- remainBytes := dstEvery - (decoded / 4) - for i := range br { - offset := dstEvery * i - endsAt := offset + remainBytes - if endsAt > len(out) { - endsAt = len(out) - } - br := &br[i] - bitsLeft := br.remaining() - for bitsLeft > 0 { - if br.finished() { - d.bufs.Put(buf) - return nil, io.ErrUnexpectedEOF - } - if br.bitsRead >= 56 { - if br.off >= 4 { - v := br.in[br.off-4:] - v = v[:4] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) - br.value |= uint64(low) << (br.bitsRead - 32) - br.bitsRead -= 32 - br.off -= 4 - } else { - for br.off > 0 { - br.value |= uint64(br.in[br.off-1]) << (br.bitsRead - 8) - br.bitsRead -= 8 - br.off-- - } - } - } - // end inline... - if offset >= endsAt { - d.bufs.Put(buf) - return nil, errors.New("corruption detected: stream overrun 4") - } - - // Read value and increment offset. - v := single[uint8(br.value>>shift)].entry - nBits := uint8(v) - br.advance(nBits) - bitsLeft -= uint(nBits) - out[offset] = uint8(v >> 8) - offset++ - } - if offset != endsAt { - d.bufs.Put(buf) - return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt) - } - decoded += offset - dstEvery*i - err = br.close() - if err != nil { - d.bufs.Put(buf) - return nil, err - } - } - d.bufs.Put(buf) - if dstSize != decoded { - return nil, errors.New("corruption detected: short output block") - } - return dst, nil -} - -// Decompress4X will decompress a 4X encoded stream. -// The length of the supplied input must match the end of a block exactly. -// The *capacity* of the dst slice must match the destination size of -// the uncompressed data exactly. -func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) { - var br [4]bitReaderBytes - start := 6 - for i := 0; i < 3; i++ { - length := int(src[i*2]) | (int(src[i*2+1]) << 8) - if start+length >= len(src) { - return nil, errors.New("truncated input (or invalid offset)") - } - err := br[i].init(src[start : start+length]) - if err != nil { - return nil, err - } - start += length - } - err := br[3].init(src[start:]) - if err != nil { - return nil, err - } - - // destination, offset to match first output - dstSize := cap(dst) - dst = dst[:dstSize] - out := dst - dstEvery := (dstSize + 3) / 4 - - const shift = 56 - const tlSize = 1 << 8 - single := d.dt.single[:tlSize] - - // Use temp table to avoid bound checks/append penalty. - buf := d.buffer() - var off uint8 - var decoded int - - // Decode 4 values from each decoder/loop. - const bufoff = 256 - for { - if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 { - break - } - - { - // Interleave 2 decodes. 
- const stream = 0 - const stream2 = 1 - br1 := &br[stream] - br2 := &br[stream2] - br1.fillFast() - br2.fillFast() - - v := single[uint8(br1.value>>shift)].entry - v2 := single[uint8(br2.value>>shift)].entry - br1.bitsRead += uint8(v) - br1.value <<= v & 63 - br2.bitsRead += uint8(v2) - br2.value <<= v2 & 63 - buf[stream][off] = uint8(v >> 8) - buf[stream2][off] = uint8(v2 >> 8) - - v = single[uint8(br1.value>>shift)].entry - v2 = single[uint8(br2.value>>shift)].entry - br1.bitsRead += uint8(v) - br1.value <<= v & 63 - br2.bitsRead += uint8(v2) - br2.value <<= v2 & 63 - buf[stream][off+1] = uint8(v >> 8) - buf[stream2][off+1] = uint8(v2 >> 8) - - v = single[uint8(br1.value>>shift)].entry - v2 = single[uint8(br2.value>>shift)].entry - br1.bitsRead += uint8(v) - br1.value <<= v & 63 - br2.bitsRead += uint8(v2) - br2.value <<= v2 & 63 - buf[stream][off+2] = uint8(v >> 8) - buf[stream2][off+2] = uint8(v2 >> 8) - - v = single[uint8(br1.value>>shift)].entry - v2 = single[uint8(br2.value>>shift)].entry - br1.bitsRead += uint8(v) - br1.value <<= v & 63 - br2.bitsRead += uint8(v2) - br2.value <<= v2 & 63 - buf[stream][off+3] = uint8(v >> 8) - buf[stream2][off+3] = uint8(v2 >> 8) - } - - { - const stream = 2 - const stream2 = 3 - br1 := &br[stream] - br2 := &br[stream2] - br1.fillFast() - br2.fillFast() - - v := single[uint8(br1.value>>shift)].entry - v2 := single[uint8(br2.value>>shift)].entry - br1.bitsRead += uint8(v) - br1.value <<= v & 63 - br2.bitsRead += uint8(v2) - br2.value <<= v2 & 63 - buf[stream][off] = uint8(v >> 8) - buf[stream2][off] = uint8(v2 >> 8) - - v = single[uint8(br1.value>>shift)].entry - v2 = single[uint8(br2.value>>shift)].entry - br1.bitsRead += uint8(v) - br1.value <<= v & 63 - br2.bitsRead += uint8(v2) - br2.value <<= v2 & 63 - buf[stream][off+1] = uint8(v >> 8) - buf[stream2][off+1] = uint8(v2 >> 8) - - v = single[uint8(br1.value>>shift)].entry - v2 = single[uint8(br2.value>>shift)].entry - br1.bitsRead += uint8(v) - br1.value <<= v & 63 - br2.bitsRead += uint8(v2) - br2.value <<= v2 & 63 - buf[stream][off+2] = uint8(v >> 8) - buf[stream2][off+2] = uint8(v2 >> 8) - - v = single[uint8(br1.value>>shift)].entry - v2 = single[uint8(br2.value>>shift)].entry - br1.bitsRead += uint8(v) - br1.value <<= v & 63 - br2.bitsRead += uint8(v2) - br2.value <<= v2 & 63 - buf[stream][off+3] = uint8(v >> 8) - buf[stream2][off+3] = uint8(v2 >> 8) - } - - off += 4 - - if off == 0 { - if bufoff > dstEvery { - d.bufs.Put(buf) - return nil, errors.New("corruption detected: stream overrun 1") - } - // There must at least be 3 buffers left. - if len(out)-bufoff < dstEvery*3 { - d.bufs.Put(buf) - return nil, errors.New("corruption detected: stream overrun 2") - } - - //copy(out, buf[0][:]) - //copy(out[dstEvery:], buf[1][:]) - //copy(out[dstEvery*2:], buf[2][:]) - // copy(out[dstEvery*3:], buf[3][:]) - *(*[bufoff]byte)(out) = buf[0] - *(*[bufoff]byte)(out[dstEvery:]) = buf[1] - *(*[bufoff]byte)(out[dstEvery*2:]) = buf[2] - *(*[bufoff]byte)(out[dstEvery*3:]) = buf[3] - out = out[bufoff:] - decoded += bufoff * 4 - } - } - if off > 0 { - ioff := int(off) - if len(out) < dstEvery*3+ioff { - return nil, errors.New("corruption detected: stream overrun 3") - } - copy(out, buf[0][:off]) - copy(out[dstEvery:], buf[1][:off]) - copy(out[dstEvery*2:], buf[2][:off]) - copy(out[dstEvery*3:], buf[3][:off]) - decoded += int(off) * 4 - out = out[off:] - } - - // Decode remaining. 
- remainBytes := dstEvery - (decoded / 4) - for i := range br { - offset := dstEvery * i - endsAt := offset + remainBytes - if endsAt > len(out) { - endsAt = len(out) - } - br := &br[i] - bitsLeft := br.remaining() - for bitsLeft > 0 { - if br.finished() { - d.bufs.Put(buf) - return nil, io.ErrUnexpectedEOF - } - if br.bitsRead >= 56 { - if br.off >= 4 { - v := br.in[br.off-4:] - v = v[:4] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) - br.value |= uint64(low) << (br.bitsRead - 32) - br.bitsRead -= 32 - br.off -= 4 - } else { - for br.off > 0 { - br.value |= uint64(br.in[br.off-1]) << (br.bitsRead - 8) - br.bitsRead -= 8 - br.off-- - } - } - } - // end inline... - if offset >= endsAt { - d.bufs.Put(buf) - return nil, errors.New("corruption detected: stream overrun 4") - } - - // Read value and increment offset. - v := single[br.peekByteFast()].entry - nBits := uint8(v) - br.advance(nBits) - bitsLeft -= uint(nBits) - out[offset] = uint8(v >> 8) - offset++ - } - if offset != endsAt { - d.bufs.Put(buf) - return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt) - } - - decoded += offset - dstEvery*i - err = br.close() - if err != nil { - d.bufs.Put(buf) - return nil, err - } - } - d.bufs.Put(buf) - if dstSize != decoded { - return nil, errors.New("corruption detected: short output block") - } - return dst, nil -} - -// matches will compare a decoding table to a coding table. -// Errors are written to the writer. -// Nothing will be written if table is ok. -func (s *Scratch) matches(ct cTable, w io.Writer) { - if s == nil || len(s.dt.single) == 0 { - return - } - dt := s.dt.single[:1<>8) == byte(sym) { - fmt.Fprintf(w, "symbol %x has decoder, but no encoder\n", sym) - errs++ - break - } - } - if errs == 0 { - broken-- - } - continue - } - // Unused bits in input - ub := tablelog - enc.nBits - top := enc.val << ub - // decoder looks at top bits. - dec := dt[top] - if uint8(dec.entry) != enc.nBits { - fmt.Fprintf(w, "symbol 0x%x bit size mismatch (enc: %d, dec:%d).\n", sym, enc.nBits, uint8(dec.entry)) - errs++ - } - if uint8(dec.entry>>8) != uint8(sym) { - fmt.Fprintf(w, "symbol 0x%x decoder output mismatch (enc: %d, dec:%d).\n", sym, sym, uint8(dec.entry>>8)) - errs++ - } - if errs > 0 { - fmt.Fprintf(w, "%d errros in base, stopping\n", errs) - continue - } - // Ensure that all combinations are covered. 
- for i := uint16(0); i < (1 << ub); i++ { - vval := top | i - dec := dt[vval] - if uint8(dec.entry) != enc.nBits { - fmt.Fprintf(w, "symbol 0x%x bit size mismatch (enc: %d, dec:%d).\n", vval, enc.nBits, uint8(dec.entry)) - errs++ - } - if uint8(dec.entry>>8) != uint8(sym) { - fmt.Fprintf(w, "symbol 0x%x decoder output mismatch (enc: %d, dec:%d).\n", vval, sym, uint8(dec.entry>>8)) - errs++ - } - if errs > 20 { - fmt.Fprintf(w, "%d errros, stopping\n", errs) - break - } - } - if errs == 0 { - ok++ - broken-- - } - } - if broken > 0 { - fmt.Fprintf(w, "%d broken, %d ok\n", broken, ok) - } -} diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go deleted file mode 100644 index ba7e8e6b0..000000000 --- a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go +++ /dev/null @@ -1,226 +0,0 @@ -//go:build amd64 && !appengine && !noasm && gc -// +build amd64,!appengine,!noasm,gc - -// This file contains the specialisation of Decoder.Decompress4X -// and Decoder.Decompress1X that use an asm implementation of thir main loops. -package huff0 - -import ( - "errors" - "fmt" - - "github.com/klauspost/compress/internal/cpuinfo" -) - -// decompress4x_main_loop_x86 is an x86 assembler implementation -// of Decompress4X when tablelog > 8. -// -//go:noescape -func decompress4x_main_loop_amd64(ctx *decompress4xContext) - -// decompress4x_8b_loop_x86 is an x86 assembler implementation -// of Decompress4X when tablelog <= 8 which decodes 4 entries -// per loop. -// -//go:noescape -func decompress4x_8b_main_loop_amd64(ctx *decompress4xContext) - -// fallback8BitSize is the size where using Go version is faster. -const fallback8BitSize = 800 - -type decompress4xContext struct { - pbr *[4]bitReaderShifted - peekBits uint8 - out *byte - dstEvery int - tbl *dEntrySingle - decoded int - limit *byte -} - -// Decompress4X will decompress a 4X encoded stream. -// The length of the supplied input must match the end of a block exactly. -// The *capacity* of the dst slice must match the destination size of -// the uncompressed data exactly. 
-func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) { - if len(d.dt.single) == 0 { - return nil, errors.New("no table loaded") - } - if len(src) < 6+(4*1) { - return nil, errors.New("input too small") - } - - use8BitTables := d.actualTableLog <= 8 - if cap(dst) < fallback8BitSize && use8BitTables { - return d.decompress4X8bit(dst, src) - } - - var br [4]bitReaderShifted - // Decode "jump table" - start := 6 - for i := 0; i < 3; i++ { - length := int(src[i*2]) | (int(src[i*2+1]) << 8) - if start+length >= len(src) { - return nil, errors.New("truncated input (or invalid offset)") - } - err := br[i].init(src[start : start+length]) - if err != nil { - return nil, err - } - start += length - } - err := br[3].init(src[start:]) - if err != nil { - return nil, err - } - - // destination, offset to match first output - dstSize := cap(dst) - dst = dst[:dstSize] - out := dst - dstEvery := (dstSize + 3) / 4 - - const tlSize = 1 << tableLogMax - const tlMask = tlSize - 1 - single := d.dt.single[:tlSize] - - var decoded int - - if len(out) > 4*4 && !(br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4) { - ctx := decompress4xContext{ - pbr: &br, - peekBits: uint8((64 - d.actualTableLog) & 63), // see: bitReaderShifted.peekBitsFast() - out: &out[0], - dstEvery: dstEvery, - tbl: &single[0], - limit: &out[dstEvery-4], // Always stop decoding when first buffer gets here to avoid writing OOB on last. - } - if use8BitTables { - decompress4x_8b_main_loop_amd64(&ctx) - } else { - decompress4x_main_loop_amd64(&ctx) - } - - decoded = ctx.decoded - out = out[decoded/4:] - } - - // Decode remaining. - remainBytes := dstEvery - (decoded / 4) - for i := range br { - offset := dstEvery * i - endsAt := offset + remainBytes - if endsAt > len(out) { - endsAt = len(out) - } - br := &br[i] - bitsLeft := br.remaining() - for bitsLeft > 0 { - br.fill() - if offset >= endsAt { - return nil, errors.New("corruption detected: stream overrun 4") - } - - // Read value and increment offset. - val := br.peekBitsFast(d.actualTableLog) - v := single[val&tlMask].entry - nBits := uint8(v) - br.advance(nBits) - bitsLeft -= uint(nBits) - out[offset] = uint8(v >> 8) - offset++ - } - if offset != endsAt { - return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt) - } - decoded += offset - dstEvery*i - err = br.close() - if err != nil { - return nil, err - } - } - if dstSize != decoded { - return nil, errors.New("corruption detected: short output block") - } - return dst, nil -} - -// decompress4x_main_loop_x86 is an x86 assembler implementation -// of Decompress1X when tablelog > 8. -// -//go:noescape -func decompress1x_main_loop_amd64(ctx *decompress1xContext) - -// decompress4x_main_loop_x86 is an x86 with BMI2 assembler implementation -// of Decompress1X when tablelog > 8. -// -//go:noescape -func decompress1x_main_loop_bmi2(ctx *decompress1xContext) - -type decompress1xContext struct { - pbr *bitReaderShifted - peekBits uint8 - out *byte - outCap int - tbl *dEntrySingle - decoded int -} - -// Error reported by asm implementations -const error_max_decoded_size_exeeded = -1 - -// Decompress1X will decompress a 1X encoded stream. -// The cap of the output buffer will be the maximum decompressed size. -// The length of the supplied input must match the end of a block exactly. 
-func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) { - if len(d.dt.single) == 0 { - return nil, errors.New("no table loaded") - } - var br bitReaderShifted - err := br.init(src) - if err != nil { - return dst, err - } - maxDecodedSize := cap(dst) - dst = dst[:maxDecodedSize] - - const tlSize = 1 << tableLogMax - const tlMask = tlSize - 1 - - if maxDecodedSize >= 4 { - ctx := decompress1xContext{ - pbr: &br, - out: &dst[0], - outCap: maxDecodedSize, - peekBits: uint8((64 - d.actualTableLog) & 63), // see: bitReaderShifted.peekBitsFast() - tbl: &d.dt.single[0], - } - - if cpuinfo.HasBMI2() { - decompress1x_main_loop_bmi2(&ctx) - } else { - decompress1x_main_loop_amd64(&ctx) - } - if ctx.decoded == error_max_decoded_size_exeeded { - return nil, ErrMaxDecodedSizeExceeded - } - - dst = dst[:ctx.decoded] - } - - // br < 8, so uint8 is fine - bitsLeft := uint8(br.off)*8 + 64 - br.bitsRead - for bitsLeft > 0 { - br.fill() - if len(dst) >= maxDecodedSize { - br.close() - return nil, ErrMaxDecodedSizeExceeded - } - v := d.dt.single[br.peekBitsFast(d.actualTableLog)&tlMask] - nBits := uint8(v.entry) - br.advance(nBits) - bitsLeft -= nBits - dst = append(dst, uint8(v.entry>>8)) - } - return dst, br.close() -} diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s deleted file mode 100644 index c4c7ab2d1..000000000 --- a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s +++ /dev/null @@ -1,830 +0,0 @@ -// Code generated by command: go run gen.go -out ../decompress_amd64.s -pkg=huff0. DO NOT EDIT. - -//go:build amd64 && !appengine && !noasm && gc - -// func decompress4x_main_loop_amd64(ctx *decompress4xContext) -TEXT ·decompress4x_main_loop_amd64(SB), $0-8 - // Preload values - MOVQ ctx+0(FP), AX - MOVBQZX 8(AX), DI - MOVQ 16(AX), BX - MOVQ 48(AX), SI - MOVQ 24(AX), R8 - MOVQ 32(AX), R9 - MOVQ (AX), R10 - - // Main loop -main_loop: - XORL DX, DX - CMPQ BX, SI - SETGE DL - - // br0.fillFast32() - MOVQ 32(R10), R11 - MOVBQZX 40(R10), R12 - CMPQ R12, $0x20 - JBE skip_fill0 - MOVQ 24(R10), AX - SUBQ $0x20, R12 - SUBQ $0x04, AX - MOVQ (R10), R13 - - // b.value |= uint64(low) << (b.bitsRead & 63) - MOVL (AX)(R13*1), R13 - MOVQ R12, CX - SHLQ CL, R13 - MOVQ AX, 24(R10) - ORQ R13, R11 - - // exhausted += (br0.off < 4) - CMPQ AX, $0x04 - ADCB $+0, DL - -skip_fill0: - // val0 := br0.peekTopBits(peekBits) - MOVQ R11, R13 - MOVQ DI, CX - SHRQ CL, R13 - - // v0 := table[val0&mask] - MOVW (R9)(R13*2), CX - - // br0.advance(uint8(v0.entry) - MOVB CH, AL - SHLQ CL, R11 - ADDB CL, R12 - - // val1 := br0.peekTopBits(peekBits) - MOVQ DI, CX - MOVQ R11, R13 - SHRQ CL, R13 - - // v1 := table[val1&mask] - MOVW (R9)(R13*2), CX - - // br0.advance(uint8(v1.entry)) - MOVB CH, AH - SHLQ CL, R11 - ADDB CL, R12 - - // these two writes get coalesced - // out[id * dstEvery + 0] = uint8(v0.entry >> 8) - // out[id * dstEvery + 1] = uint8(v1.entry >> 8) - MOVW AX, (BX) - - // update the bitreader structure - MOVQ R11, 32(R10) - MOVB R12, 40(R10) - - // br1.fillFast32() - MOVQ 80(R10), R11 - MOVBQZX 88(R10), R12 - CMPQ R12, $0x20 - JBE skip_fill1 - MOVQ 72(R10), AX - SUBQ $0x20, R12 - SUBQ $0x04, AX - MOVQ 48(R10), R13 - - // b.value |= uint64(low) << (b.bitsRead & 63) - MOVL (AX)(R13*1), R13 - MOVQ R12, CX - SHLQ CL, R13 - MOVQ AX, 72(R10) - ORQ R13, R11 - - // exhausted += (br1.off < 4) - CMPQ AX, $0x04 - ADCB $+0, DL - -skip_fill1: - // val0 := br1.peekTopBits(peekBits) - MOVQ R11, R13 - MOVQ DI, CX - SHRQ CL, R13 - - // v0 := 
table[val0&mask] - MOVW (R9)(R13*2), CX - - // br1.advance(uint8(v0.entry) - MOVB CH, AL - SHLQ CL, R11 - ADDB CL, R12 - - // val1 := br1.peekTopBits(peekBits) - MOVQ DI, CX - MOVQ R11, R13 - SHRQ CL, R13 - - // v1 := table[val1&mask] - MOVW (R9)(R13*2), CX - - // br1.advance(uint8(v1.entry)) - MOVB CH, AH - SHLQ CL, R11 - ADDB CL, R12 - - // these two writes get coalesced - // out[id * dstEvery + 0] = uint8(v0.entry >> 8) - // out[id * dstEvery + 1] = uint8(v1.entry >> 8) - MOVW AX, (BX)(R8*1) - - // update the bitreader structure - MOVQ R11, 80(R10) - MOVB R12, 88(R10) - - // br2.fillFast32() - MOVQ 128(R10), R11 - MOVBQZX 136(R10), R12 - CMPQ R12, $0x20 - JBE skip_fill2 - MOVQ 120(R10), AX - SUBQ $0x20, R12 - SUBQ $0x04, AX - MOVQ 96(R10), R13 - - // b.value |= uint64(low) << (b.bitsRead & 63) - MOVL (AX)(R13*1), R13 - MOVQ R12, CX - SHLQ CL, R13 - MOVQ AX, 120(R10) - ORQ R13, R11 - - // exhausted += (br2.off < 4) - CMPQ AX, $0x04 - ADCB $+0, DL - -skip_fill2: - // val0 := br2.peekTopBits(peekBits) - MOVQ R11, R13 - MOVQ DI, CX - SHRQ CL, R13 - - // v0 := table[val0&mask] - MOVW (R9)(R13*2), CX - - // br2.advance(uint8(v0.entry) - MOVB CH, AL - SHLQ CL, R11 - ADDB CL, R12 - - // val1 := br2.peekTopBits(peekBits) - MOVQ DI, CX - MOVQ R11, R13 - SHRQ CL, R13 - - // v1 := table[val1&mask] - MOVW (R9)(R13*2), CX - - // br2.advance(uint8(v1.entry)) - MOVB CH, AH - SHLQ CL, R11 - ADDB CL, R12 - - // these two writes get coalesced - // out[id * dstEvery + 0] = uint8(v0.entry >> 8) - // out[id * dstEvery + 1] = uint8(v1.entry >> 8) - MOVW AX, (BX)(R8*2) - - // update the bitreader structure - MOVQ R11, 128(R10) - MOVB R12, 136(R10) - - // br3.fillFast32() - MOVQ 176(R10), R11 - MOVBQZX 184(R10), R12 - CMPQ R12, $0x20 - JBE skip_fill3 - MOVQ 168(R10), AX - SUBQ $0x20, R12 - SUBQ $0x04, AX - MOVQ 144(R10), R13 - - // b.value |= uint64(low) << (b.bitsRead & 63) - MOVL (AX)(R13*1), R13 - MOVQ R12, CX - SHLQ CL, R13 - MOVQ AX, 168(R10) - ORQ R13, R11 - - // exhausted += (br3.off < 4) - CMPQ AX, $0x04 - ADCB $+0, DL - -skip_fill3: - // val0 := br3.peekTopBits(peekBits) - MOVQ R11, R13 - MOVQ DI, CX - SHRQ CL, R13 - - // v0 := table[val0&mask] - MOVW (R9)(R13*2), CX - - // br3.advance(uint8(v0.entry) - MOVB CH, AL - SHLQ CL, R11 - ADDB CL, R12 - - // val1 := br3.peekTopBits(peekBits) - MOVQ DI, CX - MOVQ R11, R13 - SHRQ CL, R13 - - // v1 := table[val1&mask] - MOVW (R9)(R13*2), CX - - // br3.advance(uint8(v1.entry)) - MOVB CH, AH - SHLQ CL, R11 - ADDB CL, R12 - - // these two writes get coalesced - // out[id * dstEvery + 0] = uint8(v0.entry >> 8) - // out[id * dstEvery + 1] = uint8(v1.entry >> 8) - LEAQ (R8)(R8*2), CX - MOVW AX, (BX)(CX*1) - - // update the bitreader structure - MOVQ R11, 176(R10) - MOVB R12, 184(R10) - ADDQ $0x02, BX - TESTB DL, DL - JZ main_loop - MOVQ ctx+0(FP), AX - SUBQ 16(AX), BX - SHLQ $0x02, BX - MOVQ BX, 40(AX) - RET - -// func decompress4x_8b_main_loop_amd64(ctx *decompress4xContext) -TEXT ·decompress4x_8b_main_loop_amd64(SB), $0-8 - // Preload values - MOVQ ctx+0(FP), CX - MOVBQZX 8(CX), DI - MOVQ 16(CX), BX - MOVQ 48(CX), SI - MOVQ 24(CX), R8 - MOVQ 32(CX), R9 - MOVQ (CX), R10 - - // Main loop -main_loop: - XORL DX, DX - CMPQ BX, SI - SETGE DL - - // br0.fillFast32() - MOVQ 32(R10), R11 - MOVBQZX 40(R10), R12 - CMPQ R12, $0x20 - JBE skip_fill0 - MOVQ 24(R10), R13 - SUBQ $0x20, R12 - SUBQ $0x04, R13 - MOVQ (R10), R14 - - // b.value |= uint64(low) << (b.bitsRead & 63) - MOVL (R13)(R14*1), R14 - MOVQ R12, CX - SHLQ CL, R14 - MOVQ R13, 24(R10) - ORQ R14, R11 - - // exhausted += 
(br0.off < 4) - CMPQ R13, $0x04 - ADCB $+0, DL - -skip_fill0: - // val0 := br0.peekTopBits(peekBits) - MOVQ R11, R13 - MOVQ DI, CX - SHRQ CL, R13 - - // v0 := table[val0&mask] - MOVW (R9)(R13*2), CX - - // br0.advance(uint8(v0.entry) - MOVB CH, AL - SHLQ CL, R11 - ADDB CL, R12 - - // val1 := br0.peekTopBits(peekBits) - MOVQ R11, R13 - MOVQ DI, CX - SHRQ CL, R13 - - // v1 := table[val0&mask] - MOVW (R9)(R13*2), CX - - // br0.advance(uint8(v1.entry) - MOVB CH, AH - SHLQ CL, R11 - ADDB CL, R12 - BSWAPL AX - - // val2 := br0.peekTopBits(peekBits) - MOVQ R11, R13 - MOVQ DI, CX - SHRQ CL, R13 - - // v2 := table[val0&mask] - MOVW (R9)(R13*2), CX - - // br0.advance(uint8(v2.entry) - MOVB CH, AH - SHLQ CL, R11 - ADDB CL, R12 - - // val3 := br0.peekTopBits(peekBits) - MOVQ R11, R13 - MOVQ DI, CX - SHRQ CL, R13 - - // v3 := table[val0&mask] - MOVW (R9)(R13*2), CX - - // br0.advance(uint8(v3.entry) - MOVB CH, AL - SHLQ CL, R11 - ADDB CL, R12 - BSWAPL AX - - // these four writes get coalesced - // out[id * dstEvery + 0] = uint8(v0.entry >> 8) - // out[id * dstEvery + 1] = uint8(v1.entry >> 8) - // out[id * dstEvery + 3] = uint8(v2.entry >> 8) - // out[id * dstEvery + 4] = uint8(v3.entry >> 8) - MOVL AX, (BX) - - // update the bitreader structure - MOVQ R11, 32(R10) - MOVB R12, 40(R10) - - // br1.fillFast32() - MOVQ 80(R10), R11 - MOVBQZX 88(R10), R12 - CMPQ R12, $0x20 - JBE skip_fill1 - MOVQ 72(R10), R13 - SUBQ $0x20, R12 - SUBQ $0x04, R13 - MOVQ 48(R10), R14 - - // b.value |= uint64(low) << (b.bitsRead & 63) - MOVL (R13)(R14*1), R14 - MOVQ R12, CX - SHLQ CL, R14 - MOVQ R13, 72(R10) - ORQ R14, R11 - - // exhausted += (br1.off < 4) - CMPQ R13, $0x04 - ADCB $+0, DL - -skip_fill1: - // val0 := br1.peekTopBits(peekBits) - MOVQ R11, R13 - MOVQ DI, CX - SHRQ CL, R13 - - // v0 := table[val0&mask] - MOVW (R9)(R13*2), CX - - // br1.advance(uint8(v0.entry) - MOVB CH, AL - SHLQ CL, R11 - ADDB CL, R12 - - // val1 := br1.peekTopBits(peekBits) - MOVQ R11, R13 - MOVQ DI, CX - SHRQ CL, R13 - - // v1 := table[val0&mask] - MOVW (R9)(R13*2), CX - - // br1.advance(uint8(v1.entry) - MOVB CH, AH - SHLQ CL, R11 - ADDB CL, R12 - BSWAPL AX - - // val2 := br1.peekTopBits(peekBits) - MOVQ R11, R13 - MOVQ DI, CX - SHRQ CL, R13 - - // v2 := table[val0&mask] - MOVW (R9)(R13*2), CX - - // br1.advance(uint8(v2.entry) - MOVB CH, AH - SHLQ CL, R11 - ADDB CL, R12 - - // val3 := br1.peekTopBits(peekBits) - MOVQ R11, R13 - MOVQ DI, CX - SHRQ CL, R13 - - // v3 := table[val0&mask] - MOVW (R9)(R13*2), CX - - // br1.advance(uint8(v3.entry) - MOVB CH, AL - SHLQ CL, R11 - ADDB CL, R12 - BSWAPL AX - - // these four writes get coalesced - // out[id * dstEvery + 0] = uint8(v0.entry >> 8) - // out[id * dstEvery + 1] = uint8(v1.entry >> 8) - // out[id * dstEvery + 3] = uint8(v2.entry >> 8) - // out[id * dstEvery + 4] = uint8(v3.entry >> 8) - MOVL AX, (BX)(R8*1) - - // update the bitreader structure - MOVQ R11, 80(R10) - MOVB R12, 88(R10) - - // br2.fillFast32() - MOVQ 128(R10), R11 - MOVBQZX 136(R10), R12 - CMPQ R12, $0x20 - JBE skip_fill2 - MOVQ 120(R10), R13 - SUBQ $0x20, R12 - SUBQ $0x04, R13 - MOVQ 96(R10), R14 - - // b.value |= uint64(low) << (b.bitsRead & 63) - MOVL (R13)(R14*1), R14 - MOVQ R12, CX - SHLQ CL, R14 - MOVQ R13, 120(R10) - ORQ R14, R11 - - // exhausted += (br2.off < 4) - CMPQ R13, $0x04 - ADCB $+0, DL - -skip_fill2: - // val0 := br2.peekTopBits(peekBits) - MOVQ R11, R13 - MOVQ DI, CX - SHRQ CL, R13 - - // v0 := table[val0&mask] - MOVW (R9)(R13*2), CX - - // br2.advance(uint8(v0.entry) - MOVB CH, AL - SHLQ CL, R11 - ADDB CL, R12 - 
- // val1 := br2.peekTopBits(peekBits) - MOVQ R11, R13 - MOVQ DI, CX - SHRQ CL, R13 - - // v1 := table[val0&mask] - MOVW (R9)(R13*2), CX - - // br2.advance(uint8(v1.entry) - MOVB CH, AH - SHLQ CL, R11 - ADDB CL, R12 - BSWAPL AX - - // val2 := br2.peekTopBits(peekBits) - MOVQ R11, R13 - MOVQ DI, CX - SHRQ CL, R13 - - // v2 := table[val0&mask] - MOVW (R9)(R13*2), CX - - // br2.advance(uint8(v2.entry) - MOVB CH, AH - SHLQ CL, R11 - ADDB CL, R12 - - // val3 := br2.peekTopBits(peekBits) - MOVQ R11, R13 - MOVQ DI, CX - SHRQ CL, R13 - - // v3 := table[val0&mask] - MOVW (R9)(R13*2), CX - - // br2.advance(uint8(v3.entry) - MOVB CH, AL - SHLQ CL, R11 - ADDB CL, R12 - BSWAPL AX - - // these four writes get coalesced - // out[id * dstEvery + 0] = uint8(v0.entry >> 8) - // out[id * dstEvery + 1] = uint8(v1.entry >> 8) - // out[id * dstEvery + 3] = uint8(v2.entry >> 8) - // out[id * dstEvery + 4] = uint8(v3.entry >> 8) - MOVL AX, (BX)(R8*2) - - // update the bitreader structure - MOVQ R11, 128(R10) - MOVB R12, 136(R10) - - // br3.fillFast32() - MOVQ 176(R10), R11 - MOVBQZX 184(R10), R12 - CMPQ R12, $0x20 - JBE skip_fill3 - MOVQ 168(R10), R13 - SUBQ $0x20, R12 - SUBQ $0x04, R13 - MOVQ 144(R10), R14 - - // b.value |= uint64(low) << (b.bitsRead & 63) - MOVL (R13)(R14*1), R14 - MOVQ R12, CX - SHLQ CL, R14 - MOVQ R13, 168(R10) - ORQ R14, R11 - - // exhausted += (br3.off < 4) - CMPQ R13, $0x04 - ADCB $+0, DL - -skip_fill3: - // val0 := br3.peekTopBits(peekBits) - MOVQ R11, R13 - MOVQ DI, CX - SHRQ CL, R13 - - // v0 := table[val0&mask] - MOVW (R9)(R13*2), CX - - // br3.advance(uint8(v0.entry) - MOVB CH, AL - SHLQ CL, R11 - ADDB CL, R12 - - // val1 := br3.peekTopBits(peekBits) - MOVQ R11, R13 - MOVQ DI, CX - SHRQ CL, R13 - - // v1 := table[val0&mask] - MOVW (R9)(R13*2), CX - - // br3.advance(uint8(v1.entry) - MOVB CH, AH - SHLQ CL, R11 - ADDB CL, R12 - BSWAPL AX - - // val2 := br3.peekTopBits(peekBits) - MOVQ R11, R13 - MOVQ DI, CX - SHRQ CL, R13 - - // v2 := table[val0&mask] - MOVW (R9)(R13*2), CX - - // br3.advance(uint8(v2.entry) - MOVB CH, AH - SHLQ CL, R11 - ADDB CL, R12 - - // val3 := br3.peekTopBits(peekBits) - MOVQ R11, R13 - MOVQ DI, CX - SHRQ CL, R13 - - // v3 := table[val0&mask] - MOVW (R9)(R13*2), CX - - // br3.advance(uint8(v3.entry) - MOVB CH, AL - SHLQ CL, R11 - ADDB CL, R12 - BSWAPL AX - - // these four writes get coalesced - // out[id * dstEvery + 0] = uint8(v0.entry >> 8) - // out[id * dstEvery + 1] = uint8(v1.entry >> 8) - // out[id * dstEvery + 3] = uint8(v2.entry >> 8) - // out[id * dstEvery + 4] = uint8(v3.entry >> 8) - LEAQ (R8)(R8*2), CX - MOVL AX, (BX)(CX*1) - - // update the bitreader structure - MOVQ R11, 176(R10) - MOVB R12, 184(R10) - ADDQ $0x04, BX - TESTB DL, DL - JZ main_loop - MOVQ ctx+0(FP), AX - SUBQ 16(AX), BX - SHLQ $0x02, BX - MOVQ BX, 40(AX) - RET - -// func decompress1x_main_loop_amd64(ctx *decompress1xContext) -TEXT ·decompress1x_main_loop_amd64(SB), $0-8 - MOVQ ctx+0(FP), CX - MOVQ 16(CX), DX - MOVQ 24(CX), BX - CMPQ BX, $0x04 - JB error_max_decoded_size_exceeded - LEAQ (DX)(BX*1), BX - MOVQ (CX), SI - MOVQ (SI), R8 - MOVQ 24(SI), R9 - MOVQ 32(SI), R10 - MOVBQZX 40(SI), R11 - MOVQ 32(CX), SI - MOVBQZX 8(CX), DI - JMP loop_condition - -main_loop: - // Check if we have room for 4 bytes in the output buffer - LEAQ 4(DX), CX - CMPQ CX, BX - JGE error_max_decoded_size_exceeded - - // Decode 4 values - CMPQ R11, $0x20 - JL bitReader_fillFast_1_end - SUBQ $0x20, R11 - SUBQ $0x04, R9 - MOVL (R8)(R9*1), R12 - MOVQ R11, CX - SHLQ CL, R12 - ORQ R12, R10 - 
-bitReader_fillFast_1_end: - MOVQ DI, CX - MOVQ R10, R12 - SHRQ CL, R12 - MOVW (SI)(R12*2), CX - MOVB CH, AL - MOVBQZX CL, CX - ADDQ CX, R11 - SHLQ CL, R10 - MOVQ DI, CX - MOVQ R10, R12 - SHRQ CL, R12 - MOVW (SI)(R12*2), CX - MOVB CH, AH - MOVBQZX CL, CX - ADDQ CX, R11 - SHLQ CL, R10 - BSWAPL AX - CMPQ R11, $0x20 - JL bitReader_fillFast_2_end - SUBQ $0x20, R11 - SUBQ $0x04, R9 - MOVL (R8)(R9*1), R12 - MOVQ R11, CX - SHLQ CL, R12 - ORQ R12, R10 - -bitReader_fillFast_2_end: - MOVQ DI, CX - MOVQ R10, R12 - SHRQ CL, R12 - MOVW (SI)(R12*2), CX - MOVB CH, AH - MOVBQZX CL, CX - ADDQ CX, R11 - SHLQ CL, R10 - MOVQ DI, CX - MOVQ R10, R12 - SHRQ CL, R12 - MOVW (SI)(R12*2), CX - MOVB CH, AL - MOVBQZX CL, CX - ADDQ CX, R11 - SHLQ CL, R10 - BSWAPL AX - - // Store the decoded values - MOVL AX, (DX) - ADDQ $0x04, DX - -loop_condition: - CMPQ R9, $0x08 - JGE main_loop - - // Update ctx structure - MOVQ ctx+0(FP), AX - SUBQ 16(AX), DX - MOVQ DX, 40(AX) - MOVQ (AX), AX - MOVQ R9, 24(AX) - MOVQ R10, 32(AX) - MOVB R11, 40(AX) - RET - - // Report error -error_max_decoded_size_exceeded: - MOVQ ctx+0(FP), AX - MOVQ $-1, CX - MOVQ CX, 40(AX) - RET - -// func decompress1x_main_loop_bmi2(ctx *decompress1xContext) -// Requires: BMI2 -TEXT ·decompress1x_main_loop_bmi2(SB), $0-8 - MOVQ ctx+0(FP), CX - MOVQ 16(CX), DX - MOVQ 24(CX), BX - CMPQ BX, $0x04 - JB error_max_decoded_size_exceeded - LEAQ (DX)(BX*1), BX - MOVQ (CX), SI - MOVQ (SI), R8 - MOVQ 24(SI), R9 - MOVQ 32(SI), R10 - MOVBQZX 40(SI), R11 - MOVQ 32(CX), SI - MOVBQZX 8(CX), DI - JMP loop_condition - -main_loop: - // Check if we have room for 4 bytes in the output buffer - LEAQ 4(DX), CX - CMPQ CX, BX - JGE error_max_decoded_size_exceeded - - // Decode 4 values - CMPQ R11, $0x20 - JL bitReader_fillFast_1_end - SUBQ $0x20, R11 - SUBQ $0x04, R9 - MOVL (R8)(R9*1), CX - SHLXQ R11, CX, CX - ORQ CX, R10 - -bitReader_fillFast_1_end: - SHRXQ DI, R10, CX - MOVW (SI)(CX*2), CX - MOVB CH, AL - MOVBQZX CL, CX - ADDQ CX, R11 - SHLXQ CX, R10, R10 - SHRXQ DI, R10, CX - MOVW (SI)(CX*2), CX - MOVB CH, AH - MOVBQZX CL, CX - ADDQ CX, R11 - SHLXQ CX, R10, R10 - BSWAPL AX - CMPQ R11, $0x20 - JL bitReader_fillFast_2_end - SUBQ $0x20, R11 - SUBQ $0x04, R9 - MOVL (R8)(R9*1), CX - SHLXQ R11, CX, CX - ORQ CX, R10 - -bitReader_fillFast_2_end: - SHRXQ DI, R10, CX - MOVW (SI)(CX*2), CX - MOVB CH, AH - MOVBQZX CL, CX - ADDQ CX, R11 - SHLXQ CX, R10, R10 - SHRXQ DI, R10, CX - MOVW (SI)(CX*2), CX - MOVB CH, AL - MOVBQZX CL, CX - ADDQ CX, R11 - SHLXQ CX, R10, R10 - BSWAPL AX - - // Store the decoded values - MOVL AX, (DX) - ADDQ $0x04, DX - -loop_condition: - CMPQ R9, $0x08 - JGE main_loop - - // Update ctx structure - MOVQ ctx+0(FP), AX - SUBQ 16(AX), DX - MOVQ DX, 40(AX) - MOVQ (AX), AX - MOVQ R9, 24(AX) - MOVQ R10, 32(AX) - MOVB R11, 40(AX) - RET - - // Report error -error_max_decoded_size_exceeded: - MOVQ ctx+0(FP), AX - MOVQ $-1, CX - MOVQ CX, 40(AX) - RET diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_generic.go b/vendor/github.com/klauspost/compress/huff0/decompress_generic.go deleted file mode 100644 index 908c17de6..000000000 --- a/vendor/github.com/klauspost/compress/huff0/decompress_generic.go +++ /dev/null @@ -1,299 +0,0 @@ -//go:build !amd64 || appengine || !gc || noasm -// +build !amd64 appengine !gc noasm - -// This file contains a generic implementation of Decoder.Decompress4X. -package huff0 - -import ( - "errors" - "fmt" -) - -// Decompress4X will decompress a 4X encoded stream. -// The length of the supplied input must match the end of a block exactly. 
-// The *capacity* of the dst slice must match the destination size of -// the uncompressed data exactly. -func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) { - if len(d.dt.single) == 0 { - return nil, errors.New("no table loaded") - } - if len(src) < 6+(4*1) { - return nil, errors.New("input too small") - } - if use8BitTables && d.actualTableLog <= 8 { - return d.decompress4X8bit(dst, src) - } - - var br [4]bitReaderShifted - // Decode "jump table" - start := 6 - for i := 0; i < 3; i++ { - length := int(src[i*2]) | (int(src[i*2+1]) << 8) - if start+length >= len(src) { - return nil, errors.New("truncated input (or invalid offset)") - } - err := br[i].init(src[start : start+length]) - if err != nil { - return nil, err - } - start += length - } - err := br[3].init(src[start:]) - if err != nil { - return nil, err - } - - // destination, offset to match first output - dstSize := cap(dst) - dst = dst[:dstSize] - out := dst - dstEvery := (dstSize + 3) / 4 - - const tlSize = 1 << tableLogMax - const tlMask = tlSize - 1 - single := d.dt.single[:tlSize] - - // Use temp table to avoid bound checks/append penalty. - buf := d.buffer() - var off uint8 - var decoded int - - // Decode 2 values from each decoder/loop. - const bufoff = 256 - for { - if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 { - break - } - - { - const stream = 0 - const stream2 = 1 - br[stream].fillFast() - br[stream2].fillFast() - - val := br[stream].peekBitsFast(d.actualTableLog) - val2 := br[stream2].peekBitsFast(d.actualTableLog) - v := single[val&tlMask] - v2 := single[val2&tlMask] - br[stream].advance(uint8(v.entry)) - br[stream2].advance(uint8(v2.entry)) - buf[stream][off] = uint8(v.entry >> 8) - buf[stream2][off] = uint8(v2.entry >> 8) - - val = br[stream].peekBitsFast(d.actualTableLog) - val2 = br[stream2].peekBitsFast(d.actualTableLog) - v = single[val&tlMask] - v2 = single[val2&tlMask] - br[stream].advance(uint8(v.entry)) - br[stream2].advance(uint8(v2.entry)) - buf[stream][off+1] = uint8(v.entry >> 8) - buf[stream2][off+1] = uint8(v2.entry >> 8) - } - - { - const stream = 2 - const stream2 = 3 - br[stream].fillFast() - br[stream2].fillFast() - - val := br[stream].peekBitsFast(d.actualTableLog) - val2 := br[stream2].peekBitsFast(d.actualTableLog) - v := single[val&tlMask] - v2 := single[val2&tlMask] - br[stream].advance(uint8(v.entry)) - br[stream2].advance(uint8(v2.entry)) - buf[stream][off] = uint8(v.entry >> 8) - buf[stream2][off] = uint8(v2.entry >> 8) - - val = br[stream].peekBitsFast(d.actualTableLog) - val2 = br[stream2].peekBitsFast(d.actualTableLog) - v = single[val&tlMask] - v2 = single[val2&tlMask] - br[stream].advance(uint8(v.entry)) - br[stream2].advance(uint8(v2.entry)) - buf[stream][off+1] = uint8(v.entry >> 8) - buf[stream2][off+1] = uint8(v2.entry >> 8) - } - - off += 2 - - if off == 0 { - if bufoff > dstEvery { - d.bufs.Put(buf) - return nil, errors.New("corruption detected: stream overrun 1") - } - // There must at least be 3 buffers left. 
- if len(out)-bufoff < dstEvery*3 { - d.bufs.Put(buf) - return nil, errors.New("corruption detected: stream overrun 2") - } - //copy(out, buf[0][:]) - //copy(out[dstEvery:], buf[1][:]) - //copy(out[dstEvery*2:], buf[2][:]) - //copy(out[dstEvery*3:], buf[3][:]) - *(*[bufoff]byte)(out) = buf[0] - *(*[bufoff]byte)(out[dstEvery:]) = buf[1] - *(*[bufoff]byte)(out[dstEvery*2:]) = buf[2] - *(*[bufoff]byte)(out[dstEvery*3:]) = buf[3] - out = out[bufoff:] - decoded += bufoff * 4 - } - } - if off > 0 { - ioff := int(off) - if len(out) < dstEvery*3+ioff { - d.bufs.Put(buf) - return nil, errors.New("corruption detected: stream overrun 3") - } - copy(out, buf[0][:off]) - copy(out[dstEvery:], buf[1][:off]) - copy(out[dstEvery*2:], buf[2][:off]) - copy(out[dstEvery*3:], buf[3][:off]) - decoded += int(off) * 4 - out = out[off:] - } - - // Decode remaining. - remainBytes := dstEvery - (decoded / 4) - for i := range br { - offset := dstEvery * i - endsAt := offset + remainBytes - if endsAt > len(out) { - endsAt = len(out) - } - br := &br[i] - bitsLeft := br.remaining() - for bitsLeft > 0 { - br.fill() - if offset >= endsAt { - d.bufs.Put(buf) - return nil, errors.New("corruption detected: stream overrun 4") - } - - // Read value and increment offset. - val := br.peekBitsFast(d.actualTableLog) - v := single[val&tlMask].entry - nBits := uint8(v) - br.advance(nBits) - bitsLeft -= uint(nBits) - out[offset] = uint8(v >> 8) - offset++ - } - if offset != endsAt { - d.bufs.Put(buf) - return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt) - } - decoded += offset - dstEvery*i - err = br.close() - if err != nil { - return nil, err - } - } - d.bufs.Put(buf) - if dstSize != decoded { - return nil, errors.New("corruption detected: short output block") - } - return dst, nil -} - -// Decompress1X will decompress a 1X encoded stream. -// The cap of the output buffer will be the maximum decompressed size. -// The length of the supplied input must match the end of a block exactly. -func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) { - if len(d.dt.single) == 0 { - return nil, errors.New("no table loaded") - } - if use8BitTables && d.actualTableLog <= 8 { - return d.decompress1X8Bit(dst, src) - } - var br bitReaderShifted - err := br.init(src) - if err != nil { - return dst, err - } - maxDecodedSize := cap(dst) - dst = dst[:0] - - // Avoid bounds check by always having full sized table. - const tlSize = 1 << tableLogMax - const tlMask = tlSize - 1 - dt := d.dt.single[:tlSize] - - // Use temp table to avoid bound checks/append penalty. - bufs := d.buffer() - buf := &bufs[0] - var off uint8 - - for br.off >= 8 { - br.fillFast() - v := dt[br.peekBitsFast(d.actualTableLog)&tlMask] - br.advance(uint8(v.entry)) - buf[off+0] = uint8(v.entry >> 8) - - v = dt[br.peekBitsFast(d.actualTableLog)&tlMask] - br.advance(uint8(v.entry)) - buf[off+1] = uint8(v.entry >> 8) - - // Refill - br.fillFast() - - v = dt[br.peekBitsFast(d.actualTableLog)&tlMask] - br.advance(uint8(v.entry)) - buf[off+2] = uint8(v.entry >> 8) - - v = dt[br.peekBitsFast(d.actualTableLog)&tlMask] - br.advance(uint8(v.entry)) - buf[off+3] = uint8(v.entry >> 8) - - off += 4 - if off == 0 { - if len(dst)+256 > maxDecodedSize { - br.close() - d.bufs.Put(bufs) - return nil, ErrMaxDecodedSizeExceeded - } - dst = append(dst, buf[:]...) - } - } - - if len(dst)+int(off) > maxDecodedSize { - d.bufs.Put(bufs) - br.close() - return nil, ErrMaxDecodedSizeExceeded - } - dst = append(dst, buf[:off]...) 
- - // br < 8, so uint8 is fine - bitsLeft := uint8(br.off)*8 + 64 - br.bitsRead - for bitsLeft > 0 { - br.fill() - if false && br.bitsRead >= 32 { - if br.off >= 4 { - v := br.in[br.off-4:] - v = v[:4] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) - br.value = (br.value << 32) | uint64(low) - br.bitsRead -= 32 - br.off -= 4 - } else { - for br.off > 0 { - br.value = (br.value << 8) | uint64(br.in[br.off-1]) - br.bitsRead -= 8 - br.off-- - } - } - } - if len(dst) >= maxDecodedSize { - d.bufs.Put(bufs) - br.close() - return nil, ErrMaxDecodedSizeExceeded - } - v := d.dt.single[br.peekBitsFast(d.actualTableLog)&tlMask] - nBits := uint8(v.entry) - br.advance(nBits) - bitsLeft -= nBits - dst = append(dst, uint8(v.entry>>8)) - } - d.bufs.Put(bufs) - return dst, br.close() -} diff --git a/vendor/github.com/klauspost/compress/huff0/huff0.go b/vendor/github.com/klauspost/compress/huff0/huff0.go deleted file mode 100644 index 77ecd68e0..000000000 --- a/vendor/github.com/klauspost/compress/huff0/huff0.go +++ /dev/null @@ -1,337 +0,0 @@ -// Package huff0 provides fast huffman encoding as used in zstd. -// -// See README.md at https://github.com/klauspost/compress/tree/master/huff0 for details. -package huff0 - -import ( - "errors" - "fmt" - "math" - "math/bits" - "sync" - - "github.com/klauspost/compress/fse" -) - -const ( - maxSymbolValue = 255 - - // zstandard limits tablelog to 11, see: - // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#huffman-tree-description - tableLogMax = 11 - tableLogDefault = 11 - minTablelog = 5 - huffNodesLen = 512 - - // BlockSizeMax is maximum input size for a single block uncompressed. - BlockSizeMax = 1<<18 - 1 -) - -var ( - // ErrIncompressible is returned when input is judged to be too hard to compress. - ErrIncompressible = errors.New("input is not compressible") - - // ErrUseRLE is returned from the compressor when the input is a single byte value repeated. - ErrUseRLE = errors.New("input is single value repeated") - - // ErrTooBig is return if input is too large for a single block. - ErrTooBig = errors.New("input too big") - - // ErrMaxDecodedSizeExceeded is return if input is too large for a single block. - ErrMaxDecodedSizeExceeded = errors.New("maximum output size exceeded") -) - -type ReusePolicy uint8 - -const ( - // ReusePolicyAllow will allow reuse if it produces smaller output. - ReusePolicyAllow ReusePolicy = iota - - // ReusePolicyPrefer will re-use aggressively if possible. - // This will not check if a new table will produce smaller output, - // except if the current table is impossible to use or - // compressed output is bigger than input. - ReusePolicyPrefer - - // ReusePolicyNone will disable re-use of tables. - // This is slightly faster than ReusePolicyAllow but may produce larger output. - ReusePolicyNone - - // ReusePolicyMust must allow reuse and produce smaller output. - ReusePolicyMust -) - -type Scratch struct { - count [maxSymbolValue + 1]uint32 - - // Per block parameters. - // These can be used to override compression parameters of the block. - // Do not touch, unless you know what you are doing. - - // Out is output buffer. - // If the scratch is re-used before the caller is done processing the output, - // set this field to nil. - // Otherwise the output buffer will be re-used for next Compression/Decompression step - // and allocation will be avoided. - Out []byte - - // OutTable will contain the table data only, if a new table has been generated. 
- // Slice of the returned data. - OutTable []byte - - // OutData will contain the compressed data. - // Slice of the returned data. - OutData []byte - - // MaxDecodedSize will set the maximum allowed output size. - // This value will automatically be set to BlockSizeMax if not set. - // Decoders will return ErrMaxDecodedSizeExceeded is this limit is exceeded. - MaxDecodedSize int - - srcLen int - - // MaxSymbolValue will override the maximum symbol value of the next block. - MaxSymbolValue uint8 - - // TableLog will attempt to override the tablelog for the next block. - // Must be <= 11 and >= 5. - TableLog uint8 - - // Reuse will specify the reuse policy - Reuse ReusePolicy - - // WantLogLess allows to specify a log 2 reduction that should at least be achieved, - // otherwise the block will be returned as incompressible. - // The reduction should then at least be (input size >> WantLogLess) - // If WantLogLess == 0 any improvement will do. - WantLogLess uint8 - - symbolLen uint16 // Length of active part of the symbol table. - maxCount int // count of the most probable symbol - clearCount bool // clear count - actualTableLog uint8 // Selected tablelog. - prevTableLog uint8 // Tablelog for previous table - prevTable cTable // Table used for previous compression. - cTable cTable // compression table - dt dTable // decompression table - nodes []nodeElt - tmpOut [4][]byte - fse *fse.Scratch - decPool sync.Pool // *[4][256]byte buffers. - huffWeight [maxSymbolValue + 1]byte -} - -// TransferCTable will transfer the previously used compression table. -func (s *Scratch) TransferCTable(src *Scratch) { - if cap(s.prevTable) < len(src.prevTable) { - s.prevTable = make(cTable, 0, maxSymbolValue+1) - } - s.prevTable = s.prevTable[:len(src.prevTable)] - copy(s.prevTable, src.prevTable) - s.prevTableLog = src.prevTableLog -} - -func (s *Scratch) prepare(in []byte) (*Scratch, error) { - if len(in) > BlockSizeMax { - return nil, ErrTooBig - } - if s == nil { - s = &Scratch{} - } - if s.MaxSymbolValue == 0 { - s.MaxSymbolValue = maxSymbolValue - } - if s.TableLog == 0 { - s.TableLog = tableLogDefault - } - if s.TableLog > tableLogMax || s.TableLog < minTablelog { - return nil, fmt.Errorf(" invalid tableLog %d (%d -> %d)", s.TableLog, minTablelog, tableLogMax) - } - if s.MaxDecodedSize <= 0 || s.MaxDecodedSize > BlockSizeMax { - s.MaxDecodedSize = BlockSizeMax - } - if s.clearCount && s.maxCount == 0 { - for i := range s.count { - s.count[i] = 0 - } - s.clearCount = false - } - if cap(s.Out) == 0 { - s.Out = make([]byte, 0, len(in)) - } - s.Out = s.Out[:0] - - s.OutTable = nil - s.OutData = nil - if cap(s.nodes) < huffNodesLen+1 { - s.nodes = make([]nodeElt, 0, huffNodesLen+1) - } - s.nodes = s.nodes[:0] - if s.fse == nil { - s.fse = &fse.Scratch{} - } - s.srcLen = len(in) - - return s, nil -} - -type cTable []cTableEntry - -func (c cTable) write(s *Scratch) error { - var ( - // precomputed conversion table - bitsToWeight [tableLogMax + 1]byte - huffLog = s.actualTableLog - // last weight is not saved. - maxSymbolValue = uint8(s.symbolLen - 1) - huffWeight = s.huffWeight[:256] - ) - const ( - maxFSETableLog = 6 - ) - // convert to weight - bitsToWeight[0] = 0 - for n := uint8(1); n < huffLog+1; n++ { - bitsToWeight[n] = huffLog + 1 - n - } - - // Acquire histogram for FSE. 
- hist := s.fse.Histogram() - hist = hist[:256] - for i := range hist[:16] { - hist[i] = 0 - } - for n := uint8(0); n < maxSymbolValue; n++ { - v := bitsToWeight[c[n].nBits] & 15 - huffWeight[n] = v - hist[v]++ - } - - // FSE compress if feasible. - if maxSymbolValue >= 2 { - huffMaxCnt := uint32(0) - huffMax := uint8(0) - for i, v := range hist[:16] { - if v == 0 { - continue - } - huffMax = byte(i) - if v > huffMaxCnt { - huffMaxCnt = v - } - } - s.fse.HistogramFinished(huffMax, int(huffMaxCnt)) - s.fse.TableLog = maxFSETableLog - b, err := fse.Compress(huffWeight[:maxSymbolValue], s.fse) - if err == nil && len(b) < int(s.symbolLen>>1) { - s.Out = append(s.Out, uint8(len(b))) - s.Out = append(s.Out, b...) - return nil - } - // Unable to compress (RLE/uncompressible) - } - // write raw values as 4-bits (max : 15) - if maxSymbolValue > (256 - 128) { - // should not happen : likely means source cannot be compressed - return ErrIncompressible - } - op := s.Out - // special case, pack weights 4 bits/weight. - op = append(op, 128|(maxSymbolValue-1)) - // be sure it doesn't cause msan issue in final combination - huffWeight[maxSymbolValue] = 0 - for n := uint16(0); n < uint16(maxSymbolValue); n += 2 { - op = append(op, (huffWeight[n]<<4)|huffWeight[n+1]) - } - s.Out = op - return nil -} - -func (c cTable) estTableSize(s *Scratch) (sz int, err error) { - var ( - // precomputed conversion table - bitsToWeight [tableLogMax + 1]byte - huffLog = s.actualTableLog - // last weight is not saved. - maxSymbolValue = uint8(s.symbolLen - 1) - huffWeight = s.huffWeight[:256] - ) - const ( - maxFSETableLog = 6 - ) - // convert to weight - bitsToWeight[0] = 0 - for n := uint8(1); n < huffLog+1; n++ { - bitsToWeight[n] = huffLog + 1 - n - } - - // Acquire histogram for FSE. - hist := s.fse.Histogram() - hist = hist[:256] - for i := range hist[:16] { - hist[i] = 0 - } - for n := uint8(0); n < maxSymbolValue; n++ { - v := bitsToWeight[c[n].nBits] & 15 - huffWeight[n] = v - hist[v]++ - } - - // FSE compress if feasible. - if maxSymbolValue >= 2 { - huffMaxCnt := uint32(0) - huffMax := uint8(0) - for i, v := range hist[:16] { - if v == 0 { - continue - } - huffMax = byte(i) - if v > huffMaxCnt { - huffMaxCnt = v - } - } - s.fse.HistogramFinished(huffMax, int(huffMaxCnt)) - s.fse.TableLog = maxFSETableLog - b, err := fse.Compress(huffWeight[:maxSymbolValue], s.fse) - if err == nil && len(b) < int(s.symbolLen>>1) { - sz += 1 + len(b) - return sz, nil - } - // Unable to compress (RLE/uncompressible) - } - // write raw values as 4-bits (max : 15) - if maxSymbolValue > (256 - 128) { - // should not happen : likely means source cannot be compressed - return 0, ErrIncompressible - } - // special case, pack weights 4 bits/weight. - sz += 1 + int(maxSymbolValue/2) - return sz, nil -} - -// estimateSize returns the estimated size in bytes of the input represented in the -// histogram supplied. -func (c cTable) estimateSize(hist []uint32) int { - nbBits := uint32(7) - for i, v := range c[:len(hist)] { - nbBits += uint32(v.nBits) * hist[i] - } - return int(nbBits >> 3) -} - -// minSize returns the minimum possible size considering the shannon limit. 
-func (s *Scratch) minSize(total int) int { - nbBits := float64(7) - fTotal := float64(total) - for _, v := range s.count[:s.symbolLen] { - n := float64(v) - if n > 0 { - nbBits += math.Log2(fTotal/n) * n - } - } - return int(nbBits) >> 3 -} - -func highBit32(val uint32) (n uint32) { - return uint32(bits.Len32(val) - 1) -} diff --git a/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo.go b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo.go deleted file mode 100644 index 3954c5121..000000000 --- a/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo.go +++ /dev/null @@ -1,34 +0,0 @@ -// Package cpuinfo gives runtime info about the current CPU. -// -// This is a very limited module meant for use internally -// in this project. For more versatile solution check -// https://github.com/klauspost/cpuid. -package cpuinfo - -// HasBMI1 checks whether an x86 CPU supports the BMI1 extension. -func HasBMI1() bool { - return hasBMI1 -} - -// HasBMI2 checks whether an x86 CPU supports the BMI2 extension. -func HasBMI2() bool { - return hasBMI2 -} - -// DisableBMI2 will disable BMI2, for testing purposes. -// Call returned function to restore previous state. -func DisableBMI2() func() { - old := hasBMI2 - hasBMI2 = false - return func() { - hasBMI2 = old - } -} - -// HasBMI checks whether an x86 CPU supports both BMI1 and BMI2 extensions. -func HasBMI() bool { - return HasBMI1() && HasBMI2() -} - -var hasBMI1 bool -var hasBMI2 bool diff --git a/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.go b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.go deleted file mode 100644 index e802579c4..000000000 --- a/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.go +++ /dev/null @@ -1,11 +0,0 @@ -//go:build amd64 && !appengine && !noasm && gc -// +build amd64,!appengine,!noasm,gc - -package cpuinfo - -// go:noescape -func x86extensions() (bmi1, bmi2 bool) - -func init() { - hasBMI1, hasBMI2 = x86extensions() -} diff --git a/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.s b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.s deleted file mode 100644 index 4465fbe9e..000000000 --- a/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.s +++ /dev/null @@ -1,36 +0,0 @@ -// +build !appengine -// +build gc -// +build !noasm - -#include "textflag.h" -#include "funcdata.h" -#include "go_asm.h" - -TEXT ·x86extensions(SB), NOSPLIT, $0 - // 1. determine max EAX value - XORQ AX, AX - CPUID - - CMPQ AX, $7 - JB unsupported - - // 2. EAX = 7, ECX = 0 --- see Table 3-8 "Information Returned by CPUID Instruction" - MOVQ $7, AX - MOVQ $0, CX - CPUID - - BTQ $3, BX // bit 3 = BMI1 - SETCS AL - - BTQ $8, BX // bit 8 = BMI2 - SETCS AH - - MOVB AL, bmi1+0(FP) - MOVB AH, bmi2+1(FP) - RET - -unsupported: - XORQ AX, AX - MOVB AL, bmi1+0(FP) - MOVB AL, bmi2+1(FP) - RET diff --git a/vendor/github.com/klauspost/compress/internal/snapref/LICENSE b/vendor/github.com/klauspost/compress/internal/snapref/LICENSE deleted file mode 100644 index 6050c10f4..000000000 --- a/vendor/github.com/klauspost/compress/internal/snapref/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2011 The Snappy-Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. 
- * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/klauspost/compress/internal/snapref/decode.go b/vendor/github.com/klauspost/compress/internal/snapref/decode.go deleted file mode 100644 index 40796a49d..000000000 --- a/vendor/github.com/klauspost/compress/internal/snapref/decode.go +++ /dev/null @@ -1,264 +0,0 @@ -// Copyright 2011 The Snappy-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package snapref - -import ( - "encoding/binary" - "errors" - "io" -) - -var ( - // ErrCorrupt reports that the input is invalid. - ErrCorrupt = errors.New("snappy: corrupt input") - // ErrTooLarge reports that the uncompressed length is too large. - ErrTooLarge = errors.New("snappy: decoded block is too large") - // ErrUnsupported reports that the input isn't supported. - ErrUnsupported = errors.New("snappy: unsupported input") - - errUnsupportedLiteralLength = errors.New("snappy: unsupported literal length") -) - -// DecodedLen returns the length of the decoded block. -func DecodedLen(src []byte) (int, error) { - v, _, err := decodedLen(src) - return v, err -} - -// decodedLen returns the length of the decoded block and the number of bytes -// that the length header occupied. -func decodedLen(src []byte) (blockLen, headerLen int, err error) { - v, n := binary.Uvarint(src) - if n <= 0 || v > 0xffffffff { - return 0, 0, ErrCorrupt - } - - const wordSize = 32 << (^uint(0) >> 32 & 1) - if wordSize == 32 && v > 0x7fffffff { - return 0, 0, ErrTooLarge - } - return int(v), n, nil -} - -const ( - decodeErrCodeCorrupt = 1 - decodeErrCodeUnsupportedLiteralLength = 2 -) - -// Decode returns the decoded form of src. The returned slice may be a sub- -// slice of dst if dst was large enough to hold the entire decoded block. -// Otherwise, a newly allocated slice will be returned. -// -// The dst and src must not overlap. It is valid to pass a nil dst. -// -// Decode handles the Snappy block format, not the Snappy stream format. 
-func Decode(dst, src []byte) ([]byte, error) { - dLen, s, err := decodedLen(src) - if err != nil { - return nil, err - } - if dLen <= len(dst) { - dst = dst[:dLen] - } else { - dst = make([]byte, dLen) - } - switch decode(dst, src[s:]) { - case 0: - return dst, nil - case decodeErrCodeUnsupportedLiteralLength: - return nil, errUnsupportedLiteralLength - } - return nil, ErrCorrupt -} - -// NewReader returns a new Reader that decompresses from r, using the framing -// format described at -// https://github.com/google/snappy/blob/master/framing_format.txt -func NewReader(r io.Reader) *Reader { - return &Reader{ - r: r, - decoded: make([]byte, maxBlockSize), - buf: make([]byte, maxEncodedLenOfMaxBlockSize+checksumSize), - } -} - -// Reader is an io.Reader that can read Snappy-compressed bytes. -// -// Reader handles the Snappy stream format, not the Snappy block format. -type Reader struct { - r io.Reader - err error - decoded []byte - buf []byte - // decoded[i:j] contains decoded bytes that have not yet been passed on. - i, j int - readHeader bool -} - -// Reset discards any buffered data, resets all state, and switches the Snappy -// reader to read from r. This permits reusing a Reader rather than allocating -// a new one. -func (r *Reader) Reset(reader io.Reader) { - r.r = reader - r.err = nil - r.i = 0 - r.j = 0 - r.readHeader = false -} - -func (r *Reader) readFull(p []byte, allowEOF bool) (ok bool) { - if _, r.err = io.ReadFull(r.r, p); r.err != nil { - if r.err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) { - r.err = ErrCorrupt - } - return false - } - return true -} - -func (r *Reader) fill() error { - for r.i >= r.j { - if !r.readFull(r.buf[:4], true) { - return r.err - } - chunkType := r.buf[0] - if !r.readHeader { - if chunkType != chunkTypeStreamIdentifier { - r.err = ErrCorrupt - return r.err - } - r.readHeader = true - } - chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16 - if chunkLen > len(r.buf) { - r.err = ErrUnsupported - return r.err - } - - // The chunk types are specified at - // https://github.com/google/snappy/blob/master/framing_format.txt - switch chunkType { - case chunkTypeCompressedData: - // Section 4.2. Compressed data (chunk type 0x00). - if chunkLen < checksumSize { - r.err = ErrCorrupt - return r.err - } - buf := r.buf[:chunkLen] - if !r.readFull(buf, false) { - return r.err - } - checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 - buf = buf[checksumSize:] - - n, err := DecodedLen(buf) - if err != nil { - r.err = err - return r.err - } - if n > len(r.decoded) { - r.err = ErrCorrupt - return r.err - } - if _, err := Decode(r.decoded, buf); err != nil { - r.err = err - return r.err - } - if crc(r.decoded[:n]) != checksum { - r.err = ErrCorrupt - return r.err - } - r.i, r.j = 0, n - continue - - case chunkTypeUncompressedData: - // Section 4.3. Uncompressed data (chunk type 0x01). - if chunkLen < checksumSize { - r.err = ErrCorrupt - return r.err - } - buf := r.buf[:checksumSize] - if !r.readFull(buf, false) { - return r.err - } - checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 - // Read directly into r.decoded instead of via r.buf. - n := chunkLen - checksumSize - if n > len(r.decoded) { - r.err = ErrCorrupt - return r.err - } - if !r.readFull(r.decoded[:n], false) { - return r.err - } - if crc(r.decoded[:n]) != checksum { - r.err = ErrCorrupt - return r.err - } - r.i, r.j = 0, n - continue - - case chunkTypeStreamIdentifier: - // Section 4.1. 
Stream identifier (chunk type 0xff). - if chunkLen != len(magicBody) { - r.err = ErrCorrupt - return r.err - } - if !r.readFull(r.buf[:len(magicBody)], false) { - return r.err - } - for i := 0; i < len(magicBody); i++ { - if r.buf[i] != magicBody[i] { - r.err = ErrCorrupt - return r.err - } - } - continue - } - - if chunkType <= 0x7f { - // Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f). - r.err = ErrUnsupported - return r.err - } - // Section 4.4 Padding (chunk type 0xfe). - // Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd). - if !r.readFull(r.buf[:chunkLen], false) { - return r.err - } - } - - return nil -} - -// Read satisfies the io.Reader interface. -func (r *Reader) Read(p []byte) (int, error) { - if r.err != nil { - return 0, r.err - } - - if err := r.fill(); err != nil { - return 0, err - } - - n := copy(p, r.decoded[r.i:r.j]) - r.i += n - return n, nil -} - -// ReadByte satisfies the io.ByteReader interface. -func (r *Reader) ReadByte() (byte, error) { - if r.err != nil { - return 0, r.err - } - - if err := r.fill(); err != nil { - return 0, err - } - - c := r.decoded[r.i] - r.i++ - return c, nil -} diff --git a/vendor/github.com/klauspost/compress/internal/snapref/decode_other.go b/vendor/github.com/klauspost/compress/internal/snapref/decode_other.go deleted file mode 100644 index 77395a6b8..000000000 --- a/vendor/github.com/klauspost/compress/internal/snapref/decode_other.go +++ /dev/null @@ -1,113 +0,0 @@ -// Copyright 2016 The Snappy-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package snapref - -// decode writes the decoding of src to dst. It assumes that the varint-encoded -// length of the decompressed bytes has already been read, and that len(dst) -// equals that length. -// -// It returns 0 on success or a decodeErrCodeXxx error code on failure. -func decode(dst, src []byte) int { - var d, s, offset, length int - for s < len(src) { - switch src[s] & 0x03 { - case tagLiteral: - x := uint32(src[s] >> 2) - switch { - case x < 60: - s++ - case x == 60: - s += 2 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - return decodeErrCodeCorrupt - } - x = uint32(src[s-1]) - case x == 61: - s += 3 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - return decodeErrCodeCorrupt - } - x = uint32(src[s-2]) | uint32(src[s-1])<<8 - case x == 62: - s += 4 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - return decodeErrCodeCorrupt - } - x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 - case x == 63: - s += 5 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - return decodeErrCodeCorrupt - } - x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 - } - length = int(x) + 1 - if length <= 0 { - return decodeErrCodeUnsupportedLiteralLength - } - if length > len(dst)-d || length > len(src)-s { - return decodeErrCodeCorrupt - } - copy(dst[d:], src[s:s+length]) - d += length - s += length - continue - - case tagCopy1: - s += 2 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. 
- return decodeErrCodeCorrupt - } - length = 4 + int(src[s-2])>>2&0x7 - offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1])) - - case tagCopy2: - s += 3 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - return decodeErrCodeCorrupt - } - length = 1 + int(src[s-3])>>2 - offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8) - - case tagCopy4: - s += 5 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - return decodeErrCodeCorrupt - } - length = 1 + int(src[s-5])>>2 - offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24) - } - - if offset <= 0 || d < offset || length > len(dst)-d { - return decodeErrCodeCorrupt - } - // Copy from an earlier sub-slice of dst to a later sub-slice. - // If no overlap, use the built-in copy: - if offset >= length { - copy(dst[d:d+length], dst[d-offset:]) - d += length - continue - } - - // Unlike the built-in copy function, this byte-by-byte copy always runs - // forwards, even if the slices overlap. Conceptually, this is: - // - // d += forwardCopy(dst[d:d+length], dst[d-offset:]) - // - // We align the slices into a and b and show the compiler they are the same size. - // This allows the loop to run without bounds checks. - a := dst[d : d+length] - b := dst[d-offset:] - b = b[:len(a)] - for i := range a { - a[i] = b[i] - } - d += length - } - if d != len(dst) { - return decodeErrCodeCorrupt - } - return 0 -} diff --git a/vendor/github.com/klauspost/compress/internal/snapref/encode.go b/vendor/github.com/klauspost/compress/internal/snapref/encode.go deleted file mode 100644 index 13c6040a5..000000000 --- a/vendor/github.com/klauspost/compress/internal/snapref/encode.go +++ /dev/null @@ -1,289 +0,0 @@ -// Copyright 2011 The Snappy-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package snapref - -import ( - "encoding/binary" - "errors" - "io" -) - -// Encode returns the encoded form of src. The returned slice may be a sub- -// slice of dst if dst was large enough to hold the entire encoded block. -// Otherwise, a newly allocated slice will be returned. -// -// The dst and src must not overlap. It is valid to pass a nil dst. -// -// Encode handles the Snappy block format, not the Snappy stream format. -func Encode(dst, src []byte) []byte { - if n := MaxEncodedLen(len(src)); n < 0 { - panic(ErrTooLarge) - } else if len(dst) < n { - dst = make([]byte, n) - } - - // The block starts with the varint-encoded length of the decompressed bytes. - d := binary.PutUvarint(dst, uint64(len(src))) - - for len(src) > 0 { - p := src - src = nil - if len(p) > maxBlockSize { - p, src = p[:maxBlockSize], p[maxBlockSize:] - } - if len(p) < minNonLiteralBlockSize { - d += emitLiteral(dst[d:], p) - } else { - d += encodeBlock(dst[d:], p) - } - } - return dst[:d] -} - -// inputMargin is the minimum number of extra input bytes to keep, inside -// encodeBlock's inner loop. On some architectures, this margin lets us -// implement a fast path for emitLiteral, where the copy of short (<= 16 byte) -// literals can be implemented as a single load to and store from a 16-byte -// register. That literal's actual length can be as short as 1 byte, so this -// can copy up to 15 bytes too much, but that's OK as subsequent iterations of -// the encoding loop will fix up the copy overrun, and this inputMargin ensures -// that we don't overrun the dst and src buffers. 
-const inputMargin = 16 - 1 - -// minNonLiteralBlockSize is the minimum size of the input to encodeBlock that -// could be encoded with a copy tag. This is the minimum with respect to the -// algorithm used by encodeBlock, not a minimum enforced by the file format. -// -// The encoded output must start with at least a 1 byte literal, as there are -// no previous bytes to copy. A minimal (1 byte) copy after that, generated -// from an emitCopy call in encodeBlock's main loop, would require at least -// another inputMargin bytes, for the reason above: we want any emitLiteral -// calls inside encodeBlock's main loop to use the fast path if possible, which -// requires being able to overrun by inputMargin bytes. Thus, -// minNonLiteralBlockSize equals 1 + 1 + inputMargin. -// -// The C++ code doesn't use this exact threshold, but it could, as discussed at -// https://groups.google.com/d/topic/snappy-compression/oGbhsdIJSJ8/discussion -// The difference between Go (2+inputMargin) and C++ (inputMargin) is purely an -// optimization. It should not affect the encoded form. This is tested by -// TestSameEncodingAsCppShortCopies. -const minNonLiteralBlockSize = 1 + 1 + inputMargin - -// MaxEncodedLen returns the maximum length of a snappy block, given its -// uncompressed length. -// -// It will return a negative value if srcLen is too large to encode. -func MaxEncodedLen(srcLen int) int { - n := uint64(srcLen) - if n > 0xffffffff { - return -1 - } - // Compressed data can be defined as: - // compressed := item* literal* - // item := literal* copy - // - // The trailing literal sequence has a space blowup of at most 62/60 - // since a literal of length 60 needs one tag byte + one extra byte - // for length information. - // - // Item blowup is trickier to measure. Suppose the "copy" op copies - // 4 bytes of data. Because of a special check in the encoding code, - // we produce a 4-byte copy only if the offset is < 65536. Therefore - // the copy op takes 3 bytes to encode, and this type of item leads - // to at most the 62/60 blowup for representing literals. - // - // Suppose the "copy" op copies 5 bytes of data. If the offset is big - // enough, it will take 5 bytes to encode the copy op. Therefore the - // worst case here is a one-byte literal followed by a five-byte copy. - // That is, 6 bytes of input turn into 7 bytes of "compressed" data. - // - // This last factor dominates the blowup, so the final estimate is: - n = 32 + n + n/6 - if n > 0xffffffff { - return -1 - } - return int(n) -} - -var errClosed = errors.New("snappy: Writer is closed") - -// NewWriter returns a new Writer that compresses to w. -// -// The Writer returned does not buffer writes. There is no need to Flush or -// Close such a Writer. -// -// Deprecated: the Writer returned is not suitable for many small writes, only -// for few large writes. Use NewBufferedWriter instead, which is efficient -// regardless of the frequency and shape of the writes, and remember to Close -// that Writer when done. -func NewWriter(w io.Writer) *Writer { - return &Writer{ - w: w, - obuf: make([]byte, obufLen), - } -} - -// NewBufferedWriter returns a new Writer that compresses to w, using the -// framing format described at -// https://github.com/google/snappy/blob/master/framing_format.txt -// -// The Writer returned buffers writes. Users must call Close to guarantee all -// data has been forwarded to the underlying io.Writer. They may also call -// Flush zero or more times before calling Close. 
-func NewBufferedWriter(w io.Writer) *Writer { - return &Writer{ - w: w, - ibuf: make([]byte, 0, maxBlockSize), - obuf: make([]byte, obufLen), - } -} - -// Writer is an io.Writer that can write Snappy-compressed bytes. -// -// Writer handles the Snappy stream format, not the Snappy block format. -type Writer struct { - w io.Writer - err error - - // ibuf is a buffer for the incoming (uncompressed) bytes. - // - // Its use is optional. For backwards compatibility, Writers created by the - // NewWriter function have ibuf == nil, do not buffer incoming bytes, and - // therefore do not need to be Flush'ed or Close'd. - ibuf []byte - - // obuf is a buffer for the outgoing (compressed) bytes. - obuf []byte - - // wroteStreamHeader is whether we have written the stream header. - wroteStreamHeader bool -} - -// Reset discards the writer's state and switches the Snappy writer to write to -// w. This permits reusing a Writer rather than allocating a new one. -func (w *Writer) Reset(writer io.Writer) { - w.w = writer - w.err = nil - if w.ibuf != nil { - w.ibuf = w.ibuf[:0] - } - w.wroteStreamHeader = false -} - -// Write satisfies the io.Writer interface. -func (w *Writer) Write(p []byte) (nRet int, errRet error) { - if w.ibuf == nil { - // Do not buffer incoming bytes. This does not perform or compress well - // if the caller of Writer.Write writes many small slices. This - // behavior is therefore deprecated, but still supported for backwards - // compatibility with code that doesn't explicitly Flush or Close. - return w.write(p) - } - - // The remainder of this method is based on bufio.Writer.Write from the - // standard library. - - for len(p) > (cap(w.ibuf)-len(w.ibuf)) && w.err == nil { - var n int - if len(w.ibuf) == 0 { - // Large write, empty buffer. - // Write directly from p to avoid copy. - n, _ = w.write(p) - } else { - n = copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p) - w.ibuf = w.ibuf[:len(w.ibuf)+n] - w.Flush() - } - nRet += n - p = p[n:] - } - if w.err != nil { - return nRet, w.err - } - n := copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p) - w.ibuf = w.ibuf[:len(w.ibuf)+n] - nRet += n - return nRet, nil -} - -func (w *Writer) write(p []byte) (nRet int, errRet error) { - if w.err != nil { - return 0, w.err - } - for len(p) > 0 { - obufStart := len(magicChunk) - if !w.wroteStreamHeader { - w.wroteStreamHeader = true - copy(w.obuf, magicChunk) - obufStart = 0 - } - - var uncompressed []byte - if len(p) > maxBlockSize { - uncompressed, p = p[:maxBlockSize], p[maxBlockSize:] - } else { - uncompressed, p = p, nil - } - checksum := crc(uncompressed) - - // Compress the buffer, discarding the result if the improvement - // isn't at least 12.5%. - compressed := Encode(w.obuf[obufHeaderLen:], uncompressed) - chunkType := uint8(chunkTypeCompressedData) - chunkLen := 4 + len(compressed) - obufEnd := obufHeaderLen + len(compressed) - if len(compressed) >= len(uncompressed)-len(uncompressed)/8 { - chunkType = chunkTypeUncompressedData - chunkLen = 4 + len(uncompressed) - obufEnd = obufHeaderLen - } - - // Fill in the per-chunk header that comes before the body. 
- w.obuf[len(magicChunk)+0] = chunkType - w.obuf[len(magicChunk)+1] = uint8(chunkLen >> 0) - w.obuf[len(magicChunk)+2] = uint8(chunkLen >> 8) - w.obuf[len(magicChunk)+3] = uint8(chunkLen >> 16) - w.obuf[len(magicChunk)+4] = uint8(checksum >> 0) - w.obuf[len(magicChunk)+5] = uint8(checksum >> 8) - w.obuf[len(magicChunk)+6] = uint8(checksum >> 16) - w.obuf[len(magicChunk)+7] = uint8(checksum >> 24) - - if _, err := w.w.Write(w.obuf[obufStart:obufEnd]); err != nil { - w.err = err - return nRet, err - } - if chunkType == chunkTypeUncompressedData { - if _, err := w.w.Write(uncompressed); err != nil { - w.err = err - return nRet, err - } - } - nRet += len(uncompressed) - } - return nRet, nil -} - -// Flush flushes the Writer to its underlying io.Writer. -func (w *Writer) Flush() error { - if w.err != nil { - return w.err - } - if len(w.ibuf) == 0 { - return nil - } - w.write(w.ibuf) - w.ibuf = w.ibuf[:0] - return w.err -} - -// Close calls Flush and then closes the Writer. -func (w *Writer) Close() error { - w.Flush() - ret := w.err - if w.err == nil { - w.err = errClosed - } - return ret -} diff --git a/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go b/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go deleted file mode 100644 index 2754bac6f..000000000 --- a/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go +++ /dev/null @@ -1,250 +0,0 @@ -// Copyright 2016 The Snappy-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package snapref - -func load32(b []byte, i int) uint32 { - b = b[i : i+4 : len(b)] // Help the compiler eliminate bounds checks on the next line. - return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 -} - -func load64(b []byte, i int) uint64 { - b = b[i : i+8 : len(b)] // Help the compiler eliminate bounds checks on the next line. - return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | - uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 -} - -// emitLiteral writes a literal chunk and returns the number of bytes written. -// -// It assumes that: -// -// dst is long enough to hold the encoded bytes -// 1 <= len(lit) && len(lit) <= 65536 -func emitLiteral(dst, lit []byte) int { - i, n := 0, uint(len(lit)-1) - switch { - case n < 60: - dst[0] = uint8(n)<<2 | tagLiteral - i = 1 - case n < 1<<8: - dst[0] = 60<<2 | tagLiteral - dst[1] = uint8(n) - i = 2 - default: - dst[0] = 61<<2 | tagLiteral - dst[1] = uint8(n) - dst[2] = uint8(n >> 8) - i = 3 - } - return i + copy(dst[i:], lit) -} - -// emitCopy writes a copy chunk and returns the number of bytes written. -// -// It assumes that: -// -// dst is long enough to hold the encoded bytes -// 1 <= offset && offset <= 65535 -// 4 <= length && length <= 65535 -func emitCopy(dst []byte, offset, length int) int { - i := 0 - // The maximum length for a single tagCopy1 or tagCopy2 op is 64 bytes. The - // threshold for this loop is a little higher (at 68 = 64 + 4), and the - // length emitted down below is a little lower (at 60 = 64 - 4), because - // it's shorter to encode a length 67 copy as a length 60 tagCopy2 followed - // by a length 7 tagCopy1 (which encodes as 3+2 bytes) than to encode it as - // a length 64 tagCopy2 followed by a length 3 tagCopy2 (which encodes as - // 3+3 bytes). 
The magic 4 in the 64±4 is because the minimum length for a - // tagCopy1 op is 4 bytes, which is why a length 3 copy has to be an - // encodes-as-3-bytes tagCopy2 instead of an encodes-as-2-bytes tagCopy1. - for length >= 68 { - // Emit a length 64 copy, encoded as 3 bytes. - dst[i+0] = 63<<2 | tagCopy2 - dst[i+1] = uint8(offset) - dst[i+2] = uint8(offset >> 8) - i += 3 - length -= 64 - } - if length > 64 { - // Emit a length 60 copy, encoded as 3 bytes. - dst[i+0] = 59<<2 | tagCopy2 - dst[i+1] = uint8(offset) - dst[i+2] = uint8(offset >> 8) - i += 3 - length -= 60 - } - if length >= 12 || offset >= 2048 { - // Emit the remaining copy, encoded as 3 bytes. - dst[i+0] = uint8(length-1)<<2 | tagCopy2 - dst[i+1] = uint8(offset) - dst[i+2] = uint8(offset >> 8) - return i + 3 - } - // Emit the remaining copy, encoded as 2 bytes. - dst[i+0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1 - dst[i+1] = uint8(offset) - return i + 2 -} - -func hash(u, shift uint32) uint32 { - return (u * 0x1e35a7bd) >> shift -} - -// EncodeBlockInto exposes encodeBlock but checks dst size. -func EncodeBlockInto(dst, src []byte) (d int) { - if MaxEncodedLen(len(src)) > len(dst) { - return 0 - } - - // encodeBlock breaks on too big blocks, so split. - for len(src) > 0 { - p := src - src = nil - if len(p) > maxBlockSize { - p, src = p[:maxBlockSize], p[maxBlockSize:] - } - if len(p) < minNonLiteralBlockSize { - d += emitLiteral(dst[d:], p) - } else { - d += encodeBlock(dst[d:], p) - } - } - return d -} - -// encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. It -// assumes that the varint-encoded length of the decompressed bytes has already -// been written. -// -// It also assumes that: -// -// len(dst) >= MaxEncodedLen(len(src)) && -// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize -func encodeBlock(dst, src []byte) (d int) { - // Initialize the hash table. Its size ranges from 1<<8 to 1<<14 inclusive. - // The table element type is uint16, as s < sLimit and sLimit < len(src) - // and len(src) <= maxBlockSize and maxBlockSize == 65536. - const ( - maxTableSize = 1 << 14 - // tableMask is redundant, but helps the compiler eliminate bounds - // checks. - tableMask = maxTableSize - 1 - ) - shift := uint32(32 - 8) - for tableSize := 1 << 8; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 { - shift-- - } - // In Go, all array elements are zero-initialized, so there is no advantage - // to a smaller tableSize per se. However, it matches the C++ algorithm, - // and in the asm versions of this code, we can get away with zeroing only - // the first tableSize elements. - var table [maxTableSize]uint16 - - // sLimit is when to stop looking for offset/length copies. The inputMargin - // lets us use a fast path for emitLiteral in the main loop, while we are - // looking for copies. - sLimit := len(src) - inputMargin - - // nextEmit is where in src the next emitLiteral should start from. - nextEmit := 0 - - // The encoded form must start with a literal, as there are no previous - // bytes to copy, so we start looking for hash matches at s == 1. - s := 1 - nextHash := hash(load32(src, s), shift) - - for { - // Copied from the C++ snappy implementation: - // - // Heuristic match skipping: If 32 bytes are scanned with no matches - // found, start looking only at every other byte. If 32 more bytes are - // scanned (or skipped), look at every third byte, etc.. When a match - // is found, immediately go back to looking at every byte. 
This is a - // small loss (~5% performance, ~0.1% density) for compressible data - // due to more bookkeeping, but for non-compressible data (such as - // JPEG) it's a huge win since the compressor quickly "realizes" the - // data is incompressible and doesn't bother looking for matches - // everywhere. - // - // The "skip" variable keeps track of how many bytes there are since - // the last match; dividing it by 32 (ie. right-shifting by five) gives - // the number of bytes to move ahead for each iteration. - skip := 32 - - nextS := s - candidate := 0 - for { - s = nextS - bytesBetweenHashLookups := skip >> 5 - nextS = s + bytesBetweenHashLookups - skip += bytesBetweenHashLookups - if nextS > sLimit { - goto emitRemainder - } - candidate = int(table[nextHash&tableMask]) - table[nextHash&tableMask] = uint16(s) - nextHash = hash(load32(src, nextS), shift) - if load32(src, s) == load32(src, candidate) { - break - } - } - - // A 4-byte match has been found. We'll later see if more than 4 bytes - // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit - // them as literal bytes. - d += emitLiteral(dst[d:], src[nextEmit:s]) - - // Call emitCopy, and then see if another emitCopy could be our next - // move. Repeat until we find no match for the input immediately after - // what was consumed by the last emitCopy call. - // - // If we exit this loop normally then we need to call emitLiteral next, - // though we don't yet know how big the literal will be. We handle that - // by proceeding to the next iteration of the main loop. We also can - // exit this loop via goto if we get close to exhausting the input. - for { - // Invariant: we have a 4-byte match at s, and no need to emit any - // literal bytes prior to s. - base := s - - // Extend the 4-byte match as long as possible. - // - // This is an inlined version of: - // s = extendMatch(src, candidate+4, s+4) - s += 4 - for i := candidate + 4; s < len(src) && src[i] == src[s]; i, s = i+1, s+1 { - } - - d += emitCopy(dst[d:], base-candidate, s-base) - nextEmit = s - if s >= sLimit { - goto emitRemainder - } - - // We could immediately start working at s now, but to improve - // compression we first update the hash table at s-1 and at s. If - // another emitCopy is not our next move, also calculate nextHash - // at s+1. At least on GOARCH=amd64, these three hash calculations - // are faster as one load64 call (with some shifts) instead of - // three load32 calls. - x := load64(src, s-1) - prevHash := hash(uint32(x>>0), shift) - table[prevHash&tableMask] = uint16(s - 1) - currHash := hash(uint32(x>>8), shift) - candidate = int(table[currHash&tableMask]) - table[currHash&tableMask] = uint16(s) - if uint32(x>>8) != load32(src, candidate) { - nextHash = hash(uint32(x>>16), shift) - s++ - break - } - } - } - -emitRemainder: - if nextEmit < len(src) { - d += emitLiteral(dst[d:], src[nextEmit:]) - } - return d -} diff --git a/vendor/github.com/klauspost/compress/internal/snapref/snappy.go b/vendor/github.com/klauspost/compress/internal/snapref/snappy.go deleted file mode 100644 index 34d01f4aa..000000000 --- a/vendor/github.com/klauspost/compress/internal/snapref/snappy.go +++ /dev/null @@ -1,98 +0,0 @@ -// Copyright 2011 The Snappy-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package snapref implements the Snappy compression format. It aims for very -// high speeds and reasonable compression. 
-// -// There are actually two Snappy formats: block and stream. They are related, -// but different: trying to decompress block-compressed data as a Snappy stream -// will fail, and vice versa. The block format is the Decode and Encode -// functions and the stream format is the Reader and Writer types. -// -// The block format, the more common case, is used when the complete size (the -// number of bytes) of the original data is known upfront, at the time -// compression starts. The stream format, also known as the framing format, is -// for when that isn't always true. -// -// The canonical, C++ implementation is at https://github.com/google/snappy and -// it only implements the block format. -package snapref - -import ( - "hash/crc32" -) - -/* -Each encoded block begins with the varint-encoded length of the decoded data, -followed by a sequence of chunks. Chunks begin and end on byte boundaries. The -first byte of each chunk is broken into its 2 least and 6 most significant bits -called l and m: l ranges in [0, 4) and m ranges in [0, 64). l is the chunk tag. -Zero means a literal tag. All other values mean a copy tag. - -For literal tags: - - If m < 60, the next 1 + m bytes are literal bytes. - - Otherwise, let n be the little-endian unsigned integer denoted by the next - m - 59 bytes. The next 1 + n bytes after that are literal bytes. - -For copy tags, length bytes are copied from offset bytes ago, in the style of -Lempel-Ziv compression algorithms. In particular: - - For l == 1, the offset ranges in [0, 1<<11) and the length in [4, 12). - The length is 4 + the low 3 bits of m. The high 3 bits of m form bits 8-10 - of the offset. The next byte is bits 0-7 of the offset. - - For l == 2, the offset ranges in [0, 1<<16) and the length in [1, 65). - The length is 1 + m. The offset is the little-endian unsigned integer - denoted by the next 2 bytes. - - For l == 3, this tag is a legacy format that is no longer issued by most - encoders. Nonetheless, the offset ranges in [0, 1<<32) and the length in - [1, 65). The length is 1 + m. The offset is the little-endian unsigned - integer denoted by the next 4 bytes. -*/ -const ( - tagLiteral = 0x00 - tagCopy1 = 0x01 - tagCopy2 = 0x02 - tagCopy4 = 0x03 -) - -const ( - checksumSize = 4 - chunkHeaderSize = 4 - magicChunk = "\xff\x06\x00\x00" + magicBody - magicBody = "sNaPpY" - - // maxBlockSize is the maximum size of the input to encodeBlock. It is not - // part of the wire format per se, but some parts of the encoder assume - // that an offset fits into a uint16. - // - // Also, for the framing format (Writer type instead of Encode function), - // https://github.com/google/snappy/blob/master/framing_format.txt says - // that "the uncompressed data in a chunk must be no longer than 65536 - // bytes". - maxBlockSize = 65536 - - // maxEncodedLenOfMaxBlockSize equals MaxEncodedLen(maxBlockSize), but is - // hard coded to be a const instead of a variable, so that obufLen can also - // be a const. Their equivalence is confirmed by - // TestMaxEncodedLenOfMaxBlockSize. 
- maxEncodedLenOfMaxBlockSize = 76490 - - obufHeaderLen = len(magicChunk) + checksumSize + chunkHeaderSize - obufLen = obufHeaderLen + maxEncodedLenOfMaxBlockSize -) - -const ( - chunkTypeCompressedData = 0x00 - chunkTypeUncompressedData = 0x01 - chunkTypePadding = 0xfe - chunkTypeStreamIdentifier = 0xff -) - -var crcTable = crc32.MakeTable(crc32.Castagnoli) - -// crc implements the checksum specified in section 3 of -// https://github.com/google/snappy/blob/master/framing_format.txt -func crc(b []byte) uint32 { - c := crc32.Update(0, crcTable, b) - return uint32(c>>15|c<<17) + 0xa282ead8 -} diff --git a/vendor/github.com/klauspost/compress/s2sx.mod b/vendor/github.com/klauspost/compress/s2sx.mod deleted file mode 100644 index 5a4412f90..000000000 --- a/vendor/github.com/klauspost/compress/s2sx.mod +++ /dev/null @@ -1,4 +0,0 @@ -module github.com/klauspost/compress - -go 1.19 - diff --git a/vendor/github.com/klauspost/compress/zstd/README.md b/vendor/github.com/klauspost/compress/zstd/README.md deleted file mode 100644 index 92e2347bb..000000000 --- a/vendor/github.com/klauspost/compress/zstd/README.md +++ /dev/null @@ -1,441 +0,0 @@ -# zstd - -[Zstandard](https://facebook.github.io/zstd/) is a real-time compression algorithm, providing high compression ratios. -It offers a very wide range of compression / speed trade-off, while being backed by a very fast decoder. -A high performance compression algorithm is implemented. For now focused on speed. - -This package provides [compression](#Compressor) to and [decompression](#Decompressor) of Zstandard content. - -This package is pure Go and without use of "unsafe". - -The `zstd` package is provided as open source software using a Go standard license. - -Currently the package is heavily optimized for 64 bit processors and will be significantly slower on 32 bit processors. - -For seekable zstd streams, see [this excellent package](https://github.com/SaveTheRbtz/zstd-seekable-format-go). - -## Installation - -Install using `go get -u github.com/klauspost/compress`. The package is located in `github.com/klauspost/compress/zstd`. - -[![Go Reference](https://pkg.go.dev/badge/github.com/klauspost/compress/zstd.svg)](https://pkg.go.dev/github.com/klauspost/compress/zstd) - -## Compressor - -### Status: - -STABLE - there may always be subtle bugs, a wide variety of content has been tested and the library is actively -used by several projects. This library is being [fuzz-tested](https://github.com/klauspost/compress-fuzz) for all updates. - -There may still be specific combinations of data types/size/settings that could lead to edge cases, -so as always, testing is recommended. - -For now, a high speed (fastest) and medium-fast (default) compressor has been implemented. - -* The "Fastest" compression ratio is roughly equivalent to zstd level 1. -* The "Default" compression ratio is roughly equivalent to zstd level 3 (default). -* The "Better" compression ratio is roughly equivalent to zstd level 7. -* The "Best" compression ratio is roughly equivalent to zstd level 11. - -In terms of speed, it is typically 2x as fast as the stdlib deflate/gzip in its fastest mode. -The compression ratio compared to stdlib is around level 3, but usually 3x as fast. - - -### Usage - -An Encoder can be used for either compressing a stream via the -`io.WriteCloser` interface supported by the Encoder or as multiple independent -tasks via the `EncodeAll` function. -Smaller encodes are encouraged to use the EncodeAll function. 
-Use `NewWriter` to create a new instance that can be used for both. - -To create a writer with default options, do like this: - -```Go -// Compress input to output. -func Compress(in io.Reader, out io.Writer) error { - enc, err := zstd.NewWriter(out) - if err != nil { - return err - } - _, err = io.Copy(enc, in) - if err != nil { - enc.Close() - return err - } - return enc.Close() -} -``` - -Now you can encode by writing data to `enc`. The output will be finished writing when `Close()` is called. -Even if your encode fails, you should still call `Close()` to release any resources that may be held up. - -The above is fine for big encodes. However, whenever possible try to *reuse* the writer. - -To reuse the encoder, you can use the `Reset(io.Writer)` function to change to another output. -This will allow the encoder to reuse all resources and avoid wasteful allocations. - -Currently stream encoding has 'light' concurrency, meaning up to 2 goroutines can be working on part -of a stream. This is independent of the `WithEncoderConcurrency(n)`, but that is likely to change -in the future. So if you want to limit concurrency for future updates, specify the concurrency -you would like. - -If you would like stream encoding to be done without spawning async goroutines, use `WithEncoderConcurrency(1)` -which will compress input as each block is completed, blocking on writes until each has completed. - -You can specify your desired compression level using `WithEncoderLevel()` option. Currently only pre-defined -compression settings can be specified. - -#### Future Compatibility Guarantees - -This will be an evolving project. When using this package it is important to note that both the compression efficiency and speed may change. - -The goal will be to keep the default efficiency at the default zstd (level 3). -However the encoding should never be assumed to remain the same, -and you should not use hashes of compressed output for similarity checks. - -The Encoder can be assumed to produce the same output from the exact same code version. -However, the may be modes in the future that break this, -although they will not be enabled without an explicit option. - -This encoder is not designed to (and will probably never) output the exact same bitstream as the reference encoder. - -Also note, that the cgo decompressor currently does not [report all errors on invalid input](https://github.com/DataDog/zstd/issues/59), -[omits error checks](https://github.com/DataDog/zstd/issues/61), [ignores checksums](https://github.com/DataDog/zstd/issues/43) -and seems to ignore concatenated streams, even though [it is part of the spec](https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#frames). - -#### Blocks - -For compressing small blocks, the returned encoder has a function called `EncodeAll(src, dst []byte) []byte`. - -`EncodeAll` will encode all input in src and append it to dst. -This function can be called concurrently. -Each call will only run on a same goroutine as the caller. - -Encoded blocks can be concatenated and the result will be the combined input stream. -Data compressed with EncodeAll can be decoded with the Decoder, using either a stream or `DecodeAll`. - -Especially when encoding blocks you should take special care to reuse the encoder. -This will effectively make it run without allocations after a warmup period. -To make it run completely without allocations, supply a destination buffer with space for all content. 
- -```Go -import "github.com/klauspost/compress/zstd" - -// Create a writer that caches compressors. -// For this operation type we supply a nil Reader. -var encoder, _ = zstd.NewWriter(nil) - -// Compress a buffer. -// If you have a destination buffer, the allocation in the call can also be eliminated. -func Compress(src []byte) []byte { - return encoder.EncodeAll(src, make([]byte, 0, len(src))) -} -``` - -You can control the maximum number of concurrent encodes using the `WithEncoderConcurrency(n)` -option when creating the writer. - -Using the Encoder for both a stream and individual blocks concurrently is safe. - -### Performance - -I have collected some speed examples to compare speed and compression against other compressors. - -* `file` is the input file. -* `out` is the compressor used. `zskp` is this package. `zstd` is the Datadog cgo library. `gzstd/gzkp` is gzip standard and this library. -* `level` is the compression level used. For `zskp` level 1 is "fastest", level 2 is "default"; 3 is "better", 4 is "best". -* `insize`/`outsize` is the input/output size. -* `millis` is the number of milliseconds used for compression. -* `mb/s` is megabytes (2^20 bytes) per second. - -``` -Silesia Corpus: -http://sun.aei.polsl.pl/~sdeor/corpus/silesia.zip - -This package: -file out level insize outsize millis mb/s -silesia.tar zskp 1 211947520 73821326 634 318.47 -silesia.tar zskp 2 211947520 67655404 1508 133.96 -silesia.tar zskp 3 211947520 64746933 3000 67.37 -silesia.tar zskp 4 211947520 60073508 16926 11.94 - -cgo zstd: -silesia.tar zstd 1 211947520 73605392 543 371.56 -silesia.tar zstd 3 211947520 66793289 864 233.68 -silesia.tar zstd 6 211947520 62916450 1913 105.66 -silesia.tar zstd 9 211947520 60212393 5063 39.92 - -gzip, stdlib/this package: -silesia.tar gzstd 1 211947520 80007735 1498 134.87 -silesia.tar gzkp 1 211947520 80088272 1009 200.31 - -GOB stream of binary data. Highly compressible. -https://files.klauspost.com/compress/gob-stream.7z - -file out level insize outsize millis mb/s -gob-stream zskp 1 1911399616 233948096 3230 564.34 -gob-stream zskp 2 1911399616 203997694 4997 364.73 -gob-stream zskp 3 1911399616 173526523 13435 135.68 -gob-stream zskp 4 1911399616 162195235 47559 38.33 - -gob-stream zstd 1 1911399616 249810424 2637 691.26 -gob-stream zstd 3 1911399616 208192146 3490 522.31 -gob-stream zstd 6 1911399616 193632038 6687 272.56 -gob-stream zstd 9 1911399616 177620386 16175 112.70 - -gob-stream gzstd 1 1911399616 357382013 9046 201.49 -gob-stream gzkp 1 1911399616 359136669 4885 373.08 - -The test data for the Large Text Compression Benchmark is the first -10^9 bytes of the English Wikipedia dump on Mar. 3, 2006. -http://mattmahoney.net/dc/textdata.html - -file out level insize outsize millis mb/s -enwik9 zskp 1 1000000000 343833605 3687 258.64 -enwik9 zskp 2 1000000000 317001237 7672 124.29 -enwik9 zskp 3 1000000000 291915823 15923 59.89 -enwik9 zskp 4 1000000000 261710291 77697 12.27 - -enwik9 zstd 1 1000000000 358072021 3110 306.65 -enwik9 zstd 3 1000000000 313734672 4784 199.35 -enwik9 zstd 6 1000000000 295138875 10290 92.68 -enwik9 zstd 9 1000000000 278348700 28549 33.40 - -enwik9 gzstd 1 1000000000 382578136 8608 110.78 -enwik9 gzkp 1 1000000000 382781160 5628 169.45 - -Highly compressible JSON file. 
-https://files.klauspost.com/compress/github-june-2days-2019.json.zst - -file out level insize outsize millis mb/s -github-june-2days-2019.json zskp 1 6273951764 697439532 9789 611.17 -github-june-2days-2019.json zskp 2 6273951764 610876538 18553 322.49 -github-june-2days-2019.json zskp 3 6273951764 517662858 44186 135.41 -github-june-2days-2019.json zskp 4 6273951764 464617114 165373 36.18 - -github-june-2days-2019.json zstd 1 6273951764 766284037 8450 708.00 -github-june-2days-2019.json zstd 3 6273951764 661889476 10927 547.57 -github-june-2days-2019.json zstd 6 6273951764 642756859 22996 260.18 -github-june-2days-2019.json zstd 9 6273951764 601974523 52413 114.16 - -github-june-2days-2019.json gzstd 1 6273951764 1164397768 26793 223.32 -github-june-2days-2019.json gzkp 1 6273951764 1120631856 17693 338.16 - -VM Image, Linux mint with a few installed applications: -https://files.klauspost.com/compress/rawstudio-mint14.7z - -file out level insize outsize millis mb/s -rawstudio-mint14.tar zskp 1 8558382592 3718400221 18206 448.29 -rawstudio-mint14.tar zskp 2 8558382592 3326118337 37074 220.15 -rawstudio-mint14.tar zskp 3 8558382592 3163842361 87306 93.49 -rawstudio-mint14.tar zskp 4 8558382592 2970480650 783862 10.41 - -rawstudio-mint14.tar zstd 1 8558382592 3609250104 17136 476.27 -rawstudio-mint14.tar zstd 3 8558382592 3341679997 29262 278.92 -rawstudio-mint14.tar zstd 6 8558382592 3235846406 77904 104.77 -rawstudio-mint14.tar zstd 9 8558382592 3160778861 140946 57.91 - -rawstudio-mint14.tar gzstd 1 8558382592 3926234992 51345 158.96 -rawstudio-mint14.tar gzkp 1 8558382592 3960117298 36722 222.26 - -CSV data: -https://files.klauspost.com/compress/nyc-taxi-data-10M.csv.zst - -file out level insize outsize millis mb/s -nyc-taxi-data-10M.csv zskp 1 3325605752 641319332 9462 335.17 -nyc-taxi-data-10M.csv zskp 2 3325605752 588976126 17570 180.50 -nyc-taxi-data-10M.csv zskp 3 3325605752 529329260 32432 97.79 -nyc-taxi-data-10M.csv zskp 4 3325605752 474949772 138025 22.98 - -nyc-taxi-data-10M.csv zstd 1 3325605752 687399637 8233 385.18 -nyc-taxi-data-10M.csv zstd 3 3325605752 598514411 10065 315.07 -nyc-taxi-data-10M.csv zstd 6 3325605752 570522953 20038 158.27 -nyc-taxi-data-10M.csv zstd 9 3325605752 517554797 64565 49.12 - -nyc-taxi-data-10M.csv gzstd 1 3325605752 928654908 21270 149.11 -nyc-taxi-data-10M.csv gzkp 1 3325605752 922273214 13929 227.68 -``` - -## Decompressor - -Status: STABLE - there may still be subtle bugs, but a wide variety of content has been tested. - -This library is being continuously [fuzz-tested](https://github.com/klauspost/compress-fuzz), -kindly supplied by [fuzzit.dev](https://fuzzit.dev/). -The main purpose of the fuzz testing is to ensure that it is not possible to crash the decoder, -or run it past its limits with ANY input provided. - -### Usage - -The package has been designed for two main usages, big streams of data and smaller in-memory buffers. -There are two main usages of the package for these. Both of them are accessed by creating a `Decoder`. - -For streaming use a simple setup could look like this: - -```Go -import "github.com/klauspost/compress/zstd" - -func Decompress(in io.Reader, out io.Writer) error { - d, err := zstd.NewReader(in) - if err != nil { - return err - } - defer d.Close() - - // Copy content... - _, err = io.Copy(out, d) - return err -} -``` - -It is important to use the "Close" function when you no longer need the Reader to stop running goroutines, -when running with default settings. 
-Goroutines will exit once an error has been returned, including `io.EOF` at the end of a stream. - -Streams are decoded concurrently in 4 asynchronous stages to give the best possible throughput. -However, if you prefer synchronous decompression, use `WithDecoderConcurrency(1)` which will decompress data -as it is being requested only. - -For decoding buffers, it could look something like this: - -```Go -import "github.com/klauspost/compress/zstd" - -// Create a reader that caches decompressors. -// For this operation type we supply a nil Reader. -var decoder, _ = zstd.NewReader(nil, zstd.WithDecoderConcurrency(0)) - -// Decompress a buffer. We don't supply a destination buffer, -// so it will be allocated by the decoder. -func Decompress(src []byte) ([]byte, error) { - return decoder.DecodeAll(src, nil) -} -``` - -Both of these cases should provide the functionality needed. -The decoder can be used for *concurrent* decompression of multiple buffers. -By default 4 decompressors will be created. - -It will only allow a certain number of concurrent operations to run. -To tweak that yourself use the `WithDecoderConcurrency(n)` option when creating the decoder. -It is possible to use `WithDecoderConcurrency(0)` to create GOMAXPROCS decoders. - -### Dictionaries - -Data compressed with [dictionaries](https://github.com/facebook/zstd#the-case-for-small-data-compression) can be decompressed. - -Dictionaries are added individually to Decoders. -Dictionaries are generated by the `zstd --train` command and contains an initial state for the decoder. -To add a dictionary use the `WithDecoderDicts(dicts ...[]byte)` option with the dictionary data. -Several dictionaries can be added at once. - -The dictionary will be used automatically for the data that specifies them. -A re-used Decoder will still contain the dictionaries registered. - -When registering multiple dictionaries with the same ID, the last one will be used. - -It is possible to use dictionaries when compressing data. - -To enable a dictionary use `WithEncoderDict(dict []byte)`. Here only one dictionary will be used -and it will likely be used even if it doesn't improve compression. - -The used dictionary must be used to decompress the content. - -For any real gains, the dictionary should be built with similar data. -If an unsuitable dictionary is used the output may be slightly larger than using no dictionary. -Use the [zstd commandline tool](https://github.com/facebook/zstd/releases) to build a dictionary from sample data. -For information see [zstd dictionary information](https://github.com/facebook/zstd#the-case-for-small-data-compression). - -For now there is a fixed startup performance penalty for compressing content with dictionaries. -This will likely be improved over time. Just be aware to test performance when implementing. - -### Allocation-less operation - -The decoder has been designed to operate without allocations after a warmup. - -This means that you should *store* the decoder for best performance. -To re-use a stream decoder, use the `Reset(r io.Reader) error` to switch to another stream. -A decoder can safely be re-used even if the previous stream failed. - -To release the resources, you must call the `Close()` function on a decoder. -After this it can *no longer be reused*, but all running goroutines will be stopped. -So you *must* use this if you will no longer need the Reader. - -For decompressing smaller buffers a single decoder can be used. 
-When decoding buffers, you can supply a destination slice with length 0 and your expected capacity. -In this case no unneeded allocations should be made. - -### Concurrency - -The buffer decoder does everything on the same goroutine and does nothing concurrently. -It can however decode several buffers concurrently. Use `WithDecoderConcurrency(n)` to limit that. - -The stream decoder will create goroutines that: - -1) Reads input and splits the input into blocks. -2) Decompression of literals. -3) Decompression of sequences. -4) Reconstruction of output stream. - -So effectively this also means the decoder will "read ahead" and prepare data to always be available for output. - -The concurrency level will, for streams, determine how many blocks ahead the compression will start. - -Since "blocks" are quite dependent on the output of the previous block stream decoding will only have limited concurrency. - -In practice this means that concurrency is often limited to utilizing about 3 cores effectively. - -### Benchmarks - -The first two are streaming decodes and the last are smaller inputs. - -Running on AMD Ryzen 9 3950X 16-Core Processor. AMD64 assembly used. - -``` -BenchmarkDecoderSilesia-32 5 206878840 ns/op 1024.50 MB/s 49808 B/op 43 allocs/op -BenchmarkDecoderEnwik9-32 1 1271809000 ns/op 786.28 MB/s 72048 B/op 52 allocs/op - -Concurrent blocks, performance: - -BenchmarkDecoder_DecodeAllParallel/kppkn.gtb.zst-32 67356 17857 ns/op 10321.96 MB/s 22.48 pct 102 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/geo.protodata.zst-32 266656 4421 ns/op 26823.21 MB/s 11.89 pct 19 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/plrabn12.txt.zst-32 20992 56842 ns/op 8477.17 MB/s 39.90 pct 754 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/lcet10.txt.zst-32 27456 43932 ns/op 9714.01 MB/s 33.27 pct 524 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/asyoulik.txt.zst-32 78432 15047 ns/op 8319.15 MB/s 40.34 pct 66 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/alice29.txt.zst-32 65800 18436 ns/op 8249.63 MB/s 37.75 pct 88 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/html_x_4.zst-32 102993 11523 ns/op 35546.09 MB/s 3.637 pct 143 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/paper-100k.pdf.zst-32 1000000 1070 ns/op 95720.98 MB/s 80.53 pct 3 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/fireworks.jpeg.zst-32 749802 1752 ns/op 70272.35 MB/s 100.0 pct 5 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/urls.10K.zst-32 22640 52934 ns/op 13263.37 MB/s 26.25 pct 1014 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/html.zst-32 226412 5232 ns/op 19572.27 MB/s 14.49 pct 20 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/comp-data.bin.zst-32 923041 1276 ns/op 3194.71 MB/s 31.26 pct 0 B/op 0 allocs/op -``` - -This reflects the performance around May 2022, but this may be out of date. - -## Zstd inside ZIP files - -It is possible to use zstandard to compress individual files inside zip archives. -While this isn't widely supported it can be useful for internal files. - -To support the compression and decompression of these files you must register a compressor and decompressor. - -It is highly recommended registering the (de)compressors on individual zip Reader/Writer and NOT -use the global registration functions. The main reason for this is that 2 registrations from -different packages will result in a panic. 
- -It is a good idea to only have a single compressor and decompressor, since they can be used for multiple zip -files concurrently, and using a single instance will allow reusing some resources. - -See [this example](https://pkg.go.dev/github.com/klauspost/compress/zstd#example-ZipCompressor) for -how to compress and decompress files inside zip archives. - -# Contributions - -Contributions are always welcome. -For new features/fixes, remember to add tests and for performance enhancements include benchmarks. - -For general feedback and experience reports, feel free to open an issue or write me on [Twitter](https://twitter.com/sh0dan). - -This package includes the excellent [`github.com/cespare/xxhash`](https://github.com/cespare/xxhash) package Copyright (c) 2016 Caleb Spare. diff --git a/vendor/github.com/klauspost/compress/zstd/bitreader.go b/vendor/github.com/klauspost/compress/zstd/bitreader.go deleted file mode 100644 index 25ca98394..000000000 --- a/vendor/github.com/klauspost/compress/zstd/bitreader.go +++ /dev/null @@ -1,136 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -import ( - "encoding/binary" - "errors" - "fmt" - "io" - "math/bits" -) - -// bitReader reads a bitstream in reverse. -// The last set bit indicates the start of the stream and is used -// for aligning the input. -type bitReader struct { - in []byte - value uint64 // Maybe use [16]byte, but shifting is awkward. - bitsRead uint8 -} - -// init initializes and resets the bit reader. -func (b *bitReader) init(in []byte) error { - if len(in) < 1 { - return errors.New("corrupt stream: too short") - } - b.in = in - // The highest bit of the last byte indicates where to start - v := in[len(in)-1] - if v == 0 { - return errors.New("corrupt stream, did not find end of stream") - } - b.bitsRead = 64 - b.value = 0 - if len(in) >= 8 { - b.fillFastStart() - } else { - b.fill() - b.fill() - } - b.bitsRead += 8 - uint8(highBits(uint32(v))) - return nil -} - -// getBits will return n bits. n can be 0. -func (b *bitReader) getBits(n uint8) int { - if n == 0 /*|| b.bitsRead >= 64 */ { - return 0 - } - return int(b.get32BitsFast(n)) -} - -// get32BitsFast requires that at least one bit is requested every time. -// There are no checks if the buffer is filled. -func (b *bitReader) get32BitsFast(n uint8) uint32 { - const regMask = 64 - 1 - v := uint32((b.value << (b.bitsRead & regMask)) >> ((regMask + 1 - n) & regMask)) - b.bitsRead += n - return v -} - -// fillFast() will make sure at least 32 bits are available. -// There must be at least 4 bytes available. -func (b *bitReader) fillFast() { - if b.bitsRead < 32 { - return - } - v := b.in[len(b.in)-4:] - b.in = b.in[:len(b.in)-4] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) - b.value = (b.value << 32) | uint64(low) - b.bitsRead -= 32 -} - -// fillFastStart() assumes the bitreader is empty and there is at least 8 bytes to read. -func (b *bitReader) fillFastStart() { - v := b.in[len(b.in)-8:] - b.in = b.in[:len(b.in)-8] - b.value = binary.LittleEndian.Uint64(v) - b.bitsRead = 0 -} - -// fill() will make sure at least 32 bits are available. 
-func (b *bitReader) fill() { - if b.bitsRead < 32 { - return - } - if len(b.in) >= 4 { - v := b.in[len(b.in)-4:] - b.in = b.in[:len(b.in)-4] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) - b.value = (b.value << 32) | uint64(low) - b.bitsRead -= 32 - return - } - - b.bitsRead -= uint8(8 * len(b.in)) - for len(b.in) > 0 { - b.value = (b.value << 8) | uint64(b.in[len(b.in)-1]) - b.in = b.in[:len(b.in)-1] - } -} - -// finished returns true if all bits have been read from the bit stream. -func (b *bitReader) finished() bool { - return len(b.in) == 0 && b.bitsRead >= 64 -} - -// overread returns true if more bits have been requested than is on the stream. -func (b *bitReader) overread() bool { - return b.bitsRead > 64 -} - -// remain returns the number of bits remaining. -func (b *bitReader) remain() uint { - return 8*uint(len(b.in)) + 64 - uint(b.bitsRead) -} - -// close the bitstream and returns an error if out-of-buffer reads occurred. -func (b *bitReader) close() error { - // Release reference. - b.in = nil - if !b.finished() { - return fmt.Errorf("%d extra bits on block, should be 0", b.remain()) - } - if b.bitsRead > 64 { - return io.ErrUnexpectedEOF - } - return nil -} - -func highBits(val uint32) (n uint32) { - return uint32(bits.Len32(val) - 1) -} diff --git a/vendor/github.com/klauspost/compress/zstd/bitwriter.go b/vendor/github.com/klauspost/compress/zstd/bitwriter.go deleted file mode 100644 index 1952f175b..000000000 --- a/vendor/github.com/klauspost/compress/zstd/bitwriter.go +++ /dev/null @@ -1,112 +0,0 @@ -// Copyright 2018 Klaus Post. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. -// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. - -package zstd - -// bitWriter will write bits. -// First bit will be LSB of the first byte of output. -type bitWriter struct { - bitContainer uint64 - nBits uint8 - out []byte -} - -// bitMask16 is bitmasks. Has extra to avoid bounds check. -var bitMask16 = [32]uint16{ - 0, 1, 3, 7, 0xF, 0x1F, - 0x3F, 0x7F, 0xFF, 0x1FF, 0x3FF, 0x7FF, - 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, 0xFFFF, - 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, - 0xFFFF, 0xFFFF} /* up to 16 bits */ - -var bitMask32 = [32]uint32{ - 0, 1, 3, 7, 0xF, 0x1F, 0x3F, 0x7F, 0xFF, - 0x1FF, 0x3FF, 0x7FF, 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, - 0x1ffff, 0x3ffff, 0x7FFFF, 0xfFFFF, 0x1fFFFF, 0x3fFFFF, 0x7fFFFF, 0xffFFFF, - 0x1ffFFFF, 0x3ffFFFF, 0x7ffFFFF, 0xfffFFFF, 0x1fffFFFF, 0x3fffFFFF, 0x7fffFFFF, -} // up to 32 bits - -// addBits16NC will add up to 16 bits. -// It will not check if there is space for them, -// so the caller must ensure that it has flushed recently. -func (b *bitWriter) addBits16NC(value uint16, bits uint8) { - b.bitContainer |= uint64(value&bitMask16[bits&31]) << (b.nBits & 63) - b.nBits += bits -} - -// addBits32NC will add up to 31 bits. -// It will not check if there is space for them, -// so the caller must ensure that it has flushed recently. -func (b *bitWriter) addBits32NC(value uint32, bits uint8) { - b.bitContainer |= uint64(value&bitMask32[bits&31]) << (b.nBits & 63) - b.nBits += bits -} - -// addBits64NC will add up to 64 bits. -// There must be space for 32 bits. 
-func (b *bitWriter) addBits64NC(value uint64, bits uint8) { - if bits <= 31 { - b.addBits32Clean(uint32(value), bits) - return - } - b.addBits32Clean(uint32(value), 32) - b.flush32() - b.addBits32Clean(uint32(value>>32), bits-32) -} - -// addBits32Clean will add up to 32 bits. -// It will not check if there is space for them. -// The input must not contain more bits than specified. -func (b *bitWriter) addBits32Clean(value uint32, bits uint8) { - b.bitContainer |= uint64(value) << (b.nBits & 63) - b.nBits += bits -} - -// addBits16Clean will add up to 16 bits. value may not contain more set bits than indicated. -// It will not check if there is space for them, so the caller must ensure that it has flushed recently. -func (b *bitWriter) addBits16Clean(value uint16, bits uint8) { - b.bitContainer |= uint64(value) << (b.nBits & 63) - b.nBits += bits -} - -// flush32 will flush out, so there are at least 32 bits available for writing. -func (b *bitWriter) flush32() { - if b.nBits < 32 { - return - } - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - byte(b.bitContainer>>24)) - b.nBits -= 32 - b.bitContainer >>= 32 -} - -// flushAlign will flush remaining full bytes and align to next byte boundary. -func (b *bitWriter) flushAlign() { - nbBytes := (b.nBits + 7) >> 3 - for i := uint8(0); i < nbBytes; i++ { - b.out = append(b.out, byte(b.bitContainer>>(i*8))) - } - b.nBits = 0 - b.bitContainer = 0 -} - -// close will write the alignment bit and write the final byte(s) -// to the output. -func (b *bitWriter) close() { - // End mark - b.addBits16Clean(1, 1) - // flush until next byte. - b.flushAlign() -} - -// reset and continue writing by appending to out. -func (b *bitWriter) reset(out []byte) { - b.bitContainer = 0 - b.nBits = 0 - b.out = out -} diff --git a/vendor/github.com/klauspost/compress/zstd/blockdec.go b/vendor/github.com/klauspost/compress/zstd/blockdec.go deleted file mode 100644 index 03744fbc7..000000000 --- a/vendor/github.com/klauspost/compress/zstd/blockdec.go +++ /dev/null @@ -1,729 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -import ( - "bytes" - "encoding/binary" - "errors" - "fmt" - "hash/crc32" - "io" - "os" - "path/filepath" - "sync" - - "github.com/klauspost/compress/huff0" - "github.com/klauspost/compress/zstd/internal/xxhash" -) - -type blockType uint8 - -//go:generate stringer -type=blockType,literalsBlockType,seqCompMode,tableIndex - -const ( - blockTypeRaw blockType = iota - blockTypeRLE - blockTypeCompressed - blockTypeReserved -) - -type literalsBlockType uint8 - -const ( - literalsBlockRaw literalsBlockType = iota - literalsBlockRLE - literalsBlockCompressed - literalsBlockTreeless -) - -const ( - // maxCompressedBlockSize is the biggest allowed compressed block size (128KB) - maxCompressedBlockSize = 128 << 10 - - compressedBlockOverAlloc = 16 - maxCompressedBlockSizeAlloc = 128<<10 + compressedBlockOverAlloc - - // Maximum possible block size (all Raw+Uncompressed). - maxBlockSize = (1 << 21) - 1 - - maxMatchLen = 131074 - maxSequences = 0x7f00 + 0xffff - - // We support slightly less than the reference decoder to be able to - // use ints on 32 bit archs. 
- maxOffsetBits = 30 -) - -var ( - huffDecoderPool = sync.Pool{New: func() interface{} { - return &huff0.Scratch{} - }} - - fseDecoderPool = sync.Pool{New: func() interface{} { - return &fseDecoder{} - }} -) - -type blockDec struct { - // Raw source data of the block. - data []byte - dataStorage []byte - - // Destination of the decoded data. - dst []byte - - // Buffer for literals data. - literalBuf []byte - - // Window size of the block. - WindowSize uint64 - - err error - - // Check against this crc, if hasCRC is true. - checkCRC uint32 - hasCRC bool - - // Frame to use for singlethreaded decoding. - // Should not be used by the decoder itself since parent may be another frame. - localFrame *frameDec - - sequence []seqVals - - async struct { - newHist *history - literals []byte - seqData []byte - seqSize int // Size of uncompressed sequences - fcs uint64 - } - - // Block is RLE, this is the size. - RLESize uint32 - - Type blockType - - // Is this the last block of a frame? - Last bool - - // Use less memory - lowMem bool -} - -func (b *blockDec) String() string { - if b == nil { - return "" - } - return fmt.Sprintf("Steam Size: %d, Type: %v, Last: %t, Window: %d", len(b.data), b.Type, b.Last, b.WindowSize) -} - -func newBlockDec(lowMem bool) *blockDec { - b := blockDec{ - lowMem: lowMem, - } - return &b -} - -// reset will reset the block. -// Input must be a start of a block and will be at the end of the block when returned. -func (b *blockDec) reset(br byteBuffer, windowSize uint64) error { - b.WindowSize = windowSize - tmp, err := br.readSmall(3) - if err != nil { - println("Reading block header:", err) - return err - } - bh := uint32(tmp[0]) | (uint32(tmp[1]) << 8) | (uint32(tmp[2]) << 16) - b.Last = bh&1 != 0 - b.Type = blockType((bh >> 1) & 3) - // find size. - cSize := int(bh >> 3) - maxSize := maxCompressedBlockSizeAlloc - switch b.Type { - case blockTypeReserved: - return ErrReservedBlockType - case blockTypeRLE: - if cSize > maxCompressedBlockSize || cSize > int(b.WindowSize) { - if debugDecoder { - printf("rle block too big: csize:%d block: %+v\n", uint64(cSize), b) - } - return ErrWindowSizeExceeded - } - b.RLESize = uint32(cSize) - if b.lowMem { - maxSize = cSize - } - cSize = 1 - case blockTypeCompressed: - if debugDecoder { - println("Data size on stream:", cSize) - } - b.RLESize = 0 - maxSize = maxCompressedBlockSizeAlloc - if windowSize < maxCompressedBlockSize && b.lowMem { - maxSize = int(windowSize) + compressedBlockOverAlloc - } - if cSize > maxCompressedBlockSize || uint64(cSize) > b.WindowSize { - if debugDecoder { - printf("compressed block too big: csize:%d block: %+v\n", uint64(cSize), b) - } - return ErrCompressedSizeTooBig - } - // Empty compressed blocks must at least be 2 bytes - // for Literals_Block_Type and one for Sequences_Section_Header. - if cSize < 2 { - return ErrBlockTooSmall - } - case blockTypeRaw: - if cSize > maxCompressedBlockSize || cSize > int(b.WindowSize) { - if debugDecoder { - printf("rle block too big: csize:%d block: %+v\n", uint64(cSize), b) - } - return ErrWindowSizeExceeded - } - - b.RLESize = 0 - // We do not need a destination for raw blocks. - maxSize = -1 - default: - panic("Invalid block type") - } - - // Read block data. - if _, ok := br.(*byteBuf); !ok && cap(b.dataStorage) < cSize { - // byteBuf doesn't need a destination buffer. 
- if b.lowMem || cSize > maxCompressedBlockSize { - b.dataStorage = make([]byte, 0, cSize+compressedBlockOverAlloc) - } else { - b.dataStorage = make([]byte, 0, maxCompressedBlockSizeAlloc) - } - } - b.data, err = br.readBig(cSize, b.dataStorage) - if err != nil { - if debugDecoder { - println("Reading block:", err, "(", cSize, ")", len(b.data)) - printf("%T", br) - } - return err - } - if cap(b.dst) <= maxSize { - b.dst = make([]byte, 0, maxSize+1) - } - return nil -} - -// sendEOF will make the decoder send EOF on this frame. -func (b *blockDec) sendErr(err error) { - b.Last = true - b.Type = blockTypeReserved - b.err = err -} - -// Close will release resources. -// Closed blockDec cannot be reset. -func (b *blockDec) Close() { -} - -// decodeBuf -func (b *blockDec) decodeBuf(hist *history) error { - switch b.Type { - case blockTypeRLE: - if cap(b.dst) < int(b.RLESize) { - if b.lowMem { - b.dst = make([]byte, b.RLESize) - } else { - b.dst = make([]byte, maxCompressedBlockSize) - } - } - b.dst = b.dst[:b.RLESize] - v := b.data[0] - for i := range b.dst { - b.dst[i] = v - } - hist.appendKeep(b.dst) - return nil - case blockTypeRaw: - hist.appendKeep(b.data) - return nil - case blockTypeCompressed: - saved := b.dst - // Append directly to history - if hist.ignoreBuffer == 0 { - b.dst = hist.b - hist.b = nil - } else { - b.dst = b.dst[:0] - } - err := b.decodeCompressed(hist) - if debugDecoder { - println("Decompressed to total", len(b.dst), "bytes, hash:", xxhash.Sum64(b.dst), "error:", err) - } - if hist.ignoreBuffer == 0 { - hist.b = b.dst - b.dst = saved - } else { - hist.appendKeep(b.dst) - } - return err - case blockTypeReserved: - // Used for returning errors. - return b.err - default: - panic("Invalid block type") - } -} - -func (b *blockDec) decodeLiterals(in []byte, hist *history) (remain []byte, err error) { - // There must be at least one byte for Literals_Block_Type and one for Sequences_Section_Header - if len(in) < 2 { - return in, ErrBlockTooSmall - } - - litType := literalsBlockType(in[0] & 3) - var litRegenSize int - var litCompSize int - sizeFormat := (in[0] >> 2) & 3 - var fourStreams bool - var literals []byte - switch litType { - case literalsBlockRaw, literalsBlockRLE: - switch sizeFormat { - case 0, 2: - // Regenerated_Size uses 5 bits (0-31). Literals_Section_Header uses 1 byte. - litRegenSize = int(in[0] >> 3) - in = in[1:] - case 1: - // Regenerated_Size uses 12 bits (0-4095). Literals_Section_Header uses 2 bytes. - litRegenSize = int(in[0]>>4) + (int(in[1]) << 4) - in = in[2:] - case 3: - // Regenerated_Size uses 20 bits (0-1048575). Literals_Section_Header uses 3 bytes. - if len(in) < 3 { - println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in)) - return in, ErrBlockTooSmall - } - litRegenSize = int(in[0]>>4) + (int(in[1]) << 4) + (int(in[2]) << 12) - in = in[3:] - } - case literalsBlockCompressed, literalsBlockTreeless: - switch sizeFormat { - case 0, 1: - // Both Regenerated_Size and Compressed_Size use 10 bits (0-1023). 
- if len(in) < 3 { - println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in)) - return in, ErrBlockTooSmall - } - n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12) - litRegenSize = int(n & 1023) - litCompSize = int(n >> 10) - fourStreams = sizeFormat == 1 - in = in[3:] - case 2: - fourStreams = true - if len(in) < 4 { - println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in)) - return in, ErrBlockTooSmall - } - n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12) + (uint64(in[3]) << 20) - litRegenSize = int(n & 16383) - litCompSize = int(n >> 14) - in = in[4:] - case 3: - fourStreams = true - if len(in) < 5 { - println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in)) - return in, ErrBlockTooSmall - } - n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12) + (uint64(in[3]) << 20) + (uint64(in[4]) << 28) - litRegenSize = int(n & 262143) - litCompSize = int(n >> 18) - in = in[5:] - } - } - if debugDecoder { - println("literals type:", litType, "litRegenSize:", litRegenSize, "litCompSize:", litCompSize, "sizeFormat:", sizeFormat, "4X:", fourStreams) - } - if litRegenSize > int(b.WindowSize) || litRegenSize > maxCompressedBlockSize { - return in, ErrWindowSizeExceeded - } - - switch litType { - case literalsBlockRaw: - if len(in) < litRegenSize { - println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litRegenSize) - return in, ErrBlockTooSmall - } - literals = in[:litRegenSize] - in = in[litRegenSize:] - //printf("Found %d uncompressed literals\n", litRegenSize) - case literalsBlockRLE: - if len(in) < 1 { - println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", 1) - return in, ErrBlockTooSmall - } - if cap(b.literalBuf) < litRegenSize { - if b.lowMem { - b.literalBuf = make([]byte, litRegenSize, litRegenSize+compressedBlockOverAlloc) - } else { - b.literalBuf = make([]byte, litRegenSize, maxCompressedBlockSize+compressedBlockOverAlloc) - } - } - literals = b.literalBuf[:litRegenSize] - v := in[0] - for i := range literals { - literals[i] = v - } - in = in[1:] - if debugDecoder { - printf("Found %d RLE compressed literals\n", litRegenSize) - } - case literalsBlockTreeless: - if len(in) < litCompSize { - println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litCompSize) - return in, ErrBlockTooSmall - } - // Store compressed literals, so we defer decoding until we get history. - literals = in[:litCompSize] - in = in[litCompSize:] - if debugDecoder { - printf("Found %d compressed literals\n", litCompSize) - } - huff := hist.huffTree - if huff == nil { - return in, errors.New("literal block was treeless, but no history was defined") - } - // Ensure we have space to store it. - if cap(b.literalBuf) < litRegenSize { - if b.lowMem { - b.literalBuf = make([]byte, 0, litRegenSize+compressedBlockOverAlloc) - } else { - b.literalBuf = make([]byte, 0, maxCompressedBlockSize+compressedBlockOverAlloc) - } - } - var err error - // Use our out buffer. 
- huff.MaxDecodedSize = litRegenSize - if fourStreams { - literals, err = huff.Decoder().Decompress4X(b.literalBuf[:0:litRegenSize], literals) - } else { - literals, err = huff.Decoder().Decompress1X(b.literalBuf[:0:litRegenSize], literals) - } - // Make sure we don't leak our literals buffer - if err != nil { - println("decompressing literals:", err) - return in, err - } - if len(literals) != litRegenSize { - return in, fmt.Errorf("literal output size mismatch want %d, got %d", litRegenSize, len(literals)) - } - - case literalsBlockCompressed: - if len(in) < litCompSize { - println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litCompSize) - return in, ErrBlockTooSmall - } - literals = in[:litCompSize] - in = in[litCompSize:] - // Ensure we have space to store it. - if cap(b.literalBuf) < litRegenSize { - if b.lowMem { - b.literalBuf = make([]byte, 0, litRegenSize+compressedBlockOverAlloc) - } else { - b.literalBuf = make([]byte, 0, maxCompressedBlockSize+compressedBlockOverAlloc) - } - } - huff := hist.huffTree - if huff == nil || (hist.dict != nil && huff == hist.dict.litEnc) { - huff = huffDecoderPool.Get().(*huff0.Scratch) - if huff == nil { - huff = &huff0.Scratch{} - } - } - var err error - if debugDecoder { - println("huff table input:", len(literals), "CRC:", crc32.ChecksumIEEE(literals)) - } - huff, literals, err = huff0.ReadTable(literals, huff) - if err != nil { - println("reading huffman table:", err) - return in, err - } - hist.huffTree = huff - huff.MaxDecodedSize = litRegenSize - // Use our out buffer. - if fourStreams { - literals, err = huff.Decoder().Decompress4X(b.literalBuf[:0:litRegenSize], literals) - } else { - literals, err = huff.Decoder().Decompress1X(b.literalBuf[:0:litRegenSize], literals) - } - if err != nil { - println("decoding compressed literals:", err) - return in, err - } - // Make sure we don't leak our literals buffer - if len(literals) != litRegenSize { - return in, fmt.Errorf("literal output size mismatch want %d, got %d", litRegenSize, len(literals)) - } - // Re-cap to get extra size. - literals = b.literalBuf[:len(literals)] - if debugDecoder { - printf("Decompressed %d literals into %d bytes\n", litCompSize, litRegenSize) - } - } - hist.decoders.literals = literals - return in, nil -} - -// decodeCompressed will start decompressing a block. -func (b *blockDec) decodeCompressed(hist *history) error { - in := b.data - in, err := b.decodeLiterals(in, hist) - if err != nil { - return err - } - err = b.prepareSequences(in, hist) - if err != nil { - return err - } - if hist.decoders.nSeqs == 0 { - b.dst = append(b.dst, hist.decoders.literals...) 
- return nil - } - before := len(hist.decoders.out) - err = hist.decoders.decodeSync(hist.b[hist.ignoreBuffer:]) - if err != nil { - return err - } - if hist.decoders.maxSyncLen > 0 { - hist.decoders.maxSyncLen += uint64(before) - hist.decoders.maxSyncLen -= uint64(len(hist.decoders.out)) - } - b.dst = hist.decoders.out - hist.recentOffsets = hist.decoders.prevOffset - return nil -} - -func (b *blockDec) prepareSequences(in []byte, hist *history) (err error) { - if debugDecoder { - printf("prepareSequences: %d byte(s) input\n", len(in)) - } - // Decode Sequences - // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#sequences-section - if len(in) < 1 { - return ErrBlockTooSmall - } - var nSeqs int - seqHeader := in[0] - switch { - case seqHeader < 128: - nSeqs = int(seqHeader) - in = in[1:] - case seqHeader < 255: - if len(in) < 2 { - return ErrBlockTooSmall - } - nSeqs = int(seqHeader-128)<<8 | int(in[1]) - in = in[2:] - case seqHeader == 255: - if len(in) < 3 { - return ErrBlockTooSmall - } - nSeqs = 0x7f00 + int(in[1]) + (int(in[2]) << 8) - in = in[3:] - } - if nSeqs == 0 && len(in) != 0 { - // When no sequences, there should not be any more data... - if debugDecoder { - printf("prepareSequences: 0 sequences, but %d byte(s) left on stream\n", len(in)) - } - return ErrUnexpectedBlockSize - } - - var seqs = &hist.decoders - seqs.nSeqs = nSeqs - if nSeqs > 0 { - if len(in) < 1 { - return ErrBlockTooSmall - } - br := byteReader{b: in, off: 0} - compMode := br.Uint8() - br.advance(1) - if debugDecoder { - printf("Compression modes: 0b%b", compMode) - } - if compMode&3 != 0 { - return errors.New("corrupt block: reserved bits not zero") - } - for i := uint(0); i < 3; i++ { - mode := seqCompMode((compMode >> (6 - i*2)) & 3) - if debugDecoder { - println("Table", tableIndex(i), "is", mode) - } - var seq *sequenceDec - switch tableIndex(i) { - case tableLiteralLengths: - seq = &seqs.litLengths - case tableOffsets: - seq = &seqs.offsets - case tableMatchLengths: - seq = &seqs.matchLengths - default: - panic("unknown table") - } - switch mode { - case compModePredefined: - if seq.fse != nil && !seq.fse.preDefined { - fseDecoderPool.Put(seq.fse) - } - seq.fse = &fsePredef[i] - case compModeRLE: - if br.remain() < 1 { - return ErrBlockTooSmall - } - v := br.Uint8() - br.advance(1) - if seq.fse == nil || seq.fse.preDefined { - seq.fse = fseDecoderPool.Get().(*fseDecoder) - } - symb, err := decSymbolValue(v, symbolTableX[i]) - if err != nil { - printf("RLE Transform table (%v) error: %v", tableIndex(i), err) - return err - } - seq.fse.setRLE(symb) - if debugDecoder { - printf("RLE set to 0x%x, code: %v", symb, v) - } - case compModeFSE: - println("Reading table for", tableIndex(i)) - if seq.fse == nil || seq.fse.preDefined { - seq.fse = fseDecoderPool.Get().(*fseDecoder) - } - err := seq.fse.readNCount(&br, uint16(maxTableSymbol[i])) - if err != nil { - println("Read table error:", err) - return err - } - err = seq.fse.transform(symbolTableX[i]) - if err != nil { - println("Transform table error:", err) - return err - } - if debugDecoder { - println("Read table ok", "symbolLen:", seq.fse.symbolLen) - } - case compModeRepeat: - seq.repeat = true - } - if br.overread() { - return io.ErrUnexpectedEOF - } - } - in = br.unread() - } - if debugDecoder { - println("Literals:", len(seqs.literals), "hash:", xxhash.Sum64(seqs.literals), "and", seqs.nSeqs, "sequences.") - } - - if nSeqs == 0 { - if len(b.sequence) > 0 { - b.sequence = b.sequence[:0] - } - return nil - } - br := seqs.br - if 
br == nil { - br = &bitReader{} - } - if err := br.init(in); err != nil { - return err - } - - if err := seqs.initialize(br, hist, b.dst); err != nil { - println("initializing sequences:", err) - return err - } - // Extract blocks... - if false && hist.dict == nil { - fatalErr := func(err error) { - if err != nil { - panic(err) - } - } - fn := fmt.Sprintf("n-%d-lits-%d-prev-%d-%d-%d-win-%d.blk", hist.decoders.nSeqs, len(hist.decoders.literals), hist.recentOffsets[0], hist.recentOffsets[1], hist.recentOffsets[2], hist.windowSize) - var buf bytes.Buffer - fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.litLengths.fse)) - fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.matchLengths.fse)) - fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.offsets.fse)) - buf.Write(in) - os.WriteFile(filepath.Join("testdata", "seqs", fn), buf.Bytes(), os.ModePerm) - } - - return nil -} - -func (b *blockDec) decodeSequences(hist *history) error { - if cap(b.sequence) < hist.decoders.nSeqs { - if b.lowMem { - b.sequence = make([]seqVals, 0, hist.decoders.nSeqs) - } else { - b.sequence = make([]seqVals, 0, 0x7F00+0xffff) - } - } - b.sequence = b.sequence[:hist.decoders.nSeqs] - if hist.decoders.nSeqs == 0 { - hist.decoders.seqSize = len(hist.decoders.literals) - return nil - } - hist.decoders.windowSize = hist.windowSize - hist.decoders.prevOffset = hist.recentOffsets - - err := hist.decoders.decode(b.sequence) - hist.recentOffsets = hist.decoders.prevOffset - return err -} - -func (b *blockDec) executeSequences(hist *history) error { - hbytes := hist.b - if len(hbytes) > hist.windowSize { - hbytes = hbytes[len(hbytes)-hist.windowSize:] - // We do not need history anymore. - if hist.dict != nil { - hist.dict.content = nil - } - } - hist.decoders.windowSize = hist.windowSize - hist.decoders.out = b.dst[:0] - err := hist.decoders.execute(b.sequence, hbytes) - if err != nil { - return err - } - return b.updateHistory(hist) -} - -func (b *blockDec) updateHistory(hist *history) error { - if len(b.data) > maxCompressedBlockSize { - return fmt.Errorf("compressed block size too large (%d)", len(b.data)) - } - // Set output and release references. - b.dst = hist.decoders.out - hist.recentOffsets = hist.decoders.prevOffset - - if b.Last { - // if last block we don't care about history. - println("Last block, no history returned") - hist.b = hist.b[:0] - return nil - } else { - hist.append(b.dst) - if debugDecoder { - println("Finished block with ", len(b.sequence), "sequences. Added", len(b.dst), "to history, now length", len(hist.b)) - } - } - hist.decoders.out, hist.decoders.literals = nil, nil - - return nil -} diff --git a/vendor/github.com/klauspost/compress/zstd/blockenc.go b/vendor/github.com/klauspost/compress/zstd/blockenc.go deleted file mode 100644 index 32a7f401d..000000000 --- a/vendor/github.com/klauspost/compress/zstd/blockenc.go +++ /dev/null @@ -1,909 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. 
- -package zstd - -import ( - "errors" - "fmt" - "math" - "math/bits" - - "github.com/klauspost/compress/huff0" -) - -type blockEnc struct { - size int - literals []byte - sequences []seq - coders seqCoders - litEnc *huff0.Scratch - dictLitEnc *huff0.Scratch - wr bitWriter - - extraLits int - output []byte - recentOffsets [3]uint32 - prevRecentOffsets [3]uint32 - - last bool - lowMem bool -} - -// init should be used once the block has been created. -// If called more than once, the effect is the same as calling reset. -func (b *blockEnc) init() { - if b.lowMem { - // 1K literals - if cap(b.literals) < 1<<10 { - b.literals = make([]byte, 0, 1<<10) - } - const defSeqs = 20 - if cap(b.sequences) < defSeqs { - b.sequences = make([]seq, 0, defSeqs) - } - // 1K - if cap(b.output) < 1<<10 { - b.output = make([]byte, 0, 1<<10) - } - } else { - if cap(b.literals) < maxCompressedBlockSize { - b.literals = make([]byte, 0, maxCompressedBlockSize) - } - const defSeqs = 2000 - if cap(b.sequences) < defSeqs { - b.sequences = make([]seq, 0, defSeqs) - } - if cap(b.output) < maxCompressedBlockSize { - b.output = make([]byte, 0, maxCompressedBlockSize) - } - } - - if b.coders.mlEnc == nil { - b.coders.mlEnc = &fseEncoder{} - b.coders.mlPrev = &fseEncoder{} - b.coders.ofEnc = &fseEncoder{} - b.coders.ofPrev = &fseEncoder{} - b.coders.llEnc = &fseEncoder{} - b.coders.llPrev = &fseEncoder{} - } - b.litEnc = &huff0.Scratch{WantLogLess: 4} - b.reset(nil) -} - -// initNewEncode can be used to reset offsets and encoders to the initial state. -func (b *blockEnc) initNewEncode() { - b.recentOffsets = [3]uint32{1, 4, 8} - b.litEnc.Reuse = huff0.ReusePolicyNone - b.coders.setPrev(nil, nil, nil) -} - -// reset will reset the block for a new encode, but in the same stream, -// meaning that state will be carried over, but the block content is reset. -// If a previous block is provided, the recent offsets are carried over. -func (b *blockEnc) reset(prev *blockEnc) { - b.extraLits = 0 - b.literals = b.literals[:0] - b.size = 0 - b.sequences = b.sequences[:0] - b.output = b.output[:0] - b.last = false - if prev != nil { - b.recentOffsets = prev.prevRecentOffsets - } - b.dictLitEnc = nil -} - -// reset will reset the block for a new encode, but in the same stream, -// meaning that state will be carried over, but the block content is reset. -// If a previous block is provided, the recent offsets are carried over. -func (b *blockEnc) swapEncoders(prev *blockEnc) { - b.coders.swap(&prev.coders) - b.litEnc, prev.litEnc = prev.litEnc, b.litEnc -} - -// blockHeader contains the information for a block header. -type blockHeader uint32 - -// setLast sets the 'last' indicator on a block. -func (h *blockHeader) setLast(b bool) { - if b { - *h = *h | 1 - } else { - const mask = (1 << 24) - 2 - *h = *h & mask - } -} - -// setSize will store the compressed size of a block. -func (h *blockHeader) setSize(v uint32) { - const mask = 7 - *h = (*h)&mask | blockHeader(v<<3) -} - -// setType sets the block type. -func (h *blockHeader) setType(t blockType) { - const mask = 1 | (((1 << 24) - 1) ^ 7) - *h = (*h & mask) | blockHeader(t<<1) -} - -// appendTo will append the block header to a slice. -func (h blockHeader) appendTo(b []byte) []byte { - return append(b, uint8(h), uint8(h>>8), uint8(h>>16)) -} - -// String returns a string representation of the block. -func (h blockHeader) String() string { - return fmt.Sprintf("Type: %d, Size: %d, Last:%t", (h>>1)&3, h>>3, h&1 == 1) -} - -// literalsHeader contains literals header information. 
-type literalsHeader uint64 - -// setType can be used to set the type of literal block. -func (h *literalsHeader) setType(t literalsBlockType) { - const mask = math.MaxUint64 - 3 - *h = (*h & mask) | literalsHeader(t) -} - -// setSize can be used to set a single size, for uncompressed and RLE content. -func (h *literalsHeader) setSize(regenLen int) { - inBits := bits.Len32(uint32(regenLen)) - // Only retain 2 bits - const mask = 3 - lh := uint64(*h & mask) - switch { - case inBits < 5: - lh |= (uint64(regenLen) << 3) | (1 << 60) - if debugEncoder { - got := int(lh>>3) & 0xff - if got != regenLen { - panic(fmt.Sprint("litRegenSize = ", regenLen, "(want) != ", got, "(got)")) - } - } - case inBits < 12: - lh |= (1 << 2) | (uint64(regenLen) << 4) | (2 << 60) - case inBits < 20: - lh |= (3 << 2) | (uint64(regenLen) << 4) | (3 << 60) - default: - panic(fmt.Errorf("internal error: block too big (%d)", regenLen)) - } - *h = literalsHeader(lh) -} - -// setSizes will set the size of a compressed literals section and the input length. -func (h *literalsHeader) setSizes(compLen, inLen int, single bool) { - compBits, inBits := bits.Len32(uint32(compLen)), bits.Len32(uint32(inLen)) - // Only retain 2 bits - const mask = 3 - lh := uint64(*h & mask) - switch { - case compBits <= 10 && inBits <= 10: - if !single { - lh |= 1 << 2 - } - lh |= (uint64(inLen) << 4) | (uint64(compLen) << (10 + 4)) | (3 << 60) - if debugEncoder { - const mmask = (1 << 24) - 1 - n := (lh >> 4) & mmask - if int(n&1023) != inLen { - panic(fmt.Sprint("regensize:", int(n&1023), "!=", inLen, inBits)) - } - if int(n>>10) != compLen { - panic(fmt.Sprint("compsize:", int(n>>10), "!=", compLen, compBits)) - } - } - case compBits <= 14 && inBits <= 14: - lh |= (2 << 2) | (uint64(inLen) << 4) | (uint64(compLen) << (14 + 4)) | (4 << 60) - if single { - panic("single stream used with more than 10 bits length.") - } - case compBits <= 18 && inBits <= 18: - lh |= (3 << 2) | (uint64(inLen) << 4) | (uint64(compLen) << (18 + 4)) | (5 << 60) - if single { - panic("single stream used with more than 10 bits length.") - } - default: - panic("internal error: block too big") - } - *h = literalsHeader(lh) -} - -// appendTo will append the literals header to a byte slice. -func (h literalsHeader) appendTo(b []byte) []byte { - size := uint8(h >> 60) - switch size { - case 1: - b = append(b, uint8(h)) - case 2: - b = append(b, uint8(h), uint8(h>>8)) - case 3: - b = append(b, uint8(h), uint8(h>>8), uint8(h>>16)) - case 4: - b = append(b, uint8(h), uint8(h>>8), uint8(h>>16), uint8(h>>24)) - case 5: - b = append(b, uint8(h), uint8(h>>8), uint8(h>>16), uint8(h>>24), uint8(h>>32)) - default: - panic(fmt.Errorf("internal error: literalsHeader has invalid size (%d)", size)) - } - return b -} - -// size returns the output size with currently set values. -func (h literalsHeader) size() int { - return int(h >> 60) -} - -func (h literalsHeader) String() string { - return fmt.Sprintf("Type: %d, SizeFormat: %d, Size: 0x%d, Bytes:%d", literalsBlockType(h&3), (h>>2)&3, h&((1<<60)-1)>>4, h>>60) -} - -// pushOffsets will push the recent offsets to the backup store. -func (b *blockEnc) pushOffsets() { - b.prevRecentOffsets = b.recentOffsets -} - -// pushOffsets will push the recent offsets to the backup store. -func (b *blockEnc) popOffsets() { - b.recentOffsets = b.prevRecentOffsets -} - -// matchOffset will adjust recent offsets and return the adjusted one, -// if it matches a previous offset. 
-func (b *blockEnc) matchOffset(offset, lits uint32) uint32 { - // Check if offset is one of the recent offsets. - // Adjusts the output offset accordingly. - // Gives a tiny bit of compression, typically around 1%. - if true { - if lits > 0 { - switch offset { - case b.recentOffsets[0]: - offset = 1 - case b.recentOffsets[1]: - b.recentOffsets[1] = b.recentOffsets[0] - b.recentOffsets[0] = offset - offset = 2 - case b.recentOffsets[2]: - b.recentOffsets[2] = b.recentOffsets[1] - b.recentOffsets[1] = b.recentOffsets[0] - b.recentOffsets[0] = offset - offset = 3 - default: - b.recentOffsets[2] = b.recentOffsets[1] - b.recentOffsets[1] = b.recentOffsets[0] - b.recentOffsets[0] = offset - offset += 3 - } - } else { - switch offset { - case b.recentOffsets[1]: - b.recentOffsets[1] = b.recentOffsets[0] - b.recentOffsets[0] = offset - offset = 1 - case b.recentOffsets[2]: - b.recentOffsets[2] = b.recentOffsets[1] - b.recentOffsets[1] = b.recentOffsets[0] - b.recentOffsets[0] = offset - offset = 2 - case b.recentOffsets[0] - 1: - b.recentOffsets[2] = b.recentOffsets[1] - b.recentOffsets[1] = b.recentOffsets[0] - b.recentOffsets[0] = offset - offset = 3 - default: - b.recentOffsets[2] = b.recentOffsets[1] - b.recentOffsets[1] = b.recentOffsets[0] - b.recentOffsets[0] = offset - offset += 3 - } - } - } else { - offset += 3 - } - return offset -} - -// encodeRaw can be used to set the output to a raw representation of supplied bytes. -func (b *blockEnc) encodeRaw(a []byte) { - var bh blockHeader - bh.setLast(b.last) - bh.setSize(uint32(len(a))) - bh.setType(blockTypeRaw) - b.output = bh.appendTo(b.output[:0]) - b.output = append(b.output, a...) - if debugEncoder { - println("Adding RAW block, length", len(a), "last:", b.last) - } -} - -// encodeRaw can be used to set the output to a raw representation of supplied bytes. -func (b *blockEnc) encodeRawTo(dst, src []byte) []byte { - var bh blockHeader - bh.setLast(b.last) - bh.setSize(uint32(len(src))) - bh.setType(blockTypeRaw) - dst = bh.appendTo(dst) - dst = append(dst, src...) - if debugEncoder { - println("Adding RAW block, length", len(src), "last:", b.last) - } - return dst -} - -// encodeLits can be used if the block is only litLen. -func (b *blockEnc) encodeLits(lits []byte, raw bool) error { - var bh blockHeader - bh.setLast(b.last) - bh.setSize(uint32(len(lits))) - - // Don't compress extremely small blocks - if len(lits) < 8 || (len(lits) < 32 && b.dictLitEnc == nil) || raw { - if debugEncoder { - println("Adding RAW block, length", len(lits), "last:", b.last) - } - bh.setType(blockTypeRaw) - b.output = bh.appendTo(b.output) - b.output = append(b.output, lits...) - return nil - } - - var ( - out []byte - reUsed, single bool - err error - ) - if b.dictLitEnc != nil { - b.litEnc.TransferCTable(b.dictLitEnc) - b.litEnc.Reuse = huff0.ReusePolicyAllow - b.dictLitEnc = nil - } - if len(lits) >= 1024 { - // Use 4 Streams. - out, reUsed, err = huff0.Compress4X(lits, b.litEnc) - } else if len(lits) > 16 { - // Use 1 stream - single = true - out, reUsed, err = huff0.Compress1X(lits, b.litEnc) - } else { - err = huff0.ErrIncompressible - } - if err == nil && len(out)+5 > len(lits) { - // If we are close, we may still be worse or equal to raw. 
- var lh literalsHeader - lh.setSizes(len(out), len(lits), single) - if len(out)+lh.size() >= len(lits) { - err = huff0.ErrIncompressible - } - } - switch err { - case huff0.ErrIncompressible: - if debugEncoder { - println("Adding RAW block, length", len(lits), "last:", b.last) - } - bh.setType(blockTypeRaw) - b.output = bh.appendTo(b.output) - b.output = append(b.output, lits...) - return nil - case huff0.ErrUseRLE: - if debugEncoder { - println("Adding RLE block, length", len(lits)) - } - bh.setType(blockTypeRLE) - b.output = bh.appendTo(b.output) - b.output = append(b.output, lits[0]) - return nil - case nil: - default: - return err - } - // Compressed... - // Now, allow reuse - b.litEnc.Reuse = huff0.ReusePolicyAllow - bh.setType(blockTypeCompressed) - var lh literalsHeader - if reUsed { - if debugEncoder { - println("Reused tree, compressed to", len(out)) - } - lh.setType(literalsBlockTreeless) - } else { - if debugEncoder { - println("New tree, compressed to", len(out), "tree size:", len(b.litEnc.OutTable)) - } - lh.setType(literalsBlockCompressed) - } - // Set sizes - lh.setSizes(len(out), len(lits), single) - bh.setSize(uint32(len(out) + lh.size() + 1)) - - // Write block headers. - b.output = bh.appendTo(b.output) - b.output = lh.appendTo(b.output) - // Add compressed data. - b.output = append(b.output, out...) - // No sequences. - b.output = append(b.output, 0) - return nil -} - -// encodeRLE will encode an RLE block. -func (b *blockEnc) encodeRLE(val byte, length uint32) { - var bh blockHeader - bh.setLast(b.last) - bh.setSize(length) - bh.setType(blockTypeRLE) - b.output = bh.appendTo(b.output) - b.output = append(b.output, val) -} - -// fuzzFseEncoder can be used to fuzz the FSE encoder. -func fuzzFseEncoder(data []byte) int { - if len(data) > maxSequences || len(data) < 2 { - return 0 - } - enc := fseEncoder{} - hist := enc.Histogram() - maxSym := uint8(0) - for i, v := range data { - v = v & 63 - data[i] = v - hist[v]++ - if v > maxSym { - maxSym = v - } - } - if maxSym == 0 { - // All 0 - return 0 - } - maxCount := func(a []uint32) int { - var max uint32 - for _, v := range a { - if v > max { - max = v - } - } - return int(max) - } - cnt := maxCount(hist[:maxSym]) - if cnt == len(data) { - // RLE - return 0 - } - enc.HistogramFinished(maxSym, cnt) - err := enc.normalizeCount(len(data)) - if err != nil { - return 0 - } - _, err = enc.writeCount(nil) - if err != nil { - panic(err) - } - return 1 -} - -// encode will encode the block and append the output in b.output. -// Previous offset codes must be pushed if more blocks are expected. -func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error { - if len(b.sequences) == 0 { - return b.encodeLits(b.literals, rawAllLits) - } - if len(b.sequences) == 1 && len(org) > 0 && len(b.literals) <= 1 { - // Check common RLE cases. - seq := b.sequences[0] - if seq.litLen == uint32(len(b.literals)) && seq.offset-3 == 1 { - // Offset == 1 and 0 or 1 literals. - b.encodeRLE(org[0], b.sequences[0].matchLen+zstdMinMatch+seq.litLen) - return nil - } - } - - // We want some difference to at least account for the headers. - saved := b.size - len(b.literals) - (b.size >> 6) - if saved < 16 { - if org == nil { - return errIncompressible - } - b.popOffsets() - return b.encodeLits(org, rawAllLits) - } - - var bh blockHeader - var lh literalsHeader - bh.setLast(b.last) - bh.setType(blockTypeCompressed) - // Store offset of the block header. Needed when we know the size. 
- bhOffset := len(b.output) - b.output = bh.appendTo(b.output) - - var ( - out []byte - reUsed, single bool - err error - ) - if b.dictLitEnc != nil { - b.litEnc.TransferCTable(b.dictLitEnc) - b.litEnc.Reuse = huff0.ReusePolicyAllow - b.dictLitEnc = nil - } - if len(b.literals) >= 1024 && !raw { - // Use 4 Streams. - out, reUsed, err = huff0.Compress4X(b.literals, b.litEnc) - } else if len(b.literals) > 16 && !raw { - // Use 1 stream - single = true - out, reUsed, err = huff0.Compress1X(b.literals, b.litEnc) - } else { - err = huff0.ErrIncompressible - } - - if err == nil && len(out)+5 > len(b.literals) { - // If we are close, we may still be worse or equal to raw. - var lh literalsHeader - lh.setSize(len(b.literals)) - szRaw := lh.size() - lh.setSizes(len(out), len(b.literals), single) - szComp := lh.size() - if len(out)+szComp >= len(b.literals)+szRaw { - err = huff0.ErrIncompressible - } - } - switch err { - case huff0.ErrIncompressible: - lh.setType(literalsBlockRaw) - lh.setSize(len(b.literals)) - b.output = lh.appendTo(b.output) - b.output = append(b.output, b.literals...) - if debugEncoder { - println("Adding literals RAW, length", len(b.literals)) - } - case huff0.ErrUseRLE: - lh.setType(literalsBlockRLE) - lh.setSize(len(b.literals)) - b.output = lh.appendTo(b.output) - b.output = append(b.output, b.literals[0]) - if debugEncoder { - println("Adding literals RLE") - } - case nil: - // Compressed litLen... - if reUsed { - if debugEncoder { - println("reused tree") - } - lh.setType(literalsBlockTreeless) - } else { - if debugEncoder { - println("new tree, size:", len(b.litEnc.OutTable)) - } - lh.setType(literalsBlockCompressed) - if debugEncoder { - _, _, err := huff0.ReadTable(out, nil) - if err != nil { - panic(err) - } - } - } - lh.setSizes(len(out), len(b.literals), single) - if debugEncoder { - printf("Compressed %d literals to %d bytes", len(b.literals), len(out)) - println("Adding literal header:", lh) - } - b.output = lh.appendTo(b.output) - b.output = append(b.output, out...) - b.litEnc.Reuse = huff0.ReusePolicyAllow - if debugEncoder { - println("Adding literals compressed") - } - default: - if debugEncoder { - println("Adding literals ERROR:", err) - } - return err - } - // Sequence compression - - // Write the number of sequences - switch { - case len(b.sequences) < 128: - b.output = append(b.output, uint8(len(b.sequences))) - case len(b.sequences) < 0x7f00: // TODO: this could be wrong - n := len(b.sequences) - b.output = append(b.output, 128+uint8(n>>8), uint8(n)) - default: - n := len(b.sequences) - 0x7f00 - b.output = append(b.output, 255, uint8(n), uint8(n>>8)) - } - if debugEncoder { - println("Encoding", len(b.sequences), "sequences") - } - b.genCodes() - llEnc := b.coders.llEnc - ofEnc := b.coders.ofEnc - mlEnc := b.coders.mlEnc - err = llEnc.normalizeCount(len(b.sequences)) - if err != nil { - return err - } - err = ofEnc.normalizeCount(len(b.sequences)) - if err != nil { - return err - } - err = mlEnc.normalizeCount(len(b.sequences)) - if err != nil { - return err - } - - // Choose the best compression mode for each type. - // Will evaluate the new vs predefined and previous. - chooseComp := func(cur, prev, preDef *fseEncoder) (*fseEncoder, seqCompMode) { - // See if predefined/previous is better - hist := cur.count[:cur.symbolLen] - nSize := cur.approxSize(hist) + cur.maxHeaderSize() - predefSize := preDef.approxSize(hist) - prevSize := prev.approxSize(hist) - - // Add a small penalty for new encoders. - // Don't bother with extremely small (<2 byte gains). 
- nSize = nSize + (nSize+2*8*16)>>4 - switch { - case predefSize <= prevSize && predefSize <= nSize || forcePreDef: - if debugEncoder { - println("Using predefined", predefSize>>3, "<=", nSize>>3) - } - return preDef, compModePredefined - case prevSize <= nSize: - if debugEncoder { - println("Using previous", prevSize>>3, "<=", nSize>>3) - } - return prev, compModeRepeat - default: - if debugEncoder { - println("Using new, predef", predefSize>>3, ". previous:", prevSize>>3, ">", nSize>>3, "header max:", cur.maxHeaderSize()>>3, "bytes") - println("tl:", cur.actualTableLog, "symbolLen:", cur.symbolLen, "norm:", cur.norm[:cur.symbolLen], "hist", cur.count[:cur.symbolLen]) - } - return cur, compModeFSE - } - } - - // Write compression mode - var mode uint8 - if llEnc.useRLE { - mode |= uint8(compModeRLE) << 6 - llEnc.setRLE(b.sequences[0].llCode) - if debugEncoder { - println("llEnc.useRLE") - } - } else { - var m seqCompMode - llEnc, m = chooseComp(llEnc, b.coders.llPrev, &fsePredefEnc[tableLiteralLengths]) - mode |= uint8(m) << 6 - } - if ofEnc.useRLE { - mode |= uint8(compModeRLE) << 4 - ofEnc.setRLE(b.sequences[0].ofCode) - if debugEncoder { - println("ofEnc.useRLE") - } - } else { - var m seqCompMode - ofEnc, m = chooseComp(ofEnc, b.coders.ofPrev, &fsePredefEnc[tableOffsets]) - mode |= uint8(m) << 4 - } - - if mlEnc.useRLE { - mode |= uint8(compModeRLE) << 2 - mlEnc.setRLE(b.sequences[0].mlCode) - if debugEncoder { - println("mlEnc.useRLE, code: ", b.sequences[0].mlCode, "value", b.sequences[0].matchLen) - } - } else { - var m seqCompMode - mlEnc, m = chooseComp(mlEnc, b.coders.mlPrev, &fsePredefEnc[tableMatchLengths]) - mode |= uint8(m) << 2 - } - b.output = append(b.output, mode) - if debugEncoder { - printf("Compression modes: 0b%b", mode) - } - b.output, err = llEnc.writeCount(b.output) - if err != nil { - return err - } - start := len(b.output) - b.output, err = ofEnc.writeCount(b.output) - if err != nil { - return err - } - if false { - println("block:", b.output[start:], "tablelog", ofEnc.actualTableLog, "maxcount:", ofEnc.maxCount) - fmt.Printf("selected TableLog: %d, Symbol length: %d\n", ofEnc.actualTableLog, ofEnc.symbolLen) - for i, v := range ofEnc.norm[:ofEnc.symbolLen] { - fmt.Printf("%3d: %5d -> %4d \n", i, ofEnc.count[i], v) - } - } - b.output, err = mlEnc.writeCount(b.output) - if err != nil { - return err - } - - // Maybe in block? - wr := &b.wr - wr.reset(b.output) - - var ll, of, ml cState - - // Current sequence - seq := len(b.sequences) - 1 - s := b.sequences[seq] - llEnc.setBits(llBitsTable[:]) - mlEnc.setBits(mlBitsTable[:]) - ofEnc.setBits(nil) - - llTT, ofTT, mlTT := llEnc.ct.symbolTT[:256], ofEnc.ct.symbolTT[:256], mlEnc.ct.symbolTT[:256] - - // We have 3 bounds checks here (and in the loop). - // Since we are iterating backwards it is kinda hard to avoid. - llB, ofB, mlB := llTT[s.llCode], ofTT[s.ofCode], mlTT[s.mlCode] - ll.init(wr, &llEnc.ct, llB) - of.init(wr, &ofEnc.ct, ofB) - wr.flush32() - ml.init(wr, &mlEnc.ct, mlB) - - // Each of these lookups also generates a bounds check. - wr.addBits32NC(s.litLen, llB.outBits) - wr.addBits32NC(s.matchLen, mlB.outBits) - wr.flush32() - wr.addBits32NC(s.offset, ofB.outBits) - if debugSequences { - println("Encoded seq", seq, s, "codes:", s.llCode, s.mlCode, s.ofCode, "states:", ll.state, ml.state, of.state, "bits:", llB, mlB, ofB) - } - seq-- - // Store sequences in reverse... 
- for seq >= 0 {
- s = b.sequences[seq]
-
- ofB := ofTT[s.ofCode]
- wr.flush32() // tablelog max is below 8 for each, so it will fill max 24 bits.
- //of.encode(ofB)
- nbBitsOut := (uint32(of.state) + ofB.deltaNbBits) >> 16
- dstState := int32(of.state>>(nbBitsOut&15)) + int32(ofB.deltaFindState)
- wr.addBits16NC(of.state, uint8(nbBitsOut))
- of.state = of.stateTable[dstState]
-
- // Accumulate extra bits.
- outBits := ofB.outBits & 31
- extraBits := uint64(s.offset & bitMask32[outBits])
- extraBitsN := outBits
-
- mlB := mlTT[s.mlCode]
- //ml.encode(mlB)
- nbBitsOut = (uint32(ml.state) + mlB.deltaNbBits) >> 16
- dstState = int32(ml.state>>(nbBitsOut&15)) + int32(mlB.deltaFindState)
- wr.addBits16NC(ml.state, uint8(nbBitsOut))
- ml.state = ml.stateTable[dstState]
-
- outBits = mlB.outBits & 31
- extraBits = extraBits<<outBits | uint64(s.matchLen&bitMask32[outBits])
- extraBitsN += outBits
-
- llB := llTT[s.llCode]
- //ll.encode(llB)
- nbBitsOut = (uint32(ll.state) + llB.deltaNbBits) >> 16
- dstState = int32(ll.state>>(nbBitsOut&15)) + int32(llB.deltaFindState)
- wr.addBits16NC(ll.state, uint8(nbBitsOut))
- ll.state = ll.stateTable[dstState]
-
- outBits = llB.outBits & 31
- extraBits = extraBits<<outBits | uint64(s.litLen&bitMask32[outBits])
- extraBitsN += outBits
-
- // Write extra bits.
- wr.addBits64NC(extraBits, extraBitsN)
-
- if debugSequences {
- println("Encoded seq", seq, s)
- }
-
- seq--
- }
- ml.flush(mlEnc.actualTableLog)
- of.flush(ofEnc.actualTableLog)
- ll.flush(llEnc.actualTableLog)
- wr.close()
- b.output = wr.out
-
- // Maybe even add a bigger margin.
- if len(b.output)-3-bhOffset >= b.size {
- // Discard and encode as raw block.
- b.output = b.encodeRawTo(b.output[:bhOffset], org)
- b.popOffsets()
- b.litEnc.Reuse = huff0.ReusePolicyNone
- return nil
- }
-
- // Size is output minus block header.
- bh.setSize(uint32(len(b.output)-bhOffset) - 3)
- if debugEncoder {
- println("Rewriting block header", bh)
- }
- _ = bh.appendTo(b.output[bhOffset:bhOffset])
- b.coders.setPrev(llEnc, mlEnc, ofEnc)
- return nil
-}
-
-var errIncompressible = errors.New("incompressible")
-
-func (b *blockEnc) genCodes() {
- if len(b.sequences) == 0 {
- // nothing to do
- return
- }
- if len(b.sequences) > math.MaxUint16 {
- panic("can only encode up to 64K sequences")
- }
- // No bounds checks after here:
- llH := b.coders.llEnc.Histogram()
- ofH := b.coders.ofEnc.Histogram()
- mlH := b.coders.mlEnc.Histogram()
- for i := range llH {
- llH[i] = 0
- }
- for i := range ofH {
- ofH[i] = 0
- }
- for i := range mlH {
- mlH[i] = 0
- }
-
- var llMax, ofMax, mlMax uint8
- for i := range b.sequences {
- seq := &b.sequences[i]
- v := llCode(seq.litLen)
- seq.llCode = v
- llH[v]++
- if v > llMax {
- llMax = v
- }
-
- v = ofCode(seq.offset)
- seq.ofCode = v
- ofH[v]++
- if v > ofMax {
- ofMax = v
- }
-
- v = mlCode(seq.matchLen)
- seq.mlCode = v
- mlH[v]++
- if v > mlMax {
- mlMax = v
- if debugAsserts && mlMax > maxMatchLengthSymbol {
- panic(fmt.Errorf("mlMax > maxMatchLengthSymbol (%d), matchlen: %d", mlMax, seq.matchLen))
- }
- }
- }
- maxCount := func(a []uint32) int {
- var max uint32
- for _, v := range a {
- if v > max {
- max = v
- }
- }
- return int(max)
- }
- if debugAsserts && mlMax > maxMatchLengthSymbol {
- panic(fmt.Errorf("mlMax > maxMatchLengthSymbol (%d)", mlMax))
- }
- if debugAsserts && ofMax > maxOffsetBits {
- panic(fmt.Errorf("ofMax > maxOffsetBits (%d)", ofMax))
- }
- if debugAsserts && llMax > maxLiteralLengthSymbol {
- panic(fmt.Errorf("llMax > maxLiteralLengthSymbol (%d)", llMax))
- }
-
- b.coders.mlEnc.HistogramFinished(mlMax, maxCount(mlH[:mlMax+1]))
- b.coders.ofEnc.HistogramFinished(ofMax, maxCount(ofH[:ofMax+1]))
- b.coders.llEnc.HistogramFinished(llMax, maxCount(llH[:llMax+1]))
-}
diff --git a/vendor/github.com/klauspost/compress/zstd/blocktype_string.go b/vendor/github.com/klauspost/compress/zstd/blocktype_string.go
deleted file mode 100644
index 01a01e486..000000000
--- a/vendor/github.com/klauspost/compress/zstd/blocktype_string.go
+++ /dev/null
@@ -1,85 +0,0 @@
-// Code generated by "stringer 
-type=blockType,literalsBlockType,seqCompMode,tableIndex"; DO NOT EDIT. - -package zstd - -import "strconv" - -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. - var x [1]struct{} - _ = x[blockTypeRaw-0] - _ = x[blockTypeRLE-1] - _ = x[blockTypeCompressed-2] - _ = x[blockTypeReserved-3] -} - -const _blockType_name = "blockTypeRawblockTypeRLEblockTypeCompressedblockTypeReserved" - -var _blockType_index = [...]uint8{0, 12, 24, 43, 60} - -func (i blockType) String() string { - if i >= blockType(len(_blockType_index)-1) { - return "blockType(" + strconv.FormatInt(int64(i), 10) + ")" - } - return _blockType_name[_blockType_index[i]:_blockType_index[i+1]] -} -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. - var x [1]struct{} - _ = x[literalsBlockRaw-0] - _ = x[literalsBlockRLE-1] - _ = x[literalsBlockCompressed-2] - _ = x[literalsBlockTreeless-3] -} - -const _literalsBlockType_name = "literalsBlockRawliteralsBlockRLEliteralsBlockCompressedliteralsBlockTreeless" - -var _literalsBlockType_index = [...]uint8{0, 16, 32, 55, 76} - -func (i literalsBlockType) String() string { - if i >= literalsBlockType(len(_literalsBlockType_index)-1) { - return "literalsBlockType(" + strconv.FormatInt(int64(i), 10) + ")" - } - return _literalsBlockType_name[_literalsBlockType_index[i]:_literalsBlockType_index[i+1]] -} -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. - var x [1]struct{} - _ = x[compModePredefined-0] - _ = x[compModeRLE-1] - _ = x[compModeFSE-2] - _ = x[compModeRepeat-3] -} - -const _seqCompMode_name = "compModePredefinedcompModeRLEcompModeFSEcompModeRepeat" - -var _seqCompMode_index = [...]uint8{0, 18, 29, 40, 54} - -func (i seqCompMode) String() string { - if i >= seqCompMode(len(_seqCompMode_index)-1) { - return "seqCompMode(" + strconv.FormatInt(int64(i), 10) + ")" - } - return _seqCompMode_name[_seqCompMode_index[i]:_seqCompMode_index[i+1]] -} -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. - var x [1]struct{} - _ = x[tableLiteralLengths-0] - _ = x[tableOffsets-1] - _ = x[tableMatchLengths-2] -} - -const _tableIndex_name = "tableLiteralLengthstableOffsetstableMatchLengths" - -var _tableIndex_index = [...]uint8{0, 19, 31, 48} - -func (i tableIndex) String() string { - if i >= tableIndex(len(_tableIndex_index)-1) { - return "tableIndex(" + strconv.FormatInt(int64(i), 10) + ")" - } - return _tableIndex_name[_tableIndex_index[i]:_tableIndex_index[i+1]] -} diff --git a/vendor/github.com/klauspost/compress/zstd/bytebuf.go b/vendor/github.com/klauspost/compress/zstd/bytebuf.go deleted file mode 100644 index 55a388553..000000000 --- a/vendor/github.com/klauspost/compress/zstd/bytebuf.go +++ /dev/null @@ -1,131 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -import ( - "fmt" - "io" -) - -type byteBuffer interface { - // Read up to 8 bytes. - // Returns io.ErrUnexpectedEOF if this cannot be satisfied. - readSmall(n int) ([]byte, error) - - // Read >8 bytes. - // MAY use the destination slice. 
- readBig(n int, dst []byte) ([]byte, error) - - // Read a single byte. - readByte() (byte, error) - - // Skip n bytes. - skipN(n int64) error -} - -// in-memory buffer -type byteBuf []byte - -func (b *byteBuf) readSmall(n int) ([]byte, error) { - if debugAsserts && n > 8 { - panic(fmt.Errorf("small read > 8 (%d). use readBig", n)) - } - bb := *b - if len(bb) < n { - return nil, io.ErrUnexpectedEOF - } - r := bb[:n] - *b = bb[n:] - return r, nil -} - -func (b *byteBuf) readBig(n int, dst []byte) ([]byte, error) { - bb := *b - if len(bb) < n { - return nil, io.ErrUnexpectedEOF - } - r := bb[:n] - *b = bb[n:] - return r, nil -} - -func (b *byteBuf) readByte() (byte, error) { - bb := *b - if len(bb) < 1 { - return 0, io.ErrUnexpectedEOF - } - r := bb[0] - *b = bb[1:] - return r, nil -} - -func (b *byteBuf) skipN(n int64) error { - bb := *b - if n < 0 { - return fmt.Errorf("negative skip (%d) requested", n) - } - if int64(len(bb)) < n { - return io.ErrUnexpectedEOF - } - *b = bb[n:] - return nil -} - -// wrapper around a reader. -type readerWrapper struct { - r io.Reader - tmp [8]byte -} - -func (r *readerWrapper) readSmall(n int) ([]byte, error) { - if debugAsserts && n > 8 { - panic(fmt.Errorf("small read > 8 (%d). use readBig", n)) - } - n2, err := io.ReadFull(r.r, r.tmp[:n]) - // We only really care about the actual bytes read. - if err != nil { - if err == io.EOF { - return nil, io.ErrUnexpectedEOF - } - if debugDecoder { - println("readSmall: got", n2, "want", n, "err", err) - } - return nil, err - } - return r.tmp[:n], nil -} - -func (r *readerWrapper) readBig(n int, dst []byte) ([]byte, error) { - if cap(dst) < n { - dst = make([]byte, n) - } - n2, err := io.ReadFull(r.r, dst[:n]) - if err == io.EOF && n > 0 { - err = io.ErrUnexpectedEOF - } - return dst[:n2], err -} - -func (r *readerWrapper) readByte() (byte, error) { - n2, err := io.ReadFull(r.r, r.tmp[:1]) - if err != nil { - if err == io.EOF { - err = io.ErrUnexpectedEOF - } - return 0, err - } - if n2 != 1 { - return 0, io.ErrUnexpectedEOF - } - return r.tmp[0], nil -} - -func (r *readerWrapper) skipN(n int64) error { - n2, err := io.CopyN(io.Discard, r.r, n) - if n2 != n { - err = io.ErrUnexpectedEOF - } - return err -} diff --git a/vendor/github.com/klauspost/compress/zstd/bytereader.go b/vendor/github.com/klauspost/compress/zstd/bytereader.go deleted file mode 100644 index 0e59a242d..000000000 --- a/vendor/github.com/klauspost/compress/zstd/bytereader.go +++ /dev/null @@ -1,82 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -// byteReader provides a byte reader that reads -// little endian values from a byte stream. -// The input stream is manually advanced. -// The reader performs no bounds checks. -type byteReader struct { - b []byte - off int -} - -// advance the stream b n bytes. -func (b *byteReader) advance(n uint) { - b.off += int(n) -} - -// overread returns whether we have advanced too far. -func (b *byteReader) overread() bool { - return b.off > len(b.b) -} - -// Int32 returns a little endian int32 starting at current offset. 
-func (b byteReader) Int32() int32 { - b2 := b.b[b.off:] - b2 = b2[:4] - v3 := int32(b2[3]) - v2 := int32(b2[2]) - v1 := int32(b2[1]) - v0 := int32(b2[0]) - return v0 | (v1 << 8) | (v2 << 16) | (v3 << 24) -} - -// Uint8 returns the next byte -func (b *byteReader) Uint8() uint8 { - v := b.b[b.off] - return v -} - -// Uint32 returns a little endian uint32 starting at current offset. -func (b byteReader) Uint32() uint32 { - if r := b.remain(); r < 4 { - // Very rare - v := uint32(0) - for i := 1; i <= r; i++ { - v = (v << 8) | uint32(b.b[len(b.b)-i]) - } - return v - } - b2 := b.b[b.off:] - b2 = b2[:4] - v3 := uint32(b2[3]) - v2 := uint32(b2[2]) - v1 := uint32(b2[1]) - v0 := uint32(b2[0]) - return v0 | (v1 << 8) | (v2 << 16) | (v3 << 24) -} - -// Uint32NC returns a little endian uint32 starting at current offset. -// The caller must be sure if there are at least 4 bytes left. -func (b byteReader) Uint32NC() uint32 { - b2 := b.b[b.off:] - b2 = b2[:4] - v3 := uint32(b2[3]) - v2 := uint32(b2[2]) - v1 := uint32(b2[1]) - v0 := uint32(b2[0]) - return v0 | (v1 << 8) | (v2 << 16) | (v3 << 24) -} - -// unread returns the unread portion of the input. -func (b byteReader) unread() []byte { - return b.b[b.off:] -} - -// remain will return the number of bytes remaining. -func (b byteReader) remain() int { - return len(b.b) - b.off -} diff --git a/vendor/github.com/klauspost/compress/zstd/decodeheader.go b/vendor/github.com/klauspost/compress/zstd/decodeheader.go deleted file mode 100644 index 6a5a2988b..000000000 --- a/vendor/github.com/klauspost/compress/zstd/decodeheader.go +++ /dev/null @@ -1,261 +0,0 @@ -// Copyright 2020+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. - -package zstd - -import ( - "encoding/binary" - "errors" - "io" -) - -// HeaderMaxSize is the maximum size of a Frame and Block Header. -// If less is sent to Header.Decode it *may* still contain enough information. -const HeaderMaxSize = 14 + 3 - -// Header contains information about the first frame and block within that. -type Header struct { - // SingleSegment specifies whether the data is to be decompressed into a - // single contiguous memory segment. - // It implies that WindowSize is invalid and that FrameContentSize is valid. - SingleSegment bool - - // WindowSize is the window of data to keep while decoding. - // Will only be set if SingleSegment is false. - WindowSize uint64 - - // Dictionary ID. - // If 0, no dictionary. - DictionaryID uint32 - - // HasFCS specifies whether FrameContentSize has a valid value. - HasFCS bool - - // FrameContentSize is the expected uncompressed size of the entire frame. - FrameContentSize uint64 - - // Skippable will be true if the frame is meant to be skipped. - // This implies that FirstBlock.OK is false. - Skippable bool - - // SkippableID is the user-specific ID for the skippable frame. - // Valid values are between 0 to 15, inclusive. - SkippableID int - - // SkippableSize is the length of the user data to skip following - // the header. - SkippableSize uint32 - - // HeaderSize is the raw size of the frame header. - // - // For normal frames, it includes the size of the magic number and - // the size of the header (per section 3.1.1.1). - // It does not include the size for any data blocks (section 3.1.1.2) nor - // the size for the trailing content checksum. - // - // For skippable frames, this counts the size of the magic number - // along with the size of the size field of the payload. 
- // It does not include the size of the skippable payload itself. - // The total frame size is the HeaderSize plus the SkippableSize. - HeaderSize int - - // First block information. - FirstBlock struct { - // OK will be set if first block could be decoded. - OK bool - - // Is this the last block of a frame? - Last bool - - // Is the data compressed? - // If true CompressedSize will be populated. - // Unfortunately DecompressedSize cannot be determined - // without decoding the blocks. - Compressed bool - - // DecompressedSize is the expected decompressed size of the block. - // Will be 0 if it cannot be determined. - DecompressedSize int - - // CompressedSize of the data in the block. - // Does not include the block header. - // Will be equal to DecompressedSize if not Compressed. - CompressedSize int - } - - // If set there is a checksum present for the block content. - // The checksum field at the end is always 4 bytes long. - HasCheckSum bool -} - -// Decode the header from the beginning of the stream. -// This will decode the frame header and the first block header if enough bytes are provided. -// It is recommended to provide at least HeaderMaxSize bytes. -// If the frame header cannot be read an error will be returned. -// If there isn't enough input, io.ErrUnexpectedEOF is returned. -// The FirstBlock.OK will indicate if enough information was available to decode the first block header. -func (h *Header) Decode(in []byte) error { - _, err := h.DecodeAndStrip(in) - return err -} - -// DecodeAndStrip will decode the header from the beginning of the stream -// and on success return the remaining bytes. -// This will decode the frame header and the first block header if enough bytes are provided. -// It is recommended to provide at least HeaderMaxSize bytes. -// If the frame header cannot be read an error will be returned. -// If there isn't enough input, io.ErrUnexpectedEOF is returned. -// The FirstBlock.OK will indicate if enough information was available to decode the first block header. 
-func (h *Header) DecodeAndStrip(in []byte) (remain []byte, err error) { - *h = Header{} - if len(in) < 4 { - return nil, io.ErrUnexpectedEOF - } - h.HeaderSize += 4 - b, in := in[:4], in[4:] - if string(b) != frameMagic { - if string(b[1:4]) != skippableFrameMagic || b[0]&0xf0 != 0x50 { - return nil, ErrMagicMismatch - } - if len(in) < 4 { - return nil, io.ErrUnexpectedEOF - } - h.HeaderSize += 4 - h.Skippable = true - h.SkippableID = int(b[0] & 0xf) - h.SkippableSize = binary.LittleEndian.Uint32(in) - return in[4:], nil - } - - // Read Window_Descriptor - // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#window_descriptor - if len(in) < 1 { - return nil, io.ErrUnexpectedEOF - } - fhd, in := in[0], in[1:] - h.HeaderSize++ - h.SingleSegment = fhd&(1<<5) != 0 - h.HasCheckSum = fhd&(1<<2) != 0 - if fhd&(1<<3) != 0 { - return nil, errors.New("reserved bit set on frame header") - } - - if !h.SingleSegment { - if len(in) < 1 { - return nil, io.ErrUnexpectedEOF - } - var wd byte - wd, in = in[0], in[1:] - h.HeaderSize++ - windowLog := 10 + (wd >> 3) - windowBase := uint64(1) << windowLog - windowAdd := (windowBase / 8) * uint64(wd&0x7) - h.WindowSize = windowBase + windowAdd - } - - // Read Dictionary_ID - // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary_id - if size := fhd & 3; size != 0 { - if size == 3 { - size = 4 - } - if len(in) < int(size) { - return nil, io.ErrUnexpectedEOF - } - b, in = in[:size], in[size:] - h.HeaderSize += int(size) - switch len(b) { - case 1: - h.DictionaryID = uint32(b[0]) - case 2: - h.DictionaryID = uint32(b[0]) | (uint32(b[1]) << 8) - case 4: - h.DictionaryID = uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) - } - } - - // Read Frame_Content_Size - // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#frame_content_size - var fcsSize int - v := fhd >> 6 - switch v { - case 0: - if h.SingleSegment { - fcsSize = 1 - } - default: - fcsSize = 1 << v - } - - if fcsSize > 0 { - h.HasFCS = true - if len(in) < fcsSize { - return nil, io.ErrUnexpectedEOF - } - b, in = in[:fcsSize], in[fcsSize:] - h.HeaderSize += int(fcsSize) - switch len(b) { - case 1: - h.FrameContentSize = uint64(b[0]) - case 2: - // When FCS_Field_Size is 2, the offset of 256 is added. - h.FrameContentSize = uint64(b[0]) | (uint64(b[1]) << 8) + 256 - case 4: - h.FrameContentSize = uint64(b[0]) | (uint64(b[1]) << 8) | (uint64(b[2]) << 16) | (uint64(b[3]) << 24) - case 8: - d1 := uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) - d2 := uint32(b[4]) | (uint32(b[5]) << 8) | (uint32(b[6]) << 16) | (uint32(b[7]) << 24) - h.FrameContentSize = uint64(d1) | (uint64(d2) << 32) - } - } - - // Frame Header done, we will not fail from now on. - if len(in) < 3 { - return in, nil - } - tmp := in[:3] - bh := uint32(tmp[0]) | (uint32(tmp[1]) << 8) | (uint32(tmp[2]) << 16) - h.FirstBlock.Last = bh&1 != 0 - blockType := blockType((bh >> 1) & 3) - // find size. 
- cSize := int(bh >> 3) - switch blockType { - case blockTypeReserved: - return in, nil - case blockTypeRLE: - h.FirstBlock.Compressed = true - h.FirstBlock.DecompressedSize = cSize - h.FirstBlock.CompressedSize = 1 - case blockTypeCompressed: - h.FirstBlock.Compressed = true - h.FirstBlock.CompressedSize = cSize - case blockTypeRaw: - h.FirstBlock.DecompressedSize = cSize - h.FirstBlock.CompressedSize = cSize - default: - panic("Invalid block type") - } - - h.FirstBlock.OK = true - return in, nil -} - -// AppendTo will append the encoded header to the dst slice. -// There is no error checking performed on the header values. -func (h *Header) AppendTo(dst []byte) ([]byte, error) { - if h.Skippable { - magic := [4]byte{0x50, 0x2a, 0x4d, 0x18} - magic[0] |= byte(h.SkippableID & 0xf) - dst = append(dst, magic[:]...) - f := h.SkippableSize - return append(dst, uint8(f), uint8(f>>8), uint8(f>>16), uint8(f>>24)), nil - } - f := frameHeader{ - ContentSize: h.FrameContentSize, - WindowSize: uint32(h.WindowSize), - SingleSegment: h.SingleSegment, - Checksum: h.HasCheckSum, - DictID: h.DictionaryID, - } - return f.appendTo(dst), nil -} diff --git a/vendor/github.com/klauspost/compress/zstd/decoder.go b/vendor/github.com/klauspost/compress/zstd/decoder.go deleted file mode 100644 index bbca17234..000000000 --- a/vendor/github.com/klauspost/compress/zstd/decoder.go +++ /dev/null @@ -1,948 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -import ( - "context" - "encoding/binary" - "io" - "sync" - - "github.com/klauspost/compress/zstd/internal/xxhash" -) - -// Decoder provides decoding of zstandard streams. -// The decoder has been designed to operate without allocations after a warmup. -// This means that you should store the decoder for best performance. -// To re-use a stream decoder, use the Reset(r io.Reader) error to switch to another stream. -// A decoder can safely be re-used even if the previous stream failed. -// To release the resources, you must call the Close() function on a decoder. -type Decoder struct { - o decoderOptions - - // Unreferenced decoders, ready for use. - decoders chan *blockDec - - // Current read position used for Reader functionality. - current decoderState - - // sync stream decoding - syncStream struct { - decodedFrame uint64 - br readerWrapper - enabled bool - inFrame bool - dstBuf []byte - } - - frame *frameDec - - // Custom dictionaries. - dicts map[uint32]*dict - - // streamWg is the waitgroup for all streams - streamWg sync.WaitGroup -} - -// decoderState is used for maintaining state when the decoder -// is used for streaming. -type decoderState struct { - // current block being written to stream. - decodeOutput - - // output in order to be written to stream. - output chan decodeOutput - - // cancel remaining output. - cancel context.CancelFunc - - // crc of current frame - crc *xxhash.Digest - - flushed bool -} - -var ( - // Check the interfaces we want to support. - _ = io.WriterTo(&Decoder{}) - _ = io.Reader(&Decoder{}) -) - -// NewReader creates a new decoder. -// A nil Reader can be provided in which case Reset can be used to start a decode. -// -// A Decoder can be used in two modes: -// -// 1) As a stream, or -// 2) For stateless decoding using DecodeAll. -// -// Only a single stream can be decoded concurrently, but the same decoder -// can run multiple concurrent stateless decodes. 
It is even possible to -// use stateless decodes while a stream is being decoded. -// -// The Reset function can be used to initiate a new stream, which will considerably -// reduce the allocations normally caused by NewReader. -func NewReader(r io.Reader, opts ...DOption) (*Decoder, error) { - initPredefined() - var d Decoder - d.o.setDefault() - for _, o := range opts { - err := o(&d.o) - if err != nil { - return nil, err - } - } - d.current.crc = xxhash.New() - d.current.flushed = true - - if r == nil { - d.current.err = ErrDecoderNilInput - } - - // Transfer option dicts. - d.dicts = make(map[uint32]*dict, len(d.o.dicts)) - for _, dc := range d.o.dicts { - d.dicts[dc.id] = dc - } - d.o.dicts = nil - - // Create decoders - d.decoders = make(chan *blockDec, d.o.concurrent) - for i := 0; i < d.o.concurrent; i++ { - dec := newBlockDec(d.o.lowMem) - dec.localFrame = newFrameDec(d.o) - d.decoders <- dec - } - - if r == nil { - return &d, nil - } - return &d, d.Reset(r) -} - -// Read bytes from the decompressed stream into p. -// Returns the number of bytes written and any error that occurred. -// When the stream is done, io.EOF will be returned. -func (d *Decoder) Read(p []byte) (int, error) { - var n int - for { - if len(d.current.b) > 0 { - filled := copy(p, d.current.b) - p = p[filled:] - d.current.b = d.current.b[filled:] - n += filled - } - if len(p) == 0 { - break - } - if len(d.current.b) == 0 { - // We have an error and no more data - if d.current.err != nil { - break - } - if !d.nextBlock(n == 0) { - return n, d.current.err - } - } - } - if len(d.current.b) > 0 { - if debugDecoder { - println("returning", n, "still bytes left:", len(d.current.b)) - } - // Only return error at end of block - return n, nil - } - if d.current.err != nil { - d.drainOutput() - } - if debugDecoder { - println("returning", n, d.current.err, len(d.decoders)) - } - return n, d.current.err -} - -// Reset will reset the decoder the supplied stream after the current has finished processing. -// Note that this functionality cannot be used after Close has been called. -// Reset can be called with a nil reader to release references to the previous reader. -// After being called with a nil reader, no other operations than Reset or DecodeAll or Close -// should be used. -func (d *Decoder) Reset(r io.Reader) error { - if d.current.err == ErrDecoderClosed { - return d.current.err - } - - d.drainOutput() - - d.syncStream.br.r = nil - if r == nil { - d.current.err = ErrDecoderNilInput - if len(d.current.b) > 0 { - d.current.b = d.current.b[:0] - } - d.current.flushed = true - return nil - } - - // If bytes buffer and < 5MB, do sync decoding anyway. - if bb, ok := r.(byter); ok && bb.Len() < d.o.decodeBufsBelow && !d.o.limitToCap { - bb2 := bb - if debugDecoder { - println("*bytes.Buffer detected, doing sync decode, len:", bb.Len()) - } - b := bb2.Bytes() - var dst []byte - if cap(d.syncStream.dstBuf) > 0 { - dst = d.syncStream.dstBuf[:0] - } - - dst, err := d.DecodeAll(b, dst) - if err == nil { - err = io.EOF - } - // Save output buffer - d.syncStream.dstBuf = dst - d.current.b = dst - d.current.err = err - d.current.flushed = true - if debugDecoder { - println("sync decode to", len(dst), "bytes, err:", err) - } - return nil - } - // Remove current block. - d.stashDecoder() - d.current.decodeOutput = decodeOutput{} - d.current.err = nil - d.current.flushed = false - d.current.d = nil - d.syncStream.dstBuf = nil - - // Ensure no-one else is still running... 
- d.streamWg.Wait() - if d.frame == nil { - d.frame = newFrameDec(d.o) - } - - if d.o.concurrent == 1 { - return d.startSyncDecoder(r) - } - - d.current.output = make(chan decodeOutput, d.o.concurrent) - ctx, cancel := context.WithCancel(context.Background()) - d.current.cancel = cancel - d.streamWg.Add(1) - go d.startStreamDecoder(ctx, r, d.current.output) - - return nil -} - -// drainOutput will drain the output until errEndOfStream is sent. -func (d *Decoder) drainOutput() { - if d.current.cancel != nil { - if debugDecoder { - println("cancelling current") - } - d.current.cancel() - d.current.cancel = nil - } - if d.current.d != nil { - if debugDecoder { - printf("re-adding current decoder %p, decoders: %d", d.current.d, len(d.decoders)) - } - d.decoders <- d.current.d - d.current.d = nil - d.current.b = nil - } - if d.current.output == nil || d.current.flushed { - println("current already flushed") - return - } - for v := range d.current.output { - if v.d != nil { - if debugDecoder { - printf("re-adding decoder %p", v.d) - } - d.decoders <- v.d - } - } - d.current.output = nil - d.current.flushed = true -} - -// WriteTo writes data to w until there's no more data to write or when an error occurs. -// The return value n is the number of bytes written. -// Any error encountered during the write is also returned. -func (d *Decoder) WriteTo(w io.Writer) (int64, error) { - var n int64 - for { - if len(d.current.b) > 0 { - n2, err2 := w.Write(d.current.b) - n += int64(n2) - if err2 != nil && (d.current.err == nil || d.current.err == io.EOF) { - d.current.err = err2 - } else if n2 != len(d.current.b) { - d.current.err = io.ErrShortWrite - } - } - if d.current.err != nil { - break - } - d.nextBlock(true) - } - err := d.current.err - if err != nil { - d.drainOutput() - } - if err == io.EOF { - err = nil - } - return n, err -} - -// DecodeAll allows stateless decoding of a blob of bytes. -// Output will be appended to dst, so if the destination size is known -// you can pre-allocate the destination slice to avoid allocations. -// DecodeAll can be used concurrently. -// The Decoder concurrency limits will be respected. -func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) { - if d.decoders == nil { - return dst, ErrDecoderClosed - } - - // Grab a block decoder and frame decoder. 
- block := <-d.decoders - frame := block.localFrame - initialSize := len(dst) - defer func() { - if debugDecoder { - printf("re-adding decoder: %p", block) - } - frame.rawInput = nil - frame.bBuf = nil - if frame.history.decoders.br != nil { - frame.history.decoders.br.in = nil - } - d.decoders <- block - }() - frame.bBuf = input - - for { - frame.history.reset() - err := frame.reset(&frame.bBuf) - if err != nil { - if err == io.EOF { - if debugDecoder { - println("frame reset return EOF") - } - return dst, nil - } - return dst, err - } - if err = d.setDict(frame); err != nil { - return nil, err - } - if frame.WindowSize > d.o.maxWindowSize { - if debugDecoder { - println("window size exceeded:", frame.WindowSize, ">", d.o.maxWindowSize) - } - return dst, ErrWindowSizeExceeded - } - if frame.FrameContentSize != fcsUnknown { - if frame.FrameContentSize > d.o.maxDecodedSize-uint64(len(dst)-initialSize) { - if debugDecoder { - println("decoder size exceeded; fcs:", frame.FrameContentSize, "> mcs:", d.o.maxDecodedSize-uint64(len(dst)-initialSize), "len:", len(dst)) - } - return dst, ErrDecoderSizeExceeded - } - if d.o.limitToCap && frame.FrameContentSize > uint64(cap(dst)-len(dst)) { - if debugDecoder { - println("decoder size exceeded; fcs:", frame.FrameContentSize, "> (cap-len)", cap(dst)-len(dst)) - } - return dst, ErrDecoderSizeExceeded - } - if cap(dst)-len(dst) < int(frame.FrameContentSize) { - dst2 := make([]byte, len(dst), len(dst)+int(frame.FrameContentSize)+compressedBlockOverAlloc) - copy(dst2, dst) - dst = dst2 - } - } - - if cap(dst) == 0 && !d.o.limitToCap { - // Allocate len(input) * 2 by default if nothing is provided - // and we didn't get frame content size. - size := len(input) * 2 - // Cap to 1 MB. - if size > 1<<20 { - size = 1 << 20 - } - if uint64(size) > d.o.maxDecodedSize { - size = int(d.o.maxDecodedSize) - } - dst = make([]byte, 0, size) - } - - dst, err = frame.runDecoder(dst, block) - if err != nil { - return dst, err - } - if uint64(len(dst)-initialSize) > d.o.maxDecodedSize { - return dst, ErrDecoderSizeExceeded - } - if len(frame.bBuf) == 0 { - if debugDecoder { - println("frame dbuf empty") - } - break - } - } - return dst, nil -} - -// nextBlock returns the next block. -// If an error occurs d.err will be set. -// Optionally the function can block for new output. -// If non-blocking mode is used the returned boolean will be false -// if no data was available without blocking. -func (d *Decoder) nextBlock(blocking bool) (ok bool) { - if d.current.err != nil { - // Keep error state. - return false - } - d.current.b = d.current.b[:0] - - // SYNC: - if d.syncStream.enabled { - if !blocking { - return false - } - ok = d.nextBlockSync() - if !ok { - d.stashDecoder() - } - return ok - } - - //ASYNC: - d.stashDecoder() - if blocking { - d.current.decodeOutput, ok = <-d.current.output - } else { - select { - case d.current.decodeOutput, ok = <-d.current.output: - default: - return false - } - } - if !ok { - // This should not happen, so signal error state... 
- d.current.err = io.ErrUnexpectedEOF - return false - } - next := d.current.decodeOutput - if next.d != nil && next.d.async.newHist != nil { - d.current.crc.Reset() - } - if debugDecoder { - var tmp [4]byte - binary.LittleEndian.PutUint32(tmp[:], uint32(xxhash.Sum64(next.b))) - println("got", len(d.current.b), "bytes, error:", d.current.err, "data crc:", tmp) - } - - if d.o.ignoreChecksum { - return true - } - - if len(next.b) > 0 { - d.current.crc.Write(next.b) - } - if next.err == nil && next.d != nil && next.d.hasCRC { - got := uint32(d.current.crc.Sum64()) - if got != next.d.checkCRC { - if debugDecoder { - printf("CRC Check Failed: %08x (got) != %08x (on stream)\n", got, next.d.checkCRC) - } - d.current.err = ErrCRCMismatch - } else { - if debugDecoder { - printf("CRC ok %08x\n", got) - } - } - } - - return true -} - -func (d *Decoder) nextBlockSync() (ok bool) { - if d.current.d == nil { - d.current.d = <-d.decoders - } - for len(d.current.b) == 0 { - if !d.syncStream.inFrame { - d.frame.history.reset() - d.current.err = d.frame.reset(&d.syncStream.br) - if d.current.err == nil { - d.current.err = d.setDict(d.frame) - } - if d.current.err != nil { - return false - } - if d.frame.WindowSize > d.o.maxDecodedSize || d.frame.WindowSize > d.o.maxWindowSize { - d.current.err = ErrDecoderSizeExceeded - return false - } - - d.syncStream.decodedFrame = 0 - d.syncStream.inFrame = true - } - d.current.err = d.frame.next(d.current.d) - if d.current.err != nil { - return false - } - d.frame.history.ensureBlock() - if debugDecoder { - println("History trimmed:", len(d.frame.history.b), "decoded already:", d.syncStream.decodedFrame) - } - histBefore := len(d.frame.history.b) - d.current.err = d.current.d.decodeBuf(&d.frame.history) - - if d.current.err != nil { - println("error after:", d.current.err) - return false - } - d.current.b = d.frame.history.b[histBefore:] - if debugDecoder { - println("history after:", len(d.frame.history.b)) - } - - // Check frame size (before CRC) - d.syncStream.decodedFrame += uint64(len(d.current.b)) - if d.syncStream.decodedFrame > d.frame.FrameContentSize { - if debugDecoder { - printf("DecodedFrame (%d) > FrameContentSize (%d)\n", d.syncStream.decodedFrame, d.frame.FrameContentSize) - } - d.current.err = ErrFrameSizeExceeded - return false - } - - // Check FCS - if d.current.d.Last && d.frame.FrameContentSize != fcsUnknown && d.syncStream.decodedFrame != d.frame.FrameContentSize { - if debugDecoder { - printf("DecodedFrame (%d) != FrameContentSize (%d)\n", d.syncStream.decodedFrame, d.frame.FrameContentSize) - } - d.current.err = ErrFrameSizeMismatch - return false - } - - // Update/Check CRC - if d.frame.HasCheckSum { - if !d.o.ignoreChecksum { - d.frame.crc.Write(d.current.b) - } - if d.current.d.Last { - if !d.o.ignoreChecksum { - d.current.err = d.frame.checkCRC() - } else { - d.current.err = d.frame.consumeCRC() - } - if d.current.err != nil { - println("CRC error:", d.current.err) - return false - } - } - } - d.syncStream.inFrame = !d.current.d.Last - } - return true -} - -func (d *Decoder) stashDecoder() { - if d.current.d != nil { - if debugDecoder { - printf("re-adding current decoder %p", d.current.d) - } - d.decoders <- d.current.d - d.current.d = nil - } -} - -// Close will release all resources. -// It is NOT possible to reuse the decoder after this. 
-func (d *Decoder) Close() { - if d.current.err == ErrDecoderClosed { - return - } - d.drainOutput() - if d.current.cancel != nil { - d.current.cancel() - d.streamWg.Wait() - d.current.cancel = nil - } - if d.decoders != nil { - close(d.decoders) - for dec := range d.decoders { - dec.Close() - } - d.decoders = nil - } - if d.current.d != nil { - d.current.d.Close() - d.current.d = nil - } - d.current.err = ErrDecoderClosed -} - -// IOReadCloser returns the decoder as an io.ReadCloser for convenience. -// Any changes to the decoder will be reflected, so the returned ReadCloser -// can be reused along with the decoder. -// io.WriterTo is also supported by the returned ReadCloser. -func (d *Decoder) IOReadCloser() io.ReadCloser { - return closeWrapper{d: d} -} - -// closeWrapper wraps a function call as a closer. -type closeWrapper struct { - d *Decoder -} - -// WriteTo forwards WriteTo calls to the decoder. -func (c closeWrapper) WriteTo(w io.Writer) (n int64, err error) { - return c.d.WriteTo(w) -} - -// Read forwards read calls to the decoder. -func (c closeWrapper) Read(p []byte) (n int, err error) { - return c.d.Read(p) -} - -// Close closes the decoder. -func (c closeWrapper) Close() error { - c.d.Close() - return nil -} - -type decodeOutput struct { - d *blockDec - b []byte - err error -} - -func (d *Decoder) startSyncDecoder(r io.Reader) error { - d.frame.history.reset() - d.syncStream.br = readerWrapper{r: r} - d.syncStream.inFrame = false - d.syncStream.enabled = true - d.syncStream.decodedFrame = 0 - return nil -} - -// Create Decoder: -// ASYNC: -// Spawn 3 go routines. -// 0: Read frames and decode block literals. -// 1: Decode sequences. -// 2: Execute sequences, send to output. -func (d *Decoder) startStreamDecoder(ctx context.Context, r io.Reader, output chan decodeOutput) { - defer d.streamWg.Done() - br := readerWrapper{r: r} - - var seqDecode = make(chan *blockDec, d.o.concurrent) - var seqExecute = make(chan *blockDec, d.o.concurrent) - - // Async 1: Decode sequences... - go func() { - var hist history - var hasErr bool - - for block := range seqDecode { - if hasErr { - if block != nil { - seqExecute <- block - } - continue - } - if block.async.newHist != nil { - if debugDecoder { - println("Async 1: new history, recent:", block.async.newHist.recentOffsets) - } - hist.reset() - hist.decoders = block.async.newHist.decoders - hist.recentOffsets = block.async.newHist.recentOffsets - hist.windowSize = block.async.newHist.windowSize - if block.async.newHist.dict != nil { - hist.setDict(block.async.newHist.dict) - } - } - if block.err != nil || block.Type != blockTypeCompressed { - hasErr = block.err != nil - seqExecute <- block - continue - } - - hist.decoders.literals = block.async.literals - block.err = block.prepareSequences(block.async.seqData, &hist) - if debugDecoder && block.err != nil { - println("prepareSequences returned:", block.err) - } - hasErr = block.err != nil - if block.err == nil { - block.err = block.decodeSequences(&hist) - if debugDecoder && block.err != nil { - println("decodeSequences returned:", block.err) - } - hasErr = block.err != nil - // block.async.sequence = hist.decoders.seq[:hist.decoders.nSeqs] - block.async.seqSize = hist.decoders.seqSize - } - seqExecute <- block - } - close(seqExecute) - hist.reset() - }() - - var wg sync.WaitGroup - wg.Add(1) - - // Async 3: Execute sequences... 
- frameHistCache := d.frame.history.b - go func() { - var hist history - var decodedFrame uint64 - var fcs uint64 - var hasErr bool - for block := range seqExecute { - out := decodeOutput{err: block.err, d: block} - if block.err != nil || hasErr { - hasErr = true - output <- out - continue - } - if block.async.newHist != nil { - if debugDecoder { - println("Async 2: new history") - } - hist.reset() - hist.windowSize = block.async.newHist.windowSize - hist.allocFrameBuffer = block.async.newHist.allocFrameBuffer - if block.async.newHist.dict != nil { - hist.setDict(block.async.newHist.dict) - } - - if cap(hist.b) < hist.allocFrameBuffer { - if cap(frameHistCache) >= hist.allocFrameBuffer { - hist.b = frameHistCache - } else { - hist.b = make([]byte, 0, hist.allocFrameBuffer) - println("Alloc history sized", hist.allocFrameBuffer) - } - } - hist.b = hist.b[:0] - fcs = block.async.fcs - decodedFrame = 0 - } - do := decodeOutput{err: block.err, d: block} - switch block.Type { - case blockTypeRLE: - if debugDecoder { - println("add rle block length:", block.RLESize) - } - - if cap(block.dst) < int(block.RLESize) { - if block.lowMem { - block.dst = make([]byte, block.RLESize) - } else { - block.dst = make([]byte, maxCompressedBlockSize) - } - } - block.dst = block.dst[:block.RLESize] - v := block.data[0] - for i := range block.dst { - block.dst[i] = v - } - hist.append(block.dst) - do.b = block.dst - case blockTypeRaw: - if debugDecoder { - println("add raw block length:", len(block.data)) - } - hist.append(block.data) - do.b = block.data - case blockTypeCompressed: - if debugDecoder { - println("execute with history length:", len(hist.b), "window:", hist.windowSize) - } - hist.decoders.seqSize = block.async.seqSize - hist.decoders.literals = block.async.literals - do.err = block.executeSequences(&hist) - hasErr = do.err != nil - if debugDecoder && hasErr { - println("executeSequences returned:", do.err) - } - do.b = block.dst - } - if !hasErr { - decodedFrame += uint64(len(do.b)) - if decodedFrame > fcs { - println("fcs exceeded", block.Last, fcs, decodedFrame) - do.err = ErrFrameSizeExceeded - hasErr = true - } else if block.Last && fcs != fcsUnknown && decodedFrame != fcs { - do.err = ErrFrameSizeMismatch - hasErr = true - } else { - if debugDecoder { - println("fcs ok", block.Last, fcs, decodedFrame) - } - } - } - output <- do - } - close(output) - frameHistCache = hist.b - wg.Done() - if debugDecoder { - println("decoder goroutines finished") - } - hist.reset() - }() - - var hist history -decodeStream: - for { - var hasErr bool - hist.reset() - decodeBlock := func(block *blockDec) { - if hasErr { - if block != nil { - seqDecode <- block - } - return - } - if block.err != nil || block.Type != blockTypeCompressed { - hasErr = block.err != nil - seqDecode <- block - return - } - - remain, err := block.decodeLiterals(block.data, &hist) - block.err = err - hasErr = block.err != nil - if err == nil { - block.async.literals = hist.decoders.literals - block.async.seqData = remain - } else if debugDecoder { - println("decodeLiterals error:", err) - } - seqDecode <- block - } - frame := d.frame - if debugDecoder { - println("New frame...") - } - var historySent bool - frame.history.reset() - err := frame.reset(&br) - if debugDecoder && err != nil { - println("Frame decoder returned", err) - } - if err == nil { - err = d.setDict(frame) - } - if err == nil && d.frame.WindowSize > d.o.maxWindowSize { - if debugDecoder { - println("decoder size exceeded, fws:", d.frame.WindowSize, "> mws:", 
d.o.maxWindowSize) - } - - err = ErrDecoderSizeExceeded - } - if err != nil { - select { - case <-ctx.Done(): - case dec := <-d.decoders: - dec.sendErr(err) - decodeBlock(dec) - } - break decodeStream - } - - // Go through all blocks of the frame. - for { - var dec *blockDec - select { - case <-ctx.Done(): - break decodeStream - case dec = <-d.decoders: - // Once we have a decoder, we MUST return it. - } - err := frame.next(dec) - if !historySent { - h := frame.history - if debugDecoder { - println("Alloc History:", h.allocFrameBuffer) - } - hist.reset() - if h.dict != nil { - hist.setDict(h.dict) - } - dec.async.newHist = &h - dec.async.fcs = frame.FrameContentSize - historySent = true - } else { - dec.async.newHist = nil - } - if debugDecoder && err != nil { - println("next block returned error:", err) - } - dec.err = err - dec.hasCRC = false - if dec.Last && frame.HasCheckSum && err == nil { - crc, err := frame.rawInput.readSmall(4) - if len(crc) < 4 { - if err == nil { - err = io.ErrUnexpectedEOF - - } - println("CRC missing?", err) - dec.err = err - } else { - dec.checkCRC = binary.LittleEndian.Uint32(crc) - dec.hasCRC = true - if debugDecoder { - printf("found crc to check: %08x\n", dec.checkCRC) - } - } - } - err = dec.err - last := dec.Last - decodeBlock(dec) - if err != nil { - break decodeStream - } - if last { - break - } - } - } - close(seqDecode) - wg.Wait() - hist.reset() - d.frame.history.b = frameHistCache -} - -func (d *Decoder) setDict(frame *frameDec) (err error) { - dict, ok := d.dicts[frame.DictionaryID] - if ok { - if debugDecoder { - println("setting dict", frame.DictionaryID) - } - frame.history.setDict(dict) - } else if frame.DictionaryID != 0 { - // A zero or missing dictionary id is ambiguous: - // either dictionary zero, or no dictionary. In particular, - // zstd --patch-from uses this id for the source file, - // so only return an error if the dictionary id is not zero. - err = ErrUnknownDictionary - } - return err -} diff --git a/vendor/github.com/klauspost/compress/zstd/decoder_options.go b/vendor/github.com/klauspost/compress/zstd/decoder_options.go deleted file mode 100644 index 774c5f00f..000000000 --- a/vendor/github.com/klauspost/compress/zstd/decoder_options.go +++ /dev/null @@ -1,169 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -import ( - "errors" - "fmt" - "math/bits" - "runtime" -) - -// DOption is an option for creating a decoder. -type DOption func(*decoderOptions) error - -// options retains accumulated state of multiple options. -type decoderOptions struct { - lowMem bool - concurrent int - maxDecodedSize uint64 - maxWindowSize uint64 - dicts []*dict - ignoreChecksum bool - limitToCap bool - decodeBufsBelow int -} - -func (o *decoderOptions) setDefault() { - *o = decoderOptions{ - // use less ram: true for now, but may change. - lowMem: true, - concurrent: runtime.GOMAXPROCS(0), - maxWindowSize: MaxWindowSize, - decodeBufsBelow: 128 << 10, - } - if o.concurrent > 4 { - o.concurrent = 4 - } - o.maxDecodedSize = 64 << 30 -} - -// WithDecoderLowmem will set whether to use a lower amount of memory, -// but possibly have to allocate more while running. -func WithDecoderLowmem(b bool) DOption { - return func(o *decoderOptions) error { o.lowMem = b; return nil } -} - -// WithDecoderConcurrency sets the number of created decoders. 
-// When decoding block with DecodeAll, this will limit the number -// of possible concurrently running decodes. -// When decoding streams, this will limit the number of -// inflight blocks. -// When decoding streams and setting maximum to 1, -// no async decoding will be done. -// When a value of 0 is provided GOMAXPROCS will be used. -// By default this will be set to 4 or GOMAXPROCS, whatever is lower. -func WithDecoderConcurrency(n int) DOption { - return func(o *decoderOptions) error { - if n < 0 { - return errors.New("concurrency must be at least 1") - } - if n == 0 { - o.concurrent = runtime.GOMAXPROCS(0) - } else { - o.concurrent = n - } - return nil - } -} - -// WithDecoderMaxMemory allows to set a maximum decoded size for in-memory -// non-streaming operations or maximum window size for streaming operations. -// This can be used to control memory usage of potentially hostile content. -// Maximum is 1 << 63 bytes. Default is 64GiB. -func WithDecoderMaxMemory(n uint64) DOption { - return func(o *decoderOptions) error { - if n == 0 { - return errors.New("WithDecoderMaxMemory must be at least 1") - } - if n > 1<<63 { - return errors.New("WithDecoderMaxmemory must be less than 1 << 63") - } - o.maxDecodedSize = n - return nil - } -} - -// WithDecoderDicts allows to register one or more dictionaries for the decoder. -// -// Each slice in dict must be in the [dictionary format] produced by -// "zstd --train" from the Zstandard reference implementation. -// -// If several dictionaries with the same ID are provided, the last one will be used. -// -// [dictionary format]: https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary-format -func WithDecoderDicts(dicts ...[]byte) DOption { - return func(o *decoderOptions) error { - for _, b := range dicts { - d, err := loadDict(b) - if err != nil { - return err - } - o.dicts = append(o.dicts, d) - } - return nil - } -} - -// WithDecoderDictRaw registers a dictionary that may be used by the decoder. -// The slice content can be arbitrary data. -func WithDecoderDictRaw(id uint32, content []byte) DOption { - return func(o *decoderOptions) error { - if bits.UintSize > 32 && uint(len(content)) > dictMaxLength { - return fmt.Errorf("dictionary of size %d > 2GiB too large", len(content)) - } - o.dicts = append(o.dicts, &dict{id: id, content: content, offsets: [3]int{1, 4, 8}}) - return nil - } -} - -// WithDecoderMaxWindow allows to set a maximum window size for decodes. -// This allows rejecting packets that will cause big memory usage. -// The Decoder will likely allocate more memory based on the WithDecoderLowmem setting. -// If WithDecoderMaxMemory is set to a lower value, that will be used. -// Default is 512MB, Maximum is ~3.75 TB as per zstandard spec. -func WithDecoderMaxWindow(size uint64) DOption { - return func(o *decoderOptions) error { - if size < MinWindowSize { - return errors.New("WithMaxWindowSize must be at least 1KB, 1024 bytes") - } - if size > (1<<41)+7*(1<<38) { - return errors.New("WithMaxWindowSize must be less than (1<<41) + 7*(1<<38) ~ 3.75TB") - } - o.maxWindowSize = size - return nil - } -} - -// WithDecodeAllCapLimit will limit DecodeAll to decoding cap(dst)-len(dst) bytes, -// or any size set in WithDecoderMaxMemory. -// This can be used to limit decoding to a specific maximum output size. -// Disabled by default. 
-func WithDecodeAllCapLimit(b bool) DOption { - return func(o *decoderOptions) error { - o.limitToCap = b - return nil - } -} - -// WithDecodeBuffersBelow will fully decode readers that have a -// `Bytes() []byte` and `Len() int` interface similar to bytes.Buffer. -// This typically uses less allocations but will have the full decompressed object in memory. -// Note that DecodeAllCapLimit will disable this, as well as giving a size of 0 or less. -// Default is 128KiB. -func WithDecodeBuffersBelow(size int) DOption { - return func(o *decoderOptions) error { - o.decodeBufsBelow = size - return nil - } -} - -// IgnoreChecksum allows to forcibly ignore checksum checking. -func IgnoreChecksum(b bool) DOption { - return func(o *decoderOptions) error { - o.ignoreChecksum = b - return nil - } -} diff --git a/vendor/github.com/klauspost/compress/zstd/dict.go b/vendor/github.com/klauspost/compress/zstd/dict.go deleted file mode 100644 index b7b83164b..000000000 --- a/vendor/github.com/klauspost/compress/zstd/dict.go +++ /dev/null @@ -1,565 +0,0 @@ -package zstd - -import ( - "bytes" - "encoding/binary" - "errors" - "fmt" - "io" - "math" - "sort" - - "github.com/klauspost/compress/huff0" -) - -type dict struct { - id uint32 - - litEnc *huff0.Scratch - llDec, ofDec, mlDec sequenceDec - offsets [3]int - content []byte -} - -const dictMagic = "\x37\xa4\x30\xec" - -// Maximum dictionary size for the reference implementation (1.5.3) is 2 GiB. -const dictMaxLength = 1 << 31 - -// ID returns the dictionary id or 0 if d is nil. -func (d *dict) ID() uint32 { - if d == nil { - return 0 - } - return d.id -} - -// ContentSize returns the dictionary content size or 0 if d is nil. -func (d *dict) ContentSize() int { - if d == nil { - return 0 - } - return len(d.content) -} - -// Content returns the dictionary content. -func (d *dict) Content() []byte { - if d == nil { - return nil - } - return d.content -} - -// Offsets returns the initial offsets. -func (d *dict) Offsets() [3]int { - if d == nil { - return [3]int{} - } - return d.offsets -} - -// LitEncoder returns the literal encoder. -func (d *dict) LitEncoder() *huff0.Scratch { - if d == nil { - return nil - } - return d.litEnc -} - -// Load a dictionary as described in -// https://github.com/facebook/zstd/blob/master/doc/zstd_compression_format.md#dictionary-format -func loadDict(b []byte) (*dict, error) { - // Check static field size. - if len(b) <= 8+(3*4) { - return nil, io.ErrUnexpectedEOF - } - d := dict{ - llDec: sequenceDec{fse: &fseDecoder{}}, - ofDec: sequenceDec{fse: &fseDecoder{}}, - mlDec: sequenceDec{fse: &fseDecoder{}}, - } - if string(b[:4]) != dictMagic { - return nil, ErrMagicMismatch - } - d.id = binary.LittleEndian.Uint32(b[4:8]) - if d.id == 0 { - return nil, errors.New("dictionaries cannot have ID 0") - } - - // Read literal table - var err error - d.litEnc, b, err = huff0.ReadTable(b[8:], nil) - if err != nil { - return nil, fmt.Errorf("loading literal table: %w", err) - } - d.litEnc.Reuse = huff0.ReusePolicyMust - - br := byteReader{ - b: b, - off: 0, - } - readDec := func(i tableIndex, dec *fseDecoder) error { - if err := dec.readNCount(&br, uint16(maxTableSymbol[i])); err != nil { - return err - } - if br.overread() { - return io.ErrUnexpectedEOF - } - err = dec.transform(symbolTableX[i]) - if err != nil { - println("Transform table error:", err) - return err - } - if debugDecoder || debugEncoder { - println("Read table ok", "symbolLen:", dec.symbolLen) - } - // Set decoders as predefined so they aren't reused. 
- dec.preDefined = true - return nil - } - - if err := readDec(tableOffsets, d.ofDec.fse); err != nil { - return nil, err - } - if err := readDec(tableMatchLengths, d.mlDec.fse); err != nil { - return nil, err - } - if err := readDec(tableLiteralLengths, d.llDec.fse); err != nil { - return nil, err - } - if br.remain() < 12 { - return nil, io.ErrUnexpectedEOF - } - - d.offsets[0] = int(br.Uint32()) - br.advance(4) - d.offsets[1] = int(br.Uint32()) - br.advance(4) - d.offsets[2] = int(br.Uint32()) - br.advance(4) - if d.offsets[0] <= 0 || d.offsets[1] <= 0 || d.offsets[2] <= 0 { - return nil, errors.New("invalid offset in dictionary") - } - d.content = make([]byte, br.remain()) - copy(d.content, br.unread()) - if d.offsets[0] > len(d.content) || d.offsets[1] > len(d.content) || d.offsets[2] > len(d.content) { - return nil, fmt.Errorf("initial offset bigger than dictionary content size %d, offsets: %v", len(d.content), d.offsets) - } - - return &d, nil -} - -// InspectDictionary loads a zstd dictionary and provides functions to inspect the content. -func InspectDictionary(b []byte) (interface { - ID() uint32 - ContentSize() int - Content() []byte - Offsets() [3]int - LitEncoder() *huff0.Scratch -}, error) { - initPredefined() - d, err := loadDict(b) - return d, err -} - -type BuildDictOptions struct { - // Dictionary ID. - ID uint32 - - // Content to use to create dictionary tables. - Contents [][]byte - - // History to use for all blocks. - History []byte - - // Offsets to use. - Offsets [3]int - - // CompatV155 will make the dictionary compatible with Zstd v1.5.5 and earlier. - // See https://github.com/facebook/zstd/issues/3724 - CompatV155 bool - - // Use the specified encoder level. - // The dictionary will be built using the specified encoder level, - // which will reflect speed and make the dictionary tailored for that level. - // If not set SpeedBestCompression will be used. - Level EncoderLevel - - // DebugOut will write stats and other details here if set. - DebugOut io.Writer -} - -func BuildDict(o BuildDictOptions) ([]byte, error) { - initPredefined() - hist := o.History - contents := o.Contents - debug := o.DebugOut != nil - println := func(args ...interface{}) { - if o.DebugOut != nil { - fmt.Fprintln(o.DebugOut, args...) - } - } - printf := func(s string, args ...interface{}) { - if o.DebugOut != nil { - fmt.Fprintf(o.DebugOut, s, args...) - } - } - print := func(args ...interface{}) { - if o.DebugOut != nil { - fmt.Fprint(o.DebugOut, args...) 
- } - } - - if int64(len(hist)) > dictMaxLength { - return nil, fmt.Errorf("dictionary of size %d > %d", len(hist), int64(dictMaxLength)) - } - if len(hist) < 8 { - return nil, fmt.Errorf("dictionary of size %d < %d", len(hist), 8) - } - if len(contents) == 0 { - return nil, errors.New("no content provided") - } - d := dict{ - id: o.ID, - litEnc: nil, - llDec: sequenceDec{}, - ofDec: sequenceDec{}, - mlDec: sequenceDec{}, - offsets: o.Offsets, - content: hist, - } - block := blockEnc{lowMem: false} - block.init() - enc := encoder(&bestFastEncoder{fastBase: fastBase{maxMatchOff: int32(maxMatchLen), bufferReset: math.MaxInt32 - int32(maxMatchLen*2), lowMem: false}}) - if o.Level != 0 { - eOpts := encoderOptions{ - level: o.Level, - blockSize: maxMatchLen, - windowSize: maxMatchLen, - dict: &d, - lowMem: false, - } - enc = eOpts.encoder() - } else { - o.Level = SpeedBestCompression - } - var ( - remain [256]int - ll [256]int - ml [256]int - of [256]int - ) - addValues := func(dst *[256]int, src []byte) { - for _, v := range src { - dst[v]++ - } - } - addHist := func(dst *[256]int, src *[256]uint32) { - for i, v := range src { - dst[i] += int(v) - } - } - seqs := 0 - nUsed := 0 - litTotal := 0 - newOffsets := make(map[uint32]int, 1000) - for _, b := range contents { - block.reset(nil) - if len(b) < 8 { - continue - } - nUsed++ - enc.Reset(&d, true) - enc.Encode(&block, b) - addValues(&remain, block.literals) - litTotal += len(block.literals) - if len(block.sequences) == 0 { - continue - } - seqs += len(block.sequences) - block.genCodes() - addHist(&ll, block.coders.llEnc.Histogram()) - addHist(&ml, block.coders.mlEnc.Histogram()) - addHist(&of, block.coders.ofEnc.Histogram()) - for i, seq := range block.sequences { - if i > 3 { - break - } - offset := seq.offset - if offset == 0 { - continue - } - if int(offset) >= len(o.History) { - continue - } - if offset > 3 { - newOffsets[offset-3]++ - } else { - newOffsets[uint32(o.Offsets[offset-1])]++ - } - } - } - // Find most used offsets. - var sortedOffsets []uint32 - for k := range newOffsets { - sortedOffsets = append(sortedOffsets, k) - } - sort.Slice(sortedOffsets, func(i, j int) bool { - a, b := sortedOffsets[i], sortedOffsets[j] - if a == b { - // Prefer the longer offset - return sortedOffsets[i] > sortedOffsets[j] - } - return newOffsets[sortedOffsets[i]] > newOffsets[sortedOffsets[j]] - }) - if len(sortedOffsets) > 3 { - if debug { - print("Offsets:") - for i, v := range sortedOffsets { - if i > 20 { - break - } - printf("[%d: %d],", v, newOffsets[v]) - } - println("") - } - - sortedOffsets = sortedOffsets[:3] - } - for i, v := range sortedOffsets { - o.Offsets[i] = int(v) - } - if debug { - println("New repeat offsets", o.Offsets) - } - - if nUsed == 0 || seqs == 0 { - return nil, fmt.Errorf("%d blocks, %d sequences found", nUsed, seqs) - } - if debug { - println("Sequences:", seqs, "Blocks:", nUsed, "Literals:", litTotal) - } - if seqs/nUsed < 512 { - // Use 512 as minimum. - nUsed = seqs / 512 - if nUsed == 0 { - nUsed = 1 - } - } - copyHist := func(dst *fseEncoder, src *[256]int) ([]byte, error) { - hist := dst.Histogram() - var maxSym uint8 - var maxCount int - var fakeLength int - for i, v := range src { - if v > 0 { - v = v / nUsed - if v == 0 { - v = 1 - } - } - if v > maxCount { - maxCount = v - } - if v != 0 { - maxSym = uint8(i) - } - fakeLength += v - hist[i] = uint32(v) - } - - // Ensure we aren't trying to represent RLE. 
- if maxCount == fakeLength { - for i := range hist { - if uint8(i) == maxSym { - fakeLength++ - maxSym++ - hist[i+1] = 1 - if maxSym > 1 { - break - } - } - if hist[0] == 0 { - fakeLength++ - hist[i] = 1 - if maxSym > 1 { - break - } - } - } - } - - dst.HistogramFinished(maxSym, maxCount) - dst.reUsed = false - dst.useRLE = false - err := dst.normalizeCount(fakeLength) - if err != nil { - return nil, err - } - if debug { - println("RAW:", dst.count[:maxSym+1], "NORM:", dst.norm[:maxSym+1], "LEN:", fakeLength) - } - return dst.writeCount(nil) - } - if debug { - print("Literal lengths: ") - } - llTable, err := copyHist(block.coders.llEnc, &ll) - if err != nil { - return nil, err - } - if debug { - print("Match lengths: ") - } - mlTable, err := copyHist(block.coders.mlEnc, &ml) - if err != nil { - return nil, err - } - if debug { - print("Offsets: ") - } - ofTable, err := copyHist(block.coders.ofEnc, &of) - if err != nil { - return nil, err - } - - // Literal table - avgSize := litTotal - if avgSize > huff0.BlockSizeMax/2 { - avgSize = huff0.BlockSizeMax / 2 - } - huffBuff := make([]byte, 0, avgSize) - // Target size - div := litTotal / avgSize - if div < 1 { - div = 1 - } - if debug { - println("Huffman weights:") - } - for i, n := range remain[:] { - if n > 0 { - n = n / div - // Allow all entries to be represented. - if n == 0 { - n = 1 - } - huffBuff = append(huffBuff, bytes.Repeat([]byte{byte(i)}, n)...) - if debug { - printf("[%d: %d], ", i, n) - } - } - } - if o.CompatV155 && remain[255]/div == 0 { - huffBuff = append(huffBuff, 255) - } - scratch := &huff0.Scratch{TableLog: 11} - for tries := 0; tries < 255; tries++ { - scratch = &huff0.Scratch{TableLog: 11} - _, _, err = huff0.Compress1X(huffBuff, scratch) - if err == nil { - break - } - if debug { - printf("Try %d: Huffman error: %v\n", tries+1, err) - } - huffBuff = huffBuff[:0] - if tries == 250 { - if debug { - println("Huffman: Bailing out with predefined table") - } - - // Bail out.... Just generate something - huffBuff = append(huffBuff, bytes.Repeat([]byte{255}, 10000)...) - for i := 0; i < 128; i++ { - huffBuff = append(huffBuff, byte(i)) - } - continue - } - if errors.Is(err, huff0.ErrIncompressible) { - // Try truncating least common. - for i, n := range remain[:] { - if n > 0 { - n = n / (div * (i + 1)) - if n > 0 { - huffBuff = append(huffBuff, bytes.Repeat([]byte{byte(i)}, n)...) - } - } - } - if o.CompatV155 && len(huffBuff) > 0 && huffBuff[len(huffBuff)-1] != 255 { - huffBuff = append(huffBuff, 255) - } - if len(huffBuff) == 0 { - huffBuff = append(huffBuff, 0, 255) - } - } - if errors.Is(err, huff0.ErrUseRLE) { - for i, n := range remain[:] { - n = n / (div * (i + 1)) - // Allow all entries to be represented. - if n == 0 { - n = 1 - } - huffBuff = append(huffBuff, bytes.Repeat([]byte{byte(i)}, n)...) 
- } - } - } - - var out bytes.Buffer - out.Write([]byte(dictMagic)) - out.Write(binary.LittleEndian.AppendUint32(nil, o.ID)) - out.Write(scratch.OutTable) - if debug { - println("huff table:", len(scratch.OutTable), "bytes") - println("of table:", len(ofTable), "bytes") - println("ml table:", len(mlTable), "bytes") - println("ll table:", len(llTable), "bytes") - } - out.Write(ofTable) - out.Write(mlTable) - out.Write(llTable) - out.Write(binary.LittleEndian.AppendUint32(nil, uint32(o.Offsets[0]))) - out.Write(binary.LittleEndian.AppendUint32(nil, uint32(o.Offsets[1]))) - out.Write(binary.LittleEndian.AppendUint32(nil, uint32(o.Offsets[2]))) - out.Write(hist) - if debug { - _, err := loadDict(out.Bytes()) - if err != nil { - panic(err) - } - i, err := InspectDictionary(out.Bytes()) - if err != nil { - panic(err) - } - println("ID:", i.ID()) - println("Content size:", i.ContentSize()) - println("Encoder:", i.LitEncoder() != nil) - println("Offsets:", i.Offsets()) - var totalSize int - for _, b := range contents { - totalSize += len(b) - } - - encWith := func(opts ...EOption) int { - enc, err := NewWriter(nil, opts...) - if err != nil { - panic(err) - } - defer enc.Close() - var dst []byte - var totalSize int - for _, b := range contents { - dst = enc.EncodeAll(b, dst[:0]) - totalSize += len(dst) - } - return totalSize - } - plain := encWith(WithEncoderLevel(o.Level)) - withDict := encWith(WithEncoderLevel(o.Level), WithEncoderDict(out.Bytes())) - println("Input size:", totalSize) - println("Plain Compressed:", plain) - println("Dict Compressed:", withDict) - println("Saved:", plain-withDict, (plain-withDict)/len(contents), "bytes per input (rounded down)") - } - return out.Bytes(), nil -} diff --git a/vendor/github.com/klauspost/compress/zstd/enc_base.go b/vendor/github.com/klauspost/compress/zstd/enc_base.go deleted file mode 100644 index 5ca46038a..000000000 --- a/vendor/github.com/klauspost/compress/zstd/enc_base.go +++ /dev/null @@ -1,173 +0,0 @@ -package zstd - -import ( - "fmt" - "math/bits" - - "github.com/klauspost/compress/zstd/internal/xxhash" -) - -const ( - dictShardBits = 6 -) - -type fastBase struct { - // cur is the offset at the start of hist - cur int32 - // maximum offset. Should be at least 2x block size. - maxMatchOff int32 - bufferReset int32 - hist []byte - crc *xxhash.Digest - tmp [8]byte - blk *blockEnc - lastDictID uint32 - lowMem bool -} - -// CRC returns the underlying CRC writer. -func (e *fastBase) CRC() *xxhash.Digest { - return e.crc -} - -// AppendCRC will append the CRC to the destination slice and return it. -func (e *fastBase) AppendCRC(dst []byte) []byte { - crc := e.crc.Sum(e.tmp[:0]) - dst = append(dst, crc[7], crc[6], crc[5], crc[4]) - return dst -} - -// WindowSize returns the window size of the encoder, -// or a window size small enough to contain the input size, if > 0. -func (e *fastBase) WindowSize(size int64) int32 { - if size > 0 && size < int64(e.maxMatchOff) { - b := int32(1) << uint(bits.Len(uint(size))) - // Keep minimum window. - if b < 1024 { - b = 1024 - } - return b - } - return e.maxMatchOff -} - -// Block returns the current block. 
-func (e *fastBase) Block() *blockEnc { - return e.blk -} - -func (e *fastBase) addBlock(src []byte) int32 { - if debugAsserts && e.cur > e.bufferReset { - panic(fmt.Sprintf("ecur (%d) > buffer reset (%d)", e.cur, e.bufferReset)) - } - // check if we have space already - if len(e.hist)+len(src) > cap(e.hist) { - if cap(e.hist) == 0 { - e.ensureHist(len(src)) - } else { - if cap(e.hist) < int(e.maxMatchOff+maxCompressedBlockSize) { - panic(fmt.Errorf("unexpected buffer cap %d, want at least %d with window %d", cap(e.hist), e.maxMatchOff+maxCompressedBlockSize, e.maxMatchOff)) - } - // Move down - offset := int32(len(e.hist)) - e.maxMatchOff - copy(e.hist[0:e.maxMatchOff], e.hist[offset:]) - e.cur += offset - e.hist = e.hist[:e.maxMatchOff] - } - } - s := int32(len(e.hist)) - e.hist = append(e.hist, src...) - return s -} - -// ensureHist will ensure that history can keep at least this many bytes. -func (e *fastBase) ensureHist(n int) { - if cap(e.hist) >= n { - return - } - l := e.maxMatchOff - if (e.lowMem && e.maxMatchOff > maxCompressedBlockSize) || e.maxMatchOff <= maxCompressedBlockSize { - l += maxCompressedBlockSize - } else { - l += e.maxMatchOff - } - // Make it at least 1MB. - if l < 1<<20 && !e.lowMem { - l = 1 << 20 - } - // Make it at least the requested size. - if l < int32(n) { - l = int32(n) - } - e.hist = make([]byte, 0, l) -} - -// useBlock will replace the block with the provided one, -// but transfer recent offsets from the previous. -func (e *fastBase) UseBlock(enc *blockEnc) { - enc.reset(e.blk) - e.blk = enc -} - -func (e *fastBase) matchlen(s, t int32, src []byte) int32 { - if debugAsserts { - if s < 0 { - err := fmt.Sprintf("s (%d) < 0", s) - panic(err) - } - if t < 0 { - err := fmt.Sprintf("s (%d) < 0", s) - panic(err) - } - if s-t > e.maxMatchOff { - err := fmt.Sprintf("s (%d) - t (%d) > maxMatchOff (%d)", s, t, e.maxMatchOff) - panic(err) - } - if len(src)-int(s) > maxCompressedBlockSize { - panic(fmt.Sprintf("len(src)-s (%d) > maxCompressedBlockSize (%d)", len(src)-int(s), maxCompressedBlockSize)) - } - } - return int32(matchLen(src[s:], src[t:])) -} - -// Reset the encoding table. -func (e *fastBase) resetBase(d *dict, singleBlock bool) { - if e.blk == nil { - e.blk = &blockEnc{lowMem: e.lowMem} - e.blk.init() - } else { - e.blk.reset(nil) - } - e.blk.initNewEncode() - if e.crc == nil { - e.crc = xxhash.New() - } else { - e.crc.Reset() - } - e.blk.dictLitEnc = nil - if d != nil { - low := e.lowMem - if singleBlock { - e.lowMem = true - } - e.ensureHist(d.ContentSize() + maxCompressedBlockSize) - e.lowMem = low - } - - // We offset current position so everything will be out of reach. - // If above reset line, history will be purged. - if e.cur < e.bufferReset { - e.cur += e.maxMatchOff + int32(len(e.hist)) - } - e.hist = e.hist[:0] - if d != nil { - // Set offsets (currently not used) - for i, off := range d.offsets { - e.blk.recentOffsets[i] = uint32(off) - e.blk.prevRecentOffsets[i] = e.blk.recentOffsets[i] - } - // Transfer litenc. - e.blk.dictLitEnc = d.litEnc - e.hist = append(e.hist, d.content...) - } -} diff --git a/vendor/github.com/klauspost/compress/zstd/enc_best.go b/vendor/github.com/klauspost/compress/zstd/enc_best.go deleted file mode 100644 index 4613724e9..000000000 --- a/vendor/github.com/klauspost/compress/zstd/enc_best.go +++ /dev/null @@ -1,560 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. 
- -package zstd - -import ( - "bytes" - "fmt" - - "github.com/klauspost/compress" -) - -const ( - bestLongTableBits = 22 // Bits used in the long match table - bestLongTableSize = 1 << bestLongTableBits // Size of the table - bestLongLen = 8 // Bytes used for table hash - - // Note: Increasing the short table bits or making the hash shorter - // can actually lead to compression degradation since it will 'steal' more from the - // long match table and match offsets are quite big. - // This greatly depends on the type of input. - bestShortTableBits = 18 // Bits used in the short match table - bestShortTableSize = 1 << bestShortTableBits // Size of the table - bestShortLen = 4 // Bytes used for table hash - -) - -type match struct { - offset int32 - s int32 - length int32 - rep int32 - est int32 -} - -const highScore = maxMatchLen * 8 - -// estBits will estimate output bits from predefined tables. -func (m *match) estBits(bitsPerByte int32) { - mlc := mlCode(uint32(m.length - zstdMinMatch)) - var ofc uint8 - if m.rep < 0 { - ofc = ofCode(uint32(m.s-m.offset) + 3) - } else { - ofc = ofCode(uint32(m.rep) & 3) - } - // Cost, excluding - ofTT, mlTT := fsePredefEnc[tableOffsets].ct.symbolTT[ofc], fsePredefEnc[tableMatchLengths].ct.symbolTT[mlc] - - // Add cost of match encoding... - m.est = int32(ofTT.outBits + mlTT.outBits) - m.est += int32(ofTT.deltaNbBits>>16 + mlTT.deltaNbBits>>16) - // Subtract savings compared to literal encoding... - m.est -= (m.length * bitsPerByte) >> 10 - if m.est > 0 { - // Unlikely gain.. - m.length = 0 - m.est = highScore - } -} - -// bestFastEncoder uses 2 tables, one for short matches (5 bytes) and one for long matches. -// The long match table contains the previous entry with the same hash, -// effectively making it a "chain" of length 2. -// When we find a long match we choose between the two values and select the longest. -// When we find a short match, after checking the long, we check if we can find a long at n+1 -// and that it is longer (lazy matching). -type bestFastEncoder struct { - fastBase - table [bestShortTableSize]prevEntry - longTable [bestLongTableSize]prevEntry - dictTable []prevEntry - dictLongTable []prevEntry -} - -// Encode improves compression... -func (e *bestFastEncoder) Encode(blk *blockEnc, src []byte) { - const ( - // Input margin is the number of bytes we read (8) - // and the maximum we will read ahead (2) - inputMargin = 8 + 4 - minNonLiteralBlockSize = 16 - ) - - // Protect against e.cur wraparound. - for e.cur >= e.bufferReset-int32(len(e.hist)) { - if len(e.hist) == 0 { - e.table = [bestShortTableSize]prevEntry{} - e.longTable = [bestLongTableSize]prevEntry{} - e.cur = e.maxMatchOff - break - } - // Shift down everything in the table that isn't already too far away. 
- minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff - for i := range e.table[:] { - v := e.table[i].offset - v2 := e.table[i].prev - if v < minOff { - v = 0 - v2 = 0 - } else { - v = v - e.cur + e.maxMatchOff - if v2 < minOff { - v2 = 0 - } else { - v2 = v2 - e.cur + e.maxMatchOff - } - } - e.table[i] = prevEntry{ - offset: v, - prev: v2, - } - } - for i := range e.longTable[:] { - v := e.longTable[i].offset - v2 := e.longTable[i].prev - if v < minOff { - v = 0 - v2 = 0 - } else { - v = v - e.cur + e.maxMatchOff - if v2 < minOff { - v2 = 0 - } else { - v2 = v2 - e.cur + e.maxMatchOff - } - } - e.longTable[i] = prevEntry{ - offset: v, - prev: v2, - } - } - e.cur = e.maxMatchOff - break - } - - // Add block to history - s := e.addBlock(src) - blk.size = len(src) - - // Check RLE first - if len(src) > zstdMinMatch { - ml := matchLen(src[1:], src) - if ml == len(src)-1 { - blk.literals = append(blk.literals, src[0]) - blk.sequences = append(blk.sequences, seq{litLen: 1, matchLen: uint32(len(src)-1) - zstdMinMatch, offset: 1 + 3}) - return - } - } - - if len(src) < minNonLiteralBlockSize { - blk.extraLits = len(src) - blk.literals = blk.literals[:len(src)] - copy(blk.literals, src) - return - } - - // Use this to estimate literal cost. - // Scaled by 10 bits. - bitsPerByte := int32((compress.ShannonEntropyBits(src) * 1024) / len(src)) - // Huffman can never go < 1 bit/byte - if bitsPerByte < 1024 { - bitsPerByte = 1024 - } - - // Override src - src = e.hist - sLimit := int32(len(src)) - inputMargin - const kSearchStrength = 10 - - // nextEmit is where in src the next emitLiteral should start from. - nextEmit := s - - // Relative offsets - offset1 := int32(blk.recentOffsets[0]) - offset2 := int32(blk.recentOffsets[1]) - offset3 := int32(blk.recentOffsets[2]) - - addLiterals := func(s *seq, until int32) { - if until == nextEmit { - return - } - blk.literals = append(blk.literals, src[nextEmit:until]...) - s.litLen = uint32(until - nextEmit) - } - - if debugEncoder { - println("recent offsets:", blk.recentOffsets) - } - -encodeLoop: - for { - // We allow the encoder to optionally turn off repeat offsets across blocks - canRepeat := len(blk.sequences) > 2 - - if debugAsserts && canRepeat && offset1 == 0 { - panic("offset0 was 0") - } - - const goodEnough = 250 - - cv := load6432(src, s) - - nextHashL := hashLen(cv, bestLongTableBits, bestLongLen) - nextHashS := hashLen(cv, bestShortTableBits, bestShortLen) - candidateL := e.longTable[nextHashL] - candidateS := e.table[nextHashS] - - // Set m to a match at offset if it looks like that will improve compression. - improve := func(m *match, offset int32, s int32, first uint32, rep int32) { - delta := s - offset - if delta >= e.maxMatchOff || delta <= 0 || load3232(src, offset) != first { - return - } - // Try to quick reject if we already have a long match. - if m.length > 16 { - left := len(src) - int(m.s+m.length) - // If we are too close to the end, keep as is. - if left <= 0 { - return - } - checkLen := m.length - (s - m.s) - 8 - if left > 2 && checkLen > 4 { - // Check 4 bytes, 4 bytes from the end of the current match. - a := load3232(src, offset+checkLen) - b := load3232(src, s+checkLen) - if a != b { - return - } - } - } - l := 4 + e.matchlen(s+4, offset+4, src) - if m.rep <= 0 { - // Extend candidate match backwards as far as possible. - // Do not extend repeats as we can assume they are optimal - // and offsets change if s == nextEmit. 
- tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } - for offset > tMin && s > nextEmit && src[offset-1] == src[s-1] && l < maxMatchLength { - s-- - offset-- - l++ - } - } - if debugAsserts { - if offset >= s { - panic(fmt.Sprintf("offset: %d - s:%d - rep: %d - cur :%d - max: %d", offset, s, rep, e.cur, e.maxMatchOff)) - } - if !bytes.Equal(src[s:s+l], src[offset:offset+l]) { - panic(fmt.Sprintf("second match mismatch: %v != %v, first: %08x", src[s:s+4], src[offset:offset+4], first)) - } - } - cand := match{offset: offset, s: s, length: l, rep: rep} - cand.estBits(bitsPerByte) - if m.est >= highScore || cand.est-m.est+(cand.s-m.s)*bitsPerByte>>10 < 0 { - *m = cand - } - } - - best := match{s: s, est: highScore} - improve(&best, candidateL.offset-e.cur, s, uint32(cv), -1) - improve(&best, candidateL.prev-e.cur, s, uint32(cv), -1) - improve(&best, candidateS.offset-e.cur, s, uint32(cv), -1) - improve(&best, candidateS.prev-e.cur, s, uint32(cv), -1) - - if canRepeat && best.length < goodEnough { - if s == nextEmit { - // Check repeats straight after a match. - improve(&best, s-offset2, s, uint32(cv), 1|4) - improve(&best, s-offset3, s, uint32(cv), 2|4) - if offset1 > 1 { - improve(&best, s-(offset1-1), s, uint32(cv), 3|4) - } - } - - // If either no match or a non-repeat match, check at + 1 - if best.rep <= 0 { - cv32 := uint32(cv >> 8) - spp := s + 1 - improve(&best, spp-offset1, spp, cv32, 1) - improve(&best, spp-offset2, spp, cv32, 2) - improve(&best, spp-offset3, spp, cv32, 3) - if best.rep < 0 { - cv32 = uint32(cv >> 24) - spp += 2 - improve(&best, spp-offset1, spp, cv32, 1) - improve(&best, spp-offset2, spp, cv32, 2) - improve(&best, spp-offset3, spp, cv32, 3) - } - } - } - // Load next and check... - e.longTable[nextHashL] = prevEntry{offset: s + e.cur, prev: candidateL.offset} - e.table[nextHashS] = prevEntry{offset: s + e.cur, prev: candidateS.offset} - index0 := s + 1 - - // Look far ahead, unless we have a really long match already... - if best.length < goodEnough { - // No match found, move forward on input, no need to check forward... - if best.length < 4 { - s += 1 + (s-nextEmit)>>(kSearchStrength-1) - if s >= sLimit { - break encodeLoop - } - continue - } - - candidateS = e.table[hashLen(cv>>8, bestShortTableBits, bestShortLen)] - cv = load6432(src, s+1) - cv2 := load6432(src, s+2) - candidateL = e.longTable[hashLen(cv, bestLongTableBits, bestLongLen)] - candidateL2 := e.longTable[hashLen(cv2, bestLongTableBits, bestLongLen)] - - // Short at s+1 - improve(&best, candidateS.offset-e.cur, s+1, uint32(cv), -1) - // Long at s+1, s+2 - improve(&best, candidateL.offset-e.cur, s+1, uint32(cv), -1) - improve(&best, candidateL.prev-e.cur, s+1, uint32(cv), -1) - improve(&best, candidateL2.offset-e.cur, s+2, uint32(cv2), -1) - improve(&best, candidateL2.prev-e.cur, s+2, uint32(cv2), -1) - if false { - // Short at s+3. - // Too often worse... - improve(&best, e.table[hashLen(cv2>>8, bestShortTableBits, bestShortLen)].offset-e.cur, s+3, uint32(cv2>>8), -1) - } - - // Start check at a fixed offset to allow for a few mismatches. - // For this compression level 2 yields the best results. - // We cannot do this if we have already indexed this position. - const skipBeginning = 2 - if best.s > s-skipBeginning { - // See if we can find a better match by checking where the current best ends. - // Use that offset to see if we can find a better full match. 
- if sAt := best.s + best.length; sAt < sLimit { - nextHashL := hashLen(load6432(src, sAt), bestLongTableBits, bestLongLen) - candidateEnd := e.longTable[nextHashL] - - if off := candidateEnd.offset - e.cur - best.length + skipBeginning; off >= 0 { - improve(&best, off, best.s+skipBeginning, load3232(src, best.s+skipBeginning), -1) - if off := candidateEnd.prev - e.cur - best.length + skipBeginning; off >= 0 { - improve(&best, off, best.s+skipBeginning, load3232(src, best.s+skipBeginning), -1) - } - } - } - } - } - - if debugAsserts { - if best.offset >= best.s { - panic(fmt.Sprintf("best.offset > s: %d >= %d", best.offset, best.s)) - } - if best.s < nextEmit { - panic(fmt.Sprintf("s %d < nextEmit %d", best.s, nextEmit)) - } - if best.offset < s-e.maxMatchOff { - panic(fmt.Sprintf("best.offset < s-e.maxMatchOff: %d < %d", best.offset, s-e.maxMatchOff)) - } - if !bytes.Equal(src[best.s:best.s+best.length], src[best.offset:best.offset+best.length]) { - panic(fmt.Sprintf("match mismatch: %v != %v", src[best.s:best.s+best.length], src[best.offset:best.offset+best.length])) - } - } - - // We have a match, we can store the forward value - s = best.s - if best.rep > 0 { - var seq seq - seq.matchLen = uint32(best.length - zstdMinMatch) - addLiterals(&seq, best.s) - - // Repeat. If bit 4 is set, this is a non-lit repeat. - seq.offset = uint32(best.rep & 3) - if debugSequences { - println("repeat sequence", seq, "next s:", best.s, "off:", best.s-best.offset) - } - blk.sequences = append(blk.sequences, seq) - - // Index old s + 1 -> s - 1 - s = best.s + best.length - nextEmit = s - - // Index skipped... - end := s - if s > sLimit+4 { - end = sLimit + 4 - } - off := index0 + e.cur - for index0 < end { - cv0 := load6432(src, index0) - h0 := hashLen(cv0, bestLongTableBits, bestLongLen) - h1 := hashLen(cv0, bestShortTableBits, bestShortLen) - e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} - e.table[h1] = prevEntry{offset: off, prev: e.table[h1].offset} - off++ - index0++ - } - - switch best.rep { - case 2, 4 | 1: - offset1, offset2 = offset2, offset1 - case 3, 4 | 2: - offset1, offset2, offset3 = offset3, offset1, offset2 - case 4 | 3: - offset1, offset2, offset3 = offset1-1, offset1, offset2 - } - if s >= sLimit { - if debugEncoder { - println("repeat ended", s, best.length) - } - break encodeLoop - } - continue - } - - // A 4-byte match has been found. Update recent offsets. - // We'll later see if more than 4 bytes. - t := best.offset - offset1, offset2, offset3 = s-t, offset1, offset2 - - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - - if debugAsserts && int(offset1) > len(src) { - panic("invalid offset") - } - - // Write our sequence - var seq seq - l := best.length - seq.litLen = uint32(s - nextEmit) - seq.matchLen = uint32(l - zstdMinMatch) - if seq.litLen > 0 { - blk.literals = append(blk.literals, src[nextEmit:s]...) 
- } - seq.offset = uint32(s-t) + 3 - s += l - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - nextEmit = s - - // Index old s + 1 -> s - 1 or sLimit - end := s - if s > sLimit-4 { - end = sLimit - 4 - } - - off := index0 + e.cur - for index0 < end { - cv0 := load6432(src, index0) - h0 := hashLen(cv0, bestLongTableBits, bestLongLen) - h1 := hashLen(cv0, bestShortTableBits, bestShortLen) - e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} - e.table[h1] = prevEntry{offset: off, prev: e.table[h1].offset} - index0++ - off++ - } - if s >= sLimit { - break encodeLoop - } - } - - if int(nextEmit) < len(src) { - blk.literals = append(blk.literals, src[nextEmit:]...) - blk.extraLits = len(src) - int(nextEmit) - } - blk.recentOffsets[0] = uint32(offset1) - blk.recentOffsets[1] = uint32(offset2) - blk.recentOffsets[2] = uint32(offset3) - if debugEncoder { - println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) - } -} - -// EncodeNoHist will encode a block with no history and no following blocks. -// Most notable difference is that src will not be copied for history and -// we do not need to check for max match length. -func (e *bestFastEncoder) EncodeNoHist(blk *blockEnc, src []byte) { - e.ensureHist(len(src)) - e.Encode(blk, src) -} - -// Reset will reset and set a dictionary if not nil -func (e *bestFastEncoder) Reset(d *dict, singleBlock bool) { - e.resetBase(d, singleBlock) - if d == nil { - return - } - // Init or copy dict table - if len(e.dictTable) != len(e.table) || d.id != e.lastDictID { - if len(e.dictTable) != len(e.table) { - e.dictTable = make([]prevEntry, len(e.table)) - } - end := int32(len(d.content)) - 8 + e.maxMatchOff - for i := e.maxMatchOff; i < end; i += 4 { - const hashLog = bestShortTableBits - - cv := load6432(d.content, i-e.maxMatchOff) - nextHash := hashLen(cv, hashLog, bestShortLen) // 0 -> 4 - nextHash1 := hashLen(cv>>8, hashLog, bestShortLen) // 1 -> 5 - nextHash2 := hashLen(cv>>16, hashLog, bestShortLen) // 2 -> 6 - nextHash3 := hashLen(cv>>24, hashLog, bestShortLen) // 3 -> 7 - e.dictTable[nextHash] = prevEntry{ - prev: e.dictTable[nextHash].offset, - offset: i, - } - e.dictTable[nextHash1] = prevEntry{ - prev: e.dictTable[nextHash1].offset, - offset: i + 1, - } - e.dictTable[nextHash2] = prevEntry{ - prev: e.dictTable[nextHash2].offset, - offset: i + 2, - } - e.dictTable[nextHash3] = prevEntry{ - prev: e.dictTable[nextHash3].offset, - offset: i + 3, - } - } - e.lastDictID = d.id - } - - // Init or copy dict table - if len(e.dictLongTable) != len(e.longTable) || d.id != e.lastDictID { - if len(e.dictLongTable) != len(e.longTable) { - e.dictLongTable = make([]prevEntry, len(e.longTable)) - } - if len(d.content) >= 8 { - cv := load6432(d.content, 0) - h := hashLen(cv, bestLongTableBits, bestLongLen) - e.dictLongTable[h] = prevEntry{ - offset: e.maxMatchOff, - prev: e.dictLongTable[h].offset, - } - - end := int32(len(d.content)) - 8 + e.maxMatchOff - off := 8 // First to read - for i := e.maxMatchOff + 1; i < end; i++ { - cv = cv>>8 | (uint64(d.content[off]) << 56) - h := hashLen(cv, bestLongTableBits, bestLongLen) - e.dictLongTable[h] = prevEntry{ - offset: i, - prev: e.dictLongTable[h].offset, - } - off++ - } - } - e.lastDictID = d.id - } - // Reset table to initial state - copy(e.longTable[:], e.dictLongTable) - - e.cur = e.maxMatchOff - // Reset table to initial state - copy(e.table[:], e.dictTable) -} diff --git 
a/vendor/github.com/klauspost/compress/zstd/enc_better.go b/vendor/github.com/klauspost/compress/zstd/enc_better.go deleted file mode 100644 index a4f5bf91f..000000000 --- a/vendor/github.com/klauspost/compress/zstd/enc_better.go +++ /dev/null @@ -1,1252 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -import "fmt" - -const ( - betterLongTableBits = 19 // Bits used in the long match table - betterLongTableSize = 1 << betterLongTableBits // Size of the table - betterLongLen = 8 // Bytes used for table hash - - // Note: Increasing the short table bits or making the hash shorter - // can actually lead to compression degradation since it will 'steal' more from the - // long match table and match offsets are quite big. - // This greatly depends on the type of input. - betterShortTableBits = 13 // Bits used in the short match table - betterShortTableSize = 1 << betterShortTableBits // Size of the table - betterShortLen = 5 // Bytes used for table hash - - betterLongTableShardCnt = 1 << (betterLongTableBits - dictShardBits) // Number of shards in the table - betterLongTableShardSize = betterLongTableSize / betterLongTableShardCnt // Size of an individual shard - - betterShortTableShardCnt = 1 << (betterShortTableBits - dictShardBits) // Number of shards in the table - betterShortTableShardSize = betterShortTableSize / betterShortTableShardCnt // Size of an individual shard -) - -type prevEntry struct { - offset int32 - prev int32 -} - -// betterFastEncoder uses 2 tables, one for short matches (5 bytes) and one for long matches. -// The long match table contains the previous entry with the same hash, -// effectively making it a "chain" of length 2. -// When we find a long match we choose between the two values and select the longest. -// When we find a short match, after checking the long, we check if we can find a long at n+1 -// and that it is longer (lazy matching). -type betterFastEncoder struct { - fastBase - table [betterShortTableSize]tableEntry - longTable [betterLongTableSize]prevEntry -} - -type betterFastEncoderDict struct { - betterFastEncoder - dictTable []tableEntry - dictLongTable []prevEntry - shortTableShardDirty [betterShortTableShardCnt]bool - longTableShardDirty [betterLongTableShardCnt]bool - allDirty bool -} - -// Encode improves compression... -func (e *betterFastEncoder) Encode(blk *blockEnc, src []byte) { - const ( - // Input margin is the number of bytes we read (8) - // and the maximum we will read ahead (2) - inputMargin = 8 + 2 - minNonLiteralBlockSize = 16 - ) - - // Protect against e.cur wraparound. - for e.cur >= e.bufferReset-int32(len(e.hist)) { - if len(e.hist) == 0 { - e.table = [betterShortTableSize]tableEntry{} - e.longTable = [betterLongTableSize]prevEntry{} - e.cur = e.maxMatchOff - break - } - // Shift down everything in the table that isn't already too far away. 
- minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff - for i := range e.table[:] { - v := e.table[i].offset - if v < minOff { - v = 0 - } else { - v = v - e.cur + e.maxMatchOff - } - e.table[i].offset = v - } - for i := range e.longTable[:] { - v := e.longTable[i].offset - v2 := e.longTable[i].prev - if v < minOff { - v = 0 - v2 = 0 - } else { - v = v - e.cur + e.maxMatchOff - if v2 < minOff { - v2 = 0 - } else { - v2 = v2 - e.cur + e.maxMatchOff - } - } - e.longTable[i] = prevEntry{ - offset: v, - prev: v2, - } - } - e.cur = e.maxMatchOff - break - } - // Add block to history - s := e.addBlock(src) - blk.size = len(src) - - // Check RLE first - if len(src) > zstdMinMatch { - ml := matchLen(src[1:], src) - if ml == len(src)-1 { - blk.literals = append(blk.literals, src[0]) - blk.sequences = append(blk.sequences, seq{litLen: 1, matchLen: uint32(len(src)-1) - zstdMinMatch, offset: 1 + 3}) - return - } - } - - if len(src) < minNonLiteralBlockSize { - blk.extraLits = len(src) - blk.literals = blk.literals[:len(src)] - copy(blk.literals, src) - return - } - - // Override src - src = e.hist - sLimit := int32(len(src)) - inputMargin - // stepSize is the number of bytes to skip on every main loop iteration. - // It should be >= 1. - const stepSize = 1 - - const kSearchStrength = 9 - - // nextEmit is where in src the next emitLiteral should start from. - nextEmit := s - cv := load6432(src, s) - - // Relative offsets - offset1 := int32(blk.recentOffsets[0]) - offset2 := int32(blk.recentOffsets[1]) - - addLiterals := func(s *seq, until int32) { - if until == nextEmit { - return - } - blk.literals = append(blk.literals, src[nextEmit:until]...) - s.litLen = uint32(until - nextEmit) - } - if debugEncoder { - println("recent offsets:", blk.recentOffsets) - } - -encodeLoop: - for { - var t int32 - // We allow the encoder to optionally turn off repeat offsets across blocks - canRepeat := len(blk.sequences) > 2 - var matched, index0 int32 - - for { - if debugAsserts && canRepeat && offset1 == 0 { - panic("offset0 was 0") - } - - nextHashL := hashLen(cv, betterLongTableBits, betterLongLen) - nextHashS := hashLen(cv, betterShortTableBits, betterShortLen) - candidateL := e.longTable[nextHashL] - candidateS := e.table[nextHashS] - - const repOff = 1 - repIndex := s - offset1 + repOff - off := s + e.cur - e.longTable[nextHashL] = prevEntry{offset: off, prev: candidateL.offset} - e.table[nextHashS] = tableEntry{offset: off, val: uint32(cv)} - index0 = s + 1 - - if canRepeat { - if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { - // Consider history as well. - var seq seq - lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src) - - seq.matchLen = uint32(lenght - zstdMinMatch) - - // We might be able to match backwards. - // Extend as long as we can. - start := s + repOff - // We end the search early, so we don't risk 0 literals - // and have to do special offset treatment. 
- startLimit := nextEmit + 1 - - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } - for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { - repIndex-- - start-- - seq.matchLen++ - } - addLiterals(&seq, start) - - // rep 0 - seq.offset = 1 - if debugSequences { - println("repeat sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - - // Index match start+1 (long) -> s - 1 - index0 := s + repOff - s += lenght + repOff - - nextEmit = s - if s >= sLimit { - if debugEncoder { - println("repeat ended", s, lenght) - - } - break encodeLoop - } - // Index skipped... - for index0 < s-1 { - cv0 := load6432(src, index0) - cv1 := cv0 >> 8 - h0 := hashLen(cv0, betterLongTableBits, betterLongLen) - off := index0 + e.cur - e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} - e.table[hashLen(cv1, betterShortTableBits, betterShortLen)] = tableEntry{offset: off + 1, val: uint32(cv1)} - index0 += 2 - } - cv = load6432(src, s) - continue - } - const repOff2 = 1 - - // We deviate from the reference encoder and also check offset 2. - // Still slower and not much better, so disabled. - // repIndex = s - offset2 + repOff2 - if false && repIndex >= 0 && load6432(src, repIndex) == load6432(src, s+repOff) { - // Consider history as well. - var seq seq - lenght := 8 + e.matchlen(s+8+repOff2, repIndex+8, src) - - seq.matchLen = uint32(lenght - zstdMinMatch) - - // We might be able to match backwards. - // Extend as long as we can. - start := s + repOff2 - // We end the search early, so we don't risk 0 literals - // and have to do special offset treatment. - startLimit := nextEmit + 1 - - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } - for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { - repIndex-- - start-- - seq.matchLen++ - } - addLiterals(&seq, start) - - // rep 2 - seq.offset = 2 - if debugSequences { - println("repeat sequence 2", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - - s += lenght + repOff2 - nextEmit = s - if s >= sLimit { - if debugEncoder { - println("repeat ended", s, lenght) - - } - break encodeLoop - } - - // Index skipped... - for index0 < s-1 { - cv0 := load6432(src, index0) - cv1 := cv0 >> 8 - h0 := hashLen(cv0, betterLongTableBits, betterLongLen) - off := index0 + e.cur - e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} - e.table[hashLen(cv1, betterShortTableBits, betterShortLen)] = tableEntry{offset: off + 1, val: uint32(cv1)} - index0 += 2 - } - cv = load6432(src, s) - // Swap offsets - offset1, offset2 = offset2, offset1 - continue - } - } - // Find the offsets of our two matches. - coffsetL := candidateL.offset - e.cur - coffsetLP := candidateL.prev - e.cur - - // Check if we have a long match. - if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { - // Found a long match, at least 8 bytes. - matched = e.matchlen(s+8, coffsetL+8, src) + 8 - t = coffsetL - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugMatches { - println("long match") - } - - if s-coffsetLP < e.maxMatchOff && cv == load6432(src, coffsetLP) { - // Found a long match, at least 8 bytes. 
- prevMatch := e.matchlen(s+8, coffsetLP+8, src) + 8 - if prevMatch > matched { - matched = prevMatch - t = coffsetLP - } - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugMatches { - println("long match") - } - } - break - } - - // Check if we have a long match on prev. - if s-coffsetLP < e.maxMatchOff && cv == load6432(src, coffsetLP) { - // Found a long match, at least 8 bytes. - matched = e.matchlen(s+8, coffsetLP+8, src) + 8 - t = coffsetLP - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugMatches { - println("long match") - } - break - } - - coffsetS := candidateS.offset - e.cur - - // Check if we have a short match. - if s-coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val { - // found a regular match - matched = e.matchlen(s+4, coffsetS+4, src) + 4 - - // See if we can find a long match at s+1 - const checkAt = 1 - cv := load6432(src, s+checkAt) - nextHashL = hashLen(cv, betterLongTableBits, betterLongLen) - candidateL = e.longTable[nextHashL] - coffsetL = candidateL.offset - e.cur - - // We can store it, since we have at least a 4 byte match. - e.longTable[nextHashL] = prevEntry{offset: s + checkAt + e.cur, prev: candidateL.offset} - if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { - // Found a long match, at least 8 bytes. - matchedNext := e.matchlen(s+8+checkAt, coffsetL+8, src) + 8 - if matchedNext > matched { - t = coffsetL - s += checkAt - matched = matchedNext - if debugMatches { - println("long match (after short)") - } - break - } - } - - // Check prev long... - coffsetL = candidateL.prev - e.cur - if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { - // Found a long match, at least 8 bytes. - matchedNext := e.matchlen(s+8+checkAt, coffsetL+8, src) + 8 - if matchedNext > matched { - t = coffsetL - s += checkAt - matched = matchedNext - if debugMatches { - println("prev long match (after short)") - } - break - } - } - t = coffsetS - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugAsserts && t < 0 { - panic("t<0") - } - if debugMatches { - println("short match") - } - break - } - - // No match found, move forward in input. - s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) - if s >= sLimit { - break encodeLoop - } - cv = load6432(src, s) - } - - // Try to find a better match by searching for a long match at the end of the current best match - if s+matched < sLimit { - // Allow some bytes at the beginning to mismatch. - // Sweet spot is around 3 bytes, but depends on input. - // The skipped bytes are tested in Extend backwards, - // and still picked up as part of the match if they do. - const skipBeginning = 3 - - nextHashL := hashLen(load6432(src, s+matched), betterLongTableBits, betterLongLen) - s2 := s + skipBeginning - cv := load3232(src, s2) - candidateL := e.longTable[nextHashL] - coffsetL := candidateL.offset - e.cur - matched + skipBeginning - if coffsetL >= 0 && coffsetL < s2 && s2-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) { - // Found a long match, at least 4 bytes. 
- matchedNext := e.matchlen(s2+4, coffsetL+4, src) + 4 - if matchedNext > matched { - t = coffsetL - s = s2 - matched = matchedNext - if debugMatches { - println("long match at end-of-match") - } - } - } - - // Check prev long... - if true { - coffsetL = candidateL.prev - e.cur - matched + skipBeginning - if coffsetL >= 0 && coffsetL < s2 && s2-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) { - // Found a long match, at least 4 bytes. - matchedNext := e.matchlen(s2+4, coffsetL+4, src) + 4 - if matchedNext > matched { - t = coffsetL - s = s2 - matched = matchedNext - if debugMatches { - println("prev long match at end-of-match") - } - } - } - } - } - // A match has been found. Update recent offsets. - offset2 = offset1 - offset1 = s - t - - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - - if debugAsserts && canRepeat && int(offset1) > len(src) { - panic("invalid offset") - } - - // Extend the n-byte match as long as possible. - l := matched - - // Extend backwards - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } - for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { - s-- - t-- - l++ - } - - // Write our sequence - var seq seq - seq.litLen = uint32(s - nextEmit) - seq.matchLen = uint32(l - zstdMinMatch) - if seq.litLen > 0 { - blk.literals = append(blk.literals, src[nextEmit:s]...) - } - seq.offset = uint32(s-t) + 3 - s += l - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - nextEmit = s - if s >= sLimit { - break encodeLoop - } - - // Index match start+1 (long) -> s - 1 - off := index0 + e.cur - for index0 < s-1 { - cv0 := load6432(src, index0) - cv1 := cv0 >> 8 - h0 := hashLen(cv0, betterLongTableBits, betterLongLen) - e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} - e.table[hashLen(cv1, betterShortTableBits, betterShortLen)] = tableEntry{offset: off + 1, val: uint32(cv1)} - index0 += 2 - off += 2 - } - - cv = load6432(src, s) - if !canRepeat { - continue - } - - // Check offset 2 - for { - o2 := s - offset2 - if load3232(src, o2) != uint32(cv) { - // Do regular search - break - } - - // Store this, since we have it. - nextHashL := hashLen(cv, betterLongTableBits, betterLongLen) - nextHashS := hashLen(cv, betterShortTableBits, betterShortLen) - - // We have at least 4 byte match. - // No need to check backwards. We come straight from a match - l := 4 + e.matchlen(s+4, o2+4, src) - - e.longTable[nextHashL] = prevEntry{offset: s + e.cur, prev: e.longTable[nextHashL].offset} - e.table[nextHashS] = tableEntry{offset: s + e.cur, val: uint32(cv)} - seq.matchLen = uint32(l) - zstdMinMatch - seq.litLen = 0 - - // Since litlen is always 0, this is offset 1. - seq.offset = 1 - s += l - nextEmit = s - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - - // Swap offset 1 and 2. - offset1, offset2 = offset2, offset1 - if s >= sLimit { - // Finished - break encodeLoop - } - cv = load6432(src, s) - } - } - - if int(nextEmit) < len(src) { - blk.literals = append(blk.literals, src[nextEmit:]...) - blk.extraLits = len(src) - int(nextEmit) - } - blk.recentOffsets[0] = uint32(offset1) - blk.recentOffsets[1] = uint32(offset2) - if debugEncoder { - println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) - } -} - -// EncodeNoHist will encode a block with no history and no following blocks. 
-// Most notable difference is that src will not be copied for history and -// we do not need to check for max match length. -func (e *betterFastEncoder) EncodeNoHist(blk *blockEnc, src []byte) { - e.ensureHist(len(src)) - e.Encode(blk, src) -} - -// Encode improves compression... -func (e *betterFastEncoderDict) Encode(blk *blockEnc, src []byte) { - const ( - // Input margin is the number of bytes we read (8) - // and the maximum we will read ahead (2) - inputMargin = 8 + 2 - minNonLiteralBlockSize = 16 - ) - - // Protect against e.cur wraparound. - for e.cur >= e.bufferReset-int32(len(e.hist)) { - if len(e.hist) == 0 { - for i := range e.table[:] { - e.table[i] = tableEntry{} - } - for i := range e.longTable[:] { - e.longTable[i] = prevEntry{} - } - e.cur = e.maxMatchOff - e.allDirty = true - break - } - // Shift down everything in the table that isn't already too far away. - minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff - for i := range e.table[:] { - v := e.table[i].offset - if v < minOff { - v = 0 - } else { - v = v - e.cur + e.maxMatchOff - } - e.table[i].offset = v - } - for i := range e.longTable[:] { - v := e.longTable[i].offset - v2 := e.longTable[i].prev - if v < minOff { - v = 0 - v2 = 0 - } else { - v = v - e.cur + e.maxMatchOff - if v2 < minOff { - v2 = 0 - } else { - v2 = v2 - e.cur + e.maxMatchOff - } - } - e.longTable[i] = prevEntry{ - offset: v, - prev: v2, - } - } - e.allDirty = true - e.cur = e.maxMatchOff - break - } - - s := e.addBlock(src) - blk.size = len(src) - if len(src) < minNonLiteralBlockSize { - blk.extraLits = len(src) - blk.literals = blk.literals[:len(src)] - copy(blk.literals, src) - return - } - - // Override src - src = e.hist - sLimit := int32(len(src)) - inputMargin - // stepSize is the number of bytes to skip on every main loop iteration. - // It should be >= 1. - const stepSize = 1 - - const kSearchStrength = 9 - - // nextEmit is where in src the next emitLiteral should start from. - nextEmit := s - cv := load6432(src, s) - - // Relative offsets - offset1 := int32(blk.recentOffsets[0]) - offset2 := int32(blk.recentOffsets[1]) - - addLiterals := func(s *seq, until int32) { - if until == nextEmit { - return - } - blk.literals = append(blk.literals, src[nextEmit:until]...) - s.litLen = uint32(until - nextEmit) - } - if debugEncoder { - println("recent offsets:", blk.recentOffsets) - } - -encodeLoop: - for { - var t int32 - // We allow the encoder to optionally turn off repeat offsets across blocks - canRepeat := len(blk.sequences) > 2 - var matched, index0 int32 - - for { - if debugAsserts && canRepeat && offset1 == 0 { - panic("offset0 was 0") - } - - nextHashL := hashLen(cv, betterLongTableBits, betterLongLen) - nextHashS := hashLen(cv, betterShortTableBits, betterShortLen) - candidateL := e.longTable[nextHashL] - candidateS := e.table[nextHashS] - - const repOff = 1 - repIndex := s - offset1 + repOff - off := s + e.cur - e.longTable[nextHashL] = prevEntry{offset: off, prev: candidateL.offset} - e.markLongShardDirty(nextHashL) - e.table[nextHashS] = tableEntry{offset: off, val: uint32(cv)} - e.markShortShardDirty(nextHashS) - index0 = s + 1 - - if canRepeat { - if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { - // Consider history as well. - var seq seq - lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src) - - seq.matchLen = uint32(lenght - zstdMinMatch) - - // We might be able to match backwards. - // Extend as long as we can. 
- start := s + repOff - // We end the search early, so we don't risk 0 literals - // and have to do special offset treatment. - startLimit := nextEmit + 1 - - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } - for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { - repIndex-- - start-- - seq.matchLen++ - } - addLiterals(&seq, start) - - // rep 0 - seq.offset = 1 - if debugSequences { - println("repeat sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - - // Index match start+1 (long) -> s - 1 - s += lenght + repOff - - nextEmit = s - if s >= sLimit { - if debugEncoder { - println("repeat ended", s, lenght) - - } - break encodeLoop - } - // Index skipped... - for index0 < s-1 { - cv0 := load6432(src, index0) - cv1 := cv0 >> 8 - h0 := hashLen(cv0, betterLongTableBits, betterLongLen) - off := index0 + e.cur - e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} - e.markLongShardDirty(h0) - h1 := hashLen(cv1, betterShortTableBits, betterShortLen) - e.table[h1] = tableEntry{offset: off + 1, val: uint32(cv1)} - e.markShortShardDirty(h1) - index0 += 2 - } - cv = load6432(src, s) - continue - } - const repOff2 = 1 - - // We deviate from the reference encoder and also check offset 2. - // Still slower and not much better, so disabled. - // repIndex = s - offset2 + repOff2 - if false && repIndex >= 0 && load6432(src, repIndex) == load6432(src, s+repOff) { - // Consider history as well. - var seq seq - lenght := 8 + e.matchlen(s+8+repOff2, repIndex+8, src) - - seq.matchLen = uint32(lenght - zstdMinMatch) - - // We might be able to match backwards. - // Extend as long as we can. - start := s + repOff2 - // We end the search early, so we don't risk 0 literals - // and have to do special offset treatment. - startLimit := nextEmit + 1 - - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } - for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { - repIndex-- - start-- - seq.matchLen++ - } - addLiterals(&seq, start) - - // rep 2 - seq.offset = 2 - if debugSequences { - println("repeat sequence 2", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - - s += lenght + repOff2 - nextEmit = s - if s >= sLimit { - if debugEncoder { - println("repeat ended", s, lenght) - - } - break encodeLoop - } - - // Index skipped... - for index0 < s-1 { - cv0 := load6432(src, index0) - cv1 := cv0 >> 8 - h0 := hashLen(cv0, betterLongTableBits, betterLongLen) - off := index0 + e.cur - e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} - e.markLongShardDirty(h0) - h1 := hashLen(cv1, betterShortTableBits, betterShortLen) - e.table[h1] = tableEntry{offset: off + 1, val: uint32(cv1)} - e.markShortShardDirty(h1) - index0 += 2 - } - cv = load6432(src, s) - // Swap offsets - offset1, offset2 = offset2, offset1 - continue - } - } - // Find the offsets of our two matches. - coffsetL := candidateL.offset - e.cur - coffsetLP := candidateL.prev - e.cur - - // Check if we have a long match. - if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { - // Found a long match, at least 8 bytes. 
- matched = e.matchlen(s+8, coffsetL+8, src) + 8 - t = coffsetL - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugMatches { - println("long match") - } - - if s-coffsetLP < e.maxMatchOff && cv == load6432(src, coffsetLP) { - // Found a long match, at least 8 bytes. - prevMatch := e.matchlen(s+8, coffsetLP+8, src) + 8 - if prevMatch > matched { - matched = prevMatch - t = coffsetLP - } - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugMatches { - println("long match") - } - } - break - } - - // Check if we have a long match on prev. - if s-coffsetLP < e.maxMatchOff && cv == load6432(src, coffsetLP) { - // Found a long match, at least 8 bytes. - matched = e.matchlen(s+8, coffsetLP+8, src) + 8 - t = coffsetLP - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugMatches { - println("long match") - } - break - } - - coffsetS := candidateS.offset - e.cur - - // Check if we have a short match. - if s-coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val { - // found a regular match - matched = e.matchlen(s+4, coffsetS+4, src) + 4 - - // See if we can find a long match at s+1 - const checkAt = 1 - cv := load6432(src, s+checkAt) - nextHashL = hashLen(cv, betterLongTableBits, betterLongLen) - candidateL = e.longTable[nextHashL] - coffsetL = candidateL.offset - e.cur - - // We can store it, since we have at least a 4 byte match. - e.longTable[nextHashL] = prevEntry{offset: s + checkAt + e.cur, prev: candidateL.offset} - e.markLongShardDirty(nextHashL) - if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { - // Found a long match, at least 8 bytes. - matchedNext := e.matchlen(s+8+checkAt, coffsetL+8, src) + 8 - if matchedNext > matched { - t = coffsetL - s += checkAt - matched = matchedNext - if debugMatches { - println("long match (after short)") - } - break - } - } - - // Check prev long... - coffsetL = candidateL.prev - e.cur - if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { - // Found a long match, at least 8 bytes. - matchedNext := e.matchlen(s+8+checkAt, coffsetL+8, src) + 8 - if matchedNext > matched { - t = coffsetL - s += checkAt - matched = matchedNext - if debugMatches { - println("prev long match (after short)") - } - break - } - } - t = coffsetS - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugAsserts && t < 0 { - panic("t<0") - } - if debugMatches { - println("short match") - } - break - } - - // No match found, move forward in input. - s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) - if s >= sLimit { - break encodeLoop - } - cv = load6432(src, s) - } - // Try to find a better match by searching for a long match at the end of the current best match - if s+matched < sLimit { - nextHashL := hashLen(load6432(src, s+matched), betterLongTableBits, betterLongLen) - cv := load3232(src, s) - candidateL := e.longTable[nextHashL] - coffsetL := candidateL.offset - e.cur - matched - if coffsetL >= 0 && coffsetL < s && s-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) { - // Found a long match, at least 4 bytes. 
- matchedNext := e.matchlen(s+4, coffsetL+4, src) + 4 - if matchedNext > matched { - t = coffsetL - matched = matchedNext - if debugMatches { - println("long match at end-of-match") - } - } - } - - // Check prev long... - if true { - coffsetL = candidateL.prev - e.cur - matched - if coffsetL >= 0 && coffsetL < s && s-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) { - // Found a long match, at least 4 bytes. - matchedNext := e.matchlen(s+4, coffsetL+4, src) + 4 - if matchedNext > matched { - t = coffsetL - matched = matchedNext - if debugMatches { - println("prev long match at end-of-match") - } - } - } - } - } - // A match has been found. Update recent offsets. - offset2 = offset1 - offset1 = s - t - - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - - if debugAsserts && canRepeat && int(offset1) > len(src) { - panic("invalid offset") - } - - // Extend the n-byte match as long as possible. - l := matched - - // Extend backwards - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } - for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { - s-- - t-- - l++ - } - - // Write our sequence - var seq seq - seq.litLen = uint32(s - nextEmit) - seq.matchLen = uint32(l - zstdMinMatch) - if seq.litLen > 0 { - blk.literals = append(blk.literals, src[nextEmit:s]...) - } - seq.offset = uint32(s-t) + 3 - s += l - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - nextEmit = s - if s >= sLimit { - break encodeLoop - } - - // Index match start+1 (long) -> s - 1 - off := index0 + e.cur - for index0 < s-1 { - cv0 := load6432(src, index0) - cv1 := cv0 >> 8 - h0 := hashLen(cv0, betterLongTableBits, betterLongLen) - e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} - e.markLongShardDirty(h0) - h1 := hashLen(cv1, betterShortTableBits, betterShortLen) - e.table[h1] = tableEntry{offset: off + 1, val: uint32(cv1)} - e.markShortShardDirty(h1) - index0 += 2 - off += 2 - } - - cv = load6432(src, s) - if !canRepeat { - continue - } - - // Check offset 2 - for { - o2 := s - offset2 - if load3232(src, o2) != uint32(cv) { - // Do regular search - break - } - - // Store this, since we have it. - nextHashL := hashLen(cv, betterLongTableBits, betterLongLen) - nextHashS := hashLen(cv, betterShortTableBits, betterShortLen) - - // We have at least 4 byte match. - // No need to check backwards. We come straight from a match - l := 4 + e.matchlen(s+4, o2+4, src) - - e.longTable[nextHashL] = prevEntry{offset: s + e.cur, prev: e.longTable[nextHashL].offset} - e.markLongShardDirty(nextHashL) - e.table[nextHashS] = tableEntry{offset: s + e.cur, val: uint32(cv)} - e.markShortShardDirty(nextHashS) - seq.matchLen = uint32(l) - zstdMinMatch - seq.litLen = 0 - - // Since litlen is always 0, this is offset 1. - seq.offset = 1 - s += l - nextEmit = s - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - - // Swap offset 1 and 2. - offset1, offset2 = offset2, offset1 - if s >= sLimit { - // Finished - break encodeLoop - } - cv = load6432(src, s) - } - } - - if int(nextEmit) < len(src) { - blk.literals = append(blk.literals, src[nextEmit:]...) 
- blk.extraLits = len(src) - int(nextEmit) - } - blk.recentOffsets[0] = uint32(offset1) - blk.recentOffsets[1] = uint32(offset2) - if debugEncoder { - println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) - } -} - -// ResetDict will reset and set a dictionary if not nil -func (e *betterFastEncoder) Reset(d *dict, singleBlock bool) { - e.resetBase(d, singleBlock) - if d != nil { - panic("betterFastEncoder: Reset with dict") - } -} - -// ResetDict will reset and set a dictionary if not nil -func (e *betterFastEncoderDict) Reset(d *dict, singleBlock bool) { - e.resetBase(d, singleBlock) - if d == nil { - return - } - // Init or copy dict table - if len(e.dictTable) != len(e.table) || d.id != e.lastDictID { - if len(e.dictTable) != len(e.table) { - e.dictTable = make([]tableEntry, len(e.table)) - } - end := int32(len(d.content)) - 8 + e.maxMatchOff - for i := e.maxMatchOff; i < end; i += 4 { - const hashLog = betterShortTableBits - - cv := load6432(d.content, i-e.maxMatchOff) - nextHash := hashLen(cv, hashLog, betterShortLen) // 0 -> 4 - nextHash1 := hashLen(cv>>8, hashLog, betterShortLen) // 1 -> 5 - nextHash2 := hashLen(cv>>16, hashLog, betterShortLen) // 2 -> 6 - nextHash3 := hashLen(cv>>24, hashLog, betterShortLen) // 3 -> 7 - e.dictTable[nextHash] = tableEntry{ - val: uint32(cv), - offset: i, - } - e.dictTable[nextHash1] = tableEntry{ - val: uint32(cv >> 8), - offset: i + 1, - } - e.dictTable[nextHash2] = tableEntry{ - val: uint32(cv >> 16), - offset: i + 2, - } - e.dictTable[nextHash3] = tableEntry{ - val: uint32(cv >> 24), - offset: i + 3, - } - } - e.lastDictID = d.id - e.allDirty = true - } - - // Init or copy dict table - if len(e.dictLongTable) != len(e.longTable) || d.id != e.lastDictID { - if len(e.dictLongTable) != len(e.longTable) { - e.dictLongTable = make([]prevEntry, len(e.longTable)) - } - if len(d.content) >= 8 { - cv := load6432(d.content, 0) - h := hashLen(cv, betterLongTableBits, betterLongLen) - e.dictLongTable[h] = prevEntry{ - offset: e.maxMatchOff, - prev: e.dictLongTable[h].offset, - } - - end := int32(len(d.content)) - 8 + e.maxMatchOff - off := 8 // First to read - for i := e.maxMatchOff + 1; i < end; i++ { - cv = cv>>8 | (uint64(d.content[off]) << 56) - h := hashLen(cv, betterLongTableBits, betterLongLen) - e.dictLongTable[h] = prevEntry{ - offset: i, - prev: e.dictLongTable[h].offset, - } - off++ - } - } - e.lastDictID = d.id - e.allDirty = true - } - - // Reset table to initial state - { - dirtyShardCnt := 0 - if !e.allDirty { - for i := range e.shortTableShardDirty { - if e.shortTableShardDirty[i] { - dirtyShardCnt++ - } - } - } - const shardCnt = betterShortTableShardCnt - const shardSize = betterShortTableShardSize - if e.allDirty || dirtyShardCnt > shardCnt*4/6 { - copy(e.table[:], e.dictTable) - for i := range e.shortTableShardDirty { - e.shortTableShardDirty[i] = false - } - } else { - for i := range e.shortTableShardDirty { - if !e.shortTableShardDirty[i] { - continue - } - - copy(e.table[i*shardSize:(i+1)*shardSize], e.dictTable[i*shardSize:(i+1)*shardSize]) - e.shortTableShardDirty[i] = false - } - } - } - { - dirtyShardCnt := 0 - if !e.allDirty { - for i := range e.shortTableShardDirty { - if e.shortTableShardDirty[i] { - dirtyShardCnt++ - } - } - } - const shardCnt = betterLongTableShardCnt - const shardSize = betterLongTableShardSize - if e.allDirty || dirtyShardCnt > shardCnt*4/6 { - copy(e.longTable[:], e.dictLongTable) - for i := range e.longTableShardDirty { - e.longTableShardDirty[i] = false - } - } else { - 
for i := range e.longTableShardDirty { - if !e.longTableShardDirty[i] { - continue - } - - copy(e.longTable[i*shardSize:(i+1)*shardSize], e.dictLongTable[i*shardSize:(i+1)*shardSize]) - e.longTableShardDirty[i] = false - } - } - } - e.cur = e.maxMatchOff - e.allDirty = false -} - -func (e *betterFastEncoderDict) markLongShardDirty(entryNum uint32) { - e.longTableShardDirty[entryNum/betterLongTableShardSize] = true -} - -func (e *betterFastEncoderDict) markShortShardDirty(entryNum uint32) { - e.shortTableShardDirty[entryNum/betterShortTableShardSize] = true -} diff --git a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go deleted file mode 100644 index a154c18f7..000000000 --- a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go +++ /dev/null @@ -1,1123 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -import "fmt" - -const ( - dFastLongTableBits = 17 // Bits used in the long match table - dFastLongTableSize = 1 << dFastLongTableBits // Size of the table - dFastLongTableMask = dFastLongTableSize - 1 // Mask for table indices. Redundant, but can eliminate bounds checks. - dFastLongLen = 8 // Bytes used for table hash - - dLongTableShardCnt = 1 << (dFastLongTableBits - dictShardBits) // Number of shards in the table - dLongTableShardSize = dFastLongTableSize / tableShardCnt // Size of an individual shard - - dFastShortTableBits = tableBits // Bits used in the short match table - dFastShortTableSize = 1 << dFastShortTableBits // Size of the table - dFastShortTableMask = dFastShortTableSize - 1 // Mask for table indices. Redundant, but can eliminate bounds checks. - dFastShortLen = 5 // Bytes used for table hash - -) - -type doubleFastEncoder struct { - fastEncoder - longTable [dFastLongTableSize]tableEntry -} - -type doubleFastEncoderDict struct { - fastEncoderDict - longTable [dFastLongTableSize]tableEntry - dictLongTable []tableEntry - longTableShardDirty [dLongTableShardCnt]bool -} - -// Encode mimmics functionality in zstd_dfast.c -func (e *doubleFastEncoder) Encode(blk *blockEnc, src []byte) { - const ( - // Input margin is the number of bytes we read (8) - // and the maximum we will read ahead (2) - inputMargin = 8 + 2 - minNonLiteralBlockSize = 16 - ) - - // Protect against e.cur wraparound. - for e.cur >= e.bufferReset-int32(len(e.hist)) { - if len(e.hist) == 0 { - e.table = [dFastShortTableSize]tableEntry{} - e.longTable = [dFastLongTableSize]tableEntry{} - e.cur = e.maxMatchOff - break - } - // Shift down everything in the table that isn't already too far away. - minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff - for i := range e.table[:] { - v := e.table[i].offset - if v < minOff { - v = 0 - } else { - v = v - e.cur + e.maxMatchOff - } - e.table[i].offset = v - } - for i := range e.longTable[:] { - v := e.longTable[i].offset - if v < minOff { - v = 0 - } else { - v = v - e.cur + e.maxMatchOff - } - e.longTable[i].offset = v - } - e.cur = e.maxMatchOff - break - } - - s := e.addBlock(src) - blk.size = len(src) - if len(src) < minNonLiteralBlockSize { - blk.extraLits = len(src) - blk.literals = blk.literals[:len(src)] - copy(blk.literals, src) - return - } - - // Override src - src = e.hist - sLimit := int32(len(src)) - inputMargin - // stepSize is the number of bytes to skip on every main loop iteration. - // It should be >= 1. 
- const stepSize = 1 - - const kSearchStrength = 8 - - // nextEmit is where in src the next emitLiteral should start from. - nextEmit := s - cv := load6432(src, s) - - // Relative offsets - offset1 := int32(blk.recentOffsets[0]) - offset2 := int32(blk.recentOffsets[1]) - - addLiterals := func(s *seq, until int32) { - if until == nextEmit { - return - } - blk.literals = append(blk.literals, src[nextEmit:until]...) - s.litLen = uint32(until - nextEmit) - } - if debugEncoder { - println("recent offsets:", blk.recentOffsets) - } - -encodeLoop: - for { - var t int32 - // We allow the encoder to optionally turn off repeat offsets across blocks - canRepeat := len(blk.sequences) > 2 - - for { - if debugAsserts && canRepeat && offset1 == 0 { - panic("offset0 was 0") - } - - nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) - nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) - candidateL := e.longTable[nextHashL] - candidateS := e.table[nextHashS] - - const repOff = 1 - repIndex := s - offset1 + repOff - entry := tableEntry{offset: s + e.cur, val: uint32(cv)} - e.longTable[nextHashL] = entry - e.table[nextHashS] = entry - - if canRepeat { - if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { - // Consider history as well. - var seq seq - lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src) - - seq.matchLen = uint32(lenght - zstdMinMatch) - - // We might be able to match backwards. - // Extend as long as we can. - start := s + repOff - // We end the search early, so we don't risk 0 literals - // and have to do special offset treatment. - startLimit := nextEmit + 1 - - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } - for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { - repIndex-- - start-- - seq.matchLen++ - } - addLiterals(&seq, start) - - // rep 0 - seq.offset = 1 - if debugSequences { - println("repeat sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - s += lenght + repOff - nextEmit = s - if s >= sLimit { - if debugEncoder { - println("repeat ended", s, lenght) - - } - break encodeLoop - } - cv = load6432(src, s) - continue - } - } - // Find the offsets of our two matches. - coffsetL := s - (candidateL.offset - e.cur) - coffsetS := s - (candidateS.offset - e.cur) - - // Check if we have a long match. - if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { - // Found a long match, likely at least 8 bytes. - // Reference encoder checks all 8 bytes, we only check 4, - // but the likelihood of both the first 4 bytes and the hash matching should be enough. - t = candidateL.offset - e.cur - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugMatches { - println("long match") - } - break - } - - // Check if we have a short match. - if coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val { - // found a regular match - // See if we can find a long match at s+1 - const checkAt = 1 - cv := load6432(src, s+checkAt) - nextHashL = hashLen(cv, dFastLongTableBits, dFastLongLen) - candidateL = e.longTable[nextHashL] - coffsetL = s - (candidateL.offset - e.cur) + checkAt - - // We can store it, since we have at least a 4 byte match. - e.longTable[nextHashL] = tableEntry{offset: s + checkAt + e.cur, val: uint32(cv)} - if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { - // Found a long match, likely at least 8 bytes. 
- // Reference encoder checks all 8 bytes, we only check 4, - // but the likelihood of both the first 4 bytes and the hash matching should be enough. - t = candidateL.offset - e.cur - s += checkAt - if debugMatches { - println("long match (after short)") - } - break - } - - t = candidateS.offset - e.cur - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugAsserts && t < 0 { - panic("t<0") - } - if debugMatches { - println("short match") - } - break - } - - // No match found, move forward in input. - s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) - if s >= sLimit { - break encodeLoop - } - cv = load6432(src, s) - } - - // A 4-byte match has been found. Update recent offsets. - // We'll later see if more than 4 bytes. - offset2 = offset1 - offset1 = s - t - - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - - if debugAsserts && canRepeat && int(offset1) > len(src) { - panic("invalid offset") - } - - // Extend the 4-byte match as long as possible. - l := e.matchlen(s+4, t+4, src) + 4 - - // Extend backwards - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } - for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { - s-- - t-- - l++ - } - - // Write our sequence - var seq seq - seq.litLen = uint32(s - nextEmit) - seq.matchLen = uint32(l - zstdMinMatch) - if seq.litLen > 0 { - blk.literals = append(blk.literals, src[nextEmit:s]...) - } - seq.offset = uint32(s-t) + 3 - s += l - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - nextEmit = s - if s >= sLimit { - break encodeLoop - } - - // Index match start+1 (long) and start+2 (short) - index0 := s - l + 1 - // Index match end-2 (long) and end-1 (short) - index1 := s - 2 - - cv0 := load6432(src, index0) - cv1 := load6432(src, index1) - te0 := tableEntry{offset: index0 + e.cur, val: uint32(cv0)} - te1 := tableEntry{offset: index1 + e.cur, val: uint32(cv1)} - e.longTable[hashLen(cv0, dFastLongTableBits, dFastLongLen)] = te0 - e.longTable[hashLen(cv1, dFastLongTableBits, dFastLongLen)] = te1 - cv0 >>= 8 - cv1 >>= 8 - te0.offset++ - te1.offset++ - te0.val = uint32(cv0) - te1.val = uint32(cv1) - e.table[hashLen(cv0, dFastShortTableBits, dFastShortLen)] = te0 - e.table[hashLen(cv1, dFastShortTableBits, dFastShortLen)] = te1 - - cv = load6432(src, s) - - if !canRepeat { - continue - } - - // Check offset 2 - for { - o2 := s - offset2 - if load3232(src, o2) != uint32(cv) { - // Do regular search - break - } - - // Store this, since we have it. - nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) - nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) - - // We have at least 4 byte match. - // No need to check backwards. We come straight from a match - l := 4 + e.matchlen(s+4, o2+4, src) - - entry := tableEntry{offset: s + e.cur, val: uint32(cv)} - e.longTable[nextHashL] = entry - e.table[nextHashS] = entry - seq.matchLen = uint32(l) - zstdMinMatch - seq.litLen = 0 - - // Since litlen is always 0, this is offset 1. - seq.offset = 1 - s += l - nextEmit = s - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - - // Swap offset 1 and 2. 
- offset1, offset2 = offset2, offset1 - if s >= sLimit { - // Finished - break encodeLoop - } - cv = load6432(src, s) - } - } - - if int(nextEmit) < len(src) { - blk.literals = append(blk.literals, src[nextEmit:]...) - blk.extraLits = len(src) - int(nextEmit) - } - blk.recentOffsets[0] = uint32(offset1) - blk.recentOffsets[1] = uint32(offset2) - if debugEncoder { - println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) - } -} - -// EncodeNoHist will encode a block with no history and no following blocks. -// Most notable difference is that src will not be copied for history and -// we do not need to check for max match length. -func (e *doubleFastEncoder) EncodeNoHist(blk *blockEnc, src []byte) { - const ( - // Input margin is the number of bytes we read (8) - // and the maximum we will read ahead (2) - inputMargin = 8 + 2 - minNonLiteralBlockSize = 16 - ) - - // Protect against e.cur wraparound. - if e.cur >= e.bufferReset { - for i := range e.table[:] { - e.table[i] = tableEntry{} - } - for i := range e.longTable[:] { - e.longTable[i] = tableEntry{} - } - e.cur = e.maxMatchOff - } - - s := int32(0) - blk.size = len(src) - if len(src) < minNonLiteralBlockSize { - blk.extraLits = len(src) - blk.literals = blk.literals[:len(src)] - copy(blk.literals, src) - return - } - - // Override src - sLimit := int32(len(src)) - inputMargin - // stepSize is the number of bytes to skip on every main loop iteration. - // It should be >= 1. - const stepSize = 1 - - const kSearchStrength = 8 - - // nextEmit is where in src the next emitLiteral should start from. - nextEmit := s - cv := load6432(src, s) - - // Relative offsets - offset1 := int32(blk.recentOffsets[0]) - offset2 := int32(blk.recentOffsets[1]) - - addLiterals := func(s *seq, until int32) { - if until == nextEmit { - return - } - blk.literals = append(blk.literals, src[nextEmit:until]...) - s.litLen = uint32(until - nextEmit) - } - if debugEncoder { - println("recent offsets:", blk.recentOffsets) - } - -encodeLoop: - for { - var t int32 - for { - - nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) - nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) - candidateL := e.longTable[nextHashL] - candidateS := e.table[nextHashS] - - const repOff = 1 - repIndex := s - offset1 + repOff - entry := tableEntry{offset: s + e.cur, val: uint32(cv)} - e.longTable[nextHashL] = entry - e.table[nextHashS] = entry - - if len(blk.sequences) > 2 { - if load3232(src, repIndex) == uint32(cv>>(repOff*8)) { - // Consider history as well. - var seq seq - //length := 4 + e.matchlen(s+4+repOff, repIndex+4, src) - length := 4 + int32(matchLen(src[s+4+repOff:], src[repIndex+4:])) - - seq.matchLen = uint32(length - zstdMinMatch) - - // We might be able to match backwards. - // Extend as long as we can. - start := s + repOff - // We end the search early, so we don't risk 0 literals - // and have to do special offset treatment. 
- startLimit := nextEmit + 1 - - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } - for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] { - repIndex-- - start-- - seq.matchLen++ - } - addLiterals(&seq, start) - - // rep 0 - seq.offset = 1 - if debugSequences { - println("repeat sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - s += length + repOff - nextEmit = s - if s >= sLimit { - if debugEncoder { - println("repeat ended", s, length) - - } - break encodeLoop - } - cv = load6432(src, s) - continue - } - } - // Find the offsets of our two matches. - coffsetL := s - (candidateL.offset - e.cur) - coffsetS := s - (candidateS.offset - e.cur) - - // Check if we have a long match. - if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { - // Found a long match, likely at least 8 bytes. - // Reference encoder checks all 8 bytes, we only check 4, - // but the likelihood of both the first 4 bytes and the hash matching should be enough. - t = candidateL.offset - e.cur - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d). cur: %d", s, t, e.cur)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugMatches { - println("long match") - } - break - } - - // Check if we have a short match. - if coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val { - // found a regular match - // See if we can find a long match at s+1 - const checkAt = 1 - cv := load6432(src, s+checkAt) - nextHashL = hashLen(cv, dFastLongTableBits, dFastLongLen) - candidateL = e.longTable[nextHashL] - coffsetL = s - (candidateL.offset - e.cur) + checkAt - - // We can store it, since we have at least a 4 byte match. - e.longTable[nextHashL] = tableEntry{offset: s + checkAt + e.cur, val: uint32(cv)} - if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { - // Found a long match, likely at least 8 bytes. - // Reference encoder checks all 8 bytes, we only check 4, - // but the likelihood of both the first 4 bytes and the hash matching should be enough. - t = candidateL.offset - e.cur - s += checkAt - if debugMatches { - println("long match (after short)") - } - break - } - - t = candidateS.offset - e.cur - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugAsserts && t < 0 { - panic("t<0") - } - if debugMatches { - println("short match") - } - break - } - - // No match found, move forward in input. - s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) - if s >= sLimit { - break encodeLoop - } - cv = load6432(src, s) - } - - // A 4-byte match has been found. Update recent offsets. - // We'll later see if more than 4 bytes. - offset2 = offset1 - offset1 = s - t - - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - - // Extend the 4-byte match as long as possible. - //l := e.matchlen(s+4, t+4, src) + 4 - l := int32(matchLen(src[s+4:], src[t+4:])) + 4 - - // Extend backwards - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } - for t > tMin && s > nextEmit && src[t-1] == src[s-1] { - s-- - t-- - l++ - } - - // Write our sequence - var seq seq - seq.litLen = uint32(s - nextEmit) - seq.matchLen = uint32(l - zstdMinMatch) - if seq.litLen > 0 { - blk.literals = append(blk.literals, src[nextEmit:s]...) 
- } - seq.offset = uint32(s-t) + 3 - s += l - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - nextEmit = s - if s >= sLimit { - break encodeLoop - } - - // Index match start+1 (long) and start+2 (short) - index0 := s - l + 1 - // Index match end-2 (long) and end-1 (short) - index1 := s - 2 - - cv0 := load6432(src, index0) - cv1 := load6432(src, index1) - te0 := tableEntry{offset: index0 + e.cur, val: uint32(cv0)} - te1 := tableEntry{offset: index1 + e.cur, val: uint32(cv1)} - e.longTable[hashLen(cv0, dFastLongTableBits, dFastLongLen)] = te0 - e.longTable[hashLen(cv1, dFastLongTableBits, dFastLongLen)] = te1 - cv0 >>= 8 - cv1 >>= 8 - te0.offset++ - te1.offset++ - te0.val = uint32(cv0) - te1.val = uint32(cv1) - e.table[hashLen(cv0, dFastShortTableBits, dFastShortLen)] = te0 - e.table[hashLen(cv1, dFastShortTableBits, dFastShortLen)] = te1 - - cv = load6432(src, s) - - if len(blk.sequences) <= 2 { - continue - } - - // Check offset 2 - for { - o2 := s - offset2 - if load3232(src, o2) != uint32(cv) { - // Do regular search - break - } - - // Store this, since we have it. - nextHashS := hashLen(cv1>>8, dFastShortTableBits, dFastShortLen) - nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) - - // We have at least 4 byte match. - // No need to check backwards. We come straight from a match - //l := 4 + e.matchlen(s+4, o2+4, src) - l := 4 + int32(matchLen(src[s+4:], src[o2+4:])) - - entry := tableEntry{offset: s + e.cur, val: uint32(cv)} - e.longTable[nextHashL] = entry - e.table[nextHashS] = entry - seq.matchLen = uint32(l) - zstdMinMatch - seq.litLen = 0 - - // Since litlen is always 0, this is offset 1. - seq.offset = 1 - s += l - nextEmit = s - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - - // Swap offset 1 and 2. - offset1, offset2 = offset2, offset1 - if s >= sLimit { - // Finished - break encodeLoop - } - cv = load6432(src, s) - } - } - - if int(nextEmit) < len(src) { - blk.literals = append(blk.literals, src[nextEmit:]...) - blk.extraLits = len(src) - int(nextEmit) - } - if debugEncoder { - println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) - } - - // We do not store history, so we must offset e.cur to avoid false matches for next user. - if e.cur < e.bufferReset { - e.cur += int32(len(src)) - } -} - -// Encode will encode the content, with a dictionary if initialized for it. -func (e *doubleFastEncoderDict) Encode(blk *blockEnc, src []byte) { - const ( - // Input margin is the number of bytes we read (8) - // and the maximum we will read ahead (2) - inputMargin = 8 + 2 - minNonLiteralBlockSize = 16 - ) - - // Protect against e.cur wraparound. - for e.cur >= e.bufferReset-int32(len(e.hist)) { - if len(e.hist) == 0 { - for i := range e.table[:] { - e.table[i] = tableEntry{} - } - for i := range e.longTable[:] { - e.longTable[i] = tableEntry{} - } - e.markAllShardsDirty() - e.cur = e.maxMatchOff - break - } - // Shift down everything in the table that isn't already too far away. 
- minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff - for i := range e.table[:] { - v := e.table[i].offset - if v < minOff { - v = 0 - } else { - v = v - e.cur + e.maxMatchOff - } - e.table[i].offset = v - } - for i := range e.longTable[:] { - v := e.longTable[i].offset - if v < minOff { - v = 0 - } else { - v = v - e.cur + e.maxMatchOff - } - e.longTable[i].offset = v - } - e.markAllShardsDirty() - e.cur = e.maxMatchOff - break - } - - s := e.addBlock(src) - blk.size = len(src) - if len(src) < minNonLiteralBlockSize { - blk.extraLits = len(src) - blk.literals = blk.literals[:len(src)] - copy(blk.literals, src) - return - } - - // Override src - src = e.hist - sLimit := int32(len(src)) - inputMargin - // stepSize is the number of bytes to skip on every main loop iteration. - // It should be >= 1. - const stepSize = 1 - - const kSearchStrength = 8 - - // nextEmit is where in src the next emitLiteral should start from. - nextEmit := s - cv := load6432(src, s) - - // Relative offsets - offset1 := int32(blk.recentOffsets[0]) - offset2 := int32(blk.recentOffsets[1]) - - addLiterals := func(s *seq, until int32) { - if until == nextEmit { - return - } - blk.literals = append(blk.literals, src[nextEmit:until]...) - s.litLen = uint32(until - nextEmit) - } - if debugEncoder { - println("recent offsets:", blk.recentOffsets) - } - -encodeLoop: - for { - var t int32 - // We allow the encoder to optionally turn off repeat offsets across blocks - canRepeat := len(blk.sequences) > 2 - - for { - if debugAsserts && canRepeat && offset1 == 0 { - panic("offset0 was 0") - } - - nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) - nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) - candidateL := e.longTable[nextHashL] - candidateS := e.table[nextHashS] - - const repOff = 1 - repIndex := s - offset1 + repOff - entry := tableEntry{offset: s + e.cur, val: uint32(cv)} - e.longTable[nextHashL] = entry - e.markLongShardDirty(nextHashL) - e.table[nextHashS] = entry - e.markShardDirty(nextHashS) - - if canRepeat { - if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { - // Consider history as well. - var seq seq - lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src) - - seq.matchLen = uint32(lenght - zstdMinMatch) - - // We might be able to match backwards. - // Extend as long as we can. - start := s + repOff - // We end the search early, so we don't risk 0 literals - // and have to do special offset treatment. - startLimit := nextEmit + 1 - - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } - for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { - repIndex-- - start-- - seq.matchLen++ - } - addLiterals(&seq, start) - - // rep 0 - seq.offset = 1 - if debugSequences { - println("repeat sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - s += lenght + repOff - nextEmit = s - if s >= sLimit { - if debugEncoder { - println("repeat ended", s, lenght) - - } - break encodeLoop - } - cv = load6432(src, s) - continue - } - } - // Find the offsets of our two matches. - coffsetL := s - (candidateL.offset - e.cur) - coffsetS := s - (candidateS.offset - e.cur) - - // Check if we have a long match. - if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { - // Found a long match, likely at least 8 bytes. - // Reference encoder checks all 8 bytes, we only check 4, - // but the likelihood of both the first 4 bytes and the hash matching should be enough. 
- t = candidateL.offset - e.cur - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugMatches { - println("long match") - } - break - } - - // Check if we have a short match. - if coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val { - // found a regular match - // See if we can find a long match at s+1 - const checkAt = 1 - cv := load6432(src, s+checkAt) - nextHashL = hashLen(cv, dFastLongTableBits, dFastLongLen) - candidateL = e.longTable[nextHashL] - coffsetL = s - (candidateL.offset - e.cur) + checkAt - - // We can store it, since we have at least a 4 byte match. - e.longTable[nextHashL] = tableEntry{offset: s + checkAt + e.cur, val: uint32(cv)} - e.markLongShardDirty(nextHashL) - if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { - // Found a long match, likely at least 8 bytes. - // Reference encoder checks all 8 bytes, we only check 4, - // but the likelihood of both the first 4 bytes and the hash matching should be enough. - t = candidateL.offset - e.cur - s += checkAt - if debugMatches { - println("long match (after short)") - } - break - } - - t = candidateS.offset - e.cur - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugAsserts && t < 0 { - panic("t<0") - } - if debugMatches { - println("short match") - } - break - } - - // No match found, move forward in input. - s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) - if s >= sLimit { - break encodeLoop - } - cv = load6432(src, s) - } - - // A 4-byte match has been found. Update recent offsets. - // We'll later see if more than 4 bytes. - offset2 = offset1 - offset1 = s - t - - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - - if debugAsserts && canRepeat && int(offset1) > len(src) { - panic("invalid offset") - } - - // Extend the 4-byte match as long as possible. - l := e.matchlen(s+4, t+4, src) + 4 - - // Extend backwards - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } - for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { - s-- - t-- - l++ - } - - // Write our sequence - var seq seq - seq.litLen = uint32(s - nextEmit) - seq.matchLen = uint32(l - zstdMinMatch) - if seq.litLen > 0 { - blk.literals = append(blk.literals, src[nextEmit:s]...) 
- } - seq.offset = uint32(s-t) + 3 - s += l - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - nextEmit = s - if s >= sLimit { - break encodeLoop - } - - // Index match start+1 (long) and start+2 (short) - index0 := s - l + 1 - // Index match end-2 (long) and end-1 (short) - index1 := s - 2 - - cv0 := load6432(src, index0) - cv1 := load6432(src, index1) - te0 := tableEntry{offset: index0 + e.cur, val: uint32(cv0)} - te1 := tableEntry{offset: index1 + e.cur, val: uint32(cv1)} - longHash1 := hashLen(cv0, dFastLongTableBits, dFastLongLen) - longHash2 := hashLen(cv1, dFastLongTableBits, dFastLongLen) - e.longTable[longHash1] = te0 - e.longTable[longHash2] = te1 - e.markLongShardDirty(longHash1) - e.markLongShardDirty(longHash2) - cv0 >>= 8 - cv1 >>= 8 - te0.offset++ - te1.offset++ - te0.val = uint32(cv0) - te1.val = uint32(cv1) - hashVal1 := hashLen(cv0, dFastShortTableBits, dFastShortLen) - hashVal2 := hashLen(cv1, dFastShortTableBits, dFastShortLen) - e.table[hashVal1] = te0 - e.markShardDirty(hashVal1) - e.table[hashVal2] = te1 - e.markShardDirty(hashVal2) - - cv = load6432(src, s) - - if !canRepeat { - continue - } - - // Check offset 2 - for { - o2 := s - offset2 - if load3232(src, o2) != uint32(cv) { - // Do regular search - break - } - - // Store this, since we have it. - nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) - nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) - - // We have at least 4 byte match. - // No need to check backwards. We come straight from a match - l := 4 + e.matchlen(s+4, o2+4, src) - - entry := tableEntry{offset: s + e.cur, val: uint32(cv)} - e.longTable[nextHashL] = entry - e.markLongShardDirty(nextHashL) - e.table[nextHashS] = entry - e.markShardDirty(nextHashS) - seq.matchLen = uint32(l) - zstdMinMatch - seq.litLen = 0 - - // Since litlen is always 0, this is offset 1. - seq.offset = 1 - s += l - nextEmit = s - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - - // Swap offset 1 and 2. - offset1, offset2 = offset2, offset1 - if s >= sLimit { - // Finished - break encodeLoop - } - cv = load6432(src, s) - } - } - - if int(nextEmit) < len(src) { - blk.literals = append(blk.literals, src[nextEmit:]...) - blk.extraLits = len(src) - int(nextEmit) - } - blk.recentOffsets[0] = uint32(offset1) - blk.recentOffsets[1] = uint32(offset2) - if debugEncoder { - println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) - } - // If we encoded more than 64K mark all dirty. 
- if len(src) > 64<<10 { - e.markAllShardsDirty() - } -} - -// ResetDict will reset and set a dictionary if not nil -func (e *doubleFastEncoder) Reset(d *dict, singleBlock bool) { - e.fastEncoder.Reset(d, singleBlock) - if d != nil { - panic("doubleFastEncoder: Reset with dict not supported") - } -} - -// ResetDict will reset and set a dictionary if not nil -func (e *doubleFastEncoderDict) Reset(d *dict, singleBlock bool) { - allDirty := e.allDirty - e.fastEncoderDict.Reset(d, singleBlock) - if d == nil { - return - } - - // Init or copy dict table - if len(e.dictLongTable) != len(e.longTable) || d.id != e.lastDictID { - if len(e.dictLongTable) != len(e.longTable) { - e.dictLongTable = make([]tableEntry, len(e.longTable)) - } - if len(d.content) >= 8 { - cv := load6432(d.content, 0) - e.dictLongTable[hashLen(cv, dFastLongTableBits, dFastLongLen)] = tableEntry{ - val: uint32(cv), - offset: e.maxMatchOff, - } - end := int32(len(d.content)) - 8 + e.maxMatchOff - for i := e.maxMatchOff + 1; i < end; i++ { - cv = cv>>8 | (uint64(d.content[i-e.maxMatchOff+7]) << 56) - e.dictLongTable[hashLen(cv, dFastLongTableBits, dFastLongLen)] = tableEntry{ - val: uint32(cv), - offset: i, - } - } - } - e.lastDictID = d.id - allDirty = true - } - // Reset table to initial state - e.cur = e.maxMatchOff - - dirtyShardCnt := 0 - if !allDirty { - for i := range e.longTableShardDirty { - if e.longTableShardDirty[i] { - dirtyShardCnt++ - } - } - } - - if allDirty || dirtyShardCnt > dLongTableShardCnt/2 { - //copy(e.longTable[:], e.dictLongTable) - e.longTable = *(*[dFastLongTableSize]tableEntry)(e.dictLongTable) - for i := range e.longTableShardDirty { - e.longTableShardDirty[i] = false - } - return - } - for i := range e.longTableShardDirty { - if !e.longTableShardDirty[i] { - continue - } - - // copy(e.longTable[i*dLongTableShardSize:(i+1)*dLongTableShardSize], e.dictLongTable[i*dLongTableShardSize:(i+1)*dLongTableShardSize]) - *(*[dLongTableShardSize]tableEntry)(e.longTable[i*dLongTableShardSize:]) = *(*[dLongTableShardSize]tableEntry)(e.dictLongTable[i*dLongTableShardSize:]) - - e.longTableShardDirty[i] = false - } -} - -func (e *doubleFastEncoderDict) markLongShardDirty(entryNum uint32) { - e.longTableShardDirty[entryNum/dLongTableShardSize] = true -} diff --git a/vendor/github.com/klauspost/compress/zstd/enc_fast.go b/vendor/github.com/klauspost/compress/zstd/enc_fast.go deleted file mode 100644 index f45a3da7d..000000000 --- a/vendor/github.com/klauspost/compress/zstd/enc_fast.go +++ /dev/null @@ -1,891 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -import ( - "fmt" -) - -const ( - tableBits = 15 // Bits used in the table - tableSize = 1 << tableBits // Size of the table - tableShardCnt = 1 << (tableBits - dictShardBits) // Number of shards in the table - tableShardSize = tableSize / tableShardCnt // Size of an individual shard - tableFastHashLen = 6 - tableMask = tableSize - 1 // Mask for table indices. Redundant, but can eliminate bounds checks. 
- maxMatchLength = 131074 -) - -type tableEntry struct { - val uint32 - offset int32 -} - -type fastEncoder struct { - fastBase - table [tableSize]tableEntry -} - -type fastEncoderDict struct { - fastEncoder - dictTable []tableEntry - tableShardDirty [tableShardCnt]bool - allDirty bool -} - -// Encode mimmics functionality in zstd_fast.c -func (e *fastEncoder) Encode(blk *blockEnc, src []byte) { - const ( - inputMargin = 8 - minNonLiteralBlockSize = 1 + 1 + inputMargin - ) - - // Protect against e.cur wraparound. - for e.cur >= e.bufferReset-int32(len(e.hist)) { - if len(e.hist) == 0 { - for i := range e.table[:] { - e.table[i] = tableEntry{} - } - e.cur = e.maxMatchOff - break - } - // Shift down everything in the table that isn't already too far away. - minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff - for i := range e.table[:] { - v := e.table[i].offset - if v < minOff { - v = 0 - } else { - v = v - e.cur + e.maxMatchOff - } - e.table[i].offset = v - } - e.cur = e.maxMatchOff - break - } - - s := e.addBlock(src) - blk.size = len(src) - if len(src) < minNonLiteralBlockSize { - blk.extraLits = len(src) - blk.literals = blk.literals[:len(src)] - copy(blk.literals, src) - return - } - - // Override src - src = e.hist - sLimit := int32(len(src)) - inputMargin - // stepSize is the number of bytes to skip on every main loop iteration. - // It should be >= 2. - const stepSize = 2 - - // TEMPLATE - const hashLog = tableBits - // seems global, but would be nice to tweak. - const kSearchStrength = 6 - - // nextEmit is where in src the next emitLiteral should start from. - nextEmit := s - cv := load6432(src, s) - - // Relative offsets - offset1 := int32(blk.recentOffsets[0]) - offset2 := int32(blk.recentOffsets[1]) - - addLiterals := func(s *seq, until int32) { - if until == nextEmit { - return - } - blk.literals = append(blk.literals, src[nextEmit:until]...) - s.litLen = uint32(until - nextEmit) - } - if debugEncoder { - println("recent offsets:", blk.recentOffsets) - } - -encodeLoop: - for { - // t will contain the match offset when we find one. - // When existing the search loop, we have already checked 4 bytes. - var t int32 - - // We will not use repeat offsets across blocks. - // By not using them for the first 3 matches - canRepeat := len(blk.sequences) > 2 - - for { - if debugAsserts && canRepeat && offset1 == 0 { - panic("offset0 was 0") - } - - nextHash := hashLen(cv, hashLog, tableFastHashLen) - nextHash2 := hashLen(cv>>8, hashLog, tableFastHashLen) - candidate := e.table[nextHash] - candidate2 := e.table[nextHash2] - repIndex := s - offset1 + 2 - - e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} - e.table[nextHash2] = tableEntry{offset: s + e.cur + 1, val: uint32(cv >> 8)} - - if canRepeat && repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>16) { - // Consider history as well. - var seq seq - length := 4 + e.matchlen(s+6, repIndex+4, src) - seq.matchLen = uint32(length - zstdMinMatch) - - // We might be able to match backwards. - // Extend as long as we can. - start := s + 2 - // We end the search early, so we don't risk 0 literals - // and have to do special offset treatment. 
- startLimit := nextEmit + 1 - - sMin := s - e.maxMatchOff - if sMin < 0 { - sMin = 0 - } - for repIndex > sMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch { - repIndex-- - start-- - seq.matchLen++ - } - addLiterals(&seq, start) - - // rep 0 - seq.offset = 1 - if debugSequences { - println("repeat sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - s += length + 2 - nextEmit = s - if s >= sLimit { - if debugEncoder { - println("repeat ended", s, length) - - } - break encodeLoop - } - cv = load6432(src, s) - continue - } - coffset0 := s - (candidate.offset - e.cur) - coffset1 := s - (candidate2.offset - e.cur) + 1 - if coffset0 < e.maxMatchOff && uint32(cv) == candidate.val { - // found a regular match - t = candidate.offset - e.cur - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - break - } - - if coffset1 < e.maxMatchOff && uint32(cv>>8) == candidate2.val { - // found a regular match - t = candidate2.offset - e.cur - s++ - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugAsserts && t < 0 { - panic("t<0") - } - break - } - s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) - if s >= sLimit { - break encodeLoop - } - cv = load6432(src, s) - } - // A 4-byte match has been found. We'll later see if more than 4 bytes. - offset2 = offset1 - offset1 = s - t - - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - - if debugAsserts && canRepeat && int(offset1) > len(src) { - panic("invalid offset") - } - - // Extend the 4-byte match as long as possible. - l := e.matchlen(s+4, t+4, src) + 4 - - // Extend backwards - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } - for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { - s-- - t-- - l++ - } - - // Write our sequence. - var seq seq - seq.litLen = uint32(s - nextEmit) - seq.matchLen = uint32(l - zstdMinMatch) - if seq.litLen > 0 { - blk.literals = append(blk.literals, src[nextEmit:s]...) - } - // Don't use repeat offsets - seq.offset = uint32(s-t) + 3 - s += l - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - nextEmit = s - if s >= sLimit { - break encodeLoop - } - cv = load6432(src, s) - - // Check offset 2 - if o2 := s - offset2; canRepeat && load3232(src, o2) == uint32(cv) { - // We have at least 4 byte match. - // No need to check backwards. We come straight from a match - l := 4 + e.matchlen(s+4, o2+4, src) - - // Store this, since we have it. - nextHash := hashLen(cv, hashLog, tableFastHashLen) - e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} - seq.matchLen = uint32(l) - zstdMinMatch - seq.litLen = 0 - // Since litlen is always 0, this is offset 1. - seq.offset = 1 - s += l - nextEmit = s - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - - // Swap offset 1 and 2. - offset1, offset2 = offset2, offset1 - if s >= sLimit { - break encodeLoop - } - // Prepare next loop. - cv = load6432(src, s) - } - } - - if int(nextEmit) < len(src) { - blk.literals = append(blk.literals, src[nextEmit:]...) 
- blk.extraLits = len(src) - int(nextEmit) - } - blk.recentOffsets[0] = uint32(offset1) - blk.recentOffsets[1] = uint32(offset2) - if debugEncoder { - println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) - } -} - -// EncodeNoHist will encode a block with no history and no following blocks. -// Most notable difference is that src will not be copied for history and -// we do not need to check for max match length. -func (e *fastEncoder) EncodeNoHist(blk *blockEnc, src []byte) { - const ( - inputMargin = 8 - minNonLiteralBlockSize = 1 + 1 + inputMargin - ) - if debugEncoder { - if len(src) > maxCompressedBlockSize { - panic("src too big") - } - } - - // Protect against e.cur wraparound. - if e.cur >= e.bufferReset { - for i := range e.table[:] { - e.table[i] = tableEntry{} - } - e.cur = e.maxMatchOff - } - - s := int32(0) - blk.size = len(src) - if len(src) < minNonLiteralBlockSize { - blk.extraLits = len(src) - blk.literals = blk.literals[:len(src)] - copy(blk.literals, src) - return - } - - sLimit := int32(len(src)) - inputMargin - // stepSize is the number of bytes to skip on every main loop iteration. - // It should be >= 2. - const stepSize = 2 - - // TEMPLATE - const hashLog = tableBits - // seems global, but would be nice to tweak. - const kSearchStrength = 6 - - // nextEmit is where in src the next emitLiteral should start from. - nextEmit := s - cv := load6432(src, s) - - // Relative offsets - offset1 := int32(blk.recentOffsets[0]) - offset2 := int32(blk.recentOffsets[1]) - - addLiterals := func(s *seq, until int32) { - if until == nextEmit { - return - } - blk.literals = append(blk.literals, src[nextEmit:until]...) - s.litLen = uint32(until - nextEmit) - } - if debugEncoder { - println("recent offsets:", blk.recentOffsets) - } - -encodeLoop: - for { - // t will contain the match offset when we find one. - // When existing the search loop, we have already checked 4 bytes. - var t int32 - - // We will not use repeat offsets across blocks. - // By not using them for the first 3 matches - - for { - nextHash := hashLen(cv, hashLog, tableFastHashLen) - nextHash2 := hashLen(cv>>8, hashLog, tableFastHashLen) - candidate := e.table[nextHash] - candidate2 := e.table[nextHash2] - repIndex := s - offset1 + 2 - - e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} - e.table[nextHash2] = tableEntry{offset: s + e.cur + 1, val: uint32(cv >> 8)} - - if len(blk.sequences) > 2 && load3232(src, repIndex) == uint32(cv>>16) { - // Consider history as well. - var seq seq - length := 4 + e.matchlen(s+6, repIndex+4, src) - - seq.matchLen = uint32(length - zstdMinMatch) - - // We might be able to match backwards. - // Extend as long as we can. - start := s + 2 - // We end the search early, so we don't risk 0 literals - // and have to do special offset treatment. 
- startLimit := nextEmit + 1 - - sMin := s - e.maxMatchOff - if sMin < 0 { - sMin = 0 - } - for repIndex > sMin && start > startLimit && src[repIndex-1] == src[start-1] { - repIndex-- - start-- - seq.matchLen++ - } - addLiterals(&seq, start) - - // rep 0 - seq.offset = 1 - if debugSequences { - println("repeat sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - s += length + 2 - nextEmit = s - if s >= sLimit { - if debugEncoder { - println("repeat ended", s, length) - - } - break encodeLoop - } - cv = load6432(src, s) - continue - } - coffset0 := s - (candidate.offset - e.cur) - coffset1 := s - (candidate2.offset - e.cur) + 1 - if coffset0 < e.maxMatchOff && uint32(cv) == candidate.val { - // found a regular match - t = candidate.offset - e.cur - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugAsserts && t < 0 { - panic(fmt.Sprintf("t (%d) < 0, candidate.offset: %d, e.cur: %d, coffset0: %d, e.maxMatchOff: %d", t, candidate.offset, e.cur, coffset0, e.maxMatchOff)) - } - break - } - - if coffset1 < e.maxMatchOff && uint32(cv>>8) == candidate2.val { - // found a regular match - t = candidate2.offset - e.cur - s++ - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugAsserts && t < 0 { - panic("t<0") - } - break - } - s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) - if s >= sLimit { - break encodeLoop - } - cv = load6432(src, s) - } - // A 4-byte match has been found. We'll later see if more than 4 bytes. - offset2 = offset1 - offset1 = s - t - - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - - if debugAsserts && t < 0 { - panic(fmt.Sprintf("t (%d) < 0 ", t)) - } - // Extend the 4-byte match as long as possible. - l := e.matchlen(s+4, t+4, src) + 4 - - // Extend backwards - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } - for t > tMin && s > nextEmit && src[t-1] == src[s-1] { - s-- - t-- - l++ - } - - // Write our sequence. - var seq seq - seq.litLen = uint32(s - nextEmit) - seq.matchLen = uint32(l - zstdMinMatch) - if seq.litLen > 0 { - blk.literals = append(blk.literals, src[nextEmit:s]...) - } - // Don't use repeat offsets - seq.offset = uint32(s-t) + 3 - s += l - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - nextEmit = s - if s >= sLimit { - break encodeLoop - } - cv = load6432(src, s) - - // Check offset 2 - if o2 := s - offset2; len(blk.sequences) > 2 && load3232(src, o2) == uint32(cv) { - // We have at least 4 byte match. - // No need to check backwards. We come straight from a match - l := 4 + e.matchlen(s+4, o2+4, src) - - // Store this, since we have it. - nextHash := hashLen(cv, hashLog, tableFastHashLen) - e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} - seq.matchLen = uint32(l) - zstdMinMatch - seq.litLen = 0 - // Since litlen is always 0, this is offset 1. - seq.offset = 1 - s += l - nextEmit = s - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - - // Swap offset 1 and 2. - offset1, offset2 = offset2, offset1 - if s >= sLimit { - break encodeLoop - } - // Prepare next loop. - cv = load6432(src, s) - } - } - - if int(nextEmit) < len(src) { - blk.literals = append(blk.literals, src[nextEmit:]...) 
- blk.extraLits = len(src) - int(nextEmit) - } - if debugEncoder { - println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) - } - // We do not store history, so we must offset e.cur to avoid false matches for next user. - if e.cur < e.bufferReset { - e.cur += int32(len(src)) - } -} - -// Encode will encode the content, with a dictionary if initialized for it. -func (e *fastEncoderDict) Encode(blk *blockEnc, src []byte) { - const ( - inputMargin = 8 - minNonLiteralBlockSize = 1 + 1 + inputMargin - ) - if e.allDirty || len(src) > 32<<10 { - e.fastEncoder.Encode(blk, src) - e.allDirty = true - return - } - // Protect against e.cur wraparound. - for e.cur >= e.bufferReset-int32(len(e.hist)) { - if len(e.hist) == 0 { - e.table = [tableSize]tableEntry{} - e.cur = e.maxMatchOff - break - } - // Shift down everything in the table that isn't already too far away. - minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff - for i := range e.table[:] { - v := e.table[i].offset - if v < minOff { - v = 0 - } else { - v = v - e.cur + e.maxMatchOff - } - e.table[i].offset = v - } - e.cur = e.maxMatchOff - break - } - - s := e.addBlock(src) - blk.size = len(src) - if len(src) < minNonLiteralBlockSize { - blk.extraLits = len(src) - blk.literals = blk.literals[:len(src)] - copy(blk.literals, src) - return - } - - // Override src - src = e.hist - sLimit := int32(len(src)) - inputMargin - // stepSize is the number of bytes to skip on every main loop iteration. - // It should be >= 2. - const stepSize = 2 - - // TEMPLATE - const hashLog = tableBits - // seems global, but would be nice to tweak. - const kSearchStrength = 7 - - // nextEmit is where in src the next emitLiteral should start from. - nextEmit := s - cv := load6432(src, s) - - // Relative offsets - offset1 := int32(blk.recentOffsets[0]) - offset2 := int32(blk.recentOffsets[1]) - - addLiterals := func(s *seq, until int32) { - if until == nextEmit { - return - } - blk.literals = append(blk.literals, src[nextEmit:until]...) - s.litLen = uint32(until - nextEmit) - } - if debugEncoder { - println("recent offsets:", blk.recentOffsets) - } - -encodeLoop: - for { - // t will contain the match offset when we find one. - // When existing the search loop, we have already checked 4 bytes. - var t int32 - - // We will not use repeat offsets across blocks. - // By not using them for the first 3 matches - canRepeat := len(blk.sequences) > 2 - - for { - if debugAsserts && canRepeat && offset1 == 0 { - panic("offset0 was 0") - } - - nextHash := hashLen(cv, hashLog, tableFastHashLen) - nextHash2 := hashLen(cv>>8, hashLog, tableFastHashLen) - candidate := e.table[nextHash] - candidate2 := e.table[nextHash2] - repIndex := s - offset1 + 2 - - e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} - e.markShardDirty(nextHash) - e.table[nextHash2] = tableEntry{offset: s + e.cur + 1, val: uint32(cv >> 8)} - e.markShardDirty(nextHash2) - - if canRepeat && repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>16) { - // Consider history as well. - var seq seq - length := 4 + e.matchlen(s+6, repIndex+4, src) - - seq.matchLen = uint32(length - zstdMinMatch) - - // We might be able to match backwards. - // Extend as long as we can. - start := s + 2 - // We end the search early, so we don't risk 0 literals - // and have to do special offset treatment. 
- startLimit := nextEmit + 1 - - sMin := s - e.maxMatchOff - if sMin < 0 { - sMin = 0 - } - for repIndex > sMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch { - repIndex-- - start-- - seq.matchLen++ - } - addLiterals(&seq, start) - - // rep 0 - seq.offset = 1 - if debugSequences { - println("repeat sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - s += length + 2 - nextEmit = s - if s >= sLimit { - if debugEncoder { - println("repeat ended", s, length) - - } - break encodeLoop - } - cv = load6432(src, s) - continue - } - coffset0 := s - (candidate.offset - e.cur) - coffset1 := s - (candidate2.offset - e.cur) + 1 - if coffset0 < e.maxMatchOff && uint32(cv) == candidate.val { - // found a regular match - t = candidate.offset - e.cur - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - break - } - - if coffset1 < e.maxMatchOff && uint32(cv>>8) == candidate2.val { - // found a regular match - t = candidate2.offset - e.cur - s++ - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugAsserts && t < 0 { - panic("t<0") - } - break - } - s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) - if s >= sLimit { - break encodeLoop - } - cv = load6432(src, s) - } - // A 4-byte match has been found. We'll later see if more than 4 bytes. - offset2 = offset1 - offset1 = s - t - - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - - if debugAsserts && canRepeat && int(offset1) > len(src) { - panic("invalid offset") - } - - // Extend the 4-byte match as long as possible. - l := e.matchlen(s+4, t+4, src) + 4 - - // Extend backwards - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } - for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { - s-- - t-- - l++ - } - - // Write our sequence. - var seq seq - seq.litLen = uint32(s - nextEmit) - seq.matchLen = uint32(l - zstdMinMatch) - if seq.litLen > 0 { - blk.literals = append(blk.literals, src[nextEmit:s]...) - } - // Don't use repeat offsets - seq.offset = uint32(s-t) + 3 - s += l - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - nextEmit = s - if s >= sLimit { - break encodeLoop - } - cv = load6432(src, s) - - // Check offset 2 - if o2 := s - offset2; canRepeat && load3232(src, o2) == uint32(cv) { - // We have at least 4 byte match. - // No need to check backwards. We come straight from a match - l := 4 + e.matchlen(s+4, o2+4, src) - - // Store this, since we have it. - nextHash := hashLen(cv, hashLog, tableFastHashLen) - e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} - e.markShardDirty(nextHash) - seq.matchLen = uint32(l) - zstdMinMatch - seq.litLen = 0 - // Since litlen is always 0, this is offset 1. - seq.offset = 1 - s += l - nextEmit = s - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - - // Swap offset 1 and 2. - offset1, offset2 = offset2, offset1 - if s >= sLimit { - break encodeLoop - } - // Prepare next loop. - cv = load6432(src, s) - } - } - - if int(nextEmit) < len(src) { - blk.literals = append(blk.literals, src[nextEmit:]...) 
- blk.extraLits = len(src) - int(nextEmit) - } - blk.recentOffsets[0] = uint32(offset1) - blk.recentOffsets[1] = uint32(offset2) - if debugEncoder { - println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) - } -} - -// ResetDict will reset and set a dictionary if not nil -func (e *fastEncoder) Reset(d *dict, singleBlock bool) { - e.resetBase(d, singleBlock) - if d != nil { - panic("fastEncoder: Reset with dict") - } -} - -// ResetDict will reset and set a dictionary if not nil -func (e *fastEncoderDict) Reset(d *dict, singleBlock bool) { - e.resetBase(d, singleBlock) - if d == nil { - return - } - - // Init or copy dict table - if len(e.dictTable) != len(e.table) || d.id != e.lastDictID { - if len(e.dictTable) != len(e.table) { - e.dictTable = make([]tableEntry, len(e.table)) - } - if true { - end := e.maxMatchOff + int32(len(d.content)) - 8 - for i := e.maxMatchOff; i < end; i += 2 { - const hashLog = tableBits - - cv := load6432(d.content, i-e.maxMatchOff) - nextHash := hashLen(cv, hashLog, tableFastHashLen) // 0 -> 6 - nextHash1 := hashLen(cv>>8, hashLog, tableFastHashLen) // 1 -> 7 - e.dictTable[nextHash] = tableEntry{ - val: uint32(cv), - offset: i, - } - e.dictTable[nextHash1] = tableEntry{ - val: uint32(cv >> 8), - offset: i + 1, - } - } - } - e.lastDictID = d.id - e.allDirty = true - } - - e.cur = e.maxMatchOff - dirtyShardCnt := 0 - if !e.allDirty { - for i := range e.tableShardDirty { - if e.tableShardDirty[i] { - dirtyShardCnt++ - } - } - } - - const shardCnt = tableShardCnt - const shardSize = tableShardSize - if e.allDirty || dirtyShardCnt > shardCnt*4/6 { - //copy(e.table[:], e.dictTable) - e.table = *(*[tableSize]tableEntry)(e.dictTable) - for i := range e.tableShardDirty { - e.tableShardDirty[i] = false - } - e.allDirty = false - return - } - for i := range e.tableShardDirty { - if !e.tableShardDirty[i] { - continue - } - - //copy(e.table[i*shardSize:(i+1)*shardSize], e.dictTable[i*shardSize:(i+1)*shardSize]) - *(*[shardSize]tableEntry)(e.table[i*shardSize:]) = *(*[shardSize]tableEntry)(e.dictTable[i*shardSize:]) - e.tableShardDirty[i] = false - } - e.allDirty = false -} - -func (e *fastEncoderDict) markAllShardsDirty() { - e.allDirty = true -} - -func (e *fastEncoderDict) markShardDirty(entryNum uint32) { - e.tableShardDirty[entryNum/tableShardSize] = true -} diff --git a/vendor/github.com/klauspost/compress/zstd/encoder.go b/vendor/github.com/klauspost/compress/zstd/encoder.go deleted file mode 100644 index 72af7ef0f..000000000 --- a/vendor/github.com/klauspost/compress/zstd/encoder.go +++ /dev/null @@ -1,619 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -import ( - "crypto/rand" - "fmt" - "io" - "math" - rdebug "runtime/debug" - "sync" - - "github.com/klauspost/compress/zstd/internal/xxhash" -) - -// Encoder provides encoding to Zstandard. -// An Encoder can be used for either compressing a stream via the -// io.WriteCloser interface supported by the Encoder or as multiple independent -// tasks via the EncodeAll function. -// Smaller encodes are encouraged to use the EncodeAll function. -// Use NewWriter to create a new instance. 
-type Encoder struct { - o encoderOptions - encoders chan encoder - state encoderState - init sync.Once -} - -type encoder interface { - Encode(blk *blockEnc, src []byte) - EncodeNoHist(blk *blockEnc, src []byte) - Block() *blockEnc - CRC() *xxhash.Digest - AppendCRC([]byte) []byte - WindowSize(size int64) int32 - UseBlock(*blockEnc) - Reset(d *dict, singleBlock bool) -} - -type encoderState struct { - w io.Writer - filling []byte - current []byte - previous []byte - encoder encoder - writing *blockEnc - err error - writeErr error - nWritten int64 - nInput int64 - frameContentSize int64 - headerWritten bool - eofWritten bool - fullFrameWritten bool - - // This waitgroup indicates an encode is running. - wg sync.WaitGroup - // This waitgroup indicates we have a block encoding/writing. - wWg sync.WaitGroup -} - -// NewWriter will create a new Zstandard encoder. -// If the encoder will be used for encoding blocks a nil writer can be used. -func NewWriter(w io.Writer, opts ...EOption) (*Encoder, error) { - initPredefined() - var e Encoder - e.o.setDefault() - for _, o := range opts { - err := o(&e.o) - if err != nil { - return nil, err - } - } - if w != nil { - e.Reset(w) - } - return &e, nil -} - -func (e *Encoder) initialize() { - if e.o.concurrent == 0 { - e.o.setDefault() - } - e.encoders = make(chan encoder, e.o.concurrent) - for i := 0; i < e.o.concurrent; i++ { - enc := e.o.encoder() - e.encoders <- enc - } -} - -// Reset will re-initialize the writer and new writes will encode to the supplied writer -// as a new, independent stream. -func (e *Encoder) Reset(w io.Writer) { - s := &e.state - s.wg.Wait() - s.wWg.Wait() - if cap(s.filling) == 0 { - s.filling = make([]byte, 0, e.o.blockSize) - } - if e.o.concurrent > 1 { - if cap(s.current) == 0 { - s.current = make([]byte, 0, e.o.blockSize) - } - if cap(s.previous) == 0 { - s.previous = make([]byte, 0, e.o.blockSize) - } - s.current = s.current[:0] - s.previous = s.previous[:0] - if s.writing == nil { - s.writing = &blockEnc{lowMem: e.o.lowMem} - s.writing.init() - } - s.writing.initNewEncode() - } - if s.encoder == nil { - s.encoder = e.o.encoder() - } - s.filling = s.filling[:0] - s.encoder.Reset(e.o.dict, false) - s.headerWritten = false - s.eofWritten = false - s.fullFrameWritten = false - s.w = w - s.err = nil - s.nWritten = 0 - s.nInput = 0 - s.writeErr = nil - s.frameContentSize = 0 -} - -// ResetContentSize will reset and set a content size for the next stream. -// If the bytes written does not match the size given an error will be returned -// when calling Close(). -// This is removed when Reset is called. -// Sizes <= 0 results in no content size set. -func (e *Encoder) ResetContentSize(w io.Writer, size int64) { - e.Reset(w) - if size >= 0 { - e.state.frameContentSize = size - } -} - -// Write data to the encoder. -// Input data will be buffered and as the buffer fills up -// content will be compressed and written to the output. -// When done writing, use Close to flush the remaining output -// and write CRC if requested. -func (e *Encoder) Write(p []byte) (n int, err error) { - s := &e.state - for len(p) > 0 { - if len(p)+len(s.filling) < e.o.blockSize { - if e.o.crc { - _, _ = s.encoder.CRC().Write(p) - } - s.filling = append(s.filling, p...) - return n + len(p), nil - } - add := p - if len(p)+len(s.filling) > e.o.blockSize { - add = add[:e.o.blockSize-len(s.filling)] - } - if e.o.crc { - _, _ = s.encoder.CRC().Write(add) - } - s.filling = append(s.filling, add...) 
- p = p[len(add):] - n += len(add) - if len(s.filling) < e.o.blockSize { - return n, nil - } - err := e.nextBlock(false) - if err != nil { - return n, err - } - if debugAsserts && len(s.filling) > 0 { - panic(len(s.filling)) - } - } - return n, nil -} - -// nextBlock will synchronize and start compressing input in e.state.filling. -// If an error has occurred during encoding it will be returned. -func (e *Encoder) nextBlock(final bool) error { - s := &e.state - // Wait for current block. - s.wg.Wait() - if s.err != nil { - return s.err - } - if len(s.filling) > e.o.blockSize { - return fmt.Errorf("block > maxStoreBlockSize") - } - if !s.headerWritten { - // If we have a single block encode, do a sync compression. - if final && len(s.filling) == 0 && !e.o.fullZero { - s.headerWritten = true - s.fullFrameWritten = true - s.eofWritten = true - return nil - } - if final && len(s.filling) > 0 { - s.current = e.EncodeAll(s.filling, s.current[:0]) - var n2 int - n2, s.err = s.w.Write(s.current) - if s.err != nil { - return s.err - } - s.nWritten += int64(n2) - s.nInput += int64(len(s.filling)) - s.current = s.current[:0] - s.filling = s.filling[:0] - s.headerWritten = true - s.fullFrameWritten = true - s.eofWritten = true - return nil - } - - var tmp [maxHeaderSize]byte - fh := frameHeader{ - ContentSize: uint64(s.frameContentSize), - WindowSize: uint32(s.encoder.WindowSize(s.frameContentSize)), - SingleSegment: false, - Checksum: e.o.crc, - DictID: e.o.dict.ID(), - } - - dst := fh.appendTo(tmp[:0]) - s.headerWritten = true - s.wWg.Wait() - var n2 int - n2, s.err = s.w.Write(dst) - if s.err != nil { - return s.err - } - s.nWritten += int64(n2) - } - if s.eofWritten { - // Ensure we only write it once. - final = false - } - - if len(s.filling) == 0 { - // Final block, but no data. - if final { - enc := s.encoder - blk := enc.Block() - blk.reset(nil) - blk.last = true - blk.encodeRaw(nil) - s.wWg.Wait() - _, s.err = s.w.Write(blk.output) - s.nWritten += int64(len(blk.output)) - s.eofWritten = true - } - return s.err - } - - // SYNC: - if e.o.concurrent == 1 { - src := s.filling - s.nInput += int64(len(s.filling)) - if debugEncoder { - println("Adding sync block,", len(src), "bytes, final:", final) - } - enc := s.encoder - blk := enc.Block() - blk.reset(nil) - enc.Encode(blk, src) - blk.last = final - if final { - s.eofWritten = true - } - - s.err = blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy) - if s.err != nil { - return s.err - } - _, s.err = s.w.Write(blk.output) - s.nWritten += int64(len(blk.output)) - s.filling = s.filling[:0] - return s.err - } - - // Move blocks forward. - s.filling, s.current, s.previous = s.previous[:0], s.filling, s.current - s.nInput += int64(len(s.current)) - s.wg.Add(1) - go func(src []byte) { - if debugEncoder { - println("Adding block,", len(src), "bytes, final:", final) - } - defer func() { - if r := recover(); r != nil { - s.err = fmt.Errorf("panic while encoding: %v", r) - rdebug.PrintStack() - } - s.wg.Done() - }() - enc := s.encoder - blk := enc.Block() - enc.Encode(blk, src) - blk.last = final - if final { - s.eofWritten = true - } - // Wait for pending writes. - s.wWg.Wait() - if s.writeErr != nil { - s.err = s.writeErr - return - } - // Transfer encoders from previous write block. - blk.swapEncoders(s.writing) - // Transfer recent offsets to next. 
- enc.UseBlock(s.writing) - s.writing = blk - s.wWg.Add(1) - go func() { - defer func() { - if r := recover(); r != nil { - s.writeErr = fmt.Errorf("panic while encoding/writing: %v", r) - rdebug.PrintStack() - } - s.wWg.Done() - }() - s.writeErr = blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy) - if s.writeErr != nil { - return - } - _, s.writeErr = s.w.Write(blk.output) - s.nWritten += int64(len(blk.output)) - }() - }(s.current) - return nil -} - -// ReadFrom reads data from r until EOF or error. -// The return value n is the number of bytes read. -// Any error except io.EOF encountered during the read is also returned. -// -// The Copy function uses ReaderFrom if available. -func (e *Encoder) ReadFrom(r io.Reader) (n int64, err error) { - if debugEncoder { - println("Using ReadFrom") - } - - // Flush any current writes. - if len(e.state.filling) > 0 { - if err := e.nextBlock(false); err != nil { - return 0, err - } - } - e.state.filling = e.state.filling[:e.o.blockSize] - src := e.state.filling - for { - n2, err := r.Read(src) - if e.o.crc { - _, _ = e.state.encoder.CRC().Write(src[:n2]) - } - // src is now the unfilled part... - src = src[n2:] - n += int64(n2) - switch err { - case io.EOF: - e.state.filling = e.state.filling[:len(e.state.filling)-len(src)] - if debugEncoder { - println("ReadFrom: got EOF final block:", len(e.state.filling)) - } - return n, nil - case nil: - default: - if debugEncoder { - println("ReadFrom: got error:", err) - } - e.state.err = err - return n, err - } - if len(src) > 0 { - if debugEncoder { - println("ReadFrom: got space left in source:", len(src)) - } - continue - } - err = e.nextBlock(false) - if err != nil { - return n, err - } - e.state.filling = e.state.filling[:e.o.blockSize] - src = e.state.filling - } -} - -// Flush will send the currently written data to output -// and block until everything has been written. -// This should only be used on rare occasions where pushing the currently queued data is critical. -func (e *Encoder) Flush() error { - s := &e.state - if len(s.filling) > 0 { - err := e.nextBlock(false) - if err != nil { - return err - } - } - s.wg.Wait() - s.wWg.Wait() - if s.err != nil { - return s.err - } - return s.writeErr -} - -// Close will flush the final output and close the stream. -// The function will block until everything has been written. -// The Encoder can still be re-used after calling this. -func (e *Encoder) Close() error { - s := &e.state - if s.encoder == nil { - return nil - } - err := e.nextBlock(true) - if err != nil { - return err - } - if s.frameContentSize > 0 { - if s.nInput != s.frameContentSize { - return fmt.Errorf("frame content size %d given, but %d bytes was written", s.frameContentSize, s.nInput) - } - } - if e.state.fullFrameWritten { - return s.err - } - s.wg.Wait() - s.wWg.Wait() - - if s.err != nil { - return s.err - } - if s.writeErr != nil { - return s.writeErr - } - - // Write CRC - if e.o.crc && s.err == nil { - // heap alloc. - var tmp [4]byte - _, s.err = s.w.Write(s.encoder.AppendCRC(tmp[:0])) - s.nWritten += 4 - } - - // Add padding with content from crypto/rand.Reader - if s.err == nil && e.o.pad > 0 { - add := calcSkippableFrame(s.nWritten, int64(e.o.pad)) - frame, err := skippableFrame(s.filling[:0], add, rand.Reader) - if err != nil { - return err - } - _, s.err = s.w.Write(frame) - } - return s.err -} - -// EncodeAll will encode all input in src and append it to dst. -// This function can be called concurrently, but each call will only run on a single goroutine. 
-// If empty input is given, nothing is returned, unless WithZeroFrames is specified. -// Encoded blocks can be concatenated and the result will be the combined input stream. -// Data compressed with EncodeAll can be decoded with the Decoder, -// using either a stream or DecodeAll. -func (e *Encoder) EncodeAll(src, dst []byte) []byte { - if len(src) == 0 { - if e.o.fullZero { - // Add frame header. - fh := frameHeader{ - ContentSize: 0, - WindowSize: MinWindowSize, - SingleSegment: true, - // Adding a checksum would be a waste of space. - Checksum: false, - DictID: 0, - } - dst = fh.appendTo(dst) - - // Write raw block as last one only. - var blk blockHeader - blk.setSize(0) - blk.setType(blockTypeRaw) - blk.setLast(true) - dst = blk.appendTo(dst) - } - return dst - } - e.init.Do(e.initialize) - enc := <-e.encoders - defer func() { - // Release encoder reference to last block. - // If a non-single block is needed the encoder will reset again. - e.encoders <- enc - }() - // Use single segments when above minimum window and below window size. - single := len(src) <= e.o.windowSize && len(src) > MinWindowSize - if e.o.single != nil { - single = *e.o.single - } - fh := frameHeader{ - ContentSize: uint64(len(src)), - WindowSize: uint32(enc.WindowSize(int64(len(src)))), - SingleSegment: single, - Checksum: e.o.crc, - DictID: e.o.dict.ID(), - } - - // If less than 1MB, allocate a buffer up front. - if len(dst) == 0 && cap(dst) == 0 && len(src) < 1<<20 && !e.o.lowMem { - dst = make([]byte, 0, len(src)) - } - dst = fh.appendTo(dst) - - // If we can do everything in one block, prefer that. - if len(src) <= e.o.blockSize { - enc.Reset(e.o.dict, true) - // Slightly faster with no history and everything in one block. - if e.o.crc { - _, _ = enc.CRC().Write(src) - } - blk := enc.Block() - blk.last = true - if e.o.dict == nil { - enc.EncodeNoHist(blk, src) - } else { - enc.Encode(blk, src) - } - - // If we got the exact same number of literals as input, - // assume the literals cannot be compressed. - oldout := blk.output - // Output directly to dst - blk.output = dst - - err := blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy) - if err != nil { - panic(err) - } - dst = blk.output - blk.output = oldout - } else { - enc.Reset(e.o.dict, false) - blk := enc.Block() - for len(src) > 0 { - todo := src - if len(todo) > e.o.blockSize { - todo = todo[:e.o.blockSize] - } - src = src[len(todo):] - if e.o.crc { - _, _ = enc.CRC().Write(todo) - } - blk.pushOffsets() - enc.Encode(blk, todo) - if len(src) == 0 { - blk.last = true - } - err := blk.encode(todo, e.o.noEntropy, !e.o.allLitEntropy) - if err != nil { - panic(err) - } - dst = append(dst, blk.output...) - blk.reset(nil) - } - } - if e.o.crc { - dst = enc.AppendCRC(dst) - } - // Add padding with content from crypto/rand.Reader - if e.o.pad > 0 { - add := calcSkippableFrame(int64(len(dst)), int64(e.o.pad)) - var err error - dst, err = skippableFrame(dst, add, rand.Reader) - if err != nil { - panic(err) - } - } - return dst -} - -// MaxEncodedSize returns the expected maximum -// size of an encoded block or stream. -func (e *Encoder) MaxEncodedSize(size int) int { - frameHeader := 4 + 2 // magic + frame header & window descriptor - if e.o.dict != nil { - frameHeader += 4 - } - // Frame content size: - if size < 256 { - frameHeader++ - } else if size < 65536+256 { - frameHeader += 2 - } else if size < math.MaxInt32 { - frameHeader += 4 - } else { - frameHeader += 8 - } - // Final crc - if e.o.crc { - frameHeader += 4 - } - - // Max overhead is 3 bytes/block. 
- // There cannot be 0 blocks. - blocks := (size + e.o.blockSize) / e.o.blockSize - - // Combine, add padding. - maxSz := frameHeader + 3*blocks + size - if e.o.pad > 1 { - maxSz += calcSkippableFrame(int64(maxSz), int64(e.o.pad)) - } - return maxSz -} diff --git a/vendor/github.com/klauspost/compress/zstd/encoder_options.go b/vendor/github.com/klauspost/compress/zstd/encoder_options.go deleted file mode 100644 index 20671dcb9..000000000 --- a/vendor/github.com/klauspost/compress/zstd/encoder_options.go +++ /dev/null @@ -1,339 +0,0 @@ -package zstd - -import ( - "errors" - "fmt" - "math" - "math/bits" - "runtime" - "strings" -) - -// EOption is an option for creating a encoder. -type EOption func(*encoderOptions) error - -// options retains accumulated state of multiple options. -type encoderOptions struct { - concurrent int - level EncoderLevel - single *bool - pad int - blockSize int - windowSize int - crc bool - fullZero bool - noEntropy bool - allLitEntropy bool - customWindow bool - customALEntropy bool - customBlockSize bool - lowMem bool - dict *dict -} - -func (o *encoderOptions) setDefault() { - *o = encoderOptions{ - concurrent: runtime.GOMAXPROCS(0), - crc: true, - single: nil, - blockSize: maxCompressedBlockSize, - windowSize: 8 << 20, - level: SpeedDefault, - allLitEntropy: false, - lowMem: false, - } -} - -// encoder returns an encoder with the selected options. -func (o encoderOptions) encoder() encoder { - switch o.level { - case SpeedFastest: - if o.dict != nil { - return &fastEncoderDict{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}} - } - return &fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}} - - case SpeedDefault: - if o.dict != nil { - return &doubleFastEncoderDict{fastEncoderDict: fastEncoderDict{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}}} - } - return &doubleFastEncoder{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}} - case SpeedBetterCompression: - if o.dict != nil { - return &betterFastEncoderDict{betterFastEncoder: betterFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}} - } - return &betterFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}} - case SpeedBestCompression: - return &bestFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}} - } - panic("unknown compression level") -} - -// WithEncoderCRC will add CRC value to output. -// Output will be 4 bytes larger. -func WithEncoderCRC(b bool) EOption { - return func(o *encoderOptions) error { o.crc = b; return nil } -} - -// WithEncoderConcurrency will set the concurrency, -// meaning the maximum number of encoders to run concurrently. -// The value supplied must be at least 1. -// For streams, setting a value of 1 will disable async compression. -// By default this will be set to GOMAXPROCS. 
-func WithEncoderConcurrency(n int) EOption { - return func(o *encoderOptions) error { - if n <= 0 { - return fmt.Errorf("concurrency must be at least 1") - } - o.concurrent = n - return nil - } -} - -// WithWindowSize will set the maximum allowed back-reference distance. -// The value must be a power of two between MinWindowSize and MaxWindowSize. -// A larger value will enable better compression but allocate more memory and, -// for above-default values, take considerably longer. -// The default value is determined by the compression level and max 8MB. -func WithWindowSize(n int) EOption { - return func(o *encoderOptions) error { - switch { - case n < MinWindowSize: - return fmt.Errorf("window size must be at least %d", MinWindowSize) - case n > MaxWindowSize: - return fmt.Errorf("window size must be at most %d", MaxWindowSize) - case (n & (n - 1)) != 0: - return errors.New("window size must be a power of 2") - } - - o.windowSize = n - o.customWindow = true - if o.blockSize > o.windowSize { - o.blockSize = o.windowSize - o.customBlockSize = true - } - return nil - } -} - -// WithEncoderPadding will add padding to all output so the size will be a multiple of n. -// This can be used to obfuscate the exact output size or make blocks of a certain size. -// The contents will be a skippable frame, so it will be invisible by the decoder. -// n must be > 0 and <= 1GB, 1<<30 bytes. -// The padded area will be filled with data from crypto/rand.Reader. -// If `EncodeAll` is used with data already in the destination, the total size will be multiple of this. -func WithEncoderPadding(n int) EOption { - return func(o *encoderOptions) error { - if n <= 0 { - return fmt.Errorf("padding must be at least 1") - } - // No need to waste our time. - if n == 1 { - n = 0 - } - if n > 1<<30 { - return fmt.Errorf("padding must less than 1GB (1<<30 bytes) ") - } - o.pad = n - return nil - } -} - -// EncoderLevel predefines encoder compression levels. -// Only use the constants made available, since the actual mapping -// of these values are very likely to change and your compression could change -// unpredictably when upgrading the library. -type EncoderLevel int - -const ( - speedNotSet EncoderLevel = iota - - // SpeedFastest will choose the fastest reasonable compression. - // This is roughly equivalent to the fastest Zstandard mode. - SpeedFastest - - // SpeedDefault is the default "pretty fast" compression option. - // This is roughly equivalent to the default Zstandard mode (level 3). - SpeedDefault - - // SpeedBetterCompression will yield better compression than the default. - // Currently it is about zstd level 7-8 with ~ 2x-3x the default CPU usage. - // By using this, notice that CPU usage may go up in the future. - SpeedBetterCompression - - // SpeedBestCompression will choose the best available compression option. - // This will offer the best compression no matter the CPU cost. - SpeedBestCompression - - // speedLast should be kept as the last actual compression option. - // The is not for external usage, but is used to keep track of the valid options. - speedLast -) - -// EncoderLevelFromString will convert a string representation of an encoding level back -// to a compression level. The compare is not case sensitive. -// If the string wasn't recognized, (false, SpeedDefault) will be returned. 
-func EncoderLevelFromString(s string) (bool, EncoderLevel) { - for l := speedNotSet + 1; l < speedLast; l++ { - if strings.EqualFold(s, l.String()) { - return true, l - } - } - return false, SpeedDefault -} - -// EncoderLevelFromZstd will return an encoder level that closest matches the compression -// ratio of a specific zstd compression level. -// Many input values will provide the same compression level. -func EncoderLevelFromZstd(level int) EncoderLevel { - switch { - case level < 3: - return SpeedFastest - case level >= 3 && level < 6: - return SpeedDefault - case level >= 6 && level < 10: - return SpeedBetterCompression - default: - return SpeedBestCompression - } -} - -// String provides a string representation of the compression level. -func (e EncoderLevel) String() string { - switch e { - case SpeedFastest: - return "fastest" - case SpeedDefault: - return "default" - case SpeedBetterCompression: - return "better" - case SpeedBestCompression: - return "best" - default: - return "invalid" - } -} - -// WithEncoderLevel specifies a predefined compression level. -func WithEncoderLevel(l EncoderLevel) EOption { - return func(o *encoderOptions) error { - switch { - case l <= speedNotSet || l >= speedLast: - return fmt.Errorf("unknown encoder level") - } - o.level = l - if !o.customWindow { - switch o.level { - case SpeedFastest: - o.windowSize = 4 << 20 - if !o.customBlockSize { - o.blockSize = 1 << 16 - } - case SpeedDefault: - o.windowSize = 8 << 20 - case SpeedBetterCompression: - o.windowSize = 8 << 20 - case SpeedBestCompression: - o.windowSize = 8 << 20 - } - } - if !o.customALEntropy { - o.allLitEntropy = l > SpeedDefault - } - - return nil - } -} - -// WithZeroFrames will encode 0 length input as full frames. -// This can be needed for compatibility with zstandard usage, -// but is not needed for this package. -func WithZeroFrames(b bool) EOption { - return func(o *encoderOptions) error { - o.fullZero = b - return nil - } -} - -// WithAllLitEntropyCompression will apply entropy compression if no matches are found. -// Disabling this will skip incompressible data faster, but in cases with no matches but -// skewed character distribution compression is lost. -// Default value depends on the compression level selected. -func WithAllLitEntropyCompression(b bool) EOption { - return func(o *encoderOptions) error { - o.customALEntropy = true - o.allLitEntropy = b - return nil - } -} - -// WithNoEntropyCompression will always skip entropy compression of literals. -// This can be useful if content has matches, but unlikely to benefit from entropy -// compression. Usually the slight speed improvement is not worth enabling this. -func WithNoEntropyCompression(b bool) EOption { - return func(o *encoderOptions) error { - o.noEntropy = b - return nil - } -} - -// WithSingleSegment will set the "single segment" flag when EncodeAll is used. -// If this flag is set, data must be regenerated within a single continuous memory segment. -// In this case, Window_Descriptor byte is skipped, but Frame_Content_Size is necessarily present. -// As a consequence, the decoder must allocate a memory segment of size equal or larger than size of your content. -// In order to preserve the decoder from unreasonable memory requirements, -// a decoder is allowed to reject a compressed frame which requests a memory size beyond decoder's authorized range. -// For broader compatibility, decoders are recommended to support memory sizes of at least 8 MB. 
-// This is only a recommendation, each decoder is free to support higher or lower limits, depending on local limitations. -// If this is not specified, block encodes will automatically choose this based on the input size and the window size. -// This setting has no effect on streamed encodes. -func WithSingleSegment(b bool) EOption { - return func(o *encoderOptions) error { - o.single = &b - return nil - } -} - -// WithLowerEncoderMem will trade in some memory cases trade less memory usage for -// slower encoding speed. -// This will not change the window size which is the primary function for reducing -// memory usage. See WithWindowSize. -func WithLowerEncoderMem(b bool) EOption { - return func(o *encoderOptions) error { - o.lowMem = b - return nil - } -} - -// WithEncoderDict allows to register a dictionary that will be used for the encode. -// -// The slice dict must be in the [dictionary format] produced by -// "zstd --train" from the Zstandard reference implementation. -// -// The encoder *may* choose to use no dictionary instead for certain payloads. -// -// [dictionary format]: https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary-format -func WithEncoderDict(dict []byte) EOption { - return func(o *encoderOptions) error { - d, err := loadDict(dict) - if err != nil { - return err - } - o.dict = d - return nil - } -} - -// WithEncoderDictRaw registers a dictionary that may be used by the encoder. -// -// The slice content may contain arbitrary data. It will be used as an initial -// history. -func WithEncoderDictRaw(id uint32, content []byte) EOption { - return func(o *encoderOptions) error { - if bits.UintSize > 32 && uint(len(content)) > dictMaxLength { - return fmt.Errorf("dictionary of size %d > 2GiB too large", len(content)) - } - o.dict = &dict{id: id, content: content, offsets: [3]int{1, 4, 8}} - return nil - } -} diff --git a/vendor/github.com/klauspost/compress/zstd/framedec.go b/vendor/github.com/klauspost/compress/zstd/framedec.go deleted file mode 100644 index 53e160f7e..000000000 --- a/vendor/github.com/klauspost/compress/zstd/framedec.go +++ /dev/null @@ -1,413 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -import ( - "encoding/binary" - "encoding/hex" - "errors" - "io" - - "github.com/klauspost/compress/zstd/internal/xxhash" -) - -type frameDec struct { - o decoderOptions - crc *xxhash.Digest - - WindowSize uint64 - - // Frame history passed between blocks - history history - - rawInput byteBuffer - - // Byte buffer that can be reused for small input blocks. - bBuf byteBuf - - FrameContentSize uint64 - - DictionaryID uint32 - HasCheckSum bool - SingleSegment bool -} - -const ( - // MinWindowSize is the minimum Window Size, which is 1 KB. - MinWindowSize = 1 << 10 - - // MaxWindowSize is the maximum encoder window size - // and the default decoder maximum window size. - MaxWindowSize = 1 << 29 -) - -const ( - frameMagic = "\x28\xb5\x2f\xfd" - skippableFrameMagic = "\x2a\x4d\x18" -) - -func newFrameDec(o decoderOptions) *frameDec { - if o.maxWindowSize > o.maxDecodedSize { - o.maxWindowSize = o.maxDecodedSize - } - d := frameDec{ - o: o, - } - return &d -} - -// reset will read the frame header and prepare for block decoding. -// If nothing can be read from the input, io.EOF will be returned. -// Any other error indicated that the stream contained data, but -// there was a problem. 
-func (d *frameDec) reset(br byteBuffer) error { - d.HasCheckSum = false - d.WindowSize = 0 - var signature [4]byte - for { - var err error - // Check if we can read more... - b, err := br.readSmall(1) - switch err { - case io.EOF, io.ErrUnexpectedEOF: - return io.EOF - case nil: - signature[0] = b[0] - default: - return err - } - // Read the rest, don't allow io.ErrUnexpectedEOF - b, err = br.readSmall(3) - switch err { - case io.EOF: - return io.EOF - case nil: - copy(signature[1:], b) - default: - return err - } - - if string(signature[1:4]) != skippableFrameMagic || signature[0]&0xf0 != 0x50 { - if debugDecoder { - println("Not skippable", hex.EncodeToString(signature[:]), hex.EncodeToString([]byte(skippableFrameMagic))) - } - // Break if not skippable frame. - break - } - // Read size to skip - b, err = br.readSmall(4) - if err != nil { - if debugDecoder { - println("Reading Frame Size", err) - } - return err - } - n := uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) - println("Skipping frame with", n, "bytes.") - err = br.skipN(int64(n)) - if err != nil { - if debugDecoder { - println("Reading discarded frame", err) - } - return err - } - } - if string(signature[:]) != frameMagic { - if debugDecoder { - println("Got magic numbers: ", signature, "want:", []byte(frameMagic)) - } - return ErrMagicMismatch - } - - // Read Frame_Header_Descriptor - fhd, err := br.readByte() - if err != nil { - if debugDecoder { - println("Reading Frame_Header_Descriptor", err) - } - return err - } - d.SingleSegment = fhd&(1<<5) != 0 - - if fhd&(1<<3) != 0 { - return errors.New("reserved bit set on frame header") - } - - // Read Window_Descriptor - // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#window_descriptor - d.WindowSize = 0 - if !d.SingleSegment { - wd, err := br.readByte() - if err != nil { - if debugDecoder { - println("Reading Window_Descriptor", err) - } - return err - } - printf("raw: %x, mantissa: %d, exponent: %d\n", wd, wd&7, wd>>3) - windowLog := 10 + (wd >> 3) - windowBase := uint64(1) << windowLog - windowAdd := (windowBase / 8) * uint64(wd&0x7) - d.WindowSize = windowBase + windowAdd - } - - // Read Dictionary_ID - // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary_id - d.DictionaryID = 0 - if size := fhd & 3; size != 0 { - if size == 3 { - size = 4 - } - - b, err := br.readSmall(int(size)) - if err != nil { - println("Reading Dictionary_ID", err) - return err - } - var id uint32 - switch len(b) { - case 1: - id = uint32(b[0]) - case 2: - id = uint32(b[0]) | (uint32(b[1]) << 8) - case 4: - id = uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) - } - if debugDecoder { - println("Dict size", size, "ID:", id) - } - d.DictionaryID = id - } - - // Read Frame_Content_Size - // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#frame_content_size - var fcsSize int - v := fhd >> 6 - switch v { - case 0: - if d.SingleSegment { - fcsSize = 1 - } - default: - fcsSize = 1 << v - } - d.FrameContentSize = fcsUnknown - if fcsSize > 0 { - b, err := br.readSmall(fcsSize) - if err != nil { - println("Reading Frame content", err) - return err - } - switch len(b) { - case 1: - d.FrameContentSize = uint64(b[0]) - case 2: - // When FCS_Field_Size is 2, the offset of 256 is added. 
- d.FrameContentSize = uint64(b[0]) | (uint64(b[1]) << 8) + 256 - case 4: - d.FrameContentSize = uint64(b[0]) | (uint64(b[1]) << 8) | (uint64(b[2]) << 16) | (uint64(b[3]) << 24) - case 8: - d1 := uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) - d2 := uint32(b[4]) | (uint32(b[5]) << 8) | (uint32(b[6]) << 16) | (uint32(b[7]) << 24) - d.FrameContentSize = uint64(d1) | (uint64(d2) << 32) - } - if debugDecoder { - println("Read FCS:", d.FrameContentSize) - } - } - - // Move this to shared. - d.HasCheckSum = fhd&(1<<2) != 0 - if d.HasCheckSum { - if d.crc == nil { - d.crc = xxhash.New() - } - d.crc.Reset() - } - - if d.WindowSize > d.o.maxWindowSize { - if debugDecoder { - printf("window size %d > max %d\n", d.WindowSize, d.o.maxWindowSize) - } - return ErrWindowSizeExceeded - } - - if d.WindowSize == 0 && d.SingleSegment { - // We may not need window in this case. - d.WindowSize = d.FrameContentSize - if d.WindowSize < MinWindowSize { - d.WindowSize = MinWindowSize - } - if d.WindowSize > d.o.maxDecodedSize { - if debugDecoder { - printf("window size %d > max %d\n", d.WindowSize, d.o.maxWindowSize) - } - return ErrDecoderSizeExceeded - } - } - - // The minimum Window_Size is 1 KB. - if d.WindowSize < MinWindowSize { - if debugDecoder { - println("got window size: ", d.WindowSize) - } - return ErrWindowSizeTooSmall - } - d.history.windowSize = int(d.WindowSize) - if !d.o.lowMem || d.history.windowSize < maxBlockSize { - // Alloc 2x window size if not low-mem, or window size below 2MB. - d.history.allocFrameBuffer = d.history.windowSize * 2 - } else { - if d.o.lowMem { - // Alloc with 1MB extra. - d.history.allocFrameBuffer = d.history.windowSize + maxBlockSize/2 - } else { - // Alloc with 2MB extra. - d.history.allocFrameBuffer = d.history.windowSize + maxBlockSize - } - } - - if debugDecoder { - println("Frame: Dict:", d.DictionaryID, "FrameContentSize:", d.FrameContentSize, "singleseg:", d.SingleSegment, "window:", d.WindowSize, "crc:", d.HasCheckSum) - } - - // history contains input - maybe we do something - d.rawInput = br - return nil -} - -// next will start decoding the next block from stream. -func (d *frameDec) next(block *blockDec) error { - if debugDecoder { - println("decoding new block") - } - err := block.reset(d.rawInput, d.WindowSize) - if err != nil { - println("block error:", err) - // Signal the frame decoder we have a problem. - block.sendErr(err) - return err - } - return nil -} - -// checkCRC will check the checksum, assuming the frame has one. -// Will return ErrCRCMismatch if crc check failed, otherwise nil. -func (d *frameDec) checkCRC() error { - // We can overwrite upper tmp now - buf, err := d.rawInput.readSmall(4) - if err != nil { - println("CRC missing?", err) - return err - } - - want := binary.LittleEndian.Uint32(buf[:4]) - got := uint32(d.crc.Sum64()) - - if got != want { - if debugDecoder { - printf("CRC check failed: got %08x, want %08x\n", got, want) - } - return ErrCRCMismatch - } - if debugDecoder { - printf("CRC ok %08x\n", got) - } - return nil -} - -// consumeCRC skips over the checksum, assuming the frame has one. -func (d *frameDec) consumeCRC() error { - _, err := d.rawInput.readSmall(4) - if err != nil { - println("CRC missing?", err) - } - return err -} - -// runDecoder will run the decoder for the remainder of the frame. -func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) { - saved := d.history.b - - // We use the history for output to avoid copying it. 
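Reviewer note on the removed frame decoder: `reset` derives the window size from the one-byte Window_Descriptor, exponent in the top five bits and a mantissa counted in eighths in the low three bits. A self-contained sketch of just that arithmetic, copied from the removed `reset`; `windowSizeFromDescriptor` is a local name used only for illustration.

```go
package main

import "fmt"

// windowSizeFromDescriptor repeats the Window_Descriptor math from the removed
// frameDec.reset: windowLog = 10 + exponent, plus a mantissa in eighths.
func windowSizeFromDescriptor(wd byte) uint64 {
	windowLog := 10 + (wd >> 3)
	windowBase := uint64(1) << windowLog
	windowAdd := (windowBase / 8) * uint64(wd&0x7)
	return windowBase + windowAdd
}

func main() {
	// 0x00 is the 1 KB minimum window; 0x88 decodes to a 128 MiB window.
	for _, wd := range []byte{0x00, 0x01, 0x58, 0x88} {
		fmt.Printf("wd=0x%02x -> window size %d bytes\n", wd, windowSizeFromDescriptor(wd))
	}
}
```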
- d.history.b = dst - d.history.ignoreBuffer = len(dst) - // Store input length, so we only check new data. - crcStart := len(dst) - d.history.decoders.maxSyncLen = 0 - if d.o.limitToCap { - d.history.decoders.maxSyncLen = uint64(cap(dst) - len(dst)) - } - if d.FrameContentSize != fcsUnknown { - if !d.o.limitToCap || d.FrameContentSize+uint64(len(dst)) < d.history.decoders.maxSyncLen { - d.history.decoders.maxSyncLen = d.FrameContentSize + uint64(len(dst)) - } - if d.history.decoders.maxSyncLen > d.o.maxDecodedSize { - if debugDecoder { - println("maxSyncLen:", d.history.decoders.maxSyncLen, "> maxDecodedSize:", d.o.maxDecodedSize) - } - return dst, ErrDecoderSizeExceeded - } - if debugDecoder { - println("maxSyncLen:", d.history.decoders.maxSyncLen) - } - if !d.o.limitToCap && uint64(cap(dst)) < d.history.decoders.maxSyncLen { - // Alloc for output - dst2 := make([]byte, len(dst), d.history.decoders.maxSyncLen+compressedBlockOverAlloc) - copy(dst2, dst) - dst = dst2 - } - } - var err error - for { - err = dec.reset(d.rawInput, d.WindowSize) - if err != nil { - break - } - if debugDecoder { - println("next block:", dec) - } - err = dec.decodeBuf(&d.history) - if err != nil { - break - } - if uint64(len(d.history.b)-crcStart) > d.o.maxDecodedSize { - println("runDecoder: maxDecodedSize exceeded", uint64(len(d.history.b)-crcStart), ">", d.o.maxDecodedSize) - err = ErrDecoderSizeExceeded - break - } - if d.o.limitToCap && len(d.history.b) > cap(dst) { - println("runDecoder: cap exceeded", uint64(len(d.history.b)), ">", cap(dst)) - err = ErrDecoderSizeExceeded - break - } - if uint64(len(d.history.b)-crcStart) > d.FrameContentSize { - println("runDecoder: FrameContentSize exceeded", uint64(len(d.history.b)-crcStart), ">", d.FrameContentSize) - err = ErrFrameSizeExceeded - break - } - if dec.Last { - break - } - if debugDecoder { - println("runDecoder: FrameContentSize", uint64(len(d.history.b)-crcStart), "<=", d.FrameContentSize) - } - } - dst = d.history.b - if err == nil { - if d.FrameContentSize != fcsUnknown && uint64(len(d.history.b)-crcStart) != d.FrameContentSize { - err = ErrFrameSizeMismatch - } else if d.HasCheckSum { - if d.o.ignoreChecksum { - err = d.consumeCRC() - } else { - d.crc.Write(dst[crcStart:]) - err = d.checkCRC() - } - } - } - d.history.b = saved - return dst, err -} diff --git a/vendor/github.com/klauspost/compress/zstd/frameenc.go b/vendor/github.com/klauspost/compress/zstd/frameenc.go deleted file mode 100644 index 667ca0679..000000000 --- a/vendor/github.com/klauspost/compress/zstd/frameenc.go +++ /dev/null @@ -1,137 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -import ( - "encoding/binary" - "fmt" - "io" - "math" - "math/bits" -) - -type frameHeader struct { - ContentSize uint64 - WindowSize uint32 - SingleSegment bool - Checksum bool - DictID uint32 -} - -const maxHeaderSize = 14 - -func (f frameHeader) appendTo(dst []byte) []byte { - dst = append(dst, frameMagic...) 
- var fhd uint8 - if f.Checksum { - fhd |= 1 << 2 - } - if f.SingleSegment { - fhd |= 1 << 5 - } - - var dictIDContent []byte - if f.DictID > 0 { - var tmp [4]byte - if f.DictID < 256 { - fhd |= 1 - tmp[0] = uint8(f.DictID) - dictIDContent = tmp[:1] - } else if f.DictID < 1<<16 { - fhd |= 2 - binary.LittleEndian.PutUint16(tmp[:2], uint16(f.DictID)) - dictIDContent = tmp[:2] - } else { - fhd |= 3 - binary.LittleEndian.PutUint32(tmp[:4], f.DictID) - dictIDContent = tmp[:4] - } - } - var fcs uint8 - if f.ContentSize >= 256 { - fcs++ - } - if f.ContentSize >= 65536+256 { - fcs++ - } - if f.ContentSize >= 0xffffffff { - fcs++ - } - - fhd |= fcs << 6 - - dst = append(dst, fhd) - if !f.SingleSegment { - const winLogMin = 10 - windowLog := (bits.Len32(f.WindowSize-1) - winLogMin) << 3 - dst = append(dst, uint8(windowLog)) - } - if f.DictID > 0 { - dst = append(dst, dictIDContent...) - } - switch fcs { - case 0: - if f.SingleSegment { - dst = append(dst, uint8(f.ContentSize)) - } - // Unless SingleSegment is set, framessizes < 256 are not stored. - case 1: - f.ContentSize -= 256 - dst = append(dst, uint8(f.ContentSize), uint8(f.ContentSize>>8)) - case 2: - dst = append(dst, uint8(f.ContentSize), uint8(f.ContentSize>>8), uint8(f.ContentSize>>16), uint8(f.ContentSize>>24)) - case 3: - dst = append(dst, uint8(f.ContentSize), uint8(f.ContentSize>>8), uint8(f.ContentSize>>16), uint8(f.ContentSize>>24), - uint8(f.ContentSize>>32), uint8(f.ContentSize>>40), uint8(f.ContentSize>>48), uint8(f.ContentSize>>56)) - default: - panic("invalid fcs") - } - return dst -} - -const skippableFrameHeader = 4 + 4 - -// calcSkippableFrame will return a total size to be added for written -// to be divisible by multiple. -// The value will always be > skippableFrameHeader. -// The function will panic if written < 0 or wantMultiple <= 0. -func calcSkippableFrame(written, wantMultiple int64) int { - if wantMultiple <= 0 { - panic("wantMultiple <= 0") - } - if written < 0 { - panic("written < 0") - } - leftOver := written % wantMultiple - if leftOver == 0 { - return 0 - } - toAdd := wantMultiple - leftOver - for toAdd < skippableFrameHeader { - toAdd += wantMultiple - } - return int(toAdd) -} - -// skippableFrame will add a skippable frame with a total size of bytes. -// total should be >= skippableFrameHeader and < math.MaxUint32. -func skippableFrame(dst []byte, total int, r io.Reader) ([]byte, error) { - if total == 0 { - return dst, nil - } - if total < skippableFrameHeader { - return dst, fmt.Errorf("requested skippable frame (%d) < 8", total) - } - if int64(total) > math.MaxUint32 { - return dst, fmt.Errorf("requested skippable frame (%d) > max uint32", total) - } - dst = append(dst, 0x50, 0x2a, 0x4d, 0x18) - f := uint32(total - skippableFrameHeader) - dst = append(dst, uint8(f), uint8(f>>8), uint8(f>>16), uint8(f>>24)) - start := len(dst) - dst = append(dst, make([]byte, f)...) - _, err := io.ReadFull(r, dst[start:]) - return dst, err -} diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder.go b/vendor/github.com/klauspost/compress/zstd/fse_decoder.go deleted file mode 100644 index 2f8860a72..000000000 --- a/vendor/github.com/klauspost/compress/zstd/fse_decoder.go +++ /dev/null @@ -1,307 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. 
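Reviewer note on the removed frameenc.go: `calcSkippableFrame` computes how much padding a skippable frame must add so that the total written size becomes a multiple of the requested alignment, and it never returns less than the 8-byte skippable-frame header. A standalone sketch of the same arithmetic with two worked examples; `padToMultiple` is a local name for illustration.

```go
package main

import "fmt"

const skippableFrameHeader = 4 + 4 // magic + length field, as in the removed file

// padToMultiple reproduces the calcSkippableFrame arithmetic: how many extra
// bytes make `written` a multiple of `wantMultiple`, always at least the size
// of a skippable frame header.
func padToMultiple(written, wantMultiple int64) int64 {
	leftOver := written % wantMultiple
	if leftOver == 0 {
		return 0
	}
	toAdd := wantMultiple - leftOver
	for toAdd < skippableFrameHeader {
		toAdd += wantMultiple
	}
	return toAdd
}

func main() {
	// 1000 % 512 = 488, so 24 padding bytes bring the total to 1024.
	fmt.Println(padToMultiple(1000, 512)) // 24
	// 1022 % 512 = 510; the 2 missing bytes are below the 8-byte header,
	// so a whole extra multiple is added: 2 + 512 = 514.
	fmt.Println(padToMultiple(1022, 512)) // 514
}
```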
- -package zstd - -import ( - "encoding/binary" - "errors" - "fmt" - "io" -) - -const ( - tablelogAbsoluteMax = 9 -) - -const ( - /*!MEMORY_USAGE : - * Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.) - * Increasing memory usage improves compression ratio - * Reduced memory usage can improve speed, due to cache effect - * Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */ - maxMemoryUsage = tablelogAbsoluteMax + 2 - - maxTableLog = maxMemoryUsage - 2 - maxTablesize = 1 << maxTableLog - maxTableMask = (1 << maxTableLog) - 1 - minTablelog = 5 - maxSymbolValue = 255 -) - -// fseDecoder provides temporary storage for compression and decompression. -type fseDecoder struct { - dt [maxTablesize]decSymbol // Decompression table. - symbolLen uint16 // Length of active part of the symbol table. - actualTableLog uint8 // Selected tablelog. - maxBits uint8 // Maximum number of additional bits - - // used for table creation to avoid allocations. - stateTable [256]uint16 - norm [maxSymbolValue + 1]int16 - preDefined bool -} - -// tableStep returns the next table index. -func tableStep(tableSize uint32) uint32 { - return (tableSize >> 1) + (tableSize >> 3) + 3 -} - -// readNCount will read the symbol distribution so decoding tables can be constructed. -func (s *fseDecoder) readNCount(b *byteReader, maxSymbol uint16) error { - var ( - charnum uint16 - previous0 bool - ) - if b.remain() < 4 { - return errors.New("input too small") - } - bitStream := b.Uint32NC() - nbBits := uint((bitStream & 0xF) + minTablelog) // extract tableLog - if nbBits > tablelogAbsoluteMax { - println("Invalid tablelog:", nbBits) - return errors.New("tableLog too large") - } - bitStream >>= 4 - bitCount := uint(4) - - s.actualTableLog = uint8(nbBits) - remaining := int32((1 << nbBits) + 1) - threshold := int32(1 << nbBits) - gotTotal := int32(0) - nbBits++ - - for remaining > 1 && charnum <= maxSymbol { - if previous0 { - //println("prev0") - n0 := charnum - for (bitStream & 0xFFFF) == 0xFFFF { - //println("24 x 0") - n0 += 24 - if r := b.remain(); r > 5 { - b.advance(2) - // The check above should make sure we can read 32 bits - bitStream = b.Uint32NC() >> bitCount - } else { - // end of bit stream - bitStream >>= 16 - bitCount += 16 - } - } - //printf("bitstream: %d, 0b%b", bitStream&3, bitStream) - for (bitStream & 3) == 3 { - n0 += 3 - bitStream >>= 2 - bitCount += 2 - } - n0 += uint16(bitStream & 3) - bitCount += 2 - - if n0 > maxSymbolValue { - return errors.New("maxSymbolValue too small") - } - //println("inserting ", n0-charnum, "zeroes from idx", charnum, "ending before", n0) - for charnum < n0 { - s.norm[uint8(charnum)] = 0 - charnum++ - } - - if r := b.remain(); r >= 7 || r-int(bitCount>>3) >= 4 { - b.advance(bitCount >> 3) - bitCount &= 7 - // The check above should make sure we can read 32 bits - bitStream = b.Uint32NC() >> bitCount - } else { - bitStream >>= 2 - } - } - - max := (2*threshold - 1) - remaining - var count int32 - - if int32(bitStream)&(threshold-1) < max { - count = int32(bitStream) & (threshold - 1) - if debugAsserts && nbBits < 1 { - panic("nbBits underflow") - } - bitCount += nbBits - 1 - } else { - count = int32(bitStream) & (2*threshold - 1) - if count >= threshold { - count -= max - } - bitCount += nbBits - } - - // extra accuracy - count-- - if count < 0 { - // -1 means +1 - remaining += count - gotTotal -= count - } else { - remaining -= count - gotTotal += count - } - s.norm[charnum&0xff] = int16(count) - 
charnum++ - previous0 = count == 0 - for remaining < threshold { - nbBits-- - threshold >>= 1 - } - - if r := b.remain(); r >= 7 || r-int(bitCount>>3) >= 4 { - b.advance(bitCount >> 3) - bitCount &= 7 - // The check above should make sure we can read 32 bits - bitStream = b.Uint32NC() >> (bitCount & 31) - } else { - bitCount -= (uint)(8 * (len(b.b) - 4 - b.off)) - b.off = len(b.b) - 4 - bitStream = b.Uint32() >> (bitCount & 31) - } - } - s.symbolLen = charnum - if s.symbolLen <= 1 { - return fmt.Errorf("symbolLen (%d) too small", s.symbolLen) - } - if s.symbolLen > maxSymbolValue+1 { - return fmt.Errorf("symbolLen (%d) too big", s.symbolLen) - } - if remaining != 1 { - return fmt.Errorf("corruption detected (remaining %d != 1)", remaining) - } - if bitCount > 32 { - return fmt.Errorf("corruption detected (bitCount %d > 32)", bitCount) - } - if gotTotal != 1<> 3) - return s.buildDtable() -} - -func (s *fseDecoder) mustReadFrom(r io.Reader) { - fatalErr := func(err error) { - if err != nil { - panic(err) - } - } - // dt [maxTablesize]decSymbol // Decompression table. - // symbolLen uint16 // Length of active part of the symbol table. - // actualTableLog uint8 // Selected tablelog. - // maxBits uint8 // Maximum number of additional bits - // // used for table creation to avoid allocations. - // stateTable [256]uint16 - // norm [maxSymbolValue + 1]int16 - // preDefined bool - fatalErr(binary.Read(r, binary.LittleEndian, &s.dt)) - fatalErr(binary.Read(r, binary.LittleEndian, &s.symbolLen)) - fatalErr(binary.Read(r, binary.LittleEndian, &s.actualTableLog)) - fatalErr(binary.Read(r, binary.LittleEndian, &s.maxBits)) - fatalErr(binary.Read(r, binary.LittleEndian, &s.stateTable)) - fatalErr(binary.Read(r, binary.LittleEndian, &s.norm)) - fatalErr(binary.Read(r, binary.LittleEndian, &s.preDefined)) -} - -// decSymbol contains information about a state entry, -// Including the state offset base, the output symbol and -// the number of bits to read for the low part of the destination state. -// Using a composite uint64 is faster than a struct with separate members. -type decSymbol uint64 - -func newDecSymbol(nbits, addBits uint8, newState uint16, baseline uint32) decSymbol { - return decSymbol(nbits) | (decSymbol(addBits) << 8) | (decSymbol(newState) << 16) | (decSymbol(baseline) << 32) -} - -func (d decSymbol) nbBits() uint8 { - return uint8(d) -} - -func (d decSymbol) addBits() uint8 { - return uint8(d >> 8) -} - -func (d decSymbol) newState() uint16 { - return uint16(d >> 16) -} - -func (d decSymbol) baselineInt() int { - return int(d >> 32) -} - -func (d *decSymbol) setNBits(nBits uint8) { - const mask = 0xffffffffffffff00 - *d = (*d & mask) | decSymbol(nBits) -} - -func (d *decSymbol) setAddBits(addBits uint8) { - const mask = 0xffffffffffff00ff - *d = (*d & mask) | (decSymbol(addBits) << 8) -} - -func (d *decSymbol) setNewState(state uint16) { - const mask = 0xffffffff0000ffff - *d = (*d & mask) | decSymbol(state)<<16 -} - -func (d *decSymbol) setExt(addBits uint8, baseline uint32) { - const mask = 0xffff00ff - *d = (*d & mask) | (decSymbol(addBits) << 8) | (decSymbol(baseline) << 32) -} - -// decSymbolValue returns the transformed decSymbol for the given symbol. -func decSymbolValue(symb uint8, t []baseOffset) (decSymbol, error) { - if int(symb) >= len(t) { - return 0, fmt.Errorf("rle symbol %d >= max %d", symb, len(t)) - } - lu := t[symb] - return newDecSymbol(0, lu.addBits, 0, lu.baseLine), nil -} - -// setRLE will set the decoder til RLE mode. 
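Reviewer note on the removed fse_decoder.go: each decoding-table entry is a single packed `uint64` rather than a struct, with the bit count, extra-bit count, next state and baseline at fixed offsets, which the file notes is faster than separate members. A runnable sketch of that layout, copied from the accessors above.

```go
package main

import "fmt"

// decSymbol mirrors the packed table entry from the removed fse_decoder.go:
// nbBits in byte 0, addBits in byte 1, newState in bytes 2-3 and the baseline
// value in the upper 32 bits.
type decSymbol uint64

func newDecSymbol(nbits, addBits uint8, newState uint16, baseline uint32) decSymbol {
	return decSymbol(nbits) | decSymbol(addBits)<<8 | decSymbol(newState)<<16 | decSymbol(baseline)<<32
}

func (d decSymbol) nbBits() uint8    { return uint8(d) }
func (d decSymbol) addBits() uint8   { return uint8(d >> 8) }
func (d decSymbol) newState() uint16 { return uint16(d >> 16) }
func (d decSymbol) baseline() uint32 { return uint32(d >> 32) }

func main() {
	d := newDecSymbol(5, 3, 0x1234, 42)
	fmt.Println(d.nbBits(), d.addBits(), d.newState(), d.baseline()) // 5 3 4660 42
}
```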
-func (s *fseDecoder) setRLE(symbol decSymbol) { - s.actualTableLog = 0 - s.maxBits = symbol.addBits() - s.dt[0] = symbol -} - -// transform will transform the decoder table into a table usable for -// decoding without having to apply the transformation while decoding. -// The state will contain the base value and the number of bits to read. -func (s *fseDecoder) transform(t []baseOffset) error { - tableSize := uint16(1 << s.actualTableLog) - s.maxBits = 0 - for i, v := range s.dt[:tableSize] { - add := v.addBits() - if int(add) >= len(t) { - return fmt.Errorf("invalid decoding table entry %d, symbol %d >= max (%d)", i, v.addBits(), len(t)) - } - lu := t[add] - if lu.addBits > s.maxBits { - s.maxBits = lu.addBits - } - v.setExt(lu.addBits, lu.baseLine) - s.dt[i] = v - } - return nil -} - -type fseState struct { - dt []decSymbol - state decSymbol -} - -// Initialize and decodeAsync first state and symbol. -func (s *fseState) init(br *bitReader, tableLog uint8, dt []decSymbol) { - s.dt = dt - br.fill() - s.state = dt[br.getBits(tableLog)] -} - -// final returns the current state symbol without decoding the next. -func (s decSymbol) final() (int, uint8) { - return s.baselineInt(), s.addBits() -} diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.go b/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.go deleted file mode 100644 index d04a829b0..000000000 --- a/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.go +++ /dev/null @@ -1,65 +0,0 @@ -//go:build amd64 && !appengine && !noasm && gc -// +build amd64,!appengine,!noasm,gc - -package zstd - -import ( - "fmt" -) - -type buildDtableAsmContext struct { - // inputs - stateTable *uint16 - norm *int16 - dt *uint64 - - // outputs --- set by the procedure in the case of error; - // for interpretation please see the error handling part below - errParam1 uint64 - errParam2 uint64 -} - -// buildDtable_asm is an x86 assembly implementation of fseDecoder.buildDtable. -// Function returns non-zero exit code on error. -// -//go:noescape -func buildDtable_asm(s *fseDecoder, ctx *buildDtableAsmContext) int - -// please keep in sync with _generate/gen_fse.go -const ( - errorCorruptedNormalizedCounter = 1 - errorNewStateTooBig = 2 - errorNewStateNoBits = 3 -) - -// buildDtable will build the decoding table. -func (s *fseDecoder) buildDtable() error { - ctx := buildDtableAsmContext{ - stateTable: &s.stateTable[0], - norm: &s.norm[0], - dt: (*uint64)(&s.dt[0]), - } - code := buildDtable_asm(s, &ctx) - - if code != 0 { - switch code { - case errorCorruptedNormalizedCounter: - position := ctx.errParam1 - return fmt.Errorf("corrupted input (position=%d, expected 0)", position) - - case errorNewStateTooBig: - newState := decSymbol(ctx.errParam1) - size := ctx.errParam2 - return fmt.Errorf("newState (%d) outside table size (%d)", newState, size) - - case errorNewStateNoBits: - newState := decSymbol(ctx.errParam1) - oldState := decSymbol(ctx.errParam2) - return fmt.Errorf("newState (%d) == oldState (%d) and no bits", newState, oldState) - - default: - return fmt.Errorf("buildDtable_asm returned unhandled nonzero code = %d", code) - } - } - return nil -} diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.s b/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.s deleted file mode 100644 index bcde39869..000000000 --- a/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.s +++ /dev/null @@ -1,126 +0,0 @@ -// Code generated by command: go run gen_fse.go -out ../fse_decoder_amd64.s -pkg=zstd. 
DO NOT EDIT. - -//go:build !appengine && !noasm && gc && !noasm - -// func buildDtable_asm(s *fseDecoder, ctx *buildDtableAsmContext) int -TEXT ·buildDtable_asm(SB), $0-24 - MOVQ ctx+8(FP), CX - MOVQ s+0(FP), DI - - // Load values - MOVBQZX 4098(DI), DX - XORQ AX, AX - BTSQ DX, AX - MOVQ (CX), BX - MOVQ 16(CX), SI - LEAQ -1(AX), R8 - MOVQ 8(CX), CX - MOVWQZX 4096(DI), DI - - // End load values - // Init, lay down lowprob symbols - XORQ R9, R9 - JMP init_main_loop_condition - -init_main_loop: - MOVWQSX (CX)(R9*2), R10 - CMPW R10, $-1 - JNE do_not_update_high_threshold - MOVB R9, 1(SI)(R8*8) - DECQ R8 - MOVQ $0x0000000000000001, R10 - -do_not_update_high_threshold: - MOVW R10, (BX)(R9*2) - INCQ R9 - -init_main_loop_condition: - CMPQ R9, DI - JL init_main_loop - - // Spread symbols - // Calculate table step - MOVQ AX, R9 - SHRQ $0x01, R9 - MOVQ AX, R10 - SHRQ $0x03, R10 - LEAQ 3(R9)(R10*1), R9 - - // Fill add bits values - LEAQ -1(AX), R10 - XORQ R11, R11 - XORQ R12, R12 - JMP spread_main_loop_condition - -spread_main_loop: - XORQ R13, R13 - MOVWQSX (CX)(R12*2), R14 - JMP spread_inner_loop_condition - -spread_inner_loop: - MOVB R12, 1(SI)(R11*8) - -adjust_position: - ADDQ R9, R11 - ANDQ R10, R11 - CMPQ R11, R8 - JG adjust_position - INCQ R13 - -spread_inner_loop_condition: - CMPQ R13, R14 - JL spread_inner_loop - INCQ R12 - -spread_main_loop_condition: - CMPQ R12, DI - JL spread_main_loop - TESTQ R11, R11 - JZ spread_check_ok - MOVQ ctx+8(FP), AX - MOVQ R11, 24(AX) - MOVQ $+1, ret+16(FP) - RET - -spread_check_ok: - // Build Decoding table - XORQ DI, DI - -build_table_main_table: - MOVBQZX 1(SI)(DI*8), CX - MOVWQZX (BX)(CX*2), R8 - LEAQ 1(R8), R9 - MOVW R9, (BX)(CX*2) - MOVQ R8, R9 - BSRQ R9, R9 - MOVQ DX, CX - SUBQ R9, CX - SHLQ CL, R8 - SUBQ AX, R8 - MOVB CL, (SI)(DI*8) - MOVW R8, 2(SI)(DI*8) - CMPQ R8, AX - JLE build_table_check1_ok - MOVQ ctx+8(FP), CX - MOVQ R8, 24(CX) - MOVQ AX, 32(CX) - MOVQ $+2, ret+16(FP) - RET - -build_table_check1_ok: - TESTB CL, CL - JNZ build_table_check2_ok - CMPW R8, DI - JNE build_table_check2_ok - MOVQ ctx+8(FP), AX - MOVQ R8, 24(AX) - MOVQ DI, 32(AX) - MOVQ $+3, ret+16(FP) - RET - -build_table_check2_ok: - INCQ DI - CMPQ DI, AX - JL build_table_main_table - MOVQ $+0, ret+16(FP) - RET diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go b/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go deleted file mode 100644 index 8adfebb02..000000000 --- a/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go +++ /dev/null @@ -1,73 +0,0 @@ -//go:build !amd64 || appengine || !gc || noasm -// +build !amd64 appengine !gc noasm - -package zstd - -import ( - "errors" - "fmt" -) - -// buildDtable will build the decoding table. 
-func (s *fseDecoder) buildDtable() error { - tableSize := uint32(1 << s.actualTableLog) - highThreshold := tableSize - 1 - symbolNext := s.stateTable[:256] - - // Init, lay down lowprob symbols - { - for i, v := range s.norm[:s.symbolLen] { - if v == -1 { - s.dt[highThreshold].setAddBits(uint8(i)) - highThreshold-- - v = 1 - } - symbolNext[i] = uint16(v) - } - } - - // Spread symbols - { - tableMask := tableSize - 1 - step := tableStep(tableSize) - position := uint32(0) - for ss, v := range s.norm[:s.symbolLen] { - for i := 0; i < int(v); i++ { - s.dt[position].setAddBits(uint8(ss)) - for { - // lowprob area - position = (position + step) & tableMask - if position <= highThreshold { - break - } - } - } - } - if position != 0 { - // position must reach all cells once, otherwise normalizedCounter is incorrect - return errors.New("corrupted input (position != 0)") - } - } - - // Build Decoding table - { - tableSize := uint16(1 << s.actualTableLog) - for u, v := range s.dt[:tableSize] { - symbol := v.addBits() - nextState := symbolNext[symbol] - symbolNext[symbol] = nextState + 1 - nBits := s.actualTableLog - byte(highBits(uint32(nextState))) - s.dt[u&maxTableMask].setNBits(nBits) - newState := (nextState << nBits) - tableSize - if newState > tableSize { - return fmt.Errorf("newState (%d) outside table size (%d)", newState, tableSize) - } - if newState == uint16(u) && nBits == 0 { - // Seems weird that this is possible with nbits > 0. - return fmt.Errorf("newState (%d) == oldState (%d) and no bits", newState, u) - } - s.dt[u&maxTableMask].setNewState(newState) - } - } - return nil -} diff --git a/vendor/github.com/klauspost/compress/zstd/fse_encoder.go b/vendor/github.com/klauspost/compress/zstd/fse_encoder.go deleted file mode 100644 index ab26326a8..000000000 --- a/vendor/github.com/klauspost/compress/zstd/fse_encoder.go +++ /dev/null @@ -1,701 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -import ( - "errors" - "fmt" - "math" -) - -const ( - // For encoding we only support up to - maxEncTableLog = 8 - maxEncTablesize = 1 << maxTableLog - maxEncTableMask = (1 << maxTableLog) - 1 - minEncTablelog = 5 - maxEncSymbolValue = maxMatchLengthSymbol -) - -// Scratch provides temporary storage for compression and decompression. -type fseEncoder struct { - symbolLen uint16 // Length of active part of the symbol table. - actualTableLog uint8 // Selected tablelog. - ct cTable // Compression tables. - maxCount int // count of the most probable symbol - zeroBits bool // no bits has prob > 50%. - clearCount bool // clear count - useRLE bool // This encoder is for RLE - preDefined bool // This encoder is predefined. - reUsed bool // Set to know when the encoder has been reused. - rleVal uint8 // RLE Symbol - maxBits uint8 // Maximum output bits after transform. - - // TODO: Technically zstd should be fine with 64 bytes. - count [256]uint32 - norm [256]int16 -} - -// cTable contains tables used for compression. -type cTable struct { - tableSymbol []byte - stateTable []uint16 - symbolTT []symbolTransform -} - -// symbolTransform contains the state transform for a symbol. -type symbolTransform struct { - deltaNbBits uint32 - deltaFindState int16 - outBits uint8 -} - -// String prints values as a human readable string. 
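Reviewer note on the removed table builders: both the generic `buildDtable` above and the encoder's `buildCTable` spread symbols by stepping through the table with stride `tableSize/2 + tableSize/8 + 3`. For the table sizes used here (tablelog of at least 5) that stride is odd and therefore coprime with the power-of-two table size, so the walk visits every slot exactly once, which is what the `position != 0` corruption checks rely on. A quick self-check sketch; the loop and constant choice are mine, only `tableStep` is copied from the removed code.

```go
package main

import "fmt"

// tableStep is the stride used by the removed FSE table builders.
func tableStep(tableSize uint32) uint32 {
	return (tableSize >> 1) + (tableSize >> 3) + 3
}

func main() {
	const tableSize = 1 << 9 // tablelogAbsoluteMax in the removed decoder
	step := tableStep(tableSize)
	mask := uint32(tableSize - 1)

	// Walk the table the same way the spread loops do and count distinct slots.
	seen := make(map[uint32]bool, tableSize)
	pos := uint32(0)
	for i := 0; i < tableSize; i++ {
		seen[pos] = true
		pos = (pos + step) & mask
	}
	fmt.Println("step:", step, "distinct slots visited:", len(seen), "back at start:", pos == 0)
}
```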
-func (s symbolTransform) String() string { - return fmt.Sprintf("{deltabits: %08x, findstate:%d outbits:%d}", s.deltaNbBits, s.deltaFindState, s.outBits) -} - -// Histogram allows to populate the histogram and skip that step in the compression, -// It otherwise allows to inspect the histogram when compression is done. -// To indicate that you have populated the histogram call HistogramFinished -// with the value of the highest populated symbol, as well as the number of entries -// in the most populated entry. These are accepted at face value. -func (s *fseEncoder) Histogram() *[256]uint32 { - return &s.count -} - -// HistogramFinished can be called to indicate that the histogram has been populated. -// maxSymbol is the index of the highest set symbol of the next data segment. -// maxCount is the number of entries in the most populated entry. -// These are accepted at face value. -func (s *fseEncoder) HistogramFinished(maxSymbol uint8, maxCount int) { - s.maxCount = maxCount - s.symbolLen = uint16(maxSymbol) + 1 - s.clearCount = maxCount != 0 -} - -// allocCtable will allocate tables needed for compression. -// If existing tables a re big enough, they are simply re-used. -func (s *fseEncoder) allocCtable() { - tableSize := 1 << s.actualTableLog - // get tableSymbol that is big enough. - if cap(s.ct.tableSymbol) < tableSize { - s.ct.tableSymbol = make([]byte, tableSize) - } - s.ct.tableSymbol = s.ct.tableSymbol[:tableSize] - - ctSize := tableSize - if cap(s.ct.stateTable) < ctSize { - s.ct.stateTable = make([]uint16, ctSize) - } - s.ct.stateTable = s.ct.stateTable[:ctSize] - - if cap(s.ct.symbolTT) < 256 { - s.ct.symbolTT = make([]symbolTransform, 256) - } - s.ct.symbolTT = s.ct.symbolTT[:256] -} - -// buildCTable will populate the compression table so it is ready to be used. -func (s *fseEncoder) buildCTable() error { - tableSize := uint32(1 << s.actualTableLog) - highThreshold := tableSize - 1 - var cumul [256]int16 - - s.allocCtable() - tableSymbol := s.ct.tableSymbol[:tableSize] - // symbol start positions - { - cumul[0] = 0 - for ui, v := range s.norm[:s.symbolLen-1] { - u := byte(ui) // one less than reference - if v == -1 { - // Low proba symbol - cumul[u+1] = cumul[u] + 1 - tableSymbol[highThreshold] = u - highThreshold-- - } else { - cumul[u+1] = cumul[u] + v - } - } - // Encode last symbol separately to avoid overflowing u - u := int(s.symbolLen - 1) - v := s.norm[s.symbolLen-1] - if v == -1 { - // Low proba symbol - cumul[u+1] = cumul[u] + 1 - tableSymbol[highThreshold] = byte(u) - highThreshold-- - } else { - cumul[u+1] = cumul[u] + v - } - if uint32(cumul[s.symbolLen]) != tableSize { - return fmt.Errorf("internal error: expected cumul[s.symbolLen] (%d) == tableSize (%d)", cumul[s.symbolLen], tableSize) - } - cumul[s.symbolLen] = int16(tableSize) + 1 - } - // Spread symbols - s.zeroBits = false - { - step := tableStep(tableSize) - tableMask := tableSize - 1 - var position uint32 - // if any symbol > largeLimit, we may have 0 bits output. 
- largeLimit := int16(1 << (s.actualTableLog - 1)) - for ui, v := range s.norm[:s.symbolLen] { - symbol := byte(ui) - if v > largeLimit { - s.zeroBits = true - } - for nbOccurrences := int16(0); nbOccurrences < v; nbOccurrences++ { - tableSymbol[position] = symbol - position = (position + step) & tableMask - for position > highThreshold { - position = (position + step) & tableMask - } /* Low proba area */ - } - } - - // Check if we have gone through all positions - if position != 0 { - return errors.New("position!=0") - } - } - - // Build table - table := s.ct.stateTable - { - tsi := int(tableSize) - for u, v := range tableSymbol { - // TableU16 : sorted by symbol order; gives next state value - table[cumul[v]] = uint16(tsi + u) - cumul[v]++ - } - } - - // Build Symbol Transformation Table - { - total := int16(0) - symbolTT := s.ct.symbolTT[:s.symbolLen] - tableLog := s.actualTableLog - tl := (uint32(tableLog) << 16) - (1 << tableLog) - for i, v := range s.norm[:s.symbolLen] { - switch v { - case 0: - case -1, 1: - symbolTT[i].deltaNbBits = tl - symbolTT[i].deltaFindState = total - 1 - total++ - default: - maxBitsOut := uint32(tableLog) - highBit(uint32(v-1)) - minStatePlus := uint32(v) << maxBitsOut - symbolTT[i].deltaNbBits = (maxBitsOut << 16) - minStatePlus - symbolTT[i].deltaFindState = total - v - total += v - } - } - if total != int16(tableSize) { - return fmt.Errorf("total mismatch %d (got) != %d (want)", total, tableSize) - } - } - return nil -} - -var rtbTable = [...]uint32{0, 473195, 504333, 520860, 550000, 700000, 750000, 830000} - -func (s *fseEncoder) setRLE(val byte) { - s.allocCtable() - s.actualTableLog = 0 - s.ct.stateTable = s.ct.stateTable[:1] - s.ct.symbolTT[val] = symbolTransform{ - deltaFindState: 0, - deltaNbBits: 0, - } - if debugEncoder { - println("setRLE: val", val, "symbolTT", s.ct.symbolTT[val]) - } - s.rleVal = val - s.useRLE = true -} - -// setBits will set output bits for the transform. -// if nil is provided, the number of bits is equal to the index. -func (s *fseEncoder) setBits(transform []byte) { - if s.reUsed || s.preDefined { - return - } - if s.useRLE { - if transform == nil { - s.ct.symbolTT[s.rleVal].outBits = s.rleVal - s.maxBits = s.rleVal - return - } - s.maxBits = transform[s.rleVal] - s.ct.symbolTT[s.rleVal].outBits = s.maxBits - return - } - if transform == nil { - for i := range s.ct.symbolTT[:s.symbolLen] { - s.ct.symbolTT[i].outBits = uint8(i) - } - s.maxBits = uint8(s.symbolLen - 1) - return - } - s.maxBits = 0 - for i, v := range transform[:s.symbolLen] { - s.ct.symbolTT[i].outBits = v - if v > s.maxBits { - // We could assume bits always going up, but we play safe. - s.maxBits = v - } - } -} - -// normalizeCount will normalize the count of the symbols so -// the total is equal to the table size. -// If successful, compression tables will also be made ready. 
-func (s *fseEncoder) normalizeCount(length int) error { - if s.reUsed { - return nil - } - s.optimalTableLog(length) - var ( - tableLog = s.actualTableLog - scale = 62 - uint64(tableLog) - step = (1 << 62) / uint64(length) - vStep = uint64(1) << (scale - 20) - stillToDistribute = int16(1 << tableLog) - largest int - largestP int16 - lowThreshold = (uint32)(length >> tableLog) - ) - if s.maxCount == length { - s.useRLE = true - return nil - } - s.useRLE = false - for i, cnt := range s.count[:s.symbolLen] { - // already handled - // if (count[s] == s.length) return 0; /* rle special case */ - - if cnt == 0 { - s.norm[i] = 0 - continue - } - if cnt <= lowThreshold { - s.norm[i] = -1 - stillToDistribute-- - } else { - proba := (int16)((uint64(cnt) * step) >> scale) - if proba < 8 { - restToBeat := vStep * uint64(rtbTable[proba]) - v := uint64(cnt)*step - (uint64(proba) << scale) - if v > restToBeat { - proba++ - } - } - if proba > largestP { - largestP = proba - largest = i - } - s.norm[i] = proba - stillToDistribute -= proba - } - } - - if -stillToDistribute >= (s.norm[largest] >> 1) { - // corner case, need another normalization method - err := s.normalizeCount2(length) - if err != nil { - return err - } - if debugAsserts { - err = s.validateNorm() - if err != nil { - return err - } - } - return s.buildCTable() - } - s.norm[largest] += stillToDistribute - if debugAsserts { - err := s.validateNorm() - if err != nil { - return err - } - } - return s.buildCTable() -} - -// Secondary normalization method. -// To be used when primary method fails. -func (s *fseEncoder) normalizeCount2(length int) error { - const notYetAssigned = -2 - var ( - distributed uint32 - total = uint32(length) - tableLog = s.actualTableLog - lowThreshold = total >> tableLog - lowOne = (total * 3) >> (tableLog + 1) - ) - for i, cnt := range s.count[:s.symbolLen] { - if cnt == 0 { - s.norm[i] = 0 - continue - } - if cnt <= lowThreshold { - s.norm[i] = -1 - distributed++ - total -= cnt - continue - } - if cnt <= lowOne { - s.norm[i] = 1 - distributed++ - total -= cnt - continue - } - s.norm[i] = notYetAssigned - } - toDistribute := (1 << tableLog) - distributed - - if (total / toDistribute) > lowOne { - // risk of rounding to zero - lowOne = (total * 3) / (toDistribute * 2) - for i, cnt := range s.count[:s.symbolLen] { - if (s.norm[i] == notYetAssigned) && (cnt <= lowOne) { - s.norm[i] = 1 - distributed++ - total -= cnt - continue - } - } - toDistribute = (1 << tableLog) - distributed - } - if distributed == uint32(s.symbolLen)+1 { - // all values are pretty poor; - // probably incompressible data (should have already been detected); - // find max, then give all remaining points to max - var maxV int - var maxC uint32 - for i, cnt := range s.count[:s.symbolLen] { - if cnt > maxC { - maxV = i - maxC = cnt - } - } - s.norm[maxV] += int16(toDistribute) - return nil - } - - if total == 0 { - // all of the symbols were low enough for the lowOne or lowThreshold - for i := uint32(0); toDistribute > 0; i = (i + 1) % (uint32(s.symbolLen)) { - if s.norm[i] > 0 { - toDistribute-- - s.norm[i]++ - } - } - return nil - } - - var ( - vStepLog = 62 - uint64(tableLog) - mid = uint64((1 << (vStepLog - 1)) - 1) - rStep = (((1 << vStepLog) * uint64(toDistribute)) + mid) / uint64(total) // scale on remaining - tmpTotal = mid - ) - for i, cnt := range s.count[:s.symbolLen] { - if s.norm[i] == notYetAssigned { - var ( - end = tmpTotal + uint64(cnt)*rStep - sStart = uint32(tmpTotal >> vStepLog) - sEnd = uint32(end >> vStepLog) - weight = sEnd - 
sStart - ) - if weight < 1 { - return errors.New("weight < 1") - } - s.norm[i] = int16(weight) - tmpTotal = end - } - } - return nil -} - -// optimalTableLog calculates and sets the optimal tableLog in s.actualTableLog -func (s *fseEncoder) optimalTableLog(length int) { - tableLog := uint8(maxEncTableLog) - minBitsSrc := highBit(uint32(length)) + 1 - minBitsSymbols := highBit(uint32(s.symbolLen-1)) + 2 - minBits := uint8(minBitsSymbols) - if minBitsSrc < minBitsSymbols { - minBits = uint8(minBitsSrc) - } - - maxBitsSrc := uint8(highBit(uint32(length-1))) - 2 - if maxBitsSrc < tableLog { - // Accuracy can be reduced - tableLog = maxBitsSrc - } - if minBits > tableLog { - tableLog = minBits - } - // Need a minimum to safely represent all symbol values - if tableLog < minEncTablelog { - tableLog = minEncTablelog - } - if tableLog > maxEncTableLog { - tableLog = maxEncTableLog - } - s.actualTableLog = tableLog -} - -// validateNorm validates the normalized histogram table. -func (s *fseEncoder) validateNorm() (err error) { - var total int - for _, v := range s.norm[:s.symbolLen] { - if v >= 0 { - total += int(v) - } else { - total -= int(v) - } - } - defer func() { - if err == nil { - return - } - fmt.Printf("selected TableLog: %d, Symbol length: %d\n", s.actualTableLog, s.symbolLen) - for i, v := range s.norm[:s.symbolLen] { - fmt.Printf("%3d: %5d -> %4d \n", i, s.count[i], v) - } - }() - if total != (1 << s.actualTableLog) { - return fmt.Errorf("warning: Total == %d != %d", total, 1<> 3) + 3 + 2 - - // Write Table Size - bitStream = uint32(tableLog - minEncTablelog) - bitCount = uint(4) - remaining = int16(tableSize + 1) /* +1 for extra accuracy */ - threshold = int16(tableSize) - nbBits = uint(tableLog + 1) - outP = len(out) - ) - if cap(out) < outP+maxHeaderSize { - out = append(out, make([]byte, maxHeaderSize*3)...) - out = out[:len(out)-maxHeaderSize*3] - } - out = out[:outP+maxHeaderSize] - - // stops at 1 - for remaining > 1 { - if previous0 { - start := charnum - for s.norm[charnum] == 0 { - charnum++ - } - for charnum >= start+24 { - start += 24 - bitStream += uint32(0xFFFF) << bitCount - out[outP] = byte(bitStream) - out[outP+1] = byte(bitStream >> 8) - outP += 2 - bitStream >>= 16 - } - for charnum >= start+3 { - start += 3 - bitStream += 3 << bitCount - bitCount += 2 - } - bitStream += uint32(charnum-start) << bitCount - bitCount += 2 - if bitCount > 16 { - out[outP] = byte(bitStream) - out[outP+1] = byte(bitStream >> 8) - outP += 2 - bitStream >>= 16 - bitCount -= 16 - } - } - - count := s.norm[charnum] - charnum++ - max := (2*threshold - 1) - remaining - if count < 0 { - remaining += count - } else { - remaining -= count - } - count++ // +1 for extra accuracy - if count >= threshold { - count += max // [0..max[ [max..threshold[ (...) 
[threshold+max 2*threshold[ - } - bitStream += uint32(count) << bitCount - bitCount += nbBits - if count < max { - bitCount-- - } - - previous0 = count == 1 - if remaining < 1 { - return nil, errors.New("internal error: remaining < 1") - } - for remaining < threshold { - nbBits-- - threshold >>= 1 - } - - if bitCount > 16 { - out[outP] = byte(bitStream) - out[outP+1] = byte(bitStream >> 8) - outP += 2 - bitStream >>= 16 - bitCount -= 16 - } - } - - if outP+2 > len(out) { - return nil, fmt.Errorf("internal error: %d > %d, maxheader: %d, sl: %d, tl: %d, normcount: %v", outP+2, len(out), maxHeaderSize, s.symbolLen, int(tableLog), s.norm[:s.symbolLen]) - } - out[outP] = byte(bitStream) - out[outP+1] = byte(bitStream >> 8) - outP += int((bitCount + 7) / 8) - - if charnum > s.symbolLen { - return nil, errors.New("internal error: charnum > s.symbolLen") - } - return out[:outP], nil -} - -// Approximate symbol cost, as fractional value, using fixed-point format (accuracyLog fractional bits) -// note 1 : assume symbolValue is valid (<= maxSymbolValue) -// note 2 : if freq[symbolValue]==0, @return a fake cost of tableLog+1 bits * -func (s *fseEncoder) bitCost(symbolValue uint8, accuracyLog uint32) uint32 { - minNbBits := s.ct.symbolTT[symbolValue].deltaNbBits >> 16 - threshold := (minNbBits + 1) << 16 - if debugAsserts { - if !(s.actualTableLog < 16) { - panic("!s.actualTableLog < 16") - } - // ensure enough room for renormalization double shift - if !(uint8(accuracyLog) < 31-s.actualTableLog) { - panic("!uint8(accuracyLog) < 31-s.actualTableLog") - } - } - tableSize := uint32(1) << s.actualTableLog - deltaFromThreshold := threshold - (s.ct.symbolTT[symbolValue].deltaNbBits + tableSize) - // linear interpolation (very approximate) - normalizedDeltaFromThreshold := (deltaFromThreshold << accuracyLog) >> s.actualTableLog - bitMultiplier := uint32(1) << accuracyLog - if debugAsserts { - if s.ct.symbolTT[symbolValue].deltaNbBits+tableSize > threshold { - panic("s.ct.symbolTT[symbolValue].deltaNbBits+tableSize > threshold") - } - if normalizedDeltaFromThreshold > bitMultiplier { - panic("normalizedDeltaFromThreshold > bitMultiplier") - } - } - return (minNbBits+1)*bitMultiplier - normalizedDeltaFromThreshold -} - -// Returns the cost in bits of encoding the distribution in count using ctable. -// Histogram should only be up to the last non-zero symbol. -// Returns an -1 if ctable cannot represent all the symbols in count. -func (s *fseEncoder) approxSize(hist []uint32) uint32 { - if int(s.symbolLen) < len(hist) { - // More symbols than we have. - return math.MaxUint32 - } - if s.useRLE { - // We will never reuse RLE encoders. - return math.MaxUint32 - } - const kAccuracyLog = 8 - badCost := (uint32(s.actualTableLog) + 1) << kAccuracyLog - var cost uint32 - for i, v := range hist { - if v == 0 { - continue - } - if s.norm[i] == 0 { - return math.MaxUint32 - } - bitCost := s.bitCost(uint8(i), kAccuracyLog) - if bitCost > badCost { - return math.MaxUint32 - } - cost += v * bitCost - } - return cost >> kAccuracyLog -} - -// maxHeaderSize returns the maximum header size in bits. -// This is not exact size, but we want a penalty for new tables anyway. -func (s *fseEncoder) maxHeaderSize() uint32 { - if s.preDefined { - return 0 - } - if s.useRLE { - return 8 - } - return (((uint32(s.symbolLen) * uint32(s.actualTableLog)) >> 3) + 3) * 8 -} - -// cState contains the compression state of a stream. 
-type cState struct { - bw *bitWriter - stateTable []uint16 - state uint16 -} - -// init will initialize the compression state to the first symbol of the stream. -func (c *cState) init(bw *bitWriter, ct *cTable, first symbolTransform) { - c.bw = bw - c.stateTable = ct.stateTable - if len(c.stateTable) == 1 { - // RLE - c.stateTable[0] = uint16(0) - c.state = 0 - return - } - nbBitsOut := (first.deltaNbBits + (1 << 15)) >> 16 - im := int32((nbBitsOut << 16) - first.deltaNbBits) - lu := (im >> nbBitsOut) + int32(first.deltaFindState) - c.state = c.stateTable[lu] -} - -// flush will write the tablelog to the output and flush the remaining full bytes. -func (c *cState) flush(tableLog uint8) { - c.bw.flush32() - c.bw.addBits16NC(c.state, tableLog) -} diff --git a/vendor/github.com/klauspost/compress/zstd/fse_predefined.go b/vendor/github.com/klauspost/compress/zstd/fse_predefined.go deleted file mode 100644 index 474cb77d2..000000000 --- a/vendor/github.com/klauspost/compress/zstd/fse_predefined.go +++ /dev/null @@ -1,158 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -import ( - "fmt" - "math" - "sync" -) - -var ( - // fsePredef are the predefined fse tables as defined here: - // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#default-distributions - // These values are already transformed. - fsePredef [3]fseDecoder - - // fsePredefEnc are the predefined encoder based on fse tables as defined here: - // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#default-distributions - // These values are already transformed. - fsePredefEnc [3]fseEncoder - - // symbolTableX contain the transformations needed for each type as defined in - // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#the-codes-for-literals-lengths-match-lengths-and-offsets - symbolTableX [3][]baseOffset - - // maxTableSymbol is the biggest supported symbol for each table type - // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#the-codes-for-literals-lengths-match-lengths-and-offsets - maxTableSymbol = [3]uint8{tableLiteralLengths: maxLiteralLengthSymbol, tableOffsets: maxOffsetLengthSymbol, tableMatchLengths: maxMatchLengthSymbol} - - // bitTables is the bits table for each table. - bitTables = [3][]byte{tableLiteralLengths: llBitsTable[:], tableOffsets: nil, tableMatchLengths: mlBitsTable[:]} -) - -type tableIndex uint8 - -const ( - // indexes for fsePredef and symbolTableX - tableLiteralLengths tableIndex = 0 - tableOffsets tableIndex = 1 - tableMatchLengths tableIndex = 2 - - maxLiteralLengthSymbol = 35 - maxOffsetLengthSymbol = 30 - maxMatchLengthSymbol = 52 -) - -// baseOffset is used for calculating transformations. -type baseOffset struct { - baseLine uint32 - addBits uint8 -} - -// fillBase will precalculate base offsets with the given bit distributions. 
-func fillBase(dst []baseOffset, base uint32, bits ...uint8) { - if len(bits) != len(dst) { - panic(fmt.Sprintf("len(dst) (%d) != len(bits) (%d)", len(dst), len(bits))) - } - for i, bit := range bits { - if base > math.MaxInt32 { - panic("invalid decoding table, base overflows int32") - } - - dst[i] = baseOffset{ - baseLine: base, - addBits: bit, - } - base += 1 << bit - } -} - -var predef sync.Once - -func initPredefined() { - predef.Do(func() { - // Literals length codes - tmp := make([]baseOffset, 36) - for i := range tmp[:16] { - tmp[i] = baseOffset{ - baseLine: uint32(i), - addBits: 0, - } - } - fillBase(tmp[16:], 16, 1, 1, 1, 1, 2, 2, 3, 3, 4, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16) - symbolTableX[tableLiteralLengths] = tmp - - // Match length codes - tmp = make([]baseOffset, 53) - for i := range tmp[:32] { - tmp[i] = baseOffset{ - // The transformation adds the 3 length. - baseLine: uint32(i) + 3, - addBits: 0, - } - } - fillBase(tmp[32:], 35, 1, 1, 1, 1, 2, 2, 3, 3, 4, 4, 5, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16) - symbolTableX[tableMatchLengths] = tmp - - // Offset codes - tmp = make([]baseOffset, maxOffsetBits+1) - tmp[1] = baseOffset{ - baseLine: 1, - addBits: 1, - } - fillBase(tmp[2:], 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30) - symbolTableX[tableOffsets] = tmp - - // Fill predefined tables and transform them. - // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#default-distributions - for i := range fsePredef[:] { - f := &fsePredef[i] - switch tableIndex(i) { - case tableLiteralLengths: - // https://github.com/facebook/zstd/blob/ededcfca57366461021c922720878c81a5854a0a/lib/decompress/zstd_decompress_block.c#L243 - f.actualTableLog = 6 - copy(f.norm[:], []int16{4, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, - 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 2, 1, 1, 1, 1, 1, - -1, -1, -1, -1}) - f.symbolLen = 36 - case tableOffsets: - // https://github.com/facebook/zstd/blob/ededcfca57366461021c922720878c81a5854a0a/lib/decompress/zstd_decompress_block.c#L281 - f.actualTableLog = 5 - copy(f.norm[:], []int16{ - 1, 1, 1, 1, 1, 1, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, -1, -1, -1, -1, -1}) - f.symbolLen = 29 - case tableMatchLengths: - //https://github.com/facebook/zstd/blob/ededcfca57366461021c922720878c81a5854a0a/lib/decompress/zstd_decompress_block.c#L304 - f.actualTableLog = 6 - copy(f.norm[:], []int16{ - 1, 4, 3, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, -1, -1, - -1, -1, -1, -1, -1}) - f.symbolLen = 53 - } - if err := f.buildDtable(); err != nil { - panic(fmt.Errorf("building table %v: %v", tableIndex(i), err)) - } - if err := f.transform(symbolTableX[i]); err != nil { - panic(fmt.Errorf("building table %v: %v", tableIndex(i), err)) - } - f.preDefined = true - - // Create encoder as well - enc := &fsePredefEnc[i] - copy(enc.norm[:], f.norm[:]) - enc.symbolLen = f.symbolLen - enc.actualTableLog = f.actualTableLog - if err := enc.buildCTable(); err != nil { - panic(fmt.Errorf("building encoding table %v: %v", tableIndex(i), err)) - } - enc.setBits(bitTables[i]) - enc.preDefined = true - } - }) -} diff --git a/vendor/github.com/klauspost/compress/zstd/hash.go b/vendor/github.com/klauspost/compress/zstd/hash.go deleted file mode 100644 index 5d73c21eb..000000000 --- a/vendor/github.com/klauspost/compress/zstd/hash.go +++ /dev/null @@ -1,35 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. 
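Reviewer note on the removed fse_predefined.go: `fillBase` gives each code a starting value plus a number of extra bits to read, with each code's range beginning where the previous one ends. A simplified copy (the length and overflow checks are dropped) that reproduces the first literal-length codes above 15, matching the `fillBase(tmp[16:], 16, 1, 1, 1, 1, 2, 2, ...)` call above.

```go
package main

import "fmt"

type baseOffset struct {
	baseLine uint32
	addBits  uint8
}

// fillBase is a simplified copy of the removed helper: each code gets a
// baseline and an extra-bit count, and the next code starts after the range
// covered by the previous one.
func fillBase(dst []baseOffset, base uint32, bits ...uint8) {
	for i, bit := range bits {
		dst[i] = baseOffset{baseLine: base, addBits: bit}
		base += 1 << bit
	}
}

func main() {
	// Literal-length codes 0-15 map to themselves; code 16 covers lengths
	// 16-17 (1 extra bit), code 17 covers 18-19, code 20 covers 24-27, etc.
	tmp := make([]baseOffset, 6)
	fillBase(tmp, 16, 1, 1, 1, 1, 2, 2)
	for i, v := range tmp {
		fmt.Printf("code %d: baseline %d, %d extra bit(s)\n", 16+i, v.baseLine, v.addBits)
	}
}
```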
-// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -const ( - prime3bytes = 506832829 - prime4bytes = 2654435761 - prime5bytes = 889523592379 - prime6bytes = 227718039650203 - prime7bytes = 58295818150454627 - prime8bytes = 0xcf1bbcdcb7a56463 -) - -// hashLen returns a hash of the lowest mls bytes of with length output bits. -// mls must be >=3 and <=8. Any other value will return hash for 4 bytes. -// length should always be < 32. -// Preferably length and mls should be a constant for inlining. -func hashLen(u uint64, length, mls uint8) uint32 { - switch mls { - case 3: - return (uint32(u<<8) * prime3bytes) >> (32 - length) - case 5: - return uint32(((u << (64 - 40)) * prime5bytes) >> (64 - length)) - case 6: - return uint32(((u << (64 - 48)) * prime6bytes) >> (64 - length)) - case 7: - return uint32(((u << (64 - 56)) * prime7bytes) >> (64 - length)) - case 8: - return uint32((u * prime8bytes) >> (64 - length)) - default: - return (uint32(u) * prime4bytes) >> (32 - length) - } -} diff --git a/vendor/github.com/klauspost/compress/zstd/history.go b/vendor/github.com/klauspost/compress/zstd/history.go deleted file mode 100644 index 09164856d..000000000 --- a/vendor/github.com/klauspost/compress/zstd/history.go +++ /dev/null @@ -1,116 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -import ( - "github.com/klauspost/compress/huff0" -) - -// history contains the information transferred between blocks. -type history struct { - // Literal decompression - huffTree *huff0.Scratch - - // Sequence decompression - decoders sequenceDecs - recentOffsets [3]int - - // History buffer... - b []byte - - // ignoreBuffer is meant to ignore a number of bytes - // when checking for matches in history - ignoreBuffer int - - windowSize int - allocFrameBuffer int // needed? - error bool - dict *dict -} - -// reset will reset the history to initial state of a frame. -// The history must already have been initialized to the desired size. -func (h *history) reset() { - h.b = h.b[:0] - h.ignoreBuffer = 0 - h.error = false - h.recentOffsets = [3]int{1, 4, 8} - h.decoders.freeDecoders() - h.decoders = sequenceDecs{br: h.decoders.br} - h.freeHuffDecoder() - h.huffTree = nil - h.dict = nil - //printf("history created: %+v (l: %d, c: %d)", *h, len(h.b), cap(h.b)) -} - -func (h *history) freeHuffDecoder() { - if h.huffTree != nil { - if h.dict == nil || h.dict.litEnc != h.huffTree { - huffDecoderPool.Put(h.huffTree) - h.huffTree = nil - } - } -} - -func (h *history) setDict(dict *dict) { - if dict == nil { - return - } - h.dict = dict - h.decoders.litLengths = dict.llDec - h.decoders.offsets = dict.ofDec - h.decoders.matchLengths = dict.mlDec - h.decoders.dict = dict.content - h.recentOffsets = dict.offsets - h.huffTree = dict.litEnc -} - -// append bytes to history. -// This function will make sure there is space for it, -// if the buffer has been allocated with enough extra space. -func (h *history) append(b []byte) { - if len(b) >= h.windowSize { - // Discard all history by simply overwriting - h.b = h.b[:h.windowSize] - copy(h.b, b[len(b)-h.windowSize:]) - return - } - - // If there is space, append it. - if len(b) < cap(h.b)-len(h.b) { - h.b = append(h.b, b...) - return - } - - // Move data down so we only have window size left. 
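Reviewer note on the removed hash.go: `hashLen` is a multiplicative hash that keeps only the top `length` bits of the product with a large odd prime, and the match finders use the result as a hash-table index. A sketch of the 8-byte branch; `hash8` is a local name for illustration.

```go
package main

import "fmt"

const prime8bytes = 0xcf1bbcdcb7a56463 // from the removed hash.go

// hash8 mirrors the mls==8 branch of the removed hashLen helper: multiply the
// 8 input bytes by a large odd prime and keep the top `length` bits, giving a
// bucket index in [0, 1<<length).
func hash8(u uint64, length uint8) uint32 {
	return uint32((u * prime8bytes) >> (64 - length))
}

func main() {
	// Hash two adjacent 8-byte values into a 2^17-entry table.
	fmt.Println(hash8(0x0102030405060708, 17))
	fmt.Println(hash8(0x0102030405060709, 17))
}
```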
- // We know we have less than window size in b at this point. - discard := len(b) + len(h.b) - h.windowSize - copy(h.b, h.b[discard:]) - h.b = h.b[:h.windowSize] - copy(h.b[h.windowSize-len(b):], b) -} - -// ensureBlock will ensure there is space for at least one block... -func (h *history) ensureBlock() { - if cap(h.b) < h.allocFrameBuffer { - h.b = make([]byte, 0, h.allocFrameBuffer) - return - } - - avail := cap(h.b) - len(h.b) - if avail >= h.windowSize || avail > maxCompressedBlockSize { - return - } - // Move data down so we only have window size left. - // We know we have less than window size in b at this point. - discard := len(h.b) - h.windowSize - copy(h.b, h.b[discard:]) - h.b = h.b[:h.windowSize] -} - -// append bytes to history without ever discarding anything. -func (h *history) appendKeep(b []byte) { - h.b = append(h.b, b...) -} diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/LICENSE.txt b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/LICENSE.txt deleted file mode 100644 index 24b53065f..000000000 --- a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/LICENSE.txt +++ /dev/null @@ -1,22 +0,0 @@ -Copyright (c) 2016 Caleb Spare - -MIT License - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/README.md b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/README.md deleted file mode 100644 index 777290d44..000000000 --- a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/README.md +++ /dev/null @@ -1,71 +0,0 @@ -# xxhash - -VENDORED: Go to [github.com/cespare/xxhash](https://github.com/cespare/xxhash) for original package. - -xxhash is a Go implementation of the 64-bit [xxHash] algorithm, XXH64. This is a -high-quality hashing algorithm that is much faster than anything in the Go -standard library. - -This package provides a straightforward API: - -``` -func Sum64(b []byte) uint64 -func Sum64String(s string) uint64 -type Digest struct{ ... } - func New() *Digest -``` - -The `Digest` type implements hash.Hash64. Its key methods are: - -``` -func (*Digest) Write([]byte) (int, error) -func (*Digest) WriteString(string) (int, error) -func (*Digest) Sum64() uint64 -``` - -The package is written with optimized pure Go and also contains even faster -assembly implementations for amd64 and arm64. If desired, the `purego` build tag -opts into using the Go code even on those architectures. 
- -[xxHash]: http://cyan4973.github.io/xxHash/ - -## Compatibility - -This package is in a module and the latest code is in version 2 of the module. -You need a version of Go with at least "minimal module compatibility" to use -github.com/cespare/xxhash/v2: - -* 1.9.7+ for Go 1.9 -* 1.10.3+ for Go 1.10 -* Go 1.11 or later - -I recommend using the latest release of Go. - -## Benchmarks - -Here are some quick benchmarks comparing the pure-Go and assembly -implementations of Sum64. - -| input size | purego | asm | -| ---------- | --------- | --------- | -| 4 B | 1.3 GB/s | 1.2 GB/s | -| 16 B | 2.9 GB/s | 3.5 GB/s | -| 100 B | 6.9 GB/s | 8.1 GB/s | -| 4 KB | 11.7 GB/s | 16.7 GB/s | -| 10 MB | 12.0 GB/s | 17.3 GB/s | - -These numbers were generated on Ubuntu 20.04 with an Intel Xeon Platinum 8252C -CPU using the following commands under Go 1.19.2: - -``` -benchstat <(go test -tags purego -benchtime 500ms -count 15 -bench 'Sum64$') -benchstat <(go test -benchtime 500ms -count 15 -bench 'Sum64$') -``` - -## Projects using this package - -- [InfluxDB](https://github.com/influxdata/influxdb) -- [Prometheus](https://github.com/prometheus/prometheus) -- [VictoriaMetrics](https://github.com/VictoriaMetrics/VictoriaMetrics) -- [FreeCache](https://github.com/coocood/freecache) -- [FastCache](https://github.com/VictoriaMetrics/fastcache) diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go deleted file mode 100644 index fc40c8200..000000000 --- a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go +++ /dev/null @@ -1,230 +0,0 @@ -// Package xxhash implements the 64-bit variant of xxHash (XXH64) as described -// at http://cyan4973.github.io/xxHash/. -// THIS IS VENDORED: Go to github.com/cespare/xxhash for original package. - -package xxhash - -import ( - "encoding/binary" - "errors" - "math/bits" -) - -const ( - prime1 uint64 = 11400714785074694791 - prime2 uint64 = 14029467366897019727 - prime3 uint64 = 1609587929392839161 - prime4 uint64 = 9650029242287828579 - prime5 uint64 = 2870177450012600261 -) - -// Store the primes in an array as well. -// -// The consts are used when possible in Go code to avoid MOVs but we need a -// contiguous array of the assembly code. -var primes = [...]uint64{prime1, prime2, prime3, prime4, prime5} - -// Digest implements hash.Hash64. -type Digest struct { - v1 uint64 - v2 uint64 - v3 uint64 - v4 uint64 - total uint64 - mem [32]byte - n int // how much of mem is used -} - -// New creates a new Digest that computes the 64-bit xxHash algorithm. -func New() *Digest { - var d Digest - d.Reset() - return &d -} - -// Reset clears the Digest's state so that it can be reused. -func (d *Digest) Reset() { - d.v1 = primes[0] + prime2 - d.v2 = prime2 - d.v3 = 0 - d.v4 = -primes[0] - d.total = 0 - d.n = 0 -} - -// Size always returns 8 bytes. -func (d *Digest) Size() int { return 8 } - -// BlockSize always returns 32 bytes. -func (d *Digest) BlockSize() int { return 32 } - -// Write adds more data to d. It always returns len(b), nil. -func (d *Digest) Write(b []byte) (n int, err error) { - n = len(b) - d.total += uint64(n) - - memleft := d.mem[d.n&(len(d.mem)-1):] - - if d.n+n < 32 { - // This new data doesn't even fill the current block. - copy(memleft, b) - d.n += n - return - } - - if d.n > 0 { - // Finish off the partial block. 
- c := copy(memleft, b) - d.v1 = round(d.v1, u64(d.mem[0:8])) - d.v2 = round(d.v2, u64(d.mem[8:16])) - d.v3 = round(d.v3, u64(d.mem[16:24])) - d.v4 = round(d.v4, u64(d.mem[24:32])) - b = b[c:] - d.n = 0 - } - - if len(b) >= 32 { - // One or more full blocks left. - nw := writeBlocks(d, b) - b = b[nw:] - } - - // Store any remaining partial block. - copy(d.mem[:], b) - d.n = len(b) - - return -} - -// Sum appends the current hash to b and returns the resulting slice. -func (d *Digest) Sum(b []byte) []byte { - s := d.Sum64() - return append( - b, - byte(s>>56), - byte(s>>48), - byte(s>>40), - byte(s>>32), - byte(s>>24), - byte(s>>16), - byte(s>>8), - byte(s), - ) -} - -// Sum64 returns the current hash. -func (d *Digest) Sum64() uint64 { - var h uint64 - - if d.total >= 32 { - v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4 - h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4) - h = mergeRound(h, v1) - h = mergeRound(h, v2) - h = mergeRound(h, v3) - h = mergeRound(h, v4) - } else { - h = d.v3 + prime5 - } - - h += d.total - - b := d.mem[:d.n&(len(d.mem)-1)] - for ; len(b) >= 8; b = b[8:] { - k1 := round(0, u64(b[:8])) - h ^= k1 - h = rol27(h)*prime1 + prime4 - } - if len(b) >= 4 { - h ^= uint64(u32(b[:4])) * prime1 - h = rol23(h)*prime2 + prime3 - b = b[4:] - } - for ; len(b) > 0; b = b[1:] { - h ^= uint64(b[0]) * prime5 - h = rol11(h) * prime1 - } - - h ^= h >> 33 - h *= prime2 - h ^= h >> 29 - h *= prime3 - h ^= h >> 32 - - return h -} - -const ( - magic = "xxh\x06" - marshaledSize = len(magic) + 8*5 + 32 -) - -// MarshalBinary implements the encoding.BinaryMarshaler interface. -func (d *Digest) MarshalBinary() ([]byte, error) { - b := make([]byte, 0, marshaledSize) - b = append(b, magic...) - b = appendUint64(b, d.v1) - b = appendUint64(b, d.v2) - b = appendUint64(b, d.v3) - b = appendUint64(b, d.v4) - b = appendUint64(b, d.total) - b = append(b, d.mem[:d.n]...) - b = b[:len(b)+len(d.mem)-d.n] - return b, nil -} - -// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface. -func (d *Digest) UnmarshalBinary(b []byte) error { - if len(b) < len(magic) || string(b[:len(magic)]) != magic { - return errors.New("xxhash: invalid hash state identifier") - } - if len(b) != marshaledSize { - return errors.New("xxhash: invalid hash state size") - } - b = b[len(magic):] - b, d.v1 = consumeUint64(b) - b, d.v2 = consumeUint64(b) - b, d.v3 = consumeUint64(b) - b, d.v4 = consumeUint64(b) - b, d.total = consumeUint64(b) - copy(d.mem[:], b) - d.n = int(d.total % uint64(len(d.mem))) - return nil -} - -func appendUint64(b []byte, x uint64) []byte { - var a [8]byte - binary.LittleEndian.PutUint64(a[:], x) - return append(b, a[:]...) 
-} - -func consumeUint64(b []byte) ([]byte, uint64) { - x := u64(b) - return b[8:], x -} - -func u64(b []byte) uint64 { return binary.LittleEndian.Uint64(b) } -func u32(b []byte) uint32 { return binary.LittleEndian.Uint32(b) } - -func round(acc, input uint64) uint64 { - acc += input * prime2 - acc = rol31(acc) - acc *= prime1 - return acc -} - -func mergeRound(acc, val uint64) uint64 { - val = round(0, val) - acc ^= val - acc = acc*prime1 + prime4 - return acc -} - -func rol1(x uint64) uint64 { return bits.RotateLeft64(x, 1) } -func rol7(x uint64) uint64 { return bits.RotateLeft64(x, 7) } -func rol11(x uint64) uint64 { return bits.RotateLeft64(x, 11) } -func rol12(x uint64) uint64 { return bits.RotateLeft64(x, 12) } -func rol18(x uint64) uint64 { return bits.RotateLeft64(x, 18) } -func rol23(x uint64) uint64 { return bits.RotateLeft64(x, 23) } -func rol27(x uint64) uint64 { return bits.RotateLeft64(x, 27) } -func rol31(x uint64) uint64 { return bits.RotateLeft64(x, 31) } diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s deleted file mode 100644 index ddb63aa91..000000000 --- a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s +++ /dev/null @@ -1,210 +0,0 @@ -//go:build !appengine && gc && !purego && !noasm -// +build !appengine -// +build gc -// +build !purego -// +build !noasm - -#include "textflag.h" - -// Registers: -#define h AX -#define d AX -#define p SI // pointer to advance through b -#define n DX -#define end BX // loop end -#define v1 R8 -#define v2 R9 -#define v3 R10 -#define v4 R11 -#define x R12 -#define prime1 R13 -#define prime2 R14 -#define prime4 DI - -#define round(acc, x) \ - IMULQ prime2, x \ - ADDQ x, acc \ - ROLQ $31, acc \ - IMULQ prime1, acc - -// round0 performs the operation x = round(0, x). -#define round0(x) \ - IMULQ prime2, x \ - ROLQ $31, x \ - IMULQ prime1, x - -// mergeRound applies a merge round on the two registers acc and x. -// It assumes that prime1, prime2, and prime4 have been loaded. -#define mergeRound(acc, x) \ - round0(x) \ - XORQ x, acc \ - IMULQ prime1, acc \ - ADDQ prime4, acc - -// blockLoop processes as many 32-byte blocks as possible, -// updating v1, v2, v3, and v4. It assumes that there is at least one block -// to process. -#define blockLoop() \ -loop: \ - MOVQ +0(p), x \ - round(v1, x) \ - MOVQ +8(p), x \ - round(v2, x) \ - MOVQ +16(p), x \ - round(v3, x) \ - MOVQ +24(p), x \ - round(v4, x) \ - ADDQ $32, p \ - CMPQ p, end \ - JLE loop - -// func Sum64(b []byte) uint64 -TEXT ·Sum64(SB), NOSPLIT|NOFRAME, $0-32 - // Load fixed primes. - MOVQ ·primes+0(SB), prime1 - MOVQ ·primes+8(SB), prime2 - MOVQ ·primes+24(SB), prime4 - - // Load slice. - MOVQ b_base+0(FP), p - MOVQ b_len+8(FP), n - LEAQ (p)(n*1), end - - // The first loop limit will be len(b)-32. - SUBQ $32, end - - // Check whether we have at least one block. - CMPQ n, $32 - JLT noBlocks - - // Set up initial state (v1, v2, v3, v4). 
- MOVQ prime1, v1 - ADDQ prime2, v1 - MOVQ prime2, v2 - XORQ v3, v3 - XORQ v4, v4 - SUBQ prime1, v4 - - blockLoop() - - MOVQ v1, h - ROLQ $1, h - MOVQ v2, x - ROLQ $7, x - ADDQ x, h - MOVQ v3, x - ROLQ $12, x - ADDQ x, h - MOVQ v4, x - ROLQ $18, x - ADDQ x, h - - mergeRound(h, v1) - mergeRound(h, v2) - mergeRound(h, v3) - mergeRound(h, v4) - - JMP afterBlocks - -noBlocks: - MOVQ ·primes+32(SB), h - -afterBlocks: - ADDQ n, h - - ADDQ $24, end - CMPQ p, end - JG try4 - -loop8: - MOVQ (p), x - ADDQ $8, p - round0(x) - XORQ x, h - ROLQ $27, h - IMULQ prime1, h - ADDQ prime4, h - - CMPQ p, end - JLE loop8 - -try4: - ADDQ $4, end - CMPQ p, end - JG try1 - - MOVL (p), x - ADDQ $4, p - IMULQ prime1, x - XORQ x, h - - ROLQ $23, h - IMULQ prime2, h - ADDQ ·primes+16(SB), h - -try1: - ADDQ $4, end - CMPQ p, end - JGE finalize - -loop1: - MOVBQZX (p), x - ADDQ $1, p - IMULQ ·primes+32(SB), x - XORQ x, h - ROLQ $11, h - IMULQ prime1, h - - CMPQ p, end - JL loop1 - -finalize: - MOVQ h, x - SHRQ $33, x - XORQ x, h - IMULQ prime2, h - MOVQ h, x - SHRQ $29, x - XORQ x, h - IMULQ ·primes+16(SB), h - MOVQ h, x - SHRQ $32, x - XORQ x, h - - MOVQ h, ret+24(FP) - RET - -// func writeBlocks(d *Digest, b []byte) int -TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40 - // Load fixed primes needed for round. - MOVQ ·primes+0(SB), prime1 - MOVQ ·primes+8(SB), prime2 - - // Load slice. - MOVQ b_base+8(FP), p - MOVQ b_len+16(FP), n - LEAQ (p)(n*1), end - SUBQ $32, end - - // Load vN from d. - MOVQ s+0(FP), d - MOVQ 0(d), v1 - MOVQ 8(d), v2 - MOVQ 16(d), v3 - MOVQ 24(d), v4 - - // We don't need to check the loop condition here; this function is - // always called with at least one block of data to process. - blockLoop() - - // Copy vN back to d. - MOVQ v1, 0(d) - MOVQ v2, 8(d) - MOVQ v3, 16(d) - MOVQ v4, 24(d) - - // The number of bytes written is p minus the old base pointer. - SUBQ b_base+8(FP), p - MOVQ p, ret+32(FP) - - RET diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s deleted file mode 100644 index ae7d4d329..000000000 --- a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s +++ /dev/null @@ -1,184 +0,0 @@ -//go:build !appengine && gc && !purego && !noasm -// +build !appengine -// +build gc -// +build !purego -// +build !noasm - -#include "textflag.h" - -// Registers: -#define digest R1 -#define h R2 // return value -#define p R3 // input pointer -#define n R4 // input length -#define nblocks R5 // n / 32 -#define prime1 R7 -#define prime2 R8 -#define prime3 R9 -#define prime4 R10 -#define prime5 R11 -#define v1 R12 -#define v2 R13 -#define v3 R14 -#define v4 R15 -#define x1 R20 -#define x2 R21 -#define x3 R22 -#define x4 R23 - -#define round(acc, x) \ - MADD prime2, acc, x, acc \ - ROR $64-31, acc \ - MUL prime1, acc - -// round0 performs the operation x = round(0, x). -#define round0(x) \ - MUL prime2, x \ - ROR $64-31, x \ - MUL prime1, x - -#define mergeRound(acc, x) \ - round0(x) \ - EOR x, acc \ - MADD acc, prime4, prime1, acc - -// blockLoop processes as many 32-byte blocks as possible, -// updating v1, v2, v3, and v4. It assumes that n >= 32. 
-#define blockLoop() \ - LSR $5, n, nblocks \ - PCALIGN $16 \ - loop: \ - LDP.P 16(p), (x1, x2) \ - LDP.P 16(p), (x3, x4) \ - round(v1, x1) \ - round(v2, x2) \ - round(v3, x3) \ - round(v4, x4) \ - SUB $1, nblocks \ - CBNZ nblocks, loop - -// func Sum64(b []byte) uint64 -TEXT ·Sum64(SB), NOSPLIT|NOFRAME, $0-32 - LDP b_base+0(FP), (p, n) - - LDP ·primes+0(SB), (prime1, prime2) - LDP ·primes+16(SB), (prime3, prime4) - MOVD ·primes+32(SB), prime5 - - CMP $32, n - CSEL LT, prime5, ZR, h // if n < 32 { h = prime5 } else { h = 0 } - BLT afterLoop - - ADD prime1, prime2, v1 - MOVD prime2, v2 - MOVD $0, v3 - NEG prime1, v4 - - blockLoop() - - ROR $64-1, v1, x1 - ROR $64-7, v2, x2 - ADD x1, x2 - ROR $64-12, v3, x3 - ROR $64-18, v4, x4 - ADD x3, x4 - ADD x2, x4, h - - mergeRound(h, v1) - mergeRound(h, v2) - mergeRound(h, v3) - mergeRound(h, v4) - -afterLoop: - ADD n, h - - TBZ $4, n, try8 - LDP.P 16(p), (x1, x2) - - round0(x1) - - // NOTE: here and below, sequencing the EOR after the ROR (using a - // rotated register) is worth a small but measurable speedup for small - // inputs. - ROR $64-27, h - EOR x1 @> 64-27, h, h - MADD h, prime4, prime1, h - - round0(x2) - ROR $64-27, h - EOR x2 @> 64-27, h, h - MADD h, prime4, prime1, h - -try8: - TBZ $3, n, try4 - MOVD.P 8(p), x1 - - round0(x1) - ROR $64-27, h - EOR x1 @> 64-27, h, h - MADD h, prime4, prime1, h - -try4: - TBZ $2, n, try2 - MOVWU.P 4(p), x2 - - MUL prime1, x2 - ROR $64-23, h - EOR x2 @> 64-23, h, h - MADD h, prime3, prime2, h - -try2: - TBZ $1, n, try1 - MOVHU.P 2(p), x3 - AND $255, x3, x1 - LSR $8, x3, x2 - - MUL prime5, x1 - ROR $64-11, h - EOR x1 @> 64-11, h, h - MUL prime1, h - - MUL prime5, x2 - ROR $64-11, h - EOR x2 @> 64-11, h, h - MUL prime1, h - -try1: - TBZ $0, n, finalize - MOVBU (p), x4 - - MUL prime5, x4 - ROR $64-11, h - EOR x4 @> 64-11, h, h - MUL prime1, h - -finalize: - EOR h >> 33, h - MUL prime2, h - EOR h >> 29, h - MUL prime3, h - EOR h >> 32, h - - MOVD h, ret+24(FP) - RET - -// func writeBlocks(s *Digest, b []byte) int -TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40 - LDP ·primes+0(SB), (prime1, prime2) - - // Load state. Assume v[1-4] are stored contiguously. - MOVD s+0(FP), digest - LDP 0(digest), (v1, v2) - LDP 16(digest), (v3, v4) - - LDP b_base+8(FP), (p, n) - - blockLoop() - - // Store updated state. - STP (v1, v2), 0(digest) - STP (v3, v4), 16(digest) - - BIC $31, n - MOVD n, ret+32(FP) - RET diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_asm.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_asm.go deleted file mode 100644 index d4221edf4..000000000 --- a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_asm.go +++ /dev/null @@ -1,16 +0,0 @@ -//go:build (amd64 || arm64) && !appengine && gc && !purego && !noasm -// +build amd64 arm64 -// +build !appengine -// +build gc -// +build !purego -// +build !noasm - -package xxhash - -// Sum64 computes the 64-bit xxHash digest of b. 
-// -//go:noescape -func Sum64(b []byte) uint64 - -//go:noescape -func writeBlocks(s *Digest, b []byte) int diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go deleted file mode 100644 index 0be16cefc..000000000 --- a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go +++ /dev/null @@ -1,76 +0,0 @@ -//go:build (!amd64 && !arm64) || appengine || !gc || purego || noasm -// +build !amd64,!arm64 appengine !gc purego noasm - -package xxhash - -// Sum64 computes the 64-bit xxHash digest of b. -func Sum64(b []byte) uint64 { - // A simpler version would be - // d := New() - // d.Write(b) - // return d.Sum64() - // but this is faster, particularly for small inputs. - - n := len(b) - var h uint64 - - if n >= 32 { - v1 := primes[0] + prime2 - v2 := prime2 - v3 := uint64(0) - v4 := -primes[0] - for len(b) >= 32 { - v1 = round(v1, u64(b[0:8:len(b)])) - v2 = round(v2, u64(b[8:16:len(b)])) - v3 = round(v3, u64(b[16:24:len(b)])) - v4 = round(v4, u64(b[24:32:len(b)])) - b = b[32:len(b):len(b)] - } - h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4) - h = mergeRound(h, v1) - h = mergeRound(h, v2) - h = mergeRound(h, v3) - h = mergeRound(h, v4) - } else { - h = prime5 - } - - h += uint64(n) - - for ; len(b) >= 8; b = b[8:] { - k1 := round(0, u64(b[:8])) - h ^= k1 - h = rol27(h)*prime1 + prime4 - } - if len(b) >= 4 { - h ^= uint64(u32(b[:4])) * prime1 - h = rol23(h)*prime2 + prime3 - b = b[4:] - } - for ; len(b) > 0; b = b[1:] { - h ^= uint64(b[0]) * prime5 - h = rol11(h) * prime1 - } - - h ^= h >> 33 - h *= prime2 - h ^= h >> 29 - h *= prime3 - h ^= h >> 32 - - return h -} - -func writeBlocks(d *Digest, b []byte) int { - v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4 - n := len(b) - for len(b) >= 32 { - v1 = round(v1, u64(b[0:8:len(b)])) - v2 = round(v2, u64(b[8:16:len(b)])) - v3 = round(v3, u64(b[16:24:len(b)])) - v4 = round(v4, u64(b[24:32:len(b)])) - b = b[32:len(b):len(b)] - } - d.v1, d.v2, d.v3, d.v4 = v1, v2, v3, v4 - return n - len(b) -} diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_safe.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_safe.go deleted file mode 100644 index 6f3b0cb10..000000000 --- a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_safe.go +++ /dev/null @@ -1,11 +0,0 @@ -package xxhash - -// Sum64String computes the 64-bit xxHash digest of s. -func Sum64String(s string) uint64 { - return Sum64([]byte(s)) -} - -// WriteString adds more data to d. It always returns len(s), nil. -func (d *Digest) WriteString(s string) (n int, err error) { - return d.Write([]byte(s)) -} diff --git a/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.go b/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.go deleted file mode 100644 index f41932b7a..000000000 --- a/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.go +++ /dev/null @@ -1,16 +0,0 @@ -//go:build amd64 && !appengine && !noasm && gc -// +build amd64,!appengine,!noasm,gc - -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. 
- -package zstd - -// matchLen returns how many bytes match in a and b -// -// It assumes that: -// -// len(a) <= len(b) and len(a) > 0 -// -//go:noescape -func matchLen(a []byte, b []byte) int diff --git a/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.s b/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.s deleted file mode 100644 index 0782b86e3..000000000 --- a/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.s +++ /dev/null @@ -1,66 +0,0 @@ -// Copied from S2 implementation. - -//go:build !appengine && !noasm && gc && !noasm - -#include "textflag.h" - -// func matchLen(a []byte, b []byte) int -TEXT ·matchLen(SB), NOSPLIT, $0-56 - MOVQ a_base+0(FP), AX - MOVQ b_base+24(FP), CX - MOVQ a_len+8(FP), DX - - // matchLen - XORL SI, SI - CMPL DX, $0x08 - JB matchlen_match4_standalone - -matchlen_loopback_standalone: - MOVQ (AX)(SI*1), BX - XORQ (CX)(SI*1), BX - JZ matchlen_loop_standalone - -#ifdef GOAMD64_v3 - TZCNTQ BX, BX -#else - BSFQ BX, BX -#endif - SHRL $0x03, BX - LEAL (SI)(BX*1), SI - JMP gen_match_len_end - -matchlen_loop_standalone: - LEAL -8(DX), DX - LEAL 8(SI), SI - CMPL DX, $0x08 - JAE matchlen_loopback_standalone - -matchlen_match4_standalone: - CMPL DX, $0x04 - JB matchlen_match2_standalone - MOVL (AX)(SI*1), BX - CMPL (CX)(SI*1), BX - JNE matchlen_match2_standalone - LEAL -4(DX), DX - LEAL 4(SI), SI - -matchlen_match2_standalone: - CMPL DX, $0x02 - JB matchlen_match1_standalone - MOVW (AX)(SI*1), BX - CMPW (CX)(SI*1), BX - JNE matchlen_match1_standalone - LEAL -2(DX), DX - LEAL 2(SI), SI - -matchlen_match1_standalone: - CMPL DX, $0x01 - JB gen_match_len_end - MOVB (AX)(SI*1), BL - CMPB (CX)(SI*1), BL - JNE gen_match_len_end - INCL SI - -gen_match_len_end: - MOVQ SI, ret+48(FP) - RET diff --git a/vendor/github.com/klauspost/compress/zstd/matchlen_generic.go b/vendor/github.com/klauspost/compress/zstd/matchlen_generic.go deleted file mode 100644 index 57b9c31c0..000000000 --- a/vendor/github.com/klauspost/compress/zstd/matchlen_generic.go +++ /dev/null @@ -1,33 +0,0 @@ -//go:build !amd64 || appengine || !gc || noasm -// +build !amd64 appengine !gc noasm - -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. - -package zstd - -import ( - "encoding/binary" - "math/bits" -) - -// matchLen returns the maximum common prefix length of a and b. -// a must be the shortest of the two. -func matchLen(a, b []byte) (n int) { - for ; len(a) >= 8 && len(b) >= 8; a, b = a[8:], b[8:] { - diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b) - if diff != 0 { - return n + bits.TrailingZeros64(diff)>>3 - } - n += 8 - } - - for i := range a { - if a[i] != b[i] { - break - } - n++ - } - return n - -} diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec.go b/vendor/github.com/klauspost/compress/zstd/seqdec.go deleted file mode 100644 index d7fe6d82d..000000000 --- a/vendor/github.com/klauspost/compress/zstd/seqdec.go +++ /dev/null @@ -1,503 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -import ( - "errors" - "fmt" - "io" -) - -type seq struct { - litLen uint32 - matchLen uint32 - offset uint32 - - // Codes are stored here for the encoder - // so they only have to be looked up once. 
- llCode, mlCode, ofCode uint8 -} - -type seqVals struct { - ll, ml, mo int -} - -func (s seq) String() string { - if s.offset <= 3 { - if s.offset == 0 { - return fmt.Sprint("litLen:", s.litLen, ", matchLen:", s.matchLen+zstdMinMatch, ", offset: INVALID (0)") - } - return fmt.Sprint("litLen:", s.litLen, ", matchLen:", s.matchLen+zstdMinMatch, ", offset:", s.offset, " (repeat)") - } - return fmt.Sprint("litLen:", s.litLen, ", matchLen:", s.matchLen+zstdMinMatch, ", offset:", s.offset-3, " (new)") -} - -type seqCompMode uint8 - -const ( - compModePredefined seqCompMode = iota - compModeRLE - compModeFSE - compModeRepeat -) - -type sequenceDec struct { - // decoder keeps track of the current state and updates it from the bitstream. - fse *fseDecoder - state fseState - repeat bool -} - -// init the state of the decoder with input from stream. -func (s *sequenceDec) init(br *bitReader) error { - if s.fse == nil { - return errors.New("sequence decoder not defined") - } - s.state.init(br, s.fse.actualTableLog, s.fse.dt[:1< cap(s.out) { - addBytes := s.seqSize + len(s.out) - s.out = append(s.out, make([]byte, addBytes)...) - s.out = s.out[:len(s.out)-addBytes] - } - - if debugDecoder { - printf("Execute %d seqs with hist %d, dict %d, literals: %d into %d bytes\n", len(seqs), len(hist), len(s.dict), len(s.literals), s.seqSize) - } - - var t = len(s.out) - out := s.out[:t+s.seqSize] - - for _, seq := range seqs { - // Add literals - copy(out[t:], s.literals[:seq.ll]) - t += seq.ll - s.literals = s.literals[seq.ll:] - - // Copy from dictionary... - if seq.mo > t+len(hist) || seq.mo > s.windowSize { - if len(s.dict) == 0 { - return fmt.Errorf("match offset (%d) bigger than current history (%d)", seq.mo, t+len(hist)) - } - - // we may be in dictionary. - dictO := len(s.dict) - (seq.mo - (t + len(hist))) - if dictO < 0 || dictO >= len(s.dict) { - return fmt.Errorf("match offset (%d) bigger than current history+dict (%d)", seq.mo, t+len(hist)+len(s.dict)) - } - end := dictO + seq.ml - if end > len(s.dict) { - n := len(s.dict) - dictO - copy(out[t:], s.dict[dictO:]) - t += n - seq.ml -= n - } else { - copy(out[t:], s.dict[dictO:end]) - t += end - dictO - continue - } - } - - // Copy from history. - if v := seq.mo - t; v > 0 { - // v is the start position in history from end. - start := len(hist) - v - if seq.ml > v { - // Some goes into current block. - // Copy remainder of history - copy(out[t:], hist[start:]) - t += v - seq.ml -= v - } else { - copy(out[t:], hist[start:start+seq.ml]) - t += seq.ml - continue - } - } - // We must be in current buffer now - if seq.ml > 0 { - start := t - seq.mo - if seq.ml <= t-start { - // No overlap - copy(out[t:], out[start:start+seq.ml]) - t += seq.ml - continue - } else { - // Overlapping copy - // Extend destination slice and copy one byte at the time. - src := out[start : start+seq.ml] - dst := out[t:] - dst = dst[:len(src)] - t += len(src) - // Destination is the space we just added. - for i := range src { - dst[i] = src[i] - } - } - } - } - - // Add final literals - copy(out[t:], s.literals) - if debugDecoder { - t += len(s.literals) - if t != len(out) { - panic(fmt.Errorf("length mismatch, want %d, got %d, ss: %d", len(out), t, s.seqSize)) - } - } - s.out = out - - return nil -} - -// decode sequences from the stream with the provided history. 
-func (s *sequenceDecs) decodeSync(hist []byte) error { - supported, err := s.decodeSyncSimple(hist) - if supported { - return err - } - - br := s.br - seqs := s.nSeqs - startSize := len(s.out) - // Grab full sizes tables, to avoid bounds checks. - llTable, mlTable, ofTable := s.litLengths.fse.dt[:maxTablesize], s.matchLengths.fse.dt[:maxTablesize], s.offsets.fse.dt[:maxTablesize] - llState, mlState, ofState := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state - out := s.out - maxBlockSize := maxCompressedBlockSize - if s.windowSize < maxBlockSize { - maxBlockSize = s.windowSize - } - - if debugDecoder { - println("decodeSync: decoding", seqs, "sequences", br.remain(), "bits remain on stream") - } - for i := seqs - 1; i >= 0; i-- { - if br.overread() { - printf("reading sequence %d, exceeded available data. Overread by %d\n", seqs-i, -br.remain()) - return io.ErrUnexpectedEOF - } - var ll, mo, ml int - if len(br.in) > 4+((maxOffsetBits+16+16)>>3) { - // inlined function: - // ll, mo, ml = s.nextFast(br, llState, mlState, ofState) - - // Final will not read from stream. - var llB, mlB, moB uint8 - ll, llB = llState.final() - ml, mlB = mlState.final() - mo, moB = ofState.final() - - // extra bits are stored in reverse order. - br.fillFast() - mo += br.getBits(moB) - if s.maxBits > 32 { - br.fillFast() - } - ml += br.getBits(mlB) - ll += br.getBits(llB) - - if moB > 1 { - s.prevOffset[2] = s.prevOffset[1] - s.prevOffset[1] = s.prevOffset[0] - s.prevOffset[0] = mo - } else { - // mo = s.adjustOffset(mo, ll, moB) - // Inlined for rather big speedup - if ll == 0 { - // There is an exception though, when current sequence's literals_length = 0. - // In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2, - // an offset_value of 2 means Repeated_Offset3, and an offset_value of 3 means Repeated_Offset1 - 1_byte. - mo++ - } - - if mo == 0 { - mo = s.prevOffset[0] - } else { - var temp int - if mo == 3 { - temp = s.prevOffset[0] - 1 - } else { - temp = s.prevOffset[mo] - } - - if temp == 0 { - // 0 is not valid; input is corrupted; force offset to 1 - println("WARNING: temp was 0") - temp = 1 - } - - if mo != 1 { - s.prevOffset[2] = s.prevOffset[1] - } - s.prevOffset[1] = s.prevOffset[0] - s.prevOffset[0] = temp - mo = temp - } - } - br.fillFast() - } else { - ll, mo, ml = s.next(br, llState, mlState, ofState) - br.fill() - } - - if debugSequences { - println("Seq", seqs-i-1, "Litlen:", ll, "mo:", mo, "(abs) ml:", ml) - } - - if ll > len(s.literals) { - return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, len(s.literals)) - } - size := ll + ml + len(out) - if size-startSize > maxBlockSize { - return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) - } - if size > cap(out) { - // Not enough size, which can happen under high volume block streaming conditions - // but could be if destination slice is too small for sync operations. - // over-allocating here can create a large amount of GC pressure so we try to keep - // it as contained as possible - used := len(out) - startSize - addBytes := 256 + ll + ml + used>>2 - // Clamp to max block size. - if used+addBytes > maxBlockSize { - addBytes = maxBlockSize - used - } - out = append(out, make([]byte, addBytes)...) - out = out[:len(out)-addBytes] - } - if ml > maxMatchLen { - return fmt.Errorf("match len (%d) bigger than max allowed length", ml) - } - - // Add literals - out = append(out, s.literals[:ll]...) 
- s.literals = s.literals[ll:] - - if mo == 0 && ml > 0 { - return fmt.Errorf("zero matchoff and matchlen (%d) > 0", ml) - } - - if mo > len(out)+len(hist) || mo > s.windowSize { - if len(s.dict) == 0 { - return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(out)+len(hist)-startSize) - } - - // we may be in dictionary. - dictO := len(s.dict) - (mo - (len(out) + len(hist))) - if dictO < 0 || dictO >= len(s.dict) { - return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(out)+len(hist)-startSize) - } - end := dictO + ml - if end > len(s.dict) { - out = append(out, s.dict[dictO:]...) - ml -= len(s.dict) - dictO - } else { - out = append(out, s.dict[dictO:end]...) - mo = 0 - ml = 0 - } - } - - // Copy from history. - // TODO: Blocks without history could be made to ignore this completely. - if v := mo - len(out); v > 0 { - // v is the start position in history from end. - start := len(hist) - v - if ml > v { - // Some goes into current block. - // Copy remainder of history - out = append(out, hist[start:]...) - ml -= v - } else { - out = append(out, hist[start:start+ml]...) - ml = 0 - } - } - // We must be in current buffer now - if ml > 0 { - start := len(out) - mo - if ml <= len(out)-start { - // No overlap - out = append(out, out[start:start+ml]...) - } else { - // Overlapping copy - // Extend destination slice and copy one byte at the time. - out = out[:len(out)+ml] - src := out[start : start+ml] - // Destination is the space we just added. - dst := out[len(out)-ml:] - dst = dst[:len(src)] - for i := range src { - dst[i] = src[i] - } - } - } - if i == 0 { - // This is the last sequence, so we shouldn't update state. - break - } - - // Manually inlined, ~ 5-20% faster - // Update all 3 states at once. Approx 20% faster. - nBits := llState.nbBits() + mlState.nbBits() + ofState.nbBits() - if nBits == 0 { - llState = llTable[llState.newState()&maxTableMask] - mlState = mlTable[mlState.newState()&maxTableMask] - ofState = ofTable[ofState.newState()&maxTableMask] - } else { - bits := br.get32BitsFast(nBits) - - lowBits := uint16(bits >> ((ofState.nbBits() + mlState.nbBits()) & 31)) - llState = llTable[(llState.newState()+lowBits)&maxTableMask] - - lowBits = uint16(bits >> (ofState.nbBits() & 31)) - lowBits &= bitMask[mlState.nbBits()&15] - mlState = mlTable[(mlState.newState()+lowBits)&maxTableMask] - - lowBits = uint16(bits) & bitMask[ofState.nbBits()&15] - ofState = ofTable[(ofState.newState()+lowBits)&maxTableMask] - } - } - - if size := len(s.literals) + len(out) - startSize; size > maxBlockSize { - return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) - } - - // Add final literals - s.out = append(out, s.literals...) - return br.close() -} - -var bitMask [16]uint16 - -func init() { - for i := range bitMask[:] { - bitMask[i] = uint16((1 << uint(i)) - 1) - } -} - -func (s *sequenceDecs) next(br *bitReader, llState, mlState, ofState decSymbol) (ll, mo, ml int) { - // Final will not read from stream. - ll, llB := llState.final() - ml, mlB := mlState.final() - mo, moB := ofState.final() - - // extra bits are stored in reverse order. 
- br.fill() - mo += br.getBits(moB) - if s.maxBits > 32 { - br.fill() - } - // matchlength+literal length, max 32 bits - ml += br.getBits(mlB) - ll += br.getBits(llB) - mo = s.adjustOffset(mo, ll, moB) - return -} - -func (s *sequenceDecs) adjustOffset(offset, litLen int, offsetB uint8) int { - if offsetB > 1 { - s.prevOffset[2] = s.prevOffset[1] - s.prevOffset[1] = s.prevOffset[0] - s.prevOffset[0] = offset - return offset - } - - if litLen == 0 { - // There is an exception though, when current sequence's literals_length = 0. - // In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2, - // an offset_value of 2 means Repeated_Offset3, and an offset_value of 3 means Repeated_Offset1 - 1_byte. - offset++ - } - - if offset == 0 { - return s.prevOffset[0] - } - var temp int - if offset == 3 { - temp = s.prevOffset[0] - 1 - } else { - temp = s.prevOffset[offset] - } - - if temp == 0 { - // 0 is not valid; input is corrupted; force offset to 1 - println("temp was 0") - temp = 1 - } - - if offset != 1 { - s.prevOffset[2] = s.prevOffset[1] - } - s.prevOffset[1] = s.prevOffset[0] - s.prevOffset[0] = temp - return temp -} diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go deleted file mode 100644 index 8adabd828..000000000 --- a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go +++ /dev/null @@ -1,394 +0,0 @@ -//go:build amd64 && !appengine && !noasm && gc -// +build amd64,!appengine,!noasm,gc - -package zstd - -import ( - "fmt" - "io" - - "github.com/klauspost/compress/internal/cpuinfo" -) - -type decodeSyncAsmContext struct { - llTable []decSymbol - mlTable []decSymbol - ofTable []decSymbol - llState uint64 - mlState uint64 - ofState uint64 - iteration int - litRemain int - out []byte - outPosition int - literals []byte - litPosition int - history []byte - windowSize int - ll int // set on error (not for all errors, please refer to _generate/gen.go) - ml int // set on error (not for all errors, please refer to _generate/gen.go) - mo int // set on error (not for all errors, please refer to _generate/gen.go) -} - -// sequenceDecs_decodeSync_amd64 implements the main loop of sequenceDecs.decodeSync in x86 asm. -// -// Please refer to seqdec_generic.go for the reference implementation. -// -//go:noescape -func sequenceDecs_decodeSync_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int - -// sequenceDecs_decodeSync_bmi2 implements the main loop of sequenceDecs.decodeSync in x86 asm with BMI2 extensions. -// -//go:noescape -func sequenceDecs_decodeSync_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int - -// sequenceDecs_decodeSync_safe_amd64 does the same as above, but does not write more than output buffer. -// -//go:noescape -func sequenceDecs_decodeSync_safe_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int - -// sequenceDecs_decodeSync_safe_bmi2 does the same as above, but does not write more than output buffer. -// -//go:noescape -func sequenceDecs_decodeSync_safe_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int - -// decode sequences from the stream with the provided history but without a dictionary. 
-func (s *sequenceDecs) decodeSyncSimple(hist []byte) (bool, error) { - if len(s.dict) > 0 { - return false, nil - } - if s.maxSyncLen == 0 && cap(s.out)-len(s.out) < maxCompressedBlockSize { - return false, nil - } - - // FIXME: Using unsafe memory copies leads to rare, random crashes - // with fuzz testing. It is therefore disabled for now. - const useSafe = true - /* - useSafe := false - if s.maxSyncLen == 0 && cap(s.out)-len(s.out) < maxCompressedBlockSizeAlloc { - useSafe = true - } - if s.maxSyncLen > 0 && cap(s.out)-len(s.out)-compressedBlockOverAlloc < int(s.maxSyncLen) { - useSafe = true - } - if cap(s.literals) < len(s.literals)+compressedBlockOverAlloc { - useSafe = true - } - */ - - br := s.br - - maxBlockSize := maxCompressedBlockSize - if s.windowSize < maxBlockSize { - maxBlockSize = s.windowSize - } - - ctx := decodeSyncAsmContext{ - llTable: s.litLengths.fse.dt[:maxTablesize], - mlTable: s.matchLengths.fse.dt[:maxTablesize], - ofTable: s.offsets.fse.dt[:maxTablesize], - llState: uint64(s.litLengths.state.state), - mlState: uint64(s.matchLengths.state.state), - ofState: uint64(s.offsets.state.state), - iteration: s.nSeqs - 1, - litRemain: len(s.literals), - out: s.out, - outPosition: len(s.out), - literals: s.literals, - windowSize: s.windowSize, - history: hist, - } - - s.seqSize = 0 - startSize := len(s.out) - - var errCode int - if cpuinfo.HasBMI2() { - if useSafe { - errCode = sequenceDecs_decodeSync_safe_bmi2(s, br, &ctx) - } else { - errCode = sequenceDecs_decodeSync_bmi2(s, br, &ctx) - } - } else { - if useSafe { - errCode = sequenceDecs_decodeSync_safe_amd64(s, br, &ctx) - } else { - errCode = sequenceDecs_decodeSync_amd64(s, br, &ctx) - } - } - switch errCode { - case noError: - break - - case errorMatchLenOfsMismatch: - return true, fmt.Errorf("zero matchoff and matchlen (%d) > 0", ctx.ml) - - case errorMatchLenTooBig: - return true, fmt.Errorf("match len (%d) bigger than max allowed length", ctx.ml) - - case errorMatchOffTooBig: - return true, fmt.Errorf("match offset (%d) bigger than current history (%d)", - ctx.mo, ctx.outPosition+len(hist)-startSize) - - case errorNotEnoughLiterals: - return true, fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", - ctx.ll, ctx.litRemain+ctx.ll) - - case errorOverread: - return true, io.ErrUnexpectedEOF - - case errorNotEnoughSpace: - size := ctx.outPosition + ctx.ll + ctx.ml - if debugDecoder { - println("msl:", s.maxSyncLen, "cap", cap(s.out), "bef:", startSize, "sz:", size-startSize, "mbs:", maxBlockSize, "outsz:", cap(s.out)-startSize) - } - return true, fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) - - default: - return true, fmt.Errorf("sequenceDecs_decode returned erronous code %d", errCode) - } - - s.seqSize += ctx.litRemain - if s.seqSize > maxBlockSize { - return true, fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) - } - err := br.close() - if err != nil { - printf("Closing sequences: %v, %+v\n", err, *br) - return true, err - } - - s.literals = s.literals[ctx.litPosition:] - t := ctx.outPosition - s.out = s.out[:t] - - // Add final literals - s.out = append(s.out, s.literals...) 
- if debugDecoder { - t += len(s.literals) - if t != len(s.out) { - panic(fmt.Errorf("length mismatch, want %d, got %d", len(s.out), t)) - } - } - - return true, nil -} - -// -------------------------------------------------------------------------------- - -type decodeAsmContext struct { - llTable []decSymbol - mlTable []decSymbol - ofTable []decSymbol - llState uint64 - mlState uint64 - ofState uint64 - iteration int - seqs []seqVals - litRemain int -} - -const noError = 0 - -// error reported when mo == 0 && ml > 0 -const errorMatchLenOfsMismatch = 1 - -// error reported when ml > maxMatchLen -const errorMatchLenTooBig = 2 - -// error reported when mo > available history or mo > s.windowSize -const errorMatchOffTooBig = 3 - -// error reported when the sum of literal lengths exeeceds the literal buffer size -const errorNotEnoughLiterals = 4 - -// error reported when capacity of `out` is too small -const errorNotEnoughSpace = 5 - -// error reported when bits are overread. -const errorOverread = 6 - -// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm. -// -// Please refer to seqdec_generic.go for the reference implementation. -// -//go:noescape -func sequenceDecs_decode_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int - -// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm. -// -// Please refer to seqdec_generic.go for the reference implementation. -// -//go:noescape -func sequenceDecs_decode_56_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int - -// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm with BMI2 extensions. -// -//go:noescape -func sequenceDecs_decode_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int - -// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm with BMI2 extensions. -// -//go:noescape -func sequenceDecs_decode_56_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int - -// decode sequences from the stream without the provided history. 
-func (s *sequenceDecs) decode(seqs []seqVals) error { - br := s.br - - maxBlockSize := maxCompressedBlockSize - if s.windowSize < maxBlockSize { - maxBlockSize = s.windowSize - } - - ctx := decodeAsmContext{ - llTable: s.litLengths.fse.dt[:maxTablesize], - mlTable: s.matchLengths.fse.dt[:maxTablesize], - ofTable: s.offsets.fse.dt[:maxTablesize], - llState: uint64(s.litLengths.state.state), - mlState: uint64(s.matchLengths.state.state), - ofState: uint64(s.offsets.state.state), - seqs: seqs, - iteration: len(seqs) - 1, - litRemain: len(s.literals), - } - - if debugDecoder { - println("decode: decoding", len(seqs), "sequences", br.remain(), "bits remain on stream") - } - - s.seqSize = 0 - lte56bits := s.maxBits+s.offsets.fse.actualTableLog+s.matchLengths.fse.actualTableLog+s.litLengths.fse.actualTableLog <= 56 - var errCode int - if cpuinfo.HasBMI2() { - if lte56bits { - errCode = sequenceDecs_decode_56_bmi2(s, br, &ctx) - } else { - errCode = sequenceDecs_decode_bmi2(s, br, &ctx) - } - } else { - if lte56bits { - errCode = sequenceDecs_decode_56_amd64(s, br, &ctx) - } else { - errCode = sequenceDecs_decode_amd64(s, br, &ctx) - } - } - if errCode != 0 { - i := len(seqs) - ctx.iteration - 1 - switch errCode { - case errorMatchLenOfsMismatch: - ml := ctx.seqs[i].ml - return fmt.Errorf("zero matchoff and matchlen (%d) > 0", ml) - - case errorMatchLenTooBig: - ml := ctx.seqs[i].ml - return fmt.Errorf("match len (%d) bigger than max allowed length", ml) - - case errorNotEnoughLiterals: - ll := ctx.seqs[i].ll - return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, ctx.litRemain+ll) - case errorOverread: - return io.ErrUnexpectedEOF - } - - return fmt.Errorf("sequenceDecs_decode_amd64 returned erronous code %d", errCode) - } - - if ctx.litRemain < 0 { - return fmt.Errorf("literal count is too big: total available %d, total requested %d", - len(s.literals), len(s.literals)-ctx.litRemain) - } - - s.seqSize += ctx.litRemain - if s.seqSize > maxBlockSize { - return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) - } - if debugDecoder { - println("decode: ", br.remain(), "bits remain on stream. code:", errCode) - } - err := br.close() - if err != nil { - printf("Closing sequences: %v, %+v\n", err, *br) - } - return err -} - -// -------------------------------------------------------------------------------- - -type executeAsmContext struct { - seqs []seqVals - seqIndex int - out []byte - history []byte - literals []byte - outPosition int - litPosition int - windowSize int -} - -// sequenceDecs_executeSimple_amd64 implements the main loop of sequenceDecs.executeSimple in x86 asm. -// -// Returns false if a match offset is too big. -// -// Please refer to seqdec_generic.go for the reference implementation. -// -//go:noescape -func sequenceDecs_executeSimple_amd64(ctx *executeAsmContext) bool - -// Same as above, but with safe memcopies -// -//go:noescape -func sequenceDecs_executeSimple_safe_amd64(ctx *executeAsmContext) bool - -// executeSimple handles cases when dictionary is not used. -func (s *sequenceDecs) executeSimple(seqs []seqVals, hist []byte) error { - // Ensure we have enough output size... - if len(s.out)+s.seqSize+compressedBlockOverAlloc > cap(s.out) { - addBytes := s.seqSize + len(s.out) + compressedBlockOverAlloc - s.out = append(s.out, make([]byte, addBytes)...) 
- s.out = s.out[:len(s.out)-addBytes] - } - - if debugDecoder { - printf("Execute %d seqs with literals: %d into %d bytes\n", len(seqs), len(s.literals), s.seqSize) - } - - var t = len(s.out) - out := s.out[:t+s.seqSize] - - ctx := executeAsmContext{ - seqs: seqs, - seqIndex: 0, - out: out, - history: hist, - outPosition: t, - litPosition: 0, - literals: s.literals, - windowSize: s.windowSize, - } - var ok bool - if cap(s.literals) < len(s.literals)+compressedBlockOverAlloc { - ok = sequenceDecs_executeSimple_safe_amd64(&ctx) - } else { - ok = sequenceDecs_executeSimple_amd64(&ctx) - } - if !ok { - return fmt.Errorf("match offset (%d) bigger than current history (%d)", - seqs[ctx.seqIndex].mo, ctx.outPosition+len(hist)) - } - s.literals = s.literals[ctx.litPosition:] - t = ctx.outPosition - - // Add final literals - copy(out[t:], s.literals) - if debugDecoder { - t += len(s.literals) - if t != len(out) { - panic(fmt.Errorf("length mismatch, want %d, got %d, ss: %d", len(out), t, s.seqSize)) - } - } - s.out = out - - return nil -} diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s deleted file mode 100644 index 5b06174b8..000000000 --- a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s +++ /dev/null @@ -1,4151 +0,0 @@ -// Code generated by command: go run gen.go -out ../seqdec_amd64.s -pkg=zstd. DO NOT EDIT. - -//go:build !appengine && !noasm && gc && !noasm - -// func sequenceDecs_decode_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int -// Requires: CMOV -TEXT ·sequenceDecs_decode_amd64(SB), $8-32 - MOVQ br+8(FP), CX - MOVQ 24(CX), DX - MOVBQZX 32(CX), BX - MOVQ (CX), AX - MOVQ 8(CX), SI - ADDQ SI, AX - MOVQ AX, (SP) - MOVQ ctx+16(FP), AX - MOVQ 72(AX), DI - MOVQ 80(AX), R8 - MOVQ 88(AX), R9 - MOVQ 104(AX), R10 - MOVQ s+0(FP), AX - MOVQ 144(AX), R11 - MOVQ 152(AX), R12 - MOVQ 160(AX), R13 - -sequenceDecs_decode_amd64_main_loop: - MOVQ (SP), R14 - - // Fill bitreader to have enough for the offset and match length. 
- CMPQ SI, $0x08 - JL sequenceDecs_decode_amd64_fill_byte_by_byte - MOVQ BX, AX - SHRQ $0x03, AX - SUBQ AX, R14 - MOVQ (R14), DX - SUBQ AX, SI - ANDQ $0x07, BX - JMP sequenceDecs_decode_amd64_fill_end - -sequenceDecs_decode_amd64_fill_byte_by_byte: - CMPQ SI, $0x00 - JLE sequenceDecs_decode_amd64_fill_check_overread - CMPQ BX, $0x07 - JLE sequenceDecs_decode_amd64_fill_end - SHLQ $0x08, DX - SUBQ $0x01, R14 - SUBQ $0x01, SI - SUBQ $0x08, BX - MOVBQZX (R14), AX - ORQ AX, DX - JMP sequenceDecs_decode_amd64_fill_byte_by_byte - -sequenceDecs_decode_amd64_fill_check_overread: - CMPQ BX, $0x40 - JA error_overread - -sequenceDecs_decode_amd64_fill_end: - // Update offset - MOVQ R9, AX - MOVQ BX, CX - MOVQ DX, R15 - SHLQ CL, R15 - MOVB AH, CL - SHRQ $0x20, AX - TESTQ CX, CX - JZ sequenceDecs_decode_amd64_of_update_zero - ADDQ CX, BX - CMPQ BX, $0x40 - JA sequenceDecs_decode_amd64_of_update_zero - CMPQ CX, $0x40 - JAE sequenceDecs_decode_amd64_of_update_zero - NEGQ CX - SHRQ CL, R15 - ADDQ R15, AX - -sequenceDecs_decode_amd64_of_update_zero: - MOVQ AX, 16(R10) - - // Update match length - MOVQ R8, AX - MOVQ BX, CX - MOVQ DX, R15 - SHLQ CL, R15 - MOVB AH, CL - SHRQ $0x20, AX - TESTQ CX, CX - JZ sequenceDecs_decode_amd64_ml_update_zero - ADDQ CX, BX - CMPQ BX, $0x40 - JA sequenceDecs_decode_amd64_ml_update_zero - CMPQ CX, $0x40 - JAE sequenceDecs_decode_amd64_ml_update_zero - NEGQ CX - SHRQ CL, R15 - ADDQ R15, AX - -sequenceDecs_decode_amd64_ml_update_zero: - MOVQ AX, 8(R10) - - // Fill bitreader to have enough for the remaining - CMPQ SI, $0x08 - JL sequenceDecs_decode_amd64_fill_2_byte_by_byte - MOVQ BX, AX - SHRQ $0x03, AX - SUBQ AX, R14 - MOVQ (R14), DX - SUBQ AX, SI - ANDQ $0x07, BX - JMP sequenceDecs_decode_amd64_fill_2_end - -sequenceDecs_decode_amd64_fill_2_byte_by_byte: - CMPQ SI, $0x00 - JLE sequenceDecs_decode_amd64_fill_2_check_overread - CMPQ BX, $0x07 - JLE sequenceDecs_decode_amd64_fill_2_end - SHLQ $0x08, DX - SUBQ $0x01, R14 - SUBQ $0x01, SI - SUBQ $0x08, BX - MOVBQZX (R14), AX - ORQ AX, DX - JMP sequenceDecs_decode_amd64_fill_2_byte_by_byte - -sequenceDecs_decode_amd64_fill_2_check_overread: - CMPQ BX, $0x40 - JA error_overread - -sequenceDecs_decode_amd64_fill_2_end: - // Update literal length - MOVQ DI, AX - MOVQ BX, CX - MOVQ DX, R15 - SHLQ CL, R15 - MOVB AH, CL - SHRQ $0x20, AX - TESTQ CX, CX - JZ sequenceDecs_decode_amd64_ll_update_zero - ADDQ CX, BX - CMPQ BX, $0x40 - JA sequenceDecs_decode_amd64_ll_update_zero - CMPQ CX, $0x40 - JAE sequenceDecs_decode_amd64_ll_update_zero - NEGQ CX - SHRQ CL, R15 - ADDQ R15, AX - -sequenceDecs_decode_amd64_ll_update_zero: - MOVQ AX, (R10) - - // Fill bitreader for state updates - MOVQ R14, (SP) - MOVQ R9, AX - SHRQ $0x08, AX - MOVBQZX AL, AX - MOVQ ctx+16(FP), CX - CMPQ 96(CX), $0x00 - JZ sequenceDecs_decode_amd64_skip_update - - // Update Literal Length State - MOVBQZX DI, R14 - SHRL $0x10, DI - LEAQ (BX)(R14*1), CX - MOVQ DX, R15 - MOVQ CX, BX - ROLQ CL, R15 - MOVL $0x00000001, BP - MOVB R14, CL - SHLL CL, BP - DECL BP - ANDQ BP, R15 - ADDQ R15, DI - - // Load ctx.llTable - MOVQ ctx+16(FP), CX - MOVQ (CX), CX - MOVQ (CX)(DI*8), DI - - // Update Match Length State - MOVBQZX R8, R14 - SHRL $0x10, R8 - LEAQ (BX)(R14*1), CX - MOVQ DX, R15 - MOVQ CX, BX - ROLQ CL, R15 - MOVL $0x00000001, BP - MOVB R14, CL - SHLL CL, BP - DECL BP - ANDQ BP, R15 - ADDQ R15, R8 - - // Load ctx.mlTable - MOVQ ctx+16(FP), CX - MOVQ 24(CX), CX - MOVQ (CX)(R8*8), R8 - - // Update Offset State - MOVBQZX R9, R14 - SHRL $0x10, R9 - LEAQ (BX)(R14*1), CX - MOVQ DX, R15 - 
MOVQ CX, BX - ROLQ CL, R15 - MOVL $0x00000001, BP - MOVB R14, CL - SHLL CL, BP - DECL BP - ANDQ BP, R15 - ADDQ R15, R9 - - // Load ctx.ofTable - MOVQ ctx+16(FP), CX - MOVQ 48(CX), CX - MOVQ (CX)(R9*8), R9 - -sequenceDecs_decode_amd64_skip_update: - // Adjust offset - MOVQ 16(R10), CX - CMPQ AX, $0x01 - JBE sequenceDecs_decode_amd64_adjust_offsetB_1_or_0 - MOVQ R12, R13 - MOVQ R11, R12 - MOVQ CX, R11 - JMP sequenceDecs_decode_amd64_after_adjust - -sequenceDecs_decode_amd64_adjust_offsetB_1_or_0: - CMPQ (R10), $0x00000000 - JNE sequenceDecs_decode_amd64_adjust_offset_maybezero - INCQ CX - JMP sequenceDecs_decode_amd64_adjust_offset_nonzero - -sequenceDecs_decode_amd64_adjust_offset_maybezero: - TESTQ CX, CX - JNZ sequenceDecs_decode_amd64_adjust_offset_nonzero - MOVQ R11, CX - JMP sequenceDecs_decode_amd64_after_adjust - -sequenceDecs_decode_amd64_adjust_offset_nonzero: - CMPQ CX, $0x01 - JB sequenceDecs_decode_amd64_adjust_zero - JEQ sequenceDecs_decode_amd64_adjust_one - CMPQ CX, $0x02 - JA sequenceDecs_decode_amd64_adjust_three - JMP sequenceDecs_decode_amd64_adjust_two - -sequenceDecs_decode_amd64_adjust_zero: - MOVQ R11, AX - JMP sequenceDecs_decode_amd64_adjust_test_temp_valid - -sequenceDecs_decode_amd64_adjust_one: - MOVQ R12, AX - JMP sequenceDecs_decode_amd64_adjust_test_temp_valid - -sequenceDecs_decode_amd64_adjust_two: - MOVQ R13, AX - JMP sequenceDecs_decode_amd64_adjust_test_temp_valid - -sequenceDecs_decode_amd64_adjust_three: - LEAQ -1(R11), AX - -sequenceDecs_decode_amd64_adjust_test_temp_valid: - TESTQ AX, AX - JNZ sequenceDecs_decode_amd64_adjust_temp_valid - MOVQ $0x00000001, AX - -sequenceDecs_decode_amd64_adjust_temp_valid: - CMPQ CX, $0x01 - CMOVQNE R12, R13 - MOVQ R11, R12 - MOVQ AX, R11 - MOVQ AX, CX - -sequenceDecs_decode_amd64_after_adjust: - MOVQ CX, 16(R10) - - // Check values - MOVQ 8(R10), AX - MOVQ (R10), R14 - LEAQ (AX)(R14*1), R15 - MOVQ s+0(FP), BP - ADDQ R15, 256(BP) - MOVQ ctx+16(FP), R15 - SUBQ R14, 128(R15) - JS error_not_enough_literals - CMPQ AX, $0x00020002 - JA sequenceDecs_decode_amd64_error_match_len_too_big - TESTQ CX, CX - JNZ sequenceDecs_decode_amd64_match_len_ofs_ok - TESTQ AX, AX - JNZ sequenceDecs_decode_amd64_error_match_len_ofs_mismatch - -sequenceDecs_decode_amd64_match_len_ofs_ok: - ADDQ $0x18, R10 - MOVQ ctx+16(FP), AX - DECQ 96(AX) - JNS sequenceDecs_decode_amd64_main_loop - MOVQ s+0(FP), AX - MOVQ R11, 144(AX) - MOVQ R12, 152(AX) - MOVQ R13, 160(AX) - MOVQ br+8(FP), AX - MOVQ DX, 24(AX) - MOVB BL, 32(AX) - MOVQ SI, 8(AX) - - // Return success - MOVQ $0x00000000, ret+24(FP) - RET - - // Return with match length error -sequenceDecs_decode_amd64_error_match_len_ofs_mismatch: - MOVQ $0x00000001, ret+24(FP) - RET - - // Return with match too long error -sequenceDecs_decode_amd64_error_match_len_too_big: - MOVQ $0x00000002, ret+24(FP) - RET - - // Return with match offset too long error - MOVQ $0x00000003, ret+24(FP) - RET - - // Return with not enough literals error -error_not_enough_literals: - MOVQ $0x00000004, ret+24(FP) - RET - - // Return with overread error -error_overread: - MOVQ $0x00000006, ret+24(FP) - RET - -// func sequenceDecs_decode_56_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int -// Requires: CMOV -TEXT ·sequenceDecs_decode_56_amd64(SB), $8-32 - MOVQ br+8(FP), CX - MOVQ 24(CX), DX - MOVBQZX 32(CX), BX - MOVQ (CX), AX - MOVQ 8(CX), SI - ADDQ SI, AX - MOVQ AX, (SP) - MOVQ ctx+16(FP), AX - MOVQ 72(AX), DI - MOVQ 80(AX), R8 - MOVQ 88(AX), R9 - MOVQ 104(AX), R10 - MOVQ s+0(FP), AX - MOVQ 144(AX), R11 - MOVQ 
152(AX), R12 - MOVQ 160(AX), R13 - -sequenceDecs_decode_56_amd64_main_loop: - MOVQ (SP), R14 - - // Fill bitreader to have enough for the offset and match length. - CMPQ SI, $0x08 - JL sequenceDecs_decode_56_amd64_fill_byte_by_byte - MOVQ BX, AX - SHRQ $0x03, AX - SUBQ AX, R14 - MOVQ (R14), DX - SUBQ AX, SI - ANDQ $0x07, BX - JMP sequenceDecs_decode_56_amd64_fill_end - -sequenceDecs_decode_56_amd64_fill_byte_by_byte: - CMPQ SI, $0x00 - JLE sequenceDecs_decode_56_amd64_fill_check_overread - CMPQ BX, $0x07 - JLE sequenceDecs_decode_56_amd64_fill_end - SHLQ $0x08, DX - SUBQ $0x01, R14 - SUBQ $0x01, SI - SUBQ $0x08, BX - MOVBQZX (R14), AX - ORQ AX, DX - JMP sequenceDecs_decode_56_amd64_fill_byte_by_byte - -sequenceDecs_decode_56_amd64_fill_check_overread: - CMPQ BX, $0x40 - JA error_overread - -sequenceDecs_decode_56_amd64_fill_end: - // Update offset - MOVQ R9, AX - MOVQ BX, CX - MOVQ DX, R15 - SHLQ CL, R15 - MOVB AH, CL - SHRQ $0x20, AX - TESTQ CX, CX - JZ sequenceDecs_decode_56_amd64_of_update_zero - ADDQ CX, BX - CMPQ BX, $0x40 - JA sequenceDecs_decode_56_amd64_of_update_zero - CMPQ CX, $0x40 - JAE sequenceDecs_decode_56_amd64_of_update_zero - NEGQ CX - SHRQ CL, R15 - ADDQ R15, AX - -sequenceDecs_decode_56_amd64_of_update_zero: - MOVQ AX, 16(R10) - - // Update match length - MOVQ R8, AX - MOVQ BX, CX - MOVQ DX, R15 - SHLQ CL, R15 - MOVB AH, CL - SHRQ $0x20, AX - TESTQ CX, CX - JZ sequenceDecs_decode_56_amd64_ml_update_zero - ADDQ CX, BX - CMPQ BX, $0x40 - JA sequenceDecs_decode_56_amd64_ml_update_zero - CMPQ CX, $0x40 - JAE sequenceDecs_decode_56_amd64_ml_update_zero - NEGQ CX - SHRQ CL, R15 - ADDQ R15, AX - -sequenceDecs_decode_56_amd64_ml_update_zero: - MOVQ AX, 8(R10) - - // Update literal length - MOVQ DI, AX - MOVQ BX, CX - MOVQ DX, R15 - SHLQ CL, R15 - MOVB AH, CL - SHRQ $0x20, AX - TESTQ CX, CX - JZ sequenceDecs_decode_56_amd64_ll_update_zero - ADDQ CX, BX - CMPQ BX, $0x40 - JA sequenceDecs_decode_56_amd64_ll_update_zero - CMPQ CX, $0x40 - JAE sequenceDecs_decode_56_amd64_ll_update_zero - NEGQ CX - SHRQ CL, R15 - ADDQ R15, AX - -sequenceDecs_decode_56_amd64_ll_update_zero: - MOVQ AX, (R10) - - // Fill bitreader for state updates - MOVQ R14, (SP) - MOVQ R9, AX - SHRQ $0x08, AX - MOVBQZX AL, AX - MOVQ ctx+16(FP), CX - CMPQ 96(CX), $0x00 - JZ sequenceDecs_decode_56_amd64_skip_update - - // Update Literal Length State - MOVBQZX DI, R14 - SHRL $0x10, DI - LEAQ (BX)(R14*1), CX - MOVQ DX, R15 - MOVQ CX, BX - ROLQ CL, R15 - MOVL $0x00000001, BP - MOVB R14, CL - SHLL CL, BP - DECL BP - ANDQ BP, R15 - ADDQ R15, DI - - // Load ctx.llTable - MOVQ ctx+16(FP), CX - MOVQ (CX), CX - MOVQ (CX)(DI*8), DI - - // Update Match Length State - MOVBQZX R8, R14 - SHRL $0x10, R8 - LEAQ (BX)(R14*1), CX - MOVQ DX, R15 - MOVQ CX, BX - ROLQ CL, R15 - MOVL $0x00000001, BP - MOVB R14, CL - SHLL CL, BP - DECL BP - ANDQ BP, R15 - ADDQ R15, R8 - - // Load ctx.mlTable - MOVQ ctx+16(FP), CX - MOVQ 24(CX), CX - MOVQ (CX)(R8*8), R8 - - // Update Offset State - MOVBQZX R9, R14 - SHRL $0x10, R9 - LEAQ (BX)(R14*1), CX - MOVQ DX, R15 - MOVQ CX, BX - ROLQ CL, R15 - MOVL $0x00000001, BP - MOVB R14, CL - SHLL CL, BP - DECL BP - ANDQ BP, R15 - ADDQ R15, R9 - - // Load ctx.ofTable - MOVQ ctx+16(FP), CX - MOVQ 48(CX), CX - MOVQ (CX)(R9*8), R9 - -sequenceDecs_decode_56_amd64_skip_update: - // Adjust offset - MOVQ 16(R10), CX - CMPQ AX, $0x01 - JBE sequenceDecs_decode_56_amd64_adjust_offsetB_1_or_0 - MOVQ R12, R13 - MOVQ R11, R12 - MOVQ CX, R11 - JMP sequenceDecs_decode_56_amd64_after_adjust - 
-sequenceDecs_decode_56_amd64_adjust_offsetB_1_or_0: - CMPQ (R10), $0x00000000 - JNE sequenceDecs_decode_56_amd64_adjust_offset_maybezero - INCQ CX - JMP sequenceDecs_decode_56_amd64_adjust_offset_nonzero - -sequenceDecs_decode_56_amd64_adjust_offset_maybezero: - TESTQ CX, CX - JNZ sequenceDecs_decode_56_amd64_adjust_offset_nonzero - MOVQ R11, CX - JMP sequenceDecs_decode_56_amd64_after_adjust - -sequenceDecs_decode_56_amd64_adjust_offset_nonzero: - CMPQ CX, $0x01 - JB sequenceDecs_decode_56_amd64_adjust_zero - JEQ sequenceDecs_decode_56_amd64_adjust_one - CMPQ CX, $0x02 - JA sequenceDecs_decode_56_amd64_adjust_three - JMP sequenceDecs_decode_56_amd64_adjust_two - -sequenceDecs_decode_56_amd64_adjust_zero: - MOVQ R11, AX - JMP sequenceDecs_decode_56_amd64_adjust_test_temp_valid - -sequenceDecs_decode_56_amd64_adjust_one: - MOVQ R12, AX - JMP sequenceDecs_decode_56_amd64_adjust_test_temp_valid - -sequenceDecs_decode_56_amd64_adjust_two: - MOVQ R13, AX - JMP sequenceDecs_decode_56_amd64_adjust_test_temp_valid - -sequenceDecs_decode_56_amd64_adjust_three: - LEAQ -1(R11), AX - -sequenceDecs_decode_56_amd64_adjust_test_temp_valid: - TESTQ AX, AX - JNZ sequenceDecs_decode_56_amd64_adjust_temp_valid - MOVQ $0x00000001, AX - -sequenceDecs_decode_56_amd64_adjust_temp_valid: - CMPQ CX, $0x01 - CMOVQNE R12, R13 - MOVQ R11, R12 - MOVQ AX, R11 - MOVQ AX, CX - -sequenceDecs_decode_56_amd64_after_adjust: - MOVQ CX, 16(R10) - - // Check values - MOVQ 8(R10), AX - MOVQ (R10), R14 - LEAQ (AX)(R14*1), R15 - MOVQ s+0(FP), BP - ADDQ R15, 256(BP) - MOVQ ctx+16(FP), R15 - SUBQ R14, 128(R15) - JS error_not_enough_literals - CMPQ AX, $0x00020002 - JA sequenceDecs_decode_56_amd64_error_match_len_too_big - TESTQ CX, CX - JNZ sequenceDecs_decode_56_amd64_match_len_ofs_ok - TESTQ AX, AX - JNZ sequenceDecs_decode_56_amd64_error_match_len_ofs_mismatch - -sequenceDecs_decode_56_amd64_match_len_ofs_ok: - ADDQ $0x18, R10 - MOVQ ctx+16(FP), AX - DECQ 96(AX) - JNS sequenceDecs_decode_56_amd64_main_loop - MOVQ s+0(FP), AX - MOVQ R11, 144(AX) - MOVQ R12, 152(AX) - MOVQ R13, 160(AX) - MOVQ br+8(FP), AX - MOVQ DX, 24(AX) - MOVB BL, 32(AX) - MOVQ SI, 8(AX) - - // Return success - MOVQ $0x00000000, ret+24(FP) - RET - - // Return with match length error -sequenceDecs_decode_56_amd64_error_match_len_ofs_mismatch: - MOVQ $0x00000001, ret+24(FP) - RET - - // Return with match too long error -sequenceDecs_decode_56_amd64_error_match_len_too_big: - MOVQ $0x00000002, ret+24(FP) - RET - - // Return with match offset too long error - MOVQ $0x00000003, ret+24(FP) - RET - - // Return with not enough literals error -error_not_enough_literals: - MOVQ $0x00000004, ret+24(FP) - RET - - // Return with overread error -error_overread: - MOVQ $0x00000006, ret+24(FP) - RET - -// func sequenceDecs_decode_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int -// Requires: BMI, BMI2, CMOV -TEXT ·sequenceDecs_decode_bmi2(SB), $8-32 - MOVQ br+8(FP), BX - MOVQ 24(BX), AX - MOVBQZX 32(BX), DX - MOVQ (BX), CX - MOVQ 8(BX), BX - ADDQ BX, CX - MOVQ CX, (SP) - MOVQ ctx+16(FP), CX - MOVQ 72(CX), SI - MOVQ 80(CX), DI - MOVQ 88(CX), R8 - MOVQ 104(CX), R9 - MOVQ s+0(FP), CX - MOVQ 144(CX), R10 - MOVQ 152(CX), R11 - MOVQ 160(CX), R12 - -sequenceDecs_decode_bmi2_main_loop: - MOVQ (SP), R13 - - // Fill bitreader to have enough for the offset and match length. 
- CMPQ BX, $0x08 - JL sequenceDecs_decode_bmi2_fill_byte_by_byte - MOVQ DX, CX - SHRQ $0x03, CX - SUBQ CX, R13 - MOVQ (R13), AX - SUBQ CX, BX - ANDQ $0x07, DX - JMP sequenceDecs_decode_bmi2_fill_end - -sequenceDecs_decode_bmi2_fill_byte_by_byte: - CMPQ BX, $0x00 - JLE sequenceDecs_decode_bmi2_fill_check_overread - CMPQ DX, $0x07 - JLE sequenceDecs_decode_bmi2_fill_end - SHLQ $0x08, AX - SUBQ $0x01, R13 - SUBQ $0x01, BX - SUBQ $0x08, DX - MOVBQZX (R13), CX - ORQ CX, AX - JMP sequenceDecs_decode_bmi2_fill_byte_by_byte - -sequenceDecs_decode_bmi2_fill_check_overread: - CMPQ DX, $0x40 - JA error_overread - -sequenceDecs_decode_bmi2_fill_end: - // Update offset - MOVQ $0x00000808, CX - BEXTRQ CX, R8, R14 - MOVQ AX, R15 - LEAQ (DX)(R14*1), CX - ROLQ CL, R15 - BZHIQ R14, R15, R15 - MOVQ CX, DX - MOVQ R8, CX - SHRQ $0x20, CX - ADDQ R15, CX - MOVQ CX, 16(R9) - - // Update match length - MOVQ $0x00000808, CX - BEXTRQ CX, DI, R14 - MOVQ AX, R15 - LEAQ (DX)(R14*1), CX - ROLQ CL, R15 - BZHIQ R14, R15, R15 - MOVQ CX, DX - MOVQ DI, CX - SHRQ $0x20, CX - ADDQ R15, CX - MOVQ CX, 8(R9) - - // Fill bitreader to have enough for the remaining - CMPQ BX, $0x08 - JL sequenceDecs_decode_bmi2_fill_2_byte_by_byte - MOVQ DX, CX - SHRQ $0x03, CX - SUBQ CX, R13 - MOVQ (R13), AX - SUBQ CX, BX - ANDQ $0x07, DX - JMP sequenceDecs_decode_bmi2_fill_2_end - -sequenceDecs_decode_bmi2_fill_2_byte_by_byte: - CMPQ BX, $0x00 - JLE sequenceDecs_decode_bmi2_fill_2_check_overread - CMPQ DX, $0x07 - JLE sequenceDecs_decode_bmi2_fill_2_end - SHLQ $0x08, AX - SUBQ $0x01, R13 - SUBQ $0x01, BX - SUBQ $0x08, DX - MOVBQZX (R13), CX - ORQ CX, AX - JMP sequenceDecs_decode_bmi2_fill_2_byte_by_byte - -sequenceDecs_decode_bmi2_fill_2_check_overread: - CMPQ DX, $0x40 - JA error_overread - -sequenceDecs_decode_bmi2_fill_2_end: - // Update literal length - MOVQ $0x00000808, CX - BEXTRQ CX, SI, R14 - MOVQ AX, R15 - LEAQ (DX)(R14*1), CX - ROLQ CL, R15 - BZHIQ R14, R15, R15 - MOVQ CX, DX - MOVQ SI, CX - SHRQ $0x20, CX - ADDQ R15, CX - MOVQ CX, (R9) - - // Fill bitreader for state updates - MOVQ R13, (SP) - MOVQ $0x00000808, CX - BEXTRQ CX, R8, R13 - MOVQ ctx+16(FP), CX - CMPQ 96(CX), $0x00 - JZ sequenceDecs_decode_bmi2_skip_update - LEAQ (SI)(DI*1), R14 - ADDQ R8, R14 - MOVBQZX R14, R14 - LEAQ (DX)(R14*1), CX - MOVQ AX, R15 - MOVQ CX, DX - ROLQ CL, R15 - BZHIQ R14, R15, R15 - - // Update Offset State - BZHIQ R8, R15, CX - SHRXQ R8, R15, R15 - SHRL $0x10, R8 - ADDQ CX, R8 - - // Load ctx.ofTable - MOVQ ctx+16(FP), CX - MOVQ 48(CX), CX - MOVQ (CX)(R8*8), R8 - - // Update Match Length State - BZHIQ DI, R15, CX - SHRXQ DI, R15, R15 - SHRL $0x10, DI - ADDQ CX, DI - - // Load ctx.mlTable - MOVQ ctx+16(FP), CX - MOVQ 24(CX), CX - MOVQ (CX)(DI*8), DI - - // Update Literal Length State - BZHIQ SI, R15, CX - SHRL $0x10, SI - ADDQ CX, SI - - // Load ctx.llTable - MOVQ ctx+16(FP), CX - MOVQ (CX), CX - MOVQ (CX)(SI*8), SI - -sequenceDecs_decode_bmi2_skip_update: - // Adjust offset - MOVQ 16(R9), CX - CMPQ R13, $0x01 - JBE sequenceDecs_decode_bmi2_adjust_offsetB_1_or_0 - MOVQ R11, R12 - MOVQ R10, R11 - MOVQ CX, R10 - JMP sequenceDecs_decode_bmi2_after_adjust - -sequenceDecs_decode_bmi2_adjust_offsetB_1_or_0: - CMPQ (R9), $0x00000000 - JNE sequenceDecs_decode_bmi2_adjust_offset_maybezero - INCQ CX - JMP sequenceDecs_decode_bmi2_adjust_offset_nonzero - -sequenceDecs_decode_bmi2_adjust_offset_maybezero: - TESTQ CX, CX - JNZ sequenceDecs_decode_bmi2_adjust_offset_nonzero - MOVQ R10, CX - JMP sequenceDecs_decode_bmi2_after_adjust - 
-sequenceDecs_decode_bmi2_adjust_offset_nonzero: - CMPQ CX, $0x01 - JB sequenceDecs_decode_bmi2_adjust_zero - JEQ sequenceDecs_decode_bmi2_adjust_one - CMPQ CX, $0x02 - JA sequenceDecs_decode_bmi2_adjust_three - JMP sequenceDecs_decode_bmi2_adjust_two - -sequenceDecs_decode_bmi2_adjust_zero: - MOVQ R10, R13 - JMP sequenceDecs_decode_bmi2_adjust_test_temp_valid - -sequenceDecs_decode_bmi2_adjust_one: - MOVQ R11, R13 - JMP sequenceDecs_decode_bmi2_adjust_test_temp_valid - -sequenceDecs_decode_bmi2_adjust_two: - MOVQ R12, R13 - JMP sequenceDecs_decode_bmi2_adjust_test_temp_valid - -sequenceDecs_decode_bmi2_adjust_three: - LEAQ -1(R10), R13 - -sequenceDecs_decode_bmi2_adjust_test_temp_valid: - TESTQ R13, R13 - JNZ sequenceDecs_decode_bmi2_adjust_temp_valid - MOVQ $0x00000001, R13 - -sequenceDecs_decode_bmi2_adjust_temp_valid: - CMPQ CX, $0x01 - CMOVQNE R11, R12 - MOVQ R10, R11 - MOVQ R13, R10 - MOVQ R13, CX - -sequenceDecs_decode_bmi2_after_adjust: - MOVQ CX, 16(R9) - - // Check values - MOVQ 8(R9), R13 - MOVQ (R9), R14 - LEAQ (R13)(R14*1), R15 - MOVQ s+0(FP), BP - ADDQ R15, 256(BP) - MOVQ ctx+16(FP), R15 - SUBQ R14, 128(R15) - JS error_not_enough_literals - CMPQ R13, $0x00020002 - JA sequenceDecs_decode_bmi2_error_match_len_too_big - TESTQ CX, CX - JNZ sequenceDecs_decode_bmi2_match_len_ofs_ok - TESTQ R13, R13 - JNZ sequenceDecs_decode_bmi2_error_match_len_ofs_mismatch - -sequenceDecs_decode_bmi2_match_len_ofs_ok: - ADDQ $0x18, R9 - MOVQ ctx+16(FP), CX - DECQ 96(CX) - JNS sequenceDecs_decode_bmi2_main_loop - MOVQ s+0(FP), CX - MOVQ R10, 144(CX) - MOVQ R11, 152(CX) - MOVQ R12, 160(CX) - MOVQ br+8(FP), CX - MOVQ AX, 24(CX) - MOVB DL, 32(CX) - MOVQ BX, 8(CX) - - // Return success - MOVQ $0x00000000, ret+24(FP) - RET - - // Return with match length error -sequenceDecs_decode_bmi2_error_match_len_ofs_mismatch: - MOVQ $0x00000001, ret+24(FP) - RET - - // Return with match too long error -sequenceDecs_decode_bmi2_error_match_len_too_big: - MOVQ $0x00000002, ret+24(FP) - RET - - // Return with match offset too long error - MOVQ $0x00000003, ret+24(FP) - RET - - // Return with not enough literals error -error_not_enough_literals: - MOVQ $0x00000004, ret+24(FP) - RET - - // Return with overread error -error_overread: - MOVQ $0x00000006, ret+24(FP) - RET - -// func sequenceDecs_decode_56_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int -// Requires: BMI, BMI2, CMOV -TEXT ·sequenceDecs_decode_56_bmi2(SB), $8-32 - MOVQ br+8(FP), BX - MOVQ 24(BX), AX - MOVBQZX 32(BX), DX - MOVQ (BX), CX - MOVQ 8(BX), BX - ADDQ BX, CX - MOVQ CX, (SP) - MOVQ ctx+16(FP), CX - MOVQ 72(CX), SI - MOVQ 80(CX), DI - MOVQ 88(CX), R8 - MOVQ 104(CX), R9 - MOVQ s+0(FP), CX - MOVQ 144(CX), R10 - MOVQ 152(CX), R11 - MOVQ 160(CX), R12 - -sequenceDecs_decode_56_bmi2_main_loop: - MOVQ (SP), R13 - - // Fill bitreader to have enough for the offset and match length. 
- CMPQ BX, $0x08 - JL sequenceDecs_decode_56_bmi2_fill_byte_by_byte - MOVQ DX, CX - SHRQ $0x03, CX - SUBQ CX, R13 - MOVQ (R13), AX - SUBQ CX, BX - ANDQ $0x07, DX - JMP sequenceDecs_decode_56_bmi2_fill_end - -sequenceDecs_decode_56_bmi2_fill_byte_by_byte: - CMPQ BX, $0x00 - JLE sequenceDecs_decode_56_bmi2_fill_check_overread - CMPQ DX, $0x07 - JLE sequenceDecs_decode_56_bmi2_fill_end - SHLQ $0x08, AX - SUBQ $0x01, R13 - SUBQ $0x01, BX - SUBQ $0x08, DX - MOVBQZX (R13), CX - ORQ CX, AX - JMP sequenceDecs_decode_56_bmi2_fill_byte_by_byte - -sequenceDecs_decode_56_bmi2_fill_check_overread: - CMPQ DX, $0x40 - JA error_overread - -sequenceDecs_decode_56_bmi2_fill_end: - // Update offset - MOVQ $0x00000808, CX - BEXTRQ CX, R8, R14 - MOVQ AX, R15 - LEAQ (DX)(R14*1), CX - ROLQ CL, R15 - BZHIQ R14, R15, R15 - MOVQ CX, DX - MOVQ R8, CX - SHRQ $0x20, CX - ADDQ R15, CX - MOVQ CX, 16(R9) - - // Update match length - MOVQ $0x00000808, CX - BEXTRQ CX, DI, R14 - MOVQ AX, R15 - LEAQ (DX)(R14*1), CX - ROLQ CL, R15 - BZHIQ R14, R15, R15 - MOVQ CX, DX - MOVQ DI, CX - SHRQ $0x20, CX - ADDQ R15, CX - MOVQ CX, 8(R9) - - // Update literal length - MOVQ $0x00000808, CX - BEXTRQ CX, SI, R14 - MOVQ AX, R15 - LEAQ (DX)(R14*1), CX - ROLQ CL, R15 - BZHIQ R14, R15, R15 - MOVQ CX, DX - MOVQ SI, CX - SHRQ $0x20, CX - ADDQ R15, CX - MOVQ CX, (R9) - - // Fill bitreader for state updates - MOVQ R13, (SP) - MOVQ $0x00000808, CX - BEXTRQ CX, R8, R13 - MOVQ ctx+16(FP), CX - CMPQ 96(CX), $0x00 - JZ sequenceDecs_decode_56_bmi2_skip_update - LEAQ (SI)(DI*1), R14 - ADDQ R8, R14 - MOVBQZX R14, R14 - LEAQ (DX)(R14*1), CX - MOVQ AX, R15 - MOVQ CX, DX - ROLQ CL, R15 - BZHIQ R14, R15, R15 - - // Update Offset State - BZHIQ R8, R15, CX - SHRXQ R8, R15, R15 - SHRL $0x10, R8 - ADDQ CX, R8 - - // Load ctx.ofTable - MOVQ ctx+16(FP), CX - MOVQ 48(CX), CX - MOVQ (CX)(R8*8), R8 - - // Update Match Length State - BZHIQ DI, R15, CX - SHRXQ DI, R15, R15 - SHRL $0x10, DI - ADDQ CX, DI - - // Load ctx.mlTable - MOVQ ctx+16(FP), CX - MOVQ 24(CX), CX - MOVQ (CX)(DI*8), DI - - // Update Literal Length State - BZHIQ SI, R15, CX - SHRL $0x10, SI - ADDQ CX, SI - - // Load ctx.llTable - MOVQ ctx+16(FP), CX - MOVQ (CX), CX - MOVQ (CX)(SI*8), SI - -sequenceDecs_decode_56_bmi2_skip_update: - // Adjust offset - MOVQ 16(R9), CX - CMPQ R13, $0x01 - JBE sequenceDecs_decode_56_bmi2_adjust_offsetB_1_or_0 - MOVQ R11, R12 - MOVQ R10, R11 - MOVQ CX, R10 - JMP sequenceDecs_decode_56_bmi2_after_adjust - -sequenceDecs_decode_56_bmi2_adjust_offsetB_1_or_0: - CMPQ (R9), $0x00000000 - JNE sequenceDecs_decode_56_bmi2_adjust_offset_maybezero - INCQ CX - JMP sequenceDecs_decode_56_bmi2_adjust_offset_nonzero - -sequenceDecs_decode_56_bmi2_adjust_offset_maybezero: - TESTQ CX, CX - JNZ sequenceDecs_decode_56_bmi2_adjust_offset_nonzero - MOVQ R10, CX - JMP sequenceDecs_decode_56_bmi2_after_adjust - -sequenceDecs_decode_56_bmi2_adjust_offset_nonzero: - CMPQ CX, $0x01 - JB sequenceDecs_decode_56_bmi2_adjust_zero - JEQ sequenceDecs_decode_56_bmi2_adjust_one - CMPQ CX, $0x02 - JA sequenceDecs_decode_56_bmi2_adjust_three - JMP sequenceDecs_decode_56_bmi2_adjust_two - -sequenceDecs_decode_56_bmi2_adjust_zero: - MOVQ R10, R13 - JMP sequenceDecs_decode_56_bmi2_adjust_test_temp_valid - -sequenceDecs_decode_56_bmi2_adjust_one: - MOVQ R11, R13 - JMP sequenceDecs_decode_56_bmi2_adjust_test_temp_valid - -sequenceDecs_decode_56_bmi2_adjust_two: - MOVQ R12, R13 - JMP sequenceDecs_decode_56_bmi2_adjust_test_temp_valid - -sequenceDecs_decode_56_bmi2_adjust_three: - LEAQ -1(R10), R13 - 
-sequenceDecs_decode_56_bmi2_adjust_test_temp_valid: - TESTQ R13, R13 - JNZ sequenceDecs_decode_56_bmi2_adjust_temp_valid - MOVQ $0x00000001, R13 - -sequenceDecs_decode_56_bmi2_adjust_temp_valid: - CMPQ CX, $0x01 - CMOVQNE R11, R12 - MOVQ R10, R11 - MOVQ R13, R10 - MOVQ R13, CX - -sequenceDecs_decode_56_bmi2_after_adjust: - MOVQ CX, 16(R9) - - // Check values - MOVQ 8(R9), R13 - MOVQ (R9), R14 - LEAQ (R13)(R14*1), R15 - MOVQ s+0(FP), BP - ADDQ R15, 256(BP) - MOVQ ctx+16(FP), R15 - SUBQ R14, 128(R15) - JS error_not_enough_literals - CMPQ R13, $0x00020002 - JA sequenceDecs_decode_56_bmi2_error_match_len_too_big - TESTQ CX, CX - JNZ sequenceDecs_decode_56_bmi2_match_len_ofs_ok - TESTQ R13, R13 - JNZ sequenceDecs_decode_56_bmi2_error_match_len_ofs_mismatch - -sequenceDecs_decode_56_bmi2_match_len_ofs_ok: - ADDQ $0x18, R9 - MOVQ ctx+16(FP), CX - DECQ 96(CX) - JNS sequenceDecs_decode_56_bmi2_main_loop - MOVQ s+0(FP), CX - MOVQ R10, 144(CX) - MOVQ R11, 152(CX) - MOVQ R12, 160(CX) - MOVQ br+8(FP), CX - MOVQ AX, 24(CX) - MOVB DL, 32(CX) - MOVQ BX, 8(CX) - - // Return success - MOVQ $0x00000000, ret+24(FP) - RET - - // Return with match length error -sequenceDecs_decode_56_bmi2_error_match_len_ofs_mismatch: - MOVQ $0x00000001, ret+24(FP) - RET - - // Return with match too long error -sequenceDecs_decode_56_bmi2_error_match_len_too_big: - MOVQ $0x00000002, ret+24(FP) - RET - - // Return with match offset too long error - MOVQ $0x00000003, ret+24(FP) - RET - - // Return with not enough literals error -error_not_enough_literals: - MOVQ $0x00000004, ret+24(FP) - RET - - // Return with overread error -error_overread: - MOVQ $0x00000006, ret+24(FP) - RET - -// func sequenceDecs_executeSimple_amd64(ctx *executeAsmContext) bool -// Requires: SSE -TEXT ·sequenceDecs_executeSimple_amd64(SB), $8-9 - MOVQ ctx+0(FP), R10 - MOVQ 8(R10), CX - TESTQ CX, CX - JZ empty_seqs - MOVQ (R10), AX - MOVQ 24(R10), DX - MOVQ 32(R10), BX - MOVQ 80(R10), SI - MOVQ 104(R10), DI - MOVQ 120(R10), R8 - MOVQ 56(R10), R9 - MOVQ 64(R10), R10 - ADDQ R10, R9 - - // seqsBase += 24 * seqIndex - LEAQ (DX)(DX*2), R11 - SHLQ $0x03, R11 - ADDQ R11, AX - - // outBase += outPosition - ADDQ DI, BX - -main_loop: - MOVQ (AX), R11 - MOVQ 16(AX), R12 - MOVQ 8(AX), R13 - - // Copy literals - TESTQ R11, R11 - JZ check_offset - XORQ R14, R14 - -copy_1: - MOVUPS (SI)(R14*1), X0 - MOVUPS X0, (BX)(R14*1) - ADDQ $0x10, R14 - CMPQ R14, R11 - JB copy_1 - ADDQ R11, SI - ADDQ R11, BX - ADDQ R11, DI - - // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) -check_offset: - LEAQ (DI)(R10*1), R11 - CMPQ R12, R11 - JG error_match_off_too_big - CMPQ R12, R8 - JG error_match_off_too_big - - // Copy match from history - MOVQ R12, R11 - SUBQ DI, R11 - JLS copy_match - MOVQ R9, R14 - SUBQ R11, R14 - CMPQ R13, R11 - JG copy_all_from_history - MOVQ R13, R11 - SUBQ $0x10, R11 - JB copy_4_small - -copy_4_loop: - MOVUPS (R14), X0 - MOVUPS X0, (BX) - ADDQ $0x10, R14 - ADDQ $0x10, BX - SUBQ $0x10, R11 - JAE copy_4_loop - LEAQ 16(R14)(R11*1), R14 - LEAQ 16(BX)(R11*1), BX - MOVUPS -16(R14), X0 - MOVUPS X0, -16(BX) - JMP copy_4_end - -copy_4_small: - CMPQ R13, $0x03 - JE copy_4_move_3 - CMPQ R13, $0x08 - JB copy_4_move_4through7 - JMP copy_4_move_8through16 - -copy_4_move_3: - MOVW (R14), R11 - MOVB 2(R14), R12 - MOVW R11, (BX) - MOVB R12, 2(BX) - ADDQ R13, R14 - ADDQ R13, BX - JMP copy_4_end - -copy_4_move_4through7: - MOVL (R14), R11 - MOVL -4(R14)(R13*1), R12 - MOVL R11, (BX) - MOVL R12, -4(BX)(R13*1) - ADDQ R13, R14 - ADDQ R13, BX - JMP copy_4_end - 
-copy_4_move_8through16: - MOVQ (R14), R11 - MOVQ -8(R14)(R13*1), R12 - MOVQ R11, (BX) - MOVQ R12, -8(BX)(R13*1) - ADDQ R13, R14 - ADDQ R13, BX - -copy_4_end: - ADDQ R13, DI - ADDQ $0x18, AX - INCQ DX - CMPQ DX, CX - JB main_loop - JMP loop_finished - -copy_all_from_history: - MOVQ R11, R15 - SUBQ $0x10, R15 - JB copy_5_small - -copy_5_loop: - MOVUPS (R14), X0 - MOVUPS X0, (BX) - ADDQ $0x10, R14 - ADDQ $0x10, BX - SUBQ $0x10, R15 - JAE copy_5_loop - LEAQ 16(R14)(R15*1), R14 - LEAQ 16(BX)(R15*1), BX - MOVUPS -16(R14), X0 - MOVUPS X0, -16(BX) - JMP copy_5_end - -copy_5_small: - CMPQ R11, $0x03 - JE copy_5_move_3 - JB copy_5_move_1or2 - CMPQ R11, $0x08 - JB copy_5_move_4through7 - JMP copy_5_move_8through16 - -copy_5_move_1or2: - MOVB (R14), R15 - MOVB -1(R14)(R11*1), BP - MOVB R15, (BX) - MOVB BP, -1(BX)(R11*1) - ADDQ R11, R14 - ADDQ R11, BX - JMP copy_5_end - -copy_5_move_3: - MOVW (R14), R15 - MOVB 2(R14), BP - MOVW R15, (BX) - MOVB BP, 2(BX) - ADDQ R11, R14 - ADDQ R11, BX - JMP copy_5_end - -copy_5_move_4through7: - MOVL (R14), R15 - MOVL -4(R14)(R11*1), BP - MOVL R15, (BX) - MOVL BP, -4(BX)(R11*1) - ADDQ R11, R14 - ADDQ R11, BX - JMP copy_5_end - -copy_5_move_8through16: - MOVQ (R14), R15 - MOVQ -8(R14)(R11*1), BP - MOVQ R15, (BX) - MOVQ BP, -8(BX)(R11*1) - ADDQ R11, R14 - ADDQ R11, BX - -copy_5_end: - ADDQ R11, DI - SUBQ R11, R13 - - // Copy match from the current buffer -copy_match: - MOVQ BX, R11 - SUBQ R12, R11 - - // ml <= mo - CMPQ R13, R12 - JA copy_overlapping_match - - // Copy non-overlapping match - ADDQ R13, DI - MOVQ BX, R12 - ADDQ R13, BX - -copy_2: - MOVUPS (R11), X0 - MOVUPS X0, (R12) - ADDQ $0x10, R11 - ADDQ $0x10, R12 - SUBQ $0x10, R13 - JHI copy_2 - JMP handle_loop - - // Copy overlapping match -copy_overlapping_match: - ADDQ R13, DI - -copy_slow_3: - MOVB (R11), R12 - MOVB R12, (BX) - INCQ R11 - INCQ BX - DECQ R13 - JNZ copy_slow_3 - -handle_loop: - ADDQ $0x18, AX - INCQ DX - CMPQ DX, CX - JB main_loop - -loop_finished: - // Return value - MOVB $0x01, ret+8(FP) - - // Update the context - MOVQ ctx+0(FP), AX - MOVQ DX, 24(AX) - MOVQ DI, 104(AX) - SUBQ 80(AX), SI - MOVQ SI, 112(AX) - RET - -error_match_off_too_big: - // Return value - MOVB $0x00, ret+8(FP) - - // Update the context - MOVQ ctx+0(FP), AX - MOVQ DX, 24(AX) - MOVQ DI, 104(AX) - SUBQ 80(AX), SI - MOVQ SI, 112(AX) - RET - -empty_seqs: - // Return value - MOVB $0x01, ret+8(FP) - RET - -// func sequenceDecs_executeSimple_safe_amd64(ctx *executeAsmContext) bool -// Requires: SSE -TEXT ·sequenceDecs_executeSimple_safe_amd64(SB), $8-9 - MOVQ ctx+0(FP), R10 - MOVQ 8(R10), CX - TESTQ CX, CX - JZ empty_seqs - MOVQ (R10), AX - MOVQ 24(R10), DX - MOVQ 32(R10), BX - MOVQ 80(R10), SI - MOVQ 104(R10), DI - MOVQ 120(R10), R8 - MOVQ 56(R10), R9 - MOVQ 64(R10), R10 - ADDQ R10, R9 - - // seqsBase += 24 * seqIndex - LEAQ (DX)(DX*2), R11 - SHLQ $0x03, R11 - ADDQ R11, AX - - // outBase += outPosition - ADDQ DI, BX - -main_loop: - MOVQ (AX), R11 - MOVQ 16(AX), R12 - MOVQ 8(AX), R13 - - // Copy literals - TESTQ R11, R11 - JZ check_offset - MOVQ R11, R14 - SUBQ $0x10, R14 - JB copy_1_small - -copy_1_loop: - MOVUPS (SI), X0 - MOVUPS X0, (BX) - ADDQ $0x10, SI - ADDQ $0x10, BX - SUBQ $0x10, R14 - JAE copy_1_loop - LEAQ 16(SI)(R14*1), SI - LEAQ 16(BX)(R14*1), BX - MOVUPS -16(SI), X0 - MOVUPS X0, -16(BX) - JMP copy_1_end - -copy_1_small: - CMPQ R11, $0x03 - JE copy_1_move_3 - JB copy_1_move_1or2 - CMPQ R11, $0x08 - JB copy_1_move_4through7 - JMP copy_1_move_8through16 - -copy_1_move_1or2: - MOVB (SI), R14 - MOVB -1(SI)(R11*1), R15 - MOVB 
R14, (BX) - MOVB R15, -1(BX)(R11*1) - ADDQ R11, SI - ADDQ R11, BX - JMP copy_1_end - -copy_1_move_3: - MOVW (SI), R14 - MOVB 2(SI), R15 - MOVW R14, (BX) - MOVB R15, 2(BX) - ADDQ R11, SI - ADDQ R11, BX - JMP copy_1_end - -copy_1_move_4through7: - MOVL (SI), R14 - MOVL -4(SI)(R11*1), R15 - MOVL R14, (BX) - MOVL R15, -4(BX)(R11*1) - ADDQ R11, SI - ADDQ R11, BX - JMP copy_1_end - -copy_1_move_8through16: - MOVQ (SI), R14 - MOVQ -8(SI)(R11*1), R15 - MOVQ R14, (BX) - MOVQ R15, -8(BX)(R11*1) - ADDQ R11, SI - ADDQ R11, BX - -copy_1_end: - ADDQ R11, DI - - // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) -check_offset: - LEAQ (DI)(R10*1), R11 - CMPQ R12, R11 - JG error_match_off_too_big - CMPQ R12, R8 - JG error_match_off_too_big - - // Copy match from history - MOVQ R12, R11 - SUBQ DI, R11 - JLS copy_match - MOVQ R9, R14 - SUBQ R11, R14 - CMPQ R13, R11 - JG copy_all_from_history - MOVQ R13, R11 - SUBQ $0x10, R11 - JB copy_4_small - -copy_4_loop: - MOVUPS (R14), X0 - MOVUPS X0, (BX) - ADDQ $0x10, R14 - ADDQ $0x10, BX - SUBQ $0x10, R11 - JAE copy_4_loop - LEAQ 16(R14)(R11*1), R14 - LEAQ 16(BX)(R11*1), BX - MOVUPS -16(R14), X0 - MOVUPS X0, -16(BX) - JMP copy_4_end - -copy_4_small: - CMPQ R13, $0x03 - JE copy_4_move_3 - CMPQ R13, $0x08 - JB copy_4_move_4through7 - JMP copy_4_move_8through16 - -copy_4_move_3: - MOVW (R14), R11 - MOVB 2(R14), R12 - MOVW R11, (BX) - MOVB R12, 2(BX) - ADDQ R13, R14 - ADDQ R13, BX - JMP copy_4_end - -copy_4_move_4through7: - MOVL (R14), R11 - MOVL -4(R14)(R13*1), R12 - MOVL R11, (BX) - MOVL R12, -4(BX)(R13*1) - ADDQ R13, R14 - ADDQ R13, BX - JMP copy_4_end - -copy_4_move_8through16: - MOVQ (R14), R11 - MOVQ -8(R14)(R13*1), R12 - MOVQ R11, (BX) - MOVQ R12, -8(BX)(R13*1) - ADDQ R13, R14 - ADDQ R13, BX - -copy_4_end: - ADDQ R13, DI - ADDQ $0x18, AX - INCQ DX - CMPQ DX, CX - JB main_loop - JMP loop_finished - -copy_all_from_history: - MOVQ R11, R15 - SUBQ $0x10, R15 - JB copy_5_small - -copy_5_loop: - MOVUPS (R14), X0 - MOVUPS X0, (BX) - ADDQ $0x10, R14 - ADDQ $0x10, BX - SUBQ $0x10, R15 - JAE copy_5_loop - LEAQ 16(R14)(R15*1), R14 - LEAQ 16(BX)(R15*1), BX - MOVUPS -16(R14), X0 - MOVUPS X0, -16(BX) - JMP copy_5_end - -copy_5_small: - CMPQ R11, $0x03 - JE copy_5_move_3 - JB copy_5_move_1or2 - CMPQ R11, $0x08 - JB copy_5_move_4through7 - JMP copy_5_move_8through16 - -copy_5_move_1or2: - MOVB (R14), R15 - MOVB -1(R14)(R11*1), BP - MOVB R15, (BX) - MOVB BP, -1(BX)(R11*1) - ADDQ R11, R14 - ADDQ R11, BX - JMP copy_5_end - -copy_5_move_3: - MOVW (R14), R15 - MOVB 2(R14), BP - MOVW R15, (BX) - MOVB BP, 2(BX) - ADDQ R11, R14 - ADDQ R11, BX - JMP copy_5_end - -copy_5_move_4through7: - MOVL (R14), R15 - MOVL -4(R14)(R11*1), BP - MOVL R15, (BX) - MOVL BP, -4(BX)(R11*1) - ADDQ R11, R14 - ADDQ R11, BX - JMP copy_5_end - -copy_5_move_8through16: - MOVQ (R14), R15 - MOVQ -8(R14)(R11*1), BP - MOVQ R15, (BX) - MOVQ BP, -8(BX)(R11*1) - ADDQ R11, R14 - ADDQ R11, BX - -copy_5_end: - ADDQ R11, DI - SUBQ R11, R13 - - // Copy match from the current buffer -copy_match: - MOVQ BX, R11 - SUBQ R12, R11 - - // ml <= mo - CMPQ R13, R12 - JA copy_overlapping_match - - // Copy non-overlapping match - ADDQ R13, DI - MOVQ R13, R12 - SUBQ $0x10, R12 - JB copy_2_small - -copy_2_loop: - MOVUPS (R11), X0 - MOVUPS X0, (BX) - ADDQ $0x10, R11 - ADDQ $0x10, BX - SUBQ $0x10, R12 - JAE copy_2_loop - LEAQ 16(R11)(R12*1), R11 - LEAQ 16(BX)(R12*1), BX - MOVUPS -16(R11), X0 - MOVUPS X0, -16(BX) - JMP copy_2_end - -copy_2_small: - CMPQ R13, $0x03 - JE copy_2_move_3 - JB copy_2_move_1or2 - CMPQ R13, $0x08 
- JB copy_2_move_4through7 - JMP copy_2_move_8through16 - -copy_2_move_1or2: - MOVB (R11), R12 - MOVB -1(R11)(R13*1), R14 - MOVB R12, (BX) - MOVB R14, -1(BX)(R13*1) - ADDQ R13, R11 - ADDQ R13, BX - JMP copy_2_end - -copy_2_move_3: - MOVW (R11), R12 - MOVB 2(R11), R14 - MOVW R12, (BX) - MOVB R14, 2(BX) - ADDQ R13, R11 - ADDQ R13, BX - JMP copy_2_end - -copy_2_move_4through7: - MOVL (R11), R12 - MOVL -4(R11)(R13*1), R14 - MOVL R12, (BX) - MOVL R14, -4(BX)(R13*1) - ADDQ R13, R11 - ADDQ R13, BX - JMP copy_2_end - -copy_2_move_8through16: - MOVQ (R11), R12 - MOVQ -8(R11)(R13*1), R14 - MOVQ R12, (BX) - MOVQ R14, -8(BX)(R13*1) - ADDQ R13, R11 - ADDQ R13, BX - -copy_2_end: - JMP handle_loop - - // Copy overlapping match -copy_overlapping_match: - ADDQ R13, DI - -copy_slow_3: - MOVB (R11), R12 - MOVB R12, (BX) - INCQ R11 - INCQ BX - DECQ R13 - JNZ copy_slow_3 - -handle_loop: - ADDQ $0x18, AX - INCQ DX - CMPQ DX, CX - JB main_loop - -loop_finished: - // Return value - MOVB $0x01, ret+8(FP) - - // Update the context - MOVQ ctx+0(FP), AX - MOVQ DX, 24(AX) - MOVQ DI, 104(AX) - SUBQ 80(AX), SI - MOVQ SI, 112(AX) - RET - -error_match_off_too_big: - // Return value - MOVB $0x00, ret+8(FP) - - // Update the context - MOVQ ctx+0(FP), AX - MOVQ DX, 24(AX) - MOVQ DI, 104(AX) - SUBQ 80(AX), SI - MOVQ SI, 112(AX) - RET - -empty_seqs: - // Return value - MOVB $0x01, ret+8(FP) - RET - -// func sequenceDecs_decodeSync_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int -// Requires: CMOV, SSE -TEXT ·sequenceDecs_decodeSync_amd64(SB), $64-32 - MOVQ br+8(FP), CX - MOVQ 24(CX), DX - MOVBQZX 32(CX), BX - MOVQ (CX), AX - MOVQ 8(CX), SI - ADDQ SI, AX - MOVQ AX, (SP) - MOVQ ctx+16(FP), AX - MOVQ 72(AX), DI - MOVQ 80(AX), R8 - MOVQ 88(AX), R9 - XORQ CX, CX - MOVQ CX, 8(SP) - MOVQ CX, 16(SP) - MOVQ CX, 24(SP) - MOVQ 112(AX), R10 - MOVQ 128(AX), CX - MOVQ CX, 32(SP) - MOVQ 144(AX), R11 - MOVQ 136(AX), R12 - MOVQ 200(AX), CX - MOVQ CX, 56(SP) - MOVQ 176(AX), CX - MOVQ CX, 48(SP) - MOVQ 184(AX), AX - MOVQ AX, 40(SP) - MOVQ 40(SP), AX - ADDQ AX, 48(SP) - - // Calculate poiter to s.out[cap(s.out)] (a past-end pointer) - ADDQ R10, 32(SP) - - // outBase += outPosition - ADDQ R12, R10 - -sequenceDecs_decodeSync_amd64_main_loop: - MOVQ (SP), R13 - - // Fill bitreader to have enough for the offset and match length. 
- CMPQ SI, $0x08 - JL sequenceDecs_decodeSync_amd64_fill_byte_by_byte - MOVQ BX, AX - SHRQ $0x03, AX - SUBQ AX, R13 - MOVQ (R13), DX - SUBQ AX, SI - ANDQ $0x07, BX - JMP sequenceDecs_decodeSync_amd64_fill_end - -sequenceDecs_decodeSync_amd64_fill_byte_by_byte: - CMPQ SI, $0x00 - JLE sequenceDecs_decodeSync_amd64_fill_check_overread - CMPQ BX, $0x07 - JLE sequenceDecs_decodeSync_amd64_fill_end - SHLQ $0x08, DX - SUBQ $0x01, R13 - SUBQ $0x01, SI - SUBQ $0x08, BX - MOVBQZX (R13), AX - ORQ AX, DX - JMP sequenceDecs_decodeSync_amd64_fill_byte_by_byte - -sequenceDecs_decodeSync_amd64_fill_check_overread: - CMPQ BX, $0x40 - JA error_overread - -sequenceDecs_decodeSync_amd64_fill_end: - // Update offset - MOVQ R9, AX - MOVQ BX, CX - MOVQ DX, R14 - SHLQ CL, R14 - MOVB AH, CL - SHRQ $0x20, AX - TESTQ CX, CX - JZ sequenceDecs_decodeSync_amd64_of_update_zero - ADDQ CX, BX - CMPQ BX, $0x40 - JA sequenceDecs_decodeSync_amd64_of_update_zero - CMPQ CX, $0x40 - JAE sequenceDecs_decodeSync_amd64_of_update_zero - NEGQ CX - SHRQ CL, R14 - ADDQ R14, AX - -sequenceDecs_decodeSync_amd64_of_update_zero: - MOVQ AX, 8(SP) - - // Update match length - MOVQ R8, AX - MOVQ BX, CX - MOVQ DX, R14 - SHLQ CL, R14 - MOVB AH, CL - SHRQ $0x20, AX - TESTQ CX, CX - JZ sequenceDecs_decodeSync_amd64_ml_update_zero - ADDQ CX, BX - CMPQ BX, $0x40 - JA sequenceDecs_decodeSync_amd64_ml_update_zero - CMPQ CX, $0x40 - JAE sequenceDecs_decodeSync_amd64_ml_update_zero - NEGQ CX - SHRQ CL, R14 - ADDQ R14, AX - -sequenceDecs_decodeSync_amd64_ml_update_zero: - MOVQ AX, 16(SP) - - // Fill bitreader to have enough for the remaining - CMPQ SI, $0x08 - JL sequenceDecs_decodeSync_amd64_fill_2_byte_by_byte - MOVQ BX, AX - SHRQ $0x03, AX - SUBQ AX, R13 - MOVQ (R13), DX - SUBQ AX, SI - ANDQ $0x07, BX - JMP sequenceDecs_decodeSync_amd64_fill_2_end - -sequenceDecs_decodeSync_amd64_fill_2_byte_by_byte: - CMPQ SI, $0x00 - JLE sequenceDecs_decodeSync_amd64_fill_2_check_overread - CMPQ BX, $0x07 - JLE sequenceDecs_decodeSync_amd64_fill_2_end - SHLQ $0x08, DX - SUBQ $0x01, R13 - SUBQ $0x01, SI - SUBQ $0x08, BX - MOVBQZX (R13), AX - ORQ AX, DX - JMP sequenceDecs_decodeSync_amd64_fill_2_byte_by_byte - -sequenceDecs_decodeSync_amd64_fill_2_check_overread: - CMPQ BX, $0x40 - JA error_overread - -sequenceDecs_decodeSync_amd64_fill_2_end: - // Update literal length - MOVQ DI, AX - MOVQ BX, CX - MOVQ DX, R14 - SHLQ CL, R14 - MOVB AH, CL - SHRQ $0x20, AX - TESTQ CX, CX - JZ sequenceDecs_decodeSync_amd64_ll_update_zero - ADDQ CX, BX - CMPQ BX, $0x40 - JA sequenceDecs_decodeSync_amd64_ll_update_zero - CMPQ CX, $0x40 - JAE sequenceDecs_decodeSync_amd64_ll_update_zero - NEGQ CX - SHRQ CL, R14 - ADDQ R14, AX - -sequenceDecs_decodeSync_amd64_ll_update_zero: - MOVQ AX, 24(SP) - - // Fill bitreader for state updates - MOVQ R13, (SP) - MOVQ R9, AX - SHRQ $0x08, AX - MOVBQZX AL, AX - MOVQ ctx+16(FP), CX - CMPQ 96(CX), $0x00 - JZ sequenceDecs_decodeSync_amd64_skip_update - - // Update Literal Length State - MOVBQZX DI, R13 - SHRL $0x10, DI - LEAQ (BX)(R13*1), CX - MOVQ DX, R14 - MOVQ CX, BX - ROLQ CL, R14 - MOVL $0x00000001, R15 - MOVB R13, CL - SHLL CL, R15 - DECL R15 - ANDQ R15, R14 - ADDQ R14, DI - - // Load ctx.llTable - MOVQ ctx+16(FP), CX - MOVQ (CX), CX - MOVQ (CX)(DI*8), DI - - // Update Match Length State - MOVBQZX R8, R13 - SHRL $0x10, R8 - LEAQ (BX)(R13*1), CX - MOVQ DX, R14 - MOVQ CX, BX - ROLQ CL, R14 - MOVL $0x00000001, R15 - MOVB R13, CL - SHLL CL, R15 - DECL R15 - ANDQ R15, R14 - ADDQ R14, R8 - - // Load ctx.mlTable - MOVQ ctx+16(FP), CX - MOVQ 24(CX), CX - 
MOVQ (CX)(R8*8), R8 - - // Update Offset State - MOVBQZX R9, R13 - SHRL $0x10, R9 - LEAQ (BX)(R13*1), CX - MOVQ DX, R14 - MOVQ CX, BX - ROLQ CL, R14 - MOVL $0x00000001, R15 - MOVB R13, CL - SHLL CL, R15 - DECL R15 - ANDQ R15, R14 - ADDQ R14, R9 - - // Load ctx.ofTable - MOVQ ctx+16(FP), CX - MOVQ 48(CX), CX - MOVQ (CX)(R9*8), R9 - -sequenceDecs_decodeSync_amd64_skip_update: - // Adjust offset - MOVQ s+0(FP), CX - MOVQ 8(SP), R13 - CMPQ AX, $0x01 - JBE sequenceDecs_decodeSync_amd64_adjust_offsetB_1_or_0 - MOVUPS 144(CX), X0 - MOVQ R13, 144(CX) - MOVUPS X0, 152(CX) - JMP sequenceDecs_decodeSync_amd64_after_adjust - -sequenceDecs_decodeSync_amd64_adjust_offsetB_1_or_0: - CMPQ 24(SP), $0x00000000 - JNE sequenceDecs_decodeSync_amd64_adjust_offset_maybezero - INCQ R13 - JMP sequenceDecs_decodeSync_amd64_adjust_offset_nonzero - -sequenceDecs_decodeSync_amd64_adjust_offset_maybezero: - TESTQ R13, R13 - JNZ sequenceDecs_decodeSync_amd64_adjust_offset_nonzero - MOVQ 144(CX), R13 - JMP sequenceDecs_decodeSync_amd64_after_adjust - -sequenceDecs_decodeSync_amd64_adjust_offset_nonzero: - MOVQ R13, AX - XORQ R14, R14 - MOVQ $-1, R15 - CMPQ R13, $0x03 - CMOVQEQ R14, AX - CMOVQEQ R15, R14 - ADDQ 144(CX)(AX*8), R14 - JNZ sequenceDecs_decodeSync_amd64_adjust_temp_valid - MOVQ $0x00000001, R14 - -sequenceDecs_decodeSync_amd64_adjust_temp_valid: - CMPQ R13, $0x01 - JZ sequenceDecs_decodeSync_amd64_adjust_skip - MOVQ 152(CX), AX - MOVQ AX, 160(CX) - -sequenceDecs_decodeSync_amd64_adjust_skip: - MOVQ 144(CX), AX - MOVQ AX, 152(CX) - MOVQ R14, 144(CX) - MOVQ R14, R13 - -sequenceDecs_decodeSync_amd64_after_adjust: - MOVQ R13, 8(SP) - - // Check values - MOVQ 16(SP), AX - MOVQ 24(SP), CX - LEAQ (AX)(CX*1), R14 - MOVQ s+0(FP), R15 - ADDQ R14, 256(R15) - MOVQ ctx+16(FP), R14 - SUBQ CX, 104(R14) - JS error_not_enough_literals - CMPQ AX, $0x00020002 - JA sequenceDecs_decodeSync_amd64_error_match_len_too_big - TESTQ R13, R13 - JNZ sequenceDecs_decodeSync_amd64_match_len_ofs_ok - TESTQ AX, AX - JNZ sequenceDecs_decodeSync_amd64_error_match_len_ofs_mismatch - -sequenceDecs_decodeSync_amd64_match_len_ofs_ok: - MOVQ 24(SP), AX - MOVQ 8(SP), CX - MOVQ 16(SP), R13 - - // Check if we have enough space in s.out - LEAQ (AX)(R13*1), R14 - ADDQ R10, R14 - CMPQ R14, 32(SP) - JA error_not_enough_space - - // Copy literals - TESTQ AX, AX - JZ check_offset - XORQ R14, R14 - -copy_1: - MOVUPS (R11)(R14*1), X0 - MOVUPS X0, (R10)(R14*1) - ADDQ $0x10, R14 - CMPQ R14, AX - JB copy_1 - ADDQ AX, R11 - ADDQ AX, R10 - ADDQ AX, R12 - - // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) -check_offset: - MOVQ R12, AX - ADDQ 40(SP), AX - CMPQ CX, AX - JG error_match_off_too_big - CMPQ CX, 56(SP) - JG error_match_off_too_big - - // Copy match from history - MOVQ CX, AX - SUBQ R12, AX - JLS copy_match - MOVQ 48(SP), R14 - SUBQ AX, R14 - CMPQ R13, AX - JG copy_all_from_history - MOVQ R13, AX - SUBQ $0x10, AX - JB copy_4_small - -copy_4_loop: - MOVUPS (R14), X0 - MOVUPS X0, (R10) - ADDQ $0x10, R14 - ADDQ $0x10, R10 - SUBQ $0x10, AX - JAE copy_4_loop - LEAQ 16(R14)(AX*1), R14 - LEAQ 16(R10)(AX*1), R10 - MOVUPS -16(R14), X0 - MOVUPS X0, -16(R10) - JMP copy_4_end - -copy_4_small: - CMPQ R13, $0x03 - JE copy_4_move_3 - CMPQ R13, $0x08 - JB copy_4_move_4through7 - JMP copy_4_move_8through16 - -copy_4_move_3: - MOVW (R14), AX - MOVB 2(R14), CL - MOVW AX, (R10) - MOVB CL, 2(R10) - ADDQ R13, R14 - ADDQ R13, R10 - JMP copy_4_end - -copy_4_move_4through7: - MOVL (R14), AX - MOVL -4(R14)(R13*1), CX - MOVL AX, (R10) - MOVL CX, -4(R10)(R13*1) - 
ADDQ R13, R14 - ADDQ R13, R10 - JMP copy_4_end - -copy_4_move_8through16: - MOVQ (R14), AX - MOVQ -8(R14)(R13*1), CX - MOVQ AX, (R10) - MOVQ CX, -8(R10)(R13*1) - ADDQ R13, R14 - ADDQ R13, R10 - -copy_4_end: - ADDQ R13, R12 - JMP handle_loop - JMP loop_finished - -copy_all_from_history: - MOVQ AX, R15 - SUBQ $0x10, R15 - JB copy_5_small - -copy_5_loop: - MOVUPS (R14), X0 - MOVUPS X0, (R10) - ADDQ $0x10, R14 - ADDQ $0x10, R10 - SUBQ $0x10, R15 - JAE copy_5_loop - LEAQ 16(R14)(R15*1), R14 - LEAQ 16(R10)(R15*1), R10 - MOVUPS -16(R14), X0 - MOVUPS X0, -16(R10) - JMP copy_5_end - -copy_5_small: - CMPQ AX, $0x03 - JE copy_5_move_3 - JB copy_5_move_1or2 - CMPQ AX, $0x08 - JB copy_5_move_4through7 - JMP copy_5_move_8through16 - -copy_5_move_1or2: - MOVB (R14), R15 - MOVB -1(R14)(AX*1), BP - MOVB R15, (R10) - MOVB BP, -1(R10)(AX*1) - ADDQ AX, R14 - ADDQ AX, R10 - JMP copy_5_end - -copy_5_move_3: - MOVW (R14), R15 - MOVB 2(R14), BP - MOVW R15, (R10) - MOVB BP, 2(R10) - ADDQ AX, R14 - ADDQ AX, R10 - JMP copy_5_end - -copy_5_move_4through7: - MOVL (R14), R15 - MOVL -4(R14)(AX*1), BP - MOVL R15, (R10) - MOVL BP, -4(R10)(AX*1) - ADDQ AX, R14 - ADDQ AX, R10 - JMP copy_5_end - -copy_5_move_8through16: - MOVQ (R14), R15 - MOVQ -8(R14)(AX*1), BP - MOVQ R15, (R10) - MOVQ BP, -8(R10)(AX*1) - ADDQ AX, R14 - ADDQ AX, R10 - -copy_5_end: - ADDQ AX, R12 - SUBQ AX, R13 - - // Copy match from the current buffer -copy_match: - MOVQ R10, AX - SUBQ CX, AX - - // ml <= mo - CMPQ R13, CX - JA copy_overlapping_match - - // Copy non-overlapping match - ADDQ R13, R12 - MOVQ R10, CX - ADDQ R13, R10 - -copy_2: - MOVUPS (AX), X0 - MOVUPS X0, (CX) - ADDQ $0x10, AX - ADDQ $0x10, CX - SUBQ $0x10, R13 - JHI copy_2 - JMP handle_loop - - // Copy overlapping match -copy_overlapping_match: - ADDQ R13, R12 - -copy_slow_3: - MOVB (AX), CL - MOVB CL, (R10) - INCQ AX - INCQ R10 - DECQ R13 - JNZ copy_slow_3 - -handle_loop: - MOVQ ctx+16(FP), AX - DECQ 96(AX) - JNS sequenceDecs_decodeSync_amd64_main_loop - -loop_finished: - MOVQ br+8(FP), AX - MOVQ DX, 24(AX) - MOVB BL, 32(AX) - MOVQ SI, 8(AX) - - // Update the context - MOVQ ctx+16(FP), AX - MOVQ R12, 136(AX) - MOVQ 144(AX), CX - SUBQ CX, R11 - MOVQ R11, 168(AX) - - // Return success - MOVQ $0x00000000, ret+24(FP) - RET - - // Return with match length error -sequenceDecs_decodeSync_amd64_error_match_len_ofs_mismatch: - MOVQ 16(SP), AX - MOVQ ctx+16(FP), CX - MOVQ AX, 216(CX) - MOVQ $0x00000001, ret+24(FP) - RET - - // Return with match too long error -sequenceDecs_decodeSync_amd64_error_match_len_too_big: - MOVQ ctx+16(FP), AX - MOVQ 16(SP), CX - MOVQ CX, 216(AX) - MOVQ $0x00000002, ret+24(FP) - RET - - // Return with match offset too long error -error_match_off_too_big: - MOVQ ctx+16(FP), AX - MOVQ 8(SP), CX - MOVQ CX, 224(AX) - MOVQ R12, 136(AX) - MOVQ $0x00000003, ret+24(FP) - RET - - // Return with not enough literals error -error_not_enough_literals: - MOVQ ctx+16(FP), AX - MOVQ 24(SP), CX - MOVQ CX, 208(AX) - MOVQ $0x00000004, ret+24(FP) - RET - - // Return with overread error -error_overread: - MOVQ $0x00000006, ret+24(FP) - RET - - // Return with not enough output space error -error_not_enough_space: - MOVQ ctx+16(FP), AX - MOVQ 24(SP), CX - MOVQ CX, 208(AX) - MOVQ 16(SP), CX - MOVQ CX, 216(AX) - MOVQ R12, 136(AX) - MOVQ $0x00000005, ret+24(FP) - RET - -// func sequenceDecs_decodeSync_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int -// Requires: BMI, BMI2, CMOV, SSE -TEXT ·sequenceDecs_decodeSync_bmi2(SB), $64-32 - MOVQ br+8(FP), BX - MOVQ 24(BX), AX - MOVBQZX 
32(BX), DX - MOVQ (BX), CX - MOVQ 8(BX), BX - ADDQ BX, CX - MOVQ CX, (SP) - MOVQ ctx+16(FP), CX - MOVQ 72(CX), SI - MOVQ 80(CX), DI - MOVQ 88(CX), R8 - XORQ R9, R9 - MOVQ R9, 8(SP) - MOVQ R9, 16(SP) - MOVQ R9, 24(SP) - MOVQ 112(CX), R9 - MOVQ 128(CX), R10 - MOVQ R10, 32(SP) - MOVQ 144(CX), R10 - MOVQ 136(CX), R11 - MOVQ 200(CX), R12 - MOVQ R12, 56(SP) - MOVQ 176(CX), R12 - MOVQ R12, 48(SP) - MOVQ 184(CX), CX - MOVQ CX, 40(SP) - MOVQ 40(SP), CX - ADDQ CX, 48(SP) - - // Calculate poiter to s.out[cap(s.out)] (a past-end pointer) - ADDQ R9, 32(SP) - - // outBase += outPosition - ADDQ R11, R9 - -sequenceDecs_decodeSync_bmi2_main_loop: - MOVQ (SP), R12 - - // Fill bitreader to have enough for the offset and match length. - CMPQ BX, $0x08 - JL sequenceDecs_decodeSync_bmi2_fill_byte_by_byte - MOVQ DX, CX - SHRQ $0x03, CX - SUBQ CX, R12 - MOVQ (R12), AX - SUBQ CX, BX - ANDQ $0x07, DX - JMP sequenceDecs_decodeSync_bmi2_fill_end - -sequenceDecs_decodeSync_bmi2_fill_byte_by_byte: - CMPQ BX, $0x00 - JLE sequenceDecs_decodeSync_bmi2_fill_check_overread - CMPQ DX, $0x07 - JLE sequenceDecs_decodeSync_bmi2_fill_end - SHLQ $0x08, AX - SUBQ $0x01, R12 - SUBQ $0x01, BX - SUBQ $0x08, DX - MOVBQZX (R12), CX - ORQ CX, AX - JMP sequenceDecs_decodeSync_bmi2_fill_byte_by_byte - -sequenceDecs_decodeSync_bmi2_fill_check_overread: - CMPQ DX, $0x40 - JA error_overread - -sequenceDecs_decodeSync_bmi2_fill_end: - // Update offset - MOVQ $0x00000808, CX - BEXTRQ CX, R8, R13 - MOVQ AX, R14 - LEAQ (DX)(R13*1), CX - ROLQ CL, R14 - BZHIQ R13, R14, R14 - MOVQ CX, DX - MOVQ R8, CX - SHRQ $0x20, CX - ADDQ R14, CX - MOVQ CX, 8(SP) - - // Update match length - MOVQ $0x00000808, CX - BEXTRQ CX, DI, R13 - MOVQ AX, R14 - LEAQ (DX)(R13*1), CX - ROLQ CL, R14 - BZHIQ R13, R14, R14 - MOVQ CX, DX - MOVQ DI, CX - SHRQ $0x20, CX - ADDQ R14, CX - MOVQ CX, 16(SP) - - // Fill bitreader to have enough for the remaining - CMPQ BX, $0x08 - JL sequenceDecs_decodeSync_bmi2_fill_2_byte_by_byte - MOVQ DX, CX - SHRQ $0x03, CX - SUBQ CX, R12 - MOVQ (R12), AX - SUBQ CX, BX - ANDQ $0x07, DX - JMP sequenceDecs_decodeSync_bmi2_fill_2_end - -sequenceDecs_decodeSync_bmi2_fill_2_byte_by_byte: - CMPQ BX, $0x00 - JLE sequenceDecs_decodeSync_bmi2_fill_2_check_overread - CMPQ DX, $0x07 - JLE sequenceDecs_decodeSync_bmi2_fill_2_end - SHLQ $0x08, AX - SUBQ $0x01, R12 - SUBQ $0x01, BX - SUBQ $0x08, DX - MOVBQZX (R12), CX - ORQ CX, AX - JMP sequenceDecs_decodeSync_bmi2_fill_2_byte_by_byte - -sequenceDecs_decodeSync_bmi2_fill_2_check_overread: - CMPQ DX, $0x40 - JA error_overread - -sequenceDecs_decodeSync_bmi2_fill_2_end: - // Update literal length - MOVQ $0x00000808, CX - BEXTRQ CX, SI, R13 - MOVQ AX, R14 - LEAQ (DX)(R13*1), CX - ROLQ CL, R14 - BZHIQ R13, R14, R14 - MOVQ CX, DX - MOVQ SI, CX - SHRQ $0x20, CX - ADDQ R14, CX - MOVQ CX, 24(SP) - - // Fill bitreader for state updates - MOVQ R12, (SP) - MOVQ $0x00000808, CX - BEXTRQ CX, R8, R12 - MOVQ ctx+16(FP), CX - CMPQ 96(CX), $0x00 - JZ sequenceDecs_decodeSync_bmi2_skip_update - LEAQ (SI)(DI*1), R13 - ADDQ R8, R13 - MOVBQZX R13, R13 - LEAQ (DX)(R13*1), CX - MOVQ AX, R14 - MOVQ CX, DX - ROLQ CL, R14 - BZHIQ R13, R14, R14 - - // Update Offset State - BZHIQ R8, R14, CX - SHRXQ R8, R14, R14 - SHRL $0x10, R8 - ADDQ CX, R8 - - // Load ctx.ofTable - MOVQ ctx+16(FP), CX - MOVQ 48(CX), CX - MOVQ (CX)(R8*8), R8 - - // Update Match Length State - BZHIQ DI, R14, CX - SHRXQ DI, R14, R14 - SHRL $0x10, DI - ADDQ CX, DI - - // Load ctx.mlTable - MOVQ ctx+16(FP), CX - MOVQ 24(CX), CX - MOVQ (CX)(DI*8), DI - - // Update Literal 
Length State - BZHIQ SI, R14, CX - SHRL $0x10, SI - ADDQ CX, SI - - // Load ctx.llTable - MOVQ ctx+16(FP), CX - MOVQ (CX), CX - MOVQ (CX)(SI*8), SI - -sequenceDecs_decodeSync_bmi2_skip_update: - // Adjust offset - MOVQ s+0(FP), CX - MOVQ 8(SP), R13 - CMPQ R12, $0x01 - JBE sequenceDecs_decodeSync_bmi2_adjust_offsetB_1_or_0 - MOVUPS 144(CX), X0 - MOVQ R13, 144(CX) - MOVUPS X0, 152(CX) - JMP sequenceDecs_decodeSync_bmi2_after_adjust - -sequenceDecs_decodeSync_bmi2_adjust_offsetB_1_or_0: - CMPQ 24(SP), $0x00000000 - JNE sequenceDecs_decodeSync_bmi2_adjust_offset_maybezero - INCQ R13 - JMP sequenceDecs_decodeSync_bmi2_adjust_offset_nonzero - -sequenceDecs_decodeSync_bmi2_adjust_offset_maybezero: - TESTQ R13, R13 - JNZ sequenceDecs_decodeSync_bmi2_adjust_offset_nonzero - MOVQ 144(CX), R13 - JMP sequenceDecs_decodeSync_bmi2_after_adjust - -sequenceDecs_decodeSync_bmi2_adjust_offset_nonzero: - MOVQ R13, R12 - XORQ R14, R14 - MOVQ $-1, R15 - CMPQ R13, $0x03 - CMOVQEQ R14, R12 - CMOVQEQ R15, R14 - ADDQ 144(CX)(R12*8), R14 - JNZ sequenceDecs_decodeSync_bmi2_adjust_temp_valid - MOVQ $0x00000001, R14 - -sequenceDecs_decodeSync_bmi2_adjust_temp_valid: - CMPQ R13, $0x01 - JZ sequenceDecs_decodeSync_bmi2_adjust_skip - MOVQ 152(CX), R12 - MOVQ R12, 160(CX) - -sequenceDecs_decodeSync_bmi2_adjust_skip: - MOVQ 144(CX), R12 - MOVQ R12, 152(CX) - MOVQ R14, 144(CX) - MOVQ R14, R13 - -sequenceDecs_decodeSync_bmi2_after_adjust: - MOVQ R13, 8(SP) - - // Check values - MOVQ 16(SP), CX - MOVQ 24(SP), R12 - LEAQ (CX)(R12*1), R14 - MOVQ s+0(FP), R15 - ADDQ R14, 256(R15) - MOVQ ctx+16(FP), R14 - SUBQ R12, 104(R14) - JS error_not_enough_literals - CMPQ CX, $0x00020002 - JA sequenceDecs_decodeSync_bmi2_error_match_len_too_big - TESTQ R13, R13 - JNZ sequenceDecs_decodeSync_bmi2_match_len_ofs_ok - TESTQ CX, CX - JNZ sequenceDecs_decodeSync_bmi2_error_match_len_ofs_mismatch - -sequenceDecs_decodeSync_bmi2_match_len_ofs_ok: - MOVQ 24(SP), CX - MOVQ 8(SP), R12 - MOVQ 16(SP), R13 - - // Check if we have enough space in s.out - LEAQ (CX)(R13*1), R14 - ADDQ R9, R14 - CMPQ R14, 32(SP) - JA error_not_enough_space - - // Copy literals - TESTQ CX, CX - JZ check_offset - XORQ R14, R14 - -copy_1: - MOVUPS (R10)(R14*1), X0 - MOVUPS X0, (R9)(R14*1) - ADDQ $0x10, R14 - CMPQ R14, CX - JB copy_1 - ADDQ CX, R10 - ADDQ CX, R9 - ADDQ CX, R11 - - // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) -check_offset: - MOVQ R11, CX - ADDQ 40(SP), CX - CMPQ R12, CX - JG error_match_off_too_big - CMPQ R12, 56(SP) - JG error_match_off_too_big - - // Copy match from history - MOVQ R12, CX - SUBQ R11, CX - JLS copy_match - MOVQ 48(SP), R14 - SUBQ CX, R14 - CMPQ R13, CX - JG copy_all_from_history - MOVQ R13, CX - SUBQ $0x10, CX - JB copy_4_small - -copy_4_loop: - MOVUPS (R14), X0 - MOVUPS X0, (R9) - ADDQ $0x10, R14 - ADDQ $0x10, R9 - SUBQ $0x10, CX - JAE copy_4_loop - LEAQ 16(R14)(CX*1), R14 - LEAQ 16(R9)(CX*1), R9 - MOVUPS -16(R14), X0 - MOVUPS X0, -16(R9) - JMP copy_4_end - -copy_4_small: - CMPQ R13, $0x03 - JE copy_4_move_3 - CMPQ R13, $0x08 - JB copy_4_move_4through7 - JMP copy_4_move_8through16 - -copy_4_move_3: - MOVW (R14), CX - MOVB 2(R14), R12 - MOVW CX, (R9) - MOVB R12, 2(R9) - ADDQ R13, R14 - ADDQ R13, R9 - JMP copy_4_end - -copy_4_move_4through7: - MOVL (R14), CX - MOVL -4(R14)(R13*1), R12 - MOVL CX, (R9) - MOVL R12, -4(R9)(R13*1) - ADDQ R13, R14 - ADDQ R13, R9 - JMP copy_4_end - -copy_4_move_8through16: - MOVQ (R14), CX - MOVQ -8(R14)(R13*1), R12 - MOVQ CX, (R9) - MOVQ R12, -8(R9)(R13*1) - ADDQ R13, R14 - ADDQ R13, R9 - 
-copy_4_end: - ADDQ R13, R11 - JMP handle_loop - JMP loop_finished - -copy_all_from_history: - MOVQ CX, R15 - SUBQ $0x10, R15 - JB copy_5_small - -copy_5_loop: - MOVUPS (R14), X0 - MOVUPS X0, (R9) - ADDQ $0x10, R14 - ADDQ $0x10, R9 - SUBQ $0x10, R15 - JAE copy_5_loop - LEAQ 16(R14)(R15*1), R14 - LEAQ 16(R9)(R15*1), R9 - MOVUPS -16(R14), X0 - MOVUPS X0, -16(R9) - JMP copy_5_end - -copy_5_small: - CMPQ CX, $0x03 - JE copy_5_move_3 - JB copy_5_move_1or2 - CMPQ CX, $0x08 - JB copy_5_move_4through7 - JMP copy_5_move_8through16 - -copy_5_move_1or2: - MOVB (R14), R15 - MOVB -1(R14)(CX*1), BP - MOVB R15, (R9) - MOVB BP, -1(R9)(CX*1) - ADDQ CX, R14 - ADDQ CX, R9 - JMP copy_5_end - -copy_5_move_3: - MOVW (R14), R15 - MOVB 2(R14), BP - MOVW R15, (R9) - MOVB BP, 2(R9) - ADDQ CX, R14 - ADDQ CX, R9 - JMP copy_5_end - -copy_5_move_4through7: - MOVL (R14), R15 - MOVL -4(R14)(CX*1), BP - MOVL R15, (R9) - MOVL BP, -4(R9)(CX*1) - ADDQ CX, R14 - ADDQ CX, R9 - JMP copy_5_end - -copy_5_move_8through16: - MOVQ (R14), R15 - MOVQ -8(R14)(CX*1), BP - MOVQ R15, (R9) - MOVQ BP, -8(R9)(CX*1) - ADDQ CX, R14 - ADDQ CX, R9 - -copy_5_end: - ADDQ CX, R11 - SUBQ CX, R13 - - // Copy match from the current buffer -copy_match: - MOVQ R9, CX - SUBQ R12, CX - - // ml <= mo - CMPQ R13, R12 - JA copy_overlapping_match - - // Copy non-overlapping match - ADDQ R13, R11 - MOVQ R9, R12 - ADDQ R13, R9 - -copy_2: - MOVUPS (CX), X0 - MOVUPS X0, (R12) - ADDQ $0x10, CX - ADDQ $0x10, R12 - SUBQ $0x10, R13 - JHI copy_2 - JMP handle_loop - - // Copy overlapping match -copy_overlapping_match: - ADDQ R13, R11 - -copy_slow_3: - MOVB (CX), R12 - MOVB R12, (R9) - INCQ CX - INCQ R9 - DECQ R13 - JNZ copy_slow_3 - -handle_loop: - MOVQ ctx+16(FP), CX - DECQ 96(CX) - JNS sequenceDecs_decodeSync_bmi2_main_loop - -loop_finished: - MOVQ br+8(FP), CX - MOVQ AX, 24(CX) - MOVB DL, 32(CX) - MOVQ BX, 8(CX) - - // Update the context - MOVQ ctx+16(FP), AX - MOVQ R11, 136(AX) - MOVQ 144(AX), CX - SUBQ CX, R10 - MOVQ R10, 168(AX) - - // Return success - MOVQ $0x00000000, ret+24(FP) - RET - - // Return with match length error -sequenceDecs_decodeSync_bmi2_error_match_len_ofs_mismatch: - MOVQ 16(SP), AX - MOVQ ctx+16(FP), CX - MOVQ AX, 216(CX) - MOVQ $0x00000001, ret+24(FP) - RET - - // Return with match too long error -sequenceDecs_decodeSync_bmi2_error_match_len_too_big: - MOVQ ctx+16(FP), AX - MOVQ 16(SP), CX - MOVQ CX, 216(AX) - MOVQ $0x00000002, ret+24(FP) - RET - - // Return with match offset too long error -error_match_off_too_big: - MOVQ ctx+16(FP), AX - MOVQ 8(SP), CX - MOVQ CX, 224(AX) - MOVQ R11, 136(AX) - MOVQ $0x00000003, ret+24(FP) - RET - - // Return with not enough literals error -error_not_enough_literals: - MOVQ ctx+16(FP), AX - MOVQ 24(SP), CX - MOVQ CX, 208(AX) - MOVQ $0x00000004, ret+24(FP) - RET - - // Return with overread error -error_overread: - MOVQ $0x00000006, ret+24(FP) - RET - - // Return with not enough output space error -error_not_enough_space: - MOVQ ctx+16(FP), AX - MOVQ 24(SP), CX - MOVQ CX, 208(AX) - MOVQ 16(SP), CX - MOVQ CX, 216(AX) - MOVQ R11, 136(AX) - MOVQ $0x00000005, ret+24(FP) - RET - -// func sequenceDecs_decodeSync_safe_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int -// Requires: CMOV, SSE -TEXT ·sequenceDecs_decodeSync_safe_amd64(SB), $64-32 - MOVQ br+8(FP), CX - MOVQ 24(CX), DX - MOVBQZX 32(CX), BX - MOVQ (CX), AX - MOVQ 8(CX), SI - ADDQ SI, AX - MOVQ AX, (SP) - MOVQ ctx+16(FP), AX - MOVQ 72(AX), DI - MOVQ 80(AX), R8 - MOVQ 88(AX), R9 - XORQ CX, CX - MOVQ CX, 8(SP) - MOVQ CX, 16(SP) - MOVQ CX, 
24(SP) - MOVQ 112(AX), R10 - MOVQ 128(AX), CX - MOVQ CX, 32(SP) - MOVQ 144(AX), R11 - MOVQ 136(AX), R12 - MOVQ 200(AX), CX - MOVQ CX, 56(SP) - MOVQ 176(AX), CX - MOVQ CX, 48(SP) - MOVQ 184(AX), AX - MOVQ AX, 40(SP) - MOVQ 40(SP), AX - ADDQ AX, 48(SP) - - // Calculate poiter to s.out[cap(s.out)] (a past-end pointer) - ADDQ R10, 32(SP) - - // outBase += outPosition - ADDQ R12, R10 - -sequenceDecs_decodeSync_safe_amd64_main_loop: - MOVQ (SP), R13 - - // Fill bitreader to have enough for the offset and match length. - CMPQ SI, $0x08 - JL sequenceDecs_decodeSync_safe_amd64_fill_byte_by_byte - MOVQ BX, AX - SHRQ $0x03, AX - SUBQ AX, R13 - MOVQ (R13), DX - SUBQ AX, SI - ANDQ $0x07, BX - JMP sequenceDecs_decodeSync_safe_amd64_fill_end - -sequenceDecs_decodeSync_safe_amd64_fill_byte_by_byte: - CMPQ SI, $0x00 - JLE sequenceDecs_decodeSync_safe_amd64_fill_check_overread - CMPQ BX, $0x07 - JLE sequenceDecs_decodeSync_safe_amd64_fill_end - SHLQ $0x08, DX - SUBQ $0x01, R13 - SUBQ $0x01, SI - SUBQ $0x08, BX - MOVBQZX (R13), AX - ORQ AX, DX - JMP sequenceDecs_decodeSync_safe_amd64_fill_byte_by_byte - -sequenceDecs_decodeSync_safe_amd64_fill_check_overread: - CMPQ BX, $0x40 - JA error_overread - -sequenceDecs_decodeSync_safe_amd64_fill_end: - // Update offset - MOVQ R9, AX - MOVQ BX, CX - MOVQ DX, R14 - SHLQ CL, R14 - MOVB AH, CL - SHRQ $0x20, AX - TESTQ CX, CX - JZ sequenceDecs_decodeSync_safe_amd64_of_update_zero - ADDQ CX, BX - CMPQ BX, $0x40 - JA sequenceDecs_decodeSync_safe_amd64_of_update_zero - CMPQ CX, $0x40 - JAE sequenceDecs_decodeSync_safe_amd64_of_update_zero - NEGQ CX - SHRQ CL, R14 - ADDQ R14, AX - -sequenceDecs_decodeSync_safe_amd64_of_update_zero: - MOVQ AX, 8(SP) - - // Update match length - MOVQ R8, AX - MOVQ BX, CX - MOVQ DX, R14 - SHLQ CL, R14 - MOVB AH, CL - SHRQ $0x20, AX - TESTQ CX, CX - JZ sequenceDecs_decodeSync_safe_amd64_ml_update_zero - ADDQ CX, BX - CMPQ BX, $0x40 - JA sequenceDecs_decodeSync_safe_amd64_ml_update_zero - CMPQ CX, $0x40 - JAE sequenceDecs_decodeSync_safe_amd64_ml_update_zero - NEGQ CX - SHRQ CL, R14 - ADDQ R14, AX - -sequenceDecs_decodeSync_safe_amd64_ml_update_zero: - MOVQ AX, 16(SP) - - // Fill bitreader to have enough for the remaining - CMPQ SI, $0x08 - JL sequenceDecs_decodeSync_safe_amd64_fill_2_byte_by_byte - MOVQ BX, AX - SHRQ $0x03, AX - SUBQ AX, R13 - MOVQ (R13), DX - SUBQ AX, SI - ANDQ $0x07, BX - JMP sequenceDecs_decodeSync_safe_amd64_fill_2_end - -sequenceDecs_decodeSync_safe_amd64_fill_2_byte_by_byte: - CMPQ SI, $0x00 - JLE sequenceDecs_decodeSync_safe_amd64_fill_2_check_overread - CMPQ BX, $0x07 - JLE sequenceDecs_decodeSync_safe_amd64_fill_2_end - SHLQ $0x08, DX - SUBQ $0x01, R13 - SUBQ $0x01, SI - SUBQ $0x08, BX - MOVBQZX (R13), AX - ORQ AX, DX - JMP sequenceDecs_decodeSync_safe_amd64_fill_2_byte_by_byte - -sequenceDecs_decodeSync_safe_amd64_fill_2_check_overread: - CMPQ BX, $0x40 - JA error_overread - -sequenceDecs_decodeSync_safe_amd64_fill_2_end: - // Update literal length - MOVQ DI, AX - MOVQ BX, CX - MOVQ DX, R14 - SHLQ CL, R14 - MOVB AH, CL - SHRQ $0x20, AX - TESTQ CX, CX - JZ sequenceDecs_decodeSync_safe_amd64_ll_update_zero - ADDQ CX, BX - CMPQ BX, $0x40 - JA sequenceDecs_decodeSync_safe_amd64_ll_update_zero - CMPQ CX, $0x40 - JAE sequenceDecs_decodeSync_safe_amd64_ll_update_zero - NEGQ CX - SHRQ CL, R14 - ADDQ R14, AX - -sequenceDecs_decodeSync_safe_amd64_ll_update_zero: - MOVQ AX, 24(SP) - - // Fill bitreader for state updates - MOVQ R13, (SP) - MOVQ R9, AX - SHRQ $0x08, AX - MOVBQZX AL, AX - MOVQ ctx+16(FP), CX - CMPQ 96(CX), $0x00 - 
JZ sequenceDecs_decodeSync_safe_amd64_skip_update - - // Update Literal Length State - MOVBQZX DI, R13 - SHRL $0x10, DI - LEAQ (BX)(R13*1), CX - MOVQ DX, R14 - MOVQ CX, BX - ROLQ CL, R14 - MOVL $0x00000001, R15 - MOVB R13, CL - SHLL CL, R15 - DECL R15 - ANDQ R15, R14 - ADDQ R14, DI - - // Load ctx.llTable - MOVQ ctx+16(FP), CX - MOVQ (CX), CX - MOVQ (CX)(DI*8), DI - - // Update Match Length State - MOVBQZX R8, R13 - SHRL $0x10, R8 - LEAQ (BX)(R13*1), CX - MOVQ DX, R14 - MOVQ CX, BX - ROLQ CL, R14 - MOVL $0x00000001, R15 - MOVB R13, CL - SHLL CL, R15 - DECL R15 - ANDQ R15, R14 - ADDQ R14, R8 - - // Load ctx.mlTable - MOVQ ctx+16(FP), CX - MOVQ 24(CX), CX - MOVQ (CX)(R8*8), R8 - - // Update Offset State - MOVBQZX R9, R13 - SHRL $0x10, R9 - LEAQ (BX)(R13*1), CX - MOVQ DX, R14 - MOVQ CX, BX - ROLQ CL, R14 - MOVL $0x00000001, R15 - MOVB R13, CL - SHLL CL, R15 - DECL R15 - ANDQ R15, R14 - ADDQ R14, R9 - - // Load ctx.ofTable - MOVQ ctx+16(FP), CX - MOVQ 48(CX), CX - MOVQ (CX)(R9*8), R9 - -sequenceDecs_decodeSync_safe_amd64_skip_update: - // Adjust offset - MOVQ s+0(FP), CX - MOVQ 8(SP), R13 - CMPQ AX, $0x01 - JBE sequenceDecs_decodeSync_safe_amd64_adjust_offsetB_1_or_0 - MOVUPS 144(CX), X0 - MOVQ R13, 144(CX) - MOVUPS X0, 152(CX) - JMP sequenceDecs_decodeSync_safe_amd64_after_adjust - -sequenceDecs_decodeSync_safe_amd64_adjust_offsetB_1_or_0: - CMPQ 24(SP), $0x00000000 - JNE sequenceDecs_decodeSync_safe_amd64_adjust_offset_maybezero - INCQ R13 - JMP sequenceDecs_decodeSync_safe_amd64_adjust_offset_nonzero - -sequenceDecs_decodeSync_safe_amd64_adjust_offset_maybezero: - TESTQ R13, R13 - JNZ sequenceDecs_decodeSync_safe_amd64_adjust_offset_nonzero - MOVQ 144(CX), R13 - JMP sequenceDecs_decodeSync_safe_amd64_after_adjust - -sequenceDecs_decodeSync_safe_amd64_adjust_offset_nonzero: - MOVQ R13, AX - XORQ R14, R14 - MOVQ $-1, R15 - CMPQ R13, $0x03 - CMOVQEQ R14, AX - CMOVQEQ R15, R14 - ADDQ 144(CX)(AX*8), R14 - JNZ sequenceDecs_decodeSync_safe_amd64_adjust_temp_valid - MOVQ $0x00000001, R14 - -sequenceDecs_decodeSync_safe_amd64_adjust_temp_valid: - CMPQ R13, $0x01 - JZ sequenceDecs_decodeSync_safe_amd64_adjust_skip - MOVQ 152(CX), AX - MOVQ AX, 160(CX) - -sequenceDecs_decodeSync_safe_amd64_adjust_skip: - MOVQ 144(CX), AX - MOVQ AX, 152(CX) - MOVQ R14, 144(CX) - MOVQ R14, R13 - -sequenceDecs_decodeSync_safe_amd64_after_adjust: - MOVQ R13, 8(SP) - - // Check values - MOVQ 16(SP), AX - MOVQ 24(SP), CX - LEAQ (AX)(CX*1), R14 - MOVQ s+0(FP), R15 - ADDQ R14, 256(R15) - MOVQ ctx+16(FP), R14 - SUBQ CX, 104(R14) - JS error_not_enough_literals - CMPQ AX, $0x00020002 - JA sequenceDecs_decodeSync_safe_amd64_error_match_len_too_big - TESTQ R13, R13 - JNZ sequenceDecs_decodeSync_safe_amd64_match_len_ofs_ok - TESTQ AX, AX - JNZ sequenceDecs_decodeSync_safe_amd64_error_match_len_ofs_mismatch - -sequenceDecs_decodeSync_safe_amd64_match_len_ofs_ok: - MOVQ 24(SP), AX - MOVQ 8(SP), CX - MOVQ 16(SP), R13 - - // Check if we have enough space in s.out - LEAQ (AX)(R13*1), R14 - ADDQ R10, R14 - CMPQ R14, 32(SP) - JA error_not_enough_space - - // Copy literals - TESTQ AX, AX - JZ check_offset - MOVQ AX, R14 - SUBQ $0x10, R14 - JB copy_1_small - -copy_1_loop: - MOVUPS (R11), X0 - MOVUPS X0, (R10) - ADDQ $0x10, R11 - ADDQ $0x10, R10 - SUBQ $0x10, R14 - JAE copy_1_loop - LEAQ 16(R11)(R14*1), R11 - LEAQ 16(R10)(R14*1), R10 - MOVUPS -16(R11), X0 - MOVUPS X0, -16(R10) - JMP copy_1_end - -copy_1_small: - CMPQ AX, $0x03 - JE copy_1_move_3 - JB copy_1_move_1or2 - CMPQ AX, $0x08 - JB copy_1_move_4through7 - JMP copy_1_move_8through16 - 
-copy_1_move_1or2: - MOVB (R11), R14 - MOVB -1(R11)(AX*1), R15 - MOVB R14, (R10) - MOVB R15, -1(R10)(AX*1) - ADDQ AX, R11 - ADDQ AX, R10 - JMP copy_1_end - -copy_1_move_3: - MOVW (R11), R14 - MOVB 2(R11), R15 - MOVW R14, (R10) - MOVB R15, 2(R10) - ADDQ AX, R11 - ADDQ AX, R10 - JMP copy_1_end - -copy_1_move_4through7: - MOVL (R11), R14 - MOVL -4(R11)(AX*1), R15 - MOVL R14, (R10) - MOVL R15, -4(R10)(AX*1) - ADDQ AX, R11 - ADDQ AX, R10 - JMP copy_1_end - -copy_1_move_8through16: - MOVQ (R11), R14 - MOVQ -8(R11)(AX*1), R15 - MOVQ R14, (R10) - MOVQ R15, -8(R10)(AX*1) - ADDQ AX, R11 - ADDQ AX, R10 - -copy_1_end: - ADDQ AX, R12 - - // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) -check_offset: - MOVQ R12, AX - ADDQ 40(SP), AX - CMPQ CX, AX - JG error_match_off_too_big - CMPQ CX, 56(SP) - JG error_match_off_too_big - - // Copy match from history - MOVQ CX, AX - SUBQ R12, AX - JLS copy_match - MOVQ 48(SP), R14 - SUBQ AX, R14 - CMPQ R13, AX - JG copy_all_from_history - MOVQ R13, AX - SUBQ $0x10, AX - JB copy_4_small - -copy_4_loop: - MOVUPS (R14), X0 - MOVUPS X0, (R10) - ADDQ $0x10, R14 - ADDQ $0x10, R10 - SUBQ $0x10, AX - JAE copy_4_loop - LEAQ 16(R14)(AX*1), R14 - LEAQ 16(R10)(AX*1), R10 - MOVUPS -16(R14), X0 - MOVUPS X0, -16(R10) - JMP copy_4_end - -copy_4_small: - CMPQ R13, $0x03 - JE copy_4_move_3 - CMPQ R13, $0x08 - JB copy_4_move_4through7 - JMP copy_4_move_8through16 - -copy_4_move_3: - MOVW (R14), AX - MOVB 2(R14), CL - MOVW AX, (R10) - MOVB CL, 2(R10) - ADDQ R13, R14 - ADDQ R13, R10 - JMP copy_4_end - -copy_4_move_4through7: - MOVL (R14), AX - MOVL -4(R14)(R13*1), CX - MOVL AX, (R10) - MOVL CX, -4(R10)(R13*1) - ADDQ R13, R14 - ADDQ R13, R10 - JMP copy_4_end - -copy_4_move_8through16: - MOVQ (R14), AX - MOVQ -8(R14)(R13*1), CX - MOVQ AX, (R10) - MOVQ CX, -8(R10)(R13*1) - ADDQ R13, R14 - ADDQ R13, R10 - -copy_4_end: - ADDQ R13, R12 - JMP handle_loop - JMP loop_finished - -copy_all_from_history: - MOVQ AX, R15 - SUBQ $0x10, R15 - JB copy_5_small - -copy_5_loop: - MOVUPS (R14), X0 - MOVUPS X0, (R10) - ADDQ $0x10, R14 - ADDQ $0x10, R10 - SUBQ $0x10, R15 - JAE copy_5_loop - LEAQ 16(R14)(R15*1), R14 - LEAQ 16(R10)(R15*1), R10 - MOVUPS -16(R14), X0 - MOVUPS X0, -16(R10) - JMP copy_5_end - -copy_5_small: - CMPQ AX, $0x03 - JE copy_5_move_3 - JB copy_5_move_1or2 - CMPQ AX, $0x08 - JB copy_5_move_4through7 - JMP copy_5_move_8through16 - -copy_5_move_1or2: - MOVB (R14), R15 - MOVB -1(R14)(AX*1), BP - MOVB R15, (R10) - MOVB BP, -1(R10)(AX*1) - ADDQ AX, R14 - ADDQ AX, R10 - JMP copy_5_end - -copy_5_move_3: - MOVW (R14), R15 - MOVB 2(R14), BP - MOVW R15, (R10) - MOVB BP, 2(R10) - ADDQ AX, R14 - ADDQ AX, R10 - JMP copy_5_end - -copy_5_move_4through7: - MOVL (R14), R15 - MOVL -4(R14)(AX*1), BP - MOVL R15, (R10) - MOVL BP, -4(R10)(AX*1) - ADDQ AX, R14 - ADDQ AX, R10 - JMP copy_5_end - -copy_5_move_8through16: - MOVQ (R14), R15 - MOVQ -8(R14)(AX*1), BP - MOVQ R15, (R10) - MOVQ BP, -8(R10)(AX*1) - ADDQ AX, R14 - ADDQ AX, R10 - -copy_5_end: - ADDQ AX, R12 - SUBQ AX, R13 - - // Copy match from the current buffer -copy_match: - MOVQ R10, AX - SUBQ CX, AX - - // ml <= mo - CMPQ R13, CX - JA copy_overlapping_match - - // Copy non-overlapping match - ADDQ R13, R12 - MOVQ R13, CX - SUBQ $0x10, CX - JB copy_2_small - -copy_2_loop: - MOVUPS (AX), X0 - MOVUPS X0, (R10) - ADDQ $0x10, AX - ADDQ $0x10, R10 - SUBQ $0x10, CX - JAE copy_2_loop - LEAQ 16(AX)(CX*1), AX - LEAQ 16(R10)(CX*1), R10 - MOVUPS -16(AX), X0 - MOVUPS X0, -16(R10) - JMP copy_2_end - -copy_2_small: - CMPQ R13, $0x03 - JE copy_2_move_3 
- JB copy_2_move_1or2 - CMPQ R13, $0x08 - JB copy_2_move_4through7 - JMP copy_2_move_8through16 - -copy_2_move_1or2: - MOVB (AX), CL - MOVB -1(AX)(R13*1), R14 - MOVB CL, (R10) - MOVB R14, -1(R10)(R13*1) - ADDQ R13, AX - ADDQ R13, R10 - JMP copy_2_end - -copy_2_move_3: - MOVW (AX), CX - MOVB 2(AX), R14 - MOVW CX, (R10) - MOVB R14, 2(R10) - ADDQ R13, AX - ADDQ R13, R10 - JMP copy_2_end - -copy_2_move_4through7: - MOVL (AX), CX - MOVL -4(AX)(R13*1), R14 - MOVL CX, (R10) - MOVL R14, -4(R10)(R13*1) - ADDQ R13, AX - ADDQ R13, R10 - JMP copy_2_end - -copy_2_move_8through16: - MOVQ (AX), CX - MOVQ -8(AX)(R13*1), R14 - MOVQ CX, (R10) - MOVQ R14, -8(R10)(R13*1) - ADDQ R13, AX - ADDQ R13, R10 - -copy_2_end: - JMP handle_loop - - // Copy overlapping match -copy_overlapping_match: - ADDQ R13, R12 - -copy_slow_3: - MOVB (AX), CL - MOVB CL, (R10) - INCQ AX - INCQ R10 - DECQ R13 - JNZ copy_slow_3 - -handle_loop: - MOVQ ctx+16(FP), AX - DECQ 96(AX) - JNS sequenceDecs_decodeSync_safe_amd64_main_loop - -loop_finished: - MOVQ br+8(FP), AX - MOVQ DX, 24(AX) - MOVB BL, 32(AX) - MOVQ SI, 8(AX) - - // Update the context - MOVQ ctx+16(FP), AX - MOVQ R12, 136(AX) - MOVQ 144(AX), CX - SUBQ CX, R11 - MOVQ R11, 168(AX) - - // Return success - MOVQ $0x00000000, ret+24(FP) - RET - - // Return with match length error -sequenceDecs_decodeSync_safe_amd64_error_match_len_ofs_mismatch: - MOVQ 16(SP), AX - MOVQ ctx+16(FP), CX - MOVQ AX, 216(CX) - MOVQ $0x00000001, ret+24(FP) - RET - - // Return with match too long error -sequenceDecs_decodeSync_safe_amd64_error_match_len_too_big: - MOVQ ctx+16(FP), AX - MOVQ 16(SP), CX - MOVQ CX, 216(AX) - MOVQ $0x00000002, ret+24(FP) - RET - - // Return with match offset too long error -error_match_off_too_big: - MOVQ ctx+16(FP), AX - MOVQ 8(SP), CX - MOVQ CX, 224(AX) - MOVQ R12, 136(AX) - MOVQ $0x00000003, ret+24(FP) - RET - - // Return with not enough literals error -error_not_enough_literals: - MOVQ ctx+16(FP), AX - MOVQ 24(SP), CX - MOVQ CX, 208(AX) - MOVQ $0x00000004, ret+24(FP) - RET - - // Return with overread error -error_overread: - MOVQ $0x00000006, ret+24(FP) - RET - - // Return with not enough output space error -error_not_enough_space: - MOVQ ctx+16(FP), AX - MOVQ 24(SP), CX - MOVQ CX, 208(AX) - MOVQ 16(SP), CX - MOVQ CX, 216(AX) - MOVQ R12, 136(AX) - MOVQ $0x00000005, ret+24(FP) - RET - -// func sequenceDecs_decodeSync_safe_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int -// Requires: BMI, BMI2, CMOV, SSE -TEXT ·sequenceDecs_decodeSync_safe_bmi2(SB), $64-32 - MOVQ br+8(FP), BX - MOVQ 24(BX), AX - MOVBQZX 32(BX), DX - MOVQ (BX), CX - MOVQ 8(BX), BX - ADDQ BX, CX - MOVQ CX, (SP) - MOVQ ctx+16(FP), CX - MOVQ 72(CX), SI - MOVQ 80(CX), DI - MOVQ 88(CX), R8 - XORQ R9, R9 - MOVQ R9, 8(SP) - MOVQ R9, 16(SP) - MOVQ R9, 24(SP) - MOVQ 112(CX), R9 - MOVQ 128(CX), R10 - MOVQ R10, 32(SP) - MOVQ 144(CX), R10 - MOVQ 136(CX), R11 - MOVQ 200(CX), R12 - MOVQ R12, 56(SP) - MOVQ 176(CX), R12 - MOVQ R12, 48(SP) - MOVQ 184(CX), CX - MOVQ CX, 40(SP) - MOVQ 40(SP), CX - ADDQ CX, 48(SP) - - // Calculate poiter to s.out[cap(s.out)] (a past-end pointer) - ADDQ R9, 32(SP) - - // outBase += outPosition - ADDQ R11, R9 - -sequenceDecs_decodeSync_safe_bmi2_main_loop: - MOVQ (SP), R12 - - // Fill bitreader to have enough for the offset and match length. 
- CMPQ BX, $0x08 - JL sequenceDecs_decodeSync_safe_bmi2_fill_byte_by_byte - MOVQ DX, CX - SHRQ $0x03, CX - SUBQ CX, R12 - MOVQ (R12), AX - SUBQ CX, BX - ANDQ $0x07, DX - JMP sequenceDecs_decodeSync_safe_bmi2_fill_end - -sequenceDecs_decodeSync_safe_bmi2_fill_byte_by_byte: - CMPQ BX, $0x00 - JLE sequenceDecs_decodeSync_safe_bmi2_fill_check_overread - CMPQ DX, $0x07 - JLE sequenceDecs_decodeSync_safe_bmi2_fill_end - SHLQ $0x08, AX - SUBQ $0x01, R12 - SUBQ $0x01, BX - SUBQ $0x08, DX - MOVBQZX (R12), CX - ORQ CX, AX - JMP sequenceDecs_decodeSync_safe_bmi2_fill_byte_by_byte - -sequenceDecs_decodeSync_safe_bmi2_fill_check_overread: - CMPQ DX, $0x40 - JA error_overread - -sequenceDecs_decodeSync_safe_bmi2_fill_end: - // Update offset - MOVQ $0x00000808, CX - BEXTRQ CX, R8, R13 - MOVQ AX, R14 - LEAQ (DX)(R13*1), CX - ROLQ CL, R14 - BZHIQ R13, R14, R14 - MOVQ CX, DX - MOVQ R8, CX - SHRQ $0x20, CX - ADDQ R14, CX - MOVQ CX, 8(SP) - - // Update match length - MOVQ $0x00000808, CX - BEXTRQ CX, DI, R13 - MOVQ AX, R14 - LEAQ (DX)(R13*1), CX - ROLQ CL, R14 - BZHIQ R13, R14, R14 - MOVQ CX, DX - MOVQ DI, CX - SHRQ $0x20, CX - ADDQ R14, CX - MOVQ CX, 16(SP) - - // Fill bitreader to have enough for the remaining - CMPQ BX, $0x08 - JL sequenceDecs_decodeSync_safe_bmi2_fill_2_byte_by_byte - MOVQ DX, CX - SHRQ $0x03, CX - SUBQ CX, R12 - MOVQ (R12), AX - SUBQ CX, BX - ANDQ $0x07, DX - JMP sequenceDecs_decodeSync_safe_bmi2_fill_2_end - -sequenceDecs_decodeSync_safe_bmi2_fill_2_byte_by_byte: - CMPQ BX, $0x00 - JLE sequenceDecs_decodeSync_safe_bmi2_fill_2_check_overread - CMPQ DX, $0x07 - JLE sequenceDecs_decodeSync_safe_bmi2_fill_2_end - SHLQ $0x08, AX - SUBQ $0x01, R12 - SUBQ $0x01, BX - SUBQ $0x08, DX - MOVBQZX (R12), CX - ORQ CX, AX - JMP sequenceDecs_decodeSync_safe_bmi2_fill_2_byte_by_byte - -sequenceDecs_decodeSync_safe_bmi2_fill_2_check_overread: - CMPQ DX, $0x40 - JA error_overread - -sequenceDecs_decodeSync_safe_bmi2_fill_2_end: - // Update literal length - MOVQ $0x00000808, CX - BEXTRQ CX, SI, R13 - MOVQ AX, R14 - LEAQ (DX)(R13*1), CX - ROLQ CL, R14 - BZHIQ R13, R14, R14 - MOVQ CX, DX - MOVQ SI, CX - SHRQ $0x20, CX - ADDQ R14, CX - MOVQ CX, 24(SP) - - // Fill bitreader for state updates - MOVQ R12, (SP) - MOVQ $0x00000808, CX - BEXTRQ CX, R8, R12 - MOVQ ctx+16(FP), CX - CMPQ 96(CX), $0x00 - JZ sequenceDecs_decodeSync_safe_bmi2_skip_update - LEAQ (SI)(DI*1), R13 - ADDQ R8, R13 - MOVBQZX R13, R13 - LEAQ (DX)(R13*1), CX - MOVQ AX, R14 - MOVQ CX, DX - ROLQ CL, R14 - BZHIQ R13, R14, R14 - - // Update Offset State - BZHIQ R8, R14, CX - SHRXQ R8, R14, R14 - SHRL $0x10, R8 - ADDQ CX, R8 - - // Load ctx.ofTable - MOVQ ctx+16(FP), CX - MOVQ 48(CX), CX - MOVQ (CX)(R8*8), R8 - - // Update Match Length State - BZHIQ DI, R14, CX - SHRXQ DI, R14, R14 - SHRL $0x10, DI - ADDQ CX, DI - - // Load ctx.mlTable - MOVQ ctx+16(FP), CX - MOVQ 24(CX), CX - MOVQ (CX)(DI*8), DI - - // Update Literal Length State - BZHIQ SI, R14, CX - SHRL $0x10, SI - ADDQ CX, SI - - // Load ctx.llTable - MOVQ ctx+16(FP), CX - MOVQ (CX), CX - MOVQ (CX)(SI*8), SI - -sequenceDecs_decodeSync_safe_bmi2_skip_update: - // Adjust offset - MOVQ s+0(FP), CX - MOVQ 8(SP), R13 - CMPQ R12, $0x01 - JBE sequenceDecs_decodeSync_safe_bmi2_adjust_offsetB_1_or_0 - MOVUPS 144(CX), X0 - MOVQ R13, 144(CX) - MOVUPS X0, 152(CX) - JMP sequenceDecs_decodeSync_safe_bmi2_after_adjust - -sequenceDecs_decodeSync_safe_bmi2_adjust_offsetB_1_or_0: - CMPQ 24(SP), $0x00000000 - JNE sequenceDecs_decodeSync_safe_bmi2_adjust_offset_maybezero - INCQ R13 - JMP 
sequenceDecs_decodeSync_safe_bmi2_adjust_offset_nonzero - -sequenceDecs_decodeSync_safe_bmi2_adjust_offset_maybezero: - TESTQ R13, R13 - JNZ sequenceDecs_decodeSync_safe_bmi2_adjust_offset_nonzero - MOVQ 144(CX), R13 - JMP sequenceDecs_decodeSync_safe_bmi2_after_adjust - -sequenceDecs_decodeSync_safe_bmi2_adjust_offset_nonzero: - MOVQ R13, R12 - XORQ R14, R14 - MOVQ $-1, R15 - CMPQ R13, $0x03 - CMOVQEQ R14, R12 - CMOVQEQ R15, R14 - ADDQ 144(CX)(R12*8), R14 - JNZ sequenceDecs_decodeSync_safe_bmi2_adjust_temp_valid - MOVQ $0x00000001, R14 - -sequenceDecs_decodeSync_safe_bmi2_adjust_temp_valid: - CMPQ R13, $0x01 - JZ sequenceDecs_decodeSync_safe_bmi2_adjust_skip - MOVQ 152(CX), R12 - MOVQ R12, 160(CX) - -sequenceDecs_decodeSync_safe_bmi2_adjust_skip: - MOVQ 144(CX), R12 - MOVQ R12, 152(CX) - MOVQ R14, 144(CX) - MOVQ R14, R13 - -sequenceDecs_decodeSync_safe_bmi2_after_adjust: - MOVQ R13, 8(SP) - - // Check values - MOVQ 16(SP), CX - MOVQ 24(SP), R12 - LEAQ (CX)(R12*1), R14 - MOVQ s+0(FP), R15 - ADDQ R14, 256(R15) - MOVQ ctx+16(FP), R14 - SUBQ R12, 104(R14) - JS error_not_enough_literals - CMPQ CX, $0x00020002 - JA sequenceDecs_decodeSync_safe_bmi2_error_match_len_too_big - TESTQ R13, R13 - JNZ sequenceDecs_decodeSync_safe_bmi2_match_len_ofs_ok - TESTQ CX, CX - JNZ sequenceDecs_decodeSync_safe_bmi2_error_match_len_ofs_mismatch - -sequenceDecs_decodeSync_safe_bmi2_match_len_ofs_ok: - MOVQ 24(SP), CX - MOVQ 8(SP), R12 - MOVQ 16(SP), R13 - - // Check if we have enough space in s.out - LEAQ (CX)(R13*1), R14 - ADDQ R9, R14 - CMPQ R14, 32(SP) - JA error_not_enough_space - - // Copy literals - TESTQ CX, CX - JZ check_offset - MOVQ CX, R14 - SUBQ $0x10, R14 - JB copy_1_small - -copy_1_loop: - MOVUPS (R10), X0 - MOVUPS X0, (R9) - ADDQ $0x10, R10 - ADDQ $0x10, R9 - SUBQ $0x10, R14 - JAE copy_1_loop - LEAQ 16(R10)(R14*1), R10 - LEAQ 16(R9)(R14*1), R9 - MOVUPS -16(R10), X0 - MOVUPS X0, -16(R9) - JMP copy_1_end - -copy_1_small: - CMPQ CX, $0x03 - JE copy_1_move_3 - JB copy_1_move_1or2 - CMPQ CX, $0x08 - JB copy_1_move_4through7 - JMP copy_1_move_8through16 - -copy_1_move_1or2: - MOVB (R10), R14 - MOVB -1(R10)(CX*1), R15 - MOVB R14, (R9) - MOVB R15, -1(R9)(CX*1) - ADDQ CX, R10 - ADDQ CX, R9 - JMP copy_1_end - -copy_1_move_3: - MOVW (R10), R14 - MOVB 2(R10), R15 - MOVW R14, (R9) - MOVB R15, 2(R9) - ADDQ CX, R10 - ADDQ CX, R9 - JMP copy_1_end - -copy_1_move_4through7: - MOVL (R10), R14 - MOVL -4(R10)(CX*1), R15 - MOVL R14, (R9) - MOVL R15, -4(R9)(CX*1) - ADDQ CX, R10 - ADDQ CX, R9 - JMP copy_1_end - -copy_1_move_8through16: - MOVQ (R10), R14 - MOVQ -8(R10)(CX*1), R15 - MOVQ R14, (R9) - MOVQ R15, -8(R9)(CX*1) - ADDQ CX, R10 - ADDQ CX, R9 - -copy_1_end: - ADDQ CX, R11 - - // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) -check_offset: - MOVQ R11, CX - ADDQ 40(SP), CX - CMPQ R12, CX - JG error_match_off_too_big - CMPQ R12, 56(SP) - JG error_match_off_too_big - - // Copy match from history - MOVQ R12, CX - SUBQ R11, CX - JLS copy_match - MOVQ 48(SP), R14 - SUBQ CX, R14 - CMPQ R13, CX - JG copy_all_from_history - MOVQ R13, CX - SUBQ $0x10, CX - JB copy_4_small - -copy_4_loop: - MOVUPS (R14), X0 - MOVUPS X0, (R9) - ADDQ $0x10, R14 - ADDQ $0x10, R9 - SUBQ $0x10, CX - JAE copy_4_loop - LEAQ 16(R14)(CX*1), R14 - LEAQ 16(R9)(CX*1), R9 - MOVUPS -16(R14), X0 - MOVUPS X0, -16(R9) - JMP copy_4_end - -copy_4_small: - CMPQ R13, $0x03 - JE copy_4_move_3 - CMPQ R13, $0x08 - JB copy_4_move_4through7 - JMP copy_4_move_8through16 - -copy_4_move_3: - MOVW (R14), CX - MOVB 2(R14), R12 - MOVW CX, (R9) - MOVB R12, 
2(R9) - ADDQ R13, R14 - ADDQ R13, R9 - JMP copy_4_end - -copy_4_move_4through7: - MOVL (R14), CX - MOVL -4(R14)(R13*1), R12 - MOVL CX, (R9) - MOVL R12, -4(R9)(R13*1) - ADDQ R13, R14 - ADDQ R13, R9 - JMP copy_4_end - -copy_4_move_8through16: - MOVQ (R14), CX - MOVQ -8(R14)(R13*1), R12 - MOVQ CX, (R9) - MOVQ R12, -8(R9)(R13*1) - ADDQ R13, R14 - ADDQ R13, R9 - -copy_4_end: - ADDQ R13, R11 - JMP handle_loop - JMP loop_finished - -copy_all_from_history: - MOVQ CX, R15 - SUBQ $0x10, R15 - JB copy_5_small - -copy_5_loop: - MOVUPS (R14), X0 - MOVUPS X0, (R9) - ADDQ $0x10, R14 - ADDQ $0x10, R9 - SUBQ $0x10, R15 - JAE copy_5_loop - LEAQ 16(R14)(R15*1), R14 - LEAQ 16(R9)(R15*1), R9 - MOVUPS -16(R14), X0 - MOVUPS X0, -16(R9) - JMP copy_5_end - -copy_5_small: - CMPQ CX, $0x03 - JE copy_5_move_3 - JB copy_5_move_1or2 - CMPQ CX, $0x08 - JB copy_5_move_4through7 - JMP copy_5_move_8through16 - -copy_5_move_1or2: - MOVB (R14), R15 - MOVB -1(R14)(CX*1), BP - MOVB R15, (R9) - MOVB BP, -1(R9)(CX*1) - ADDQ CX, R14 - ADDQ CX, R9 - JMP copy_5_end - -copy_5_move_3: - MOVW (R14), R15 - MOVB 2(R14), BP - MOVW R15, (R9) - MOVB BP, 2(R9) - ADDQ CX, R14 - ADDQ CX, R9 - JMP copy_5_end - -copy_5_move_4through7: - MOVL (R14), R15 - MOVL -4(R14)(CX*1), BP - MOVL R15, (R9) - MOVL BP, -4(R9)(CX*1) - ADDQ CX, R14 - ADDQ CX, R9 - JMP copy_5_end - -copy_5_move_8through16: - MOVQ (R14), R15 - MOVQ -8(R14)(CX*1), BP - MOVQ R15, (R9) - MOVQ BP, -8(R9)(CX*1) - ADDQ CX, R14 - ADDQ CX, R9 - -copy_5_end: - ADDQ CX, R11 - SUBQ CX, R13 - - // Copy match from the current buffer -copy_match: - MOVQ R9, CX - SUBQ R12, CX - - // ml <= mo - CMPQ R13, R12 - JA copy_overlapping_match - - // Copy non-overlapping match - ADDQ R13, R11 - MOVQ R13, R12 - SUBQ $0x10, R12 - JB copy_2_small - -copy_2_loop: - MOVUPS (CX), X0 - MOVUPS X0, (R9) - ADDQ $0x10, CX - ADDQ $0x10, R9 - SUBQ $0x10, R12 - JAE copy_2_loop - LEAQ 16(CX)(R12*1), CX - LEAQ 16(R9)(R12*1), R9 - MOVUPS -16(CX), X0 - MOVUPS X0, -16(R9) - JMP copy_2_end - -copy_2_small: - CMPQ R13, $0x03 - JE copy_2_move_3 - JB copy_2_move_1or2 - CMPQ R13, $0x08 - JB copy_2_move_4through7 - JMP copy_2_move_8through16 - -copy_2_move_1or2: - MOVB (CX), R12 - MOVB -1(CX)(R13*1), R14 - MOVB R12, (R9) - MOVB R14, -1(R9)(R13*1) - ADDQ R13, CX - ADDQ R13, R9 - JMP copy_2_end - -copy_2_move_3: - MOVW (CX), R12 - MOVB 2(CX), R14 - MOVW R12, (R9) - MOVB R14, 2(R9) - ADDQ R13, CX - ADDQ R13, R9 - JMP copy_2_end - -copy_2_move_4through7: - MOVL (CX), R12 - MOVL -4(CX)(R13*1), R14 - MOVL R12, (R9) - MOVL R14, -4(R9)(R13*1) - ADDQ R13, CX - ADDQ R13, R9 - JMP copy_2_end - -copy_2_move_8through16: - MOVQ (CX), R12 - MOVQ -8(CX)(R13*1), R14 - MOVQ R12, (R9) - MOVQ R14, -8(R9)(R13*1) - ADDQ R13, CX - ADDQ R13, R9 - -copy_2_end: - JMP handle_loop - - // Copy overlapping match -copy_overlapping_match: - ADDQ R13, R11 - -copy_slow_3: - MOVB (CX), R12 - MOVB R12, (R9) - INCQ CX - INCQ R9 - DECQ R13 - JNZ copy_slow_3 - -handle_loop: - MOVQ ctx+16(FP), CX - DECQ 96(CX) - JNS sequenceDecs_decodeSync_safe_bmi2_main_loop - -loop_finished: - MOVQ br+8(FP), CX - MOVQ AX, 24(CX) - MOVB DL, 32(CX) - MOVQ BX, 8(CX) - - // Update the context - MOVQ ctx+16(FP), AX - MOVQ R11, 136(AX) - MOVQ 144(AX), CX - SUBQ CX, R10 - MOVQ R10, 168(AX) - - // Return success - MOVQ $0x00000000, ret+24(FP) - RET - - // Return with match length error -sequenceDecs_decodeSync_safe_bmi2_error_match_len_ofs_mismatch: - MOVQ 16(SP), AX - MOVQ ctx+16(FP), CX - MOVQ AX, 216(CX) - MOVQ $0x00000001, ret+24(FP) - RET - - // Return with match too long error 
-sequenceDecs_decodeSync_safe_bmi2_error_match_len_too_big: - MOVQ ctx+16(FP), AX - MOVQ 16(SP), CX - MOVQ CX, 216(AX) - MOVQ $0x00000002, ret+24(FP) - RET - - // Return with match offset too long error -error_match_off_too_big: - MOVQ ctx+16(FP), AX - MOVQ 8(SP), CX - MOVQ CX, 224(AX) - MOVQ R11, 136(AX) - MOVQ $0x00000003, ret+24(FP) - RET - - // Return with not enough literals error -error_not_enough_literals: - MOVQ ctx+16(FP), AX - MOVQ 24(SP), CX - MOVQ CX, 208(AX) - MOVQ $0x00000004, ret+24(FP) - RET - - // Return with overread error -error_overread: - MOVQ $0x00000006, ret+24(FP) - RET - - // Return with not enough output space error -error_not_enough_space: - MOVQ ctx+16(FP), AX - MOVQ 24(SP), CX - MOVQ CX, 208(AX) - MOVQ 16(SP), CX - MOVQ CX, 216(AX) - MOVQ R11, 136(AX) - MOVQ $0x00000005, ret+24(FP) - RET diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go b/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go deleted file mode 100644 index 2fb35b788..000000000 --- a/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go +++ /dev/null @@ -1,237 +0,0 @@ -//go:build !amd64 || appengine || !gc || noasm -// +build !amd64 appengine !gc noasm - -package zstd - -import ( - "fmt" - "io" -) - -// decode sequences from the stream with the provided history but without dictionary. -func (s *sequenceDecs) decodeSyncSimple(hist []byte) (bool, error) { - return false, nil -} - -// decode sequences from the stream without the provided history. -func (s *sequenceDecs) decode(seqs []seqVals) error { - br := s.br - - // Grab full sizes tables, to avoid bounds checks. - llTable, mlTable, ofTable := s.litLengths.fse.dt[:maxTablesize], s.matchLengths.fse.dt[:maxTablesize], s.offsets.fse.dt[:maxTablesize] - llState, mlState, ofState := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state - s.seqSize = 0 - litRemain := len(s.literals) - - maxBlockSize := maxCompressedBlockSize - if s.windowSize < maxBlockSize { - maxBlockSize = s.windowSize - } - for i := range seqs { - var ll, mo, ml int - if len(br.in) > 4+((maxOffsetBits+16+16)>>3) { - // inlined function: - // ll, mo, ml = s.nextFast(br, llState, mlState, ofState) - - // Final will not read from stream. - var llB, mlB, moB uint8 - ll, llB = llState.final() - ml, mlB = mlState.final() - mo, moB = ofState.final() - - // extra bits are stored in reverse order. - br.fillFast() - mo += br.getBits(moB) - if s.maxBits > 32 { - br.fillFast() - } - ml += br.getBits(mlB) - ll += br.getBits(llB) - - if moB > 1 { - s.prevOffset[2] = s.prevOffset[1] - s.prevOffset[1] = s.prevOffset[0] - s.prevOffset[0] = mo - } else { - // mo = s.adjustOffset(mo, ll, moB) - // Inlined for rather big speedup - if ll == 0 { - // There is an exception though, when current sequence's literals_length = 0. - // In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2, - // an offset_value of 2 means Repeated_Offset3, and an offset_value of 3 means Repeated_Offset1 - 1_byte. 
- mo++ - } - - if mo == 0 { - mo = s.prevOffset[0] - } else { - var temp int - if mo == 3 { - temp = s.prevOffset[0] - 1 - } else { - temp = s.prevOffset[mo] - } - - if temp == 0 { - // 0 is not valid; input is corrupted; force offset to 1 - println("WARNING: temp was 0") - temp = 1 - } - - if mo != 1 { - s.prevOffset[2] = s.prevOffset[1] - } - s.prevOffset[1] = s.prevOffset[0] - s.prevOffset[0] = temp - mo = temp - } - } - br.fillFast() - } else { - if br.overread() { - if debugDecoder { - printf("reading sequence %d, exceeded available data\n", i) - } - return io.ErrUnexpectedEOF - } - ll, mo, ml = s.next(br, llState, mlState, ofState) - br.fill() - } - - if debugSequences { - println("Seq", i, "Litlen:", ll, "mo:", mo, "(abs) ml:", ml) - } - // Evaluate. - // We might be doing this async, so do it early. - if mo == 0 && ml > 0 { - return fmt.Errorf("zero matchoff and matchlen (%d) > 0", ml) - } - if ml > maxMatchLen { - return fmt.Errorf("match len (%d) bigger than max allowed length", ml) - } - s.seqSize += ll + ml - if s.seqSize > maxBlockSize { - return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) - } - litRemain -= ll - if litRemain < 0 { - return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, litRemain+ll) - } - seqs[i] = seqVals{ - ll: ll, - ml: ml, - mo: mo, - } - if i == len(seqs)-1 { - // This is the last sequence, so we shouldn't update state. - break - } - - // Manually inlined, ~ 5-20% faster - // Update all 3 states at once. Approx 20% faster. - nBits := llState.nbBits() + mlState.nbBits() + ofState.nbBits() - if nBits == 0 { - llState = llTable[llState.newState()&maxTableMask] - mlState = mlTable[mlState.newState()&maxTableMask] - ofState = ofTable[ofState.newState()&maxTableMask] - } else { - bits := br.get32BitsFast(nBits) - lowBits := uint16(bits >> ((ofState.nbBits() + mlState.nbBits()) & 31)) - llState = llTable[(llState.newState()+lowBits)&maxTableMask] - - lowBits = uint16(bits >> (ofState.nbBits() & 31)) - lowBits &= bitMask[mlState.nbBits()&15] - mlState = mlTable[(mlState.newState()+lowBits)&maxTableMask] - - lowBits = uint16(bits) & bitMask[ofState.nbBits()&15] - ofState = ofTable[(ofState.newState()+lowBits)&maxTableMask] - } - } - s.seqSize += litRemain - if s.seqSize > maxBlockSize { - return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) - } - err := br.close() - if err != nil { - printf("Closing sequences: %v, %+v\n", err, *br) - } - return err -} - -// executeSimple handles cases when a dictionary is not used. -func (s *sequenceDecs) executeSimple(seqs []seqVals, hist []byte) error { - // Ensure we have enough output size... - if len(s.out)+s.seqSize > cap(s.out) { - addBytes := s.seqSize + len(s.out) - s.out = append(s.out, make([]byte, addBytes)...) - s.out = s.out[:len(s.out)-addBytes] - } - - if debugDecoder { - printf("Execute %d seqs with literals: %d into %d bytes\n", len(seqs), len(s.literals), s.seqSize) - } - - var t = len(s.out) - out := s.out[:t+s.seqSize] - - for _, seq := range seqs { - // Add literals - copy(out[t:], s.literals[:seq.ll]) - t += seq.ll - s.literals = s.literals[seq.ll:] - - // Malformed input - if seq.mo > t+len(hist) || seq.mo > s.windowSize { - return fmt.Errorf("match offset (%d) bigger than current history (%d)", seq.mo, t+len(hist)) - } - - // Copy from history. - if v := seq.mo - t; v > 0 { - // v is the start position in history from end. - start := len(hist) - v - if seq.ml > v { - // Some goes into the current block. 
- // Copy remainder of history - copy(out[t:], hist[start:]) - t += v - seq.ml -= v - } else { - copy(out[t:], hist[start:start+seq.ml]) - t += seq.ml - continue - } - } - - // We must be in the current buffer now - if seq.ml > 0 { - start := t - seq.mo - if seq.ml <= t-start { - // No overlap - copy(out[t:], out[start:start+seq.ml]) - t += seq.ml - } else { - // Overlapping copy - // Extend destination slice and copy one byte at the time. - src := out[start : start+seq.ml] - dst := out[t:] - dst = dst[:len(src)] - t += len(src) - // Destination is the space we just added. - for i := range src { - dst[i] = src[i] - } - } - } - } - // Add final literals - copy(out[t:], s.literals) - if debugDecoder { - t += len(s.literals) - if t != len(out) { - panic(fmt.Errorf("length mismatch, want %d, got %d, ss: %d", len(out), t, s.seqSize)) - } - } - s.out = out - - return nil -} diff --git a/vendor/github.com/klauspost/compress/zstd/seqenc.go b/vendor/github.com/klauspost/compress/zstd/seqenc.go deleted file mode 100644 index 8014174a7..000000000 --- a/vendor/github.com/klauspost/compress/zstd/seqenc.go +++ /dev/null @@ -1,114 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -import "math/bits" - -type seqCoders struct { - llEnc, ofEnc, mlEnc *fseEncoder - llPrev, ofPrev, mlPrev *fseEncoder -} - -// swap coders with another (block). -func (s *seqCoders) swap(other *seqCoders) { - *s, *other = *other, *s -} - -// setPrev will update the previous encoders to the actually used ones -// and make sure a fresh one is in the main slot. -func (s *seqCoders) setPrev(ll, ml, of *fseEncoder) { - compareSwap := func(used *fseEncoder, current, prev **fseEncoder) { - // We used the new one, more current to history and reuse the previous history - if *current == used { - *prev, *current = *current, *prev - c := *current - p := *prev - c.reUsed = false - p.reUsed = true - return - } - if used == *prev { - return - } - // Ensure we cannot reuse by accident - prevEnc := *prev - prevEnc.symbolLen = 0 - } - compareSwap(ll, &s.llEnc, &s.llPrev) - compareSwap(ml, &s.mlEnc, &s.mlPrev) - compareSwap(of, &s.ofEnc, &s.ofPrev) -} - -func highBit(val uint32) (n uint32) { - return uint32(bits.Len32(val) - 1) -} - -var llCodeTable = [64]byte{0, 1, 2, 3, 4, 5, 6, 7, - 8, 9, 10, 11, 12, 13, 14, 15, - 16, 16, 17, 17, 18, 18, 19, 19, - 20, 20, 20, 20, 21, 21, 21, 21, - 22, 22, 22, 22, 22, 22, 22, 22, - 23, 23, 23, 23, 23, 23, 23, 23, - 24, 24, 24, 24, 24, 24, 24, 24, - 24, 24, 24, 24, 24, 24, 24, 24} - -// Up to 6 bits -const maxLLCode = 35 - -// llBitsTable translates from ll code to number of bits. -var llBitsTable = [maxLLCode + 1]byte{ - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 1, 1, 1, 1, 2, 2, 3, 3, - 4, 6, 7, 8, 9, 10, 11, 12, - 13, 14, 15, 16} - -// llCode returns the code that represents the literal length requested. 
-func llCode(litLength uint32) uint8 { - const llDeltaCode = 19 - if litLength <= 63 { - // Compiler insists on bounds check (Go 1.12) - return llCodeTable[litLength&63] - } - return uint8(highBit(litLength)) + llDeltaCode -} - -var mlCodeTable = [128]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, - 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, - 32, 32, 33, 33, 34, 34, 35, 35, 36, 36, 36, 36, 37, 37, 37, 37, - 38, 38, 38, 38, 38, 38, 38, 38, 39, 39, 39, 39, 39, 39, 39, 39, - 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, - 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, - 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, - 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42} - -// Up to 6 bits -const maxMLCode = 52 - -// mlBitsTable translates from ml code to number of bits. -var mlBitsTable = [maxMLCode + 1]byte{ - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 1, 1, 1, 1, 2, 2, 3, 3, - 4, 4, 5, 7, 8, 9, 10, 11, - 12, 13, 14, 15, 16} - -// note : mlBase = matchLength - MINMATCH; -// because it's the format it's stored in seqStore->sequences -func mlCode(mlBase uint32) uint8 { - const mlDeltaCode = 36 - if mlBase <= 127 { - // Compiler insists on bounds check (Go 1.12) - return mlCodeTable[mlBase&127] - } - return uint8(highBit(mlBase)) + mlDeltaCode -} - -func ofCode(offset uint32) uint8 { - // A valid offset will always be > 0. - return uint8(bits.Len32(offset) - 1) -} diff --git a/vendor/github.com/klauspost/compress/zstd/snappy.go b/vendor/github.com/klauspost/compress/zstd/snappy.go deleted file mode 100644 index ec13594e8..000000000 --- a/vendor/github.com/klauspost/compress/zstd/snappy.go +++ /dev/null @@ -1,434 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -import ( - "encoding/binary" - "errors" - "hash/crc32" - "io" - - "github.com/klauspost/compress/huff0" - snappy "github.com/klauspost/compress/internal/snapref" -) - -const ( - snappyTagLiteral = 0x00 - snappyTagCopy1 = 0x01 - snappyTagCopy2 = 0x02 - snappyTagCopy4 = 0x03 -) - -const ( - snappyChecksumSize = 4 - snappyMagicBody = "sNaPpY" - - // snappyMaxBlockSize is the maximum size of the input to encodeBlock. It is not - // part of the wire format per se, but some parts of the encoder assume - // that an offset fits into a uint16. - // - // Also, for the framing format (Writer type instead of Encode function), - // https://github.com/google/snappy/blob/master/framing_format.txt says - // that "the uncompressed data in a chunk must be no longer than 65536 - // bytes". - snappyMaxBlockSize = 65536 - - // snappyMaxEncodedLenOfMaxBlockSize equals MaxEncodedLen(snappyMaxBlockSize), but is - // hard coded to be a const instead of a variable, so that obufLen can also - // be a const. Their equivalence is confirmed by - // TestMaxEncodedLenOfMaxBlockSize. - snappyMaxEncodedLenOfMaxBlockSize = 76490 -) - -const ( - chunkTypeCompressedData = 0x00 - chunkTypeUncompressedData = 0x01 - chunkTypePadding = 0xfe - chunkTypeStreamIdentifier = 0xff -) - -var ( - // ErrSnappyCorrupt reports that the input is invalid. - ErrSnappyCorrupt = errors.New("snappy: corrupt input") - // ErrSnappyTooLarge reports that the uncompressed length is too large. 
- ErrSnappyTooLarge = errors.New("snappy: decoded block is too large") - // ErrSnappyUnsupported reports that the input isn't supported. - ErrSnappyUnsupported = errors.New("snappy: unsupported input") - - errUnsupportedLiteralLength = errors.New("snappy: unsupported literal length") -) - -// SnappyConverter can read SnappyConverter-compressed streams and convert them to zstd. -// Conversion is done by converting the stream directly from Snappy without intermediate -// full decoding. -// Therefore the compression ratio is much less than what can be done by a full decompression -// and compression, and a faulty Snappy stream may lead to a faulty Zstandard stream without -// any errors being generated. -// No CRC value is being generated and not all CRC values of the Snappy stream are checked. -// However, it provides really fast recompression of Snappy streams. -// The converter can be reused to avoid allocations, even after errors. -type SnappyConverter struct { - r io.Reader - err error - buf []byte - block *blockEnc -} - -// Convert the Snappy stream supplied in 'in' and write the zStandard stream to 'w'. -// If any error is detected on the Snappy stream it is returned. -// The number of bytes written is returned. -func (r *SnappyConverter) Convert(in io.Reader, w io.Writer) (int64, error) { - initPredefined() - r.err = nil - r.r = in - if r.block == nil { - r.block = &blockEnc{} - r.block.init() - } - r.block.initNewEncode() - if len(r.buf) != snappyMaxEncodedLenOfMaxBlockSize+snappyChecksumSize { - r.buf = make([]byte, snappyMaxEncodedLenOfMaxBlockSize+snappyChecksumSize) - } - r.block.litEnc.Reuse = huff0.ReusePolicyNone - var written int64 - var readHeader bool - { - header := frameHeader{WindowSize: snappyMaxBlockSize}.appendTo(r.buf[:0]) - - var n int - n, r.err = w.Write(header) - if r.err != nil { - return written, r.err - } - written += int64(n) - } - - for { - if !r.readFull(r.buf[:4], true) { - // Add empty last block - r.block.reset(nil) - r.block.last = true - err := r.block.encodeLits(r.block.literals, false) - if err != nil { - return written, err - } - n, err := w.Write(r.block.output) - if err != nil { - return written, err - } - written += int64(n) - - return written, r.err - } - chunkType := r.buf[0] - if !readHeader { - if chunkType != chunkTypeStreamIdentifier { - println("chunkType != chunkTypeStreamIdentifier", chunkType) - r.err = ErrSnappyCorrupt - return written, r.err - } - readHeader = true - } - chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16 - if chunkLen > len(r.buf) { - println("chunkLen > len(r.buf)", chunkType) - r.err = ErrSnappyUnsupported - return written, r.err - } - - // The chunk types are specified at - // https://github.com/google/snappy/blob/master/framing_format.txt - switch chunkType { - case chunkTypeCompressedData: - // Section 4.2. Compressed data (chunk type 0x00). 
- if chunkLen < snappyChecksumSize { - println("chunkLen < snappyChecksumSize", chunkLen, snappyChecksumSize) - r.err = ErrSnappyCorrupt - return written, r.err - } - buf := r.buf[:chunkLen] - if !r.readFull(buf, false) { - return written, r.err - } - //checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 - buf = buf[snappyChecksumSize:] - - n, hdr, err := snappyDecodedLen(buf) - if err != nil { - r.err = err - return written, r.err - } - buf = buf[hdr:] - if n > snappyMaxBlockSize { - println("n > snappyMaxBlockSize", n, snappyMaxBlockSize) - r.err = ErrSnappyCorrupt - return written, r.err - } - r.block.reset(nil) - r.block.pushOffsets() - if err := decodeSnappy(r.block, buf); err != nil { - r.err = err - return written, r.err - } - if r.block.size+r.block.extraLits != n { - printf("invalid size, want %d, got %d\n", n, r.block.size+r.block.extraLits) - r.err = ErrSnappyCorrupt - return written, r.err - } - err = r.block.encode(nil, false, false) - switch err { - case errIncompressible: - r.block.popOffsets() - r.block.reset(nil) - r.block.literals, err = snappy.Decode(r.block.literals[:n], r.buf[snappyChecksumSize:chunkLen]) - if err != nil { - return written, err - } - err = r.block.encodeLits(r.block.literals, false) - if err != nil { - return written, err - } - case nil: - default: - return written, err - } - - n, r.err = w.Write(r.block.output) - if r.err != nil { - return written, err - } - written += int64(n) - continue - case chunkTypeUncompressedData: - if debugEncoder { - println("Uncompressed, chunklen", chunkLen) - } - // Section 4.3. Uncompressed data (chunk type 0x01). - if chunkLen < snappyChecksumSize { - println("chunkLen < snappyChecksumSize", chunkLen, snappyChecksumSize) - r.err = ErrSnappyCorrupt - return written, r.err - } - r.block.reset(nil) - buf := r.buf[:snappyChecksumSize] - if !r.readFull(buf, false) { - return written, r.err - } - checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 - // Read directly into r.decoded instead of via r.buf. - n := chunkLen - snappyChecksumSize - if n > snappyMaxBlockSize { - println("n > snappyMaxBlockSize", n, snappyMaxBlockSize) - r.err = ErrSnappyCorrupt - return written, r.err - } - r.block.literals = r.block.literals[:n] - if !r.readFull(r.block.literals, false) { - return written, r.err - } - if snappyCRC(r.block.literals) != checksum { - println("literals crc mismatch") - r.err = ErrSnappyCorrupt - return written, r.err - } - err := r.block.encodeLits(r.block.literals, false) - if err != nil { - return written, err - } - n, r.err = w.Write(r.block.output) - if r.err != nil { - return written, err - } - written += int64(n) - continue - - case chunkTypeStreamIdentifier: - if debugEncoder { - println("stream id", chunkLen, len(snappyMagicBody)) - } - // Section 4.1. Stream identifier (chunk type 0xff). - if chunkLen != len(snappyMagicBody) { - println("chunkLen != len(snappyMagicBody)", chunkLen, len(snappyMagicBody)) - r.err = ErrSnappyCorrupt - return written, r.err - } - if !r.readFull(r.buf[:len(snappyMagicBody)], false) { - return written, r.err - } - for i := 0; i < len(snappyMagicBody); i++ { - if r.buf[i] != snappyMagicBody[i] { - println("r.buf[i] != snappyMagicBody[i]", r.buf[i], snappyMagicBody[i], i) - r.err = ErrSnappyCorrupt - return written, r.err - } - } - continue - } - - if chunkType <= 0x7f { - // Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f). 
- println("chunkType <= 0x7f") - r.err = ErrSnappyUnsupported - return written, r.err - } - // Section 4.4 Padding (chunk type 0xfe). - // Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd). - if !r.readFull(r.buf[:chunkLen], false) { - return written, r.err - } - } -} - -// decodeSnappy writes the decoding of src to dst. It assumes that the varint-encoded -// length of the decompressed bytes has already been read. -func decodeSnappy(blk *blockEnc, src []byte) error { - //decodeRef(make([]byte, snappyMaxBlockSize), src) - var s, length int - lits := blk.extraLits - var offset uint32 - for s < len(src) { - switch src[s] & 0x03 { - case snappyTagLiteral: - x := uint32(src[s] >> 2) - switch { - case x < 60: - s++ - case x == 60: - s += 2 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - println("uint(s) > uint(len(src)", s, src) - return ErrSnappyCorrupt - } - x = uint32(src[s-1]) - case x == 61: - s += 3 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - println("uint(s) > uint(len(src)", s, src) - return ErrSnappyCorrupt - } - x = uint32(src[s-2]) | uint32(src[s-1])<<8 - case x == 62: - s += 4 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - println("uint(s) > uint(len(src)", s, src) - return ErrSnappyCorrupt - } - x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 - case x == 63: - s += 5 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - println("uint(s) > uint(len(src)", s, src) - return ErrSnappyCorrupt - } - x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 - } - if x > snappyMaxBlockSize { - println("x > snappyMaxBlockSize", x, snappyMaxBlockSize) - return ErrSnappyCorrupt - } - length = int(x) + 1 - if length <= 0 { - println("length <= 0 ", length) - - return errUnsupportedLiteralLength - } - //if length > snappyMaxBlockSize-d || uint32(length) > len(src)-s { - // return ErrSnappyCorrupt - //} - - blk.literals = append(blk.literals, src[s:s+length]...) - //println(length, "litLen") - lits += length - s += length - continue - - case snappyTagCopy1: - s += 2 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - println("uint(s) > uint(len(src)", s, len(src)) - return ErrSnappyCorrupt - } - length = 4 + int(src[s-2])>>2&0x7 - offset = uint32(src[s-2])&0xe0<<3 | uint32(src[s-1]) - - case snappyTagCopy2: - s += 3 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - println("uint(s) > uint(len(src)", s, len(src)) - return ErrSnappyCorrupt - } - length = 1 + int(src[s-3])>>2 - offset = uint32(src[s-2]) | uint32(src[s-1])<<8 - - case snappyTagCopy4: - s += 5 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - println("uint(s) > uint(len(src)", s, len(src)) - return ErrSnappyCorrupt - } - length = 1 + int(src[s-5])>>2 - offset = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 - } - - if offset <= 0 || blk.size+lits < int(offset) /*|| length > len(blk)-d */ { - println("offset <= 0 || blk.size+lits < int(offset)", offset, blk.size+lits, int(offset), blk.size, lits) - - return ErrSnappyCorrupt - } - - // Check if offset is one of the recent offsets. - // Adjusts the output offset accordingly. - // Gives a tiny bit of compression, typically around 1%. 
- if false { - offset = blk.matchOffset(offset, uint32(lits)) - } else { - offset += 3 - } - - blk.sequences = append(blk.sequences, seq{ - litLen: uint32(lits), - offset: offset, - matchLen: uint32(length) - zstdMinMatch, - }) - blk.size += length + lits - lits = 0 - } - blk.extraLits = lits - return nil -} - -func (r *SnappyConverter) readFull(p []byte, allowEOF bool) (ok bool) { - if _, r.err = io.ReadFull(r.r, p); r.err != nil { - if r.err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) { - r.err = ErrSnappyCorrupt - } - return false - } - return true -} - -var crcTable = crc32.MakeTable(crc32.Castagnoli) - -// crc implements the checksum specified in section 3 of -// https://github.com/google/snappy/blob/master/framing_format.txt -func snappyCRC(b []byte) uint32 { - c := crc32.Update(0, crcTable, b) - return c>>15 | c<<17 + 0xa282ead8 -} - -// snappyDecodedLen returns the length of the decoded block and the number of bytes -// that the length header occupied. -func snappyDecodedLen(src []byte) (blockLen, headerLen int, err error) { - v, n := binary.Uvarint(src) - if n <= 0 || v > 0xffffffff { - return 0, 0, ErrSnappyCorrupt - } - - const wordSize = 32 << (^uint(0) >> 32 & 1) - if wordSize == 32 && v > 0x7fffffff { - return 0, 0, ErrSnappyTooLarge - } - return int(v), n, nil -} diff --git a/vendor/github.com/klauspost/compress/zstd/zip.go b/vendor/github.com/klauspost/compress/zstd/zip.go deleted file mode 100644 index 29c15c8c4..000000000 --- a/vendor/github.com/klauspost/compress/zstd/zip.go +++ /dev/null @@ -1,141 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. - -package zstd - -import ( - "errors" - "io" - "sync" -) - -// ZipMethodWinZip is the method for Zstandard compressed data inside Zip files for WinZip. -// See https://www.winzip.com/win/en/comp_info.html -const ZipMethodWinZip = 93 - -// ZipMethodPKWare is the original method number used by PKWARE to indicate Zstandard compression. -// Deprecated: This has been deprecated by PKWARE, use ZipMethodWinZip instead for compression. -// See https://pkware.cachefly.net/webdocs/APPNOTE/APPNOTE-6.3.9.TXT -const ZipMethodPKWare = 20 - -// zipReaderPool is the default reader pool. -var zipReaderPool = sync.Pool{New: func() interface{} { - z, err := NewReader(nil, WithDecoderLowmem(true), WithDecoderMaxWindow(128<<20), WithDecoderConcurrency(1)) - if err != nil { - panic(err) - } - return z -}} - -// newZipReader creates a pooled zip decompressor. -func newZipReader(opts ...DOption) func(r io.Reader) io.ReadCloser { - pool := &zipReaderPool - if len(opts) > 0 { - opts = append([]DOption{WithDecoderLowmem(true), WithDecoderMaxWindow(128 << 20)}, opts...) - // Force concurrency 1 - opts = append(opts, WithDecoderConcurrency(1)) - // Create our own pool - pool = &sync.Pool{} - } - return func(r io.Reader) io.ReadCloser { - dec, ok := pool.Get().(*Decoder) - if ok { - dec.Reset(r) - } else { - d, err := NewReader(r, opts...) 
- if err != nil { - panic(err) - } - dec = d - } - return &pooledZipReader{dec: dec, pool: pool} - } -} - -type pooledZipReader struct { - mu sync.Mutex // guards Close and Read - pool *sync.Pool - dec *Decoder -} - -func (r *pooledZipReader) Read(p []byte) (n int, err error) { - r.mu.Lock() - defer r.mu.Unlock() - if r.dec == nil { - return 0, errors.New("read after close or EOF") - } - dec, err := r.dec.Read(p) - if err == io.EOF { - r.dec.Reset(nil) - r.pool.Put(r.dec) - r.dec = nil - } - return dec, err -} - -func (r *pooledZipReader) Close() error { - r.mu.Lock() - defer r.mu.Unlock() - var err error - if r.dec != nil { - err = r.dec.Reset(nil) - r.pool.Put(r.dec) - r.dec = nil - } - return err -} - -type pooledZipWriter struct { - mu sync.Mutex // guards Close and Read - enc *Encoder - pool *sync.Pool -} - -func (w *pooledZipWriter) Write(p []byte) (n int, err error) { - w.mu.Lock() - defer w.mu.Unlock() - if w.enc == nil { - return 0, errors.New("Write after Close") - } - return w.enc.Write(p) -} - -func (w *pooledZipWriter) Close() error { - w.mu.Lock() - defer w.mu.Unlock() - var err error - if w.enc != nil { - err = w.enc.Close() - w.pool.Put(w.enc) - w.enc = nil - } - return err -} - -// ZipCompressor returns a compressor that can be registered with zip libraries. -// The provided encoder options will be used on all encodes. -func ZipCompressor(opts ...EOption) func(w io.Writer) (io.WriteCloser, error) { - var pool sync.Pool - return func(w io.Writer) (io.WriteCloser, error) { - enc, ok := pool.Get().(*Encoder) - if ok { - enc.Reset(w) - } else { - var err error - enc, err = NewWriter(w, opts...) - if err != nil { - return nil, err - } - } - return &pooledZipWriter{enc: enc, pool: &pool}, nil - } -} - -// ZipDecompressor returns a decompressor that can be registered with zip libraries. -// See ZipCompressor for example. -// Options can be specified. WithDecoderConcurrency(1) is forced, -// and by default a 128MB maximum decompression window is specified. -// The window size can be overridden if required. -func ZipDecompressor(opts ...DOption) func(r io.Reader) io.ReadCloser { - return newZipReader(opts...) -} diff --git a/vendor/github.com/klauspost/compress/zstd/zstd.go b/vendor/github.com/klauspost/compress/zstd/zstd.go deleted file mode 100644 index 4be7cc736..000000000 --- a/vendor/github.com/klauspost/compress/zstd/zstd.go +++ /dev/null @@ -1,121 +0,0 @@ -// Package zstd provides decompression of zstandard files. -// -// For advanced usage and examples, go to the README: https://github.com/klauspost/compress/tree/master/zstd#zstd -package zstd - -import ( - "bytes" - "encoding/binary" - "errors" - "log" - "math" -) - -// enable debug printing -const debug = false - -// enable encoding debug printing -const debugEncoder = debug - -// enable decoding debug printing -const debugDecoder = debug - -// Enable extra assertions. -const debugAsserts = debug || false - -// print sequence details -const debugSequences = false - -// print detailed matching information -const debugMatches = false - -// force encoder to use predefined tables. -const forcePreDef = false - -// zstdMinMatch is the minimum zstd match length. -const zstdMinMatch = 3 - -// fcsUnknown is used for unknown frame content size. -const fcsUnknown = math.MaxUint64 - -var ( - // ErrReservedBlockType is returned when a reserved block type is found. - // Typically this indicates wrong or corrupted input. 
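The zip.go helpers removed above (ZipCompressor, ZipDecompressor, ZipMethodWinZip) are documented as pluggable into zip libraries. The following is a hedged illustration only — it assumes a direct, non-vendored dependency on github.com/klauspost/compress/zstd, which this change drops from the vendor tree — showing roughly how they would be registered with the standard library's archive/zip.

package main

import (
	"archive/zip"
	"log"
	"os"

	"github.com/klauspost/compress/zstd"
)

func main() {
	f, err := os.Create("out.zip")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	zw := zip.NewWriter(f)
	// ZipCompressor returns a zip.Compressor-compatible factory; method 93 is ZipMethodWinZip.
	zw.RegisterCompressor(zstd.ZipMethodWinZip, zstd.ZipCompressor())

	w, err := zw.CreateHeader(&zip.FileHeader{Name: "hello.txt", Method: zstd.ZipMethodWinZip})
	if err != nil {
		log.Fatal(err)
	}
	if _, err := w.Write([]byte("hello zstd in zip")); err != nil {
		log.Fatal(err)
	}
	if err := zw.Close(); err != nil {
		log.Fatal(err)
	}

	// Readers register the matching decompressor:
	//   zr.RegisterDecompressor(zstd.ZipMethodWinZip, zstd.ZipDecompressor())
}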
- ErrReservedBlockType = errors.New("invalid input: reserved block type encountered") - - // ErrCompressedSizeTooBig is returned when a block is bigger than allowed. - // Typically this indicates wrong or corrupted input. - ErrCompressedSizeTooBig = errors.New("invalid input: compressed size too big") - - // ErrBlockTooSmall is returned when a block is too small to be decoded. - // Typically returned on invalid input. - ErrBlockTooSmall = errors.New("block too small") - - // ErrUnexpectedBlockSize is returned when a block has unexpected size. - // Typically returned on invalid input. - ErrUnexpectedBlockSize = errors.New("unexpected block size") - - // ErrMagicMismatch is returned when a "magic" number isn't what is expected. - // Typically this indicates wrong or corrupted input. - ErrMagicMismatch = errors.New("invalid input: magic number mismatch") - - // ErrWindowSizeExceeded is returned when a reference exceeds the valid window size. - // Typically this indicates wrong or corrupted input. - ErrWindowSizeExceeded = errors.New("window size exceeded") - - // ErrWindowSizeTooSmall is returned when no window size is specified. - // Typically this indicates wrong or corrupted input. - ErrWindowSizeTooSmall = errors.New("invalid input: window size was too small") - - // ErrDecoderSizeExceeded is returned if decompressed size exceeds the configured limit. - ErrDecoderSizeExceeded = errors.New("decompressed size exceeds configured limit") - - // ErrUnknownDictionary is returned if the dictionary ID is unknown. - ErrUnknownDictionary = errors.New("unknown dictionary") - - // ErrFrameSizeExceeded is returned if the stated frame size is exceeded. - // This is only returned if SingleSegment is specified on the frame. - ErrFrameSizeExceeded = errors.New("frame size exceeded") - - // ErrFrameSizeMismatch is returned if the stated frame size does not match the expected size. - // This is only returned if SingleSegment is specified on the frame. - ErrFrameSizeMismatch = errors.New("frame size does not match size on stream") - - // ErrCRCMismatch is returned if CRC mismatches. - ErrCRCMismatch = errors.New("CRC check failed") - - // ErrDecoderClosed will be returned if the Decoder was used after - // Close has been called. - ErrDecoderClosed = errors.New("decoder used after Close") - - // ErrDecoderNilInput is returned when a nil Reader was provided - // and an operation other than Reset/DecodeAll/Close was attempted. - ErrDecoderNilInput = errors.New("nil input provided as reader") -) - -func println(a ...interface{}) { - if debug || debugDecoder || debugEncoder { - log.Println(a...) - } -} - -func printf(format string, a ...interface{}) { - if debug || debugDecoder || debugEncoder { - log.Printf(format, a...) 
- } -} - -func load3232(b []byte, i int32) uint32 { - return binary.LittleEndian.Uint32(b[:len(b):len(b)][i:]) -} - -func load6432(b []byte, i int32) uint64 { - return binary.LittleEndian.Uint64(b[:len(b):len(b)][i:]) -} - -type byter interface { - Bytes() []byte - Len() int -} - -var _ byter = &bytes.Buffer{} diff --git a/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md b/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md index 76577dc78..3011efb57 100644 --- a/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md +++ b/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md @@ -1,3 +1,50 @@ +## 2.21.0 + + + ### Features + - add support for GINKGO_TIME_FORMAT [a69eb39] + - add GINKGO_NO_COLOR to disable colors via environment variables [bcab9c8] + + ### Fixes + - increase threshold in timeline matcher [e548367] + - Fix the document by replacing `SpecsThatWillBeRun` with `SpecsThatWillRun` + [c2c4d3c] + + ### Maintenance + - bump various dependencies [7e65a00] + +## 2.20.2 + +Require Go 1.22+ + +### Maintenance +- bump go to v1.22 [a671816] + +## 2.20.1 + +### Fixes +- make BeSpecEvent duration matcher more forgiving [d6f9640] + +## 2.20.0 + +### Features +- Add buildvcs flag [be5ab95] + +### Maintenance +- Add update-deps to makefile [d303d14] +- bump all dependencies [7a50221] + +## 2.19.1 + +### Fixes +- update supported platforms for race conditions [63c8c30] +- [build] Allow custom name for binaries. [ff41e27] + +### Maintenance +- bump gomega [76f4e0c] +- Bump rexml from 3.2.6 to 3.2.8 in /docs (#1417) [b69c00d] +- Bump golang.org/x/sys from 0.20.0 to 0.21.0 (#1425) [f097741] + ## 2.19.0 ### Features diff --git a/vendor/github.com/onsi/ginkgo/v2/Makefile b/vendor/github.com/onsi/ginkgo/v2/Makefile index cb099aff9..06dff97cd 100644 --- a/vendor/github.com/onsi/ginkgo/v2/Makefile +++ b/vendor/github.com/onsi/ginkgo/v2/Makefile @@ -4,8 +4,13 @@ all: vet test .PHONY: test test: - go run github.com/onsi/ginkgo/v2/ginkgo -r -p + go run github.com/onsi/ginkgo/v2/ginkgo -r -p -randomize-all -keep-going .PHONY: vet vet: go vet ./... + +.PHONY: update-deps +update-deps: + go get -u ./... 
+ go mod tidy \ No newline at end of file diff --git a/vendor/github.com/onsi/ginkgo/v2/formatter/formatter.go b/vendor/github.com/onsi/ginkgo/v2/formatter/formatter.go index 743555dde..4d5749114 100644 --- a/vendor/github.com/onsi/ginkgo/v2/formatter/formatter.go +++ b/vendor/github.com/onsi/ginkgo/v2/formatter/formatter.go @@ -82,6 +82,10 @@ func New(colorMode ColorMode) Formatter { return fmt.Sprintf("\x1b[38;5;%dm", colorCode) } + if _, noColor := os.LookupEnv("GINKGO_NO_COLOR"); noColor { + colorMode = ColorModeNone + } + f := Formatter{ ColorMode: colorMode, colors: map[string]string{ diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/build/build_command.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/build/build_command.go index 5db5d1a7b..fd1726084 100644 --- a/vendor/github.com/onsi/ginkgo/v2/ginkgo/build/build_command.go +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/build/build_command.go @@ -2,6 +2,8 @@ package build import ( "fmt" + "os" + "path" "github.com/onsi/ginkgo/v2/ginkgo/command" "github.com/onsi/ginkgo/v2/ginkgo/internal" @@ -53,7 +55,18 @@ func buildSpecs(args []string, cliConfig types.CLIConfig, goFlagsConfig types.Go if suite.State.Is(internal.TestSuiteStateFailedToCompile) { fmt.Println(suite.CompilationError.Error()) } else { - fmt.Printf("Compiled %s.test\n", suite.PackageName) + if len(goFlagsConfig.O) == 0 { + goFlagsConfig.O = path.Join(suite.Path, suite.PackageName+".test") + } else { + stat, err := os.Stat(goFlagsConfig.O) + if err != nil { + panic(err) + } + if stat.IsDir() { + goFlagsConfig.O += "/" + suite.PackageName + ".test" + } + } + fmt.Printf("Compiled %s\n", goFlagsConfig.O) } } diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/compile.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/compile.go index 86da7340d..48827cc5e 100644 --- a/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/compile.go +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/compile.go @@ -25,6 +25,18 @@ func CompileSuite(suite TestSuite, goFlagsConfig types.GoFlagsConfig) TestSuite return suite } + if len(goFlagsConfig.O) > 0 { + userDefinedPath, err := filepath.Abs(goFlagsConfig.O) + if err != nil { + suite.State = TestSuiteStateFailedToCompile + suite.CompilationError = fmt.Errorf("Failed to compute compilation target path %s:\n%s", goFlagsConfig.O, err.Error()) + return suite + } + path = userDefinedPath + } + + goFlagsConfig.O = path + ginkgoInvocationPath, _ := os.Getwd() ginkgoInvocationPath, _ = filepath.Abs(ginkgoInvocationPath) packagePath := suite.AbsPath() @@ -34,7 +46,7 @@ func CompileSuite(suite TestSuite, goFlagsConfig types.GoFlagsConfig) TestSuite suite.CompilationError = fmt.Errorf("Failed to get relative path from package to the current working directory:\n%s", err.Error()) return suite } - args, err := types.GenerateGoTestCompileArgs(goFlagsConfig, path, "./", pathToInvocationPath) + args, err := types.GenerateGoTestCompileArgs(goFlagsConfig, "./", pathToInvocationPath) if err != nil { suite.State = TestSuiteStateFailedToCompile suite.CompilationError = fmt.Errorf("Failed to generate go test compile flags:\n%s", err.Error()) diff --git a/vendor/github.com/onsi/ginkgo/v2/types/config.go b/vendor/github.com/onsi/ginkgo/v2/types/config.go index 66463cf5e..8c0dfab8c 100644 --- a/vendor/github.com/onsi/ginkgo/v2/types/config.go +++ b/vendor/github.com/onsi/ginkgo/v2/types/config.go @@ -202,6 +202,7 @@ type GoFlagsConfig struct { A bool ASMFlags string BuildMode string + BuildVCS bool Compiler string GCCGoFlags string GCFlags string @@ -219,6 
+220,7 @@ type GoFlagsConfig struct { ToolExec string Work bool X bool + O string } func NewDefaultGoFlagsConfig() GoFlagsConfig { @@ -326,7 +328,7 @@ var ParallelConfigFlags = GinkgoFlags{ // ReporterConfigFlags provides flags for the Ginkgo test process, and CLI var ReporterConfigFlags = GinkgoFlags{ {KeyPath: "R.NoColor", Name: "no-color", SectionKey: "output", DeprecatedName: "noColor", DeprecatedDocLink: "changed-command-line-flags", - Usage: "If set, suppress color output in default reporter."}, + Usage: "If set, suppress color output in default reporter. You can also set the environment variable GINKGO_NO_COLOR=TRUE"}, {KeyPath: "R.Verbose", Name: "v", SectionKey: "output", Usage: "If set, emits more output including GinkgoWriter contents."}, {KeyPath: "R.VeryVerbose", Name: "vv", SectionKey: "output", @@ -511,7 +513,7 @@ var GinkgoCLIWatchFlags = GinkgoFlags{ // GoBuildFlags provides flags for the Ginkgo CLI build, run, and watch commands that capture go's build-time flags. These are passed to go test -c by the ginkgo CLI var GoBuildFlags = GinkgoFlags{ {KeyPath: "Go.Race", Name: "race", SectionKey: "code-and-coverage-analysis", - Usage: "enable data race detection. Supported only on linux/amd64, freebsd/amd64, darwin/amd64, windows/amd64, linux/ppc64le and linux/arm64 (only for 48-bit VMA)."}, + Usage: "enable data race detection. Supported on linux/amd64, linux/ppc64le, linux/arm64, linux/s390x, freebsd/amd64, netbsd/amd64, darwin/amd64, darwin/arm64, and windows/amd64."}, {KeyPath: "Go.Vet", Name: "vet", UsageArgument: "list", SectionKey: "code-and-coverage-analysis", Usage: `Configure the invocation of "go vet" during "go test" to use the comma-separated list of vet checks. If list is empty, "go test" runs "go vet" with a curated list of checks believed to be always worth addressing. If list is "off", "go test" does not run "go vet" at all. Available checks can be found by running 'go doc cmd/vet'`}, {KeyPath: "Go.Cover", Name: "cover", SectionKey: "code-and-coverage-analysis", @@ -527,6 +529,8 @@ var GoBuildFlags = GinkgoFlags{ Usage: "arguments to pass on each go tool asm invocation."}, {KeyPath: "Go.BuildMode", Name: "buildmode", UsageArgument: "mode", SectionKey: "go-build", Usage: "build mode to use. See 'go help buildmode' for more."}, + {KeyPath: "Go.BuildVCS", Name: "buildvcs", SectionKey: "go-build", + Usage: "adds version control information."}, {KeyPath: "Go.Compiler", Name: "compiler", UsageArgument: "name", SectionKey: "go-build", Usage: "name of compiler to use, as in runtime.Compiler (gccgo or gc)."}, {KeyPath: "Go.GCCGoFlags", Name: "gccgoflags", UsageArgument: "'[pattern=]arg list'", SectionKey: "go-build", @@ -561,6 +565,8 @@ var GoBuildFlags = GinkgoFlags{ Usage: "print the name of the temporary work directory and do not delete it when exiting."}, {KeyPath: "Go.X", Name: "x", SectionKey: "go-build", Usage: "print the commands."}, + {KeyPath: "Go.O", Name: "o", SectionKey: "go-build", + Usage: "output binary path (including name)."}, } // GoRunFlags provides flags for the Ginkgo CLI run, and watch commands that capture go's run-time flags. 
These are passed to the compiled test binary by the ginkgo CLI @@ -614,7 +620,7 @@ func VetAndInitializeCLIAndGoConfig(cliConfig CLIConfig, goFlagsConfig GoFlagsCo } // GenerateGoTestCompileArgs is used by the Ginkgo CLI to generate command line arguments to pass to the go test -c command when compiling the test -func GenerateGoTestCompileArgs(goFlagsConfig GoFlagsConfig, destination string, packageToBuild string, pathToInvocationPath string) ([]string, error) { +func GenerateGoTestCompileArgs(goFlagsConfig GoFlagsConfig, packageToBuild string, pathToInvocationPath string) ([]string, error) { // if the user has set the CoverProfile run-time flag make sure to set the build-time cover flag to make sure // the built test binary can generate a coverprofile if goFlagsConfig.CoverProfile != "" { @@ -637,7 +643,7 @@ func GenerateGoTestCompileArgs(goFlagsConfig GoFlagsConfig, destination string, goFlagsConfig.CoverPkg = strings.Join(adjustedCoverPkgs, ",") } - args := []string{"test", "-c", "-o", destination, packageToBuild} + args := []string{"test", "-c", packageToBuild} goArgs, err := GenerateFlagArgs( GoBuildFlags, map[string]interface{}{ diff --git a/vendor/github.com/onsi/ginkgo/v2/types/types.go b/vendor/github.com/onsi/ginkgo/v2/types/types.go index aae69b04c..ddcbec1ba 100644 --- a/vendor/github.com/onsi/ginkgo/v2/types/types.go +++ b/vendor/github.com/onsi/ginkgo/v2/types/types.go @@ -3,13 +3,21 @@ package types import ( "encoding/json" "fmt" + "os" "sort" "strings" "time" ) const GINKGO_FOCUS_EXIT_CODE = 197 -const GINKGO_TIME_FORMAT = "01/02/06 15:04:05.999" + +var GINKGO_TIME_FORMAT = "01/02/06 15:04:05.999" + +func init() { + if os.Getenv("GINKGO_TIME_FORMAT") != "" { + GINKGO_TIME_FORMAT = os.Getenv("GINKGO_TIME_FORMAT") + } +} // Report captures information about a Ginkgo test run type Report struct { diff --git a/vendor/github.com/onsi/ginkgo/v2/types/version.go b/vendor/github.com/onsi/ginkgo/v2/types/version.go index acab03492..caf3c9f5e 100644 --- a/vendor/github.com/onsi/ginkgo/v2/types/version.go +++ b/vendor/github.com/onsi/ginkgo/v2/types/version.go @@ -1,3 +1,3 @@ package types -const VERSION = "2.19.0" +const VERSION = "2.21.0" diff --git a/vendor/github.com/onsi/gomega/CHANGELOG.md b/vendor/github.com/onsi/gomega/CHANGELOG.md index 9572e5fa6..9f6090b8d 100644 --- a/vendor/github.com/onsi/gomega/CHANGELOG.md +++ b/vendor/github.com/onsi/gomega/CHANGELOG.md @@ -1,3 +1,36 @@ +## 1.35.1 + +### Fixes +- Export EnforceDefaultTimeoutsWhenUsingContexts and DisableDefaultTimeoutsWhenUsingContext [ca36da1] + +## 1.35.0 + +### Features + +- You can now call `EnforceDefaultTimeoutsWhenUsingContexts()` to have `Eventually` honor the default timeout when passed a context. (prior to this you had to expclility add a timeout) [e4c4265] +- You can call `StopTrying(message).Successfully()` to abort a `Consistently` early without failure [eeca931] + +### Fixes + +- Stop memoizing the result of `HaveField` to avoid unexpected errors when used with async assertions. 
[3bdbc4e] + +### Maintenance + +- Bump all dependencies [a05a416] + +## 1.34.2 + +Require Go 1.22+ + +### Maintenance +- bump ginkgo as well [c59c6dc] +- bump to go 1.22 - remove x/exp dependency [8158b99] + +## 1.34.1 + +### Maintenance +- Use slices from exp/slices to keep golang 1.20 compat [5e71dcd] + ## 1.34.0 ### Features diff --git a/vendor/github.com/onsi/gomega/gomega_dsl.go b/vendor/github.com/onsi/gomega/gomega_dsl.go index 650ae8672..1038d7dd4 100644 --- a/vendor/github.com/onsi/gomega/gomega_dsl.go +++ b/vendor/github.com/onsi/gomega/gomega_dsl.go @@ -22,7 +22,7 @@ import ( "github.com/onsi/gomega/types" ) -const GOMEGA_VERSION = "1.34.0" +const GOMEGA_VERSION = "1.35.1" const nilGomegaPanic = `You are trying to make an assertion, but haven't registered Gomega's fail handler. If you're using Ginkgo then you probably forgot to put your assertion in an It(). @@ -319,7 +319,19 @@ you an also use Eventually().WithContext(ctx) to pass in the context. Passed-in Eventually(client.FetchCount).WithContext(ctx).WithArguments("/users").Should(BeNumerically(">=", 17)) }, SpecTimeout(time.Second)) -Either way the context passd to Eventually is also passed to the underlying function. Now, when Ginkgo cancels the context both the FetchCount client and Gomega will be informed and can exit. +Either way the context pasesd to Eventually is also passed to the underlying function. Now, when Ginkgo cancels the context both the FetchCount client and Gomega will be informed and can exit. + +By default, when a context is passed to Eventually *without* an explicit timeout, Gomega will rely solely on the context's cancellation to determine when to stop polling. If you want to specify a timeout in addition to the context you can do so using the .WithTimeout() method. For example: + + Eventually(client.FetchCount).WithContext(ctx).WithTimeout(10*time.Second).Should(BeNumerically(">=", 17)) + +now either the context cacnellation or the timeout will cause Eventually to stop polling. + +If, instead, you would like to opt out of this behavior and have Gomega's default timeouts govern Eventuallys that take a context you can call: + + EnforceDefaultTimeoutsWhenUsingContexts() + +in the DSL (or on a Gomega instance). Now all calls to Eventually that take a context will fail if eitehr the context is cancelled or the default timeout elapses. **Category 3: Making assertions _in_ the function passed into Eventually** @@ -491,6 +503,16 @@ func SetDefaultConsistentlyPollingInterval(t time.Duration) { Default.SetDefaultConsistentlyPollingInterval(t) } +// EnforceDefaultTimeoutsWhenUsingContexts forces `Eventually` to apply a default timeout even when a context is provided. +func EnforceDefaultTimeoutsWhenUsingContexts() { + Default.EnforceDefaultTimeoutsWhenUsingContexts() +} + +// DisableDefaultTimeoutsWhenUsingContext disables the default timeout when a context is provided to `Eventually`. +func DisableDefaultTimeoutsWhenUsingContext() { + Default.DisableDefaultTimeoutsWhenUsingContext() +} + // AsyncAssertion is returned by Eventually and Consistently and polls the actual value passed into Eventually against // the matcher passed to the Should and ShouldNot methods. 
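The Gomega 1.35 documentation added above describes the new interplay between contexts and timeouts, plus the StopTrying(...).Successfully() escape hatch for Consistently. The sketch below is illustrative only; the spec text and the fetchCount stub are hypothetical and not part of this change.

package example_test

import (
	"context"
	"time"

	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"
)

var _ = Describe("Gomega 1.35 polling behaviour", func() {
	It("bounds a context-driven Eventually with an explicit timeout", func(ctx SpecContext) {
		fetchCount := func(ctx context.Context) (int, error) { return 17, nil } // hypothetical stub

		// With only a context, Eventually polls until the context is cancelled;
		// adding WithTimeout also bounds the polling, as the new docs describe.
		// Alternatively, EnforceDefaultTimeoutsWhenUsingContexts() makes the
		// default Eventually timeout apply even when a context is passed.
		Eventually(fetchCount).WithContext(ctx).WithTimeout(10 * time.Second).
			Should(BeNumerically(">=", 17))
	}, SpecTimeout(time.Second))

	It("ends a Consistently early without failing it", func() {
		calls := 0
		Consistently(func() (int, error) {
			calls++
			if calls > 3 {
				// New in 1.35: stop polling and treat the Consistently as successful.
				return calls, StopTrying("saw enough samples").Successfully()
			}
			return calls, nil
		}).Should(BeNumerically("<", 100))
	})
})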
// diff --git a/vendor/github.com/onsi/gomega/internal/async_assertion.go b/vendor/github.com/onsi/gomega/internal/async_assertion.go index cde9e2ec8..8b4cd1f5b 100644 --- a/vendor/github.com/onsi/gomega/internal/async_assertion.go +++ b/vendor/github.com/onsi/gomega/internal/async_assertion.go @@ -335,7 +335,7 @@ func (assertion *AsyncAssertion) afterTimeout() <-chan time.Time { if assertion.asyncType == AsyncAssertionTypeConsistently { return time.After(assertion.g.DurationBundle.ConsistentlyDuration) } else { - if assertion.ctx == nil { + if assertion.ctx == nil || assertion.g.DurationBundle.EnforceDefaultTimeoutsWhenUsingContexts { return time.After(assertion.g.DurationBundle.EventuallyTimeout) } else { return nil @@ -496,7 +496,15 @@ func (assertion *AsyncAssertion) match(matcher types.GomegaMatcher, desiredMatch for _, err := range []error{actualErr, matcherErr} { if pollingSignalErr, ok := AsPollingSignalError(err); ok { if pollingSignalErr.IsStopTrying() { - fail("Told to stop trying") + if pollingSignalErr.IsSuccessful() { + if assertion.asyncType == AsyncAssertionTypeEventually { + fail("Told to stop trying (and ignoring call to Successfully(), as it is only relevant with Consistently)") + } else { + return true // early escape hatch for Consistently + } + } else { + fail("Told to stop trying") + } return false } if pollingSignalErr.IsTryAgainAfter() { diff --git a/vendor/github.com/onsi/gomega/internal/duration_bundle.go b/vendor/github.com/onsi/gomega/internal/duration_bundle.go index 6e0d90d3a..2e026c336 100644 --- a/vendor/github.com/onsi/gomega/internal/duration_bundle.go +++ b/vendor/github.com/onsi/gomega/internal/duration_bundle.go @@ -8,10 +8,11 @@ import ( ) type DurationBundle struct { - EventuallyTimeout time.Duration - EventuallyPollingInterval time.Duration - ConsistentlyDuration time.Duration - ConsistentlyPollingInterval time.Duration + EventuallyTimeout time.Duration + EventuallyPollingInterval time.Duration + ConsistentlyDuration time.Duration + ConsistentlyPollingInterval time.Duration + EnforceDefaultTimeoutsWhenUsingContexts bool } const ( @@ -20,15 +21,19 @@ const ( ConsistentlyDurationEnvVarName = "GOMEGA_DEFAULT_CONSISTENTLY_DURATION" ConsistentlyPollingIntervalEnvVarName = "GOMEGA_DEFAULT_CONSISTENTLY_POLLING_INTERVAL" + + EnforceDefaultTimeoutsWhenUsingContextsEnvVarName = "GOMEGA_ENFORCE_DEFAULT_TIMEOUTS_WHEN_USING_CONTEXTS" ) func FetchDefaultDurationBundle() DurationBundle { + _, EnforceDefaultTimeoutsWhenUsingContexts := os.LookupEnv(EnforceDefaultTimeoutsWhenUsingContextsEnvVarName) return DurationBundle{ EventuallyTimeout: durationFromEnv(EventuallyTimeoutEnvVarName, time.Second), EventuallyPollingInterval: durationFromEnv(EventuallyPollingIntervalEnvVarName, 10*time.Millisecond), - ConsistentlyDuration: durationFromEnv(ConsistentlyDurationEnvVarName, 100*time.Millisecond), - ConsistentlyPollingInterval: durationFromEnv(ConsistentlyPollingIntervalEnvVarName, 10*time.Millisecond), + ConsistentlyDuration: durationFromEnv(ConsistentlyDurationEnvVarName, 100*time.Millisecond), + ConsistentlyPollingInterval: durationFromEnv(ConsistentlyPollingIntervalEnvVarName, 10*time.Millisecond), + EnforceDefaultTimeoutsWhenUsingContexts: EnforceDefaultTimeoutsWhenUsingContexts, } } diff --git a/vendor/github.com/onsi/gomega/internal/gomega.go b/vendor/github.com/onsi/gomega/internal/gomega.go index de1f4f336..c6e2fcc0e 100644 --- a/vendor/github.com/onsi/gomega/internal/gomega.go +++ b/vendor/github.com/onsi/gomega/internal/gomega.go @@ -127,3 +127,11 @@ func (g 
*Gomega) SetDefaultConsistentlyDuration(t time.Duration) { func (g *Gomega) SetDefaultConsistentlyPollingInterval(t time.Duration) { g.DurationBundle.ConsistentlyPollingInterval = t } + +func (g *Gomega) EnforceDefaultTimeoutsWhenUsingContexts() { + g.DurationBundle.EnforceDefaultTimeoutsWhenUsingContexts = true +} + +func (g *Gomega) DisableDefaultTimeoutsWhenUsingContext() { + g.DurationBundle.EnforceDefaultTimeoutsWhenUsingContexts = false +} diff --git a/vendor/github.com/onsi/gomega/internal/polling_signal_error.go b/vendor/github.com/onsi/gomega/internal/polling_signal_error.go index 83b04b1a4..3a4f7ddd9 100644 --- a/vendor/github.com/onsi/gomega/internal/polling_signal_error.go +++ b/vendor/github.com/onsi/gomega/internal/polling_signal_error.go @@ -17,6 +17,7 @@ type PollingSignalError interface { error Wrap(err error) PollingSignalError Attach(description string, obj any) PollingSignalError + Successfully() PollingSignalError Now() } @@ -45,6 +46,7 @@ type PollingSignalErrorImpl struct { wrappedErr error pollingSignalErrorType PollingSignalErrorType duration time.Duration + successful bool Attachments []PollingSignalErrorAttachment } @@ -73,6 +75,11 @@ func (s *PollingSignalErrorImpl) Unwrap() error { return s.wrappedErr } +func (s *PollingSignalErrorImpl) Successfully() PollingSignalError { + s.successful = true + return s +} + func (s *PollingSignalErrorImpl) Now() { panic(s) } @@ -81,6 +88,10 @@ func (s *PollingSignalErrorImpl) IsStopTrying() bool { return s.pollingSignalErrorType == PollingSignalErrorTypeStopTrying } +func (s *PollingSignalErrorImpl) IsSuccessful() bool { + return s.successful +} + func (s *PollingSignalErrorImpl) IsTryAgainAfter() bool { return s.pollingSignalErrorType == PollingSignalErrorTypeTryAgainAfter } diff --git a/vendor/github.com/onsi/gomega/matchers/have_field.go b/vendor/github.com/onsi/gomega/matchers/have_field.go index 6989f78c4..8dd3f871a 100644 --- a/vendor/github.com/onsi/gomega/matchers/have_field.go +++ b/vendor/github.com/onsi/gomega/matchers/have_field.go @@ -17,7 +17,7 @@ func (e missingFieldError) Error() string { return string(e) } -func extractField(actual interface{}, field string, matchername string) (interface{}, error) { +func extractField(actual interface{}, field string, matchername string) (any, error) { fields := strings.SplitN(field, ".", 2) actualValue := reflect.ValueOf(actual) @@ -64,36 +64,46 @@ func extractField(actual interface{}, field string, matchername string) (interfa type HaveFieldMatcher struct { Field string Expected interface{} +} - extractedField interface{} - expectedMatcher omegaMatcher +func (matcher *HaveFieldMatcher) expectedMatcher() omegaMatcher { + var isMatcher bool + expectedMatcher, isMatcher := matcher.Expected.(omegaMatcher) + if !isMatcher { + expectedMatcher = &EqualMatcher{Expected: matcher.Expected} + } + return expectedMatcher } func (matcher *HaveFieldMatcher) Match(actual interface{}) (success bool, err error) { - matcher.extractedField, err = extractField(actual, matcher.Field, "HaveField") + extractedField, err := extractField(actual, matcher.Field, "HaveField") if err != nil { return false, err } - var isMatcher bool - matcher.expectedMatcher, isMatcher = matcher.Expected.(omegaMatcher) - if !isMatcher { - matcher.expectedMatcher = &EqualMatcher{Expected: matcher.Expected} - } - - return matcher.expectedMatcher.Match(matcher.extractedField) + return matcher.expectedMatcher().Match(extractedField) } func (matcher *HaveFieldMatcher) FailureMessage(actual interface{}) (message string) { + 
extractedField, err := extractField(actual, matcher.Field, "HaveField") + if err != nil { + // this really shouldn't happen + return fmt.Sprintf("Failed to extract field '%s': %s", matcher.Field, err) + } message = fmt.Sprintf("Value for field '%s' failed to satisfy matcher.\n", matcher.Field) - message += matcher.expectedMatcher.FailureMessage(matcher.extractedField) + message += matcher.expectedMatcher().FailureMessage(extractedField) return message } func (matcher *HaveFieldMatcher) NegatedFailureMessage(actual interface{}) (message string) { + extractedField, err := extractField(actual, matcher.Field, "HaveField") + if err != nil { + // this really shouldn't happen + return fmt.Sprintf("Failed to extract field '%s': %s", matcher.Field, err) + } message = fmt.Sprintf("Value for field '%s' satisfied matcher, but should not have.\n", matcher.Field) - message += matcher.expectedMatcher.NegatedFailureMessage(matcher.extractedField) + message += matcher.expectedMatcher().NegatedFailureMessage(extractedField) return message } diff --git a/vendor/github.com/onsi/gomega/types/types.go b/vendor/github.com/onsi/gomega/types/types.go index 7c7adb941..30f2beed3 100644 --- a/vendor/github.com/onsi/gomega/types/types.go +++ b/vendor/github.com/onsi/gomega/types/types.go @@ -29,6 +29,8 @@ type Gomega interface { SetDefaultEventuallyPollingInterval(time.Duration) SetDefaultConsistentlyDuration(time.Duration) SetDefaultConsistentlyPollingInterval(time.Duration) + EnforceDefaultTimeoutsWhenUsingContexts() + DisableDefaultTimeoutsWhenUsingContext() } // All Gomega matchers must implement the GomegaMatcher interface diff --git a/vendor/github.com/openshift/api/.ci-operator.yaml b/vendor/github.com/openshift/api/.ci-operator.yaml index 64887a08b..7c15f83e3 100644 --- a/vendor/github.com/openshift/api/.ci-operator.yaml +++ b/vendor/github.com/openshift/api/.ci-operator.yaml @@ -1,4 +1,4 @@ build_root_image: name: release namespace: openshift - tag: rhel-9-release-golang-1.22-openshift-4.18 + tag: rhel-9-release-golang-1.23-openshift-4.19 diff --git a/vendor/github.com/openshift/api/.golangci.yaml b/vendor/github.com/openshift/api/.golangci.yaml new file mode 100644 index 000000000..848960e94 --- /dev/null +++ b/vendor/github.com/openshift/api/.golangci.yaml @@ -0,0 +1,25 @@ +linters-settings: + custom: + kal: + type: "module" + description: KAL is the Kube-API-Linter and lints Kube like APIs based on API conventions and best practices. + settings: + linters: + enable: + - "maxlength" + - "nobools" + - "statussubresource" + lintersConfig: + conditions: + isFirstField: Warn + useProtobuf: Ignore + usePatchStrategy: Ignore +linters: + disable-all: true + enable: + - kal +issues: + # We have a lot of existing issues. + # Want to make sure that those adding new fields have an + # opportunity to fix them when running the linter locally. + max-issues-per-linter: 1000 diff --git a/vendor/github.com/openshift/api/Dockerfile.ocp b/vendor/github.com/openshift/api/Dockerfile.ocp index f815fa9cf..8d50096ed 100644 --- a/vendor/github.com/openshift/api/Dockerfile.ocp +++ b/vendor/github.com/openshift/api/Dockerfile.ocp @@ -1,10 +1,10 @@ -FROM registry.ci.openshift.org/ocp/builder:rhel-9-golang-1.22-openshift-4.18 AS builder +FROM registry.ci.openshift.org/ocp/builder:rhel-9-golang-1.23-openshift-4.19 AS builder WORKDIR /go/src/github.com/openshift/api COPY . . 
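The matchers/have_field.go change above drops the memoized extractedField/expectedMatcher state, which the 1.35.0 changelog calls out as a source of unexpected errors when HaveField is used with async assertions. A minimal sketch of that usage pattern follows; the rollout type and getRollout helper are hypothetical and exist only for illustration:

    package example_test

    import (
        . "github.com/onsi/ginkgo/v2"
        . "github.com/onsi/gomega"
    )

    // rollout is a hypothetical struct used only to exercise HaveField.
    type rollout struct {
        Name   string
        Status struct{ AvailableReplicas int }
    }

    var _ = Describe("HaveField under polling (sketch)", func() {
        // getRollout stands in for a client call; each poll may observe newer state.
        getRollout := func() rollout {
            r := rollout{Name: "demo"}
            r.Status.AvailableReplicas = 3
            return r
        }

        It("matches nested fields with plain values or nested matchers", func() {
            // Dotted paths walk nested structs; Expected may be a value or a matcher.
            // With the stateless matcher the field is re-extracted on every poll,
            // so no state is shared between Match and the failure-message methods.
            Eventually(getRollout).Should(And(
                HaveField("Name", "demo"),
                HaveField("Status.AvailableReplicas", BeNumerically(">", 0)),
            ))
        })
    })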
ENV GO_PACKAGE github.com/openshift/api RUN make build --warn-undefined-variables -FROM registry.ci.openshift.org/ocp/4.18:base-rhel9 +FROM registry.ci.openshift.org/ocp/4.19:base-rhel9 # copy the built binaries to /usr/bin COPY --from=builder /go/src/github.com/openshift/api/render /usr/bin/ diff --git a/vendor/github.com/openshift/api/Makefile b/vendor/github.com/openshift/api/Makefile index 5e6a6b131..6982e4efe 100644 --- a/vendor/github.com/openshift/api/Makefile +++ b/vendor/github.com/openshift/api/Makefile @@ -4,7 +4,7 @@ all: build update: update-codegen-crds RUNTIME ?= podman -RUNTIME_IMAGE_NAME ?= registry.ci.openshift.org/openshift/release:rhel-9-release-golang-1.22-openshift-4.17 +RUNTIME_IMAGE_NAME ?= registry.ci.openshift.org/openshift/release:rhel-9-release-golang-1.23-openshift-4.19 EXCLUDE_DIRS := _output/ dependencymagnet/ hack/ third_party/ tls/ tools/ vendor/ tests/ GO_PACKAGES :=$(addsuffix ...,$(addprefix ./,$(filter-out $(EXCLUDE_DIRS), $(wildcard */)))) @@ -39,6 +39,24 @@ update-codegen-crds: update-scripts # ##################### +# When not otherwise set, diff/lint against the local master branch +PULL_BASE_SHA ?= master + +.PHONY: lint +lint: + hack/golangci-lint.sh run --new-from-rev=${PULL_BASE_SHA} ${EXTRA_ARGS} + +.PHONY: lint-fix +lint-fix: EXTRA_ARGS=--fix +lint-fix: lint + +# Ignore the exit code of the fix lint, it will always error as there are unfixed issues +# that cannot be fixed from historic commits. +.PHONY: verify-lint-fix +verify-lint-fix: + make lint-fix 2>/dev/null || true + git diff --exit-code + .PHONY: verify-scripts verify-scripts: bash -x hack/verify-deepcopy.sh @@ -53,10 +71,9 @@ verify-scripts: bash -x hack/verify-prerelease-lifecycle-gen.sh hack/verify-payload-crds.sh hack/verify-payload-featuregates.sh - hack/verify-promoted-features-pass-tests.sh .PHONY: verify -verify: verify-scripts verify-crd-schema verify-codegen-crds +verify: verify-scripts lint verify-crd-schema verify-codegen-crds .PHONY: verify-codegen-crds verify-codegen-crds: @@ -66,6 +83,10 @@ verify-codegen-crds: verify-crd-schema: bash -x hack/verify-crd-schema-checker.sh +.PHONY: verify-feature-promotion +verify-feature-promotion: + hack/verify-promoted-features-pass-tests.sh + .PHONY: verify-% verify-%: make $* diff --git a/vendor/github.com/openshift/api/OWNERS b/vendor/github.com/openshift/api/OWNERS index 2e956a47d..ff904b63a 100644 --- a/vendor/github.com/openshift/api/OWNERS +++ b/vendor/github.com/openshift/api/OWNERS @@ -1,19 +1,7 @@ reviewers: - deads2k - - derekwaynecarr - JoelSpeed - - knobunc - - sjenning - - mfojtik - - soltysh - - bparees + - everettraven approvers: - - bparees - deads2k - - derekwaynecarr - JoelSpeed - - knobunc - - mfojtik - - sjenning - - soltysh - - spadgett diff --git a/vendor/github.com/openshift/api/README.md b/vendor/github.com/openshift/api/README.md index 2f503a88d..934bcd329 100644 --- a/vendor/github.com/openshift/api/README.md +++ b/vendor/github.com/openshift/api/README.md @@ -111,6 +111,25 @@ conventions](https://github.com/openshift/enhancements/blob/master/CONVENTIONS.m and then follow the instructions below to regenerate CRDs (if necessary) and submit a pull request with your new API definitions and generated files. +New APIs (new CRDs) must be added first as an unstable API (v1alpha1). +Once the feature is more developed, and ready to be promoted to stable, the API can be promoted to v1. + +### Why do we start with v1alpha1? 
+ +By starting an API as a v1alpha1, we can iterate on the API with the ability to make breaking changes. +We can make changes to the schema, change validations, change entire types and even serialization without worry. + +When changes are made to an API, any existing client code will need to be updated to match. +If there are breaking changes (such as changing the serialization), then this requires a new version of the API. + +If we did not bump the API version for each breaking change, a client, generated prior to the breaking change, +would panic when it tried to deserialize the new serialization of the API. + +If, during development of a feature, we need to make a breaking change, we should move the feature to v1alpha2 (or v1alpha3, etc), +until we reach a version that we are happy to promote to v1. + +Do not make changes to the API when promoting the feature to v1. + ### Adding a new stable API (v1) When copying, it matters which `// +foo` markers are two comments blocks up and which are one comment block up. diff --git a/vendor/github.com/openshift/api/apiserver/v1/types_apirequestcount.go b/vendor/github.com/openshift/api/apiserver/v1/types_apirequestcount.go index eb4918a66..3771fa21d 100644 --- a/vendor/github.com/openshift/api/apiserver/v1/types_apirequestcount.go +++ b/vendor/github.com/openshift/api/apiserver/v1/types_apirequestcount.go @@ -35,7 +35,6 @@ type APIRequestCount struct { metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // spec defines the characteristics of the resource. - // +kubebuilder:validation:Required // +required Spec APIRequestCountSpec `json:"spec"` @@ -58,9 +57,10 @@ type APIRequestCountSpec struct { type APIRequestCountStatus struct { // conditions contains details of the current status of this API Resource. - // +patchMergeKey=type - // +patchStrategy=merge - Conditions []metav1.Condition `json:"conditions" patchStrategy:"merge" patchMergeKey:"type"` + // +listType=map + // +listMapKey=type + // +optional + Conditions []metav1.Condition `json:"conditions"` // removedInRelease is when the API will be removed. // +kubebuilder:validation:MinLength=0 @@ -126,7 +126,7 @@ type PerNodeAPIRequestLog struct { // PerUserAPIRequestCount contains logs of a user's requests. type PerUserAPIRequestCount struct { - // userName that made the request. + // username that made the request. // +kubebuilder:validation:MaxLength=512 UserName string `json:"username"` diff --git a/vendor/github.com/openshift/api/apiserver/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/apiserver/v1/zz_generated.swagger_doc_generated.go index 27d74b6c1..b3d6b615f 100644 --- a/vendor/github.com/openshift/api/apiserver/v1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/apiserver/v1/zz_generated.swagger_doc_generated.go @@ -74,7 +74,7 @@ func (PerResourceAPIRequestLog) SwaggerDoc() map[string]string { var map_PerUserAPIRequestCount = map[string]string{ "": "PerUserAPIRequestCount contains logs of a user's requests.", - "username": "userName that made the request.", + "username": "username that made the request.", "userAgent": "userAgent that made the request. The same user often has multiple binaries which connect (pods with many containers). The different binaries will have different userAgents, but the same user. 
In addition, we have userAgents with version information embedded and the userName isn't likely to change.", "requestCount": "requestCount of requests by the user across all verbs.", "byVerb": "byVerb details by verb.", diff --git a/vendor/github.com/openshift/api/apps/v1/generated.proto b/vendor/github.com/openshift/api/apps/v1/generated.proto index 010f36b95..6f50fcaf9 100644 --- a/vendor/github.com/openshift/api/apps/v1/generated.proto +++ b/vendor/github.com/openshift/api/apps/v1/generated.proto @@ -15,39 +15,39 @@ option go_package = "github.com/openshift/api/apps/v1"; // CustomDeploymentStrategyParams are the input to the Custom deployment strategy. message CustomDeploymentStrategyParams { - // Image specifies a container image which can carry out a deployment. + // image specifies a container image which can carry out a deployment. optional string image = 1; - // Environment holds the environment which will be given to the container for Image. + // environment holds the environment which will be given to the container for Image. repeated .k8s.io.api.core.v1.EnvVar environment = 2; - // Command is optional and overrides CMD in the container Image. + // command is optional and overrides CMD in the container Image. repeated string command = 3; } // DeploymentCause captures information about a particular cause of a deployment. message DeploymentCause { - // Type of the trigger that resulted in the creation of a new deployment + // type of the trigger that resulted in the creation of a new deployment optional string type = 1; - // ImageTrigger contains the image trigger details, if this trigger was fired based on an image change + // imageTrigger contains the image trigger details, if this trigger was fired based on an image change optional DeploymentCauseImageTrigger imageTrigger = 2; } // DeploymentCauseImageTrigger represents details about the cause of a deployment originating // from an image change trigger message DeploymentCauseImageTrigger { - // From is a reference to the changed object which triggered a deployment. The field may have + // from is a reference to the changed object which triggered a deployment. The field may have // the kinds DockerImage, ImageStreamTag, or ImageStreamImage. optional .k8s.io.api.core.v1.ObjectReference from = 1; } // DeploymentCondition describes the state of a deployment config at a certain point. message DeploymentCondition { - // Type of deployment condition. + // type of deployment condition. optional string type = 1; - // Status of the condition, one of True, False, Unknown. + // status of the condition, one of True, False, Unknown. optional string status = 2; // The last time this condition was updated. @@ -81,10 +81,10 @@ message DeploymentConfig { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // Spec represents a desired deployment state and how to deploy to it. + // spec represents a desired deployment state and how to deploy to it. optional DeploymentConfigSpec spec = 2; - // Status represents the current deployment state. + // status represents the current deployment state. 
// +optional optional DeploymentConfigStatus status = 3; } @@ -98,7 +98,7 @@ message DeploymentConfigList { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // Items is a list of deployment configs + // items is a list of deployment configs repeated DeploymentConfig items = 2; } @@ -107,108 +107,108 @@ message DeploymentConfigList { // Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). // +openshift:compatibility-gen:level=1 message DeploymentConfigRollback { - // Name of the deployment config that will be rolled back. + // name of the deployment config that will be rolled back. optional string name = 1; - // UpdatedAnnotations is a set of new annotations that will be added in the deployment config. + // updatedAnnotations is a set of new annotations that will be added in the deployment config. map updatedAnnotations = 2; - // Spec defines the options to rollback generation. + // spec defines the options to rollback generation. optional DeploymentConfigRollbackSpec spec = 3; } // DeploymentConfigRollbackSpec represents the options for rollback generation. message DeploymentConfigRollbackSpec { - // From points to a ReplicationController which is a deployment. + // from points to a ReplicationController which is a deployment. optional .k8s.io.api.core.v1.ObjectReference from = 1; - // Revision to rollback to. If set to 0, rollback to the last revision. + // revision to rollback to. If set to 0, rollback to the last revision. optional int64 revision = 2; - // IncludeTriggers specifies whether to include config Triggers. + // includeTriggers specifies whether to include config Triggers. optional bool includeTriggers = 3; - // IncludeTemplate specifies whether to include the PodTemplateSpec. + // includeTemplate specifies whether to include the PodTemplateSpec. optional bool includeTemplate = 4; - // IncludeReplicationMeta specifies whether to include the replica count and selector. + // includeReplicationMeta specifies whether to include the replica count and selector. optional bool includeReplicationMeta = 5; - // IncludeStrategy specifies whether to include the deployment Strategy. + // includeStrategy specifies whether to include the deployment Strategy. optional bool includeStrategy = 6; } // DeploymentConfigSpec represents the desired state of the deployment. message DeploymentConfigSpec { - // Strategy describes how a deployment is executed. + // strategy describes how a deployment is executed. // +optional optional DeploymentStrategy strategy = 1; - // MinReadySeconds is the minimum number of seconds for which a newly created pod should + // minReadySeconds is the minimum number of seconds for which a newly created pod should // be ready without any of its container crashing, for it to be considered available. // Defaults to 0 (pod will be considered available as soon as it is ready) optional int32 minReadySeconds = 9; - // Triggers determine how updates to a DeploymentConfig result in new deployments. If no triggers + // triggers determine how updates to a DeploymentConfig result in new deployments. If no triggers // are defined, a new deployment can only occur as a result of an explicit client update to the // DeploymentConfig with a new LatestVersion. If null, defaults to having a config change trigger. 
// +optional optional DeploymentTriggerPolicies triggers = 2; - // Replicas is the number of desired replicas. + // replicas is the number of desired replicas. // +optional optional int32 replicas = 3; - // RevisionHistoryLimit is the number of old ReplicationControllers to retain to allow for rollbacks. + // revisionHistoryLimit is the number of old ReplicationControllers to retain to allow for rollbacks. // This field is a pointer to allow for differentiation between an explicit zero and not specified. // Defaults to 10. (This only applies to DeploymentConfigs created via the new group API resource, not the legacy resource.) optional int32 revisionHistoryLimit = 4; - // Test ensures that this deployment config will have zero replicas except while a deployment is running. This allows the + // test ensures that this deployment config will have zero replicas except while a deployment is running. This allows the // deployment config to be used as a continuous deployment test - triggering on images, running the deployment, and then succeeding // or failing. Post strategy hooks and After actions can be used to integrate successful deployment with an action. // +optional optional bool test = 5; - // Paused indicates that the deployment config is paused resulting in no new deployments on template + // paused indicates that the deployment config is paused resulting in no new deployments on template // changes or changes in the template caused by other triggers. optional bool paused = 6; - // Selector is a label query over pods that should match the Replicas count. + // selector is a label query over pods that should match the Replicas count. map selector = 7; - // Template is the object that describes the pod that will be created if + // template is the object that describes the pod that will be created if // insufficient replicas are detected. optional .k8s.io.api.core.v1.PodTemplateSpec template = 8; } // DeploymentConfigStatus represents the current deployment state. message DeploymentConfigStatus { - // LatestVersion is used to determine whether the current deployment associated with a deployment + // latestVersion is used to determine whether the current deployment associated with a deployment // config is out of sync. optional int64 latestVersion = 1; - // ObservedGeneration is the most recent generation observed by the deployment config controller. + // observedGeneration is the most recent generation observed by the deployment config controller. optional int64 observedGeneration = 2; - // Replicas is the total number of pods targeted by this deployment config. + // replicas is the total number of pods targeted by this deployment config. optional int32 replicas = 3; - // UpdatedReplicas is the total number of non-terminated pods targeted by this deployment config + // updatedReplicas is the total number of non-terminated pods targeted by this deployment config // that have the desired template spec. optional int32 updatedReplicas = 4; - // AvailableReplicas is the total number of available pods targeted by this deployment config. + // availableReplicas is the total number of available pods targeted by this deployment config. optional int32 availableReplicas = 5; - // UnavailableReplicas is the total number of unavailable pods targeted by this deployment config. + // unavailableReplicas is the total number of unavailable pods targeted by this deployment config. optional int32 unavailableReplicas = 6; - // Details are the reasons for the update to this deployment config. 
+ // details are the reasons for the update to this deployment config. // This could be based on a change made by the user or caused by an automatic trigger optional DeploymentDetails details = 7; - // Conditions represents the latest available observations of a deployment config's current state. + // conditions represents the latest available observations of a deployment config's current state. // +patchMergeKey=type // +patchStrategy=merge repeated DeploymentCondition conditions = 8; @@ -219,10 +219,10 @@ message DeploymentConfigStatus { // DeploymentDetails captures information about the causes of a deployment. message DeploymentDetails { - // Message is the user specified change message, if this deployment was triggered manually by the user + // message is the user specified change message, if this deployment was triggered manually by the user optional string message = 1; - // Causes are extended data associated with all the causes for creating a new deployment + // causes are extended data associated with all the causes for creating a new deployment repeated DeploymentCause causes = 2; } @@ -241,7 +241,7 @@ message DeploymentLogOptions { // The container for which to stream logs. Defaults to only container if there is one container in the pod. optional string container = 1; - // Follow if true indicates that the build log should be streamed until + // follow if true indicates that the build log should be streamed until // the build terminates. optional bool follow = 2; @@ -273,12 +273,12 @@ message DeploymentLogOptions { // slightly more or slightly less than the specified limit. optional int64 limitBytes = 8; - // NoWait if true causes the call to return immediately even if the deployment + // nowait if true causes the call to return immediately even if the deployment // is not available yet. Otherwise the server will wait until the deployment has started. // TODO: Fix the tag to 'noWait' in v2 optional bool nowait = 9; - // Version of the deployment for which to view logs. + // version of the deployment for which to view logs. optional int64 version = 10; } @@ -287,17 +287,17 @@ message DeploymentLogOptions { // Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). // +openshift:compatibility-gen:level=1 message DeploymentRequest { - // Name of the deployment config for requesting a new deployment. + // name of the deployment config for requesting a new deployment. optional string name = 1; - // Latest will update the deployment config with the latest state from all triggers. + // latest will update the deployment config with the latest state from all triggers. optional bool latest = 2; - // Force will try to force a new deployment to run. If the deployment config is paused, + // force will try to force a new deployment to run. If the deployment config is paused, // then setting this to true will return an Invalid error. optional bool force = 3; - // ExcludeTriggers instructs the instantiator to avoid processing the specified triggers. + // excludeTriggers instructs the instantiator to avoid processing the specified triggers. // This field overrides the triggers from latest and allows clients to control specific // logic. This field is ignored if not specified. repeated string excludeTriggers = 4; @@ -305,53 +305,53 @@ message DeploymentRequest { // DeploymentStrategy describes how to perform a deployment. message DeploymentStrategy { - // Type is the name of a deployment strategy. 
+ // type is the name of a deployment strategy. // +optional optional string type = 1; - // CustomParams are the input to the Custom deployment strategy, and may also + // customParams are the input to the Custom deployment strategy, and may also // be specified for the Recreate and Rolling strategies to customize the execution // process that runs the deployment. optional CustomDeploymentStrategyParams customParams = 2; - // RecreateParams are the input to the Recreate deployment strategy. + // recreateParams are the input to the Recreate deployment strategy. optional RecreateDeploymentStrategyParams recreateParams = 3; - // RollingParams are the input to the Rolling deployment strategy. + // rollingParams are the input to the Rolling deployment strategy. optional RollingDeploymentStrategyParams rollingParams = 4; - // Resources contains resource requirements to execute the deployment and any hooks. + // resources contains resource requirements to execute the deployment and any hooks. optional .k8s.io.api.core.v1.ResourceRequirements resources = 5; - // Labels is a set of key, value pairs added to custom deployer and lifecycle pre/post hook pods. + // labels is a set of key, value pairs added to custom deployer and lifecycle pre/post hook pods. map labels = 6; - // Annotations is a set of key, value pairs added to custom deployer and lifecycle pre/post hook pods. + // annotations is a set of key, value pairs added to custom deployer and lifecycle pre/post hook pods. map annotations = 7; - // ActiveDeadlineSeconds is the duration in seconds that the deployer pods for this deployment + // activeDeadlineSeconds is the duration in seconds that the deployer pods for this deployment // config may be active on a node before the system actively tries to terminate them. optional int64 activeDeadlineSeconds = 8; } // DeploymentTriggerImageChangeParams represents the parameters to the ImageChange trigger. message DeploymentTriggerImageChangeParams { - // Automatic means that the detection of a new tag value should result in an image update + // automatic means that the detection of a new tag value should result in an image update // inside the pod template. optional bool automatic = 1; - // ContainerNames is used to restrict tag updates to the specified set of container names in a pod. + // containerNames is used to restrict tag updates to the specified set of container names in a pod. // If multiple triggers point to the same containers, the resulting behavior is undefined. Future // API versions will make this a validation error. If ContainerNames does not point to a valid container, // the trigger will be ignored. Future API versions will make this a validation error. repeated string containerNames = 2; - // From is a reference to an image stream tag to watch for changes. From.Name is the only + // from is a reference to an image stream tag to watch for changes. From.Name is the only // required subfield - if From.Namespace is blank, the namespace of the current deployment // trigger will be used. optional .k8s.io.api.core.v1.ObjectReference from = 3; - // LastTriggeredImage is the last image to be triggered. + // lastTriggeredImage is the last image to be triggered. optional string lastTriggeredImage = 4; } @@ -366,10 +366,10 @@ message DeploymentTriggerPolicies { // DeploymentTriggerPolicy describes a policy for a single trigger that results in a new deployment. 
message DeploymentTriggerPolicy { - // Type of the trigger + // type of the trigger optional string type = 1; - // ImageChangeParams represents the parameters for the ImageChange trigger. + // imageChangeParams represents the parameters for the ImageChange trigger. optional DeploymentTriggerImageChangeParams imageChangeParams = 2; } @@ -377,17 +377,17 @@ message DeploymentTriggerPolicy { // based on the specified container which is assumed to be part of the // deployment template. message ExecNewPodHook { - // Command is the action command and its arguments. + // command is the action command and its arguments. repeated string command = 1; - // Env is a set of environment variables to supply to the hook pod's container. + // env is a set of environment variables to supply to the hook pod's container. repeated .k8s.io.api.core.v1.EnvVar env = 2; - // ContainerName is the name of a container in the deployment pod template + // containerName is the name of a container in the deployment pod template // whose container image will be used for the hook pod's container. optional string containerName = 3; - // Volumes is a list of named volumes from the pod template which should be + // volumes is a list of named volumes from the pod template which should be // copied to the hook pod. Volumes names not found in pod spec are ignored. // An empty list means no volumes will be copied. repeated string volumes = 4; @@ -395,32 +395,32 @@ message ExecNewPodHook { // LifecycleHook defines a specific deployment lifecycle action. Only one type of action may be specified at any time. message LifecycleHook { - // FailurePolicy specifies what action to take if the hook fails. + // failurePolicy specifies what action to take if the hook fails. optional string failurePolicy = 1; - // ExecNewPod specifies the options for a lifecycle hook backed by a pod. + // execNewPod specifies the options for a lifecycle hook backed by a pod. optional ExecNewPodHook execNewPod = 2; - // TagImages instructs the deployer to tag the current image referenced under a container onto an image stream tag. + // tagImages instructs the deployer to tag the current image referenced under a container onto an image stream tag. repeated TagImageHook tagImages = 3; } // RecreateDeploymentStrategyParams are the input to the Recreate deployment // strategy. message RecreateDeploymentStrategyParams { - // TimeoutSeconds is the time to wait for updates before giving up. If the + // timeoutSeconds is the time to wait for updates before giving up. If the // value is nil, a default will be used. optional int64 timeoutSeconds = 1; - // Pre is a lifecycle hook which is executed before the strategy manipulates + // pre is a lifecycle hook which is executed before the strategy manipulates // the deployment. All LifecycleHookFailurePolicy values are supported. optional LifecycleHook pre = 2; - // Mid is a lifecycle hook which is executed while the deployment is scaled down to zero before the first new + // mid is a lifecycle hook which is executed while the deployment is scaled down to zero before the first new // pod is created. All LifecycleHookFailurePolicy values are supported. optional LifecycleHook mid = 3; - // Post is a lifecycle hook which is executed after the strategy has + // post is a lifecycle hook which is executed after the strategy has // finished all deployment logic. All LifecycleHookFailurePolicy values are supported. 
optional LifecycleHook post = 4; } @@ -428,19 +428,19 @@ message RecreateDeploymentStrategyParams { // RollingDeploymentStrategyParams are the input to the Rolling deployment // strategy. message RollingDeploymentStrategyParams { - // UpdatePeriodSeconds is the time to wait between individual pod updates. + // updatePeriodSeconds is the time to wait between individual pod updates. // If the value is nil, a default will be used. optional int64 updatePeriodSeconds = 1; - // IntervalSeconds is the time to wait between polling deployment status + // intervalSeconds is the time to wait between polling deployment status // after update. If the value is nil, a default will be used. optional int64 intervalSeconds = 2; - // TimeoutSeconds is the time to wait for updates before giving up. If the + // timeoutSeconds is the time to wait for updates before giving up. If the // value is nil, a default will be used. optional int64 timeoutSeconds = 3; - // MaxUnavailable is the maximum number of pods that can be unavailable + // maxUnavailable is the maximum number of pods that can be unavailable // during the update. Value can be an absolute number (ex: 5) or a // percentage of total pods at the start of update (ex: 10%). Absolute // number is calculated from percentage by rounding down. @@ -454,7 +454,7 @@ message RollingDeploymentStrategyParams { // all times during the update. optional .k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 4; - // MaxSurge is the maximum number of pods that can be scheduled above the + // maxSurge is the maximum number of pods that can be scheduled above the // original number of pods. Value can be an absolute number (ex: 5) or a // percentage of total pods at the start of the update (ex: 10%). Absolute // number is calculated from percentage by rounding up. @@ -468,11 +468,11 @@ message RollingDeploymentStrategyParams { // pods. optional .k8s.io.apimachinery.pkg.util.intstr.IntOrString maxSurge = 5; - // Pre is a lifecycle hook which is executed before the deployment process + // pre is a lifecycle hook which is executed before the deployment process // begins. All LifecycleHookFailurePolicy values are supported. optional LifecycleHook pre = 7; - // Post is a lifecycle hook which is executed after the strategy has + // post is a lifecycle hook which is executed after the strategy has // finished all deployment logic. All LifecycleHookFailurePolicy values // are supported. optional LifecycleHook post = 8; @@ -480,11 +480,11 @@ message RollingDeploymentStrategyParams { // TagImageHook is a request to tag the image in a particular container onto an ImageStreamTag. message TagImageHook { - // ContainerName is the name of a container in the deployment config whose image value will be used as the source of the tag. If there is only a single + // containerName is the name of a container in the deployment config whose image value will be used as the source of the tag. If there is only a single // container this value will be defaulted to the name of that container. optional string containerName = 1; - // To is the target ImageStreamTag to set the container's image onto. + // to is the target ImageStreamTag to set the container's image onto. 
optional .k8s.io.api.core.v1.ObjectReference to = 2; } diff --git a/vendor/github.com/openshift/api/apps/v1/types.go b/vendor/github.com/openshift/api/apps/v1/types.go index 1465aea27..619c30e82 100644 --- a/vendor/github.com/openshift/api/apps/v1/types.go +++ b/vendor/github.com/openshift/api/apps/v1/types.go @@ -38,81 +38,81 @@ type DeploymentConfig struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Spec represents a desired deployment state and how to deploy to it. + // spec represents a desired deployment state and how to deploy to it. Spec DeploymentConfigSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` - // Status represents the current deployment state. + // status represents the current deployment state. // +optional Status DeploymentConfigStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } // DeploymentConfigSpec represents the desired state of the deployment. type DeploymentConfigSpec struct { - // Strategy describes how a deployment is executed. + // strategy describes how a deployment is executed. // +optional Strategy DeploymentStrategy `json:"strategy" protobuf:"bytes,1,opt,name=strategy"` - // MinReadySeconds is the minimum number of seconds for which a newly created pod should + // minReadySeconds is the minimum number of seconds for which a newly created pod should // be ready without any of its container crashing, for it to be considered available. // Defaults to 0 (pod will be considered available as soon as it is ready) MinReadySeconds int32 `json:"minReadySeconds,omitempty" protobuf:"varint,9,opt,name=minReadySeconds"` - // Triggers determine how updates to a DeploymentConfig result in new deployments. If no triggers + // triggers determine how updates to a DeploymentConfig result in new deployments. If no triggers // are defined, a new deployment can only occur as a result of an explicit client update to the // DeploymentConfig with a new LatestVersion. If null, defaults to having a config change trigger. // +optional Triggers DeploymentTriggerPolicies `json:"triggers" protobuf:"bytes,2,rep,name=triggers"` - // Replicas is the number of desired replicas. + // replicas is the number of desired replicas. // +optional Replicas int32 `json:"replicas" protobuf:"varint,3,opt,name=replicas"` - // RevisionHistoryLimit is the number of old ReplicationControllers to retain to allow for rollbacks. + // revisionHistoryLimit is the number of old ReplicationControllers to retain to allow for rollbacks. // This field is a pointer to allow for differentiation between an explicit zero and not specified. // Defaults to 10. (This only applies to DeploymentConfigs created via the new group API resource, not the legacy resource.) RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty" protobuf:"varint,4,opt,name=revisionHistoryLimit"` - // Test ensures that this deployment config will have zero replicas except while a deployment is running. This allows the + // test ensures that this deployment config will have zero replicas except while a deployment is running. This allows the // deployment config to be used as a continuous deployment test - triggering on images, running the deployment, and then succeeding // or failing. Post strategy hooks and After actions can be used to integrate successful deployment with an action. 
// +optional Test bool `json:"test" protobuf:"varint,5,opt,name=test"` - // Paused indicates that the deployment config is paused resulting in no new deployments on template + // paused indicates that the deployment config is paused resulting in no new deployments on template // changes or changes in the template caused by other triggers. Paused bool `json:"paused,omitempty" protobuf:"varint,6,opt,name=paused"` - // Selector is a label query over pods that should match the Replicas count. + // selector is a label query over pods that should match the Replicas count. Selector map[string]string `json:"selector,omitempty" protobuf:"bytes,7,rep,name=selector"` - // Template is the object that describes the pod that will be created if + // template is the object that describes the pod that will be created if // insufficient replicas are detected. Template *corev1.PodTemplateSpec `json:"template,omitempty" protobuf:"bytes,8,opt,name=template"` } // DeploymentStrategy describes how to perform a deployment. type DeploymentStrategy struct { - // Type is the name of a deployment strategy. + // type is the name of a deployment strategy. // +optional Type DeploymentStrategyType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type,casttype=DeploymentStrategyType"` - // CustomParams are the input to the Custom deployment strategy, and may also + // customParams are the input to the Custom deployment strategy, and may also // be specified for the Recreate and Rolling strategies to customize the execution // process that runs the deployment. CustomParams *CustomDeploymentStrategyParams `json:"customParams,omitempty" protobuf:"bytes,2,opt,name=customParams"` - // RecreateParams are the input to the Recreate deployment strategy. + // recreateParams are the input to the Recreate deployment strategy. RecreateParams *RecreateDeploymentStrategyParams `json:"recreateParams,omitempty" protobuf:"bytes,3,opt,name=recreateParams"` - // RollingParams are the input to the Rolling deployment strategy. + // rollingParams are the input to the Rolling deployment strategy. RollingParams *RollingDeploymentStrategyParams `json:"rollingParams,omitempty" protobuf:"bytes,4,opt,name=rollingParams"` - // Resources contains resource requirements to execute the deployment and any hooks. + // resources contains resource requirements to execute the deployment and any hooks. Resources corev1.ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,5,opt,name=resources"` - // Labels is a set of key, value pairs added to custom deployer and lifecycle pre/post hook pods. + // labels is a set of key, value pairs added to custom deployer and lifecycle pre/post hook pods. Labels map[string]string `json:"labels,omitempty" protobuf:"bytes,6,rep,name=labels"` - // Annotations is a set of key, value pairs added to custom deployer and lifecycle pre/post hook pods. + // annotations is a set of key, value pairs added to custom deployer and lifecycle pre/post hook pods. Annotations map[string]string `json:"annotations,omitempty" protobuf:"bytes,7,rep,name=annotations"` - // ActiveDeadlineSeconds is the duration in seconds that the deployer pods for this deployment + // activeDeadlineSeconds is the duration in seconds that the deployer pods for this deployment // config may be active on a node before the system actively tries to terminate them. 
ActiveDeadlineSeconds *int64 `json:"activeDeadlineSeconds,omitempty" protobuf:"varint,8,opt,name=activeDeadlineSeconds"` } @@ -131,27 +131,27 @@ const ( // CustomDeploymentStrategyParams are the input to the Custom deployment strategy. type CustomDeploymentStrategyParams struct { - // Image specifies a container image which can carry out a deployment. + // image specifies a container image which can carry out a deployment. Image string `json:"image,omitempty" protobuf:"bytes,1,opt,name=image"` - // Environment holds the environment which will be given to the container for Image. + // environment holds the environment which will be given to the container for Image. Environment []corev1.EnvVar `json:"environment,omitempty" protobuf:"bytes,2,rep,name=environment"` - // Command is optional and overrides CMD in the container Image. + // command is optional and overrides CMD in the container Image. Command []string `json:"command,omitempty" protobuf:"bytes,3,rep,name=command"` } // RecreateDeploymentStrategyParams are the input to the Recreate deployment // strategy. type RecreateDeploymentStrategyParams struct { - // TimeoutSeconds is the time to wait for updates before giving up. If the + // timeoutSeconds is the time to wait for updates before giving up. If the // value is nil, a default will be used. TimeoutSeconds *int64 `json:"timeoutSeconds,omitempty" protobuf:"varint,1,opt,name=timeoutSeconds"` - // Pre is a lifecycle hook which is executed before the strategy manipulates + // pre is a lifecycle hook which is executed before the strategy manipulates // the deployment. All LifecycleHookFailurePolicy values are supported. Pre *LifecycleHook `json:"pre,omitempty" protobuf:"bytes,2,opt,name=pre"` - // Mid is a lifecycle hook which is executed while the deployment is scaled down to zero before the first new + // mid is a lifecycle hook which is executed while the deployment is scaled down to zero before the first new // pod is created. All LifecycleHookFailurePolicy values are supported. Mid *LifecycleHook `json:"mid,omitempty" protobuf:"bytes,3,opt,name=mid"` - // Post is a lifecycle hook which is executed after the strategy has + // post is a lifecycle hook which is executed after the strategy has // finished all deployment logic. All LifecycleHookFailurePolicy values are supported. Post *LifecycleHook `json:"post,omitempty" protobuf:"bytes,4,opt,name=post"` } @@ -159,16 +159,16 @@ type RecreateDeploymentStrategyParams struct { // RollingDeploymentStrategyParams are the input to the Rolling deployment // strategy. type RollingDeploymentStrategyParams struct { - // UpdatePeriodSeconds is the time to wait between individual pod updates. + // updatePeriodSeconds is the time to wait between individual pod updates. // If the value is nil, a default will be used. UpdatePeriodSeconds *int64 `json:"updatePeriodSeconds,omitempty" protobuf:"varint,1,opt,name=updatePeriodSeconds"` - // IntervalSeconds is the time to wait between polling deployment status + // intervalSeconds is the time to wait between polling deployment status // after update. If the value is nil, a default will be used. IntervalSeconds *int64 `json:"intervalSeconds,omitempty" protobuf:"varint,2,opt,name=intervalSeconds"` - // TimeoutSeconds is the time to wait for updates before giving up. If the + // timeoutSeconds is the time to wait for updates before giving up. If the // value is nil, a default will be used. 
TimeoutSeconds *int64 `json:"timeoutSeconds,omitempty" protobuf:"varint,3,opt,name=timeoutSeconds"` - // MaxUnavailable is the maximum number of pods that can be unavailable + // maxUnavailable is the maximum number of pods that can be unavailable // during the update. Value can be an absolute number (ex: 5) or a // percentage of total pods at the start of update (ex: 10%). Absolute // number is calculated from percentage by rounding down. @@ -181,7 +181,7 @@ type RollingDeploymentStrategyParams struct { // ensuring that at least 70% of original number of pods are available at // all times during the update. MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty" protobuf:"bytes,4,opt,name=maxUnavailable"` - // MaxSurge is the maximum number of pods that can be scheduled above the + // maxSurge is the maximum number of pods that can be scheduled above the // original number of pods. Value can be an absolute number (ex: 5) or a // percentage of total pods at the start of the update (ex: 10%). Absolute // number is calculated from percentage by rounding up. @@ -194,10 +194,10 @@ type RollingDeploymentStrategyParams struct { // pods running at any time during the update is atmost 130% of original // pods. MaxSurge *intstr.IntOrString `json:"maxSurge,omitempty" protobuf:"bytes,5,opt,name=maxSurge"` - // Pre is a lifecycle hook which is executed before the deployment process + // pre is a lifecycle hook which is executed before the deployment process // begins. All LifecycleHookFailurePolicy values are supported. Pre *LifecycleHook `json:"pre,omitempty" protobuf:"bytes,7,opt,name=pre"` - // Post is a lifecycle hook which is executed after the strategy has + // post is a lifecycle hook which is executed after the strategy has // finished all deployment logic. All LifecycleHookFailurePolicy values // are supported. Post *LifecycleHook `json:"post,omitempty" protobuf:"bytes,8,opt,name=post"` @@ -205,13 +205,13 @@ type RollingDeploymentStrategyParams struct { // LifecycleHook defines a specific deployment lifecycle action. Only one type of action may be specified at any time. type LifecycleHook struct { - // FailurePolicy specifies what action to take if the hook fails. + // failurePolicy specifies what action to take if the hook fails. FailurePolicy LifecycleHookFailurePolicy `json:"failurePolicy" protobuf:"bytes,1,opt,name=failurePolicy,casttype=LifecycleHookFailurePolicy"` - // ExecNewPod specifies the options for a lifecycle hook backed by a pod. + // execNewPod specifies the options for a lifecycle hook backed by a pod. ExecNewPod *ExecNewPodHook `json:"execNewPod,omitempty" protobuf:"bytes,2,opt,name=execNewPod"` - // TagImages instructs the deployer to tag the current image referenced under a container onto an image stream tag. + // tagImages instructs the deployer to tag the current image referenced under a container onto an image stream tag. TagImages []TagImageHook `json:"tagImages,omitempty" protobuf:"bytes,3,rep,name=tagImages"` } @@ -231,14 +231,14 @@ const ( // based on the specified container which is assumed to be part of the // deployment template. type ExecNewPodHook struct { - // Command is the action command and its arguments. + // command is the action command and its arguments. Command []string `json:"command" protobuf:"bytes,1,rep,name=command"` - // Env is a set of environment variables to supply to the hook pod's container. + // env is a set of environment variables to supply to the hook pod's container. 
Env []corev1.EnvVar `json:"env,omitempty" protobuf:"bytes,2,rep,name=env"` - // ContainerName is the name of a container in the deployment pod template + // containerName is the name of a container in the deployment pod template // whose container image will be used for the hook pod's container. ContainerName string `json:"containerName" protobuf:"bytes,3,opt,name=containerName"` - // Volumes is a list of named volumes from the pod template which should be + // volumes is a list of named volumes from the pod template which should be // copied to the hook pod. Volumes names not found in pod spec are ignored. // An empty list means no volumes will be copied. Volumes []string `json:"volumes,omitempty" protobuf:"bytes,4,rep,name=volumes"` @@ -246,10 +246,10 @@ type ExecNewPodHook struct { // TagImageHook is a request to tag the image in a particular container onto an ImageStreamTag. type TagImageHook struct { - // ContainerName is the name of a container in the deployment config whose image value will be used as the source of the tag. If there is only a single + // containerName is the name of a container in the deployment config whose image value will be used as the source of the tag. If there is only a single // container this value will be defaulted to the name of that container. ContainerName string `json:"containerName" protobuf:"bytes,1,opt,name=containerName"` - // To is the target ImageStreamTag to set the container's image onto. + // to is the target ImageStreamTag to set the container's image onto. To corev1.ObjectReference `json:"to" protobuf:"bytes,2,opt,name=to"` } @@ -264,9 +264,9 @@ func (t DeploymentTriggerPolicies) String() string { // DeploymentTriggerPolicy describes a policy for a single trigger that results in a new deployment. type DeploymentTriggerPolicy struct { - // Type of the trigger + // type of the trigger Type DeploymentTriggerType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type,casttype=DeploymentTriggerType"` - // ImageChangeParams represents the parameters for the ImageChange trigger. + // imageChangeParams represents the parameters for the ImageChange trigger. ImageChangeParams *DeploymentTriggerImageChangeParams `json:"imageChangeParams,omitempty" protobuf:"bytes,2,opt,name=imageChangeParams"` } @@ -284,42 +284,42 @@ const ( // DeploymentTriggerImageChangeParams represents the parameters to the ImageChange trigger. type DeploymentTriggerImageChangeParams struct { - // Automatic means that the detection of a new tag value should result in an image update + // automatic means that the detection of a new tag value should result in an image update // inside the pod template. Automatic bool `json:"automatic,omitempty" protobuf:"varint,1,opt,name=automatic"` - // ContainerNames is used to restrict tag updates to the specified set of container names in a pod. + // containerNames is used to restrict tag updates to the specified set of container names in a pod. // If multiple triggers point to the same containers, the resulting behavior is undefined. Future // API versions will make this a validation error. If ContainerNames does not point to a valid container, // the trigger will be ignored. Future API versions will make this a validation error. ContainerNames []string `json:"containerNames,omitempty" protobuf:"bytes,2,rep,name=containerNames"` - // From is a reference to an image stream tag to watch for changes. From.Name is the only + // from is a reference to an image stream tag to watch for changes. 
From.Name is the only // required subfield - if From.Namespace is blank, the namespace of the current deployment // trigger will be used. From corev1.ObjectReference `json:"from" protobuf:"bytes,3,opt,name=from"` - // LastTriggeredImage is the last image to be triggered. + // lastTriggeredImage is the last image to be triggered. LastTriggeredImage string `json:"lastTriggeredImage,omitempty" protobuf:"bytes,4,opt,name=lastTriggeredImage"` } // DeploymentConfigStatus represents the current deployment state. type DeploymentConfigStatus struct { - // LatestVersion is used to determine whether the current deployment associated with a deployment + // latestVersion is used to determine whether the current deployment associated with a deployment // config is out of sync. LatestVersion int64 `json:"latestVersion" protobuf:"varint,1,opt,name=latestVersion"` - // ObservedGeneration is the most recent generation observed by the deployment config controller. + // observedGeneration is the most recent generation observed by the deployment config controller. ObservedGeneration int64 `json:"observedGeneration" protobuf:"varint,2,opt,name=observedGeneration"` - // Replicas is the total number of pods targeted by this deployment config. + // replicas is the total number of pods targeted by this deployment config. Replicas int32 `json:"replicas" protobuf:"varint,3,opt,name=replicas"` - // UpdatedReplicas is the total number of non-terminated pods targeted by this deployment config + // updatedReplicas is the total number of non-terminated pods targeted by this deployment config // that have the desired template spec. UpdatedReplicas int32 `json:"updatedReplicas" protobuf:"varint,4,opt,name=updatedReplicas"` - // AvailableReplicas is the total number of available pods targeted by this deployment config. + // availableReplicas is the total number of available pods targeted by this deployment config. AvailableReplicas int32 `json:"availableReplicas" protobuf:"varint,5,opt,name=availableReplicas"` - // UnavailableReplicas is the total number of unavailable pods targeted by this deployment config. + // unavailableReplicas is the total number of unavailable pods targeted by this deployment config. UnavailableReplicas int32 `json:"unavailableReplicas" protobuf:"varint,6,opt,name=unavailableReplicas"` - // Details are the reasons for the update to this deployment config. + // details are the reasons for the update to this deployment config. // This could be based on a change made by the user or caused by an automatic trigger Details *DeploymentDetails `json:"details,omitempty" protobuf:"bytes,7,opt,name=details"` - // Conditions represents the latest available observations of a deployment config's current state. + // conditions represents the latest available observations of a deployment config's current state. // +patchMergeKey=type // +patchStrategy=merge Conditions []DeploymentCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,8,rep,name=conditions"` @@ -329,24 +329,24 @@ type DeploymentConfigStatus struct { // DeploymentDetails captures information about the causes of a deployment. 
 type DeploymentDetails struct {
-	// Message is the user specified change message, if this deployment was triggered manually by the user
+	// message is the user specified change message, if this deployment was triggered manually by the user
 	Message string `json:"message,omitempty" protobuf:"bytes,1,opt,name=message"`
-	// Causes are extended data associated with all the causes for creating a new deployment
+	// causes are extended data associated with all the causes for creating a new deployment
 	Causes []DeploymentCause `json:"causes" protobuf:"bytes,2,rep,name=causes"`
 }
 
 // DeploymentCause captures information about a particular cause of a deployment.
 type DeploymentCause struct {
-	// Type of the trigger that resulted in the creation of a new deployment
+	// type of the trigger that resulted in the creation of a new deployment
 	Type DeploymentTriggerType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=DeploymentTriggerType"`
-	// ImageTrigger contains the image trigger details, if this trigger was fired based on an image change
+	// imageTrigger contains the image trigger details, if this trigger was fired based on an image change
 	ImageTrigger *DeploymentCauseImageTrigger `json:"imageTrigger,omitempty" protobuf:"bytes,2,opt,name=imageTrigger"`
 }
 
 // DeploymentCauseImageTrigger represents details about the cause of a deployment originating
 // from an image change trigger
 type DeploymentCauseImageTrigger struct {
-	// From is a reference to the changed object which triggered a deployment. The field may have
+	// from is a reference to the changed object which triggered a deployment. The field may have
 	// the kinds DockerImage, ImageStreamTag, or ImageStreamImage.
 	From corev1.ObjectReference `json:"from" protobuf:"bytes,1,opt,name=from"`
 }
@@ -381,9 +381,9 @@ const (
 // DeploymentCondition describes the state of a deployment config at a certain point.
 type DeploymentCondition struct {
-	// Type of deployment condition.
+	// type of deployment condition.
 	Type DeploymentConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=DeploymentConditionType"`
-	// Status of the condition, one of True, False, Unknown.
+	// status of the condition, one of True, False, Unknown.
 	Status corev1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/kubernetes/pkg/api/v1.ConditionStatus"`
 	// The last time this condition was updated.
 	LastUpdateTime metav1.Time `json:"lastUpdateTime,omitempty" protobuf:"bytes,6,opt,name=lastUpdateTime"`
@@ -411,7 +411,7 @@ type DeploymentConfigList struct {
 	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
 	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
-	// Items is a list of deployment configs
+	// items is a list of deployment configs
 	Items []DeploymentConfig `json:"items" protobuf:"bytes,2,rep,name=items"`
 }
@@ -426,27 +426,27 @@ type DeploymentConfigList struct {
 // +openshift:compatibility-gen:level=1
 type DeploymentConfigRollback struct {
 	metav1.TypeMeta `json:",inline"`
-	// Name of the deployment config that will be rolled back.
+	// name of the deployment config that will be rolled back.
 	Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
-	// UpdatedAnnotations is a set of new annotations that will be added in the deployment config.
+	// updatedAnnotations is a set of new annotations that will be added in the deployment config.
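The trigger, status, and rollback hunks above are likewise comment-only. A hedged sketch of the ImageChange trigger parameters they describe; the trigger-type constant is outside this diff, so the value is written as a literal, and the container and tag names are hypothetical:

package example

import (
	corev1 "k8s.io/api/core/v1"

	appsv1 "github.com/openshift/api/apps/v1"
)

// imageChangeTrigger redeploys when the watched ImageStreamTag changes,
// restricted to the named container, as described by the comments above.
func imageChangeTrigger() appsv1.DeploymentTriggerPolicy {
	return appsv1.DeploymentTriggerPolicy{
		Type: appsv1.DeploymentTriggerType("ImageChange"),
		ImageChangeParams: &appsv1.DeploymentTriggerImageChangeParams{
			Automatic:      true,
			ContainerNames: []string{"app"},
			From: corev1.ObjectReference{
				Kind: "ImageStreamTag",
				Name: "app:latest",
			},
		},
	}
}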
UpdatedAnnotations map[string]string `json:"updatedAnnotations,omitempty" protobuf:"bytes,2,rep,name=updatedAnnotations"` - // Spec defines the options to rollback generation. + // spec defines the options to rollback generation. Spec DeploymentConfigRollbackSpec `json:"spec" protobuf:"bytes,3,opt,name=spec"` } // DeploymentConfigRollbackSpec represents the options for rollback generation. type DeploymentConfigRollbackSpec struct { - // From points to a ReplicationController which is a deployment. + // from points to a ReplicationController which is a deployment. From corev1.ObjectReference `json:"from" protobuf:"bytes,1,opt,name=from"` - // Revision to rollback to. If set to 0, rollback to the last revision. + // revision to rollback to. If set to 0, rollback to the last revision. Revision int64 `json:"revision,omitempty" protobuf:"varint,2,opt,name=revision"` - // IncludeTriggers specifies whether to include config Triggers. + // includeTriggers specifies whether to include config Triggers. IncludeTriggers bool `json:"includeTriggers" protobuf:"varint,3,opt,name=includeTriggers"` - // IncludeTemplate specifies whether to include the PodTemplateSpec. + // includeTemplate specifies whether to include the PodTemplateSpec. IncludeTemplate bool `json:"includeTemplate" protobuf:"varint,4,opt,name=includeTemplate"` - // IncludeReplicationMeta specifies whether to include the replica count and selector. + // includeReplicationMeta specifies whether to include the replica count and selector. IncludeReplicationMeta bool `json:"includeReplicationMeta" protobuf:"varint,5,opt,name=includeReplicationMeta"` - // IncludeStrategy specifies whether to include the deployment Strategy. + // includeStrategy specifies whether to include the deployment Strategy. IncludeStrategy bool `json:"includeStrategy" protobuf:"varint,6,opt,name=includeStrategy"` } @@ -461,14 +461,14 @@ type DeploymentConfigRollbackSpec struct { // +openshift:compatibility-gen:level=1 type DeploymentRequest struct { metav1.TypeMeta `json:",inline"` - // Name of the deployment config for requesting a new deployment. + // name of the deployment config for requesting a new deployment. Name string `json:"name" protobuf:"bytes,1,opt,name=name"` - // Latest will update the deployment config with the latest state from all triggers. + // latest will update the deployment config with the latest state from all triggers. Latest bool `json:"latest" protobuf:"varint,2,opt,name=latest"` - // Force will try to force a new deployment to run. If the deployment config is paused, + // force will try to force a new deployment to run. If the deployment config is paused, // then setting this to true will return an Invalid error. Force bool `json:"force" protobuf:"varint,3,opt,name=force"` - // ExcludeTriggers instructs the instantiator to avoid processing the specified triggers. + // excludeTriggers instructs the instantiator to avoid processing the specified triggers. // This field overrides the triggers from latest and allows clients to control specific // logic. This field is ignored if not specified. ExcludeTriggers []DeploymentTriggerType `json:"excludeTriggers,omitempty" protobuf:"bytes,4,rep,name=excludeTriggers,casttype=DeploymentTriggerType"` @@ -501,7 +501,7 @@ type DeploymentLogOptions struct { // The container for which to stream logs. Defaults to only container if there is one container in the pod. 
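The DeploymentRequest fields recased above can be sketched as follows (illustrative only; the instantiate call that consumes this object is outside this diff):

package example

import appsv1 "github.com/openshift/api/apps/v1"

// newRollout builds a request for a fresh deployment of the named config,
// using only the fields documented in the hunk above.
func newRollout(name string) appsv1.DeploymentRequest {
	return appsv1.DeploymentRequest{
		Name:   name,
		Latest: true, // pick up the latest state from all triggers
		Force:  true, // the server returns Invalid if the config is paused
		// ExcludeTriggers could list trigger types the instantiator should skip.
	}
}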
Container string `json:"container,omitempty" protobuf:"bytes,1,opt,name=container"` - // Follow if true indicates that the build log should be streamed until + // follow if true indicates that the build log should be streamed until // the build terminates. Follow bool `json:"follow,omitempty" protobuf:"varint,2,opt,name=follow"` // Return previous deployment logs. Defaults to false. @@ -527,11 +527,11 @@ type DeploymentLogOptions struct { // slightly more or slightly less than the specified limit. LimitBytes *int64 `json:"limitBytes,omitempty" protobuf:"varint,8,opt,name=limitBytes"` - // NoWait if true causes the call to return immediately even if the deployment + // nowait if true causes the call to return immediately even if the deployment // is not available yet. Otherwise the server will wait until the deployment has started. // TODO: Fix the tag to 'noWait' in v2 NoWait bool `json:"nowait,omitempty" protobuf:"varint,9,opt,name=nowait"` - // Version of the deployment for which to view logs. + // version of the deployment for which to view logs. Version *int64 `json:"version,omitempty" protobuf:"varint,10,opt,name=version"` } diff --git a/vendor/github.com/openshift/api/apps/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/apps/v1/zz_generated.swagger_doc_generated.go index ab137d59b..55b53c5da 100644 --- a/vendor/github.com/openshift/api/apps/v1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/apps/v1/zz_generated.swagger_doc_generated.go @@ -13,9 +13,9 @@ package v1 // AUTO-GENERATED FUNCTIONS START HERE var map_CustomDeploymentStrategyParams = map[string]string{ "": "CustomDeploymentStrategyParams are the input to the Custom deployment strategy.", - "image": "Image specifies a container image which can carry out a deployment.", - "environment": "Environment holds the environment which will be given to the container for Image.", - "command": "Command is optional and overrides CMD in the container Image.", + "image": "image specifies a container image which can carry out a deployment.", + "environment": "environment holds the environment which will be given to the container for Image.", + "command": "command is optional and overrides CMD in the container Image.", } func (CustomDeploymentStrategyParams) SwaggerDoc() map[string]string { @@ -24,8 +24,8 @@ func (CustomDeploymentStrategyParams) SwaggerDoc() map[string]string { var map_DeploymentCause = map[string]string{ "": "DeploymentCause captures information about a particular cause of a deployment.", - "type": "Type of the trigger that resulted in the creation of a new deployment", - "imageTrigger": "ImageTrigger contains the image trigger details, if this trigger was fired based on an image change", + "type": "type of the trigger that resulted in the creation of a new deployment", + "imageTrigger": "imageTrigger contains the image trigger details, if this trigger was fired based on an image change", } func (DeploymentCause) SwaggerDoc() map[string]string { @@ -34,7 +34,7 @@ func (DeploymentCause) SwaggerDoc() map[string]string { var map_DeploymentCauseImageTrigger = map[string]string{ "": "DeploymentCauseImageTrigger represents details about the cause of a deployment originating from an image change trigger", - "from": "From is a reference to the changed object which triggered a deployment. The field may have the kinds DockerImage, ImageStreamTag, or ImageStreamImage.", + "from": "from is a reference to the changed object which triggered a deployment. 
The field may have the kinds DockerImage, ImageStreamTag, or ImageStreamImage.", } func (DeploymentCauseImageTrigger) SwaggerDoc() map[string]string { @@ -43,8 +43,8 @@ func (DeploymentCauseImageTrigger) SwaggerDoc() map[string]string { var map_DeploymentCondition = map[string]string{ "": "DeploymentCondition describes the state of a deployment config at a certain point.", - "type": "Type of deployment condition.", - "status": "Status of the condition, one of True, False, Unknown.", + "type": "type of deployment condition.", + "status": "status of the condition, one of True, False, Unknown.", "lastUpdateTime": "The last time this condition was updated.", "lastTransitionTime": "The last time the condition transitioned from one status to another.", "reason": "The reason for the condition's last transition.", @@ -58,8 +58,8 @@ func (DeploymentCondition) SwaggerDoc() map[string]string { var map_DeploymentConfig = map[string]string{ "": "Deployment Configs define the template for a pod and manages deploying new images or configuration changes. A single deployment configuration is usually analogous to a single micro-service. Can support many different deployment patterns, including full restart, customizable rolling updates, and fully custom behaviors, as well as pre- and post- deployment hooks. Each individual deployment is represented as a replication controller.\n\nA deployment is \"triggered\" when its configuration is changed or a tag in an Image Stream is changed. Triggers can be disabled to allow manual control over a deployment. The \"strategy\" determines how the deployment is carried out and may be changed at any time. The `latestVersion` field is updated when a new deployment is triggered by any means.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). Deprecated: Use deployments or other means for declarative updates for pods instead.", "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "Spec represents a desired deployment state and how to deploy to it.", - "status": "Status represents the current deployment state.", + "spec": "spec represents a desired deployment state and how to deploy to it.", + "status": "status represents the current deployment state.", } func (DeploymentConfig) SwaggerDoc() map[string]string { @@ -69,7 +69,7 @@ func (DeploymentConfig) SwaggerDoc() map[string]string { var map_DeploymentConfigList = map[string]string{ "": "DeploymentConfigList is a collection of deployment configs.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard list's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items is a list of deployment configs", + "items": "items is a list of deployment configs", } func (DeploymentConfigList) SwaggerDoc() map[string]string { @@ -78,9 +78,9 @@ func (DeploymentConfigList) SwaggerDoc() map[string]string { var map_DeploymentConfigRollback = map[string]string{ "": "DeploymentConfigRollback provides the input to rollback generation.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", - "name": "Name of the deployment config that will be rolled back.", - "updatedAnnotations": "UpdatedAnnotations is a set of new annotations that will be added in the deployment config.", - "spec": "Spec defines the options to rollback generation.", + "name": "name of the deployment config that will be rolled back.", + "updatedAnnotations": "updatedAnnotations is a set of new annotations that will be added in the deployment config.", + "spec": "spec defines the options to rollback generation.", } func (DeploymentConfigRollback) SwaggerDoc() map[string]string { @@ -89,12 +89,12 @@ func (DeploymentConfigRollback) SwaggerDoc() map[string]string { var map_DeploymentConfigRollbackSpec = map[string]string{ "": "DeploymentConfigRollbackSpec represents the options for rollback generation.", - "from": "From points to a ReplicationController which is a deployment.", - "revision": "Revision to rollback to. If set to 0, rollback to the last revision.", - "includeTriggers": "IncludeTriggers specifies whether to include config Triggers.", - "includeTemplate": "IncludeTemplate specifies whether to include the PodTemplateSpec.", - "includeReplicationMeta": "IncludeReplicationMeta specifies whether to include the replica count and selector.", - "includeStrategy": "IncludeStrategy specifies whether to include the deployment Strategy.", + "from": "from points to a ReplicationController which is a deployment.", + "revision": "revision to rollback to. If set to 0, rollback to the last revision.", + "includeTriggers": "includeTriggers specifies whether to include config Triggers.", + "includeTemplate": "includeTemplate specifies whether to include the PodTemplateSpec.", + "includeReplicationMeta": "includeReplicationMeta specifies whether to include the replica count and selector.", + "includeStrategy": "includeStrategy specifies whether to include the deployment Strategy.", } func (DeploymentConfigRollbackSpec) SwaggerDoc() map[string]string { @@ -103,15 +103,15 @@ func (DeploymentConfigRollbackSpec) SwaggerDoc() map[string]string { var map_DeploymentConfigSpec = map[string]string{ "": "DeploymentConfigSpec represents the desired state of the deployment.", - "strategy": "Strategy describes how a deployment is executed.", - "minReadySeconds": "MinReadySeconds is the minimum number of seconds for which a newly created pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)", - "triggers": "Triggers determine how updates to a DeploymentConfig result in new deployments. If no triggers are defined, a new deployment can only occur as a result of an explicit client update to the DeploymentConfig with a new LatestVersion. 
If null, defaults to having a config change trigger.", - "replicas": "Replicas is the number of desired replicas.", - "revisionHistoryLimit": "RevisionHistoryLimit is the number of old ReplicationControllers to retain to allow for rollbacks. This field is a pointer to allow for differentiation between an explicit zero and not specified. Defaults to 10. (This only applies to DeploymentConfigs created via the new group API resource, not the legacy resource.)", - "test": "Test ensures that this deployment config will have zero replicas except while a deployment is running. This allows the deployment config to be used as a continuous deployment test - triggering on images, running the deployment, and then succeeding or failing. Post strategy hooks and After actions can be used to integrate successful deployment with an action.", - "paused": "Paused indicates that the deployment config is paused resulting in no new deployments on template changes or changes in the template caused by other triggers.", - "selector": "Selector is a label query over pods that should match the Replicas count.", - "template": "Template is the object that describes the pod that will be created if insufficient replicas are detected.", + "strategy": "strategy describes how a deployment is executed.", + "minReadySeconds": "minReadySeconds is the minimum number of seconds for which a newly created pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)", + "triggers": "triggers determine how updates to a DeploymentConfig result in new deployments. If no triggers are defined, a new deployment can only occur as a result of an explicit client update to the DeploymentConfig with a new LatestVersion. If null, defaults to having a config change trigger.", + "replicas": "replicas is the number of desired replicas.", + "revisionHistoryLimit": "revisionHistoryLimit is the number of old ReplicationControllers to retain to allow for rollbacks. This field is a pointer to allow for differentiation between an explicit zero and not specified. Defaults to 10. (This only applies to DeploymentConfigs created via the new group API resource, not the legacy resource.)", + "test": "test ensures that this deployment config will have zero replicas except while a deployment is running. This allows the deployment config to be used as a continuous deployment test - triggering on images, running the deployment, and then succeeding or failing. 
Post strategy hooks and After actions can be used to integrate successful deployment with an action.", + "paused": "paused indicates that the deployment config is paused resulting in no new deployments on template changes or changes in the template caused by other triggers.", + "selector": "selector is a label query over pods that should match the Replicas count.", + "template": "template is the object that describes the pod that will be created if insufficient replicas are detected.", } func (DeploymentConfigSpec) SwaggerDoc() map[string]string { @@ -120,14 +120,14 @@ func (DeploymentConfigSpec) SwaggerDoc() map[string]string { var map_DeploymentConfigStatus = map[string]string{ "": "DeploymentConfigStatus represents the current deployment state.", - "latestVersion": "LatestVersion is used to determine whether the current deployment associated with a deployment config is out of sync.", - "observedGeneration": "ObservedGeneration is the most recent generation observed by the deployment config controller.", - "replicas": "Replicas is the total number of pods targeted by this deployment config.", - "updatedReplicas": "UpdatedReplicas is the total number of non-terminated pods targeted by this deployment config that have the desired template spec.", - "availableReplicas": "AvailableReplicas is the total number of available pods targeted by this deployment config.", - "unavailableReplicas": "UnavailableReplicas is the total number of unavailable pods targeted by this deployment config.", - "details": "Details are the reasons for the update to this deployment config. This could be based on a change made by the user or caused by an automatic trigger", - "conditions": "Conditions represents the latest available observations of a deployment config's current state.", + "latestVersion": "latestVersion is used to determine whether the current deployment associated with a deployment config is out of sync.", + "observedGeneration": "observedGeneration is the most recent generation observed by the deployment config controller.", + "replicas": "replicas is the total number of pods targeted by this deployment config.", + "updatedReplicas": "updatedReplicas is the total number of non-terminated pods targeted by this deployment config that have the desired template spec.", + "availableReplicas": "availableReplicas is the total number of available pods targeted by this deployment config.", + "unavailableReplicas": "unavailableReplicas is the total number of unavailable pods targeted by this deployment config.", + "details": "details are the reasons for the update to this deployment config. 
This could be based on a change made by the user or caused by an automatic trigger", + "conditions": "conditions represents the latest available observations of a deployment config's current state.", "readyReplicas": "Total number of ready pods targeted by this deployment.", } @@ -137,8 +137,8 @@ func (DeploymentConfigStatus) SwaggerDoc() map[string]string { var map_DeploymentDetails = map[string]string{ "": "DeploymentDetails captures information about the causes of a deployment.", - "message": "Message is the user specified change message, if this deployment was triggered manually by the user", - "causes": "Causes are extended data associated with all the causes for creating a new deployment", + "message": "message is the user specified change message, if this deployment was triggered manually by the user", + "causes": "causes are extended data associated with all the causes for creating a new deployment", } func (DeploymentDetails) SwaggerDoc() map[string]string { @@ -156,15 +156,15 @@ func (DeploymentLog) SwaggerDoc() map[string]string { var map_DeploymentLogOptions = map[string]string{ "": "DeploymentLogOptions is the REST options for a deployment log\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "container": "The container for which to stream logs. Defaults to only container if there is one container in the pod.", - "follow": "Follow if true indicates that the build log should be streamed until the build terminates.", + "follow": "follow if true indicates that the build log should be streamed until the build terminates.", "previous": "Return previous deployment logs. Defaults to false.", "sinceSeconds": "A relative time in seconds before the current time from which to show logs. If this value precedes the time a pod was started, only logs since the pod start will be returned. If this value is in the future, no logs will be returned. Only one of sinceSeconds or sinceTime may be specified.", "sinceTime": "An RFC3339 timestamp from which to show logs. If this value precedes the time a pod was started, only logs since the pod start will be returned. If this value is in the future, no logs will be returned. Only one of sinceSeconds or sinceTime may be specified.", "timestamps": "If true, add an RFC3339 or RFC3339Nano timestamp at the beginning of every line of log output. Defaults to false.", "tailLines": "If set, the number of lines from the end of the logs to show. If not specified, logs are shown from the creation of the container or sinceSeconds or sinceTime", "limitBytes": "If set, the number of bytes to read from the server before terminating the log output. This may not display a complete final line of logging, and may return slightly more or slightly less than the specified limit.", - "nowait": "NoWait if true causes the call to return immediately even if the deployment is not available yet. Otherwise the server will wait until the deployment has started.", - "version": "Version of the deployment for which to view logs.", + "nowait": "nowait if true causes the call to return immediately even if the deployment is not available yet. 
Otherwise the server will wait until the deployment has started.", + "version": "version of the deployment for which to view logs.", } func (DeploymentLogOptions) SwaggerDoc() map[string]string { @@ -173,10 +173,10 @@ func (DeploymentLogOptions) SwaggerDoc() map[string]string { var map_DeploymentRequest = map[string]string{ "": "DeploymentRequest is a request to a deployment config for a new deployment.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", - "name": "Name of the deployment config for requesting a new deployment.", - "latest": "Latest will update the deployment config with the latest state from all triggers.", - "force": "Force will try to force a new deployment to run. If the deployment config is paused, then setting this to true will return an Invalid error.", - "excludeTriggers": "ExcludeTriggers instructs the instantiator to avoid processing the specified triggers. This field overrides the triggers from latest and allows clients to control specific logic. This field is ignored if not specified.", + "name": "name of the deployment config for requesting a new deployment.", + "latest": "latest will update the deployment config with the latest state from all triggers.", + "force": "force will try to force a new deployment to run. If the deployment config is paused, then setting this to true will return an Invalid error.", + "excludeTriggers": "excludeTriggers instructs the instantiator to avoid processing the specified triggers. This field overrides the triggers from latest and allows clients to control specific logic. This field is ignored if not specified.", } func (DeploymentRequest) SwaggerDoc() map[string]string { @@ -185,14 +185,14 @@ func (DeploymentRequest) SwaggerDoc() map[string]string { var map_DeploymentStrategy = map[string]string{ "": "DeploymentStrategy describes how to perform a deployment.", - "type": "Type is the name of a deployment strategy.", - "customParams": "CustomParams are the input to the Custom deployment strategy, and may also be specified for the Recreate and Rolling strategies to customize the execution process that runs the deployment.", - "recreateParams": "RecreateParams are the input to the Recreate deployment strategy.", - "rollingParams": "RollingParams are the input to the Rolling deployment strategy.", - "resources": "Resources contains resource requirements to execute the deployment and any hooks.", - "labels": "Labels is a set of key, value pairs added to custom deployer and lifecycle pre/post hook pods.", - "annotations": "Annotations is a set of key, value pairs added to custom deployer and lifecycle pre/post hook pods.", - "activeDeadlineSeconds": "ActiveDeadlineSeconds is the duration in seconds that the deployer pods for this deployment config may be active on a node before the system actively tries to terminate them.", + "type": "type is the name of a deployment strategy.", + "customParams": "customParams are the input to the Custom deployment strategy, and may also be specified for the Recreate and Rolling strategies to customize the execution process that runs the deployment.", + "recreateParams": "recreateParams are the input to the Recreate deployment strategy.", + "rollingParams": "rollingParams are the input to the Rolling deployment strategy.", + "resources": "resources contains resource requirements to execute the deployment and any hooks.", + "labels": "labels is a set of key, value pairs added to custom deployer and lifecycle pre/post hook 
pods.", + "annotations": "annotations is a set of key, value pairs added to custom deployer and lifecycle pre/post hook pods.", + "activeDeadlineSeconds": "activeDeadlineSeconds is the duration in seconds that the deployer pods for this deployment config may be active on a node before the system actively tries to terminate them.", } func (DeploymentStrategy) SwaggerDoc() map[string]string { @@ -201,10 +201,10 @@ func (DeploymentStrategy) SwaggerDoc() map[string]string { var map_DeploymentTriggerImageChangeParams = map[string]string{ "": "DeploymentTriggerImageChangeParams represents the parameters to the ImageChange trigger.", - "automatic": "Automatic means that the detection of a new tag value should result in an image update inside the pod template.", - "containerNames": "ContainerNames is used to restrict tag updates to the specified set of container names in a pod. If multiple triggers point to the same containers, the resulting behavior is undefined. Future API versions will make this a validation error. If ContainerNames does not point to a valid container, the trigger will be ignored. Future API versions will make this a validation error.", - "from": "From is a reference to an image stream tag to watch for changes. From.Name is the only required subfield - if From.Namespace is blank, the namespace of the current deployment trigger will be used.", - "lastTriggeredImage": "LastTriggeredImage is the last image to be triggered.", + "automatic": "automatic means that the detection of a new tag value should result in an image update inside the pod template.", + "containerNames": "containerNames is used to restrict tag updates to the specified set of container names in a pod. If multiple triggers point to the same containers, the resulting behavior is undefined. Future API versions will make this a validation error. If ContainerNames does not point to a valid container, the trigger will be ignored. Future API versions will make this a validation error.", + "from": "from is a reference to an image stream tag to watch for changes. 
From.Name is the only required subfield - if From.Namespace is blank, the namespace of the current deployment trigger will be used.", + "lastTriggeredImage": "lastTriggeredImage is the last image to be triggered.", } func (DeploymentTriggerImageChangeParams) SwaggerDoc() map[string]string { @@ -213,8 +213,8 @@ func (DeploymentTriggerImageChangeParams) SwaggerDoc() map[string]string { var map_DeploymentTriggerPolicy = map[string]string{ "": "DeploymentTriggerPolicy describes a policy for a single trigger that results in a new deployment.", - "type": "Type of the trigger", - "imageChangeParams": "ImageChangeParams represents the parameters for the ImageChange trigger.", + "type": "type of the trigger", + "imageChangeParams": "imageChangeParams represents the parameters for the ImageChange trigger.", } func (DeploymentTriggerPolicy) SwaggerDoc() map[string]string { @@ -223,10 +223,10 @@ func (DeploymentTriggerPolicy) SwaggerDoc() map[string]string { var map_ExecNewPodHook = map[string]string{ "": "ExecNewPodHook is a hook implementation which runs a command in a new pod based on the specified container which is assumed to be part of the deployment template.", - "command": "Command is the action command and its arguments.", - "env": "Env is a set of environment variables to supply to the hook pod's container.", - "containerName": "ContainerName is the name of a container in the deployment pod template whose container image will be used for the hook pod's container.", - "volumes": "Volumes is a list of named volumes from the pod template which should be copied to the hook pod. Volumes names not found in pod spec are ignored. An empty list means no volumes will be copied.", + "command": "command is the action command and its arguments.", + "env": "env is a set of environment variables to supply to the hook pod's container.", + "containerName": "containerName is the name of a container in the deployment pod template whose container image will be used for the hook pod's container.", + "volumes": "volumes is a list of named volumes from the pod template which should be copied to the hook pod. Volumes names not found in pod spec are ignored. An empty list means no volumes will be copied.", } func (ExecNewPodHook) SwaggerDoc() map[string]string { @@ -235,9 +235,9 @@ func (ExecNewPodHook) SwaggerDoc() map[string]string { var map_LifecycleHook = map[string]string{ "": "LifecycleHook defines a specific deployment lifecycle action. Only one type of action may be specified at any time.", - "failurePolicy": "FailurePolicy specifies what action to take if the hook fails.", - "execNewPod": "ExecNewPod specifies the options for a lifecycle hook backed by a pod.", - "tagImages": "TagImages instructs the deployer to tag the current image referenced under a container onto an image stream tag.", + "failurePolicy": "failurePolicy specifies what action to take if the hook fails.", + "execNewPod": "execNewPod specifies the options for a lifecycle hook backed by a pod.", + "tagImages": "tagImages instructs the deployer to tag the current image referenced under a container onto an image stream tag.", } func (LifecycleHook) SwaggerDoc() map[string]string { @@ -246,10 +246,10 @@ func (LifecycleHook) SwaggerDoc() map[string]string { var map_RecreateDeploymentStrategyParams = map[string]string{ "": "RecreateDeploymentStrategyParams are the input to the Recreate deployment strategy.", - "timeoutSeconds": "TimeoutSeconds is the time to wait for updates before giving up. 
If the value is nil, a default will be used.", - "pre": "Pre is a lifecycle hook which is executed before the strategy manipulates the deployment. All LifecycleHookFailurePolicy values are supported.", - "mid": "Mid is a lifecycle hook which is executed while the deployment is scaled down to zero before the first new pod is created. All LifecycleHookFailurePolicy values are supported.", - "post": "Post is a lifecycle hook which is executed after the strategy has finished all deployment logic. All LifecycleHookFailurePolicy values are supported.", + "timeoutSeconds": "timeoutSeconds is the time to wait for updates before giving up. If the value is nil, a default will be used.", + "pre": "pre is a lifecycle hook which is executed before the strategy manipulates the deployment. All LifecycleHookFailurePolicy values are supported.", + "mid": "mid is a lifecycle hook which is executed while the deployment is scaled down to zero before the first new pod is created. All LifecycleHookFailurePolicy values are supported.", + "post": "post is a lifecycle hook which is executed after the strategy has finished all deployment logic. All LifecycleHookFailurePolicy values are supported.", } func (RecreateDeploymentStrategyParams) SwaggerDoc() map[string]string { @@ -258,13 +258,13 @@ func (RecreateDeploymentStrategyParams) SwaggerDoc() map[string]string { var map_RollingDeploymentStrategyParams = map[string]string{ "": "RollingDeploymentStrategyParams are the input to the Rolling deployment strategy.", - "updatePeriodSeconds": "UpdatePeriodSeconds is the time to wait between individual pod updates. If the value is nil, a default will be used.", - "intervalSeconds": "IntervalSeconds is the time to wait between polling deployment status after update. If the value is nil, a default will be used.", - "timeoutSeconds": "TimeoutSeconds is the time to wait for updates before giving up. If the value is nil, a default will be used.", - "maxUnavailable": "MaxUnavailable is the maximum number of pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of total pods at the start of update (ex: 10%). Absolute number is calculated from percentage by rounding down.\n\nThis cannot be 0 if MaxSurge is 0. By default, 25% is used.\n\nExample: when this is set to 30%, the old RC can be scaled down by 30% immediately when the rolling update starts. Once new pods are ready, old RC can be scaled down further, followed by scaling up the new RC, ensuring that at least 70% of original number of pods are available at all times during the update.", - "maxSurge": "MaxSurge is the maximum number of pods that can be scheduled above the original number of pods. Value can be an absolute number (ex: 5) or a percentage of total pods at the start of the update (ex: 10%). Absolute number is calculated from percentage by rounding up.\n\nThis cannot be 0 if MaxUnavailable is 0. By default, 25% is used.\n\nExample: when this is set to 30%, the new RC can be scaled up by 30% immediately when the rolling update starts. Once old pods have been killed, new RC can be scaled up further, ensuring that total number of pods running at any time during the update is atmost 130% of original pods.", - "pre": "Pre is a lifecycle hook which is executed before the deployment process begins. All LifecycleHookFailurePolicy values are supported.", - "post": "Post is a lifecycle hook which is executed after the strategy has finished all deployment logic. 
All LifecycleHookFailurePolicy values are supported.", + "updatePeriodSeconds": "updatePeriodSeconds is the time to wait between individual pod updates. If the value is nil, a default will be used.", + "intervalSeconds": "intervalSeconds is the time to wait between polling deployment status after update. If the value is nil, a default will be used.", + "timeoutSeconds": "timeoutSeconds is the time to wait for updates before giving up. If the value is nil, a default will be used.", + "maxUnavailable": "maxUnavailable is the maximum number of pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of total pods at the start of update (ex: 10%). Absolute number is calculated from percentage by rounding down.\n\nThis cannot be 0 if MaxSurge is 0. By default, 25% is used.\n\nExample: when this is set to 30%, the old RC can be scaled down by 30% immediately when the rolling update starts. Once new pods are ready, old RC can be scaled down further, followed by scaling up the new RC, ensuring that at least 70% of original number of pods are available at all times during the update.", + "maxSurge": "maxSurge is the maximum number of pods that can be scheduled above the original number of pods. Value can be an absolute number (ex: 5) or a percentage of total pods at the start of the update (ex: 10%). Absolute number is calculated from percentage by rounding up.\n\nThis cannot be 0 if MaxUnavailable is 0. By default, 25% is used.\n\nExample: when this is set to 30%, the new RC can be scaled up by 30% immediately when the rolling update starts. Once old pods have been killed, new RC can be scaled up further, ensuring that total number of pods running at any time during the update is atmost 130% of original pods.", + "pre": "pre is a lifecycle hook which is executed before the deployment process begins. All LifecycleHookFailurePolicy values are supported.", + "post": "post is a lifecycle hook which is executed after the strategy has finished all deployment logic. All LifecycleHookFailurePolicy values are supported.", } func (RollingDeploymentStrategyParams) SwaggerDoc() map[string]string { @@ -273,8 +273,8 @@ func (RollingDeploymentStrategyParams) SwaggerDoc() map[string]string { var map_TagImageHook = map[string]string{ "": "TagImageHook is a request to tag the image in a particular container onto an ImageStreamTag.", - "containerName": "ContainerName is the name of a container in the deployment config whose image value will be used as the source of the tag. If there is only a single container this value will be defaulted to the name of that container.", - "to": "To is the target ImageStreamTag to set the container's image onto.", + "containerName": "containerName is the name of a container in the deployment config whose image value will be used as the source of the tag. 
If there is only a single container this value will be defaulted to the name of that container.", + "to": "to is the target ImageStreamTag to set the container's image onto.", } func (TagImageHook) SwaggerDoc() map[string]string { diff --git a/vendor/github.com/openshift/api/authorization/v1/generated.proto b/vendor/github.com/openshift/api/authorization/v1/generated.proto index 28e4e8ce6..f7d7b772a 100644 --- a/vendor/github.com/openshift/api/authorization/v1/generated.proto +++ b/vendor/github.com/openshift/api/authorization/v1/generated.proto @@ -16,10 +16,10 @@ option go_package = "github.com/openshift/api/authorization/v1"; // Action describes a request to the API server message Action { - // Namespace is the namespace of the action being requested. Currently, there is no distinction between no namespace and all namespaces + // namespace is the namespace of the action being requested. Currently, there is no distinction between no namespace and all namespaces optional string namespace = 1; - // Verb is one of: get, list, watch, create, update, delete + // verb is one of: get, list, watch, create, update, delete optional string verb = 2; // Group is the API group of the resource @@ -30,19 +30,19 @@ message Action { // Serialized as resourceAPIVersion to avoid confusion with TypeMeta.apiVersion and ObjectMeta.resourceVersion when inlined optional string resourceAPIVersion = 4; - // Resource is one of the existing resource types + // resource is one of the existing resource types optional string resource = 5; - // ResourceName is the name of the resource being requested for a "get" or deleted for a "delete" + // resourceName is the name of the resource being requested for a "get" or deleted for a "delete" optional string resourceName = 6; - // Path is the path of a non resource URL + // path is the path of a non resource URL optional string path = 8; - // IsNonResourceURL is true if this is a request for a non-resource URL (outside of the resource hierarchy) + // isNonResourceURL is true if this is a request for a non-resource URL (outside of the resource hierarchy) optional bool isNonResourceURL = 9; - // Content is the actual content of the request for create and update + // content is the actual content of the request for create and update // +kubebuilder:pruning:PreserveUnknownFields optional .k8s.io.apimachinery.pkg.runtime.RawExtension content = 7; } @@ -56,10 +56,10 @@ message ClusterRole { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // Rules holds all the PolicyRules for this ClusterRole + // rules holds all the PolicyRules for this ClusterRole repeated PolicyRule rules = 2; - // AggregationRule is an optional field that describes how to build the Rules for this ClusterRole. + // aggregationRule is an optional field that describes how to build the Rules for this ClusterRole. // If AggregationRule is set, then the Rules are controller managed and direct changes to Rules will be // stomped by the controller. optional .k8s.io.api.rbac.v1.AggregationRule aggregationRule = 3; @@ -76,28 +76,28 @@ message ClusterRoleBinding { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // UserNames holds all the usernames directly bound to the role. + // userNames holds all the usernames directly bound to the role. 
// This field should only be specified when supporting legacy clients and servers. // See Subjects for further details. // +k8s:conversion-gen=false // +optional optional OptionalNames userNames = 2; - // GroupNames holds all the groups directly bound to the role. + // groupNames holds all the groups directly bound to the role. // This field should only be specified when supporting legacy clients and servers. // See Subjects for further details. // +k8s:conversion-gen=false // +optional optional OptionalNames groupNames = 3; - // Subjects hold object references to authorize with this rule. + // subjects hold object references to authorize with this rule. // This field is ignored if UserNames or GroupNames are specified to support legacy clients and servers. // Thus newer clients that do not need to support backwards compatibility should send // only fully qualified Subjects and should omit the UserNames and GroupNames fields. // Clients that need to support backwards compatibility can use this field to build the UserNames and GroupNames. repeated .k8s.io.api.core.v1.ObjectReference subjects = 4; - // RoleRef can only reference the current namespace and the global namespace. + // roleRef can only reference the current namespace and the global namespace. // If the ClusterRoleRef cannot be resolved, the Authorizer must return an error. // Since Policy is a singleton, this is sufficient knowledge to locate a role. optional .k8s.io.api.core.v1.ObjectReference roleRef = 5; @@ -112,7 +112,7 @@ message ClusterRoleBindingList { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // Items is a list of ClusterRoleBindings + // items is a list of ClusterRoleBindings repeated ClusterRoleBinding items = 2; } @@ -125,14 +125,14 @@ message ClusterRoleList { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // Items is a list of ClusterRoles + // items is a list of ClusterRoles repeated ClusterRole items = 2; } // GroupRestriction matches a group either by a string match on the group name // or a label selector applied to group labels. message GroupRestriction { - // Groups is a list of groups used to match against an individual user's + // groups is a list of groups used to match against an individual user's // groups. If the user is a member of one of the whitelisted groups, the user // is allowed to be bound to a role. // +nullable @@ -175,14 +175,14 @@ message LocalSubjectAccessReview { // Action describes the action being tested. The Namespace element is FORCED to the current namespace. optional Action Action = 1; - // User is optional. If both User and Groups are empty, the current authenticated user is used. + // user is optional. If both User and Groups are empty, the current authenticated user is used. optional string user = 2; - // Groups is optional. Groups is the list of groups to which the User belongs. + // groups is optional. Groups is the list of groups to which the User belongs. // +k8s:conversion-gen=false repeated string groups = 3; - // Scopes to use for the evaluation. Empty means "use the unscoped (full) permissions of the user/groups". + // scopes to use for the evaluation. Empty means "use the unscoped (full) permissions of the user/groups". // Nil for a self-SAR, means "use the scopes on this request". 
// Nil for a regular SAR, means the same as empty. // +k8s:conversion-gen=false @@ -191,37 +191,37 @@ message LocalSubjectAccessReview { // NamedClusterRole relates a name with a cluster role message NamedClusterRole { - // Name is the name of the cluster role + // name is the name of the cluster role optional string name = 1; - // Role is the cluster role being named + // role is the cluster role being named optional ClusterRole role = 2; } // NamedClusterRoleBinding relates a name with a cluster role binding message NamedClusterRoleBinding { - // Name is the name of the cluster role binding + // name is the name of the cluster role binding optional string name = 1; - // RoleBinding is the cluster role binding being named + // roleBinding is the cluster role binding being named optional ClusterRoleBinding roleBinding = 2; } // NamedRole relates a Role with a name message NamedRole { - // Name is the name of the role + // name is the name of the role optional string name = 1; - // Role is the role being named + // role is the role being named optional Role role = 2; } // NamedRoleBinding relates a role binding with a name message NamedRoleBinding { - // Name is the name of the role binding + // name is the name of the role binding optional string name = 1; - // RoleBinding is the role binding being named + // roleBinding is the role binding being named optional RoleBinding roleBinding = 2; } @@ -246,25 +246,25 @@ message OptionalScopes { // PolicyRule holds information that describes a policy rule, but does not contain information // about who the rule applies to or which namespace the rule applies to. message PolicyRule { - // Verbs is a list of Verbs that apply to ALL the ResourceKinds and AttributeRestrictions contained in this rule. VerbAll represents all kinds. + // verbs is a list of Verbs that apply to ALL the ResourceKinds and AttributeRestrictions contained in this rule. VerbAll represents all kinds. repeated string verbs = 1; - // AttributeRestrictions will vary depending on what the Authorizer/AuthorizationAttributeBuilder pair supports. + // attributeRestrictions will vary depending on what the Authorizer/AuthorizationAttributeBuilder pair supports. // If the Authorizer does not recognize how to handle the AttributeRestrictions, the Authorizer should report an error. // +kubebuilder:pruning:PreserveUnknownFields optional .k8s.io.apimachinery.pkg.runtime.RawExtension attributeRestrictions = 2; - // APIGroups is the name of the APIGroup that contains the resources. If this field is empty, then both kubernetes and origin API groups are assumed. + // apiGroups is the name of the APIGroup that contains the resources. If this field is empty, then both kubernetes and origin API groups are assumed. // That means that if an action is requested against one of the enumerated resources in either the kubernetes or the origin API group, the request // will be allowed // +optional // +nullable repeated string apiGroups = 3; - // Resources is a list of resources this rule applies to. ResourceAll represents all resources. + // resources is a list of resources this rule applies to. ResourceAll represents all resources. repeated string resources = 4; - // ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed. + // resourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed. 
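The PolicyRule message above corresponds to the Go struct in the vendored github.com/openshift/api/authorization/v1 package; a hedged sketch of a rule using only the fields shown in this hunk (the resource names are hypothetical):

package example

import authorizationv1 "github.com/openshift/api/authorization/v1"

// readOnlyDeploymentConfigs grants read access to two named deploymentconfigs.
var readOnlyDeploymentConfigs = authorizationv1.PolicyRule{
	Verbs:         []string{"get", "list", "watch"},
	APIGroups:     []string{"apps.openshift.io"},
	Resources:     []string{"deploymentconfigs"},
	ResourceNames: []string{"frontend", "backend"},
}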
repeated string resourceNames = 5; // NonResourceURLsSlice is a set of partial urls that a user should have access to. *s are allowed, but only as the full, final step in the path @@ -291,7 +291,7 @@ message ResourceAccessReview { // Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). // +openshift:compatibility-gen:level=1 message ResourceAccessReviewResponse { - // Namespace is the namespace used for the access review + // namespace is the namespace used for the access review optional string namespace = 1; // UsersSlice is the list of users who can perform the action @@ -317,7 +317,7 @@ message Role { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // Rules holds all the PolicyRules for this Role + // rules holds all the PolicyRules for this Role repeated PolicyRule rules = 2; } @@ -332,28 +332,28 @@ message RoleBinding { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // UserNames holds all the usernames directly bound to the role. + // userNames holds all the usernames directly bound to the role. // This field should only be specified when supporting legacy clients and servers. // See Subjects for further details. // +k8s:conversion-gen=false // +optional optional OptionalNames userNames = 2; - // GroupNames holds all the groups directly bound to the role. + // groupNames holds all the groups directly bound to the role. // This field should only be specified when supporting legacy clients and servers. // See Subjects for further details. // +k8s:conversion-gen=false // +optional optional OptionalNames groupNames = 3; - // Subjects hold object references to authorize with this rule. + // subjects hold object references to authorize with this rule. // This field is ignored if UserNames or GroupNames are specified to support legacy clients and servers. // Thus newer clients that do not need to support backwards compatibility should send // only fully qualified Subjects and should omit the UserNames and GroupNames fields. // Clients that need to support backwards compatibility can use this field to build the UserNames and GroupNames. repeated .k8s.io.api.core.v1.ObjectReference subjects = 4; - // RoleRef can only reference the current namespace and the global namespace. + // roleRef can only reference the current namespace and the global namespace. // If the RoleRef cannot be resolved, the Authorizer must return an error. // Since Policy is a singleton, this is sufficient knowledge to locate a role. optional .k8s.io.api.core.v1.ObjectReference roleRef = 5; @@ -368,7 +368,7 @@ message RoleBindingList { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // Items is a list of RoleBindings + // items is a list of RoleBindings repeated RoleBinding items = 2; } @@ -390,7 +390,7 @@ message RoleBindingRestriction { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // Spec defines the matcher. + // spec defines the matcher. 
optional RoleBindingRestrictionSpec spec = 2; } @@ -403,22 +403,22 @@ message RoleBindingRestrictionList { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // Items is a list of RoleBindingRestriction objects. + // items is a list of RoleBindingRestriction objects. repeated RoleBindingRestriction items = 2; } // RoleBindingRestrictionSpec defines a rolebinding restriction. Exactly one // field must be non-nil. message RoleBindingRestrictionSpec { - // UserRestriction matches against user subjects. + // userrestriction matches against user subjects. // +nullable optional UserRestriction userrestriction = 1; - // GroupRestriction matches against group subjects. + // grouprestriction matches against group subjects. // +nullable optional GroupRestriction grouprestriction = 2; - // ServiceAccountRestriction matches against service-account subjects. + // serviceaccountrestriction matches against service-account subjects. // +nullable optional ServiceAccountRestriction serviceaccountrestriction = 3; } @@ -432,7 +432,7 @@ message RoleList { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // Items is a list of Roles + // items is a list of Roles repeated Role items = 2; } @@ -445,16 +445,16 @@ message SelfSubjectRulesReview { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 3; - // Spec adds information about how to conduct the check + // spec adds information about how to conduct the check optional SelfSubjectRulesReviewSpec spec = 1; - // Status is completed by the server to tell which permissions you have + // status is completed by the server to tell which permissions you have optional SubjectRulesReviewStatus status = 2; } // SelfSubjectRulesReviewSpec adds information about how to conduct the check message SelfSubjectRulesReviewSpec { - // Scopes to use for the evaluation. Empty means "use the unscoped (full) permissions of the user/groups". + // scopes to use for the evaluation. Empty means "use the unscoped (full) permissions of the user/groups". // Nil means "use the scopes on this request". // +k8s:conversion-gen=false optional OptionalScopes scopes = 1; @@ -463,10 +463,10 @@ message SelfSubjectRulesReviewSpec { // ServiceAccountReference specifies a service account and namespace by their // names. message ServiceAccountReference { - // Name is the name of the service account. + // name is the name of the service account. optional string name = 1; - // Namespace is the namespace of the service account. Service accounts from + // namespace is the namespace of the service account. Service accounts from // inside the whitelisted namespaces are allowed to be bound to roles. If // Namespace is empty, then the namespace of the RoleBindingRestriction in // which the ServiceAccountReference is embedded is used. @@ -477,10 +477,10 @@ message ServiceAccountReference { // either the service-account name or the name of the service account's // namespace. message ServiceAccountRestriction { - // ServiceAccounts specifies a list of literal service-account names. + // serviceaccounts specifies a list of literal service-account names. 
repeated ServiceAccountReference serviceaccounts = 1; - // Namespaces specifies a list of literal namespace names. + // namespaces specifies a list of literal namespace names. repeated string namespaces = 2; } @@ -496,14 +496,14 @@ message SubjectAccessReview { // Action describes the action being tested. optional Action Action = 1; - // User is optional. If both User and Groups are empty, the current authenticated user is used. + // user is optional. If both User and Groups are empty, the current authenticated user is used. optional string user = 2; // GroupsSlice is optional. Groups is the list of groups to which the User belongs. // +k8s:conversion-gen=false repeated string groups = 3; - // Scopes to use for the evaluation. Empty means "use the unscoped (full) permissions of the user/groups". + // scopes to use for the evaluation. Empty means "use the unscoped (full) permissions of the user/groups". // Nil for a self-SAR, means "use the scopes on this request". // Nil for a regular SAR, means the same as empty. // +k8s:conversion-gen=false @@ -515,16 +515,16 @@ message SubjectAccessReview { // Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). // +openshift:compatibility-gen:level=1 message SubjectAccessReviewResponse { - // Namespace is the namespace used for the access review + // namespace is the namespace used for the access review optional string namespace = 1; - // Allowed is required. True if the action would be allowed, false otherwise. + // allowed is required. True if the action would be allowed, false otherwise. optional bool allowed = 2; - // Reason is optional. It indicates why a request was allowed or denied. + // reason is optional. It indicates why a request was allowed or denied. optional string reason = 3; - // EvaluationError is an indication that some error occurred during the authorization check. + // evaluationError is an indication that some error occurred during the authorization check. // It is entirely possible to get an error and be able to continue determine authorization status in spite of it. This is // most common when a bound role is missing, but enough roles are still present and bound to reason about the request. optional string evaluationError = 4; @@ -539,31 +539,31 @@ message SubjectRulesReview { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 3; - // Spec adds information about how to conduct the check + // spec adds information about how to conduct the check optional SubjectRulesReviewSpec spec = 1; - // Status is completed by the server to tell which permissions you have + // status is completed by the server to tell which permissions you have optional SubjectRulesReviewStatus status = 2; } // SubjectRulesReviewSpec adds information about how to conduct the check message SubjectRulesReviewSpec { - // User is optional. At least one of User and Groups must be specified. + // user is optional. At least one of User and Groups must be specified. optional string user = 1; - // Groups is optional. Groups is the list of groups to which the User belongs. At least one of User and Groups must be specified. + // groups is optional. Groups is the list of groups to which the User belongs. At least one of User and Groups must be specified. repeated string groups = 2; - // Scopes to use for the evaluation. 
Empty means "use the unscoped (full) permissions of the user/groups". + // scopes to use for the evaluation. Empty means "use the unscoped (full) permissions of the user/groups". optional OptionalScopes scopes = 3; } // SubjectRulesReviewStatus is contains the result of a rules check message SubjectRulesReviewStatus { - // Rules is the list of rules (no particular sort) that are allowed for the subject + // rules is the list of rules (no particular sort) that are allowed for the subject repeated PolicyRule rules = 1; - // EvaluationError can appear in combination with Rules. It means some error happened during evaluation + // evaluationError can appear in combination with Rules. It means some error happened during evaluation // that may have prevented additional rules from being populated. optional string evaluationError = 2; } @@ -572,10 +572,10 @@ message SubjectRulesReviewStatus { // a string match on the name of a group to which the user belongs, or a label // selector applied to the user labels. message UserRestriction { - // Users specifies a list of literal user names. + // users specifies a list of literal user names. repeated string users = 1; - // Groups specifies a list of literal group names. + // groups specifies a list of literal group names. // +nullable repeated string groups = 2; diff --git a/vendor/github.com/openshift/api/authorization/v1/types.go b/vendor/github.com/openshift/api/authorization/v1/types.go index e8dd0c29f..bf4071867 100644 --- a/vendor/github.com/openshift/api/authorization/v1/types.go +++ b/vendor/github.com/openshift/api/authorization/v1/types.go @@ -28,21 +28,21 @@ const ( // PolicyRule holds information that describes a policy rule, but does not contain information // about who the rule applies to or which namespace the rule applies to. type PolicyRule struct { - // Verbs is a list of Verbs that apply to ALL the ResourceKinds and AttributeRestrictions contained in this rule. VerbAll represents all kinds. + // verbs is a list of Verbs that apply to ALL the ResourceKinds and AttributeRestrictions contained in this rule. VerbAll represents all kinds. Verbs []string `json:"verbs" protobuf:"bytes,1,rep,name=verbs"` - // AttributeRestrictions will vary depending on what the Authorizer/AuthorizationAttributeBuilder pair supports. + // attributeRestrictions will vary depending on what the Authorizer/AuthorizationAttributeBuilder pair supports. // If the Authorizer does not recognize how to handle the AttributeRestrictions, the Authorizer should report an error. // +kubebuilder:pruning:PreserveUnknownFields AttributeRestrictions kruntime.RawExtension `json:"attributeRestrictions,omitempty" protobuf:"bytes,2,opt,name=attributeRestrictions"` - // APIGroups is the name of the APIGroup that contains the resources. If this field is empty, then both kubernetes and origin API groups are assumed. + // apiGroups is the name of the APIGroup that contains the resources. If this field is empty, then both kubernetes and origin API groups are assumed. // That means that if an action is requested against one of the enumerated resources in either the kubernetes or the origin API group, the request // will be allowed // +optional // +nullable APIGroups []string `json:"apiGroups,omitempty" protobuf:"bytes,3,rep,name=apiGroups"` - // Resources is a list of resources this rule applies to. ResourceAll represents all resources. + // resources is a list of resources this rule applies to. ResourceAll represents all resources. 
Resources []string `json:"resources" protobuf:"bytes,4,rep,name=resources"` - // ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed. + // resourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed. ResourceNames []string `json:"resourceNames,omitempty" protobuf:"bytes,5,rep,name=resourceNames"` // NonResourceURLsSlice is a set of partial urls that a user should have access to. *s are allowed, but only as the full, final step in the path // This name is intentionally different than the internal type so that the DefaultConvert works nicely and because the ordering may be different. @@ -73,7 +73,7 @@ type Role struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Rules holds all the PolicyRules for this Role + // rules holds all the PolicyRules for this Role Rules []PolicyRule `json:"rules" protobuf:"bytes,2,rep,name=rules"` } @@ -102,26 +102,26 @@ type RoleBinding struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // UserNames holds all the usernames directly bound to the role. + // userNames holds all the usernames directly bound to the role. // This field should only be specified when supporting legacy clients and servers. // See Subjects for further details. // +k8s:conversion-gen=false // +optional UserNames OptionalNames `json:"userNames" protobuf:"bytes,2,rep,name=userNames"` - // GroupNames holds all the groups directly bound to the role. + // groupNames holds all the groups directly bound to the role. // This field should only be specified when supporting legacy clients and servers. // See Subjects for further details. // +k8s:conversion-gen=false // +optional GroupNames OptionalNames `json:"groupNames" protobuf:"bytes,3,rep,name=groupNames"` - // Subjects hold object references to authorize with this rule. + // subjects hold object references to authorize with this rule. // This field is ignored if UserNames or GroupNames are specified to support legacy clients and servers. // Thus newer clients that do not need to support backwards compatibility should send // only fully qualified Subjects and should omit the UserNames and GroupNames fields. // Clients that need to support backwards compatibility can use this field to build the UserNames and GroupNames. Subjects []corev1.ObjectReference `json:"subjects" protobuf:"bytes,4,rep,name=subjects"` - // RoleRef can only reference the current namespace and the global namespace. + // roleRef can only reference the current namespace and the global namespace. // If the RoleRef cannot be resolved, the Authorizer must return an error. // Since Policy is a singleton, this is sufficient knowledge to locate a role. 
RoleRef corev1.ObjectReference `json:"roleRef" protobuf:"bytes,5,opt,name=roleRef"` @@ -129,17 +129,17 @@ type RoleBinding struct { // NamedRole relates a Role with a name type NamedRole struct { - // Name is the name of the role + // name is the name of the role Name string `json:"name" protobuf:"bytes,1,opt,name=name"` - // Role is the role being named + // role is the role being named Role Role `json:"role" protobuf:"bytes,2,opt,name=role"` } // NamedRoleBinding relates a role binding with a name type NamedRoleBinding struct { - // Name is the name of the role binding + // name is the name of the role binding Name string `json:"name" protobuf:"bytes,1,opt,name=name"` - // RoleBinding is the role binding being named + // roleBinding is the role binding being named RoleBinding RoleBinding `json:"roleBinding" protobuf:"bytes,2,opt,name=roleBinding"` } @@ -158,16 +158,16 @@ type SelfSubjectRulesReview struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,3,opt,name=metadata"` - // Spec adds information about how to conduct the check + // spec adds information about how to conduct the check Spec SelfSubjectRulesReviewSpec `json:"spec" protobuf:"bytes,1,opt,name=spec"` - // Status is completed by the server to tell which permissions you have + // status is completed by the server to tell which permissions you have Status SubjectRulesReviewStatus `json:"status,omitempty" protobuf:"bytes,2,opt,name=status"` } // SelfSubjectRulesReviewSpec adds information about how to conduct the check type SelfSubjectRulesReviewSpec struct { - // Scopes to use for the evaluation. Empty means "use the unscoped (full) permissions of the user/groups". + // scopes to use for the evaluation. Empty means "use the unscoped (full) permissions of the user/groups". // Nil means "use the scopes on this request". // +k8s:conversion-gen=false Scopes OptionalScopes `json:"scopes" protobuf:"bytes,1,rep,name=scopes"` @@ -188,28 +188,28 @@ type SubjectRulesReview struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,3,opt,name=metadata"` - // Spec adds information about how to conduct the check + // spec adds information about how to conduct the check Spec SubjectRulesReviewSpec `json:"spec" protobuf:"bytes,1,opt,name=spec"` - // Status is completed by the server to tell which permissions you have + // status is completed by the server to tell which permissions you have Status SubjectRulesReviewStatus `json:"status,omitempty" protobuf:"bytes,2,opt,name=status"` } // SubjectRulesReviewSpec adds information about how to conduct the check type SubjectRulesReviewSpec struct { - // User is optional. At least one of User and Groups must be specified. + // user is optional. At least one of User and Groups must be specified. User string `json:"user" protobuf:"bytes,1,opt,name=user"` - // Groups is optional. Groups is the list of groups to which the User belongs. At least one of User and Groups must be specified. + // groups is optional. Groups is the list of groups to which the User belongs. At least one of User and Groups must be specified. Groups []string `json:"groups" protobuf:"bytes,2,rep,name=groups"` - // Scopes to use for the evaluation. Empty means "use the unscoped (full) permissions of the user/groups". + // scopes to use for the evaluation. 
Empty means "use the unscoped (full) permissions of the user/groups". Scopes OptionalScopes `json:"scopes" protobuf:"bytes,3,opt,name=scopes"` } // SubjectRulesReviewStatus is contains the result of a rules check type SubjectRulesReviewStatus struct { - // Rules is the list of rules (no particular sort) that are allowed for the subject + // rules is the list of rules (no particular sort) that are allowed for the subject Rules []PolicyRule `json:"rules" protobuf:"bytes,1,rep,name=rules"` - // EvaluationError can appear in combination with Rules. It means some error happened during evaluation + // evaluationError can appear in combination with Rules. It means some error happened during evaluation // that may have prevented additional rules from being populated. EvaluationError string `json:"evaluationError,omitempty" protobuf:"bytes,2,opt,name=evaluationError"` } @@ -223,7 +223,7 @@ type SubjectRulesReviewStatus struct { type ResourceAccessReviewResponse struct { metav1.TypeMeta `json:",inline"` - // Namespace is the namespace used for the access review + // namespace is the namespace used for the access review Namespace string `json:"namespace,omitempty" protobuf:"bytes,1,opt,name=namespace"` // UsersSlice is the list of users who can perform the action // +k8s:conversion-gen=false @@ -269,13 +269,13 @@ type ResourceAccessReview struct { type SubjectAccessReviewResponse struct { metav1.TypeMeta `json:",inline"` - // Namespace is the namespace used for the access review + // namespace is the namespace used for the access review Namespace string `json:"namespace,omitempty" protobuf:"bytes,1,opt,name=namespace"` - // Allowed is required. True if the action would be allowed, false otherwise. + // allowed is required. True if the action would be allowed, false otherwise. Allowed bool `json:"allowed" protobuf:"varint,2,opt,name=allowed"` - // Reason is optional. It indicates why a request was allowed or denied. + // reason is optional. It indicates why a request was allowed or denied. Reason string `json:"reason,omitempty" protobuf:"bytes,3,opt,name=reason"` - // EvaluationError is an indication that some error occurred during the authorization check. + // evaluationError is an indication that some error occurred during the authorization check. // It is entirely possible to get an error and be able to continue determine authorization status in spite of it. This is // most common when a bound role is missing, but enough roles are still present and bound to reason about the request. EvaluationError string `json:"evaluationError,omitempty" protobuf:"bytes,4,opt,name=evaluationError"` @@ -309,12 +309,12 @@ type SubjectAccessReview struct { // Action describes the action being tested. Action `json:",inline" protobuf:"bytes,1,opt,name=Action"` - // User is optional. If both User and Groups are empty, the current authenticated user is used. + // user is optional. If both User and Groups are empty, the current authenticated user is used. User string `json:"user" protobuf:"bytes,2,opt,name=user"` // GroupsSlice is optional. Groups is the list of groups to which the User belongs. // +k8s:conversion-gen=false GroupsSlice []string `json:"groups" protobuf:"bytes,3,rep,name=groups"` - // Scopes to use for the evaluation. Empty means "use the unscoped (full) permissions of the user/groups". + // scopes to use for the evaluation. Empty means "use the unscoped (full) permissions of the user/groups". // Nil for a self-SAR, means "use the scopes on this request". // Nil for a regular SAR, means the same as empty. 
// +k8s:conversion-gen=false @@ -359,12 +359,12 @@ type LocalSubjectAccessReview struct { // Action describes the action being tested. The Namespace element is FORCED to the current namespace. Action `json:",inline" protobuf:"bytes,1,opt,name=Action"` - // User is optional. If both User and Groups are empty, the current authenticated user is used. + // user is optional. If both User and Groups are empty, the current authenticated user is used. User string `json:"user" protobuf:"bytes,2,opt,name=user"` - // Groups is optional. Groups is the list of groups to which the User belongs. + // groups is optional. Groups is the list of groups to which the User belongs. // +k8s:conversion-gen=false GroupsSlice []string `json:"groups" protobuf:"bytes,3,rep,name=groups"` - // Scopes to use for the evaluation. Empty means "use the unscoped (full) permissions of the user/groups". + // scopes to use for the evaluation. Empty means "use the unscoped (full) permissions of the user/groups". // Nil for a self-SAR, means "use the scopes on this request". // Nil for a regular SAR, means the same as empty. // +k8s:conversion-gen=false @@ -373,9 +373,9 @@ type LocalSubjectAccessReview struct { // Action describes a request to the API server type Action struct { - // Namespace is the namespace of the action being requested. Currently, there is no distinction between no namespace and all namespaces + // namespace is the namespace of the action being requested. Currently, there is no distinction between no namespace and all namespaces Namespace string `json:"namespace" protobuf:"bytes,1,opt,name=namespace"` - // Verb is one of: get, list, watch, create, update, delete + // verb is one of: get, list, watch, create, update, delete Verb string `json:"verb" protobuf:"bytes,2,opt,name=verb"` // Group is the API group of the resource // Serialized as resourceAPIGroup to avoid confusion with the 'groups' field when inlined @@ -383,15 +383,15 @@ type Action struct { // Version is the API version of the resource // Serialized as resourceAPIVersion to avoid confusion with TypeMeta.apiVersion and ObjectMeta.resourceVersion when inlined Version string `json:"resourceAPIVersion" protobuf:"bytes,4,opt,name=resourceAPIVersion"` - // Resource is one of the existing resource types + // resource is one of the existing resource types Resource string `json:"resource" protobuf:"bytes,5,opt,name=resource"` - // ResourceName is the name of the resource being requested for a "get" or deleted for a "delete" + // resourceName is the name of the resource being requested for a "get" or deleted for a "delete" ResourceName string `json:"resourceName" protobuf:"bytes,6,opt,name=resourceName"` - // Path is the path of a non resource URL + // path is the path of a non resource URL Path string `json:"path" protobuf:"bytes,8,opt,name=path"` - // IsNonResourceURL is true if this is a request for a non-resource URL (outside of the resource hierarchy) + // isNonResourceURL is true if this is a request for a non-resource URL (outside of the resource hierarchy) IsNonResourceURL bool `json:"isNonResourceURL" protobuf:"varint,9,opt,name=isNonResourceURL"` - // Content is the actual content of the request for create and update + // content is the actual content of the request for create and update // +kubebuilder:pruning:PreserveUnknownFields Content kruntime.RawExtension `json:"content,omitempty" protobuf:"bytes,7,opt,name=content"` } @@ -409,7 +409,7 @@ type RoleBindingList struct { // More info: 
https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Items is a list of RoleBindings + // items is a list of RoleBindings Items []RoleBinding `json:"items" protobuf:"bytes,2,rep,name=items"` } @@ -426,7 +426,7 @@ type RoleList struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Items is a list of Roles + // items is a list of Roles Items []Role `json:"items" protobuf:"bytes,2,rep,name=items"` } @@ -445,10 +445,10 @@ type ClusterRole struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Rules holds all the PolicyRules for this ClusterRole + // rules holds all the PolicyRules for this ClusterRole Rules []PolicyRule `json:"rules" protobuf:"bytes,2,rep,name=rules"` - // AggregationRule is an optional field that describes how to build the Rules for this ClusterRole. + // aggregationRule is an optional field that describes how to build the Rules for this ClusterRole. // If AggregationRule is set, then the Rules are controller managed and direct changes to Rules will be // stomped by the controller. AggregationRule *rbacv1.AggregationRule `json:"aggregationRule,omitempty" protobuf:"bytes,3,opt,name=aggregationRule"` @@ -471,26 +471,26 @@ type ClusterRoleBinding struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // UserNames holds all the usernames directly bound to the role. + // userNames holds all the usernames directly bound to the role. // This field should only be specified when supporting legacy clients and servers. // See Subjects for further details. // +k8s:conversion-gen=false // +optional UserNames OptionalNames `json:"userNames" protobuf:"bytes,2,rep,name=userNames"` - // GroupNames holds all the groups directly bound to the role. + // groupNames holds all the groups directly bound to the role. // This field should only be specified when supporting legacy clients and servers. // See Subjects for further details. // +k8s:conversion-gen=false // +optional GroupNames OptionalNames `json:"groupNames" protobuf:"bytes,3,rep,name=groupNames"` - // Subjects hold object references to authorize with this rule. + // subjects hold object references to authorize with this rule. // This field is ignored if UserNames or GroupNames are specified to support legacy clients and servers. // Thus newer clients that do not need to support backwards compatibility should send // only fully qualified Subjects and should omit the UserNames and GroupNames fields. // Clients that need to support backwards compatibility can use this field to build the UserNames and GroupNames. Subjects []corev1.ObjectReference `json:"subjects" protobuf:"bytes,4,rep,name=subjects"` - // RoleRef can only reference the current namespace and the global namespace. + // roleRef can only reference the current namespace and the global namespace. // If the ClusterRoleRef cannot be resolved, the Authorizer must return an error. // Since Policy is a singleton, this is sufficient knowledge to locate a role. 
RoleRef corev1.ObjectReference `json:"roleRef" protobuf:"bytes,5,opt,name=roleRef"` @@ -498,17 +498,17 @@ type ClusterRoleBinding struct { // NamedClusterRole relates a name with a cluster role type NamedClusterRole struct { - // Name is the name of the cluster role + // name is the name of the cluster role Name string `json:"name" protobuf:"bytes,1,opt,name=name"` - // Role is the cluster role being named + // role is the cluster role being named Role ClusterRole `json:"role" protobuf:"bytes,2,opt,name=role"` } // NamedClusterRoleBinding relates a name with a cluster role binding type NamedClusterRoleBinding struct { - // Name is the name of the cluster role binding + // name is the name of the cluster role binding Name string `json:"name" protobuf:"bytes,1,opt,name=name"` - // RoleBinding is the cluster role binding being named + // roleBinding is the cluster role binding being named RoleBinding ClusterRoleBinding `json:"roleBinding" protobuf:"bytes,2,opt,name=roleBinding"` } @@ -525,7 +525,7 @@ type ClusterRoleBindingList struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Items is a list of ClusterRoleBindings + // items is a list of ClusterRoleBindings Items []ClusterRoleBinding `json:"items" protobuf:"bytes,2,rep,name=items"` } @@ -542,7 +542,7 @@ type ClusterRoleList struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Items is a list of ClusterRoles + // items is a list of ClusterRoles Items []ClusterRole `json:"items" protobuf:"bytes,2,rep,name=items"` } @@ -569,22 +569,22 @@ type RoleBindingRestriction struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata" protobuf:"bytes,1,opt,name=metadata"` - // Spec defines the matcher. + // spec defines the matcher. Spec RoleBindingRestrictionSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` } // RoleBindingRestrictionSpec defines a rolebinding restriction. Exactly one // field must be non-nil. type RoleBindingRestrictionSpec struct { - // UserRestriction matches against user subjects. + // userrestriction matches against user subjects. // +nullable UserRestriction *UserRestriction `json:"userrestriction" protobuf:"bytes,1,opt,name=userrestriction"` - // GroupRestriction matches against group subjects. + // grouprestriction matches against group subjects. // +nullable GroupRestriction *GroupRestriction `json:"grouprestriction" protobuf:"bytes,2,opt,name=grouprestriction"` - // ServiceAccountRestriction matches against service-account subjects. + // serviceaccountrestriction matches against service-account subjects. // +nullable ServiceAccountRestriction *ServiceAccountRestriction `json:"serviceaccountrestriction" protobuf:"bytes,3,opt,name=serviceaccountrestriction"` } @@ -602,7 +602,7 @@ type RoleBindingRestrictionList struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Items is a list of RoleBindingRestriction objects. + // items is a list of RoleBindingRestriction objects. 
Items []RoleBindingRestriction `json:"items" protobuf:"bytes,2,rep,name=items"` } @@ -610,10 +610,10 @@ type RoleBindingRestrictionList struct { // a string match on the name of a group to which the user belongs, or a label // selector applied to the user labels. type UserRestriction struct { - // Users specifies a list of literal user names. + // users specifies a list of literal user names. Users []string `json:"users" protobuf:"bytes,1,rep,name=users"` - // Groups specifies a list of literal group names. + // groups specifies a list of literal group names. // +nullable Groups []string `json:"groups" protobuf:"bytes,2,rep,name=groups"` @@ -625,7 +625,7 @@ type UserRestriction struct { // GroupRestriction matches a group either by a string match on the group name // or a label selector applied to group labels. type GroupRestriction struct { - // Groups is a list of groups used to match against an individual user's + // groups is a list of groups used to match against an individual user's // groups. If the user is a member of one of the whitelisted groups, the user // is allowed to be bound to a role. // +nullable @@ -640,20 +640,20 @@ type GroupRestriction struct { // either the service-account name or the name of the service account's // namespace. type ServiceAccountRestriction struct { - // ServiceAccounts specifies a list of literal service-account names. + // serviceaccounts specifies a list of literal service-account names. ServiceAccounts []ServiceAccountReference `json:"serviceaccounts" protobuf:"bytes,1,rep,name=serviceaccounts"` - // Namespaces specifies a list of literal namespace names. + // namespaces specifies a list of literal namespace names. Namespaces []string `json:"namespaces" protobuf:"bytes,2,rep,name=namespaces"` } // ServiceAccountReference specifies a service account and namespace by their // names. type ServiceAccountReference struct { - // Name is the name of the service account. + // name is the name of the service account. Name string `json:"name" protobuf:"bytes,1,opt,name=name"` - // Namespace is the namespace of the service account. Service accounts from + // namespace is the namespace of the service account. Service accounts from // inside the whitelisted namespaces are allowed to be bound to roles. If // Namespace is empty, then the namespace of the RoleBindingRestriction in // which the ServiceAccountReference is embedded is used. diff --git a/vendor/github.com/openshift/api/authorization/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/authorization/v1/zz_generated.swagger_doc_generated.go index a8f9b374e..a1c28a3ec 100644 --- a/vendor/github.com/openshift/api/authorization/v1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/authorization/v1/zz_generated.swagger_doc_generated.go @@ -13,15 +13,15 @@ package v1 // AUTO-GENERATED FUNCTIONS START HERE var map_Action = map[string]string{ "": "Action describes a request to the API server", - "namespace": "Namespace is the namespace of the action being requested. Currently, there is no distinction between no namespace and all namespaces", - "verb": "Verb is one of: get, list, watch, create, update, delete", + "namespace": "namespace is the namespace of the action being requested. 
Currently, there is no distinction between no namespace and all namespaces", + "verb": "verb is one of: get, list, watch, create, update, delete", "resourceAPIGroup": "Group is the API group of the resource Serialized as resourceAPIGroup to avoid confusion with the 'groups' field when inlined", "resourceAPIVersion": "Version is the API version of the resource Serialized as resourceAPIVersion to avoid confusion with TypeMeta.apiVersion and ObjectMeta.resourceVersion when inlined", - "resource": "Resource is one of the existing resource types", - "resourceName": "ResourceName is the name of the resource being requested for a \"get\" or deleted for a \"delete\"", - "path": "Path is the path of a non resource URL", - "isNonResourceURL": "IsNonResourceURL is true if this is a request for a non-resource URL (outside of the resource hierarchy)", - "content": "Content is the actual content of the request for create and update", + "resource": "resource is one of the existing resource types", + "resourceName": "resourceName is the name of the resource being requested for a \"get\" or deleted for a \"delete\"", + "path": "path is the path of a non resource URL", + "isNonResourceURL": "isNonResourceURL is true if this is a request for a non-resource URL (outside of the resource hierarchy)", + "content": "content is the actual content of the request for create and update", } func (Action) SwaggerDoc() map[string]string { @@ -31,8 +31,8 @@ func (Action) SwaggerDoc() map[string]string { var map_ClusterRole = map[string]string{ "": "ClusterRole is a logical grouping of PolicyRules that can be referenced as a unit by ClusterRoleBindings.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "rules": "Rules holds all the PolicyRules for this ClusterRole", - "aggregationRule": "AggregationRule is an optional field that describes how to build the Rules for this ClusterRole. If AggregationRule is set, then the Rules are controller managed and direct changes to Rules will be stomped by the controller.", + "rules": "rules holds all the PolicyRules for this ClusterRole", + "aggregationRule": "aggregationRule is an optional field that describes how to build the Rules for this ClusterRole. If AggregationRule is set, then the Rules are controller managed and direct changes to Rules will be stomped by the controller.", } func (ClusterRole) SwaggerDoc() map[string]string { @@ -42,10 +42,10 @@ func (ClusterRole) SwaggerDoc() map[string]string { var map_ClusterRoleBinding = map[string]string{ "": "ClusterRoleBinding references a ClusterRole, but not contain it. It can reference any ClusterRole in the same namespace or in the global namespace. It adds who information via (Users and Groups) OR Subjects and namespace information by which namespace it exists in. ClusterRoleBindings in a given namespace only have effect in that namespace (excepting the master namespace which has power in all namespaces).\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "userNames": "UserNames holds all the usernames directly bound to the role. 
This field should only be specified when supporting legacy clients and servers. See Subjects for further details.", - "groupNames": "GroupNames holds all the groups directly bound to the role. This field should only be specified when supporting legacy clients and servers. See Subjects for further details.", - "subjects": "Subjects hold object references to authorize with this rule. This field is ignored if UserNames or GroupNames are specified to support legacy clients and servers. Thus newer clients that do not need to support backwards compatibility should send only fully qualified Subjects and should omit the UserNames and GroupNames fields. Clients that need to support backwards compatibility can use this field to build the UserNames and GroupNames.", - "roleRef": "RoleRef can only reference the current namespace and the global namespace. If the ClusterRoleRef cannot be resolved, the Authorizer must return an error. Since Policy is a singleton, this is sufficient knowledge to locate a role.", + "userNames": "userNames holds all the usernames directly bound to the role. This field should only be specified when supporting legacy clients and servers. See Subjects for further details.", + "groupNames": "groupNames holds all the groups directly bound to the role. This field should only be specified when supporting legacy clients and servers. See Subjects for further details.", + "subjects": "subjects hold object references to authorize with this rule. This field is ignored if UserNames or GroupNames are specified to support legacy clients and servers. Thus newer clients that do not need to support backwards compatibility should send only fully qualified Subjects and should omit the UserNames and GroupNames fields. Clients that need to support backwards compatibility can use this field to build the UserNames and GroupNames.", + "roleRef": "roleRef can only reference the current namespace and the global namespace. If the ClusterRoleRef cannot be resolved, the Authorizer must return an error. Since Policy is a singleton, this is sufficient knowledge to locate a role.", } func (ClusterRoleBinding) SwaggerDoc() map[string]string { @@ -55,7 +55,7 @@ func (ClusterRoleBinding) SwaggerDoc() map[string]string { var map_ClusterRoleBindingList = map[string]string{ "": "ClusterRoleBindingList is a collection of ClusterRoleBindings\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items is a list of ClusterRoleBindings", + "items": "items is a list of ClusterRoleBindings", } func (ClusterRoleBindingList) SwaggerDoc() map[string]string { @@ -65,7 +65,7 @@ func (ClusterRoleBindingList) SwaggerDoc() map[string]string { var map_ClusterRoleList = map[string]string{ "": "ClusterRoleList is a collection of ClusterRoles\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard list's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items is a list of ClusterRoles", + "items": "items is a list of ClusterRoles", } func (ClusterRoleList) SwaggerDoc() map[string]string { @@ -74,7 +74,7 @@ func (ClusterRoleList) SwaggerDoc() map[string]string { var map_GroupRestriction = map[string]string{ "": "GroupRestriction matches a group either by a string match on the group name or a label selector applied to group labels.", - "groups": "Groups is a list of groups used to match against an individual user's groups. If the user is a member of one of the whitelisted groups, the user is allowed to be bound to a role.", + "groups": "groups is a list of groups used to match against an individual user's groups. If the user is a member of one of the whitelisted groups, the user is allowed to be bound to a role.", "labels": "Selectors specifies a list of label selectors over group labels.", } @@ -102,9 +102,9 @@ func (LocalResourceAccessReview) SwaggerDoc() map[string]string { var map_LocalSubjectAccessReview = map[string]string{ "": "LocalSubjectAccessReview is an object for requesting information about whether a user or group can perform an action in a particular namespace\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "user": "User is optional. If both User and Groups are empty, the current authenticated user is used.", - "groups": "Groups is optional. Groups is the list of groups to which the User belongs.", - "scopes": "Scopes to use for the evaluation. Empty means \"use the unscoped (full) permissions of the user/groups\". Nil for a self-SAR, means \"use the scopes on this request\". Nil for a regular SAR, means the same as empty.", + "user": "user is optional. If both User and Groups are empty, the current authenticated user is used.", + "groups": "groups is optional. Groups is the list of groups to which the User belongs.", + "scopes": "scopes to use for the evaluation. Empty means \"use the unscoped (full) permissions of the user/groups\". Nil for a self-SAR, means \"use the scopes on this request\". 
Nil for a regular SAR, means the same as empty.", } func (LocalSubjectAccessReview) SwaggerDoc() map[string]string { @@ -113,8 +113,8 @@ func (LocalSubjectAccessReview) SwaggerDoc() map[string]string { var map_NamedClusterRole = map[string]string{ "": "NamedClusterRole relates a name with a cluster role", - "name": "Name is the name of the cluster role", - "role": "Role is the cluster role being named", + "name": "name is the name of the cluster role", + "role": "role is the cluster role being named", } func (NamedClusterRole) SwaggerDoc() map[string]string { @@ -123,8 +123,8 @@ func (NamedClusterRole) SwaggerDoc() map[string]string { var map_NamedClusterRoleBinding = map[string]string{ "": "NamedClusterRoleBinding relates a name with a cluster role binding", - "name": "Name is the name of the cluster role binding", - "roleBinding": "RoleBinding is the cluster role binding being named", + "name": "name is the name of the cluster role binding", + "roleBinding": "roleBinding is the cluster role binding being named", } func (NamedClusterRoleBinding) SwaggerDoc() map[string]string { @@ -133,8 +133,8 @@ func (NamedClusterRoleBinding) SwaggerDoc() map[string]string { var map_NamedRole = map[string]string{ "": "NamedRole relates a Role with a name", - "name": "Name is the name of the role", - "role": "Role is the role being named", + "name": "name is the name of the role", + "role": "role is the role being named", } func (NamedRole) SwaggerDoc() map[string]string { @@ -143,8 +143,8 @@ func (NamedRole) SwaggerDoc() map[string]string { var map_NamedRoleBinding = map[string]string{ "": "NamedRoleBinding relates a role binding with a name", - "name": "Name is the name of the role binding", - "roleBinding": "RoleBinding is the role binding being named", + "name": "name is the name of the role binding", + "roleBinding": "roleBinding is the role binding being named", } func (NamedRoleBinding) SwaggerDoc() map[string]string { @@ -153,11 +153,11 @@ func (NamedRoleBinding) SwaggerDoc() map[string]string { var map_PolicyRule = map[string]string{ "": "PolicyRule holds information that describes a policy rule, but does not contain information about who the rule applies to or which namespace the rule applies to.", - "verbs": "Verbs is a list of Verbs that apply to ALL the ResourceKinds and AttributeRestrictions contained in this rule. VerbAll represents all kinds.", - "attributeRestrictions": "AttributeRestrictions will vary depending on what the Authorizer/AuthorizationAttributeBuilder pair supports. If the Authorizer does not recognize how to handle the AttributeRestrictions, the Authorizer should report an error.", - "apiGroups": "APIGroups is the name of the APIGroup that contains the resources. If this field is empty, then both kubernetes and origin API groups are assumed. That means that if an action is requested against one of the enumerated resources in either the kubernetes or the origin API group, the request will be allowed", - "resources": "Resources is a list of resources this rule applies to. ResourceAll represents all resources.", - "resourceNames": "ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed.", + "verbs": "verbs is a list of Verbs that apply to ALL the ResourceKinds and AttributeRestrictions contained in this rule. VerbAll represents all kinds.", + "attributeRestrictions": "attributeRestrictions will vary depending on what the Authorizer/AuthorizationAttributeBuilder pair supports. 
If the Authorizer does not recognize how to handle the AttributeRestrictions, the Authorizer should report an error.", + "apiGroups": "apiGroups is the name of the APIGroup that contains the resources. If this field is empty, then both kubernetes and origin API groups are assumed. That means that if an action is requested against one of the enumerated resources in either the kubernetes or the origin API group, the request will be allowed", + "resources": "resources is a list of resources this rule applies to. ResourceAll represents all resources.", + "resourceNames": "resourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed.", "nonResourceURLs": "NonResourceURLsSlice is a set of partial urls that a user should have access to. *s are allowed, but only as the full, final step in the path This name is intentionally different than the internal type so that the DefaultConvert works nicely and because the ordering may be different.", } @@ -176,7 +176,7 @@ func (ResourceAccessReview) SwaggerDoc() map[string]string { var map_ResourceAccessReviewResponse = map[string]string{ "": "ResourceAccessReviewResponse describes who can perform the action\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", - "namespace": "Namespace is the namespace used for the access review", + "namespace": "namespace is the namespace used for the access review", "users": "UsersSlice is the list of users who can perform the action", "groups": "GroupsSlice is the list of groups who can perform the action", "evalutionError": "EvaluationError is an indication that some error occurred during resolution, but partial results can still be returned. It is entirely possible to get an error and be able to continue determine authorization status in spite of it. This is most common when a bound role is missing, but enough roles are still present and bound to reason about the request.", @@ -189,7 +189,7 @@ func (ResourceAccessReviewResponse) SwaggerDoc() map[string]string { var map_Role = map[string]string{ "": "Role is a logical grouping of PolicyRules that can be referenced as a unit by RoleBindings.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "rules": "Rules holds all the PolicyRules for this Role", + "rules": "rules holds all the PolicyRules for this Role", } func (Role) SwaggerDoc() map[string]string { @@ -199,10 +199,10 @@ func (Role) SwaggerDoc() map[string]string { var map_RoleBinding = map[string]string{ "": "RoleBinding references a Role, but not contain it. It can reference any Role in the same namespace or in the global namespace. It adds who information via (Users and Groups) OR Subjects and namespace information by which namespace it exists in. RoleBindings in a given namespace only have effect in that namespace (excepting the master namespace which has power in all namespaces).\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "userNames": "UserNames holds all the usernames directly bound to the role. 
This field should only be specified when supporting legacy clients and servers. See Subjects for further details.", - "groupNames": "GroupNames holds all the groups directly bound to the role. This field should only be specified when supporting legacy clients and servers. See Subjects for further details.", - "subjects": "Subjects hold object references to authorize with this rule. This field is ignored if UserNames or GroupNames are specified to support legacy clients and servers. Thus newer clients that do not need to support backwards compatibility should send only fully qualified Subjects and should omit the UserNames and GroupNames fields. Clients that need to support backwards compatibility can use this field to build the UserNames and GroupNames.", - "roleRef": "RoleRef can only reference the current namespace and the global namespace. If the RoleRef cannot be resolved, the Authorizer must return an error. Since Policy is a singleton, this is sufficient knowledge to locate a role.", + "userNames": "userNames holds all the usernames directly bound to the role. This field should only be specified when supporting legacy clients and servers. See Subjects for further details.", + "groupNames": "groupNames holds all the groups directly bound to the role. This field should only be specified when supporting legacy clients and servers. See Subjects for further details.", + "subjects": "subjects hold object references to authorize with this rule. This field is ignored if UserNames or GroupNames are specified to support legacy clients and servers. Thus newer clients that do not need to support backwards compatibility should send only fully qualified Subjects and should omit the UserNames and GroupNames fields. Clients that need to support backwards compatibility can use this field to build the UserNames and GroupNames.", + "roleRef": "roleRef can only reference the current namespace and the global namespace. If the RoleRef cannot be resolved, the Authorizer must return an error. Since Policy is a singleton, this is sufficient knowledge to locate a role.", } func (RoleBinding) SwaggerDoc() map[string]string { @@ -212,7 +212,7 @@ func (RoleBinding) SwaggerDoc() map[string]string { var map_RoleBindingList = map[string]string{ "": "RoleBindingList is a collection of RoleBindings\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items is a list of RoleBindings", + "items": "items is a list of RoleBindings", } func (RoleBindingList) SwaggerDoc() map[string]string { @@ -222,7 +222,7 @@ func (RoleBindingList) SwaggerDoc() map[string]string { var map_RoleBindingRestriction = map[string]string{ "": "RoleBindingRestriction is an object that can be matched against a subject (user, group, or service account) to determine whether rolebindings on that subject are allowed in the namespace to which the RoleBindingRestriction belongs. If any one of those RoleBindingRestriction objects matches a subject, rolebindings on that subject in the namespace are allowed.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "Spec defines the matcher.", + "spec": "spec defines the matcher.", } func (RoleBindingRestriction) SwaggerDoc() map[string]string { @@ -232,7 +232,7 @@ func (RoleBindingRestriction) SwaggerDoc() map[string]string { var map_RoleBindingRestrictionList = map[string]string{ "": "RoleBindingRestrictionList is a collection of RoleBindingRestriction objects.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items is a list of RoleBindingRestriction objects.", + "items": "items is a list of RoleBindingRestriction objects.", } func (RoleBindingRestrictionList) SwaggerDoc() map[string]string { @@ -241,9 +241,9 @@ func (RoleBindingRestrictionList) SwaggerDoc() map[string]string { var map_RoleBindingRestrictionSpec = map[string]string{ "": "RoleBindingRestrictionSpec defines a rolebinding restriction. Exactly one field must be non-nil.", - "userrestriction": "UserRestriction matches against user subjects.", - "grouprestriction": "GroupRestriction matches against group subjects.", - "serviceaccountrestriction": "ServiceAccountRestriction matches against service-account subjects.", + "userrestriction": "userrestriction matches against user subjects.", + "grouprestriction": "grouprestriction matches against group subjects.", + "serviceaccountrestriction": "serviceaccountrestriction matches against service-account subjects.", } func (RoleBindingRestrictionSpec) SwaggerDoc() map[string]string { @@ -253,7 +253,7 @@ func (RoleBindingRestrictionSpec) SwaggerDoc() map[string]string { var map_RoleList = map[string]string{ "": "RoleList is a collection of Roles\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items is a list of Roles", + "items": "items is a list of Roles", } func (RoleList) SwaggerDoc() map[string]string { @@ -263,8 +263,8 @@ func (RoleList) SwaggerDoc() map[string]string { var map_SelfSubjectRulesReview = map[string]string{ "": "SelfSubjectRulesReview is a resource you can create to determine which actions you can perform in a namespace\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "Spec adds information about how to conduct the check", - "status": "Status is completed by the server to tell which permissions you have", + "spec": "spec adds information about how to conduct the check", + "status": "status is completed by the server to tell which permissions you have", } func (SelfSubjectRulesReview) SwaggerDoc() map[string]string { @@ -273,7 +273,7 @@ func (SelfSubjectRulesReview) SwaggerDoc() map[string]string { var map_SelfSubjectRulesReviewSpec = map[string]string{ "": "SelfSubjectRulesReviewSpec adds information about how to conduct the check", - "scopes": "Scopes to use for the evaluation. 
Empty means \"use the unscoped (full) permissions of the user/groups\". Nil means \"use the scopes on this request\".", + "scopes": "scopes to use for the evaluation. Empty means \"use the unscoped (full) permissions of the user/groups\". Nil means \"use the scopes on this request\".", } func (SelfSubjectRulesReviewSpec) SwaggerDoc() map[string]string { @@ -282,8 +282,8 @@ func (SelfSubjectRulesReviewSpec) SwaggerDoc() map[string]string { var map_ServiceAccountReference = map[string]string{ "": "ServiceAccountReference specifies a service account and namespace by their names.", - "name": "Name is the name of the service account.", - "namespace": "Namespace is the namespace of the service account. Service accounts from inside the whitelisted namespaces are allowed to be bound to roles. If Namespace is empty, then the namespace of the RoleBindingRestriction in which the ServiceAccountReference is embedded is used.", + "name": "name is the name of the service account.", + "namespace": "namespace is the namespace of the service account. Service accounts from inside the whitelisted namespaces are allowed to be bound to roles. If Namespace is empty, then the namespace of the RoleBindingRestriction in which the ServiceAccountReference is embedded is used.", } func (ServiceAccountReference) SwaggerDoc() map[string]string { @@ -292,8 +292,8 @@ func (ServiceAccountReference) SwaggerDoc() map[string]string { var map_ServiceAccountRestriction = map[string]string{ "": "ServiceAccountRestriction matches a service account by a string match on either the service-account name or the name of the service account's namespace.", - "serviceaccounts": "ServiceAccounts specifies a list of literal service-account names.", - "namespaces": "Namespaces specifies a list of literal namespace names.", + "serviceaccounts": "serviceaccounts specifies a list of literal service-account names.", + "namespaces": "namespaces specifies a list of literal namespace names.", } func (ServiceAccountRestriction) SwaggerDoc() map[string]string { @@ -303,9 +303,9 @@ func (ServiceAccountRestriction) SwaggerDoc() map[string]string { var map_SubjectAccessReview = map[string]string{ "": "SubjectAccessReview is an object for requesting information about whether a user or group can perform an action\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "user": "User is optional. If both User and Groups are empty, the current authenticated user is used.", + "user": "user is optional. If both User and Groups are empty, the current authenticated user is used.", "groups": "GroupsSlice is optional. Groups is the list of groups to which the User belongs.", - "scopes": "Scopes to use for the evaluation. Empty means \"use the unscoped (full) permissions of the user/groups\". Nil for a self-SAR, means \"use the scopes on this request\". Nil for a regular SAR, means the same as empty.", + "scopes": "scopes to use for the evaluation. Empty means \"use the unscoped (full) permissions of the user/groups\". Nil for a self-SAR, means \"use the scopes on this request\". 
Nil for a regular SAR, means the same as empty.", } func (SubjectAccessReview) SwaggerDoc() map[string]string { @@ -314,10 +314,10 @@ func (SubjectAccessReview) SwaggerDoc() map[string]string { var map_SubjectAccessReviewResponse = map[string]string{ "": "SubjectAccessReviewResponse describes whether or not a user or group can perform an action\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", - "namespace": "Namespace is the namespace used for the access review", - "allowed": "Allowed is required. True if the action would be allowed, false otherwise.", - "reason": "Reason is optional. It indicates why a request was allowed or denied.", - "evaluationError": "EvaluationError is an indication that some error occurred during the authorization check. It is entirely possible to get an error and be able to continue determine authorization status in spite of it. This is most common when a bound role is missing, but enough roles are still present and bound to reason about the request.", + "namespace": "namespace is the namespace used for the access review", + "allowed": "allowed is required. True if the action would be allowed, false otherwise.", + "reason": "reason is optional. It indicates why a request was allowed or denied.", + "evaluationError": "evaluationError is an indication that some error occurred during the authorization check. It is entirely possible to get an error and be able to continue determine authorization status in spite of it. This is most common when a bound role is missing, but enough roles are still present and bound to reason about the request.", } func (SubjectAccessReviewResponse) SwaggerDoc() map[string]string { @@ -327,8 +327,8 @@ func (SubjectAccessReviewResponse) SwaggerDoc() map[string]string { var map_SubjectRulesReview = map[string]string{ "": "SubjectRulesReview is a resource you can create to determine which actions another user can perform in a namespace\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "Spec adds information about how to conduct the check", - "status": "Status is completed by the server to tell which permissions you have", + "spec": "spec adds information about how to conduct the check", + "status": "status is completed by the server to tell which permissions you have", } func (SubjectRulesReview) SwaggerDoc() map[string]string { @@ -337,9 +337,9 @@ func (SubjectRulesReview) SwaggerDoc() map[string]string { var map_SubjectRulesReviewSpec = map[string]string{ "": "SubjectRulesReviewSpec adds information about how to conduct the check", - "user": "User is optional. At least one of User and Groups must be specified.", - "groups": "Groups is optional. Groups is the list of groups to which the User belongs. At least one of User and Groups must be specified.", - "scopes": "Scopes to use for the evaluation. Empty means \"use the unscoped (full) permissions of the user/groups\".", + "user": "user is optional. At least one of User and Groups must be specified.", + "groups": "groups is optional. Groups is the list of groups to which the User belongs. At least one of User and Groups must be specified.", + "scopes": "scopes to use for the evaluation. 
Empty means \"use the unscoped (full) permissions of the user/groups\".", } func (SubjectRulesReviewSpec) SwaggerDoc() map[string]string { @@ -348,8 +348,8 @@ func (SubjectRulesReviewSpec) SwaggerDoc() map[string]string { var map_SubjectRulesReviewStatus = map[string]string{ "": "SubjectRulesReviewStatus is contains the result of a rules check", - "rules": "Rules is the list of rules (no particular sort) that are allowed for the subject", - "evaluationError": "EvaluationError can appear in combination with Rules. It means some error happened during evaluation that may have prevented additional rules from being populated.", + "rules": "rules is the list of rules (no particular sort) that are allowed for the subject", + "evaluationError": "evaluationError can appear in combination with Rules. It means some error happened during evaluation that may have prevented additional rules from being populated.", } func (SubjectRulesReviewStatus) SwaggerDoc() map[string]string { @@ -358,8 +358,8 @@ func (SubjectRulesReviewStatus) SwaggerDoc() map[string]string { var map_UserRestriction = map[string]string{ "": "UserRestriction matches a user either by a string match on the user name, a string match on the name of a group to which the user belongs, or a label selector applied to the user labels.", - "users": "Users specifies a list of literal user names.", - "groups": "Groups specifies a list of literal group names.", + "users": "users specifies a list of literal user names.", + "groups": "groups specifies a list of literal group names.", "labels": "Selectors specifies a list of label selectors over user labels.", } diff --git a/vendor/github.com/openshift/api/build/v1/generated.proto b/vendor/github.com/openshift/api/build/v1/generated.proto index b71670f4e..92ae73426 100644 --- a/vendor/github.com/openshift/api/build/v1/generated.proto +++ b/vendor/github.com/openshift/api/build/v1/generated.proto @@ -81,10 +81,10 @@ message Build { // BuildCondition describes the state of a build at a certain point. message BuildCondition { - // Type of build condition. + // type of build condition. optional string type = 1; - // Status of the condition, one of True, False, Unknown. + // status of the condition, one of True, False, Unknown. optional string status = 2; // The last time this condition was updated. @@ -141,7 +141,7 @@ message BuildConfigSpec { // +optional repeated BuildTriggerPolicy triggers = 1; - // RunPolicy describes how the new build created from this build + // runPolicy describes how the new build created from this build // configuration will be scheduled for execution. // This is optional, if not specified we default to "Serial". optional string runPolicy = 2; @@ -165,7 +165,7 @@ message BuildConfigStatus { // lastVersion is used to inform about number of last triggered build. optional int64 lastVersion = 1; - // ImageChangeTriggers captures the runtime state of any ImageChangeTrigger specified in the BuildConfigSpec, + // imageChangeTriggers captures the runtime state of any ImageChangeTrigger specified in the BuildConfigSpec, // including the value reconciled by the OpenShift APIServer for the lastTriggeredImageID. There is a single entry // in this array for each image change trigger in spec. Each trigger status references the ImageStreamTag that acts as the source of the trigger. repeated ImageChangeTriggerStatus imageChangeTriggers = 2; @@ -231,7 +231,7 @@ message BuildLogOptions { // slightly more or slightly less than the specified limit. 
optional int64 limitBytes = 8; - // noWait if true causes the call to return immediately even if the build + // nowait if true causes the call to return immediately even if the build // is not available yet. Otherwise the server will wait until the build has started. // TODO: Fix the tag to 'noWait' in v2 optional bool nowait = 9; @@ -259,7 +259,7 @@ message BuildOutput { // the build unless Namespace is specified. optional .k8s.io.api.core.v1.ObjectReference to = 1; - // PushSecret is the name of a Secret that would be used for setting + // pushSecret is the name of a Secret that would be used for setting // up the authentication for executing the Docker push to authentication // enabled Docker Registry (or Docker Hub). optional .k8s.io.api.core.v1.LocalObjectReference pushSecret = 2; @@ -392,10 +392,10 @@ message BuildRequest { // build configuration and contains information about those triggers. repeated BuildTriggerCause triggeredBy = 8; - // DockerStrategyOptions contains additional docker-strategy specific options for the build + // dockerStrategyOptions contains additional docker-strategy specific options for the build optional DockerStrategyOptions dockerStrategyOptions = 9; - // SourceStrategyOptions contains additional source-strategy specific options for the build + // sourceStrategyOptions contains additional source-strategy specific options for the build optional SourceStrategyOptions sourceStrategyOptions = 10; } @@ -510,7 +510,7 @@ message BuildStatus { // logSnippet is the last few lines of the build log. This value is only set for builds that failed. optional string logSnippet = 12; - // Conditions represents the latest available observations of a build's current state. + // conditions represents the latest available observations of a build's current state. // +patchMergeKey=type // +patchStrategy=merge repeated BuildCondition conditions = 13; @@ -550,7 +550,7 @@ message BuildStrategy { // customStrategy holds the parameters to the Custom build strategy optional CustomBuildStrategy customStrategy = 4; - // JenkinsPipelineStrategy holds the parameters to the Jenkins Pipeline build strategy. + // jenkinsPipelineStrategy holds the parameters to the Jenkins Pipeline build strategy. // Deprecated: use OpenShift Pipelines optional JenkinsPipelineBuildStrategy jenkinsPipelineStrategy = 5; } @@ -567,7 +567,7 @@ message BuildTriggerCause { // genericWebHook holds data about a builds generic webhook trigger. optional GenericWebHookCause genericWebHook = 2; - // gitHubWebHook represents data for a GitHub webhook that fired a + // githubWebHook represents data for a GitHub webhook that fired a // specific build. optional GitHubWebHookCause githubWebHook = 3; @@ -575,11 +575,11 @@ message BuildTriggerCause { // that triggered a new build. optional ImageChangeCause imageChangeBuild = 4; - // GitLabWebHook represents data for a GitLab webhook that fired a specific + // gitlabWebHook represents data for a GitLab webhook that fired a specific // build. optional GitLabWebHookCause gitlabWebHook = 5; - // BitbucketWebHook represents data for a Bitbucket webhook that fired a + // bitbucketWebHook represents data for a Bitbucket webhook that fired a // specific build. optional BitbucketWebHookCause bitbucketWebHook = 6; } @@ -743,10 +743,10 @@ message CommonSpec { // causes into struct so we can share it in the specific causes; it is too late for // GitHub and Generic but we can leverage this pattern with GitLab and Bitbucket. 
message CommonWebHookCause { - // Revision is the git source revision information of the trigger. + // revision is the git source revision information of the trigger. optional SourceRevision revision = 1; - // Secret is the obfuscated webhook secret that triggered a build. + // secret is the obfuscated webhook secret that triggered a build. optional string secret = 2; } @@ -884,7 +884,7 @@ message GenericWebHookEvent { // ValueFrom is not supported. repeated .k8s.io.api.core.v1.EnvVar env = 3; - // DockerStrategyOptions contains additional docker-strategy specific options for the build + // dockerStrategyOptions contains additional docker-strategy specific options for the build optional DockerStrategyOptions dockerStrategyOptions = 4; } @@ -918,7 +918,7 @@ message GitInfo { optional GitSourceRevision gitSourceRevision = 2; - // Refs is a list of GitRefs for the provided repo - generally sent + // refs is a list of GitRefs for the provided repo - generally sent // when used from a post-receive hook. This field is optional and is // used when sending multiple refs repeated GitRefInfo refs = 3; @@ -1061,12 +1061,12 @@ message ImageStreamTagReference { // JenkinsPipelineBuildStrategy holds parameters specific to a Jenkins Pipeline build. // Deprecated: use OpenShift Pipelines message JenkinsPipelineBuildStrategy { - // JenkinsfilePath is the optional path of the Jenkinsfile that will be used to configure the pipeline + // jenkinsfilePath is the optional path of the Jenkinsfile that will be used to configure the pipeline // relative to the root of the context (contextDir). If both JenkinsfilePath & Jenkinsfile are // both not specified, this defaults to Jenkinsfile in the root of the specified contextDir. optional string jenkinsfilePath = 1; - // Jenkinsfile defines the optional raw contents of a Jenkinsfile which defines a Jenkins pipeline build. + // jenkinsfile defines the optional raw contents of a Jenkinsfile which defines a Jenkins pipeline build. optional string jenkinsfile = 2; // env contains additional environment variables you want to pass into a build pipeline. @@ -1115,7 +1115,7 @@ message SecretBuildSource { // SecretLocalReference contains information that points to the local secret being used message SecretLocalReference { - // Name is the name of the resource in the same namespace being referenced + // name is the name of the resource in the same namespace being referenced optional string name = 1; } @@ -1176,7 +1176,7 @@ message SourceRevision { // +k8s:conversion-gen=false optional string type = 1; - // Git contains information about git-based build source + // git contains information about git-based build source optional GitSourceRevision git = 2; } diff --git a/vendor/github.com/openshift/api/build/v1/types.go b/vendor/github.com/openshift/api/build/v1/types.go index ba836aad8..12bf67db1 100644 --- a/vendor/github.com/openshift/api/build/v1/types.go +++ b/vendor/github.com/openshift/api/build/v1/types.go @@ -116,7 +116,7 @@ type BuildTriggerCause struct { // genericWebHook holds data about a builds generic webhook trigger. GenericWebHook *GenericWebHookCause `json:"genericWebHook,omitempty" protobuf:"bytes,2,opt,name=genericWebHook"` - // gitHubWebHook represents data for a GitHub webhook that fired a + // githubWebHook represents data for a GitHub webhook that fired a //specific build. 
GitHubWebHook *GitHubWebHookCause `json:"githubWebHook,omitempty" protobuf:"bytes,3,opt,name=githubWebHook"` @@ -124,11 +124,11 @@ type BuildTriggerCause struct { // that triggered a new build. ImageChangeBuild *ImageChangeCause `json:"imageChangeBuild,omitempty" protobuf:"bytes,4,opt,name=imageChangeBuild"` - // GitLabWebHook represents data for a GitLab webhook that fired a specific + // gitlabWebHook represents data for a GitLab webhook that fired a specific // build. GitLabWebHook *GitLabWebHookCause `json:"gitlabWebHook,omitempty" protobuf:"bytes,5,opt,name=gitlabWebHook"` - // BitbucketWebHook represents data for a Bitbucket webhook that fired a + // bitbucketWebHook represents data for a Bitbucket webhook that fired a // specific build. BitbucketWebHook *BitbucketWebHookCause `json:"bitbucketWebHook,omitempty" protobuf:"bytes,6,opt,name=bitbucketWebHook"` } @@ -158,10 +158,10 @@ type GitHubWebHookCause struct { // causes into struct so we can share it in the specific causes; it is too late for // GitHub and Generic but we can leverage this pattern with GitLab and Bitbucket. type CommonWebHookCause struct { - // Revision is the git source revision information of the trigger. + // revision is the git source revision information of the trigger. Revision *SourceRevision `json:"revision,omitempty" protobuf:"bytes,1,opt,name=revision"` - // Secret is the obfuscated webhook secret that triggered a build. + // secret is the obfuscated webhook secret that triggered a build. Secret string `json:"secret,omitempty" protobuf:"bytes,2,opt,name=secret"` } @@ -237,7 +237,7 @@ type BuildStatus struct { // logSnippet is the last few lines of the build log. This value is only set for builds that failed. LogSnippet string `json:"logSnippet,omitempty" protobuf:"bytes,12,opt,name=logSnippet"` - // Conditions represents the latest available observations of a build's current state. + // conditions represents the latest available observations of a build's current state. // +patchMergeKey=type // +patchStrategy=merge Conditions []BuildCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,13,rep,name=conditions"` @@ -358,9 +358,9 @@ type BuildConditionType string // BuildCondition describes the state of a build at a certain point. type BuildCondition struct { - // Type of build condition. + // type of build condition. Type BuildConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=BuildConditionType"` - // Status of the condition, one of True, False, Unknown. + // status of the condition, one of True, False, Unknown. Status corev1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/kubernetes/pkg/api/v1.ConditionStatus"` // The last time this condition was updated. 
LastUpdateTime metav1.Time `json:"lastUpdateTime,omitempty" protobuf:"bytes,6,opt,name=lastUpdateTime"` @@ -562,7 +562,7 @@ type SourceRevision struct { // +k8s:conversion-gen=false Type BuildSourceType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=BuildSourceType"` - // Git contains information about git-based build source + // git contains information about git-based build source Git *GitSourceRevision `json:"git,omitempty" protobuf:"bytes,2,opt,name=git"` } @@ -632,7 +632,7 @@ type BuildStrategy struct { // customStrategy holds the parameters to the Custom build strategy CustomStrategy *CustomBuildStrategy `json:"customStrategy,omitempty" protobuf:"bytes,4,opt,name=customStrategy"` - // JenkinsPipelineStrategy holds the parameters to the Jenkins Pipeline build strategy. + // jenkinsPipelineStrategy holds the parameters to the Jenkins Pipeline build strategy. // Deprecated: use OpenShift Pipelines JenkinsPipelineStrategy *JenkinsPipelineBuildStrategy `json:"jenkinsPipelineStrategy,omitempty" protobuf:"bytes,5,opt,name=jenkinsPipelineStrategy"` } @@ -801,12 +801,12 @@ type SourceBuildStrategy struct { // JenkinsPipelineBuildStrategy holds parameters specific to a Jenkins Pipeline build. // Deprecated: use OpenShift Pipelines type JenkinsPipelineBuildStrategy struct { - // JenkinsfilePath is the optional path of the Jenkinsfile that will be used to configure the pipeline + // jenkinsfilePath is the optional path of the Jenkinsfile that will be used to configure the pipeline // relative to the root of the context (contextDir). If both JenkinsfilePath & Jenkinsfile are // both not specified, this defaults to Jenkinsfile in the root of the specified contextDir. JenkinsfilePath string `json:"jenkinsfilePath,omitempty" protobuf:"bytes,1,opt,name=jenkinsfilePath"` - // Jenkinsfile defines the optional raw contents of a Jenkinsfile which defines a Jenkins pipeline build. + // jenkinsfile defines the optional raw contents of a Jenkinsfile which defines a Jenkins pipeline build. Jenkinsfile string `json:"jenkinsfile,omitempty" protobuf:"bytes,2,opt,name=jenkinsfile"` // env contains additional environment variables you want to pass into a build pipeline. @@ -911,7 +911,7 @@ type BuildOutput struct { // the build unless Namespace is specified. To *corev1.ObjectReference `json:"to,omitempty" protobuf:"bytes,1,opt,name=to"` - // PushSecret is the name of a Secret that would be used for setting + // pushSecret is the name of a Secret that would be used for setting // up the authentication for executing the Docker push to authentication // enabled Docker Registry (or Docker Hub). PushSecret *corev1.LocalObjectReference `json:"pushSecret,omitempty" protobuf:"bytes,2,opt,name=pushSecret"` @@ -964,7 +964,7 @@ type BuildConfigSpec struct { // +optional Triggers []BuildTriggerPolicy `json:"triggers,omitempty" protobuf:"bytes,1,rep,name=triggers"` - // RunPolicy describes how the new build created from this build + // runPolicy describes how the new build created from this build // configuration will be scheduled for execution. // This is optional, if not specified we default to "Serial". RunPolicy BuildRunPolicy `json:"runPolicy,omitempty" protobuf:"bytes,2,opt,name=runPolicy,casttype=BuildRunPolicy"` @@ -1007,7 +1007,7 @@ type BuildConfigStatus struct { // lastVersion is used to inform about number of last triggered build. 
LastVersion int64 `json:"lastVersion" protobuf:"varint,1,opt,name=lastVersion"` - // ImageChangeTriggers captures the runtime state of any ImageChangeTrigger specified in the BuildConfigSpec, + // imageChangeTriggers captures the runtime state of any ImageChangeTrigger specified in the BuildConfigSpec, // including the value reconciled by the OpenShift APIServer for the lastTriggeredImageID. There is a single entry // in this array for each image change trigger in spec. Each trigger status references the ImageStreamTag that acts as the source of the trigger. ImageChangeTriggers []ImageChangeTriggerStatus `json:"imageChangeTriggers,omitempty" protobuf:"bytes,2,rep,name=imageChangeTriggers"` @@ -1015,7 +1015,7 @@ type BuildConfigStatus struct { // SecretLocalReference contains information that points to the local secret being used type SecretLocalReference struct { - // Name is the name of the resource in the same namespace being referenced + // name is the name of the resource in the same namespace being referenced Name string `json:"name" protobuf:"bytes,1,opt,name=name"` } @@ -1203,7 +1203,7 @@ type GenericWebHookEvent struct { // ValueFrom is not supported. Env []corev1.EnvVar `json:"env,omitempty" protobuf:"bytes,3,rep,name=env"` - // DockerStrategyOptions contains additional docker-strategy specific options for the build + // dockerStrategyOptions contains additional docker-strategy specific options for the build DockerStrategyOptions *DockerStrategyOptions `json:"dockerStrategyOptions,omitempty" protobuf:"bytes,4,opt,name=dockerStrategyOptions"` } @@ -1212,7 +1212,7 @@ type GitInfo struct { GitBuildSource `json:",inline" protobuf:"bytes,1,opt,name=gitBuildSource"` GitSourceRevision `json:",inline" protobuf:"bytes,2,opt,name=gitSourceRevision"` - // Refs is a list of GitRefs for the provided repo - generally sent + // refs is a list of GitRefs for the provided repo - generally sent // when used from a post-receive hook. This field is optional and is // used when sending multiple refs Refs []GitRefInfo `json:"refs" protobuf:"bytes,3,rep,name=refs"` @@ -1287,10 +1287,10 @@ type BuildRequest struct { // build configuration and contains information about those triggers. TriggeredBy []BuildTriggerCause `json:"triggeredBy,omitempty" protobuf:"bytes,8,rep,name=triggeredBy"` - // DockerStrategyOptions contains additional docker-strategy specific options for the build + // dockerStrategyOptions contains additional docker-strategy specific options for the build DockerStrategyOptions *DockerStrategyOptions `json:"dockerStrategyOptions,omitempty" protobuf:"bytes,9,opt,name=dockerStrategyOptions"` - // SourceStrategyOptions contains additional source-strategy specific options for the build + // sourceStrategyOptions contains additional source-strategy specific options for the build SourceStrategyOptions *SourceStrategyOptions `json:"sourceStrategyOptions,omitempty" protobuf:"bytes,10,opt,name=sourceStrategyOptions"` } @@ -1368,7 +1368,7 @@ type BuildLogOptions struct { // slightly more or slightly less than the specified limit. LimitBytes *int64 `json:"limitBytes,omitempty" protobuf:"varint,8,opt,name=limitBytes"` - // noWait if true causes the call to return immediately even if the build + // nowait if true causes the call to return immediately even if the build // is not available yet. Otherwise the server will wait until the build has started. 
// TODO: Fix the tag to 'noWait' in v2 NoWait bool `json:"nowait,omitempty" protobuf:"varint,9,opt,name=nowait"` diff --git a/vendor/github.com/openshift/api/build/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/build/v1/zz_generated.swagger_doc_generated.go index 72ff507b7..1da784353 100644 --- a/vendor/github.com/openshift/api/build/v1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/build/v1/zz_generated.swagger_doc_generated.go @@ -57,8 +57,8 @@ func (Build) SwaggerDoc() map[string]string { var map_BuildCondition = map[string]string{ "": "BuildCondition describes the state of a build at a certain point.", - "type": "Type of build condition.", - "status": "Status of the condition, one of True, False, Unknown.", + "type": "type of build condition.", + "status": "status of the condition, one of True, False, Unknown.", "lastUpdateTime": "The last time this condition was updated.", "lastTransitionTime": "The last time the condition transitioned from one status to another.", "reason": "The reason for the condition's last transition.", @@ -93,7 +93,7 @@ func (BuildConfigList) SwaggerDoc() map[string]string { var map_BuildConfigSpec = map[string]string{ "": "BuildConfigSpec describes when and how builds are created", "triggers": "triggers determine how new Builds can be launched from a BuildConfig. If no triggers are defined, a new build can only occur as a result of an explicit client build creation.", - "runPolicy": "RunPolicy describes how the new build created from this build configuration will be scheduled for execution. This is optional, if not specified we default to \"Serial\".", + "runPolicy": "runPolicy describes how the new build created from this build configuration will be scheduled for execution. This is optional, if not specified we default to \"Serial\".", "successfulBuildsHistoryLimit": "successfulBuildsHistoryLimit is the number of old successful builds to retain. When a BuildConfig is created, the 5 most recent successful builds are retained unless this value is set. If removed after the BuildConfig has been created, all successful builds are retained.", "failedBuildsHistoryLimit": "failedBuildsHistoryLimit is the number of old failed builds to retain. When a BuildConfig is created, the 5 most recent failed builds are retained unless this value is set. If removed after the BuildConfig has been created, all failed builds are retained.", } @@ -105,7 +105,7 @@ func (BuildConfigSpec) SwaggerDoc() map[string]string { var map_BuildConfigStatus = map[string]string{ "": "BuildConfigStatus contains current state of the build config object.", "lastVersion": "lastVersion is used to inform about number of last triggered build.", - "imageChangeTriggers": "ImageChangeTriggers captures the runtime state of any ImageChangeTrigger specified in the BuildConfigSpec, including the value reconciled by the OpenShift APIServer for the lastTriggeredImageID. There is a single entry in this array for each image change trigger in spec. Each trigger status references the ImageStreamTag that acts as the source of the trigger.", + "imageChangeTriggers": "imageChangeTriggers captures the runtime state of any ImageChangeTrigger specified in the BuildConfigSpec, including the value reconciled by the OpenShift APIServer for the lastTriggeredImageID. There is a single entry in this array for each image change trigger in spec. 
Each trigger status references the ImageStreamTag that acts as the source of the trigger.", } func (BuildConfigStatus) SwaggerDoc() map[string]string { @@ -140,7 +140,7 @@ var map_BuildLogOptions = map[string]string{ "timestamps": "timestamps, If true, add an RFC3339 or RFC3339Nano timestamp at the beginning of every line of log output. Defaults to false.", "tailLines": "tailLines, If set, is the number of lines from the end of the logs to show. If not specified, logs are shown from the creation of the container or sinceSeconds or sinceTime", "limitBytes": "limitBytes, If set, is the number of bytes to read from the server before terminating the log output. This may not display a complete final line of logging, and may return slightly more or slightly less than the specified limit.", - "nowait": "noWait if true causes the call to return immediately even if the build is not available yet. Otherwise the server will wait until the build has started.", + "nowait": "nowait if true causes the call to return immediately even if the build is not available yet. Otherwise the server will wait until the build has started.", "version": "version of the build for which to view logs.", "insecureSkipTLSVerifyBackend": "insecureSkipTLSVerifyBackend indicates that the apiserver should not confirm the validity of the serving certificate of the backend it is connecting to. This will make the HTTPS connection between the apiserver and the backend insecure. This means the apiserver cannot verify the log data it is receiving came from the real kubelet. If the kubelet is configured to verify the apiserver's TLS credentials, it does not mean the connection to the real kubelet is vulnerable to a man in the middle attack (e.g. an attacker could not intercept the actual log data coming from the real kubelet).", } @@ -152,7 +152,7 @@ func (BuildLogOptions) SwaggerDoc() map[string]string { var map_BuildOutput = map[string]string{ "": "BuildOutput is input to a build strategy and describes the container image that the strategy should produce.", "to": "to defines an optional location to push the output of this build to. Kind must be one of 'ImageStreamTag' or 'DockerImage'. This value will be used to look up a container image repository to push to. In the case of an ImageStreamTag, the ImageStreamTag will be looked for in the namespace of the build unless Namespace is specified.", - "pushSecret": "PushSecret is the name of a Secret that would be used for setting up the authentication for executing the Docker push to authentication enabled Docker Registry (or Docker Hub).", + "pushSecret": "pushSecret is the name of a Secret that would be used for setting up the authentication for executing the Docker push to authentication enabled Docker Registry (or Docker Hub).", "imageLabels": "imageLabels define a list of labels that are applied to the resulting image. If there are multiple labels with the same name then the last one in the list is used.", } @@ -181,8 +181,8 @@ var map_BuildRequest = map[string]string{ "lastVersion": "lastVersion (optional) is the LastVersion of the BuildConfig that was used to generate the build. 
If the BuildConfig in the generator doesn't match, a build will not be generated.", "env": "env contains additional environment variables you want to pass into a builder container.", "triggeredBy": "triggeredBy describes which triggers started the most recent update to the build configuration and contains information about those triggers.", - "dockerStrategyOptions": "DockerStrategyOptions contains additional docker-strategy specific options for the build", - "sourceStrategyOptions": "SourceStrategyOptions contains additional source-strategy specific options for the build", + "dockerStrategyOptions": "dockerStrategyOptions contains additional docker-strategy specific options for the build", + "sourceStrategyOptions": "sourceStrategyOptions contains additional source-strategy specific options for the build", } func (BuildRequest) SwaggerDoc() map[string]string { @@ -229,7 +229,7 @@ var map_BuildStatus = map[string]string{ "output": "output describes the container image the build has produced.", "stages": "stages contains details about each stage that occurs during the build including start time, duration (in milliseconds), and the steps that occured within each stage.", "logSnippet": "logSnippet is the last few lines of the build log. This value is only set for builds that failed.", - "conditions": "Conditions represents the latest available observations of a build's current state.", + "conditions": "conditions represents the latest available observations of a build's current state.", } func (BuildStatus) SwaggerDoc() map[string]string { @@ -260,7 +260,7 @@ var map_BuildStrategy = map[string]string{ "dockerStrategy": "dockerStrategy holds the parameters to the container image build strategy.", "sourceStrategy": "sourceStrategy holds the parameters to the Source build strategy.", "customStrategy": "customStrategy holds the parameters to the Custom build strategy", - "jenkinsPipelineStrategy": "JenkinsPipelineStrategy holds the parameters to the Jenkins Pipeline build strategy. Deprecated: use OpenShift Pipelines", + "jenkinsPipelineStrategy": "jenkinsPipelineStrategy holds the parameters to the Jenkins Pipeline build strategy. Deprecated: use OpenShift Pipelines", } func (BuildStrategy) SwaggerDoc() map[string]string { @@ -271,10 +271,10 @@ var map_BuildTriggerCause = map[string]string{ "": "BuildTriggerCause holds information about a triggered build. It is used for displaying build trigger data for each build and build configuration in oc describe. It is also used to describe which triggers led to the most recent update in the build configuration.", "message": "message is used to store a human readable message for why the build was triggered. 
E.g.: \"Manually triggered by user\", \"Configuration change\",etc.", "genericWebHook": "genericWebHook holds data about a builds generic webhook trigger.", - "githubWebHook": "gitHubWebHook represents data for a GitHub webhook that fired a specific build.", + "githubWebHook": "githubWebHook represents data for a GitHub webhook that fired a specific build.", "imageChangeBuild": "imageChangeBuild stores information about an imagechange event that triggered a new build.", - "gitlabWebHook": "GitLabWebHook represents data for a GitLab webhook that fired a specific build.", - "bitbucketWebHook": "BitbucketWebHook represents data for a Bitbucket webhook that fired a specific build.", + "gitlabWebHook": "gitlabWebHook represents data for a GitLab webhook that fired a specific build.", + "bitbucketWebHook": "bitbucketWebHook represents data for a Bitbucket webhook that fired a specific build.", } func (BuildTriggerCause) SwaggerDoc() map[string]string { @@ -347,8 +347,8 @@ func (CommonSpec) SwaggerDoc() map[string]string { var map_CommonWebHookCause = map[string]string{ "": "CommonWebHookCause factors out the identical format of these webhook causes into struct so we can share it in the specific causes; it is too late for GitHub and Generic but we can leverage this pattern with GitLab and Bitbucket.", - "revision": "Revision is the git source revision information of the trigger.", - "secret": "Secret is the obfuscated webhook secret that triggered a build.", + "revision": "revision is the git source revision information of the trigger.", + "secret": "secret is the obfuscated webhook secret that triggered a build.", } func (CommonWebHookCause) SwaggerDoc() map[string]string { @@ -422,7 +422,7 @@ var map_GenericWebHookEvent = map[string]string{ "type": "type is the type of source repository", "git": "git is the git information if the Type is BuildSourceGit", "env": "env contains additional environment variables you want to pass into a builder container. ValueFrom is not supported.", - "dockerStrategyOptions": "DockerStrategyOptions contains additional docker-strategy specific options for the build", + "dockerStrategyOptions": "dockerStrategyOptions contains additional docker-strategy specific options for the build", } func (GenericWebHookEvent) SwaggerDoc() map[string]string { @@ -451,7 +451,7 @@ func (GitHubWebHookCause) SwaggerDoc() map[string]string { var map_GitInfo = map[string]string{ "": "GitInfo is the aggregated git information for a generic webhook post", - "refs": "Refs is a list of GitRefs for the provided repo - generally sent when used from a post-receive hook. This field is optional and is used when sending multiple refs", + "refs": "refs is a list of GitRefs for the provided repo - generally sent when used from a post-receive hook. This field is optional and is used when sending multiple refs", } func (GitInfo) SwaggerDoc() map[string]string { @@ -562,8 +562,8 @@ func (ImageStreamTagReference) SwaggerDoc() map[string]string { var map_JenkinsPipelineBuildStrategy = map[string]string{ "": "JenkinsPipelineBuildStrategy holds parameters specific to a Jenkins Pipeline build. Deprecated: use OpenShift Pipelines", - "jenkinsfilePath": "JenkinsfilePath is the optional path of the Jenkinsfile that will be used to configure the pipeline relative to the root of the context (contextDir). 
If both JenkinsfilePath & Jenkinsfile are both not specified, this defaults to Jenkinsfile in the root of the specified contextDir.", - "jenkinsfile": "Jenkinsfile defines the optional raw contents of a Jenkinsfile which defines a Jenkins pipeline build.", + "jenkinsfilePath": "jenkinsfilePath is the optional path of the Jenkinsfile that will be used to configure the pipeline relative to the root of the context (contextDir). If both JenkinsfilePath & Jenkinsfile are both not specified, this defaults to Jenkinsfile in the root of the specified contextDir.", + "jenkinsfile": "jenkinsfile defines the optional raw contents of a Jenkinsfile which defines a Jenkins pipeline build.", "env": "env contains additional environment variables you want to pass into a build pipeline.", } @@ -594,7 +594,7 @@ func (SecretBuildSource) SwaggerDoc() map[string]string { var map_SecretLocalReference = map[string]string{ "": "SecretLocalReference contains information that points to the local secret being used", - "name": "Name is the name of the resource in the same namespace being referenced", + "name": "name is the name of the resource in the same namespace being referenced", } func (SecretLocalReference) SwaggerDoc() map[string]string { @@ -639,7 +639,7 @@ func (SourceControlUser) SwaggerDoc() map[string]string { var map_SourceRevision = map[string]string{ "": "SourceRevision is the revision or commit information from the source for the build", "type": "type of the build source, may be one of 'Source', 'Dockerfile', 'Binary', or 'Images'", - "git": "Git contains information about git-based build source", + "git": "git contains information about git-based build source", } func (SourceRevision) SwaggerDoc() map[string]string { diff --git a/vendor/github.com/openshift/api/cloudnetwork/v1/generated.proto b/vendor/github.com/openshift/api/cloudnetwork/v1/generated.proto index 085b49b25..aee82514e 100644 --- a/vendor/github.com/openshift/api/cloudnetwork/v1/generated.proto +++ b/vendor/github.com/openshift/api/cloudnetwork/v1/generated.proto @@ -40,12 +40,10 @@ message CloudPrivateIPConfig { optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // spec is the definition of the desired private IP request. - // +kubebuilder:validation:Required // +required optional CloudPrivateIPConfigSpec spec = 2; // status is the observed status of the desired private IP request. Read-only. 
- // +kubebuilder:validation:Optional // +optional optional CloudPrivateIPConfigStatus status = 3; } @@ -68,7 +66,6 @@ message CloudPrivateIPConfigList { // +k8s:openapi-gen=true message CloudPrivateIPConfigSpec { // node is the node name, as specified by the Kubernetes field: node.metadata.name - // +kubebuilder:validation:Optional // +optional optional string node = 1; } @@ -77,13 +74,15 @@ message CloudPrivateIPConfigSpec { // +k8s:openapi-gen=true message CloudPrivateIPConfigStatus { // node is the node name, as specified by the Kubernetes field: node.metadata.name - // +kubebuilder:validation:Optional // +optional optional string node = 1; // condition is the assignment condition of the private IP and its status - // +kubebuilder:validation:Required // +required + // +listType=map + // +listMapKey=type + // +patchMergeKey=type + // +patchStrategy=merge repeated .k8s.io.apimachinery.pkg.apis.meta.v1.Condition conditions = 2; } diff --git a/vendor/github.com/openshift/api/cloudnetwork/v1/types.go b/vendor/github.com/openshift/api/cloudnetwork/v1/types.go index 4c19e44c3..7508e1505 100644 --- a/vendor/github.com/openshift/api/cloudnetwork/v1/types.go +++ b/vendor/github.com/openshift/api/cloudnetwork/v1/types.go @@ -33,11 +33,9 @@ type CloudPrivateIPConfig struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // spec is the definition of the desired private IP request. - // +kubebuilder:validation:Required // +required Spec CloudPrivateIPConfigSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` // status is the observed status of the desired private IP request. Read-only. - // +kubebuilder:validation:Optional // +optional Status CloudPrivateIPConfigStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } @@ -46,7 +44,6 @@ type CloudPrivateIPConfig struct { // +k8s:openapi-gen=true type CloudPrivateIPConfigSpec struct { // node is the node name, as specified by the Kubernetes field: node.metadata.name - // +kubebuilder:validation:Optional // +optional Node string `json:"node" protobuf:"bytes,1,opt,name=node"` } @@ -55,13 +52,15 @@ type CloudPrivateIPConfigSpec struct { // +k8s:openapi-gen=true type CloudPrivateIPConfigStatus struct { // node is the node name, as specified by the Kubernetes field: node.metadata.name - // +kubebuilder:validation:Optional // +optional Node string `json:"node" protobuf:"bytes,1,opt,name=node"` // condition is the assignment condition of the private IP and its status - // +kubebuilder:validation:Required // +required - Conditions []metav1.Condition `json:"conditions" protobuf:"bytes,2,rep,name=conditions"` + // +listType=map + // +listMapKey=type + // +patchMergeKey=type + // +patchStrategy=merge + Conditions []metav1.Condition `json:"conditions" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,2,rep,name=conditions"` } // CloudPrivateIPConfigConditionType specifies the current condition type of the CloudPrivateIPConfig diff --git a/vendor/github.com/openshift/api/config/v1/types.go b/vendor/github.com/openshift/api/config/v1/types.go index d4d09e7fe..3e17ca0cc 100644 --- a/vendor/github.com/openshift/api/config/v1/types.go +++ b/vendor/github.com/openshift/api/config/v1/types.go @@ -9,7 +9,7 @@ import ( // The namespace must be specified at the point of use. type ConfigMapFileReference struct { Name string `json:"name"` - // Key allows pointing to a specific key/value inside of the configmap. 
This is useful for logical file references. + // key allows pointing to a specific key/value inside of the configmap. This is useful for logical file references. Key string `json:"key,omitempty"` } @@ -17,7 +17,6 @@ type ConfigMapFileReference struct { // The namespace must be specified at the point of use. type ConfigMapNameReference struct { // name is the metadata.name of the referenced config map - // +kubebuilder:validation:Required // +required Name string `json:"name"` } @@ -26,7 +25,6 @@ type ConfigMapNameReference struct { // The namespace must be specified at the point of use. type SecretNameReference struct { // name is the metadata.name of the referenced secret - // +kubebuilder:validation:Required // +required Name string `json:"name"` } @@ -35,47 +33,47 @@ type SecretNameReference struct { type HTTPServingInfo struct { // ServingInfo is the HTTP serving information ServingInfo `json:",inline"` - // MaxRequestsInFlight is the number of concurrent requests allowed to the server. If zero, no limit. + // maxRequestsInFlight is the number of concurrent requests allowed to the server. If zero, no limit. MaxRequestsInFlight int64 `json:"maxRequestsInFlight"` - // RequestTimeoutSeconds is the number of seconds before requests are timed out. The default is 60 minutes, if + // requestTimeoutSeconds is the number of seconds before requests are timed out. The default is 60 minutes, if // -1 there is no limit on requests. RequestTimeoutSeconds int64 `json:"requestTimeoutSeconds"` } // ServingInfo holds information about serving web pages type ServingInfo struct { - // BindAddress is the ip:port to serve on + // bindAddress is the ip:port to serve on BindAddress string `json:"bindAddress"` - // BindNetwork is the type of network to bind to - defaults to "tcp4", accepts "tcp", + // bindNetwork is the type of network to bind to - defaults to "tcp4", accepts "tcp", // "tcp4", and "tcp6" BindNetwork string `json:"bindNetwork"` // CertInfo is the TLS cert info for serving secure traffic. // this is anonymous so that we can inline it for serialization CertInfo `json:",inline"` - // ClientCA is the certificate bundle for all the signers that you'll recognize for incoming client certificates + // clientCA is the certificate bundle for all the signers that you'll recognize for incoming client certificates // +optional ClientCA string `json:"clientCA,omitempty"` - // NamedCertificates is a list of certificates to use to secure requests to specific hostnames + // namedCertificates is a list of certificates to use to secure requests to specific hostnames NamedCertificates []NamedCertificate `json:"namedCertificates,omitempty"` - // MinTLSVersion is the minimum TLS version supported. + // minTLSVersion is the minimum TLS version supported. // Values must match version names from https://golang.org/pkg/crypto/tls/#pkg-constants MinTLSVersion string `json:"minTLSVersion,omitempty"` - // CipherSuites contains an overridden list of ciphers for the server to support. + // cipherSuites contains an overridden list of ciphers for the server to support. 
// Values must match cipher suite IDs from https://golang.org/pkg/crypto/tls/#pkg-constants CipherSuites []string `json:"cipherSuites,omitempty"` } // CertInfo relates a certificate with a private key type CertInfo struct { - // CertFile is a file containing a PEM-encoded certificate + // certFile is a file containing a PEM-encoded certificate CertFile string `json:"certFile"` - // KeyFile is a file containing a PEM-encoded private key for the certificate specified by CertFile + // keyFile is a file containing a PEM-encoded private key for the certificate specified by CertFile KeyFile string `json:"keyFile"` } // NamedCertificate specifies a certificate/key, and the names it should be served for type NamedCertificate struct { - // Names is a list of DNS names this certificate should be used to secure + // names is a list of DNS names this certificate should be used to secure // A name can be a normal DNS name, or can contain leading wildcard segments. Names []string `json:"names,omitempty"` // CertInfo is the TLS cert info for serving secure traffic @@ -121,24 +119,24 @@ type StringSource struct { // StringSourceSpec specifies a string value, or external location type StringSourceSpec struct { - // Value specifies the cleartext value, or an encrypted value if keyFile is specified. + // value specifies the cleartext value, or an encrypted value if keyFile is specified. Value string `json:"value"` - // Env specifies an envvar containing the cleartext value, or an encrypted value if the keyFile is specified. + // env specifies an envvar containing the cleartext value, or an encrypted value if the keyFile is specified. Env string `json:"env"` - // File references a file containing the cleartext value, or an encrypted value if a keyFile is specified. + // file references a file containing the cleartext value, or an encrypted value if a keyFile is specified. File string `json:"file"` - // KeyFile references a file containing the key to use to decrypt the value. + // keyFile references a file containing the key to use to decrypt the value. KeyFile string `json:"keyFile"` } // RemoteConnectionInfo holds information necessary for establishing a remote connection type RemoteConnectionInfo struct { - // URL is the remote URL to connect to + // url is the remote URL to connect to URL string `json:"url"` - // CA is the CA for verifying TLS connections + // ca is the CA for verifying TLS connections CA string `json:"ca"` // CertInfo is the TLS client cert information to present // this is anonymous so that we can inline it for serialization @@ -160,11 +158,11 @@ type AdmissionConfig struct { // AdmissionPluginConfig holds the necessary configuration options for admission plugins type AdmissionPluginConfig struct { - // Location is the path to a configuration file that contains the plugin's + // location is the path to a configuration file that contains the plugin's // configuration Location string `json:"location"` - // Configuration is an embedded configuration object to be used as the plugin's + // configuration is an embedded configuration object to be used as the plugin's // configuration. If present, it will be used instead of the path to the configuration file. // +nullable // +kubebuilder:pruning:PreserveUnknownFields @@ -205,9 +203,9 @@ type AuditConfig struct { // Maximum size in megabytes of the log file before it gets rotated. Defaults to 100MB. MaximumFileSizeMegabytes int32 `json:"maximumFileSizeMegabytes"` - // PolicyFile is a path to the file that defines the audit policy configuration. 
+ // policyFile is a path to the file that defines the audit policy configuration. PolicyFile string `json:"policyFile"` - // PolicyConfiguration is an embedded policy configuration object to be used + // policyConfiguration is an embedded policy configuration object to be used // as the audit policy configuration. If present, it will be used instead of // the path to the policy file. // +nullable @@ -225,9 +223,9 @@ type AuditConfig struct { // EtcdConnectionInfo holds information necessary for connecting to an etcd server type EtcdConnectionInfo struct { - // URLs are the URLs for etcd + // urls are the URLs for etcd URLs []string `json:"urls,omitempty"` - // CA is a file containing trusted roots for the etcd server certificates + // ca is a file containing trusted roots for the etcd server certificates CA string `json:"ca"` // CertInfo is the TLS client cert information for securing communication to etcd // this is anonymous so that we can inline it for serialization @@ -237,7 +235,7 @@ type EtcdConnectionInfo struct { type EtcdStorageConfig struct { EtcdConnectionInfo `json:",inline"` - // StoragePrefix is the path within etcd that the OpenShift resources will + // storagePrefix is the path within etcd that the OpenShift resources will // be rooted under. This value, if changed, will mean existing objects in etcd will // no longer be located. StoragePrefix string `json:"storagePrefix"` @@ -287,7 +285,7 @@ type ClientConnectionOverrides struct { // GenericControllerConfig provides information to configure a controller type GenericControllerConfig struct { - // ServingInfo is the HTTP serving information for the controller's endpoints + // servingInfo is the HTTP serving information for the controller's endpoints ServingInfo HTTPServingInfo `json:"servingInfo"` // leaderElection provides information to elect a leader. Only override this if you have a specific need @@ -324,7 +322,6 @@ type RequiredHSTSPolicy struct { // The use of wildcards is allowed like this: *.foo.com matches everything under foo.com. // foo.com only matches foo.com, so to cover foo.com and everything under it, you must specify *both*. // +kubebuilder:validation:MinItems=1 - // +kubebuilder:validation:Required // +required DomainPatterns []string `json:"domainPatterns"` diff --git a/vendor/github.com/openshift/api/config/v1/types_apiserver.go b/vendor/github.com/openshift/api/config/v1/types_apiserver.go index d815556d2..75b647f74 100644 --- a/vendor/github.com/openshift/api/config/v1/types_apiserver.go +++ b/vendor/github.com/openshift/api/config/v1/types_apiserver.go @@ -27,7 +27,6 @@ type APIServer struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata,omitempty"` // spec holds user settable values for configuration - // +kubebuilder:validation:Required // +required Spec APIServerSpec `json:"spec"` // status holds observed values from the cluster. They may not be overridden. @@ -129,7 +128,6 @@ type Audit struct { type AuditCustomRule struct { // group is a name of group a request user must be member of in order to this profile to apply. // - // +kubebuilder:validation:Required // +kubebuilder:validation:MinLength=1 // +required Group string `json:"group"` @@ -146,7 +144,6 @@ type AuditCustomRule struct { // // If unset, the 'Default' profile is used as the default. 
// - // +kubebuilder:validation:Required // +required Profile AuditProfileType `json:"profile,omitempty"` } diff --git a/vendor/github.com/openshift/api/config/v1/types_authentication.go b/vendor/github.com/openshift/api/config/v1/types_authentication.go index f6f0c12a3..a2af4d654 100644 --- a/vendor/github.com/openshift/api/config/v1/types_authentication.go +++ b/vendor/github.com/openshift/api/config/v1/types_authentication.go @@ -26,7 +26,6 @@ type Authentication struct { metav1.ObjectMeta `json:"metadata,omitempty"` // spec holds user settable values for configuration - // +kubebuilder:validation:Required // +required Spec AuthenticationSpec `json:"spec"` // status holds observed values from the cluster. They may not be overridden. @@ -81,7 +80,7 @@ type AuthenticationSpec struct { // +optional ServiceAccountIssuer string `json:"serviceAccountIssuer"` - // OIDCProviders are OIDC identity providers that can issue tokens + // oidcProviders are OIDC identity providers that can issue tokens // for this cluster // Can only be set if "Type" is set to "OIDC". // @@ -110,7 +109,7 @@ type AuthenticationStatus struct { // The namespace for this config map is openshift-config-managed. IntegratedOAuthMetadata ConfigMapNameReference `json:"integratedOAuthMetadata"` - // OIDCClients is where participating operators place the current OIDC client status + // oidcClients is where participating operators place the current OIDC client status // for OIDC clients that can be customized by the cluster-admin. // // +listType=map @@ -181,7 +180,6 @@ type WebhookTokenAuthenticator struct { // The key "kubeConfig" is used to locate the data. // If the secret or expected key is not found, the webhook is not honored. // If the specified kube config data is not valid, the webhook is not honored. - // +kubebuilder:validation:Required // +required KubeConfig SecretNameReference `json:"kubeConfig"` } @@ -195,19 +193,17 @@ const ( ) type OIDCProvider struct { - // Name of the OIDC provider + // name of the OIDC provider // // +kubebuilder:validation:MinLength=1 - // +kubebuilder:validation:Required // +required Name string `json:"name"` - // Issuer describes atributes of the OIDC token issuer + // issuer describes atributes of the OIDC token issuer // - // +kubebuilder:validation:Required // +required Issuer TokenIssuer `json:"issuer"` - // OIDCClients contains configuration for the platform's clients that + // oidcClients contains configuration for the platform's clients that // need to request tokens from the issuer // // +listType=map @@ -216,11 +212,11 @@ type OIDCProvider struct { // +kubebuilder:validation:MaxItems=20 OIDCClients []OIDCClientConfig `json:"oidcClients"` - // ClaimMappings describes rules on how to transform information from an + // claimMappings describes rules on how to transform information from an // ID token into a cluster identity ClaimMappings TokenClaimMappings `json:"claimMappings"` - // ClaimValidationRules are rules that are applied to validate token claims to authenticate users. + // claimValidationRules are rules that are applied to validate token claims to authenticate users. // // +listType=atomic ClaimValidationRules []TokenClaimValidationRule `json:"claimValidationRules,omitempty"` @@ -234,17 +230,15 @@ type TokenIssuer struct { // Must use the https:// scheme. // // +kubebuilder:validation:Pattern=`^https:\/\/[^\s]` - // +kubebuilder:validation:Required // +required URL string `json:"issuerURL"` - // Audiences is an array of audiences that the token was issued for. 
+ // audiences is an array of audiences that the token was issued for. // Valid tokens must include at least one of these values in their // "aud" claim. // Must be set to exactly one value. // // +listType=set - // +kubebuilder:validation:Required // +kubebuilder:validation:MinItems=1 // +kubebuilder:validation:MaxItems=10 // +required @@ -258,94 +252,88 @@ type TokenIssuer struct { } type TokenClaimMappings struct { - // Username is a name of the claim that should be used to construct + // username is a name of the claim that should be used to construct // usernames for the cluster identity. // // Default value: "sub" Username UsernameClaimMapping `json:"username,omitempty"` - // Groups is a name of the claim that should be used to construct + // groups is a name of the claim that should be used to construct // groups for the cluster identity. // The referenced claim must use array of strings values. Groups PrefixedClaimMapping `json:"groups,omitempty"` } type TokenClaimMapping struct { - // Claim is a JWT token claim to be used in the mapping + // claim is a JWT token claim to be used in the mapping // - // +kubebuilder:validation:Required // +required Claim string `json:"claim"` } type OIDCClientConfig struct { - // ComponentName is the name of the component that is supposed to consume this + // componentName is the name of the component that is supposed to consume this // client configuration // // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=256 - // +kubebuilder:validation:Required // +required ComponentName string `json:"componentName"` - // ComponentNamespace is the namespace of the component that is supposed to consume this + // componentNamespace is the namespace of the component that is supposed to consume this // client configuration // // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=63 - // +kubebuilder:validation:Required // +required ComponentNamespace string `json:"componentNamespace"` - // ClientID is the identifier of the OIDC client from the OIDC provider + // clientID is the identifier of the OIDC client from the OIDC provider // // +kubebuilder:validation:MinLength=1 - // +kubebuilder:validation:Required // +required ClientID string `json:"clientID"` - // ClientSecret refers to a secret in the `openshift-config` namespace that + // clientSecret refers to a secret in the `openshift-config` namespace that // contains the client secret in the `clientSecret` key of the `.data` field ClientSecret SecretNameReference `json:"clientSecret"` - // ExtraScopes is an optional set of scopes to request tokens with. + // extraScopes is an optional set of scopes to request tokens with. // // +listType=set ExtraScopes []string `json:"extraScopes"` } type OIDCClientStatus struct { - // ComponentName is the name of the component that will consume a client configuration. + // componentName is the name of the component that will consume a client configuration. // // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=256 - // +kubebuilder:validation:Required // +required ComponentName string `json:"componentName"` - // ComponentNamespace is the namespace of the component that will consume a client configuration. + // componentNamespace is the namespace of the component that will consume a client configuration. 
// // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=63 - // +kubebuilder:validation:Required // +required ComponentNamespace string `json:"componentNamespace"` - // CurrentOIDCClients is a list of clients that the component is currently using. + // currentOIDCClients is a list of clients that the component is currently using. // // +listType=map // +listMapKey=issuerURL // +listMapKey=clientID CurrentOIDCClients []OIDCClientReference `json:"currentOIDCClients"` - // ConsumingUsers is a slice of ServiceAccounts that need to have read + // consumingUsers is a slice of ServiceAccounts that need to have read // permission on the `clientSecret` secret. // // +kubebuilder:validation:MaxItems=5 // +listType=set ConsumingUsers []ConsumingUser `json:"consumingUsers"` - // Conditions are used to communicate the state of the `oidcClients` entry. + // conditions are used to communicate the state of the `oidcClients` entry. // // Supported conditions include Available, Degraded and Progressing. // @@ -355,6 +343,7 @@ type OIDCClientStatus struct { // // +listType=map // +listMapKey=type + // +optional Conditions []metav1.Condition `json:"conditions,omitempty"` } @@ -362,7 +351,6 @@ type OIDCClientReference struct { // OIDCName refers to the `name` of the provider from `oidcProviders` // // +kubebuilder:validation:MinLength=1 - // +kubebuilder:validation:Required // +required OIDCProviderName string `json:"oidcProviderName"` @@ -370,14 +358,12 @@ type OIDCClientReference struct { // Must use the https:// scheme. // // +kubebuilder:validation:Pattern=`^https:\/\/[^\s]` - // +kubebuilder:validation:Required // +required IssuerURL string `json:"issuerURL"` - // ClientID is the identifier of the OIDC client from the OIDC provider + // clientID is the identifier of the OIDC client from the OIDC provider // // +kubebuilder:validation:MinLength=1 - // +kubebuilder:validation:Required // +required ClientID string `json:"clientID"` } @@ -386,7 +372,7 @@ type OIDCClientReference struct { type UsernameClaimMapping struct { TokenClaimMapping `json:",inline"` - // PrefixPolicy specifies how a prefix should apply. + // prefixPolicy specifies how a prefix should apply. // // By default, claims other than `email` will be prefixed with the issuer URL to // prevent naming clashes with other plugins. @@ -427,7 +413,6 @@ var ( ) type UsernamePrefix struct { - // +kubebuilder:validation:Required // +kubebuilder:validation:MinLength=1 // +required PrefixString string `json:"prefixString"` @@ -436,7 +421,7 @@ type UsernamePrefix struct { type PrefixedClaimMapping struct { TokenClaimMapping `json:",inline"` - // Prefix is a string to prefix the value from the token in the result of the + // prefix is a string to prefix the value from the token in the result of the // claim mapping. // // By default, no prefixing occurs. @@ -454,30 +439,28 @@ const ( ) type TokenClaimValidationRule struct { - // Type sets the type of the validation rule + // type sets the type of the validation rule // // +kubebuilder:validation:Enum={"RequiredClaim"} // +kubebuilder:default="RequiredClaim" Type TokenValidationRuleType `json:"type"` - // RequiredClaim allows configuring a required claim name and its expected + // requiredClaim allows configuring a required claim name and its expected // value RequiredClaim *TokenRequiredClaim `json:"requiredClaim"` } type TokenRequiredClaim struct { - // Claim is a name of a required claim. Only claims with string values are + // claim is a name of a required claim. 
Only claims with string values are // supported. // // +kubebuilder:validation:MinLength=1 - // +kubebuilder:validation:Required // +required Claim string `json:"claim"` - // RequiredValue is the required value for the claim. + // requiredValue is the required value for the claim. // // +kubebuilder:validation:MinLength=1 - // +kubebuilder:validation:Required // +required RequiredValue string `json:"requiredValue"` } diff --git a/vendor/github.com/openshift/api/config/v1/types_build.go b/vendor/github.com/openshift/api/config/v1/types_build.go index dad47666d..dcde1fc5b 100644 --- a/vendor/github.com/openshift/api/config/v1/types_build.go +++ b/vendor/github.com/openshift/api/config/v1/types_build.go @@ -29,14 +29,13 @@ type Build struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata,omitempty"` - // Spec holds user-settable values for the build controller configuration - // +kubebuilder:validation:Required + // spec holds user-settable values for the build controller configuration // +required Spec BuildSpec `json:"spec"` } type BuildSpec struct { - // AdditionalTrustedCA is a reference to a ConfigMap containing additional CAs that + // additionalTrustedCA is a reference to a ConfigMap containing additional CAs that // should be trusted for image pushes and pulls during builds. // The namespace for this config map is openshift-config. // @@ -45,16 +44,16 @@ type BuildSpec struct { // // +optional AdditionalTrustedCA ConfigMapNameReference `json:"additionalTrustedCA"` - // BuildDefaults controls the default information for Builds + // buildDefaults controls the default information for Builds // +optional BuildDefaults BuildDefaults `json:"buildDefaults"` - // BuildOverrides controls override settings for builds + // buildOverrides controls override settings for builds // +optional BuildOverrides BuildOverrides `json:"buildOverrides"` } type BuildDefaults struct { - // DefaultProxy contains the default proxy settings for all build operations, including image pull/push + // defaultProxy contains the default proxy settings for all build operations, including image pull/push // and source download. // // Values can be overrode by setting the `HTTP_PROXY`, `HTTPS_PROXY`, and `NO_PROXY` environment variables @@ -62,55 +61,55 @@ type BuildDefaults struct { // +optional DefaultProxy *ProxySpec `json:"defaultProxy,omitempty"` - // GitProxy contains the proxy settings for git operations only. If set, this will override + // gitProxy contains the proxy settings for git operations only. If set, this will override // any Proxy settings for all git commands, such as git clone. // // Values that are not set here will be inherited from DefaultProxy. // +optional GitProxy *ProxySpec `json:"gitProxy,omitempty"` - // Env is a set of default environment variables that will be applied to the + // env is a set of default environment variables that will be applied to the // build if the specified variables do not exist on the build // +optional Env []corev1.EnvVar `json:"env,omitempty"` - // ImageLabels is a list of docker labels that are applied to the resulting image. + // imageLabels is a list of docker labels that are applied to the resulting image. // User can override a default label by providing a label with the same name in their // Build/BuildConfig. // +optional ImageLabels []ImageLabel `json:"imageLabels,omitempty"` - // Resources defines resource requirements to execute the build. 
+ // resources defines resource requirements to execute the build. // +optional Resources corev1.ResourceRequirements `json:"resources"` } type ImageLabel struct { - // Name defines the name of the label. It must have non-zero length. + // name defines the name of the label. It must have non-zero length. Name string `json:"name"` - // Value defines the literal value of the label. + // value defines the literal value of the label. // +optional Value string `json:"value,omitempty"` } type BuildOverrides struct { - // ImageLabels is a list of docker labels that are applied to the resulting image. + // imageLabels is a list of docker labels that are applied to the resulting image. // If user provided a label in their Build/BuildConfig with the same name as one in this // list, the user's label will be overwritten. // +optional ImageLabels []ImageLabel `json:"imageLabels,omitempty"` - // NodeSelector is a selector which must be true for the build pod to fit on a node + // nodeSelector is a selector which must be true for the build pod to fit on a node // +optional NodeSelector map[string]string `json:"nodeSelector,omitempty"` - // Tolerations is a list of Tolerations that will override any existing + // tolerations is a list of Tolerations that will override any existing // tolerations set on a build pod. // +optional Tolerations []corev1.Toleration `json:"tolerations,omitempty"` - // ForcePull overrides, if set, the equivalent value in the builds, + // forcePull overrides, if set, the equivalent value in the builds, // i.e. false disables force pull for all builds, // true enables force pull for all builds, // independently of what each build specifies itself diff --git a/vendor/github.com/openshift/api/config/v1/types_cluster_operator.go b/vendor/github.com/openshift/api/config/v1/types_cluster_operator.go index 7951762cc..a447adb9f 100644 --- a/vendor/github.com/openshift/api/config/v1/types_cluster_operator.go +++ b/vendor/github.com/openshift/api/config/v1/types_cluster_operator.go @@ -34,7 +34,6 @@ type ClusterOperator struct { metav1.ObjectMeta `json:"metadata"` // spec holds configuration that could apply to any operator. - // +kubebuilder:validation:Required // +required Spec ClusterOperatorSpec `json:"spec"` @@ -54,6 +53,8 @@ type ClusterOperatorStatus struct { // conditions describes the state of the operator's managed and monitored components. // +patchMergeKey=type // +patchStrategy=merge + // +listType=map + // +listMapKey=type // +optional Conditions []ClusterOperatorStatusCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"` @@ -80,14 +81,12 @@ type ClusterOperatorStatus struct { type OperandVersion struct { // name is the name of the particular operand this version is for. It usually matches container images, not operators. - // +kubebuilder:validation:Required // +required Name string `json:"name"` // version indicates which version of a particular operand is currently being managed. It must always match the Available // operand. If 1.0.0 is Available, then this must indicate 1.0.0 even if the operator is trying to rollout // 1.1.0 - // +kubebuilder:validation:Required // +required Version string `json:"version"` } @@ -95,18 +94,15 @@ type OperandVersion struct { // ObjectReference contains enough information to let you inspect or modify the referred object. type ObjectReference struct { // group of the referent. - // +kubebuilder:validation:Required // +required Group string `json:"group"` // resource of the referent. 
- // +kubebuilder:validation:Required // +required Resource string `json:"resource"` // namespace of the referent. // +optional Namespace string `json:"namespace,omitempty"` // name of the referent. - // +kubebuilder:validation:Required // +required Name string `json:"name"` } @@ -128,17 +124,14 @@ const ( // +k8s:deepcopy-gen=true type ClusterOperatorStatusCondition struct { // type specifies the aspect reported by this condition. - // +kubebuilder:validation:Required // +required Type ClusterStatusConditionType `json:"type"` // status of the condition, one of True, False, Unknown. - // +kubebuilder:validation:Required // +required Status ConditionStatus `json:"status"` // lastTransitionTime is the time of the last update to the current status property. - // +kubebuilder:validation:Required // +required LastTransitionTime metav1.Time `json:"lastTransitionTime"` diff --git a/vendor/github.com/openshift/api/config/v1/types_cluster_version.go b/vendor/github.com/openshift/api/config/v1/types_cluster_version.go index 61386a72e..092bebff0 100644 --- a/vendor/github.com/openshift/api/config/v1/types_cluster_version.go +++ b/vendor/github.com/openshift/api/config/v1/types_cluster_version.go @@ -34,7 +34,6 @@ type ClusterVersion struct { // spec is the desired state of the cluster version - the operator will work // to ensure that the desired version is applied to the cluster. - // +kubebuilder:validation:Required // +required Spec ClusterVersionSpec `json:"spec"` // status contains information about the available updates and any in-progress @@ -51,7 +50,6 @@ type ClusterVersionSpec struct { // clusterID uniquely identifies this cluster. This is expected to be // an RFC4122 UUID value (xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx in // hexadecimal values). This is a required field. - // +kubebuilder:validation:Required // +required ClusterID ClusterID `json:"clusterID"` @@ -64,7 +62,7 @@ type ClusterVersionSpec struct { // // Some of the fields are inter-related with restrictions and meanings described here. // 1. image is specified, version is specified, architecture is specified. API validation error. - // 2. image is specified, version is specified, architecture is not specified. You should not do this. version is silently ignored and image is used. + // 2. image is specified, version is specified, architecture is not specified. The version extracted from the referenced image must match the specified version. // 3. image is specified, version is not specified, architecture is specified. API validation error. // 4. image is specified, version is not specified, architecture is not specified. image is used. // 5. image is not specified, version is specified, architecture is specified. version and desired architecture are used to select an image. @@ -138,7 +136,6 @@ type ClusterVersionStatus struct { // desired is the version that the cluster is reconciling towards. // If the cluster is not yet fully initialized desired will be set // with the information available, which may be an image or a tag. - // +kubebuilder:validation:Required // +required Desired Release `json:"desired"` @@ -156,14 +153,12 @@ type ClusterVersionStatus struct { // observedGeneration reports which version of the spec is being synced. // If this value is not equal to metadata.generation, then the desired // and conditions fields may represent a previous version. 
- // +kubebuilder:validation:Required // +required ObservedGeneration int64 `json:"observedGeneration"` // versionHash is a fingerprint of the content that the cluster will be // updated with. It is used by the operator to avoid unnecessary work // and is for internal use only. - // +kubebuilder:validation:Required // +required VersionHash string `json:"versionHash"` @@ -190,7 +185,6 @@ type ClusterVersionStatus struct { // may be empty if no updates are recommended, if the update service // is unavailable, or if an invalid channel has been specified. // +nullable - // +kubebuilder:validation:Required // +listType=atomic // +required AvailableUpdates []Release `json:"availableUpdates"` @@ -226,12 +220,10 @@ type UpdateHistory struct { // indicates the update is not fully applied, while the Completed state // indicates the update was successfully rolled out at least once (all // parts of the update successfully applied). - // +kubebuilder:validation:Required // +required State UpdateState `json:"state"` // startedTime is the time at which the update was started. - // +kubebuilder:validation:Required // +required StartedTime metav1.Time `json:"startedTime"` @@ -239,7 +231,6 @@ type UpdateHistory struct { // that is currently being applied will have a null completion time. // Completion time will always be set for entries that are not the current // update (usually to the started time of the next update). - // +kubebuilder:validation:Required // +required // +nullable CompletionTime *metav1.Time `json:"completionTime"` @@ -253,7 +244,6 @@ type UpdateHistory struct { // image is a container image location that contains the update. This value // is always populated. - // +kubebuilder:validation:Required // +required Image string `json:"image"` @@ -261,7 +251,6 @@ type UpdateHistory struct { // before it was installed. If this is false the cluster may not be trusted. // Verified does not cover upgradeable checks that depend on the cluster // state at the time when the update target was accepted. - // +kubebuilder:validation:Required // +required Verified bool `json:"verified"` @@ -288,7 +277,7 @@ const ( ) // ClusterVersionCapability enumerates optional, core cluster components. 
-// +kubebuilder:validation:Enum=openshift-samples;baremetal;marketplace;Console;Insights;Storage;CSISnapshot;NodeTuning;MachineAPI;Build;DeploymentConfig;ImageRegistry;OperatorLifecycleManager;CloudCredential;Ingress;CloudControllerManager +// +kubebuilder:validation:Enum=openshift-samples;baremetal;marketplace;Console;Insights;Storage;CSISnapshot;NodeTuning;MachineAPI;Build;DeploymentConfig;ImageRegistry;OperatorLifecycleManager;CloudCredential;Ingress;CloudControllerManager;OperatorLifecycleManagerV1 type ClusterVersionCapability string const ( @@ -379,10 +368,14 @@ const ( // allows to distribute Docker images ClusterVersionCapabilityImageRegistry ClusterVersionCapability = "ImageRegistry" - // ClusterVersionCapabilityOperatorLifecycleManager manages the Operator Lifecycle Manager + // ClusterVersionCapabilityOperatorLifecycleManager manages the Operator Lifecycle Manager (legacy) // which itself manages the lifecycle of operators ClusterVersionCapabilityOperatorLifecycleManager ClusterVersionCapability = "OperatorLifecycleManager" + // ClusterVersionCapabilityOperatorLifecycleManagerV1 manages the Operator Lifecycle Manager (v1) + // which itself manages the lifecycle of operators + ClusterVersionCapabilityOperatorLifecycleManagerV1 ClusterVersionCapability = "OperatorLifecycleManagerV1" + // ClusterVersionCapabilityCloudCredential manages credentials for cloud providers // in openshift cluster ClusterVersionCapabilityCloudCredential ClusterVersionCapability = "CloudCredential" @@ -422,6 +415,7 @@ var KnownClusterVersionCapabilities = []ClusterVersionCapability{ ClusterVersionCapabilityDeploymentConfig, ClusterVersionCapabilityImageRegistry, ClusterVersionCapabilityOperatorLifecycleManager, + ClusterVersionCapabilityOperatorLifecycleManagerV1, ClusterVersionCapabilityCloudCredential, ClusterVersionCapabilityIngress, ClusterVersionCapabilityCloudControllerManager, @@ -600,6 +594,7 @@ var ClusterVersionCapabilitySets = map[ClusterVersionCapabilitySet][]ClusterVers ClusterVersionCapabilityDeploymentConfig, ClusterVersionCapabilityImageRegistry, ClusterVersionCapabilityOperatorLifecycleManager, + ClusterVersionCapabilityOperatorLifecycleManagerV1, ClusterVersionCapabilityCloudCredential, ClusterVersionCapabilityIngress, ClusterVersionCapabilityCloudControllerManager, @@ -618,6 +613,7 @@ var ClusterVersionCapabilitySets = map[ClusterVersionCapabilitySet][]ClusterVers ClusterVersionCapabilityDeploymentConfig, ClusterVersionCapabilityImageRegistry, ClusterVersionCapabilityOperatorLifecycleManager, + ClusterVersionCapabilityOperatorLifecycleManagerV1, ClusterVersionCapabilityCloudCredential, ClusterVersionCapabilityIngress, ClusterVersionCapabilityCloudControllerManager, @@ -664,28 +660,23 @@ type ClusterVersionCapabilitiesStatus struct { // +k8s:deepcopy-gen=true type ComponentOverride struct { // kind indentifies which object to override. - // +kubebuilder:validation:Required // +required Kind string `json:"kind"` // group identifies the API group that the kind is in. - // +kubebuilder:validation:Required // +required Group string `json:"group"` // namespace is the component's namespace. If the resource is cluster // scoped, the namespace should be empty. - // +kubebuilder:validation:Required // +required Namespace string `json:"namespace"` // name is the component's name. - // +kubebuilder:validation:Required // +required Name string `json:"name"` // unmanaged controls if cluster version operator should stop managing the // resources in this cluster. 
// Default: false - // +kubebuilder:validation:Required // +required Unmanaged bool `json:"unmanaged"` } @@ -694,8 +685,8 @@ type ComponentOverride struct { type URL string // Update represents an administrator update request. -// +kubebuilder:validation:XValidation:rule="has(self.architecture) && has(self.image) ? (self.architecture == '' || self.image == '') : true",message="cannot set both Architecture and Image" -// +kubebuilder:validation:XValidation:rule="has(self.architecture) && self.architecture != '' ? self.version != '' : true",message="Version must be set if Architecture is set" +// +kubebuilder:validation:XValidation:rule="has(self.architecture) && has(self.image) ? (self.architecture == \"\" || self.image == \"\") : true",message="cannot set both Architecture and Image" +// +kubebuilder:validation:XValidation:rule="has(self.architecture) && self.architecture != \"\" ? self.version != \"\" : true",message="Version must be set if Architecture is set" // +k8s:deepcopy-gen=true type Update struct { // architecture is an optional field that indicates the desired @@ -711,16 +702,16 @@ type Update struct { Architecture ClusterVersionArchitecture `json:"architecture"` // version is a semantic version identifying the update version. - // version is ignored if image is specified and required if - // architecture is specified. + // version is required if architecture is specified. + // If both version and image are set, the version extracted from the referenced image must match the specified version. // // +optional Version string `json:"version"` // image is a container image location that contains the update. // image should be used when the desired version does not exist in availableUpdates or history. - // When image is set, version is ignored. When image is set, version should be empty. // When image is set, architecture cannot be specified. + // If both version and image are set, the version extracted from the referenced image must match the specified version. // // +optional Image string `json:"image"` @@ -739,6 +730,16 @@ type Update struct { // Release represents an OpenShift release image and associated metadata. // +k8s:deepcopy-gen=true type Release struct { + // architecture is an optional field that indicates the + // value of the cluster architecture. In this context cluster + // architecture means either a single architecture or a multi + // architecture. + // Valid values are 'Multi' and empty. + // + // +openshift:enable:FeatureGate=ImageStreamImportMode + // +optional + Architecture ClusterVersionArchitecture `json:"architecture,omitempty"` + // version is a semantic version identifying the update version. When this // field is part of spec, version is optional if image is specified. // +required @@ -776,7 +777,6 @@ const RetrievedUpdates ClusterStatusConditionType = "RetrievedUpdates" // may not be recommended for the current cluster. type ConditionalUpdate struct { // release is the target of the update. - // +kubebuilder:validation:Required // +required Release Release `json:"release"` @@ -785,7 +785,6 @@ type ConditionalUpdate struct { // operator will evaluate all entries, and only recommend the // update if there is at least one entry and all entries // recommend the update. - // +kubebuilder:validation:Required // +kubebuilder:validation:MinItems=1 // +patchMergeKey=name // +patchStrategy=merge @@ -797,11 +796,10 @@ type ConditionalUpdate struct { // conditions represents the observations of the conditional update's // current status. 
Known types are: // * Recommended, for whether the update is recommended for the current cluster. - // +patchMergeKey=type - // +patchStrategy=merge // +listType=map // +listMapKey=type - Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"` + // +optional + Conditions []metav1.Condition `json:"conditions,omitempty"` } // ConditionalUpdateRisk represents a reason and cluster-state @@ -809,7 +807,6 @@ type ConditionalUpdate struct { // +k8s:deepcopy-gen=true type ConditionalUpdateRisk struct { // url contains information about this risk. - // +kubebuilder:validation:Required // +kubebuilder:validation:Format=uri // +kubebuilder:validation:MinLength=1 // +required @@ -818,7 +815,6 @@ type ConditionalUpdateRisk struct { // name is the CamelCase reason for not recommending a // conditional update, in the event that matchingRules match the // cluster state. - // +kubebuilder:validation:Required // +kubebuilder:validation:MinLength=1 // +required Name string `json:"name"` @@ -828,7 +824,6 @@ type ConditionalUpdateRisk struct { // state. This is only to be consumed by humans. It may // contain Line Feed characters (U+000A), which should be // rendered as new lines. - // +kubebuilder:validation:Required // +kubebuilder:validation:MinLength=1 // +required Message string `json:"message"` @@ -839,7 +834,6 @@ type ConditionalUpdateRisk struct { // operator will walk the slice in order, and stop after the // first it can successfully evaluate. If no condition can be // successfully evaluated, the update will not be recommended. - // +kubebuilder:validation:Required // +kubebuilder:validation:MinItems=1 // +listType=atomic // +required @@ -854,24 +848,22 @@ type ConditionalUpdateRisk struct { type ClusterCondition struct { // type represents the cluster-condition type. This defines // the members and semantics of any additional properties. - // +kubebuilder:validation:Required // +kubebuilder:validation:Enum={"Always","PromQL"} // +required Type string `json:"type"` - // promQL represents a cluster condition based on PromQL. + // promql represents a cluster condition based on PromQL. // +optional PromQL *PromQLClusterCondition `json:"promql,omitempty"` } // PromQLClusterCondition represents a cluster condition based on PromQL. type PromQLClusterCondition struct { - // PromQL is a PromQL query classifying clusters. This query + // promql is a PromQL query classifying clusters. This query // query should return a 1 in the match case and a 0 in the // does-not-match case. Queries which return no time // series, or which return values besides 0 or 1, are // evaluation failures. - // +kubebuilder:validation:Required // +required PromQL string `json:"promql"` } @@ -900,7 +892,7 @@ type SignatureStore struct { // // +kubebuilder:validation:Type=string // +kubebuilder:validation:XValidation:rule="isURL(self)",message="url must be a valid absolute URL" - // +kubebuilder:validation:Required + // +required URL string `json:"url"` // ca is an optional reference to a config map by name containing the PEM-encoded CA bundle. 
diff --git a/vendor/github.com/openshift/api/config/v1/types_console.go b/vendor/github.com/openshift/api/config/v1/types_console.go index e8f197b34..0ccc4a8f8 100644 --- a/vendor/github.com/openshift/api/config/v1/types_console.go +++ b/vendor/github.com/openshift/api/config/v1/types_console.go @@ -28,7 +28,6 @@ type Console struct { metav1.ObjectMeta `json:"metadata,omitempty"` // spec holds user settable values for configuration - // +kubebuilder:validation:Required // +required Spec ConsoleSpec `json:"spec"` // status holds observed values from the cluster. They may not be overridden. diff --git a/vendor/github.com/openshift/api/config/v1/types_dns.go b/vendor/github.com/openshift/api/config/v1/types_dns.go index 5daa5d78d..06eb75ccf 100644 --- a/vendor/github.com/openshift/api/config/v1/types_dns.go +++ b/vendor/github.com/openshift/api/config/v1/types_dns.go @@ -24,7 +24,6 @@ type DNS struct { metav1.ObjectMeta `json:"metadata,omitempty"` // spec holds user settable values for configuration - // +kubebuilder:validation:Required // +required Spec DNSSpec `json:"spec"` // status holds observed values from the cluster. They may not be overridden. @@ -121,7 +120,7 @@ type DNSPlatformSpec struct { // and must handle unrecognized platforms with best-effort defaults. // // +unionDiscriminator - // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:XValidation:rule="self in ['','AWS']",message="allowed values are '' and 'AWS'" Type PlatformType `json:"type"` diff --git a/vendor/github.com/openshift/api/config/v1/types_feature.go b/vendor/github.com/openshift/api/config/v1/types_feature.go index 88d94ac52..0709a75ae 100644 --- a/vendor/github.com/openshift/api/config/v1/types_feature.go +++ b/vendor/github.com/openshift/api/config/v1/types_feature.go @@ -26,7 +26,6 @@ type FeatureGate struct { metav1.ObjectMeta `json:"metadata,omitempty"` // spec holds user settable values for configuration - // +kubebuilder:validation:Required // +required // +kubebuilder:validation:XValidation:rule="has(oldSelf.featureSet) ? has(self.featureSet) : true",message=".spec.featureSet cannot be removed" Spec FeatureGateSpec `json:"spec"` @@ -100,6 +99,7 @@ type FeatureGateStatus struct { // Known .status.conditions.type are: "DeterminationDegraded" // +listType=map // +listMapKey=type + // +optional Conditions []metav1.Condition `json:"conditions,omitempty"` // featureGates contains a list of enabled and disabled featureGates that are keyed by payloadVersion. @@ -117,7 +117,6 @@ type FeatureGateStatus struct { type FeatureGateDetails struct { // version matches the version provided by the ClusterVersion and in the ClusterOperator.Status.Versions field. - // +kubebuilder:validation:Required // +required Version string `json:"version"` // enabled is a list of all feature gates that are enabled in the cluster for the named version. @@ -130,7 +129,7 @@ type FeatureGateDetails struct { type FeatureGateAttributes struct { // name is the name of the FeatureGate. - // +kubebuilder:validation:Required + // +required Name FeatureGateName `json:"name"` // possible (probable?) 
future additions include diff --git a/vendor/github.com/openshift/api/config/v1/types_image.go b/vendor/github.com/openshift/api/config/v1/types_image.go index d3c694a56..82f46c8b6 100644 --- a/vendor/github.com/openshift/api/config/v1/types_image.go +++ b/vendor/github.com/openshift/api/config/v1/types_image.go @@ -29,7 +29,6 @@ type Image struct { metav1.ObjectMeta `json:"metadata,omitempty"` // spec holds user settable values for configuration - // +kubebuilder:validation:Required // +required Spec ImageSpec `json:"spec"` // status holds observed values from the cluster. They may not be overridden. @@ -162,6 +161,8 @@ type RegistryLocation struct { } // RegistrySources holds cluster-wide information about how to handle the registries config. +// +// +kubebuilder:validation:XValidation:rule="has(self.blockedRegistries) ? !has(self.allowedRegistries) : true",message="Only one of blockedRegistries or allowedRegistries may be set" type RegistrySources struct { // insecureRegistries are registries which do not have a valid TLS certificates or only support HTTP connections. // +optional diff --git a/vendor/github.com/openshift/api/config/v1/types_image_content_policy.go b/vendor/github.com/openshift/api/config/v1/types_image_content_policy.go index 74df4027f..0bd0d7770 100644 --- a/vendor/github.com/openshift/api/config/v1/types_image_content_policy.go +++ b/vendor/github.com/openshift/api/config/v1/types_image_content_policy.go @@ -25,7 +25,6 @@ type ImageContentPolicy struct { metav1.ObjectMeta `json:"metadata,omitempty"` // spec holds user settable values for configuration - // +kubebuilder:validation:Required // +required Spec ImageContentPolicySpec `json:"spec"` } @@ -76,7 +75,6 @@ type ImageContentPolicyList struct { type RepositoryDigestMirrors struct { // source is the repository that users refer to, e.g. in image pull specifications. // +required - // +kubebuilder:validation:Required // +kubebuilder:validation:Pattern=`^(([a-zA-Z]|[a-zA-Z][a-zA-Z0-9\-]*[a-zA-Z0-9])\.)*([A-Za-z]|[A-Za-z][A-Za-z0-9\-]*[A-Za-z0-9])(:[0-9]+)?(\/[^\/:\n]+)*(\/[^\/:\n]+((:[^\/:\n]+)|(@[^\n]+)))?$` Source string `json:"source"` // allowMirrorByTags if true, the mirrors can be used to pull the images that are referenced by their tags. Default is false, the mirrors only work when pulling the images that are referenced by their digests. diff --git a/vendor/github.com/openshift/api/config/v1/types_image_digest_mirror_set.go b/vendor/github.com/openshift/api/config/v1/types_image_digest_mirror_set.go index 43d748c0c..df2258d12 100644 --- a/vendor/github.com/openshift/api/config/v1/types_image_digest_mirror_set.go +++ b/vendor/github.com/openshift/api/config/v1/types_image_digest_mirror_set.go @@ -25,7 +25,6 @@ type ImageDigestMirrorSet struct { metav1.ObjectMeta `json:"metadata,omitempty"` // spec holds user settable values for configuration - // +kubebuilder:validation:Required // +required Spec ImageDigestMirrorSetSpec `json:"spec"` // status contains the observed state of the resource. 
@@ -110,7 +109,6 @@ type ImageDigestMirrors struct { // for more information about the format, see the document about the location field: // https://github.com/containers/image/blob/main/docs/containers-registries.conf.5.md#choosing-a-registry-toml-table // +required - // +kubebuilder:validation:Required // +kubebuilder:validation:Pattern=`^\*(?:\.(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))+$|^((?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])(?:(?:\.(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))+)?(?::[0-9]+)?)(?:(?:/[a-z0-9]+(?:(?:(?:[._]|__|[-]*)[a-z0-9]+)+)?)+)?$` Source string `json:"source"` // mirrors is zero or more locations that may also contain the same images. No mirror will be configured if not specified. diff --git a/vendor/github.com/openshift/api/config/v1/types_image_tag_mirror_set.go b/vendor/github.com/openshift/api/config/v1/types_image_tag_mirror_set.go index ca8d35515..b7e1a6a87 100644 --- a/vendor/github.com/openshift/api/config/v1/types_image_tag_mirror_set.go +++ b/vendor/github.com/openshift/api/config/v1/types_image_tag_mirror_set.go @@ -25,7 +25,6 @@ type ImageTagMirrorSet struct { metav1.ObjectMeta `json:"metadata,omitempty"` // spec holds user settable values for configuration - // +kubebuilder:validation:Required // +required Spec ImageTagMirrorSetSpec `json:"spec"` // status contains the observed state of the resource. @@ -95,7 +94,6 @@ type ImageTagMirrors struct { // for more information about the format, see the document about the location field: // https://github.com/containers/image/blob/main/docs/containers-registries.conf.5.md#choosing-a-registry-toml-table // +required - // +kubebuilder:validation:Required // +kubebuilder:validation:Pattern=`^\*(?:\.(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))+$|^((?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])(?:(?:\.(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))+)?(?::[0-9]+)?)(?:(?:/[a-z0-9]+(?:(?:(?:[._]|__|[-]*)[a-z0-9]+)+)?)+)?$` Source string `json:"source"` // mirrors is zero or more locations that may also contain the same images. No mirror will be configured if not specified. diff --git a/vendor/github.com/openshift/api/config/v1/types_infrastructure.go b/vendor/github.com/openshift/api/config/v1/types_infrastructure.go index 0daa62d30..f10ccb855 100644 --- a/vendor/github.com/openshift/api/config/v1/types_infrastructure.go +++ b/vendor/github.com/openshift/api/config/v1/types_infrastructure.go @@ -27,7 +27,6 @@ type Infrastructure struct { metav1.ObjectMeta `json:"metadata,omitempty"` // spec holds user settable values for configuration - // +kubebuilder:validation:Required // +required Spec InfrastructureSpec `json:"spec"` // status holds observed values from the cluster. They may not be overridden. @@ -99,7 +98,8 @@ type InfrastructureStatus struct { // The 'External' mode indicates that the control plane is hosted externally to the cluster and that // its components are not visible within the cluster. 
// +kubebuilder:default=HighlyAvailable - // +kubebuilder:validation:Enum=HighlyAvailable;SingleReplica;External + // +openshift:validation:FeatureGateAwareEnum:featureGate="",enum=HighlyAvailable;SingleReplica;External + // +openshift:validation:FeatureGateAwareEnum:featureGate=HighlyAvailableArbiter;DualReplica,enum=HighlyAvailable;HighlyAvailableArbiter;SingleReplica;DualReplica;External ControlPlaneTopology TopologyMode `json:"controlPlaneTopology"` // infrastructureTopology expresses the expectations for infrastructure services that do not run on control @@ -136,9 +136,15 @@ const ( // "HighlyAvailable" is for operators to configure high-availability as much as possible. HighlyAvailableTopologyMode TopologyMode = "HighlyAvailable" + // "HighlyAvailableArbiter" is for operators to configure for an arbiter HA deployment. + HighlyAvailableArbiterMode TopologyMode = "HighlyAvailableArbiter" + // "SingleReplica" is for operators to avoid spending resources for high-availability purpose. SingleReplicaTopologyMode TopologyMode = "SingleReplica" + // "DualReplica" is for operators to configure for two node topology. + DualReplicaTopologyMode TopologyMode = "DualReplica" + // "External" indicates that the component is running externally to the cluster. When specified // as the control plane topology, operators should avoid scheduling workloads to masters or assume // that any of the control plane components such as kubernetes API server or etcd are visible within @@ -257,7 +263,7 @@ const ( // ExternalPlatformSpec holds the desired state for the generic External infrastructure provider. type ExternalPlatformSpec struct { - // PlatformName holds the arbitrary string representing the infrastructure provider name, expected to be set at the installation time. + // platformName holds the arbitrary string representing the infrastructure provider name, expected to be set at the installation time. // This field is solely for informational and reporting purposes and is not expected to be used for decision-making. // +kubebuilder:default:="Unknown" // +default="Unknown" @@ -283,55 +289,55 @@ type PlatformSpec struct { // +unionDiscriminator Type PlatformType `json:"type"` - // AWS contains settings specific to the Amazon Web Services infrastructure provider. + // aws contains settings specific to the Amazon Web Services infrastructure provider. // +optional AWS *AWSPlatformSpec `json:"aws,omitempty"` - // Azure contains settings specific to the Azure infrastructure provider. + // azure contains settings specific to the Azure infrastructure provider. // +optional Azure *AzurePlatformSpec `json:"azure,omitempty"` - // GCP contains settings specific to the Google Cloud Platform infrastructure provider. + // gcp contains settings specific to the Google Cloud Platform infrastructure provider. // +optional GCP *GCPPlatformSpec `json:"gcp,omitempty"` - // BareMetal contains settings specific to the BareMetal platform. + // baremetal contains settings specific to the BareMetal platform. // +optional BareMetal *BareMetalPlatformSpec `json:"baremetal,omitempty"` - // OpenStack contains settings specific to the OpenStack infrastructure provider. + // openstack contains settings specific to the OpenStack infrastructure provider. // +optional OpenStack *OpenStackPlatformSpec `json:"openstack,omitempty"` - // Ovirt contains settings specific to the oVirt infrastructure provider. + // ovirt contains settings specific to the oVirt infrastructure provider. 
// +optional Ovirt *OvirtPlatformSpec `json:"ovirt,omitempty"` - // VSphere contains settings specific to the VSphere infrastructure provider. + // vsphere contains settings specific to the VSphere infrastructure provider. // +optional VSphere *VSpherePlatformSpec `json:"vsphere,omitempty"` - // IBMCloud contains settings specific to the IBMCloud infrastructure provider. + // ibmcloud contains settings specific to the IBMCloud infrastructure provider. // +optional IBMCloud *IBMCloudPlatformSpec `json:"ibmcloud,omitempty"` - // Kubevirt contains settings specific to the kubevirt infrastructure provider. + // kubevirt contains settings specific to the kubevirt infrastructure provider. // +optional Kubevirt *KubevirtPlatformSpec `json:"kubevirt,omitempty"` - // EquinixMetal contains settings specific to the Equinix Metal infrastructure provider. + // equinixMetal contains settings specific to the Equinix Metal infrastructure provider. // +optional EquinixMetal *EquinixMetalPlatformSpec `json:"equinixMetal,omitempty"` - // PowerVS contains settings specific to the IBM Power Systems Virtual Servers infrastructure provider. + // powervs contains settings specific to the IBM Power Systems Virtual Servers infrastructure provider. // +optional PowerVS *PowerVSPlatformSpec `json:"powervs,omitempty"` - // AlibabaCloud contains settings specific to the Alibaba Cloud infrastructure provider. + // alibabaCloud contains settings specific to the Alibaba Cloud infrastructure provider. // +optional AlibabaCloud *AlibabaCloudPlatformSpec `json:"alibabaCloud,omitempty"` - // Nutanix contains settings specific to the Nutanix infrastructure provider. + // nutanix contains settings specific to the Nutanix infrastructure provider. // +optional Nutanix *NutanixPlatformSpec `json:"nutanix,omitempty"` @@ -401,59 +407,59 @@ type PlatformStatus struct { // Currently this value cannot be changed once set. Type PlatformType `json:"type"` - // AWS contains settings specific to the Amazon Web Services infrastructure provider. + // aws contains settings specific to the Amazon Web Services infrastructure provider. // +optional AWS *AWSPlatformStatus `json:"aws,omitempty"` - // Azure contains settings specific to the Azure infrastructure provider. + // azure contains settings specific to the Azure infrastructure provider. // +optional Azure *AzurePlatformStatus `json:"azure,omitempty"` - // GCP contains settings specific to the Google Cloud Platform infrastructure provider. + // gcp contains settings specific to the Google Cloud Platform infrastructure provider. // +optional GCP *GCPPlatformStatus `json:"gcp,omitempty"` - // BareMetal contains settings specific to the BareMetal platform. + // baremetal contains settings specific to the BareMetal platform. // +optional BareMetal *BareMetalPlatformStatus `json:"baremetal,omitempty"` - // OpenStack contains settings specific to the OpenStack infrastructure provider. + // openstack contains settings specific to the OpenStack infrastructure provider. // +optional OpenStack *OpenStackPlatformStatus `json:"openstack,omitempty"` - // Ovirt contains settings specific to the oVirt infrastructure provider. + // ovirt contains settings specific to the oVirt infrastructure provider. // +optional Ovirt *OvirtPlatformStatus `json:"ovirt,omitempty"` - // VSphere contains settings specific to the VSphere infrastructure provider. + // vsphere contains settings specific to the VSphere infrastructure provider. 
// +optional VSphere *VSpherePlatformStatus `json:"vsphere,omitempty"` - // IBMCloud contains settings specific to the IBMCloud infrastructure provider. + // ibmcloud contains settings specific to the IBMCloud infrastructure provider. // +optional IBMCloud *IBMCloudPlatformStatus `json:"ibmcloud,omitempty"` - // Kubevirt contains settings specific to the kubevirt infrastructure provider. + // kubevirt contains settings specific to the kubevirt infrastructure provider. // +optional Kubevirt *KubevirtPlatformStatus `json:"kubevirt,omitempty"` - // EquinixMetal contains settings specific to the Equinix Metal infrastructure provider. + // equinixMetal contains settings specific to the Equinix Metal infrastructure provider. // +optional EquinixMetal *EquinixMetalPlatformStatus `json:"equinixMetal,omitempty"` - // PowerVS contains settings specific to the Power Systems Virtual Servers infrastructure provider. + // powervs contains settings specific to the Power Systems Virtual Servers infrastructure provider. // +optional PowerVS *PowerVSPlatformStatus `json:"powervs,omitempty"` - // AlibabaCloud contains settings specific to the Alibaba Cloud infrastructure provider. + // alibabaCloud contains settings specific to the Alibaba Cloud infrastructure provider. // +optional AlibabaCloud *AlibabaCloudPlatformStatus `json:"alibabaCloud,omitempty"` - // Nutanix contains settings specific to the Nutanix infrastructure provider. + // nutanix contains settings specific to the Nutanix infrastructure provider. // +optional Nutanix *NutanixPlatformStatus `json:"nutanix,omitempty"` - // External contains settings specific to the generic External infrastructure provider. + // external contains settings specific to the generic External infrastructure provider. // +optional External *ExternalPlatformStatus `json:"external,omitempty"` } @@ -492,7 +498,7 @@ type AWSPlatformStatus struct { // region holds the default AWS region for new AWS resources created by the cluster. Region string `json:"region"` - // ServiceEndpoints list contains custom endpoints which will override default + // serviceEndpoints list contains custom endpoints which will override default // service endpoint of AWS Services. // There must be only one ServiceEndpoint for a service. // +listType=atomic @@ -525,20 +531,22 @@ type AWSPlatformStatus struct { // AWSResourceTag is a tag to apply to AWS resources created for the cluster. type AWSResourceTag struct { - // key is the key of the tag - // +kubebuilder:validation:Required + // key sets the key of the AWS resource tag key-value pair. Key is required when defining an AWS resource tag. + // Key should consist of between 1 and 128 characters, and may + // contain only the set of alphanumeric characters, space (' '), '_', '.', '/', '=', '+', '-', ':', and '@'. // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=128 - // +kubebuilder:validation:Pattern=`^[0-9A-Za-z_.:/=+-@]+$` + // +kubebuilder:validation:XValidation:rule=`self.matches('^[0-9A-Za-z_.:/=+-@ ]+$')`,message="invalid AWS resource tag key. The string can contain only the set of alphanumeric characters, space (' '), '_', '.', '/', '=', '+', '-', ':', '@'" // +required Key string `json:"key"` - // value is the value of the tag. + // value sets the value of the AWS resource tag key-value pair. Value is required when defining an AWS resource tag. 
+ // Value should consist of between 1 and 256 characters, and may + // contain only the set of alphanumeric characters, space (' '), '_', '.', '/', '=', '+', '-', ':', and '@'. // Some AWS service do not support empty values. Since tags are added to resources in many services, the // length of the tag value must meet the requirements of all services. - // +kubebuilder:validation:Required // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=256 - // +kubebuilder:validation:Pattern=`^[0-9A-Za-z_.:/=+-@]+$` + // +kubebuilder:validation:XValidation:rule=`self.matches('^[0-9A-Za-z_.:/=+-@ ]+$')`,message="invalid AWS resource tag value. The string can contain only the set of alphanumeric characters, space (' '), '_', '.', '/', '=', '+', '-', ':', '@'" // +required Value string `json:"value"` } @@ -584,14 +592,14 @@ type AzureResourceTag struct { // key is the key part of the tag. A tag key can have a maximum of 128 characters and cannot be empty. Key // must begin with a letter, end with a letter, number or underscore, and must contain only alphanumeric // characters and the following special characters `_ . -`. - // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=128 // +kubebuilder:validation:Pattern=`^[a-zA-Z]([0-9A-Za-z_.-]*[0-9A-Za-z_])?$` Key string `json:"key"` // value is the value part of the tag. A tag value can have a maximum of 256 characters and cannot be empty. Value // must contain only alphanumeric characters and the following special characters `_ + , - . / : ; < = > ? @`. - // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=256 // +kubebuilder:validation:Pattern=`^[0-9A-Za-z_.=+-@]+$` @@ -619,6 +627,69 @@ const ( AzureStackCloud AzureCloudEnvironment = "AzureStackCloud" ) +// GCPServiceEndpointName is the name of the GCP Service Endpoint. +// +kubebuilder:validation:Enum=Compute;Container;CloudResourceManager;DNS;File;IAM;ServiceUsage;Storage;TagManager +type GCPServiceEndpointName string + +const ( + // GCPServiceEndpointNameCompute is the name used for the GCP Compute Service endpoint. + GCPServiceEndpointNameCompute GCPServiceEndpointName = "Compute" + + // GCPServiceEndpointNameContainer is the name used for the GCP Container Service endpoint. + GCPServiceEndpointNameContainer GCPServiceEndpointName = "Container" + + // GCPServiceEndpointNameCloudResource is the name used for the GCP Resource Manager Service endpoint. + GCPServiceEndpointNameCloudResource GCPServiceEndpointName = "CloudResourceManager" + + // GCPServiceEndpointNameDNS is the name used for the GCP DNS Service endpoint. + GCPServiceEndpointNameDNS GCPServiceEndpointName = "DNS" + + // GCPServiceEndpointNameFile is the name used for the GCP File Service endpoint. + GCPServiceEndpointNameFile GCPServiceEndpointName = "File" + + // GCPServiceEndpointNameIAM is the name used for the GCP IAM Service endpoint. + GCPServiceEndpointNameIAM GCPServiceEndpointName = "IAM" + + // GCPServiceEndpointNameServiceUsage is the name used for the GCP Service Usage Service endpoint. + GCPServiceEndpointNameServiceUsage GCPServiceEndpointName = "ServiceUsage" + + // GCPServiceEndpointNameStorage is the name used for the GCP Storage Service endpoint. + GCPServiceEndpointNameStorage GCPServiceEndpointName = "Storage" + + // GCPServiceEndpointNameTagManager is the name used for the GCP Tag Manager Service endpoint. 
+ GCPServiceEndpointNameTagManager GCPServiceEndpointName = "TagManager" +) + +// GCPServiceEndpoint store the configuration of a custom url to +// override existing defaults of GCP Services. +type GCPServiceEndpoint struct { + // name is the name of the GCP service whose endpoint is being overridden. + // This must be provided and cannot be empty. + // + // Allowed values are Compute, Container, CloudResourceManager, DNS, File, IAM, ServiceUsage, + // Storage, and TagManager. + // + // As an example, when setting the name to Compute all requests made by the caller to the GCP Compute + // Service will be directed to the endpoint specified in the url field. + // + // +required + Name GCPServiceEndpointName `json:"name"` + + // url is a fully qualified URI that overrides the default endpoint for a client using the GCP service specified + // in the name field. + // url is required, must use the scheme https, must not be more than 253 characters in length, + // and must be a valid URL according to Go's net/url package (https://pkg.go.dev/net/url#URL) + // + // An example of a valid endpoint that overrides the Compute Service: "https://compute-myendpoint1.p.googleapis.com" + // + // +required + // +kubebuilder:validation:MaxLength=253 + // +kubebuilder:validation:XValidation:rule="isURL(self)",message="must be a valid URL" + // +kubebuilder:validation:XValidation:rule="isURL(self) ? (url(self).getScheme() == \"https\") : true",message="scheme must be https" + // +kubebuilder:validation:XValidation:rule="url(self).getEscapedPath() == \"\" || url(self).getEscapedPath() == \"/\"",message="url must consist only of a scheme and domain. The url path must be empty." + URL string `json:"url"` +} + // GCPPlatformSpec holds the desired state of the Google Cloud Platform infrastructure provider. // This only includes fields that can be modified in the cluster. type GCPPlatformSpec struct{} @@ -674,6 +745,19 @@ type GCPPlatformStatus struct { // +optional // +nullable CloudLoadBalancerConfig *CloudLoadBalancerConfig `json:"cloudLoadBalancerConfig,omitempty"` + + // serviceEndpoints specifies endpoints that override the default endpoints + // used when creating clients to interact with GCP services. + // When not specified, the default endpoint for the GCP region will be used. + // Only 1 endpoint override is permitted for each GCP service. + // The maximum number of endpoint overrides allowed is 9. + // +listType=map + // +listMapKey=name + // +kubebuilder:validation:MaxItems=9 + // +kubebuilder:validation:XValidation:rule="self.all(x, self.exists_one(y, x.name == y.name))",message="only 1 endpoint override is permitted per GCP service name" + // +optional + // +openshift:enable:FeatureGate=GCPCustomAPIEndpoints + ServiceEndpoints []GCPServiceEndpoint `json:"serviceEndpoints,omitempty"` } // GCPResourceLabel is a label to apply to GCP resources created for the cluster. @@ -683,7 +767,7 @@ type GCPResourceLabel struct { // and the following special characters `_-`. Label key must not have the reserved prefixes `kubernetes-io` // and `openshift-io`. 
// +kubebuilder:validation:XValidation:rule="!self.startsWith('openshift-io') && !self.startsWith('kubernetes-io')",message="label keys must not start with either `openshift-io` or `kubernetes-io`" - // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=63 // +kubebuilder:validation:Pattern=`^[a-z][0-9a-z_-]{0,62}$` @@ -691,7 +775,7 @@ type GCPResourceLabel struct { // value is the value part of the label. A label value can have a maximum of 63 characters and cannot be empty. // Value must contain only lowercase letters, numeric characters, and the following special characters `_-`. - // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=63 // +kubebuilder:validation:Pattern=`^[0-9a-z_-]{1,63}$` @@ -707,7 +791,7 @@ type GCPResourceTag struct { // An OrganizationID must consist of decimal numbers, and cannot have leading zeroes. // A ProjectID must be 6 to 30 characters in length, can only contain lowercase letters, numbers, // and hyphens, and must start with a letter, and cannot end with a hyphen. - // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=32 // +kubebuilder:validation:Pattern=`(^[1-9][0-9]{0,31}$)|(^[a-z][a-z0-9-]{4,28}[a-z0-9]$)` @@ -716,7 +800,7 @@ type GCPResourceTag struct { // key is the key part of the tag. A tag key can have a maximum of 63 characters and cannot be empty. // Tag key must begin and end with an alphanumeric character, and must contain only uppercase, lowercase // alphanumeric characters, and the following special characters `._-`. - // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=63 // +kubebuilder:validation:Pattern=`^[a-zA-Z0-9]([0-9A-Za-z_.-]{0,61}[a-zA-Z0-9])?$` @@ -725,7 +809,7 @@ type GCPResourceTag struct { // value is the value part of the tag. A tag value can have a maximum of 63 characters and cannot be empty. // Tag value must begin and end with an alphanumeric character, and must contain only uppercase, lowercase // alphanumeric characters, and the following special characters `_-.@%=+:,*#&(){}[]` and spaces. - // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=63 // +kubebuilder:validation:Pattern=`^[a-zA-Z0-9]([0-9A-Za-z_.@%=+:,*#&()\[\]{}\-\s]{0,61}[a-zA-Z0-9])?$` @@ -1162,12 +1246,34 @@ type VSpherePlatformLoadBalancer struct { Type PlatformLoadBalancerType `json:"type,omitempty"` } -// VSpherePlatformFailureDomainSpec holds the region and zone failure domain and -// the vCenter topology of that failure domain. +// The VSphereFailureDomainZoneType is a string representation of a failure domain +// zone type. There are two supportable types HostGroup and ComputeCluster +// +enum +type VSphereFailureDomainZoneType string + +// The VSphereFailureDomainRegionType is a string representation of a failure domain +// region type. There are two supportable types ComputeCluster and Datacenter +// +enum +type VSphereFailureDomainRegionType string + +const ( + // HostGroupFailureDomainZone is a failure domain zone for a vCenter vm-host group. + HostGroupFailureDomainZone VSphereFailureDomainZoneType = "HostGroup" + // ComputeClusterFailureDomainZone is a failure domain zone for a vCenter compute cluster. 
+ ComputeClusterFailureDomainZone VSphereFailureDomainZoneType = "ComputeCluster" + // DatacenterFailureDomainRegion is a failure domain region for a vCenter datacenter. + DatacenterFailureDomainRegion VSphereFailureDomainRegionType = "Datacenter" + // ComputeClusterFailureDomainRegion is a failure domain region for a vCenter compute cluster. + ComputeClusterFailureDomainRegion VSphereFailureDomainRegionType = "ComputeCluster" +) + +// VSpherePlatformFailureDomainSpec holds the region and zone failure domain and the vCenter topology of that failure domain. +// +openshift:validation:FeatureGateAwareXValidation:featureGate=VSphereHostVMGroupZonal,rule="has(self.zoneAffinity) && self.zoneAffinity.type == 'HostGroup' ? has(self.regionAffinity) && self.regionAffinity.type == 'ComputeCluster' : true",message="when zoneAffinity type is HostGroup, regionAffinity type must be ComputeCluster" +// +openshift:validation:FeatureGateAwareXValidation:featureGate=VSphereHostVMGroupZonal,rule="has(self.zoneAffinity) && self.zoneAffinity.type == 'ComputeCluster' ? has(self.regionAffinity) && self.regionAffinity.type == 'Datacenter' : true",message="when zoneAffinity type is ComputeCluster, regionAffinity type must be Datacenter" type VSpherePlatformFailureDomainSpec struct { // name defines the arbitrary but unique name // of a failure domain. - // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=256 Name string `json:"name"` @@ -1177,7 +1283,7 @@ type VSpherePlatformFailureDomainSpec struct { // category in vCenter must be named openshift-region. // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=80 - // +kubebuilder:validation:Required + // +required Region string `json:"region"` // zone defines the name of a zone tag that will @@ -1185,19 +1291,34 @@ type VSpherePlatformFailureDomainSpec struct { // category in vCenter must be named openshift-zone. // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=80 - // +kubebuilder:validation:Required + // +required Zone string `json:"zone"` + // regionAffinity holds the type of region, Datacenter or ComputeCluster. + // When set to Datacenter, this means the region is a vCenter Datacenter as defined in topology. + // When set to ComputeCluster, this means the region is a vCenter Cluster as defined in topology. + // +openshift:validation:featureGate=VSphereHostVMGroupZonal + // +optional + RegionAffinity *VSphereFailureDomainRegionAffinity `json:"regionAffinity,omitempty"` + + // zoneAffinity holds the type of the zone and the hostGroup whose + // vmGroup and hostGroup names in vCenter correspond to + // a vm-host group of type Virtual Machine and Host respectively. It also + // contains the vmHostRule which is an affinity vm-host rule in vCenter. + // +openshift:validation:featureGate=VSphereHostVMGroupZonal + // +optional + ZoneAffinity *VSphereFailureDomainZoneAffinity `json:"zoneAffinity,omitempty"` + // server is the fully-qualified domain name or the IP address of the vCenter server.
- // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=255 // --- // + Validation is applied via a patch, we validate the format as either ipv4, ipv6 or hostname Server string `json:"server"` - // Topology describes a given failure domain using vSphere constructs - // +kubebuilder:validation:Required + // topology describes a given failure domain using vSphere constructs + // +required Topology VSpherePlatformTopology `json:"topology"` } @@ -1206,7 +1327,7 @@ type VSpherePlatformFailureDomainSpec struct { type VSpherePlatformTopology struct { // datacenter is the name of vCenter datacenter in which virtual machines will be located. // The maximum length of the datacenter name is 80 characters. - // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:MaxLength=80 Datacenter string `json:"datacenter"` @@ -1214,7 +1335,7 @@ type VSpherePlatformTopology struct { // in which virtual machine will be located. // The absolute path is of the form //host/. // The maximum length of the path is 2048 characters. - // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:MaxLength=2048 // +kubebuilder:validation:Pattern=`^/.*?/host/.*?` ComputeCluster string `json:"computeCluster"` @@ -1227,7 +1348,7 @@ type VSpherePlatformTopology struct { // `govc ls 'network/*'` // Networks should be in the form of an absolute path: // //network/. - // +kubebuilder:validation:Required + // +required // +openshift:validation:FeatureGateAwareMaxItems:featureGate="",maxItems=1 // +openshift:validation:FeatureGateAwareMaxItems:featureGate=VSphereMultiNetworks,maxItems=10 // +kubebuilder:validation:MinItems=1 @@ -1238,7 +1359,7 @@ type VSpherePlatformTopology struct { // virtual machine is located. // The absolute path is of the form //datastore/ // The maximum length of the path is 2048 characters. - // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:MaxLength=2048 // +kubebuilder:validation:Pattern=`^/.*?/datastore/.*?` Datastore string `json:"datastore"` @@ -1277,12 +1398,80 @@ type VSpherePlatformTopology struct { Template string `json:"template,omitempty"` } +// VSphereFailureDomainZoneAffinity contains the vCenter cluster vm-host group (virtual machine and host types) +// and the vm-host affinity rule that together creates an affinity configuration for vm-host based zonal. +// This configuration within vCenter creates the required association between a failure domain, virtual machines +// and ESXi hosts to create a vm-host based zone. +// +kubebuilder:validation:XValidation:rule="has(self.type) && self.type == 'HostGroup' ? has(self.hostGroup) : !has(self.hostGroup)",message="hostGroup is required when type is HostGroup, and forbidden otherwise" +// +union +type VSphereFailureDomainZoneAffinity struct { + // type determines the vSphere object type for a zone within this failure domain. + // Available types are ComputeCluster and HostGroup. + // When set to ComputeCluster, this means the vCenter cluster defined is the zone. + // When set to HostGroup, hostGroup must be configured with hostGroup, vmGroup and vmHostRule and + // this means the zone is defined by the grouping of those fields. 
+ // +kubebuilder:validation:Enum:=HostGroup;ComputeCluster + // +required + // +unionDiscriminator + Type VSphereFailureDomainZoneType `json:"type"` + + // hostGroup holds the vmGroup and the hostGroup names in vCenter + // that correspond to a vm-host group of type Virtual Machine and Host respectively. It also + // contains the vmHostRule which is an affinity vm-host rule in vCenter. + // +unionMember + // +optional + HostGroup *VSphereFailureDomainHostGroup `json:"hostGroup,omitempty"` +} + +// VSphereFailureDomainRegionAffinity contains the region type which is the string representation of the +// VSphereFailureDomainRegionType with available options of Datacenter and ComputeCluster. +// +union +type VSphereFailureDomainRegionAffinity struct { + // type determines the vSphere object type for a region within this failure domain. + // Available types are Datacenter and ComputeCluster. + // When set to Datacenter, this means the vCenter Datacenter defined is the region. + // When set to ComputeCluster, this means the vCenter cluster defined is the region. + // +kubebuilder:validation:Enum:=ComputeCluster;Datacenter + // +required + // +unionDiscriminator + Type VSphereFailureDomainRegionType `json:"type"` +} + +// VSphereFailureDomainHostGroup holds the vmGroup and the hostGroup names in vCenter +// that correspond to a vm-host group of type Virtual Machine and Host respectively. It also +// contains the vmHostRule which is an affinity vm-host rule in vCenter. +type VSphereFailureDomainHostGroup struct { + // vmGroup is the name of the vm-host group of type virtual machine within vCenter for this failure domain. + // vmGroup is limited to 80 characters. + // This field is required when the VSphereFailureDomain ZoneType is HostGroup + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=80 + // +required + VMGroup string `json:"vmGroup"` + + // hostGroup is the name of the vm-host group of type host within vCenter for this failure domain. + // hostGroup is limited to 80 characters. + // This field is required when the VSphereFailureDomain ZoneType is HostGroup + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=80 + // +required + HostGroup string `json:"hostGroup"` + + // vmHostRule is the name of the affinity vm-host rule within vCenter for this failure domain. + // vmHostRule is limited to 80 characters. + // This field is required when the VSphereFailureDomain ZoneType is HostGroup + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=80 + // +required + VMHostRule string `json:"vmHostRule"` +} + // VSpherePlatformVCenterSpec stores the vCenter connection fields. // This is used by the vSphere CCM. type VSpherePlatformVCenterSpec struct { // server is the fully-qualified domain name or the IP address of the vCenter server. - // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:MaxLength=255 // --- // + Validation is applied via a patch, we validate the format as either ipv4, ipv6 or hostname @@ -1303,7 +1492,7 @@ type VSpherePlatformVCenterSpec struct { // be used by the Cloud Controller Manager. // Each datacenter listed here should be used within // a topology.
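To make the new zonal-affinity shape concrete, the following Go sketch (illustrative only, not part of the patch) builds a VSpherePlatformFailureDomainSpec whose zone is a vm-host group. Per the FeatureGateAwareXValidation rules added above, a HostGroup zone must be paired with a ComputeCluster region. All names and inventory paths are placeholders, and the Networks value is an assumption based on the surrounding VSpherePlatformTopology type, whose field declaration falls outside the hunks shown here.

```go
package example

import (
	configv1 "github.com/openshift/api/config/v1"
)

// hostGroupFailureDomain sketches the VSphereHostVMGroupZonal shape: a
// HostGroup zone carries a hostGroup block and pairs with a ComputeCluster
// region, as required by the CEL rules added in this patch.
var hostGroupFailureDomain = configv1.VSpherePlatformFailureDomainSpec{
	Name:   "us-east-1a",
	Region: "us-east",
	Zone:   "us-east-1a",
	RegionAffinity: &configv1.VSphereFailureDomainRegionAffinity{
		Type: configv1.ComputeClusterFailureDomainRegion,
	},
	ZoneAffinity: &configv1.VSphereFailureDomainZoneAffinity{
		Type: configv1.HostGroupFailureDomainZone,
		HostGroup: &configv1.VSphereFailureDomainHostGroup{
			VMGroup:    "zone-1a-vm-group",
			HostGroup:  "zone-1a-host-group",
			VMHostRule: "zone-1a-vm-host-rule",
		},
	},
	Server: "vcenter.example.com",
	Topology: configv1.VSpherePlatformTopology{
		Datacenter:     "dc1",
		ComputeCluster: "/dc1/host/cluster1",
		Networks:       []string{"/dc1/network/segment-1"}, // assumed field name
		Datastore:      "/dc1/datastore/datastore1",
	},
}
```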
- // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:MinItems=1 // +listType=set Datacenters []string `json:"datacenters"` @@ -1504,45 +1693,67 @@ type IBMCloudServiceEndpoint struct { // Whereas the IBM Cloud Private VPC service for US South (Dallas) could be configured // with the service `name` of `VPC` and `url` of `https://us.south.private.iaas.cloud.ibm.com` // - // +kubebuilder:validation:Required + // +required Name IBMCloudServiceName `json:"name"` // url is fully qualified URI with scheme https, that overrides the default generated // endpoint for a client. - // This must be provided and cannot be empty. + // This must be provided and cannot be empty. The path must follow the pattern + // /v[0,9]+ or /api/v[0,9]+ // - // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:Type=string + // +kubebuilder:validation:MaxLength=300 // +kubebuilder:validation:XValidation:rule="isURL(self)",message="url must be a valid absolute URL" + // +openshift:validation:FeatureGateAwareXValidation:featureGate=DyanmicServiceEndpointIBMCloud,rule="url(self).getScheme() == \"https\"",message="url must use https scheme" + // +openshift:validation:FeatureGateAwareXValidation:featureGate=DyanmicServiceEndpointIBMCloud,rule=`matches((url(self).getEscapedPath()), '^/(api/)?v[0-9]+/{0,1}$')`,message="url path must match /v[0,9]+ or /api/v[0,9]+" URL string `json:"url"` } // IBMCloudPlatformSpec holds the desired state of the IBMCloud infrastructure provider. // This only includes fields that can be modified in the cluster. -type IBMCloudPlatformSpec struct{} +type IBMCloudPlatformSpec struct { + // serviceEndpoints is a list of custom endpoints which will override the default + // service endpoints of an IBM service. These endpoints are used by components + // within the cluster when trying to reach the IBM Cloud Services that have been + // overriden. The CCCMO reads in the IBMCloudPlatformSpec and validates each + // endpoint is resolvable. Once validated, the cloud config and IBMCloudPlatformStatus + // are updated to reflect the same custom endpoints. + // A maximum of 13 service endpoints overrides are supported. + // +kubebuilder:validation:MaxItems=13 + // +listType=map + // +listMapKey=name + // +optional + // +openshift:enable:FeatureGate=DyanmicServiceEndpointIBMCloud + ServiceEndpoints []IBMCloudServiceEndpoint `json:"serviceEndpoints,omitempty"` +} // IBMCloudPlatformStatus holds the current status of the IBMCloud infrastructure provider. type IBMCloudPlatformStatus struct { - // Location is where the cluster has been deployed + // location is where the cluster has been deployed Location string `json:"location,omitempty"` - // ResourceGroupName is the Resource Group for new IBMCloud resources created for the cluster. + // resourceGroupName is the Resource Group for new IBMCloud resources created for the cluster. 
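Since the spec-level IBM Cloud overrides introduced above are gated and tightly validated, a short Go sketch (illustrative only, not part of the patch) may help: up to 13 entries, unique by name, each an https URL whose path must look like /v1 or /api/v1 once the DyanmicServiceEndpointIBMCloud gate is enabled. The VPC name and hostname come from the example in the surrounding comment, the service name is written as a string conversion because the constant is not part of this hunk, and the /v1 path is added to satisfy the new CEL rule.

```go
package example

import (
	configv1 "github.com/openshift/api/config/v1"
)

// ibmCloudSpecWithOverride sketches the new IBMCloudPlatformSpec.serviceEndpoints
// field; the surrounding comment describes the CCCMO validating each entry and
// mirroring it into the status and the cloud config.
var ibmCloudSpecWithOverride = configv1.IBMCloudPlatformSpec{
	ServiceEndpoints: []configv1.IBMCloudServiceEndpoint{
		{
			Name: configv1.IBMCloudServiceName("VPC"),
			URL:  "https://us.south.private.iaas.cloud.ibm.com/v1",
		},
	},
}
```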
ResourceGroupName string `json:"resourceGroupName,omitempty"` - // ProviderType indicates the type of cluster that was created + // providerType indicates the type of cluster that was created ProviderType IBMCloudProviderType `json:"providerType,omitempty"` - // CISInstanceCRN is the CRN of the Cloud Internet Services instance managing + // cisInstanceCRN is the CRN of the Cloud Internet Services instance managing // the DNS zone for the cluster's base domain CISInstanceCRN string `json:"cisInstanceCRN,omitempty"` - // DNSInstanceCRN is the CRN of the DNS Services instance managing the DNS zone + // dnsInstanceCRN is the CRN of the DNS Services instance managing the DNS zone // for the cluster's base domain DNSInstanceCRN string `json:"dnsInstanceCRN,omitempty"` // serviceEndpoints is a list of custom endpoints which will override the default - // service endpoints of an IBM Cloud service. These endpoints are consumed by - // components within the cluster to reach the respective IBM Cloud Services. + // service endpoints of an IBM service. These endpoints are used by components + // within the cluster when trying to reach the IBM Cloud Services that have been + // overriden. The CCCMO reads in the IBMCloudPlatformSpec and validates each + // endpoint is resolvable. Once validated, the cloud config and IBMCloudPlatformStatus + // are updated to reflect the same custom endpoints. + // +openshift:validation:FeatureGateAwareMaxItems:featureGate=DyanmicServiceEndpointIBMCloud,maxItems=13 // +listType=map // +listMapKey=name // +optional @@ -1592,15 +1803,15 @@ type PowerVSServiceEndpoint struct { // ResourceController - https://cloud.ibm.com/apidocs/resource-controller/resource-controller // Power Cloud - https://cloud.ibm.com/apidocs/power-cloud // - // +kubebuilder:validation:Required - // +kubebuilder:validation:Pattern=`^[a-z0-9-]+$` + // +required + // +kubebuilder:validation:Enum=CIS;COS;COSConfig;DNSServices;GlobalCatalog;GlobalSearch;GlobalTagging;HyperProtect;IAM;KeyProtect;Power;ResourceController;ResourceManager;VPC Name string `json:"name"` // url is fully qualified URI with scheme https, that overrides the default generated // endpoint for a client. // This must be provided and cannot be empty. // - // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:Type=string // +kubebuilder:validation:Format=uri // +kubebuilder:validation:Pattern=`^https://` @@ -1647,11 +1858,11 @@ type PowerVSPlatformStatus struct { // +optional ServiceEndpoints []PowerVSServiceEndpoint `json:"serviceEndpoints,omitempty"` - // CISInstanceCRN is the CRN of the Cloud Internet Services instance managing + // cisInstanceCRN is the CRN of the Cloud Internet Services instance managing // the DNS zone for the cluster's base domain CISInstanceCRN string `json:"cisInstanceCRN,omitempty"` - // DNSInstanceCRN is the CRN of the DNS Services instance managing the DNS zone + // dnsInstanceCRN is the CRN of the DNS Services instance managing the DNS zone // for the cluster's base domain DNSInstanceCRN string `json:"dnsInstanceCRN,omitempty"` } @@ -1663,7 +1874,6 @@ type AlibabaCloudPlatformSpec struct{} // AlibabaCloudPlatformStatus holds the current status of the Alibaba Cloud infrastructure provider. type AlibabaCloudPlatformStatus struct { // region specifies the region for Alibaba Cloud resources created for the cluster. 
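Looking back at the PowerVSServiceEndpoint change a few hunks above, the name field now has to be one of the enumerated services rather than any lowercase string matching the old pattern. A rough Go sketch follows (illustrative only, not part of the patch); the URL field name is assumed from the surrounding type, and the hostname is a placeholder.

```go
package example

import (
	configv1 "github.com/openshift/api/config/v1"
)

// powerVSEndpointOverride sketches a PowerVSServiceEndpoint under the tightened
// validation: Name must be an enumerated value such as "Power" or
// "ResourceController", and the url keeps its https-only, uri-format constraints.
var powerVSEndpointOverride = configv1.PowerVSServiceEndpoint{
	Name: "Power",
	URL:  "https://dal.power-iaas.cloud.ibm.com", // placeholder regional endpoint
}
```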
- // +kubebuilder:validation:Required // +kubebuilder:validation:Pattern=`^[0-9A-Za-z-]+$` // +required Region string `json:"region"` @@ -1682,13 +1892,11 @@ type AlibabaCloudPlatformStatus struct { // AlibabaCloudResourceTag is the set of tags to add to apply to resources. type AlibabaCloudResourceTag struct { // key is the key of the tag. - // +kubebuilder:validation:Required // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=128 // +required Key string `json:"key"` // value is the value of the tag. - // +kubebuilder:validation:Required // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=128 // +required @@ -1723,7 +1931,7 @@ type NutanixPlatformSpec struct { // When a cluster-wide proxy is installed, by default, this endpoint will be accessed via the proxy. // Should you wish for communication with this endpoint not to be proxied, please add the endpoint to the // proxy spec.noProxy list. - // +kubebuilder:validation:Required + // +required PrismCentral NutanixPrismEndpoint `json:"prismCentral"` // prismElements holds one or more endpoint address and port data to access the Nutanix @@ -1731,7 +1939,7 @@ type NutanixPlatformSpec struct { // Prism Element (cluster) for an OpenShift cluster, where all the Nutanix resources (VMs, subnets, volumes, etc.) // used in the OpenShift cluster are located. In the future, we may support Nutanix resources (VMs, etc.) // spread over multiple Prism Elements (clusters) of the Prism Central. - // +kubebuilder:validation:Required + // +required // +listType=map // +listMapKey=name PrismElements []NutanixPrismElementEndpoint `json:"prismElements"` @@ -1739,6 +1947,7 @@ type NutanixPlatformSpec struct { // failureDomains configures failure domains information for the Nutanix platform. // When set, the failure domains defined here may be used to spread Machines across // prism element clusters to improve fault tolerance of the cluster. + // +openshift:validation:FeatureGateAwareMaxItems:featureGate=NutanixMultiSubnets,maxItems=32 // +listType=map // +listMapKey=name // +optional @@ -1752,7 +1961,7 @@ type NutanixFailureDomain struct { // It must consist of only lower case alphanumeric characters and hyphens (-). // It must start and end with an alphanumeric character. // This value is arbitrary and is used to identify the failure domain within the platform. - // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=64 // +kubebuilder:validation:Pattern=`[a-z0-9]([-a-z0-9]*[a-z0-9])?` @@ -1761,17 +1970,19 @@ type NutanixFailureDomain struct { // cluster is to identify the cluster (the Prism Element under management of the Prism Central), // in which the Machine's VM will be created. The cluster identifier (uuid or name) can be obtained // from the Prism Central console or using the prism_central API. - // +kubebuilder:validation:Required + // +required Cluster NutanixResourceIdentifier `json:"cluster"` // subnets holds a list of identifiers (one or more) of the cluster's network subnets + // If the feature gate NutanixMultiSubnets is enabled, up to 32 subnets may be configured. // for the Machine's VM to connect to. The subnet identifiers (uuid or name) can be // obtained from the Prism Central console or using the prism_central API. 
- // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:MinItems=1 - // +kubebuilder:validation:MaxItems=1 - // +listType=map - // +listMapKey=type + // +openshift:validation:FeatureGateAwareMaxItems:featureGate="",maxItems=1 + // +openshift:validation:FeatureGateAwareMaxItems:featureGate=NutanixMultiSubnets,maxItems=32 + // +openshift:validation:FeatureGateAwareXValidation:featureGate=NutanixMultiSubnets,rule="self.all(x, self.exists_one(y, x == y))",message="each subnet must be unique" + // +listType=atomic Subnets []NutanixResourceIdentifier `json:"subnets"` } @@ -1794,7 +2005,7 @@ const ( type NutanixResourceIdentifier struct { // type is the identifier type to use for this resource. // +unionDiscriminator - // +kubebuilder:validation:Required + // +required Type NutanixIdentifierType `json:"type"` // uuid is the UUID of the resource in the PC. It cannot be empty if the type is UUID. @@ -1809,12 +2020,12 @@ type NutanixResourceIdentifier struct { // NutanixPrismEndpoint holds the endpoint address and port to access the Nutanix Prism Central or Element (cluster) type NutanixPrismEndpoint struct { // address is the endpoint address (DNS name or IP address) of the Nutanix Prism Central or Element (cluster) - // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:MaxLength=256 Address string `json:"address"` // port is the port number to access the Nutanix Prism Central or Element (cluster) - // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:Minimum=1 // +kubebuilder:validation:Maximum=65535 Port int32 `json:"port"` @@ -1824,7 +2035,7 @@ type NutanixPrismEndpoint struct { type NutanixPrismElementEndpoint struct { // name is the name of the Prism Element (cluster). This value will correspond with // the cluster field configured on other resources (eg Machines, PVCs, etc). - // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:MaxLength=256 Name string `json:"name"` @@ -1832,7 +2043,7 @@ type NutanixPrismElementEndpoint struct { // When a cluster-wide proxy is installed, by default, this endpoint will be accessed via the proxy. // Should you wish for communication with this endpoint not to be proxied, please add the endpoint to the // proxy spec.noProxy list. - // +kubebuilder:validation:Required + // +required Endpoint NutanixPrismEndpoint `json:"endpoint"` } diff --git a/vendor/github.com/openshift/api/config/v1/types_ingress.go b/vendor/github.com/openshift/api/config/v1/types_ingress.go index 302913a16..9492e08a7 100644 --- a/vendor/github.com/openshift/api/config/v1/types_ingress.go +++ b/vendor/github.com/openshift/api/config/v1/types_ingress.go @@ -27,7 +27,6 @@ type Ingress struct { metav1.ObjectMeta `json:"metadata,omitempty"` // spec holds user settable values for configuration - // +kubebuilder:validation:Required // +required Spec IngressSpec `json:"spec"` // status holds observed values from the cluster. They may not be overridden. 
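Returning to the Nutanix failureDomains change above: with the NutanixMultiSubnets gate, the subnets list becomes atomic and may hold up to 32 unique identifiers instead of exactly one. A rough Go sketch (illustrative only, not part of the patch) is below; the pointer Name field on NutanixResourceIdentifier and the "Name" identifier type value are assumptions drawn from the surrounding type, and all cluster and subnet names are placeholders.

```go
package example

import (
	configv1 "github.com/openshift/api/config/v1"
)

// nutanixMultiSubnetFailureDomain sketches a failure domain that attaches VMs
// to two subnets of the same Prism Element, which the previous MaxItems=1
// marker would have rejected.
func nutanixMultiSubnetFailureDomain() configv1.NutanixFailureDomain {
	cluster := "pe-cluster-1"
	subnetA := "pe-subnet-a"
	subnetB := "pe-subnet-b"
	return configv1.NutanixFailureDomain{
		Name: "fd-1",
		Cluster: configv1.NutanixResourceIdentifier{
			Type: configv1.NutanixIdentifierType("Name"), // assumed identifier type value
			Name: &cluster,                               // assumed pointer field
		},
		Subnets: []configv1.NutanixResourceIdentifier{
			{Type: configv1.NutanixIdentifierType("Name"), Name: &subnetA},
			{Type: configv1.NutanixIdentifierType("Name"), Name: &subnetB},
		},
	}
}
```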
@@ -150,7 +149,7 @@ type AWSIngressSpec struct { // https://docs.aws.amazon.com/AmazonECS/latest/developerguide/load-balancer-types.html#nlb // +unionDiscriminator // +kubebuilder:validation:Enum:=NLB;Classic - // +kubebuilder:validation:Required + // +required Type AWSLBType `json:"type,omitempty"` } @@ -223,7 +222,6 @@ type ComponentRouteSpec struct { // +kubebuilder:validation:Pattern=^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=63 - // +kubebuilder:validation:Required // +required Namespace string `json:"namespace"` @@ -233,12 +231,10 @@ type ComponentRouteSpec struct { // entry in the list of status.componentRoutes if the route is to be customized. // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=256 - // +kubebuilder:validation:Required // +required Name string `json:"name"` // hostname is the hostname that should be used by the route. - // +kubebuilder:validation:Required // +required Hostname Hostname `json:"hostname"` @@ -260,7 +256,6 @@ type ComponentRouteStatus struct { // +kubebuilder:validation:Pattern=^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=63 - // +kubebuilder:validation:Required // +required Namespace string `json:"namespace"` @@ -271,12 +266,10 @@ type ComponentRouteStatus struct { // entry in the list of spec.componentRoutes if the route is to be customized. // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=256 - // +kubebuilder:validation:Required // +required Name string `json:"name"` // defaultHostname is the hostname of this route prior to customization. - // +kubebuilder:validation:Required // +required DefaultHostname Hostname `json:"defaultHostname"` @@ -310,7 +303,6 @@ type ComponentRouteStatus struct { // relatedObjects is a list of resources which are useful when debugging or inspecting how spec.componentRoutes is applied. // +kubebuilder:validation:MinItems=1 - // +kubebuilder:validation:Required // +required RelatedObjects []ObjectReference `json:"relatedObjects"` } diff --git a/vendor/github.com/openshift/api/config/v1/types_network.go b/vendor/github.com/openshift/api/config/v1/types_network.go index 1eeae69da..41dc2eb97 100644 --- a/vendor/github.com/openshift/api/config/v1/types_network.go +++ b/vendor/github.com/openshift/api/config/v1/types_network.go @@ -30,7 +30,6 @@ type Network struct { // As a general rule, this SHOULD NOT be read directly. Instead, you should // consume the NetworkStatus, as it indicates the currently deployed configuration. // Currently, most spec fields are immutable after installation. Please view the individual ones for further details on each. - // +kubebuilder:validation:Required // +required Spec NetworkSpec `json:"spec"` // status holds observed values from the cluster. They may not be overridden. @@ -55,7 +54,7 @@ type NetworkSpec struct { // +listType=atomic ServiceNetwork []string `json:"serviceNetwork"` - // NetworkType is the plugin that is to be deployed (e.g. OVNKubernetes). + // networkType is the plugin that is to be deployed (e.g. OVNKubernetes). // This should match a value that the cluster-network-operator understands, // or else no networking will be installed. // Currently supported values are: @@ -101,24 +100,22 @@ type NetworkStatus struct { // +listType=atomic ServiceNetwork []string `json:"serviceNetwork,omitempty"` - // NetworkType is the plugin that is deployed (e.g. OVNKubernetes). 
+ // networkType is the plugin that is deployed (e.g. OVNKubernetes). NetworkType string `json:"networkType,omitempty"` - // ClusterNetworkMTU is the MTU for inter-pod networking. + // clusterNetworkMTU is the MTU for inter-pod networking. ClusterNetworkMTU int `json:"clusterNetworkMTU,omitempty"` - // Migration contains the cluster network migration configuration. + // migration contains the cluster network migration configuration. Migration *NetworkMigration `json:"migration,omitempty"` // conditions represents the observations of a network.config current state. // Known .status.conditions.type are: "NetworkDiagnosticsAvailable" // +optional - // +patchMergeKey=type - // +patchStrategy=merge // +listType=map // +listMapKey=type // +openshift:enable:FeatureGate=NetworkDiagnosticsConfig - Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"` + Conditions []metav1.Condition `json:"conditions,omitempty"` } // ClusterNetworkEntry is a contiguous block of IP addresses from which pod IPs @@ -185,35 +182,35 @@ type NetworkList struct { // NetworkMigration represents the network migration status. type NetworkMigration struct { - // NetworkType is the target plugin that is being deployed. + // networkType is the target plugin that is being deployed. // DEPRECATED: network type migration is no longer supported, // so this should always be unset. // +optional NetworkType string `json:"networkType,omitempty"` - // MTU is the MTU configuration that is being deployed. + // mtu is the MTU configuration that is being deployed. // +optional MTU *MTUMigration `json:"mtu,omitempty"` } // MTUMigration contains infomation about MTU migration. type MTUMigration struct { - // Network contains MTU migration configuration for the default network. + // network contains MTU migration configuration for the default network. // +optional Network *MTUMigrationValues `json:"network,omitempty"` - // Machine contains MTU migration configuration for the machine's uplink. + // machine contains MTU migration configuration for the machine's uplink. // +optional Machine *MTUMigrationValues `json:"machine,omitempty"` } // MTUMigrationValues contains the values for a MTU migration. type MTUMigrationValues struct { - // To is the MTU to migrate to. + // to is the MTU to migrate to. // +kubebuilder:validation:Minimum=0 To *uint32 `json:"to"` - // From is the MTU to migrate from. + // from is the MTU to migrate from. 
// +kubebuilder:validation:Minimum=0 // +optional From *uint32 `json:"from,omitempty"` diff --git a/vendor/github.com/openshift/api/config/v1/types_node.go b/vendor/github.com/openshift/api/config/v1/types_node.go index a50328c91..3977f9f14 100644 --- a/vendor/github.com/openshift/api/config/v1/types_node.go +++ b/vendor/github.com/openshift/api/config/v1/types_node.go @@ -28,7 +28,6 @@ type Node struct { metav1.ObjectMeta `json:"metadata,omitempty"` // spec holds user settable values for configuration - // +kubebuilder:validation:Required // +required Spec NodeSpec `json:"spec"` @@ -38,11 +37,11 @@ type Node struct { } type NodeSpec struct { - // CgroupMode determines the cgroups version on the node + // cgroupMode determines the cgroups version on the node // +optional CgroupMode CgroupMode `json:"cgroupMode,omitempty"` - // WorkerLatencyProfile determins the how fast the kubelet is updating + // workerLatencyProfile determins the how fast the kubelet is updating // the status and corresponding reaction of the cluster // +optional WorkerLatencyProfile WorkerLatencyProfileType `json:"workerLatencyProfile,omitempty"` @@ -69,12 +68,10 @@ type NodeSpec struct { type NodeStatus struct { // conditions contain the details and the current state of the nodes.config object - // +patchMergeKey=type - // +patchStrategy=merge // +listType=map // +listMapKey=type // +optional - Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"` + Conditions []metav1.Condition `json:"conditions,omitempty"` } // +kubebuilder:validation:Enum=v1;v2;"" diff --git a/vendor/github.com/openshift/api/config/v1/types_oauth.go b/vendor/github.com/openshift/api/config/v1/types_oauth.go index dce08a17f..20845e4db 100644 --- a/vendor/github.com/openshift/api/config/v1/types_oauth.go +++ b/vendor/github.com/openshift/api/config/v1/types_oauth.go @@ -27,7 +27,6 @@ type OAuth struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata"` // spec holds user settable values for configuration - // +kubebuilder:validation:Required // +required Spec OAuthSpec `json:"spec"` // status holds observed values from the cluster. They may not be overridden. diff --git a/vendor/github.com/openshift/api/config/v1/types_project.go b/vendor/github.com/openshift/api/config/v1/types_project.go index 78fd3f41a..3d219862b 100644 --- a/vendor/github.com/openshift/api/config/v1/types_project.go +++ b/vendor/github.com/openshift/api/config/v1/types_project.go @@ -24,7 +24,6 @@ type Project struct { metav1.ObjectMeta `json:"metadata,omitempty"` // spec holds user settable values for configuration - // +kubebuilder:validation:Required // +required Spec ProjectSpec `json:"spec"` // status holds observed values from the cluster. They may not be overridden. 
diff --git a/vendor/github.com/openshift/api/config/v1/types_proxy.go b/vendor/github.com/openshift/api/config/v1/types_proxy.go index 2dfc66b1c..ed40176ce 100644 --- a/vendor/github.com/openshift/api/config/v1/types_proxy.go +++ b/vendor/github.com/openshift/api/config/v1/types_proxy.go @@ -25,8 +25,7 @@ type Proxy struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata,omitempty"` - // Spec holds user-settable values for the proxy configuration - // +kubebuilder:validation:Required + // spec holds user-settable values for the proxy configuration // +required Spec ProxySpec `json:"spec"` // status holds observed values from the cluster. They may not be overridden. diff --git a/vendor/github.com/openshift/api/config/v1/types_scheduling.go b/vendor/github.com/openshift/api/config/v1/types_scheduling.go index 2749f4f70..c90d5633f 100644 --- a/vendor/github.com/openshift/api/config/v1/types_scheduling.go +++ b/vendor/github.com/openshift/api/config/v1/types_scheduling.go @@ -25,7 +25,6 @@ type Scheduler struct { metav1.ObjectMeta `json:"metadata,omitempty"` // spec holds user settable values for configuration - // +kubebuilder:validation:Required // +required Spec SchedulerSpec `json:"spec"` // status holds observed values from the cluster. They may not be overridden. @@ -74,7 +73,7 @@ type SchedulerSpec struct { // would not be applied. // +optional DefaultNodeSelector string `json:"defaultNodeSelector,omitempty"` - // MastersSchedulable allows masters nodes to be schedulable. When this flag is + // mastersSchedulable allows masters nodes to be schedulable. When this flag is // turned on, all the master nodes in the cluster will be made schedulable, // so that workload pods can run on them. The default value for this field is false, // meaning none of the master nodes are schedulable. diff --git a/vendor/github.com/openshift/api/config/v1/types_testreporting.go b/vendor/github.com/openshift/api/config/v1/types_testreporting.go index 4d642e060..00953957f 100644 --- a/vendor/github.com/openshift/api/config/v1/types_testreporting.go +++ b/vendor/github.com/openshift/api/config/v1/types_testreporting.go @@ -15,7 +15,6 @@ type TestReporting struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata,omitempty"` - // +kubebuilder:validation:Required // +required Spec TestReportingSpec `json:"spec"` // status holds observed values from the cluster. They may not be overridden. @@ -24,20 +23,20 @@ type TestReporting struct { } type TestReportingSpec struct { - // TestsForFeatureGates is a list, indexed by FeatureGate and includes information about testing. + // testsForFeatureGates is a list, indexed by FeatureGate and includes information about testing. TestsForFeatureGates []FeatureGateTests `json:"testsForFeatureGates"` } type FeatureGateTests struct { - // FeatureGate is the name of the FeatureGate as it appears in The FeatureGate CR instance. + // featureGate is the name of the FeatureGate as it appears in The FeatureGate CR instance. FeatureGate string `json:"featureGate"` - // Tests contains an item for every TestName + // tests contains an item for every TestName Tests []TestDetails `json:"tests"` } type TestDetails struct { - // TestName is the name of the test as it appears in junit XMLs. + // testName is the name of the test as it appears in junit XMLs. 
// It does not include the suite name since the same test can be executed in many suites. TestName string `json:"testName"` } diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/config/v1/zz_generated.deepcopy.go index 1b7fa44aa..40b0c857b 100644 --- a/vendor/github.com/openshift/api/config/v1/zz_generated.deepcopy.go +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.deepcopy.go @@ -2226,6 +2226,11 @@ func (in *GCPPlatformStatus) DeepCopyInto(out *GCPPlatformStatus) { *out = new(CloudLoadBalancerConfig) (*in).DeepCopyInto(*out) } + if in.ServiceEndpoints != nil { + in, out := &in.ServiceEndpoints, &out.ServiceEndpoints + *out = make([]GCPServiceEndpoint, len(*in)) + copy(*out, *in) + } return } @@ -2271,6 +2276,22 @@ func (in *GCPResourceTag) DeepCopy() *GCPResourceTag { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GCPServiceEndpoint) DeepCopyInto(out *GCPServiceEndpoint) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GCPServiceEndpoint. +func (in *GCPServiceEndpoint) DeepCopy() *GCPServiceEndpoint { + if in == nil { + return nil + } + out := new(GCPServiceEndpoint) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *GenericAPIServerConfig) DeepCopyInto(out *GenericAPIServerConfig) { *out = *in @@ -2450,6 +2471,11 @@ func (in *HubSourceStatus) DeepCopy() *HubSourceStatus { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *IBMCloudPlatformSpec) DeepCopyInto(out *IBMCloudPlatformSpec) { *out = *in + if in.ServiceEndpoints != nil { + in, out := &in.ServiceEndpoints, &out.ServiceEndpoints + *out = make([]IBMCloudServiceEndpoint, len(*in)) + copy(*out, *in) + } return } @@ -4689,7 +4715,7 @@ func (in *PlatformSpec) DeepCopyInto(out *PlatformSpec) { if in.IBMCloud != nil { in, out := &in.IBMCloud, &out.IBMCloud *out = new(IBMCloudPlatformSpec) - **out = **in + (*in).DeepCopyInto(*out) } if in.Kubevirt != nil { in, out := &in.Kubevirt, &out.Kubevirt @@ -5827,9 +5853,72 @@ func (in *UsernamePrefix) DeepCopy() *UsernamePrefix { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VSphereFailureDomainHostGroup) DeepCopyInto(out *VSphereFailureDomainHostGroup) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VSphereFailureDomainHostGroup. +func (in *VSphereFailureDomainHostGroup) DeepCopy() *VSphereFailureDomainHostGroup { + if in == nil { + return nil + } + out := new(VSphereFailureDomainHostGroup) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VSphereFailureDomainRegionAffinity) DeepCopyInto(out *VSphereFailureDomainRegionAffinity) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VSphereFailureDomainRegionAffinity. 
+func (in *VSphereFailureDomainRegionAffinity) DeepCopy() *VSphereFailureDomainRegionAffinity { + if in == nil { + return nil + } + out := new(VSphereFailureDomainRegionAffinity) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VSphereFailureDomainZoneAffinity) DeepCopyInto(out *VSphereFailureDomainZoneAffinity) { + *out = *in + if in.HostGroup != nil { + in, out := &in.HostGroup, &out.HostGroup + *out = new(VSphereFailureDomainHostGroup) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VSphereFailureDomainZoneAffinity. +func (in *VSphereFailureDomainZoneAffinity) DeepCopy() *VSphereFailureDomainZoneAffinity { + if in == nil { + return nil + } + out := new(VSphereFailureDomainZoneAffinity) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *VSpherePlatformFailureDomainSpec) DeepCopyInto(out *VSpherePlatformFailureDomainSpec) { *out = *in + if in.RegionAffinity != nil { + in, out := &in.RegionAffinity, &out.RegionAffinity + *out = new(VSphereFailureDomainRegionAffinity) + **out = **in + } + if in.ZoneAffinity != nil { + in, out := &in.ZoneAffinity, &out.ZoneAffinity + *out = new(VSphereFailureDomainZoneAffinity) + (*in).DeepCopyInto(*out) + } in.Topology.DeepCopyInto(&out.Topology) return } diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.featuregated-crd-manifests.yaml b/vendor/github.com/openshift/api/config/v1/zz_generated.featuregated-crd-manifests.yaml index abfea5eaf..f8182fffe 100644 --- a/vendor/github.com/openshift/api/config/v1/zz_generated.featuregated-crd-manifests.yaml +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.featuregated-crd-manifests.yaml @@ -115,6 +115,7 @@ clusterversions.config.openshift.io: Capability: "" Category: "" FeatureGates: + - ImageStreamImportMode - SignatureStores FilenameOperatorName: cluster-version-operator FilenameOperatorOrdering: "01" @@ -312,9 +313,15 @@ infrastructures.config.openshift.io: FeatureGates: - AWSClusterHostedDNS - BareMetalLoadBalancer + - DualReplica + - DyanmicServiceEndpointIBMCloud - GCPClusterHostedDNS + - GCPCustomAPIEndpoints - GCPLabelsTags + - HighlyAvailableArbiter + - NutanixMultiSubnets - VSphereControlPlaneMachineSet + - VSphereHostVMGroupZonal - VSphereMultiNetworks - VSphereMultiVCenters FilenameOperatorName: config-operator diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go index 145a7e4c0..819b713ad 100644 --- a/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go @@ -22,8 +22,8 @@ func (AdmissionConfig) SwaggerDoc() map[string]string { var map_AdmissionPluginConfig = map[string]string{ "": "AdmissionPluginConfig holds the necessary configuration options for admission plugins", - "location": "Location is the path to a configuration file that contains the plugin's configuration", - "configuration": "Configuration is an embedded configuration object to be used as the plugin's configuration. 
If present, it will be used instead of the path to the configuration file.", + "location": "location is the path to a configuration file that contains the plugin's configuration", + "configuration": "configuration is an embedded configuration object to be used as the plugin's configuration. If present, it will be used instead of the path to the configuration file.", } func (AdmissionPluginConfig) SwaggerDoc() map[string]string { @@ -37,8 +37,8 @@ var map_AuditConfig = map[string]string{ "maximumFileRetentionDays": "Maximum number of days to retain old log files based on the timestamp encoded in their filename.", "maximumRetainedFiles": "Maximum number of old log files to retain.", "maximumFileSizeMegabytes": "Maximum size in megabytes of the log file before it gets rotated. Defaults to 100MB.", - "policyFile": "PolicyFile is a path to the file that defines the audit policy configuration.", - "policyConfiguration": "PolicyConfiguration is an embedded policy configuration object to be used as the audit policy configuration. If present, it will be used instead of the path to the policy file.", + "policyFile": "policyFile is a path to the file that defines the audit policy configuration.", + "policyConfiguration": "policyConfiguration is an embedded policy configuration object to be used as the audit policy configuration. If present, it will be used instead of the path to the policy file.", "logFormat": "Format of saved audits (legacy or json).", "webHookKubeConfig": "Path to a .kubeconfig formatted file that defines the audit webhook configuration.", "webHookMode": "Strategy for sending audit events (block or batch).", @@ -50,8 +50,8 @@ func (AuditConfig) SwaggerDoc() map[string]string { var map_CertInfo = map[string]string{ "": "CertInfo relates a certificate with a private key", - "certFile": "CertFile is a file containing a PEM-encoded certificate", - "keyFile": "KeyFile is a file containing a PEM-encoded private key for the certificate specified by CertFile", + "certFile": "certFile is a file containing a PEM-encoded certificate", + "keyFile": "keyFile is a file containing a PEM-encoded private key for the certificate specified by CertFile", } func (CertInfo) SwaggerDoc() map[string]string { @@ -71,7 +71,7 @@ func (ClientConnectionOverrides) SwaggerDoc() map[string]string { var map_ConfigMapFileReference = map[string]string{ "": "ConfigMapFileReference references a config map in a specific namespace. The namespace must be specified at the point of use.", - "key": "Key allows pointing to a specific key/value inside of the configmap. This is useful for logical file references.", + "key": "key allows pointing to a specific key/value inside of the configmap. 
This is useful for logical file references.", } func (ConfigMapFileReference) SwaggerDoc() map[string]string { @@ -107,8 +107,8 @@ func (DelegatedAuthorization) SwaggerDoc() map[string]string { var map_EtcdConnectionInfo = map[string]string{ "": "EtcdConnectionInfo holds information necessary for connecting to an etcd server", - "urls": "URLs are the URLs for etcd", - "ca": "CA is a file containing trusted roots for the etcd server certificates", + "urls": "urls are the URLs for etcd", + "ca": "ca is a file containing trusted roots for the etcd server certificates", } func (EtcdConnectionInfo) SwaggerDoc() map[string]string { @@ -116,7 +116,7 @@ func (EtcdConnectionInfo) SwaggerDoc() map[string]string { } var map_EtcdStorageConfig = map[string]string{ - "storagePrefix": "StoragePrefix is the path within etcd that the OpenShift resources will be rooted under. This value, if changed, will mean existing objects in etcd will no longer be located.", + "storagePrefix": "storagePrefix is the path within etcd that the OpenShift resources will be rooted under. This value, if changed, will mean existing objects in etcd will no longer be located.", } func (EtcdStorageConfig) SwaggerDoc() map[string]string { @@ -138,7 +138,7 @@ func (GenericAPIServerConfig) SwaggerDoc() map[string]string { var map_GenericControllerConfig = map[string]string{ "": "GenericControllerConfig provides information to configure a controller", - "servingInfo": "ServingInfo is the HTTP serving information for the controller's endpoints", + "servingInfo": "servingInfo is the HTTP serving information for the controller's endpoints", "leaderElection": "leaderElection provides information to elect a leader. Only override this if you have a specific need", "authentication": "authentication allows configuration of authentication for the endpoints", "authorization": "authorization allows configuration of authentication for the endpoints", @@ -150,8 +150,8 @@ func (GenericControllerConfig) SwaggerDoc() map[string]string { var map_HTTPServingInfo = map[string]string{ "": "HTTPServingInfo holds configuration for serving HTTP", - "maxRequestsInFlight": "MaxRequestsInFlight is the number of concurrent requests allowed to the server. If zero, no limit.", - "requestTimeoutSeconds": "RequestTimeoutSeconds is the number of seconds before requests are timed out. The default is 60 minutes, if -1 there is no limit on requests.", + "maxRequestsInFlight": "maxRequestsInFlight is the number of concurrent requests allowed to the server. If zero, no limit.", + "requestTimeoutSeconds": "requestTimeoutSeconds is the number of seconds before requests are timed out. 
The default is 60 minutes, if -1 there is no limit on requests.", } func (HTTPServingInfo) SwaggerDoc() map[string]string { @@ -193,7 +193,7 @@ func (MaxAgePolicy) SwaggerDoc() map[string]string { var map_NamedCertificate = map[string]string{ "": "NamedCertificate specifies a certificate/key, and the names it should be served for", - "names": "Names is a list of DNS names this certificate should be used to secure A name can be a normal DNS name, or can contain leading wildcard segments.", + "names": "names is a list of DNS names this certificate should be used to secure A name can be a normal DNS name, or can contain leading wildcard segments.", } func (NamedCertificate) SwaggerDoc() map[string]string { @@ -202,8 +202,8 @@ func (NamedCertificate) SwaggerDoc() map[string]string { var map_RemoteConnectionInfo = map[string]string{ "": "RemoteConnectionInfo holds information necessary for establishing a remote connection", - "url": "URL is the remote URL to connect to", - "ca": "CA is the CA for verifying TLS connections", + "url": "url is the remote URL to connect to", + "ca": "ca is the CA for verifying TLS connections", } func (RemoteConnectionInfo) SwaggerDoc() map[string]string { @@ -233,12 +233,12 @@ func (SecretNameReference) SwaggerDoc() map[string]string { var map_ServingInfo = map[string]string{ "": "ServingInfo holds information about serving web pages", - "bindAddress": "BindAddress is the ip:port to serve on", - "bindNetwork": "BindNetwork is the type of network to bind to - defaults to \"tcp4\", accepts \"tcp\", \"tcp4\", and \"tcp6\"", - "clientCA": "ClientCA is the certificate bundle for all the signers that you'll recognize for incoming client certificates", - "namedCertificates": "NamedCertificates is a list of certificates to use to secure requests to specific hostnames", - "minTLSVersion": "MinTLSVersion is the minimum TLS version supported. Values must match version names from https://golang.org/pkg/crypto/tls/#pkg-constants", - "cipherSuites": "CipherSuites contains an overridden list of ciphers for the server to support. Values must match cipher suite IDs from https://golang.org/pkg/crypto/tls/#pkg-constants", + "bindAddress": "bindAddress is the ip:port to serve on", + "bindNetwork": "bindNetwork is the type of network to bind to - defaults to \"tcp4\", accepts \"tcp\", \"tcp4\", and \"tcp6\"", + "clientCA": "clientCA is the certificate bundle for all the signers that you'll recognize for incoming client certificates", + "namedCertificates": "namedCertificates is a list of certificates to use to secure requests to specific hostnames", + "minTLSVersion": "minTLSVersion is the minimum TLS version supported. Values must match version names from https://golang.org/pkg/crypto/tls/#pkg-constants", + "cipherSuites": "cipherSuites contains an overridden list of ciphers for the server to support. 
Values must match cipher suite IDs from https://golang.org/pkg/crypto/tls/#pkg-constants", } func (ServingInfo) SwaggerDoc() map[string]string { @@ -255,10 +255,10 @@ func (StringSource) SwaggerDoc() map[string]string { var map_StringSourceSpec = map[string]string{ "": "StringSourceSpec specifies a string value, or external location", - "value": "Value specifies the cleartext value, or an encrypted value if keyFile is specified.", - "env": "Env specifies an envvar containing the cleartext value, or an encrypted value if the keyFile is specified.", - "file": "File references a file containing the cleartext value, or an encrypted value if a keyFile is specified.", - "keyFile": "KeyFile references a file containing the key to use to decrypt the value.", + "value": "value specifies the cleartext value, or an encrypted value if keyFile is specified.", + "env": "env specifies an envvar containing the cleartext value, or an encrypted value if the keyFile is specified.", + "file": "file references a file containing the cleartext value, or an encrypted value if a keyFile is specified.", + "keyFile": "keyFile references a file containing the key to use to decrypt the value.", } func (StringSourceSpec) SwaggerDoc() map[string]string { @@ -369,7 +369,7 @@ var map_AuthenticationSpec = map[string]string{ "webhookTokenAuthenticators": "webhookTokenAuthenticators is DEPRECATED, setting it has no effect.", "webhookTokenAuthenticator": "webhookTokenAuthenticator configures a remote token reviewer. These remote authentication webhooks can be used to verify bearer tokens via the tokenreviews.authentication.k8s.io REST API. This is required to honor bearer tokens that are provisioned by an external authentication service.\n\nCan only be set if \"Type\" is set to \"None\".", "serviceAccountIssuer": "serviceAccountIssuer is the identifier of the bound service account token issuer. The default is https://kubernetes.default.svc WARNING: Updating this field will not result in immediate invalidation of all bound tokens with the previous issuer value. Instead, the tokens issued by previous service account issuer will continue to be trusted for a time period chosen by the platform (currently set to 24h). This time period is subject to change over time. This allows internal components to transition to use new service account issuer without service distruption.", - "oidcProviders": "OIDCProviders are OIDC identity providers that can issue tokens for this cluster Can only be set if \"Type\" is set to \"OIDC\".\n\nAt most one provider can be configured.", + "oidcProviders": "oidcProviders are OIDC identity providers that can issue tokens for this cluster Can only be set if \"Type\" is set to \"OIDC\".\n\nAt most one provider can be configured.", } func (AuthenticationSpec) SwaggerDoc() map[string]string { @@ -378,7 +378,7 @@ func (AuthenticationSpec) SwaggerDoc() map[string]string { var map_AuthenticationStatus = map[string]string{ "integratedOAuthMetadata": "integratedOAuthMetadata contains the discovery endpoint data for OAuth 2.0 Authorization Server Metadata for the in-cluster integrated OAuth server. This discovery document can be viewed from its served location: oc get --raw '/.well-known/oauth-authorization-server' For further details, see the IETF Draft: https://tools.ietf.org/html/draft-ietf-oauth-discovery-04#section-2 This contains the observed value based on cluster state. An explicitly set value in spec.oauthMetadata has precedence over this field. 
This field has no meaning if authentication spec.type is not set to IntegratedOAuth. The key \"oauthMetadata\" is used to locate the data. If the config map or expected key is not found, no metadata is served. If the specified metadata is not valid, no metadata is served. The namespace for this config map is openshift-config-managed.", - "oidcClients": "OIDCClients is where participating operators place the current OIDC client status for OIDC clients that can be customized by the cluster-admin.", + "oidcClients": "oidcClients is where participating operators place the current OIDC client status for OIDC clients that can be customized by the cluster-admin.", } func (AuthenticationStatus) SwaggerDoc() map[string]string { @@ -395,11 +395,11 @@ func (DeprecatedWebhookTokenAuthenticator) SwaggerDoc() map[string]string { } var map_OIDCClientConfig = map[string]string{ - "componentName": "ComponentName is the name of the component that is supposed to consume this client configuration", - "componentNamespace": "ComponentNamespace is the namespace of the component that is supposed to consume this client configuration", - "clientID": "ClientID is the identifier of the OIDC client from the OIDC provider", - "clientSecret": "ClientSecret refers to a secret in the `openshift-config` namespace that contains the client secret in the `clientSecret` key of the `.data` field", - "extraScopes": "ExtraScopes is an optional set of scopes to request tokens with.", + "componentName": "componentName is the name of the component that is supposed to consume this client configuration", + "componentNamespace": "componentNamespace is the namespace of the component that is supposed to consume this client configuration", + "clientID": "clientID is the identifier of the OIDC client from the OIDC provider", + "clientSecret": "clientSecret refers to a secret in the `openshift-config` namespace that contains the client secret in the `clientSecret` key of the `.data` field", + "extraScopes": "extraScopes is an optional set of scopes to request tokens with.", } func (OIDCClientConfig) SwaggerDoc() map[string]string { @@ -409,7 +409,7 @@ func (OIDCClientConfig) SwaggerDoc() map[string]string { var map_OIDCClientReference = map[string]string{ "oidcProviderName": "OIDCName refers to the `name` of the provider from `oidcProviders`", "issuerURL": "URL is the serving URL of the token issuer. Must use the https:// scheme.", - "clientID": "ClientID is the identifier of the OIDC client from the OIDC provider", + "clientID": "clientID is the identifier of the OIDC client from the OIDC provider", } func (OIDCClientReference) SwaggerDoc() map[string]string { @@ -417,11 +417,11 @@ func (OIDCClientReference) SwaggerDoc() map[string]string { } var map_OIDCClientStatus = map[string]string{ - "componentName": "ComponentName is the name of the component that will consume a client configuration.", - "componentNamespace": "ComponentNamespace is the namespace of the component that will consume a client configuration.", - "currentOIDCClients": "CurrentOIDCClients is a list of clients that the component is currently using.", - "consumingUsers": "ConsumingUsers is a slice of ServiceAccounts that need to have read permission on the `clientSecret` secret.", - "conditions": "Conditions are used to communicate the state of the `oidcClients` entry.\n\nSupported conditions include Available, Degraded and Progressing.\n\nIf Available is true, the component is successfully using the configured client. 
If Degraded is true, that means something has gone wrong trying to handle the client configuration. If Progressing is true, that means the component is taking some action related to the `oidcClients` entry.", + "componentName": "componentName is the name of the component that will consume a client configuration.", + "componentNamespace": "componentNamespace is the namespace of the component that will consume a client configuration.", + "currentOIDCClients": "currentOIDCClients is a list of clients that the component is currently using.", + "consumingUsers": "consumingUsers is a slice of ServiceAccounts that need to have read permission on the `clientSecret` secret.", + "conditions": "conditions are used to communicate the state of the `oidcClients` entry.\n\nSupported conditions include Available, Degraded and Progressing.\n\nIf Available is true, the component is successfully using the configured client. If Degraded is true, that means something has gone wrong trying to handle the client configuration. If Progressing is true, that means the component is taking some action related to the `oidcClients` entry.", } func (OIDCClientStatus) SwaggerDoc() map[string]string { @@ -429,11 +429,11 @@ func (OIDCClientStatus) SwaggerDoc() map[string]string { } var map_OIDCProvider = map[string]string{ - "name": "Name of the OIDC provider", - "issuer": "Issuer describes atributes of the OIDC token issuer", - "oidcClients": "OIDCClients contains configuration for the platform's clients that need to request tokens from the issuer", - "claimMappings": "ClaimMappings describes rules on how to transform information from an ID token into a cluster identity", - "claimValidationRules": "ClaimValidationRules are rules that are applied to validate token claims to authenticate users.", + "name": "name of the OIDC provider", + "issuer": "issuer describes atributes of the OIDC token issuer", + "oidcClients": "oidcClients contains configuration for the platform's clients that need to request tokens from the issuer", + "claimMappings": "claimMappings describes rules on how to transform information from an ID token into a cluster identity", + "claimValidationRules": "claimValidationRules are rules that are applied to validate token claims to authenticate users.", } func (OIDCProvider) SwaggerDoc() map[string]string { @@ -441,7 +441,7 @@ func (OIDCProvider) SwaggerDoc() map[string]string { } var map_PrefixedClaimMapping = map[string]string{ - "prefix": "Prefix is a string to prefix the value from the token in the result of the claim mapping.\n\nBy default, no prefixing occurs.\n\nExample: if `prefix` is set to \"myoidc:\"\" and the `claim` in JWT contains an array of strings \"a\", \"b\" and \"c\", the mapping will result in an array of string \"myoidc:a\", \"myoidc:b\" and \"myoidc:c\".", + "prefix": "prefix is a string to prefix the value from the token in the result of the claim mapping.\n\nBy default, no prefixing occurs.\n\nExample: if `prefix` is set to \"myoidc:\"\" and the `claim` in JWT contains an array of strings \"a\", \"b\" and \"c\", the mapping will result in an array of string \"myoidc:a\", \"myoidc:b\" and \"myoidc:c\".", } func (PrefixedClaimMapping) SwaggerDoc() map[string]string { @@ -449,7 +449,7 @@ func (PrefixedClaimMapping) SwaggerDoc() map[string]string { } var map_TokenClaimMapping = map[string]string{ - "claim": "Claim is a JWT token claim to be used in the mapping", + "claim": "claim is a JWT token claim to be used in the mapping", } func (TokenClaimMapping) SwaggerDoc() map[string]string { 
@@ -457,8 +457,8 @@ func (TokenClaimMapping) SwaggerDoc() map[string]string { } var map_TokenClaimMappings = map[string]string{ - "username": "Username is a name of the claim that should be used to construct usernames for the cluster identity.\n\nDefault value: \"sub\"", - "groups": "Groups is a name of the claim that should be used to construct groups for the cluster identity. The referenced claim must use array of strings values.", + "username": "username is a name of the claim that should be used to construct usernames for the cluster identity.\n\nDefault value: \"sub\"", + "groups": "groups is a name of the claim that should be used to construct groups for the cluster identity. The referenced claim must use array of strings values.", } func (TokenClaimMappings) SwaggerDoc() map[string]string { @@ -466,8 +466,8 @@ func (TokenClaimMappings) SwaggerDoc() map[string]string { } var map_TokenClaimValidationRule = map[string]string{ - "type": "Type sets the type of the validation rule", - "requiredClaim": "RequiredClaim allows configuring a required claim name and its expected value", + "type": "type sets the type of the validation rule", + "requiredClaim": "requiredClaim allows configuring a required claim name and its expected value", } func (TokenClaimValidationRule) SwaggerDoc() map[string]string { @@ -476,7 +476,7 @@ func (TokenClaimValidationRule) SwaggerDoc() map[string]string { var map_TokenIssuer = map[string]string{ "issuerURL": "URL is the serving URL of the token issuer. Must use the https:// scheme.", - "audiences": "Audiences is an array of audiences that the token was issued for. Valid tokens must include at least one of these values in their \"aud\" claim. Must be set to exactly one value.", + "audiences": "audiences is an array of audiences that the token was issued for. Valid tokens must include at least one of these values in their \"aud\" claim. Must be set to exactly one value.", "issuerCertificateAuthority": "CertificateAuthority is a reference to a config map in the configuration namespace. The .data of the configMap must contain the \"ca-bundle.crt\" key. If unset, system trust is used instead.", } @@ -485,8 +485,8 @@ func (TokenIssuer) SwaggerDoc() map[string]string { } var map_TokenRequiredClaim = map[string]string{ - "claim": "Claim is a name of a required claim. Only claims with string values are supported.", - "requiredValue": "RequiredValue is the required value for the claim.", + "claim": "claim is a name of a required claim. Only claims with string values are supported.", + "requiredValue": "requiredValue is the required value for the claim.", } func (TokenRequiredClaim) SwaggerDoc() map[string]string { @@ -494,7 +494,7 @@ func (TokenRequiredClaim) SwaggerDoc() map[string]string { } var map_UsernameClaimMapping = map[string]string{ - "prefixPolicy": "PrefixPolicy specifies how a prefix should apply.\n\nBy default, claims other than `email` will be prefixed with the issuer URL to prevent naming clashes with other plugins.\n\nSet to \"NoPrefix\" to disable prefixing.\n\nExample:\n (1) `prefix` is set to \"myoidc:\" and `claim` is set to \"username\".\n If the JWT claim `username` contains value `userA`, the resulting\n mapped value will be \"myoidc:userA\".\n (2) `prefix` is set to \"myoidc:\" and `claim` is set to \"email\". 
If the\n JWT `email` claim contains value \"userA@myoidc.tld\", the resulting\n mapped value will be \"myoidc:userA@myoidc.tld\".\n (3) `prefix` is unset, `issuerURL` is set to `https://myoidc.tld`,\n the JWT claims include \"username\":\"userA\" and \"email\":\"userA@myoidc.tld\",\n and `claim` is set to:\n (a) \"username\": the mapped value will be \"https://myoidc.tld#userA\"\n (b) \"email\": the mapped value will be \"userA@myoidc.tld\"", + "prefixPolicy": "prefixPolicy specifies how a prefix should apply.\n\nBy default, claims other than `email` will be prefixed with the issuer URL to prevent naming clashes with other plugins.\n\nSet to \"NoPrefix\" to disable prefixing.\n\nExample:\n (1) `prefix` is set to \"myoidc:\" and `claim` is set to \"username\".\n If the JWT claim `username` contains value `userA`, the resulting\n mapped value will be \"myoidc:userA\".\n (2) `prefix` is set to \"myoidc:\" and `claim` is set to \"email\". If the\n JWT `email` claim contains value \"userA@myoidc.tld\", the resulting\n mapped value will be \"myoidc:userA@myoidc.tld\".\n (3) `prefix` is unset, `issuerURL` is set to `https://myoidc.tld`,\n the JWT claims include \"username\":\"userA\" and \"email\":\"userA@myoidc.tld\",\n and `claim` is set to:\n (a) \"username\": the mapped value will be \"https://myoidc.tld#userA\"\n (b) \"email\": the mapped value will be \"userA@myoidc.tld\"", } func (UsernameClaimMapping) SwaggerDoc() map[string]string { @@ -513,7 +513,7 @@ func (WebhookTokenAuthenticator) SwaggerDoc() map[string]string { var map_Build = map[string]string{ "": "Build configures the behavior of OpenShift builds for the entire cluster. This includes default settings that can be overridden in BuildConfig objects, and overrides which are applied to all builds.\n\nThe canonical name is \"cluster\"\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "Spec holds user-settable values for the build controller configuration", + "spec": "spec holds user-settable values for the build controller configuration", } func (Build) SwaggerDoc() map[string]string { @@ -521,11 +521,11 @@ func (Build) SwaggerDoc() map[string]string { } var map_BuildDefaults = map[string]string{ - "defaultProxy": "DefaultProxy contains the default proxy settings for all build operations, including image pull/push and source download.\n\nValues can be overrode by setting the `HTTP_PROXY`, `HTTPS_PROXY`, and `NO_PROXY` environment variables in the build config's strategy.", - "gitProxy": "GitProxy contains the proxy settings for git operations only. If set, this will override any Proxy settings for all git commands, such as git clone.\n\nValues that are not set here will be inherited from DefaultProxy.", - "env": "Env is a set of default environment variables that will be applied to the build if the specified variables do not exist on the build", - "imageLabels": "ImageLabels is a list of docker labels that are applied to the resulting image. 
User can override a default label by providing a label with the same name in their Build/BuildConfig.", - "resources": "Resources defines resource requirements to execute the build.", + "defaultProxy": "defaultProxy contains the default proxy settings for all build operations, including image pull/push and source download.\n\nValues can be overrode by setting the `HTTP_PROXY`, `HTTPS_PROXY`, and `NO_PROXY` environment variables in the build config's strategy.", + "gitProxy": "gitProxy contains the proxy settings for git operations only. If set, this will override any Proxy settings for all git commands, such as git clone.\n\nValues that are not set here will be inherited from DefaultProxy.", + "env": "env is a set of default environment variables that will be applied to the build if the specified variables do not exist on the build", + "imageLabels": "imageLabels is a list of docker labels that are applied to the resulting image. User can override a default label by providing a label with the same name in their Build/BuildConfig.", + "resources": "resources defines resource requirements to execute the build.", } func (BuildDefaults) SwaggerDoc() map[string]string { @@ -542,10 +542,10 @@ func (BuildList) SwaggerDoc() map[string]string { } var map_BuildOverrides = map[string]string{ - "imageLabels": "ImageLabels is a list of docker labels that are applied to the resulting image. If user provided a label in their Build/BuildConfig with the same name as one in this list, the user's label will be overwritten.", - "nodeSelector": "NodeSelector is a selector which must be true for the build pod to fit on a node", - "tolerations": "Tolerations is a list of Tolerations that will override any existing tolerations set on a build pod.", - "forcePull": "ForcePull overrides, if set, the equivalent value in the builds, i.e. false disables force pull for all builds, true enables force pull for all builds, independently of what each build specifies itself", + "imageLabels": "imageLabels is a list of docker labels that are applied to the resulting image. If user provided a label in their Build/BuildConfig with the same name as one in this list, the user's label will be overwritten.", + "nodeSelector": "nodeSelector is a selector which must be true for the build pod to fit on a node", + "tolerations": "tolerations is a list of Tolerations that will override any existing tolerations set on a build pod.", + "forcePull": "forcePull overrides, if set, the equivalent value in the builds, i.e. false disables force pull for all builds, true enables force pull for all builds, independently of what each build specifies itself", } func (BuildOverrides) SwaggerDoc() map[string]string { @@ -553,9 +553,9 @@ func (BuildOverrides) SwaggerDoc() map[string]string { } var map_BuildSpec = map[string]string{ - "additionalTrustedCA": "AdditionalTrustedCA is a reference to a ConfigMap containing additional CAs that should be trusted for image pushes and pulls during builds. The namespace for this config map is openshift-config.\n\nDEPRECATED: Additional CAs for image pull and push should be set on image.config.openshift.io/cluster instead.", - "buildDefaults": "BuildDefaults controls the default information for Builds", - "buildOverrides": "BuildOverrides controls override settings for builds", + "additionalTrustedCA": "additionalTrustedCA is a reference to a ConfigMap containing additional CAs that should be trusted for image pushes and pulls during builds. 
The namespace for this config map is openshift-config.\n\nDEPRECATED: Additional CAs for image pull and push should be set on image.config.openshift.io/cluster instead.", + "buildDefaults": "buildDefaults controls the default information for Builds", + "buildOverrides": "buildOverrides controls override settings for builds", } func (BuildSpec) SwaggerDoc() map[string]string { @@ -563,8 +563,8 @@ func (BuildSpec) SwaggerDoc() map[string]string { } var map_ImageLabel = map[string]string{ - "name": "Name defines the name of the label. It must have non-zero length.", - "value": "Value defines the literal value of the label.", + "name": "name defines the name of the label. It must have non-zero length.", + "value": "value defines the literal value of the label.", } func (ImageLabel) SwaggerDoc() map[string]string { @@ -648,7 +648,7 @@ func (OperandVersion) SwaggerDoc() map[string]string { var map_ClusterCondition = map[string]string{ "": "ClusterCondition is a union of typed cluster conditions. The 'type' property determines which of the type-specific properties are relevant. When evaluated on a cluster, the condition may match, not match, or fail to evaluate.", "type": "type represents the cluster-condition type. This defines the members and semantics of any additional properties.", - "promql": "promQL represents a cluster condition based on PromQL.", + "promql": "promql represents a cluster condition based on PromQL.", } func (ClusterCondition) SwaggerDoc() map[string]string { @@ -698,7 +698,7 @@ func (ClusterVersionList) SwaggerDoc() map[string]string { var map_ClusterVersionSpec = map[string]string{ "": "ClusterVersionSpec is the desired version state of the cluster. It includes the version the cluster should be at, how the cluster is identified, and where the cluster should look for version updates.", "clusterID": "clusterID uniquely identifies this cluster. This is expected to be an RFC4122 UUID value (xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx in hexadecimal values). This is a required field.", - "desiredUpdate": "desiredUpdate is an optional field that indicates the desired value of the cluster version. Setting this value will trigger an upgrade (if the current version does not match the desired version). The set of recommended update values is listed as part of available updates in status, and setting values outside that range may cause the upgrade to fail.\n\nSome of the fields are inter-related with restrictions and meanings described here. 1. image is specified, version is specified, architecture is specified. API validation error. 2. image is specified, version is specified, architecture is not specified. You should not do this. version is silently ignored and image is used. 3. image is specified, version is not specified, architecture is specified. API validation error. 4. image is specified, version is not specified, architecture is not specified. image is used. 5. image is not specified, version is specified, architecture is specified. version and desired architecture are used to select an image. 6. image is not specified, version is specified, architecture is not specified. version and current architecture are used to select an image. 7. image is not specified, version is not specified, architecture is specified. API validation error. 8. image is not specified, version is not specified, architecture is not specified. API validation error.\n\nIf an upgrade fails the operator will halt and report status about the failing component. 
Setting the desired update value back to the previous version will cause a rollback to be attempted. Not all rollbacks will succeed.", + "desiredUpdate": "desiredUpdate is an optional field that indicates the desired value of the cluster version. Setting this value will trigger an upgrade (if the current version does not match the desired version). The set of recommended update values is listed as part of available updates in status, and setting values outside that range may cause the upgrade to fail.\n\nSome of the fields are inter-related with restrictions and meanings described here. 1. image is specified, version is specified, architecture is specified. API validation error. 2. image is specified, version is specified, architecture is not specified. The version extracted from the referenced image must match the specified version. 3. image is specified, version is not specified, architecture is specified. API validation error. 4. image is specified, version is not specified, architecture is not specified. image is used. 5. image is not specified, version is specified, architecture is specified. version and desired architecture are used to select an image. 6. image is not specified, version is specified, architecture is not specified. version and current architecture are used to select an image. 7. image is not specified, version is not specified, architecture is specified. API validation error. 8. image is not specified, version is not specified, architecture is not specified. API validation error.\n\nIf an upgrade fails the operator will halt and report status about the failing component. Setting the desired update value back to the previous version will cause a rollback to be attempted. Not all rollbacks will succeed.", "upstream": "upstream may be used to specify the preferred update server. By default it will use the appropriate update server for the cluster and region.", "channel": "channel is an identifier for explicitly requesting that a non-default set of updates be applied to this cluster. The default channel will be contain stable updates that are appropriate for production clusters.", "capabilities": "capabilities configures the installation of optional, core cluster components. A null value here is identical to an empty object; see the child properties for default semantics.", @@ -764,7 +764,7 @@ func (ConditionalUpdateRisk) SwaggerDoc() map[string]string { var map_PromQLClusterCondition = map[string]string{ "": "PromQLClusterCondition represents a cluster condition based on PromQL.", - "promql": "PromQL is a PromQL query classifying clusters. This query query should return a 1 in the match case and a 0 in the does-not-match case. Queries which return no time series, or which return values besides 0 or 1, are evaluation failures.", + "promql": "promql is a PromQL query classifying clusters. This query query should return a 1 in the match case and a 0 in the does-not-match case. Queries which return no time series, or which return values besides 0 or 1, are evaluation failures.", } func (PromQLClusterCondition) SwaggerDoc() map[string]string { @@ -772,11 +772,12 @@ func (PromQLClusterCondition) SwaggerDoc() map[string]string { } var map_Release = map[string]string{ - "": "Release represents an OpenShift release image and associated metadata.", - "version": "version is a semantic version identifying the update version. When this field is part of spec, version is optional if image is specified.", - "image": "image is a container image location that contains the update. 
When this field is part of spec, image is optional if version is specified and the availableUpdates field contains a matching version.", - "url": "url contains information about this release. This URL is set by the 'url' metadata property on a release or the metadata returned by the update API and should be displayed as a link in user interfaces. The URL field may not be set for test or nightly releases.", - "channels": "channels is the set of Cincinnati channels to which the release currently belongs.", + "": "Release represents an OpenShift release image and associated metadata.", + "architecture": "architecture is an optional field that indicates the value of the cluster architecture. In this context cluster architecture means either a single architecture or a multi architecture. Valid values are 'Multi' and empty.", + "version": "version is a semantic version identifying the update version. When this field is part of spec, version is optional if image is specified.", + "image": "image is a container image location that contains the update. When this field is part of spec, image is optional if version is specified and the availableUpdates field contains a matching version.", + "url": "url contains information about this release. This URL is set by the 'url' metadata property on a release or the metadata returned by the update API and should be displayed as a link in user interfaces. The URL field may not be set for test or nightly releases.", + "channels": "channels is the set of Cincinnati channels to which the release currently belongs.", } func (Release) SwaggerDoc() map[string]string { @@ -796,8 +797,8 @@ func (SignatureStore) SwaggerDoc() map[string]string { var map_Update = map[string]string{ "": "Update represents an administrator update request.", "architecture": "architecture is an optional field that indicates the desired value of the cluster architecture. In this context cluster architecture means either a single architecture or a multi architecture. architecture can only be set to Multi thereby only allowing updates from single to multi architecture. If architecture is set, image cannot be set and version must be set. Valid values are 'Multi' and empty.", - "version": "version is a semantic version identifying the update version. version is ignored if image is specified and required if architecture is specified.", - "image": "image is a container image location that contains the update. image should be used when the desired version does not exist in availableUpdates or history. When image is set, version is ignored. When image is set, version should be empty. When image is set, architecture cannot be specified.", + "version": "version is a semantic version identifying the update version. version is required if architecture is specified. If both version and image are set, the version extracted from the referenced image must match the specified version.", + "image": "image is a container image location that contains the update. image should be used when the desired version does not exist in availableUpdates or history. When image is set, architecture cannot be specified. If both version and image are set, the version extracted from the referenced image must match the specified version.", "force": "force allows an administrator to update to an image that has failed verification or upgradeable checks. 
This option should only be used when the authenticity of the provided image has been verified out of band because the provided image will run with full administrative access to the cluster. Do not use this flag with images that comes from unknown or potentially malicious sources.", } @@ -1186,7 +1187,7 @@ func (AWSPlatformSpec) SwaggerDoc() map[string]string { var map_AWSPlatformStatus = map[string]string{ "": "AWSPlatformStatus holds the current status of the Amazon Web Services infrastructure provider.", "region": "region holds the default AWS region for new AWS resources created by the cluster.", - "serviceEndpoints": "ServiceEndpoints list contains custom endpoints which will override default service endpoint of AWS Services. There must be only one ServiceEndpoint for a service.", + "serviceEndpoints": "serviceEndpoints list contains custom endpoints which will override default service endpoint of AWS Services. There must be only one ServiceEndpoint for a service.", "resourceTags": "resourceTags is a list of additional tags to apply to AWS resources created for the cluster. See https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html for information on tagging AWS resources. AWS supports a maximum of 50 tags per resource. OpenShift reserves 25 tags for its use, leaving 25 tags available for the user.", "cloudLoadBalancerConfig": "cloudLoadBalancerConfig holds configuration related to DNS and cloud load balancers. It allows configuration of in-cluster DNS as an alternative to the platform default DNS implementation. When using the ClusterHosted DNS type, Load Balancer IP addresses must be provided for the API and internal API load balancers as well as the ingress load balancer.", } @@ -1197,8 +1198,8 @@ func (AWSPlatformStatus) SwaggerDoc() map[string]string { var map_AWSResourceTag = map[string]string{ "": "AWSResourceTag is a tag to apply to AWS resources created for the cluster.", - "key": "key is the key of the tag", - "value": "value is the value of the tag. Some AWS service do not support empty values. Since tags are added to resources in many services, the length of the tag value must meet the requirements of all services.", + "key": "key sets the key of the AWS resource tag key-value pair. Key is required when defining an AWS resource tag. Key should consist of between 1 and 128 characters, and may contain only the set of alphanumeric characters, space (' '), '_', '.', '/', '=', '+', '-', ':', and '@'.", + "value": "value sets the value of the AWS resource tag key-value pair. Value is required when defining an AWS resource tag. Value should consist of between 1 and 256 characters, and may contain only the set of alphanumeric characters, space (' '), '_', '.', '/', '=', '+', '-', ':', and '@'. Some AWS service do not support empty values. Since tags are added to resources in many services, the length of the tag value must meet the requirements of all services.", } func (AWSResourceTag) SwaggerDoc() map[string]string { @@ -1360,7 +1361,7 @@ func (EquinixMetalPlatformStatus) SwaggerDoc() map[string]string { var map_ExternalPlatformSpec = map[string]string{ "": "ExternalPlatformSpec holds the desired state for the generic External infrastructure provider.", - "platformName": "PlatformName holds the arbitrary string representing the infrastructure provider name, expected to be set at the installation time. 
This field is solely for informational and reporting purposes and is not expected to be used for decision-making.", + "platformName": "platformName holds the arbitrary string representing the infrastructure provider name, expected to be set at the installation time. This field is solely for informational and reporting purposes and is not expected to be used for decision-making.", } func (ExternalPlatformSpec) SwaggerDoc() map[string]string { @@ -1391,6 +1392,7 @@ var map_GCPPlatformStatus = map[string]string{ "resourceLabels": "resourceLabels is a list of additional labels to apply to GCP resources created for the cluster. See https://cloud.google.com/compute/docs/labeling-resources for information on labeling GCP resources. GCP supports a maximum of 64 labels per resource. OpenShift reserves 32 labels for internal use, allowing 32 labels for user configuration.", "resourceTags": "resourceTags is a list of additional tags to apply to GCP resources created for the cluster. See https://cloud.google.com/resource-manager/docs/tags/tags-overview for information on tagging GCP resources. GCP supports a maximum of 50 tags per resource.", "cloudLoadBalancerConfig": "cloudLoadBalancerConfig holds configuration related to DNS and cloud load balancers. It allows configuration of in-cluster DNS as an alternative to the platform default DNS implementation. When using the ClusterHosted DNS type, Load Balancer IP addresses must be provided for the API and internal API load balancers as well as the ingress load balancer.", + "serviceEndpoints": "serviceEndpoints specifies endpoints that override the default endpoints used when creating clients to interact with GCP services. When not specified, the default endpoint for the GCP region will be used. Only 1 endpoint override is permitted for each GCP service. The maximum number of endpoint overrides allowed is 9.", } func (GCPPlatformStatus) SwaggerDoc() map[string]string { @@ -1418,8 +1420,19 @@ func (GCPResourceTag) SwaggerDoc() map[string]string { return map_GCPResourceTag } +var map_GCPServiceEndpoint = map[string]string{ + "": "GCPServiceEndpoint store the configuration of a custom url to override existing defaults of GCP Services.", + "name": "name is the name of the GCP service whose endpoint is being overridden. This must be provided and cannot be empty.\n\nAllowed values are Compute, Container, CloudResourceManager, DNS, File, IAM, ServiceUsage, Storage, and TagManager.\n\nAs an example, when setting the name to Compute all requests made by the caller to the GCP Compute Service will be directed to the endpoint specified in the url field.", + "url": "url is a fully qualified URI that overrides the default endpoint for a client using the GCP service specified in the name field. url is required, must use the scheme https, must not be more than 253 characters in length, and must be a valid URL according to Go's net/url package (https://pkg.go.dev/net/url#URL)\n\nAn example of a valid endpoint that overrides the Compute Service: \"https://compute-myendpoint1.p.googleapis.com\"", +} + +func (GCPServiceEndpoint) SwaggerDoc() map[string]string { + return map_GCPServiceEndpoint +} + var map_IBMCloudPlatformSpec = map[string]string{ - "": "IBMCloudPlatformSpec holds the desired state of the IBMCloud infrastructure provider. This only includes fields that can be modified in the cluster.", + "": "IBMCloudPlatformSpec holds the desired state of the IBMCloud infrastructure provider. 
This only includes fields that can be modified in the cluster.", + "serviceEndpoints": "serviceEndpoints is a list of custom endpoints which will override the default service endpoints of an IBM service. These endpoints are used by components within the cluster when trying to reach the IBM Cloud Services that have been overriden. The CCCMO reads in the IBMCloudPlatformSpec and validates each endpoint is resolvable. Once validated, the cloud config and IBMCloudPlatformStatus are updated to reflect the same custom endpoints. A maximum of 13 service endpoints overrides are supported.", } func (IBMCloudPlatformSpec) SwaggerDoc() map[string]string { @@ -1428,12 +1441,12 @@ func (IBMCloudPlatformSpec) SwaggerDoc() map[string]string { var map_IBMCloudPlatformStatus = map[string]string{ "": "IBMCloudPlatformStatus holds the current status of the IBMCloud infrastructure provider.", - "location": "Location is where the cluster has been deployed", - "resourceGroupName": "ResourceGroupName is the Resource Group for new IBMCloud resources created for the cluster.", - "providerType": "ProviderType indicates the type of cluster that was created", - "cisInstanceCRN": "CISInstanceCRN is the CRN of the Cloud Internet Services instance managing the DNS zone for the cluster's base domain", - "dnsInstanceCRN": "DNSInstanceCRN is the CRN of the DNS Services instance managing the DNS zone for the cluster's base domain", - "serviceEndpoints": "serviceEndpoints is a list of custom endpoints which will override the default service endpoints of an IBM Cloud service. These endpoints are consumed by components within the cluster to reach the respective IBM Cloud Services.", + "location": "location is where the cluster has been deployed", + "resourceGroupName": "resourceGroupName is the Resource Group for new IBMCloud resources created for the cluster.", + "providerType": "providerType indicates the type of cluster that was created", + "cisInstanceCRN": "cisInstanceCRN is the CRN of the Cloud Internet Services instance managing the DNS zone for the cluster's base domain", + "dnsInstanceCRN": "dnsInstanceCRN is the CRN of the DNS Services instance managing the DNS zone for the cluster's base domain", + "serviceEndpoints": "serviceEndpoints is a list of custom endpoints which will override the default service endpoints of an IBM service. These endpoints are used by components within the cluster when trying to reach the IBM Cloud Services that have been overriden. The CCCMO reads in the IBMCloudPlatformSpec and validates each endpoint is resolvable. Once validated, the cloud config and IBMCloudPlatformStatus are updated to reflect the same custom endpoints.", } func (IBMCloudPlatformStatus) SwaggerDoc() map[string]string { @@ -1443,7 +1456,7 @@ func (IBMCloudPlatformStatus) SwaggerDoc() map[string]string { var map_IBMCloudServiceEndpoint = map[string]string{ "": "IBMCloudServiceEndpoint stores the configuration of a custom url to override existing defaults of IBM Cloud Services.", "name": "name is the name of the IBM Cloud service. Possible values are: CIS, COS, COSConfig, DNSServices, GlobalCatalog, GlobalSearch, GlobalTagging, HyperProtect, IAM, KeyProtect, ResourceController, ResourceManager, or VPC. 
For example, the IBM Cloud Private IAM service could be configured with the service `name` of `IAM` and `url` of `https://private.iam.cloud.ibm.com` Whereas the IBM Cloud Private VPC service for US South (Dallas) could be configured with the service `name` of `VPC` and `url` of `https://us.south.private.iaas.cloud.ibm.com`", - "url": "url is fully qualified URI with scheme https, that overrides the default generated endpoint for a client. This must be provided and cannot be empty.", + "url": "url is fully qualified URI with scheme https, that overrides the default generated endpoint for a client. This must be provided and cannot be empty. The path must follow the pattern /v[0,9]+ or /api/v[0,9]+", } func (IBMCloudServiceEndpoint) SwaggerDoc() map[string]string { @@ -1519,7 +1532,7 @@ var map_NutanixFailureDomain = map[string]string{ "": "NutanixFailureDomain configures failure domain information for the Nutanix platform.", "name": "name defines the unique name of a failure domain. Name is required and must be at most 64 characters in length. It must consist of only lower case alphanumeric characters and hyphens (-). It must start and end with an alphanumeric character. This value is arbitrary and is used to identify the failure domain within the platform.", "cluster": "cluster is to identify the cluster (the Prism Element under management of the Prism Central), in which the Machine's VM will be created. The cluster identifier (uuid or name) can be obtained from the Prism Central console or using the prism_central API.", - "subnets": "subnets holds a list of identifiers (one or more) of the cluster's network subnets for the Machine's VM to connect to. The subnet identifiers (uuid or name) can be obtained from the Prism Central console or using the prism_central API.", + "subnets": "subnets holds a list of identifiers (one or more) of the cluster's network subnets If the feature gate NutanixMultiSubnets is enabled, up to 32 subnets may be configured. for the Machine's VM to connect to. The subnet identifiers (uuid or name) can be obtained from the Prism Central console or using the prism_central API.", } func (NutanixFailureDomain) SwaggerDoc() map[string]string { @@ -1660,19 +1673,19 @@ func (OvirtPlatformStatus) SwaggerDoc() map[string]string { var map_PlatformSpec = map[string]string{ "": "PlatformSpec holds the desired state specific to the underlying infrastructure provider of the current cluster. Since these are used at spec-level for the underlying cluster, it is supposed that only one of the spec structs is set.", "type": "type is the underlying infrastructure provider for the cluster. This value controls whether infrastructure automation such as service load balancers, dynamic volume provisioning, machine creation and deletion, and other integrations are enabled. If None, no infrastructure automation is enabled. Allowed values are \"AWS\", \"Azure\", \"BareMetal\", \"GCP\", \"Libvirt\", \"OpenStack\", \"VSphere\", \"oVirt\", \"KubeVirt\", \"EquinixMetal\", \"PowerVS\", \"AlibabaCloud\", \"Nutanix\" and \"None\". 
Individual components may not support all platforms, and must handle unrecognized platforms as None if they do not support that platform.", - "aws": "AWS contains settings specific to the Amazon Web Services infrastructure provider.", - "azure": "Azure contains settings specific to the Azure infrastructure provider.", - "gcp": "GCP contains settings specific to the Google Cloud Platform infrastructure provider.", - "baremetal": "BareMetal contains settings specific to the BareMetal platform.", - "openstack": "OpenStack contains settings specific to the OpenStack infrastructure provider.", - "ovirt": "Ovirt contains settings specific to the oVirt infrastructure provider.", - "vsphere": "VSphere contains settings specific to the VSphere infrastructure provider.", - "ibmcloud": "IBMCloud contains settings specific to the IBMCloud infrastructure provider.", - "kubevirt": "Kubevirt contains settings specific to the kubevirt infrastructure provider.", - "equinixMetal": "EquinixMetal contains settings specific to the Equinix Metal infrastructure provider.", - "powervs": "PowerVS contains settings specific to the IBM Power Systems Virtual Servers infrastructure provider.", - "alibabaCloud": "AlibabaCloud contains settings specific to the Alibaba Cloud infrastructure provider.", - "nutanix": "Nutanix contains settings specific to the Nutanix infrastructure provider.", + "aws": "aws contains settings specific to the Amazon Web Services infrastructure provider.", + "azure": "azure contains settings specific to the Azure infrastructure provider.", + "gcp": "gcp contains settings specific to the Google Cloud Platform infrastructure provider.", + "baremetal": "baremetal contains settings specific to the BareMetal platform.", + "openstack": "openstack contains settings specific to the OpenStack infrastructure provider.", + "ovirt": "ovirt contains settings specific to the oVirt infrastructure provider.", + "vsphere": "vsphere contains settings specific to the VSphere infrastructure provider.", + "ibmcloud": "ibmcloud contains settings specific to the IBMCloud infrastructure provider.", + "kubevirt": "kubevirt contains settings specific to the kubevirt infrastructure provider.", + "equinixMetal": "equinixMetal contains settings specific to the Equinix Metal infrastructure provider.", + "powervs": "powervs contains settings specific to the IBM Power Systems Virtual Servers infrastructure provider.", + "alibabaCloud": "alibabaCloud contains settings specific to the Alibaba Cloud infrastructure provider.", + "nutanix": "nutanix contains settings specific to the Nutanix infrastructure provider.", "external": "ExternalPlatformType represents generic infrastructure provider. Platform-specific components should be supplemented separately.", } @@ -1683,20 +1696,20 @@ func (PlatformSpec) SwaggerDoc() map[string]string { var map_PlatformStatus = map[string]string{ "": "PlatformStatus holds the current status specific to the underlying infrastructure provider of the current cluster. Since these are used at status-level for the underlying cluster, it is supposed that only one of the status structs is set.", "type": "type is the underlying infrastructure provider for the cluster. This value controls whether infrastructure automation such as service load balancers, dynamic volume provisioning, machine creation and deletion, and other integrations are enabled. If None, no infrastructure automation is enabled. 
Allowed values are \"AWS\", \"Azure\", \"BareMetal\", \"GCP\", \"Libvirt\", \"OpenStack\", \"VSphere\", \"oVirt\", \"EquinixMetal\", \"PowerVS\", \"AlibabaCloud\", \"Nutanix\" and \"None\". Individual components may not support all platforms, and must handle unrecognized platforms as None if they do not support that platform.\n\nThis value will be synced with to the `status.platform` and `status.platformStatus.type`. Currently this value cannot be changed once set.", - "aws": "AWS contains settings specific to the Amazon Web Services infrastructure provider.", - "azure": "Azure contains settings specific to the Azure infrastructure provider.", - "gcp": "GCP contains settings specific to the Google Cloud Platform infrastructure provider.", - "baremetal": "BareMetal contains settings specific to the BareMetal platform.", - "openstack": "OpenStack contains settings specific to the OpenStack infrastructure provider.", - "ovirt": "Ovirt contains settings specific to the oVirt infrastructure provider.", - "vsphere": "VSphere contains settings specific to the VSphere infrastructure provider.", - "ibmcloud": "IBMCloud contains settings specific to the IBMCloud infrastructure provider.", - "kubevirt": "Kubevirt contains settings specific to the kubevirt infrastructure provider.", - "equinixMetal": "EquinixMetal contains settings specific to the Equinix Metal infrastructure provider.", - "powervs": "PowerVS contains settings specific to the Power Systems Virtual Servers infrastructure provider.", - "alibabaCloud": "AlibabaCloud contains settings specific to the Alibaba Cloud infrastructure provider.", - "nutanix": "Nutanix contains settings specific to the Nutanix infrastructure provider.", - "external": "External contains settings specific to the generic External infrastructure provider.", + "aws": "aws contains settings specific to the Amazon Web Services infrastructure provider.", + "azure": "azure contains settings specific to the Azure infrastructure provider.", + "gcp": "gcp contains settings specific to the Google Cloud Platform infrastructure provider.", + "baremetal": "baremetal contains settings specific to the BareMetal platform.", + "openstack": "openstack contains settings specific to the OpenStack infrastructure provider.", + "ovirt": "ovirt contains settings specific to the oVirt infrastructure provider.", + "vsphere": "vsphere contains settings specific to the VSphere infrastructure provider.", + "ibmcloud": "ibmcloud contains settings specific to the IBMCloud infrastructure provider.", + "kubevirt": "kubevirt contains settings specific to the kubevirt infrastructure provider.", + "equinixMetal": "equinixMetal contains settings specific to the Equinix Metal infrastructure provider.", + "powervs": "powervs contains settings specific to the Power Systems Virtual Servers infrastructure provider.", + "alibabaCloud": "alibabaCloud contains settings specific to the Alibaba Cloud infrastructure provider.", + "nutanix": "nutanix contains settings specific to the Nutanix infrastructure provider.", + "external": "external contains settings specific to the generic External infrastructure provider.", } func (PlatformStatus) SwaggerDoc() map[string]string { @@ -1718,8 +1731,8 @@ var map_PowerVSPlatformStatus = map[string]string{ "zone": "zone holds the default zone for the new Power VS resources created by the cluster. Note: Currently only single-zone OCP clusters are supported", "resourceGroup": "resourceGroup is the resource group name for new IBMCloud resources created for a cluster. 
The resource group specified here will be used by cluster-image-registry-operator to set up a COS Instance in IBMCloud for the cluster registry. More about resource groups can be found here: https://cloud.ibm.com/docs/account?topic=account-rgs. When omitted, the image registry operator won't be able to configure storage, which results in the image registry cluster operator not being in an available state.", "serviceEndpoints": "serviceEndpoints is a list of custom endpoints which will override the default service endpoints of a Power VS service.", - "cisInstanceCRN": "CISInstanceCRN is the CRN of the Cloud Internet Services instance managing the DNS zone for the cluster's base domain", - "dnsInstanceCRN": "DNSInstanceCRN is the CRN of the DNS Services instance managing the DNS zone for the cluster's base domain", + "cisInstanceCRN": "cisInstanceCRN is the CRN of the Cloud Internet Services instance managing the DNS zone for the cluster's base domain", + "dnsInstanceCRN": "dnsInstanceCRN is the CRN of the DNS Services instance managing the DNS zone for the cluster's base domain", } func (PowerVSPlatformStatus) SwaggerDoc() map[string]string { @@ -1736,13 +1749,45 @@ func (PowerVSServiceEndpoint) SwaggerDoc() map[string]string { return map_PowerVSServiceEndpoint } +var map_VSphereFailureDomainHostGroup = map[string]string{ + "": "VSphereFailureDomainHostGroup holds the vmGroup and the hostGroup names in vCenter corresponds to a vm-host group of type Virtual Machine and Host respectively. Is also contains the vmHostRule which is an affinity vm-host rule in vCenter.", + "vmGroup": "vmGroup is the name of the vm-host group of type virtual machine within vCenter for this failure domain. vmGroup is limited to 80 characters. This field is required when the VSphereFailureDomain ZoneType is HostGroup", + "hostGroup": "hostGroup is the name of the vm-host group of type host within vCenter for this failure domain. hostGroup is limited to 80 characters. This field is required when the VSphereFailureDomain ZoneType is HostGroup", + "vmHostRule": "vmHostRule is the name of the affinity vm-host rule within vCenter for this failure domain. vmHostRule is limited to 80 characters. This field is required when the VSphereFailureDomain ZoneType is HostGroup", +} + +func (VSphereFailureDomainHostGroup) SwaggerDoc() map[string]string { + return map_VSphereFailureDomainHostGroup +} + +var map_VSphereFailureDomainRegionAffinity = map[string]string{ + "": "VSphereFailureDomainRegionAffinity contains the region type which is the string representation of the VSphereFailureDomainRegionType with available options of Datacenter and ComputeCluster.", + "type": "type determines the vSphere object type for a region within this failure domain. Available types are Datacenter and ComputeCluster. When set to Datacenter, this means the vCenter Datacenter defined is the region. When set to ComputeCluster, this means the vCenter cluster defined is the region.", +} + +func (VSphereFailureDomainRegionAffinity) SwaggerDoc() map[string]string { + return map_VSphereFailureDomainRegionAffinity +} + +var map_VSphereFailureDomainZoneAffinity = map[string]string{ + "": "VSphereFailureDomainZoneAffinity contains the vCenter cluster vm-host group (virtual machine and host types) and the vm-host affinity rule that together creates an affinity configuration for vm-host based zonal. 
This configuration within vCenter creates the required association between a failure domain, virtual machines and ESXi hosts to create a vm-host based zone.", + "type": "type determines the vSphere object type for a zone within this failure domain. Available types are ComputeCluster and HostGroup. When set to ComputeCluster, this means the vCenter cluster defined is the zone. When set to HostGroup, hostGroup must be configured with hostGroup, vmGroup and vmHostRule and this means the zone is defined by the grouping of those fields.", + "hostGroup": "hostGroup holds the vmGroup and the hostGroup names in vCenter corresponds to a vm-host group of type Virtual Machine and Host respectively. Is also contains the vmHostRule which is an affinity vm-host rule in vCenter.", +} + +func (VSphereFailureDomainZoneAffinity) SwaggerDoc() map[string]string { + return map_VSphereFailureDomainZoneAffinity +} + var map_VSpherePlatformFailureDomainSpec = map[string]string{ - "": "VSpherePlatformFailureDomainSpec holds the region and zone failure domain and the vCenter topology of that failure domain.", - "name": "name defines the arbitrary but unique name of a failure domain.", - "region": "region defines the name of a region tag that will be attached to a vCenter datacenter. The tag category in vCenter must be named openshift-region.", - "zone": "zone defines the name of a zone tag that will be attached to a vCenter cluster. The tag category in vCenter must be named openshift-zone.", - "server": "server is the fully-qualified domain name or the IP address of the vCenter server.", - "topology": "Topology describes a given failure domain using vSphere constructs", + "": "VSpherePlatformFailureDomainSpec holds the region and zone failure domain and the vCenter topology of that failure domain.", + "name": "name defines the arbitrary but unique name of a failure domain.", + "region": "region defines the name of a region tag that will be attached to a vCenter datacenter. The tag category in vCenter must be named openshift-region.", + "zone": "zone defines the name of a zone tag that will be attached to a vCenter cluster. The tag category in vCenter must be named openshift-zone.", + "regionAffinity": "regionAffinity holds the type of region, Datacenter or ComputeCluster. When set to Datacenter, this means the region is a vCenter Datacenter as defined in topology. When set to ComputeCluster, this means the region is a vCenter Cluster as defined in topology.", + "zoneAffinity": "zoneAffinity holds the type of the zone and the hostGroup which vmGroup and the hostGroup names in vCenter corresponds to a vm-host group of type Virtual Machine and Host respectively. 
Is also contains the vmHostRule which is an affinity vm-host rule in vCenter.", + "server": "server is the fully-qualified domain name or the IP address of the vCenter server.", + "topology": "topology describes a given failure domain using vSphere constructs", } func (VSpherePlatformFailureDomainSpec) SwaggerDoc() map[string]string { @@ -1961,8 +2006,8 @@ func (ExternalIPPolicy) SwaggerDoc() map[string]string { var map_MTUMigration = map[string]string{ "": "MTUMigration contains infomation about MTU migration.", - "network": "Network contains MTU migration configuration for the default network.", - "machine": "Machine contains MTU migration configuration for the machine's uplink.", + "network": "network contains MTU migration configuration for the default network.", + "machine": "machine contains MTU migration configuration for the machine's uplink.", } func (MTUMigration) SwaggerDoc() map[string]string { @@ -1971,8 +2016,8 @@ func (MTUMigration) SwaggerDoc() map[string]string { var map_MTUMigrationValues = map[string]string{ "": "MTUMigrationValues contains the values for a MTU migration.", - "to": "To is the MTU to migrate to.", - "from": "From is the MTU to migrate from.", + "to": "to is the MTU to migrate to.", + "from": "from is the MTU to migrate from.", } func (MTUMigrationValues) SwaggerDoc() map[string]string { @@ -2031,8 +2076,8 @@ func (NetworkList) SwaggerDoc() map[string]string { var map_NetworkMigration = map[string]string{ "": "NetworkMigration represents the network migration status.", - "networkType": "NetworkType is the target plugin that is being deployed. DEPRECATED: network type migration is no longer supported, so this should always be unset.", - "mtu": "MTU is the MTU configuration that is being deployed.", + "networkType": "networkType is the target plugin that is being deployed. DEPRECATED: network type migration is no longer supported, so this should always be unset.", + "mtu": "mtu is the MTU configuration that is being deployed.", } func (NetworkMigration) SwaggerDoc() map[string]string { @@ -2043,7 +2088,7 @@ var map_NetworkSpec = map[string]string{ "": "NetworkSpec is the desired network configuration. As a general rule, this SHOULD NOT be read directly. Instead, you should consume the NetworkStatus, as it indicates the currently deployed configuration. Currently, most spec fields are immutable after installation. Please view the individual ones for further details on each.", "clusterNetwork": "IP address pool to use for pod IPs. This field is immutable after installation.", "serviceNetwork": "IP address pool for services. Currently, we only support a single entry here. This field is immutable after installation.", - "networkType": "NetworkType is the plugin that is to be deployed (e.g. OVNKubernetes). This should match a value that the cluster-network-operator understands, or else no networking will be installed. Currently supported values are: - OVNKubernetes This field is immutable after installation.", + "networkType": "networkType is the plugin that is to be deployed (e.g. OVNKubernetes). This should match a value that the cluster-network-operator understands, or else no networking will be installed. Currently supported values are: - OVNKubernetes This field is immutable after installation.", "externalIP": "externalIP defines configuration for controllers that affect Service.ExternalIP. If nil, then ExternalIP is not allowed to be set.", "serviceNodePortRange": "The port range allowed for Services of type NodePort. 
If not specified, the default of 30000-32767 will be used. Such Services without a NodePort specified will have one automatically allocated from this range. This parameter can be updated after the cluster is installed.", "networkDiagnostics": "networkDiagnostics defines network diagnostics configuration.\n\nTakes precedence over spec.disableNetworkDiagnostics in network.operator.openshift.io. If networkDiagnostics is not specified or is empty, and the spec.disableNetworkDiagnostics flag in network.operator.openshift.io is set to true, the network diagnostics feature will be disabled.", @@ -2057,9 +2102,9 @@ var map_NetworkStatus = map[string]string{ "": "NetworkStatus is the current network configuration.", "clusterNetwork": "IP address pool to use for pod IPs.", "serviceNetwork": "IP address pool for services. Currently, we only support a single entry here.", - "networkType": "NetworkType is the plugin that is deployed (e.g. OVNKubernetes).", - "clusterNetworkMTU": "ClusterNetworkMTU is the MTU for inter-pod networking.", - "migration": "Migration contains the cluster network migration configuration.", + "networkType": "networkType is the plugin that is deployed (e.g. OVNKubernetes).", + "clusterNetworkMTU": "clusterNetworkMTU is the MTU for inter-pod networking.", + "migration": "migration contains the cluster network migration configuration.", "conditions": "conditions represents the observations of a network.config current state. Known .status.conditions.type are: \"NetworkDiagnosticsAvailable\"", } @@ -2088,8 +2133,8 @@ func (NodeList) SwaggerDoc() map[string]string { } var map_NodeSpec = map[string]string{ - "cgroupMode": "CgroupMode determines the cgroups version on the node", - "workerLatencyProfile": "WorkerLatencyProfile determins the how fast the kubelet is updating the status and corresponding reaction of the cluster", + "cgroupMode": "cgroupMode determines the cgroups version on the node", + "workerLatencyProfile": "workerLatencyProfile determins the how fast the kubelet is updating the status and corresponding reaction of the cluster", "minimumKubeletVersion": "minimumKubeletVersion is the lowest version of a kubelet that can join the cluster. Specifically, the apiserver will deny most authorization requests of kubelets that are older than the specified version, only allowing the kubelet to get and update its node object, and perform subjectaccessreviews. This means any kubelet that attempts to join the cluster will not be able to run any assigned workloads, and will eventually be marked as not ready. Its max length is 8, so maximum version allowed is either \"9.999.99\" or \"99.99.99\". Since the kubelet reports the version of the kubernetes release, not Openshift, this field references the underlying kubernetes version this version of Openshift is based off of. In other words: if an admin wishes to ensure no nodes run an older version than Openshift 4.17, then they should set the minimumKubeletVersion to 1.30.0. When comparing versions, the kubelet's version is stripped of any contents outside of major.minor.patch version. Thus, a kubelet with version \"1.0.0-ec.0\" will be compatible with minimumKubeletVersion \"1.0.0\" or earlier.", } @@ -2437,7 +2482,7 @@ func (TemplateReference) SwaggerDoc() map[string]string { var map_Proxy = map[string]string{ "": "Proxy holds cluster-wide information on how to configure default proxies for the cluster. 
The canonical name is `cluster`\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "Spec holds user-settable values for the proxy configuration", + "spec": "spec holds user-settable values for the proxy configuration", "status": "status holds observed values from the cluster. They may not be overridden.", } @@ -2512,7 +2557,7 @@ var map_SchedulerSpec = map[string]string{ "profile": "profile sets which scheduling profile should be set in order to configure scheduling decisions for new pods.\n\nValid values are \"LowNodeUtilization\", \"HighNodeUtilization\", \"NoScoring\" Defaults to \"LowNodeUtilization\"", "profileCustomizations": "profileCustomizations contains configuration for modifying the default behavior of existing scheduler profiles.", "defaultNodeSelector": "defaultNodeSelector helps set the cluster-wide default node selector to restrict pod placement to specific nodes. This is applied to the pods created in all namespaces and creates an intersection with any existing nodeSelectors already set on a pod, additionally constraining that pod's selector. For example, defaultNodeSelector: \"type=user-node,region=east\" would set nodeSelector field in pod spec to \"type=user-node,region=east\" to all pods created in all namespaces. Namespaces having project-wide node selectors won't be impacted even if this field is set. This adds an annotation section to the namespace. For example, if a new namespace is created with node-selector='type=user-node,region=east', the annotation openshift.io/node-selector: type=user-node,region=east gets added to the project. When the openshift.io/node-selector annotation is set on the project the value is used in preference to the value we are setting for defaultNodeSelector field. For instance, openshift.io/node-selector: \"type=user-node,region=west\" means that the default of \"type=user-node,region=east\" set in defaultNodeSelector would not be applied.", - "mastersSchedulable": "MastersSchedulable allows masters nodes to be schedulable. When this flag is turned on, all the master nodes in the cluster will be made schedulable, so that workload pods can run on them. The default value for this field is false, meaning none of the master nodes are schedulable. Important Note: Once the workload pods start running on the master nodes, extreme care must be taken to ensure that cluster-critical control plane components are not impacted. Please turn on this field after doing due diligence.", + "mastersSchedulable": "mastersSchedulable allows masters nodes to be schedulable. When this flag is turned on, all the master nodes in the cluster will be made schedulable, so that workload pods can run on them. The default value for this field is false, meaning none of the master nodes are schedulable. Important Note: Once the workload pods start running on the master nodes, extreme care must be taken to ensure that cluster-critical control plane components are not impacted. 
Please turn on this field after doing due diligence.",
}
func (SchedulerSpec) SwaggerDoc() map[string]string {
@@ -2520,8 +2565,8 @@ func (SchedulerSpec) SwaggerDoc() map[string]string {
}
var map_FeatureGateTests = map[string]string{
- "featureGate": "FeatureGate is the name of the FeatureGate as it appears in The FeatureGate CR instance.",
- "tests": "Tests contains an item for every TestName",
+ "featureGate": "featureGate is the name of the FeatureGate as it appears in The FeatureGate CR instance.",
+ "tests": "tests contains an item for every TestName",
}
func (FeatureGateTests) SwaggerDoc() map[string]string {
@@ -2529,7 +2574,7 @@ func (FeatureGateTests) SwaggerDoc() map[string]string {
}
var map_TestDetails = map[string]string{
- "testName": "TestName is the name of the test as it appears in junit XMLs. It does not include the suite name since the same test can be executed in many suites.",
+ "testName": "testName is the name of the test as it appears in junit XMLs. It does not include the suite name since the same test can be executed in many suites.",
}
func (TestDetails) SwaggerDoc() map[string]string {
@@ -2547,7 +2592,7 @@ func (TestReporting) SwaggerDoc() map[string]string {
}
var map_TestReportingSpec = map[string]string{
- "testsForFeatureGates": "TestsForFeatureGates is a list, indexed by FeatureGate and includes information about testing.",
+ "testsForFeatureGates": "testsForFeatureGates is a list, indexed by FeatureGate and includes information about testing.",
}
func (TestReportingSpec) SwaggerDoc() map[string]string {
diff --git a/vendor/github.com/openshift/api/config/v1alpha1/register.go b/vendor/github.com/openshift/api/config/v1alpha1/register.go
index 36432ceb8..4b30ea380 100644
--- a/vendor/github.com/openshift/api/config/v1alpha1/register.go
+++ b/vendor/github.com/openshift/api/config/v1alpha1/register.go
@@ -30,6 +30,8 @@ func Resource(resource string) schema.GroupResource {
// Adds the list of known types to api.Scheme.
func addKnownTypes(scheme *runtime.Scheme) error {
scheme.AddKnownTypes(GroupVersion,
+ &ClusterMonitoring{},
+ &ClusterMonitoringList{},
&InsightsDataGather{},
&InsightsDataGatherList{},
&Backup{},
diff --git a/vendor/github.com/openshift/api/config/v1alpha1/types_backup.go b/vendor/github.com/openshift/api/config/v1alpha1/types_backup.go
index 65eb5c1f7..e52a2e5c5 100644
--- a/vendor/github.com/openshift/api/config/v1alpha1/types_backup.go
+++ b/vendor/github.com/openshift/api/config/v1alpha1/types_backup.go
@@ -24,18 +24,16 @@ type Backup struct {
metav1.ObjectMeta `json:"metadata,omitempty"`
// spec holds user settable values for configuration
- // +kubebuilder:validation:Required
// +required
Spec BackupSpec `json:"spec"`
// status holds observed values from the cluster. They may not be overridden.
- // +kubebuilder:validation:Optional
// +optional
Status BackupStatus `json:"status"`
}
type BackupSpec struct {
// etcd specifies the configuration for periodic backups of the etcd cluster
- // +kubebuilder:validation:Required
+ // +required
EtcdBackupSpec EtcdBackupSpec `json:"etcd"`
}
@@ -45,12 +43,11 @@ type BackupStatus struct {
// EtcdBackupSpec provides configuration for automated etcd backups to the cluster-etcd-operator
type EtcdBackupSpec struct {
- // Schedule defines the recurring backup schedule in Cron format
+ // schedule defines the recurring backup schedule in Cron format
// every 2 hours: 0 */2 * * *
// every day at 3am: 0 3 * * *
// Empty string means no opinion and the platform is left to choose a reasonable default which is subject to change without notice.
// The current default is "no backups", but will change in the future.
- // +kubebuilder:validation:Optional
// +optional
// +kubebuilder:validation:Pattern:=`^(@(annually|yearly|monthly|weekly|daily|hourly))|(\*|(?:\*|(?:[0-9]|(?:[1-5][0-9])))\/(?:[0-9]|(?:[1-5][0-9]))|(?:[0-9]|(?:[1-5][0-9]))(?:(?:\-[0-9]|\-(?:[1-5][0-9]))?|(?:\,(?:[0-9]|(?:[1-5][0-9])))*)) (\*|(?:\*|(?:\*|(?:[0-9]|1[0-9]|2[0-3])))\/(?:[0-9]|1[0-9]|2[0-3])|(?:[0-9]|1[0-9]|2[0-3])(?:(?:\-(?:[0-9]|1[0-9]|2[0-3]))?|(?:\,(?:[0-9]|1[0-9]|2[0-3]))*)) (\*|(?:[1-9]|(?:[12][0-9])|3[01])(?:(?:\-(?:[1-9]|(?:[12][0-9])|3[01]))?|(?:\,(?:[1-9]|(?:[12][0-9])|3[01]))*)) (\*|(?:[1-9]|1[012]|JAN|FEB|MAR|APR|MAY|JUN|JUL|AUG|SEP|OCT|NOV|DEC)(?:(?:\-(?:[1-9]|1[012]|JAN|FEB|MAR|APR|MAY|JUN|JUL|AUG|SEP|OCT|NOV|DEC))?|(?:\,(?:[1-9]|1[012]|JAN|FEB|MAR|APR|MAY|JUN|JUL|AUG|SEP|OCT|NOV|DEC))*)) (\*|(?:[0-6]|SUN|MON|TUE|WED|THU|FRI|SAT)(?:(?:\-(?:[0-6]|SUN|MON|TUE|WED|THU|FRI|SAT))?|(?:\,(?:[0-6]|SUN|MON|TUE|WED|THU|FRI|SAT))*))$`
Schedule string `json:"schedule"`
@@ -73,7 +70,6 @@ type EtcdBackupSpec struct {
// The time zone name for the given schedule, see https://en.wikipedia.org/wiki/List_of_tz_database_time_zones.
// If not specified, this will default to the time zone of the kube-controller-manager process.
// See https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/#time-zones
- // +kubebuilder:validation:Optional
// +optional
// +kubebuilder:validation:Pattern:=`^([A-Za-z_]+([+-]*0)*|[A-Za-z_]+(\/[A-Za-z_]+){1,2})(\/GMT[+-]\d{1,2})?$`
TimeZone string `json:"timeZone"`
@@ -84,17 +80,15 @@ type EtcdBackupSpec struct {
// [A-Za-z_]+(/[A-Za-z_]+){1,2} - One or more alphabetical characters (uppercase or lowercase) or underscores, followed by one or two occurrences of a forward slash followed by one or more alphabetical characters or underscores. This allows for matching timezone identifiers with 2 or 3 parts, e.g America/Argentina/Buenos_Aires
// (/GMT[+-]\d{1,2})? - Makes the GMT offset suffix optional. It matches "/GMT" followed by either a plus ("+") or minus ("-") sign and one or two digits (the GMT offset)
- // RetentionPolicy defines the retention policy for retaining and deleting existing backups.
- // +kubebuilder:validation:Optional
+ // retentionPolicy defines the retention policy for retaining and deleting existing backups.
// +optional
RetentionPolicy RetentionPolicy `json:"retentionPolicy"`
- // PVCName specifies the name of the PersistentVolumeClaim (PVC) which binds a PersistentVolume where the
+ // pvcName specifies the name of the PersistentVolumeClaim (PVC) which binds a PersistentVolume where the
// etcd backup files would be saved
// The PVC itself must always be created in the "openshift-etcd" namespace
// If the PVC is left unspecified "" then the platform will choose a reasonable default location to save the backup.
// In the future this would be backups saved across the control-plane master nodes.
- // +kubebuilder:validation:Optional
// +optional
PVCName string `json:"pvcName"`
}
@@ -115,45 +109,40 @@ const (
// This struct is a discriminated union that allows users to select the type of retention policy from the supported types.
// +union
type RetentionPolicy struct {
- // RetentionType sets the type of retention policy.
+ // retentionType sets the type of retention policy.
// Currently, the only valid policies are retention by number of backups (RetentionNumber), by the size of backups (RetentionSize). More policies or types may be added in the future.
// Empty string means no opinion and the platform is left to choose a reasonable default which is subject to change without notice.
// The current default is RetentionNumber with 15 backups kept.
// +unionDiscriminator
// +required
- // +kubebuilder:validation:Required
// +kubebuilder:validation:Enum:="";"RetentionNumber";"RetentionSize"
RetentionType RetentionType `json:"retentionType"`
- // RetentionNumber configures the retention policy based on the number of backups
- // +kubebuilder:validation:Optional
+ // retentionNumber configures the retention policy based on the number of backups
// +optional
RetentionNumber *RetentionNumberConfig `json:"retentionNumber,omitempty"`
- // RetentionSize configures the retention policy based on the size of backups
- // +kubebuilder:validation:Optional
+ // retentionSize configures the retention policy based on the size of backups
// +optional
RetentionSize *RetentionSizeConfig `json:"retentionSize,omitempty"`
}
// RetentionNumberConfig specifies the configuration of the retention policy on the number of backups
type RetentionNumberConfig struct {
- // MaxNumberOfBackups defines the maximum number of backups to retain.
+ // maxNumberOfBackups defines the maximum number of backups to retain.
// If the existing number of backups saved is equal to MaxNumberOfBackups then
// the oldest backup will be removed before a new backup is initiated.
// +kubebuilder:validation:Minimum=1
- // +kubebuilder:validation:Required
// +required
MaxNumberOfBackups int `json:"maxNumberOfBackups,omitempty"`
}
// RetentionSizeConfig specifies the configuration of the retention policy on the total size of backups
type RetentionSizeConfig struct {
- // MaxSizeOfBackupsGb defines the total size in GB of backups to retain.
+ // maxSizeOfBackupsGb defines the total size in GB of backups to retain.
// If the current total size backups exceeds MaxSizeOfBackupsGb then
// the oldest backup will be removed before a new backup is initiated.
// +kubebuilder:validation:Minimum=1 - // +kubebuilder:validation:Required // +required MaxSizeOfBackupsGb int `json:"maxSizeOfBackupsGb,omitempty"` } diff --git a/vendor/github.com/openshift/api/config/v1alpha1/types_cluster_image_policy.go b/vendor/github.com/openshift/api/config/v1alpha1/types_cluster_image_policy.go index 14650fd48..107b9e29a 100644 --- a/vendor/github.com/openshift/api/config/v1alpha1/types_cluster_image_policy.go +++ b/vendor/github.com/openshift/api/config/v1alpha1/types_cluster_image_policy.go @@ -24,7 +24,7 @@ type ClusterImagePolicy struct { metav1.ObjectMeta `json:"metadata,omitempty"` // spec contains the configuration for the cluster image policy. - // +kubebuilder:validation:Required + // +required Spec ClusterImagePolicySpec `json:"spec"` // status contains the observed state of the resource. // +optional @@ -44,13 +44,13 @@ type ClusterImagePolicySpec struct { // If a scope is configured in both the ClusterImagePolicy and the ImagePolicy, or if the scope in ImagePolicy is nested under one of the scopes from the ClusterImagePolicy, only the policy from the ClusterImagePolicy will be applied. // For additional details about the format, please refer to the document explaining the docker transport field, // which can be found at: https://github.com/containers/image/blob/main/docs/containers-policy.json.5.md#docker - // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:MaxItems=256 // +listType=set Scopes []ImageScope `json:"scopes"` // policy contains configuration to allow scopes to be verified, and defines how // images not matching the verification policy will be treated. - // +kubebuilder:validation:Required + // +required Policy Policy `json:"policy"` } @@ -59,6 +59,7 @@ type ClusterImagePolicyStatus struct { // conditions provide details on the status of this API Resource. // +listType=map // +listMapKey=type + // +optional Conditions []metav1.Condition `json:"conditions,omitempty"` } diff --git a/vendor/github.com/openshift/api/config/v1alpha1/types_cluster_monitoring.go b/vendor/github.com/openshift/api/config/v1alpha1/types_cluster_monitoring.go new file mode 100644 index 000000000..c276971b5 --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1alpha1/types_cluster_monitoring.go @@ -0,0 +1,103 @@ +/* +Copyright 2024. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ClusterMonitoring is the Custom Resource object which holds the current status of Cluster Monitoring Operator. CMO is a central component of the monitoring stack. +// +// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. 
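To make the etcd backup knobs above concrete, here is a hypothetical sketch that builds a Backup object with a two-hour cron schedule, an explicit time zone, and number-based retention, using only fields shown in this hunk. The object name and PVC name are placeholders, and the retention type is written as a string literal because the constant's Go name is not visible in this diff:

```go
package main

import (
	"fmt"

	configv1alpha1 "github.com/openshift/api/config/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	backup := configv1alpha1.Backup{
		ObjectMeta: metav1.ObjectMeta{Name: "default"}, // placeholder name
		Spec: configv1alpha1.BackupSpec{
			EtcdBackupSpec: configv1alpha1.EtcdBackupSpec{
				Schedule: "0 */2 * * *",      // every 2 hours, per the schedule doc
				TimeZone: "America/New_York", // optional; defaults to the kube-controller-manager zone
				PVCName:  "etcd-backup-pvc",  // placeholder; must be created in "openshift-etcd"
				RetentionPolicy: configv1alpha1.RetentionPolicy{
					// Enum accepts "", "RetentionNumber", or "RetentionSize".
					RetentionType: configv1alpha1.RetentionType("RetentionNumber"),
					RetentionNumber: &configv1alpha1.RetentionNumberConfig{
						MaxNumberOfBackups: 5, // oldest backup is pruned once this is reached
					},
				},
			},
		},
	}
	fmt.Printf("%+v\n", backup.Spec)
}
```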
+// +openshift:compatibility-gen:internal +// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/1929 +// +openshift:file-pattern=cvoRunLevel=0000_10,operatorName=config-operator,operatorOrdering=01 +// +kubebuilder:object:root=true +// +kubebuilder:resource:path=clustermonitoring,scope=Cluster +// +kubebuilder:subresource:status +// +kubebuilder:metadata:annotations="description=Cluster Monitoring Operators configuration API" +// +openshift:enable:FeatureGate=ClusterMonitoringConfig +// ClusterMonitoring is the Schema for the Cluster Monitoring Operators API +type ClusterMonitoring struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object metadata. + // +optional + metav1.ObjectMeta `json:"metadata,omitempty"` + + // spec holds user configuration for the Cluster Monitoring Operator + // +required + Spec ClusterMonitoringSpec `json:"spec"` + // status holds observed values from the cluster. They may not be overridden. + // +optional + Status ClusterMonitoringStatus `json:"status,omitempty"` +} + +// MonitoringOperatorStatus defines the observed state of MonitoringOperator +type ClusterMonitoringStatus struct { +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. +// +openshift:compatibility-gen:internal +type ClusterMonitoringList struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard list metadata. + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // items is a list of ClusterMonitoring + // +optional + Items []ClusterMonitoring `json:"items"` +} + +// ClusterMonitoringSpec defines the desired state of Cluster Monitoring Operator +// +required +type ClusterMonitoringSpec struct { + // userDefined set the deployment mode for user-defined monitoring in addition to the default platform monitoring. + // +required + UserDefined UserDefinedMonitoring `json:"userDefined"` +} + +// UserDefinedMonitoring config for user-defined projects. +// +required +type UserDefinedMonitoring struct { + // mode defines the different configurations of UserDefinedMonitoring + // Valid values are Disabled and NamespaceIsolated + // Disabled disables monitoring for user-defined projects. This restricts the default monitoring stack, installed in the openshift-monitoring project, to monitor only platform namespaces, which prevents any custom monitoring configurations or resources from being applied to user-defined namespaces. + // NamespaceIsolated enables monitoring for user-defined projects with namespace-scoped tenancy. This ensures that metrics, alerts, and monitoring data are isolated at the namespace level. + // +kubebuilder:validation:Enum:="Disabled";"NamespaceIsolated" + // +required + Mode UserDefinedMode `json:"mode"` +} + +// UserDefinedMode specifies mode for UserDefine Monitoring +// +enum +type UserDefinedMode string + +const ( + // UserDefinedDisabled disables monitoring for user-defined projects. This restricts the default monitoring stack, installed in the openshift-monitoring project, to monitor only platform namespaces, which prevents any custom monitoring configurations or resources from being applied to user-defined namespaces. 
+ UserDefinedDisabled UserDefinedMode = "Disabled" + // UserDefinedNamespaceIsolated enables monitoring for user-defined projects with namespace-scoped tenancy. This ensures that metrics, alerts, and monitoring data are isolated at the namespace level. + UserDefinedNamespaceIsolated UserDefinedMode = "NamespaceIsolated" +) diff --git a/vendor/github.com/openshift/api/config/v1alpha1/types_image_policy.go b/vendor/github.com/openshift/api/config/v1alpha1/types_image_policy.go index a177ddb0d..24ff257c9 100644 --- a/vendor/github.com/openshift/api/config/v1alpha1/types_image_policy.go +++ b/vendor/github.com/openshift/api/config/v1alpha1/types_image_policy.go @@ -23,7 +23,7 @@ type ImagePolicy struct { metav1.ObjectMeta `json:"metadata,omitempty"` // spec holds user settable values for configuration - // +kubebuilder:validation:Required + // +required Spec ImagePolicySpec `json:"spec"` // status contains the observed state of the resource. // +optional @@ -43,13 +43,13 @@ type ImagePolicySpec struct { // If a scope is configured in both the ClusterImagePolicy and the ImagePolicy, or if the scope in ImagePolicy is nested under one of the scopes from the ClusterImagePolicy, only the policy from the ClusterImagePolicy will be applied. // For additional details about the format, please refer to the document explaining the docker transport field, // which can be found at: https://github.com/containers/image/blob/main/docs/containers-policy.json.5.md#docker - // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:MaxItems=256 // +listType=set Scopes []ImageScope `json:"scopes"` // policy contains configuration to allow scopes to be verified, and defines how // images not matching the verification policy will be treated. - // +kubebuilder:validation:Required + // +required Policy Policy `json:"policy"` } @@ -62,7 +62,7 @@ type ImageScope string // Policy defines the verification policy for the items in the scopes list. type Policy struct { // rootOfTrust specifies the root of trust for the policy. - // +kubebuilder:validation:Required + // +required RootOfTrust PolicyRootOfTrust `json:"rootOfTrust"` // signedIdentity specifies what image identity the signature claims about the image. The required matchPolicy field specifies the approach used in the verification process to verify the identity in the signature and the actual image identity, the default matchPolicy is "MatchRepoDigestOrExact". // +optional @@ -73,12 +73,14 @@ type Policy struct { // +union // +kubebuilder:validation:XValidation:rule="has(self.policyType) && self.policyType == 'PublicKey' ? has(self.publicKey) : !has(self.publicKey)",message="publicKey is required when policyType is PublicKey, and forbidden otherwise" // +kubebuilder:validation:XValidation:rule="has(self.policyType) && self.policyType == 'FulcioCAWithRekor' ? has(self.fulcioCAWithRekor) : !has(self.fulcioCAWithRekor)",message="fulcioCAWithRekor is required when policyType is FulcioCAWithRekor, and forbidden otherwise" +// +openshift:validation:FeatureGateAwareXValidation:featureGate=SigstoreImageVerificationPKI,rule="has(self.policyType) && self.policyType == 'PKI' ? has(self.pki) : !has(self.pki)",message="pki is required when policyType is PKI, and forbidden otherwise" type PolicyRootOfTrust struct { // policyType serves as the union's discriminator. Users are required to assign a value to this field, choosing one of the policy types that define the root of trust. 
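A brief illustration (not from the patch) of the new ClusterMonitoring API: constructing the object with user-defined monitoring in namespace-isolated mode. The object name "cluster" is an assumption; the types and the UserDefinedNamespaceIsolated constant come from the hunks above.

```go
package main

import (
	"fmt"

	configv1alpha1 "github.com/openshift/api/config/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	cm := configv1alpha1.ClusterMonitoring{
		ObjectMeta: metav1.ObjectMeta{Name: "cluster"}, // assumed singleton name
		Spec: configv1alpha1.ClusterMonitoringSpec{
			UserDefined: configv1alpha1.UserDefinedMonitoring{
				// NamespaceIsolated keeps user-project metrics and alerts tenant-scoped;
				// Disabled restricts monitoring to platform namespaces only.
				Mode: configv1alpha1.UserDefinedNamespaceIsolated,
			},
		},
	}
	fmt.Printf("%+v\n", cm.Spec)
}
```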
// "PublicKey" indicates that the policy relies on a sigstore publicKey and may optionally use a Rekor verification. // "FulcioCAWithRekor" indicates that the policy is based on the Fulcio certification and incorporates a Rekor verification. + // "PKI" is a DevPreview feature that indicates that the policy is based on the certificates from Bring Your Own Public Key Infrastructure (BYOPKI). This value is enabled by turning on the SigstoreImageVerificationPKI feature gate. // +unionDiscriminator - // +kubebuilder:validation:Required + // +required PolicyType PolicyType `json:"policyType"` // publicKey defines the root of trust based on a sigstore public key. // +optional @@ -88,21 +90,27 @@ type PolicyRootOfTrust struct { // https://github.com/sigstore/fulcio and https://github.com/sigstore/rekor // +optional FulcioCAWithRekor *FulcioCAWithRekor `json:"fulcioCAWithRekor,omitempty"` + // pki defines the root of trust based on Bring Your Own Public Key Infrastructure (BYOPKI) Root CA(s) and corresponding intermediate certificates. + // +optional + // +openshift:enable:FeatureGate=SigstoreImageVerificationPKI + PKI *PKI `json:"pki,omitempty"` } -// +kubebuilder:validation:Enum=PublicKey;FulcioCAWithRekor +// +openshift:validation:FeatureGateAwareEnum:featureGate="",enum=PublicKey;FulcioCAWithRekor +// +openshift:validation:FeatureGateAwareEnum:featureGate=SigstoreImageVerificationPKI,enum=PublicKey;FulcioCAWithRekor;PKI type PolicyType string const ( PublicKeyRootOfTrust PolicyType = "PublicKey" FulcioCAWithRekorRootOfTrust PolicyType = "FulcioCAWithRekor" + PKIRootOfTrust PolicyType = "PKI" ) // PublicKey defines the root of trust based on a sigstore public key. type PublicKey struct { // keyData contains inline base64-encoded data for the PEM format public key. // KeyData must be at most 8192 characters. - // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:MaxLength=8192 KeyData []byte `json:"keyData"` // rekorKeyData contains inline base64-encoded data for the PEM format from the Rekor public key. @@ -116,16 +124,16 @@ type PublicKey struct { type FulcioCAWithRekor struct { // fulcioCAData contains inline base64-encoded data for the PEM format fulcio CA. // fulcioCAData must be at most 8192 characters. - // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:MaxLength=8192 FulcioCAData []byte `json:"fulcioCAData"` // rekorKeyData contains inline base64-encoded data for the PEM format from the Rekor public key. // rekorKeyData must be at most 8192 characters. - // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:MaxLength=8192 RekorKeyData []byte `json:"rekorKeyData"` // fulcioSubject specifies OIDC issuer and the email of the Fulcio authentication configuration. - // +kubebuilder:validation:Required + // +required FulcioSubject PolicyFulcioSubject `json:"fulcioSubject,omitempty"` } @@ -133,16 +141,58 @@ type FulcioCAWithRekor struct { type PolicyFulcioSubject struct { // oidcIssuer contains the expected OIDC issuer. It will be verified that the Fulcio-issued certificate contains a (Fulcio-defined) certificate extension pointing at this OIDC issuer URL. When Fulcio issues certificates, it includes a value based on an URL inside the client-provided ID token. 
// Example: "https://expected.OIDC.issuer/" - // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:XValidation:rule="isURL(self)",message="oidcIssuer must be a valid URL" OIDCIssuer string `json:"oidcIssuer"` // signedEmail holds the email address the the Fulcio certificate is issued for. // Example: "expected-signing-user@example.com" - // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:XValidation:rule=`self.matches('^\\S+@\\S+$')`,message="invalid email address" SignedEmail string `json:"signedEmail"` } +// PKI defines the root of trust based on Root CA(s) and corresponding intermediate certificates. +type PKI struct { + // caRootsData contains base64-encoded data of a certificate bundle PEM file, which contains one or more CA roots in the PEM format. The total length of the data must not exceed 8192 characters. + // +required + // +kubebuilder:validation:MaxLength=8192 + // +kubebuilder:validation:XValidation:rule="string(self).startsWith('-----BEGIN CERTIFICATE-----')",message="the caRootsData must start with base64 encoding of '-----BEGIN CERTIFICATE-----'." + // +kubebuilder:validation:XValidation:rule="string(self).endsWith('-----END CERTIFICATE-----\\n') || string(self).endsWith('-----END CERTIFICATE-----')",message="the caRootsData must end with base64 encoding of '-----END CERTIFICATE-----'." + // +kubebuilder:validation:XValidation:rule="string(self).findAll('-----BEGIN CERTIFICATE-----').size() == string(self).findAll('-----END CERTIFICATE-----').size()",message="caRootsData must be base64 encoding of valid PEM format data contain the same number of '-----BEGIN CERTIFICATE-----' and '-----END CERTIFICATE-----' markers." + CertificateAuthorityRootsData []byte `json:"caRootsData"` + // caIntermediatesData contains base64-encoded data of a certificate bundle PEM file, which contains one or more intermediate certificates in the PEM format. The total length of the data must not exceed 8192 characters. + // caIntermediatesData requires caRootsData to be set. + // +optional + // +kubebuilder:validation:XValidation:rule="string(self).startsWith('-----BEGIN CERTIFICATE-----')",message="the caIntermediatesData must start with base64 encoding of '-----BEGIN CERTIFICATE-----'." + // +kubebuilder:validation:XValidation:rule="string(self).endsWith('-----END CERTIFICATE-----\\n') || string(self).endsWith('-----END CERTIFICATE-----')",message="the caIntermediatesData must end with base64 encoding of '-----END CERTIFICATE-----'." + // +kubebuilder:validation:XValidation:rule="string(self).findAll('-----BEGIN CERTIFICATE-----').size() == string(self).findAll('-----END CERTIFICATE-----').size()",message="caIntermediatesData must be base64 encoding of valid PEM format data contain the same number of '-----BEGIN CERTIFICATE-----' and '-----END CERTIFICATE-----' markers." + // +kubebuilder:validation:MaxLength=8192 + CertificateAuthorityIntermediatesData []byte `json:"caIntermediatesData,omitempty"` + + // pkiCertificateSubject defines the requirements imposed on the subject to which the certificate was issued. + // +required + PKICertificateSubject PKICertificateSubject `json:"pkiCertificateSubject"` +} + +// PKICertificateSubject defines the requirements imposed on the subject to which the certificate was issued. 
+// +kubebuilder:validation:XValidation:rule="has(self.email) || has(self.hostname)", message="at least one of email or hostname must be set in pkiCertificateSubject" +// +openshift:enable:FeatureGate=SigstoreImageVerificationPKI +type PKICertificateSubject struct { + // email specifies the expected email address imposed on the subject to which the certificate was issued, and must match the email address listed in the Subject Alternative Name (SAN) field of the certificate. + // The email should be a valid email address and at most 320 characters in length. + // +optional + // +kubebuilder:validation:MaxLength:=320 + // +kubebuilder:validation:XValidation:rule=`self.matches('^\\S+@\\S+$')`,message="invalid email address in pkiCertificateSubject" + Email string `json:"email,omitempty"` + // hostname specifies the expected hostname imposed on the subject to which the certificate was issued, and it must match the hostname listed in the Subject Alternative Name (SAN) DNS field of the certificate. + // The hostname should be a valid dns 1123 subdomain name, optionally prefixed by '*.', and at most 253 characters in length. + // It should consist only of lowercase alphanumeric characters, hyphens, periods and the optional preceding asterisk. + // +optional + // +kubebuilder:validation:MaxLength:=253 + // +kubebuilder:validation:XValidation:rule="self.startsWith('*.') ? !format.dns1123Subdomain().validate(self.replace('*.', '', 1)).hasValue() : !format.dns1123Subdomain().validate(self).hasValue()",message="hostname should be a valid dns 1123 subdomain name, optionally prefixed by '*.'. It should consist only of lowercase alphanumeric characters, hyphens, periods and the optional preceding asterisk." + Hostname string `json:"hostname,omitempty"` +} + // PolicyIdentity defines image identity the signature claims about the image. When omitted, the default matchPolicy is "MatchRepoDigestOrExact". // +kubebuilder:validation:XValidation:rule="(has(self.matchPolicy) && self.matchPolicy == 'ExactRepository') ? has(self.exactRepository) : !has(self.exactRepository)",message="exactRepository is required when matchPolicy is ExactRepository, and forbidden otherwise" // +kubebuilder:validation:XValidation:rule="(has(self.matchPolicy) && self.matchPolicy == 'RemapIdentity') ? has(self.remapIdentity) : !has(self.remapIdentity)",message="remapIdentity is required when matchPolicy is RemapIdentity, and forbidden otherwise" @@ -157,7 +207,7 @@ type PolicyIdentity struct { // "ExactRepository" means that the identity in the signature must be in the same repository as a specific identity specified by "repository". // "RemapIdentity" means that the signature must be in the same as the remapped image identity. Remapped image identity is obtained by replacing the "prefix" with the specified “signedPrefix” if the the image identity matches the specified remapPrefix. // +unionDiscriminator - // +kubebuilder:validation:Required + // +required MatchPolicy IdentityMatchPolicy `json:"matchPolicy"` // exactRepository is required if matchPolicy is set to "ExactRepository". // +optional @@ -175,7 +225,7 @@ type IdentityRepositoryPrefix string type PolicyMatchExactRepository struct { // repository is the reference of the image identity to be matched. // The value should be a repository name (by omitting the tag or digest) in a registry implementing the "Docker Registry HTTP API V2". 
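To show how the new PKI arm slots into the PolicyRootOfTrust union, here is a hedged sketch of a ClusterImagePolicy that trusts a BYOPKI root CA for one registry scope. The policy name, scope, subject email, and CA bundle are placeholders; the field and constant names are taken from the hunks above, and the PKI arm only takes effect behind the SigstoreImageVerificationPKI feature gate.

```go
package main

import (
	"fmt"

	configv1alpha1 "github.com/openshift/api/config/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// Placeholder CA bundle; the []byte field is serialized as base64 in JSON,
	// and the CEL rules above require PEM BEGIN/END CERTIFICATE markers.
	caPEM := []byte("-----BEGIN CERTIFICATE-----\n...\n-----END CERTIFICATE-----\n")

	policy := configv1alpha1.ClusterImagePolicy{
		ObjectMeta: metav1.ObjectMeta{Name: "byopki-signed-images"}, // hypothetical name
		Spec: configv1alpha1.ClusterImagePolicySpec{
			Scopes: []configv1alpha1.ImageScope{"registry.example.com/signed"},
			Policy: configv1alpha1.Policy{
				RootOfTrust: configv1alpha1.PolicyRootOfTrust{
					// "PKI" is only accepted when the feature gate is enabled.
					PolicyType: configv1alpha1.PKIRootOfTrust,
					PKI: &configv1alpha1.PKI{
						CertificateAuthorityRootsData: caPEM,
						PKICertificateSubject: configv1alpha1.PKICertificateSubject{
							// At least one of email or hostname must be set.
							Email: "release-signer@example.com",
						},
					},
				},
			},
		},
	}
	fmt.Printf("%+v\n", policy.Spec)
}
```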
For example, docker.io/library/busybox - // +kubebuilder:validation:Required + // +required Repository IdentityRepositoryPrefix `json:"repository"` } @@ -186,12 +236,12 @@ type PolicyMatchRemapIdentity struct { // The prefix and signedPrefix values can be either host[:port] values (matching exactly the same host[:port], string), repository namespaces, // or repositories (i.e. they must not contain tags/digests), and match as prefixes of the fully expanded form. // For example, docker.io/library/busybox (not busybox) to specify that single repository, or docker.io/library (not an empty string) to specify the parent namespace of docker.io/library/busybox. - // +kubebuilder:validation:Required + // +required Prefix IdentityRepositoryPrefix `json:"prefix"` // signedPrefix is the prefix of the image identity to be matched in the signature. The format is the same as "prefix". The values can be either host[:port] values (matching exactly the same host[:port], string), repository namespaces, // or repositories (i.e. they must not contain tags/digests), and match as prefixes of the fully expanded form. // For example, docker.io/library/busybox (not busybox) to specify that single repository, or docker.io/library (not an empty string) to specify the parent namespace of docker.io/library/busybox. - // +kubebuilder:validation:Required + // +required SignedPrefix IdentityRepositoryPrefix `json:"signedPrefix"` } diff --git a/vendor/github.com/openshift/api/config/v1alpha1/types_insights.go b/vendor/github.com/openshift/api/config/v1alpha1/types_insights.go index 171e96d5b..46666ae3b 100644 --- a/vendor/github.com/openshift/api/config/v1alpha1/types_insights.go +++ b/vendor/github.com/openshift/api/config/v1alpha1/types_insights.go @@ -24,7 +24,7 @@ type InsightsDataGather struct { metav1.ObjectMeta `json:"metadata,omitempty"` // spec holds user settable values for configuration - // +kubebuilder:validation:Required + // +required Spec InsightsDataGatherSpec `json:"spec"` // status holds observed values from the cluster. They may not be overridden. // +optional @@ -32,33 +32,97 @@ type InsightsDataGather struct { } type InsightsDataGatherSpec struct { - // gatherConfig spec attribute includes all the configuration options related to - // gathering of the Insights data and its uploading to the ingress. + // gatherConfig spec attribute includes all the configuration options related to gathering of the Insights data and its uploading to the ingress. // +optional GatherConfig GatherConfig `json:"gatherConfig,omitempty"` } -type InsightsDataGatherStatus struct { -} +type InsightsDataGatherStatus struct{} // gatherConfig provides data gathering configuration options. type GatherConfig struct { - // dataPolicy allows user to enable additional global obfuscation of the IP addresses and base domain - // in the Insights archive data. Valid values are "None" and "ObfuscateNetworking". + // dataPolicy allows user to enable additional global obfuscation of the IP addresses and base domain in the Insights archive data. + // Valid values are "None" and "ObfuscateNetworking". // When set to None the data is not obfuscated. // When set to ObfuscateNetworking the IP addresses and the cluster domain name are obfuscated. // When omitted, this means no opinion and the platform is left to choose a reasonable default, which is subject to change over time. - // The current default is None. 
// +optional DataPolicy DataPolicy `json:"dataPolicy,omitempty"` // disabledGatherers is a list of gatherers to be excluded from the gathering. All the gatherers can be disabled by providing "all" value. // If all the gatherers are disabled, the Insights operator does not gather any data. + // The format for the disabledGatherer should be: {gatherer}/{function} where the function is optional. + // Gatherer consists of a lowercase letters only that may include underscores (_). + // Function consists of a lowercase letters only that may include underscores (_) and is separated from the gatherer by a forward slash (/). // The particular gatherers IDs can be found at https://github.com/openshift/insights-operator/blob/master/docs/gathered-data.md. // Run the following command to get the names of last active gatherers: // "oc get insightsoperators.operator.openshift.io cluster -o json | jq '.status.gatherStatus.gatherers[].name'" // An example of disabling gatherers looks like this: `disabledGatherers: ["clusterconfig/machine_configs", "workloads/workload_info"]` + // +kubebuilder:validation:MaxItems=100 + // +optional + DisabledGatherers []DisabledGatherer `json:"disabledGatherers"` + // storage is an optional field that allows user to define persistent storage for gathering jobs to store the Insights data archive. + // If omitted, the gathering job will use ephemeral storage. + // +optional + StorageSpec *Storage `json:"storage,omitempty"` +} + +// disabledGatherer is a string that represents a gatherer that should be disabled +// +kubebuilder:validation:MaxLength=256 +// +kubebuilder:validation:XValidation:rule=`self.matches("^[a-z]+[_a-z]*[a-z]([/a-z][_a-z]*)?[a-z]$")`,message=`disabledGatherer must be in the format of {gatherer}/{function} where the gatherer and function are lowercase letters only that may include underscores (_) and are separated by a forward slash (/) if the function is provided` +type DisabledGatherer string + +// storage provides persistent storage configuration options for gathering jobs. +// If the type is set to PersistentVolume, then the PersistentVolume must be defined. +// If the type is set to Ephemeral, then the PersistentVolume must not be defined. +// +kubebuilder:validation:XValidation:rule="has(self.type) && self.type == 'PersistentVolume' ? has(self.persistentVolume) : !has(self.persistentVolume)",message="persistentVolume is required when type is PersistentVolume, and forbidden otherwise" +type Storage struct { + // type is a required field that specifies the type of storage that will be used to store the Insights data archive. + // Valid values are "PersistentVolume" and "Ephemeral". + // When set to Ephemeral, the Insights data archive is stored in the ephemeral storage of the gathering job. + // When set to PersistentVolume, the Insights data archive is stored in the PersistentVolume that is defined by the persistentVolume field. + // +required + Type StorageType `json:"type"` + // persistentVolume is an optional field that specifies the PersistentVolume that will be used to store the Insights data archive. + // The PersistentVolume must be created in the openshift-insights namespace. 
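As a usage sketch for the expanded GatherConfig (again, not part of the patch): disabling two gatherers in the {gatherer}/{function} format and persisting archives on a PVC instead of ephemeral storage. The PVC name is invented, and the DataPolicy value is written as a literal because its constant name is not visible in this diff.

```go
package main

import (
	"fmt"

	configv1alpha1 "github.com/openshift/api/config/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	idg := configv1alpha1.InsightsDataGather{
		ObjectMeta: metav1.ObjectMeta{Name: "cluster"}, // assumed singleton name
		Spec: configv1alpha1.InsightsDataGatherSpec{
			GatherConfig: configv1alpha1.GatherConfig{
				// Literal used; the Go constant name is not shown in this patch.
				DataPolicy: configv1alpha1.DataPolicy("ObfuscateNetworking"),
				// {gatherer}/{function}; the function part is optional.
				DisabledGatherers: []configv1alpha1.DisabledGatherer{
					"clusterconfig/machine_configs",
					"workloads/workload_info",
				},
				// Persist archives on a PVC in the openshift-insights namespace.
				StorageSpec: &configv1alpha1.Storage{
					Type: configv1alpha1.StorageTypePersistentVolume,
					PersistentVolume: &configv1alpha1.PersistentVolumeConfig{
						Claim:     configv1alpha1.PersistentVolumeClaimReference{Name: "insights-archives"}, // placeholder PVC
						MountPath: "/var/lib/insights-operator",                                             // the documented default path
					},
				},
			},
		},
	}
	fmt.Printf("%+v\n", idg.Spec)
}
```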
+ // +optional + PersistentVolume *PersistentVolumeConfig `json:"persistentVolume,omitempty"` +} + +// storageType declares valid storage types +// +kubebuilder:validation:Enum=PersistentVolume;Ephemeral +type StorageType string + +const ( + // StorageTypePersistentVolume storage type + StorageTypePersistentVolume StorageType = "PersistentVolume" + // StorageTypeEphemeral storage type + StorageTypeEphemeral StorageType = "Ephemeral" +) + +// persistentVolumeConfig provides configuration options for PersistentVolume storage. +type PersistentVolumeConfig struct { + // claim is a required field that specifies the configuration of the PersistentVolumeClaim that will be used to store the Insights data archive. + // The PersistentVolumeClaim must be created in the openshift-insights namespace. + // +required + Claim PersistentVolumeClaimReference `json:"claim"` + // mountPath is an optional field specifying the directory where the PVC will be mounted inside the Insights data gathering Pod. + // When omitted, this means no opinion and the platform is left to choose a reasonable default, which is subject to change over time. + // The current default mount path is /var/lib/insights-operator + // The path may not exceed 1024 characters and must not contain a colon. + // +kubebuilder:validation:MaxLength=1024 + // +kubebuilder:validation:XValidation:rule="!self.contains(':')",message="mountPath must not contain a colon" // +optional - DisabledGatherers []string `json:"disabledGatherers"` + MountPath string `json:"mountPath,omitempty"` +} + +// persistentVolumeClaimReference is a reference to a PersistentVolumeClaim. +type PersistentVolumeClaimReference struct { + // name is a string that follows the DNS1123 subdomain format. + // It must be at most 253 characters in length, and must consist only of lower case alphanumeric characters, '-' and '.', and must start and end with an alphanumeric character. + // +kubebuilder:validation:XValidation:rule="!format.dns1123Subdomain().validate(self).hasValue()",message="a lowercase RFC 1123 subdomain must consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character." + // +kubebuilder:validation:MaxLength:=253 + // +required + Name string `json:"name"` } const ( diff --git a/vendor/github.com/openshift/api/config/v1alpha1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/config/v1alpha1/zz_generated.deepcopy.go index ab39b5b91..17d74e0fa 100644 --- a/vendor/github.com/openshift/api/config/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/github.com/openshift/api/config/v1alpha1/zz_generated.deepcopy.go @@ -210,6 +210,100 @@ func (in *ClusterImagePolicyStatus) DeepCopy() *ClusterImagePolicyStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterMonitoring) DeepCopyInto(out *ClusterMonitoring) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + out.Status = in.Status + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterMonitoring. +func (in *ClusterMonitoring) DeepCopy() *ClusterMonitoring { + if in == nil { + return nil + } + out := new(ClusterMonitoring) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *ClusterMonitoring) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterMonitoringList) DeepCopyInto(out *ClusterMonitoringList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ClusterMonitoring, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterMonitoringList. +func (in *ClusterMonitoringList) DeepCopy() *ClusterMonitoringList { + if in == nil { + return nil + } + out := new(ClusterMonitoringList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterMonitoringList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterMonitoringSpec) DeepCopyInto(out *ClusterMonitoringSpec) { + *out = *in + out.UserDefined = in.UserDefined + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterMonitoringSpec. +func (in *ClusterMonitoringSpec) DeepCopy() *ClusterMonitoringSpec { + if in == nil { + return nil + } + out := new(ClusterMonitoringSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterMonitoringStatus) DeepCopyInto(out *ClusterMonitoringStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterMonitoringStatus. +func (in *ClusterMonitoringStatus) DeepCopy() *ClusterMonitoringStatus { + if in == nil { + return nil + } + out := new(ClusterMonitoringStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *EtcdBackupSpec) DeepCopyInto(out *EtcdBackupSpec) { *out = *in @@ -259,9 +353,14 @@ func (in *GatherConfig) DeepCopyInto(out *GatherConfig) { *out = *in if in.DisabledGatherers != nil { in, out := &in.DisabledGatherers, &out.DisabledGatherers - *out = make([]string, len(*in)) + *out = make([]DisabledGatherer, len(*in)) copy(*out, *in) } + if in.StorageSpec != nil { + in, out := &in.StorageSpec, &out.StorageSpec + *out = new(Storage) + (*in).DeepCopyInto(*out) + } return } @@ -475,6 +574,82 @@ func (in *InsightsDataGatherStatus) DeepCopy() *InsightsDataGatherStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PKI) DeepCopyInto(out *PKI) { + *out = *in + if in.CertificateAuthorityRootsData != nil { + in, out := &in.CertificateAuthorityRootsData, &out.CertificateAuthorityRootsData + *out = make([]byte, len(*in)) + copy(*out, *in) + } + if in.CertificateAuthorityIntermediatesData != nil { + in, out := &in.CertificateAuthorityIntermediatesData, &out.CertificateAuthorityIntermediatesData + *out = make([]byte, len(*in)) + copy(*out, *in) + } + out.PKICertificateSubject = in.PKICertificateSubject + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PKI. +func (in *PKI) DeepCopy() *PKI { + if in == nil { + return nil + } + out := new(PKI) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PKICertificateSubject) DeepCopyInto(out *PKICertificateSubject) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PKICertificateSubject. +func (in *PKICertificateSubject) DeepCopy() *PKICertificateSubject { + if in == nil { + return nil + } + out := new(PKICertificateSubject) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PersistentVolumeClaimReference) DeepCopyInto(out *PersistentVolumeClaimReference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeClaimReference. +func (in *PersistentVolumeClaimReference) DeepCopy() *PersistentVolumeClaimReference { + if in == nil { + return nil + } + out := new(PersistentVolumeClaimReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PersistentVolumeConfig) DeepCopyInto(out *PersistentVolumeConfig) { + *out = *in + out.Claim = in.Claim + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeConfig. +func (in *PersistentVolumeConfig) DeepCopy() *PersistentVolumeConfig { + if in == nil { + return nil + } + out := new(PersistentVolumeConfig) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Policy) DeepCopyInto(out *Policy) { *out = *in @@ -580,6 +755,11 @@ func (in *PolicyRootOfTrust) DeepCopyInto(out *PolicyRootOfTrust) { *out = new(FulcioCAWithRekor) (*in).DeepCopyInto(*out) } + if in.PKI != nil { + in, out := &in.PKI, &out.PKI + *out = new(PKI) + (*in).DeepCopyInto(*out) + } return } @@ -676,3 +856,40 @@ func (in *RetentionSizeConfig) DeepCopy() *RetentionSizeConfig { in.DeepCopyInto(out) return out } + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Storage) DeepCopyInto(out *Storage) { + *out = *in + if in.PersistentVolume != nil { + in, out := &in.PersistentVolume, &out.PersistentVolume + *out = new(PersistentVolumeConfig) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Storage. 
+func (in *Storage) DeepCopy() *Storage { + if in == nil { + return nil + } + out := new(Storage) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UserDefinedMonitoring) DeepCopyInto(out *UserDefinedMonitoring) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserDefinedMonitoring. +func (in *UserDefinedMonitoring) DeepCopy() *UserDefinedMonitoring { + if in == nil { + return nil + } + out := new(UserDefinedMonitoring) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/openshift/api/config/v1alpha1/zz_generated.featuregated-crd-manifests.yaml b/vendor/github.com/openshift/api/config/v1alpha1/zz_generated.featuregated-crd-manifests.yaml index 393365b41..b9dca71a9 100644 --- a/vendor/github.com/openshift/api/config/v1alpha1/zz_generated.featuregated-crd-manifests.yaml +++ b/vendor/github.com/openshift/api/config/v1alpha1/zz_generated.featuregated-crd-manifests.yaml @@ -29,6 +29,7 @@ clusterimagepolicies.config.openshift.io: Category: "" FeatureGates: - SigstoreImageVerification + - SigstoreImageVerificationPKI FilenameOperatorName: config-operator FilenameOperatorOrdering: "01" FilenameRunLevel: "0000_10" @@ -44,6 +45,30 @@ clusterimagepolicies.config.openshift.io: - SigstoreImageVerification Version: v1alpha1 +clustermonitoring.config.openshift.io: + Annotations: + description: Cluster Monitoring Operators configuration API + ApprovedPRNumber: https://github.com/openshift/api/pull/1929 + CRDName: clustermonitoring.config.openshift.io + Capability: "" + Category: "" + FeatureGates: + - ClusterMonitoringConfig + FilenameOperatorName: config-operator + FilenameOperatorOrdering: "01" + FilenameRunLevel: "0000_10" + GroupName: config.openshift.io + HasStatus: true + KindName: ClusterMonitoring + Labels: {} + PluralName: clustermonitoring + PrinterColumns: [] + Scope: Cluster + ShortNames: null + TopLevelFeatureGates: + - ClusterMonitoringConfig + Version: v1alpha1 + imagepolicies.config.openshift.io: Annotations: {} ApprovedPRNumber: https://github.com/openshift/api/pull/1457 @@ -52,6 +77,7 @@ imagepolicies.config.openshift.io: Category: "" FeatureGates: - SigstoreImageVerification + - SigstoreImageVerificationPKI FilenameOperatorName: config-operator FilenameOperatorOrdering: "01" FilenameRunLevel: "0000_10" diff --git a/vendor/github.com/openshift/api/config/v1alpha1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/config/v1alpha1/zz_generated.swagger_doc_generated.go index 55468f38d..504281540 100644 --- a/vendor/github.com/openshift/api/config/v1alpha1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/config/v1alpha1/zz_generated.swagger_doc_generated.go @@ -41,10 +41,10 @@ func (BackupSpec) SwaggerDoc() map[string]string { var map_EtcdBackupSpec = map[string]string{ "": "EtcdBackupSpec provides configuration for automated etcd backups to the cluster-etcd-operator", - "schedule": "Schedule defines the recurring backup schedule in Cron format every 2 hours: 0 */2 * * * every day at 3am: 0 3 * * * Empty string means no opinion and the platform is left to choose a reasonable default which is subject to change without notice. 
The current default is \"no backups\", but will change in the future.", + "schedule": "schedule defines the recurring backup schedule in Cron format every 2 hours: 0 */2 * * * every day at 3am: 0 3 * * * Empty string means no opinion and the platform is left to choose a reasonable default which is subject to change without notice. The current default is \"no backups\", but will change in the future.", "timeZone": "The time zone name for the given schedule, see https://en.wikipedia.org/wiki/List_of_tz_database_time_zones. If not specified, this will default to the time zone of the kube-controller-manager process. See https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/#time-zones", - "retentionPolicy": "RetentionPolicy defines the retention policy for retaining and deleting existing backups.", - "pvcName": "PVCName specifies the name of the PersistentVolumeClaim (PVC) which binds a PersistentVolume where the etcd backup files would be saved The PVC itself must always be created in the \"openshift-etcd\" namespace If the PVC is left unspecified \"\" then the platform will choose a reasonable default location to save the backup. In the future this would be backups saved across the control-plane master nodes.", + "retentionPolicy": "retentionPolicy defines the retention policy for retaining and deleting existing backups.", + "pvcName": "pvcName specifies the name of the PersistentVolumeClaim (PVC) which binds a PersistentVolume where the etcd backup files would be saved The PVC itself must always be created in the \"openshift-etcd\" namespace If the PVC is left unspecified \"\" then the platform will choose a reasonable default location to save the backup. In the future this would be backups saved across the control-plane master nodes.", } func (EtcdBackupSpec) SwaggerDoc() map[string]string { @@ -53,7 +53,7 @@ func (EtcdBackupSpec) SwaggerDoc() map[string]string { var map_RetentionNumberConfig = map[string]string{ "": "RetentionNumberConfig specifies the configuration of the retention policy on the number of backups", - "maxNumberOfBackups": "MaxNumberOfBackups defines the maximum number of backups to retain. If the existing number of backups saved is equal to MaxNumberOfBackups then the oldest backup will be removed before a new backup is initiated.", + "maxNumberOfBackups": "maxNumberOfBackups defines the maximum number of backups to retain. If the existing number of backups saved is equal to MaxNumberOfBackups then the oldest backup will be removed before a new backup is initiated.", } func (RetentionNumberConfig) SwaggerDoc() map[string]string { @@ -62,9 +62,9 @@ func (RetentionNumberConfig) SwaggerDoc() map[string]string { var map_RetentionPolicy = map[string]string{ "": "RetentionPolicy defines the retention policy for retaining and deleting existing backups. This struct is a discriminated union that allows users to select the type of retention policy from the supported types.", - "retentionType": "RetentionType sets the type of retention policy. Currently, the only valid policies are retention by number of backups (RetentionNumber), by the size of backups (RetentionSize). More policies or types may be added in the future. Empty string means no opinion and the platform is left to choose a reasonable default which is subject to change without notice. 
The current default is RetentionNumber with 15 backups kept.", - "retentionNumber": "RetentionNumber configures the retention policy based on the number of backups", - "retentionSize": "RetentionSize configures the retention policy based on the size of backups", + "retentionType": "retentionType sets the type of retention policy. Currently, the only valid policies are retention by number of backups (RetentionNumber), by the size of backups (RetentionSize). More policies or types may be added in the future. Empty string means no opinion and the platform is left to choose a reasonable default which is subject to change without notice. The current default is RetentionNumber with 15 backups kept.", + "retentionNumber": "retentionNumber configures the retention policy based on the number of backups", + "retentionSize": "retentionSize configures the retention policy based on the size of backups", } func (RetentionPolicy) SwaggerDoc() map[string]string { @@ -73,7 +73,7 @@ func (RetentionPolicy) SwaggerDoc() map[string]string { var map_RetentionSizeConfig = map[string]string{ "": "RetentionSizeConfig specifies the configuration of the retention policy on the total size of backups", - "maxSizeOfBackupsGb": "MaxSizeOfBackupsGb defines the total size in GB of backups to retain. If the current total size backups exceeds MaxSizeOfBackupsGb then the oldest backup will be removed before a new backup is initiated.", + "maxSizeOfBackupsGb": "maxSizeOfBackupsGb defines the total size in GB of backups to retain. If the current total size backups exceeds MaxSizeOfBackupsGb then the oldest backup will be removed before a new backup is initiated.", } func (RetentionSizeConfig) SwaggerDoc() map[string]string { @@ -118,6 +118,53 @@ func (ClusterImagePolicyStatus) SwaggerDoc() map[string]string { return map_ClusterImagePolicyStatus } +var map_ClusterMonitoring = map[string]string{ + "": "ClusterMonitoring is the Custom Resource object which holds the current status of Cluster Monitoring Operator. CMO is a central component of the monitoring stack.\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. ClusterMonitoring is the Schema for the Cluster Monitoring Operators API", + "metadata": "metadata is the standard object metadata.", + "spec": "spec holds user configuration for the Cluster Monitoring Operator", + "status": "status holds observed values from the cluster. They may not be overridden.", +} + +func (ClusterMonitoring) SwaggerDoc() map[string]string { + return map_ClusterMonitoring +} + +var map_ClusterMonitoringList = map[string]string{ + "": "Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. 
These capabilities should not be used by applications needing long term support.", + "metadata": "metadata is the standard list metadata.", + "items": "items is a list of ClusterMonitoring", +} + +func (ClusterMonitoringList) SwaggerDoc() map[string]string { + return map_ClusterMonitoringList +} + +var map_ClusterMonitoringSpec = map[string]string{ + "": "ClusterMonitoringSpec defines the desired state of Cluster Monitoring Operator", + "userDefined": "userDefined set the deployment mode for user-defined monitoring in addition to the default platform monitoring.", +} + +func (ClusterMonitoringSpec) SwaggerDoc() map[string]string { + return map_ClusterMonitoringSpec +} + +var map_ClusterMonitoringStatus = map[string]string{ + "": "MonitoringOperatorStatus defines the observed state of MonitoringOperator", +} + +func (ClusterMonitoringStatus) SwaggerDoc() map[string]string { + return map_ClusterMonitoringStatus +} + +var map_UserDefinedMonitoring = map[string]string{ + "": "UserDefinedMonitoring config for user-defined projects.", + "mode": "mode defines the different configurations of UserDefinedMonitoring Valid values are Disabled and NamespaceIsolated Disabled disables monitoring for user-defined projects. This restricts the default monitoring stack, installed in the openshift-monitoring project, to monitor only platform namespaces, which prevents any custom monitoring configurations or resources from being applied to user-defined namespaces. NamespaceIsolated enables monitoring for user-defined projects with namespace-scoped tenancy. This ensures that metrics, alerts, and monitoring data are isolated at the namespace level.", +} + +func (UserDefinedMonitoring) SwaggerDoc() map[string]string { + return map_UserDefinedMonitoring +} + var map_FulcioCAWithRekor = map[string]string{ "": "FulcioCAWithRekor defines the root of trust based on the Fulcio certificate and the Rekor public key.", "fulcioCAData": "fulcioCAData contains inline base64-encoded data for the PEM format fulcio CA. fulcioCAData must be at most 8192 characters.", @@ -167,6 +214,27 @@ func (ImagePolicyStatus) SwaggerDoc() map[string]string { return map_ImagePolicyStatus } +var map_PKI = map[string]string{ + "": "PKI defines the root of trust based on Root CA(s) and corresponding intermediate certificates.", + "caRootsData": "caRootsData contains base64-encoded data of a certificate bundle PEM file, which contains one or more CA roots in the PEM format. The total length of the data must not exceed 8192 characters. ", + "caIntermediatesData": "caIntermediatesData contains base64-encoded data of a certificate bundle PEM file, which contains one or more intermediate certificates in the PEM format. The total length of the data must not exceed 8192 characters. caIntermediatesData requires caRootsData to be set. ", + "pkiCertificateSubject": "pkiCertificateSubject defines the requirements imposed on the subject to which the certificate was issued.", +} + +func (PKI) SwaggerDoc() map[string]string { + return map_PKI +} + +var map_PKICertificateSubject = map[string]string{ + "": "PKICertificateSubject defines the requirements imposed on the subject to which the certificate was issued.", + "email": "email specifies the expected email address imposed on the subject to which the certificate was issued, and must match the email address listed in the Subject Alternative Name (SAN) field of the certificate. 
The email should be a valid email address and at most 320 characters in length.", + "hostname": "hostname specifies the expected hostname imposed on the subject to which the certificate was issued, and it must match the hostname listed in the Subject Alternative Name (SAN) DNS field of the certificate. The hostname should be a valid dns 1123 subdomain name, optionally prefixed by '*.', and at most 253 characters in length. It should consist only of lowercase alphanumeric characters, hyphens, periods and the optional preceding asterisk.", +} + +func (PKICertificateSubject) SwaggerDoc() map[string]string { + return map_PKICertificateSubject +} + var map_Policy = map[string]string{ "": "Policy defines the verification policy for the items in the scopes list.", "rootOfTrust": "rootOfTrust specifies the root of trust for the policy.", @@ -217,9 +285,10 @@ func (PolicyMatchRemapIdentity) SwaggerDoc() map[string]string { var map_PolicyRootOfTrust = map[string]string{ "": "PolicyRootOfTrust defines the root of trust based on the selected policyType.", - "policyType": "policyType serves as the union's discriminator. Users are required to assign a value to this field, choosing one of the policy types that define the root of trust. \"PublicKey\" indicates that the policy relies on a sigstore publicKey and may optionally use a Rekor verification. \"FulcioCAWithRekor\" indicates that the policy is based on the Fulcio certification and incorporates a Rekor verification.", + "policyType": "policyType serves as the union's discriminator. Users are required to assign a value to this field, choosing one of the policy types that define the root of trust. \"PublicKey\" indicates that the policy relies on a sigstore publicKey and may optionally use a Rekor verification. \"FulcioCAWithRekor\" indicates that the policy is based on the Fulcio certification and incorporates a Rekor verification. \"PKI\" is a DevPreview feature that indicates that the policy is based on the certificates from Bring Your Own Public Key Infrastructure (BYOPKI). This value is enabled by turning on the SigstoreImageVerificationPKI feature gate.", "publicKey": "publicKey defines the root of trust based on a sigstore public key.", "fulcioCAWithRekor": "fulcioCAWithRekor defines the root of trust based on the Fulcio certificate and the Rekor public key. For more information about Fulcio and Rekor, please refer to the document at: https://github.com/sigstore/fulcio and https://github.com/sigstore/rekor", + "pki": "pki defines the root of trust based on Bring Your Own Public Key Infrastructure (BYOPKI) Root CA(s) and corresponding intermediate certificates.", } func (PolicyRootOfTrust) SwaggerDoc() map[string]string { @@ -238,8 +307,9 @@ func (PublicKey) SwaggerDoc() map[string]string { var map_GatherConfig = map[string]string{ "": "gatherConfig provides data gathering configuration options.", - "dataPolicy": "dataPolicy allows user to enable additional global obfuscation of the IP addresses and base domain in the Insights archive data. Valid values are \"None\" and \"ObfuscateNetworking\". When set to None the data is not obfuscated. When set to ObfuscateNetworking the IP addresses and the cluster domain name are obfuscated. When omitted, this means no opinion and the platform is left to choose a reasonable default, which is subject to change over time. The current default is None.", - "disabledGatherers": "disabledGatherers is a list of gatherers to be excluded from the gathering. 
All the gatherers can be disabled by providing \"all\" value. If all the gatherers are disabled, the Insights operator does not gather any data. The particular gatherers IDs can be found at https://github.com/openshift/insights-operator/blob/master/docs/gathered-data.md. Run the following command to get the names of last active gatherers: \"oc get insightsoperators.operator.openshift.io cluster -o json | jq '.status.gatherStatus.gatherers[].name'\" An example of disabling gatherers looks like this: `disabledGatherers: [\"clusterconfig/machine_configs\", \"workloads/workload_info\"]`", + "dataPolicy": "dataPolicy allows user to enable additional global obfuscation of the IP addresses and base domain in the Insights archive data. Valid values are \"None\" and \"ObfuscateNetworking\". When set to None the data is not obfuscated. When set to ObfuscateNetworking the IP addresses and the cluster domain name are obfuscated. When omitted, this means no opinion and the platform is left to choose a reasonable default, which is subject to change over time.", + "disabledGatherers": "disabledGatherers is a list of gatherers to be excluded from the gathering. All the gatherers can be disabled by providing \"all\" value. If all the gatherers are disabled, the Insights operator does not gather any data. The format for the disabledGatherer should be: {gatherer}/{function} where the function is optional. Gatherer consists of a lowercase letters only that may include underscores (_). Function consists of a lowercase letters only that may include underscores (_) and is separated from the gatherer by a forward slash (/). The particular gatherers IDs can be found at https://github.com/openshift/insights-operator/blob/master/docs/gathered-data.md. Run the following command to get the names of last active gatherers: \"oc get insightsoperators.operator.openshift.io cluster -o json | jq '.status.gatherStatus.gatherers[].name'\" An example of disabling gatherers looks like this: `disabledGatherers: [\"clusterconfig/machine_configs\", \"workloads/workload_info\"]`", + "storage": "storage is an optional field that allows user to define persistent storage for gathering jobs to store the Insights data archive. If omitted, the gathering job will use ephemeral storage.", } func (GatherConfig) SwaggerDoc() map[string]string { @@ -274,4 +344,33 @@ func (InsightsDataGatherSpec) SwaggerDoc() map[string]string { return map_InsightsDataGatherSpec } +var map_PersistentVolumeClaimReference = map[string]string{ + "": "persistentVolumeClaimReference is a reference to a PersistentVolumeClaim.", + "name": "name is a string that follows the DNS1123 subdomain format. It must be at most 253 characters in length, and must consist only of lower case alphanumeric characters, '-' and '.', and must start and end with an alphanumeric character.", +} + +func (PersistentVolumeClaimReference) SwaggerDoc() map[string]string { + return map_PersistentVolumeClaimReference +} + +var map_PersistentVolumeConfig = map[string]string{ + "": "persistentVolumeConfig provides configuration options for PersistentVolume storage.", + "claim": "claim is a required field that specifies the configuration of the PersistentVolumeClaim that will be used to store the Insights data archive. The PersistentVolumeClaim must be created in the openshift-insights namespace.", + "mountPath": "mountPath is an optional field specifying the directory where the PVC will be mounted inside the Insights data gathering Pod. 
When omitted, this means no opinion and the platform is left to choose a reasonable default, which is subject to change over time. The current default mount path is /var/lib/insights-operator The path may not exceed 1024 characters and must not contain a colon.", +} + +func (PersistentVolumeConfig) SwaggerDoc() map[string]string { + return map_PersistentVolumeConfig +} + +var map_Storage = map[string]string{ + "": "storage provides persistent storage configuration options for gathering jobs. If the type is set to PersistentVolume, then the PersistentVolume must be defined. If the type is set to Ephemeral, then the PersistentVolume must not be defined.", + "type": "type is a required field that specifies the type of storage that will be used to store the Insights data archive. Valid values are \"PersistentVolume\" and \"Ephemeral\". When set to Ephemeral, the Insights data archive is stored in the ephemeral storage of the gathering job. When set to PersistentVolume, the Insights data archive is stored in the PersistentVolume that is defined by the persistentVolume field.", + "persistentVolume": "persistentVolume is an optional field that specifies the PersistentVolume that will be used to store the Insights data archive. The PersistentVolume must be created in the openshift-insights namespace.", +} + +func (Storage) SwaggerDoc() map[string]string { + return map_Storage +} + // AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/github.com/openshift/api/console/v1/types_console_link.go b/vendor/github.com/openshift/api/console/v1/types_console_link.go index 24a5dbadc..977fcbda9 100644 --- a/vendor/github.com/openshift/api/console/v1/types_console_link.go +++ b/vendor/github.com/openshift/api/console/v1/types_console_link.go @@ -56,7 +56,7 @@ type ApplicationMenuSpec struct { // This can be any text that will appear as a subheading in the application menu dropdown. // A new section will be created if the text does not match text of an existing section. Section string `json:"section"` - // imageUrl is the URL for the icon used in front of the link in the application menu. + // imageURL is the URL for the icon used in front of the link in the application menu. // The URL must be an HTTPS URL or a Data URI. The image should be square and will be shown at 24x24 pixels. // +optional ImageURL string `json:"imageURL,omitempty"` diff --git a/vendor/github.com/openshift/api/console/v1/types_console_plugin.go b/vendor/github.com/openshift/api/console/v1/types_console_plugin.go index 24954687d..bf5e3cb99 100644 --- a/vendor/github.com/openshift/api/console/v1/types_console_plugin.go +++ b/vendor/github.com/openshift/api/console/v1/types_console_plugin.go @@ -26,7 +26,8 @@ type ConsolePlugin struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata"` - // +kubebuilder:validation:Required + // spec contains the desired configuration for the console plugin. + // +required Spec ConsolePluginSpec `json:"spec"` } @@ -34,20 +35,153 @@ type ConsolePlugin struct { type ConsolePluginSpec struct { // displayName is the display name of the plugin. // The dispalyName should be between 1 and 128 characters. - // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=128 DisplayName string `json:"displayName"` // backend holds the configuration of backend which is serving console's plugin . 
- // +kubebuilder:validation:Required + // +required Backend ConsolePluginBackend `json:"backend"` // proxy is a list of proxies that describe various service type // to which the plugin needs to connect to. + // +listType=atomic // +optional Proxy []ConsolePluginProxy `json:"proxy,omitempty"` // i18n is the configuration of plugin's localization resources. // +optional I18n ConsolePluginI18n `json:"i18n"` + // contentSecurityPolicy is a list of Content-Security-Policy (CSP) directives for the plugin. + // Each directive specifies a list of values, appropriate for the given directive type, + // for example a list of remote endpoints for fetch directives such as ScriptSrc. + // Console web application uses CSP to detect and mitigate certain types of attacks, + // such as cross-site scripting (XSS) and data injection attacks. + // Dynamic plugins should specify this field if need to load assets from outside + // the cluster or if violation reports are observed. Dynamic plugins should always prefer + // loading their assets from within the cluster, either by vendoring them, or fetching + // from a cluster service. + // CSP violation reports can be viewed in the browser's console logs during development and + // testing of the plugin in the OpenShift web console. + // Available directive types are DefaultSrc, ScriptSrc, StyleSrc, ImgSrc, FontSrc and ConnectSrc. + // Each of the available directives may be defined only once in the list. + // The value 'self' is automatically included in all fetch directives by the OpenShift web + // console's backend. + // For more information about the CSP directives, see: + // https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Security-Policy + // + // The OpenShift web console server aggregates the CSP directives and values across + // its own default values and all enabled ConsolePlugin CRs, merging them into a single + // policy string that is sent to the browser via `Content-Security-Policy` HTTP response header. + // + // Example: + // ConsolePlugin A directives: + // script-src: https://script1.com/, https://script2.com/ + // font-src: https://font1.com/ + // + // ConsolePlugin B directives: + // script-src: https://script2.com/, https://script3.com/ + // font-src: https://font2.com/ + // img-src: https://img1.com/ + // + // Unified set of CSP directives, passed to the OpenShift web console server: + // script-src: https://script1.com/, https://script2.com/, https://script3.com/ + // font-src: https://font1.com/, https://font2.com/ + // img-src: https://img1.com/ + // + // OpenShift web console server CSP response header: + // Content-Security-Policy: default-src 'self'; base-uri 'self'; script-src 'self' https://script1.com/ https://script2.com/ https://script3.com/; font-src 'self' https://font1.com/ https://font2.com/; img-src 'self' https://img1.com/; style-src 'self'; frame-src 'none'; object-src 'none' + // + // +openshift:enable:FeatureGate=ConsolePluginContentSecurityPolicy + // +kubebuilder:validation:MaxItems=5 + // +kubebuilder:validation:XValidation:rule="self.map(x, x.values.map(y, y.size()).sum()).sum() < 8192",message="the total combined size of values of all directives must not exceed 8192 (8kb)" + // +listType=map + // +listMapKey=directive + // +optional + ContentSecurityPolicy []ConsolePluginCSP `json:"contentSecurityPolicy"` +} + +// DirectiveType is an enumeration of OpenShift web console supported CSP directives. +// LoadType is an enumeration of i18n loading types. 
+// +kubebuilder:validation:Enum:="DefaultSrc";"ScriptSrc";"StyleSrc";"ImgSrc";"FontSrc";"ConnectSrc" +// +enum +type DirectiveType string + +const ( + // DefaultSrc directive serves as a fallback for the other CSP fetch directives. + // For more information about the DefaultSrc directive, see: + // https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Security-Policy/default-src + DefaultSrc DirectiveType = "DefaultSrc" + // ScriptSrc directive specifies valid sources for JavaScript. + // For more information about the ScriptSrc directive, see: + // https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Security-Policy/script-src + ScriptSrc DirectiveType = "ScriptSrc" + // StyleSrc directive specifies valid sources for stylesheets. + // For more information about the StyleSrc directive, see: + // https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Security-Policy/style-src + StyleSrc DirectiveType = "StyleSrc" + // ImgSrc directive specifies a valid sources of images and favicons. + // For more information about the ImgSrc directive, see: + // https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Security-Policy/img-src + ImgSrc DirectiveType = "ImgSrc" + // FontSrc directive specifies valid sources for fonts loaded using @font-face. + // For more information about the FontSrc directive, see: + // https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Security-Policy/font-src + FontSrc DirectiveType = "FontSrc" + // ConnectSrc directive restricts the URLs which can be loaded using script interfaces. + // For more information about the ConnectSrc directive, see: + // https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Security-Policy/connect-src + ConnectSrc DirectiveType = "ConnectSrc" +) + +// CSPDirectiveValue is single value for a Content-Security-Policy directive. +// Each directive value must have a maximum length of 1024 characters and must not contain +// whitespace, commas (,), semicolons (;) or single quotes ('). The value '*' is not permitted. +// +kubebuilder:validation:MinLength=1 +// +kubebuilder:validation:MaxLength=1024 +// +kubebuilder:validation:XValidation:rule="!self.contains(\"'\")",message="CSP directive value cannot contain a quote" +// +kubebuilder:validation:XValidation:rule="!self.matches('\\\\s')",message="CSP directive value cannot contain a whitespace" +// +kubebuilder:validation:XValidation:rule="!self.contains(',')",message="CSP directive value cannot contain a comma" +// +kubebuilder:validation:XValidation:rule="!self.contains(';')",message="CSP directive value cannot contain a semi-colon" +// +kubebuilder:validation:XValidation:rule="self != '*'",message="CSP directive value cannot be a wildcard" +type CSPDirectiveValue string + +// ConsolePluginCSP holds configuration for a specific CSP directive +type ConsolePluginCSP struct { + // directive specifies which Content-Security-Policy directive to configure. + // Available directive types are DefaultSrc, ScriptSrc, StyleSrc, ImgSrc, FontSrc and ConnectSrc. + // DefaultSrc directive serves as a fallback for the other CSP fetch directives. + // For more information about the DefaultSrc directive, see: + // https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Security-Policy/default-src + // ScriptSrc directive specifies valid sources for JavaScript. 
+ // For more information about the ScriptSrc directive, see: + // https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Security-Policy/script-src + // StyleSrc directive specifies valid sources for stylesheets. + // For more information about the StyleSrc directive, see: + // https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Security-Policy/style-src + // ImgSrc directive specifies a valid sources of images and favicons. + // For more information about the ImgSrc directive, see: + // https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Security-Policy/img-src + // FontSrc directive specifies valid sources for fonts loaded using @font-face. + // For more information about the FontSrc directive, see: + // https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Security-Policy/font-src + // ConnectSrc directive restricts the URLs which can be loaded using script interfaces. + // For more information about the ConnectSrc directive, see: + // https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Security-Policy/connect-src + // +required + Directive DirectiveType `json:"directive"` + // values defines an array of values to append to the console defaults for this directive. + // Each ConsolePlugin may define their own directives with their values. These will be set + // by the OpenShift web console's backend, as part of its Content-Security-Policy header. + // The array can contain at most 16 values. Each directive value must have a maximum length + // of 1024 characters and must not contain whitespace, commas (,), semicolons (;) or single + // quotes ('). The value '*' is not permitted. + // Each value in the array must be unique. + // + // +required + // +kubebuilder:validation:MinItems=1 + // +kubebuilder:validation:MaxItems=16 + // +kubebuilder:validation:XValidation:rule="self.all(x, self.exists_one(y, x == y))",message="each CSP directive value must be unique" + // +listType=atomic + Values []CSPDirectiveValue `json:"values"` } // LoadType is an enumeration of i18n loading types @@ -75,7 +209,7 @@ type ConsolePluginI18n struct { // When set to Preload, all localization resources are fetched when the plugin is loaded. // When set to Lazy, localization resources are lazily loaded as and when they are required by the console. // When omitted or set to the empty string, the behaviour is equivalent to Lazy type. - // +kubebuilder:validation:Required + // +required LoadType LoadType `json:"loadType"` } @@ -83,7 +217,7 @@ type ConsolePluginI18n struct { // to which console's backend will proxy the plugin's requests. type ConsolePluginProxy struct { // endpoint provides information about endpoint to which the request is proxied to. - // +kubebuilder:validation:Required + // +required Endpoint ConsolePluginProxyEndpoint `json:"endpoint"` // alias is a proxy name that identifies the plugin's proxy. An alias name // should be unique per plugin. The console backend exposes following @@ -95,7 +229,7 @@ type ConsolePluginProxy struct { // // /api/proxy/plugin/acm/search/pods?namespace=openshift-apiserver // - // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=128 // +kubebuilder:validation:Pattern=`^[A-Za-z0-9-_]+$` @@ -122,7 +256,7 @@ type ConsolePluginProxyEndpoint struct { // --- // + When handling unknown values, consumers should report an error and stop processing the plugin. 
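As a rough illustration of the contentSecurityPolicy field introduced above, a ConsolePlugin that appends extra ScriptSrc and FontSrc sources to the console defaults might be sketched as follows. The plugin name, namespace, Service and port are hypothetical, the directive values reuse the hosts from the aggregation example in the field documentation, and the ConsolePluginContentSecurityPolicy feature gate must be enabled for the field to be served.

apiVersion: console.openshift.io/v1
kind: ConsolePlugin
metadata:
  name: my-plugin                  # hypothetical plugin name
spec:
  displayName: My Plugin           # 1-128 characters
  backend:
    type: Service                  # plugin assets served by an in-cluster Service
    service:
      name: my-plugin-service      # hypothetical Service name, 1-128 characters
      namespace: my-plugin-ns      # hypothetical namespace
      port: 9443
  contentSecurityPolicy:           # at most 5 entries, each directive may appear only once
    - directive: ScriptSrc
      values:                      # 1-16 unique values, each at most 1024 characters, no '*'
        - https://script1.com/
        - https://script2.com/
    - directive: FontSrc
      values:
        - https://font1.com/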
// - // +kubebuilder:validation:Required + // +required // +unionDiscriminator Type ConsolePluginProxyType `json:"type"` // service is an in-cluster Service that the plugin will connect to. @@ -162,18 +296,18 @@ const ( // console's backend will proxy the plugin's requests. type ConsolePluginProxyServiceConfig struct { // name of Service that the plugin needs to connect to. - // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=128 Name string `json:"name"` // namespace of Service that the plugin needs to connect to - // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=128 Namespace string `json:"namespace"` // port on which the Service that the plugin needs to connect to // is listening on. - // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:Maximum:=65535 // +kubebuilder:validation:Minimum:=1 Port int32 `json:"port"` @@ -197,7 +331,7 @@ type ConsolePluginBackend struct { // --- // + When handling unknown values, consumers should report an error and stop processing the plugin. // - // +kubebuilder:validation:Required + // +required // +unionDiscriminator Type ConsolePluginBackendType `json:"type"` // service is a Kubernetes Service that exposes the plugin using a @@ -212,17 +346,17 @@ type ConsolePluginBackend struct { // console dynamic plugin assets. type ConsolePluginService struct { // name of Service that is serving the plugin assets. - // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=128 Name string `json:"name"` // namespace of Service that is serving the plugin assets. - // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=128 Namespace string `json:"namespace"` // port on which the Service that is serving the plugin is listening to. - // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:Maximum:=65535 // +kubebuilder:validation:Minimum:=1 Port int32 `json:"port"` diff --git a/vendor/github.com/openshift/api/console/v1/types_console_quick_start.go b/vendor/github.com/openshift/api/console/v1/types_console_quick_start.go index bb62fb8fc..1eef701e8 100644 --- a/vendor/github.com/openshift/api/console/v1/types_console_quick_start.go +++ b/vendor/github.com/openshift/api/console/v1/types_console_quick_start.go @@ -28,7 +28,6 @@ type ConsoleQuickStart struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata,omitempty"` - // +kubebuilder:validation:Required // +required Spec ConsoleQuickStartSpec `json:"spec"` } @@ -36,7 +35,6 @@ type ConsoleQuickStart struct { // ConsoleQuickStartSpec is the desired quick start configuration. type ConsoleQuickStartSpec struct { // displayName is the display name of the Quick Start. - // +kubebuilder:validation:Required // +kubebuilder:validation:MinLength=1 // +required DisplayName string `json:"displayName"` @@ -48,12 +46,10 @@ type ConsoleQuickStartSpec struct { // +optional Tags []string `json:"tags,omitempty"` // durationMinutes describes approximately how many minutes it will take to complete the Quick Start. - // +kubebuilder:validation:Required // +kubebuilder:validation:Minimum=1 // +required DurationMinutes int `json:"durationMinutes"` // description is the description of the Quick Start. 
(includes markdown) - // +kubebuilder:validation:Required // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=256 // +required @@ -62,12 +58,10 @@ type ConsoleQuickStartSpec struct { // +optional Prerequisites []string `json:"prerequisites,omitempty"` // introduction describes the purpose of the Quick Start. (includes markdown) - // +kubebuilder:validation:Required // +kubebuilder:validation:MinLength=1 // +required Introduction string `json:"introduction"` // tasks is the list of steps the user has to perform to complete the Quick Start. - // +kubebuilder:validation:Required // +kubebuilder:validation:MinItems=1 // +required Tasks []ConsoleQuickStartTask `json:"tasks"` @@ -87,12 +81,10 @@ type ConsoleQuickStartSpec struct { // ConsoleQuickStartTask is a single step in a Quick Start. type ConsoleQuickStartTask struct { // title describes the task and is displayed as a step heading. - // +kubebuilder:validation:Required // +kubebuilder:validation:MinLength=1 // +required Title string `json:"title"` // description describes the steps needed to complete the task. (includes markdown) - // +kubebuilder:validation:Required // +kubebuilder:validation:MinLength=1 // +required Description string `json:"description"` @@ -109,12 +101,10 @@ type ConsoleQuickStartTask struct { type ConsoleQuickStartTaskReview struct { // instructions contains steps that user needs to take in order // to validate his work after going through a task. (includes markdown) - // +kubebuilder:validation:Required // +kubebuilder:validation:MinLength=1 // +required Instructions string `json:"instructions"` // failedTaskHelp contains suggestions for a failed task review and is shown at the end of task. (includes markdown) - // +kubebuilder:validation:Required // +kubebuilder:validation:MinLength=1 // +required FailedTaskHelp string `json:"failedTaskHelp"` @@ -123,12 +113,10 @@ type ConsoleQuickStartTaskReview struct { // ConsoleQuickStartTaskSummary contains information about a passed step. type ConsoleQuickStartTaskSummary struct { // success describes the succesfully passed task. - // +kubebuilder:validation:Required // +kubebuilder:validation:MinLength=1 // +required Success string `json:"success"` // failed briefly describes the unsuccessfully passed task. (includes markdown) - // +kubebuilder:validation:Required // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=128 // +required diff --git a/vendor/github.com/openshift/api/console/v1/types_console_sample.go b/vendor/github.com/openshift/api/console/v1/types_console_sample.go index c0175bf9b..bd0f65696 100644 --- a/vendor/github.com/openshift/api/console/v1/types_console_sample.go +++ b/vendor/github.com/openshift/api/console/v1/types_console_sample.go @@ -25,7 +25,7 @@ type ConsoleSample struct { metav1.ObjectMeta `json:"metadata"` // spec contains configuration for a console sample. - // +kubebuilder:validation:Required + // +required Spec ConsoleSampleSpec `json:"spec"` } @@ -35,7 +35,7 @@ type ConsoleSampleSpec struct { // title is the display name of the sample. // // It is required and must be no more than 50 characters in length. - // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=50 Title string `json:"title"` @@ -46,7 +46,7 @@ type ConsoleSampleSpec struct { // // The abstract is shown on the sample card tile below the title and provider // and is limited to three lines of content. 
- // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:MaxLength=100 Abstract string `json:"abstract"` @@ -56,7 +56,7 @@ type ConsoleSampleSpec struct { // // It is a README.md-like content for additional information, links, pre-conditions, and other instructions. // It will be rendered as Markdown so that it can contain line breaks, links, and other simple formatting. - // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:MaxLength=4096 Description string `json:"description"` @@ -119,7 +119,7 @@ type ConsoleSampleSpec struct { // source defines where to deploy the sample service from. // The sample may be sourced from an external git repository or container image. - // +kubebuilder:validation:Required + // +required Source ConsoleSampleSource `json:"source"` } @@ -143,7 +143,7 @@ const ( type ConsoleSampleSource struct { // type of the sample, currently supported: "GitImport";"ContainerImport" // +unionDiscriminator - // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:Enum:="GitImport";"ContainerImport" Type ConsoleSampleSourceType `json:"type"` @@ -161,7 +161,7 @@ type ConsoleSampleSource struct { // ConsoleSampleGitImportSource let the user import code from a public Git repository. type ConsoleSampleGitImportSource struct { // repository contains the reference to the actual Git repository. - // +kubebuilder:validation:Required + // +required Repository ConsoleSampleGitImportSourceRepository `json:"repository"` // service contains configuration for the Service resource created for this sample. // +optional @@ -183,7 +183,7 @@ type ConsoleSampleGitImportSourceRepository struct { // - https://bitbucket.org// // // The url must have a maximum length of 256 characters. - // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=256 // +kubebuilder:validation:Pattern=`^https:\/\/(github.com|gitlab.com|bitbucket.org)\/[a-zA-Z0-9-]+\/[a-zA-Z0-9-]+(.git)?$` @@ -232,7 +232,7 @@ type ConsoleSampleContainerImportSource struct { // - quay.io// // - quay.io//@sha256: // - quay.io//: - // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=256 Image string `json:"image"` diff --git a/vendor/github.com/openshift/api/console/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/console/v1/zz_generated.deepcopy.go index b7cd66da0..d4fefaa37 100644 --- a/vendor/github.com/openshift/api/console/v1/zz_generated.deepcopy.go +++ b/vendor/github.com/openshift/api/console/v1/zz_generated.deepcopy.go @@ -416,6 +416,27 @@ func (in *ConsolePluginBackend) DeepCopy() *ConsolePluginBackend { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConsolePluginCSP) DeepCopyInto(out *ConsolePluginCSP) { + *out = *in + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]CSPDirectiveValue, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConsolePluginCSP. +func (in *ConsolePluginCSP) DeepCopy() *ConsolePluginCSP { + if in == nil { + return nil + } + out := new(ConsolePluginCSP) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
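For the ConsoleSample changes above, a minimal GitImport sample that satisfies the required title, abstract, description and source fields could look like the sketch below. The object name, title text and repository are hypothetical, and the gitImport key for ConsoleSampleGitImportSource is assumed since its json tag sits outside this hunk.

apiVersion: console.openshift.io/v1
kind: ConsoleSample
metadata:
  name: nodejs-example             # hypothetical
spec:
  title: Node.js sample app        # required, at most 50 characters
  abstract: Build and deploy a small Node.js application.   # required, at most 100 characters
  description: |                   # required, Markdown, at most 4096 characters
    A README-style description with links and pre-conditions.
  source:
    type: GitImport                # union discriminator: GitImport or ContainerImport
    gitImport:                     # field name assumed, not shown in this hunk
      repository:
        url: https://github.com/openshift/nodejs-example   # must match the GitHub/GitLab/Bitbucket pattern above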
func (in *ConsolePluginI18n) DeepCopyInto(out *ConsolePluginI18n) { *out = *in @@ -547,6 +568,13 @@ func (in *ConsolePluginSpec) DeepCopyInto(out *ConsolePluginSpec) { } } out.I18n = in.I18n + if in.ContentSecurityPolicy != nil { + in, out := &in.ContentSecurityPolicy, &out.ContentSecurityPolicy + *out = make([]ConsolePluginCSP, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } return } diff --git a/vendor/github.com/openshift/api/console/v1/zz_generated.featuregated-crd-manifests.yaml b/vendor/github.com/openshift/api/console/v1/zz_generated.featuregated-crd-manifests.yaml index 98abc7147..250f873a0 100644 --- a/vendor/github.com/openshift/api/console/v1/zz_generated.featuregated-crd-manifests.yaml +++ b/vendor/github.com/openshift/api/console/v1/zz_generated.featuregated-crd-manifests.yaml @@ -137,7 +137,8 @@ consoleplugins.console.openshift.io: CRDName: consoleplugins.console.openshift.io Capability: Console Category: "" - FeatureGates: [] + FeatureGates: + - ConsolePluginContentSecurityPolicy FilenameOperatorName: "" FilenameOperatorOrdering: "90" FilenameRunLevel: "" diff --git a/vendor/github.com/openshift/api/console/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/console/v1/zz_generated.swagger_doc_generated.go index c6f2070fa..a02cbf7c1 100644 --- a/vendor/github.com/openshift/api/console/v1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/console/v1/zz_generated.swagger_doc_generated.go @@ -91,7 +91,7 @@ func (ConsoleExternalLogLinkSpec) SwaggerDoc() map[string]string { var map_ApplicationMenuSpec = map[string]string{ "": "ApplicationMenuSpec is the specification of the desired section and icon used for the link in the application menu.", "section": "section is the section of the application menu in which the link should appear. This can be any text that will appear as a subheading in the application menu dropdown. A new section will be created if the text does not match text of an existing section.", - "imageURL": "imageUrl is the URL for the icon used in front of the link in the application menu. The URL must be an HTTPS URL or a Data URI. The image should be square and will be shown at 24x24 pixels.", + "imageURL": "imageURL is the URL for the icon used in front of the link in the application menu. The URL must be an HTTPS URL or a Data URI. The image should be square and will be shown at 24x24 pixels.", } func (ApplicationMenuSpec) SwaggerDoc() map[string]string { @@ -171,6 +171,7 @@ func (ConsoleNotificationSpec) SwaggerDoc() map[string]string { var map_ConsolePlugin = map[string]string{ "": "ConsolePlugin is an extension for customizing OpenShift web console by dynamically loading code from another service running on the cluster.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec contains the desired configuration for the console plugin.", } func (ConsolePlugin) SwaggerDoc() map[string]string { @@ -187,6 +188,16 @@ func (ConsolePluginBackend) SwaggerDoc() map[string]string { return map_ConsolePluginBackend } +var map_ConsolePluginCSP = map[string]string{ + "": "ConsolePluginCSP holds configuration for a specific CSP directive", + "directive": "directive specifies which Content-Security-Policy directive to configure. 
Available directive types are DefaultSrc, ScriptSrc, StyleSrc, ImgSrc, FontSrc and ConnectSrc. DefaultSrc directive serves as a fallback for the other CSP fetch directives. For more information about the DefaultSrc directive, see: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Security-Policy/default-src ScriptSrc directive specifies valid sources for JavaScript. For more information about the ScriptSrc directive, see: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Security-Policy/script-src StyleSrc directive specifies valid sources for stylesheets. For more information about the StyleSrc directive, see: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Security-Policy/style-src ImgSrc directive specifies a valid sources of images and favicons. For more information about the ImgSrc directive, see: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Security-Policy/img-src FontSrc directive specifies valid sources for fonts loaded using @font-face. For more information about the FontSrc directive, see: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Security-Policy/font-src ConnectSrc directive restricts the URLs which can be loaded using script interfaces. For more information about the ConnectSrc directive, see: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Security-Policy/connect-src", + "values": "values defines an array of values to append to the console defaults for this directive. Each ConsolePlugin may define their own directives with their values. These will be set by the OpenShift web console's backend, as part of its Content-Security-Policy header. The array can contain at most 16 values. Each directive value must have a maximum length of 1024 characters and must not contain whitespace, commas (,), semicolons (;) or single quotes ('). The value '*' is not permitted. Each value in the array must be unique.", +} + +func (ConsolePluginCSP) SwaggerDoc() map[string]string { + return map_ConsolePluginCSP +} + var map_ConsolePluginI18n = map[string]string{ "": "ConsolePluginI18n holds information on localization resources that are served by the dynamic plugin.", "loadType": "loadType indicates how the plugin's localization resource should be loaded. Valid values are Preload, Lazy and the empty string. When set to Preload, all localization resources are fetched when the plugin is loaded. When set to Lazy, localization resources are lazily loaded as and when they are required by the console. When omitted or set to the empty string, the behaviour is equivalent to Lazy type.", @@ -251,11 +262,12 @@ func (ConsolePluginService) SwaggerDoc() map[string]string { } var map_ConsolePluginSpec = map[string]string{ - "": "ConsolePluginSpec is the desired plugin configuration.", - "displayName": "displayName is the display name of the plugin. The dispalyName should be between 1 and 128 characters.", - "backend": "backend holds the configuration of backend which is serving console's plugin .", - "proxy": "proxy is a list of proxies that describe various service type to which the plugin needs to connect to.", - "i18n": "i18n is the configuration of plugin's localization resources.", + "": "ConsolePluginSpec is the desired plugin configuration.", + "displayName": "displayName is the display name of the plugin. 
The dispalyName should be between 1 and 128 characters.", + "backend": "backend holds the configuration of backend which is serving console's plugin .", + "proxy": "proxy is a list of proxies that describe various service type to which the plugin needs to connect to.", + "i18n": "i18n is the configuration of plugin's localization resources.", + "contentSecurityPolicy": "contentSecurityPolicy is a list of Content-Security-Policy (CSP) directives for the plugin. Each directive specifies a list of values, appropriate for the given directive type, for example a list of remote endpoints for fetch directives such as ScriptSrc. Console web application uses CSP to detect and mitigate certain types of attacks, such as cross-site scripting (XSS) and data injection attacks. Dynamic plugins should specify this field if need to load assets from outside the cluster or if violation reports are observed. Dynamic plugins should always prefer loading their assets from within the cluster, either by vendoring them, or fetching from a cluster service. CSP violation reports can be viewed in the browser's console logs during development and testing of the plugin in the OpenShift web console. Available directive types are DefaultSrc, ScriptSrc, StyleSrc, ImgSrc, FontSrc and ConnectSrc. Each of the available directives may be defined only once in the list. The value 'self' is automatically included in all fetch directives by the OpenShift web console's backend. For more information about the CSP directives, see: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Security-Policy\n\nThe OpenShift web console server aggregates the CSP directives and values across its own default values and all enabled ConsolePlugin CRs, merging them into a single policy string that is sent to the browser via `Content-Security-Policy` HTTP response header.\n\nExample:\n ConsolePlugin A directives:\n script-src: https://script1.com/, https://script2.com/\n font-src: https://font1.com/\n\n ConsolePlugin B directives:\n script-src: https://script2.com/, https://script3.com/\n font-src: https://font2.com/\n img-src: https://img1.com/\n\n Unified set of CSP directives, passed to the OpenShift web console server:\n script-src: https://script1.com/, https://script2.com/, https://script3.com/\n font-src: https://font1.com/, https://font2.com/\n img-src: https://img1.com/\n\n OpenShift web console server CSP response header:\n Content-Security-Policy: default-src 'self'; base-uri 'self'; script-src 'self' https://script1.com/ https://script2.com/ https://script3.com/; font-src 'self' https://font1.com/ https://font2.com/; img-src 'self' https://img1.com/; style-src 'self'; frame-src 'none'; object-src 'none'", } func (ConsolePluginSpec) SwaggerDoc() map[string]string { diff --git a/vendor/github.com/openshift/api/envtest-releases.yaml b/vendor/github.com/openshift/api/envtest-releases.yaml index 5651bbcc9..e3a8c94cf 100644 --- a/vendor/github.com/openshift/api/envtest-releases.yaml +++ b/vendor/github.com/openshift/api/envtest-releases.yaml @@ -25,3 +25,29 @@ releases: envtest-v1.31.1-linux-arm64.tar.gz: hash: 86fa42c6a3d92e438e35d6066587d0e4f36b910885e10520868959ece2fe740d99abc735f69d6ebe8920291f70d3819b169ad5ddd2db805f8f56a3b83eee3893 selfLink: https://storage.googleapis.com/openshift-kubebuilder-tools/envtest-v1.31.1-linux-arm64.tar.gz + v1.31.2: + envtest-v1.31.2-darwin-amd64.tar.gz: + hash: 4356c4495be7adc311868569bd69c5c17bfdabc243db3c656ac598be87698647e59d030a5f3c659b5ee0084bb0a9d33ea1faa2f5abfe0d762ec3368877cfd17f + 
selfLink: https://storage.googleapis.com/openshift-kubebuilder-tools/envtest-v1.31.2-darwin-amd64.tar.gz + envtest-v1.31.2-darwin-arm64.tar.gz: + hash: e1a759927343dfbbdff2909b7ea0046eb5c6840aea763b8d5d8229931fa35dcdcd5659fdace7a4eab1e41bc0b04c683aa96508f26aa38b3b5d3945799cb02324 + selfLink: https://storage.googleapis.com/openshift-kubebuilder-tools/envtest-v1.31.2-darwin-arm64.tar.gz + envtest-v1.31.2-linux-amd64.tar.gz: + hash: c9efa849326afc471aff9ee17109491fe3e4d6d76b6d24e6ee8787ef44776abdc57ce6e96f013abf86c91d4ee94660e617a1623d9a71dd95238b6b6bd800aef7 + selfLink: https://storage.googleapis.com/openshift-kubebuilder-tools/envtest-v1.31.2-linux-amd64.tar.gz + envtest-v1.31.2-linux-arm64.tar.gz: + hash: f6ad42b701537ddfd6873e9700f8e73927763878eaf36a5437d71fb62bffda91ce7f502e13f9ef4b508d37973ccddd3d847eba0d7150f7acb5495fd82558fbad + selfLink: https://storage.googleapis.com/openshift-kubebuilder-tools/envtest-v1.31.2-linux-arm64.tar.gz + v1.32.1: + envtest-v1.32.1-darwin-amd64.tar.gz: + hash: e81d0b8e9d58bcefc8e741e298698670a39bf77923623fb8554b1a4b201a033678d2949e18dcf6933722c69f954b0de93c8f7136ff0641f69e5128a5a3fb6b26 + selfLink: https://storage.googleapis.com/openshift-kubebuilder-tools/envtest-v1.32.1-darwin-amd64.tar.gz + envtest-v1.32.1-darwin-arm64.tar.gz: + hash: 57be0af5cbf72b659c14f955205fa9a95da9af9213bc9b6a5a1090394a0cd5f98c57127b3d8a69dc349bc33112f52505a6f030369bb09a27f9fb2c13a66475d1 + selfLink: https://storage.googleapis.com/openshift-kubebuilder-tools/envtest-v1.32.1-darwin-arm64.tar.gz + envtest-v1.32.1-linux-amd64.tar.gz: + hash: 711c6d6d9443dce6b465403149837d636f440091b77ec45753d9c60fea0d6ba7811b0045ebf16f7b74504d1f47fcf1da90d7c810a18be31311c90f068d9fd1fd + selfLink: https://storage.googleapis.com/openshift-kubebuilder-tools/envtest-v1.32.1-linux-amd64.tar.gz + envtest-v1.32.1-linux-arm64.tar.gz: + hash: 0bc52e6344ae0753715bc39c2878696c72a3129356df484835586165238361c109ad3e1ebd354af8ecdf1026c3a2b98ed225ad0c6dd348cb3ff128a7cfdcc2f8 + selfLink: https://storage.googleapis.com/openshift-kubebuilder-tools/envtest-v1.32.1-linux-arm64.tar.gz diff --git a/vendor/github.com/openshift/api/features.md b/vendor/github.com/openshift/api/features.md index 660d3bff1..2ebfc0b82 100644 --- a/vendor/github.com/openshift/api/features.md +++ b/vendor/github.com/openshift/api/features.md @@ -6,37 +6,48 @@ | MachineAPIMigration| | | | | | | | MachineAPIOperatorDisableMachineHealthCheckController| | | | | | | | MultiArchInstallAzure| | | | | | | -| GatewayAPI| | | Enabled | Enabled | | | +| ClusterVersionOperatorConfiguration| | | Enabled | Enabled | | | +| Example2| | | Enabled | Enabled | | | +| NewOLMCatalogdAPIV1Metas| | | | Enabled | | Enabled | +| SELinuxChangePolicy| | | Enabled | Enabled | | | +| SELinuxMount| | | Enabled | Enabled | | | +| ShortCertRotation| | | Enabled | Enabled | | | +| SigstoreImageVerificationPKI| | | Enabled | Enabled | | | +| NewOLM| | Enabled | | Enabled | | Enabled | | AWSClusterHostedDNS| | | Enabled | Enabled | Enabled | Enabled | -| AdditionalRoutingCapabilities| | | Enabled | Enabled | Enabled | Enabled | | AutomatedEtcdBackup| | | Enabled | Enabled | Enabled | Enabled | | BootcNodeManagement| | | Enabled | Enabled | Enabled | Enabled | -| CSIDriverSharedResource| | | Enabled | Enabled | Enabled | Enabled | +| CPMSMachineNamePrefix| | | Enabled | Enabled | Enabled | Enabled | | ClusterMonitoringConfig| | | Enabled | Enabled | Enabled | Enabled | +| ConsolePluginContentSecurityPolicy| | | Enabled | Enabled | Enabled | Enabled | | DNSNameResolver| | | Enabled 
| Enabled | Enabled | Enabled | +| DualReplica| | | Enabled | Enabled | Enabled | Enabled | +| DyanmicServiceEndpointIBMCloud| | | Enabled | Enabled | Enabled | Enabled | | DynamicResourceAllocation| | | Enabled | Enabled | Enabled | Enabled | | EtcdBackendQuota| | | Enabled | Enabled | Enabled | Enabled | | Example| | | Enabled | Enabled | Enabled | Enabled | | GCPClusterHostedDNS| | | Enabled | Enabled | Enabled | Enabled | +| GCPCustomAPIEndpoints| | | Enabled | Enabled | Enabled | Enabled | +| GatewayAPI| | | Enabled | Enabled | Enabled | Enabled | +| GatewayAPIController| | | Enabled | Enabled | Enabled | Enabled | +| HighlyAvailableArbiter| | | Enabled | Enabled | Enabled | Enabled | | ImageStreamImportMode| | | Enabled | Enabled | Enabled | Enabled | | IngressControllerDynamicConfigurationManager| | | Enabled | Enabled | Enabled | Enabled | | InsightsConfig| | | Enabled | Enabled | Enabled | Enabled | | InsightsConfigAPI| | | Enabled | Enabled | Enabled | Enabled | | InsightsOnDemandDataGather| | | Enabled | Enabled | Enabled | Enabled | | InsightsRuntimeExtractor| | | Enabled | Enabled | Enabled | Enabled | +| KMSEncryptionProvider| | | Enabled | Enabled | Enabled | Enabled | | MachineAPIProviderOpenStack| | | Enabled | Enabled | Enabled | Enabled | | MachineConfigNodes| | | Enabled | Enabled | Enabled | Enabled | -| ManagedBootImagesAWS| | | Enabled | Enabled | Enabled | Enabled | | MaxUnavailableStatefulSet| | | Enabled | Enabled | Enabled | Enabled | | MetricsCollectionProfiles| | | Enabled | Enabled | Enabled | Enabled | | MinimumKubeletVersion| | | Enabled | Enabled | Enabled | Enabled | | MixedCPUsAllocation| | | Enabled | Enabled | Enabled | Enabled | -| NetworkSegmentation| | | Enabled | Enabled | Enabled | Enabled | -| NewOLM| | | Enabled | Enabled | Enabled | Enabled | | NodeSwap| | | Enabled | Enabled | Enabled | Enabled | +| NutanixMultiSubnets| | | Enabled | Enabled | Enabled | Enabled | | OVNObservability| | | Enabled | Enabled | Enabled | Enabled | | OnClusterBuild| | | Enabled | Enabled | Enabled | Enabled | -| PersistentIPsForVirtualization| | | Enabled | Enabled | Enabled | Enabled | | PinnedImages| | | Enabled | Enabled | Enabled | Enabled | | PlatformOperators| | | Enabled | Enabled | Enabled | Enabled | | ProcMountType| | | Enabled | Enabled | Enabled | Enabled | @@ -49,12 +60,15 @@ | UpgradeStatus| | | Enabled | Enabled | Enabled | Enabled | | UserNamespacesPodSecurityStandards| | | Enabled | Enabled | Enabled | Enabled | | UserNamespacesSupport| | | Enabled | Enabled | Enabled | Enabled | +| VSphereConfigurableMaxAllowedBlockVolumesPerNode| | | Enabled | Enabled | Enabled | Enabled | +| VSphereHostVMGroupZonal| | | Enabled | Enabled | Enabled | Enabled | +| VSphereMultiDisk| | | Enabled | Enabled | Enabled | Enabled | | VSphereMultiNetworks| | | Enabled | Enabled | Enabled | Enabled | -| VSphereMultiVCenters| | | Enabled | Enabled | Enabled | Enabled | | VolumeAttributesClass| | | Enabled | Enabled | Enabled | Enabled | | VolumeGroupSnapshot| | | Enabled | Enabled | Enabled | Enabled | | ExternalOIDC| Enabled | | Enabled | Enabled | Enabled | Enabled | | AWSEFSDriverVolumeMetrics| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | +| AdditionalRoutingCapabilities| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | | AdminNetworkPolicy| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | | AlibabaPlatform| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | | AzureWorkloadIdentity| Enabled | Enabled | Enabled | Enabled | 
Enabled | Enabled | @@ -68,15 +82,19 @@ | IngressControllerLBSubnetsAWS| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | | KMSv1| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | | ManagedBootImages| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | +| ManagedBootImagesAWS| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | | MultiArchInstallAWS| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | | MultiArchInstallGCP| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | | NetworkDiagnosticsConfig| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | | NetworkLiveMigration| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | +| NetworkSegmentation| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | | NodeDisruptionPolicy| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | | OpenShiftPodSecurityAdmission| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | +| PersistentIPsForVirtualization| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | | PrivateHostedZoneAWS| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | | SetEIPForNLBIngressController| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | | VSphereControlPlaneMachineSet| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | | VSphereDriverConfiguration| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | +| VSphereMultiVCenters| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | | VSphereStaticIPs| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | | ValidatingAdmissionPolicy| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | diff --git a/vendor/github.com/openshift/api/helm/v1beta1/types_helm_chart_repository.go b/vendor/github.com/openshift/api/helm/v1beta1/types_helm_chart_repository.go index 91f25fb67..793cb1938 100644 --- a/vendor/github.com/openshift/api/helm/v1beta1/types_helm_chart_repository.go +++ b/vendor/github.com/openshift/api/helm/v1beta1/types_helm_chart_repository.go @@ -27,7 +27,6 @@ type HelmChartRepository struct { metav1.ObjectMeta `json:"metadata,omitempty"` // spec holds user settable values for configuration - // +kubebuilder:validation:Required // +required Spec HelmChartRepositorySpec `json:"spec"` @@ -100,5 +99,7 @@ type HelmChartRepositoryStatus struct { // conditions is a list of conditions and their statuses // +optional + // +listType=map + // +listMapKey=type Conditions []metav1.Condition `json:"conditions,omitempty"` } diff --git a/vendor/github.com/openshift/api/helm/v1beta1/types_project_helm_chart_repository.go b/vendor/github.com/openshift/api/helm/v1beta1/types_project_helm_chart_repository.go index 37ff581c1..8049c4fe5 100644 --- a/vendor/github.com/openshift/api/helm/v1beta1/types_project_helm_chart_repository.go +++ b/vendor/github.com/openshift/api/helm/v1beta1/types_project_helm_chart_repository.go @@ -26,7 +26,6 @@ type ProjectHelmChartRepository struct { metav1.ObjectMeta `json:"metadata,omitempty"` // spec holds user settable values for configuration - // +kubebuilder:validation:Required // +required Spec ProjectHelmChartRepositorySpec `json:"spec"` diff --git a/vendor/github.com/openshift/api/image/v1/generated.proto b/vendor/github.com/openshift/api/image/v1/generated.proto index 6b5f24cb2..dabdc6d84 100644 --- a/vendor/github.com/openshift/api/image/v1/generated.proto +++ b/vendor/github.com/openshift/api/image/v1/generated.proto @@ -47,39 +47,39 @@ message Image { // More info: 
https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // DockerImageReference is the string that can be used to pull this image. + // dockerImageReference is the string that can be used to pull this image. optional string dockerImageReference = 2; - // DockerImageMetadata contains metadata about this image + // dockerImageMetadata contains metadata about this image // +patchStrategy=replace // +kubebuilder:pruning:PreserveUnknownFields optional .k8s.io.apimachinery.pkg.runtime.RawExtension dockerImageMetadata = 3; - // DockerImageMetadataVersion conveys the version of the object, which if empty defaults to "1.0" + // dockerImageMetadataVersion conveys the version of the object, which if empty defaults to "1.0" optional string dockerImageMetadataVersion = 4; - // DockerImageManifest is the raw JSON of the manifest + // dockerImageManifest is the raw JSON of the manifest optional string dockerImageManifest = 5; - // DockerImageLayers represents the layers in the image. May not be set if the image does not define that data or if the image represents a manifest list. + // dockerImageLayers represents the layers in the image. May not be set if the image does not define that data or if the image represents a manifest list. repeated ImageLayer dockerImageLayers = 6; - // Signatures holds all signatures of the image. + // signatures holds all signatures of the image. // +patchMergeKey=name // +patchStrategy=merge repeated ImageSignature signatures = 7; - // DockerImageSignatures provides the signatures as opaque blobs. This is a part of manifest schema v1. + // dockerImageSignatures provides the signatures as opaque blobs. This is a part of manifest schema v1. repeated bytes dockerImageSignatures = 8; - // DockerImageManifestMediaType specifies the mediaType of manifest. This is a part of manifest schema v2. + // dockerImageManifestMediaType specifies the mediaType of manifest. This is a part of manifest schema v2. optional string dockerImageManifestMediaType = 9; - // DockerImageConfig is a JSON blob that the runtime uses to set up the container. This is a part of manifest schema v2. + // dockerImageConfig is a JSON blob that the runtime uses to set up the container. This is a part of manifest schema v2. // Will not be set when the image represents a manifest list. optional string dockerImageConfig = 10; - // DockerImageManifests holds information about sub-manifests when the image represents a manifest list. + // dockerImageManifests holds information about sub-manifests when the image represents a manifest list. // When this field is present, no DockerImageLayers should be specified. repeated ImageManifest dockerImageManifests = 11; } @@ -114,56 +114,56 @@ message ImageBlobReferences { // ImageImportSpec describes a request to import a specific image. 
message ImageImportSpec { - // From is the source of an image to import; only kind DockerImage is allowed + // from is the source of an image to import; only kind DockerImage is allowed optional .k8s.io.api.core.v1.ObjectReference from = 1; - // To is a tag in the current image stream to assign the imported image to, if name is not specified the default tag from from.name will be used + // to is a tag in the current image stream to assign the imported image to, if name is not specified the default tag from from.name will be used optional .k8s.io.api.core.v1.LocalObjectReference to = 2; - // ImportPolicy is the policy controlling how the image is imported + // importPolicy is the policy controlling how the image is imported optional TagImportPolicy importPolicy = 3; - // ReferencePolicy defines how other components should consume the image + // referencePolicy defines how other components should consume the image optional TagReferencePolicy referencePolicy = 5; - // IncludeManifest determines if the manifest for each image is returned in the response + // includeManifest determines if the manifest for each image is returned in the response optional bool includeManifest = 4; } // ImageImportStatus describes the result of an image import. message ImageImportStatus { - // Status is the status of the image import, including errors encountered while retrieving the image + // status is the status of the image import, including errors encountered while retrieving the image optional .k8s.io.apimachinery.pkg.apis.meta.v1.Status status = 1; - // Image is the metadata of that image, if the image was located + // image is the metadata of that image, if the image was located optional Image image = 2; - // Tag is the tag this image was located under, if any + // tag is the tag this image was located under, if any optional string tag = 3; - // Manifests holds sub-manifests metadata when importing a manifest list + // manifests holds sub-manifests metadata when importing a manifest list repeated Image manifests = 4; } // ImageLayer represents a single layer of the image. Some images may have multiple layers. Some may have none. message ImageLayer { - // Name of the layer as defined by the underlying store. + // name of the layer as defined by the underlying store. optional string name = 1; - // Size of the layer in bytes as defined by the underlying store. + // size of the layer in bytes as defined by the underlying store. optional int64 size = 2; - // MediaType of the referenced object. + // mediaType of the referenced object. optional string mediaType = 3; } // ImageLayerData contains metadata about an image layer. message ImageLayerData { - // Size of the layer in bytes as defined by the underlying store. This field is + // size of the layer in bytes as defined by the underlying store. This field is // optional if the necessary information about size is not available. optional int64 size = 1; - // MediaType of the referenced object. + // mediaType of the referenced object. optional string mediaType = 2; } @@ -176,7 +176,7 @@ message ImageList { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // Items is a list of images + // items is a list of images repeated Image items = 2; } @@ -194,23 +194,23 @@ message ImageLookupPolicy { // ImageManifest represents sub-manifests of a manifest list. The Digest field points to a regular // Image object. 
message ImageManifest { - // Digest is the unique identifier for the manifest. It refers to an Image object. + // digest is the unique identifier for the manifest. It refers to an Image object. optional string digest = 1; - // MediaType defines the type of the manifest, possible values are application/vnd.oci.image.manifest.v1+json, + // mediaType defines the type of the manifest, possible values are application/vnd.oci.image.manifest.v1+json, // application/vnd.docker.distribution.manifest.v2+json or application/vnd.docker.distribution.manifest.v1+json. optional string mediaType = 2; - // ManifestSize represents the size of the raw object contents, in bytes. + // manifestSize represents the size of the raw object contents, in bytes. optional int64 manifestSize = 3; - // Architecture specifies the supported CPU architecture, for example `amd64` or `ppc64le`. + // architecture specifies the supported CPU architecture, for example `amd64` or `ppc64le`. optional string architecture = 4; - // OS specifies the operating system, for example `linux`. + // os specifies the operating system, for example `linux`. optional string os = 5; - // Variant is an optional field repreenting a variant of the CPU, for example v6 to specify a particular CPU + // variant is an optional field repreenting a variant of the CPU, for example v6 to specify a particular CPU // variant of the ARM CPU. optional string variant = 6; } @@ -234,7 +234,7 @@ message ImageSignature { // Required: An opaque binary string which is an image's signature. optional bytes content = 3; - // Conditions represent the latest available observations of a signature's current state. + // conditions represent the latest available observations of a signature's current state. // +patchMergeKey=type // +patchStrategy=merge repeated SignatureCondition conditions = 4; @@ -280,11 +280,11 @@ message ImageStream { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // Spec describes the desired state of this stream + // spec describes the desired state of this stream // +optional optional ImageStreamSpec spec = 2; - // Status describes the current state of this stream + // status describes the current state of this stream // +optional optional ImageStreamStatus status = 3; } @@ -309,7 +309,7 @@ message ImageStreamImage { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // Image associated with the ImageStream and image name. + // image associated with the ImageStream and image name. optional Image image = 2; } @@ -329,36 +329,36 @@ message ImageStreamImport { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // Spec is a description of the images that the user wishes to import + // spec is a description of the images that the user wishes to import optional ImageStreamImportSpec spec = 2; - // Status is the result of importing the image + // status is the result of importing the image optional ImageStreamImportStatus status = 3; } // ImageStreamImportSpec defines what images should be imported. 
 message ImageStreamImportSpec {
-  // Import indicates whether to perform an import - if so, the specified tags are set on the spec
+  // import indicates whether to perform an import - if so, the specified tags are set on the spec
   // and status of the image stream defined by the type meta.
   optional bool import = 1;

-  // Repository is an optional import of an entire container image repository. A maximum limit on the
+  // repository is an optional import of an entire container image repository. A maximum limit on the
   // number of tags imported this way is imposed by the server.
   optional RepositoryImportSpec repository = 2;

-  // Images are a list of individual images to import.
+  // images are a list of individual images to import.
   repeated ImageImportSpec images = 3;
 }

 // ImageStreamImportStatus contains information about the status of an image stream import.
 message ImageStreamImportStatus {
-  // Import is the image stream that was successfully updated or created when 'to' was set.
+  // import is the image stream that was successfully updated or created when 'to' was set.
   optional ImageStream import = 1;

-  // Repository is set if spec.repository was set to the outcome of the import
+  // repository is set if spec.repository was set to the outcome of the import
   optional RepositoryImportStatus repository = 2;

-  // Images is set with the result of importing spec.images
+  // images is set with the result of importing spec.images
   repeated ImageImportStatus images = 3;
 }
@@ -389,7 +389,7 @@ message ImageStreamList {
   // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
   optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;

-  // Items is a list of imageStreams
+  // items is a list of imageStreams
   repeated ImageStream items = 2;
 }
@@ -409,10 +409,10 @@ message ImageStreamMapping {
   // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
   optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;

-  // Image is a container image.
+  // image is a container image.
   optional Image image = 2;

-  // Tag is a string value this image can be located with inside the stream.
+  // tag is a string value this image can be located with inside the stream.
   optional string tag = 3;
 }
@@ -434,16 +434,16 @@ message ImageStreamSpec {

 // ImageStreamStatus contains information about the state of this image stream.
 message ImageStreamStatus {
-  // DockerImageRepository represents the effective location this stream may be accessed at.
+  // dockerImageRepository represents the effective location this stream may be accessed at.
   // May be empty until the server determines where the repository is located
   optional string dockerImageRepository = 1;

-  // PublicDockerImageRepository represents the public location from where the image can
+  // publicDockerImageRepository represents the public location from where the image can
   // be pulled outside the cluster. This field may be empty if the administrator
   // has not exposed the integrated registry externally.
   optional string publicDockerImageRepository = 3;

-  // Tags are a historical record of images associated with each tag. The first entry in the
+  // tags are a historical record of images associated with each tag. The first entry in the
   // TagEvent array is the currently tagged image.
   // +patchMergeKey=tag
   // +patchStrategy=merge
@@ -496,7 +496,7 @@ message ImageStreamTagList {
   // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
   optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;

-  // Items is the list of image stream tags
+  // items is the list of image stream tags
   repeated ImageStreamTag items = 2;
 }
@@ -543,46 +543,46 @@ message ImageTagList {
   // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
   optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;

-  // Items is the list of image stream tags
+  // items is the list of image stream tags
   repeated ImageTag items = 2;
 }

 // NamedTagEventList relates a tag to its image history.
 message NamedTagEventList {
-  // Tag is the tag for which the history is recorded
+  // tag is the tag for which the history is recorded
   optional string tag = 1;

   // Standard object's metadata.
   repeated TagEvent items = 2;

-  // Conditions is an array of conditions that apply to the tag event list.
+  // conditions is an array of conditions that apply to the tag event list.
   repeated TagEventCondition conditions = 3;
 }

 // RepositoryImportSpec describes a request to import images from a container image repository.
 message RepositoryImportSpec {
-  // From is the source for the image repository to import; only kind DockerImage and a name of a container image repository is allowed
+  // from is the source for the image repository to import; only kind DockerImage and a name of a container image repository is allowed
   optional .k8s.io.api.core.v1.ObjectReference from = 1;

-  // ImportPolicy is the policy controlling how the image is imported
+  // importPolicy is the policy controlling how the image is imported
   optional TagImportPolicy importPolicy = 2;

-  // ReferencePolicy defines how other components should consume the image
+  // referencePolicy defines how other components should consume the image
   optional TagReferencePolicy referencePolicy = 4;

-  // IncludeManifest determines if the manifest for each image is returned in the response
+  // includeManifest determines if the manifest for each image is returned in the response
   optional bool includeManifest = 3;
 }

 // RepositoryImportStatus describes the result of an image repository import
 message RepositoryImportStatus {
-  // Status reflects whether any failure occurred during import
+  // status reflects whether any failure occurred during import
   optional .k8s.io.apimachinery.pkg.apis.meta.v1.Status status = 1;

-  // Images is a list of images successfully retrieved by the import of the repository.
+  // images is a list of images successfully retrieved by the import of the repository.
   repeated ImageImportStatus images = 2;

-  // AdditionalTags are tags that exist in the repository but were not imported because
+  // additionalTags are tags that exist in the repository but were not imported because
   // a maximum limit of automatic imports was applied.
   repeated string additionalTags = 3;
 }
@@ -602,10 +602,10 @@ message SecretList {

 // SignatureCondition describes an image signature condition of particular kind at particular probe time.
 message SignatureCondition {
-  // Type of signature condition, Complete or Failed.
+  // type of signature condition, Complete or Failed.
   optional string type = 1;

-  // Status of the condition, one of True, False, Unknown.
+  // status of the condition, one of True, False, Unknown.
   optional string status = 2;

   // Last time the condition was checked.
@@ -624,7 +624,7 @@ message SignatureCondition {
 // SignatureGenericEntity holds a generic information about a person or entity who is an issuer or a subject
 // of signing certificate or key.
 message SignatureGenericEntity {
-  // Organization name.
+  // organization name.
   optional string organization = 1;

   // Common name (e.g. openshift-signing-service).
@@ -648,55 +648,55 @@ message SignatureSubject {

 // TagEvent is used by ImageStreamStatus to keep a historical record of images associated with a tag.
 message TagEvent {
-  // Created holds the time the TagEvent was created
+  // created holds the time the TagEvent was created
   optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time created = 1;

-  // DockerImageReference is the string that can be used to pull this image
+  // dockerImageReference is the string that can be used to pull this image
   optional string dockerImageReference = 2;

-  // Image is the image
+  // image is the image
   optional string image = 3;

-  // Generation is the spec tag generation that resulted in this tag being updated
+  // generation is the spec tag generation that resulted in this tag being updated
   optional int64 generation = 4;
 }

 // TagEventCondition contains condition information for a tag event.
 message TagEventCondition {
-  // Type of tag event condition, currently only ImportSuccess
+  // type of tag event condition, currently only ImportSuccess
   optional string type = 1;

-  // Status of the condition, one of True, False, Unknown.
+  // status of the condition, one of True, False, Unknown.
   optional string status = 2;

-  // LastTransitionTIme is the time the condition transitioned from one status to another.
+  // lastTransitionTime is the time the condition transitioned from one status to another.
   optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3;

-  // Reason is a brief machine readable explanation for the condition's last transition.
+  // reason is a brief machine readable explanation for the condition's last transition.
   optional string reason = 4;

-  // Message is a human readable description of the details about last transition, complementing reason.
+  // message is a human readable description of the details about last transition, complementing reason.
   optional string message = 5;

-  // Generation is the spec tag generation that this status corresponds to
+  // generation is the spec tag generation that this status corresponds to
   optional int64 generation = 6;
 }

 // TagImportPolicy controls how images related to this tag will be imported.
 message TagImportPolicy {
-  // Insecure is true if the server may bypass certificate verification or connect directly over HTTP during image import.
+  // insecure is true if the server may bypass certificate verification or connect directly over HTTP during image import.
   optional bool insecure = 1;

-  // Scheduled indicates to the server that this tag should be periodically checked to ensure it is up to date, and imported
+  // scheduled indicates to the server that this tag should be periodically checked to ensure it is up to date, and imported
   optional bool scheduled = 2;

-  // ImportMode describes how to import an image manifest.
+  // importMode describes how to import an image manifest.
   optional string importMode = 3;
 }

 // TagReference specifies optional annotations for images using this tag and an optional reference to an ImageStreamTag, ImageStreamImage, or DockerImage this tag should track.
 message TagReference {
-  // Name of the tag
+  // name of the tag
   optional string name = 1;

   // Optional; if specified, annotations that are applied to images retrieved via ImageStreamTags.
@@ -708,11 +708,11 @@ message TagReference {
   // can only reference a tag within this same ImageStream.
   optional .k8s.io.api.core.v1.ObjectReference from = 3;

-  // Reference states if the tag will be imported. Default value is false, which means the tag will
+  // reference states if the tag will be imported. Default value is false, which means the tag will
   // be imported.
   optional bool reference = 4;

-  // Generation is a counter that tracks mutations to the spec tag (user intent). When a tag reference
+  // generation is a counter that tracks mutations to the spec tag (user intent). When a tag reference
   // is changed the generation is set to match the current stream generation (which is incremented every
   // time spec is changed). Other processes in the system like the image importer observe that the
   // generation of spec tag is newer than the generation recorded in the status and use that as a trigger
@@ -722,10 +722,10 @@ message TagReference {
   // +optional
   optional int64 generation = 5;

-  // ImportPolicy is information that controls how images may be imported by the server.
+  // importPolicy is information that controls how images may be imported by the server.
   optional TagImportPolicy importPolicy = 6;

-  // ReferencePolicy defines how other components should consume the image.
+  // referencePolicy defines how other components should consume the image.
   optional TagReferencePolicy referencePolicy = 7;
 }
@@ -733,7 +733,7 @@ message TagReference {
 // image change triggers in deployment configs or builds are resolved. This allows the image stream
 // author to control how images are accessed.
 message TagReferencePolicy {
-  // Type determines how the image pull spec should be transformed when the image stream tag is used in
+  // type determines how the image pull spec should be transformed when the image stream tag is used in
   // deployment config triggers or new builds. The default value is `Source`, indicating the original
   // location of the image should be used (if imported). The user may also specify `Local`, indicating
   // that the pull spec should point to the integrated container image registry and leverage the registry's
diff --git a/vendor/github.com/openshift/api/image/v1/types.go b/vendor/github.com/openshift/api/image/v1/types.go
index 9919c0fe7..d4ee4bff6 100644
--- a/vendor/github.com/openshift/api/image/v1/types.go
+++ b/vendor/github.com/openshift/api/image/v1/types.go
@@ -19,7 +19,7 @@ type ImageList struct {
 	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
 	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`

-	// Items is a list of images
+	// items is a list of images
 	Items []Image `json:"items" protobuf:"bytes,2,rep,name=items"`
 }
@@ -45,30 +45,30 @@ type Image struct {
 	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
 	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`

-	// DockerImageReference is the string that can be used to pull this image.
+	// dockerImageReference is the string that can be used to pull this image.
 	DockerImageReference string `json:"dockerImageReference,omitempty" protobuf:"bytes,2,opt,name=dockerImageReference"`
-	// DockerImageMetadata contains metadata about this image
+	// dockerImageMetadata contains metadata about this image
 	// +patchStrategy=replace
 	// +kubebuilder:pruning:PreserveUnknownFields
 	DockerImageMetadata runtime.RawExtension `json:"dockerImageMetadata,omitempty" patchStrategy:"replace" protobuf:"bytes,3,opt,name=dockerImageMetadata"`
-	// DockerImageMetadataVersion conveys the version of the object, which if empty defaults to "1.0"
+	// dockerImageMetadataVersion conveys the version of the object, which if empty defaults to "1.0"
 	DockerImageMetadataVersion string `json:"dockerImageMetadataVersion,omitempty" protobuf:"bytes,4,opt,name=dockerImageMetadataVersion"`
-	// DockerImageManifest is the raw JSON of the manifest
+	// dockerImageManifest is the raw JSON of the manifest
 	DockerImageManifest string `json:"dockerImageManifest,omitempty" protobuf:"bytes,5,opt,name=dockerImageManifest"`
-	// DockerImageLayers represents the layers in the image. May not be set if the image does not define that data or if the image represents a manifest list.
+	// dockerImageLayers represents the layers in the image. May not be set if the image does not define that data or if the image represents a manifest list.
 	DockerImageLayers []ImageLayer `json:"dockerImageLayers,omitempty" protobuf:"bytes,6,rep,name=dockerImageLayers"`
-	// Signatures holds all signatures of the image.
+	// signatures holds all signatures of the image.
 	// +patchMergeKey=name
 	// +patchStrategy=merge
 	Signatures []ImageSignature `json:"signatures,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,7,rep,name=signatures"`
-	// DockerImageSignatures provides the signatures as opaque blobs. This is a part of manifest schema v1.
+	// dockerImageSignatures provides the signatures as opaque blobs. This is a part of manifest schema v1.
 	DockerImageSignatures [][]byte `json:"dockerImageSignatures,omitempty" protobuf:"bytes,8,rep,name=dockerImageSignatures"`
-	// DockerImageManifestMediaType specifies the mediaType of manifest. This is a part of manifest schema v2.
+	// dockerImageManifestMediaType specifies the mediaType of manifest. This is a part of manifest schema v2.
 	DockerImageManifestMediaType string `json:"dockerImageManifestMediaType,omitempty" protobuf:"bytes,9,opt,name=dockerImageManifestMediaType"`
-	// DockerImageConfig is a JSON blob that the runtime uses to set up the container. This is a part of manifest schema v2.
+	// dockerImageConfig is a JSON blob that the runtime uses to set up the container. This is a part of manifest schema v2.
 	// Will not be set when the image represents a manifest list.
 	DockerImageConfig string `json:"dockerImageConfig,omitempty" protobuf:"bytes,10,opt,name=dockerImageConfig"`
-	// DockerImageManifests holds information about sub-manifests when the image represents a manifest list.
+	// dockerImageManifests holds information about sub-manifests when the image represents a manifest list.
 	// When this field is present, no DockerImageLayers should be specified.
 	DockerImageManifests []ImageManifest `json:"dockerImageManifests,omitempty" protobuf:"bytes,11,rep,name=dockerImageManifests"`
 }
@@ -76,29 +76,29 @@ type Image struct {
 // ImageManifest represents sub-manifests of a manifest list. The Digest field points to a regular
 // Image object.
 type ImageManifest struct {
-	// Digest is the unique identifier for the manifest. It refers to an Image object.
+	// digest is the unique identifier for the manifest. It refers to an Image object.
 	Digest string `json:"digest" protobuf:"bytes,1,opt,name=digest"`
-	// MediaType defines the type of the manifest, possible values are application/vnd.oci.image.manifest.v1+json,
+	// mediaType defines the type of the manifest, possible values are application/vnd.oci.image.manifest.v1+json,
 	// application/vnd.docker.distribution.manifest.v2+json or application/vnd.docker.distribution.manifest.v1+json.
 	MediaType string `json:"mediaType" protobuf:"bytes,2,opt,name=mediaType"`
-	// ManifestSize represents the size of the raw object contents, in bytes.
+	// manifestSize represents the size of the raw object contents, in bytes.
 	ManifestSize int64 `json:"manifestSize" protobuf:"varint,3,opt,name=manifestSize"`
-	// Architecture specifies the supported CPU architecture, for example `amd64` or `ppc64le`.
+	// architecture specifies the supported CPU architecture, for example `amd64` or `ppc64le`.
 	Architecture string `json:"architecture" protobuf:"bytes,4,opt,name=architecture"`
-	// OS specifies the operating system, for example `linux`.
+	// os specifies the operating system, for example `linux`.
 	OS string `json:"os" protobuf:"bytes,5,opt,name=os"`
-	// Variant is an optional field repreenting a variant of the CPU, for example v6 to specify a particular CPU
+	// variant is an optional field repreenting a variant of the CPU, for example v6 to specify a particular CPU
 	// variant of the ARM CPU.
 	Variant string `json:"variant,omitempty" protobuf:"bytes,6,opt,name=variant"`
 }

 // ImageLayer represents a single layer of the image. Some images may have multiple layers. Some may have none.
 type ImageLayer struct {
-	// Name of the layer as defined by the underlying store.
+	// name of the layer as defined by the underlying store.
 	Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
-	// Size of the layer in bytes as defined by the underlying store.
+	// size of the layer in bytes as defined by the underlying store.
 	LayerSize int64 `json:"size" protobuf:"varint,2,opt,name=size"`
-	// MediaType of the referenced object.
+	// mediaType of the referenced object.
 	MediaType string `json:"mediaType" protobuf:"bytes,3,opt,name=mediaType"`
 }
@@ -126,7 +126,7 @@ type ImageSignature struct {
 	Type string `json:"type" protobuf:"bytes,2,opt,name=type"`
 	// Required: An opaque binary string which is an image's signature.
 	Content []byte `json:"content" protobuf:"bytes,3,opt,name=content"`
-	// Conditions represent the latest available observations of a signature's current state.
+	// conditions represent the latest available observations of a signature's current state.
 	// +patchMergeKey=type
 	// +patchStrategy=merge
 	Conditions []SignatureCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,4,rep,name=conditions"`
@@ -154,9 +154,9 @@ type SignatureConditionType string

 // SignatureCondition describes an image signature condition of particular kind at particular probe time.
 type SignatureCondition struct {
-	// Type of signature condition, Complete or Failed.
+	// type of signature condition, Complete or Failed.
 	Type SignatureConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=SignatureConditionType"`
-	// Status of the condition, one of True, False, Unknown.
+	// status of the condition, one of True, False, Unknown.
 	Status corev1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/api/core/v1.ConditionStatus"`
 	// Last time the condition was checked.
 	LastProbeTime metav1.Time `json:"lastProbeTime,omitempty" protobuf:"bytes,3,opt,name=lastProbeTime"`
@@ -171,7 +171,7 @@ type SignatureCondition struct {
 // SignatureGenericEntity holds a generic information about a person or entity who is an issuer or a subject
 // of signing certificate or key.
 type SignatureGenericEntity struct {
-	// Organization name.
+	// organization name.
 	Organization string `json:"organization,omitempty" protobuf:"bytes,1,opt,name=organization"`
 	// Common name (e.g. openshift-signing-service).
 	CommonName string `json:"commonName,omitempty" protobuf:"bytes,2,opt,name=commonName"`
@@ -204,7 +204,7 @@ type ImageStreamList struct {
 	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
 	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`

-	// Items is a list of imageStreams
+	// items is a list of imageStreams
 	Items []ImageStream `json:"items" protobuf:"bytes,2,rep,name=items"`
 }
@@ -237,10 +237,10 @@ type ImageStream struct {
 	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
 	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`

-	// Spec describes the desired state of this stream
+	// spec describes the desired state of this stream
 	// +optional
 	Spec ImageStreamSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"`
-	// Status describes the current state of this stream
+	// status describes the current state of this stream
 	// +optional
 	Status ImageStreamStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
 }
@@ -272,7 +272,7 @@ type ImageLookupPolicy struct {

 // TagReference specifies optional annotations for images using this tag and an optional reference to an ImageStreamTag, ImageStreamImage, or DockerImage this tag should track.
 type TagReference struct {
-	// Name of the tag
+	// name of the tag
 	Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
 	// Optional; if specified, annotations that are applied to images retrieved via ImageStreamTags.
 	// +optional
@@ -281,10 +281,10 @@ type TagReference struct {
 	// are ImageStreamTag, ImageStreamImage, and DockerImage. ImageStreamTag references
 	// can only reference a tag within this same ImageStream.
 	From *corev1.ObjectReference `json:"from,omitempty" protobuf:"bytes,3,opt,name=from"`
-	// Reference states if the tag will be imported. Default value is false, which means the tag will
+	// reference states if the tag will be imported. Default value is false, which means the tag will
 	// be imported.
 	Reference bool `json:"reference,omitempty" protobuf:"varint,4,opt,name=reference"`
-	// Generation is a counter that tracks mutations to the spec tag (user intent). When a tag reference
+	// generation is a counter that tracks mutations to the spec tag (user intent). When a tag reference
 	// is changed the generation is set to match the current stream generation (which is incremented every
 	// time spec is changed). Other processes in the system like the image importer observe that the
 	// generation of spec tag is newer than the generation recorded in the status and use that as a trigger
@@ -293,19 +293,19 @@ type TagReference struct {
 	// nil which will be merged with the current tag generation.
 	// +optional
 	Generation *int64 `json:"generation" protobuf:"varint,5,opt,name=generation"`
-	// ImportPolicy is information that controls how images may be imported by the server.
+	// importPolicy is information that controls how images may be imported by the server.
 	ImportPolicy TagImportPolicy `json:"importPolicy,omitempty" protobuf:"bytes,6,opt,name=importPolicy"`
-	// ReferencePolicy defines how other components should consume the image.
+	// referencePolicy defines how other components should consume the image.
 	ReferencePolicy TagReferencePolicy `json:"referencePolicy,omitempty" protobuf:"bytes,7,opt,name=referencePolicy"`
 }

 // TagImportPolicy controls how images related to this tag will be imported.
 type TagImportPolicy struct {
-	// Insecure is true if the server may bypass certificate verification or connect directly over HTTP during image import.
+	// insecure is true if the server may bypass certificate verification or connect directly over HTTP during image import.
 	Insecure bool `json:"insecure,omitempty" protobuf:"varint,1,opt,name=insecure"`
-	// Scheduled indicates to the server that this tag should be periodically checked to ensure it is up to date, and imported
+	// scheduled indicates to the server that this tag should be periodically checked to ensure it is up to date, and imported
 	Scheduled bool `json:"scheduled,omitempty" protobuf:"varint,2,opt,name=scheduled"`
-	// ImportMode describes how to import an image manifest.
+	// importMode describes how to import an image manifest.
 	ImportMode ImportModeType `json:"importMode,omitempty" protobuf:"bytes,3,opt,name=importMode,casttype=ImportModeType"`
 }
@@ -342,7 +342,7 @@ const (
 // image change triggers in deployment configs or builds are resolved. This allows the image stream
 // author to control how images are accessed.
 type TagReferencePolicy struct {
-	// Type determines how the image pull spec should be transformed when the image stream tag is used in
+	// type determines how the image pull spec should be transformed when the image stream tag is used in
 	// deployment config triggers or new builds. The default value is `Source`, indicating the original
 	// location of the image should be used (if imported). The user may also specify `Local`, indicating
 	// that the pull spec should point to the integrated container image registry and leverage the registry's
@@ -355,14 +355,14 @@ type TagReferencePolicy struct {

 // ImageStreamStatus contains information about the state of this image stream.
 type ImageStreamStatus struct {
-	// DockerImageRepository represents the effective location this stream may be accessed at.
+	// dockerImageRepository represents the effective location this stream may be accessed at.
 	// May be empty until the server determines where the repository is located
 	DockerImageRepository string `json:"dockerImageRepository" protobuf:"bytes,1,opt,name=dockerImageRepository"`
-	// PublicDockerImageRepository represents the public location from where the image can
+	// publicDockerImageRepository represents the public location from where the image can
 	// be pulled outside the cluster. This field may be empty if the administrator
 	// has not exposed the integrated registry externally.
 	PublicDockerImageRepository string `json:"publicDockerImageRepository,omitempty" protobuf:"bytes,3,opt,name=publicDockerImageRepository"`
-	// Tags are a historical record of images associated with each tag. The first entry in the
+	// tags are a historical record of images associated with each tag. The first entry in the
 	// TagEvent array is the currently tagged image.
 	// +patchMergeKey=tag
 	// +patchStrategy=merge
@@ -371,23 +371,23 @@ type ImageStreamStatus struct {

 // NamedTagEventList relates a tag to its image history.
 type NamedTagEventList struct {
-	// Tag is the tag for which the history is recorded
+	// tag is the tag for which the history is recorded
 	Tag string `json:"tag" protobuf:"bytes,1,opt,name=tag"`
 	// Standard object's metadata.
 	Items []TagEvent `json:"items" protobuf:"bytes,2,rep,name=items"`
-	// Conditions is an array of conditions that apply to the tag event list.
+	// conditions is an array of conditions that apply to the tag event list.
 	Conditions []TagEventCondition `json:"conditions,omitempty" protobuf:"bytes,3,rep,name=conditions"`
 }

 // TagEvent is used by ImageStreamStatus to keep a historical record of images associated with a tag.
 type TagEvent struct {
-	// Created holds the time the TagEvent was created
+	// created holds the time the TagEvent was created
 	Created metav1.Time `json:"created" protobuf:"bytes,1,opt,name=created"`
-	// DockerImageReference is the string that can be used to pull this image
+	// dockerImageReference is the string that can be used to pull this image
 	DockerImageReference string `json:"dockerImageReference" protobuf:"bytes,2,opt,name=dockerImageReference"`
-	// Image is the image
+	// image is the image
 	Image string `json:"image" protobuf:"bytes,3,opt,name=image"`
-	// Generation is the spec tag generation that resulted in this tag being updated
+	// generation is the spec tag generation that resulted in this tag being updated
 	Generation int64 `json:"generation" protobuf:"varint,4,opt,name=generation"`
 }
@@ -401,17 +401,17 @@ const (

 // TagEventCondition contains condition information for a tag event.
 type TagEventCondition struct {
-	// Type of tag event condition, currently only ImportSuccess
+	// type of tag event condition, currently only ImportSuccess
 	Type TagEventConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=TagEventConditionType"`
-	// Status of the condition, one of True, False, Unknown.
+	// status of the condition, one of True, False, Unknown.
 	Status corev1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/api/core/v1.ConditionStatus"`
-	// LastTransitionTIme is the time the condition transitioned from one status to another.
+	// lastTransitionTime is the time the condition transitioned from one status to another.
 	LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"`
-	// Reason is a brief machine readable explanation for the condition's last transition.
+	// reason is a brief machine readable explanation for the condition's last transition.
 	Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"`
-	// Message is a human readable description of the details about last transition, complementing reason.
+	// message is a human readable description of the details about last transition, complementing reason.
 	Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"`
-	// Generation is the spec tag generation that this status corresponds to
+	// generation is the spec tag generation that this status corresponds to
 	Generation int64 `json:"generation" protobuf:"varint,6,opt,name=generation"`
 }
@@ -438,9 +438,9 @@ type ImageStreamMapping struct {
 	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
 	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`

-	// Image is a container image.
+	// image is a container image.
 	Image Image `json:"image" protobuf:"bytes,2,opt,name=image"`
-	// Tag is a string value this image can be located with inside the stream.
+	// tag is a string value this image can be located with inside the stream.
 	Tag string `json:"tag" protobuf:"bytes,3,opt,name=tag"`
 }
@@ -500,7 +500,7 @@ type ImageStreamTagList struct {
 	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
 	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`

-	// Items is the list of image stream tags
+	// items is the list of image stream tags
 	Items []ImageStreamTag `json:"items" protobuf:"bytes,2,rep,name=items"`
 }
@@ -555,7 +555,7 @@ type ImageTagList struct {
 	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
 	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`

-	// Items is the list of image stream tags
+	// items is the list of image stream tags
 	Items []ImageTag `json:"items" protobuf:"bytes,2,rep,name=items"`
 }
@@ -585,7 +585,7 @@ type ImageStreamImage struct {
 	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
 	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`

-	// Image associated with the ImageStream and image name.
+	// image associated with the ImageStream and image name.
 	Image Image `json:"image" protobuf:"bytes,2,opt,name=image"`
 }
@@ -651,10 +651,10 @@ type ImageBlobReferences struct {

 // ImageLayerData contains metadata about an image layer.
 type ImageLayerData struct {
-	// Size of the layer in bytes as defined by the underlying store. This field is
+	// size of the layer in bytes as defined by the underlying store. This field is
 	// optional if the necessary information about size is not available.
 	LayerSize *int64 `json:"size" protobuf:"varint,1,opt,name=size"`
-	// MediaType of the referenced object.
+	// mediaType of the referenced object.
 	MediaType string `json:"mediaType" protobuf:"bytes,2,opt,name=mediaType"`
 }
@@ -680,82 +680,82 @@ type ImageStreamImport struct {
 	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
 	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`

-	// Spec is a description of the images that the user wishes to import
+	// spec is a description of the images that the user wishes to import
 	Spec ImageStreamImportSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"`
-	// Status is the result of importing the image
+	// status is the result of importing the image
 	Status ImageStreamImportStatus `json:"status" protobuf:"bytes,3,opt,name=status"`
 }

 // ImageStreamImportSpec defines what images should be imported.
 type ImageStreamImportSpec struct {
-	// Import indicates whether to perform an import - if so, the specified tags are set on the spec
+	// import indicates whether to perform an import - if so, the specified tags are set on the spec
 	// and status of the image stream defined by the type meta.
 	Import bool `json:"import" protobuf:"varint,1,opt,name=import"`
-	// Repository is an optional import of an entire container image repository. A maximum limit on the
+	// repository is an optional import of an entire container image repository. A maximum limit on the
 	// number of tags imported this way is imposed by the server.
 	Repository *RepositoryImportSpec `json:"repository,omitempty" protobuf:"bytes,2,opt,name=repository"`
-	// Images are a list of individual images to import.
+	// images are a list of individual images to import.
 	Images []ImageImportSpec `json:"images,omitempty" protobuf:"bytes,3,rep,name=images"`
 }

 // ImageStreamImportStatus contains information about the status of an image stream import.
 type ImageStreamImportStatus struct {
-	// Import is the image stream that was successfully updated or created when 'to' was set.
+	// import is the image stream that was successfully updated or created when 'to' was set.
 	Import *ImageStream `json:"import,omitempty" protobuf:"bytes,1,opt,name=import"`
-	// Repository is set if spec.repository was set to the outcome of the import
+	// repository is set if spec.repository was set to the outcome of the import
 	Repository *RepositoryImportStatus `json:"repository,omitempty" protobuf:"bytes,2,opt,name=repository"`
-	// Images is set with the result of importing spec.images
+	// images is set with the result of importing spec.images
 	Images []ImageImportStatus `json:"images,omitempty" protobuf:"bytes,3,rep,name=images"`
 }

 // RepositoryImportSpec describes a request to import images from a container image repository.
 type RepositoryImportSpec struct {
-	// From is the source for the image repository to import; only kind DockerImage and a name of a container image repository is allowed
+	// from is the source for the image repository to import; only kind DockerImage and a name of a container image repository is allowed
 	From corev1.ObjectReference `json:"from" protobuf:"bytes,1,opt,name=from"`
-	// ImportPolicy is the policy controlling how the image is imported
+	// importPolicy is the policy controlling how the image is imported
 	ImportPolicy TagImportPolicy `json:"importPolicy,omitempty" protobuf:"bytes,2,opt,name=importPolicy"`
-	// ReferencePolicy defines how other components should consume the image
+	// referencePolicy defines how other components should consume the image
 	ReferencePolicy TagReferencePolicy `json:"referencePolicy,omitempty" protobuf:"bytes,4,opt,name=referencePolicy"`
-	// IncludeManifest determines if the manifest for each image is returned in the response
+	// includeManifest determines if the manifest for each image is returned in the response
 	IncludeManifest bool `json:"includeManifest,omitempty" protobuf:"varint,3,opt,name=includeManifest"`
 }

 // RepositoryImportStatus describes the result of an image repository import
 type RepositoryImportStatus struct {
-	// Status reflects whether any failure occurred during import
+	// status reflects whether any failure occurred during import
 	Status metav1.Status `json:"status,omitempty" protobuf:"bytes,1,opt,name=status"`
-	// Images is a list of images successfully retrieved by the import of the repository.
+	// images is a list of images successfully retrieved by the import of the repository.
 	Images []ImageImportStatus `json:"images,omitempty" protobuf:"bytes,2,rep,name=images"`
-	// AdditionalTags are tags that exist in the repository but were not imported because
+	// additionalTags are tags that exist in the repository but were not imported because
 	// a maximum limit of automatic imports was applied.
 	AdditionalTags []string `json:"additionalTags,omitempty" protobuf:"bytes,3,rep,name=additionalTags"`
 }

 // ImageImportSpec describes a request to import a specific image.
 type ImageImportSpec struct {
-	// From is the source of an image to import; only kind DockerImage is allowed
+	// from is the source of an image to import; only kind DockerImage is allowed
 	From corev1.ObjectReference `json:"from" protobuf:"bytes,1,opt,name=from"`
-	// To is a tag in the current image stream to assign the imported image to, if name is not specified the default tag from from.name will be used
+	// to is a tag in the current image stream to assign the imported image to, if name is not specified the default tag from from.name will be used
 	To *corev1.LocalObjectReference `json:"to,omitempty" protobuf:"bytes,2,opt,name=to"`
-	// ImportPolicy is the policy controlling how the image is imported
+	// importPolicy is the policy controlling how the image is imported
 	ImportPolicy TagImportPolicy `json:"importPolicy,omitempty" protobuf:"bytes,3,opt,name=importPolicy"`
-	// ReferencePolicy defines how other components should consume the image
+	// referencePolicy defines how other components should consume the image
 	ReferencePolicy TagReferencePolicy `json:"referencePolicy,omitempty" protobuf:"bytes,5,opt,name=referencePolicy"`
-	// IncludeManifest determines if the manifest for each image is returned in the response
+	// includeManifest determines if the manifest for each image is returned in the response
 	IncludeManifest bool `json:"includeManifest,omitempty" protobuf:"varint,4,opt,name=includeManifest"`
 }

 // ImageImportStatus describes the result of an image import.
 type ImageImportStatus struct {
-	// Status is the status of the image import, including errors encountered while retrieving the image
+	// status is the status of the image import, including errors encountered while retrieving the image
 	Status metav1.Status `json:"status" protobuf:"bytes,1,opt,name=status"`
-	// Image is the metadata of that image, if the image was located
+	// image is the metadata of that image, if the image was located
 	Image *Image `json:"image,omitempty" protobuf:"bytes,2,opt,name=image"`
-	// Tag is the tag this image was located under, if any
+	// tag is the tag this image was located under, if any
 	Tag string `json:"tag,omitempty" protobuf:"bytes,3,opt,name=tag"`
-	// Manifests holds sub-manifests metadata when importing a manifest list
+	// manifests holds sub-manifests metadata when importing a manifest list
 	Manifests []Image `json:"manifests,omitempty" protobuf:"bytes,4,rep,name=manifests"`
 }
diff --git a/vendor/github.com/openshift/api/image/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/image/v1/zz_generated.swagger_doc_generated.go
index ec7fc2b45..e0720bec7 100644
--- a/vendor/github.com/openshift/api/image/v1/zz_generated.swagger_doc_generated.go
+++ b/vendor/github.com/openshift/api/image/v1/zz_generated.swagger_doc_generated.go
@@ -27,16 +27,16 @@ func (DockerImageReference) SwaggerDoc() map[string]string {
 var map_Image = map[string]string{
 	"": "Image is an immutable representation of a container image and metadata at a point in time. Images are named by taking a hash of their contents (metadata and content) and any change in format, content, or metadata results in a new name. The images resource is primarily for use by cluster administrators and integrations like the cluster image registry - end users instead access images via the imagestreamtags or imagestreamimages resources. While image metadata is stored in the API, any integration that implements the container image registry API must provide its own storage for the raw manifest data, image config, and layer contents.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
 	"metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
-	"dockerImageReference": "DockerImageReference is the string that can be used to pull this image.",
-	"dockerImageMetadata": "DockerImageMetadata contains metadata about this image",
-	"dockerImageMetadataVersion": "DockerImageMetadataVersion conveys the version of the object, which if empty defaults to \"1.0\"",
-	"dockerImageManifest": "DockerImageManifest is the raw JSON of the manifest",
-	"dockerImageLayers": "DockerImageLayers represents the layers in the image. May not be set if the image does not define that data or if the image represents a manifest list.",
-	"signatures": "Signatures holds all signatures of the image.",
-	"dockerImageSignatures": "DockerImageSignatures provides the signatures as opaque blobs. This is a part of manifest schema v1.",
-	"dockerImageManifestMediaType": "DockerImageManifestMediaType specifies the mediaType of manifest. This is a part of manifest schema v2.",
-	"dockerImageConfig": "DockerImageConfig is a JSON blob that the runtime uses to set up the container. This is a part of manifest schema v2. Will not be set when the image represents a manifest list.",
-	"dockerImageManifests": "DockerImageManifests holds information about sub-manifests when the image represents a manifest list. When this field is present, no DockerImageLayers should be specified.",
+	"dockerImageReference": "dockerImageReference is the string that can be used to pull this image.",
+	"dockerImageMetadata": "dockerImageMetadata contains metadata about this image",
+	"dockerImageMetadataVersion": "dockerImageMetadataVersion conveys the version of the object, which if empty defaults to \"1.0\"",
+	"dockerImageManifest": "dockerImageManifest is the raw JSON of the manifest",
+	"dockerImageLayers": "dockerImageLayers represents the layers in the image. May not be set if the image does not define that data or if the image represents a manifest list.",
+	"signatures": "signatures holds all signatures of the image.",
+	"dockerImageSignatures": "dockerImageSignatures provides the signatures as opaque blobs. This is a part of manifest schema v1.",
+	"dockerImageManifestMediaType": "dockerImageManifestMediaType specifies the mediaType of manifest. This is a part of manifest schema v2.",
+	"dockerImageConfig": "dockerImageConfig is a JSON blob that the runtime uses to set up the container. This is a part of manifest schema v2. Will not be set when the image represents a manifest list.",
+	"dockerImageManifests": "dockerImageManifests holds information about sub-manifests when the image represents a manifest list. When this field is present, no DockerImageLayers should be specified.",
 }

 func (Image) SwaggerDoc() map[string]string {
@@ -57,11 +57,11 @@ func (ImageBlobReferences) SwaggerDoc() map[string]string {
 var map_ImageImportSpec = map[string]string{
 	"": "ImageImportSpec describes a request to import a specific image.",
-	"from": "From is the source of an image to import; only kind DockerImage is allowed",
-	"to": "To is a tag in the current image stream to assign the imported image to, if name is not specified the default tag from from.name will be used",
-	"importPolicy": "ImportPolicy is the policy controlling how the image is imported",
-	"referencePolicy": "ReferencePolicy defines how other components should consume the image",
-	"includeManifest": "IncludeManifest determines if the manifest for each image is returned in the response",
+	"from": "from is the source of an image to import; only kind DockerImage is allowed",
+	"to": "to is a tag in the current image stream to assign the imported image to, if name is not specified the default tag from from.name will be used",
+	"importPolicy": "importPolicy is the policy controlling how the image is imported",
+	"referencePolicy": "referencePolicy defines how other components should consume the image",
+	"includeManifest": "includeManifest determines if the manifest for each image is returned in the response",
 }

 func (ImageImportSpec) SwaggerDoc() map[string]string {
@@ -70,10 +70,10 @@ func (ImageImportSpec) SwaggerDoc() map[string]string {
 var map_ImageImportStatus = map[string]string{
 	"": "ImageImportStatus describes the result of an image import.",
-	"status": "Status is the status of the image import, including errors encountered while retrieving the image",
-	"image": "Image is the metadata of that image, if the image was located",
-	"tag": "Tag is the tag this image was located under, if any",
-	"manifests": "Manifests holds sub-manifests metadata when importing a manifest list",
+	"status": "status is the status of the image import, including errors encountered while retrieving the image",
+	"image": "image is the metadata of that image, if the image was located",
+	"tag": "tag is the tag this image was located under, if any",
+	"manifests": "manifests holds sub-manifests metadata when importing a manifest list",
 }

 func (ImageImportStatus) SwaggerDoc() map[string]string {
@@ -82,9 +82,9 @@ func (ImageImportStatus) SwaggerDoc() map[string]string {
 var map_ImageLayer = map[string]string{
 	"": "ImageLayer represents a single layer of the image. Some images may have multiple layers. Some may have none.",
-	"name": "Name of the layer as defined by the underlying store.",
-	"size": "Size of the layer in bytes as defined by the underlying store.",
-	"mediaType": "MediaType of the referenced object.",
+	"name": "name of the layer as defined by the underlying store.",
+	"size": "size of the layer in bytes as defined by the underlying store.",
+	"mediaType": "mediaType of the referenced object.",
 }

 func (ImageLayer) SwaggerDoc() map[string]string {
@@ -93,8 +93,8 @@ func (ImageLayer) SwaggerDoc() map[string]string {
 var map_ImageLayerData = map[string]string{
 	"": "ImageLayerData contains metadata about an image layer.",
-	"size": "Size of the layer in bytes as defined by the underlying store. This field is optional if the necessary information about size is not available.",
-	"mediaType": "MediaType of the referenced object.",
+	"size": "size of the layer in bytes as defined by the underlying store. This field is optional if the necessary information about size is not available.",
+	"mediaType": "mediaType of the referenced object.",
 }

 func (ImageLayerData) SwaggerDoc() map[string]string {
@@ -104,7 +104,7 @@ func (ImageLayerData) SwaggerDoc() map[string]string {
 var map_ImageList = map[string]string{
 	"": "ImageList is a list of Image objects.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
 	"metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
-	"items": "Items is a list of images",
+	"items": "items is a list of images",
 }

 func (ImageList) SwaggerDoc() map[string]string {
@@ -122,12 +122,12 @@ func (ImageLookupPolicy) SwaggerDoc() map[string]string {
 var map_ImageManifest = map[string]string{
 	"": "ImageManifest represents sub-manifests of a manifest list. The Digest field points to a regular Image object.",
-	"digest": "Digest is the unique identifier for the manifest. It refers to an Image object.",
-	"mediaType": "MediaType defines the type of the manifest, possible values are application/vnd.oci.image.manifest.v1+json, application/vnd.docker.distribution.manifest.v2+json or application/vnd.docker.distribution.manifest.v1+json.",
-	"manifestSize": "ManifestSize represents the size of the raw object contents, in bytes.",
-	"architecture": "Architecture specifies the supported CPU architecture, for example `amd64` or `ppc64le`.",
-	"os": "OS specifies the operating system, for example `linux`.",
-	"variant": "Variant is an optional field repreenting a variant of the CPU, for example v6 to specify a particular CPU variant of the ARM CPU.",
+	"digest": "digest is the unique identifier for the manifest. It refers to an Image object.",
+	"mediaType": "mediaType defines the type of the manifest, possible values are application/vnd.oci.image.manifest.v1+json, application/vnd.docker.distribution.manifest.v2+json or application/vnd.docker.distribution.manifest.v1+json.",
+	"manifestSize": "manifestSize represents the size of the raw object contents, in bytes.",
+	"architecture": "architecture specifies the supported CPU architecture, for example `amd64` or `ppc64le`.",
+	"os": "os specifies the operating system, for example `linux`.",
+	"variant": "variant is an optional field repreenting a variant of the CPU, for example v6 to specify a particular CPU variant of the ARM CPU.",
 }

 func (ImageManifest) SwaggerDoc() map[string]string {
@@ -139,7 +139,7 @@ var map_ImageSignature = map[string]string{
 	"metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
 	"type": "Required: Describes a type of stored blob.",
 	"content": "Required: An opaque binary string which is an image's signature.",
-	"conditions": "Conditions represent the latest available observations of a signature's current state.",
+	"conditions": "conditions represent the latest available observations of a signature's current state.",
 	"imageIdentity": "A human readable string representing image's identity. It could be a product name and version, or an image pull spec (e.g. \"registry.access.redhat.com/rhel7/rhel:7.2\").",
 	"signedClaims": "Contains claims from the signature.",
 	"created": "If specified, it is the time of signature's creation.",
@@ -154,8 +154,8 @@ func (ImageSignature) SwaggerDoc() map[string]string {
 var map_ImageStream = map[string]string{
 	"": "An ImageStream stores a mapping of tags to images, metadata overrides that are applied when images are tagged in a stream, and an optional reference to a container image repository on a registry. Users typically update the spec.tags field to point to external images which are imported from container registries using credentials in your namespace with the pull secret type, or to existing image stream tags and images which are immediately accessible for tagging or pulling. The history of images applied to a tag is visible in the status.tags field and any user who can view an image stream is allowed to tag that image into their own image streams. Access to pull images from the integrated registry is granted by having the \"get imagestreams/layers\" permission on a given image stream. Users may remove a tag by deleting the imagestreamtag resource, which causes both spec and status for that tag to be removed. Image stream history is retained until an administrator runs the prune operation, which removes references that are no longer in use. To preserve a historical image, ensure there is a tag in spec pointing to that image by its digest.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
 	"metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
-	"spec": "Spec describes the desired state of this stream",
-	"status": "Status describes the current state of this stream",
+	"spec": "spec describes the desired state of this stream",
+	"status": "status describes the current state of this stream",
 }

 func (ImageStream) SwaggerDoc() map[string]string {
@@ -165,7 +165,7 @@ func (ImageStream) SwaggerDoc() map[string]string {
 var map_ImageStreamImage = map[string]string{
 	"": "ImageStreamImage represents an Image that is retrieved by image name from an ImageStream. User interfaces and regular users can use this resource to access the metadata details of a tagged image in the image stream history for viewing, since Image resources are not directly accessible to end users. A not found error will be returned if no such image is referenced by a tag within the ImageStream. Images are created when spec tags are set on an image stream that represent an image in an external registry, when pushing to the integrated registry, or when tagging an existing image from one image stream to another. The name of an image stream image is in the form \"@\", where the digest is the content addressible identifier for the image (sha256:xxxxx...). You can use ImageStreamImages as the from.kind of an image stream spec tag to reference an image exactly. The only operations supported on the imagestreamimage endpoint are retrieving the image.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
 	"metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
-	"image": "Image associated with the ImageStream and image name.",
+	"image": "image associated with the ImageStream and image name.",
 }

 func (ImageStreamImage) SwaggerDoc() map[string]string {
@@ -175,8 +175,8 @@ func (ImageStreamImage) SwaggerDoc() map[string]string {
 var map_ImageStreamImport = map[string]string{
 	"": "The image stream import resource provides an easy way for a user to find and import container images from other container image registries into the server. Individual images or an entire image repository may be imported, and users may choose to see the results of the import prior to tagging the resulting images into the specified image stream.\n\nThis API is intended for end-user tools that need to see the metadata of the image prior to import (for instance, to generate an application from it). Clients that know the desired image can continue to create spec.tags directly into their image streams.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
 	"metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
-	"spec": "Spec is a description of the images that the user wishes to import",
-	"status": "Status is the result of importing the image",
+	"spec": "spec is a description of the images that the user wishes to import",
+	"status": "status is the result of importing the image",
 }

 func (ImageStreamImport) SwaggerDoc() map[string]string {
@@ -185,9 +185,9 @@ func (ImageStreamImport) SwaggerDoc() map[string]string {
 var map_ImageStreamImportSpec = map[string]string{
 	"": "ImageStreamImportSpec defines what images should be imported.",
-	"import": "Import indicates whether to perform an import - if so, the specified tags are set on the spec and status of the image stream defined by the type meta.",
-	"repository": "Repository is an optional import of an entire container image repository. A maximum limit on the number of tags imported this way is imposed by the server.",
-	"images": "Images are a list of individual images to import.",
+	"import": "import indicates whether to perform an import - if so, the specified tags are set on the spec and status of the image stream defined by the type meta.",
+	"repository": "repository is an optional import of an entire container image repository. A maximum limit on the number of tags imported this way is imposed by the server.",
+	"images": "images are a list of individual images to import.",
 }

 func (ImageStreamImportSpec) SwaggerDoc() map[string]string {
@@ -196,9 +196,9 @@ func (ImageStreamImportSpec) SwaggerDoc() map[string]string {
 var map_ImageStreamImportStatus = map[string]string{
 	"": "ImageStreamImportStatus contains information about the status of an image stream import.",
-	"import": "Import is the image stream that was successfully updated or created when 'to' was set.",
-	"repository": "Repository is set if spec.repository was set to the outcome of the import",
-	"images": "Images is set with the result of importing spec.images",
+	"import": "import is the image stream that was successfully updated or created when 'to' was set.",
+	"repository": "repository is set if spec.repository was set to the outcome of the import",
+	"images": "images is set with the result of importing spec.images",
 }

 func (ImageStreamImportStatus) SwaggerDoc() map[string]string {
@@ -219,7 +219,7 @@ func (ImageStreamLayers) SwaggerDoc() map[string]string {
 var map_ImageStreamList = map[string]string{
 	"": "ImageStreamList is a list of ImageStream objects.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
 	"metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
-	"items": "Items is a list of imageStreams",
+	"items": "items is a list of imageStreams",
 }

 func (ImageStreamList) SwaggerDoc() map[string]string {
@@ -229,8 +229,8 @@ func (ImageStreamList) SwaggerDoc() map[string]string {
 var map_ImageStreamMapping = map[string]string{
 	"": "ImageStreamMapping represents a mapping from a single image stream tag to a container image as well as the reference to the container image stream the image came from. This resource is used by privileged integrators to create an image resource and to associate it with an image stream in the status tags field. Creating an ImageStreamMapping will allow any user who can view the image stream to tag or pull that image, so only create mappings where the user has proven they have access to the image contents directly. The only operation supported for this resource is create and the metadata name and namespace should be set to the image stream containing the tag that should be updated.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
 	"metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
-	"image": "Image is a container image.",
-	"tag": "Tag is a string value this image can be located with inside the stream.",
+	"image": "image is a container image.",
+	"tag": "tag is a string value this image can be located with inside the stream.",
 }

 func (ImageStreamMapping) SwaggerDoc() map[string]string {
@@ -250,9 +250,9 @@ func (ImageStreamSpec) SwaggerDoc() map[string]string {
 var map_ImageStreamStatus = map[string]string{
 	"": "ImageStreamStatus contains information about the state of this image stream.",
-	"dockerImageRepository": "DockerImageRepository represents the effective location this stream may be accessed at. May be empty until the server determines where the repository is located",
-	"publicDockerImageRepository": "PublicDockerImageRepository represents the public location from where the image can be pulled outside the cluster. This field may be empty if the administrator has not exposed the integrated registry externally.",
-	"tags": "Tags are a historical record of images associated with each tag. The first entry in the TagEvent array is the currently tagged image.",
+	"dockerImageRepository": "dockerImageRepository represents the effective location this stream may be accessed at. May be empty until the server determines where the repository is located",
+	"publicDockerImageRepository": "publicDockerImageRepository represents the public location from where the image can be pulled outside the cluster. This field may be empty if the administrator has not exposed the integrated registry externally.",
+	"tags": "tags are a historical record of images associated with each tag. The first entry in the TagEvent array is the currently tagged image.",
 }

 func (ImageStreamStatus) SwaggerDoc() map[string]string {
@@ -276,7 +276,7 @@ func (ImageStreamTag) SwaggerDoc() map[string]string {
 var map_ImageStreamTagList = map[string]string{
 	"": "ImageStreamTagList is a list of ImageStreamTag objects.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
 	"metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
-	"items": "Items is the list of image stream tags",
+	"items": "items is the list of image stream tags",
 }

 func (ImageStreamTagList) SwaggerDoc() map[string]string {
@@ -298,7 +298,7 @@ func (ImageTag) SwaggerDoc() map[string]string {
 var map_ImageTagList = map[string]string{
 	"": "ImageTagList is a list of ImageTag objects. When listing image tags, the image field is not populated. Tags are returned in alphabetical order by image stream and then tag.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
 	"metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
-	"items": "Items is the list of image stream tags",
+	"items": "items is the list of image stream tags",
 }

 func (ImageTagList) SwaggerDoc() map[string]string {
@@ -307,9 +307,9 @@ func (ImageTagList) SwaggerDoc() map[string]string {
 var map_NamedTagEventList = map[string]string{
 	"": "NamedTagEventList relates a tag to its image history.",
-	"tag": "Tag is the tag for which the history is recorded",
+	"tag": "tag is the tag for which the history is recorded",
 	"items": "Standard object's metadata.",
-	"conditions": "Conditions is an array of conditions that apply to the tag event list.",
+	"conditions": "conditions is an array of conditions that apply to the tag event list.",
 }

 func (NamedTagEventList) SwaggerDoc() map[string]string {
@@ -318,10 +318,10 @@ func (NamedTagEventList) SwaggerDoc() map[string]string {
 var map_RepositoryImportSpec = map[string]string{
 	"": "RepositoryImportSpec describes a request to import images from a container image repository.",
-	"from": "From is the source for the image repository to import; only kind DockerImage and a name of a container image repository is allowed",
-	"importPolicy": "ImportPolicy is the policy controlling how the image is imported",
-	"referencePolicy": "ReferencePolicy defines how other components should consume the image",
-	"includeManifest": "IncludeManifest determines if the manifest for each image is returned in the response",
+	"from": "from is the source for the image repository to import; only kind DockerImage and a name of a container image repository is allowed",
+	"importPolicy": "importPolicy is the policy controlling how the image is imported",
+	"referencePolicy": "referencePolicy defines how other components should consume the image",
+	"includeManifest": "includeManifest determines if the manifest for each image is returned in the response",
 }

 func (RepositoryImportSpec) SwaggerDoc() map[string]string {
@@ -330,9 +330,9 @@ func (RepositoryImportSpec) SwaggerDoc() map[string]string {
 var map_RepositoryImportStatus = map[string]string{
 	"": "RepositoryImportStatus describes the result of an image repository import",
-	"status": "Status reflects whether any failure occurred during import",
-	"images": "Images is a list of images successfully retrieved by the import of the repository.",
-	"additionalTags": "AdditionalTags are tags that exist in the repository but were not imported because a maximum limit of automatic imports was applied.",
+	"status": "status reflects whether any failure occurred during import",
+	"images": "images is a list of images successfully retrieved by the import of the repository.",
+	"additionalTags": "additionalTags are tags that exist in the repository but were not imported because a maximum limit of automatic imports was applied.",
 }

 func (RepositoryImportStatus) SwaggerDoc() map[string]string {
@@ -341,8 +341,8 @@ func (RepositoryImportStatus) SwaggerDoc() map[string]string {
 var map_SignatureCondition = map[string]string{
 	"": "SignatureCondition describes an image signature condition of particular kind at particular probe time.",
-	"type": "Type of signature condition, Complete or Failed.",
-	"status": "Status of the condition, one of True, False, Unknown.",
+	"type": "type of signature condition, Complete or Failed.",
+	"status": "status of the condition, one of True, False, Unknown.",
 	"lastProbeTime": "Last time the condition was checked.",
 	"lastTransitionTime": "Last time the condition
transit from one status to another.", "reason": "(brief) reason for the condition's last transition.", @@ -355,7 +355,7 @@ func (SignatureCondition) SwaggerDoc() map[string]string { var map_SignatureGenericEntity = map[string]string{ "": "SignatureGenericEntity holds a generic information about a person or entity who is an issuer or a subject of signing certificate or key.", - "organization": "Organization name.", + "organization": "organization name.", "commonName": "Common name (e.g. openshift-signing-service).", } @@ -382,10 +382,10 @@ func (SignatureSubject) SwaggerDoc() map[string]string { var map_TagEvent = map[string]string{ "": "TagEvent is used by ImageStreamStatus to keep a historical record of images associated with a tag.", - "created": "Created holds the time the TagEvent was created", - "dockerImageReference": "DockerImageReference is the string that can be used to pull this image", - "image": "Image is the image", - "generation": "Generation is the spec tag generation that resulted in this tag being updated", + "created": "created holds the time the TagEvent was created", + "dockerImageReference": "dockerImageReference is the string that can be used to pull this image", + "image": "image is the image", + "generation": "generation is the spec tag generation that resulted in this tag being updated", } func (TagEvent) SwaggerDoc() map[string]string { @@ -394,12 +394,12 @@ func (TagEvent) SwaggerDoc() map[string]string { var map_TagEventCondition = map[string]string{ "": "TagEventCondition contains condition information for a tag event.", - "type": "Type of tag event condition, currently only ImportSuccess", - "status": "Status of the condition, one of True, False, Unknown.", - "lastTransitionTime": "LastTransitionTIme is the time the condition transitioned from one status to another.", - "reason": "Reason is a brief machine readable explanation for the condition's last transition.", - "message": "Message is a human readable description of the details about last transition, complementing reason.", - "generation": "Generation is the spec tag generation that this status corresponds to", + "type": "type of tag event condition, currently only ImportSuccess", + "status": "status of the condition, one of True, False, Unknown.", + "lastTransitionTime": "lastTransitionTime is the time the condition transitioned from one status to another.", + "reason": "reason is a brief machine readable explanation for the condition's last transition.", + "message": "message is a human readable description of the details about last transition, complementing reason.", + "generation": "generation is the spec tag generation that this status corresponds to", } func (TagEventCondition) SwaggerDoc() map[string]string { @@ -408,9 +408,9 @@ func (TagEventCondition) SwaggerDoc() map[string]string { var map_TagImportPolicy = map[string]string{ "": "TagImportPolicy controls how images related to this tag will be imported.", - "insecure": "Insecure is true if the server may bypass certificate verification or connect directly over HTTP during image import.", - "scheduled": "Scheduled indicates to the server that this tag should be periodically checked to ensure it is up to date, and imported", - "importMode": "ImportMode describes how to import an image manifest.", + "insecure": "insecure is true if the server may bypass certificate verification or connect directly over HTTP during image import.", + "scheduled": "scheduled indicates to the server that this tag should be periodically checked to ensure it is up to 
date, and imported", + "importMode": "importMode describes how to import an image manifest.", } func (TagImportPolicy) SwaggerDoc() map[string]string { @@ -419,13 +419,13 @@ func (TagImportPolicy) SwaggerDoc() map[string]string { var map_TagReference = map[string]string{ "": "TagReference specifies optional annotations for images using this tag and an optional reference to an ImageStreamTag, ImageStreamImage, or DockerImage this tag should track.", - "name": "Name of the tag", + "name": "name of the tag", "annotations": "Optional; if specified, annotations that are applied to images retrieved via ImageStreamTags.", "from": "Optional; if specified, a reference to another image that this tag should point to. Valid values are ImageStreamTag, ImageStreamImage, and DockerImage. ImageStreamTag references can only reference a tag within this same ImageStream.", - "reference": "Reference states if the tag will be imported. Default value is false, which means the tag will be imported.", - "generation": "Generation is a counter that tracks mutations to the spec tag (user intent). When a tag reference is changed the generation is set to match the current stream generation (which is incremented every time spec is changed). Other processes in the system like the image importer observe that the generation of spec tag is newer than the generation recorded in the status and use that as a trigger to import the newest remote tag. To trigger a new import, clients may set this value to zero which will reset the generation to the latest stream generation. Legacy clients will send this value as nil which will be merged with the current tag generation.", - "importPolicy": "ImportPolicy is information that controls how images may be imported by the server.", - "referencePolicy": "ReferencePolicy defines how other components should consume the image.", + "reference": "reference states if the tag will be imported. Default value is false, which means the tag will be imported.", + "generation": "generation is a counter that tracks mutations to the spec tag (user intent). When a tag reference is changed the generation is set to match the current stream generation (which is incremented every time spec is changed). Other processes in the system like the image importer observe that the generation of spec tag is newer than the generation recorded in the status and use that as a trigger to import the newest remote tag. To trigger a new import, clients may set this value to zero which will reset the generation to the latest stream generation. Legacy clients will send this value as nil which will be merged with the current tag generation.", + "importPolicy": "importPolicy is information that controls how images may be imported by the server.", + "referencePolicy": "referencePolicy defines how other components should consume the image.", } func (TagReference) SwaggerDoc() map[string]string { @@ -434,7 +434,7 @@ func (TagReference) SwaggerDoc() map[string]string { var map_TagReferencePolicy = map[string]string{ "": "TagReferencePolicy describes how pull-specs for images in this image stream tag are generated when image change triggers in deployment configs or builds are resolved. This allows the image stream author to control how images are accessed.", - "type": "Type determines how the image pull spec should be transformed when the image stream tag is used in deployment config triggers or new builds. The default value is `Source`, indicating the original location of the image should be used (if imported). 
The user may also specify `Local`, indicating that the pull spec should point to the integrated container image registry and leverage the registry's ability to proxy the pull to an upstream registry. `Local` allows the credentials used to pull this image to be managed from the image stream's namespace, so others on the platform can access a remote image but have no access to the remote secret. It also allows the image layers to be mirrored into the local registry which the images can still be pulled even if the upstream registry is unavailable.", + "type": "type determines how the image pull spec should be transformed when the image stream tag is used in deployment config triggers or new builds. The default value is `Source`, indicating the original location of the image should be used (if imported). The user may also specify `Local`, indicating that the pull spec should point to the integrated container image registry and leverage the registry's ability to proxy the pull to an upstream registry. `Local` allows the credentials used to pull this image to be managed from the image stream's namespace, so others on the platform can access a remote image but have no access to the remote secret. It also allows the image layers to be mirrored into the local registry which the images can still be pulled even if the upstream registry is unavailable.", } func (TagReferencePolicy) SwaggerDoc() map[string]string { diff --git a/vendor/github.com/openshift/api/imageregistry/v1/types.go b/vendor/github.com/openshift/api/imageregistry/v1/types.go index 9b3cc21a4..3dde9d4c3 100644 --- a/vendor/github.com/openshift/api/imageregistry/v1/types.go +++ b/vendor/github.com/openshift/api/imageregistry/v1/types.go @@ -477,7 +477,7 @@ const ( // EncryptionAlibaba this a union type in kube parlance. Depending on the value for the AlibabaEncryptionMethod, // different pointers may be used type EncryptionAlibaba struct { - // Method defines the different encrytion modes available + // method defines the different encrytion modes available // Empty value means no opinion and the platform chooses the a default, which is subject to change over time. // Currently the default is `AES256`. // +kubebuilder:validation:Enum="KMS";"AES256" @@ -485,14 +485,14 @@ type EncryptionAlibaba struct { // +optional Method AlibabaEncryptionMethod `json:"method"` - // KMS (key management service) is an encryption type that holds the struct for KMS KeyID + // kms (key management service) is an encryption type that holds the struct for KMS KeyID // +optional KMS *KMSEncryptionAlibaba `json:"kms,omitempty"` } type KMSEncryptionAlibaba struct { - // KeyID holds the KMS encryption key ID - // +kubebuilder:validation:Required + // keyID holds the KMS encryption key ID + // +required // +kubebuilder:validation:MinLength=1 KeyID string `json:"keyID"` } @@ -501,7 +501,7 @@ type KMSEncryptionAlibaba struct { // Configures the registry to use Alibaba Cloud Object Storage Service for backend storage. // More about oss, you can look at the [official documentation](https://www.alibabacloud.com/help/product/31815.htm) type ImageRegistryConfigStorageAlibabaOSS struct { - // Bucket is the bucket name in which you want to store the registry's data. + // bucket is the bucket name in which you want to store the registry's data. 
// About Bucket naming, more details you can look at the [official documentation](https://www.alibabacloud.com/help/doc-detail/257087.htm) // Empty value means no opinion and the platform chooses the a default, which is subject to change over time. // Currently the default will be autogenerated in the form of -image-registry-- @@ -510,20 +510,20 @@ type ImageRegistryConfigStorageAlibabaOSS struct { // +kubebuilder:validation:Pattern=`^[0-9a-z]+(-[0-9a-z]+)*$` // +optional Bucket string `json:"bucket,omitempty"` - // Region is the Alibaba Cloud Region in which your bucket exists. + // region is the Alibaba Cloud Region in which your bucket exists. // For a list of regions, you can look at the [official documentation](https://www.alibabacloud.com/help/doc-detail/31837.html). // Empty value means no opinion and the platform chooses the a default, which is subject to change over time. // Currently the default will be based on the installed Alibaba Cloud Region. // +optional Region string `json:"region,omitempty"` - // EndpointAccessibility specifies whether the registry use the OSS VPC internal endpoint + // endpointAccessibility specifies whether the registry use the OSS VPC internal endpoint // Empty value means no opinion and the platform chooses the a default, which is subject to change over time. // Currently the default is `Internal`. // +kubebuilder:validation:Enum="Internal";"Public";"" // +kubebuilder:default="Internal" // +optional EndpointAccessibility EndpointAccessibility `json:"endpointAccessibility,omitempty"` - // Encryption specifies whether you would like your data encrypted on the server side. + // encryption specifies whether you would like your data encrypted on the server side. // More details, you can look cat the [official documentation](https://www.alibabacloud.com/help/doc-detail/117914.htm) // +optional Encryption *EncryptionAlibaba `json:"encryption,omitempty"` @@ -556,7 +556,7 @@ type ImageRegistryConfigStorage struct { // ibmcos represents configuration that uses IBM Cloud Object Storage. // +optional IBMCOS *ImageRegistryConfigStorageIBMCOS `json:"ibmcos,omitempty"` - // Oss represents configuration that uses Alibaba Cloud Object Storage Service. + // oss represents configuration that uses Alibaba Cloud Object Storage Service. // +optional OSS *ImageRegistryConfigStorageAlibabaOSS `json:"oss,omitempty"` // managementState indicates if the operator manages the underlying diff --git a/vendor/github.com/openshift/api/imageregistry/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/imageregistry/v1/zz_generated.swagger_doc_generated.go index ec999f309..f8b421ae8 100644 --- a/vendor/github.com/openshift/api/imageregistry/v1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/imageregistry/v1/zz_generated.swagger_doc_generated.go @@ -52,8 +52,8 @@ func (ConfigList) SwaggerDoc() map[string]string { var map_EncryptionAlibaba = map[string]string{ "": "EncryptionAlibaba this a union type in kube parlance. Depending on the value for the AlibabaEncryptionMethod, different pointers may be used", - "method": "Method defines the different encrytion modes available Empty value means no opinion and the platform chooses the a default, which is subject to change over time. 
Currently the default is `AES256`.", - "kms": "KMS (key management service) is an encryption type that holds the struct for KMS KeyID", + "method": "method defines the different encrytion modes available Empty value means no opinion and the platform chooses the a default, which is subject to change over time. Currently the default is `AES256`.", + "kms": "kms (key management service) is an encryption type that holds the struct for KMS KeyID", } func (EncryptionAlibaba) SwaggerDoc() map[string]string { @@ -112,7 +112,7 @@ var map_ImageRegistryConfigStorage = map[string]string{ "pvc": "pvc represents configuration that uses a PersistentVolumeClaim.", "azure": "azure represents configuration that uses Azure Blob Storage.", "ibmcos": "ibmcos represents configuration that uses IBM Cloud Object Storage.", - "oss": "Oss represents configuration that uses Alibaba Cloud Object Storage Service.", + "oss": "oss represents configuration that uses Alibaba Cloud Object Storage Service.", "managementState": "managementState indicates if the operator manages the underlying storage unit. If Managed the operator will remove the storage when this operator gets Removed.", } @@ -122,10 +122,10 @@ func (ImageRegistryConfigStorage) SwaggerDoc() map[string]string { var map_ImageRegistryConfigStorageAlibabaOSS = map[string]string{ "": "ImageRegistryConfigStorageAlibabaOSS holds Alibaba Cloud OSS configuration. Configures the registry to use Alibaba Cloud Object Storage Service for backend storage. More about oss, you can look at the [official documentation](https://www.alibabacloud.com/help/product/31815.htm)", - "bucket": "Bucket is the bucket name in which you want to store the registry's data. About Bucket naming, more details you can look at the [official documentation](https://www.alibabacloud.com/help/doc-detail/257087.htm) Empty value means no opinion and the platform chooses the a default, which is subject to change over time. Currently the default will be autogenerated in the form of -image-registry--", - "region": "Region is the Alibaba Cloud Region in which your bucket exists. For a list of regions, you can look at the [official documentation](https://www.alibabacloud.com/help/doc-detail/31837.html). Empty value means no opinion and the platform chooses the a default, which is subject to change over time. Currently the default will be based on the installed Alibaba Cloud Region.", - "endpointAccessibility": "EndpointAccessibility specifies whether the registry use the OSS VPC internal endpoint Empty value means no opinion and the platform chooses the a default, which is subject to change over time. Currently the default is `Internal`.", - "encryption": "Encryption specifies whether you would like your data encrypted on the server side. More details, you can look cat the [official documentation](https://www.alibabacloud.com/help/doc-detail/117914.htm)", + "bucket": "bucket is the bucket name in which you want to store the registry's data. About Bucket naming, more details you can look at the [official documentation](https://www.alibabacloud.com/help/doc-detail/257087.htm) Empty value means no opinion and the platform chooses the a default, which is subject to change over time. Currently the default will be autogenerated in the form of -image-registry--", + "region": "region is the Alibaba Cloud Region in which your bucket exists. For a list of regions, you can look at the [official documentation](https://www.alibabacloud.com/help/doc-detail/31837.html). 
Empty value means no opinion and the platform chooses the a default, which is subject to change over time. Currently the default will be based on the installed Alibaba Cloud Region.", + "endpointAccessibility": "endpointAccessibility specifies whether the registry use the OSS VPC internal endpoint Empty value means no opinion and the platform chooses the a default, which is subject to change over time. Currently the default is `Internal`.", + "encryption": "encryption specifies whether you would like your data encrypted on the server side. More details, you can look cat the [official documentation](https://www.alibabacloud.com/help/doc-detail/117914.htm)", } func (ImageRegistryConfigStorageAlibabaOSS) SwaggerDoc() map[string]string { @@ -266,7 +266,7 @@ func (ImageRegistryStatus) SwaggerDoc() map[string]string { } var map_KMSEncryptionAlibaba = map[string]string{ - "keyID": "KeyID holds the KMS encryption key ID", + "keyID": "keyID holds the KMS encryption key ID", } func (KMSEncryptionAlibaba) SwaggerDoc() map[string]string { diff --git a/vendor/github.com/openshift/api/kubecontrolplane/v1/types.go b/vendor/github.com/openshift/api/kubecontrolplane/v1/types.go index 6d29f42e3..cd1ba7ec5 100644 --- a/vendor/github.com/openshift/api/kubecontrolplane/v1/types.go +++ b/vendor/github.com/openshift/api/kubecontrolplane/v1/types.go @@ -38,7 +38,7 @@ type KubeAPIServerConfig struct { // DEPRECATED: consolePublicURL has been deprecated and setting it has no effect. ConsolePublicURL string `json:"consolePublicURL"` - // UserAgentMatchingConfig controls how API calls from *voluntarily* identifying clients will be handled. THIS DOES NOT DEFEND AGAINST MALICIOUS CLIENTS! + // userAgentMatchingConfig controls how API calls from *voluntarily* identifying clients will be handled. THIS DOES NOT DEFEND AGAINST MALICIOUS CLIENTS! // TODO I think we should just drop this feature. UserAgentMatchingConfig UserAgentMatchingConfig `json:"userAgentMatchingConfig"` @@ -153,7 +153,7 @@ type UserAgentMatchRule struct { type UserAgentDenyRule struct { UserAgentMatchRule `json:",inline"` - // RejectionMessage is the message shown when rejecting a client. If it is not a set, the default message is used. + // rejectionMessage is the message shown when rejecting a client. If it is not a set, the default message is used. RejectionMessage string `json:"rejectionMessage"` } @@ -231,6 +231,6 @@ type KubeControllerManagerProjectConfig struct { // ServiceServingCert holds configuration for service serving cert signer which creates cert/key pairs for // pods fulfilling a service to serve with. 
type ServiceServingCert struct { - // CertFile is a file containing a PEM-encoded certificate + // certFile is a file containing a PEM-encoded certificate CertFile string `json:"certFile"` } diff --git a/vendor/github.com/openshift/api/kubecontrolplane/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/kubecontrolplane/v1/zz_generated.swagger_doc_generated.go index 5ecdd0583..7b5bef143 100644 --- a/vendor/github.com/openshift/api/kubecontrolplane/v1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/kubecontrolplane/v1/zz_generated.swagger_doc_generated.go @@ -28,7 +28,7 @@ var map_KubeAPIServerConfig = map[string]string{ "servicesSubnet": "servicesSubnet is the subnet to use for assigning service IPs", "servicesNodePortRange": "servicesNodePortRange is the range to use for assigning service public ports on a host.", "consolePublicURL": "DEPRECATED: consolePublicURL has been deprecated and setting it has no effect.", - "userAgentMatchingConfig": "UserAgentMatchingConfig controls how API calls from *voluntarily* identifying clients will be handled. THIS DOES NOT DEFEND AGAINST MALICIOUS CLIENTS!", + "userAgentMatchingConfig": "userAgentMatchingConfig controls how API calls from *voluntarily* identifying clients will be handled. THIS DOES NOT DEFEND AGAINST MALICIOUS CLIENTS!", "imagePolicyConfig": "imagePolicyConfig feeds the image policy admission plugin", "projectConfig": "projectConfig feeds an admission plugin", "serviceAccountPublicKeyFiles": "serviceAccountPublicKeyFiles is a list of files, each containing a PEM-encoded public RSA key. (If any file contains a private key, the public portion of the key is used) The list of public keys is used to verify presented service account tokens. Each key is tried in order until the list is exhausted or verification succeeds. If no keys are specified, no service account authentication will be available.", @@ -112,7 +112,7 @@ func (RequestHeaderAuthenticationOptions) SwaggerDoc() map[string]string { var map_ServiceServingCert = map[string]string{ "": "ServiceServingCert holds configuration for service serving cert signer which creates cert/key pairs for pods fulfilling a service to serve with.", - "certFile": "CertFile is a file containing a PEM-encoded certificate", + "certFile": "certFile is a file containing a PEM-encoded certificate", } func (ServiceServingCert) SwaggerDoc() map[string]string { @@ -121,7 +121,7 @@ func (ServiceServingCert) SwaggerDoc() map[string]string { var map_UserAgentDenyRule = map[string]string{ "": "UserAgentDenyRule adds a rejection message that can be used to help a user figure out how to get an approved client", - "rejectionMessage": "RejectionMessage is the message shown when rejecting a client. If it is not a set, the default message is used.", + "rejectionMessage": "rejectionMessage is the message shown when rejecting a client. If it is not a set, the default message is used.", } func (UserAgentDenyRule) SwaggerDoc() map[string]string { diff --git a/vendor/github.com/openshift/api/legacyconfig/v1/types.go b/vendor/github.com/openshift/api/legacyconfig/v1/types.go index eaf40b6ee..c0e03c233 100644 --- a/vendor/github.com/openshift/api/legacyconfig/v1/types.go +++ b/vendor/github.com/openshift/api/legacyconfig/v1/types.go @@ -21,7 +21,7 @@ type ExtendedArguments map[string][]string type NodeConfig struct { metav1.TypeMeta `json:",inline"` - // NodeName is the value used to identify this particular node in the cluster. 
If possible, this should be your fully qualified hostname. + // nodeName is the value used to identify this particular node in the cluster. If possible, this should be your fully qualified hostname. // If you're describing a set of static nodes to the master, this value must match one of the values in the list NodeName string `json:"nodeName"` @@ -29,40 +29,40 @@ type NodeConfig struct { // If not specified, network parse/lookup on the nodeName is performed and the first non-loopback address is used NodeIP string `json:"nodeIP"` - // ServingInfo describes how to start serving + // servingInfo describes how to start serving ServingInfo ServingInfo `json:"servingInfo"` - // MasterKubeConfig is a filename for the .kubeconfig file that describes how to connect this node to the master + // masterKubeConfig is a filename for the .kubeconfig file that describes how to connect this node to the master MasterKubeConfig string `json:"masterKubeConfig"` - // MasterClientConnectionOverrides provides overrides to the client connection used to connect to the master. + // masterClientConnectionOverrides provides overrides to the client connection used to connect to the master. MasterClientConnectionOverrides *ClientConnectionOverrides `json:"masterClientConnectionOverrides"` - // DNSDomain holds the domain suffix that will be used for the DNS search path inside each container. Defaults to + // dnsDomain holds the domain suffix that will be used for the DNS search path inside each container. Defaults to // 'cluster.local'. DNSDomain string `json:"dnsDomain"` - // DNSIP is the IP address that pods will use to access cluster DNS. Defaults to the service IP of the Kubernetes + // dnsIP is the IP address that pods will use to access cluster DNS. Defaults to the service IP of the Kubernetes // master. This IP must be listening on port 53 for compatibility with libc resolvers (which cannot be configured // to resolve names from any other port). When running more complex local DNS configurations, this is often set // to the local address of a DNS proxy like dnsmasq, which then will consult either the local DNS (see // dnsBindAddress) or the master DNS. DNSIP string `json:"dnsIP"` - // DNSBindAddress is the ip:port to serve DNS on. If this is not set, the DNS server will not be started. + // dnsBindAddress is the ip:port to serve DNS on. If this is not set, the DNS server will not be started. // Because most DNS resolvers will only listen on port 53, if you select an alternative port you will need // a DNS proxy like dnsmasq to answer queries for containers. A common configuration is dnsmasq configured // on a node IP listening on 53 and delegating queries for dnsDomain to this process, while sending other // queries to the host environments nameservers. DNSBindAddress string `json:"dnsBindAddress"` - // DNSNameservers is a list of ip:port values of recursive nameservers to forward queries to when running + // dnsNameservers is a list of ip:port values of recursive nameservers to forward queries to when running // a local DNS server if dnsBindAddress is set. If this value is empty, the DNS server will default to // the nameservers listed in /etc/resolv.conf. If you have configured dnsmasq or another DNS proxy on the // system, this value should be set to the upstream nameservers dnsmasq resolves with. DNSNameservers []string `json:"dnsNameservers"` - // DNSRecursiveResolvConf is a path to a resolv.conf file that contains settings for an upstream server. 
+ // dnsRecursiveResolvConf is a path to a resolv.conf file that contains settings for an upstream server. // Only the nameservers and port fields are used. The file must exist and parse correctly. It adds extra // nameservers to DNSNameservers if set. DNSRecursiveResolvConf string `json:"dnsRecursiveResolvConf"` @@ -70,57 +70,57 @@ type NodeConfig struct { // Deprecated and maintained for backward compatibility, use NetworkConfig.NetworkPluginName instead DeprecatedNetworkPluginName string `json:"networkPluginName,omitempty"` - // NetworkConfig provides network options for the node + // networkConfig provides network options for the node NetworkConfig NodeNetworkConfig `json:"networkConfig"` - // VolumeDirectory is the directory that volumes will be stored under + // volumeDirectory is the directory that volumes will be stored under VolumeDirectory string `json:"volumeDirectory"` - // ImageConfig holds options that describe how to build image names for system components + // imageConfig holds options that describe how to build image names for system components ImageConfig ImageConfig `json:"imageConfig"` - // AllowDisabledDocker if true, the Kubelet will ignore errors from Docker. This means that a node can start on a machine that doesn't have docker started. + // allowDisabledDocker if true, the Kubelet will ignore errors from Docker. This means that a node can start on a machine that doesn't have docker started. AllowDisabledDocker bool `json:"allowDisabledDocker"` - // PodManifestConfig holds the configuration for enabling the Kubelet to + // podManifestConfig holds the configuration for enabling the Kubelet to // create pods based from a manifest file(s) placed locally on the node PodManifestConfig *PodManifestConfig `json:"podManifestConfig"` - // AuthConfig holds authn/authz configuration options + // authConfig holds authn/authz configuration options AuthConfig NodeAuthConfig `json:"authConfig"` - // DockerConfig holds Docker related configuration options. + // dockerConfig holds Docker related configuration options. DockerConfig DockerConfig `json:"dockerConfig"` - // KubeletArguments are key value pairs that will be passed directly to the Kubelet that match the Kubelet's + // kubeletArguments are key value pairs that will be passed directly to the Kubelet that match the Kubelet's // command line arguments. These are not migrated or validated, so if you use them they may become invalid. // These values override other settings in NodeConfig which may cause invalid configurations. KubeletArguments ExtendedArguments `json:"kubeletArguments,omitempty"` - // ProxyArguments are key value pairs that will be passed directly to the Proxy that match the Proxy's + // proxyArguments are key value pairs that will be passed directly to the Proxy that match the Proxy's // command line arguments. These are not migrated or validated, so if you use them they may become invalid. // These values override other settings in NodeConfig which may cause invalid configurations. ProxyArguments ExtendedArguments `json:"proxyArguments,omitempty"` - // IPTablesSyncPeriod is how often iptable rules are refreshed + // iptablesSyncPeriod is how often iptable rules are refreshed IPTablesSyncPeriod string `json:"iptablesSyncPeriod"` - // EnableUnidling controls whether or not the hybrid unidling proxy will be set up + // enableUnidling controls whether or not the hybrid unidling proxy will be set up EnableUnidling *bool `json:"enableUnidling"` - // VolumeConfig contains options for configuring volumes on the node. 
+ // volumeConfig contains options for configuring volumes on the node. VolumeConfig NodeVolumeConfig `json:"volumeConfig"` } // NodeVolumeConfig contains options for configuring volumes on the node. type NodeVolumeConfig struct { - // LocalQuota contains options for controlling local volume quota on the node. + // localQuota contains options for controlling local volume quota on the node. LocalQuota LocalQuota `json:"localQuota"` } // MasterVolumeConfig contains options for configuring volume plugins in the master node. type MasterVolumeConfig struct { - // DynamicProvisioningEnabled is a boolean that toggles dynamic provisioning off when false, defaults to true + // dynamicProvisioningEnabled is a boolean that toggles dynamic provisioning off when false, defaults to true DynamicProvisioningEnabled *bool `json:"dynamicProvisioningEnabled"` } @@ -134,24 +134,24 @@ type LocalQuota struct { // NodeAuthConfig holds authn/authz configuration options type NodeAuthConfig struct { - // AuthenticationCacheTTL indicates how long an authentication result should be cached. + // authenticationCacheTTL indicates how long an authentication result should be cached. // It takes a valid time duration string (e.g. "5m"). If empty, you get the default timeout. If zero (e.g. "0m"), caching is disabled AuthenticationCacheTTL string `json:"authenticationCacheTTL"` - // AuthenticationCacheSize indicates how many authentication results should be cached. If 0, the default cache size is used. + // authenticationCacheSize indicates how many authentication results should be cached. If 0, the default cache size is used. AuthenticationCacheSize int `json:"authenticationCacheSize"` - // AuthorizationCacheTTL indicates how long an authorization result should be cached. + // authorizationCacheTTL indicates how long an authorization result should be cached. // It takes a valid time duration string (e.g. "5m"). If empty, you get the default timeout. If zero (e.g. "0m"), caching is disabled AuthorizationCacheTTL string `json:"authorizationCacheTTL"` - // AuthorizationCacheSize indicates how many authorization results should be cached. If 0, the default cache size is used. + // authorizationCacheSize indicates how many authorization results should be cached. If 0, the default cache size is used. AuthorizationCacheSize int `json:"authorizationCacheSize"` } // NodeNetworkConfig provides network options for the node type NodeNetworkConfig struct { - // NetworkPluginName is a string specifying the networking plugin + // networkPluginName is a string specifying the networking plugin NetworkPluginName string `json:"networkPluginName"` // Maximum transmission unit for the network packets MTU uint32 `json:"mtu"` @@ -159,14 +159,14 @@ type NodeNetworkConfig struct { // DockerConfig holds Docker related configuration options. type DockerConfig struct { - // ExecHandlerName is the name of the handler to use for executing + // execHandlerName is the name of the handler to use for executing // commands in containers. ExecHandlerName DockerExecHandlerType `json:"execHandlerName"` - // DockerShimSocket is the location of the dockershim socket the kubelet uses. + // dockerShimSocket is the location of the dockershim socket the kubelet uses. // Currently unix socket is supported on Linux, and tcp is supported on windows. // Examples:'unix:///var/run/dockershim.sock', 'tcp://localhost:3735' DockerShimSocket string `json:"dockerShimSocket"` - // DockershimRootDirectory is the dockershim root directory. 
+ // dockerShimRootDirectory is the dockershim root directory. DockershimRootDirectory string `json:"dockerShimRootDirectory"` } @@ -197,45 +197,45 @@ type FeatureList []string type MasterConfig struct { metav1.TypeMeta `json:",inline"` - // ServingInfo describes how to start serving + // servingInfo describes how to start serving ServingInfo HTTPServingInfo `json:"servingInfo"` - // AuthConfig configures authentication options in addition to the standard + // authConfig configures authentication options in addition to the standard // oauth token and client certificate authenticators AuthConfig MasterAuthConfig `json:"authConfig"` - // AggregatorConfig has options for configuring the aggregator component of the API server. + // aggregatorConfig has options for configuring the aggregator component of the API server. AggregatorConfig AggregatorConfig `json:"aggregatorConfig"` // CORSAllowedOrigins CORSAllowedOrigins []string `json:"corsAllowedOrigins"` - // APILevels is a list of API levels that should be enabled on startup: v1 as examples + // apiLevels is a list of API levels that should be enabled on startup: v1 as examples APILevels []string `json:"apiLevels"` - // MasterPublicURL is how clients can access the OpenShift API server + // masterPublicURL is how clients can access the OpenShift API server MasterPublicURL string `json:"masterPublicURL"` - // Controllers is a list of the controllers that should be started. If set to "none", no controllers + // controllers is a list of the controllers that should be started. If set to "none", no controllers // will start automatically. The default value is "*" which will start all controllers. When // using "*", you may exclude controllers by prepending a "-" in front of their name. No other // values are recognized at this time. Controllers string `json:"controllers"` - // AdmissionConfig contains admission control plugin configuration. + // admissionConfig contains admission control plugin configuration. AdmissionConfig AdmissionConfig `json:"admissionConfig"` - // ControllerConfig holds configuration values for controllers + // controllerConfig holds configuration values for controllers ControllerConfig ControllerConfig `json:"controllerConfig"` - // EtcdStorageConfig contains information about how API resources are + // etcdStorageConfig contains information about how API resources are // stored in Etcd. These values are only relevant when etcd is the // backing store for the cluster. 
EtcdStorageConfig EtcdStorageConfig `json:"etcdStorageConfig"` - // EtcdClientInfo contains information about how to connect to etcd + // etcdClientInfo contains information about how to connect to etcd EtcdClientInfo EtcdConnectionInfo `json:"etcdClientInfo"` - // KubeletClientInfo contains information about how to connect to kubelets + // kubeletClientInfo contains information about how to connect to kubelets KubeletClientInfo KubeletConnectionInfo `json:"kubeletClientInfo"` // KubernetesMasterConfig, if present start the kubernetes master in this process @@ -248,38 +248,38 @@ type MasterConfig struct { // DNSConfig, if present start the DNS server in this process DNSConfig *DNSConfig `json:"dnsConfig"` - // ServiceAccountConfig holds options related to service accounts + // serviceAccountConfig holds options related to service accounts ServiceAccountConfig ServiceAccountConfig `json:"serviceAccountConfig"` - // MasterClients holds all the client connection information for controllers and other system components + // masterClients holds all the client connection information for controllers and other system components MasterClients MasterClients `json:"masterClients"` - // ImageConfig holds options that describe how to build image names for system components + // imageConfig holds options that describe how to build image names for system components ImageConfig ImageConfig `json:"imageConfig"` - // ImagePolicyConfig controls limits and behavior for importing images + // imagePolicyConfig controls limits and behavior for importing images ImagePolicyConfig ImagePolicyConfig `json:"imagePolicyConfig"` - // PolicyConfig holds information about where to locate critical pieces of bootstrapping policy + // policyConfig holds information about where to locate critical pieces of bootstrapping policy PolicyConfig PolicyConfig `json:"policyConfig"` - // ProjectConfig holds information about project creation and defaults + // projectConfig holds information about project creation and defaults ProjectConfig ProjectConfig `json:"projectConfig"` - // RoutingConfig holds information about routing and route generation + // routingConfig holds information about routing and route generation RoutingConfig RoutingConfig `json:"routingConfig"` - // NetworkConfig to be passed to the compiled in network plugin + // networkConfig to be passed to the compiled in network plugin NetworkConfig MasterNetworkConfig `json:"networkConfig"` // MasterVolumeConfig contains options for configuring volume plugins in the master node. VolumeConfig MasterVolumeConfig `json:"volumeConfig"` - // JenkinsPipelineConfig holds information about the default Jenkins template + // jenkinsPipelineConfig holds information about the default Jenkins template // used for JenkinsPipeline build strategy. JenkinsPipelineConfig JenkinsPipelineConfig `json:"jenkinsPipelineConfig"` - // AuditConfig holds information related to auditing capabilities. + // auditConfig holds information related to auditing capabilities. AuditConfig AuditConfig `json:"auditConfig"` // DisableOpenAPI avoids starting the openapi endpoint because it is very expensive. @@ -290,11 +290,11 @@ type MasterConfig struct { // MasterAuthConfig configures authentication options in addition to the standard // oauth token and client certificate authenticators type MasterAuthConfig struct { - // RequestHeader holds options for setting up a front proxy against the API. It is optional. + // requestHeader holds options for setting up a front proxy against the API. It is optional. 
RequestHeader *RequestHeaderAuthenticationOptions `json:"requestHeader"` // WebhookTokenAuthnConfig, if present configures remote token reviewers WebhookTokenAuthenticators []WebhookTokenAuthenticator `json:"webhookTokenAuthenticators"` - // OAuthMetadataFile is a path to a file containing the discovery endpoint for OAuth 2.0 Authorization + // oauthMetadataFile is a path to a file containing the discovery endpoint for OAuth 2.0 Authorization // Server Metadata for an external OAuth server. // See IETF Draft: // https://tools.ietf.org/html/draft-ietf-oauth-discovery-04#section-2 // This option is mutually exclusive with OAuthConfig @@ -304,22 +304,22 @@ type MasterAuthConfig struct { // RequestHeaderAuthenticationOptions provides options for setting up a front proxy against the entire // API instead of against the /oauth endpoint. type RequestHeaderAuthenticationOptions struct { - // ClientCA is a file with the trusted signer certs. It is required. + // clientCA is a file with the trusted signer certs. It is required. ClientCA string `json:"clientCA"` - // ClientCommonNames is a required list of common names to require a match from. + // clientCommonNames is a required list of common names to require a match from. ClientCommonNames []string `json:"clientCommonNames"` - // UsernameHeaders is the list of headers to check for user information. First hit wins. + // usernameHeaders is the list of headers to check for user information. First hit wins. UsernameHeaders []string `json:"usernameHeaders"` // GroupNameHeader is the set of headers to check for group information. All are unioned. GroupHeaders []string `json:"groupHeaders"` - // ExtraHeaderPrefixes is the set of request header prefixes to inspect for user extra. X-Remote-Extra- is suggested. + // extraHeaderPrefixes is the set of request header prefixes to inspect for user extra. X-Remote-Extra- is suggested. ExtraHeaderPrefixes []string `json:"extraHeaderPrefixes"` } // AggregatorConfig holds information required to make the aggregator function. type AggregatorConfig struct { - // ProxyClientInfo specifies the client cert/key to use when proxying to aggregated API servers + // proxyClientInfo specifies the client cert/key to use when proxying to aggregated API servers ProxyClientInfo CertInfo `json:"proxyClientInfo"` } @@ -357,9 +357,9 @@ type AuditConfig struct { // Maximum size in megabytes of the log file before it gets rotated. Defaults to 100MB. MaximumFileSizeMegabytes int `json:"maximumFileSizeMegabytes"` - // PolicyFile is a path to the file that defines the audit policy configuration. + // policyFile is a path to the file that defines the audit policy configuration. PolicyFile string `json:"policyFile"` - // PolicyConfiguration is an embedded policy configuration object to be used + // policyConfiguration is an embedded policy configuration object to be used // as the audit policy configuration. If present, it will be used instead of // the path to the policy file. PolicyConfiguration runtime.RawExtension `json:"policyConfiguration"` @@ -375,52 +375,52 @@ type AuditConfig struct { // JenkinsPipelineConfig holds configuration for the Jenkins pipeline strategy type JenkinsPipelineConfig struct { - // AutoProvisionEnabled determines whether a Jenkins server will be spawned from the provided + // autoProvisionEnabled determines whether a Jenkins server will be spawned from the provided // template when the first build config in the project with type JenkinsPipeline // is created. When not specified this option defaults to true. 
AutoProvisionEnabled *bool `json:"autoProvisionEnabled"` - // TemplateNamespace contains the namespace name where the Jenkins template is stored + // templateNamespace contains the namespace name where the Jenkins template is stored TemplateNamespace string `json:"templateNamespace"` - // TemplateName is the name of the default Jenkins template + // templateName is the name of the default Jenkins template TemplateName string `json:"templateName"` - // ServiceName is the name of the Jenkins service OpenShift uses to detect + // serviceName is the name of the Jenkins service OpenShift uses to detect // whether a Jenkins pipeline handler has already been installed in a project. // This value *must* match a service name in the provided template. ServiceName string `json:"serviceName"` - // Parameters specifies a set of optional parameters to the Jenkins template. + // parameters specifies a set of optional parameters to the Jenkins template. Parameters map[string]string `json:"parameters"` } // ImagePolicyConfig holds the necessary configuration options for limits and behavior for importing images type ImagePolicyConfig struct { - // MaxImagesBulkImportedPerRepository controls the number of images that are imported when a user + // maxImagesBulkImportedPerRepository controls the number of images that are imported when a user // does a bulk import of a container repository. This number defaults to 50 to prevent users from // importing large numbers of images accidentally. Set -1 for no limit. MaxImagesBulkImportedPerRepository int `json:"maxImagesBulkImportedPerRepository"` - // DisableScheduledImport allows scheduled background import of images to be disabled. + // disableScheduledImport allows scheduled background import of images to be disabled. DisableScheduledImport bool `json:"disableScheduledImport"` - // ScheduledImageImportMinimumIntervalSeconds is the minimum number of seconds that can elapse between when image streams + // scheduledImageImportMinimumIntervalSeconds is the minimum number of seconds that can elapse between when image streams // scheduled for background import are checked against the upstream repository. The default value is 15 minutes. ScheduledImageImportMinimumIntervalSeconds int `json:"scheduledImageImportMinimumIntervalSeconds"` - // MaxScheduledImageImportsPerMinute is the maximum number of scheduled image streams that will be imported in the + // maxScheduledImageImportsPerMinute is the maximum number of scheduled image streams that will be imported in the // background per minute. The default value is 60. Set to -1 for unlimited. MaxScheduledImageImportsPerMinute int `json:"maxScheduledImageImportsPerMinute"` - // AllowedRegistriesForImport limits the container image registries that normal users may import + // allowedRegistriesForImport limits the container image registries that normal users may import // images from. Set this list to the registries that you trust to contain valid Docker // images and that you want applications to be able to import from. Users with // permission to create Images or ImageStreamMappings via the API are not affected by // this policy - typically only administrators or system integrations will have those // permissions. AllowedRegistriesForImport *AllowedRegistries `json:"allowedRegistriesForImport,omitempty"` - // InternalRegistryHostname sets the hostname for the default internal image + // internalRegistryHostname sets the hostname for the default internal image // registry. The value must be in "hostname[:port]" format. 
InternalRegistryHostname string `json:"internalRegistryHostname,omitempty"` - // ExternalRegistryHostname sets the hostname for the default external image + // externalRegistryHostname sets the hostname for the default external image // registry. The external hostname should be set only when the image registry // is exposed externally. The value is used in 'publicDockerImageRepository' // field in ImageStreams. The value must be in "hostname[:port]" format. ExternalRegistryHostname string `json:"externalRegistryHostname,omitempty"` - // AdditionalTrustedCA is a path to a pem bundle file containing additional CAs that + // additionalTrustedCA is a path to a pem bundle file containing additional CAs that // should be trusted during imagestream import. AdditionalTrustedCA string `json:"additionalTrustedCA,omitempty"` } @@ -431,40 +431,40 @@ type AllowedRegistries []RegistryLocation // RegistryLocation contains a location of the registry specified by the registry domain // name. The domain name might include wildcards, like '*' or '??'. type RegistryLocation struct { - // DomainName specifies a domain name for the registry + // domainName specifies a domain name for the registry // In case the registry use non-standard (80 or 443) port, the port should be included // in the domain name as well. DomainName string `json:"domainName"` - // Insecure indicates whether the registry is secure (https) or insecure (http) + // insecure indicates whether the registry is secure (https) or insecure (http) // By default (if not specified) the registry is assumed as secure. Insecure bool `json:"insecure,omitempty"` } // holds the necessary configuration options for type ProjectConfig struct { - // DefaultNodeSelector holds default project node label selector + // defaultNodeSelector holds default project node label selector DefaultNodeSelector string `json:"defaultNodeSelector"` - // ProjectRequestMessage is the string presented to a user if they are unable to request a project via the projectrequest api endpoint + // projectRequestMessage is the string presented to a user if they are unable to request a project via the projectrequest api endpoint ProjectRequestMessage string `json:"projectRequestMessage"` - // ProjectRequestTemplate is the template to use for creating projects in response to projectrequest. + // projectRequestTemplate is the template to use for creating projects in response to projectrequest. // It is in the format namespace/template and it is optional. // If it is not specified, a default template is used. ProjectRequestTemplate string `json:"projectRequestTemplate"` - // SecurityAllocator controls the automatic allocation of UIDs and MCS labels to a project. If nil, allocation is disabled. + // securityAllocator controls the automatic allocation of UIDs and MCS labels to a project. If nil, allocation is disabled. SecurityAllocator *SecurityAllocator `json:"securityAllocator"` } // SecurityAllocator controls the automatic allocation of UIDs and MCS labels to a project. If nil, allocation is disabled. type SecurityAllocator struct { - // UIDAllocatorRange defines the total set of Unix user IDs (UIDs) that will be allocated to projects automatically, and the size of the + // uidAllocatorRange defines the total set of Unix user IDs (UIDs) that will be allocated to projects automatically, and the size of the // block each namespace gets. For example, 1000-1999/10 will allocate ten UIDs per namespace, and will be able to allocate up to 100 blocks // before running out of space. 
The default is to allocate from 1 billion to 2 billion in 10k blocks (which is the expected size of the // ranges container images will use once user namespaces are started). UIDAllocatorRange string `json:"uidAllocatorRange"` - // MCSAllocatorRange defines the range of MCS categories that will be assigned to namespaces. The format is + // mcsAllocatorRange defines the range of MCS categories that will be assigned to namespaces. The format is // "/[,]". The default is "s0/2" and will allocate from c0 -> c1023, which means a total of 535k labels // are available (1024 choose 2 ~ 535k). If this value is changed after startup, new projects may receive labels that are already allocated // to other projects. Prefix may be any valid SELinux set of terms (including user, role, and type), although leaving them as the default @@ -475,14 +475,14 @@ type SecurityAllocator struct { // * s0:/2,512 - Allocate labels from s0:c0,c0,c0 to s0:c511,c511,511 // MCSAllocatorRange string `json:"mcsAllocatorRange"` - // MCSLabelsPerProject defines the number of labels that should be reserved per project. The default is 5 to match the default UID and MCS + // mcsLabelsPerProject defines the number of labels that should be reserved per project. The default is 5 to match the default UID and MCS // ranges (100k namespaces, 535k/5 labels). MCSLabelsPerProject int `json:"mcsLabelsPerProject"` } // holds the necessary configuration options for type PolicyConfig struct { - // UserAgentMatchingConfig controls how API calls from *voluntarily* identifying clients will be handled. THIS DOES NOT DEFEND AGAINST MALICIOUS CLIENTS! + // userAgentMatchingConfig controls how API calls from *voluntarily* identifying clients will be handled. THIS DOES NOT DEFEND AGAINST MALICIOUS CLIENTS! UserAgentMatchingConfig UserAgentMatchingConfig `json:"userAgentMatchingConfig"` } @@ -494,7 +494,7 @@ type UserAgentMatchingConfig struct { // If this list is non-empty, then a User-Agent must not match any of the UserAgentRegexes DeniedClients []UserAgentDenyRule `json:"deniedClients"` - // DefaultRejectionMessage is the message shown when rejecting a client. If it is not a set, a generic message is given. + // defaultRejectionMessage is the message shown when rejecting a client. If it is not a set, a generic message is given. DefaultRejectionMessage string `json:"defaultRejectionMessage"` } @@ -512,7 +512,7 @@ type UserAgentMatchRule struct { // 8. openshift cli accessing openshift resources: openshift/v1.1.3 (linux/amd64) openshift/b348c2f Regex string `json:"regex"` - // HTTPVerbs specifies which HTTP verbs should be matched. An empty list means "match all verbs". + // httpVerbs specifies which HTTP verbs should be matched. An empty list means "match all verbs". HTTPVerbs []string `json:"httpVerbs"` } @@ -520,13 +520,13 @@ type UserAgentMatchRule struct { type UserAgentDenyRule struct { UserAgentMatchRule `json:",inline"` - // RejectionMessage is the message shown when rejecting a client. If it is not a set, the default message is used. + // rejectionMessage is the message shown when rejecting a client. If it is not a set, the default message is used. RejectionMessage string `json:"rejectionMessage"` } // RoutingConfig holds the necessary configuration options for routing to subdomains type RoutingConfig struct { - // Subdomain is the suffix appended to $service.$namespace. to form the default route hostname + // subdomain is the suffix appended to $service.$namespace. 
to form the default route hostname // DEPRECATED: This field is being replaced by routers setting their own defaults. This is the // "default" route. Subdomain string `json:"subdomain"` @@ -534,51 +534,51 @@ type RoutingConfig struct { // MasterNetworkConfig to be passed to the compiled in network plugin type MasterNetworkConfig struct { - // NetworkPluginName is the name of the network plugin to use + // networkPluginName is the name of the network plugin to use NetworkPluginName string `json:"networkPluginName"` - // ClusterNetworkCIDR is the CIDR string to specify the global overlay network's L3 space. Deprecated, but maintained for backwards compatibility, use ClusterNetworks instead. + // clusterNetworkCIDR is the CIDR string to specify the global overlay network's L3 space. Deprecated, but maintained for backwards compatibility, use ClusterNetworks instead. DeprecatedClusterNetworkCIDR string `json:"clusterNetworkCIDR,omitempty"` - // ClusterNetworks is a list of ClusterNetwork objects that defines the global overlay network's L3 space by specifying a set of CIDR and netmasks that the SDN can allocate addressed from. If this is specified, then ClusterNetworkCIDR and HostSubnetLength may not be set. + // clusterNetworks is a list of ClusterNetwork objects that defines the global overlay network's L3 space by specifying a set of CIDR and netmasks that the SDN can allocate addressed from. If this is specified, then ClusterNetworkCIDR and HostSubnetLength may not be set. ClusterNetworks []ClusterNetworkEntry `json:"clusterNetworks"` - // HostSubnetLength is the number of bits to allocate to each host's subnet e.g. 8 would mean a /24 network on the host. Deprecated, but maintained for backwards compatibility, use ClusterNetworks instead. + // hostSubnetLength is the number of bits to allocate to each host's subnet e.g. 8 would mean a /24 network on the host. Deprecated, but maintained for backwards compatibility, use ClusterNetworks instead. DeprecatedHostSubnetLength uint32 `json:"hostSubnetLength,omitempty"` // ServiceNetwork is the CIDR string to specify the service networks ServiceNetworkCIDR string `json:"serviceNetworkCIDR"` - // ExternalIPNetworkCIDRs controls what values are acceptable for the service external IP field. If empty, no externalIP + // externalIPNetworkCIDRs controls what values are acceptable for the service external IP field. If empty, no externalIP // may be set. It may contain a list of CIDRs which are checked for access. If a CIDR is prefixed with !, IPs in that // CIDR will be rejected. Rejections will be applied first, then the IP checked against one of the allowed CIDRs. You // should ensure this range does not overlap with your nodes, pods, or service CIDRs for security reasons. ExternalIPNetworkCIDRs []string `json:"externalIPNetworkCIDRs"` - // IngressIPNetworkCIDR controls the range to assign ingress ips from for services of type LoadBalancer on bare + // ingressIPNetworkCIDR controls the range to assign ingress ips from for services of type LoadBalancer on bare // metal. If empty, ingress ips will not be assigned. It may contain a single CIDR that will be allocated from. // For security reasons, you should ensure that this range does not overlap with the CIDRs reserved for external ips, // nodes, pods, or services. IngressIPNetworkCIDR string `json:"ingressIPNetworkCIDR"` - // VXLANPort is the VXLAN port used by the cluster defaults. If it is not set, 4789 is the default value + // vxlanPort is the VXLAN port used by the cluster defaults. 
If it is not set, 4789 is the default value VXLANPort uint32 `json:"vxlanPort,omitempty"` } // ClusterNetworkEntry defines an individual cluster network. The CIDRs cannot overlap with other cluster network CIDRs, CIDRs reserved for external ips, CIDRs reserved for service networks, and CIDRs reserved for ingress ips. type ClusterNetworkEntry struct { - // CIDR defines the total range of a cluster networks address space. + // cidr defines the total range of a cluster networks address space. CIDR string `json:"cidr"` - // HostSubnetLength is the number of bits of the accompanying CIDR address to allocate to each node. eg, 8 would mean that each node would have a /24 slice of the overlay network for its pod. + // hostSubnetLength is the number of bits of the accompanying CIDR address to allocate to each node. eg, 8 would mean that each node would have a /24 slice of the overlay network for its pod. HostSubnetLength uint32 `json:"hostSubnetLength"` } // ImageConfig holds the necessary configuration options for building image names for system components type ImageConfig struct { - // Format is the format of the name to be built for the system component + // format is the format of the name to be built for the system component Format string `json:"format"` - // Latest determines if the latest tag will be pulled from the registry + // latest determines if the latest tag will be pulled from the registry Latest bool `json:"latest"` } // RemoteConnectionInfo holds information necessary for establishing a remote connection type RemoteConnectionInfo struct { - // URL is the remote URL to connect to + // url is the remote URL to connect to URL string `json:"url"` - // CA is the CA for verifying TLS connections + // ca is the CA for verifying TLS connections CA string `json:"ca"` // CertInfo is the TLS client cert information to present // this is anonymous so that we can inline it for serialization @@ -587,9 +587,9 @@ type RemoteConnectionInfo struct { // KubeletConnectionInfo holds information necessary for connecting to a kubelet type KubeletConnectionInfo struct { - // Port is the port to connect to kubelets on + // port is the port to connect to kubelets on Port uint `json:"port"` - // CA is the CA for verifying TLS connections to kubelets + // ca is the CA for verifying TLS connections to kubelets CA string `json:"ca"` // CertInfo is the TLS client cert information for securing communication to kubelets // this is anonymous so that we can inline it for serialization @@ -598,9 +598,9 @@ type KubeletConnectionInfo struct { // EtcdConnectionInfo holds information necessary for connecting to an etcd server type EtcdConnectionInfo struct { - // URLs are the URLs for etcd + // urls are the URLs for etcd URLs []string `json:"urls"` - // CA is a file containing trusted roots for the etcd server certificates + // ca is a file containing trusted roots for the etcd server certificates CA string `json:"ca"` // CertInfo is the TLS client cert information for securing communication to etcd // this is anonymous so that we can inline it for serialization @@ -609,19 +609,19 @@ type EtcdConnectionInfo struct { // EtcdStorageConfig holds the necessary configuration options for the etcd storage underlying OpenShift and Kubernetes type EtcdStorageConfig struct { - // KubernetesStorageVersion is the API version that Kube resources in etcd should be + // kubernetesStorageVersion is the API version that Kube resources in etcd should be // serialized to. 
This value should *not* be advanced until all clients in the // cluster that read from etcd have code that allows them to read the new version. KubernetesStorageVersion string `json:"kubernetesStorageVersion"` - // KubernetesStoragePrefix is the path within etcd that the Kubernetes resources will + // kubernetesStoragePrefix is the path within etcd that the Kubernetes resources will // be rooted under. This value, if changed, will mean existing objects in etcd will // no longer be located. The default value is 'kubernetes.io'. KubernetesStoragePrefix string `json:"kubernetesStoragePrefix"` - // OpenShiftStorageVersion is the API version that OS resources in etcd should be + // openShiftStorageVersion is the API version that OS resources in etcd should be // serialized to. This value should *not* be advanced until all clients in the // cluster that read from etcd have code that allows them to read the new version. OpenShiftStorageVersion string `json:"openShiftStorageVersion"` - // OpenShiftStoragePrefix is the path within etcd that the OpenShift resources will + // openShiftStoragePrefix is the path within etcd that the OpenShift resources will // be rooted under. This value, if changed, will mean existing objects in etcd will // no longer be located. The default value is 'openshift.io'. OpenShiftStoragePrefix string `json:"openShiftStoragePrefix"` @@ -629,29 +629,29 @@ type EtcdStorageConfig struct { // ServingInfo holds information about serving web pages type ServingInfo struct { - // BindAddress is the ip:port to serve on + // bindAddress is the ip:port to serve on BindAddress string `json:"bindAddress"` - // BindNetwork is the type of network to bind to - defaults to "tcp4", accepts "tcp", + // bindNetwork is the type of network to bind to - defaults to "tcp4", accepts "tcp", // "tcp4", and "tcp6" BindNetwork string `json:"bindNetwork"` // CertInfo is the TLS cert info for serving secure traffic. // this is anonymous so that we can inline it for serialization CertInfo `json:",inline"` - // ClientCA is the certificate bundle for all the signers that you'll recognize for incoming client certificates + // clientCA is the certificate bundle for all the signers that you'll recognize for incoming client certificates ClientCA string `json:"clientCA"` - // NamedCertificates is a list of certificates to use to secure requests to specific hostnames + // namedCertificates is a list of certificates to use to secure requests to specific hostnames NamedCertificates []NamedCertificate `json:"namedCertificates"` - // MinTLSVersion is the minimum TLS version supported. + // minTLSVersion is the minimum TLS version supported. // Values must match version names from https://golang.org/pkg/crypto/tls/#pkg-constants MinTLSVersion string `json:"minTLSVersion,omitempty"` - // CipherSuites contains an overridden list of ciphers for the server to support. + // cipherSuites contains an overridden list of ciphers for the server to support. // Values must match cipher suite IDs from https://golang.org/pkg/crypto/tls/#pkg-constants CipherSuites []string `json:"cipherSuites,omitempty"` } // NamedCertificate specifies a certificate/key, and the names it should be served for type NamedCertificate struct { - // Names is a list of DNS names this certificate should be used to secure + // names is a list of DNS names this certificate should be used to secure // A name can be a normal DNS name, or can contain leading wildcard segments. 
Names []string `json:"names"` // CertInfo is the TLS cert info for serving secure traffic @@ -662,45 +662,45 @@ type NamedCertificate struct { type HTTPServingInfo struct { // ServingInfo is the HTTP serving information ServingInfo `json:",inline"` - // MaxRequestsInFlight is the number of concurrent requests allowed to the server. If zero, no limit. + // maxRequestsInFlight is the number of concurrent requests allowed to the server. If zero, no limit. MaxRequestsInFlight int `json:"maxRequestsInFlight"` - // RequestTimeoutSeconds is the number of seconds before requests are timed out. The default is 60 minutes, if + // requestTimeoutSeconds is the number of seconds before requests are timed out. The default is 60 minutes, if // -1 there is no limit on requests. RequestTimeoutSeconds int `json:"requestTimeoutSeconds"` } // MasterClients holds references to `.kubeconfig` files that qualify master clients for OpenShift and Kubernetes type MasterClients struct { - // OpenShiftLoopbackKubeConfig is a .kubeconfig filename for system components to loopback to this master + // openshiftLoopbackKubeConfig is a .kubeconfig filename for system components to loopback to this master OpenShiftLoopbackKubeConfig string `json:"openshiftLoopbackKubeConfig"` - // OpenShiftLoopbackClientConnectionOverrides specifies client overrides for system components to loop back to this master. + // openshiftLoopbackClientConnectionOverrides specifies client overrides for system components to loop back to this master. OpenShiftLoopbackClientConnectionOverrides *ClientConnectionOverrides `json:"openshiftLoopbackClientConnectionOverrides"` } // ClientConnectionOverrides are a set of overrides to the default client connection settings. type ClientConnectionOverrides struct { - // AcceptContentTypes defines the Accept header sent by clients when connecting to a server, overriding the + // acceptContentTypes defines the Accept header sent by clients when connecting to a server, overriding the // default value of 'application/json'. This field will control all connections to the server used by a particular // client. AcceptContentTypes string `json:"acceptContentTypes"` - // ContentType is the content type used when sending data to the server from this client. + // contentType is the content type used when sending data to the server from this client. ContentType string `json:"contentType"` - // QPS controls the number of queries per second allowed for this connection. + // qps controls the number of queries per second allowed for this connection. QPS float32 `json:"qps"` - // Burst allows extra queries to accumulate when a client is exceeding its rate. + // burst allows extra queries to accumulate when a client is exceeding its rate. Burst int32 `json:"burst"` } // DNSConfig holds the necessary configuration options for DNS type DNSConfig struct { - // BindAddress is the ip:port to serve DNS on + // bindAddress is the ip:port to serve DNS on BindAddress string `json:"bindAddress"` - // BindNetwork is the type of network to bind to - defaults to "tcp4", accepts "tcp", + // bindNetwork is the type of network to bind to - defaults to "tcp4", accepts "tcp", // "tcp4", and "tcp6" BindNetwork string `json:"bindNetwork"` - // AllowRecursiveQueries allows the DNS server on the master to answer queries recursively. Note that open + // allowRecursiveQueries allows the DNS server on the master to answer queries recursively. 
Note that open // resolvers can be used for DNS amplification attacks and the master DNS should not be made accessible // to public networks. AllowRecursiveQueries bool `json:"allowRecursiveQueries"` @@ -709,9 +709,9 @@ type DNSConfig struct { // WebhookTokenAuthenticators holds the necessary configuation options for // external token authenticators type WebhookTokenAuthenticator struct { - // ConfigFile is a path to a Kubeconfig file with the webhook configuration + // configFile is a path to a Kubeconfig file with the webhook configuration ConfigFile string `json:"configFile"` - // CacheTTL indicates how long an authentication result should be cached. + // cacheTTL indicates how long an authentication result should be cached. // It takes a valid time duration string (e.g. "5m"). // If empty, you get a default timeout of 2 minutes. // If zero (e.g. "0m"), caching is disabled @@ -720,85 +720,85 @@ type WebhookTokenAuthenticator struct { // OAuthConfig holds the necessary configuration options for OAuth authentication type OAuthConfig struct { - // MasterCA is the CA for verifying the TLS connection back to the MasterURL. + // masterCA is the CA for verifying the TLS connection back to the MasterURL. MasterCA *string `json:"masterCA"` - // MasterURL is used for making server-to-server calls to exchange authorization codes for access tokens + // masterURL is used for making server-to-server calls to exchange authorization codes for access tokens MasterURL string `json:"masterURL"` - // MasterPublicURL is used for building valid client redirect URLs for internal and external access + // masterPublicURL is used for building valid client redirect URLs for internal and external access MasterPublicURL string `json:"masterPublicURL"` - // AssetPublicURL is used for building valid client redirect URLs for external access + // assetPublicURL is used for building valid client redirect URLs for external access AssetPublicURL string `json:"assetPublicURL"` - // AlwaysShowProviderSelection will force the provider selection page to render even when there is only a single provider. + // alwaysShowProviderSelection will force the provider selection page to render even when there is only a single provider. AlwaysShowProviderSelection bool `json:"alwaysShowProviderSelection"` - // IdentityProviders is an ordered list of ways for a user to identify themselves + // identityProviders is an ordered list of ways for a user to identify themselves IdentityProviders []IdentityProvider `json:"identityProviders"` - // GrantConfig describes how to handle grants + // grantConfig describes how to handle grants GrantConfig GrantConfig `json:"grantConfig"` - // SessionConfig hold information about configuring sessions. + // sessionConfig hold information about configuring sessions. SessionConfig *SessionConfig `json:"sessionConfig"` - // TokenConfig contains options for authorization and access tokens + // tokenConfig contains options for authorization and access tokens TokenConfig TokenConfig `json:"tokenConfig"` - // Templates allow you to customize pages like the login page. + // templates allow you to customize pages like the login page. Templates *OAuthTemplates `json:"templates"` } // OAuthTemplates allow for customization of pages like the login page type OAuthTemplates struct { - // Login is a path to a file containing a go template used to render the login page. + // login is a path to a file containing a go template used to render the login page. // If unspecified, the default login page is used. 
Login string `json:"login"` - // ProviderSelection is a path to a file containing a go template used to render the provider selection page. + // providerSelection is a path to a file containing a go template used to render the provider selection page. // If unspecified, the default provider selection page is used. ProviderSelection string `json:"providerSelection"` - // Error is a path to a file containing a go template used to render error pages during the authentication or grant flow + // error is a path to a file containing a go template used to render error pages during the authentication or grant flow // If unspecified, the default error page is used. Error string `json:"error"` } // ServiceAccountConfig holds the necessary configuration options for a service account type ServiceAccountConfig struct { - // ManagedNames is a list of service account names that will be auto-created in every namespace. + // managedNames is a list of service account names that will be auto-created in every namespace. // If no names are specified, the ServiceAccountsController will not be started. ManagedNames []string `json:"managedNames"` - // LimitSecretReferences controls whether or not to allow a service account to reference any secret in a namespace + // limitSecretReferences controls whether or not to allow a service account to reference any secret in a namespace // without explicitly referencing them LimitSecretReferences bool `json:"limitSecretReferences"` - // PrivateKeyFile is a file containing a PEM-encoded private RSA key, used to sign service account tokens. + // privateKeyFile is a file containing a PEM-encoded private RSA key, used to sign service account tokens. // If no private key is specified, the service account TokensController will not be started. PrivateKeyFile string `json:"privateKeyFile"` - // PublicKeyFiles is a list of files, each containing a PEM-encoded public RSA key. + // publicKeyFiles is a list of files, each containing a PEM-encoded public RSA key. // (If any file contains a private key, the public portion of the key is used) // The list of public keys is used to verify presented service account tokens. // Each key is tried in order until the list is exhausted or verification succeeds. // If no keys are specified, no service account authentication will be available. PublicKeyFiles []string `json:"publicKeyFiles"` - // MasterCA is the CA for verifying the TLS connection back to the master. The service account controller will automatically + // masterCA is the CA for verifying the TLS connection back to the master. The service account controller will automatically // inject the contents of this file into pods so they can verify connections to the master. MasterCA string `json:"masterCA"` } // TokenConfig holds the necessary configuration options for authorization and access tokens type TokenConfig struct { - // AuthorizeTokenMaxAgeSeconds defines the maximum age of authorize tokens + // authorizeTokenMaxAgeSeconds defines the maximum age of authorize tokens AuthorizeTokenMaxAgeSeconds int32 `json:"authorizeTokenMaxAgeSeconds"` - // AccessTokenMaxAgeSeconds defines the maximum age of access tokens + // accessTokenMaxAgeSeconds defines the maximum age of access tokens AccessTokenMaxAgeSeconds int32 `json:"accessTokenMaxAgeSeconds"` - // AccessTokenInactivityTimeoutSeconds defined the default token + // accessTokenInactivityTimeoutSeconds defined the default token // inactivity timeout for tokens granted by any client. 
// Setting it to nil means the feature is completely disabled (default) // The default setting can be overriden on OAuthClient basis. @@ -815,12 +815,12 @@ type TokenConfig struct { // SessionConfig specifies options for cookie-based sessions. Used by AuthRequestHandlerSession type SessionConfig struct { - // SessionSecretsFile is a reference to a file containing a serialized SessionSecrets object + // sessionSecretsFile is a reference to a file containing a serialized SessionSecrets object // If no file is specified, a random signing and encryption key are generated at each server start SessionSecretsFile string `json:"sessionSecretsFile"` - // SessionMaxAgeSeconds specifies how long created sessions last. Used by AuthRequestHandlerSession + // sessionMaxAgeSeconds specifies how long created sessions last. Used by AuthRequestHandlerSession SessionMaxAgeSeconds int32 `json:"sessionMaxAgeSeconds"` - // SessionName is the cookie name used to store the session + // sessionName is the cookie name used to store the session SessionName string `json:"sessionName"` } @@ -834,7 +834,7 @@ type SessionConfig struct { type SessionSecrets struct { metav1.TypeMeta `json:",inline"` - // Secrets is a list of secrets + // secrets is a list of secrets // New sessions are signed and encrypted using the first secret. // Existing sessions are decrypted/authenticated by each secret until one succeeds. This allows rotating secrets. Secrets []SessionSecret `json:"secrets"` @@ -842,23 +842,23 @@ type SessionSecrets struct { // SessionSecret is a secret used to authenticate/decrypt cookie-based sessions type SessionSecret struct { - // Authentication is used to authenticate sessions using HMAC. Recommended to use a secret with 32 or 64 bytes. + // authentication is used to authenticate sessions using HMAC. Recommended to use a secret with 32 or 64 bytes. Authentication string `json:"authentication"` - // Encryption is used to encrypt sessions. Must be 16, 24, or 32 characters long, to select AES-128, AES- + // encryption is used to encrypt sessions. 
Must be 16, 24, or 32 characters long, to select AES-128, AES- Encryption string `json:"encryption"` } // IdentityProvider provides identities for users authenticating using credentials type IdentityProvider struct { - // Name is used to qualify the identities returned by this provider + // name is used to qualify the identities returned by this provider Name string `json:"name"` // UseAsChallenger indicates whether to issue WWW-Authenticate challenges for this provider UseAsChallenger bool `json:"challenge"` // UseAsLogin indicates whether to use this identity provider for unauthenticated browsers to login against UseAsLogin bool `json:"login"` - // MappingMethod determines how identities from this provider are mapped to users + // mappingMethod determines how identities from this provider are mapped to users MappingMethod string `json:"mappingMethod"` - // Provider contains the information about how to set up a specific identity provider + // provider contains the information about how to set up a specific identity provider Provider runtime.RawExtension `json:"provider"` } @@ -908,7 +908,7 @@ type DenyAllPasswordIdentityProvider struct { type HTPasswdPasswordIdentityProvider struct { metav1.TypeMeta `json:",inline"` - // File is a reference to your htpasswd file + // file is a reference to your htpasswd file File string `json:"file"` } @@ -921,38 +921,38 @@ type HTPasswdPasswordIdentityProvider struct { // +openshift:compatibility-gen:internal type LDAPPasswordIdentityProvider struct { metav1.TypeMeta `json:",inline"` - // URL is an RFC 2255 URL which specifies the LDAP search parameters to use. The syntax of the URL is + // url is an RFC 2255 URL which specifies the LDAP search parameters to use. The syntax of the URL is // ldap://host:port/basedn?attribute?scope?filter URL string `json:"url"` - // BindDN is an optional DN to bind with during the search phase. + // bindDN is an optional DN to bind with during the search phase. BindDN string `json:"bindDN"` - // BindPassword is an optional password to bind with during the search phase. + // bindPassword is an optional password to bind with during the search phase. BindPassword StringSource `json:"bindPassword"` // Insecure, if true, indicates the connection should not use TLS. // Cannot be set to true with a URL scheme of "ldaps://" // If false, "ldaps://" URLs connect using TLS, and "ldap://" URLs are upgraded to a TLS connection using StartTLS as specified in https://tools.ietf.org/html/rfc2830 Insecure bool `json:"insecure"` - // CA is the optional trusted certificate authority bundle to use when making requests to the server + // ca is the optional trusted certificate authority bundle to use when making requests to the server // If empty, the default system roots are used CA string `json:"ca"` - // Attributes maps LDAP attributes to identities + // attributes maps LDAP attributes to identities Attributes LDAPAttributeMapping `json:"attributes"` } // LDAPAttributeMapping maps LDAP attributes to OpenShift identity fields type LDAPAttributeMapping struct { - // ID is the list of attributes whose values should be used as the user ID. Required. + // id is the list of attributes whose values should be used as the user ID. Required. // LDAP standard identity attribute is "dn" ID []string `json:"id"` - // PreferredUsername is the list of attributes whose values should be used as the preferred username. + // preferredUsername is the list of attributes whose values should be used as the preferred username. 
// LDAP standard login attribute is "uid" PreferredUsername []string `json:"preferredUsername"` - // Name is the list of attributes whose values should be used as the display name. Optional. + // name is the list of attributes whose values should be used as the display name. Optional. // If unspecified, no display name is set for the identity // LDAP standard display name attribute is "cn" Name []string `json:"name"` - // Email is the list of attributes whose values should be used as the email address. Optional. + // email is the list of attributes whose values should be used as the email address. Optional. // If unspecified, no email is set for the identity Email []string `json:"email"` } @@ -970,7 +970,7 @@ type KeystonePasswordIdentityProvider struct { RemoteConnectionInfo `json:",inline"` // Domain Name is required for keystone v3 DomainName string `json:"domainName"` - // UseKeystoneIdentity flag indicates that user should be authenticated by keystone ID, not by username + // useKeystoneIdentity flag indicates that user should be authenticated by keystone ID, not by username UseKeystoneIdentity bool `json:"useKeystoneIdentity"` } @@ -984,7 +984,7 @@ type KeystonePasswordIdentityProvider struct { type RequestHeaderIdentityProvider struct { metav1.TypeMeta `json:",inline"` - // LoginURL is a URL to redirect unauthenticated /authorize requests to + // loginURL is a URL to redirect unauthenticated /authorize requests to // Unauthenticated requests from OAuth clients which expect interactive logins will be redirected here // ${url} is replaced with the current URL, escaped to be safe in a query parameter // https://www.example.com/sso-login?then=${url} @@ -992,7 +992,7 @@ type RequestHeaderIdentityProvider struct { // https://www.example.com/auth-proxy/oauth/authorize?${query} LoginURL string `json:"loginURL"` - // ChallengeURL is a URL to redirect unauthenticated /authorize requests to + // challengeURL is a URL to redirect unauthenticated /authorize requests to // Unauthenticated requests from OAuth clients which expect WWW-Authenticate challenges will be redirected here // ${url} is replaced with the current URL, escaped to be safe in a query parameter // https://www.example.com/sso-login?then=${url} @@ -1000,18 +1000,18 @@ type RequestHeaderIdentityProvider struct { // https://www.example.com/auth-proxy/oauth/authorize?${query} ChallengeURL string `json:"challengeURL"` - // ClientCA is a file with the trusted signer certs. If empty, no request verification is done, and any direct request to the OAuth server can impersonate any identity from this provider, merely by setting a request header. + // clientCA is a file with the trusted signer certs. If empty, no request verification is done, and any direct request to the OAuth server can impersonate any identity from this provider, merely by setting a request header. ClientCA string `json:"clientCA"` - // ClientCommonNames is an optional list of common names to require a match from. If empty, any client certificate validated against the clientCA bundle is considered authoritative. + // clientCommonNames is an optional list of common names to require a match from. If empty, any client certificate validated against the clientCA bundle is considered authoritative. 
ClientCommonNames []string `json:"clientCommonNames"` - // Headers is the set of headers to check for identity information + // headers is the set of headers to check for identity information Headers []string `json:"headers"` - // PreferredUsernameHeaders is the set of headers to check for the preferred username + // preferredUsernameHeaders is the set of headers to check for the preferred username PreferredUsernameHeaders []string `json:"preferredUsernameHeaders"` - // NameHeaders is the set of headers to check for the display name + // nameHeaders is the set of headers to check for the display name NameHeaders []string `json:"nameHeaders"` - // EmailHeaders is the set of headers to check for the email address + // emailHeaders is the set of headers to check for the email address EmailHeaders []string `json:"emailHeaders"` } @@ -1025,18 +1025,18 @@ type RequestHeaderIdentityProvider struct { type GitHubIdentityProvider struct { metav1.TypeMeta `json:",inline"` - // ClientID is the oauth client ID + // clientID is the oauth client ID ClientID string `json:"clientID"` - // ClientSecret is the oauth client secret + // clientSecret is the oauth client secret ClientSecret StringSource `json:"clientSecret"` - // Organizations optionally restricts which organizations are allowed to log in + // organizations optionally restricts which organizations are allowed to log in Organizations []string `json:"organizations"` - // Teams optionally restricts which teams are allowed to log in. Format is /. + // teams optionally restricts which teams are allowed to log in. Format is /. Teams []string `json:"teams"` - // Hostname is the optional domain (e.g. "mycompany.com") for use with a hosted instance of GitHub Enterprise. + // hostname is the optional domain (e.g. "mycompany.com") for use with a hosted instance of GitHub Enterprise. // It must match the GitHub Enterprise settings value that is configured at /setup/settings#hostname. Hostname string `json:"hostname"` - // CA is the optional trusted certificate authority bundle to use when making requests to the server. + // ca is the optional trusted certificate authority bundle to use when making requests to the server. // If empty, the default system roots are used. This can only be configured when hostname is set to a non-empty value. 
CA string `json:"ca"` } @@ -1051,16 +1051,16 @@ type GitHubIdentityProvider struct { type GitLabIdentityProvider struct { metav1.TypeMeta `json:",inline"` - // CA is the optional trusted certificate authority bundle to use when making requests to the server + // ca is the optional trusted certificate authority bundle to use when making requests to the server // If empty, the default system roots are used CA string `json:"ca"` - // URL is the oauth server base URL + // url is the oauth server base URL URL string `json:"url"` - // ClientID is the oauth client ID + // clientID is the oauth client ID ClientID string `json:"clientID"` - // ClientSecret is the oauth client secret + // clientSecret is the oauth client secret ClientSecret StringSource `json:"clientSecret"` - // Legacy determines if OAuth2 or OIDC should be used + // legacy determines if OAuth2 or OIDC should be used // If true, OAuth2 is used // If false, OIDC is used // If nil and the URL's host is gitlab.com, OIDC is used @@ -1080,12 +1080,12 @@ type GitLabIdentityProvider struct { type GoogleIdentityProvider struct { metav1.TypeMeta `json:",inline"` - // ClientID is the oauth client ID + // clientID is the oauth client ID ClientID string `json:"clientID"` - // ClientSecret is the oauth client secret + // clientSecret is the oauth client secret ClientSecret StringSource `json:"clientSecret"` - // HostedDomain is the optional Google App domain (e.g. "mycompany.com") to restrict logins to + // hostedDomain is the optional Google App domain (e.g. "mycompany.com") to restrict logins to HostedDomain string `json:"hostedDomain"` } @@ -1099,35 +1099,35 @@ type GoogleIdentityProvider struct { type OpenIDIdentityProvider struct { metav1.TypeMeta `json:",inline"` - // CA is the optional trusted certificate authority bundle to use when making requests to the server + // ca is the optional trusted certificate authority bundle to use when making requests to the server // If empty, the default system roots are used CA string `json:"ca"` - // ClientID is the oauth client ID + // clientID is the oauth client ID ClientID string `json:"clientID"` - // ClientSecret is the oauth client secret + // clientSecret is the oauth client secret ClientSecret StringSource `json:"clientSecret"` - // ExtraScopes are any scopes to request in addition to the standard "openid" scope. + // extraScopes are any scopes to request in addition to the standard "openid" scope. ExtraScopes []string `json:"extraScopes"` - // ExtraAuthorizeParameters are any custom parameters to add to the authorize request. + // extraAuthorizeParameters are any custom parameters to add to the authorize request. ExtraAuthorizeParameters map[string]string `json:"extraAuthorizeParameters"` - // URLs to use to authenticate + // urls to use to authenticate URLs OpenIDURLs `json:"urls"` - // Claims mappings + // claims mappings Claims OpenIDClaims `json:"claims"` } // OpenIDURLs are URLs to use when authenticating with an OpenID identity provider type OpenIDURLs struct { - // Authorize is the oauth authorization URL + // authorize is the oauth authorization URL Authorize string `json:"authorize"` - // Token is the oauth token granting URL + // token is the oauth token granting URL Token string `json:"token"` - // UserInfo is the optional userinfo URL. + // userInfo is the optional userinfo URL. 
// If present, a granted access_token is used to request claims // If empty, a granted id_token is parsed for claims UserInfo string `json:"userInfo"` @@ -1135,23 +1135,23 @@ type OpenIDURLs struct { // OpenIDClaims contains a list of OpenID claims to use when authenticating with an OpenID identity provider type OpenIDClaims struct { - // ID is the list of claims whose values should be used as the user ID. Required. + // id is the list of claims whose values should be used as the user ID. Required. // OpenID standard identity claim is "sub" ID []string `json:"id"` - // PreferredUsername is the list of claims whose values should be used as the preferred username. + // preferredUsername is the list of claims whose values should be used as the preferred username. // If unspecified, the preferred username is determined from the value of the id claim PreferredUsername []string `json:"preferredUsername"` - // Name is the list of claims whose values should be used as the display name. Optional. + // name is the list of claims whose values should be used as the display name. Optional. // If unspecified, no display name is set for the identity Name []string `json:"name"` - // Email is the list of claims whose values should be used as the email address. Optional. + // email is the list of claims whose values should be used as the email address. Optional. // If unspecified, no email is set for the identity Email []string `json:"email"` } // GrantConfig holds the necessary configuration options for grant handlers type GrantConfig struct { - // Method determines the default strategy to use when an OAuth client requests a grant. + // method determines the default strategy to use when an OAuth client requests a grant. // This method will be used only if the specific OAuth client doesn't provide a strategy // of their own. Valid grant handling methods are: // - auto: always approves grant requests, useful for trusted clients @@ -1159,7 +1159,7 @@ type GrantConfig struct { // - deny: always denies grant requests, useful for black-listed clients Method GrantHandlerType `json:"method"` - // ServiceAccountMethod is used for determining client authorization for service account oauth client. + // serviceAccountMethod is used for determining client authorization for service account oauth client. 
// It must be either: deny, prompt ServiceAccountMethod GrantHandlerType `json:"serviceAccountMethod"` } @@ -1177,13 +1177,13 @@ const ( // EtcdConfig holds the necessary configuration options for connecting with an etcd database type EtcdConfig struct { - // ServingInfo describes how to start serving the etcd master + // servingInfo describes how to start serving the etcd master ServingInfo ServingInfo `json:"servingInfo"` - // Address is the advertised host:port for client connections to etcd + // address is the advertised host:port for client connections to etcd Address string `json:"address"` - // PeerServingInfo describes how to start serving the etcd peer + // peerServingInfo describes how to start serving the etcd peer PeerServingInfo ServingInfo `json:"peerServingInfo"` - // PeerAddress is the advertised host:port for peer connections to etcd + // peerAddress is the advertised host:port for peer connections to etcd PeerAddress string `json:"peerAddress"` // StorageDir is the path to the etcd storage directory @@ -1192,42 +1192,42 @@ type EtcdConfig struct { // KubernetesMasterConfig holds the necessary configuration options for the Kubernetes master type KubernetesMasterConfig struct { - // APILevels is a list of API levels that should be enabled on startup: v1 as examples + // apiLevels is a list of API levels that should be enabled on startup: v1 as examples APILevels []string `json:"apiLevels"` - // DisabledAPIGroupVersions is a map of groups to the versions (or *) that should be disabled. + // disabledAPIGroupVersions is a map of groups to the versions (or *) that should be disabled. DisabledAPIGroupVersions map[string][]string `json:"disabledAPIGroupVersions"` - // MasterIP is the public IP address of kubernetes stuff. If empty, the first result from net.InterfaceAddrs will be used. + // masterIP is the public IP address of kubernetes stuff. If empty, the first result from net.InterfaceAddrs will be used. MasterIP string `json:"masterIP"` - // MasterEndpointReconcileTTL sets the time to live in seconds of an endpoint record recorded by each master. The endpoints are checked + // masterEndpointReconcileTTL sets the time to live in seconds of an endpoint record recorded by each master. The endpoints are checked // at an interval that is 2/3 of this value and this value defaults to 15s if unset. In very large clusters, this value may be increased to // reduce the possibility that the master endpoint record expires (due to other load on the etcd server) and causes masters to drop in and // out of the kubernetes service record. It is not recommended to set this value below 15s. MasterEndpointReconcileTTL int `json:"masterEndpointReconcileTTL"` - // ServicesSubnet is the subnet to use for assigning service IPs + // servicesSubnet is the subnet to use for assigning service IPs ServicesSubnet string `json:"servicesSubnet"` - // ServicesNodePortRange is the range to use for assigning service public ports on a host. + // servicesNodePortRange is the range to use for assigning service public ports on a host. ServicesNodePortRange string `json:"servicesNodePortRange"` - // SchedulerConfigFile points to a file that describes how to set up the scheduler. If empty, you get the default scheduling rules. + // schedulerConfigFile points to a file that describes how to set up the scheduler. If empty, you get the default scheduling rules. SchedulerConfigFile string `json:"schedulerConfigFile"` - // PodEvictionTimeout controls grace period for deleting pods on failed nodes. 
+ // podEvictionTimeout controls grace period for deleting pods on failed nodes. // It takes valid time duration string. If empty, you get the default pod eviction timeout. PodEvictionTimeout string `json:"podEvictionTimeout"` - // ProxyClientInfo specifies the client cert/key to use when proxying to pods + // proxyClientInfo specifies the client cert/key to use when proxying to pods ProxyClientInfo CertInfo `json:"proxyClientInfo"` - // APIServerArguments are key value pairs that will be passed directly to the Kube apiserver that match the apiservers's + // apiServerArguments are key value pairs that will be passed directly to the Kube apiserver that match the apiservers's // command line arguments. These are not migrated, but if you reference a value that does not exist the server will not // start. These values may override other settings in KubernetesMasterConfig which may cause invalid configurations. APIServerArguments ExtendedArguments `json:"apiServerArguments"` - // ControllerArguments are key value pairs that will be passed directly to the Kube controller manager that match the + // controllerArguments are key value pairs that will be passed directly to the Kube controller manager that match the // controller manager's command line arguments. These are not migrated, but if you reference a value that does not exist // the server will not start. These values may override other settings in KubernetesMasterConfig which may cause invalid // configurations. ControllerArguments ExtendedArguments `json:"controllerArguments"` - // SchedulerArguments are key value pairs that will be passed directly to the Kube scheduler that match the scheduler's + // schedulerArguments are key value pairs that will be passed directly to the Kube scheduler that match the scheduler's // command line arguments. These are not migrated, but if you reference a value that does not exist the server will not // start. These values may override other settings in KubernetesMasterConfig which may cause invalid configurations. 
SchedulerArguments ExtendedArguments `json:"schedulerArguments"` @@ -1235,19 +1235,19 @@ type KubernetesMasterConfig struct { // CertInfo relates a certificate with a private key type CertInfo struct { - // CertFile is a file containing a PEM-encoded certificate + // certFile is a file containing a PEM-encoded certificate CertFile string `json:"certFile"` - // KeyFile is a file containing a PEM-encoded private key for the certificate specified by CertFile + // keyFile is a file containing a PEM-encoded private key for the certificate specified by CertFile KeyFile string `json:"keyFile"` } // PodManifestConfig holds the necessary configuration options for using pod manifests type PodManifestConfig struct { - // Path specifies the path for the pod manifest file or directory + // path specifies the path for the pod manifest file or directory // If its a directory, its expected to contain on or more manifest files // This is used by the Kubelet to create pods on the node Path string `json:"path"` - // FileCheckIntervalSeconds is the interval in seconds for checking the manifest file(s) for new data + // fileCheckIntervalSeconds is the interval in seconds for checking the manifest file(s) for new data // The interval needs to be a positive value FileCheckIntervalSeconds int64 `json:"fileCheckIntervalSeconds"` } @@ -1261,16 +1261,16 @@ type StringSource struct { // StringSourceSpec specifies a string value, or external location type StringSourceSpec struct { - // Value specifies the cleartext value, or an encrypted value if keyFile is specified. + // value specifies the cleartext value, or an encrypted value if keyFile is specified. Value string `json:"value"` - // Env specifies an envvar containing the cleartext value, or an encrypted value if the keyFile is specified. + // env specifies an envvar containing the cleartext value, or an encrypted value if the keyFile is specified. Env string `json:"env"` - // File references a file containing the cleartext value, or an encrypted value if a keyFile is specified. + // file references a file containing the cleartext value, or an encrypted value if a keyFile is specified. File string `json:"file"` - // KeyFile references a file containing the key to use to decrypt the value. + // keyFile references a file containing the key to use to decrypt the value. KeyFile string `json:"keyFile"` } @@ -1286,16 +1286,16 @@ type LDAPSyncConfig struct { // Host is the scheme, host and port of the LDAP server to connect to: // scheme://host:port URL string `json:"url"` - // BindDN is an optional DN to bind to the LDAP server with + // bindDN is an optional DN to bind to the LDAP server with BindDN string `json:"bindDN"` - // BindPassword is an optional password to bind with during the search phase. + // bindPassword is an optional password to bind with during the search phase. BindPassword StringSource `json:"bindPassword"` // Insecure, if true, indicates the connection should not use TLS. 
// Cannot be set to true with a URL scheme of "ldaps://" // If false, "ldaps://" URLs connect using TLS, and "ldap://" URLs are upgraded to a TLS connection using StartTLS as specified in https://tools.ietf.org/html/rfc2830 Insecure bool `json:"insecure"` - // CA is the optional trusted certificate authority bundle to use when making requests to the server + // ca is the optional trusted certificate authority bundle to use when making requests to the server // If empty, the default system roots are used CA string `json:"ca"` @@ -1329,33 +1329,33 @@ type RFC2307Config struct { // (ldapGroupUID) GroupUIDAttribute string `json:"groupUIDAttribute"` - // GroupNameAttributes defines which attributes on an LDAP group entry will be interpreted as its name to use for + // groupNameAttributes defines which attributes on an LDAP group entry will be interpreted as its name to use for // an OpenShift group GroupNameAttributes []string `json:"groupNameAttributes"` - // GroupMembershipAttributes defines which attributes on an LDAP group entry will be interpreted as its members. + // groupMembershipAttributes defines which attributes on an LDAP group entry will be interpreted as its members. // The values contained in those attributes must be queryable by your UserUIDAttribute GroupMembershipAttributes []string `json:"groupMembershipAttributes"` // AllUsersQuery holds the template for an LDAP query that returns user entries. AllUsersQuery LDAPQuery `json:"usersQuery"` - // UserUIDAttribute defines which attribute on an LDAP user entry will be interpreted as its unique identifier. + // userUIDAttribute defines which attribute on an LDAP user entry will be interpreted as its unique identifier. // It must correspond to values that will be found from the GroupMembershipAttributes UserUIDAttribute string `json:"userUIDAttribute"` - // UserNameAttributes defines which attributes on an LDAP user entry will be used, in order, as its OpenShift user name. + // userNameAttributes defines which attributes on an LDAP user entry will be used, in order, as its OpenShift user name. // The first attribute with a non-empty value is used. This should match your PreferredUsername setting for your LDAPPasswordIdentityProvider UserNameAttributes []string `json:"userNameAttributes"` - // TolerateMemberNotFoundErrors determines the behavior of the LDAP sync job when missing user entries are + // tolerateMemberNotFoundErrors determines the behavior of the LDAP sync job when missing user entries are // encountered. If 'true', an LDAP query for users that doesn't find any will be tolerated and an only // and error will be logged. If 'false', the LDAP sync job will fail if a query for users doesn't find // any. The default value is 'false'. Misconfigured LDAP sync jobs with this flag set to 'true' can cause // group membership to be removed, so it is recommended to use this flag with caution. TolerateMemberNotFoundErrors bool `json:"tolerateMemberNotFoundErrors"` - // TolerateMemberOutOfScopeErrors determines the behavior of the LDAP sync job when out-of-scope user entries + // tolerateMemberOutOfScopeErrors determines the behavior of the LDAP sync job when out-of-scope user entries // are encountered. If 'true', an LDAP query for a user that falls outside of the base DN given for the all // user query will be tolerated and only an error will be logged. If 'false', the LDAP sync job will fail // if a user query would search outside of the base DN specified by the all user query. 
Misconfigured LDAP @@ -1370,10 +1370,10 @@ type ActiveDirectoryConfig struct { // AllUsersQuery holds the template for an LDAP query that returns user entries. AllUsersQuery LDAPQuery `json:"usersQuery"` - // UserNameAttributes defines which attributes on an LDAP user entry will be interpreted as its OpenShift user name. + // userNameAttributes defines which attributes on an LDAP user entry will be interpreted as its OpenShift user name. UserNameAttributes []string `json:"userNameAttributes"` - // GroupMembershipAttributes defines which attributes on an LDAP user entry will be interpreted + // groupMembershipAttributes defines which attributes on an LDAP user entry will be interpreted // as the groups it is a member of GroupMembershipAttributes []string `json:"groupMembershipAttributes"` } @@ -1384,10 +1384,10 @@ type AugmentedActiveDirectoryConfig struct { // AllUsersQuery holds the template for an LDAP query that returns user entries. AllUsersQuery LDAPQuery `json:"usersQuery"` - // UserNameAttributes defines which attributes on an LDAP user entry will be interpreted as its OpenShift user name. + // userNameAttributes defines which attributes on an LDAP user entry will be interpreted as its OpenShift user name. UserNameAttributes []string `json:"userNameAttributes"` - // GroupMembershipAttributes defines which attributes on an LDAP user entry will be interpreted + // groupMembershipAttributes defines which attributes on an LDAP user entry will be interpreted // as the groups it is a member of GroupMembershipAttributes []string `json:"groupMembershipAttributes"` @@ -1398,7 +1398,7 @@ type AugmentedActiveDirectoryConfig struct { // (ldapGroupUID) GroupUIDAttribute string `json:"groupUIDAttribute"` - // GroupNameAttributes defines which attributes on an LDAP group entry will be interpreted as its name to use for + // groupNameAttributes defines which attributes on an LDAP group entry will be interpreted as its name to use for // an OpenShift group GroupNameAttributes []string `json:"groupNameAttributes"` } @@ -1427,45 +1427,45 @@ type LDAPQuery struct { // before the wait for a response is given up. If this is 0, no client-side limit is imposed TimeLimit int `json:"timeout"` - // Filter is a valid LDAP search filter that retrieves all relevant entries from the LDAP server with the base DN + // filter is a valid LDAP search filter that retrieves all relevant entries from the LDAP server with the base DN Filter string `json:"filter"` - // PageSize is the maximum preferred page size, measured in LDAP entries. A page size of 0 means no paging will be done. + // pageSize is the maximum preferred page size, measured in LDAP entries. A page size of 0 means no paging will be done. PageSize int `json:"pageSize"` } // AdmissionPluginConfig holds the necessary configuration options for admission plugins type AdmissionPluginConfig struct { - // Location is the path to a configuration file that contains the plugin's + // location is the path to a configuration file that contains the plugin's // configuration Location string `json:"location"` - // Configuration is an embedded configuration object to be used as the plugin's + // configuration is an embedded configuration object to be used as the plugin's // configuration. If present, it will be used instead of the path to the configuration file. 
Configuration runtime.RawExtension `json:"configuration"` } // AdmissionConfig holds the necessary configuration options for admission type AdmissionConfig struct { - // PluginConfig allows specifying a configuration file per admission control plugin + // pluginConfig allows specifying a configuration file per admission control plugin PluginConfig map[string]*AdmissionPluginConfig `json:"pluginConfig"` - // PluginOrderOverride is a list of admission control plugin names that will be installed + // pluginOrderOverride is a list of admission control plugin names that will be installed // on the master. Order is significant. If empty, a default list of plugins is used. PluginOrderOverride []string `json:"pluginOrderOverride,omitempty"` } // ControllerConfig holds configuration values for controllers type ControllerConfig struct { - // Controllers is a list of controllers to enable. '*' enables all on-by-default controllers, 'foo' enables the controller "+ + // controllers is a list of controllers to enable. '*' enables all on-by-default controllers, 'foo' enables the controller "+ // named 'foo', '-foo' disables the controller named 'foo'. // Defaults to "*". Controllers []string `json:"controllers"` - // Election defines the configuration for electing a controller instance to make changes to + // election defines the configuration for electing a controller instance to make changes to // the cluster. If unspecified, the ControllerTTL value is checked to determine whether the // legacy direct etcd election code will be used. Election *ControllerElectionConfig `json:"election"` - // ServiceServingCert holds configuration for service serving cert signer which creates cert/key pairs for + // serviceServingCert holds configuration for service serving cert signer which creates cert/key pairs for // pods fulfilling a service to serve with. ServiceServingCert ServiceServingCert `json:"serviceServingCert"` } @@ -1473,29 +1473,29 @@ type ControllerConfig struct { // ControllerElectionConfig contains configuration values for deciding how a controller // will be elected to act as leader. type ControllerElectionConfig struct { - // LockName is the resource name used to act as the lock for determining which controller + // lockName is the resource name used to act as the lock for determining which controller // instance should lead. LockName string `json:"lockName"` - // LockNamespace is the resource namespace used to act as the lock for determining which + // lockNamespace is the resource namespace used to act as the lock for determining which // controller instance should lead. It defaults to "kube-system" LockNamespace string `json:"lockNamespace"` - // LockResource is the group and resource name to use to coordinate for the controller lock. + // lockResource is the group and resource name to use to coordinate for the controller lock. // If unset, defaults to "configmaps". LockResource GroupResource `json:"lockResource"` } // GroupResource points to a resource by its name and API group. type GroupResource struct { - // Group is the name of an API group + // group is the name of an API group Group string `json:"group"` - // Resource is the name of a resource. + // resource is the name of a resource. Resource string `json:"resource"` } // ServiceServingCert holds configuration for service serving cert signer which creates cert/key pairs for // pods fulfilling a service to serve with. type ServiceServingCert struct { - // Signer holds the signing information used to automatically sign serving certificates. 
+ // signer holds the signing information used to automatically sign serving certificates. // If this value is nil, then certs are not signed automatically. Signer *CertInfo `json:"signer"` } @@ -1512,7 +1512,7 @@ type ServiceServingCert struct { type DefaultAdmissionConfig struct { metav1.TypeMeta `json:",inline"` - // Disable turns off an admission plugin that is enabled by default. + // disable turns off an admission plugin that is enabled by default. Disable bool `json:"disable"` } diff --git a/vendor/github.com/openshift/api/legacyconfig/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/legacyconfig/v1/zz_generated.swagger_doc_generated.go index daa0868b6..42444e8ae 100644 --- a/vendor/github.com/openshift/api/legacyconfig/v1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/legacyconfig/v1/zz_generated.swagger_doc_generated.go @@ -14,8 +14,8 @@ package v1 var map_ActiveDirectoryConfig = map[string]string{ "": "ActiveDirectoryConfig holds the necessary configuration options to define how an LDAP group sync interacts with an LDAP server using the Active Directory schema", "usersQuery": "AllUsersQuery holds the template for an LDAP query that returns user entries.", - "userNameAttributes": "UserNameAttributes defines which attributes on an LDAP user entry will be interpreted as its OpenShift user name.", - "groupMembershipAttributes": "GroupMembershipAttributes defines which attributes on an LDAP user entry will be interpreted as the groups it is a member of", + "userNameAttributes": "userNameAttributes defines which attributes on an LDAP user entry will be interpreted as its OpenShift user name.", + "groupMembershipAttributes": "groupMembershipAttributes defines which attributes on an LDAP user entry will be interpreted as the groups it is a member of", } func (ActiveDirectoryConfig) SwaggerDoc() map[string]string { @@ -24,8 +24,8 @@ func (ActiveDirectoryConfig) SwaggerDoc() map[string]string { var map_AdmissionConfig = map[string]string{ "": "AdmissionConfig holds the necessary configuration options for admission", - "pluginConfig": "PluginConfig allows specifying a configuration file per admission control plugin", - "pluginOrderOverride": "PluginOrderOverride is a list of admission control plugin names that will be installed on the master. Order is significant. If empty, a default list of plugins is used.", + "pluginConfig": "pluginConfig allows specifying a configuration file per admission control plugin", + "pluginOrderOverride": "pluginOrderOverride is a list of admission control plugin names that will be installed on the master. Order is significant. If empty, a default list of plugins is used.", } func (AdmissionConfig) SwaggerDoc() map[string]string { @@ -34,8 +34,8 @@ func (AdmissionConfig) SwaggerDoc() map[string]string { var map_AdmissionPluginConfig = map[string]string{ "": "AdmissionPluginConfig holds the necessary configuration options for admission plugins", - "location": "Location is the path to a configuration file that contains the plugin's configuration", - "configuration": "Configuration is an embedded configuration object to be used as the plugin's configuration. If present, it will be used instead of the path to the configuration file.", + "location": "location is the path to a configuration file that contains the plugin's configuration", + "configuration": "configuration is an embedded configuration object to be used as the plugin's configuration. 
If present, it will be used instead of the path to the configuration file.", } func (AdmissionPluginConfig) SwaggerDoc() map[string]string { @@ -44,7 +44,7 @@ func (AdmissionPluginConfig) SwaggerDoc() map[string]string { var map_AggregatorConfig = map[string]string{ "": "AggregatorConfig holds information required to make the aggregator function.", - "proxyClientInfo": "ProxyClientInfo specifies the client cert/key to use when proxying to aggregated API servers", + "proxyClientInfo": "proxyClientInfo specifies the client cert/key to use when proxying to aggregated API servers", } func (AggregatorConfig) SwaggerDoc() map[string]string { @@ -66,8 +66,8 @@ var map_AuditConfig = map[string]string{ "maximumFileRetentionDays": "Maximum number of days to retain old log files based on the timestamp encoded in their filename.", "maximumRetainedFiles": "Maximum number of old log files to retain.", "maximumFileSizeMegabytes": "Maximum size in megabytes of the log file before it gets rotated. Defaults to 100MB.", - "policyFile": "PolicyFile is a path to the file that defines the audit policy configuration.", - "policyConfiguration": "PolicyConfiguration is an embedded policy configuration object to be used as the audit policy configuration. If present, it will be used instead of the path to the policy file.", + "policyFile": "policyFile is a path to the file that defines the audit policy configuration.", + "policyConfiguration": "policyConfiguration is an embedded policy configuration object to be used as the audit policy configuration. If present, it will be used instead of the path to the policy file.", "logFormat": "Format of saved audits (legacy or json).", "webHookKubeConfig": "Path to a .kubeconfig formatted file that defines the audit webhook configuration.", "webHookMode": "Strategy for sending audit events (block or batch).", @@ -80,11 +80,11 @@ func (AuditConfig) SwaggerDoc() map[string]string { var map_AugmentedActiveDirectoryConfig = map[string]string{ "": "AugmentedActiveDirectoryConfig holds the necessary configuration options to define how an LDAP group sync interacts with an LDAP server using the augmented Active Directory schema", "usersQuery": "AllUsersQuery holds the template for an LDAP query that returns user entries.", - "userNameAttributes": "UserNameAttributes defines which attributes on an LDAP user entry will be interpreted as its OpenShift user name.", - "groupMembershipAttributes": "GroupMembershipAttributes defines which attributes on an LDAP user entry will be interpreted as the groups it is a member of", + "userNameAttributes": "userNameAttributes defines which attributes on an LDAP user entry will be interpreted as its OpenShift user name.", + "groupMembershipAttributes": "groupMembershipAttributes defines which attributes on an LDAP user entry will be interpreted as the groups it is a member of", "groupsQuery": "AllGroupsQuery holds the template for an LDAP query that returns group entries.", "groupUIDAttribute": "GroupUIDAttributes defines which attribute on an LDAP group entry will be interpreted as its unique identifier. 
(ldapGroupUID)", - "groupNameAttributes": "GroupNameAttributes defines which attributes on an LDAP group entry will be interpreted as its name to use for an OpenShift group", + "groupNameAttributes": "groupNameAttributes defines which attributes on an LDAP group entry will be interpreted as its name to use for an OpenShift group", } func (AugmentedActiveDirectoryConfig) SwaggerDoc() map[string]string { @@ -131,8 +131,8 @@ func (BuildOverridesConfig) SwaggerDoc() map[string]string { var map_CertInfo = map[string]string{ "": "CertInfo relates a certificate with a private key", - "certFile": "CertFile is a file containing a PEM-encoded certificate", - "keyFile": "KeyFile is a file containing a PEM-encoded private key for the certificate specified by CertFile", + "certFile": "certFile is a file containing a PEM-encoded certificate", + "keyFile": "keyFile is a file containing a PEM-encoded private key for the certificate specified by CertFile", } func (CertInfo) SwaggerDoc() map[string]string { @@ -141,10 +141,10 @@ func (CertInfo) SwaggerDoc() map[string]string { var map_ClientConnectionOverrides = map[string]string{ "": "ClientConnectionOverrides are a set of overrides to the default client connection settings.", - "acceptContentTypes": "AcceptContentTypes defines the Accept header sent by clients when connecting to a server, overriding the default value of 'application/json'. This field will control all connections to the server used by a particular client.", - "contentType": "ContentType is the content type used when sending data to the server from this client.", - "qps": "QPS controls the number of queries per second allowed for this connection.", - "burst": "Burst allows extra queries to accumulate when a client is exceeding its rate.", + "acceptContentTypes": "acceptContentTypes defines the Accept header sent by clients when connecting to a server, overriding the default value of 'application/json'. This field will control all connections to the server used by a particular client.", + "contentType": "contentType is the content type used when sending data to the server from this client.", + "qps": "qps controls the number of queries per second allowed for this connection.", + "burst": "burst allows extra queries to accumulate when a client is exceeding its rate.", } func (ClientConnectionOverrides) SwaggerDoc() map[string]string { @@ -153,8 +153,8 @@ func (ClientConnectionOverrides) SwaggerDoc() map[string]string { var map_ClusterNetworkEntry = map[string]string{ "": "ClusterNetworkEntry defines an individual cluster network. The CIDRs cannot overlap with other cluster network CIDRs, CIDRs reserved for external ips, CIDRs reserved for service networks, and CIDRs reserved for ingress ips.", - "cidr": "CIDR defines the total range of a cluster networks address space.", - "hostSubnetLength": "HostSubnetLength is the number of bits of the accompanying CIDR address to allocate to each node. eg, 8 would mean that each node would have a /24 slice of the overlay network for its pod.", + "cidr": "cidr defines the total range of a cluster networks address space.", + "hostSubnetLength": "hostSubnetLength is the number of bits of the accompanying CIDR address to allocate to each node. 
eg, 8 would mean that each node would have a /24 slice of the overlay network for its pod.", } func (ClusterNetworkEntry) SwaggerDoc() map[string]string { @@ -163,9 +163,9 @@ func (ClusterNetworkEntry) SwaggerDoc() map[string]string { var map_ControllerConfig = map[string]string{ "": "ControllerConfig holds configuration values for controllers", - "controllers": "Controllers is a list of controllers to enable. '*' enables all on-by-default controllers, 'foo' enables the controller \"+ named 'foo', '-foo' disables the controller named 'foo'. Defaults to \"*\".", - "election": "Election defines the configuration for electing a controller instance to make changes to the cluster. If unspecified, the ControllerTTL value is checked to determine whether the legacy direct etcd election code will be used.", - "serviceServingCert": "ServiceServingCert holds configuration for service serving cert signer which creates cert/key pairs for pods fulfilling a service to serve with.", + "controllers": "controllers is a list of controllers to enable. '*' enables all on-by-default controllers, 'foo' enables the controller \"+ named 'foo', '-foo' disables the controller named 'foo'. Defaults to \"*\".", + "election": "election defines the configuration for electing a controller instance to make changes to the cluster. If unspecified, the ControllerTTL value is checked to determine whether the legacy direct etcd election code will be used.", + "serviceServingCert": "serviceServingCert holds configuration for service serving cert signer which creates cert/key pairs for pods fulfilling a service to serve with.", } func (ControllerConfig) SwaggerDoc() map[string]string { @@ -174,9 +174,9 @@ func (ControllerConfig) SwaggerDoc() map[string]string { var map_ControllerElectionConfig = map[string]string{ "": "ControllerElectionConfig contains configuration values for deciding how a controller will be elected to act as leader.", - "lockName": "LockName is the resource name used to act as the lock for determining which controller instance should lead.", - "lockNamespace": "LockNamespace is the resource namespace used to act as the lock for determining which controller instance should lead. It defaults to \"kube-system\"", - "lockResource": "LockResource is the group and resource name to use to coordinate for the controller lock. If unset, defaults to \"configmaps\".", + "lockName": "lockName is the resource name used to act as the lock for determining which controller instance should lead.", + "lockNamespace": "lockNamespace is the resource namespace used to act as the lock for determining which controller instance should lead. It defaults to \"kube-system\"", + "lockResource": "lockResource is the group and resource name to use to coordinate for the controller lock. If unset, defaults to \"configmaps\".", } func (ControllerElectionConfig) SwaggerDoc() map[string]string { @@ -185,9 +185,9 @@ func (ControllerElectionConfig) SwaggerDoc() map[string]string { var map_DNSConfig = map[string]string{ "": "DNSConfig holds the necessary configuration options for DNS", - "bindAddress": "BindAddress is the ip:port to serve DNS on", - "bindNetwork": "BindNetwork is the type of network to bind to - defaults to \"tcp4\", accepts \"tcp\", \"tcp4\", and \"tcp6\"", - "allowRecursiveQueries": "AllowRecursiveQueries allows the DNS server on the master to answer queries recursively. 
Note that open resolvers can be used for DNS amplification attacks and the master DNS should not be made accessible to public networks.", + "bindAddress": "bindAddress is the ip:port to serve DNS on", + "bindNetwork": "bindNetwork is the type of network to bind to - defaults to \"tcp4\", accepts \"tcp\", \"tcp4\", and \"tcp6\"", + "allowRecursiveQueries": "allowRecursiveQueries allows the DNS server on the master to answer queries recursively. Note that open resolvers can be used for DNS amplification attacks and the master DNS should not be made accessible to public networks.", } func (DNSConfig) SwaggerDoc() map[string]string { @@ -196,7 +196,7 @@ func (DNSConfig) SwaggerDoc() map[string]string { var map_DefaultAdmissionConfig = map[string]string{ "": "DefaultAdmissionConfig can be used to enable or disable various admission plugins. When this type is present as the `configuration` object under `pluginConfig` and *if* the admission plugin supports it, this will cause an \"off by default\" admission plugin to be enabled\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.", - "disable": "Disable turns off an admission plugin that is enabled by default.", + "disable": "disable turns off an admission plugin that is enabled by default.", } func (DefaultAdmissionConfig) SwaggerDoc() map[string]string { @@ -213,9 +213,9 @@ func (DenyAllPasswordIdentityProvider) SwaggerDoc() map[string]string { var map_DockerConfig = map[string]string{ "": "DockerConfig holds Docker related configuration options.", - "execHandlerName": "ExecHandlerName is the name of the handler to use for executing commands in containers.", - "dockerShimSocket": "DockerShimSocket is the location of the dockershim socket the kubelet uses. Currently unix socket is supported on Linux, and tcp is supported on windows. Examples:'unix:///var/run/dockershim.sock', 'tcp://localhost:3735'", - "dockerShimRootDirectory": "DockershimRootDirectory is the dockershim root directory.", + "execHandlerName": "execHandlerName is the name of the handler to use for executing commands in containers.", + "dockerShimSocket": "dockerShimSocket is the location of the dockershim socket the kubelet uses. Currently unix socket is supported on Linux, and tcp is supported on windows. 
Examples:'unix:///var/run/dockershim.sock', 'tcp://localhost:3735'", + "dockerShimRootDirectory": "dockerShimRootDirectory is the dockershim root directory.", } func (DockerConfig) SwaggerDoc() map[string]string { @@ -224,10 +224,10 @@ func (DockerConfig) SwaggerDoc() map[string]string { var map_EtcdConfig = map[string]string{ "": "EtcdConfig holds the necessary configuration options for connecting with an etcd database", - "servingInfo": "ServingInfo describes how to start serving the etcd master", - "address": "Address is the advertised host:port for client connections to etcd", - "peerServingInfo": "PeerServingInfo describes how to start serving the etcd peer", - "peerAddress": "PeerAddress is the advertised host:port for peer connections to etcd", + "servingInfo": "servingInfo describes how to start serving the etcd master", + "address": "address is the advertised host:port for client connections to etcd", + "peerServingInfo": "peerServingInfo describes how to start serving the etcd peer", + "peerAddress": "peerAddress is the advertised host:port for peer connections to etcd", "storageDirectory": "StorageDir is the path to the etcd storage directory", } @@ -237,8 +237,8 @@ func (EtcdConfig) SwaggerDoc() map[string]string { var map_EtcdConnectionInfo = map[string]string{ "": "EtcdConnectionInfo holds information necessary for connecting to an etcd server", - "urls": "URLs are the URLs for etcd", - "ca": "CA is a file containing trusted roots for the etcd server certificates", + "urls": "urls are the URLs for etcd", + "ca": "ca is a file containing trusted roots for the etcd server certificates", } func (EtcdConnectionInfo) SwaggerDoc() map[string]string { @@ -247,10 +247,10 @@ func (EtcdConnectionInfo) SwaggerDoc() map[string]string { var map_EtcdStorageConfig = map[string]string{ "": "EtcdStorageConfig holds the necessary configuration options for the etcd storage underlying OpenShift and Kubernetes", - "kubernetesStorageVersion": "KubernetesStorageVersion is the API version that Kube resources in etcd should be serialized to. This value should *not* be advanced until all clients in the cluster that read from etcd have code that allows them to read the new version.", - "kubernetesStoragePrefix": "KubernetesStoragePrefix is the path within etcd that the Kubernetes resources will be rooted under. This value, if changed, will mean existing objects in etcd will no longer be located. The default value is 'kubernetes.io'.", - "openShiftStorageVersion": "OpenShiftStorageVersion is the API version that OS resources in etcd should be serialized to. This value should *not* be advanced until all clients in the cluster that read from etcd have code that allows them to read the new version.", - "openShiftStoragePrefix": "OpenShiftStoragePrefix is the path within etcd that the OpenShift resources will be rooted under. This value, if changed, will mean existing objects in etcd will no longer be located. The default value is 'openshift.io'.", + "kubernetesStorageVersion": "kubernetesStorageVersion is the API version that Kube resources in etcd should be serialized to. This value should *not* be advanced until all clients in the cluster that read from etcd have code that allows them to read the new version.", + "kubernetesStoragePrefix": "kubernetesStoragePrefix is the path within etcd that the Kubernetes resources will be rooted under. This value, if changed, will mean existing objects in etcd will no longer be located. 
The default value is 'kubernetes.io'.", + "openShiftStorageVersion": "openShiftStorageVersion is the API version that OS resources in etcd should be serialized to. This value should *not* be advanced until all clients in the cluster that read from etcd have code that allows them to read the new version.", + "openShiftStoragePrefix": "openShiftStoragePrefix is the path within etcd that the OpenShift resources will be rooted under. This value, if changed, will mean existing objects in etcd will no longer be located. The default value is 'openshift.io'.", } func (EtcdStorageConfig) SwaggerDoc() map[string]string { @@ -259,12 +259,12 @@ func (EtcdStorageConfig) SwaggerDoc() map[string]string { var map_GitHubIdentityProvider = map[string]string{ "": "GitHubIdentityProvider provides identities for users authenticating using GitHub credentials\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.", - "clientID": "ClientID is the oauth client ID", - "clientSecret": "ClientSecret is the oauth client secret", - "organizations": "Organizations optionally restricts which organizations are allowed to log in", - "teams": "Teams optionally restricts which teams are allowed to log in. Format is /.", - "hostname": "Hostname is the optional domain (e.g. \"mycompany.com\") for use with a hosted instance of GitHub Enterprise. It must match the GitHub Enterprise settings value that is configured at /setup/settings#hostname.", - "ca": "CA is the optional trusted certificate authority bundle to use when making requests to the server. If empty, the default system roots are used. This can only be configured when hostname is set to a non-empty value.", + "clientID": "clientID is the oauth client ID", + "clientSecret": "clientSecret is the oauth client secret", + "organizations": "organizations optionally restricts which organizations are allowed to log in", + "teams": "teams optionally restricts which teams are allowed to log in. Format is /.", + "hostname": "hostname is the optional domain (e.g. \"mycompany.com\") for use with a hosted instance of GitHub Enterprise. It must match the GitHub Enterprise settings value that is configured at /setup/settings#hostname.", + "ca": "ca is the optional trusted certificate authority bundle to use when making requests to the server. If empty, the default system roots are used. This can only be configured when hostname is set to a non-empty value.", } func (GitHubIdentityProvider) SwaggerDoc() map[string]string { @@ -273,11 +273,11 @@ func (GitHubIdentityProvider) SwaggerDoc() map[string]string { var map_GitLabIdentityProvider = map[string]string{ "": "GitLabIdentityProvider provides identities for users authenticating using GitLab credentials\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. 
These capabilities should not be used by applications needing long term support.", - "ca": "CA is the optional trusted certificate authority bundle to use when making requests to the server If empty, the default system roots are used", - "url": "URL is the oauth server base URL", - "clientID": "ClientID is the oauth client ID", - "clientSecret": "ClientSecret is the oauth client secret", - "legacy": "Legacy determines if OAuth2 or OIDC should be used If true, OAuth2 is used If false, OIDC is used If nil and the URL's host is gitlab.com, OIDC is used Otherwise, OAuth2 is used In a future release, nil will default to using OIDC Eventually this flag will be removed and only OIDC will be used", + "ca": "ca is the optional trusted certificate authority bundle to use when making requests to the server If empty, the default system roots are used", + "url": "url is the oauth server base URL", + "clientID": "clientID is the oauth client ID", + "clientSecret": "clientSecret is the oauth client secret", + "legacy": "legacy determines if OAuth2 or OIDC should be used If true, OAuth2 is used If false, OIDC is used If nil and the URL's host is gitlab.com, OIDC is used Otherwise, OAuth2 is used In a future release, nil will default to using OIDC Eventually this flag will be removed and only OIDC will be used", } func (GitLabIdentityProvider) SwaggerDoc() map[string]string { @@ -286,9 +286,9 @@ func (GitLabIdentityProvider) SwaggerDoc() map[string]string { var map_GoogleIdentityProvider = map[string]string{ "": "GoogleIdentityProvider provides identities for users authenticating using Google credentials\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.", - "clientID": "ClientID is the oauth client ID", - "clientSecret": "ClientSecret is the oauth client secret", - "hostedDomain": "HostedDomain is the optional Google App domain (e.g. \"mycompany.com\") to restrict logins to", + "clientID": "clientID is the oauth client ID", + "clientSecret": "clientSecret is the oauth client secret", + "hostedDomain": "hostedDomain is the optional Google App domain (e.g. \"mycompany.com\") to restrict logins to", } func (GoogleIdentityProvider) SwaggerDoc() map[string]string { @@ -297,8 +297,8 @@ func (GoogleIdentityProvider) SwaggerDoc() map[string]string { var map_GrantConfig = map[string]string{ "": "GrantConfig holds the necessary configuration options for grant handlers", - "method": "Method determines the default strategy to use when an OAuth client requests a grant. This method will be used only if the specific OAuth client doesn't provide a strategy of their own. Valid grant handling methods are:\n - auto: always approves grant requests, useful for trusted clients\n - prompt: prompts the end user for approval of grant requests, useful for third-party clients\n - deny: always denies grant requests, useful for black-listed clients", - "serviceAccountMethod": "ServiceAccountMethod is used for determining client authorization for service account oauth client. It must be either: deny, prompt", + "method": "method determines the default strategy to use when an OAuth client requests a grant. This method will be used only if the specific OAuth client doesn't provide a strategy of their own. 
Valid grant handling methods are:\n - auto: always approves grant requests, useful for trusted clients\n - prompt: prompts the end user for approval of grant requests, useful for third-party clients\n - deny: always denies grant requests, useful for black-listed clients", + "serviceAccountMethod": "serviceAccountMethod is used for determining client authorization for service account oauth client. It must be either: deny, prompt", } func (GrantConfig) SwaggerDoc() map[string]string { @@ -307,8 +307,8 @@ func (GrantConfig) SwaggerDoc() map[string]string { var map_GroupResource = map[string]string{ "": "GroupResource points to a resource by its name and API group.", - "group": "Group is the name of an API group", - "resource": "Resource is the name of a resource.", + "group": "group is the name of an API group", + "resource": "resource is the name of a resource.", } func (GroupResource) SwaggerDoc() map[string]string { @@ -317,7 +317,7 @@ func (GroupResource) SwaggerDoc() map[string]string { var map_HTPasswdPasswordIdentityProvider = map[string]string{ "": "HTPasswdPasswordIdentityProvider provides identities for users authenticating using htpasswd credentials\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.", - "file": "File is a reference to your htpasswd file", + "file": "file is a reference to your htpasswd file", } func (HTPasswdPasswordIdentityProvider) SwaggerDoc() map[string]string { @@ -326,8 +326,8 @@ func (HTPasswdPasswordIdentityProvider) SwaggerDoc() map[string]string { var map_HTTPServingInfo = map[string]string{ "": "HTTPServingInfo holds configuration for serving HTTP", - "maxRequestsInFlight": "MaxRequestsInFlight is the number of concurrent requests allowed to the server. If zero, no limit.", - "requestTimeoutSeconds": "RequestTimeoutSeconds is the number of seconds before requests are timed out. The default is 60 minutes, if -1 there is no limit on requests.", + "maxRequestsInFlight": "maxRequestsInFlight is the number of concurrent requests allowed to the server. If zero, no limit.", + "requestTimeoutSeconds": "requestTimeoutSeconds is the number of seconds before requests are timed out. 
The default is 60 minutes, if -1 there is no limit on requests.", } func (HTTPServingInfo) SwaggerDoc() map[string]string { @@ -336,11 +336,11 @@ func (HTTPServingInfo) SwaggerDoc() map[string]string { var map_IdentityProvider = map[string]string{ "": "IdentityProvider provides identities for users authenticating using credentials", - "name": "Name is used to qualify the identities returned by this provider", + "name": "name is used to qualify the identities returned by this provider", "challenge": "UseAsChallenger indicates whether to issue WWW-Authenticate challenges for this provider", "login": "UseAsLogin indicates whether to use this identity provider for unauthenticated browsers to login against", - "mappingMethod": "MappingMethod determines how identities from this provider are mapped to users", - "provider": "Provider contains the information about how to set up a specific identity provider", + "mappingMethod": "mappingMethod determines how identities from this provider are mapped to users", + "provider": "provider contains the information about how to set up a specific identity provider", } func (IdentityProvider) SwaggerDoc() map[string]string { @@ -349,8 +349,8 @@ func (IdentityProvider) SwaggerDoc() map[string]string { var map_ImageConfig = map[string]string{ "": "ImageConfig holds the necessary configuration options for building image names for system components", - "format": "Format is the format of the name to be built for the system component", - "latest": "Latest determines if the latest tag will be pulled from the registry", + "format": "format is the format of the name to be built for the system component", + "latest": "latest determines if the latest tag will be pulled from the registry", } func (ImageConfig) SwaggerDoc() map[string]string { @@ -359,14 +359,14 @@ func (ImageConfig) SwaggerDoc() map[string]string { var map_ImagePolicyConfig = map[string]string{ "": "ImagePolicyConfig holds the necessary configuration options for limits and behavior for importing images", - "maxImagesBulkImportedPerRepository": "MaxImagesBulkImportedPerRepository controls the number of images that are imported when a user does a bulk import of a container repository. This number defaults to 50 to prevent users from importing large numbers of images accidentally. Set -1 for no limit.", - "disableScheduledImport": "DisableScheduledImport allows scheduled background import of images to be disabled.", - "scheduledImageImportMinimumIntervalSeconds": "ScheduledImageImportMinimumIntervalSeconds is the minimum number of seconds that can elapse between when image streams scheduled for background import are checked against the upstream repository. The default value is 15 minutes.", - "maxScheduledImageImportsPerMinute": "MaxScheduledImageImportsPerMinute is the maximum number of scheduled image streams that will be imported in the background per minute. The default value is 60. Set to -1 for unlimited.", - "allowedRegistriesForImport": "AllowedRegistriesForImport limits the container image registries that normal users may import images from. Set this list to the registries that you trust to contain valid Docker images and that you want applications to be able to import from. Users with permission to create Images or ImageStreamMappings via the API are not affected by this policy - typically only administrators or system integrations will have those permissions.", - "internalRegistryHostname": "InternalRegistryHostname sets the hostname for the default internal image registry. 
The value must be in \"hostname[:port]\" format.", - "externalRegistryHostname": "ExternalRegistryHostname sets the hostname for the default external image registry. The external hostname should be set only when the image registry is exposed externally. The value is used in 'publicDockerImageRepository' field in ImageStreams. The value must be in \"hostname[:port]\" format.", - "additionalTrustedCA": "AdditionalTrustedCA is a path to a pem bundle file containing additional CAs that should be trusted during imagestream import.", + "maxImagesBulkImportedPerRepository": "maxImagesBulkImportedPerRepository controls the number of images that are imported when a user does a bulk import of a container repository. This number defaults to 50 to prevent users from importing large numbers of images accidentally. Set -1 for no limit.", + "disableScheduledImport": "disableScheduledImport allows scheduled background import of images to be disabled.", + "scheduledImageImportMinimumIntervalSeconds": "scheduledImageImportMinimumIntervalSeconds is the minimum number of seconds that can elapse between when image streams scheduled for background import are checked against the upstream repository. The default value is 15 minutes.", + "maxScheduledImageImportsPerMinute": "maxScheduledImageImportsPerMinute is the maximum number of scheduled image streams that will be imported in the background per minute. The default value is 60. Set to -1 for unlimited.", + "allowedRegistriesForImport": "allowedRegistriesForImport limits the container image registries that normal users may import images from. Set this list to the registries that you trust to contain valid Docker images and that you want applications to be able to import from. Users with permission to create Images or ImageStreamMappings via the API are not affected by this policy - typically only administrators or system integrations will have those permissions.", + "internalRegistryHostname": "internalRegistryHostname sets the hostname for the default internal image registry. The value must be in \"hostname[:port]\" format.", + "externalRegistryHostname": "externalRegistryHostname sets the hostname for the default external image registry. The external hostname should be set only when the image registry is exposed externally. The value is used in 'publicDockerImageRepository' field in ImageStreams. The value must be in \"hostname[:port]\" format.", + "additionalTrustedCA": "additionalTrustedCA is a path to a pem bundle file containing additional CAs that should be trusted during imagestream import.", } func (ImagePolicyConfig) SwaggerDoc() map[string]string { @@ -375,11 +375,11 @@ func (ImagePolicyConfig) SwaggerDoc() map[string]string { var map_JenkinsPipelineConfig = map[string]string{ "": "JenkinsPipelineConfig holds configuration for the Jenkins pipeline strategy", - "autoProvisionEnabled": "AutoProvisionEnabled determines whether a Jenkins server will be spawned from the provided template when the first build config in the project with type JenkinsPipeline is created. When not specified this option defaults to true.", - "templateNamespace": "TemplateNamespace contains the namespace name where the Jenkins template is stored", - "templateName": "TemplateName is the name of the default Jenkins template", - "serviceName": "ServiceName is the name of the Jenkins service OpenShift uses to detect whether a Jenkins pipeline handler has already been installed in a project. 
This value *must* match a service name in the provided template.", - "parameters": "Parameters specifies a set of optional parameters to the Jenkins template.", + "autoProvisionEnabled": "autoProvisionEnabled determines whether a Jenkins server will be spawned from the provided template when the first build config in the project with type JenkinsPipeline is created. When not specified this option defaults to true.", + "templateNamespace": "templateNamespace contains the namespace name where the Jenkins template is stored", + "templateName": "templateName is the name of the default Jenkins template", + "serviceName": "serviceName is the name of the Jenkins service OpenShift uses to detect whether a Jenkins pipeline handler has already been installed in a project. This value *must* match a service name in the provided template.", + "parameters": "parameters specifies a set of optional parameters to the Jenkins template.", } func (JenkinsPipelineConfig) SwaggerDoc() map[string]string { @@ -389,7 +389,7 @@ func (JenkinsPipelineConfig) SwaggerDoc() map[string]string { var map_KeystonePasswordIdentityProvider = map[string]string{ "": "KeystonePasswordIdentityProvider provides identities for users authenticating using keystone password credentials\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.", "domainName": "Domain Name is required for keystone v3", - "useKeystoneIdentity": "UseKeystoneIdentity flag indicates that user should be authenticated by keystone ID, not by username", + "useKeystoneIdentity": "useKeystoneIdentity flag indicates that user should be authenticated by keystone ID, not by username", } func (KeystonePasswordIdentityProvider) SwaggerDoc() map[string]string { @@ -398,8 +398,8 @@ func (KeystonePasswordIdentityProvider) SwaggerDoc() map[string]string { var map_KubeletConnectionInfo = map[string]string{ "": "KubeletConnectionInfo holds information necessary for connecting to a kubelet", - "port": "Port is the port to connect to kubelets on", - "ca": "CA is the CA for verifying TLS connections to kubelets", + "port": "port is the port to connect to kubelets on", + "ca": "ca is the CA for verifying TLS connections to kubelets", } func (KubeletConnectionInfo) SwaggerDoc() map[string]string { @@ -408,18 +408,18 @@ func (KubeletConnectionInfo) SwaggerDoc() map[string]string { var map_KubernetesMasterConfig = map[string]string{ "": "KubernetesMasterConfig holds the necessary configuration options for the Kubernetes master", - "apiLevels": "APILevels is a list of API levels that should be enabled on startup: v1 as examples", - "disabledAPIGroupVersions": "DisabledAPIGroupVersions is a map of groups to the versions (or *) that should be disabled.", - "masterIP": "MasterIP is the public IP address of kubernetes stuff. If empty, the first result from net.InterfaceAddrs will be used.", - "masterEndpointReconcileTTL": "MasterEndpointReconcileTTL sets the time to live in seconds of an endpoint record recorded by each master. The endpoints are checked at an interval that is 2/3 of this value and this value defaults to 15s if unset. In very large clusters, this value may be increased to reduce the possibility that the master endpoint record expires (due to other load on the etcd server) and causes masters to drop in and out of the kubernetes service record. 
It is not recommended to set this value below 15s.", - "servicesSubnet": "ServicesSubnet is the subnet to use for assigning service IPs", - "servicesNodePortRange": "ServicesNodePortRange is the range to use for assigning service public ports on a host.", - "schedulerConfigFile": "SchedulerConfigFile points to a file that describes how to set up the scheduler. If empty, you get the default scheduling rules.", - "podEvictionTimeout": "PodEvictionTimeout controls grace period for deleting pods on failed nodes. It takes valid time duration string. If empty, you get the default pod eviction timeout.", - "proxyClientInfo": "ProxyClientInfo specifies the client cert/key to use when proxying to pods", - "apiServerArguments": "APIServerArguments are key value pairs that will be passed directly to the Kube apiserver that match the apiservers's command line arguments. These are not migrated, but if you reference a value that does not exist the server will not start. These values may override other settings in KubernetesMasterConfig which may cause invalid configurations.", - "controllerArguments": "ControllerArguments are key value pairs that will be passed directly to the Kube controller manager that match the controller manager's command line arguments. These are not migrated, but if you reference a value that does not exist the server will not start. These values may override other settings in KubernetesMasterConfig which may cause invalid configurations.", - "schedulerArguments": "SchedulerArguments are key value pairs that will be passed directly to the Kube scheduler that match the scheduler's command line arguments. These are not migrated, but if you reference a value that does not exist the server will not start. These values may override other settings in KubernetesMasterConfig which may cause invalid configurations.", + "apiLevels": "apiLevels is a list of API levels that should be enabled on startup: v1 as examples", + "disabledAPIGroupVersions": "disabledAPIGroupVersions is a map of groups to the versions (or *) that should be disabled.", + "masterIP": "masterIP is the public IP address of kubernetes stuff. If empty, the first result from net.InterfaceAddrs will be used.", + "masterEndpointReconcileTTL": "masterEndpointReconcileTTL sets the time to live in seconds of an endpoint record recorded by each master. The endpoints are checked at an interval that is 2/3 of this value and this value defaults to 15s if unset. In very large clusters, this value may be increased to reduce the possibility that the master endpoint record expires (due to other load on the etcd server) and causes masters to drop in and out of the kubernetes service record. It is not recommended to set this value below 15s.", + "servicesSubnet": "servicesSubnet is the subnet to use for assigning service IPs", + "servicesNodePortRange": "servicesNodePortRange is the range to use for assigning service public ports on a host.", + "schedulerConfigFile": "schedulerConfigFile points to a file that describes how to set up the scheduler. If empty, you get the default scheduling rules.", + "podEvictionTimeout": "podEvictionTimeout controls grace period for deleting pods on failed nodes. It takes valid time duration string. 
If empty, you get the default pod eviction timeout.", + "proxyClientInfo": "proxyClientInfo specifies the client cert/key to use when proxying to pods", + "apiServerArguments": "apiServerArguments are key value pairs that will be passed directly to the Kube apiserver that match the apiservers's command line arguments. These are not migrated, but if you reference a value that does not exist the server will not start. These values may override other settings in KubernetesMasterConfig which may cause invalid configurations.", + "controllerArguments": "controllerArguments are key value pairs that will be passed directly to the Kube controller manager that match the controller manager's command line arguments. These are not migrated, but if you reference a value that does not exist the server will not start. These values may override other settings in KubernetesMasterConfig which may cause invalid configurations.", + "schedulerArguments": "schedulerArguments are key value pairs that will be passed directly to the Kube scheduler that match the scheduler's command line arguments. These are not migrated, but if you reference a value that does not exist the server will not start. These values may override other settings in KubernetesMasterConfig which may cause invalid configurations.", } func (KubernetesMasterConfig) SwaggerDoc() map[string]string { @@ -428,10 +428,10 @@ func (KubernetesMasterConfig) SwaggerDoc() map[string]string { var map_LDAPAttributeMapping = map[string]string{ "": "LDAPAttributeMapping maps LDAP attributes to OpenShift identity fields", - "id": "ID is the list of attributes whose values should be used as the user ID. Required. LDAP standard identity attribute is \"dn\"", - "preferredUsername": "PreferredUsername is the list of attributes whose values should be used as the preferred username. LDAP standard login attribute is \"uid\"", - "name": "Name is the list of attributes whose values should be used as the display name. Optional. If unspecified, no display name is set for the identity LDAP standard display name attribute is \"cn\"", - "email": "Email is the list of attributes whose values should be used as the email address. Optional. If unspecified, no email is set for the identity", + "id": "id is the list of attributes whose values should be used as the user ID. Required. LDAP standard identity attribute is \"dn\"", + "preferredUsername": "preferredUsername is the list of attributes whose values should be used as the preferred username. LDAP standard login attribute is \"uid\"", + "name": "name is the list of attributes whose values should be used as the display name. Optional. If unspecified, no display name is set for the identity LDAP standard display name attribute is \"cn\"", + "email": "email is the list of attributes whose values should be used as the email address. Optional. If unspecified, no email is set for the identity", } func (LDAPAttributeMapping) SwaggerDoc() map[string]string { @@ -440,12 +440,12 @@ func (LDAPAttributeMapping) SwaggerDoc() map[string]string { var map_LDAPPasswordIdentityProvider = map[string]string{ "": "LDAPPasswordIdentityProvider provides identities for users authenticating using LDAP credentials\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.", - "url": "URL is an RFC 2255 URL which specifies the LDAP search parameters to use. 
The syntax of the URL is\n ldap://host:port/basedn?attribute?scope?filter", - "bindDN": "BindDN is an optional DN to bind with during the search phase.", - "bindPassword": "BindPassword is an optional password to bind with during the search phase.", + "url": "url is an RFC 2255 URL which specifies the LDAP search parameters to use. The syntax of the URL is\n ldap://host:port/basedn?attribute?scope?filter", + "bindDN": "bindDN is an optional DN to bind with during the search phase.", + "bindPassword": "bindPassword is an optional password to bind with during the search phase.", "insecure": "Insecure, if true, indicates the connection should not use TLS. Cannot be set to true with a URL scheme of \"ldaps://\" If false, \"ldaps://\" URLs connect using TLS, and \"ldap://\" URLs are upgraded to a TLS connection using StartTLS as specified in https://tools.ietf.org/html/rfc2830", - "ca": "CA is the optional trusted certificate authority bundle to use when making requests to the server If empty, the default system roots are used", - "attributes": "Attributes maps LDAP attributes to identities", + "ca": "ca is the optional trusted certificate authority bundle to use when making requests to the server If empty, the default system roots are used", + "attributes": "attributes maps LDAP attributes to identities", } func (LDAPPasswordIdentityProvider) SwaggerDoc() map[string]string { @@ -458,8 +458,8 @@ var map_LDAPQuery = map[string]string{ "scope": "The (optional) scope of the search. Can be: base: only the base object, one: all object on the base level, sub: the entire subtree Defaults to the entire subtree if not set", "derefAliases": "The (optional) behavior of the search with regards to alisases. Can be: never: never dereference aliases, search: only dereference in searching, base: only dereference in finding the base object, always: always dereference Defaults to always dereferencing if not set", "timeout": "TimeLimit holds the limit of time in seconds that any request to the server can remain outstanding before the wait for a response is given up. If this is 0, no client-side limit is imposed", - "filter": "Filter is a valid LDAP search filter that retrieves all relevant entries from the LDAP server with the base DN", - "pageSize": "PageSize is the maximum preferred page size, measured in LDAP entries. A page size of 0 means no paging will be done.", + "filter": "filter is a valid LDAP search filter that retrieves all relevant entries from the LDAP server with the base DN", + "pageSize": "pageSize is the maximum preferred page size, measured in LDAP entries. A page size of 0 means no paging will be done.", } func (LDAPQuery) SwaggerDoc() map[string]string { @@ -469,10 +469,10 @@ func (LDAPQuery) SwaggerDoc() map[string]string { var map_LDAPSyncConfig = map[string]string{ "": "LDAPSyncConfig holds the necessary configuration options to define an LDAP group sync\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. 
These capabilities should not be used by applications needing long term support.", "url": "Host is the scheme, host and port of the LDAP server to connect to: scheme://host:port", - "bindDN": "BindDN is an optional DN to bind to the LDAP server with", - "bindPassword": "BindPassword is an optional password to bind with during the search phase.", + "bindDN": "bindDN is an optional DN to bind to the LDAP server with", + "bindPassword": "bindPassword is an optional password to bind with during the search phase.", "insecure": "Insecure, if true, indicates the connection should not use TLS. Cannot be set to true with a URL scheme of \"ldaps://\" If false, \"ldaps://\" URLs connect using TLS, and \"ldap://\" URLs are upgraded to a TLS connection using StartTLS as specified in https://tools.ietf.org/html/rfc2830", - "ca": "CA is the optional trusted certificate authority bundle to use when making requests to the server If empty, the default system roots are used", + "ca": "ca is the optional trusted certificate authority bundle to use when making requests to the server If empty, the default system roots are used", "groupUIDNameMapping": "LDAPGroupUIDToOpenShiftGroupNameMapping is an optional direct mapping of LDAP group UIDs to OpenShift Group names", "rfc2307": "RFC2307Config holds the configuration for extracting data from an LDAP server set up in a fashion similar to RFC2307: first-class group and user entries, with group membership determined by a multi-valued attribute on the group entry listing its members", "activeDirectory": "ActiveDirectoryConfig holds the configuration for extracting data from an LDAP server set up in a fashion similar to that used in Active Directory: first-class user entries, with group membership determined by a multi-valued attribute on members listing groups they are a member of", @@ -494,9 +494,9 @@ func (LocalQuota) SwaggerDoc() map[string]string { var map_MasterAuthConfig = map[string]string{ "": "MasterAuthConfig configures authentication options in addition to the standard oauth token and client certificate authenticators", - "requestHeader": "RequestHeader holds options for setting up a front proxy against the API. It is optional.", + "requestHeader": "requestHeader holds options for setting up a front proxy against the API. It is optional.", "webhookTokenAuthenticators": "WebhookTokenAuthnConfig, if present configures remote token reviewers", - "oauthMetadataFile": "OAuthMetadataFile is a path to a file containing the discovery endpoint for OAuth 2.0 Authorization Server Metadata for an external OAuth server. See IETF Draft: // https://tools.ietf.org/html/draft-ietf-oauth-discovery-04#section-2 This option is mutually exclusive with OAuthConfig", + "oauthMetadataFile": "oauthMetadataFile is a path to a file containing the discovery endpoint for OAuth 2.0 Authorization Server Metadata for an external OAuth server. 
See IETF Draft: // https://tools.ietf.org/html/draft-ietf-oauth-discovery-04#section-2 This option is mutually exclusive with OAuthConfig", } func (MasterAuthConfig) SwaggerDoc() map[string]string { @@ -505,8 +505,8 @@ func (MasterAuthConfig) SwaggerDoc() map[string]string { var map_MasterClients = map[string]string{ "": "MasterClients holds references to `.kubeconfig` files that qualify master clients for OpenShift and Kubernetes", - "openshiftLoopbackKubeConfig": "OpenShiftLoopbackKubeConfig is a .kubeconfig filename for system components to loopback to this master", - "openshiftLoopbackClientConnectionOverrides": "OpenShiftLoopbackClientConnectionOverrides specifies client overrides for system components to loop back to this master.", + "openshiftLoopbackKubeConfig": "openshiftLoopbackKubeConfig is a .kubeconfig filename for system components to loopback to this master", + "openshiftLoopbackClientConnectionOverrides": "openshiftLoopbackClientConnectionOverrides specifies client overrides for system components to loop back to this master.", } func (MasterClients) SwaggerDoc() map[string]string { @@ -515,33 +515,33 @@ func (MasterClients) SwaggerDoc() map[string]string { var map_MasterConfig = map[string]string{ "": "MasterConfig holds the necessary configuration options for the OpenShift master\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.", - "servingInfo": "ServingInfo describes how to start serving", - "authConfig": "AuthConfig configures authentication options in addition to the standard oauth token and client certificate authenticators", - "aggregatorConfig": "AggregatorConfig has options for configuring the aggregator component of the API server.", + "servingInfo": "servingInfo describes how to start serving", + "authConfig": "authConfig configures authentication options in addition to the standard oauth token and client certificate authenticators", + "aggregatorConfig": "aggregatorConfig has options for configuring the aggregator component of the API server.", "corsAllowedOrigins": "CORSAllowedOrigins", - "apiLevels": "APILevels is a list of API levels that should be enabled on startup: v1 as examples", - "masterPublicURL": "MasterPublicURL is how clients can access the OpenShift API server", - "controllers": "Controllers is a list of the controllers that should be started. If set to \"none\", no controllers will start automatically. The default value is \"*\" which will start all controllers. When using \"*\", you may exclude controllers by prepending a \"-\" in front of their name. No other values are recognized at this time.", - "admissionConfig": "AdmissionConfig contains admission control plugin configuration.", - "controllerConfig": "ControllerConfig holds configuration values for controllers", - "etcdStorageConfig": "EtcdStorageConfig contains information about how API resources are stored in Etcd. These values are only relevant when etcd is the backing store for the cluster.", - "etcdClientInfo": "EtcdClientInfo contains information about how to connect to etcd", - "kubeletClientInfo": "KubeletClientInfo contains information about how to connect to kubelets", + "apiLevels": "apiLevels is a list of API levels that should be enabled on startup: v1 as examples", + "masterPublicURL": "masterPublicURL is how clients can access the OpenShift API server", + "controllers": "controllers is a list of the controllers that should be started. 
If set to \"none\", no controllers will start automatically. The default value is \"*\" which will start all controllers. When using \"*\", you may exclude controllers by prepending a \"-\" in front of their name. No other values are recognized at this time.", + "admissionConfig": "admissionConfig contains admission control plugin configuration.", + "controllerConfig": "controllerConfig holds configuration values for controllers", + "etcdStorageConfig": "etcdStorageConfig contains information about how API resources are stored in Etcd. These values are only relevant when etcd is the backing store for the cluster.", + "etcdClientInfo": "etcdClientInfo contains information about how to connect to etcd", + "kubeletClientInfo": "kubeletClientInfo contains information about how to connect to kubelets", "kubernetesMasterConfig": "KubernetesMasterConfig, if present start the kubernetes master in this process", "etcdConfig": "EtcdConfig, if present start etcd in this process", "oauthConfig": "OAuthConfig, if present start the /oauth endpoint in this process", "dnsConfig": "DNSConfig, if present start the DNS server in this process", - "serviceAccountConfig": "ServiceAccountConfig holds options related to service accounts", - "masterClients": "MasterClients holds all the client connection information for controllers and other system components", - "imageConfig": "ImageConfig holds options that describe how to build image names for system components", - "imagePolicyConfig": "ImagePolicyConfig controls limits and behavior for importing images", - "policyConfig": "PolicyConfig holds information about where to locate critical pieces of bootstrapping policy", - "projectConfig": "ProjectConfig holds information about project creation and defaults", - "routingConfig": "RoutingConfig holds information about routing and route generation", - "networkConfig": "NetworkConfig to be passed to the compiled in network plugin", + "serviceAccountConfig": "serviceAccountConfig holds options related to service accounts", + "masterClients": "masterClients holds all the client connection information for controllers and other system components", + "imageConfig": "imageConfig holds options that describe how to build image names for system components", + "imagePolicyConfig": "imagePolicyConfig controls limits and behavior for importing images", + "policyConfig": "policyConfig holds information about where to locate critical pieces of bootstrapping policy", + "projectConfig": "projectConfig holds information about project creation and defaults", + "routingConfig": "routingConfig holds information about routing and route generation", + "networkConfig": "networkConfig to be passed to the compiled in network plugin", "volumeConfig": "MasterVolumeConfig contains options for configuring volume plugins in the master node.", - "jenkinsPipelineConfig": "JenkinsPipelineConfig holds information about the default Jenkins template used for JenkinsPipeline build strategy.", - "auditConfig": "AuditConfig holds information related to auditing capabilities.", + "jenkinsPipelineConfig": "jenkinsPipelineConfig holds information about the default Jenkins template used for JenkinsPipeline build strategy.", + "auditConfig": "auditConfig holds information related to auditing capabilities.", } func (MasterConfig) SwaggerDoc() map[string]string { @@ -550,14 +550,14 @@ func (MasterConfig) SwaggerDoc() map[string]string { var map_MasterNetworkConfig = map[string]string{ "": "MasterNetworkConfig to be passed to the compiled in network plugin", - 
"networkPluginName": "NetworkPluginName is the name of the network plugin to use", - "clusterNetworkCIDR": "ClusterNetworkCIDR is the CIDR string to specify the global overlay network's L3 space. Deprecated, but maintained for backwards compatibility, use ClusterNetworks instead.", - "clusterNetworks": "ClusterNetworks is a list of ClusterNetwork objects that defines the global overlay network's L3 space by specifying a set of CIDR and netmasks that the SDN can allocate addressed from. If this is specified, then ClusterNetworkCIDR and HostSubnetLength may not be set.", - "hostSubnetLength": "HostSubnetLength is the number of bits to allocate to each host's subnet e.g. 8 would mean a /24 network on the host. Deprecated, but maintained for backwards compatibility, use ClusterNetworks instead.", + "networkPluginName": "networkPluginName is the name of the network plugin to use", + "clusterNetworkCIDR": "clusterNetworkCIDR is the CIDR string to specify the global overlay network's L3 space. Deprecated, but maintained for backwards compatibility, use ClusterNetworks instead.", + "clusterNetworks": "clusterNetworks is a list of ClusterNetwork objects that defines the global overlay network's L3 space by specifying a set of CIDR and netmasks that the SDN can allocate addressed from. If this is specified, then ClusterNetworkCIDR and HostSubnetLength may not be set.", + "hostSubnetLength": "hostSubnetLength is the number of bits to allocate to each host's subnet e.g. 8 would mean a /24 network on the host. Deprecated, but maintained for backwards compatibility, use ClusterNetworks instead.", "serviceNetworkCIDR": "ServiceNetwork is the CIDR string to specify the service networks", - "externalIPNetworkCIDRs": "ExternalIPNetworkCIDRs controls what values are acceptable for the service external IP field. If empty, no externalIP may be set. It may contain a list of CIDRs which are checked for access. If a CIDR is prefixed with !, IPs in that CIDR will be rejected. Rejections will be applied first, then the IP checked against one of the allowed CIDRs. You should ensure this range does not overlap with your nodes, pods, or service CIDRs for security reasons.", - "ingressIPNetworkCIDR": "IngressIPNetworkCIDR controls the range to assign ingress ips from for services of type LoadBalancer on bare metal. If empty, ingress ips will not be assigned. It may contain a single CIDR that will be allocated from. For security reasons, you should ensure that this range does not overlap with the CIDRs reserved for external ips, nodes, pods, or services.", - "vxlanPort": "VXLANPort is the VXLAN port used by the cluster defaults. If it is not set, 4789 is the default value", + "externalIPNetworkCIDRs": "externalIPNetworkCIDRs controls what values are acceptable for the service external IP field. If empty, no externalIP may be set. It may contain a list of CIDRs which are checked for access. If a CIDR is prefixed with !, IPs in that CIDR will be rejected. Rejections will be applied first, then the IP checked against one of the allowed CIDRs. You should ensure this range does not overlap with your nodes, pods, or service CIDRs for security reasons.", + "ingressIPNetworkCIDR": "ingressIPNetworkCIDR controls the range to assign ingress ips from for services of type LoadBalancer on bare metal. If empty, ingress ips will not be assigned. It may contain a single CIDR that will be allocated from. 
For security reasons, you should ensure that this range does not overlap with the CIDRs reserved for external ips, nodes, pods, or services.", + "vxlanPort": "vxlanPort is the VXLAN port used by the cluster defaults. If it is not set, 4789 is the default value", } func (MasterNetworkConfig) SwaggerDoc() map[string]string { @@ -566,7 +566,7 @@ func (MasterNetworkConfig) SwaggerDoc() map[string]string { var map_MasterVolumeConfig = map[string]string{ "": "MasterVolumeConfig contains options for configuring volume plugins in the master node.", - "dynamicProvisioningEnabled": "DynamicProvisioningEnabled is a boolean that toggles dynamic provisioning off when false, defaults to true", + "dynamicProvisioningEnabled": "dynamicProvisioningEnabled is a boolean that toggles dynamic provisioning off when false, defaults to true", } func (MasterVolumeConfig) SwaggerDoc() map[string]string { @@ -575,7 +575,7 @@ func (MasterVolumeConfig) SwaggerDoc() map[string]string { var map_NamedCertificate = map[string]string{ "": "NamedCertificate specifies a certificate/key, and the names it should be served for", - "names": "Names is a list of DNS names this certificate should be used to secure A name can be a normal DNS name, or can contain leading wildcard segments.", + "names": "names is a list of DNS names this certificate should be used to secure A name can be a normal DNS name, or can contain leading wildcard segments.", } func (NamedCertificate) SwaggerDoc() map[string]string { @@ -584,10 +584,10 @@ func (NamedCertificate) SwaggerDoc() map[string]string { var map_NodeAuthConfig = map[string]string{ "": "NodeAuthConfig holds authn/authz configuration options", - "authenticationCacheTTL": "AuthenticationCacheTTL indicates how long an authentication result should be cached. It takes a valid time duration string (e.g. \"5m\"). If empty, you get the default timeout. If zero (e.g. \"0m\"), caching is disabled", - "authenticationCacheSize": "AuthenticationCacheSize indicates how many authentication results should be cached. If 0, the default cache size is used.", - "authorizationCacheTTL": "AuthorizationCacheTTL indicates how long an authorization result should be cached. It takes a valid time duration string (e.g. \"5m\"). If empty, you get the default timeout. If zero (e.g. \"0m\"), caching is disabled", - "authorizationCacheSize": "AuthorizationCacheSize indicates how many authorization results should be cached. If 0, the default cache size is used.", + "authenticationCacheTTL": "authenticationCacheTTL indicates how long an authentication result should be cached. It takes a valid time duration string (e.g. \"5m\"). If empty, you get the default timeout. If zero (e.g. \"0m\"), caching is disabled", + "authenticationCacheSize": "authenticationCacheSize indicates how many authentication results should be cached. If 0, the default cache size is used.", + "authorizationCacheTTL": "authorizationCacheTTL indicates how long an authorization result should be cached. It takes a valid time duration string (e.g. \"5m\"). If empty, you get the default timeout. If zero (e.g. \"0m\"), caching is disabled", + "authorizationCacheSize": "authorizationCacheSize indicates how many authorization results should be cached. 
If 0, the default cache size is used.", } func (NodeAuthConfig) SwaggerDoc() map[string]string { @@ -596,29 +596,29 @@ func (NodeAuthConfig) SwaggerDoc() map[string]string { var map_NodeConfig = map[string]string{ "": "NodeConfig is the fully specified config starting an OpenShift node\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.", - "nodeName": "NodeName is the value used to identify this particular node in the cluster. If possible, this should be your fully qualified hostname. If you're describing a set of static nodes to the master, this value must match one of the values in the list", + "nodeName": "nodeName is the value used to identify this particular node in the cluster. If possible, this should be your fully qualified hostname. If you're describing a set of static nodes to the master, this value must match one of the values in the list", "nodeIP": "Node may have multiple IPs, specify the IP to use for pod traffic routing If not specified, network parse/lookup on the nodeName is performed and the first non-loopback address is used", - "servingInfo": "ServingInfo describes how to start serving", - "masterKubeConfig": "MasterKubeConfig is a filename for the .kubeconfig file that describes how to connect this node to the master", - "masterClientConnectionOverrides": "MasterClientConnectionOverrides provides overrides to the client connection used to connect to the master.", - "dnsDomain": "DNSDomain holds the domain suffix that will be used for the DNS search path inside each container. Defaults to 'cluster.local'.", - "dnsIP": "DNSIP is the IP address that pods will use to access cluster DNS. Defaults to the service IP of the Kubernetes master. This IP must be listening on port 53 for compatibility with libc resolvers (which cannot be configured to resolve names from any other port). When running more complex local DNS configurations, this is often set to the local address of a DNS proxy like dnsmasq, which then will consult either the local DNS (see dnsBindAddress) or the master DNS.", - "dnsBindAddress": "DNSBindAddress is the ip:port to serve DNS on. If this is not set, the DNS server will not be started. Because most DNS resolvers will only listen on port 53, if you select an alternative port you will need a DNS proxy like dnsmasq to answer queries for containers. A common configuration is dnsmasq configured on a node IP listening on 53 and delegating queries for dnsDomain to this process, while sending other queries to the host environments nameservers.", - "dnsNameservers": "DNSNameservers is a list of ip:port values of recursive nameservers to forward queries to when running a local DNS server if dnsBindAddress is set. If this value is empty, the DNS server will default to the nameservers listed in /etc/resolv.conf. If you have configured dnsmasq or another DNS proxy on the system, this value should be set to the upstream nameservers dnsmasq resolves with.", - "dnsRecursiveResolvConf": "DNSRecursiveResolvConf is a path to a resolv.conf file that contains settings for an upstream server. Only the nameservers and port fields are used. The file must exist and parse correctly. 
It adds extra nameservers to DNSNameservers if set.", + "servingInfo": "servingInfo describes how to start serving", + "masterKubeConfig": "masterKubeConfig is a filename for the .kubeconfig file that describes how to connect this node to the master", + "masterClientConnectionOverrides": "masterClientConnectionOverrides provides overrides to the client connection used to connect to the master.", + "dnsDomain": "dnsDomain holds the domain suffix that will be used for the DNS search path inside each container. Defaults to 'cluster.local'.", + "dnsIP": "dnsIP is the IP address that pods will use to access cluster DNS. Defaults to the service IP of the Kubernetes master. This IP must be listening on port 53 for compatibility with libc resolvers (which cannot be configured to resolve names from any other port). When running more complex local DNS configurations, this is often set to the local address of a DNS proxy like dnsmasq, which then will consult either the local DNS (see dnsBindAddress) or the master DNS.", + "dnsBindAddress": "dnsBindAddress is the ip:port to serve DNS on. If this is not set, the DNS server will not be started. Because most DNS resolvers will only listen on port 53, if you select an alternative port you will need a DNS proxy like dnsmasq to answer queries for containers. A common configuration is dnsmasq configured on a node IP listening on 53 and delegating queries for dnsDomain to this process, while sending other queries to the host environments nameservers.", + "dnsNameservers": "dnsNameservers is a list of ip:port values of recursive nameservers to forward queries to when running a local DNS server if dnsBindAddress is set. If this value is empty, the DNS server will default to the nameservers listed in /etc/resolv.conf. If you have configured dnsmasq or another DNS proxy on the system, this value should be set to the upstream nameservers dnsmasq resolves with.", + "dnsRecursiveResolvConf": "dnsRecursiveResolvConf is a path to a resolv.conf file that contains settings for an upstream server. Only the nameservers and port fields are used. The file must exist and parse correctly. It adds extra nameservers to DNSNameservers if set.", "networkPluginName": "Deprecated and maintained for backward compatibility, use NetworkConfig.NetworkPluginName instead", - "networkConfig": "NetworkConfig provides network options for the node", - "volumeDirectory": "VolumeDirectory is the directory that volumes will be stored under", - "imageConfig": "ImageConfig holds options that describe how to build image names for system components", - "allowDisabledDocker": "AllowDisabledDocker if true, the Kubelet will ignore errors from Docker. This means that a node can start on a machine that doesn't have docker started.", - "podManifestConfig": "PodManifestConfig holds the configuration for enabling the Kubelet to create pods based from a manifest file(s) placed locally on the node", - "authConfig": "AuthConfig holds authn/authz configuration options", - "dockerConfig": "DockerConfig holds Docker related configuration options.", - "kubeletArguments": "KubeletArguments are key value pairs that will be passed directly to the Kubelet that match the Kubelet's command line arguments. These are not migrated or validated, so if you use them they may become invalid. These values override other settings in NodeConfig which may cause invalid configurations.", - "proxyArguments": "ProxyArguments are key value pairs that will be passed directly to the Proxy that match the Proxy's command line arguments. 
These are not migrated or validated, so if you use them they may become invalid. These values override other settings in NodeConfig which may cause invalid configurations.", - "iptablesSyncPeriod": "IPTablesSyncPeriod is how often iptable rules are refreshed", - "enableUnidling": "EnableUnidling controls whether or not the hybrid unidling proxy will be set up", - "volumeConfig": "VolumeConfig contains options for configuring volumes on the node.", + "networkConfig": "networkConfig provides network options for the node", + "volumeDirectory": "volumeDirectory is the directory that volumes will be stored under", + "imageConfig": "imageConfig holds options that describe how to build image names for system components", + "allowDisabledDocker": "allowDisabledDocker if true, the Kubelet will ignore errors from Docker. This means that a node can start on a machine that doesn't have docker started.", + "podManifestConfig": "podManifestConfig holds the configuration for enabling the Kubelet to create pods based from a manifest file(s) placed locally on the node", + "authConfig": "authConfig holds authn/authz configuration options", + "dockerConfig": "dockerConfig holds Docker related configuration options.", + "kubeletArguments": "kubeletArguments are key value pairs that will be passed directly to the Kubelet that match the Kubelet's command line arguments. These are not migrated or validated, so if you use them they may become invalid. These values override other settings in NodeConfig which may cause invalid configurations.", + "proxyArguments": "proxyArguments are key value pairs that will be passed directly to the Proxy that match the Proxy's command line arguments. These are not migrated or validated, so if you use them they may become invalid. These values override other settings in NodeConfig which may cause invalid configurations.", + "iptablesSyncPeriod": "iptablesSyncPeriod is how often iptable rules are refreshed", + "enableUnidling": "enableUnidling controls whether or not the hybrid unidling proxy will be set up", + "volumeConfig": "volumeConfig contains options for configuring volumes on the node.", } func (NodeConfig) SwaggerDoc() map[string]string { @@ -627,7 +627,7 @@ func (NodeConfig) SwaggerDoc() map[string]string { var map_NodeNetworkConfig = map[string]string{ "": "NodeNetworkConfig provides network options for the node", - "networkPluginName": "NetworkPluginName is a string specifying the networking plugin", + "networkPluginName": "networkPluginName is a string specifying the networking plugin", "mtu": "Maximum transmission unit for the network packets", } @@ -637,7 +637,7 @@ func (NodeNetworkConfig) SwaggerDoc() map[string]string { var map_NodeVolumeConfig = map[string]string{ "": "NodeVolumeConfig contains options for configuring volumes on the node.", - "localQuota": "LocalQuota contains options for controlling local volume quota on the node.", + "localQuota": "localQuota contains options for controlling local volume quota on the node.", } func (NodeVolumeConfig) SwaggerDoc() map[string]string { @@ -646,16 +646,16 @@ func (NodeVolumeConfig) SwaggerDoc() map[string]string { var map_OAuthConfig = map[string]string{ "": "OAuthConfig holds the necessary configuration options for OAuth authentication", - "masterCA": "MasterCA is the CA for verifying the TLS connection back to the MasterURL.", - "masterURL": "MasterURL is used for making server-to-server calls to exchange authorization codes for access tokens", - "masterPublicURL": "MasterPublicURL is used for building valid 
client redirect URLs for internal and external access", - "assetPublicURL": "AssetPublicURL is used for building valid client redirect URLs for external access", - "alwaysShowProviderSelection": "AlwaysShowProviderSelection will force the provider selection page to render even when there is only a single provider.", - "identityProviders": "IdentityProviders is an ordered list of ways for a user to identify themselves", - "grantConfig": "GrantConfig describes how to handle grants", - "sessionConfig": "SessionConfig hold information about configuring sessions.", - "tokenConfig": "TokenConfig contains options for authorization and access tokens", - "templates": "Templates allow you to customize pages like the login page.", + "masterCA": "masterCA is the CA for verifying the TLS connection back to the MasterURL.", + "masterURL": "masterURL is used for making server-to-server calls to exchange authorization codes for access tokens", + "masterPublicURL": "masterPublicURL is used for building valid client redirect URLs for internal and external access", + "assetPublicURL": "assetPublicURL is used for building valid client redirect URLs for external access", + "alwaysShowProviderSelection": "alwaysShowProviderSelection will force the provider selection page to render even when there is only a single provider.", + "identityProviders": "identityProviders is an ordered list of ways for a user to identify themselves", + "grantConfig": "grantConfig describes how to handle grants", + "sessionConfig": "sessionConfig hold information about configuring sessions.", + "tokenConfig": "tokenConfig contains options for authorization and access tokens", + "templates": "templates allow you to customize pages like the login page.", } func (OAuthConfig) SwaggerDoc() map[string]string { @@ -664,9 +664,9 @@ func (OAuthConfig) SwaggerDoc() map[string]string { var map_OAuthTemplates = map[string]string{ "": "OAuthTemplates allow for customization of pages like the login page", - "login": "Login is a path to a file containing a go template used to render the login page. If unspecified, the default login page is used.", - "providerSelection": "ProviderSelection is a path to a file containing a go template used to render the provider selection page. If unspecified, the default provider selection page is used.", - "error": "Error is a path to a file containing a go template used to render error pages during the authentication or grant flow If unspecified, the default error page is used.", + "login": "login is a path to a file containing a go template used to render the login page. If unspecified, the default login page is used.", + "providerSelection": "providerSelection is a path to a file containing a go template used to render the provider selection page. If unspecified, the default provider selection page is used.", + "error": "error is a path to a file containing a go template used to render error pages during the authentication or grant flow If unspecified, the default error page is used.", } func (OAuthTemplates) SwaggerDoc() map[string]string { @@ -675,10 +675,10 @@ func (OAuthTemplates) SwaggerDoc() map[string]string { var map_OpenIDClaims = map[string]string{ "": "OpenIDClaims contains a list of OpenID claims to use when authenticating with an OpenID identity provider", - "id": "ID is the list of claims whose values should be used as the user ID. Required. 
OpenID standard identity claim is \"sub\"", - "preferredUsername": "PreferredUsername is the list of claims whose values should be used as the preferred username. If unspecified, the preferred username is determined from the value of the id claim", - "name": "Name is the list of claims whose values should be used as the display name. Optional. If unspecified, no display name is set for the identity", - "email": "Email is the list of claims whose values should be used as the email address. Optional. If unspecified, no email is set for the identity", + "id": "id is the list of claims whose values should be used as the user ID. Required. OpenID standard identity claim is \"sub\"", + "preferredUsername": "preferredUsername is the list of claims whose values should be used as the preferred username. If unspecified, the preferred username is determined from the value of the id claim", + "name": "name is the list of claims whose values should be used as the display name. Optional. If unspecified, no display name is set for the identity", + "email": "email is the list of claims whose values should be used as the email address. Optional. If unspecified, no email is set for the identity", } func (OpenIDClaims) SwaggerDoc() map[string]string { @@ -687,13 +687,13 @@ func (OpenIDClaims) SwaggerDoc() map[string]string { var map_OpenIDIdentityProvider = map[string]string{ "": "OpenIDIdentityProvider provides identities for users authenticating using OpenID credentials\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.", - "ca": "CA is the optional trusted certificate authority bundle to use when making requests to the server If empty, the default system roots are used", - "clientID": "ClientID is the oauth client ID", - "clientSecret": "ClientSecret is the oauth client secret", - "extraScopes": "ExtraScopes are any scopes to request in addition to the standard \"openid\" scope.", - "extraAuthorizeParameters": "ExtraAuthorizeParameters are any custom parameters to add to the authorize request.", - "urls": "URLs to use to authenticate", - "claims": "Claims mappings", + "ca": "ca is the optional trusted certificate authority bundle to use when making requests to the server If empty, the default system roots are used", + "clientID": "clientID is the oauth client ID", + "clientSecret": "clientSecret is the oauth client secret", + "extraScopes": "extraScopes are any scopes to request in addition to the standard \"openid\" scope.", + "extraAuthorizeParameters": "extraAuthorizeParameters are any custom parameters to add to the authorize request.", + "urls": "urls to use to authenticate", + "claims": "claims mappings", } func (OpenIDIdentityProvider) SwaggerDoc() map[string]string { @@ -702,9 +702,9 @@ func (OpenIDIdentityProvider) SwaggerDoc() map[string]string { var map_OpenIDURLs = map[string]string{ "": "OpenIDURLs are URLs to use when authenticating with an OpenID identity provider", - "authorize": "Authorize is the oauth authorization URL", - "token": "Token is the oauth token granting URL", - "userInfo": "UserInfo is the optional userinfo URL. If present, a granted access_token is used to request claims If empty, a granted id_token is parsed for claims", + "authorize": "authorize is the oauth authorization URL", + "token": "token is the oauth token granting URL", + "userInfo": "userInfo is the optional userinfo URL. 
If present, a granted access_token is used to request claims If empty, a granted id_token is parsed for claims", } func (OpenIDURLs) SwaggerDoc() map[string]string { @@ -713,8 +713,8 @@ func (OpenIDURLs) SwaggerDoc() map[string]string { var map_PodManifestConfig = map[string]string{ "": "PodManifestConfig holds the necessary configuration options for using pod manifests", - "path": "Path specifies the path for the pod manifest file or directory If its a directory, its expected to contain on or more manifest files This is used by the Kubelet to create pods on the node", - "fileCheckIntervalSeconds": "FileCheckIntervalSeconds is the interval in seconds for checking the manifest file(s) for new data The interval needs to be a positive value", + "path": "path specifies the path for the pod manifest file or directory If its a directory, its expected to contain on or more manifest files This is used by the Kubelet to create pods on the node", + "fileCheckIntervalSeconds": "fileCheckIntervalSeconds is the interval in seconds for checking the manifest file(s) for new data The interval needs to be a positive value", } func (PodManifestConfig) SwaggerDoc() map[string]string { @@ -723,7 +723,7 @@ func (PodManifestConfig) SwaggerDoc() map[string]string { var map_PolicyConfig = map[string]string{ "": "holds the necessary configuration options for", - "userAgentMatchingConfig": "UserAgentMatchingConfig controls how API calls from *voluntarily* identifying clients will be handled. THIS DOES NOT DEFEND AGAINST MALICIOUS CLIENTS!", + "userAgentMatchingConfig": "userAgentMatchingConfig controls how API calls from *voluntarily* identifying clients will be handled. THIS DOES NOT DEFEND AGAINST MALICIOUS CLIENTS!", } func (PolicyConfig) SwaggerDoc() map[string]string { @@ -732,10 +732,10 @@ func (PolicyConfig) SwaggerDoc() map[string]string { var map_ProjectConfig = map[string]string{ "": "holds the necessary configuration options for", - "defaultNodeSelector": "DefaultNodeSelector holds default project node label selector", - "projectRequestMessage": "ProjectRequestMessage is the string presented to a user if they are unable to request a project via the projectrequest api endpoint", - "projectRequestTemplate": "ProjectRequestTemplate is the template to use for creating projects in response to projectrequest. It is in the format namespace/template and it is optional. If it is not specified, a default template is used.", - "securityAllocator": "SecurityAllocator controls the automatic allocation of UIDs and MCS labels to a project. If nil, allocation is disabled.", + "defaultNodeSelector": "defaultNodeSelector holds default project node label selector", + "projectRequestMessage": "projectRequestMessage is the string presented to a user if they are unable to request a project via the projectrequest api endpoint", + "projectRequestTemplate": "projectRequestTemplate is the template to use for creating projects in response to projectrequest. It is in the format namespace/template and it is optional. If it is not specified, a default template is used.", + "securityAllocator": "securityAllocator controls the automatic allocation of UIDs and MCS labels to a project. 
If nil, allocation is disabled.", } func (ProjectConfig) SwaggerDoc() map[string]string { @@ -746,13 +746,13 @@ var map_RFC2307Config = map[string]string{ "": "RFC2307Config holds the necessary configuration options to define how an LDAP group sync interacts with an LDAP server using the RFC2307 schema", "groupsQuery": "AllGroupsQuery holds the template for an LDAP query that returns group entries.", "groupUIDAttribute": "GroupUIDAttributes defines which attribute on an LDAP group entry will be interpreted as its unique identifier. (ldapGroupUID)", - "groupNameAttributes": "GroupNameAttributes defines which attributes on an LDAP group entry will be interpreted as its name to use for an OpenShift group", - "groupMembershipAttributes": "GroupMembershipAttributes defines which attributes on an LDAP group entry will be interpreted as its members. The values contained in those attributes must be queryable by your UserUIDAttribute", + "groupNameAttributes": "groupNameAttributes defines which attributes on an LDAP group entry will be interpreted as its name to use for an OpenShift group", + "groupMembershipAttributes": "groupMembershipAttributes defines which attributes on an LDAP group entry will be interpreted as its members. The values contained in those attributes must be queryable by your UserUIDAttribute", "usersQuery": "AllUsersQuery holds the template for an LDAP query that returns user entries.", - "userUIDAttribute": "UserUIDAttribute defines which attribute on an LDAP user entry will be interpreted as its unique identifier. It must correspond to values that will be found from the GroupMembershipAttributes", - "userNameAttributes": "UserNameAttributes defines which attributes on an LDAP user entry will be used, in order, as its OpenShift user name. The first attribute with a non-empty value is used. This should match your PreferredUsername setting for your LDAPPasswordIdentityProvider", - "tolerateMemberNotFoundErrors": "TolerateMemberNotFoundErrors determines the behavior of the LDAP sync job when missing user entries are encountered. If 'true', an LDAP query for users that doesn't find any will be tolerated and an only and error will be logged. If 'false', the LDAP sync job will fail if a query for users doesn't find any. The default value is 'false'. Misconfigured LDAP sync jobs with this flag set to 'true' can cause group membership to be removed, so it is recommended to use this flag with caution.", - "tolerateMemberOutOfScopeErrors": "TolerateMemberOutOfScopeErrors determines the behavior of the LDAP sync job when out-of-scope user entries are encountered. If 'true', an LDAP query for a user that falls outside of the base DN given for the all user query will be tolerated and only an error will be logged. If 'false', the LDAP sync job will fail if a user query would search outside of the base DN specified by the all user query. Misconfigured LDAP sync jobs with this flag set to 'true' can result in groups missing users, so it is recommended to use this flag with caution.", + "userUIDAttribute": "userUIDAttribute defines which attribute on an LDAP user entry will be interpreted as its unique identifier. It must correspond to values that will be found from the GroupMembershipAttributes", + "userNameAttributes": "userNameAttributes defines which attributes on an LDAP user entry will be used, in order, as its OpenShift user name. The first attribute with a non-empty value is used. 
This should match your PreferredUsername setting for your LDAPPasswordIdentityProvider", + "tolerateMemberNotFoundErrors": "tolerateMemberNotFoundErrors determines the behavior of the LDAP sync job when missing user entries are encountered. If 'true', an LDAP query for users that doesn't find any will be tolerated and an only and error will be logged. If 'false', the LDAP sync job will fail if a query for users doesn't find any. The default value is 'false'. Misconfigured LDAP sync jobs with this flag set to 'true' can cause group membership to be removed, so it is recommended to use this flag with caution.", + "tolerateMemberOutOfScopeErrors": "tolerateMemberOutOfScopeErrors determines the behavior of the LDAP sync job when out-of-scope user entries are encountered. If 'true', an LDAP query for a user that falls outside of the base DN given for the all user query will be tolerated and only an error will be logged. If 'false', the LDAP sync job will fail if a user query would search outside of the base DN specified by the all user query. Misconfigured LDAP sync jobs with this flag set to 'true' can result in groups missing users, so it is recommended to use this flag with caution.", } func (RFC2307Config) SwaggerDoc() map[string]string { @@ -761,8 +761,8 @@ func (RFC2307Config) SwaggerDoc() map[string]string { var map_RegistryLocation = map[string]string{ "": "RegistryLocation contains a location of the registry specified by the registry domain name. The domain name might include wildcards, like '*' or '??'.", - "domainName": "DomainName specifies a domain name for the registry In case the registry use non-standard (80 or 443) port, the port should be included in the domain name as well.", - "insecure": "Insecure indicates whether the registry is secure (https) or insecure (http) By default (if not specified) the registry is assumed as secure.", + "domainName": "domainName specifies a domain name for the registry In case the registry use non-standard (80 or 443) port, the port should be included in the domain name as well.", + "insecure": "insecure indicates whether the registry is secure (https) or insecure (http) By default (if not specified) the registry is assumed as secure.", } func (RegistryLocation) SwaggerDoc() map[string]string { @@ -771,8 +771,8 @@ func (RegistryLocation) SwaggerDoc() map[string]string { var map_RemoteConnectionInfo = map[string]string{ "": "RemoteConnectionInfo holds information necessary for establishing a remote connection", - "url": "URL is the remote URL to connect to", - "ca": "CA is the CA for verifying TLS connections", + "url": "url is the remote URL to connect to", + "ca": "ca is the CA for verifying TLS connections", } func (RemoteConnectionInfo) SwaggerDoc() map[string]string { @@ -781,11 +781,11 @@ func (RemoteConnectionInfo) SwaggerDoc() map[string]string { var map_RequestHeaderAuthenticationOptions = map[string]string{ "": "RequestHeaderAuthenticationOptions provides options for setting up a front proxy against the entire API instead of against the /oauth endpoint.", - "clientCA": "ClientCA is a file with the trusted signer certs. It is required.", - "clientCommonNames": "ClientCommonNames is a required list of common names to require a match from.", - "usernameHeaders": "UsernameHeaders is the list of headers to check for user information. First hit wins.", + "clientCA": "clientCA is a file with the trusted signer certs. 
It is required.", + "clientCommonNames": "clientCommonNames is a required list of common names to require a match from.", + "usernameHeaders": "usernameHeaders is the list of headers to check for user information. First hit wins.", "groupHeaders": "GroupNameHeader is the set of headers to check for group information. All are unioned.", - "extraHeaderPrefixes": "ExtraHeaderPrefixes is the set of request header prefixes to inspect for user extra. X-Remote-Extra- is suggested.", + "extraHeaderPrefixes": "extraHeaderPrefixes is the set of request header prefixes to inspect for user extra. X-Remote-Extra- is suggested.", } func (RequestHeaderAuthenticationOptions) SwaggerDoc() map[string]string { @@ -794,14 +794,14 @@ func (RequestHeaderAuthenticationOptions) SwaggerDoc() map[string]string { var map_RequestHeaderIdentityProvider = map[string]string{ "": "RequestHeaderIdentityProvider provides identities for users authenticating using request header credentials\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.", - "loginURL": "LoginURL is a URL to redirect unauthenticated /authorize requests to Unauthenticated requests from OAuth clients which expect interactive logins will be redirected here ${url} is replaced with the current URL, escaped to be safe in a query parameter\n https://www.example.com/sso-login?then=${url}\n${query} is replaced with the current query string\n https://www.example.com/auth-proxy/oauth/authorize?${query}", - "challengeURL": "ChallengeURL is a URL to redirect unauthenticated /authorize requests to Unauthenticated requests from OAuth clients which expect WWW-Authenticate challenges will be redirected here ${url} is replaced with the current URL, escaped to be safe in a query parameter\n https://www.example.com/sso-login?then=${url}\n${query} is replaced with the current query string\n https://www.example.com/auth-proxy/oauth/authorize?${query}", - "clientCA": "ClientCA is a file with the trusted signer certs. If empty, no request verification is done, and any direct request to the OAuth server can impersonate any identity from this provider, merely by setting a request header.", - "clientCommonNames": "ClientCommonNames is an optional list of common names to require a match from. 
If empty, any client certificate validated against the clientCA bundle is considered authoritative.", - "headers": "Headers is the set of headers to check for identity information", - "preferredUsernameHeaders": "PreferredUsernameHeaders is the set of headers to check for the preferred username", - "nameHeaders": "NameHeaders is the set of headers to check for the display name", - "emailHeaders": "EmailHeaders is the set of headers to check for the email address", + "loginURL": "loginURL is a URL to redirect unauthenticated /authorize requests to Unauthenticated requests from OAuth clients which expect interactive logins will be redirected here ${url} is replaced with the current URL, escaped to be safe in a query parameter\n https://www.example.com/sso-login?then=${url}\n${query} is replaced with the current query string\n https://www.example.com/auth-proxy/oauth/authorize?${query}", + "challengeURL": "challengeURL is a URL to redirect unauthenticated /authorize requests to Unauthenticated requests from OAuth clients which expect WWW-Authenticate challenges will be redirected here ${url} is replaced with the current URL, escaped to be safe in a query parameter\n https://www.example.com/sso-login?then=${url}\n${query} is replaced with the current query string\n https://www.example.com/auth-proxy/oauth/authorize?${query}", + "clientCA": "clientCA is a file with the trusted signer certs. If empty, no request verification is done, and any direct request to the OAuth server can impersonate any identity from this provider, merely by setting a request header.", + "clientCommonNames": "clientCommonNames is an optional list of common names to require a match from. If empty, any client certificate validated against the clientCA bundle is considered authoritative.", + "headers": "headers is the set of headers to check for identity information", + "preferredUsernameHeaders": "preferredUsernameHeaders is the set of headers to check for the preferred username", + "nameHeaders": "nameHeaders is the set of headers to check for the display name", + "emailHeaders": "emailHeaders is the set of headers to check for the email address", } func (RequestHeaderIdentityProvider) SwaggerDoc() map[string]string { @@ -810,7 +810,7 @@ func (RequestHeaderIdentityProvider) SwaggerDoc() map[string]string { var map_RoutingConfig = map[string]string{ "": "RoutingConfig holds the necessary configuration options for routing to subdomains", - "subdomain": "Subdomain is the suffix appended to $service.$namespace. to form the default route hostname DEPRECATED: This field is being replaced by routers setting their own defaults. This is the \"default\" route.", + "subdomain": "subdomain is the suffix appended to $service.$namespace. to form the default route hostname DEPRECATED: This field is being replaced by routers setting their own defaults. This is the \"default\" route.", } func (RoutingConfig) SwaggerDoc() map[string]string { @@ -819,9 +819,9 @@ func (RoutingConfig) SwaggerDoc() map[string]string { var map_SecurityAllocator = map[string]string{ "": "SecurityAllocator controls the automatic allocation of UIDs and MCS labels to a project. If nil, allocation is disabled.", - "uidAllocatorRange": "UIDAllocatorRange defines the total set of Unix user IDs (UIDs) that will be allocated to projects automatically, and the size of the block each namespace gets. For example, 1000-1999/10 will allocate ten UIDs per namespace, and will be able to allocate up to 100 blocks before running out of space. 
The default is to allocate from 1 billion to 2 billion in 10k blocks (which is the expected size of the ranges container images will use once user namespaces are started).", - "mcsAllocatorRange": "MCSAllocatorRange defines the range of MCS categories that will be assigned to namespaces. The format is \"/[,]\". The default is \"s0/2\" and will allocate from c0 -> c1023, which means a total of 535k labels are available (1024 choose 2 ~ 535k). If this value is changed after startup, new projects may receive labels that are already allocated to other projects. Prefix may be any valid SELinux set of terms (including user, role, and type), although leaving them as the default will allow the server to set them automatically.\n\nExamples: * s0:/2 - Allocate labels from s0:c0,c0 to s0:c511,c511 * s0:/2,512 - Allocate labels from s0:c0,c0,c0 to s0:c511,c511,511", - "mcsLabelsPerProject": "MCSLabelsPerProject defines the number of labels that should be reserved per project. The default is 5 to match the default UID and MCS ranges (100k namespaces, 535k/5 labels).", + "uidAllocatorRange": "uidAllocatorRange defines the total set of Unix user IDs (UIDs) that will be allocated to projects automatically, and the size of the block each namespace gets. For example, 1000-1999/10 will allocate ten UIDs per namespace, and will be able to allocate up to 100 blocks before running out of space. The default is to allocate from 1 billion to 2 billion in 10k blocks (which is the expected size of the ranges container images will use once user namespaces are started).", + "mcsAllocatorRange": "mcsAllocatorRange defines the range of MCS categories that will be assigned to namespaces. The format is \"/[,]\". The default is \"s0/2\" and will allocate from c0 -> c1023, which means a total of 535k labels are available (1024 choose 2 ~ 535k). If this value is changed after startup, new projects may receive labels that are already allocated to other projects. Prefix may be any valid SELinux set of terms (including user, role, and type), although leaving them as the default will allow the server to set them automatically.\n\nExamples: * s0:/2 - Allocate labels from s0:c0,c0 to s0:c511,c511 * s0:/2,512 - Allocate labels from s0:c0,c0,c0 to s0:c511,c511,511", + "mcsLabelsPerProject": "mcsLabelsPerProject defines the number of labels that should be reserved per project. The default is 5 to match the default UID and MCS ranges (100k namespaces, 535k/5 labels).", } func (SecurityAllocator) SwaggerDoc() map[string]string { @@ -830,11 +830,11 @@ func (SecurityAllocator) SwaggerDoc() map[string]string { var map_ServiceAccountConfig = map[string]string{ "": "ServiceAccountConfig holds the necessary configuration options for a service account", - "managedNames": "ManagedNames is a list of service account names that will be auto-created in every namespace. If no names are specified, the ServiceAccountsController will not be started.", - "limitSecretReferences": "LimitSecretReferences controls whether or not to allow a service account to reference any secret in a namespace without explicitly referencing them", - "privateKeyFile": "PrivateKeyFile is a file containing a PEM-encoded private RSA key, used to sign service account tokens. If no private key is specified, the service account TokensController will not be started.", - "publicKeyFiles": "PublicKeyFiles is a list of files, each containing a PEM-encoded public RSA key. 
(If any file contains a private key, the public portion of the key is used) The list of public keys is used to verify presented service account tokens. Each key is tried in order until the list is exhausted or verification succeeds. If no keys are specified, no service account authentication will be available.", - "masterCA": "MasterCA is the CA for verifying the TLS connection back to the master. The service account controller will automatically inject the contents of this file into pods so they can verify connections to the master.", + "managedNames": "managedNames is a list of service account names that will be auto-created in every namespace. If no names are specified, the ServiceAccountsController will not be started.", + "limitSecretReferences": "limitSecretReferences controls whether or not to allow a service account to reference any secret in a namespace without explicitly referencing them", + "privateKeyFile": "privateKeyFile is a file containing a PEM-encoded private RSA key, used to sign service account tokens. If no private key is specified, the service account TokensController will not be started.", + "publicKeyFiles": "publicKeyFiles is a list of files, each containing a PEM-encoded public RSA key. (If any file contains a private key, the public portion of the key is used) The list of public keys is used to verify presented service account tokens. Each key is tried in order until the list is exhausted or verification succeeds. If no keys are specified, no service account authentication will be available.", + "masterCA": "masterCA is the CA for verifying the TLS connection back to the master. The service account controller will automatically inject the contents of this file into pods so they can verify connections to the master.", } func (ServiceAccountConfig) SwaggerDoc() map[string]string { @@ -843,7 +843,7 @@ func (ServiceAccountConfig) SwaggerDoc() map[string]string { var map_ServiceServingCert = map[string]string{ "": "ServiceServingCert holds configuration for service serving cert signer which creates cert/key pairs for pods fulfilling a service to serve with.", - "signer": "Signer holds the signing information used to automatically sign serving certificates. If this value is nil, then certs are not signed automatically.", + "signer": "signer holds the signing information used to automatically sign serving certificates. If this value is nil, then certs are not signed automatically.", } func (ServiceServingCert) SwaggerDoc() map[string]string { @@ -852,12 +852,12 @@ func (ServiceServingCert) SwaggerDoc() map[string]string { var map_ServingInfo = map[string]string{ "": "ServingInfo holds information about serving web pages", - "bindAddress": "BindAddress is the ip:port to serve on", - "bindNetwork": "BindNetwork is the type of network to bind to - defaults to \"tcp4\", accepts \"tcp\", \"tcp4\", and \"tcp6\"", - "clientCA": "ClientCA is the certificate bundle for all the signers that you'll recognize for incoming client certificates", - "namedCertificates": "NamedCertificates is a list of certificates to use to secure requests to specific hostnames", - "minTLSVersion": "MinTLSVersion is the minimum TLS version supported. Values must match version names from https://golang.org/pkg/crypto/tls/#pkg-constants", - "cipherSuites": "CipherSuites contains an overridden list of ciphers for the server to support. 
Values must match cipher suite IDs from https://golang.org/pkg/crypto/tls/#pkg-constants", + "bindAddress": "bindAddress is the ip:port to serve on", + "bindNetwork": "bindNetwork is the type of network to bind to - defaults to \"tcp4\", accepts \"tcp\", \"tcp4\", and \"tcp6\"", + "clientCA": "clientCA is the certificate bundle for all the signers that you'll recognize for incoming client certificates", + "namedCertificates": "namedCertificates is a list of certificates to use to secure requests to specific hostnames", + "minTLSVersion": "minTLSVersion is the minimum TLS version supported. Values must match version names from https://golang.org/pkg/crypto/tls/#pkg-constants", + "cipherSuites": "cipherSuites contains an overridden list of ciphers for the server to support. Values must match cipher suite IDs from https://golang.org/pkg/crypto/tls/#pkg-constants", } func (ServingInfo) SwaggerDoc() map[string]string { @@ -866,9 +866,9 @@ func (ServingInfo) SwaggerDoc() map[string]string { var map_SessionConfig = map[string]string{ "": "SessionConfig specifies options for cookie-based sessions. Used by AuthRequestHandlerSession", - "sessionSecretsFile": "SessionSecretsFile is a reference to a file containing a serialized SessionSecrets object If no file is specified, a random signing and encryption key are generated at each server start", - "sessionMaxAgeSeconds": "SessionMaxAgeSeconds specifies how long created sessions last. Used by AuthRequestHandlerSession", - "sessionName": "SessionName is the cookie name used to store the session", + "sessionSecretsFile": "sessionSecretsFile is a reference to a file containing a serialized SessionSecrets object If no file is specified, a random signing and encryption key are generated at each server start", + "sessionMaxAgeSeconds": "sessionMaxAgeSeconds specifies how long created sessions last. Used by AuthRequestHandlerSession", + "sessionName": "sessionName is the cookie name used to store the session", } func (SessionConfig) SwaggerDoc() map[string]string { @@ -877,8 +877,8 @@ func (SessionConfig) SwaggerDoc() map[string]string { var map_SessionSecret = map[string]string{ "": "SessionSecret is a secret used to authenticate/decrypt cookie-based sessions", - "authentication": "Authentication is used to authenticate sessions using HMAC. Recommended to use a secret with 32 or 64 bytes.", - "encryption": "Encryption is used to encrypt sessions. Must be 16, 24, or 32 characters long, to select AES-128, AES-", + "authentication": "authentication is used to authenticate sessions using HMAC. Recommended to use a secret with 32 or 64 bytes.", + "encryption": "encryption is used to encrypt sessions. Must be 16, 24, or 32 characters long, to select AES-128, AES-", } func (SessionSecret) SwaggerDoc() map[string]string { @@ -887,7 +887,7 @@ func (SessionSecret) SwaggerDoc() map[string]string { var map_SessionSecrets = map[string]string{ "": "SessionSecrets list the secrets to use to sign/encrypt and authenticate/decrypt created sessions.\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.", - "secrets": "Secrets is a list of secrets New sessions are signed and encrypted using the first secret. Existing sessions are decrypted/authenticated by each secret until one succeeds. This allows rotating secrets.", + "secrets": "secrets is a list of secrets New sessions are signed and encrypted using the first secret. 
Existing sessions are decrypted/authenticated by each secret until one succeeds. This allows rotating secrets.", } func (SessionSecrets) SwaggerDoc() map[string]string { @@ -913,10 +913,10 @@ func (StringSource) SwaggerDoc() map[string]string { var map_StringSourceSpec = map[string]string{ "": "StringSourceSpec specifies a string value, or external location", - "value": "Value specifies the cleartext value, or an encrypted value if keyFile is specified.", - "env": "Env specifies an envvar containing the cleartext value, or an encrypted value if the keyFile is specified.", - "file": "File references a file containing the cleartext value, or an encrypted value if a keyFile is specified.", - "keyFile": "KeyFile references a file containing the key to use to decrypt the value.", + "value": "value specifies the cleartext value, or an encrypted value if keyFile is specified.", + "env": "env specifies an envvar containing the cleartext value, or an encrypted value if the keyFile is specified.", + "file": "file references a file containing the cleartext value, or an encrypted value if a keyFile is specified.", + "keyFile": "keyFile references a file containing the key to use to decrypt the value.", } func (StringSourceSpec) SwaggerDoc() map[string]string { @@ -925,9 +925,9 @@ func (StringSourceSpec) SwaggerDoc() map[string]string { var map_TokenConfig = map[string]string{ "": "TokenConfig holds the necessary configuration options for authorization and access tokens", - "authorizeTokenMaxAgeSeconds": "AuthorizeTokenMaxAgeSeconds defines the maximum age of authorize tokens", - "accessTokenMaxAgeSeconds": "AccessTokenMaxAgeSeconds defines the maximum age of access tokens", - "accessTokenInactivityTimeoutSeconds": "AccessTokenInactivityTimeoutSeconds defined the default token inactivity timeout for tokens granted by any client. Setting it to nil means the feature is completely disabled (default) The default setting can be overriden on OAuthClient basis. The value represents the maximum amount of time that can occur between consecutive uses of the token. Tokens become invalid if they are not used within this temporal window. The user will need to acquire a new token to regain access once a token times out. Valid values are: - 0: Tokens never time out - X: Tokens time out if there is no activity for X seconds The current minimum allowed value for X is 300 (5 minutes)", + "authorizeTokenMaxAgeSeconds": "authorizeTokenMaxAgeSeconds defines the maximum age of authorize tokens", + "accessTokenMaxAgeSeconds": "accessTokenMaxAgeSeconds defines the maximum age of access tokens", + "accessTokenInactivityTimeoutSeconds": "accessTokenInactivityTimeoutSeconds defined the default token inactivity timeout for tokens granted by any client. Setting it to nil means the feature is completely disabled (default) The default setting can be overriden on OAuthClient basis. The value represents the maximum amount of time that can occur between consecutive uses of the token. Tokens become invalid if they are not used within this temporal window. The user will need to acquire a new token to regain access once a token times out. 
Valid values are: - 0: Tokens never time out - X: Tokens time out if there is no activity for X seconds The current minimum allowed value for X is 300 (5 minutes)", } func (TokenConfig) SwaggerDoc() map[string]string { @@ -936,7 +936,7 @@ func (TokenConfig) SwaggerDoc() map[string]string { var map_UserAgentDenyRule = map[string]string{ "": "UserAgentDenyRule adds a rejection message that can be used to help a user figure out how to get an approved client", - "rejectionMessage": "RejectionMessage is the message shown when rejecting a client. If it is not a set, the default message is used.", + "rejectionMessage": "rejectionMessage is the message shown when rejecting a client. If it is not a set, the default message is used.", } func (UserAgentDenyRule) SwaggerDoc() map[string]string { @@ -946,7 +946,7 @@ func (UserAgentDenyRule) SwaggerDoc() map[string]string { var map_UserAgentMatchRule = map[string]string{ "": "UserAgentMatchRule describes how to match a given request based on User-Agent and HTTPVerb", "regex": "UserAgentRegex is a regex that is checked against the User-Agent. Known variants of oc clients 1. oc accessing kube resources: oc/v1.2.0 (linux/amd64) kubernetes/bc4550d 2. oc accessing openshift resources: oc/v1.1.3 (linux/amd64) openshift/b348c2f 3. openshift kubectl accessing kube resources: openshift/v1.2.0 (linux/amd64) kubernetes/bc4550d 4. openshift kubectl accessing openshift resources: openshift/v1.1.3 (linux/amd64) openshift/b348c2f 5. oadm accessing kube resources: oadm/v1.2.0 (linux/amd64) kubernetes/bc4550d 6. oadm accessing openshift resources: oadm/v1.1.3 (linux/amd64) openshift/b348c2f 7. openshift cli accessing kube resources: openshift/v1.2.0 (linux/amd64) kubernetes/bc4550d 8. openshift cli accessing openshift resources: openshift/v1.1.3 (linux/amd64) openshift/b348c2f", - "httpVerbs": "HTTPVerbs specifies which HTTP verbs should be matched. An empty list means \"match all verbs\".", + "httpVerbs": "httpVerbs specifies which HTTP verbs should be matched. An empty list means \"match all verbs\".", } func (UserAgentMatchRule) SwaggerDoc() map[string]string { @@ -957,7 +957,7 @@ var map_UserAgentMatchingConfig = map[string]string{ "": "UserAgentMatchingConfig controls how API calls from *voluntarily* identifying clients will be handled. THIS DOES NOT DEFEND AGAINST MALICIOUS CLIENTS!", "requiredClients": "If this list is non-empty, then a User-Agent must match one of the UserAgentRegexes to be allowed", "deniedClients": "If this list is non-empty, then a User-Agent must not match any of the UserAgentRegexes", - "defaultRejectionMessage": "DefaultRejectionMessage is the message shown when rejecting a client. If it is not a set, a generic message is given.", + "defaultRejectionMessage": "defaultRejectionMessage is the message shown when rejecting a client. If it is not a set, a generic message is given.", } func (UserAgentMatchingConfig) SwaggerDoc() map[string]string { @@ -966,8 +966,8 @@ func (UserAgentMatchingConfig) SwaggerDoc() map[string]string { var map_WebhookTokenAuthenticator = map[string]string{ "": "WebhookTokenAuthenticators holds the necessary configuation options for external token authenticators", - "configFile": "ConfigFile is a path to a Kubeconfig file with the webhook configuration", - "cacheTTL": "CacheTTL indicates how long an authentication result should be cached. It takes a valid time duration string (e.g. \"5m\"). If empty, you get a default timeout of 2 minutes. If zero (e.g. 
\"0m\"), caching is disabled", + "configFile": "configFile is a path to a Kubeconfig file with the webhook configuration", + "cacheTTL": "cacheTTL indicates how long an authentication result should be cached. It takes a valid time duration string (e.g. \"5m\"). If empty, you get a default timeout of 2 minutes. If zero (e.g. \"0m\"), caching is disabled", } func (WebhookTokenAuthenticator) SwaggerDoc() map[string]string { diff --git a/vendor/github.com/openshift/api/machine/v1/types_alibabaprovider.go b/vendor/github.com/openshift/api/machine/v1/types_alibabaprovider.go index 4b5c8d6ef..12a819672 100644 --- a/vendor/github.com/openshift/api/machine/v1/types_alibabaprovider.go +++ b/vendor/github.com/openshift/api/machine/v1/types_alibabaprovider.go @@ -108,7 +108,7 @@ type AlibabaCloudMachineProviderConfig struct { // +optional DataDisks []DataDiskProperties `json:"dataDisk,omitempty"` - // SecurityGroups is a list of security group references to assign to the instance. + // securityGroups is a list of security group references to assign to the instance. // A reference holds either the security group ID, the resource name, or the required tags to search. // When more than one security group is returned for a tag search, all the groups are associated with the instance up to the // maximum number of security groups to which an instance can belong. @@ -116,32 +116,32 @@ type AlibabaCloudMachineProviderConfig struct { // https://www.alibabacloud.com/help/en/doc-detail/25412.htm SecurityGroups []AlibabaResourceReference `json:"securityGroups,omitempty"` - // Bandwidth describes the internet bandwidth strategy for the instance + // bandwidth describes the internet bandwidth strategy for the instance // +optional Bandwidth BandwidthProperties `json:"bandwidth,omitempty"` - // SystemDisk holds the properties regarding the system disk for the instance + // systemDisk holds the properties regarding the system disk for the instance // +optional SystemDisk SystemDiskProperties `json:"systemDisk,omitempty"` - // VSwitch is a reference to the vswitch to use for this instance. + // vSwitch is a reference to the vswitch to use for this instance. // A reference holds either the vSwitch ID, the resource name, or the required tags to search. // When more than one vSwitch is returned for a tag search, only the first vSwitch returned will be used. // This parameter is required when you create an instance of the VPC type. // You can call the DescribeVSwitches operation to query the created vSwitches. VSwitch AlibabaResourceReference `json:"vSwitch"` - // RAMRoleName is the name of the instance Resource Access Management (RAM) role. This allows the instance to perform API calls as this specified RAM role. + // ramRoleName is the name of the instance Resource Access Management (RAM) role. This allows the instance to perform API calls as this specified RAM role. // +optional RAMRoleName string `json:"ramRoleName,omitempty"` - // ResourceGroup references the resource group to which to assign the instance. + // resourceGroup references the resource group to which to assign the instance. // A reference holds either the resource group ID, the resource name, or the required tags to search. // When more than one resource group are returned for a search, an error will be produced and the Machine will not be created. // Resource Groups do not support searching by tags. ResourceGroup AlibabaResourceReference `json:"resourceGroup"` - // Tenancy specifies whether to create the instance on a dedicated host. 
+ // tenancy specifies whether to create the instance on a dedicated host. // Valid values: // // default: creates the instance on a non-dedicated host. @@ -151,12 +151,12 @@ type AlibabaCloudMachineProviderConfig struct { // +optional Tenancy InstanceTenancy `json:"tenancy,omitempty"` - // UserDataSecret contains a local reference to a secret that contains the + // userDataSecret contains a local reference to a secret that contains the // UserData to apply to the instance // +optional UserDataSecret *corev1.LocalObjectReference `json:"userDataSecret,omitempty"` - // CredentialsSecret is a reference to the secret with alibabacloud credentials. Otherwise, defaults to permissions + // credentialsSecret is a reference to the secret with alibabacloud credentials. Otherwise, defaults to permissions // provided by attached RAM role where the actuator is running. // +optional CredentialsSecret *corev1.LocalObjectReference `json:"credentialsSecret,omitempty"` @@ -173,15 +173,15 @@ type AlibabaResourceReference struct { // type identifies the resource reference type for this entry. Type AlibabaResourceReferenceType `json:"type"` - // ID of resource + // id of resource // +optional ID *string `json:"id,omitempty"` - // Name of the resource + // name of the resource // +optional Name *string `json:"name,omitempty"` - // Tags is a set of metadata based upon ECS object tags used to identify a resource. + // tags is a set of metadata based upon ECS object tags used to identify a resource. // For details about usage when multiple resources are found, please see the owning parent field documentation. // +optional Tags *[]Tag `json:"tags,omitempty"` @@ -213,23 +213,25 @@ type AlibabaCloudMachineProviderStatus struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata,omitempty"` - // InstanceID is the instance ID of the machine created in alibabacloud + // instanceId is the instance ID of the machine created in alibabacloud // +optional InstanceID *string `json:"instanceId,omitempty"` - // InstanceState is the state of the alibabacloud instance for this machine + // instanceState is the state of the alibabacloud instance for this machine // +optional InstanceState *string `json:"instanceState,omitempty"` - // Conditions is a set of conditions associated with the Machine to indicate + // conditions is a set of conditions associated with the Machine to indicate // errors or other status // +optional + // +listType=map + // +listMapKey=type Conditions []metav1.Condition `json:"conditions,omitempty"` } // SystemDiskProperties contains the information regarding the system disk including performance, size, name, and category type SystemDiskProperties struct { - // Category is the category of the system disk. + // category is the category of the system disk. // Valid values: // cloud_essd: ESSD. When the parameter is set to this value, you can use the SystemDisk.PerformanceLevel parameter to specify the performance level of the disk. // cloud_efficiency: ultra disk. @@ -242,7 +244,7 @@ type SystemDiskProperties struct { // +optional Category string `json:"category,omitempty"` - // PerformanceLevel is the performance level of the ESSD used as the system disk. + // performanceLevel is the performance level of the ESSD used as the system disk. // Valid values: // // PL0: A single ESSD can deliver up to 10,000 random read/write IOPS. 
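Note on the `+listType=map` / `+listMapKey=type` markers added to the AlibabaCloudMachineProviderStatus `conditions` field above (the same markers are added to other provider statuses later in this patch): they declare the conditions slice as a map-typed list keyed by `type`, so the API server and server-side apply merge entries per condition type instead of replacing the whole list. A minimal sketch of such a status follows; the values are purely illustrative and are not part of this patch:

    status:
      conditions:
      - type: Available              # listMapKey: entries are keyed and merged by "type"
        status: "True"
        reason: AsExpected
        lastTransitionTime: "2025-01-01T00:00:00Z"
      - type: Degraded
        status: "False"
        reason: AsExpected
        lastTransitionTime: "2025-01-01T00:00:00Z"
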
@@ -256,14 +258,14 @@ type SystemDiskProperties struct { // +optional PerformanceLevel string `json:"performanceLevel,omitempty"` - // Name is the name of the system disk. If the name is specified the name must be 2 to 128 characters in length. It must start with a letter and cannot start with http:// or https://. It can contain letters, digits, colons (:), underscores (_), and hyphens (-). + // name is the name of the system disk. If the name is specified the name must be 2 to 128 characters in length. It must start with a letter and cannot start with http:// or https://. It can contain letters, digits, colons (:), underscores (_), and hyphens (-). // Empty value means the platform chooses a default, which is subject to change over time. // Currently the default is `""`. // +kubebuilder:validation:MaxLength=128 // +optional Name string `json:"name,omitempty"` - // Size is the size of the system disk. Unit: GiB. Valid values: 20 to 500. + // size is the size of the system disk. Unit: GiB. Valid values: 20 to 500. // The value must be at least 20 and greater than or equal to the size of the image. // Empty value means the platform chooses a default, which is subject to change over time. // Currently the default is `40` or the size of the image depending on whichever is greater. @@ -357,7 +359,7 @@ type Tag struct { // Bandwidth describes the bandwidth strategy for the network of the instance type BandwidthProperties struct { - // InternetMaxBandwidthIn is the maximum inbound public bandwidth. Unit: Mbit/s. Valid values: + // internetMaxBandwidthIn is the maximum inbound public bandwidth. Unit: Mbit/s. Valid values: // When the purchased outbound public bandwidth is less than or equal to 10 Mbit/s, the valid values of this parameter are 1 to 10. // Currently the default is `10` when outbound bandwidth is less than or equal to 10 Mbit/s. // When the purchased outbound public bandwidth is greater than 10, the valid values are 1 to the InternetMaxBandwidthOut value. @@ -365,7 +367,7 @@ type BandwidthProperties struct { // +optional InternetMaxBandwidthIn int64 `json:"internetMaxBandwidthIn,omitempty"` - // InternetMaxBandwidthOut is the maximum outbound public bandwidth. Unit: Mbit/s. Valid values: 0 to 100. + // internetMaxBandwidthOut is the maximum outbound public bandwidth. Unit: Mbit/s. Valid values: 0 to 100. // When a value greater than 0 is used then a public IP address is assigned to the instance. // Empty value means no opinion and the platform chooses the a default, which is subject to change over time. // Currently the default is `0` diff --git a/vendor/github.com/openshift/api/machine/v1/types_aws.go b/vendor/github.com/openshift/api/machine/v1/types_aws.go index bc8a7efce..5ad2b923f 100644 --- a/vendor/github.com/openshift/api/machine/v1/types_aws.go +++ b/vendor/github.com/openshift/api/machine/v1/types_aws.go @@ -8,17 +8,17 @@ package v1 // +kubebuilder:validation:XValidation:rule="has(self.type) && self.type == 'ARN' ? has(self.arn) : !has(self.arn)",message="arn is required when type is ARN, and forbidden otherwise" // +kubebuilder:validation:XValidation:rule="has(self.type) && self.type == 'Filters' ? has(self.filters) : !has(self.filters)",message="filters is required when type is Filters, and forbidden otherwise" type AWSResourceReference struct { - // Type determines how the reference will fetch the AWS resource. + // type determines how the reference will fetch the AWS resource. 
// +unionDiscriminator - // +kubebuilder:validation:Required + // +required Type AWSResourceReferenceType `json:"type"` - // ID of resource. + // id of resource. // +optional ID *string `json:"id,omitempty"` - // ARN of resource. + // arn of resource. // +optional ARN *string `json:"arn,omitempty"` - // Filters is a set of filters used to identify a resource. + // filters is a set of filters used to identify a resource. // +optional // +listType=atomic Filters *[]AWSResourceFilter `json:"filters,omitempty"` @@ -41,10 +41,10 @@ const ( // AWSResourceFilter is a filter used to identify an AWS resource type AWSResourceFilter struct { - // Name of the filter. Filter names are case-sensitive. - // +kubebuilder:validation:Required + // name of the filter. Filter names are case-sensitive. + // +required Name string `json:"name"` - // Values includes one or more filter values. Filter values are case-sensitive. + // values includes one or more filter values. Filter values are case-sensitive. // +optional // +listType=atomic Values []string `json:"values,omitempty"` diff --git a/vendor/github.com/openshift/api/machine/v1/types_controlplanemachineset.go b/vendor/github.com/openshift/api/machine/v1/types_controlplanemachineset.go index 5ac7a5aae..ead8b2077 100644 --- a/vendor/github.com/openshift/api/machine/v1/types_controlplanemachineset.go +++ b/vendor/github.com/openshift/api/machine/v1/types_controlplanemachineset.go @@ -42,7 +42,25 @@ type ControlPlaneMachineSet struct { // ControlPlaneMachineSet represents the configuration of the ControlPlaneMachineSet. type ControlPlaneMachineSetSpec struct { - // State defines whether the ControlPlaneMachineSet is Active or Inactive. + // machineNamePrefix is the prefix used when creating machine names. + // Each machine name will consist of this prefix, followed by + // a randomly generated string of 5 characters, and the index of the machine. + // It must be a lowercase RFC 1123 subdomain, consisting of lowercase + // alphanumeric characters, hyphens ('-'), and periods ('.'). + // Each block, separated by periods, must start and end with an alphanumeric character. + // Hyphens are not allowed at the start or end of a block, and consecutive periods are not permitted. + // The prefix must be between 1 and 245 characters in length. + // For example, if machineNamePrefix is set to 'control-plane', + // and three machines are created, their names might be: + // control-plane-abcde-0, control-plane-fghij-1, control-plane-klmno-2 + // +openshift:validation:FeatureGateAwareXValidation:featureGate=CPMSMachineNamePrefix,rule="!format.dns1123Subdomain().validate(self).hasValue()",message="a lowercase RFC 1123 subdomain must consist of lowercase alphanumeric characters, hyphens ('-'), and periods ('.'). Each block, separated by periods, must start and end with an alphanumeric character. Hyphens are not allowed at the start or end of a block, and consecutive periods are not permitted." + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=245 + // +openshift:enable:FeatureGate=CPMSMachineNamePrefix + // +optional + MachineNamePrefix string `json:"machineNamePrefix,omitempty"` + + // state defines whether the ControlPlaneMachineSet is Active or Inactive. // When Inactive, the ControlPlaneMachineSet will not take any action on the // state of the Machines within the cluster. 
// When Active, the ControlPlaneMachineSet will reconcile the Machines and @@ -55,7 +73,7 @@ type ControlPlaneMachineSetSpec struct { // +optional State ControlPlaneMachineSetState `json:"state,omitempty"` - // Replicas defines how many Control Plane Machines should be + // replicas defines how many Control Plane Machines should be // created by this ControlPlaneMachineSet. // This field is immutable and cannot be changed after cluster // installation. @@ -64,10 +82,10 @@ type ControlPlaneMachineSetSpec struct { // +kubebuilder:validation:Enum:=3;5 // +kubebuilder:default:=3 // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="replicas is immutable" - // +kubebuilder:validation:Required + // +required Replicas *int32 `json:"replicas"` - // Strategy defines how the ControlPlaneMachineSet will update + // strategy defines how the ControlPlaneMachineSet will update // Machines when it detects a change to the ProviderSpec. // +kubebuilder:default:={type: RollingUpdate} // +optional @@ -78,12 +96,12 @@ type ControlPlaneMachineSetSpec struct { // It must match the template's labels. // This field is considered immutable after creation of the resource. // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="selector is immutable" - // +kubebuilder:validation:Required + // +required Selector metav1.LabelSelector `json:"selector"` - // Template describes the Control Plane Machines that will be created + // template describes the Control Plane Machines that will be created // by this ControlPlaneMachineSet. - // +kubebuilder:validation:Required + // +required Template ControlPlaneMachineSetTemplate `json:"template"` } @@ -113,10 +131,10 @@ const ( // + future version of the Machine API Machine. // +kubebuilder:validation:XValidation:rule="has(self.machineType) && self.machineType == 'machines_v1beta1_machine_openshift_io' ? has(self.machines_v1beta1_machine_openshift_io) : !has(self.machines_v1beta1_machine_openshift_io)",message="machines_v1beta1_machine_openshift_io configuration is required when machineType is machines_v1beta1_machine_openshift_io, and forbidden otherwise" type ControlPlaneMachineSetTemplate struct { - // MachineType determines the type of Machines that should be managed by the ControlPlaneMachineSet. + // machineType determines the type of Machines that should be managed by the ControlPlaneMachineSet. // Currently, the only valid value is machines_v1beta1_machine_openshift_io. // +unionDiscriminator - // +kubebuilder:validation:Required + // +required MachineType ControlPlaneMachineSetMachineType `json:"machineType,omitempty"` // OpenShiftMachineV1Beta1Machine defines the template for creating Machines @@ -138,7 +156,7 @@ const ( // OpenShiftMachineV1Beta1MachineTemplate is a template for the ControlPlaneMachineSet to create // Machines from the v1beta1.machine.openshift.io API group. type OpenShiftMachineV1Beta1MachineTemplate struct { - // FailureDomains is the list of failure domains (sometimes called + // failureDomains is the list of failure domains (sometimes called // availability zones) in which the ControlPlaneMachineSet should balance // the Control Plane Machines. // This will be merged into the ProviderSpec given in the template. @@ -149,16 +167,16 @@ type OpenShiftMachineV1Beta1MachineTemplate struct { // ObjectMeta is the standard object metadata // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // Labels are required to match the ControlPlaneMachineSet selector. 
- // +kubebuilder:validation:Required + // +required ObjectMeta ControlPlaneMachineSetTemplateObjectMeta `json:"metadata"` - // Spec contains the desired configuration of the Control Plane Machines. + // spec contains the desired configuration of the Control Plane Machines. // The ProviderSpec within contains platform specific details // for creating the Control Plane Machines. // The ProviderSe should be complete apart from the platform specific // failure domain field. This will be overriden when the Machines // are created based on the FailureDomains field. - // +kubebuilder:validation:Required + // +required Spec machinev1beta1.MachineSpec `json:"spec"` } @@ -175,10 +193,10 @@ type ControlPlaneMachineSetTemplateObjectMeta struct { // +kubebuilder:validation:XValidation:rule="'machine.openshift.io/cluster-api-machine-role' in self && self['machine.openshift.io/cluster-api-machine-role'] == 'master'",message="label 'machine.openshift.io/cluster-api-machine-role' is required, and must have value 'master'" // +kubebuilder:validation:XValidation:rule="'machine.openshift.io/cluster-api-machine-type' in self && self['machine.openshift.io/cluster-api-machine-type'] == 'master'",message="label 'machine.openshift.io/cluster-api-machine-type' is required, and must have value 'master'" // +kubebuilder:validation:XValidation:rule="'machine.openshift.io/cluster-api-cluster' in self",message="label 'machine.openshift.io/cluster-api-cluster' is required" - // +kubebuilder:validation:Required + // +required Labels map[string]string `json:"labels"` - // Annotations is an unstructured key value map stored with a resource that may be + // annotations is an unstructured key value map stored with a resource that may be // set by external tools to store and retrieve arbitrary metadata. They are not // queryable and should be preserved when modifying objects. // More info: http://kubernetes.io/docs/user-guide/annotations @@ -189,7 +207,7 @@ type ControlPlaneMachineSetTemplateObjectMeta struct { // ControlPlaneMachineSetStrategy defines the strategy for applying updates to the // Control Plane Machines managed by the ControlPlaneMachineSet. type ControlPlaneMachineSetStrategy struct { - // Type defines the type of update strategy that should be + // type defines the type of update strategy that should be // used when updating Machines owned by the ControlPlaneMachineSet. // Valid values are "RollingUpdate" and "OnDelete". // The current default value is "RollingUpdate". @@ -240,23 +258,23 @@ const ( // +kubebuilder:validation:XValidation:rule="has(self.platform) && self.platform == 'VSphere' ? has(self.vsphere) : !has(self.vsphere)",message="vsphere configuration is required when platform is VSphere, and forbidden otherwise" // +kubebuilder:validation:XValidation:rule="has(self.platform) && self.platform == 'Nutanix' ? has(self.nutanix) : !has(self.nutanix)",message="nutanix configuration is required when platform is Nutanix, and forbidden otherwise" type FailureDomains struct { - // Platform identifies the platform for which the FailureDomain represents. + // platform identifies the platform for which the FailureDomain represents. // Currently supported values are AWS, Azure, GCP, OpenStack, VSphere and Nutanix. // +unionDiscriminator - // +kubebuilder:validation:Required + // +required Platform configv1.PlatformType `json:"platform"` - // AWS configures failure domain information for the AWS platform. + // aws configures failure domain information for the AWS platform. 
// +listType=atomic // +optional AWS *[]AWSFailureDomain `json:"aws,omitempty"` - // Azure configures failure domain information for the Azure platform. + // azure configures failure domain information for the Azure platform. // +listType=atomic // +optional Azure *[]AzureFailureDomain `json:"azure,omitempty"` - // GCP configures failure domain information for the GCP platform. + // gcp configures failure domain information for the GCP platform. // +listType=atomic // +optional GCP *[]GCPFailureDomain `json:"gcp,omitempty"` @@ -267,7 +285,7 @@ type FailureDomains struct { // +optional VSphere []VSphereFailureDomain `json:"vsphere,omitempty"` - // OpenStack configures failure domain information for the OpenStack platform. + // openstack configures failure domain information for the OpenStack platform. // +optional // // + --- @@ -289,19 +307,19 @@ type FailureDomains struct { // AWSFailureDomain configures failure domain information for the AWS platform. // +kubebuilder:validation:MinProperties:=1 type AWSFailureDomain struct { - // Subnet is a reference to the subnet to use for this instance. + // subnet is a reference to the subnet to use for this instance. // +optional Subnet *AWSResourceReference `json:"subnet,omitempty"` - // Placement configures the placement information for this instance. + // placement configures the placement information for this instance. // +optional Placement AWSFailureDomainPlacement `json:"placement,omitempty"` } // AWSFailureDomainPlacement configures the placement information for the AWSFailureDomain. type AWSFailureDomainPlacement struct { - // AvailabilityZone is the availability zone of the instance. - // +kubebuilder:validation:Required + // availabilityZone is the availability zone of the instance. + // +required AvailabilityZone string `json:"availabilityZone"` } @@ -309,7 +327,7 @@ type AWSFailureDomainPlacement struct { type AzureFailureDomain struct { // Availability Zone for the virtual machine. // If nil, the virtual machine should be deployed to no zone. - // +kubebuilder:validation:Required + // +required Zone string `json:"zone"` // subnet is the name of the network subnet in which the VM will be created. @@ -322,8 +340,8 @@ type AzureFailureDomain struct { // GCPFailureDomain configures failure domain information for the GCP platform type GCPFailureDomain struct { - // Zone is the zone in which the GCP machine provider will create the VM. - // +kubebuilder:validation:Required + // zone is the zone in which the GCP machine provider will create the VM. + // +required Zone string `json:"zone"` } @@ -333,7 +351,7 @@ type VSphereFailureDomain struct { // Failure domains are defined in a cluster's config.openshift.io/Infrastructure resource. // When balancing machines across failure domains, the control plane machine set will inject configuration from the // Infrastructure resource into the machine providerSpec to allocate the machine to a failure domain. - // +kubebuilder:validation:Required + // +required Name string `json:"name"` } @@ -367,7 +385,7 @@ type OpenStackFailureDomain struct { type NutanixFailureDomainReference struct { // name of the failure domain in which the nutanix machine provider will create the VM. // Failure domains are defined in a cluster's config.openshift.io/Infrastructure resource. 
- // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=64 // +kubebuilder:validation:Pattern=`[a-z0-9]([-a-z0-9]*[a-z0-9])?` @@ -400,7 +418,7 @@ type RootVolume struct { // + the control plane with a root volume. This is because the default volume type in Cinder is not guaranteed // + to be available, therefore we prefer the user to be explicit about the volume type to use. // + We apply the same logic in CPMS: if the failure domain specifies a root volume, we require the user to specify a volume type. - // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=255 VolumeType string `json:"volumeType"` @@ -408,36 +426,34 @@ type RootVolume struct { // ControlPlaneMachineSetStatus represents the status of the ControlPlaneMachineSet CRD. type ControlPlaneMachineSetStatus struct { - // Conditions represents the observations of the ControlPlaneMachineSet's current state. + // conditions represents the observations of the ControlPlaneMachineSet's current state. // Known .status.conditions.type are: Available, Degraded and Progressing. - // +patchMergeKey=type - // +patchStrategy=merge // +listType=map // +listMapKey=type // +optional - Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"` + Conditions []metav1.Condition `json:"conditions,omitempty"` - // ObservedGeneration is the most recent generation observed for this + // observedGeneration is the most recent generation observed for this // ControlPlaneMachineSet. It corresponds to the ControlPlaneMachineSets's generation, // which is updated on mutation by the API Server. // +optional ObservedGeneration int64 `json:"observedGeneration,omitempty"` - // Replicas is the number of Control Plane Machines created by the + // replicas is the number of Control Plane Machines created by the // ControlPlaneMachineSet controller. // Note that during update operations this value may differ from the // desired replica count. // +optional Replicas int32 `json:"replicas,omitempty"` - // ReadyReplicas is the number of Control Plane Machines created by the + // readyReplicas is the number of Control Plane Machines created by the // ControlPlaneMachineSet controller which are ready. // Note that this value may be higher than the desired number of replicas // while rolling updates are in-progress. // +optional ReadyReplicas int32 `json:"readyReplicas,omitempty"` - // UpdatedReplicas is the number of non-terminated Control Plane Machines + // updatedReplicas is the number of non-terminated Control Plane Machines // created by the ControlPlaneMachineSet controller that have the desired // provider spec and are ready. // This value is set to 0 when a change is detected to the desired spec. @@ -448,7 +464,7 @@ type ControlPlaneMachineSetStatus struct { // +optional UpdatedReplicas int32 `json:"updatedReplicas,omitempty"` - // UnavailableReplicas is the number of Control Plane Machines that are + // unavailableReplicas is the number of Control Plane Machines that are // still required before the ControlPlaneMachineSet reaches the desired // available capacity. When this value is non-zero, the number of // ReadyReplicas is less than the desired Replicas. 
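The `machineNamePrefix` field introduced in the ControlPlaneMachineSet spec above (gated by the CPMSMachineNamePrefix feature gate) prepends a prefix to generated control plane machine names, which then end with a random 5-character string and the machine index. As a hedged sketch only, a ControlPlaneMachineSet using it might look like the following; the resource name, namespace, and cluster-id label value are assumptions, and the machine spec is elided:

    apiVersion: machine.openshift.io/v1
    kind: ControlPlaneMachineSet
    metadata:
      name: cluster                      # assumed singleton name
      namespace: openshift-machine-api   # assumed namespace
    spec:
      machineNamePrefix: control-plane   # machines named e.g. control-plane-abcde-0
      replicas: 3
      state: Active
      strategy:
        type: RollingUpdate
      selector:
        matchLabels:
          machine.openshift.io/cluster-api-machine-role: master
          machine.openshift.io/cluster-api-machine-type: master
      template:
        machineType: machines_v1beta1_machine_openshift_io
        machines_v1beta1_machine_openshift_io:
          metadata:
            labels:
              machine.openshift.io/cluster-api-machine-role: master
              machine.openshift.io/cluster-api-machine-type: master
              machine.openshift.io/cluster-api-cluster: <cluster-id>   # placeholder
          spec: {}                       # MachineSpec/providerSpec elided in this sketch

The selector and the three template labels shown here follow the validation rules stated in the hunk above (both role and type labels must equal 'master', and the cluster label must be present); the prefix itself must satisfy the RFC 1123 subdomain constraint enforced by the new CEL validation.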
diff --git a/vendor/github.com/openshift/api/machine/v1/types_nutanixprovider.go b/vendor/github.com/openshift/api/machine/v1/types_nutanixprovider.go index e5e0ae830..e2ddde2ad 100644 --- a/vendor/github.com/openshift/api/machine/v1/types_nutanixprovider.go +++ b/vendor/github.com/openshift/api/machine/v1/types_nutanixprovider.go @@ -23,40 +23,40 @@ type NutanixMachineProviderConfig struct { // of the Prism Central), in which the Machine's VM will be created. // The cluster identifier (uuid or name) can be obtained from the Prism Central console // or using the prism_central API. - // +kubebuilder:validation:Required + // +required Cluster NutanixResourceIdentifier `json:"cluster"` // image is to identify the rhcos image uploaded to the Prism Central (PC) // The image identifier (uuid or name) can be obtained from the Prism Central console // or using the prism_central API. - // +kubebuilder:validation:Required + // +required Image NutanixResourceIdentifier `json:"image"` // subnets holds a list of identifiers (one or more) of the cluster's network subnets // for the Machine's VM to connect to. The subnet identifiers (uuid or name) can be // obtained from the Prism Central console or using the prism_central API. - // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:MinItems=1 Subnets []NutanixResourceIdentifier `json:"subnets"` // vcpusPerSocket is the number of vCPUs per socket of the VM - // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:Minimum=1 VCPUsPerSocket int32 `json:"vcpusPerSocket"` // vcpuSockets is the number of vCPU sockets of the VM - // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:Minimum=1 VCPUSockets int32 `json:"vcpuSockets"` // memorySize is the memory size (in Quantity format) of the VM // The minimum memorySize is 2Gi bytes - // +kubebuilder:validation:Required + // +required MemorySize resource.Quantity `json:"memorySize"` // systemDiskSize is size (in Quantity format) of the system disk of the VM // The minimum systemDiskSize is 20Gi bytes - // +kubebuilder:validation:Required + // +required SystemDiskSize resource.Quantity `json:"systemDiskSize"` // bootType indicates the boot type (Legacy, UEFI or SecureBoot) the Machine's VM uses to boot. @@ -96,7 +96,7 @@ type NutanixMachineProviderConfig struct { // credentialsSecret is a local reference to a secret that contains the // credentials data to access Nutanix PC client - // +kubebuilder:validation:Required + // +required CredentialsSecret *corev1.LocalObjectReference `json:"credentialsSecret"` // failureDomain refers to the name of the FailureDomain with which this Machine is associated. @@ -113,13 +113,13 @@ type NutanixCategory struct { // key is the prism category key name // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=64 - // +kubebuilder:validation:Required + // +required Key string `json:"key"` // value is the prism category value associated with the key // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=64 - // +kubebuilder:validation:Required + // +required Value string `json:"value"` } @@ -151,9 +151,9 @@ const ( // NutanixResourceIdentifier holds the identity of a Nutanix PC resource (cluster, image, subnet, etc.) // +union type NutanixResourceIdentifier struct { - // Type is the identifier type to use for this resource. + // type is the identifier type to use for this resource. 
// +unionDiscriminator - // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:Enum:=uuid;name Type NutanixIdentifierType `json:"type"` @@ -186,7 +186,7 @@ type NutanixGPU struct { // type is the identifier type of the GPU device. // Valid values are Name and DeviceID. // +unionDiscriminator - // +kubebuilder:validation:Required + // +required Type NutanixGPUIdentifierType `json:"type"` // deviceID is the GPU device ID with the integer value. @@ -219,7 +219,7 @@ type NutanixStorageResourceIdentifier struct { // type is the identifier type to use for this resource. // The valid value is "uuid". // +unionDiscriminator - // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:Enum:=uuid Type NutanixIdentifierType `json:"type"` @@ -279,13 +279,13 @@ type NutanixVMDiskDeviceProperties struct { // deviceType specifies the disk device type. // The valid values are "Disk" and "CDRom", and the default is "Disk". // +kubebuilder:default=Disk - // +kubebuilder:validation:Required + // +required DeviceType NutanixDiskDeviceType `json:"deviceType"` // adapterType is the adapter type of the disk address. // If the deviceType is "Disk", the valid adapterType can be "SCSI", "IDE", "PCI", "SATA" or "SPAPR". // If the deviceType is "CDRom", the valid adapterType can be "IDE" or "SATA". - // +kubebuilder:validation:Required + // +required AdapterType NutanixDiskAdapterType `json:"adapterType,omitempty"` // deviceIndex is the index of the disk address. The valid values are non-negative integers, with the default value 0. @@ -295,7 +295,7 @@ type NutanixVMDiskDeviceProperties struct { // the deviceIndex should start from 1. // +kubebuilder:default=0 // +kubebuilder:validation:Minimum=0 - // +kubebuilder:validation:Required + // +required DeviceIndex int32 `json:"deviceIndex,omitempty"` } @@ -304,7 +304,7 @@ type NutanixVMDisk struct { // diskSize is size (in Quantity format) of the disk attached to the VM. // See https://pkg.go.dev/k8s.io/apimachinery/pkg/api/resource#Format for the Quantity format and example documentation. // The minimum diskSize is 1GB. - // +kubebuilder:validation:Required + // +required DiskSize resource.Quantity `json:"diskSize"` // deviceProperties are the properties of the disk device. @@ -331,6 +331,8 @@ type NutanixMachineProviderStatus struct { // conditions is a set of conditions associated with the Machine to indicate // errors or other status // +optional + // +listType=map + // +listMapKey=type Conditions []metav1.Condition `json:"conditions,omitempty"` // vmUUID is the Machine associated VM's UUID diff --git a/vendor/github.com/openshift/api/machine/v1/types_powervsprovider.go b/vendor/github.com/openshift/api/machine/v1/types_powervsprovider.go index c131139c5..d3a4c6ec8 100644 --- a/vendor/github.com/openshift/api/machine/v1/types_powervsprovider.go +++ b/vendor/github.com/openshift/api/machine/v1/types_powervsprovider.go @@ -142,18 +142,18 @@ type PowerVSMachineProviderConfig struct { // a validation error. // +union type PowerVSResource struct { - // Type identifies the resource type for this entry. + // type identifies the resource type for this entry. 
// Valid values are ID, Name and RegEx // +kubebuilder:validation:Enum:=ID;Name;RegEx // +optional Type PowerVSResourceType `json:"type,omitempty"` - // ID of resource + // id of resource // +optional ID *string `json:"id,omitempty"` - // Name of resource + // name of resource // +optional Name *string `json:"name,omitempty"` - // Regex to find resource + // regex to find resource // Regex contains the pattern to match to find a resource // +optional RegEx *string `json:"regex,omitempty"` @@ -170,12 +170,10 @@ type PowerVSMachineProviderStatus struct { // conditions is a set of conditions associated with the Machine to indicate // errors or other status - // +patchMergeKey=type - // +patchStrategy=merge // +listType=map // +listMapKey=type // +optional - Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"` + Conditions []metav1.Condition `json:"conditions,omitempty"` // instanceId is the instance ID of the machine created in PowerVS // instanceId uniquely identifies a Power VS server instance(VM) under a Power VS service. @@ -200,7 +198,7 @@ type PowerVSMachineProviderStatus struct { // referenced secret inside the same namespace. // +structType=atomic type PowerVSSecretReference struct { - // Name of the secret. + // name of the secret. // +optional Name string `json:"name,omitempty"` } @@ -211,7 +209,7 @@ type LoadBalancerReference struct { // The name should be between 1 and 63 characters long and may consist of lowercase alphanumeric characters and hyphens only. // The value must not end with a hyphen. // It is a reference to existing LoadBalancer created by openshift installer component. - // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:Pattern=`^([a-z]|[a-z][-a-z0-9]*[a-z0-9]|[0-9][-a-z0-9]*([a-z]|[-a-z][-a-z0-9]*[a-z0-9]))$` // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=63 @@ -221,7 +219,7 @@ type LoadBalancerReference struct { // More details about Application LoadBalancer // https://cloud.ibm.com/docs/vpc?topic=vpc-load-balancers-about&interface=ui // Supported values are Application. - // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:Enum:="Application" Type IBMVPCLoadBalancerType `json:"type"` } diff --git a/vendor/github.com/openshift/api/machine/v1/zz_generated.featuregated-crd-manifests.yaml b/vendor/github.com/openshift/api/machine/v1/zz_generated.featuregated-crd-manifests.yaml index b001170fa..7be04ec84 100644 --- a/vendor/github.com/openshift/api/machine/v1/zz_generated.featuregated-crd-manifests.yaml +++ b/vendor/github.com/openshift/api/machine/v1/zz_generated.featuregated-crd-manifests.yaml @@ -7,6 +7,7 @@ controlplanemachinesets.machine.openshift.io: Capability: MachineAPI Category: "" FeatureGates: + - CPMSMachineNamePrefix - MachineAPIMigration FilenameOperatorName: control-plane-machine-set FilenameOperatorOrdering: "01" diff --git a/vendor/github.com/openshift/api/machine/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/machine/v1/zz_generated.swagger_doc_generated.go index 32b86a7e9..c0b8c4ce4 100644 --- a/vendor/github.com/openshift/api/machine/v1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/machine/v1/zz_generated.swagger_doc_generated.go @@ -20,15 +20,15 @@ var map_AlibabaCloudMachineProviderConfig = map[string]string{ "zoneId": "The ID of the zone in which to create the instance. 
You can call the DescribeZones operation to query the most recent region list.", "imageId": "The ID of the image used to create the instance.", "dataDisk": "DataDisks holds information regarding the extra disks attached to the instance", - "securityGroups": "SecurityGroups is a list of security group references to assign to the instance. A reference holds either the security group ID, the resource name, or the required tags to search. When more than one security group is returned for a tag search, all the groups are associated with the instance up to the maximum number of security groups to which an instance can belong. For more information, see the \"Security group limits\" section in Limits. https://www.alibabacloud.com/help/en/doc-detail/25412.htm", - "bandwidth": "Bandwidth describes the internet bandwidth strategy for the instance", - "systemDisk": "SystemDisk holds the properties regarding the system disk for the instance", - "vSwitch": "VSwitch is a reference to the vswitch to use for this instance. A reference holds either the vSwitch ID, the resource name, or the required tags to search. When more than one vSwitch is returned for a tag search, only the first vSwitch returned will be used. This parameter is required when you create an instance of the VPC type. You can call the DescribeVSwitches operation to query the created vSwitches.", - "ramRoleName": "RAMRoleName is the name of the instance Resource Access Management (RAM) role. This allows the instance to perform API calls as this specified RAM role.", - "resourceGroup": "ResourceGroup references the resource group to which to assign the instance. A reference holds either the resource group ID, the resource name, or the required tags to search. When more than one resource group are returned for a search, an error will be produced and the Machine will not be created. Resource Groups do not support searching by tags.", - "tenancy": "Tenancy specifies whether to create the instance on a dedicated host. Valid values:\n\ndefault: creates the instance on a non-dedicated host. host: creates the instance on a dedicated host. If you do not specify the DedicatedHostID parameter, Alibaba Cloud automatically selects a dedicated host for the instance. Empty value means no opinion and the platform chooses the a default, which is subject to change over time. Currently the default is `default`.", - "userDataSecret": "UserDataSecret contains a local reference to a secret that contains the UserData to apply to the instance", - "credentialsSecret": "CredentialsSecret is a reference to the secret with alibabacloud credentials. Otherwise, defaults to permissions provided by attached RAM role where the actuator is running.", + "securityGroups": "securityGroups is a list of security group references to assign to the instance. A reference holds either the security group ID, the resource name, or the required tags to search. When more than one security group is returned for a tag search, all the groups are associated with the instance up to the maximum number of security groups to which an instance can belong. For more information, see the \"Security group limits\" section in Limits. https://www.alibabacloud.com/help/en/doc-detail/25412.htm", + "bandwidth": "bandwidth describes the internet bandwidth strategy for the instance", + "systemDisk": "systemDisk holds the properties regarding the system disk for the instance", + "vSwitch": "vSwitch is a reference to the vswitch to use for this instance. 
A reference holds either the vSwitch ID, the resource name, or the required tags to search. When more than one vSwitch is returned for a tag search, only the first vSwitch returned will be used. This parameter is required when you create an instance of the VPC type. You can call the DescribeVSwitches operation to query the created vSwitches.", + "ramRoleName": "ramRoleName is the name of the instance Resource Access Management (RAM) role. This allows the instance to perform API calls as this specified RAM role.", + "resourceGroup": "resourceGroup references the resource group to which to assign the instance. A reference holds either the resource group ID, the resource name, or the required tags to search. When more than one resource group are returned for a search, an error will be produced and the Machine will not be created. Resource Groups do not support searching by tags.", + "tenancy": "tenancy specifies whether to create the instance on a dedicated host. Valid values:\n\ndefault: creates the instance on a non-dedicated host. host: creates the instance on a dedicated host. If you do not specify the DedicatedHostID parameter, Alibaba Cloud automatically selects a dedicated host for the instance. Empty value means no opinion and the platform chooses the a default, which is subject to change over time. Currently the default is `default`.", + "userDataSecret": "userDataSecret contains a local reference to a secret that contains the UserData to apply to the instance", + "credentialsSecret": "credentialsSecret is a reference to the secret with alibabacloud credentials. Otherwise, defaults to permissions provided by attached RAM role where the actuator is running.", "tag": "Tags are the set of metadata to add to an instance.", } @@ -48,9 +48,9 @@ func (AlibabaCloudMachineProviderConfigList) SwaggerDoc() map[string]string { var map_AlibabaCloudMachineProviderStatus = map[string]string{ "": "AlibabaCloudMachineProviderStatus is the Schema for the alibabacloudmachineproviderconfig API Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "instanceId": "InstanceID is the instance ID of the machine created in alibabacloud", - "instanceState": "InstanceState is the state of the alibabacloud instance for this machine", - "conditions": "Conditions is a set of conditions associated with the Machine to indicate errors or other status", + "instanceId": "instanceId is the instance ID of the machine created in alibabacloud", + "instanceState": "instanceState is the state of the alibabacloud instance for this machine", + "conditions": "conditions is a set of conditions associated with the Machine to indicate errors or other status", } func (AlibabaCloudMachineProviderStatus) SwaggerDoc() map[string]string { @@ -60,9 +60,9 @@ func (AlibabaCloudMachineProviderStatus) SwaggerDoc() map[string]string { var map_AlibabaResourceReference = map[string]string{ "": "ResourceTagReference is a reference to a specific AlibabaCloud resource by ID, or tags. Only one of ID or Tags may be specified. Specifying more than one will result in a validation error.", "type": "type identifies the resource reference type for this entry.", - "id": "ID of resource", - "name": "Name of the resource", - "tags": "Tags is a set of metadata based upon ECS object tags used to identify a resource. 
For details about usage when multiple resources are found, please see the owning parent field documentation.", + "id": "id of resource", + "name": "name of the resource", + "tags": "tags is a set of metadata based upon ECS object tags used to identify a resource. For details about usage when multiple resources are found, please see the owning parent field documentation.", } func (AlibabaResourceReference) SwaggerDoc() map[string]string { @@ -71,8 +71,8 @@ func (AlibabaResourceReference) SwaggerDoc() map[string]string { var map_BandwidthProperties = map[string]string{ "": "Bandwidth describes the bandwidth strategy for the network of the instance", - "internetMaxBandwidthIn": "InternetMaxBandwidthIn is the maximum inbound public bandwidth. Unit: Mbit/s. Valid values: When the purchased outbound public bandwidth is less than or equal to 10 Mbit/s, the valid values of this parameter are 1 to 10. Currently the default is `10` when outbound bandwidth is less than or equal to 10 Mbit/s. When the purchased outbound public bandwidth is greater than 10, the valid values are 1 to the InternetMaxBandwidthOut value. Currently the default is the value used for `InternetMaxBandwidthOut` when outbound public bandwidth is greater than 10.", - "internetMaxBandwidthOut": "InternetMaxBandwidthOut is the maximum outbound public bandwidth. Unit: Mbit/s. Valid values: 0 to 100. When a value greater than 0 is used then a public IP address is assigned to the instance. Empty value means no opinion and the platform chooses the a default, which is subject to change over time. Currently the default is `0`", + "internetMaxBandwidthIn": "internetMaxBandwidthIn is the maximum inbound public bandwidth. Unit: Mbit/s. Valid values: When the purchased outbound public bandwidth is less than or equal to 10 Mbit/s, the valid values of this parameter are 1 to 10. Currently the default is `10` when outbound bandwidth is less than or equal to 10 Mbit/s. When the purchased outbound public bandwidth is greater than 10, the valid values are 1 to the InternetMaxBandwidthOut value. Currently the default is the value used for `InternetMaxBandwidthOut` when outbound public bandwidth is greater than 10.", + "internetMaxBandwidthOut": "internetMaxBandwidthOut is the maximum outbound public bandwidth. Unit: Mbit/s. Valid values: 0 to 100. When a value greater than 0 is used then a public IP address is assigned to the instance. Empty value means no opinion and the platform chooses the a default, which is subject to change over time. Currently the default is `0`", } func (BandwidthProperties) SwaggerDoc() map[string]string { @@ -97,10 +97,10 @@ func (DataDiskProperties) SwaggerDoc() map[string]string { var map_SystemDiskProperties = map[string]string{ "": "SystemDiskProperties contains the information regarding the system disk including performance, size, name, and category", - "category": "Category is the category of the system disk. Valid values: cloud_essd: ESSD. When the parameter is set to this value, you can use the SystemDisk.PerformanceLevel parameter to specify the performance level of the disk. cloud_efficiency: ultra disk. cloud_ssd: standard SSD. cloud: basic disk. Empty value means no opinion and the platform chooses the a default, which is subject to change over time. Currently for non-I/O optimized instances of retired instance types, the default is `cloud`. Currently for other instances, the default is `cloud_efficiency`.", - "performanceLevel": "PerformanceLevel is the performance level of the ESSD used as the system disk. 
Valid values:\n\nPL0: A single ESSD can deliver up to 10,000 random read/write IOPS. PL1: A single ESSD can deliver up to 50,000 random read/write IOPS. PL2: A single ESSD can deliver up to 100,000 random read/write IOPS. PL3: A single ESSD can deliver up to 1,000,000 random read/write IOPS. Empty value means no opinion and the platform chooses a default, which is subject to change over time. Currently the default is `PL1`. For more information about ESSD performance levels, see ESSDs.", - "name": "Name is the name of the system disk. If the name is specified the name must be 2 to 128 characters in length. It must start with a letter and cannot start with http:// or https://. It can contain letters, digits, colons (:), underscores (_), and hyphens (-). Empty value means the platform chooses a default, which is subject to change over time. Currently the default is `\"\"`.", - "size": "Size is the size of the system disk. Unit: GiB. Valid values: 20 to 500. The value must be at least 20 and greater than or equal to the size of the image. Empty value means the platform chooses a default, which is subject to change over time. Currently the default is `40` or the size of the image depending on whichever is greater.", + "category": "category is the category of the system disk. Valid values: cloud_essd: ESSD. When the parameter is set to this value, you can use the SystemDisk.PerformanceLevel parameter to specify the performance level of the disk. cloud_efficiency: ultra disk. cloud_ssd: standard SSD. cloud: basic disk. Empty value means no opinion and the platform chooses the a default, which is subject to change over time. Currently for non-I/O optimized instances of retired instance types, the default is `cloud`. Currently for other instances, the default is `cloud_efficiency`.", + "performanceLevel": "performanceLevel is the performance level of the ESSD used as the system disk. Valid values:\n\nPL0: A single ESSD can deliver up to 10,000 random read/write IOPS. PL1: A single ESSD can deliver up to 50,000 random read/write IOPS. PL2: A single ESSD can deliver up to 100,000 random read/write IOPS. PL3: A single ESSD can deliver up to 1,000,000 random read/write IOPS. Empty value means no opinion and the platform chooses a default, which is subject to change over time. Currently the default is `PL1`. For more information about ESSD performance levels, see ESSDs.", + "name": "name is the name of the system disk. If the name is specified the name must be 2 to 128 characters in length. It must start with a letter and cannot start with http:// or https://. It can contain letters, digits, colons (:), underscores (_), and hyphens (-). Empty value means the platform chooses a default, which is subject to change over time. Currently the default is `\"\"`.", + "size": "size is the size of the system disk. Unit: GiB. Valid values: 20 to 500. The value must be at least 20 and greater than or equal to the size of the image. Empty value means the platform chooses a default, which is subject to change over time. Currently the default is `40` or the size of the image depending on whichever is greater.", } func (SystemDiskProperties) SwaggerDoc() map[string]string { @@ -119,8 +119,8 @@ func (Tag) SwaggerDoc() map[string]string { var map_AWSResourceFilter = map[string]string{ "": "AWSResourceFilter is a filter used to identify an AWS resource", - "name": "Name of the filter. Filter names are case-sensitive.", - "values": "Values includes one or more filter values. 
Filter values are case-sensitive.", + "name": "name of the filter. Filter names are case-sensitive.", + "values": "values includes one or more filter values. Filter values are case-sensitive.", } func (AWSResourceFilter) SwaggerDoc() map[string]string { @@ -129,10 +129,10 @@ func (AWSResourceFilter) SwaggerDoc() map[string]string { var map_AWSResourceReference = map[string]string{ "": "AWSResourceReference is a reference to a specific AWS resource by ID, ARN, or filters. Only one of ID, ARN or Filters may be specified. Specifying more than one will result in a validation error.", - "type": "Type determines how the reference will fetch the AWS resource.", - "id": "ID of resource.", - "arn": "ARN of resource.", - "filters": "Filters is a set of filters used to identify a resource.", + "type": "type determines how the reference will fetch the AWS resource.", + "id": "id of resource.", + "arn": "arn of resource.", + "filters": "filters is a set of filters used to identify a resource.", } func (AWSResourceReference) SwaggerDoc() map[string]string { @@ -141,8 +141,8 @@ func (AWSResourceReference) SwaggerDoc() map[string]string { var map_AWSFailureDomain = map[string]string{ "": "AWSFailureDomain configures failure domain information for the AWS platform.", - "subnet": "Subnet is a reference to the subnet to use for this instance.", - "placement": "Placement configures the placement information for this instance.", + "subnet": "subnet is a reference to the subnet to use for this instance.", + "placement": "placement configures the placement information for this instance.", } func (AWSFailureDomain) SwaggerDoc() map[string]string { @@ -151,7 +151,7 @@ func (AWSFailureDomain) SwaggerDoc() map[string]string { var map_AWSFailureDomainPlacement = map[string]string{ "": "AWSFailureDomainPlacement configures the placement information for the AWSFailureDomain.", - "availabilityZone": "AvailabilityZone is the availability zone of the instance.", + "availabilityZone": "availabilityZone is the availability zone of the instance.", } func (AWSFailureDomainPlacement) SwaggerDoc() map[string]string { @@ -187,12 +187,13 @@ func (ControlPlaneMachineSetList) SwaggerDoc() map[string]string { } var map_ControlPlaneMachineSetSpec = map[string]string{ - "": "ControlPlaneMachineSet represents the configuration of the ControlPlaneMachineSet.", - "state": "State defines whether the ControlPlaneMachineSet is Active or Inactive. When Inactive, the ControlPlaneMachineSet will not take any action on the state of the Machines within the cluster. When Active, the ControlPlaneMachineSet will reconcile the Machines and will update the Machines as necessary. Once Active, a ControlPlaneMachineSet cannot be made Inactive. To prevent further action please remove the ControlPlaneMachineSet.", - "replicas": "Replicas defines how many Control Plane Machines should be created by this ControlPlaneMachineSet. This field is immutable and cannot be changed after cluster installation. The ControlPlaneMachineSet only operates with 3 or 5 node control planes, 3 and 5 are the only valid values for this field.", - "strategy": "Strategy defines how the ControlPlaneMachineSet will update Machines when it detects a change to the ProviderSpec.", - "selector": "Label selector for Machines. Existing Machines selected by this selector will be the ones affected by this ControlPlaneMachineSet. It must match the template's labels. 
This field is considered immutable after creation of the resource.", - "template": "Template describes the Control Plane Machines that will be created by this ControlPlaneMachineSet.", + "": "ControlPlaneMachineSet represents the configuration of the ControlPlaneMachineSet.", + "machineNamePrefix": "machineNamePrefix is the prefix used when creating machine names. Each machine name will consist of this prefix, followed by a randomly generated string of 5 characters, and the index of the machine. It must be a lowercase RFC 1123 subdomain, consisting of lowercase alphanumeric characters, hyphens ('-'), and periods ('.'). Each block, separated by periods, must start and end with an alphanumeric character. Hyphens are not allowed at the start or end of a block, and consecutive periods are not permitted. The prefix must be between 1 and 245 characters in length. For example, if machineNamePrefix is set to 'control-plane', and three machines are created, their names might be: control-plane-abcde-0, control-plane-fghij-1, control-plane-klmno-2", + "state": "state defines whether the ControlPlaneMachineSet is Active or Inactive. When Inactive, the ControlPlaneMachineSet will not take any action on the state of the Machines within the cluster. When Active, the ControlPlaneMachineSet will reconcile the Machines and will update the Machines as necessary. Once Active, a ControlPlaneMachineSet cannot be made Inactive. To prevent further action please remove the ControlPlaneMachineSet.", + "replicas": "replicas defines how many Control Plane Machines should be created by this ControlPlaneMachineSet. This field is immutable and cannot be changed after cluster installation. The ControlPlaneMachineSet only operates with 3 or 5 node control planes, 3 and 5 are the only valid values for this field.", + "strategy": "strategy defines how the ControlPlaneMachineSet will update Machines when it detects a change to the ProviderSpec.", + "selector": "Label selector for Machines. Existing Machines selected by this selector will be the ones affected by this ControlPlaneMachineSet. It must match the template's labels. This field is considered immutable after creation of the resource.", + "template": "template describes the Control Plane Machines that will be created by this ControlPlaneMachineSet.", } func (ControlPlaneMachineSetSpec) SwaggerDoc() map[string]string { @@ -201,12 +202,12 @@ func (ControlPlaneMachineSetSpec) SwaggerDoc() map[string]string { var map_ControlPlaneMachineSetStatus = map[string]string{ "": "ControlPlaneMachineSetStatus represents the status of the ControlPlaneMachineSet CRD.", - "conditions": "Conditions represents the observations of the ControlPlaneMachineSet's current state. Known .status.conditions.type are: Available, Degraded and Progressing.", - "observedGeneration": "ObservedGeneration is the most recent generation observed for this ControlPlaneMachineSet. It corresponds to the ControlPlaneMachineSets's generation, which is updated on mutation by the API Server.", - "replicas": "Replicas is the number of Control Plane Machines created by the ControlPlaneMachineSet controller. Note that during update operations this value may differ from the desired replica count.", - "readyReplicas": "ReadyReplicas is the number of Control Plane Machines created by the ControlPlaneMachineSet controller which are ready. 
Note that this value may be higher than the desired number of replicas while rolling updates are in-progress.", - "updatedReplicas": "UpdatedReplicas is the number of non-terminated Control Plane Machines created by the ControlPlaneMachineSet controller that have the desired provider spec and are ready. This value is set to 0 when a change is detected to the desired spec. When the update strategy is RollingUpdate, this will also coincide with starting the process of updating the Machines. When the update strategy is OnDelete, this value will remain at 0 until a user deletes an existing replica and its replacement has become ready.", - "unavailableReplicas": "UnavailableReplicas is the number of Control Plane Machines that are still required before the ControlPlaneMachineSet reaches the desired available capacity. When this value is non-zero, the number of ReadyReplicas is less than the desired Replicas.", + "conditions": "conditions represents the observations of the ControlPlaneMachineSet's current state. Known .status.conditions.type are: Available, Degraded and Progressing.", + "observedGeneration": "observedGeneration is the most recent generation observed for this ControlPlaneMachineSet. It corresponds to the ControlPlaneMachineSets's generation, which is updated on mutation by the API Server.", + "replicas": "replicas is the number of Control Plane Machines created by the ControlPlaneMachineSet controller. Note that during update operations this value may differ from the desired replica count.", + "readyReplicas": "readyReplicas is the number of Control Plane Machines created by the ControlPlaneMachineSet controller which are ready. Note that this value may be higher than the desired number of replicas while rolling updates are in-progress.", + "updatedReplicas": "updatedReplicas is the number of non-terminated Control Plane Machines created by the ControlPlaneMachineSet controller that have the desired provider spec and are ready. This value is set to 0 when a change is detected to the desired spec. When the update strategy is RollingUpdate, this will also coincide with starting the process of updating the Machines. When the update strategy is OnDelete, this value will remain at 0 until a user deletes an existing replica and its replacement has become ready.", + "unavailableReplicas": "unavailableReplicas is the number of Control Plane Machines that are still required before the ControlPlaneMachineSet reaches the desired available capacity. When this value is non-zero, the number of ReadyReplicas is less than the desired Replicas.", } func (ControlPlaneMachineSetStatus) SwaggerDoc() map[string]string { @@ -215,7 +216,7 @@ func (ControlPlaneMachineSetStatus) SwaggerDoc() map[string]string { var map_ControlPlaneMachineSetStrategy = map[string]string{ "": "ControlPlaneMachineSetStrategy defines the strategy for applying updates to the Control Plane Machines managed by the ControlPlaneMachineSet.", - "type": "Type defines the type of update strategy that should be used when updating Machines owned by the ControlPlaneMachineSet. Valid values are \"RollingUpdate\" and \"OnDelete\". The current default value is \"RollingUpdate\".", + "type": "type defines the type of update strategy that should be used when updating Machines owned by the ControlPlaneMachineSet. Valid values are \"RollingUpdate\" and \"OnDelete\". 
The current default value is \"RollingUpdate\".", } func (ControlPlaneMachineSetStrategy) SwaggerDoc() map[string]string { @@ -224,7 +225,7 @@ func (ControlPlaneMachineSetStrategy) SwaggerDoc() map[string]string { var map_ControlPlaneMachineSetTemplate = map[string]string{ "": "ControlPlaneMachineSetTemplate is a template used by the ControlPlaneMachineSet to create the Machines that it will manage in the future. ", - "machineType": "MachineType determines the type of Machines that should be managed by the ControlPlaneMachineSet. Currently, the only valid value is machines_v1beta1_machine_openshift_io.", + "machineType": "machineType determines the type of Machines that should be managed by the ControlPlaneMachineSet. Currently, the only valid value is machines_v1beta1_machine_openshift_io.", "machines_v1beta1_machine_openshift_io": "OpenShiftMachineV1Beta1Machine defines the template for creating Machines from the v1beta1.machine.openshift.io API group.", } @@ -235,7 +236,7 @@ func (ControlPlaneMachineSetTemplate) SwaggerDoc() map[string]string { var map_ControlPlaneMachineSetTemplateObjectMeta = map[string]string{ "": "ControlPlaneMachineSetTemplateObjectMeta is a subset of the metav1.ObjectMeta struct. It allows users to specify labels and annotations that will be copied onto Machines created from this template.", "labels": "Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels. This field must contain both the 'machine.openshift.io/cluster-api-machine-role' and 'machine.openshift.io/cluster-api-machine-type' labels, both with a value of 'master'. It must also contain a label with the key 'machine.openshift.io/cluster-api-cluster'.", - "annotations": "Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations", + "annotations": "annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations", } func (ControlPlaneMachineSetTemplateObjectMeta) SwaggerDoc() map[string]string { @@ -244,12 +245,12 @@ func (ControlPlaneMachineSetTemplateObjectMeta) SwaggerDoc() map[string]string { var map_FailureDomains = map[string]string{ "": "FailureDomain represents the different configurations required to spread Machines across failure domains on different platforms.", - "platform": "Platform identifies the platform for which the FailureDomain represents. Currently supported values are AWS, Azure, GCP, OpenStack, VSphere and Nutanix.", - "aws": "AWS configures failure domain information for the AWS platform.", - "azure": "Azure configures failure domain information for the Azure platform.", - "gcp": "GCP configures failure domain information for the GCP platform.", + "platform": "platform identifies the platform for which the FailureDomain represents. 
Currently supported values are AWS, Azure, GCP, OpenStack, VSphere and Nutanix.", + "aws": "aws configures failure domain information for the AWS platform.", + "azure": "azure configures failure domain information for the Azure platform.", + "gcp": "gcp configures failure domain information for the GCP platform.", "vsphere": "vsphere configures failure domain information for the VSphere platform.", - "openstack": "OpenStack configures failure domain information for the OpenStack platform.", + "openstack": "openstack configures failure domain information for the OpenStack platform.", "nutanix": "nutanix configures failure domain information for the Nutanix platform.", } @@ -259,7 +260,7 @@ func (FailureDomains) SwaggerDoc() map[string]string { var map_GCPFailureDomain = map[string]string{ "": "GCPFailureDomain configures failure domain information for the GCP platform", - "zone": "Zone is the zone in which the GCP machine provider will create the VM.", + "zone": "zone is the zone in which the GCP machine provider will create the VM.", } func (GCPFailureDomain) SwaggerDoc() map[string]string { @@ -277,9 +278,9 @@ func (NutanixFailureDomainReference) SwaggerDoc() map[string]string { var map_OpenShiftMachineV1Beta1MachineTemplate = map[string]string{ "": "OpenShiftMachineV1Beta1MachineTemplate is a template for the ControlPlaneMachineSet to create Machines from the v1beta1.machine.openshift.io API group.", - "failureDomains": "FailureDomains is the list of failure domains (sometimes called availability zones) in which the ControlPlaneMachineSet should balance the Control Plane Machines. This will be merged into the ProviderSpec given in the template. This field is optional on platforms that do not require placement information.", + "failureDomains": "failureDomains is the list of failure domains (sometimes called availability zones) in which the ControlPlaneMachineSet should balance the Control Plane Machines. This will be merged into the ProviderSpec given in the template. This field is optional on platforms that do not require placement information.", "metadata": "ObjectMeta is the standard object metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata Labels are required to match the ControlPlaneMachineSet selector.", - "spec": "Spec contains the desired configuration of the Control Plane Machines. The ProviderSpec within contains platform specific details for creating the Control Plane Machines. The ProviderSe should be complete apart from the platform specific failure domain field. This will be overriden when the Machines are created based on the FailureDomains field.", + "spec": "spec contains the desired configuration of the Control Plane Machines. The ProviderSpec within contains platform specific details for creating the Control Plane Machines. The ProviderSe should be complete apart from the platform specific failure domain field. 
This will be overriden when the Machines are created based on the FailureDomains field.", } func (OpenShiftMachineV1Beta1MachineTemplate) SwaggerDoc() map[string]string { @@ -372,7 +373,7 @@ func (NutanixMachineProviderStatus) SwaggerDoc() map[string]string { var map_NutanixResourceIdentifier = map[string]string{ "": "NutanixResourceIdentifier holds the identity of a Nutanix PC resource (cluster, image, subnet, etc.)", - "type": "Type is the identifier type to use for this resource.", + "type": "type is the identifier type to use for this resource.", "uuid": "uuid is the UUID of the resource in the PC.", "name": "name is the resource name in the PC", } @@ -467,10 +468,10 @@ func (PowerVSMachineProviderStatus) SwaggerDoc() map[string]string { var map_PowerVSResource = map[string]string{ "": "PowerVSResource is a reference to a specific PowerVS resource by ID, Name or RegEx Only one of ID, Name or RegEx may be specified. Specifying more than one will result in a validation error.", - "type": "Type identifies the resource type for this entry. Valid values are ID, Name and RegEx", - "id": "ID of resource", - "name": "Name of resource", - "regex": "Regex to find resource Regex contains the pattern to match to find a resource", + "type": "type identifies the resource type for this entry. Valid values are ID, Name and RegEx", + "id": "id of resource", + "name": "name of resource", + "regex": "regex to find resource Regex contains the pattern to match to find a resource", } func (PowerVSResource) SwaggerDoc() map[string]string { @@ -479,7 +480,7 @@ func (PowerVSResource) SwaggerDoc() map[string]string { var map_PowerVSSecretReference = map[string]string{ "": "PowerVSSecretReference contains enough information to locate the referenced secret inside the same namespace.", - "name": "Name of the secret.", + "name": "name of the secret.", } func (PowerVSSecretReference) SwaggerDoc() map[string]string { diff --git a/vendor/github.com/openshift/api/machine/v1alpha1/types_openstack.go b/vendor/github.com/openshift/api/machine/v1alpha1/types_openstack.go index da5fbc515..7b7f80810 100644 --- a/vendor/github.com/openshift/api/machine/v1alpha1/types_openstack.go +++ b/vendor/github.com/openshift/api/machine/v1alpha1/types_openstack.go @@ -51,6 +51,7 @@ type OpenstackProviderSpec struct { KeyName string `json:"keyName,omitempty"` // The machine ssh username + // Deprecated: sshUserName is silently ignored. SshUserName string `json:"sshUserName,omitempty"` // A networks object. Required parameter when there are multiple networks defined for the tenant. @@ -108,6 +109,7 @@ type OpenstackProviderSpec struct { ServerGroupName string `json:"serverGroupName,omitempty"` // The subnet that a set of machines will get ingress/egress traffic from + // Deprecated: primarySubnet is silently ignored. Use subnets instead. PrimarySubnet string `json:"primarySubnet,omitempty"` } @@ -161,14 +163,15 @@ type NetworkParam struct { // The UUID of the network. Required if you omit the port attribute. UUID string `json:"uuid,omitempty"` // A fixed IPv4 address for the NIC. + // Deprecated: fixedIP is silently ignored. Use subnets instead. 
FixedIp string `json:"fixedIp,omitempty"` // Filters for optional network query Filter Filter `json:"filter,omitempty"` // Subnet within a network to use Subnets []SubnetParam `json:"subnets,omitempty"` - // NoAllowedAddressPairs disables creation of allowed address pairs for the network ports + // noAllowedAddressPairs disables creation of allowed address pairs for the network ports NoAllowedAddressPairs bool `json:"noAllowedAddressPairs,omitempty"` - // PortTags allows users to specify a list of tags to add to ports created in a given network + // portTags allows users to specify a list of tags to add to ports created in a given network PortTags []string `json:"portTags,omitempty"` // The virtual network interface card (vNIC) type that is bound to the // neutron port. @@ -177,7 +180,7 @@ type NetworkParam struct { // host to pass and receive virtual network interface (VIF) port-specific // information to the plug-in. Profile map[string]string `json:"profile,omitempty"` - // PortSecurity optionally enables or disables security on ports managed by OpenStack + // portSecurity optionally enables or disables security on ports managed by OpenStack PortSecurity *bool `json:"portSecurity,omitempty"` } @@ -229,10 +232,11 @@ type SubnetParam struct { // Filters for optional network query Filter SubnetFilter `json:"filter,omitempty"` - // PortTags are tags that are added to ports created on this subnet + // portTags are tags that are added to ports created on this subnet PortTags []string `json:"portTags,omitempty"` - // PortSecurity optionally enables or disables security on ports managed by OpenStack + // portSecurity optionally enables or disables security on ports managed by OpenStack + // Deprecated: portSecurity is silently ignored. Set portSecurity on the parent network instead. PortSecurity *bool `json:"portSecurity,omitempty"` } @@ -263,6 +267,7 @@ type SubnetFilter struct { // ipv6RaMode filters subnets by IPv6 router adversiement mode. IPv6RAMode string `json:"ipv6RaMode,omitempty"` // subnetpoolId filters subnets by subnet pool ID. + // Deprecated: subnetpoolId is silently ignored. SubnetPoolID string `json:"subnetpoolId,omitempty"` // tags filters by subnets containing all specified tags. // Multiple tags are comma separated. @@ -307,11 +312,12 @@ type PortOpts struct { // tenantID specifies the tenant ID of the created port. Note that this // requires OpenShift to have administrative permissions, which is // typically not the case. Use of this field is not recommended. - // Deprecated: use projectID instead. It will be ignored if projectID is set. + // Deprecated: tenantID is silently ignored. TenantID string `json:"tenantID,omitempty"` // projectID specifies the project ID of the created port. Note that this // requires OpenShift to have administrative permissions, which is // typically not the case. Use of this field is not recommended. + // Deprecated: projectID is silently ignored. ProjectID string `json:"projectID,omitempty"` // securityGroups specifies a set of security group UUIDs to use instead // of the machine's default security groups. The default security groups @@ -362,7 +368,7 @@ type RootVolume struct { // volumeType specifies a volume type to use when creating the root // volume. If not specified the default volume type will be used. VolumeType string `json:"volumeType,omitempty"` - // diskSize specifies the size, in GB, of the created root volume. + // diskSize specifies the size, in GiB, of the created root volume. 
Size int `json:"diskSize,omitempty"` // availabilityZone specifies the Cinder availability where the root volume will be created. Zone string `json:"availabilityZone,omitempty"` @@ -379,7 +385,7 @@ type RootVolume struct { type BlockDeviceStorage struct { // type is the type of block device to create. // This can be either "Volume" or "Local". - // +kubebuilder:validation:Required + // +required // +unionDiscriminator Type BlockDeviceType `json:"type"` @@ -414,16 +420,16 @@ type AdditionalBlockDevice struct { // Also, this name will be used for tagging the block device. // Information about the block device tag can be obtained from the OpenStack // metadata API or the config drive. - // +kubebuilder:validation:Required + // +required Name string `json:"name"` // sizeGiB is the size of the block device in gibibytes (GiB). - // +kubebuilder:validation:Required + // +required SizeGiB int `json:"sizeGiB"` // storage specifies the storage type of the block device and // additional storage options. - // +kubebuilder:validation:Required + // +required Storage BlockDeviceStorage `json:"storage"` } diff --git a/vendor/github.com/openshift/api/machine/v1alpha1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/machine/v1alpha1/zz_generated.swagger_doc_generated.go index c8094eb26..1062bc6de 100644 --- a/vendor/github.com/openshift/api/machine/v1alpha1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/machine/v1alpha1/zz_generated.swagger_doc_generated.go @@ -76,14 +76,14 @@ func (FixedIPs) SwaggerDoc() map[string]string { var map_NetworkParam = map[string]string{ "uuid": "The UUID of the network. Required if you omit the port attribute.", - "fixedIp": "A fixed IPv4 address for the NIC.", + "fixedIp": "A fixed IPv4 address for the NIC. Deprecated: fixedIP is silently ignored. Use subnets instead.", "filter": "Filters for optional network query", "subnets": "Subnet within a network to use", - "noAllowedAddressPairs": "NoAllowedAddressPairs disables creation of allowed address pairs for the network ports", - "portTags": "PortTags allows users to specify a list of tags to add to ports created in a given network", + "noAllowedAddressPairs": "noAllowedAddressPairs disables creation of allowed address pairs for the network ports", + "portTags": "portTags allows users to specify a list of tags to add to ports created in a given network", "vnicType": "The virtual network interface card (vNIC) type that is bound to the neutron port.", "profile": "A dictionary that enables the application running on the specified host to pass and receive virtual network interface (VIF) port-specific information to the plug-in.", - "portSecurity": "PortSecurity optionally enables or disables security on ports managed by OpenStack", + "portSecurity": "portSecurity optionally enables or disables security on ports managed by OpenStack", } func (NetworkParam) SwaggerDoc() map[string]string { @@ -98,7 +98,7 @@ var map_OpenstackProviderSpec = map[string]string{ "flavor": "The flavor reference for the flavor for your server instance.", "image": "The name of the image to use for your server instance. If the RootVolume is specified, this will be ignored and use rootVolume directly.", "keyName": "The ssh key to inject in the instance", - "sshUserName": "The machine ssh username", + "sshUserName": "The machine ssh username Deprecated: sshUserName is silently ignored.", "networks": "A networks object. Required parameter when there are multiple networks defined for the tenant. 
When you do not specify the networks parameter, the server attaches to the only network created for the current tenant.", "ports": "Create and assign additional ports to instances", "floatingIP": "floatingIP specifies a floating IP to be associated with the machine. Note that it is not safe to use this parameter in a MachineSet, as only one Machine may be assigned the same floating IP.\n\nDeprecated: floatingIP will be removed in a future release as it cannot be implemented correctly.", @@ -113,7 +113,7 @@ var map_OpenstackProviderSpec = map[string]string{ "additionalBlockDevices": "additionalBlockDevices is a list of specifications for additional block devices to attach to the server instance", "serverGroupID": "The server group to assign the machine to.", "serverGroupName": "The server group to assign the machine to. A server group with that name will be created if it does not exist. If both ServerGroupID and ServerGroupName are non-empty, they must refer to the same OpenStack resource.", - "primarySubnet": "The subnet that a set of machines will get ingress/egress traffic from", + "primarySubnet": "The subnet that a set of machines will get ingress/egress traffic from Deprecated: primarySubnet is silently ignored. Use subnets instead.", } func (OpenstackProviderSpec) SwaggerDoc() map[string]string { @@ -127,8 +127,8 @@ var map_PortOpts = map[string]string{ "adminStateUp": "adminStateUp sets the administrative state of the created port to up (true), or down (false).", "macAddress": "macAddress specifies the MAC address of the created port.", "fixedIPs": "fixedIPs specifies a set of fixed IPs to assign to the port. They must all be valid for the port's network.", - "tenantID": "tenantID specifies the tenant ID of the created port. Note that this requires OpenShift to have administrative permissions, which is typically not the case. Use of this field is not recommended. Deprecated: use projectID instead. It will be ignored if projectID is set.", - "projectID": "projectID specifies the project ID of the created port. Note that this requires OpenShift to have administrative permissions, which is typically not the case. Use of this field is not recommended.", + "tenantID": "tenantID specifies the tenant ID of the created port. Note that this requires OpenShift to have administrative permissions, which is typically not the case. Use of this field is not recommended. Deprecated: tenantID is silently ignored.", + "projectID": "projectID specifies the project ID of the created port. Note that this requires OpenShift to have administrative permissions, which is typically not the case. Use of this field is not recommended. Deprecated: projectID is silently ignored.", "securityGroups": "securityGroups specifies a set of security group UUIDs to use instead of the machine's default security groups. The default security groups will be used if this is left empty or not specified.", "allowedAddressPairs": "allowedAddressPairs specifies a set of allowed address pairs to add to the port.", "tags": "tags species a set of tags to add to the port.", @@ -146,7 +146,7 @@ func (PortOpts) SwaggerDoc() map[string]string { var map_RootVolume = map[string]string{ "sourceUUID": "sourceUUID specifies the UUID of a glance image used to populate the root volume. Deprecated: set image in the platform spec instead. This will be ignored if image is set in the platform spec.", "volumeType": "volumeType specifies a volume type to use when creating the root volume. 
If not specified the default volume type will be used.", - "diskSize": "diskSize specifies the size, in GB, of the created root volume.", + "diskSize": "diskSize specifies the size, in GiB, of the created root volume.", "availabilityZone": "availabilityZone specifies the Cinder availability where the root volume will be created.", "sourceType": "Deprecated: sourceType will be silently ignored. There is no replacement.", "deviceType": "Deprecated: deviceType will be silently ignored. There is no replacement.", @@ -198,7 +198,7 @@ var map_SubnetFilter = map[string]string{ "cidr": "cidr filters subnets by CIDR.", "ipv6AddressMode": "ipv6AddressMode filters subnets by IPv6 address mode.", "ipv6RaMode": "ipv6RaMode filters subnets by IPv6 router adversiement mode.", - "subnetpoolId": "subnetpoolId filters subnets by subnet pool ID.", + "subnetpoolId": "subnetpoolId filters subnets by subnet pool ID. Deprecated: subnetpoolId is silently ignored.", "tags": "tags filters by subnets containing all specified tags. Multiple tags are comma separated.", "tagsAny": "tagsAny filters by subnets containing any specified tags. Multiple tags are comma separated.", "notTags": "notTags filters by subnets which don't match all specified tags. NOT (t1 AND t2...) Multiple tags are comma separated.", @@ -217,8 +217,8 @@ func (SubnetFilter) SwaggerDoc() map[string]string { var map_SubnetParam = map[string]string{ "uuid": "The UUID of the network. Required if you omit the port attribute.", "filter": "Filters for optional network query", - "portTags": "PortTags are tags that are added to ports created on this subnet", - "portSecurity": "PortSecurity optionally enables or disables security on ports managed by OpenStack", + "portTags": "portTags are tags that are added to ports created on this subnet", + "portSecurity": "portSecurity optionally enables or disables security on ports managed by OpenStack Deprecated: portSecurity is silently ignored. Set portSecurity on the parent network instead.", } func (SubnetParam) SwaggerDoc() map[string]string { diff --git a/vendor/github.com/openshift/api/machine/v1beta1/types_awsprovider.go b/vendor/github.com/openshift/api/machine/v1beta1/types_awsprovider.go index b2c66b707..d69bcd023 100644 --- a/vendor/github.com/openshift/api/machine/v1beta1/types_awsprovider.go +++ b/vendor/github.com/openshift/api/machine/v1beta1/types_awsprovider.go @@ -13,38 +13,38 @@ type AWSMachineProviderConfig struct { metav1.TypeMeta `json:",inline"` // +optional metav1.ObjectMeta `json:"metadata,omitempty"` - // AMI is the reference to the AMI from which to create the machine instance. + // ami is the reference to the AMI from which to create the machine instance. AMI AWSResourceReference `json:"ami"` - // InstanceType is the type of instance to create. Example: m4.xlarge + // instanceType is the type of instance to create. Example: m4.xlarge InstanceType string `json:"instanceType"` - // Tags is the set of tags to add to apply to an instance, in addition to the ones + // tags is the set of tags to add to apply to an instance, in addition to the ones // added by default by the actuator. These tags are additive. The actuator will ensure // these tags are present, but will not remove any other tags that may exist on the // instance. 
// +optional Tags []TagSpecification `json:"tags,omitempty"` - // IAMInstanceProfile is a reference to an IAM role to assign to the instance + // iamInstanceProfile is a reference to an IAM role to assign to the instance // +optional IAMInstanceProfile *AWSResourceReference `json:"iamInstanceProfile,omitempty"` - // UserDataSecret contains a local reference to a secret that contains the + // userDataSecret contains a local reference to a secret that contains the // UserData to apply to the instance // +optional UserDataSecret *corev1.LocalObjectReference `json:"userDataSecret,omitempty"` - // CredentialsSecret is a reference to the secret with AWS credentials. Otherwise, defaults to permissions + // credentialsSecret is a reference to the secret with AWS credentials. Otherwise, defaults to permissions // provided by attached IAM role where the actuator is running. // +optional CredentialsSecret *corev1.LocalObjectReference `json:"credentialsSecret,omitempty"` - // KeyName is the name of the KeyPair to use for SSH + // keyName is the name of the KeyPair to use for SSH // +optional KeyName *string `json:"keyName,omitempty"` - // DeviceIndex is the index of the device on the instance for the network interface attachment. + // deviceIndex is the index of the device on the instance for the network interface attachment. // Defaults to 0. DeviceIndex int64 `json:"deviceIndex"` - // PublicIP specifies whether the instance should get a public IP. If not present, + // publicIp specifies whether the instance should get a public IP. If not present, // it should use the default of its subnet. // +optional PublicIP *bool `json:"publicIp,omitempty"` - // NetworkInterfaceType specifies the type of network interface to be used for the primary + // networkInterfaceType specifies the type of network interface to be used for the primary // network interface. // Valid values are "ENA", "EFA", and omitted, which means no opinion and the platform // chooses a good default which may change over time. @@ -54,32 +54,32 @@ type AWSMachineProviderConfig struct { // +kubebuilder:validation:Enum:="ENA";"EFA" // +optional NetworkInterfaceType AWSNetworkInterfaceType `json:"networkInterfaceType,omitempty"` - // SecurityGroups is an array of references to security groups that should be applied to the + // securityGroups is an array of references to security groups that should be applied to the // instance. // +optional SecurityGroups []AWSResourceReference `json:"securityGroups,omitempty"` - // Subnet is a reference to the subnet to use for this instance + // subnet is a reference to the subnet to use for this instance Subnet AWSResourceReference `json:"subnet"` - // Placement specifies where to create the instance in AWS + // placement specifies where to create the instance in AWS Placement Placement `json:"placement"` - // LoadBalancers is the set of load balancers to which the new instance + // loadBalancers is the set of load balancers to which the new instance // should be added once it is created. 
// +optional LoadBalancers []LoadBalancerReference `json:"loadBalancers,omitempty"` - // BlockDevices is the set of block device mapping associated to this instance, + // blockDevices is the set of block device mapping associated to this instance, // block device without a name will be used as a root device and only one device without a name is allowed // https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/block-device-mapping-concepts.html // +optional BlockDevices []BlockDeviceMappingSpec `json:"blockDevices,omitempty"` - // SpotMarketOptions allows users to configure instances to be run using AWS Spot instances. + // spotMarketOptions allows users to configure instances to be run using AWS Spot instances. // +optional SpotMarketOptions *SpotMarketOptions `json:"spotMarketOptions,omitempty"` - // MetadataServiceOptions allows users to configure instance metadata service interaction options. + // metadataServiceOptions allows users to configure instance metadata service interaction options. // If nothing specified, default AWS IMDS settings will be applied. // https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_InstanceMetadataOptionsRequest.html // +optional MetadataServiceOptions MetadataServiceOptions `json:"metadataServiceOptions,omitempty"` - // PlacementGroupName specifies the name of the placement group in which to launch the instance. + // placementGroupName specifies the name of the placement group in which to launch the instance. // The placement group must already be created and may use any placement strategy. // When omitted, no placement group is used when creating the EC2 instance. // +optional @@ -95,6 +95,18 @@ type AWSMachineProviderConfig struct { // The field size should be greater than 0 and the field input must start with cr-*** // +optional CapacityReservationID string `json:"capacityReservationId"` + // marketType specifies the type of market for the EC2 instance. + // Valid values are OnDemand, Spot, CapacityBlock and omitted. + // + // Defaults to OnDemand. + // When SpotMarketOptions is provided, the marketType defaults to "Spot". + // + // When set to OnDemand the instance runs as a standard OnDemand instance. + // When set to Spot the instance runs as a Spot instance. + // When set to CapacityBlock the instance utilizes pre-purchased compute capacity (capacity blocks) with AWS Capacity Reservations. + // If this value is selected, capacityReservationID must be specified to identify the target reservation. + // +optional + MarketType MarketType `json:"marketType,omitempty"` } // BlockDeviceMappingSpec describes a block device mapping @@ -194,7 +206,7 @@ const ( // MetadataServiceOptions defines the options available to a user when configuring // Instance Metadata Service (IMDS) Options. type MetadataServiceOptions struct { - // Authentication determines whether or not the host requires the use of authentication when interacting with the metadata service. + // authentication determines whether or not the host requires the use of authentication when interacting with the metadata service. // When using authentication, this enforces v2 interaction method (IMDSv2) with the metadata service. // When omitted, this means the user has no opinion and the value is left to the platform to choose a good // default, which is subject to change over time. The current default is optional. @@ -209,26 +221,26 @@ type MetadataServiceOptions struct { // Only one of ID, ARN or Filters may be specified. Specifying more than one will result in // a validation error. 
type AWSResourceReference struct { - // ID of resource + // id of resource // +optional ID *string `json:"id,omitempty"` - // ARN of resource + // arn of resource // +optional ARN *string `json:"arn,omitempty"` - // Filters is a set of filters used to identify a resource + // filters is a set of filters used to identify a resource // +optional Filters []Filter `json:"filters,omitempty"` } // Placement indicates where to create the instance in AWS type Placement struct { - // Region is the region to use to create the instance + // region is the region to use to create the instance // +optional Region string `json:"region,omitempty"` - // AvailabilityZone is the availability zone of the instance + // availabilityZone is the availability zone of the instance // +optional AvailabilityZone string `json:"availabilityZone,omitempty"` - // Tenancy indicates if instance should run on shared or single-tenant hardware. There are + // tenancy indicates if instance should run on shared or single-tenant hardware. There are // supported 3 options: default, dedicated and host. // +optional Tenancy InstanceTenancy `json:"tenancy,omitempty"` @@ -236,18 +248,18 @@ type Placement struct { // Filter is a filter used to identify an AWS resource type Filter struct { - // Name of the filter. Filter names are case-sensitive. + // name of the filter. Filter names are case-sensitive. Name string `json:"name"` - // Values includes one or more filter values. Filter values are case-sensitive. + // values includes one or more filter values. Filter values are case-sensitive. // +optional Values []string `json:"values,omitempty"` } // TagSpecification is the name/value pair for a tag type TagSpecification struct { - // Name of the tag + // name of the tag Name string `json:"name"` - // Value of the tag + // value of the tag Value string `json:"value"` } @@ -309,14 +321,35 @@ const ( // +openshift:compatibility-gen:level=2 type AWSMachineProviderStatus struct { metav1.TypeMeta `json:",inline"` - // InstanceID is the instance ID of the machine created in AWS + // instanceId is the instance ID of the machine created in AWS // +optional InstanceID *string `json:"instanceId,omitempty"` - // InstanceState is the state of the AWS instance for this machine + // instanceState is the state of the AWS instance for this machine // +optional InstanceState *string `json:"instanceState,omitempty"` - // Conditions is a set of conditions associated with the Machine to indicate + // conditions is a set of conditions associated with the Machine to indicate // errors or other status // +optional + // +listType=map + // +listMapKey=type Conditions []metav1.Condition `json:"conditions,omitempty"` } + +// MarketType describes the market type of an EC2 Instance +// +kubebuilder:validation:Enum:=OnDemand;Spot;CapacityBlock +type MarketType string + +const ( + + // MarketTypeOnDemand is a MarketType enum value + // When set to OnDemand the instance runs as a standard OnDemand instance. + MarketTypeOnDemand MarketType = "OnDemand" + + // MarketTypeSpot is a MarketType enum value + // When set to Spot the instance runs as a Spot instance. + MarketTypeSpot MarketType = "Spot" + + // MarketTypeCapacityBlock is a MarketType enum value + // When set to CapacityBlock the instance utilizes pre-purchased compute capacity (capacity blocks) with AWS Capacity Reservations. 
+ MarketTypeCapacityBlock MarketType = "CapacityBlock" +) diff --git a/vendor/github.com/openshift/api/machine/v1beta1/types_azureprovider.go b/vendor/github.com/openshift/api/machine/v1beta1/types_azureprovider.go index 00a9497fd..760360bd5 100644 --- a/vendor/github.com/openshift/api/machine/v1beta1/types_azureprovider.go +++ b/vendor/github.com/openshift/api/machine/v1beta1/types_azureprovider.go @@ -39,32 +39,32 @@ type AzureMachineProviderSpec struct { metav1.TypeMeta `json:",inline"` // +optional metav1.ObjectMeta `json:"metadata,omitempty"` - // UserDataSecret contains a local reference to a secret that contains the + // userDataSecret contains a local reference to a secret that contains the // UserData to apply to the instance // +optional UserDataSecret *corev1.SecretReference `json:"userDataSecret,omitempty"` - // CredentialsSecret is a reference to the secret with Azure credentials. + // credentialsSecret is a reference to the secret with Azure credentials. // +optional CredentialsSecret *corev1.SecretReference `json:"credentialsSecret,omitempty"` - // Location is the region to use to create the instance + // location is the region to use to create the instance // +optional Location string `json:"location,omitempty"` - // VMSize is the size of the VM to create. + // vmSize is the size of the VM to create. // +optional VMSize string `json:"vmSize,omitempty"` - // Image is the OS image to use to create the instance. + // image is the OS image to use to create the instance. Image Image `json:"image"` - // OSDisk represents the parameters for creating the OS disk. + // osDisk represents the parameters for creating the OS disk. OSDisk OSDisk `json:"osDisk"` // DataDisk specifies the parameters that are used to add one or more data disks to the machine. // +optional DataDisks []DataDisk `json:"dataDisks,omitempty"` - // SSHPublicKey is the public key to use to SSH to the virtual machine. + // sshPublicKey is the public key to use to SSH to the virtual machine. // +optional SSHPublicKey string `json:"sshPublicKey,omitempty"` - // PublicIP if true a public IP will be used + // publicIP if true a public IP will be used PublicIP bool `json:"publicIP"` - // Tags is a list of tags to apply to the machine. + // tags is a list of tags to apply to the machine. // +optional Tags map[string]string `json:"tags,omitempty"` // Network Security Group that needs to be attached to the machine's interface. @@ -75,40 +75,40 @@ type AzureMachineProviderSpec struct { // No application security groups will be attached if zero-length. 
// +optional ApplicationSecurityGroups []string `json:"applicationSecurityGroups,omitempty"` - // Subnet to use for this instance + // subnet to use for this instance Subnet string `json:"subnet"` - // PublicLoadBalancer to use for this instance + // publicLoadBalancer to use for this instance // +optional PublicLoadBalancer string `json:"publicLoadBalancer,omitempty"` // InternalLoadBalancerName to use for this instance // +optional InternalLoadBalancer string `json:"internalLoadBalancer,omitempty"` - // NatRule to set inbound NAT rule of the load balancer + // natRule to set inbound NAT rule of the load balancer // +optional NatRule *int64 `json:"natRule,omitempty"` - // ManagedIdentity to set managed identity name + // managedIdentity to set managed identity name // +optional ManagedIdentity string `json:"managedIdentity,omitempty"` - // Vnet to set virtual network name + // vnet to set virtual network name // +optional Vnet string `json:"vnet,omitempty"` // Availability Zone for the virtual machine. // If nil, the virtual machine should be deployed to no zone // +optional Zone string `json:"zone,omitempty"` - // NetworkResourceGroup is the resource group for the virtual machine's network + // networkResourceGroup is the resource group for the virtual machine's network // +optional NetworkResourceGroup string `json:"networkResourceGroup,omitempty"` - // ResourceGroup is the resource group for the virtual machine + // resourceGroup is the resource group for the virtual machine // +optional ResourceGroup string `json:"resourceGroup,omitempty"` - // SpotVMOptions allows the ability to specify the Machine should use a Spot VM + // spotVMOptions allows the ability to specify the Machine should use a Spot VM // +optional SpotVMOptions *SpotVMOptions `json:"spotVMOptions,omitempty"` - // SecurityProfile specifies the Security profile settings for a virtual machine. + // securityProfile specifies the Security profile settings for a virtual machine. // +optional SecurityProfile *SecurityProfile `json:"securityProfile,omitempty"` - // UltraSSDCapability enables or disables Azure UltraSSD capability for a virtual machine. + // ultraSSDCapability enables or disables Azure UltraSSD capability for a virtual machine. // This can be used to allow/disallow binding of Azure UltraSSD to the Machine both as Data Disks or via Persistent Volumes. // This Azure feature is subject to a specific scope and certain limitations. // More informations on this can be found in the official Azure documentation for Ultra Disks: @@ -130,16 +130,16 @@ type AzureMachineProviderSpec struct { // +kubebuilder:validation:Enum:="Enabled";"Disabled" // +optional UltraSSDCapability AzureUltraSSDCapabilityState `json:"ultraSSDCapability,omitempty"` - // AcceleratedNetworking enables or disables Azure accelerated networking feature. + // acceleratedNetworking enables or disables Azure accelerated networking feature. // Set to false by default. If true, then this will depend on whether the requested // VMSize is supported. If set to true with an unsupported VMSize, Azure will return an error. // +optional AcceleratedNetworking bool `json:"acceleratedNetworking,omitempty"` - // AvailabilitySet specifies the availability set to use for this instance. + // availabilitySet specifies the availability set to use for this instance. // Availability set should be precreated, before using this field. 
// +optional AvailabilitySet string `json:"availabilitySet,omitempty"` - // Diagnostics configures the diagnostics settings for the virtual machine. + // diagnostics configures the diagnostics settings for the virtual machine. // This allows you to configure boot diagnostics such as capturing serial output from // the virtual machine on boot. // This is useful for debugging software based launch issues. @@ -156,7 +156,7 @@ type AzureMachineProviderSpec struct { // SpotVMOptions defines the options relevant to running the Machine on Spot VMs type SpotVMOptions struct { - // MaxPrice defines the maximum price the user is willing to pay for Spot VM instances + // maxPrice defines the maximum price the user is willing to pay for Spot VM instances // +optional MaxPrice *resource.Quantity `json:"maxPrice,omitempty"` } @@ -177,13 +177,13 @@ type AzureDiagnostics struct { // This is useful for debugging software based launch issues. // +union type AzureBootDiagnostics struct { - // StorageAccountType determines if the storage account for storing the diagnostics data + // storageAccountType determines if the storage account for storing the diagnostics data // should be provisioned by Azure (AzureManaged) or by the customer (CustomerManaged). - // +kubebuilder:validation:Required + // +required // +unionDiscriminator StorageAccountType AzureBootDiagnosticsStorageAccountType `json:"storageAccountType"` - // CustomerManaged provides reference to the customer manager storage account. + // customerManaged provides reference to the customer manager storage account. // +optional CustomerManaged *AzureCustomerManagedBootDiagnostics `json:"customerManaged,omitempty"` } @@ -191,12 +191,12 @@ type AzureBootDiagnostics struct { // AzureCustomerManagedBootDiagnostics provides reference to a customer managed // storage account. type AzureCustomerManagedBootDiagnostics struct { - // StorageAccountURI is the URI of the customer managed storage account. + // storageAccountURI is the URI of the customer managed storage account. // The URI typically will be `https://.blob.core.windows.net/` // but may differ if you are using Azure DNS zone endpoints. // You can find the correct endpoint by looking for the Blob Primary Endpoint in the // endpoints tab in the Azure console. - // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:Pattern=`^https://` // +kubebuilder:validation:MaxLength=1024 StorageAccountURI string `json:"storageAccountURI"` @@ -225,15 +225,17 @@ type AzureMachineProviderStatus struct { metav1.TypeMeta `json:",inline"` // +optional metav1.ObjectMeta `json:"metadata,omitempty"` - // VMID is the ID of the virtual machine created in Azure. + // vmId is the ID of the virtual machine created in Azure. // +optional VMID *string `json:"vmId,omitempty"` - // VMState is the provisioning state of the Azure virtual machine. + // vmState is the provisioning state of the Azure virtual machine. // +optional VMState *AzureVMState `json:"vmState,omitempty"` - // Conditions is a set of conditions associated with the Machine to indicate + // conditions is a set of conditions associated with the Machine to indicate // errors or other status. 
// +optional + // +listType=map + // +listMapKey=type Conditions []metav1.Condition `json:"conditions,omitempty"` } @@ -274,23 +276,23 @@ const ( // Image is a mirror of azure sdk compute.ImageReference type Image struct { - // Publisher is the name of the organization that created the image + // publisher is the name of the organization that created the image Publisher string `json:"publisher"` - // Offer specifies the name of a group of related images created by the publisher. + // offer specifies the name of a group of related images created by the publisher. // For example, UbuntuServer, WindowsServer Offer string `json:"offer"` - // SKU specifies an instance of an offer, such as a major release of a distribution. + // sku specifies an instance of an offer, such as a major release of a distribution. // For example, 18.04-LTS, 2019-Datacenter SKU string `json:"sku"` - // Version specifies the version of an image sku. The allowed formats + // version specifies the version of an image sku. The allowed formats // are Major.Minor.Build or 'latest'. Major, Minor, and Build are decimal numbers. // Specify 'latest' to use the latest version of an image available at deploy time. // Even if you use 'latest', the VM image will not automatically update after deploy // time even if a new version becomes available. Version string `json:"version"` - // ResourceID specifies an image to use by ID + // resourceID specifies an image to use by ID ResourceID string `json:"resourceID"` - // Type identifies the source of the image and related information, such as purchase plans. + // type identifies the source of the image and related information, such as purchase plans. // Valid values are "ID", "MarketplaceWithPlan", "MarketplaceNoPlan", and omitted, which // means no opinion and the platform chooses a good default which may change over time. // Currently that default is "MarketplaceNoPlan" if publisher data is supplied, or "ID" if not. @@ -313,16 +315,16 @@ const ( ) type OSDisk struct { - // OSType is the operating system type of the OS disk. Possible values include "Linux" and "Windows". + // osType is the operating system type of the OS disk. Possible values include "Linux" and "Windows". OSType string `json:"osType"` - // ManagedDisk specifies the Managed Disk parameters for the OS disk. + // managedDisk specifies the Managed Disk parameters for the OS disk. ManagedDisk OSDiskManagedDiskParameters `json:"managedDisk"` - // DiskSizeGB is the size in GB to assign to the data disk. + // diskSizeGB is the size in GB to assign to the data disk. DiskSizeGB int32 `json:"diskSizeGB"` - // DiskSettings describe ephemeral disk settings for the os disk. + // diskSettings describe ephemeral disk settings for the os disk. // +optional DiskSettings DiskSettings `json:"diskSettings,omitempty"` - // CachingType specifies the caching requirements. + // cachingType specifies the caching requirements. // Possible values include: 'None', 'ReadOnly', 'ReadWrite'. // Empty value means no opinion and the platform chooses a default, which is subject to change over // time. Currently the default is `None`. @@ -342,43 +344,43 @@ type OSDisk struct { // Once the custom userdata Secret has been created, it can be referenced in the Machine's `.providerSpec.userDataSecret`. // For further guidance and examples, please refer to the official OpenShift docs. type DataDisk struct { - // NameSuffix is the suffix to be appended to the machine name to generate the disk name. 
+ // nameSuffix is the suffix to be appended to the machine name to generate the disk name. // Each disk name will be in format _. // NameSuffix name must start and finish with an alphanumeric character and can only contain letters, numbers, underscores, periods or hyphens. // The overall disk name must not exceed 80 chars in length. // +kubebuilder:validation:Pattern:=`^[a-zA-Z0-9](?:[\w\.-]*[a-zA-Z0-9])?$` // +kubebuilder:validation:MaxLength:=78 - // +kubebuilder:validation:Required + // +required NameSuffix string `json:"nameSuffix"` - // DiskSizeGB is the size in GB to assign to the data disk. + // diskSizeGB is the size in GB to assign to the data disk. // +kubebuilder:validation:Minimum=4 - // +kubebuilder:validation:Required + // +required DiskSizeGB int32 `json:"diskSizeGB"` - // ManagedDisk specifies the Managed Disk parameters for the data disk. + // managedDisk specifies the Managed Disk parameters for the data disk. // Empty value means no opinion and the platform chooses a default, which is subject to change over time. // Currently the default is a ManagedDisk with with storageAccountType: "Premium_LRS" and diskEncryptionSet.id: "Default". // +optional ManagedDisk DataDiskManagedDiskParameters `json:"managedDisk,omitempty"` - // Lun Specifies the logical unit number of the data disk. + // lun Specifies the logical unit number of the data disk. // This value is used to identify data disks within the VM and therefore must be unique for each data disk attached to a VM. // This value is also needed for referencing the data disks devices within userdata to perform disk initialization through Ignition (e.g. partition/format/mount). // The value must be between 0 and 63. // +kubebuilder:validation:Minimum=0 // +kubebuilder:validation:Maximum=63 - // +kubebuilder:validation:Required + // +required Lun int32 `json:"lun,omitempty"` - // CachingType specifies the caching requirements. + // cachingType specifies the caching requirements. // Empty value means no opinion and the platform chooses a default, which is subject to change over time. // Currently the default is CachingTypeNone. // +optional // +kubebuilder:validation:Enum=None;ReadOnly;ReadWrite CachingType CachingTypeOption `json:"cachingType,omitempty"` - // DeletionPolicy specifies the data disk deletion policy upon Machine deletion. + // deletionPolicy specifies the data disk deletion policy upon Machine deletion. // Possible values are "Delete","Detach". // When "Delete" is used the data disk is deleted when the Machine is deleted. // When "Detach" is used the data disk is detached from the Machine and retained when the Machine is deleted. // +kubebuilder:validation:Enum=Delete;Detach - // +kubebuilder:validation:Required + // +required DeletionPolicy DiskDeletionPolicyType `json:"deletionPolicy"` } @@ -408,7 +410,7 @@ const ( // DiskSettings describe ephemeral disk settings for the os disk. type DiskSettings struct { - // EphemeralStorageLocation enables ephemeral OS when set to 'Local'. + // ephemeralStorageLocation enables ephemeral OS when set to 'Local'. // Possible values include: 'Local'. // See https://docs.microsoft.com/en-us/azure/virtual-machines/ephemeral-os-disks for full details. // Empty value means no opinion and the platform chooses a default, which is subject to change over @@ -420,10 +422,10 @@ type DiskSettings struct { // OSDiskManagedDiskParameters is the parameters of a OSDisk managed disk. type OSDiskManagedDiskParameters struct { - // StorageAccountType is the storage account type to use. 
+ // storageAccountType is the storage account type to use. // Possible values include "Standard_LRS", "Premium_LRS". StorageAccountType string `json:"storageAccountType"` - // DiskEncryptionSet is the disk encryption set properties + // diskEncryptionSet is the disk encryption set properties // +optional DiskEncryptionSet *DiskEncryptionSetParameters `json:"diskEncryptionSet,omitempty"` // securityProfile specifies the security profile for the managed disk. @@ -460,11 +462,11 @@ type VMDiskSecurityProfile struct { // DataDiskManagedDiskParameters is the parameters of a DataDisk managed disk. type DataDiskManagedDiskParameters struct { - // StorageAccountType is the storage account type to use. + // storageAccountType is the storage account type to use. // Possible values include "Standard_LRS", "Premium_LRS" and "UltraSSD_LRS". // +kubebuilder:validation:Enum=Standard_LRS;Premium_LRS;UltraSSD_LRS StorageAccountType StorageAccountType `json:"storageAccountType"` - // DiskEncryptionSet is the disk encryption set properties. + // diskEncryptionSet is the disk encryption set properties. // Empty value means no opinion and the platform chooses a default, which is subject to change over time. // Currently the default is a DiskEncryptionSet with id: "Default". // +optional @@ -486,7 +488,7 @@ const ( // DiskEncryptionSetParameters is the disk encryption set properties type DiskEncryptionSetParameters struct { - // ID is the disk encryption set ID + // id is the disk encryption set ID // Empty value means no opinion and the platform chooses a default, which is subject to change over time. // Currently the default is: "Default". // +optional @@ -514,7 +516,7 @@ type SecuritySettings struct { // securityType specifies the SecurityType of the virtual machine. It has to be set to any specified value to // enable UEFISettings. The default behavior is: UEFISettings will not be enabled unless this property is set. // +kubebuilder:validation:Enum=ConfidentialVM;TrustedLaunch - // +kubebuilder:validation:Required + // +required // +unionDiscriminator SecurityType SecurityTypes `json:"securityType,omitempty"` // confidentialVM specifies the security configuration of the virtual machine. @@ -532,14 +534,14 @@ type SecuritySettings struct { // ConfidentialVM defines the UEFI settings for the virtual machine. type ConfidentialVM struct { // uefiSettings specifies the security settings like secure boot and vTPM used while creating the virtual machine. - // +kubebuilder:validation:Required + // +required UEFISettings UEFISettings `json:"uefiSettings,omitempty"` } // TrustedLaunch defines the UEFI settings for the virtual machine. type TrustedLaunch struct { // uefiSettings specifies the security settings like secure boot and vTPM used while creating the virtual machine. - // +kubebuilder:validation:Required + // +required UEFISettings UEFISettings `json:"uefiSettings,omitempty"` } diff --git a/vendor/github.com/openshift/api/machine/v1beta1/types_gcpprovider.go b/vendor/github.com/openshift/api/machine/v1beta1/types_gcpprovider.go index b5bb50619..72a31b5bd 100644 --- a/vendor/github.com/openshift/api/machine/v1beta1/types_gcpprovider.go +++ b/vendor/github.com/openshift/api/machine/v1beta1/types_gcpprovider.go @@ -63,6 +63,12 @@ const ( ConfidentialComputePolicyEnabled ConfidentialComputePolicy = "Enabled" // ConfidentialComputePolicyDisabled disables confidential compute for the GCP machine. 
ConfidentialComputePolicyDisabled ConfidentialComputePolicy = "Disabled" + // ConfidentialComputePolicySEV sets AMD SEV as the VM instance's confidential computing technology of choice. + ConfidentialComputePolicySEV ConfidentialComputePolicy = "AMDEncryptedVirtualization" + // ConfidentialComputePolicySEVSNP sets AMD SEV-SNP as the VM instance's confidential computing technology of choice. + ConfidentialComputePolicySEVSNP ConfidentialComputePolicy = "AMDEncryptedVirtualizationNestedPaging" + // ConfidentialComputePolicyTDX sets Intel TDX as the VM instance's confidential computing technology of choice. + ConfidentialComputePolicyTDX ConfidentialComputePolicy = "IntelTrustedDomainExtensions" ) // GCPMachineProviderSpec is the type that will be embedded in a Machine.Spec.ProviderSpec field @@ -76,61 +82,61 @@ type GCPMachineProviderSpec struct { // metadata is the standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata,omitempty"` - // UserDataSecret contains a local reference to a secret that contains the + // userDataSecret contains a local reference to a secret that contains the // UserData to apply to the instance // +optional UserDataSecret *corev1.LocalObjectReference `json:"userDataSecret,omitempty"` - // CredentialsSecret is a reference to the secret with GCP credentials. + // credentialsSecret is a reference to the secret with GCP credentials. // +optional CredentialsSecret *corev1.LocalObjectReference `json:"credentialsSecret,omitempty"` - // CanIPForward Allows this instance to send and receive packets with non-matching destination or source IPs. + // canIPForward Allows this instance to send and receive packets with non-matching destination or source IPs. // This is required if you plan to use this instance to forward routes. CanIPForward bool `json:"canIPForward"` - // DeletionProtection whether the resource should be protected against deletion. + // deletionProtection whether the resource should be protected against deletion. DeletionProtection bool `json:"deletionProtection"` - // Disks is a list of disks to be attached to the VM. + // disks is a list of disks to be attached to the VM. // +optional Disks []*GCPDisk `json:"disks,omitempty"` - // Labels list of labels to apply to the VM. + // labels list of labels to apply to the VM. // +optional Labels map[string]string `json:"labels,omitempty"` // Metadata key/value pairs to apply to the VM. // +optional Metadata []*GCPMetadata `json:"gcpMetadata,omitempty"` - // NetworkInterfaces is a list of network interfaces to be attached to the VM. + // networkInterfaces is a list of network interfaces to be attached to the VM. // +optional NetworkInterfaces []*GCPNetworkInterface `json:"networkInterfaces,omitempty"` - // ServiceAccounts is a list of GCP service accounts to be used by the VM. + // serviceAccounts is a list of GCP service accounts to be used by the VM. ServiceAccounts []GCPServiceAccount `json:"serviceAccounts"` - // Tags list of network tags to apply to the VM. + // tags list of network tags to apply to the VM. Tags []string `json:"tags,omitempty"` - // TargetPools are used for network TCP/UDP load balancing. A target pool references member instances, + // targetPools are used for network TCP/UDP load balancing. 
A target pool references member instances, // an associated legacy HttpHealthCheck resource, and, optionally, a backup target pool // +optional TargetPools []string `json:"targetPools,omitempty"` - // MachineType is the machine type to use for the VM. + // machineType is the machine type to use for the VM. MachineType string `json:"machineType"` - // Region is the region in which the GCP machine provider will create the VM. + // region is the region in which the GCP machine provider will create the VM. Region string `json:"region"` - // Zone is the zone in which the GCP machine provider will create the VM. + // zone is the zone in which the GCP machine provider will create the VM. Zone string `json:"zone"` - // ProjectID is the project in which the GCP machine provider will create the VM. + // projectID is the project in which the GCP machine provider will create the VM. // +optional ProjectID string `json:"projectID,omitempty"` - // GPUs is a list of GPUs to be attached to the VM. + // gpus is a list of GPUs to be attached to the VM. // +optional GPUs []GCPGPUConfig `json:"gpus,omitempty"` - // Preemptible indicates if created instance is preemptible. + // preemptible indicates if created instance is preemptible. // +optional Preemptible bool `json:"preemptible,omitempty"` - // OnHostMaintenance determines the behavior when a maintenance event occurs that might cause the instance to reboot. + // onHostMaintenance determines the behavior when a maintenance event occurs that might cause the instance to reboot. // This is required to be set to "Terminate" if you want to provision machine with attached GPUs. // Otherwise, allowed values are "Migrate" and "Terminate". // If omitted, the platform chooses a default, which is subject to change over time, currently that default is "Migrate". // +kubebuilder:validation:Enum=Migrate;Terminate; // +optional OnHostMaintenance GCPHostMaintenanceType `json:"onHostMaintenance,omitempty"` - // RestartPolicy determines the behavior when an instance crashes or the underlying infrastructure provider stops the instance as part of a maintenance event (default "Always"). + // restartPolicy determines the behavior when an instance crashes or the underlying infrastructure provider stops the instance as part of a maintenance event (default "Always"). // Cannot be "Always" with preemptible instances. // Otherwise, allowed values are "Always" and "Never". // If omitted, the platform chooses a default, which is subject to change over time, currently that default is "Always". @@ -139,14 +145,25 @@ type GCPMachineProviderSpec struct { // +optional RestartPolicy GCPRestartPolicyType `json:"restartPolicy,omitempty"` - // ShieldedInstanceConfig is the Shielded VM configuration for the VM + // shieldedInstanceConfig is the Shielded VM configuration for the VM // +optional ShieldedInstanceConfig GCPShieldedInstanceConfig `json:"shieldedInstanceConfig,omitempty"` - // confidentialCompute Defines whether the instance should have confidential compute enabled. - // If enabled OnHostMaintenance is required to be set to "Terminate". - // If omitted, the platform chooses a default, which is subject to change over time, currently that default is false. - // +kubebuilder:validation:Enum=Enabled;Disabled + // confidentialCompute is an optional field defining whether the instance should have confidential compute enabled or not, and the confidential computing technology of choice. 
+ // Allowed values are omitted, Disabled, Enabled, AMDEncryptedVirtualization, AMDEncryptedVirtualizationNestedPaging, and IntelTrustedDomainExtensions + // When set to Disabled, the machine will not be configured to be a confidential computing instance. + // When set to Enabled, the machine will be configured as a confidential computing instance with no preference on the confidential compute policy used. In this mode, the platform chooses a default that is subject to change over time. Currently, the default is to use AMD Secure Encrypted Virtualization. + // When set to AMDEncryptedVirtualization, the machine will be configured as a confidential computing instance with AMD Secure Encrypted Virtualization (AMD SEV) as the confidential computing technology. + // When set to AMDEncryptedVirtualizationNestedPaging, the machine will be configured as a confidential computing instance with AMD Secure Encrypted Virtualization Secure Nested Paging (AMD SEV-SNP) as the confidential computing technology. + // When set to IntelTrustedDomainExtensions, the machine will be configured as a confidential computing instance with Intel Trusted Domain Extensions (Intel TDX) as the confidential computing technology. + // If any value other than Disabled is set the selected machine type must support that specific confidential computing technology. The machine series supporting confidential computing technologies can be checked at https://cloud.google.com/confidential-computing/confidential-vm/docs/supported-configurations#all-confidential-vm-instances + // Currently, AMDEncryptedVirtualization is supported in c2d, n2d, and c3d machines. + // AMDEncryptedVirtualizationNestedPaging is supported in n2d machines. + // IntelTrustedDomainExtensions is supported in c3 machines. + // If any value other than Disabled is set, the selected region must support that specific confidential computing technology. The list of regions supporting confidential computing technologies can be checked at https://cloud.google.com/confidential-computing/confidential-vm/docs/supported-configurations#supported-zones + // If any value other than Disabled is set onHostMaintenance is required to be set to "Terminate". + // If omitted, the platform chooses a default, which is subject to change over time, currently that default is Disabled. + // +kubebuilder:validation:Enum="";Enabled;Disabled;AMDEncryptedVirtualization;AMDEncryptedVirtualizationNestedPaging;IntelTrustedDomainExtensions // +optional ConfidentialCompute ConfidentialComputePolicy `json:"confidentialCompute,omitempty"` @@ -169,7 +186,7 @@ type ResourceManagerTag struct { // An OrganizationID can have a maximum of 32 characters and must consist of decimal numbers, and // cannot have leading zeroes. A ProjectID must be 6 to 30 characters in length, can only contain // lowercase letters, numbers, and hyphens, and must start with a letter, and cannot end with a hyphen. - // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=32 // +kubebuilder:validation:Pattern=`(^[1-9][0-9]{0,31}$)|(^[a-z][a-z0-9-]{4,28}[a-z0-9]$)` @@ -178,7 +195,7 @@ type ResourceManagerTag struct { // key is the key part of the tag. A tag key can have a maximum of 63 characters and cannot be empty. // Tag key must begin and end with an alphanumeric character, and must contain only uppercase, lowercase // alphanumeric characters, and the following special characters `._-`. 
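To make the new GCP confidential-compute policies above concrete, here is a minimal, illustrative sketch (not part of the vendored diff) of a GCPMachineProviderSpec requesting AMD SEV. The machine type, region, and zone are placeholder values, and onHostMaintenance is set to "Terminate" as the field documentation requires whenever confidential compute is not Disabled:

package main

import (
	"fmt"

	machinev1beta1 "github.com/openshift/api/machine/v1beta1"
)

func main() {
	spec := machinev1beta1.GCPMachineProviderSpec{
		MachineType: "n2d-standard-4", // placeholder; n2d is one of the series documented as supporting AMD SEV
		Region:      "us-central1",    // placeholder region
		Zone:        "us-central1-a",  // placeholder zone
		// Per the field documentation, onHostMaintenance must be "Terminate" whenever
		// confidentialCompute is anything other than Disabled.
		OnHostMaintenance:   machinev1beta1.GCPHostMaintenanceType("Terminate"),
		ConfidentialCompute: machinev1beta1.ConfidentialComputePolicySEV,
	}
	fmt.Printf("confidentialCompute=%s, onHostMaintenance=%s\n", spec.ConfidentialCompute, spec.OnHostMaintenance)
}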
- // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=63 // +kubebuilder:validation:Pattern=`^[a-zA-Z0-9]([0-9A-Za-z_.-]{0,61}[a-zA-Z0-9])?$` @@ -187,7 +204,7 @@ type ResourceManagerTag struct { // value is the value part of the tag. A tag value can have a maximum of 63 characters and cannot be empty. // Tag value must begin and end with an alphanumeric character, and must contain only uppercase, lowercase // alphanumeric characters, and the following special characters `_-.@%=+:,*#&(){}[]` and spaces. - // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=63 // +kubebuilder:validation:Pattern=`^[a-zA-Z0-9]([0-9A-Za-z_.@%=+:,*#&()\[\]{}\-\s]{0,61}[a-zA-Z0-9])?$` @@ -196,48 +213,48 @@ type ResourceManagerTag struct { // GCPDisk describes disks for GCP. type GCPDisk struct { - // AutoDelete indicates if the disk will be auto-deleted when the instance is deleted (default false). + // autoDelete indicates if the disk will be auto-deleted when the instance is deleted (default false). AutoDelete bool `json:"autoDelete"` - // Boot indicates if this is a boot disk (default false). + // boot indicates if this is a boot disk (default false). Boot bool `json:"boot"` - // SizeGB is the size of the disk (in GB). + // sizeGb is the size of the disk (in GB). SizeGB int64 `json:"sizeGb"` - // Type is the type of the disk (eg: pd-standard). + // type is the type of the disk (eg: pd-standard). Type string `json:"type"` - // Image is the source image to create this disk. + // image is the source image to create this disk. Image string `json:"image"` - // Labels list of labels to apply to the disk. + // labels list of labels to apply to the disk. Labels map[string]string `json:"labels"` - // EncryptionKey is the customer-supplied encryption key of the disk. + // encryptionKey is the customer-supplied encryption key of the disk. // +optional EncryptionKey *GCPEncryptionKeyReference `json:"encryptionKey,omitempty"` } // GCPMetadata describes metadata for GCP. type GCPMetadata struct { - // Key is the metadata key. + // key is the metadata key. Key string `json:"key"` - // Value is the metadata value. + // value is the metadata value. Value *string `json:"value"` } // GCPNetworkInterface describes network interfaces for GCP type GCPNetworkInterface struct { - // PublicIP indicates if true a public IP will be used + // publicIP indicates if true a public IP will be used PublicIP bool `json:"publicIP,omitempty"` - // Network is the network name. + // network is the network name. Network string `json:"network,omitempty"` - // ProjectID is the project in which the GCP machine provider will create the VM. + // projectID is the project in which the GCP machine provider will create the VM. ProjectID string `json:"projectID,omitempty"` - // Subnetwork is the subnetwork name. + // subnetwork is the subnetwork name. Subnetwork string `json:"subnetwork,omitempty"` } // GCPServiceAccount describes service accounts for GCP. type GCPServiceAccount struct { - // Email is the service account email. + // email is the service account email. Email string `json:"email"` - // Scopes list of scopes to be assigned to the service account. + // scopes list of scopes to be assigned to the service account. 
Scopes []string `json:"scopes"` } @@ -246,7 +263,7 @@ type GCPEncryptionKeyReference struct { // KMSKeyName is the reference KMS key, in the format // +optional KMSKey *GCPKMSKeyReference `json:"kmsKey,omitempty"` - // KMSKeyServiceAccount is the service account being used for the + // kmsKeyServiceAccount is the service account being used for the // encryption request for the given KMS key. If absent, the Compute // Engine default service account is used. // See https://cloud.google.com/compute/docs/access/service-accounts#compute_engine_service_account @@ -257,23 +274,23 @@ type GCPEncryptionKeyReference struct { // GCPKMSKeyReference gathers required fields for looking up a GCP KMS Key type GCPKMSKeyReference struct { - // Name is the name of the customer managed encryption key to be used for the disk encryption. + // name is the name of the customer managed encryption key to be used for the disk encryption. Name string `json:"name"` - // KeyRing is the name of the KMS Key Ring which the KMS Key belongs to. + // keyRing is the name of the KMS Key Ring which the KMS Key belongs to. KeyRing string `json:"keyRing"` - // ProjectID is the ID of the Project in which the KMS Key Ring exists. + // projectID is the ID of the Project in which the KMS Key Ring exists. // Defaults to the VM ProjectID if not set. // +optional ProjectID string `json:"projectID,omitempty"` - // Location is the GCP location in which the Key Ring exists. + // location is the GCP location in which the Key Ring exists. Location string `json:"location"` } // GCPGPUConfig describes type and count of GPUs attached to the instance on GCP. type GCPGPUConfig struct { - // Count is the number of GPUs to be attached to an instance. + // count is the number of GPUs to be attached to an instance. Count int32 `json:"count"` - // Type is the type of GPU to be attached to an instance. + // type is the type of GPU to be attached to an instance. // Supported GPU types are: nvidia-tesla-k80, nvidia-tesla-p100, nvidia-tesla-v100, nvidia-tesla-p4, nvidia-tesla-t4 // +kubebuilder:validation:Pattern=`^nvidia-tesla-(k80|p100|v100|p4|t4)$` Type string `json:"type"` @@ -287,29 +304,31 @@ type GCPMachineProviderStatus struct { metav1.TypeMeta `json:",inline"` // +optional metav1.ObjectMeta `json:"metadata,omitempty"` - // InstanceID is the ID of the instance in GCP + // instanceId is the ID of the instance in GCP // +optional InstanceID *string `json:"instanceId,omitempty"` - // InstanceState is the provisioning state of the GCP Instance. + // instanceState is the provisioning state of the GCP Instance. // +optional InstanceState *string `json:"instanceState,omitempty"` - // Conditions is a set of conditions associated with the Machine to indicate + // conditions is a set of conditions associated with the Machine to indicate // errors or other status // +optional + // +listType=map + // +listMapKey=type Conditions []metav1.Condition `json:"conditions,omitempty"` } // GCPShieldedInstanceConfig describes the shielded VM configuration of the instance on GCP. // Shielded VM configuration allow users to enable and disable Secure Boot, vTPM, and Integrity Monitoring. type GCPShieldedInstanceConfig struct { - // SecureBoot Defines whether the instance should have secure boot enabled. + // secureBoot Defines whether the instance should have secure boot enabled. // Secure Boot verify the digital signature of all boot components, and halting the boot process if signature verification fails. 
// If omitted, the platform chooses a default, which is subject to change over time, currently that default is Disabled. // +kubebuilder:validation:Enum=Enabled;Disabled //+optional SecureBoot SecureBootPolicy `json:"secureBoot,omitempty"` - // VirtualizedTrustedPlatformModule enable virtualized trusted platform module measurements to create a known good boot integrity policy baseline. + // virtualizedTrustedPlatformModule enable virtualized trusted platform module measurements to create a known good boot integrity policy baseline. // The integrity policy baseline is used for comparison with measurements from subsequent VM boots to determine if anything has changed. // This is required to be set to "Enabled" if IntegrityMonitoring is enabled. // If omitted, the platform chooses a default, which is subject to change over time, currently that default is Enabled. @@ -317,7 +336,7 @@ type GCPShieldedInstanceConfig struct { // +optional VirtualizedTrustedPlatformModule VirtualizedTrustedPlatformModulePolicy `json:"virtualizedTrustedPlatformModule,omitempty"` - // IntegrityMonitoring determines whether the instance should have integrity monitoring that verify the runtime boot integrity. + // integrityMonitoring determines whether the instance should have integrity monitoring that verify the runtime boot integrity. // Compares the most recent boot measurements to the integrity policy baseline and return // a pair of pass/fail results depending on whether they match or not. // If omitted, the platform chooses a default, which is subject to change over time, currently that default is Enabled. diff --git a/vendor/github.com/openshift/api/machine/v1beta1/types_machine.go b/vendor/github.com/openshift/api/machine/v1beta1/types_machine.go index c8fcb192b..9bd3bdd60 100644 --- a/vendor/github.com/openshift/api/machine/v1beta1/types_machine.go +++ b/vendor/github.com/openshift/api/machine/v1beta1/types_machine.go @@ -227,7 +227,7 @@ type MachineSpec struct { // +optional ObjectMeta `json:"metadata,omitempty"` - // LifecycleHooks allow users to pause operations on the machine at + // lifecycleHooks allow users to pause operations on the machine at // certain predefined points within the machine lifecycle. // +optional LifecycleHooks LifecycleHooks `json:"lifecycleHooks,omitempty"` @@ -242,11 +242,11 @@ type MachineSpec struct { // +listType=atomic Taints []corev1.Taint `json:"taints,omitempty"` - // ProviderSpec details Provider-specific configuration to use during node creation. + // providerSpec details Provider-specific configuration to use during node creation. // +optional ProviderSpec ProviderSpec `json:"providerSpec"` - // ProviderID is the identification ID of the machine provided by the provider. + // providerID is the identification ID of the machine provided by the provider. // This field must match the provider ID as seen on the node object corresponding to this machine. // This field is required by higher level consumers of cluster-api. Example use case is cluster autoscaler // with cluster-api as provider. Clean-up logic in the autoscaler compares machines to nodes to find out @@ -277,14 +277,14 @@ type MachineSpec struct { // LifecycleHooks allow users to pause operations on the machine at // certain prefedined points within the machine lifecycle. type LifecycleHooks struct { - // PreDrain hooks prevent the machine from being drained. + // preDrain hooks prevent the machine from being drained. // This also blocks further lifecycle events, such as termination. 
// +listType=map // +listMapKey=name // +optional PreDrain []LifecycleHook `json:"preDrain,omitempty"` - // PreTerminate hooks prevent the machine from being terminated. + // preTerminate hooks prevent the machine from being terminated. // PreTerminate hooks be actioned after the Machine has been drained. // +listType=map // +listMapKey=name @@ -294,39 +294,39 @@ type LifecycleHooks struct { // LifecycleHook represents a single instance of a lifecycle hook type LifecycleHook struct { - // Name defines a unique name for the lifcycle hook. + // name defines a unique name for the lifcycle hook. // The name should be unique and descriptive, ideally 1-3 words, in CamelCase or // it may be namespaced, eg. foo.example.com/CamelCase. // Names must be unique and should only be managed by a single entity. // +kubebuilder:validation:Pattern=`^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$` // +kubebuilder:validation:MinLength=3 // +kubebuilder:validation:MaxLength=256 - // +kubebuilder:validation:Required + // +required Name string `json:"name"` - // Owner defines the owner of the lifecycle hook. + // owner defines the owner of the lifecycle hook. // This should be descriptive enough so that users can identify // who/what is responsible for blocking the lifecycle. // This could be the name of a controller (e.g. clusteroperator/etcd) // or an administrator managing the hook. // +kubebuilder:validation:MinLength=3 // +kubebuilder:validation:MaxLength=512 - // +kubebuilder:validation:Required + // +required Owner string `json:"owner"` } // MachineStatus defines the observed state of Machine // +openshift:validation:FeatureGateAwareXValidation:featureGate=MachineAPIMigration,rule="!has(oldSelf.synchronizedGeneration) || (has(self.synchronizedGeneration) && self.synchronizedGeneration >= oldSelf.synchronizedGeneration) || (oldSelf.authoritativeAPI == 'Migrating' && self.authoritativeAPI != 'Migrating')",message="synchronizedGeneration must not decrease unless authoritativeAPI is transitioning from Migrating to another value" type MachineStatus struct { - // NodeRef will point to the corresponding Node if it exists. + // nodeRef will point to the corresponding Node if it exists. // +optional NodeRef *corev1.ObjectReference `json:"nodeRef,omitempty"` - // LastUpdated identifies when this status was last observed. + // lastUpdated identifies when this status was last observed. // +optional LastUpdated *metav1.Time `json:"lastUpdated,omitempty"` - // ErrorReason will be set in the event that there is a terminal problem + // errorReason will be set in the event that there is a terminal problem // reconciling the Machine and will contain a succinct value suitable // for machine interpretation. // @@ -345,7 +345,7 @@ type MachineStatus struct { // +optional ErrorReason *MachineStatusError `json:"errorReason,omitempty"` - // ErrorMessage will be set in the event that there is a terminal problem + // errorMessage will be set in the event that there is a terminal problem // reconciling the Machine and will contain a more verbose string suitable // for logging and human consumption. // @@ -364,7 +364,7 @@ type MachineStatus struct { // +optional ErrorMessage *string `json:"errorMessage,omitempty"` - // ProviderStatus details a Provider-specific status. + // providerStatus details a Provider-specific status. // It is recommended that providers maintain their // own versioned API types that should be // serialized/deserialized from this field. 
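As a small usage sketch for the lifecycle hook fields documented above (again, not part of the vendored diff), a pre-drain hook could be attached to a MachineSpec as follows; the hook name is a hypothetical CamelCase identifier, while the owner string reuses the clusteroperator/etcd example from the field documentation:

package main

import (
	"fmt"

	machinev1beta1 "github.com/openshift/api/machine/v1beta1"
)

func main() {
	spec := machinev1beta1.MachineSpec{
		LifecycleHooks: machinev1beta1.LifecycleHooks{
			PreDrain: []machinev1beta1.LifecycleHook{
				{
					// Hypothetical hook name; must be 3-256 characters and match the
					// CamelCase / namespaced pattern required by the API.
					Name: "EtcdQuorumOperator",
					// Owner example taken from the field documentation.
					Owner: "clusteroperator/etcd",
				},
			},
		},
	}
	fmt.Printf("pre-drain hook %q owned by %q\n", spec.LifecycleHooks.PreDrain[0].Name, spec.LifecycleHooks.PreDrain[0].Owner)
}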
@@ -372,24 +372,24 @@ type MachineStatus struct { // +kubebuilder:validation:XPreserveUnknownFields ProviderStatus *runtime.RawExtension `json:"providerStatus,omitempty"` - // Addresses is a list of addresses assigned to the machine. Queried from cloud provider, if available. + // addresses is a list of addresses assigned to the machine. Queried from cloud provider, if available. // +optional // +listType=atomic Addresses []corev1.NodeAddress `json:"addresses,omitempty"` - // LastOperation describes the last-operation performed by the machine-controller. + // lastOperation describes the last-operation performed by the machine-controller. // This API should be useful as a history in terms of the latest operation performed on the // specific machine. It should also convey the state of the latest-operation for example if // it is still on-going, failed or completed successfully. // +optional LastOperation *LastOperation `json:"lastOperation,omitempty"` - // Phase represents the current phase of machine actuation. + // phase represents the current phase of machine actuation. // One of: Failed, Provisioning, Provisioned, Running, Deleting // +optional Phase *string `json:"phase,omitempty"` - // Conditions defines the current state of the Machine + // conditions defines the current state of the Machine // +listType=map // +listMapKey=type Conditions []Condition `json:"conditions,omitempty"` @@ -415,17 +415,17 @@ type MachineStatus struct { // LastOperation represents the detail of the last performed operation on the MachineObject. type LastOperation struct { - // Description is the human-readable description of the last operation. + // description is the human-readable description of the last operation. Description *string `json:"description,omitempty"` - // LastUpdated is the timestamp at which LastOperation API was last-updated. + // lastUpdated is the timestamp at which LastOperation API was last-updated. LastUpdated *metav1.Time `json:"lastUpdated,omitempty"` - // State is the current status of the last performed operation. + // state is the current status of the last performed operation. // E.g. Processing, Failed, Successful etc State *string `json:"state,omitempty"` - // Type is the type of operation which was last performed. + // type is the type of operation which was last performed. // E.g. Create, Delete, Update etc Type *string `json:"type,omitempty"` } diff --git a/vendor/github.com/openshift/api/machine/v1beta1/types_machinehealthcheck.go b/vendor/github.com/openshift/api/machine/v1beta1/types_machinehealthcheck.go index 912b7dea5..76c79acb0 100644 --- a/vendor/github.com/openshift/api/machine/v1beta1/types_machinehealthcheck.go +++ b/vendor/github.com/openshift/api/machine/v1beta1/types_machinehealthcheck.go @@ -64,7 +64,7 @@ type MachineHealthCheckSpec struct { // Note: An empty selector will match all machines. Selector metav1.LabelSelector `json:"selector"` - // UnhealthyConditions contains a list of the conditions that determine + // unhealthyConditions contains a list of the conditions that determine // whether a node is considered unhealthy. The conditions are combined in a // logical OR, i.e. if any of the conditions is met, the node is unhealthy. // @@ -96,7 +96,7 @@ type MachineHealthCheckSpec struct { // +optional NodeStartupTimeout *metav1.Duration `json:"nodeStartupTimeout,omitempty"` - // RemediationTemplate is a reference to a remediation template + // remediationTemplate is a reference to a remediation template // provided by an infrastructure provider. 
// // This field is completely optional, when filled, the MachineHealthCheck controller @@ -136,13 +136,13 @@ type MachineHealthCheckStatus struct { // +kubebuilder:validation:Minimum=0 CurrentHealthy *int `json:"currentHealthy"` - // RemediationsAllowed is the number of further remediations allowed by this machine health check before + // remediationsAllowed is the number of further remediations allowed by this machine health check before // maxUnhealthy short circuiting will be applied // +kubebuilder:validation:Minimum=0 // +optional RemediationsAllowed int32 `json:"remediationsAllowed"` - // Conditions defines the current state of the MachineHealthCheck + // conditions defines the current state of the MachineHealthCheck // +optional // +listType=map // +listMapKey=type diff --git a/vendor/github.com/openshift/api/machine/v1beta1/types_machineset.go b/vendor/github.com/openshift/api/machine/v1beta1/types_machineset.go index a14d50eb7..a29977f34 100644 --- a/vendor/github.com/openshift/api/machine/v1beta1/types_machineset.go +++ b/vendor/github.com/openshift/api/machine/v1beta1/types_machineset.go @@ -38,25 +38,25 @@ type MachineSet struct { // MachineSetSpec defines the desired state of MachineSet type MachineSetSpec struct { - // Replicas is the number of desired replicas. + // replicas is the number of desired replicas. // This is a pointer to distinguish between explicit zero and unspecified. // Defaults to 1. // +kubebuilder:default=1 Replicas *int32 `json:"replicas,omitempty"` - // MinReadySeconds is the minimum number of seconds for which a newly created machine should be ready. + // minReadySeconds is the minimum number of seconds for which a newly created machine should be ready. // Defaults to 0 (machine will be considered available as soon as it is ready) // +optional MinReadySeconds int32 `json:"minReadySeconds,omitempty"` - // DeletePolicy defines the policy used to identify nodes to delete when downscaling. + // deletePolicy defines the policy used to identify nodes to delete when downscaling. // Defaults to "Random". Valid values are "Random, "Newest", "Oldest" // +kubebuilder:validation:Enum=Random;Newest;Oldest DeletePolicy string `json:"deletePolicy,omitempty"` - // Selector is a label query over machines that should match the replica count. + // selector is a label query over machines that should match the replica count. // Label keys and values that must match in order to be controlled by this MachineSet. // It must match the machine template's labels. // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors Selector metav1.LabelSelector `json:"selector"` - // Template is the object that describes the machine that will be created if + // template is the object that describes the machine that will be created if // insufficient replicas are detected. 
// +optional Template MachineTemplateSpec `json:"template,omitempty"` @@ -113,7 +113,7 @@ type MachineTemplateSpec struct { // MachineSetStatus defines the observed state of MachineSet // +openshift:validation:FeatureGateAwareXValidation:featureGate=MachineAPIMigration,rule="!has(oldSelf.synchronizedGeneration) || (has(self.synchronizedGeneration) && self.synchronizedGeneration >= oldSelf.synchronizedGeneration) || (oldSelf.authoritativeAPI == 'Migrating' && self.authoritativeAPI != 'Migrating')",message="synchronizedGeneration must not decrease unless authoritativeAPI is transitioning from Migrating to another value" type MachineSetStatus struct { - // Replicas is the most recently observed number of replicas. + // replicas is the most recently observed number of replicas. Replicas int32 `json:"replicas"` // The number of replicas that have labels matching the labels of the machine template of the MachineSet. // +optional @@ -124,7 +124,7 @@ type MachineSetStatus struct { // The number of available replicas (ready for at least minReadySeconds) for this MachineSet. // +optional AvailableReplicas int32 `json:"availableReplicas,omitempty"` - // ObservedGeneration reflects the generation of the most recently observed MachineSet. + // observedGeneration reflects the generation of the most recently observed MachineSet. // +optional ObservedGeneration int64 `json:"observedGeneration,omitempty"` // In the event that there is a terminal problem reconciling the @@ -150,7 +150,7 @@ type MachineSetStatus struct { // +optional ErrorMessage *string `json:"errorMessage,omitempty"` - // Conditions defines the current state of the MachineSet + // conditions defines the current state of the MachineSet // +listType=map // +listMapKey=type Conditions []Condition `json:"conditions,omitempty"` diff --git a/vendor/github.com/openshift/api/machine/v1beta1/types_provider.go b/vendor/github.com/openshift/api/machine/v1beta1/types_provider.go index e9de63278..812358e89 100644 --- a/vendor/github.com/openshift/api/machine/v1beta1/types_provider.go +++ b/vendor/github.com/openshift/api/machine/v1beta1/types_provider.go @@ -11,7 +11,7 @@ type ProviderSpec struct { // No more than one of the following may be specified. - // Value is an inlined, serialized representation of the resource + // value is an inlined, serialized representation of the resource // configuration. It is recommended that providers maintain their own // versioned API types that should be serialized/deserialized from this // field, akin to component config. @@ -42,7 +42,7 @@ type ProviderSpec struct { // In future versions, controller-tools@v2 might allow overriding the type and validation for embedded // types. When that happens, this hack should be revisited. type ObjectMeta struct { - // Name must be unique within a namespace. Is required when creating resources, although + // name must be unique within a namespace. Is required when creating resources, although // some resources may allow a client to request the generation of an appropriate name // automatically. Name is primarily intended for creation idempotence and configuration // definition. @@ -51,7 +51,7 @@ type ObjectMeta struct { // +optional Name string `json:"name,omitempty"` - // GenerateName is an optional prefix, used by the server, to generate a unique + // generateName is an optional prefix, used by the server, to generate a unique // name ONLY IF the Name field has not been provided. 
// If this field is used, the name returned to the client will be different // than the name passed. This value will also be combined with a unique suffix. @@ -69,7 +69,7 @@ type ObjectMeta struct { // +optional GenerateName string `json:"generateName,omitempty"` - // Namespace defines the space within each name must be unique. An empty namespace is + // namespace defines the space within each name must be unique. An empty namespace is // equivalent to the "default" namespace, but "default" is the canonical representation. // Not all objects are required to be scoped to a namespace - the value of this field for // those objects will be empty. @@ -87,7 +87,7 @@ type ObjectMeta struct { // +optional Labels map[string]string `json:"labels,omitempty"` - // Annotations is an unstructured key value map stored with a resource that may be + // annotations is an unstructured key value map stored with a resource that may be // set by external tools to store and retrieve arbitrary metadata. They are not // queryable and should be preserved when modifying objects. // More info: http://kubernetes.io/docs/user-guide/annotations @@ -193,18 +193,17 @@ const ( // Condition defines an observation of a Machine API resource operational state. type Condition struct { - // Type of condition in CamelCase or in foo.example.com/CamelCase. + // type of condition in CamelCase or in foo.example.com/CamelCase. // Many .condition.type values are consistent across resources like Available, but because arbitrary conditions // can be useful (see .node.status.conditions), the ability to deconflict is important. // +required - // +kubebuilder:validation:Required Type ConditionType `json:"type"` - // Status of the condition, one of True, False, Unknown. + // status of the condition, one of True, False, Unknown. // +required Status corev1.ConditionStatus `json:"status"` - // Severity provides an explicit classification of Reason code, so the users or machines can immediately + // severity provides an explicit classification of Reason code, so the users or machines can immediately // understand the current situation and act accordingly. // The Severity field MUST be set only when Status=False. // +optional diff --git a/vendor/github.com/openshift/api/machine/v1beta1/types_vsphereprovider.go b/vendor/github.com/openshift/api/machine/v1beta1/types_vsphereprovider.go index f458cbf6e..fe6626f72 100644 --- a/vendor/github.com/openshift/api/machine/v1beta1/types_vsphereprovider.go +++ b/vendor/github.com/openshift/api/machine/v1beta1/types_vsphereprovider.go @@ -14,22 +14,22 @@ type VSphereMachineProviderSpec struct { metav1.TypeMeta `json:",inline"` // +optional metav1.ObjectMeta `json:"metadata,omitempty"` - // UserDataSecret contains a local reference to a secret that contains the + // userDataSecret contains a local reference to a secret that contains the // UserData to apply to the instance // +optional UserDataSecret *corev1.LocalObjectReference `json:"userDataSecret,omitempty"` - // CredentialsSecret is a reference to the secret with vSphere credentials. + // credentialsSecret is a reference to the secret with vSphere credentials. // +optional CredentialsSecret *corev1.LocalObjectReference `json:"credentialsSecret,omitempty"` - // Template is the name, inventory path, or instance UUID of the template + // template is the name, inventory path, or instance UUID of the template // used to clone new machines. Template string `json:"template"` - // Workspace describes the workspace to use for the machine. 
+ // workspace describes the workspace to use for the machine. // +optional Workspace *Workspace `json:"workspace,omitempty"` - // Network is the network configuration for this machine's VM. + // network is the network configuration for this machine's VM. Network NetworkSpec `json:"network"` - // NumCPUs is the number of virtual processors in a virtual machine. + // numCPUs is the number of virtual processors in a virtual machine. // Defaults to the analogue property value in the template from which this // machine is cloned. // +optional @@ -40,12 +40,12 @@ type VSphereMachineProviderSpec struct { // machine is cloned. // +optional NumCoresPerSocket int32 `json:"numCoresPerSocket,omitempty"` - // MemoryMiB is the size of a virtual machine's memory, in MiB. + // memoryMiB is the size of a virtual machine's memory, in MiB. // Defaults to the analogue property value in the template from which this // machine is cloned. // +optional MemoryMiB int64 `json:"memoryMiB,omitempty"` - // DiskGiB is the size of a virtual machine's disk, in GiB. + // diskGiB is the size of a virtual machine's disk, in GiB. // Defaults to the analogue property value in the template from which this // machine is cloned. // This parameter will be ignored if 'LinkedClone' CloneMode is set. @@ -57,10 +57,10 @@ type VSphereMachineProviderSpec struct { // +kubebuilder:example="urn:vmomi:InventoryServiceTag:5736bf56-49f5-4667-b38c-b97e09dc9578:GLOBAL" // +optional TagIDs []string `json:"tagIDs,omitempty"` - // Snapshot is the name of the snapshot from which the VM was cloned + // snapshot is the name of the snapshot from which the VM was cloned // +optional Snapshot string `json:"snapshot"` - // CloneMode specifies the type of clone operation. + // cloneMode specifies the type of clone operation. // The LinkedClone mode is only support for templates that have at least // one snapshot. If the template has no snapshots, then CloneMode defaults // to FullClone. @@ -70,6 +70,15 @@ type VSphereMachineProviderSpec struct { // When using LinkedClone, if no snapshots exist for the source template, falls back to FullClone. // +optional CloneMode CloneMode `json:"cloneMode,omitempty"` + // dataDisks is a list of non OS disks to be created and attached to the VM. The max number of disk allowed to be attached is + // currently 29. The max number of disks for any controller is 30, but VM template will always have OS disk so that will leave + // 29 disks on any controller type. + // +openshift:enable:FeatureGate=VSphereMultiDisk + // +optional + // +listType=map + // +listMapKey=name + // +kubebuilder:validation:MaxItems=29 + DataDisks []VSphereDisk `json:"dataDisks,omitempty"` } // CloneMode is the type of clone operation used to clone a VM from a template. @@ -89,7 +98,7 @@ const ( // NetworkSpec defines the virtual machine's network configuration. type NetworkSpec struct { - // Devices defines the virtual machine's network interfaces. + // devices defines the virtual machine's network interfaces. Devices []NetworkDeviceSpec `json:"devices"` } @@ -100,19 +109,19 @@ type AddressesFromPool struct { // This should be a fully qualified domain name, for example, externalipam.controller.io. // +kubebuilder:example=externalipam.controller.io // +kubebuilder:validation:Pattern="^$|^[a-z0-9]([-a-z0-9]*[a-z0-9])?(.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$" - // +kubebuilder:validation:Required + // +required Group string `json:"group"` // resource of the IP address pool type known to an external IPAM controller. 
// It is normally the plural form of the resource kind in lowercase, for example, // ippools. // +kubebuilder:example=ippools // +kubebuilder:validation:Pattern="^[a-z0-9]([-a-z0-9]*[a-z0-9])?$" - // +kubebuilder:validation:Required + // +required Resource string `json:"resource"` // name of an IP address pool, for example, pool-config-1. // +kubebuilder:example=pool-config-1 // +kubebuilder:validation:Pattern="^[a-z0-9]([-a-z0-9]*[a-z0-9])?$" - // +kubebuilder:validation:Required + // +required Name string `json:"name"` } @@ -172,24 +181,73 @@ type NetworkDeviceSpec struct { AddressesFromPools []AddressesFromPool `json:"addressesFromPools,omitempty"` } +// VSphereDisk describes additional disks for vSphere. +type VSphereDisk struct { + // name is used to identify the disk definition. name is required needs to be unique so that it can be used to + // clearly identify purpose of the disk. + // It must be at most 80 characters in length and must consist only of alphanumeric characters, hyphens and underscores, + // and must start and end with an alphanumeric character. + // +kubebuilder:example=images_1 + // +kubebuilder:validation:MaxLength=80 + // +kubebuilder:validation:Pattern="^[a-zA-Z0-9]([-_a-zA-Z0-9]*[a-zA-Z0-9])?$" + // +required + Name string `json:"name"` + // sizeGiB is the size of the disk in GiB. + // The maximum supported size 16384 GiB. + // +kubebuilder:validation:Minimum=1 + // +kubebuilder:validation:Maximum=16384 + // +required + SizeGiB int32 `json:"sizeGiB"` + // provisioningMode is an optional field that specifies the provisioning type to be used by this vSphere data disk. + // Allowed values are "Thin", "Thick", "EagerlyZeroed", and omitted. + // When set to Thin, the disk will be made using thin provisioning allocating the bare minimum space. + // When set to Thick, the full disk size will be allocated when disk is created. + // When set to EagerlyZeroed, the disk will be created using eager zero provisioning. An eager zeroed thick disk has all space allocated and wiped clean of any previous contents on the physical media at creation time. Such disks may take longer time during creation compared to other disk formats. + // When omitted, no setting will be applied to the data disk and the provisioning mode for the disk will be determined by the default storage policy configured for the datastore in vSphere. + // +optional + ProvisioningMode ProvisioningMode `json:"provisioningMode,omitempty"` +} + +// provisioningMode represents the various provisioning types available to a VMs disk. +// +kubebuilder:validation:Enum=Thin;Thick;EagerlyZeroed +type ProvisioningMode string + +const ( + // ProvisioningModeThin creates the disk using thin provisioning. This means a sparse (allocate on demand) + // format with additional space optimizations. + ProvisioningModeThin ProvisioningMode = "Thin" + + // ProvisioningModeThick creates the disk with all space allocated. + ProvisioningModeThick ProvisioningMode = "Thick" + + // ProvisioningModeEagerlyZeroed creates the disk using eager zero provisioning. An eager zeroed thick disk + // has all space allocated and wiped clean of any previous contents on the physical media at + // creation time. Such disks may take longer time during creation compared to other disk formats. + ProvisioningModeEagerlyZeroed ProvisioningMode = "EagerlyZeroed" +) + // WorkspaceConfig defines a workspace configuration for the vSphere cloud // provider. type Workspace struct { - // Server is the IP address or FQDN of the vSphere endpoint. 
+ // server is the IP address or FQDN of the vSphere endpoint. // +optional Server string `gcfg:"server,omitempty" json:"server,omitempty"` - // Datacenter is the datacenter in which VMs are created/located. + // datacenter is the datacenter in which VMs are created/located. // +optional Datacenter string `gcfg:"datacenter,omitempty" json:"datacenter,omitempty"` - // Folder is the folder in which VMs are created/located. + // folder is the folder in which VMs are created/located. // +optional Folder string `gcfg:"folder,omitempty" json:"folder,omitempty"` - // Datastore is the datastore in which VMs are created/located. + // datastore is the datastore in which VMs are created/located. // +optional Datastore string `gcfg:"default-datastore,omitempty" json:"datastore,omitempty"` - // ResourcePool is the resource pool in which VMs are created/located. + // resourcePool is the resource pool in which VMs are created/located. // +optional ResourcePool string `gcfg:"resourcepool-path,omitempty" json:"resourcePool,omitempty"` + // vmGroup is the cluster vm group in which virtual machines will be added for vm host group based zonal. + // +openshift:validation:featureGate=VSphereHostVMGroupZonal + // +optional + VMGroup string `gcfg:"vmGroup,omitempty" json:"vmGroup,omitempty"` } // VSphereMachineProviderStatus is the type that will be embedded in a Machine.Status.ProviderStatus field. @@ -199,16 +257,19 @@ type Workspace struct { type VSphereMachineProviderStatus struct { metav1.TypeMeta `json:",inline"` - // InstanceID is the ID of the instance in VSphere + // instanceId is the ID of the instance in VSphere // +optional InstanceID *string `json:"instanceId,omitempty"` - // InstanceState is the provisioning state of the VSphere Instance. + // instanceState is the provisioning state of the VSphere Instance. // +optional InstanceState *string `json:"instanceState,omitempty"` - // Conditions is a set of conditions associated with the Machine to indicate + // conditions is a set of conditions associated with the Machine to indicate // errors or other status + // +listType=map + // +listMapKey=type + // +optional Conditions []metav1.Condition `json:"conditions,omitempty"` - // TaskRef is a managed object reference to a Task related to the machine. + // taskRef is a managed object reference to a Task related to the machine. // This value is set automatically at runtime and should not be set or // modified by users. // +optional diff --git a/vendor/github.com/openshift/api/machine/v1beta1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/machine/v1beta1/zz_generated.deepcopy.go index d37ac11e6..ba9aae81e 100644 --- a/vendor/github.com/openshift/api/machine/v1beta1/zz_generated.deepcopy.go +++ b/vendor/github.com/openshift/api/machine/v1beta1/zz_generated.deepcopy.go @@ -1774,6 +1774,22 @@ func (in *VMDiskSecurityProfile) DeepCopy() *VMDiskSecurityProfile { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VSphereDisk) DeepCopyInto(out *VSphereDisk) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VSphereDisk. +func (in *VSphereDisk) DeepCopy() *VSphereDisk { + if in == nil { + return nil + } + out := new(VSphereDisk) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
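To illustrate the new vSphere fields above (dataDisks behind the VSphereMultiDisk feature gate and workspace vmGroup behind VSphereHostVMGroupZonal), here is a minimal sketch, not part of the vendored diff, with placeholder server, datacenter, datastore, template, and VM group names; the disk name reuses the images_1 example from the API documentation:

package main

import (
	"fmt"

	machinev1beta1 "github.com/openshift/api/machine/v1beta1"
)

func main() {
	spec := machinev1beta1.VSphereMachineProviderSpec{
		Template: "rhcos-template", // placeholder template name
		Workspace: &machinev1beta1.Workspace{
			Server:     "vcenter.example.com", // placeholder vCenter endpoint
			Datacenter: "dc1",                 // placeholder datacenter
			Datastore:  "datastore1",          // placeholder datastore
			// Only honoured when the VSphereHostVMGroupZonal feature gate is enabled.
			VMGroup: "zone-a-hosts", // placeholder cluster VM group
		},
		// Requires the VSphereMultiDisk feature gate; at most 29 entries are allowed.
		DataDisks: []machinev1beta1.VSphereDisk{
			{
				Name:             "images_1", // example name from the API docs
				SizeGiB:          100,
				ProvisioningMode: machinev1beta1.ProvisioningModeThin,
			},
		},
	}
	fmt.Printf("%d data disk(s), workspace server %s\n", len(spec.DataDisks), spec.Workspace.Server)
}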
func (in *VSphereMachineProviderSpec) DeepCopyInto(out *VSphereMachineProviderSpec) { *out = *in @@ -1800,6 +1816,11 @@ func (in *VSphereMachineProviderSpec) DeepCopyInto(out *VSphereMachineProviderSp *out = make([]string, len(*in)) copy(*out, *in) } + if in.DataDisks != nil { + in, out := &in.DataDisks, &out.DataDisks + *out = make([]VSphereDisk, len(*in)) + copy(*out, *in) + } return } diff --git a/vendor/github.com/openshift/api/machine/v1beta1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/machine/v1beta1/zz_generated.swagger_doc_generated.go index 5bba232bf..b2e55376f 100644 --- a/vendor/github.com/openshift/api/machine/v1beta1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/machine/v1beta1/zz_generated.swagger_doc_generated.go @@ -13,26 +13,27 @@ package v1beta1 // AUTO-GENERATED FUNCTIONS START HERE var map_AWSMachineProviderConfig = map[string]string{ "": "AWSMachineProviderConfig is the Schema for the awsmachineproviderconfigs API Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer).", - "ami": "AMI is the reference to the AMI from which to create the machine instance.", - "instanceType": "InstanceType is the type of instance to create. Example: m4.xlarge", - "tags": "Tags is the set of tags to add to apply to an instance, in addition to the ones added by default by the actuator. These tags are additive. The actuator will ensure these tags are present, but will not remove any other tags that may exist on the instance.", - "iamInstanceProfile": "IAMInstanceProfile is a reference to an IAM role to assign to the instance", - "userDataSecret": "UserDataSecret contains a local reference to a secret that contains the UserData to apply to the instance", - "credentialsSecret": "CredentialsSecret is a reference to the secret with AWS credentials. Otherwise, defaults to permissions provided by attached IAM role where the actuator is running.", - "keyName": "KeyName is the name of the KeyPair to use for SSH", - "deviceIndex": "DeviceIndex is the index of the device on the instance for the network interface attachment. Defaults to 0.", - "publicIp": "PublicIP specifies whether the instance should get a public IP. If not present, it should use the default of its subnet.", - "networkInterfaceType": "NetworkInterfaceType specifies the type of network interface to be used for the primary network interface. Valid values are \"ENA\", \"EFA\", and omitted, which means no opinion and the platform chooses a good default which may change over time. The current default value is \"ENA\". 
Please visit https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/efa.html to learn more about the AWS Elastic Fabric Adapter interface option.", - "securityGroups": "SecurityGroups is an array of references to security groups that should be applied to the instance.", - "subnet": "Subnet is a reference to the subnet to use for this instance", - "placement": "Placement specifies where to create the instance in AWS", - "loadBalancers": "LoadBalancers is the set of load balancers to which the new instance should be added once it is created.", - "blockDevices": "BlockDevices is the set of block device mapping associated to this instance, block device without a name will be used as a root device and only one device without a name is allowed https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/block-device-mapping-concepts.html", - "spotMarketOptions": "SpotMarketOptions allows users to configure instances to be run using AWS Spot instances.", - "metadataServiceOptions": "MetadataServiceOptions allows users to configure instance metadata service interaction options. If nothing specified, default AWS IMDS settings will be applied. https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_InstanceMetadataOptionsRequest.html", - "placementGroupName": "PlacementGroupName specifies the name of the placement group in which to launch the instance. The placement group must already be created and may use any placement strategy. When omitted, no placement group is used when creating the EC2 instance.", + "ami": "ami is the reference to the AMI from which to create the machine instance.", + "instanceType": "instanceType is the type of instance to create. Example: m4.xlarge", + "tags": "tags is the set of tags to add to apply to an instance, in addition to the ones added by default by the actuator. These tags are additive. The actuator will ensure these tags are present, but will not remove any other tags that may exist on the instance.", + "iamInstanceProfile": "iamInstanceProfile is a reference to an IAM role to assign to the instance", + "userDataSecret": "userDataSecret contains a local reference to a secret that contains the UserData to apply to the instance", + "credentialsSecret": "credentialsSecret is a reference to the secret with AWS credentials. Otherwise, defaults to permissions provided by attached IAM role where the actuator is running.", + "keyName": "keyName is the name of the KeyPair to use for SSH", + "deviceIndex": "deviceIndex is the index of the device on the instance for the network interface attachment. Defaults to 0.", + "publicIp": "publicIp specifies whether the instance should get a public IP. If not present, it should use the default of its subnet.", + "networkInterfaceType": "networkInterfaceType specifies the type of network interface to be used for the primary network interface. Valid values are \"ENA\", \"EFA\", and omitted, which means no opinion and the platform chooses a good default which may change over time. The current default value is \"ENA\". 
Please visit https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/efa.html to learn more about the AWS Elastic Fabric Adapter interface option.", + "securityGroups": "securityGroups is an array of references to security groups that should be applied to the instance.", + "subnet": "subnet is a reference to the subnet to use for this instance", + "placement": "placement specifies where to create the instance in AWS", + "loadBalancers": "loadBalancers is the set of load balancers to which the new instance should be added once it is created.", + "blockDevices": "blockDevices is the set of block device mapping associated to this instance, block device without a name will be used as a root device and only one device without a name is allowed https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/block-device-mapping-concepts.html", + "spotMarketOptions": "spotMarketOptions allows users to configure instances to be run using AWS Spot instances.", + "metadataServiceOptions": "metadataServiceOptions allows users to configure instance metadata service interaction options. If nothing specified, default AWS IMDS settings will be applied. https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_InstanceMetadataOptionsRequest.html", + "placementGroupName": "placementGroupName specifies the name of the placement group in which to launch the instance. The placement group must already be created and may use any placement strategy. When omitted, no placement group is used when creating the EC2 instance.", "placementGroupPartition": "placementGroupPartition is the partition number within the placement group in which to launch the instance. This must be an integer value between 1 and 7. It is only valid if the placement group, referred in `PlacementGroupName` was created with strategy set to partition.", "capacityReservationId": "capacityReservationId specifies the target Capacity Reservation into which the instance should be launched. The field size should be greater than 0 and the field input must start with cr-***", + "marketType": "marketType specifies the type of market for the EC2 instance. Valid values are OnDemand, Spot, CapacityBlock and omitted.\n\nDefaults to OnDemand. When SpotMarketOptions is provided, the marketType defaults to \"Spot\".\n\nWhen set to OnDemand the instance runs as a standard OnDemand instance. When set to Spot the instance runs as a Spot instance. When set to CapacityBlock the instance utilizes pre-purchased compute capacity (capacity blocks) with AWS Capacity Reservations. If this value is selected, capacityReservationID must be specified to identify the target reservation.", } func (AWSMachineProviderConfig) SwaggerDoc() map[string]string { @@ -49,9 +50,9 @@ func (AWSMachineProviderConfigList) SwaggerDoc() map[string]string { var map_AWSMachineProviderStatus = map[string]string{ "": "AWSMachineProviderStatus is the type that will be embedded in a Machine.Status.ProviderStatus field. It contains AWS-specific status information. 
Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer).", - "instanceId": "InstanceID is the instance ID of the machine created in AWS", - "instanceState": "InstanceState is the state of the AWS instance for this machine", - "conditions": "Conditions is a set of conditions associated with the Machine to indicate errors or other status", + "instanceId": "instanceId is the instance ID of the machine created in AWS", + "instanceState": "instanceState is the state of the AWS instance for this machine", + "conditions": "conditions is a set of conditions associated with the Machine to indicate errors or other status", } func (AWSMachineProviderStatus) SwaggerDoc() map[string]string { @@ -60,9 +61,9 @@ func (AWSMachineProviderStatus) SwaggerDoc() map[string]string { var map_AWSResourceReference = map[string]string{ "": "AWSResourceReference is a reference to a specific AWS resource by ID, ARN, or filters. Only one of ID, ARN or Filters may be specified. Specifying more than one will result in a validation error.", - "id": "ID of resource", - "arn": "ARN of resource", - "filters": "Filters is a set of filters used to identify a resource", + "id": "id of resource", + "arn": "arn of resource", + "filters": "filters is a set of filters used to identify a resource", } func (AWSResourceReference) SwaggerDoc() map[string]string { @@ -97,8 +98,8 @@ func (EBSBlockDeviceSpec) SwaggerDoc() map[string]string { var map_Filter = map[string]string{ "": "Filter is a filter used to identify an AWS resource", - "name": "Name of the filter. Filter names are case-sensitive.", - "values": "Values includes one or more filter values. Filter values are case-sensitive.", + "name": "name of the filter. Filter names are case-sensitive.", + "values": "values includes one or more filter values. Filter values are case-sensitive.", } func (Filter) SwaggerDoc() map[string]string { @@ -115,7 +116,7 @@ func (LoadBalancerReference) SwaggerDoc() map[string]string { var map_MetadataServiceOptions = map[string]string{ "": "MetadataServiceOptions defines the options available to a user when configuring Instance Metadata Service (IMDS) Options.", - "authentication": "Authentication determines whether or not the host requires the use of authentication when interacting with the metadata service. When using authentication, this enforces v2 interaction method (IMDSv2) with the metadata service. When omitted, this means the user has no opinion and the value is left to the platform to choose a good default, which is subject to change over time. The current default is optional. At this point this field represents `HttpTokens` parameter from `InstanceMetadataOptionsRequest` structure in AWS EC2 API https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_InstanceMetadataOptionsRequest.html", + "authentication": "authentication determines whether or not the host requires the use of authentication when interacting with the metadata service. When using authentication, this enforces v2 interaction method (IMDSv2) with the metadata service. When omitted, this means the user has no opinion and the value is left to the platform to choose a good default, which is subject to change over time. The current default is optional. 
At this point this field represents `HttpTokens` parameter from `InstanceMetadataOptionsRequest` structure in AWS EC2 API https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_InstanceMetadataOptionsRequest.html", } func (MetadataServiceOptions) SwaggerDoc() map[string]string { @@ -124,9 +125,9 @@ func (MetadataServiceOptions) SwaggerDoc() map[string]string { var map_Placement = map[string]string{ "": "Placement indicates where to create the instance in AWS", - "region": "Region is the region to use to create the instance", - "availabilityZone": "AvailabilityZone is the availability zone of the instance", - "tenancy": "Tenancy indicates if instance should run on shared or single-tenant hardware. There are supported 3 options: default, dedicated and host.", + "region": "region is the region to use to create the instance", + "availabilityZone": "availabilityZone is the availability zone of the instance", + "tenancy": "tenancy indicates if instance should run on shared or single-tenant hardware. There are supported 3 options: default, dedicated and host.", } func (Placement) SwaggerDoc() map[string]string { @@ -144,8 +145,8 @@ func (SpotMarketOptions) SwaggerDoc() map[string]string { var map_TagSpecification = map[string]string{ "": "TagSpecification is the name/value pair for a tag", - "name": "Name of the tag", - "value": "Value of the tag", + "name": "name of the tag", + "value": "value of the tag", } func (TagSpecification) SwaggerDoc() map[string]string { @@ -154,8 +155,8 @@ func (TagSpecification) SwaggerDoc() map[string]string { var map_AzureBootDiagnostics = map[string]string{ "": "AzureBootDiagnostics configures the boot diagnostics settings for the virtual machine. This allows you to configure capturing serial output from the virtual machine on boot. This is useful for debugging software based launch issues.", - "storageAccountType": "StorageAccountType determines if the storage account for storing the diagnostics data should be provisioned by Azure (AzureManaged) or by the customer (CustomerManaged).", - "customerManaged": "CustomerManaged provides reference to the customer manager storage account.", + "storageAccountType": "storageAccountType determines if the storage account for storing the diagnostics data should be provisioned by Azure (AzureManaged) or by the customer (CustomerManaged).", + "customerManaged": "customerManaged provides reference to the customer manager storage account.", } func (AzureBootDiagnostics) SwaggerDoc() map[string]string { @@ -164,7 +165,7 @@ func (AzureBootDiagnostics) SwaggerDoc() map[string]string { var map_AzureCustomerManagedBootDiagnostics = map[string]string{ "": "AzureCustomerManagedBootDiagnostics provides reference to a customer managed storage account.", - "storageAccountURI": "StorageAccountURI is the URI of the customer managed storage account. The URI typically will be `https://.blob.core.windows.net/` but may differ if you are using Azure DNS zone endpoints. You can find the correct endpoint by looking for the Blob Primary Endpoint in the endpoints tab in the Azure console.", + "storageAccountURI": "storageAccountURI is the URI of the customer managed storage account. The URI typically will be `https://.blob.core.windows.net/` but may differ if you are using Azure DNS zone endpoints. 
You can find the correct endpoint by looking for the Blob Primary Endpoint in the endpoints tab in the Azure console.", } func (AzureCustomerManagedBootDiagnostics) SwaggerDoc() map[string]string { @@ -182,33 +183,33 @@ func (AzureDiagnostics) SwaggerDoc() map[string]string { var map_AzureMachineProviderSpec = map[string]string{ "": "AzureMachineProviderSpec is the type that will be embedded in a Machine.Spec.ProviderSpec field for an Azure virtual machine. It is used by the Azure machine actuator to create a single Machine. Required parameters such as location that are not specified by this configuration, will be defaulted by the actuator. Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer).", - "userDataSecret": "UserDataSecret contains a local reference to a secret that contains the UserData to apply to the instance", - "credentialsSecret": "CredentialsSecret is a reference to the secret with Azure credentials.", - "location": "Location is the region to use to create the instance", - "vmSize": "VMSize is the size of the VM to create.", - "image": "Image is the OS image to use to create the instance.", - "osDisk": "OSDisk represents the parameters for creating the OS disk.", + "userDataSecret": "userDataSecret contains a local reference to a secret that contains the UserData to apply to the instance", + "credentialsSecret": "credentialsSecret is a reference to the secret with Azure credentials.", + "location": "location is the region to use to create the instance", + "vmSize": "vmSize is the size of the VM to create.", + "image": "image is the OS image to use to create the instance.", + "osDisk": "osDisk represents the parameters for creating the OS disk.", "dataDisks": "DataDisk specifies the parameters that are used to add one or more data disks to the machine.", - "sshPublicKey": "SSHPublicKey is the public key to use to SSH to the virtual machine.", - "publicIP": "PublicIP if true a public IP will be used", - "tags": "Tags is a list of tags to apply to the machine.", + "sshPublicKey": "sshPublicKey is the public key to use to SSH to the virtual machine.", + "publicIP": "publicIP if true a public IP will be used", + "tags": "tags is a list of tags to apply to the machine.", "securityGroup": "Network Security Group that needs to be attached to the machine's interface. No security group will be attached if empty.", "applicationSecurityGroups": "Application Security Groups that need to be attached to the machine's interface. No application security groups will be attached if zero-length.", - "subnet": "Subnet to use for this instance", - "publicLoadBalancer": "PublicLoadBalancer to use for this instance", + "subnet": "subnet to use for this instance", + "publicLoadBalancer": "publicLoadBalancer to use for this instance", "internalLoadBalancer": "InternalLoadBalancerName to use for this instance", - "natRule": "NatRule to set inbound NAT rule of the load balancer", - "managedIdentity": "ManagedIdentity to set managed identity name", - "vnet": "Vnet to set virtual network name", + "natRule": "natRule to set inbound NAT rule of the load balancer", + "managedIdentity": "managedIdentity to set managed identity name", + "vnet": "vnet to set virtual network name", "zone": "Availability Zone for the virtual machine. 
If nil, the virtual machine should be deployed to no zone", - "networkResourceGroup": "NetworkResourceGroup is the resource group for the virtual machine's network", - "resourceGroup": "ResourceGroup is the resource group for the virtual machine", - "spotVMOptions": "SpotVMOptions allows the ability to specify the Machine should use a Spot VM", - "securityProfile": "SecurityProfile specifies the Security profile settings for a virtual machine.", - "ultraSSDCapability": "UltraSSDCapability enables or disables Azure UltraSSD capability for a virtual machine. This can be used to allow/disallow binding of Azure UltraSSD to the Machine both as Data Disks or via Persistent Volumes. This Azure feature is subject to a specific scope and certain limitations. More informations on this can be found in the official Azure documentation for Ultra Disks: (https://docs.microsoft.com/en-us/azure/virtual-machines/disks-enable-ultra-ssd?tabs=azure-portal#ga-scope-and-limitations).\n\nWhen omitted, if at least one Data Disk of type UltraSSD is specified, the platform will automatically enable the capability. If a Perisistent Volume backed by an UltraSSD is bound to a Pod on the Machine, when this field is ommitted, the platform will *not* automatically enable the capability (unless already enabled by the presence of an UltraSSD as Data Disk). This may manifest in the Pod being stuck in `ContainerCreating` phase. This defaulting behaviour may be subject to change in future.\n\nWhen set to \"Enabled\", if the capability is available for the Machine based on the scope and limitations described above, the capability will be set on the Machine. This will thus allow UltraSSD both as Data Disks and Persistent Volumes. If set to \"Enabled\" when the capability can't be available due to scope and limitations, the Machine will go into \"Failed\" state.\n\nWhen set to \"Disabled\", UltraSSDs will not be allowed either as Data Disks nor as Persistent Volumes. In this case if any UltraSSDs are specified as Data Disks on a Machine, the Machine will go into a \"Failed\" state. If instead any UltraSSDs are backing the volumes (via Persistent Volumes) of any Pods scheduled on a Node which is backed by the Machine, the Pod may get stuck in `ContainerCreating` phase.", - "acceleratedNetworking": "AcceleratedNetworking enables or disables Azure accelerated networking feature. Set to false by default. If true, then this will depend on whether the requested VMSize is supported. If set to true with an unsupported VMSize, Azure will return an error.", - "availabilitySet": "AvailabilitySet specifies the availability set to use for this instance. Availability set should be precreated, before using this field.", - "diagnostics": "Diagnostics configures the diagnostics settings for the virtual machine. This allows you to configure boot diagnostics such as capturing serial output from the virtual machine on boot. This is useful for debugging software based launch issues.", + "networkResourceGroup": "networkResourceGroup is the resource group for the virtual machine's network", + "resourceGroup": "resourceGroup is the resource group for the virtual machine", + "spotVMOptions": "spotVMOptions allows the ability to specify the Machine should use a Spot VM", + "securityProfile": "securityProfile specifies the Security profile settings for a virtual machine.", + "ultraSSDCapability": "ultraSSDCapability enables or disables Azure UltraSSD capability for a virtual machine. 
This can be used to allow/disallow binding of Azure UltraSSD to the Machine both as Data Disks or via Persistent Volumes. This Azure feature is subject to a specific scope and certain limitations. More informations on this can be found in the official Azure documentation for Ultra Disks: (https://docs.microsoft.com/en-us/azure/virtual-machines/disks-enable-ultra-ssd?tabs=azure-portal#ga-scope-and-limitations).\n\nWhen omitted, if at least one Data Disk of type UltraSSD is specified, the platform will automatically enable the capability. If a Perisistent Volume backed by an UltraSSD is bound to a Pod on the Machine, when this field is ommitted, the platform will *not* automatically enable the capability (unless already enabled by the presence of an UltraSSD as Data Disk). This may manifest in the Pod being stuck in `ContainerCreating` phase. This defaulting behaviour may be subject to change in future.\n\nWhen set to \"Enabled\", if the capability is available for the Machine based on the scope and limitations described above, the capability will be set on the Machine. This will thus allow UltraSSD both as Data Disks and Persistent Volumes. If set to \"Enabled\" when the capability can't be available due to scope and limitations, the Machine will go into \"Failed\" state.\n\nWhen set to \"Disabled\", UltraSSDs will not be allowed either as Data Disks nor as Persistent Volumes. In this case if any UltraSSDs are specified as Data Disks on a Machine, the Machine will go into a \"Failed\" state. If instead any UltraSSDs are backing the volumes (via Persistent Volumes) of any Pods scheduled on a Node which is backed by the Machine, the Pod may get stuck in `ContainerCreating` phase.", + "acceleratedNetworking": "acceleratedNetworking enables or disables Azure accelerated networking feature. Set to false by default. If true, then this will depend on whether the requested VMSize is supported. If set to true with an unsupported VMSize, Azure will return an error.", + "availabilitySet": "availabilitySet specifies the availability set to use for this instance. Availability set should be precreated, before using this field.", + "diagnostics": "diagnostics configures the diagnostics settings for the virtual machine. This allows you to configure boot diagnostics such as capturing serial output from the virtual machine on boot. This is useful for debugging software based launch issues.", "capacityReservationGroupID": "capacityReservationGroupID specifies the capacity reservation group resource id that should be used for allocating the virtual machine. The field size should be greater than 0 and the field input must start with '/'. The input for capacityReservationGroupID must be similar to '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/capacityReservationGroups/{capacityReservationGroupName}'. The keys which are used should be among 'subscriptions', 'providers' and 'resourcegroups' followed by valid ID or names respectively.", } @@ -218,9 +219,9 @@ func (AzureMachineProviderSpec) SwaggerDoc() map[string]string { var map_AzureMachineProviderStatus = map[string]string{ "": "AzureMachineProviderStatus is the type that will be embedded in a Machine.Status.ProviderStatus field. It contains Azure-specific status information. 
Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer).", - "vmId": "VMID is the ID of the virtual machine created in Azure.", - "vmState": "VMState is the provisioning state of the Azure virtual machine.", - "conditions": "Conditions is a set of conditions associated with the Machine to indicate errors or other status.", + "vmId": "vmId is the ID of the virtual machine created in Azure.", + "vmState": "vmState is the provisioning state of the Azure virtual machine.", + "conditions": "conditions is a set of conditions associated with the Machine to indicate errors or other status.", } func (AzureMachineProviderStatus) SwaggerDoc() map[string]string { @@ -238,12 +239,12 @@ func (ConfidentialVM) SwaggerDoc() map[string]string { var map_DataDisk = map[string]string{ "": "DataDisk specifies the parameters that are used to add one or more data disks to the machine. A Data Disk is a managed disk that's attached to a virtual machine to store application data. It differs from an OS Disk as it doesn't come with a pre-installed OS, and it cannot contain the boot volume. It is registered as SCSI drive and labeled with the chosen `lun`. e.g. for `lun: 0` the raw disk device will be available at `/dev/disk/azure/scsi1/lun0`.\n\nAs the Data Disk disk device is attached raw to the virtual machine, it will need to be partitioned, formatted with a filesystem and mounted, in order for it to be usable. This can be done by creating a custom userdata Secret with custom Ignition configuration to achieve the desired initialization. At this stage the previously defined `lun` is to be used as the \"device\" key for referencing the raw disk device to be initialized. Once the custom userdata Secret has been created, it can be referenced in the Machine's `.providerSpec.userDataSecret`. For further guidance and examples, please refer to the official OpenShift docs.", - "nameSuffix": "NameSuffix is the suffix to be appended to the machine name to generate the disk name. Each disk name will be in format _. NameSuffix name must start and finish with an alphanumeric character and can only contain letters, numbers, underscores, periods or hyphens. The overall disk name must not exceed 80 chars in length.", - "diskSizeGB": "DiskSizeGB is the size in GB to assign to the data disk.", - "managedDisk": "ManagedDisk specifies the Managed Disk parameters for the data disk. Empty value means no opinion and the platform chooses a default, which is subject to change over time. Currently the default is a ManagedDisk with with storageAccountType: \"Premium_LRS\" and diskEncryptionSet.id: \"Default\".", - "lun": "Lun Specifies the logical unit number of the data disk. This value is used to identify data disks within the VM and therefore must be unique for each data disk attached to a VM. This value is also needed for referencing the data disks devices within userdata to perform disk initialization through Ignition (e.g. partition/format/mount). The value must be between 0 and 63.", - "cachingType": "CachingType specifies the caching requirements. Empty value means no opinion and the platform chooses a default, which is subject to change over time. Currently the default is CachingTypeNone.", - "deletionPolicy": "DeletionPolicy specifies the data disk deletion policy upon Machine deletion. Possible values are \"Delete\",\"Detach\". When \"Delete\" is used the data disk is deleted when the Machine is deleted. 
When \"Detach\" is used the data disk is detached from the Machine and retained when the Machine is deleted.", + "nameSuffix": "nameSuffix is the suffix to be appended to the machine name to generate the disk name. Each disk name will be in format _. NameSuffix name must start and finish with an alphanumeric character and can only contain letters, numbers, underscores, periods or hyphens. The overall disk name must not exceed 80 chars in length.", + "diskSizeGB": "diskSizeGB is the size in GB to assign to the data disk.", + "managedDisk": "managedDisk specifies the Managed Disk parameters for the data disk. Empty value means no opinion and the platform chooses a default, which is subject to change over time. Currently the default is a ManagedDisk with with storageAccountType: \"Premium_LRS\" and diskEncryptionSet.id: \"Default\".", + "lun": "lun Specifies the logical unit number of the data disk. This value is used to identify data disks within the VM and therefore must be unique for each data disk attached to a VM. This value is also needed for referencing the data disks devices within userdata to perform disk initialization through Ignition (e.g. partition/format/mount). The value must be between 0 and 63.", + "cachingType": "cachingType specifies the caching requirements. Empty value means no opinion and the platform chooses a default, which is subject to change over time. Currently the default is CachingTypeNone.", + "deletionPolicy": "deletionPolicy specifies the data disk deletion policy upon Machine deletion. Possible values are \"Delete\",\"Detach\". When \"Delete\" is used the data disk is deleted when the Machine is deleted. When \"Detach\" is used the data disk is detached from the Machine and retained when the Machine is deleted.", } func (DataDisk) SwaggerDoc() map[string]string { @@ -252,8 +253,8 @@ func (DataDisk) SwaggerDoc() map[string]string { var map_DataDiskManagedDiskParameters = map[string]string{ "": "DataDiskManagedDiskParameters is the parameters of a DataDisk managed disk.", - "storageAccountType": "StorageAccountType is the storage account type to use. Possible values include \"Standard_LRS\", \"Premium_LRS\" and \"UltraSSD_LRS\".", - "diskEncryptionSet": "DiskEncryptionSet is the disk encryption set properties. Empty value means no opinion and the platform chooses a default, which is subject to change over time. Currently the default is a DiskEncryptionSet with id: \"Default\".", + "storageAccountType": "storageAccountType is the storage account type to use. Possible values include \"Standard_LRS\", \"Premium_LRS\" and \"UltraSSD_LRS\".", + "diskEncryptionSet": "diskEncryptionSet is the disk encryption set properties. Empty value means no opinion and the platform chooses a default, which is subject to change over time. Currently the default is a DiskEncryptionSet with id: \"Default\".", } func (DataDiskManagedDiskParameters) SwaggerDoc() map[string]string { @@ -262,7 +263,7 @@ func (DataDiskManagedDiskParameters) SwaggerDoc() map[string]string { var map_DiskEncryptionSetParameters = map[string]string{ "": "DiskEncryptionSetParameters is the disk encryption set properties", - "id": "ID is the disk encryption set ID Empty value means no opinion and the platform chooses a default, which is subject to change over time. Currently the default is: \"Default\".", + "id": "id is the disk encryption set ID Empty value means no opinion and the platform chooses a default, which is subject to change over time. 
Currently the default is: \"Default\".", } func (DiskEncryptionSetParameters) SwaggerDoc() map[string]string { @@ -271,7 +272,7 @@ func (DiskEncryptionSetParameters) SwaggerDoc() map[string]string { var map_DiskSettings = map[string]string{ "": "DiskSettings describe ephemeral disk settings for the os disk.", - "ephemeralStorageLocation": "EphemeralStorageLocation enables ephemeral OS when set to 'Local'. Possible values include: 'Local'. See https://docs.microsoft.com/en-us/azure/virtual-machines/ephemeral-os-disks for full details. Empty value means no opinion and the platform chooses a default, which is subject to change over time. Currently the default is that disks are saved to remote Azure storage.", + "ephemeralStorageLocation": "ephemeralStorageLocation enables ephemeral OS when set to 'Local'. Possible values include: 'Local'. See https://docs.microsoft.com/en-us/azure/virtual-machines/ephemeral-os-disks for full details. Empty value means no opinion and the platform chooses a default, which is subject to change over time. Currently the default is that disks are saved to remote Azure storage.", } func (DiskSettings) SwaggerDoc() map[string]string { @@ -280,12 +281,12 @@ func (DiskSettings) SwaggerDoc() map[string]string { var map_Image = map[string]string{ "": "Image is a mirror of azure sdk compute.ImageReference", - "publisher": "Publisher is the name of the organization that created the image", - "offer": "Offer specifies the name of a group of related images created by the publisher. For example, UbuntuServer, WindowsServer", - "sku": "SKU specifies an instance of an offer, such as a major release of a distribution. For example, 18.04-LTS, 2019-Datacenter", - "version": "Version specifies the version of an image sku. The allowed formats are Major.Minor.Build or 'latest'. Major, Minor, and Build are decimal numbers. Specify 'latest' to use the latest version of an image available at deploy time. Even if you use 'latest', the VM image will not automatically update after deploy time even if a new version becomes available.", - "resourceID": "ResourceID specifies an image to use by ID", - "type": "Type identifies the source of the image and related information, such as purchase plans. Valid values are \"ID\", \"MarketplaceWithPlan\", \"MarketplaceNoPlan\", and omitted, which means no opinion and the platform chooses a good default which may change over time. Currently that default is \"MarketplaceNoPlan\" if publisher data is supplied, or \"ID\" if not. For more information about purchase plans, see: https://docs.microsoft.com/en-us/azure/virtual-machines/linux/cli-ps-findimage#check-the-purchase-plan-information", + "publisher": "publisher is the name of the organization that created the image", + "offer": "offer specifies the name of a group of related images created by the publisher. For example, UbuntuServer, WindowsServer", + "sku": "sku specifies an instance of an offer, such as a major release of a distribution. For example, 18.04-LTS, 2019-Datacenter", + "version": "version specifies the version of an image sku. The allowed formats are Major.Minor.Build or 'latest'. Major, Minor, and Build are decimal numbers. Specify 'latest' to use the latest version of an image available at deploy time. 
Even if you use 'latest', the VM image will not automatically update after deploy time even if a new version becomes available.", + "resourceID": "resourceID specifies an image to use by ID", + "type": "type identifies the source of the image and related information, such as purchase plans. Valid values are \"ID\", \"MarketplaceWithPlan\", \"MarketplaceNoPlan\", and omitted, which means no opinion and the platform chooses a good default which may change over time. Currently that default is \"MarketplaceNoPlan\" if publisher data is supplied, or \"ID\" if not. For more information about purchase plans, see: https://docs.microsoft.com/en-us/azure/virtual-machines/linux/cli-ps-findimage#check-the-purchase-plan-information", } func (Image) SwaggerDoc() map[string]string { @@ -293,11 +294,11 @@ func (Image) SwaggerDoc() map[string]string { } var map_OSDisk = map[string]string{ - "osType": "OSType is the operating system type of the OS disk. Possible values include \"Linux\" and \"Windows\".", - "managedDisk": "ManagedDisk specifies the Managed Disk parameters for the OS disk.", - "diskSizeGB": "DiskSizeGB is the size in GB to assign to the data disk.", - "diskSettings": "DiskSettings describe ephemeral disk settings for the os disk.", - "cachingType": "CachingType specifies the caching requirements. Possible values include: 'None', 'ReadOnly', 'ReadWrite'. Empty value means no opinion and the platform chooses a default, which is subject to change over time. Currently the default is `None`.", + "osType": "osType is the operating system type of the OS disk. Possible values include \"Linux\" and \"Windows\".", + "managedDisk": "managedDisk specifies the Managed Disk parameters for the OS disk.", + "diskSizeGB": "diskSizeGB is the size in GB to assign to the data disk.", + "diskSettings": "diskSettings describe ephemeral disk settings for the os disk.", + "cachingType": "cachingType specifies the caching requirements. Possible values include: 'None', 'ReadOnly', 'ReadWrite'. Empty value means no opinion and the platform chooses a default, which is subject to change over time. Currently the default is `None`.", } func (OSDisk) SwaggerDoc() map[string]string { @@ -306,8 +307,8 @@ func (OSDisk) SwaggerDoc() map[string]string { var map_OSDiskManagedDiskParameters = map[string]string{ "": "OSDiskManagedDiskParameters is the parameters of a OSDisk managed disk.", - "storageAccountType": "StorageAccountType is the storage account type to use. Possible values include \"Standard_LRS\", \"Premium_LRS\".", - "diskEncryptionSet": "DiskEncryptionSet is the disk encryption set properties", + "storageAccountType": "storageAccountType is the storage account type to use. 
Possible values include \"Standard_LRS\", \"Premium_LRS\".", + "diskEncryptionSet": "diskEncryptionSet is the disk encryption set properties", "securityProfile": "securityProfile specifies the security profile for the managed disk.", } @@ -338,7 +339,7 @@ func (SecuritySettings) SwaggerDoc() map[string]string { var map_SpotVMOptions = map[string]string{ "": "SpotVMOptions defines the options relevant to running the Machine on Spot VMs", - "maxPrice": "MaxPrice defines the maximum price the user is willing to pay for Spot VM instances", + "maxPrice": "maxPrice defines the maximum price the user is willing to pay for Spot VM instances", } func (SpotVMOptions) SwaggerDoc() map[string]string { @@ -376,13 +377,13 @@ func (VMDiskSecurityProfile) SwaggerDoc() map[string]string { var map_GCPDisk = map[string]string{ "": "GCPDisk describes disks for GCP.", - "autoDelete": "AutoDelete indicates if the disk will be auto-deleted when the instance is deleted (default false).", - "boot": "Boot indicates if this is a boot disk (default false).", - "sizeGb": "SizeGB is the size of the disk (in GB).", - "type": "Type is the type of the disk (eg: pd-standard).", - "image": "Image is the source image to create this disk.", - "labels": "Labels list of labels to apply to the disk.", - "encryptionKey": "EncryptionKey is the customer-supplied encryption key of the disk.", + "autoDelete": "autoDelete indicates if the disk will be auto-deleted when the instance is deleted (default false).", + "boot": "boot indicates if this is a boot disk (default false).", + "sizeGb": "sizeGb is the size of the disk (in GB).", + "type": "type is the type of the disk (eg: pd-standard).", + "image": "image is the source image to create this disk.", + "labels": "labels list of labels to apply to the disk.", + "encryptionKey": "encryptionKey is the customer-supplied encryption key of the disk.", } func (GCPDisk) SwaggerDoc() map[string]string { @@ -392,7 +393,7 @@ func (GCPDisk) SwaggerDoc() map[string]string { var map_GCPEncryptionKeyReference = map[string]string{ "": "GCPEncryptionKeyReference describes the encryptionKey to use for a disk's encryption.", "kmsKey": "KMSKeyName is the reference KMS key, in the format", - "kmsKeyServiceAccount": "KMSKeyServiceAccount is the service account being used for the encryption request for the given KMS key. If absent, the Compute Engine default service account is used. See https://cloud.google.com/compute/docs/access/service-accounts#compute_engine_service_account for details on the default service account.", + "kmsKeyServiceAccount": "kmsKeyServiceAccount is the service account being used for the encryption request for the given KMS key. If absent, the Compute Engine default service account is used. See https://cloud.google.com/compute/docs/access/service-accounts#compute_engine_service_account for details on the default service account.", } func (GCPEncryptionKeyReference) SwaggerDoc() map[string]string { @@ -401,8 +402,8 @@ func (GCPEncryptionKeyReference) SwaggerDoc() map[string]string { var map_GCPGPUConfig = map[string]string{ "": "GCPGPUConfig describes type and count of GPUs attached to the instance on GCP.", - "count": "Count is the number of GPUs to be attached to an instance.", - "type": "Type is the type of GPU to be attached to an instance. 
Supported GPU types are: nvidia-tesla-k80, nvidia-tesla-p100, nvidia-tesla-v100, nvidia-tesla-p4, nvidia-tesla-t4", + "count": "count is the number of GPUs to be attached to an instance.", + "type": "type is the type of GPU to be attached to an instance. Supported GPU types are: nvidia-tesla-k80, nvidia-tesla-p100, nvidia-tesla-v100, nvidia-tesla-p4, nvidia-tesla-t4", } func (GCPGPUConfig) SwaggerDoc() map[string]string { @@ -411,10 +412,10 @@ func (GCPGPUConfig) SwaggerDoc() map[string]string { var map_GCPKMSKeyReference = map[string]string{ "": "GCPKMSKeyReference gathers required fields for looking up a GCP KMS Key", - "name": "Name is the name of the customer managed encryption key to be used for the disk encryption.", - "keyRing": "KeyRing is the name of the KMS Key Ring which the KMS Key belongs to.", - "projectID": "ProjectID is the ID of the Project in which the KMS Key Ring exists. Defaults to the VM ProjectID if not set.", - "location": "Location is the GCP location in which the Key Ring exists.", + "name": "name is the name of the customer managed encryption key to be used for the disk encryption.", + "keyRing": "keyRing is the name of the KMS Key Ring which the KMS Key belongs to.", + "projectID": "projectID is the ID of the Project in which the KMS Key Ring exists. Defaults to the VM ProjectID if not set.", + "location": "location is the GCP location in which the Key Ring exists.", } func (GCPKMSKeyReference) SwaggerDoc() map[string]string { @@ -424,27 +425,27 @@ func (GCPKMSKeyReference) SwaggerDoc() map[string]string { var map_GCPMachineProviderSpec = map[string]string{ "": "GCPMachineProviderSpec is the type that will be embedded in a Machine.Spec.ProviderSpec field for an GCP virtual machine. It is used by the GCP machine actuator to create a single Machine. Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "userDataSecret": "UserDataSecret contains a local reference to a secret that contains the UserData to apply to the instance", - "credentialsSecret": "CredentialsSecret is a reference to the secret with GCP credentials.", - "canIPForward": "CanIPForward Allows this instance to send and receive packets with non-matching destination or source IPs. This is required if you plan to use this instance to forward routes.", - "deletionProtection": "DeletionProtection whether the resource should be protected against deletion.", - "disks": "Disks is a list of disks to be attached to the VM.", - "labels": "Labels list of labels to apply to the VM.", + "userDataSecret": "userDataSecret contains a local reference to a secret that contains the UserData to apply to the instance", + "credentialsSecret": "credentialsSecret is a reference to the secret with GCP credentials.", + "canIPForward": "canIPForward Allows this instance to send and receive packets with non-matching destination or source IPs. 
This is required if you plan to use this instance to forward routes.", + "deletionProtection": "deletionProtection whether the resource should be protected against deletion.", + "disks": "disks is a list of disks to be attached to the VM.", + "labels": "labels list of labels to apply to the VM.", "gcpMetadata": "Metadata key/value pairs to apply to the VM.", - "networkInterfaces": "NetworkInterfaces is a list of network interfaces to be attached to the VM.", - "serviceAccounts": "ServiceAccounts is a list of GCP service accounts to be used by the VM.", - "tags": "Tags list of network tags to apply to the VM.", - "targetPools": "TargetPools are used for network TCP/UDP load balancing. A target pool references member instances, an associated legacy HttpHealthCheck resource, and, optionally, a backup target pool", - "machineType": "MachineType is the machine type to use for the VM.", - "region": "Region is the region in which the GCP machine provider will create the VM.", - "zone": "Zone is the zone in which the GCP machine provider will create the VM.", - "projectID": "ProjectID is the project in which the GCP machine provider will create the VM.", - "gpus": "GPUs is a list of GPUs to be attached to the VM.", - "preemptible": "Preemptible indicates if created instance is preemptible.", - "onHostMaintenance": "OnHostMaintenance determines the behavior when a maintenance event occurs that might cause the instance to reboot. This is required to be set to \"Terminate\" if you want to provision machine with attached GPUs. Otherwise, allowed values are \"Migrate\" and \"Terminate\". If omitted, the platform chooses a default, which is subject to change over time, currently that default is \"Migrate\".", - "restartPolicy": "RestartPolicy determines the behavior when an instance crashes or the underlying infrastructure provider stops the instance as part of a maintenance event (default \"Always\"). Cannot be \"Always\" with preemptible instances. Otherwise, allowed values are \"Always\" and \"Never\". If omitted, the platform chooses a default, which is subject to change over time, currently that default is \"Always\". RestartPolicy represents AutomaticRestart in GCP compute api", - "shieldedInstanceConfig": "ShieldedInstanceConfig is the Shielded VM configuration for the VM", - "confidentialCompute": "confidentialCompute Defines whether the instance should have confidential compute enabled. If enabled OnHostMaintenance is required to be set to \"Terminate\". If omitted, the platform chooses a default, which is subject to change over time, currently that default is false.", + "networkInterfaces": "networkInterfaces is a list of network interfaces to be attached to the VM.", + "serviceAccounts": "serviceAccounts is a list of GCP service accounts to be used by the VM.", + "tags": "tags list of network tags to apply to the VM.", + "targetPools": "targetPools are used for network TCP/UDP load balancing. 
A target pool references member instances, an associated legacy HttpHealthCheck resource, and, optionally, a backup target pool", + "machineType": "machineType is the machine type to use for the VM.", + "region": "region is the region in which the GCP machine provider will create the VM.", + "zone": "zone is the zone in which the GCP machine provider will create the VM.", + "projectID": "projectID is the project in which the GCP machine provider will create the VM.", + "gpus": "gpus is a list of GPUs to be attached to the VM.", + "preemptible": "preemptible indicates if created instance is preemptible.", + "onHostMaintenance": "onHostMaintenance determines the behavior when a maintenance event occurs that might cause the instance to reboot. This is required to be set to \"Terminate\" if you want to provision machine with attached GPUs. Otherwise, allowed values are \"Migrate\" and \"Terminate\". If omitted, the platform chooses a default, which is subject to change over time, currently that default is \"Migrate\".", + "restartPolicy": "restartPolicy determines the behavior when an instance crashes or the underlying infrastructure provider stops the instance as part of a maintenance event (default \"Always\"). Cannot be \"Always\" with preemptible instances. Otherwise, allowed values are \"Always\" and \"Never\". If omitted, the platform chooses a default, which is subject to change over time, currently that default is \"Always\". RestartPolicy represents AutomaticRestart in GCP compute api", + "shieldedInstanceConfig": "shieldedInstanceConfig is the Shielded VM configuration for the VM", + "confidentialCompute": "confidentialCompute is an optional field defining whether the instance should have confidential compute enabled or not, and the confidential computing technology of choice. Allowed values are omitted, Disabled, Enabled, AMDEncryptedVirtualization, AMDEncryptedVirtualizationNestedPaging, and IntelTrustedDomainExtensions When set to Disabled, the machine will not be configured to be a confidential computing instance. When set to Enabled, the machine will be configured as a confidential computing instance with no preference on the confidential compute policy used. In this mode, the platform chooses a default that is subject to change over time. Currently, the default is to use AMD Secure Encrypted Virtualization. When set to AMDEncryptedVirtualization, the machine will be configured as a confidential computing instance with AMD Secure Encrypted Virtualization (AMD SEV) as the confidential computing technology. When set to AMDEncryptedVirtualizationNestedPaging, the machine will be configured as a confidential computing instance with AMD Secure Encrypted Virtualization Secure Nested Paging (AMD SEV-SNP) as the confidential computing technology. When set to IntelTrustedDomainExtensions, the machine will be configured as a confidential computing instance with Intel Trusted Domain Extensions (Intel TDX) as the confidential computing technology. If any value other than Disabled is set the selected machine type must support that specific confidential computing technology. The machine series supporting confidential computing technologies can be checked at https://cloud.google.com/confidential-computing/confidential-vm/docs/supported-configurations#all-confidential-vm-instances Currently, AMDEncryptedVirtualization is supported in c2d, n2d, and c3d machines. AMDEncryptedVirtualizationNestedPaging is supported in n2d machines. IntelTrustedDomainExtensions is supported in c3 machines. 
If any value other than Disabled is set, the selected region must support that specific confidential computing technology. The list of regions supporting confidential computing technologies can be checked at https://cloud.google.com/confidential-computing/confidential-vm/docs/supported-configurations#supported-zones If any value other than Disabled is set onHostMaintenance is required to be set to \"Terminate\". If omitted, the platform chooses a default, which is subject to change over time, currently that default is Disabled.", "resourceManagerTags": "resourceManagerTags is an optional list of tags to apply to the GCP resources created for the cluster. See https://cloud.google.com/resource-manager/docs/tags/tags-overview for information on tagging GCP resources. GCP supports a maximum of 50 tags per resource.", } @@ -454,9 +455,9 @@ func (GCPMachineProviderSpec) SwaggerDoc() map[string]string { var map_GCPMachineProviderStatus = map[string]string{ "": "GCPMachineProviderStatus is the type that will be embedded in a Machine.Status.ProviderStatus field. It contains GCP-specific status information. Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer).", - "instanceId": "InstanceID is the ID of the instance in GCP", - "instanceState": "InstanceState is the provisioning state of the GCP Instance.", - "conditions": "Conditions is a set of conditions associated with the Machine to indicate errors or other status", + "instanceId": "instanceId is the ID of the instance in GCP", + "instanceState": "instanceState is the provisioning state of the GCP Instance.", + "conditions": "conditions is a set of conditions associated with the Machine to indicate errors or other status", } func (GCPMachineProviderStatus) SwaggerDoc() map[string]string { @@ -465,8 +466,8 @@ func (GCPMachineProviderStatus) SwaggerDoc() map[string]string { var map_GCPMetadata = map[string]string{ "": "GCPMetadata describes metadata for GCP.", - "key": "Key is the metadata key.", - "value": "Value is the metadata value.", + "key": "key is the metadata key.", + "value": "value is the metadata value.", } func (GCPMetadata) SwaggerDoc() map[string]string { @@ -475,10 +476,10 @@ func (GCPMetadata) SwaggerDoc() map[string]string { var map_GCPNetworkInterface = map[string]string{ "": "GCPNetworkInterface describes network interfaces for GCP", - "publicIP": "PublicIP indicates if true a public IP will be used", - "network": "Network is the network name.", - "projectID": "ProjectID is the project in which the GCP machine provider will create the VM.", - "subnetwork": "Subnetwork is the subnetwork name.", + "publicIP": "publicIP indicates if true a public IP will be used", + "network": "network is the network name.", + "projectID": "projectID is the project in which the GCP machine provider will create the VM.", + "subnetwork": "subnetwork is the subnetwork name.", } func (GCPNetworkInterface) SwaggerDoc() map[string]string { @@ -487,8 +488,8 @@ func (GCPNetworkInterface) SwaggerDoc() map[string]string { var map_GCPServiceAccount = map[string]string{ "": "GCPServiceAccount describes service accounts for GCP.", - "email": "Email is the service account email.", - "scopes": "Scopes list of scopes to be assigned to the service account.", + "email": "email is the service account email.", + "scopes": "scopes list of scopes to be assigned to the service account.", } func (GCPServiceAccount) SwaggerDoc() map[string]string { @@ -497,9 +498,9 @@ func (GCPServiceAccount) SwaggerDoc() 
map[string]string { var map_GCPShieldedInstanceConfig = map[string]string{ "": "GCPShieldedInstanceConfig describes the shielded VM configuration of the instance on GCP. Shielded VM configuration allow users to enable and disable Secure Boot, vTPM, and Integrity Monitoring.", - "secureBoot": "SecureBoot Defines whether the instance should have secure boot enabled. Secure Boot verify the digital signature of all boot components, and halting the boot process if signature verification fails. If omitted, the platform chooses a default, which is subject to change over time, currently that default is Disabled.", - "virtualizedTrustedPlatformModule": "VirtualizedTrustedPlatformModule enable virtualized trusted platform module measurements to create a known good boot integrity policy baseline. The integrity policy baseline is used for comparison with measurements from subsequent VM boots to determine if anything has changed. This is required to be set to \"Enabled\" if IntegrityMonitoring is enabled. If omitted, the platform chooses a default, which is subject to change over time, currently that default is Enabled.", - "integrityMonitoring": "IntegrityMonitoring determines whether the instance should have integrity monitoring that verify the runtime boot integrity. Compares the most recent boot measurements to the integrity policy baseline and return a pair of pass/fail results depending on whether they match or not. If omitted, the platform chooses a default, which is subject to change over time, currently that default is Enabled.", + "secureBoot": "secureBoot Defines whether the instance should have secure boot enabled. Secure Boot verify the digital signature of all boot components, and halting the boot process if signature verification fails. If omitted, the platform chooses a default, which is subject to change over time, currently that default is Disabled.", + "virtualizedTrustedPlatformModule": "virtualizedTrustedPlatformModule enable virtualized trusted platform module measurements to create a known good boot integrity policy baseline. The integrity policy baseline is used for comparison with measurements from subsequent VM boots to determine if anything has changed. This is required to be set to \"Enabled\" if IntegrityMonitoring is enabled. If omitted, the platform chooses a default, which is subject to change over time, currently that default is Enabled.", + "integrityMonitoring": "integrityMonitoring determines whether the instance should have integrity monitoring that verify the runtime boot integrity. Compares the most recent boot measurements to the integrity policy baseline and return a pair of pass/fail results depending on whether they match or not. If omitted, the platform chooses a default, which is subject to change over time, currently that default is Enabled.", } func (GCPShieldedInstanceConfig) SwaggerDoc() map[string]string { @@ -519,10 +520,10 @@ func (ResourceManagerTag) SwaggerDoc() map[string]string { var map_LastOperation = map[string]string{ "": "LastOperation represents the detail of the last performed operation on the MachineObject.", - "description": "Description is the human-readable description of the last operation.", - "lastUpdated": "LastUpdated is the timestamp at which LastOperation API was last-updated.", - "state": "State is the current status of the last performed operation. E.g. Processing, Failed, Successful etc", - "type": "Type is the type of operation which was last performed. E.g. 
Create, Delete, Update etc", + "description": "description is the human-readable description of the last operation.", + "lastUpdated": "lastUpdated is the timestamp at which LastOperation API was last-updated.", + "state": "state is the current status of the last performed operation. E.g. Processing, Failed, Successful etc", + "type": "type is the type of operation which was last performed. E.g. Create, Delete, Update etc", } func (LastOperation) SwaggerDoc() map[string]string { @@ -531,8 +532,8 @@ func (LastOperation) SwaggerDoc() map[string]string { var map_LifecycleHook = map[string]string{ "": "LifecycleHook represents a single instance of a lifecycle hook", - "name": "Name defines a unique name for the lifcycle hook. The name should be unique and descriptive, ideally 1-3 words, in CamelCase or it may be namespaced, eg. foo.example.com/CamelCase. Names must be unique and should only be managed by a single entity.", - "owner": "Owner defines the owner of the lifecycle hook. This should be descriptive enough so that users can identify who/what is responsible for blocking the lifecycle. This could be the name of a controller (e.g. clusteroperator/etcd) or an administrator managing the hook.", + "name": "name defines a unique name for the lifcycle hook. The name should be unique and descriptive, ideally 1-3 words, in CamelCase or it may be namespaced, eg. foo.example.com/CamelCase. Names must be unique and should only be managed by a single entity.", + "owner": "owner defines the owner of the lifecycle hook. This should be descriptive enough so that users can identify who/what is responsible for blocking the lifecycle. This could be the name of a controller (e.g. clusteroperator/etcd) or an administrator managing the hook.", } func (LifecycleHook) SwaggerDoc() map[string]string { @@ -541,8 +542,8 @@ func (LifecycleHook) SwaggerDoc() map[string]string { var map_LifecycleHooks = map[string]string{ "": "LifecycleHooks allow users to pause operations on the machine at certain prefedined points within the machine lifecycle.", - "preDrain": "PreDrain hooks prevent the machine from being drained. This also blocks further lifecycle events, such as termination.", - "preTerminate": "PreTerminate hooks prevent the machine from being terminated. PreTerminate hooks be actioned after the Machine has been drained.", + "preDrain": "preDrain hooks prevent the machine from being drained. This also blocks further lifecycle events, such as termination.", + "preTerminate": "preTerminate hooks prevent the machine from being terminated. PreTerminate hooks be actioned after the Machine has been drained.", } func (LifecycleHooks) SwaggerDoc() map[string]string { @@ -570,10 +571,10 @@ func (MachineList) SwaggerDoc() map[string]string { var map_MachineSpec = map[string]string{ "": "MachineSpec defines the desired state of Machine", "metadata": "ObjectMeta will autopopulate the Node created. Use this to indicate what labels, annotations, name prefix, etc., should be used when creating the Node.", - "lifecycleHooks": "LifecycleHooks allow users to pause operations on the machine at certain predefined points within the machine lifecycle.", + "lifecycleHooks": "lifecycleHooks allow users to pause operations on the machine at certain predefined points within the machine lifecycle.", "taints": "The list of the taints to be applied to the corresponding Node in additive manner. This list will not overwrite any other taints added to the Node on an ongoing basis by other entities. 
These taints should be actively reconciled e.g. if you ask the machine controller to apply a taint and then manually remove the taint the machine controller will put it back) but not have the machine controller remove any taints", - "providerSpec": "ProviderSpec details Provider-specific configuration to use during node creation.", - "providerID": "ProviderID is the identification ID of the machine provided by the provider. This field must match the provider ID as seen on the node object corresponding to this machine. This field is required by higher level consumers of cluster-api. Example use case is cluster autoscaler with cluster-api as provider. Clean-up logic in the autoscaler compares machines to nodes to find out machines at provider which could not get registered as Kubernetes nodes. With cluster-api as a generic out-of-tree provider for autoscaler, this field is required by autoscaler to be able to have a provider view of the list of machines. Another list of nodes is queried from the k8s apiserver and then a comparison is done to find out unregistered machines and are marked for delete. This field will be set by the actuators and consumed by higher level entities like autoscaler that will be interfacing with cluster-api as generic provider.", + "providerSpec": "providerSpec details Provider-specific configuration to use during node creation.", + "providerID": "providerID is the identification ID of the machine provided by the provider. This field must match the provider ID as seen on the node object corresponding to this machine. This field is required by higher level consumers of cluster-api. Example use case is cluster autoscaler with cluster-api as provider. Clean-up logic in the autoscaler compares machines to nodes to find out machines at provider which could not get registered as Kubernetes nodes. With cluster-api as a generic out-of-tree provider for autoscaler, this field is required by autoscaler to be able to have a provider view of the list of machines. Another list of nodes is queried from the k8s apiserver and then a comparison is done to find out unregistered machines and are marked for delete. This field will be set by the actuators and consumed by higher level entities like autoscaler that will be interfacing with cluster-api as generic provider.", "authoritativeAPI": "authoritativeAPI is the API that is authoritative for this resource. Valid values are MachineAPI and ClusterAPI. When set to MachineAPI, writes to the spec of the machine.openshift.io copy of this resource will be reflected into the cluster.x-k8s.io copy. When set to ClusterAPI, writes to the spec of the cluster.x-k8s.io copy of this resource will be reflected into the machine.openshift.io copy. Updates to the status will be reflected in both copies of the resource, based on the controller implementing the functionality of the API. Currently the authoritative API determines which controller will manage the resource, this will change in a future release. 
To ensure the change has been accepted, please verify that the `status.authoritativeAPI` field has been updated to the desired value and that the `Synchronized` condition is present and set to `True`.", } @@ -583,15 +584,15 @@ func (MachineSpec) SwaggerDoc() map[string]string { var map_MachineStatus = map[string]string{ "": "MachineStatus defines the observed state of Machine", - "nodeRef": "NodeRef will point to the corresponding Node if it exists.", - "lastUpdated": "LastUpdated identifies when this status was last observed.", - "errorReason": "ErrorReason will be set in the event that there is a terminal problem reconciling the Machine and will contain a succinct value suitable for machine interpretation.\n\nThis field should not be set for transitive errors that a controller faces that are expected to be fixed automatically over time (like service outages), but instead indicate that something is fundamentally wrong with the Machine's spec or the configuration of the controller, and that manual intervention is required. Examples of terminal errors would be invalid combinations of settings in the spec, values that are unsupported by the controller, or the responsible controller itself being critically misconfigured.\n\nAny transient errors that occur during the reconciliation of Machines can be added as events to the Machine object and/or logged in the controller's output.", - "errorMessage": "ErrorMessage will be set in the event that there is a terminal problem reconciling the Machine and will contain a more verbose string suitable for logging and human consumption.\n\nThis field should not be set for transitive errors that a controller faces that are expected to be fixed automatically over time (like service outages), but instead indicate that something is fundamentally wrong with the Machine's spec or the configuration of the controller, and that manual intervention is required. Examples of terminal errors would be invalid combinations of settings in the spec, values that are unsupported by the controller, or the responsible controller itself being critically misconfigured.\n\nAny transient errors that occur during the reconciliation of Machines can be added as events to the Machine object and/or logged in the controller's output.", - "providerStatus": "ProviderStatus details a Provider-specific status. It is recommended that providers maintain their own versioned API types that should be serialized/deserialized from this field.", - "addresses": "Addresses is a list of addresses assigned to the machine. Queried from cloud provider, if available.", - "lastOperation": "LastOperation describes the last-operation performed by the machine-controller. This API should be useful as a history in terms of the latest operation performed on the specific machine. It should also convey the state of the latest-operation for example if it is still on-going, failed or completed successfully.", - "phase": "Phase represents the current phase of machine actuation. 
One of: Failed, Provisioning, Provisioned, Running, Deleting", - "conditions": "Conditions defines the current state of the Machine", + "nodeRef": "nodeRef will point to the corresponding Node if it exists.", + "lastUpdated": "lastUpdated identifies when this status was last observed.", + "errorReason": "errorReason will be set in the event that there is a terminal problem reconciling the Machine and will contain a succinct value suitable for machine interpretation.\n\nThis field should not be set for transitive errors that a controller faces that are expected to be fixed automatically over time (like service outages), but instead indicate that something is fundamentally wrong with the Machine's spec or the configuration of the controller, and that manual intervention is required. Examples of terminal errors would be invalid combinations of settings in the spec, values that are unsupported by the controller, or the responsible controller itself being critically misconfigured.\n\nAny transient errors that occur during the reconciliation of Machines can be added as events to the Machine object and/or logged in the controller's output.", + "errorMessage": "errorMessage will be set in the event that there is a terminal problem reconciling the Machine and will contain a more verbose string suitable for logging and human consumption.\n\nThis field should not be set for transitive errors that a controller faces that are expected to be fixed automatically over time (like service outages), but instead indicate that something is fundamentally wrong with the Machine's spec or the configuration of the controller, and that manual intervention is required. Examples of terminal errors would be invalid combinations of settings in the spec, values that are unsupported by the controller, or the responsible controller itself being critically misconfigured.\n\nAny transient errors that occur during the reconciliation of Machines can be added as events to the Machine object and/or logged in the controller's output.", + "providerStatus": "providerStatus details a Provider-specific status. It is recommended that providers maintain their own versioned API types that should be serialized/deserialized from this field.", + "addresses": "addresses is a list of addresses assigned to the machine. Queried from cloud provider, if available.", + "lastOperation": "lastOperation describes the last-operation performed by the machine-controller. This API should be useful as a history in terms of the latest operation performed on the specific machine. It should also convey the state of the latest-operation for example if it is still on-going, failed or completed successfully.", + "phase": "phase represents the current phase of machine actuation. One of: Failed, Provisioning, Provisioned, Running, Deleting", + "conditions": "conditions defines the current state of the Machine", "authoritativeAPI": "authoritativeAPI is the API that is authoritative for this resource. Valid values are MachineAPI, ClusterAPI and Migrating. This value is updated by the migration controller to reflect the authoritative API. Machine API and Cluster API controllers use this value to determine whether or not to reconcile the resource. When set to Migrating, the migration controller is currently performing the handover of authority from one API to the other.", "synchronizedGeneration": "synchronizedGeneration is the generation of the authoritative resource that the non-authoritative resource is synchronised with. 
This field is set when the authoritative resource is updated and the sync controller has updated the non-authoritative resource to match.", } @@ -623,10 +624,10 @@ func (MachineHealthCheckList) SwaggerDoc() map[string]string { var map_MachineHealthCheckSpec = map[string]string{ "": "MachineHealthCheckSpec defines the desired state of MachineHealthCheck", "selector": "Label selector to match machines whose health will be exercised. Note: An empty selector will match all machines.", - "unhealthyConditions": "UnhealthyConditions contains a list of the conditions that determine whether a node is considered unhealthy. The conditions are combined in a logical OR, i.e. if any of the conditions is met, the node is unhealthy.", + "unhealthyConditions": "unhealthyConditions contains a list of the conditions that determine whether a node is considered unhealthy. The conditions are combined in a logical OR, i.e. if any of the conditions is met, the node is unhealthy.", "maxUnhealthy": "Any farther remediation is only allowed if at most \"MaxUnhealthy\" machines selected by \"selector\" are not healthy. Expects either a postive integer value or a percentage value. Percentage values must be positive whole numbers and are capped at 100%. Both 0 and 0% are valid and will block all remediation.", "nodeStartupTimeout": "Machines older than this duration without a node will be considered to have failed and will be remediated. To prevent Machines without Nodes from being removed, disable startup checks by setting this value explicitly to \"0\". Expects an unsigned duration string of decimal numbers each with optional fraction and a unit suffix, eg \"300ms\", \"1.5h\" or \"2h45m\". Valid time units are \"ns\", \"us\" (or \"µs\"), \"ms\", \"s\", \"m\", \"h\".", - "remediationTemplate": "RemediationTemplate is a reference to a remediation template provided by an infrastructure provider.\n\nThis field is completely optional, when filled, the MachineHealthCheck controller creates a new object from the template referenced and hands off remediation of the machine to a controller that lives outside of Machine API Operator.", + "remediationTemplate": "remediationTemplate is a reference to a remediation template provided by an infrastructure provider.\n\nThis field is completely optional, when filled, the MachineHealthCheck controller creates a new object from the template referenced and hands off remediation of the machine to a controller that lives outside of Machine API Operator.", } func (MachineHealthCheckSpec) SwaggerDoc() map[string]string { @@ -637,8 +638,8 @@ var map_MachineHealthCheckStatus = map[string]string{ "": "MachineHealthCheckStatus defines the observed state of MachineHealthCheck", "expectedMachines": "total number of machines counted by this machine health check", "currentHealthy": "total number of machines counted by this machine health check", - "remediationsAllowed": "RemediationsAllowed is the number of further remediations allowed by this machine health check before maxUnhealthy short circuiting will be applied", - "conditions": "Conditions defines the current state of the MachineHealthCheck", + "remediationsAllowed": "remediationsAllowed is the number of further remediations allowed by this machine health check before maxUnhealthy short circuiting will be applied", + "conditions": "conditions defines the current state of the MachineHealthCheck", } func (MachineHealthCheckStatus) SwaggerDoc() map[string]string { @@ -674,11 +675,11 @@ func (MachineSetList) SwaggerDoc() map[string]string { var 
map_MachineSetSpec = map[string]string{ "": "MachineSetSpec defines the desired state of MachineSet", - "replicas": "Replicas is the number of desired replicas. This is a pointer to distinguish between explicit zero and unspecified. Defaults to 1.", - "minReadySeconds": "MinReadySeconds is the minimum number of seconds for which a newly created machine should be ready. Defaults to 0 (machine will be considered available as soon as it is ready)", - "deletePolicy": "DeletePolicy defines the policy used to identify nodes to delete when downscaling. Defaults to \"Random\". Valid values are \"Random, \"Newest\", \"Oldest\"", - "selector": "Selector is a label query over machines that should match the replica count. Label keys and values that must match in order to be controlled by this MachineSet. It must match the machine template's labels. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", - "template": "Template is the object that describes the machine that will be created if insufficient replicas are detected.", + "replicas": "replicas is the number of desired replicas. This is a pointer to distinguish between explicit zero and unspecified. Defaults to 1.", + "minReadySeconds": "minReadySeconds is the minimum number of seconds for which a newly created machine should be ready. Defaults to 0 (machine will be considered available as soon as it is ready)", + "deletePolicy": "deletePolicy defines the policy used to identify nodes to delete when downscaling. Defaults to \"Random\". Valid values are \"Random, \"Newest\", \"Oldest\"", + "selector": "selector is a label query over machines that should match the replica count. Label keys and values that must match in order to be controlled by this MachineSet. It must match the machine template's labels. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", + "template": "template is the object that describes the machine that will be created if insufficient replicas are detected.", "authoritativeAPI": "authoritativeAPI is the API that is authoritative for this resource. Valid values are MachineAPI and ClusterAPI. When set to MachineAPI, writes to the spec of the machine.openshift.io copy of this resource will be reflected into the cluster.x-k8s.io copy. When set to ClusterAPI, writes to the spec of the cluster.x-k8s.io copy of this resource will be reflected into the machine.openshift.io copy. Updates to the status will be reflected in both copies of the resource, based on the controller implementing the functionality of the API. Currently the authoritative API determines which controller will manage the resource, this will change in a future release. To ensure the change has been accepted, please verify that the `status.authoritativeAPI` field has been updated to the desired value and that the `Synchronized` condition is present and set to `True`.", } @@ -688,13 +689,13 @@ func (MachineSetSpec) SwaggerDoc() map[string]string { var map_MachineSetStatus = map[string]string{ "": "MachineSetStatus defines the observed state of MachineSet", - "replicas": "Replicas is the most recently observed number of replicas.", + "replicas": "replicas is the most recently observed number of replicas.", "fullyLabeledReplicas": "The number of replicas that have labels matching the labels of the machine template of the MachineSet.", "readyReplicas": "The number of ready replicas for this MachineSet. 
A machine is considered ready when the node has been created and is \"Ready\".", "availableReplicas": "The number of available replicas (ready for at least minReadySeconds) for this MachineSet.", - "observedGeneration": "ObservedGeneration reflects the generation of the most recently observed MachineSet.", + "observedGeneration": "observedGeneration reflects the generation of the most recently observed MachineSet.", "errorReason": "In the event that there is a terminal problem reconciling the replicas, both ErrorReason and ErrorMessage will be set. ErrorReason will be populated with a succinct value suitable for machine interpretation, while ErrorMessage will contain a more verbose string suitable for logging and human consumption.\n\nThese fields should not be set for transitive errors that a controller faces that are expected to be fixed automatically over time (like service outages), but instead indicate that something is fundamentally wrong with the MachineTemplate's spec or the configuration of the machine controller, and that manual intervention is required. Examples of terminal errors would be invalid combinations of settings in the spec, values that are unsupported by the machine controller, or the responsible machine controller itself being critically misconfigured.\n\nAny transient errors that occur during the reconciliation of Machines can be added as events to the MachineSet object and/or logged in the controller's output.", - "conditions": "Conditions defines the current state of the MachineSet", + "conditions": "conditions defines the current state of the MachineSet", "authoritativeAPI": "authoritativeAPI is the API that is authoritative for this resource. Valid values are MachineAPI, ClusterAPI and Migrating. This value is updated by the migration controller to reflect the authoritative API. Machine API and Cluster API controllers use this value to determine whether or not to reconcile the resource. When set to Migrating, the migration controller is currently performing the handover of authority from one API to the other.", "synchronizedGeneration": "synchronizedGeneration is the generation of the authoritative resource that the non-authoritative resource is synchronised with. This field is set when the authoritative resource is updated and the sync controller has updated the non-authoritative resource to match.", } @@ -715,9 +716,9 @@ func (MachineTemplateSpec) SwaggerDoc() map[string]string { var map_Condition = map[string]string{ "": "Condition defines an observation of a Machine API resource operational state.", - "type": "Type of condition in CamelCase or in foo.example.com/CamelCase. Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important.", - "status": "Status of the condition, one of True, False, Unknown.", - "severity": "Severity provides an explicit classification of Reason code, so the users or machines can immediately understand the current situation and act accordingly. The Severity field MUST be set only when Status=False.", + "type": "type of condition in CamelCase or in foo.example.com/CamelCase. 
Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important.", + "status": "status of the condition, one of True, False, Unknown.", + "severity": "severity provides an explicit classification of Reason code, so the users or machines can immediately understand the current situation and act accordingly. The Severity field MUST be set only when Status=False.", "lastTransitionTime": "Last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable.", "reason": "The reason for the condition's last transition in CamelCase. The specific API may choose whether or not this field is considered a guaranteed API. This field may not be empty.", "message": "A human readable message indicating details about the transition. This field may be empty.", @@ -729,11 +730,11 @@ func (Condition) SwaggerDoc() map[string]string { var map_ObjectMeta = map[string]string{ "": "ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. This is a copy of customizable fields from metav1.ObjectMeta.\n\nObjectMeta is embedded in `Machine.Spec`, `MachineDeployment.Template` and `MachineSet.Template`, which are not top-level Kubernetes objects. Given that metav1.ObjectMeta has lots of special cases and read-only fields which end up in the generated CRD validation, having it as a subset simplifies the API and some issues that can impact user experience.\n\nDuring the [upgrade to controller-tools@v2](https://github.com/kubernetes-sigs/cluster-api/pull/1054) for v1alpha2, we noticed a failure would occur running Cluster API test suite against the new CRDs, specifically `spec.metadata.creationTimestamp in body must be of type string: \"null\"`. The investigation showed that `controller-tools@v2` behaves differently than its previous version when handling types from [metav1](k8s.io/apimachinery/pkg/apis/meta/v1) package.\n\nIn more details, we found that embedded (non-top level) types that embedded `metav1.ObjectMeta` had validation properties, including for `creationTimestamp` (metav1.Time). The `metav1.Time` type specifies a custom json marshaller that, when IsZero() is true, returns `null` which breaks validation because the field isn't marked as nullable.\n\nIn future versions, controller-tools@v2 might allow overriding the type and validation for embedded types. When that happens, this hack should be revisited.", - "name": "Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names", - "generateName": "GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. 
The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\n\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\n\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency", - "namespace": "Namespace defines the space within each name must be unique. An empty namespace is equivalent to the \"default\" namespace, but \"default\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\n\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces", + "name": "name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names", + "generateName": "generateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\n\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\n\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency", + "namespace": "namespace defines the space within each name must be unique. An empty namespace is equivalent to the \"default\" namespace, but \"default\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\n\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces", "labels": "Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels", - "annotations": "Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations", + "annotations": "annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. 
More info: http://kubernetes.io/docs/user-guide/annotations", "ownerReferences": "List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller.", } @@ -743,7 +744,7 @@ func (ObjectMeta) SwaggerDoc() map[string]string { var map_ProviderSpec = map[string]string{ "": "ProviderSpec defines the configuration to use during node creation.", - "value": "Value is an inlined, serialized representation of the resource configuration. It is recommended that providers maintain their own versioned API types that should be serialized/deserialized from this field, akin to component config.", + "value": "value is an inlined, serialized representation of the resource configuration. It is recommended that providers maintain their own versioned API types that should be serialized/deserialized from this field, akin to component config.", } func (ProviderSpec) SwaggerDoc() map[string]string { @@ -776,27 +777,39 @@ func (NetworkDeviceSpec) SwaggerDoc() map[string]string { var map_NetworkSpec = map[string]string{ "": "NetworkSpec defines the virtual machine's network configuration.", - "devices": "Devices defines the virtual machine's network interfaces.", + "devices": "devices defines the virtual machine's network interfaces.", } func (NetworkSpec) SwaggerDoc() map[string]string { return map_NetworkSpec } +var map_VSphereDisk = map[string]string{ + "": "VSphereDisk describes additional disks for vSphere.", + "name": "name is used to identify the disk definition. name is required and needs to be unique so that it can be used to clearly identify the purpose of the disk. It must be at most 80 characters in length and must consist only of alphanumeric characters, hyphens and underscores, and must start and end with an alphanumeric character.", + "sizeGiB": "sizeGiB is the size of the disk in GiB. The maximum supported size is 16384 GiB.", + "provisioningMode": "provisioningMode is an optional field that specifies the provisioning type to be used by this vSphere data disk. Allowed values are \"Thin\", \"Thick\", \"EagerlyZeroed\", and omitted. When set to Thin, the disk will be made using thin provisioning allocating the bare minimum space. When set to Thick, the full disk size will be allocated when disk is created. When set to EagerlyZeroed, the disk will be created using eager zero provisioning. An eager zeroed thick disk has all space allocated and wiped clean of any previous contents on the physical media at creation time. Such disks may take longer time during creation compared to other disk formats. When omitted, no setting will be applied to the data disk and the provisioning mode for the disk will be determined by the default storage policy configured for the datastore in vSphere.", +} + +func (VSphereDisk) SwaggerDoc() map[string]string { + return map_VSphereDisk +} + var map_VSphereMachineProviderSpec = map[string]string{ "": "VSphereMachineProviderSpec is the type that will be embedded in a Machine.Spec.ProviderSpec field for an VSphere virtual machine. It is used by the vSphere machine actuator to create a single Machine. 
Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer).", - "userDataSecret": "UserDataSecret contains a local reference to a secret that contains the UserData to apply to the instance", - "credentialsSecret": "CredentialsSecret is a reference to the secret with vSphere credentials.", - "template": "Template is the name, inventory path, or instance UUID of the template used to clone new machines.", - "workspace": "Workspace describes the workspace to use for the machine.", - "network": "Network is the network configuration for this machine's VM.", - "numCPUs": "NumCPUs is the number of virtual processors in a virtual machine. Defaults to the analogue property value in the template from which this machine is cloned.", + "userDataSecret": "userDataSecret contains a local reference to a secret that contains the UserData to apply to the instance", + "credentialsSecret": "credentialsSecret is a reference to the secret with vSphere credentials.", + "template": "template is the name, inventory path, or instance UUID of the template used to clone new machines.", + "workspace": "workspace describes the workspace to use for the machine.", + "network": "network is the network configuration for this machine's VM.", + "numCPUs": "numCPUs is the number of virtual processors in a virtual machine. Defaults to the analogue property value in the template from which this machine is cloned.", "numCoresPerSocket": "NumCPUs is the number of cores among which to distribute CPUs in this virtual machine. Defaults to the analogue property value in the template from which this machine is cloned.", - "memoryMiB": "MemoryMiB is the size of a virtual machine's memory, in MiB. Defaults to the analogue property value in the template from which this machine is cloned.", - "diskGiB": "DiskGiB is the size of a virtual machine's disk, in GiB. Defaults to the analogue property value in the template from which this machine is cloned. This parameter will be ignored if 'LinkedClone' CloneMode is set.", + "memoryMiB": "memoryMiB is the size of a virtual machine's memory, in MiB. Defaults to the analogue property value in the template from which this machine is cloned.", + "diskGiB": "diskGiB is the size of a virtual machine's disk, in GiB. Defaults to the analogue property value in the template from which this machine is cloned. This parameter will be ignored if 'LinkedClone' CloneMode is set.", "tagIDs": "tagIDs is an optional set of tags to add to an instance. Specified tagIDs must use URN-notation instead of display names. A maximum of 10 tag IDs may be specified.", - "snapshot": "Snapshot is the name of the snapshot from which the VM was cloned", - "cloneMode": "CloneMode specifies the type of clone operation. The LinkedClone mode is only support for templates that have at least one snapshot. If the template has no snapshots, then CloneMode defaults to FullClone. When LinkedClone mode is enabled the DiskGiB field is ignored as it is not possible to expand disks of linked clones. Defaults to FullClone. When using LinkedClone, if no snapshots exist for the source template, falls back to FullClone.", + "snapshot": "snapshot is the name of the snapshot from which the VM was cloned", + "cloneMode": "cloneMode specifies the type of clone operation. The LinkedClone mode is only supported for templates that have at least one snapshot. If the template has no snapshots, then CloneMode defaults to FullClone. 
When LinkedClone mode is enabled the DiskGiB field is ignored as it is not possible to expand disks of linked clones. Defaults to FullClone. When using LinkedClone, if no snapshots exist for the source template, falls back to FullClone.", + "dataDisks": "dataDisks is a list of non-OS disks to be created and attached to the VM. The max number of disks allowed to be attached is currently 29. The max number of disks for any controller is 30, but the VM template will always have an OS disk so that will leave 29 disks on any controller type.", } func (VSphereMachineProviderSpec) SwaggerDoc() map[string]string { @@ -805,10 +818,10 @@ func (VSphereMachineProviderSpec) SwaggerDoc() map[string]string { var map_VSphereMachineProviderStatus = map[string]string{ "": "VSphereMachineProviderStatus is the type that will be embedded in a Machine.Status.ProviderStatus field. It contains VSphere-specific status information. Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer).", - "instanceId": "InstanceID is the ID of the instance in VSphere", - "instanceState": "InstanceState is the provisioning state of the VSphere Instance.", - "conditions": "Conditions is a set of conditions associated with the Machine to indicate errors or other status", - "taskRef": "TaskRef is a managed object reference to a Task related to the machine. This value is set automatically at runtime and should not be set or modified by users.", + "instanceId": "instanceId is the ID of the instance in VSphere", + "instanceState": "instanceState is the provisioning state of the VSphere Instance.", + "conditions": "conditions is a set of conditions associated with the Machine to indicate errors or other status", + "taskRef": "taskRef is a managed object reference to a Task related to the machine. 
This value is set automatically at runtime and should not be set or modified by users.", } func (VSphereMachineProviderStatus) SwaggerDoc() map[string]string { @@ -817,11 +830,12 @@ func (VSphereMachineProviderStatus) SwaggerDoc() map[string]string { var map_Workspace = map[string]string{ "": "WorkspaceConfig defines a workspace configuration for the vSphere cloud provider.", - "server": "Server is the IP address or FQDN of the vSphere endpoint.", - "datacenter": "Datacenter is the datacenter in which VMs are created/located.", - "folder": "Folder is the folder in which VMs are created/located.", - "datastore": "Datastore is the datastore in which VMs are created/located.", - "resourcePool": "ResourcePool is the resource pool in which VMs are created/located.", + "server": "server is the IP address or FQDN of the vSphere endpoint.", + "datacenter": "datacenter is the datacenter in which VMs are created/located.", + "folder": "folder is the folder in which VMs are created/located.", + "datastore": "datastore is the datastore in which VMs are created/located.", + "resourcePool": "resourcePool is the resource pool in which VMs are created/located.", + "vmGroup": "vmGroup is the cluster vm group in which virtual machines will be added for vm host group based zonal.", } func (Workspace) SwaggerDoc() map[string]string { diff --git a/vendor/github.com/openshift/api/monitoring/v1/types.go b/vendor/github.com/openshift/api/monitoring/v1/types.go index 111538ba7..faa250ed3 100644 --- a/vendor/github.com/openshift/api/monitoring/v1/types.go +++ b/vendor/github.com/openshift/api/monitoring/v1/types.go @@ -44,7 +44,7 @@ type AlertingRule struct { metav1.ObjectMeta `json:"metadata,omitempty"` // spec describes the desired state of this AlertingRule object. - // +kubebuilder:validation:Required + // +required Spec AlertingRuleSpec `json:"spec"` // status describes the current state of this AlertOverrides object. @@ -67,8 +67,8 @@ type AlertingRuleList struct { metav1.ListMeta `json:"metadata,omitempty"` // items is a list of AlertingRule objects. - // +kubebuilder:validation:Required - Items []AlertingRule `json:"items"` + // +optional + Items []AlertingRule `json:"items,omitempty"` } // AlertingRuleSpec is the desired state of an AlertingRule resource. @@ -93,7 +93,7 @@ type AlertingRuleSpec struct { // +listType=map // +listMapKey=name // +kubebuilder:validation:MinItems:=1 - // +kubebuilder:validation:Required + // +required Groups []RuleGroup `json:"groups"` } @@ -110,7 +110,7 @@ type Duration string type RuleGroup struct { // name is the name of the group. // - // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=2048 Name string `json:"name"` @@ -130,7 +130,7 @@ type RuleGroup struct { // processed sequentially, and all rules are processed. // // +kubebuilder:validation:MinItems:=1 - // +kubebuilder:validation:Required + // +required Rules []Rule `json:"rules"` } @@ -143,7 +143,7 @@ type Rule struct { // alert is the name of the alert. Must be a valid label value, i.e. may // contain any Unicode character. // - // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=2048 Alert string `json:"alert"` @@ -157,7 +157,7 @@ type Rule struct { // to create an always-firing "Watchdog" alert in order to ensure the alerting // pipeline is functional. 
// - // +kubebuilder:validation:Required + // +required Expr intstr.IntOrString `json:"expr"` // for is the time period after which alerts are considered firing after first @@ -210,7 +210,7 @@ type PrometheusRuleRef struct { // the reference should we ever need to. // name of the referenced PrometheusRule. - // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=2048 Name string `json:"name"` @@ -238,7 +238,7 @@ type AlertRelabelConfig struct { metav1.ObjectMeta `json:"metadata,omitempty"` // spec describes the desired state of this AlertRelabelConfig object. - // +kubebuilder:validation:Required + // +required Spec AlertRelabelConfigSpec `json:"spec"` // status describes the current state of this AlertRelabelConfig object. @@ -254,7 +254,7 @@ type AlertRelabelConfigSpec struct { // configs is a list of sequentially evaluated alert relabel configs. // // +kubebuilder:validation:MinItems:=1 - // +kubebuilder:validation:Required + // +required Configs []RelabelConfig `json:"configs"` } @@ -264,6 +264,8 @@ type AlertRelabelConfigStatus struct { // empty. // // +optional + // +listType=map + // +listMapKey=type Conditions []metav1.Condition `json:"conditions,omitempty"` } @@ -285,9 +287,8 @@ type AlertRelabelConfigList struct { metav1.ListMeta `json:"metadata,omitempty"` // items is a list of AlertRelabelConfigs. - // +kubebuilder:validation:MinItems:=1 - // +kubebuilder:validation:Required - Items []*AlertRelabelConfig `json:"items"` + // +optional + Items []AlertRelabelConfig `json:"items,omitempty"` } // LabelName is a valid Prometheus label name which may only contain ASCII diff --git a/vendor/github.com/openshift/api/monitoring/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/monitoring/v1/zz_generated.deepcopy.go index cb472ccf5..67af0e082 100644 --- a/vendor/github.com/openshift/api/monitoring/v1/zz_generated.deepcopy.go +++ b/vendor/github.com/openshift/api/monitoring/v1/zz_generated.deepcopy.go @@ -45,13 +45,9 @@ func (in *AlertRelabelConfigList) DeepCopyInto(out *AlertRelabelConfigList) { in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items - *out = make([]*AlertRelabelConfig, len(*in)) + *out = make([]AlertRelabelConfig, len(*in)) for i := range *in { - if (*in)[i] != nil { - in, out := &(*in)[i], &(*out)[i] - *out = new(AlertRelabelConfig) - (*in).DeepCopyInto(*out) - } + (*in)[i].DeepCopyInto(&(*out)[i]) } } return diff --git a/vendor/github.com/openshift/api/network/v1/generated.proto b/vendor/github.com/openshift/api/network/v1/generated.proto index a429f04c0..4fc68a974 100644 --- a/vendor/github.com/openshift/api/network/v1/generated.proto +++ b/vendor/github.com/openshift/api/network/v1/generated.proto @@ -29,39 +29,37 @@ message ClusterNetwork { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // Network is a CIDR string specifying the global overlay network's L3 space + // network is a CIDR string specifying the global overlay network's L3 space // +kubebuilder:validation:Pattern=`^(([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])/([0-9]|[12][0-9]|3[0-2])$` optional string network = 2; - // HostSubnetLength is the number of bits of network to allocate to each node. 
eg, 8 would mean that each node would have a /24 slice of the overlay network for its pods + // hostsubnetlength is the number of bits of network to allocate to each node. eg, 8 would mean that each node would have a /24 slice of the overlay network for its pods // +kubebuilder:validation:Minimum=2 // +kubebuilder:validation:Maximum=30 optional uint32 hostsubnetlength = 3; - // ServiceNetwork is the CIDR range that Service IP addresses are allocated from + // serviceNetwork is the CIDR range that Service IP addresses are allocated from // +kubebuilder:validation:Pattern=`^(([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])/([0-9]|[12][0-9]|3[0-2])$` optional string serviceNetwork = 4; - // PluginName is the name of the network plugin being used + // pluginName is the name of the network plugin being used optional string pluginName = 5; - // ClusterNetworks is a list of ClusterNetwork objects that defines the global overlay network's L3 space by specifying a set of CIDR and netmasks that the SDN can allocate addresses from. + // clusterNetworks is a list of ClusterNetwork objects that defines the global overlay network's L3 space by specifying a set of CIDR and netmasks that the SDN can allocate addresses from. repeated ClusterNetworkEntry clusterNetworks = 6; - // VXLANPort sets the VXLAN destination port used by the cluster. + // vxlanPort sets the VXLAN destination port used by the cluster. // It is set by the master configuration file on startup and cannot be edited manually. // Valid values for VXLANPort are integers 1-65535 inclusive and if unset defaults to 4789. // Changing VXLANPort allows users to resolve issues between openshift SDN and other software trying to use the same VXLAN destination port. // +kubebuilder:validation:Minimum=1 // +kubebuilder:validation:Maximum=65535 - // +kubebuilder:validation:Optional // +optional optional uint32 vxlanPort = 7; - // MTU is the MTU for the overlay network. This should be 50 less than the MTU of the network connecting the nodes. It is normally autodetected by the cluster network operator. + // mtu is the MTU for the overlay network. This should be 50 less than the MTU of the network connecting the nodes. It is normally autodetected by the cluster network operator. // +kubebuilder:validation:Minimum=576 // +kubebuilder:validation:Maximum=65536 - // +kubebuilder:validation:Optional // +optional optional uint32 mtu = 8; } @@ -72,7 +70,7 @@ message ClusterNetworkEntry { // +kubebuilder:validation:Pattern=`^(([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])/([0-9]|[12][0-9]|3[0-2])$` optional string cidr = 1; - // HostSubnetLength is the number of bits of the accompanying CIDR address to allocate to each node. eg, 8 would mean that each node would have a /24 slice of the overlay network for its pods. + // hostSubnetLength is the number of bits of the accompanying CIDR address to allocate to each node. eg, 8 would mean that each node would have a /24 slice of the overlay network for its pods. 
// +kubebuilder:validation:Minimum=2 // +kubebuilder:validation:Maximum=30 optional uint32 hostSubnetLength = 2; @@ -87,7 +85,7 @@ message ClusterNetworkList { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // Items is the list of cluster networks + // items is the list of cluster networks repeated ClusterNetwork items = 2; } @@ -125,14 +123,14 @@ message EgressNetworkPolicyList { // EgressNetworkPolicyPeer specifies a target to apply egress network policy to message EgressNetworkPolicyPeer { - // CIDRSelector is the CIDR range to allow/deny traffic to. If this is set, dnsName must be unset + // cidrSelector is the CIDR range to allow/deny traffic to. If this is set, dnsName must be unset // Ideally we would have liked to use the cidr openapi format for this property. // But openshift-sdn only supports v4 while specifying the cidr format allows both v4 and v6 cidrs // We are therefore using a regex pattern to validate instead. // +kubebuilder:validation:Pattern=`^(([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])/([0-9]|[12][0-9]|3[0-2])$` optional string cidrSelector = 1; - // DNSName is the domain name to allow/deny traffic to. If this is set, cidrSelector must be unset + // dnsName is the domain name to allow/deny traffic to. If this is set, cidrSelector must be unset // +kubebuilder:validation:Pattern=`^([A-Za-z0-9-]+\.)*[A-Za-z0-9-]+\.?$` optional string dnsName = 2; } @@ -172,25 +170,25 @@ message HostSubnet { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // Host is the name of the node. (This is the same as the object's name, but both fields must be set.) + // host is the name of the node. (This is the same as the object's name, but both fields must be set.) // +kubebuilder:validation:Pattern=`^[a-z0-9.-]+$` optional string host = 2; - // HostIP is the IP address to be used as a VTEP by other nodes in the overlay network + // hostIP is the IP address to be used as a VTEP by other nodes in the overlay network // +kubebuilder:validation:Pattern=`^(([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])$` optional string hostIP = 3; - // Subnet is the CIDR range of the overlay network assigned to the node for its pods + // subnet is the CIDR range of the overlay network assigned to the node for its pods // +kubebuilder:validation:Pattern=`^(([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])/([0-9]|[12][0-9]|3[0-2])$` optional string subnet = 4; - // EgressIPs is the list of automatic egress IP addresses currently hosted by this node. + // egressIPs is the list of automatic egress IP addresses currently hosted by this node. // If EgressCIDRs is empty, this can be set by hand; if EgressCIDRs is set then the // master will overwrite the value here with its own allocation of egress IPs. // +optional repeated string egressIPs = 5; - // EgressCIDRs is the list of CIDR ranges available for automatically assigning + // egressCIDRs is the list of CIDR ranges available for automatically assigning // egress IPs to this node from. If this field is set then EgressIPs should be // treated as read-only. 
// +optional @@ -206,7 +204,7 @@ message HostSubnetList { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // Items is the list of host subnets + // items is the list of host subnets repeated HostSubnet items = 2; } @@ -227,16 +225,16 @@ message NetNamespace { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // NetName is the name of the network namespace. (This is the same as the object's name, but both fields must be set.) + // netname is the name of the network namespace. (This is the same as the object's name, but both fields must be set.) // +kubebuilder:validation:Pattern=`^[a-z0-9.-]+$` optional string netname = 2; - // NetID is the network identifier of the network namespace assigned to each overlay network packet. This can be manipulated with the "oc adm pod-network" commands. + // netid is the network identifier of the network namespace assigned to each overlay network packet. This can be manipulated with the "oc adm pod-network" commands. // +kubebuilder:validation:Minimum=0 // +kubebuilder:validation:Maximum=16777215 optional uint32 netid = 3; - // EgressIPs is a list of reserved IPs that will be used as the source for external traffic coming from pods in this namespace. + // egressIPs is a list of reserved IPs that will be used as the source for external traffic coming from pods in this namespace. // (If empty, external traffic will be masqueraded to Node IPs.) // +optional repeated string egressIPs = 4; @@ -251,7 +249,7 @@ message NetNamespaceList { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // Items is the list of net namespaces + // items is the list of net namespaces repeated NetNamespace items = 2; } diff --git a/vendor/github.com/openshift/api/network/v1/types.go b/vendor/github.com/openshift/api/network/v1/types.go index 89015cf6b..779080213 100644 --- a/vendor/github.com/openshift/api/network/v1/types.go +++ b/vendor/github.com/openshift/api/network/v1/types.go @@ -32,39 +32,37 @@ type ClusterNetwork struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Network is a CIDR string specifying the global overlay network's L3 space + // network is a CIDR string specifying the global overlay network's L3 space // +kubebuilder:validation:Pattern=`^(([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])/([0-9]|[12][0-9]|3[0-2])$` Network string `json:"network,omitempty" protobuf:"bytes,2,opt,name=network"` - // HostSubnetLength is the number of bits of network to allocate to each node. eg, 8 would mean that each node would have a /24 slice of the overlay network for its pods + // hostsubnetlength is the number of bits of network to allocate to each node. 
eg, 8 would mean that each node would have a /24 slice of the overlay network for its pods // +kubebuilder:validation:Minimum=2 // +kubebuilder:validation:Maximum=30 HostSubnetLength uint32 `json:"hostsubnetlength,omitempty" protobuf:"varint,3,opt,name=hostsubnetlength"` - // ServiceNetwork is the CIDR range that Service IP addresses are allocated from + // serviceNetwork is the CIDR range that Service IP addresses are allocated from // +kubebuilder:validation:Pattern=`^(([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])/([0-9]|[12][0-9]|3[0-2])$` ServiceNetwork string `json:"serviceNetwork" protobuf:"bytes,4,opt,name=serviceNetwork"` - // PluginName is the name of the network plugin being used + // pluginName is the name of the network plugin being used PluginName string `json:"pluginName,omitempty" protobuf:"bytes,5,opt,name=pluginName"` - // ClusterNetworks is a list of ClusterNetwork objects that defines the global overlay network's L3 space by specifying a set of CIDR and netmasks that the SDN can allocate addresses from. + // clusterNetworks is a list of ClusterNetwork objects that defines the global overlay network's L3 space by specifying a set of CIDR and netmasks that the SDN can allocate addresses from. ClusterNetworks []ClusterNetworkEntry `json:"clusterNetworks" protobuf:"bytes,6,rep,name=clusterNetworks"` - // VXLANPort sets the VXLAN destination port used by the cluster. + // vxlanPort sets the VXLAN destination port used by the cluster. // It is set by the master configuration file on startup and cannot be edited manually. // Valid values for VXLANPort are integers 1-65535 inclusive and if unset defaults to 4789. // Changing VXLANPort allows users to resolve issues between openshift SDN and other software trying to use the same VXLAN destination port. // +kubebuilder:validation:Minimum=1 // +kubebuilder:validation:Maximum=65535 - // +kubebuilder:validation:Optional // +optional VXLANPort *uint32 `json:"vxlanPort,omitempty" protobuf:"varint,7,opt,name=vxlanPort"` - // MTU is the MTU for the overlay network. This should be 50 less than the MTU of the network connecting the nodes. It is normally autodetected by the cluster network operator. + // mtu is the MTU for the overlay network. This should be 50 less than the MTU of the network connecting the nodes. It is normally autodetected by the cluster network operator. // +kubebuilder:validation:Minimum=576 // +kubebuilder:validation:Maximum=65536 - // +kubebuilder:validation:Optional // +optional MTU *uint32 `json:"mtu,omitempty" protobuf:"varint,8,opt,name=mtu"` } @@ -75,7 +73,7 @@ type ClusterNetworkEntry struct { // +kubebuilder:validation:Pattern=`^(([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])/([0-9]|[12][0-9]|3[0-2])$` CIDR string `json:"CIDR" protobuf:"bytes,1,opt,name=cidr"` - // HostSubnetLength is the number of bits of the accompanying CIDR address to allocate to each node. eg, 8 would mean that each node would have a /24 slice of the overlay network for its pods. + // hostSubnetLength is the number of bits of the accompanying CIDR address to allocate to each node. eg, 8 would mean that each node would have a /24 slice of the overlay network for its pods. 
// +kubebuilder:validation:Minimum=2 // +kubebuilder:validation:Maximum=30 HostSubnetLength uint32 `json:"hostSubnetLength" protobuf:"varint,2,opt,name=hostSubnetLength"` @@ -94,7 +92,7 @@ type ClusterNetworkList struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Items is the list of cluster networks + // items is the list of cluster networks Items []ClusterNetwork `json:"items" protobuf:"bytes,2,rep,name=items"` } @@ -134,25 +132,25 @@ type HostSubnet struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Host is the name of the node. (This is the same as the object's name, but both fields must be set.) + // host is the name of the node. (This is the same as the object's name, but both fields must be set.) // +kubebuilder:validation:Pattern=`^[a-z0-9.-]+$` Host string `json:"host" protobuf:"bytes,2,opt,name=host"` - // HostIP is the IP address to be used as a VTEP by other nodes in the overlay network + // hostIP is the IP address to be used as a VTEP by other nodes in the overlay network // +kubebuilder:validation:Pattern=`^(([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])$` HostIP string `json:"hostIP" protobuf:"bytes,3,opt,name=hostIP"` - // Subnet is the CIDR range of the overlay network assigned to the node for its pods + // subnet is the CIDR range of the overlay network assigned to the node for its pods // +kubebuilder:validation:Pattern=`^(([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])/([0-9]|[12][0-9]|3[0-2])$` Subnet string `json:"subnet" protobuf:"bytes,4,opt,name=subnet"` - // EgressIPs is the list of automatic egress IP addresses currently hosted by this node. + // egressIPs is the list of automatic egress IP addresses currently hosted by this node. // If EgressCIDRs is empty, this can be set by hand; if EgressCIDRs is set then the // master will overwrite the value here with its own allocation of egress IPs. // +optional EgressIPs []HostSubnetEgressIP `json:"egressIPs,omitempty" protobuf:"bytes,5,rep,name=egressIPs"` - // EgressCIDRs is the list of CIDR ranges available for automatically assigning + // egressCIDRs is the list of CIDR ranges available for automatically assigning // egress IPs to this node from. If this field is set then EgressIPs should be // treated as read-only. // +optional @@ -172,7 +170,7 @@ type HostSubnetList struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Items is the list of host subnets + // items is the list of host subnets Items []HostSubnet `json:"items" protobuf:"bytes,2,rep,name=items"` } @@ -204,16 +202,16 @@ type NetNamespace struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // NetName is the name of the network namespace. (This is the same as the object's name, but both fields must be set.) + // netname is the name of the network namespace. (This is the same as the object's name, but both fields must be set.) 
// +kubebuilder:validation:Pattern=`^[a-z0-9.-]+$` NetName string `json:"netname" protobuf:"bytes,2,opt,name=netname"` - // NetID is the network identifier of the network namespace assigned to each overlay network packet. This can be manipulated with the "oc adm pod-network" commands. + // netid is the network identifier of the network namespace assigned to each overlay network packet. This can be manipulated with the "oc adm pod-network" commands. // +kubebuilder:validation:Minimum=0 // +kubebuilder:validation:Maximum=16777215 NetID uint32 `json:"netid" protobuf:"varint,3,opt,name=netid"` - // EgressIPs is a list of reserved IPs that will be used as the source for external traffic coming from pods in this namespace. + // egressIPs is a list of reserved IPs that will be used as the source for external traffic coming from pods in this namespace. // (If empty, external traffic will be masqueraded to Node IPs.) // +optional EgressIPs []NetNamespaceEgressIP `json:"egressIPs,omitempty" protobuf:"bytes,4,rep,name=egressIPs"` @@ -232,7 +230,7 @@ type NetNamespaceList struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Items is the list of net namespaces + // items is the list of net namespaces Items []NetNamespace `json:"items" protobuf:"bytes,2,rep,name=items"` } @@ -247,13 +245,13 @@ const ( // EgressNetworkPolicyPeer specifies a target to apply egress network policy to type EgressNetworkPolicyPeer struct { - // CIDRSelector is the CIDR range to allow/deny traffic to. If this is set, dnsName must be unset + // cidrSelector is the CIDR range to allow/deny traffic to. If this is set, dnsName must be unset // Ideally we would have liked to use the cidr openapi format for this property. // But openshift-sdn only supports v4 while specifying the cidr format allows both v4 and v6 cidrs // We are therefore using a regex pattern to validate instead. // +kubebuilder:validation:Pattern=`^(([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])/([0-9]|[12][0-9]|3[0-2])$` CIDRSelector string `json:"cidrSelector,omitempty" protobuf:"bytes,1,rep,name=cidrSelector"` - // DNSName is the domain name to allow/deny traffic to. If this is set, cidrSelector must be unset + // dnsName is the domain name to allow/deny traffic to. If this is set, cidrSelector must be unset // +kubebuilder:validation:Pattern=`^([A-Za-z0-9-]+\.)*[A-Za-z0-9-]+\.?$` DNSName string `json:"dnsName,omitempty" protobuf:"bytes,2,rep,name=dnsName"` } diff --git a/vendor/github.com/openshift/api/network/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/network/v1/zz_generated.swagger_doc_generated.go index 743ddeab5..a0e124096 100644 --- a/vendor/github.com/openshift/api/network/v1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/network/v1/zz_generated.swagger_doc_generated.go @@ -14,13 +14,13 @@ package v1 var map_ClusterNetwork = map[string]string{ "": "ClusterNetwork was used by OpenShift SDN. DEPRECATED: OpenShift SDN is no longer supported and this object is no longer used in any way by OpenShift.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "network": "Network is a CIDR string specifying the global overlay network's L3 space", - "hostsubnetlength": "HostSubnetLength is the number of bits of network to allocate to each node. eg, 8 would mean that each node would have a /24 slice of the overlay network for its pods", - "serviceNetwork": "ServiceNetwork is the CIDR range that Service IP addresses are allocated from", - "pluginName": "PluginName is the name of the network plugin being used", - "clusterNetworks": "ClusterNetworks is a list of ClusterNetwork objects that defines the global overlay network's L3 space by specifying a set of CIDR and netmasks that the SDN can allocate addresses from.", - "vxlanPort": "VXLANPort sets the VXLAN destination port used by the cluster. It is set by the master configuration file on startup and cannot be edited manually. Valid values for VXLANPort are integers 1-65535 inclusive and if unset defaults to 4789. Changing VXLANPort allows users to resolve issues between openshift SDN and other software trying to use the same VXLAN destination port.", - "mtu": "MTU is the MTU for the overlay network. This should be 50 less than the MTU of the network connecting the nodes. It is normally autodetected by the cluster network operator.", + "network": "network is a CIDR string specifying the global overlay network's L3 space", + "hostsubnetlength": "hostsubnetlength is the number of bits of network to allocate to each node. eg, 8 would mean that each node would have a /24 slice of the overlay network for its pods", + "serviceNetwork": "serviceNetwork is the CIDR range that Service IP addresses are allocated from", + "pluginName": "pluginName is the name of the network plugin being used", + "clusterNetworks": "clusterNetworks is a list of ClusterNetwork objects that defines the global overlay network's L3 space by specifying a set of CIDR and netmasks that the SDN can allocate addresses from.", + "vxlanPort": "vxlanPort sets the VXLAN destination port used by the cluster. It is set by the master configuration file on startup and cannot be edited manually. Valid values for VXLANPort are integers 1-65535 inclusive and if unset defaults to 4789. Changing VXLANPort allows users to resolve issues between openshift SDN and other software trying to use the same VXLAN destination port.", + "mtu": "mtu is the MTU for the overlay network. This should be 50 less than the MTU of the network connecting the nodes. It is normally autodetected by the cluster network operator.", } func (ClusterNetwork) SwaggerDoc() map[string]string { @@ -30,7 +30,7 @@ func (ClusterNetwork) SwaggerDoc() map[string]string { var map_ClusterNetworkEntry = map[string]string{ "": "ClusterNetworkEntry defines an individual cluster network. The CIDRs cannot overlap with other cluster network CIDRs, CIDRs reserved for external ips, CIDRs reserved for service networks, and CIDRs reserved for ingress ips.", "CIDR": "CIDR defines the total range of a cluster networks address space.", - "hostSubnetLength": "HostSubnetLength is the number of bits of the accompanying CIDR address to allocate to each node. eg, 8 would mean that each node would have a /24 slice of the overlay network for its pods.", + "hostSubnetLength": "hostSubnetLength is the number of bits of the accompanying CIDR address to allocate to each node. 
eg, 8 would mean that each node would have a /24 slice of the overlay network for its pods.", } func (ClusterNetworkEntry) SwaggerDoc() map[string]string { @@ -40,7 +40,7 @@ func (ClusterNetworkEntry) SwaggerDoc() map[string]string { var map_ClusterNetworkList = map[string]string{ "": "ClusterNetworkList is a collection of ClusterNetworks\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items is the list of cluster networks", + "items": "items is the list of cluster networks", } func (ClusterNetworkList) SwaggerDoc() map[string]string { @@ -69,8 +69,8 @@ func (EgressNetworkPolicyList) SwaggerDoc() map[string]string { var map_EgressNetworkPolicyPeer = map[string]string{ "": "EgressNetworkPolicyPeer specifies a target to apply egress network policy to", - "cidrSelector": "CIDRSelector is the CIDR range to allow/deny traffic to. If this is set, dnsName must be unset Ideally we would have liked to use the cidr openapi format for this property. But openshift-sdn only supports v4 while specifying the cidr format allows both v4 and v6 cidrs We are therefore using a regex pattern to validate instead.", - "dnsName": "DNSName is the domain name to allow/deny traffic to. If this is set, cidrSelector must be unset", + "cidrSelector": "cidrSelector is the CIDR range to allow/deny traffic to. If this is set, dnsName must be unset Ideally we would have liked to use the cidr openapi format for this property. But openshift-sdn only supports v4 while specifying the cidr format allows both v4 and v6 cidrs We are therefore using a regex pattern to validate instead.", + "dnsName": "dnsName is the domain name to allow/deny traffic to. If this is set, cidrSelector must be unset", } func (EgressNetworkPolicyPeer) SwaggerDoc() map[string]string { @@ -99,11 +99,11 @@ func (EgressNetworkPolicySpec) SwaggerDoc() map[string]string { var map_HostSubnet = map[string]string{ "": "HostSubnet was used by OpenShift SDN. DEPRECATED: OpenShift SDN is no longer supported and this object is no longer used in any way by OpenShift.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "host": "Host is the name of the node. (This is the same as the object's name, but both fields must be set.)", - "hostIP": "HostIP is the IP address to be used as a VTEP by other nodes in the overlay network", - "subnet": "Subnet is the CIDR range of the overlay network assigned to the node for its pods", - "egressIPs": "EgressIPs is the list of automatic egress IP addresses currently hosted by this node. If EgressCIDRs is empty, this can be set by hand; if EgressCIDRs is set then the master will overwrite the value here with its own allocation of egress IPs.", - "egressCIDRs": "EgressCIDRs is the list of CIDR ranges available for automatically assigning egress IPs to this node from. If this field is set then EgressIPs should be treated as read-only.", + "host": "host is the name of the node. 
(This is the same as the object's name, but both fields must be set.)", + "hostIP": "hostIP is the IP address to be used as a VTEP by other nodes in the overlay network", + "subnet": "subnet is the CIDR range of the overlay network assigned to the node for its pods", + "egressIPs": "egressIPs is the list of automatic egress IP addresses currently hosted by this node. If EgressCIDRs is empty, this can be set by hand; if EgressCIDRs is set then the master will overwrite the value here with its own allocation of egress IPs.", + "egressCIDRs": "egressCIDRs is the list of CIDR ranges available for automatically assigning egress IPs to this node from. If this field is set then EgressIPs should be treated as read-only.", } func (HostSubnet) SwaggerDoc() map[string]string { @@ -113,7 +113,7 @@ func (HostSubnet) SwaggerDoc() map[string]string { var map_HostSubnetList = map[string]string{ "": "HostSubnetList is a collection of HostSubnets\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items is the list of host subnets", + "items": "items is the list of host subnets", } func (HostSubnetList) SwaggerDoc() map[string]string { @@ -123,9 +123,9 @@ func (HostSubnetList) SwaggerDoc() map[string]string { var map_NetNamespace = map[string]string{ "": "NetNamespace was used by OpenShift SDN. DEPRECATED: OpenShift SDN is no longer supported and this object is no longer used in any way by OpenShift.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "netname": "NetName is the name of the network namespace. (This is the same as the object's name, but both fields must be set.)", - "netid": "NetID is the network identifier of the network namespace assigned to each overlay network packet. This can be manipulated with the \"oc adm pod-network\" commands.", - "egressIPs": "EgressIPs is a list of reserved IPs that will be used as the source for external traffic coming from pods in this namespace. (If empty, external traffic will be masqueraded to Node IPs.)", + "netname": "netname is the name of the network namespace. (This is the same as the object's name, but both fields must be set.)", + "netid": "netid is the network identifier of the network namespace assigned to each overlay network packet. This can be manipulated with the \"oc adm pod-network\" commands.", + "egressIPs": "egressIPs is a list of reserved IPs that will be used as the source for external traffic coming from pods in this namespace. (If empty, external traffic will be masqueraded to Node IPs.)", } func (NetNamespace) SwaggerDoc() map[string]string { @@ -135,7 +135,7 @@ func (NetNamespace) SwaggerDoc() map[string]string { var map_NetNamespaceList = map[string]string{ "": "NetNamespaceList is a collection of NetNamespaces\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard list's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items is the list of net namespaces", + "items": "items is the list of net namespaces", } func (NetNamespaceList) SwaggerDoc() map[string]string { diff --git a/vendor/github.com/openshift/api/network/v1alpha1/types_dnsnameresolver.go b/vendor/github.com/openshift/api/network/v1alpha1/types_dnsnameresolver.go index 394f2e4ac..cd0d1b31a 100644 --- a/vendor/github.com/openshift/api/network/v1alpha1/types_dnsnameresolver.go +++ b/vendor/github.com/openshift/api/network/v1alpha1/types_dnsnameresolver.go @@ -26,7 +26,7 @@ type DNSNameResolver struct { metav1.ObjectMeta `json:"metadata,omitempty"` // spec is the specification of the desired behavior of the DNSNameResolver. - // +kubebuilder:validation:Required + // +required Spec DNSNameResolverSpec `json:"spec"` // status is the most recently observed status of the DNSNameResolver. // +optional @@ -47,7 +47,7 @@ type DNSNameResolverSpec struct { // For a wildcard DNS name, the '*' will match only one label. Additionally, only a single // '*' can be used at the beginning of the wildcard DNS name. For example, '*.example.com.' // will match 'sub1.example.com.' but won't match 'sub2.sub1.example.com.' - // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="spec.name is immutable" Name DNSName `json:"name"` } @@ -82,12 +82,12 @@ type DNSNameResolverResolvedName struct { // will store the regular DNS names which match the wildcard DNS name and have been successfully resolved. // If the wildcard DNS name can also be successfully resolved, then this field will store the wildcard // DNS name as well. - // +kubebuilder:validation:Required + // +required DNSName DNSName `json:"dnsName"` // resolvedAddresses gives the list of associated IP addresses and their corresponding TTLs and last // lookup times for the dnsName. - // +kubebuilder:validation:Required + // +required // +listType=map // +listMapKey=ip ResolvedAddresses []DNSNameResolverResolvedAddress `json:"resolvedAddresses"` @@ -106,21 +106,21 @@ type DNSNameResolverResolvedAddress struct { // lastLookupTime + ttlSeconds. To refresh the information, a DNS lookup will be performed upon // the expiration of the IP address's validity. If the information is not refreshed then it will // be removed with a grace period after the expiration of the IP address's validity. - // +kubebuilder:validation:Required + // +required IP string `json:"ip"` // ttlSeconds is the time-to-live value of the IP address. The validity of the IP address expires after // lastLookupTime + ttlSeconds. On a successful DNS lookup the value of this field will be updated with // the current time-to-live value. If the information is not refreshed then it will be removed with a // grace period after the expiration of the IP address's validity. - // +kubebuilder:validation:Required + // +required TTLSeconds int32 `json:"ttlSeconds"` // lastLookupTime is the timestamp when the last DNS lookup was completed successfully. The validity of // the IP address expires after lastLookupTime + ttlSeconds. The value of this field will be updated to // the current time on a successful DNS lookup. If the information is not refreshed then it will be // removed with a grace period after the expiration of the IP address's validity. 
- // +kubebuilder:validation:Required + // +required LastLookupTime *metav1.Time `json:"lastLookupTime"` } diff --git a/vendor/github.com/openshift/api/networkoperator/v1/generated.proto b/vendor/github.com/openshift/api/networkoperator/v1/generated.proto index 37c374557..1999f71e8 100644 --- a/vendor/github.com/openshift/api/networkoperator/v1/generated.proto +++ b/vendor/github.com/openshift/api/networkoperator/v1/generated.proto @@ -43,7 +43,7 @@ message EgressRouter { optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Specification of the desired egress router. - // +kubebuilder:validation:Required + // +required optional EgressRouterSpec spec = 2; // Observed status of EgressRouter. @@ -53,8 +53,8 @@ message EgressRouter { // EgressRouterAddress contains a pair of IP CIDR and gateway to be configured on the router's interface // +kubebuilder:validation:Required message EgressRouterAddress { - // IP is the address to configure on the router's interface. Can be IPv4 or IPv6. - // +kubebuilder:validation:Required + // ip is the address to configure on the router's interface. Can be IPv4 or IPv6. + // +required optional string ip = 1; // IP address of the next-hop gateway, if it cannot be automatically determined. Can be IPv4 or IPv6. @@ -86,30 +86,32 @@ message EgressRouterList { // +k8s:openapi-gen=true // +kubebuilder:validation:Required message EgressRouterSpec { - // Mode depicts the mode that is used for the egress router. The default mode is "Redirect" and is the only supported mode currently. - // +kubebuilder:validation:Required + // mode depicts the mode that is used for the egress router. The default mode is "Redirect" and is the only supported mode currently. + // +required // +kubebuilder:validation:Enum="Redirect" // +kubebuilder:default:="Redirect" optional string mode = 1; - // Redirect represents the configuration parameters specific to redirect mode. + // redirect represents the configuration parameters specific to redirect mode. optional RedirectConfig redirect = 2; // Specification of interface to create/use. The default is macvlan. // Currently only macvlan is supported. - // +kubebuilder:validation:Required + // +required // +kubebuilder:default:={macvlan: {mode: Bridge}} optional EgressRouterInterface networkInterface = 3; // List of IP addresses to configure on the pod's secondary interface. - // +kubebuilder:validation:Required + // +required repeated EgressRouterAddress addresses = 4; } // EgressRouterStatus contains the observed status of EgressRouter. Read-only. message EgressRouterStatus { // Observed status of the egress router - // +kubebuilder:validation:Required + // +required + // +listType=map + // +listMapKey=type repeated EgressRouterStatusCondition conditions = 1; } @@ -117,28 +119,25 @@ message EgressRouterStatus { // managed and monitored components. // +k8s:deepcopy-gen=true message EgressRouterStatusCondition { - // Type specifies the aspect reported by this condition; one of Available, Progressing, Degraded - // +kubebuilder:validation:Required + // type specifies the aspect reported by this condition; one of Available, Progressing, Degraded // +kubebuilder:validation:Enum="Available";"Progressing";"Degraded" // +required optional string type = 1; - // Status of the condition, one of True, False, Unknown. - // +kubebuilder:validation:Required + // status of the condition, one of True, False, Unknown. 
// +kubebuilder:validation:Enum="True";"False";"Unknown" // +required optional string status = 2; - // LastTransitionTime is the time of the last update to the current status property. - // +kubebuilder:validation:Required + // lastTransitionTime is the time of the last update to the current status property. // +required // +nullable optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; - // Reason is the CamelCase reason for the condition's current status. + // reason is the CamelCase reason for the condition's current status. optional string reason = 4; - // Message provides additional information about the current condition. + // message provides additional information about the current condition. // This is only to be consumed by humans. It may contain Line Feed // characters (U+000A), which should be rendered as new lines. optional string message = 5; @@ -147,21 +146,21 @@ message EgressRouterStatusCondition { // L4RedirectRule defines a DNAT redirection from a given port to a destination IP and port. message L4RedirectRule { // IP specifies the remote destination's IP address. Can be IPv4 or IPv6. - // +kubebuilder:validation:Required + // +required optional string destinationIP = 1; - // Port is the port number to which clients should send traffic to be redirected. - // +kubebuilder:validation:Required + // port is the port number to which clients should send traffic to be redirected. + // +required // +kubebuilder:validation:Maximum:=65535 // +kubebuilder:validation:Minimum:=1 optional int32 port = 2; - // Protocol can be TCP, SCTP or UDP. - // +kubebuilder:validation:Required + // protocol can be TCP, SCTP or UDP. + // +required // +kubebuilder:validation:Enum="TCP";"UDP";"SCTP" optional string protocol = 3; - // TargetPort allows specifying the port number on the remote destination to which the traffic gets redirected to. + // targetPort allows specifying the port number on the remote destination to which the traffic gets redirected to. // If unspecified, the value from "Port" is used. // +kubebuilder:validation:Maximum:=65535 // +kubebuilder:validation:Minimum:=1 @@ -170,8 +169,8 @@ message L4RedirectRule { // MacvlanConfig consists of arguments specific to the macvlan EgressRouterInterfaceType message MacvlanConfig { - // Mode depicts the mode that is used for the macvlan interface; one of Bridge|Private|VEPA|Passthru. The default mode is "Bridge". - // +kubebuilder:validation:Required + // mode depicts the mode that is used for the macvlan interface; one of Bridge|Private|VEPA|Passthru. The default mode is "Bridge". + // +required // +kubebuilder:validation:Enum="Bridge";"Private";"VEPA";"Passthru" // +kubebuilder:default:="Bridge" optional string mode = 1; @@ -185,7 +184,7 @@ message RedirectConfig { // List of L4RedirectRules that define the DNAT redirection from the pod to the destination in redirect mode. repeated L4RedirectRule redirectRules = 1; - // FallbackIP specifies the remote destination's IP address. Can be IPv4 or IPv6. + // fallbackIP specifies the remote destination's IP address. Can be IPv4 or IPv6. // If no redirect rules are specified, all traffic from the router are redirected to this IP. // If redirect rules are specified, then any connections on any other port (undefined in the rules) on the router will be redirected to this IP. // If redirect rules are specified and no fallback IP is provided, connections on other ports will simply be rejected. 
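Context for the marker changes in the networkoperator hunks above and in the types_egressrouter.go hunks that follow: the vendored openshift/api types move from the long-form +kubebuilder:validation:Required marker to the shorter +required form, and the conditions slices gain +listType=map / +listMapKey=type. As a rough sketch of that marker style on a generic status type (ExampleStatus and its field are hypothetical, not part of the vendored API; assuming standard controller-gen semantics):

package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// ExampleStatus mirrors the marker style adopted in the vendored types:
// +required replaces +kubebuilder:validation:Required, and the listType /
// listMapKey markers declare conditions as a map-list keyed by "type",
// which lets server-side apply merge individual condition entries.
type ExampleStatus struct {
	// conditions is the set of observed conditions, keyed by type.
	// +required
	// +listType=map
	// +listMapKey=type
	Conditions []metav1.Condition `json:"conditions,omitempty"`
}

The +required rename should leave the generated CRD schema requirements unchanged, while the list markers should additionally surface as x-kubernetes-list-type: map and x-kubernetes-list-map-keys: [type] in the generated schema.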
diff --git a/vendor/github.com/openshift/api/networkoperator/v1/types_egressrouter.go b/vendor/github.com/openshift/api/networkoperator/v1/types_egressrouter.go index 9f11590e0..541c3b559 100644 --- a/vendor/github.com/openshift/api/networkoperator/v1/types_egressrouter.go +++ b/vendor/github.com/openshift/api/networkoperator/v1/types_egressrouter.go @@ -38,7 +38,7 @@ type EgressRouter struct { metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Specification of the desired egress router. - // +kubebuilder:validation:Required + // +required Spec EgressRouterSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` // Observed status of EgressRouter. @@ -51,23 +51,23 @@ type EgressRouter struct { // +k8s:openapi-gen=true // +kubebuilder:validation:Required type EgressRouterSpec struct { - // Mode depicts the mode that is used for the egress router. The default mode is "Redirect" and is the only supported mode currently. - // +kubebuilder:validation:Required + // mode depicts the mode that is used for the egress router. The default mode is "Redirect" and is the only supported mode currently. + // +required // +kubebuilder:validation:Enum="Redirect" // +kubebuilder:default:="Redirect" Mode EgressRouterMode `json:"mode" protobuf:"bytes,1,opt,name=mode,casttype=EgressRouterMode"` - // Redirect represents the configuration parameters specific to redirect mode. + // redirect represents the configuration parameters specific to redirect mode. Redirect *RedirectConfig `json:"redirect,omitempty" protobuf:"bytes,2,opt,name=redirect"` // Specification of interface to create/use. The default is macvlan. // Currently only macvlan is supported. - // +kubebuilder:validation:Required + // +required // +kubebuilder:default:={macvlan: {mode: Bridge}} NetworkInterface EgressRouterInterface `json:"networkInterface" protobuf:"bytes,3,opt,name=networkInterface"` // List of IP addresses to configure on the pod's secondary interface. - // +kubebuilder:validation:Required + // +required Addresses []EgressRouterAddress `json:"addresses" protobuf:"bytes,4,rep,name=addresses"` } @@ -86,7 +86,7 @@ type RedirectConfig struct { // List of L4RedirectRules that define the DNAT redirection from the pod to the destination in redirect mode. RedirectRules []L4RedirectRule `json:"redirectRules,omitempty" protobuf:"bytes,1,rep,name=redirectRules"` - // FallbackIP specifies the remote destination's IP address. Can be IPv4 or IPv6. + // fallbackIP specifies the remote destination's IP address. Can be IPv4 or IPv6. // If no redirect rules are specified, all traffic from the router are redirected to this IP. // If redirect rules are specified, then any connections on any other port (undefined in the rules) on the router will be redirected to this IP. // If redirect rules are specified and no fallback IP is provided, connections on other ports will simply be rejected. @@ -96,21 +96,21 @@ type RedirectConfig struct { // L4RedirectRule defines a DNAT redirection from a given port to a destination IP and port. type L4RedirectRule struct { // IP specifies the remote destination's IP address. Can be IPv4 or IPv6. - // +kubebuilder:validation:Required + // +required DestinationIP string `json:"destinationIP" protobuf:"bytes,1,opt,name=destinationIP"` - // Port is the port number to which clients should send traffic to be redirected. - // +kubebuilder:validation:Required + // port is the port number to which clients should send traffic to be redirected. 
+ // +required // +kubebuilder:validation:Maximum:=65535 // +kubebuilder:validation:Minimum:=1 Port int32 `json:"port" protobuf:"varint,2,opt,name=port"` - // Protocol can be TCP, SCTP or UDP. - // +kubebuilder:validation:Required + // protocol can be TCP, SCTP or UDP. + // +required // +kubebuilder:validation:Enum="TCP";"UDP";"SCTP" Protocol ProtocolType `json:"protocol" protobuf:"bytes,3,opt,name=protocol,casttype=ProtocolType"` - // TargetPort allows specifying the port number on the remote destination to which the traffic gets redirected to. + // targetPort allows specifying the port number on the remote destination to which the traffic gets redirected to. // If unspecified, the value from "Port" is used. // +kubebuilder:validation:Maximum:=65535 // +kubebuilder:validation:Minimum:=1 @@ -165,8 +165,8 @@ const ( // MacvlanConfig consists of arguments specific to the macvlan EgressRouterInterfaceType type MacvlanConfig struct { - // Mode depicts the mode that is used for the macvlan interface; one of Bridge|Private|VEPA|Passthru. The default mode is "Bridge". - // +kubebuilder:validation:Required + // mode depicts the mode that is used for the macvlan interface; one of Bridge|Private|VEPA|Passthru. The default mode is "Bridge". + // +required // +kubebuilder:validation:Enum="Bridge";"Private";"VEPA";"Passthru" // +kubebuilder:default:="Bridge" Mode MacvlanMode `json:"mode" protobuf:"bytes,1,opt,name=mode,casttype=MacvlanMode"` @@ -178,8 +178,8 @@ type MacvlanConfig struct { // EgressRouterAddress contains a pair of IP CIDR and gateway to be configured on the router's interface // +kubebuilder:validation:Required type EgressRouterAddress struct { - // IP is the address to configure on the router's interface. Can be IPv4 or IPv6. - // +kubebuilder:validation:Required + // ip is the address to configure on the router's interface. Can be IPv4 or IPv6. + // +required IP string `json:"ip" protobuf:"bytes,1,opt,name=ip"` // IP address of the next-hop gateway, if it cannot be automatically determined. Can be IPv4 or IPv6. Gateway string `json:"gateway,omitempty" protobuf:"bytes,2,opt,name=gateway"` @@ -219,28 +219,25 @@ const ( // managed and monitored components. // +k8s:deepcopy-gen=true type EgressRouterStatusCondition struct { - // Type specifies the aspect reported by this condition; one of Available, Progressing, Degraded - // +kubebuilder:validation:Required + // type specifies the aspect reported by this condition; one of Available, Progressing, Degraded // +kubebuilder:validation:Enum="Available";"Progressing";"Degraded" // +required Type EgressRouterStatusConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=EgressRouterStatusConditionType"` - // Status of the condition, one of True, False, Unknown. - // +kubebuilder:validation:Required + // status of the condition, one of True, False, Unknown. // +kubebuilder:validation:Enum="True";"False";"Unknown" // +required Status ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=ConditionStatus"` - // LastTransitionTime is the time of the last update to the current status property. - // +kubebuilder:validation:Required + // lastTransitionTime is the time of the last update to the current status property. // +required // +nullable LastTransitionTime metav1.Time `json:"lastTransitionTime" protobuf:"bytes,3,opt,name=lastTransitionTime"` - // Reason is the CamelCase reason for the condition's current status. + // reason is the CamelCase reason for the condition's current status. 
Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"` - // Message provides additional information about the current condition. + // message provides additional information about the current condition. // This is only to be consumed by humans. It may contain Line Feed // characters (U+000A), which should be rendered as new lines. Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"` @@ -249,7 +246,9 @@ type EgressRouterStatusCondition struct { // EgressRouterStatus contains the observed status of EgressRouter. Read-only. type EgressRouterStatus struct { // Observed status of the egress router - // +kubebuilder:validation:Required + // +required + // +listType=map + // +listMapKey=type Conditions []EgressRouterStatusCondition `json:"conditions,omitempty" protobuf:"bytes,1,rep,name=conditions"` } diff --git a/vendor/github.com/openshift/api/networkoperator/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/networkoperator/v1/zz_generated.swagger_doc_generated.go index 97bec9e29..000cb1903 100644 --- a/vendor/github.com/openshift/api/networkoperator/v1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/networkoperator/v1/zz_generated.swagger_doc_generated.go @@ -24,7 +24,7 @@ func (EgressRouter) SwaggerDoc() map[string]string { var map_EgressRouterAddress = map[string]string{ "": "EgressRouterAddress contains a pair of IP CIDR and gateway to be configured on the router's interface", - "ip": "IP is the address to configure on the router's interface. Can be IPv4 or IPv6.", + "ip": "ip is the address to configure on the router's interface. Can be IPv4 or IPv6.", "gateway": "IP address of the next-hop gateway, if it cannot be automatically determined. Can be IPv4 or IPv6.", } @@ -52,8 +52,8 @@ func (EgressRouterList) SwaggerDoc() map[string]string { var map_EgressRouterSpec = map[string]string{ "": "EgressRouterSpec contains the configuration for an egress router. Mode, networkInterface and addresses fields must be specified along with exactly one \"Config\" that matches the mode. Each config consists of parameters specific to that mode.", - "mode": "Mode depicts the mode that is used for the egress router. The default mode is \"Redirect\" and is the only supported mode currently.", - "redirect": "Redirect represents the configuration parameters specific to redirect mode.", + "mode": "mode depicts the mode that is used for the egress router. The default mode is \"Redirect\" and is the only supported mode currently.", + "redirect": "redirect represents the configuration parameters specific to redirect mode.", "networkInterface": "Specification of interface to create/use. The default is macvlan. Currently only macvlan is supported.", "addresses": "List of IP addresses to configure on the pod's secondary interface.", } @@ -73,11 +73,11 @@ func (EgressRouterStatus) SwaggerDoc() map[string]string { var map_EgressRouterStatusCondition = map[string]string{ "": "EgressRouterStatusCondition represents the state of the egress router's managed and monitored components.", - "type": "Type specifies the aspect reported by this condition; one of Available, Progressing, Degraded", - "status": "Status of the condition, one of True, False, Unknown.", - "lastTransitionTime": "LastTransitionTime is the time of the last update to the current status property.", - "reason": "Reason is the CamelCase reason for the condition's current status.", - "message": "Message provides additional information about the current condition. 
This is only to be consumed by humans. It may contain Line Feed characters (U+000A), which should be rendered as new lines.", + "type": "type specifies the aspect reported by this condition; one of Available, Progressing, Degraded", + "status": "status of the condition, one of True, False, Unknown.", + "lastTransitionTime": "lastTransitionTime is the time of the last update to the current status property.", + "reason": "reason is the CamelCase reason for the condition's current status.", + "message": "message provides additional information about the current condition. This is only to be consumed by humans. It may contain Line Feed characters (U+000A), which should be rendered as new lines.", } func (EgressRouterStatusCondition) SwaggerDoc() map[string]string { @@ -87,9 +87,9 @@ func (EgressRouterStatusCondition) SwaggerDoc() map[string]string { var map_L4RedirectRule = map[string]string{ "": "L4RedirectRule defines a DNAT redirection from a given port to a destination IP and port.", "destinationIP": "IP specifies the remote destination's IP address. Can be IPv4 or IPv6.", - "port": "Port is the port number to which clients should send traffic to be redirected.", - "protocol": "Protocol can be TCP, SCTP or UDP.", - "targetPort": "TargetPort allows specifying the port number on the remote destination to which the traffic gets redirected to. If unspecified, the value from \"Port\" is used.", + "port": "port is the port number to which clients should send traffic to be redirected.", + "protocol": "protocol can be TCP, SCTP or UDP.", + "targetPort": "targetPort allows specifying the port number on the remote destination to which the traffic gets redirected to. If unspecified, the value from \"Port\" is used.", } func (L4RedirectRule) SwaggerDoc() map[string]string { @@ -98,7 +98,7 @@ func (L4RedirectRule) SwaggerDoc() map[string]string { var map_MacvlanConfig = map[string]string{ "": "MacvlanConfig consists of arguments specific to the macvlan EgressRouterInterfaceType", - "mode": "Mode depicts the mode that is used for the macvlan interface; one of Bridge|Private|VEPA|Passthru. The default mode is \"Bridge\".", + "mode": "mode depicts the mode that is used for the macvlan interface; one of Bridge|Private|VEPA|Passthru. The default mode is \"Bridge\".", "master": "Name of the master interface. Need not be specified if it can be inferred from the IP address.", } @@ -109,7 +109,7 @@ func (MacvlanConfig) SwaggerDoc() map[string]string { var map_RedirectConfig = map[string]string{ "": "RedirectConfig represents the configuration parameters specific to redirect mode.", "redirectRules": "List of L4RedirectRules that define the DNAT redirection from the pod to the destination in redirect mode.", - "fallbackIP": "FallbackIP specifies the remote destination's IP address. Can be IPv4 or IPv6. If no redirect rules are specified, all traffic from the router are redirected to this IP. If redirect rules are specified, then any connections on any other port (undefined in the rules) on the router will be redirected to this IP. If redirect rules are specified and no fallback IP is provided, connections on other ports will simply be rejected.", + "fallbackIP": "fallbackIP specifies the remote destination's IP address. Can be IPv4 or IPv6. If no redirect rules are specified, all traffic from the router are redirected to this IP. If redirect rules are specified, then any connections on any other port (undefined in the rules) on the router will be redirected to this IP. 
If redirect rules are specified and no fallback IP is provided, connections on other ports will simply be rejected.", } func (RedirectConfig) SwaggerDoc() map[string]string { diff --git a/vendor/github.com/openshift/api/oauth/v1/generated.proto b/vendor/github.com/openshift/api/oauth/v1/generated.proto index 7630d896d..4a5474e0c 100644 --- a/vendor/github.com/openshift/api/oauth/v1/generated.proto +++ b/vendor/github.com/openshift/api/oauth/v1/generated.proto @@ -13,13 +13,13 @@ option go_package = "github.com/openshift/api/oauth/v1"; // ClusterRoleScopeRestriction describes restrictions on cluster role scopes message ClusterRoleScopeRestriction { - // RoleNames is the list of cluster roles that can referenced. * means anything + // roleNames is the list of cluster roles that can referenced. * means anything repeated string roleNames = 1; - // Namespaces is the list of namespaces that can be referenced. * means any of them (including *) + // namespaces is the list of namespaces that can be referenced. * means any of them (including *) repeated string namespaces = 2; - // AllowEscalation indicates whether you can request roles and their escalating resources + // allowEscalation indicates whether you can request roles and their escalating resources optional bool allowEscalation = 3; } @@ -37,31 +37,31 @@ message OAuthAccessToken { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // ClientName references the client that created this token. + // clientName references the client that created this token. optional string clientName = 2; - // ExpiresIn is the seconds from CreationTime before this token expires. + // expiresIn is the seconds from CreationTime before this token expires. optional int64 expiresIn = 3; - // Scopes is an array of the requested scopes. + // scopes is an array of the requested scopes. repeated string scopes = 4; - // RedirectURI is the redirection associated with the token. + // redirectURI is the redirection associated with the token. optional string redirectURI = 5; - // UserName is the user name associated with this token + // userName is the user name associated with this token optional string userName = 6; - // UserUID is the unique UID associated with this token + // userUID is the unique UID associated with this token optional string userUID = 7; - // AuthorizeToken contains the token that authorized this token + // authorizeToken contains the token that authorized this token optional string authorizeToken = 8; - // RefreshToken is the value by which this token can be renewed. Can be blank. + // refreshToken is the value by which this token can be renewed. Can be blank. optional string refreshToken = 9; - // InactivityTimeoutSeconds is the value in seconds, from the + // inactivityTimeoutSeconds is the value in seconds, from the // CreationTimestamp, after which this token can no longer be used. // The value is automatically incremented when the token is used. 
optional int32 inactivityTimeoutSeconds = 10; @@ -76,7 +76,7 @@ message OAuthAccessTokenList { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // Items is the list of OAuth access tokens + // items is the list of OAuth access tokens repeated OAuthAccessToken items = 2; } @@ -89,32 +89,32 @@ message OAuthAuthorizeToken { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // ClientName references the client that created this token. + // clientName references the client that created this token. optional string clientName = 2; - // ExpiresIn is the seconds from CreationTime before this token expires. + // expiresIn is the seconds from CreationTime before this token expires. optional int64 expiresIn = 3; - // Scopes is an array of the requested scopes. + // scopes is an array of the requested scopes. repeated string scopes = 4; - // RedirectURI is the redirection associated with the token. + // redirectURI is the redirection associated with the token. optional string redirectURI = 5; - // State data from request + // state data from request optional string state = 6; - // UserName is the user name associated with this token + // userName is the user name associated with this token optional string userName = 7; - // UserUID is the unique UID associated with this token. UserUID and UserName must both match + // userUID is the unique UID associated with this token. UserUID and UserName must both match // for this token to be valid. optional string userUID = 8; - // CodeChallenge is the optional code_challenge associated with this authorization code, as described in rfc7636 + // codeChallenge is the optional code_challenge associated with this authorization code, as described in rfc7636 optional string codeChallenge = 9; - // CodeChallengeMethod is the optional code_challenge_method associated with this authorization code, as described in rfc7636 + // codeChallengeMethod is the optional code_challenge_method associated with this authorization code, as described in rfc7636 optional string codeChallengeMethod = 10; } @@ -127,7 +127,7 @@ message OAuthAuthorizeTokenList { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // Items is the list of OAuth authorization tokens + // items is the list of OAuth authorization tokens repeated OAuthAuthorizeToken items = 2; } @@ -140,36 +140,36 @@ message OAuthClient { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // Secret is the unique secret associated with a client + // secret is the unique secret associated with a client optional string secret = 2; - // AdditionalSecrets holds other secrets that may be used to identify the client. This is useful for rotation + // additionalSecrets holds other secrets that may be used to identify the client. 
This is useful for rotation // and for service account token validation repeated string additionalSecrets = 3; - // RespondWithChallenges indicates whether the client wants authentication needed responses made in the form of challenges instead of redirects + // respondWithChallenges indicates whether the client wants authentication needed responses made in the form of challenges instead of redirects optional bool respondWithChallenges = 4; - // RedirectURIs is the valid redirection URIs associated with a client + // redirectURIs is the valid redirection URIs associated with a client // +patchStrategy=merge repeated string redirectURIs = 5; - // GrantMethod is a required field which determines how to handle grants for this client. + // grantMethod is a required field which determines how to handle grants for this client. // Valid grant handling methods are: // - auto: always approves grant requests, useful for trusted clients // - prompt: prompts the end user for approval of grant requests, useful for third-party clients optional string grantMethod = 6; - // ScopeRestrictions describes which scopes this client can request. Each requested scope + // scopeRestrictions describes which scopes this client can request. Each requested scope // is checked against each restriction. If any restriction matches, then the scope is allowed. // If no restriction matches, then the scope is denied. repeated ScopeRestriction scopeRestrictions = 7; - // AccessTokenMaxAgeSeconds overrides the default access token max age for tokens granted to this client. + // accessTokenMaxAgeSeconds overrides the default access token max age for tokens granted to this client. // 0 means no expiration. optional int32 accessTokenMaxAgeSeconds = 8; - // AccessTokenInactivityTimeoutSeconds overrides the default token + // accessTokenInactivityTimeoutSeconds overrides the default token // inactivity timeout for tokens granted to this client. // The value represents the maximum amount of time that can occur between // consecutive uses of the token. Tokens become invalid if they are not @@ -194,17 +194,17 @@ message OAuthClientAuthorization { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // ClientName references the client that created this authorization + // clientName references the client that created this authorization optional string clientName = 2; - // UserName is the user name that authorized this client + // userName is the user name that authorized this client optional string userName = 3; - // UserUID is the unique UID associated with this authorization. UserUID and UserName + // userUID is the unique UID associated with this authorization. UserUID and UserName // must both match for this authorization to be valid. optional string userUID = 4; - // Scopes is an array of the granted scopes. + // scopes is an array of the granted scopes. 
repeated string scopes = 5; } @@ -217,7 +217,7 @@ message OAuthClientAuthorizationList { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // Items is the list of OAuth client authorizations + // items is the list of OAuth client authorizations repeated OAuthClientAuthorization items = 2; } @@ -230,7 +230,7 @@ message OAuthClientList { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // Items is the list of OAuth clients + // items is the list of OAuth clients repeated OAuthClient items = 2; } @@ -264,7 +264,7 @@ message ScopeRestriction { // ExactValues means the scope has to match a particular set of strings exactly repeated string literals = 1; - // ClusterRole describes a set of restrictions for cluster role scoping. + // clusterRole describes a set of restrictions for cluster role scoping. optional ClusterRoleScopeRestriction clusterRole = 2; } @@ -276,31 +276,31 @@ message UserOAuthAccessToken { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // ClientName references the client that created this token. + // clientName references the client that created this token. optional string clientName = 2; - // ExpiresIn is the seconds from CreationTime before this token expires. + // expiresIn is the seconds from CreationTime before this token expires. optional int64 expiresIn = 3; - // Scopes is an array of the requested scopes. + // scopes is an array of the requested scopes. repeated string scopes = 4; - // RedirectURI is the redirection associated with the token. + // redirectURI is the redirection associated with the token. optional string redirectURI = 5; - // UserName is the user name associated with this token + // userName is the user name associated with this token optional string userName = 6; - // UserUID is the unique UID associated with this token + // userUID is the unique UID associated with this token optional string userUID = 7; - // AuthorizeToken contains the token that authorized this token + // authorizeToken contains the token that authorized this token optional string authorizeToken = 8; - // RefreshToken is the value by which this token can be renewed. Can be blank. + // refreshToken is the value by which this token can be renewed. Can be blank. optional string refreshToken = 9; - // InactivityTimeoutSeconds is the value in seconds, from the + // inactivityTimeoutSeconds is the value in seconds, from the // CreationTimestamp, after which this token can no longer be used. // The value is automatically incremented when the token is used. optional int32 inactivityTimeoutSeconds = 10; diff --git a/vendor/github.com/openshift/api/oauth/v1/types.go b/vendor/github.com/openshift/api/oauth/v1/types.go index 026c527f5..5a70b4774 100644 --- a/vendor/github.com/openshift/api/oauth/v1/types.go +++ b/vendor/github.com/openshift/api/oauth/v1/types.go @@ -24,31 +24,31 @@ type OAuthAccessToken struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // ClientName references the client that created this token. 
+ // clientName references the client that created this token. ClientName string `json:"clientName,omitempty" protobuf:"bytes,2,opt,name=clientName"` - // ExpiresIn is the seconds from CreationTime before this token expires. + // expiresIn is the seconds from CreationTime before this token expires. ExpiresIn int64 `json:"expiresIn,omitempty" protobuf:"varint,3,opt,name=expiresIn"` - // Scopes is an array of the requested scopes. + // scopes is an array of the requested scopes. Scopes []string `json:"scopes,omitempty" protobuf:"bytes,4,rep,name=scopes"` - // RedirectURI is the redirection associated with the token. + // redirectURI is the redirection associated with the token. RedirectURI string `json:"redirectURI,omitempty" protobuf:"bytes,5,opt,name=redirectURI"` - // UserName is the user name associated with this token + // userName is the user name associated with this token UserName string `json:"userName,omitempty" protobuf:"bytes,6,opt,name=userName"` - // UserUID is the unique UID associated with this token + // userUID is the unique UID associated with this token UserUID string `json:"userUID,omitempty" protobuf:"bytes,7,opt,name=userUID"` - // AuthorizeToken contains the token that authorized this token + // authorizeToken contains the token that authorized this token AuthorizeToken string `json:"authorizeToken,omitempty" protobuf:"bytes,8,opt,name=authorizeToken"` - // RefreshToken is the value by which this token can be renewed. Can be blank. + // refreshToken is the value by which this token can be renewed. Can be blank. RefreshToken string `json:"refreshToken,omitempty" protobuf:"bytes,9,opt,name=refreshToken"` - // InactivityTimeoutSeconds is the value in seconds, from the + // inactivityTimeoutSeconds is the value in seconds, from the // CreationTimestamp, after which this token can no longer be used. // The value is automatically incremented when the token is used. InactivityTimeoutSeconds int32 `json:"inactivityTimeoutSeconds,omitempty" protobuf:"varint,10,opt,name=inactivityTimeoutSeconds"` @@ -69,32 +69,32 @@ type OAuthAuthorizeToken struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // ClientName references the client that created this token. + // clientName references the client that created this token. ClientName string `json:"clientName,omitempty" protobuf:"bytes,2,opt,name=clientName"` - // ExpiresIn is the seconds from CreationTime before this token expires. + // expiresIn is the seconds from CreationTime before this token expires. ExpiresIn int64 `json:"expiresIn,omitempty" protobuf:"varint,3,opt,name=expiresIn"` - // Scopes is an array of the requested scopes. + // scopes is an array of the requested scopes. Scopes []string `json:"scopes,omitempty" protobuf:"bytes,4,rep,name=scopes"` - // RedirectURI is the redirection associated with the token. + // redirectURI is the redirection associated with the token. RedirectURI string `json:"redirectURI,omitempty" protobuf:"bytes,5,opt,name=redirectURI"` - // State data from request + // state data from request State string `json:"state,omitempty" protobuf:"bytes,6,opt,name=state"` - // UserName is the user name associated with this token + // userName is the user name associated with this token UserName string `json:"userName,omitempty" protobuf:"bytes,7,opt,name=userName"` - // UserUID is the unique UID associated with this token. 
UserUID and UserName must both match + // userUID is the unique UID associated with this token. UserUID and UserName must both match // for this token to be valid. UserUID string `json:"userUID,omitempty" protobuf:"bytes,8,opt,name=userUID"` - // CodeChallenge is the optional code_challenge associated with this authorization code, as described in rfc7636 + // codeChallenge is the optional code_challenge associated with this authorization code, as described in rfc7636 CodeChallenge string `json:"codeChallenge,omitempty" protobuf:"bytes,9,opt,name=codeChallenge"` - // CodeChallengeMethod is the optional code_challenge_method associated with this authorization code, as described in rfc7636 + // codeChallengeMethod is the optional code_challenge_method associated with this authorization code, as described in rfc7636 CodeChallengeMethod string `json:"codeChallengeMethod,omitempty" protobuf:"bytes,10,opt,name=codeChallengeMethod"` } @@ -113,36 +113,36 @@ type OAuthClient struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Secret is the unique secret associated with a client + // secret is the unique secret associated with a client Secret string `json:"secret,omitempty" protobuf:"bytes,2,opt,name=secret"` - // AdditionalSecrets holds other secrets that may be used to identify the client. This is useful for rotation + // additionalSecrets holds other secrets that may be used to identify the client. This is useful for rotation // and for service account token validation AdditionalSecrets []string `json:"additionalSecrets,omitempty" protobuf:"bytes,3,rep,name=additionalSecrets"` - // RespondWithChallenges indicates whether the client wants authentication needed responses made in the form of challenges instead of redirects + // respondWithChallenges indicates whether the client wants authentication needed responses made in the form of challenges instead of redirects RespondWithChallenges bool `json:"respondWithChallenges,omitempty" protobuf:"varint,4,opt,name=respondWithChallenges"` - // RedirectURIs is the valid redirection URIs associated with a client + // redirectURIs is the valid redirection URIs associated with a client // +patchStrategy=merge RedirectURIs []string `json:"redirectURIs,omitempty" patchStrategy:"merge" protobuf:"bytes,5,rep,name=redirectURIs"` - // GrantMethod is a required field which determines how to handle grants for this client. + // grantMethod is a required field which determines how to handle grants for this client. // Valid grant handling methods are: // - auto: always approves grant requests, useful for trusted clients // - prompt: prompts the end user for approval of grant requests, useful for third-party clients GrantMethod GrantHandlerType `json:"grantMethod,omitempty" protobuf:"bytes,6,opt,name=grantMethod,casttype=GrantHandlerType"` - // ScopeRestrictions describes which scopes this client can request. Each requested scope + // scopeRestrictions describes which scopes this client can request. Each requested scope // is checked against each restriction. If any restriction matches, then the scope is allowed. // If no restriction matches, then the scope is denied. ScopeRestrictions []ScopeRestriction `json:"scopeRestrictions,omitempty" protobuf:"bytes,7,rep,name=scopeRestrictions"` - // AccessTokenMaxAgeSeconds overrides the default access token max age for tokens granted to this client. 
+ // accessTokenMaxAgeSeconds overrides the default access token max age for tokens granted to this client. // 0 means no expiration. AccessTokenMaxAgeSeconds *int32 `json:"accessTokenMaxAgeSeconds,omitempty" protobuf:"varint,8,opt,name=accessTokenMaxAgeSeconds"` - // AccessTokenInactivityTimeoutSeconds overrides the default token + // accessTokenInactivityTimeoutSeconds overrides the default token // inactivity timeout for tokens granted to this client. // The value represents the maximum amount of time that can occur between // consecutive uses of the token. Tokens become invalid if they are not @@ -174,17 +174,17 @@ type ScopeRestriction struct { // ExactValues means the scope has to match a particular set of strings exactly ExactValues []string `json:"literals,omitempty" protobuf:"bytes,1,rep,name=literals"` - // ClusterRole describes a set of restrictions for cluster role scoping. + // clusterRole describes a set of restrictions for cluster role scoping. ClusterRole *ClusterRoleScopeRestriction `json:"clusterRole,omitempty" protobuf:"bytes,2,opt,name=clusterRole"` } // ClusterRoleScopeRestriction describes restrictions on cluster role scopes type ClusterRoleScopeRestriction struct { - // RoleNames is the list of cluster roles that can referenced. * means anything + // roleNames is the list of cluster roles that can referenced. * means anything RoleNames []string `json:"roleNames" protobuf:"bytes,1,rep,name=roleNames"` - // Namespaces is the list of namespaces that can be referenced. * means any of them (including *) + // namespaces is the list of namespaces that can be referenced. * means any of them (including *) Namespaces []string `json:"namespaces" protobuf:"bytes,2,rep,name=namespaces"` - // AllowEscalation indicates whether you can request roles and their escalating resources + // allowEscalation indicates whether you can request roles and their escalating resources AllowEscalation bool `json:"allowEscalation" protobuf:"varint,3,opt,name=allowEscalation"` } @@ -203,17 +203,17 @@ type OAuthClientAuthorization struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // ClientName references the client that created this authorization + // clientName references the client that created this authorization ClientName string `json:"clientName,omitempty" protobuf:"bytes,2,opt,name=clientName"` - // UserName is the user name that authorized this client + // userName is the user name that authorized this client UserName string `json:"userName,omitempty" protobuf:"bytes,3,opt,name=userName"` - // UserUID is the unique UID associated with this authorization. UserUID and UserName + // userUID is the unique UID associated with this authorization. UserUID and UserName // must both match for this authorization to be valid. UserUID string `json:"userUID,omitempty" protobuf:"bytes,4,opt,name=userUID"` - // Scopes is an array of the granted scopes. + // scopes is an array of the granted scopes. 
Scopes []string `json:"scopes,omitempty" protobuf:"bytes,5,rep,name=scopes"` } @@ -230,7 +230,7 @@ type OAuthAccessTokenList struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Items is the list of OAuth access tokens + // items is the list of OAuth access tokens Items []OAuthAccessToken `json:"items" protobuf:"bytes,2,rep,name=items"` } @@ -247,7 +247,7 @@ type OAuthAuthorizeTokenList struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Items is the list of OAuth authorization tokens + // items is the list of OAuth authorization tokens Items []OAuthAuthorizeToken `json:"items" protobuf:"bytes,2,rep,name=items"` } @@ -264,7 +264,7 @@ type OAuthClientList struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Items is the list of OAuth clients + // items is the list of OAuth clients Items []OAuthClient `json:"items" protobuf:"bytes,2,rep,name=items"` } @@ -281,7 +281,7 @@ type OAuthClientAuthorizationList struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Items is the list of OAuth client authorizations + // items is the list of OAuth client authorizations Items []OAuthClientAuthorization `json:"items" protobuf:"bytes,2,rep,name=items"` } diff --git a/vendor/github.com/openshift/api/oauth/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/oauth/v1/zz_generated.swagger_doc_generated.go index f62b715c0..171b5221f 100644 --- a/vendor/github.com/openshift/api/oauth/v1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/oauth/v1/zz_generated.swagger_doc_generated.go @@ -13,9 +13,9 @@ package v1 // AUTO-GENERATED FUNCTIONS START HERE var map_ClusterRoleScopeRestriction = map[string]string{ "": "ClusterRoleScopeRestriction describes restrictions on cluster role scopes", - "roleNames": "RoleNames is the list of cluster roles that can referenced. * means anything", - "namespaces": "Namespaces is the list of namespaces that can be referenced. * means any of them (including *)", - "allowEscalation": "AllowEscalation indicates whether you can request roles and their escalating resources", + "roleNames": "roleNames is the list of cluster roles that can referenced. * means anything", + "namespaces": "namespaces is the list of namespaces that can be referenced. * means any of them (including *)", + "allowEscalation": "allowEscalation indicates whether you can request roles and their escalating resources", } func (ClusterRoleScopeRestriction) SwaggerDoc() map[string]string { @@ -25,15 +25,15 @@ func (ClusterRoleScopeRestriction) SwaggerDoc() map[string]string { var map_OAuthAccessToken = map[string]string{ "": "OAuthAccessToken describes an OAuth access token. 
The name of a token must be prefixed with a `sha256~` string, must not contain \"/\" or \"%\" characters and must be at least 32 characters long.\n\nThe name of the token is constructed from the actual token by sha256-hashing it and using URL-safe unpadded base64-encoding (as described in RFC4648) on the hashed result.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "clientName": "ClientName references the client that created this token.", - "expiresIn": "ExpiresIn is the seconds from CreationTime before this token expires.", - "scopes": "Scopes is an array of the requested scopes.", - "redirectURI": "RedirectURI is the redirection associated with the token.", - "userName": "UserName is the user name associated with this token", - "userUID": "UserUID is the unique UID associated with this token", - "authorizeToken": "AuthorizeToken contains the token that authorized this token", - "refreshToken": "RefreshToken is the value by which this token can be renewed. Can be blank.", - "inactivityTimeoutSeconds": "InactivityTimeoutSeconds is the value in seconds, from the CreationTimestamp, after which this token can no longer be used. The value is automatically incremented when the token is used.", + "clientName": "clientName references the client that created this token.", + "expiresIn": "expiresIn is the seconds from CreationTime before this token expires.", + "scopes": "scopes is an array of the requested scopes.", + "redirectURI": "redirectURI is the redirection associated with the token.", + "userName": "userName is the user name associated with this token", + "userUID": "userUID is the unique UID associated with this token", + "authorizeToken": "authorizeToken contains the token that authorized this token", + "refreshToken": "refreshToken is the value by which this token can be renewed. Can be blank.", + "inactivityTimeoutSeconds": "inactivityTimeoutSeconds is the value in seconds, from the CreationTimestamp, after which this token can no longer be used. The value is automatically incremented when the token is used.", } func (OAuthAccessToken) SwaggerDoc() map[string]string { @@ -43,7 +43,7 @@ func (OAuthAccessToken) SwaggerDoc() map[string]string { var map_OAuthAccessTokenList = map[string]string{ "": "OAuthAccessTokenList is a collection of OAuth access tokens\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items is the list of OAuth access tokens", + "items": "items is the list of OAuth access tokens", } func (OAuthAccessTokenList) SwaggerDoc() map[string]string { @@ -53,15 +53,15 @@ func (OAuthAccessTokenList) SwaggerDoc() map[string]string { var map_OAuthAuthorizeToken = map[string]string{ "": "OAuthAuthorizeToken describes an OAuth authorization token\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "clientName": "ClientName references the client that created this token.", - "expiresIn": "ExpiresIn is the seconds from CreationTime before this token expires.", - "scopes": "Scopes is an array of the requested scopes.", - "redirectURI": "RedirectURI is the redirection associated with the token.", - "state": "State data from request", - "userName": "UserName is the user name associated with this token", - "userUID": "UserUID is the unique UID associated with this token. UserUID and UserName must both match for this token to be valid.", - "codeChallenge": "CodeChallenge is the optional code_challenge associated with this authorization code, as described in rfc7636", - "codeChallengeMethod": "CodeChallengeMethod is the optional code_challenge_method associated with this authorization code, as described in rfc7636", + "clientName": "clientName references the client that created this token.", + "expiresIn": "expiresIn is the seconds from CreationTime before this token expires.", + "scopes": "scopes is an array of the requested scopes.", + "redirectURI": "redirectURI is the redirection associated with the token.", + "state": "state data from request", + "userName": "userName is the user name associated with this token", + "userUID": "userUID is the unique UID associated with this token. UserUID and UserName must both match for this token to be valid.", + "codeChallenge": "codeChallenge is the optional code_challenge associated with this authorization code, as described in rfc7636", + "codeChallengeMethod": "codeChallengeMethod is the optional code_challenge_method associated with this authorization code, as described in rfc7636", } func (OAuthAuthorizeToken) SwaggerDoc() map[string]string { @@ -71,7 +71,7 @@ func (OAuthAuthorizeToken) SwaggerDoc() map[string]string { var map_OAuthAuthorizeTokenList = map[string]string{ "": "OAuthAuthorizeTokenList is a collection of OAuth authorization tokens\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items is the list of OAuth authorization tokens", + "items": "items is the list of OAuth authorization tokens", } func (OAuthAuthorizeTokenList) SwaggerDoc() map[string]string { @@ -81,14 +81,14 @@ func (OAuthAuthorizeTokenList) SwaggerDoc() map[string]string { var map_OAuthClient = map[string]string{ "": "OAuthClient describes an OAuth client\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "secret": "Secret is the unique secret associated with a client", - "additionalSecrets": "AdditionalSecrets holds other secrets that may be used to identify the client. 
This is useful for rotation and for service account token validation", - "respondWithChallenges": "RespondWithChallenges indicates whether the client wants authentication needed responses made in the form of challenges instead of redirects", - "redirectURIs": "RedirectURIs is the valid redirection URIs associated with a client", - "grantMethod": "GrantMethod is a required field which determines how to handle grants for this client. Valid grant handling methods are:\n - auto: always approves grant requests, useful for trusted clients\n - prompt: prompts the end user for approval of grant requests, useful for third-party clients", - "scopeRestrictions": "ScopeRestrictions describes which scopes this client can request. Each requested scope is checked against each restriction. If any restriction matches, then the scope is allowed. If no restriction matches, then the scope is denied.", - "accessTokenMaxAgeSeconds": "AccessTokenMaxAgeSeconds overrides the default access token max age for tokens granted to this client. 0 means no expiration.", - "accessTokenInactivityTimeoutSeconds": "AccessTokenInactivityTimeoutSeconds overrides the default token inactivity timeout for tokens granted to this client. The value represents the maximum amount of time that can occur between consecutive uses of the token. Tokens become invalid if they are not used within this temporal window. The user will need to acquire a new token to regain access once a token times out. This value needs to be set only if the default set in configuration is not appropriate for this client. Valid values are: - 0: Tokens for this client never time out - X: Tokens time out if there is no activity for X seconds The current minimum allowed value for X is 300 (5 minutes)\n\nWARNING: existing tokens' timeout will not be affected (lowered) by changing this value", + "secret": "secret is the unique secret associated with a client", + "additionalSecrets": "additionalSecrets holds other secrets that may be used to identify the client. This is useful for rotation and for service account token validation", + "respondWithChallenges": "respondWithChallenges indicates whether the client wants authentication needed responses made in the form of challenges instead of redirects", + "redirectURIs": "redirectURIs is the valid redirection URIs associated with a client", + "grantMethod": "grantMethod is a required field which determines how to handle grants for this client. Valid grant handling methods are:\n - auto: always approves grant requests, useful for trusted clients\n - prompt: prompts the end user for approval of grant requests, useful for third-party clients", + "scopeRestrictions": "scopeRestrictions describes which scopes this client can request. Each requested scope is checked against each restriction. If any restriction matches, then the scope is allowed. If no restriction matches, then the scope is denied.", + "accessTokenMaxAgeSeconds": "accessTokenMaxAgeSeconds overrides the default access token max age for tokens granted to this client. 0 means no expiration.", + "accessTokenInactivityTimeoutSeconds": "accessTokenInactivityTimeoutSeconds overrides the default token inactivity timeout for tokens granted to this client. The value represents the maximum amount of time that can occur between consecutive uses of the token. Tokens become invalid if they are not used within this temporal window. The user will need to acquire a new token to regain access once a token times out. 
This value needs to be set only if the default set in configuration is not appropriate for this client. Valid values are: - 0: Tokens for this client never time out - X: Tokens time out if there is no activity for X seconds The current minimum allowed value for X is 300 (5 minutes)\n\nWARNING: existing tokens' timeout will not be affected (lowered) by changing this value", } func (OAuthClient) SwaggerDoc() map[string]string { @@ -98,10 +98,10 @@ func (OAuthClient) SwaggerDoc() map[string]string { var map_OAuthClientAuthorization = map[string]string{ "": "OAuthClientAuthorization describes an authorization created by an OAuth client\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "clientName": "ClientName references the client that created this authorization", - "userName": "UserName is the user name that authorized this client", - "userUID": "UserUID is the unique UID associated with this authorization. UserUID and UserName must both match for this authorization to be valid.", - "scopes": "Scopes is an array of the granted scopes.", + "clientName": "clientName references the client that created this authorization", + "userName": "userName is the user name that authorized this client", + "userUID": "userUID is the unique UID associated with this authorization. UserUID and UserName must both match for this authorization to be valid.", + "scopes": "scopes is an array of the granted scopes.", } func (OAuthClientAuthorization) SwaggerDoc() map[string]string { @@ -111,7 +111,7 @@ func (OAuthClientAuthorization) SwaggerDoc() map[string]string { var map_OAuthClientAuthorizationList = map[string]string{ "": "OAuthClientAuthorizationList is a collection of OAuth client authorizations\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items is the list of OAuth client authorizations", + "items": "items is the list of OAuth client authorizations", } func (OAuthClientAuthorizationList) SwaggerDoc() map[string]string { @@ -121,7 +121,7 @@ func (OAuthClientAuthorizationList) SwaggerDoc() map[string]string { var map_OAuthClientList = map[string]string{ "": "OAuthClientList is a collection of OAuth clients\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items is the list of OAuth clients", + "items": "items is the list of OAuth clients", } func (OAuthClientList) SwaggerDoc() map[string]string { @@ -152,7 +152,7 @@ func (RedirectReference) SwaggerDoc() map[string]string { var map_ScopeRestriction = map[string]string{ "": "ScopeRestriction describe one restriction on scopes. 
Exactly one option must be non-nil.", "literals": "ExactValues means the scope has to match a particular set of strings exactly", - "clusterRole": "ClusterRole describes a set of restrictions for cluster role scoping.", + "clusterRole": "clusterRole describes a set of restrictions for cluster role scoping.", } func (ScopeRestriction) SwaggerDoc() map[string]string { diff --git a/vendor/github.com/openshift/api/openapi/openapi.json b/vendor/github.com/openshift/api/openapi/openapi.json index 5decc918e..986ed74f9 100644 --- a/vendor/github.com/openshift/api/openapi/openapi.json +++ b/vendor/github.com/openshift/api/openapi/openapi.json @@ -82,7 +82,6 @@ "com.github.openshift.api.apiserver.v1.APIRequestCountStatus": { "type": "object", "required": [ - "conditions", "requestCount" ], "properties": { @@ -93,8 +92,10 @@ "default": {}, "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Condition" }, - "x-kubernetes-patch-merge-key": "type", - "x-kubernetes-patch-strategy": "merge" + "x-kubernetes-list-map-keys": [ + "type" + ], + "x-kubernetes-list-type": "map" }, "currentHour": { "description": "currentHour contains request history for the current hour. This is porcelain to make the API easier to read by humans seeing if they addressed a problem. This field is reset on the hour.", @@ -204,7 +205,7 @@ "default": "" }, "username": { - "description": "userName that made the request.", + "description": "username that made the request.", "type": "string", "default": "" } @@ -236,7 +237,7 @@ "type": "object", "properties": { "command": { - "description": "Command is optional and overrides CMD in the container Image.", + "description": "command is optional and overrides CMD in the container Image.", "type": "array", "items": { "type": "string", @@ -244,7 +245,7 @@ } }, "environment": { - "description": "Environment holds the environment which will be given to the container for Image.", + "description": "environment holds the environment which will be given to the container for Image.", "type": "array", "items": { "default": {}, @@ -252,7 +253,7 @@ } }, "image": { - "description": "Image specifies a container image which can carry out a deployment.", + "description": "image specifies a container image which can carry out a deployment.", "type": "string" } } @@ -265,11 +266,11 @@ ], "properties": { "imageTrigger": { - "description": "ImageTrigger contains the image trigger details, if this trigger was fired based on an image change", + "description": "imageTrigger contains the image trigger details, if this trigger was fired based on an image change", "$ref": "#/definitions/com.github.openshift.api.apps.v1.DeploymentCauseImageTrigger" }, "type": { - "description": "Type of the trigger that resulted in the creation of a new deployment", + "description": "type of the trigger that resulted in the creation of a new deployment", "type": "string", "default": "" } @@ -283,7 +284,7 @@ ], "properties": { "from": { - "description": "From is a reference to the changed object which triggered a deployment. The field may have the kinds DockerImage, ImageStreamTag, or ImageStreamImage.", + "description": "from is a reference to the changed object which triggered a deployment. 
The field may have the kinds DockerImage, ImageStreamTag, or ImageStreamImage.", "default": {}, "$ref": "#/definitions/io.k8s.api.core.v1.ObjectReference" } @@ -314,12 +315,12 @@ "type": "string" }, "status": { - "description": "Status of the condition, one of True, False, Unknown.", + "description": "status of the condition, one of True, False, Unknown.", "type": "string", "default": "" }, "type": { - "description": "Type of deployment condition.", + "description": "type of deployment condition.", "type": "string", "default": "" } @@ -346,12 +347,12 @@ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta" }, "spec": { - "description": "Spec represents a desired deployment state and how to deploy to it.", + "description": "spec represents a desired deployment state and how to deploy to it.", "default": {}, "$ref": "#/definitions/com.github.openshift.api.apps.v1.DeploymentConfigSpec" }, "status": { - "description": "Status represents the current deployment state.", + "description": "status represents the current deployment state.", "default": {}, "$ref": "#/definitions/com.github.openshift.api.apps.v1.DeploymentConfigStatus" } @@ -369,7 +370,7 @@ "type": "string" }, "items": { - "description": "Items is a list of deployment configs", + "description": "items is a list of deployment configs", "type": "array", "items": { "default": {}, @@ -404,17 +405,17 @@ "type": "string" }, "name": { - "description": "Name of the deployment config that will be rolled back.", + "description": "name of the deployment config that will be rolled back.", "type": "string", "default": "" }, "spec": { - "description": "Spec defines the options to rollback generation.", + "description": "spec defines the options to rollback generation.", "default": {}, "$ref": "#/definitions/com.github.openshift.api.apps.v1.DeploymentConfigRollbackSpec" }, "updatedAnnotations": { - "description": "UpdatedAnnotations is a set of new annotations that will be added in the deployment config.", + "description": "updatedAnnotations is a set of new annotations that will be added in the deployment config.", "type": "object", "additionalProperties": { "type": "string", @@ -435,32 +436,32 @@ ], "properties": { "from": { - "description": "From points to a ReplicationController which is a deployment.", + "description": "from points to a ReplicationController which is a deployment.", "default": {}, "$ref": "#/definitions/io.k8s.api.core.v1.ObjectReference" }, "includeReplicationMeta": { - "description": "IncludeReplicationMeta specifies whether to include the replica count and selector.", + "description": "includeReplicationMeta specifies whether to include the replica count and selector.", "type": "boolean", "default": false }, "includeStrategy": { - "description": "IncludeStrategy specifies whether to include the deployment Strategy.", + "description": "includeStrategy specifies whether to include the deployment Strategy.", "type": "boolean", "default": false }, "includeTemplate": { - "description": "IncludeTemplate specifies whether to include the PodTemplateSpec.", + "description": "includeTemplate specifies whether to include the PodTemplateSpec.", "type": "boolean", "default": false }, "includeTriggers": { - "description": "IncludeTriggers specifies whether to include config Triggers.", + "description": "includeTriggers specifies whether to include config Triggers.", "type": "boolean", "default": false }, "revision": { - "description": "Revision to rollback to. 
If set to 0, rollback to the last revision.", + "description": "revision to rollback to. If set to 0, rollback to the last revision.", "type": "integer", "format": "int64" } @@ -471,27 +472,27 @@ "type": "object", "properties": { "minReadySeconds": { - "description": "MinReadySeconds is the minimum number of seconds for which a newly created pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)", + "description": "minReadySeconds is the minimum number of seconds for which a newly created pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)", "type": "integer", "format": "int32" }, "paused": { - "description": "Paused indicates that the deployment config is paused resulting in no new deployments on template changes or changes in the template caused by other triggers.", + "description": "paused indicates that the deployment config is paused resulting in no new deployments on template changes or changes in the template caused by other triggers.", "type": "boolean" }, "replicas": { - "description": "Replicas is the number of desired replicas.", + "description": "replicas is the number of desired replicas.", "type": "integer", "format": "int32", "default": 0 }, "revisionHistoryLimit": { - "description": "RevisionHistoryLimit is the number of old ReplicationControllers to retain to allow for rollbacks. This field is a pointer to allow for differentiation between an explicit zero and not specified. Defaults to 10. (This only applies to DeploymentConfigs created via the new group API resource, not the legacy resource.)", + "description": "revisionHistoryLimit is the number of old ReplicationControllers to retain to allow for rollbacks. This field is a pointer to allow for differentiation between an explicit zero and not specified. Defaults to 10. (This only applies to DeploymentConfigs created via the new group API resource, not the legacy resource.)", "type": "integer", "format": "int32" }, "selector": { - "description": "Selector is a label query over pods that should match the Replicas count.", + "description": "selector is a label query over pods that should match the Replicas count.", "type": "object", "additionalProperties": { "type": "string", @@ -499,21 +500,21 @@ } }, "strategy": { - "description": "Strategy describes how a deployment is executed.", + "description": "strategy describes how a deployment is executed.", "default": {}, "$ref": "#/definitions/com.github.openshift.api.apps.v1.DeploymentStrategy" }, "template": { - "description": "Template is the object that describes the pod that will be created if insufficient replicas are detected.", + "description": "template is the object that describes the pod that will be created if insufficient replicas are detected.", "$ref": "#/definitions/io.k8s.api.core.v1.PodTemplateSpec" }, "test": { - "description": "Test ensures that this deployment config will have zero replicas except while a deployment is running. This allows the deployment config to be used as a continuous deployment test - triggering on images, running the deployment, and then succeeding or failing. Post strategy hooks and After actions can be used to integrate successful deployment with an action.", + "description": "test ensures that this deployment config will have zero replicas except while a deployment is running. 
This allows the deployment config to be used as a continuous deployment test - triggering on images, running the deployment, and then succeeding or failing. Post strategy hooks and After actions can be used to integrate successful deployment with an action.", "type": "boolean", "default": false }, "triggers": { - "description": "Triggers determine how updates to a DeploymentConfig result in new deployments. If no triggers are defined, a new deployment can only occur as a result of an explicit client update to the DeploymentConfig with a new LatestVersion. If null, defaults to having a config change trigger.", + "description": "triggers determine how updates to a DeploymentConfig result in new deployments. If no triggers are defined, a new deployment can only occur as a result of an explicit client update to the DeploymentConfig with a new LatestVersion. If null, defaults to having a config change trigger.", "type": "array", "items": { "default": {}, @@ -535,13 +536,13 @@ ], "properties": { "availableReplicas": { - "description": "AvailableReplicas is the total number of available pods targeted by this deployment config.", + "description": "availableReplicas is the total number of available pods targeted by this deployment config.", "type": "integer", "format": "int32", "default": 0 }, "conditions": { - "description": "Conditions represents the latest available observations of a deployment config's current state.", + "description": "conditions represents the latest available observations of a deployment config's current state.", "type": "array", "items": { "default": {}, @@ -551,17 +552,17 @@ "x-kubernetes-patch-strategy": "merge" }, "details": { - "description": "Details are the reasons for the update to this deployment config. This could be based on a change made by the user or caused by an automatic trigger", + "description": "details are the reasons for the update to this deployment config. 
This could be based on a change made by the user or caused by an automatic trigger", "$ref": "#/definitions/com.github.openshift.api.apps.v1.DeploymentDetails" }, "latestVersion": { - "description": "LatestVersion is used to determine whether the current deployment associated with a deployment config is out of sync.", + "description": "latestVersion is used to determine whether the current deployment associated with a deployment config is out of sync.", "type": "integer", "format": "int64", "default": 0 }, "observedGeneration": { - "description": "ObservedGeneration is the most recent generation observed by the deployment config controller.", + "description": "observedGeneration is the most recent generation observed by the deployment config controller.", "type": "integer", "format": "int64", "default": 0 @@ -572,19 +573,19 @@ "format": "int32" }, "replicas": { - "description": "Replicas is the total number of pods targeted by this deployment config.", + "description": "replicas is the total number of pods targeted by this deployment config.", "type": "integer", "format": "int32", "default": 0 }, "unavailableReplicas": { - "description": "UnavailableReplicas is the total number of unavailable pods targeted by this deployment config.", + "description": "unavailableReplicas is the total number of unavailable pods targeted by this deployment config.", "type": "integer", "format": "int32", "default": 0 }, "updatedReplicas": { - "description": "UpdatedReplicas is the total number of non-terminated pods targeted by this deployment config that have the desired template spec.", + "description": "updatedReplicas is the total number of non-terminated pods targeted by this deployment config that have the desired template spec.", "type": "integer", "format": "int32", "default": 0 @@ -599,7 +600,7 @@ ], "properties": { "causes": { - "description": "Causes are extended data associated with all the causes for creating a new deployment", + "description": "causes are extended data associated with all the causes for creating a new deployment", "type": "array", "items": { "default": {}, @@ -607,7 +608,7 @@ } }, "message": { - "description": "Message is the user specified change message, if this deployment was triggered manually by the user", + "description": "message is the user specified change message, if this deployment was triggered manually by the user", "type": "string" } } @@ -639,7 +640,7 @@ "type": "string" }, "follow": { - "description": "Follow if true indicates that the build log should be streamed until the build terminates.", + "description": "follow if true indicates that the build log should be streamed until the build terminates.", "type": "boolean" }, "kind": { @@ -652,7 +653,7 @@ "format": "int64" }, "nowait": { - "description": "NoWait if true causes the call to return immediately even if the deployment is not available yet. Otherwise the server will wait until the deployment has started.", + "description": "nowait if true causes the call to return immediately even if the deployment is not available yet. 
Otherwise the server will wait until the deployment has started.", "type": "boolean" }, "previous": { @@ -678,7 +679,7 @@ "type": "boolean" }, "version": { - "description": "Version of the deployment for which to view logs.", + "description": "version of the deployment for which to view logs.", "type": "integer", "format": "int64" } @@ -698,7 +699,7 @@ "type": "string" }, "excludeTriggers": { - "description": "ExcludeTriggers instructs the instantiator to avoid processing the specified triggers. This field overrides the triggers from latest and allows clients to control specific logic. This field is ignored if not specified.", + "description": "excludeTriggers instructs the instantiator to avoid processing the specified triggers. This field overrides the triggers from latest and allows clients to control specific logic. This field is ignored if not specified.", "type": "array", "items": { "type": "string", @@ -706,7 +707,7 @@ } }, "force": { - "description": "Force will try to force a new deployment to run. If the deployment config is paused, then setting this to true will return an Invalid error.", + "description": "force will try to force a new deployment to run. If the deployment config is paused, then setting this to true will return an Invalid error.", "type": "boolean", "default": false }, @@ -715,12 +716,12 @@ "type": "string" }, "latest": { - "description": "Latest will update the deployment config with the latest state from all triggers.", + "description": "latest will update the deployment config with the latest state from all triggers.", "type": "boolean", "default": false }, "name": { - "description": "Name of the deployment config for requesting a new deployment.", + "description": "name of the deployment config for requesting a new deployment.", "type": "string", "default": "" } @@ -731,12 +732,12 @@ "type": "object", "properties": { "activeDeadlineSeconds": { - "description": "ActiveDeadlineSeconds is the duration in seconds that the deployer pods for this deployment config may be active on a node before the system actively tries to terminate them.", + "description": "activeDeadlineSeconds is the duration in seconds that the deployer pods for this deployment config may be active on a node before the system actively tries to terminate them.", "type": "integer", "format": "int64" }, "annotations": { - "description": "Annotations is a set of key, value pairs added to custom deployer and lifecycle pre/post hook pods.", + "description": "annotations is a set of key, value pairs added to custom deployer and lifecycle pre/post hook pods.", "type": "object", "additionalProperties": { "type": "string", @@ -744,11 +745,11 @@ } }, "customParams": { - "description": "CustomParams are the input to the Custom deployment strategy, and may also be specified for the Recreate and Rolling strategies to customize the execution process that runs the deployment.", + "description": "customParams are the input to the Custom deployment strategy, and may also be specified for the Recreate and Rolling strategies to customize the execution process that runs the deployment.", "$ref": "#/definitions/com.github.openshift.api.apps.v1.CustomDeploymentStrategyParams" }, "labels": { - "description": "Labels is a set of key, value pairs added to custom deployer and lifecycle pre/post hook pods.", + "description": "labels is a set of key, value pairs added to custom deployer and lifecycle pre/post hook pods.", "type": "object", "additionalProperties": { "type": "string", @@ -756,20 +757,20 @@ } }, 
"recreateParams": { - "description": "RecreateParams are the input to the Recreate deployment strategy.", + "description": "recreateParams are the input to the Recreate deployment strategy.", "$ref": "#/definitions/com.github.openshift.api.apps.v1.RecreateDeploymentStrategyParams" }, "resources": { - "description": "Resources contains resource requirements to execute the deployment and any hooks.", + "description": "resources contains resource requirements to execute the deployment and any hooks.", "default": {}, "$ref": "#/definitions/io.k8s.api.core.v1.ResourceRequirements" }, "rollingParams": { - "description": "RollingParams are the input to the Rolling deployment strategy.", + "description": "rollingParams are the input to the Rolling deployment strategy.", "$ref": "#/definitions/com.github.openshift.api.apps.v1.RollingDeploymentStrategyParams" }, "type": { - "description": "Type is the name of a deployment strategy.", + "description": "type is the name of a deployment strategy.", "type": "string" } } @@ -782,11 +783,11 @@ ], "properties": { "automatic": { - "description": "Automatic means that the detection of a new tag value should result in an image update inside the pod template.", + "description": "automatic means that the detection of a new tag value should result in an image update inside the pod template.", "type": "boolean" }, "containerNames": { - "description": "ContainerNames is used to restrict tag updates to the specified set of container names in a pod. If multiple triggers point to the same containers, the resulting behavior is undefined. Future API versions will make this a validation error. If ContainerNames does not point to a valid container, the trigger will be ignored. Future API versions will make this a validation error.", + "description": "containerNames is used to restrict tag updates to the specified set of container names in a pod. If multiple triggers point to the same containers, the resulting behavior is undefined. Future API versions will make this a validation error. If ContainerNames does not point to a valid container, the trigger will be ignored. Future API versions will make this a validation error.", "type": "array", "items": { "type": "string", @@ -794,12 +795,12 @@ } }, "from": { - "description": "From is a reference to an image stream tag to watch for changes. From.Name is the only required subfield - if From.Namespace is blank, the namespace of the current deployment trigger will be used.", + "description": "from is a reference to an image stream tag to watch for changes. 
From.Name is the only required subfield - if From.Namespace is blank, the namespace of the current deployment trigger will be used.", "default": {}, "$ref": "#/definitions/io.k8s.api.core.v1.ObjectReference" }, "lastTriggeredImage": { - "description": "LastTriggeredImage is the last image to be triggered.", + "description": "lastTriggeredImage is the last image to be triggered.", "type": "string" } } @@ -809,11 +810,11 @@ "type": "object", "properties": { "imageChangeParams": { - "description": "ImageChangeParams represents the parameters for the ImageChange trigger.", + "description": "imageChangeParams represents the parameters for the ImageChange trigger.", "$ref": "#/definitions/com.github.openshift.api.apps.v1.DeploymentTriggerImageChangeParams" }, "type": { - "description": "Type of the trigger", + "description": "type of the trigger", "type": "string" } } @@ -827,7 +828,7 @@ ], "properties": { "command": { - "description": "Command is the action command and its arguments.", + "description": "command is the action command and its arguments.", "type": "array", "items": { "type": "string", @@ -835,12 +836,12 @@ } }, "containerName": { - "description": "ContainerName is the name of a container in the deployment pod template whose container image will be used for the hook pod's container.", + "description": "containerName is the name of a container in the deployment pod template whose container image will be used for the hook pod's container.", "type": "string", "default": "" }, "env": { - "description": "Env is a set of environment variables to supply to the hook pod's container.", + "description": "env is a set of environment variables to supply to the hook pod's container.", "type": "array", "items": { "default": {}, @@ -848,7 +849,7 @@ } }, "volumes": { - "description": "Volumes is a list of named volumes from the pod template which should be copied to the hook pod. Volumes names not found in pod spec are ignored. An empty list means no volumes will be copied.", + "description": "volumes is a list of named volumes from the pod template which should be copied to the hook pod. Volumes names not found in pod spec are ignored. An empty list means no volumes will be copied.", "type": "array", "items": { "type": "string", @@ -865,16 +866,16 @@ ], "properties": { "execNewPod": { - "description": "ExecNewPod specifies the options for a lifecycle hook backed by a pod.", + "description": "execNewPod specifies the options for a lifecycle hook backed by a pod.", "$ref": "#/definitions/com.github.openshift.api.apps.v1.ExecNewPodHook" }, "failurePolicy": { - "description": "FailurePolicy specifies what action to take if the hook fails.", + "description": "failurePolicy specifies what action to take if the hook fails.", "type": "string", "default": "" }, "tagImages": { - "description": "TagImages instructs the deployer to tag the current image referenced under a container onto an image stream tag.", + "description": "tagImages instructs the deployer to tag the current image referenced under a container onto an image stream tag.", "type": "array", "items": { "default": {}, @@ -888,19 +889,19 @@ "type": "object", "properties": { "mid": { - "description": "Mid is a lifecycle hook which is executed while the deployment is scaled down to zero before the first new pod is created. All LifecycleHookFailurePolicy values are supported.", + "description": "mid is a lifecycle hook which is executed while the deployment is scaled down to zero before the first new pod is created. 
All LifecycleHookFailurePolicy values are supported.", "$ref": "#/definitions/com.github.openshift.api.apps.v1.LifecycleHook" }, "post": { - "description": "Post is a lifecycle hook which is executed after the strategy has finished all deployment logic. All LifecycleHookFailurePolicy values are supported.", + "description": "post is a lifecycle hook which is executed after the strategy has finished all deployment logic. All LifecycleHookFailurePolicy values are supported.", "$ref": "#/definitions/com.github.openshift.api.apps.v1.LifecycleHook" }, "pre": { - "description": "Pre is a lifecycle hook which is executed before the strategy manipulates the deployment. All LifecycleHookFailurePolicy values are supported.", + "description": "pre is a lifecycle hook which is executed before the strategy manipulates the deployment. All LifecycleHookFailurePolicy values are supported.", "$ref": "#/definitions/com.github.openshift.api.apps.v1.LifecycleHook" }, "timeoutSeconds": { - "description": "TimeoutSeconds is the time to wait for updates before giving up. If the value is nil, a default will be used.", + "description": "timeoutSeconds is the time to wait for updates before giving up. If the value is nil, a default will be used.", "type": "integer", "format": "int64" } @@ -911,33 +912,33 @@ "type": "object", "properties": { "intervalSeconds": { - "description": "IntervalSeconds is the time to wait between polling deployment status after update. If the value is nil, a default will be used.", + "description": "intervalSeconds is the time to wait between polling deployment status after update. If the value is nil, a default will be used.", "type": "integer", "format": "int64" }, "maxSurge": { - "description": "MaxSurge is the maximum number of pods that can be scheduled above the original number of pods. Value can be an absolute number (ex: 5) or a percentage of total pods at the start of the update (ex: 10%). Absolute number is calculated from percentage by rounding up.\n\nThis cannot be 0 if MaxUnavailable is 0. By default, 25% is used.\n\nExample: when this is set to 30%, the new RC can be scaled up by 30% immediately when the rolling update starts. Once old pods have been killed, new RC can be scaled up further, ensuring that total number of pods running at any time during the update is atmost 130% of original pods.", + "description": "maxSurge is the maximum number of pods that can be scheduled above the original number of pods. Value can be an absolute number (ex: 5) or a percentage of total pods at the start of the update (ex: 10%). Absolute number is calculated from percentage by rounding up.\n\nThis cannot be 0 if MaxUnavailable is 0. By default, 25% is used.\n\nExample: when this is set to 30%, the new RC can be scaled up by 30% immediately when the rolling update starts. Once old pods have been killed, new RC can be scaled up further, ensuring that total number of pods running at any time during the update is atmost 130% of original pods.", "$ref": "#/definitions/io.k8s.apimachinery.pkg.util.intstr.IntOrString" }, "maxUnavailable": { - "description": "MaxUnavailable is the maximum number of pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of total pods at the start of update (ex: 10%). Absolute number is calculated from percentage by rounding down.\n\nThis cannot be 0 if MaxSurge is 0. By default, 25% is used.\n\nExample: when this is set to 30%, the old RC can be scaled down by 30% immediately when the rolling update starts. 
Once new pods are ready, old RC can be scaled down further, followed by scaling up the new RC, ensuring that at least 70% of original number of pods are available at all times during the update.", + "description": "maxUnavailable is the maximum number of pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of total pods at the start of update (ex: 10%). Absolute number is calculated from percentage by rounding down.\n\nThis cannot be 0 if MaxSurge is 0. By default, 25% is used.\n\nExample: when this is set to 30%, the old RC can be scaled down by 30% immediately when the rolling update starts. Once new pods are ready, old RC can be scaled down further, followed by scaling up the new RC, ensuring that at least 70% of original number of pods are available at all times during the update.", "$ref": "#/definitions/io.k8s.apimachinery.pkg.util.intstr.IntOrString" }, "post": { - "description": "Post is a lifecycle hook which is executed after the strategy has finished all deployment logic. All LifecycleHookFailurePolicy values are supported.", + "description": "post is a lifecycle hook which is executed after the strategy has finished all deployment logic. All LifecycleHookFailurePolicy values are supported.", "$ref": "#/definitions/com.github.openshift.api.apps.v1.LifecycleHook" }, "pre": { - "description": "Pre is a lifecycle hook which is executed before the deployment process begins. All LifecycleHookFailurePolicy values are supported.", + "description": "pre is a lifecycle hook which is executed before the deployment process begins. All LifecycleHookFailurePolicy values are supported.", "$ref": "#/definitions/com.github.openshift.api.apps.v1.LifecycleHook" }, "timeoutSeconds": { - "description": "TimeoutSeconds is the time to wait for updates before giving up. If the value is nil, a default will be used.", + "description": "timeoutSeconds is the time to wait for updates before giving up. If the value is nil, a default will be used.", "type": "integer", "format": "int64" }, "updatePeriodSeconds": { - "description": "UpdatePeriodSeconds is the time to wait between individual pod updates. If the value is nil, a default will be used.", + "description": "updatePeriodSeconds is the time to wait between individual pod updates. If the value is nil, a default will be used.", "type": "integer", "format": "int64" } @@ -952,12 +953,12 @@ ], "properties": { "containerName": { - "description": "ContainerName is the name of a container in the deployment config whose image value will be used as the source of the tag. If there is only a single container this value will be defaulted to the name of that container.", + "description": "containerName is the name of a container in the deployment config whose image value will be used as the source of the tag. 
If there is only a single container this value will be defaulted to the name of that container.", "type": "string", "default": "" }, "to": { - "description": "To is the target ImageStreamTag to set the container's image onto.", + "description": "to is the target ImageStreamTag to set the container's image onto.", "default": {}, "$ref": "#/definitions/io.k8s.api.core.v1.ObjectReference" } @@ -978,26 +979,26 @@ ], "properties": { "content": { - "description": "Content is the actual content of the request for create and update", + "description": "content is the actual content of the request for create and update", "$ref": "#/definitions/io.k8s.apimachinery.pkg.runtime.RawExtension" }, "isNonResourceURL": { - "description": "IsNonResourceURL is true if this is a request for a non-resource URL (outside of the resource hierarchy)", + "description": "isNonResourceURL is true if this is a request for a non-resource URL (outside of the resource hierarchy)", "type": "boolean", "default": false }, "namespace": { - "description": "Namespace is the namespace of the action being requested. Currently, there is no distinction between no namespace and all namespaces", + "description": "namespace is the namespace of the action being requested. Currently, there is no distinction between no namespace and all namespaces", "type": "string", "default": "" }, "path": { - "description": "Path is the path of a non resource URL", + "description": "path is the path of a non resource URL", "type": "string", "default": "" }, "resource": { - "description": "Resource is one of the existing resource types", + "description": "resource is one of the existing resource types", "type": "string", "default": "" }, @@ -1012,12 +1013,12 @@ "default": "" }, "resourceName": { - "description": "ResourceName is the name of the resource being requested for a \"get\" or deleted for a \"delete\"", + "description": "resourceName is the name of the resource being requested for a \"get\" or deleted for a \"delete\"", "type": "string", "default": "" }, "verb": { - "description": "Verb is one of: get, list, watch, create, update, delete", + "description": "verb is one of: get, list, watch, create, update, delete", "type": "string", "default": "" } @@ -1031,7 +1032,7 @@ ], "properties": { "aggregationRule": { - "description": "AggregationRule is an optional field that describes how to build the Rules for this ClusterRole. If AggregationRule is set, then the Rules are controller managed and direct changes to Rules will be stomped by the controller.", + "description": "aggregationRule is an optional field that describes how to build the Rules for this ClusterRole. If AggregationRule is set, then the Rules are controller managed and direct changes to Rules will be stomped by the controller.", "$ref": "#/definitions/io.k8s.api.rbac.v1.AggregationRule" }, "apiVersion": { @@ -1048,7 +1049,7 @@ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta" }, "rules": { - "description": "Rules holds all the PolicyRules for this ClusterRole", + "description": "rules holds all the PolicyRules for this ClusterRole", "type": "array", "items": { "default": {}, @@ -1070,7 +1071,7 @@ "type": "string" }, "groupNames": { - "description": "GroupNames holds all the groups directly bound to the role. This field should only be specified when supporting legacy clients and servers. See Subjects for further details.", + "description": "groupNames holds all the groups directly bound to the role. 
This field should only be specified when supporting legacy clients and servers. See Subjects for further details.", "type": "array", "items": { "type": "string", @@ -1087,12 +1088,12 @@ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta" }, "roleRef": { - "description": "RoleRef can only reference the current namespace and the global namespace. If the ClusterRoleRef cannot be resolved, the Authorizer must return an error. Since Policy is a singleton, this is sufficient knowledge to locate a role.", + "description": "roleRef can only reference the current namespace and the global namespace. If the ClusterRoleRef cannot be resolved, the Authorizer must return an error. Since Policy is a singleton, this is sufficient knowledge to locate a role.", "default": {}, "$ref": "#/definitions/io.k8s.api.core.v1.ObjectReference" }, "subjects": { - "description": "Subjects hold object references to authorize with this rule. This field is ignored if UserNames or GroupNames are specified to support legacy clients and servers. Thus newer clients that do not need to support backwards compatibility should send only fully qualified Subjects and should omit the UserNames and GroupNames fields. Clients that need to support backwards compatibility can use this field to build the UserNames and GroupNames.", + "description": "subjects hold object references to authorize with this rule. This field is ignored if UserNames or GroupNames are specified to support legacy clients and servers. Thus newer clients that do not need to support backwards compatibility should send only fully qualified Subjects and should omit the UserNames and GroupNames fields. Clients that need to support backwards compatibility can use this field to build the UserNames and GroupNames.", "type": "array", "items": { "default": {}, @@ -1100,7 +1101,7 @@ } }, "userNames": { - "description": "UserNames holds all the usernames directly bound to the role. This field should only be specified when supporting legacy clients and servers. See Subjects for further details.", + "description": "userNames holds all the usernames directly bound to the role. This field should only be specified when supporting legacy clients and servers. See Subjects for further details.", "type": "array", "items": { "type": "string", @@ -1121,7 +1122,7 @@ "type": "string" }, "items": { - "description": "Items is a list of ClusterRoleBindings", + "description": "items is a list of ClusterRoleBindings", "type": "array", "items": { "default": {}, @@ -1151,7 +1152,7 @@ "type": "string" }, "items": { - "description": "Items is a list of ClusterRoles", + "description": "items is a list of ClusterRoles", "type": "array", "items": { "default": {}, @@ -1178,7 +1179,7 @@ ], "properties": { "groups": { - "description": "Groups is a list of groups used to match against an individual user's groups. If the user is a member of one of the whitelisted groups, the user is allowed to be bound to a role.", + "description": "groups is a list of groups used to match against an individual user's groups. 
If the user is a member of one of the whitelisted groups, the user is allowed to be bound to a role.", "type": "array", "items": { "type": "string", @@ -1228,11 +1229,11 @@ "type": "string" }, "content": { - "description": "Content is the actual content of the request for create and update", + "description": "content is the actual content of the request for create and update", "$ref": "#/definitions/io.k8s.apimachinery.pkg.runtime.RawExtension" }, "isNonResourceURL": { - "description": "IsNonResourceURL is true if this is a request for a non-resource URL (outside of the resource hierarchy)", + "description": "isNonResourceURL is true if this is a request for a non-resource URL (outside of the resource hierarchy)", "type": "boolean", "default": false }, @@ -1246,17 +1247,17 @@ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta" }, "namespace": { - "description": "Namespace is the namespace of the action being requested. Currently, there is no distinction between no namespace and all namespaces", + "description": "namespace is the namespace of the action being requested. Currently, there is no distinction between no namespace and all namespaces", "type": "string", "default": "" }, "path": { - "description": "Path is the path of a non resource URL", + "description": "path is the path of a non resource URL", "type": "string", "default": "" }, "resource": { - "description": "Resource is one of the existing resource types", + "description": "resource is one of the existing resource types", "type": "string", "default": "" }, @@ -1271,12 +1272,12 @@ "default": "" }, "resourceName": { - "description": "ResourceName is the name of the resource being requested for a \"get\" or deleted for a \"delete\"", + "description": "resourceName is the name of the resource being requested for a \"get\" or deleted for a \"delete\"", "type": "string", "default": "" }, "verb": { - "description": "Verb is one of: get, list, watch, create, update, delete", + "description": "verb is one of: get, list, watch, create, update, delete", "type": "string", "default": "" } @@ -1304,11 +1305,11 @@ "type": "string" }, "content": { - "description": "Content is the actual content of the request for create and update", + "description": "content is the actual content of the request for create and update", "$ref": "#/definitions/io.k8s.apimachinery.pkg.runtime.RawExtension" }, "groups": { - "description": "Groups is optional. Groups is the list of groups to which the User belongs.", + "description": "groups is optional. Groups is the list of groups to which the User belongs.", "type": "array", "items": { "type": "string", @@ -1316,7 +1317,7 @@ } }, "isNonResourceURL": { - "description": "IsNonResourceURL is true if this is a request for a non-resource URL (outside of the resource hierarchy)", + "description": "isNonResourceURL is true if this is a request for a non-resource URL (outside of the resource hierarchy)", "type": "boolean", "default": false }, @@ -1330,17 +1331,17 @@ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta" }, "namespace": { - "description": "Namespace is the namespace of the action being requested. Currently, there is no distinction between no namespace and all namespaces", + "description": "namespace is the namespace of the action being requested. 
Currently, there is no distinction between no namespace and all namespaces", "type": "string", "default": "" }, "path": { - "description": "Path is the path of a non resource URL", + "description": "path is the path of a non resource URL", "type": "string", "default": "" }, "resource": { - "description": "Resource is one of the existing resource types", + "description": "resource is one of the existing resource types", "type": "string", "default": "" }, @@ -1355,12 +1356,12 @@ "default": "" }, "resourceName": { - "description": "ResourceName is the name of the resource being requested for a \"get\" or deleted for a \"delete\"", + "description": "resourceName is the name of the resource being requested for a \"get\" or deleted for a \"delete\"", "type": "string", "default": "" }, "scopes": { - "description": "Scopes to use for the evaluation. Empty means \"use the unscoped (full) permissions of the user/groups\". Nil for a self-SAR, means \"use the scopes on this request\". Nil for a regular SAR, means the same as empty.", + "description": "scopes to use for the evaluation. Empty means \"use the unscoped (full) permissions of the user/groups\". Nil for a self-SAR, means \"use the scopes on this request\". Nil for a regular SAR, means the same as empty.", "type": "array", "items": { "type": "string", @@ -1368,12 +1369,12 @@ } }, "user": { - "description": "User is optional. If both User and Groups are empty, the current authenticated user is used.", + "description": "user is optional. If both User and Groups are empty, the current authenticated user is used.", "type": "string", "default": "" }, "verb": { - "description": "Verb is one of: get, list, watch, create, update, delete", + "description": "verb is one of: get, list, watch, create, update, delete", "type": "string", "default": "" } @@ -1388,12 +1389,12 @@ ], "properties": { "name": { - "description": "Name is the name of the cluster role", + "description": "name is the name of the cluster role", "type": "string", "default": "" }, "role": { - "description": "Role is the cluster role being named", + "description": "role is the cluster role being named", "default": {}, "$ref": "#/definitions/com.github.openshift.api.authorization.v1.ClusterRole" } @@ -1408,12 +1409,12 @@ ], "properties": { "name": { - "description": "Name is the name of the cluster role binding", + "description": "name is the name of the cluster role binding", "type": "string", "default": "" }, "roleBinding": { - "description": "RoleBinding is the cluster role binding being named", + "description": "roleBinding is the cluster role binding being named", "default": {}, "$ref": "#/definitions/com.github.openshift.api.authorization.v1.ClusterRoleBinding" } @@ -1428,12 +1429,12 @@ ], "properties": { "name": { - "description": "Name is the name of the role", + "description": "name is the name of the role", "type": "string", "default": "" }, "role": { - "description": "Role is the role being named", + "description": "role is the role being named", "default": {}, "$ref": "#/definitions/com.github.openshift.api.authorization.v1.Role" } @@ -1448,12 +1449,12 @@ ], "properties": { "name": { - "description": "Name is the name of the role binding", + "description": "name is the name of the role binding", "type": "string", "default": "" }, "roleBinding": { - "description": "RoleBinding is the role binding being named", + "description": "roleBinding is the role binding being named", "default": {}, "$ref": "#/definitions/com.github.openshift.api.authorization.v1.RoleBinding" } @@ -1468,7 
+1469,7 @@ ], "properties": { "apiGroups": { - "description": "APIGroups is the name of the APIGroup that contains the resources. If this field is empty, then both kubernetes and origin API groups are assumed. That means that if an action is requested against one of the enumerated resources in either the kubernetes or the origin API group, the request will be allowed", + "description": "apiGroups is the name of the APIGroup that contains the resources. If this field is empty, then both kubernetes and origin API groups are assumed. That means that if an action is requested against one of the enumerated resources in either the kubernetes or the origin API group, the request will be allowed", "type": "array", "items": { "type": "string", @@ -1476,7 +1477,7 @@ } }, "attributeRestrictions": { - "description": "AttributeRestrictions will vary depending on what the Authorizer/AuthorizationAttributeBuilder pair supports. If the Authorizer does not recognize how to handle the AttributeRestrictions, the Authorizer should report an error.", + "description": "attributeRestrictions will vary depending on what the Authorizer/AuthorizationAttributeBuilder pair supports. If the Authorizer does not recognize how to handle the AttributeRestrictions, the Authorizer should report an error.", "$ref": "#/definitions/io.k8s.apimachinery.pkg.runtime.RawExtension" }, "nonResourceURLs": { @@ -1488,7 +1489,7 @@ } }, "resourceNames": { - "description": "ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed.", + "description": "resourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed.", "type": "array", "items": { "type": "string", @@ -1496,7 +1497,7 @@ } }, "resources": { - "description": "Resources is a list of resources this rule applies to. ResourceAll represents all resources.", + "description": "resources is a list of resources this rule applies to. ResourceAll represents all resources.", "type": "array", "items": { "type": "string", @@ -1504,7 +1505,7 @@ } }, "verbs": { - "description": "Verbs is a list of Verbs that apply to ALL the ResourceKinds and AttributeRestrictions contained in this rule. VerbAll represents all kinds.", + "description": "verbs is a list of Verbs that apply to ALL the ResourceKinds and AttributeRestrictions contained in this rule. VerbAll represents all kinds.", "type": "array", "items": { "type": "string", @@ -1532,11 +1533,11 @@ "type": "string" }, "content": { - "description": "Content is the actual content of the request for create and update", + "description": "content is the actual content of the request for create and update", "$ref": "#/definitions/io.k8s.apimachinery.pkg.runtime.RawExtension" }, "isNonResourceURL": { - "description": "IsNonResourceURL is true if this is a request for a non-resource URL (outside of the resource hierarchy)", + "description": "isNonResourceURL is true if this is a request for a non-resource URL (outside of the resource hierarchy)", "type": "boolean", "default": false }, @@ -1550,17 +1551,17 @@ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta" }, "namespace": { - "description": "Namespace is the namespace of the action being requested. Currently, there is no distinction between no namespace and all namespaces", + "description": "namespace is the namespace of the action being requested. 
Currently, there is no distinction between no namespace and all namespaces", "type": "string", "default": "" }, "path": { - "description": "Path is the path of a non resource URL", + "description": "path is the path of a non resource URL", "type": "string", "default": "" }, "resource": { - "description": "Resource is one of the existing resource types", + "description": "resource is one of the existing resource types", "type": "string", "default": "" }, @@ -1575,12 +1576,12 @@ "default": "" }, "resourceName": { - "description": "ResourceName is the name of the resource being requested for a \"get\" or deleted for a \"delete\"", + "description": "resourceName is the name of the resource being requested for a \"get\" or deleted for a \"delete\"", "type": "string", "default": "" }, "verb": { - "description": "Verb is one of: get, list, watch, create, update, delete", + "description": "verb is one of: get, list, watch, create, update, delete", "type": "string", "default": "" } @@ -1617,7 +1618,7 @@ "type": "string" }, "namespace": { - "description": "Namespace is the namespace used for the access review", + "description": "namespace is the namespace used for the access review", "type": "string" }, "users": { @@ -1651,7 +1652,7 @@ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta" }, "rules": { - "description": "Rules holds all the PolicyRules for this Role", + "description": "rules holds all the PolicyRules for this Role", "type": "array", "items": { "default": {}, @@ -1673,7 +1674,7 @@ "type": "string" }, "groupNames": { - "description": "GroupNames holds all the groups directly bound to the role. This field should only be specified when supporting legacy clients and servers. See Subjects for further details.", + "description": "groupNames holds all the groups directly bound to the role. This field should only be specified when supporting legacy clients and servers. See Subjects for further details.", "type": "array", "items": { "type": "string", @@ -1690,12 +1691,12 @@ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta" }, "roleRef": { - "description": "RoleRef can only reference the current namespace and the global namespace. If the RoleRef cannot be resolved, the Authorizer must return an error. Since Policy is a singleton, this is sufficient knowledge to locate a role.", + "description": "roleRef can only reference the current namespace and the global namespace. If the RoleRef cannot be resolved, the Authorizer must return an error. Since Policy is a singleton, this is sufficient knowledge to locate a role.", "default": {}, "$ref": "#/definitions/io.k8s.api.core.v1.ObjectReference" }, "subjects": { - "description": "Subjects hold object references to authorize with this rule. This field is ignored if UserNames or GroupNames are specified to support legacy clients and servers. Thus newer clients that do not need to support backwards compatibility should send only fully qualified Subjects and should omit the UserNames and GroupNames fields. Clients that need to support backwards compatibility can use this field to build the UserNames and GroupNames.", + "description": "subjects hold object references to authorize with this rule. This field is ignored if UserNames or GroupNames are specified to support legacy clients and servers. Thus newer clients that do not need to support backwards compatibility should send only fully qualified Subjects and should omit the UserNames and GroupNames fields. 
Clients that need to support backwards compatibility can use this field to build the UserNames and GroupNames.", "type": "array", "items": { "default": {}, @@ -1703,7 +1704,7 @@ } }, "userNames": { - "description": "UserNames holds all the usernames directly bound to the role. This field should only be specified when supporting legacy clients and servers. See Subjects for further details.", + "description": "userNames holds all the usernames directly bound to the role. This field should only be specified when supporting legacy clients and servers. See Subjects for further details.", "type": "array", "items": { "type": "string", @@ -1724,7 +1725,7 @@ "type": "string" }, "items": { - "description": "Items is a list of RoleBindings", + "description": "items is a list of RoleBindings", "type": "array", "items": { "default": {}, @@ -1764,7 +1765,7 @@ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta" }, "spec": { - "description": "Spec defines the matcher.", + "description": "spec defines the matcher.", "default": {}, "$ref": "#/definitions/com.github.openshift.api.authorization.v1.RoleBindingRestrictionSpec" } @@ -1782,7 +1783,7 @@ "type": "string" }, "items": { - "description": "Items is a list of RoleBindingRestriction objects.", + "description": "items is a list of RoleBindingRestriction objects.", "type": "array", "items": { "default": {}, @@ -1810,15 +1811,15 @@ ], "properties": { "grouprestriction": { - "description": "GroupRestriction matches against group subjects.", + "description": "grouprestriction matches against group subjects.", "$ref": "#/definitions/com.github.openshift.api.authorization.v1.GroupRestriction" }, "serviceaccountrestriction": { - "description": "ServiceAccountRestriction matches against service-account subjects.", + "description": "serviceaccountrestriction matches against service-account subjects.", "$ref": "#/definitions/com.github.openshift.api.authorization.v1.ServiceAccountRestriction" }, "userrestriction": { - "description": "UserRestriction matches against user subjects.", + "description": "userrestriction matches against user subjects.", "$ref": "#/definitions/com.github.openshift.api.authorization.v1.UserRestriction" } } @@ -1835,7 +1836,7 @@ "type": "string" }, "items": { - "description": "Items is a list of Roles", + "description": "items is a list of Roles", "type": "array", "items": { "default": {}, @@ -1874,12 +1875,12 @@ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta" }, "spec": { - "description": "Spec adds information about how to conduct the check", + "description": "spec adds information about how to conduct the check", "default": {}, "$ref": "#/definitions/com.github.openshift.api.authorization.v1.SelfSubjectRulesReviewSpec" }, "status": { - "description": "Status is completed by the server to tell which permissions you have", + "description": "status is completed by the server to tell which permissions you have", "default": {}, "$ref": "#/definitions/com.github.openshift.api.authorization.v1.SubjectRulesReviewStatus" } @@ -1893,7 +1894,7 @@ ], "properties": { "scopes": { - "description": "Scopes to use for the evaluation. Empty means \"use the unscoped (full) permissions of the user/groups\". Nil means \"use the scopes on this request\".", + "description": "scopes to use for the evaluation. Empty means \"use the unscoped (full) permissions of the user/groups\". 
Nil means \"use the scopes on this request\".", "type": "array", "items": { "type": "string", @@ -1911,12 +1912,12 @@ ], "properties": { "name": { - "description": "Name is the name of the service account.", + "description": "name is the name of the service account.", "type": "string", "default": "" }, "namespace": { - "description": "Namespace is the namespace of the service account. Service accounts from inside the whitelisted namespaces are allowed to be bound to roles. If Namespace is empty, then the namespace of the RoleBindingRestriction in which the ServiceAccountReference is embedded is used.", + "description": "namespace is the namespace of the service account. Service accounts from inside the whitelisted namespaces are allowed to be bound to roles. If Namespace is empty, then the namespace of the RoleBindingRestriction in which the ServiceAccountReference is embedded is used.", "type": "string", "default": "" } @@ -1931,7 +1932,7 @@ ], "properties": { "namespaces": { - "description": "Namespaces specifies a list of literal namespace names.", + "description": "namespaces specifies a list of literal namespace names.", "type": "array", "items": { "type": "string", @@ -1939,7 +1940,7 @@ } }, "serviceaccounts": { - "description": "ServiceAccounts specifies a list of literal service-account names.", + "description": "serviceaccounts specifies a list of literal service-account names.", "type": "array", "items": { "default": {}, @@ -1970,7 +1971,7 @@ "type": "string" }, "content": { - "description": "Content is the actual content of the request for create and update", + "description": "content is the actual content of the request for create and update", "$ref": "#/definitions/io.k8s.apimachinery.pkg.runtime.RawExtension" }, "groups": { @@ -1982,7 +1983,7 @@ } }, "isNonResourceURL": { - "description": "IsNonResourceURL is true if this is a request for a non-resource URL (outside of the resource hierarchy)", + "description": "isNonResourceURL is true if this is a request for a non-resource URL (outside of the resource hierarchy)", "type": "boolean", "default": false }, @@ -1996,17 +1997,17 @@ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta" }, "namespace": { - "description": "Namespace is the namespace of the action being requested. Currently, there is no distinction between no namespace and all namespaces", + "description": "namespace is the namespace of the action being requested. Currently, there is no distinction between no namespace and all namespaces", "type": "string", "default": "" }, "path": { - "description": "Path is the path of a non resource URL", + "description": "path is the path of a non resource URL", "type": "string", "default": "" }, "resource": { - "description": "Resource is one of the existing resource types", + "description": "resource is one of the existing resource types", "type": "string", "default": "" }, @@ -2021,12 +2022,12 @@ "default": "" }, "resourceName": { - "description": "ResourceName is the name of the resource being requested for a \"get\" or deleted for a \"delete\"", + "description": "resourceName is the name of the resource being requested for a \"get\" or deleted for a \"delete\"", "type": "string", "default": "" }, "scopes": { - "description": "Scopes to use for the evaluation. Empty means \"use the unscoped (full) permissions of the user/groups\". Nil for a self-SAR, means \"use the scopes on this request\". Nil for a regular SAR, means the same as empty.", + "description": "scopes to use for the evaluation. 
Empty means \"use the unscoped (full) permissions of the user/groups\". Nil for a self-SAR, means \"use the scopes on this request\". Nil for a regular SAR, means the same as empty.", "type": "array", "items": { "type": "string", @@ -2034,12 +2035,12 @@ } }, "user": { - "description": "User is optional. If both User and Groups are empty, the current authenticated user is used.", + "description": "user is optional. If both User and Groups are empty, the current authenticated user is used.", "type": "string", "default": "" }, "verb": { - "description": "Verb is one of: get, list, watch, create, update, delete", + "description": "verb is one of: get, list, watch, create, update, delete", "type": "string", "default": "" } @@ -2053,7 +2054,7 @@ ], "properties": { "allowed": { - "description": "Allowed is required. True if the action would be allowed, false otherwise.", + "description": "allowed is required. True if the action would be allowed, false otherwise.", "type": "boolean", "default": false }, @@ -2062,7 +2063,7 @@ "type": "string" }, "evaluationError": { - "description": "EvaluationError is an indication that some error occurred during the authorization check. It is entirely possible to get an error and be able to continue determine authorization status in spite of it. This is most common when a bound role is missing, but enough roles are still present and bound to reason about the request.", + "description": "evaluationError is an indication that some error occurred during the authorization check. It is entirely possible to get an error and be able to continue determine authorization status in spite of it. This is most common when a bound role is missing, but enough roles are still present and bound to reason about the request.", "type": "string" }, "kind": { @@ -2070,11 +2071,11 @@ "type": "string" }, "namespace": { - "description": "Namespace is the namespace used for the access review", + "description": "namespace is the namespace used for the access review", "type": "string" }, "reason": { - "description": "Reason is optional. It indicates why a request was allowed or denied.", + "description": "reason is optional. It indicates why a request was allowed or denied.", "type": "string" } } @@ -2100,12 +2101,12 @@ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta" }, "spec": { - "description": "Spec adds information about how to conduct the check", + "description": "spec adds information about how to conduct the check", "default": {}, "$ref": "#/definitions/com.github.openshift.api.authorization.v1.SubjectRulesReviewSpec" }, "status": { - "description": "Status is completed by the server to tell which permissions you have", + "description": "status is completed by the server to tell which permissions you have", "default": {}, "$ref": "#/definitions/com.github.openshift.api.authorization.v1.SubjectRulesReviewStatus" } @@ -2121,7 +2122,7 @@ ], "properties": { "groups": { - "description": "Groups is optional. Groups is the list of groups to which the User belongs. At least one of User and Groups must be specified.", + "description": "groups is optional. Groups is the list of groups to which the User belongs. At least one of User and Groups must be specified.", "type": "array", "items": { "type": "string", @@ -2129,7 +2130,7 @@ } }, "scopes": { - "description": "Scopes to use for the evaluation. Empty means \"use the unscoped (full) permissions of the user/groups\".", + "description": "scopes to use for the evaluation. 
Empty means \"use the unscoped (full) permissions of the user/groups\".", "type": "array", "items": { "type": "string", @@ -2137,7 +2138,7 @@ } }, "user": { - "description": "User is optional. At least one of User and Groups must be specified.", + "description": "user is optional. At least one of User and Groups must be specified.", "type": "string", "default": "" } @@ -2151,11 +2152,11 @@ ], "properties": { "evaluationError": { - "description": "EvaluationError can appear in combination with Rules. It means some error happened during evaluation that may have prevented additional rules from being populated.", + "description": "evaluationError can appear in combination with Rules. It means some error happened during evaluation that may have prevented additional rules from being populated.", "type": "string" }, "rules": { - "description": "Rules is the list of rules (no particular sort) that are allowed for the subject", + "description": "rules is the list of rules (no particular sort) that are allowed for the subject", "type": "array", "items": { "default": {}, @@ -2174,7 +2175,7 @@ ], "properties": { "groups": { - "description": "Groups specifies a list of literal group names.", + "description": "groups specifies a list of literal group names.", "type": "array", "items": { "type": "string", @@ -2190,7 +2191,7 @@ } }, "users": { - "description": "Users specifies a list of literal user names.", + "description": "users specifies a list of literal user names.", "type": "array", "items": { "type": "string", @@ -2261,11 +2262,11 @@ "type": "object", "properties": { "revision": { - "description": "Revision is the git source revision information of the trigger.", + "description": "revision is the git source revision information of the trigger.", "$ref": "#/definitions/com.github.openshift.api.build.v1.SourceRevision" }, "secret": { - "description": "Secret is the obfuscated webhook secret that triggered a build.", + "description": "secret is the obfuscated webhook secret that triggered a build.", "type": "string" } } @@ -2324,12 +2325,12 @@ "type": "string" }, "status": { - "description": "Status of the condition, one of True, False, Unknown.", + "description": "status of the condition, one of True, False, Unknown.", "type": "string", "default": "" }, "type": { - "description": "Type of build condition.", + "description": "type of build condition.", "type": "string", "default": "" } @@ -2446,7 +2447,7 @@ "$ref": "#/definitions/com.github.openshift.api.build.v1.SourceRevision" }, "runPolicy": { - "description": "RunPolicy describes how the new build created from this build configuration will be scheduled for execution. This is optional, if not specified we default to \"Serial\".", + "description": "runPolicy describes how the new build created from this build configuration will be scheduled for execution. This is optional, if not specified we default to \"Serial\".", "type": "string" }, "serviceAccount": { @@ -2486,7 +2487,7 @@ ], "properties": { "imageChangeTriggers": { - "description": "ImageChangeTriggers captures the runtime state of any ImageChangeTrigger specified in the BuildConfigSpec, including the value reconciled by the OpenShift APIServer for the lastTriggeredImageID. There is a single entry in this array for each image change trigger in spec. 
Each trigger status references the ImageStreamTag that acts as the source of the trigger.", + "description": "imageChangeTriggers captures the runtime state of any ImageChangeTrigger specified in the BuildConfigSpec, including the value reconciled by the OpenShift APIServer for the lastTriggeredImageID. There is a single entry in this array for each image change trigger in spec. Each trigger status references the ImageStreamTag that acts as the source of the trigger.", "type": "array", "items": { "default": {}, @@ -2575,7 +2576,7 @@ "format": "int64" }, "nowait": { - "description": "noWait if true causes the call to return immediately even if the build is not available yet. Otherwise the server will wait until the build has started.", + "description": "nowait if true causes the call to return immediately even if the build is not available yet. Otherwise the server will wait until the build has started.", "type": "boolean" }, "previous": { @@ -2620,7 +2621,7 @@ } }, "pushSecret": { - "description": "PushSecret is the name of a Secret that would be used for setting up the authentication for executing the Docker push to authentication enabled Docker Registry (or Docker Hub).", + "description": "pushSecret is the name of a Secret that would be used for setting up the authentication for executing the Docker push to authentication enabled Docker Registry (or Docker Hub).", "$ref": "#/definitions/io.k8s.api.core.v1.LocalObjectReference" }, "to": { @@ -2668,7 +2669,7 @@ "$ref": "#/definitions/com.github.openshift.api.build.v1.BinaryBuildSource" }, "dockerStrategyOptions": { - "description": "DockerStrategyOptions contains additional docker-strategy specific options for the build", + "description": "dockerStrategyOptions contains additional docker-strategy specific options for the build", "$ref": "#/definitions/com.github.openshift.api.build.v1.DockerStrategyOptions" }, "env": { @@ -2702,7 +2703,7 @@ "$ref": "#/definitions/com.github.openshift.api.build.v1.SourceRevision" }, "sourceStrategyOptions": { - "description": "SourceStrategyOptions contains additional source-strategy specific options for the build", + "description": "sourceStrategyOptions contains additional source-strategy specific options for the build", "$ref": "#/definitions/com.github.openshift.api.build.v1.SourceStrategyOptions" }, "triggeredBy": { @@ -2856,7 +2857,7 @@ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time" }, "conditions": { - "description": "Conditions represents the latest available observations of a build's current state.", + "description": "conditions represents the latest available observations of a build's current state.", "type": "array", "items": { "default": {}, @@ -2947,7 +2948,7 @@ "$ref": "#/definitions/com.github.openshift.api.build.v1.DockerBuildStrategy" }, "jenkinsPipelineStrategy": { - "description": "JenkinsPipelineStrategy holds the parameters to the Jenkins Pipeline build strategy. Deprecated: use OpenShift Pipelines", + "description": "jenkinsPipelineStrategy holds the parameters to the Jenkins Pipeline build strategy. 
Deprecated: use OpenShift Pipelines", "$ref": "#/definitions/com.github.openshift.api.build.v1.JenkinsPipelineBuildStrategy" }, "sourceStrategy": { @@ -2965,7 +2966,7 @@ "type": "object", "properties": { "bitbucketWebHook": { - "description": "BitbucketWebHook represents data for a Bitbucket webhook that fired a specific build.", + "description": "bitbucketWebHook represents data for a Bitbucket webhook that fired a specific build.", "$ref": "#/definitions/com.github.openshift.api.build.v1.BitbucketWebHookCause" }, "genericWebHook": { @@ -2973,11 +2974,11 @@ "$ref": "#/definitions/com.github.openshift.api.build.v1.GenericWebHookCause" }, "githubWebHook": { - "description": "gitHubWebHook represents data for a GitHub webhook that fired a specific build.", + "description": "githubWebHook represents data for a GitHub webhook that fired a specific build.", "$ref": "#/definitions/com.github.openshift.api.build.v1.GitHubWebHookCause" }, "gitlabWebHook": { - "description": "GitLabWebHook represents data for a GitLab webhook that fired a specific build.", + "description": "gitlabWebHook represents data for a GitLab webhook that fired a specific build.", "$ref": "#/definitions/com.github.openshift.api.build.v1.GitLabWebHookCause" }, "imageChangeBuild": { @@ -3163,11 +3164,11 @@ "type": "object", "properties": { "revision": { - "description": "Revision is the git source revision information of the trigger.", + "description": "revision is the git source revision information of the trigger.", "$ref": "#/definitions/com.github.openshift.api.build.v1.SourceRevision" }, "secret": { - "description": "Secret is the obfuscated webhook secret that triggered a build.", + "description": "secret is the obfuscated webhook secret that triggered a build.", "type": "string" } } @@ -3333,7 +3334,7 @@ "type": "object", "properties": { "dockerStrategyOptions": { - "description": "DockerStrategyOptions contains additional docker-strategy specific options for the build", + "description": "dockerStrategyOptions contains additional docker-strategy specific options for the build", "$ref": "#/definitions/com.github.openshift.api.build.v1.DockerStrategyOptions" }, "env": { @@ -3441,7 +3442,7 @@ "type": "string" }, "refs": { - "description": "Refs is a list of GitRefs for the provided repo - generally sent when used from a post-receive hook. This field is optional and is used when sending multiple refs", + "description": "refs is a list of GitRefs for the provided repo - generally sent when used from a post-receive hook. 
This field is optional and is used when sending multiple refs", "type": "array", "items": { "default": {}, @@ -3460,11 +3461,11 @@ "type": "object", "properties": { "revision": { - "description": "Revision is the git source revision information of the trigger.", + "description": "revision is the git source revision information of the trigger.", "$ref": "#/definitions/com.github.openshift.api.build.v1.SourceRevision" }, "secret": { - "description": "Secret is the obfuscated webhook secret that triggered a build.", + "description": "secret is the obfuscated webhook secret that triggered a build.", "type": "string" } } @@ -3691,11 +3692,11 @@ } }, "jenkinsfile": { - "description": "Jenkinsfile defines the optional raw contents of a Jenkinsfile which defines a Jenkins pipeline build.", + "description": "jenkinsfile defines the optional raw contents of a Jenkinsfile which defines a Jenkins pipeline build.", "type": "string" }, "jenkinsfilePath": { - "description": "JenkinsfilePath is the optional path of the Jenkinsfile that will be used to configure the pipeline relative to the root of the context (contextDir). If both JenkinsfilePath & Jenkinsfile are both not specified, this defaults to Jenkinsfile in the root of the specified contextDir.", + "description": "jenkinsfilePath is the optional path of the Jenkinsfile that will be used to configure the pipeline relative to the root of the context (contextDir). If both JenkinsfilePath & Jenkinsfile are both not specified, this defaults to Jenkinsfile in the root of the specified contextDir.", "type": "string" } } @@ -3744,7 +3745,7 @@ ], "properties": { "name": { - "description": "Name is the name of the resource in the same namespace being referenced", + "description": "name is the name of the resource in the same namespace being referenced", "type": "string", "default": "" } @@ -3844,7 +3845,7 @@ ], "properties": { "git": { - "description": "Git contains information about git-based build source", + "description": "git contains information about git-based build source", "$ref": "#/definitions/com.github.openshift.api.build.v1.GitSourceRevision" }, "type": { @@ -3984,7 +3985,13 @@ "items": { "default": {}, "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Condition" - } + }, + "x-kubernetes-list-map-keys": [ + "type" + ], + "x-kubernetes-list-type": "map", + "x-kubernetes-patch-merge-key": "type", + "x-kubernetes-patch-strategy": "merge" }, "node": { "description": "node is the node name, as specified by the Kubernetes field: node.metadata.name", @@ -4153,6 +4160,9 @@ "com.github.openshift.api.config.v1.AWSIngressSpec": { "description": "AWSIngressSpec holds the desired state of the Ingress for Amazon Web Services infrastructure provider. This only includes fields that can be modified in the cluster.", "type": "object", + "required": [ + "type" + ], "properties": { "type": { "description": "type allows user to set a load balancer type. When this field is set the default ingresscontroller will get created using the specified LBType. If this field is not set then the default ingress controller of LBType Classic will be created. Valid values are:\n\n* \"Classic\": A Classic Load Balancer that makes routing decisions at either\n the transport layer (TCP/SSL) or the application layer (HTTP/HTTPS). See\n the following for additional details:\n\n https://docs.aws.amazon.com/AmazonECS/latest/developerguide/load-balancer-types.html#clb\n\n* \"NLB\": A Network Load Balancer that makes routing decisions at the\n transport layer (TCP/SSL). 
See the following for additional details:\n\n https://docs.aws.amazon.com/AmazonECS/latest/developerguide/load-balancer-types.html#nlb", @@ -4210,7 +4220,7 @@ "x-kubernetes-list-type": "atomic" }, "serviceEndpoints": { - "description": "ServiceEndpoints list contains custom endpoints which will override default service endpoint of AWS Services. There must be only one ServiceEndpoint for a service.", + "description": "serviceEndpoints list contains custom endpoints which will override default service endpoint of AWS Services. There must be only one ServiceEndpoint for a service.", "type": "array", "items": { "default": {}, @@ -4229,12 +4239,12 @@ ], "properties": { "key": { - "description": "key is the key of the tag", + "description": "key sets the key of the AWS resource tag key-value pair. Key is required when defining an AWS resource tag. Key should consist of between 1 and 128 characters, and may contain only the set of alphanumeric characters, space (' '), '_', '.', '/', '=', '+', '-', ':', and '@'.", "type": "string", "default": "" }, "value": { - "description": "value is the value of the tag. Some AWS service do not support empty values. Since tags are added to resources in many services, the length of the tag value must meet the requirements of all services.", + "description": "value sets the value of the AWS resource tag key-value pair. Value is required when defining an AWS resource tag. Value should consist of between 1 and 256 characters, and may contain only the set of alphanumeric characters, space (' '), '_', '.', '/', '=', '+', '-', ':', and '@'. Some AWS service do not support empty values. Since tags are added to resources in many services, the length of the tag value must meet the requirements of all services.", "type": "string", "default": "" } @@ -4297,11 +4307,11 @@ ], "properties": { "configuration": { - "description": "Configuration is an embedded configuration object to be used as the plugin's configuration. If present, it will be used instead of the path to the configuration file.", + "description": "configuration is an embedded configuration object to be used as the plugin's configuration. If present, it will be used instead of the path to the configuration file.", "$ref": "#/definitions/io.k8s.apimachinery.pkg.runtime.RawExtension" }, "location": { - "description": "Location is the path to a configuration file that contains the plugin's configuration", + "description": "location is the path to a configuration file that contains the plugin's configuration", "type": "string", "default": "" } @@ -4432,11 +4442,11 @@ "default": 0 }, "policyConfiguration": { - "description": "PolicyConfiguration is an embedded policy configuration object to be used as the audit policy configuration. If present, it will be used instead of the path to the policy file.", + "description": "policyConfiguration is an embedded policy configuration object to be used as the audit policy configuration. 
If present, it will be used instead of the path to the policy file.", "$ref": "#/definitions/io.k8s.apimachinery.pkg.runtime.RawExtension" }, "policyFile": { - "description": "PolicyFile is a path to the file that defines the audit policy configuration.", + "description": "policyFile is a path to the file that defines the audit policy configuration.", "type": "string", "default": "" }, @@ -4542,7 +4552,7 @@ "$ref": "#/definitions/com.github.openshift.api.config.v1.ConfigMapNameReference" }, "oidcProviders": { - "description": "OIDCProviders are OIDC identity providers that can issue tokens for this cluster Can only be set if \"Type\" is set to \"OIDC\".\n\nAt most one provider can be configured.", + "description": "oidcProviders are OIDC identity providers that can issue tokens for this cluster Can only be set if \"Type\" is set to \"OIDC\".\n\nAt most one provider can be configured.", "type": "array", "items": { "default": {}, @@ -4591,7 +4601,7 @@ "$ref": "#/definitions/com.github.openshift.api.config.v1.ConfigMapNameReference" }, "oidcClients": { - "description": "OIDCClients is where participating operators place the current OIDC client status for OIDC clients that can be customized by the cluster-admin.", + "description": "oidcClients is where participating operators place the current OIDC client status for OIDC clients that can be customized by the cluster-admin.", "type": "array", "items": { "default": {}, @@ -4820,7 +4830,7 @@ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta" }, "spec": { - "description": "Spec holds user-settable values for the build controller configuration", + "description": "spec holds user-settable values for the build controller configuration", "default": {}, "$ref": "#/definitions/com.github.openshift.api.config.v1.BuildSpec" } @@ -4830,11 +4840,11 @@ "type": "object", "properties": { "defaultProxy": { - "description": "DefaultProxy contains the default proxy settings for all build operations, including image pull/push and source download.\n\nValues can be overrode by setting the `HTTP_PROXY`, `HTTPS_PROXY`, and `NO_PROXY` environment variables in the build config's strategy.", + "description": "defaultProxy contains the default proxy settings for all build operations, including image pull/push and source download.\n\nValues can be overrode by setting the `HTTP_PROXY`, `HTTPS_PROXY`, and `NO_PROXY` environment variables in the build config's strategy.", "$ref": "#/definitions/com.github.openshift.api.config.v1.ProxySpec" }, "env": { - "description": "Env is a set of default environment variables that will be applied to the build if the specified variables do not exist on the build", + "description": "env is a set of default environment variables that will be applied to the build if the specified variables do not exist on the build", "type": "array", "items": { "default": {}, @@ -4842,11 +4852,11 @@ } }, "gitProxy": { - "description": "GitProxy contains the proxy settings for git operations only. If set, this will override any Proxy settings for all git commands, such as git clone.\n\nValues that are not set here will be inherited from DefaultProxy.", + "description": "gitProxy contains the proxy settings for git operations only. 
If set, this will override any Proxy settings for all git commands, such as git clone.\n\nValues that are not set here will be inherited from DefaultProxy.", "$ref": "#/definitions/com.github.openshift.api.config.v1.ProxySpec" }, "imageLabels": { - "description": "ImageLabels is a list of docker labels that are applied to the resulting image. User can override a default label by providing a label with the same name in their Build/BuildConfig.", + "description": "imageLabels is a list of docker labels that are applied to the resulting image. User can override a default label by providing a label with the same name in their Build/BuildConfig.", "type": "array", "items": { "default": {}, @@ -4854,7 +4864,7 @@ } }, "resources": { - "description": "Resources defines resource requirements to execute the build.", + "description": "resources defines resource requirements to execute the build.", "default": {}, "$ref": "#/definitions/io.k8s.api.core.v1.ResourceRequirements" } @@ -4894,11 +4904,11 @@ "type": "object", "properties": { "forcePull": { - "description": "ForcePull overrides, if set, the equivalent value in the builds, i.e. false disables force pull for all builds, true enables force pull for all builds, independently of what each build specifies itself", + "description": "forcePull overrides, if set, the equivalent value in the builds, i.e. false disables force pull for all builds, true enables force pull for all builds, independently of what each build specifies itself", "type": "boolean" }, "imageLabels": { - "description": "ImageLabels is a list of docker labels that are applied to the resulting image. If user provided a label in their Build/BuildConfig with the same name as one in this list, the user's label will be overwritten.", + "description": "imageLabels is a list of docker labels that are applied to the resulting image. If user provided a label in their Build/BuildConfig with the same name as one in this list, the user's label will be overwritten.", "type": "array", "items": { "default": {}, @@ -4906,7 +4916,7 @@ } }, "nodeSelector": { - "description": "NodeSelector is a selector which must be true for the build pod to fit on a node", + "description": "nodeSelector is a selector which must be true for the build pod to fit on a node", "type": "object", "additionalProperties": { "type": "string", @@ -4914,7 +4924,7 @@ } }, "tolerations": { - "description": "Tolerations is a list of Tolerations that will override any existing tolerations set on a build pod.", + "description": "tolerations is a list of Tolerations that will override any existing tolerations set on a build pod.", "type": "array", "items": { "default": {}, @@ -4927,17 +4937,17 @@ "type": "object", "properties": { "additionalTrustedCA": { - "description": "AdditionalTrustedCA is a reference to a ConfigMap containing additional CAs that should be trusted for image pushes and pulls during builds. The namespace for this config map is openshift-config.\n\nDEPRECATED: Additional CAs for image pull and push should be set on image.config.openshift.io/cluster instead.", + "description": "additionalTrustedCA is a reference to a ConfigMap containing additional CAs that should be trusted for image pushes and pulls during builds. 
The namespace for this config map is openshift-config.\n\nDEPRECATED: Additional CAs for image pull and push should be set on image.config.openshift.io/cluster instead.", "default": {}, "$ref": "#/definitions/com.github.openshift.api.config.v1.ConfigMapNameReference" }, "buildDefaults": { - "description": "BuildDefaults controls the default information for Builds", + "description": "buildDefaults controls the default information for Builds", "default": {}, "$ref": "#/definitions/com.github.openshift.api.config.v1.BuildDefaults" }, "buildOverrides": { - "description": "BuildOverrides controls override settings for builds", + "description": "buildOverrides controls override settings for builds", "default": {}, "$ref": "#/definitions/com.github.openshift.api.config.v1.BuildOverrides" } @@ -4952,12 +4962,12 @@ ], "properties": { "certFile": { - "description": "CertFile is a file containing a PEM-encoded certificate", + "description": "certFile is a file containing a PEM-encoded certificate", "type": "string", "default": "" }, "keyFile": { - "description": "KeyFile is a file containing a PEM-encoded private key for the certificate specified by CertFile", + "description": "keyFile is a file containing a PEM-encoded private key for the certificate specified by CertFile", "type": "string", "default": "" } @@ -5071,7 +5081,7 @@ ], "properties": { "promql": { - "description": "promQL represents a cluster condition based on PromQL.", + "description": "promql represents a cluster condition based on PromQL.", "$ref": "#/definitions/com.github.openshift.api.config.v1.PromQLClusterCondition" }, "type": { @@ -5178,6 +5188,10 @@ "default": {}, "$ref": "#/definitions/com.github.openshift.api.config.v1.ClusterOperatorStatusCondition" }, + "x-kubernetes-list-map-keys": [ + "type" + ], + "x-kubernetes-list-type": "map", "x-kubernetes-patch-merge-key": "type", "x-kubernetes-patch-strategy": "merge" }, @@ -5362,7 +5376,7 @@ "default": "" }, "desiredUpdate": { - "description": "desiredUpdate is an optional field that indicates the desired value of the cluster version. Setting this value will trigger an upgrade (if the current version does not match the desired version). The set of recommended update values is listed as part of available updates in status, and setting values outside that range may cause the upgrade to fail.\n\nSome of the fields are inter-related with restrictions and meanings described here. 1. image is specified, version is specified, architecture is specified. API validation error. 2. image is specified, version is specified, architecture is not specified. You should not do this. version is silently ignored and image is used. 3. image is specified, version is not specified, architecture is specified. API validation error. 4. image is specified, version is not specified, architecture is not specified. image is used. 5. image is not specified, version is specified, architecture is specified. version and desired architecture are used to select an image. 6. image is not specified, version is specified, architecture is not specified. version and current architecture are used to select an image. 7. image is not specified, version is not specified, architecture is specified. API validation error. 8. image is not specified, version is not specified, architecture is not specified. API validation error.\n\nIf an upgrade fails the operator will halt and report status about the failing component. Setting the desired update value back to the previous version will cause a rollback to be attempted. 
Not all rollbacks will succeed.", + "description": "desiredUpdate is an optional field that indicates the desired value of the cluster version. Setting this value will trigger an upgrade (if the current version does not match the desired version). The set of recommended update values is listed as part of available updates in status, and setting values outside that range may cause the upgrade to fail.\n\nSome of the fields are inter-related with restrictions and meanings described here. 1. image is specified, version is specified, architecture is specified. API validation error. 2. image is specified, version is specified, architecture is not specified. The version extracted from the referenced image must match the specified version. 3. image is specified, version is not specified, architecture is specified. API validation error. 4. image is specified, version is not specified, architecture is not specified. image is used. 5. image is not specified, version is specified, architecture is specified. version and desired architecture are used to select an image. 6. image is not specified, version is specified, architecture is not specified. version and current architecture are used to select an image. 7. image is not specified, version is not specified, architecture is specified. API validation error. 8. image is not specified, version is not specified, architecture is not specified. API validation error.\n\nIf an upgrade fails the operator will halt and report status about the failing component. Setting the desired update value back to the previous version will cause a rollback to be attempted. Not all rollbacks will succeed.", "$ref": "#/definitions/com.github.openshift.api.config.v1.Update" }, "overrides": { @@ -5623,9 +5637,7 @@ "x-kubernetes-list-map-keys": [ "type" ], - "x-kubernetes-list-type": "map", - "x-kubernetes-patch-merge-key": "type", - "x-kubernetes-patch-strategy": "merge" + "x-kubernetes-list-type": "map" }, "release": { "description": "release is the target of the update.", @@ -5692,7 +5704,7 @@ ], "properties": { "key": { - "description": "Key allows pointing to a specific key/value inside of the configmap. This is useful for logical file references.", + "description": "key allows pointing to a specific key/value inside of the configmap. 
This is useful for logical file references.", "type": "string" }, "name": { @@ -6053,22 +6065,22 @@ ], "properties": { "ca": { - "description": "CA is a file containing trusted roots for the etcd server certificates", + "description": "ca is a file containing trusted roots for the etcd server certificates", "type": "string", "default": "" }, "certFile": { - "description": "CertFile is a file containing a PEM-encoded certificate", + "description": "certFile is a file containing a PEM-encoded certificate", "type": "string", "default": "" }, "keyFile": { - "description": "KeyFile is a file containing a PEM-encoded private key for the certificate specified by CertFile", + "description": "keyFile is a file containing a PEM-encoded private key for the certificate specified by CertFile", "type": "string", "default": "" }, "urls": { - "description": "URLs are the URLs for etcd", + "description": "urls are the URLs for etcd", "type": "array", "items": { "type": "string", @@ -6087,27 +6099,27 @@ ], "properties": { "ca": { - "description": "CA is a file containing trusted roots for the etcd server certificates", + "description": "ca is a file containing trusted roots for the etcd server certificates", "type": "string", "default": "" }, "certFile": { - "description": "CertFile is a file containing a PEM-encoded certificate", + "description": "certFile is a file containing a PEM-encoded certificate", "type": "string", "default": "" }, "keyFile": { - "description": "KeyFile is a file containing a PEM-encoded private key for the certificate specified by CertFile", + "description": "keyFile is a file containing a PEM-encoded private key for the certificate specified by CertFile", "type": "string", "default": "" }, "storagePrefix": { - "description": "StoragePrefix is the path within etcd that the OpenShift resources will be rooted under. This value, if changed, will mean existing objects in etcd will no longer be located.", + "description": "storagePrefix is the path within etcd that the OpenShift resources will be rooted under. This value, if changed, will mean existing objects in etcd will no longer be located.", "type": "string", "default": "" }, "urls": { - "description": "URLs are the URLs for etcd", + "description": "urls are the URLs for etcd", "type": "array", "items": { "type": "string", @@ -6164,7 +6176,7 @@ "type": "object", "properties": { "platformName": { - "description": "PlatformName holds the arbitrary string representing the infrastructure provider name, expected to be set at the installation time. This field is solely for informational and reporting purposes and is not expected to be used for decision-making.", + "description": "platformName holds the arbitrary string representing the infrastructure provider name, expected to be set at the installation time. 
This field is solely for informational and reporting purposes and is not expected to be used for decision-making.", "type": "string", "default": "Unknown" } @@ -6367,12 +6379,12 @@ ], "properties": { "featureGate": { - "description": "FeatureGate is the name of the FeatureGate as it appears in The FeatureGate CR instance.", + "description": "featureGate is the name of the FeatureGate as it appears in The FeatureGate CR instance.", "type": "string", "default": "" }, "tests": { - "description": "Tests contains an item for every TestName", + "description": "tests contains an item for every TestName", "type": "array", "items": { "default": {}, @@ -6433,6 +6445,18 @@ "key" ], "x-kubernetes-list-type": "map" + }, + "serviceEndpoints": { + "description": "serviceEndpoints specifies endpoints that override the default endpoints used when creating clients to interact with GCP services. When not specified, the default endpoint for the GCP region will be used. Only 1 endpoint override is permitted for each GCP service. The maximum number of endpoint overrides allowed is 9.", + "type": "array", + "items": { + "default": {}, + "$ref": "#/definitions/com.github.openshift.api.config.v1.GCPServiceEndpoint" + }, + "x-kubernetes-list-map-keys": [ + "name" + ], + "x-kubernetes-list-type": "map" } } }, @@ -6482,6 +6506,26 @@ } } }, + "com.github.openshift.api.config.v1.GCPServiceEndpoint": { + "description": "GCPServiceEndpoint store the configuration of a custom url to override existing defaults of GCP Services.", + "type": "object", + "required": [ + "name", + "url" + ], + "properties": { + "name": { + "description": "name is the name of the GCP service whose endpoint is being overridden. This must be provided and cannot be empty.\n\nAllowed values are Compute, Container, CloudResourceManager, DNS, File, IAM, ServiceUsage, Storage, and TagManager.\n\nAs an example, when setting the name to Compute all requests made by the caller to the GCP Compute Service will be directed to the endpoint specified in the url field.", + "type": "string", + "default": "" + }, + "url": { + "description": "url is a fully qualified URI that overrides the default endpoint for a client using the GCP service specified in the name field. 
url is required, must use the scheme https, must not be more than 253 characters in length, and must be a valid URL according to Go's net/url package (https://pkg.go.dev/net/url#URL)\n\nAn example of a valid endpoint that overrides the Compute Service: \"https://compute-myendpoint1.p.googleapis.com\"", + "type": "string", + "default": "" + } + } + }, "com.github.openshift.api.config.v1.GenericAPIServerConfig": { "description": "GenericAPIServerConfig is an inline-able struct for aggregated apiservers that need to store data in etcd", "type": "object", @@ -6554,7 +6598,7 @@ "$ref": "#/definitions/com.github.openshift.api.config.v1.LeaderElection" }, "servingInfo": { - "description": "ServingInfo is the HTTP serving information for the controller's endpoints", + "description": "servingInfo is the HTTP serving information for the controller's endpoints", "default": {}, "$ref": "#/definitions/com.github.openshift.api.config.v1.HTTPServingInfo" } @@ -6689,22 +6733,22 @@ ], "properties": { "bindAddress": { - "description": "BindAddress is the ip:port to serve on", + "description": "bindAddress is the ip:port to serve on", "type": "string", "default": "" }, "bindNetwork": { - "description": "BindNetwork is the type of network to bind to - defaults to \"tcp4\", accepts \"tcp\", \"tcp4\", and \"tcp6\"", + "description": "bindNetwork is the type of network to bind to - defaults to \"tcp4\", accepts \"tcp\", \"tcp4\", and \"tcp6\"", "type": "string", "default": "" }, "certFile": { - "description": "CertFile is a file containing a PEM-encoded certificate", + "description": "certFile is a file containing a PEM-encoded certificate", "type": "string", "default": "" }, "cipherSuites": { - "description": "CipherSuites contains an overridden list of ciphers for the server to support. Values must match cipher suite IDs from https://golang.org/pkg/crypto/tls/#pkg-constants", + "description": "cipherSuites contains an overridden list of ciphers for the server to support. Values must match cipher suite IDs from https://golang.org/pkg/crypto/tls/#pkg-constants", "type": "array", "items": { "type": "string", @@ -6712,26 +6756,26 @@ } }, "clientCA": { - "description": "ClientCA is the certificate bundle for all the signers that you'll recognize for incoming client certificates", + "description": "clientCA is the certificate bundle for all the signers that you'll recognize for incoming client certificates", "type": "string" }, "keyFile": { - "description": "KeyFile is a file containing a PEM-encoded private key for the certificate specified by CertFile", + "description": "keyFile is a file containing a PEM-encoded private key for the certificate specified by CertFile", "type": "string", "default": "" }, "maxRequestsInFlight": { - "description": "MaxRequestsInFlight is the number of concurrent requests allowed to the server. If zero, no limit.", + "description": "maxRequestsInFlight is the number of concurrent requests allowed to the server. If zero, no limit.", "type": "integer", "format": "int64", "default": 0 }, "minTLSVersion": { - "description": "MinTLSVersion is the minimum TLS version supported. Values must match version names from https://golang.org/pkg/crypto/tls/#pkg-constants", + "description": "minTLSVersion is the minimum TLS version supported. 
Values must match version names from https://golang.org/pkg/crypto/tls/#pkg-constants", "type": "string" }, "namedCertificates": { - "description": "NamedCertificates is a list of certificates to use to secure requests to specific hostnames", + "description": "namedCertificates is a list of certificates to use to secure requests to specific hostnames", "type": "array", "items": { "default": {}, @@ -6739,7 +6783,7 @@ } }, "requestTimeoutSeconds": { - "description": "RequestTimeoutSeconds is the number of seconds before requests are timed out. The default is 60 minutes, if -1 there is no limit on requests.", + "description": "requestTimeoutSeconds is the number of seconds before requests are timed out. The default is 60 minutes, if -1 there is no limit on requests.", "type": "integer", "format": "int64", "default": 0 @@ -6782,34 +6826,48 @@ }, "com.github.openshift.api.config.v1.IBMCloudPlatformSpec": { "description": "IBMCloudPlatformSpec holds the desired state of the IBMCloud infrastructure provider. This only includes fields that can be modified in the cluster.", - "type": "object" + "type": "object", + "properties": { + "serviceEndpoints": { + "description": "serviceEndpoints is a list of custom endpoints which will override the default service endpoints of an IBM service. These endpoints are used by components within the cluster when trying to reach the IBM Cloud Services that have been overriden. The CCCMO reads in the IBMCloudPlatformSpec and validates each endpoint is resolvable. Once validated, the cloud config and IBMCloudPlatformStatus are updated to reflect the same custom endpoints. A maximum of 13 service endpoints overrides are supported.", + "type": "array", + "items": { + "default": {}, + "$ref": "#/definitions/com.github.openshift.api.config.v1.IBMCloudServiceEndpoint" + }, + "x-kubernetes-list-map-keys": [ + "name" + ], + "x-kubernetes-list-type": "map" + } + } }, "com.github.openshift.api.config.v1.IBMCloudPlatformStatus": { "description": "IBMCloudPlatformStatus holds the current status of the IBMCloud infrastructure provider.", "type": "object", "properties": { "cisInstanceCRN": { - "description": "CISInstanceCRN is the CRN of the Cloud Internet Services instance managing the DNS zone for the cluster's base domain", + "description": "cisInstanceCRN is the CRN of the Cloud Internet Services instance managing the DNS zone for the cluster's base domain", "type": "string" }, "dnsInstanceCRN": { - "description": "DNSInstanceCRN is the CRN of the DNS Services instance managing the DNS zone for the cluster's base domain", + "description": "dnsInstanceCRN is the CRN of the DNS Services instance managing the DNS zone for the cluster's base domain", "type": "string" }, "location": { - "description": "Location is where the cluster has been deployed", + "description": "location is where the cluster has been deployed", "type": "string" }, "providerType": { - "description": "ProviderType indicates the type of cluster that was created", + "description": "providerType indicates the type of cluster that was created", "type": "string" }, "resourceGroupName": { - "description": "ResourceGroupName is the Resource Group for new IBMCloud resources created for the cluster.", + "description": "resourceGroupName is the Resource Group for new IBMCloud resources created for the cluster.", "type": "string" }, "serviceEndpoints": { - "description": "serviceEndpoints is a list of custom endpoints which will override the default service endpoints of an IBM Cloud service. 
These endpoints are consumed by components within the cluster to reach the respective IBM Cloud Services.", + "description": "serviceEndpoints is a list of custom endpoints which will override the default service endpoints of an IBM service. These endpoints are used by components within the cluster when trying to reach the IBM Cloud Services that have been overriden. The CCCMO reads in the IBMCloudPlatformSpec and validates each endpoint is resolvable. Once validated, the cloud config and IBMCloudPlatformStatus are updated to reflect the same custom endpoints.", "type": "array", "items": { "default": {}, @@ -6836,7 +6894,7 @@ "default": "" }, "url": { - "description": "url is fully qualified URI with scheme https, that overrides the default generated endpoint for a client. This must be provided and cannot be empty.", + "description": "url is fully qualified URI with scheme https, that overrides the default generated endpoint for a client. This must be provided and cannot be empty. The path must follow the pattern /v[0,9]+ or /api/v[0,9]+", "type": "string", "default": "" } @@ -7173,12 +7231,12 @@ ], "properties": { "name": { - "description": "Name defines the name of the label. It must have non-zero length.", + "description": "name defines the name of the label. It must have non-zero length.", "type": "string", "default": "" }, "value": { - "description": "Value defines the literal value of the label.", + "description": "value defines the literal value of the label.", "type": "string" } } @@ -7885,11 +7943,11 @@ "type": "object", "properties": { "machine": { - "description": "Machine contains MTU migration configuration for the machine's uplink.", + "description": "machine contains MTU migration configuration for the machine's uplink.", "$ref": "#/definitions/com.github.openshift.api.config.v1.MTUMigrationValues" }, "network": { - "description": "Network contains MTU migration configuration for the default network.", + "description": "network contains MTU migration configuration for the default network.", "$ref": "#/definitions/com.github.openshift.api.config.v1.MTUMigrationValues" } } @@ -7902,12 +7960,12 @@ ], "properties": { "from": { - "description": "From is the MTU to migrate from.", + "description": "from is the MTU to migrate from.", "type": "integer", "format": "int64" }, "to": { - "description": "To is the MTU to migrate to.", + "description": "to is the MTU to migrate to.", "type": "integer", "format": "int64" } @@ -7942,17 +8000,17 @@ ], "properties": { "certFile": { - "description": "CertFile is a file containing a PEM-encoded certificate", + "description": "certFile is a file containing a PEM-encoded certificate", "type": "string", "default": "" }, "keyFile": { - "description": "KeyFile is a file containing a PEM-encoded private key for the certificate specified by CertFile", + "description": "keyFile is a file containing a PEM-encoded private key for the certificate specified by CertFile", "type": "string", "default": "" }, "names": { - "description": "Names is a list of DNS names this certificate should be used to secure A name can be a normal DNS name, or can contain leading wildcard segments.", + "description": "names is a list of DNS names this certificate should be used to secure A name can be a normal DNS name, or can contain leading wildcard segments.", "type": "array", "items": { "type": "string", @@ -8094,11 +8152,11 @@ "type": "object", "properties": { "mtu": { - "description": "MTU is the MTU configuration that is being deployed.", + "description": "mtu is the MTU 
configuration that is being deployed.", "$ref": "#/definitions/com.github.openshift.api.config.v1.MTUMigration" }, "networkType": { - "description": "NetworkType is the target plugin that is being deployed. DEPRECATED: network type migration is no longer supported, so this should always be unset.", + "description": "networkType is the target plugin that is being deployed. DEPRECATED: network type migration is no longer supported, so this should always be unset.", "type": "string" } } @@ -8131,7 +8189,7 @@ "$ref": "#/definitions/com.github.openshift.api.config.v1.NetworkDiagnostics" }, "networkType": { - "description": "NetworkType is the plugin that is to be deployed (e.g. OVNKubernetes). This should match a value that the cluster-network-operator understands, or else no networking will be installed. Currently supported values are: - OVNKubernetes This field is immutable after installation.", + "description": "networkType is the plugin that is to be deployed (e.g. OVNKubernetes). This should match a value that the cluster-network-operator understands, or else no networking will be installed. Currently supported values are: - OVNKubernetes This field is immutable after installation.", "type": "string", "default": "" }, @@ -8164,7 +8222,7 @@ "x-kubernetes-list-type": "atomic" }, "clusterNetworkMTU": { - "description": "ClusterNetworkMTU is the MTU for inter-pod networking.", + "description": "clusterNetworkMTU is the MTU for inter-pod networking.", "type": "integer", "format": "int32" }, @@ -8178,16 +8236,14 @@ "x-kubernetes-list-map-keys": [ "type" ], - "x-kubernetes-list-type": "map", - "x-kubernetes-patch-merge-key": "type", - "x-kubernetes-patch-strategy": "merge" + "x-kubernetes-list-type": "map" }, "migration": { - "description": "Migration contains the cluster network migration configuration.", + "description": "migration contains the cluster network migration configuration.", "$ref": "#/definitions/com.github.openshift.api.config.v1.NetworkMigration" }, "networkType": { - "description": "NetworkType is the plugin that is deployed (e.g. OVNKubernetes).", + "description": "networkType is the plugin that is deployed (e.g. OVNKubernetes).", "type": "string" }, "serviceNetwork": { @@ -8267,7 +8323,7 @@ "type": "object", "properties": { "cgroupMode": { - "description": "CgroupMode determines the cgroups version on the node", + "description": "cgroupMode determines the cgroups version on the node", "type": "string" }, "minimumKubeletVersion": { @@ -8276,7 +8332,7 @@ "default": "" }, "workerLatencyProfile": { - "description": "WorkerLatencyProfile determins the how fast the kubelet is updating the status and corresponding reaction of the cluster", + "description": "workerLatencyProfile determins the how fast the kubelet is updating the status and corresponding reaction of the cluster", "type": "string" } } @@ -8294,9 +8350,7 @@ "x-kubernetes-list-map-keys": [ "type" ], - "x-kubernetes-list-type": "map", - "x-kubernetes-patch-merge-key": "type", - "x-kubernetes-patch-strategy": "merge" + "x-kubernetes-list-type": "map" } } }, @@ -8320,16 +8374,13 @@ "default": "" }, "subnets": { - "description": "subnets holds a list of identifiers (one or more) of the cluster's network subnets for the Machine's VM to connect to. 
The subnet identifiers (uuid or name) can be obtained from the Prism Central console or using the prism_central API.", + "description": "subnets holds a list of identifiers (one or more) of the cluster's network subnets If the feature gate NutanixMultiSubnets is enabled, up to 32 subnets may be configured. for the Machine's VM to connect to. The subnet identifiers (uuid or name) can be obtained from the Prism Central console or using the prism_central API.", "type": "array", "items": { "default": {}, "$ref": "#/definitions/com.github.openshift.api.config.v1.NutanixResourceIdentifier" }, - "x-kubernetes-list-map-keys": [ - "type" - ], - "x-kubernetes-list-type": "map" + "x-kubernetes-list-type": "atomic" } } }, @@ -8660,27 +8711,27 @@ ], "properties": { "clientID": { - "description": "ClientID is the identifier of the OIDC client from the OIDC provider", + "description": "clientID is the identifier of the OIDC client from the OIDC provider", "type": "string", "default": "" }, "clientSecret": { - "description": "ClientSecret refers to a secret in the `openshift-config` namespace that contains the client secret in the `clientSecret` key of the `.data` field", + "description": "clientSecret refers to a secret in the `openshift-config` namespace that contains the client secret in the `clientSecret` key of the `.data` field", "default": {}, "$ref": "#/definitions/com.github.openshift.api.config.v1.SecretNameReference" }, "componentName": { - "description": "ComponentName is the name of the component that is supposed to consume this client configuration", + "description": "componentName is the name of the component that is supposed to consume this client configuration", "type": "string", "default": "" }, "componentNamespace": { - "description": "ComponentNamespace is the namespace of the component that is supposed to consume this client configuration", + "description": "componentNamespace is the namespace of the component that is supposed to consume this client configuration", "type": "string", "default": "" }, "extraScopes": { - "description": "ExtraScopes is an optional set of scopes to request tokens with.", + "description": "extraScopes is an optional set of scopes to request tokens with.", "type": "array", "items": { "type": "string", @@ -8699,7 +8750,7 @@ ], "properties": { "clientID": { - "description": "ClientID is the identifier of the OIDC client from the OIDC provider", + "description": "clientID is the identifier of the OIDC client from the OIDC provider", "type": "string", "default": "" }, @@ -8725,17 +8776,17 @@ ], "properties": { "componentName": { - "description": "ComponentName is the name of the component that will consume a client configuration.", + "description": "componentName is the name of the component that will consume a client configuration.", "type": "string", "default": "" }, "componentNamespace": { - "description": "ComponentNamespace is the namespace of the component that will consume a client configuration.", + "description": "componentNamespace is the namespace of the component that will consume a client configuration.", "type": "string", "default": "" }, "conditions": { - "description": "Conditions are used to communicate the state of the `oidcClients` entry.\n\nSupported conditions include Available, Degraded and Progressing.\n\nIf Available is true, the component is successfully using the configured client. If Degraded is true, that means something has gone wrong trying to handle the client configuration. 
If Progressing is true, that means the component is taking some action related to the `oidcClients` entry.", + "description": "conditions are used to communicate the state of the `oidcClients` entry.\n\nSupported conditions include Available, Degraded and Progressing.\n\nIf Available is true, the component is successfully using the configured client. If Degraded is true, that means something has gone wrong trying to handle the client configuration. If Progressing is true, that means the component is taking some action related to the `oidcClients` entry.", "type": "array", "items": { "default": {}, @@ -8747,7 +8798,7 @@ "x-kubernetes-list-type": "map" }, "consumingUsers": { - "description": "ConsumingUsers is a slice of ServiceAccounts that need to have read permission on the `clientSecret` secret.", + "description": "consumingUsers is a slice of ServiceAccounts that need to have read permission on the `clientSecret` secret.", "type": "array", "items": { "type": "string", @@ -8756,7 +8807,7 @@ "x-kubernetes-list-type": "set" }, "currentOIDCClients": { - "description": "CurrentOIDCClients is a list of clients that the component is currently using.", + "description": "currentOIDCClients is a list of clients that the component is currently using.", "type": "array", "items": { "default": {}, @@ -8780,12 +8831,12 @@ ], "properties": { "claimMappings": { - "description": "ClaimMappings describes rules on how to transform information from an ID token into a cluster identity", + "description": "claimMappings describes rules on how to transform information from an ID token into a cluster identity", "default": {}, "$ref": "#/definitions/com.github.openshift.api.config.v1.TokenClaimMappings" }, "claimValidationRules": { - "description": "ClaimValidationRules are rules that are applied to validate token claims to authenticate users.", + "description": "claimValidationRules are rules that are applied to validate token claims to authenticate users.", "type": "array", "items": { "default": {}, @@ -8794,17 +8845,17 @@ "x-kubernetes-list-type": "atomic" }, "issuer": { - "description": "Issuer describes atributes of the OIDC token issuer", + "description": "issuer describes atributes of the OIDC token issuer", "default": {}, "$ref": "#/definitions/com.github.openshift.api.config.v1.TokenIssuer" }, "name": { - "description": "Name of the OIDC provider", + "description": "name of the OIDC provider", "type": "string", "default": "" }, "oidcClients": { - "description": "OIDCClients contains configuration for the platform's clients that need to request tokens from the issuer", + "description": "oidcClients contains configuration for the platform's clients that need to request tokens from the issuer", "type": "array", "items": { "default": {}, @@ -9246,23 +9297,23 @@ ], "properties": { "alibabaCloud": { - "description": "AlibabaCloud contains settings specific to the Alibaba Cloud infrastructure provider.", + "description": "alibabaCloud contains settings specific to the Alibaba Cloud infrastructure provider.", "$ref": "#/definitions/com.github.openshift.api.config.v1.AlibabaCloudPlatformSpec" }, "aws": { - "description": "AWS contains settings specific to the Amazon Web Services infrastructure provider.", + "description": "aws contains settings specific to the Amazon Web Services infrastructure provider.", "$ref": "#/definitions/com.github.openshift.api.config.v1.AWSPlatformSpec" }, "azure": { - "description": "Azure contains settings specific to the Azure infrastructure provider.", + "description": "azure 
contains settings specific to the Azure infrastructure provider.", "$ref": "#/definitions/com.github.openshift.api.config.v1.AzurePlatformSpec" }, "baremetal": { - "description": "BareMetal contains settings specific to the BareMetal platform.", + "description": "baremetal contains settings specific to the BareMetal platform.", "$ref": "#/definitions/com.github.openshift.api.config.v1.BareMetalPlatformSpec" }, "equinixMetal": { - "description": "EquinixMetal contains settings specific to the Equinix Metal infrastructure provider.", + "description": "equinixMetal contains settings specific to the Equinix Metal infrastructure provider.", "$ref": "#/definitions/com.github.openshift.api.config.v1.EquinixMetalPlatformSpec" }, "external": { @@ -9270,31 +9321,31 @@ "$ref": "#/definitions/com.github.openshift.api.config.v1.ExternalPlatformSpec" }, "gcp": { - "description": "GCP contains settings specific to the Google Cloud Platform infrastructure provider.", + "description": "gcp contains settings specific to the Google Cloud Platform infrastructure provider.", "$ref": "#/definitions/com.github.openshift.api.config.v1.GCPPlatformSpec" }, "ibmcloud": { - "description": "IBMCloud contains settings specific to the IBMCloud infrastructure provider.", + "description": "ibmcloud contains settings specific to the IBMCloud infrastructure provider.", "$ref": "#/definitions/com.github.openshift.api.config.v1.IBMCloudPlatformSpec" }, "kubevirt": { - "description": "Kubevirt contains settings specific to the kubevirt infrastructure provider.", + "description": "kubevirt contains settings specific to the kubevirt infrastructure provider.", "$ref": "#/definitions/com.github.openshift.api.config.v1.KubevirtPlatformSpec" }, "nutanix": { - "description": "Nutanix contains settings specific to the Nutanix infrastructure provider.", + "description": "nutanix contains settings specific to the Nutanix infrastructure provider.", "$ref": "#/definitions/com.github.openshift.api.config.v1.NutanixPlatformSpec" }, "openstack": { - "description": "OpenStack contains settings specific to the OpenStack infrastructure provider.", + "description": "openstack contains settings specific to the OpenStack infrastructure provider.", "$ref": "#/definitions/com.github.openshift.api.config.v1.OpenStackPlatformSpec" }, "ovirt": { - "description": "Ovirt contains settings specific to the oVirt infrastructure provider.", + "description": "ovirt contains settings specific to the oVirt infrastructure provider.", "$ref": "#/definitions/com.github.openshift.api.config.v1.OvirtPlatformSpec" }, "powervs": { - "description": "PowerVS contains settings specific to the IBM Power Systems Virtual Servers infrastructure provider.", + "description": "powervs contains settings specific to the IBM Power Systems Virtual Servers infrastructure provider.", "$ref": "#/definitions/com.github.openshift.api.config.v1.PowerVSPlatformSpec" }, "type": { @@ -9303,7 +9354,7 @@ "default": "" }, "vsphere": { - "description": "VSphere contains settings specific to the VSphere infrastructure provider.", + "description": "vsphere contains settings specific to the VSphere infrastructure provider.", "$ref": "#/definitions/com.github.openshift.api.config.v1.VSpherePlatformSpec" } } @@ -9316,55 +9367,55 @@ ], "properties": { "alibabaCloud": { - "description": "AlibabaCloud contains settings specific to the Alibaba Cloud infrastructure provider.", + "description": "alibabaCloud contains settings specific to the Alibaba Cloud infrastructure provider.", "$ref": 
"#/definitions/com.github.openshift.api.config.v1.AlibabaCloudPlatformStatus" }, "aws": { - "description": "AWS contains settings specific to the Amazon Web Services infrastructure provider.", + "description": "aws contains settings specific to the Amazon Web Services infrastructure provider.", "$ref": "#/definitions/com.github.openshift.api.config.v1.AWSPlatformStatus" }, "azure": { - "description": "Azure contains settings specific to the Azure infrastructure provider.", + "description": "azure contains settings specific to the Azure infrastructure provider.", "$ref": "#/definitions/com.github.openshift.api.config.v1.AzurePlatformStatus" }, "baremetal": { - "description": "BareMetal contains settings specific to the BareMetal platform.", + "description": "baremetal contains settings specific to the BareMetal platform.", "$ref": "#/definitions/com.github.openshift.api.config.v1.BareMetalPlatformStatus" }, "equinixMetal": { - "description": "EquinixMetal contains settings specific to the Equinix Metal infrastructure provider.", + "description": "equinixMetal contains settings specific to the Equinix Metal infrastructure provider.", "$ref": "#/definitions/com.github.openshift.api.config.v1.EquinixMetalPlatformStatus" }, "external": { - "description": "External contains settings specific to the generic External infrastructure provider.", + "description": "external contains settings specific to the generic External infrastructure provider.", "$ref": "#/definitions/com.github.openshift.api.config.v1.ExternalPlatformStatus" }, "gcp": { - "description": "GCP contains settings specific to the Google Cloud Platform infrastructure provider.", + "description": "gcp contains settings specific to the Google Cloud Platform infrastructure provider.", "$ref": "#/definitions/com.github.openshift.api.config.v1.GCPPlatformStatus" }, "ibmcloud": { - "description": "IBMCloud contains settings specific to the IBMCloud infrastructure provider.", + "description": "ibmcloud contains settings specific to the IBMCloud infrastructure provider.", "$ref": "#/definitions/com.github.openshift.api.config.v1.IBMCloudPlatformStatus" }, "kubevirt": { - "description": "Kubevirt contains settings specific to the kubevirt infrastructure provider.", + "description": "kubevirt contains settings specific to the kubevirt infrastructure provider.", "$ref": "#/definitions/com.github.openshift.api.config.v1.KubevirtPlatformStatus" }, "nutanix": { - "description": "Nutanix contains settings specific to the Nutanix infrastructure provider.", + "description": "nutanix contains settings specific to the Nutanix infrastructure provider.", "$ref": "#/definitions/com.github.openshift.api.config.v1.NutanixPlatformStatus" }, "openstack": { - "description": "OpenStack contains settings specific to the OpenStack infrastructure provider.", + "description": "openstack contains settings specific to the OpenStack infrastructure provider.", "$ref": "#/definitions/com.github.openshift.api.config.v1.OpenStackPlatformStatus" }, "ovirt": { - "description": "Ovirt contains settings specific to the oVirt infrastructure provider.", + "description": "ovirt contains settings specific to the oVirt infrastructure provider.", "$ref": "#/definitions/com.github.openshift.api.config.v1.OvirtPlatformStatus" }, "powervs": { - "description": "PowerVS contains settings specific to the Power Systems Virtual Servers infrastructure provider.", + "description": "powervs contains settings specific to the Power Systems Virtual Servers infrastructure provider.", "$ref": 
"#/definitions/com.github.openshift.api.config.v1.PowerVSPlatformStatus" }, "type": { @@ -9373,7 +9424,7 @@ "default": "" }, "vsphere": { - "description": "VSphere contains settings specific to the VSphere infrastructure provider.", + "description": "vsphere contains settings specific to the VSphere infrastructure provider.", "$ref": "#/definitions/com.github.openshift.api.config.v1.VSpherePlatformStatus" } } @@ -9405,11 +9456,11 @@ ], "properties": { "cisInstanceCRN": { - "description": "CISInstanceCRN is the CRN of the Cloud Internet Services instance managing the DNS zone for the cluster's base domain", + "description": "cisInstanceCRN is the CRN of the Cloud Internet Services instance managing the DNS zone for the cluster's base domain", "type": "string" }, "dnsInstanceCRN": { - "description": "DNSInstanceCRN is the CRN of the DNS Services instance managing the DNS zone for the cluster's base domain", + "description": "dnsInstanceCRN is the CRN of the DNS Services instance managing the DNS zone for the cluster's base domain", "type": "string" }, "region": { @@ -9469,12 +9520,12 @@ ], "properties": { "claim": { - "description": "Claim is a JWT token claim to be used in the mapping", + "description": "claim is a JWT token claim to be used in the mapping", "type": "string", "default": "" }, "prefix": { - "description": "Prefix is a string to prefix the value from the token in the result of the claim mapping.\n\nBy default, no prefixing occurs.\n\nExample: if `prefix` is set to \"myoidc:\"\" and the `claim` in JWT contains an array of strings \"a\", \"b\" and \"c\", the mapping will result in an array of string \"myoidc:a\", \"myoidc:b\" and \"myoidc:c\".", + "description": "prefix is a string to prefix the value from the token in the result of the claim mapping.\n\nBy default, no prefixing occurs.\n\nExample: if `prefix` is set to \"myoidc:\"\" and the `claim` in JWT contains an array of strings \"a\", \"b\" and \"c\", the mapping will result in an array of string \"myoidc:a\", \"myoidc:b\" and \"myoidc:c\".", "type": "string", "default": "" } @@ -9580,7 +9631,7 @@ ], "properties": { "promql": { - "description": "PromQL is a PromQL query classifying clusters. This query query should return a 1 in the match case and a 0 in the does-not-match case. Queries which return no time series, or which return values besides 0 or 1, are evaluation failures.", + "description": "promql is a PromQL query classifying clusters. This query query should return a 1 in the match case and a 0 in the does-not-match case. Queries which return no time series, or which return values besides 0 or 1, are evaluation failures.", "type": "string", "default": "" } @@ -9607,7 +9658,7 @@ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta" }, "spec": { - "description": "Spec holds user-settable values for the proxy configuration", + "description": "spec holds user-settable values for the proxy configuration", "default": {}, "$ref": "#/definitions/com.github.openshift.api.config.v1.ProxySpec" }, @@ -9765,6 +9816,10 @@ "image" ], "properties": { + "architecture": { + "description": "architecture is an optional field that indicates the value of the cluster architecture. In this context cluster architecture means either a single architecture or a multi architecture. 
Valid values are 'Multi' and empty.", + "type": "string" + }, "channels": { "description": "channels is the set of Cincinnati channels to which the release currently belongs.", "type": "array", @@ -9801,22 +9856,22 @@ ], "properties": { "ca": { - "description": "CA is the CA for verifying TLS connections", + "description": "ca is the CA for verifying TLS connections", "type": "string", "default": "" }, "certFile": { - "description": "CertFile is a file containing a PEM-encoded certificate", + "description": "certFile is a file containing a PEM-encoded certificate", "type": "string", "default": "" }, "keyFile": { - "description": "KeyFile is a file containing a PEM-encoded private key for the certificate specified by CertFile", + "description": "keyFile is a file containing a PEM-encoded private key for the certificate specified by CertFile", "type": "string", "default": "" }, "url": { - "description": "URL is the remote URL to connect to", + "description": "url is the remote URL to connect to", "type": "string", "default": "" } @@ -10023,7 +10078,7 @@ "type": "string" }, "mastersSchedulable": { - "description": "MastersSchedulable allows masters nodes to be schedulable. When this flag is turned on, all the master nodes in the cluster will be made schedulable, so that workload pods can run on them. The default value for this field is false, meaning none of the master nodes are schedulable. Important Note: Once the workload pods start running on the master nodes, extreme care must be taken to ensure that cluster-critical control plane components are not impacted. Please turn on this field after doing due diligence.", + "description": "mastersSchedulable allows masters nodes to be schedulable. When this flag is turned on, all the master nodes in the cluster will be made schedulable, so that workload pods can run on them. The default value for this field is false, meaning none of the master nodes are schedulable. Important Note: Once the workload pods start running on the master nodes, extreme care must be taken to ensure that cluster-critical control plane components are not impacted. Please turn on this field after doing due diligence.", "type": "boolean", "default": false }, @@ -10071,22 +10126,22 @@ ], "properties": { "bindAddress": { - "description": "BindAddress is the ip:port to serve on", + "description": "bindAddress is the ip:port to serve on", "type": "string", "default": "" }, "bindNetwork": { - "description": "BindNetwork is the type of network to bind to - defaults to \"tcp4\", accepts \"tcp\", \"tcp4\", and \"tcp6\"", + "description": "bindNetwork is the type of network to bind to - defaults to \"tcp4\", accepts \"tcp\", \"tcp4\", and \"tcp6\"", "type": "string", "default": "" }, "certFile": { - "description": "CertFile is a file containing a PEM-encoded certificate", + "description": "certFile is a file containing a PEM-encoded certificate", "type": "string", "default": "" }, "cipherSuites": { - "description": "CipherSuites contains an overridden list of ciphers for the server to support. Values must match cipher suite IDs from https://golang.org/pkg/crypto/tls/#pkg-constants", + "description": "cipherSuites contains an overridden list of ciphers for the server to support. 
Values must match cipher suite IDs from https://golang.org/pkg/crypto/tls/#pkg-constants", "type": "array", "items": { "type": "string", @@ -10094,20 +10149,20 @@ } }, "clientCA": { - "description": "ClientCA is the certificate bundle for all the signers that you'll recognize for incoming client certificates", + "description": "clientCA is the certificate bundle for all the signers that you'll recognize for incoming client certificates", "type": "string" }, "keyFile": { - "description": "KeyFile is a file containing a PEM-encoded private key for the certificate specified by CertFile", + "description": "keyFile is a file containing a PEM-encoded private key for the certificate specified by CertFile", "type": "string", "default": "" }, "minTLSVersion": { - "description": "MinTLSVersion is the minimum TLS version supported. Values must match version names from https://golang.org/pkg/crypto/tls/#pkg-constants", + "description": "minTLSVersion is the minimum TLS version supported. Values must match version names from https://golang.org/pkg/crypto/tls/#pkg-constants", "type": "string" }, "namedCertificates": { - "description": "NamedCertificates is a list of certificates to use to secure requests to specific hostnames", + "description": "namedCertificates is a list of certificates to use to secure requests to specific hostnames", "type": "array", "items": { "default": {}, @@ -10146,22 +10201,22 @@ ], "properties": { "env": { - "description": "Env specifies an envvar containing the cleartext value, or an encrypted value if the keyFile is specified.", + "description": "env specifies an envvar containing the cleartext value, or an encrypted value if the keyFile is specified.", "type": "string", "default": "" }, "file": { - "description": "File references a file containing the cleartext value, or an encrypted value if a keyFile is specified.", + "description": "file references a file containing the cleartext value, or an encrypted value if a keyFile is specified.", "type": "string", "default": "" }, "keyFile": { - "description": "KeyFile references a file containing the key to use to decrypt the value.", + "description": "keyFile references a file containing the key to use to decrypt the value.", "type": "string", "default": "" }, "value": { - "description": "Value specifies the cleartext value, or an encrypted value if keyFile is specified.", + "description": "value specifies the cleartext value, or an encrypted value if keyFile is specified.", "type": "string", "default": "" } @@ -10178,22 +10233,22 @@ ], "properties": { "env": { - "description": "Env specifies an envvar containing the cleartext value, or an encrypted value if the keyFile is specified.", + "description": "env specifies an envvar containing the cleartext value, or an encrypted value if the keyFile is specified.", "type": "string", "default": "" }, "file": { - "description": "File references a file containing the cleartext value, or an encrypted value if a keyFile is specified.", + "description": "file references a file containing the cleartext value, or an encrypted value if a keyFile is specified.", "type": "string", "default": "" }, "keyFile": { - "description": "KeyFile references a file containing the key to use to decrypt the value.", + "description": "keyFile references a file containing the key to use to decrypt the value.", "type": "string", "default": "" }, "value": { - "description": "Value specifies the cleartext value, or an encrypted value if keyFile is specified.", + "description": "value specifies the cleartext 
value, or an encrypted value if keyFile is specified.", "type": "string", "default": "" } @@ -10282,7 +10337,7 @@ ], "properties": { "testName": { - "description": "TestName is the name of the test as it appears in junit XMLs. It does not include the suite name since the same test can be executed in many suites.", + "description": "testName is the name of the test as it appears in junit XMLs. It does not include the suite name since the same test can be executed in many suites.", "type": "string", "default": "" } @@ -10326,7 +10381,7 @@ ], "properties": { "testsForFeatureGates": { - "description": "TestsForFeatureGates is a list, indexed by FeatureGate and includes information about testing.", + "description": "testsForFeatureGates is a list, indexed by FeatureGate and includes information about testing.", "type": "array", "items": { "default": {}, @@ -10345,7 +10400,7 @@ ], "properties": { "claim": { - "description": "Claim is a JWT token claim to be used in the mapping", + "description": "claim is a JWT token claim to be used in the mapping", "type": "string", "default": "" } @@ -10355,12 +10410,12 @@ "type": "object", "properties": { "groups": { - "description": "Groups is a name of the claim that should be used to construct groups for the cluster identity. The referenced claim must use array of strings values.", + "description": "groups is a name of the claim that should be used to construct groups for the cluster identity. The referenced claim must use array of strings values.", "default": {}, "$ref": "#/definitions/com.github.openshift.api.config.v1.PrefixedClaimMapping" }, "username": { - "description": "Username is a name of the claim that should be used to construct usernames for the cluster identity.\n\nDefault value: \"sub\"", + "description": "username is a name of the claim that should be used to construct usernames for the cluster identity.\n\nDefault value: \"sub\"", "default": {}, "$ref": "#/definitions/com.github.openshift.api.config.v1.UsernameClaimMapping" } @@ -10374,11 +10429,11 @@ ], "properties": { "requiredClaim": { - "description": "RequiredClaim allows configuring a required claim name and its expected value", + "description": "requiredClaim allows configuring a required claim name and its expected value", "$ref": "#/definitions/com.github.openshift.api.config.v1.TokenRequiredClaim" }, "type": { - "description": "Type sets the type of the validation rule", + "description": "type sets the type of the validation rule", "type": "string", "default": "" } @@ -10413,7 +10468,7 @@ ], "properties": { "audiences": { - "description": "Audiences is an array of audiences that the token was issued for. Valid tokens must include at least one of these values in their \"aud\" claim. Must be set to exactly one value.", + "description": "audiences is an array of audiences that the token was issued for. Valid tokens must include at least one of these values in their \"aud\" claim. Must be set to exactly one value.", "type": "array", "items": { "type": "string", @@ -10441,12 +10496,12 @@ ], "properties": { "claim": { - "description": "Claim is a name of a required claim. Only claims with string values are supported.", + "description": "claim is a name of a required claim. 
Only claims with string values are supported.", "type": "string", "default": "" }, "requiredValue": { - "description": "RequiredValue is the required value for the claim.", + "description": "requiredValue is the required value for the claim.", "type": "string", "default": "" } @@ -10467,12 +10522,12 @@ "default": false }, "image": { - "description": "image is a container image location that contains the update. image should be used when the desired version does not exist in availableUpdates or history. When image is set, version is ignored. When image is set, version should be empty. When image is set, architecture cannot be specified.", + "description": "image is a container image location that contains the update. image should be used when the desired version does not exist in availableUpdates or history. When image is set, architecture cannot be specified. If both version and image are set, the version extracted from the referenced image must match the specified version.", "type": "string", "default": "" }, "version": { - "description": "version is a semantic version identifying the update version. version is ignored if image is specified and required if architecture is specified.", + "description": "version is a semantic version identifying the update version. version is required if architecture is specified. If both version and image are set, the version extracted from the referenced image must match the specified version.", "type": "string", "default": "" } @@ -10532,7 +10587,7 @@ ], "properties": { "claim": { - "description": "Claim is a JWT token claim to be used in the mapping", + "description": "claim is a JWT token claim to be used in the mapping", "type": "string", "default": "" }, @@ -10540,7 +10595,7 @@ "$ref": "#/definitions/com.github.openshift.api.config.v1.UsernamePrefix" }, "prefixPolicy": { - "description": "PrefixPolicy specifies how a prefix should apply.\n\nBy default, claims other than `email` will be prefixed with the issuer URL to prevent naming clashes with other plugins.\n\nSet to \"NoPrefix\" to disable prefixing.\n\nExample:\n (1) `prefix` is set to \"myoidc:\" and `claim` is set to \"username\".\n If the JWT claim `username` contains value `userA`, the resulting\n mapped value will be \"myoidc:userA\".\n (2) `prefix` is set to \"myoidc:\" and `claim` is set to \"email\". If the\n JWT `email` claim contains value \"userA@myoidc.tld\", the resulting\n mapped value will be \"myoidc:userA@myoidc.tld\".\n (3) `prefix` is unset, `issuerURL` is set to `https://myoidc.tld`,\n the JWT claims include \"username\":\"userA\" and \"email\":\"userA@myoidc.tld\",\n and `claim` is set to:\n (a) \"username\": the mapped value will be \"https://myoidc.tld#userA\"\n (b) \"email\": the mapped value will be \"userA@myoidc.tld\"", + "description": "prefixPolicy specifies how a prefix should apply.\n\nBy default, claims other than `email` will be prefixed with the issuer URL to prevent naming clashes with other plugins.\n\nSet to \"NoPrefix\" to disable prefixing.\n\nExample:\n (1) `prefix` is set to \"myoidc:\" and `claim` is set to \"username\".\n If the JWT claim `username` contains value `userA`, the resulting\n mapped value will be \"myoidc:userA\".\n (2) `prefix` is set to \"myoidc:\" and `claim` is set to \"email\". 
If the\n JWT `email` claim contains value \"userA@myoidc.tld\", the resulting\n mapped value will be \"myoidc:userA@myoidc.tld\".\n (3) `prefix` is unset, `issuerURL` is set to `https://myoidc.tld`,\n the JWT claims include \"username\":\"userA\" and \"email\":\"userA@myoidc.tld\",\n and `claim` is set to:\n (a) \"username\": the mapped value will be \"https://myoidc.tld#userA\"\n (b) \"email\": the mapped value will be \"userA@myoidc.tld\"", "type": "string", "default": "" } @@ -10558,6 +10613,86 @@ } } }, + "com.github.openshift.api.config.v1.VSphereFailureDomainHostGroup": { + "description": "VSphereFailureDomainHostGroup holds the vmGroup and the hostGroup names in vCenter corresponds to a vm-host group of type Virtual Machine and Host respectively. Is also contains the vmHostRule which is an affinity vm-host rule in vCenter.", + "type": "object", + "required": [ + "vmGroup", + "hostGroup", + "vmHostRule" + ], + "properties": { + "hostGroup": { + "description": "hostGroup is the name of the vm-host group of type host within vCenter for this failure domain. hostGroup is limited to 80 characters. This field is required when the VSphereFailureDomain ZoneType is HostGroup", + "type": "string", + "default": "" + }, + "vmGroup": { + "description": "vmGroup is the name of the vm-host group of type virtual machine within vCenter for this failure domain. vmGroup is limited to 80 characters. This field is required when the VSphereFailureDomain ZoneType is HostGroup", + "type": "string", + "default": "" + }, + "vmHostRule": { + "description": "vmHostRule is the name of the affinity vm-host rule within vCenter for this failure domain. vmHostRule is limited to 80 characters. This field is required when the VSphereFailureDomain ZoneType is HostGroup", + "type": "string", + "default": "" + } + } + }, + "com.github.openshift.api.config.v1.VSphereFailureDomainRegionAffinity": { + "description": "VSphereFailureDomainRegionAffinity contains the region type which is the string representation of the VSphereFailureDomainRegionType with available options of Datacenter and ComputeCluster.", + "type": "object", + "required": [ + "type" + ], + "properties": { + "type": { + "description": "type determines the vSphere object type for a region within this failure domain. Available types are Datacenter and ComputeCluster. When set to Datacenter, this means the vCenter Datacenter defined is the region. When set to ComputeCluster, this means the vCenter cluster defined is the region.\n\nPossible enum values:\n - `\"ComputeCluster\"` is a failure domain region for a vCenter compute cluster.\n - `\"Datacenter\"` is a failure domain region for a vCenter datacenter.", + "type": "string", + "default": "", + "enum": [ + "ComputeCluster", + "Datacenter" + ] + } + }, + "x-kubernetes-unions": [ + { + "discriminator": "type", + "fields-to-discriminateBy": {} + } + ] + }, + "com.github.openshift.api.config.v1.VSphereFailureDomainZoneAffinity": { + "description": "VSphereFailureDomainZoneAffinity contains the vCenter cluster vm-host group (virtual machine and host types) and the vm-host affinity rule that together creates an affinity configuration for vm-host based zonal. 
This configuration within vCenter creates the required association between a failure domain, virtual machines and ESXi hosts to create a vm-host based zone.", + "type": "object", + "required": [ + "type" + ], + "properties": { + "hostGroup": { + "description": "hostGroup holds the vmGroup and the hostGroup names in vCenter corresponds to a vm-host group of type Virtual Machine and Host respectively. Is also contains the vmHostRule which is an affinity vm-host rule in vCenter.", + "$ref": "#/definitions/com.github.openshift.api.config.v1.VSphereFailureDomainHostGroup" + }, + "type": { + "description": "type determines the vSphere object type for a zone within this failure domain. Available types are ComputeCluster and HostGroup. When set to ComputeCluster, this means the vCenter cluster defined is the zone. When set to HostGroup, hostGroup must be configured with hostGroup, vmGroup and vmHostRule and this means the zone is defined by the grouping of those fields.\n\nPossible enum values:\n - `\"ComputeCluster\"` is a failure domain zone for a vCenter compute cluster.\n - `\"HostGroup\"` is a failure domain zone for a vCenter vm-host group.", + "type": "string", + "default": "", + "enum": [ + "ComputeCluster", + "HostGroup" + ] + } + }, + "x-kubernetes-unions": [ + { + "discriminator": "type", + "fields-to-discriminateBy": { + "hostGroup": "HostGroup" + } + } + ] + }, "com.github.openshift.api.config.v1.VSpherePlatformFailureDomainSpec": { "description": "VSpherePlatformFailureDomainSpec holds the region and zone failure domain and the vCenter topology of that failure domain.", "type": "object", @@ -10579,13 +10714,17 @@ "type": "string", "default": "" }, + "regionAffinity": { + "description": "regionAffinity holds the type of region, Datacenter or ComputeCluster. When set to Datacenter, this means the region is a vCenter Datacenter as defined in topology. When set to ComputeCluster, this means the region is a vCenter Cluster as defined in topology.", + "$ref": "#/definitions/com.github.openshift.api.config.v1.VSphereFailureDomainRegionAffinity" + }, "server": { "description": "server is the fully-qualified domain name or the IP address of the vCenter server.", "type": "string", "default": "" }, "topology": { - "description": "Topology describes a given failure domain using vSphere constructs", + "description": "topology describes a given failure domain using vSphere constructs", "default": {}, "$ref": "#/definitions/com.github.openshift.api.config.v1.VSpherePlatformTopology" }, @@ -10593,6 +10732,10 @@ "description": "zone defines the name of a zone tag that will be attached to a vCenter cluster. The tag category in vCenter must be named openshift-zone.", "type": "string", "default": "" + }, + "zoneAffinity": { + "description": "zoneAffinity holds the type of the zone and the hostGroup which vmGroup and the hostGroup names in vCenter corresponds to a vm-host group of type Virtual Machine and Host respectively. Is also contains the vmHostRule which is an affinity vm-host rule in vCenter.", + "$ref": "#/definitions/com.github.openshift.api.config.v1.VSphereFailureDomainZoneAffinity" } } }, @@ -11044,22 +11187,99 @@ } } }, + "com.github.openshift.api.config.v1alpha1.ClusterMonitoring": { + "description": "ClusterMonitoring is the Custom Resource object which holds the current status of Cluster Monitoring Operator. CMO is a central component of the monitoring stack.\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. 
These capabilities should not be used by applications needing long term support. ClusterMonitoring is the Schema for the Cluster Monitoring Operators API", + "type": "object", + "required": [ + "spec" + ], + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "description": "metadata is the standard object metadata.", + "default": {}, + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta" + }, + "spec": { + "description": "spec holds user configuration for the Cluster Monitoring Operator", + "default": {}, + "$ref": "#/definitions/com.github.openshift.api.config.v1alpha1.ClusterMonitoringSpec" + }, + "status": { + "description": "status holds observed values from the cluster. They may not be overridden.", + "default": {}, + "$ref": "#/definitions/com.github.openshift.api.config.v1alpha1.ClusterMonitoringStatus" + } + } + }, + "com.github.openshift.api.config.v1alpha1.ClusterMonitoringList": { + "description": "Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.", + "type": "object", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "items": { + "description": "items is a list of ClusterMonitoring", + "type": "array", + "items": { + "default": {}, + "$ref": "#/definitions/com.github.openshift.api.config.v1alpha1.ClusterMonitoring" + } + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "description": "metadata is the standard list metadata.", + "default": {}, + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta" + } + } + }, + "com.github.openshift.api.config.v1alpha1.ClusterMonitoringSpec": { + "description": "ClusterMonitoringSpec defines the desired state of Cluster Monitoring Operator", + "type": "object", + "required": [ + "userDefined" + ], + "properties": { + "userDefined": { + "description": "userDefined set the deployment mode for user-defined monitoring in addition to the default platform monitoring.", + "default": {}, + "$ref": "#/definitions/com.github.openshift.api.config.v1alpha1.UserDefinedMonitoring" + } + } + }, + "com.github.openshift.api.config.v1alpha1.ClusterMonitoringStatus": { + "description": "MonitoringOperatorStatus defines the observed state of MonitoringOperator", + "type": "object" + }, "com.github.openshift.api.config.v1alpha1.EtcdBackupSpec": { "description": "EtcdBackupSpec provides configuration for automated etcd backups to the cluster-etcd-operator", "type": "object", "properties": { "pvcName": { - "description": "PVCName specifies the name of the PersistentVolumeClaim (PVC) which binds a PersistentVolume where the etcd backup files would be saved The PVC itself must always be created in the \"openshift-etcd\" namespace If the PVC is left unspecified \"\" then the platform will choose a reasonable default location to save the backup. In the future this would be backups saved across the control-plane master nodes.", + "description": "pvcName specifies the name of the PersistentVolumeClaim (PVC) which binds a PersistentVolume where the etcd backup files would be saved The PVC itself must always be created in the \"openshift-etcd\" namespace If the PVC is left unspecified \"\" then the platform will choose a reasonable default location to save the backup. In the future this would be backups saved across the control-plane master nodes.", "type": "string", "default": "" }, "retentionPolicy": { - "description": "RetentionPolicy defines the retention policy for retaining and deleting existing backups.", + "description": "retentionPolicy defines the retention policy for retaining and deleting existing backups.", "default": {}, "$ref": "#/definitions/com.github.openshift.api.config.v1alpha1.RetentionPolicy" }, "schedule": { - "description": "Schedule defines the recurring backup schedule in Cron format every 2 hours: 0 */2 * * * every day at 3am: 0 3 * * * Empty string means no opinion and the platform is left to choose a reasonable default which is subject to change without notice. The current default is \"no backups\", but will change in the future.", + "description": "schedule defines the recurring backup schedule in Cron format every 2 hours: 0 */2 * * * every day at 3am: 0 3 * * * Empty string means no opinion and the platform is left to choose a reasonable default which is subject to change without notice. 
The current default is \"no backups\", but will change in the future.", "type": "string", "default": "" }, @@ -11075,7 +11295,8 @@ "type": "object", "required": [ "fulcioCAData", - "rekorKeyData" + "rekorKeyData", + "fulcioSubject" ], "properties": { "fulcioCAData": { @@ -11100,16 +11321,20 @@ "type": "object", "properties": { "dataPolicy": { - "description": "dataPolicy allows user to enable additional global obfuscation of the IP addresses and base domain in the Insights archive data. Valid values are \"None\" and \"ObfuscateNetworking\". When set to None the data is not obfuscated. When set to ObfuscateNetworking the IP addresses and the cluster domain name are obfuscated. When omitted, this means no opinion and the platform is left to choose a reasonable default, which is subject to change over time. The current default is None.", + "description": "dataPolicy allows user to enable additional global obfuscation of the IP addresses and base domain in the Insights archive data. Valid values are \"None\" and \"ObfuscateNetworking\". When set to None the data is not obfuscated. When set to ObfuscateNetworking the IP addresses and the cluster domain name are obfuscated. When omitted, this means no opinion and the platform is left to choose a reasonable default, which is subject to change over time.", "type": "string" }, "disabledGatherers": { - "description": "disabledGatherers is a list of gatherers to be excluded from the gathering. All the gatherers can be disabled by providing \"all\" value. If all the gatherers are disabled, the Insights operator does not gather any data. The particular gatherers IDs can be found at https://github.com/openshift/insights-operator/blob/master/docs/gathered-data.md. Run the following command to get the names of last active gatherers: \"oc get insightsoperators.operator.openshift.io cluster -o json | jq '.status.gatherStatus.gatherers[].name'\" An example of disabling gatherers looks like this: `disabledGatherers: [\"clusterconfig/machine_configs\", \"workloads/workload_info\"]`", + "description": "disabledGatherers is a list of gatherers to be excluded from the gathering. All the gatherers can be disabled by providing \"all\" value. If all the gatherers are disabled, the Insights operator does not gather any data. The format for the disabledGatherer should be: {gatherer}/{function} where the function is optional. Gatherer consists of a lowercase letters only that may include underscores (_). Function consists of a lowercase letters only that may include underscores (_) and is separated from the gatherer by a forward slash (/). The particular gatherers IDs can be found at https://github.com/openshift/insights-operator/blob/master/docs/gathered-data.md. Run the following command to get the names of last active gatherers: \"oc get insightsoperators.operator.openshift.io cluster -o json | jq '.status.gatherStatus.gatherers[].name'\" An example of disabling gatherers looks like this: `disabledGatherers: [\"clusterconfig/machine_configs\", \"workloads/workload_info\"]`", "type": "array", "items": { "type": "string", "default": "" } + }, + "storage": { + "description": "storage is an optional field that allows user to define persistent storage for gathering jobs to store the Insights data archive. 
If omitted, the gathering job will use ephemeral storage.", + "$ref": "#/definitions/com.github.openshift.api.config.v1alpha1.Storage" } } }, @@ -11291,6 +11516,77 @@ "com.github.openshift.api.config.v1alpha1.InsightsDataGatherStatus": { "type": "object" }, + "com.github.openshift.api.config.v1alpha1.PKI": { + "description": "PKI defines the root of trust based on Root CA(s) and corresponding intermediate certificates.", + "type": "object", + "required": [ + "caRootsData", + "pkiCertificateSubject" + ], + "properties": { + "caIntermediatesData": { + "description": "caIntermediatesData contains base64-encoded data of a certificate bundle PEM file, which contains one or more intermediate certificates in the PEM format. The total length of the data must not exceed 8192 characters. caIntermediatesData requires caRootsData to be set.", + "type": "string", + "format": "byte" + }, + "caRootsData": { + "description": "caRootsData contains base64-encoded data of a certificate bundle PEM file, which contains one or more CA roots in the PEM format. The total length of the data must not exceed 8192 characters.", + "type": "string", + "format": "byte" + }, + "pkiCertificateSubject": { + "description": "pkiCertificateSubject defines the requirements imposed on the subject to which the certificate was issued.", + "default": {}, + "$ref": "#/definitions/com.github.openshift.api.config.v1alpha1.PKICertificateSubject" + } + } + }, + "com.github.openshift.api.config.v1alpha1.PKICertificateSubject": { + "description": "PKICertificateSubject defines the requirements imposed on the subject to which the certificate was issued.", + "type": "object", + "properties": { + "email": { + "description": "email specifies the expected email address imposed on the subject to which the certificate was issued, and must match the email address listed in the Subject Alternative Name (SAN) field of the certificate. The email should be a valid email address and at most 320 characters in length.", + "type": "string" + }, + "hostname": { + "description": "hostname specifies the expected hostname imposed on the subject to which the certificate was issued, and it must match the hostname listed in the Subject Alternative Name (SAN) DNS field of the certificate. The hostname should be a valid dns 1123 subdomain name, optionally prefixed by '*.', and at most 253 characters in length. It should consist only of lowercase alphanumeric characters, hyphens, periods and the optional preceding asterisk.", + "type": "string" + } + } + }, + "com.github.openshift.api.config.v1alpha1.PersistentVolumeClaimReference": { + "description": "persistentVolumeClaimReference is a reference to a PersistentVolumeClaim.", + "type": "object", + "required": [ + "name" + ], + "properties": { + "name": { + "description": "name is a string that follows the DNS1123 subdomain format. It must be at most 253 characters in length, and must consist only of lower case alphanumeric characters, '-' and '.', and must start and end with an alphanumeric character.", + "type": "string", + "default": "" + } + } + }, + "com.github.openshift.api.config.v1alpha1.PersistentVolumeConfig": { + "description": "persistentVolumeConfig provides configuration options for PersistentVolume storage.", + "type": "object", + "required": [ + "claim" + ], + "properties": { + "claim": { + "description": "claim is a required field that specifies the configuration of the PersistentVolumeClaim that will be used to store the Insights data archive. 
The PersistentVolumeClaim must be created in the openshift-insights namespace.", + "default": {}, + "$ref": "#/definitions/com.github.openshift.api.config.v1alpha1.PersistentVolumeClaimReference" + }, + "mountPath": { + "description": "mountPath is an optional field specifying the directory where the PVC will be mounted inside the Insights data gathering Pod. When omitted, this means no opinion and the platform is left to choose a reasonable default, which is subject to change over time. The current default mount path is /var/lib/insights-operator The path may not exceed 1024 characters and must not contain a colon.", + "type": "string" + } + } + }, "com.github.openshift.api.config.v1alpha1.Policy": { "description": "Policy defines the verification policy for the items in the scopes list.", "type": "object", @@ -11404,8 +11700,12 @@ "description": "fulcioCAWithRekor defines the root of trust based on the Fulcio certificate and the Rekor public key. For more information about Fulcio and Rekor, please refer to the document at: https://github.com/sigstore/fulcio and https://github.com/sigstore/rekor", "$ref": "#/definitions/com.github.openshift.api.config.v1alpha1.FulcioCAWithRekor" }, + "pki": { + "description": "pki defines the root of trust based on Bring Your Own Public Key Infrastructure (BYOPKI) Root CA(s) and corresponding intermediate certificates.", + "$ref": "#/definitions/com.github.openshift.api.config.v1alpha1.PKI" + }, "policyType": { - "description": "policyType serves as the union's discriminator. Users are required to assign a value to this field, choosing one of the policy types that define the root of trust. \"PublicKey\" indicates that the policy relies on a sigstore publicKey and may optionally use a Rekor verification. \"FulcioCAWithRekor\" indicates that the policy is based on the Fulcio certification and incorporates a Rekor verification.", + "description": "policyType serves as the union's discriminator. Users are required to assign a value to this field, choosing one of the policy types that define the root of trust. \"PublicKey\" indicates that the policy relies on a sigstore publicKey and may optionally use a Rekor verification. \"FulcioCAWithRekor\" indicates that the policy is based on the Fulcio certification and incorporates a Rekor verification. \"PKI\" is a DevPreview feature that indicates that the policy is based on the certificates from Bring Your Own Public Key Infrastructure (BYOPKI). This value is enabled by turning on the SigstoreImageVerificationPKI feature gate.", "type": "string", "default": "" }, @@ -11419,6 +11719,7 @@ "discriminator": "policyType", "fields-to-discriminateBy": { "fulcioCAWithRekor": "FulcioCAWithRekor", + "pki": "PKI", "publicKey": "PublicKey" } } @@ -11451,7 +11752,7 @@ ], "properties": { "maxNumberOfBackups": { - "description": "MaxNumberOfBackups defines the maximum number of backups to retain. If the existing number of backups saved is equal to MaxNumberOfBackups then the oldest backup will be removed before a new backup is initiated.", + "description": "maxNumberOfBackups defines the maximum number of backups to retain. 
If the existing number of backups saved is equal to MaxNumberOfBackups then the oldest backup will be removed before a new backup is initiated.", "type": "integer", "format": "int32" } @@ -11465,15 +11766,15 @@ ], "properties": { "retentionNumber": { - "description": "RetentionNumber configures the retention policy based on the number of backups", + "description": "retentionNumber configures the retention policy based on the number of backups", "$ref": "#/definitions/com.github.openshift.api.config.v1alpha1.RetentionNumberConfig" }, "retentionSize": { - "description": "RetentionSize configures the retention policy based on the size of backups", + "description": "retentionSize configures the retention policy based on the size of backups", "$ref": "#/definitions/com.github.openshift.api.config.v1alpha1.RetentionSizeConfig" }, "retentionType": { - "description": "RetentionType sets the type of retention policy. Currently, the only valid policies are retention by number of backups (RetentionNumber), by the size of backups (RetentionSize). More policies or types may be added in the future. Empty string means no opinion and the platform is left to choose a reasonable default which is subject to change without notice. The current default is RetentionNumber with 15 backups kept.\n\nPossible enum values:\n - `\"RetentionNumber\"` sets the retention policy based on the number of backup files saved\n - `\"RetentionSize\"` sets the retention policy based on the total size of the backup files saved", + "description": "retentionType sets the type of retention policy. Currently, the only valid policies are retention by number of backups (RetentionNumber), by the size of backups (RetentionSize). More policies or types may be added in the future. Empty string means no opinion and the platform is left to choose a reasonable default which is subject to change without notice. The current default is RetentionNumber with 15 backups kept.\n\nPossible enum values:\n - `\"RetentionNumber\"` sets the retention policy based on the number of backup files saved\n - `\"RetentionSize\"` sets the retention policy based on the total size of the backup files saved", "type": "string", "default": "", "enum": [ @@ -11500,12 +11801,48 @@ ], "properties": { "maxSizeOfBackupsGb": { - "description": "MaxSizeOfBackupsGb defines the total size in GB of backups to retain. If the current total size backups exceeds MaxSizeOfBackupsGb then the oldest backup will be removed before a new backup is initiated.", + "description": "maxSizeOfBackupsGb defines the total size in GB of backups to retain. If the current total size backups exceeds MaxSizeOfBackupsGb then the oldest backup will be removed before a new backup is initiated.", "type": "integer", "format": "int32" } } }, + "com.github.openshift.api.config.v1alpha1.Storage": { + "description": "storage provides persistent storage configuration options for gathering jobs. If the type is set to PersistentVolume, then the PersistentVolume must be defined. If the type is set to Ephemeral, then the PersistentVolume must not be defined.", + "type": "object", + "required": [ + "type" + ], + "properties": { + "persistentVolume": { + "description": "persistentVolume is an optional field that specifies the PersistentVolume that will be used to store the Insights data archive. 
The PersistentVolume must be created in the openshift-insights namespace.", + "$ref": "#/definitions/com.github.openshift.api.config.v1alpha1.PersistentVolumeConfig" + }, + "type": { + "description": "type is a required field that specifies the type of storage that will be used to store the Insights data archive. Valid values are \"PersistentVolume\" and \"Ephemeral\". When set to Ephemeral, the Insights data archive is stored in the ephemeral storage of the gathering job. When set to PersistentVolume, the Insights data archive is stored in the PersistentVolume that is defined by the persistentVolume field.", + "type": "string", + "default": "" + } + } + }, + "com.github.openshift.api.config.v1alpha1.UserDefinedMonitoring": { + "description": "UserDefinedMonitoring config for user-defined projects.", + "type": "object", + "required": [ + "mode" + ], + "properties": { + "mode": { + "description": "mode defines the different configurations of UserDefinedMonitoring Valid values are Disabled and NamespaceIsolated Disabled disables monitoring for user-defined projects. This restricts the default monitoring stack, installed in the openshift-monitoring project, to monitor only platform namespaces, which prevents any custom monitoring configurations or resources from being applied to user-defined namespaces. NamespaceIsolated enables monitoring for user-defined projects with namespace-scoped tenancy. This ensures that metrics, alerts, and monitoring data are isolated at the namespace level.\n\nPossible enum values:\n - `\"Disabled\"` disables monitoring for user-defined projects. This restricts the default monitoring stack, installed in the openshift-monitoring project, to monitor only platform namespaces, which prevents any custom monitoring configurations or resources from being applied to user-defined namespaces.\n - `\"NamespaceIsolated\"` enables monitoring for user-defined projects with namespace-scoped tenancy. This ensures that metrics, alerts, and monitoring data are isolated at the namespace level.", + "type": "string", + "default": "", + "enum": [ + "Disabled", + "NamespaceIsolated" + ] + } + } + }, "com.github.openshift.api.console.v1.ApplicationMenuSpec": { "description": "ApplicationMenuSpec is the specification of the desired section and icon used for the link in the application menu.", "type": "object", @@ -11514,7 +11851,7 @@ ], "properties": { "imageURL": { - "description": "imageUrl is the URL for the icon used in front of the link in the application menu. The URL must be an HTTPS URL or a Data URI. The image should be square and will be shown at 24x24 pixels.", + "description": "imageURL is the URL for the icon used in front of the link in the application menu. The URL must be an HTTPS URL or a Data URI. The image should be square and will be shown at 24x24 pixels.", "type": "string" }, "section": { @@ -11905,6 +12242,7 @@ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta" }, "spec": { + "description": "spec contains the desired configuration for the console plugin.", "default": {}, "$ref": "#/definitions/com.github.openshift.api.console.v1.ConsolePluginSpec" } @@ -11936,6 +12274,38 @@ } ] }, + "com.github.openshift.api.console.v1.ConsolePluginCSP": { + "description": "ConsolePluginCSP holds configuration for a specific CSP directive", + "type": "object", + "required": [ + "directive", + "values" + ], + "properties": { + "directive": { + "description": "directive specifies which Content-Security-Policy directive to configure. 
Available directive types are DefaultSrc, ScriptSrc, StyleSrc, ImgSrc, FontSrc and ConnectSrc. DefaultSrc directive serves as a fallback for the other CSP fetch directives. For more information about the DefaultSrc directive, see: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Security-Policy/default-src ScriptSrc directive specifies valid sources for JavaScript. For more information about the ScriptSrc directive, see: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Security-Policy/script-src StyleSrc directive specifies valid sources for stylesheets. For more information about the StyleSrc directive, see: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Security-Policy/style-src ImgSrc directive specifies a valid sources of images and favicons. For more information about the ImgSrc directive, see: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Security-Policy/img-src FontSrc directive specifies valid sources for fonts loaded using @font-face. For more information about the FontSrc directive, see: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Security-Policy/font-src ConnectSrc directive restricts the URLs which can be loaded using script interfaces. For more information about the ConnectSrc directive, see: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Security-Policy/connect-src\n\nPossible enum values:\n - `\"ConnectSrc\"` directive restricts the URLs which can be loaded using script interfaces. For more information about the ConnectSrc directive, see: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Security-Policy/connect-src\n - `\"DefaultSrc\"` directive serves as a fallback for the other CSP fetch directives. For more information about the DefaultSrc directive, see: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Security-Policy/default-src\n - `\"FontSrc\"` directive specifies valid sources for fonts loaded using @font-face. For more information about the FontSrc directive, see: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Security-Policy/font-src\n - `\"ImgSrc\"` directive specifies a valid sources of images and favicons. For more information about the ImgSrc directive, see: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Security-Policy/img-src\n - `\"ScriptSrc\"` directive specifies valid sources for JavaScript. For more information about the ScriptSrc directive, see: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Security-Policy/script-src\n - `\"StyleSrc\"` directive specifies valid sources for stylesheets. For more information about the StyleSrc directive, see: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Security-Policy/style-src", + "type": "string", + "default": "", + "enum": [ + "ConnectSrc", + "DefaultSrc", + "FontSrc", + "ImgSrc", + "ScriptSrc", + "StyleSrc" + ] + }, + "values": { + "description": "values defines an array of values to append to the console defaults for this directive. Each ConsolePlugin may define their own directives with their values. These will be set by the OpenShift web console's backend, as part of its Content-Security-Policy header. The array can contain at most 16 values. Each directive value must have a maximum length of 1024 characters and must not contain whitespace, commas (,), semicolons (;) or single quotes ('). The value '*' is not permitted. 
Each value in the array must be unique.", + "type": "array", + "items": { + "type": "string", + "default": "" + }, + "x-kubernetes-list-type": "atomic" + } + } + }, "com.github.openshift.api.console.v1.ConsolePluginI18n": { "description": "ConsolePluginI18n holds information on localization resources that are served by the dynamic plugin.", "type": "object", @@ -12106,6 +12476,18 @@ "default": {}, "$ref": "#/definitions/com.github.openshift.api.console.v1.ConsolePluginBackend" }, + "contentSecurityPolicy": { + "description": "contentSecurityPolicy is a list of Content-Security-Policy (CSP) directives for the plugin. Each directive specifies a list of values, appropriate for the given directive type, for example a list of remote endpoints for fetch directives such as ScriptSrc. Console web application uses CSP to detect and mitigate certain types of attacks, such as cross-site scripting (XSS) and data injection attacks. Dynamic plugins should specify this field if need to load assets from outside the cluster or if violation reports are observed. Dynamic plugins should always prefer loading their assets from within the cluster, either by vendoring them, or fetching from a cluster service. CSP violation reports can be viewed in the browser's console logs during development and testing of the plugin in the OpenShift web console. Available directive types are DefaultSrc, ScriptSrc, StyleSrc, ImgSrc, FontSrc and ConnectSrc. Each of the available directives may be defined only once in the list. The value 'self' is automatically included in all fetch directives by the OpenShift web console's backend. For more information about the CSP directives, see: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Security-Policy\n\nThe OpenShift web console server aggregates the CSP directives and values across its own default values and all enabled ConsolePlugin CRs, merging them into a single policy string that is sent to the browser via `Content-Security-Policy` HTTP response header.\n\nExample:\n ConsolePlugin A directives:\n script-src: https://script1.com/, https://script2.com/\n font-src: https://font1.com/\n\n ConsolePlugin B directives:\n script-src: https://script2.com/, https://script3.com/\n font-src: https://font2.com/\n img-src: https://img1.com/\n\n Unified set of CSP directives, passed to the OpenShift web console server:\n script-src: https://script1.com/, https://script2.com/, https://script3.com/\n font-src: https://font1.com/, https://font2.com/\n img-src: https://img1.com/\n\n OpenShift web console server CSP response header:\n Content-Security-Policy: default-src 'self'; base-uri 'self'; script-src 'self' https://script1.com/ https://script2.com/ https://script3.com/; font-src 'self' https://font1.com/ https://font2.com/; img-src 'self' https://img1.com/; style-src 'self'; frame-src 'none'; object-src 'none'", + "type": "array", + "items": { + "default": {}, + "$ref": "#/definitions/com.github.openshift.api.console.v1.ConsolePluginCSP" + }, + "x-kubernetes-list-map-keys": [ + "directive" + ], + "x-kubernetes-list-type": "map" + }, "displayName": { "description": "displayName is the display name of the plugin. 
The dispalyName should be between 1 and 128 characters.", "type": "string", @@ -12122,7 +12504,8 @@ "items": { "default": {}, "$ref": "#/definitions/com.github.openshift.api.console.v1.ConsolePluginProxy" - } + }, + "x-kubernetes-list-type": "atomic" } } }, @@ -12696,6 +13079,9 @@ "com.github.openshift.api.example.v1.CELUnion": { "description": "CELUnion demonstrates how to use a discriminated union and how to validate it using CEL.", "type": "object", + "required": [ + "type" + ], "properties": { "optionalMember": { "description": "optionalMember is a union member that is optional.", @@ -12722,6 +13108,9 @@ }, "com.github.openshift.api.example.v1.EvolvingUnion": { "type": "object", + "required": [ + "type" + ], "properties": { "type": { "description": "type is the discriminator. It has different values for Default and for TechPreviewNoUpgrade", @@ -12846,6 +13235,15 @@ "description": "stableField is a field that is present on default clusters and on tech preview clusters\n\nIf empty, the platform will choose a good default, which may change over time without notice.", "type": "string", "default": "" + }, + "subdomainNameField": { + "description": "subdomainNameField represents a kubenetes name field. The intention is that it validates the name in the same way metadata.Name is validated. That is, it is a DNS-1123 subdomain.", + "type": "string" + }, + "subnetsWithExclusions": { + "description": "subnetsWithExclusions demonstrates how to validate a list of subnets with exclusions", + "default": {}, + "$ref": "#/definitions/com.github.openshift.api.example.v1.SubnetsWithExclusions" } } }, @@ -12863,9 +13261,7 @@ "x-kubernetes-list-map-keys": [ "type" ], - "x-kubernetes-list-type": "map", - "x-kubernetes-patch-merge-key": "type", - "x-kubernetes-patch-strategy": "merge" + "x-kubernetes-list-type": "map" }, "immutableField": { "description": "immutableField is a field that is immutable once the object has been created. It is required at all times.", @@ -12873,6 +13269,32 @@ } } }, + "com.github.openshift.api.example.v1.SubnetsWithExclusions": { + "description": "SubnetsWithExclusions is used to validate a list of subnets with exclusions. It demonstrates how exclusions should be validated as subnetworks of the networks listed in the subnets field.", + "type": "object", + "required": [ + "subnets" + ], + "properties": { + "excludeSubnets": { + "description": "excludeSubnets is a list of CIDR exclusions. The subnets in this list must be subnetworks of the subnets in the subnets list.", + "type": "array", + "items": { + "type": "string", + "default": "" + } + }, + "subnets": { + "description": "subnets is a list of subnets. It may contain up to 2 subnets. The list may be either 1 IPv4 subnet, 1 IPv6 subnet, or 1 of each.", + "type": "array", + "items": { + "type": "string", + "default": "" + }, + "x-kubernetes-list-type": "atomic" + } + } + }, "com.github.openshift.api.example.v1alpha1.NotStableConfigType": { "description": "NotStableConfigType is a stable config type that is TechPreviewNoUpgrade only.\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. 
These capabilities should not be used by applications needing long term support.", "type": "object", @@ -12959,9 +13381,7 @@ "x-kubernetes-list-map-keys": [ "type" ], - "x-kubernetes-list-type": "map", - "x-kubernetes-patch-merge-key": "type", - "x-kubernetes-patch-strategy": "merge" + "x-kubernetes-list-type": "map" } } }, @@ -13113,7 +13533,11 @@ "items": { "default": {}, "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Condition" - } + }, + "x-kubernetes-list-map-keys": [ + "type" + ], + "x-kubernetes-list-type": "map" } } }, @@ -13252,11 +13676,11 @@ "type": "string" }, "dockerImageConfig": { - "description": "DockerImageConfig is a JSON blob that the runtime uses to set up the container. This is a part of manifest schema v2. Will not be set when the image represents a manifest list.", + "description": "dockerImageConfig is a JSON blob that the runtime uses to set up the container. This is a part of manifest schema v2. Will not be set when the image represents a manifest list.", "type": "string" }, "dockerImageLayers": { - "description": "DockerImageLayers represents the layers in the image. May not be set if the image does not define that data or if the image represents a manifest list.", + "description": "dockerImageLayers represents the layers in the image. May not be set if the image does not define that data or if the image represents a manifest list.", "type": "array", "items": { "default": {}, @@ -13264,15 +13688,15 @@ } }, "dockerImageManifest": { - "description": "DockerImageManifest is the raw JSON of the manifest", + "description": "dockerImageManifest is the raw JSON of the manifest", "type": "string" }, "dockerImageManifestMediaType": { - "description": "DockerImageManifestMediaType specifies the mediaType of manifest. This is a part of manifest schema v2.", + "description": "dockerImageManifestMediaType specifies the mediaType of manifest. This is a part of manifest schema v2.", "type": "string" }, "dockerImageManifests": { - "description": "DockerImageManifests holds information about sub-manifests when the image represents a manifest list. When this field is present, no DockerImageLayers should be specified.", + "description": "dockerImageManifests holds information about sub-manifests when the image represents a manifest list. When this field is present, no DockerImageLayers should be specified.", "type": "array", "items": { "default": {}, @@ -13280,20 +13704,20 @@ } }, "dockerImageMetadata": { - "description": "DockerImageMetadata contains metadata about this image", + "description": "dockerImageMetadata contains metadata about this image", "$ref": "#/definitions/io.k8s.apimachinery.pkg.runtime.RawExtension", "x-kubernetes-patch-strategy": "replace" }, "dockerImageMetadataVersion": { - "description": "DockerImageMetadataVersion conveys the version of the object, which if empty defaults to \"1.0\"", + "description": "dockerImageMetadataVersion conveys the version of the object, which if empty defaults to \"1.0\"", "type": "string" }, "dockerImageReference": { - "description": "DockerImageReference is the string that can be used to pull this image.", + "description": "dockerImageReference is the string that can be used to pull this image.", "type": "string" }, "dockerImageSignatures": { - "description": "DockerImageSignatures provides the signatures as opaque blobs. This is a part of manifest schema v1.", + "description": "dockerImageSignatures provides the signatures as opaque blobs. 
This is a part of manifest schema v1.", "type": "array", "items": { "type": "string", @@ -13310,7 +13734,7 @@ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta" }, "signatures": { - "description": "Signatures holds all signatures of the image.", + "description": "signatures holds all signatures of the image.", "type": "array", "items": { "default": {}, @@ -13360,26 +13784,26 @@ ], "properties": { "from": { - "description": "From is the source of an image to import; only kind DockerImage is allowed", + "description": "from is the source of an image to import; only kind DockerImage is allowed", "default": {}, "$ref": "#/definitions/io.k8s.api.core.v1.ObjectReference" }, "importPolicy": { - "description": "ImportPolicy is the policy controlling how the image is imported", + "description": "importPolicy is the policy controlling how the image is imported", "default": {}, "$ref": "#/definitions/com.github.openshift.api.image.v1.TagImportPolicy" }, "includeManifest": { - "description": "IncludeManifest determines if the manifest for each image is returned in the response", + "description": "includeManifest determines if the manifest for each image is returned in the response", "type": "boolean" }, "referencePolicy": { - "description": "ReferencePolicy defines how other components should consume the image", + "description": "referencePolicy defines how other components should consume the image", "default": {}, "$ref": "#/definitions/com.github.openshift.api.image.v1.TagReferencePolicy" }, "to": { - "description": "To is a tag in the current image stream to assign the imported image to, if name is not specified the default tag from from.name will be used", + "description": "to is a tag in the current image stream to assign the imported image to, if name is not specified the default tag from from.name will be used", "$ref": "#/definitions/io.k8s.api.core.v1.LocalObjectReference" } } @@ -13392,11 +13816,11 @@ ], "properties": { "image": { - "description": "Image is the metadata of that image, if the image was located", + "description": "image is the metadata of that image, if the image was located", "$ref": "#/definitions/com.github.openshift.api.image.v1.Image" }, "manifests": { - "description": "Manifests holds sub-manifests metadata when importing a manifest list", + "description": "manifests holds sub-manifests metadata when importing a manifest list", "type": "array", "items": { "default": {}, @@ -13404,12 +13828,12 @@ } }, "status": { - "description": "Status is the status of the image import, including errors encountered while retrieving the image", + "description": "status is the status of the image import, including errors encountered while retrieving the image", "default": {}, "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status" }, "tag": { - "description": "Tag is the tag this image was located under, if any", + "description": "tag is the tag this image was located under, if any", "type": "string" } } @@ -13424,17 +13848,17 @@ ], "properties": { "mediaType": { - "description": "MediaType of the referenced object.", + "description": "mediaType of the referenced object.", "type": "string", "default": "" }, "name": { - "description": "Name of the layer as defined by the underlying store.", + "description": "name of the layer as defined by the underlying store.", "type": "string", "default": "" }, "size": { - "description": "Size of the layer in bytes as defined by the underlying store.", + "description": "size of the layer in bytes as defined by the 
underlying store.", "type": "integer", "format": "int64", "default": 0 @@ -13450,12 +13874,12 @@ ], "properties": { "mediaType": { - "description": "MediaType of the referenced object.", + "description": "mediaType of the referenced object.", "type": "string", "default": "" }, "size": { - "description": "Size of the layer in bytes as defined by the underlying store. This field is optional if the necessary information about size is not available.", + "description": "size of the layer in bytes as defined by the underlying store. This field is optional if the necessary information about size is not available.", "type": "integer", "format": "int64" } @@ -13473,7 +13897,7 @@ "type": "string" }, "items": { - "description": "Items is a list of images", + "description": "items is a list of images", "type": "array", "items": { "default": {}, @@ -13517,33 +13941,33 @@ ], "properties": { "architecture": { - "description": "Architecture specifies the supported CPU architecture, for example `amd64` or `ppc64le`.", + "description": "architecture specifies the supported CPU architecture, for example `amd64` or `ppc64le`.", "type": "string", "default": "" }, "digest": { - "description": "Digest is the unique identifier for the manifest. It refers to an Image object.", + "description": "digest is the unique identifier for the manifest. It refers to an Image object.", "type": "string", "default": "" }, "manifestSize": { - "description": "ManifestSize represents the size of the raw object contents, in bytes.", + "description": "manifestSize represents the size of the raw object contents, in bytes.", "type": "integer", "format": "int64", "default": 0 }, "mediaType": { - "description": "MediaType defines the type of the manifest, possible values are application/vnd.oci.image.manifest.v1+json, application/vnd.docker.distribution.manifest.v2+json or application/vnd.docker.distribution.manifest.v1+json.", + "description": "mediaType defines the type of the manifest, possible values are application/vnd.oci.image.manifest.v1+json, application/vnd.docker.distribution.manifest.v2+json or application/vnd.docker.distribution.manifest.v1+json.", "type": "string", "default": "" }, "os": { - "description": "OS specifies the operating system, for example `linux`.", + "description": "os specifies the operating system, for example `linux`.", "type": "string", "default": "" }, "variant": { - "description": "Variant is an optional field repreenting a variant of the CPU, for example v6 to specify a particular CPU variant of the ARM CPU.", + "description": "variant is an optional field repreenting a variant of the CPU, for example v6 to specify a particular CPU variant of the ARM CPU.", "type": "string" } } @@ -13561,7 +13985,7 @@ "type": "string" }, "conditions": { - "description": "Conditions represent the latest available observations of a signature's current state.", + "description": "conditions represent the latest available observations of a signature's current state.", "type": "array", "items": { "default": {}, @@ -13633,12 +14057,12 @@ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta" }, "spec": { - "description": "Spec describes the desired state of this stream", + "description": "spec describes the desired state of this stream", "default": {}, "$ref": "#/definitions/com.github.openshift.api.image.v1.ImageStreamSpec" }, "status": { - "description": "Status describes the current state of this stream", + "description": "status describes the current state of this stream", "default": {}, "$ref": 
"#/definitions/com.github.openshift.api.image.v1.ImageStreamStatus" } @@ -13656,7 +14080,7 @@ "type": "string" }, "image": { - "description": "Image associated with the ImageStream and image name.", + "description": "image associated with the ImageStream and image name.", "default": {}, "$ref": "#/definitions/com.github.openshift.api.image.v1.Image" }, @@ -13693,12 +14117,12 @@ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta" }, "spec": { - "description": "Spec is a description of the images that the user wishes to import", + "description": "spec is a description of the images that the user wishes to import", "default": {}, "$ref": "#/definitions/com.github.openshift.api.image.v1.ImageStreamImportSpec" }, "status": { - "description": "Status is the result of importing the image", + "description": "status is the result of importing the image", "default": {}, "$ref": "#/definitions/com.github.openshift.api.image.v1.ImageStreamImportStatus" } @@ -13712,7 +14136,7 @@ ], "properties": { "images": { - "description": "Images are a list of individual images to import.", + "description": "images are a list of individual images to import.", "type": "array", "items": { "default": {}, @@ -13720,12 +14144,12 @@ } }, "import": { - "description": "Import indicates whether to perform an import - if so, the specified tags are set on the spec and status of the image stream defined by the type meta.", + "description": "import indicates whether to perform an import - if so, the specified tags are set on the spec and status of the image stream defined by the type meta.", "type": "boolean", "default": false }, "repository": { - "description": "Repository is an optional import of an entire container image repository. A maximum limit on the number of tags imported this way is imposed by the server.", + "description": "repository is an optional import of an entire container image repository. 
A maximum limit on the number of tags imported this way is imposed by the server.", "$ref": "#/definitions/com.github.openshift.api.image.v1.RepositoryImportSpec" } } @@ -13735,7 +14159,7 @@ "type": "object", "properties": { "images": { - "description": "Images is set with the result of importing spec.images", + "description": "images is set with the result of importing spec.images", "type": "array", "items": { "default": {}, @@ -13743,11 +14167,11 @@ } }, "import": { - "description": "Import is the image stream that was successfully updated or created when 'to' was set.", + "description": "import is the image stream that was successfully updated or created when 'to' was set.", "$ref": "#/definitions/com.github.openshift.api.image.v1.ImageStream" }, "repository": { - "description": "Repository is set if spec.repository was set to the outcome of the import", + "description": "repository is set if spec.repository was set to the outcome of the import", "$ref": "#/definitions/com.github.openshift.api.image.v1.RepositoryImportStatus" } } @@ -13803,7 +14227,7 @@ "type": "string" }, "items": { - "description": "Items is a list of imageStreams", + "description": "items is a list of imageStreams", "type": "array", "items": { "default": {}, @@ -13834,7 +14258,7 @@ "type": "string" }, "image": { - "description": "Image is a container image.", + "description": "image is a container image.", "default": {}, "$ref": "#/definitions/com.github.openshift.api.image.v1.Image" }, @@ -13848,7 +14272,7 @@ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta" }, "tag": { - "description": "Tag is a string value this image can be located with inside the stream.", + "description": "tag is a string value this image can be located with inside the stream.", "type": "string", "default": "" } @@ -13887,16 +14311,16 @@ ], "properties": { "dockerImageRepository": { - "description": "DockerImageRepository represents the effective location this stream may be accessed at. May be empty until the server determines where the repository is located", + "description": "dockerImageRepository represents the effective location this stream may be accessed at. May be empty until the server determines where the repository is located", "type": "string", "default": "" }, "publicDockerImageRepository": { - "description": "PublicDockerImageRepository represents the public location from where the image can be pulled outside the cluster. This field may be empty if the administrator has not exposed the integrated registry externally.", + "description": "publicDockerImageRepository represents the public location from where the image can be pulled outside the cluster. This field may be empty if the administrator has not exposed the integrated registry externally.", "type": "string" }, "tags": { - "description": "Tags are a historical record of images associated with each tag. The first entry in the TagEvent array is the currently tagged image.", + "description": "tags are a historical record of images associated with each tag. 
The first entry in the TagEvent array is the currently tagged image.", "type": "array", "items": { "default": {}, @@ -13972,7 +14396,7 @@ "type": "string" }, "items": { - "description": "Items is the list of image stream tags", + "description": "items is the list of image stream tags", "type": "array", "items": { "default": {}, @@ -14038,7 +14462,7 @@ "type": "string" }, "items": { - "description": "Items is the list of image stream tags", + "description": "items is the list of image stream tags", "type": "array", "items": { "default": {}, @@ -14065,7 +14489,7 @@ ], "properties": { "conditions": { - "description": "Conditions is an array of conditions that apply to the tag event list.", + "description": "conditions is an array of conditions that apply to the tag event list.", "type": "array", "items": { "default": {}, @@ -14081,7 +14505,7 @@ } }, "tag": { - "description": "Tag is the tag for which the history is recorded", + "description": "tag is the tag for which the history is recorded", "type": "string", "default": "" } @@ -14095,21 +14519,21 @@ ], "properties": { "from": { - "description": "From is the source for the image repository to import; only kind DockerImage and a name of a container image repository is allowed", + "description": "from is the source for the image repository to import; only kind DockerImage and a name of a container image repository is allowed", "default": {}, "$ref": "#/definitions/io.k8s.api.core.v1.ObjectReference" }, "importPolicy": { - "description": "ImportPolicy is the policy controlling how the image is imported", + "description": "importPolicy is the policy controlling how the image is imported", "default": {}, "$ref": "#/definitions/com.github.openshift.api.image.v1.TagImportPolicy" }, "includeManifest": { - "description": "IncludeManifest determines if the manifest for each image is returned in the response", + "description": "includeManifest determines if the manifest for each image is returned in the response", "type": "boolean" }, "referencePolicy": { - "description": "ReferencePolicy defines how other components should consume the image", + "description": "referencePolicy defines how other components should consume the image", "default": {}, "$ref": "#/definitions/com.github.openshift.api.image.v1.TagReferencePolicy" } @@ -14120,7 +14544,7 @@ "type": "object", "properties": { "additionalTags": { - "description": "AdditionalTags are tags that exist in the repository but were not imported because a maximum limit of automatic imports was applied.", + "description": "additionalTags are tags that exist in the repository but were not imported because a maximum limit of automatic imports was applied.", "type": "array", "items": { "type": "string", @@ -14128,7 +14552,7 @@ } }, "images": { - "description": "Images is a list of images successfully retrieved by the import of the repository.", + "description": "images is a list of images successfully retrieved by the import of the repository.", "type": "array", "items": { "default": {}, @@ -14136,7 +14560,7 @@ } }, "status": { - "description": "Status reflects whether any failure occurred during import", + "description": "status reflects whether any failure occurred during import", "default": {}, "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status" } @@ -14197,12 +14621,12 @@ "type": "string" }, "status": { - "description": "Status of the condition, one of True, False, Unknown.", + "description": "status of the condition, one of True, False, Unknown.", "type": "string", "default": "" }, 
"type": { - "description": "Type of signature condition, Complete or Failed.", + "description": "type of signature condition, Complete or Failed.", "type": "string", "default": "" } @@ -14217,7 +14641,7 @@ "type": "string" }, "organization": { - "description": "Organization name.", + "description": "organization name.", "type": "string" } } @@ -14231,7 +14655,7 @@ "type": "string" }, "organization": { - "description": "Organization name.", + "description": "organization name.", "type": "string" } } @@ -14248,7 +14672,7 @@ "type": "string" }, "organization": { - "description": "Organization name.", + "description": "organization name.", "type": "string" }, "publicKeyID": { @@ -14269,22 +14693,22 @@ ], "properties": { "created": { - "description": "Created holds the time the TagEvent was created", + "description": "created holds the time the TagEvent was created", "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time" }, "dockerImageReference": { - "description": "DockerImageReference is the string that can be used to pull this image", + "description": "dockerImageReference is the string that can be used to pull this image", "type": "string", "default": "" }, "generation": { - "description": "Generation is the spec tag generation that resulted in this tag being updated", + "description": "generation is the spec tag generation that resulted in this tag being updated", "type": "integer", "format": "int64", "default": 0 }, "image": { - "description": "Image is the image", + "description": "image is the image", "type": "string", "default": "" } @@ -14300,30 +14724,30 @@ ], "properties": { "generation": { - "description": "Generation is the spec tag generation that this status corresponds to", + "description": "generation is the spec tag generation that this status corresponds to", "type": "integer", "format": "int64", "default": 0 }, "lastTransitionTime": { - "description": "LastTransitionTIme is the time the condition transitioned from one status to another.", + "description": "lastTransitionTime is the time the condition transitioned from one status to another.", "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time" }, "message": { - "description": "Message is a human readable description of the details about last transition, complementing reason.", + "description": "message is a human readable description of the details about last transition, complementing reason.", "type": "string" }, "reason": { - "description": "Reason is a brief machine readable explanation for the condition's last transition.", + "description": "reason is a brief machine readable explanation for the condition's last transition.", "type": "string" }, "status": { - "description": "Status of the condition, one of True, False, Unknown.", + "description": "status of the condition, one of True, False, Unknown.", "type": "string", "default": "" }, "type": { - "description": "Type of tag event condition, currently only ImportSuccess", + "description": "type of tag event condition, currently only ImportSuccess", "type": "string", "default": "" } @@ -14334,15 +14758,15 @@ "type": "object", "properties": { "importMode": { - "description": "ImportMode describes how to import an image manifest.", + "description": "importMode describes how to import an image manifest.", "type": "string" }, "insecure": { - "description": "Insecure is true if the server may bypass certificate verification or connect directly over HTTP during image import.", + "description": "insecure is true if the server may bypass certificate 
verification or connect directly over HTTP during image import.", "type": "boolean" }, "scheduled": { - "description": "Scheduled indicates to the server that this tag should be periodically checked to ensure it is up to date, and imported", + "description": "scheduled indicates to the server that this tag should be periodically checked to ensure it is up to date, and imported", "type": "boolean" } } @@ -14367,26 +14791,26 @@ "$ref": "#/definitions/io.k8s.api.core.v1.ObjectReference" }, "generation": { - "description": "Generation is a counter that tracks mutations to the spec tag (user intent). When a tag reference is changed the generation is set to match the current stream generation (which is incremented every time spec is changed). Other processes in the system like the image importer observe that the generation of spec tag is newer than the generation recorded in the status and use that as a trigger to import the newest remote tag. To trigger a new import, clients may set this value to zero which will reset the generation to the latest stream generation. Legacy clients will send this value as nil which will be merged with the current tag generation.", + "description": "generation is a counter that tracks mutations to the spec tag (user intent). When a tag reference is changed the generation is set to match the current stream generation (which is incremented every time spec is changed). Other processes in the system like the image importer observe that the generation of spec tag is newer than the generation recorded in the status and use that as a trigger to import the newest remote tag. To trigger a new import, clients may set this value to zero which will reset the generation to the latest stream generation. Legacy clients will send this value as nil which will be merged with the current tag generation.", "type": "integer", "format": "int64" }, "importPolicy": { - "description": "ImportPolicy is information that controls how images may be imported by the server.", + "description": "importPolicy is information that controls how images may be imported by the server.", "default": {}, "$ref": "#/definitions/com.github.openshift.api.image.v1.TagImportPolicy" }, "name": { - "description": "Name of the tag", + "description": "name of the tag", "type": "string", "default": "" }, "reference": { - "description": "Reference states if the tag will be imported. Default value is false, which means the tag will be imported.", + "description": "reference states if the tag will be imported. Default value is false, which means the tag will be imported.", "type": "boolean" }, "referencePolicy": { - "description": "ReferencePolicy defines how other components should consume the image.", + "description": "referencePolicy defines how other components should consume the image.", "default": {}, "$ref": "#/definitions/com.github.openshift.api.image.v1.TagReferencePolicy" } @@ -14400,7 +14824,7 @@ ], "properties": { "type": { - "description": "Type determines how the image pull spec should be transformed when the image stream tag is used in deployment config triggers or new builds. The default value is `Source`, indicating the original location of the image should be used (if imported). The user may also specify `Local`, indicating that the pull spec should point to the integrated container image registry and leverage the registry's ability to proxy the pull to an upstream registry. 
`Local` allows the credentials used to pull this image to be managed from the image stream's namespace, so others on the platform can access a remote image but have no access to the remote secret. It also allows the image layers to be mirrored into the local registry which the images can still be pulled even if the upstream registry is unavailable.", + "description": "type determines how the image pull spec should be transformed when the image stream tag is used in deployment config triggers or new builds. The default value is `Source`, indicating the original location of the image should be used (if imported). The user may also specify `Local`, indicating that the pull spec should point to the integrated container image registry and leverage the registry's ability to proxy the pull to an upstream registry. `Local` allows the credentials used to pull this image to be managed from the image stream's namespace, so others on the platform can access a remote image but have no access to the remote secret. It also allows the image layers to be mirrored into the local registry which the images can still be pulled even if the upstream registry is unavailable.", "type": "string", "default": "" } @@ -14441,10 +14865,6 @@ "com.github.openshift.api.insights.v1alpha1.DataGatherList": { "description": "DataGatherList is a collection of items\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.", "type": "object", - "required": [ - "metadata", - "items" - ], "properties": { "apiVersion": { "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", @@ -14456,7 +14876,8 @@ "items": { "default": {}, "$ref": "#/definitions/com.github.openshift.api.insights.v1alpha1.DataGather" - } + }, + "x-kubernetes-list-type": "atomic" }, "kind": { "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", @@ -14479,12 +14900,16 @@ "default": "" }, "gatherers": { - "description": "gatherers is a list of gatherers configurations. The particular gatherers IDs can be found at https://github.com/openshift/insights-operator/blob/master/docs/gathered-data.md. Run the following command to get the names of last active gatherers: \"oc get insightsoperators.operator.openshift.io cluster -o json | jq '.status.gatherStatus.gatherers[].name'\"", + "description": "gatherers is an optional list of gatherers configurations. The list must not exceed 100 items. The particular gatherers IDs can be found at https://github.com/openshift/insights-operator/blob/master/docs/gathered-data.md. 
Run the following command to get the names of last active gatherers: \"oc get insightsoperators.operator.openshift.io cluster -o json | jq '.status.gatherStatus.gatherers[].name'\"", "type": "array", "items": { "default": {}, "$ref": "#/definitions/com.github.openshift.api.insights.v1alpha1.GathererConfig" } + }, + "storage": { + "description": "storage is an optional field that allows user to define persistent storage for gathering jobs to store the Insights data archive. If omitted, the gathering job will use ephemeral storage.", + "$ref": "#/definitions/com.github.openshift.api.insights.v1alpha1.Storage" } } }, @@ -14502,9 +14927,7 @@ "x-kubernetes-list-map-keys": [ "type" ], - "x-kubernetes-list-type": "map", - "x-kubernetes-patch-merge-key": "type", - "x-kubernetes-patch-strategy": "merge" + "x-kubernetes-list-type": "map" }, "dataGatherState": { "description": "dataGatherState reflects the current state of the data gathering process.", @@ -14557,7 +14980,7 @@ ], "properties": { "name": { - "description": "name is the name of specific gatherer", + "description": "name is the required name of specific gatherer It must be at most 256 characters in length. The format for the gatherer name should be: {gatherer}/{function} where the function is optional. Gatherer consists of a lowercase letters only that may include underscores (_). Function consists of a lowercase letters only that may include underscores (_) and is separated from the gatherer by a forward slash (/). The particular gatherers can be found at https://github.com/openshift/insights-operator/blob/master/docs/gathered-data.md.", "type": "string", "default": "" }, @@ -14587,9 +15010,7 @@ "x-kubernetes-list-map-keys": [ "type" ], - "x-kubernetes-list-type": "map", - "x-kubernetes-patch-merge-key": "type", - "x-kubernetes-patch-strategy": "merge" + "x-kubernetes-list-type": "map" }, "lastGatherDuration": { "description": "lastGatherDuration represents the time spent gathering.", @@ -14613,7 +15034,7 @@ ], "properties": { "advisorURI": { - "description": "advisorURI provides the URL link to the Insights Advisor.", + "description": "advisorURI is required field that provides the URL link to the Insights Advisor. The link must be a valid HTTPS URL and the maximum length is 2048 characters.", "type": "string", "default": "" }, @@ -14653,7 +15074,7 @@ "x-kubernetes-list-type": "atomic" }, "uri": { - "description": "uri provides the URL link from which the report was downloaded.", + "description": "uri is optional field that provides the URL link from which the report was downloaded. The link must be a valid HTTPS URL and the maximum length is 2048 characters.", "type": "string" } } @@ -14668,21 +15089,71 @@ ], "properties": { "group": { - "description": "group is the API Group of the Resource. Enter empty string for the core group. This value should consist of only lowercase alphanumeric characters, hyphens and periods. Example: \"\", \"apps\", \"build.openshift.io\", etc.", + "description": "group is the API Group of the Resource. Enter empty string for the core group. This value is empty or should follow the DNS1123 subdomain format and it must be at most 253 characters in length. Example: \"\", \"apps\", \"build.openshift.io\", etc.", "type": "string", "default": "" }, "name": { - "description": "name of the referent.", + "description": "name of the referent that follows the DNS1123 subdomain format. 
It must be at most 256 characters in length.", "type": "string", "default": "" }, "namespace": { - "description": "namespace of the referent.", + "description": "namespace of the referent that follows the DNS1123 subdomain format. It must be at most 253 characters in length.", "type": "string" }, "resource": { - "description": "resource is the type that is being referenced. It is normally the plural form of the resource kind in lowercase. This value should consist of only lowercase alphanumeric characters and hyphens. Example: \"deployments\", \"deploymentconfigs\", \"pods\", etc.", + "description": "resource is required field of the type that is being referenced. It is normally the plural form of the resource kind in lowercase. This value should consist of only lowercase alphanumeric characters and hyphens. Example: \"deployments\", \"deploymentconfigs\", \"pods\", etc.", + "type": "string", + "default": "" + } + } + }, + "com.github.openshift.api.insights.v1alpha1.PersistentVolumeClaimReference": { + "description": "persistentVolumeClaimReference is a reference to a PersistentVolumeClaim.", + "type": "object", + "required": [ + "name" + ], + "properties": { + "name": { + "description": "name is a string that follows the DNS1123 subdomain format. It must be at most 253 characters in length, and must consist only of lower case alphanumeric characters, '-' and '.', and must start and end with an alphanumeric character.", + "type": "string", + "default": "" + } + } + }, + "com.github.openshift.api.insights.v1alpha1.PersistentVolumeConfig": { + "description": "persistentVolumeConfig provides configuration options for PersistentVolume storage.", + "type": "object", + "required": [ + "claim" + ], + "properties": { + "claim": { + "description": "claim is a required field that specifies the configuration of the PersistentVolumeClaim that will be used to store the Insights data archive. The PersistentVolumeClaim must be created in the openshift-insights namespace.", + "default": {}, + "$ref": "#/definitions/com.github.openshift.api.insights.v1alpha1.PersistentVolumeClaimReference" + }, + "mountPath": { + "description": "mountPath is an optional field specifying the directory where the PVC will be mounted inside the Insights data gathering Pod. When omitted, this means no opinion and the platform is left to choose a reasonable default, which is subject to change over time. The current default mount path is /var/lib/insights-operator The path may not exceed 1024 characters and must not contain a colon.", + "type": "string" + } + } + }, + "com.github.openshift.api.insights.v1alpha1.Storage": { + "description": "storage provides persistent storage configuration options for gathering jobs. If the type is set to PersistentVolume, then the PersistentVolume must be defined. If the type is set to Ephemeral, then the PersistentVolume must not be defined.", + "type": "object", + "required": [ + "type" + ], + "properties": { + "persistentVolume": { + "description": "persistentVolume is an optional field that specifies the PersistentVolume that will be used to store the Insights data archive. The PersistentVolume must be created in the openshift-insights namespace.", + "$ref": "#/definitions/com.github.openshift.api.insights.v1alpha1.PersistentVolumeConfig" + }, + "type": { + "description": "type is a required field that specifies the type of storage that will be used to store the Insights data archive. Valid values are \"PersistentVolume\" and \"Ephemeral\". 
When set to Ephemeral, the Insights data archive is stored in the ephemeral storage of the gathering job. When set to PersistentVolume, the Insights data archive is stored in the PersistentVolume that is defined by the PersistentVolume field.", "type": "string", "default": "" } @@ -14834,7 +15305,7 @@ "$ref": "#/definitions/com.github.openshift.api.config.v1.EtcdStorageConfig" }, "userAgentMatchingConfig": { - "description": "UserAgentMatchingConfig controls how API calls from *voluntarily* identifying clients will be handled. THIS DOES NOT DEFEND AGAINST MALICIOUS CLIENTS!", + "description": "userAgentMatchingConfig controls how API calls from *voluntarily* identifying clients will be handled. THIS DOES NOT DEFEND AGAINST MALICIOUS CLIENTS!", "default": {}, "$ref": "#/definitions/com.github.openshift.api.kubecontrolplane.v1.UserAgentMatchingConfig" } @@ -14944,12 +15415,12 @@ "default": "" }, "certFile": { - "description": "CertFile is a file containing a PEM-encoded certificate", + "description": "certFile is a file containing a PEM-encoded certificate", "type": "string", "default": "" }, "keyFile": { - "description": "KeyFile is a file containing a PEM-encoded private key for the certificate specified by CertFile", + "description": "keyFile is a file containing a PEM-encoded private key for the certificate specified by CertFile", "type": "string", "default": "" }, @@ -15047,7 +15518,7 @@ ], "properties": { "certFile": { - "description": "CertFile is a file containing a PEM-encoded certificate", + "description": "certFile is a file containing a PEM-encoded certificate", "type": "string", "default": "" } @@ -15076,7 +15547,7 @@ "default": "" }, "rejectionMessage": { - "description": "RejectionMessage is the message shown when rejecting a client. If it is not a set, the default message is used.", + "description": "rejectionMessage is the message shown when rejecting a client. If it is not a set, the default message is used.", "type": "string", "default": "" } @@ -15167,7 +15638,7 @@ ], "properties": { "groupMembershipAttributes": { - "description": "GroupMembershipAttributes defines which attributes on an LDAP user entry will be interpreted as the groups it is a member of", + "description": "groupMembershipAttributes defines which attributes on an LDAP user entry will be interpreted as the groups it is a member of", "type": "array", "items": { "type": "string", @@ -15175,7 +15646,7 @@ } }, "userNameAttributes": { - "description": "UserNameAttributes defines which attributes on an LDAP user entry will be interpreted as its OpenShift user name.", + "description": "userNameAttributes defines which attributes on an LDAP user entry will be interpreted as its OpenShift user name.", "type": "array", "items": { "type": "string", @@ -15197,14 +15668,14 @@ ], "properties": { "pluginConfig": { - "description": "PluginConfig allows specifying a configuration file per admission control plugin", + "description": "pluginConfig allows specifying a configuration file per admission control plugin", "type": "object", "additionalProperties": { "$ref": "#/definitions/com.github.openshift.api.legacyconfig.v1.AdmissionPluginConfig" } }, "pluginOrderOverride": { - "description": "PluginOrderOverride is a list of admission control plugin names that will be installed on the master. Order is significant. If empty, a default list of plugins is used.", + "description": "pluginOrderOverride is a list of admission control plugin names that will be installed on the master. Order is significant. 
If empty, a default list of plugins is used.", "type": "array", "items": { "type": "string", @@ -15222,11 +15693,11 @@ ], "properties": { "configuration": { - "description": "Configuration is an embedded configuration object to be used as the plugin's configuration. If present, it will be used instead of the path to the configuration file.", + "description": "configuration is an embedded configuration object to be used as the plugin's configuration. If present, it will be used instead of the path to the configuration file.", "$ref": "#/definitions/io.k8s.apimachinery.pkg.runtime.RawExtension" }, "location": { - "description": "Location is the path to a configuration file that contains the plugin's configuration", + "description": "location is the path to a configuration file that contains the plugin's configuration", "type": "string", "default": "" } @@ -15240,7 +15711,7 @@ ], "properties": { "proxyClientInfo": { - "description": "ProxyClientInfo specifies the client cert/key to use when proxying to aggregated API servers", + "description": "proxyClientInfo specifies the client cert/key to use when proxying to aggregated API servers", "default": {}, "$ref": "#/definitions/com.github.openshift.api.legacyconfig.v1.CertInfo" } @@ -15310,11 +15781,11 @@ "default": 0 }, "policyConfiguration": { - "description": "PolicyConfiguration is an embedded policy configuration object to be used as the audit policy configuration. If present, it will be used instead of the path to the policy file.", + "description": "policyConfiguration is an embedded policy configuration object to be used as the audit policy configuration. If present, it will be used instead of the path to the policy file.", "$ref": "#/definitions/io.k8s.apimachinery.pkg.runtime.RawExtension" }, "policyFile": { - "description": "PolicyFile is a path to the file that defines the audit policy configuration.", + "description": "policyFile is a path to the file that defines the audit policy configuration.", "type": "string", "default": "" }, @@ -15343,7 +15814,7 @@ ], "properties": { "groupMembershipAttributes": { - "description": "GroupMembershipAttributes defines which attributes on an LDAP user entry will be interpreted as the groups it is a member of", + "description": "groupMembershipAttributes defines which attributes on an LDAP user entry will be interpreted as the groups it is a member of", "type": "array", "items": { "type": "string", @@ -15351,7 +15822,7 @@ } }, "groupNameAttributes": { - "description": "GroupNameAttributes defines which attributes on an LDAP group entry will be interpreted as its name to use for an OpenShift group", + "description": "groupNameAttributes defines which attributes on an LDAP group entry will be interpreted as its name to use for an OpenShift group", "type": "array", "items": { "type": "string", @@ -15369,7 +15840,7 @@ "$ref": "#/definitions/com.github.openshift.api.legacyconfig.v1.LDAPQuery" }, "userNameAttributes": { - "description": "UserNameAttributes defines which attributes on an LDAP user entry will be interpreted as its OpenShift user name.", + "description": "userNameAttributes defines which attributes on an LDAP user entry will be interpreted as its OpenShift user name.", "type": "array", "items": { "type": "string", @@ -15398,17 +15869,17 @@ "type": "string" }, "ca": { - "description": "CA is the CA for verifying TLS connections", + "description": "ca is the CA for verifying TLS connections", "type": "string", "default": "" }, "certFile": { - "description": "CertFile is a file containing a 
PEM-encoded certificate", + "description": "certFile is a file containing a PEM-encoded certificate", "type": "string", "default": "" }, "keyFile": { - "description": "KeyFile is a file containing a PEM-encoded private key for the certificate specified by CertFile", + "description": "keyFile is a file containing a PEM-encoded private key for the certificate specified by CertFile", "type": "string", "default": "" }, @@ -15417,7 +15888,7 @@ "type": "string" }, "url": { - "description": "URL is the remote URL to connect to", + "description": "url is the remote URL to connect to", "type": "string", "default": "" } @@ -15553,12 +16024,12 @@ ], "properties": { "certFile": { - "description": "CertFile is a file containing a PEM-encoded certificate", + "description": "certFile is a file containing a PEM-encoded certificate", "type": "string", "default": "" }, "keyFile": { - "description": "KeyFile is a file containing a PEM-encoded private key for the certificate specified by CertFile", + "description": "keyFile is a file containing a PEM-encoded private key for the certificate specified by CertFile", "type": "string", "default": "" } @@ -15575,23 +16046,23 @@ ], "properties": { "acceptContentTypes": { - "description": "AcceptContentTypes defines the Accept header sent by clients when connecting to a server, overriding the default value of 'application/json'. This field will control all connections to the server used by a particular client.", + "description": "acceptContentTypes defines the Accept header sent by clients when connecting to a server, overriding the default value of 'application/json'. This field will control all connections to the server used by a particular client.", "type": "string", "default": "" }, "burst": { - "description": "Burst allows extra queries to accumulate when a client is exceeding its rate.", + "description": "burst allows extra queries to accumulate when a client is exceeding its rate.", "type": "integer", "format": "int32", "default": 0 }, "contentType": { - "description": "ContentType is the content type used when sending data to the server from this client.", + "description": "contentType is the content type used when sending data to the server from this client.", "type": "string", "default": "" }, "qps": { - "description": "QPS controls the number of queries per second allowed for this connection.", + "description": "qps controls the number of queries per second allowed for this connection.", "type": "number", "format": "float", "default": 0 @@ -15607,12 +16078,12 @@ ], "properties": { "cidr": { - "description": "CIDR defines the total range of a cluster networks address space.", + "description": "cidr defines the total range of a cluster networks address space.", "type": "string", "default": "" }, "hostSubnetLength": { - "description": "HostSubnetLength is the number of bits of the accompanying CIDR address to allocate to each node. eg, 8 would mean that each node would have a /24 slice of the overlay network for its pod.", + "description": "hostSubnetLength is the number of bits of the accompanying CIDR address to allocate to each node. eg, 8 would mean that each node would have a /24 slice of the overlay network for its pod.", "type": "integer", "format": "int64", "default": 0 @@ -15629,7 +16100,7 @@ ], "properties": { "controllers": { - "description": "Controllers is a list of controllers to enable. '*' enables all on-by-default controllers, 'foo' enables the controller \"+ named 'foo', '-foo' disables the controller named 'foo'. 
Defaults to \"*\".", + "description": "controllers is a list of controllers to enable. '*' enables all on-by-default controllers, 'foo' enables the controller \"+ named 'foo', '-foo' disables the controller named 'foo'. Defaults to \"*\".", "type": "array", "items": { "type": "string", @@ -15637,11 +16108,11 @@ } }, "election": { - "description": "Election defines the configuration for electing a controller instance to make changes to the cluster. If unspecified, the ControllerTTL value is checked to determine whether the legacy direct etcd election code will be used.", + "description": "election defines the configuration for electing a controller instance to make changes to the cluster. If unspecified, the ControllerTTL value is checked to determine whether the legacy direct etcd election code will be used.", "$ref": "#/definitions/com.github.openshift.api.legacyconfig.v1.ControllerElectionConfig" }, "serviceServingCert": { - "description": "ServiceServingCert holds configuration for service serving cert signer which creates cert/key pairs for pods fulfilling a service to serve with.", + "description": "serviceServingCert holds configuration for service serving cert signer which creates cert/key pairs for pods fulfilling a service to serve with.", "default": {}, "$ref": "#/definitions/com.github.openshift.api.legacyconfig.v1.ServiceServingCert" } @@ -15657,17 +16128,17 @@ ], "properties": { "lockName": { - "description": "LockName is the resource name used to act as the lock for determining which controller instance should lead.", + "description": "lockName is the resource name used to act as the lock for determining which controller instance should lead.", "type": "string", "default": "" }, "lockNamespace": { - "description": "LockNamespace is the resource namespace used to act as the lock for determining which controller instance should lead. It defaults to \"kube-system\"", + "description": "lockNamespace is the resource namespace used to act as the lock for determining which controller instance should lead. It defaults to \"kube-system\"", "type": "string", "default": "" }, "lockResource": { - "description": "LockResource is the group and resource name to use to coordinate for the controller lock. If unset, defaults to \"configmaps\".", + "description": "lockResource is the group and resource name to use to coordinate for the controller lock. If unset, defaults to \"configmaps\".", "default": {}, "$ref": "#/definitions/com.github.openshift.api.legacyconfig.v1.GroupResource" } @@ -15683,17 +16154,17 @@ ], "properties": { "allowRecursiveQueries": { - "description": "AllowRecursiveQueries allows the DNS server on the master to answer queries recursively. Note that open resolvers can be used for DNS amplification attacks and the master DNS should not be made accessible to public networks.", + "description": "allowRecursiveQueries allows the DNS server on the master to answer queries recursively. 
Note that open resolvers can be used for DNS amplification attacks and the master DNS should not be made accessible to public networks.", "type": "boolean", "default": false }, "bindAddress": { - "description": "BindAddress is the ip:port to serve DNS on", + "description": "bindAddress is the ip:port to serve DNS on", "type": "string", "default": "" }, "bindNetwork": { - "description": "BindNetwork is the type of network to bind to - defaults to \"tcp4\", accepts \"tcp\", \"tcp4\", and \"tcp6\"", + "description": "bindNetwork is the type of network to bind to - defaults to \"tcp4\", accepts \"tcp\", \"tcp4\", and \"tcp6\"", "type": "string", "default": "" } @@ -15711,7 +16182,7 @@ "type": "string" }, "disable": { - "description": "Disable turns off an admission plugin that is enabled by default.", + "description": "disable turns off an admission plugin that is enabled by default.", "type": "boolean", "default": false }, @@ -15745,17 +16216,17 @@ ], "properties": { "dockerShimRootDirectory": { - "description": "DockershimRootDirectory is the dockershim root directory.", + "description": "dockerShimRootDirectory is the dockershim root directory.", "type": "string", "default": "" }, "dockerShimSocket": { - "description": "DockerShimSocket is the location of the dockershim socket the kubelet uses. Currently unix socket is supported on Linux, and tcp is supported on windows. Examples:'unix:///var/run/dockershim.sock', 'tcp://localhost:3735'", + "description": "dockerShimSocket is the location of the dockershim socket the kubelet uses. Currently unix socket is supported on Linux, and tcp is supported on windows. Examples:'unix:///var/run/dockershim.sock', 'tcp://localhost:3735'", "type": "string", "default": "" }, "execHandlerName": { - "description": "ExecHandlerName is the name of the handler to use for executing commands in containers.", + "description": "execHandlerName is the name of the handler to use for executing commands in containers.", "type": "string", "default": "" } @@ -15773,22 +16244,22 @@ ], "properties": { "address": { - "description": "Address is the advertised host:port for client connections to etcd", + "description": "address is the advertised host:port for client connections to etcd", "type": "string", "default": "" }, "peerAddress": { - "description": "PeerAddress is the advertised host:port for peer connections to etcd", + "description": "peerAddress is the advertised host:port for peer connections to etcd", "type": "string", "default": "" }, "peerServingInfo": { - "description": "PeerServingInfo describes how to start serving the etcd peer", + "description": "peerServingInfo describes how to start serving the etcd peer", "default": {}, "$ref": "#/definitions/com.github.openshift.api.legacyconfig.v1.ServingInfo" }, "servingInfo": { - "description": "ServingInfo describes how to start serving the etcd master", + "description": "servingInfo describes how to start serving the etcd master", "default": {}, "$ref": "#/definitions/com.github.openshift.api.legacyconfig.v1.ServingInfo" }, @@ -15810,22 +16281,22 @@ ], "properties": { "ca": { - "description": "CA is a file containing trusted roots for the etcd server certificates", + "description": "ca is a file containing trusted roots for the etcd server certificates", "type": "string", "default": "" }, "certFile": { - "description": "CertFile is a file containing a PEM-encoded certificate", + "description": "certFile is a file containing a PEM-encoded certificate", "type": "string", "default": "" }, "keyFile": { - "description": 
"KeyFile is a file containing a PEM-encoded private key for the certificate specified by CertFile", + "description": "keyFile is a file containing a PEM-encoded private key for the certificate specified by CertFile", "type": "string", "default": "" }, "urls": { - "description": "URLs are the URLs for etcd", + "description": "urls are the URLs for etcd", "type": "array", "items": { "type": "string", @@ -15845,22 +16316,22 @@ ], "properties": { "kubernetesStoragePrefix": { - "description": "KubernetesStoragePrefix is the path within etcd that the Kubernetes resources will be rooted under. This value, if changed, will mean existing objects in etcd will no longer be located. The default value is 'kubernetes.io'.", + "description": "kubernetesStoragePrefix is the path within etcd that the Kubernetes resources will be rooted under. This value, if changed, will mean existing objects in etcd will no longer be located. The default value is 'kubernetes.io'.", "type": "string", "default": "" }, "kubernetesStorageVersion": { - "description": "KubernetesStorageVersion is the API version that Kube resources in etcd should be serialized to. This value should *not* be advanced until all clients in the cluster that read from etcd have code that allows them to read the new version.", + "description": "kubernetesStorageVersion is the API version that Kube resources in etcd should be serialized to. This value should *not* be advanced until all clients in the cluster that read from etcd have code that allows them to read the new version.", "type": "string", "default": "" }, "openShiftStoragePrefix": { - "description": "OpenShiftStoragePrefix is the path within etcd that the OpenShift resources will be rooted under. This value, if changed, will mean existing objects in etcd will no longer be located. The default value is 'openshift.io'.", + "description": "openShiftStoragePrefix is the path within etcd that the OpenShift resources will be rooted under. This value, if changed, will mean existing objects in etcd will no longer be located. The default value is 'openshift.io'.", "type": "string", "default": "" }, "openShiftStorageVersion": { - "description": "OpenShiftStorageVersion is the API version that OS resources in etcd should be serialized to. This value should *not* be advanced until all clients in the cluster that read from etcd have code that allows them to read the new version.", + "description": "openShiftStorageVersion is the API version that OS resources in etcd should be serialized to. This value should *not* be advanced until all clients in the cluster that read from etcd have code that allows them to read the new version.", "type": "string", "default": "" } @@ -15883,21 +16354,21 @@ "type": "string" }, "ca": { - "description": "CA is the optional trusted certificate authority bundle to use when making requests to the server. If empty, the default system roots are used. This can only be configured when hostname is set to a non-empty value.", + "description": "ca is the optional trusted certificate authority bundle to use when making requests to the server. If empty, the default system roots are used. 
This can only be configured when hostname is set to a non-empty value.", "type": "string", "default": "" }, "clientID": { - "description": "ClientID is the oauth client ID", + "description": "clientID is the oauth client ID", "type": "string", "default": "" }, "clientSecret": { - "description": "ClientSecret is the oauth client secret", + "description": "clientSecret is the oauth client secret", "$ref": "#/definitions/com.github.openshift.api.legacyconfig.v1.StringSource" }, "hostname": { - "description": "Hostname is the optional domain (e.g. \"mycompany.com\") for use with a hosted instance of GitHub Enterprise. It must match the GitHub Enterprise settings value that is configured at /setup/settings#hostname.", + "description": "hostname is the optional domain (e.g. \"mycompany.com\") for use with a hosted instance of GitHub Enterprise. It must match the GitHub Enterprise settings value that is configured at /setup/settings#hostname.", "type": "string", "default": "" }, @@ -15906,7 +16377,7 @@ "type": "string" }, "organizations": { - "description": "Organizations optionally restricts which organizations are allowed to log in", + "description": "organizations optionally restricts which organizations are allowed to log in", "type": "array", "items": { "type": "string", @@ -15914,7 +16385,7 @@ } }, "teams": { - "description": "Teams optionally restricts which teams are allowed to log in. Format is /.", + "description": "teams optionally restricts which teams are allowed to log in. Format is /.", "type": "array", "items": { "type": "string", @@ -15938,17 +16409,17 @@ "type": "string" }, "ca": { - "description": "CA is the optional trusted certificate authority bundle to use when making requests to the server If empty, the default system roots are used", + "description": "ca is the optional trusted certificate authority bundle to use when making requests to the server If empty, the default system roots are used", "type": "string", "default": "" }, "clientID": { - "description": "ClientID is the oauth client ID", + "description": "clientID is the oauth client ID", "type": "string", "default": "" }, "clientSecret": { - "description": "ClientSecret is the oauth client secret", + "description": "clientSecret is the oauth client secret", "$ref": "#/definitions/com.github.openshift.api.legacyconfig.v1.StringSource" }, "kind": { @@ -15956,11 +16427,11 @@ "type": "string" }, "legacy": { - "description": "Legacy determines if OAuth2 or OIDC should be used If true, OAuth2 is used If false, OIDC is used If nil and the URL's host is gitlab.com, OIDC is used Otherwise, OAuth2 is used In a future release, nil will default to using OIDC Eventually this flag will be removed and only OIDC will be used", + "description": "legacy determines if OAuth2 or OIDC should be used If true, OAuth2 is used If false, OIDC is used If nil and the URL's host is gitlab.com, OIDC is used Otherwise, OAuth2 is used In a future release, nil will default to using OIDC Eventually this flag will be removed and only OIDC will be used", "type": "boolean" }, "url": { - "description": "URL is the oauth server base URL", + "description": "url is the oauth server base URL", "type": "string", "default": "" } @@ -15980,16 +16451,16 @@ "type": "string" }, "clientID": { - "description": "ClientID is the oauth client ID", + "description": "clientID is the oauth client ID", "type": "string", "default": "" }, "clientSecret": { - "description": "ClientSecret is the oauth client secret", + "description": "clientSecret is the oauth client secret", 
"$ref": "#/definitions/com.github.openshift.api.legacyconfig.v1.StringSource" }, "hostedDomain": { - "description": "HostedDomain is the optional Google App domain (e.g. \"mycompany.com\") to restrict logins to", + "description": "hostedDomain is the optional Google App domain (e.g. \"mycompany.com\") to restrict logins to", "type": "string", "default": "" }, @@ -16008,12 +16479,12 @@ ], "properties": { "method": { - "description": "Method determines the default strategy to use when an OAuth client requests a grant. This method will be used only if the specific OAuth client doesn't provide a strategy of their own. Valid grant handling methods are:\n - auto: always approves grant requests, useful for trusted clients\n - prompt: prompts the end user for approval of grant requests, useful for third-party clients\n - deny: always denies grant requests, useful for black-listed clients", + "description": "method determines the default strategy to use when an OAuth client requests a grant. This method will be used only if the specific OAuth client doesn't provide a strategy of their own. Valid grant handling methods are:\n - auto: always approves grant requests, useful for trusted clients\n - prompt: prompts the end user for approval of grant requests, useful for third-party clients\n - deny: always denies grant requests, useful for black-listed clients", "type": "string", "default": "" }, "serviceAccountMethod": { - "description": "ServiceAccountMethod is used for determining client authorization for service account oauth client. It must be either: deny, prompt", + "description": "serviceAccountMethod is used for determining client authorization for service account oauth client. It must be either: deny, prompt", "type": "string", "default": "" } @@ -16028,12 +16499,12 @@ ], "properties": { "group": { - "description": "Group is the name of an API group", + "description": "group is the name of an API group", "type": "string", "default": "" }, "resource": { - "description": "Resource is the name of a resource.", + "description": "resource is the name of a resource.", "type": "string", "default": "" } @@ -16051,7 +16522,7 @@ "type": "string" }, "file": { - "description": "File is a reference to your htpasswd file", + "description": "file is a reference to your htpasswd file", "type": "string", "default": "" }, @@ -16076,22 +16547,22 @@ ], "properties": { "bindAddress": { - "description": "BindAddress is the ip:port to serve on", + "description": "bindAddress is the ip:port to serve on", "type": "string", "default": "" }, "bindNetwork": { - "description": "BindNetwork is the type of network to bind to - defaults to \"tcp4\", accepts \"tcp\", \"tcp4\", and \"tcp6\"", + "description": "bindNetwork is the type of network to bind to - defaults to \"tcp4\", accepts \"tcp\", \"tcp4\", and \"tcp6\"", "type": "string", "default": "" }, "certFile": { - "description": "CertFile is a file containing a PEM-encoded certificate", + "description": "certFile is a file containing a PEM-encoded certificate", "type": "string", "default": "" }, "cipherSuites": { - "description": "CipherSuites contains an overridden list of ciphers for the server to support. Values must match cipher suite IDs from https://golang.org/pkg/crypto/tls/#pkg-constants", + "description": "cipherSuites contains an overridden list of ciphers for the server to support. 
Values must match cipher suite IDs from https://golang.org/pkg/crypto/tls/#pkg-constants", "type": "array", "items": { "type": "string", @@ -16099,27 +16570,27 @@ } }, "clientCA": { - "description": "ClientCA is the certificate bundle for all the signers that you'll recognize for incoming client certificates", + "description": "clientCA is the certificate bundle for all the signers that you'll recognize for incoming client certificates", "type": "string", "default": "" }, "keyFile": { - "description": "KeyFile is a file containing a PEM-encoded private key for the certificate specified by CertFile", + "description": "keyFile is a file containing a PEM-encoded private key for the certificate specified by CertFile", "type": "string", "default": "" }, "maxRequestsInFlight": { - "description": "MaxRequestsInFlight is the number of concurrent requests allowed to the server. If zero, no limit.", + "description": "maxRequestsInFlight is the number of concurrent requests allowed to the server. If zero, no limit.", "type": "integer", "format": "int32", "default": 0 }, "minTLSVersion": { - "description": "MinTLSVersion is the minimum TLS version supported. Values must match version names from https://golang.org/pkg/crypto/tls/#pkg-constants", + "description": "minTLSVersion is the minimum TLS version supported. Values must match version names from https://golang.org/pkg/crypto/tls/#pkg-constants", "type": "string" }, "namedCertificates": { - "description": "NamedCertificates is a list of certificates to use to secure requests to specific hostnames", + "description": "namedCertificates is a list of certificates to use to secure requests to specific hostnames", "type": "array", "items": { "default": {}, @@ -16127,7 +16598,7 @@ } }, "requestTimeoutSeconds": { - "description": "RequestTimeoutSeconds is the number of seconds before requests are timed out. The default is 60 minutes, if -1 there is no limit on requests.", + "description": "requestTimeoutSeconds is the number of seconds before requests are timed out. 
The default is 60 minutes, if -1 there is no limit on requests.", "type": "integer", "format": "int32", "default": 0 @@ -16156,17 +16627,17 @@ "default": false }, "mappingMethod": { - "description": "MappingMethod determines how identities from this provider are mapped to users", + "description": "mappingMethod determines how identities from this provider are mapped to users", "type": "string", "default": "" }, "name": { - "description": "Name is used to qualify the identities returned by this provider", + "description": "name is used to qualify the identities returned by this provider", "type": "string", "default": "" }, "provider": { - "description": "Provider contains the information about how to set up a specific identity provider", + "description": "provider contains the information about how to set up a specific identity provider", "$ref": "#/definitions/io.k8s.apimachinery.pkg.runtime.RawExtension" } } @@ -16180,12 +16651,12 @@ ], "properties": { "format": { - "description": "Format is the format of the name to be built for the system component", + "description": "format is the format of the name to be built for the system component", "type": "string", "default": "" }, "latest": { - "description": "Latest determines if the latest tag will be pulled from the registry", + "description": "latest determines if the latest tag will be pulled from the registry", "type": "boolean", "default": false } @@ -16202,11 +16673,11 @@ ], "properties": { "additionalTrustedCA": { - "description": "AdditionalTrustedCA is a path to a pem bundle file containing additional CAs that should be trusted during imagestream import.", + "description": "additionalTrustedCA is a path to a pem bundle file containing additional CAs that should be trusted during imagestream import.", "type": "string" }, "allowedRegistriesForImport": { - "description": "AllowedRegistriesForImport limits the container image registries that normal users may import images from. Set this list to the registries that you trust to contain valid Docker images and that you want applications to be able to import from. Users with permission to create Images or ImageStreamMappings via the API are not affected by this policy - typically only administrators or system integrations will have those permissions.", + "description": "allowedRegistriesForImport limits the container image registries that normal users may import images from. Set this list to the registries that you trust to contain valid Docker images and that you want applications to be able to import from. Users with permission to create Images or ImageStreamMappings via the API are not affected by this policy - typically only administrators or system integrations will have those permissions.", "type": "array", "items": { "default": {}, @@ -16214,32 +16685,32 @@ } }, "disableScheduledImport": { - "description": "DisableScheduledImport allows scheduled background import of images to be disabled.", + "description": "disableScheduledImport allows scheduled background import of images to be disabled.", "type": "boolean", "default": false }, "externalRegistryHostname": { - "description": "ExternalRegistryHostname sets the hostname for the default external image registry. The external hostname should be set only when the image registry is exposed externally. The value is used in 'publicDockerImageRepository' field in ImageStreams. The value must be in \"hostname[:port]\" format.", + "description": "externalRegistryHostname sets the hostname for the default external image registry. 
The external hostname should be set only when the image registry is exposed externally. The value is used in 'publicDockerImageRepository' field in ImageStreams. The value must be in \"hostname[:port]\" format.", "type": "string" }, "internalRegistryHostname": { - "description": "InternalRegistryHostname sets the hostname for the default internal image registry. The value must be in \"hostname[:port]\" format.", + "description": "internalRegistryHostname sets the hostname for the default internal image registry. The value must be in \"hostname[:port]\" format.", "type": "string" }, "maxImagesBulkImportedPerRepository": { - "description": "MaxImagesBulkImportedPerRepository controls the number of images that are imported when a user does a bulk import of a container repository. This number defaults to 50 to prevent users from importing large numbers of images accidentally. Set -1 for no limit.", + "description": "maxImagesBulkImportedPerRepository controls the number of images that are imported when a user does a bulk import of a container repository. This number defaults to 50 to prevent users from importing large numbers of images accidentally. Set -1 for no limit.", "type": "integer", "format": "int32", "default": 0 }, "maxScheduledImageImportsPerMinute": { - "description": "MaxScheduledImageImportsPerMinute is the maximum number of scheduled image streams that will be imported in the background per minute. The default value is 60. Set to -1 for unlimited.", + "description": "maxScheduledImageImportsPerMinute is the maximum number of scheduled image streams that will be imported in the background per minute. The default value is 60. Set to -1 for unlimited.", "type": "integer", "format": "int32", "default": 0 }, "scheduledImageImportMinimumIntervalSeconds": { - "description": "ScheduledImageImportMinimumIntervalSeconds is the minimum number of seconds that can elapse between when image streams scheduled for background import are checked against the upstream repository. The default value is 15 minutes.", + "description": "scheduledImageImportMinimumIntervalSeconds is the minimum number of seconds that can elapse between when image streams scheduled for background import are checked against the upstream repository. The default value is 15 minutes.", "type": "integer", "format": "int32", "default": 0 @@ -16258,11 +16729,11 @@ ], "properties": { "autoProvisionEnabled": { - "description": "AutoProvisionEnabled determines whether a Jenkins server will be spawned from the provided template when the first build config in the project with type JenkinsPipeline is created. When not specified this option defaults to true.", + "description": "autoProvisionEnabled determines whether a Jenkins server will be spawned from the provided template when the first build config in the project with type JenkinsPipeline is created. When not specified this option defaults to true.", "type": "boolean" }, "parameters": { - "description": "Parameters specifies a set of optional parameters to the Jenkins template.", + "description": "parameters specifies a set of optional parameters to the Jenkins template.", "type": "object", "additionalProperties": { "type": "string", @@ -16270,17 +16741,17 @@ } }, "serviceName": { - "description": "ServiceName is the name of the Jenkins service OpenShift uses to detect whether a Jenkins pipeline handler has already been installed in a project. 
This value *must* match a service name in the provided template.", + "description": "serviceName is the name of the Jenkins service OpenShift uses to detect whether a Jenkins pipeline handler has already been installed in a project. This value *must* match a service name in the provided template.", "type": "string", "default": "" }, "templateName": { - "description": "TemplateName is the name of the default Jenkins template", + "description": "templateName is the name of the default Jenkins template", "type": "string", "default": "" }, "templateNamespace": { - "description": "TemplateNamespace contains the namespace name where the Jenkins template is stored", + "description": "templateNamespace contains the namespace name where the Jenkins template is stored", "type": "string", "default": "" } @@ -16303,12 +16774,12 @@ "type": "string" }, "ca": { - "description": "CA is the CA for verifying TLS connections", + "description": "ca is the CA for verifying TLS connections", "type": "string", "default": "" }, "certFile": { - "description": "CertFile is a file containing a PEM-encoded certificate", + "description": "certFile is a file containing a PEM-encoded certificate", "type": "string", "default": "" }, @@ -16318,7 +16789,7 @@ "default": "" }, "keyFile": { - "description": "KeyFile is a file containing a PEM-encoded private key for the certificate specified by CertFile", + "description": "keyFile is a file containing a PEM-encoded private key for the certificate specified by CertFile", "type": "string", "default": "" }, @@ -16327,12 +16798,12 @@ "type": "string" }, "url": { - "description": "URL is the remote URL to connect to", + "description": "url is the remote URL to connect to", "type": "string", "default": "" }, "useKeystoneIdentity": { - "description": "UseKeystoneIdentity flag indicates that user should be authenticated by keystone ID, not by username", + "description": "useKeystoneIdentity flag indicates that user should be authenticated by keystone ID, not by username", "type": "boolean", "default": false } @@ -16349,22 +16820,22 @@ ], "properties": { "ca": { - "description": "CA is the CA for verifying TLS connections to kubelets", + "description": "ca is the CA for verifying TLS connections to kubelets", "type": "string", "default": "" }, "certFile": { - "description": "CertFile is a file containing a PEM-encoded certificate", + "description": "certFile is a file containing a PEM-encoded certificate", "type": "string", "default": "" }, "keyFile": { - "description": "KeyFile is a file containing a PEM-encoded private key for the certificate specified by CertFile", + "description": "keyFile is a file containing a PEM-encoded private key for the certificate specified by CertFile", "type": "string", "default": "" }, "port": { - "description": "Port is the port to connect to kubelets on", + "description": "port is the port to connect to kubelets on", "type": "integer", "format": "int32", "default": 0 @@ -16390,7 +16861,7 @@ ], "properties": { "apiLevels": { - "description": "APILevels is a list of API levels that should be enabled on startup: v1 as examples", + "description": "apiLevels is a list of API levels that should be enabled on startup: v1 as examples", "type": "array", "items": { "type": "string", @@ -16398,7 +16869,7 @@ } }, "apiServerArguments": { - "description": "APIServerArguments are key value pairs that will be passed directly to the Kube apiserver that match the apiservers's command line arguments. 
These are not migrated, but if you reference a value that does not exist the server will not start. These values may override other settings in KubernetesMasterConfig which may cause invalid configurations.", + "description": "apiServerArguments are key value pairs that will be passed directly to the Kube apiserver that match the apiservers's command line arguments. These are not migrated, but if you reference a value that does not exist the server will not start. These values may override other settings in KubernetesMasterConfig which may cause invalid configurations.", "type": "object", "additionalProperties": { "type": "array", @@ -16409,7 +16880,7 @@ } }, "controllerArguments": { - "description": "ControllerArguments are key value pairs that will be passed directly to the Kube controller manager that match the controller manager's command line arguments. These are not migrated, but if you reference a value that does not exist the server will not start. These values may override other settings in KubernetesMasterConfig which may cause invalid configurations.", + "description": "controllerArguments are key value pairs that will be passed directly to the Kube controller manager that match the controller manager's command line arguments. These are not migrated, but if you reference a value that does not exist the server will not start. These values may override other settings in KubernetesMasterConfig which may cause invalid configurations.", "type": "object", "additionalProperties": { "type": "array", @@ -16420,7 +16891,7 @@ } }, "disabledAPIGroupVersions": { - "description": "DisabledAPIGroupVersions is a map of groups to the versions (or *) that should be disabled.", + "description": "disabledAPIGroupVersions is a map of groups to the versions (or *) that should be disabled.", "type": "object", "additionalProperties": { "type": "array", @@ -16431,28 +16902,28 @@ } }, "masterEndpointReconcileTTL": { - "description": "MasterEndpointReconcileTTL sets the time to live in seconds of an endpoint record recorded by each master. The endpoints are checked at an interval that is 2/3 of this value and this value defaults to 15s if unset. In very large clusters, this value may be increased to reduce the possibility that the master endpoint record expires (due to other load on the etcd server) and causes masters to drop in and out of the kubernetes service record. It is not recommended to set this value below 15s.", + "description": "masterEndpointReconcileTTL sets the time to live in seconds of an endpoint record recorded by each master. The endpoints are checked at an interval that is 2/3 of this value and this value defaults to 15s if unset. In very large clusters, this value may be increased to reduce the possibility that the master endpoint record expires (due to other load on the etcd server) and causes masters to drop in and out of the kubernetes service record. It is not recommended to set this value below 15s.", "type": "integer", "format": "int32", "default": 0 }, "masterIP": { - "description": "MasterIP is the public IP address of kubernetes stuff. If empty, the first result from net.InterfaceAddrs will be used.", + "description": "masterIP is the public IP address of kubernetes stuff. If empty, the first result from net.InterfaceAddrs will be used.", "type": "string", "default": "" }, "podEvictionTimeout": { - "description": "PodEvictionTimeout controls grace period for deleting pods on failed nodes. It takes valid time duration string. 
If empty, you get the default pod eviction timeout.", + "description": "podEvictionTimeout controls grace period for deleting pods on failed nodes. It takes valid time duration string. If empty, you get the default pod eviction timeout.", "type": "string", "default": "" }, "proxyClientInfo": { - "description": "ProxyClientInfo specifies the client cert/key to use when proxying to pods", + "description": "proxyClientInfo specifies the client cert/key to use when proxying to pods", "default": {}, "$ref": "#/definitions/com.github.openshift.api.legacyconfig.v1.CertInfo" }, "schedulerArguments": { - "description": "SchedulerArguments are key value pairs that will be passed directly to the Kube scheduler that match the scheduler's command line arguments. These are not migrated, but if you reference a value that does not exist the server will not start. These values may override other settings in KubernetesMasterConfig which may cause invalid configurations.", + "description": "schedulerArguments are key value pairs that will be passed directly to the Kube scheduler that match the scheduler's command line arguments. These are not migrated, but if you reference a value that does not exist the server will not start. These values may override other settings in KubernetesMasterConfig which may cause invalid configurations.", "type": "object", "additionalProperties": { "type": "array", @@ -16463,17 +16934,17 @@ } }, "schedulerConfigFile": { - "description": "SchedulerConfigFile points to a file that describes how to set up the scheduler. If empty, you get the default scheduling rules.", + "description": "schedulerConfigFile points to a file that describes how to set up the scheduler. If empty, you get the default scheduling rules.", "type": "string", "default": "" }, "servicesNodePortRange": { - "description": "ServicesNodePortRange is the range to use for assigning service public ports on a host.", + "description": "servicesNodePortRange is the range to use for assigning service public ports on a host.", "type": "string", "default": "" }, "servicesSubnet": { - "description": "ServicesSubnet is the subnet to use for assigning service IPs", + "description": "servicesSubnet is the subnet to use for assigning service IPs", "type": "string", "default": "" } @@ -16490,7 +16961,7 @@ ], "properties": { "email": { - "description": "Email is the list of attributes whose values should be used as the email address. Optional. If unspecified, no email is set for the identity", + "description": "email is the list of attributes whose values should be used as the email address. Optional. If unspecified, no email is set for the identity", "type": "array", "items": { "type": "string", @@ -16498,7 +16969,7 @@ } }, "id": { - "description": "ID is the list of attributes whose values should be used as the user ID. Required. LDAP standard identity attribute is \"dn\"", + "description": "id is the list of attributes whose values should be used as the user ID. Required. LDAP standard identity attribute is \"dn\"", "type": "array", "items": { "type": "string", @@ -16506,7 +16977,7 @@ } }, "name": { - "description": "Name is the list of attributes whose values should be used as the display name. Optional. If unspecified, no display name is set for the identity LDAP standard display name attribute is \"cn\"", + "description": "name is the list of attributes whose values should be used as the display name. Optional. 
If unspecified, no display name is set for the identity LDAP standard display name attribute is \"cn\"", "type": "array", "items": { "type": "string", @@ -16514,7 +16985,7 @@ } }, "preferredUsername": { - "description": "PreferredUsername is the list of attributes whose values should be used as the preferred username. LDAP standard login attribute is \"uid\"", + "description": "preferredUsername is the list of attributes whose values should be used as the preferred username. LDAP standard login attribute is \"uid\"", "type": "array", "items": { "type": "string", @@ -16540,21 +17011,21 @@ "type": "string" }, "attributes": { - "description": "Attributes maps LDAP attributes to identities", + "description": "attributes maps LDAP attributes to identities", "default": {}, "$ref": "#/definitions/com.github.openshift.api.legacyconfig.v1.LDAPAttributeMapping" }, "bindDN": { - "description": "BindDN is an optional DN to bind with during the search phase.", + "description": "bindDN is an optional DN to bind with during the search phase.", "type": "string", "default": "" }, "bindPassword": { - "description": "BindPassword is an optional password to bind with during the search phase.", + "description": "bindPassword is an optional password to bind with during the search phase.", "$ref": "#/definitions/com.github.openshift.api.legacyconfig.v1.StringSource" }, "ca": { - "description": "CA is the optional trusted certificate authority bundle to use when making requests to the server If empty, the default system roots are used", + "description": "ca is the optional trusted certificate authority bundle to use when making requests to the server If empty, the default system roots are used", "type": "string", "default": "" }, @@ -16568,7 +17039,7 @@ "type": "string" }, "url": { - "description": "URL is an RFC 2255 URL which specifies the LDAP search parameters to use. The syntax of the URL is\n ldap://host:port/basedn?attribute?scope?filter", + "description": "url is an RFC 2255 URL which specifies the LDAP search parameters to use. The syntax of the URL is\n ldap://host:port/basedn?attribute?scope?filter", "type": "string", "default": "" } @@ -16597,12 +17068,12 @@ "default": "" }, "filter": { - "description": "Filter is a valid LDAP search filter that retrieves all relevant entries from the LDAP server with the base DN", + "description": "filter is a valid LDAP search filter that retrieves all relevant entries from the LDAP server with the base DN", "type": "string", "default": "" }, "pageSize": { - "description": "PageSize is the maximum preferred page size, measured in LDAP entries. A page size of 0 means no paging will be done.", + "description": "pageSize is the maximum preferred page size, measured in LDAP entries. 
A page size of 0 means no paging will be done.", "type": "integer", "format": "int32", "default": 0 @@ -16645,16 +17116,16 @@ "$ref": "#/definitions/com.github.openshift.api.legacyconfig.v1.AugmentedActiveDirectoryConfig" }, "bindDN": { - "description": "BindDN is an optional DN to bind to the LDAP server with", + "description": "bindDN is an optional DN to bind to the LDAP server with", "type": "string", "default": "" }, "bindPassword": { - "description": "BindPassword is an optional password to bind with during the search phase.", + "description": "bindPassword is an optional password to bind with during the search phase.", "$ref": "#/definitions/com.github.openshift.api.legacyconfig.v1.StringSource" }, "ca": { - "description": "CA is the optional trusted certificate authority bundle to use when making requests to the server If empty, the default system roots are used", + "description": "ca is the optional trusted certificate authority bundle to use when making requests to the server If empty, the default system roots are used", "type": "string", "default": "" }, @@ -16709,12 +17180,12 @@ ], "properties": { "oauthMetadataFile": { - "description": "OAuthMetadataFile is a path to a file containing the discovery endpoint for OAuth 2.0 Authorization Server Metadata for an external OAuth server. See IETF Draft: // https://tools.ietf.org/html/draft-ietf-oauth-discovery-04#section-2 This option is mutually exclusive with OAuthConfig", + "description": "oauthMetadataFile is a path to a file containing the discovery endpoint for OAuth 2.0 Authorization Server Metadata for an external OAuth server. See IETF Draft: // https://tools.ietf.org/html/draft-ietf-oauth-discovery-04#section-2 This option is mutually exclusive with OAuthConfig", "type": "string", "default": "" }, "requestHeader": { - "description": "RequestHeader holds options for setting up a front proxy against the API. It is optional.", + "description": "requestHeader holds options for setting up a front proxy against the API. 
It is optional.", "$ref": "#/definitions/com.github.openshift.api.legacyconfig.v1.RequestHeaderAuthenticationOptions" }, "webhookTokenAuthenticators": { @@ -16736,11 +17207,11 @@ ], "properties": { "openshiftLoopbackClientConnectionOverrides": { - "description": "OpenShiftLoopbackClientConnectionOverrides specifies client overrides for system components to loop back to this master.", + "description": "openshiftLoopbackClientConnectionOverrides specifies client overrides for system components to loop back to this master.", "$ref": "#/definitions/com.github.openshift.api.legacyconfig.v1.ClientConnectionOverrides" }, "openshiftLoopbackKubeConfig": { - "description": "OpenShiftLoopbackKubeConfig is a .kubeconfig filename for system components to loopback to this master", + "description": "openshiftLoopbackKubeConfig is a .kubeconfig filename for system components to loopback to this master", "type": "string", "default": "" } @@ -16780,17 +17251,17 @@ ], "properties": { "admissionConfig": { - "description": "AdmissionConfig contains admission control plugin configuration.", + "description": "admissionConfig contains admission control plugin configuration.", "default": {}, "$ref": "#/definitions/com.github.openshift.api.legacyconfig.v1.AdmissionConfig" }, "aggregatorConfig": { - "description": "AggregatorConfig has options for configuring the aggregator component of the API server.", + "description": "aggregatorConfig has options for configuring the aggregator component of the API server.", "default": {}, "$ref": "#/definitions/com.github.openshift.api.legacyconfig.v1.AggregatorConfig" }, "apiLevels": { - "description": "APILevels is a list of API levels that should be enabled on startup: v1 as examples", + "description": "apiLevels is a list of API levels that should be enabled on startup: v1 as examples", "type": "array", "items": { "type": "string", @@ -16802,22 +17273,22 @@ "type": "string" }, "auditConfig": { - "description": "AuditConfig holds information related to auditing capabilities.", + "description": "auditConfig holds information related to auditing capabilities.", "default": {}, "$ref": "#/definitions/com.github.openshift.api.legacyconfig.v1.AuditConfig" }, "authConfig": { - "description": "AuthConfig configures authentication options in addition to the standard oauth token and client certificate authenticators", + "description": "authConfig configures authentication options in addition to the standard oauth token and client certificate authenticators", "default": {}, "$ref": "#/definitions/com.github.openshift.api.legacyconfig.v1.MasterAuthConfig" }, "controllerConfig": { - "description": "ControllerConfig holds configuration values for controllers", + "description": "controllerConfig holds configuration values for controllers", "default": {}, "$ref": "#/definitions/com.github.openshift.api.legacyconfig.v1.ControllerConfig" }, "controllers": { - "description": "Controllers is a list of the controllers that should be started. If set to \"none\", no controllers will start automatically. The default value is \"*\" which will start all controllers. When using \"*\", you may exclude controllers by prepending a \"-\" in front of their name. No other values are recognized at this time.", + "description": "controllers is a list of the controllers that should be started. If set to \"none\", no controllers will start automatically. The default value is \"*\" which will start all controllers. When using \"*\", you may exclude controllers by prepending a \"-\" in front of their name. 
No other values are recognized at this time.", "type": "string", "default": "" }, @@ -16834,7 +17305,7 @@ "$ref": "#/definitions/com.github.openshift.api.legacyconfig.v1.DNSConfig" }, "etcdClientInfo": { - "description": "EtcdClientInfo contains information about how to connect to etcd", + "description": "etcdClientInfo contains information about how to connect to etcd", "default": {}, "$ref": "#/definitions/com.github.openshift.api.legacyconfig.v1.EtcdConnectionInfo" }, @@ -16843,22 +17314,22 @@ "$ref": "#/definitions/com.github.openshift.api.legacyconfig.v1.EtcdConfig" }, "etcdStorageConfig": { - "description": "EtcdStorageConfig contains information about how API resources are stored in Etcd. These values are only relevant when etcd is the backing store for the cluster.", + "description": "etcdStorageConfig contains information about how API resources are stored in Etcd. These values are only relevant when etcd is the backing store for the cluster.", "default": {}, "$ref": "#/definitions/com.github.openshift.api.legacyconfig.v1.EtcdStorageConfig" }, "imageConfig": { - "description": "ImageConfig holds options that describe how to build image names for system components", + "description": "imageConfig holds options that describe how to build image names for system components", "default": {}, "$ref": "#/definitions/com.github.openshift.api.legacyconfig.v1.ImageConfig" }, "imagePolicyConfig": { - "description": "ImagePolicyConfig controls limits and behavior for importing images", + "description": "imagePolicyConfig controls limits and behavior for importing images", "default": {}, "$ref": "#/definitions/com.github.openshift.api.legacyconfig.v1.ImagePolicyConfig" }, "jenkinsPipelineConfig": { - "description": "JenkinsPipelineConfig holds information about the default Jenkins template used for JenkinsPipeline build strategy.", + "description": "jenkinsPipelineConfig holds information about the default Jenkins template used for JenkinsPipeline build strategy.", "default": {}, "$ref": "#/definitions/com.github.openshift.api.legacyconfig.v1.JenkinsPipelineConfig" }, @@ -16867,7 +17338,7 @@ "type": "string" }, "kubeletClientInfo": { - "description": "KubeletClientInfo contains information about how to connect to kubelets", + "description": "kubeletClientInfo contains information about how to connect to kubelets", "default": {}, "$ref": "#/definitions/com.github.openshift.api.legacyconfig.v1.KubeletConnectionInfo" }, @@ -16877,17 +17348,17 @@ "$ref": "#/definitions/com.github.openshift.api.legacyconfig.v1.KubernetesMasterConfig" }, "masterClients": { - "description": "MasterClients holds all the client connection information for controllers and other system components", + "description": "masterClients holds all the client connection information for controllers and other system components", "default": {}, "$ref": "#/definitions/com.github.openshift.api.legacyconfig.v1.MasterClients" }, "masterPublicURL": { - "description": "MasterPublicURL is how clients can access the OpenShift API server", + "description": "masterPublicURL is how clients can access the OpenShift API server", "type": "string", "default": "" }, "networkConfig": { - "description": "NetworkConfig to be passed to the compiled in network plugin", + "description": "networkConfig to be passed to the compiled in network plugin", "default": {}, "$ref": "#/definitions/com.github.openshift.api.legacyconfig.v1.MasterNetworkConfig" }, @@ -16896,27 +17367,27 @@ "$ref": "#/definitions/com.github.openshift.api.legacyconfig.v1.OAuthConfig" }, 
"policyConfig": { - "description": "PolicyConfig holds information about where to locate critical pieces of bootstrapping policy", + "description": "policyConfig holds information about where to locate critical pieces of bootstrapping policy", "default": {}, "$ref": "#/definitions/com.github.openshift.api.legacyconfig.v1.PolicyConfig" }, "projectConfig": { - "description": "ProjectConfig holds information about project creation and defaults", + "description": "projectConfig holds information about project creation and defaults", "default": {}, "$ref": "#/definitions/com.github.openshift.api.legacyconfig.v1.ProjectConfig" }, "routingConfig": { - "description": "RoutingConfig holds information about routing and route generation", + "description": "routingConfig holds information about routing and route generation", "default": {}, "$ref": "#/definitions/com.github.openshift.api.legacyconfig.v1.RoutingConfig" }, "serviceAccountConfig": { - "description": "ServiceAccountConfig holds options related to service accounts", + "description": "serviceAccountConfig holds options related to service accounts", "default": {}, "$ref": "#/definitions/com.github.openshift.api.legacyconfig.v1.ServiceAccountConfig" }, "servingInfo": { - "description": "ServingInfo describes how to start serving", + "description": "servingInfo describes how to start serving", "default": {}, "$ref": "#/definitions/com.github.openshift.api.legacyconfig.v1.HTTPServingInfo" }, @@ -16939,11 +17410,11 @@ ], "properties": { "clusterNetworkCIDR": { - "description": "ClusterNetworkCIDR is the CIDR string to specify the global overlay network's L3 space. Deprecated, but maintained for backwards compatibility, use ClusterNetworks instead.", + "description": "clusterNetworkCIDR is the CIDR string to specify the global overlay network's L3 space. Deprecated, but maintained for backwards compatibility, use ClusterNetworks instead.", "type": "string" }, "clusterNetworks": { - "description": "ClusterNetworks is a list of ClusterNetwork objects that defines the global overlay network's L3 space by specifying a set of CIDR and netmasks that the SDN can allocate addressed from. If this is specified, then ClusterNetworkCIDR and HostSubnetLength may not be set.", + "description": "clusterNetworks is a list of ClusterNetwork objects that defines the global overlay network's L3 space by specifying a set of CIDR and netmasks that the SDN can allocate addressed from. If this is specified, then ClusterNetworkCIDR and HostSubnetLength may not be set.", "type": "array", "items": { "default": {}, @@ -16951,7 +17422,7 @@ } }, "externalIPNetworkCIDRs": { - "description": "ExternalIPNetworkCIDRs controls what values are acceptable for the service external IP field. If empty, no externalIP may be set. It may contain a list of CIDRs which are checked for access. If a CIDR is prefixed with !, IPs in that CIDR will be rejected. Rejections will be applied first, then the IP checked against one of the allowed CIDRs. You should ensure this range does not overlap with your nodes, pods, or service CIDRs for security reasons.", + "description": "externalIPNetworkCIDRs controls what values are acceptable for the service external IP field. If empty, no externalIP may be set. It may contain a list of CIDRs which are checked for access. If a CIDR is prefixed with !, IPs in that CIDR will be rejected. Rejections will be applied first, then the IP checked against one of the allowed CIDRs. 
You should ensure this range does not overlap with your nodes, pods, or service CIDRs for security reasons.", "type": "array", "items": { "type": "string", @@ -16959,17 +17430,17 @@ } }, "hostSubnetLength": { - "description": "HostSubnetLength is the number of bits to allocate to each host's subnet e.g. 8 would mean a /24 network on the host. Deprecated, but maintained for backwards compatibility, use ClusterNetworks instead.", + "description": "hostSubnetLength is the number of bits to allocate to each host's subnet e.g. 8 would mean a /24 network on the host. Deprecated, but maintained for backwards compatibility, use ClusterNetworks instead.", "type": "integer", "format": "int64" }, "ingressIPNetworkCIDR": { - "description": "IngressIPNetworkCIDR controls the range to assign ingress ips from for services of type LoadBalancer on bare metal. If empty, ingress ips will not be assigned. It may contain a single CIDR that will be allocated from. For security reasons, you should ensure that this range does not overlap with the CIDRs reserved for external ips, nodes, pods, or services.", + "description": "ingressIPNetworkCIDR controls the range to assign ingress ips from for services of type LoadBalancer on bare metal. If empty, ingress ips will not be assigned. It may contain a single CIDR that will be allocated from. For security reasons, you should ensure that this range does not overlap with the CIDRs reserved for external ips, nodes, pods, or services.", "type": "string", "default": "" }, "networkPluginName": { - "description": "NetworkPluginName is the name of the network plugin to use", + "description": "networkPluginName is the name of the network plugin to use", "type": "string", "default": "" }, @@ -16979,7 +17450,7 @@ "default": "" }, "vxlanPort": { - "description": "VXLANPort is the VXLAN port used by the cluster defaults. If it is not set, 4789 is the default value", + "description": "vxlanPort is the VXLAN port used by the cluster defaults. If it is not set, 4789 is the default value", "type": "integer", "format": "int64" } @@ -16993,7 +17464,7 @@ ], "properties": { "dynamicProvisioningEnabled": { - "description": "DynamicProvisioningEnabled is a boolean that toggles dynamic provisioning off when false, defaults to true", + "description": "dynamicProvisioningEnabled is a boolean that toggles dynamic provisioning off when false, defaults to true", "type": "boolean" } } @@ -17008,17 +17479,17 @@ ], "properties": { "certFile": { - "description": "CertFile is a file containing a PEM-encoded certificate", + "description": "certFile is a file containing a PEM-encoded certificate", "type": "string", "default": "" }, "keyFile": { - "description": "KeyFile is a file containing a PEM-encoded private key for the certificate specified by CertFile", + "description": "keyFile is a file containing a PEM-encoded private key for the certificate specified by CertFile", "type": "string", "default": "" }, "names": { - "description": "Names is a list of DNS names this certificate should be used to secure A name can be a normal DNS name, or can contain leading wildcard segments.", + "description": "names is a list of DNS names this certificate should be used to secure A name can be a normal DNS name, or can contain leading wildcard segments.", "type": "array", "items": { "type": "string", @@ -17038,24 +17509,24 @@ ], "properties": { "authenticationCacheSize": { - "description": "AuthenticationCacheSize indicates how many authentication results should be cached. 
If 0, the default cache size is used.", + "description": "authenticationCacheSize indicates how many authentication results should be cached. If 0, the default cache size is used.", "type": "integer", "format": "int32", "default": 0 }, "authenticationCacheTTL": { - "description": "AuthenticationCacheTTL indicates how long an authentication result should be cached. It takes a valid time duration string (e.g. \"5m\"). If empty, you get the default timeout. If zero (e.g. \"0m\"), caching is disabled", + "description": "authenticationCacheTTL indicates how long an authentication result should be cached. It takes a valid time duration string (e.g. \"5m\"). If empty, you get the default timeout. If zero (e.g. \"0m\"), caching is disabled", "type": "string", "default": "" }, "authorizationCacheSize": { - "description": "AuthorizationCacheSize indicates how many authorization results should be cached. If 0, the default cache size is used.", + "description": "authorizationCacheSize indicates how many authorization results should be cached. If 0, the default cache size is used.", "type": "integer", "format": "int32", "default": 0 }, "authorizationCacheTTL": { - "description": "AuthorizationCacheTTL indicates how long an authorization result should be cached. It takes a valid time duration string (e.g. \"5m\"). If empty, you get the default timeout. If zero (e.g. \"0m\"), caching is disabled", + "description": "authorizationCacheTTL indicates how long an authorization result should be cached. It takes a valid time duration string (e.g. \"5m\"). If empty, you get the default timeout. If zero (e.g. \"0m\"), caching is disabled", "type": "string", "default": "" } @@ -17088,7 +17559,7 @@ ], "properties": { "allowDisabledDocker": { - "description": "AllowDisabledDocker if true, the Kubelet will ignore errors from Docker. This means that a node can start on a machine that doesn't have docker started.", + "description": "allowDisabledDocker if true, the Kubelet will ignore errors from Docker. This means that a node can start on a machine that doesn't have docker started.", "type": "boolean", "default": false }, @@ -17097,27 +17568,27 @@ "type": "string" }, "authConfig": { - "description": "AuthConfig holds authn/authz configuration options", + "description": "authConfig holds authn/authz configuration options", "default": {}, "$ref": "#/definitions/com.github.openshift.api.legacyconfig.v1.NodeAuthConfig" }, "dnsBindAddress": { - "description": "DNSBindAddress is the ip:port to serve DNS on. If this is not set, the DNS server will not be started. Because most DNS resolvers will only listen on port 53, if you select an alternative port you will need a DNS proxy like dnsmasq to answer queries for containers. A common configuration is dnsmasq configured on a node IP listening on 53 and delegating queries for dnsDomain to this process, while sending other queries to the host environments nameservers.", + "description": "dnsBindAddress is the ip:port to serve DNS on. If this is not set, the DNS server will not be started. Because most DNS resolvers will only listen on port 53, if you select an alternative port you will need a DNS proxy like dnsmasq to answer queries for containers. 
A common configuration is dnsmasq configured on a node IP listening on 53 and delegating queries for dnsDomain to this process, while sending other queries to the host environments nameservers.", "type": "string", "default": "" }, "dnsDomain": { - "description": "DNSDomain holds the domain suffix that will be used for the DNS search path inside each container. Defaults to 'cluster.local'.", + "description": "dnsDomain holds the domain suffix that will be used for the DNS search path inside each container. Defaults to 'cluster.local'.", "type": "string", "default": "" }, "dnsIP": { - "description": "DNSIP is the IP address that pods will use to access cluster DNS. Defaults to the service IP of the Kubernetes master. This IP must be listening on port 53 for compatibility with libc resolvers (which cannot be configured to resolve names from any other port). When running more complex local DNS configurations, this is often set to the local address of a DNS proxy like dnsmasq, which then will consult either the local DNS (see dnsBindAddress) or the master DNS.", + "description": "dnsIP is the IP address that pods will use to access cluster DNS. Defaults to the service IP of the Kubernetes master. This IP must be listening on port 53 for compatibility with libc resolvers (which cannot be configured to resolve names from any other port). When running more complex local DNS configurations, this is often set to the local address of a DNS proxy like dnsmasq, which then will consult either the local DNS (see dnsBindAddress) or the master DNS.", "type": "string", "default": "" }, "dnsNameservers": { - "description": "DNSNameservers is a list of ip:port values of recursive nameservers to forward queries to when running a local DNS server if dnsBindAddress is set. If this value is empty, the DNS server will default to the nameservers listed in /etc/resolv.conf. If you have configured dnsmasq or another DNS proxy on the system, this value should be set to the upstream nameservers dnsmasq resolves with.", + "description": "dnsNameservers is a list of ip:port values of recursive nameservers to forward queries to when running a local DNS server if dnsBindAddress is set. If this value is empty, the DNS server will default to the nameservers listed in /etc/resolv.conf. If you have configured dnsmasq or another DNS proxy on the system, this value should be set to the upstream nameservers dnsmasq resolves with.", "type": "array", "items": { "type": "string", @@ -17125,26 +17596,26 @@ } }, "dnsRecursiveResolvConf": { - "description": "DNSRecursiveResolvConf is a path to a resolv.conf file that contains settings for an upstream server. Only the nameservers and port fields are used. The file must exist and parse correctly. It adds extra nameservers to DNSNameservers if set.", + "description": "dnsRecursiveResolvConf is a path to a resolv.conf file that contains settings for an upstream server. Only the nameservers and port fields are used. The file must exist and parse correctly. 
It adds extra nameservers to DNSNameservers if set.", "type": "string", "default": "" }, "dockerConfig": { - "description": "DockerConfig holds Docker related configuration options.", + "description": "dockerConfig holds Docker related configuration options.", "default": {}, "$ref": "#/definitions/com.github.openshift.api.legacyconfig.v1.DockerConfig" }, "enableUnidling": { - "description": "EnableUnidling controls whether or not the hybrid unidling proxy will be set up", + "description": "enableUnidling controls whether or not the hybrid unidling proxy will be set up", "type": "boolean" }, "imageConfig": { - "description": "ImageConfig holds options that describe how to build image names for system components", + "description": "imageConfig holds options that describe how to build image names for system components", "default": {}, "$ref": "#/definitions/com.github.openshift.api.legacyconfig.v1.ImageConfig" }, "iptablesSyncPeriod": { - "description": "IPTablesSyncPeriod is how often iptable rules are refreshed", + "description": "iptablesSyncPeriod is how often iptable rules are refreshed", "type": "string", "default": "" }, @@ -17153,7 +17624,7 @@ "type": "string" }, "kubeletArguments": { - "description": "KubeletArguments are key value pairs that will be passed directly to the Kubelet that match the Kubelet's command line arguments. These are not migrated or validated, so if you use them they may become invalid. These values override other settings in NodeConfig which may cause invalid configurations.", + "description": "kubeletArguments are key value pairs that will be passed directly to the Kubelet that match the Kubelet's command line arguments. These are not migrated or validated, so if you use them they may become invalid. These values override other settings in NodeConfig which may cause invalid configurations.", "type": "object", "additionalProperties": { "type": "array", @@ -17164,16 +17635,16 @@ } }, "masterClientConnectionOverrides": { - "description": "MasterClientConnectionOverrides provides overrides to the client connection used to connect to the master.", + "description": "masterClientConnectionOverrides provides overrides to the client connection used to connect to the master.", "$ref": "#/definitions/com.github.openshift.api.legacyconfig.v1.ClientConnectionOverrides" }, "masterKubeConfig": { - "description": "MasterKubeConfig is a filename for the .kubeconfig file that describes how to connect this node to the master", + "description": "masterKubeConfig is a filename for the .kubeconfig file that describes how to connect this node to the master", "type": "string", "default": "" }, "networkConfig": { - "description": "NetworkConfig provides network options for the node", + "description": "networkConfig provides network options for the node", "default": {}, "$ref": "#/definitions/com.github.openshift.api.legacyconfig.v1.NodeNetworkConfig" }, @@ -17187,16 +17658,16 @@ "default": "" }, "nodeName": { - "description": "NodeName is the value used to identify this particular node in the cluster. If possible, this should be your fully qualified hostname. If you're describing a set of static nodes to the master, this value must match one of the values in the list", + "description": "nodeName is the value used to identify this particular node in the cluster. If possible, this should be your fully qualified hostname. 
If you're describing a set of static nodes to the master, this value must match one of the values in the list", "type": "string", "default": "" }, "podManifestConfig": { - "description": "PodManifestConfig holds the configuration for enabling the Kubelet to create pods based from a manifest file(s) placed locally on the node", + "description": "podManifestConfig holds the configuration for enabling the Kubelet to create pods based from a manifest file(s) placed locally on the node", "$ref": "#/definitions/com.github.openshift.api.legacyconfig.v1.PodManifestConfig" }, "proxyArguments": { - "description": "ProxyArguments are key value pairs that will be passed directly to the Proxy that match the Proxy's command line arguments. These are not migrated or validated, so if you use them they may become invalid. These values override other settings in NodeConfig which may cause invalid configurations.", + "description": "proxyArguments are key value pairs that will be passed directly to the Proxy that match the Proxy's command line arguments. These are not migrated or validated, so if you use them they may become invalid. These values override other settings in NodeConfig which may cause invalid configurations.", "type": "object", "additionalProperties": { "type": "array", @@ -17207,17 +17678,17 @@ } }, "servingInfo": { - "description": "ServingInfo describes how to start serving", + "description": "servingInfo describes how to start serving", "default": {}, "$ref": "#/definitions/com.github.openshift.api.legacyconfig.v1.ServingInfo" }, "volumeConfig": { - "description": "VolumeConfig contains options for configuring volumes on the node.", + "description": "volumeConfig contains options for configuring volumes on the node.", "default": {}, "$ref": "#/definitions/com.github.openshift.api.legacyconfig.v1.NodeVolumeConfig" }, "volumeDirectory": { - "description": "VolumeDirectory is the directory that volumes will be stored under", + "description": "volumeDirectory is the directory that volumes will be stored under", "type": "string", "default": "" } @@ -17238,7 +17709,7 @@ "default": 0 }, "networkPluginName": { - "description": "NetworkPluginName is a string specifying the networking plugin", + "description": "networkPluginName is a string specifying the networking plugin", "type": "string", "default": "" } @@ -17252,7 +17723,7 @@ ], "properties": { "localQuota": { - "description": "LocalQuota contains options for controlling local volume quota on the node.", + "description": "localQuota contains options for controlling local volume quota on the node.", "default": {}, "$ref": "#/definitions/com.github.openshift.api.legacyconfig.v1.LocalQuota" } @@ -17275,22 +17746,22 @@ ], "properties": { "alwaysShowProviderSelection": { - "description": "AlwaysShowProviderSelection will force the provider selection page to render even when there is only a single provider.", + "description": "alwaysShowProviderSelection will force the provider selection page to render even when there is only a single provider.", "type": "boolean", "default": false }, "assetPublicURL": { - "description": "AssetPublicURL is used for building valid client redirect URLs for external access", + "description": "assetPublicURL is used for building valid client redirect URLs for external access", "type": "string", "default": "" }, "grantConfig": { - "description": "GrantConfig describes how to handle grants", + "description": "grantConfig describes how to handle grants", "default": {}, "$ref": 
"#/definitions/com.github.openshift.api.legacyconfig.v1.GrantConfig" }, "identityProviders": { - "description": "IdentityProviders is an ordered list of ways for a user to identify themselves", + "description": "identityProviders is an ordered list of ways for a user to identify themselves", "type": "array", "items": { "default": {}, @@ -17298,29 +17769,29 @@ } }, "masterCA": { - "description": "MasterCA is the CA for verifying the TLS connection back to the MasterURL.", + "description": "masterCA is the CA for verifying the TLS connection back to the MasterURL.", "type": "string" }, "masterPublicURL": { - "description": "MasterPublicURL is used for building valid client redirect URLs for internal and external access", + "description": "masterPublicURL is used for building valid client redirect URLs for internal and external access", "type": "string", "default": "" }, "masterURL": { - "description": "MasterURL is used for making server-to-server calls to exchange authorization codes for access tokens", + "description": "masterURL is used for making server-to-server calls to exchange authorization codes for access tokens", "type": "string", "default": "" }, "sessionConfig": { - "description": "SessionConfig hold information about configuring sessions.", + "description": "sessionConfig hold information about configuring sessions.", "$ref": "#/definitions/com.github.openshift.api.legacyconfig.v1.SessionConfig" }, "templates": { - "description": "Templates allow you to customize pages like the login page.", + "description": "templates allow you to customize pages like the login page.", "$ref": "#/definitions/com.github.openshift.api.legacyconfig.v1.OAuthTemplates" }, "tokenConfig": { - "description": "TokenConfig contains options for authorization and access tokens", + "description": "tokenConfig contains options for authorization and access tokens", "default": {}, "$ref": "#/definitions/com.github.openshift.api.legacyconfig.v1.TokenConfig" } @@ -17336,17 +17807,17 @@ ], "properties": { "error": { - "description": "Error is a path to a file containing a go template used to render error pages during the authentication or grant flow If unspecified, the default error page is used.", + "description": "error is a path to a file containing a go template used to render error pages during the authentication or grant flow If unspecified, the default error page is used.", "type": "string", "default": "" }, "login": { - "description": "Login is a path to a file containing a go template used to render the login page. If unspecified, the default login page is used.", + "description": "login is a path to a file containing a go template used to render the login page. If unspecified, the default login page is used.", "type": "string", "default": "" }, "providerSelection": { - "description": "ProviderSelection is a path to a file containing a go template used to render the provider selection page. If unspecified, the default provider selection page is used.", + "description": "providerSelection is a path to a file containing a go template used to render the provider selection page. If unspecified, the default provider selection page is used.", "type": "string", "default": "" } @@ -17363,7 +17834,7 @@ ], "properties": { "email": { - "description": "Email is the list of claims whose values should be used as the email address. Optional. If unspecified, no email is set for the identity", + "description": "email is the list of claims whose values should be used as the email address. Optional. 
If unspecified, no email is set for the identity", "type": "array", "items": { "type": "string", @@ -17371,7 +17842,7 @@ } }, "id": { - "description": "ID is the list of claims whose values should be used as the user ID. Required. OpenID standard identity claim is \"sub\"", + "description": "id is the list of claims whose values should be used as the user ID. Required. OpenID standard identity claim is \"sub\"", "type": "array", "items": { "type": "string", @@ -17379,7 +17850,7 @@ } }, "name": { - "description": "Name is the list of claims whose values should be used as the display name. Optional. If unspecified, no display name is set for the identity", + "description": "name is the list of claims whose values should be used as the display name. Optional. If unspecified, no display name is set for the identity", "type": "array", "items": { "type": "string", @@ -17387,7 +17858,7 @@ } }, "preferredUsername": { - "description": "PreferredUsername is the list of claims whose values should be used as the preferred username. If unspecified, the preferred username is determined from the value of the id claim", + "description": "preferredUsername is the list of claims whose values should be used as the preferred username. If unspecified, the preferred username is determined from the value of the id claim", "type": "array", "items": { "type": "string", @@ -17414,26 +17885,26 @@ "type": "string" }, "ca": { - "description": "CA is the optional trusted certificate authority bundle to use when making requests to the server If empty, the default system roots are used", + "description": "ca is the optional trusted certificate authority bundle to use when making requests to the server If empty, the default system roots are used", "type": "string", "default": "" }, "claims": { - "description": "Claims mappings", + "description": "claims mappings", "default": {}, "$ref": "#/definitions/com.github.openshift.api.legacyconfig.v1.OpenIDClaims" }, "clientID": { - "description": "ClientID is the oauth client ID", + "description": "clientID is the oauth client ID", "type": "string", "default": "" }, "clientSecret": { - "description": "ClientSecret is the oauth client secret", + "description": "clientSecret is the oauth client secret", "$ref": "#/definitions/com.github.openshift.api.legacyconfig.v1.StringSource" }, "extraAuthorizeParameters": { - "description": "ExtraAuthorizeParameters are any custom parameters to add to the authorize request.", + "description": "extraAuthorizeParameters are any custom parameters to add to the authorize request.", "type": "object", "additionalProperties": { "type": "string", @@ -17441,7 +17912,7 @@ } }, "extraScopes": { - "description": "ExtraScopes are any scopes to request in addition to the standard \"openid\" scope.", + "description": "extraScopes are any scopes to request in addition to the standard \"openid\" scope.", "type": "array", "items": { "type": "string", @@ -17453,7 +17924,7 @@ "type": "string" }, "urls": { - "description": "URLs to use to authenticate", + "description": "urls to use to authenticate", "default": {}, "$ref": "#/definitions/com.github.openshift.api.legacyconfig.v1.OpenIDURLs" } @@ -17469,17 +17940,17 @@ ], "properties": { "authorize": { - "description": "Authorize is the oauth authorization URL", + "description": "authorize is the oauth authorization URL", "type": "string", "default": "" }, "token": { - "description": "Token is the oauth token granting URL", + "description": "token is the oauth token granting URL", "type": "string", "default": "" }, 
"userInfo": { - "description": "UserInfo is the optional userinfo URL. If present, a granted access_token is used to request claims If empty, a granted id_token is parsed for claims", + "description": "userInfo is the optional userinfo URL. If present, a granted access_token is used to request claims If empty, a granted id_token is parsed for claims", "type": "string", "default": "" } @@ -17494,13 +17965,13 @@ ], "properties": { "fileCheckIntervalSeconds": { - "description": "FileCheckIntervalSeconds is the interval in seconds for checking the manifest file(s) for new data The interval needs to be a positive value", + "description": "fileCheckIntervalSeconds is the interval in seconds for checking the manifest file(s) for new data The interval needs to be a positive value", "type": "integer", "format": "int64", "default": 0 }, "path": { - "description": "Path specifies the path for the pod manifest file or directory If its a directory, its expected to contain on or more manifest files This is used by the Kubelet to create pods on the node", + "description": "path specifies the path for the pod manifest file or directory If its a directory, its expected to contain on or more manifest files This is used by the Kubelet to create pods on the node", "type": "string", "default": "" } @@ -17514,7 +17985,7 @@ ], "properties": { "userAgentMatchingConfig": { - "description": "UserAgentMatchingConfig controls how API calls from *voluntarily* identifying clients will be handled. THIS DOES NOT DEFEND AGAINST MALICIOUS CLIENTS!", + "description": "userAgentMatchingConfig controls how API calls from *voluntarily* identifying clients will be handled. THIS DOES NOT DEFEND AGAINST MALICIOUS CLIENTS!", "default": {}, "$ref": "#/definitions/com.github.openshift.api.legacyconfig.v1.UserAgentMatchingConfig" } @@ -17531,22 +18002,22 @@ ], "properties": { "defaultNodeSelector": { - "description": "DefaultNodeSelector holds default project node label selector", + "description": "defaultNodeSelector holds default project node label selector", "type": "string", "default": "" }, "projectRequestMessage": { - "description": "ProjectRequestMessage is the string presented to a user if they are unable to request a project via the projectrequest api endpoint", + "description": "projectRequestMessage is the string presented to a user if they are unable to request a project via the projectrequest api endpoint", "type": "string", "default": "" }, "projectRequestTemplate": { - "description": "ProjectRequestTemplate is the template to use for creating projects in response to projectrequest. It is in the format namespace/template and it is optional. If it is not specified, a default template is used.", + "description": "projectRequestTemplate is the template to use for creating projects in response to projectrequest. It is in the format namespace/template and it is optional. If it is not specified, a default template is used.", "type": "string", "default": "" }, "securityAllocator": { - "description": "SecurityAllocator controls the automatic allocation of UIDs and MCS labels to a project. If nil, allocation is disabled.", + "description": "securityAllocator controls the automatic allocation of UIDs and MCS labels to a project. 
If nil, allocation is disabled.", "$ref": "#/definitions/com.github.openshift.api.legacyconfig.v1.SecurityAllocator" } } @@ -17567,7 +18038,7 @@ ], "properties": { "groupMembershipAttributes": { - "description": "GroupMembershipAttributes defines which attributes on an LDAP group entry will be interpreted as its members. The values contained in those attributes must be queryable by your UserUIDAttribute", + "description": "groupMembershipAttributes defines which attributes on an LDAP group entry will be interpreted as its members. The values contained in those attributes must be queryable by your UserUIDAttribute", "type": "array", "items": { "type": "string", @@ -17575,7 +18046,7 @@ } }, "groupNameAttributes": { - "description": "GroupNameAttributes defines which attributes on an LDAP group entry will be interpreted as its name to use for an OpenShift group", + "description": "groupNameAttributes defines which attributes on an LDAP group entry will be interpreted as its name to use for an OpenShift group", "type": "array", "items": { "type": "string", @@ -17593,17 +18064,17 @@ "$ref": "#/definitions/com.github.openshift.api.legacyconfig.v1.LDAPQuery" }, "tolerateMemberNotFoundErrors": { - "description": "TolerateMemberNotFoundErrors determines the behavior of the LDAP sync job when missing user entries are encountered. If 'true', an LDAP query for users that doesn't find any will be tolerated and an only and error will be logged. If 'false', the LDAP sync job will fail if a query for users doesn't find any. The default value is 'false'. Misconfigured LDAP sync jobs with this flag set to 'true' can cause group membership to be removed, so it is recommended to use this flag with caution.", + "description": "tolerateMemberNotFoundErrors determines the behavior of the LDAP sync job when missing user entries are encountered. If 'true', an LDAP query for users that doesn't find any will be tolerated and an only and error will be logged. If 'false', the LDAP sync job will fail if a query for users doesn't find any. The default value is 'false'. Misconfigured LDAP sync jobs with this flag set to 'true' can cause group membership to be removed, so it is recommended to use this flag with caution.", "type": "boolean", "default": false }, "tolerateMemberOutOfScopeErrors": { - "description": "TolerateMemberOutOfScopeErrors determines the behavior of the LDAP sync job when out-of-scope user entries are encountered. If 'true', an LDAP query for a user that falls outside of the base DN given for the all user query will be tolerated and only an error will be logged. If 'false', the LDAP sync job will fail if a user query would search outside of the base DN specified by the all user query. Misconfigured LDAP sync jobs with this flag set to 'true' can result in groups missing users, so it is recommended to use this flag with caution.", + "description": "tolerateMemberOutOfScopeErrors determines the behavior of the LDAP sync job when out-of-scope user entries are encountered. If 'true', an LDAP query for a user that falls outside of the base DN given for the all user query will be tolerated and only an error will be logged. If 'false', the LDAP sync job will fail if a user query would search outside of the base DN specified by the all user query. 
Misconfigured LDAP sync jobs with this flag set to 'true' can result in groups missing users, so it is recommended to use this flag with caution.", "type": "boolean", "default": false }, "userNameAttributes": { - "description": "UserNameAttributes defines which attributes on an LDAP user entry will be used, in order, as its OpenShift user name. The first attribute with a non-empty value is used. This should match your PreferredUsername setting for your LDAPPasswordIdentityProvider", + "description": "userNameAttributes defines which attributes on an LDAP user entry will be used, in order, as its OpenShift user name. The first attribute with a non-empty value is used. This should match your PreferredUsername setting for your LDAPPasswordIdentityProvider", "type": "array", "items": { "type": "string", @@ -17611,7 +18082,7 @@ } }, "userUIDAttribute": { - "description": "UserUIDAttribute defines which attribute on an LDAP user entry will be interpreted as its unique identifier. It must correspond to values that will be found from the GroupMembershipAttributes", + "description": "userUIDAttribute defines which attribute on an LDAP user entry will be interpreted as its unique identifier. It must correspond to values that will be found from the GroupMembershipAttributes", "type": "string", "default": "" }, @@ -17630,12 +18101,12 @@ ], "properties": { "domainName": { - "description": "DomainName specifies a domain name for the registry In case the registry use non-standard (80 or 443) port, the port should be included in the domain name as well.", + "description": "domainName specifies a domain name for the registry In case the registry use non-standard (80 or 443) port, the port should be included in the domain name as well.", "type": "string", "default": "" }, "insecure": { - "description": "Insecure indicates whether the registry is secure (https) or insecure (http) By default (if not specified) the registry is assumed as secure.", + "description": "insecure indicates whether the registry is secure (https) or insecure (http) By default (if not specified) the registry is assumed as secure.", "type": "boolean" } } @@ -17651,22 +18122,22 @@ ], "properties": { "ca": { - "description": "CA is the CA for verifying TLS connections", + "description": "ca is the CA for verifying TLS connections", "type": "string", "default": "" }, "certFile": { - "description": "CertFile is a file containing a PEM-encoded certificate", + "description": "certFile is a file containing a PEM-encoded certificate", "type": "string", "default": "" }, "keyFile": { - "description": "KeyFile is a file containing a PEM-encoded private key for the certificate specified by CertFile", + "description": "keyFile is a file containing a PEM-encoded private key for the certificate specified by CertFile", "type": "string", "default": "" }, "url": { - "description": "URL is the remote URL to connect to", + "description": "url is the remote URL to connect to", "type": "string", "default": "" } @@ -17684,12 +18155,12 @@ ], "properties": { "clientCA": { - "description": "ClientCA is a file with the trusted signer certs. It is required.", + "description": "clientCA is a file with the trusted signer certs. 
It is required.", "type": "string", "default": "" }, "clientCommonNames": { - "description": "ClientCommonNames is a required list of common names to require a match from.", + "description": "clientCommonNames is a required list of common names to require a match from.", "type": "array", "items": { "type": "string", @@ -17697,7 +18168,7 @@ } }, "extraHeaderPrefixes": { - "description": "ExtraHeaderPrefixes is the set of request header prefixes to inspect for user extra. X-Remote-Extra- is suggested.", + "description": "extraHeaderPrefixes is the set of request header prefixes to inspect for user extra. X-Remote-Extra- is suggested.", "type": "array", "items": { "type": "string", @@ -17713,7 +18184,7 @@ } }, "usernameHeaders": { - "description": "UsernameHeaders is the list of headers to check for user information. First hit wins.", + "description": "usernameHeaders is the list of headers to check for user information. First hit wins.", "type": "array", "items": { "type": "string", @@ -17741,17 +18212,17 @@ "type": "string" }, "challengeURL": { - "description": "ChallengeURL is a URL to redirect unauthenticated /authorize requests to Unauthenticated requests from OAuth clients which expect WWW-Authenticate challenges will be redirected here ${url} is replaced with the current URL, escaped to be safe in a query parameter\n https://www.example.com/sso-login?then=${url}\n${query} is replaced with the current query string\n https://www.example.com/auth-proxy/oauth/authorize?${query}", + "description": "challengeURL is a URL to redirect unauthenticated /authorize requests to Unauthenticated requests from OAuth clients which expect WWW-Authenticate challenges will be redirected here ${url} is replaced with the current URL, escaped to be safe in a query parameter\n https://www.example.com/sso-login?then=${url}\n${query} is replaced with the current query string\n https://www.example.com/auth-proxy/oauth/authorize?${query}", "type": "string", "default": "" }, "clientCA": { - "description": "ClientCA is a file with the trusted signer certs. If empty, no request verification is done, and any direct request to the OAuth server can impersonate any identity from this provider, merely by setting a request header.", + "description": "clientCA is a file with the trusted signer certs. If empty, no request verification is done, and any direct request to the OAuth server can impersonate any identity from this provider, merely by setting a request header.", "type": "string", "default": "" }, "clientCommonNames": { - "description": "ClientCommonNames is an optional list of common names to require a match from. If empty, any client certificate validated against the clientCA bundle is considered authoritative.", + "description": "clientCommonNames is an optional list of common names to require a match from. 
If empty, any client certificate validated against the clientCA bundle is considered authoritative.", "type": "array", "items": { "type": "string", @@ -17759,7 +18230,7 @@ } }, "emailHeaders": { - "description": "EmailHeaders is the set of headers to check for the email address", + "description": "emailHeaders is the set of headers to check for the email address", "type": "array", "items": { "type": "string", @@ -17767,7 +18238,7 @@ } }, "headers": { - "description": "Headers is the set of headers to check for identity information", + "description": "headers is the set of headers to check for identity information", "type": "array", "items": { "type": "string", @@ -17779,12 +18250,12 @@ "type": "string" }, "loginURL": { - "description": "LoginURL is a URL to redirect unauthenticated /authorize requests to Unauthenticated requests from OAuth clients which expect interactive logins will be redirected here ${url} is replaced with the current URL, escaped to be safe in a query parameter\n https://www.example.com/sso-login?then=${url}\n${query} is replaced with the current query string\n https://www.example.com/auth-proxy/oauth/authorize?${query}", + "description": "loginURL is a URL to redirect unauthenticated /authorize requests to Unauthenticated requests from OAuth clients which expect interactive logins will be redirected here ${url} is replaced with the current URL, escaped to be safe in a query parameter\n https://www.example.com/sso-login?then=${url}\n${query} is replaced with the current query string\n https://www.example.com/auth-proxy/oauth/authorize?${query}", "type": "string", "default": "" }, "nameHeaders": { - "description": "NameHeaders is the set of headers to check for the display name", + "description": "nameHeaders is the set of headers to check for the display name", "type": "array", "items": { "type": "string", @@ -17792,7 +18263,7 @@ } }, "preferredUsernameHeaders": { - "description": "PreferredUsernameHeaders is the set of headers to check for the preferred username", + "description": "preferredUsernameHeaders is the set of headers to check for the preferred username", "type": "array", "items": { "type": "string", @@ -17809,7 +18280,7 @@ ], "properties": { "subdomain": { - "description": "Subdomain is the suffix appended to $service.$namespace. to form the default route hostname DEPRECATED: This field is being replaced by routers setting their own defaults. This is the \"default\" route.", + "description": "subdomain is the suffix appended to $service.$namespace. to form the default route hostname DEPRECATED: This field is being replaced by routers setting their own defaults. This is the \"default\" route.", "type": "string", "default": "" } @@ -17825,18 +18296,18 @@ ], "properties": { "mcsAllocatorRange": { - "description": "MCSAllocatorRange defines the range of MCS categories that will be assigned to namespaces. The format is \"/[,]\". The default is \"s0/2\" and will allocate from c0 -> c1023, which means a total of 535k labels are available (1024 choose 2 ~ 535k). If this value is changed after startup, new projects may receive labels that are already allocated to other projects. 
Prefix may be any valid SELinux set of terms (including user, role, and type), although leaving them as the default will allow the server to set them automatically.\n\nExamples: * s0:/2 - Allocate labels from s0:c0,c0 to s0:c511,c511 * s0:/2,512 - Allocate labels from s0:c0,c0,c0 to s0:c511,c511,511", + "description": "mcsAllocatorRange defines the range of MCS categories that will be assigned to namespaces. The format is \"/[,]\". The default is \"s0/2\" and will allocate from c0 -> c1023, which means a total of 535k labels are available (1024 choose 2 ~ 535k). If this value is changed after startup, new projects may receive labels that are already allocated to other projects. Prefix may be any valid SELinux set of terms (including user, role, and type), although leaving them as the default will allow the server to set them automatically.\n\nExamples: * s0:/2 - Allocate labels from s0:c0,c0 to s0:c511,c511 * s0:/2,512 - Allocate labels from s0:c0,c0,c0 to s0:c511,c511,511", "type": "string", "default": "" }, "mcsLabelsPerProject": { - "description": "MCSLabelsPerProject defines the number of labels that should be reserved per project. The default is 5 to match the default UID and MCS ranges (100k namespaces, 535k/5 labels).", + "description": "mcsLabelsPerProject defines the number of labels that should be reserved per project. The default is 5 to match the default UID and MCS ranges (100k namespaces, 535k/5 labels).", "type": "integer", "format": "int32", "default": 0 }, "uidAllocatorRange": { - "description": "UIDAllocatorRange defines the total set of Unix user IDs (UIDs) that will be allocated to projects automatically, and the size of the block each namespace gets. For example, 1000-1999/10 will allocate ten UIDs per namespace, and will be able to allocate up to 100 blocks before running out of space. The default is to allocate from 1 billion to 2 billion in 10k blocks (which is the expected size of the ranges container images will use once user namespaces are started).", + "description": "uidAllocatorRange defines the total set of Unix user IDs (UIDs) that will be allocated to projects automatically, and the size of the block each namespace gets. For example, 1000-1999/10 will allocate ten UIDs per namespace, and will be able to allocate up to 100 blocks before running out of space. The default is to allocate from 1 billion to 2 billion in 10k blocks (which is the expected size of the ranges container images will use once user namespaces are started).", "type": "string", "default": "" } @@ -17854,12 +18325,12 @@ ], "properties": { "limitSecretReferences": { - "description": "LimitSecretReferences controls whether or not to allow a service account to reference any secret in a namespace without explicitly referencing them", + "description": "limitSecretReferences controls whether or not to allow a service account to reference any secret in a namespace without explicitly referencing them", "type": "boolean", "default": false }, "managedNames": { - "description": "ManagedNames is a list of service account names that will be auto-created in every namespace. If no names are specified, the ServiceAccountsController will not be started.", + "description": "managedNames is a list of service account names that will be auto-created in every namespace. 
If no names are specified, the ServiceAccountsController will not be started.", "type": "array", "items": { "type": "string", @@ -17867,17 +18338,17 @@ } }, "masterCA": { - "description": "MasterCA is the CA for verifying the TLS connection back to the master. The service account controller will automatically inject the contents of this file into pods so they can verify connections to the master.", + "description": "masterCA is the CA for verifying the TLS connection back to the master. The service account controller will automatically inject the contents of this file into pods so they can verify connections to the master.", "type": "string", "default": "" }, "privateKeyFile": { - "description": "PrivateKeyFile is a file containing a PEM-encoded private RSA key, used to sign service account tokens. If no private key is specified, the service account TokensController will not be started.", + "description": "privateKeyFile is a file containing a PEM-encoded private RSA key, used to sign service account tokens. If no private key is specified, the service account TokensController will not be started.", "type": "string", "default": "" }, "publicKeyFiles": { - "description": "PublicKeyFiles is a list of files, each containing a PEM-encoded public RSA key. (If any file contains a private key, the public portion of the key is used) The list of public keys is used to verify presented service account tokens. Each key is tried in order until the list is exhausted or verification succeeds. If no keys are specified, no service account authentication will be available.", + "description": "publicKeyFiles is a list of files, each containing a PEM-encoded public RSA key. (If any file contains a private key, the public portion of the key is used) The list of public keys is used to verify presented service account tokens. Each key is tried in order until the list is exhausted or verification succeeds. If no keys are specified, no service account authentication will be available.", "type": "array", "items": { "type": "string", @@ -17894,7 +18365,7 @@ ], "properties": { "signer": { - "description": "Signer holds the signing information used to automatically sign serving certificates. If this value is nil, then certs are not signed automatically.", + "description": "signer holds the signing information used to automatically sign serving certificates. If this value is nil, then certs are not signed automatically.", "$ref": "#/definitions/com.github.openshift.api.legacyconfig.v1.CertInfo" } } @@ -17912,22 +18383,22 @@ ], "properties": { "bindAddress": { - "description": "BindAddress is the ip:port to serve on", + "description": "bindAddress is the ip:port to serve on", "type": "string", "default": "" }, "bindNetwork": { - "description": "BindNetwork is the type of network to bind to - defaults to \"tcp4\", accepts \"tcp\", \"tcp4\", and \"tcp6\"", + "description": "bindNetwork is the type of network to bind to - defaults to \"tcp4\", accepts \"tcp\", \"tcp4\", and \"tcp6\"", "type": "string", "default": "" }, "certFile": { - "description": "CertFile is a file containing a PEM-encoded certificate", + "description": "certFile is a file containing a PEM-encoded certificate", "type": "string", "default": "" }, "cipherSuites": { - "description": "CipherSuites contains an overridden list of ciphers for the server to support. Values must match cipher suite IDs from https://golang.org/pkg/crypto/tls/#pkg-constants", + "description": "cipherSuites contains an overridden list of ciphers for the server to support. 
Values must match cipher suite IDs from https://golang.org/pkg/crypto/tls/#pkg-constants", "type": "array", "items": { "type": "string", @@ -17935,21 +18406,21 @@ } }, "clientCA": { - "description": "ClientCA is the certificate bundle for all the signers that you'll recognize for incoming client certificates", + "description": "clientCA is the certificate bundle for all the signers that you'll recognize for incoming client certificates", "type": "string", "default": "" }, "keyFile": { - "description": "KeyFile is a file containing a PEM-encoded private key for the certificate specified by CertFile", + "description": "keyFile is a file containing a PEM-encoded private key for the certificate specified by CertFile", "type": "string", "default": "" }, "minTLSVersion": { - "description": "MinTLSVersion is the minimum TLS version supported. Values must match version names from https://golang.org/pkg/crypto/tls/#pkg-constants", + "description": "minTLSVersion is the minimum TLS version supported. Values must match version names from https://golang.org/pkg/crypto/tls/#pkg-constants", "type": "string" }, "namedCertificates": { - "description": "NamedCertificates is a list of certificates to use to secure requests to specific hostnames", + "description": "namedCertificates is a list of certificates to use to secure requests to specific hostnames", "type": "array", "items": { "default": {}, @@ -17968,18 +18439,18 @@ ], "properties": { "sessionMaxAgeSeconds": { - "description": "SessionMaxAgeSeconds specifies how long created sessions last. Used by AuthRequestHandlerSession", + "description": "sessionMaxAgeSeconds specifies how long created sessions last. Used by AuthRequestHandlerSession", "type": "integer", "format": "int32", "default": 0 }, "sessionName": { - "description": "SessionName is the cookie name used to store the session", + "description": "sessionName is the cookie name used to store the session", "type": "string", "default": "" }, "sessionSecretsFile": { - "description": "SessionSecretsFile is a reference to a file containing a serialized SessionSecrets object If no file is specified, a random signing and encryption key are generated at each server start", + "description": "sessionSecretsFile is a reference to a file containing a serialized SessionSecrets object If no file is specified, a random signing and encryption key are generated at each server start", "type": "string", "default": "" } @@ -17994,12 +18465,12 @@ ], "properties": { "authentication": { - "description": "Authentication is used to authenticate sessions using HMAC. Recommended to use a secret with 32 or 64 bytes.", + "description": "authentication is used to authenticate sessions using HMAC. Recommended to use a secret with 32 or 64 bytes.", "type": "string", "default": "" }, "encryption": { - "description": "Encryption is used to encrypt sessions. Must be 16, 24, or 32 characters long, to select AES-128, AES-", + "description": "encryption is used to encrypt sessions. Must be 16, 24, or 32 characters long, to select AES-128, AES-", "type": "string", "default": "" } @@ -18021,7 +18492,7 @@ "type": "string" }, "secrets": { - "description": "Secrets is a list of secrets New sessions are signed and encrypted using the first secret. Existing sessions are decrypted/authenticated by each secret until one succeeds. This allows rotating secrets.", + "description": "secrets is a list of secrets New sessions are signed and encrypted using the first secret. 
Existing sessions are decrypted/authenticated by each secret until one succeeds. This allows rotating secrets.", "type": "array", "items": { "default": {}, @@ -18051,22 +18522,22 @@ ], "properties": { "env": { - "description": "Env specifies an envvar containing the cleartext value, or an encrypted value if the keyFile is specified.", + "description": "env specifies an envvar containing the cleartext value, or an encrypted value if the keyFile is specified.", "type": "string", "default": "" }, "file": { - "description": "File references a file containing the cleartext value, or an encrypted value if a keyFile is specified.", + "description": "file references a file containing the cleartext value, or an encrypted value if a keyFile is specified.", "type": "string", "default": "" }, "keyFile": { - "description": "KeyFile references a file containing the key to use to decrypt the value.", + "description": "keyFile references a file containing the key to use to decrypt the value.", "type": "string", "default": "" }, "value": { - "description": "Value specifies the cleartext value, or an encrypted value if keyFile is specified.", + "description": "value specifies the cleartext value, or an encrypted value if keyFile is specified.", "type": "string", "default": "" } @@ -18083,22 +18554,22 @@ ], "properties": { "env": { - "description": "Env specifies an envvar containing the cleartext value, or an encrypted value if the keyFile is specified.", + "description": "env specifies an envvar containing the cleartext value, or an encrypted value if the keyFile is specified.", "type": "string", "default": "" }, "file": { - "description": "File references a file containing the cleartext value, or an encrypted value if a keyFile is specified.", + "description": "file references a file containing the cleartext value, or an encrypted value if a keyFile is specified.", "type": "string", "default": "" }, "keyFile": { - "description": "KeyFile references a file containing the key to use to decrypt the value.", + "description": "keyFile references a file containing the key to use to decrypt the value.", "type": "string", "default": "" }, "value": { - "description": "Value specifies the cleartext value, or an encrypted value if keyFile is specified.", + "description": "value specifies the cleartext value, or an encrypted value if keyFile is specified.", "type": "string", "default": "" } @@ -18113,18 +18584,18 @@ ], "properties": { "accessTokenInactivityTimeoutSeconds": { - "description": "AccessTokenInactivityTimeoutSeconds defined the default token inactivity timeout for tokens granted by any client. Setting it to nil means the feature is completely disabled (default) The default setting can be overriden on OAuthClient basis. The value represents the maximum amount of time that can occur between consecutive uses of the token. Tokens become invalid if they are not used within this temporal window. The user will need to acquire a new token to regain access once a token times out. Valid values are: - 0: Tokens never time out - X: Tokens time out if there is no activity for X seconds The current minimum allowed value for X is 300 (5 minutes)", + "description": "accessTokenInactivityTimeoutSeconds defined the default token inactivity timeout for tokens granted by any client. Setting it to nil means the feature is completely disabled (default) The default setting can be overriden on OAuthClient basis. The value represents the maximum amount of time that can occur between consecutive uses of the token. 
Tokens become invalid if they are not used within this temporal window. The user will need to acquire a new token to regain access once a token times out. Valid values are: - 0: Tokens never time out - X: Tokens time out if there is no activity for X seconds The current minimum allowed value for X is 300 (5 minutes)", "type": "integer", "format": "int32" }, "accessTokenMaxAgeSeconds": { - "description": "AccessTokenMaxAgeSeconds defines the maximum age of access tokens", + "description": "accessTokenMaxAgeSeconds defines the maximum age of access tokens", "type": "integer", "format": "int32", "default": 0 }, "authorizeTokenMaxAgeSeconds": { - "description": "AuthorizeTokenMaxAgeSeconds defines the maximum age of authorize tokens", + "description": "authorizeTokenMaxAgeSeconds defines the maximum age of authorize tokens", "type": "integer", "format": "int32", "default": 0 @@ -18141,7 +18612,7 @@ ], "properties": { "httpVerbs": { - "description": "HTTPVerbs specifies which HTTP verbs should be matched. An empty list means \"match all verbs\".", + "description": "httpVerbs specifies which HTTP verbs should be matched. An empty list means \"match all verbs\".", "type": "array", "items": { "type": "string", @@ -18154,7 +18625,7 @@ "default": "" }, "rejectionMessage": { - "description": "RejectionMessage is the message shown when rejecting a client. If it is not a set, the default message is used.", + "description": "rejectionMessage is the message shown when rejecting a client. If it is not a set, the default message is used.", "type": "string", "default": "" } @@ -18169,7 +18640,7 @@ ], "properties": { "httpVerbs": { - "description": "HTTPVerbs specifies which HTTP verbs should be matched. An empty list means \"match all verbs\".", + "description": "httpVerbs specifies which HTTP verbs should be matched. An empty list means \"match all verbs\".", "type": "array", "items": { "type": "string", @@ -18193,7 +18664,7 @@ ], "properties": { "defaultRejectionMessage": { - "description": "DefaultRejectionMessage is the message shown when rejecting a client. If it is not a set, a generic message is given.", + "description": "defaultRejectionMessage is the message shown when rejecting a client. If it is not a set, a generic message is given.", "type": "string", "default": "" }, @@ -18224,12 +18695,12 @@ ], "properties": { "cacheTTL": { - "description": "CacheTTL indicates how long an authentication result should be cached. It takes a valid time duration string (e.g. \"5m\"). If empty, you get a default timeout of 2 minutes. If zero (e.g. \"0m\"), caching is disabled", + "description": "cacheTTL indicates how long an authentication result should be cached. It takes a valid time duration string (e.g. \"5m\"). If empty, you get a default timeout of 2 minutes. If zero (e.g. 
\"0m\"), caching is disabled", "type": "string", "default": "" }, "configFile": { - "description": "ConfigFile is a path to a Kubeconfig file with the webhook configuration", + "description": "configFile is a path to a Kubeconfig file with the webhook configuration", "type": "string", "default": "" } @@ -18240,12 +18711,12 @@ "type": "object", "properties": { "placement": { - "description": "Placement configures the placement information for this instance.", + "description": "placement configures the placement information for this instance.", "default": {}, "$ref": "#/definitions/com.github.openshift.api.machine.v1.AWSFailureDomainPlacement" }, "subnet": { - "description": "Subnet is a reference to the subnet to use for this instance.", + "description": "subnet is a reference to the subnet to use for this instance.", "$ref": "#/definitions/com.github.openshift.api.machine.v1.AWSResourceReference" } } @@ -18258,7 +18729,7 @@ ], "properties": { "availabilityZone": { - "description": "AvailabilityZone is the availability zone of the instance.", + "description": "availabilityZone is the availability zone of the instance.", "type": "string", "default": "" } @@ -18272,12 +18743,12 @@ ], "properties": { "name": { - "description": "Name of the filter. Filter names are case-sensitive.", + "description": "name of the filter. Filter names are case-sensitive.", "type": "string", "default": "" }, "values": { - "description": "Values includes one or more filter values. Filter values are case-sensitive.", + "description": "values includes one or more filter values. Filter values are case-sensitive.", "type": "array", "items": { "type": "string", @@ -18295,11 +18766,11 @@ ], "properties": { "arn": { - "description": "ARN of resource.", + "description": "arn of resource.", "type": "string" }, "filters": { - "description": "Filters is a set of filters used to identify a resource.", + "description": "filters is a set of filters used to identify a resource.", "type": "array", "items": { "default": {}, @@ -18308,11 +18779,11 @@ "x-kubernetes-list-type": "atomic" }, "id": { - "description": "ID of resource.", + "description": "id of resource.", "type": "string" }, "type": { - "description": "Type determines how the reference will fetch the AWS resource.", + "description": "type determines how the reference will fetch the AWS resource.", "type": "string", "default": "" } @@ -18346,12 +18817,12 @@ "type": "string" }, "bandwidth": { - "description": "Bandwidth describes the internet bandwidth strategy for the instance", + "description": "bandwidth describes the internet bandwidth strategy for the instance", "default": {}, "$ref": "#/definitions/com.github.openshift.api.machine.v1.BandwidthProperties" }, "credentialsSecret": { - "description": "CredentialsSecret is a reference to the secret with alibabacloud credentials. Otherwise, defaults to permissions provided by attached RAM role where the actuator is running.", + "description": "credentialsSecret is a reference to the secret with alibabacloud credentials. Otherwise, defaults to permissions provided by attached RAM role where the actuator is running.", "$ref": "#/definitions/io.k8s.api.core.v1.LocalObjectReference" }, "dataDisk": { @@ -18382,7 +18853,7 @@ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta" }, "ramRoleName": { - "description": "RAMRoleName is the name of the instance Resource Access Management (RAM) role. 
This allows the instance to perform API calls as this specified RAM role.", + "description": "ramRoleName is the name of the instance Resource Access Management (RAM) role. This allows the instance to perform API calls as this specified RAM role.", "type": "string" }, "regionId": { @@ -18391,12 +18862,12 @@ "default": "" }, "resourceGroup": { - "description": "ResourceGroup references the resource group to which to assign the instance. A reference holds either the resource group ID, the resource name, or the required tags to search. When more than one resource group are returned for a search, an error will be produced and the Machine will not be created. Resource Groups do not support searching by tags.", + "description": "resourceGroup references the resource group to which to assign the instance. A reference holds either the resource group ID, the resource name, or the required tags to search. When more than one resource group are returned for a search, an error will be produced and the Machine will not be created. Resource Groups do not support searching by tags.", "default": {}, "$ref": "#/definitions/com.github.openshift.api.machine.v1.AlibabaResourceReference" }, "securityGroups": { - "description": "SecurityGroups is a list of security group references to assign to the instance. A reference holds either the security group ID, the resource name, or the required tags to search. When more than one security group is returned for a tag search, all the groups are associated with the instance up to the maximum number of security groups to which an instance can belong. For more information, see the \"Security group limits\" section in Limits. https://www.alibabacloud.com/help/en/doc-detail/25412.htm", + "description": "securityGroups is a list of security group references to assign to the instance. A reference holds either the security group ID, the resource name, or the required tags to search. When more than one security group is returned for a tag search, all the groups are associated with the instance up to the maximum number of security groups to which an instance can belong. For more information, see the \"Security group limits\" section in Limits. https://www.alibabacloud.com/help/en/doc-detail/25412.htm", "type": "array", "items": { "default": {}, @@ -18404,7 +18875,7 @@ } }, "systemDisk": { - "description": "SystemDisk holds the properties regarding the system disk for the instance", + "description": "systemDisk holds the properties regarding the system disk for the instance", "default": {}, "$ref": "#/definitions/com.github.openshift.api.machine.v1.SystemDiskProperties" }, @@ -18417,15 +18888,15 @@ } }, "tenancy": { - "description": "Tenancy specifies whether to create the instance on a dedicated host. Valid values:\n\ndefault: creates the instance on a non-dedicated host. host: creates the instance on a dedicated host. If you do not specify the DedicatedHostID parameter, Alibaba Cloud automatically selects a dedicated host for the instance. Empty value means no opinion and the platform chooses the a default, which is subject to change over time. Currently the default is `default`.", + "description": "tenancy specifies whether to create the instance on a dedicated host. Valid values:\n\ndefault: creates the instance on a non-dedicated host. host: creates the instance on a dedicated host. If you do not specify the DedicatedHostID parameter, Alibaba Cloud automatically selects a dedicated host for the instance. 
Empty value means no opinion and the platform chooses the a default, which is subject to change over time. Currently the default is `default`.", "type": "string" }, "userDataSecret": { - "description": "UserDataSecret contains a local reference to a secret that contains the UserData to apply to the instance", + "description": "userDataSecret contains a local reference to a secret that contains the UserData to apply to the instance", "$ref": "#/definitions/io.k8s.api.core.v1.LocalObjectReference" }, "vSwitch": { - "description": "VSwitch is a reference to the vswitch to use for this instance. A reference holds either the vSwitch ID, the resource name, or the required tags to search. When more than one vSwitch is returned for a tag search, only the first vSwitch returned will be used. This parameter is required when you create an instance of the VPC type. You can call the DescribeVSwitches operation to query the created vSwitches.", + "description": "vSwitch is a reference to the vswitch to use for this instance. A reference holds either the vSwitch ID, the resource name, or the required tags to search. When more than one vSwitch is returned for a tag search, only the first vSwitch returned will be used. This parameter is required when you create an instance of the VPC type. You can call the DescribeVSwitches operation to query the created vSwitches.", "default": {}, "$ref": "#/definitions/com.github.openshift.api.machine.v1.AlibabaResourceReference" }, @@ -18479,19 +18950,23 @@ "type": "string" }, "conditions": { - "description": "Conditions is a set of conditions associated with the Machine to indicate errors or other status", + "description": "conditions is a set of conditions associated with the Machine to indicate errors or other status", "type": "array", "items": { "default": {}, "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Condition" - } + }, + "x-kubernetes-list-map-keys": [ + "type" + ], + "x-kubernetes-list-type": "map" }, "instanceId": { - "description": "InstanceID is the instance ID of the machine created in alibabacloud", + "description": "instanceId is the instance ID of the machine created in alibabacloud", "type": "string" }, "instanceState": { - "description": "InstanceState is the state of the alibabacloud instance for this machine", + "description": "instanceState is the state of the alibabacloud instance for this machine", "type": "string" }, "kind": { @@ -18513,15 +18988,15 @@ ], "properties": { "id": { - "description": "ID of resource", + "description": "id of resource", "type": "string" }, "name": { - "description": "Name of the resource", + "description": "name of the resource", "type": "string" }, "tags": { - "description": "Tags is a set of metadata based upon ECS object tags used to identify a resource. For details about usage when multiple resources are found, please see the owning parent field documentation.", + "description": "tags is a set of metadata based upon ECS object tags used to identify a resource. For details about usage when multiple resources are found, please see the owning parent field documentation.", "type": "array", "items": { "default": {}, @@ -18558,12 +19033,12 @@ "type": "object", "properties": { "internetMaxBandwidthIn": { - "description": "InternetMaxBandwidthIn is the maximum inbound public bandwidth. Unit: Mbit/s. Valid values: When the purchased outbound public bandwidth is less than or equal to 10 Mbit/s, the valid values of this parameter are 1 to 10. 
Currently the default is `10` when outbound bandwidth is less than or equal to 10 Mbit/s. When the purchased outbound public bandwidth is greater than 10, the valid values are 1 to the InternetMaxBandwidthOut value. Currently the default is the value used for `InternetMaxBandwidthOut` when outbound public bandwidth is greater than 10.", + "description": "internetMaxBandwidthIn is the maximum inbound public bandwidth. Unit: Mbit/s. Valid values: When the purchased outbound public bandwidth is less than or equal to 10 Mbit/s, the valid values of this parameter are 1 to 10. Currently the default is `10` when outbound bandwidth is less than or equal to 10 Mbit/s. When the purchased outbound public bandwidth is greater than 10, the valid values are 1 to the InternetMaxBandwidthOut value. Currently the default is the value used for `InternetMaxBandwidthOut` when outbound public bandwidth is greater than 10.", "type": "integer", "format": "int64" }, "internetMaxBandwidthOut": { - "description": "InternetMaxBandwidthOut is the maximum outbound public bandwidth. Unit: Mbit/s. Valid values: 0 to 100. When a value greater than 0 is used then a public IP address is assigned to the instance. Empty value means no opinion and the platform chooses the a default, which is subject to change over time. Currently the default is `0`", + "description": "internetMaxBandwidthOut is the maximum outbound public bandwidth. Unit: Mbit/s. Valid values: 0 to 100. When a value greater than 0 is used then a public IP address is assigned to the instance. Empty value means no opinion and the platform chooses the a default, which is subject to change over time. Currently the default is `0`", "type": "integer", "format": "int64" } @@ -18634,8 +19109,12 @@ "template" ], "properties": { + "machineNamePrefix": { + "description": "machineNamePrefix is the prefix used when creating machine names. Each machine name will consist of this prefix, followed by a randomly generated string of 5 characters, and the index of the machine. It must be a lowercase RFC 1123 subdomain, consisting of lowercase alphanumeric characters, hyphens ('-'), and periods ('.'). Each block, separated by periods, must start and end with an alphanumeric character. Hyphens are not allowed at the start or end of a block, and consecutive periods are not permitted. The prefix must be between 1 and 245 characters in length. For example, if machineNamePrefix is set to 'control-plane', and three machines are created, their names might be: control-plane-abcde-0, control-plane-fghij-1, control-plane-klmno-2", + "type": "string" + }, "replicas": { - "description": "Replicas defines how many Control Plane Machines should be created by this ControlPlaneMachineSet. This field is immutable and cannot be changed after cluster installation. The ControlPlaneMachineSet only operates with 3 or 5 node control planes, 3 and 5 are the only valid values for this field.", + "description": "replicas defines how many Control Plane Machines should be created by this ControlPlaneMachineSet. This field is immutable and cannot be changed after cluster installation. The ControlPlaneMachineSet only operates with 3 or 5 node control planes, 3 and 5 are the only valid values for this field.", "type": "integer", "format": "int32" }, @@ -18645,17 +19124,17 @@ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector" }, "state": { - "description": "State defines whether the ControlPlaneMachineSet is Active or Inactive. 
When Inactive, the ControlPlaneMachineSet will not take any action on the state of the Machines within the cluster. When Active, the ControlPlaneMachineSet will reconcile the Machines and will update the Machines as necessary. Once Active, a ControlPlaneMachineSet cannot be made Inactive. To prevent further action please remove the ControlPlaneMachineSet.", + "description": "state defines whether the ControlPlaneMachineSet is Active or Inactive. When Inactive, the ControlPlaneMachineSet will not take any action on the state of the Machines within the cluster. When Active, the ControlPlaneMachineSet will reconcile the Machines and will update the Machines as necessary. Once Active, a ControlPlaneMachineSet cannot be made Inactive. To prevent further action please remove the ControlPlaneMachineSet.", "type": "string", "default": "Inactive" }, "strategy": { - "description": "Strategy defines how the ControlPlaneMachineSet will update Machines when it detects a change to the ProviderSpec.", + "description": "strategy defines how the ControlPlaneMachineSet will update Machines when it detects a change to the ProviderSpec.", "default": {}, "$ref": "#/definitions/com.github.openshift.api.machine.v1.ControlPlaneMachineSetStrategy" }, "template": { - "description": "Template describes the Control Plane Machines that will be created by this ControlPlaneMachineSet.", + "description": "template describes the Control Plane Machines that will be created by this ControlPlaneMachineSet.", "default": {}, "$ref": "#/definitions/com.github.openshift.api.machine.v1.ControlPlaneMachineSetTemplate" } @@ -18666,7 +19145,7 @@ "type": "object", "properties": { "conditions": { - "description": "Conditions represents the observations of the ControlPlaneMachineSet's current state. Known .status.conditions.type are: Available, Degraded and Progressing.", + "description": "conditions represents the observations of the ControlPlaneMachineSet's current state. Known .status.conditions.type are: Available, Degraded and Progressing.", "type": "array", "items": { "default": {}, @@ -18675,32 +19154,30 @@ "x-kubernetes-list-map-keys": [ "type" ], - "x-kubernetes-list-type": "map", - "x-kubernetes-patch-merge-key": "type", - "x-kubernetes-patch-strategy": "merge" + "x-kubernetes-list-type": "map" }, "observedGeneration": { - "description": "ObservedGeneration is the most recent generation observed for this ControlPlaneMachineSet. It corresponds to the ControlPlaneMachineSets's generation, which is updated on mutation by the API Server.", + "description": "observedGeneration is the most recent generation observed for this ControlPlaneMachineSet. It corresponds to the ControlPlaneMachineSets's generation, which is updated on mutation by the API Server.", "type": "integer", "format": "int64" }, "readyReplicas": { - "description": "ReadyReplicas is the number of Control Plane Machines created by the ControlPlaneMachineSet controller which are ready. Note that this value may be higher than the desired number of replicas while rolling updates are in-progress.", + "description": "readyReplicas is the number of Control Plane Machines created by the ControlPlaneMachineSet controller which are ready. Note that this value may be higher than the desired number of replicas while rolling updates are in-progress.", "type": "integer", "format": "int32" }, "replicas": { - "description": "Replicas is the number of Control Plane Machines created by the ControlPlaneMachineSet controller. 
Note that during update operations this value may differ from the desired replica count.", + "description": "replicas is the number of Control Plane Machines created by the ControlPlaneMachineSet controller. Note that during update operations this value may differ from the desired replica count.", "type": "integer", "format": "int32" }, "unavailableReplicas": { - "description": "UnavailableReplicas is the number of Control Plane Machines that are still required before the ControlPlaneMachineSet reaches the desired available capacity. When this value is non-zero, the number of ReadyReplicas is less than the desired Replicas.", + "description": "unavailableReplicas is the number of Control Plane Machines that are still required before the ControlPlaneMachineSet reaches the desired available capacity. When this value is non-zero, the number of ReadyReplicas is less than the desired Replicas.", "type": "integer", "format": "int32" }, "updatedReplicas": { - "description": "UpdatedReplicas is the number of non-terminated Control Plane Machines created by the ControlPlaneMachineSet controller that have the desired provider spec and are ready. This value is set to 0 when a change is detected to the desired spec. When the update strategy is RollingUpdate, this will also coincide with starting the process of updating the Machines. When the update strategy is OnDelete, this value will remain at 0 until a user deletes an existing replica and its replacement has become ready.", + "description": "updatedReplicas is the number of non-terminated Control Plane Machines created by the ControlPlaneMachineSet controller that have the desired provider spec and are ready. This value is set to 0 when a change is detected to the desired spec. When the update strategy is RollingUpdate, this will also coincide with starting the process of updating the Machines. When the update strategy is OnDelete, this value will remain at 0 until a user deletes an existing replica and its replacement has become ready.", "type": "integer", "format": "int32" } @@ -18711,7 +19188,7 @@ "type": "object", "properties": { "type": { - "description": "Type defines the type of update strategy that should be used when updating Machines owned by the ControlPlaneMachineSet. Valid values are \"RollingUpdate\" and \"OnDelete\". The current default value is \"RollingUpdate\".", + "description": "type defines the type of update strategy that should be used when updating Machines owned by the ControlPlaneMachineSet. Valid values are \"RollingUpdate\" and \"OnDelete\". The current default value is \"RollingUpdate\".", "type": "string", "default": "RollingUpdate" } @@ -18720,9 +19197,12 @@ "com.github.openshift.api.machine.v1.ControlPlaneMachineSetTemplate": { "description": "ControlPlaneMachineSetTemplate is a template used by the ControlPlaneMachineSet to create the Machines that it will manage in the future.", "type": "object", + "required": [ + "machineType" + ], "properties": { "machineType": { - "description": "MachineType determines the type of Machines that should be managed by the ControlPlaneMachineSet. Currently, the only valid value is machines_v1beta1_machine_openshift_io.", + "description": "machineType determines the type of Machines that should be managed by the ControlPlaneMachineSet. 
Currently, the only valid value is machines_v1beta1_machine_openshift_io.", "type": "string" }, "machines_v1beta1_machine_openshift_io": { @@ -18747,7 +19227,7 @@ ], "properties": { "annotations": { - "description": "Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations", + "description": "annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations", "type": "object", "additionalProperties": { "type": "string", @@ -18819,7 +19299,7 @@ ], "properties": { "aws": { - "description": "AWS configures failure domain information for the AWS platform.", + "description": "aws configures failure domain information for the AWS platform.", "type": "array", "items": { "default": {}, @@ -18828,7 +19308,7 @@ "x-kubernetes-list-type": "atomic" }, "azure": { - "description": "Azure configures failure domain information for the Azure platform.", + "description": "azure configures failure domain information for the Azure platform.", "type": "array", "items": { "default": {}, @@ -18837,7 +19317,7 @@ "x-kubernetes-list-type": "atomic" }, "gcp": { - "description": "GCP configures failure domain information for the GCP platform.", + "description": "gcp configures failure domain information for the GCP platform.", "type": "array", "items": { "default": {}, @@ -18858,7 +19338,7 @@ "x-kubernetes-list-type": "map" }, "openstack": { - "description": "OpenStack configures failure domain information for the OpenStack platform.", + "description": "openstack configures failure domain information for the OpenStack platform.", "type": "array", "items": { "default": {}, @@ -18867,7 +19347,7 @@ "x-kubernetes-list-type": "atomic" }, "platform": { - "description": "Platform identifies the platform for which the FailureDomain represents. Currently supported values are AWS, Azure, GCP, OpenStack, VSphere and Nutanix.", + "description": "platform identifies the platform for which the FailureDomain represents. Currently supported values are AWS, Azure, GCP, OpenStack, VSphere and Nutanix.", "type": "string", "default": "" }, @@ -18906,7 +19386,7 @@ ], "properties": { "zone": { - "description": "Zone is the zone in which the GCP machine provider will create the VM.", + "description": "zone is the zone in which the GCP machine provider will create the VM.", "type": "string", "default": "" } @@ -19131,7 +19611,11 @@ "items": { "default": {}, "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Condition" - } + }, + "x-kubernetes-list-map-keys": [ + "type" + ], + "x-kubernetes-list-type": "map" }, "kind": { "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", @@ -19155,7 +19639,7 @@ "type": "string" }, "type": { - "description": "Type is the identifier type to use for this resource.", + "description": "type is the identifier type to use for this resource.", "type": "string", "default": "" }, @@ -19229,7 +19713,9 @@ "description": "NutanixVMDiskDeviceProperties specifies the disk device properties.", "type": "object", "required": [ - "deviceType" + "deviceType", + "adapterType", + "deviceIndex" ], "properties": { "adapterType": { @@ -19275,7 +19761,7 @@ ], "properties": { "failureDomains": { - "description": "FailureDomains is the list of failure domains (sometimes called availability zones) in which the ControlPlaneMachineSet should balance the Control Plane Machines. This will be merged into the ProviderSpec given in the template. This field is optional on platforms that do not require placement information.", + "description": "failureDomains is the list of failure domains (sometimes called availability zones) in which the ControlPlaneMachineSet should balance the Control Plane Machines. This will be merged into the ProviderSpec given in the template. This field is optional on platforms that do not require placement information.", "$ref": "#/definitions/com.github.openshift.api.machine.v1.FailureDomains" }, "metadata": { @@ -19284,7 +19770,7 @@ "$ref": "#/definitions/com.github.openshift.api.machine.v1.ControlPlaneMachineSetTemplateObjectMeta" }, "spec": { - "description": "Spec contains the desired configuration of the Control Plane Machines. The ProviderSpec within contains platform specific details for creating the Control Plane Machines. The ProviderSe should be complete apart from the platform specific failure domain field. This will be overriden when the Machines are created based on the FailureDomains field.", + "description": "spec contains the desired configuration of the Control Plane Machines. The ProviderSpec within contains platform specific details for creating the Control Plane Machines. The ProviderSe should be complete apart from the platform specific failure domain field. This will be overriden when the Machines are created based on the FailureDomains field.", "default": {}, "$ref": "#/definitions/com.github.openshift.api.machine.v1beta1.MachineSpec" } @@ -19399,9 +19885,7 @@ "x-kubernetes-list-map-keys": [ "type" ], - "x-kubernetes-list-type": "map", - "x-kubernetes-patch-merge-key": "type", - "x-kubernetes-patch-strategy": "merge" + "x-kubernetes-list-type": "map" }, "instanceId": { "description": "instanceId is the instance ID of the machine created in PowerVS instanceId uniquely identifies a Power VS server instance(VM) under a Power VS service. This will help in updating or deleting a VM in Power VS Cloud", @@ -19426,19 +19910,19 @@ "type": "object", "properties": { "id": { - "description": "ID of resource", + "description": "id of resource", "type": "string" }, "name": { - "description": "Name of resource", + "description": "name of resource", "type": "string" }, "regex": { - "description": "Regex to find resource Regex contains the pattern to match to find a resource", + "description": "regex to find resource Regex contains the pattern to match to find a resource", "type": "string" }, "type": { - "description": "Type identifies the resource type for this entry. Valid values are ID, Name and RegEx", + "description": "type identifies the resource type for this entry. 
Valid values are ID, Name and RegEx", "type": "string" } }, @@ -19458,7 +19942,7 @@ "type": "object", "properties": { "name": { - "description": "Name of the secret.", + "description": "name of the secret.", "type": "string" } }, @@ -19487,19 +19971,19 @@ "type": "object", "properties": { "category": { - "description": "Category is the category of the system disk. Valid values: cloud_essd: ESSD. When the parameter is set to this value, you can use the SystemDisk.PerformanceLevel parameter to specify the performance level of the disk. cloud_efficiency: ultra disk. cloud_ssd: standard SSD. cloud: basic disk. Empty value means no opinion and the platform chooses the a default, which is subject to change over time. Currently for non-I/O optimized instances of retired instance types, the default is `cloud`. Currently for other instances, the default is `cloud_efficiency`.", + "description": "category is the category of the system disk. Valid values: cloud_essd: ESSD. When the parameter is set to this value, you can use the SystemDisk.PerformanceLevel parameter to specify the performance level of the disk. cloud_efficiency: ultra disk. cloud_ssd: standard SSD. cloud: basic disk. Empty value means no opinion and the platform chooses the a default, which is subject to change over time. Currently for non-I/O optimized instances of retired instance types, the default is `cloud`. Currently for other instances, the default is `cloud_efficiency`.", "type": "string" }, "name": { - "description": "Name is the name of the system disk. If the name is specified the name must be 2 to 128 characters in length. It must start with a letter and cannot start with http:// or https://. It can contain letters, digits, colons (:), underscores (_), and hyphens (-). Empty value means the platform chooses a default, which is subject to change over time. Currently the default is `\"\"`.", + "description": "name is the name of the system disk. If the name is specified the name must be 2 to 128 characters in length. It must start with a letter and cannot start with http:// or https://. It can contain letters, digits, colons (:), underscores (_), and hyphens (-). Empty value means the platform chooses a default, which is subject to change over time. Currently the default is `\"\"`.", "type": "string" }, "performanceLevel": { - "description": "PerformanceLevel is the performance level of the ESSD used as the system disk. Valid values:\n\nPL0: A single ESSD can deliver up to 10,000 random read/write IOPS. PL1: A single ESSD can deliver up to 50,000 random read/write IOPS. PL2: A single ESSD can deliver up to 100,000 random read/write IOPS. PL3: A single ESSD can deliver up to 1,000,000 random read/write IOPS. Empty value means no opinion and the platform chooses a default, which is subject to change over time. Currently the default is `PL1`. For more information about ESSD performance levels, see ESSDs.", + "description": "performanceLevel is the performance level of the ESSD used as the system disk. Valid values:\n\nPL0: A single ESSD can deliver up to 10,000 random read/write IOPS. PL1: A single ESSD can deliver up to 50,000 random read/write IOPS. PL2: A single ESSD can deliver up to 100,000 random read/write IOPS. PL3: A single ESSD can deliver up to 1,000,000 random read/write IOPS. Empty value means no opinion and the platform chooses a default, which is subject to change over time. Currently the default is `PL1`. 
For more information about ESSD performance levels, see ESSDs.", "type": "string" }, "size": { - "description": "Size is the size of the system disk. Unit: GiB. Valid values: 20 to 500. The value must be at least 20 and greater than or equal to the size of the image. Empty value means the platform chooses a default, which is subject to change over time. Currently the default is `40` or the size of the image depending on whichever is greater.", + "description": "size is the size of the system disk. Unit: GiB. Valid values: 20 to 500. The value must be at least 20 and greater than or equal to the size of the image. Empty value means the platform chooses a default, which is subject to change over time. Currently the default is `40` or the size of the image depending on whichever is greater.", "type": "integer", "format": "int64" } @@ -19713,19 +20197,19 @@ "$ref": "#/definitions/com.github.openshift.api.machine.v1alpha1.Filter" }, "fixedIp": { - "description": "A fixed IPv4 address for the NIC.", + "description": "A fixed IPv4 address for the NIC. Deprecated: fixedIP is silently ignored. Use subnets instead.", "type": "string" }, "noAllowedAddressPairs": { - "description": "NoAllowedAddressPairs disables creation of allowed address pairs for the network ports", + "description": "noAllowedAddressPairs disables creation of allowed address pairs for the network ports", "type": "boolean" }, "portSecurity": { - "description": "PortSecurity optionally enables or disables security on ports managed by OpenStack", + "description": "portSecurity optionally enables or disables security on ports managed by OpenStack", "type": "boolean" }, "portTags": { - "description": "PortTags allows users to specify a list of tags to add to ports created in a given network", + "description": "portTags allows users to specify a list of tags to add to ports created in a given network", "type": "array", "items": { "type": "string", @@ -19845,7 +20329,7 @@ } }, "primarySubnet": { - "description": "The subnet that a set of machines will get ingress/egress traffic from", + "description": "The subnet that a set of machines will get ingress/egress traffic from Deprecated: primarySubnet is silently ignored. Use subnets instead.", "type": "string" }, "rootVolume": { @@ -19877,7 +20361,7 @@ } }, "sshUserName": { - "description": "The machine ssh username", + "description": "The machine ssh username Deprecated: sshUserName is silently ignored.", "type": "string" }, "tags": { @@ -19958,7 +20442,7 @@ } }, "projectID": { - "description": "projectID specifies the project ID of the created port. Note that this requires OpenShift to have administrative permissions, which is typically not the case. Use of this field is not recommended.", + "description": "projectID specifies the project ID of the created port. Note that this requires OpenShift to have administrative permissions, which is typically not the case. Use of this field is not recommended. Deprecated: projectID is silently ignored.", "type": "string" }, "securityGroups": { @@ -19978,7 +20462,7 @@ } }, "tenantID": { - "description": "tenantID specifies the tenant ID of the created port. Note that this requires OpenShift to have administrative permissions, which is typically not the case. Use of this field is not recommended. Deprecated: use projectID instead. It will be ignored if projectID is set.", + "description": "tenantID specifies the tenant ID of the created port. Note that this requires OpenShift to have administrative permissions, which is typically not the case. 
Use of this field is not recommended. Deprecated: tenantID is silently ignored.", "type": "string" }, "trunk": { @@ -20003,7 +20487,7 @@ "type": "string" }, "diskSize": { - "description": "diskSize specifies the size, in GB, of the created root volume.", + "description": "diskSize specifies the size, in GiB, of the created root volume.", "type": "integer", "format": "int32" }, @@ -20171,7 +20655,7 @@ "type": "string" }, "subnetpoolId": { - "description": "subnetpoolId filters subnets by subnet pool ID.", + "description": "subnetpoolId filters subnets by subnet pool ID. Deprecated: subnetpoolId is silently ignored.", "type": "string" }, "tags": { @@ -20197,11 +20681,11 @@ "$ref": "#/definitions/com.github.openshift.api.machine.v1alpha1.SubnetFilter" }, "portSecurity": { - "description": "PortSecurity optionally enables or disables security on ports managed by OpenStack", + "description": "portSecurity optionally enables or disables security on ports managed by OpenStack Deprecated: portSecurity is silently ignored. Set portSecurity on the parent network instead.", "type": "boolean" }, "portTags": { - "description": "PortTags are tags that are added to ports created on this subnet", + "description": "portTags are tags that are added to ports created on this subnet", "type": "array", "items": { "type": "string", @@ -20226,7 +20710,7 @@ ], "properties": { "ami": { - "description": "AMI is the reference to the AMI from which to create the machine instance.", + "description": "ami is the reference to the AMI from which to create the machine instance.", "default": {}, "$ref": "#/definitions/com.github.openshift.api.machine.v1beta1.AWSResourceReference" }, @@ -20235,7 +20719,7 @@ "type": "string" }, "blockDevices": { - "description": "BlockDevices is the set of block device mapping associated to this instance, block device without a name will be used as a root device and only one device without a name is allowed https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/block-device-mapping-concepts.html", + "description": "blockDevices is the set of block device mapping associated to this instance, block device without a name will be used as a root device and only one device without a name is allowed https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/block-device-mapping-concepts.html", "type": "array", "items": { "default": {}, @@ -20248,26 +20732,26 @@ "default": "" }, "credentialsSecret": { - "description": "CredentialsSecret is a reference to the secret with AWS credentials. Otherwise, defaults to permissions provided by attached IAM role where the actuator is running.", + "description": "credentialsSecret is a reference to the secret with AWS credentials. Otherwise, defaults to permissions provided by attached IAM role where the actuator is running.", "$ref": "#/definitions/io.k8s.api.core.v1.LocalObjectReference" }, "deviceIndex": { - "description": "DeviceIndex is the index of the device on the instance for the network interface attachment. Defaults to 0.", + "description": "deviceIndex is the index of the device on the instance for the network interface attachment. 
Defaults to 0.", "type": "integer", "format": "int64", "default": 0 }, "iamInstanceProfile": { - "description": "IAMInstanceProfile is a reference to an IAM role to assign to the instance", + "description": "iamInstanceProfile is a reference to an IAM role to assign to the instance", "$ref": "#/definitions/com.github.openshift.api.machine.v1beta1.AWSResourceReference" }, "instanceType": { - "description": "InstanceType is the type of instance to create. Example: m4.xlarge", + "description": "instanceType is the type of instance to create. Example: m4.xlarge", "type": "string", "default": "" }, "keyName": { - "description": "KeyName is the name of the KeyPair to use for SSH", + "description": "keyName is the name of the KeyPair to use for SSH", "type": "string" }, "kind": { @@ -20275,33 +20759,37 @@ "type": "string" }, "loadBalancers": { - "description": "LoadBalancers is the set of load balancers to which the new instance should be added once it is created.", + "description": "loadBalancers is the set of load balancers to which the new instance should be added once it is created.", "type": "array", "items": { "default": {}, "$ref": "#/definitions/com.github.openshift.api.machine.v1beta1.LoadBalancerReference" } }, + "marketType": { + "description": "marketType specifies the type of market for the EC2 instance. Valid values are OnDemand, Spot, CapacityBlock and omitted.\n\nDefaults to OnDemand. When SpotMarketOptions is provided, the marketType defaults to \"Spot\".\n\nWhen set to OnDemand the instance runs as a standard OnDemand instance. When set to Spot the instance runs as a Spot instance. When set to CapacityBlock the instance utilizes pre-purchased compute capacity (capacity blocks) with AWS Capacity Reservations. If this value is selected, capacityReservationID must be specified to identify the target reservation.", + "type": "string" + }, "metadata": { "default": {}, "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta" }, "metadataServiceOptions": { - "description": "MetadataServiceOptions allows users to configure instance metadata service interaction options. If nothing specified, default AWS IMDS settings will be applied. https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_InstanceMetadataOptionsRequest.html", + "description": "metadataServiceOptions allows users to configure instance metadata service interaction options. If nothing specified, default AWS IMDS settings will be applied. https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_InstanceMetadataOptionsRequest.html", "default": {}, "$ref": "#/definitions/com.github.openshift.api.machine.v1beta1.MetadataServiceOptions" }, "networkInterfaceType": { - "description": "NetworkInterfaceType specifies the type of network interface to be used for the primary network interface. Valid values are \"ENA\", \"EFA\", and omitted, which means no opinion and the platform chooses a good default which may change over time. The current default value is \"ENA\". Please visit https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/efa.html to learn more about the AWS Elastic Fabric Adapter interface option.", + "description": "networkInterfaceType specifies the type of network interface to be used for the primary network interface. Valid values are \"ENA\", \"EFA\", and omitted, which means no opinion and the platform chooses a good default which may change over time. The current default value is \"ENA\". 
Please visit https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/efa.html to learn more about the AWS Elastic Fabric Adapter interface option.", "type": "string" }, "placement": { - "description": "Placement specifies where to create the instance in AWS", + "description": "placement specifies where to create the instance in AWS", "default": {}, "$ref": "#/definitions/com.github.openshift.api.machine.v1beta1.Placement" }, "placementGroupName": { - "description": "PlacementGroupName specifies the name of the placement group in which to launch the instance. The placement group must already be created and may use any placement strategy. When omitted, no placement group is used when creating the EC2 instance.", + "description": "placementGroupName specifies the name of the placement group in which to launch the instance. The placement group must already be created and may use any placement strategy. When omitted, no placement group is used when creating the EC2 instance.", "type": "string" }, "placementGroupPartition": { @@ -20310,11 +20798,11 @@ "format": "int32" }, "publicIp": { - "description": "PublicIP specifies whether the instance should get a public IP. If not present, it should use the default of its subnet.", + "description": "publicIp specifies whether the instance should get a public IP. If not present, it should use the default of its subnet.", "type": "boolean" }, "securityGroups": { - "description": "SecurityGroups is an array of references to security groups that should be applied to the instance.", + "description": "securityGroups is an array of references to security groups that should be applied to the instance.", "type": "array", "items": { "default": {}, @@ -20322,16 +20810,16 @@ } }, "spotMarketOptions": { - "description": "SpotMarketOptions allows users to configure instances to be run using AWS Spot instances.", + "description": "spotMarketOptions allows users to configure instances to be run using AWS Spot instances.", "$ref": "#/definitions/com.github.openshift.api.machine.v1beta1.SpotMarketOptions" }, "subnet": { - "description": "Subnet is a reference to the subnet to use for this instance", + "description": "subnet is a reference to the subnet to use for this instance", "default": {}, "$ref": "#/definitions/com.github.openshift.api.machine.v1beta1.AWSResourceReference" }, "tags": { - "description": "Tags is the set of tags to add to apply to an instance, in addition to the ones added by default by the actuator. These tags are additive. The actuator will ensure these tags are present, but will not remove any other tags that may exist on the instance.", + "description": "tags is the set of tags to add to apply to an instance, in addition to the ones added by default by the actuator. These tags are additive. 
The actuator will ensure these tags are present, but will not remove any other tags that may exist on the instance.", "type": "array", "items": { "default": {}, @@ -20339,7 +20827,7 @@ } }, "userDataSecret": { - "description": "UserDataSecret contains a local reference to a secret that contains the UserData to apply to the instance", + "description": "userDataSecret contains a local reference to a secret that contains the UserData to apply to the instance", "$ref": "#/definitions/io.k8s.api.core.v1.LocalObjectReference" } } @@ -20381,19 +20869,23 @@ "type": "string" }, "conditions": { - "description": "Conditions is a set of conditions associated with the Machine to indicate errors or other status", + "description": "conditions is a set of conditions associated with the Machine to indicate errors or other status", "type": "array", "items": { "default": {}, "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Condition" - } + }, + "x-kubernetes-list-map-keys": [ + "type" + ], + "x-kubernetes-list-type": "map" }, "instanceId": { - "description": "InstanceID is the instance ID of the machine created in AWS", + "description": "instanceId is the instance ID of the machine created in AWS", "type": "string" }, "instanceState": { - "description": "InstanceState is the state of the AWS instance for this machine", + "description": "instanceState is the state of the AWS instance for this machine", "type": "string" }, "kind": { @@ -20407,11 +20899,11 @@ "type": "object", "properties": { "arn": { - "description": "ARN of resource", + "description": "arn of resource", "type": "string" }, "filters": { - "description": "Filters is a set of filters used to identify a resource", + "description": "filters is a set of filters used to identify a resource", "type": "array", "items": { "default": {}, @@ -20419,7 +20911,7 @@ } }, "id": { - "description": "ID of resource", + "description": "id of resource", "type": "string" } } @@ -20458,11 +20950,11 @@ ], "properties": { "customerManaged": { - "description": "CustomerManaged provides reference to the customer manager storage account.", + "description": "customerManaged provides reference to the customer manager storage account.", "$ref": "#/definitions/com.github.openshift.api.machine.v1beta1.AzureCustomerManagedBootDiagnostics" }, "storageAccountType": { - "description": "StorageAccountType determines if the storage account for storing the diagnostics data should be provisioned by Azure (AzureManaged) or by the customer (CustomerManaged).", + "description": "storageAccountType determines if the storage account for storing the diagnostics data should be provisioned by Azure (AzureManaged) or by the customer (CustomerManaged).", "type": "string", "default": "" } @@ -20484,7 +20976,7 @@ ], "properties": { "storageAccountURI": { - "description": "StorageAccountURI is the URI of the customer managed storage account. The URI typically will be `https://.blob.core.windows.net/` but may differ if you are using Azure DNS zone endpoints. You can find the correct endpoint by looking for the Blob Primary Endpoint in the endpoints tab in the Azure console.", + "description": "storageAccountURI is the URI of the customer managed storage account. The URI typically will be `https://.blob.core.windows.net/` but may differ if you are using Azure DNS zone endpoints. 
You can find the correct endpoint by looking for the Blob Primary Endpoint in the endpoints tab in the Azure console.", "type": "string", "default": "" } @@ -20511,7 +21003,7 @@ ], "properties": { "acceleratedNetworking": { - "description": "AcceleratedNetworking enables or disables Azure accelerated networking feature. Set to false by default. If true, then this will depend on whether the requested VMSize is supported. If set to true with an unsupported VMSize, Azure will return an error.", + "description": "acceleratedNetworking enables or disables Azure accelerated networking feature. Set to false by default. If true, then this will depend on whether the requested VMSize is supported. If set to true with an unsupported VMSize, Azure will return an error.", "type": "boolean" }, "apiVersion": { @@ -20527,7 +21019,7 @@ } }, "availabilitySet": { - "description": "AvailabilitySet specifies the availability set to use for this instance. Availability set should be precreated, before using this field.", + "description": "availabilitySet specifies the availability set to use for this instance. Availability set should be precreated, before using this field.", "type": "string" }, "capacityReservationGroupID": { @@ -20535,7 +21027,7 @@ "type": "string" }, "credentialsSecret": { - "description": "CredentialsSecret is a reference to the secret with Azure credentials.", + "description": "credentialsSecret is a reference to the secret with Azure credentials.", "$ref": "#/definitions/io.k8s.api.core.v1.SecretReference" }, "dataDisks": { @@ -20547,12 +21039,12 @@ } }, "diagnostics": { - "description": "Diagnostics configures the diagnostics settings for the virtual machine. This allows you to configure boot diagnostics such as capturing serial output from the virtual machine on boot. This is useful for debugging software based launch issues.", + "description": "diagnostics configures the diagnostics settings for the virtual machine. This allows you to configure boot diagnostics such as capturing serial output from the virtual machine on boot. 
This is useful for debugging software based launch issues.", "default": {}, "$ref": "#/definitions/com.github.openshift.api.machine.v1beta1.AzureDiagnostics" }, "image": { - "description": "Image is the OS image to use to create the instance.", + "description": "image is the OS image to use to create the instance.", "default": {}, "$ref": "#/definitions/com.github.openshift.api.machine.v1beta1.Image" }, @@ -20565,11 +21057,11 @@ "type": "string" }, "location": { - "description": "Location is the region to use to create the instance", + "description": "location is the region to use to create the instance", "type": "string" }, "managedIdentity": { - "description": "ManagedIdentity to set managed identity name", + "description": "managedIdentity to set managed identity name", "type": "string" }, "metadata": { @@ -20577,30 +21069,30 @@ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta" }, "natRule": { - "description": "NatRule to set inbound NAT rule of the load balancer", + "description": "natRule to set inbound NAT rule of the load balancer", "type": "integer", "format": "int64" }, "networkResourceGroup": { - "description": "NetworkResourceGroup is the resource group for the virtual machine's network", + "description": "networkResourceGroup is the resource group for the virtual machine's network", "type": "string" }, "osDisk": { - "description": "OSDisk represents the parameters for creating the OS disk.", + "description": "osDisk represents the parameters for creating the OS disk.", "default": {}, "$ref": "#/definitions/com.github.openshift.api.machine.v1beta1.OSDisk" }, "publicIP": { - "description": "PublicIP if true a public IP will be used", + "description": "publicIP if true a public IP will be used", "type": "boolean", "default": false }, "publicLoadBalancer": { - "description": "PublicLoadBalancer to use for this instance", + "description": "publicLoadBalancer to use for this instance", "type": "string" }, "resourceGroup": { - "description": "ResourceGroup is the resource group for the virtual machine", + "description": "resourceGroup is the resource group for the virtual machine", "type": "string" }, "securityGroup": { @@ -20608,24 +21100,24 @@ "type": "string" }, "securityProfile": { - "description": "SecurityProfile specifies the Security profile settings for a virtual machine.", + "description": "securityProfile specifies the Security profile settings for a virtual machine.", "$ref": "#/definitions/com.github.openshift.api.machine.v1beta1.SecurityProfile" }, "spotVMOptions": { - "description": "SpotVMOptions allows the ability to specify the Machine should use a Spot VM", + "description": "spotVMOptions allows the ability to specify the Machine should use a Spot VM", "$ref": "#/definitions/com.github.openshift.api.machine.v1beta1.SpotVMOptions" }, "sshPublicKey": { - "description": "SSHPublicKey is the public key to use to SSH to the virtual machine.", + "description": "sshPublicKey is the public key to use to SSH to the virtual machine.", "type": "string" }, "subnet": { - "description": "Subnet to use for this instance", + "description": "subnet to use for this instance", "type": "string", "default": "" }, "tags": { - "description": "Tags is a list of tags to apply to the machine.", + "description": "tags is a list of tags to apply to the machine.", "type": "object", "additionalProperties": { "type": "string", @@ -20633,19 +21125,19 @@ } }, "ultraSSDCapability": { - "description": "UltraSSDCapability enables or disables Azure UltraSSD capability for a 
virtual machine. This can be used to allow/disallow binding of Azure UltraSSD to the Machine both as Data Disks or via Persistent Volumes. This Azure feature is subject to a specific scope and certain limitations. More informations on this can be found in the official Azure documentation for Ultra Disks: (https://docs.microsoft.com/en-us/azure/virtual-machines/disks-enable-ultra-ssd?tabs=azure-portal#ga-scope-and-limitations).\n\nWhen omitted, if at least one Data Disk of type UltraSSD is specified, the platform will automatically enable the capability. If a Perisistent Volume backed by an UltraSSD is bound to a Pod on the Machine, when this field is ommitted, the platform will *not* automatically enable the capability (unless already enabled by the presence of an UltraSSD as Data Disk). This may manifest in the Pod being stuck in `ContainerCreating` phase. This defaulting behaviour may be subject to change in future.\n\nWhen set to \"Enabled\", if the capability is available for the Machine based on the scope and limitations described above, the capability will be set on the Machine. This will thus allow UltraSSD both as Data Disks and Persistent Volumes. If set to \"Enabled\" when the capability can't be available due to scope and limitations, the Machine will go into \"Failed\" state.\n\nWhen set to \"Disabled\", UltraSSDs will not be allowed either as Data Disks nor as Persistent Volumes. In this case if any UltraSSDs are specified as Data Disks on a Machine, the Machine will go into a \"Failed\" state. If instead any UltraSSDs are backing the volumes (via Persistent Volumes) of any Pods scheduled on a Node which is backed by the Machine, the Pod may get stuck in `ContainerCreating` phase.", + "description": "ultraSSDCapability enables or disables Azure UltraSSD capability for a virtual machine. This can be used to allow/disallow binding of Azure UltraSSD to the Machine both as Data Disks or via Persistent Volumes. This Azure feature is subject to a specific scope and certain limitations. More informations on this can be found in the official Azure documentation for Ultra Disks: (https://docs.microsoft.com/en-us/azure/virtual-machines/disks-enable-ultra-ssd?tabs=azure-portal#ga-scope-and-limitations).\n\nWhen omitted, if at least one Data Disk of type UltraSSD is specified, the platform will automatically enable the capability. If a Perisistent Volume backed by an UltraSSD is bound to a Pod on the Machine, when this field is ommitted, the platform will *not* automatically enable the capability (unless already enabled by the presence of an UltraSSD as Data Disk). This may manifest in the Pod being stuck in `ContainerCreating` phase. This defaulting behaviour may be subject to change in future.\n\nWhen set to \"Enabled\", if the capability is available for the Machine based on the scope and limitations described above, the capability will be set on the Machine. This will thus allow UltraSSD both as Data Disks and Persistent Volumes. If set to \"Enabled\" when the capability can't be available due to scope and limitations, the Machine will go into \"Failed\" state.\n\nWhen set to \"Disabled\", UltraSSDs will not be allowed either as Data Disks nor as Persistent Volumes. In this case if any UltraSSDs are specified as Data Disks on a Machine, the Machine will go into a \"Failed\" state. 
If instead any UltraSSDs are backing the volumes (via Persistent Volumes) of any Pods scheduled on a Node which is backed by the Machine, the Pod may get stuck in `ContainerCreating` phase.", "type": "string" }, "userDataSecret": { - "description": "UserDataSecret contains a local reference to a secret that contains the UserData to apply to the instance", + "description": "userDataSecret contains a local reference to a secret that contains the UserData to apply to the instance", "$ref": "#/definitions/io.k8s.api.core.v1.SecretReference" }, "vmSize": { - "description": "VMSize is the size of the VM to create.", + "description": "vmSize is the size of the VM to create.", "type": "string" }, "vnet": { - "description": "Vnet to set virtual network name", + "description": "vnet to set virtual network name", "type": "string" }, "zone": { @@ -20663,12 +21155,16 @@ "type": "string" }, "conditions": { - "description": "Conditions is a set of conditions associated with the Machine to indicate errors or other status.", + "description": "conditions is a set of conditions associated with the Machine to indicate errors or other status.", "type": "array", "items": { "default": {}, "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Condition" - } + }, + "x-kubernetes-list-map-keys": [ + "type" + ], + "x-kubernetes-list-type": "map" }, "kind": { "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", @@ -20679,11 +21175,11 @@ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta" }, "vmId": { - "description": "VMID is the ID of the virtual machine created in Azure.", + "description": "vmId is the ID of the virtual machine created in Azure.", "type": "string" }, "vmState": { - "description": "VMState is the provisioning state of the Azure virtual machine.", + "description": "vmState is the provisioning state of the Azure virtual machine.", "type": "string" } } @@ -20732,16 +21228,16 @@ "type": "string" }, "severity": { - "description": "Severity provides an explicit classification of Reason code, so the users or machines can immediately understand the current situation and act accordingly. The Severity field MUST be set only when Status=False.", + "description": "severity provides an explicit classification of Reason code, so the users or machines can immediately understand the current situation and act accordingly. The Severity field MUST be set only when Status=False.", "type": "string" }, "status": { - "description": "Status of the condition, one of True, False, Unknown.", + "description": "status of the condition, one of True, False, Unknown.", "type": "string", "default": "" }, "type": { - "description": "Type of condition in CamelCase or in foo.example.com/CamelCase. Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important.", + "description": "type of condition in CamelCase or in foo.example.com/CamelCase. 
Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important.", "type": "string", "default": "" } @@ -20750,6 +21246,9 @@ "com.github.openshift.api.machine.v1beta1.ConfidentialVM": { "description": "ConfidentialVM defines the UEFI settings for the virtual machine.", "type": "object", + "required": [ + "uefiSettings" + ], "properties": { "uefiSettings": { "description": "uefiSettings specifies the security settings like secure boot and vTPM used while creating the virtual machine.", @@ -20764,36 +21263,37 @@ "required": [ "nameSuffix", "diskSizeGB", + "lun", "deletionPolicy" ], "properties": { "cachingType": { - "description": "CachingType specifies the caching requirements. Empty value means no opinion and the platform chooses a default, which is subject to change over time. Currently the default is CachingTypeNone.", + "description": "cachingType specifies the caching requirements. Empty value means no opinion and the platform chooses a default, which is subject to change over time. Currently the default is CachingTypeNone.", "type": "string" }, "deletionPolicy": { - "description": "DeletionPolicy specifies the data disk deletion policy upon Machine deletion. Possible values are \"Delete\",\"Detach\". When \"Delete\" is used the data disk is deleted when the Machine is deleted. When \"Detach\" is used the data disk is detached from the Machine and retained when the Machine is deleted.", + "description": "deletionPolicy specifies the data disk deletion policy upon Machine deletion. Possible values are \"Delete\",\"Detach\". When \"Delete\" is used the data disk is deleted when the Machine is deleted. When \"Detach\" is used the data disk is detached from the Machine and retained when the Machine is deleted.", "type": "string", "default": "" }, "diskSizeGB": { - "description": "DiskSizeGB is the size in GB to assign to the data disk.", + "description": "diskSizeGB is the size in GB to assign to the data disk.", "type": "integer", "format": "int32", "default": 0 }, "lun": { - "description": "Lun Specifies the logical unit number of the data disk. This value is used to identify data disks within the VM and therefore must be unique for each data disk attached to a VM. This value is also needed for referencing the data disks devices within userdata to perform disk initialization through Ignition (e.g. partition/format/mount). The value must be between 0 and 63.", + "description": "lun Specifies the logical unit number of the data disk. This value is used to identify data disks within the VM and therefore must be unique for each data disk attached to a VM. This value is also needed for referencing the data disks devices within userdata to perform disk initialization through Ignition (e.g. partition/format/mount). The value must be between 0 and 63.", "type": "integer", "format": "int32" }, "managedDisk": { - "description": "ManagedDisk specifies the Managed Disk parameters for the data disk. Empty value means no opinion and the platform chooses a default, which is subject to change over time. Currently the default is a ManagedDisk with with storageAccountType: \"Premium_LRS\" and diskEncryptionSet.id: \"Default\".", + "description": "managedDisk specifies the Managed Disk parameters for the data disk. Empty value means no opinion and the platform chooses a default, which is subject to change over time. 
Currently the default is a ManagedDisk with with storageAccountType: \"Premium_LRS\" and diskEncryptionSet.id: \"Default\".", "default": {}, "$ref": "#/definitions/com.github.openshift.api.machine.v1beta1.DataDiskManagedDiskParameters" }, "nameSuffix": { - "description": "NameSuffix is the suffix to be appended to the machine name to generate the disk name. Each disk name will be in format _. NameSuffix name must start and finish with an alphanumeric character and can only contain letters, numbers, underscores, periods or hyphens. The overall disk name must not exceed 80 chars in length.", + "description": "nameSuffix is the suffix to be appended to the machine name to generate the disk name. Each disk name will be in format _. NameSuffix name must start and finish with an alphanumeric character and can only contain letters, numbers, underscores, periods or hyphens. The overall disk name must not exceed 80 chars in length.", "type": "string", "default": "" } @@ -20807,11 +21307,11 @@ ], "properties": { "diskEncryptionSet": { - "description": "DiskEncryptionSet is the disk encryption set properties. Empty value means no opinion and the platform chooses a default, which is subject to change over time. Currently the default is a DiskEncryptionSet with id: \"Default\".", + "description": "diskEncryptionSet is the disk encryption set properties. Empty value means no opinion and the platform chooses a default, which is subject to change over time. Currently the default is a DiskEncryptionSet with id: \"Default\".", "$ref": "#/definitions/com.github.openshift.api.machine.v1beta1.DiskEncryptionSetParameters" }, "storageAccountType": { - "description": "StorageAccountType is the storage account type to use. Possible values include \"Standard_LRS\", \"Premium_LRS\" and \"UltraSSD_LRS\".", + "description": "storageAccountType is the storage account type to use. Possible values include \"Standard_LRS\", \"Premium_LRS\" and \"UltraSSD_LRS\".", "type": "string", "default": "" } @@ -20822,7 +21322,7 @@ "type": "object", "properties": { "id": { - "description": "ID is the disk encryption set ID Empty value means no opinion and the platform chooses a default, which is subject to change over time. Currently the default is: \"Default\".", + "description": "id is the disk encryption set ID Empty value means no opinion and the platform chooses a default, which is subject to change over time. Currently the default is: \"Default\".", "type": "string" } } @@ -20832,7 +21332,7 @@ "type": "object", "properties": { "ephemeralStorageLocation": { - "description": "EphemeralStorageLocation enables ephemeral OS when set to 'Local'. Possible values include: 'Local'. See https://docs.microsoft.com/en-us/azure/virtual-machines/ephemeral-os-disks for full details. Empty value means no opinion and the platform chooses a default, which is subject to change over time. Currently the default is that disks are saved to remote Azure storage.", + "description": "ephemeralStorageLocation enables ephemeral OS when set to 'Local'. Possible values include: 'Local'. See https://docs.microsoft.com/en-us/azure/virtual-machines/ephemeral-os-disks for full details. Empty value means no opinion and the platform chooses a default, which is subject to change over time. Currently the default is that disks are saved to remote Azure storage.", "type": "string" } } @@ -20878,12 +21378,12 @@ ], "properties": { "name": { - "description": "Name of the filter. Filter names are case-sensitive.", + "description": "name of the filter. 
Filter names are case-sensitive.", "type": "string", "default": "" }, "values": { - "description": "Values includes one or more filter values. Filter values are case-sensitive.", + "description": "values includes one or more filter values. Filter values are case-sensitive.", "type": "array", "items": { "type": "string", @@ -20905,26 +21405,26 @@ ], "properties": { "autoDelete": { - "description": "AutoDelete indicates if the disk will be auto-deleted when the instance is deleted (default false).", + "description": "autoDelete indicates if the disk will be auto-deleted when the instance is deleted (default false).", "type": "boolean", "default": false }, "boot": { - "description": "Boot indicates if this is a boot disk (default false).", + "description": "boot indicates if this is a boot disk (default false).", "type": "boolean", "default": false }, "encryptionKey": { - "description": "EncryptionKey is the customer-supplied encryption key of the disk.", + "description": "encryptionKey is the customer-supplied encryption key of the disk.", "$ref": "#/definitions/com.github.openshift.api.machine.v1beta1.GCPEncryptionKeyReference" }, "image": { - "description": "Image is the source image to create this disk.", + "description": "image is the source image to create this disk.", "type": "string", "default": "" }, "labels": { - "description": "Labels list of labels to apply to the disk.", + "description": "labels list of labels to apply to the disk.", "type": "object", "additionalProperties": { "type": "string", @@ -20932,13 +21432,13 @@ } }, "sizeGb": { - "description": "SizeGB is the size of the disk (in GB).", + "description": "sizeGb is the size of the disk (in GB).", "type": "integer", "format": "int64", "default": 0 }, "type": { - "description": "Type is the type of the disk (eg: pd-standard).", + "description": "type is the type of the disk (eg: pd-standard).", "type": "string", "default": "" } @@ -20953,7 +21453,7 @@ "$ref": "#/definitions/com.github.openshift.api.machine.v1beta1.GCPKMSKeyReference" }, "kmsKeyServiceAccount": { - "description": "KMSKeyServiceAccount is the service account being used for the encryption request for the given KMS key. If absent, the Compute Engine default service account is used. See https://cloud.google.com/compute/docs/access/service-accounts#compute_engine_service_account for details on the default service account.", + "description": "kmsKeyServiceAccount is the service account being used for the encryption request for the given KMS key. If absent, the Compute Engine default service account is used. See https://cloud.google.com/compute/docs/access/service-accounts#compute_engine_service_account for details on the default service account.", "type": "string" } } @@ -20967,13 +21467,13 @@ ], "properties": { "count": { - "description": "Count is the number of GPUs to be attached to an instance.", + "description": "count is the number of GPUs to be attached to an instance.", "type": "integer", "format": "int32", "default": 0 }, "type": { - "description": "Type is the type of GPU to be attached to an instance. Supported GPU types are: nvidia-tesla-k80, nvidia-tesla-p100, nvidia-tesla-v100, nvidia-tesla-p4, nvidia-tesla-t4", + "description": "type is the type of GPU to be attached to an instance. 
Supported GPU types are: nvidia-tesla-k80, nvidia-tesla-p100, nvidia-tesla-v100, nvidia-tesla-p4, nvidia-tesla-t4", "type": "string", "default": "" } @@ -20989,22 +21489,22 @@ ], "properties": { "keyRing": { - "description": "KeyRing is the name of the KMS Key Ring which the KMS Key belongs to.", + "description": "keyRing is the name of the KMS Key Ring which the KMS Key belongs to.", "type": "string", "default": "" }, "location": { - "description": "Location is the GCP location in which the Key Ring exists.", + "description": "location is the GCP location in which the Key Ring exists.", "type": "string", "default": "" }, "name": { - "description": "Name is the name of the customer managed encryption key to be used for the disk encryption.", + "description": "name is the name of the customer managed encryption key to be used for the disk encryption.", "type": "string", "default": "" }, "projectID": { - "description": "ProjectID is the ID of the Project in which the KMS Key Ring exists. Defaults to the VM ProjectID if not set.", + "description": "projectID is the ID of the Project in which the KMS Key Ring exists. Defaults to the VM ProjectID if not set.", "type": "string" } } @@ -21026,25 +21526,25 @@ "type": "string" }, "canIPForward": { - "description": "CanIPForward Allows this instance to send and receive packets with non-matching destination or source IPs. This is required if you plan to use this instance to forward routes.", + "description": "canIPForward Allows this instance to send and receive packets with non-matching destination or source IPs. This is required if you plan to use this instance to forward routes.", "type": "boolean", "default": false }, "confidentialCompute": { - "description": "confidentialCompute Defines whether the instance should have confidential compute enabled. If enabled OnHostMaintenance is required to be set to \"Terminate\". If omitted, the platform chooses a default, which is subject to change over time, currently that default is false.", + "description": "confidentialCompute is an optional field defining whether the instance should have confidential compute enabled or not, and the confidential computing technology of choice. Allowed values are omitted, Disabled, Enabled, AMDEncryptedVirtualization, AMDEncryptedVirtualizationNestedPaging, and IntelTrustedDomainExtensions When set to Disabled, the machine will not be configured to be a confidential computing instance. When set to Enabled, the machine will be configured as a confidential computing instance with no preference on the confidential compute policy used. In this mode, the platform chooses a default that is subject to change over time. Currently, the default is to use AMD Secure Encrypted Virtualization. When set to AMDEncryptedVirtualization, the machine will be configured as a confidential computing instance with AMD Secure Encrypted Virtualization (AMD SEV) as the confidential computing technology. When set to AMDEncryptedVirtualizationNestedPaging, the machine will be configured as a confidential computing instance with AMD Secure Encrypted Virtualization Secure Nested Paging (AMD SEV-SNP) as the confidential computing technology. When set to IntelTrustedDomainExtensions, the machine will be configured as a confidential computing instance with Intel Trusted Domain Extensions (Intel TDX) as the confidential computing technology. If any value other than Disabled is set the selected machine type must support that specific confidential computing technology. 
The machine series supporting confidential computing technologies can be checked at https://cloud.google.com/confidential-computing/confidential-vm/docs/supported-configurations#all-confidential-vm-instances Currently, AMDEncryptedVirtualization is supported in c2d, n2d, and c3d machines. AMDEncryptedVirtualizationNestedPaging is supported in n2d machines. IntelTrustedDomainExtensions is supported in c3 machines. If any value other than Disabled is set, the selected region must support that specific confidential computing technology. The list of regions supporting confidential computing technologies can be checked at https://cloud.google.com/confidential-computing/confidential-vm/docs/supported-configurations#supported-zones If any value other than Disabled is set onHostMaintenance is required to be set to \"Terminate\". If omitted, the platform chooses a default, which is subject to change over time, currently that default is Disabled.", "type": "string" }, "credentialsSecret": { - "description": "CredentialsSecret is a reference to the secret with GCP credentials.", + "description": "credentialsSecret is a reference to the secret with GCP credentials.", "$ref": "#/definitions/io.k8s.api.core.v1.LocalObjectReference" }, "deletionProtection": { - "description": "DeletionProtection whether the resource should be protected against deletion.", + "description": "deletionProtection whether the resource should be protected against deletion.", "type": "boolean", "default": false }, "disks": { - "description": "Disks is a list of disks to be attached to the VM.", + "description": "disks is a list of disks to be attached to the VM.", "type": "array", "items": { "$ref": "#/definitions/com.github.openshift.api.machine.v1beta1.GCPDisk" @@ -21058,7 +21558,7 @@ } }, "gpus": { - "description": "GPUs is a list of GPUs to be attached to the VM.", + "description": "gpus is a list of GPUs to be attached to the VM.", "type": "array", "items": { "default": {}, @@ -21070,7 +21570,7 @@ "type": "string" }, "labels": { - "description": "Labels list of labels to apply to the VM.", + "description": "labels list of labels to apply to the VM.", "type": "object", "additionalProperties": { "type": "string", @@ -21078,7 +21578,7 @@ } }, "machineType": { - "description": "MachineType is the machine type to use for the VM.", + "description": "machineType is the machine type to use for the VM.", "type": "string", "default": "" }, @@ -21088,26 +21588,26 @@ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta" }, "networkInterfaces": { - "description": "NetworkInterfaces is a list of network interfaces to be attached to the VM.", + "description": "networkInterfaces is a list of network interfaces to be attached to the VM.", "type": "array", "items": { "$ref": "#/definitions/com.github.openshift.api.machine.v1beta1.GCPNetworkInterface" } }, "onHostMaintenance": { - "description": "OnHostMaintenance determines the behavior when a maintenance event occurs that might cause the instance to reboot. This is required to be set to \"Terminate\" if you want to provision machine with attached GPUs. Otherwise, allowed values are \"Migrate\" and \"Terminate\". If omitted, the platform chooses a default, which is subject to change over time, currently that default is \"Migrate\".", + "description": "onHostMaintenance determines the behavior when a maintenance event occurs that might cause the instance to reboot. This is required to be set to \"Terminate\" if you want to provision machine with attached GPUs. 
Otherwise, allowed values are \"Migrate\" and \"Terminate\". If omitted, the platform chooses a default, which is subject to change over time, currently that default is \"Migrate\".", "type": "string" }, "preemptible": { - "description": "Preemptible indicates if created instance is preemptible.", + "description": "preemptible indicates if created instance is preemptible.", "type": "boolean" }, "projectID": { - "description": "ProjectID is the project in which the GCP machine provider will create the VM.", + "description": "projectID is the project in which the GCP machine provider will create the VM.", "type": "string" }, "region": { - "description": "Region is the region in which the GCP machine provider will create the VM.", + "description": "region is the region in which the GCP machine provider will create the VM.", "type": "string", "default": "" }, @@ -21124,11 +21624,11 @@ "x-kubernetes-list-type": "map" }, "restartPolicy": { - "description": "RestartPolicy determines the behavior when an instance crashes or the underlying infrastructure provider stops the instance as part of a maintenance event (default \"Always\"). Cannot be \"Always\" with preemptible instances. Otherwise, allowed values are \"Always\" and \"Never\". If omitted, the platform chooses a default, which is subject to change over time, currently that default is \"Always\". RestartPolicy represents AutomaticRestart in GCP compute api", + "description": "restartPolicy determines the behavior when an instance crashes or the underlying infrastructure provider stops the instance as part of a maintenance event (default \"Always\"). Cannot be \"Always\" with preemptible instances. Otherwise, allowed values are \"Always\" and \"Never\". If omitted, the platform chooses a default, which is subject to change over time, currently that default is \"Always\". RestartPolicy represents AutomaticRestart in GCP compute api", "type": "string" }, "serviceAccounts": { - "description": "ServiceAccounts is a list of GCP service accounts to be used by the VM.", + "description": "serviceAccounts is a list of GCP service accounts to be used by the VM.", "type": "array", "items": { "default": {}, @@ -21136,12 +21636,12 @@ } }, "shieldedInstanceConfig": { - "description": "ShieldedInstanceConfig is the Shielded VM configuration for the VM", + "description": "shieldedInstanceConfig is the Shielded VM configuration for the VM", "default": {}, "$ref": "#/definitions/com.github.openshift.api.machine.v1beta1.GCPShieldedInstanceConfig" }, "tags": { - "description": "Tags list of network tags to apply to the VM.", + "description": "tags list of network tags to apply to the VM.", "type": "array", "items": { "type": "string", @@ -21149,7 +21649,7 @@ } }, "targetPools": { - "description": "TargetPools are used for network TCP/UDP load balancing. A target pool references member instances, an associated legacy HttpHealthCheck resource, and, optionally, a backup target pool", + "description": "targetPools are used for network TCP/UDP load balancing. 
A target pool references member instances, an associated legacy HttpHealthCheck resource, and, optionally, a backup target pool", "type": "array", "items": { "type": "string", @@ -21157,11 +21657,11 @@ } }, "userDataSecret": { - "description": "UserDataSecret contains a local reference to a secret that contains the UserData to apply to the instance", + "description": "userDataSecret contains a local reference to a secret that contains the UserData to apply to the instance", "$ref": "#/definitions/io.k8s.api.core.v1.LocalObjectReference" }, "zone": { - "description": "Zone is the zone in which the GCP machine provider will create the VM.", + "description": "zone is the zone in which the GCP machine provider will create the VM.", "type": "string", "default": "" } @@ -21176,19 +21676,23 @@ "type": "string" }, "conditions": { - "description": "Conditions is a set of conditions associated with the Machine to indicate errors or other status", + "description": "conditions is a set of conditions associated with the Machine to indicate errors or other status", "type": "array", "items": { "default": {}, "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Condition" - } + }, + "x-kubernetes-list-map-keys": [ + "type" + ], + "x-kubernetes-list-type": "map" }, "instanceId": { - "description": "InstanceID is the ID of the instance in GCP", + "description": "instanceId is the ID of the instance in GCP", "type": "string" }, "instanceState": { - "description": "InstanceState is the provisioning state of the GCP Instance.", + "description": "instanceState is the provisioning state of the GCP Instance.", "type": "string" }, "kind": { @@ -21210,12 +21714,12 @@ ], "properties": { "key": { - "description": "Key is the metadata key.", + "description": "key is the metadata key.", "type": "string", "default": "" }, "value": { - "description": "Value is the metadata value.", + "description": "value is the metadata value.", "type": "string" } } @@ -21225,19 +21729,19 @@ "type": "object", "properties": { "network": { - "description": "Network is the network name.", + "description": "network is the network name.", "type": "string" }, "projectID": { - "description": "ProjectID is the project in which the GCP machine provider will create the VM.", + "description": "projectID is the project in which the GCP machine provider will create the VM.", "type": "string" }, "publicIP": { - "description": "PublicIP indicates if true a public IP will be used", + "description": "publicIP indicates if true a public IP will be used", "type": "boolean" }, "subnetwork": { - "description": "Subnetwork is the subnetwork name.", + "description": "subnetwork is the subnetwork name.", "type": "string" } } @@ -21251,12 +21755,12 @@ ], "properties": { "email": { - "description": "Email is the service account email.", + "description": "email is the service account email.", "type": "string", "default": "" }, "scopes": { - "description": "Scopes list of scopes to be assigned to the service account.", + "description": "scopes list of scopes to be assigned to the service account.", "type": "array", "items": { "type": "string", @@ -21270,15 +21774,15 @@ "type": "object", "properties": { "integrityMonitoring": { - "description": "IntegrityMonitoring determines whether the instance should have integrity monitoring that verify the runtime boot integrity. Compares the most recent boot measurements to the integrity policy baseline and return a pair of pass/fail results depending on whether they match or not. 
If omitted, the platform chooses a default, which is subject to change over time, currently that default is Enabled.", + "description": "integrityMonitoring determines whether the instance should have integrity monitoring that verify the runtime boot integrity. Compares the most recent boot measurements to the integrity policy baseline and return a pair of pass/fail results depending on whether they match or not. If omitted, the platform chooses a default, which is subject to change over time, currently that default is Enabled.", "type": "string" }, "secureBoot": { - "description": "SecureBoot Defines whether the instance should have secure boot enabled. Secure Boot verify the digital signature of all boot components, and halting the boot process if signature verification fails. If omitted, the platform chooses a default, which is subject to change over time, currently that default is Disabled.", + "description": "secureBoot Defines whether the instance should have secure boot enabled. Secure Boot verify the digital signature of all boot components, and halting the boot process if signature verification fails. If omitted, the platform chooses a default, which is subject to change over time, currently that default is Disabled.", "type": "string" }, "virtualizedTrustedPlatformModule": { - "description": "VirtualizedTrustedPlatformModule enable virtualized trusted platform module measurements to create a known good boot integrity policy baseline. The integrity policy baseline is used for comparison with measurements from subsequent VM boots to determine if anything has changed. This is required to be set to \"Enabled\" if IntegrityMonitoring is enabled. If omitted, the platform chooses a default, which is subject to change over time, currently that default is Enabled.", + "description": "virtualizedTrustedPlatformModule enable virtualized trusted platform module measurements to create a known good boot integrity policy baseline. The integrity policy baseline is used for comparison with measurements from subsequent VM boots to determine if anything has changed. This is required to be set to \"Enabled\" if IntegrityMonitoring is enabled. If omitted, the platform chooses a default, which is subject to change over time, currently that default is Enabled.", "type": "string" } } @@ -21295,31 +21799,31 @@ ], "properties": { "offer": { - "description": "Offer specifies the name of a group of related images created by the publisher. For example, UbuntuServer, WindowsServer", + "description": "offer specifies the name of a group of related images created by the publisher. For example, UbuntuServer, WindowsServer", "type": "string", "default": "" }, "publisher": { - "description": "Publisher is the name of the organization that created the image", + "description": "publisher is the name of the organization that created the image", "type": "string", "default": "" }, "resourceID": { - "description": "ResourceID specifies an image to use by ID", + "description": "resourceID specifies an image to use by ID", "type": "string", "default": "" }, "sku": { - "description": "SKU specifies an instance of an offer, such as a major release of a distribution. For example, 18.04-LTS, 2019-Datacenter", + "description": "sku specifies an instance of an offer, such as a major release of a distribution. For example, 18.04-LTS, 2019-Datacenter", "type": "string", "default": "" }, "type": { - "description": "Type identifies the source of the image and related information, such as purchase plans. 
Valid values are \"ID\", \"MarketplaceWithPlan\", \"MarketplaceNoPlan\", and omitted, which means no opinion and the platform chooses a good default which may change over time. Currently that default is \"MarketplaceNoPlan\" if publisher data is supplied, or \"ID\" if not. For more information about purchase plans, see: https://docs.microsoft.com/en-us/azure/virtual-machines/linux/cli-ps-findimage#check-the-purchase-plan-information", + "description": "type identifies the source of the image and related information, such as purchase plans. Valid values are \"ID\", \"MarketplaceWithPlan\", \"MarketplaceNoPlan\", and omitted, which means no opinion and the platform chooses a good default which may change over time. Currently that default is \"MarketplaceNoPlan\" if publisher data is supplied, or \"ID\" if not. For more information about purchase plans, see: https://docs.microsoft.com/en-us/azure/virtual-machines/linux/cli-ps-findimage#check-the-purchase-plan-information", "type": "string" }, "version": { - "description": "Version specifies the version of an image sku. The allowed formats are Major.Minor.Build or 'latest'. Major, Minor, and Build are decimal numbers. Specify 'latest' to use the latest version of an image available at deploy time. Even if you use 'latest', the VM image will not automatically update after deploy time even if a new version becomes available.", + "description": "version specifies the version of an image sku. The allowed formats are Major.Minor.Build or 'latest'. Major, Minor, and Build are decimal numbers. Specify 'latest' to use the latest version of an image available at deploy time. Even if you use 'latest', the VM image will not automatically update after deploy time even if a new version becomes available.", "type": "string", "default": "" } @@ -21330,19 +21834,19 @@ "type": "object", "properties": { "description": { - "description": "Description is the human-readable description of the last operation.", + "description": "description is the human-readable description of the last operation.", "type": "string" }, "lastUpdated": { - "description": "LastUpdated is the timestamp at which LastOperation API was last-updated.", + "description": "lastUpdated is the timestamp at which LastOperation API was last-updated.", "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time" }, "state": { - "description": "State is the current status of the last performed operation. E.g. Processing, Failed, Successful etc", + "description": "state is the current status of the last performed operation. E.g. Processing, Failed, Successful etc", "type": "string" }, "type": { - "description": "Type is the type of operation which was last performed. E.g. Create, Delete, Update etc", + "description": "type is the type of operation which was last performed. E.g. Create, Delete, Update etc", "type": "string" } } @@ -21356,12 +21860,12 @@ ], "properties": { "name": { - "description": "Name defines a unique name for the lifcycle hook. The name should be unique and descriptive, ideally 1-3 words, in CamelCase or it may be namespaced, eg. foo.example.com/CamelCase. Names must be unique and should only be managed by a single entity.", + "description": "name defines a unique name for the lifcycle hook. The name should be unique and descriptive, ideally 1-3 words, in CamelCase or it may be namespaced, eg. foo.example.com/CamelCase. 
Names must be unique and should only be managed by a single entity.", "type": "string", "default": "" }, "owner": { - "description": "Owner defines the owner of the lifecycle hook. This should be descriptive enough so that users can identify who/what is responsible for blocking the lifecycle. This could be the name of a controller (e.g. clusteroperator/etcd) or an administrator managing the hook.", + "description": "owner defines the owner of the lifecycle hook. This should be descriptive enough so that users can identify who/what is responsible for blocking the lifecycle. This could be the name of a controller (e.g. clusteroperator/etcd) or an administrator managing the hook.", "type": "string", "default": "" } @@ -21372,7 +21876,7 @@ "type": "object", "properties": { "preDrain": { - "description": "PreDrain hooks prevent the machine from being drained. This also blocks further lifecycle events, such as termination.", + "description": "preDrain hooks prevent the machine from being drained. This also blocks further lifecycle events, such as termination.", "type": "array", "items": { "default": {}, @@ -21384,7 +21888,7 @@ "x-kubernetes-list-type": "map" }, "preTerminate": { - "description": "PreTerminate hooks prevent the machine from being terminated. PreTerminate hooks be actioned after the Machine has been drained.", + "description": "preTerminate hooks prevent the machine from being terminated. PreTerminate hooks be actioned after the Machine has been drained.", "type": "array", "items": { "default": {}, @@ -21517,7 +22021,7 @@ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Duration" }, "remediationTemplate": { - "description": "RemediationTemplate is a reference to a remediation template provided by an infrastructure provider.\n\nThis field is completely optional, when filled, the MachineHealthCheck controller creates a new object from the template referenced and hands off remediation of the machine to a controller that lives outside of Machine API Operator.", + "description": "remediationTemplate is a reference to a remediation template provided by an infrastructure provider.\n\nThis field is completely optional, when filled, the MachineHealthCheck controller creates a new object from the template referenced and hands off remediation of the machine to a controller that lives outside of Machine API Operator.", "$ref": "#/definitions/io.k8s.api.core.v1.ObjectReference" }, "selector": { @@ -21526,7 +22030,7 @@ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector" }, "unhealthyConditions": { - "description": "UnhealthyConditions contains a list of the conditions that determine whether a node is considered unhealthy. The conditions are combined in a logical OR, i.e. if any of the conditions is met, the node is unhealthy.", + "description": "unhealthyConditions contains a list of the conditions that determine whether a node is considered unhealthy. The conditions are combined in a logical OR, i.e. 
if any of the conditions is met, the node is unhealthy.", "type": "array", "items": { "default": {}, @@ -21544,7 +22048,7 @@ ], "properties": { "conditions": { - "description": "Conditions defines the current state of the MachineHealthCheck", + "description": "conditions defines the current state of the MachineHealthCheck", "type": "array", "items": { "default": {}, @@ -21566,7 +22070,7 @@ "format": "int32" }, "remediationsAllowed": { - "description": "RemediationsAllowed is the number of further remediations allowed by this machine health check before maxUnhealthy short circuiting will be applied", + "description": "remediationsAllowed is the number of further remediations allowed by this machine health check before maxUnhealthy short circuiting will be applied", "type": "integer", "format": "int32", "default": 0 @@ -21671,26 +22175,26 @@ "default": "MachineAPI" }, "deletePolicy": { - "description": "DeletePolicy defines the policy used to identify nodes to delete when downscaling. Defaults to \"Random\". Valid values are \"Random, \"Newest\", \"Oldest\"", + "description": "deletePolicy defines the policy used to identify nodes to delete when downscaling. Defaults to \"Random\". Valid values are \"Random, \"Newest\", \"Oldest\"", "type": "string" }, "minReadySeconds": { - "description": "MinReadySeconds is the minimum number of seconds for which a newly created machine should be ready. Defaults to 0 (machine will be considered available as soon as it is ready)", + "description": "minReadySeconds is the minimum number of seconds for which a newly created machine should be ready. Defaults to 0 (machine will be considered available as soon as it is ready)", "type": "integer", "format": "int32" }, "replicas": { - "description": "Replicas is the number of desired replicas. This is a pointer to distinguish between explicit zero and unspecified. Defaults to 1.", + "description": "replicas is the number of desired replicas. This is a pointer to distinguish between explicit zero and unspecified. Defaults to 1.", "type": "integer", "format": "int32" }, "selector": { - "description": "Selector is a label query over machines that should match the replica count. Label keys and values that must match in order to be controlled by this MachineSet. It must match the machine template's labels. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", + "description": "selector is a label query over machines that should match the replica count. Label keys and values that must match in order to be controlled by this MachineSet. It must match the machine template's labels. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", "default": {}, "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector" }, "template": { - "description": "Template is the object that describes the machine that will be created if insufficient replicas are detected.", + "description": "template is the object that describes the machine that will be created if insufficient replicas are detected.", "default": {}, "$ref": "#/definitions/com.github.openshift.api.machine.v1beta1.MachineTemplateSpec" } @@ -21713,7 +22217,7 @@ "format": "int32" }, "conditions": { - "description": "Conditions defines the current state of the MachineSet", + "description": "conditions defines the current state of the MachineSet", "type": "array", "items": { "default": {}, @@ -21737,7 +22241,7 @@ "format": "int32" }, "observedGeneration": { - "description": "ObservedGeneration reflects the generation of the most recently observed MachineSet.", + "description": "observedGeneration reflects the generation of the most recently observed MachineSet.", "type": "integer", "format": "int64" }, @@ -21747,7 +22251,7 @@ "format": "int32" }, "replicas": { - "description": "Replicas is the most recently observed number of replicas.", + "description": "replicas is the most recently observed number of replicas.", "type": "integer", "format": "int32", "default": 0 @@ -21769,7 +22273,7 @@ "default": "MachineAPI" }, "lifecycleHooks": { - "description": "LifecycleHooks allow users to pause operations on the machine at certain predefined points within the machine lifecycle.", + "description": "lifecycleHooks allow users to pause operations on the machine at certain predefined points within the machine lifecycle.", "default": {}, "$ref": "#/definitions/com.github.openshift.api.machine.v1beta1.LifecycleHooks" }, @@ -21779,11 +22283,11 @@ "$ref": "#/definitions/com.github.openshift.api.machine.v1beta1.ObjectMeta" }, "providerID": { - "description": "ProviderID is the identification ID of the machine provided by the provider. This field must match the provider ID as seen on the node object corresponding to this machine. This field is required by higher level consumers of cluster-api. Example use case is cluster autoscaler with cluster-api as provider. Clean-up logic in the autoscaler compares machines to nodes to find out machines at provider which could not get registered as Kubernetes nodes. With cluster-api as a generic out-of-tree provider for autoscaler, this field is required by autoscaler to be able to have a provider view of the list of machines. Another list of nodes is queried from the k8s apiserver and then a comparison is done to find out unregistered machines and are marked for delete. This field will be set by the actuators and consumed by higher level entities like autoscaler that will be interfacing with cluster-api as generic provider.", + "description": "providerID is the identification ID of the machine provided by the provider. This field must match the provider ID as seen on the node object corresponding to this machine. This field is required by higher level consumers of cluster-api. Example use case is cluster autoscaler with cluster-api as provider. Clean-up logic in the autoscaler compares machines to nodes to find out machines at provider which could not get registered as Kubernetes nodes. 
With cluster-api as a generic out-of-tree provider for autoscaler, this field is required by autoscaler to be able to have a provider view of the list of machines. Another list of nodes is queried from the k8s apiserver and then a comparison is done to find out unregistered machines and are marked for delete. This field will be set by the actuators and consumed by higher level entities like autoscaler that will be interfacing with cluster-api as generic provider.", "type": "string" }, "providerSpec": { - "description": "ProviderSpec details Provider-specific configuration to use during node creation.", + "description": "providerSpec details Provider-specific configuration to use during node creation.", "default": {}, "$ref": "#/definitions/com.github.openshift.api.machine.v1beta1.ProviderSpec" }, @@ -21803,7 +22307,7 @@ "type": "object", "properties": { "addresses": { - "description": "Addresses is a list of addresses assigned to the machine. Queried from cloud provider, if available.", + "description": "addresses is a list of addresses assigned to the machine. Queried from cloud provider, if available.", "type": "array", "items": { "default": {}, @@ -21816,7 +22320,7 @@ "type": "string" }, "conditions": { - "description": "Conditions defines the current state of the Machine", + "description": "conditions defines the current state of the Machine", "type": "array", "items": { "default": {}, @@ -21828,31 +22332,31 @@ "x-kubernetes-list-type": "map" }, "errorMessage": { - "description": "ErrorMessage will be set in the event that there is a terminal problem reconciling the Machine and will contain a more verbose string suitable for logging and human consumption.\n\nThis field should not be set for transitive errors that a controller faces that are expected to be fixed automatically over time (like service outages), but instead indicate that something is fundamentally wrong with the Machine's spec or the configuration of the controller, and that manual intervention is required. Examples of terminal errors would be invalid combinations of settings in the spec, values that are unsupported by the controller, or the responsible controller itself being critically misconfigured.\n\nAny transient errors that occur during the reconciliation of Machines can be added as events to the Machine object and/or logged in the controller's output.", + "description": "errorMessage will be set in the event that there is a terminal problem reconciling the Machine and will contain a more verbose string suitable for logging and human consumption.\n\nThis field should not be set for transitive errors that a controller faces that are expected to be fixed automatically over time (like service outages), but instead indicate that something is fundamentally wrong with the Machine's spec or the configuration of the controller, and that manual intervention is required. 
Examples of terminal errors would be invalid combinations of settings in the spec, values that are unsupported by the controller, or the responsible controller itself being critically misconfigured.\n\nAny transient errors that occur during the reconciliation of Machines can be added as events to the Machine object and/or logged in the controller's output.", "type": "string" }, "errorReason": { - "description": "ErrorReason will be set in the event that there is a terminal problem reconciling the Machine and will contain a succinct value suitable for machine interpretation.\n\nThis field should not be set for transitive errors that a controller faces that are expected to be fixed automatically over time (like service outages), but instead indicate that something is fundamentally wrong with the Machine's spec or the configuration of the controller, and that manual intervention is required. Examples of terminal errors would be invalid combinations of settings in the spec, values that are unsupported by the controller, or the responsible controller itself being critically misconfigured.\n\nAny transient errors that occur during the reconciliation of Machines can be added as events to the Machine object and/or logged in the controller's output.", + "description": "errorReason will be set in the event that there is a terminal problem reconciling the Machine and will contain a succinct value suitable for machine interpretation.\n\nThis field should not be set for transitive errors that a controller faces that are expected to be fixed automatically over time (like service outages), but instead indicate that something is fundamentally wrong with the Machine's spec or the configuration of the controller, and that manual intervention is required. Examples of terminal errors would be invalid combinations of settings in the spec, values that are unsupported by the controller, or the responsible controller itself being critically misconfigured.\n\nAny transient errors that occur during the reconciliation of Machines can be added as events to the Machine object and/or logged in the controller's output.", "type": "string" }, "lastOperation": { - "description": "LastOperation describes the last-operation performed by the machine-controller. This API should be useful as a history in terms of the latest operation performed on the specific machine. It should also convey the state of the latest-operation for example if it is still on-going, failed or completed successfully.", + "description": "lastOperation describes the last-operation performed by the machine-controller. This API should be useful as a history in terms of the latest operation performed on the specific machine. It should also convey the state of the latest-operation for example if it is still on-going, failed or completed successfully.", "$ref": "#/definitions/com.github.openshift.api.machine.v1beta1.LastOperation" }, "lastUpdated": { - "description": "LastUpdated identifies when this status was last observed.", + "description": "lastUpdated identifies when this status was last observed.", "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time" }, "nodeRef": { - "description": "NodeRef will point to the corresponding Node if it exists.", + "description": "nodeRef will point to the corresponding Node if it exists.", "$ref": "#/definitions/io.k8s.api.core.v1.ObjectReference" }, "phase": { - "description": "Phase represents the current phase of machine actuation. 
One of: Failed, Provisioning, Provisioned, Running, Deleting", + "description": "phase represents the current phase of machine actuation. One of: Failed, Provisioning, Provisioned, Running, Deleting", "type": "string" }, "providerStatus": { - "description": "ProviderStatus details a Provider-specific status. It is recommended that providers maintain their own versioned API types that should be serialized/deserialized from this field.", + "description": "providerStatus details a Provider-specific status. It is recommended that providers maintain their own versioned API types that should be serialized/deserialized from this field.", "$ref": "#/definitions/io.k8s.apimachinery.pkg.runtime.RawExtension" }, "synchronizedGeneration": { @@ -21883,7 +22387,7 @@ "type": "object", "properties": { "authentication": { - "description": "Authentication determines whether or not the host requires the use of authentication when interacting with the metadata service. When using authentication, this enforces v2 interaction method (IMDSv2) with the metadata service. When omitted, this means the user has no opinion and the value is left to the platform to choose a good default, which is subject to change over time. The current default is optional. At this point this field represents `HttpTokens` parameter from `InstanceMetadataOptionsRequest` structure in AWS EC2 API https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_InstanceMetadataOptionsRequest.html", + "description": "authentication determines whether or not the host requires the use of authentication when interacting with the metadata service. When using authentication, this enforces v2 interaction method (IMDSv2) with the metadata service. When omitted, this means the user has no opinion and the value is left to the platform to choose a good default, which is subject to change over time. The current default is optional. At this point this field represents `HttpTokens` parameter from `InstanceMetadataOptionsRequest` structure in AWS EC2 API https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_InstanceMetadataOptionsRequest.html", "type": "string" } } @@ -21934,7 +22438,7 @@ ], "properties": { "devices": { - "description": "Devices defines the virtual machine's network interfaces.", + "description": "devices defines the virtual machine's network interfaces.", "type": "array", "items": { "default": {}, @@ -21952,27 +22456,27 @@ ], "properties": { "cachingType": { - "description": "CachingType specifies the caching requirements. Possible values include: 'None', 'ReadOnly', 'ReadWrite'. Empty value means no opinion and the platform chooses a default, which is subject to change over time. Currently the default is `None`.", + "description": "cachingType specifies the caching requirements. Possible values include: 'None', 'ReadOnly', 'ReadWrite'. Empty value means no opinion and the platform chooses a default, which is subject to change over time. 
Currently the default is `None`.", "type": "string" }, "diskSettings": { - "description": "DiskSettings describe ephemeral disk settings for the os disk.", + "description": "diskSettings describe ephemeral disk settings for the os disk.", "default": {}, "$ref": "#/definitions/com.github.openshift.api.machine.v1beta1.DiskSettings" }, "diskSizeGB": { - "description": "DiskSizeGB is the size in GB to assign to the data disk.", + "description": "diskSizeGB is the size in GB to assign to the data disk.", "type": "integer", "format": "int32", "default": 0 }, "managedDisk": { - "description": "ManagedDisk specifies the Managed Disk parameters for the OS disk.", + "description": "managedDisk specifies the Managed Disk parameters for the OS disk.", "default": {}, "$ref": "#/definitions/com.github.openshift.api.machine.v1beta1.OSDiskManagedDiskParameters" }, "osType": { - "description": "OSType is the operating system type of the OS disk. Possible values include \"Linux\" and \"Windows\".", + "description": "osType is the operating system type of the OS disk. Possible values include \"Linux\" and \"Windows\".", "type": "string", "default": "" } @@ -21986,7 +22490,7 @@ ], "properties": { "diskEncryptionSet": { - "description": "DiskEncryptionSet is the disk encryption set properties", + "description": "diskEncryptionSet is the disk encryption set properties", "$ref": "#/definitions/com.github.openshift.api.machine.v1beta1.DiskEncryptionSetParameters" }, "securityProfile": { @@ -21995,7 +22499,7 @@ "$ref": "#/definitions/com.github.openshift.api.machine.v1beta1.VMDiskSecurityProfile" }, "storageAccountType": { - "description": "StorageAccountType is the storage account type to use. Possible values include \"Standard_LRS\", \"Premium_LRS\".", + "description": "storageAccountType is the storage account type to use. Possible values include \"Standard_LRS\", \"Premium_LRS\".", "type": "string", "default": "" } @@ -22006,7 +22510,7 @@ "type": "object", "properties": { "annotations": { - "description": "Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations", + "description": "annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations", "type": "object", "additionalProperties": { "type": "string", @@ -22014,7 +22518,7 @@ } }, "generateName": { - "description": "GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\n\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\n\nApplied only if Name is not specified. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency", + "description": "generateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\n\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\n\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency", "type": "string" }, "labels": { @@ -22026,11 +22530,11 @@ } }, "name": { - "description": "Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names", + "description": "name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names", "type": "string" }, "namespace": { - "description": "Namespace defines the space within each name must be unique. An empty namespace is equivalent to the \"default\" namespace, but \"default\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\n\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces", + "description": "namespace defines the space within each name must be unique. An empty namespace is equivalent to the \"default\" namespace, but \"default\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\n\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces", "type": "string" }, "ownerReferences": { @@ -22054,15 +22558,15 @@ "type": "object", "properties": { "availabilityZone": { - "description": "AvailabilityZone is the availability zone of the instance", + "description": "availabilityZone is the availability zone of the instance", "type": "string" }, "region": { - "description": "Region is the region to use to create the instance", + "description": "region is the region to use to create the instance", "type": "string" }, "tenancy": { - "description": "Tenancy indicates if instance should run on shared or single-tenant hardware. There are supported 3 options: default, dedicated and host.", + "description": "tenancy indicates if instance should run on shared or single-tenant hardware. 
There are supported 3 options: default, dedicated and host.", "type": "string" } } @@ -22072,7 +22576,7 @@ "type": "object", "properties": { "value": { - "description": "Value is an inlined, serialized representation of the resource configuration. It is recommended that providers maintain their own versioned API types that should be serialized/deserialized from this field, akin to component config.", + "description": "value is an inlined, serialized representation of the resource configuration. It is recommended that providers maintain their own versioned API types that should be serialized/deserialized from this field, akin to component config.", "$ref": "#/definitions/io.k8s.apimachinery.pkg.runtime.RawExtension" } } @@ -22121,6 +22625,9 @@ "com.github.openshift.api.machine.v1beta1.SecuritySettings": { "description": "SecuritySettings define the security type and the UEFI settings of the virtual machine.", "type": "object", + "required": [ + "securityType" + ], "properties": { "confidentialVM": { "description": "confidentialVM specifies the security configuration of the virtual machine. For more information regarding Confidential VMs, please refer to: https://learn.microsoft.com/azure/confidential-computing/confidential-vm-overview", @@ -22160,7 +22667,7 @@ "type": "object", "properties": { "maxPrice": { - "description": "MaxPrice defines the maximum price the user is willing to pay for Spot VM instances", + "description": "maxPrice defines the maximum price the user is willing to pay for Spot VM instances", "$ref": "#/definitions/io.k8s.apimachinery.pkg.api.resource.Quantity" } } @@ -22174,12 +22681,12 @@ ], "properties": { "name": { - "description": "Name of the tag", + "description": "name of the tag", "type": "string", "default": "" }, "value": { - "description": "Value of the tag", + "description": "value of the tag", "type": "string", "default": "" } @@ -22188,6 +22695,9 @@ "com.github.openshift.api.machine.v1beta1.TrustedLaunch": { "description": "TrustedLaunch defines the UEFI settings for the virtual machine.", "type": "object", + "required": [ + "uefiSettings" + ], "properties": { "uefiSettings": { "description": "uefiSettings specifies the security settings like secure boot and vTPM used while creating the virtual machine.", @@ -22248,6 +22758,31 @@ } } }, + "com.github.openshift.api.machine.v1beta1.VSphereDisk": { + "description": "VSphereDisk describes additional disks for vSphere.", + "type": "object", + "required": [ + "name", + "sizeGiB" + ], + "properties": { + "name": { + "description": "name is used to identify the disk definition. name is required needs to be unique so that it can be used to clearly identify purpose of the disk. It must be at most 80 characters in length and must consist only of alphanumeric characters, hyphens and underscores, and must start and end with an alphanumeric character.", + "type": "string", + "default": "" + }, + "provisioningMode": { + "description": "provisioningMode is an optional field that specifies the provisioning type to be used by this vSphere data disk. Allowed values are \"Thin\", \"Thick\", \"EagerlyZeroed\", and omitted. When set to Thin, the disk will be made using thin provisioning allocating the bare minimum space. When set to Thick, the full disk size will be allocated when disk is created. When set to EagerlyZeroed, the disk will be created using eager zero provisioning. An eager zeroed thick disk has all space allocated and wiped clean of any previous contents on the physical media at creation time. 
Such disks may take longer time during creation compared to other disk formats. When omitted, no setting will be applied to the data disk and the provisioning mode for the disk will be determined by the default storage policy configured for the datastore in vSphere.", + "type": "string" + }, + "sizeGiB": { + "description": "sizeGiB is the size of the disk in GiB. The maximum supported size 16384 GiB.", + "type": "integer", + "format": "int32", + "default": 0 + } + } + }, "com.github.openshift.api.machine.v1beta1.VSphereMachineProviderSpec": { "description": "VSphereMachineProviderSpec is the type that will be embedded in a Machine.Spec.ProviderSpec field for an VSphere virtual machine. It is used by the vSphere machine actuator to create a single Machine. Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer).", "type": "object", @@ -22261,15 +22796,27 @@ "type": "string" }, "cloneMode": { - "description": "CloneMode specifies the type of clone operation. The LinkedClone mode is only support for templates that have at least one snapshot. If the template has no snapshots, then CloneMode defaults to FullClone. When LinkedClone mode is enabled the DiskGiB field is ignored as it is not possible to expand disks of linked clones. Defaults to FullClone. When using LinkedClone, if no snapshots exist for the source template, falls back to FullClone.", + "description": "cloneMode specifies the type of clone operation. The LinkedClone mode is only support for templates that have at least one snapshot. If the template has no snapshots, then CloneMode defaults to FullClone. When LinkedClone mode is enabled the DiskGiB field is ignored as it is not possible to expand disks of linked clones. Defaults to FullClone. When using LinkedClone, if no snapshots exist for the source template, falls back to FullClone.", "type": "string" }, "credentialsSecret": { - "description": "CredentialsSecret is a reference to the secret with vSphere credentials.", + "description": "credentialsSecret is a reference to the secret with vSphere credentials.", "$ref": "#/definitions/io.k8s.api.core.v1.LocalObjectReference" }, + "dataDisks": { + "description": "dataDisks is a list of non OS disks to be created and attached to the VM. The max number of disk allowed to be attached is currently 29. The max number of disks for any controller is 30, but VM template will always have OS disk so that will leave 29 disks on any controller type.", + "type": "array", + "items": { + "default": {}, + "$ref": "#/definitions/com.github.openshift.api.machine.v1beta1.VSphereDisk" + }, + "x-kubernetes-list-map-keys": [ + "name" + ], + "x-kubernetes-list-type": "map" + }, "diskGiB": { - "description": "DiskGiB is the size of a virtual machine's disk, in GiB. Defaults to the analogue property value in the template from which this machine is cloned. This parameter will be ignored if 'LinkedClone' CloneMode is set.", + "description": "diskGiB is the size of a virtual machine's disk, in GiB. Defaults to the analogue property value in the template from which this machine is cloned. This parameter will be ignored if 'LinkedClone' CloneMode is set.", "type": "integer", "format": "int32" }, @@ -22278,7 +22825,7 @@ "type": "string" }, "memoryMiB": { - "description": "MemoryMiB is the size of a virtual machine's memory, in MiB. 
Defaults to the analogue property value in the template from which this machine is cloned.", + "description": "memoryMiB is the size of a virtual machine's memory, in MiB. Defaults to the analogue property value in the template from which this machine is cloned.", "type": "integer", "format": "int64" }, @@ -22287,12 +22834,12 @@ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta" }, "network": { - "description": "Network is the network configuration for this machine's VM.", + "description": "network is the network configuration for this machine's VM.", "default": {}, "$ref": "#/definitions/com.github.openshift.api.machine.v1beta1.NetworkSpec" }, "numCPUs": { - "description": "NumCPUs is the number of virtual processors in a virtual machine. Defaults to the analogue property value in the template from which this machine is cloned.", + "description": "numCPUs is the number of virtual processors in a virtual machine. Defaults to the analogue property value in the template from which this machine is cloned.", "type": "integer", "format": "int32" }, @@ -22302,7 +22849,7 @@ "format": "int32" }, "snapshot": { - "description": "Snapshot is the name of the snapshot from which the VM was cloned", + "description": "snapshot is the name of the snapshot from which the VM was cloned", "type": "string", "default": "" }, @@ -22315,16 +22862,16 @@ } }, "template": { - "description": "Template is the name, inventory path, or instance UUID of the template used to clone new machines.", + "description": "template is the name, inventory path, or instance UUID of the template used to clone new machines.", "type": "string", "default": "" }, "userDataSecret": { - "description": "UserDataSecret contains a local reference to a secret that contains the UserData to apply to the instance", + "description": "userDataSecret contains a local reference to a secret that contains the UserData to apply to the instance", "$ref": "#/definitions/io.k8s.api.core.v1.LocalObjectReference" }, "workspace": { - "description": "Workspace describes the workspace to use for the machine.", + "description": "workspace describes the workspace to use for the machine.", "$ref": "#/definitions/com.github.openshift.api.machine.v1beta1.Workspace" } } @@ -22338,19 +22885,23 @@ "type": "string" }, "conditions": { - "description": "Conditions is a set of conditions associated with the Machine to indicate errors or other status", + "description": "conditions is a set of conditions associated with the Machine to indicate errors or other status", "type": "array", "items": { "default": {}, "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Condition" - } + }, + "x-kubernetes-list-map-keys": [ + "type" + ], + "x-kubernetes-list-type": "map" }, "instanceId": { - "description": "InstanceID is the ID of the instance in VSphere", + "description": "instanceId is the ID of the instance in VSphere", "type": "string" }, "instanceState": { - "description": "InstanceState is the provisioning state of the VSphere Instance.", + "description": "instanceState is the provisioning state of the VSphere Instance.", "type": "string" }, "kind": { @@ -22358,7 +22909,7 @@ "type": "string" }, "taskRef": { - "description": "TaskRef is a managed object reference to a Task related to the machine. This value is set automatically at runtime and should not be set or modified by users.", + "description": "taskRef is a managed object reference to a Task related to the machine. 
This value is set automatically at runtime and should not be set or modified by users.", "type": "string" } } @@ -22368,23 +22919,27 @@ "type": "object", "properties": { "datacenter": { - "description": "Datacenter is the datacenter in which VMs are created/located.", + "description": "datacenter is the datacenter in which VMs are created/located.", "type": "string" }, "datastore": { - "description": "Datastore is the datastore in which VMs are created/located.", + "description": "datastore is the datastore in which VMs are created/located.", "type": "string" }, "folder": { - "description": "Folder is the folder in which VMs are created/located.", + "description": "folder is the folder in which VMs are created/located.", "type": "string" }, "resourcePool": { - "description": "ResourcePool is the resource pool in which VMs are created/located.", + "description": "resourcePool is the resource pool in which VMs are created/located.", "type": "string" }, "server": { - "description": "Server is the IP address or FQDN of the vSphere endpoint.", + "description": "server is the IP address or FQDN of the vSphere endpoint.", + "type": "string" + }, + "vmGroup": { + "description": "vmGroup is the cluster vm group in which virtual machines will be added for vm host group based zonal.", "type": "string" } } @@ -22621,6 +23176,7 @@ "description": "MachineConfigNodeStatus holds the reported information on a particular machine config node.", "type": "object", "required": [ + "observedGeneration", "configVersion" ], "properties": { @@ -22634,9 +23190,7 @@ "x-kubernetes-list-map-keys": [ "type" ], - "x-kubernetes-list-type": "map", - "x-kubernetes-patch-merge-key": "type", - "x-kubernetes-patch-strategy": "merge" + "x-kubernetes-list-type": "map" }, "configVersion": { "description": "configVersion describes the current and desired machine config for this node. The current version represents the current machine config for the node and is updated after a successful update. The desired version represents the machine config the node will attempt to update to. This desired machine config has been compared to the current machine config and has been validated by the machine config operator as one that is valid and that exists.", @@ -22860,9 +23414,7 @@ "x-kubernetes-list-map-keys": [ "type" ], - "x-kubernetes-list-type": "map", - "x-kubernetes-patch-merge-key": "type", - "x-kubernetes-patch-strategy": "merge" + "x-kubernetes-list-type": "map" }, "finalImagePullspec": { "description": "finalImagePushSpec describes the fully qualified pushspec produced by this build that the final image can be. 
Must be in sha format.", @@ -22890,7 +23442,7 @@ "$ref": "#/definitions/com.github.openshift.api.machineconfiguration.v1alpha1.ObjectReference" }, "imageBuilderType": { - "description": "ImageBuilderType describes the image builder set in the MachineOSConfig", + "description": "imageBuilderType describes the image builder set in the MachineOSConfig", "type": "string", "default": "" } @@ -23005,6 +23557,9 @@ "com.github.openshift.api.machineconfiguration.v1alpha1.MachineOSConfigStatus": { "description": "MachineOSConfigStatus describes the status this config object and relates it to the builds associated with this MachineOSConfig", "type": "object", + "required": [ + "observedGeneration" + ], "properties": { "conditions": { "description": "conditions are state related conditions for the config.", @@ -23016,9 +23571,7 @@ "x-kubernetes-list-map-keys": [ "type" ], - "x-kubernetes-list-type": "map", - "x-kubernetes-patch-merge-key": "type", - "x-kubernetes-patch-strategy": "merge" + "x-kubernetes-list-type": "map" }, "currentImagePullspec": { "description": "currentImagePullspec is the fully qualified image pull spec used by the MCO to pull down the new OSImage. This must include sha256.", @@ -23202,9 +23755,7 @@ "x-kubernetes-list-map-keys": [ "type" ], - "x-kubernetes-list-type": "map", - "x-kubernetes-patch-merge-key": "type", - "x-kubernetes-patch-strategy": "merge" + "x-kubernetes-list-type": "map" } } }, @@ -23257,9 +23808,6 @@ "com.github.openshift.api.monitoring.v1.AlertRelabelConfigList": { "description": "AlertRelabelConfigList is a list of AlertRelabelConfigs.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "type": "object", - "required": [ - "items" - ], "properties": { "apiVersion": { "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", @@ -23269,6 +23817,7 @@ "description": "items is a list of AlertRelabelConfigs.", "type": "array", "items": { + "default": {}, "$ref": "#/definitions/com.github.openshift.api.monitoring.v1.AlertRelabelConfig" } }, @@ -23310,7 +23859,11 @@ "items": { "default": {}, "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Condition" - } + }, + "x-kubernetes-list-map-keys": [ + "type" + ], + "x-kubernetes-list-type": "map" } } }, @@ -23349,9 +23902,6 @@ "com.github.openshift.api.monitoring.v1.AlertingRuleList": { "description": "AlertingRuleList is a list of AlertingRule objects.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "type": "object", - "required": [ - "items" - ], "properties": { "apiVersion": { "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", @@ -23545,7 +24095,7 @@ "type": "string" }, "clusterNetworks": { - "description": "ClusterNetworks is a list of ClusterNetwork objects that defines the global overlay network's L3 space by specifying a set of CIDR and netmasks that the SDN can allocate addresses from.", + "description": "clusterNetworks is a list of ClusterNetwork objects that defines the global overlay network's L3 space by specifying a set of CIDR and netmasks that the SDN can allocate addresses from.", "type": "array", "items": { "default": {}, @@ -23553,7 +24103,7 @@ } }, "hostsubnetlength": { - "description": "HostSubnetLength is the number of bits of network to allocate to each node. eg, 8 would mean that each node would have a /24 slice of the overlay network for its pods", + "description": "hostsubnetlength is the number of bits of network to allocate to each node. eg, 8 would mean that each node would have a /24 slice of the overlay network for its pods", "type": "integer", "format": "int64" }, @@ -23567,25 +24117,25 @@ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta" }, "mtu": { - "description": "MTU is the MTU for the overlay network. This should be 50 less than the MTU of the network connecting the nodes. It is normally autodetected by the cluster network operator.", + "description": "mtu is the MTU for the overlay network. This should be 50 less than the MTU of the network connecting the nodes. It is normally autodetected by the cluster network operator.", "type": "integer", "format": "int64" }, "network": { - "description": "Network is a CIDR string specifying the global overlay network's L3 space", + "description": "network is a CIDR string specifying the global overlay network's L3 space", "type": "string" }, "pluginName": { - "description": "PluginName is the name of the network plugin being used", + "description": "pluginName is the name of the network plugin being used", "type": "string" }, "serviceNetwork": { - "description": "ServiceNetwork is the CIDR range that Service IP addresses are allocated from", + "description": "serviceNetwork is the CIDR range that Service IP addresses are allocated from", "type": "string", "default": "" }, "vxlanPort": { - "description": "VXLANPort sets the VXLAN destination port used by the cluster. It is set by the master configuration file on startup and cannot be edited manually. Valid values for VXLANPort are integers 1-65535 inclusive and if unset defaults to 4789. Changing VXLANPort allows users to resolve issues between openshift SDN and other software trying to use the same VXLAN destination port.", + "description": "vxlanPort sets the VXLAN destination port used by the cluster. It is set by the master configuration file on startup and cannot be edited manually. Valid values for VXLANPort are integers 1-65535 inclusive and if unset defaults to 4789. Changing VXLANPort allows users to resolve issues between openshift SDN and other software trying to use the same VXLAN destination port.", "type": "integer", "format": "int64" } @@ -23605,7 +24155,7 @@ "default": "" }, "hostSubnetLength": { - "description": "HostSubnetLength is the number of bits of the accompanying CIDR address to allocate to each node. eg, 8 would mean that each node would have a /24 slice of the overlay network for its pods.", + "description": "hostSubnetLength is the number of bits of the accompanying CIDR address to allocate to each node. 
eg, 8 would mean that each node would have a /24 slice of the overlay network for its pods.", "type": "integer", "format": "int64", "default": 0 @@ -23624,7 +24174,7 @@ "type": "string" }, "items": { - "description": "Items is the list of cluster networks", + "description": "items is the list of cluster networks", "type": "array", "items": { "default": {}, @@ -23704,11 +24254,11 @@ "type": "object", "properties": { "cidrSelector": { - "description": "CIDRSelector is the CIDR range to allow/deny traffic to. If this is set, dnsName must be unset Ideally we would have liked to use the cidr openapi format for this property. But openshift-sdn only supports v4 while specifying the cidr format allows both v4 and v6 cidrs We are therefore using a regex pattern to validate instead.", + "description": "cidrSelector is the CIDR range to allow/deny traffic to. If this is set, dnsName must be unset Ideally we would have liked to use the cidr openapi format for this property. But openshift-sdn only supports v4 while specifying the cidr format allows both v4 and v6 cidrs We are therefore using a regex pattern to validate instead.", "type": "string" }, "dnsName": { - "description": "DNSName is the domain name to allow/deny traffic to. If this is set, cidrSelector must be unset", + "description": "dnsName is the domain name to allow/deny traffic to. If this is set, cidrSelector must be unset", "type": "string" } } @@ -23764,7 +24314,7 @@ "type": "string" }, "egressCIDRs": { - "description": "EgressCIDRs is the list of CIDR ranges available for automatically assigning egress IPs to this node from. If this field is set then EgressIPs should be treated as read-only.", + "description": "egressCIDRs is the list of CIDR ranges available for automatically assigning egress IPs to this node from. If this field is set then EgressIPs should be treated as read-only.", "type": "array", "items": { "type": "string", @@ -23772,7 +24322,7 @@ } }, "egressIPs": { - "description": "EgressIPs is the list of automatic egress IP addresses currently hosted by this node. If EgressCIDRs is empty, this can be set by hand; if EgressCIDRs is set then the master will overwrite the value here with its own allocation of egress IPs.", + "description": "egressIPs is the list of automatic egress IP addresses currently hosted by this node. If EgressCIDRs is empty, this can be set by hand; if EgressCIDRs is set then the master will overwrite the value here with its own allocation of egress IPs.", "type": "array", "items": { "type": "string", @@ -23780,12 +24330,12 @@ } }, "host": { - "description": "Host is the name of the node. (This is the same as the object's name, but both fields must be set.)", + "description": "host is the name of the node. 
(This is the same as the object's name, but both fields must be set.)", "type": "string", "default": "" }, "hostIP": { - "description": "HostIP is the IP address to be used as a VTEP by other nodes in the overlay network", + "description": "hostIP is the IP address to be used as a VTEP by other nodes in the overlay network", "type": "string", "default": "" }, @@ -23799,7 +24349,7 @@ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta" }, "subnet": { - "description": "Subnet is the CIDR range of the overlay network assigned to the node for its pods", + "description": "subnet is the CIDR range of the overlay network assigned to the node for its pods", "type": "string", "default": "" } @@ -23817,7 +24367,7 @@ "type": "string" }, "items": { - "description": "Items is the list of host subnets", + "description": "items is the list of host subnets", "type": "array", "items": { "default": {}, @@ -23848,7 +24398,7 @@ "type": "string" }, "egressIPs": { - "description": "EgressIPs is a list of reserved IPs that will be used as the source for external traffic coming from pods in this namespace. (If empty, external traffic will be masqueraded to Node IPs.)", + "description": "egressIPs is a list of reserved IPs that will be used as the source for external traffic coming from pods in this namespace. (If empty, external traffic will be masqueraded to Node IPs.)", "type": "array", "items": { "type": "string", @@ -23865,13 +24415,13 @@ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta" }, "netid": { - "description": "NetID is the network identifier of the network namespace assigned to each overlay network packet. This can be manipulated with the \"oc adm pod-network\" commands.", + "description": "netid is the network identifier of the network namespace assigned to each overlay network packet. This can be manipulated with the \"oc adm pod-network\" commands.", "type": "integer", "format": "int64", "default": 0 }, "netname": { - "description": "NetName is the name of the network namespace. (This is the same as the object's name, but both fields must be set.)", + "description": "netname is the name of the network namespace. (This is the same as the object's name, but both fields must be set.)", "type": "string", "default": "" } @@ -23889,7 +24439,7 @@ "type": "string" }, "items": { - "description": "Items is the list of net namespaces", + "description": "items is the list of net namespaces", "type": "array", "items": { "default": {}, @@ -24123,7 +24673,7 @@ } }, "mode": { - "description": "Mode depicts the mode that is used for the egress router. The default mode is \"Redirect\" and is the only supported mode currently.", + "description": "mode depicts the mode that is used for the egress router. 
The default mode is \"Redirect\" and is the only supported mode currently.", "type": "string", "default": "" }, @@ -24133,7 +24683,7 @@ "$ref": "#/definitions/com.github.openshift.api.networkoperator.v1.EgressRouterInterface" }, "redirect": { - "description": "Redirect represents the configuration parameters specific to redirect mode.", + "description": "redirect represents the configuration parameters specific to redirect mode.", "$ref": "#/definitions/com.github.openshift.api.networkoperator.v1.RedirectConfig" } } @@ -24148,12 +24698,12 @@ ], "properties": { "allowEscalation": { - "description": "AllowEscalation indicates whether you can request roles and their escalating resources", + "description": "allowEscalation indicates whether you can request roles and their escalating resources", "type": "boolean", "default": false }, "namespaces": { - "description": "Namespaces is the list of namespaces that can be referenced. * means any of them (including *)", + "description": "namespaces is the list of namespaces that can be referenced. * means any of them (including *)", "type": "array", "items": { "type": "string", @@ -24161,7 +24711,7 @@ } }, "roleNames": { - "description": "RoleNames is the list of cluster roles that can referenced. * means anything", + "description": "roleNames is the list of cluster roles that can referenced. * means anything", "type": "array", "items": { "type": "string", @@ -24179,20 +24729,20 @@ "type": "string" }, "authorizeToken": { - "description": "AuthorizeToken contains the token that authorized this token", + "description": "authorizeToken contains the token that authorized this token", "type": "string" }, "clientName": { - "description": "ClientName references the client that created this token.", + "description": "clientName references the client that created this token.", "type": "string" }, "expiresIn": { - "description": "ExpiresIn is the seconds from CreationTime before this token expires.", + "description": "expiresIn is the seconds from CreationTime before this token expires.", "type": "integer", "format": "int64" }, "inactivityTimeoutSeconds": { - "description": "InactivityTimeoutSeconds is the value in seconds, from the CreationTimestamp, after which this token can no longer be used. The value is automatically incremented when the token is used.", + "description": "inactivityTimeoutSeconds is the value in seconds, from the CreationTimestamp, after which this token can no longer be used. The value is automatically incremented when the token is used.", "type": "integer", "format": "int32" }, @@ -24206,15 +24756,15 @@ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta" }, "redirectURI": { - "description": "RedirectURI is the redirection associated with the token.", + "description": "redirectURI is the redirection associated with the token.", "type": "string" }, "refreshToken": { - "description": "RefreshToken is the value by which this token can be renewed. Can be blank.", + "description": "refreshToken is the value by which this token can be renewed. 
Can be blank.", "type": "string" }, "scopes": { - "description": "Scopes is an array of the requested scopes.", + "description": "scopes is an array of the requested scopes.", "type": "array", "items": { "type": "string", @@ -24222,11 +24772,11 @@ } }, "userName": { - "description": "UserName is the user name associated with this token", + "description": "userName is the user name associated with this token", "type": "string" }, "userUID": { - "description": "UserUID is the unique UID associated with this token", + "description": "userUID is the unique UID associated with this token", "type": "string" } } @@ -24243,7 +24793,7 @@ "type": "string" }, "items": { - "description": "Items is the list of OAuth access tokens", + "description": "items is the list of OAuth access tokens", "type": "array", "items": { "default": {}, @@ -24270,19 +24820,19 @@ "type": "string" }, "clientName": { - "description": "ClientName references the client that created this token.", + "description": "clientName references the client that created this token.", "type": "string" }, "codeChallenge": { - "description": "CodeChallenge is the optional code_challenge associated with this authorization code, as described in rfc7636", + "description": "codeChallenge is the optional code_challenge associated with this authorization code, as described in rfc7636", "type": "string" }, "codeChallengeMethod": { - "description": "CodeChallengeMethod is the optional code_challenge_method associated with this authorization code, as described in rfc7636", + "description": "codeChallengeMethod is the optional code_challenge_method associated with this authorization code, as described in rfc7636", "type": "string" }, "expiresIn": { - "description": "ExpiresIn is the seconds from CreationTime before this token expires.", + "description": "expiresIn is the seconds from CreationTime before this token expires.", "type": "integer", "format": "int64" }, @@ -24296,11 +24846,11 @@ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta" }, "redirectURI": { - "description": "RedirectURI is the redirection associated with the token.", + "description": "redirectURI is the redirection associated with the token.", "type": "string" }, "scopes": { - "description": "Scopes is an array of the requested scopes.", + "description": "scopes is an array of the requested scopes.", "type": "array", "items": { "type": "string", @@ -24308,15 +24858,15 @@ } }, "state": { - "description": "State data from request", + "description": "state data from request", "type": "string" }, "userName": { - "description": "UserName is the user name associated with this token", + "description": "userName is the user name associated with this token", "type": "string" }, "userUID": { - "description": "UserUID is the unique UID associated with this token. UserUID and UserName must both match for this token to be valid.", + "description": "userUID is the unique UID associated with this token. UserUID and UserName must both match for this token to be valid.", "type": "string" } } @@ -24333,7 +24883,7 @@ "type": "string" }, "items": { - "description": "Items is the list of OAuth authorization tokens", + "description": "items is the list of OAuth authorization tokens", "type": "array", "items": { "default": {}, @@ -24356,17 +24906,17 @@ "type": "object", "properties": { "accessTokenInactivityTimeoutSeconds": { - "description": "AccessTokenInactivityTimeoutSeconds overrides the default token inactivity timeout for tokens granted to this client. 
The value represents the maximum amount of time that can occur between consecutive uses of the token. Tokens become invalid if they are not used within this temporal window. The user will need to acquire a new token to regain access once a token times out. This value needs to be set only if the default set in configuration is not appropriate for this client. Valid values are: - 0: Tokens for this client never time out - X: Tokens time out if there is no activity for X seconds The current minimum allowed value for X is 300 (5 minutes)\n\nWARNING: existing tokens' timeout will not be affected (lowered) by changing this value", + "description": "accessTokenInactivityTimeoutSeconds overrides the default token inactivity timeout for tokens granted to this client. The value represents the maximum amount of time that can occur between consecutive uses of the token. Tokens become invalid if they are not used within this temporal window. The user will need to acquire a new token to regain access once a token times out. This value needs to be set only if the default set in configuration is not appropriate for this client. Valid values are: - 0: Tokens for this client never time out - X: Tokens time out if there is no activity for X seconds The current minimum allowed value for X is 300 (5 minutes)\n\nWARNING: existing tokens' timeout will not be affected (lowered) by changing this value", "type": "integer", "format": "int32" }, "accessTokenMaxAgeSeconds": { - "description": "AccessTokenMaxAgeSeconds overrides the default access token max age for tokens granted to this client. 0 means no expiration.", + "description": "accessTokenMaxAgeSeconds overrides the default access token max age for tokens granted to this client. 0 means no expiration.", "type": "integer", "format": "int32" }, "additionalSecrets": { - "description": "AdditionalSecrets holds other secrets that may be used to identify the client. This is useful for rotation and for service account token validation", + "description": "additionalSecrets holds other secrets that may be used to identify the client. This is useful for rotation and for service account token validation", "type": "array", "items": { "type": "string", @@ -24378,7 +24928,7 @@ "type": "string" }, "grantMethod": { - "description": "GrantMethod is a required field which determines how to handle grants for this client. Valid grant handling methods are:\n - auto: always approves grant requests, useful for trusted clients\n - prompt: prompts the end user for approval of grant requests, useful for third-party clients", + "description": "grantMethod is a required field which determines how to handle grants for this client. 
Valid grant handling methods are:\n - auto: always approves grant requests, useful for trusted clients\n - prompt: prompts the end user for approval of grant requests, useful for third-party clients", "type": "string" }, "kind": { @@ -24391,7 +24941,7 @@ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta" }, "redirectURIs": { - "description": "RedirectURIs is the valid redirection URIs associated with a client", + "description": "redirectURIs is the valid redirection URIs associated with a client", "type": "array", "items": { "type": "string", @@ -24400,11 +24950,11 @@ "x-kubernetes-patch-strategy": "merge" }, "respondWithChallenges": { - "description": "RespondWithChallenges indicates whether the client wants authentication needed responses made in the form of challenges instead of redirects", + "description": "respondWithChallenges indicates whether the client wants authentication needed responses made in the form of challenges instead of redirects", "type": "boolean" }, "scopeRestrictions": { - "description": "ScopeRestrictions describes which scopes this client can request. Each requested scope is checked against each restriction. If any restriction matches, then the scope is allowed. If no restriction matches, then the scope is denied.", + "description": "scopeRestrictions describes which scopes this client can request. Each requested scope is checked against each restriction. If any restriction matches, then the scope is allowed. If no restriction matches, then the scope is denied.", "type": "array", "items": { "default": {}, @@ -24412,7 +24962,7 @@ } }, "secret": { - "description": "Secret is the unique secret associated with a client", + "description": "secret is the unique secret associated with a client", "type": "string" } } @@ -24426,7 +24976,7 @@ "type": "string" }, "clientName": { - "description": "ClientName references the client that created this authorization", + "description": "clientName references the client that created this authorization", "type": "string" }, "kind": { @@ -24439,7 +24989,7 @@ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta" }, "scopes": { - "description": "Scopes is an array of the granted scopes.", + "description": "scopes is an array of the granted scopes.", "type": "array", "items": { "type": "string", @@ -24447,11 +24997,11 @@ } }, "userName": { - "description": "UserName is the user name that authorized this client", + "description": "userName is the user name that authorized this client", "type": "string" }, "userUID": { - "description": "UserUID is the unique UID associated with this authorization. UserUID and UserName must both match for this authorization to be valid.", + "description": "userUID is the unique UID associated with this authorization. 
UserUID and UserName must both match for this authorization to be valid.", "type": "string" } } @@ -24468,7 +25018,7 @@ "type": "string" }, "items": { - "description": "Items is the list of OAuth client authorizations", + "description": "items is the list of OAuth client authorizations", "type": "array", "items": { "default": {}, @@ -24498,7 +25048,7 @@ "type": "string" }, "items": { - "description": "Items is the list of OAuth clients", + "description": "items is the list of OAuth clients", "type": "array", "items": { "default": {}, @@ -24571,7 +25121,7 @@ "type": "object", "properties": { "clusterRole": { - "description": "ClusterRole describes a set of restrictions for cluster role scoping.", + "description": "clusterRole describes a set of restrictions for cluster role scoping.", "$ref": "#/definitions/com.github.openshift.api.oauth.v1.ClusterRoleScopeRestriction" }, "literals": { @@ -24593,20 +25143,20 @@ "type": "string" }, "authorizeToken": { - "description": "AuthorizeToken contains the token that authorized this token", + "description": "authorizeToken contains the token that authorized this token", "type": "string" }, "clientName": { - "description": "ClientName references the client that created this token.", + "description": "clientName references the client that created this token.", "type": "string" }, "expiresIn": { - "description": "ExpiresIn is the seconds from CreationTime before this token expires.", + "description": "expiresIn is the seconds from CreationTime before this token expires.", "type": "integer", "format": "int64" }, "inactivityTimeoutSeconds": { - "description": "InactivityTimeoutSeconds is the value in seconds, from the CreationTimestamp, after which this token can no longer be used. The value is automatically incremented when the token is used.", + "description": "inactivityTimeoutSeconds is the value in seconds, from the CreationTimestamp, after which this token can no longer be used. The value is automatically incremented when the token is used.", "type": "integer", "format": "int32" }, @@ -24620,15 +25170,15 @@ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta" }, "redirectURI": { - "description": "RedirectURI is the redirection associated with the token.", + "description": "redirectURI is the redirection associated with the token.", "type": "string" }, "refreshToken": { - "description": "RefreshToken is the value by which this token can be renewed. Can be blank.", + "description": "refreshToken is the value by which this token can be renewed. 
Can be blank.", "type": "string" }, "scopes": { - "description": "Scopes is an array of the requested scopes.", + "description": "scopes is an array of the requested scopes.", "type": "array", "items": { "type": "string", @@ -24636,11 +25186,11 @@ } }, "userName": { - "description": "UserName is the user name associated with this token", + "description": "userName is the user name associated with this token", "type": "string" }, "userUID": { - "description": "UserUID is the unique UID associated with this token", + "description": "userUID is the unique UID associated with this token", "type": "string" } } @@ -24842,12 +25392,12 @@ ], "properties": { "cidr": { - "description": "CIDR defines the total range of a cluster networks address space.", + "description": "cidr defines the total range of a cluster networks address space.", "type": "string", "default": "" }, "hostSubnetLength": { - "description": "HostSubnetLength is the number of bits of the accompanying CIDR address to allocate to each node. eg, 8 would mean that each node would have a /24 slice of the overlay network for its pod.", + "description": "hostSubnetLength is the number of bits of the accompanying CIDR address to allocate to each node. eg, 8 would mean that each node would have a /24 slice of the overlay network for its pod.", "type": "integer", "format": "int64", "default": 0 @@ -24946,12 +25496,12 @@ ], "properties": { "format": { - "description": "Format is the format of the name to be built for the system component", + "description": "format is the format of the name to be built for the system component", "type": "string", "default": "" }, "latest": { - "description": "Latest determines if the latest tag will be pulled from the registry", + "description": "latest determines if the latest tag will be pulled from the registry", "type": "boolean", "default": false } @@ -25400,12 +25950,12 @@ ], "properties": { "domainName": { - "description": "DomainName specifies a domain name for the registry In case the registry use non-standard (80 or 443) port, the port should be included in the domain name as well.", + "description": "domainName specifies a domain name for the registry In case the registry use non-standard (80 or 443) port, the port should be included in the domain name as well.", "type": "string", "default": "" }, "insecure": { - "description": "Insecure indicates whether the registry is secure (https) or insecure (http) By default (if not specified) the registry is assumed as secure.", + "description": "insecure indicates whether the registry is secure (https) or insecure (http) By default (if not specified) the registry is assumed as secure.", "type": "boolean" } } @@ -25455,18 +26005,18 @@ ], "properties": { "mcsAllocatorRange": { - "description": "MCSAllocatorRange defines the range of MCS categories that will be assigned to namespaces. The format is \"/[,]\". The default is \"s0/2\" and will allocate from c0 -> c1023, which means a total of 535k labels are available (1024 choose 2 ~ 535k). If this value is changed after startup, new projects may receive labels that are already allocated to other projects. Prefix may be any valid SELinux set of terms (including user, role, and type), although leaving them as the default will allow the server to set them automatically.\n\nExamples: * s0:/2 - Allocate labels from s0:c0,c0 to s0:c511,c511 * s0:/2,512 - Allocate labels from s0:c0,c0,c0 to s0:c511,c511,511", + "description": "mcsAllocatorRange defines the range of MCS categories that will be assigned to namespaces. 
The format is \"/[,]\". The default is \"s0/2\" and will allocate from c0 -> c1023, which means a total of 535k labels are available (1024 choose 2 ~ 535k). If this value is changed after startup, new projects may receive labels that are already allocated to other projects. Prefix may be any valid SELinux set of terms (including user, role, and type), although leaving them as the default will allow the server to set them automatically.\n\nExamples: * s0:/2 - Allocate labels from s0:c0,c0 to s0:c511,c511 * s0:/2,512 - Allocate labels from s0:c0,c0,c0 to s0:c511,c511,511", "type": "string", "default": "" }, "mcsLabelsPerProject": { - "description": "MCSLabelsPerProject defines the number of labels that should be reserved per project. The default is 5 to match the default UID and MCS ranges (100k namespaces, 535k/5 labels).", + "description": "mcsLabelsPerProject defines the number of labels that should be reserved per project. The default is 5 to match the default UID and MCS ranges (100k namespaces, 535k/5 labels).", "type": "integer", "format": "int32", "default": 0 }, "uidAllocatorRange": { - "description": "UIDAllocatorRange defines the total set of Unix user IDs (UIDs) that will be allocated to projects automatically, and the size of the block each namespace gets. For example, 1000-1999/10 will allocate ten UIDs per namespace, and will be able to allocate up to 100 blocks before running out of space. The default is to allocate from 1 billion to 2 billion in 10k blocks (which is the expected size of the ranges container images will use once user namespaces are started).", + "description": "uidAllocatorRange defines the total set of Unix user IDs (UIDs) that will be allocated to projects automatically, and the size of the block each namespace gets. For example, 1000-1999/10 will allocate ten UIDs per namespace, and will be able to allocate up to 100 blocks before running out of space. The default is to allocate from 1 billion to 2 billion in 10k blocks (which is the expected size of the ranges container images will use once user namespaces are started).", "type": "string", "default": "" } @@ -25496,7 +26046,7 @@ ], "properties": { "signer": { - "description": "Signer holds the signing information used to automatically sign serving certificates. If this value is nil, then certs are not signed automatically.", + "description": "signer holds the signing information used to automatically sign serving certificates. 
If this value is nil, then certs are not signed automatically.", "$ref": "#/definitions/com.github.openshift.api.config.v1.CertInfo" } } @@ -25727,7 +26277,7 @@ "type": "string" }, "simpleMacvlanConfig": { - "description": "SimpleMacvlanConfig configures the macvlan interface in case of type:NetworkTypeSimpleMacvlan", + "description": "simpleMacvlanConfig configures the macvlan interface in case of type:NetworkTypeSimpleMacvlan", "$ref": "#/definitions/com.github.openshift.api.operator.v1.SimpleMacvlanConfig" }, "type": { @@ -25883,7 +26433,7 @@ "format": "int32" }, "oauthAPIServer": { - "description": "OAuthAPIServer holds status specific only to oauth-apiserver", + "description": "oauthAPIServer holds status specific only to oauth-apiserver", "default": {}, "$ref": "#/definitions/com.github.openshift.api.operator.v1.OAuthAPIServerStatus" }, @@ -25969,7 +26519,7 @@ "$ref": "#/definitions/com.github.openshift.api.operator.v1.IBMCloudCSIDriverConfigSpec" }, "vSphere": { - "description": "vsphere is used to configure the vsphere CSI driver.", + "description": "vSphere is used to configure the vsphere CSI driver.", "$ref": "#/definitions/com.github.openshift.api.operator.v1.VSphereCSIDriverConfigSpec" } }, @@ -26270,7 +26820,7 @@ ], "properties": { "credentialsMode": { - "description": "CredentialsMode allows informing CCO that it should not attempt to dynamically determine the root cloud credentials capabilities, and it should just run in the specified mode. It also allows putting the operator into \"manual\" mode if desired. Leaving the field in default mode runs CCO so that the cluster's cloud credentials will be dynamically probed for capabilities (on supported clouds/platforms). Supported modes:\n AWS/Azure/GCP: \"\" (Default), \"Mint\", \"Passthrough\", \"Manual\"\n Others: Do not set value as other platforms only support running in \"Passthrough\"", + "description": "credentialsMode allows informing CCO that it should not attempt to dynamically determine the root cloud credentials capabilities, and it should just run in the specified mode. It also allows putting the operator into \"manual\" mode if desired. Leaving the field in default mode runs CCO so that the cluster's cloud credentials will be dynamically probed for capabilities (on supported clouds/platforms). Supported modes:\n AWS/Azure/GCP: \"\" (Default), \"Mint\", \"Passthrough\", \"Manual\"\n Others: Do not set value as other platforms only support running in \"Passthrough\"", "type": "string" }, "logLevel": { @@ -26443,7 +26993,7 @@ "type": "string" }, "storageClassState": { - "description": "StorageClassState determines if CSI operator should create and manage storage classes. If this field value is empty or Managed - CSI operator will continuously reconcile storage class and create if necessary. If this field value is Unmanaged - CSI operator will not reconcile any previously created storage class. If this field value is Removed - CSI operator will delete the storage class it created previously. When omitted, this means the user has no opinion and the platform chooses a reasonable default, which is subject to change over time. The current default behaviour is Managed.", + "description": "storageClassState determines if CSI operator should create and manage storage classes. If this field value is empty or Managed - CSI operator will continuously reconcile storage class and create if necessary. If this field value is Unmanaged - CSI operator will not reconcile any previously created storage class. 
If this field value is Removed - CSI operator will delete the storage class it created previously. When omitted, this means the user has no opinion and the platform chooses a reasonable default, which is subject to change over time. The current default behaviour is Managed.", "type": "string" }, "unsupportedConfigOverrides": { @@ -26571,7 +27121,7 @@ "type": "string" }, "items": { - "description": "Items contains the items", + "description": "items contains the items", "type": "array", "items": { "default": {}, @@ -26589,6 +27139,26 @@ } } }, + "com.github.openshift.api.operator.v1.ConfigMapFileReference": { + "description": "ConfigMapFileReference references a specific file within a ConfigMap.", + "type": "object", + "required": [ + "name", + "key" + ], + "properties": { + "key": { + "description": "key is the logo key inside the referenced ConfigMap. Must consist only of alphanumeric characters, dashes (-), underscores (_), and periods (.). Must be at most 253 characters in length. Must end in a valid file extension. A valid file extension must consist of a period followed by 2 to 5 alpha characters.", + "type": "string", + "default": "" + }, + "name": { + "description": "name is the name of the ConfigMap. name is a required field. Must consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character. Must be at most 253 characters in length.", + "type": "string", + "default": "" + } + } + }, "com.github.openshift.api.operator.v1.ConfigSpec": { "type": "object", "required": [ @@ -26723,7 +27293,7 @@ } }, "com.github.openshift.api.operator.v1.ConsoleCustomization": { - "description": "ConsoleCustomization defines a list of optional configuration for the console UI.", + "description": "ConsoleCustomization defines a list of optional configuration for the console UI. Ensure that Logos and CustomLogoFile cannot be set at the same time.", "type": "object", "properties": { "addPage": { @@ -26748,7 +27318,7 @@ "x-kubernetes-list-type": "map" }, "customLogoFile": { - "description": "customLogoFile replaces the default OpenShift logo in the masthead and about dialog. It is a reference to a ConfigMap in the openshift-config namespace. This can be created with a command like 'oc create configmap custom-logo --from-file=/path/to/file -n openshift-config'. Image size must be less than 1 MB due to constraints on the ConfigMap size. The ConfigMap key should include a file extension so that the console serves the file with the correct MIME type. Recommended logo specifications: Dimensions: Max height of 68px and max width of 200px SVG format preferred", + "description": "customLogoFile replaces the default OpenShift logo in the masthead and about dialog. It is a reference to a Only one of customLogoFile or logos can be set at a time. ConfigMap in the openshift-config namespace. This can be created with a command like 'oc create configmap custom-logo --from-file=/path/to/file -n openshift-config'. Image size must be less than 1 MB due to constraints on the ConfigMap size. The ConfigMap key should include a file extension so that the console serves the file with the correct MIME type. The recommended file format for the logo is SVG, but other file formats are allowed if supported by the browser. 
Deprecated: Use logos instead.", "default": {}, "$ref": "#/definitions/com.github.openshift.api.config.v1.ConfigMapFileReference" }, @@ -26765,6 +27335,18 @@ "description": "documentationBaseURL links to external documentation are shown in various sections of the web console. Providing documentationBaseURL will override the default documentation URL. Invalid value will prevent a console rollout.", "type": "string" }, + "logos": { + "description": "logos is used to replace the OpenShift Masthead and Favicon logos in the console UI with custom logos. logos is an optional field that allows a list of logos. Only one of logos or customLogoFile can be set at a time. If logos is set, customLogoFile must be unset. When specified, there must be at least one entry and no more than 2 entries. Each type must appear only once in the list.", + "type": "array", + "items": { + "default": {}, + "$ref": "#/definitions/com.github.openshift.api.operator.v1.Logo" + }, + "x-kubernetes-list-map-keys": [ + "type" + ], + "x-kubernetes-list-type": "map" + }, "perspectives": { "description": "perspectives allows enabling/disabling of perspective(s) that user can see in the Perspective switcher dropdown.", "type": "array", @@ -27169,7 +27751,7 @@ ], "properties": { "openshiftSDNConfig": { - "description": "openShiftSDNConfig was previously used to configure the openshift-sdn plugin. DEPRECATED: OpenShift SDN is no longer supported.", + "description": "openshiftSDNConfig was previously used to configure the openshift-sdn plugin. DEPRECATED: OpenShift SDN is no longer supported.", "$ref": "#/definitions/com.github.openshift.api.operator.v1.OpenShiftSDNConfig" }, "ovnKubernetesConfig": { @@ -27192,7 +27774,7 @@ ], "properties": { "id": { - "description": "ID is an identifier used in the URL to enable deep linking in console. ID is required and must have 1-32 URL safe (A-Z, a-z, 0-9, - and _) characters.", + "description": "id is an identifier used in the URL to enable deep linking in console. ID is required and must have 1-32 URL safe (A-Z, a-z, 0-9, - and _) characters.", "type": "string", "default": "" }, @@ -27228,7 +27810,7 @@ ], "properties": { "id": { - "description": "ID is an identifier used in the URL to enable deep linking in console. ID is required and must have 1-32 URL safe (A-Z, a-z, 0-9, - and _) characters.", + "description": "id is an identifier used in the URL to enable deep linking in console. ID is required and must have 1-32 URL safe (A-Z, a-z, 0-9, - and _) characters.", "type": "string", "default": "" }, @@ -27269,6 +27851,9 @@ "com.github.openshift.api.operator.v1.DeveloperConsoleCatalogTypes": { "description": "DeveloperConsoleCatalogTypes defines the state of the sub-catalog types.", "type": "object", + "required": [ + "state" + ], "properties": { "disabled": { "description": "disabled is a list of developer catalog types (sub-catalogs IDs) that are not shown to users. Types (sub-catalogs) are added via console plugins, the available types (sub-catalog IDs) are available in the console on the cluster configuration page, or when editing the YAML in the console. 
Example: \"Devfile\", \"HelmChart\", \"BuilderImage\" If the list is empty or all the available sub-catalog types are added, then the complete developer catalog should be hidden.", @@ -27400,7 +27985,7 @@ "type": "string" }, "items": { - "description": "Items contains the items", + "description": "items contains the items", "type": "array", "items": { "default": {}, @@ -27593,6 +28178,27 @@ } } }, + "com.github.openshift.api.operator.v1.FileReferenceSource": { + "description": "FileReferenceSource is used by the console to locate the specified file containing a custom logo.", + "type": "object", + "required": [ + "from" + ], + "properties": { + "configMap": { + "description": "configMap specifies the ConfigMap sourcing details such as the name of the ConfigMap and the key for the file. The ConfigMap must exist in the openshift-config namespace. Required when from is \"ConfigMap\", and forbidden otherwise.", + "$ref": "#/definitions/com.github.openshift.api.operator.v1.ConfigMapFileReference" + }, + "from": { + "description": "from is a required field to specify the source type of the file reference. Allowed values are ConfigMap. When set to ConfigMap, the file will be sourced from a ConfigMap in the openshift-config namespace. The configMap field must be set when from is set to ConfigMap.\n\nPossible enum values:\n - `\"ConfigMap\"` represents a ConfigMap source.", + "type": "string", + "default": "", + "enum": [ + "ConfigMap" + ] + } + } + }, "com.github.openshift.api.operator.v1.ForwardPlugin": { "description": "ForwardPlugin defines a schema for configuring the CoreDNS forward plugin.", "type": "object", @@ -27679,7 +28285,7 @@ "type": "object", "properties": { "ipForwarding": { - "description": "IPForwarding controls IP forwarding for all traffic on OVN-Kubernetes managed interfaces (such as br-ex). By default this is set to Restricted, and Kubernetes related traffic is still forwarded appropriately, but other IP traffic will not be routed by the OCP node. If there is a desire to allow the host to forward traffic across OVN-Kubernetes managed interfaces, then set this field to \"Global\". The supported values are \"Restricted\" and \"Global\".", + "description": "ipForwarding controls IP forwarding for all traffic on OVN-Kubernetes managed interfaces (such as br-ex). By default this is set to Restricted, and Kubernetes related traffic is still forwarded appropriately, but other IP traffic will not be routed by the OCP node. If there is a desire to allow the host to forward traffic across OVN-Kubernetes managed interfaces, then set this field to \"Global\". The supported values are \"Restricted\" and \"Global\".", "type": "string" }, "ipv4": { @@ -27693,7 +28299,7 @@ "$ref": "#/definitions/com.github.openshift.api.operator.v1.IPv6GatewayConfig" }, "routingViaHost": { - "description": "RoutingViaHost allows pod egress traffic to exit via the ovn-k8s-mp0 management port into the host before sending it out. If this is not set, traffic will always egress directly from OVN to outside without touching the host stack. Setting this to true means hardware offload will not be supported. Default is false if GatewayConfig is specified.", + "description": "routingViaHost allows pod egress traffic to exit via the ovn-k8s-mp0 management port into the host before sending it out. If this is not set, traffic will always egress directly from OVN to outside without touching the host stack. Setting this to true means hardware offload will not be supported. 
Default is false if GatewayConfig is specified.", "type": "boolean" } } @@ -27875,7 +28481,7 @@ ], "properties": { "hybridClusterNetwork": { - "description": "HybridClusterNetwork defines a network space given to nodes on an additional overlay network.", + "description": "hybridClusterNetwork defines a network space given to nodes on an additional overlay network.", "type": "array", "items": { "default": {}, @@ -27884,7 +28490,7 @@ "x-kubernetes-list-type": "atomic" }, "hybridOverlayVXLANPort": { - "description": "HybridOverlayVXLANPort defines the VXLAN port number to be used by the additional overlay network. Default is 4789", + "description": "hybridOverlayVXLANPort defines the VXLAN port number to be used by the additional overlay network. Default is 4789", "type": "integer", "format": "int64" } @@ -27922,11 +28528,11 @@ ], "properties": { "staticIPAMConfig": { - "description": "StaticIPAMConfig configures the static IP address in case of type:IPAMTypeStatic", + "description": "staticIPAMConfig configures the static IP address in case of type:IPAMTypeStatic", "$ref": "#/definitions/com.github.openshift.api.operator.v1.StaticIPAMConfig" }, "type": { - "description": "Type is the type of IPAM module will be used for IP Address Management(IPAM). The supported values are IPAMTypeDHCP, IPAMTypeStatic", + "description": "type is the type of IPAM module will be used for IP Address Management(IPAM). The supported values are IPAMTypeDHCP, IPAMTypeStatic", "type": "string", "default": "" } @@ -27949,10 +28555,32 @@ "com.github.openshift.api.operator.v1.IPsecConfig": { "type": "object", "properties": { + "full": { + "description": "full defines configuration parameters for the IPsec `Full` mode. This is permitted only when mode is configured with `Full`, and forbidden otherwise.", + "$ref": "#/definitions/com.github.openshift.api.operator.v1.IPsecFullModeConfig" + }, "mode": { "description": "mode defines the behaviour of the ipsec configuration within the platform. Valid values are `Disabled`, `External` and `Full`. When 'Disabled', ipsec will not be enabled at the node level. When 'External', ipsec is enabled on the node level but requires the user to configure the secure communication parameters. This mode is for external secure communications and the configuration can be done using the k8s-nmstate operator. When 'Full', ipsec is configured on the node level and inter-pod secure communication within the cluster is configured. Note with `Full`, if ipsec is desired for communication with external (to the cluster) entities (such as storage arrays), this is left to the user to configure.", "type": "string" } + }, + "x-kubernetes-unions": [ + { + "discriminator": "mode", + "fields-to-discriminateBy": { + "full": "Full" + } + } + ] + }, + "com.github.openshift.api.operator.v1.IPsecFullModeConfig": { + "description": "IPsecFullModeConfig defines configuration parameters for the IPsec `Full` mode.", + "type": "object", + "properties": { + "encapsulation": { + "description": "encapsulation option to configure libreswan on how inter-pod traffic across nodes are encapsulated to handle NAT traversal. When configured it uses UDP port 4500 for the encapsulation. Valid values are Always, Auto and omitted. Always means enable UDP encapsulation regardless of whether NAT is detected. Auto means enable UDP encapsulation based on the detection of NAT. When omitted, this means no opinion and the platform is left to choose a reasonable default, which is subject to change over time. 
The current default is Auto.", + "type": "string" + } } }, "com.github.openshift.api.operator.v1.IPv4GatewayConfig": { @@ -27992,7 +28620,7 @@ "type": "object", "properties": { "internalJoinSubnet": { - "description": "internalJoinSubnet is a v6 subnet used internally by ovn-kubernetes in case the default one is being already used by something else. It must not overlap with any other subnet being used by OpenShift or by the node network. The size of the subnet must be larger than the number of nodes. The value cannot be changed after installation. The subnet must be large enough to accomadate one IP per node in your cluster The current default value is fd98::/48 The value must be in proper IPV6 CIDR format Note that IPV6 dual addresses are not permitted", + "description": "internalJoinSubnet is a v6 subnet used internally by ovn-kubernetes in case the default one is being already used by something else. It must not overlap with any other subnet being used by OpenShift or by the node network. The size of the subnet must be larger than the number of nodes. The value cannot be changed after installation. The subnet must be large enough to accomadate one IP per node in your cluster The current default value is fd98::/64 The value must be in proper IPV6 CIDR format Note that IPV6 dual addresses are not permitted", "type": "string" }, "internalTransitSwitchSubnet": { @@ -28272,7 +28900,7 @@ "type": "object", "properties": { "format": { - "description": "format specifies the format for the injected HTTP header's value. This field has no effect unless name is specified. For the HAProxy-based ingress controller implementation, this format uses the same syntax as the HTTP log format. If the field is empty, the default value is \"%{+X}o\\ %ci:%cp_%fi:%fp_%Ts_%rt:%pid\"; see the corresponding HAProxy documentation: http://cbonte.github.io/haproxy-dconv/2.0/configuration.html#8.2.3", + "description": "format specifies the format for the injected HTTP header's value. This field has no effect unless name is specified. For the HAProxy-based ingress controller implementation, this format uses the same syntax as the HTTP log format. If the field is empty, the default value is \"%{+X}o\\\\ %ci:%cp_%fi:%fp_%Ts_%rt:%pid\"; see the corresponding HAProxy documentation: http://cbonte.github.io/haproxy-dconv/2.0/configuration.html#8.2.3", "type": "string" }, "name": { @@ -28373,6 +29001,11 @@ "description": "httpHeaders defines policy for HTTP headers.\n\nIf this field is empty, the default values are used.", "$ref": "#/definitions/com.github.openshift.api.operator.v1.IngressControllerHTTPHeaders" }, + "idleConnectionTerminationPolicy": { + "description": "idleConnectionTerminationPolicy maps directly to HAProxy's idle-close-on-response option and controls whether HAProxy keeps idle frontend connections open during a soft stop (router reload).\n\nAllowed values for this field are \"Immediate\" and \"Deferred\". The default value is \"Immediate\".\n\nWhen set to \"Immediate\", idle connections are closed immediately during router reloads. This ensures immediate propagation of route changes but may impact clients sensitive to connection resets.\n\nWhen set to \"Deferred\", HAProxy will maintain idle connections during a soft reload instead of closing them immediately. 
These connections remain open until any of the following occurs:\n\n - A new request is received on the connection, in which\n case HAProxy handles it in the old process and closes\n the connection after sending the response.\n\n - HAProxy's `timeout http-keep-alive` duration expires\n (300 seconds in OpenShift's configuration, not\n configurable).\n\n - The client's keep-alive timeout expires, causing the\n client to close the connection.\n\nSetting Deferred can help prevent errors in clients or load balancers that do not properly handle connection resets. Additionally, this option allows you to retain the pre-2.4 HAProxy behaviour: in HAProxy version 2.2 (OpenShift versions < 4.14), maintaining idle connections during a soft reload was the default behaviour, but starting with HAProxy 2.4, the default changed to closing idle connections immediately.\n\nImportant Consideration:\n\n - Using Deferred will result in temporary inconsistencies\n for the first request on each persistent connection\n after a route update and router reload. This request\n will be processed by the old HAProxy process using its\n old configuration. Subsequent requests will use the\n updated configuration.\n\nOperational Considerations:\n\n - Keeping idle connections open during reloads may lead\n to an accumulation of old HAProxy processes if\n connections remain idle for extended periods,\n especially in environments where frequent reloads\n occur.\n\n - Consider monitoring the number of HAProxy processes in\n the router pods when Deferred is set.\n\n - You may need to enable or adjust the\n `ingress.operator.openshift.io/hard-stop-after`\n duration (configured via an annotation on the\n IngressController resource) in environments with\n frequent reloads to prevent resource exhaustion.", + "type": "string", + "default": "Immediate" + }, "logging": { "description": "logging defines parameters for what should be logged where. If this field is empty, operational logs are enabled but access logs are disabled.", "$ref": "#/definitions/com.github.openshift.api.operator.v1.IngressControllerLogging" @@ -28486,7 +29119,7 @@ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Duration" }, "connectTimeout": { - "description": "ConnectTimeout defines the maximum time to wait for a connection attempt to a server/backend to succeed.\n\nThis field expects an unsigned duration string of decimal numbers, each with optional fraction and a unit suffix, e.g. \"300ms\", \"1.5h\" or \"2h45m\". Valid time units are \"ns\", \"us\" (or \"µs\" U+00B5 or \"μs\" U+03BC), \"ms\", \"s\", \"m\", \"h\".\n\nWhen omitted, this means the user has no opinion and the platform is left to choose a reasonable default. This default is subject to change over time. The current default is 5s.", + "description": "connectTimeout defines the maximum time to wait for a connection attempt to a server/backend to succeed.\n\nThis field expects an unsigned duration string of decimal numbers, each with optional fraction and a unit suffix, e.g. \"300ms\", \"1.5h\" or \"2h45m\". Valid time units are \"ns\", \"us\" (or \"µs\" U+00B5 or \"μs\" U+03BC), \"ms\", \"s\", \"m\", \"h\".\n\nWhen omitted, this means the user has no opinion and the platform is left to choose a reasonable default. This default is subject to change over time. 
The current default is 5s.", "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Duration" }, "headerBufferBytes": { @@ -28757,7 +29390,7 @@ "type": "string" }, "items": { - "description": "Items contains the items", + "description": "items contains the items", "type": "array", "items": { "default": {}, @@ -28945,7 +29578,7 @@ "type": "string" }, "items": { - "description": "Items contains the items", + "description": "items contains the items", "type": "array", "items": { "default": {}, @@ -29131,7 +29764,7 @@ "type": "string" }, "items": { - "description": "Items contains the items", + "description": "items contains the items", "type": "array", "items": { "default": {}, @@ -29309,7 +29942,7 @@ "type": "string" }, "items": { - "description": "Items contains the items", + "description": "items contains the items", "type": "array", "items": { "default": {}, @@ -29415,7 +30048,8 @@ "description": "LoadBalancerStrategy holds parameters for a load balancer.", "type": "object", "required": [ - "scope" + "scope", + "dnsManagementPolicy" ], "properties": { "allowedSourceRanges": { @@ -29474,6 +30108,37 @@ } ] }, + "com.github.openshift.api.operator.v1.Logo": { + "description": "Logo defines a configuration based on theme modes for the console UI logo.", + "type": "object", + "required": [ + "type", + "themes" + ], + "properties": { + "themes": { + "description": "themes specifies the themes for the console UI logo. themes is a required field that allows a list of themes. Each item in the themes list must have a unique mode and a source field. Each mode determines whether the logo is for the dark or light mode of the console UI. If a theme is not specified, the default OpenShift logo will be displayed for that theme. There must be at least one entry and no more than 2 entries.", + "type": "array", + "items": { + "default": {}, + "$ref": "#/definitions/com.github.openshift.api.operator.v1.Theme" + }, + "x-kubernetes-list-map-keys": [ + "mode" + ], + "x-kubernetes-list-type": "map" + }, + "type": { + "description": "type specifies the type of the logo for the console UI. It determines whether the logo is for the masthead or favicon. type is a required field that allows values of Masthead and Favicon. When set to \"Masthead\", the logo will be used in the masthead and about modal of the console UI. When set to \"Favicon\", the logo will be used as the favicon of the console UI.\n\nPossible enum values:\n - `\"Favicon\"` Favicon represents the favicon logo.\n - `\"Masthead\"` Masthead represents the logo in the masthead.", + "type": "string", + "default": "", + "enum": [ + "Favicon", + "Masthead" + ] + } + } + }, "com.github.openshift.api.operator.v1.MTUMigration": { "description": "MTUMigration contains infomation about MTU migration.", "type": "object", @@ -29553,7 +30218,7 @@ "type": "string" }, "items": { - "description": "Items contains the items", + "description": "items contains the items", "type": "array", "items": { "default": {}, @@ -29593,7 +30258,7 @@ "type": "string" }, "managedBootImages": { - "description": "managedBootImages allows configuration for the management of boot images for machine resources within the cluster. This configuration allows users to select resources that should be updated to the latest boot images during cluster upgrades, ensuring that new machines always boot with the current cluster version's boot image. 
When omitted, no boot images will be updated.", + "description": "managedBootImages allows configuration for the management of boot images for machine resources within the cluster. This configuration allows users to select resources that should be updated to the latest boot images during cluster upgrades, ensuring that new machines always boot with the current cluster version's boot image. When omitted, this means no opinion and the platform is left to choose a reasonable default, which is subject to change over time. The default for each machine manager mode is All for GCP and AWS platforms, and None for all other platforms.", "default": {}, "$ref": "#/definitions/com.github.openshift.api.operator.v1.ManagedBootImages" }, @@ -29639,9 +30304,12 @@ "x-kubernetes-list-map-keys": [ "type" ], - "x-kubernetes-list-type": "map", - "x-kubernetes-patch-merge-key": "type", - "x-kubernetes-patch-strategy": "merge" + "x-kubernetes-list-type": "map" + }, + "managedBootImagesStatus": { + "description": "managedBootImagesStatus reflects what the latest cluster-validated boot image configuration is and will be used by Machine Config Controller while performing boot image updates.", + "default": {}, + "$ref": "#/definitions/com.github.openshift.api.operator.v1.ManagedBootImages" }, "nodeDisruptionPolicyStatus": { "description": "nodeDisruptionPolicyStatus status reflects what the latest cluster-validated policies are, and will be used by the Machine Config Daemon during future node updates.", @@ -29688,7 +30356,7 @@ ], "properties": { "mode": { - "description": "mode determines how machine managers will be selected for updates. Valid values are All and Partial. All means that every resource matched by the machine manager will be updated. Partial requires specified selector(s) and allows customisation of which resources matched by the machine manager will be updated.", + "description": "mode determines how machine managers will be selected for updates. Valid values are All and Partial. All means that every resource matched by the machine manager will be updated. Partial requires specified selector(s) and allows customisation of which resources matched by the machine manager will be updated. None means that every resource matched by the machine manager will not be updated.", "type": "string", "default": "" }, @@ -29977,7 +30645,7 @@ "type": "boolean" }, "disableMultiNetwork": { - "description": "disableMultiNetwork specifies whether or not multiple pod network support should be disabled. If unset, this property defaults to 'false' and multiple network support is enabled.", + "description": "disableMultiNetwork defaults to 'false' and this setting enables the pod multi-networking capability. disableMultiNetwork when set to 'true' at cluster install time does not install the components, typically the Multus CNI and the network-attachment-definition CRD, that enable the pod multi-networking capability. Setting the parameter to 'true' might be useful when you need install third-party CNI plugins, but these plugins are not supported by Red Hat. Changing the parameter value as a postinstallation cluster task has no effect.", "type": "boolean" }, "disableNetworkDiagnostics": { @@ -30249,7 +30917,7 @@ "x-kubernetes-list-type": "atomic" }, "name": { - "description": "name represents the service name of a systemd service managed through a MachineConfig Actions specified will be applied for changes to the named service. Service names should be of the format ${NAME}${SERVICETYPE} and can up to 255 characters long. 
${NAME} must be atleast 1 character long and can only consist of alphabets, digits, \":\", \"-\", \"_\", \".\", and \"\". ${SERVICETYPE} must be one of \".service\", \".socket\", \".device\", \".mount\", \".automount\", \".swap\", \".target\", \".path\", \".timer\", \".snapshot\", \".slice\" or \".scope\".", + "description": "name represents the service name of a systemd service managed through a MachineConfig Actions specified will be applied for changes to the named service. Service names should be of the format ${NAME}${SERVICETYPE} and can up to 255 characters long. ${NAME} must be atleast 1 character long and can only consist of alphabets, digits, \":\", \"-\", \"_\", \".\", and \"\\\". ${SERVICETYPE} must be one of \".service\", \".socket\", \".device\", \".mount\", \".automount\", \".swap\", \".target\", \".path\", \".timer\", \".snapshot\", \".slice\" or \".scope\".", "type": "string", "default": "" } @@ -30355,7 +31023,7 @@ "x-kubernetes-list-type": "atomic" }, "name": { - "description": "name represents the service name of a systemd service managed through a MachineConfig Actions specified will be applied for changes to the named service. Service names should be of the format ${NAME}${SERVICETYPE} and can up to 255 characters long. ${NAME} must be atleast 1 character long and can only consist of alphabets, digits, \":\", \"-\", \"_\", \".\", and \"\". ${SERVICETYPE} must be one of \".service\", \".socket\", \".device\", \".mount\", \".automount\", \".swap\", \".target\", \".path\", \".timer\", \".snapshot\", \".slice\" or \".scope\".", + "description": "name represents the service name of a systemd service managed through a MachineConfig Actions specified will be applied for changes to the named service. Service names should be of the format ${NAME}${SERVICETYPE} and can up to 255 characters long. ${NAME} must be atleast 1 character long and can only consist of alphabets, digits, \":\", \"-\", \"_\", \".\", and \"\\\". ${SERVICETYPE} must be one of \".service\", \".socket\", \".device\", \".mount\", \".automount\", \".swap\", \".target\", \".path\", \".timer\", \".snapshot\", \".slice\" or \".scope\".", "type": "string", "default": "" } @@ -30394,15 +31062,13 @@ "description": "NodeStatus provides information about the current state of a particular node managed by this operator.", "type": "object", "required": [ - "nodeName", - "currentRevision" + "nodeName" ], "properties": { "currentRevision": { - "description": "currentRevision is the generation of the most recently successful deployment", + "description": "currentRevision is the generation of the most recently successful deployment. Can not be set on creation of a nodeStatus. Updates must only increase the value.", "type": "integer", - "format": "int32", - "default": 0 + "format": "int32" }, "lastFailedCount": { "description": "lastFailedCount is how often the installer pod of the last failed revision failed.", @@ -30442,7 +31108,7 @@ "default": "" }, "targetRevision": { - "description": "targetRevision is the generation of the deployment we're trying to apply", + "description": "targetRevision is the generation of the deployment we're trying to apply. Can not be set on creation of a nodeStatus.", "type": "integer", "format": "int32" } @@ -30452,12 +31118,160 @@ "type": "object", "properties": { "latestAvailableRevision": { - "description": "LatestAvailableRevision is the latest revision used as suffix of revisioned secrets like encryption-config. 
A new revision causes a new deployment of pods.", + "description": "latestAvailableRevision is the latest revision used as suffix of revisioned secrets like encryption-config. A new revision causes a new deployment of pods.", "type": "integer", "format": "int32" } } }, + "com.github.openshift.api.operator.v1.OLM": { + "description": "OLM provides information to configure an operator to manage the OLM controllers\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "type": "object", + "required": [ + "metadata", + "spec" + ], + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "description": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "default": {}, + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta" + }, + "spec": { + "description": "spec holds user settable values for configuration", + "default": {}, + "$ref": "#/definitions/com.github.openshift.api.operator.v1.OLMSpec" + }, + "status": { + "description": "status holds observed values from the cluster. They may not be overridden.", + "default": {}, + "$ref": "#/definitions/com.github.openshift.api.operator.v1.OLMStatus" + } + } + }, + "com.github.openshift.api.operator.v1.OLMList": { + "description": "OLMList is a collection of items\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "type": "object", + "required": [ + "metadata", + "items" + ], + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "items": { + "description": "items contains the items", + "type": "array", + "items": { + "default": {}, + "$ref": "#/definitions/com.github.openshift.api.operator.v1.OLM" + } + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "description": "metadata is the standard list's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "default": {}, + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta" + } + } + }, + "com.github.openshift.api.operator.v1.OLMSpec": { + "type": "object", + "required": [ + "managementState" + ], + "properties": { + "logLevel": { + "description": "logLevel is an intent based logging for an overall component. It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for their operands.\n\nValid values are: \"Normal\", \"Debug\", \"Trace\", \"TraceAll\". Defaults to \"Normal\".", + "type": "string" + }, + "managementState": { + "description": "managementState indicates whether and how the operator should manage the component", + "type": "string", + "default": "" + }, + "observedConfig": { + "description": "observedConfig holds a sparse config that controller has observed from the cluster state. It exists in spec because it is an input to the level for the operator", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.runtime.RawExtension" + }, + "operatorLogLevel": { + "description": "operatorLogLevel is an intent based logging for the operator itself. It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for themselves.\n\nValid values are: \"Normal\", \"Debug\", \"Trace\", \"TraceAll\". Defaults to \"Normal\".", + "type": "string" + }, + "unsupportedConfigOverrides": { + "description": "unsupportedConfigOverrides overrides the final configuration that was computed by the operator. Red Hat does not support the use of this field. Misuse of this field could lead to unexpected behavior or conflict with other configuration options. Seek guidance from the Red Hat support before using this field. 
Use of this property blocks cluster upgrades, it must be removed before upgrading your cluster.", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.runtime.RawExtension" + } + } + }, + "com.github.openshift.api.operator.v1.OLMStatus": { + "type": "object", + "required": [ + "readyReplicas" + ], + "properties": { + "conditions": { + "description": "conditions is a list of conditions and their status", + "type": "array", + "items": { + "default": {}, + "$ref": "#/definitions/com.github.openshift.api.operator.v1.OperatorCondition" + }, + "x-kubernetes-list-map-keys": [ + "type" + ], + "x-kubernetes-list-type": "map" + }, + "generations": { + "description": "generations are used to determine when an item needs to be reconciled or has changed in a way that needs a reaction.", + "type": "array", + "items": { + "default": {}, + "$ref": "#/definitions/com.github.openshift.api.operator.v1.GenerationStatus" + }, + "x-kubernetes-list-map-keys": [ + "group", + "resource", + "namespace", + "name" + ], + "x-kubernetes-list-type": "map" + }, + "latestAvailableRevision": { + "description": "latestAvailableRevision is the deploymentID of the most recent deployment", + "type": "integer", + "format": "int32" + }, + "observedGeneration": { + "description": "observedGeneration is the last generation change you've dealt with", + "type": "integer", + "format": "int64" + }, + "readyReplicas": { + "description": "readyReplicas indicates how many replicas are ready and at the desired state", + "type": "integer", + "format": "int32", + "default": 0 + }, + "version": { + "description": "version is the level this availability applies to", + "type": "string" + } + } + }, "com.github.openshift.api.operator.v1.OVNKubernetesConfig": { "description": "ovnKubernetesConfig contains the configuration parameters for networks using the ovn-kubernetes network project", "type": "object", @@ -30477,7 +31291,7 @@ "format": "int64" }, "hybridOverlayConfig": { - "description": "HybridOverlayConfig configures an additional overlay network for peers that are not using OVN.", + "description": "hybridOverlayConfig configures an additional overlay network for peers that are not using OVN.", "$ref": "#/definitions/com.github.openshift.api.operator.v1.HybridOverlayConfig" }, "ipsecConfig": { @@ -30513,7 +31327,7 @@ "type": "string" }, "v6InternalSubnet": { - "description": "v6InternalSubnet is a v6 subnet used internally by ovn-kubernetes in case the default one is being already used by something else. It must not overlap with any other subnet being used by OpenShift or by the node network. The size of the subnet must be larger than the number of nodes. The value cannot be changed after installation. Default is fd98::/48", + "description": "v6InternalSubnet is a v6 subnet used internally by ovn-kubernetes in case the default one is being already used by something else. It must not overlap with any other subnet being used by OpenShift or by the node network. The size of the subnet must be larger than the number of nodes. The value cannot be changed after installation. 
Default is fd98::/64", "type": "string" } } @@ -30564,7 +31378,7 @@ "type": "string" }, "items": { - "description": "Items contains the items", + "description": "items contains the items", "type": "array", "items": { "default": {}, @@ -30710,7 +31524,7 @@ "type": "string" }, "items": { - "description": "Items contains the items", + "description": "items contains the items", "type": "array", "items": { "default": {}, @@ -30973,6 +31787,9 @@ "com.github.openshift.api.operator.v1.PartialSelector": { "description": "PartialSelector provides label selector(s) that can be used to match machine management resources.", "type": "object", + "required": [ + "machineResourceSelector" + ], "properties": { "machineResourceSelector": { "description": "machineResourceSelector is a label selector that can be used to select machine resources like MachineSets.", @@ -31200,7 +32017,7 @@ ], "properties": { "serviceName": { - "description": "serviceName is the full name (e.g. crio.service) of the service to be reloaded Service names should be of the format ${NAME}${SERVICETYPE} and can up to 255 characters long. ${NAME} must be atleast 1 character long and can only consist of alphabets, digits, \":\", \"-\", \"_\", \".\", and \"\". ${SERVICETYPE} must be one of \".service\", \".socket\", \".device\", \".mount\", \".automount\", \".swap\", \".target\", \".path\", \".timer\", \".snapshot\", \".slice\" or \".scope\".", + "description": "serviceName is the full name (e.g. crio.service) of the service to be reloaded Service names should be of the format ${NAME}${SERVICETYPE} and can up to 255 characters long. ${NAME} must be atleast 1 character long and can only consist of alphabets, digits, \":\", \"-\", \"_\", \".\", and \"\\\". ${SERVICETYPE} must be one of \".service\", \".socket\", \".device\", \".mount\", \".automount\", \".swap\", \".target\", \".path\", \".timer\", \".snapshot\", \".slice\" or \".scope\".", "type": "string", "default": "" } @@ -31236,7 +32053,7 @@ ], "properties": { "serviceName": { - "description": "serviceName is the full name (e.g. crio.service) of the service to be restarted Service names should be of the format ${NAME}${SERVICETYPE} and can up to 255 characters long. ${NAME} must be atleast 1 character long and can only consist of alphabets, digits, \":\", \"-\", \"_\", \".\", and \"\". ${SERVICETYPE} must be one of \".service\", \".socket\", \".device\", \".mount\", \".automount\", \".swap\", \".target\", \".path\", \".timer\", \".snapshot\", \".slice\" or \".scope\".", + "description": "serviceName is the full name (e.g. crio.service) of the service to be restarted Service names should be of the format ${NAME}${SERVICETYPE} and can up to 255 characters long. ${NAME} must be atleast 1 character long and can only consist of alphabets, digits, \":\", \"-\", \"_\", \".\", and \"\\\". 
${SERVICETYPE} must be one of \".service\", \".socket\", \".device\", \".mount\", \".automount\", \".swap\", \".target\", \".path\", \".timer\", \".snapshot\", \".slice\" or \".scope\".", "type": "string", "default": "" } @@ -31362,7 +32179,7 @@ "type": "string" }, "items": { - "description": "Items contains the items", + "description": "items contains the items", "type": "array", "items": { "default": {}, @@ -31507,7 +32324,7 @@ "type": "string" }, "items": { - "description": "Items contains the items", + "description": "items contains the items", "type": "array", "items": { "default": {}, @@ -31653,7 +32470,7 @@ "type": "string" }, "items": { - "description": "Items contains the items", + "description": "items contains the items", "type": "array", "items": { "default": {}, @@ -31760,7 +32577,7 @@ "type": "object", "properties": { "ipamConfig": { - "description": "IPAMConfig configures IPAM module will be used for IP Address Management (IPAM).", + "description": "ipamConfig configures IPAM module will be used for IP Address Management (IPAM).", "$ref": "#/definitions/com.github.openshift.api.operator.v1.IPAMConfig" }, "master": { @@ -31783,12 +32600,12 @@ "type": "object", "properties": { "address": { - "description": "Address is the IP address in CIDR format", + "description": "address is the IP address in CIDR format", "type": "string", "default": "" }, "gateway": { - "description": "Gateway is IP inside of subnet to designate as the gateway", + "description": "gateway is IP inside of subnet to designate as the gateway", "type": "string" } } @@ -31798,7 +32615,7 @@ "type": "object", "properties": { "addresses": { - "description": "Addresses configures IP address for the interface", + "description": "addresses configures IP address for the interface", "type": "array", "items": { "default": {}, @@ -31807,11 +32624,11 @@ "x-kubernetes-list-type": "atomic" }, "dns": { - "description": "DNS configures DNS for the interface", + "description": "dns configures DNS for the interface", "$ref": "#/definitions/com.github.openshift.api.operator.v1.StaticIPAMDNS" }, "routes": { - "description": "Routes configures IP routes for the interface", + "description": "routes configures IP routes for the interface", "type": "array", "items": { "default": {}, @@ -31826,11 +32643,11 @@ "type": "object", "properties": { "domain": { - "description": "Domain configures the domainname the local domain used for short hostname lookups", + "description": "domain configures the domainname the local domain used for short hostname lookups", "type": "string" }, "nameservers": { - "description": "Nameservers points DNS servers for IP lookup", + "description": "nameservers points DNS servers for IP lookup", "type": "array", "items": { "type": "string", @@ -31839,7 +32656,7 @@ "x-kubernetes-list-type": "atomic" }, "search": { - "description": "Search configures priority ordered search domains for short hostname lookups", + "description": "search configures priority ordered search domains for short hostname lookups", "type": "array", "items": { "type": "string", @@ -31857,12 +32674,12 @@ ], "properties": { "destination": { - "description": "Destination points the IP route destination", + "description": "destination points the IP route destination", "type": "string", "default": "" }, "gateway": { - "description": "Gateway is the route's next-hop IP address If unset, a default gateway is assumed (as determined by the CNI plugin).", + "description": "gateway is the route's next-hop IP address If unset, a default gateway is 
assumed (as determined by the CNI plugin).", "type": "string" } } @@ -32089,7 +32906,7 @@ "$ref": "#/definitions/io.k8s.apimachinery.pkg.runtime.RawExtension" }, "vsphereStorageDriver": { - "description": "VSphereStorageDriver indicates the storage driver to use on VSphere clusters. Once this field is set to CSIWithMigrationDriver, it can not be changed. If this is empty, the platform will choose a good default, which may change over time without notice. The current default is CSIWithMigrationDriver and may not be changed. DEPRECATED: This field will be removed in a future release.", + "description": "vsphereStorageDriver indicates the storage driver to use on VSphere clusters. Once this field is set to CSIWithMigrationDriver, it can not be changed. If this is empty, the platform will choose a good default, which may change over time without notice. The current default is CSIWithMigrationDriver and may not be changed. DEPRECATED: This field will be removed in a future release.", "type": "string", "default": "" } @@ -32181,6 +32998,30 @@ } } }, + "com.github.openshift.api.operator.v1.Theme": { + "description": "Theme defines a theme mode for the console UI.", + "type": "object", + "required": [ + "mode", + "source" + ], + "properties": { + "mode": { + "description": "mode is used to specify what theme mode a logo will apply to in the console UI. mode is a required field that allows values of Dark and Light. When set to Dark, the logo file referenced in the 'file' field will be used when an end-user of the console UI enables the Dark mode. When set to Light, the logo file referenced in the 'file' field will be used when an end-user of the console UI enables the Light mode.\n\nPossible enum values:\n - `\"Dark\"` represents the dark mode for a console theme.\n - `\"Light\"` represents the light mode for a console theme.", + "type": "string", + "default": "", + "enum": [ + "Dark", + "Light" + ] + }, + "source": { + "description": "source is used by the console to locate the specified file containing a custom logo. source is a required field that references a ConfigMap name and key that contains the custom logo file in the openshift-config namespace. You can create it with a command like: - 'oc create configmap custom-logos-config --namespace=openshift-config --from-file=/path/to/file' The ConfigMap key must include the file extension so that the console serves the file with the correct MIME type. The recommended file format for the Masthead and Favicon logos is SVG, but other file formats are allowed if supported by the browser. The logo image size must be less than 1 MB due to constraints on the ConfigMap size. For more information, see the documentation: https://docs.redhat.com/en/documentation/openshift_container_platform/4.19/html/web_console/customizing-web-console#customizing-web-console", + "default": {}, + "$ref": "#/definitions/com.github.openshift.api.operator.v1.FileReferenceSource" + } + } + }, "com.github.openshift.api.operator.v1.Upstream": { "description": "Upstream can either be of type SystemResolvConf, or of type Network.\n\n - For an Upstream of type SystemResolvConf, no further fields are necessary:\n The upstream will be configured to use /etc/resolv.conf.\n - For an Upstream of type Network, a NetworkResolver field needs to be defined\n with an IP address or IP:port if the upstream listens on a port other than 53.", "type": "object", @@ -32189,16 +33030,16 @@ ], "properties": { "address": { - "description": "Address must be defined when Type is set to Network. 
It will be ignored otherwise. It must be a valid ipv4 or ipv6 address.", + "description": "address must be defined when Type is set to Network. It will be ignored otherwise. It must be a valid ipv4 or ipv6 address.", "type": "string" }, "port": { - "description": "Port may be defined when Type is set to Network. It will be ignored otherwise. Port must be between 65535", + "description": "port may be defined when Type is set to Network. It will be ignored otherwise. Port must be between 65535", "type": "integer", "format": "int64" }, "type": { - "description": "Type defines whether this upstream contains an IP/IP:port resolver or the local /etc/resolv.conf. Type accepts 2 possible values: SystemResolvConf or Network.\n\n* When SystemResolvConf is used, the Upstream structure does not require any further fields to be defined:\n /etc/resolv.conf will be used\n* When Network is used, the Upstream structure must contain at least an Address", + "description": "type defines whether this upstream contains an IP/IP:port resolver or the local /etc/resolv.conf. Type accepts 2 possible values: SystemResolvConf or Network.\n\n* When SystemResolvConf is used, the Upstream structure does not require any further fields to be defined:\n /etc/resolv.conf will be used\n* When Network is used, the Upstream structure must contain at least an Address", "type": "string", "default": "" } @@ -32209,7 +33050,7 @@ "type": "object", "properties": { "policy": { - "description": "Policy is used to determine the order in which upstream servers are selected for querying. Any one of the following values may be specified:\n\n* \"Random\" picks a random upstream server for each query. * \"RoundRobin\" picks upstream servers in a round-robin order, moving to the next server for each new query. * \"Sequential\" tries querying upstream servers in a sequential order until one responds, starting with the first server for each new query.\n\nThe default value is \"Sequential\"", + "description": "policy is used to determine the order in which upstream servers are selected for querying. Any one of the following values may be specified:\n\n* \"Random\" picks a random upstream server for each query. * \"RoundRobin\" picks upstream servers in a round-robin order, moving to the next server for each new query. * \"Sequential\" tries querying upstream servers in a sequential order until one responds, starting with the first server for each new query.\n\nThe default value is \"Sequential\"", "type": "string" }, "protocolStrategy": { @@ -32223,7 +33064,7 @@ "$ref": "#/definitions/com.github.openshift.api.operator.v1.DNSTransportConfig" }, "upstreams": { - "description": "Upstreams is a list of resolvers to forward name queries for the \".\" domain. Each instance of CoreDNS performs health checking of Upstreams. When a healthy upstream returns an error during the exchange, another resolver is tried from Upstreams. The Upstreams are selected in the order specified in Policy.\n\nA maximum of 15 upstreams is allowed per ForwardPlugin. If no Upstreams are specified, /etc/resolv.conf is used by default", + "description": "upstreams is a list of resolvers to forward name queries for the \".\" domain. Each instance of CoreDNS performs health checking of Upstreams. When a healthy upstream returns an error during the exchange, another resolver is tried from Upstreams. The Upstreams are selected in the order specified in Policy.\n\nA maximum of 15 upstreams is allowed per ForwardPlugin. 
If no Upstreams are specified, /etc/resolv.conf is used by default", "type": "array", "items": { "default": {}, @@ -32251,6 +33092,11 @@ "type": "integer", "format": "int64" }, + "maxAllowedBlockVolumesPerNode": { + "description": "maxAllowedBlockVolumesPerNode is an optional configuration parameter that allows setting a custom value for the limit of the number of PersistentVolumes attached to a node. In vSphere version 7 this limit was set to 59 by default, however in vSphere version 8 this limit was increased to 255. Before increasing this value above 59 the cluster administrator needs to ensure that every node forming the cluster is updated to ESXi version 8 or higher and that all nodes are running the same version. The limit must be between 1 and 255, which matches the vSphere version 8 maximum. When omitted, this means no opinion and the platform is left to choose a reasonable default, which is subject to change over time. The current default is 59, which matches the limit for vSphere version 7.", + "type": "integer", + "format": "int32" + }, "topologyCategories": { "description": "topologyCategories indicates tag categories with which vcenter resources such as hostcluster or datacenter were tagged with. If cluster Infrastructure object has a topology, values specified in Infrastructure object will be used and modifications to topologyCategories will be rejected.", "type": "array", @@ -32282,6 +33128,90 @@ } } }, + "com.github.openshift.api.operator.v1alpha1.ClusterVersionOperator": { + "description": "ClusterVersionOperator holds cluster-wide information about the Cluster Version Operator.\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.", + "type": "object", + "required": [ + "metadata", + "spec" + ], + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "description": "metadata is the standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "default": {}, + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta" + }, + "spec": { + "description": "spec is the specification of the desired behavior of the Cluster Version Operator.", + "default": {}, + "$ref": "#/definitions/com.github.openshift.api.operator.v1alpha1.ClusterVersionOperatorSpec" + }, + "status": { + "description": "status is the most recently observed status of the Cluster Version Operator.", + "default": {}, + "$ref": "#/definitions/com.github.openshift.api.operator.v1alpha1.ClusterVersionOperatorStatus" + } + } + }, + "com.github.openshift.api.operator.v1alpha1.ClusterVersionOperatorList": { + "description": "ClusterVersionOperatorList is a collection of ClusterVersionOperators.\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.", + "type": "object", + "required": [ + "metadata" + ], + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "items": { + "description": "items is a list of ClusterVersionOperators.", + "type": "array", + "items": { + "default": {}, + "$ref": "#/definitions/com.github.openshift.api.operator.v1alpha1.ClusterVersionOperator" + } + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "description": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "default": {}, + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta" + } + } + }, + "com.github.openshift.api.operator.v1alpha1.ClusterVersionOperatorSpec": { + "description": "ClusterVersionOperatorSpec is the specification of the desired behavior of the Cluster Version Operator.", + "type": "object", + "properties": { + "operatorLogLevel": { + "description": "operatorLogLevel is an intent based logging for the operator itself. It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for themselves.\n\nValid values are: \"Normal\", \"Debug\", \"Trace\", \"TraceAll\". 
Defaults to \"Normal\".", + "type": "string" + } + } + }, + "com.github.openshift.api.operator.v1alpha1.ClusterVersionOperatorStatus": { + "description": "ClusterVersionOperatorStatus defines the observed status of the Cluster Version Operator.", + "type": "object", + "properties": { + "observedGeneration": { + "description": "observedGeneration represents the most recent generation observed by the operator and specifies the version of the spec field currently being synced.", + "type": "integer", + "format": "int64" + } + } + }, "com.github.openshift.api.operator.v1alpha1.DelegatedAuthentication": { "description": "DelegatedAuthentication allows authentication to be disabled.", "type": "object", @@ -32366,7 +33296,7 @@ "type": "object", "properties": { "pvcName": { - "description": "PVCName specifies the name of the PersistentVolumeClaim (PVC) which binds a PersistentVolume where the etcd backup file would be saved The PVC itself must always be created in the \"openshift-etcd\" namespace If the PVC is left unspecified \"\" then the platform will choose a reasonable default location to save the backup. In the future this would be backups saved across the control-plane master nodes.", + "description": "pvcName specifies the name of the PersistentVolumeClaim (PVC) which binds a PersistentVolume where the etcd backup file would be saved The PVC itself must always be created in the \"openshift-etcd\" namespace If the PVC is left unspecified \"\" then the platform will choose a reasonable default location to save the backup. In the future this would be backups saved across the control-plane master nodes.", "type": "string", "default": "" } @@ -32374,9 +33304,6 @@ }, "com.github.openshift.api.operator.v1alpha1.EtcdBackupStatus": { "type": "object", - "required": [ - "backupJob" - ], "properties": { "backupJob": { "description": "backupJob is the reference to the Job that executes the backup. 
Optional", @@ -32392,9 +33319,7 @@ "x-kubernetes-list-map-keys": [ "type" ], - "x-kubernetes-list-type": "map", - "x-kubernetes-patch-merge-key": "type", - "x-kubernetes-patch-strategy": "merge" + "x-kubernetes-list-type": "map" } } }, @@ -32465,7 +33390,7 @@ "$ref": "#/definitions/com.github.openshift.api.config.v1.LeaderElection" }, "servingInfo": { - "description": "ServingInfo is the HTTP serving information for the controller's endpoints", + "description": "servingInfo is the HTTP serving information for the controller's endpoints", "default": {}, "$ref": "#/definitions/com.github.openshift.api.config.v1.HTTPServingInfo" } @@ -32653,7 +33578,7 @@ "type": "string" }, "items": { - "description": "Items contains the items", + "description": "items contains the items", "type": "array", "items": { "default": {}, @@ -32984,19 +33909,19 @@ ], "properties": { "latency": { - "description": "Latency records how long the action mentioned in the entry took.", + "description": "latency records how long the action mentioned in the entry took.", "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Duration" }, "message": { - "description": "Message explaining status in a human readable format.", + "description": "message explaining status in a human readable format.", "type": "string" }, "reason": { - "description": "Reason for status in a machine readable format.", + "description": "reason for status in a machine readable format.", "type": "string" }, "success": { - "description": "Success indicates if the log entry indicates a success or failure.", + "description": "success indicates if the log entry indicates a success or failure.", "type": "boolean", "default": false }, @@ -33014,11 +33939,11 @@ ], "properties": { "end": { - "description": "End of outage detected", + "description": "end of outage detected", "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time" }, "endLogs": { - "description": "EndLogs contains log entries related to the end of this outage. Should contain the success entry that resolved the outage and possibly a few of the failure log entries that preceded it.", + "description": "endLogs contains log entries related to the end of this outage. Should contain the success entry that resolved the outage and possibly a few of the failure log entries that preceded it.", "type": "array", "items": { "default": {}, @@ -33026,15 +33951,15 @@ } }, "message": { - "description": "Message summarizes outage details in a human readable format.", + "description": "message summarizes outage details in a human readable format.", "type": "string" }, "start": { - "description": "Start of outage detected", + "description": "start of outage detected", "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time" }, "startLogs": { - "description": "StartLogs contains log entries related to the start of this outage. Should contain the original failure, any entries where the failure mode changed.", + "description": "startLogs contains log entries related to the start of this outage. 
Should contain the original failure, any entries where the failure mode changed.", "type": "array", "items": { "default": {}, @@ -33065,12 +33990,12 @@ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta" }, "spec": { - "description": "Spec defines the source and target of the connectivity check", + "description": "spec defines the source and target of the connectivity check", "default": {}, "$ref": "#/definitions/com.github.openshift.api.operatorcontrolplane.v1alpha1.PodNetworkConnectivityCheckSpec" }, "status": { - "description": "Status contains the observed status of the connectivity check", + "description": "status contains the observed status of the connectivity check", "default": {}, "$ref": "#/definitions/com.github.openshift.api.operatorcontrolplane.v1alpha1.PodNetworkConnectivityCheckStatus" } @@ -33090,20 +34015,20 @@ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time" }, "message": { - "description": "Message indicating details about last transition in a human readable format.", + "description": "message indicating details about last transition in a human readable format.", "type": "string" }, "reason": { - "description": "Reason for the condition's last status transition in a machine readable format.", + "description": "reason for the condition's last status transition in a machine readable format.", "type": "string" }, "status": { - "description": "Status of the condition", + "description": "status of the condition", "type": "string", "default": "" }, "type": { - "description": "Type of the condition", + "description": "type of the condition", "type": "string", "default": "" } @@ -33122,7 +34047,7 @@ "type": "string" }, "items": { - "description": "Items contains the items", + "description": "items contains the items", "type": "array", "items": { "default": {}, @@ -33148,7 +34073,7 @@ ], "properties": { "sourcePod": { - "description": "SourcePod names the pod from which the condition will be checked", + "description": "sourcePod names the pod from which the condition will be checked", "type": "string", "default": "" }, @@ -33168,7 +34093,7 @@ "type": "object", "properties": { "conditions": { - "description": "Conditions summarize the status of the check", + "description": "conditions summarize the status of the check", "type": "array", "items": { "default": {}, @@ -33178,7 +34103,7 @@ "x-kubernetes-patch-strategy": "merge" }, "failures": { - "description": "Failures contains logs of unsuccessful check actions", + "description": "failures contains logs of unsuccessful check actions", "type": "array", "items": { "default": {}, @@ -33186,7 +34111,7 @@ } }, "outages": { - "description": "Outages contains logs of time periods of outages", + "description": "outages contains logs of time periods of outages", "type": "array", "items": { "default": {}, @@ -33194,7 +34119,7 @@ } }, "successes": { - "description": "Successes contains logs successful check actions", + "description": "successes contains logs successful check actions", "type": "array", "items": { "default": {}, @@ -33272,7 +34197,8 @@ "dnsName", "targets", "recordType", - "recordTTL" + "recordTTL", + "dnsManagementPolicy" ], "properties": { "dnsManagementPolicy": { @@ -33403,17 +34329,17 @@ "type": "string" }, "ca": { - "description": "CA is the CA for verifying TLS connections", + "description": "ca is the CA for verifying TLS connections", "type": "string", "default": "" }, "certFile": { - "description": "CertFile is a file containing a PEM-encoded certificate", + "description": "certFile 
is a file containing a PEM-encoded certificate", "type": "string", "default": "" }, "keyFile": { - "description": "KeyFile is a file containing a PEM-encoded private key for the certificate specified by CertFile", + "description": "keyFile is a file containing a PEM-encoded private key for the certificate specified by CertFile", "type": "string", "default": "" }, @@ -33422,7 +34348,7 @@ "type": "string" }, "url": { - "description": "URL is the remote URL to connect to", + "description": "url is the remote URL to connect to", "type": "string", "default": "" } @@ -33671,12 +34597,12 @@ "type": "string" }, "ca": { - "description": "CA is the CA for verifying TLS connections", + "description": "ca is the CA for verifying TLS connections", "type": "string", "default": "" }, "certFile": { - "description": "CertFile is a file containing a PEM-encoded certificate", + "description": "certFile is a file containing a PEM-encoded certificate", "type": "string", "default": "" }, @@ -33686,7 +34612,7 @@ "default": "" }, "keyFile": { - "description": "KeyFile is a file containing a PEM-encoded private key for the certificate specified by CertFile", + "description": "keyFile is a file containing a PEM-encoded private key for the certificate specified by CertFile", "type": "string", "default": "" }, @@ -33695,7 +34621,7 @@ "type": "string" }, "url": { - "description": "URL is the remote URL to connect to", + "description": "url is the remote URL to connect to", "type": "string", "default": "" }, @@ -34218,12 +35144,12 @@ ], "properties": { "authentication": { - "description": "Authentication is used to authenticate sessions using HMAC. Recommended to use a secret with 32 or 64 bytes.", + "description": "authentication is used to authenticate sessions using HMAC. Recommended to use a secret with 32 or 64 bytes.", "type": "string", "default": "" }, "encryption": { - "description": "Encryption is used to encrypt sessions. Must be 16, 24, or 32 characters long, to select AES-128, AES-", + "description": "encryption is used to encrypt sessions. Must be 16, 24, or 32 characters long, to select AES-128, AES-", "type": "string", "default": "" } @@ -34245,7 +35171,7 @@ "type": "string" }, "secrets": { - "description": "Secrets is a list of secrets New sessions are signed and encrypted using the first secret. Existing sessions are decrypted/authenticated by each secret until one succeeds. This allows rotating secrets.", + "description": "secrets is a list of secrets New sessions are signed and encrypted using the first secret. Existing sessions are decrypted/authenticated by each secret until one succeeds. 
This allows rotating secrets.", "type": "array", "items": { "default": {}, @@ -34399,9 +35325,7 @@ "x-kubernetes-list-map-keys": [ "type" ], - "x-kubernetes-list-type": "map", - "x-kubernetes-patch-merge-key": "type", - "x-kubernetes-patch-strategy": "merge" + "x-kubernetes-list-type": "map" } } }, @@ -34423,12 +35347,12 @@ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta" }, "spec": { - "description": "Spec defines the behavior of the Namespace.", + "description": "spec defines the behavior of the Namespace.", "default": {}, "$ref": "#/definitions/com.github.openshift.api.project.v1.ProjectSpec" }, "status": { - "description": "Status describes the current status of a Namespace", + "description": "status describes the current status of a Namespace", "default": {}, "$ref": "#/definitions/com.github.openshift.api.project.v1.ProjectStatus" } @@ -34446,7 +35370,7 @@ "type": "string" }, "items": { - "description": "Items is the list of projects", + "description": "items is the list of projects", "type": "array", "items": { "default": {}, @@ -34473,11 +35397,11 @@ "type": "string" }, "description": { - "description": "Description is the description to apply to a project", + "description": "description is the description to apply to a project", "type": "string" }, "displayName": { - "description": "DisplayName is the display name to apply to a project", + "description": "displayName is the display name to apply to a project", "type": "string" }, "kind": { @@ -34496,7 +35420,7 @@ "type": "object", "properties": { "finalizers": { - "description": "Finalizers is an opaque list of values that must be empty to permanently remove object from storage", + "description": "finalizers is an opaque list of values that must be empty to permanently remove object from storage", "type": "array", "items": { "type": "string", @@ -34520,7 +35444,7 @@ "x-kubernetes-patch-strategy": "merge" }, "phase": { - "description": "Phase is the current lifecycle phase of the project\n\nPossible enum values:\n - `\"Active\"` means the namespace is available for use in the system\n - `\"Terminating\"` means the namespace is undergoing graceful termination", + "description": "phase is the current lifecycle phase of the project\n\nPossible enum values:\n - `\"Active\"` means the namespace is available for use in the system\n - `\"Terminating\"` means the namespace is undergoing graceful termination", "type": "string", "enum": [ "Active", @@ -34551,12 +35475,12 @@ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta" }, "spec": { - "description": "Spec defines the desired quota", + "description": "spec defines the desired quota", "default": {}, "$ref": "#/definitions/com.github.openshift.api.quota.v1.ClusterResourceQuotaSpec" }, "status": { - "description": "Status defines the actual enforced quota and its current usage", + "description": "status defines the actual enforced quota and its current usage", "default": {}, "$ref": "#/definitions/com.github.openshift.api.quota.v1.ClusterResourceQuotaStatus" } @@ -34574,7 +35498,7 @@ "type": "string" }, "items": { - "description": "Items is a list of AppliedClusterResourceQuota", + "description": "items is a list of AppliedClusterResourceQuota", "type": "array", "items": { "default": {}, @@ -34614,12 +35538,12 @@ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta" }, "spec": { - "description": "Spec defines the desired quota", + "description": "spec defines the desired quota", "default": {}, "$ref": 
"#/definitions/com.github.openshift.api.quota.v1.ClusterResourceQuotaSpec" }, "status": { - "description": "Status defines the actual enforced quota and its current usage", + "description": "status defines the actual enforced quota and its current usage", "default": {}, "$ref": "#/definitions/com.github.openshift.api.quota.v1.ClusterResourceQuotaStatus" } @@ -34637,7 +35561,7 @@ "type": "string" }, "items": { - "description": "Items is a list of ClusterResourceQuotas", + "description": "items is a list of ClusterResourceQuotas", "type": "array", "items": { "default": {}, @@ -34682,12 +35606,12 @@ ], "properties": { "quota": { - "description": "Quota defines the desired quota", + "description": "quota defines the desired quota", "default": {}, "$ref": "#/definitions/io.k8s.api.core.v1.ResourceQuotaSpec" }, "selector": { - "description": "Selector is the selector used to match projects. It should only select active projects on the scale of dozens (though it can select many more less active projects). These projects will contend on object creation through this resource.", + "description": "selector is the selector used to match projects. It should only select active projects on the scale of dozens (though it can select many more less active projects). These projects will contend on object creation through this resource.", "default": {}, "$ref": "#/definitions/com.github.openshift.api.quota.v1.ClusterResourceQuotaSelector" } @@ -34701,7 +35625,7 @@ ], "properties": { "namespaces": { - "description": "Namespaces slices the usage by project. This division allows for quick resolution of deletion reconciliation inside of a single project without requiring a recalculation across all projects. This can be used to pull the deltas for a given project.", + "description": "namespaces slices the usage by project. This division allows for quick resolution of deletion reconciliation inside of a single project without requiring a recalculation across all projects. 
This can be used to pull the deltas for a given project.", "type": "array", "items": { "default": {}, @@ -34709,7 +35633,7 @@ } }, "total": { - "description": "Total defines the actual enforced quota and its current usage across all projects", + "description": "total defines the actual enforced quota and its current usage across all projects", "default": {}, "$ref": "#/definitions/io.k8s.api.core.v1.ResourceQuotaStatus" } @@ -34724,12 +35648,12 @@ ], "properties": { "namespace": { - "description": "Namespace the project this status applies to", + "description": "namespace the project this status applies to", "type": "string", "default": "" }, "status": { - "description": "Status indicates how many resources have been consumed by this project", + "description": "status indicates how many resources have been consumed by this project", "default": {}, "$ref": "#/definitions/io.k8s.api.core.v1.ResourceQuotaStatus" } @@ -34870,7 +35794,7 @@ "type": "object", "properties": { "conditions": { - "description": "Conditions is the state of the route, may be empty.", + "description": "conditions is the state of the route, may be empty.", "type": "array", "items": { "default": {}, @@ -34882,7 +35806,7 @@ "x-kubernetes-list-type": "map" }, "host": { - "description": "Host is the host string under which the route is exposed; this value is required", + "description": "host is the host string under which the route is exposed; this value is required", "type": "string" }, "routerCanonicalHostname": { @@ -34920,12 +35844,12 @@ "type": "string" }, "status": { - "description": "Status is the status of the condition. Can be True, False, Unknown.", + "description": "status is the status of the condition. Can be True, False, Unknown.", "type": "string", "default": "" }, "type": { - "description": "Type is the type of the condition. Currently only Admitted or UnservableInFutureVersions.", + "description": "type is the type of the condition. Currently only Admitted or UnservableInFutureVersions.", "type": "string", "default": "" } @@ -35123,7 +36047,7 @@ "type": "string" }, "externalCertificate": { - "description": "externalCertificate provides certificate contents as a secret reference. This should be a single serving certificate, not a certificate chain. Do not include a CA certificate. The secret referenced should be present in the same namespace as that of the Route. Forbidden when `certificate` is set.", + "description": "externalCertificate provides certificate contents as a secret reference. This should be a single serving certificate, not a certificate chain. Do not include a CA certificate. The secret referenced should be present in the same namespace as that of the Route. Forbidden when `certificate` is set. The router service account needs to be granted with read-only access to this secret, please refer to openshift docs for additional details.", "$ref": "#/definitions/com.github.openshift.api.route.v1.LocalObjectReference" }, "insecureEdgeTerminationPolicy": { @@ -35357,7 +36281,7 @@ ], "properties": { "driver": { - "description": "Driver is the name of the Flexvolume driver.", + "description": "driver is the name of the Flexvolume driver.", "type": "string", "default": "" } @@ -35368,7 +36292,7 @@ "type": "object", "properties": { "ranges": { - "description": "Ranges are the allowed ranges of fs groups. If you would like to force a single fs group then supply a single range with the same start and end.", + "description": "ranges are the allowed ranges of fs groups. 
If you would like to force a single fs group then supply a single range with the same start and end.", "type": "array", "items": { "default": {}, @@ -35377,7 +36301,7 @@ "x-kubernetes-list-type": "atomic" }, "type": { - "description": "Type is the strategy that will dictate what FSGroup is used in the SecurityContext.", + "description": "type is the strategy that will dictate what FSGroup is used in the SecurityContext.", "type": "string" } } @@ -35387,12 +36311,12 @@ "type": "object", "properties": { "max": { - "description": "Max is the end of the range, inclusive.", + "description": "max is the end of the range, inclusive.", "type": "integer", "format": "int64" }, "min": { - "description": "Min is the start of the range, inclusive.", + "description": "min is the start of the range, inclusive.", "type": "integer", "format": "int64" } @@ -35660,21 +36584,21 @@ "type": "object", "properties": { "type": { - "description": "Type is the strategy that will dictate what RunAsUser is used in the SecurityContext.", + "description": "type is the strategy that will dictate what RunAsUser is used in the SecurityContext.", "type": "string" }, "uid": { - "description": "UID is the user id that containers must run as. Required for the MustRunAs strategy if not using namespace/service account allocated uids.", + "description": "uid is the user id that containers must run as. Required for the MustRunAs strategy if not using namespace/service account allocated uids.", "type": "integer", "format": "int64" }, "uidRangeMax": { - "description": "UIDRangeMax defines the max value for a strategy that allocates by range.", + "description": "uidRangeMax defines the max value for a strategy that allocates by range.", "type": "integer", "format": "int64" }, "uidRangeMin": { - "description": "UIDRangeMin defines the min value for a strategy that allocates by range.", + "description": "uidRangeMin defines the min value for a strategy that allocates by range.", "type": "integer", "format": "int64" } @@ -35689,7 +36613,7 @@ "$ref": "#/definitions/io.k8s.api.core.v1.SELinuxOptions" }, "type": { - "description": "Type is the strategy that will dictate what SELinux context is used in the SecurityContext.", + "description": "type is the strategy that will dictate what SELinux context is used in the SecurityContext.", "type": "string" } } @@ -35713,41 +36637,41 @@ ], "properties": { "allowHostDirVolumePlugin": { - "description": "AllowHostDirVolumePlugin determines if the policy allow containers to use the HostDir volume plugin", + "description": "allowHostDirVolumePlugin determines if the policy allow containers to use the HostDir volume plugin", "type": "boolean", "default": false }, "allowHostIPC": { - "description": "AllowHostIPC determines if the policy allows host ipc in the containers.", + "description": "allowHostIPC determines if the policy allows host ipc in the containers.", "type": "boolean", "default": false }, "allowHostNetwork": { - "description": "AllowHostNetwork determines if the policy allows the use of HostNetwork in the pod spec.", + "description": "allowHostNetwork determines if the policy allows the use of HostNetwork in the pod spec.", "type": "boolean", "default": false }, "allowHostPID": { - "description": "AllowHostPID determines if the policy allows host pid in the containers.", + "description": "allowHostPID determines if the policy allows host pid in the containers.", "type": "boolean", "default": false }, "allowHostPorts": { - "description": "AllowHostPorts determines if the policy allows host 
ports in the containers.", + "description": "allowHostPorts determines if the policy allows host ports in the containers.", "type": "boolean", "default": false }, "allowPrivilegeEscalation": { - "description": "AllowPrivilegeEscalation determines if a pod can request to allow privilege escalation. If unspecified, defaults to true.", + "description": "allowPrivilegeEscalation determines if a pod can request to allow privilege escalation. If unspecified, defaults to true.", "type": "boolean" }, "allowPrivilegedContainer": { - "description": "AllowPrivilegedContainer determines if a container can request to be run as privileged.", + "description": "allowPrivilegedContainer determines if a container can request to be run as privileged.", "type": "boolean", "default": false }, "allowedCapabilities": { - "description": "AllowedCapabilities is a list of capabilities that can be requested to add to the container. Capabilities in this field maybe added at the pod author's discretion. You must not list a capability in both AllowedCapabilities and RequiredDropCapabilities. To allow all capabilities you may use '*'.", + "description": "allowedCapabilities is a list of capabilities that can be requested to add to the container. Capabilities in this field maybe added at the pod author's discretion. You must not list a capability in both AllowedCapabilities and RequiredDropCapabilities. To allow all capabilities you may use '*'.", "type": "array", "items": { "type": "string", @@ -35756,7 +36680,7 @@ "x-kubernetes-list-type": "atomic" }, "allowedFlexVolumes": { - "description": "AllowedFlexVolumes is a whitelist of allowed Flexvolumes. Empty or nil indicates that all Flexvolumes may be used. This parameter is effective only when the usage of the Flexvolumes is allowed in the \"Volumes\" field.", + "description": "allowedFlexVolumes is a whitelist of allowed Flexvolumes. Empty or nil indicates that all Flexvolumes may be used. This parameter is effective only when the usage of the Flexvolumes is allowed in the \"Volumes\" field.", "type": "array", "items": { "default": {}, @@ -35765,7 +36689,7 @@ "x-kubernetes-list-type": "atomic" }, "allowedUnsafeSysctls": { - "description": "AllowedUnsafeSysctls is a list of explicitly allowed unsafe sysctls, defaults to none. Each entry is either a plain sysctl name or ends in \"*\" in which case it is considered as a prefix of allowed sysctls. Single * means all unsafe sysctls are allowed. Kubelet has to whitelist all allowed unsafe sysctls explicitly to avoid rejection.\n\nExamples: e.g. \"foo/*\" allows \"foo/bar\", \"foo/baz\", etc. e.g. \"foo.*\" allows \"foo.bar\", \"foo.baz\", etc.", + "description": "allowedUnsafeSysctls is a list of explicitly allowed unsafe sysctls, defaults to none. Each entry is either a plain sysctl name or ends in \"*\" in which case it is considered as a prefix of allowed sysctls. Single * means all unsafe sysctls are allowed. Kubelet has to whitelist all allowed unsafe sysctls explicitly to avoid rejection.\n\nExamples: e.g. \"foo/*\" allows \"foo/bar\", \"foo/baz\", etc. e.g. \"foo.*\" allows \"foo.bar\", \"foo.baz\", etc.", "type": "array", "items": { "type": "string", @@ -35778,7 +36702,7 @@ "type": "string" }, "defaultAddCapabilities": { - "description": "DefaultAddCapabilities is the default set of capabilities that will be added to the container unless the pod spec specifically drops the capability. 
You may not list a capabiility in both DefaultAddCapabilities and RequiredDropCapabilities.", + "description": "defaultAddCapabilities is the default set of capabilities that will be added to the container unless the pod spec specifically drops the capability. You may not list a capabiility in both DefaultAddCapabilities and RequiredDropCapabilities.", "type": "array", "items": { "type": "string", @@ -35787,11 +36711,11 @@ "x-kubernetes-list-type": "atomic" }, "defaultAllowPrivilegeEscalation": { - "description": "DefaultAllowPrivilegeEscalation controls the default setting for whether a process can gain more privileges than its parent process.", + "description": "defaultAllowPrivilegeEscalation controls the default setting for whether a process can gain more privileges than its parent process.", "type": "boolean" }, "forbiddenSysctls": { - "description": "ForbiddenSysctls is a list of explicitly forbidden sysctls, defaults to none. Each entry is either a plain sysctl name or ends in \"*\" in which case it is considered as a prefix of forbidden sysctls. Single * means all sysctls are forbidden.\n\nExamples: e.g. \"foo/*\" forbids \"foo/bar\", \"foo/baz\", etc. e.g. \"foo.*\" forbids \"foo.bar\", \"foo.baz\", etc.", + "description": "forbiddenSysctls is a list of explicitly forbidden sysctls, defaults to none. Each entry is either a plain sysctl name or ends in \"*\" in which case it is considered as a prefix of forbidden sysctls. Single * means all sysctls are forbidden.\n\nExamples: e.g. \"foo/*\" forbids \"foo/bar\", \"foo/baz\", etc. e.g. \"foo.*\" forbids \"foo.bar\", \"foo.baz\", etc.", "type": "array", "items": { "type": "string", @@ -35800,7 +36724,7 @@ "x-kubernetes-list-type": "atomic" }, "fsGroup": { - "description": "FSGroup is the strategy that will dictate what fs group is used by the SecurityContext.", + "description": "fsGroup is the strategy that will dictate what fs group is used by the SecurityContext.", "default": {}, "$ref": "#/definitions/com.github.openshift.api.security.v1.FSGroupStrategyOptions" }, @@ -35823,17 +36747,17 @@ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta" }, "priority": { - "description": "Priority influences the sort order of SCCs when evaluating which SCCs to try first for a given pod request based on access in the Users and Groups fields. The higher the int, the higher priority. An unset value is considered a 0 priority. If scores for multiple SCCs are equal they will be sorted from most restrictive to least restrictive. If both priorities and restrictions are equal the SCCs will be sorted by name.", + "description": "priority influences the sort order of SCCs when evaluating which SCCs to try first for a given pod request based on access in the Users and Groups fields. The higher the int, the higher priority. An unset value is considered a 0 priority. If scores for multiple SCCs are equal they will be sorted from most restrictive to least restrictive. If both priorities and restrictions are equal the SCCs will be sorted by name.", "type": "integer", "format": "int32" }, "readOnlyRootFilesystem": { - "description": "ReadOnlyRootFilesystem when set to true will force containers to run with a read only root file system. If the container specifically requests to run with a non-read only root file system the SCC should deny the pod. 
If set to false the container may run with a read only root file system if it wishes but it will not be forced to.", + "description": "readOnlyRootFilesystem when set to true will force containers to run with a read only root file system. If the container specifically requests to run with a non-read only root file system the SCC should deny the pod. If set to false the container may run with a read only root file system if it wishes but it will not be forced to.", "type": "boolean", "default": false }, "requiredDropCapabilities": { - "description": "RequiredDropCapabilities are the capabilities that will be dropped from the container. These are required to be dropped and cannot be added.", + "description": "requiredDropCapabilities are the capabilities that will be dropped from the container. These are required to be dropped and cannot be added.", "type": "array", "items": { "type": "string", @@ -35842,17 +36766,17 @@ "x-kubernetes-list-type": "atomic" }, "runAsUser": { - "description": "RunAsUser is the strategy that will dictate what RunAsUser is used in the SecurityContext.", + "description": "runAsUser is the strategy that will dictate what RunAsUser is used in the SecurityContext.", "default": {}, "$ref": "#/definitions/com.github.openshift.api.security.v1.RunAsUserStrategyOptions" }, "seLinuxContext": { - "description": "SELinuxContext is the strategy that will dictate what labels will be set in the SecurityContext.", + "description": "seLinuxContext is the strategy that will dictate what labels will be set in the SecurityContext.", "default": {}, "$ref": "#/definitions/com.github.openshift.api.security.v1.SELinuxContextStrategyOptions" }, "seccompProfiles": { - "description": "SeccompProfiles lists the allowed profiles that may be set for the pod or container's seccomp annotations. An unset (nil) or empty value means that no profiles may be specifid by the pod or container.\tThe wildcard '*' may be used to allow all profiles. When used to generate a value for a pod the first non-wildcard profile will be used as the default.", + "description": "seccompProfiles lists the allowed profiles that may be set for the pod or container's seccomp annotations. An unset (nil) or empty value means that no profiles may be specifid by the pod or container.\tThe wildcard '*' may be used to allow all profiles. When used to generate a value for a pod the first non-wildcard profile will be used as the default.", "type": "array", "items": { "type": "string", @@ -35861,7 +36785,7 @@ "x-kubernetes-list-type": "atomic" }, "supplementalGroups": { - "description": "SupplementalGroups is the strategy that will dictate what supplemental groups are used by the SecurityContext.", + "description": "supplementalGroups is the strategy that will dictate what supplemental groups are used by the SecurityContext.", "default": {}, "$ref": "#/definitions/com.github.openshift.api.security.v1.SupplementalGroupsStrategyOptions" }, @@ -35880,7 +36804,7 @@ "x-kubernetes-list-type": "atomic" }, "volumes": { - "description": "Volumes is a white list of allowed volume plugins. FSType corresponds directly with the field names of a VolumeSource (azureFile, configMap, emptyDir). To allow all volumes you may use \"*\". To allow no volumes, set to [\"none\"].", + "description": "volumes is a white list of allowed volume plugins. FSType corresponds directly with the field names of a VolumeSource (azureFile, configMap, emptyDir). To allow all volumes you may use \"*\". 
To allow no volumes, set to [\"none\"].", "type": "array", "items": { "type": "string", @@ -35952,7 +36876,7 @@ "type": "object", "properties": { "ranges": { - "description": "Ranges are the allowed ranges of supplemental groups. If you would like to force a single supplemental group then supply a single range with the same start and end.", + "description": "ranges are the allowed ranges of supplemental groups. If you would like to force a single supplemental group then supply a single range with the same start and end.", "type": "array", "items": { "default": {}, @@ -35961,7 +36885,7 @@ "x-kubernetes-list-type": "atomic" }, "type": { - "description": "Type is the strategy that will dictate what supplemental groups is used in the SecurityContext.", + "description": "type is the strategy that will dictate what supplemental groups is used in the SecurityContext.", "type": "string" } } @@ -36073,7 +36997,7 @@ "type": "string" }, "items": { - "description": "Items contains the items", + "description": "items contains the items", "type": "array", "items": { "default": {}, @@ -36176,8 +37100,11 @@ } }, "com.github.openshift.api.sharedresource.v1alpha1.SharedConfigMap": { - "description": "SharedConfigMap allows a ConfigMap to be shared across namespaces. Pods can mount the shared ConfigMap by adding a CSI volume to the pod specification using the \"csi.sharedresource.openshift.io\" CSI driver and a reference to the SharedConfigMap in the volume attributes:\n\nspec:\n volumes:\n - name: shared-configmap\n csi:\n driver: csi.sharedresource.openshift.io\n volumeAttributes:\n sharedConfigMap: my-share\n\nFor the mount to be successful, the pod's service account must be granted permission to 'use' the named SharedConfigMap object within its namespace with an appropriate Role and RoleBinding. For compactness, here are example `oc` invocations for creating such Role and RoleBinding objects.\n\n `oc create role shared-resource-my-share --verb=use --resource=sharedconfigmaps.sharedresource.openshift.io --resource-name=my-share`\n `oc create rolebinding shared-resource-my-share --role=shared-resource-my-share --serviceaccount=my-namespace:default`\n\nShared resource objects, in this case ConfigMaps, have default permissions of list, get, and watch for system authenticated users.\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. These capabilities should not be used by applications needing long term support.", + "description": "SharedConfigMap allows a ConfigMap to be shared across namespaces. Pods can mount the shared ConfigMap by adding a CSI volume to the pod specification using the \"csi.sharedresource.openshift.io\" CSI driver and a reference to the SharedConfigMap in the volume attributes:\n\nspec:\n\n\tvolumes:\n\t- name: shared-configmap\n\t csi:\n\t driver: csi.sharedresource.openshift.io\n\t volumeAttributes:\n\t sharedConfigMap: my-share\n\nFor the mount to be successful, the pod's service account must be granted permission to 'use' the named SharedConfigMap object within its namespace with an appropriate Role and RoleBinding. 
For compactness, here are example `oc` invocations for creating such Role and RoleBinding objects.\n\n\t`oc create role shared-resource-my-share --verb=use --resource=sharedconfigmaps.sharedresource.openshift.io --resource-name=my-share`\n\t`oc create rolebinding shared-resource-my-share --role=shared-resource-my-share --serviceaccount=my-namespace:default`\n\nShared resource objects, in this case ConfigMaps, have default permissions of list, get, and watch for system authenticated users.\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. These capabilities should not be used by applications needing long term support.", "type": "object", + "required": [ + "spec" + ], "properties": { "apiVersion": { "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", @@ -36282,14 +37209,19 @@ "default": {}, "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Condition" }, - "x-kubernetes-patch-merge-key": "type", - "x-kubernetes-patch-strategy": "merge" + "x-kubernetes-list-map-keys": [ + "type" + ], + "x-kubernetes-list-type": "map" } } }, "com.github.openshift.api.sharedresource.v1alpha1.SharedSecret": { - "description": "SharedSecret allows a Secret to be shared across namespaces. Pods can mount the shared Secret by adding a CSI volume to the pod specification using the \"csi.sharedresource.openshift.io\" CSI driver and a reference to the SharedSecret in the volume attributes:\n\nspec:\n volumes:\n - name: shared-secret\n csi:\n driver: csi.sharedresource.openshift.io\n volumeAttributes:\n sharedSecret: my-share\n\nFor the mount to be successful, the pod's service account must be granted permission to 'use' the named SharedSecret object within its namespace with an appropriate Role and RoleBinding. For compactness, here are example `oc` invocations for creating such Role and RoleBinding objects.\n\n `oc create role shared-resource-my-share --verb=use --resource=sharedsecrets.sharedresource.openshift.io --resource-name=my-share`\n `oc create rolebinding shared-resource-my-share --role=shared-resource-my-share --serviceaccount=my-namespace:default`\n\nShared resource objects, in this case Secrets, have default permissions of list, get, and watch for system authenticated users.\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. These capabilities should not be used by applications needing long term support.", + "description": "SharedSecret allows a Secret to be shared across namespaces. Pods can mount the shared Secret by adding a CSI volume to the pod specification using the \"csi.sharedresource.openshift.io\" CSI driver and a reference to the SharedSecret in the volume attributes:\n\nspec:\n\n\tvolumes:\n\t- name: shared-secret\n\t csi:\n\t driver: csi.sharedresource.openshift.io\n\t volumeAttributes:\n\t sharedSecret: my-share\n\nFor the mount to be successful, the pod's service account must be granted permission to 'use' the named SharedSecret object within its namespace with an appropriate Role and RoleBinding. 
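The SharedConfigMap description quoted above already carries the CSI volume stanza and the `oc create role` / `oc create rolebinding` invocations verbatim. Expanded into a complete Pod manifest, the same mount looks roughly like the sketch below, where `my-share` and `my-namespace` are the placeholder names from the description and the pod name, image, and mount path are illustrative additions:

```yaml
# Illustrative only: pod name, image, and mount path are assumptions; my-share and
# my-namespace are the placeholder names used in the SharedConfigMap description.
apiVersion: v1
kind: Pod
metadata:
  name: shared-configmap-consumer
  namespace: my-namespace
spec:
  serviceAccountName: default        # must be granted 'use' on the SharedConfigMap (oc commands above)
  containers:
    - name: app
      image: registry.example.com/app:latest
      volumeMounts:
        - name: shared-configmap
          mountPath: /etc/shared
          readOnly: true
  volumes:
    - name: shared-configmap
      csi:
        driver: csi.sharedresource.openshift.io
        readOnly: true               # read-only mount (illustrative)
        volumeAttributes:
          sharedConfigMap: my-share
```

A SharedSecret is mounted the same way, with `sharedSecret: my-share` under volumeAttributes and the role created against `sharedsecrets.sharedresource.openshift.io`, as the SharedSecret description that follows spells out.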
For compactness, here are example `oc` invocations for creating such Role and RoleBinding objects.\n\n\t`oc create role shared-resource-my-share --verb=use --resource=sharedsecrets.sharedresource.openshift.io --resource-name=my-share`\n\t`oc create rolebinding shared-resource-my-share --role=shared-resource-my-share --serviceaccount=my-namespace:default`\n\nShared resource objects, in this case Secrets, have default permissions of list, get, and watch for system authenticated users.\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. These capabilities should not be used by applications needing long term support.", "type": "object", + "required": [ + "spec" + ], "properties": { "apiVersion": { "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", @@ -36394,8 +37326,10 @@ "default": {}, "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Condition" }, - "x-kubernetes-patch-merge-key": "type", - "x-kubernetes-patch-strategy": "merge" + "x-kubernetes-list-map-keys": [ + "type" + ], + "x-kubernetes-list-type": "map" } } }, @@ -36465,7 +37399,7 @@ ], "properties": { "bindingIDs": { - "description": "bindingids is a list of 'binding_id's provided during successive bind calls to the template service broker.", + "description": "bindingIDs is a list of 'binding_id's provided during successive bind calls to the template service broker.", "type": "array", "items": { "type": "string", @@ -36478,7 +37412,7 @@ "$ref": "#/definitions/io.k8s.api.core.v1.ObjectReference" }, "templateInstance": { - "description": "templateinstance is a reference to a TemplateInstance object residing in a namespace.", + "description": "templateInstance is a reference to a TemplateInstance object residing in a namespace.", "default": {}, "$ref": "#/definitions/io.k8s.api.core.v1.ObjectReference" } @@ -36492,7 +37426,7 @@ ], "properties": { "description": { - "description": "Description of a parameter. Optional.", + "description": "description of a parameter. Optional.", "type": "string" }, "displayName": { @@ -36500,7 +37434,7 @@ "type": "string" }, "from": { - "description": "From is an input value for the generator. Optional.", + "description": "from is an input value for the generator. Optional.", "type": "string" }, "generate": { @@ -36508,7 +37442,7 @@ "type": "string" }, "name": { - "description": "Name must be set and it can be referenced in Template Items using ${PARAMETER_NAME}. Required.", + "description": "name must be set and it can be referenced in Template Items using ${PARAMETER_NAME}. Required.", "type": "string", "default": "" }, @@ -36517,7 +37451,7 @@ "type": "boolean" }, "value": { - "description": "Value holds the Parameter data. If specified, the generator will be ignored. The value replaces all occurrences of the Parameter ${Name} expression during the Template to Config transformation. Optional.", + "description": "value holds the Parameter data. If specified, the generator will be ignored. The value replaces all occurrences of the Parameter ${Name} expression during the Template to Config transformation. 
Optional.", "type": "string" } } @@ -36615,26 +37549,26 @@ ], "properties": { "lastTransitionTime": { - "description": "LastTransitionTime is the last time a condition status transitioned from one state to another.", + "description": "lastTransitionTime is the last time a condition status transitioned from one state to another.", "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time" }, "message": { - "description": "Message is a human readable description of the details of the last transition, complementing reason.", + "description": "message is a human readable description of the details of the last transition, complementing reason.", "type": "string", "default": "" }, "reason": { - "description": "Reason is a brief machine readable explanation for the condition's last transition.", + "description": "reason is a brief machine readable explanation for the condition's last transition.", "type": "string", "default": "" }, "status": { - "description": "Status of the condition, one of True, False or Unknown.", + "description": "status of the condition, one of True, False or Unknown.", "type": "string", "default": "" }, "type": { - "description": "Type of the condition, currently Ready or InstantiateFailure.", + "description": "type of the condition, currently Ready or InstantiateFailure.", "type": "string", "default": "" } @@ -36749,7 +37683,7 @@ } }, "objects": { - "description": "Objects references the objects created by the TemplateInstance.", + "description": "objects references the objects created by the TemplateInstance.", "type": "array", "items": { "default": {}, @@ -36770,7 +37704,7 @@ "type": "string" }, "items": { - "description": "Items is a list of templates", + "description": "items is a list of templates", "type": "array", "items": { "default": {}, @@ -36809,7 +37743,7 @@ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta" }, "users": { - "description": "Users is the list of users in this group.", + "description": "users is the list of users in this group.", "type": "array", "items": { "type": "string", @@ -36830,7 +37764,7 @@ "type": "string" }, "items": { - "description": "Items is the list of groups", + "description": "items is the list of groups", "type": "array", "items": { "default": {}, @@ -36862,7 +37796,7 @@ "type": "string" }, "extra": { - "description": "Extra holds extra information about this identity", + "description": "extra holds extra information about this identity", "type": "object", "additionalProperties": { "type": "string", @@ -36879,17 +37813,17 @@ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta" }, "providerName": { - "description": "ProviderName is the source of identity information", + "description": "providerName is the source of identity information", "type": "string", "default": "" }, "providerUserName": { - "description": "ProviderUserName uniquely represents this identity in the scope of the provider", + "description": "providerUserName uniquely represents this identity in the scope of the provider", "type": "string", "default": "" }, "user": { - "description": "User is a reference to the user this identity is associated with Both Name and UID must be set", + "description": "user is a reference to the user this identity is associated with Both Name and UID must be set", "default": {}, "$ref": "#/definitions/io.k8s.api.core.v1.ObjectReference" } @@ -36907,7 +37841,7 @@ "type": "string" }, "items": { - "description": "Items is the list of identities", + "description": "items is the list of 
identities", "type": "array", "items": { "default": {}, @@ -36937,11 +37871,11 @@ "type": "string" }, "fullName": { - "description": "FullName is the full name of user", + "description": "fullName is the full name of user", "type": "string" }, "groups": { - "description": "Groups specifies group names this user is a member of. This field is deprecated and will be removed in a future release. Instead, create a Group object containing the name of this User.", + "description": "groups specifies group names this user is a member of. This field is deprecated and will be removed in a future release. Instead, create a Group object containing the name of this User.", "type": "array", "items": { "type": "string", @@ -36949,7 +37883,7 @@ } }, "identities": { - "description": "Identities are the identities associated with this user", + "description": "identities are the identities associated with this user", "type": "array", "items": { "type": "string", @@ -36976,7 +37910,7 @@ "type": "string" }, "identity": { - "description": "Identity is a reference to an identity", + "description": "identity is a reference to an identity", "default": {}, "$ref": "#/definitions/io.k8s.api.core.v1.ObjectReference" }, @@ -36990,7 +37924,7 @@ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta" }, "user": { - "description": "User is a reference to a user", + "description": "user is a reference to a user", "default": {}, "$ref": "#/definitions/io.k8s.api.core.v1.ObjectReference" } @@ -37008,7 +37942,7 @@ "type": "string" }, "items": { - "description": "Items is the list of users", + "description": "items is the list of users", "type": "array", "items": { "default": {}, @@ -37664,7 +38598,7 @@ } }, "io.k8s.api.core.v1.Binding": { - "description": "Binding ties one object to another; for example, a pod is bound to a node by a scheduler. Deprecated in 1.7, please use the bindings subresource of pods instead.", + "description": "Binding ties one object to another; for example, a pod is bound to a node by a scheduler.", "type": "object", "required": [ "target" @@ -37691,7 +38625,7 @@ } }, "io.k8s.api.core.v1.CSIPersistentVolumeSource": { - "description": "Represents storage that is managed by an external CSI volume driver (Beta feature)", + "description": "Represents storage that is managed by an external CSI volume driver", "type": "object", "required": [ "driver", @@ -39683,6 +40617,7 @@ } }, "io.k8s.api.core.v1.GRPCAction": { + "description": "GRPCAction specifies an action involving a GRPC service.", "type": "object", "required": [ "port" @@ -40091,19 +41026,19 @@ "type": "object", "properties": { "exec": { - "description": "Exec specifies the action to take.", + "description": "Exec specifies a command to execute in the container.", "$ref": "#/definitions/io.k8s.api.core.v1.ExecAction" }, "httpGet": { - "description": "HTTPGet specifies the http request to perform.", + "description": "HTTPGet specifies an HTTP GET request to perform.", "$ref": "#/definitions/io.k8s.api.core.v1.HTTPGetAction" }, "sleep": { - "description": "Sleep represents the duration that the container should sleep before being terminated.", + "description": "Sleep represents a duration that the container should sleep.", "$ref": "#/definitions/io.k8s.api.core.v1.SleepAction" }, "tcpSocket": { - "description": "Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept for the backward compatibility. 
There are no validation of this field and lifecycle hooks will fail in runtime when tcp handler is specified.", + "description": "Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept for backward compatibility. There is no validation of this field and lifecycle hooks will fail at runtime when it is specified.", "$ref": "#/definitions/io.k8s.api.core.v1.TCPSocketAction" } } @@ -40345,7 +41280,7 @@ "x-kubernetes-map-type": "atomic" }, "io.k8s.api.core.v1.LocalVolumeSource": { - "description": "Local represents directly-attached storage with node affinity (Beta feature)", + "description": "Local represents directly-attached storage with node affinity", "type": "object", "required": [ "path" @@ -40447,12 +41382,15 @@ ], "properties": { "lastTransitionTime": { + "description": "Last time the condition transitioned from one status to another.", "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time" }, "message": { + "description": "Human-readable message indicating details about last transition.", "type": "string" }, "reason": { + "description": "Unique, one-word, CamelCase reason for the condition's last transition.", "type": "string" }, "status": { @@ -40905,7 +41843,7 @@ "type": "object", "properties": { "addresses": { - "description": "List of addresses reachable to the node. Queried from cloud provider, if available. More info: https://kubernetes.io/docs/concepts/nodes/node/#addresses Note: This field is declared as mergeable, but the merge key is not sufficiently unique, which can cause data corruption when it is merged. Callers should instead use a full-replacement patch. See https://pr.k8s.io/79391 for an example. Consumers should assume that addresses can change during the lifetime of a Node. However, there are some exceptions where this may not be possible, such as Pods that inherit a Node's address in its own status or consumers of the downward API (status.hostIP).", + "description": "List of addresses reachable to the node. Queried from cloud provider, if available. More info: https://kubernetes.io/docs/reference/node/node-status/#addresses Note: This field is declared as mergeable, but the merge key is not sufficiently unique, which can cause data corruption when it is merged. Callers should instead use a full-replacement patch. See https://pr.k8s.io/79391 for an example. Consumers should assume that addresses can change during the lifetime of a Node. However, there are some exceptions where this may not be possible, such as Pods that inherit a Node's address in its own status or consumers of the downward API (status.hostIP).", "type": "array", "items": { "default": {}, @@ -40933,7 +41871,7 @@ } }, "conditions": { - "description": "Conditions is an array of current observed node conditions. More info: https://kubernetes.io/docs/concepts/nodes/node/#condition", + "description": "Conditions is an array of current observed node conditions. More info: https://kubernetes.io/docs/reference/node/node-status/#condition", "type": "array", "items": { "default": {}, @@ -40969,7 +41907,7 @@ "x-kubernetes-list-type": "atomic" }, "nodeInfo": { - "description": "Set of ids/uuids to uniquely identify the node. More info: https://kubernetes.io/docs/concepts/nodes/node/#info", + "description": "Set of ids/uuids to uniquely identify the node. 
More info: https://kubernetes.io/docs/reference/node/node-status/#info", "default": {}, "$ref": "#/definitions/io.k8s.api.core.v1.NodeSystemInfo" }, @@ -41216,10 +42154,12 @@ "type": "string" }, "status": { + "description": "Status is the status of the condition. Can be True, False, Unknown. More info: https://kubernetes.io/docs/reference/kubernetes-api/config-and-storage-resources/persistent-volume-claim-v1/#:~:text=state%20of%20pvc-,conditions.status,-(string)%2C%20required", "type": "string", "default": "" }, "type": { + "description": "Type is the type of the condition. More info: https://kubernetes.io/docs/reference/kubernetes-api/config-and-storage-resources/persistent-volume-claim-v1/#:~:text=set%20to%20%27ResizeStarted%27.-,PersistentVolumeClaimCondition,-contains%20details%20about", "type": "string", "default": "" } @@ -41467,27 +42407,27 @@ "type": "object", "properties": { "awsElasticBlockStore": { - "description": "awsElasticBlockStore represents an AWS Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore", + "description": "awsElasticBlockStore represents an AWS Disk resource that is attached to a kubelet's host machine and then exposed to the pod. Deprecated: AWSElasticBlockStore is deprecated. All operations for the in-tree awsElasticBlockStore type are redirected to the ebs.csi.aws.com CSI driver. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore", "$ref": "#/definitions/io.k8s.api.core.v1.AWSElasticBlockStoreVolumeSource" }, "azureDisk": { - "description": "azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.", + "description": "azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. Deprecated: AzureDisk is deprecated. All operations for the in-tree azureDisk type are redirected to the disk.csi.azure.com CSI driver.", "$ref": "#/definitions/io.k8s.api.core.v1.AzureDiskVolumeSource" }, "azureFile": { - "description": "azureFile represents an Azure File Service mount on the host and bind mount to the pod.", + "description": "azureFile represents an Azure File Service mount on the host and bind mount to the pod. Deprecated: AzureFile is deprecated. All operations for the in-tree azureFile type are redirected to the file.csi.azure.com CSI driver.", "$ref": "#/definitions/io.k8s.api.core.v1.AzureFilePersistentVolumeSource" }, "cephfs": { - "description": "cephFS represents a Ceph FS mount on the host that shares a pod's lifetime", + "description": "cephFS represents a Ceph FS mount on the host that shares a pod's lifetime. Deprecated: CephFS is deprecated and the in-tree cephfs type is no longer supported.", "$ref": "#/definitions/io.k8s.api.core.v1.CephFSPersistentVolumeSource" }, "cinder": { - "description": "cinder represents a cinder volume attached and mounted on kubelets host machine. More info: https://examples.k8s.io/mysql-cinder-pd/README.md", + "description": "cinder represents a cinder volume attached and mounted on kubelets host machine. Deprecated: Cinder is deprecated. All operations for the in-tree cinder type are redirected to the cinder.csi.openstack.org CSI driver. 
More info: https://examples.k8s.io/mysql-cinder-pd/README.md", "$ref": "#/definitions/io.k8s.api.core.v1.CinderPersistentVolumeSource" }, "csi": { - "description": "csi represents storage that is handled by an external CSI driver (Beta feature).", + "description": "csi represents storage that is handled by an external CSI driver.", "$ref": "#/definitions/io.k8s.api.core.v1.CSIPersistentVolumeSource" }, "fc": { @@ -41495,19 +42435,19 @@ "$ref": "#/definitions/io.k8s.api.core.v1.FCVolumeSource" }, "flexVolume": { - "description": "flexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin.", + "description": "flexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin. Deprecated: FlexVolume is deprecated. Consider using a CSIDriver instead.", "$ref": "#/definitions/io.k8s.api.core.v1.FlexPersistentVolumeSource" }, "flocker": { - "description": "flocker represents a Flocker volume attached to a kubelet's host machine and exposed to the pod for its usage. This depends on the Flocker control service being running", + "description": "flocker represents a Flocker volume attached to a kubelet's host machine and exposed to the pod for its usage. This depends on the Flocker control service being running. Deprecated: Flocker is deprecated and the in-tree flocker type is no longer supported.", "$ref": "#/definitions/io.k8s.api.core.v1.FlockerVolumeSource" }, "gcePersistentDisk": { - "description": "gcePersistentDisk represents a GCE Disk resource that is attached to a kubelet's host machine and then exposed to the pod. Provisioned by an admin. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk", + "description": "gcePersistentDisk represents a GCE Disk resource that is attached to a kubelet's host machine and then exposed to the pod. Provisioned by an admin. Deprecated: GCEPersistentDisk is deprecated. All operations for the in-tree gcePersistentDisk type are redirected to the pd.csi.storage.gke.io CSI driver. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk", "$ref": "#/definitions/io.k8s.api.core.v1.GCEPersistentDiskVolumeSource" }, "glusterfs": { - "description": "glusterfs represents a Glusterfs volume that is attached to a host and exposed to the pod. Provisioned by an admin. More info: https://examples.k8s.io/volumes/glusterfs/README.md", + "description": "glusterfs represents a Glusterfs volume that is attached to a host and exposed to the pod. Provisioned by an admin. Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported. More info: https://examples.k8s.io/volumes/glusterfs/README.md", "$ref": "#/definitions/io.k8s.api.core.v1.GlusterfsPersistentVolumeSource" }, "hostPath": { @@ -41527,31 +42467,31 @@ "$ref": "#/definitions/io.k8s.api.core.v1.NFSVolumeSource" }, "photonPersistentDisk": { - "description": "photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine", + "description": "photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine. 
Deprecated: PhotonPersistentDisk is deprecated and the in-tree photonPersistentDisk type is no longer supported.", "$ref": "#/definitions/io.k8s.api.core.v1.PhotonPersistentDiskVolumeSource" }, "portworxVolume": { - "description": "portworxVolume represents a portworx volume attached and mounted on kubelets host machine", + "description": "portworxVolume represents a portworx volume attached and mounted on kubelets host machine. Deprecated: PortworxVolume is deprecated. All operations for the in-tree portworxVolume type are redirected to the pxd.portworx.com CSI driver when the CSIMigrationPortworx feature-gate is on.", "$ref": "#/definitions/io.k8s.api.core.v1.PortworxVolumeSource" }, "quobyte": { - "description": "quobyte represents a Quobyte mount on the host that shares a pod's lifetime", + "description": "quobyte represents a Quobyte mount on the host that shares a pod's lifetime. Deprecated: Quobyte is deprecated and the in-tree quobyte type is no longer supported.", "$ref": "#/definitions/io.k8s.api.core.v1.QuobyteVolumeSource" }, "rbd": { - "description": "rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. More info: https://examples.k8s.io/volumes/rbd/README.md", + "description": "rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported. More info: https://examples.k8s.io/volumes/rbd/README.md", "$ref": "#/definitions/io.k8s.api.core.v1.RBDPersistentVolumeSource" }, "scaleIO": { - "description": "scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.", + "description": "scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. Deprecated: ScaleIO is deprecated and the in-tree scaleIO type is no longer supported.", "$ref": "#/definitions/io.k8s.api.core.v1.ScaleIOPersistentVolumeSource" }, "storageos": { - "description": "storageOS represents a StorageOS volume that is attached to the kubelet's host machine and mounted into the pod More info: https://examples.k8s.io/volumes/storageos/README.md", + "description": "storageOS represents a StorageOS volume that is attached to the kubelet's host machine and mounted into the pod. Deprecated: StorageOS is deprecated and the in-tree storageos type is no longer supported. More info: https://examples.k8s.io/volumes/storageos/README.md", "$ref": "#/definitions/io.k8s.api.core.v1.StorageOSPersistentVolumeSource" }, "vsphereVolume": { - "description": "vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine", + "description": "vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine. Deprecated: VsphereVolume is deprecated. All operations for the in-tree vsphereVolume type are redirected to the csi.vsphere.vmware.com CSI driver.", "$ref": "#/definitions/io.k8s.api.core.v1.VsphereVirtualDiskVolumeSource" } } @@ -41576,15 +42516,15 @@ "x-kubernetes-list-type": "atomic" }, "awsElasticBlockStore": { - "description": "awsElasticBlockStore represents an AWS Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore", + "description": "awsElasticBlockStore represents an AWS Disk resource that is attached to a kubelet's host machine and then exposed to the pod. Deprecated: AWSElasticBlockStore is deprecated. 
All operations for the in-tree awsElasticBlockStore type are redirected to the ebs.csi.aws.com CSI driver. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore", "$ref": "#/definitions/io.k8s.api.core.v1.AWSElasticBlockStoreVolumeSource" }, "azureDisk": { - "description": "azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.", + "description": "azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. Deprecated: AzureDisk is deprecated. All operations for the in-tree azureDisk type are redirected to the disk.csi.azure.com CSI driver.", "$ref": "#/definitions/io.k8s.api.core.v1.AzureDiskVolumeSource" }, "azureFile": { - "description": "azureFile represents an Azure File Service mount on the host and bind mount to the pod.", + "description": "azureFile represents an Azure File Service mount on the host and bind mount to the pod. Deprecated: AzureFile is deprecated. All operations for the in-tree azureFile type are redirected to the file.csi.azure.com CSI driver.", "$ref": "#/definitions/io.k8s.api.core.v1.AzureFilePersistentVolumeSource" }, "capacity": { @@ -41595,11 +42535,11 @@ } }, "cephfs": { - "description": "cephFS represents a Ceph FS mount on the host that shares a pod's lifetime", + "description": "cephFS represents a Ceph FS mount on the host that shares a pod's lifetime. Deprecated: CephFS is deprecated and the in-tree cephfs type is no longer supported.", "$ref": "#/definitions/io.k8s.api.core.v1.CephFSPersistentVolumeSource" }, "cinder": { - "description": "cinder represents a cinder volume attached and mounted on kubelets host machine. More info: https://examples.k8s.io/mysql-cinder-pd/README.md", + "description": "cinder represents a cinder volume attached and mounted on kubelets host machine. Deprecated: Cinder is deprecated. All operations for the in-tree cinder type are redirected to the cinder.csi.openstack.org CSI driver. More info: https://examples.k8s.io/mysql-cinder-pd/README.md", "$ref": "#/definitions/io.k8s.api.core.v1.CinderPersistentVolumeSource" }, "claimRef": { @@ -41608,7 +42548,7 @@ "x-kubernetes-map-type": "granular" }, "csi": { - "description": "csi represents storage that is handled by an external CSI driver (Beta feature).", + "description": "csi represents storage that is handled by an external CSI driver.", "$ref": "#/definitions/io.k8s.api.core.v1.CSIPersistentVolumeSource" }, "fc": { @@ -41616,19 +42556,19 @@ "$ref": "#/definitions/io.k8s.api.core.v1.FCVolumeSource" }, "flexVolume": { - "description": "flexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin.", + "description": "flexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin. Deprecated: FlexVolume is deprecated. Consider using a CSIDriver instead.", "$ref": "#/definitions/io.k8s.api.core.v1.FlexPersistentVolumeSource" }, "flocker": { - "description": "flocker represents a Flocker volume attached to a kubelet's host machine and exposed to the pod for its usage. This depends on the Flocker control service being running", + "description": "flocker represents a Flocker volume attached to a kubelet's host machine and exposed to the pod for its usage. This depends on the Flocker control service being running. 
Deprecated: Flocker is deprecated and the in-tree flocker type is no longer supported.", "$ref": "#/definitions/io.k8s.api.core.v1.FlockerVolumeSource" }, "gcePersistentDisk": { - "description": "gcePersistentDisk represents a GCE Disk resource that is attached to a kubelet's host machine and then exposed to the pod. Provisioned by an admin. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk", + "description": "gcePersistentDisk represents a GCE Disk resource that is attached to a kubelet's host machine and then exposed to the pod. Provisioned by an admin. Deprecated: GCEPersistentDisk is deprecated. All operations for the in-tree gcePersistentDisk type are redirected to the pd.csi.storage.gke.io CSI driver. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk", "$ref": "#/definitions/io.k8s.api.core.v1.GCEPersistentDiskVolumeSource" }, "glusterfs": { - "description": "glusterfs represents a Glusterfs volume that is attached to a host and exposed to the pod. Provisioned by an admin. More info: https://examples.k8s.io/volumes/glusterfs/README.md", + "description": "glusterfs represents a Glusterfs volume that is attached to a host and exposed to the pod. Provisioned by an admin. Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported. More info: https://examples.k8s.io/volumes/glusterfs/README.md", "$ref": "#/definitions/io.k8s.api.core.v1.GlusterfsPersistentVolumeSource" }, "hostPath": { @@ -41670,23 +42610,23 @@ ] }, "photonPersistentDisk": { - "description": "photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine", + "description": "photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine. Deprecated: PhotonPersistentDisk is deprecated and the in-tree photonPersistentDisk type is no longer supported.", "$ref": "#/definitions/io.k8s.api.core.v1.PhotonPersistentDiskVolumeSource" }, "portworxVolume": { - "description": "portworxVolume represents a portworx volume attached and mounted on kubelets host machine", + "description": "portworxVolume represents a portworx volume attached and mounted on kubelets host machine. Deprecated: PortworxVolume is deprecated. All operations for the in-tree portworxVolume type are redirected to the pxd.portworx.com CSI driver when the CSIMigrationPortworx feature-gate is on.", "$ref": "#/definitions/io.k8s.api.core.v1.PortworxVolumeSource" }, "quobyte": { - "description": "quobyte represents a Quobyte mount on the host that shares a pod's lifetime", + "description": "quobyte represents a Quobyte mount on the host that shares a pod's lifetime. Deprecated: Quobyte is deprecated and the in-tree quobyte type is no longer supported.", "$ref": "#/definitions/io.k8s.api.core.v1.QuobyteVolumeSource" }, "rbd": { - "description": "rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. More info: https://examples.k8s.io/volumes/rbd/README.md", + "description": "rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported. 
More info: https://examples.k8s.io/volumes/rbd/README.md", "$ref": "#/definitions/io.k8s.api.core.v1.RBDPersistentVolumeSource" }, "scaleIO": { - "description": "scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.", + "description": "scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. Deprecated: ScaleIO is deprecated and the in-tree scaleIO type is no longer supported.", "$ref": "#/definitions/io.k8s.api.core.v1.ScaleIOPersistentVolumeSource" }, "storageClassName": { @@ -41694,7 +42634,7 @@ "type": "string" }, "storageos": { - "description": "storageOS represents a StorageOS volume that is attached to the kubelet's host machine and mounted into the pod More info: https://examples.k8s.io/volumes/storageos/README.md", + "description": "storageOS represents a StorageOS volume that is attached to the kubelet's host machine and mounted into the pod. Deprecated: StorageOS is deprecated and the in-tree storageos type is no longer supported. More info: https://examples.k8s.io/volumes/storageos/README.md", "$ref": "#/definitions/io.k8s.api.core.v1.StorageOSPersistentVolumeSource" }, "volumeAttributesClassName": { @@ -41710,7 +42650,7 @@ ] }, "vsphereVolume": { - "description": "vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine", + "description": "vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine. Deprecated: VsphereVolume is deprecated. All operations for the in-tree vsphereVolume type are redirected to the csi.vsphere.vmware.com CSI driver.", "$ref": "#/definitions/io.k8s.api.core.v1.VsphereVirtualDiskVolumeSource" } } @@ -41996,10 +42936,11 @@ "type": "object", "properties": { "name": { - "description": "Required.", + "description": "Name is this DNS resolver option's name. Required.", "type": "string" }, "value": { + "description": "Value is this DNS resolver option's value.", "type": "string" } } @@ -42136,8 +43077,12 @@ "description": "An RFC3339 timestamp from which to show logs. If this value precedes the time a pod was started, only logs since the pod start will be returned. If this value is in the future, no logs will be returned. Only one of sinceSeconds or sinceTime may be specified.", "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time" }, + "stream": { + "description": "Specify which container log stream to return to the client. Acceptable values are \"All\", \"Stdout\" and \"Stderr\". If not specified, \"All\" is used, and both stdout and stderr are returned interleaved. Note that when \"TailLines\" is specified, \"Stream\" can only be set to nil or \"All\".", + "type": "string" + }, "tailLines": { - "description": "If set, the number of lines from the end of the logs to show. If not specified, logs are shown from the creation of the container or sinceSeconds or sinceTime", + "description": "If set, the number of lines from the end of the logs to show. If not specified, logs are shown from the creation of the container or sinceSeconds or sinceTime. Note that when \"TailLines\" is specified, \"Stream\" can only be set to nil or \"All\".", "type": "integer", "format": "int64" }, @@ -42306,6 +43251,10 @@ "type": "integer", "format": "int64" }, + "seLinuxChangePolicy": { + "description": "seLinuxChangePolicy defines how the container's SELinux label is applied to all volumes used by the Pod. It has no effect on nodes that do not support SELinux or to volumes does not support SELinux. 
Valid values are \"MountOption\" and \"Recursive\".\n\n\"Recursive\" means relabeling of all files on all Pod volumes by the container runtime. This may be slow for large volumes, but allows mixing privileged and unprivileged Pods sharing the same volume on the same node.\n\n\"MountOption\" mounts all eligible Pod volumes with `-o context` mount option. This requires all Pods that share the same volume to use the same SELinux label. It is not possible to share the same volume among privileged and unprivileged Pods. Eligible volumes are in-tree FibreChannel and iSCSI volumes, and all CSI volumes whose CSI driver announces SELinux support by setting spec.seLinuxMount: true in their CSIDriver instance. Other volumes are always re-labelled recursively. \"MountOption\" value is allowed only when SELinuxMount feature gate is enabled.\n\nIf not specified and SELinuxMount feature gate is enabled, \"MountOption\" is used. If not specified and SELinuxMount feature gate is disabled, \"MountOption\" is used for ReadWriteOncePod volumes and \"Recursive\" for all other volumes.\n\nThis field affects only Pods that have SELinux label set, either in PodSecurityContext or in SecurityContext of all containers.\n\nAll Pods that use the same volume should use the same seLinuxChangePolicy, otherwise some pods can get stuck in ContainerCreating state. Note that this field cannot be set when spec.os.name is windows.", + "type": "string" + }, "seLinuxOptions": { "description": "The SELinux context to be applied to all containers. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows.", "$ref": "#/definitions/io.k8s.api.core.v1.SELinuxOptions" @@ -42549,6 +43498,10 @@ "x-kubernetes-patch-merge-key": "name", "x-kubernetes-patch-strategy": "merge,retainKeys" }, + "resources": { + "description": "Resources is the total amount of CPU and Memory resources required by all containers in the pod. It supports specifying Requests and Limits for \"cpu\" and \"memory\" resource names only. ResourceClaims are not supported.\n\nThis field enables fine-grained control over resource allocation for the entire pod, allowing resource sharing among containers in a pod.\n\nThis is an alpha field and requires enabling the PodLevelResources feature gate.", + "$ref": "#/definitions/io.k8s.api.core.v1.ResourceRequirements" + }, "restartPolicy": { "description": "Restart policy for all containers within the pod. One of Always, OnFailure, Never. In some contexts, only a subset of those values may be permitted. Default to Always. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy\n\nPossible enum values:\n - `\"Always\"`\n - `\"Never\"`\n - `\"OnFailure\"`", "type": "string", @@ -42593,7 +43546,7 @@ "type": "string" }, "setHostnameAsFQDN": { - "description": "If true the pod's hostname will be configured as the pod's FQDN, rather than the leaf name (the default). In Linux containers, this means setting the FQDN in the hostname field of the kernel (the nodename field of struct utsname). In Windows containers, this means setting the registry value of hostname for the registry key HKEY_LOCAL_MACHINE\\SYSTEM\\CurrentControlSet\\Services\\Tcpip\\Parameters to FQDN. If a pod does not have FQDN, this has no effect. 
Default to false.", + "description": "If true the pod's hostname will be configured as the pod's FQDN, rather than the leaf name (the default). In Linux containers, this means setting the FQDN in the hostname field of the kernel (the nodename field of struct utsname). In Windows containers, this means setting the registry value of hostname for the registry key HKEY_LOCAL_MACHINE\\\\SYSTEM\\\\CurrentControlSet\\\\Services\\\\Tcpip\\\\Parameters to FQDN. If a pod does not have FQDN, this has no effect. Default to false.", "type": "boolean" }, "shareProcessNamespace": { @@ -42668,7 +43621,7 @@ "x-kubernetes-patch-strategy": "merge" }, "containerStatuses": { - "description": "The list has one entry per container in the manifest. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status", + "description": "Statuses of containers in this pod. Each container in the pod should have at most one status in this list, and all statuses should be for containers in the pod. However this is not enforced. If a status for a non-existent container is present in the list, or the list has duplicate names, the behavior of various Kubernetes components is not defined and those statuses might be ignored. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status", "type": "array", "items": { "default": {}, @@ -42677,7 +43630,7 @@ "x-kubernetes-list-type": "atomic" }, "ephemeralContainerStatuses": { - "description": "Status for any ephemeral containers that have run in this pod.", + "description": "Statuses for any ephemeral containers that have run in this pod. Each ephemeral container in the pod should have at most one status in this list, and all statuses should be for containers in the pod. However this is not enforced. If a status for a non-existent container is present in the list, or the list has duplicate names, the behavior of various Kubernetes components is not defined and those statuses might be ignored. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status", "type": "array", "items": { "default": {}, @@ -42701,7 +43654,7 @@ "x-kubernetes-patch-strategy": "merge" }, "initContainerStatuses": { - "description": "The list has one entry per init container in the manifest. The most recent successful init container will have ready = true, the most recently started container will have startTime set. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status", + "description": "Statuses of init containers in this pod. The most recent successful non-restartable init container will have ready = true, the most recently started container will have startTime set. Each init container in the pod should have at most one status in this list, and all statuses should be for containers in the pod. However this is not enforced. If a status for a non-existent container is present in the list, or the list has duplicate names, the behavior of various Kubernetes components is not defined and those statuses might be ignored. 
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#pod-and-container-status", "type": "array", "items": { "default": {}, @@ -42878,6 +43831,7 @@ } }, "io.k8s.api.core.v1.PortStatus": { + "description": "PortStatus represents the error condition of a service port", "type": "object", "required": [ "port", @@ -42980,7 +43934,7 @@ "type": "object", "properties": { "exec": { - "description": "Exec specifies the action to take.", + "description": "Exec specifies a command to execute in the container.", "$ref": "#/definitions/io.k8s.api.core.v1.ExecAction" }, "failureThreshold": { @@ -42989,11 +43943,11 @@ "format": "int32" }, "grpc": { - "description": "GRPC specifies an action involving a GRPC port.", + "description": "GRPC specifies a GRPC HealthCheckRequest.", "$ref": "#/definitions/io.k8s.api.core.v1.GRPCAction" }, "httpGet": { - "description": "HTTPGet specifies the http request to perform.", + "description": "HTTPGet specifies an HTTP GET request to perform.", "$ref": "#/definitions/io.k8s.api.core.v1.HTTPGetAction" }, "initialDelaySeconds": { @@ -43012,7 +43966,7 @@ "format": "int32" }, "tcpSocket": { - "description": "TCPSocket specifies an action involving a TCP port.", + "description": "TCPSocket specifies a connection to a TCP port.", "$ref": "#/definitions/io.k8s.api.core.v1.TCPSocketAction" }, "terminationGracePeriodSeconds": { @@ -43032,19 +43986,19 @@ "type": "object", "properties": { "exec": { - "description": "Exec specifies the action to take.", + "description": "Exec specifies a command to execute in the container.", "$ref": "#/definitions/io.k8s.api.core.v1.ExecAction" }, "grpc": { - "description": "GRPC specifies an action involving a GRPC port.", + "description": "GRPC specifies a GRPC HealthCheckRequest.", "$ref": "#/definitions/io.k8s.api.core.v1.GRPCAction" }, "httpGet": { - "description": "HTTPGet specifies the http request to perform.", + "description": "HTTPGet specifies an HTTP GET request to perform.", "$ref": "#/definitions/io.k8s.api.core.v1.HTTPGetAction" }, "tcpSocket": { - "description": "TCPSocket specifies an action involving a TCP port.", + "description": "TCPSocket specifies a connection to a TCP port.", "$ref": "#/definitions/io.k8s.api.core.v1.TCPSocketAction" } } @@ -43451,7 +44405,7 @@ "x-kubernetes-map-type": "atomic" }, "io.k8s.api.core.v1.ResourceHealth": { - "description": "ResourceHealth represents the health of a resource. It has the latest device health information. This is a part of KEP https://kep.k8s.io/4680 and historical health changes are planned to be added in future iterations of a KEP.", + "description": "ResourceHealth represents the health of a resource. It has the latest device health information. This is a part of KEP https://kep.k8s.io/4680.", "type": "object", "required": [ "resourceID" @@ -43614,18 +44568,19 @@ } }, "io.k8s.api.core.v1.ResourceStatus": { + "description": "ResourceStatus represents the status of a single resource allocated to a Pod.", "type": "object", "required": [ "name" ], "properties": { "name": { - "description": "Name of the resource. Must be unique within the pod and match one of the resources from the pod spec.", + "description": "Name of the resource. Must be unique within the pod and in case of non-DRA resource, match one of the resources from the pod spec. For DRA resources, the value must be \"claim:/\". 
When this status is reported about a container, the \"claim_name\" and \"request\" must match one of the claims of this container.", "type": "string", "default": "" }, "resources": { - "description": "List of unique Resources health. Each element in the list contains an unique resource ID and resource health. At a minimum, ResourceID must uniquely identify the Resource allocated to the Pod on the Node for the lifetime of a Pod. See ResourceID type for it's definition.", + "description": "List of unique resources health. Each element in the list contains an unique resource ID and its health. At a minimum, for the lifetime of a Pod, resource ID must uniquely identify the resource allocated to the Pod on the Node. If other Pod on the same Node reports the status with the same resource ID, it must be the same resource they share. See ResourceID type definition for a specific format it has in various use cases.", "type": "array", "items": { "default": {}, @@ -44178,7 +45133,7 @@ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta" }, "secrets": { - "description": "Secrets is a list of the secrets in the same namespace that pods running using this ServiceAccount are allowed to use. Pods are only limited to this list if this service account has a \"kubernetes.io/enforce-mountable-secrets\" annotation set to \"true\". This field should not be used to find auto-generated service account token secrets for use outside of pods. Instead, tokens can be requested directly using the TokenRequest API, or service account token secrets can be manually created. More info: https://kubernetes.io/docs/concepts/configuration/secret", + "description": "Secrets is a list of the secrets in the same namespace that pods running using this ServiceAccount are allowed to use. Pods are only limited to this list if this service account has a \"kubernetes.io/enforce-mountable-secrets\" annotation set to \"true\". The \"kubernetes.io/enforce-mountable-secrets\" annotation is deprecated since v1.32. Prefer separate namespaces to isolate access to mounted secrets. This field should not be used to find auto-generated service account token secrets for use outside of pods. Instead, tokens can be requested directly using the TokenRequest API, or service account token secrets can be manually created. More info: https://kubernetes.io/docs/concepts/configuration/secret", "type": "array", "items": { "default": {}, @@ -44472,7 +45427,7 @@ "$ref": "#/definitions/io.k8s.api.core.v1.SessionAffinityConfig" }, "trafficDistribution": { - "description": "TrafficDistribution offers a way to express preferences for how traffic is distributed to Service endpoints. Implementations can use this field as a hint, but are not required to guarantee strict adherence. If the field is not set, the implementation will apply its default routing strategy. If set to \"PreferClose\", implementations should prioritize endpoints that are topologically close (e.g., same zone). This is an alpha field and requires enabling ServiceTrafficDistribution feature.", + "description": "TrafficDistribution offers a way to express preferences for how traffic is distributed to Service endpoints. Implementations can use this field as a hint, but are not required to guarantee strict adherence. If the field is not set, the implementation will apply its default routing strategy. If set to \"PreferClose\", implementations should prioritize endpoints that are topologically close (e.g., same zone). 
This is a beta field and requires enabling ServiceTrafficDistribution feature.", "type": "string" }, "type": { @@ -44826,6 +45781,7 @@ "x-kubernetes-map-type": "atomic" }, "io.k8s.api.core.v1.TypedObjectReference": { + "description": "TypedObjectReference contains enough information to let you locate the typed referenced object", "type": "object", "required": [ "kind", @@ -44860,23 +45816,23 @@ ], "properties": { "awsElasticBlockStore": { - "description": "awsElasticBlockStore represents an AWS Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore", + "description": "awsElasticBlockStore represents an AWS Disk resource that is attached to a kubelet's host machine and then exposed to the pod. Deprecated: AWSElasticBlockStore is deprecated. All operations for the in-tree awsElasticBlockStore type are redirected to the ebs.csi.aws.com CSI driver. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore", "$ref": "#/definitions/io.k8s.api.core.v1.AWSElasticBlockStoreVolumeSource" }, "azureDisk": { - "description": "azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.", + "description": "azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. Deprecated: AzureDisk is deprecated. All operations for the in-tree azureDisk type are redirected to the disk.csi.azure.com CSI driver.", "$ref": "#/definitions/io.k8s.api.core.v1.AzureDiskVolumeSource" }, "azureFile": { - "description": "azureFile represents an Azure File Service mount on the host and bind mount to the pod.", + "description": "azureFile represents an Azure File Service mount on the host and bind mount to the pod. Deprecated: AzureFile is deprecated. All operations for the in-tree azureFile type are redirected to the file.csi.azure.com CSI driver.", "$ref": "#/definitions/io.k8s.api.core.v1.AzureFileVolumeSource" }, "cephfs": { - "description": "cephFS represents a Ceph FS mount on the host that shares a pod's lifetime", + "description": "cephFS represents a Ceph FS mount on the host that shares a pod's lifetime. Deprecated: CephFS is deprecated and the in-tree cephfs type is no longer supported.", "$ref": "#/definitions/io.k8s.api.core.v1.CephFSVolumeSource" }, "cinder": { - "description": "cinder represents a cinder volume attached and mounted on kubelets host machine. More info: https://examples.k8s.io/mysql-cinder-pd/README.md", + "description": "cinder represents a cinder volume attached and mounted on kubelets host machine. Deprecated: Cinder is deprecated. All operations for the in-tree cinder type are redirected to the cinder.csi.openstack.org CSI driver. 
More info: https://examples.k8s.io/mysql-cinder-pd/README.md", "$ref": "#/definitions/io.k8s.api.core.v1.CinderVolumeSource" }, "configMap": { @@ -44884,7 +45840,7 @@ "$ref": "#/definitions/io.k8s.api.core.v1.ConfigMapVolumeSource" }, "csi": { - "description": "csi (Container Storage Interface) represents ephemeral storage that is handled by certain external CSI drivers (Beta feature).", + "description": "csi (Container Storage Interface) represents ephemeral storage that is handled by certain external CSI drivers.", "$ref": "#/definitions/io.k8s.api.core.v1.CSIVolumeSource" }, "downwardAPI": { @@ -44904,23 +45860,23 @@ "$ref": "#/definitions/io.k8s.api.core.v1.FCVolumeSource" }, "flexVolume": { - "description": "flexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin.", + "description": "flexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin. Deprecated: FlexVolume is deprecated. Consider using a CSIDriver instead.", "$ref": "#/definitions/io.k8s.api.core.v1.FlexVolumeSource" }, "flocker": { - "description": "flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running", + "description": "flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running. Deprecated: Flocker is deprecated and the in-tree flocker type is no longer supported.", "$ref": "#/definitions/io.k8s.api.core.v1.FlockerVolumeSource" }, "gcePersistentDisk": { - "description": "gcePersistentDisk represents a GCE Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk", + "description": "gcePersistentDisk represents a GCE Disk resource that is attached to a kubelet's host machine and then exposed to the pod. Deprecated: GCEPersistentDisk is deprecated. All operations for the in-tree gcePersistentDisk type are redirected to the pd.csi.storage.gke.io CSI driver. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk", "$ref": "#/definitions/io.k8s.api.core.v1.GCEPersistentDiskVolumeSource" }, "gitRepo": { - "description": "gitRepo represents a git repository at a particular revision. DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod's container.", + "description": "gitRepo represents a git repository at a particular revision. Deprecated: GitRepo is deprecated. To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod's container.", "$ref": "#/definitions/io.k8s.api.core.v1.GitRepoVolumeSource" }, "glusterfs": { - "description": "glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. More info: https://examples.k8s.io/volumes/glusterfs/README.md", + "description": "glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported. 
More info: https://examples.k8s.io/volumes/glusterfs/README.md", "$ref": "#/definitions/io.k8s.api.core.v1.GlusterfsVolumeSource" }, "hostPath": { @@ -44949,11 +45905,11 @@ "$ref": "#/definitions/io.k8s.api.core.v1.PersistentVolumeClaimVolumeSource" }, "photonPersistentDisk": { - "description": "photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine", + "description": "photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine. Deprecated: PhotonPersistentDisk is deprecated and the in-tree photonPersistentDisk type is no longer supported.", "$ref": "#/definitions/io.k8s.api.core.v1.PhotonPersistentDiskVolumeSource" }, "portworxVolume": { - "description": "portworxVolume represents a portworx volume attached and mounted on kubelets host machine", + "description": "portworxVolume represents a portworx volume attached and mounted on kubelets host machine. Deprecated: PortworxVolume is deprecated. All operations for the in-tree portworxVolume type are redirected to the pxd.portworx.com CSI driver when the CSIMigrationPortworx feature-gate is on.", "$ref": "#/definitions/io.k8s.api.core.v1.PortworxVolumeSource" }, "projected": { @@ -44961,15 +45917,15 @@ "$ref": "#/definitions/io.k8s.api.core.v1.ProjectedVolumeSource" }, "quobyte": { - "description": "quobyte represents a Quobyte mount on the host that shares a pod's lifetime", + "description": "quobyte represents a Quobyte mount on the host that shares a pod's lifetime. Deprecated: Quobyte is deprecated and the in-tree quobyte type is no longer supported.", "$ref": "#/definitions/io.k8s.api.core.v1.QuobyteVolumeSource" }, "rbd": { - "description": "rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. More info: https://examples.k8s.io/volumes/rbd/README.md", + "description": "rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported. More info: https://examples.k8s.io/volumes/rbd/README.md", "$ref": "#/definitions/io.k8s.api.core.v1.RBDVolumeSource" }, "scaleIO": { - "description": "scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.", + "description": "scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. Deprecated: ScaleIO is deprecated and the in-tree scaleIO type is no longer supported.", "$ref": "#/definitions/io.k8s.api.core.v1.ScaleIOVolumeSource" }, "secret": { @@ -44977,11 +45933,11 @@ "$ref": "#/definitions/io.k8s.api.core.v1.SecretVolumeSource" }, "storageos": { - "description": "storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes.", + "description": "storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes. Deprecated: StorageOS is deprecated and the in-tree storageos type is no longer supported.", "$ref": "#/definitions/io.k8s.api.core.v1.StorageOSVolumeSource" }, "vsphereVolume": { - "description": "vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine", + "description": "vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine. Deprecated: VsphereVolume is deprecated. 
All operations for the in-tree vsphereVolume type are redirected to the csi.vsphere.vmware.com CSI driver.", "$ref": "#/definitions/io.k8s.api.core.v1.VsphereVirtualDiskVolumeSource" } } @@ -45140,23 +46096,23 @@ "type": "object", "properties": { "awsElasticBlockStore": { - "description": "awsElasticBlockStore represents an AWS Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore", + "description": "awsElasticBlockStore represents an AWS Disk resource that is attached to a kubelet's host machine and then exposed to the pod. Deprecated: AWSElasticBlockStore is deprecated. All operations for the in-tree awsElasticBlockStore type are redirected to the ebs.csi.aws.com CSI driver. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore", "$ref": "#/definitions/io.k8s.api.core.v1.AWSElasticBlockStoreVolumeSource" }, "azureDisk": { - "description": "azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.", + "description": "azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. Deprecated: AzureDisk is deprecated. All operations for the in-tree azureDisk type are redirected to the disk.csi.azure.com CSI driver.", "$ref": "#/definitions/io.k8s.api.core.v1.AzureDiskVolumeSource" }, "azureFile": { - "description": "azureFile represents an Azure File Service mount on the host and bind mount to the pod.", + "description": "azureFile represents an Azure File Service mount on the host and bind mount to the pod. Deprecated: AzureFile is deprecated. All operations for the in-tree azureFile type are redirected to the file.csi.azure.com CSI driver.", "$ref": "#/definitions/io.k8s.api.core.v1.AzureFileVolumeSource" }, "cephfs": { - "description": "cephFS represents a Ceph FS mount on the host that shares a pod's lifetime", + "description": "cephFS represents a Ceph FS mount on the host that shares a pod's lifetime. Deprecated: CephFS is deprecated and the in-tree cephfs type is no longer supported.", "$ref": "#/definitions/io.k8s.api.core.v1.CephFSVolumeSource" }, "cinder": { - "description": "cinder represents a cinder volume attached and mounted on kubelets host machine. More info: https://examples.k8s.io/mysql-cinder-pd/README.md", + "description": "cinder represents a cinder volume attached and mounted on kubelets host machine. Deprecated: Cinder is deprecated. All operations for the in-tree cinder type are redirected to the cinder.csi.openstack.org CSI driver. More info: https://examples.k8s.io/mysql-cinder-pd/README.md", "$ref": "#/definitions/io.k8s.api.core.v1.CinderVolumeSource" }, "configMap": { @@ -45164,7 +46120,7 @@ "$ref": "#/definitions/io.k8s.api.core.v1.ConfigMapVolumeSource" }, "csi": { - "description": "csi (Container Storage Interface) represents ephemeral storage that is handled by certain external CSI drivers (Beta feature).", + "description": "csi (Container Storage Interface) represents ephemeral storage that is handled by certain external CSI drivers.", "$ref": "#/definitions/io.k8s.api.core.v1.CSIVolumeSource" }, "downwardAPI": { @@ -45184,23 +46140,23 @@ "$ref": "#/definitions/io.k8s.api.core.v1.FCVolumeSource" }, "flexVolume": { - "description": "flexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin.", + "description": "flexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin. 
Deprecated: FlexVolume is deprecated. Consider using a CSIDriver instead.", "$ref": "#/definitions/io.k8s.api.core.v1.FlexVolumeSource" }, "flocker": { - "description": "flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running", + "description": "flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running. Deprecated: Flocker is deprecated and the in-tree flocker type is no longer supported.", "$ref": "#/definitions/io.k8s.api.core.v1.FlockerVolumeSource" }, "gcePersistentDisk": { - "description": "gcePersistentDisk represents a GCE Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk", + "description": "gcePersistentDisk represents a GCE Disk resource that is attached to a kubelet's host machine and then exposed to the pod. Deprecated: GCEPersistentDisk is deprecated. All operations for the in-tree gcePersistentDisk type are redirected to the pd.csi.storage.gke.io CSI driver. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk", "$ref": "#/definitions/io.k8s.api.core.v1.GCEPersistentDiskVolumeSource" }, "gitRepo": { - "description": "gitRepo represents a git repository at a particular revision. DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod's container.", + "description": "gitRepo represents a git repository at a particular revision. Deprecated: GitRepo is deprecated. To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod's container.", "$ref": "#/definitions/io.k8s.api.core.v1.GitRepoVolumeSource" }, "glusterfs": { - "description": "glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. More info: https://examples.k8s.io/volumes/glusterfs/README.md", + "description": "glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported. More info: https://examples.k8s.io/volumes/glusterfs/README.md", "$ref": "#/definitions/io.k8s.api.core.v1.GlusterfsVolumeSource" }, "hostPath": { @@ -45224,11 +46180,11 @@ "$ref": "#/definitions/io.k8s.api.core.v1.PersistentVolumeClaimVolumeSource" }, "photonPersistentDisk": { - "description": "photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine", + "description": "photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine. Deprecated: PhotonPersistentDisk is deprecated and the in-tree photonPersistentDisk type is no longer supported.", "$ref": "#/definitions/io.k8s.api.core.v1.PhotonPersistentDiskVolumeSource" }, "portworxVolume": { - "description": "portworxVolume represents a portworx volume attached and mounted on kubelets host machine", + "description": "portworxVolume represents a portworx volume attached and mounted on kubelets host machine. Deprecated: PortworxVolume is deprecated. 
All operations for the in-tree portworxVolume type are redirected to the pxd.portworx.com CSI driver when the CSIMigrationPortworx feature-gate is on.", "$ref": "#/definitions/io.k8s.api.core.v1.PortworxVolumeSource" }, "projected": { @@ -45236,15 +46192,15 @@ "$ref": "#/definitions/io.k8s.api.core.v1.ProjectedVolumeSource" }, "quobyte": { - "description": "quobyte represents a Quobyte mount on the host that shares a pod's lifetime", + "description": "quobyte represents a Quobyte mount on the host that shares a pod's lifetime. Deprecated: Quobyte is deprecated and the in-tree quobyte type is no longer supported.", "$ref": "#/definitions/io.k8s.api.core.v1.QuobyteVolumeSource" }, "rbd": { - "description": "rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. More info: https://examples.k8s.io/volumes/rbd/README.md", + "description": "rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported. More info: https://examples.k8s.io/volumes/rbd/README.md", "$ref": "#/definitions/io.k8s.api.core.v1.RBDVolumeSource" }, "scaleIO": { - "description": "scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.", + "description": "scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. Deprecated: ScaleIO is deprecated and the in-tree scaleIO type is no longer supported.", "$ref": "#/definitions/io.k8s.api.core.v1.ScaleIOVolumeSource" }, "secret": { @@ -45252,11 +46208,11 @@ "$ref": "#/definitions/io.k8s.api.core.v1.SecretVolumeSource" }, "storageos": { - "description": "storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes.", + "description": "storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes. Deprecated: StorageOS is deprecated and the in-tree storageos type is no longer supported.", "$ref": "#/definitions/io.k8s.api.core.v1.StorageOSVolumeSource" }, "vsphereVolume": { - "description": "vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine", + "description": "vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine. Deprecated: VsphereVolume is deprecated. All operations for the in-tree vsphereVolume type are redirected to the csi.vsphere.vmware.com CSI driver.", "$ref": "#/definitions/io.k8s.api.core.v1.VsphereVirtualDiskVolumeSource" } } @@ -46074,6 +47030,10 @@ "type": "integer", "format": "int64" }, + "ignoreStoreReadErrorWithClusterBreakingPotential": { + "description": "if set to true, it will trigger an unsafe deletion of the resource in case the normal deletion flow fails with a corrupt object error. A resource is considered corrupt if it can not be retrieved from the underlying storage successfully because of a) its data can not be transformed e.g. decryption failure, or b) it fails to decode into an object. NOTE: unsafe deletion ignores finalizer constraints, skips precondition checks, and removes the object from the storage. WARNING: This may potentially break the cluster if the workload associated with the resource being unsafe-deleted relies on normal deletion flow. Use only if you REALLY know what you are doing. The default value is false, and the user must opt in to enable it", + "type": "boolean" + }, "kind": { "description": "Kind is a string value representing the REST resource this object represents. 
Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", "type": "string" diff --git a/vendor/github.com/openshift/api/openshiftcontrolplane/v1/types.go b/vendor/github.com/openshift/api/openshiftcontrolplane/v1/types.go index 33e3cf291..498f78df6 100644 --- a/vendor/github.com/openshift/api/openshiftcontrolplane/v1/types.go +++ b/vendor/github.com/openshift/api/openshiftcontrolplane/v1/types.go @@ -123,15 +123,15 @@ type RoutingConfig struct { type ImportModeType string const ( - // ImportModeLegacy indicates that the legacy behaviour should be used. - // For manifest lists, the legacy behaviour will discard the manifest list and import a single - // sub-manifest. In this case, the platform is chosen in the following order of priority: - // 1. tag annotations; 2. control plane arch/os; 3. linux/amd64; 4. the first manifest in the list. - // This mode is the default. - ImportModeLegacy ImportModeType = "Legacy" - // ImportModePreserveOriginal indicates that the original manifest will be preserved. - // For manifest lists, the manifest list and all its sub-manifests will be imported. - ImportModePreserveOriginal ImportModeType = "PreserveOriginal" + // ImportModeLegacy indicates that the legacy behaviour should be used. + // For manifest lists, the legacy behaviour will discard the manifest list and import a single + // sub-manifest. In this case, the platform is chosen in the following order of priority: + // 1. tag annotations; 2. control plane arch/os; 3. linux/amd64; 4. the first manifest in the list. + // This mode is the default. + ImportModeLegacy ImportModeType = "Legacy" + // ImportModePreserveOriginal indicates that the original manifest will be preserved. + // For manifest lists, the manifest list and all its sub-manifests will be imported. + ImportModePreserveOriginal ImportModeType = "PreserveOriginal" ) type ImagePolicyConfig struct { @@ -180,11 +180,11 @@ type AllowedRegistries []RegistryLocation // RegistryLocation contains a location of the registry specified by the registry domain // name. The domain name might include wildcards, like '*' or '??'. type RegistryLocation struct { - // DomainName specifies a domain name for the registry + // domainName specifies a domain name for the registry // In case the registry use non-standard (80 or 443) port, the port should be included // in the domain name as well. DomainName string `json:"domainName"` - // Insecure indicates whether the registry is secure (https) or insecure (http) + // insecure indicates whether the registry is secure (https) or insecure (http) // By default (if not specified) the registry is assumed as secure. Insecure bool `json:"insecure,omitempty"` } @@ -440,36 +440,36 @@ type BuildOverridesConfig struct { // ImageConfig holds the necessary configuration options for building image names for system components type ImageConfig struct { - // Format is the format of the name to be built for the system component + // format is the format of the name to be built for the system component Format string `json:"format"` - // Latest determines if the latest tag will be pulled from the registry + // latest determines if the latest tag will be pulled from the registry Latest bool `json:"latest"` } // ServiceServingCert holds configuration for service serving cert signer which creates cert/key pairs for // pods fulfilling a service to serve with. 
type ServiceServingCert struct { - // Signer holds the signing information used to automatically sign serving certificates. + // signer holds the signing information used to automatically sign serving certificates. // If this value is nil, then certs are not signed automatically. Signer *configv1.CertInfo `json:"signer"` } // ClusterNetworkEntry defines an individual cluster network. The CIDRs cannot overlap with other cluster network CIDRs, CIDRs reserved for external ips, CIDRs reserved for service networks, and CIDRs reserved for ingress ips. type ClusterNetworkEntry struct { - // CIDR defines the total range of a cluster networks address space. + // cidr defines the total range of a cluster networks address space. CIDR string `json:"cidr"` - // HostSubnetLength is the number of bits of the accompanying CIDR address to allocate to each node. eg, 8 would mean that each node would have a /24 slice of the overlay network for its pod. + // hostSubnetLength is the number of bits of the accompanying CIDR address to allocate to each node. eg, 8 would mean that each node would have a /24 slice of the overlay network for its pod. HostSubnetLength uint32 `json:"hostSubnetLength"` } // SecurityAllocator controls the automatic allocation of UIDs and MCS labels to a project. If nil, allocation is disabled. type SecurityAllocator struct { - // UIDAllocatorRange defines the total set of Unix user IDs (UIDs) that will be allocated to projects automatically, and the size of the + // uidAllocatorRange defines the total set of Unix user IDs (UIDs) that will be allocated to projects automatically, and the size of the // block each namespace gets. For example, 1000-1999/10 will allocate ten UIDs per namespace, and will be able to allocate up to 100 blocks // before running out of space. The default is to allocate from 1 billion to 2 billion in 10k blocks (which is the expected size of the // ranges container images will use once user namespaces are started). UIDAllocatorRange string `json:"uidAllocatorRange"` - // MCSAllocatorRange defines the range of MCS categories that will be assigned to namespaces. The format is + // mcsAllocatorRange defines the range of MCS categories that will be assigned to namespaces. The format is // "/[,]". The default is "s0/2" and will allocate from c0 -> c1023, which means a total of 535k labels // are available (1024 choose 2 ~ 535k). If this value is changed after startup, new projects may receive labels that are already allocated // to other projects. Prefix may be any valid SELinux set of terms (including user, role, and type), although leaving them as the default @@ -480,7 +480,7 @@ type SecurityAllocator struct { // * s0:/2,512 - Allocate labels from s0:c0,c0,c0 to s0:c511,c511,511 // MCSAllocatorRange string `json:"mcsAllocatorRange"` - // MCSLabelsPerProject defines the number of labels that should be reserved per project. The default is 5 to match the default UID and MCS + // mcsLabelsPerProject defines the number of labels that should be reserved per project. The default is 5 to match the default UID and MCS // ranges (100k namespaces, 535k/5 labels). 
MCSLabelsPerProject int `json:"mcsLabelsPerProject"` } diff --git a/vendor/github.com/openshift/api/openshiftcontrolplane/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/openshiftcontrolplane/v1/zz_generated.swagger_doc_generated.go index 5162e46ba..0c73046ee 100644 --- a/vendor/github.com/openshift/api/openshiftcontrolplane/v1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/openshiftcontrolplane/v1/zz_generated.swagger_doc_generated.go @@ -59,8 +59,8 @@ func (BuildOverridesConfig) SwaggerDoc() map[string]string { var map_ClusterNetworkEntry = map[string]string{ "": "ClusterNetworkEntry defines an individual cluster network. The CIDRs cannot overlap with other cluster network CIDRs, CIDRs reserved for external ips, CIDRs reserved for service networks, and CIDRs reserved for ingress ips.", - "cidr": "CIDR defines the total range of a cluster networks address space.", - "hostSubnetLength": "HostSubnetLength is the number of bits of the accompanying CIDR address to allocate to each node. eg, 8 would mean that each node would have a /24 slice of the overlay network for its pod.", + "cidr": "cidr defines the total range of a cluster networks address space.", + "hostSubnetLength": "hostSubnetLength is the number of bits of the accompanying CIDR address to allocate to each node. eg, 8 would mean that each node would have a /24 slice of the overlay network for its pod.", } func (ClusterNetworkEntry) SwaggerDoc() map[string]string { @@ -90,8 +90,8 @@ func (FrontProxyConfig) SwaggerDoc() map[string]string { var map_ImageConfig = map[string]string{ "": "ImageConfig holds the necessary configuration options for building image names for system components", - "format": "Format is the format of the name to be built for the system component", - "latest": "Latest determines if the latest tag will be pulled from the registry", + "format": "format is the format of the name to be built for the system component", + "latest": "latest determines if the latest tag will be pulled from the registry", } func (ImageConfig) SwaggerDoc() map[string]string { @@ -201,8 +201,8 @@ func (ProjectConfig) SwaggerDoc() map[string]string { var map_RegistryLocation = map[string]string{ "": "RegistryLocation contains a location of the registry specified by the registry domain name. The domain name might include wildcards, like '*' or '??'.", - "domainName": "DomainName specifies a domain name for the registry In case the registry use non-standard (80 or 443) port, the port should be included in the domain name as well.", - "insecure": "Insecure indicates whether the registry is secure (https) or insecure (http) By default (if not specified) the registry is assumed as secure.", + "domainName": "domainName specifies a domain name for the registry In case the registry use non-standard (80 or 443) port, the port should be included in the domain name as well.", + "insecure": "insecure indicates whether the registry is secure (https) or insecure (http) By default (if not specified) the registry is assumed as secure.", } func (RegistryLocation) SwaggerDoc() map[string]string { @@ -220,9 +220,9 @@ func (RoutingConfig) SwaggerDoc() map[string]string { var map_SecurityAllocator = map[string]string{ "": "SecurityAllocator controls the automatic allocation of UIDs and MCS labels to a project. 
If nil, allocation is disabled.", - "uidAllocatorRange": "UIDAllocatorRange defines the total set of Unix user IDs (UIDs) that will be allocated to projects automatically, and the size of the block each namespace gets. For example, 1000-1999/10 will allocate ten UIDs per namespace, and will be able to allocate up to 100 blocks before running out of space. The default is to allocate from 1 billion to 2 billion in 10k blocks (which is the expected size of the ranges container images will use once user namespaces are started).", - "mcsAllocatorRange": "MCSAllocatorRange defines the range of MCS categories that will be assigned to namespaces. The format is \"/[,]\". The default is \"s0/2\" and will allocate from c0 -> c1023, which means a total of 535k labels are available (1024 choose 2 ~ 535k). If this value is changed after startup, new projects may receive labels that are already allocated to other projects. Prefix may be any valid SELinux set of terms (including user, role, and type), although leaving them as the default will allow the server to set them automatically.\n\nExamples: * s0:/2 - Allocate labels from s0:c0,c0 to s0:c511,c511 * s0:/2,512 - Allocate labels from s0:c0,c0,c0 to s0:c511,c511,511", - "mcsLabelsPerProject": "MCSLabelsPerProject defines the number of labels that should be reserved per project. The default is 5 to match the default UID and MCS ranges (100k namespaces, 535k/5 labels).", + "uidAllocatorRange": "uidAllocatorRange defines the total set of Unix user IDs (UIDs) that will be allocated to projects automatically, and the size of the block each namespace gets. For example, 1000-1999/10 will allocate ten UIDs per namespace, and will be able to allocate up to 100 blocks before running out of space. The default is to allocate from 1 billion to 2 billion in 10k blocks (which is the expected size of the ranges container images will use once user namespaces are started).", + "mcsAllocatorRange": "mcsAllocatorRange defines the range of MCS categories that will be assigned to namespaces. The format is \"/[,]\". The default is \"s0/2\" and will allocate from c0 -> c1023, which means a total of 535k labels are available (1024 choose 2 ~ 535k). If this value is changed after startup, new projects may receive labels that are already allocated to other projects. Prefix may be any valid SELinux set of terms (including user, role, and type), although leaving them as the default will allow the server to set them automatically.\n\nExamples: * s0:/2 - Allocate labels from s0:c0,c0 to s0:c511,c511 * s0:/2,512 - Allocate labels from s0:c0,c0,c0 to s0:c511,c511,511", + "mcsLabelsPerProject": "mcsLabelsPerProject defines the number of labels that should be reserved per project. The default is 5 to match the default UID and MCS ranges (100k namespaces, 535k/5 labels).", } func (SecurityAllocator) SwaggerDoc() map[string]string { @@ -239,7 +239,7 @@ func (ServiceAccountControllerConfig) SwaggerDoc() map[string]string { var map_ServiceServingCert = map[string]string{ "": "ServiceServingCert holds configuration for service serving cert signer which creates cert/key pairs for pods fulfilling a service to serve with.", - "signer": "Signer holds the signing information used to automatically sign serving certificates. If this value is nil, then certs are not signed automatically.", + "signer": "signer holds the signing information used to automatically sign serving certificates. 
If this value is nil, then certs are not signed automatically.", } func (ServiceServingCert) SwaggerDoc() map[string]string { diff --git a/vendor/github.com/openshift/api/operator/v1/register.go b/vendor/github.com/openshift/api/operator/v1/register.go index 21919f9a8..5920c4fca 100644 --- a/vendor/github.com/openshift/api/operator/v1/register.go +++ b/vendor/github.com/openshift/api/operator/v1/register.go @@ -62,6 +62,8 @@ func addKnownTypes(scheme *runtime.Scheme) error { &OpenShiftAPIServerList{}, &OpenShiftControllerManager{}, &OpenShiftControllerManagerList{}, + &OLM{}, + &OLMList{}, &ServiceCA{}, &ServiceCAList{}, &ServiceCatalogAPIServer{}, diff --git a/vendor/github.com/openshift/api/operator/v1/types.go b/vendor/github.com/openshift/api/operator/v1/types.go index f04b6846a..4b0c48a10 100644 --- a/vendor/github.com/openshift/api/operator/v1/types.go +++ b/vendor/github.com/openshift/api/operator/v1/types.go @@ -16,7 +16,6 @@ type MyOperatorResource struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata"` - // +kubebuilder:validation:Required // +required Spec MyOperatorResourceSpec `json:"spec"` Status MyOperatorResourceStatus `json:"status"` @@ -145,19 +144,19 @@ type OperatorStatus struct { // GenerationStatus keeps track of the generation for a given resource so that decisions about forced updates can be made. type GenerationStatus struct { // group is the group of the thing you're tracking - // +kubebuilder:validation:Required + // +required Group string `json:"group"` // resource is the resource type of the thing you're tracking - // +kubebuilder:validation:Required + // +required Resource string `json:"resource"` // namespace is where the thing you're tracking is - // +kubebuilder:validation:Required + // +required Namespace string `json:"namespace"` // name is the name of the thing you're tracking - // +kubebuilder:validation:Required + // +required Name string `json:"name"` // TODO: Add validation for lastGeneration. The value for this field should generally increase, except when the associated @@ -194,21 +193,18 @@ type OperatorCondition struct { // useful (see .node.status.conditions), the ability to deconflict is important. // The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) // +required - // +kubebuilder:validation:Required // +kubebuilder:validation:Pattern=`^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$` // +kubebuilder:validation:MaxLength=316 Type string `json:"type" protobuf:"bytes,1,opt,name=type"` // status of the condition, one of True, False, Unknown. // +required - // +kubebuilder:validation:Required // +kubebuilder:validation:Enum=True;False;Unknown Status ConditionStatus `json:"status"` // lastTransitionTime is the last time the condition transitioned from one status to another. // This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. 
// +required - // +kubebuilder:validation:Required // +kubebuilder:validation:Type=string // +kubebuilder:validation:Format=date-time LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty"` @@ -256,18 +252,27 @@ type StaticPodOperatorStatus struct { // +listType=map // +listMapKey=nodeName // +optional + // +kubebuilder:validation:XValidation:rule="size(self.filter(status, status.?targetRevision.orValue(0) != 0)) <= 1",message="no more than 1 node status may have a nonzero targetRevision" NodeStatuses []NodeStatus `json:"nodeStatuses,omitempty"` } // NodeStatus provides information about the current state of a particular node managed by this operator. +// +kubebuilder:validation:XValidation:rule="has(self.currentRevision) || !has(oldSelf.currentRevision)",message="cannot be unset once set",fieldPath=".currentRevision" +// +kubebuilder:validation:XValidation:rule="oldSelf.hasValue() || !has(self.currentRevision)",message="currentRevision can not be set on creation of a nodeStatus",optionalOldSelf=true,fieldPath=.currentRevision +// +kubebuilder:validation:XValidation:rule="oldSelf.hasValue() || !has(self.targetRevision)",message="targetRevision can not be set on creation of a nodeStatus",optionalOldSelf=true,fieldPath=.targetRevision type NodeStatus struct { // nodeName is the name of the node - // +kubebuilder:validation:Required + // +required NodeName string `json:"nodeName"` - // currentRevision is the generation of the most recently successful deployment - CurrentRevision int32 `json:"currentRevision"` - // targetRevision is the generation of the deployment we're trying to apply + // currentRevision is the generation of the most recently successful deployment. + // Can not be set on creation of a nodeStatus. Updates must only increase the value. + // +kubebuilder:validation:XValidation:rule="self >= oldSelf",message="must only increase" + // +optional + CurrentRevision int32 `json:"currentRevision,omitempty"` + // targetRevision is the generation of the deployment we're trying to apply. + // Can not be set on creation of a nodeStatus. + // +optional TargetRevision int32 `json:"targetRevision,omitempty"` // lastFailedRevision is the generation of the deployment we tried and failed to deploy. diff --git a/vendor/github.com/openshift/api/operator/v1/types_authentication.go b/vendor/github.com/openshift/api/operator/v1/types_authentication.go index 58d8748d9..bf103f19b 100644 --- a/vendor/github.com/openshift/api/operator/v1/types_authentication.go +++ b/vendor/github.com/openshift/api/operator/v1/types_authentication.go @@ -25,7 +25,6 @@ type Authentication struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata,omitempty"` - // +kubebuilder:validation:Required // +required Spec AuthenticationSpec `json:"spec,omitempty"` // +optional @@ -37,7 +36,7 @@ type AuthenticationSpec struct { } type AuthenticationStatus struct { - // OAuthAPIServer holds status specific only to oauth-apiserver + // oauthAPIServer holds status specific only to oauth-apiserver // +optional OAuthAPIServer OAuthAPIServerStatus `json:"oauthAPIServer,omitempty"` @@ -45,7 +44,7 @@ type AuthenticationStatus struct { } type OAuthAPIServerStatus struct { - // LatestAvailableRevision is the latest revision used as suffix of revisioned + // latestAvailableRevision is the latest revision used as suffix of revisioned // secrets like encryption-config. A new revision causes a new deployment of pods. 
// +optional // +kubebuilder:validation:Minimum=0 diff --git a/vendor/github.com/openshift/api/operator/v1/types_cloudcredential.go b/vendor/github.com/openshift/api/operator/v1/types_cloudcredential.go index 9666b2792..b6ef52e93 100644 --- a/vendor/github.com/openshift/api/operator/v1/types_cloudcredential.go +++ b/vendor/github.com/openshift/api/operator/v1/types_cloudcredential.go @@ -25,7 +25,6 @@ type CloudCredential struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata,omitempty"` - // +kubebuilder:validation:Required // +required Spec CloudCredentialSpec `json:"spec"` // +optional @@ -60,7 +59,7 @@ const ( // CloudCredentialSpec is the specification of the desired behavior of the cloud-credential-operator. type CloudCredentialSpec struct { OperatorSpec `json:",inline"` - // CredentialsMode allows informing CCO that it should not attempt to dynamically + // credentialsMode allows informing CCO that it should not attempt to dynamically // determine the root cloud credentials capabilities, and it should just run in // the specified mode. // It also allows putting the operator into "manual" mode if desired. diff --git a/vendor/github.com/openshift/api/operator/v1/types_config.go b/vendor/github.com/openshift/api/operator/v1/types_config.go index e7c6d59db..f0d190e6d 100644 --- a/vendor/github.com/openshift/api/operator/v1/types_config.go +++ b/vendor/github.com/openshift/api/operator/v1/types_config.go @@ -26,7 +26,6 @@ type Config struct { metav1.ObjectMeta `json:"metadata"` // spec is the specification of the desired behavior of the Config Operator. - // +kubebuilder:validation:Required // +required Spec ConfigSpec `json:"spec"` @@ -56,6 +55,6 @@ type ConfigList struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ListMeta `json:"metadata"` - // Items contains the items + // items contains the items Items []Config `json:"items"` } diff --git a/vendor/github.com/openshift/api/operator/v1/types_console.go b/vendor/github.com/openshift/api/operator/v1/types_console.go index aa39b2f95..c2f25e4e6 100644 --- a/vendor/github.com/openshift/api/operator/v1/types_console.go +++ b/vendor/github.com/openshift/api/operator/v1/types_console.go @@ -26,7 +26,6 @@ type Console struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata,omitempty"` - // +kubebuilder:validation:Required // +required Spec ConsoleSpec `json:"spec,omitempty"` // +optional @@ -128,7 +127,7 @@ type CapabilityVisibility struct { // Disabling the capability in the console UI is represented by the "Disabled" value. // +unionDiscriminator // +kubebuilder:validation:Enum:="Enabled";"Disabled" - // +kubebuilder:validation:Required + // +required State CapabilityState `json:"state"` } @@ -137,15 +136,148 @@ type Capability struct { // name is the unique name of a capability. // Available capabilities are LightspeedButton and GettingStartedBanner. // +kubebuilder:validation:Enum:="LightspeedButton";"GettingStartedBanner" - // +kubebuilder:validation:Required + // +required Name ConsoleCapabilityName `json:"name"` // visibility defines the visibility state of the capability. - // +kubebuilder:validation:Required + // +required Visibility CapabilityVisibility `json:"visibility"` } +// ThemeMode is the value of the logo theme mode that determines the theme mode in the console UI. 
+// +kubebuilder:validation:Enum="Dark";"Light" +// +enum +type ThemeMode string + +// ThemeMode values +const ( + // ThemeModeDark represents the dark mode for a console theme. + ThemeModeDark ThemeMode = "Dark" + + // ThemeModeLight represents the light mode for a console theme. + ThemeModeLight ThemeMode = "Light" +) + +// LogoType is the value of the logo type that determines if the logo is for the masthead or the favicon in the console UI. +// The masthead logo is displayed in the masthead and about modal of the console UI. +// +kubebuilder:validation:Enum="Masthead";"Favicon" +// +enum +type LogoType string + +const ( + // Masthead represents the logo in the masthead. + LogoTypeMasthead LogoType = "Masthead" + + // Favicon represents the favicon logo. + LogoTypeFavicon LogoType = "Favicon" +) + +// SourceType defines the source type of the file reference. +// +kubebuilder:validation:Enum="ConfigMap" +// +enum +type SourceType string + +const ( + // SourceTypeConfigMap represents a ConfigMap source. + SourceTypeConfigMap SourceType = "ConfigMap" +) + +// ConfigMapFileReference references a specific file within a ConfigMap. +type ConfigMapFileReference struct { + // name is the name of the ConfigMap. + // name is a required field. + // Must consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character. + // Must be at most 253 characters in length. + // +kubebuilder:validation:MaxLength=253 + // +kubebuilder:validation:XValidation:rule="!format.dns1123Subdomain().validate(self).hasValue()",message="a lowercase RFC 1123 subdomain must consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character." + // +required + Name string `json:"name"` + + // key is the logo key inside the referenced ConfigMap. + // Must consist only of alphanumeric characters, dashes (-), underscores (_), and periods (.). + // Must be at most 253 characters in length. + // Must end in a valid file extension. + // A valid file extension must consist of a period followed by 2 to 5 alpha characters. + // +kubebuilder:validation:MaxLength=253 + // +kubebuilder:validation:XValidation:rule="self.matches('^[a-zA-Z0-9._-]+$')",message="The ConfigMap key must consist only of alphanumeric characters, dashes (-), underscores (_), and periods (.)." + // +kubebuilder:validation:XValidation:rule="self.matches('.*\\\\.[a-zA-Z]{2,5}$')",message="The ConfigMap key must end with a valid file extension (2 to 5 letters)." + // +required + Key string `json:"key"` +} +// FileReferenceSource is used by the console to locate the specified file containing a custom logo. +// +kubebuilder:validation:XValidation:rule="has(self.from) && self.from == 'ConfigMap' ? has(self.configMap) : !has(self.configMap)",message="configMap is required when from is 'ConfigMap', and forbidden otherwise." +type FileReferenceSource struct { + // from is a required field to specify the source type of the file reference. + // Allowed values are ConfigMap. + // When set to ConfigMap, the file will be sourced from a ConfigMap in the openshift-config namespace. The configMap field must be set when from is set to ConfigMap. + // +required + From SourceType `json:"from"` + + // configMap specifies the ConfigMap sourcing details such as the name of the ConfigMap and the key for the file. + // The ConfigMap must exist in the openshift-config namespace. + // Required when from is "ConfigMap", and forbidden otherwise. 
+ // +optional + ConfigMap *ConfigMapFileReference `json:"configMap"` +} + +// Theme defines a theme mode for the console UI. +type Theme struct { + // mode is used to specify what theme mode a logo will apply to in the console UI. + // mode is a required field that allows values of Dark and Light. + // When set to Dark, the logo file referenced in the 'file' field will be used when an end-user of the console UI enables the Dark mode. + // When set to Light, the logo file referenced in the 'file' field will be used when an end-user of the console UI enables the Light mode. + // +required + Mode ThemeMode `json:"mode"` + + // source is used by the console to locate the specified file containing a custom logo. + // source is a required field that references a ConfigMap name and key that contains the custom logo file in the openshift-config namespace. + // You can create it with a command like: + // - 'oc create configmap custom-logos-config --namespace=openshift-config --from-file=/path/to/file' + // The ConfigMap key must include the file extension so that the console serves the file with the correct MIME type. + // The recommended file format for the Masthead and Favicon logos is SVG, but other file formats are allowed if supported by the browser. + // The logo image size must be less than 1 MB due to constraints on the ConfigMap size. + // For more information, see the documentation: https://docs.redhat.com/en/documentation/openshift_container_platform/4.19/html/web_console/customizing-web-console#customizing-web-console + // +required + Source FileReferenceSource `json:"source"` +} + +// Logo defines a configuration based on theme modes for the console UI logo. +type Logo struct { + // type specifies the type of the logo for the console UI. It determines whether the logo is for the masthead or favicon. + // type is a required field that allows values of Masthead and Favicon. + // When set to "Masthead", the logo will be used in the masthead and about modal of the console UI. + // When set to "Favicon", the logo will be used as the favicon of the console UI. + // +required + Type LogoType `json:"type"` + + // themes specifies the themes for the console UI logo. + // themes is a required field that allows a list of themes. Each item in the themes list must have a unique mode and a source field. + // Each mode determines whether the logo is for the dark or light mode of the console UI. + // If a theme is not specified, the default OpenShift logo will be displayed for that theme. + // There must be at least one entry and no more than 2 entries. + // +kubebuilder:validation:MinItems=1 + // +kubebuilder:validation:MaxItems=2 + // +listType=map + // +listMapKey=mode + // +required + Themes []Theme `json:"themes"` +} + // ConsoleCustomization defines a list of optional configuration for the console UI. +// Ensure that Logos and CustomLogoFile cannot be set at the same time. +// +kubebuilder:validation:XValidation:rule="!(has(self.logos) && has(self.customLogoFile))",message="Only one of logos or customLogoFile can be set." type ConsoleCustomization struct { + // logos is used to replace the OpenShift Masthead and Favicon logos in the console UI with custom logos. + // logos is an optional field that allows a list of logos. + // Only one of logos or customLogoFile can be set at a time. + // If logos is set, customLogoFile must be unset. + // When specified, there must be at least one entry and no more than 2 entries. + // Each type must appear only once in the list. 
+ // +kubebuilder:validation:MaxItems=2 + // +listType=map + // +listMapKey=type + // +optional + Logos []Logo `json:"logos"` + // capabilities defines an array of capabilities that can be interacted with in the console UI. // Each capability defines a visual state that can be interacted with the console to render in the UI. // Available capabilities are LightspeedButton and GettingStartedBanner. @@ -173,32 +305,28 @@ type ConsoleCustomization struct { // +optional CustomProductName string `json:"customProductName,omitempty"` // customLogoFile replaces the default OpenShift logo in the masthead and about dialog. It is a reference to a + // Only one of customLogoFile or logos can be set at a time. // ConfigMap in the openshift-config namespace. This can be created with a command like // 'oc create configmap custom-logo --from-file=/path/to/file -n openshift-config'. // Image size must be less than 1 MB due to constraints on the ConfigMap size. // The ConfigMap key should include a file extension so that the console serves the file // with the correct MIME type. - // Recommended logo specifications: - // Dimensions: Max height of 68px and max width of 200px - // SVG format preferred + // The recommended file format for the logo is SVG, but other file formats are allowed if supported by the browser. + // Deprecated: Use logos instead. // +optional CustomLogoFile configv1.ConfigMapFileReference `json:"customLogoFile,omitempty"` // developerCatalog allows to configure the shown developer catalog categories (filters) and types (sub-catalogs). - // +kubebuilder:validation:Optional // +optional DeveloperCatalog DeveloperConsoleCatalogCustomization `json:"developerCatalog,omitempty"` // projectAccess allows customizing the available list of ClusterRoles in the Developer perspective // Project access page which can be used by a project admin to specify roles to other users and // restrict access within the project. If set, the list will replace the default ClusterRole options. - // +kubebuilder:validation:Optional // +optional ProjectAccess ProjectAccess `json:"projectAccess,omitempty"` // quickStarts allows customization of available ConsoleQuickStart resources in console. - // +kubebuilder:validation:Optional // +optional QuickStarts QuickStarts `json:"quickStarts,omitempty"` // addPage allows customizing actions on the Add page in developer perspective. - // +kubebuilder:validation:Optional // +optional AddPage AddPage `json:"addPage,omitempty"` // perspectives allows enabling/disabling of perspective(s) that user can see in the Perspective switcher dropdown. @@ -212,7 +340,6 @@ type ConsoleCustomization struct { type ProjectAccess struct { // availableClusterRoles is the list of ClusterRole names that are assignable to users // through the project access tab. - // +kubebuilder:validation:Optional // +optional AvailableClusterRoles []string `json:"availableClusterRoles,omitempty"` } @@ -235,7 +362,7 @@ type DeveloperConsoleCatalogTypes struct { // +kubebuilder:validation:Enum:="Enabled";"Disabled"; // +kubebuilder:default:="Enabled" // +default="Enabled" - // +kubebuilder:validation:Required + // +required State CatalogTypesState `json:"state,omitempty"` // enabled is a list of developer catalog types (sub-catalogs IDs) that will be shown to users. 
// Types (sub-catalogs) are added via console plugins, the available types (sub-catalog IDs) are available @@ -259,7 +386,6 @@ type DeveloperConsoleCatalogTypes struct { // DeveloperConsoleCatalogCustomization allow cluster admin to configure developer catalog. type DeveloperConsoleCatalogCustomization struct { // categories which are shown in the developer catalog. - // +kubebuilder:validation:Optional // +optional Categories []DeveloperConsoleCatalogCategory `json:"categories,omitempty"` // types allows enabling or disabling of sub-catalog types that user can see in the Developer catalog. @@ -270,23 +396,20 @@ type DeveloperConsoleCatalogCustomization struct { // DeveloperConsoleCatalogCategoryMeta are the key identifiers of a developer catalog category. type DeveloperConsoleCatalogCategoryMeta struct { - // ID is an identifier used in the URL to enable deep linking in console. + // id is an identifier used in the URL to enable deep linking in console. // ID is required and must have 1-32 URL safe (A-Z, a-z, 0-9, - and _) characters. - // +kubebuilder:validation:Required // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=32 // +kubebuilder:validation:Pattern=`^[A-Za-z0-9-_]+$` // +required ID string `json:"id"` // label defines a category display label. It is required and must have 1-64 characters. - // +kubebuilder:validation:Required // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=64 // +required Label string `json:"label"` // tags is a list of strings that will match the category. A selected category // show all items which has at least one overlapping tag between category and item. - // +kubebuilder:validation:Optional // +optional Tags []string `json:"tags,omitempty"` } @@ -296,7 +419,6 @@ type DeveloperConsoleCatalogCategory struct { // defines top level category ID, label and filter tags. DeveloperConsoleCatalogCategoryMeta `json:",inline"` // subcategories defines a list of child categories. - // +kubebuilder:validation:Optional // +optional Subcategories []DeveloperConsoleCatalogCategoryMeta `json:"subcategories,omitempty"` } @@ -304,7 +426,6 @@ type DeveloperConsoleCatalogCategory struct { // QuickStarts allow cluster admins to customize available ConsoleQuickStart resources. type QuickStarts struct { // disabled is a list of ConsoleQuickStart resource names that are not shown to users. - // +kubebuilder:validation:Optional // +optional Disabled []string `json:"disabled,omitempty"` } @@ -313,7 +434,6 @@ type QuickStarts struct { type AddPage struct { // disabledActions is a list of actions that are not shown to users. // Each action in the list is represented by its ID. - // +kubebuilder:validation:Optional // +kubebuilder:validation:MinItems=1 // +optional DisabledActions []string `json:"disabledActions,omitempty"` @@ -350,7 +470,7 @@ type PerspectiveVisibility struct { // state defines the perspective is enabled or disabled or access review check is required. // +unionDiscriminator // +kubebuilder:validation:Enum:="Enabled";"Disabled";"AccessReview" - // +kubebuilder:validation:Required + // +required State PerspectiveState `json:"state"` // accessReview defines required and missing access review checks. // +optional @@ -365,10 +485,10 @@ type Perspective struct { // Example: "dev", "admin". // The available perspective ids can be found in the code snippet section next to the yaml editor. // Incorrect or unknown ids will be ignored. 
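For the DeveloperConsoleCatalogCustomization fields described above, a minimal sketch of what a cluster admin might set; the category IDs, labels, tags and the sub-catalog type IDs are placeholders rather than values taken from this patch.

```yaml
apiVersion: operator.openshift.io/v1
kind: Console
metadata:
  name: cluster
spec:
  customization:
    developerCatalog:
      categories:
      - id: databases            # 1-32 URL-safe characters (A-Z, a-z, 0-9, - and _)
        label: Databases
        tags:
        - database
        - postgresql
        subcategories:
        - id: operators
          label: Operators
      types:
        state: Enabled
        enabled:
        - HelmChart              # placeholder sub-catalog IDs; real IDs come from installed console plugins
        - Devfile
```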
- // +kubebuilder:validation:Required + // +required ID string `json:"id"` // visibility defines the state of perspective along with access review checks if needed for that perspective. - // +kubebuilder:validation:Required + // +required Visibility PerspectiveVisibility `json:"visibility"` // pinnedResources defines the list of default pinned resources that users will see on the perspective navigation if they have not customized these pinned resources themselves. // The list of available Kubernetes resources could be read via `kubectl api-resources`. @@ -386,20 +506,20 @@ type PinnedResourceReference struct { // This value should consist of only lowercase alphanumeric characters, hyphens and periods. // Example: "", "apps", "build.openshift.io", etc. // +kubebuilder:validation:Pattern:="^$|^[a-z0-9]([-a-z0-9]*[a-z0-9])?(.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$" - // +kubebuilder:validation:Required + // +required Group string `json:"group"` // version is the API Version of the Resource. // This value should consist of only lowercase alphanumeric characters. // Example: "v1", "v1beta1", etc. // +kubebuilder:validation:Pattern:="^[a-z0-9]+$" - // +kubebuilder:validation:Required + // +required Version string `json:"version"` // resource is the type that is being referenced. // It is normally the plural form of the resource kind in lowercase. // This value should consist of only lowercase alphanumeric characters and hyphens. // Example: "deployments", "deploymentconfigs", "pods", etc. // +kubebuilder:validation:Pattern:="^[a-z0-9]([-a-z0-9]*[a-z0-9])?$" - // +kubebuilder:validation:Required + // +required Resource string `json:"resource"` } diff --git a/vendor/github.com/openshift/api/operator/v1/types_csi_cluster_driver.go b/vendor/github.com/openshift/api/operator/v1/types_csi_cluster_driver.go index 0644b6a93..b25133a42 100644 --- a/vendor/github.com/openshift/api/operator/v1/types_csi_cluster_driver.go +++ b/vendor/github.com/openshift/api/operator/v1/types_csi_cluster_driver.go @@ -20,7 +20,7 @@ import ( // +kubebuilder:resource:path=clustercsidrivers,scope=Cluster // +kubebuilder:subresource:status // +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/701 -// +openshift:file-pattern=cvoRunLevel=0000_90,operatorName=csi-driver,operatorOrdering=01 +// +openshift:file-pattern=cvoRunLevel=0000_50,operatorName=csi-driver,operatorOrdering=01 // ClusterCSIDriver object allows management and configuration of a CSI driver operator // installed by default in OpenShift. Name of the object must be name of the CSI driver @@ -36,7 +36,6 @@ type ClusterCSIDriver struct { metav1.ObjectMeta `json:"metadata,omitempty"` // spec holds user settable values for configuration - // +kubebuilder:validation:Required // +required Spec ClusterCSIDriverSpec `json:"spec"` @@ -71,7 +70,7 @@ const ( RemovedStorageClass StorageClassStateName = "Removed" ) -// If you are adding a new driver name here, ensure that 0000_90_cluster_csi_driver_01_config.crd.yaml-merge-patch file is also updated with new driver name. +// If you are adding a new driver name here, ensure that 0000_50_cluster_csi_driver_01_config.crd.yaml-merge-patch file is also updated with new driver name. 
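Returning to the Perspective and PinnedResourceReference fields documented above, they compose as in the following sketch; the pinned group/version/resource triples are arbitrary examples of the pattern the comments describe.

```yaml
apiVersion: operator.openshift.io/v1
kind: Console
metadata:
  name: cluster
spec:
  customization:
    perspectives:
    - id: dev
      visibility:
        state: Enabled
      pinnedResources:
      - group: ""                # core API group
        version: v1
        resource: configmaps
      - group: apps
        version: v1
        resource: deployments
    - id: admin
      visibility:
        state: Enabled
```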
const ( AWSEBSCSIDriver CSIDriverName = "ebs.csi.aws.com" AWSEFSCSIDriver CSIDriverName = "efs.csi.aws.com" @@ -95,7 +94,7 @@ const ( // ClusterCSIDriverSpec is the desired behavior of CSI driver operator type ClusterCSIDriverSpec struct { OperatorSpec `json:",inline"` - // StorageClassState determines if CSI operator should create and manage storage classes. + // storageClassState determines if CSI operator should create and manage storage classes. // If this field value is empty or Managed - CSI operator will continuously reconcile // storage class and create if necessary. // If this field value is Unmanaged - CSI operator will not reconcile any previously created @@ -135,7 +134,7 @@ type CSIDriverConfigSpec struct { // driverConfig is being applied to. // Valid values are: AWS, Azure, GCP, IBMCloud, vSphere and omitted. // Consumers should treat unknown values as a NO-OP. - // +kubebuilder:validation:Required + // +required // +unionDiscriminator DriverType CSIDriverType `json:"driverType"` @@ -155,7 +154,7 @@ type CSIDriverConfigSpec struct { // +optional IBMCloud *IBMCloudCSIDriverConfigSpec `json:"ibmcloud,omitempty"` - // vsphere is used to configure the vsphere CSI driver. + // vSphere is used to configure the vsphere CSI driver. // +optional VSphere *VSphereCSIDriverConfigSpec `json:"vSphere,omitempty"` } @@ -198,7 +197,7 @@ type AWSEFSVolumeMetrics struct { // RecursiveWalk means the AWS EFS CSI Driver will recursively scan volumes to collect metrics. // This process may result in high CPU and memory usage, depending on the volume size. // +unionDiscriminator - // +kubebuilder:validation:Required + // +required State AWSEFSVolumeMetricsState `json:"state"` // recursiveWalk provides additional configuration for collecting volume metrics in the AWS EFS CSI Driver @@ -240,7 +239,7 @@ type AzureDiskEncryptionSet struct { // 5. The second, third, and fourth groups should be 4 characters long. // 6. The fifth group should be 12 characters long. // An Example SubscrionID: f2007bbf-f802-4a47-9336-cf7c6b89b378 - // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:MaxLength:=36 // +kubebuilder:validation:Pattern:=`^[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}$` SubscriptionID string `json:"subscriptionID"` @@ -250,7 +249,7 @@ type AzureDiskEncryptionSet struct { // underscores (_), parentheses, hyphens and periods. // The value should not end in a period and be at most 90 characters in // length. - // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:MaxLength:=90 // +kubebuilder:validation:Pattern:=`^[\w\.\-\(\)]*[\w\-\(\)]$` ResourceGroup string `json:"resourceGroup"` @@ -258,7 +257,7 @@ type AzureDiskEncryptionSet struct { // name is the name of the disk encryption set that will be set on the default storage class. // The value should consist of only alphanumberic characters, // underscores (_), hyphens, and be at most 80 characters in length. - // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:MaxLength:=80 // +kubebuilder:validation:Pattern:=`^[a-zA-Z0-9\_-]+$` Name string `json:"name"` @@ -281,7 +280,7 @@ type GCPKMSKeyReference struct { // +kubebuilder:validation:Pattern:=`^[a-zA-Z0-9\_-]+$` // +kubebuilder:validation:MinLength:=1 // +kubebuilder:validation:MaxLength:=63 - // +kubebuilder:validation:Required + // +required Name string `json:"name"` // keyRing is the name of the KMS Key Ring which the KMS Key belongs to. 
@@ -291,7 +290,7 @@ type GCPKMSKeyReference struct { // +kubebuilder:validation:Pattern:=`^[a-zA-Z0-9\_-]+$` // +kubebuilder:validation:MinLength:=1 // +kubebuilder:validation:MaxLength:=63 - // +kubebuilder:validation:Required + // +required KeyRing string `json:"keyRing"` // projectID is the ID of the Project in which the KMS Key Ring exists. @@ -300,7 +299,7 @@ type GCPKMSKeyReference struct { // +kubebuilder:validation:Pattern:=`^[a-z][a-z0-9-]+[a-z0-9]$` // +kubebuilder:validation:MinLength:=6 // +kubebuilder:validation:MaxLength:=30 - // +kubebuilder:validation:Required + // +required ProjectID string `json:"projectID"` // location is the GCP location in which the Key Ring exists. @@ -323,7 +322,7 @@ type GCPCSIDriverConfigSpec struct { type IBMCloudCSIDriverConfigSpec struct { // encryptionKeyCRN is the IBM Cloud CRN of the customer-managed root key to use // for disk encryption of volumes for the default storage classes. - // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:MaxLength:=154 // +kubebuilder:validation:MinLength:=144 // +kubebuilder:validation:Pattern:=`^crn:v[0-9]+:bluemix:(public|private):(kms|hs-crypto):[a-z-]+:a/[0-9a-f]+:[0-9a-f-]{36}:key:[0-9a-f-]{36}$` @@ -370,6 +369,21 @@ type VSphereCSIDriverConfigSpec struct { // +openshift:enable:FeatureGate=VSphereDriverConfiguration // +optional GranularMaxSnapshotsPerBlockVolumeInVVOL *uint32 `json:"granularMaxSnapshotsPerBlockVolumeInVVOL,omitempty"` + + // maxAllowedBlockVolumesPerNode is an optional configuration parameter that allows setting a custom value for the + // limit of the number of PersistentVolumes attached to a node. In vSphere version 7 this limit was set to 59 by + // default, however in vSphere version 8 this limit was increased to 255. + // Before increasing this value above 59 the cluster administrator needs to ensure that every node forming the + // cluster is updated to ESXi version 8 or higher and that all nodes are running the same version. + // The limit must be between 1 and 255, which matches the vSphere version 8 maximum. + // When omitted, this means no opinion and the platform is left to choose a reasonable default, which is subject to + // change over time. + // The current default is 59, which matches the limit for vSphere version 7. 
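As a concrete illustration of the new maxAllowedBlockVolumesPerNode knob, a sketch assuming the vSphere ClusterCSIDriver object is named csi.vsphere.vmware.com (the object name must match the driver name, but that constant is not shown in this hunk) and that the VSphereConfigurableMaxAllowedBlockVolumesPerNode feature gate is enabled.

```yaml
apiVersion: operator.openshift.io/v1
kind: ClusterCSIDriver
metadata:
  name: csi.vsphere.vmware.com           # assumed: object name is the CSI driver name
spec:
  managementState: Managed
  driverConfig:
    driverType: vSphere
    vSphere:
      maxAllowedBlockVolumesPerNode: 255  # valid range 1-255; only raise above 59 once every node runs ESXi 8+
```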
+ // +kubebuilder:validation:Minimum=1 + // +kubebuilder:validation:Maximum=255 + // +openshift:enable:FeatureGate=VSphereConfigurableMaxAllowedBlockVolumesPerNode + // +optional + MaxAllowedBlockVolumesPerNode int32 `json:"maxAllowedBlockVolumesPerNode,omitempty"` } // ClusterCSIDriverStatus is the observed status of CSI driver operator diff --git a/vendor/github.com/openshift/api/operator/v1/types_csi_snapshot.go b/vendor/github.com/openshift/api/operator/v1/types_csi_snapshot.go index f96384819..d6d283d36 100644 --- a/vendor/github.com/openshift/api/operator/v1/types_csi_snapshot.go +++ b/vendor/github.com/openshift/api/operator/v1/types_csi_snapshot.go @@ -25,7 +25,6 @@ type CSISnapshotController struct { metav1.ObjectMeta `json:"metadata,omitempty"` // spec holds user settable values for configuration - // +kubebuilder:validation:Required // +required Spec CSISnapshotControllerSpec `json:"spec"` diff --git a/vendor/github.com/openshift/api/operator/v1/types_dns.go b/vendor/github.com/openshift/api/operator/v1/types_dns.go index 3d7cbb6c0..258804786 100644 --- a/vendor/github.com/openshift/api/operator/v1/types_dns.go +++ b/vendor/github.com/openshift/api/operator/v1/types_dns.go @@ -226,7 +226,7 @@ type DNSOverTLSConfig struct { // // + --- // + Inspired by the DNS1123 patterns in Kubernetes: https://github.com/kubernetes/kubernetes/blob/7c46f40bdf89a437ecdbc01df45e235b5f6d9745/staging/src/k8s.io/apimachinery/pkg/util/validation/validation.go#L178-L218 - // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:MaxLength=253 // +kubebuilder:validation:Pattern=`^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])(\.([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9]))*$` ServerName string `json:"serverName"` @@ -319,7 +319,7 @@ type ForwardPlugin struct { // * At least one upstream should be specified. // * the default policy is Sequential type UpstreamResolvers struct { - // Upstreams is a list of resolvers to forward name queries for the "." domain. + // upstreams is a list of resolvers to forward name queries for the "." domain. // Each instance of CoreDNS performs health checking of Upstreams. When a healthy upstream // returns an error during the exchange, another resolver is tried from Upstreams. The // Upstreams are selected in the order specified in Policy. @@ -332,7 +332,7 @@ type UpstreamResolvers struct { // +kubebuilder:default={{"type":"SystemResolvConf"}} Upstreams []Upstream `json:"upstreams"` - // Policy is used to determine the order in which upstream servers are selected for querying. + // policy is used to determine the order in which upstream servers are selected for querying. // Any one of the following values may be specified: // // * "Random" picks a random upstream server for each query. @@ -381,31 +381,28 @@ type UpstreamResolvers struct { // with an IP address or IP:port if the upstream listens on a port other than 53. type Upstream struct { - // Type defines whether this upstream contains an IP/IP:port resolver or the local /etc/resolv.conf. + // type defines whether this upstream contains an IP/IP:port resolver or the local /etc/resolv.conf. // Type accepts 2 possible values: SystemResolvConf or Network. 
// // * When SystemResolvConf is used, the Upstream structure does not require any further fields to be defined: // /etc/resolv.conf will be used // * When Network is used, the Upstream structure must contain at least an Address // - // +kubebuilder:validation:Required // +required Type UpstreamType `json:"type"` - // Address must be defined when Type is set to Network. It will be ignored otherwise. + // address must be defined when Type is set to Network. It will be ignored otherwise. // It must be a valid ipv4 or ipv6 address. // // +optional - // +kubebuilder:validation:Optional Address string `json:"address,omitempty"` - // Port may be defined when Type is set to Network. It will be ignored otherwise. + // port may be defined when Type is set to Network. It will be ignored otherwise. // Port must be between 65535 // // +optional // +kubebuilder:validation:Minimum=1 // +kubebuilder:validation:Maximum=65535 - // +kubebuilder:validation:Optional // +kubebuilder:default=53 Port uint32 `json:"port,omitempty"` } @@ -483,7 +480,6 @@ type DNSStatus struct { // // More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies // - // +kubebuilder:validation:Required // +required ClusterIP string `json:"clusterIP"` @@ -494,7 +490,6 @@ type DNSStatus struct { // // More info: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service // - // +kubebuilder:validation:Required // +required ClusterDomain string `json:"clusterDomain"` diff --git a/vendor/github.com/openshift/api/operator/v1/types_etcd.go b/vendor/github.com/openshift/api/operator/v1/types_etcd.go index 71345d7d7..375ec5fb7 100644 --- a/vendor/github.com/openshift/api/operator/v1/types_etcd.go +++ b/vendor/github.com/openshift/api/operator/v1/types_etcd.go @@ -24,7 +24,6 @@ type Etcd struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata"` - // +kubebuilder:validation:Required // +required Spec EtcdSpec `json:"spec"` // +optional @@ -39,7 +38,6 @@ type EtcdSpec struct { // Valid values are "", "Standard" and "Slower". // "" means no opinion and the platform is left to choose a reasonable default // which is subject to change without notice. - // +kubebuilder:validation:Optional // +openshift:enable:FeatureGate=HardwareSpeed // +optional HardwareSpeed ControlPlaneHardwareSpeed `json:"controlPlaneHardwareSpeed"` @@ -93,6 +91,6 @@ type EtcdList struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ListMeta `json:"metadata"` - // Items contains the items + // items contains the items Items []Etcd `json:"items"` } diff --git a/vendor/github.com/openshift/api/operator/v1/types_ingress.go b/vendor/github.com/openshift/api/operator/v1/types_ingress.go index 1f5664345..240ab12c7 100644 --- a/vendor/github.com/openshift/api/operator/v1/types_ingress.go +++ b/vendor/github.com/openshift/api/operator/v1/types_ingress.go @@ -258,6 +258,75 @@ type IngressControllerSpec struct { // // +optional HTTPCompression HTTPCompressionPolicy `json:"httpCompression,omitempty"` + + // idleConnectionTerminationPolicy maps directly to HAProxy's + // idle-close-on-response option and controls whether HAProxy + // keeps idle frontend connections open during a soft stop + // (router reload). + // + // Allowed values for this field are "Immediate" and + // "Deferred". The default value is "Immediate". 
+ // + // When set to "Immediate", idle connections are closed + // immediately during router reloads. This ensures immediate + // propagation of route changes but may impact clients + // sensitive to connection resets. + // + // When set to "Deferred", HAProxy will maintain idle + // connections during a soft reload instead of closing them + // immediately. These connections remain open until any of the + // following occurs: + // + // - A new request is received on the connection, in which + // case HAProxy handles it in the old process and closes + // the connection after sending the response. + // + // - HAProxy's `timeout http-keep-alive` duration expires + // (300 seconds in OpenShift's configuration, not + // configurable). + // + // - The client's keep-alive timeout expires, causing the + // client to close the connection. + // + // Setting Deferred can help prevent errors in clients or load + // balancers that do not properly handle connection resets. + // Additionally, this option allows you to retain the pre-2.4 + // HAProxy behaviour: in HAProxy version 2.2 (OpenShift + // versions < 4.14), maintaining idle connections during a + // soft reload was the default behaviour, but starting with + // HAProxy 2.4, the default changed to closing idle + // connections immediately. + // + // Important Consideration: + // + // - Using Deferred will result in temporary inconsistencies + // for the first request on each persistent connection + // after a route update and router reload. This request + // will be processed by the old HAProxy process using its + // old configuration. Subsequent requests will use the + // updated configuration. + // + // Operational Considerations: + // + // - Keeping idle connections open during reloads may lead + // to an accumulation of old HAProxy processes if + // connections remain idle for extended periods, + // especially in environments where frequent reloads + // occur. + // + // - Consider monitoring the number of HAProxy processes in + // the router pods when Deferred is set. + // + // - You may need to enable or adjust the + // `ingress.operator.openshift.io/hard-stop-after` + // duration (configured via an annotation on the + // IngressController resource) in environments with + // frequent reloads to prevent resource exhaustion. + // + // +optional + // +kubebuilder:default:="Immediate" + // +default="Immediate" + IdleConnectionTerminationPolicy IngressControllerConnectionTerminationPolicy `json:"idleConnectionTerminationPolicy,omitempty"` } // httpCompressionPolicy turns on compression for the specified MIME types. @@ -397,7 +466,6 @@ type LoadBalancerStrategy struct { // scope indicates the scope at which the load balancer is exposed. // Possible values are "External" and "Internal". // - // +kubebuilder:validation:Required // +required Scope LoadBalancerScope `json:"scope"` @@ -434,7 +502,7 @@ type LoadBalancerStrategy struct { // Valid values are: Managed and Unmanaged. // // +kubebuilder:default:="Managed" - // +kubebuilder:validation:Required + // +required // +default="Managed" DNSManagementPolicy LoadBalancerDNSManagementPolicy `json:"dnsManagementPolicy,omitempty"` } @@ -464,7 +532,6 @@ type ProviderLoadBalancerParameters struct { // "OpenStack", and "VSphere". 
// // +unionDiscriminator - // +kubebuilder:validation:Required // +required Type LoadBalancerProviderType `json:"type"` @@ -544,7 +611,6 @@ type AWSLoadBalancerParameters struct { // https://docs.aws.amazon.com/AmazonECS/latest/developerguide/load-balancer-types.html#nlb // // +unionDiscriminator - // +kubebuilder:validation:Required // +required Type AWSLoadBalancerType `json:"type"` @@ -713,7 +779,6 @@ type AWSClassicLoadBalancerParameters struct { // means no opinion, in which case a default value is used. The default // value for this field is 60s. This default is subject to change. // - // +kubebuilder:validation:Optional // +kubebuilder:validation:Format=duration // +optional ConnectionIdleTimeout metav1.Duration `json:"connectionIdleTimeout,omitempty"` @@ -738,9 +803,9 @@ type AWSClassicLoadBalancerParameters struct { // AWSNetworkLoadBalancerParameters holds configuration parameters for an // AWS Network load balancer. For Example: Setting AWS EIPs https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/elastic-ip-addresses-eip.html -// +openshift:validation:FeatureGateAwareXValidation:featureGate=SetEIPForNLBIngressController,rule=`has(self.subnets) && has(self.subnets.ids) && has(self.subnets.names) && has(self.eipAllocations) ? size(self.subnets.ids + self.subnets.names) == size(self.eipAllocations) : true`,message="number of subnets must be equal to number of eipAllocations" -// +openshift:validation:FeatureGateAwareXValidation:featureGate=SetEIPForNLBIngressController,rule=`has(self.subnets) && has(self.subnets.ids) && !has(self.subnets.names) && has(self.eipAllocations) ? size(self.subnets.ids) == size(self.eipAllocations) : true`,message="number of subnets must be equal to number of eipAllocations" -// +openshift:validation:FeatureGateAwareXValidation:featureGate=SetEIPForNLBIngressController,rule=`has(self.subnets) && has(self.subnets.names) && !has(self.subnets.ids) && has(self.eipAllocations) ? size(self.subnets.names) == size(self.eipAllocations) : true`,message="number of subnets must be equal to number of eipAllocations" +// +openshift:validation:FeatureGateAwareXValidation:requiredFeatureGate=SetEIPForNLBIngressController;IngressControllerLBSubnetsAWS,rule=`has(self.subnets) && has(self.subnets.ids) && has(self.subnets.names) && has(self.eipAllocations) ? size(self.subnets.ids + self.subnets.names) == size(self.eipAllocations) : true`,message="number of subnets must be equal to number of eipAllocations" +// +openshift:validation:FeatureGateAwareXValidation:requiredFeatureGate=SetEIPForNLBIngressController;IngressControllerLBSubnetsAWS,rule=`has(self.subnets) && has(self.subnets.ids) && !has(self.subnets.names) && has(self.eipAllocations) ? size(self.subnets.ids) == size(self.eipAllocations) : true`,message="number of subnets must be equal to number of eipAllocations" +// +openshift:validation:FeatureGateAwareXValidation:requiredFeatureGate=SetEIPForNLBIngressController;IngressControllerLBSubnetsAWS,rule=`has(self.subnets) && has(self.subnets.names) && !has(self.subnets.ids) && has(self.eipAllocations) ? size(self.subnets.names) == size(self.eipAllocations) : true`,message="number of subnets must be equal to number of eipAllocations" type AWSNetworkLoadBalancerParameters struct { // subnets specifies the subnets to which the load balancer will // attach. The subnets may be specified by either their @@ -828,7 +893,6 @@ type HostNetworkStrategy struct { // The empty string specifies the default, which is TCP without PROXY // protocol. 
Note that the default is subject to change. // - // +kubebuilder:validation:Optional // +optional Protocol IngressControllerProtocol `json:"protocol,omitempty"` @@ -836,7 +900,6 @@ type HostNetworkStrategy struct { // HTTP requests. This field should be set when port 80 is already in use. // The value should not coincide with the NodePort range of the cluster. // When the value is 0 or is not specified it defaults to 80. - // +kubebuilder:validation:Optional // +kubebuilder:validation:Maximum=65535 // +kubebuilder:validation:Minimum=0 // +kubebuilder:default=80 @@ -847,7 +910,6 @@ type HostNetworkStrategy struct { // HTTPS requests. This field should be set when port 443 is already in use. // The value should not coincide with the NodePort range of the cluster. // When the value is 0 or is not specified it defaults to 443. - // +kubebuilder:validation:Optional // +kubebuilder:validation:Maximum=65535 // +kubebuilder:validation:Minimum=0 // +kubebuilder:default=443 @@ -868,7 +930,6 @@ type HostNetworkStrategy struct { // a threshold of two successful or failed requests to become healthy or // unhealthy respectively, are well-tested values. When the value is 0 or // is not specified it defaults to 1936. - // +kubebuilder:validation:Optional // +kubebuilder:validation:Maximum=65535 // +kubebuilder:validation:Minimum=0 // +kubebuilder:default=1936 @@ -904,7 +965,6 @@ type PrivateStrategy struct { // The empty string specifies the default, which is TCP without PROXY // protocol. Note that the default is subject to change. // - // +kubebuilder:validation:Optional // +optional Protocol IngressControllerProtocol `json:"protocol,omitempty"` } @@ -936,7 +996,6 @@ type NodePortStrategy struct { // The empty string specifies the default, which is TCP without PROXY // protocol. Note that the default is subject to change. // - // +kubebuilder:validation:Optional // +optional Protocol IngressControllerProtocol `json:"protocol,omitempty"` } @@ -1004,7 +1063,6 @@ type EndpointPublishingStrategy struct { // field of the managed NodePort Service will preserved. // // +unionDiscriminator - // +kubebuilder:validation:Required // +required Type EndpointPublishingStrategyType `json:"type"` @@ -1054,7 +1112,6 @@ type ClientTLS struct { // edge-terminated and reencrypt TLS routes; it cannot check // certificates for cleartext HTTP or passthrough TLS routes. // - // +kubebuilder:validation:Required // +required ClientCertificatePolicy ClientCertificatePolicy `json:"clientCertificatePolicy"` @@ -1063,7 +1120,6 @@ type ClientTLS struct { // certificate. The administrator must create this configmap in the // openshift-config namespace. // - // +kubebuilder:validation:Required // +required ClientCA configv1.ConfigMapNameReference `json:"clientCA"` @@ -1167,14 +1223,12 @@ type SyslogLoggingDestinationParameters struct { // address is the IP address of the syslog endpoint that receives log // messages. // - // +kubebuilder:validation:Required // +required Address string `json:"address"` // port is the UDP port number of the syslog endpoint that receives log // messages. // - // +kubebuilder:validation:Required // +kubebuilder:validation:Minimum=1 // +kubebuilder:validation:Maximum=65535 // +required @@ -1184,7 +1238,6 @@ type SyslogLoggingDestinationParameters struct { // // If this field is empty, the facility is "local1". 
// - // +kubebuilder:validation:Optional // +kubebuilder:validation:Enum=kern;user;mail;daemon;auth;syslog;lpr;news;uucp;cron;auth2;ftp;ntp;audit;alert;cron2;local0;local1;local2;local3;local4;local5;local6;local7 // +optional Facility string `json:"facility,omitempty"` @@ -1244,7 +1297,6 @@ type LoggingDestination struct { // that the administrator has configured a custom syslog instance. // // +unionDiscriminator - // +kubebuilder:validation:Required // +required Type LoggingDestinationType `json:"type"` @@ -1267,7 +1319,6 @@ type IngressControllerCaptureHTTPHeader struct { // name specifies a header name. Its value must be a valid HTTP header // name as defined in RFC 2616 section 4.2. // - // +kubebuilder:validation:Required // +kubebuilder:validation:Pattern="^[-!#$%&'*+.0-9A-Z^_`a-z|~]+$" // +required Name string `json:"name"` @@ -1277,7 +1328,6 @@ type IngressControllerCaptureHTTPHeader struct { // log message. Note that the ingress controller may impose a separate // bound on the total length of HTTP headers in a request. // - // +kubebuilder:validation:Required // +kubebuilder:validation:Minimum=1 // +required MaxLength int `json:"maxLength"` @@ -1331,7 +1381,6 @@ type IngressControllerCaptureHTTPCookie struct { // controller may impose a separate bound on the total length of HTTP // headers in a request. // - // +kubebuilder:validation:Required // +kubebuilder:validation:Minimum=1 // +kubebuilder:validation:Maximum=1024 // +required @@ -1351,7 +1400,6 @@ type IngressControllerCaptureHTTPCookieUnion struct { // matching cookie is captured. // // +unionDiscriminator - // +kubebuilder:validation:Required // +required MatchType CookieMatchType `json:"matchType,omitempty"` @@ -1389,7 +1437,6 @@ const ( type AccessLogging struct { // destination is where access logs go. // - // +kubebuilder:validation:Required // +required Destination LoggingDestination `json:"destination"` @@ -1646,7 +1693,7 @@ type IngressControllerHTTPHeader struct { // Strict-Transport-Security, Proxy, Host, Cookie, Set-Cookie. // It must be no more than 255 characters in length. // Header name must be unique. - // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=255 // +kubebuilder:validation:Pattern="^[-!#$%&'*+.0-9A-Z^_`a-z|~]+$" @@ -1657,7 +1704,7 @@ type IngressControllerHTTPHeader struct { // +kubebuilder:validation:XValidation:rule="self.lowerAscii() != 'set-cookie'",message="set-cookie header may not be modified via header actions" Name string `json:"name"` // action specifies actions to perform on headers, such as setting or deleting headers. - // +kubebuilder:validation:Required + // +required Action IngressControllerHTTPHeaderActionUnion `json:"action"` } @@ -1671,7 +1718,7 @@ type IngressControllerHTTPHeaderActionUnion struct { // Delete allows you to delete HTTP request and response headers. // +unionDiscriminator // +kubebuilder:validation:Enum:=Set;Delete - // +kubebuilder:validation:Required + // +required Type IngressControllerHTTPHeaderActionType `json:"type"` // set specifies how the HTTP header should be set. @@ -1704,7 +1751,7 @@ type IngressControllerSetHTTPHeader struct { // + --- // + Note: This limit was selected as most common web servers have a limit of 16384 characters or some lower limit. // + See . 
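The header-action types above (IngressControllerHTTPHeader with a Set/Delete action union) might be used as in the following sketch; the spec.httpHeaders.actions.request/response nesting is an assumption, since only the header-action types themselves appear in this hunk, and the header names and value are placeholders.

```yaml
apiVersion: operator.openshift.io/v1
kind: IngressController
metadata:
  name: default
  namespace: openshift-ingress-operator   # assumed namespace
spec:
  httpHeaders:                             # assumed parent field, not part of this hunk
    actions:
      response:
      - name: X-Frame-Options
        action:
          type: Set
          set:
            value: DENY
      request:
      - name: X-Debug-Token
        action:
          type: Delete
```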
- // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=16384 Value string `json:"value"` @@ -1725,7 +1772,6 @@ type IngressControllerTuningOptions struct { // headerBufferBytes values that are too large could cause the // IngressController to use significantly more memory than necessary. // - // +kubebuilder:validation:Optional // +kubebuilder:validation:Minimum=16384 // +optional HeaderBufferBytes int32 `json:"headerBufferBytes,omitempty"` @@ -1745,7 +1791,6 @@ type IngressControllerTuningOptions struct { // large could cause the IngressController to use significantly more memory // than necessary. // - // +kubebuilder:validation:Optional // +kubebuilder:validation:Minimum=4096 // +optional HeaderBufferMaxRewriteBytes int32 `json:"headerBufferMaxRewriteBytes,omitempty"` @@ -1763,7 +1808,6 @@ type IngressControllerTuningOptions struct { // Reducing the number of threads may cause the ingress controller to // perform poorly. // - // +kubebuilder:validation:Optional // +kubebuilder:validation:Minimum=1 // +kubebuilder:validation:Maximum=64 // +optional @@ -1773,7 +1817,6 @@ type IngressControllerTuningOptions struct { // waiting for a client response. // // If unset, the default timeout is 30s - // +kubebuilder:validation:Optional // +kubebuilder:validation:Format=duration // +optional ClientTimeout *metav1.Duration `json:"clientTimeout,omitempty"` @@ -1783,7 +1826,6 @@ type IngressControllerTuningOptions struct { // connection. // // If unset, the default timeout is 1s - // +kubebuilder:validation:Optional // +kubebuilder:validation:Format=duration // +optional ClientFinTimeout *metav1.Duration `json:"clientFinTimeout,omitempty"` @@ -1792,7 +1834,6 @@ type IngressControllerTuningOptions struct { // waiting for a server/backend response. // // If unset, the default timeout is 30s - // +kubebuilder:validation:Optional // +kubebuilder:validation:Format=duration // +optional ServerTimeout *metav1.Duration `json:"serverTimeout,omitempty"` @@ -1802,7 +1843,6 @@ type IngressControllerTuningOptions struct { // connection. // // If unset, the default timeout is 1s - // +kubebuilder:validation:Optional // +kubebuilder:validation:Format=duration // +optional ServerFinTimeout *metav1.Duration `json:"serverFinTimeout,omitempty"` @@ -1811,12 +1851,11 @@ type IngressControllerTuningOptions struct { // websockets) will be held open while the tunnel is idle. // // If unset, the default timeout is 1h - // +kubebuilder:validation:Optional // +kubebuilder:validation:Format=duration // +optional TunnelTimeout *metav1.Duration `json:"tunnelTimeout,omitempty"` - // ConnectTimeout defines the maximum time to wait for + // connectTimeout defines the maximum time to wait for // a connection attempt to a server/backend to succeed. // // This field expects an unsigned duration string of decimal numbers, each with optional @@ -1827,7 +1866,6 @@ type IngressControllerTuningOptions struct { // to choose a reasonable default. This default is subject to change over time. // The current default is 5s. // - // +kubebuilder:validation:Optional // +kubebuilder:validation:Pattern=^(0|([0-9]+(\.[0-9]+)?(ns|us|µs|μs|ms|s|m|h))+)$ // +kubebuilder:validation:Type:=string // +optional @@ -1841,7 +1879,6 @@ type IngressControllerTuningOptions struct { // matching certificate could be used. 
// // If unset, the default inspect delay is 5s - // +kubebuilder:validation:Optional // +kubebuilder:validation:Format=duration // +optional TLSInspectDelay *metav1.Duration `json:"tlsInspectDelay,omitempty"` @@ -1867,7 +1904,6 @@ type IngressControllerTuningOptions struct { // Currently the minimum allowed value is 1s and the maximum allowed value is // 2147483647ms (24.85 days). Both are subject to change over time. // - // +kubebuilder:validation:Optional // +kubebuilder:validation:Pattern=^(0|([0-9]+(\.[0-9]+)?(ns|us|µs|μs|ms|s|m|h))+)$ // +kubebuilder:validation:Type:=string // +optional @@ -1914,7 +1950,6 @@ type IngressControllerTuningOptions struct { // processes in router containers with the following metric: // 'container_memory_working_set_bytes{container="router",namespace="openshift-ingress"}/container_processes{container="router",namespace="openshift-ingress"}'. // - // +kubebuilder:validation:Optional // +optional MaxConnections int32 `json:"maxConnections,omitempty"` @@ -1945,7 +1980,6 @@ type IngressControllerTuningOptions struct { // be reloaded less frequently, and newly created routes will not be served until the // subsequent reload. // - // +kubebuilder:validation:Optional // +kubebuilder:validation:Pattern=^(0|([0-9]+(\.[0-9]+)?(ns|us|µs|μs|ms|s|m|h))+)$ // +kubebuilder:validation:Type:=string // +optional @@ -2068,3 +2102,23 @@ type IngressControllerList struct { Items []IngressController `json:"items"` } + +// IngressControllerConnectionTerminationPolicy defines the behaviour +// for handling idle connections during a soft reload of the router. +// +// +kubebuilder:validation:Enum=Immediate;Deferred +type IngressControllerConnectionTerminationPolicy string + +const ( + // IngressControllerConnectionTerminationPolicyImmediate specifies + // that idle connections should be closed immediately during a + // router reload. + IngressControllerConnectionTerminationPolicyImmediate IngressControllerConnectionTerminationPolicy = "Immediate" + + // IngressControllerConnectionTerminationPolicyDeferred + // specifies that idle connections should remain open until a + // terminating event, such as a new request, the expiration of + // the proxy keep-alive timeout, or the client closing the + // connection. + IngressControllerConnectionTerminationPolicyDeferred IngressControllerConnectionTerminationPolicy = "Deferred" +) diff --git a/vendor/github.com/openshift/api/operator/v1/types_insights.go b/vendor/github.com/openshift/api/operator/v1/types_insights.go index 56e2b51c1..ed59bb438 100644 --- a/vendor/github.com/openshift/api/operator/v1/types_insights.go +++ b/vendor/github.com/openshift/api/operator/v1/types_insights.go @@ -25,7 +25,7 @@ type InsightsOperator struct { metav1.ObjectMeta `json:"metadata"` // spec is the specification of the desired behavior of the Insights. - // +kubebuilder:validation:Required + // +required Spec InsightsOperatorSpec `json:"spec"` // status is the most recently observed status of the Insights operator. @@ -58,7 +58,7 @@ type GatherStatus struct { // lastGatherDuration is the total time taken to process // all gatherers during the last gather event. // +optional - // +kubebuilder:validation:Pattern="^0|([1-9][0-9]*(\\.[0-9]+)?(ns|us|µs|ms|s|m|h))+$" + // +kubebuilder:validation:Pattern="^(0|([0-9]+(?:\\.[0-9]+)?(ns|us|µs|μs|ms|s|m|h))+)$" // +kubebuilder:validation:Type=string LastGatherDuration metav1.Duration `json:"lastGatherDuration,omitempty"` // gatherers is a list of active gatherers (and their statuses) in the last gathering. 
@@ -85,25 +85,25 @@ type InsightsReport struct { // healthCheck represents an Insights health check attributes. type HealthCheck struct { // description provides basic description of the healtcheck. - // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:MaxLength=2048 // +kubebuilder:validation:MinLength=10 Description string `json:"description"` // totalRisk of the healthcheck. Indicator of the total risk posed // by the detected issue; combination of impact and likelihood. The values can be from 1 to 4, // and the higher the number, the more important the issue. - // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:Minimum=1 // +kubebuilder:validation:Maximum=4 TotalRisk int32 `json:"totalRisk"` // advisorURI provides the URL link to the Insights Advisor. - // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:Pattern=`^https:\/\/\S+` AdvisorURI string `json:"advisorURI"` // state determines what the current state of the health check is. // Health check is enabled by default and can be disabled // by the user in the Insights advisor user interface. - // +kubebuilder:validation:Required + // +required State HealthCheckState `json:"state"` } @@ -124,18 +124,18 @@ const ( type GathererStatus struct { // conditions provide details on the status of each gatherer. // +listType=atomic - // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:MinItems=1 Conditions []metav1.Condition `json:"conditions"` // name is the name of the gatherer. - // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:MaxLength=256 // +kubebuilder:validation:MinLength=5 Name string `json:"name"` // lastGatherDuration represents the time spent gathering. - // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:Type=string - // +kubebuilder:validation:Pattern="^([1-9][0-9]*(\\.[0-9]+)?(ns|us|µs|ms|s|m|h))+$" + // +kubebuilder:validation:Pattern="^(([0-9]+(?:\\.[0-9]+)?(ns|us|µs|μs|ms|s|m|h))+)$" LastGatherDuration metav1.Duration `json:"lastGatherDuration"` } diff --git a/vendor/github.com/openshift/api/operator/v1/types_kubeapiserver.go b/vendor/github.com/openshift/api/operator/v1/types_kubeapiserver.go index 5c9d43a2a..ce00b4b62 100644 --- a/vendor/github.com/openshift/api/operator/v1/types_kubeapiserver.go +++ b/vendor/github.com/openshift/api/operator/v1/types_kubeapiserver.go @@ -26,7 +26,6 @@ type KubeAPIServer struct { metav1.ObjectMeta `json:"metadata"` // spec is the specification of the desired behavior of the Kubernetes API Server - // +kubebuilder:validation:Required // +required Spec KubeAPIServerSpec `json:"spec"` @@ -78,6 +77,6 @@ type KubeAPIServerList struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ListMeta `json:"metadata"` - // Items contains the items + // items contains the items Items []KubeAPIServer `json:"items"` } diff --git a/vendor/github.com/openshift/api/operator/v1/types_kubecontrollermanager.go b/vendor/github.com/openshift/api/operator/v1/types_kubecontrollermanager.go index 93ab209a0..ee104aa50 100644 --- a/vendor/github.com/openshift/api/operator/v1/types_kubecontrollermanager.go +++ b/vendor/github.com/openshift/api/operator/v1/types_kubecontrollermanager.go @@ -25,7 +25,6 @@ type KubeControllerManager struct { metav1.ObjectMeta `json:"metadata"` // spec is the specification of the desired behavior of the Kubernetes Controller Manager - // +kubebuilder:validation:Required 
// +required Spec KubeControllerManagerSpec `json:"spec"` @@ -63,6 +62,6 @@ type KubeControllerManagerList struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ListMeta `json:"metadata"` - // Items contains the items + // items contains the items Items []KubeControllerManager `json:"items"` } diff --git a/vendor/github.com/openshift/api/operator/v1/types_kubestorageversionmigrator.go b/vendor/github.com/openshift/api/operator/v1/types_kubestorageversionmigrator.go index 470dc5097..f3add4910 100644 --- a/vendor/github.com/openshift/api/operator/v1/types_kubestorageversionmigrator.go +++ b/vendor/github.com/openshift/api/operator/v1/types_kubestorageversionmigrator.go @@ -24,7 +24,6 @@ type KubeStorageVersionMigrator struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata"` - // +kubebuilder:validation:Required // +required Spec KubeStorageVersionMigratorSpec `json:"spec"` // +optional @@ -52,6 +51,6 @@ type KubeStorageVersionMigratorList struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ListMeta `json:"metadata"` - // Items contains the items + // items contains the items Items []KubeStorageVersionMigrator `json:"items"` } diff --git a/vendor/github.com/openshift/api/operator/v1/types_machineconfiguration.go b/vendor/github.com/openshift/api/operator/v1/types_machineconfiguration.go index 8bd41eb69..4c53734d8 100644 --- a/vendor/github.com/openshift/api/operator/v1/types_machineconfiguration.go +++ b/vendor/github.com/openshift/api/operator/v1/types_machineconfiguration.go @@ -25,7 +25,7 @@ type MachineConfiguration struct { metav1.ObjectMeta `json:"metadata"` // spec is the specification of the desired behavior of the Machine Config Operator - // +kubebuilder:validation:Required + // +required Spec MachineConfigurationSpec `json:"spec"` // status is the most recently observed status of the Machine Config Operator @@ -41,8 +41,10 @@ type MachineConfigurationSpec struct { // managedBootImages allows configuration for the management of boot images for machine // resources within the cluster. This configuration allows users to select resources that should // be updated to the latest boot images during cluster upgrades, ensuring that new machines - // always boot with the current cluster version's boot image. When omitted, no boot images - // will be updated. + // always boot with the current cluster version's boot image. When omitted, this means no opinion + // and the platform is left to choose a reasonable default, which is subject to change over time. + // The default for each machine manager mode is All for GCP and AWS platforms, and None for all + // other platforms. // +openshift:enable:FeatureGate=ManagedBootImages // +optional ManagedBootImages ManagedBootImages `json:"managedBootImages"` @@ -62,11 +64,10 @@ type MachineConfigurationStatus struct { ObservedGeneration int64 `json:"observedGeneration,omitempty"` // conditions is a list of conditions and their status - // +patchMergeKey=type - // +patchStrategy=merge // +listType=map // +listMapKey=type - Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"` + // +optional + Conditions []metav1.Condition `json:"conditions,omitempty"` // Previously there was a StaticPodOperatorStatus here for legacy reasons. 
Many of the fields within // it are no longer relevant for the MachineConfiguration CRD's functions. The following remainder @@ -96,6 +97,12 @@ type MachineConfigurationStatus struct { // +openshift:enable:FeatureGate=NodeDisruptionPolicy // +optional NodeDisruptionPolicyStatus NodeDisruptionPolicyStatus `json:"nodeDisruptionPolicyStatus"` + + // managedBootImagesStatus reflects what the latest cluster-validated boot image configuration is + // and will be used by Machine Config Controller while performing boot image updates. + // +openshift:enable:FeatureGate=ManagedBootImages + // +optional + ManagedBootImagesStatus ManagedBootImages `json:"managedBootImagesStatus"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -111,7 +118,7 @@ type MachineConfigurationList struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ListMeta `json:"metadata"` - // Items contains the items + // items contains the items Items []MachineConfiguration `json:"items"` } @@ -122,6 +129,7 @@ type ManagedBootImages struct { // +listType=map // +listMapKey=resource // +listMapKey=apiGroup + // +kubebuilder:validation:MaxItems=5 MachineManagers []MachineManager `json:"machineManagers"` } @@ -131,17 +139,17 @@ type MachineManager struct { // resource is the machine management resource's type. // The only current valid value is machinesets. // machinesets means that the machine manager will only register resources of the kind MachineSet. - // +kubebuilder:validation:Required + // +required Resource MachineManagerMachineSetsResourceType `json:"resource"` // apiGroup is name of the APIGroup that the machine management resource belongs to. // The only current valid value is machine.openshift.io. // machine.openshift.io means that the machine manager will only register resources that belong to OpenShift machine API group. - // +kubebuilder:validation:Required + // +required APIGroup MachineManagerMachineSetsAPIGroupType `json:"apiGroup"` // selection allows granular control of the machine management resources that will be registered for boot image updates. - // +kubebuilder:validation:Required + // +required Selection MachineManagerSelector `json:"selection"` } @@ -152,8 +160,9 @@ type MachineManagerSelector struct { // Valid values are All and Partial. // All means that every resource matched by the machine manager will be updated. // Partial requires specified selector(s) and allows customisation of which resources matched by the machine manager will be updated. + // None means that every resource matched by the machine manager will not be updated. // +unionDiscriminator - // +kubebuilder:validation:Required + // +required Mode MachineManagerSelectorMode `json:"mode"` // partial provides label selector(s) that can be used to match machine management resources. @@ -165,12 +174,12 @@ type MachineManagerSelector struct { // PartialSelector provides label selector(s) that can be used to match machine management resources. type PartialSelector struct { // machineResourceSelector is a label selector that can be used to select machine resources like MachineSets. - // +kubebuilder:validation:Required + // +required MachineResourceSelector *metav1.LabelSelector `json:"machineResourceSelector,omitempty"` } // MachineManagerSelectorMode is a string enum used in the MachineManagerSelector union discriminator. 
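Putting the machine manager selection described above together, a sketch of a MachineConfiguration that limits boot image updates to labelled MachineSets; the label key and value are purely illustrative.

```yaml
apiVersion: operator.openshift.io/v1
kind: MachineConfiguration
metadata:
  name: cluster
spec:
  managedBootImages:
    machineManagers:
    - resource: machinesets
      apiGroup: machine.openshift.io
      selection:
        mode: Partial
        partial:
          machineResourceSelector:
            matchLabels:
              boot-image-update: enabled   # illustrative label; any label selector is accepted
```

Setting mode to All updates every matched MachineSet, while the newly added None mode excludes them all from boot image updates.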
-// +kubebuilder:validation:Enum:="All";"Partial" +// +kubebuilder:validation:Enum:="All";"Partial";"None" type MachineManagerSelectorMode string const ( @@ -180,6 +189,9 @@ const ( // Partial represents a configuration mode that will register resources specified by the parent MachineManager only // if they match with the label selector. Partial MachineManagerSelectorMode = "Partial" + + // None represents a configuration mode that excludes all resources specified by the parent MachineManager from boot image updates. + None MachineManagerSelectorMode = "None" ) // MachineManagerManagedResourceType is a string enum used in the MachineManager type to describe the resource @@ -255,7 +267,7 @@ type NodeDisruptionPolicyClusterStatus struct { type NodeDisruptionPolicySpecFile struct { // path is the location of a file being managed through a MachineConfig. // The Actions in the policy will apply to changes to the file at this path. - // +kubebuilder:validation:Required + // +required Path string `json:"path"` // actions represents the series of commands to be executed on changes to the file at // the corresponding file path. Actions will be applied in the order that @@ -264,7 +276,7 @@ type NodeDisruptionPolicySpecFile struct { // Valid actions are Reboot, Drain, Reload, DaemonReload and None. // The Reboot action and the None action cannot be used in conjunction with any of the other actions. // This list supports a maximum of 10 entries. - // +kubebuilder:validation:Required + // +required // +listType=atomic // +kubebuilder:validation:MaxItems=10 // +kubebuilder:validation:XValidation:rule="self.exists(x, x.type=='Reboot') ? size(self) == 1 : true", message="Reboot action can only be specified standalone, as it will override any other actions" @@ -276,7 +288,7 @@ type NodeDisruptionPolicySpecFile struct { type NodeDisruptionPolicyStatusFile struct { // path is the location of a file being managed through a MachineConfig. // The Actions in the policy will apply to changes to the file at this path. - // +kubebuilder:validation:Required + // +required Path string `json:"path"` // actions represents the series of commands to be executed on changes to the file at // the corresponding file path. Actions will be applied in the order that @@ -285,7 +297,7 @@ type NodeDisruptionPolicyStatusFile struct { // Valid actions are Reboot, Drain, Reload, DaemonReload and None. // The Reboot action and the None action cannot be used in conjunction with any of the other actions. // This list supports a maximum of 10 entries. - // +kubebuilder:validation:Required + // +required // +listType=atomic // +kubebuilder:validation:MaxItems=10 // +kubebuilder:validation:XValidation:rule="self.exists(x, x.type=='Reboot') ? size(self) == 1 : true", message="Reboot action can only be specified standalone, as it will override any other actions" @@ -300,7 +312,7 @@ type NodeDisruptionPolicySpecUnit struct { // Service names should be of the format ${NAME}${SERVICETYPE} and can up to 255 characters long. // ${NAME} must be atleast 1 character long and can only consist of alphabets, digits, ":", "-", "_", ".", and "\". // ${SERVICETYPE} must be one of ".service", ".socket", ".device", ".mount", ".automount", ".swap", ".target", ".path", ".timer", ".snapshot", ".slice" or ".scope". 
- // +kubebuilder:validation:Required + // +required Name NodeDisruptionPolicyServiceName `json:"name"` // actions represents the series of commands to be executed on changes to the file at @@ -310,7 +322,7 @@ type NodeDisruptionPolicySpecUnit struct { // Valid actions are Reboot, Drain, Reload, DaemonReload and None. // The Reboot action and the None action cannot be used in conjunction with any of the other actions. // This list supports a maximum of 10 entries. - // +kubebuilder:validation:Required + // +required // +listType=atomic // +kubebuilder:validation:MaxItems=10 // +kubebuilder:validation:XValidation:rule="self.exists(x, x.type=='Reboot') ? size(self) == 1 : true", message="Reboot action can only be specified standalone, as it will override any other actions" @@ -325,7 +337,7 @@ type NodeDisruptionPolicyStatusUnit struct { // Service names should be of the format ${NAME}${SERVICETYPE} and can up to 255 characters long. // ${NAME} must be atleast 1 character long and can only consist of alphabets, digits, ":", "-", "_", ".", and "\". // ${SERVICETYPE} must be one of ".service", ".socket", ".device", ".mount", ".automount", ".swap", ".target", ".path", ".timer", ".snapshot", ".slice" or ".scope". - // +kubebuilder:validation:Required + // +required Name NodeDisruptionPolicyServiceName `json:"name"` // actions represents the series of commands to be executed on changes to the file at @@ -335,7 +347,7 @@ type NodeDisruptionPolicyStatusUnit struct { // Valid actions are Reboot, Drain, Reload, DaemonReload and None. // The Reboot action and the None action cannot be used in conjunction with any of the other actions. // This list supports a maximum of 10 entries. - // +kubebuilder:validation:Required + // +required // +listType=atomic // +kubebuilder:validation:MaxItems=10 // +kubebuilder:validation:XValidation:rule="self.exists(x, x.type=='Reboot') ? size(self) == 1 : true", message="Reboot action can only be specified standalone, as it will override any other actions" @@ -352,7 +364,7 @@ type NodeDisruptionPolicySpecSSHKey struct { // Valid actions are Reboot, Drain, Reload, DaemonReload and None. // The Reboot action and the None action cannot be used in conjunction with any of the other actions. // This list supports a maximum of 10 entries. - // +kubebuilder:validation:Required + // +required // +listType=atomic // +kubebuilder:validation:MaxItems=10 // +kubebuilder:validation:XValidation:rule="self.exists(x, x.type=='Reboot') ? size(self) == 1 : true", message="Reboot action can only be specified standalone, as it will override any other actions" @@ -369,7 +381,7 @@ type NodeDisruptionPolicyStatusSSHKey struct { // Valid actions are Reboot, Drain, Reload, DaemonReload and None. // The Reboot action and the None action cannot be used in conjunction with any of the other actions. // This list supports a maximum of 10 entries. - // +kubebuilder:validation:Required + // +required // +listType=atomic // +kubebuilder:validation:MaxItems=10 // +kubebuilder:validation:XValidation:rule="self.exists(x, x.type=='Reboot') ? size(self) == 1 : true", message="Reboot action can only be specified standalone, as it will override any other actions" @@ -386,7 +398,7 @@ type NodeDisruptionPolicySpecAction struct { // reload/restart requires a corresponding service target specified in the reload/restart field. 
// Other values require no further configuration // +unionDiscriminator - // +kubebuilder:validation:Required + // +required Type NodeDisruptionPolicySpecActionType `json:"type"` // reload specifies the service to reload, only valid if type is reload // +optional @@ -405,7 +417,7 @@ type NodeDisruptionPolicyStatusAction struct { // reload/restart requires a corresponding service target specified in the reload/restart field. // Other values require no further configuration // +unionDiscriminator - // +kubebuilder:validation:Required + // +required Type NodeDisruptionPolicyStatusActionType `json:"type"` // reload specifies the service to reload, only valid if type is reload // +optional @@ -421,7 +433,7 @@ type ReloadService struct { // Service names should be of the format ${NAME}${SERVICETYPE} and can up to 255 characters long. // ${NAME} must be atleast 1 character long and can only consist of alphabets, digits, ":", "-", "_", ".", and "\". // ${SERVICETYPE} must be one of ".service", ".socket", ".device", ".mount", ".automount", ".swap", ".target", ".path", ".timer", ".snapshot", ".slice" or ".scope". - // +kubebuilder:validation:Required + // +required ServiceName NodeDisruptionPolicyServiceName `json:"serviceName"` } @@ -431,7 +443,7 @@ type RestartService struct { // Service names should be of the format ${NAME}${SERVICETYPE} and can up to 255 characters long. // ${NAME} must be atleast 1 character long and can only consist of alphabets, digits, ":", "-", "_", ".", and "\". // ${SERVICETYPE} must be one of ".service", ".socket", ".device", ".mount", ".automount", ".swap", ".target", ".path", ".timer", ".snapshot", ".slice" or ".scope". - // +kubebuilder:validation:Required + // +required ServiceName NodeDisruptionPolicyServiceName `json:"serviceName"` } diff --git a/vendor/github.com/openshift/api/operator/v1/types_network.go b/vendor/github.com/openshift/api/operator/v1/types_network.go index 9b1588bc2..713939ddb 100644 --- a/vendor/github.com/openshift/api/operator/v1/types_network.go +++ b/vendor/github.com/openshift/api/operator/v1/types_network.go @@ -54,7 +54,7 @@ type NetworkList struct { // NetworkSpec is the top-level network configuration object. 
// +kubebuilder:validation:XValidation:rule="!has(self.defaultNetwork) || !has(self.defaultNetwork.ovnKubernetesConfig) || !has(self.defaultNetwork.ovnKubernetesConfig.gatewayConfig) || !has(self.defaultNetwork.ovnKubernetesConfig.gatewayConfig.ipForwarding) || self.defaultNetwork.ovnKubernetesConfig.gatewayConfig.ipForwarding == oldSelf.defaultNetwork.ovnKubernetesConfig.gatewayConfig.ipForwarding || self.defaultNetwork.ovnKubernetesConfig.gatewayConfig.ipForwarding == 'Restricted' || self.defaultNetwork.ovnKubernetesConfig.gatewayConfig.ipForwarding == 'Global'",message="invalid value for IPForwarding, valid values are 'Restricted' or 'Global'" -// +openshift:validation:FeatureGateAwareXValidation:featureGate=AdditionalRoutingCapabilities,rule="(has(self.additionalRoutingCapabilities) && ('FRR' in self.additionalRoutingCapabilities.providers)) || !has(self.defaultNetwork) || !has(self.defaultNetwork.ovnKubernetesConfig) || !has(self.defaultNetwork.ovnKubernetesConfig.routeAdvertisements) || self.defaultNetwork.ovnKubernetesConfig.routeAdvertisements != 'Enabled'",message="Route advertisements cannot be Enabled if 'FRR' routing capability provider is not available" +// +openshift:validation:FeatureGateAwareXValidation:featureGate=RouteAdvertisements,rule="(has(self.additionalRoutingCapabilities) && ('FRR' in self.additionalRoutingCapabilities.providers)) || !has(self.defaultNetwork) || !has(self.defaultNetwork.ovnKubernetesConfig) || !has(self.defaultNetwork.ovnKubernetesConfig.routeAdvertisements) || self.defaultNetwork.ovnKubernetesConfig.routeAdvertisements != 'Enabled'",message="Route advertisements cannot be Enabled if 'FRR' routing capability provider is not available" type NetworkSpec struct { OperatorSpec `json:",inline"` @@ -79,9 +79,10 @@ type NetworkSpec struct { // +listMapKey=name AdditionalNetworks []AdditionalNetworkDefinition `json:"additionalNetworks,omitempty"` - // disableMultiNetwork specifies whether or not multiple pod network - // support should be disabled. If unset, this property defaults to - // 'false' and multiple network support is enabled. + // disableMultiNetwork defaults to 'false' and this setting enables the pod multi-networking capability. + // disableMultiNetwork when set to 'true' at cluster install time does not install the components, typically the Multus CNI and the network-attachment-definition CRD, + // that enable the pod multi-networking capability. Setting the parameter to 'true' might be useful when you need install third-party CNI plugins, + // but these plugins are not supported by Red Hat. Changing the parameter value as a postinstallation cluster task has no effect. DisableMultiNetwork *bool `json:"disableMultiNetwork,omitempty"` // useMultiNetworkPolicy enables a controller which allows for @@ -250,7 +251,7 @@ type DefaultNetworkDefinition struct { // All NetworkTypes are supported except for NetworkTypeRaw Type NetworkType `json:"type"` - // openShiftSDNConfig was previously used to configure the openshift-sdn plugin. + // openshiftSDNConfig was previously used to configure the openshift-sdn plugin. // DEPRECATED: OpenShift SDN is no longer supported. // +optional OpenShiftSDNConfig *OpenShiftSDNConfig `json:"openshiftSDNConfig,omitempty"` @@ -267,7 +268,7 @@ type SimpleMacvlanConfig struct { // +optional Master string `json:"master,omitempty"` - // IPAMConfig configures IPAM module will be used for IP Address Management (IPAM). + // ipamConfig configures IPAM module will be used for IP Address Management (IPAM). 
// +optional IPAMConfig *IPAMConfig `json:"ipamConfig,omitempty"` @@ -284,19 +285,19 @@ type SimpleMacvlanConfig struct { // StaticIPAMAddresses provides IP address and Gateway for static IPAM addresses type StaticIPAMAddresses struct { - // Address is the IP address in CIDR format + // address is the IP address in CIDR format // +optional Address string `json:"address"` - // Gateway is IP inside of subnet to designate as the gateway + // gateway is IP inside of subnet to designate as the gateway // +optional Gateway string `json:"gateway,omitempty"` } // StaticIPAMRoutes provides Destination/Gateway pairs for static IPAM routes type StaticIPAMRoutes struct { - // Destination points the IP route destination + // destination points the IP route destination Destination string `json:"destination"` - // Gateway is the route's next-hop IP address + // gateway is the route's next-hop IP address // If unset, a default gateway is assumed (as determined by the CNI plugin). // +optional Gateway string `json:"gateway,omitempty"` @@ -304,14 +305,14 @@ type StaticIPAMRoutes struct { // StaticIPAMDNS provides DNS related information for static IPAM type StaticIPAMDNS struct { - // Nameservers points DNS servers for IP lookup + // nameservers points DNS servers for IP lookup // +optional // +listType=atomic Nameservers []string `json:"nameservers,omitempty"` - // Domain configures the domainname the local domain used for short hostname lookups + // domain configures the domainname the local domain used for short hostname lookups // +optional Domain string `json:"domain,omitempty"` - // Search configures priority ordered search domains for short hostname lookups + // search configures priority ordered search domains for short hostname lookups // +optional // +listType=atomic Search []string `json:"search,omitempty"` @@ -319,26 +320,26 @@ type StaticIPAMDNS struct { // StaticIPAMConfig contains configurations for static IPAM (IP Address Management) type StaticIPAMConfig struct { - // Addresses configures IP address for the interface + // addresses configures IP address for the interface // +optional // +listType=atomic Addresses []StaticIPAMAddresses `json:"addresses,omitempty"` - // Routes configures IP routes for the interface + // routes configures IP routes for the interface // +optional // +listType=atomic Routes []StaticIPAMRoutes `json:"routes,omitempty"` - // DNS configures DNS for the interface + // dns configures DNS for the interface // +optional DNS *StaticIPAMDNS `json:"dns,omitempty"` } // IPAMConfig contains configurations for IPAM (IP Address Management) type IPAMConfig struct { - // Type is the type of IPAM module will be used for IP Address Management(IPAM). + // type is the type of IPAM module will be used for IP Address Management(IPAM). // The supported values are IPAMTypeDHCP, IPAMTypeStatic Type IPAMType `json:"type"` - // StaticIPAMConfig configures the static IP address in case of type:IPAMTypeStatic + // staticIPAMConfig configures the static IP address in case of type:IPAMTypeStatic // +optional StaticIPAMConfig *StaticIPAMConfig `json:"staticIPAMConfig,omitempty"` } @@ -353,7 +354,7 @@ type AdditionalNetworkDefinition struct { // name is the name of the network. This will be populated in the resulting CRD // This must be unique. - // +kubebuilder:validation:Required + // +required Name string `json:"name"` // namespace is the namespace of the network. 
This will be populated in the resulting CRD @@ -364,7 +365,7 @@ type AdditionalNetworkDefinition struct { // NetworkAttachmentDefinition CRD RawCNIConfig string `json:"rawCNIConfig,omitempty"` - // SimpleMacvlanConfig configures the macvlan interface in case of type:NetworkTypeSimpleMacvlan + // simpleMacvlanConfig configures the macvlan interface in case of type:NetworkTypeSimpleMacvlan // +optional SimpleMacvlanConfig *SimpleMacvlanConfig `json:"simpleMacvlanConfig,omitempty"` } @@ -410,7 +411,7 @@ type OVNKubernetesConfig struct { // +kubebuilder:validation:Minimum=1 // +optional GenevePort *uint32 `json:"genevePort,omitempty"` - // HybridOverlayConfig configures an additional overlay network for peers that are + // hybridOverlayConfig configures an additional overlay network for peers that are // not using OVN. // +optional HybridOverlayConfig *HybridOverlayConfig `json:"hybridOverlayConfig,omitempty"` @@ -440,7 +441,7 @@ type OVNKubernetesConfig struct { // any other subnet being used by OpenShift or by the node network. The size of the // subnet must be larger than the number of nodes. The value cannot be changed // after installation. - // Default is fd98::/48 + // Default is fd98::/64 // +optional V6InternalSubnet string `json:"v6InternalSubnet,omitempty"` // egressIPConfig holds the configuration for EgressIP options. @@ -529,7 +530,7 @@ type IPv6OVNKubernetesConfig struct { // subnet must be larger than the number of nodes. The value cannot be changed // after installation. // The subnet must be large enough to accomadate one IP per node in your cluster - // The current default value is fd98::/48 + // The current default value is fd98::/64 // The value must be in proper IPV6 CIDR format // Note that IPV6 dual addresses are not permitted // +kubebuilder:validation:MaxLength=48 @@ -540,16 +541,18 @@ type IPv6OVNKubernetesConfig struct { } type HybridOverlayConfig struct { - // HybridClusterNetwork defines a network space given to nodes on an additional overlay network. + // hybridClusterNetwork defines a network space given to nodes on an additional overlay network. // +listType=atomic HybridClusterNetwork []ClusterNetworkEntry `json:"hybridClusterNetwork"` - // HybridOverlayVXLANPort defines the VXLAN port number to be used by the additional overlay network. + // hybridOverlayVXLANPort defines the VXLAN port number to be used by the additional overlay network. // Default is 4789 // +optional HybridOverlayVXLANPort *uint32 `json:"hybridOverlayVXLANPort,omitempty"` } // +kubebuilder:validation:XValidation:rule="self == oldSelf || has(self.mode)",message="ipsecConfig.mode is required" +// +kubebuilder:validation:XValidation:rule="has(self.mode) && self.mode == 'Full' ? true : !has(self.full)",message="full is forbidden when mode is not Full" +// +union type IPsecConfig struct { // mode defines the behaviour of the ipsec configuration within the platform. // Valid values are `Disabled`, `External` and `Full`. @@ -561,7 +564,40 @@ type IPsecConfig struct { // this is left to the user to configure. // +kubebuilder:validation:Enum=Disabled;External;Full // +optional + // +unionDiscriminator Mode IPsecMode `json:"mode,omitempty"` + + // full defines configuration parameters for the IPsec `Full` mode. + // This is permitted only when mode is configured with `Full`, + // and forbidden otherwise. 
+ // +unionMember,optional + // +optional + Full *IPsecFullModeConfig `json:"full,omitempty"` +} + +type Encapsulation string + +const ( + // EncapsulationAlways always enable UDP encapsulation regardless of whether NAT is detected. + EncapsulationAlways = "Always" + // EncapsulationAuto enable UDP encapsulation based on the detection of NAT. + EncapsulationAuto = "Auto" +) + +// IPsecFullModeConfig defines configuration parameters for the IPsec `Full` mode. +// +kubebuilder:validation:MinProperties:=1 +type IPsecFullModeConfig struct { + // encapsulation option to configure libreswan on how inter-pod traffic across nodes + // are encapsulated to handle NAT traversal. When configured it uses UDP port 4500 + // for the encapsulation. + // Valid values are Always, Auto and omitted. + // Always means enable UDP encapsulation regardless of whether NAT is detected. + // Auto means enable UDP encapsulation based on the detection of NAT. + // When omitted, this means no opinion and the platform is left to choose a reasonable + // default, which is subject to change over time. The current default is Auto. + // +kubebuilder:validation:Enum:=Always;Auto + // +optional + Encapsulation Encapsulation `json:"encapsulation,omitempty"` } type IPForwardingMode string @@ -577,14 +613,14 @@ const ( // GatewayConfig holds node gateway-related parsed config file parameters and command-line overrides type GatewayConfig struct { - // RoutingViaHost allows pod egress traffic to exit via the ovn-k8s-mp0 management port + // routingViaHost allows pod egress traffic to exit via the ovn-k8s-mp0 management port // into the host before sending it out. If this is not set, traffic will always egress directly // from OVN to outside without touching the host stack. Setting this to true means hardware // offload will not be supported. Default is false if GatewayConfig is specified. // +kubebuilder:default:=false // +optional RoutingViaHost bool `json:"routingViaHost,omitempty"` - // IPForwarding controls IP forwarding for all traffic on OVN-Kubernetes managed interfaces (such as br-ex). + // ipForwarding controls IP forwarding for all traffic on OVN-Kubernetes managed interfaces (such as br-ex). // By default this is set to Restricted, and Kubernetes related traffic is still forwarded appropriately, but other // IP traffic will not be routed by the OCP node. If there is a desire to allow the host to forward traffic across // OVN-Kubernetes managed interfaces, then set this field to "Global". @@ -862,7 +898,7 @@ type AdditionalRoutingCapabilities struct { // is currrently "FRR" which provides FRR routing capabilities through the // deployment of FRR. 
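The hunk above adds a `full` union member to IPsecConfig, guarded by a new CEL rule that forbids it unless mode is Full. A minimal Go sketch, not taken from this repository, of how the new fields compose; `operatorv1` refers to the vendored github.com/openshift/api/operator/v1 package, the exact IPsecMode constant name is not visible in this diff (so a string conversion is used), and all values are illustrative:

package main

import (
	"fmt"

	operatorv1 "github.com/openshift/api/operator/v1"
)

func main() {
	// Full-mode IPsec with UDP encapsulation forced on, per the new
	// IPsecFullModeConfig union member added in the hunk above.
	ipsec := &operatorv1.IPsecConfig{
		// "Full" is one of the documented enum values (Disabled, External, Full);
		// a typed constant exists in the package, but its exact name is not
		// shown in this diff, so a conversion is used here.
		Mode: operatorv1.IPsecMode("Full"),
		// full may only be set when mode is Full; the new CEL rule
		// ("full is forbidden when mode is not Full") rejects it otherwise.
		Full: &operatorv1.IPsecFullModeConfig{
			Encapsulation: operatorv1.EncapsulationAlways, // or EncapsulationAuto, or omitted
		},
	}

	ovn := operatorv1.OVNKubernetesConfig{IPsecConfig: ipsec}
	fmt.Println(ovn.IPsecConfig.Mode, ovn.IPsecConfig.Full.Encapsulation)
}

Because encapsulation is optional with a platform-chosen default (currently Auto, per the field's doc comment), omitting `full` entirely remains valid; only setting it while mode is Disabled or External is rejected by the new validation.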
// +listType=atomic - // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:MinItems=1 // +kubebuilder:validation:MaxItems=1 // +kubebuilder:validation:XValidation:rule="self.all(x, self.exists_one(y, x == y))" diff --git a/vendor/github.com/openshift/api/operator/v1/types_olm.go b/vendor/github.com/openshift/api/operator/v1/types_olm.go new file mode 100644 index 000000000..07c94ece2 --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1/types_olm.go @@ -0,0 +1,61 @@ +package v1 + +import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// OLM provides information to configure an operator to manage the OLM controllers +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +// +kubebuilder:object:root=true +// +kubebuilder:resource:path=olms,scope=Cluster +// +kubebuilder:subresource:status +// +kubebuilder:metadata:annotations=include.release.openshift.io/ibm-cloud-managed=false +// +kubebuilder:metadata:annotations=include.release.openshift.io/self-managed-high-availability=true +// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/1504 +// +openshift:file-pattern=cvoRunLevel=0000_10,operatorName=operator-lifecycle-manager,operatorOrdering=01 +// +openshift:enable:FeatureGate=NewOLM +// +openshift:capability=OperatorLifecycleManagerV1 +// +kubebuilder:validation:XValidation:rule="self.metadata.name == 'cluster'",message="olm is a singleton, .metadata.name must be 'cluster'" +type OLM struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata"` + + //spec holds user settable values for configuration + //+kubebuilder:validation:Required + Spec OLMSpec `json:"spec"` + // status holds observed values from the cluster. They may not be overridden. + // +optional + Status OLMStatus `json:"status"` +} + +type OLMSpec struct { + OperatorSpec `json:",inline"` +} + +type OLMStatus struct { + OperatorStatus `json:",inline"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// OLMList is a collection of items +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type OLMList struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ListMeta `json:"metadata"` + + // items contains the items + Items []OLM `json:"items"` +} diff --git a/vendor/github.com/openshift/api/operator/v1/types_openshiftapiserver.go b/vendor/github.com/openshift/api/operator/v1/types_openshiftapiserver.go index cd2c8a588..a96e033cb 100644 --- a/vendor/github.com/openshift/api/operator/v1/types_openshiftapiserver.go +++ b/vendor/github.com/openshift/api/operator/v1/types_openshiftapiserver.go @@ -25,7 +25,6 @@ type OpenShiftAPIServer struct { metav1.ObjectMeta `json:"metadata"` // spec is the specification of the desired behavior of the OpenShift API Server. 
- // +kubebuilder:validation:Required // +required Spec OpenShiftAPIServerSpec `json:"spec"` @@ -55,6 +54,6 @@ type OpenShiftAPIServerList struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ListMeta `json:"metadata"` - // Items contains the items + // items contains the items Items []OpenShiftAPIServer `json:"items"` } diff --git a/vendor/github.com/openshift/api/operator/v1/types_openshiftcontrollermanager.go b/vendor/github.com/openshift/api/operator/v1/types_openshiftcontrollermanager.go index 8e8929a90..8a553a057 100644 --- a/vendor/github.com/openshift/api/operator/v1/types_openshiftcontrollermanager.go +++ b/vendor/github.com/openshift/api/operator/v1/types_openshiftcontrollermanager.go @@ -24,7 +24,6 @@ type OpenShiftControllerManager struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata"` - // +kubebuilder:validation:Required // +required Spec OpenShiftControllerManagerSpec `json:"spec"` // +optional @@ -52,6 +51,6 @@ type OpenShiftControllerManagerList struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ListMeta `json:"metadata"` - // Items contains the items + // items contains the items Items []OpenShiftControllerManager `json:"items"` } diff --git a/vendor/github.com/openshift/api/operator/v1/types_scheduler.go b/vendor/github.com/openshift/api/operator/v1/types_scheduler.go index 448c458c1..cfb04e8d9 100644 --- a/vendor/github.com/openshift/api/operator/v1/types_scheduler.go +++ b/vendor/github.com/openshift/api/operator/v1/types_scheduler.go @@ -25,7 +25,6 @@ type KubeScheduler struct { metav1.ObjectMeta `json:"metadata"` // spec is the specification of the desired behavior of the Kubernetes Scheduler - // +kubebuilder:validation:Required // +required Spec KubeSchedulerSpec `json:"spec"` @@ -55,6 +54,6 @@ type KubeSchedulerList struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ListMeta `json:"metadata"` - // Items contains the items + // items contains the items Items []KubeScheduler `json:"items"` } diff --git a/vendor/github.com/openshift/api/operator/v1/types_serviceca.go b/vendor/github.com/openshift/api/operator/v1/types_serviceca.go index e4d8d1d7a..48534d4c6 100644 --- a/vendor/github.com/openshift/api/operator/v1/types_serviceca.go +++ b/vendor/github.com/openshift/api/operator/v1/types_serviceca.go @@ -25,7 +25,6 @@ type ServiceCA struct { metav1.ObjectMeta `json:"metadata"` //spec holds user settable values for configuration - // +kubebuilder:validation:Required // +required Spec ServiceCASpec `json:"spec"` // status holds observed values from the cluster. They may not be overridden. 
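Stepping back to the types_network.go hunks earlier in this diff: the StaticIPAM* and IPAMConfig edits only adjust doc-comment casing, so the field shapes are unchanged. A minimal sketch, with hypothetical interface name, CIDRs, and DNS values, of how those fields compose for a simple-macvlan additional network; the IPAMTypeStatic constant name is taken from the doc comment shown in the hunk:

package main

import (
	"fmt"

	operatorv1 "github.com/openshift/api/operator/v1"
)

func main() {
	// Static IPAM for a simple-macvlan interface: one address, one route,
	// and DNS settings, all supplied inline rather than via DHCP.
	macvlan := &operatorv1.SimpleMacvlanConfig{
		Master: "eth1", // hypothetical host interface
		IPAMConfig: &operatorv1.IPAMConfig{
			Type: operatorv1.IPAMTypeStatic,
			StaticIPAMConfig: &operatorv1.StaticIPAMConfig{
				Addresses: []operatorv1.StaticIPAMAddresses{
					{Address: "192.168.10.5/24", Gateway: "192.168.10.1"},
				},
				Routes: []operatorv1.StaticIPAMRoutes{
					{Destination: "10.0.0.0/16", Gateway: "192.168.10.1"},
				},
				DNS: &operatorv1.StaticIPAMDNS{
					Nameservers: []string{"192.168.10.2"},
					Domain:      "example.internal",
					Search:      []string{"example.internal"},
				},
			},
		},
	}
	fmt.Println(macvlan.Master, macvlan.IPAMConfig.Type)
}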
@@ -54,6 +53,6 @@ type ServiceCAList struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ListMeta `json:"metadata"` - // Items contains the items + // items contains the items Items []ServiceCA `json:"items"` } diff --git a/vendor/github.com/openshift/api/operator/v1/types_servicecatalogapiserver.go b/vendor/github.com/openshift/api/operator/v1/types_servicecatalogapiserver.go index 006b8bb99..e058c065a 100644 --- a/vendor/github.com/openshift/api/operator/v1/types_servicecatalogapiserver.go +++ b/vendor/github.com/openshift/api/operator/v1/types_servicecatalogapiserver.go @@ -20,7 +20,6 @@ type ServiceCatalogAPIServer struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata,omitempty"` - // +kubebuilder:validation:Required // +required Spec ServiceCatalogAPIServerSpec `json:"spec"` // +optional @@ -49,6 +48,6 @@ type ServiceCatalogAPIServerList struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ListMeta `json:"metadata"` - // Items contains the items + // items contains the items Items []ServiceCatalogAPIServer `json:"items"` } diff --git a/vendor/github.com/openshift/api/operator/v1/types_servicecatalogcontrollermanager.go b/vendor/github.com/openshift/api/operator/v1/types_servicecatalogcontrollermanager.go index 859965408..4fe2aa46a 100644 --- a/vendor/github.com/openshift/api/operator/v1/types_servicecatalogcontrollermanager.go +++ b/vendor/github.com/openshift/api/operator/v1/types_servicecatalogcontrollermanager.go @@ -20,7 +20,6 @@ type ServiceCatalogControllerManager struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata"` - // +kubebuilder:validation:Required // +required Spec ServiceCatalogControllerManagerSpec `json:"spec"` // +optional @@ -49,6 +48,6 @@ type ServiceCatalogControllerManagerList struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ListMeta `json:"metadata"` - // Items contains the items + // items contains the items Items []ServiceCatalogControllerManager `json:"items"` } diff --git a/vendor/github.com/openshift/api/operator/v1/types_storage.go b/vendor/github.com/openshift/api/operator/v1/types_storage.go index aa48b0c84..69691a83a 100644 --- a/vendor/github.com/openshift/api/operator/v1/types_storage.go +++ b/vendor/github.com/openshift/api/operator/v1/types_storage.go @@ -25,7 +25,6 @@ type Storage struct { metav1.ObjectMeta `json:"metadata,omitempty"` // spec holds user settable values for configuration - // +kubebuilder:validation:Required // +required Spec StorageSpec `json:"spec"` @@ -47,7 +46,7 @@ const ( type StorageSpec struct { OperatorSpec `json:",inline"` - // VSphereStorageDriver indicates the storage driver to use on VSphere clusters. + // vsphereStorageDriver indicates the storage driver to use on VSphere clusters. // Once this field is set to CSIWithMigrationDriver, it can not be changed. // If this is empty, the platform will choose a good default, // which may change over time without notice. 
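The new types_olm.go file above introduces a cluster-scoped OLM singleton whose CEL rule pins .metadata.name to "cluster". A minimal sketch of constructing that object from Go, assuming the vendored operator/v1 and apimachinery packages; Managed is shown as a typical managementState and comes from the embedded OperatorSpec:

package main

import (
	"fmt"

	operatorv1 "github.com/openshift/api/operator/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// The XValidation rule on the OLM kind requires the singleton name "cluster".
	olm := operatorv1.OLM{
		TypeMeta:   metav1.TypeMeta{APIVersion: "operator.openshift.io/v1", Kind: "OLM"},
		ObjectMeta: metav1.ObjectMeta{Name: "cluster"},
		Spec: operatorv1.OLMSpec{
			// OLMSpec only embeds the shared OperatorSpec, so the usual
			// operator knobs (managementState, logLevel, ...) apply here.
			OperatorSpec: operatorv1.OperatorSpec{ManagementState: operatorv1.Managed},
		},
	}
	fmt.Println(olm.Name, olm.Spec.ManagementState)
}

Note that the CRD is gated on the NewOLM feature gate and the OperatorLifecycleManagerV1 capability (see the zz_generated.featuregated-crd-manifests.yaml hunk below), so the resource is only served on clusters where both are enabled.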
diff --git a/vendor/github.com/openshift/api/operator/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/operator/v1/zz_generated.deepcopy.go index 84edc0cab..1257a66e7 100644 --- a/vendor/github.com/openshift/api/operator/v1/zz_generated.deepcopy.go +++ b/vendor/github.com/openshift/api/operator/v1/zz_generated.deepcopy.go @@ -849,6 +849,22 @@ func (in *ConfigList) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigMapFileReference) DeepCopyInto(out *ConfigMapFileReference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigMapFileReference. +func (in *ConfigMapFileReference) DeepCopy() *ConfigMapFileReference { + if in == nil { + return nil + } + out := new(ConfigMapFileReference) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ConfigSpec) DeepCopyInto(out *ConfigSpec) { *out = *in @@ -931,6 +947,13 @@ func (in *ConsoleConfigRoute) DeepCopy() *ConsoleConfigRoute { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ConsoleCustomization) DeepCopyInto(out *ConsoleCustomization) { *out = *in + if in.Logos != nil { + in, out := &in.Logos, &out.Logos + *out = make([]Logo, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } if in.Capabilities != nil { in, out := &in.Capabilities, &out.Capabilities *out = make([]Capability, len(*in)) @@ -1598,6 +1621,27 @@ func (in *FeaturesMigration) DeepCopy() *FeaturesMigration { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FileReferenceSource) DeepCopyInto(out *FileReferenceSource) { + *out = *in + if in.ConfigMap != nil { + in, out := &in.ConfigMap, &out.ConfigMap + *out = new(ConfigMapFileReference) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FileReferenceSource. +func (in *FileReferenceSource) DeepCopy() *FileReferenceSource { + if in == nil { + return nil + } + out := new(FileReferenceSource) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ForwardPlugin) DeepCopyInto(out *ForwardPlugin) { *out = *in @@ -1912,6 +1956,11 @@ func (in *IPFIXConfig) DeepCopy() *IPFIXConfig { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *IPsecConfig) DeepCopyInto(out *IPsecConfig) { *out = *in + if in.Full != nil { + in, out := &in.Full, &out.Full + *out = new(IPsecFullModeConfig) + **out = **in + } return } @@ -1925,6 +1974,22 @@ func (in *IPsecConfig) DeepCopy() *IPsecConfig { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IPsecFullModeConfig) DeepCopyInto(out *IPsecFullModeConfig) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPsecFullModeConfig. 
+func (in *IPsecFullModeConfig) DeepCopy() *IPsecFullModeConfig { + if in == nil { + return nil + } + out := new(IPsecFullModeConfig) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *IPv4GatewayConfig) DeepCopyInto(out *IPv4GatewayConfig) { *out = *in @@ -3014,6 +3079,29 @@ func (in *LoggingDestination) DeepCopy() *LoggingDestination { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Logo) DeepCopyInto(out *Logo) { + *out = *in + if in.Themes != nil { + in, out := &in.Themes, &out.Themes + *out = make([]Theme, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Logo. +func (in *Logo) DeepCopy() *Logo { + if in == nil { + return nil + } + out := new(Logo) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *MTUMigration) DeepCopyInto(out *MTUMigration) { *out = *in @@ -3157,6 +3245,7 @@ func (in *MachineConfigurationStatus) DeepCopyInto(out *MachineConfigurationStat } } in.NodeDisruptionPolicyStatus.DeepCopyInto(&out.NodeDisruptionPolicyStatus) + in.ManagedBootImagesStatus.DeepCopyInto(&out.ManagedBootImagesStatus) return } @@ -3834,6 +3923,101 @@ func (in *OAuthAPIServerStatus) DeepCopy() *OAuthAPIServerStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OLM) DeepCopyInto(out *OLM) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OLM. +func (in *OLM) DeepCopy() *OLM { + if in == nil { + return nil + } + out := new(OLM) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *OLM) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OLMList) DeepCopyInto(out *OLMList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]OLM, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OLMList. +func (in *OLMList) DeepCopy() *OLMList { + if in == nil { + return nil + } + out := new(OLMList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *OLMList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OLMSpec) DeepCopyInto(out *OLMSpec) { + *out = *in + in.OperatorSpec.DeepCopyInto(&out.OperatorSpec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OLMSpec. +func (in *OLMSpec) DeepCopy() *OLMSpec { + if in == nil { + return nil + } + out := new(OLMSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OLMStatus) DeepCopyInto(out *OLMStatus) { + *out = *in + in.OperatorStatus.DeepCopyInto(&out.OperatorStatus) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OLMStatus. +func (in *OLMStatus) DeepCopy() *OLMStatus { + if in == nil { + return nil + } + out := new(OLMStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *OVNKubernetesConfig) DeepCopyInto(out *OVNKubernetesConfig) { *out = *in @@ -3855,7 +4039,7 @@ func (in *OVNKubernetesConfig) DeepCopyInto(out *OVNKubernetesConfig) { if in.IPsecConfig != nil { in, out := &in.IPsecConfig, &out.IPsecConfig *out = new(IPsecConfig) - **out = **in + (*in).DeepCopyInto(*out) } if in.PolicyAuditConfig != nil { in, out := &in.PolicyAuditConfig, &out.PolicyAuditConfig @@ -5160,6 +5344,23 @@ func (in *SyslogLoggingDestinationParameters) DeepCopy() *SyslogLoggingDestinati return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Theme) DeepCopyInto(out *Theme) { + *out = *in + in.Source.DeepCopyInto(&out.Source) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Theme. +func (in *Theme) DeepCopy() *Theme { + if in == nil { + return nil + } + out := new(Theme) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *Upstream) DeepCopyInto(out *Upstream) { *out = *in diff --git a/vendor/github.com/openshift/api/operator/v1/zz_generated.featuregated-crd-manifests.yaml b/vendor/github.com/openshift/api/operator/v1/zz_generated.featuregated-crd-manifests.yaml index 9ed897517..81f2a87a9 100644 --- a/vendor/github.com/openshift/api/operator/v1/zz_generated.featuregated-crd-manifests.yaml +++ b/vendor/github.com/openshift/api/operator/v1/zz_generated.featuregated-crd-manifests.yaml @@ -70,10 +70,11 @@ clustercsidrivers.operator.openshift.io: Category: "" FeatureGates: - AWSEFSDriverVolumeMetrics + - VSphereConfigurableMaxAllowedBlockVolumesPerNode - VSphereDriverConfiguration FilenameOperatorName: csi-driver FilenameOperatorOrdering: "01" - FilenameRunLevel: "0000_90" + FilenameRunLevel: "0000_50" GroupName: operator.openshift.io HasStatus: true KindName: ClusterCSIDriver @@ -180,6 +181,7 @@ ingresscontrollers.operator.openshift.io: FeatureGates: - IngressControllerLBSubnetsAWS - SetEIPForNLBIngressController + - SetEIPForNLBIngressController+IngressControllerLBSubnetsAWS FilenameOperatorName: ingress FilenameOperatorOrdering: "00" FilenameRunLevel: "0000_50" @@ -346,6 +348,31 @@ networks.operator.openshift.io: TopLevelFeatureGates: [] Version: v1 +olms.operator.openshift.io: + Annotations: + include.release.openshift.io/ibm-cloud-managed: "false" + include.release.openshift.io/self-managed-high-availability: "true" + ApprovedPRNumber: https://github.com/openshift/api/pull/1504 + CRDName: olms.operator.openshift.io + Capability: OperatorLifecycleManagerV1 + Category: "" + FeatureGates: + - NewOLM + FilenameOperatorName: operator-lifecycle-manager + FilenameOperatorOrdering: "01" + FilenameRunLevel: "0000_10" + GroupName: operator.openshift.io + HasStatus: true + KindName: OLM + Labels: {} + PluralName: olms + PrinterColumns: [] + Scope: Cluster + ShortNames: null + TopLevelFeatureGates: + - NewOLM + Version: v1 + openshiftapiservers.operator.openshift.io: Annotations: {} ApprovedPRNumber: https://github.com/openshift/api/pull/475 diff --git a/vendor/github.com/openshift/api/operator/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/operator/v1/zz_generated.swagger_doc_generated.go index 03d9e16ed..a0fa4fe47 100644 --- a/vendor/github.com/openshift/api/operator/v1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/operator/v1/zz_generated.swagger_doc_generated.go @@ -37,8 +37,8 @@ func (MyOperatorResource) SwaggerDoc() map[string]string { var map_NodeStatus = map[string]string{ "": "NodeStatus provides information about the current state of a particular node managed by this operator.", "nodeName": "nodeName is the name of the node", - "currentRevision": "currentRevision is the generation of the most recently successful deployment", - "targetRevision": "targetRevision is the generation of the deployment we're trying to apply", + "currentRevision": "currentRevision is the generation of the most recently successful deployment. Can not be set on creation of a nodeStatus. Updates must only increase the value.", + "targetRevision": "targetRevision is the generation of the deployment we're trying to apply. 
Can not be set on creation of a nodeStatus.", "lastFailedRevision": "lastFailedRevision is the generation of the deployment we tried and failed to deploy.", "lastFailedTime": "lastFailedTime is the time the last failed revision failed the last time.", "lastFailedReason": "lastFailedReason is a machine readable failure reason string.", @@ -128,7 +128,7 @@ func (AuthenticationList) SwaggerDoc() map[string]string { } var map_AuthenticationStatus = map[string]string{ - "oauthAPIServer": "OAuthAPIServer holds status specific only to oauth-apiserver", + "oauthAPIServer": "oauthAPIServer holds status specific only to oauth-apiserver", } func (AuthenticationStatus) SwaggerDoc() map[string]string { @@ -136,7 +136,7 @@ func (AuthenticationStatus) SwaggerDoc() map[string]string { } var map_OAuthAPIServerStatus = map[string]string{ - "latestAvailableRevision": "LatestAvailableRevision is the latest revision used as suffix of revisioned secrets like encryption-config. A new revision causes a new deployment of pods.", + "latestAvailableRevision": "latestAvailableRevision is the latest revision used as suffix of revisioned secrets like encryption-config. A new revision causes a new deployment of pods.", } func (OAuthAPIServerStatus) SwaggerDoc() map[string]string { @@ -163,7 +163,7 @@ func (CloudCredentialList) SwaggerDoc() map[string]string { var map_CloudCredentialSpec = map[string]string{ "": "CloudCredentialSpec is the specification of the desired behavior of the cloud-credential-operator.", - "credentialsMode": "CredentialsMode allows informing CCO that it should not attempt to dynamically determine the root cloud credentials capabilities, and it should just run in the specified mode. It also allows putting the operator into \"manual\" mode if desired. Leaving the field in default mode runs CCO so that the cluster's cloud credentials will be dynamically probed for capabilities (on supported clouds/platforms). Supported modes:\n AWS/Azure/GCP: \"\" (Default), \"Mint\", \"Passthrough\", \"Manual\"\n Others: Do not set value as other platforms only support running in \"Passthrough\"", + "credentialsMode": "credentialsMode allows informing CCO that it should not attempt to dynamically determine the root cloud credentials capabilities, and it should just run in the specified mode. It also allows putting the operator into \"manual\" mode if desired. Leaving the field in default mode runs CCO so that the cluster's cloud credentials will be dynamically probed for capabilities (on supported clouds/platforms). Supported modes:\n AWS/Azure/GCP: \"\" (Default), \"Mint\", \"Passthrough\", \"Manual\"\n Others: Do not set value as other platforms only support running in \"Passthrough\"", } func (CloudCredentialSpec) SwaggerDoc() map[string]string { @@ -192,7 +192,7 @@ func (Config) SwaggerDoc() map[string]string { var map_ConfigList = map[string]string{ "": "ConfigList is a collection of items\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard list's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items contains the items", + "items": "items contains the items", } func (ConfigList) SwaggerDoc() map[string]string { @@ -227,6 +227,16 @@ func (CapabilityVisibility) SwaggerDoc() map[string]string { return map_CapabilityVisibility } +var map_ConfigMapFileReference = map[string]string{ + "": "ConfigMapFileReference references a specific file within a ConfigMap.", + "name": "name is the name of the ConfigMap. name is a required field. Must consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character. Must be at most 253 characters in length.", + "key": "key is the logo key inside the referenced ConfigMap. Must consist only of alphanumeric characters, dashes (-), underscores (_), and periods (.). Must be at most 253 characters in length. Must end in a valid file extension. A valid file extension must consist of a period followed by 2 to 5 alpha characters.", +} + +func (ConfigMapFileReference) SwaggerDoc() map[string]string { + return map_ConfigMapFileReference +} + var map_Console = map[string]string{ "": "Console provides a means to configure an operator to manage the console.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", @@ -247,12 +257,13 @@ func (ConsoleConfigRoute) SwaggerDoc() map[string]string { } var map_ConsoleCustomization = map[string]string{ - "": "ConsoleCustomization defines a list of optional configuration for the console UI.", + "": "ConsoleCustomization defines a list of optional configuration for the console UI. Ensure that Logos and CustomLogoFile cannot be set at the same time.", + "logos": "logos is used to replace the OpenShift Masthead and Favicon logos in the console UI with custom logos. logos is an optional field that allows a list of logos. Only one of logos or customLogoFile can be set at a time. If logos is set, customLogoFile must be unset. When specified, there must be at least one entry and no more than 2 entries. Each type must appear only once in the list.", "capabilities": "capabilities defines an array of capabilities that can be interacted with in the console UI. Each capability defines a visual state that can be interacted with the console to render in the UI. Available capabilities are LightspeedButton and GettingStartedBanner. Each of the available capabilities may appear only once in the list.", "brand": "brand is the default branding of the web console which can be overridden by providing the brand field. There is a limited set of specific brand options. This field controls elements of the console such as the logo. Invalid value will prevent a console rollout.", "documentationBaseURL": "documentationBaseURL links to external documentation are shown in various sections of the web console. Providing documentationBaseURL will override the default documentation URL. Invalid value will prevent a console rollout.", "customProductName": "customProductName is the name that will be displayed in page titles, logo alt text, and the about dialog instead of the normal OpenShift product name.", - "customLogoFile": "customLogoFile replaces the default OpenShift logo in the masthead and about dialog. 
It is a reference to a ConfigMap in the openshift-config namespace. This can be created with a command like 'oc create configmap custom-logo --from-file=/path/to/file -n openshift-config'. Image size must be less than 1 MB due to constraints on the ConfigMap size. The ConfigMap key should include a file extension so that the console serves the file with the correct MIME type. Recommended logo specifications: Dimensions: Max height of 68px and max width of 200px SVG format preferred", + "customLogoFile": "customLogoFile replaces the default OpenShift logo in the masthead and about dialog. It is a reference to a Only one of customLogoFile or logos can be set at a time. ConfigMap in the openshift-config namespace. This can be created with a command like 'oc create configmap custom-logo --from-file=/path/to/file -n openshift-config'. Image size must be less than 1 MB due to constraints on the ConfigMap size. The ConfigMap key should include a file extension so that the console serves the file with the correct MIME type. The recommended file format for the logo is SVG, but other file formats are allowed if supported by the browser. Deprecated: Use logos instead.", "developerCatalog": "developerCatalog allows to configure the shown developer catalog categories (filters) and types (sub-catalogs).", "projectAccess": "projectAccess allows customizing the available list of ClusterRoles in the Developer perspective Project access page which can be used by a project admin to specify roles to other users and restrict access within the project. If set, the list will replace the default ClusterRole options.", "quickStarts": "quickStarts allows customization of available ConsoleQuickStart resources in console.", @@ -314,7 +325,7 @@ func (DeveloperConsoleCatalogCategory) SwaggerDoc() map[string]string { var map_DeveloperConsoleCatalogCategoryMeta = map[string]string{ "": "DeveloperConsoleCatalogCategoryMeta are the key identifiers of a developer catalog category.", - "id": "ID is an identifier used in the URL to enable deep linking in console. ID is required and must have 1-32 URL safe (A-Z, a-z, 0-9, - and _) characters.", + "id": "id is an identifier used in the URL to enable deep linking in console. ID is required and must have 1-32 URL safe (A-Z, a-z, 0-9, - and _) characters.", "label": "label defines a category display label. It is required and must have 1-64 characters.", "tags": "tags is a list of strings that will match the category. A selected category show all items which has at least one overlapping tag between category and item.", } @@ -344,6 +355,16 @@ func (DeveloperConsoleCatalogTypes) SwaggerDoc() map[string]string { return map_DeveloperConsoleCatalogTypes } +var map_FileReferenceSource = map[string]string{ + "": "FileReferenceSource is used by the console to locate the specified file containing a custom logo.", + "from": "from is a required field to specify the source type of the file reference. Allowed values are ConfigMap. When set to ConfigMap, the file will be sourced from a ConfigMap in the openshift-config namespace. The configMap field must be set when from is set to ConfigMap.", + "configMap": "configMap specifies the ConfigMap sourcing details such as the name of the ConfigMap and the key for the file. The ConfigMap must exist in the openshift-config namespace. 
Required when from is \"ConfigMap\", and forbidden otherwise.", +} + +func (FileReferenceSource) SwaggerDoc() map[string]string { + return map_FileReferenceSource +} + var map_Ingress = map[string]string{ "": "Ingress allows cluster admin to configure alternative ingress for the console.", "consoleURL": "consoleURL is a URL to be used as the base console address. If not specified, the console route hostname will be used. This field is required for clusters without ingress capability, where access to routes is not possible. Make sure that appropriate ingress is set up at this URL. The console operator will monitor the URL and may go degraded if it's unreachable for an extended period. Must use the HTTPS scheme.", @@ -354,6 +375,16 @@ func (Ingress) SwaggerDoc() map[string]string { return map_Ingress } +var map_Logo = map[string]string{ + "": "Logo defines a configuration based on theme modes for the console UI logo.", + "type": "type specifies the type of the logo for the console UI. It determines whether the logo is for the masthead or favicon. type is a required field that allows values of Masthead and Favicon. When set to \"Masthead\", the logo will be used in the masthead and about modal of the console UI. When set to \"Favicon\", the logo will be used as the favicon of the console UI.", + "themes": "themes specifies the themes for the console UI logo. themes is a required field that allows a list of themes. Each item in the themes list must have a unique mode and a source field. Each mode determines whether the logo is for the dark or light mode of the console UI. If a theme is not specified, the default OpenShift logo will be displayed for that theme. There must be at least one entry and no more than 2 entries.", +} + +func (Logo) SwaggerDoc() map[string]string { + return map_Logo +} + var map_Perspective = map[string]string{ "": "Perspective defines a perspective that cluster admins want to show/hide in the perspective switcher dropdown", "id": "id defines the id of the perspective. Example: \"dev\", \"admin\". The available perspective ids can be found in the code snippet section next to the yaml editor. Incorrect or unknown ids will be ignored.", @@ -423,6 +454,16 @@ func (StatuspageProvider) SwaggerDoc() map[string]string { return map_StatuspageProvider } +var map_Theme = map[string]string{ + "": "Theme defines a theme mode for the console UI.", + "mode": "mode is used to specify what theme mode a logo will apply to in the console UI. mode is a required field that allows values of Dark and Light. When set to Dark, the logo file referenced in the 'file' field will be used when an end-user of the console UI enables the Dark mode. When set to Light, the logo file referenced in the 'file' field will be used when an end-user of the console UI enables the Light mode.", + "source": "source is used by the console to locate the specified file containing a custom logo. source is a required field that references a ConfigMap name and key that contains the custom logo file in the openshift-config namespace. You can create it with a command like: - 'oc create configmap custom-logos-config --namespace=openshift-config --from-file=/path/to/file' The ConfigMap key must include the file extension so that the console serves the file with the correct MIME type. The recommended file format for the Masthead and Favicon logos is SVG, but other file formats are allowed if supported by the browser. The logo image size must be less than 1 MB due to constraints on the ConfigMap size. 
For more information, see the documentation: https://docs.redhat.com/en/documentation/openshift_container_platform/4.19/html/web_console/customizing-web-console#customizing-web-console", +} + +func (Theme) SwaggerDoc() map[string]string { + return map_Theme +} + var map_AWSCSIDriverConfigSpec = map[string]string{ "": "AWSCSIDriverConfigSpec defines properties that can be configured for the AWS CSI driver.", "kmsKeyARN": "kmsKeyARN sets the cluster default storage class to encrypt volumes with a user-defined KMS key, rather than the default KMS key used by AWS. The value may be either the ARN or Alias ARN of a KMS key.", @@ -480,7 +521,7 @@ var map_CSIDriverConfigSpec = map[string]string{ "azure": "azure is used to configure the Azure CSI driver.", "gcp": "gcp is used to configure the GCP CSI driver.", "ibmcloud": "ibmcloud is used to configure the IBM Cloud CSI driver.", - "vSphere": "vsphere is used to configure the vsphere CSI driver.", + "vSphere": "vSphere is used to configure the vsphere CSI driver.", } func (CSIDriverConfigSpec) SwaggerDoc() map[string]string { @@ -509,7 +550,7 @@ func (ClusterCSIDriverList) SwaggerDoc() map[string]string { var map_ClusterCSIDriverSpec = map[string]string{ "": "ClusterCSIDriverSpec is the desired behavior of CSI driver operator", - "storageClassState": "StorageClassState determines if CSI operator should create and manage storage classes. If this field value is empty or Managed - CSI operator will continuously reconcile storage class and create if necessary. If this field value is Unmanaged - CSI operator will not reconcile any previously created storage class. If this field value is Removed - CSI operator will delete the storage class it created previously. When omitted, this means the user has no opinion and the platform chooses a reasonable default, which is subject to change over time. The current default behaviour is Managed.", + "storageClassState": "storageClassState determines if CSI operator should create and manage storage classes. If this field value is empty or Managed - CSI operator will continuously reconcile storage class and create if necessary. If this field value is Unmanaged - CSI operator will not reconcile any previously created storage class. If this field value is Removed - CSI operator will delete the storage class it created previously. When omitted, this means the user has no opinion and the platform chooses a reasonable default, which is subject to change over time. The current default behaviour is Managed.", "driverConfig": "driverConfig can be used to specify platform specific driver configuration. When omitted, this means no opinion and the platform is left to choose reasonable defaults. These defaults are subject to change over time.", } @@ -561,6 +602,7 @@ var map_VSphereCSIDriverConfigSpec = map[string]string{ "globalMaxSnapshotsPerBlockVolume": "globalMaxSnapshotsPerBlockVolume is a global configuration parameter that applies to volumes on all kinds of datastores. If omitted, the platform chooses a default, which is subject to change over time, currently that default is 3. Snapshots can not be disabled using this parameter. 
Increasing number of snapshots above 3 can have negative impact on performance, for more details see: https://kb.vmware.com/s/article/1025279 Volume snapshot documentation: https://docs.vmware.com/en/VMware-vSphere-Container-Storage-Plug-in/3.0/vmware-vsphere-csp-getting-started/GUID-E0B41C69-7EEB-450F-A73D-5FD2FF39E891.html", "granularMaxSnapshotsPerBlockVolumeInVSAN": "granularMaxSnapshotsPerBlockVolumeInVSAN is a granular configuration parameter on vSAN datastore only. It overrides GlobalMaxSnapshotsPerBlockVolume if set, while it falls back to the global constraint if unset. Snapshots for VSAN can not be disabled using this parameter.", "granularMaxSnapshotsPerBlockVolumeInVVOL": "granularMaxSnapshotsPerBlockVolumeInVVOL is a granular configuration parameter on Virtual Volumes datastore only. It overrides GlobalMaxSnapshotsPerBlockVolume if set, while it falls back to the global constraint if unset. Snapshots for VVOL can not be disabled using this parameter.", + "maxAllowedBlockVolumesPerNode": "maxAllowedBlockVolumesPerNode is an optional configuration parameter that allows setting a custom value for the limit of the number of PersistentVolumes attached to a node. In vSphere version 7 this limit was set to 59 by default, however in vSphere version 8 this limit was increased to 255. Before increasing this value above 59 the cluster administrator needs to ensure that every node forming the cluster is updated to ESXi version 8 or higher and that all nodes are running the same version. The limit must be between 1 and 255, which matches the vSphere version 8 maximum. When omitted, this means no opinion and the platform is left to choose a reasonable default, which is subject to change over time. The current default is 59, which matches the limit for vSphere version 7.", } func (VSphereCSIDriverConfigSpec) SwaggerDoc() map[string]string { @@ -714,9 +756,9 @@ func (Server) SwaggerDoc() map[string]string { var map_Upstream = map[string]string{ "": "Upstream can either be of type SystemResolvConf, or of type Network.\n\n - For an Upstream of type SystemResolvConf, no further fields are necessary:\n The upstream will be configured to use /etc/resolv.conf.\n - For an Upstream of type Network, a NetworkResolver field needs to be defined\n with an IP address or IP:port if the upstream listens on a port other than 53.", - "type": "Type defines whether this upstream contains an IP/IP:port resolver or the local /etc/resolv.conf. Type accepts 2 possible values: SystemResolvConf or Network.\n\n* When SystemResolvConf is used, the Upstream structure does not require any further fields to be defined:\n /etc/resolv.conf will be used\n* When Network is used, the Upstream structure must contain at least an Address", - "address": "Address must be defined when Type is set to Network. It will be ignored otherwise. It must be a valid ipv4 or ipv6 address.", - "port": "Port may be defined when Type is set to Network. It will be ignored otherwise. Port must be between 65535", + "type": "type defines whether this upstream contains an IP/IP:port resolver or the local /etc/resolv.conf. Type accepts 2 possible values: SystemResolvConf or Network.\n\n* When SystemResolvConf is used, the Upstream structure does not require any further fields to be defined:\n /etc/resolv.conf will be used\n* When Network is used, the Upstream structure must contain at least an Address", + "address": "address must be defined when Type is set to Network. It will be ignored otherwise. 
It must be a valid ipv4 or ipv6 address.", + "port": "port may be defined when Type is set to Network. It will be ignored otherwise. Port must be between 65535", } func (Upstream) SwaggerDoc() map[string]string { @@ -725,8 +767,8 @@ func (Upstream) SwaggerDoc() map[string]string { var map_UpstreamResolvers = map[string]string{ "": "UpstreamResolvers defines a schema for configuring the CoreDNS forward plugin in the specific case of the default (\".\") server. It defers from ForwardPlugin in the default values it accepts: * At least one upstream should be specified. * the default policy is Sequential", - "upstreams": "Upstreams is a list of resolvers to forward name queries for the \".\" domain. Each instance of CoreDNS performs health checking of Upstreams. When a healthy upstream returns an error during the exchange, another resolver is tried from Upstreams. The Upstreams are selected in the order specified in Policy.\n\nA maximum of 15 upstreams is allowed per ForwardPlugin. If no Upstreams are specified, /etc/resolv.conf is used by default", - "policy": "Policy is used to determine the order in which upstream servers are selected for querying. Any one of the following values may be specified:\n\n* \"Random\" picks a random upstream server for each query. * \"RoundRobin\" picks upstream servers in a round-robin order, moving to the next server for each new query. * \"Sequential\" tries querying upstream servers in a sequential order until one responds, starting with the first server for each new query.\n\nThe default value is \"Sequential\"", + "upstreams": "upstreams is a list of resolvers to forward name queries for the \".\" domain. Each instance of CoreDNS performs health checking of Upstreams. When a healthy upstream returns an error during the exchange, another resolver is tried from Upstreams. The Upstreams are selected in the order specified in Policy.\n\nA maximum of 15 upstreams is allowed per ForwardPlugin. If no Upstreams are specified, /etc/resolv.conf is used by default", + "policy": "policy is used to determine the order in which upstream servers are selected for querying. Any one of the following values may be specified:\n\n* \"Random\" picks a random upstream server for each query. * \"RoundRobin\" picks upstream servers in a round-robin order, moving to the next server for each new query. * \"Sequential\" tries querying upstream servers in a sequential order until one responds, starting with the first server for each new query.\n\nThe default value is \"Sequential\"", "transportConfig": "transportConfig is used to configure the transport type, server name, and optional custom CA or CA bundle to use when forwarding DNS requests to an upstream resolver.\n\nThe default value is \"\" (empty) which results in a standard cleartext connection being used when forwarding DNS requests to an upstream resolver.", "protocolStrategy": "protocolStrategy specifies the protocol to use for upstream DNS requests. Valid values for protocolStrategy are \"TCP\" and omitted. When omitted, this means no opinion and the platform is left to choose a reasonable default, which is subject to change over time. The current default is to use the protocol of the original client request. \"TCP\" specifies that the platform should use TCP for all upstream DNS requests, even if the client request uses UDP. \"TCP\" is useful for UDP-specific issues such as those created by non-compliant upstream resolvers, but may consume more bandwidth or increase DNS response time. 
Note that protocolStrategy only affects the protocol of DNS requests that CoreDNS makes to upstream resolvers. It does not affect the protocol of DNS requests between clients and CoreDNS.", } @@ -747,7 +789,7 @@ func (Etcd) SwaggerDoc() map[string]string { var map_EtcdList = map[string]string{ "": "KubeAPISOperatorConfigList is a collection of items\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items contains the items", + "items": "items contains the items", } func (EtcdList) SwaggerDoc() map[string]string { @@ -1020,24 +1062,25 @@ func (IngressControllerSetHTTPHeader) SwaggerDoc() map[string]string { } var map_IngressControllerSpec = map[string]string{ - "": "IngressControllerSpec is the specification of the desired behavior of the IngressController.", - "domain": "domain is a DNS name serviced by the ingress controller and is used to configure multiple features:\n\n* For the LoadBalancerService endpoint publishing strategy, domain is\n used to configure DNS records. See endpointPublishingStrategy.\n\n* When using a generated default certificate, the certificate will be valid\n for domain and its subdomains. See defaultCertificate.\n\n* The value is published to individual Route statuses so that end-users\n know where to target external DNS records.\n\ndomain must be unique among all IngressControllers, and cannot be updated.\n\nIf empty, defaults to ingress.config.openshift.io/cluster .spec.domain.", - "httpErrorCodePages": "httpErrorCodePages specifies a configmap with custom error pages. The administrator must create this configmap in the openshift-config namespace. This configmap should have keys in the format \"error-page-.http\", where is an HTTP error code. For example, \"error-page-503.http\" defines an error page for HTTP 503 responses. Currently only error pages for 503 and 404 responses can be customized. Each value in the configmap should be the full response, including HTTP headers. Eg- https://raw.githubusercontent.com/openshift/router/fadab45747a9b30cc3f0a4b41ad2871f95827a93/images/router/haproxy/conf/error-page-503.http If this field is empty, the ingress controller uses the default error pages.", - "replicas": "replicas is the desired number of ingress controller replicas. If unset, the default depends on the value of the defaultPlacement field in the cluster config.openshift.io/v1/ingresses status.\n\nThe value of replicas is set based on the value of a chosen field in the Infrastructure CR. If defaultPlacement is set to ControlPlane, the chosen field will be controlPlaneTopology. If it is set to Workers the chosen field will be infrastructureTopology. 
Replicas will then be set to 1 or 2 based whether the chosen field's value is SingleReplica or HighlyAvailable, respectively.\n\nThese defaults are subject to change.", - "endpointPublishingStrategy": "endpointPublishingStrategy is used to publish the ingress controller endpoints to other networks, enable load balancer integrations, etc.\n\nIf unset, the default is based on infrastructure.config.openshift.io/cluster .status.platform:\n\n AWS: LoadBalancerService (with External scope)\n Azure: LoadBalancerService (with External scope)\n GCP: LoadBalancerService (with External scope)\n IBMCloud: LoadBalancerService (with External scope)\n AlibabaCloud: LoadBalancerService (with External scope)\n Libvirt: HostNetwork\n\nAny other platform types (including None) default to HostNetwork.\n\nendpointPublishingStrategy cannot be updated.", - "defaultCertificate": "defaultCertificate is a reference to a secret containing the default certificate served by the ingress controller. When Routes don't specify their own certificate, defaultCertificate is used.\n\nThe secret must contain the following keys and data:\n\n tls.crt: certificate file contents\n tls.key: key file contents\n\nIf unset, a wildcard certificate is automatically generated and used. The certificate is valid for the ingress controller domain (and subdomains) and the generated certificate's CA will be automatically integrated with the cluster's trust store.\n\nIf a wildcard certificate is used and shared by multiple HTTP/2 enabled routes (which implies ALPN) then clients (i.e., notably browsers) are at liberty to reuse open connections. This means a client can reuse a connection to another route and that is likely to fail. This behaviour is generally known as connection coalescing.\n\nThe in-use certificate (whether generated or user-specified) will be automatically integrated with OpenShift's built-in OAuth server.", - "namespaceSelector": "namespaceSelector is used to filter the set of namespaces serviced by the ingress controller. This is useful for implementing shards.\n\nIf unset, the default is no filtering.", - "routeSelector": "routeSelector is used to filter the set of Routes serviced by the ingress controller. This is useful for implementing shards.\n\nIf unset, the default is no filtering.", - "nodePlacement": "nodePlacement enables explicit control over the scheduling of the ingress controller.\n\nIf unset, defaults are used. See NodePlacement for more details.", - "tlsSecurityProfile": "tlsSecurityProfile specifies settings for TLS connections for ingresscontrollers.\n\nIf unset, the default is based on the apiservers.config.openshift.io/cluster resource.\n\nNote that when using the Old, Intermediate, and Modern profile types, the effective profile configuration is subject to change between releases. For example, given a specification to use the Intermediate profile deployed on release X.Y.Z, an upgrade to release X.Y.Z+1 may cause a new profile configuration to be applied to the ingress controller, resulting in a rollout.", - "clientTLS": "clientTLS specifies settings for requesting and verifying client certificates, which can be used to enable mutual TLS for edge-terminated and reencrypt routes.", - "routeAdmission": "routeAdmission defines a policy for handling new route claims (for example, to allow or deny claims across namespaces).\n\nIf empty, defaults will be applied. See specific routeAdmission fields for details about their defaults.", - "logging": "logging defines parameters for what should be logged where. 
If this field is empty, operational logs are enabled but access logs are disabled.", - "httpHeaders": "httpHeaders defines policy for HTTP headers.\n\nIf this field is empty, the default values are used.", - "httpEmptyRequestsPolicy": "httpEmptyRequestsPolicy describes how HTTP connections should be handled if the connection times out before a request is received. Allowed values for this field are \"Respond\" and \"Ignore\". If the field is set to \"Respond\", the ingress controller sends an HTTP 400 or 408 response, logs the connection (if access logging is enabled), and counts the connection in the appropriate metrics. If the field is set to \"Ignore\", the ingress controller closes the connection without sending a response, logging the connection, or incrementing metrics. The default value is \"Respond\".\n\nTypically, these connections come from load balancers' health probes or Web browsers' speculative connections (\"preconnect\") and can be safely ignored. However, these requests may also be caused by network errors, and so setting this field to \"Ignore\" may impede detection and diagnosis of problems. In addition, these requests may be caused by port scans, in which case logging empty requests may aid in detecting intrusion attempts.", - "tuningOptions": "tuningOptions defines parameters for adjusting the performance of ingress controller pods. All fields are optional and will use their respective defaults if not set. See specific tuningOptions fields for more details.\n\nSetting fields within tuningOptions is generally not recommended. The default values are suitable for most configurations.", - "unsupportedConfigOverrides": "unsupportedConfigOverrides allows specifying unsupported configuration options. Its use is unsupported.", - "httpCompression": "httpCompression defines a policy for HTTP traffic compression. By default, there is no HTTP compression.", + "": "IngressControllerSpec is the specification of the desired behavior of the IngressController.", + "domain": "domain is a DNS name serviced by the ingress controller and is used to configure multiple features:\n\n* For the LoadBalancerService endpoint publishing strategy, domain is\n used to configure DNS records. See endpointPublishingStrategy.\n\n* When using a generated default certificate, the certificate will be valid\n for domain and its subdomains. See defaultCertificate.\n\n* The value is published to individual Route statuses so that end-users\n know where to target external DNS records.\n\ndomain must be unique among all IngressControllers, and cannot be updated.\n\nIf empty, defaults to ingress.config.openshift.io/cluster .spec.domain.", + "httpErrorCodePages": "httpErrorCodePages specifies a configmap with custom error pages. The administrator must create this configmap in the openshift-config namespace. This configmap should have keys in the format \"error-page-.http\", where is an HTTP error code. For example, \"error-page-503.http\" defines an error page for HTTP 503 responses. Currently only error pages for 503 and 404 responses can be customized. Each value in the configmap should be the full response, including HTTP headers. Eg- https://raw.githubusercontent.com/openshift/router/fadab45747a9b30cc3f0a4b41ad2871f95827a93/images/router/haproxy/conf/error-page-503.http If this field is empty, the ingress controller uses the default error pages.", + "replicas": "replicas is the desired number of ingress controller replicas. 
If unset, the default depends on the value of the defaultPlacement field in the cluster config.openshift.io/v1/ingresses status.\n\nThe value of replicas is set based on the value of a chosen field in the Infrastructure CR. If defaultPlacement is set to ControlPlane, the chosen field will be controlPlaneTopology. If it is set to Workers the chosen field will be infrastructureTopology. Replicas will then be set to 1 or 2 based whether the chosen field's value is SingleReplica or HighlyAvailable, respectively.\n\nThese defaults are subject to change.", + "endpointPublishingStrategy": "endpointPublishingStrategy is used to publish the ingress controller endpoints to other networks, enable load balancer integrations, etc.\n\nIf unset, the default is based on infrastructure.config.openshift.io/cluster .status.platform:\n\n AWS: LoadBalancerService (with External scope)\n Azure: LoadBalancerService (with External scope)\n GCP: LoadBalancerService (with External scope)\n IBMCloud: LoadBalancerService (with External scope)\n AlibabaCloud: LoadBalancerService (with External scope)\n Libvirt: HostNetwork\n\nAny other platform types (including None) default to HostNetwork.\n\nendpointPublishingStrategy cannot be updated.", + "defaultCertificate": "defaultCertificate is a reference to a secret containing the default certificate served by the ingress controller. When Routes don't specify their own certificate, defaultCertificate is used.\n\nThe secret must contain the following keys and data:\n\n tls.crt: certificate file contents\n tls.key: key file contents\n\nIf unset, a wildcard certificate is automatically generated and used. The certificate is valid for the ingress controller domain (and subdomains) and the generated certificate's CA will be automatically integrated with the cluster's trust store.\n\nIf a wildcard certificate is used and shared by multiple HTTP/2 enabled routes (which implies ALPN) then clients (i.e., notably browsers) are at liberty to reuse open connections. This means a client can reuse a connection to another route and that is likely to fail. This behaviour is generally known as connection coalescing.\n\nThe in-use certificate (whether generated or user-specified) will be automatically integrated with OpenShift's built-in OAuth server.", + "namespaceSelector": "namespaceSelector is used to filter the set of namespaces serviced by the ingress controller. This is useful for implementing shards.\n\nIf unset, the default is no filtering.", + "routeSelector": "routeSelector is used to filter the set of Routes serviced by the ingress controller. This is useful for implementing shards.\n\nIf unset, the default is no filtering.", + "nodePlacement": "nodePlacement enables explicit control over the scheduling of the ingress controller.\n\nIf unset, defaults are used. See NodePlacement for more details.", + "tlsSecurityProfile": "tlsSecurityProfile specifies settings for TLS connections for ingresscontrollers.\n\nIf unset, the default is based on the apiservers.config.openshift.io/cluster resource.\n\nNote that when using the Old, Intermediate, and Modern profile types, the effective profile configuration is subject to change between releases. 
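The namespaceSelector/routeSelector fields documented above are the usual way to build ingress shards. A minimal, non-authoritative sketch of a second IngressController that serves only labelled Routes (the endpointPublishingStrategy choice is illustrative, see the platform defaults described above):

```yaml
# Sketch only; domain and labels are placeholders.
apiVersion: operator.openshift.io/v1
kind: IngressController
metadata:
  name: internal-shard
  namespace: openshift-ingress-operator
spec:
  domain: internal.apps.example.com     # must be unique across all IngressControllers
  replicas: 2
  routeSelector:
    matchLabels:
      traffic: internal                 # only Routes carrying this label are served by this shard
  endpointPublishingStrategy:
    type: HostNetwork                   # illustrative; defaults depend on the platform as noted above
```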
For example, given a specification to use the Intermediate profile deployed on release X.Y.Z, an upgrade to release X.Y.Z+1 may cause a new profile configuration to be applied to the ingress controller, resulting in a rollout.", + "clientTLS": "clientTLS specifies settings for requesting and verifying client certificates, which can be used to enable mutual TLS for edge-terminated and reencrypt routes.", + "routeAdmission": "routeAdmission defines a policy for handling new route claims (for example, to allow or deny claims across namespaces).\n\nIf empty, defaults will be applied. See specific routeAdmission fields for details about their defaults.", + "logging": "logging defines parameters for what should be logged where. If this field is empty, operational logs are enabled but access logs are disabled.", + "httpHeaders": "httpHeaders defines policy for HTTP headers.\n\nIf this field is empty, the default values are used.", + "httpEmptyRequestsPolicy": "httpEmptyRequestsPolicy describes how HTTP connections should be handled if the connection times out before a request is received. Allowed values for this field are \"Respond\" and \"Ignore\". If the field is set to \"Respond\", the ingress controller sends an HTTP 400 or 408 response, logs the connection (if access logging is enabled), and counts the connection in the appropriate metrics. If the field is set to \"Ignore\", the ingress controller closes the connection without sending a response, logging the connection, or incrementing metrics. The default value is \"Respond\".\n\nTypically, these connections come from load balancers' health probes or Web browsers' speculative connections (\"preconnect\") and can be safely ignored. However, these requests may also be caused by network errors, and so setting this field to \"Ignore\" may impede detection and diagnosis of problems. In addition, these requests may be caused by port scans, in which case logging empty requests may aid in detecting intrusion attempts.", + "tuningOptions": "tuningOptions defines parameters for adjusting the performance of ingress controller pods. All fields are optional and will use their respective defaults if not set. See specific tuningOptions fields for more details.\n\nSetting fields within tuningOptions is generally not recommended. The default values are suitable for most configurations.", + "unsupportedConfigOverrides": "unsupportedConfigOverrides allows specifying unsupported configuration options. Its use is unsupported.", + "httpCompression": "httpCompression defines a policy for HTTP traffic compression. By default, there is no HTTP compression.", + "idleConnectionTerminationPolicy": "idleConnectionTerminationPolicy maps directly to HAProxy's idle-close-on-response option and controls whether HAProxy keeps idle frontend connections open during a soft stop (router reload).\n\nAllowed values for this field are \"Immediate\" and \"Deferred\". The default value is \"Immediate\".\n\nWhen set to \"Immediate\", idle connections are closed immediately during router reloads. This ensures immediate propagation of route changes but may impact clients sensitive to connection resets.\n\nWhen set to \"Deferred\", HAProxy will maintain idle connections during a soft reload instead of closing them immediately. 
These connections remain open until any of the following occurs:\n\n - A new request is received on the connection, in which\n case HAProxy handles it in the old process and closes\n the connection after sending the response.\n\n - HAProxy's `timeout http-keep-alive` duration expires\n (300 seconds in OpenShift's configuration, not\n configurable).\n\n - The client's keep-alive timeout expires, causing the\n client to close the connection.\n\nSetting Deferred can help prevent errors in clients or load balancers that do not properly handle connection resets. Additionally, this option allows you to retain the pre-2.4 HAProxy behaviour: in HAProxy version 2.2 (OpenShift versions < 4.14), maintaining idle connections during a soft reload was the default behaviour, but starting with HAProxy 2.4, the default changed to closing idle connections immediately.\n\nImportant Consideration:\n\n - Using Deferred will result in temporary inconsistencies\n for the first request on each persistent connection\n after a route update and router reload. This request\n will be processed by the old HAProxy process using its\n old configuration. Subsequent requests will use the\n updated configuration.\n\nOperational Considerations:\n\n - Keeping idle connections open during reloads may lead\n to an accumulation of old HAProxy processes if\n connections remain idle for extended periods,\n especially in environments where frequent reloads\n occur.\n\n - Consider monitoring the number of HAProxy processes in\n the router pods when Deferred is set.\n\n - You may need to enable or adjust the\n `ingress.operator.openshift.io/hard-stop-after`\n duration (configured via an annotation on the\n IngressController resource) in environments with\n frequent reloads to prevent resource exhaustion.", } func (IngressControllerSpec) SwaggerDoc() map[string]string { @@ -1071,7 +1114,7 @@ var map_IngressControllerTuningOptions = map[string]string{ "serverTimeout": "serverTimeout defines how long a connection will be held open while waiting for a server/backend response.\n\nIf unset, the default timeout is 30s", "serverFinTimeout": "serverFinTimeout defines how long a connection will be held open while waiting for the server/backend response to the client closing the connection.\n\nIf unset, the default timeout is 1s", "tunnelTimeout": "tunnelTimeout defines how long a tunnel connection (including websockets) will be held open while the tunnel is idle.\n\nIf unset, the default timeout is 1h", - "connectTimeout": "ConnectTimeout defines the maximum time to wait for a connection attempt to a server/backend to succeed.\n\nThis field expects an unsigned duration string of decimal numbers, each with optional fraction and a unit suffix, e.g. \"300ms\", \"1.5h\" or \"2h45m\". Valid time units are \"ns\", \"us\" (or \"µs\" U+00B5 or \"μs\" U+03BC), \"ms\", \"s\", \"m\", \"h\".\n\nWhen omitted, this means the user has no opinion and the platform is left to choose a reasonable default. This default is subject to change over time. The current default is 5s.", + "connectTimeout": "connectTimeout defines the maximum time to wait for a connection attempt to a server/backend to succeed.\n\nThis field expects an unsigned duration string of decimal numbers, each with optional fraction and a unit suffix, e.g. \"300ms\", \"1.5h\" or \"2h45m\". Valid time units are \"ns\", \"us\" (or \"µs\" U+00B5 or \"μs\" U+03BC), \"ms\", \"s\", \"m\", \"h\".\n\nWhen omitted, this means the user has no opinion and the platform is left to choose a reasonable default. 
This default is subject to change over time. The current default is 5s.", "tlsInspectDelay": "tlsInspectDelay defines how long the router can hold data to find a matching route.\n\nSetting this too short can cause the router to fall back to the default certificate for edge-terminated or reencrypt routes even when a better matching certificate could be used.\n\nIf unset, the default inspect delay is 5s", "healthCheckInterval": "healthCheckInterval defines how long the router waits between two consecutive health checks on its configured backends. This value is applied globally as a default for all routes, but may be overridden per-route by the route annotation \"router.openshift.io/haproxy.health.check.interval\".\n\nExpects an unsigned duration string of decimal numbers, each with optional fraction and a unit suffix, eg \"300ms\", \"1.5h\" or \"2h45m\". Valid time units are \"ns\", \"us\" (or \"µs\" U+00B5 or \"μs\" U+03BC), \"ms\", \"s\", \"m\", \"h\".\n\nSetting this to less than 5s can cause excess traffic due to too frequent TCP health checks and accompanying SYN packet storms. Alternatively, setting this too high can result in increased latency, due to backend servers that are no longer available, but haven't yet been detected as such.\n\nAn empty or zero healthCheckInterval means no opinion and IngressController chooses a default, which is subject to change over time. Currently the default healthCheckInterval value is 5s.\n\nCurrently the minimum allowed value is 1s and the maximum allowed value is 2147483647ms (24.85 days). Both are subject to change over time.", "maxConnections": "maxConnections defines the maximum number of simultaneous connections that can be established per HAProxy process. Increasing this value allows each ingress controller pod to handle more connections but at the cost of additional system resources being consumed.\n\nPermitted values are: empty, 0, -1, and the range 2000-2000000.\n\nIf this field is empty or 0, the IngressController will use the default value of 50000, but the default is subject to change in future releases.\n\nIf the value is -1 then HAProxy will dynamically compute a maximum value based on the available ulimits in the running container. Selecting -1 (i.e., auto) will result in a large value being computed (~520000 on OpenShift >=4.10 clusters) and therefore each HAProxy process will incur significant memory usage compared to the current default of 50000.\n\nSetting a value that is greater than the current operating system limit will prevent the HAProxy process from starting.\n\nIf you choose a discrete value (e.g., 750000) and the router pod is migrated to a new node, there's no guarantee that that new node has identical ulimits configured. In such a scenario the pod would fail to start. 
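Pulling together a few of the fields whose docs change in this hunk, the following is an illustrative (non-authoritative) IngressController manifest showing the new idleConnectionTerminationPolicy field next to some of the tuningOptions it interacts with; the values are examples, not recommendations:

```yaml
# Sketch only; values chosen to mirror the documented defaults.
apiVersion: operator.openshift.io/v1
kind: IngressController
metadata:
  name: default
  namespace: openshift-ingress-operator
spec:
  httpEmptyRequestsPolicy: Respond            # default; "Ignore" drops empty connections silently
  idleConnectionTerminationPolicy: Deferred   # keep idle keep-alive connections open across router reloads
  tuningOptions:
    connectTimeout: 5s                        # matches the documented current default
    healthCheckInterval: 10s                  # keep >= 5s to avoid excess TCP health-check traffic
    maxConnections: 50000                     # empty or 0 also selects the 50000 default
```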
If you have nodes with different ulimits configured (e.g., different tuned profiles) and you choose a discrete value then the guidance is to use -1 and let the value be computed dynamically at runtime.\n\nYou can monitor memory usage for router containers with the following metric: 'container_memory_working_set_bytes{container=\"router\",namespace=\"openshift-ingress\"}'.\n\nYou can monitor memory usage of individual HAProxy processes in router containers with the following metric: 'container_memory_working_set_bytes{container=\"router\",namespace=\"openshift-ingress\"}/container_processes{container=\"router\",namespace=\"openshift-ingress\"}'.", @@ -1264,7 +1307,7 @@ func (KubeAPIServer) SwaggerDoc() map[string]string { var map_KubeAPIServerList = map[string]string{ "": "KubeAPIServerList is a collection of items\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items contains the items", + "items": "items contains the items", } func (KubeAPIServerList) SwaggerDoc() map[string]string { @@ -1302,7 +1345,7 @@ func (KubeControllerManager) SwaggerDoc() map[string]string { var map_KubeControllerManagerList = map[string]string{ "": "KubeControllerManagerList is a collection of items\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items contains the items", + "items": "items contains the items", } func (KubeControllerManagerList) SwaggerDoc() map[string]string { @@ -1329,7 +1372,7 @@ func (KubeStorageVersionMigrator) SwaggerDoc() map[string]string { var map_KubeStorageVersionMigratorList = map[string]string{ "": "KubeStorageVersionMigratorList is a collection of items\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items contains the items", + "items": "items contains the items", } func (KubeStorageVersionMigratorList) SwaggerDoc() map[string]string { @@ -1350,7 +1393,7 @@ func (MachineConfiguration) SwaggerDoc() map[string]string { var map_MachineConfigurationList = map[string]string{ "": "MachineConfigurationList is a collection of items\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items contains the items", + "items": "items contains the items", } func (MachineConfigurationList) SwaggerDoc() map[string]string { @@ -1358,7 +1401,7 @@ func (MachineConfigurationList) SwaggerDoc() map[string]string { } var map_MachineConfigurationSpec = map[string]string{ - "managedBootImages": "managedBootImages allows configuration for the management of boot images for machine resources within the cluster. 
This configuration allows users to select resources that should be updated to the latest boot images during cluster upgrades, ensuring that new machines always boot with the current cluster version's boot image. When omitted, no boot images will be updated.", + "managedBootImages": "managedBootImages allows configuration for the management of boot images for machine resources within the cluster. This configuration allows users to select resources that should be updated to the latest boot images during cluster upgrades, ensuring that new machines always boot with the current cluster version's boot image. When omitted, this means no opinion and the platform is left to choose a reasonable default, which is subject to change over time. The default for each machine manager mode is All for GCP and AWS platforms, and None for all other platforms.", "nodeDisruptionPolicy": "nodeDisruptionPolicy allows an admin to set granular node disruption actions for MachineConfig-based updates, such as drains, service reloads, etc. Specifying this will allow for less downtime when doing small configuration updates to the cluster. This configuration has no effect on cluster upgrades which will still incur node disruption where required.", } @@ -1370,6 +1413,7 @@ var map_MachineConfigurationStatus = map[string]string{ "observedGeneration": "observedGeneration is the last generation change you've dealt with", "conditions": "conditions is a list of conditions and their status", "nodeDisruptionPolicyStatus": "nodeDisruptionPolicyStatus status reflects what the latest cluster-validated policies are, and will be used by the Machine Config Daemon during future node updates.", + "managedBootImagesStatus": "managedBootImagesStatus reflects what the latest cluster-validated boot image configuration is and will be used by Machine Config Controller while performing boot image updates.", } func (MachineConfigurationStatus) SwaggerDoc() map[string]string { @@ -1388,7 +1432,7 @@ func (MachineManager) SwaggerDoc() map[string]string { } var map_MachineManagerSelector = map[string]string{ - "mode": "mode determines how machine managers will be selected for updates. Valid values are All and Partial. All means that every resource matched by the machine manager will be updated. Partial requires specified selector(s) and allows customisation of which resources matched by the machine manager will be updated.", + "mode": "mode determines how machine managers will be selected for updates. Valid values are All and Partial. All means that every resource matched by the machine manager will be updated. Partial requires specified selector(s) and allows customisation of which resources matched by the machine manager will be updated. None means that every resource matched by the machine manager will not be updated.", "partial": "partial provides label selector(s) that can be used to match machine management resources. Only permitted when mode is set to \"Partial\".", } @@ -1545,7 +1589,7 @@ var map_AdditionalNetworkDefinition = map[string]string{ "name": "name is the name of the network. This will be populated in the resulting CRD This must be unique.", "namespace": "namespace is the namespace of the network. 
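The managedBootImages and MachineManagerSelector docs above (including the new None mode) belong to the operator.openshift.io/v1 MachineConfiguration singleton. A hedged sketch follows; the machineManagers list and its resource/apiGroup fields are assumed from that API rather than quoted from the hunks above, and only mode/partial are documented here:

```yaml
# Sketch under assumptions: resource/apiGroup/machineResourceSelector names are
# assumed from the MachineConfiguration API, not from this diff.
apiVersion: operator.openshift.io/v1
kind: MachineConfiguration
metadata:
  name: cluster
spec:
  managedBootImages:
    machineManagers:
    - resource: machinesets
      apiGroup: machine.openshift.io
      selection:
        mode: Partial                     # All updates every matched resource; None (new) opts everything out
        partial:
          machineResourceSelector:
            matchLabels:
              boot-image-update: "enabled"
```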
This will be populated in the resulting CRD If not given the network will be created in the default namespace.", "rawCNIConfig": "rawCNIConfig is the raw CNI configuration json to create in the NetworkAttachmentDefinition CRD", - "simpleMacvlanConfig": "SimpleMacvlanConfig configures the macvlan interface in case of type:NetworkTypeSimpleMacvlan", + "simpleMacvlanConfig": "simpleMacvlanConfig configures the macvlan interface in case of type:NetworkTypeSimpleMacvlan", } func (AdditionalNetworkDefinition) SwaggerDoc() map[string]string { @@ -1572,7 +1616,7 @@ func (ClusterNetworkEntry) SwaggerDoc() map[string]string { var map_DefaultNetworkDefinition = map[string]string{ "": "DefaultNetworkDefinition represents a single network plugin's configuration. type must be specified, along with exactly one \"Config\" that matches the type.", "type": "type is the type of network All NetworkTypes are supported except for NetworkTypeRaw", - "openshiftSDNConfig": "openShiftSDNConfig was previously used to configure the openshift-sdn plugin. DEPRECATED: OpenShift SDN is no longer supported.", + "openshiftSDNConfig": "openshiftSDNConfig was previously used to configure the openshift-sdn plugin. DEPRECATED: OpenShift SDN is no longer supported.", "ovnKubernetesConfig": "ovnKubernetesConfig configures the ovn-kubernetes plugin.", } @@ -1611,8 +1655,8 @@ func (FeaturesMigration) SwaggerDoc() map[string]string { var map_GatewayConfig = map[string]string{ "": "GatewayConfig holds node gateway-related parsed config file parameters and command-line overrides", - "routingViaHost": "RoutingViaHost allows pod egress traffic to exit via the ovn-k8s-mp0 management port into the host before sending it out. If this is not set, traffic will always egress directly from OVN to outside without touching the host stack. Setting this to true means hardware offload will not be supported. Default is false if GatewayConfig is specified.", - "ipForwarding": "IPForwarding controls IP forwarding for all traffic on OVN-Kubernetes managed interfaces (such as br-ex). By default this is set to Restricted, and Kubernetes related traffic is still forwarded appropriately, but other IP traffic will not be routed by the OCP node. If there is a desire to allow the host to forward traffic across OVN-Kubernetes managed interfaces, then set this field to \"Global\". The supported values are \"Restricted\" and \"Global\".", + "routingViaHost": "routingViaHost allows pod egress traffic to exit via the ovn-k8s-mp0 management port into the host before sending it out. If this is not set, traffic will always egress directly from OVN to outside without touching the host stack. Setting this to true means hardware offload will not be supported. Default is false if GatewayConfig is specified.", + "ipForwarding": "ipForwarding controls IP forwarding for all traffic on OVN-Kubernetes managed interfaces (such as br-ex). By default this is set to Restricted, and Kubernetes related traffic is still forwarded appropriately, but other IP traffic will not be routed by the OCP node. If there is a desire to allow the host to forward traffic across OVN-Kubernetes managed interfaces, then set this field to \"Global\". The supported values are \"Restricted\" and \"Global\".", "ipv4": "ipv4 allows users to configure IP settings for IPv4 connections. When omitted, this means no opinion and the default configuration is used. 
Check individual members fields within ipv4 for details of default values.", "ipv6": "ipv6 allows users to configure IP settings for IPv6 connections. When omitted, this means no opinion and the default configuration is used. Check individual members fields within ipv6 for details of default values.", } @@ -1622,8 +1666,8 @@ func (GatewayConfig) SwaggerDoc() map[string]string { } var map_HybridOverlayConfig = map[string]string{ - "hybridClusterNetwork": "HybridClusterNetwork defines a network space given to nodes on an additional overlay network.", - "hybridOverlayVXLANPort": "HybridOverlayVXLANPort defines the VXLAN port number to be used by the additional overlay network. Default is 4789", + "hybridClusterNetwork": "hybridClusterNetwork defines a network space given to nodes on an additional overlay network.", + "hybridOverlayVXLANPort": "hybridOverlayVXLANPort defines the VXLAN port number to be used by the additional overlay network. Default is 4789", } func (HybridOverlayConfig) SwaggerDoc() map[string]string { @@ -1632,8 +1676,8 @@ func (HybridOverlayConfig) SwaggerDoc() map[string]string { var map_IPAMConfig = map[string]string{ "": "IPAMConfig contains configurations for IPAM (IP Address Management)", - "type": "Type is the type of IPAM module will be used for IP Address Management(IPAM). The supported values are IPAMTypeDHCP, IPAMTypeStatic", - "staticIPAMConfig": "StaticIPAMConfig configures the static IP address in case of type:IPAMTypeStatic", + "type": "type is the type of IPAM module will be used for IP Address Management(IPAM). The supported values are IPAMTypeDHCP, IPAMTypeStatic", + "staticIPAMConfig": "staticIPAMConfig configures the static IP address in case of type:IPAMTypeStatic", } func (IPAMConfig) SwaggerDoc() map[string]string { @@ -1650,12 +1694,22 @@ func (IPFIXConfig) SwaggerDoc() map[string]string { var map_IPsecConfig = map[string]string{ "mode": "mode defines the behaviour of the ipsec configuration within the platform. Valid values are `Disabled`, `External` and `Full`. When 'Disabled', ipsec will not be enabled at the node level. When 'External', ipsec is enabled on the node level but requires the user to configure the secure communication parameters. This mode is for external secure communications and the configuration can be done using the k8s-nmstate operator. When 'Full', ipsec is configured on the node level and inter-pod secure communication within the cluster is configured. Note with `Full`, if ipsec is desired for communication with external (to the cluster) entities (such as storage arrays), this is left to the user to configure.", + "full": "full defines configuration parameters for the IPsec `Full` mode. This is permitted only when mode is configured with `Full`, and forbidden otherwise.", } func (IPsecConfig) SwaggerDoc() map[string]string { return map_IPsecConfig } +var map_IPsecFullModeConfig = map[string]string{ + "": "IPsecFullModeConfig defines configuration parameters for the IPsec `Full` mode.", + "encapsulation": "encapsulation option to configure libreswan on how inter-pod traffic across nodes are encapsulated to handle NAT traversal. When configured it uses UDP port 4500 for the encapsulation. Valid values are Always, Auto and omitted. Always means enable UDP encapsulation regardless of whether NAT is detected. Auto means enable UDP encapsulation based on the detection of NAT. When omitted, this means no opinion and the platform is left to choose a reasonable default, which is subject to change over time. 
The current default is Auto.", +} + +func (IPsecFullModeConfig) SwaggerDoc() map[string]string { + return map_IPsecFullModeConfig +} + var map_IPv4GatewayConfig = map[string]string{ "": "IPV4GatewayConfig holds the configuration paramaters for IPV4 connections in the GatewayConfig for OVN-Kubernetes", "internalMasqueradeSubnet": "internalMasqueradeSubnet contains the masquerade addresses in IPV4 CIDR format used internally by ovn-kubernetes to enable host to service traffic. Each host in the cluster is configured with these addresses, as well as the shared gateway bridge interface. The values can be changed after installation. The subnet chosen should not overlap with other networks specified for OVN-Kubernetes as well as other networks used on the host. Additionally the subnet must be large enough to accommodate 6 IPs (maximum prefix length /29). When omitted, this means no opinion and the platform is left to choose a reasonable default which is subject to change over time. The current default subnet is 169.254.169.0/29 The value must be in proper IPV4 CIDR format", @@ -1685,7 +1739,7 @@ func (IPv6GatewayConfig) SwaggerDoc() map[string]string { var map_IPv6OVNKubernetesConfig = map[string]string{ "internalTransitSwitchSubnet": "internalTransitSwitchSubnet is a v4 subnet in IPV4 CIDR format used internally by OVN-Kubernetes for the distributed transit switch in the OVN Interconnect architecture that connects the cluster routers on each node together to enable east west traffic. The subnet chosen should not overlap with other networks specified for OVN-Kubernetes as well as other networks used on the host. The value cannot be changed after installation. When ommitted, this means no opinion and the platform is left to choose a reasonable default which is subject to change over time. The subnet must be large enough to accomadate one IP per node in your cluster The current default subnet is fd97::/64 The value must be in proper IPV6 CIDR format Note that IPV6 dual addresses are not permitted", - "internalJoinSubnet": "internalJoinSubnet is a v6 subnet used internally by ovn-kubernetes in case the default one is being already used by something else. It must not overlap with any other subnet being used by OpenShift or by the node network. The size of the subnet must be larger than the number of nodes. The value cannot be changed after installation. The subnet must be large enough to accomadate one IP per node in your cluster The current default value is fd98::/48 The value must be in proper IPV6 CIDR format Note that IPV6 dual addresses are not permitted", + "internalJoinSubnet": "internalJoinSubnet is a v6 subnet used internally by ovn-kubernetes in case the default one is being already used by something else. It must not overlap with any other subnet being used by OpenShift or by the node network. The size of the subnet must be larger than the number of nodes. The value cannot be changed after installation. 
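The new IPsecFullModeConfig documented above hangs off ovnKubernetesConfig.ipsecConfig and is only permitted when mode is Full. A hedged sketch of the cluster-scoped operator Network CR using it, together with the gatewayConfig fields whose doc strings are touched in the same file:

```yaml
# Sketch only; values are examples.
apiVersion: operator.openshift.io/v1
kind: Network
metadata:
  name: cluster
spec:
  defaultNetwork:
    type: OVNKubernetes
    ovnKubernetesConfig:
      ipsecConfig:
        mode: Full               # Disabled | External | Full
        full:
          encapsulation: Auto    # Always forces UDP 4500 encapsulation; Auto enables it only when NAT is detected
      gatewayConfig:
        routingViaHost: false    # egress directly from OVN; true disables hardware offload
        ipForwarding: Restricted # Global would let the host forward traffic across OVN-managed interfaces
```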
The subnet must be large enough to accomadate one IP per node in your cluster The current default value is fd98::/64 The value must be in proper IPV6 CIDR format Note that IPV6 dual addresses are not permitted", } func (IPv6OVNKubernetesConfig) SwaggerDoc() map[string]string { @@ -1756,7 +1810,7 @@ var map_NetworkSpec = map[string]string{ "serviceNetwork": "serviceNetwork is the ip address pool to use for Service IPs Currently, all existing network providers only support a single value here, but this is an array to allow for growth.", "defaultNetwork": "defaultNetwork is the \"default\" network that all pods will receive", "additionalNetworks": "additionalNetworks is a list of extra networks to make available to pods when multiple networks are enabled.", - "disableMultiNetwork": "disableMultiNetwork specifies whether or not multiple pod network support should be disabled. If unset, this property defaults to 'false' and multiple network support is enabled.", + "disableMultiNetwork": "disableMultiNetwork defaults to 'false' and this setting enables the pod multi-networking capability. disableMultiNetwork when set to 'true' at cluster install time does not install the components, typically the Multus CNI and the network-attachment-definition CRD, that enable the pod multi-networking capability. Setting the parameter to 'true' might be useful when you need install third-party CNI plugins, but these plugins are not supported by Red Hat. Changing the parameter value as a postinstallation cluster task has no effect.", "useMultiNetworkPolicy": "useMultiNetworkPolicy enables a controller which allows for MultiNetworkPolicy objects to be used on additional networks as created by Multus CNI. MultiNetworkPolicy are similar to NetworkPolicy objects, but NetworkPolicy objects only apply to the primary interface. With MultiNetworkPolicy, you can control the traffic that a pod can receive over the secondary interfaces. If unset, this property defaults to 'false' and MultiNetworkPolicy objects are ignored. If 'disableMultiNetwork' is 'true' then the value of this field is ignored.", "deployKubeProxy": "deployKubeProxy specifies whether or not a standalone kube-proxy should be deployed by the operator. Some network providers include kube-proxy or similar functionality. If unset, the plugin will attempt to select the correct value, which is false when ovn-kubernetes is used and true otherwise.", "disableNetworkDiagnostics": "disableNetworkDiagnostics specifies whether or not PodNetworkConnectivityCheck CRs from a test pod to every node, apiserver and LB should be disabled or not. If unset, this property defaults to 'false' and network diagnostics is enabled. Setting this to 'true' would reduce the additional load of the pods performing the checks.", @@ -1782,12 +1836,12 @@ var map_OVNKubernetesConfig = map[string]string{ "": "ovnKubernetesConfig contains the configuration parameters for networks using the ovn-kubernetes network project", "mtu": "mtu is the MTU to use for the tunnel interface. This must be 100 bytes smaller than the uplink mtu. Default is 1400", "genevePort": "geneve port is the UDP port to be used by geneve encapulation. 
Default is 6081", - "hybridOverlayConfig": "HybridOverlayConfig configures an additional overlay network for peers that are not using OVN.", + "hybridOverlayConfig": "hybridOverlayConfig configures an additional overlay network for peers that are not using OVN.", "ipsecConfig": "ipsecConfig enables and configures IPsec for pods on the pod network within the cluster.", "policyAuditConfig": "policyAuditConfig is the configuration for network policy audit events. If unset, reported defaults are used.", "gatewayConfig": "gatewayConfig holds the configuration for node gateway options.", "v4InternalSubnet": "v4InternalSubnet is a v4 subnet used internally by ovn-kubernetes in case the default one is being already used by something else. It must not overlap with any other subnet being used by OpenShift or by the node network. The size of the subnet must be larger than the number of nodes. The value cannot be changed after installation. Default is 100.64.0.0/16", - "v6InternalSubnet": "v6InternalSubnet is a v6 subnet used internally by ovn-kubernetes in case the default one is being already used by something else. It must not overlap with any other subnet being used by OpenShift or by the node network. The size of the subnet must be larger than the number of nodes. The value cannot be changed after installation. Default is fd98::/48", + "v6InternalSubnet": "v6InternalSubnet is a v6 subnet used internally by ovn-kubernetes in case the default one is being already used by something else. It must not overlap with any other subnet being used by OpenShift or by the node network. The size of the subnet must be larger than the number of nodes. The value cannot be changed after installation. Default is fd98::/64", "egressIPConfig": "egressIPConfig holds the configuration for EgressIP options.", "ipv4": "ipv4 allows users to configure IP settings for IPv4 connections. When ommitted, this means no opinions and the default configuration is used. Check individual fields within ipv4 for details of default values.", "ipv6": "ipv6 allows users to configure IP settings for IPv6 connections. When ommitted, this means no opinions and the default configuration is used. Check individual fields within ipv4 for details of default values.", @@ -1845,7 +1899,7 @@ func (SFlowConfig) SwaggerDoc() map[string]string { var map_SimpleMacvlanConfig = map[string]string{ "": "SimpleMacvlanConfig contains configurations for macvlan interface.", "master": "master is the host interface to create the macvlan interface from. If not specified, it will be default route interface", - "ipamConfig": "IPAMConfig configures IPAM module will be used for IP Address Management (IPAM).", + "ipamConfig": "ipamConfig configures IPAM module will be used for IP Address Management (IPAM).", "mode": "mode is the macvlan mode: bridge, private, vepa, passthru. The default is bridge", "mtu": "mtu is the mtu to use for the macvlan interface. 
if unset, host's kernel will select the value.", } @@ -1856,8 +1910,8 @@ func (SimpleMacvlanConfig) SwaggerDoc() map[string]string { var map_StaticIPAMAddresses = map[string]string{ "": "StaticIPAMAddresses provides IP address and Gateway for static IPAM addresses", - "address": "Address is the IP address in CIDR format", - "gateway": "Gateway is IP inside of subnet to designate as the gateway", + "address": "address is the IP address in CIDR format", + "gateway": "gateway is IP inside of subnet to designate as the gateway", } func (StaticIPAMAddresses) SwaggerDoc() map[string]string { @@ -1866,9 +1920,9 @@ func (StaticIPAMAddresses) SwaggerDoc() map[string]string { var map_StaticIPAMConfig = map[string]string{ "": "StaticIPAMConfig contains configurations for static IPAM (IP Address Management)", - "addresses": "Addresses configures IP address for the interface", - "routes": "Routes configures IP routes for the interface", - "dns": "DNS configures DNS for the interface", + "addresses": "addresses configures IP address for the interface", + "routes": "routes configures IP routes for the interface", + "dns": "dns configures DNS for the interface", } func (StaticIPAMConfig) SwaggerDoc() map[string]string { @@ -1877,9 +1931,9 @@ func (StaticIPAMConfig) SwaggerDoc() map[string]string { var map_StaticIPAMDNS = map[string]string{ "": "StaticIPAMDNS provides DNS related information for static IPAM", - "nameservers": "Nameservers points DNS servers for IP lookup", - "domain": "Domain configures the domainname the local domain used for short hostname lookups", - "search": "Search configures priority ordered search domains for short hostname lookups", + "nameservers": "nameservers points DNS servers for IP lookup", + "domain": "domain configures the domainname the local domain used for short hostname lookups", + "search": "search configures priority ordered search domains for short hostname lookups", } func (StaticIPAMDNS) SwaggerDoc() map[string]string { @@ -1888,14 +1942,35 @@ func (StaticIPAMDNS) SwaggerDoc() map[string]string { var map_StaticIPAMRoutes = map[string]string{ "": "StaticIPAMRoutes provides Destination/Gateway pairs for static IPAM routes", - "destination": "Destination points the IP route destination", - "gateway": "Gateway is the route's next-hop IP address If unset, a default gateway is assumed (as determined by the CNI plugin).", + "destination": "destination points the IP route destination", + "gateway": "gateway is the route's next-hop IP address If unset, a default gateway is assumed (as determined by the CNI plugin).", } func (StaticIPAMRoutes) SwaggerDoc() map[string]string { return map_StaticIPAMRoutes } +var map_OLM = map[string]string{ + "": "OLM provides information to configure an operator to manage the OLM controllers\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec holds user settable values for configuration", + "status": "status holds observed values from the cluster. 
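The SimpleMacvlanConfig and static IPAM docs above come together under additionalNetworks on the same Network CR. The sketch below is illustrative only; the literal enum values "SimpleMacvlan" and "Static" are assumed from the NetworkTypeSimpleMacvlan / IPAMTypeStatic constants referenced above, and all addresses are placeholders:

```yaml
# Sketch only; enum spellings and addresses are assumptions.
apiVersion: operator.openshift.io/v1
kind: Network
metadata:
  name: cluster
spec:
  additionalNetworks:
  - name: macvlan-net
    namespace: example-project           # created in "default" when omitted
    type: SimpleMacvlan
    simpleMacvlanConfig:
      master: eth1                       # falls back to the default-route interface when unset
      mode: bridge                       # bridge is the default macvlan mode
      mtu: 1500
      ipamConfig:
        type: Static
        staticIPAMConfig:
          addresses:
          - address: 192.168.10.5/24
            gateway: 192.168.10.1
          routes:
          - destination: 10.0.0.0/16
            gateway: 192.168.10.1
          dns:
            nameservers:
            - 192.168.10.2
            domain: example.internal
            search:
            - example.internal
```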
They may not be overridden.", +} + +func (OLM) SwaggerDoc() map[string]string { + return map_OLM +} + +var map_OLMList = map[string]string{ + "": "OLMList is a collection of items\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "items contains the items", +} + +func (OLMList) SwaggerDoc() map[string]string { + return map_OLMList +} + var map_OpenShiftAPIServer = map[string]string{ "": "OpenShiftAPIServer provides information to configure an operator to manage openshift-apiserver.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", @@ -1910,7 +1985,7 @@ func (OpenShiftAPIServer) SwaggerDoc() map[string]string { var map_OpenShiftAPIServerList = map[string]string{ "": "OpenShiftAPIServerList is a collection of items\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items contains the items", + "items": "items contains the items", } func (OpenShiftAPIServerList) SwaggerDoc() map[string]string { @@ -1929,7 +2004,7 @@ func (OpenShiftControllerManager) SwaggerDoc() map[string]string { var map_OpenShiftControllerManagerList = map[string]string{ "": "OpenShiftControllerManagerList is a collection of items\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items contains the items", + "items": "items contains the items", } func (OpenShiftControllerManagerList) SwaggerDoc() map[string]string { @@ -1950,7 +2025,7 @@ func (KubeScheduler) SwaggerDoc() map[string]string { var map_KubeSchedulerList = map[string]string{ "": "KubeSchedulerList is a collection of items\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items contains the items", + "items": "items contains the items", } func (KubeSchedulerList) SwaggerDoc() map[string]string { @@ -1971,7 +2046,7 @@ func (ServiceCA) SwaggerDoc() map[string]string { var map_ServiceCAList = map[string]string{ "": "ServiceCAList is a collection of items\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard list's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items contains the items", + "items": "items contains the items", } func (ServiceCAList) SwaggerDoc() map[string]string { @@ -1990,7 +2065,7 @@ func (ServiceCatalogAPIServer) SwaggerDoc() map[string]string { var map_ServiceCatalogAPIServerList = map[string]string{ "": "ServiceCatalogAPIServerList is a collection of items DEPRECATED: will be removed in 4.6\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items contains the items", + "items": "items contains the items", } func (ServiceCatalogAPIServerList) SwaggerDoc() map[string]string { @@ -2009,7 +2084,7 @@ func (ServiceCatalogControllerManager) SwaggerDoc() map[string]string { var map_ServiceCatalogControllerManagerList = map[string]string{ "": "ServiceCatalogControllerManagerList is a collection of items DEPRECATED: will be removed in 4.6\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items contains the items", + "items": "items contains the items", } func (ServiceCatalogControllerManagerList) SwaggerDoc() map[string]string { @@ -2038,7 +2113,7 @@ func (StorageList) SwaggerDoc() map[string]string { var map_StorageSpec = map[string]string{ "": "StorageSpec is the specification of the desired behavior of the cluster storage operator.", - "vsphereStorageDriver": "VSphereStorageDriver indicates the storage driver to use on VSphere clusters. Once this field is set to CSIWithMigrationDriver, it can not be changed. If this is empty, the platform will choose a good default, which may change over time without notice. The current default is CSIWithMigrationDriver and may not be changed. DEPRECATED: This field will be removed in a future release.", + "vsphereStorageDriver": "vsphereStorageDriver indicates the storage driver to use on VSphere clusters. Once this field is set to CSIWithMigrationDriver, it can not be changed. If this is empty, the platform will choose a good default, which may change over time without notice. The current default is CSIWithMigrationDriver and may not be changed. 
DEPRECATED: This field will be removed in a future release.", } func (StorageSpec) SwaggerDoc() map[string]string { diff --git a/vendor/github.com/openshift/api/operator/v1alpha1/register.go b/vendor/github.com/openshift/api/operator/v1alpha1/register.go index 0921431c0..3e9b09cce 100644 --- a/vendor/github.com/openshift/api/operator/v1alpha1/register.go +++ b/vendor/github.com/openshift/api/operator/v1alpha1/register.go @@ -39,6 +39,8 @@ func addKnownTypes(scheme *runtime.Scheme) error { &OLMList{}, &EtcdBackup{}, &EtcdBackupList{}, + &ClusterVersionOperator{}, + &ClusterVersionOperatorList{}, ) return nil diff --git a/vendor/github.com/openshift/api/operator/v1alpha1/types.go b/vendor/github.com/openshift/api/operator/v1alpha1/types.go index 4d5a207e6..932e8c583 100644 --- a/vendor/github.com/openshift/api/operator/v1alpha1/types.go +++ b/vendor/github.com/openshift/api/operator/v1alpha1/types.go @@ -149,7 +149,7 @@ type OperatorStatus struct { type GenericOperatorConfig struct { metav1.TypeMeta `json:",inline"` - // ServingInfo is the HTTP serving information for the controller's endpoints + // servingInfo is the HTTP serving information for the controller's endpoints ServingInfo configv1.HTTPServingInfo `json:"servingInfo,omitempty"` // leaderElection provides information to elect a leader. Only override this if you have a specific need diff --git a/vendor/github.com/openshift/api/operator/v1alpha1/types_clusterversion.go b/vendor/github.com/openshift/api/operator/v1alpha1/types_clusterversion.go new file mode 100644 index 000000000..ec9cfea9f --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1alpha1/types_clusterversion.go @@ -0,0 +1,76 @@ +package v1alpha1 + +import ( + operatorv1 "github.com/openshift/api/operator/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ClusterVersionOperator holds cluster-wide information about the Cluster Version Operator. +// +// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. +// +openshift:compatibility-gen:level=4 +// +openshift:file-pattern=cvoRunLevel=0000_00,operatorName=cluster-version-operator,operatorOrdering=01 +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:resource:path=clusterversionoperators,scope=Cluster +// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/2044 +// +openshift:enable:FeatureGate=ClusterVersionOperatorConfiguration +// +kubebuilder:validation:XValidation:rule="self.metadata.name == 'cluster'",message="ClusterVersionOperator is a singleton; the .metadata.name field must be 'cluster'" +type ClusterVersionOperator struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata"` + + // spec is the specification of the desired behavior of the Cluster Version Operator. + // +required + Spec ClusterVersionOperatorSpec `json:"spec"` + + // status is the most recently observed status of the Cluster Version Operator. + // +optional + Status ClusterVersionOperatorStatus `json:"status"` +} + +// ClusterVersionOperatorSpec is the specification of the desired behavior of the Cluster Version Operator. 
+type ClusterVersionOperatorSpec struct { + // operatorLogLevel is an intent based logging for the operator itself. It does not give fine grained control, but it is a + // simple way to manage coarse grained logging choices that operators have to interpret for themselves. + // + // Valid values are: "Normal", "Debug", "Trace", "TraceAll". + // Defaults to "Normal". + // +optional + // +kubebuilder:default=Normal + OperatorLogLevel operatorv1.LogLevel `json:"operatorLogLevel,omitempty"` +} + +// ClusterVersionOperatorStatus defines the observed status of the Cluster Version Operator. +type ClusterVersionOperatorStatus struct { + // observedGeneration represents the most recent generation observed by the operator and specifies the version of + // the spec field currently being synced. + // +optional + // +kubebuilder:validation:XValidation:rule="self >= oldSelf",message="observedGeneration must only increase" + ObservedGeneration int64 `json:"observedGeneration,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ClusterVersionOperatorList is a collection of ClusterVersionOperators. +// +// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. +// +openshift:compatibility-gen:level=4 +type ClusterVersionOperatorList struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ListMeta `json:"metadata"` + + // items is a list of ClusterVersionOperators. + // +optional + Items []ClusterVersionOperator `json:"items,omitempty"` +} diff --git a/vendor/github.com/openshift/api/operator/v1alpha1/types_etcdbackup.go b/vendor/github.com/openshift/api/operator/v1alpha1/types_etcdbackup.go index 2654f5700..fe56b0eab 100644 --- a/vendor/github.com/openshift/api/operator/v1alpha1/types_etcdbackup.go +++ b/vendor/github.com/openshift/api/operator/v1alpha1/types_etcdbackup.go @@ -23,22 +23,19 @@ type EtcdBackup struct { metav1.ObjectMeta `json:"metadata,omitempty"` // spec holds user settable values for configuration - // +kubebuilder:validation:Required // +required Spec EtcdBackupSpec `json:"spec"` // status holds observed values from the cluster. They may not be overridden. - // +kubebuilder:validation:Optional // +optional Status EtcdBackupStatus `json:"status"` } type EtcdBackupSpec struct { - // PVCName specifies the name of the PersistentVolumeClaim (PVC) which binds a PersistentVolume where the + // pvcName specifies the name of the PersistentVolumeClaim (PVC) which binds a PersistentVolume where the // etcd backup file would be saved // The PVC itself must always be created in the "openshift-etcd" namespace // If the PVC is left unspecified "" then the platform will choose a reasonable default location to save the backup. // In the future this would be backups saved across the control-plane master nodes. - // +kubebuilder:validation:Optional // +optional // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="pvcName is immutable once set" PVCName string `json:"pvcName"` @@ -47,16 +44,14 @@ type EtcdBackupSpec struct { // +kubebuilder:validation:Optional type EtcdBackupStatus struct { // conditions provide details on the status of the etcd backup job. 
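For orientation, the ClusterVersionOperator resource introduced above can be expressed as a manifest like the following sketch. Everything in it is taken from the type definition itself (group operator.openshift.io, version v1alpha1, the CEL rule forcing the name to be 'cluster', and the Normal/Debug/Trace/TraceAll log levels); it is illustrative only and gated behind the ClusterVersionOperatorConfiguration feature gate.

apiVersion: operator.openshift.io/v1alpha1
kind: ClusterVersionOperator
metadata:
  name: cluster            # the XValidation rule above rejects any other name
spec:
  operatorLogLevel: Debug  # Normal (default), Debug, Trace, or TraceAll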
- // +patchMergeKey=type - // +patchStrategy=merge // +listType=map // +listMapKey=type // +optional - Conditions []metav1.Condition `json:"conditions" patchStrategy:"merge" patchMergeKey:"type"` + Conditions []metav1.Condition `json:"conditions,omitempty"` // backupJob is the reference to the Job that executes the backup. // Optional - // +kubebuilder:validation:Optional + // +optional BackupJob *BackupJobReference `json:"backupJob"` } @@ -67,13 +62,13 @@ type BackupJobReference struct { // this is always expected to be "openshift-etcd" since the user provided PVC // is also required to be in "openshift-etcd" // Required - // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:Pattern:=`^openshift-etcd$` Namespace string `json:"namespace"` // name is the name of the Job. // Required - // +kubebuilder:validation:Required + // +required Name string `json:"name"` } diff --git a/vendor/github.com/openshift/api/operator/v1alpha1/types_image_content_source_policy.go b/vendor/github.com/openshift/api/operator/v1alpha1/types_image_content_source_policy.go index 0830ed744..d4f7e17e6 100644 --- a/vendor/github.com/openshift/api/operator/v1alpha1/types_image_content_source_policy.go +++ b/vendor/github.com/openshift/api/operator/v1alpha1/types_image_content_source_policy.go @@ -25,7 +25,6 @@ type ImageContentSourcePolicy struct { metav1.ObjectMeta `json:"metadata,omitempty"` // spec holds user settable values for configuration - // +kubebuilder:validation:Required // +required Spec ImageContentSourcePolicySpec `json:"spec"` } diff --git a/vendor/github.com/openshift/api/operator/v1alpha1/types_olm.go b/vendor/github.com/openshift/api/operator/v1alpha1/types_olm.go index f29385b9f..41d160a20 100644 --- a/vendor/github.com/openshift/api/operator/v1alpha1/types_olm.go +++ b/vendor/github.com/openshift/api/operator/v1alpha1/types_olm.go @@ -17,6 +17,8 @@ import ( // +kubebuilder:object:root=true // +kubebuilder:resource:path=olms,scope=Cluster // +kubebuilder:subresource:status +// +kubebuilder:metadata:annotations=include.release.openshift.io/ibm-cloud-managed=false +// +kubebuilder:metadata:annotations=include.release.openshift.io/self-managed-high-availability=true // +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/1504 // +openshift:file-pattern=cvoRunLevel=0000_10,operatorName=operator-lifecycle-manager,operatorOrdering=01 // +openshift:enable:FeatureGate=NewOLM @@ -29,7 +31,7 @@ type OLM struct { metav1.ObjectMeta `json:"metadata"` //spec holds user settable values for configuration - // +kubebuilder:validation:Required + // +required Spec OLMSpec `json:"spec"` // status holds observed values from the cluster. They may not be overridden. 
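Similarly, a minimal EtcdBackup object using the pvcName field documented above might look like the sketch below; the object and PVC names are hypothetical, and per the field comments the PVC must live in the openshift-etcd namespace and pvcName is immutable once set.

apiVersion: operator.openshift.io/v1alpha1
kind: EtcdBackup
metadata:
  name: example-single-backup      # hypothetical name
  namespace: openshift-etcd        # assumed namespace, matching the PVC convention above
spec:
  pvcName: etcd-backup-pvc         # hypothetical PVC in openshift-etcd; immutable once set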
// +optional @@ -57,6 +59,6 @@ type OLMList struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ListMeta `json:"metadata"` - // Items contains the items + // items contains the items Items []OLM `json:"items"` } diff --git a/vendor/github.com/openshift/api/operator/v1alpha1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/operator/v1alpha1/zz_generated.deepcopy.go index 08ef2811a..f8daa0576 100644 --- a/vendor/github.com/openshift/api/operator/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/github.com/openshift/api/operator/v1alpha1/zz_generated.deepcopy.go @@ -26,6 +26,99 @@ func (in *BackupJobReference) DeepCopy() *BackupJobReference { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterVersionOperator) DeepCopyInto(out *ClusterVersionOperator) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + out.Status = in.Status + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterVersionOperator. +func (in *ClusterVersionOperator) DeepCopy() *ClusterVersionOperator { + if in == nil { + return nil + } + out := new(ClusterVersionOperator) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterVersionOperator) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterVersionOperatorList) DeepCopyInto(out *ClusterVersionOperatorList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ClusterVersionOperator, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterVersionOperatorList. +func (in *ClusterVersionOperatorList) DeepCopy() *ClusterVersionOperatorList { + if in == nil { + return nil + } + out := new(ClusterVersionOperatorList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterVersionOperatorList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterVersionOperatorSpec) DeepCopyInto(out *ClusterVersionOperatorSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterVersionOperatorSpec. +func (in *ClusterVersionOperatorSpec) DeepCopy() *ClusterVersionOperatorSpec { + if in == nil { + return nil + } + out := new(ClusterVersionOperatorSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ClusterVersionOperatorStatus) DeepCopyInto(out *ClusterVersionOperatorStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterVersionOperatorStatus. +func (in *ClusterVersionOperatorStatus) DeepCopy() *ClusterVersionOperatorStatus { + if in == nil { + return nil + } + out := new(ClusterVersionOperatorStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *DelegatedAuthentication) DeepCopyInto(out *DelegatedAuthentication) { *out = *in diff --git a/vendor/github.com/openshift/api/operator/v1alpha1/zz_generated.featuregated-crd-manifests.yaml b/vendor/github.com/openshift/api/operator/v1alpha1/zz_generated.featuregated-crd-manifests.yaml index 30c058236..0d595be80 100644 --- a/vendor/github.com/openshift/api/operator/v1alpha1/zz_generated.featuregated-crd-manifests.yaml +++ b/vendor/github.com/openshift/api/operator/v1alpha1/zz_generated.featuregated-crd-manifests.yaml @@ -1,3 +1,26 @@ +clusterversionoperators.operator.openshift.io: + Annotations: {} + ApprovedPRNumber: https://github.com/openshift/api/pull/2044 + CRDName: clusterversionoperators.operator.openshift.io + Capability: "" + Category: "" + FeatureGates: + - ClusterVersionOperatorConfiguration + FilenameOperatorName: cluster-version-operator + FilenameOperatorOrdering: "01" + FilenameRunLevel: "0000_00" + GroupName: operator.openshift.io + HasStatus: true + KindName: ClusterVersionOperator + Labels: {} + PluralName: clusterversionoperators + PrinterColumns: [] + Scope: Cluster + ShortNames: null + TopLevelFeatureGates: + - ClusterVersionOperatorConfiguration + Version: v1alpha1 + etcdbackups.operator.openshift.io: Annotations: {} ApprovedPRNumber: https://github.com/openshift/api/pull/1482 @@ -44,7 +67,9 @@ imagecontentsourcepolicies.operator.openshift.io: Version: v1alpha1 olms.operator.openshift.io: - Annotations: {} + Annotations: + include.release.openshift.io/ibm-cloud-managed: "false" + include.release.openshift.io/self-managed-high-availability: "true" ApprovedPRNumber: https://github.com/openshift/api/pull/1504 CRDName: olms.operator.openshift.io Capability: "" diff --git a/vendor/github.com/openshift/api/operator/v1alpha1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/operator/v1alpha1/zz_generated.swagger_doc_generated.go index c8cce688f..9060bf998 100644 --- a/vendor/github.com/openshift/api/operator/v1alpha1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/operator/v1alpha1/zz_generated.swagger_doc_generated.go @@ -44,7 +44,7 @@ func (GenerationHistory) SwaggerDoc() map[string]string { var map_GenericOperatorConfig = map[string]string{ "": "GenericOperatorConfig provides information to configure an operator\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.", - "servingInfo": "ServingInfo is the HTTP serving information for the controller's endpoints", + "servingInfo": "servingInfo is the HTTP serving information for the controller's endpoints", "leaderElection": "leaderElection provides information to elect a leader. 
Only override this if you have a specific need", "authentication": "authentication allows configuration of authentication for the endpoints", "authorization": "authorization allows configuration of authentication for the endpoints", @@ -135,6 +135,45 @@ func (VersionAvailability) SwaggerDoc() map[string]string { return map_VersionAvailability } +var map_ClusterVersionOperator = map[string]string{ + "": "ClusterVersionOperator holds cluster-wide information about the Cluster Version Operator.\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec is the specification of the desired behavior of the Cluster Version Operator.", + "status": "status is the most recently observed status of the Cluster Version Operator.", +} + +func (ClusterVersionOperator) SwaggerDoc() map[string]string { + return map_ClusterVersionOperator +} + +var map_ClusterVersionOperatorList = map[string]string{ + "": "ClusterVersionOperatorList is a collection of ClusterVersionOperators.\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "items is a list of ClusterVersionOperators.", +} + +func (ClusterVersionOperatorList) SwaggerDoc() map[string]string { + return map_ClusterVersionOperatorList +} + +var map_ClusterVersionOperatorSpec = map[string]string{ + "": "ClusterVersionOperatorSpec is the specification of the desired behavior of the Cluster Version Operator.", + "operatorLogLevel": "operatorLogLevel is an intent based logging for the operator itself. It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for themselves.\n\nValid values are: \"Normal\", \"Debug\", \"Trace\", \"TraceAll\". Defaults to \"Normal\".", +} + +func (ClusterVersionOperatorSpec) SwaggerDoc() map[string]string { + return map_ClusterVersionOperatorSpec +} + +var map_ClusterVersionOperatorStatus = map[string]string{ + "": "ClusterVersionOperatorStatus defines the observed status of the Cluster Version Operator.", + "observedGeneration": "observedGeneration represents the most recent generation observed by the operator and specifies the version of the spec field currently being synced.", +} + +func (ClusterVersionOperatorStatus) SwaggerDoc() map[string]string { + return map_ClusterVersionOperatorStatus +} + var map_BackupJobReference = map[string]string{ "": "BackupJobReference holds a reference to the batch/v1 Job created to run the etcd backup", "namespace": "namespace is the namespace of the Job. 
this is always expected to be \"openshift-etcd\" since the user provided PVC is also required to be in \"openshift-etcd\" Required", @@ -164,7 +203,7 @@ func (EtcdBackupList) SwaggerDoc() map[string]string { } var map_EtcdBackupSpec = map[string]string{ - "pvcName": "PVCName specifies the name of the PersistentVolumeClaim (PVC) which binds a PersistentVolume where the etcd backup file would be saved The PVC itself must always be created in the \"openshift-etcd\" namespace If the PVC is left unspecified \"\" then the platform will choose a reasonable default location to save the backup. In the future this would be backups saved across the control-plane master nodes.", + "pvcName": "pvcName specifies the name of the PersistentVolumeClaim (PVC) which binds a PersistentVolume where the etcd backup file would be saved The PVC itself must always be created in the \"openshift-etcd\" namespace If the PVC is left unspecified \"\" then the platform will choose a reasonable default location to save the backup. In the future this would be backups saved across the control-plane master nodes.", } func (EtcdBackupSpec) SwaggerDoc() map[string]string { @@ -232,7 +271,7 @@ func (OLM) SwaggerDoc() map[string]string { var map_OLMList = map[string]string{ "": "OLMList is a collection of items\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.", "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items contains the items", + "items": "items contains the items", } func (OLMList) SwaggerDoc() map[string]string { diff --git a/vendor/github.com/openshift/api/operatorcontrolplane/v1alpha1/types_conditioncheck.go b/vendor/github.com/openshift/api/operatorcontrolplane/v1alpha1/types_conditioncheck.go index f4b48e854..ba92985c1 100644 --- a/vendor/github.com/openshift/api/operatorcontrolplane/v1alpha1/types_conditioncheck.go +++ b/vendor/github.com/openshift/api/operatorcontrolplane/v1alpha1/types_conditioncheck.go @@ -26,19 +26,17 @@ type PodNetworkConnectivityCheck struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata"` - // Spec defines the source and target of the connectivity check - // +kubebuilder:validation:Required + // spec defines the source and target of the connectivity check // +required Spec PodNetworkConnectivityCheckSpec `json:"spec"` - // Status contains the observed status of the connectivity check + // status contains the observed status of the connectivity check // +optional Status PodNetworkConnectivityCheckStatus `json:"status,omitempty"` } type PodNetworkConnectivityCheckSpec struct { - // SourcePod names the pod from which the condition will be checked - // +kubebuilder:validation:Required + // sourcePod names the pod from which the condition will be checked // +kubebuilder:validation:Pattern=`^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$` // +required SourcePod string `json:"sourcePod"` @@ -46,7 +44,6 @@ type PodNetworkConnectivityCheckSpec struct { // EndpointAddress to check. A TCP address of the form host:port. Note that // if host is a DNS name, then the check would fail if the DNS name cannot // be resolved. Specify an IP address for host to bypass DNS name lookup. 
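As a concrete reading of the sourcePod and targetEndpoint patterns above, a PodNetworkConnectivityCheck could be sketched as follows; the pod name, namespace, and endpoint are examples only.

apiVersion: controlplane.operator.openshift.io/v1alpha1
kind: PodNetworkConnectivityCheck
metadata:
  name: example-connectivity-check           # hypothetical name
  namespace: openshift-network-diagnostics   # assumed namespace
spec:
  sourcePod: network-check-source-abc12      # must match the DNS-1123 pattern above
  targetEndpoint: 10.0.0.1:6443              # host:port; an IP bypasses DNS resolution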
- // +kubebuilder:validation:Required // +kubebuilder:validation:Pattern=`^\S+:\d*$` // +required TargetEndpoint string `json:"targetEndpoint"` @@ -62,19 +59,19 @@ type PodNetworkConnectivityCheckSpec struct { // +k8s:deepcopy-gen=true type PodNetworkConnectivityCheckStatus struct { - // Successes contains logs successful check actions + // successes contains logs successful check actions // +optional Successes []LogEntry `json:"successes,omitempty"` - // Failures contains logs of unsuccessful check actions + // failures contains logs of unsuccessful check actions // +optional Failures []LogEntry `json:"failures,omitempty"` - // Outages contains logs of time periods of outages + // outages contains logs of time periods of outages // +optional Outages []OutageEntry `json:"outages,omitempty"` - // Conditions summarize the status of the check + // conditions summarize the status of the check // +patchMergeKey=type // +patchStrategy=merge // +optional @@ -84,25 +81,23 @@ type PodNetworkConnectivityCheckStatus struct { // LogEntry records events type LogEntry struct { // Start time of check action. - // +kubebuilder:validation:Required // +required // +nullable Start metav1.Time `json:"time"` - // Success indicates if the log entry indicates a success or failure. - // +kubebuilder:validation:Required + // success indicates if the log entry indicates a success or failure. // +required Success bool `json:"success"` - // Reason for status in a machine readable format. + // reason for status in a machine readable format. // +optional Reason string `json:"reason,omitempty"` - // Message explaining status in a human readable format. + // message explaining status in a human readable format. // +optional Message string `json:"message,omitempty"` - // Latency records how long the action mentioned in the entry took. + // latency records how long the action mentioned in the entry took. // +optional // +nullable Latency metav1.Duration `json:"latency,omitempty"` @@ -111,28 +106,27 @@ type LogEntry struct { // OutageEntry records time period of an outage type OutageEntry struct { - // Start of outage detected - // +kubebuilder:validation:Required + // start of outage detected // +required // +nullable Start metav1.Time `json:"start"` - // End of outage detected + // end of outage detected // +optional // +nullable End metav1.Time `json:"end,omitempty"` - // StartLogs contains log entries related to the start of this outage. Should contain + // startLogs contains log entries related to the start of this outage. Should contain // the original failure, any entries where the failure mode changed. // +optional StartLogs []LogEntry `json:"startLogs,omitempty"` - // EndLogs contains log entries related to the end of this outage. Should contain the success + // endLogs contains log entries related to the end of this outage. Should contain the success // entry that resolved the outage and possibly a few of the failure log entries that preceded it. // +optional EndLogs []LogEntry `json:"endLogs,omitempty"` - // Message summarizes outage details in a human readable format. + // message summarizes outage details in a human readable format. 
// +optional Message string `json:"message,omitempty"` } @@ -141,26 +135,23 @@ type OutageEntry struct { // +k8s:deepcopy-gen=true type PodNetworkConnectivityCheckCondition struct { - // Type of the condition - // +kubebuilder:validation:Required + // type of the condition // +required Type PodNetworkConnectivityCheckConditionType `json:"type"` - // Status of the condition - // +kubebuilder:validation:Required + // status of the condition // +required Status metav1.ConditionStatus `json:"status"` - // Reason for the condition's last status transition in a machine readable format. + // reason for the condition's last status transition in a machine readable format. // +optional Reason string `json:"reason,omitempty"` - // Message indicating details about last transition in a human readable format. + // message indicating details about last transition in a human readable format. // +optional Message string `json:"message,omitempty"` // Last time the condition transitioned from one status to another. - // +kubebuilder:validation:Required // +required // +nullable LastTransitionTime metav1.Time `json:"lastTransitionTime"` @@ -193,6 +184,6 @@ type PodNetworkConnectivityCheckList struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ListMeta `json:"metadata"` - // Items contains the items + // items contains the items Items []PodNetworkConnectivityCheck `json:"items"` } diff --git a/vendor/github.com/openshift/api/operatorcontrolplane/v1alpha1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/operatorcontrolplane/v1alpha1/zz_generated.swagger_doc_generated.go index 5ecc5e48a..f6cd1975d 100644 --- a/vendor/github.com/openshift/api/operatorcontrolplane/v1alpha1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/operatorcontrolplane/v1alpha1/zz_generated.swagger_doc_generated.go @@ -14,10 +14,10 @@ package v1alpha1 var map_LogEntry = map[string]string{ "": "LogEntry records events", "time": "Start time of check action.", - "success": "Success indicates if the log entry indicates a success or failure.", - "reason": "Reason for status in a machine readable format.", - "message": "Message explaining status in a human readable format.", - "latency": "Latency records how long the action mentioned in the entry took.", + "success": "success indicates if the log entry indicates a success or failure.", + "reason": "reason for status in a machine readable format.", + "message": "message explaining status in a human readable format.", + "latency": "latency records how long the action mentioned in the entry took.", } func (LogEntry) SwaggerDoc() map[string]string { @@ -26,11 +26,11 @@ func (LogEntry) SwaggerDoc() map[string]string { var map_OutageEntry = map[string]string{ "": "OutageEntry records time period of an outage", - "start": "Start of outage detected", - "end": "End of outage detected", - "startLogs": "StartLogs contains log entries related to the start of this outage. Should contain the original failure, any entries where the failure mode changed.", - "endLogs": "EndLogs contains log entries related to the end of this outage. Should contain the success entry that resolved the outage and possibly a few of the failure log entries that preceded it.", - "message": "Message summarizes outage details in a human readable format.", + "start": "start of outage detected", + "end": "end of outage detected", + "startLogs": "startLogs contains log entries related to the start of this outage. 
Should contain the original failure, any entries where the failure mode changed.", + "endLogs": "endLogs contains log entries related to the end of this outage. Should contain the success entry that resolved the outage and possibly a few of the failure log entries that preceded it.", + "message": "message summarizes outage details in a human readable format.", } func (OutageEntry) SwaggerDoc() map[string]string { @@ -40,8 +40,8 @@ func (OutageEntry) SwaggerDoc() map[string]string { var map_PodNetworkConnectivityCheck = map[string]string{ "": "PodNetworkConnectivityCheck\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.", "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "Spec defines the source and target of the connectivity check", - "status": "Status contains the observed status of the connectivity check", + "spec": "spec defines the source and target of the connectivity check", + "status": "status contains the observed status of the connectivity check", } func (PodNetworkConnectivityCheck) SwaggerDoc() map[string]string { @@ -50,10 +50,10 @@ func (PodNetworkConnectivityCheck) SwaggerDoc() map[string]string { var map_PodNetworkConnectivityCheckCondition = map[string]string{ "": "PodNetworkConnectivityCheckCondition represents the overall status of the pod network connectivity.", - "type": "Type of the condition", - "status": "Status of the condition", - "reason": "Reason for the condition's last status transition in a machine readable format.", - "message": "Message indicating details about last transition in a human readable format.", + "type": "type of the condition", + "status": "status of the condition", + "reason": "reason for the condition's last status transition in a machine readable format.", + "message": "message indicating details about last transition in a human readable format.", "lastTransitionTime": "Last time the condition transitioned from one status to another.", } @@ -64,7 +64,7 @@ func (PodNetworkConnectivityCheckCondition) SwaggerDoc() map[string]string { var map_PodNetworkConnectivityCheckList = map[string]string{ "": "PodNetworkConnectivityCheckList is a collection of PodNetworkConnectivityCheck\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.", "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items contains the items", + "items": "items contains the items", } func (PodNetworkConnectivityCheckList) SwaggerDoc() map[string]string { @@ -72,7 +72,7 @@ func (PodNetworkConnectivityCheckList) SwaggerDoc() map[string]string { } var map_PodNetworkConnectivityCheckSpec = map[string]string{ - "sourcePod": "SourcePod names the pod from which the condition will be checked", + "sourcePod": "sourcePod names the pod from which the condition will be checked", "targetEndpoint": "EndpointAddress to check. A TCP address of the form host:port. Note that if host is a DNS name, then the check would fail if the DNS name cannot be resolved. 
Specify an IP address for host to bypass DNS name lookup.", "tlsClientCert": "TLSClientCert, if specified, references a kubernetes.io/tls type secret with 'tls.crt' and 'tls.key' entries containing an optional TLS client certificate and key to be used when checking endpoints that require a client certificate in order to gracefully preform the scan without causing excessive logging in the endpoint process. The secret must exist in the same namespace as this resource.", } @@ -82,10 +82,10 @@ func (PodNetworkConnectivityCheckSpec) SwaggerDoc() map[string]string { } var map_PodNetworkConnectivityCheckStatus = map[string]string{ - "successes": "Successes contains logs successful check actions", - "failures": "Failures contains logs of unsuccessful check actions", - "outages": "Outages contains logs of time periods of outages", - "conditions": "Conditions summarize the status of the check", + "successes": "successes contains logs successful check actions", + "failures": "failures contains logs of unsuccessful check actions", + "outages": "outages contains logs of time periods of outages", + "conditions": "conditions summarize the status of the check", } func (PodNetworkConnectivityCheckStatus) SwaggerDoc() map[string]string { diff --git a/vendor/github.com/openshift/api/osin/v1/types.go b/vendor/github.com/openshift/api/osin/v1/types.go index 0ea4be1ba..35eb3ee8b 100644 --- a/vendor/github.com/openshift/api/osin/v1/types.go +++ b/vendor/github.com/openshift/api/osin/v1/types.go @@ -473,7 +473,7 @@ type TokenConfig struct { type SessionSecrets struct { metav1.TypeMeta `json:",inline"` - // Secrets is a list of secrets + // secrets is a list of secrets // New sessions are signed and encrypted using the first secret. // Existing sessions are decrypted/authenticated by each secret until one succeeds. This allows rotating secrets. Secrets []SessionSecret `json:"secrets"` @@ -481,8 +481,8 @@ type SessionSecrets struct { // SessionSecret is a secret used to authenticate/decrypt cookie-based sessions type SessionSecret struct { - // Authentication is used to authenticate sessions using HMAC. Recommended to use a secret with 32 or 64 bytes. + // authentication is used to authenticate sessions using HMAC. Recommended to use a secret with 32 or 64 bytes. Authentication string `json:"authentication"` - // Encryption is used to encrypt sessions. Must be 16, 24, or 32 characters long, to select AES-128, AES- + // encryption is used to encrypt sessions. Must be 16, 24, or 32 characters long, to select AES-128, AES- Encryption string `json:"encryption"` } diff --git a/vendor/github.com/openshift/api/osin/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/osin/v1/zz_generated.swagger_doc_generated.go index 0bffa8265..890928a7a 100644 --- a/vendor/github.com/openshift/api/osin/v1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/osin/v1/zz_generated.swagger_doc_generated.go @@ -248,8 +248,8 @@ func (SessionConfig) SwaggerDoc() map[string]string { var map_SessionSecret = map[string]string{ "": "SessionSecret is a secret used to authenticate/decrypt cookie-based sessions", - "authentication": "Authentication is used to authenticate sessions using HMAC. Recommended to use a secret with 32 or 64 bytes.", - "encryption": "Encryption is used to encrypt sessions. Must be 16, 24, or 32 characters long, to select AES-128, AES-", + "authentication": "authentication is used to authenticate sessions using HMAC. 
Recommended to use a secret with 32 or 64 bytes.", + "encryption": "encryption is used to encrypt sessions. Must be 16, 24, or 32 characters long, to select AES-128, AES-", } func (SessionSecret) SwaggerDoc() map[string]string { @@ -258,7 +258,7 @@ func (SessionSecret) SwaggerDoc() map[string]string { var map_SessionSecrets = map[string]string{ "": "SessionSecrets list the secrets to use to sign/encrypt and authenticate/decrypt created sessions.\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.", - "secrets": "Secrets is a list of secrets New sessions are signed and encrypted using the first secret. Existing sessions are decrypted/authenticated by each secret until one succeeds. This allows rotating secrets.", + "secrets": "secrets is a list of secrets New sessions are signed and encrypted using the first secret. Existing sessions are decrypted/authenticated by each secret until one succeeds. This allows rotating secrets.", } func (SessionSecrets) SwaggerDoc() map[string]string { diff --git a/vendor/github.com/openshift/api/project/v1/generated.proto b/vendor/github.com/openshift/api/project/v1/generated.proto index 762dc99c6..d1ffbc341 100644 --- a/vendor/github.com/openshift/api/project/v1/generated.proto +++ b/vendor/github.com/openshift/api/project/v1/generated.proto @@ -32,10 +32,10 @@ message Project { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // Spec defines the behavior of the Namespace. + // spec defines the behavior of the Namespace. optional ProjectSpec spec = 2; - // Status describes the current status of a Namespace + // status describes the current status of a Namespace // +optional optional ProjectStatus status = 3; } @@ -49,7 +49,7 @@ message ProjectList { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // Items is the list of projects + // items is the list of projects repeated Project items = 2; } @@ -62,22 +62,22 @@ message ProjectRequest { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // DisplayName is the display name to apply to a project + // displayName is the display name to apply to a project optional string displayName = 2; - // Description is the description to apply to a project + // description is the description to apply to a project optional string description = 3; } // ProjectSpec describes the attributes on a Project message ProjectSpec { - // Finalizers is an opaque list of values that must be empty to permanently remove object from storage + // finalizers is an opaque list of values that must be empty to permanently remove object from storage repeated string finalizers = 1; } // ProjectStatus is information about the current status of a Project message ProjectStatus { - // Phase is the current lifecycle phase of the project + // phase is the current lifecycle phase of the project // +optional optional string phase = 1; diff --git a/vendor/github.com/openshift/api/project/v1/types.go b/vendor/github.com/openshift/api/project/v1/types.go index 9c17a5dea..5e69b775b 100644 --- 
a/vendor/github.com/openshift/api/project/v1/types.go +++ b/vendor/github.com/openshift/api/project/v1/types.go @@ -18,7 +18,7 @@ type ProjectList struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Items is the list of projects + // items is the list of projects Items []Project `json:"items" protobuf:"bytes,2,rep,name=items"` } @@ -36,13 +36,13 @@ const ( // ProjectSpec describes the attributes on a Project type ProjectSpec struct { - // Finalizers is an opaque list of values that must be empty to permanently remove object from storage + // finalizers is an opaque list of values that must be empty to permanently remove object from storage Finalizers []corev1.FinalizerName `json:"finalizers,omitempty" protobuf:"bytes,1,rep,name=finalizers,casttype=k8s.io/api/core/v1.FinalizerName"` } // ProjectStatus is information about the current status of a Project type ProjectStatus struct { - // Phase is the current lifecycle phase of the project + // phase is the current lifecycle phase of the project // +optional Phase corev1.NamespacePhase `json:"phase,omitempty" protobuf:"bytes,1,opt,name=phase,casttype=k8s.io/api/core/v1.NamespacePhase"` @@ -79,10 +79,10 @@ type Project struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Spec defines the behavior of the Namespace. + // spec defines the behavior of the Namespace. Spec ProjectSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` - // Status describes the current status of a Namespace + // status describes the current status of a Namespace // +optional Status ProjectStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } @@ -104,8 +104,8 @@ type ProjectRequest struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // DisplayName is the display name to apply to a project + // displayName is the display name to apply to a project DisplayName string `json:"displayName,omitempty" protobuf:"bytes,2,opt,name=displayName"` - // Description is the description to apply to a project + // description is the description to apply to a project Description string `json:"description,omitempty" protobuf:"bytes,3,opt,name=description"` } diff --git a/vendor/github.com/openshift/api/project/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/project/v1/zz_generated.swagger_doc_generated.go index 890e651d7..b764eafac 100644 --- a/vendor/github.com/openshift/api/project/v1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/project/v1/zz_generated.swagger_doc_generated.go @@ -14,8 +14,8 @@ package v1 var map_Project = map[string]string{ "": "Projects are the unit of isolation and collaboration in OpenShift. A project has one or more members, a quota on the resources that the project may consume, and the security controls on the resources in the project. Within a project, members may have different roles - project administrators can set membership, editors can create and manage the resources, and viewers can see but not access running containers. 
In a normal cluster project administrators are not able to alter their quotas - that is restricted to cluster administrators.\n\nListing or watching projects will return only projects the user has the reader role on.\n\nAn OpenShift project is an alternative representation of a Kubernetes namespace. Projects are exposed as editable to end users while namespaces are not. Direct creation of a project is typically restricted to administrators, while end users should use the requestproject resource.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "Spec defines the behavior of the Namespace.", - "status": "Status describes the current status of a Namespace", + "spec": "spec defines the behavior of the Namespace.", + "status": "status describes the current status of a Namespace", } func (Project) SwaggerDoc() map[string]string { @@ -25,7 +25,7 @@ func (Project) SwaggerDoc() map[string]string { var map_ProjectList = map[string]string{ "": "ProjectList is a list of Project objects.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items is the list of projects", + "items": "items is the list of projects", } func (ProjectList) SwaggerDoc() map[string]string { @@ -35,8 +35,8 @@ func (ProjectList) SwaggerDoc() map[string]string { var map_ProjectRequest = map[string]string{ "": "ProjectRequest is the set of options necessary to fully qualify a project request\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "displayName": "DisplayName is the display name to apply to a project", - "description": "Description is the description to apply to a project", + "displayName": "displayName is the display name to apply to a project", + "description": "description is the description to apply to a project", } func (ProjectRequest) SwaggerDoc() map[string]string { @@ -45,7 +45,7 @@ func (ProjectRequest) SwaggerDoc() map[string]string { var map_ProjectSpec = map[string]string{ "": "ProjectSpec describes the attributes on a Project", - "finalizers": "Finalizers is an opaque list of values that must be empty to permanently remove object from storage", + "finalizers": "finalizers is an opaque list of values that must be empty to permanently remove object from storage", } func (ProjectSpec) SwaggerDoc() map[string]string { @@ -54,7 +54,7 @@ func (ProjectSpec) SwaggerDoc() map[string]string { var map_ProjectStatus = map[string]string{ "": "ProjectStatus is information about the current status of a Project", - "phase": "Phase is the current lifecycle phase of the project", + "phase": "phase is the current lifecycle phase of the project", "conditions": "Represents the latest available observations of the project current state.", } diff --git a/vendor/github.com/openshift/api/quota/v1/generated.proto b/vendor/github.com/openshift/api/quota/v1/generated.proto index d08e8f0f9..fb7fed242 100644 --- a/vendor/github.com/openshift/api/quota/v1/generated.proto +++ b/vendor/github.com/openshift/api/quota/v1/generated.proto @@ -23,10 +23,10 @@ message AppliedClusterResourceQuota { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // Spec defines the desired quota + // spec defines the desired quota optional ClusterResourceQuotaSpec spec = 2; - // Status defines the actual enforced quota and its current usage + // status defines the actual enforced quota and its current usage optional ClusterResourceQuotaStatus status = 3; } @@ -39,7 +39,7 @@ message AppliedClusterResourceQuotaList { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // Items is a list of AppliedClusterResourceQuota + // items is a list of AppliedClusterResourceQuota repeated AppliedClusterResourceQuota items = 2; } @@ -59,10 +59,10 @@ message ClusterResourceQuota { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // Spec defines the desired quota + // spec defines the desired quota optional ClusterResourceQuotaSpec spec = 2; - // Status defines the actual enforced quota and its current usage + // status defines the actual enforced quota and its current usage optional ClusterResourceQuotaStatus status = 3; } @@ -75,7 +75,7 @@ message ClusterResourceQuotaList { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // Items is a list of ClusterResourceQuotas + // items is a list of ClusterResourceQuotas repeated ClusterResourceQuota items = 2; } @@ -96,22 +96,22 @@ message ClusterResourceQuotaSelector { // ClusterResourceQuotaSpec defines the 
desired quota restrictions message ClusterResourceQuotaSpec { - // Selector is the selector used to match projects. + // selector is the selector used to match projects. // It should only select active projects on the scale of dozens (though it can select // many more less active projects). These projects will contend on object creation through // this resource. optional ClusterResourceQuotaSelector selector = 1; - // Quota defines the desired quota + // quota defines the desired quota optional .k8s.io.api.core.v1.ResourceQuotaSpec quota = 2; } // ClusterResourceQuotaStatus defines the actual enforced quota and its current usage message ClusterResourceQuotaStatus { - // Total defines the actual enforced quota and its current usage across all projects + // total defines the actual enforced quota and its current usage across all projects optional .k8s.io.api.core.v1.ResourceQuotaStatus total = 1; - // Namespaces slices the usage by project. This division allows for quick resolution of + // namespaces slices the usage by project. This division allows for quick resolution of // deletion reconciliation inside of a single project without requiring a recalculation // across all projects. This can be used to pull the deltas for a given project. // +optional @@ -121,10 +121,10 @@ message ClusterResourceQuotaStatus { // ResourceQuotaStatusByNamespace gives status for a particular project message ResourceQuotaStatusByNamespace { - // Namespace the project this status applies to + // namespace the project this status applies to optional string namespace = 1; - // Status indicates how many resources have been consumed by this project + // status indicates how many resources have been consumed by this project optional .k8s.io.api.core.v1.ResourceQuotaStatus status = 2; } diff --git a/vendor/github.com/openshift/api/quota/v1/types.go b/vendor/github.com/openshift/api/quota/v1/types.go index 7a29ddcd4..0cfb85f87 100644 --- a/vendor/github.com/openshift/api/quota/v1/types.go +++ b/vendor/github.com/openshift/api/quota/v1/types.go @@ -27,22 +27,22 @@ type ClusterResourceQuota struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata" protobuf:"bytes,1,opt,name=metadata"` - // Spec defines the desired quota + // spec defines the desired quota Spec ClusterResourceQuotaSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` - // Status defines the actual enforced quota and its current usage + // status defines the actual enforced quota and its current usage Status ClusterResourceQuotaStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } // ClusterResourceQuotaSpec defines the desired quota restrictions type ClusterResourceQuotaSpec struct { - // Selector is the selector used to match projects. + // selector is the selector used to match projects. // It should only select active projects on the scale of dozens (though it can select // many more less active projects). These projects will contend on object creation through // this resource. 
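To illustrate the selector and quota fields described here, a ClusterResourceQuota that matches projects by annotation might be sketched as below; the annotation value and limits are placeholders.

apiVersion: quota.openshift.io/v1
kind: ClusterResourceQuota
metadata:
  name: example-quota
spec:
  selector:
    annotations:
      openshift.io/requester: example-user   # placeholder project selector
  quota:
    hard:
      pods: "10"
      secrets: "20"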
Selector ClusterResourceQuotaSelector `json:"selector" protobuf:"bytes,1,opt,name=selector"` - // Quota defines the desired quota + // quota defines the desired quota Quota corev1.ResourceQuotaSpec `json:"quota" protobuf:"bytes,2,opt,name=quota"` } @@ -63,10 +63,10 @@ type ClusterResourceQuotaSelector struct { // ClusterResourceQuotaStatus defines the actual enforced quota and its current usage type ClusterResourceQuotaStatus struct { - // Total defines the actual enforced quota and its current usage across all projects + // total defines the actual enforced quota and its current usage across all projects Total corev1.ResourceQuotaStatus `json:"total" protobuf:"bytes,1,opt,name=total"` - // Namespaces slices the usage by project. This division allows for quick resolution of + // namespaces slices the usage by project. This division allows for quick resolution of // deletion reconciliation inside of a single project without requiring a recalculation // across all projects. This can be used to pull the deltas for a given project. // +optional @@ -87,7 +87,7 @@ type ClusterResourceQuotaList struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Items is a list of ClusterResourceQuotas + // items is a list of ClusterResourceQuotas Items []ClusterResourceQuota `json:"items" protobuf:"bytes,2,rep,name=items"` } @@ -96,10 +96,10 @@ type ResourceQuotasStatusByNamespace []ResourceQuotaStatusByNamespace // ResourceQuotaStatusByNamespace gives status for a particular project type ResourceQuotaStatusByNamespace struct { - // Namespace the project this status applies to + // namespace the project this status applies to Namespace string `json:"namespace" protobuf:"bytes,1,opt,name=namespace"` - // Status indicates how many resources have been consumed by this project + // status indicates how many resources have been consumed by this project Status corev1.ResourceQuotaStatus `json:"status" protobuf:"bytes,2,opt,name=status"` } @@ -120,10 +120,10 @@ type AppliedClusterResourceQuota struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata" protobuf:"bytes,1,opt,name=metadata"` - // Spec defines the desired quota + // spec defines the desired quota Spec ClusterResourceQuotaSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` - // Status defines the actual enforced quota and its current usage + // status defines the actual enforced quota and its current usage Status ClusterResourceQuotaStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } @@ -140,6 +140,6 @@ type AppliedClusterResourceQuotaList struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Items is a list of AppliedClusterResourceQuota + // items is a list of AppliedClusterResourceQuota Items []AppliedClusterResourceQuota `json:"items" protobuf:"bytes,2,rep,name=items"` } diff --git a/vendor/github.com/openshift/api/quota/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/quota/v1/zz_generated.swagger_doc_generated.go index 3072671c5..1bb84b817 100644 --- a/vendor/github.com/openshift/api/quota/v1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/quota/v1/zz_generated.swagger_doc_generated.go @@ -14,8 
+14,8 @@ package v1 var map_AppliedClusterResourceQuota = map[string]string{ "": "AppliedClusterResourceQuota mirrors ClusterResourceQuota at a project scope, for projection into a project. It allows a project-admin to know which ClusterResourceQuotas are applied to his project and their associated usage.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "Spec defines the desired quota", - "status": "Status defines the actual enforced quota and its current usage", + "spec": "spec defines the desired quota", + "status": "status defines the actual enforced quota and its current usage", } func (AppliedClusterResourceQuota) SwaggerDoc() map[string]string { @@ -25,7 +25,7 @@ func (AppliedClusterResourceQuota) SwaggerDoc() map[string]string { var map_AppliedClusterResourceQuotaList = map[string]string{ "": "AppliedClusterResourceQuotaList is a collection of AppliedClusterResourceQuotas\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items is a list of AppliedClusterResourceQuota", + "items": "items is a list of AppliedClusterResourceQuota", } func (AppliedClusterResourceQuotaList) SwaggerDoc() map[string]string { @@ -35,8 +35,8 @@ func (AppliedClusterResourceQuotaList) SwaggerDoc() map[string]string { var map_ClusterResourceQuota = map[string]string{ "": "ClusterResourceQuota mirrors ResourceQuota at a cluster scope. This object is easily convertible to synthetic ResourceQuota object to allow quota evaluation re-use.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "Spec defines the desired quota", - "status": "Status defines the actual enforced quota and its current usage", + "spec": "spec defines the desired quota", + "status": "status defines the actual enforced quota and its current usage", } func (ClusterResourceQuota) SwaggerDoc() map[string]string { @@ -46,7 +46,7 @@ func (ClusterResourceQuota) SwaggerDoc() map[string]string { var map_ClusterResourceQuotaList = map[string]string{ "": "ClusterResourceQuotaList is a collection of ClusterResourceQuotas\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items is a list of ClusterResourceQuotas", + "items": "items is a list of ClusterResourceQuotas", } func (ClusterResourceQuotaList) SwaggerDoc() map[string]string { @@ -65,8 +65,8 @@ func (ClusterResourceQuotaSelector) SwaggerDoc() map[string]string { var map_ClusterResourceQuotaSpec = map[string]string{ "": "ClusterResourceQuotaSpec defines the desired quota restrictions", - "selector": "Selector is the selector used to match projects. 
It should only select active projects on the scale of dozens (though it can select many more less active projects). These projects will contend on object creation through this resource.", - "quota": "Quota defines the desired quota", + "selector": "selector is the selector used to match projects. It should only select active projects on the scale of dozens (though it can select many more less active projects). These projects will contend on object creation through this resource.", + "quota": "quota defines the desired quota", } func (ClusterResourceQuotaSpec) SwaggerDoc() map[string]string { @@ -75,8 +75,8 @@ func (ClusterResourceQuotaSpec) SwaggerDoc() map[string]string { var map_ClusterResourceQuotaStatus = map[string]string{ "": "ClusterResourceQuotaStatus defines the actual enforced quota and its current usage", - "total": "Total defines the actual enforced quota and its current usage across all projects", - "namespaces": "Namespaces slices the usage by project. This division allows for quick resolution of deletion reconciliation inside of a single project without requiring a recalculation across all projects. This can be used to pull the deltas for a given project.", + "total": "total defines the actual enforced quota and its current usage across all projects", + "namespaces": "namespaces slices the usage by project. This division allows for quick resolution of deletion reconciliation inside of a single project without requiring a recalculation across all projects. This can be used to pull the deltas for a given project.", } func (ClusterResourceQuotaStatus) SwaggerDoc() map[string]string { @@ -85,8 +85,8 @@ func (ClusterResourceQuotaStatus) SwaggerDoc() map[string]string { var map_ResourceQuotaStatusByNamespace = map[string]string{ "": "ResourceQuotaStatusByNamespace gives status for a particular project", - "namespace": "Namespace the project this status applies to", - "status": "Status indicates how many resources have been consumed by this project", + "namespace": "namespace the project this status applies to", + "status": "status indicates how many resources have been consumed by this project", } func (ResourceQuotaStatusByNamespace) SwaggerDoc() map[string]string { diff --git a/vendor/github.com/openshift/api/route/v1/generated.proto b/vendor/github.com/openshift/api/route/v1/generated.proto index 2a79b9a5a..adfce0d19 100644 --- a/vendor/github.com/openshift/api/route/v1/generated.proto +++ b/vendor/github.com/openshift/api/route/v1/generated.proto @@ -74,7 +74,7 @@ message RouteHTTPHeader { // Strict-Transport-Security, Proxy, Cookie, Set-Cookie. // It must be no more than 255 characters in length. // Header name must be unique. - // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=255 // +kubebuilder:validation:Pattern="^[-!#$%&'*+.0-9A-Z^_`a-z|~]+$" @@ -85,7 +85,7 @@ message RouteHTTPHeader { optional string name = 1; // action specifies actions to perform on headers, such as setting or deleting headers. - // +kubebuilder:validation:Required + // +required optional RouteHTTPHeaderActionUnion action = 2; } @@ -99,7 +99,7 @@ message RouteHTTPHeaderActionUnion { // Delete allows you to delete HTTP request and response headers. // +unionDiscriminator // +kubebuilder:validation:Enum:=Set;Delete - // +kubebuilder:validation:Required + // +required optional string type = 1; // set defines the HTTP header that should be set: added if it doesn't exist or replaced if it does. 
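The Set/Delete header-action union shown above is used from a Route roughly as in the following fragment; header names, the backing service, and values are examples only, and the httpHeaders layout is assumed from recent OpenShift router releases.

apiVersion: route.openshift.io/v1
kind: Route
metadata:
  name: example-route
spec:
  to:
    kind: Service
    name: example-svc                # example backend
  httpHeaders:
    actions:
      response:
      - name: X-Frame-Options        # example header to set
        action:
          type: Set
          set:
            value: DENY
      - name: X-Powered-By           # example header to delete
        action:
          type: Delete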
@@ -192,13 +192,13 @@ message RouteHTTPHeaders { // RouteIngress holds information about the places where a route is exposed. message RouteIngress { - // Host is the host string under which the route is exposed; this value is required + // host is the host string under which the route is exposed; this value is required optional string host = 1; // Name is a name chosen by the router to identify itself; this value is required optional string routerName = 2; - // Conditions is the state of the route, may be empty. + // conditions is the state of the route, may be empty. // +listType=map // +listMapKey=type repeated RouteIngressCondition conditions = 3; @@ -214,11 +214,11 @@ message RouteIngress { // RouteIngressCondition contains details for the current condition of this route on a particular // router. message RouteIngressCondition { - // Type is the type of the condition. + // type is the type of the condition. // Currently only Admitted or UnservableInFutureVersions. optional string type = 1; - // Status is the status of the condition. + // status is the status of the condition. // Can be True, False, Unknown. optional string status = 2; @@ -267,7 +267,7 @@ message RouteSetHTTPHeader { // + --- // + Note: This limit was selected as most common web servers have a limit of 16384 characters or some lower limit. // + See . - // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=16384 optional string value = 1; @@ -458,6 +458,8 @@ message TLSConfig { // chain. Do not include a CA certificate. The secret referenced should // be present in the same namespace as that of the Route. // Forbidden when `certificate` is set. + // The router service account needs to be granted with read-only access to this secret, + // please refer to openshift docs for additional details. // // +openshift:enable:FeatureGate=RouteExternalCertificate // +optional diff --git a/vendor/github.com/openshift/api/route/v1/types.go b/vendor/github.com/openshift/api/route/v1/types.go index 941619994..2feb425a2 100644 --- a/vendor/github.com/openshift/api/route/v1/types.go +++ b/vendor/github.com/openshift/api/route/v1/types.go @@ -251,7 +251,7 @@ type RouteHTTPHeader struct { // Strict-Transport-Security, Proxy, Cookie, Set-Cookie. // It must be no more than 255 characters in length. // Header name must be unique. - // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=255 // +kubebuilder:validation:Pattern="^[-!#$%&'*+.0-9A-Z^_`a-z|~]+$" @@ -262,7 +262,7 @@ type RouteHTTPHeader struct { Name string `json:"name" protobuf:"bytes,1,opt,name=name"` // action specifies actions to perform on headers, such as setting or deleting headers. - // +kubebuilder:validation:Required + // +required Action RouteHTTPHeaderActionUnion `json:"action" protobuf:"bytes,2,opt,name=action"` } @@ -276,7 +276,7 @@ type RouteHTTPHeaderActionUnion struct { // Delete allows you to delete HTTP request and response headers. // +unionDiscriminator // +kubebuilder:validation:Enum:=Set;Delete - // +kubebuilder:validation:Required + // +required Type RouteHTTPHeaderActionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=RouteHTTPHeaderActionType"` // set defines the HTTP header that should be set: added if it doesn't exist or replaced if it does. 
@@ -299,7 +299,7 @@ type RouteSetHTTPHeader struct { // + --- // + Note: This limit was selected as most common web servers have a limit of 16384 characters or some lower limit. // + See . - // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=16384 Value string `json:"value" protobuf:"bytes,1,opt,name=value"` @@ -359,11 +359,11 @@ type RouteStatus struct { // RouteIngress holds information about the places where a route is exposed. type RouteIngress struct { - // Host is the host string under which the route is exposed; this value is required + // host is the host string under which the route is exposed; this value is required Host string `json:"host,omitempty" protobuf:"bytes,1,opt,name=host"` // Name is a name chosen by the router to identify itself; this value is required RouterName string `json:"routerName,omitempty" protobuf:"bytes,2,opt,name=routerName"` - // Conditions is the state of the route, may be empty. + // conditions is the state of the route, may be empty. // +listType=map // +listMapKey=type Conditions []RouteIngressCondition `json:"conditions,omitempty" protobuf:"bytes,3,rep,name=conditions"` @@ -389,10 +389,10 @@ const ( // RouteIngressCondition contains details for the current condition of this route on a particular // router. type RouteIngressCondition struct { - // Type is the type of the condition. + // type is the type of the condition. // Currently only Admitted or UnservableInFutureVersions. Type RouteIngressConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=RouteIngressConditionType"` - // Status is the status of the condition. + // status is the status of the condition. // Can be True, False, Unknown. Status corev1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/api/core/v1.ConditionStatus"` // (brief) reason for the condition's last transition, and is usually a machine and human @@ -469,6 +469,8 @@ type TLSConfig struct { // chain. Do not include a CA certificate. The secret referenced should // be present in the same namespace as that of the Route. // Forbidden when `certificate` is set. + // The router service account needs to be granted with read-only access to this secret, + // please refer to openshift docs for additional details. 
// // +openshift:enable:FeatureGate=RouteExternalCertificate // +optional diff --git a/vendor/github.com/openshift/api/route/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/route/v1/zz_generated.swagger_doc_generated.go index 56a4e23e3..e6c44a6b0 100644 --- a/vendor/github.com/openshift/api/route/v1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/route/v1/zz_generated.swagger_doc_generated.go @@ -72,9 +72,9 @@ func (RouteHTTPHeaders) SwaggerDoc() map[string]string { var map_RouteIngress = map[string]string{ "": "RouteIngress holds information about the places where a route is exposed.", - "host": "Host is the host string under which the route is exposed; this value is required", + "host": "host is the host string under which the route is exposed; this value is required", "routerName": "Name is a name chosen by the router to identify itself; this value is required", - "conditions": "Conditions is the state of the route, may be empty.", + "conditions": "conditions is the state of the route, may be empty.", "wildcardPolicy": "Wildcard policy is the wildcard policy that was allowed where this route is exposed.", "routerCanonicalHostname": "CanonicalHostname is the external host name for the router that can be used as a CNAME for the host requested for this route. This value is optional and may not be set in all cases.", } @@ -85,8 +85,8 @@ func (RouteIngress) SwaggerDoc() map[string]string { var map_RouteIngressCondition = map[string]string{ "": "RouteIngressCondition contains details for the current condition of this route on a particular router.", - "type": "Type is the type of the condition. Currently only Admitted or UnservableInFutureVersions.", - "status": "Status is the status of the condition. Can be True, False, Unknown.", + "type": "type is the type of the condition. Currently only Admitted or UnservableInFutureVersions.", + "status": "status is the status of the condition. Can be True, False, Unknown.", "reason": "(brief) reason for the condition's last transition, and is usually a machine and human readable constant", "message": "Human readable message indicating details about last transition.", "lastTransitionTime": "RFC 3339 date and time when this condition last transitioned", @@ -179,7 +179,7 @@ var map_TLSConfig = map[string]string{ "caCertificate": "caCertificate provides the cert authority certificate contents", "destinationCACertificate": "destinationCACertificate provides the contents of the ca certificate of the final destination. When using reencrypt termination this file should be provided in order to have routers use it for health checks on the secure connection. If this field is not specified, the router may provide its own destination CA and perform hostname validation using the short service name (service.namespace.svc), which allows infrastructure generated certificates to automatically verify.", "insecureEdgeTerminationPolicy": "insecureEdgeTerminationPolicy indicates the desired behavior for insecure connections to a route. 
While each router may make its own decisions on which ports to expose, this is normally port 80.\n\nIf a route does not specify insecureEdgeTerminationPolicy, then the default behavior is \"None\".\n\n* Allow - traffic is sent to the server on the insecure port (edge/reencrypt terminations only).\n\n* None - no traffic is allowed on the insecure port (default).\n\n* Redirect - clients are redirected to the secure port.", - "externalCertificate": "externalCertificate provides certificate contents as a secret reference. This should be a single serving certificate, not a certificate chain. Do not include a CA certificate. The secret referenced should be present in the same namespace as that of the Route. Forbidden when `certificate` is set.", + "externalCertificate": "externalCertificate provides certificate contents as a secret reference. This should be a single serving certificate, not a certificate chain. Do not include a CA certificate. The secret referenced should be present in the same namespace as that of the Route. Forbidden when `certificate` is set. The router service account needs to be granted with read-only access to this secret, please refer to openshift docs for additional details.", } func (TLSConfig) SwaggerDoc() map[string]string { diff --git a/vendor/github.com/openshift/api/samples/v1/generated.proto b/vendor/github.com/openshift/api/samples/v1/generated.proto index be97c467d..eeda6835a 100644 --- a/vendor/github.com/openshift/api/samples/v1/generated.proto +++ b/vendor/github.com/openshift/api/samples/v1/generated.proto @@ -30,7 +30,6 @@ message Config { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // +kubebuilder:validation:Required // +required optional ConfigSpec spec = 2; diff --git a/vendor/github.com/openshift/api/samples/v1/types_config.go b/vendor/github.com/openshift/api/samples/v1/types_config.go index c4bf38043..320500b0c 100644 --- a/vendor/github.com/openshift/api/samples/v1/types_config.go +++ b/vendor/github.com/openshift/api/samples/v1/types_config.go @@ -29,7 +29,6 @@ type Config struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata" protobuf:"bytes,1,opt,name=metadata"` - // +kubebuilder:validation:Required // +required Spec ConfigSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` // +optional diff --git a/vendor/github.com/openshift/api/security/v1/consts.go b/vendor/github.com/openshift/api/security/v1/consts.go index 7e8adf6e6..92147d3c5 100644 --- a/vendor/github.com/openshift/api/security/v1/consts.go +++ b/vendor/github.com/openshift/api/security/v1/consts.go @@ -13,4 +13,9 @@ const ( // MinimallySufficientPodSecurityStandard indicates the PodSecurityStandard that matched the SCCs available to the users of the namespace. MinimallySufficientPodSecurityStandard = "security.openshift.io/MinimallySufficientPodSecurityStandard" + + // ValidatedSCCSubjectTypeAnnotation indicates the subject type that allowed the + // SCC admission. This can be used by controllers to detect potential issues + // between user-driven SCC usage and the ServiceAccount-driven SCC usage. 
+ ValidatedSCCSubjectTypeAnnotation = "security.openshift.io/validated-scc-subject-type" ) diff --git a/vendor/github.com/openshift/api/security/v1/generated.proto b/vendor/github.com/openshift/api/security/v1/generated.proto index fdb879ce0..0e6bb094f 100644 --- a/vendor/github.com/openshift/api/security/v1/generated.proto +++ b/vendor/github.com/openshift/api/security/v1/generated.proto @@ -14,16 +14,16 @@ option go_package = "github.com/openshift/api/security/v1"; // AllowedFlexVolume represents a single Flexvolume that is allowed to be used. message AllowedFlexVolume { - // Driver is the name of the Flexvolume driver. + // driver is the name of the Flexvolume driver. optional string driver = 1; } // FSGroupStrategyOptions defines the strategy type and options used to create the strategy. message FSGroupStrategyOptions { - // Type is the strategy that will dictate what FSGroup is used in the SecurityContext. + // type is the strategy that will dictate what FSGroup is used in the SecurityContext. optional string type = 1; - // Ranges are the allowed ranges of fs groups. If you would like to force a single + // ranges are the allowed ranges of fs groups. If you would like to force a single // fs group then supply a single range with the same start and end. // +listType=atomic repeated IDRange ranges = 2; @@ -32,10 +32,10 @@ message FSGroupStrategyOptions { // IDRange provides a min/max of an allowed range of IDs. // TODO: this could be reused for UIDs. message IDRange { - // Min is the start of the range, inclusive. + // min is the start of the range, inclusive. optional int64 min = 1; - // Max is the end of the range, inclusive. + // max is the end of the range, inclusive. optional int64 max = 2; } @@ -177,23 +177,23 @@ message RangeAllocationList { // RunAsUserStrategyOptions defines the strategy type and any options used to create the strategy. message RunAsUserStrategyOptions { - // Type is the strategy that will dictate what RunAsUser is used in the SecurityContext. + // type is the strategy that will dictate what RunAsUser is used in the SecurityContext. optional string type = 1; - // UID is the user id that containers must run as. Required for the MustRunAs strategy if not using + // uid is the user id that containers must run as. Required for the MustRunAs strategy if not using // namespace/service account allocated uids. optional int64 uid = 2; - // UIDRangeMin defines the min value for a strategy that allocates by range. + // uidRangeMin defines the min value for a strategy that allocates by range. optional int64 uidRangeMin = 3; - // UIDRangeMax defines the max value for a strategy that allocates by range. + // uidRangeMax defines the max value for a strategy that allocates by range. optional int64 uidRangeMax = 4; } // SELinuxContextStrategyOptions defines the strategy type and any options used to create the strategy. message SELinuxContextStrategyOptions { - // Type is the strategy that will dictate what SELinux context is used in the SecurityContext. + // type is the strategy that will dictate what SELinux context is used in the SecurityContext. 
optional string type = 1; // seLinuxOptions required to run as; required for MustRunAs @@ -229,7 +229,7 @@ message SecurityContextConstraints { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // Priority influences the sort order of SCCs when evaluating which SCCs to try first for + // priority influences the sort order of SCCs when evaluating which SCCs to try first for // a given pod request based on access in the Users and Groups fields. The higher the int, the // higher priority. An unset value is considered a 0 priority. If scores // for multiple SCCs are equal they will be sorted from most restrictive to @@ -238,23 +238,23 @@ message SecurityContextConstraints { // +nullable optional int32 priority = 2; - // AllowPrivilegedContainer determines if a container can request to be run as privileged. + // allowPrivilegedContainer determines if a container can request to be run as privileged. optional bool allowPrivilegedContainer = 3; - // DefaultAddCapabilities is the default set of capabilities that will be added to the container + // defaultAddCapabilities is the default set of capabilities that will be added to the container // unless the pod spec specifically drops the capability. You may not list a capabiility in both // DefaultAddCapabilities and RequiredDropCapabilities. // +nullable // +listType=atomic repeated string defaultAddCapabilities = 4; - // RequiredDropCapabilities are the capabilities that will be dropped from the container. These + // requiredDropCapabilities are the capabilities that will be dropped from the container. These // are required to be dropped and cannot be added. // +nullable // +listType=atomic repeated string requiredDropCapabilities = 5; - // AllowedCapabilities is a list of capabilities that can be requested to add to the container. + // allowedCapabilities is a list of capabilities that can be requested to add to the container. // Capabilities in this field maybe added at the pod author's discretion. // You must not list a capability in both AllowedCapabilities and RequiredDropCapabilities. // To allow all capabilities you may use '*'. @@ -262,18 +262,18 @@ message SecurityContextConstraints { // +listType=atomic repeated string allowedCapabilities = 6; - // AllowHostDirVolumePlugin determines if the policy allow containers to use the HostDir volume plugin + // allowHostDirVolumePlugin determines if the policy allow containers to use the HostDir volume plugin // +k8s:conversion-gen=false optional bool allowHostDirVolumePlugin = 7; - // Volumes is a white list of allowed volume plugins. FSType corresponds directly with the field names + // volumes is a white list of allowed volume plugins. FSType corresponds directly with the field names // of a VolumeSource (azureFile, configMap, emptyDir). To allow all volumes you may use "*". // To allow no volumes, set to ["none"]. // +nullable // +listType=atomic repeated string volumes = 8; - // AllowedFlexVolumes is a whitelist of allowed Flexvolumes. Empty or nil indicates that all + // allowedFlexVolumes is a whitelist of allowed Flexvolumes. Empty or nil indicates that all // Flexvolumes may be used. This parameter is effective only when the usage of the Flexvolumes // is allowed in the "Volumes" field. 
// +optional @@ -281,16 +281,16 @@ message SecurityContextConstraints { // +listType=atomic repeated AllowedFlexVolume allowedFlexVolumes = 21; - // AllowHostNetwork determines if the policy allows the use of HostNetwork in the pod spec. + // allowHostNetwork determines if the policy allows the use of HostNetwork in the pod spec. optional bool allowHostNetwork = 9; - // AllowHostPorts determines if the policy allows host ports in the containers. + // allowHostPorts determines if the policy allows host ports in the containers. optional bool allowHostPorts = 10; - // AllowHostPID determines if the policy allows host pid in the containers. + // allowHostPID determines if the policy allows host pid in the containers. optional bool allowHostPID = 11; - // AllowHostIPC determines if the policy allows host ipc in the containers. + // allowHostIPC determines if the policy allows host ipc in the containers. optional bool allowHostIPC = 12; // userNamespaceLevel determines if the policy allows host users in containers. @@ -305,35 +305,35 @@ message SecurityContextConstraints { // +optional optional string userNamespaceLevel = 26; - // DefaultAllowPrivilegeEscalation controls the default setting for whether a + // defaultAllowPrivilegeEscalation controls the default setting for whether a // process can gain more privileges than its parent process. // +optional // +nullable optional bool defaultAllowPrivilegeEscalation = 22; - // AllowPrivilegeEscalation determines if a pod can request to allow + // allowPrivilegeEscalation determines if a pod can request to allow // privilege escalation. If unspecified, defaults to true. // +optional // +nullable optional bool allowPrivilegeEscalation = 23; - // SELinuxContext is the strategy that will dictate what labels will be set in the SecurityContext. + // seLinuxContext is the strategy that will dictate what labels will be set in the SecurityContext. // +nullable optional SELinuxContextStrategyOptions seLinuxContext = 13; - // RunAsUser is the strategy that will dictate what RunAsUser is used in the SecurityContext. + // runAsUser is the strategy that will dictate what RunAsUser is used in the SecurityContext. // +nullable optional RunAsUserStrategyOptions runAsUser = 14; - // SupplementalGroups is the strategy that will dictate what supplemental groups are used by the SecurityContext. + // supplementalGroups is the strategy that will dictate what supplemental groups are used by the SecurityContext. // +nullable optional SupplementalGroupsStrategyOptions supplementalGroups = 15; - // FSGroup is the strategy that will dictate what fs group is used by the SecurityContext. + // fsGroup is the strategy that will dictate what fs group is used by the SecurityContext. // +nullable optional FSGroupStrategyOptions fsGroup = 16; - // ReadOnlyRootFilesystem when set to true will force containers to run with a read only root file + // readOnlyRootFilesystem when set to true will force containers to run with a read only root file // system. If the container specifically requests to run with a non-read only root file system // the SCC should deny the pod. // If set to false the container may run with a read only root file system if it wishes but it @@ -352,7 +352,7 @@ message SecurityContextConstraints { // +listType=atomic repeated string groups = 19; - // SeccompProfiles lists the allowed profiles that may be set for the pod or + // seccompProfiles lists the allowed profiles that may be set for the pod or // container's seccomp annotations. 
An unset (nil) or empty value means that no profiles may // be specifid by the pod or container. The wildcard '*' may be used to allow all profiles. When // used to generate a value for a pod the first non-wildcard profile will be used as @@ -361,7 +361,7 @@ message SecurityContextConstraints { // +listType=atomic repeated string seccompProfiles = 20; - // AllowedUnsafeSysctls is a list of explicitly allowed unsafe sysctls, defaults to none. + // allowedUnsafeSysctls is a list of explicitly allowed unsafe sysctls, defaults to none. // Each entry is either a plain sysctl name or ends in "*" in which case it is considered // as a prefix of allowed sysctls. Single * means all unsafe sysctls are allowed. // Kubelet has to whitelist all allowed unsafe sysctls explicitly to avoid rejection. @@ -374,7 +374,7 @@ message SecurityContextConstraints { // +listType=atomic repeated string allowedUnsafeSysctls = 24; - // ForbiddenSysctls is a list of explicitly forbidden sysctls, defaults to none. + // forbiddenSysctls is a list of explicitly forbidden sysctls, defaults to none. // Each entry is either a plain sysctl name or ends in "*" in which case it is considered // as a prefix of forbidden sysctls. Single * means all sysctls are forbidden. // @@ -410,10 +410,10 @@ message ServiceAccountPodSecurityPolicyReviewStatus { // SupplementalGroupsStrategyOptions defines the strategy type and options used to create the strategy. message SupplementalGroupsStrategyOptions { - // Type is the strategy that will dictate what supplemental groups is used in the SecurityContext. + // type is the strategy that will dictate what supplemental groups is used in the SecurityContext. optional string type = 1; - // Ranges are the allowed ranges of supplemental groups. If you would like to force a single + // ranges are the allowed ranges of supplemental groups. If you would like to force a single // supplemental group then supply a single range with the same start and end. // +listType=atomic repeated IDRange ranges = 2; diff --git a/vendor/github.com/openshift/api/security/v1/types.go b/vendor/github.com/openshift/api/security/v1/types.go index 9d0af5c8d..18585e97c 100644 --- a/vendor/github.com/openshift/api/security/v1/types.go +++ b/vendor/github.com/openshift/api/security/v1/types.go @@ -45,7 +45,7 @@ type SecurityContextConstraints struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Priority influences the sort order of SCCs when evaluating which SCCs to try first for + // priority influences the sort order of SCCs when evaluating which SCCs to try first for // a given pod request based on access in the Users and Groups fields. The higher the int, the // higher priority. An unset value is considered a 0 priority. If scores // for multiple SCCs are equal they will be sorted from most restrictive to @@ -54,49 +54,49 @@ type SecurityContextConstraints struct { // +nullable Priority *int32 `json:"priority" protobuf:"varint,2,opt,name=priority"` - // AllowPrivilegedContainer determines if a container can request to be run as privileged. + // allowPrivilegedContainer determines if a container can request to be run as privileged. 
AllowPrivilegedContainer bool `json:"allowPrivilegedContainer" protobuf:"varint,3,opt,name=allowPrivilegedContainer"` - // DefaultAddCapabilities is the default set of capabilities that will be added to the container + // defaultAddCapabilities is the default set of capabilities that will be added to the container // unless the pod spec specifically drops the capability. You may not list a capabiility in both // DefaultAddCapabilities and RequiredDropCapabilities. // +nullable // +listType=atomic DefaultAddCapabilities []corev1.Capability `json:"defaultAddCapabilities" protobuf:"bytes,4,rep,name=defaultAddCapabilities,casttype=Capability"` - // RequiredDropCapabilities are the capabilities that will be dropped from the container. These + // requiredDropCapabilities are the capabilities that will be dropped from the container. These // are required to be dropped and cannot be added. // +nullable // +listType=atomic RequiredDropCapabilities []corev1.Capability `json:"requiredDropCapabilities" protobuf:"bytes,5,rep,name=requiredDropCapabilities,casttype=Capability"` - // AllowedCapabilities is a list of capabilities that can be requested to add to the container. + // allowedCapabilities is a list of capabilities that can be requested to add to the container. // Capabilities in this field maybe added at the pod author's discretion. // You must not list a capability in both AllowedCapabilities and RequiredDropCapabilities. // To allow all capabilities you may use '*'. // +nullable // +listType=atomic AllowedCapabilities []corev1.Capability `json:"allowedCapabilities" protobuf:"bytes,6,rep,name=allowedCapabilities,casttype=Capability"` - // AllowHostDirVolumePlugin determines if the policy allow containers to use the HostDir volume plugin + // allowHostDirVolumePlugin determines if the policy allow containers to use the HostDir volume plugin // +k8s:conversion-gen=false AllowHostDirVolumePlugin bool `json:"allowHostDirVolumePlugin" protobuf:"varint,7,opt,name=allowHostDirVolumePlugin"` - // Volumes is a white list of allowed volume plugins. FSType corresponds directly with the field names + // volumes is a white list of allowed volume plugins. FSType corresponds directly with the field names // of a VolumeSource (azureFile, configMap, emptyDir). To allow all volumes you may use "*". // To allow no volumes, set to ["none"]. // +nullable // +listType=atomic Volumes []FSType `json:"volumes" protobuf:"bytes,8,rep,name=volumes,casttype=FSType"` - // AllowedFlexVolumes is a whitelist of allowed Flexvolumes. Empty or nil indicates that all + // allowedFlexVolumes is a whitelist of allowed Flexvolumes. Empty or nil indicates that all // Flexvolumes may be used. This parameter is effective only when the usage of the Flexvolumes // is allowed in the "Volumes" field. // +optional // +nullable // +listType=atomic AllowedFlexVolumes []AllowedFlexVolume `json:"allowedFlexVolumes,omitempty" protobuf:"bytes,21,rep,name=allowedFlexVolumes"` - // AllowHostNetwork determines if the policy allows the use of HostNetwork in the pod spec. + // allowHostNetwork determines if the policy allows the use of HostNetwork in the pod spec. AllowHostNetwork bool `json:"allowHostNetwork" protobuf:"varint,9,opt,name=allowHostNetwork"` - // AllowHostPorts determines if the policy allows host ports in the containers. + // allowHostPorts determines if the policy allows host ports in the containers. 
AllowHostPorts bool `json:"allowHostPorts" protobuf:"varint,10,opt,name=allowHostPorts"` - // AllowHostPID determines if the policy allows host pid in the containers. + // allowHostPID determines if the policy allows host pid in the containers. AllowHostPID bool `json:"allowHostPID" protobuf:"varint,11,opt,name=allowHostPID"` - // AllowHostIPC determines if the policy allows host ipc in the containers. + // allowHostIPC determines if the policy allows host ipc in the containers. AllowHostIPC bool `json:"allowHostIPC" protobuf:"varint,12,opt,name=allowHostIPC"` // userNamespaceLevel determines if the policy allows host users in containers. // Valid values are "AllowHostLevel", "RequirePodLevel", and omitted. @@ -109,29 +109,29 @@ type SecurityContextConstraints struct { // +default="AllowHostLevel" // +optional UserNamespaceLevel NamespaceLevelType `json:"userNamespaceLevel,omitempty" protobuf:"bytes,26,opt,name=userNamespaceLevel"` - // DefaultAllowPrivilegeEscalation controls the default setting for whether a + // defaultAllowPrivilegeEscalation controls the default setting for whether a // process can gain more privileges than its parent process. // +optional // +nullable DefaultAllowPrivilegeEscalation *bool `json:"defaultAllowPrivilegeEscalation,omitempty" protobuf:"varint,22,rep,name=defaultAllowPrivilegeEscalation"` - // AllowPrivilegeEscalation determines if a pod can request to allow + // allowPrivilegeEscalation determines if a pod can request to allow // privilege escalation. If unspecified, defaults to true. // +optional // +nullable AllowPrivilegeEscalation *bool `json:"allowPrivilegeEscalation,omitempty" protobuf:"varint,23,rep,name=allowPrivilegeEscalation"` - // SELinuxContext is the strategy that will dictate what labels will be set in the SecurityContext. + // seLinuxContext is the strategy that will dictate what labels will be set in the SecurityContext. // +nullable SELinuxContext SELinuxContextStrategyOptions `json:"seLinuxContext,omitempty" protobuf:"bytes,13,opt,name=seLinuxContext"` - // RunAsUser is the strategy that will dictate what RunAsUser is used in the SecurityContext. + // runAsUser is the strategy that will dictate what RunAsUser is used in the SecurityContext. // +nullable RunAsUser RunAsUserStrategyOptions `json:"runAsUser,omitempty" protobuf:"bytes,14,opt,name=runAsUser"` - // SupplementalGroups is the strategy that will dictate what supplemental groups are used by the SecurityContext. + // supplementalGroups is the strategy that will dictate what supplemental groups are used by the SecurityContext. // +nullable SupplementalGroups SupplementalGroupsStrategyOptions `json:"supplementalGroups,omitempty" protobuf:"bytes,15,opt,name=supplementalGroups"` - // FSGroup is the strategy that will dictate what fs group is used by the SecurityContext. + // fsGroup is the strategy that will dictate what fs group is used by the SecurityContext. // +nullable FSGroup FSGroupStrategyOptions `json:"fsGroup,omitempty" protobuf:"bytes,16,opt,name=fsGroup"` - // ReadOnlyRootFilesystem when set to true will force containers to run with a read only root file + // readOnlyRootFilesystem when set to true will force containers to run with a read only root file // system. If the container specifically requests to run with a non-read only root file system // the SCC should deny the pod. 
// If set to false the container may run with a read only root file system if it wishes but it @@ -149,7 +149,7 @@ type SecurityContextConstraints struct { // +listType=atomic Groups []string `json:"groups" protobuf:"bytes,19,rep,name=groups"` - // SeccompProfiles lists the allowed profiles that may be set for the pod or + // seccompProfiles lists the allowed profiles that may be set for the pod or // container's seccomp annotations. An unset (nil) or empty value means that no profiles may // be specifid by the pod or container. The wildcard '*' may be used to allow all profiles. When // used to generate a value for a pod the first non-wildcard profile will be used as @@ -158,7 +158,7 @@ type SecurityContextConstraints struct { // +listType=atomic SeccompProfiles []string `json:"seccompProfiles,omitempty" protobuf:"bytes,20,opt,name=seccompProfiles"` - // AllowedUnsafeSysctls is a list of explicitly allowed unsafe sysctls, defaults to none. + // allowedUnsafeSysctls is a list of explicitly allowed unsafe sysctls, defaults to none. // Each entry is either a plain sysctl name or ends in "*" in which case it is considered // as a prefix of allowed sysctls. Single * means all unsafe sysctls are allowed. // Kubelet has to whitelist all allowed unsafe sysctls explicitly to avoid rejection. @@ -170,7 +170,7 @@ type SecurityContextConstraints struct { // +nullable // +listType=atomic AllowedUnsafeSysctls []string `json:"allowedUnsafeSysctls,omitempty" protobuf:"bytes,24,rep,name=allowedUnsafeSysctls"` - // ForbiddenSysctls is a list of explicitly forbidden sysctls, defaults to none. + // forbiddenSysctls is a list of explicitly forbidden sysctls, defaults to none. // Each entry is either a plain sysctl name or ends in "*" in which case it is considered // as a prefix of forbidden sysctls. Single * means all sysctls are forbidden. // @@ -223,13 +223,13 @@ var ( // AllowedFlexVolume represents a single Flexvolume that is allowed to be used. type AllowedFlexVolume struct { - // Driver is the name of the Flexvolume driver. + // driver is the name of the Flexvolume driver. Driver string `json:"driver" protobuf:"bytes,1,opt,name=driver"` } // SELinuxContextStrategyOptions defines the strategy type and any options used to create the strategy. type SELinuxContextStrategyOptions struct { - // Type is the strategy that will dictate what SELinux context is used in the SecurityContext. + // type is the strategy that will dictate what SELinux context is used in the SecurityContext. Type SELinuxContextStrategyType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type,casttype=SELinuxContextStrategyType"` // seLinuxOptions required to run as; required for MustRunAs SELinuxOptions *corev1.SELinuxOptions `json:"seLinuxOptions,omitempty" protobuf:"bytes,2,opt,name=seLinuxOptions"` @@ -237,22 +237,22 @@ type SELinuxContextStrategyOptions struct { // RunAsUserStrategyOptions defines the strategy type and any options used to create the strategy. type RunAsUserStrategyOptions struct { - // Type is the strategy that will dictate what RunAsUser is used in the SecurityContext. + // type is the strategy that will dictate what RunAsUser is used in the SecurityContext. Type RunAsUserStrategyType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type,casttype=RunAsUserStrategyType"` - // UID is the user id that containers must run as. Required for the MustRunAs strategy if not using + // uid is the user id that containers must run as. 
Required for the MustRunAs strategy if not using // namespace/service account allocated uids. UID *int64 `json:"uid,omitempty" protobuf:"varint,2,opt,name=uid"` - // UIDRangeMin defines the min value for a strategy that allocates by range. + // uidRangeMin defines the min value for a strategy that allocates by range. UIDRangeMin *int64 `json:"uidRangeMin,omitempty" protobuf:"varint,3,opt,name=uidRangeMin"` - // UIDRangeMax defines the max value for a strategy that allocates by range. + // uidRangeMax defines the max value for a strategy that allocates by range. UIDRangeMax *int64 `json:"uidRangeMax,omitempty" protobuf:"varint,4,opt,name=uidRangeMax"` } // FSGroupStrategyOptions defines the strategy type and options used to create the strategy. type FSGroupStrategyOptions struct { - // Type is the strategy that will dictate what FSGroup is used in the SecurityContext. + // type is the strategy that will dictate what FSGroup is used in the SecurityContext. Type FSGroupStrategyType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type,casttype=FSGroupStrategyType"` - // Ranges are the allowed ranges of fs groups. If you would like to force a single + // ranges are the allowed ranges of fs groups. If you would like to force a single // fs group then supply a single range with the same start and end. // +listType=atomic Ranges []IDRange `json:"ranges,omitempty" protobuf:"bytes,2,rep,name=ranges"` @@ -260,9 +260,9 @@ type FSGroupStrategyOptions struct { // SupplementalGroupsStrategyOptions defines the strategy type and options used to create the strategy. type SupplementalGroupsStrategyOptions struct { - // Type is the strategy that will dictate what supplemental groups is used in the SecurityContext. + // type is the strategy that will dictate what supplemental groups is used in the SecurityContext. Type SupplementalGroupsStrategyType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type,casttype=SupplementalGroupsStrategyType"` - // Ranges are the allowed ranges of supplemental groups. If you would like to force a single + // ranges are the allowed ranges of supplemental groups. If you would like to force a single // supplemental group then supply a single range with the same start and end. // +listType=atomic Ranges []IDRange `json:"ranges,omitempty" protobuf:"bytes,2,rep,name=ranges"` @@ -271,9 +271,9 @@ type SupplementalGroupsStrategyOptions struct { // IDRange provides a min/max of an allowed range of IDs. // TODO: this could be reused for UIDs. type IDRange struct { - // Min is the start of the range, inclusive. + // min is the start of the range, inclusive. Min int64 `json:"min,omitempty" protobuf:"varint,1,opt,name=min"` - // Max is the end of the range, inclusive. + // max is the end of the range, inclusive. 
Max int64 `json:"max,omitempty" protobuf:"varint,2,opt,name=max"` } diff --git a/vendor/github.com/openshift/api/security/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/security/v1/zz_generated.swagger_doc_generated.go index 2f242366a..29cddf7e6 100644 --- a/vendor/github.com/openshift/api/security/v1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/security/v1/zz_generated.swagger_doc_generated.go @@ -13,7 +13,7 @@ package v1 // AUTO-GENERATED FUNCTIONS START HERE var map_AllowedFlexVolume = map[string]string{ "": "AllowedFlexVolume represents a single Flexvolume that is allowed to be used.", - "driver": "Driver is the name of the Flexvolume driver.", + "driver": "driver is the name of the Flexvolume driver.", } func (AllowedFlexVolume) SwaggerDoc() map[string]string { @@ -22,8 +22,8 @@ func (AllowedFlexVolume) SwaggerDoc() map[string]string { var map_FSGroupStrategyOptions = map[string]string{ "": "FSGroupStrategyOptions defines the strategy type and options used to create the strategy.", - "type": "Type is the strategy that will dictate what FSGroup is used in the SecurityContext.", - "ranges": "Ranges are the allowed ranges of fs groups. If you would like to force a single fs group then supply a single range with the same start and end.", + "type": "type is the strategy that will dictate what FSGroup is used in the SecurityContext.", + "ranges": "ranges are the allowed ranges of fs groups. If you would like to force a single fs group then supply a single range with the same start and end.", } func (FSGroupStrategyOptions) SwaggerDoc() map[string]string { @@ -32,8 +32,8 @@ func (FSGroupStrategyOptions) SwaggerDoc() map[string]string { var map_IDRange = map[string]string{ "": "IDRange provides a min/max of an allowed range of IDs.", - "min": "Min is the start of the range, inclusive.", - "max": "Max is the end of the range, inclusive.", + "min": "min is the start of the range, inclusive.", + "max": "max is the end of the range, inclusive.", } func (IDRange) SwaggerDoc() map[string]string { @@ -146,10 +146,10 @@ func (RangeAllocationList) SwaggerDoc() map[string]string { var map_RunAsUserStrategyOptions = map[string]string{ "": "RunAsUserStrategyOptions defines the strategy type and any options used to create the strategy.", - "type": "Type is the strategy that will dictate what RunAsUser is used in the SecurityContext.", - "uid": "UID is the user id that containers must run as. Required for the MustRunAs strategy if not using namespace/service account allocated uids.", - "uidRangeMin": "UIDRangeMin defines the min value for a strategy that allocates by range.", - "uidRangeMax": "UIDRangeMax defines the max value for a strategy that allocates by range.", + "type": "type is the strategy that will dictate what RunAsUser is used in the SecurityContext.", + "uid": "uid is the user id that containers must run as. 
Required for the MustRunAs strategy if not using namespace/service account allocated uids.", + "uidRangeMin": "uidRangeMin defines the min value for a strategy that allocates by range.", + "uidRangeMax": "uidRangeMax defines the max value for a strategy that allocates by range.", } func (RunAsUserStrategyOptions) SwaggerDoc() map[string]string { @@ -158,7 +158,7 @@ func (RunAsUserStrategyOptions) SwaggerDoc() map[string]string { var map_SELinuxContextStrategyOptions = map[string]string{ "": "SELinuxContextStrategyOptions defines the strategy type and any options used to create the strategy.", - "type": "Type is the strategy that will dictate what SELinux context is used in the SecurityContext.", + "type": "type is the strategy that will dictate what SELinux context is used in the SecurityContext.", "seLinuxOptions": "seLinuxOptions required to run as; required for MustRunAs", } @@ -169,31 +169,31 @@ func (SELinuxContextStrategyOptions) SwaggerDoc() map[string]string { var map_SecurityContextConstraints = map[string]string{ "": "SecurityContextConstraints governs the ability to make requests that affect the SecurityContext that will be applied to a container. For historical reasons SCC was exposed under the core Kubernetes API group. That exposure is deprecated and will be removed in a future release - users should instead use the security.openshift.io group to manage SecurityContextConstraints.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "priority": "Priority influences the sort order of SCCs when evaluating which SCCs to try first for a given pod request based on access in the Users and Groups fields. The higher the int, the higher priority. An unset value is considered a 0 priority. If scores for multiple SCCs are equal they will be sorted from most restrictive to least restrictive. If both priorities and restrictions are equal the SCCs will be sorted by name.", - "allowPrivilegedContainer": "AllowPrivilegedContainer determines if a container can request to be run as privileged.", - "defaultAddCapabilities": "DefaultAddCapabilities is the default set of capabilities that will be added to the container unless the pod spec specifically drops the capability. You may not list a capabiility in both DefaultAddCapabilities and RequiredDropCapabilities.", - "requiredDropCapabilities": "RequiredDropCapabilities are the capabilities that will be dropped from the container. These are required to be dropped and cannot be added.", - "allowedCapabilities": "AllowedCapabilities is a list of capabilities that can be requested to add to the container. Capabilities in this field maybe added at the pod author's discretion. You must not list a capability in both AllowedCapabilities and RequiredDropCapabilities. To allow all capabilities you may use '*'.", - "allowHostDirVolumePlugin": "AllowHostDirVolumePlugin determines if the policy allow containers to use the HostDir volume plugin", - "volumes": "Volumes is a white list of allowed volume plugins. FSType corresponds directly with the field names of a VolumeSource (azureFile, configMap, emptyDir). To allow all volumes you may use \"*\". To allow no volumes, set to [\"none\"].", - "allowedFlexVolumes": "AllowedFlexVolumes is a whitelist of allowed Flexvolumes. 
Empty or nil indicates that all Flexvolumes may be used. This parameter is effective only when the usage of the Flexvolumes is allowed in the \"Volumes\" field.", - "allowHostNetwork": "AllowHostNetwork determines if the policy allows the use of HostNetwork in the pod spec.", - "allowHostPorts": "AllowHostPorts determines if the policy allows host ports in the containers.", - "allowHostPID": "AllowHostPID determines if the policy allows host pid in the containers.", - "allowHostIPC": "AllowHostIPC determines if the policy allows host ipc in the containers.", + "priority": "priority influences the sort order of SCCs when evaluating which SCCs to try first for a given pod request based on access in the Users and Groups fields. The higher the int, the higher priority. An unset value is considered a 0 priority. If scores for multiple SCCs are equal they will be sorted from most restrictive to least restrictive. If both priorities and restrictions are equal the SCCs will be sorted by name.", + "allowPrivilegedContainer": "allowPrivilegedContainer determines if a container can request to be run as privileged.", + "defaultAddCapabilities": "defaultAddCapabilities is the default set of capabilities that will be added to the container unless the pod spec specifically drops the capability. You may not list a capabiility in both DefaultAddCapabilities and RequiredDropCapabilities.", + "requiredDropCapabilities": "requiredDropCapabilities are the capabilities that will be dropped from the container. These are required to be dropped and cannot be added.", + "allowedCapabilities": "allowedCapabilities is a list of capabilities that can be requested to add to the container. Capabilities in this field maybe added at the pod author's discretion. You must not list a capability in both AllowedCapabilities and RequiredDropCapabilities. To allow all capabilities you may use '*'.", + "allowHostDirVolumePlugin": "allowHostDirVolumePlugin determines if the policy allow containers to use the HostDir volume plugin", + "volumes": "volumes is a white list of allowed volume plugins. FSType corresponds directly with the field names of a VolumeSource (azureFile, configMap, emptyDir). To allow all volumes you may use \"*\". To allow no volumes, set to [\"none\"].", + "allowedFlexVolumes": "allowedFlexVolumes is a whitelist of allowed Flexvolumes. Empty or nil indicates that all Flexvolumes may be used. This parameter is effective only when the usage of the Flexvolumes is allowed in the \"Volumes\" field.", + "allowHostNetwork": "allowHostNetwork determines if the policy allows the use of HostNetwork in the pod spec.", + "allowHostPorts": "allowHostPorts determines if the policy allows host ports in the containers.", + "allowHostPID": "allowHostPID determines if the policy allows host pid in the containers.", + "allowHostIPC": "allowHostIPC determines if the policy allows host ipc in the containers.", "userNamespaceLevel": "userNamespaceLevel determines if the policy allows host users in containers. Valid values are \"AllowHostLevel\", \"RequirePodLevel\", and omitted. When \"AllowHostLevel\" is set, a pod author may set `hostUsers` to either `true` or `false`. When \"RequirePodLevel\" is set, a pod author must set `hostUsers` to `false`. 
When omitted, the default value is \"AllowHostLevel\".", - "defaultAllowPrivilegeEscalation": "DefaultAllowPrivilegeEscalation controls the default setting for whether a process can gain more privileges than its parent process.", - "allowPrivilegeEscalation": "AllowPrivilegeEscalation determines if a pod can request to allow privilege escalation. If unspecified, defaults to true.", - "seLinuxContext": "SELinuxContext is the strategy that will dictate what labels will be set in the SecurityContext.", - "runAsUser": "RunAsUser is the strategy that will dictate what RunAsUser is used in the SecurityContext.", - "supplementalGroups": "SupplementalGroups is the strategy that will dictate what supplemental groups are used by the SecurityContext.", - "fsGroup": "FSGroup is the strategy that will dictate what fs group is used by the SecurityContext.", - "readOnlyRootFilesystem": "ReadOnlyRootFilesystem when set to true will force containers to run with a read only root file system. If the container specifically requests to run with a non-read only root file system the SCC should deny the pod. If set to false the container may run with a read only root file system if it wishes but it will not be forced to.", + "defaultAllowPrivilegeEscalation": "defaultAllowPrivilegeEscalation controls the default setting for whether a process can gain more privileges than its parent process.", + "allowPrivilegeEscalation": "allowPrivilegeEscalation determines if a pod can request to allow privilege escalation. If unspecified, defaults to true.", + "seLinuxContext": "seLinuxContext is the strategy that will dictate what labels will be set in the SecurityContext.", + "runAsUser": "runAsUser is the strategy that will dictate what RunAsUser is used in the SecurityContext.", + "supplementalGroups": "supplementalGroups is the strategy that will dictate what supplemental groups are used by the SecurityContext.", + "fsGroup": "fsGroup is the strategy that will dictate what fs group is used by the SecurityContext.", + "readOnlyRootFilesystem": "readOnlyRootFilesystem when set to true will force containers to run with a read only root file system. If the container specifically requests to run with a non-read only root file system the SCC should deny the pod. If set to false the container may run with a read only root file system if it wishes but it will not be forced to.", "users": "The users who have permissions to use this security context constraints", "groups": "The groups that have permission to use this security context constraints", - "seccompProfiles": "SeccompProfiles lists the allowed profiles that may be set for the pod or container's seccomp annotations. An unset (nil) or empty value means that no profiles may be specifid by the pod or container.\tThe wildcard '*' may be used to allow all profiles. When used to generate a value for a pod the first non-wildcard profile will be used as the default.", - "allowedUnsafeSysctls": "AllowedUnsafeSysctls is a list of explicitly allowed unsafe sysctls, defaults to none. Each entry is either a plain sysctl name or ends in \"*\" in which case it is considered as a prefix of allowed sysctls. Single * means all unsafe sysctls are allowed. Kubelet has to whitelist all allowed unsafe sysctls explicitly to avoid rejection.\n\nExamples: e.g. \"foo/*\" allows \"foo/bar\", \"foo/baz\", etc. e.g. \"foo.*\" allows \"foo.bar\", \"foo.baz\", etc.", - "forbiddenSysctls": "ForbiddenSysctls is a list of explicitly forbidden sysctls, defaults to none. 
Each entry is either a plain sysctl name or ends in \"*\" in which case it is considered as a prefix of forbidden sysctls. Single * means all sysctls are forbidden.\n\nExamples: e.g. \"foo/*\" forbids \"foo/bar\", \"foo/baz\", etc. e.g. \"foo.*\" forbids \"foo.bar\", \"foo.baz\", etc.", + "seccompProfiles": "seccompProfiles lists the allowed profiles that may be set for the pod or container's seccomp annotations. An unset (nil) or empty value means that no profiles may be specifid by the pod or container.\tThe wildcard '*' may be used to allow all profiles. When used to generate a value for a pod the first non-wildcard profile will be used as the default.", + "allowedUnsafeSysctls": "allowedUnsafeSysctls is a list of explicitly allowed unsafe sysctls, defaults to none. Each entry is either a plain sysctl name or ends in \"*\" in which case it is considered as a prefix of allowed sysctls. Single * means all unsafe sysctls are allowed. Kubelet has to whitelist all allowed unsafe sysctls explicitly to avoid rejection.\n\nExamples: e.g. \"foo/*\" allows \"foo/bar\", \"foo/baz\", etc. e.g. \"foo.*\" allows \"foo.bar\", \"foo.baz\", etc.", + "forbiddenSysctls": "forbiddenSysctls is a list of explicitly forbidden sysctls, defaults to none. Each entry is either a plain sysctl name or ends in \"*\" in which case it is considered as a prefix of forbidden sysctls. Single * means all sysctls are forbidden.\n\nExamples: e.g. \"foo/*\" forbids \"foo/bar\", \"foo/baz\", etc. e.g. \"foo.*\" forbids \"foo.bar\", \"foo.baz\", etc.", } func (SecurityContextConstraints) SwaggerDoc() map[string]string { @@ -221,8 +221,8 @@ func (ServiceAccountPodSecurityPolicyReviewStatus) SwaggerDoc() map[string]strin var map_SupplementalGroupsStrategyOptions = map[string]string{ "": "SupplementalGroupsStrategyOptions defines the strategy type and options used to create the strategy.", - "type": "Type is the strategy that will dictate what supplemental groups is used in the SecurityContext.", - "ranges": "Ranges are the allowed ranges of supplemental groups. If you would like to force a single supplemental group then supply a single range with the same start and end.", + "type": "type is the strategy that will dictate what supplemental groups is used in the SecurityContext.", + "ranges": "ranges are the allowed ranges of supplemental groups. 
If you would like to force a single supplemental group then supply a single range with the same start and end.", } func (SupplementalGroupsStrategyOptions) SwaggerDoc() map[string]string { diff --git a/vendor/github.com/openshift/api/servicecertsigner/v1alpha1/types.go b/vendor/github.com/openshift/api/servicecertsigner/v1alpha1/types.go index ebd8d75ef..3ad1c560f 100644 --- a/vendor/github.com/openshift/api/servicecertsigner/v1alpha1/types.go +++ b/vendor/github.com/openshift/api/servicecertsigner/v1alpha1/types.go @@ -48,6 +48,6 @@ type ServiceCertSignerOperatorConfigList struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ListMeta `json:"metadata,omitempty"` - // Items contains the items + // items contains the items Items []ServiceCertSignerOperatorConfig `json:"items"` } diff --git a/vendor/github.com/openshift/api/servicecertsigner/v1alpha1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/servicecertsigner/v1alpha1/zz_generated.swagger_doc_generated.go index 13b3b7364..5e341b1da 100644 --- a/vendor/github.com/openshift/api/servicecertsigner/v1alpha1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/servicecertsigner/v1alpha1/zz_generated.swagger_doc_generated.go @@ -23,7 +23,7 @@ func (ServiceCertSignerOperatorConfig) SwaggerDoc() map[string]string { var map_ServiceCertSignerOperatorConfigList = map[string]string{ "": "ServiceCertSignerOperatorConfigList is a collection of items\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.", "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items contains the items", + "items": "items contains the items", } func (ServiceCertSignerOperatorConfigList) SwaggerDoc() map[string]string { diff --git a/vendor/github.com/openshift/api/sharedresource/v1alpha1/types_shared_configmap.go b/vendor/github.com/openshift/api/sharedresource/v1alpha1/types_shared_configmap.go index 1eea47d02..2a4a0d1b6 100644 --- a/vendor/github.com/openshift/api/sharedresource/v1alpha1/types_shared_configmap.go +++ b/vendor/github.com/openshift/api/sharedresource/v1alpha1/types_shared_configmap.go @@ -11,19 +11,20 @@ import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" // "csi.sharedresource.openshift.io" CSI driver and a reference to the SharedConfigMap in the volume attributes: // // spec: -// volumes: -// - name: shared-configmap -// csi: -// driver: csi.sharedresource.openshift.io -// volumeAttributes: -// sharedConfigMap: my-share +// +// volumes: +// - name: shared-configmap +// csi: +// driver: csi.sharedresource.openshift.io +// volumeAttributes: +// sharedConfigMap: my-share // // For the mount to be successful, the pod's service account must be granted permission to 'use' the named SharedConfigMap object // within its namespace with an appropriate Role and RoleBinding. For compactness, here are example `oc` invocations for creating // such Role and RoleBinding objects. 
// -// `oc create role shared-resource-my-share --verb=use --resource=sharedconfigmaps.sharedresource.openshift.io --resource-name=my-share` -// `oc create rolebinding shared-resource-my-share --role=shared-resource-my-share --serviceaccount=my-namespace:default` +// `oc create role shared-resource-my-share --verb=use --resource=sharedconfigmaps.sharedresource.openshift.io --resource-name=my-share` +// `oc create rolebinding shared-resource-my-share --role=shared-resource-my-share --serviceaccount=my-namespace:default` // // Shared resource objects, in this case ConfigMaps, have default permissions of list, get, and watch for system authenticated users. // @@ -45,7 +46,7 @@ type SharedConfigMap struct { metav1.ObjectMeta `json:"metadata,omitempty"` // spec is the specification of the desired shared configmap - // +kubebuilder:validation:Required + // +required Spec SharedConfigMapSpec `json:"spec,omitempty"` // status is the observed status of the shared configmap @@ -72,10 +73,10 @@ type SharedConfigMapList struct { // SharedConfigMapReference contains information about which ConfigMap to share type SharedConfigMapReference struct { // name represents the name of the ConfigMap that is being referenced. - // +kubebuilder:validation:Required + // +required Name string `json:"name"` // namespace represents the namespace where the referenced ConfigMap is located. - // +kubebuilder:validation:Required + // +required Namespace string `json:"namespace"` } @@ -83,7 +84,7 @@ type SharedConfigMapReference struct { // +k8s:openapi-gen=true type SharedConfigMapSpec struct { //configMapRef is a reference to the ConfigMap to share - // +kubebuilder:validation:Required + // +required ConfigMapRef SharedConfigMapReference `json:"configMapRef"` // description is a user readable explanation of what the backing resource provides. Description string `json:"description,omitempty"` @@ -92,7 +93,8 @@ type SharedConfigMapSpec struct { // SharedSecretStatus contains the observed status of the shared resource type SharedConfigMapStatus struct { // conditions represents any observations made on this particular shared resource by the underlying CSI driver or Share controller. - // +patchMergeKey=type - // +patchStrategy=merge - Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"` + // +listType=map + // +listMapKey=type + // +optional + Conditions []metav1.Condition `json:"conditions,omitempty"` } diff --git a/vendor/github.com/openshift/api/sharedresource/v1alpha1/types_shared_secret.go b/vendor/github.com/openshift/api/sharedresource/v1alpha1/types_shared_secret.go index 654106bce..be06f9774 100644 --- a/vendor/github.com/openshift/api/sharedresource/v1alpha1/types_shared_secret.go +++ b/vendor/github.com/openshift/api/sharedresource/v1alpha1/types_shared_secret.go @@ -11,19 +11,20 @@ import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" // "csi.sharedresource.openshift.io" CSI driver and a reference to the SharedSecret in the volume attributes: // // spec: -// volumes: -// - name: shared-secret -// csi: -// driver: csi.sharedresource.openshift.io -// volumeAttributes: -// sharedSecret: my-share +// +// volumes: +// - name: shared-secret +// csi: +// driver: csi.sharedresource.openshift.io +// volumeAttributes: +// sharedSecret: my-share // // For the mount to be successful, the pod's service account must be granted permission to 'use' the named SharedSecret object // within its namespace with an appropriate Role and RoleBinding. 
For compactness, here are example `oc` invocations for creating // such Role and RoleBinding objects. // -// `oc create role shared-resource-my-share --verb=use --resource=sharedsecrets.sharedresource.openshift.io --resource-name=my-share` -// `oc create rolebinding shared-resource-my-share --role=shared-resource-my-share --serviceaccount=my-namespace:default` +// `oc create role shared-resource-my-share --verb=use --resource=sharedsecrets.sharedresource.openshift.io --resource-name=my-share` +// `oc create rolebinding shared-resource-my-share --role=shared-resource-my-share --serviceaccount=my-namespace:default` // // Shared resource objects, in this case Secrets, have default permissions of list, get, and watch for system authenticated users. // @@ -44,7 +45,7 @@ type SharedSecret struct { metav1.ObjectMeta `json:"metadata,omitempty"` // spec is the specification of the desired shared secret - // +kubebuilder:validation:Required + // +required Spec SharedSecretSpec `json:"spec,omitempty"` // status is the observed status of the shared secret @@ -71,10 +72,10 @@ type SharedSecretList struct { // SharedSecretReference contains information about which Secret to share type SharedSecretReference struct { // name represents the name of the Secret that is being referenced. - // +kubebuilder:validation:Required + // +required Name string `json:"name"` // namespace represents the namespace where the referenced Secret is located. - // +kubebuilder:validation:Required + // +required Namespace string `json:"namespace"` } @@ -82,7 +83,7 @@ type SharedSecretReference struct { // +k8s:openapi-gen=true type SharedSecretSpec struct { // secretRef is a reference to the Secret to share - // +kubebuilder:validation:Required + // +required SecretRef SharedSecretReference `json:"secretRef"` // description is a user readable explanation of what the backing resource provides. Description string `json:"description,omitempty"` @@ -91,7 +92,8 @@ type SharedSecretSpec struct { // SharedSecretStatus contains the observed status of the shared resource type SharedSecretStatus struct { // conditions represents any observations made on this particular shared resource by the underlying CSI driver or Share controller. - // +patchMergeKey=type - // +patchStrategy=merge - Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"` + // +listType=map + // +listMapKey=type + // +optional + Conditions []metav1.Condition `json:"conditions,omitempty"` } diff --git a/vendor/github.com/openshift/api/sharedresource/v1alpha1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/sharedresource/v1alpha1/zz_generated.swagger_doc_generated.go index f432d63f7..ea6334d14 100644 --- a/vendor/github.com/openshift/api/sharedresource/v1alpha1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/sharedresource/v1alpha1/zz_generated.swagger_doc_generated.go @@ -12,7 +12,7 @@ package v1alpha1 // AUTO-GENERATED FUNCTIONS START HERE var map_SharedConfigMap = map[string]string{ - "": "SharedConfigMap allows a ConfigMap to be shared across namespaces. 
Pods can mount the shared ConfigMap by adding a CSI volume to the pod specification using the \"csi.sharedresource.openshift.io\" CSI driver and a reference to the SharedConfigMap in the volume attributes:\n\nspec:\n volumes:\n - name: shared-configmap\n csi:\n driver: csi.sharedresource.openshift.io\n volumeAttributes:\n sharedConfigMap: my-share\n\nFor the mount to be successful, the pod's service account must be granted permission to 'use' the named SharedConfigMap object within its namespace with an appropriate Role and RoleBinding. For compactness, here are example `oc` invocations for creating such Role and RoleBinding objects.\n\n `oc create role shared-resource-my-share --verb=use --resource=sharedconfigmaps.sharedresource.openshift.io --resource-name=my-share`\n `oc create rolebinding shared-resource-my-share --role=shared-resource-my-share --serviceaccount=my-namespace:default`\n\nShared resource objects, in this case ConfigMaps, have default permissions of list, get, and watch for system authenticated users.\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. These capabilities should not be used by applications needing long term support.", + "": "SharedConfigMap allows a ConfigMap to be shared across namespaces. Pods can mount the shared ConfigMap by adding a CSI volume to the pod specification using the \"csi.sharedresource.openshift.io\" CSI driver and a reference to the SharedConfigMap in the volume attributes:\n\nspec:\n\n\tvolumes:\n\t- name: shared-configmap\n\t csi:\n\t driver: csi.sharedresource.openshift.io\n\t volumeAttributes:\n\t sharedConfigMap: my-share\n\nFor the mount to be successful, the pod's service account must be granted permission to 'use' the named SharedConfigMap object within its namespace with an appropriate Role and RoleBinding. For compactness, here are example `oc` invocations for creating such Role and RoleBinding objects.\n\n\t`oc create role shared-resource-my-share --verb=use --resource=sharedconfigmaps.sharedresource.openshift.io --resource-name=my-share`\n\t`oc create rolebinding shared-resource-my-share --role=shared-resource-my-share --serviceaccount=my-namespace:default`\n\nShared resource objects, in this case ConfigMaps, have default permissions of list, get, and watch for system authenticated users.\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. These capabilities should not be used by applications needing long term support.", "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "spec": "spec is the specification of the desired shared configmap", "status": "status is the observed status of the shared configmap", @@ -61,7 +61,7 @@ func (SharedConfigMapStatus) SwaggerDoc() map[string]string { } var map_SharedSecret = map[string]string{ - "": "SharedSecret allows a Secret to be shared across namespaces. 
Pods can mount the shared Secret by adding a CSI volume to the pod specification using the \"csi.sharedresource.openshift.io\" CSI driver and a reference to the SharedSecret in the volume attributes:\n\nspec:\n volumes:\n - name: shared-secret\n csi:\n driver: csi.sharedresource.openshift.io\n volumeAttributes:\n sharedSecret: my-share\n\nFor the mount to be successful, the pod's service account must be granted permission to 'use' the named SharedSecret object within its namespace with an appropriate Role and RoleBinding. For compactness, here are example `oc` invocations for creating such Role and RoleBinding objects.\n\n `oc create role shared-resource-my-share --verb=use --resource=sharedsecrets.sharedresource.openshift.io --resource-name=my-share`\n `oc create rolebinding shared-resource-my-share --role=shared-resource-my-share --serviceaccount=my-namespace:default`\n\nShared resource objects, in this case Secrets, have default permissions of list, get, and watch for system authenticated users.\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. These capabilities should not be used by applications needing long term support.", + "": "SharedSecret allows a Secret to be shared across namespaces. Pods can mount the shared Secret by adding a CSI volume to the pod specification using the \"csi.sharedresource.openshift.io\" CSI driver and a reference to the SharedSecret in the volume attributes:\n\nspec:\n\n\tvolumes:\n\t- name: shared-secret\n\t csi:\n\t driver: csi.sharedresource.openshift.io\n\t volumeAttributes:\n\t sharedSecret: my-share\n\nFor the mount to be successful, the pod's service account must be granted permission to 'use' the named SharedSecret object within its namespace with an appropriate Role and RoleBinding. For compactness, here are example `oc` invocations for creating such Role and RoleBinding objects.\n\n\t`oc create role shared-resource-my-share --verb=use --resource=sharedsecrets.sharedresource.openshift.io --resource-name=my-share`\n\t`oc create rolebinding shared-resource-my-share --role=shared-resource-my-share --serviceaccount=my-namespace:default`\n\nShared resource objects, in this case Secrets, have default permissions of list, get, and watch for system authenticated users.\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. These capabilities should not be used by applications needing long term support.", "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "spec": "spec is the specification of the desired shared secret", "status": "status is the observed status of the shared secret", diff --git a/vendor/github.com/openshift/api/template/v1/generated.proto b/vendor/github.com/openshift/api/template/v1/generated.proto index 5ff4d7b1d..8f27eb48a 100644 --- a/vendor/github.com/openshift/api/template/v1/generated.proto +++ b/vendor/github.com/openshift/api/template/v1/generated.proto @@ -42,7 +42,7 @@ message BrokerTemplateInstanceList { // BrokerTemplateInstanceSpec describes the state of a BrokerTemplateInstance. 
message BrokerTemplateInstanceSpec { - // templateinstance is a reference to a TemplateInstance object residing + // templateInstance is a reference to a TemplateInstance object residing // in a namespace. optional .k8s.io.api.core.v1.ObjectReference templateInstance = 1; @@ -50,7 +50,7 @@ message BrokerTemplateInstanceSpec { // containing the necessary template parameters. optional .k8s.io.api.core.v1.ObjectReference secret = 2; - // bindingids is a list of 'binding_id's provided during successive bind + // bindingIDs is a list of 'binding_id's provided during successive bind // calls to the template service broker. repeated string bindingIDs = 3; } @@ -67,17 +67,17 @@ message ExtraValue { // Parameter defines a name/value variable that is to be processed during // the Template to Config transformation. message Parameter { - // Name must be set and it can be referenced in Template + // name must be set and it can be referenced in Template // Items using ${PARAMETER_NAME}. Required. optional string name = 1; // Optional: The name that will show in UI instead of parameter 'Name' optional string displayName = 2; - // Description of a parameter. Optional. + // description of a parameter. Optional. optional string description = 3; - // Value holds the Parameter data. If specified, the generator will be + // value holds the Parameter data. If specified, the generator will be // ignored. The value replaces all occurrences of the Parameter ${Name} // expression during the Template to Config transformation. Optional. optional string value = 4; @@ -101,7 +101,7 @@ message Parameter { // "[a-zA-Z0-9]{8}" | "hW4yQU5i" optional string generate = 5; - // From is an input value for the generator. Optional. + // from is an input value for the generator. Optional. optional string from = 6; // Optional: Indicates the parameter must have a value. Defaults to false. @@ -164,21 +164,21 @@ message TemplateInstance { // TemplateInstanceCondition contains condition information for a // TemplateInstance. message TemplateInstanceCondition { - // Type of the condition, currently Ready or InstantiateFailure. + // type of the condition, currently Ready or InstantiateFailure. optional string type = 1; - // Status of the condition, one of True, False or Unknown. + // status of the condition, one of True, False or Unknown. optional string status = 2; - // LastTransitionTime is the last time a condition status transitioned from + // lastTransitionTime is the last time a condition status transitioned from // one state to another. optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; - // Reason is a brief machine readable explanation for the condition's last + // reason is a brief machine readable explanation for the condition's last // transition. optional string reason = 4; - // Message is a human readable description of the details of the last + // message is a human readable description of the details of the last // transition, complementing reason. optional string message = 5; } @@ -243,7 +243,7 @@ message TemplateInstanceStatus { // TemplateInstance's current state. repeated TemplateInstanceCondition conditions = 1; - // Objects references the objects created by the TemplateInstance. + // objects references the objects created by the TemplateInstance. 
repeated TemplateInstanceObject objects = 2; } @@ -256,7 +256,7 @@ message TemplateList { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // Items is a list of templates + // items is a list of templates repeated Template items = 2; } diff --git a/vendor/github.com/openshift/api/template/v1/types.go b/vendor/github.com/openshift/api/template/v1/types.go index 9d95912b2..5510b0f90 100644 --- a/vendor/github.com/openshift/api/template/v1/types.go +++ b/vendor/github.com/openshift/api/template/v1/types.go @@ -61,24 +61,24 @@ type TemplateList struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Items is a list of templates + // items is a list of templates Items []Template `json:"items" protobuf:"bytes,2,rep,name=items"` } // Parameter defines a name/value variable that is to be processed during // the Template to Config transformation. type Parameter struct { - // Name must be set and it can be referenced in Template + // name must be set and it can be referenced in Template // Items using ${PARAMETER_NAME}. Required. Name string `json:"name" protobuf:"bytes,1,opt,name=name"` // Optional: The name that will show in UI instead of parameter 'Name' DisplayName string `json:"displayName,omitempty" protobuf:"bytes,2,opt,name=displayName"` - // Description of a parameter. Optional. + // description of a parameter. Optional. Description string `json:"description,omitempty" protobuf:"bytes,3,opt,name=description"` - // Value holds the Parameter data. If specified, the generator will be + // value holds the Parameter data. If specified, the generator will be // ignored. The value replaces all occurrences of the Parameter ${Name} // expression during the Template to Config transformation. Optional. Value string `json:"value,omitempty" protobuf:"bytes,4,opt,name=value"` @@ -103,7 +103,7 @@ type Parameter struct { // Generate string `json:"generate,omitempty" protobuf:"bytes,5,opt,name=generate"` - // From is an input value for the generator. Optional. + // from is an input value for the generator. Optional. From string `json:"from,omitempty" protobuf:"bytes,6,opt,name=from"` // Optional: Indicates the parameter must have a value. Defaults to false. @@ -181,24 +181,24 @@ type TemplateInstanceStatus struct { // TemplateInstance's current state. Conditions []TemplateInstanceCondition `json:"conditions,omitempty" protobuf:"bytes,1,rep,name=conditions"` - // Objects references the objects created by the TemplateInstance. + // objects references the objects created by the TemplateInstance. Objects []TemplateInstanceObject `json:"objects,omitempty" protobuf:"bytes,2,rep,name=objects"` } // TemplateInstanceCondition contains condition information for a // TemplateInstance. type TemplateInstanceCondition struct { - // Type of the condition, currently Ready or InstantiateFailure. + // type of the condition, currently Ready or InstantiateFailure. Type TemplateInstanceConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=TemplateInstanceConditionType"` - // Status of the condition, one of True, False or Unknown. + // status of the condition, one of True, False or Unknown. 
Status corev1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status"` - // LastTransitionTime is the last time a condition status transitioned from + // lastTransitionTime is the last time a condition status transitioned from // one state to another. LastTransitionTime metav1.Time `json:"lastTransitionTime" protobuf:"bytes,3,opt,name=lastTransitionTime"` - // Reason is a brief machine readable explanation for the condition's last + // reason is a brief machine readable explanation for the condition's last // transition. Reason string `json:"reason" protobuf:"bytes,4,opt,name=reason"` - // Message is a human readable description of the details of the last + // message is a human readable description of the details of the last // transition, complementing reason. Message string `json:"message" protobuf:"bytes,5,opt,name=message"` } @@ -263,7 +263,7 @@ type BrokerTemplateInstance struct { // BrokerTemplateInstanceSpec describes the state of a BrokerTemplateInstance. type BrokerTemplateInstanceSpec struct { - // templateinstance is a reference to a TemplateInstance object residing + // templateInstance is a reference to a TemplateInstance object residing // in a namespace. TemplateInstance corev1.ObjectReference `json:"templateInstance" protobuf:"bytes,1,opt,name=templateInstance"` @@ -271,7 +271,7 @@ type BrokerTemplateInstanceSpec struct { // containing the necessary template parameters. Secret corev1.ObjectReference `json:"secret" protobuf:"bytes,2,opt,name=secret"` - // bindingids is a list of 'binding_id's provided during successive bind + // bindingIDs is a list of 'binding_id's provided during successive bind // calls to the template service broker. BindingIDs []string `json:"bindingIDs,omitempty" protobuf:"bytes,3,rep,name=bindingIDs"` } diff --git a/vendor/github.com/openshift/api/template/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/template/v1/zz_generated.swagger_doc_generated.go index 8ed3822c8..761390d02 100644 --- a/vendor/github.com/openshift/api/template/v1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/template/v1/zz_generated.swagger_doc_generated.go @@ -33,9 +33,9 @@ func (BrokerTemplateInstanceList) SwaggerDoc() map[string]string { var map_BrokerTemplateInstanceSpec = map[string]string{ "": "BrokerTemplateInstanceSpec describes the state of a BrokerTemplateInstance.", - "templateInstance": "templateinstance is a reference to a TemplateInstance object residing in a namespace.", + "templateInstance": "templateInstance is a reference to a TemplateInstance object residing in a namespace.", "secret": "secret is a reference to a Secret object residing in a namespace, containing the necessary template parameters.", - "bindingIDs": "bindingids is a list of 'binding_id's provided during successive bind calls to the template service broker.", + "bindingIDs": "bindingIDs is a list of 'binding_id's provided during successive bind calls to the template service broker.", } func (BrokerTemplateInstanceSpec) SwaggerDoc() map[string]string { @@ -44,12 +44,12 @@ func (BrokerTemplateInstanceSpec) SwaggerDoc() map[string]string { var map_Parameter = map[string]string{ "": "Parameter defines a name/value variable that is to be processed during the Template to Config transformation.", - "name": "Name must be set and it can be referenced in Template Items using ${PARAMETER_NAME}. Required.", + "name": "name must be set and it can be referenced in Template Items using ${PARAMETER_NAME}. 
Required.", "displayName": "Optional: The name that will show in UI instead of parameter 'Name'", - "description": "Description of a parameter. Optional.", - "value": "Value holds the Parameter data. If specified, the generator will be ignored. The value replaces all occurrences of the Parameter ${Name} expression during the Template to Config transformation. Optional.", + "description": "description of a parameter. Optional.", + "value": "value holds the Parameter data. If specified, the generator will be ignored. The value replaces all occurrences of the Parameter ${Name} expression during the Template to Config transformation. Optional.", "generate": "generate specifies the generator to be used to generate random string from an input value specified by From field. The result string is stored into Value field. If empty, no generator is being used, leaving the result Value untouched. Optional.\n\nThe only supported generator is \"expression\", which accepts a \"from\" value in the form of a simple regular expression containing the range expression \"[a-zA-Z0-9]\", and the length expression \"a{length}\".\n\nExamples:\n\nfrom | value", - "from": "From is an input value for the generator. Optional.", + "from": "from is an input value for the generator. Optional.", "required": "Optional: Indicates the parameter must have a value. Defaults to false.", } @@ -83,11 +83,11 @@ func (TemplateInstance) SwaggerDoc() map[string]string { var map_TemplateInstanceCondition = map[string]string{ "": "TemplateInstanceCondition contains condition information for a TemplateInstance.", - "type": "Type of the condition, currently Ready or InstantiateFailure.", - "status": "Status of the condition, one of True, False or Unknown.", - "lastTransitionTime": "LastTransitionTime is the last time a condition status transitioned from one state to another.", - "reason": "Reason is a brief machine readable explanation for the condition's last transition.", - "message": "Message is a human readable description of the details of the last transition, complementing reason.", + "type": "type of the condition, currently Ready or InstantiateFailure.", + "status": "status of the condition, one of True, False or Unknown.", + "lastTransitionTime": "lastTransitionTime is the last time a condition status transitioned from one state to another.", + "reason": "reason is a brief machine readable explanation for the condition's last transition.", + "message": "message is a human readable description of the details of the last transition, complementing reason.", } func (TemplateInstanceCondition) SwaggerDoc() map[string]string { @@ -139,7 +139,7 @@ func (TemplateInstanceSpec) SwaggerDoc() map[string]string { var map_TemplateInstanceStatus = map[string]string{ "": "TemplateInstanceStatus describes the current state of a TemplateInstance.", "conditions": "conditions represent the latest available observations of a TemplateInstance's current state.", - "objects": "Objects references the objects created by the TemplateInstance.", + "objects": "objects references the objects created by the TemplateInstance.", } func (TemplateInstanceStatus) SwaggerDoc() map[string]string { @@ -149,7 +149,7 @@ func (TemplateInstanceStatus) SwaggerDoc() map[string]string { var map_TemplateList = map[string]string{ "": "TemplateList is a list of Template objects.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard list's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items is a list of templates", + "items": "items is a list of templates", } func (TemplateList) SwaggerDoc() map[string]string { diff --git a/vendor/github.com/openshift/api/user/v1/generated.proto b/vendor/github.com/openshift/api/user/v1/generated.proto index c1c2b8156..f07b446ad 100644 --- a/vendor/github.com/openshift/api/user/v1/generated.proto +++ b/vendor/github.com/openshift/api/user/v1/generated.proto @@ -21,7 +21,7 @@ message Group { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // Users is the list of users in this group. + // users is the list of users in this group. optional OptionalNames users = 2; } @@ -34,7 +34,7 @@ message GroupList { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // Items is the list of groups + // items is the list of groups repeated Group items = 2; } @@ -51,17 +51,17 @@ message Identity { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // ProviderName is the source of identity information + // providerName is the source of identity information optional string providerName = 2; - // ProviderUserName uniquely represents this identity in the scope of the provider + // providerUserName uniquely represents this identity in the scope of the provider optional string providerUserName = 3; - // User is a reference to the user this identity is associated with + // user is a reference to the user this identity is associated with // Both Name and UID must be set optional .k8s.io.api.core.v1.ObjectReference user = 4; - // Extra holds extra information about this identity + // extra holds extra information about this identity map extra = 5; } @@ -74,7 +74,7 @@ message IdentityList { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // Items is the list of identities + // items is the list of identities repeated Identity items = 2; } @@ -100,14 +100,14 @@ message User { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // FullName is the full name of user + // fullName is the full name of user optional string fullName = 2; - // Identities are the identities associated with this user + // identities are the identities associated with this user // +optional repeated string identities = 3; - // Groups specifies group names this user is a member of. + // groups specifies group names this user is a member of. // This field is deprecated and will be removed in a future release. // Instead, create a Group object containing the name of this User. 
repeated string groups = 4; @@ -122,10 +122,10 @@ message UserIdentityMapping { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // Identity is a reference to an identity + // identity is a reference to an identity optional .k8s.io.api.core.v1.ObjectReference identity = 2; - // User is a reference to a user + // user is a reference to a user optional .k8s.io.api.core.v1.ObjectReference user = 3; } @@ -138,7 +138,7 @@ message UserList { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // Items is the list of users + // items is the list of users repeated User items = 2; } diff --git a/vendor/github.com/openshift/api/user/v1/types.go b/vendor/github.com/openshift/api/user/v1/types.go index 7014bbfac..64ae8c830 100644 --- a/vendor/github.com/openshift/api/user/v1/types.go +++ b/vendor/github.com/openshift/api/user/v1/types.go @@ -26,14 +26,14 @@ type User struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // FullName is the full name of user + // fullName is the full name of user FullName string `json:"fullName,omitempty" protobuf:"bytes,2,opt,name=fullName"` - // Identities are the identities associated with this user + // identities are the identities associated with this user // +optional Identities []string `json:"identities,omitempty" protobuf:"bytes,3,rep,name=identities"` - // Groups specifies group names this user is a member of. + // groups specifies group names this user is a member of. // This field is deprecated and will be removed in a future release. // Instead, create a Group object containing the name of this User. 
Groups []string `json:"groups" protobuf:"bytes,4,rep,name=groups"` @@ -52,7 +52,7 @@ type UserList struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Items is the list of users + // items is the list of users Items []User `json:"items" protobuf:"bytes,2,rep,name=items"` } @@ -75,17 +75,17 @@ type Identity struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // ProviderName is the source of identity information + // providerName is the source of identity information ProviderName string `json:"providerName" protobuf:"bytes,2,opt,name=providerName"` - // ProviderUserName uniquely represents this identity in the scope of the provider + // providerUserName uniquely represents this identity in the scope of the provider ProviderUserName string `json:"providerUserName" protobuf:"bytes,3,opt,name=providerUserName"` - // User is a reference to the user this identity is associated with + // user is a reference to the user this identity is associated with // Both Name and UID must be set User corev1.ObjectReference `json:"user" protobuf:"bytes,4,opt,name=user"` - // Extra holds extra information about this identity + // extra holds extra information about this identity Extra map[string]string `json:"extra,omitempty" protobuf:"bytes,5,rep,name=extra"` } @@ -102,7 +102,7 @@ type IdentityList struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Items is the list of identities + // items is the list of identities Items []Identity `json:"items" protobuf:"bytes,2,rep,name=items"` } @@ -122,9 +122,9 @@ type UserIdentityMapping struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Identity is a reference to an identity + // identity is a reference to an identity Identity corev1.ObjectReference `json:"identity,omitempty" protobuf:"bytes,2,opt,name=identity"` - // User is a reference to a user + // user is a reference to a user User corev1.ObjectReference `json:"user,omitempty" protobuf:"bytes,3,opt,name=user"` } @@ -152,7 +152,7 @@ type Group struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Users is the list of users in this group. + // users is the list of users in this group. 
Users OptionalNames `json:"users" protobuf:"bytes,2,rep,name=users"` } @@ -169,6 +169,6 @@ type GroupList struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Items is the list of groups + // items is the list of groups Items []Group `json:"items" protobuf:"bytes,2,rep,name=items"` } diff --git a/vendor/github.com/openshift/api/user/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/user/v1/zz_generated.swagger_doc_generated.go index 5844723a7..d85e7dfc5 100644 --- a/vendor/github.com/openshift/api/user/v1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/user/v1/zz_generated.swagger_doc_generated.go @@ -14,7 +14,7 @@ package v1 var map_Group = map[string]string{ "": "Group represents a referenceable set of Users\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "users": "Users is the list of users in this group.", + "users": "users is the list of users in this group.", } func (Group) SwaggerDoc() map[string]string { @@ -24,7 +24,7 @@ func (Group) SwaggerDoc() map[string]string { var map_GroupList = map[string]string{ "": "GroupList is a collection of Groups\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items is the list of groups", + "items": "items is the list of groups", } func (GroupList) SwaggerDoc() map[string]string { @@ -34,10 +34,10 @@ func (GroupList) SwaggerDoc() map[string]string { var map_Identity = map[string]string{ "": "Identity records a successful authentication of a user with an identity provider. The information about the source of authentication is stored on the identity, and the identity is then associated with a single user object. Multiple identities can reference a single user. Information retrieved from the authentication provider is stored in the extra field using a schema determined by the provider.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "providerName": "ProviderName is the source of identity information", - "providerUserName": "ProviderUserName uniquely represents this identity in the scope of the provider", - "user": "User is a reference to the user this identity is associated with Both Name and UID must be set", - "extra": "Extra holds extra information about this identity", + "providerName": "providerName is the source of identity information", + "providerUserName": "providerUserName uniquely represents this identity in the scope of the provider", + "user": "user is a reference to the user this identity is associated with Both Name and UID must be set", + "extra": "extra holds extra information about this identity", } func (Identity) SwaggerDoc() map[string]string { @@ -47,7 +47,7 @@ func (Identity) SwaggerDoc() map[string]string { var map_IdentityList = map[string]string{ "": "IdentityList is a collection of Identities\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items is the list of identities", + "items": "items is the list of identities", } func (IdentityList) SwaggerDoc() map[string]string { @@ -57,9 +57,9 @@ func (IdentityList) SwaggerDoc() map[string]string { var map_User = map[string]string{ "": "Upon log in, every user of the system receives a User and Identity resource. Administrators may directly manipulate the attributes of the users for their own tracking, or set groups via the API. The user name is unique and is chosen based on the value provided by the identity provider - if a user already exists with the incoming name, the user name may have a number appended to it depending on the configuration of the system.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "fullName": "FullName is the full name of user", - "identities": "Identities are the identities associated with this user", - "groups": "Groups specifies group names this user is a member of. This field is deprecated and will be removed in a future release. Instead, create a Group object containing the name of this User.", + "fullName": "fullName is the full name of user", + "identities": "identities are the identities associated with this user", + "groups": "groups specifies group names this user is a member of. This field is deprecated and will be removed in a future release. Instead, create a Group object containing the name of this User.", } func (User) SwaggerDoc() map[string]string { @@ -69,8 +69,8 @@ func (User) SwaggerDoc() map[string]string { var map_UserIdentityMapping = map[string]string{ "": "UserIdentityMapping maps a user to an identity\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "identity": "Identity is a reference to an identity", - "user": "User is a reference to a user", + "identity": "identity is a reference to an identity", + "user": "user is a reference to a user", } func (UserIdentityMapping) SwaggerDoc() map[string]string { @@ -80,7 +80,7 @@ func (UserIdentityMapping) SwaggerDoc() map[string]string { var map_UserList = map[string]string{ "": "UserList is a collection of Users\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items is the list of users", + "items": "items is the list of users", } func (UserList) SwaggerDoc() map[string]string { diff --git a/vendor/github.com/openshift/build-machinery-go/.gitignore b/vendor/github.com/openshift/build-machinery-go/.gitignore new file mode 100644 index 000000000..19607d9e6 --- /dev/null +++ b/vendor/github.com/openshift/build-machinery-go/.gitignore @@ -0,0 +1 @@ +*.log.raw diff --git a/vendor/github.com/openshift/build-machinery-go/OWNERS b/vendor/github.com/openshift/build-machinery-go/OWNERS index bb188a02c..1bbac46c7 100644 --- a/vendor/github.com/openshift/build-machinery-go/OWNERS +++ b/vendor/github.com/openshift/build-machinery-go/OWNERS @@ -1,7 +1,9 @@ reviewers: - 2uasimojo - - deads2k - - soltysh + - benluddy + - jsafrane + - sanchezl approvers: - - soltysh - - deads2k + - benluddy + - jsafrane + - sanchezl diff --git a/vendor/github.com/openshift/build-machinery-go/make/operator.example.mk.help.log b/vendor/github.com/openshift/build-machinery-go/make/operator.example.mk.help.log index 47b04e6fe..b636e3926 100644 --- a/vendor/github.com/openshift/build-machinery-go/make/operator.example.mk.help.log +++ b/vendor/github.com/openshift/build-machinery-go/make/operator.example.mk.help.log @@ -13,6 +13,7 @@ image-ocp-openshift-apiserver-operator images telepresence test +test-operator-integration test-unit update update-bindata @@ -21,6 +22,7 @@ update-deps-overrides update-generated update-gofmt update-profile-manifests +update-test-operator-integration verify verify-bindata verify-codegen diff --git a/vendor/github.com/openshift/build-machinery-go/make/targets/openshift/images.mk b/vendor/github.com/openshift/build-machinery-go/make/targets/openshift/images.mk index c53c3f1b9..90096dc5d 100644 --- a/vendor/github.com/openshift/build-machinery-go/make/targets/openshift/images.mk +++ b/vendor/github.com/openshift/build-machinery-go/make/targets/openshift/images.mk @@ -7,15 +7,21 @@ include $(addprefix $(dir $(lastword $(MAKEFILE_LIST))), \ # make images IMAGE_BUILD_EXTRA_FLAGS='-mount ~/projects/origin-repos/4.2/:/etc/yum.repos.d/' IMAGE_BUILD_DEFAULT_FLAGS ?=--allow-pull IMAGE_BUILD_EXTRA_FLAGS ?= +IMAGE_BUILD_BUILDER ?= imagebuilder # $1 - target name # $2 - image ref # $3 - Dockerfile path # $4 - context +# only run ensure-imagebuilder when imagebuilder is used define build-image-internal +ifeq ($(IMAGE_BUILD_BUILDER),imagebuilder) image-$(1): ensure-imagebuilder +else +image-$(1): +endif $(strip \ - imagebuilder \ + $(IMAGE_BUILD_BUILDER) \ $(IMAGE_BUILD_DEFAULT_FLAGS) \ -t $(2) -f $(3) \ diff --git a/vendor/github.com/openshift/build-machinery-go/make/targets/openshift/operator/mom.mk 
b/vendor/github.com/openshift/build-machinery-go/make/targets/openshift/operator/mom.mk
new file mode 100644
index 000000000..21c81afe0
--- /dev/null
+++ b/vendor/github.com/openshift/build-machinery-go/make/targets/openshift/operator/mom.mk
@@ -0,0 +1,10 @@
+scripts_dir :=$(shell realpath $(dir $(lastword $(MAKEFILE_LIST)))../../../../scripts)
+
+test-operator-integration: build
+	bash $(scripts_dir)/test-operator-integration.sh
+.PHONY: test-operator-integration
+
+update-test-operator-integration: build
+	REPLACE_TEST_OUTPUT=true bash $(scripts_dir)/test-operator-integration.sh
+
+.PHONY: update-test-operator-integration
diff --git a/vendor/github.com/openshift/build-machinery-go/scripts/test-operator-integration.sh b/vendor/github.com/openshift/build-machinery-go/scripts/test-operator-integration.sh
new file mode 100644
index 000000000..262bf6010
--- /dev/null
+++ b/vendor/github.com/openshift/build-machinery-go/scripts/test-operator-integration.sh
@@ -0,0 +1,52 @@
+#!/usr/bin/env bash
+
+set -o errexit
+set -o nounset
+set -o pipefail
+set -x
+
+# Install multi-operator-manager. This will make sure the latest binary is installed
+# If the installation failed, keep going, maybe the binary is available in the system
+echo "Installing latest version of multi-operator-manager..."
+if ! go install -mod=readonly github.com/openshift/multi-operator-manager/cmd/multi-operator-manager@latest; then
+  echo "Error: Failed to install multi-operator-manager."
+fi
+
+# Check if the multi-operator-manager is installed; if not, fail
+if ! command -v multi-operator-manager &> /dev/null; then
+  echo "Error: multi-operator-manager binary not available."
+  exit 1
+fi
+
+REPLACE_TEST_OUTPUT="${REPLACE_TEST_OUTPUT:-false}"
+
+# Define the path to the operator binary
+MOM_CMD="${MOM_CMD:-multi-operator-manager}"
+
+# Define input and output directories (can be overridden if necessary)
+APPLY_CONFIG_INPUT_DIR="${APPLY_CONFIG_INPUT_DIR:-./test-data/apply-configuration}"
+APPLY_CONFIG_OUTPUT_DIR="${ARTIFACT_DIR:-./test-output}"
+
+# Make sure the output-dir is clean
+if [ -d "${APPLY_CONFIG_OUTPUT_DIR}" ]; then
+  echo "Cleaning up existing ${APPLY_CONFIG_OUTPUT_DIR}"
+  rm -rf "${APPLY_CONFIG_OUTPUT_DIR}"
+fi
+
+# Assemble the args
+APPLY_CONFIG_ARGS=(
+  test
+  apply-configuration
+  --test-dir="$APPLY_CONFIG_INPUT_DIR"
+  --output-dir="$APPLY_CONFIG_OUTPUT_DIR"
+)
+
+if [ "$REPLACE_TEST_OUTPUT" == "true" ]
+then
+  APPLY_CONFIG_ARGS=("${APPLY_CONFIG_ARGS[@]}" "--replace-expected-output=true")
+else
+  APPLY_CONFIG_ARGS=("${APPLY_CONFIG_ARGS[@]}" "--preserve-policy=KeepAlways")
+fi
+
+# Run the apply-configuration command from the operator
+"${MOM_CMD}" "${APPLY_CONFIG_ARGS[@]}"
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/apiserver.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/apiserver.go
index e10ae5c76..0d2c3e4f8 100644
--- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/apiserver.go
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/apiserver.go
@@ -3,21 +3,21 @@
 package v1
 
 import (
-	apiconfigv1 "github.com/openshift/api/config/v1"
+	configv1 "github.com/openshift/api/config/v1"
 	internal "github.com/openshift/client-go/config/applyconfigurations/internal"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	types "k8s.io/apimachinery/pkg/types"
 	managedfields "k8s.io/apimachinery/pkg/util/managedfields"
-	v1
"k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) // APIServerApplyConfiguration represents a declarative configuration of the APIServer type for use // with apply. type APIServerApplyConfiguration struct { - v1.TypeMetaApplyConfiguration `json:",inline"` - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Spec *APIServerSpecApplyConfiguration `json:"spec,omitempty"` - Status *apiconfigv1.APIServerStatus `json:"status,omitempty"` + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *APIServerSpecApplyConfiguration `json:"spec,omitempty"` + Status *configv1.APIServerStatus `json:"status,omitempty"` } // APIServer constructs a declarative configuration of the APIServer type for use with @@ -41,18 +41,18 @@ func APIServer(name string) *APIServerApplyConfiguration { // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! -func ExtractAPIServer(aPIServer *apiconfigv1.APIServer, fieldManager string) (*APIServerApplyConfiguration, error) { +func ExtractAPIServer(aPIServer *configv1.APIServer, fieldManager string) (*APIServerApplyConfiguration, error) { return extractAPIServer(aPIServer, fieldManager, "") } // ExtractAPIServerStatus is the same as ExtractAPIServer except // that it extracts the status subresource applied configuration. // Experimental! -func ExtractAPIServerStatus(aPIServer *apiconfigv1.APIServer, fieldManager string) (*APIServerApplyConfiguration, error) { +func ExtractAPIServerStatus(aPIServer *configv1.APIServer, fieldManager string) (*APIServerApplyConfiguration, error) { return extractAPIServer(aPIServer, fieldManager, "status") } -func extractAPIServer(aPIServer *apiconfigv1.APIServer, fieldManager string, subresource string) (*APIServerApplyConfiguration, error) { +func extractAPIServer(aPIServer *configv1.APIServer, fieldManager string, subresource string) (*APIServerApplyConfiguration, error) { b := &APIServerApplyConfiguration{} err := managedfields.ExtractInto(aPIServer, internal.Parser().Type("com.github.openshift.api.config.v1.APIServer"), fieldManager, b, subresource) if err != nil { @@ -69,7 +69,7 @@ func extractAPIServer(aPIServer *apiconfigv1.APIServer, fieldManager string, sub // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *APIServerApplyConfiguration) WithKind(value string) *APIServerApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -77,7 +77,7 @@ func (b *APIServerApplyConfiguration) WithKind(value string) *APIServerApplyConf // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *APIServerApplyConfiguration) WithAPIVersion(value string) *APIServerApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -86,7 +86,7 @@ func (b *APIServerApplyConfiguration) WithAPIVersion(value string) *APIServerApp // If called multiple times, the Name field is set to the value of the last call. 
func (b *APIServerApplyConfiguration) WithName(value string) *APIServerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -95,7 +95,7 @@ func (b *APIServerApplyConfiguration) WithName(value string) *APIServerApplyConf // If called multiple times, the GenerateName field is set to the value of the last call. func (b *APIServerApplyConfiguration) WithGenerateName(value string) *APIServerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -104,7 +104,7 @@ func (b *APIServerApplyConfiguration) WithGenerateName(value string) *APIServerA // If called multiple times, the Namespace field is set to the value of the last call. func (b *APIServerApplyConfiguration) WithNamespace(value string) *APIServerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -113,7 +113,7 @@ func (b *APIServerApplyConfiguration) WithNamespace(value string) *APIServerAppl // If called multiple times, the UID field is set to the value of the last call. func (b *APIServerApplyConfiguration) WithUID(value types.UID) *APIServerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -122,7 +122,7 @@ func (b *APIServerApplyConfiguration) WithUID(value types.UID) *APIServerApplyCo // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *APIServerApplyConfiguration) WithResourceVersion(value string) *APIServerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -131,25 +131,25 @@ func (b *APIServerApplyConfiguration) WithResourceVersion(value string) *APIServ // If called multiple times, the Generation field is set to the value of the last call. func (b *APIServerApplyConfiguration) WithGeneration(value int64) *APIServerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CreationTimestamp field is set to the value of the last call. -func (b *APIServerApplyConfiguration) WithCreationTimestamp(value metav1.Time) *APIServerApplyConfiguration { +func (b *APIServerApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *APIServerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. 
-func (b *APIServerApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *APIServerApplyConfiguration { +func (b *APIServerApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *APIServerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -158,7 +158,7 @@ func (b *APIServerApplyConfiguration) WithDeletionTimestamp(value metav1.Time) * // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *APIServerApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *APIServerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -168,11 +168,11 @@ func (b *APIServerApplyConfiguration) WithDeletionGracePeriodSeconds(value int64 // overwriting an existing map entries in Labels field with the same key. func (b *APIServerApplyConfiguration) WithLabels(entries map[string]string) *APIServerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -183,11 +183,11 @@ func (b *APIServerApplyConfiguration) WithLabels(entries map[string]string) *API // overwriting an existing map entries in Annotations field with the same key. func (b *APIServerApplyConfiguration) WithAnnotations(entries map[string]string) *APIServerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -195,13 +195,13 @@ func (b *APIServerApplyConfiguration) WithAnnotations(entries map[string]string) // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the OwnerReferences field. 
-func (b *APIServerApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *APIServerApplyConfiguration { +func (b *APIServerApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *APIServerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -212,14 +212,14 @@ func (b *APIServerApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerRef func (b *APIServerApplyConfiguration) WithFinalizers(values ...string) *APIServerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } func (b *APIServerApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} } } @@ -234,7 +234,7 @@ func (b *APIServerApplyConfiguration) WithSpec(value *APIServerSpecApplyConfigur // WithStatus sets the Status field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Status field is set to the value of the last call. -func (b *APIServerApplyConfiguration) WithStatus(value apiconfigv1.APIServerStatus) *APIServerApplyConfiguration { +func (b *APIServerApplyConfiguration) WithStatus(value configv1.APIServerStatus) *APIServerApplyConfiguration { b.Status = &value return b } @@ -242,5 +242,5 @@ func (b *APIServerApplyConfiguration) WithStatus(value apiconfigv1.APIServerStat // GetName retrieves the value of the Name field in the declarative configuration. func (b *APIServerApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/apiserverencryption.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/apiserverencryption.go index a64f18bb4..6f0deb125 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/apiserverencryption.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/apiserverencryption.go @@ -3,13 +3,13 @@ package v1 import ( - v1 "github.com/openshift/api/config/v1" + configv1 "github.com/openshift/api/config/v1" ) // APIServerEncryptionApplyConfiguration represents a declarative configuration of the APIServerEncryption type for use // with apply. 
type APIServerEncryptionApplyConfiguration struct { - Type *v1.EncryptionType `json:"type,omitempty"` + Type *configv1.EncryptionType `json:"type,omitempty"` } // APIServerEncryptionApplyConfiguration constructs a declarative configuration of the APIServerEncryption type for use with @@ -21,7 +21,7 @@ func APIServerEncryption() *APIServerEncryptionApplyConfiguration { // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. -func (b *APIServerEncryptionApplyConfiguration) WithType(value v1.EncryptionType) *APIServerEncryptionApplyConfiguration { +func (b *APIServerEncryptionApplyConfiguration) WithType(value configv1.EncryptionType) *APIServerEncryptionApplyConfiguration { b.Type = &value return b } diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/audit.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/audit.go index 49b2e6c79..a07c9788c 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/audit.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/audit.go @@ -3,13 +3,13 @@ package v1 import ( - v1 "github.com/openshift/api/config/v1" + configv1 "github.com/openshift/api/config/v1" ) // AuditApplyConfiguration represents a declarative configuration of the Audit type for use // with apply. type AuditApplyConfiguration struct { - Profile *v1.AuditProfileType `json:"profile,omitempty"` + Profile *configv1.AuditProfileType `json:"profile,omitempty"` CustomRules []AuditCustomRuleApplyConfiguration `json:"customRules,omitempty"` } @@ -22,7 +22,7 @@ func Audit() *AuditApplyConfiguration { // WithProfile sets the Profile field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Profile field is set to the value of the last call. -func (b *AuditApplyConfiguration) WithProfile(value v1.AuditProfileType) *AuditApplyConfiguration { +func (b *AuditApplyConfiguration) WithProfile(value configv1.AuditProfileType) *AuditApplyConfiguration { b.Profile = &value return b } diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/auditcustomrule.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/auditcustomrule.go index 838f3a22f..33a696d77 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/auditcustomrule.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/auditcustomrule.go @@ -3,14 +3,14 @@ package v1 import ( - v1 "github.com/openshift/api/config/v1" + configv1 "github.com/openshift/api/config/v1" ) // AuditCustomRuleApplyConfiguration represents a declarative configuration of the AuditCustomRule type for use // with apply. 
type AuditCustomRuleApplyConfiguration struct { - Group *string `json:"group,omitempty"` - Profile *v1.AuditProfileType `json:"profile,omitempty"` + Group *string `json:"group,omitempty"` + Profile *configv1.AuditProfileType `json:"profile,omitempty"` } // AuditCustomRuleApplyConfiguration constructs a declarative configuration of the AuditCustomRule type for use with @@ -30,7 +30,7 @@ func (b *AuditCustomRuleApplyConfiguration) WithGroup(value string) *AuditCustom // WithProfile sets the Profile field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Profile field is set to the value of the last call. -func (b *AuditCustomRuleApplyConfiguration) WithProfile(value v1.AuditProfileType) *AuditCustomRuleApplyConfiguration { +func (b *AuditCustomRuleApplyConfiguration) WithProfile(value configv1.AuditProfileType) *AuditCustomRuleApplyConfiguration { b.Profile = &value return b } diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/authentication.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/authentication.go index b8fc3dfdf..6ae8497a5 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/authentication.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/authentication.go @@ -3,21 +3,21 @@ package v1 import ( - apiconfigv1 "github.com/openshift/api/config/v1" + configv1 "github.com/openshift/api/config/v1" internal "github.com/openshift/client-go/config/applyconfigurations/internal" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) // AuthenticationApplyConfiguration represents a declarative configuration of the Authentication type for use // with apply. type AuthenticationApplyConfiguration struct { - v1.TypeMetaApplyConfiguration `json:",inline"` - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Spec *AuthenticationSpecApplyConfiguration `json:"spec,omitempty"` - Status *AuthenticationStatusApplyConfiguration `json:"status,omitempty"` + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *AuthenticationSpecApplyConfiguration `json:"spec,omitempty"` + Status *AuthenticationStatusApplyConfiguration `json:"status,omitempty"` } // Authentication constructs a declarative configuration of the Authentication type for use with @@ -41,18 +41,18 @@ func Authentication(name string) *AuthenticationApplyConfiguration { // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! 
-func ExtractAuthentication(authentication *apiconfigv1.Authentication, fieldManager string) (*AuthenticationApplyConfiguration, error) { +func ExtractAuthentication(authentication *configv1.Authentication, fieldManager string) (*AuthenticationApplyConfiguration, error) { return extractAuthentication(authentication, fieldManager, "") } // ExtractAuthenticationStatus is the same as ExtractAuthentication except // that it extracts the status subresource applied configuration. // Experimental! -func ExtractAuthenticationStatus(authentication *apiconfigv1.Authentication, fieldManager string) (*AuthenticationApplyConfiguration, error) { +func ExtractAuthenticationStatus(authentication *configv1.Authentication, fieldManager string) (*AuthenticationApplyConfiguration, error) { return extractAuthentication(authentication, fieldManager, "status") } -func extractAuthentication(authentication *apiconfigv1.Authentication, fieldManager string, subresource string) (*AuthenticationApplyConfiguration, error) { +func extractAuthentication(authentication *configv1.Authentication, fieldManager string, subresource string) (*AuthenticationApplyConfiguration, error) { b := &AuthenticationApplyConfiguration{} err := managedfields.ExtractInto(authentication, internal.Parser().Type("com.github.openshift.api.config.v1.Authentication"), fieldManager, b, subresource) if err != nil { @@ -69,7 +69,7 @@ func extractAuthentication(authentication *apiconfigv1.Authentication, fieldMana // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *AuthenticationApplyConfiguration) WithKind(value string) *AuthenticationApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -77,7 +77,7 @@ func (b *AuthenticationApplyConfiguration) WithKind(value string) *Authenticatio // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *AuthenticationApplyConfiguration) WithAPIVersion(value string) *AuthenticationApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -86,7 +86,7 @@ func (b *AuthenticationApplyConfiguration) WithAPIVersion(value string) *Authent // If called multiple times, the Name field is set to the value of the last call. func (b *AuthenticationApplyConfiguration) WithName(value string) *AuthenticationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -95,7 +95,7 @@ func (b *AuthenticationApplyConfiguration) WithName(value string) *Authenticatio // If called multiple times, the GenerateName field is set to the value of the last call. func (b *AuthenticationApplyConfiguration) WithGenerateName(value string) *AuthenticationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -104,7 +104,7 @@ func (b *AuthenticationApplyConfiguration) WithGenerateName(value string) *Authe // If called multiple times, the Namespace field is set to the value of the last call. 
func (b *AuthenticationApplyConfiguration) WithNamespace(value string) *AuthenticationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -113,7 +113,7 @@ func (b *AuthenticationApplyConfiguration) WithNamespace(value string) *Authenti // If called multiple times, the UID field is set to the value of the last call. func (b *AuthenticationApplyConfiguration) WithUID(value types.UID) *AuthenticationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -122,7 +122,7 @@ func (b *AuthenticationApplyConfiguration) WithUID(value types.UID) *Authenticat // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *AuthenticationApplyConfiguration) WithResourceVersion(value string) *AuthenticationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -131,25 +131,25 @@ func (b *AuthenticationApplyConfiguration) WithResourceVersion(value string) *Au // If called multiple times, the Generation field is set to the value of the last call. func (b *AuthenticationApplyConfiguration) WithGeneration(value int64) *AuthenticationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CreationTimestamp field is set to the value of the last call. -func (b *AuthenticationApplyConfiguration) WithCreationTimestamp(value metav1.Time) *AuthenticationApplyConfiguration { +func (b *AuthenticationApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *AuthenticationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. -func (b *AuthenticationApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *AuthenticationApplyConfiguration { +func (b *AuthenticationApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *AuthenticationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -158,7 +158,7 @@ func (b *AuthenticationApplyConfiguration) WithDeletionTimestamp(value metav1.Ti // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. 
func (b *AuthenticationApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *AuthenticationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -168,11 +168,11 @@ func (b *AuthenticationApplyConfiguration) WithDeletionGracePeriodSeconds(value // overwriting an existing map entries in Labels field with the same key. func (b *AuthenticationApplyConfiguration) WithLabels(entries map[string]string) *AuthenticationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -183,11 +183,11 @@ func (b *AuthenticationApplyConfiguration) WithLabels(entries map[string]string) // overwriting an existing map entries in Annotations field with the same key. func (b *AuthenticationApplyConfiguration) WithAnnotations(entries map[string]string) *AuthenticationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -195,13 +195,13 @@ func (b *AuthenticationApplyConfiguration) WithAnnotations(entries map[string]st // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the OwnerReferences field. 
-func (b *AuthenticationApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *AuthenticationApplyConfiguration { +func (b *AuthenticationApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *AuthenticationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -212,14 +212,14 @@ func (b *AuthenticationApplyConfiguration) WithOwnerReferences(values ...*v1.Own func (b *AuthenticationApplyConfiguration) WithFinalizers(values ...string) *AuthenticationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } func (b *AuthenticationApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} } } @@ -242,5 +242,5 @@ func (b *AuthenticationApplyConfiguration) WithStatus(value *AuthenticationStatu // GetName retrieves the value of the Name field in the declarative configuration. func (b *AuthenticationApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/authenticationspec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/authenticationspec.go index 27cf2004f..b2ac36278 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/authenticationspec.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/authenticationspec.go @@ -3,13 +3,13 @@ package v1 import ( - v1 "github.com/openshift/api/config/v1" + configv1 "github.com/openshift/api/config/v1" ) // AuthenticationSpecApplyConfiguration represents a declarative configuration of the AuthenticationSpec type for use // with apply. type AuthenticationSpecApplyConfiguration struct { - Type *v1.AuthenticationType `json:"type,omitempty"` + Type *configv1.AuthenticationType `json:"type,omitempty"` OAuthMetadata *ConfigMapNameReferenceApplyConfiguration `json:"oauthMetadata,omitempty"` WebhookTokenAuthenticators []DeprecatedWebhookTokenAuthenticatorApplyConfiguration `json:"webhookTokenAuthenticators,omitempty"` WebhookTokenAuthenticator *WebhookTokenAuthenticatorApplyConfiguration `json:"webhookTokenAuthenticator,omitempty"` @@ -26,7 +26,7 @@ func AuthenticationSpec() *AuthenticationSpecApplyConfiguration { // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. 
-func (b *AuthenticationSpecApplyConfiguration) WithType(value v1.AuthenticationType) *AuthenticationSpecApplyConfiguration { +func (b *AuthenticationSpecApplyConfiguration) WithType(value configv1.AuthenticationType) *AuthenticationSpecApplyConfiguration { b.Type = &value return b } diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/awsingressspec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/awsingressspec.go index d7fe57934..e67e67111 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/awsingressspec.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/awsingressspec.go @@ -3,13 +3,13 @@ package v1 import ( - v1 "github.com/openshift/api/config/v1" + configv1 "github.com/openshift/api/config/v1" ) // AWSIngressSpecApplyConfiguration represents a declarative configuration of the AWSIngressSpec type for use // with apply. type AWSIngressSpecApplyConfiguration struct { - Type *v1.AWSLBType `json:"type,omitempty"` + Type *configv1.AWSLBType `json:"type,omitempty"` } // AWSIngressSpecApplyConfiguration constructs a declarative configuration of the AWSIngressSpec type for use with @@ -21,7 +21,7 @@ func AWSIngressSpec() *AWSIngressSpecApplyConfiguration { // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. -func (b *AWSIngressSpecApplyConfiguration) WithType(value v1.AWSLBType) *AWSIngressSpecApplyConfiguration { +func (b *AWSIngressSpecApplyConfiguration) WithType(value configv1.AWSLBType) *AWSIngressSpecApplyConfiguration { b.Type = &value return b } diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/awsplatformstatus.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/awsplatformstatus.go index e5b1b74ea..b217e5bdc 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/awsplatformstatus.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/awsplatformstatus.go @@ -5,9 +5,10 @@ package v1 // AWSPlatformStatusApplyConfiguration represents a declarative configuration of the AWSPlatformStatus type for use // with apply. type AWSPlatformStatusApplyConfiguration struct { - Region *string `json:"region,omitempty"` - ServiceEndpoints []AWSServiceEndpointApplyConfiguration `json:"serviceEndpoints,omitempty"` - ResourceTags []AWSResourceTagApplyConfiguration `json:"resourceTags,omitempty"` + Region *string `json:"region,omitempty"` + ServiceEndpoints []AWSServiceEndpointApplyConfiguration `json:"serviceEndpoints,omitempty"` + ResourceTags []AWSResourceTagApplyConfiguration `json:"resourceTags,omitempty"` + CloudLoadBalancerConfig *CloudLoadBalancerConfigApplyConfiguration `json:"cloudLoadBalancerConfig,omitempty"` } // AWSPlatformStatusApplyConfiguration constructs a declarative configuration of the AWSPlatformStatus type for use with @@ -49,3 +50,11 @@ func (b *AWSPlatformStatusApplyConfiguration) WithResourceTags(values ...*AWSRes } return b } + +// WithCloudLoadBalancerConfig sets the CloudLoadBalancerConfig field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
+// If called multiple times, the CloudLoadBalancerConfig field is set to the value of the last call. +func (b *AWSPlatformStatusApplyConfiguration) WithCloudLoadBalancerConfig(value *CloudLoadBalancerConfigApplyConfiguration) *AWSPlatformStatusApplyConfiguration { + b.CloudLoadBalancerConfig = value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/azureplatformstatus.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/azureplatformstatus.go index 442674734..5348a3c99 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/azureplatformstatus.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/azureplatformstatus.go @@ -3,7 +3,7 @@ package v1 import ( - v1 "github.com/openshift/api/config/v1" + configv1 "github.com/openshift/api/config/v1" ) // AzurePlatformStatusApplyConfiguration represents a declarative configuration of the AzurePlatformStatus type for use @@ -11,7 +11,7 @@ import ( type AzurePlatformStatusApplyConfiguration struct { ResourceGroupName *string `json:"resourceGroupName,omitempty"` NetworkResourceGroupName *string `json:"networkResourceGroupName,omitempty"` - CloudName *v1.AzureCloudEnvironment `json:"cloudName,omitempty"` + CloudName *configv1.AzureCloudEnvironment `json:"cloudName,omitempty"` ARMEndpoint *string `json:"armEndpoint,omitempty"` ResourceTags []AzureResourceTagApplyConfiguration `json:"resourceTags,omitempty"` } @@ -41,7 +41,7 @@ func (b *AzurePlatformStatusApplyConfiguration) WithNetworkResourceGroupName(val // WithCloudName sets the CloudName field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CloudName field is set to the value of the last call. -func (b *AzurePlatformStatusApplyConfiguration) WithCloudName(value v1.AzureCloudEnvironment) *AzurePlatformStatusApplyConfiguration { +func (b *AzurePlatformStatusApplyConfiguration) WithCloudName(value configv1.AzureCloudEnvironment) *AzurePlatformStatusApplyConfiguration { b.CloudName = &value return b } diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/baremetalplatformloadbalancer.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/baremetalplatformloadbalancer.go index a78284764..4a7405ad8 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/baremetalplatformloadbalancer.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/baremetalplatformloadbalancer.go @@ -3,13 +3,13 @@ package v1 import ( - v1 "github.com/openshift/api/config/v1" + configv1 "github.com/openshift/api/config/v1" ) // BareMetalPlatformLoadBalancerApplyConfiguration represents a declarative configuration of the BareMetalPlatformLoadBalancer type for use // with apply. 
type BareMetalPlatformLoadBalancerApplyConfiguration struct { - Type *v1.PlatformLoadBalancerType `json:"type,omitempty"` + Type *configv1.PlatformLoadBalancerType `json:"type,omitempty"` } // BareMetalPlatformLoadBalancerApplyConfiguration constructs a declarative configuration of the BareMetalPlatformLoadBalancer type for use with @@ -21,7 +21,7 @@ func BareMetalPlatformLoadBalancer() *BareMetalPlatformLoadBalancerApplyConfigur // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. -func (b *BareMetalPlatformLoadBalancerApplyConfiguration) WithType(value v1.PlatformLoadBalancerType) *BareMetalPlatformLoadBalancerApplyConfiguration { +func (b *BareMetalPlatformLoadBalancerApplyConfiguration) WithType(value configv1.PlatformLoadBalancerType) *BareMetalPlatformLoadBalancerApplyConfiguration { b.Type = &value return b } diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/baremetalplatformspec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/baremetalplatformspec.go index 3140b5548..81d808775 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/baremetalplatformspec.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/baremetalplatformspec.go @@ -3,15 +3,15 @@ package v1 import ( - v1 "github.com/openshift/api/config/v1" + configv1 "github.com/openshift/api/config/v1" ) // BareMetalPlatformSpecApplyConfiguration represents a declarative configuration of the BareMetalPlatformSpec type for use // with apply. type BareMetalPlatformSpecApplyConfiguration struct { - APIServerInternalIPs []v1.IP `json:"apiServerInternalIPs,omitempty"` - IngressIPs []v1.IP `json:"ingressIPs,omitempty"` - MachineNetworks []v1.CIDR `json:"machineNetworks,omitempty"` + APIServerInternalIPs []configv1.IP `json:"apiServerInternalIPs,omitempty"` + IngressIPs []configv1.IP `json:"ingressIPs,omitempty"` + MachineNetworks []configv1.CIDR `json:"machineNetworks,omitempty"` } // BareMetalPlatformSpecApplyConfiguration constructs a declarative configuration of the BareMetalPlatformSpec type for use with @@ -23,7 +23,7 @@ func BareMetalPlatformSpec() *BareMetalPlatformSpecApplyConfiguration { // WithAPIServerInternalIPs adds the given value to the APIServerInternalIPs field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the APIServerInternalIPs field. -func (b *BareMetalPlatformSpecApplyConfiguration) WithAPIServerInternalIPs(values ...v1.IP) *BareMetalPlatformSpecApplyConfiguration { +func (b *BareMetalPlatformSpecApplyConfiguration) WithAPIServerInternalIPs(values ...configv1.IP) *BareMetalPlatformSpecApplyConfiguration { for i := range values { b.APIServerInternalIPs = append(b.APIServerInternalIPs, values[i]) } @@ -33,7 +33,7 @@ func (b *BareMetalPlatformSpecApplyConfiguration) WithAPIServerInternalIPs(value // WithIngressIPs adds the given value to the IngressIPs field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the IngressIPs field. 
-func (b *BareMetalPlatformSpecApplyConfiguration) WithIngressIPs(values ...v1.IP) *BareMetalPlatformSpecApplyConfiguration { +func (b *BareMetalPlatformSpecApplyConfiguration) WithIngressIPs(values ...configv1.IP) *BareMetalPlatformSpecApplyConfiguration { for i := range values { b.IngressIPs = append(b.IngressIPs, values[i]) } @@ -43,7 +43,7 @@ func (b *BareMetalPlatformSpecApplyConfiguration) WithIngressIPs(values ...v1.IP // WithMachineNetworks adds the given value to the MachineNetworks field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the MachineNetworks field. -func (b *BareMetalPlatformSpecApplyConfiguration) WithMachineNetworks(values ...v1.CIDR) *BareMetalPlatformSpecApplyConfiguration { +func (b *BareMetalPlatformSpecApplyConfiguration) WithMachineNetworks(values ...configv1.CIDR) *BareMetalPlatformSpecApplyConfiguration { for i := range values { b.MachineNetworks = append(b.MachineNetworks, values[i]) } diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/basicauthidentityprovider.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/basicauthidentityprovider.go index 7cbd241d0..88f30314d 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/basicauthidentityprovider.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/basicauthidentityprovider.go @@ -18,7 +18,7 @@ func BasicAuthIdentityProvider() *BasicAuthIdentityProviderApplyConfiguration { // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the URL field is set to the value of the last call. func (b *BasicAuthIdentityProviderApplyConfiguration) WithURL(value string) *BasicAuthIdentityProviderApplyConfiguration { - b.URL = &value + b.OAuthRemoteConnectionInfoApplyConfiguration.URL = &value return b } @@ -26,7 +26,7 @@ func (b *BasicAuthIdentityProviderApplyConfiguration) WithURL(value string) *Bas // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CA field is set to the value of the last call. func (b *BasicAuthIdentityProviderApplyConfiguration) WithCA(value *ConfigMapNameReferenceApplyConfiguration) *BasicAuthIdentityProviderApplyConfiguration { - b.CA = value + b.OAuthRemoteConnectionInfoApplyConfiguration.CA = value return b } @@ -34,7 +34,7 @@ func (b *BasicAuthIdentityProviderApplyConfiguration) WithCA(value *ConfigMapNam // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the TLSClientCert field is set to the value of the last call. func (b *BasicAuthIdentityProviderApplyConfiguration) WithTLSClientCert(value *SecretNameReferenceApplyConfiguration) *BasicAuthIdentityProviderApplyConfiguration { - b.TLSClientCert = value + b.OAuthRemoteConnectionInfoApplyConfiguration.TLSClientCert = value return b } @@ -42,6 +42,6 @@ func (b *BasicAuthIdentityProviderApplyConfiguration) WithTLSClientCert(value *S // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the TLSClientKey field is set to the value of the last call. 
func (b *BasicAuthIdentityProviderApplyConfiguration) WithTLSClientKey(value *SecretNameReferenceApplyConfiguration) *BasicAuthIdentityProviderApplyConfiguration { - b.TLSClientKey = value + b.OAuthRemoteConnectionInfoApplyConfiguration.TLSClientKey = value return b } diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/build.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/build.go index 06aa12e0f..cdadabcae 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/build.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/build.go @@ -3,20 +3,20 @@ package v1 import ( - apiconfigv1 "github.com/openshift/api/config/v1" + configv1 "github.com/openshift/api/config/v1" internal "github.com/openshift/client-go/config/applyconfigurations/internal" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) // BuildApplyConfiguration represents a declarative configuration of the Build type for use // with apply. type BuildApplyConfiguration struct { - v1.TypeMetaApplyConfiguration `json:",inline"` - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Spec *BuildSpecApplyConfiguration `json:"spec,omitempty"` + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *BuildSpecApplyConfiguration `json:"spec,omitempty"` } // Build constructs a declarative configuration of the Build type for use with @@ -40,18 +40,18 @@ func Build(name string) *BuildApplyConfiguration { // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! -func ExtractBuild(build *apiconfigv1.Build, fieldManager string) (*BuildApplyConfiguration, error) { +func ExtractBuild(build *configv1.Build, fieldManager string) (*BuildApplyConfiguration, error) { return extractBuild(build, fieldManager, "") } // ExtractBuildStatus is the same as ExtractBuild except // that it extracts the status subresource applied configuration. // Experimental! -func ExtractBuildStatus(build *apiconfigv1.Build, fieldManager string) (*BuildApplyConfiguration, error) { +func ExtractBuildStatus(build *configv1.Build, fieldManager string) (*BuildApplyConfiguration, error) { return extractBuild(build, fieldManager, "status") } -func extractBuild(build *apiconfigv1.Build, fieldManager string, subresource string) (*BuildApplyConfiguration, error) { +func extractBuild(build *configv1.Build, fieldManager string, subresource string) (*BuildApplyConfiguration, error) { b := &BuildApplyConfiguration{} err := managedfields.ExtractInto(build, internal.Parser().Type("com.github.openshift.api.config.v1.Build"), fieldManager, b, subresource) if err != nil { @@ -68,7 +68,7 @@ func extractBuild(build *apiconfigv1.Build, fieldManager string, subresource str // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. 
func (b *BuildApplyConfiguration) WithKind(value string) *BuildApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -76,7 +76,7 @@ func (b *BuildApplyConfiguration) WithKind(value string) *BuildApplyConfiguratio // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *BuildApplyConfiguration) WithAPIVersion(value string) *BuildApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -85,7 +85,7 @@ func (b *BuildApplyConfiguration) WithAPIVersion(value string) *BuildApplyConfig // If called multiple times, the Name field is set to the value of the last call. func (b *BuildApplyConfiguration) WithName(value string) *BuildApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -94,7 +94,7 @@ func (b *BuildApplyConfiguration) WithName(value string) *BuildApplyConfiguratio // If called multiple times, the GenerateName field is set to the value of the last call. func (b *BuildApplyConfiguration) WithGenerateName(value string) *BuildApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -103,7 +103,7 @@ func (b *BuildApplyConfiguration) WithGenerateName(value string) *BuildApplyConf // If called multiple times, the Namespace field is set to the value of the last call. func (b *BuildApplyConfiguration) WithNamespace(value string) *BuildApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -112,7 +112,7 @@ func (b *BuildApplyConfiguration) WithNamespace(value string) *BuildApplyConfigu // If called multiple times, the UID field is set to the value of the last call. func (b *BuildApplyConfiguration) WithUID(value types.UID) *BuildApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -121,7 +121,7 @@ func (b *BuildApplyConfiguration) WithUID(value types.UID) *BuildApplyConfigurat // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *BuildApplyConfiguration) WithResourceVersion(value string) *BuildApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -130,25 +130,25 @@ func (b *BuildApplyConfiguration) WithResourceVersion(value string) *BuildApplyC // If called multiple times, the Generation field is set to the value of the last call. func (b *BuildApplyConfiguration) WithGeneration(value int64) *BuildApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CreationTimestamp field is set to the value of the last call. 
-func (b *BuildApplyConfiguration) WithCreationTimestamp(value metav1.Time) *BuildApplyConfiguration { +func (b *BuildApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *BuildApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. -func (b *BuildApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *BuildApplyConfiguration { +func (b *BuildApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *BuildApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -157,7 +157,7 @@ func (b *BuildApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *Buil // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *BuildApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *BuildApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -167,11 +167,11 @@ func (b *BuildApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *B // overwriting an existing map entries in Labels field with the same key. func (b *BuildApplyConfiguration) WithLabels(entries map[string]string) *BuildApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -182,11 +182,11 @@ func (b *BuildApplyConfiguration) WithLabels(entries map[string]string) *BuildAp // overwriting an existing map entries in Annotations field with the same key. func (b *BuildApplyConfiguration) WithAnnotations(entries map[string]string) *BuildApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -194,13 +194,13 @@ func (b *BuildApplyConfiguration) WithAnnotations(entries map[string]string) *Bu // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the OwnerReferences field. 
-func (b *BuildApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *BuildApplyConfiguration { +func (b *BuildApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *BuildApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -211,14 +211,14 @@ func (b *BuildApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferen func (b *BuildApplyConfiguration) WithFinalizers(values ...string) *BuildApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } func (b *BuildApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} } } @@ -233,5 +233,5 @@ func (b *BuildApplyConfiguration) WithSpec(value *BuildSpecApplyConfiguration) * // GetName retrieves the value of the Name field in the declarative configuration. func (b *BuildApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/cloudcontrollermanagerstatus.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/cloudcontrollermanagerstatus.go index 60bf4ed6b..79850b75e 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/cloudcontrollermanagerstatus.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/cloudcontrollermanagerstatus.go @@ -3,13 +3,13 @@ package v1 import ( - v1 "github.com/openshift/api/config/v1" + configv1 "github.com/openshift/api/config/v1" ) // CloudControllerManagerStatusApplyConfiguration represents a declarative configuration of the CloudControllerManagerStatus type for use // with apply. type CloudControllerManagerStatusApplyConfiguration struct { - State *v1.CloudControllerManagerState `json:"state,omitempty"` + State *configv1.CloudControllerManagerState `json:"state,omitempty"` } // CloudControllerManagerStatusApplyConfiguration constructs a declarative configuration of the CloudControllerManagerStatus type for use with @@ -21,7 +21,7 @@ func CloudControllerManagerStatus() *CloudControllerManagerStatusApplyConfigurat // WithState sets the State field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the State field is set to the value of the last call. 
-func (b *CloudControllerManagerStatusApplyConfiguration) WithState(value v1.CloudControllerManagerState) *CloudControllerManagerStatusApplyConfiguration { +func (b *CloudControllerManagerStatusApplyConfiguration) WithState(value configv1.CloudControllerManagerState) *CloudControllerManagerStatusApplyConfiguration { b.State = &value return b } diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/cloudloadbalancerconfig.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/cloudloadbalancerconfig.go index 5be77a3a3..d73faf3f2 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/cloudloadbalancerconfig.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/cloudloadbalancerconfig.go @@ -3,13 +3,13 @@ package v1 import ( - v1 "github.com/openshift/api/config/v1" + configv1 "github.com/openshift/api/config/v1" ) // CloudLoadBalancerConfigApplyConfiguration represents a declarative configuration of the CloudLoadBalancerConfig type for use // with apply. type CloudLoadBalancerConfigApplyConfiguration struct { - DNSType *v1.DNSType `json:"dnsType,omitempty"` + DNSType *configv1.DNSType `json:"dnsType,omitempty"` ClusterHosted *CloudLoadBalancerIPsApplyConfiguration `json:"clusterHosted,omitempty"` } @@ -22,7 +22,7 @@ func CloudLoadBalancerConfig() *CloudLoadBalancerConfigApplyConfiguration { // WithDNSType sets the DNSType field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DNSType field is set to the value of the last call. -func (b *CloudLoadBalancerConfigApplyConfiguration) WithDNSType(value v1.DNSType) *CloudLoadBalancerConfigApplyConfiguration { +func (b *CloudLoadBalancerConfigApplyConfiguration) WithDNSType(value configv1.DNSType) *CloudLoadBalancerConfigApplyConfiguration { b.DNSType = &value return b } diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/cloudloadbalancerips.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/cloudloadbalancerips.go index baef18811..ce7f25850 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/cloudloadbalancerips.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/cloudloadbalancerips.go @@ -3,15 +3,15 @@ package v1 import ( - v1 "github.com/openshift/api/config/v1" + configv1 "github.com/openshift/api/config/v1" ) // CloudLoadBalancerIPsApplyConfiguration represents a declarative configuration of the CloudLoadBalancerIPs type for use // with apply. 
type CloudLoadBalancerIPsApplyConfiguration struct { - APIIntLoadBalancerIPs []v1.IP `json:"apiIntLoadBalancerIPs,omitempty"` - APILoadBalancerIPs []v1.IP `json:"apiLoadBalancerIPs,omitempty"` - IngressLoadBalancerIPs []v1.IP `json:"ingressLoadBalancerIPs,omitempty"` + APIIntLoadBalancerIPs []configv1.IP `json:"apiIntLoadBalancerIPs,omitempty"` + APILoadBalancerIPs []configv1.IP `json:"apiLoadBalancerIPs,omitempty"` + IngressLoadBalancerIPs []configv1.IP `json:"ingressLoadBalancerIPs,omitempty"` } // CloudLoadBalancerIPsApplyConfiguration constructs a declarative configuration of the CloudLoadBalancerIPs type for use with @@ -23,7 +23,7 @@ func CloudLoadBalancerIPs() *CloudLoadBalancerIPsApplyConfiguration { // WithAPIIntLoadBalancerIPs adds the given value to the APIIntLoadBalancerIPs field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the APIIntLoadBalancerIPs field. -func (b *CloudLoadBalancerIPsApplyConfiguration) WithAPIIntLoadBalancerIPs(values ...v1.IP) *CloudLoadBalancerIPsApplyConfiguration { +func (b *CloudLoadBalancerIPsApplyConfiguration) WithAPIIntLoadBalancerIPs(values ...configv1.IP) *CloudLoadBalancerIPsApplyConfiguration { for i := range values { b.APIIntLoadBalancerIPs = append(b.APIIntLoadBalancerIPs, values[i]) } @@ -33,7 +33,7 @@ func (b *CloudLoadBalancerIPsApplyConfiguration) WithAPIIntLoadBalancerIPs(value // WithAPILoadBalancerIPs adds the given value to the APILoadBalancerIPs field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the APILoadBalancerIPs field. -func (b *CloudLoadBalancerIPsApplyConfiguration) WithAPILoadBalancerIPs(values ...v1.IP) *CloudLoadBalancerIPsApplyConfiguration { +func (b *CloudLoadBalancerIPsApplyConfiguration) WithAPILoadBalancerIPs(values ...configv1.IP) *CloudLoadBalancerIPsApplyConfiguration { for i := range values { b.APILoadBalancerIPs = append(b.APILoadBalancerIPs, values[i]) } @@ -43,7 +43,7 @@ func (b *CloudLoadBalancerIPsApplyConfiguration) WithAPILoadBalancerIPs(values . // WithIngressLoadBalancerIPs adds the given value to the IngressLoadBalancerIPs field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the IngressLoadBalancerIPs field. 
-func (b *CloudLoadBalancerIPsApplyConfiguration) WithIngressLoadBalancerIPs(values ...v1.IP) *CloudLoadBalancerIPsApplyConfiguration { +func (b *CloudLoadBalancerIPsApplyConfiguration) WithIngressLoadBalancerIPs(values ...configv1.IP) *CloudLoadBalancerIPsApplyConfiguration { for i := range values { b.IngressLoadBalancerIPs = append(b.IngressLoadBalancerIPs, values[i]) } diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clusteroperator.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clusteroperator.go index 6371179a8..4bfa43805 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clusteroperator.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clusteroperator.go @@ -5,19 +5,19 @@ package v1 import ( configv1 "github.com/openshift/api/config/v1" internal "github.com/openshift/client-go/config/applyconfigurations/internal" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) // ClusterOperatorApplyConfiguration represents a declarative configuration of the ClusterOperator type for use // with apply. type ClusterOperatorApplyConfiguration struct { - v1.TypeMetaApplyConfiguration `json:",inline"` - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Spec *configv1.ClusterOperatorSpec `json:"spec,omitempty"` - Status *ClusterOperatorStatusApplyConfiguration `json:"status,omitempty"` + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *configv1.ClusterOperatorSpec `json:"spec,omitempty"` + Status *ClusterOperatorStatusApplyConfiguration `json:"status,omitempty"` } // ClusterOperator constructs a declarative configuration of the ClusterOperator type for use with @@ -69,7 +69,7 @@ func extractClusterOperator(clusterOperator *configv1.ClusterOperator, fieldMana // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *ClusterOperatorApplyConfiguration) WithKind(value string) *ClusterOperatorApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -77,7 +77,7 @@ func (b *ClusterOperatorApplyConfiguration) WithKind(value string) *ClusterOpera // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *ClusterOperatorApplyConfiguration) WithAPIVersion(value string) *ClusterOperatorApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -86,7 +86,7 @@ func (b *ClusterOperatorApplyConfiguration) WithAPIVersion(value string) *Cluste // If called multiple times, the Name field is set to the value of the last call. 
func (b *ClusterOperatorApplyConfiguration) WithName(value string) *ClusterOperatorApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -95,7 +95,7 @@ func (b *ClusterOperatorApplyConfiguration) WithName(value string) *ClusterOpera // If called multiple times, the GenerateName field is set to the value of the last call. func (b *ClusterOperatorApplyConfiguration) WithGenerateName(value string) *ClusterOperatorApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -104,7 +104,7 @@ func (b *ClusterOperatorApplyConfiguration) WithGenerateName(value string) *Clus // If called multiple times, the Namespace field is set to the value of the last call. func (b *ClusterOperatorApplyConfiguration) WithNamespace(value string) *ClusterOperatorApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -113,7 +113,7 @@ func (b *ClusterOperatorApplyConfiguration) WithNamespace(value string) *Cluster // If called multiple times, the UID field is set to the value of the last call. func (b *ClusterOperatorApplyConfiguration) WithUID(value types.UID) *ClusterOperatorApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -122,7 +122,7 @@ func (b *ClusterOperatorApplyConfiguration) WithUID(value types.UID) *ClusterOpe // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *ClusterOperatorApplyConfiguration) WithResourceVersion(value string) *ClusterOperatorApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -131,25 +131,25 @@ func (b *ClusterOperatorApplyConfiguration) WithResourceVersion(value string) *C // If called multiple times, the Generation field is set to the value of the last call. func (b *ClusterOperatorApplyConfiguration) WithGeneration(value int64) *ClusterOperatorApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CreationTimestamp field is set to the value of the last call. -func (b *ClusterOperatorApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ClusterOperatorApplyConfiguration { +func (b *ClusterOperatorApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *ClusterOperatorApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. 
-func (b *ClusterOperatorApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ClusterOperatorApplyConfiguration { +func (b *ClusterOperatorApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *ClusterOperatorApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -158,7 +158,7 @@ func (b *ClusterOperatorApplyConfiguration) WithDeletionTimestamp(value metav1.T // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *ClusterOperatorApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ClusterOperatorApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -168,11 +168,11 @@ func (b *ClusterOperatorApplyConfiguration) WithDeletionGracePeriodSeconds(value // overwriting an existing map entries in Labels field with the same key. func (b *ClusterOperatorApplyConfiguration) WithLabels(entries map[string]string) *ClusterOperatorApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -183,11 +183,11 @@ func (b *ClusterOperatorApplyConfiguration) WithLabels(entries map[string]string // overwriting an existing map entries in Annotations field with the same key. func (b *ClusterOperatorApplyConfiguration) WithAnnotations(entries map[string]string) *ClusterOperatorApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -195,13 +195,13 @@ func (b *ClusterOperatorApplyConfiguration) WithAnnotations(entries map[string]s // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the OwnerReferences field. 
-func (b *ClusterOperatorApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ClusterOperatorApplyConfiguration { +func (b *ClusterOperatorApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *ClusterOperatorApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -212,14 +212,14 @@ func (b *ClusterOperatorApplyConfiguration) WithOwnerReferences(values ...*v1.Ow func (b *ClusterOperatorApplyConfiguration) WithFinalizers(values ...string) *ClusterOperatorApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } func (b *ClusterOperatorApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} } } @@ -242,5 +242,5 @@ func (b *ClusterOperatorApplyConfiguration) WithStatus(value *ClusterOperatorSta // GetName retrieves the value of the Name field in the declarative configuration. func (b *ClusterOperatorApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clusteroperatorstatuscondition.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clusteroperatorstatuscondition.go index 557f75d2e..3e58daa81 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clusteroperatorstatuscondition.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clusteroperatorstatuscondition.go @@ -3,18 +3,18 @@ package v1 import ( - v1 "github.com/openshift/api/config/v1" + configv1 "github.com/openshift/api/config/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) // ClusterOperatorStatusConditionApplyConfiguration represents a declarative configuration of the ClusterOperatorStatusCondition type for use // with apply. 
type ClusterOperatorStatusConditionApplyConfiguration struct { - Type *v1.ClusterStatusConditionType `json:"type,omitempty"` - Status *v1.ConditionStatus `json:"status,omitempty"` - LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"` - Reason *string `json:"reason,omitempty"` - Message *string `json:"message,omitempty"` + Type *configv1.ClusterStatusConditionType `json:"type,omitempty"` + Status *configv1.ConditionStatus `json:"status,omitempty"` + LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"` + Reason *string `json:"reason,omitempty"` + Message *string `json:"message,omitempty"` } // ClusterOperatorStatusConditionApplyConfiguration constructs a declarative configuration of the ClusterOperatorStatusCondition type for use with @@ -26,7 +26,7 @@ func ClusterOperatorStatusCondition() *ClusterOperatorStatusConditionApplyConfig // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. -func (b *ClusterOperatorStatusConditionApplyConfiguration) WithType(value v1.ClusterStatusConditionType) *ClusterOperatorStatusConditionApplyConfiguration { +func (b *ClusterOperatorStatusConditionApplyConfiguration) WithType(value configv1.ClusterStatusConditionType) *ClusterOperatorStatusConditionApplyConfiguration { b.Type = &value return b } @@ -34,7 +34,7 @@ func (b *ClusterOperatorStatusConditionApplyConfiguration) WithType(value v1.Clu // WithStatus sets the Status field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Status field is set to the value of the last call. -func (b *ClusterOperatorStatusConditionApplyConfiguration) WithStatus(value v1.ConditionStatus) *ClusterOperatorStatusConditionApplyConfiguration { +func (b *ClusterOperatorStatusConditionApplyConfiguration) WithStatus(value configv1.ConditionStatus) *ClusterOperatorStatusConditionApplyConfiguration { b.Status = &value return b } diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clusterversion.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clusterversion.go index 52f2e5cb8..69073ee5c 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clusterversion.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clusterversion.go @@ -3,21 +3,21 @@ package v1 import ( - apiconfigv1 "github.com/openshift/api/config/v1" + configv1 "github.com/openshift/api/config/v1" internal "github.com/openshift/client-go/config/applyconfigurations/internal" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) // ClusterVersionApplyConfiguration represents a declarative configuration of the ClusterVersion type for use // with apply. 
type ClusterVersionApplyConfiguration struct { - v1.TypeMetaApplyConfiguration `json:",inline"` - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Spec *ClusterVersionSpecApplyConfiguration `json:"spec,omitempty"` - Status *ClusterVersionStatusApplyConfiguration `json:"status,omitempty"` + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *ClusterVersionSpecApplyConfiguration `json:"spec,omitempty"` + Status *ClusterVersionStatusApplyConfiguration `json:"status,omitempty"` } // ClusterVersion constructs a declarative configuration of the ClusterVersion type for use with @@ -41,18 +41,18 @@ func ClusterVersion(name string) *ClusterVersionApplyConfiguration { // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! -func ExtractClusterVersion(clusterVersion *apiconfigv1.ClusterVersion, fieldManager string) (*ClusterVersionApplyConfiguration, error) { +func ExtractClusterVersion(clusterVersion *configv1.ClusterVersion, fieldManager string) (*ClusterVersionApplyConfiguration, error) { return extractClusterVersion(clusterVersion, fieldManager, "") } // ExtractClusterVersionStatus is the same as ExtractClusterVersion except // that it extracts the status subresource applied configuration. // Experimental! -func ExtractClusterVersionStatus(clusterVersion *apiconfigv1.ClusterVersion, fieldManager string) (*ClusterVersionApplyConfiguration, error) { +func ExtractClusterVersionStatus(clusterVersion *configv1.ClusterVersion, fieldManager string) (*ClusterVersionApplyConfiguration, error) { return extractClusterVersion(clusterVersion, fieldManager, "status") } -func extractClusterVersion(clusterVersion *apiconfigv1.ClusterVersion, fieldManager string, subresource string) (*ClusterVersionApplyConfiguration, error) { +func extractClusterVersion(clusterVersion *configv1.ClusterVersion, fieldManager string, subresource string) (*ClusterVersionApplyConfiguration, error) { b := &ClusterVersionApplyConfiguration{} err := managedfields.ExtractInto(clusterVersion, internal.Parser().Type("com.github.openshift.api.config.v1.ClusterVersion"), fieldManager, b, subresource) if err != nil { @@ -69,7 +69,7 @@ func extractClusterVersion(clusterVersion *apiconfigv1.ClusterVersion, fieldMana // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *ClusterVersionApplyConfiguration) WithKind(value string) *ClusterVersionApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -77,7 +77,7 @@ func (b *ClusterVersionApplyConfiguration) WithKind(value string) *ClusterVersio // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *ClusterVersionApplyConfiguration) WithAPIVersion(value string) *ClusterVersionApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -86,7 +86,7 @@ func (b *ClusterVersionApplyConfiguration) WithAPIVersion(value string) *Cluster // If called multiple times, the Name field is set to the value of the last call. 
func (b *ClusterVersionApplyConfiguration) WithName(value string) *ClusterVersionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -95,7 +95,7 @@ func (b *ClusterVersionApplyConfiguration) WithName(value string) *ClusterVersio // If called multiple times, the GenerateName field is set to the value of the last call. func (b *ClusterVersionApplyConfiguration) WithGenerateName(value string) *ClusterVersionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -104,7 +104,7 @@ func (b *ClusterVersionApplyConfiguration) WithGenerateName(value string) *Clust // If called multiple times, the Namespace field is set to the value of the last call. func (b *ClusterVersionApplyConfiguration) WithNamespace(value string) *ClusterVersionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -113,7 +113,7 @@ func (b *ClusterVersionApplyConfiguration) WithNamespace(value string) *ClusterV // If called multiple times, the UID field is set to the value of the last call. func (b *ClusterVersionApplyConfiguration) WithUID(value types.UID) *ClusterVersionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -122,7 +122,7 @@ func (b *ClusterVersionApplyConfiguration) WithUID(value types.UID) *ClusterVers // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *ClusterVersionApplyConfiguration) WithResourceVersion(value string) *ClusterVersionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -131,25 +131,25 @@ func (b *ClusterVersionApplyConfiguration) WithResourceVersion(value string) *Cl // If called multiple times, the Generation field is set to the value of the last call. func (b *ClusterVersionApplyConfiguration) WithGeneration(value int64) *ClusterVersionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CreationTimestamp field is set to the value of the last call. -func (b *ClusterVersionApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ClusterVersionApplyConfiguration { +func (b *ClusterVersionApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *ClusterVersionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. 
-func (b *ClusterVersionApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ClusterVersionApplyConfiguration { +func (b *ClusterVersionApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *ClusterVersionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -158,7 +158,7 @@ func (b *ClusterVersionApplyConfiguration) WithDeletionTimestamp(value metav1.Ti // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *ClusterVersionApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ClusterVersionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -168,11 +168,11 @@ func (b *ClusterVersionApplyConfiguration) WithDeletionGracePeriodSeconds(value // overwriting an existing map entries in Labels field with the same key. func (b *ClusterVersionApplyConfiguration) WithLabels(entries map[string]string) *ClusterVersionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -183,11 +183,11 @@ func (b *ClusterVersionApplyConfiguration) WithLabels(entries map[string]string) // overwriting an existing map entries in Annotations field with the same key. func (b *ClusterVersionApplyConfiguration) WithAnnotations(entries map[string]string) *ClusterVersionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -195,13 +195,13 @@ func (b *ClusterVersionApplyConfiguration) WithAnnotations(entries map[string]st // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the OwnerReferences field. 
-func (b *ClusterVersionApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ClusterVersionApplyConfiguration { +func (b *ClusterVersionApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *ClusterVersionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -212,14 +212,14 @@ func (b *ClusterVersionApplyConfiguration) WithOwnerReferences(values ...*v1.Own func (b *ClusterVersionApplyConfiguration) WithFinalizers(values ...string) *ClusterVersionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } func (b *ClusterVersionApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} } } @@ -242,5 +242,5 @@ func (b *ClusterVersionApplyConfiguration) WithStatus(value *ClusterVersionStatu // GetName retrieves the value of the Name field in the declarative configuration. func (b *ClusterVersionApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clusterversioncapabilitiesspec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clusterversioncapabilitiesspec.go index 254eb00e9..feb03e3c3 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clusterversioncapabilitiesspec.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clusterversioncapabilitiesspec.go @@ -3,14 +3,14 @@ package v1 import ( - v1 "github.com/openshift/api/config/v1" + configv1 "github.com/openshift/api/config/v1" ) // ClusterVersionCapabilitiesSpecApplyConfiguration represents a declarative configuration of the ClusterVersionCapabilitiesSpec type for use // with apply. type ClusterVersionCapabilitiesSpecApplyConfiguration struct { - BaselineCapabilitySet *v1.ClusterVersionCapabilitySet `json:"baselineCapabilitySet,omitempty"` - AdditionalEnabledCapabilities []v1.ClusterVersionCapability `json:"additionalEnabledCapabilities,omitempty"` + BaselineCapabilitySet *configv1.ClusterVersionCapabilitySet `json:"baselineCapabilitySet,omitempty"` + AdditionalEnabledCapabilities []configv1.ClusterVersionCapability `json:"additionalEnabledCapabilities,omitempty"` } // ClusterVersionCapabilitiesSpecApplyConfiguration constructs a declarative configuration of the ClusterVersionCapabilitiesSpec type for use with @@ -22,7 +22,7 @@ func ClusterVersionCapabilitiesSpec() *ClusterVersionCapabilitiesSpecApplyConfig // WithBaselineCapabilitySet sets the BaselineCapabilitySet field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. 
// If called multiple times, the BaselineCapabilitySet field is set to the value of the last call. -func (b *ClusterVersionCapabilitiesSpecApplyConfiguration) WithBaselineCapabilitySet(value v1.ClusterVersionCapabilitySet) *ClusterVersionCapabilitiesSpecApplyConfiguration { +func (b *ClusterVersionCapabilitiesSpecApplyConfiguration) WithBaselineCapabilitySet(value configv1.ClusterVersionCapabilitySet) *ClusterVersionCapabilitiesSpecApplyConfiguration { b.BaselineCapabilitySet = &value return b } @@ -30,7 +30,7 @@ func (b *ClusterVersionCapabilitiesSpecApplyConfiguration) WithBaselineCapabilit // WithAdditionalEnabledCapabilities adds the given value to the AdditionalEnabledCapabilities field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the AdditionalEnabledCapabilities field. -func (b *ClusterVersionCapabilitiesSpecApplyConfiguration) WithAdditionalEnabledCapabilities(values ...v1.ClusterVersionCapability) *ClusterVersionCapabilitiesSpecApplyConfiguration { +func (b *ClusterVersionCapabilitiesSpecApplyConfiguration) WithAdditionalEnabledCapabilities(values ...configv1.ClusterVersionCapability) *ClusterVersionCapabilitiesSpecApplyConfiguration { for i := range values { b.AdditionalEnabledCapabilities = append(b.AdditionalEnabledCapabilities, values[i]) } diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clusterversioncapabilitiesstatus.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clusterversioncapabilitiesstatus.go index 05350241f..2a8807fe2 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clusterversioncapabilitiesstatus.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clusterversioncapabilitiesstatus.go @@ -3,14 +3,14 @@ package v1 import ( - v1 "github.com/openshift/api/config/v1" + configv1 "github.com/openshift/api/config/v1" ) // ClusterVersionCapabilitiesStatusApplyConfiguration represents a declarative configuration of the ClusterVersionCapabilitiesStatus type for use // with apply. type ClusterVersionCapabilitiesStatusApplyConfiguration struct { - EnabledCapabilities []v1.ClusterVersionCapability `json:"enabledCapabilities,omitempty"` - KnownCapabilities []v1.ClusterVersionCapability `json:"knownCapabilities,omitempty"` + EnabledCapabilities []configv1.ClusterVersionCapability `json:"enabledCapabilities,omitempty"` + KnownCapabilities []configv1.ClusterVersionCapability `json:"knownCapabilities,omitempty"` } // ClusterVersionCapabilitiesStatusApplyConfiguration constructs a declarative configuration of the ClusterVersionCapabilitiesStatus type for use with @@ -22,7 +22,7 @@ func ClusterVersionCapabilitiesStatus() *ClusterVersionCapabilitiesStatusApplyCo // WithEnabledCapabilities adds the given value to the EnabledCapabilities field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the EnabledCapabilities field. 
-func (b *ClusterVersionCapabilitiesStatusApplyConfiguration) WithEnabledCapabilities(values ...v1.ClusterVersionCapability) *ClusterVersionCapabilitiesStatusApplyConfiguration { +func (b *ClusterVersionCapabilitiesStatusApplyConfiguration) WithEnabledCapabilities(values ...configv1.ClusterVersionCapability) *ClusterVersionCapabilitiesStatusApplyConfiguration { for i := range values { b.EnabledCapabilities = append(b.EnabledCapabilities, values[i]) } @@ -32,7 +32,7 @@ func (b *ClusterVersionCapabilitiesStatusApplyConfiguration) WithEnabledCapabili // WithKnownCapabilities adds the given value to the KnownCapabilities field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the KnownCapabilities field. -func (b *ClusterVersionCapabilitiesStatusApplyConfiguration) WithKnownCapabilities(values ...v1.ClusterVersionCapability) *ClusterVersionCapabilitiesStatusApplyConfiguration { +func (b *ClusterVersionCapabilitiesStatusApplyConfiguration) WithKnownCapabilities(values ...configv1.ClusterVersionCapability) *ClusterVersionCapabilitiesStatusApplyConfiguration { for i := range values { b.KnownCapabilities = append(b.KnownCapabilities, values[i]) } diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clusterversionspec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clusterversionspec.go index b440c613b..926f29557 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clusterversionspec.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clusterversionspec.go @@ -3,15 +3,15 @@ package v1 import ( - v1 "github.com/openshift/api/config/v1" + configv1 "github.com/openshift/api/config/v1" ) // ClusterVersionSpecApplyConfiguration represents a declarative configuration of the ClusterVersionSpec type for use // with apply. type ClusterVersionSpecApplyConfiguration struct { - ClusterID *v1.ClusterID `json:"clusterID,omitempty"` + ClusterID *configv1.ClusterID `json:"clusterID,omitempty"` DesiredUpdate *UpdateApplyConfiguration `json:"desiredUpdate,omitempty"` - Upstream *v1.URL `json:"upstream,omitempty"` + Upstream *configv1.URL `json:"upstream,omitempty"` Channel *string `json:"channel,omitempty"` Capabilities *ClusterVersionCapabilitiesSpecApplyConfiguration `json:"capabilities,omitempty"` SignatureStores []SignatureStoreApplyConfiguration `json:"signatureStores,omitempty"` @@ -27,7 +27,7 @@ func ClusterVersionSpec() *ClusterVersionSpecApplyConfiguration { // WithClusterID sets the ClusterID field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the ClusterID field is set to the value of the last call. -func (b *ClusterVersionSpecApplyConfiguration) WithClusterID(value v1.ClusterID) *ClusterVersionSpecApplyConfiguration { +func (b *ClusterVersionSpecApplyConfiguration) WithClusterID(value configv1.ClusterID) *ClusterVersionSpecApplyConfiguration { b.ClusterID = &value return b } @@ -43,7 +43,7 @@ func (b *ClusterVersionSpecApplyConfiguration) WithDesiredUpdate(value *UpdateAp // WithUpstream sets the Upstream field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. 
// If called multiple times, the Upstream field is set to the value of the last call. -func (b *ClusterVersionSpecApplyConfiguration) WithUpstream(value v1.URL) *ClusterVersionSpecApplyConfiguration { +func (b *ClusterVersionSpecApplyConfiguration) WithUpstream(value configv1.URL) *ClusterVersionSpecApplyConfiguration { b.Upstream = &value return b } diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/componentroutespec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/componentroutespec.go index f8a2c5e51..beebd2b02 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/componentroutespec.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/componentroutespec.go @@ -3,7 +3,7 @@ package v1 import ( - v1 "github.com/openshift/api/config/v1" + configv1 "github.com/openshift/api/config/v1" ) // ComponentRouteSpecApplyConfiguration represents a declarative configuration of the ComponentRouteSpec type for use @@ -11,7 +11,7 @@ import ( type ComponentRouteSpecApplyConfiguration struct { Namespace *string `json:"namespace,omitempty"` Name *string `json:"name,omitempty"` - Hostname *v1.Hostname `json:"hostname,omitempty"` + Hostname *configv1.Hostname `json:"hostname,omitempty"` ServingCertKeyPairSecret *SecretNameReferenceApplyConfiguration `json:"servingCertKeyPairSecret,omitempty"` } @@ -40,7 +40,7 @@ func (b *ComponentRouteSpecApplyConfiguration) WithName(value string) *Component // WithHostname sets the Hostname field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Hostname field is set to the value of the last call. 
-func (b *ComponentRouteSpecApplyConfiguration) WithHostname(value v1.Hostname) *ComponentRouteSpecApplyConfiguration { +func (b *ComponentRouteSpecApplyConfiguration) WithHostname(value configv1.Hostname) *ComponentRouteSpecApplyConfiguration { b.Hostname = &value return b } diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/componentroutestatus.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/componentroutestatus.go index abf378c84..ae9553882 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/componentroutestatus.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/componentroutestatus.go @@ -3,7 +3,7 @@ package v1 import ( - v1 "github.com/openshift/api/config/v1" + configv1 "github.com/openshift/api/config/v1" metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) @@ -12,9 +12,9 @@ import ( type ComponentRouteStatusApplyConfiguration struct { Namespace *string `json:"namespace,omitempty"` Name *string `json:"name,omitempty"` - DefaultHostname *v1.Hostname `json:"defaultHostname,omitempty"` - ConsumingUsers []v1.ConsumingUser `json:"consumingUsers,omitempty"` - CurrentHostnames []v1.Hostname `json:"currentHostnames,omitempty"` + DefaultHostname *configv1.Hostname `json:"defaultHostname,omitempty"` + ConsumingUsers []configv1.ConsumingUser `json:"consumingUsers,omitempty"` + CurrentHostnames []configv1.Hostname `json:"currentHostnames,omitempty"` Conditions []metav1.ConditionApplyConfiguration `json:"conditions,omitempty"` RelatedObjects []ObjectReferenceApplyConfiguration `json:"relatedObjects,omitempty"` } @@ -44,7 +44,7 @@ func (b *ComponentRouteStatusApplyConfiguration) WithName(value string) *Compone // WithDefaultHostname sets the DefaultHostname field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DefaultHostname field is set to the value of the last call. -func (b *ComponentRouteStatusApplyConfiguration) WithDefaultHostname(value v1.Hostname) *ComponentRouteStatusApplyConfiguration { +func (b *ComponentRouteStatusApplyConfiguration) WithDefaultHostname(value configv1.Hostname) *ComponentRouteStatusApplyConfiguration { b.DefaultHostname = &value return b } @@ -52,7 +52,7 @@ func (b *ComponentRouteStatusApplyConfiguration) WithDefaultHostname(value v1.Ho // WithConsumingUsers adds the given value to the ConsumingUsers field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the ConsumingUsers field. -func (b *ComponentRouteStatusApplyConfiguration) WithConsumingUsers(values ...v1.ConsumingUser) *ComponentRouteStatusApplyConfiguration { +func (b *ComponentRouteStatusApplyConfiguration) WithConsumingUsers(values ...configv1.ConsumingUser) *ComponentRouteStatusApplyConfiguration { for i := range values { b.ConsumingUsers = append(b.ConsumingUsers, values[i]) } @@ -62,7 +62,7 @@ func (b *ComponentRouteStatusApplyConfiguration) WithConsumingUsers(values ...v1 // WithCurrentHostnames adds the given value to the CurrentHostnames field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. 
// If called multiple times, values provided by each call will be appended to the CurrentHostnames field. -func (b *ComponentRouteStatusApplyConfiguration) WithCurrentHostnames(values ...v1.Hostname) *ComponentRouteStatusApplyConfiguration { +func (b *ComponentRouteStatusApplyConfiguration) WithCurrentHostnames(values ...configv1.Hostname) *ComponentRouteStatusApplyConfiguration { for i := range values { b.CurrentHostnames = append(b.CurrentHostnames, values[i]) } diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/console.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/console.go index 58bf394da..8e04091da 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/console.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/console.go @@ -3,21 +3,21 @@ package v1 import ( - apiconfigv1 "github.com/openshift/api/config/v1" + configv1 "github.com/openshift/api/config/v1" internal "github.com/openshift/client-go/config/applyconfigurations/internal" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) // ConsoleApplyConfiguration represents a declarative configuration of the Console type for use // with apply. type ConsoleApplyConfiguration struct { - v1.TypeMetaApplyConfiguration `json:",inline"` - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Spec *ConsoleSpecApplyConfiguration `json:"spec,omitempty"` - Status *ConsoleStatusApplyConfiguration `json:"status,omitempty"` + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *ConsoleSpecApplyConfiguration `json:"spec,omitempty"` + Status *ConsoleStatusApplyConfiguration `json:"status,omitempty"` } // Console constructs a declarative configuration of the Console type for use with @@ -41,18 +41,18 @@ func Console(name string) *ConsoleApplyConfiguration { // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! -func ExtractConsole(console *apiconfigv1.Console, fieldManager string) (*ConsoleApplyConfiguration, error) { +func ExtractConsole(console *configv1.Console, fieldManager string) (*ConsoleApplyConfiguration, error) { return extractConsole(console, fieldManager, "") } // ExtractConsoleStatus is the same as ExtractConsole except // that it extracts the status subresource applied configuration. // Experimental! 
-func ExtractConsoleStatus(console *apiconfigv1.Console, fieldManager string) (*ConsoleApplyConfiguration, error) { +func ExtractConsoleStatus(console *configv1.Console, fieldManager string) (*ConsoleApplyConfiguration, error) { return extractConsole(console, fieldManager, "status") } -func extractConsole(console *apiconfigv1.Console, fieldManager string, subresource string) (*ConsoleApplyConfiguration, error) { +func extractConsole(console *configv1.Console, fieldManager string, subresource string) (*ConsoleApplyConfiguration, error) { b := &ConsoleApplyConfiguration{} err := managedfields.ExtractInto(console, internal.Parser().Type("com.github.openshift.api.config.v1.Console"), fieldManager, b, subresource) if err != nil { @@ -69,7 +69,7 @@ func extractConsole(console *apiconfigv1.Console, fieldManager string, subresour // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *ConsoleApplyConfiguration) WithKind(value string) *ConsoleApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -77,7 +77,7 @@ func (b *ConsoleApplyConfiguration) WithKind(value string) *ConsoleApplyConfigur // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *ConsoleApplyConfiguration) WithAPIVersion(value string) *ConsoleApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -86,7 +86,7 @@ func (b *ConsoleApplyConfiguration) WithAPIVersion(value string) *ConsoleApplyCo // If called multiple times, the Name field is set to the value of the last call. func (b *ConsoleApplyConfiguration) WithName(value string) *ConsoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -95,7 +95,7 @@ func (b *ConsoleApplyConfiguration) WithName(value string) *ConsoleApplyConfigur // If called multiple times, the GenerateName field is set to the value of the last call. func (b *ConsoleApplyConfiguration) WithGenerateName(value string) *ConsoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -104,7 +104,7 @@ func (b *ConsoleApplyConfiguration) WithGenerateName(value string) *ConsoleApply // If called multiple times, the Namespace field is set to the value of the last call. func (b *ConsoleApplyConfiguration) WithNamespace(value string) *ConsoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -113,7 +113,7 @@ func (b *ConsoleApplyConfiguration) WithNamespace(value string) *ConsoleApplyCon // If called multiple times, the UID field is set to the value of the last call. func (b *ConsoleApplyConfiguration) WithUID(value types.UID) *ConsoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -122,7 +122,7 @@ func (b *ConsoleApplyConfiguration) WithUID(value types.UID) *ConsoleApplyConfig // If called multiple times, the ResourceVersion field is set to the value of the last call. 
func (b *ConsoleApplyConfiguration) WithResourceVersion(value string) *ConsoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -131,25 +131,25 @@ func (b *ConsoleApplyConfiguration) WithResourceVersion(value string) *ConsoleAp // If called multiple times, the Generation field is set to the value of the last call. func (b *ConsoleApplyConfiguration) WithGeneration(value int64) *ConsoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CreationTimestamp field is set to the value of the last call. -func (b *ConsoleApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ConsoleApplyConfiguration { +func (b *ConsoleApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *ConsoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. -func (b *ConsoleApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ConsoleApplyConfiguration { +func (b *ConsoleApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *ConsoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -158,7 +158,7 @@ func (b *ConsoleApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *Co // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *ConsoleApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ConsoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -168,11 +168,11 @@ func (b *ConsoleApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) // overwriting an existing map entries in Labels field with the same key. func (b *ConsoleApplyConfiguration) WithLabels(entries map[string]string) *ConsoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -183,11 +183,11 @@ func (b *ConsoleApplyConfiguration) WithLabels(entries map[string]string) *Conso // overwriting an existing map entries in Annotations field with the same key. 
func (b *ConsoleApplyConfiguration) WithAnnotations(entries map[string]string) *ConsoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -195,13 +195,13 @@ func (b *ConsoleApplyConfiguration) WithAnnotations(entries map[string]string) * // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the OwnerReferences field. -func (b *ConsoleApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ConsoleApplyConfiguration { +func (b *ConsoleApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *ConsoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -212,14 +212,14 @@ func (b *ConsoleApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerRefer func (b *ConsoleApplyConfiguration) WithFinalizers(values ...string) *ConsoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } func (b *ConsoleApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} } } @@ -242,5 +242,5 @@ func (b *ConsoleApplyConfiguration) WithStatus(value *ConsoleStatusApplyConfigur // GetName retrieves the value of the Name field in the declarative configuration. func (b *ConsoleApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/customfeaturegates.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/customfeaturegates.go index a0a648287..7cd70c7ee 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/customfeaturegates.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/customfeaturegates.go @@ -3,14 +3,14 @@ package v1 import ( - v1 "github.com/openshift/api/config/v1" + configv1 "github.com/openshift/api/config/v1" ) // CustomFeatureGatesApplyConfiguration represents a declarative configuration of the CustomFeatureGates type for use // with apply. 
type CustomFeatureGatesApplyConfiguration struct { - Enabled []v1.FeatureGateName `json:"enabled,omitempty"` - Disabled []v1.FeatureGateName `json:"disabled,omitempty"` + Enabled []configv1.FeatureGateName `json:"enabled,omitempty"` + Disabled []configv1.FeatureGateName `json:"disabled,omitempty"` } // CustomFeatureGatesApplyConfiguration constructs a declarative configuration of the CustomFeatureGates type for use with @@ -22,7 +22,7 @@ func CustomFeatureGates() *CustomFeatureGatesApplyConfiguration { // WithEnabled adds the given value to the Enabled field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the Enabled field. -func (b *CustomFeatureGatesApplyConfiguration) WithEnabled(values ...v1.FeatureGateName) *CustomFeatureGatesApplyConfiguration { +func (b *CustomFeatureGatesApplyConfiguration) WithEnabled(values ...configv1.FeatureGateName) *CustomFeatureGatesApplyConfiguration { for i := range values { b.Enabled = append(b.Enabled, values[i]) } @@ -32,7 +32,7 @@ func (b *CustomFeatureGatesApplyConfiguration) WithEnabled(values ...v1.FeatureG // WithDisabled adds the given value to the Disabled field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the Disabled field. -func (b *CustomFeatureGatesApplyConfiguration) WithDisabled(values ...v1.FeatureGateName) *CustomFeatureGatesApplyConfiguration { +func (b *CustomFeatureGatesApplyConfiguration) WithDisabled(values ...configv1.FeatureGateName) *CustomFeatureGatesApplyConfiguration { for i := range values { b.Disabled = append(b.Disabled, values[i]) } diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/customtlsprofile.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/customtlsprofile.go index f323e11b1..ae03671cd 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/customtlsprofile.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/customtlsprofile.go @@ -23,7 +23,7 @@ func CustomTLSProfile() *CustomTLSProfileApplyConfiguration { // If called multiple times, values provided by each call will be appended to the Ciphers field. func (b *CustomTLSProfileApplyConfiguration) WithCiphers(values ...string) *CustomTLSProfileApplyConfiguration { for i := range values { - b.Ciphers = append(b.Ciphers, values[i]) + b.TLSProfileSpecApplyConfiguration.Ciphers = append(b.TLSProfileSpecApplyConfiguration.Ciphers, values[i]) } return b } @@ -32,6 +32,6 @@ func (b *CustomTLSProfileApplyConfiguration) WithCiphers(values ...string) *Cust // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the MinTLSVersion field is set to the value of the last call. 
func (b *CustomTLSProfileApplyConfiguration) WithMinTLSVersion(value configv1.TLSProtocolVersion) *CustomTLSProfileApplyConfiguration { - b.MinTLSVersion = &value + b.TLSProfileSpecApplyConfiguration.MinTLSVersion = &value return b } diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/dns.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/dns.go index b01e456f5..4ca934c96 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/dns.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/dns.go @@ -3,21 +3,21 @@ package v1 import ( - apiconfigv1 "github.com/openshift/api/config/v1" + configv1 "github.com/openshift/api/config/v1" internal "github.com/openshift/client-go/config/applyconfigurations/internal" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) // DNSApplyConfiguration represents a declarative configuration of the DNS type for use // with apply. type DNSApplyConfiguration struct { - v1.TypeMetaApplyConfiguration `json:",inline"` - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Spec *DNSSpecApplyConfiguration `json:"spec,omitempty"` - Status *apiconfigv1.DNSStatus `json:"status,omitempty"` + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *DNSSpecApplyConfiguration `json:"spec,omitempty"` + Status *configv1.DNSStatus `json:"status,omitempty"` } // DNS constructs a declarative configuration of the DNS type for use with @@ -41,18 +41,18 @@ func DNS(name string) *DNSApplyConfiguration { // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! -func ExtractDNS(dNS *apiconfigv1.DNS, fieldManager string) (*DNSApplyConfiguration, error) { +func ExtractDNS(dNS *configv1.DNS, fieldManager string) (*DNSApplyConfiguration, error) { return extractDNS(dNS, fieldManager, "") } // ExtractDNSStatus is the same as ExtractDNS except // that it extracts the status subresource applied configuration. // Experimental! -func ExtractDNSStatus(dNS *apiconfigv1.DNS, fieldManager string) (*DNSApplyConfiguration, error) { +func ExtractDNSStatus(dNS *configv1.DNS, fieldManager string) (*DNSApplyConfiguration, error) { return extractDNS(dNS, fieldManager, "status") } -func extractDNS(dNS *apiconfigv1.DNS, fieldManager string, subresource string) (*DNSApplyConfiguration, error) { +func extractDNS(dNS *configv1.DNS, fieldManager string, subresource string) (*DNSApplyConfiguration, error) { b := &DNSApplyConfiguration{} err := managedfields.ExtractInto(dNS, internal.Parser().Type("com.github.openshift.api.config.v1.DNS"), fieldManager, b, subresource) if err != nil { @@ -69,7 +69,7 @@ func extractDNS(dNS *apiconfigv1.DNS, fieldManager string, subresource string) ( // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. 
func (b *DNSApplyConfiguration) WithKind(value string) *DNSApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -77,7 +77,7 @@ func (b *DNSApplyConfiguration) WithKind(value string) *DNSApplyConfiguration { // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *DNSApplyConfiguration) WithAPIVersion(value string) *DNSApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -86,7 +86,7 @@ func (b *DNSApplyConfiguration) WithAPIVersion(value string) *DNSApplyConfigurat // If called multiple times, the Name field is set to the value of the last call. func (b *DNSApplyConfiguration) WithName(value string) *DNSApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -95,7 +95,7 @@ func (b *DNSApplyConfiguration) WithName(value string) *DNSApplyConfiguration { // If called multiple times, the GenerateName field is set to the value of the last call. func (b *DNSApplyConfiguration) WithGenerateName(value string) *DNSApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -104,7 +104,7 @@ func (b *DNSApplyConfiguration) WithGenerateName(value string) *DNSApplyConfigur // If called multiple times, the Namespace field is set to the value of the last call. func (b *DNSApplyConfiguration) WithNamespace(value string) *DNSApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -113,7 +113,7 @@ func (b *DNSApplyConfiguration) WithNamespace(value string) *DNSApplyConfigurati // If called multiple times, the UID field is set to the value of the last call. func (b *DNSApplyConfiguration) WithUID(value types.UID) *DNSApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -122,7 +122,7 @@ func (b *DNSApplyConfiguration) WithUID(value types.UID) *DNSApplyConfiguration // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *DNSApplyConfiguration) WithResourceVersion(value string) *DNSApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -131,25 +131,25 @@ func (b *DNSApplyConfiguration) WithResourceVersion(value string) *DNSApplyConfi // If called multiple times, the Generation field is set to the value of the last call. func (b *DNSApplyConfiguration) WithGeneration(value int64) *DNSApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CreationTimestamp field is set to the value of the last call. 
-func (b *DNSApplyConfiguration) WithCreationTimestamp(value metav1.Time) *DNSApplyConfiguration { +func (b *DNSApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *DNSApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. -func (b *DNSApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *DNSApplyConfiguration { +func (b *DNSApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *DNSApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -158,7 +158,7 @@ func (b *DNSApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *DNSApp // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *DNSApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *DNSApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -168,11 +168,11 @@ func (b *DNSApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *DNS // overwriting an existing map entries in Labels field with the same key. func (b *DNSApplyConfiguration) WithLabels(entries map[string]string) *DNSApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -183,11 +183,11 @@ func (b *DNSApplyConfiguration) WithLabels(entries map[string]string) *DNSApplyC // overwriting an existing map entries in Annotations field with the same key. func (b *DNSApplyConfiguration) WithAnnotations(entries map[string]string) *DNSApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -195,13 +195,13 @@ func (b *DNSApplyConfiguration) WithAnnotations(entries map[string]string) *DNSA // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the OwnerReferences field. 
-func (b *DNSApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *DNSApplyConfiguration { +func (b *DNSApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *DNSApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -212,14 +212,14 @@ func (b *DNSApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReference func (b *DNSApplyConfiguration) WithFinalizers(values ...string) *DNSApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } func (b *DNSApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} } } @@ -234,7 +234,7 @@ func (b *DNSApplyConfiguration) WithSpec(value *DNSSpecApplyConfiguration) *DNSA // WithStatus sets the Status field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Status field is set to the value of the last call. -func (b *DNSApplyConfiguration) WithStatus(value apiconfigv1.DNSStatus) *DNSApplyConfiguration { +func (b *DNSApplyConfiguration) WithStatus(value configv1.DNSStatus) *DNSApplyConfiguration { b.Status = &value return b } @@ -242,5 +242,5 @@ func (b *DNSApplyConfiguration) WithStatus(value apiconfigv1.DNSStatus) *DNSAppl // GetName retrieves the value of the Name field in the declarative configuration. func (b *DNSApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/dnsplatformspec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/dnsplatformspec.go index fc15db1ef..46bf616b2 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/dnsplatformspec.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/dnsplatformspec.go @@ -3,13 +3,13 @@ package v1 import ( - v1 "github.com/openshift/api/config/v1" + configv1 "github.com/openshift/api/config/v1" ) // DNSPlatformSpecApplyConfiguration represents a declarative configuration of the DNSPlatformSpec type for use // with apply. type DNSPlatformSpecApplyConfiguration struct { - Type *v1.PlatformType `json:"type,omitempty"` + Type *configv1.PlatformType `json:"type,omitempty"` AWS *AWSDNSSpecApplyConfiguration `json:"aws,omitempty"` } @@ -22,7 +22,7 @@ func DNSPlatformSpec() *DNSPlatformSpecApplyConfiguration { // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. 
-func (b *DNSPlatformSpecApplyConfiguration) WithType(value v1.PlatformType) *DNSPlatformSpecApplyConfiguration { +func (b *DNSPlatformSpecApplyConfiguration) WithType(value configv1.PlatformType) *DNSPlatformSpecApplyConfiguration { b.Type = &value return b } diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/featuregate.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/featuregate.go index b17945290..73ec53314 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/featuregate.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/featuregate.go @@ -3,21 +3,21 @@ package v1 import ( - apiconfigv1 "github.com/openshift/api/config/v1" + configv1 "github.com/openshift/api/config/v1" internal "github.com/openshift/client-go/config/applyconfigurations/internal" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) // FeatureGateApplyConfiguration represents a declarative configuration of the FeatureGate type for use // with apply. type FeatureGateApplyConfiguration struct { - v1.TypeMetaApplyConfiguration `json:",inline"` - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Spec *FeatureGateSpecApplyConfiguration `json:"spec,omitempty"` - Status *FeatureGateStatusApplyConfiguration `json:"status,omitempty"` + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *FeatureGateSpecApplyConfiguration `json:"spec,omitempty"` + Status *FeatureGateStatusApplyConfiguration `json:"status,omitempty"` } // FeatureGate constructs a declarative configuration of the FeatureGate type for use with @@ -41,18 +41,18 @@ func FeatureGate(name string) *FeatureGateApplyConfiguration { // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! -func ExtractFeatureGate(featureGate *apiconfigv1.FeatureGate, fieldManager string) (*FeatureGateApplyConfiguration, error) { +func ExtractFeatureGate(featureGate *configv1.FeatureGate, fieldManager string) (*FeatureGateApplyConfiguration, error) { return extractFeatureGate(featureGate, fieldManager, "") } // ExtractFeatureGateStatus is the same as ExtractFeatureGate except // that it extracts the status subresource applied configuration. // Experimental! 
-func ExtractFeatureGateStatus(featureGate *apiconfigv1.FeatureGate, fieldManager string) (*FeatureGateApplyConfiguration, error) { +func ExtractFeatureGateStatus(featureGate *configv1.FeatureGate, fieldManager string) (*FeatureGateApplyConfiguration, error) { return extractFeatureGate(featureGate, fieldManager, "status") } -func extractFeatureGate(featureGate *apiconfigv1.FeatureGate, fieldManager string, subresource string) (*FeatureGateApplyConfiguration, error) { +func extractFeatureGate(featureGate *configv1.FeatureGate, fieldManager string, subresource string) (*FeatureGateApplyConfiguration, error) { b := &FeatureGateApplyConfiguration{} err := managedfields.ExtractInto(featureGate, internal.Parser().Type("com.github.openshift.api.config.v1.FeatureGate"), fieldManager, b, subresource) if err != nil { @@ -69,7 +69,7 @@ func extractFeatureGate(featureGate *apiconfigv1.FeatureGate, fieldManager strin // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *FeatureGateApplyConfiguration) WithKind(value string) *FeatureGateApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -77,7 +77,7 @@ func (b *FeatureGateApplyConfiguration) WithKind(value string) *FeatureGateApply // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *FeatureGateApplyConfiguration) WithAPIVersion(value string) *FeatureGateApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -86,7 +86,7 @@ func (b *FeatureGateApplyConfiguration) WithAPIVersion(value string) *FeatureGat // If called multiple times, the Name field is set to the value of the last call. func (b *FeatureGateApplyConfiguration) WithName(value string) *FeatureGateApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -95,7 +95,7 @@ func (b *FeatureGateApplyConfiguration) WithName(value string) *FeatureGateApply // If called multiple times, the GenerateName field is set to the value of the last call. func (b *FeatureGateApplyConfiguration) WithGenerateName(value string) *FeatureGateApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -104,7 +104,7 @@ func (b *FeatureGateApplyConfiguration) WithGenerateName(value string) *FeatureG // If called multiple times, the Namespace field is set to the value of the last call. func (b *FeatureGateApplyConfiguration) WithNamespace(value string) *FeatureGateApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -113,7 +113,7 @@ func (b *FeatureGateApplyConfiguration) WithNamespace(value string) *FeatureGate // If called multiple times, the UID field is set to the value of the last call. 
func (b *FeatureGateApplyConfiguration) WithUID(value types.UID) *FeatureGateApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -122,7 +122,7 @@ func (b *FeatureGateApplyConfiguration) WithUID(value types.UID) *FeatureGateApp // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *FeatureGateApplyConfiguration) WithResourceVersion(value string) *FeatureGateApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -131,25 +131,25 @@ func (b *FeatureGateApplyConfiguration) WithResourceVersion(value string) *Featu // If called multiple times, the Generation field is set to the value of the last call. func (b *FeatureGateApplyConfiguration) WithGeneration(value int64) *FeatureGateApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CreationTimestamp field is set to the value of the last call. -func (b *FeatureGateApplyConfiguration) WithCreationTimestamp(value metav1.Time) *FeatureGateApplyConfiguration { +func (b *FeatureGateApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *FeatureGateApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. -func (b *FeatureGateApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *FeatureGateApplyConfiguration { +func (b *FeatureGateApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *FeatureGateApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -158,7 +158,7 @@ func (b *FeatureGateApplyConfiguration) WithDeletionTimestamp(value metav1.Time) // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *FeatureGateApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *FeatureGateApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -168,11 +168,11 @@ func (b *FeatureGateApplyConfiguration) WithDeletionGracePeriodSeconds(value int // overwriting an existing map entries in Labels field with the same key. 
func (b *FeatureGateApplyConfiguration) WithLabels(entries map[string]string) *FeatureGateApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -183,11 +183,11 @@ func (b *FeatureGateApplyConfiguration) WithLabels(entries map[string]string) *F // overwriting an existing map entries in Annotations field with the same key. func (b *FeatureGateApplyConfiguration) WithAnnotations(entries map[string]string) *FeatureGateApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -195,13 +195,13 @@ func (b *FeatureGateApplyConfiguration) WithAnnotations(entries map[string]strin // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the OwnerReferences field. -func (b *FeatureGateApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *FeatureGateApplyConfiguration { +func (b *FeatureGateApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *FeatureGateApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -212,14 +212,14 @@ func (b *FeatureGateApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerR func (b *FeatureGateApplyConfiguration) WithFinalizers(values ...string) *FeatureGateApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } func (b *FeatureGateApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} } } @@ -242,5 +242,5 @@ func (b *FeatureGateApplyConfiguration) WithStatus(value *FeatureGateStatusApply // GetName retrieves the value of the Name field in the declarative configuration. 
func (b *FeatureGateApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/featuregateattributes.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/featuregateattributes.go index 200460a29..7884ec287 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/featuregateattributes.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/featuregateattributes.go @@ -3,13 +3,13 @@ package v1 import ( - v1 "github.com/openshift/api/config/v1" + configv1 "github.com/openshift/api/config/v1" ) // FeatureGateAttributesApplyConfiguration represents a declarative configuration of the FeatureGateAttributes type for use // with apply. type FeatureGateAttributesApplyConfiguration struct { - Name *v1.FeatureGateName `json:"name,omitempty"` + Name *configv1.FeatureGateName `json:"name,omitempty"` } // FeatureGateAttributesApplyConfiguration constructs a declarative configuration of the FeatureGateAttributes type for use with @@ -21,7 +21,7 @@ func FeatureGateAttributes() *FeatureGateAttributesApplyConfiguration { // WithName sets the Name field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Name field is set to the value of the last call. -func (b *FeatureGateAttributesApplyConfiguration) WithName(value v1.FeatureGateName) *FeatureGateAttributesApplyConfiguration { +func (b *FeatureGateAttributesApplyConfiguration) WithName(value configv1.FeatureGateName) *FeatureGateAttributesApplyConfiguration { b.Name = &value return b } diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/featuregateselection.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/featuregateselection.go index 2aac4666e..b79d3f883 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/featuregateselection.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/featuregateselection.go @@ -3,13 +3,13 @@ package v1 import ( - v1 "github.com/openshift/api/config/v1" + configv1 "github.com/openshift/api/config/v1" ) // FeatureGateSelectionApplyConfiguration represents a declarative configuration of the FeatureGateSelection type for use // with apply. type FeatureGateSelectionApplyConfiguration struct { - FeatureSet *v1.FeatureSet `json:"featureSet,omitempty"` + FeatureSet *configv1.FeatureSet `json:"featureSet,omitempty"` CustomNoUpgrade *CustomFeatureGatesApplyConfiguration `json:"customNoUpgrade,omitempty"` } @@ -22,7 +22,7 @@ func FeatureGateSelection() *FeatureGateSelectionApplyConfiguration { // WithFeatureSet sets the FeatureSet field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the FeatureSet field is set to the value of the last call. 
-func (b *FeatureGateSelectionApplyConfiguration) WithFeatureSet(value v1.FeatureSet) *FeatureGateSelectionApplyConfiguration { +func (b *FeatureGateSelectionApplyConfiguration) WithFeatureSet(value configv1.FeatureSet) *FeatureGateSelectionApplyConfiguration { b.FeatureSet = &value return b } diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/featuregatespec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/featuregatespec.go index 39b85b5dd..d7e6f5e2b 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/featuregatespec.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/featuregatespec.go @@ -22,7 +22,7 @@ func FeatureGateSpec() *FeatureGateSpecApplyConfiguration { // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the FeatureSet field is set to the value of the last call. func (b *FeatureGateSpecApplyConfiguration) WithFeatureSet(value configv1.FeatureSet) *FeatureGateSpecApplyConfiguration { - b.FeatureSet = &value + b.FeatureGateSelectionApplyConfiguration.FeatureSet = &value return b } @@ -30,6 +30,6 @@ func (b *FeatureGateSpecApplyConfiguration) WithFeatureSet(value configv1.Featur // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CustomNoUpgrade field is set to the value of the last call. func (b *FeatureGateSpecApplyConfiguration) WithCustomNoUpgrade(value *CustomFeatureGatesApplyConfiguration) *FeatureGateSpecApplyConfiguration { - b.CustomNoUpgrade = value + b.FeatureGateSelectionApplyConfiguration.CustomNoUpgrade = value return b } diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/featuregatestatus.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/featuregatestatus.go index 2cc69267e..705c3d0cf 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/featuregatestatus.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/featuregatestatus.go @@ -3,13 +3,13 @@ package v1 import ( - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) // FeatureGateStatusApplyConfiguration represents a declarative configuration of the FeatureGateStatus type for use // with apply. type FeatureGateStatusApplyConfiguration struct { - Conditions []v1.ConditionApplyConfiguration `json:"conditions,omitempty"` + Conditions []metav1.ConditionApplyConfiguration `json:"conditions,omitempty"` FeatureGates []FeatureGateDetailsApplyConfiguration `json:"featureGates,omitempty"` } @@ -22,7 +22,7 @@ func FeatureGateStatus() *FeatureGateStatusApplyConfiguration { // WithConditions adds the given value to the Conditions field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the Conditions field. 
-func (b *FeatureGateStatusApplyConfiguration) WithConditions(values ...*v1.ConditionApplyConfiguration) *FeatureGateStatusApplyConfiguration { +func (b *FeatureGateStatusApplyConfiguration) WithConditions(values ...*metav1.ConditionApplyConfiguration) *FeatureGateStatusApplyConfiguration { for i := range values { if values[i] == nil { panic("nil value passed to WithConditions") diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/hubsourcestatus.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/hubsourcestatus.go index 4a8f0c437..1688b1ce4 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/hubsourcestatus.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/hubsourcestatus.go @@ -21,7 +21,7 @@ func HubSourceStatus() *HubSourceStatusApplyConfiguration { // If called multiple times, the Name field is set to the value of the last call. func (b *HubSourceStatusApplyConfiguration) WithName(value string) *HubSourceStatusApplyConfiguration { b.ensureHubSourceApplyConfigurationExists() - b.Name = &value + b.HubSourceApplyConfiguration.Name = &value return b } @@ -30,7 +30,7 @@ func (b *HubSourceStatusApplyConfiguration) WithName(value string) *HubSourceSta // If called multiple times, the Disabled field is set to the value of the last call. func (b *HubSourceStatusApplyConfiguration) WithDisabled(value bool) *HubSourceStatusApplyConfiguration { b.ensureHubSourceApplyConfigurationExists() - b.Disabled = &value + b.HubSourceApplyConfiguration.Disabled = &value return b } diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/ibmcloudplatformstatus.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/ibmcloudplatformstatus.go index b4b3be2ff..48c17c9cb 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/ibmcloudplatformstatus.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/ibmcloudplatformstatus.go @@ -3,7 +3,7 @@ package v1 import ( - v1 "github.com/openshift/api/config/v1" + configv1 "github.com/openshift/api/config/v1" ) // IBMCloudPlatformStatusApplyConfiguration represents a declarative configuration of the IBMCloudPlatformStatus type for use @@ -11,7 +11,7 @@ import ( type IBMCloudPlatformStatusApplyConfiguration struct { Location *string `json:"location,omitempty"` ResourceGroupName *string `json:"resourceGroupName,omitempty"` - ProviderType *v1.IBMCloudProviderType `json:"providerType,omitempty"` + ProviderType *configv1.IBMCloudProviderType `json:"providerType,omitempty"` CISInstanceCRN *string `json:"cisInstanceCRN,omitempty"` DNSInstanceCRN *string `json:"dnsInstanceCRN,omitempty"` ServiceEndpoints []IBMCloudServiceEndpointApplyConfiguration `json:"serviceEndpoints,omitempty"` @@ -42,7 +42,7 @@ func (b *IBMCloudPlatformStatusApplyConfiguration) WithResourceGroupName(value s // WithProviderType sets the ProviderType field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the ProviderType field is set to the value of the last call. 
-func (b *IBMCloudPlatformStatusApplyConfiguration) WithProviderType(value v1.IBMCloudProviderType) *IBMCloudPlatformStatusApplyConfiguration { +func (b *IBMCloudPlatformStatusApplyConfiguration) WithProviderType(value configv1.IBMCloudProviderType) *IBMCloudPlatformStatusApplyConfiguration { b.ProviderType = &value return b } diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/ibmcloudserviceendpoint.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/ibmcloudserviceendpoint.go index 229104061..daec88ba5 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/ibmcloudserviceendpoint.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/ibmcloudserviceendpoint.go @@ -3,14 +3,14 @@ package v1 import ( - v1 "github.com/openshift/api/config/v1" + configv1 "github.com/openshift/api/config/v1" ) // IBMCloudServiceEndpointApplyConfiguration represents a declarative configuration of the IBMCloudServiceEndpoint type for use // with apply. type IBMCloudServiceEndpointApplyConfiguration struct { - Name *v1.IBMCloudServiceName `json:"name,omitempty"` - URL *string `json:"url,omitempty"` + Name *configv1.IBMCloudServiceName `json:"name,omitempty"` + URL *string `json:"url,omitempty"` } // IBMCloudServiceEndpointApplyConfiguration constructs a declarative configuration of the IBMCloudServiceEndpoint type for use with @@ -22,7 +22,7 @@ func IBMCloudServiceEndpoint() *IBMCloudServiceEndpointApplyConfiguration { // WithName sets the Name field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Name field is set to the value of the last call. -func (b *IBMCloudServiceEndpointApplyConfiguration) WithName(value v1.IBMCloudServiceName) *IBMCloudServiceEndpointApplyConfiguration { +func (b *IBMCloudServiceEndpointApplyConfiguration) WithName(value configv1.IBMCloudServiceName) *IBMCloudServiceEndpointApplyConfiguration { b.Name = &value return b } diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/identityprovider.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/identityprovider.go index 35edd9dff..4e726d085 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/identityprovider.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/identityprovider.go @@ -3,14 +3,14 @@ package v1 import ( - v1 "github.com/openshift/api/config/v1" + configv1 "github.com/openshift/api/config/v1" ) // IdentityProviderApplyConfiguration represents a declarative configuration of the IdentityProvider type for use // with apply. type IdentityProviderApplyConfiguration struct { - Name *string `json:"name,omitempty"` - MappingMethod *v1.MappingMethodType `json:"mappingMethod,omitempty"` + Name *string `json:"name,omitempty"` + MappingMethod *configv1.MappingMethodType `json:"mappingMethod,omitempty"` IdentityProviderConfigApplyConfiguration `json:",inline"` } @@ -31,7 +31,7 @@ func (b *IdentityProviderApplyConfiguration) WithName(value string) *IdentityPro // WithMappingMethod sets the MappingMethod field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the MappingMethod field is set to the value of the last call. 
-func (b *IdentityProviderApplyConfiguration) WithMappingMethod(value v1.MappingMethodType) *IdentityProviderApplyConfiguration { +func (b *IdentityProviderApplyConfiguration) WithMappingMethod(value configv1.MappingMethodType) *IdentityProviderApplyConfiguration { b.MappingMethod = &value return b } @@ -39,8 +39,8 @@ func (b *IdentityProviderApplyConfiguration) WithMappingMethod(value v1.MappingM // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. -func (b *IdentityProviderApplyConfiguration) WithType(value v1.IdentityProviderType) *IdentityProviderApplyConfiguration { - b.Type = &value +func (b *IdentityProviderApplyConfiguration) WithType(value configv1.IdentityProviderType) *IdentityProviderApplyConfiguration { + b.IdentityProviderConfigApplyConfiguration.Type = &value return b } @@ -48,7 +48,7 @@ func (b *IdentityProviderApplyConfiguration) WithType(value v1.IdentityProviderT // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the BasicAuth field is set to the value of the last call. func (b *IdentityProviderApplyConfiguration) WithBasicAuth(value *BasicAuthIdentityProviderApplyConfiguration) *IdentityProviderApplyConfiguration { - b.BasicAuth = value + b.IdentityProviderConfigApplyConfiguration.BasicAuth = value return b } @@ -56,7 +56,7 @@ func (b *IdentityProviderApplyConfiguration) WithBasicAuth(value *BasicAuthIdent // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the GitHub field is set to the value of the last call. func (b *IdentityProviderApplyConfiguration) WithGitHub(value *GitHubIdentityProviderApplyConfiguration) *IdentityProviderApplyConfiguration { - b.GitHub = value + b.IdentityProviderConfigApplyConfiguration.GitHub = value return b } @@ -64,7 +64,7 @@ func (b *IdentityProviderApplyConfiguration) WithGitHub(value *GitHubIdentityPro // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the GitLab field is set to the value of the last call. func (b *IdentityProviderApplyConfiguration) WithGitLab(value *GitLabIdentityProviderApplyConfiguration) *IdentityProviderApplyConfiguration { - b.GitLab = value + b.IdentityProviderConfigApplyConfiguration.GitLab = value return b } @@ -72,7 +72,7 @@ func (b *IdentityProviderApplyConfiguration) WithGitLab(value *GitLabIdentityPro // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Google field is set to the value of the last call. func (b *IdentityProviderApplyConfiguration) WithGoogle(value *GoogleIdentityProviderApplyConfiguration) *IdentityProviderApplyConfiguration { - b.Google = value + b.IdentityProviderConfigApplyConfiguration.Google = value return b } @@ -80,7 +80,7 @@ func (b *IdentityProviderApplyConfiguration) WithGoogle(value *GoogleIdentityPro // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the HTPasswd field is set to the value of the last call. 
func (b *IdentityProviderApplyConfiguration) WithHTPasswd(value *HTPasswdIdentityProviderApplyConfiguration) *IdentityProviderApplyConfiguration { - b.HTPasswd = value + b.IdentityProviderConfigApplyConfiguration.HTPasswd = value return b } @@ -88,7 +88,7 @@ func (b *IdentityProviderApplyConfiguration) WithHTPasswd(value *HTPasswdIdentit // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Keystone field is set to the value of the last call. func (b *IdentityProviderApplyConfiguration) WithKeystone(value *KeystoneIdentityProviderApplyConfiguration) *IdentityProviderApplyConfiguration { - b.Keystone = value + b.IdentityProviderConfigApplyConfiguration.Keystone = value return b } @@ -96,7 +96,7 @@ func (b *IdentityProviderApplyConfiguration) WithKeystone(value *KeystoneIdentit // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the LDAP field is set to the value of the last call. func (b *IdentityProviderApplyConfiguration) WithLDAP(value *LDAPIdentityProviderApplyConfiguration) *IdentityProviderApplyConfiguration { - b.LDAP = value + b.IdentityProviderConfigApplyConfiguration.LDAP = value return b } @@ -104,7 +104,7 @@ func (b *IdentityProviderApplyConfiguration) WithLDAP(value *LDAPIdentityProvide // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the OpenID field is set to the value of the last call. func (b *IdentityProviderApplyConfiguration) WithOpenID(value *OpenIDIdentityProviderApplyConfiguration) *IdentityProviderApplyConfiguration { - b.OpenID = value + b.IdentityProviderConfigApplyConfiguration.OpenID = value return b } @@ -112,6 +112,6 @@ func (b *IdentityProviderApplyConfiguration) WithOpenID(value *OpenIDIdentityPro // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the RequestHeader field is set to the value of the last call. func (b *IdentityProviderApplyConfiguration) WithRequestHeader(value *RequestHeaderIdentityProviderApplyConfiguration) *IdentityProviderApplyConfiguration { - b.RequestHeader = value + b.IdentityProviderConfigApplyConfiguration.RequestHeader = value return b } diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/identityproviderconfig.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/identityproviderconfig.go index 208d23d56..1ff6d99a7 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/identityproviderconfig.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/identityproviderconfig.go @@ -3,13 +3,13 @@ package v1 import ( - v1 "github.com/openshift/api/config/v1" + configv1 "github.com/openshift/api/config/v1" ) // IdentityProviderConfigApplyConfiguration represents a declarative configuration of the IdentityProviderConfig type for use // with apply. 
type IdentityProviderConfigApplyConfiguration struct { - Type *v1.IdentityProviderType `json:"type,omitempty"` + Type *configv1.IdentityProviderType `json:"type,omitempty"` BasicAuth *BasicAuthIdentityProviderApplyConfiguration `json:"basicAuth,omitempty"` GitHub *GitHubIdentityProviderApplyConfiguration `json:"github,omitempty"` GitLab *GitLabIdentityProviderApplyConfiguration `json:"gitlab,omitempty"` @@ -30,7 +30,7 @@ func IdentityProviderConfig() *IdentityProviderConfigApplyConfiguration { // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. -func (b *IdentityProviderConfigApplyConfiguration) WithType(value v1.IdentityProviderType) *IdentityProviderConfigApplyConfiguration { +func (b *IdentityProviderConfigApplyConfiguration) WithType(value configv1.IdentityProviderType) *IdentityProviderConfigApplyConfiguration { b.Type = &value return b } diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/image.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/image.go index 34f73ad72..63009029e 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/image.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/image.go @@ -3,21 +3,21 @@ package v1 import ( - apiconfigv1 "github.com/openshift/api/config/v1" + configv1 "github.com/openshift/api/config/v1" internal "github.com/openshift/client-go/config/applyconfigurations/internal" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) // ImageApplyConfiguration represents a declarative configuration of the Image type for use // with apply. type ImageApplyConfiguration struct { - v1.TypeMetaApplyConfiguration `json:",inline"` - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Spec *ImageSpecApplyConfiguration `json:"spec,omitempty"` - Status *ImageStatusApplyConfiguration `json:"status,omitempty"` + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *ImageSpecApplyConfiguration `json:"spec,omitempty"` + Status *ImageStatusApplyConfiguration `json:"status,omitempty"` } // Image constructs a declarative configuration of the Image type for use with @@ -41,18 +41,18 @@ func Image(name string) *ImageApplyConfiguration { // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! -func ExtractImage(image *apiconfigv1.Image, fieldManager string) (*ImageApplyConfiguration, error) { +func ExtractImage(image *configv1.Image, fieldManager string) (*ImageApplyConfiguration, error) { return extractImage(image, fieldManager, "") } // ExtractImageStatus is the same as ExtractImage except // that it extracts the status subresource applied configuration. // Experimental! 
-func ExtractImageStatus(image *apiconfigv1.Image, fieldManager string) (*ImageApplyConfiguration, error) { +func ExtractImageStatus(image *configv1.Image, fieldManager string) (*ImageApplyConfiguration, error) { return extractImage(image, fieldManager, "status") } -func extractImage(image *apiconfigv1.Image, fieldManager string, subresource string) (*ImageApplyConfiguration, error) { +func extractImage(image *configv1.Image, fieldManager string, subresource string) (*ImageApplyConfiguration, error) { b := &ImageApplyConfiguration{} err := managedfields.ExtractInto(image, internal.Parser().Type("com.github.openshift.api.config.v1.Image"), fieldManager, b, subresource) if err != nil { @@ -69,7 +69,7 @@ func extractImage(image *apiconfigv1.Image, fieldManager string, subresource str // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *ImageApplyConfiguration) WithKind(value string) *ImageApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -77,7 +77,7 @@ func (b *ImageApplyConfiguration) WithKind(value string) *ImageApplyConfiguratio // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *ImageApplyConfiguration) WithAPIVersion(value string) *ImageApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -86,7 +86,7 @@ func (b *ImageApplyConfiguration) WithAPIVersion(value string) *ImageApplyConfig // If called multiple times, the Name field is set to the value of the last call. func (b *ImageApplyConfiguration) WithName(value string) *ImageApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -95,7 +95,7 @@ func (b *ImageApplyConfiguration) WithName(value string) *ImageApplyConfiguratio // If called multiple times, the GenerateName field is set to the value of the last call. func (b *ImageApplyConfiguration) WithGenerateName(value string) *ImageApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -104,7 +104,7 @@ func (b *ImageApplyConfiguration) WithGenerateName(value string) *ImageApplyConf // If called multiple times, the Namespace field is set to the value of the last call. func (b *ImageApplyConfiguration) WithNamespace(value string) *ImageApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -113,7 +113,7 @@ func (b *ImageApplyConfiguration) WithNamespace(value string) *ImageApplyConfigu // If called multiple times, the UID field is set to the value of the last call. func (b *ImageApplyConfiguration) WithUID(value types.UID) *ImageApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -122,7 +122,7 @@ func (b *ImageApplyConfiguration) WithUID(value types.UID) *ImageApplyConfigurat // If called multiple times, the ResourceVersion field is set to the value of the last call. 
func (b *ImageApplyConfiguration) WithResourceVersion(value string) *ImageApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -131,25 +131,25 @@ func (b *ImageApplyConfiguration) WithResourceVersion(value string) *ImageApplyC // If called multiple times, the Generation field is set to the value of the last call. func (b *ImageApplyConfiguration) WithGeneration(value int64) *ImageApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CreationTimestamp field is set to the value of the last call. -func (b *ImageApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ImageApplyConfiguration { +func (b *ImageApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *ImageApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. -func (b *ImageApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ImageApplyConfiguration { +func (b *ImageApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *ImageApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -158,7 +158,7 @@ func (b *ImageApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *Imag // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *ImageApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ImageApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -168,11 +168,11 @@ func (b *ImageApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *I // overwriting an existing map entries in Labels field with the same key. func (b *ImageApplyConfiguration) WithLabels(entries map[string]string) *ImageApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -183,11 +183,11 @@ func (b *ImageApplyConfiguration) WithLabels(entries map[string]string) *ImageAp // overwriting an existing map entries in Annotations field with the same key. 
func (b *ImageApplyConfiguration) WithAnnotations(entries map[string]string) *ImageApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -195,13 +195,13 @@ func (b *ImageApplyConfiguration) WithAnnotations(entries map[string]string) *Im // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the OwnerReferences field. -func (b *ImageApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ImageApplyConfiguration { +func (b *ImageApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *ImageApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -212,14 +212,14 @@ func (b *ImageApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferen func (b *ImageApplyConfiguration) WithFinalizers(values ...string) *ImageApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } func (b *ImageApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} } } @@ -242,5 +242,5 @@ func (b *ImageApplyConfiguration) WithStatus(value *ImageStatusApplyConfiguratio // GetName retrieves the value of the Name field in the declarative configuration. 
func (b *ImageApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagecontentpolicy.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagecontentpolicy.go index 9566569f9..6d47fac03 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagecontentpolicy.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagecontentpolicy.go @@ -3,20 +3,20 @@ package v1 import ( - apiconfigv1 "github.com/openshift/api/config/v1" + configv1 "github.com/openshift/api/config/v1" internal "github.com/openshift/client-go/config/applyconfigurations/internal" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) // ImageContentPolicyApplyConfiguration represents a declarative configuration of the ImageContentPolicy type for use // with apply. type ImageContentPolicyApplyConfiguration struct { - v1.TypeMetaApplyConfiguration `json:",inline"` - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Spec *ImageContentPolicySpecApplyConfiguration `json:"spec,omitempty"` + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *ImageContentPolicySpecApplyConfiguration `json:"spec,omitempty"` } // ImageContentPolicy constructs a declarative configuration of the ImageContentPolicy type for use with @@ -40,18 +40,18 @@ func ImageContentPolicy(name string) *ImageContentPolicyApplyConfiguration { // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! -func ExtractImageContentPolicy(imageContentPolicy *apiconfigv1.ImageContentPolicy, fieldManager string) (*ImageContentPolicyApplyConfiguration, error) { +func ExtractImageContentPolicy(imageContentPolicy *configv1.ImageContentPolicy, fieldManager string) (*ImageContentPolicyApplyConfiguration, error) { return extractImageContentPolicy(imageContentPolicy, fieldManager, "") } // ExtractImageContentPolicyStatus is the same as ExtractImageContentPolicy except // that it extracts the status subresource applied configuration. // Experimental! 
-func ExtractImageContentPolicyStatus(imageContentPolicy *apiconfigv1.ImageContentPolicy, fieldManager string) (*ImageContentPolicyApplyConfiguration, error) { +func ExtractImageContentPolicyStatus(imageContentPolicy *configv1.ImageContentPolicy, fieldManager string) (*ImageContentPolicyApplyConfiguration, error) { return extractImageContentPolicy(imageContentPolicy, fieldManager, "status") } -func extractImageContentPolicy(imageContentPolicy *apiconfigv1.ImageContentPolicy, fieldManager string, subresource string) (*ImageContentPolicyApplyConfiguration, error) { +func extractImageContentPolicy(imageContentPolicy *configv1.ImageContentPolicy, fieldManager string, subresource string) (*ImageContentPolicyApplyConfiguration, error) { b := &ImageContentPolicyApplyConfiguration{} err := managedfields.ExtractInto(imageContentPolicy, internal.Parser().Type("com.github.openshift.api.config.v1.ImageContentPolicy"), fieldManager, b, subresource) if err != nil { @@ -68,7 +68,7 @@ func extractImageContentPolicy(imageContentPolicy *apiconfigv1.ImageContentPolic // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *ImageContentPolicyApplyConfiguration) WithKind(value string) *ImageContentPolicyApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -76,7 +76,7 @@ func (b *ImageContentPolicyApplyConfiguration) WithKind(value string) *ImageCont // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *ImageContentPolicyApplyConfiguration) WithAPIVersion(value string) *ImageContentPolicyApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -85,7 +85,7 @@ func (b *ImageContentPolicyApplyConfiguration) WithAPIVersion(value string) *Ima // If called multiple times, the Name field is set to the value of the last call. func (b *ImageContentPolicyApplyConfiguration) WithName(value string) *ImageContentPolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -94,7 +94,7 @@ func (b *ImageContentPolicyApplyConfiguration) WithName(value string) *ImageCont // If called multiple times, the GenerateName field is set to the value of the last call. func (b *ImageContentPolicyApplyConfiguration) WithGenerateName(value string) *ImageContentPolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -103,7 +103,7 @@ func (b *ImageContentPolicyApplyConfiguration) WithGenerateName(value string) *I // If called multiple times, the Namespace field is set to the value of the last call. func (b *ImageContentPolicyApplyConfiguration) WithNamespace(value string) *ImageContentPolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -112,7 +112,7 @@ func (b *ImageContentPolicyApplyConfiguration) WithNamespace(value string) *Imag // If called multiple times, the UID field is set to the value of the last call. 
func (b *ImageContentPolicyApplyConfiguration) WithUID(value types.UID) *ImageContentPolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -121,7 +121,7 @@ func (b *ImageContentPolicyApplyConfiguration) WithUID(value types.UID) *ImageCo // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *ImageContentPolicyApplyConfiguration) WithResourceVersion(value string) *ImageContentPolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -130,25 +130,25 @@ func (b *ImageContentPolicyApplyConfiguration) WithResourceVersion(value string) // If called multiple times, the Generation field is set to the value of the last call. func (b *ImageContentPolicyApplyConfiguration) WithGeneration(value int64) *ImageContentPolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CreationTimestamp field is set to the value of the last call. -func (b *ImageContentPolicyApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ImageContentPolicyApplyConfiguration { +func (b *ImageContentPolicyApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *ImageContentPolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. -func (b *ImageContentPolicyApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ImageContentPolicyApplyConfiguration { +func (b *ImageContentPolicyApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *ImageContentPolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -157,7 +157,7 @@ func (b *ImageContentPolicyApplyConfiguration) WithDeletionTimestamp(value metav // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *ImageContentPolicyApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ImageContentPolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -167,11 +167,11 @@ func (b *ImageContentPolicyApplyConfiguration) WithDeletionGracePeriodSeconds(va // overwriting an existing map entries in Labels field with the same key. 
func (b *ImageContentPolicyApplyConfiguration) WithLabels(entries map[string]string) *ImageContentPolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -182,11 +182,11 @@ func (b *ImageContentPolicyApplyConfiguration) WithLabels(entries map[string]str // overwriting an existing map entries in Annotations field with the same key. func (b *ImageContentPolicyApplyConfiguration) WithAnnotations(entries map[string]string) *ImageContentPolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -194,13 +194,13 @@ func (b *ImageContentPolicyApplyConfiguration) WithAnnotations(entries map[strin // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the OwnerReferences field. -func (b *ImageContentPolicyApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ImageContentPolicyApplyConfiguration { +func (b *ImageContentPolicyApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *ImageContentPolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -211,14 +211,14 @@ func (b *ImageContentPolicyApplyConfiguration) WithOwnerReferences(values ...*v1 func (b *ImageContentPolicyApplyConfiguration) WithFinalizers(values ...string) *ImageContentPolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } func (b *ImageContentPolicyApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} } } @@ -233,5 +233,5 @@ func (b *ImageContentPolicyApplyConfiguration) WithSpec(value *ImageContentPolic // GetName retrieves the value of the Name field in the declarative configuration. 
func (b *ImageContentPolicyApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagedigestmirrors.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagedigestmirrors.go index dc9889674..d6c57cb7f 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagedigestmirrors.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagedigestmirrors.go @@ -3,15 +3,15 @@ package v1 import ( - v1 "github.com/openshift/api/config/v1" + configv1 "github.com/openshift/api/config/v1" ) // ImageDigestMirrorsApplyConfiguration represents a declarative configuration of the ImageDigestMirrors type for use // with apply. type ImageDigestMirrorsApplyConfiguration struct { - Source *string `json:"source,omitempty"` - Mirrors []v1.ImageMirror `json:"mirrors,omitempty"` - MirrorSourcePolicy *v1.MirrorSourcePolicy `json:"mirrorSourcePolicy,omitempty"` + Source *string `json:"source,omitempty"` + Mirrors []configv1.ImageMirror `json:"mirrors,omitempty"` + MirrorSourcePolicy *configv1.MirrorSourcePolicy `json:"mirrorSourcePolicy,omitempty"` } // ImageDigestMirrorsApplyConfiguration constructs a declarative configuration of the ImageDigestMirrors type for use with @@ -31,7 +31,7 @@ func (b *ImageDigestMirrorsApplyConfiguration) WithSource(value string) *ImageDi // WithMirrors adds the given value to the Mirrors field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the Mirrors field. -func (b *ImageDigestMirrorsApplyConfiguration) WithMirrors(values ...v1.ImageMirror) *ImageDigestMirrorsApplyConfiguration { +func (b *ImageDigestMirrorsApplyConfiguration) WithMirrors(values ...configv1.ImageMirror) *ImageDigestMirrorsApplyConfiguration { for i := range values { b.Mirrors = append(b.Mirrors, values[i]) } @@ -41,7 +41,7 @@ func (b *ImageDigestMirrorsApplyConfiguration) WithMirrors(values ...v1.ImageMir // WithMirrorSourcePolicy sets the MirrorSourcePolicy field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the MirrorSourcePolicy field is set to the value of the last call. 
-func (b *ImageDigestMirrorsApplyConfiguration) WithMirrorSourcePolicy(value v1.MirrorSourcePolicy) *ImageDigestMirrorsApplyConfiguration { +func (b *ImageDigestMirrorsApplyConfiguration) WithMirrorSourcePolicy(value configv1.MirrorSourcePolicy) *ImageDigestMirrorsApplyConfiguration { b.MirrorSourcePolicy = &value return b } diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagedigestmirrorset.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagedigestmirrorset.go index 80140961b..f3c5ca21a 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagedigestmirrorset.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagedigestmirrorset.go @@ -3,21 +3,21 @@ package v1 import ( - apiconfigv1 "github.com/openshift/api/config/v1" + configv1 "github.com/openshift/api/config/v1" internal "github.com/openshift/client-go/config/applyconfigurations/internal" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) // ImageDigestMirrorSetApplyConfiguration represents a declarative configuration of the ImageDigestMirrorSet type for use // with apply. type ImageDigestMirrorSetApplyConfiguration struct { - v1.TypeMetaApplyConfiguration `json:",inline"` - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Spec *ImageDigestMirrorSetSpecApplyConfiguration `json:"spec,omitempty"` - Status *apiconfigv1.ImageDigestMirrorSetStatus `json:"status,omitempty"` + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *ImageDigestMirrorSetSpecApplyConfiguration `json:"spec,omitempty"` + Status *configv1.ImageDigestMirrorSetStatus `json:"status,omitempty"` } // ImageDigestMirrorSet constructs a declarative configuration of the ImageDigestMirrorSet type for use with @@ -41,18 +41,18 @@ func ImageDigestMirrorSet(name string) *ImageDigestMirrorSetApplyConfiguration { // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! -func ExtractImageDigestMirrorSet(imageDigestMirrorSet *apiconfigv1.ImageDigestMirrorSet, fieldManager string) (*ImageDigestMirrorSetApplyConfiguration, error) { +func ExtractImageDigestMirrorSet(imageDigestMirrorSet *configv1.ImageDigestMirrorSet, fieldManager string) (*ImageDigestMirrorSetApplyConfiguration, error) { return extractImageDigestMirrorSet(imageDigestMirrorSet, fieldManager, "") } // ExtractImageDigestMirrorSetStatus is the same as ExtractImageDigestMirrorSet except // that it extracts the status subresource applied configuration. // Experimental! 
-func ExtractImageDigestMirrorSetStatus(imageDigestMirrorSet *apiconfigv1.ImageDigestMirrorSet, fieldManager string) (*ImageDigestMirrorSetApplyConfiguration, error) { +func ExtractImageDigestMirrorSetStatus(imageDigestMirrorSet *configv1.ImageDigestMirrorSet, fieldManager string) (*ImageDigestMirrorSetApplyConfiguration, error) { return extractImageDigestMirrorSet(imageDigestMirrorSet, fieldManager, "status") } -func extractImageDigestMirrorSet(imageDigestMirrorSet *apiconfigv1.ImageDigestMirrorSet, fieldManager string, subresource string) (*ImageDigestMirrorSetApplyConfiguration, error) { +func extractImageDigestMirrorSet(imageDigestMirrorSet *configv1.ImageDigestMirrorSet, fieldManager string, subresource string) (*ImageDigestMirrorSetApplyConfiguration, error) { b := &ImageDigestMirrorSetApplyConfiguration{} err := managedfields.ExtractInto(imageDigestMirrorSet, internal.Parser().Type("com.github.openshift.api.config.v1.ImageDigestMirrorSet"), fieldManager, b, subresource) if err != nil { @@ -69,7 +69,7 @@ func extractImageDigestMirrorSet(imageDigestMirrorSet *apiconfigv1.ImageDigestMi // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *ImageDigestMirrorSetApplyConfiguration) WithKind(value string) *ImageDigestMirrorSetApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -77,7 +77,7 @@ func (b *ImageDigestMirrorSetApplyConfiguration) WithKind(value string) *ImageDi // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *ImageDigestMirrorSetApplyConfiguration) WithAPIVersion(value string) *ImageDigestMirrorSetApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -86,7 +86,7 @@ func (b *ImageDigestMirrorSetApplyConfiguration) WithAPIVersion(value string) *I // If called multiple times, the Name field is set to the value of the last call. func (b *ImageDigestMirrorSetApplyConfiguration) WithName(value string) *ImageDigestMirrorSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -95,7 +95,7 @@ func (b *ImageDigestMirrorSetApplyConfiguration) WithName(value string) *ImageDi // If called multiple times, the GenerateName field is set to the value of the last call. func (b *ImageDigestMirrorSetApplyConfiguration) WithGenerateName(value string) *ImageDigestMirrorSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -104,7 +104,7 @@ func (b *ImageDigestMirrorSetApplyConfiguration) WithGenerateName(value string) // If called multiple times, the Namespace field is set to the value of the last call. func (b *ImageDigestMirrorSetApplyConfiguration) WithNamespace(value string) *ImageDigestMirrorSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -113,7 +113,7 @@ func (b *ImageDigestMirrorSetApplyConfiguration) WithNamespace(value string) *Im // If called multiple times, the UID field is set to the value of the last call. 
func (b *ImageDigestMirrorSetApplyConfiguration) WithUID(value types.UID) *ImageDigestMirrorSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -122,7 +122,7 @@ func (b *ImageDigestMirrorSetApplyConfiguration) WithUID(value types.UID) *Image // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *ImageDigestMirrorSetApplyConfiguration) WithResourceVersion(value string) *ImageDigestMirrorSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -131,25 +131,25 @@ func (b *ImageDigestMirrorSetApplyConfiguration) WithResourceVersion(value strin // If called multiple times, the Generation field is set to the value of the last call. func (b *ImageDigestMirrorSetApplyConfiguration) WithGeneration(value int64) *ImageDigestMirrorSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CreationTimestamp field is set to the value of the last call. -func (b *ImageDigestMirrorSetApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ImageDigestMirrorSetApplyConfiguration { +func (b *ImageDigestMirrorSetApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *ImageDigestMirrorSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. -func (b *ImageDigestMirrorSetApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ImageDigestMirrorSetApplyConfiguration { +func (b *ImageDigestMirrorSetApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *ImageDigestMirrorSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -158,7 +158,7 @@ func (b *ImageDigestMirrorSetApplyConfiguration) WithDeletionTimestamp(value met // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *ImageDigestMirrorSetApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ImageDigestMirrorSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -168,11 +168,11 @@ func (b *ImageDigestMirrorSetApplyConfiguration) WithDeletionGracePeriodSeconds( // overwriting an existing map entries in Labels field with the same key. 
func (b *ImageDigestMirrorSetApplyConfiguration) WithLabels(entries map[string]string) *ImageDigestMirrorSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -183,11 +183,11 @@ func (b *ImageDigestMirrorSetApplyConfiguration) WithLabels(entries map[string]s // overwriting an existing map entries in Annotations field with the same key. func (b *ImageDigestMirrorSetApplyConfiguration) WithAnnotations(entries map[string]string) *ImageDigestMirrorSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -195,13 +195,13 @@ func (b *ImageDigestMirrorSetApplyConfiguration) WithAnnotations(entries map[str // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the OwnerReferences field. -func (b *ImageDigestMirrorSetApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ImageDigestMirrorSetApplyConfiguration { +func (b *ImageDigestMirrorSetApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *ImageDigestMirrorSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -212,14 +212,14 @@ func (b *ImageDigestMirrorSetApplyConfiguration) WithOwnerReferences(values ...* func (b *ImageDigestMirrorSetApplyConfiguration) WithFinalizers(values ...string) *ImageDigestMirrorSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } func (b *ImageDigestMirrorSetApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} } } @@ -234,7 +234,7 @@ func (b *ImageDigestMirrorSetApplyConfiguration) WithSpec(value *ImageDigestMirr // WithStatus sets the Status field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Status field is set to the value of the last call. 
-func (b *ImageDigestMirrorSetApplyConfiguration) WithStatus(value apiconfigv1.ImageDigestMirrorSetStatus) *ImageDigestMirrorSetApplyConfiguration { +func (b *ImageDigestMirrorSetApplyConfiguration) WithStatus(value configv1.ImageDigestMirrorSetStatus) *ImageDigestMirrorSetApplyConfiguration { b.Status = &value return b } @@ -242,5 +242,5 @@ func (b *ImageDigestMirrorSetApplyConfiguration) WithStatus(value apiconfigv1.Im // GetName retrieves the value of the Name field in the declarative configuration. func (b *ImageDigestMirrorSetApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagestatus.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagestatus.go index 601accbe6..cbf8a208a 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagestatus.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagestatus.go @@ -3,15 +3,15 @@ package v1 import ( - v1 "github.com/openshift/api/config/v1" + configv1 "github.com/openshift/api/config/v1" ) // ImageStatusApplyConfiguration represents a declarative configuration of the ImageStatus type for use // with apply. type ImageStatusApplyConfiguration struct { - InternalRegistryHostname *string `json:"internalRegistryHostname,omitempty"` - ExternalRegistryHostnames []string `json:"externalRegistryHostnames,omitempty"` - ImageStreamImportMode *v1.ImportModeType `json:"imageStreamImportMode,omitempty"` + InternalRegistryHostname *string `json:"internalRegistryHostname,omitempty"` + ExternalRegistryHostnames []string `json:"externalRegistryHostnames,omitempty"` + ImageStreamImportMode *configv1.ImportModeType `json:"imageStreamImportMode,omitempty"` } // ImageStatusApplyConfiguration constructs a declarative configuration of the ImageStatus type for use with @@ -41,7 +41,7 @@ func (b *ImageStatusApplyConfiguration) WithExternalRegistryHostnames(values ... // WithImageStreamImportMode sets the ImageStreamImportMode field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the ImageStreamImportMode field is set to the value of the last call. -func (b *ImageStatusApplyConfiguration) WithImageStreamImportMode(value v1.ImportModeType) *ImageStatusApplyConfiguration { +func (b *ImageStatusApplyConfiguration) WithImageStreamImportMode(value configv1.ImportModeType) *ImageStatusApplyConfiguration { b.ImageStreamImportMode = &value return b } diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagetagmirrors.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagetagmirrors.go index bede604d8..e0baa99fc 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagetagmirrors.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagetagmirrors.go @@ -3,15 +3,15 @@ package v1 import ( - v1 "github.com/openshift/api/config/v1" + configv1 "github.com/openshift/api/config/v1" ) // ImageTagMirrorsApplyConfiguration represents a declarative configuration of the ImageTagMirrors type for use // with apply. 
type ImageTagMirrorsApplyConfiguration struct { - Source *string `json:"source,omitempty"` - Mirrors []v1.ImageMirror `json:"mirrors,omitempty"` - MirrorSourcePolicy *v1.MirrorSourcePolicy `json:"mirrorSourcePolicy,omitempty"` + Source *string `json:"source,omitempty"` + Mirrors []configv1.ImageMirror `json:"mirrors,omitempty"` + MirrorSourcePolicy *configv1.MirrorSourcePolicy `json:"mirrorSourcePolicy,omitempty"` } // ImageTagMirrorsApplyConfiguration constructs a declarative configuration of the ImageTagMirrors type for use with @@ -31,7 +31,7 @@ func (b *ImageTagMirrorsApplyConfiguration) WithSource(value string) *ImageTagMi // WithMirrors adds the given value to the Mirrors field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the Mirrors field. -func (b *ImageTagMirrorsApplyConfiguration) WithMirrors(values ...v1.ImageMirror) *ImageTagMirrorsApplyConfiguration { +func (b *ImageTagMirrorsApplyConfiguration) WithMirrors(values ...configv1.ImageMirror) *ImageTagMirrorsApplyConfiguration { for i := range values { b.Mirrors = append(b.Mirrors, values[i]) } @@ -41,7 +41,7 @@ func (b *ImageTagMirrorsApplyConfiguration) WithMirrors(values ...v1.ImageMirror // WithMirrorSourcePolicy sets the MirrorSourcePolicy field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the MirrorSourcePolicy field is set to the value of the last call. -func (b *ImageTagMirrorsApplyConfiguration) WithMirrorSourcePolicy(value v1.MirrorSourcePolicy) *ImageTagMirrorsApplyConfiguration { +func (b *ImageTagMirrorsApplyConfiguration) WithMirrorSourcePolicy(value configv1.MirrorSourcePolicy) *ImageTagMirrorsApplyConfiguration { b.MirrorSourcePolicy = &value return b } diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagetagmirrorset.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagetagmirrorset.go index 37432fac0..b8a9de192 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagetagmirrorset.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagetagmirrorset.go @@ -3,21 +3,21 @@ package v1 import ( - apiconfigv1 "github.com/openshift/api/config/v1" + configv1 "github.com/openshift/api/config/v1" internal "github.com/openshift/client-go/config/applyconfigurations/internal" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) // ImageTagMirrorSetApplyConfiguration represents a declarative configuration of the ImageTagMirrorSet type for use // with apply. 
type ImageTagMirrorSetApplyConfiguration struct { - v1.TypeMetaApplyConfiguration `json:",inline"` - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Spec *ImageTagMirrorSetSpecApplyConfiguration `json:"spec,omitempty"` - Status *apiconfigv1.ImageTagMirrorSetStatus `json:"status,omitempty"` + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *ImageTagMirrorSetSpecApplyConfiguration `json:"spec,omitempty"` + Status *configv1.ImageTagMirrorSetStatus `json:"status,omitempty"` } // ImageTagMirrorSet constructs a declarative configuration of the ImageTagMirrorSet type for use with @@ -41,18 +41,18 @@ func ImageTagMirrorSet(name string) *ImageTagMirrorSetApplyConfiguration { // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! -func ExtractImageTagMirrorSet(imageTagMirrorSet *apiconfigv1.ImageTagMirrorSet, fieldManager string) (*ImageTagMirrorSetApplyConfiguration, error) { +func ExtractImageTagMirrorSet(imageTagMirrorSet *configv1.ImageTagMirrorSet, fieldManager string) (*ImageTagMirrorSetApplyConfiguration, error) { return extractImageTagMirrorSet(imageTagMirrorSet, fieldManager, "") } // ExtractImageTagMirrorSetStatus is the same as ExtractImageTagMirrorSet except // that it extracts the status subresource applied configuration. // Experimental! -func ExtractImageTagMirrorSetStatus(imageTagMirrorSet *apiconfigv1.ImageTagMirrorSet, fieldManager string) (*ImageTagMirrorSetApplyConfiguration, error) { +func ExtractImageTagMirrorSetStatus(imageTagMirrorSet *configv1.ImageTagMirrorSet, fieldManager string) (*ImageTagMirrorSetApplyConfiguration, error) { return extractImageTagMirrorSet(imageTagMirrorSet, fieldManager, "status") } -func extractImageTagMirrorSet(imageTagMirrorSet *apiconfigv1.ImageTagMirrorSet, fieldManager string, subresource string) (*ImageTagMirrorSetApplyConfiguration, error) { +func extractImageTagMirrorSet(imageTagMirrorSet *configv1.ImageTagMirrorSet, fieldManager string, subresource string) (*ImageTagMirrorSetApplyConfiguration, error) { b := &ImageTagMirrorSetApplyConfiguration{} err := managedfields.ExtractInto(imageTagMirrorSet, internal.Parser().Type("com.github.openshift.api.config.v1.ImageTagMirrorSet"), fieldManager, b, subresource) if err != nil { @@ -69,7 +69,7 @@ func extractImageTagMirrorSet(imageTagMirrorSet *apiconfigv1.ImageTagMirrorSet, // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *ImageTagMirrorSetApplyConfiguration) WithKind(value string) *ImageTagMirrorSetApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -77,7 +77,7 @@ func (b *ImageTagMirrorSetApplyConfiguration) WithKind(value string) *ImageTagMi // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. 
func (b *ImageTagMirrorSetApplyConfiguration) WithAPIVersion(value string) *ImageTagMirrorSetApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -86,7 +86,7 @@ func (b *ImageTagMirrorSetApplyConfiguration) WithAPIVersion(value string) *Imag // If called multiple times, the Name field is set to the value of the last call. func (b *ImageTagMirrorSetApplyConfiguration) WithName(value string) *ImageTagMirrorSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -95,7 +95,7 @@ func (b *ImageTagMirrorSetApplyConfiguration) WithName(value string) *ImageTagMi // If called multiple times, the GenerateName field is set to the value of the last call. func (b *ImageTagMirrorSetApplyConfiguration) WithGenerateName(value string) *ImageTagMirrorSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -104,7 +104,7 @@ func (b *ImageTagMirrorSetApplyConfiguration) WithGenerateName(value string) *Im // If called multiple times, the Namespace field is set to the value of the last call. func (b *ImageTagMirrorSetApplyConfiguration) WithNamespace(value string) *ImageTagMirrorSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -113,7 +113,7 @@ func (b *ImageTagMirrorSetApplyConfiguration) WithNamespace(value string) *Image // If called multiple times, the UID field is set to the value of the last call. func (b *ImageTagMirrorSetApplyConfiguration) WithUID(value types.UID) *ImageTagMirrorSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -122,7 +122,7 @@ func (b *ImageTagMirrorSetApplyConfiguration) WithUID(value types.UID) *ImageTag // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *ImageTagMirrorSetApplyConfiguration) WithResourceVersion(value string) *ImageTagMirrorSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -131,25 +131,25 @@ func (b *ImageTagMirrorSetApplyConfiguration) WithResourceVersion(value string) // If called multiple times, the Generation field is set to the value of the last call. func (b *ImageTagMirrorSetApplyConfiguration) WithGeneration(value int64) *ImageTagMirrorSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CreationTimestamp field is set to the value of the last call. 
-func (b *ImageTagMirrorSetApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ImageTagMirrorSetApplyConfiguration { +func (b *ImageTagMirrorSetApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *ImageTagMirrorSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. -func (b *ImageTagMirrorSetApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ImageTagMirrorSetApplyConfiguration { +func (b *ImageTagMirrorSetApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *ImageTagMirrorSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -158,7 +158,7 @@ func (b *ImageTagMirrorSetApplyConfiguration) WithDeletionTimestamp(value metav1 // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *ImageTagMirrorSetApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ImageTagMirrorSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -168,11 +168,11 @@ func (b *ImageTagMirrorSetApplyConfiguration) WithDeletionGracePeriodSeconds(val // overwriting an existing map entries in Labels field with the same key. func (b *ImageTagMirrorSetApplyConfiguration) WithLabels(entries map[string]string) *ImageTagMirrorSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -183,11 +183,11 @@ func (b *ImageTagMirrorSetApplyConfiguration) WithLabels(entries map[string]stri // overwriting an existing map entries in Annotations field with the same key. func (b *ImageTagMirrorSetApplyConfiguration) WithAnnotations(entries map[string]string) *ImageTagMirrorSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -195,13 +195,13 @@ func (b *ImageTagMirrorSetApplyConfiguration) WithAnnotations(entries map[string // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the OwnerReferences field. 
-func (b *ImageTagMirrorSetApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ImageTagMirrorSetApplyConfiguration { +func (b *ImageTagMirrorSetApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *ImageTagMirrorSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -212,14 +212,14 @@ func (b *ImageTagMirrorSetApplyConfiguration) WithOwnerReferences(values ...*v1. func (b *ImageTagMirrorSetApplyConfiguration) WithFinalizers(values ...string) *ImageTagMirrorSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } func (b *ImageTagMirrorSetApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} } } @@ -234,7 +234,7 @@ func (b *ImageTagMirrorSetApplyConfiguration) WithSpec(value *ImageTagMirrorSetS // WithStatus sets the Status field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Status field is set to the value of the last call. -func (b *ImageTagMirrorSetApplyConfiguration) WithStatus(value apiconfigv1.ImageTagMirrorSetStatus) *ImageTagMirrorSetApplyConfiguration { +func (b *ImageTagMirrorSetApplyConfiguration) WithStatus(value configv1.ImageTagMirrorSetStatus) *ImageTagMirrorSetApplyConfiguration { b.Status = &value return b } @@ -242,5 +242,5 @@ func (b *ImageTagMirrorSetApplyConfiguration) WithStatus(value apiconfigv1.Image // GetName retrieves the value of the Name field in the declarative configuration. func (b *ImageTagMirrorSetApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/infrastructure.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/infrastructure.go index 5d9551be6..970391cfa 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/infrastructure.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/infrastructure.go @@ -3,21 +3,21 @@ package v1 import ( - apiconfigv1 "github.com/openshift/api/config/v1" + configv1 "github.com/openshift/api/config/v1" internal "github.com/openshift/client-go/config/applyconfigurations/internal" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) // InfrastructureApplyConfiguration represents a declarative configuration of the Infrastructure type for use // with apply. 
type InfrastructureApplyConfiguration struct { - v1.TypeMetaApplyConfiguration `json:",inline"` - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Spec *InfrastructureSpecApplyConfiguration `json:"spec,omitempty"` - Status *InfrastructureStatusApplyConfiguration `json:"status,omitempty"` + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *InfrastructureSpecApplyConfiguration `json:"spec,omitempty"` + Status *InfrastructureStatusApplyConfiguration `json:"status,omitempty"` } // Infrastructure constructs a declarative configuration of the Infrastructure type for use with @@ -41,18 +41,18 @@ func Infrastructure(name string) *InfrastructureApplyConfiguration { // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! -func ExtractInfrastructure(infrastructure *apiconfigv1.Infrastructure, fieldManager string) (*InfrastructureApplyConfiguration, error) { +func ExtractInfrastructure(infrastructure *configv1.Infrastructure, fieldManager string) (*InfrastructureApplyConfiguration, error) { return extractInfrastructure(infrastructure, fieldManager, "") } // ExtractInfrastructureStatus is the same as ExtractInfrastructure except // that it extracts the status subresource applied configuration. // Experimental! -func ExtractInfrastructureStatus(infrastructure *apiconfigv1.Infrastructure, fieldManager string) (*InfrastructureApplyConfiguration, error) { +func ExtractInfrastructureStatus(infrastructure *configv1.Infrastructure, fieldManager string) (*InfrastructureApplyConfiguration, error) { return extractInfrastructure(infrastructure, fieldManager, "status") } -func extractInfrastructure(infrastructure *apiconfigv1.Infrastructure, fieldManager string, subresource string) (*InfrastructureApplyConfiguration, error) { +func extractInfrastructure(infrastructure *configv1.Infrastructure, fieldManager string, subresource string) (*InfrastructureApplyConfiguration, error) { b := &InfrastructureApplyConfiguration{} err := managedfields.ExtractInto(infrastructure, internal.Parser().Type("com.github.openshift.api.config.v1.Infrastructure"), fieldManager, b, subresource) if err != nil { @@ -69,7 +69,7 @@ func extractInfrastructure(infrastructure *apiconfigv1.Infrastructure, fieldMana // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *InfrastructureApplyConfiguration) WithKind(value string) *InfrastructureApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -77,7 +77,7 @@ func (b *InfrastructureApplyConfiguration) WithKind(value string) *Infrastructur // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *InfrastructureApplyConfiguration) WithAPIVersion(value string) *InfrastructureApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -86,7 +86,7 @@ func (b *InfrastructureApplyConfiguration) WithAPIVersion(value string) *Infrast // If called multiple times, the Name field is set to the value of the last call. 
func (b *InfrastructureApplyConfiguration) WithName(value string) *InfrastructureApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -95,7 +95,7 @@ func (b *InfrastructureApplyConfiguration) WithName(value string) *Infrastructur // If called multiple times, the GenerateName field is set to the value of the last call. func (b *InfrastructureApplyConfiguration) WithGenerateName(value string) *InfrastructureApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -104,7 +104,7 @@ func (b *InfrastructureApplyConfiguration) WithGenerateName(value string) *Infra // If called multiple times, the Namespace field is set to the value of the last call. func (b *InfrastructureApplyConfiguration) WithNamespace(value string) *InfrastructureApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -113,7 +113,7 @@ func (b *InfrastructureApplyConfiguration) WithNamespace(value string) *Infrastr // If called multiple times, the UID field is set to the value of the last call. func (b *InfrastructureApplyConfiguration) WithUID(value types.UID) *InfrastructureApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -122,7 +122,7 @@ func (b *InfrastructureApplyConfiguration) WithUID(value types.UID) *Infrastruct // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *InfrastructureApplyConfiguration) WithResourceVersion(value string) *InfrastructureApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -131,25 +131,25 @@ func (b *InfrastructureApplyConfiguration) WithResourceVersion(value string) *In // If called multiple times, the Generation field is set to the value of the last call. func (b *InfrastructureApplyConfiguration) WithGeneration(value int64) *InfrastructureApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CreationTimestamp field is set to the value of the last call. -func (b *InfrastructureApplyConfiguration) WithCreationTimestamp(value metav1.Time) *InfrastructureApplyConfiguration { +func (b *InfrastructureApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *InfrastructureApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. 
-func (b *InfrastructureApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *InfrastructureApplyConfiguration { +func (b *InfrastructureApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *InfrastructureApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -158,7 +158,7 @@ func (b *InfrastructureApplyConfiguration) WithDeletionTimestamp(value metav1.Ti // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *InfrastructureApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *InfrastructureApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -168,11 +168,11 @@ func (b *InfrastructureApplyConfiguration) WithDeletionGracePeriodSeconds(value // overwriting an existing map entries in Labels field with the same key. func (b *InfrastructureApplyConfiguration) WithLabels(entries map[string]string) *InfrastructureApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -183,11 +183,11 @@ func (b *InfrastructureApplyConfiguration) WithLabels(entries map[string]string) // overwriting an existing map entries in Annotations field with the same key. func (b *InfrastructureApplyConfiguration) WithAnnotations(entries map[string]string) *InfrastructureApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -195,13 +195,13 @@ func (b *InfrastructureApplyConfiguration) WithAnnotations(entries map[string]st // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the OwnerReferences field. 
-func (b *InfrastructureApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *InfrastructureApplyConfiguration { +func (b *InfrastructureApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *InfrastructureApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -212,14 +212,14 @@ func (b *InfrastructureApplyConfiguration) WithOwnerReferences(values ...*v1.Own func (b *InfrastructureApplyConfiguration) WithFinalizers(values ...string) *InfrastructureApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } func (b *InfrastructureApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} } } @@ -242,5 +242,5 @@ func (b *InfrastructureApplyConfiguration) WithStatus(value *InfrastructureStatu // GetName retrieves the value of the Name field in the declarative configuration. func (b *InfrastructureApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/infrastructurestatus.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/infrastructurestatus.go index 89f5496df..5b5d8288c 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/infrastructurestatus.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/infrastructurestatus.go @@ -3,21 +3,21 @@ package v1 import ( - v1 "github.com/openshift/api/config/v1" + configv1 "github.com/openshift/api/config/v1" ) // InfrastructureStatusApplyConfiguration represents a declarative configuration of the InfrastructureStatus type for use // with apply. 
type InfrastructureStatusApplyConfiguration struct { InfrastructureName *string `json:"infrastructureName,omitempty"` - Platform *v1.PlatformType `json:"platform,omitempty"` + Platform *configv1.PlatformType `json:"platform,omitempty"` PlatformStatus *PlatformStatusApplyConfiguration `json:"platformStatus,omitempty"` EtcdDiscoveryDomain *string `json:"etcdDiscoveryDomain,omitempty"` APIServerURL *string `json:"apiServerURL,omitempty"` APIServerInternalURL *string `json:"apiServerInternalURI,omitempty"` - ControlPlaneTopology *v1.TopologyMode `json:"controlPlaneTopology,omitempty"` - InfrastructureTopology *v1.TopologyMode `json:"infrastructureTopology,omitempty"` - CPUPartitioning *v1.CPUPartitioningMode `json:"cpuPartitioning,omitempty"` + ControlPlaneTopology *configv1.TopologyMode `json:"controlPlaneTopology,omitempty"` + InfrastructureTopology *configv1.TopologyMode `json:"infrastructureTopology,omitempty"` + CPUPartitioning *configv1.CPUPartitioningMode `json:"cpuPartitioning,omitempty"` } // InfrastructureStatusApplyConfiguration constructs a declarative configuration of the InfrastructureStatus type for use with @@ -37,7 +37,7 @@ func (b *InfrastructureStatusApplyConfiguration) WithInfrastructureName(value st // WithPlatform sets the Platform field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Platform field is set to the value of the last call. -func (b *InfrastructureStatusApplyConfiguration) WithPlatform(value v1.PlatformType) *InfrastructureStatusApplyConfiguration { +func (b *InfrastructureStatusApplyConfiguration) WithPlatform(value configv1.PlatformType) *InfrastructureStatusApplyConfiguration { b.Platform = &value return b } @@ -77,7 +77,7 @@ func (b *InfrastructureStatusApplyConfiguration) WithAPIServerInternalURL(value // WithControlPlaneTopology sets the ControlPlaneTopology field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the ControlPlaneTopology field is set to the value of the last call. -func (b *InfrastructureStatusApplyConfiguration) WithControlPlaneTopology(value v1.TopologyMode) *InfrastructureStatusApplyConfiguration { +func (b *InfrastructureStatusApplyConfiguration) WithControlPlaneTopology(value configv1.TopologyMode) *InfrastructureStatusApplyConfiguration { b.ControlPlaneTopology = &value return b } @@ -85,7 +85,7 @@ func (b *InfrastructureStatusApplyConfiguration) WithControlPlaneTopology(value // WithInfrastructureTopology sets the InfrastructureTopology field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the InfrastructureTopology field is set to the value of the last call. 
-func (b *InfrastructureStatusApplyConfiguration) WithInfrastructureTopology(value v1.TopologyMode) *InfrastructureStatusApplyConfiguration { +func (b *InfrastructureStatusApplyConfiguration) WithInfrastructureTopology(value configv1.TopologyMode) *InfrastructureStatusApplyConfiguration { b.InfrastructureTopology = &value return b } @@ -93,7 +93,7 @@ func (b *InfrastructureStatusApplyConfiguration) WithInfrastructureTopology(valu // WithCPUPartitioning sets the CPUPartitioning field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CPUPartitioning field is set to the value of the last call. -func (b *InfrastructureStatusApplyConfiguration) WithCPUPartitioning(value v1.CPUPartitioningMode) *InfrastructureStatusApplyConfiguration { +func (b *InfrastructureStatusApplyConfiguration) WithCPUPartitioning(value configv1.CPUPartitioningMode) *InfrastructureStatusApplyConfiguration { b.CPUPartitioning = &value return b } diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/ingress.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/ingress.go index b8780886d..945bacf8a 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/ingress.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/ingress.go @@ -3,21 +3,21 @@ package v1 import ( - apiconfigv1 "github.com/openshift/api/config/v1" + configv1 "github.com/openshift/api/config/v1" internal "github.com/openshift/client-go/config/applyconfigurations/internal" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) // IngressApplyConfiguration represents a declarative configuration of the Ingress type for use // with apply. type IngressApplyConfiguration struct { - v1.TypeMetaApplyConfiguration `json:",inline"` - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Spec *IngressSpecApplyConfiguration `json:"spec,omitempty"` - Status *IngressStatusApplyConfiguration `json:"status,omitempty"` + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *IngressSpecApplyConfiguration `json:"spec,omitempty"` + Status *IngressStatusApplyConfiguration `json:"status,omitempty"` } // Ingress constructs a declarative configuration of the Ingress type for use with @@ -41,18 +41,18 @@ func Ingress(name string) *IngressApplyConfiguration { // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! -func ExtractIngress(ingress *apiconfigv1.Ingress, fieldManager string) (*IngressApplyConfiguration, error) { +func ExtractIngress(ingress *configv1.Ingress, fieldManager string) (*IngressApplyConfiguration, error) { return extractIngress(ingress, fieldManager, "") } // ExtractIngressStatus is the same as ExtractIngress except // that it extracts the status subresource applied configuration. // Experimental! 
-func ExtractIngressStatus(ingress *apiconfigv1.Ingress, fieldManager string) (*IngressApplyConfiguration, error) { +func ExtractIngressStatus(ingress *configv1.Ingress, fieldManager string) (*IngressApplyConfiguration, error) { return extractIngress(ingress, fieldManager, "status") } -func extractIngress(ingress *apiconfigv1.Ingress, fieldManager string, subresource string) (*IngressApplyConfiguration, error) { +func extractIngress(ingress *configv1.Ingress, fieldManager string, subresource string) (*IngressApplyConfiguration, error) { b := &IngressApplyConfiguration{} err := managedfields.ExtractInto(ingress, internal.Parser().Type("com.github.openshift.api.config.v1.Ingress"), fieldManager, b, subresource) if err != nil { @@ -69,7 +69,7 @@ func extractIngress(ingress *apiconfigv1.Ingress, fieldManager string, subresour // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *IngressApplyConfiguration) WithKind(value string) *IngressApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -77,7 +77,7 @@ func (b *IngressApplyConfiguration) WithKind(value string) *IngressApplyConfigur // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *IngressApplyConfiguration) WithAPIVersion(value string) *IngressApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -86,7 +86,7 @@ func (b *IngressApplyConfiguration) WithAPIVersion(value string) *IngressApplyCo // If called multiple times, the Name field is set to the value of the last call. func (b *IngressApplyConfiguration) WithName(value string) *IngressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -95,7 +95,7 @@ func (b *IngressApplyConfiguration) WithName(value string) *IngressApplyConfigur // If called multiple times, the GenerateName field is set to the value of the last call. func (b *IngressApplyConfiguration) WithGenerateName(value string) *IngressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -104,7 +104,7 @@ func (b *IngressApplyConfiguration) WithGenerateName(value string) *IngressApply // If called multiple times, the Namespace field is set to the value of the last call. func (b *IngressApplyConfiguration) WithNamespace(value string) *IngressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -113,7 +113,7 @@ func (b *IngressApplyConfiguration) WithNamespace(value string) *IngressApplyCon // If called multiple times, the UID field is set to the value of the last call. func (b *IngressApplyConfiguration) WithUID(value types.UID) *IngressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -122,7 +122,7 @@ func (b *IngressApplyConfiguration) WithUID(value types.UID) *IngressApplyConfig // If called multiple times, the ResourceVersion field is set to the value of the last call. 
func (b *IngressApplyConfiguration) WithResourceVersion(value string) *IngressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -131,25 +131,25 @@ func (b *IngressApplyConfiguration) WithResourceVersion(value string) *IngressAp // If called multiple times, the Generation field is set to the value of the last call. func (b *IngressApplyConfiguration) WithGeneration(value int64) *IngressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CreationTimestamp field is set to the value of the last call. -func (b *IngressApplyConfiguration) WithCreationTimestamp(value metav1.Time) *IngressApplyConfiguration { +func (b *IngressApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *IngressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. -func (b *IngressApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *IngressApplyConfiguration { +func (b *IngressApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *IngressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -158,7 +158,7 @@ func (b *IngressApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *In // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *IngressApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *IngressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -168,11 +168,11 @@ func (b *IngressApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) // overwriting an existing map entries in Labels field with the same key. func (b *IngressApplyConfiguration) WithLabels(entries map[string]string) *IngressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -183,11 +183,11 @@ func (b *IngressApplyConfiguration) WithLabels(entries map[string]string) *Ingre // overwriting an existing map entries in Annotations field with the same key. 
func (b *IngressApplyConfiguration) WithAnnotations(entries map[string]string) *IngressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -195,13 +195,13 @@ func (b *IngressApplyConfiguration) WithAnnotations(entries map[string]string) * // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the OwnerReferences field. -func (b *IngressApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *IngressApplyConfiguration { +func (b *IngressApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *IngressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -212,14 +212,14 @@ func (b *IngressApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerRefer func (b *IngressApplyConfiguration) WithFinalizers(values ...string) *IngressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } func (b *IngressApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} } } @@ -242,5 +242,5 @@ func (b *IngressApplyConfiguration) WithStatus(value *IngressStatusApplyConfigur // GetName retrieves the value of the Name field in the declarative configuration. func (b *IngressApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/ingressplatformspec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/ingressplatformspec.go index f3e25215b..ed5c26531 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/ingressplatformspec.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/ingressplatformspec.go @@ -3,13 +3,13 @@ package v1 import ( - v1 "github.com/openshift/api/config/v1" + configv1 "github.com/openshift/api/config/v1" ) // IngressPlatformSpecApplyConfiguration represents a declarative configuration of the IngressPlatformSpec type for use // with apply. 
type IngressPlatformSpecApplyConfiguration struct { - Type *v1.PlatformType `json:"type,omitempty"` + Type *configv1.PlatformType `json:"type,omitempty"` AWS *AWSIngressSpecApplyConfiguration `json:"aws,omitempty"` } @@ -22,7 +22,7 @@ func IngressPlatformSpec() *IngressPlatformSpecApplyConfiguration { // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. -func (b *IngressPlatformSpecApplyConfiguration) WithType(value v1.PlatformType) *IngressPlatformSpecApplyConfiguration { +func (b *IngressPlatformSpecApplyConfiguration) WithType(value configv1.PlatformType) *IngressPlatformSpecApplyConfiguration { b.Type = &value return b } diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/keystoneidentityprovider.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/keystoneidentityprovider.go index 88204c69a..abbb9ef15 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/keystoneidentityprovider.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/keystoneidentityprovider.go @@ -19,7 +19,7 @@ func KeystoneIdentityProvider() *KeystoneIdentityProviderApplyConfiguration { // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the URL field is set to the value of the last call. func (b *KeystoneIdentityProviderApplyConfiguration) WithURL(value string) *KeystoneIdentityProviderApplyConfiguration { - b.URL = &value + b.OAuthRemoteConnectionInfoApplyConfiguration.URL = &value return b } @@ -27,7 +27,7 @@ func (b *KeystoneIdentityProviderApplyConfiguration) WithURL(value string) *Keys // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CA field is set to the value of the last call. func (b *KeystoneIdentityProviderApplyConfiguration) WithCA(value *ConfigMapNameReferenceApplyConfiguration) *KeystoneIdentityProviderApplyConfiguration { - b.CA = value + b.OAuthRemoteConnectionInfoApplyConfiguration.CA = value return b } @@ -35,7 +35,7 @@ func (b *KeystoneIdentityProviderApplyConfiguration) WithCA(value *ConfigMapName // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the TLSClientCert field is set to the value of the last call. func (b *KeystoneIdentityProviderApplyConfiguration) WithTLSClientCert(value *SecretNameReferenceApplyConfiguration) *KeystoneIdentityProviderApplyConfiguration { - b.TLSClientCert = value + b.OAuthRemoteConnectionInfoApplyConfiguration.TLSClientCert = value return b } @@ -43,7 +43,7 @@ func (b *KeystoneIdentityProviderApplyConfiguration) WithTLSClientCert(value *Se // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the TLSClientKey field is set to the value of the last call. 
func (b *KeystoneIdentityProviderApplyConfiguration) WithTLSClientKey(value *SecretNameReferenceApplyConfiguration) *KeystoneIdentityProviderApplyConfiguration { - b.TLSClientKey = value + b.OAuthRemoteConnectionInfoApplyConfiguration.TLSClientKey = value return b } diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/network.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/network.go index a42f3a9ea..195594ead 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/network.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/network.go @@ -3,21 +3,21 @@ package v1 import ( - apiconfigv1 "github.com/openshift/api/config/v1" + configv1 "github.com/openshift/api/config/v1" internal "github.com/openshift/client-go/config/applyconfigurations/internal" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) // NetworkApplyConfiguration represents a declarative configuration of the Network type for use // with apply. type NetworkApplyConfiguration struct { - v1.TypeMetaApplyConfiguration `json:",inline"` - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Spec *NetworkSpecApplyConfiguration `json:"spec,omitempty"` - Status *NetworkStatusApplyConfiguration `json:"status,omitempty"` + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *NetworkSpecApplyConfiguration `json:"spec,omitempty"` + Status *NetworkStatusApplyConfiguration `json:"status,omitempty"` } // Network constructs a declarative configuration of the Network type for use with @@ -41,18 +41,18 @@ func Network(name string) *NetworkApplyConfiguration { // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! -func ExtractNetwork(network *apiconfigv1.Network, fieldManager string) (*NetworkApplyConfiguration, error) { +func ExtractNetwork(network *configv1.Network, fieldManager string) (*NetworkApplyConfiguration, error) { return extractNetwork(network, fieldManager, "") } // ExtractNetworkStatus is the same as ExtractNetwork except // that it extracts the status subresource applied configuration. // Experimental! 
-func ExtractNetworkStatus(network *apiconfigv1.Network, fieldManager string) (*NetworkApplyConfiguration, error) { +func ExtractNetworkStatus(network *configv1.Network, fieldManager string) (*NetworkApplyConfiguration, error) { return extractNetwork(network, fieldManager, "status") } -func extractNetwork(network *apiconfigv1.Network, fieldManager string, subresource string) (*NetworkApplyConfiguration, error) { +func extractNetwork(network *configv1.Network, fieldManager string, subresource string) (*NetworkApplyConfiguration, error) { b := &NetworkApplyConfiguration{} err := managedfields.ExtractInto(network, internal.Parser().Type("com.github.openshift.api.config.v1.Network"), fieldManager, b, subresource) if err != nil { @@ -69,7 +69,7 @@ func extractNetwork(network *apiconfigv1.Network, fieldManager string, subresour // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *NetworkApplyConfiguration) WithKind(value string) *NetworkApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -77,7 +77,7 @@ func (b *NetworkApplyConfiguration) WithKind(value string) *NetworkApplyConfigur // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *NetworkApplyConfiguration) WithAPIVersion(value string) *NetworkApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -86,7 +86,7 @@ func (b *NetworkApplyConfiguration) WithAPIVersion(value string) *NetworkApplyCo // If called multiple times, the Name field is set to the value of the last call. func (b *NetworkApplyConfiguration) WithName(value string) *NetworkApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -95,7 +95,7 @@ func (b *NetworkApplyConfiguration) WithName(value string) *NetworkApplyConfigur // If called multiple times, the GenerateName field is set to the value of the last call. func (b *NetworkApplyConfiguration) WithGenerateName(value string) *NetworkApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -104,7 +104,7 @@ func (b *NetworkApplyConfiguration) WithGenerateName(value string) *NetworkApply // If called multiple times, the Namespace field is set to the value of the last call. func (b *NetworkApplyConfiguration) WithNamespace(value string) *NetworkApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -113,7 +113,7 @@ func (b *NetworkApplyConfiguration) WithNamespace(value string) *NetworkApplyCon // If called multiple times, the UID field is set to the value of the last call. func (b *NetworkApplyConfiguration) WithUID(value types.UID) *NetworkApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -122,7 +122,7 @@ func (b *NetworkApplyConfiguration) WithUID(value types.UID) *NetworkApplyConfig // If called multiple times, the ResourceVersion field is set to the value of the last call. 
func (b *NetworkApplyConfiguration) WithResourceVersion(value string) *NetworkApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -131,25 +131,25 @@ func (b *NetworkApplyConfiguration) WithResourceVersion(value string) *NetworkAp // If called multiple times, the Generation field is set to the value of the last call. func (b *NetworkApplyConfiguration) WithGeneration(value int64) *NetworkApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CreationTimestamp field is set to the value of the last call. -func (b *NetworkApplyConfiguration) WithCreationTimestamp(value metav1.Time) *NetworkApplyConfiguration { +func (b *NetworkApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *NetworkApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. -func (b *NetworkApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *NetworkApplyConfiguration { +func (b *NetworkApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *NetworkApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -158,7 +158,7 @@ func (b *NetworkApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *Ne // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *NetworkApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *NetworkApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -168,11 +168,11 @@ func (b *NetworkApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) // overwriting an existing map entries in Labels field with the same key. func (b *NetworkApplyConfiguration) WithLabels(entries map[string]string) *NetworkApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -183,11 +183,11 @@ func (b *NetworkApplyConfiguration) WithLabels(entries map[string]string) *Netwo // overwriting an existing map entries in Annotations field with the same key. 
func (b *NetworkApplyConfiguration) WithAnnotations(entries map[string]string) *NetworkApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -195,13 +195,13 @@ func (b *NetworkApplyConfiguration) WithAnnotations(entries map[string]string) * // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the OwnerReferences field. -func (b *NetworkApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *NetworkApplyConfiguration { +func (b *NetworkApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *NetworkApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -212,14 +212,14 @@ func (b *NetworkApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerRefer func (b *NetworkApplyConfiguration) WithFinalizers(values ...string) *NetworkApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } func (b *NetworkApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} } } @@ -242,5 +242,5 @@ func (b *NetworkApplyConfiguration) WithStatus(value *NetworkStatusApplyConfigur // GetName retrieves the value of the Name field in the declarative configuration. func (b *NetworkApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/networkdiagnostics.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/networkdiagnostics.go index 6f75e0385..a2624dc5b 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/networkdiagnostics.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/networkdiagnostics.go @@ -3,13 +3,13 @@ package v1 import ( - v1 "github.com/openshift/api/config/v1" + configv1 "github.com/openshift/api/config/v1" ) // NetworkDiagnosticsApplyConfiguration represents a declarative configuration of the NetworkDiagnostics type for use // with apply. 
type NetworkDiagnosticsApplyConfiguration struct { - Mode *v1.NetworkDiagnosticsMode `json:"mode,omitempty"` + Mode *configv1.NetworkDiagnosticsMode `json:"mode,omitempty"` SourcePlacement *NetworkDiagnosticsSourcePlacementApplyConfiguration `json:"sourcePlacement,omitempty"` TargetPlacement *NetworkDiagnosticsTargetPlacementApplyConfiguration `json:"targetPlacement,omitempty"` } @@ -23,7 +23,7 @@ func NetworkDiagnostics() *NetworkDiagnosticsApplyConfiguration { // WithMode sets the Mode field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Mode field is set to the value of the last call. -func (b *NetworkDiagnosticsApplyConfiguration) WithMode(value v1.NetworkDiagnosticsMode) *NetworkDiagnosticsApplyConfiguration { +func (b *NetworkDiagnosticsApplyConfiguration) WithMode(value configv1.NetworkDiagnosticsMode) *NetworkDiagnosticsApplyConfiguration { b.Mode = &value return b } diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/networkdiagnosticssourceplacement.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/networkdiagnosticssourceplacement.go index 2b280a828..a1960ba9f 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/networkdiagnosticssourceplacement.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/networkdiagnosticssourceplacement.go @@ -3,14 +3,14 @@ package v1 import ( - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" ) // NetworkDiagnosticsSourcePlacementApplyConfiguration represents a declarative configuration of the NetworkDiagnosticsSourcePlacement type for use // with apply. type NetworkDiagnosticsSourcePlacementApplyConfiguration struct { - NodeSelector map[string]string `json:"nodeSelector,omitempty"` - Tolerations []v1.Toleration `json:"tolerations,omitempty"` + NodeSelector map[string]string `json:"nodeSelector,omitempty"` + Tolerations []corev1.Toleration `json:"tolerations,omitempty"` } // NetworkDiagnosticsSourcePlacementApplyConfiguration constructs a declarative configuration of the NetworkDiagnosticsSourcePlacement type for use with @@ -36,7 +36,7 @@ func (b *NetworkDiagnosticsSourcePlacementApplyConfiguration) WithNodeSelector(e // WithTolerations adds the given value to the Tolerations field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the Tolerations field. 
-func (b *NetworkDiagnosticsSourcePlacementApplyConfiguration) WithTolerations(values ...v1.Toleration) *NetworkDiagnosticsSourcePlacementApplyConfiguration { +func (b *NetworkDiagnosticsSourcePlacementApplyConfiguration) WithTolerations(values ...corev1.Toleration) *NetworkDiagnosticsSourcePlacementApplyConfiguration { for i := range values { b.Tolerations = append(b.Tolerations, values[i]) } diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/networkdiagnosticstargetplacement.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/networkdiagnosticstargetplacement.go index 6fee4c795..ba0dbab8a 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/networkdiagnosticstargetplacement.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/networkdiagnosticstargetplacement.go @@ -3,14 +3,14 @@ package v1 import ( - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" ) // NetworkDiagnosticsTargetPlacementApplyConfiguration represents a declarative configuration of the NetworkDiagnosticsTargetPlacement type for use // with apply. type NetworkDiagnosticsTargetPlacementApplyConfiguration struct { - NodeSelector map[string]string `json:"nodeSelector,omitempty"` - Tolerations []v1.Toleration `json:"tolerations,omitempty"` + NodeSelector map[string]string `json:"nodeSelector,omitempty"` + Tolerations []corev1.Toleration `json:"tolerations,omitempty"` } // NetworkDiagnosticsTargetPlacementApplyConfiguration constructs a declarative configuration of the NetworkDiagnosticsTargetPlacement type for use with @@ -36,7 +36,7 @@ func (b *NetworkDiagnosticsTargetPlacementApplyConfiguration) WithNodeSelector(e // WithTolerations adds the given value to the Tolerations field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the Tolerations field. -func (b *NetworkDiagnosticsTargetPlacementApplyConfiguration) WithTolerations(values ...v1.Toleration) *NetworkDiagnosticsTargetPlacementApplyConfiguration { +func (b *NetworkDiagnosticsTargetPlacementApplyConfiguration) WithTolerations(values ...corev1.Toleration) *NetworkDiagnosticsTargetPlacementApplyConfiguration { for i := range values { b.Tolerations = append(b.Tolerations, values[i]) } diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/node.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/node.go index ac4deeb66..611705623 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/node.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/node.go @@ -3,21 +3,21 @@ package v1 import ( - apiconfigv1 "github.com/openshift/api/config/v1" + configv1 "github.com/openshift/api/config/v1" internal "github.com/openshift/client-go/config/applyconfigurations/internal" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) // NodeApplyConfiguration represents a declarative configuration of the Node type for use // with apply. 
type NodeApplyConfiguration struct { - v1.TypeMetaApplyConfiguration `json:",inline"` - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Spec *NodeSpecApplyConfiguration `json:"spec,omitempty"` - Status *NodeStatusApplyConfiguration `json:"status,omitempty"` + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *NodeSpecApplyConfiguration `json:"spec,omitempty"` + Status *NodeStatusApplyConfiguration `json:"status,omitempty"` } // Node constructs a declarative configuration of the Node type for use with @@ -41,18 +41,18 @@ func Node(name string) *NodeApplyConfiguration { // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! -func ExtractNode(node *apiconfigv1.Node, fieldManager string) (*NodeApplyConfiguration, error) { +func ExtractNode(node *configv1.Node, fieldManager string) (*NodeApplyConfiguration, error) { return extractNode(node, fieldManager, "") } // ExtractNodeStatus is the same as ExtractNode except // that it extracts the status subresource applied configuration. // Experimental! -func ExtractNodeStatus(node *apiconfigv1.Node, fieldManager string) (*NodeApplyConfiguration, error) { +func ExtractNodeStatus(node *configv1.Node, fieldManager string) (*NodeApplyConfiguration, error) { return extractNode(node, fieldManager, "status") } -func extractNode(node *apiconfigv1.Node, fieldManager string, subresource string) (*NodeApplyConfiguration, error) { +func extractNode(node *configv1.Node, fieldManager string, subresource string) (*NodeApplyConfiguration, error) { b := &NodeApplyConfiguration{} err := managedfields.ExtractInto(node, internal.Parser().Type("com.github.openshift.api.config.v1.Node"), fieldManager, b, subresource) if err != nil { @@ -69,7 +69,7 @@ func extractNode(node *apiconfigv1.Node, fieldManager string, subresource string // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *NodeApplyConfiguration) WithKind(value string) *NodeApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -77,7 +77,7 @@ func (b *NodeApplyConfiguration) WithKind(value string) *NodeApplyConfiguration // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *NodeApplyConfiguration) WithAPIVersion(value string) *NodeApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -86,7 +86,7 @@ func (b *NodeApplyConfiguration) WithAPIVersion(value string) *NodeApplyConfigur // If called multiple times, the Name field is set to the value of the last call. func (b *NodeApplyConfiguration) WithName(value string) *NodeApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -95,7 +95,7 @@ func (b *NodeApplyConfiguration) WithName(value string) *NodeApplyConfiguration // If called multiple times, the GenerateName field is set to the value of the last call. 
func (b *NodeApplyConfiguration) WithGenerateName(value string) *NodeApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -104,7 +104,7 @@ func (b *NodeApplyConfiguration) WithGenerateName(value string) *NodeApplyConfig // If called multiple times, the Namespace field is set to the value of the last call. func (b *NodeApplyConfiguration) WithNamespace(value string) *NodeApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -113,7 +113,7 @@ func (b *NodeApplyConfiguration) WithNamespace(value string) *NodeApplyConfigura // If called multiple times, the UID field is set to the value of the last call. func (b *NodeApplyConfiguration) WithUID(value types.UID) *NodeApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -122,7 +122,7 @@ func (b *NodeApplyConfiguration) WithUID(value types.UID) *NodeApplyConfiguratio // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *NodeApplyConfiguration) WithResourceVersion(value string) *NodeApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -131,25 +131,25 @@ func (b *NodeApplyConfiguration) WithResourceVersion(value string) *NodeApplyCon // If called multiple times, the Generation field is set to the value of the last call. func (b *NodeApplyConfiguration) WithGeneration(value int64) *NodeApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CreationTimestamp field is set to the value of the last call. -func (b *NodeApplyConfiguration) WithCreationTimestamp(value metav1.Time) *NodeApplyConfiguration { +func (b *NodeApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *NodeApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. -func (b *NodeApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *NodeApplyConfiguration { +func (b *NodeApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *NodeApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -158,7 +158,7 @@ func (b *NodeApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *NodeA // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. 
func (b *NodeApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *NodeApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -168,11 +168,11 @@ func (b *NodeApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *No // overwriting an existing map entries in Labels field with the same key. func (b *NodeApplyConfiguration) WithLabels(entries map[string]string) *NodeApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -183,11 +183,11 @@ func (b *NodeApplyConfiguration) WithLabels(entries map[string]string) *NodeAppl // overwriting an existing map entries in Annotations field with the same key. func (b *NodeApplyConfiguration) WithAnnotations(entries map[string]string) *NodeApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -195,13 +195,13 @@ func (b *NodeApplyConfiguration) WithAnnotations(entries map[string]string) *Nod // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the OwnerReferences field. -func (b *NodeApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *NodeApplyConfiguration { +func (b *NodeApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *NodeApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -212,14 +212,14 @@ func (b *NodeApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenc func (b *NodeApplyConfiguration) WithFinalizers(values ...string) *NodeApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } func (b *NodeApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} } } @@ -242,5 +242,5 @@ func (b *NodeApplyConfiguration) WithStatus(value *NodeStatusApplyConfiguration) // GetName retrieves the value of the Name field in the declarative configuration. 
func (b *NodeApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/nodespec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/nodespec.go index 37965a138..a0732e78a 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/nodespec.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/nodespec.go @@ -3,14 +3,15 @@ package v1 import ( - v1 "github.com/openshift/api/config/v1" + configv1 "github.com/openshift/api/config/v1" ) // NodeSpecApplyConfiguration represents a declarative configuration of the NodeSpec type for use // with apply. type NodeSpecApplyConfiguration struct { - CgroupMode *v1.CgroupMode `json:"cgroupMode,omitempty"` - WorkerLatencyProfile *v1.WorkerLatencyProfileType `json:"workerLatencyProfile,omitempty"` + CgroupMode *configv1.CgroupMode `json:"cgroupMode,omitempty"` + WorkerLatencyProfile *configv1.WorkerLatencyProfileType `json:"workerLatencyProfile,omitempty"` + MinimumKubeletVersion *string `json:"minimumKubeletVersion,omitempty"` } // NodeSpecApplyConfiguration constructs a declarative configuration of the NodeSpec type for use with @@ -22,7 +23,7 @@ func NodeSpec() *NodeSpecApplyConfiguration { // WithCgroupMode sets the CgroupMode field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CgroupMode field is set to the value of the last call. -func (b *NodeSpecApplyConfiguration) WithCgroupMode(value v1.CgroupMode) *NodeSpecApplyConfiguration { +func (b *NodeSpecApplyConfiguration) WithCgroupMode(value configv1.CgroupMode) *NodeSpecApplyConfiguration { b.CgroupMode = &value return b } @@ -30,7 +31,15 @@ func (b *NodeSpecApplyConfiguration) WithCgroupMode(value v1.CgroupMode) *NodeSp // WithWorkerLatencyProfile sets the WorkerLatencyProfile field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the WorkerLatencyProfile field is set to the value of the last call. -func (b *NodeSpecApplyConfiguration) WithWorkerLatencyProfile(value v1.WorkerLatencyProfileType) *NodeSpecApplyConfiguration { +func (b *NodeSpecApplyConfiguration) WithWorkerLatencyProfile(value configv1.WorkerLatencyProfileType) *NodeSpecApplyConfiguration { b.WorkerLatencyProfile = &value return b } + +// WithMinimumKubeletVersion sets the MinimumKubeletVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the MinimumKubeletVersion field is set to the value of the last call. 
+func (b *NodeSpecApplyConfiguration) WithMinimumKubeletVersion(value string) *NodeSpecApplyConfiguration { + b.MinimumKubeletVersion = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/nodestatus.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/nodestatus.go index e36466526..ee6ebd99e 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/nodestatus.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/nodestatus.go @@ -3,13 +3,13 @@ package v1 import ( - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) // NodeStatusApplyConfiguration represents a declarative configuration of the NodeStatus type for use // with apply. type NodeStatusApplyConfiguration struct { - Conditions []v1.ConditionApplyConfiguration `json:"conditions,omitempty"` + Conditions []metav1.ConditionApplyConfiguration `json:"conditions,omitempty"` } // NodeStatusApplyConfiguration constructs a declarative configuration of the NodeStatus type for use with @@ -21,7 +21,7 @@ func NodeStatus() *NodeStatusApplyConfiguration { // WithConditions adds the given value to the Conditions field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the Conditions field. -func (b *NodeStatusApplyConfiguration) WithConditions(values ...*v1.ConditionApplyConfiguration) *NodeStatusApplyConfiguration { +func (b *NodeStatusApplyConfiguration) WithConditions(values ...*metav1.ConditionApplyConfiguration) *NodeStatusApplyConfiguration { for i := range values { if values[i] == nil { panic("nil value passed to WithConditions") diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/nutanixplatformloadbalancer.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/nutanixplatformloadbalancer.go index 3578f512b..84d3b7ade 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/nutanixplatformloadbalancer.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/nutanixplatformloadbalancer.go @@ -3,13 +3,13 @@ package v1 import ( - v1 "github.com/openshift/api/config/v1" + configv1 "github.com/openshift/api/config/v1" ) // NutanixPlatformLoadBalancerApplyConfiguration represents a declarative configuration of the NutanixPlatformLoadBalancer type for use // with apply. type NutanixPlatformLoadBalancerApplyConfiguration struct { - Type *v1.PlatformLoadBalancerType `json:"type,omitempty"` + Type *configv1.PlatformLoadBalancerType `json:"type,omitempty"` } // NutanixPlatformLoadBalancerApplyConfiguration constructs a declarative configuration of the NutanixPlatformLoadBalancer type for use with @@ -21,7 +21,7 @@ func NutanixPlatformLoadBalancer() *NutanixPlatformLoadBalancerApplyConfiguratio // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. 
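(Not part of the diff above — an illustrative sketch only.) The regenerated NodeSpec builder exposes the new spec.minimumKubeletVersion field; under the assumption of a typed OpenShift config clientset and server-side apply, it could be used roughly as follows. The package name, client variable, field-manager name, and version value are hypothetical.

package example

import (
	"context"

	configv1 "github.com/openshift/api/config/v1"
	applyconfigv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1"
	configclient "github.com/openshift/client-go/config/clientset/versioned"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// applyMinimumKubeletVersion builds an apply configuration for the
// cluster-scoped Node config object and applies only the fields set here.
func applyMinimumKubeletVersion(ctx context.Context, client configclient.Interface) error {
	node := applyconfigv1.Node("cluster").
		WithSpec(applyconfigv1.NodeSpec().
			WithCgroupMode(configv1.CgroupModeV2). // chained "With" calls; the last call wins
			WithMinimumKubeletVersion("1.30.0"))   // hypothetical version value
	_, err := client.ConfigV1().Nodes().Apply(ctx, node,
		metav1.ApplyOptions{FieldManager: "cert-manager-operator", Force: true}) // field-manager name assumed
	return err
}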
-func (b *NutanixPlatformLoadBalancerApplyConfiguration) WithType(value v1.PlatformLoadBalancerType) *NutanixPlatformLoadBalancerApplyConfiguration { +func (b *NutanixPlatformLoadBalancerApplyConfiguration) WithType(value configv1.PlatformLoadBalancerType) *NutanixPlatformLoadBalancerApplyConfiguration { b.Type = &value return b } diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/nutanixresourceidentifier.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/nutanixresourceidentifier.go index f06b7c5d9..5e9b095d8 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/nutanixresourceidentifier.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/nutanixresourceidentifier.go @@ -3,15 +3,15 @@ package v1 import ( - v1 "github.com/openshift/api/config/v1" + configv1 "github.com/openshift/api/config/v1" ) // NutanixResourceIdentifierApplyConfiguration represents a declarative configuration of the NutanixResourceIdentifier type for use // with apply. type NutanixResourceIdentifierApplyConfiguration struct { - Type *v1.NutanixIdentifierType `json:"type,omitempty"` - UUID *string `json:"uuid,omitempty"` - Name *string `json:"name,omitempty"` + Type *configv1.NutanixIdentifierType `json:"type,omitempty"` + UUID *string `json:"uuid,omitempty"` + Name *string `json:"name,omitempty"` } // NutanixResourceIdentifierApplyConfiguration constructs a declarative configuration of the NutanixResourceIdentifier type for use with @@ -23,7 +23,7 @@ func NutanixResourceIdentifier() *NutanixResourceIdentifierApplyConfiguration { // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. -func (b *NutanixResourceIdentifierApplyConfiguration) WithType(value v1.NutanixIdentifierType) *NutanixResourceIdentifierApplyConfiguration { +func (b *NutanixResourceIdentifierApplyConfiguration) WithType(value configv1.NutanixIdentifierType) *NutanixResourceIdentifierApplyConfiguration { b.Type = &value return b } diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/oauth.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/oauth.go index 61d6739c4..1c9589c08 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/oauth.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/oauth.go @@ -3,21 +3,21 @@ package v1 import ( - apiconfigv1 "github.com/openshift/api/config/v1" + configv1 "github.com/openshift/api/config/v1" internal "github.com/openshift/client-go/config/applyconfigurations/internal" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) // OAuthApplyConfiguration represents a declarative configuration of the OAuth type for use // with apply. 
type OAuthApplyConfiguration struct { - v1.TypeMetaApplyConfiguration `json:",inline"` - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Spec *OAuthSpecApplyConfiguration `json:"spec,omitempty"` - Status *apiconfigv1.OAuthStatus `json:"status,omitempty"` + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *OAuthSpecApplyConfiguration `json:"spec,omitempty"` + Status *configv1.OAuthStatus `json:"status,omitempty"` } // OAuth constructs a declarative configuration of the OAuth type for use with @@ -41,18 +41,18 @@ func OAuth(name string) *OAuthApplyConfiguration { // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! -func ExtractOAuth(oAuth *apiconfigv1.OAuth, fieldManager string) (*OAuthApplyConfiguration, error) { +func ExtractOAuth(oAuth *configv1.OAuth, fieldManager string) (*OAuthApplyConfiguration, error) { return extractOAuth(oAuth, fieldManager, "") } // ExtractOAuthStatus is the same as ExtractOAuth except // that it extracts the status subresource applied configuration. // Experimental! -func ExtractOAuthStatus(oAuth *apiconfigv1.OAuth, fieldManager string) (*OAuthApplyConfiguration, error) { +func ExtractOAuthStatus(oAuth *configv1.OAuth, fieldManager string) (*OAuthApplyConfiguration, error) { return extractOAuth(oAuth, fieldManager, "status") } -func extractOAuth(oAuth *apiconfigv1.OAuth, fieldManager string, subresource string) (*OAuthApplyConfiguration, error) { +func extractOAuth(oAuth *configv1.OAuth, fieldManager string, subresource string) (*OAuthApplyConfiguration, error) { b := &OAuthApplyConfiguration{} err := managedfields.ExtractInto(oAuth, internal.Parser().Type("com.github.openshift.api.config.v1.OAuth"), fieldManager, b, subresource) if err != nil { @@ -69,7 +69,7 @@ func extractOAuth(oAuth *apiconfigv1.OAuth, fieldManager string, subresource str // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *OAuthApplyConfiguration) WithKind(value string) *OAuthApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -77,7 +77,7 @@ func (b *OAuthApplyConfiguration) WithKind(value string) *OAuthApplyConfiguratio // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *OAuthApplyConfiguration) WithAPIVersion(value string) *OAuthApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -86,7 +86,7 @@ func (b *OAuthApplyConfiguration) WithAPIVersion(value string) *OAuthApplyConfig // If called multiple times, the Name field is set to the value of the last call. func (b *OAuthApplyConfiguration) WithName(value string) *OAuthApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -95,7 +95,7 @@ func (b *OAuthApplyConfiguration) WithName(value string) *OAuthApplyConfiguratio // If called multiple times, the GenerateName field is set to the value of the last call. 
func (b *OAuthApplyConfiguration) WithGenerateName(value string) *OAuthApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -104,7 +104,7 @@ func (b *OAuthApplyConfiguration) WithGenerateName(value string) *OAuthApplyConf // If called multiple times, the Namespace field is set to the value of the last call. func (b *OAuthApplyConfiguration) WithNamespace(value string) *OAuthApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -113,7 +113,7 @@ func (b *OAuthApplyConfiguration) WithNamespace(value string) *OAuthApplyConfigu // If called multiple times, the UID field is set to the value of the last call. func (b *OAuthApplyConfiguration) WithUID(value types.UID) *OAuthApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -122,7 +122,7 @@ func (b *OAuthApplyConfiguration) WithUID(value types.UID) *OAuthApplyConfigurat // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *OAuthApplyConfiguration) WithResourceVersion(value string) *OAuthApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -131,25 +131,25 @@ func (b *OAuthApplyConfiguration) WithResourceVersion(value string) *OAuthApplyC // If called multiple times, the Generation field is set to the value of the last call. func (b *OAuthApplyConfiguration) WithGeneration(value int64) *OAuthApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CreationTimestamp field is set to the value of the last call. -func (b *OAuthApplyConfiguration) WithCreationTimestamp(value metav1.Time) *OAuthApplyConfiguration { +func (b *OAuthApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *OAuthApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. -func (b *OAuthApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *OAuthApplyConfiguration { +func (b *OAuthApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *OAuthApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -158,7 +158,7 @@ func (b *OAuthApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *OAut // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. 
func (b *OAuthApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *OAuthApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -168,11 +168,11 @@ func (b *OAuthApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *O // overwriting an existing map entries in Labels field with the same key. func (b *OAuthApplyConfiguration) WithLabels(entries map[string]string) *OAuthApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -183,11 +183,11 @@ func (b *OAuthApplyConfiguration) WithLabels(entries map[string]string) *OAuthAp // overwriting an existing map entries in Annotations field with the same key. func (b *OAuthApplyConfiguration) WithAnnotations(entries map[string]string) *OAuthApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -195,13 +195,13 @@ func (b *OAuthApplyConfiguration) WithAnnotations(entries map[string]string) *OA // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the OwnerReferences field. 
-func (b *OAuthApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *OAuthApplyConfiguration { +func (b *OAuthApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *OAuthApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -212,14 +212,14 @@ func (b *OAuthApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferen func (b *OAuthApplyConfiguration) WithFinalizers(values ...string) *OAuthApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } func (b *OAuthApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} } } @@ -234,7 +234,7 @@ func (b *OAuthApplyConfiguration) WithSpec(value *OAuthSpecApplyConfiguration) * // WithStatus sets the Status field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Status field is set to the value of the last call. -func (b *OAuthApplyConfiguration) WithStatus(value apiconfigv1.OAuthStatus) *OAuthApplyConfiguration { +func (b *OAuthApplyConfiguration) WithStatus(value configv1.OAuthStatus) *OAuthApplyConfiguration { b.Status = &value return b } @@ -242,5 +242,5 @@ func (b *OAuthApplyConfiguration) WithStatus(value apiconfigv1.OAuthStatus) *OAu // GetName retrieves the value of the Name field in the declarative configuration. func (b *OAuthApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/openidclaims.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/openidclaims.go index 18c2a768f..8f11192c5 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/openidclaims.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/openidclaims.go @@ -3,16 +3,16 @@ package v1 import ( - v1 "github.com/openshift/api/config/v1" + configv1 "github.com/openshift/api/config/v1" ) // OpenIDClaimsApplyConfiguration represents a declarative configuration of the OpenIDClaims type for use // with apply. 
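
(Aside, not part of the patch.) The alias churn in oauth.go above is cosmetic for callers: the embedded meta builders are still promoted, so the fluent setters and the Extract helpers keep the same signatures. A minimal sketch of how the builder declared in that file might be used; the field-manager string and label values are made-up placeholders.

package main

import (
	"fmt"

	configv1 "github.com/openshift/api/config/v1"
	applyconfigv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1"
)

func main() {
	// Construct an apply configuration with the generated fluent setters;
	// OAuth("cluster") pre-populates the name (and type metadata).
	oauthApply := applyconfigv1.OAuth("cluster").
		WithLabels(map[string]string{"managed-by": "example"}).
		WithAnnotations(map[string]string{"example.com/note": "sketch"})
	fmt.Println(*oauthApply.GetName()) // "cluster"

	// ExtractOAuth (declared in the file above) goes the other way: it derives
	// an apply configuration from a live object for a given field manager.
	live := &configv1.OAuth{}
	live.Name = "cluster"
	extracted, err := applyconfigv1.ExtractOAuth(live, "example-field-manager")
	fmt.Println(extracted != nil, err == nil)
}
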
type OpenIDClaimsApplyConfiguration struct { - PreferredUsername []string `json:"preferredUsername,omitempty"` - Name []string `json:"name,omitempty"` - Email []string `json:"email,omitempty"` - Groups []v1.OpenIDClaim `json:"groups,omitempty"` + PreferredUsername []string `json:"preferredUsername,omitempty"` + Name []string `json:"name,omitempty"` + Email []string `json:"email,omitempty"` + Groups []configv1.OpenIDClaim `json:"groups,omitempty"` } // OpenIDClaimsApplyConfiguration constructs a declarative configuration of the OpenIDClaims type for use with @@ -54,7 +54,7 @@ func (b *OpenIDClaimsApplyConfiguration) WithEmail(values ...string) *OpenIDClai // WithGroups adds the given value to the Groups field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the Groups field. -func (b *OpenIDClaimsApplyConfiguration) WithGroups(values ...v1.OpenIDClaim) *OpenIDClaimsApplyConfiguration { +func (b *OpenIDClaimsApplyConfiguration) WithGroups(values ...configv1.OpenIDClaim) *OpenIDClaimsApplyConfiguration { for i := range values { b.Groups = append(b.Groups, values[i]) } diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/openstackplatformloadbalancer.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/openstackplatformloadbalancer.go index 3e4990789..f65d682d5 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/openstackplatformloadbalancer.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/openstackplatformloadbalancer.go @@ -3,13 +3,13 @@ package v1 import ( - v1 "github.com/openshift/api/config/v1" + configv1 "github.com/openshift/api/config/v1" ) // OpenStackPlatformLoadBalancerApplyConfiguration represents a declarative configuration of the OpenStackPlatformLoadBalancer type for use // with apply. type OpenStackPlatformLoadBalancerApplyConfiguration struct { - Type *v1.PlatformLoadBalancerType `json:"type,omitempty"` + Type *configv1.PlatformLoadBalancerType `json:"type,omitempty"` } // OpenStackPlatformLoadBalancerApplyConfiguration constructs a declarative configuration of the OpenStackPlatformLoadBalancer type for use with @@ -21,7 +21,7 @@ func OpenStackPlatformLoadBalancer() *OpenStackPlatformLoadBalancerApplyConfigur // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. 
-func (b *OpenStackPlatformLoadBalancerApplyConfiguration) WithType(value v1.PlatformLoadBalancerType) *OpenStackPlatformLoadBalancerApplyConfiguration { +func (b *OpenStackPlatformLoadBalancerApplyConfiguration) WithType(value configv1.PlatformLoadBalancerType) *OpenStackPlatformLoadBalancerApplyConfiguration { b.Type = &value return b } diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/openstackplatformspec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/openstackplatformspec.go index 393a1bb93..af43c8330 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/openstackplatformspec.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/openstackplatformspec.go @@ -3,15 +3,15 @@ package v1 import ( - v1 "github.com/openshift/api/config/v1" + configv1 "github.com/openshift/api/config/v1" ) // OpenStackPlatformSpecApplyConfiguration represents a declarative configuration of the OpenStackPlatformSpec type for use // with apply. type OpenStackPlatformSpecApplyConfiguration struct { - APIServerInternalIPs []v1.IP `json:"apiServerInternalIPs,omitempty"` - IngressIPs []v1.IP `json:"ingressIPs,omitempty"` - MachineNetworks []v1.CIDR `json:"machineNetworks,omitempty"` + APIServerInternalIPs []configv1.IP `json:"apiServerInternalIPs,omitempty"` + IngressIPs []configv1.IP `json:"ingressIPs,omitempty"` + MachineNetworks []configv1.CIDR `json:"machineNetworks,omitempty"` } // OpenStackPlatformSpecApplyConfiguration constructs a declarative configuration of the OpenStackPlatformSpec type for use with @@ -23,7 +23,7 @@ func OpenStackPlatformSpec() *OpenStackPlatformSpecApplyConfiguration { // WithAPIServerInternalIPs adds the given value to the APIServerInternalIPs field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the APIServerInternalIPs field. -func (b *OpenStackPlatformSpecApplyConfiguration) WithAPIServerInternalIPs(values ...v1.IP) *OpenStackPlatformSpecApplyConfiguration { +func (b *OpenStackPlatformSpecApplyConfiguration) WithAPIServerInternalIPs(values ...configv1.IP) *OpenStackPlatformSpecApplyConfiguration { for i := range values { b.APIServerInternalIPs = append(b.APIServerInternalIPs, values[i]) } @@ -33,7 +33,7 @@ func (b *OpenStackPlatformSpecApplyConfiguration) WithAPIServerInternalIPs(value // WithIngressIPs adds the given value to the IngressIPs field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the IngressIPs field. -func (b *OpenStackPlatformSpecApplyConfiguration) WithIngressIPs(values ...v1.IP) *OpenStackPlatformSpecApplyConfiguration { +func (b *OpenStackPlatformSpecApplyConfiguration) WithIngressIPs(values ...configv1.IP) *OpenStackPlatformSpecApplyConfiguration { for i := range values { b.IngressIPs = append(b.IngressIPs, values[i]) } @@ -43,7 +43,7 @@ func (b *OpenStackPlatformSpecApplyConfiguration) WithIngressIPs(values ...v1.IP // WithMachineNetworks adds the given value to the MachineNetworks field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. 
// If called multiple times, values provided by each call will be appended to the MachineNetworks field. -func (b *OpenStackPlatformSpecApplyConfiguration) WithMachineNetworks(values ...v1.CIDR) *OpenStackPlatformSpecApplyConfiguration { +func (b *OpenStackPlatformSpecApplyConfiguration) WithMachineNetworks(values ...configv1.CIDR) *OpenStackPlatformSpecApplyConfiguration { for i := range values { b.MachineNetworks = append(b.MachineNetworks, values[i]) } diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/operatorhub.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/operatorhub.go index bda9b75e8..df95eb84d 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/operatorhub.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/operatorhub.go @@ -3,21 +3,21 @@ package v1 import ( - apiconfigv1 "github.com/openshift/api/config/v1" + configv1 "github.com/openshift/api/config/v1" internal "github.com/openshift/client-go/config/applyconfigurations/internal" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) // OperatorHubApplyConfiguration represents a declarative configuration of the OperatorHub type for use // with apply. type OperatorHubApplyConfiguration struct { - v1.TypeMetaApplyConfiguration `json:",inline"` - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Spec *OperatorHubSpecApplyConfiguration `json:"spec,omitempty"` - Status *OperatorHubStatusApplyConfiguration `json:"status,omitempty"` + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *OperatorHubSpecApplyConfiguration `json:"spec,omitempty"` + Status *OperatorHubStatusApplyConfiguration `json:"status,omitempty"` } // OperatorHub constructs a declarative configuration of the OperatorHub type for use with @@ -41,18 +41,18 @@ func OperatorHub(name string) *OperatorHubApplyConfiguration { // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! -func ExtractOperatorHub(operatorHub *apiconfigv1.OperatorHub, fieldManager string) (*OperatorHubApplyConfiguration, error) { +func ExtractOperatorHub(operatorHub *configv1.OperatorHub, fieldManager string) (*OperatorHubApplyConfiguration, error) { return extractOperatorHub(operatorHub, fieldManager, "") } // ExtractOperatorHubStatus is the same as ExtractOperatorHub except // that it extracts the status subresource applied configuration. // Experimental! 
-func ExtractOperatorHubStatus(operatorHub *apiconfigv1.OperatorHub, fieldManager string) (*OperatorHubApplyConfiguration, error) { +func ExtractOperatorHubStatus(operatorHub *configv1.OperatorHub, fieldManager string) (*OperatorHubApplyConfiguration, error) { return extractOperatorHub(operatorHub, fieldManager, "status") } -func extractOperatorHub(operatorHub *apiconfigv1.OperatorHub, fieldManager string, subresource string) (*OperatorHubApplyConfiguration, error) { +func extractOperatorHub(operatorHub *configv1.OperatorHub, fieldManager string, subresource string) (*OperatorHubApplyConfiguration, error) { b := &OperatorHubApplyConfiguration{} err := managedfields.ExtractInto(operatorHub, internal.Parser().Type("com.github.openshift.api.config.v1.OperatorHub"), fieldManager, b, subresource) if err != nil { @@ -69,7 +69,7 @@ func extractOperatorHub(operatorHub *apiconfigv1.OperatorHub, fieldManager strin // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *OperatorHubApplyConfiguration) WithKind(value string) *OperatorHubApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -77,7 +77,7 @@ func (b *OperatorHubApplyConfiguration) WithKind(value string) *OperatorHubApply // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *OperatorHubApplyConfiguration) WithAPIVersion(value string) *OperatorHubApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -86,7 +86,7 @@ func (b *OperatorHubApplyConfiguration) WithAPIVersion(value string) *OperatorHu // If called multiple times, the Name field is set to the value of the last call. func (b *OperatorHubApplyConfiguration) WithName(value string) *OperatorHubApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -95,7 +95,7 @@ func (b *OperatorHubApplyConfiguration) WithName(value string) *OperatorHubApply // If called multiple times, the GenerateName field is set to the value of the last call. func (b *OperatorHubApplyConfiguration) WithGenerateName(value string) *OperatorHubApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -104,7 +104,7 @@ func (b *OperatorHubApplyConfiguration) WithGenerateName(value string) *Operator // If called multiple times, the Namespace field is set to the value of the last call. func (b *OperatorHubApplyConfiguration) WithNamespace(value string) *OperatorHubApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -113,7 +113,7 @@ func (b *OperatorHubApplyConfiguration) WithNamespace(value string) *OperatorHub // If called multiple times, the UID field is set to the value of the last call. 
func (b *OperatorHubApplyConfiguration) WithUID(value types.UID) *OperatorHubApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -122,7 +122,7 @@ func (b *OperatorHubApplyConfiguration) WithUID(value types.UID) *OperatorHubApp // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *OperatorHubApplyConfiguration) WithResourceVersion(value string) *OperatorHubApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -131,25 +131,25 @@ func (b *OperatorHubApplyConfiguration) WithResourceVersion(value string) *Opera // If called multiple times, the Generation field is set to the value of the last call. func (b *OperatorHubApplyConfiguration) WithGeneration(value int64) *OperatorHubApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CreationTimestamp field is set to the value of the last call. -func (b *OperatorHubApplyConfiguration) WithCreationTimestamp(value metav1.Time) *OperatorHubApplyConfiguration { +func (b *OperatorHubApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *OperatorHubApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. -func (b *OperatorHubApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *OperatorHubApplyConfiguration { +func (b *OperatorHubApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *OperatorHubApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -158,7 +158,7 @@ func (b *OperatorHubApplyConfiguration) WithDeletionTimestamp(value metav1.Time) // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *OperatorHubApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *OperatorHubApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -168,11 +168,11 @@ func (b *OperatorHubApplyConfiguration) WithDeletionGracePeriodSeconds(value int // overwriting an existing map entries in Labels field with the same key. 
func (b *OperatorHubApplyConfiguration) WithLabels(entries map[string]string) *OperatorHubApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -183,11 +183,11 @@ func (b *OperatorHubApplyConfiguration) WithLabels(entries map[string]string) *O // overwriting an existing map entries in Annotations field with the same key. func (b *OperatorHubApplyConfiguration) WithAnnotations(entries map[string]string) *OperatorHubApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -195,13 +195,13 @@ func (b *OperatorHubApplyConfiguration) WithAnnotations(entries map[string]strin // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the OwnerReferences field. -func (b *OperatorHubApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *OperatorHubApplyConfiguration { +func (b *OperatorHubApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *OperatorHubApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -212,14 +212,14 @@ func (b *OperatorHubApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerR func (b *OperatorHubApplyConfiguration) WithFinalizers(values ...string) *OperatorHubApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } func (b *OperatorHubApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} } } @@ -242,5 +242,5 @@ func (b *OperatorHubApplyConfiguration) WithStatus(value *OperatorHubStatusApply // GetName retrieves the value of the Name field in the declarative configuration. 
func (b *OperatorHubApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/ovirtplatformloadbalancer.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/ovirtplatformloadbalancer.go index b679a55fd..e81d48044 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/ovirtplatformloadbalancer.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/ovirtplatformloadbalancer.go @@ -3,13 +3,13 @@ package v1 import ( - v1 "github.com/openshift/api/config/v1" + configv1 "github.com/openshift/api/config/v1" ) // OvirtPlatformLoadBalancerApplyConfiguration represents a declarative configuration of the OvirtPlatformLoadBalancer type for use // with apply. type OvirtPlatformLoadBalancerApplyConfiguration struct { - Type *v1.PlatformLoadBalancerType `json:"type,omitempty"` + Type *configv1.PlatformLoadBalancerType `json:"type,omitempty"` } // OvirtPlatformLoadBalancerApplyConfiguration constructs a declarative configuration of the OvirtPlatformLoadBalancer type for use with @@ -21,7 +21,7 @@ func OvirtPlatformLoadBalancer() *OvirtPlatformLoadBalancerApplyConfiguration { // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. -func (b *OvirtPlatformLoadBalancerApplyConfiguration) WithType(value v1.PlatformLoadBalancerType) *OvirtPlatformLoadBalancerApplyConfiguration { +func (b *OvirtPlatformLoadBalancerApplyConfiguration) WithType(value configv1.PlatformLoadBalancerType) *OvirtPlatformLoadBalancerApplyConfiguration { b.Type = &value return b } diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/platformspec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/platformspec.go index 6ff5bcf0c..517ac0bfc 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/platformspec.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/platformspec.go @@ -3,25 +3,25 @@ package v1 import ( - v1 "github.com/openshift/api/config/v1" + configv1 "github.com/openshift/api/config/v1" ) // PlatformSpecApplyConfiguration represents a declarative configuration of the PlatformSpec type for use // with apply. 
type PlatformSpecApplyConfiguration struct { - Type *v1.PlatformType `json:"type,omitempty"` + Type *configv1.PlatformType `json:"type,omitempty"` AWS *AWSPlatformSpecApplyConfiguration `json:"aws,omitempty"` - Azure *v1.AzurePlatformSpec `json:"azure,omitempty"` - GCP *v1.GCPPlatformSpec `json:"gcp,omitempty"` + Azure *configv1.AzurePlatformSpec `json:"azure,omitempty"` + GCP *configv1.GCPPlatformSpec `json:"gcp,omitempty"` BareMetal *BareMetalPlatformSpecApplyConfiguration `json:"baremetal,omitempty"` OpenStack *OpenStackPlatformSpecApplyConfiguration `json:"openstack,omitempty"` - Ovirt *v1.OvirtPlatformSpec `json:"ovirt,omitempty"` + Ovirt *configv1.OvirtPlatformSpec `json:"ovirt,omitempty"` VSphere *VSpherePlatformSpecApplyConfiguration `json:"vsphere,omitempty"` - IBMCloud *v1.IBMCloudPlatformSpec `json:"ibmcloud,omitempty"` - Kubevirt *v1.KubevirtPlatformSpec `json:"kubevirt,omitempty"` - EquinixMetal *v1.EquinixMetalPlatformSpec `json:"equinixMetal,omitempty"` + IBMCloud *configv1.IBMCloudPlatformSpec `json:"ibmcloud,omitempty"` + Kubevirt *configv1.KubevirtPlatformSpec `json:"kubevirt,omitempty"` + EquinixMetal *configv1.EquinixMetalPlatformSpec `json:"equinixMetal,omitempty"` PowerVS *PowerVSPlatformSpecApplyConfiguration `json:"powervs,omitempty"` - AlibabaCloud *v1.AlibabaCloudPlatformSpec `json:"alibabaCloud,omitempty"` + AlibabaCloud *configv1.AlibabaCloudPlatformSpec `json:"alibabaCloud,omitempty"` Nutanix *NutanixPlatformSpecApplyConfiguration `json:"nutanix,omitempty"` External *ExternalPlatformSpecApplyConfiguration `json:"external,omitempty"` } @@ -35,7 +35,7 @@ func PlatformSpec() *PlatformSpecApplyConfiguration { // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. -func (b *PlatformSpecApplyConfiguration) WithType(value v1.PlatformType) *PlatformSpecApplyConfiguration { +func (b *PlatformSpecApplyConfiguration) WithType(value configv1.PlatformType) *PlatformSpecApplyConfiguration { b.Type = &value return b } @@ -51,7 +51,7 @@ func (b *PlatformSpecApplyConfiguration) WithAWS(value *AWSPlatformSpecApplyConf // WithAzure sets the Azure field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Azure field is set to the value of the last call. -func (b *PlatformSpecApplyConfiguration) WithAzure(value v1.AzurePlatformSpec) *PlatformSpecApplyConfiguration { +func (b *PlatformSpecApplyConfiguration) WithAzure(value configv1.AzurePlatformSpec) *PlatformSpecApplyConfiguration { b.Azure = &value return b } @@ -59,7 +59,7 @@ func (b *PlatformSpecApplyConfiguration) WithAzure(value v1.AzurePlatformSpec) * // WithGCP sets the GCP field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the GCP field is set to the value of the last call. 
-func (b *PlatformSpecApplyConfiguration) WithGCP(value v1.GCPPlatformSpec) *PlatformSpecApplyConfiguration { +func (b *PlatformSpecApplyConfiguration) WithGCP(value configv1.GCPPlatformSpec) *PlatformSpecApplyConfiguration { b.GCP = &value return b } @@ -83,7 +83,7 @@ func (b *PlatformSpecApplyConfiguration) WithOpenStack(value *OpenStackPlatformS // WithOvirt sets the Ovirt field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Ovirt field is set to the value of the last call. -func (b *PlatformSpecApplyConfiguration) WithOvirt(value v1.OvirtPlatformSpec) *PlatformSpecApplyConfiguration { +func (b *PlatformSpecApplyConfiguration) WithOvirt(value configv1.OvirtPlatformSpec) *PlatformSpecApplyConfiguration { b.Ovirt = &value return b } @@ -99,7 +99,7 @@ func (b *PlatformSpecApplyConfiguration) WithVSphere(value *VSpherePlatformSpecA // WithIBMCloud sets the IBMCloud field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the IBMCloud field is set to the value of the last call. -func (b *PlatformSpecApplyConfiguration) WithIBMCloud(value v1.IBMCloudPlatformSpec) *PlatformSpecApplyConfiguration { +func (b *PlatformSpecApplyConfiguration) WithIBMCloud(value configv1.IBMCloudPlatformSpec) *PlatformSpecApplyConfiguration { b.IBMCloud = &value return b } @@ -107,7 +107,7 @@ func (b *PlatformSpecApplyConfiguration) WithIBMCloud(value v1.IBMCloudPlatformS // WithKubevirt sets the Kubevirt field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kubevirt field is set to the value of the last call. -func (b *PlatformSpecApplyConfiguration) WithKubevirt(value v1.KubevirtPlatformSpec) *PlatformSpecApplyConfiguration { +func (b *PlatformSpecApplyConfiguration) WithKubevirt(value configv1.KubevirtPlatformSpec) *PlatformSpecApplyConfiguration { b.Kubevirt = &value return b } @@ -115,7 +115,7 @@ func (b *PlatformSpecApplyConfiguration) WithKubevirt(value v1.KubevirtPlatformS // WithEquinixMetal sets the EquinixMetal field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the EquinixMetal field is set to the value of the last call. -func (b *PlatformSpecApplyConfiguration) WithEquinixMetal(value v1.EquinixMetalPlatformSpec) *PlatformSpecApplyConfiguration { +func (b *PlatformSpecApplyConfiguration) WithEquinixMetal(value configv1.EquinixMetalPlatformSpec) *PlatformSpecApplyConfiguration { b.EquinixMetal = &value return b } @@ -131,7 +131,7 @@ func (b *PlatformSpecApplyConfiguration) WithPowerVS(value *PowerVSPlatformSpecA // WithAlibabaCloud sets the AlibabaCloud field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the AlibabaCloud field is set to the value of the last call. 
-func (b *PlatformSpecApplyConfiguration) WithAlibabaCloud(value v1.AlibabaCloudPlatformSpec) *PlatformSpecApplyConfiguration { +func (b *PlatformSpecApplyConfiguration) WithAlibabaCloud(value configv1.AlibabaCloudPlatformSpec) *PlatformSpecApplyConfiguration { b.AlibabaCloud = &value return b } diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/platformstatus.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/platformstatus.go index 6519ef8b0..e470ebd96 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/platformstatus.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/platformstatus.go @@ -3,13 +3,13 @@ package v1 import ( - v1 "github.com/openshift/api/config/v1" + configv1 "github.com/openshift/api/config/v1" ) // PlatformStatusApplyConfiguration represents a declarative configuration of the PlatformStatus type for use // with apply. type PlatformStatusApplyConfiguration struct { - Type *v1.PlatformType `json:"type,omitempty"` + Type *configv1.PlatformType `json:"type,omitempty"` AWS *AWSPlatformStatusApplyConfiguration `json:"aws,omitempty"` Azure *AzurePlatformStatusApplyConfiguration `json:"azure,omitempty"` GCP *GCPPlatformStatusApplyConfiguration `json:"gcp,omitempty"` @@ -35,7 +35,7 @@ func PlatformStatus() *PlatformStatusApplyConfiguration { // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. -func (b *PlatformStatusApplyConfiguration) WithType(value v1.PlatformType) *PlatformStatusApplyConfiguration { +func (b *PlatformStatusApplyConfiguration) WithType(value configv1.PlatformType) *PlatformStatusApplyConfiguration { b.Type = &value return b } diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/prefixedclaimmapping.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/prefixedclaimmapping.go index bf10a6891..245520433 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/prefixedclaimmapping.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/prefixedclaimmapping.go @@ -19,7 +19,7 @@ func PrefixedClaimMapping() *PrefixedClaimMappingApplyConfiguration { // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Claim field is set to the value of the last call. 
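
(Aside, not part of the patch.) platformspec.go shows why both import aliases are needed in one file: some fields take raw API types from openshift/api while others take nested apply configurations. A hedged sketch using only setters that appear in the hunks above; the configv1.OpenStackPlatformType constant is assumed from upstream openshift/api.

package platformexample

import (
	configv1 "github.com/openshift/api/config/v1"
	applyconfigv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1"
)

// openStackPlatformSpec builds a PlatformSpec apply configuration that mixes
// a raw enum value (Type) with a nested apply configuration (OpenStack).
func openStackPlatformSpec() *applyconfigv1.PlatformSpecApplyConfiguration {
	return applyconfigv1.PlatformSpec().
		WithType(configv1.OpenStackPlatformType).
		WithOpenStack(applyconfigv1.OpenStackPlatformSpec().
			// The list setters append typed string values (configv1.IP, configv1.CIDR).
			WithAPIServerInternalIPs(configv1.IP("192.0.2.10")).
			WithMachineNetworks(configv1.CIDR("192.0.2.0/24")))
}
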
func (b *PrefixedClaimMappingApplyConfiguration) WithClaim(value string) *PrefixedClaimMappingApplyConfiguration { - b.Claim = &value + b.TokenClaimMappingApplyConfiguration.Claim = &value return b } diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/profilecustomizations.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/profilecustomizations.go index f0e90acc9..c2392bab9 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/profilecustomizations.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/profilecustomizations.go @@ -3,13 +3,13 @@ package v1 import ( - v1 "github.com/openshift/api/config/v1" + configv1 "github.com/openshift/api/config/v1" ) // ProfileCustomizationsApplyConfiguration represents a declarative configuration of the ProfileCustomizations type for use // with apply. type ProfileCustomizationsApplyConfiguration struct { - DynamicResourceAllocation *v1.DRAEnablement `json:"dynamicResourceAllocation,omitempty"` + DynamicResourceAllocation *configv1.DRAEnablement `json:"dynamicResourceAllocation,omitempty"` } // ProfileCustomizationsApplyConfiguration constructs a declarative configuration of the ProfileCustomizations type for use with @@ -21,7 +21,7 @@ func ProfileCustomizations() *ProfileCustomizationsApplyConfiguration { // WithDynamicResourceAllocation sets the DynamicResourceAllocation field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DynamicResourceAllocation field is set to the value of the last call. -func (b *ProfileCustomizationsApplyConfiguration) WithDynamicResourceAllocation(value v1.DRAEnablement) *ProfileCustomizationsApplyConfiguration { +func (b *ProfileCustomizationsApplyConfiguration) WithDynamicResourceAllocation(value configv1.DRAEnablement) *ProfileCustomizationsApplyConfiguration { b.DynamicResourceAllocation = &value return b } diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/project.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/project.go index 864277b59..5c040bae4 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/project.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/project.go @@ -3,21 +3,21 @@ package v1 import ( - apiconfigv1 "github.com/openshift/api/config/v1" + configv1 "github.com/openshift/api/config/v1" internal "github.com/openshift/client-go/config/applyconfigurations/internal" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) // ProjectApplyConfiguration represents a declarative configuration of the Project type for use // with apply. 
type ProjectApplyConfiguration struct { - v1.TypeMetaApplyConfiguration `json:",inline"` - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Spec *ProjectSpecApplyConfiguration `json:"spec,omitempty"` - Status *apiconfigv1.ProjectStatus `json:"status,omitempty"` + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *ProjectSpecApplyConfiguration `json:"spec,omitempty"` + Status *configv1.ProjectStatus `json:"status,omitempty"` } // Project constructs a declarative configuration of the Project type for use with @@ -41,18 +41,18 @@ func Project(name string) *ProjectApplyConfiguration { // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! -func ExtractProject(project *apiconfigv1.Project, fieldManager string) (*ProjectApplyConfiguration, error) { +func ExtractProject(project *configv1.Project, fieldManager string) (*ProjectApplyConfiguration, error) { return extractProject(project, fieldManager, "") } // ExtractProjectStatus is the same as ExtractProject except // that it extracts the status subresource applied configuration. // Experimental! -func ExtractProjectStatus(project *apiconfigv1.Project, fieldManager string) (*ProjectApplyConfiguration, error) { +func ExtractProjectStatus(project *configv1.Project, fieldManager string) (*ProjectApplyConfiguration, error) { return extractProject(project, fieldManager, "status") } -func extractProject(project *apiconfigv1.Project, fieldManager string, subresource string) (*ProjectApplyConfiguration, error) { +func extractProject(project *configv1.Project, fieldManager string, subresource string) (*ProjectApplyConfiguration, error) { b := &ProjectApplyConfiguration{} err := managedfields.ExtractInto(project, internal.Parser().Type("com.github.openshift.api.config.v1.Project"), fieldManager, b, subresource) if err != nil { @@ -69,7 +69,7 @@ func extractProject(project *apiconfigv1.Project, fieldManager string, subresour // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *ProjectApplyConfiguration) WithKind(value string) *ProjectApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -77,7 +77,7 @@ func (b *ProjectApplyConfiguration) WithKind(value string) *ProjectApplyConfigur // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *ProjectApplyConfiguration) WithAPIVersion(value string) *ProjectApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -86,7 +86,7 @@ func (b *ProjectApplyConfiguration) WithAPIVersion(value string) *ProjectApplyCo // If called multiple times, the Name field is set to the value of the last call. func (b *ProjectApplyConfiguration) WithName(value string) *ProjectApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -95,7 +95,7 @@ func (b *ProjectApplyConfiguration) WithName(value string) *ProjectApplyConfigur // If called multiple times, the GenerateName field is set to the value of the last call. 
func (b *ProjectApplyConfiguration) WithGenerateName(value string) *ProjectApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -104,7 +104,7 @@ func (b *ProjectApplyConfiguration) WithGenerateName(value string) *ProjectApply // If called multiple times, the Namespace field is set to the value of the last call. func (b *ProjectApplyConfiguration) WithNamespace(value string) *ProjectApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -113,7 +113,7 @@ func (b *ProjectApplyConfiguration) WithNamespace(value string) *ProjectApplyCon // If called multiple times, the UID field is set to the value of the last call. func (b *ProjectApplyConfiguration) WithUID(value types.UID) *ProjectApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -122,7 +122,7 @@ func (b *ProjectApplyConfiguration) WithUID(value types.UID) *ProjectApplyConfig // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *ProjectApplyConfiguration) WithResourceVersion(value string) *ProjectApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -131,25 +131,25 @@ func (b *ProjectApplyConfiguration) WithResourceVersion(value string) *ProjectAp // If called multiple times, the Generation field is set to the value of the last call. func (b *ProjectApplyConfiguration) WithGeneration(value int64) *ProjectApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CreationTimestamp field is set to the value of the last call. -func (b *ProjectApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ProjectApplyConfiguration { +func (b *ProjectApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *ProjectApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. -func (b *ProjectApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ProjectApplyConfiguration { +func (b *ProjectApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *ProjectApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -158,7 +158,7 @@ func (b *ProjectApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *Pr // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. 
func (b *ProjectApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ProjectApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -168,11 +168,11 @@ func (b *ProjectApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) // overwriting an existing map entries in Labels field with the same key. func (b *ProjectApplyConfiguration) WithLabels(entries map[string]string) *ProjectApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -183,11 +183,11 @@ func (b *ProjectApplyConfiguration) WithLabels(entries map[string]string) *Proje // overwriting an existing map entries in Annotations field with the same key. func (b *ProjectApplyConfiguration) WithAnnotations(entries map[string]string) *ProjectApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -195,13 +195,13 @@ func (b *ProjectApplyConfiguration) WithAnnotations(entries map[string]string) * // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the OwnerReferences field. 
-func (b *ProjectApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ProjectApplyConfiguration { +func (b *ProjectApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *ProjectApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -212,14 +212,14 @@ func (b *ProjectApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerRefer func (b *ProjectApplyConfiguration) WithFinalizers(values ...string) *ProjectApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } func (b *ProjectApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} } } @@ -234,7 +234,7 @@ func (b *ProjectApplyConfiguration) WithSpec(value *ProjectSpecApplyConfiguratio // WithStatus sets the Status field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Status field is set to the value of the last call. -func (b *ProjectApplyConfiguration) WithStatus(value apiconfigv1.ProjectStatus) *ProjectApplyConfiguration { +func (b *ProjectApplyConfiguration) WithStatus(value configv1.ProjectStatus) *ProjectApplyConfiguration { b.Status = &value return b } @@ -242,5 +242,5 @@ func (b *ProjectApplyConfiguration) WithStatus(value apiconfigv1.ProjectStatus) // GetName retrieves the value of the Name field in the declarative configuration. func (b *ProjectApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/proxy.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/proxy.go index fdfe260f5..7184cbd08 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/proxy.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/proxy.go @@ -3,21 +3,21 @@ package v1 import ( - apiconfigv1 "github.com/openshift/api/config/v1" + configv1 "github.com/openshift/api/config/v1" internal "github.com/openshift/client-go/config/applyconfigurations/internal" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) // ProxyApplyConfiguration represents a declarative configuration of the Proxy type for use // with apply. 
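
(Aside, not part of the patch.) The other mechanical change in every builder above is that the generated bodies now write b.TypeMetaApplyConfiguration.Kind and b.ObjectMetaApplyConfiguration.Name instead of relying on field promotion. Both selectors refer to the same storage; spelling them out keeps the generated code unambiguous if another embedded type ever introduces a field of the same name. A tiny self-contained illustration with hypothetical types, unrelated to the vendored packages:

package main

import "fmt"

// TypeMeta stands in for the embedded apply-configuration structs.
type TypeMeta struct{ Kind *string }

// Object embeds TypeMeta, so Kind is promoted onto Object.
type Object struct {
	TypeMeta
}

func main() {
	k := "Proxy"
	o := Object{}
	o.Kind = &k                   // promoted selector
	fmt.Println(*o.TypeMeta.Kind) // qualified selector: same field, prints "Proxy"
}
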
type ProxyApplyConfiguration struct { - v1.TypeMetaApplyConfiguration `json:",inline"` - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Spec *ProxySpecApplyConfiguration `json:"spec,omitempty"` - Status *ProxyStatusApplyConfiguration `json:"status,omitempty"` + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *ProxySpecApplyConfiguration `json:"spec,omitempty"` + Status *ProxyStatusApplyConfiguration `json:"status,omitempty"` } // Proxy constructs a declarative configuration of the Proxy type for use with @@ -41,18 +41,18 @@ func Proxy(name string) *ProxyApplyConfiguration { // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! -func ExtractProxy(proxy *apiconfigv1.Proxy, fieldManager string) (*ProxyApplyConfiguration, error) { +func ExtractProxy(proxy *configv1.Proxy, fieldManager string) (*ProxyApplyConfiguration, error) { return extractProxy(proxy, fieldManager, "") } // ExtractProxyStatus is the same as ExtractProxy except // that it extracts the status subresource applied configuration. // Experimental! -func ExtractProxyStatus(proxy *apiconfigv1.Proxy, fieldManager string) (*ProxyApplyConfiguration, error) { +func ExtractProxyStatus(proxy *configv1.Proxy, fieldManager string) (*ProxyApplyConfiguration, error) { return extractProxy(proxy, fieldManager, "status") } -func extractProxy(proxy *apiconfigv1.Proxy, fieldManager string, subresource string) (*ProxyApplyConfiguration, error) { +func extractProxy(proxy *configv1.Proxy, fieldManager string, subresource string) (*ProxyApplyConfiguration, error) { b := &ProxyApplyConfiguration{} err := managedfields.ExtractInto(proxy, internal.Parser().Type("com.github.openshift.api.config.v1.Proxy"), fieldManager, b, subresource) if err != nil { @@ -69,7 +69,7 @@ func extractProxy(proxy *apiconfigv1.Proxy, fieldManager string, subresource str // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *ProxyApplyConfiguration) WithKind(value string) *ProxyApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -77,7 +77,7 @@ func (b *ProxyApplyConfiguration) WithKind(value string) *ProxyApplyConfiguratio // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *ProxyApplyConfiguration) WithAPIVersion(value string) *ProxyApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -86,7 +86,7 @@ func (b *ProxyApplyConfiguration) WithAPIVersion(value string) *ProxyApplyConfig // If called multiple times, the Name field is set to the value of the last call. func (b *ProxyApplyConfiguration) WithName(value string) *ProxyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -95,7 +95,7 @@ func (b *ProxyApplyConfiguration) WithName(value string) *ProxyApplyConfiguratio // If called multiple times, the GenerateName field is set to the value of the last call. 
func (b *ProxyApplyConfiguration) WithGenerateName(value string) *ProxyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -104,7 +104,7 @@ func (b *ProxyApplyConfiguration) WithGenerateName(value string) *ProxyApplyConf // If called multiple times, the Namespace field is set to the value of the last call. func (b *ProxyApplyConfiguration) WithNamespace(value string) *ProxyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -113,7 +113,7 @@ func (b *ProxyApplyConfiguration) WithNamespace(value string) *ProxyApplyConfigu // If called multiple times, the UID field is set to the value of the last call. func (b *ProxyApplyConfiguration) WithUID(value types.UID) *ProxyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -122,7 +122,7 @@ func (b *ProxyApplyConfiguration) WithUID(value types.UID) *ProxyApplyConfigurat // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *ProxyApplyConfiguration) WithResourceVersion(value string) *ProxyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -131,25 +131,25 @@ func (b *ProxyApplyConfiguration) WithResourceVersion(value string) *ProxyApplyC // If called multiple times, the Generation field is set to the value of the last call. func (b *ProxyApplyConfiguration) WithGeneration(value int64) *ProxyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CreationTimestamp field is set to the value of the last call. -func (b *ProxyApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ProxyApplyConfiguration { +func (b *ProxyApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *ProxyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. -func (b *ProxyApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ProxyApplyConfiguration { +func (b *ProxyApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *ProxyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -158,7 +158,7 @@ func (b *ProxyApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *Prox // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. 
func (b *ProxyApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ProxyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -168,11 +168,11 @@ func (b *ProxyApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *P // overwriting an existing map entries in Labels field with the same key. func (b *ProxyApplyConfiguration) WithLabels(entries map[string]string) *ProxyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -183,11 +183,11 @@ func (b *ProxyApplyConfiguration) WithLabels(entries map[string]string) *ProxyAp // overwriting an existing map entries in Annotations field with the same key. func (b *ProxyApplyConfiguration) WithAnnotations(entries map[string]string) *ProxyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -195,13 +195,13 @@ func (b *ProxyApplyConfiguration) WithAnnotations(entries map[string]string) *Pr // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the OwnerReferences field. 
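WithLabels and WithAnnotations are additive: each call lazily allocates the map, copies the provided entries in, and lets a later call overwrite an earlier value for the same key. A short usage sketch with the Proxy builder; "cluster" is the singleton Proxy name and applyconfigv1 is an alias chosen here for the vendored applyconfigurations package.

package main

import (
	"fmt"

	applyconfigv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1"
)

func main() {
	p := applyconfigv1.Proxy("cluster").
		WithLabels(map[string]string{"team": "ops"}).
		WithLabels(map[string]string{"team": "platform", "env": "prod"})

	// The two calls merge into one map; the second "team" value wins.
	fmt.Println(p.Labels) // map[env:prod team:platform]
}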
-func (b *ProxyApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ProxyApplyConfiguration { +func (b *ProxyApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *ProxyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -212,14 +212,14 @@ func (b *ProxyApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferen func (b *ProxyApplyConfiguration) WithFinalizers(values ...string) *ProxyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } func (b *ProxyApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} } } @@ -242,5 +242,5 @@ func (b *ProxyApplyConfiguration) WithStatus(value *ProxyStatusApplyConfiguratio // GetName retrieves the value of the Name field in the declarative configuration. func (b *ProxyApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/release.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/release.go index 4ffecd926..c8275fcde 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/release.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/release.go @@ -3,16 +3,17 @@ package v1 import ( - v1 "github.com/openshift/api/config/v1" + configv1 "github.com/openshift/api/config/v1" ) // ReleaseApplyConfiguration represents a declarative configuration of the Release type for use // with apply. type ReleaseApplyConfiguration struct { - Version *string `json:"version,omitempty"` - Image *string `json:"image,omitempty"` - URL *v1.URL `json:"url,omitempty"` - Channels []string `json:"channels,omitempty"` + Architecture *configv1.ClusterVersionArchitecture `json:"architecture,omitempty"` + Version *string `json:"version,omitempty"` + Image *string `json:"image,omitempty"` + URL *configv1.URL `json:"url,omitempty"` + Channels []string `json:"channels,omitempty"` } // ReleaseApplyConfiguration constructs a declarative configuration of the Release type for use with @@ -21,6 +22,14 @@ func Release() *ReleaseApplyConfiguration { return &ReleaseApplyConfiguration{} } +// WithArchitecture sets the Architecture field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Architecture field is set to the value of the last call. 
+func (b *ReleaseApplyConfiguration) WithArchitecture(value configv1.ClusterVersionArchitecture) *ReleaseApplyConfiguration { + b.Architecture = &value + return b +} + // WithVersion sets the Version field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Version field is set to the value of the last call. @@ -40,7 +49,7 @@ func (b *ReleaseApplyConfiguration) WithImage(value string) *ReleaseApplyConfigu // WithURL sets the URL field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the URL field is set to the value of the last call. -func (b *ReleaseApplyConfiguration) WithURL(value v1.URL) *ReleaseApplyConfiguration { +func (b *ReleaseApplyConfiguration) WithURL(value configv1.URL) *ReleaseApplyConfiguration { b.URL = &value return b } diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/repositorydigestmirrors.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/repositorydigestmirrors.go index f903170f3..96f724095 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/repositorydigestmirrors.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/repositorydigestmirrors.go @@ -3,15 +3,15 @@ package v1 import ( - v1 "github.com/openshift/api/config/v1" + configv1 "github.com/openshift/api/config/v1" ) // RepositoryDigestMirrorsApplyConfiguration represents a declarative configuration of the RepositoryDigestMirrors type for use // with apply. type RepositoryDigestMirrorsApplyConfiguration struct { - Source *string `json:"source,omitempty"` - AllowMirrorByTags *bool `json:"allowMirrorByTags,omitempty"` - Mirrors []v1.Mirror `json:"mirrors,omitempty"` + Source *string `json:"source,omitempty"` + AllowMirrorByTags *bool `json:"allowMirrorByTags,omitempty"` + Mirrors []configv1.Mirror `json:"mirrors,omitempty"` } // RepositoryDigestMirrorsApplyConfiguration constructs a declarative configuration of the RepositoryDigestMirrors type for use with @@ -39,7 +39,7 @@ func (b *RepositoryDigestMirrorsApplyConfiguration) WithAllowMirrorByTags(value // WithMirrors adds the given value to the Mirrors field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the Mirrors field. 
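The Release apply configuration gains an Architecture field alongside version, image, URL, and channels. A sketch of building one with the new setter; the ClusterVersionArchitecture value "Multi" is an assumption about the string enum, and the version shown is a placeholder.

package main

import (
	"fmt"

	configv1 "github.com/openshift/api/config/v1"
	applyconfigv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1"
)

func main() {
	// ClusterVersionArchitecture is a string-backed type; "Multi" is assumed
	// to be the value used for heterogeneous-architecture payloads.
	rel := applyconfigv1.Release().
		WithArchitecture(configv1.ClusterVersionArchitecture("Multi")).
		WithVersion("4.19.0")

	fmt.Println(*rel.Architecture, *rel.Version)
}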
-func (b *RepositoryDigestMirrorsApplyConfiguration) WithMirrors(values ...v1.Mirror) *RepositoryDigestMirrorsApplyConfiguration { +func (b *RepositoryDigestMirrorsApplyConfiguration) WithMirrors(values ...configv1.Mirror) *RepositoryDigestMirrorsApplyConfiguration { for i := range values { b.Mirrors = append(b.Mirrors, values[i]) } diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/requiredhstspolicy.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/requiredhstspolicy.go index a9ffe1350..c68466123 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/requiredhstspolicy.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/requiredhstspolicy.go @@ -3,18 +3,18 @@ package v1 import ( - apiconfigv1 "github.com/openshift/api/config/v1" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + configv1 "github.com/openshift/api/config/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) // RequiredHSTSPolicyApplyConfiguration represents a declarative configuration of the RequiredHSTSPolicy type for use // with apply. type RequiredHSTSPolicyApplyConfiguration struct { - NamespaceSelector *v1.LabelSelectorApplyConfiguration `json:"namespaceSelector,omitempty"` - DomainPatterns []string `json:"domainPatterns,omitempty"` - MaxAge *MaxAgePolicyApplyConfiguration `json:"maxAge,omitempty"` - PreloadPolicy *apiconfigv1.PreloadPolicy `json:"preloadPolicy,omitempty"` - IncludeSubDomainsPolicy *apiconfigv1.IncludeSubDomainsPolicy `json:"includeSubDomainsPolicy,omitempty"` + NamespaceSelector *metav1.LabelSelectorApplyConfiguration `json:"namespaceSelector,omitempty"` + DomainPatterns []string `json:"domainPatterns,omitempty"` + MaxAge *MaxAgePolicyApplyConfiguration `json:"maxAge,omitempty"` + PreloadPolicy *configv1.PreloadPolicy `json:"preloadPolicy,omitempty"` + IncludeSubDomainsPolicy *configv1.IncludeSubDomainsPolicy `json:"includeSubDomainsPolicy,omitempty"` } // RequiredHSTSPolicyApplyConfiguration constructs a declarative configuration of the RequiredHSTSPolicy type for use with @@ -26,7 +26,7 @@ func RequiredHSTSPolicy() *RequiredHSTSPolicyApplyConfiguration { // WithNamespaceSelector sets the NamespaceSelector field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the NamespaceSelector field is set to the value of the last call. -func (b *RequiredHSTSPolicyApplyConfiguration) WithNamespaceSelector(value *v1.LabelSelectorApplyConfiguration) *RequiredHSTSPolicyApplyConfiguration { +func (b *RequiredHSTSPolicyApplyConfiguration) WithNamespaceSelector(value *metav1.LabelSelectorApplyConfiguration) *RequiredHSTSPolicyApplyConfiguration { b.NamespaceSelector = value return b } @@ -52,7 +52,7 @@ func (b *RequiredHSTSPolicyApplyConfiguration) WithMaxAge(value *MaxAgePolicyApp // WithPreloadPolicy sets the PreloadPolicy field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the PreloadPolicy field is set to the value of the last call. 
-func (b *RequiredHSTSPolicyApplyConfiguration) WithPreloadPolicy(value apiconfigv1.PreloadPolicy) *RequiredHSTSPolicyApplyConfiguration { +func (b *RequiredHSTSPolicyApplyConfiguration) WithPreloadPolicy(value configv1.PreloadPolicy) *RequiredHSTSPolicyApplyConfiguration { b.PreloadPolicy = &value return b } @@ -60,7 +60,7 @@ func (b *RequiredHSTSPolicyApplyConfiguration) WithPreloadPolicy(value apiconfig // WithIncludeSubDomainsPolicy sets the IncludeSubDomainsPolicy field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the IncludeSubDomainsPolicy field is set to the value of the last call. -func (b *RequiredHSTSPolicyApplyConfiguration) WithIncludeSubDomainsPolicy(value apiconfigv1.IncludeSubDomainsPolicy) *RequiredHSTSPolicyApplyConfiguration { +func (b *RequiredHSTSPolicyApplyConfiguration) WithIncludeSubDomainsPolicy(value configv1.IncludeSubDomainsPolicy) *RequiredHSTSPolicyApplyConfiguration { b.IncludeSubDomainsPolicy = &value return b } diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/scheduler.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/scheduler.go index 33e73fab9..fa2323d72 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/scheduler.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/scheduler.go @@ -3,21 +3,21 @@ package v1 import ( - apiconfigv1 "github.com/openshift/api/config/v1" + configv1 "github.com/openshift/api/config/v1" internal "github.com/openshift/client-go/config/applyconfigurations/internal" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) // SchedulerApplyConfiguration represents a declarative configuration of the Scheduler type for use // with apply. type SchedulerApplyConfiguration struct { - v1.TypeMetaApplyConfiguration `json:",inline"` - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Spec *SchedulerSpecApplyConfiguration `json:"spec,omitempty"` - Status *apiconfigv1.SchedulerStatus `json:"status,omitempty"` + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *SchedulerSpecApplyConfiguration `json:"spec,omitempty"` + Status *configv1.SchedulerStatus `json:"status,omitempty"` } // Scheduler constructs a declarative configuration of the Scheduler type for use with @@ -41,18 +41,18 @@ func Scheduler(name string) *SchedulerApplyConfiguration { // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! -func ExtractScheduler(scheduler *apiconfigv1.Scheduler, fieldManager string) (*SchedulerApplyConfiguration, error) { +func ExtractScheduler(scheduler *configv1.Scheduler, fieldManager string) (*SchedulerApplyConfiguration, error) { return extractScheduler(scheduler, fieldManager, "") } // ExtractSchedulerStatus is the same as ExtractScheduler except // that it extracts the status subresource applied configuration. // Experimental! 
-func ExtractSchedulerStatus(scheduler *apiconfigv1.Scheduler, fieldManager string) (*SchedulerApplyConfiguration, error) { +func ExtractSchedulerStatus(scheduler *configv1.Scheduler, fieldManager string) (*SchedulerApplyConfiguration, error) { return extractScheduler(scheduler, fieldManager, "status") } -func extractScheduler(scheduler *apiconfigv1.Scheduler, fieldManager string, subresource string) (*SchedulerApplyConfiguration, error) { +func extractScheduler(scheduler *configv1.Scheduler, fieldManager string, subresource string) (*SchedulerApplyConfiguration, error) { b := &SchedulerApplyConfiguration{} err := managedfields.ExtractInto(scheduler, internal.Parser().Type("com.github.openshift.api.config.v1.Scheduler"), fieldManager, b, subresource) if err != nil { @@ -69,7 +69,7 @@ func extractScheduler(scheduler *apiconfigv1.Scheduler, fieldManager string, sub // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *SchedulerApplyConfiguration) WithKind(value string) *SchedulerApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -77,7 +77,7 @@ func (b *SchedulerApplyConfiguration) WithKind(value string) *SchedulerApplyConf // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *SchedulerApplyConfiguration) WithAPIVersion(value string) *SchedulerApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -86,7 +86,7 @@ func (b *SchedulerApplyConfiguration) WithAPIVersion(value string) *SchedulerApp // If called multiple times, the Name field is set to the value of the last call. func (b *SchedulerApplyConfiguration) WithName(value string) *SchedulerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -95,7 +95,7 @@ func (b *SchedulerApplyConfiguration) WithName(value string) *SchedulerApplyConf // If called multiple times, the GenerateName field is set to the value of the last call. func (b *SchedulerApplyConfiguration) WithGenerateName(value string) *SchedulerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -104,7 +104,7 @@ func (b *SchedulerApplyConfiguration) WithGenerateName(value string) *SchedulerA // If called multiple times, the Namespace field is set to the value of the last call. func (b *SchedulerApplyConfiguration) WithNamespace(value string) *SchedulerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -113,7 +113,7 @@ func (b *SchedulerApplyConfiguration) WithNamespace(value string) *SchedulerAppl // If called multiple times, the UID field is set to the value of the last call. func (b *SchedulerApplyConfiguration) WithUID(value types.UID) *SchedulerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -122,7 +122,7 @@ func (b *SchedulerApplyConfiguration) WithUID(value types.UID) *SchedulerApplyCo // If called multiple times, the ResourceVersion field is set to the value of the last call. 
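ExtractScheduler and ExtractSchedulerStatus read an object's managedFields and return only what the named field manager owns, which is the usual starting point for a conflict-free server-side apply. A minimal sketch against a locally constructed object; a real caller would pass an object fetched from the API server, and "example-manager" is a hypothetical manager name.

package main

import (
	"fmt"

	configv1 "github.com/openshift/api/config/v1"
	applyconfigv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1"
)

func main() {
	live := &configv1.Scheduler{}
	live.Name = "cluster"

	// With no managedFields on the object, nothing is owned by the manager,
	// so only the identity fields (name, kind, apiVersion) come back filled.
	ac, err := applyconfigv1.ExtractScheduler(live, "example-manager")
	if err != nil {
		panic(err)
	}
	fmt.Println(*ac.GetName()) // cluster
}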
func (b *SchedulerApplyConfiguration) WithResourceVersion(value string) *SchedulerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -131,25 +131,25 @@ func (b *SchedulerApplyConfiguration) WithResourceVersion(value string) *Schedul // If called multiple times, the Generation field is set to the value of the last call. func (b *SchedulerApplyConfiguration) WithGeneration(value int64) *SchedulerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CreationTimestamp field is set to the value of the last call. -func (b *SchedulerApplyConfiguration) WithCreationTimestamp(value metav1.Time) *SchedulerApplyConfiguration { +func (b *SchedulerApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *SchedulerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. -func (b *SchedulerApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *SchedulerApplyConfiguration { +func (b *SchedulerApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *SchedulerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -158,7 +158,7 @@ func (b *SchedulerApplyConfiguration) WithDeletionTimestamp(value metav1.Time) * // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *SchedulerApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *SchedulerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -168,11 +168,11 @@ func (b *SchedulerApplyConfiguration) WithDeletionGracePeriodSeconds(value int64 // overwriting an existing map entries in Labels field with the same key. func (b *SchedulerApplyConfiguration) WithLabels(entries map[string]string) *SchedulerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -183,11 +183,11 @@ func (b *SchedulerApplyConfiguration) WithLabels(entries map[string]string) *Sch // overwriting an existing map entries in Annotations field with the same key. 
func (b *SchedulerApplyConfiguration) WithAnnotations(entries map[string]string) *SchedulerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -195,13 +195,13 @@ func (b *SchedulerApplyConfiguration) WithAnnotations(entries map[string]string) // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the OwnerReferences field. -func (b *SchedulerApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *SchedulerApplyConfiguration { +func (b *SchedulerApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *SchedulerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -212,14 +212,14 @@ func (b *SchedulerApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerRef func (b *SchedulerApplyConfiguration) WithFinalizers(values ...string) *SchedulerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } func (b *SchedulerApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} } } @@ -234,7 +234,7 @@ func (b *SchedulerApplyConfiguration) WithSpec(value *SchedulerSpecApplyConfigur // WithStatus sets the Status field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Status field is set to the value of the last call. -func (b *SchedulerApplyConfiguration) WithStatus(value apiconfigv1.SchedulerStatus) *SchedulerApplyConfiguration { +func (b *SchedulerApplyConfiguration) WithStatus(value configv1.SchedulerStatus) *SchedulerApplyConfiguration { b.Status = &value return b } @@ -242,5 +242,5 @@ func (b *SchedulerApplyConfiguration) WithStatus(value apiconfigv1.SchedulerStat // GetName retrieves the value of the Name field in the declarative configuration. 
func (b *SchedulerApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/tlsprofilespec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/tlsprofilespec.go index c874fcf0c..43590d0ef 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/tlsprofilespec.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/tlsprofilespec.go @@ -3,14 +3,14 @@ package v1 import ( - v1 "github.com/openshift/api/config/v1" + configv1 "github.com/openshift/api/config/v1" ) // TLSProfileSpecApplyConfiguration represents a declarative configuration of the TLSProfileSpec type for use // with apply. type TLSProfileSpecApplyConfiguration struct { - Ciphers []string `json:"ciphers,omitempty"` - MinTLSVersion *v1.TLSProtocolVersion `json:"minTLSVersion,omitempty"` + Ciphers []string `json:"ciphers,omitempty"` + MinTLSVersion *configv1.TLSProtocolVersion `json:"minTLSVersion,omitempty"` } // TLSProfileSpecApplyConfiguration constructs a declarative configuration of the TLSProfileSpec type for use with @@ -32,7 +32,7 @@ func (b *TLSProfileSpecApplyConfiguration) WithCiphers(values ...string) *TLSPro // WithMinTLSVersion sets the MinTLSVersion field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the MinTLSVersion field is set to the value of the last call. -func (b *TLSProfileSpecApplyConfiguration) WithMinTLSVersion(value v1.TLSProtocolVersion) *TLSProfileSpecApplyConfiguration { +func (b *TLSProfileSpecApplyConfiguration) WithMinTLSVersion(value configv1.TLSProtocolVersion) *TLSProfileSpecApplyConfiguration { b.MinTLSVersion = &value return b } diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/tlssecurityprofile.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/tlssecurityprofile.go index 004632489..e5806e33c 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/tlssecurityprofile.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/tlssecurityprofile.go @@ -3,16 +3,16 @@ package v1 import ( - v1 "github.com/openshift/api/config/v1" + configv1 "github.com/openshift/api/config/v1" ) // TLSSecurityProfileApplyConfiguration represents a declarative configuration of the TLSSecurityProfile type for use // with apply. type TLSSecurityProfileApplyConfiguration struct { - Type *v1.TLSProfileType `json:"type,omitempty"` - Old *v1.OldTLSProfile `json:"old,omitempty"` - Intermediate *v1.IntermediateTLSProfile `json:"intermediate,omitempty"` - Modern *v1.ModernTLSProfile `json:"modern,omitempty"` + Type *configv1.TLSProfileType `json:"type,omitempty"` + Old *configv1.OldTLSProfile `json:"old,omitempty"` + Intermediate *configv1.IntermediateTLSProfile `json:"intermediate,omitempty"` + Modern *configv1.ModernTLSProfile `json:"modern,omitempty"` Custom *CustomTLSProfileApplyConfiguration `json:"custom,omitempty"` } @@ -25,7 +25,7 @@ func TLSSecurityProfile() *TLSSecurityProfileApplyConfiguration { // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. 
// If called multiple times, the Type field is set to the value of the last call. -func (b *TLSSecurityProfileApplyConfiguration) WithType(value v1.TLSProfileType) *TLSSecurityProfileApplyConfiguration { +func (b *TLSSecurityProfileApplyConfiguration) WithType(value configv1.TLSProfileType) *TLSSecurityProfileApplyConfiguration { b.Type = &value return b } @@ -33,7 +33,7 @@ func (b *TLSSecurityProfileApplyConfiguration) WithType(value v1.TLSProfileType) // WithOld sets the Old field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Old field is set to the value of the last call. -func (b *TLSSecurityProfileApplyConfiguration) WithOld(value v1.OldTLSProfile) *TLSSecurityProfileApplyConfiguration { +func (b *TLSSecurityProfileApplyConfiguration) WithOld(value configv1.OldTLSProfile) *TLSSecurityProfileApplyConfiguration { b.Old = &value return b } @@ -41,7 +41,7 @@ func (b *TLSSecurityProfileApplyConfiguration) WithOld(value v1.OldTLSProfile) * // WithIntermediate sets the Intermediate field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Intermediate field is set to the value of the last call. -func (b *TLSSecurityProfileApplyConfiguration) WithIntermediate(value v1.IntermediateTLSProfile) *TLSSecurityProfileApplyConfiguration { +func (b *TLSSecurityProfileApplyConfiguration) WithIntermediate(value configv1.IntermediateTLSProfile) *TLSSecurityProfileApplyConfiguration { b.Intermediate = &value return b } @@ -49,7 +49,7 @@ func (b *TLSSecurityProfileApplyConfiguration) WithIntermediate(value v1.Interme // WithModern sets the Modern field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Modern field is set to the value of the last call. -func (b *TLSSecurityProfileApplyConfiguration) WithModern(value v1.ModernTLSProfile) *TLSSecurityProfileApplyConfiguration { +func (b *TLSSecurityProfileApplyConfiguration) WithModern(value configv1.ModernTLSProfile) *TLSSecurityProfileApplyConfiguration { b.Modern = &value return b } diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/tokenclaimvalidationrule.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/tokenclaimvalidationrule.go index ad9968eef..74e9f6109 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/tokenclaimvalidationrule.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/tokenclaimvalidationrule.go @@ -3,13 +3,13 @@ package v1 import ( - v1 "github.com/openshift/api/config/v1" + configv1 "github.com/openshift/api/config/v1" ) // TokenClaimValidationRuleApplyConfiguration represents a declarative configuration of the TokenClaimValidationRule type for use // with apply. 
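The TLS builders keep the same shape as the API types: TLSSecurityProfile selects a profile type, while TLSProfileSpec carries ciphers and a minimum protocol version. A sketch that pins an explicit cipher list, assuming the usual config/v1 constants for the profile type and TLS version; the cipher names are placeholders.

package main

import (
	"fmt"

	configv1 "github.com/openshift/api/config/v1"
	applyconfigv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1"
)

func main() {
	spec := applyconfigv1.TLSProfileSpec().
		WithCiphers("ECDHE-ECDSA-AES128-GCM-SHA256", "ECDHE-RSA-AES128-GCM-SHA256").
		WithMinTLSVersion(configv1.VersionTLS12)

	profile := applyconfigv1.TLSSecurityProfile().
		WithType(configv1.TLSProfileIntermediateType)

	fmt.Println(*spec.MinTLSVersion, *profile.Type)
}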
type TokenClaimValidationRuleApplyConfiguration struct { - Type *v1.TokenValidationRuleType `json:"type,omitempty"` + Type *configv1.TokenValidationRuleType `json:"type,omitempty"` RequiredClaim *TokenRequiredClaimApplyConfiguration `json:"requiredClaim,omitempty"` } @@ -22,7 +22,7 @@ func TokenClaimValidationRule() *TokenClaimValidationRuleApplyConfiguration { // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. -func (b *TokenClaimValidationRuleApplyConfiguration) WithType(value v1.TokenValidationRuleType) *TokenClaimValidationRuleApplyConfiguration { +func (b *TokenClaimValidationRuleApplyConfiguration) WithType(value configv1.TokenValidationRuleType) *TokenClaimValidationRuleApplyConfiguration { b.Type = &value return b } diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/tokenconfig.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/tokenconfig.go index 7a103935e..e1b6c4b51 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/tokenconfig.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/tokenconfig.go @@ -3,15 +3,15 @@ package v1 import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) // TokenConfigApplyConfiguration represents a declarative configuration of the TokenConfig type for use // with apply. type TokenConfigApplyConfiguration struct { - AccessTokenMaxAgeSeconds *int32 `json:"accessTokenMaxAgeSeconds,omitempty"` - AccessTokenInactivityTimeoutSeconds *int32 `json:"accessTokenInactivityTimeoutSeconds,omitempty"` - AccessTokenInactivityTimeout *v1.Duration `json:"accessTokenInactivityTimeout,omitempty"` + AccessTokenMaxAgeSeconds *int32 `json:"accessTokenMaxAgeSeconds,omitempty"` + AccessTokenInactivityTimeoutSeconds *int32 `json:"accessTokenInactivityTimeoutSeconds,omitempty"` + AccessTokenInactivityTimeout *metav1.Duration `json:"accessTokenInactivityTimeout,omitempty"` } // TokenConfigApplyConfiguration constructs a declarative configuration of the TokenConfig type for use with @@ -39,7 +39,7 @@ func (b *TokenConfigApplyConfiguration) WithAccessTokenInactivityTimeoutSeconds( // WithAccessTokenInactivityTimeout sets the AccessTokenInactivityTimeout field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the AccessTokenInactivityTimeout field is set to the value of the last call. 
-func (b *TokenConfigApplyConfiguration) WithAccessTokenInactivityTimeout(value v1.Duration) *TokenConfigApplyConfiguration { +func (b *TokenConfigApplyConfiguration) WithAccessTokenInactivityTimeout(value metav1.Duration) *TokenConfigApplyConfiguration { b.AccessTokenInactivityTimeout = &value return b } diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/tokenissuer.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/tokenissuer.go index 0811ca729..68f590abc 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/tokenissuer.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/tokenissuer.go @@ -3,14 +3,14 @@ package v1 import ( - v1 "github.com/openshift/api/config/v1" + configv1 "github.com/openshift/api/config/v1" ) // TokenIssuerApplyConfiguration represents a declarative configuration of the TokenIssuer type for use // with apply. type TokenIssuerApplyConfiguration struct { URL *string `json:"issuerURL,omitempty"` - Audiences []v1.TokenAudience `json:"audiences,omitempty"` + Audiences []configv1.TokenAudience `json:"audiences,omitempty"` CertificateAuthority *ConfigMapNameReferenceApplyConfiguration `json:"issuerCertificateAuthority,omitempty"` } @@ -31,7 +31,7 @@ func (b *TokenIssuerApplyConfiguration) WithURL(value string) *TokenIssuerApplyC // WithAudiences adds the given value to the Audiences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the Audiences field. -func (b *TokenIssuerApplyConfiguration) WithAudiences(values ...v1.TokenAudience) *TokenIssuerApplyConfiguration { +func (b *TokenIssuerApplyConfiguration) WithAudiences(values ...configv1.TokenAudience) *TokenIssuerApplyConfiguration { for i := range values { b.Audiences = append(b.Audiences, values[i]) } diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/update.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/update.go index 1d1547d87..004d1bac2 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/update.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/update.go @@ -3,16 +3,16 @@ package v1 import ( - v1 "github.com/openshift/api/config/v1" + configv1 "github.com/openshift/api/config/v1" ) // UpdateApplyConfiguration represents a declarative configuration of the Update type for use // with apply. type UpdateApplyConfiguration struct { - Architecture *v1.ClusterVersionArchitecture `json:"architecture,omitempty"` - Version *string `json:"version,omitempty"` - Image *string `json:"image,omitempty"` - Force *bool `json:"force,omitempty"` + Architecture *configv1.ClusterVersionArchitecture `json:"architecture,omitempty"` + Version *string `json:"version,omitempty"` + Image *string `json:"image,omitempty"` + Force *bool `json:"force,omitempty"` } // UpdateApplyConfiguration constructs a declarative configuration of the Update type for use with @@ -24,7 +24,7 @@ func Update() *UpdateApplyConfiguration { // WithArchitecture sets the Architecture field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Architecture field is set to the value of the last call. 
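TokenConfig's inactivity timeout takes a metav1.Duration rather than a plain seconds count, and TokenIssuer's audiences use the TokenAudience string type. A usage sketch relying on the generated zero-argument constructors; the issuer URL and audience value are placeholders.

package main

import (
	"fmt"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	configv1 "github.com/openshift/api/config/v1"
	applyconfigv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1"
)

func main() {
	tokens := applyconfigv1.TokenConfig().
		WithAccessTokenInactivityTimeout(metav1.Duration{Duration: 30 * time.Minute})

	issuer := applyconfigv1.TokenIssuer().
		WithURL("https://issuer.example.com").
		WithAudiences(configv1.TokenAudience("openshift"))

	fmt.Println(tokens.AccessTokenInactivityTimeout.Duration, len(issuer.Audiences))
}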
-func (b *UpdateApplyConfiguration) WithArchitecture(value v1.ClusterVersionArchitecture) *UpdateApplyConfiguration { +func (b *UpdateApplyConfiguration) WithArchitecture(value configv1.ClusterVersionArchitecture) *UpdateApplyConfiguration { b.Architecture = &value return b } diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/updatehistory.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/updatehistory.go index 6a4deaf3f..b7998eb61 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/updatehistory.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/updatehistory.go @@ -3,20 +3,20 @@ package v1 import ( - v1 "github.com/openshift/api/config/v1" + configv1 "github.com/openshift/api/config/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) // UpdateHistoryApplyConfiguration represents a declarative configuration of the UpdateHistory type for use // with apply. type UpdateHistoryApplyConfiguration struct { - State *v1.UpdateState `json:"state,omitempty"` - StartedTime *metav1.Time `json:"startedTime,omitempty"` - CompletionTime *metav1.Time `json:"completionTime,omitempty"` - Version *string `json:"version,omitempty"` - Image *string `json:"image,omitempty"` - Verified *bool `json:"verified,omitempty"` - AcceptedRisks *string `json:"acceptedRisks,omitempty"` + State *configv1.UpdateState `json:"state,omitempty"` + StartedTime *metav1.Time `json:"startedTime,omitempty"` + CompletionTime *metav1.Time `json:"completionTime,omitempty"` + Version *string `json:"version,omitempty"` + Image *string `json:"image,omitempty"` + Verified *bool `json:"verified,omitempty"` + AcceptedRisks *string `json:"acceptedRisks,omitempty"` } // UpdateHistoryApplyConfiguration constructs a declarative configuration of the UpdateHistory type for use with @@ -28,7 +28,7 @@ func UpdateHistory() *UpdateHistoryApplyConfiguration { // WithState sets the State field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the State field is set to the value of the last call. -func (b *UpdateHistoryApplyConfiguration) WithState(value v1.UpdateState) *UpdateHistoryApplyConfiguration { +func (b *UpdateHistoryApplyConfiguration) WithState(value configv1.UpdateState) *UpdateHistoryApplyConfiguration { b.State = &value return b } diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/usernameclaimmapping.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/usernameclaimmapping.go index daec7fb46..e90a90117 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/usernameclaimmapping.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/usernameclaimmapping.go @@ -24,7 +24,7 @@ func UsernameClaimMapping() *UsernameClaimMappingApplyConfiguration { // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Claim field is set to the value of the last call. 
func (b *UsernameClaimMappingApplyConfiguration) WithClaim(value string) *UsernameClaimMappingApplyConfiguration { - b.Claim = &value + b.TokenClaimMappingApplyConfiguration.Claim = &value return b } diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vspherefailuredomainhostgroup.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vspherefailuredomainhostgroup.go new file mode 100644 index 000000000..f590263a1 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vspherefailuredomainhostgroup.go @@ -0,0 +1,41 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// VSphereFailureDomainHostGroupApplyConfiguration represents a declarative configuration of the VSphereFailureDomainHostGroup type for use +// with apply. +type VSphereFailureDomainHostGroupApplyConfiguration struct { + VMGroup *string `json:"vmGroup,omitempty"` + HostGroup *string `json:"hostGroup,omitempty"` + VMHostRule *string `json:"vmHostRule,omitempty"` +} + +// VSphereFailureDomainHostGroupApplyConfiguration constructs a declarative configuration of the VSphereFailureDomainHostGroup type for use with +// apply. +func VSphereFailureDomainHostGroup() *VSphereFailureDomainHostGroupApplyConfiguration { + return &VSphereFailureDomainHostGroupApplyConfiguration{} +} + +// WithVMGroup sets the VMGroup field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the VMGroup field is set to the value of the last call. +func (b *VSphereFailureDomainHostGroupApplyConfiguration) WithVMGroup(value string) *VSphereFailureDomainHostGroupApplyConfiguration { + b.VMGroup = &value + return b +} + +// WithHostGroup sets the HostGroup field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the HostGroup field is set to the value of the last call. +func (b *VSphereFailureDomainHostGroupApplyConfiguration) WithHostGroup(value string) *VSphereFailureDomainHostGroupApplyConfiguration { + b.HostGroup = &value + return b +} + +// WithVMHostRule sets the VMHostRule field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the VMHostRule field is set to the value of the last call. +func (b *VSphereFailureDomainHostGroupApplyConfiguration) WithVMHostRule(value string) *VSphereFailureDomainHostGroupApplyConfiguration { + b.VMHostRule = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vspherefailuredomainregionaffinity.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vspherefailuredomainregionaffinity.go new file mode 100644 index 000000000..bf923d829 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vspherefailuredomainregionaffinity.go @@ -0,0 +1,27 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + configv1 "github.com/openshift/api/config/v1" +) + +// VSphereFailureDomainRegionAffinityApplyConfiguration represents a declarative configuration of the VSphereFailureDomainRegionAffinity type for use +// with apply. 
+type VSphereFailureDomainRegionAffinityApplyConfiguration struct { + Type *configv1.VSphereFailureDomainRegionType `json:"type,omitempty"` +} + +// VSphereFailureDomainRegionAffinityApplyConfiguration constructs a declarative configuration of the VSphereFailureDomainRegionAffinity type for use with +// apply. +func VSphereFailureDomainRegionAffinity() *VSphereFailureDomainRegionAffinityApplyConfiguration { + return &VSphereFailureDomainRegionAffinityApplyConfiguration{} +} + +// WithType sets the Type field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Type field is set to the value of the last call. +func (b *VSphereFailureDomainRegionAffinityApplyConfiguration) WithType(value configv1.VSphereFailureDomainRegionType) *VSphereFailureDomainRegionAffinityApplyConfiguration { + b.Type = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vspherefailuredomainzoneaffinity.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vspherefailuredomainzoneaffinity.go new file mode 100644 index 000000000..5bbbe9556 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vspherefailuredomainzoneaffinity.go @@ -0,0 +1,36 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + configv1 "github.com/openshift/api/config/v1" +) + +// VSphereFailureDomainZoneAffinityApplyConfiguration represents a declarative configuration of the VSphereFailureDomainZoneAffinity type for use +// with apply. +type VSphereFailureDomainZoneAffinityApplyConfiguration struct { + Type *configv1.VSphereFailureDomainZoneType `json:"type,omitempty"` + HostGroup *VSphereFailureDomainHostGroupApplyConfiguration `json:"hostGroup,omitempty"` +} + +// VSphereFailureDomainZoneAffinityApplyConfiguration constructs a declarative configuration of the VSphereFailureDomainZoneAffinity type for use with +// apply. +func VSphereFailureDomainZoneAffinity() *VSphereFailureDomainZoneAffinityApplyConfiguration { + return &VSphereFailureDomainZoneAffinityApplyConfiguration{} +} + +// WithType sets the Type field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Type field is set to the value of the last call. +func (b *VSphereFailureDomainZoneAffinityApplyConfiguration) WithType(value configv1.VSphereFailureDomainZoneType) *VSphereFailureDomainZoneAffinityApplyConfiguration { + b.Type = &value + return b +} + +// WithHostGroup sets the HostGroup field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the HostGroup field is set to the value of the last call. 
+func (b *VSphereFailureDomainZoneAffinityApplyConfiguration) WithHostGroup(value *VSphereFailureDomainHostGroupApplyConfiguration) *VSphereFailureDomainZoneAffinityApplyConfiguration { + b.HostGroup = value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vsphereplatformfailuredomainspec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vsphereplatformfailuredomainspec.go index 1c5ec2cf1..aeb238882 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vsphereplatformfailuredomainspec.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vsphereplatformfailuredomainspec.go @@ -5,11 +5,13 @@ package v1 // VSpherePlatformFailureDomainSpecApplyConfiguration represents a declarative configuration of the VSpherePlatformFailureDomainSpec type for use // with apply. type VSpherePlatformFailureDomainSpecApplyConfiguration struct { - Name *string `json:"name,omitempty"` - Region *string `json:"region,omitempty"` - Zone *string `json:"zone,omitempty"` - Server *string `json:"server,omitempty"` - Topology *VSpherePlatformTopologyApplyConfiguration `json:"topology,omitempty"` + Name *string `json:"name,omitempty"` + Region *string `json:"region,omitempty"` + Zone *string `json:"zone,omitempty"` + RegionAffinity *VSphereFailureDomainRegionAffinityApplyConfiguration `json:"regionAffinity,omitempty"` + ZoneAffinity *VSphereFailureDomainZoneAffinityApplyConfiguration `json:"zoneAffinity,omitempty"` + Server *string `json:"server,omitempty"` + Topology *VSpherePlatformTopologyApplyConfiguration `json:"topology,omitempty"` } // VSpherePlatformFailureDomainSpecApplyConfiguration constructs a declarative configuration of the VSpherePlatformFailureDomainSpec type for use with @@ -42,6 +44,22 @@ func (b *VSpherePlatformFailureDomainSpecApplyConfiguration) WithZone(value stri return b } +// WithRegionAffinity sets the RegionAffinity field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the RegionAffinity field is set to the value of the last call. +func (b *VSpherePlatformFailureDomainSpecApplyConfiguration) WithRegionAffinity(value *VSphereFailureDomainRegionAffinityApplyConfiguration) *VSpherePlatformFailureDomainSpecApplyConfiguration { + b.RegionAffinity = value + return b +} + +// WithZoneAffinity sets the ZoneAffinity field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ZoneAffinity field is set to the value of the last call. +func (b *VSpherePlatformFailureDomainSpecApplyConfiguration) WithZoneAffinity(value *VSphereFailureDomainZoneAffinityApplyConfiguration) *VSpherePlatformFailureDomainSpecApplyConfiguration { + b.ZoneAffinity = value + return b +} + // WithServer sets the Server field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Server field is set to the value of the last call. 
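The new vSphere builders nest the same way the API types do: a failure domain spec can now carry a zone affinity, and a host-group zone affinity in turn references VM group, host group, and VM-host rule names. A sketch of wiring them together with the generated constructors; the zone-affinity type value "HostGroup" and all vSphere object names are assumptions.

package main

import (
	"fmt"

	configv1 "github.com/openshift/api/config/v1"
	applyconfigv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1"
)

func main() {
	fd := applyconfigv1.VSpherePlatformFailureDomainSpec().
		WithZone("us-east-1a").
		WithZoneAffinity(applyconfigv1.VSphereFailureDomainZoneAffinity().
			WithType(configv1.VSphereFailureDomainZoneType("HostGroup")). // assumed enum value
			WithHostGroup(applyconfigv1.VSphereFailureDomainHostGroup().
				WithVMGroup("zone-1a-vm-group").
				WithHostGroup("zone-1a-host-group").
				WithVMHostRule("zone-1a-vm-host-rule")))

	fmt.Println(*fd.ZoneAffinity.HostGroup.HostGroup)
}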
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vsphereplatformloadbalancer.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vsphereplatformloadbalancer.go index d1dcebec4..9eb2f57aa 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vsphereplatformloadbalancer.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vsphereplatformloadbalancer.go @@ -3,13 +3,13 @@ package v1 import ( - v1 "github.com/openshift/api/config/v1" + configv1 "github.com/openshift/api/config/v1" ) // VSpherePlatformLoadBalancerApplyConfiguration represents a declarative configuration of the VSpherePlatformLoadBalancer type for use // with apply. type VSpherePlatformLoadBalancerApplyConfiguration struct { - Type *v1.PlatformLoadBalancerType `json:"type,omitempty"` + Type *configv1.PlatformLoadBalancerType `json:"type,omitempty"` } // VSpherePlatformLoadBalancerApplyConfiguration constructs a declarative configuration of the VSpherePlatformLoadBalancer type for use with @@ -21,7 +21,7 @@ func VSpherePlatformLoadBalancer() *VSpherePlatformLoadBalancerApplyConfiguratio // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. -func (b *VSpherePlatformLoadBalancerApplyConfiguration) WithType(value v1.PlatformLoadBalancerType) *VSpherePlatformLoadBalancerApplyConfiguration { +func (b *VSpherePlatformLoadBalancerApplyConfiguration) WithType(value configv1.PlatformLoadBalancerType) *VSpherePlatformLoadBalancerApplyConfiguration { b.Type = &value return b } diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/backup.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/backup.go index e5a58f264..b4982de15 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/backup.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/backup.go @@ -69,7 +69,7 @@ func extractBackup(backup *configv1alpha1.Backup, fieldManager string, subresour // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *BackupApplyConfiguration) WithKind(value string) *BackupApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -77,7 +77,7 @@ func (b *BackupApplyConfiguration) WithKind(value string) *BackupApplyConfigurat // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *BackupApplyConfiguration) WithAPIVersion(value string) *BackupApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -86,7 +86,7 @@ func (b *BackupApplyConfiguration) WithAPIVersion(value string) *BackupApplyConf // If called multiple times, the Name field is set to the value of the last call. 
func (b *BackupApplyConfiguration) WithName(value string) *BackupApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -95,7 +95,7 @@ func (b *BackupApplyConfiguration) WithName(value string) *BackupApplyConfigurat // If called multiple times, the GenerateName field is set to the value of the last call. func (b *BackupApplyConfiguration) WithGenerateName(value string) *BackupApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -104,7 +104,7 @@ func (b *BackupApplyConfiguration) WithGenerateName(value string) *BackupApplyCo // If called multiple times, the Namespace field is set to the value of the last call. func (b *BackupApplyConfiguration) WithNamespace(value string) *BackupApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -113,7 +113,7 @@ func (b *BackupApplyConfiguration) WithNamespace(value string) *BackupApplyConfi // If called multiple times, the UID field is set to the value of the last call. func (b *BackupApplyConfiguration) WithUID(value types.UID) *BackupApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -122,7 +122,7 @@ func (b *BackupApplyConfiguration) WithUID(value types.UID) *BackupApplyConfigur // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *BackupApplyConfiguration) WithResourceVersion(value string) *BackupApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -131,7 +131,7 @@ func (b *BackupApplyConfiguration) WithResourceVersion(value string) *BackupAppl // If called multiple times, the Generation field is set to the value of the last call. func (b *BackupApplyConfiguration) WithGeneration(value int64) *BackupApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } @@ -140,7 +140,7 @@ func (b *BackupApplyConfiguration) WithGeneration(value int64) *BackupApplyConfi // If called multiple times, the CreationTimestamp field is set to the value of the last call. func (b *BackupApplyConfiguration) WithCreationTimestamp(value metav1.Time) *BackupApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } @@ -149,7 +149,7 @@ func (b *BackupApplyConfiguration) WithCreationTimestamp(value metav1.Time) *Bac // If called multiple times, the DeletionTimestamp field is set to the value of the last call. func (b *BackupApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *BackupApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -158,7 +158,7 @@ func (b *BackupApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *Bac // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. 
func (b *BackupApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *BackupApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -168,11 +168,11 @@ func (b *BackupApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) * // overwriting an existing map entries in Labels field with the same key. func (b *BackupApplyConfiguration) WithLabels(entries map[string]string) *BackupApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -183,11 +183,11 @@ func (b *BackupApplyConfiguration) WithLabels(entries map[string]string) *Backup // overwriting an existing map entries in Annotations field with the same key. func (b *BackupApplyConfiguration) WithAnnotations(entries map[string]string) *BackupApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -201,7 +201,7 @@ func (b *BackupApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerRefere if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -212,7 +212,7 @@ func (b *BackupApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerRefere func (b *BackupApplyConfiguration) WithFinalizers(values ...string) *BackupApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } @@ -242,5 +242,5 @@ func (b *BackupApplyConfiguration) WithStatus(value configv1alpha1.BackupStatus) // GetName retrieves the value of the Name field in the declarative configuration. func (b *BackupApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/clusterimagepolicy.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/clusterimagepolicy.go index 1e462c3c5..f3d7fdb77 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/clusterimagepolicy.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/clusterimagepolicy.go @@ -69,7 +69,7 @@ func extractClusterImagePolicy(clusterImagePolicy *configv1alpha1.ClusterImagePo // and returns the receiver, so that objects can be built by chaining "With" function invocations. 
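Editor's note: the repeated change in these generated builders (for example `b.Name = &value` becoming `b.ObjectMetaApplyConfiguration.Name = &value`) is a switch from promoted-field selectors to explicit selectors on the embedded apply configurations; because of Go's embedded-field promotion, both forms address the same field, so behaviour is unchanged. A minimal standalone sketch, not part of the vendored packages, illustrating the equivalence:

// Standalone sketch (assumption: built against a client-go version that
// provides the meta/v1 apply configurations, as the vendored code here does).
package main

import (
	"fmt"

	metav1 "k8s.io/client-go/applyconfigurations/meta/v1"
)

// demoApplyConfiguration mirrors the embedding used by the generated builders.
type demoApplyConfiguration struct {
	metav1.TypeMetaApplyConfiguration    `json:",inline"`
	*metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
}

func main() {
	d := &demoApplyConfiguration{ObjectMetaApplyConfiguration: &metav1.ObjectMetaApplyConfiguration{}}

	name := "example"
	d.Name = &name // promoted access, as the old generator output wrote it

	// Explicit access, as the new generator output writes it, reads the same field.
	fmt.Println(*d.ObjectMetaApplyConfiguration.Name) // prints "example"

	kind := "Backup"
	d.TypeMetaApplyConfiguration.Kind = &kind
	fmt.Println(*d.Kind) // promoted read of the same Kind field, prints "Backup"
}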
// If called multiple times, the Kind field is set to the value of the last call. func (b *ClusterImagePolicyApplyConfiguration) WithKind(value string) *ClusterImagePolicyApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -77,7 +77,7 @@ func (b *ClusterImagePolicyApplyConfiguration) WithKind(value string) *ClusterIm // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *ClusterImagePolicyApplyConfiguration) WithAPIVersion(value string) *ClusterImagePolicyApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -86,7 +86,7 @@ func (b *ClusterImagePolicyApplyConfiguration) WithAPIVersion(value string) *Clu // If called multiple times, the Name field is set to the value of the last call. func (b *ClusterImagePolicyApplyConfiguration) WithName(value string) *ClusterImagePolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -95,7 +95,7 @@ func (b *ClusterImagePolicyApplyConfiguration) WithName(value string) *ClusterIm // If called multiple times, the GenerateName field is set to the value of the last call. func (b *ClusterImagePolicyApplyConfiguration) WithGenerateName(value string) *ClusterImagePolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -104,7 +104,7 @@ func (b *ClusterImagePolicyApplyConfiguration) WithGenerateName(value string) *C // If called multiple times, the Namespace field is set to the value of the last call. func (b *ClusterImagePolicyApplyConfiguration) WithNamespace(value string) *ClusterImagePolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -113,7 +113,7 @@ func (b *ClusterImagePolicyApplyConfiguration) WithNamespace(value string) *Clus // If called multiple times, the UID field is set to the value of the last call. func (b *ClusterImagePolicyApplyConfiguration) WithUID(value types.UID) *ClusterImagePolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -122,7 +122,7 @@ func (b *ClusterImagePolicyApplyConfiguration) WithUID(value types.UID) *Cluster // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *ClusterImagePolicyApplyConfiguration) WithResourceVersion(value string) *ClusterImagePolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -131,7 +131,7 @@ func (b *ClusterImagePolicyApplyConfiguration) WithResourceVersion(value string) // If called multiple times, the Generation field is set to the value of the last call. func (b *ClusterImagePolicyApplyConfiguration) WithGeneration(value int64) *ClusterImagePolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } @@ -140,7 +140,7 @@ func (b *ClusterImagePolicyApplyConfiguration) WithGeneration(value int64) *Clus // If called multiple times, the CreationTimestamp field is set to the value of the last call. 
func (b *ClusterImagePolicyApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ClusterImagePolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } @@ -149,7 +149,7 @@ func (b *ClusterImagePolicyApplyConfiguration) WithCreationTimestamp(value metav // If called multiple times, the DeletionTimestamp field is set to the value of the last call. func (b *ClusterImagePolicyApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ClusterImagePolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -158,7 +158,7 @@ func (b *ClusterImagePolicyApplyConfiguration) WithDeletionTimestamp(value metav // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *ClusterImagePolicyApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ClusterImagePolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -168,11 +168,11 @@ func (b *ClusterImagePolicyApplyConfiguration) WithDeletionGracePeriodSeconds(va // overwriting an existing map entries in Labels field with the same key. func (b *ClusterImagePolicyApplyConfiguration) WithLabels(entries map[string]string) *ClusterImagePolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -183,11 +183,11 @@ func (b *ClusterImagePolicyApplyConfiguration) WithLabels(entries map[string]str // overwriting an existing map entries in Annotations field with the same key. 
func (b *ClusterImagePolicyApplyConfiguration) WithAnnotations(entries map[string]string) *ClusterImagePolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -201,7 +201,7 @@ func (b *ClusterImagePolicyApplyConfiguration) WithOwnerReferences(values ...*v1 if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -212,7 +212,7 @@ func (b *ClusterImagePolicyApplyConfiguration) WithOwnerReferences(values ...*v1 func (b *ClusterImagePolicyApplyConfiguration) WithFinalizers(values ...string) *ClusterImagePolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } @@ -242,5 +242,5 @@ func (b *ClusterImagePolicyApplyConfiguration) WithStatus(value *ClusterImagePol // GetName retrieves the value of the Name field in the declarative configuration. func (b *ClusterImagePolicyApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/clusterimagepolicyspec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/clusterimagepolicyspec.go index bbb25ceb6..e4a3470c4 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/clusterimagepolicyspec.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/clusterimagepolicyspec.go @@ -3,14 +3,14 @@ package v1alpha1 import ( - v1alpha1 "github.com/openshift/api/config/v1alpha1" + configv1alpha1 "github.com/openshift/api/config/v1alpha1" ) // ClusterImagePolicySpecApplyConfiguration represents a declarative configuration of the ClusterImagePolicySpec type for use // with apply. type ClusterImagePolicySpecApplyConfiguration struct { - Scopes []v1alpha1.ImageScope `json:"scopes,omitempty"` - Policy *PolicyApplyConfiguration `json:"policy,omitempty"` + Scopes []configv1alpha1.ImageScope `json:"scopes,omitempty"` + Policy *PolicyApplyConfiguration `json:"policy,omitempty"` } // ClusterImagePolicySpecApplyConfiguration constructs a declarative configuration of the ClusterImagePolicySpec type for use with @@ -22,7 +22,7 @@ func ClusterImagePolicySpec() *ClusterImagePolicySpecApplyConfiguration { // WithScopes adds the given value to the Scopes field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the Scopes field. 
-func (b *ClusterImagePolicySpecApplyConfiguration) WithScopes(values ...v1alpha1.ImageScope) *ClusterImagePolicySpecApplyConfiguration { +func (b *ClusterImagePolicySpecApplyConfiguration) WithScopes(values ...configv1alpha1.ImageScope) *ClusterImagePolicySpecApplyConfiguration { for i := range values { b.Scopes = append(b.Scopes, values[i]) } diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/clustermonitoring.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/clustermonitoring.go new file mode 100644 index 000000000..b935706eb --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/clustermonitoring.go @@ -0,0 +1,246 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + configv1alpha1 "github.com/openshift/api/config/v1alpha1" + internal "github.com/openshift/client-go/config/applyconfigurations/internal" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + managedfields "k8s.io/apimachinery/pkg/util/managedfields" + v1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// ClusterMonitoringApplyConfiguration represents a declarative configuration of the ClusterMonitoring type for use +// with apply. +type ClusterMonitoringApplyConfiguration struct { + v1.TypeMetaApplyConfiguration `json:",inline"` + *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *ClusterMonitoringSpecApplyConfiguration `json:"spec,omitempty"` + Status *configv1alpha1.ClusterMonitoringStatus `json:"status,omitempty"` +} + +// ClusterMonitoring constructs a declarative configuration of the ClusterMonitoring type for use with +// apply. +func ClusterMonitoring(name string) *ClusterMonitoringApplyConfiguration { + b := &ClusterMonitoringApplyConfiguration{} + b.WithName(name) + b.WithKind("ClusterMonitoring") + b.WithAPIVersion("config.openshift.io/v1alpha1") + return b +} + +// ExtractClusterMonitoring extracts the applied configuration owned by fieldManager from +// clusterMonitoring. If no managedFields are found in clusterMonitoring for fieldManager, a +// ClusterMonitoringApplyConfiguration is returned with only the Name, Namespace (if applicable), +// APIVersion and Kind populated. It is possible that no managed fields were found for because other +// field managers have taken ownership of all the fields previously owned by fieldManager, or because +// the fieldManager never owned fields any fields. +// clusterMonitoring must be a unmodified ClusterMonitoring API object that was retrieved from the Kubernetes API. +// ExtractClusterMonitoring provides a way to perform a extract/modify-in-place/apply workflow. +// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously +// applied if another fieldManager has updated or force applied any of the previously applied fields. +// Experimental! +func ExtractClusterMonitoring(clusterMonitoring *configv1alpha1.ClusterMonitoring, fieldManager string) (*ClusterMonitoringApplyConfiguration, error) { + return extractClusterMonitoring(clusterMonitoring, fieldManager, "") +} + +// ExtractClusterMonitoringStatus is the same as ExtractClusterMonitoring except +// that it extracts the status subresource applied configuration. +// Experimental! 
+func ExtractClusterMonitoringStatus(clusterMonitoring *configv1alpha1.ClusterMonitoring, fieldManager string) (*ClusterMonitoringApplyConfiguration, error) { + return extractClusterMonitoring(clusterMonitoring, fieldManager, "status") +} + +func extractClusterMonitoring(clusterMonitoring *configv1alpha1.ClusterMonitoring, fieldManager string, subresource string) (*ClusterMonitoringApplyConfiguration, error) { + b := &ClusterMonitoringApplyConfiguration{} + err := managedfields.ExtractInto(clusterMonitoring, internal.Parser().Type("com.github.openshift.api.config.v1alpha1.ClusterMonitoring"), fieldManager, b, subresource) + if err != nil { + return nil, err + } + b.WithName(clusterMonitoring.Name) + + b.WithKind("ClusterMonitoring") + b.WithAPIVersion("config.openshift.io/v1alpha1") + return b, nil +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. +func (b *ClusterMonitoringApplyConfiguration) WithKind(value string) *ClusterMonitoringApplyConfiguration { + b.TypeMetaApplyConfiguration.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *ClusterMonitoringApplyConfiguration) WithAPIVersion(value string) *ClusterMonitoringApplyConfiguration { + b.TypeMetaApplyConfiguration.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *ClusterMonitoringApplyConfiguration) WithName(value string) *ClusterMonitoringApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *ClusterMonitoringApplyConfiguration) WithGenerateName(value string) *ClusterMonitoringApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *ClusterMonitoringApplyConfiguration) WithNamespace(value string) *ClusterMonitoringApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. 
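Editor's note: a hedged sketch of the extract/modify-in-place/apply flow exposed by the new ExtractClusterMonitoring helper above. In practice the object would come from a GET against the API server so that its managedFields are populated; the locally constructed object and the field manager name "example-manager" are illustrative assumptions, not taken from this diff.

// Sketch only: with no managedFields on the input, the returned apply
// configuration carries just Name, Kind and APIVersion.
package main

import (
	"fmt"

	configv1alpha1 "github.com/openshift/api/config/v1alpha1"
	applyconfigv1alpha1 "github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1"
)

func main() {
	live := &configv1alpha1.ClusterMonitoring{}
	live.Name = "cluster"

	// Returns only the fields owned by "example-manager", ready to be
	// modified and re-applied with that field manager.
	ac, err := applyconfigv1alpha1.ExtractClusterMonitoring(live, "example-manager")
	if err != nil {
		panic(err)
	}
	fmt.Println(*ac.GetName(), *ac.Kind) // "cluster ClusterMonitoring"
}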
+func (b *ClusterMonitoringApplyConfiguration) WithUID(value types.UID) *ClusterMonitoringApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *ClusterMonitoringApplyConfiguration) WithResourceVersion(value string) *ClusterMonitoringApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *ClusterMonitoringApplyConfiguration) WithGeneration(value int64) *ClusterMonitoringApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. +func (b *ClusterMonitoringApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ClusterMonitoringApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. +func (b *ClusterMonitoringApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ClusterMonitoringApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. +func (b *ClusterMonitoringApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ClusterMonitoringApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting an existing map entries in Labels field with the same key. 
+func (b *ClusterMonitoringApplyConfiguration) WithLabels(entries map[string]string) *ClusterMonitoringApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.ObjectMetaApplyConfiguration.Labels[k] = v + } + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting an existing map entries in Annotations field with the same key. +func (b *ClusterMonitoringApplyConfiguration) WithAnnotations(entries map[string]string) *ClusterMonitoringApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.ObjectMetaApplyConfiguration.Annotations[k] = v + } + return b +} + +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the OwnerReferences field. +func (b *ClusterMonitoringApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ClusterMonitoringApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOwnerReferences") + } + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) + } + return b +} + +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Finalizers field. +func (b *ClusterMonitoringApplyConfiguration) WithFinalizers(values ...string) *ClusterMonitoringApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) + } + return b +} + +func (b *ClusterMonitoringApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + } +} + +// WithSpec sets the Spec field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Spec field is set to the value of the last call. +func (b *ClusterMonitoringApplyConfiguration) WithSpec(value *ClusterMonitoringSpecApplyConfiguration) *ClusterMonitoringApplyConfiguration { + b.Spec = value + return b +} + +// WithStatus sets the Status field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Status field is set to the value of the last call. 
+func (b *ClusterMonitoringApplyConfiguration) WithStatus(value configv1alpha1.ClusterMonitoringStatus) *ClusterMonitoringApplyConfiguration { + b.Status = &value + return b +} + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *ClusterMonitoringApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/clustermonitoringspec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/clustermonitoringspec.go new file mode 100644 index 000000000..28fa2d7c3 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/clustermonitoringspec.go @@ -0,0 +1,23 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +// ClusterMonitoringSpecApplyConfiguration represents a declarative configuration of the ClusterMonitoringSpec type for use +// with apply. +type ClusterMonitoringSpecApplyConfiguration struct { + UserDefined *UserDefinedMonitoringApplyConfiguration `json:"userDefined,omitempty"` +} + +// ClusterMonitoringSpecApplyConfiguration constructs a declarative configuration of the ClusterMonitoringSpec type for use with +// apply. +func ClusterMonitoringSpec() *ClusterMonitoringSpecApplyConfiguration { + return &ClusterMonitoringSpecApplyConfiguration{} +} + +// WithUserDefined sets the UserDefined field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UserDefined field is set to the value of the last call. +func (b *ClusterMonitoringSpecApplyConfiguration) WithUserDefined(value *UserDefinedMonitoringApplyConfiguration) *ClusterMonitoringSpecApplyConfiguration { + b.UserDefined = value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/gatherconfig.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/gatherconfig.go index 0f3edbd9f..e870fe6c2 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/gatherconfig.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/gatherconfig.go @@ -3,14 +3,14 @@ package v1alpha1 import ( - v1alpha1 "github.com/openshift/api/config/v1alpha1" + configv1alpha1 "github.com/openshift/api/config/v1alpha1" ) // GatherConfigApplyConfiguration represents a declarative configuration of the GatherConfig type for use // with apply. type GatherConfigApplyConfiguration struct { - DataPolicy *v1alpha1.DataPolicy `json:"dataPolicy,omitempty"` - DisabledGatherers []string `json:"disabledGatherers,omitempty"` + DataPolicy *configv1alpha1.DataPolicy `json:"dataPolicy,omitempty"` + DisabledGatherers []string `json:"disabledGatherers,omitempty"` } // GatherConfigApplyConfiguration constructs a declarative configuration of the GatherConfig type for use with @@ -22,7 +22,7 @@ func GatherConfig() *GatherConfigApplyConfiguration { // WithDataPolicy sets the DataPolicy field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DataPolicy field is set to the value of the last call. 
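Editor's note: a hedged usage sketch of the builder chain formed by the new ClusterMonitoring, ClusterMonitoringSpec and UserDefinedMonitoring apply configurations (the UserDefinedMonitoring builder is added further down in this bump). The mode value "Disabled" is an assumption about the UserDefinedMode enum, not something stated in this diff; consult the vendored openshift/api types for the actual constants.

// Sketch only: builds a declarative configuration suitable for server-side apply.
package main

import (
	"fmt"

	configv1alpha1 "github.com/openshift/api/config/v1alpha1"
	applyconfigv1alpha1 "github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1"
)

func main() {
	ac := applyconfigv1alpha1.ClusterMonitoring("cluster").
		WithSpec(applyconfigv1alpha1.ClusterMonitoringSpec().
			WithUserDefined(applyconfigv1alpha1.UserDefinedMonitoring().
				WithMode(configv1alpha1.UserDefinedMode("Disabled")))) // assumed enum value

	fmt.Println(*ac.GetName(), *ac.Spec.UserDefined.Mode) // "cluster Disabled"
}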
-func (b *GatherConfigApplyConfiguration) WithDataPolicy(value v1alpha1.DataPolicy) *GatherConfigApplyConfiguration { +func (b *GatherConfigApplyConfiguration) WithDataPolicy(value configv1alpha1.DataPolicy) *GatherConfigApplyConfiguration { b.DataPolicy = &value return b } diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/imagepolicy.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/imagepolicy.go index f08aa7e47..6595aa782 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/imagepolicy.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/imagepolicy.go @@ -71,7 +71,7 @@ func extractImagePolicy(imagePolicy *configv1alpha1.ImagePolicy, fieldManager st // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *ImagePolicyApplyConfiguration) WithKind(value string) *ImagePolicyApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -79,7 +79,7 @@ func (b *ImagePolicyApplyConfiguration) WithKind(value string) *ImagePolicyApply // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *ImagePolicyApplyConfiguration) WithAPIVersion(value string) *ImagePolicyApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -88,7 +88,7 @@ func (b *ImagePolicyApplyConfiguration) WithAPIVersion(value string) *ImagePolic // If called multiple times, the Name field is set to the value of the last call. func (b *ImagePolicyApplyConfiguration) WithName(value string) *ImagePolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -97,7 +97,7 @@ func (b *ImagePolicyApplyConfiguration) WithName(value string) *ImagePolicyApply // If called multiple times, the GenerateName field is set to the value of the last call. func (b *ImagePolicyApplyConfiguration) WithGenerateName(value string) *ImagePolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -106,7 +106,7 @@ func (b *ImagePolicyApplyConfiguration) WithGenerateName(value string) *ImagePol // If called multiple times, the Namespace field is set to the value of the last call. func (b *ImagePolicyApplyConfiguration) WithNamespace(value string) *ImagePolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -115,7 +115,7 @@ func (b *ImagePolicyApplyConfiguration) WithNamespace(value string) *ImagePolicy // If called multiple times, the UID field is set to the value of the last call. func (b *ImagePolicyApplyConfiguration) WithUID(value types.UID) *ImagePolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -124,7 +124,7 @@ func (b *ImagePolicyApplyConfiguration) WithUID(value types.UID) *ImagePolicyApp // If called multiple times, the ResourceVersion field is set to the value of the last call. 
func (b *ImagePolicyApplyConfiguration) WithResourceVersion(value string) *ImagePolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -133,7 +133,7 @@ func (b *ImagePolicyApplyConfiguration) WithResourceVersion(value string) *Image // If called multiple times, the Generation field is set to the value of the last call. func (b *ImagePolicyApplyConfiguration) WithGeneration(value int64) *ImagePolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } @@ -142,7 +142,7 @@ func (b *ImagePolicyApplyConfiguration) WithGeneration(value int64) *ImagePolicy // If called multiple times, the CreationTimestamp field is set to the value of the last call. func (b *ImagePolicyApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ImagePolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } @@ -151,7 +151,7 @@ func (b *ImagePolicyApplyConfiguration) WithCreationTimestamp(value metav1.Time) // If called multiple times, the DeletionTimestamp field is set to the value of the last call. func (b *ImagePolicyApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ImagePolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -160,7 +160,7 @@ func (b *ImagePolicyApplyConfiguration) WithDeletionTimestamp(value metav1.Time) // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *ImagePolicyApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ImagePolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -170,11 +170,11 @@ func (b *ImagePolicyApplyConfiguration) WithDeletionGracePeriodSeconds(value int // overwriting an existing map entries in Labels field with the same key. func (b *ImagePolicyApplyConfiguration) WithLabels(entries map[string]string) *ImagePolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -185,11 +185,11 @@ func (b *ImagePolicyApplyConfiguration) WithLabels(entries map[string]string) *I // overwriting an existing map entries in Annotations field with the same key. 
func (b *ImagePolicyApplyConfiguration) WithAnnotations(entries map[string]string) *ImagePolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -203,7 +203,7 @@ func (b *ImagePolicyApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerR if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -214,7 +214,7 @@ func (b *ImagePolicyApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerR func (b *ImagePolicyApplyConfiguration) WithFinalizers(values ...string) *ImagePolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } @@ -244,5 +244,5 @@ func (b *ImagePolicyApplyConfiguration) WithStatus(value *ImagePolicyStatusApply // GetName retrieves the value of the Name field in the declarative configuration. func (b *ImagePolicyApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/imagepolicyspec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/imagepolicyspec.go index 1c706d99f..ac08e9cf4 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/imagepolicyspec.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/imagepolicyspec.go @@ -3,14 +3,14 @@ package v1alpha1 import ( - v1alpha1 "github.com/openshift/api/config/v1alpha1" + configv1alpha1 "github.com/openshift/api/config/v1alpha1" ) // ImagePolicySpecApplyConfiguration represents a declarative configuration of the ImagePolicySpec type for use // with apply. type ImagePolicySpecApplyConfiguration struct { - Scopes []v1alpha1.ImageScope `json:"scopes,omitempty"` - Policy *PolicyApplyConfiguration `json:"policy,omitempty"` + Scopes []configv1alpha1.ImageScope `json:"scopes,omitempty"` + Policy *PolicyApplyConfiguration `json:"policy,omitempty"` } // ImagePolicySpecApplyConfiguration constructs a declarative configuration of the ImagePolicySpec type for use with @@ -22,7 +22,7 @@ func ImagePolicySpec() *ImagePolicySpecApplyConfiguration { // WithScopes adds the given value to the Scopes field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the Scopes field. 
-func (b *ImagePolicySpecApplyConfiguration) WithScopes(values ...v1alpha1.ImageScope) *ImagePolicySpecApplyConfiguration { +func (b *ImagePolicySpecApplyConfiguration) WithScopes(values ...configv1alpha1.ImageScope) *ImagePolicySpecApplyConfiguration { for i := range values { b.Scopes = append(b.Scopes, values[i]) } diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/insightsdatagather.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/insightsdatagather.go index d743eec8d..cf4ae1f00 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/insightsdatagather.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/insightsdatagather.go @@ -69,7 +69,7 @@ func extractInsightsDataGather(insightsDataGather *configv1alpha1.InsightsDataGa // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *InsightsDataGatherApplyConfiguration) WithKind(value string) *InsightsDataGatherApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -77,7 +77,7 @@ func (b *InsightsDataGatherApplyConfiguration) WithKind(value string) *InsightsD // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *InsightsDataGatherApplyConfiguration) WithAPIVersion(value string) *InsightsDataGatherApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -86,7 +86,7 @@ func (b *InsightsDataGatherApplyConfiguration) WithAPIVersion(value string) *Ins // If called multiple times, the Name field is set to the value of the last call. func (b *InsightsDataGatherApplyConfiguration) WithName(value string) *InsightsDataGatherApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -95,7 +95,7 @@ func (b *InsightsDataGatherApplyConfiguration) WithName(value string) *InsightsD // If called multiple times, the GenerateName field is set to the value of the last call. func (b *InsightsDataGatherApplyConfiguration) WithGenerateName(value string) *InsightsDataGatherApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -104,7 +104,7 @@ func (b *InsightsDataGatherApplyConfiguration) WithGenerateName(value string) *I // If called multiple times, the Namespace field is set to the value of the last call. func (b *InsightsDataGatherApplyConfiguration) WithNamespace(value string) *InsightsDataGatherApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -113,7 +113,7 @@ func (b *InsightsDataGatherApplyConfiguration) WithNamespace(value string) *Insi // If called multiple times, the UID field is set to the value of the last call. 
func (b *InsightsDataGatherApplyConfiguration) WithUID(value types.UID) *InsightsDataGatherApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -122,7 +122,7 @@ func (b *InsightsDataGatherApplyConfiguration) WithUID(value types.UID) *Insight // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *InsightsDataGatherApplyConfiguration) WithResourceVersion(value string) *InsightsDataGatherApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -131,7 +131,7 @@ func (b *InsightsDataGatherApplyConfiguration) WithResourceVersion(value string) // If called multiple times, the Generation field is set to the value of the last call. func (b *InsightsDataGatherApplyConfiguration) WithGeneration(value int64) *InsightsDataGatherApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } @@ -140,7 +140,7 @@ func (b *InsightsDataGatherApplyConfiguration) WithGeneration(value int64) *Insi // If called multiple times, the CreationTimestamp field is set to the value of the last call. func (b *InsightsDataGatherApplyConfiguration) WithCreationTimestamp(value metav1.Time) *InsightsDataGatherApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } @@ -149,7 +149,7 @@ func (b *InsightsDataGatherApplyConfiguration) WithCreationTimestamp(value metav // If called multiple times, the DeletionTimestamp field is set to the value of the last call. func (b *InsightsDataGatherApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *InsightsDataGatherApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -158,7 +158,7 @@ func (b *InsightsDataGatherApplyConfiguration) WithDeletionTimestamp(value metav // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *InsightsDataGatherApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *InsightsDataGatherApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -168,11 +168,11 @@ func (b *InsightsDataGatherApplyConfiguration) WithDeletionGracePeriodSeconds(va // overwriting an existing map entries in Labels field with the same key. func (b *InsightsDataGatherApplyConfiguration) WithLabels(entries map[string]string) *InsightsDataGatherApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -183,11 +183,11 @@ func (b *InsightsDataGatherApplyConfiguration) WithLabels(entries map[string]str // overwriting an existing map entries in Annotations field with the same key. 
func (b *InsightsDataGatherApplyConfiguration) WithAnnotations(entries map[string]string) *InsightsDataGatherApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -201,7 +201,7 @@ func (b *InsightsDataGatherApplyConfiguration) WithOwnerReferences(values ...*v1 if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -212,7 +212,7 @@ func (b *InsightsDataGatherApplyConfiguration) WithOwnerReferences(values ...*v1 func (b *InsightsDataGatherApplyConfiguration) WithFinalizers(values ...string) *InsightsDataGatherApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } @@ -242,5 +242,5 @@ func (b *InsightsDataGatherApplyConfiguration) WithStatus(value configv1alpha1.I // GetName retrieves the value of the Name field in the declarative configuration. func (b *InsightsDataGatherApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/policyidentity.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/policyidentity.go index 157c04281..c03a2d663 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/policyidentity.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/policyidentity.go @@ -3,13 +3,13 @@ package v1alpha1 import ( - v1alpha1 "github.com/openshift/api/config/v1alpha1" + configv1alpha1 "github.com/openshift/api/config/v1alpha1" ) // PolicyIdentityApplyConfiguration represents a declarative configuration of the PolicyIdentity type for use // with apply. type PolicyIdentityApplyConfiguration struct { - MatchPolicy *v1alpha1.IdentityMatchPolicy `json:"matchPolicy,omitempty"` + MatchPolicy *configv1alpha1.IdentityMatchPolicy `json:"matchPolicy,omitempty"` PolicyMatchExactRepository *PolicyMatchExactRepositoryApplyConfiguration `json:"exactRepository,omitempty"` PolicyMatchRemapIdentity *PolicyMatchRemapIdentityApplyConfiguration `json:"remapIdentity,omitempty"` } @@ -23,7 +23,7 @@ func PolicyIdentity() *PolicyIdentityApplyConfiguration { // WithMatchPolicy sets the MatchPolicy field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the MatchPolicy field is set to the value of the last call. 
-func (b *PolicyIdentityApplyConfiguration) WithMatchPolicy(value v1alpha1.IdentityMatchPolicy) *PolicyIdentityApplyConfiguration { +func (b *PolicyIdentityApplyConfiguration) WithMatchPolicy(value configv1alpha1.IdentityMatchPolicy) *PolicyIdentityApplyConfiguration { b.MatchPolicy = &value return b } diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/policymatchexactrepository.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/policymatchexactrepository.go index 8b0499ddb..58870d5eb 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/policymatchexactrepository.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/policymatchexactrepository.go @@ -3,13 +3,13 @@ package v1alpha1 import ( - v1alpha1 "github.com/openshift/api/config/v1alpha1" + configv1alpha1 "github.com/openshift/api/config/v1alpha1" ) // PolicyMatchExactRepositoryApplyConfiguration represents a declarative configuration of the PolicyMatchExactRepository type for use // with apply. type PolicyMatchExactRepositoryApplyConfiguration struct { - Repository *v1alpha1.IdentityRepositoryPrefix `json:"repository,omitempty"` + Repository *configv1alpha1.IdentityRepositoryPrefix `json:"repository,omitempty"` } // PolicyMatchExactRepositoryApplyConfiguration constructs a declarative configuration of the PolicyMatchExactRepository type for use with @@ -21,7 +21,7 @@ func PolicyMatchExactRepository() *PolicyMatchExactRepositoryApplyConfiguration // WithRepository sets the Repository field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Repository field is set to the value of the last call. -func (b *PolicyMatchExactRepositoryApplyConfiguration) WithRepository(value v1alpha1.IdentityRepositoryPrefix) *PolicyMatchExactRepositoryApplyConfiguration { +func (b *PolicyMatchExactRepositoryApplyConfiguration) WithRepository(value configv1alpha1.IdentityRepositoryPrefix) *PolicyMatchExactRepositoryApplyConfiguration { b.Repository = &value return b } diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/policymatchremapidentity.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/policymatchremapidentity.go index bfe573c9a..09075d0be 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/policymatchremapidentity.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/policymatchremapidentity.go @@ -3,14 +3,14 @@ package v1alpha1 import ( - v1alpha1 "github.com/openshift/api/config/v1alpha1" + configv1alpha1 "github.com/openshift/api/config/v1alpha1" ) // PolicyMatchRemapIdentityApplyConfiguration represents a declarative configuration of the PolicyMatchRemapIdentity type for use // with apply. 
 type PolicyMatchRemapIdentityApplyConfiguration struct {
-	Prefix       *v1alpha1.IdentityRepositoryPrefix `json:"prefix,omitempty"`
-	SignedPrefix *v1alpha1.IdentityRepositoryPrefix `json:"signedPrefix,omitempty"`
+	Prefix       *configv1alpha1.IdentityRepositoryPrefix `json:"prefix,omitempty"`
+	SignedPrefix *configv1alpha1.IdentityRepositoryPrefix `json:"signedPrefix,omitempty"`
 }
 
 // PolicyMatchRemapIdentityApplyConfiguration constructs a declarative configuration of the PolicyMatchRemapIdentity type for use with
@@ -22,7 +22,7 @@ func PolicyMatchRemapIdentity() *PolicyMatchRemapIdentityApplyConfiguration {
 // WithPrefix sets the Prefix field in the declarative configuration to the given value
 // and returns the receiver, so that objects can be built by chaining "With" function invocations.
 // If called multiple times, the Prefix field is set to the value of the last call.
-func (b *PolicyMatchRemapIdentityApplyConfiguration) WithPrefix(value v1alpha1.IdentityRepositoryPrefix) *PolicyMatchRemapIdentityApplyConfiguration {
+func (b *PolicyMatchRemapIdentityApplyConfiguration) WithPrefix(value configv1alpha1.IdentityRepositoryPrefix) *PolicyMatchRemapIdentityApplyConfiguration {
 	b.Prefix = &value
 	return b
 }
@@ -30,7 +30,7 @@ func (b *PolicyMatchRemapIdentityApplyConfiguration) WithPrefix(value v1alpha1.I
 // WithSignedPrefix sets the SignedPrefix field in the declarative configuration to the given value
 // and returns the receiver, so that objects can be built by chaining "With" function invocations.
 // If called multiple times, the SignedPrefix field is set to the value of the last call.
-func (b *PolicyMatchRemapIdentityApplyConfiguration) WithSignedPrefix(value v1alpha1.IdentityRepositoryPrefix) *PolicyMatchRemapIdentityApplyConfiguration {
+func (b *PolicyMatchRemapIdentityApplyConfiguration) WithSignedPrefix(value configv1alpha1.IdentityRepositoryPrefix) *PolicyMatchRemapIdentityApplyConfiguration {
 	b.SignedPrefix = &value
 	return b
 }
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/policyrootoftrust.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/policyrootoftrust.go
index ca4c73f6a..c525e1667 100644
--- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/policyrootoftrust.go
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/policyrootoftrust.go
@@ -3,13 +3,13 @@
 package v1alpha1
 
 import (
-	v1alpha1 "github.com/openshift/api/config/v1alpha1"
+	configv1alpha1 "github.com/openshift/api/config/v1alpha1"
 )
 
 // PolicyRootOfTrustApplyConfiguration represents a declarative configuration of the PolicyRootOfTrust type for use
 // with apply.
 type PolicyRootOfTrustApplyConfiguration struct {
-	PolicyType        *v1alpha1.PolicyType                 `json:"policyType,omitempty"`
+	PolicyType        *configv1alpha1.PolicyType           `json:"policyType,omitempty"`
 	PublicKey         *PublicKeyApplyConfiguration         `json:"publicKey,omitempty"`
 	FulcioCAWithRekor *FulcioCAWithRekorApplyConfiguration `json:"fulcioCAWithRekor,omitempty"`
 }
@@ -23,7 +23,7 @@ func PolicyRootOfTrust() *PolicyRootOfTrustApplyConfiguration {
 // WithPolicyType sets the PolicyType field in the declarative configuration to the given value
 // and returns the receiver, so that objects can be built by chaining "With" function invocations.
 // If called multiple times, the PolicyType field is set to the value of the last call.
-func (b *PolicyRootOfTrustApplyConfiguration) WithPolicyType(value v1alpha1.PolicyType) *PolicyRootOfTrustApplyConfiguration {
+func (b *PolicyRootOfTrustApplyConfiguration) WithPolicyType(value configv1alpha1.PolicyType) *PolicyRootOfTrustApplyConfiguration {
 	b.PolicyType = &value
 	return b
 }
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/retentionpolicy.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/retentionpolicy.go
index 8fe865a97..981fb2573 100644
--- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/retentionpolicy.go
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/retentionpolicy.go
@@ -3,13 +3,13 @@
 package v1alpha1
 
 import (
-	v1alpha1 "github.com/openshift/api/config/v1alpha1"
+	configv1alpha1 "github.com/openshift/api/config/v1alpha1"
 )
 
 // RetentionPolicyApplyConfiguration represents a declarative configuration of the RetentionPolicy type for use
 // with apply.
 type RetentionPolicyApplyConfiguration struct {
-	RetentionType   *v1alpha1.RetentionType                  `json:"retentionType,omitempty"`
+	RetentionType   *configv1alpha1.RetentionType            `json:"retentionType,omitempty"`
 	RetentionNumber *RetentionNumberConfigApplyConfiguration `json:"retentionNumber,omitempty"`
 	RetentionSize   *RetentionSizeConfigApplyConfiguration   `json:"retentionSize,omitempty"`
 }
@@ -23,7 +23,7 @@ func RetentionPolicy() *RetentionPolicyApplyConfiguration {
 // WithRetentionType sets the RetentionType field in the declarative configuration to the given value
 // and returns the receiver, so that objects can be built by chaining "With" function invocations.
 // If called multiple times, the RetentionType field is set to the value of the last call.
-func (b *RetentionPolicyApplyConfiguration) WithRetentionType(value v1alpha1.RetentionType) *RetentionPolicyApplyConfiguration {
+func (b *RetentionPolicyApplyConfiguration) WithRetentionType(value configv1alpha1.RetentionType) *RetentionPolicyApplyConfiguration {
 	b.RetentionType = &value
 	return b
 }
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/userdefinedmonitoring.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/userdefinedmonitoring.go
new file mode 100644
index 000000000..5aa6998f9
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/userdefinedmonitoring.go
@@ -0,0 +1,27 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+	configv1alpha1 "github.com/openshift/api/config/v1alpha1"
+)
+
+// UserDefinedMonitoringApplyConfiguration represents a declarative configuration of the UserDefinedMonitoring type for use
+// with apply.
+type UserDefinedMonitoringApplyConfiguration struct {
+	Mode *configv1alpha1.UserDefinedMode `json:"mode,omitempty"`
+}
+
+// UserDefinedMonitoringApplyConfiguration constructs a declarative configuration of the UserDefinedMonitoring type for use with
+// apply.
+func UserDefinedMonitoring() *UserDefinedMonitoringApplyConfiguration {
+	return &UserDefinedMonitoringApplyConfiguration{}
+}
+
+// WithMode sets the Mode field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Mode field is set to the value of the last call.
+func (b *UserDefinedMonitoringApplyConfiguration) WithMode(value configv1alpha1.UserDefinedMode) *UserDefinedMonitoringApplyConfiguration {
+	b.Mode = &value
+	return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/internal/internal.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/internal/internal.go
index 003e4d2ef..5af2eeb17 100644
--- a/vendor/github.com/openshift/client-go/config/applyconfigurations/internal/internal.go
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/internal/internal.go
@@ -3,8 +3,8 @@
 package internal
 
 import (
-	"fmt"
-	"sync"
+	fmt "fmt"
+	sync "sync"
 
 	typed "sigs.k8s.io/structured-merge-diff/v4/typed"
 )
@@ -139,6 +139,11 @@ var schemaYAML = typed.YAMLObject(`types:
 - name: com.github.openshift.api.config.v1.AWSPlatformStatus
   map:
     fields:
+    - name: cloudLoadBalancerConfig
+      type:
+        namedType: com.github.openshift.api.config.v1.CloudLoadBalancerConfig
+      default:
+        dnsType: PlatformDefault
     - name: region
       type:
         scalar: string
@@ -2167,6 +2172,10 @@
     - name: cgroupMode
       type:
        scalar: string
+    - name: minimumKubeletVersion
+      type:
+        scalar: string
+      default: ""
     - name: workerLatencyProfile
       type:
        scalar: string
@@ -2197,9 +2206,7 @@
       list:
         elementType:
          namedType: com.github.openshift.api.config.v1.NutanixResourceIdentifier
-        elementRelationship: associative
-        keys:
-        - type
+        elementRelationship: atomic
 - name: com.github.openshift.api.config.v1.NutanixPlatformLoadBalancer
   map:
     fields:
@@ -3044,6 +3051,9 @@
 - name: com.github.openshift.api.config.v1.Release
   map:
     fields:
+    - name: architecture
+      type:
+        scalar: string
     - name: channels
       type:
        list:
@@ -3382,6 +3392,45 @@
      type:
        scalar: string
      default: ""
+- name: com.github.openshift.api.config.v1.VSphereFailureDomainHostGroup
+  map:
+    fields:
+    - name: hostGroup
+      type:
+        scalar: string
+      default: ""
+    - name: vmGroup
+      type:
+        scalar: string
+      default: ""
+    - name: vmHostRule
+      type:
+        scalar: string
+      default: ""
+- name: com.github.openshift.api.config.v1.VSphereFailureDomainRegionAffinity
+  map:
+    fields:
+    - name: type
+      type:
+        scalar: string
+      default: ""
+    unions:
+    - discriminator: type
+- name: com.github.openshift.api.config.v1.VSphereFailureDomainZoneAffinity
+  map:
+    fields:
+    - name: hostGroup
+      type:
+        namedType: com.github.openshift.api.config.v1.VSphereFailureDomainHostGroup
+    - name: type
+      type:
+        scalar: string
+      default: ""
+    unions:
+    - discriminator: type
+      fields:
+      - fieldName: hostGroup
+        discriminatorValue: HostGroup
 - name: com.github.openshift.api.config.v1.VSpherePlatformFailureDomainSpec
   map:
     fields:
@@ -3393,6 +3442,9 @@
      type:
        scalar: string
      default: ""
+    - name: regionAffinity
+      type:
+        namedType: com.github.openshift.api.config.v1.VSphereFailureDomainRegionAffinity
     - name: server
       type:
        scalar: string
@@ -3405,6 +3457,9 @@
      type:
        scalar: string
      default: ""
+    - name: zoneAffinity
+      type:
+        namedType: com.github.openshift.api.config.v1.VSphereFailureDomainZoneAffinity
 - name: com.github.openshift.api.config.v1.VSpherePlatformLoadBalancer
   map:
     fields:
@@ -3655,6 +3710,46 @@
         elementRelationship: associative
         keys:
         - type
+- name: com.github.openshift.api.config.v1alpha1.ClusterMonitoring
+  map:
+    fields:
+    - name: apiVersion
+      type:
+        scalar: string
+    - name: kind
+      type:
+        scalar: string
+    - name: metadata
+      type:
+        namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta
+      default: {}
+    - name: spec
+      type:
+        namedType: com.github.openshift.api.config.v1alpha1.ClusterMonitoringSpec
+      default: {}
+    - name: status
+      type:
+        namedType: com.github.openshift.api.config.v1alpha1.ClusterMonitoringStatus
+      default: {}
+- name: com.github.openshift.api.config.v1alpha1.ClusterMonitoringSpec
+  map:
+    fields:
+    - name: userDefined
+      type:
+        namedType: com.github.openshift.api.config.v1alpha1.UserDefinedMonitoring
+      default: {}
+- name: com.github.openshift.api.config.v1alpha1.ClusterMonitoringStatus
+  map:
+    elementType:
+      scalar: untyped
+      list:
+        elementType:
+          namedType: __untyped_atomic_
+        elementRelationship: atomic
+      map:
+        elementType:
+          namedType: __untyped_deduced_
+        elementRelationship: separable
 - name: com.github.openshift.api.config.v1alpha1.EtcdBackupSpec
   map:
     fields:
@@ -3905,6 +4000,13 @@
     - name: maxSizeOfBackupsGb
       type:
        scalar: numeric
+- name: com.github.openshift.api.config.v1alpha1.UserDefinedMonitoring
+  map:
+    fields:
+    - name: mode
+      type:
+        scalar: string
+      default: ""
 - name: io.k8s.api.core.v1.ConfigMapKeySelector
   map:
     fields:
diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/clientset.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/clientset.go
index 29896542d..f9ed357b6 100644
--- a/vendor/github.com/openshift/client-go/config/clientset/versioned/clientset.go
+++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/clientset.go
@@ -3,8 +3,8 @@
 package versioned
 
 import (
-	"fmt"
-	"net/http"
+	fmt "fmt"
+	http "net/http"
 
 	configv1 "github.com/openshift/client-go/config/clientset/versioned/typed/config/v1"
 	configv1alpha1 "github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha1"
diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/apiserver.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/apiserver.go
index 5751cebe7..20e56733a 100644
--- a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/apiserver.go
+++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/apiserver.go
@@ -3,10 +3,10 @@
 package v1
 
 import (
-	"context"
+	context "context"
 
-	v1 "github.com/openshift/api/config/v1"
-	configv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1"
+	configv1 "github.com/openshift/api/config/v1"
+	applyconfigurationsconfigv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1"
 	scheme "github.com/openshift/client-go/config/clientset/versioned/scheme"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	types "k8s.io/apimachinery/pkg/types"
@@ -22,36 +22,37 @@ type APIServersGetter interface {
 // APIServerInterface has methods to work with APIServer resources.
 type APIServerInterface interface {
-	Create(ctx context.Context, aPIServer *v1.APIServer, opts metav1.CreateOptions) (*v1.APIServer, error)
-	Update(ctx context.Context, aPIServer *v1.APIServer, opts metav1.UpdateOptions) (*v1.APIServer, error)
+	Create(ctx context.Context, aPIServer *configv1.APIServer, opts metav1.CreateOptions) (*configv1.APIServer, error)
+	Update(ctx context.Context, aPIServer *configv1.APIServer, opts metav1.UpdateOptions) (*configv1.APIServer, error)
 	// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
- UpdateStatus(ctx context.Context, aPIServer *v1.APIServer, opts metav1.UpdateOptions) (*v1.APIServer, error) + UpdateStatus(ctx context.Context, aPIServer *configv1.APIServer, opts metav1.UpdateOptions) (*configv1.APIServer, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error - Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.APIServer, error) - List(ctx context.Context, opts metav1.ListOptions) (*v1.APIServerList, error) + Get(ctx context.Context, name string, opts metav1.GetOptions) (*configv1.APIServer, error) + List(ctx context.Context, opts metav1.ListOptions) (*configv1.APIServerList, error) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.APIServer, err error) - Apply(ctx context.Context, aPIServer *configv1.APIServerApplyConfiguration, opts metav1.ApplyOptions) (result *v1.APIServer, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *configv1.APIServer, err error) + Apply(ctx context.Context, aPIServer *applyconfigurationsconfigv1.APIServerApplyConfiguration, opts metav1.ApplyOptions) (result *configv1.APIServer, err error) // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). - ApplyStatus(ctx context.Context, aPIServer *configv1.APIServerApplyConfiguration, opts metav1.ApplyOptions) (result *v1.APIServer, err error) + ApplyStatus(ctx context.Context, aPIServer *applyconfigurationsconfigv1.APIServerApplyConfiguration, opts metav1.ApplyOptions) (result *configv1.APIServer, err error) APIServerExpansion } // aPIServers implements APIServerInterface type aPIServers struct { - *gentype.ClientWithListAndApply[*v1.APIServer, *v1.APIServerList, *configv1.APIServerApplyConfiguration] + *gentype.ClientWithListAndApply[*configv1.APIServer, *configv1.APIServerList, *applyconfigurationsconfigv1.APIServerApplyConfiguration] } // newAPIServers returns a APIServers func newAPIServers(c *ConfigV1Client) *aPIServers { return &aPIServers{ - gentype.NewClientWithListAndApply[*v1.APIServer, *v1.APIServerList, *configv1.APIServerApplyConfiguration]( + gentype.NewClientWithListAndApply[*configv1.APIServer, *configv1.APIServerList, *applyconfigurationsconfigv1.APIServerApplyConfiguration]( "apiservers", c.RESTClient(), scheme.ParameterCodec, "", - func() *v1.APIServer { return &v1.APIServer{} }, - func() *v1.APIServerList { return &v1.APIServerList{} }), + func() *configv1.APIServer { return &configv1.APIServer{} }, + func() *configv1.APIServerList { return &configv1.APIServerList{} }, + ), } } diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/authentication.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/authentication.go index 89b1779b3..f2f9cae61 100644 --- a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/authentication.go +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/authentication.go @@ -3,10 +3,10 @@ package v1 import ( - "context" + context "context" - v1 "github.com/openshift/api/config/v1" - configv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1" + configv1 
"github.com/openshift/api/config/v1" + applyconfigurationsconfigv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1" scheme "github.com/openshift/client-go/config/clientset/versioned/scheme" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -22,36 +22,37 @@ type AuthenticationsGetter interface { // AuthenticationInterface has methods to work with Authentication resources. type AuthenticationInterface interface { - Create(ctx context.Context, authentication *v1.Authentication, opts metav1.CreateOptions) (*v1.Authentication, error) - Update(ctx context.Context, authentication *v1.Authentication, opts metav1.UpdateOptions) (*v1.Authentication, error) + Create(ctx context.Context, authentication *configv1.Authentication, opts metav1.CreateOptions) (*configv1.Authentication, error) + Update(ctx context.Context, authentication *configv1.Authentication, opts metav1.UpdateOptions) (*configv1.Authentication, error) // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - UpdateStatus(ctx context.Context, authentication *v1.Authentication, opts metav1.UpdateOptions) (*v1.Authentication, error) + UpdateStatus(ctx context.Context, authentication *configv1.Authentication, opts metav1.UpdateOptions) (*configv1.Authentication, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error - Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.Authentication, error) - List(ctx context.Context, opts metav1.ListOptions) (*v1.AuthenticationList, error) + Get(ctx context.Context, name string, opts metav1.GetOptions) (*configv1.Authentication, error) + List(ctx context.Context, opts metav1.ListOptions) (*configv1.AuthenticationList, error) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Authentication, err error) - Apply(ctx context.Context, authentication *configv1.AuthenticationApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Authentication, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *configv1.Authentication, err error) + Apply(ctx context.Context, authentication *applyconfigurationsconfigv1.AuthenticationApplyConfiguration, opts metav1.ApplyOptions) (result *configv1.Authentication, err error) // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
- ApplyStatus(ctx context.Context, authentication *configv1.AuthenticationApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Authentication, err error) + ApplyStatus(ctx context.Context, authentication *applyconfigurationsconfigv1.AuthenticationApplyConfiguration, opts metav1.ApplyOptions) (result *configv1.Authentication, err error) AuthenticationExpansion } // authentications implements AuthenticationInterface type authentications struct { - *gentype.ClientWithListAndApply[*v1.Authentication, *v1.AuthenticationList, *configv1.AuthenticationApplyConfiguration] + *gentype.ClientWithListAndApply[*configv1.Authentication, *configv1.AuthenticationList, *applyconfigurationsconfigv1.AuthenticationApplyConfiguration] } // newAuthentications returns a Authentications func newAuthentications(c *ConfigV1Client) *authentications { return &authentications{ - gentype.NewClientWithListAndApply[*v1.Authentication, *v1.AuthenticationList, *configv1.AuthenticationApplyConfiguration]( + gentype.NewClientWithListAndApply[*configv1.Authentication, *configv1.AuthenticationList, *applyconfigurationsconfigv1.AuthenticationApplyConfiguration]( "authentications", c.RESTClient(), scheme.ParameterCodec, "", - func() *v1.Authentication { return &v1.Authentication{} }, - func() *v1.AuthenticationList { return &v1.AuthenticationList{} }), + func() *configv1.Authentication { return &configv1.Authentication{} }, + func() *configv1.AuthenticationList { return &configv1.AuthenticationList{} }, + ), } } diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/build.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/build.go index 2ecfb68d9..6e144b1f2 100644 --- a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/build.go +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/build.go @@ -3,10 +3,10 @@ package v1 import ( - "context" + context "context" - v1 "github.com/openshift/api/config/v1" - configv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1" + configv1 "github.com/openshift/api/config/v1" + applyconfigurationsconfigv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1" scheme "github.com/openshift/client-go/config/clientset/versioned/scheme" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -22,32 +22,33 @@ type BuildsGetter interface { // BuildInterface has methods to work with Build resources. 
type BuildInterface interface { - Create(ctx context.Context, build *v1.Build, opts metav1.CreateOptions) (*v1.Build, error) - Update(ctx context.Context, build *v1.Build, opts metav1.UpdateOptions) (*v1.Build, error) + Create(ctx context.Context, build *configv1.Build, opts metav1.CreateOptions) (*configv1.Build, error) + Update(ctx context.Context, build *configv1.Build, opts metav1.UpdateOptions) (*configv1.Build, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error - Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.Build, error) - List(ctx context.Context, opts metav1.ListOptions) (*v1.BuildList, error) + Get(ctx context.Context, name string, opts metav1.GetOptions) (*configv1.Build, error) + List(ctx context.Context, opts metav1.ListOptions) (*configv1.BuildList, error) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Build, err error) - Apply(ctx context.Context, build *configv1.BuildApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Build, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *configv1.Build, err error) + Apply(ctx context.Context, build *applyconfigurationsconfigv1.BuildApplyConfiguration, opts metav1.ApplyOptions) (result *configv1.Build, err error) BuildExpansion } // builds implements BuildInterface type builds struct { - *gentype.ClientWithListAndApply[*v1.Build, *v1.BuildList, *configv1.BuildApplyConfiguration] + *gentype.ClientWithListAndApply[*configv1.Build, *configv1.BuildList, *applyconfigurationsconfigv1.BuildApplyConfiguration] } // newBuilds returns a Builds func newBuilds(c *ConfigV1Client) *builds { return &builds{ - gentype.NewClientWithListAndApply[*v1.Build, *v1.BuildList, *configv1.BuildApplyConfiguration]( + gentype.NewClientWithListAndApply[*configv1.Build, *configv1.BuildList, *applyconfigurationsconfigv1.BuildApplyConfiguration]( "builds", c.RESTClient(), scheme.ParameterCodec, "", - func() *v1.Build { return &v1.Build{} }, - func() *v1.BuildList { return &v1.BuildList{} }), + func() *configv1.Build { return &configv1.Build{} }, + func() *configv1.BuildList { return &configv1.BuildList{} }, + ), } } diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/clusteroperator.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/clusteroperator.go index b1015c40d..a2f03a502 100644 --- a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/clusteroperator.go +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/clusteroperator.go @@ -3,10 +3,10 @@ package v1 import ( - "context" + context "context" - v1 "github.com/openshift/api/config/v1" - configv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1" + configv1 "github.com/openshift/api/config/v1" + applyconfigurationsconfigv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1" scheme "github.com/openshift/client-go/config/clientset/versioned/scheme" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -22,36 +22,37 @@ type ClusterOperatorsGetter interface { // ClusterOperatorInterface has methods to 
work with ClusterOperator resources. type ClusterOperatorInterface interface { - Create(ctx context.Context, clusterOperator *v1.ClusterOperator, opts metav1.CreateOptions) (*v1.ClusterOperator, error) - Update(ctx context.Context, clusterOperator *v1.ClusterOperator, opts metav1.UpdateOptions) (*v1.ClusterOperator, error) + Create(ctx context.Context, clusterOperator *configv1.ClusterOperator, opts metav1.CreateOptions) (*configv1.ClusterOperator, error) + Update(ctx context.Context, clusterOperator *configv1.ClusterOperator, opts metav1.UpdateOptions) (*configv1.ClusterOperator, error) // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - UpdateStatus(ctx context.Context, clusterOperator *v1.ClusterOperator, opts metav1.UpdateOptions) (*v1.ClusterOperator, error) + UpdateStatus(ctx context.Context, clusterOperator *configv1.ClusterOperator, opts metav1.UpdateOptions) (*configv1.ClusterOperator, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error - Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.ClusterOperator, error) - List(ctx context.Context, opts metav1.ListOptions) (*v1.ClusterOperatorList, error) + Get(ctx context.Context, name string, opts metav1.GetOptions) (*configv1.ClusterOperator, error) + List(ctx context.Context, opts metav1.ListOptions) (*configv1.ClusterOperatorList, error) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ClusterOperator, err error) - Apply(ctx context.Context, clusterOperator *configv1.ClusterOperatorApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ClusterOperator, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *configv1.ClusterOperator, err error) + Apply(ctx context.Context, clusterOperator *applyconfigurationsconfigv1.ClusterOperatorApplyConfiguration, opts metav1.ApplyOptions) (result *configv1.ClusterOperator, err error) // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
- ApplyStatus(ctx context.Context, clusterOperator *configv1.ClusterOperatorApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ClusterOperator, err error) + ApplyStatus(ctx context.Context, clusterOperator *applyconfigurationsconfigv1.ClusterOperatorApplyConfiguration, opts metav1.ApplyOptions) (result *configv1.ClusterOperator, err error) ClusterOperatorExpansion } // clusterOperators implements ClusterOperatorInterface type clusterOperators struct { - *gentype.ClientWithListAndApply[*v1.ClusterOperator, *v1.ClusterOperatorList, *configv1.ClusterOperatorApplyConfiguration] + *gentype.ClientWithListAndApply[*configv1.ClusterOperator, *configv1.ClusterOperatorList, *applyconfigurationsconfigv1.ClusterOperatorApplyConfiguration] } // newClusterOperators returns a ClusterOperators func newClusterOperators(c *ConfigV1Client) *clusterOperators { return &clusterOperators{ - gentype.NewClientWithListAndApply[*v1.ClusterOperator, *v1.ClusterOperatorList, *configv1.ClusterOperatorApplyConfiguration]( + gentype.NewClientWithListAndApply[*configv1.ClusterOperator, *configv1.ClusterOperatorList, *applyconfigurationsconfigv1.ClusterOperatorApplyConfiguration]( "clusteroperators", c.RESTClient(), scheme.ParameterCodec, "", - func() *v1.ClusterOperator { return &v1.ClusterOperator{} }, - func() *v1.ClusterOperatorList { return &v1.ClusterOperatorList{} }), + func() *configv1.ClusterOperator { return &configv1.ClusterOperator{} }, + func() *configv1.ClusterOperatorList { return &configv1.ClusterOperatorList{} }, + ), } } diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/clusterversion.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/clusterversion.go index 2b6717096..cb03327d9 100644 --- a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/clusterversion.go +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/clusterversion.go @@ -3,10 +3,10 @@ package v1 import ( - "context" + context "context" - v1 "github.com/openshift/api/config/v1" - configv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1" + configv1 "github.com/openshift/api/config/v1" + applyconfigurationsconfigv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1" scheme "github.com/openshift/client-go/config/clientset/versioned/scheme" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -22,36 +22,37 @@ type ClusterVersionsGetter interface { // ClusterVersionInterface has methods to work with ClusterVersion resources. type ClusterVersionInterface interface { - Create(ctx context.Context, clusterVersion *v1.ClusterVersion, opts metav1.CreateOptions) (*v1.ClusterVersion, error) - Update(ctx context.Context, clusterVersion *v1.ClusterVersion, opts metav1.UpdateOptions) (*v1.ClusterVersion, error) + Create(ctx context.Context, clusterVersion *configv1.ClusterVersion, opts metav1.CreateOptions) (*configv1.ClusterVersion, error) + Update(ctx context.Context, clusterVersion *configv1.ClusterVersion, opts metav1.UpdateOptions) (*configv1.ClusterVersion, error) // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
- UpdateStatus(ctx context.Context, clusterVersion *v1.ClusterVersion, opts metav1.UpdateOptions) (*v1.ClusterVersion, error) + UpdateStatus(ctx context.Context, clusterVersion *configv1.ClusterVersion, opts metav1.UpdateOptions) (*configv1.ClusterVersion, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error - Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.ClusterVersion, error) - List(ctx context.Context, opts metav1.ListOptions) (*v1.ClusterVersionList, error) + Get(ctx context.Context, name string, opts metav1.GetOptions) (*configv1.ClusterVersion, error) + List(ctx context.Context, opts metav1.ListOptions) (*configv1.ClusterVersionList, error) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ClusterVersion, err error) - Apply(ctx context.Context, clusterVersion *configv1.ClusterVersionApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ClusterVersion, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *configv1.ClusterVersion, err error) + Apply(ctx context.Context, clusterVersion *applyconfigurationsconfigv1.ClusterVersionApplyConfiguration, opts metav1.ApplyOptions) (result *configv1.ClusterVersion, err error) // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). - ApplyStatus(ctx context.Context, clusterVersion *configv1.ClusterVersionApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ClusterVersion, err error) + ApplyStatus(ctx context.Context, clusterVersion *applyconfigurationsconfigv1.ClusterVersionApplyConfiguration, opts metav1.ApplyOptions) (result *configv1.ClusterVersion, err error) ClusterVersionExpansion } // clusterVersions implements ClusterVersionInterface type clusterVersions struct { - *gentype.ClientWithListAndApply[*v1.ClusterVersion, *v1.ClusterVersionList, *configv1.ClusterVersionApplyConfiguration] + *gentype.ClientWithListAndApply[*configv1.ClusterVersion, *configv1.ClusterVersionList, *applyconfigurationsconfigv1.ClusterVersionApplyConfiguration] } // newClusterVersions returns a ClusterVersions func newClusterVersions(c *ConfigV1Client) *clusterVersions { return &clusterVersions{ - gentype.NewClientWithListAndApply[*v1.ClusterVersion, *v1.ClusterVersionList, *configv1.ClusterVersionApplyConfiguration]( + gentype.NewClientWithListAndApply[*configv1.ClusterVersion, *configv1.ClusterVersionList, *applyconfigurationsconfigv1.ClusterVersionApplyConfiguration]( "clusterversions", c.RESTClient(), scheme.ParameterCodec, "", - func() *v1.ClusterVersion { return &v1.ClusterVersion{} }, - func() *v1.ClusterVersionList { return &v1.ClusterVersionList{} }), + func() *configv1.ClusterVersion { return &configv1.ClusterVersion{} }, + func() *configv1.ClusterVersionList { return &configv1.ClusterVersionList{} }, + ), } } diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/config_client.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/config_client.go index de4f2fa32..bbb0b312e 100644 --- a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/config_client.go +++ 
b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/config_client.go @@ -3,10 +3,10 @@ package v1 import ( - "net/http" + http "net/http" - v1 "github.com/openshift/api/config/v1" - "github.com/openshift/client-go/config/clientset/versioned/scheme" + configv1 "github.com/openshift/api/config/v1" + scheme "github.com/openshift/client-go/config/clientset/versioned/scheme" rest "k8s.io/client-go/rest" ) @@ -169,10 +169,10 @@ func New(c rest.Interface) *ConfigV1Client { } func setConfigDefaults(config *rest.Config) error { - gv := v1.SchemeGroupVersion + gv := configv1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/console.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/console.go index 994d5994d..ead87be18 100644 --- a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/console.go +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/console.go @@ -3,10 +3,10 @@ package v1 import ( - "context" + context "context" - v1 "github.com/openshift/api/config/v1" - configv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1" + configv1 "github.com/openshift/api/config/v1" + applyconfigurationsconfigv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1" scheme "github.com/openshift/client-go/config/clientset/versioned/scheme" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -22,36 +22,37 @@ type ConsolesGetter interface { // ConsoleInterface has methods to work with Console resources. type ConsoleInterface interface { - Create(ctx context.Context, console *v1.Console, opts metav1.CreateOptions) (*v1.Console, error) - Update(ctx context.Context, console *v1.Console, opts metav1.UpdateOptions) (*v1.Console, error) + Create(ctx context.Context, console *configv1.Console, opts metav1.CreateOptions) (*configv1.Console, error) + Update(ctx context.Context, console *configv1.Console, opts metav1.UpdateOptions) (*configv1.Console, error) // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
- UpdateStatus(ctx context.Context, console *v1.Console, opts metav1.UpdateOptions) (*v1.Console, error) + UpdateStatus(ctx context.Context, console *configv1.Console, opts metav1.UpdateOptions) (*configv1.Console, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error - Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.Console, error) - List(ctx context.Context, opts metav1.ListOptions) (*v1.ConsoleList, error) + Get(ctx context.Context, name string, opts metav1.GetOptions) (*configv1.Console, error) + List(ctx context.Context, opts metav1.ListOptions) (*configv1.ConsoleList, error) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Console, err error) - Apply(ctx context.Context, console *configv1.ConsoleApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Console, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *configv1.Console, err error) + Apply(ctx context.Context, console *applyconfigurationsconfigv1.ConsoleApplyConfiguration, opts metav1.ApplyOptions) (result *configv1.Console, err error) // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). - ApplyStatus(ctx context.Context, console *configv1.ConsoleApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Console, err error) + ApplyStatus(ctx context.Context, console *applyconfigurationsconfigv1.ConsoleApplyConfiguration, opts metav1.ApplyOptions) (result *configv1.Console, err error) ConsoleExpansion } // consoles implements ConsoleInterface type consoles struct { - *gentype.ClientWithListAndApply[*v1.Console, *v1.ConsoleList, *configv1.ConsoleApplyConfiguration] + *gentype.ClientWithListAndApply[*configv1.Console, *configv1.ConsoleList, *applyconfigurationsconfigv1.ConsoleApplyConfiguration] } // newConsoles returns a Consoles func newConsoles(c *ConfigV1Client) *consoles { return &consoles{ - gentype.NewClientWithListAndApply[*v1.Console, *v1.ConsoleList, *configv1.ConsoleApplyConfiguration]( + gentype.NewClientWithListAndApply[*configv1.Console, *configv1.ConsoleList, *applyconfigurationsconfigv1.ConsoleApplyConfiguration]( "consoles", c.RESTClient(), scheme.ParameterCodec, "", - func() *v1.Console { return &v1.Console{} }, - func() *v1.ConsoleList { return &v1.ConsoleList{} }), + func() *configv1.Console { return &configv1.Console{} }, + func() *configv1.ConsoleList { return &configv1.ConsoleList{} }, + ), } } diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/dns.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/dns.go index b91aa0d0d..76efd8610 100644 --- a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/dns.go +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/dns.go @@ -3,10 +3,10 @@ package v1 import ( - "context" + context "context" - v1 "github.com/openshift/api/config/v1" - configv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1" + configv1 "github.com/openshift/api/config/v1" + applyconfigurationsconfigv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1" scheme 
"github.com/openshift/client-go/config/clientset/versioned/scheme" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -22,36 +22,37 @@ type DNSesGetter interface { // DNSInterface has methods to work with DNS resources. type DNSInterface interface { - Create(ctx context.Context, dNS *v1.DNS, opts metav1.CreateOptions) (*v1.DNS, error) - Update(ctx context.Context, dNS *v1.DNS, opts metav1.UpdateOptions) (*v1.DNS, error) + Create(ctx context.Context, dNS *configv1.DNS, opts metav1.CreateOptions) (*configv1.DNS, error) + Update(ctx context.Context, dNS *configv1.DNS, opts metav1.UpdateOptions) (*configv1.DNS, error) // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - UpdateStatus(ctx context.Context, dNS *v1.DNS, opts metav1.UpdateOptions) (*v1.DNS, error) + UpdateStatus(ctx context.Context, dNS *configv1.DNS, opts metav1.UpdateOptions) (*configv1.DNS, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error - Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.DNS, error) - List(ctx context.Context, opts metav1.ListOptions) (*v1.DNSList, error) + Get(ctx context.Context, name string, opts metav1.GetOptions) (*configv1.DNS, error) + List(ctx context.Context, opts metav1.ListOptions) (*configv1.DNSList, error) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.DNS, err error) - Apply(ctx context.Context, dNS *configv1.DNSApplyConfiguration, opts metav1.ApplyOptions) (result *v1.DNS, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *configv1.DNS, err error) + Apply(ctx context.Context, dNS *applyconfigurationsconfigv1.DNSApplyConfiguration, opts metav1.ApplyOptions) (result *configv1.DNS, err error) // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
- ApplyStatus(ctx context.Context, dNS *configv1.DNSApplyConfiguration, opts metav1.ApplyOptions) (result *v1.DNS, err error) + ApplyStatus(ctx context.Context, dNS *applyconfigurationsconfigv1.DNSApplyConfiguration, opts metav1.ApplyOptions) (result *configv1.DNS, err error) DNSExpansion } // dNSes implements DNSInterface type dNSes struct { - *gentype.ClientWithListAndApply[*v1.DNS, *v1.DNSList, *configv1.DNSApplyConfiguration] + *gentype.ClientWithListAndApply[*configv1.DNS, *configv1.DNSList, *applyconfigurationsconfigv1.DNSApplyConfiguration] } // newDNSes returns a DNSes func newDNSes(c *ConfigV1Client) *dNSes { return &dNSes{ - gentype.NewClientWithListAndApply[*v1.DNS, *v1.DNSList, *configv1.DNSApplyConfiguration]( + gentype.NewClientWithListAndApply[*configv1.DNS, *configv1.DNSList, *applyconfigurationsconfigv1.DNSApplyConfiguration]( "dnses", c.RESTClient(), scheme.ParameterCodec, "", - func() *v1.DNS { return &v1.DNS{} }, - func() *v1.DNSList { return &v1.DNSList{} }), + func() *configv1.DNS { return &configv1.DNS{} }, + func() *configv1.DNSList { return &configv1.DNSList{} }, + ), } } diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/featuregate.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/featuregate.go index fcbfe3577..2a41c2e73 100644 --- a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/featuregate.go +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/featuregate.go @@ -3,10 +3,10 @@ package v1 import ( - "context" + context "context" - v1 "github.com/openshift/api/config/v1" - configv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1" + configv1 "github.com/openshift/api/config/v1" + applyconfigurationsconfigv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1" scheme "github.com/openshift/client-go/config/clientset/versioned/scheme" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -22,36 +22,37 @@ type FeatureGatesGetter interface { // FeatureGateInterface has methods to work with FeatureGate resources. type FeatureGateInterface interface { - Create(ctx context.Context, featureGate *v1.FeatureGate, opts metav1.CreateOptions) (*v1.FeatureGate, error) - Update(ctx context.Context, featureGate *v1.FeatureGate, opts metav1.UpdateOptions) (*v1.FeatureGate, error) + Create(ctx context.Context, featureGate *configv1.FeatureGate, opts metav1.CreateOptions) (*configv1.FeatureGate, error) + Update(ctx context.Context, featureGate *configv1.FeatureGate, opts metav1.UpdateOptions) (*configv1.FeatureGate, error) // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
- UpdateStatus(ctx context.Context, featureGate *v1.FeatureGate, opts metav1.UpdateOptions) (*v1.FeatureGate, error) + UpdateStatus(ctx context.Context, featureGate *configv1.FeatureGate, opts metav1.UpdateOptions) (*configv1.FeatureGate, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error - Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.FeatureGate, error) - List(ctx context.Context, opts metav1.ListOptions) (*v1.FeatureGateList, error) + Get(ctx context.Context, name string, opts metav1.GetOptions) (*configv1.FeatureGate, error) + List(ctx context.Context, opts metav1.ListOptions) (*configv1.FeatureGateList, error) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.FeatureGate, err error) - Apply(ctx context.Context, featureGate *configv1.FeatureGateApplyConfiguration, opts metav1.ApplyOptions) (result *v1.FeatureGate, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *configv1.FeatureGate, err error) + Apply(ctx context.Context, featureGate *applyconfigurationsconfigv1.FeatureGateApplyConfiguration, opts metav1.ApplyOptions) (result *configv1.FeatureGate, err error) // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). - ApplyStatus(ctx context.Context, featureGate *configv1.FeatureGateApplyConfiguration, opts metav1.ApplyOptions) (result *v1.FeatureGate, err error) + ApplyStatus(ctx context.Context, featureGate *applyconfigurationsconfigv1.FeatureGateApplyConfiguration, opts metav1.ApplyOptions) (result *configv1.FeatureGate, err error) FeatureGateExpansion } // featureGates implements FeatureGateInterface type featureGates struct { - *gentype.ClientWithListAndApply[*v1.FeatureGate, *v1.FeatureGateList, *configv1.FeatureGateApplyConfiguration] + *gentype.ClientWithListAndApply[*configv1.FeatureGate, *configv1.FeatureGateList, *applyconfigurationsconfigv1.FeatureGateApplyConfiguration] } // newFeatureGates returns a FeatureGates func newFeatureGates(c *ConfigV1Client) *featureGates { return &featureGates{ - gentype.NewClientWithListAndApply[*v1.FeatureGate, *v1.FeatureGateList, *configv1.FeatureGateApplyConfiguration]( + gentype.NewClientWithListAndApply[*configv1.FeatureGate, *configv1.FeatureGateList, *applyconfigurationsconfigv1.FeatureGateApplyConfiguration]( "featuregates", c.RESTClient(), scheme.ParameterCodec, "", - func() *v1.FeatureGate { return &v1.FeatureGate{} }, - func() *v1.FeatureGateList { return &v1.FeatureGateList{} }), + func() *configv1.FeatureGate { return &configv1.FeatureGate{} }, + func() *configv1.FeatureGateList { return &configv1.FeatureGateList{} }, + ), } } diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/image.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/image.go index 536693d3e..2950a19c6 100644 --- a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/image.go +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/image.go @@ -3,10 +3,10 @@ package v1 import ( - "context" + context "context" - v1 "github.com/openshift/api/config/v1" - configv1 
"github.com/openshift/client-go/config/applyconfigurations/config/v1" + configv1 "github.com/openshift/api/config/v1" + applyconfigurationsconfigv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1" scheme "github.com/openshift/client-go/config/clientset/versioned/scheme" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -22,36 +22,37 @@ type ImagesGetter interface { // ImageInterface has methods to work with Image resources. type ImageInterface interface { - Create(ctx context.Context, image *v1.Image, opts metav1.CreateOptions) (*v1.Image, error) - Update(ctx context.Context, image *v1.Image, opts metav1.UpdateOptions) (*v1.Image, error) + Create(ctx context.Context, image *configv1.Image, opts metav1.CreateOptions) (*configv1.Image, error) + Update(ctx context.Context, image *configv1.Image, opts metav1.UpdateOptions) (*configv1.Image, error) // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - UpdateStatus(ctx context.Context, image *v1.Image, opts metav1.UpdateOptions) (*v1.Image, error) + UpdateStatus(ctx context.Context, image *configv1.Image, opts metav1.UpdateOptions) (*configv1.Image, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error - Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.Image, error) - List(ctx context.Context, opts metav1.ListOptions) (*v1.ImageList, error) + Get(ctx context.Context, name string, opts metav1.GetOptions) (*configv1.Image, error) + List(ctx context.Context, opts metav1.ListOptions) (*configv1.ImageList, error) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Image, err error) - Apply(ctx context.Context, image *configv1.ImageApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Image, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *configv1.Image, err error) + Apply(ctx context.Context, image *applyconfigurationsconfigv1.ImageApplyConfiguration, opts metav1.ApplyOptions) (result *configv1.Image, err error) // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
- ApplyStatus(ctx context.Context, image *configv1.ImageApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Image, err error) + ApplyStatus(ctx context.Context, image *applyconfigurationsconfigv1.ImageApplyConfiguration, opts metav1.ApplyOptions) (result *configv1.Image, err error) ImageExpansion } // images implements ImageInterface type images struct { - *gentype.ClientWithListAndApply[*v1.Image, *v1.ImageList, *configv1.ImageApplyConfiguration] + *gentype.ClientWithListAndApply[*configv1.Image, *configv1.ImageList, *applyconfigurationsconfigv1.ImageApplyConfiguration] } // newImages returns a Images func newImages(c *ConfigV1Client) *images { return &images{ - gentype.NewClientWithListAndApply[*v1.Image, *v1.ImageList, *configv1.ImageApplyConfiguration]( + gentype.NewClientWithListAndApply[*configv1.Image, *configv1.ImageList, *applyconfigurationsconfigv1.ImageApplyConfiguration]( "images", c.RESTClient(), scheme.ParameterCodec, "", - func() *v1.Image { return &v1.Image{} }, - func() *v1.ImageList { return &v1.ImageList{} }), + func() *configv1.Image { return &configv1.Image{} }, + func() *configv1.ImageList { return &configv1.ImageList{} }, + ), } } diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/imagecontentpolicy.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/imagecontentpolicy.go index bc8a8b342..ce52d6c81 100644 --- a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/imagecontentpolicy.go +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/imagecontentpolicy.go @@ -3,10 +3,10 @@ package v1 import ( - "context" + context "context" - v1 "github.com/openshift/api/config/v1" - configv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1" + configv1 "github.com/openshift/api/config/v1" + applyconfigurationsconfigv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1" scheme "github.com/openshift/client-go/config/clientset/versioned/scheme" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -22,32 +22,33 @@ type ImageContentPoliciesGetter interface { // ImageContentPolicyInterface has methods to work with ImageContentPolicy resources. 
type ImageContentPolicyInterface interface { - Create(ctx context.Context, imageContentPolicy *v1.ImageContentPolicy, opts metav1.CreateOptions) (*v1.ImageContentPolicy, error) - Update(ctx context.Context, imageContentPolicy *v1.ImageContentPolicy, opts metav1.UpdateOptions) (*v1.ImageContentPolicy, error) + Create(ctx context.Context, imageContentPolicy *configv1.ImageContentPolicy, opts metav1.CreateOptions) (*configv1.ImageContentPolicy, error) + Update(ctx context.Context, imageContentPolicy *configv1.ImageContentPolicy, opts metav1.UpdateOptions) (*configv1.ImageContentPolicy, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error - Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.ImageContentPolicy, error) - List(ctx context.Context, opts metav1.ListOptions) (*v1.ImageContentPolicyList, error) + Get(ctx context.Context, name string, opts metav1.GetOptions) (*configv1.ImageContentPolicy, error) + List(ctx context.Context, opts metav1.ListOptions) (*configv1.ImageContentPolicyList, error) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ImageContentPolicy, err error) - Apply(ctx context.Context, imageContentPolicy *configv1.ImageContentPolicyApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ImageContentPolicy, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *configv1.ImageContentPolicy, err error) + Apply(ctx context.Context, imageContentPolicy *applyconfigurationsconfigv1.ImageContentPolicyApplyConfiguration, opts metav1.ApplyOptions) (result *configv1.ImageContentPolicy, err error) ImageContentPolicyExpansion } // imageContentPolicies implements ImageContentPolicyInterface type imageContentPolicies struct { - *gentype.ClientWithListAndApply[*v1.ImageContentPolicy, *v1.ImageContentPolicyList, *configv1.ImageContentPolicyApplyConfiguration] + *gentype.ClientWithListAndApply[*configv1.ImageContentPolicy, *configv1.ImageContentPolicyList, *applyconfigurationsconfigv1.ImageContentPolicyApplyConfiguration] } // newImageContentPolicies returns a ImageContentPolicies func newImageContentPolicies(c *ConfigV1Client) *imageContentPolicies { return &imageContentPolicies{ - gentype.NewClientWithListAndApply[*v1.ImageContentPolicy, *v1.ImageContentPolicyList, *configv1.ImageContentPolicyApplyConfiguration]( + gentype.NewClientWithListAndApply[*configv1.ImageContentPolicy, *configv1.ImageContentPolicyList, *applyconfigurationsconfigv1.ImageContentPolicyApplyConfiguration]( "imagecontentpolicies", c.RESTClient(), scheme.ParameterCodec, "", - func() *v1.ImageContentPolicy { return &v1.ImageContentPolicy{} }, - func() *v1.ImageContentPolicyList { return &v1.ImageContentPolicyList{} }), + func() *configv1.ImageContentPolicy { return &configv1.ImageContentPolicy{} }, + func() *configv1.ImageContentPolicyList { return &configv1.ImageContentPolicyList{} }, + ), } } diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/imagedigestmirrorset.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/imagedigestmirrorset.go index eca9b95b6..70018dd7f 100644 --- 
a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/imagedigestmirrorset.go +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/imagedigestmirrorset.go @@ -3,10 +3,10 @@ package v1 import ( - "context" + context "context" - v1 "github.com/openshift/api/config/v1" - configv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1" + configv1 "github.com/openshift/api/config/v1" + applyconfigurationsconfigv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1" scheme "github.com/openshift/client-go/config/clientset/versioned/scheme" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -22,36 +22,37 @@ type ImageDigestMirrorSetsGetter interface { // ImageDigestMirrorSetInterface has methods to work with ImageDigestMirrorSet resources. type ImageDigestMirrorSetInterface interface { - Create(ctx context.Context, imageDigestMirrorSet *v1.ImageDigestMirrorSet, opts metav1.CreateOptions) (*v1.ImageDigestMirrorSet, error) - Update(ctx context.Context, imageDigestMirrorSet *v1.ImageDigestMirrorSet, opts metav1.UpdateOptions) (*v1.ImageDigestMirrorSet, error) + Create(ctx context.Context, imageDigestMirrorSet *configv1.ImageDigestMirrorSet, opts metav1.CreateOptions) (*configv1.ImageDigestMirrorSet, error) + Update(ctx context.Context, imageDigestMirrorSet *configv1.ImageDigestMirrorSet, opts metav1.UpdateOptions) (*configv1.ImageDigestMirrorSet, error) // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - UpdateStatus(ctx context.Context, imageDigestMirrorSet *v1.ImageDigestMirrorSet, opts metav1.UpdateOptions) (*v1.ImageDigestMirrorSet, error) + UpdateStatus(ctx context.Context, imageDigestMirrorSet *configv1.ImageDigestMirrorSet, opts metav1.UpdateOptions) (*configv1.ImageDigestMirrorSet, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error - Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.ImageDigestMirrorSet, error) - List(ctx context.Context, opts metav1.ListOptions) (*v1.ImageDigestMirrorSetList, error) + Get(ctx context.Context, name string, opts metav1.GetOptions) (*configv1.ImageDigestMirrorSet, error) + List(ctx context.Context, opts metav1.ListOptions) (*configv1.ImageDigestMirrorSetList, error) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ImageDigestMirrorSet, err error) - Apply(ctx context.Context, imageDigestMirrorSet *configv1.ImageDigestMirrorSetApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ImageDigestMirrorSet, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *configv1.ImageDigestMirrorSet, err error) + Apply(ctx context.Context, imageDigestMirrorSet *applyconfigurationsconfigv1.ImageDigestMirrorSetApplyConfiguration, opts metav1.ApplyOptions) (result *configv1.ImageDigestMirrorSet, err error) // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
- ApplyStatus(ctx context.Context, imageDigestMirrorSet *configv1.ImageDigestMirrorSetApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ImageDigestMirrorSet, err error) + ApplyStatus(ctx context.Context, imageDigestMirrorSet *applyconfigurationsconfigv1.ImageDigestMirrorSetApplyConfiguration, opts metav1.ApplyOptions) (result *configv1.ImageDigestMirrorSet, err error) ImageDigestMirrorSetExpansion } // imageDigestMirrorSets implements ImageDigestMirrorSetInterface type imageDigestMirrorSets struct { - *gentype.ClientWithListAndApply[*v1.ImageDigestMirrorSet, *v1.ImageDigestMirrorSetList, *configv1.ImageDigestMirrorSetApplyConfiguration] + *gentype.ClientWithListAndApply[*configv1.ImageDigestMirrorSet, *configv1.ImageDigestMirrorSetList, *applyconfigurationsconfigv1.ImageDigestMirrorSetApplyConfiguration] } // newImageDigestMirrorSets returns a ImageDigestMirrorSets func newImageDigestMirrorSets(c *ConfigV1Client) *imageDigestMirrorSets { return &imageDigestMirrorSets{ - gentype.NewClientWithListAndApply[*v1.ImageDigestMirrorSet, *v1.ImageDigestMirrorSetList, *configv1.ImageDigestMirrorSetApplyConfiguration]( + gentype.NewClientWithListAndApply[*configv1.ImageDigestMirrorSet, *configv1.ImageDigestMirrorSetList, *applyconfigurationsconfigv1.ImageDigestMirrorSetApplyConfiguration]( "imagedigestmirrorsets", c.RESTClient(), scheme.ParameterCodec, "", - func() *v1.ImageDigestMirrorSet { return &v1.ImageDigestMirrorSet{} }, - func() *v1.ImageDigestMirrorSetList { return &v1.ImageDigestMirrorSetList{} }), + func() *configv1.ImageDigestMirrorSet { return &configv1.ImageDigestMirrorSet{} }, + func() *configv1.ImageDigestMirrorSetList { return &configv1.ImageDigestMirrorSetList{} }, + ), } } diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/imagetagmirrorset.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/imagetagmirrorset.go index add148a55..ca3c6e0be 100644 --- a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/imagetagmirrorset.go +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/imagetagmirrorset.go @@ -3,10 +3,10 @@ package v1 import ( - "context" + context "context" - v1 "github.com/openshift/api/config/v1" - configv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1" + configv1 "github.com/openshift/api/config/v1" + applyconfigurationsconfigv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1" scheme "github.com/openshift/client-go/config/clientset/versioned/scheme" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -22,36 +22,37 @@ type ImageTagMirrorSetsGetter interface { // ImageTagMirrorSetInterface has methods to work with ImageTagMirrorSet resources. type ImageTagMirrorSetInterface interface { - Create(ctx context.Context, imageTagMirrorSet *v1.ImageTagMirrorSet, opts metav1.CreateOptions) (*v1.ImageTagMirrorSet, error) - Update(ctx context.Context, imageTagMirrorSet *v1.ImageTagMirrorSet, opts metav1.UpdateOptions) (*v1.ImageTagMirrorSet, error) + Create(ctx context.Context, imageTagMirrorSet *configv1.ImageTagMirrorSet, opts metav1.CreateOptions) (*configv1.ImageTagMirrorSet, error) + Update(ctx context.Context, imageTagMirrorSet *configv1.ImageTagMirrorSet, opts metav1.UpdateOptions) (*configv1.ImageTagMirrorSet, error) // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
- UpdateStatus(ctx context.Context, imageTagMirrorSet *v1.ImageTagMirrorSet, opts metav1.UpdateOptions) (*v1.ImageTagMirrorSet, error) + UpdateStatus(ctx context.Context, imageTagMirrorSet *configv1.ImageTagMirrorSet, opts metav1.UpdateOptions) (*configv1.ImageTagMirrorSet, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error - Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.ImageTagMirrorSet, error) - List(ctx context.Context, opts metav1.ListOptions) (*v1.ImageTagMirrorSetList, error) + Get(ctx context.Context, name string, opts metav1.GetOptions) (*configv1.ImageTagMirrorSet, error) + List(ctx context.Context, opts metav1.ListOptions) (*configv1.ImageTagMirrorSetList, error) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ImageTagMirrorSet, err error) - Apply(ctx context.Context, imageTagMirrorSet *configv1.ImageTagMirrorSetApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ImageTagMirrorSet, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *configv1.ImageTagMirrorSet, err error) + Apply(ctx context.Context, imageTagMirrorSet *applyconfigurationsconfigv1.ImageTagMirrorSetApplyConfiguration, opts metav1.ApplyOptions) (result *configv1.ImageTagMirrorSet, err error) // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). - ApplyStatus(ctx context.Context, imageTagMirrorSet *configv1.ImageTagMirrorSetApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ImageTagMirrorSet, err error) + ApplyStatus(ctx context.Context, imageTagMirrorSet *applyconfigurationsconfigv1.ImageTagMirrorSetApplyConfiguration, opts metav1.ApplyOptions) (result *configv1.ImageTagMirrorSet, err error) ImageTagMirrorSetExpansion } // imageTagMirrorSets implements ImageTagMirrorSetInterface type imageTagMirrorSets struct { - *gentype.ClientWithListAndApply[*v1.ImageTagMirrorSet, *v1.ImageTagMirrorSetList, *configv1.ImageTagMirrorSetApplyConfiguration] + *gentype.ClientWithListAndApply[*configv1.ImageTagMirrorSet, *configv1.ImageTagMirrorSetList, *applyconfigurationsconfigv1.ImageTagMirrorSetApplyConfiguration] } // newImageTagMirrorSets returns a ImageTagMirrorSets func newImageTagMirrorSets(c *ConfigV1Client) *imageTagMirrorSets { return &imageTagMirrorSets{ - gentype.NewClientWithListAndApply[*v1.ImageTagMirrorSet, *v1.ImageTagMirrorSetList, *configv1.ImageTagMirrorSetApplyConfiguration]( + gentype.NewClientWithListAndApply[*configv1.ImageTagMirrorSet, *configv1.ImageTagMirrorSetList, *applyconfigurationsconfigv1.ImageTagMirrorSetApplyConfiguration]( "imagetagmirrorsets", c.RESTClient(), scheme.ParameterCodec, "", - func() *v1.ImageTagMirrorSet { return &v1.ImageTagMirrorSet{} }, - func() *v1.ImageTagMirrorSetList { return &v1.ImageTagMirrorSetList{} }), + func() *configv1.ImageTagMirrorSet { return &configv1.ImageTagMirrorSet{} }, + func() *configv1.ImageTagMirrorSetList { return &configv1.ImageTagMirrorSetList{} }, + ), } } diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/infrastructure.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/infrastructure.go index 4d8550833..eb307026c 100644 --- 
a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/infrastructure.go +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/infrastructure.go @@ -3,10 +3,10 @@ package v1 import ( - "context" + context "context" - v1 "github.com/openshift/api/config/v1" - configv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1" + configv1 "github.com/openshift/api/config/v1" + applyconfigurationsconfigv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1" scheme "github.com/openshift/client-go/config/clientset/versioned/scheme" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -22,36 +22,37 @@ type InfrastructuresGetter interface { // InfrastructureInterface has methods to work with Infrastructure resources. type InfrastructureInterface interface { - Create(ctx context.Context, infrastructure *v1.Infrastructure, opts metav1.CreateOptions) (*v1.Infrastructure, error) - Update(ctx context.Context, infrastructure *v1.Infrastructure, opts metav1.UpdateOptions) (*v1.Infrastructure, error) + Create(ctx context.Context, infrastructure *configv1.Infrastructure, opts metav1.CreateOptions) (*configv1.Infrastructure, error) + Update(ctx context.Context, infrastructure *configv1.Infrastructure, opts metav1.UpdateOptions) (*configv1.Infrastructure, error) // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - UpdateStatus(ctx context.Context, infrastructure *v1.Infrastructure, opts metav1.UpdateOptions) (*v1.Infrastructure, error) + UpdateStatus(ctx context.Context, infrastructure *configv1.Infrastructure, opts metav1.UpdateOptions) (*configv1.Infrastructure, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error - Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.Infrastructure, error) - List(ctx context.Context, opts metav1.ListOptions) (*v1.InfrastructureList, error) + Get(ctx context.Context, name string, opts metav1.GetOptions) (*configv1.Infrastructure, error) + List(ctx context.Context, opts metav1.ListOptions) (*configv1.InfrastructureList, error) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Infrastructure, err error) - Apply(ctx context.Context, infrastructure *configv1.InfrastructureApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Infrastructure, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *configv1.Infrastructure, err error) + Apply(ctx context.Context, infrastructure *applyconfigurationsconfigv1.InfrastructureApplyConfiguration, opts metav1.ApplyOptions) (result *configv1.Infrastructure, err error) // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
- ApplyStatus(ctx context.Context, infrastructure *configv1.InfrastructureApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Infrastructure, err error) + ApplyStatus(ctx context.Context, infrastructure *applyconfigurationsconfigv1.InfrastructureApplyConfiguration, opts metav1.ApplyOptions) (result *configv1.Infrastructure, err error) InfrastructureExpansion } // infrastructures implements InfrastructureInterface type infrastructures struct { - *gentype.ClientWithListAndApply[*v1.Infrastructure, *v1.InfrastructureList, *configv1.InfrastructureApplyConfiguration] + *gentype.ClientWithListAndApply[*configv1.Infrastructure, *configv1.InfrastructureList, *applyconfigurationsconfigv1.InfrastructureApplyConfiguration] } // newInfrastructures returns a Infrastructures func newInfrastructures(c *ConfigV1Client) *infrastructures { return &infrastructures{ - gentype.NewClientWithListAndApply[*v1.Infrastructure, *v1.InfrastructureList, *configv1.InfrastructureApplyConfiguration]( + gentype.NewClientWithListAndApply[*configv1.Infrastructure, *configv1.InfrastructureList, *applyconfigurationsconfigv1.InfrastructureApplyConfiguration]( "infrastructures", c.RESTClient(), scheme.ParameterCodec, "", - func() *v1.Infrastructure { return &v1.Infrastructure{} }, - func() *v1.InfrastructureList { return &v1.InfrastructureList{} }), + func() *configv1.Infrastructure { return &configv1.Infrastructure{} }, + func() *configv1.InfrastructureList { return &configv1.InfrastructureList{} }, + ), } } diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/ingress.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/ingress.go index cc4fa53c4..81057042d 100644 --- a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/ingress.go +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/ingress.go @@ -3,10 +3,10 @@ package v1 import ( - "context" + context "context" - v1 "github.com/openshift/api/config/v1" - configv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1" + configv1 "github.com/openshift/api/config/v1" + applyconfigurationsconfigv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1" scheme "github.com/openshift/client-go/config/clientset/versioned/scheme" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -22,36 +22,37 @@ type IngressesGetter interface { // IngressInterface has methods to work with Ingress resources. type IngressInterface interface { - Create(ctx context.Context, ingress *v1.Ingress, opts metav1.CreateOptions) (*v1.Ingress, error) - Update(ctx context.Context, ingress *v1.Ingress, opts metav1.UpdateOptions) (*v1.Ingress, error) + Create(ctx context.Context, ingress *configv1.Ingress, opts metav1.CreateOptions) (*configv1.Ingress, error) + Update(ctx context.Context, ingress *configv1.Ingress, opts metav1.UpdateOptions) (*configv1.Ingress, error) // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
- UpdateStatus(ctx context.Context, ingress *v1.Ingress, opts metav1.UpdateOptions) (*v1.Ingress, error) + UpdateStatus(ctx context.Context, ingress *configv1.Ingress, opts metav1.UpdateOptions) (*configv1.Ingress, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error - Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.Ingress, error) - List(ctx context.Context, opts metav1.ListOptions) (*v1.IngressList, error) + Get(ctx context.Context, name string, opts metav1.GetOptions) (*configv1.Ingress, error) + List(ctx context.Context, opts metav1.ListOptions) (*configv1.IngressList, error) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Ingress, err error) - Apply(ctx context.Context, ingress *configv1.IngressApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Ingress, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *configv1.Ingress, err error) + Apply(ctx context.Context, ingress *applyconfigurationsconfigv1.IngressApplyConfiguration, opts metav1.ApplyOptions) (result *configv1.Ingress, err error) // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). - ApplyStatus(ctx context.Context, ingress *configv1.IngressApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Ingress, err error) + ApplyStatus(ctx context.Context, ingress *applyconfigurationsconfigv1.IngressApplyConfiguration, opts metav1.ApplyOptions) (result *configv1.Ingress, err error) IngressExpansion } // ingresses implements IngressInterface type ingresses struct { - *gentype.ClientWithListAndApply[*v1.Ingress, *v1.IngressList, *configv1.IngressApplyConfiguration] + *gentype.ClientWithListAndApply[*configv1.Ingress, *configv1.IngressList, *applyconfigurationsconfigv1.IngressApplyConfiguration] } // newIngresses returns a Ingresses func newIngresses(c *ConfigV1Client) *ingresses { return &ingresses{ - gentype.NewClientWithListAndApply[*v1.Ingress, *v1.IngressList, *configv1.IngressApplyConfiguration]( + gentype.NewClientWithListAndApply[*configv1.Ingress, *configv1.IngressList, *applyconfigurationsconfigv1.IngressApplyConfiguration]( "ingresses", c.RESTClient(), scheme.ParameterCodec, "", - func() *v1.Ingress { return &v1.Ingress{} }, - func() *v1.IngressList { return &v1.IngressList{} }), + func() *configv1.Ingress { return &configv1.Ingress{} }, + func() *configv1.IngressList { return &configv1.IngressList{} }, + ), } } diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/network.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/network.go index ca61ca93a..c58e0f211 100644 --- a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/network.go +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/network.go @@ -3,10 +3,10 @@ package v1 import ( - "context" + context "context" - v1 "github.com/openshift/api/config/v1" - configv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1" + configv1 "github.com/openshift/api/config/v1" + applyconfigurationsconfigv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1" scheme 
"github.com/openshift/client-go/config/clientset/versioned/scheme" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -22,36 +22,37 @@ type NetworksGetter interface { // NetworkInterface has methods to work with Network resources. type NetworkInterface interface { - Create(ctx context.Context, network *v1.Network, opts metav1.CreateOptions) (*v1.Network, error) - Update(ctx context.Context, network *v1.Network, opts metav1.UpdateOptions) (*v1.Network, error) + Create(ctx context.Context, network *configv1.Network, opts metav1.CreateOptions) (*configv1.Network, error) + Update(ctx context.Context, network *configv1.Network, opts metav1.UpdateOptions) (*configv1.Network, error) // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - UpdateStatus(ctx context.Context, network *v1.Network, opts metav1.UpdateOptions) (*v1.Network, error) + UpdateStatus(ctx context.Context, network *configv1.Network, opts metav1.UpdateOptions) (*configv1.Network, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error - Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.Network, error) - List(ctx context.Context, opts metav1.ListOptions) (*v1.NetworkList, error) + Get(ctx context.Context, name string, opts metav1.GetOptions) (*configv1.Network, error) + List(ctx context.Context, opts metav1.ListOptions) (*configv1.NetworkList, error) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Network, err error) - Apply(ctx context.Context, network *configv1.NetworkApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Network, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *configv1.Network, err error) + Apply(ctx context.Context, network *applyconfigurationsconfigv1.NetworkApplyConfiguration, opts metav1.ApplyOptions) (result *configv1.Network, err error) // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
- ApplyStatus(ctx context.Context, network *configv1.NetworkApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Network, err error) + ApplyStatus(ctx context.Context, network *applyconfigurationsconfigv1.NetworkApplyConfiguration, opts metav1.ApplyOptions) (result *configv1.Network, err error) NetworkExpansion } // networks implements NetworkInterface type networks struct { - *gentype.ClientWithListAndApply[*v1.Network, *v1.NetworkList, *configv1.NetworkApplyConfiguration] + *gentype.ClientWithListAndApply[*configv1.Network, *configv1.NetworkList, *applyconfigurationsconfigv1.NetworkApplyConfiguration] } // newNetworks returns a Networks func newNetworks(c *ConfigV1Client) *networks { return &networks{ - gentype.NewClientWithListAndApply[*v1.Network, *v1.NetworkList, *configv1.NetworkApplyConfiguration]( + gentype.NewClientWithListAndApply[*configv1.Network, *configv1.NetworkList, *applyconfigurationsconfigv1.NetworkApplyConfiguration]( "networks", c.RESTClient(), scheme.ParameterCodec, "", - func() *v1.Network { return &v1.Network{} }, - func() *v1.NetworkList { return &v1.NetworkList{} }), + func() *configv1.Network { return &configv1.Network{} }, + func() *configv1.NetworkList { return &configv1.NetworkList{} }, + ), } } diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/node.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/node.go index 0ec0e5e5a..b573b1598 100644 --- a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/node.go +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/node.go @@ -3,10 +3,10 @@ package v1 import ( - "context" + context "context" - v1 "github.com/openshift/api/config/v1" - configv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1" + configv1 "github.com/openshift/api/config/v1" + applyconfigurationsconfigv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1" scheme "github.com/openshift/client-go/config/clientset/versioned/scheme" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -22,36 +22,37 @@ type NodesGetter interface { // NodeInterface has methods to work with Node resources. type NodeInterface interface { - Create(ctx context.Context, node *v1.Node, opts metav1.CreateOptions) (*v1.Node, error) - Update(ctx context.Context, node *v1.Node, opts metav1.UpdateOptions) (*v1.Node, error) + Create(ctx context.Context, node *configv1.Node, opts metav1.CreateOptions) (*configv1.Node, error) + Update(ctx context.Context, node *configv1.Node, opts metav1.UpdateOptions) (*configv1.Node, error) // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
- UpdateStatus(ctx context.Context, node *v1.Node, opts metav1.UpdateOptions) (*v1.Node, error) + UpdateStatus(ctx context.Context, node *configv1.Node, opts metav1.UpdateOptions) (*configv1.Node, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error - Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.Node, error) - List(ctx context.Context, opts metav1.ListOptions) (*v1.NodeList, error) + Get(ctx context.Context, name string, opts metav1.GetOptions) (*configv1.Node, error) + List(ctx context.Context, opts metav1.ListOptions) (*configv1.NodeList, error) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Node, err error) - Apply(ctx context.Context, node *configv1.NodeApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Node, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *configv1.Node, err error) + Apply(ctx context.Context, node *applyconfigurationsconfigv1.NodeApplyConfiguration, opts metav1.ApplyOptions) (result *configv1.Node, err error) // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). - ApplyStatus(ctx context.Context, node *configv1.NodeApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Node, err error) + ApplyStatus(ctx context.Context, node *applyconfigurationsconfigv1.NodeApplyConfiguration, opts metav1.ApplyOptions) (result *configv1.Node, err error) NodeExpansion } // nodes implements NodeInterface type nodes struct { - *gentype.ClientWithListAndApply[*v1.Node, *v1.NodeList, *configv1.NodeApplyConfiguration] + *gentype.ClientWithListAndApply[*configv1.Node, *configv1.NodeList, *applyconfigurationsconfigv1.NodeApplyConfiguration] } // newNodes returns a Nodes func newNodes(c *ConfigV1Client) *nodes { return &nodes{ - gentype.NewClientWithListAndApply[*v1.Node, *v1.NodeList, *configv1.NodeApplyConfiguration]( + gentype.NewClientWithListAndApply[*configv1.Node, *configv1.NodeList, *applyconfigurationsconfigv1.NodeApplyConfiguration]( "nodes", c.RESTClient(), scheme.ParameterCodec, "", - func() *v1.Node { return &v1.Node{} }, - func() *v1.NodeList { return &v1.NodeList{} }), + func() *configv1.Node { return &configv1.Node{} }, + func() *configv1.NodeList { return &configv1.NodeList{} }, + ), } } diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/oauth.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/oauth.go index d6f7814aa..755a93873 100644 --- a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/oauth.go +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/oauth.go @@ -3,10 +3,10 @@ package v1 import ( - "context" + context "context" - v1 "github.com/openshift/api/config/v1" - configv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1" + configv1 "github.com/openshift/api/config/v1" + applyconfigurationsconfigv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1" scheme "github.com/openshift/client-go/config/clientset/versioned/scheme" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -22,36 +22,37 @@ type OAuthsGetter 
interface { // OAuthInterface has methods to work with OAuth resources. type OAuthInterface interface { - Create(ctx context.Context, oAuth *v1.OAuth, opts metav1.CreateOptions) (*v1.OAuth, error) - Update(ctx context.Context, oAuth *v1.OAuth, opts metav1.UpdateOptions) (*v1.OAuth, error) + Create(ctx context.Context, oAuth *configv1.OAuth, opts metav1.CreateOptions) (*configv1.OAuth, error) + Update(ctx context.Context, oAuth *configv1.OAuth, opts metav1.UpdateOptions) (*configv1.OAuth, error) // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - UpdateStatus(ctx context.Context, oAuth *v1.OAuth, opts metav1.UpdateOptions) (*v1.OAuth, error) + UpdateStatus(ctx context.Context, oAuth *configv1.OAuth, opts metav1.UpdateOptions) (*configv1.OAuth, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error - Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.OAuth, error) - List(ctx context.Context, opts metav1.ListOptions) (*v1.OAuthList, error) + Get(ctx context.Context, name string, opts metav1.GetOptions) (*configv1.OAuth, error) + List(ctx context.Context, opts metav1.ListOptions) (*configv1.OAuthList, error) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.OAuth, err error) - Apply(ctx context.Context, oAuth *configv1.OAuthApplyConfiguration, opts metav1.ApplyOptions) (result *v1.OAuth, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *configv1.OAuth, err error) + Apply(ctx context.Context, oAuth *applyconfigurationsconfigv1.OAuthApplyConfiguration, opts metav1.ApplyOptions) (result *configv1.OAuth, err error) // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
- ApplyStatus(ctx context.Context, oAuth *configv1.OAuthApplyConfiguration, opts metav1.ApplyOptions) (result *v1.OAuth, err error) + ApplyStatus(ctx context.Context, oAuth *applyconfigurationsconfigv1.OAuthApplyConfiguration, opts metav1.ApplyOptions) (result *configv1.OAuth, err error) OAuthExpansion } // oAuths implements OAuthInterface type oAuths struct { - *gentype.ClientWithListAndApply[*v1.OAuth, *v1.OAuthList, *configv1.OAuthApplyConfiguration] + *gentype.ClientWithListAndApply[*configv1.OAuth, *configv1.OAuthList, *applyconfigurationsconfigv1.OAuthApplyConfiguration] } // newOAuths returns a OAuths func newOAuths(c *ConfigV1Client) *oAuths { return &oAuths{ - gentype.NewClientWithListAndApply[*v1.OAuth, *v1.OAuthList, *configv1.OAuthApplyConfiguration]( + gentype.NewClientWithListAndApply[*configv1.OAuth, *configv1.OAuthList, *applyconfigurationsconfigv1.OAuthApplyConfiguration]( "oauths", c.RESTClient(), scheme.ParameterCodec, "", - func() *v1.OAuth { return &v1.OAuth{} }, - func() *v1.OAuthList { return &v1.OAuthList{} }), + func() *configv1.OAuth { return &configv1.OAuth{} }, + func() *configv1.OAuthList { return &configv1.OAuthList{} }, + ), } } diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/operatorhub.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/operatorhub.go index c8a291486..e3ba1b8ab 100644 --- a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/operatorhub.go +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/operatorhub.go @@ -3,10 +3,10 @@ package v1 import ( - "context" + context "context" - v1 "github.com/openshift/api/config/v1" - configv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1" + configv1 "github.com/openshift/api/config/v1" + applyconfigurationsconfigv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1" scheme "github.com/openshift/client-go/config/clientset/versioned/scheme" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -22,36 +22,37 @@ type OperatorHubsGetter interface { // OperatorHubInterface has methods to work with OperatorHub resources. type OperatorHubInterface interface { - Create(ctx context.Context, operatorHub *v1.OperatorHub, opts metav1.CreateOptions) (*v1.OperatorHub, error) - Update(ctx context.Context, operatorHub *v1.OperatorHub, opts metav1.UpdateOptions) (*v1.OperatorHub, error) + Create(ctx context.Context, operatorHub *configv1.OperatorHub, opts metav1.CreateOptions) (*configv1.OperatorHub, error) + Update(ctx context.Context, operatorHub *configv1.OperatorHub, opts metav1.UpdateOptions) (*configv1.OperatorHub, error) // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
- UpdateStatus(ctx context.Context, operatorHub *v1.OperatorHub, opts metav1.UpdateOptions) (*v1.OperatorHub, error) + UpdateStatus(ctx context.Context, operatorHub *configv1.OperatorHub, opts metav1.UpdateOptions) (*configv1.OperatorHub, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error - Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.OperatorHub, error) - List(ctx context.Context, opts metav1.ListOptions) (*v1.OperatorHubList, error) + Get(ctx context.Context, name string, opts metav1.GetOptions) (*configv1.OperatorHub, error) + List(ctx context.Context, opts metav1.ListOptions) (*configv1.OperatorHubList, error) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.OperatorHub, err error) - Apply(ctx context.Context, operatorHub *configv1.OperatorHubApplyConfiguration, opts metav1.ApplyOptions) (result *v1.OperatorHub, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *configv1.OperatorHub, err error) + Apply(ctx context.Context, operatorHub *applyconfigurationsconfigv1.OperatorHubApplyConfiguration, opts metav1.ApplyOptions) (result *configv1.OperatorHub, err error) // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). - ApplyStatus(ctx context.Context, operatorHub *configv1.OperatorHubApplyConfiguration, opts metav1.ApplyOptions) (result *v1.OperatorHub, err error) + ApplyStatus(ctx context.Context, operatorHub *applyconfigurationsconfigv1.OperatorHubApplyConfiguration, opts metav1.ApplyOptions) (result *configv1.OperatorHub, err error) OperatorHubExpansion } // operatorHubs implements OperatorHubInterface type operatorHubs struct { - *gentype.ClientWithListAndApply[*v1.OperatorHub, *v1.OperatorHubList, *configv1.OperatorHubApplyConfiguration] + *gentype.ClientWithListAndApply[*configv1.OperatorHub, *configv1.OperatorHubList, *applyconfigurationsconfigv1.OperatorHubApplyConfiguration] } // newOperatorHubs returns a OperatorHubs func newOperatorHubs(c *ConfigV1Client) *operatorHubs { return &operatorHubs{ - gentype.NewClientWithListAndApply[*v1.OperatorHub, *v1.OperatorHubList, *configv1.OperatorHubApplyConfiguration]( + gentype.NewClientWithListAndApply[*configv1.OperatorHub, *configv1.OperatorHubList, *applyconfigurationsconfigv1.OperatorHubApplyConfiguration]( "operatorhubs", c.RESTClient(), scheme.ParameterCodec, "", - func() *v1.OperatorHub { return &v1.OperatorHub{} }, - func() *v1.OperatorHubList { return &v1.OperatorHubList{} }), + func() *configv1.OperatorHub { return &configv1.OperatorHub{} }, + func() *configv1.OperatorHubList { return &configv1.OperatorHubList{} }, + ), } } diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/project.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/project.go index 75cd01ec1..5cde353a6 100644 --- a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/project.go +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/project.go @@ -3,10 +3,10 @@ package v1 import ( - "context" + context "context" - v1 "github.com/openshift/api/config/v1" - configv1 
"github.com/openshift/client-go/config/applyconfigurations/config/v1" + configv1 "github.com/openshift/api/config/v1" + applyconfigurationsconfigv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1" scheme "github.com/openshift/client-go/config/clientset/versioned/scheme" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -22,36 +22,37 @@ type ProjectsGetter interface { // ProjectInterface has methods to work with Project resources. type ProjectInterface interface { - Create(ctx context.Context, project *v1.Project, opts metav1.CreateOptions) (*v1.Project, error) - Update(ctx context.Context, project *v1.Project, opts metav1.UpdateOptions) (*v1.Project, error) + Create(ctx context.Context, project *configv1.Project, opts metav1.CreateOptions) (*configv1.Project, error) + Update(ctx context.Context, project *configv1.Project, opts metav1.UpdateOptions) (*configv1.Project, error) // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - UpdateStatus(ctx context.Context, project *v1.Project, opts metav1.UpdateOptions) (*v1.Project, error) + UpdateStatus(ctx context.Context, project *configv1.Project, opts metav1.UpdateOptions) (*configv1.Project, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error - Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.Project, error) - List(ctx context.Context, opts metav1.ListOptions) (*v1.ProjectList, error) + Get(ctx context.Context, name string, opts metav1.GetOptions) (*configv1.Project, error) + List(ctx context.Context, opts metav1.ListOptions) (*configv1.ProjectList, error) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Project, err error) - Apply(ctx context.Context, project *configv1.ProjectApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Project, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *configv1.Project, err error) + Apply(ctx context.Context, project *applyconfigurationsconfigv1.ProjectApplyConfiguration, opts metav1.ApplyOptions) (result *configv1.Project, err error) // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
- ApplyStatus(ctx context.Context, project *configv1.ProjectApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Project, err error) + ApplyStatus(ctx context.Context, project *applyconfigurationsconfigv1.ProjectApplyConfiguration, opts metav1.ApplyOptions) (result *configv1.Project, err error) ProjectExpansion } // projects implements ProjectInterface type projects struct { - *gentype.ClientWithListAndApply[*v1.Project, *v1.ProjectList, *configv1.ProjectApplyConfiguration] + *gentype.ClientWithListAndApply[*configv1.Project, *configv1.ProjectList, *applyconfigurationsconfigv1.ProjectApplyConfiguration] } // newProjects returns a Projects func newProjects(c *ConfigV1Client) *projects { return &projects{ - gentype.NewClientWithListAndApply[*v1.Project, *v1.ProjectList, *configv1.ProjectApplyConfiguration]( + gentype.NewClientWithListAndApply[*configv1.Project, *configv1.ProjectList, *applyconfigurationsconfigv1.ProjectApplyConfiguration]( "projects", c.RESTClient(), scheme.ParameterCodec, "", - func() *v1.Project { return &v1.Project{} }, - func() *v1.ProjectList { return &v1.ProjectList{} }), + func() *configv1.Project { return &configv1.Project{} }, + func() *configv1.ProjectList { return &configv1.ProjectList{} }, + ), } } diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/proxy.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/proxy.go index dfa2b4d19..55374ecfe 100644 --- a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/proxy.go +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/proxy.go @@ -3,10 +3,10 @@ package v1 import ( - "context" + context "context" - v1 "github.com/openshift/api/config/v1" - configv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1" + configv1 "github.com/openshift/api/config/v1" + applyconfigurationsconfigv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1" scheme "github.com/openshift/client-go/config/clientset/versioned/scheme" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -22,36 +22,37 @@ type ProxiesGetter interface { // ProxyInterface has methods to work with Proxy resources. type ProxyInterface interface { - Create(ctx context.Context, proxy *v1.Proxy, opts metav1.CreateOptions) (*v1.Proxy, error) - Update(ctx context.Context, proxy *v1.Proxy, opts metav1.UpdateOptions) (*v1.Proxy, error) + Create(ctx context.Context, proxy *configv1.Proxy, opts metav1.CreateOptions) (*configv1.Proxy, error) + Update(ctx context.Context, proxy *configv1.Proxy, opts metav1.UpdateOptions) (*configv1.Proxy, error) // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
- UpdateStatus(ctx context.Context, proxy *v1.Proxy, opts metav1.UpdateOptions) (*v1.Proxy, error) + UpdateStatus(ctx context.Context, proxy *configv1.Proxy, opts metav1.UpdateOptions) (*configv1.Proxy, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error - Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.Proxy, error) - List(ctx context.Context, opts metav1.ListOptions) (*v1.ProxyList, error) + Get(ctx context.Context, name string, opts metav1.GetOptions) (*configv1.Proxy, error) + List(ctx context.Context, opts metav1.ListOptions) (*configv1.ProxyList, error) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Proxy, err error) - Apply(ctx context.Context, proxy *configv1.ProxyApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Proxy, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *configv1.Proxy, err error) + Apply(ctx context.Context, proxy *applyconfigurationsconfigv1.ProxyApplyConfiguration, opts metav1.ApplyOptions) (result *configv1.Proxy, err error) // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). - ApplyStatus(ctx context.Context, proxy *configv1.ProxyApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Proxy, err error) + ApplyStatus(ctx context.Context, proxy *applyconfigurationsconfigv1.ProxyApplyConfiguration, opts metav1.ApplyOptions) (result *configv1.Proxy, err error) ProxyExpansion } // proxies implements ProxyInterface type proxies struct { - *gentype.ClientWithListAndApply[*v1.Proxy, *v1.ProxyList, *configv1.ProxyApplyConfiguration] + *gentype.ClientWithListAndApply[*configv1.Proxy, *configv1.ProxyList, *applyconfigurationsconfigv1.ProxyApplyConfiguration] } // newProxies returns a Proxies func newProxies(c *ConfigV1Client) *proxies { return &proxies{ - gentype.NewClientWithListAndApply[*v1.Proxy, *v1.ProxyList, *configv1.ProxyApplyConfiguration]( + gentype.NewClientWithListAndApply[*configv1.Proxy, *configv1.ProxyList, *applyconfigurationsconfigv1.ProxyApplyConfiguration]( "proxies", c.RESTClient(), scheme.ParameterCodec, "", - func() *v1.Proxy { return &v1.Proxy{} }, - func() *v1.ProxyList { return &v1.ProxyList{} }), + func() *configv1.Proxy { return &configv1.Proxy{} }, + func() *configv1.ProxyList { return &configv1.ProxyList{} }, + ), } } diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/scheduler.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/scheduler.go index 7d459060e..3bdc27dbc 100644 --- a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/scheduler.go +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/scheduler.go @@ -3,10 +3,10 @@ package v1 import ( - "context" + context "context" - v1 "github.com/openshift/api/config/v1" - configv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1" + configv1 "github.com/openshift/api/config/v1" + applyconfigurationsconfigv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1" scheme "github.com/openshift/client-go/config/clientset/versioned/scheme" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 
types "k8s.io/apimachinery/pkg/types" @@ -22,36 +22,37 @@ type SchedulersGetter interface { // SchedulerInterface has methods to work with Scheduler resources. type SchedulerInterface interface { - Create(ctx context.Context, scheduler *v1.Scheduler, opts metav1.CreateOptions) (*v1.Scheduler, error) - Update(ctx context.Context, scheduler *v1.Scheduler, opts metav1.UpdateOptions) (*v1.Scheduler, error) + Create(ctx context.Context, scheduler *configv1.Scheduler, opts metav1.CreateOptions) (*configv1.Scheduler, error) + Update(ctx context.Context, scheduler *configv1.Scheduler, opts metav1.UpdateOptions) (*configv1.Scheduler, error) // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - UpdateStatus(ctx context.Context, scheduler *v1.Scheduler, opts metav1.UpdateOptions) (*v1.Scheduler, error) + UpdateStatus(ctx context.Context, scheduler *configv1.Scheduler, opts metav1.UpdateOptions) (*configv1.Scheduler, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error - Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.Scheduler, error) - List(ctx context.Context, opts metav1.ListOptions) (*v1.SchedulerList, error) + Get(ctx context.Context, name string, opts metav1.GetOptions) (*configv1.Scheduler, error) + List(ctx context.Context, opts metav1.ListOptions) (*configv1.SchedulerList, error) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Scheduler, err error) - Apply(ctx context.Context, scheduler *configv1.SchedulerApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Scheduler, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *configv1.Scheduler, err error) + Apply(ctx context.Context, scheduler *applyconfigurationsconfigv1.SchedulerApplyConfiguration, opts metav1.ApplyOptions) (result *configv1.Scheduler, err error) // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
- ApplyStatus(ctx context.Context, scheduler *configv1.SchedulerApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Scheduler, err error) + ApplyStatus(ctx context.Context, scheduler *applyconfigurationsconfigv1.SchedulerApplyConfiguration, opts metav1.ApplyOptions) (result *configv1.Scheduler, err error) SchedulerExpansion } // schedulers implements SchedulerInterface type schedulers struct { - *gentype.ClientWithListAndApply[*v1.Scheduler, *v1.SchedulerList, *configv1.SchedulerApplyConfiguration] + *gentype.ClientWithListAndApply[*configv1.Scheduler, *configv1.SchedulerList, *applyconfigurationsconfigv1.SchedulerApplyConfiguration] } // newSchedulers returns a Schedulers func newSchedulers(c *ConfigV1Client) *schedulers { return &schedulers{ - gentype.NewClientWithListAndApply[*v1.Scheduler, *v1.SchedulerList, *configv1.SchedulerApplyConfiguration]( + gentype.NewClientWithListAndApply[*configv1.Scheduler, *configv1.SchedulerList, *applyconfigurationsconfigv1.SchedulerApplyConfiguration]( "schedulers", c.RESTClient(), scheme.ParameterCodec, "", - func() *v1.Scheduler { return &v1.Scheduler{} }, - func() *v1.SchedulerList { return &v1.SchedulerList{} }), + func() *configv1.Scheduler { return &configv1.Scheduler{} }, + func() *configv1.SchedulerList { return &configv1.SchedulerList{} }, + ), } } diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha1/backup.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha1/backup.go index 40ade8391..89c7b176e 100644 --- a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha1/backup.go +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha1/backup.go @@ -3,10 +3,10 @@ package v1alpha1 import ( - "context" + context "context" - v1alpha1 "github.com/openshift/api/config/v1alpha1" - configv1alpha1 "github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1" + configv1alpha1 "github.com/openshift/api/config/v1alpha1" + applyconfigurationsconfigv1alpha1 "github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1" scheme "github.com/openshift/client-go/config/clientset/versioned/scheme" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -22,36 +22,37 @@ type BackupsGetter interface { // BackupInterface has methods to work with Backup resources. type BackupInterface interface { - Create(ctx context.Context, backup *v1alpha1.Backup, opts v1.CreateOptions) (*v1alpha1.Backup, error) - Update(ctx context.Context, backup *v1alpha1.Backup, opts v1.UpdateOptions) (*v1alpha1.Backup, error) + Create(ctx context.Context, backup *configv1alpha1.Backup, opts v1.CreateOptions) (*configv1alpha1.Backup, error) + Update(ctx context.Context, backup *configv1alpha1.Backup, opts v1.UpdateOptions) (*configv1alpha1.Backup, error) // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
- UpdateStatus(ctx context.Context, backup *v1alpha1.Backup, opts v1.UpdateOptions) (*v1alpha1.Backup, error) + UpdateStatus(ctx context.Context, backup *configv1alpha1.Backup, opts v1.UpdateOptions) (*configv1alpha1.Backup, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.Backup, error) - List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.BackupList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*configv1alpha1.Backup, error) + List(ctx context.Context, opts v1.ListOptions) (*configv1alpha1.BackupList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.Backup, err error) - Apply(ctx context.Context, backup *configv1alpha1.BackupApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.Backup, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *configv1alpha1.Backup, err error) + Apply(ctx context.Context, backup *applyconfigurationsconfigv1alpha1.BackupApplyConfiguration, opts v1.ApplyOptions) (result *configv1alpha1.Backup, err error) // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). - ApplyStatus(ctx context.Context, backup *configv1alpha1.BackupApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.Backup, err error) + ApplyStatus(ctx context.Context, backup *applyconfigurationsconfigv1alpha1.BackupApplyConfiguration, opts v1.ApplyOptions) (result *configv1alpha1.Backup, err error) BackupExpansion } // backups implements BackupInterface type backups struct { - *gentype.ClientWithListAndApply[*v1alpha1.Backup, *v1alpha1.BackupList, *configv1alpha1.BackupApplyConfiguration] + *gentype.ClientWithListAndApply[*configv1alpha1.Backup, *configv1alpha1.BackupList, *applyconfigurationsconfigv1alpha1.BackupApplyConfiguration] } // newBackups returns a Backups func newBackups(c *ConfigV1alpha1Client) *backups { return &backups{ - gentype.NewClientWithListAndApply[*v1alpha1.Backup, *v1alpha1.BackupList, *configv1alpha1.BackupApplyConfiguration]( + gentype.NewClientWithListAndApply[*configv1alpha1.Backup, *configv1alpha1.BackupList, *applyconfigurationsconfigv1alpha1.BackupApplyConfiguration]( "backups", c.RESTClient(), scheme.ParameterCodec, "", - func() *v1alpha1.Backup { return &v1alpha1.Backup{} }, - func() *v1alpha1.BackupList { return &v1alpha1.BackupList{} }), + func() *configv1alpha1.Backup { return &configv1alpha1.Backup{} }, + func() *configv1alpha1.BackupList { return &configv1alpha1.BackupList{} }, + ), } } diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha1/clusterimagepolicy.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha1/clusterimagepolicy.go index 508b3604b..8391f7b40 100644 --- a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha1/clusterimagepolicy.go +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha1/clusterimagepolicy.go @@ -3,10 +3,10 @@ package v1alpha1 import ( - "context" + context "context" - v1alpha1 "github.com/openshift/api/config/v1alpha1" - configv1alpha1 
"github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1" + configv1alpha1 "github.com/openshift/api/config/v1alpha1" + applyconfigurationsconfigv1alpha1 "github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1" scheme "github.com/openshift/client-go/config/clientset/versioned/scheme" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -22,36 +22,37 @@ type ClusterImagePoliciesGetter interface { // ClusterImagePolicyInterface has methods to work with ClusterImagePolicy resources. type ClusterImagePolicyInterface interface { - Create(ctx context.Context, clusterImagePolicy *v1alpha1.ClusterImagePolicy, opts v1.CreateOptions) (*v1alpha1.ClusterImagePolicy, error) - Update(ctx context.Context, clusterImagePolicy *v1alpha1.ClusterImagePolicy, opts v1.UpdateOptions) (*v1alpha1.ClusterImagePolicy, error) + Create(ctx context.Context, clusterImagePolicy *configv1alpha1.ClusterImagePolicy, opts v1.CreateOptions) (*configv1alpha1.ClusterImagePolicy, error) + Update(ctx context.Context, clusterImagePolicy *configv1alpha1.ClusterImagePolicy, opts v1.UpdateOptions) (*configv1alpha1.ClusterImagePolicy, error) // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - UpdateStatus(ctx context.Context, clusterImagePolicy *v1alpha1.ClusterImagePolicy, opts v1.UpdateOptions) (*v1alpha1.ClusterImagePolicy, error) + UpdateStatus(ctx context.Context, clusterImagePolicy *configv1alpha1.ClusterImagePolicy, opts v1.UpdateOptions) (*configv1alpha1.ClusterImagePolicy, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.ClusterImagePolicy, error) - List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.ClusterImagePolicyList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*configv1alpha1.ClusterImagePolicy, error) + List(ctx context.Context, opts v1.ListOptions) (*configv1alpha1.ClusterImagePolicyList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ClusterImagePolicy, err error) - Apply(ctx context.Context, clusterImagePolicy *configv1alpha1.ClusterImagePolicyApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ClusterImagePolicy, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *configv1alpha1.ClusterImagePolicy, err error) + Apply(ctx context.Context, clusterImagePolicy *applyconfigurationsconfigv1alpha1.ClusterImagePolicyApplyConfiguration, opts v1.ApplyOptions) (result *configv1alpha1.ClusterImagePolicy, err error) // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
- ApplyStatus(ctx context.Context, clusterImagePolicy *configv1alpha1.ClusterImagePolicyApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ClusterImagePolicy, err error) + ApplyStatus(ctx context.Context, clusterImagePolicy *applyconfigurationsconfigv1alpha1.ClusterImagePolicyApplyConfiguration, opts v1.ApplyOptions) (result *configv1alpha1.ClusterImagePolicy, err error) ClusterImagePolicyExpansion } // clusterImagePolicies implements ClusterImagePolicyInterface type clusterImagePolicies struct { - *gentype.ClientWithListAndApply[*v1alpha1.ClusterImagePolicy, *v1alpha1.ClusterImagePolicyList, *configv1alpha1.ClusterImagePolicyApplyConfiguration] + *gentype.ClientWithListAndApply[*configv1alpha1.ClusterImagePolicy, *configv1alpha1.ClusterImagePolicyList, *applyconfigurationsconfigv1alpha1.ClusterImagePolicyApplyConfiguration] } // newClusterImagePolicies returns a ClusterImagePolicies func newClusterImagePolicies(c *ConfigV1alpha1Client) *clusterImagePolicies { return &clusterImagePolicies{ - gentype.NewClientWithListAndApply[*v1alpha1.ClusterImagePolicy, *v1alpha1.ClusterImagePolicyList, *configv1alpha1.ClusterImagePolicyApplyConfiguration]( + gentype.NewClientWithListAndApply[*configv1alpha1.ClusterImagePolicy, *configv1alpha1.ClusterImagePolicyList, *applyconfigurationsconfigv1alpha1.ClusterImagePolicyApplyConfiguration]( "clusterimagepolicies", c.RESTClient(), scheme.ParameterCodec, "", - func() *v1alpha1.ClusterImagePolicy { return &v1alpha1.ClusterImagePolicy{} }, - func() *v1alpha1.ClusterImagePolicyList { return &v1alpha1.ClusterImagePolicyList{} }), + func() *configv1alpha1.ClusterImagePolicy { return &configv1alpha1.ClusterImagePolicy{} }, + func() *configv1alpha1.ClusterImagePolicyList { return &configv1alpha1.ClusterImagePolicyList{} }, + ), } } diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha1/clustermonitoring.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha1/clustermonitoring.go new file mode 100644 index 000000000..8d02fc6c2 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha1/clustermonitoring.go @@ -0,0 +1,58 @@ +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + context "context" + + configv1alpha1 "github.com/openshift/api/config/v1alpha1" + applyconfigurationsconfigv1alpha1 "github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1" + scheme "github.com/openshift/client-go/config/clientset/versioned/scheme" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + gentype "k8s.io/client-go/gentype" +) + +// ClusterMonitoringsGetter has a method to return a ClusterMonitoringInterface. +// A group's client should implement this interface. +type ClusterMonitoringsGetter interface { + ClusterMonitorings() ClusterMonitoringInterface +} + +// ClusterMonitoringInterface has methods to work with ClusterMonitoring resources. +type ClusterMonitoringInterface interface { + Create(ctx context.Context, clusterMonitoring *configv1alpha1.ClusterMonitoring, opts v1.CreateOptions) (*configv1alpha1.ClusterMonitoring, error) + Update(ctx context.Context, clusterMonitoring *configv1alpha1.ClusterMonitoring, opts v1.UpdateOptions) (*configv1alpha1.ClusterMonitoring, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
+ UpdateStatus(ctx context.Context, clusterMonitoring *configv1alpha1.ClusterMonitoring, opts v1.UpdateOptions) (*configv1alpha1.ClusterMonitoring, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*configv1alpha1.ClusterMonitoring, error) + List(ctx context.Context, opts v1.ListOptions) (*configv1alpha1.ClusterMonitoringList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *configv1alpha1.ClusterMonitoring, err error) + Apply(ctx context.Context, clusterMonitoring *applyconfigurationsconfigv1alpha1.ClusterMonitoringApplyConfiguration, opts v1.ApplyOptions) (result *configv1alpha1.ClusterMonitoring, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). + ApplyStatus(ctx context.Context, clusterMonitoring *applyconfigurationsconfigv1alpha1.ClusterMonitoringApplyConfiguration, opts v1.ApplyOptions) (result *configv1alpha1.ClusterMonitoring, err error) + ClusterMonitoringExpansion +} + +// clusterMonitorings implements ClusterMonitoringInterface +type clusterMonitorings struct { + *gentype.ClientWithListAndApply[*configv1alpha1.ClusterMonitoring, *configv1alpha1.ClusterMonitoringList, *applyconfigurationsconfigv1alpha1.ClusterMonitoringApplyConfiguration] +} + +// newClusterMonitorings returns a ClusterMonitorings +func newClusterMonitorings(c *ConfigV1alpha1Client) *clusterMonitorings { + return &clusterMonitorings{ + gentype.NewClientWithListAndApply[*configv1alpha1.ClusterMonitoring, *configv1alpha1.ClusterMonitoringList, *applyconfigurationsconfigv1alpha1.ClusterMonitoringApplyConfiguration]( + "clustermonitorings", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *configv1alpha1.ClusterMonitoring { return &configv1alpha1.ClusterMonitoring{} }, + func() *configv1alpha1.ClusterMonitoringList { return &configv1alpha1.ClusterMonitoringList{} }, + ), + } +} diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha1/config_client.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha1/config_client.go index cfbbd848b..70ebfa3cd 100644 --- a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha1/config_client.go +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha1/config_client.go @@ -3,10 +3,10 @@ package v1alpha1 import ( - "net/http" + http "net/http" - v1alpha1 "github.com/openshift/api/config/v1alpha1" - "github.com/openshift/client-go/config/clientset/versioned/scheme" + configv1alpha1 "github.com/openshift/api/config/v1alpha1" + scheme "github.com/openshift/client-go/config/clientset/versioned/scheme" rest "k8s.io/client-go/rest" ) @@ -14,6 +14,7 @@ type ConfigV1alpha1Interface interface { RESTClient() rest.Interface BackupsGetter ClusterImagePoliciesGetter + ClusterMonitoringsGetter ImagePoliciesGetter InsightsDataGathersGetter } @@ -31,6 +32,10 @@ func (c *ConfigV1alpha1Client) ClusterImagePolicies() ClusterImagePolicyInterfac return newClusterImagePolicies(c) } +func (c *ConfigV1alpha1Client) ClusterMonitorings() ClusterMonitoringInterface { + return newClusterMonitorings(c) +} + func (c *ConfigV1alpha1Client) ImagePolicies(namespace string) 
ImagePolicyInterface { return newImagePolicies(c, namespace) } @@ -84,10 +89,10 @@ func New(c rest.Interface) *ConfigV1alpha1Client { } func setConfigDefaults(config *rest.Config) error { - gv := v1alpha1.SchemeGroupVersion + gv := configv1alpha1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha1/generated_expansion.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha1/generated_expansion.go index 3a69741b1..ab5198cce 100644 --- a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha1/generated_expansion.go +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha1/generated_expansion.go @@ -6,6 +6,8 @@ type BackupExpansion interface{} type ClusterImagePolicyExpansion interface{} +type ClusterMonitoringExpansion interface{} + type ImagePolicyExpansion interface{} type InsightsDataGatherExpansion interface{} diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha1/imagepolicy.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha1/imagepolicy.go index 02caded7a..a893efeea 100644 --- a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha1/imagepolicy.go +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha1/imagepolicy.go @@ -3,10 +3,10 @@ package v1alpha1 import ( - "context" + context "context" - v1alpha1 "github.com/openshift/api/config/v1alpha1" - configv1alpha1 "github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1" + configv1alpha1 "github.com/openshift/api/config/v1alpha1" + applyconfigurationsconfigv1alpha1 "github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1" scheme "github.com/openshift/client-go/config/clientset/versioned/scheme" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -22,36 +22,37 @@ type ImagePoliciesGetter interface { // ImagePolicyInterface has methods to work with ImagePolicy resources. type ImagePolicyInterface interface { - Create(ctx context.Context, imagePolicy *v1alpha1.ImagePolicy, opts v1.CreateOptions) (*v1alpha1.ImagePolicy, error) - Update(ctx context.Context, imagePolicy *v1alpha1.ImagePolicy, opts v1.UpdateOptions) (*v1alpha1.ImagePolicy, error) + Create(ctx context.Context, imagePolicy *configv1alpha1.ImagePolicy, opts v1.CreateOptions) (*configv1alpha1.ImagePolicy, error) + Update(ctx context.Context, imagePolicy *configv1alpha1.ImagePolicy, opts v1.UpdateOptions) (*configv1alpha1.ImagePolicy, error) // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
- UpdateStatus(ctx context.Context, imagePolicy *v1alpha1.ImagePolicy, opts v1.UpdateOptions) (*v1alpha1.ImagePolicy, error) + UpdateStatus(ctx context.Context, imagePolicy *configv1alpha1.ImagePolicy, opts v1.UpdateOptions) (*configv1alpha1.ImagePolicy, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.ImagePolicy, error) - List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.ImagePolicyList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*configv1alpha1.ImagePolicy, error) + List(ctx context.Context, opts v1.ListOptions) (*configv1alpha1.ImagePolicyList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ImagePolicy, err error) - Apply(ctx context.Context, imagePolicy *configv1alpha1.ImagePolicyApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ImagePolicy, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *configv1alpha1.ImagePolicy, err error) + Apply(ctx context.Context, imagePolicy *applyconfigurationsconfigv1alpha1.ImagePolicyApplyConfiguration, opts v1.ApplyOptions) (result *configv1alpha1.ImagePolicy, err error) // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). - ApplyStatus(ctx context.Context, imagePolicy *configv1alpha1.ImagePolicyApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ImagePolicy, err error) + ApplyStatus(ctx context.Context, imagePolicy *applyconfigurationsconfigv1alpha1.ImagePolicyApplyConfiguration, opts v1.ApplyOptions) (result *configv1alpha1.ImagePolicy, err error) ImagePolicyExpansion } // imagePolicies implements ImagePolicyInterface type imagePolicies struct { - *gentype.ClientWithListAndApply[*v1alpha1.ImagePolicy, *v1alpha1.ImagePolicyList, *configv1alpha1.ImagePolicyApplyConfiguration] + *gentype.ClientWithListAndApply[*configv1alpha1.ImagePolicy, *configv1alpha1.ImagePolicyList, *applyconfigurationsconfigv1alpha1.ImagePolicyApplyConfiguration] } // newImagePolicies returns a ImagePolicies func newImagePolicies(c *ConfigV1alpha1Client, namespace string) *imagePolicies { return &imagePolicies{ - gentype.NewClientWithListAndApply[*v1alpha1.ImagePolicy, *v1alpha1.ImagePolicyList, *configv1alpha1.ImagePolicyApplyConfiguration]( + gentype.NewClientWithListAndApply[*configv1alpha1.ImagePolicy, *configv1alpha1.ImagePolicyList, *applyconfigurationsconfigv1alpha1.ImagePolicyApplyConfiguration]( "imagepolicies", c.RESTClient(), scheme.ParameterCodec, namespace, - func() *v1alpha1.ImagePolicy { return &v1alpha1.ImagePolicy{} }, - func() *v1alpha1.ImagePolicyList { return &v1alpha1.ImagePolicyList{} }), + func() *configv1alpha1.ImagePolicy { return &configv1alpha1.ImagePolicy{} }, + func() *configv1alpha1.ImagePolicyList { return &configv1alpha1.ImagePolicyList{} }, + ), } } diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha1/insightsdatagather.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha1/insightsdatagather.go index 8f147bae1..cff76db8d 100644 --- 
a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha1/insightsdatagather.go +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha1/insightsdatagather.go @@ -3,10 +3,10 @@ package v1alpha1 import ( - "context" + context "context" - v1alpha1 "github.com/openshift/api/config/v1alpha1" - configv1alpha1 "github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1" + configv1alpha1 "github.com/openshift/api/config/v1alpha1" + applyconfigurationsconfigv1alpha1 "github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1" scheme "github.com/openshift/client-go/config/clientset/versioned/scheme" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -22,36 +22,37 @@ type InsightsDataGathersGetter interface { // InsightsDataGatherInterface has methods to work with InsightsDataGather resources. type InsightsDataGatherInterface interface { - Create(ctx context.Context, insightsDataGather *v1alpha1.InsightsDataGather, opts v1.CreateOptions) (*v1alpha1.InsightsDataGather, error) - Update(ctx context.Context, insightsDataGather *v1alpha1.InsightsDataGather, opts v1.UpdateOptions) (*v1alpha1.InsightsDataGather, error) + Create(ctx context.Context, insightsDataGather *configv1alpha1.InsightsDataGather, opts v1.CreateOptions) (*configv1alpha1.InsightsDataGather, error) + Update(ctx context.Context, insightsDataGather *configv1alpha1.InsightsDataGather, opts v1.UpdateOptions) (*configv1alpha1.InsightsDataGather, error) // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - UpdateStatus(ctx context.Context, insightsDataGather *v1alpha1.InsightsDataGather, opts v1.UpdateOptions) (*v1alpha1.InsightsDataGather, error) + UpdateStatus(ctx context.Context, insightsDataGather *configv1alpha1.InsightsDataGather, opts v1.UpdateOptions) (*configv1alpha1.InsightsDataGather, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.InsightsDataGather, error) - List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.InsightsDataGatherList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*configv1alpha1.InsightsDataGather, error) + List(ctx context.Context, opts v1.ListOptions) (*configv1alpha1.InsightsDataGatherList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.InsightsDataGather, err error) - Apply(ctx context.Context, insightsDataGather *configv1alpha1.InsightsDataGatherApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.InsightsDataGather, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *configv1alpha1.InsightsDataGather, err error) + Apply(ctx context.Context, insightsDataGather *applyconfigurationsconfigv1alpha1.InsightsDataGatherApplyConfiguration, opts v1.ApplyOptions) (result *configv1alpha1.InsightsDataGather, err error) // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
- ApplyStatus(ctx context.Context, insightsDataGather *configv1alpha1.InsightsDataGatherApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.InsightsDataGather, err error) + ApplyStatus(ctx context.Context, insightsDataGather *applyconfigurationsconfigv1alpha1.InsightsDataGatherApplyConfiguration, opts v1.ApplyOptions) (result *configv1alpha1.InsightsDataGather, err error) InsightsDataGatherExpansion } // insightsDataGathers implements InsightsDataGatherInterface type insightsDataGathers struct { - *gentype.ClientWithListAndApply[*v1alpha1.InsightsDataGather, *v1alpha1.InsightsDataGatherList, *configv1alpha1.InsightsDataGatherApplyConfiguration] + *gentype.ClientWithListAndApply[*configv1alpha1.InsightsDataGather, *configv1alpha1.InsightsDataGatherList, *applyconfigurationsconfigv1alpha1.InsightsDataGatherApplyConfiguration] } // newInsightsDataGathers returns a InsightsDataGathers func newInsightsDataGathers(c *ConfigV1alpha1Client) *insightsDataGathers { return &insightsDataGathers{ - gentype.NewClientWithListAndApply[*v1alpha1.InsightsDataGather, *v1alpha1.InsightsDataGatherList, *configv1alpha1.InsightsDataGatherApplyConfiguration]( + gentype.NewClientWithListAndApply[*configv1alpha1.InsightsDataGather, *configv1alpha1.InsightsDataGatherList, *applyconfigurationsconfigv1alpha1.InsightsDataGatherApplyConfiguration]( "insightsdatagathers", c.RESTClient(), scheme.ParameterCodec, "", - func() *v1alpha1.InsightsDataGather { return &v1alpha1.InsightsDataGather{} }, - func() *v1alpha1.InsightsDataGatherList { return &v1alpha1.InsightsDataGatherList{} }), + func() *configv1alpha1.InsightsDataGather { return &configv1alpha1.InsightsDataGather{} }, + func() *configv1alpha1.InsightsDataGatherList { return &configv1alpha1.InsightsDataGatherList{} }, + ), } } diff --git a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/apiserver.go b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/apiserver.go index 2fcff2312..262aa7b0a 100644 --- a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/apiserver.go +++ b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/apiserver.go @@ -3,13 +3,13 @@ package v1 import ( - "context" + context "context" time "time" - configv1 "github.com/openshift/api/config/v1" + apiconfigv1 "github.com/openshift/api/config/v1" versioned "github.com/openshift/client-go/config/clientset/versioned" internalinterfaces "github.com/openshift/client-go/config/informers/externalversions/internalinterfaces" - v1 "github.com/openshift/client-go/config/listers/config/v1" + configv1 "github.com/openshift/client-go/config/listers/config/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" @@ -20,7 +20,7 @@ import ( // APIServers. type APIServerInformer interface { Informer() cache.SharedIndexInformer - Lister() v1.APIServerLister + Lister() configv1.APIServerLister } type aPIServerInformer struct { @@ -54,7 +54,7 @@ func NewFilteredAPIServerInformer(client versioned.Interface, resyncPeriod time. 
return client.ConfigV1().APIServers().Watch(context.TODO(), options) }, }, - &configv1.APIServer{}, + &apiconfigv1.APIServer{}, resyncPeriod, indexers, ) @@ -65,9 +65,9 @@ func (f *aPIServerInformer) defaultInformer(client versioned.Interface, resyncPe } func (f *aPIServerInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&configv1.APIServer{}, f.defaultInformer) + return f.factory.InformerFor(&apiconfigv1.APIServer{}, f.defaultInformer) } -func (f *aPIServerInformer) Lister() v1.APIServerLister { - return v1.NewAPIServerLister(f.Informer().GetIndexer()) +func (f *aPIServerInformer) Lister() configv1.APIServerLister { + return configv1.NewAPIServerLister(f.Informer().GetIndexer()) } diff --git a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/authentication.go b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/authentication.go index c2792cf8f..efe2c253e 100644 --- a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/authentication.go +++ b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/authentication.go @@ -3,13 +3,13 @@ package v1 import ( - "context" + context "context" time "time" - configv1 "github.com/openshift/api/config/v1" + apiconfigv1 "github.com/openshift/api/config/v1" versioned "github.com/openshift/client-go/config/clientset/versioned" internalinterfaces "github.com/openshift/client-go/config/informers/externalversions/internalinterfaces" - v1 "github.com/openshift/client-go/config/listers/config/v1" + configv1 "github.com/openshift/client-go/config/listers/config/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" @@ -20,7 +20,7 @@ import ( // Authentications. 
type AuthenticationInformer interface { Informer() cache.SharedIndexInformer - Lister() v1.AuthenticationLister + Lister() configv1.AuthenticationLister } type authenticationInformer struct { @@ -54,7 +54,7 @@ func NewFilteredAuthenticationInformer(client versioned.Interface, resyncPeriod return client.ConfigV1().Authentications().Watch(context.TODO(), options) }, }, - &configv1.Authentication{}, + &apiconfigv1.Authentication{}, resyncPeriod, indexers, ) @@ -65,9 +65,9 @@ func (f *authenticationInformer) defaultInformer(client versioned.Interface, res } func (f *authenticationInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&configv1.Authentication{}, f.defaultInformer) + return f.factory.InformerFor(&apiconfigv1.Authentication{}, f.defaultInformer) } -func (f *authenticationInformer) Lister() v1.AuthenticationLister { - return v1.NewAuthenticationLister(f.Informer().GetIndexer()) +func (f *authenticationInformer) Lister() configv1.AuthenticationLister { + return configv1.NewAuthenticationLister(f.Informer().GetIndexer()) } diff --git a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/build.go b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/build.go index c944db065..451ba252d 100644 --- a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/build.go +++ b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/build.go @@ -3,13 +3,13 @@ package v1 import ( - "context" + context "context" time "time" - configv1 "github.com/openshift/api/config/v1" + apiconfigv1 "github.com/openshift/api/config/v1" versioned "github.com/openshift/client-go/config/clientset/versioned" internalinterfaces "github.com/openshift/client-go/config/informers/externalversions/internalinterfaces" - v1 "github.com/openshift/client-go/config/listers/config/v1" + configv1 "github.com/openshift/client-go/config/listers/config/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" @@ -20,7 +20,7 @@ import ( // Builds. 
type BuildInformer interface { Informer() cache.SharedIndexInformer - Lister() v1.BuildLister + Lister() configv1.BuildLister } type buildInformer struct { @@ -54,7 +54,7 @@ func NewFilteredBuildInformer(client versioned.Interface, resyncPeriod time.Dura return client.ConfigV1().Builds().Watch(context.TODO(), options) }, }, - &configv1.Build{}, + &apiconfigv1.Build{}, resyncPeriod, indexers, ) @@ -65,9 +65,9 @@ func (f *buildInformer) defaultInformer(client versioned.Interface, resyncPeriod } func (f *buildInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&configv1.Build{}, f.defaultInformer) + return f.factory.InformerFor(&apiconfigv1.Build{}, f.defaultInformer) } -func (f *buildInformer) Lister() v1.BuildLister { - return v1.NewBuildLister(f.Informer().GetIndexer()) +func (f *buildInformer) Lister() configv1.BuildLister { + return configv1.NewBuildLister(f.Informer().GetIndexer()) } diff --git a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/clusteroperator.go b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/clusteroperator.go index 4c81309fb..1eda53c8b 100644 --- a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/clusteroperator.go +++ b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/clusteroperator.go @@ -3,13 +3,13 @@ package v1 import ( - "context" + context "context" time "time" - configv1 "github.com/openshift/api/config/v1" + apiconfigv1 "github.com/openshift/api/config/v1" versioned "github.com/openshift/client-go/config/clientset/versioned" internalinterfaces "github.com/openshift/client-go/config/informers/externalversions/internalinterfaces" - v1 "github.com/openshift/client-go/config/listers/config/v1" + configv1 "github.com/openshift/client-go/config/listers/config/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" @@ -20,7 +20,7 @@ import ( // ClusterOperators. 
type ClusterOperatorInformer interface { Informer() cache.SharedIndexInformer - Lister() v1.ClusterOperatorLister + Lister() configv1.ClusterOperatorLister } type clusterOperatorInformer struct { @@ -54,7 +54,7 @@ func NewFilteredClusterOperatorInformer(client versioned.Interface, resyncPeriod return client.ConfigV1().ClusterOperators().Watch(context.TODO(), options) }, }, - &configv1.ClusterOperator{}, + &apiconfigv1.ClusterOperator{}, resyncPeriod, indexers, ) @@ -65,9 +65,9 @@ func (f *clusterOperatorInformer) defaultInformer(client versioned.Interface, re } func (f *clusterOperatorInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&configv1.ClusterOperator{}, f.defaultInformer) + return f.factory.InformerFor(&apiconfigv1.ClusterOperator{}, f.defaultInformer) } -func (f *clusterOperatorInformer) Lister() v1.ClusterOperatorLister { - return v1.NewClusterOperatorLister(f.Informer().GetIndexer()) +func (f *clusterOperatorInformer) Lister() configv1.ClusterOperatorLister { + return configv1.NewClusterOperatorLister(f.Informer().GetIndexer()) } diff --git a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/clusterversion.go b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/clusterversion.go index 8015d6eed..c3915175e 100644 --- a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/clusterversion.go +++ b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/clusterversion.go @@ -3,13 +3,13 @@ package v1 import ( - "context" + context "context" time "time" - configv1 "github.com/openshift/api/config/v1" + apiconfigv1 "github.com/openshift/api/config/v1" versioned "github.com/openshift/client-go/config/clientset/versioned" internalinterfaces "github.com/openshift/client-go/config/informers/externalversions/internalinterfaces" - v1 "github.com/openshift/client-go/config/listers/config/v1" + configv1 "github.com/openshift/client-go/config/listers/config/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" @@ -20,7 +20,7 @@ import ( // ClusterVersions. 
type ClusterVersionInformer interface { Informer() cache.SharedIndexInformer - Lister() v1.ClusterVersionLister + Lister() configv1.ClusterVersionLister } type clusterVersionInformer struct { @@ -54,7 +54,7 @@ func NewFilteredClusterVersionInformer(client versioned.Interface, resyncPeriod return client.ConfigV1().ClusterVersions().Watch(context.TODO(), options) }, }, - &configv1.ClusterVersion{}, + &apiconfigv1.ClusterVersion{}, resyncPeriod, indexers, ) @@ -65,9 +65,9 @@ func (f *clusterVersionInformer) defaultInformer(client versioned.Interface, res } func (f *clusterVersionInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&configv1.ClusterVersion{}, f.defaultInformer) + return f.factory.InformerFor(&apiconfigv1.ClusterVersion{}, f.defaultInformer) } -func (f *clusterVersionInformer) Lister() v1.ClusterVersionLister { - return v1.NewClusterVersionLister(f.Informer().GetIndexer()) +func (f *clusterVersionInformer) Lister() configv1.ClusterVersionLister { + return configv1.NewClusterVersionLister(f.Informer().GetIndexer()) } diff --git a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/console.go b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/console.go index 7d23130a4..05a36ec0a 100644 --- a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/console.go +++ b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/console.go @@ -3,13 +3,13 @@ package v1 import ( - "context" + context "context" time "time" - configv1 "github.com/openshift/api/config/v1" + apiconfigv1 "github.com/openshift/api/config/v1" versioned "github.com/openshift/client-go/config/clientset/versioned" internalinterfaces "github.com/openshift/client-go/config/informers/externalversions/internalinterfaces" - v1 "github.com/openshift/client-go/config/listers/config/v1" + configv1 "github.com/openshift/client-go/config/listers/config/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" @@ -20,7 +20,7 @@ import ( // Consoles. 
type ConsoleInformer interface { Informer() cache.SharedIndexInformer - Lister() v1.ConsoleLister + Lister() configv1.ConsoleLister } type consoleInformer struct { @@ -54,7 +54,7 @@ func NewFilteredConsoleInformer(client versioned.Interface, resyncPeriod time.Du return client.ConfigV1().Consoles().Watch(context.TODO(), options) }, }, - &configv1.Console{}, + &apiconfigv1.Console{}, resyncPeriod, indexers, ) @@ -65,9 +65,9 @@ func (f *consoleInformer) defaultInformer(client versioned.Interface, resyncPeri } func (f *consoleInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&configv1.Console{}, f.defaultInformer) + return f.factory.InformerFor(&apiconfigv1.Console{}, f.defaultInformer) } -func (f *consoleInformer) Lister() v1.ConsoleLister { - return v1.NewConsoleLister(f.Informer().GetIndexer()) +func (f *consoleInformer) Lister() configv1.ConsoleLister { + return configv1.NewConsoleLister(f.Informer().GetIndexer()) } diff --git a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/dns.go b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/dns.go index ddadf98cb..af44dfce9 100644 --- a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/dns.go +++ b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/dns.go @@ -3,13 +3,13 @@ package v1 import ( - "context" + context "context" time "time" - configv1 "github.com/openshift/api/config/v1" + apiconfigv1 "github.com/openshift/api/config/v1" versioned "github.com/openshift/client-go/config/clientset/versioned" internalinterfaces "github.com/openshift/client-go/config/informers/externalversions/internalinterfaces" - v1 "github.com/openshift/client-go/config/listers/config/v1" + configv1 "github.com/openshift/client-go/config/listers/config/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" @@ -20,7 +20,7 @@ import ( // DNSes. 
type DNSInformer interface { Informer() cache.SharedIndexInformer - Lister() v1.DNSLister + Lister() configv1.DNSLister } type dNSInformer struct { @@ -54,7 +54,7 @@ func NewFilteredDNSInformer(client versioned.Interface, resyncPeriod time.Durati return client.ConfigV1().DNSes().Watch(context.TODO(), options) }, }, - &configv1.DNS{}, + &apiconfigv1.DNS{}, resyncPeriod, indexers, ) @@ -65,9 +65,9 @@ func (f *dNSInformer) defaultInformer(client versioned.Interface, resyncPeriod t } func (f *dNSInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&configv1.DNS{}, f.defaultInformer) + return f.factory.InformerFor(&apiconfigv1.DNS{}, f.defaultInformer) } -func (f *dNSInformer) Lister() v1.DNSLister { - return v1.NewDNSLister(f.Informer().GetIndexer()) +func (f *dNSInformer) Lister() configv1.DNSLister { + return configv1.NewDNSLister(f.Informer().GetIndexer()) } diff --git a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/featuregate.go b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/featuregate.go index 84cec90af..dc1e20507 100644 --- a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/featuregate.go +++ b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/featuregate.go @@ -3,13 +3,13 @@ package v1 import ( - "context" + context "context" time "time" - configv1 "github.com/openshift/api/config/v1" + apiconfigv1 "github.com/openshift/api/config/v1" versioned "github.com/openshift/client-go/config/clientset/versioned" internalinterfaces "github.com/openshift/client-go/config/informers/externalversions/internalinterfaces" - v1 "github.com/openshift/client-go/config/listers/config/v1" + configv1 "github.com/openshift/client-go/config/listers/config/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" @@ -20,7 +20,7 @@ import ( // FeatureGates. 
type FeatureGateInformer interface { Informer() cache.SharedIndexInformer - Lister() v1.FeatureGateLister + Lister() configv1.FeatureGateLister } type featureGateInformer struct { @@ -54,7 +54,7 @@ func NewFilteredFeatureGateInformer(client versioned.Interface, resyncPeriod tim return client.ConfigV1().FeatureGates().Watch(context.TODO(), options) }, }, - &configv1.FeatureGate{}, + &apiconfigv1.FeatureGate{}, resyncPeriod, indexers, ) @@ -65,9 +65,9 @@ func (f *featureGateInformer) defaultInformer(client versioned.Interface, resync } func (f *featureGateInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&configv1.FeatureGate{}, f.defaultInformer) + return f.factory.InformerFor(&apiconfigv1.FeatureGate{}, f.defaultInformer) } -func (f *featureGateInformer) Lister() v1.FeatureGateLister { - return v1.NewFeatureGateLister(f.Informer().GetIndexer()) +func (f *featureGateInformer) Lister() configv1.FeatureGateLister { + return configv1.NewFeatureGateLister(f.Informer().GetIndexer()) } diff --git a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/image.go b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/image.go index e7a3ecc21..5f68a35ec 100644 --- a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/image.go +++ b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/image.go @@ -3,13 +3,13 @@ package v1 import ( - "context" + context "context" time "time" - configv1 "github.com/openshift/api/config/v1" + apiconfigv1 "github.com/openshift/api/config/v1" versioned "github.com/openshift/client-go/config/clientset/versioned" internalinterfaces "github.com/openshift/client-go/config/informers/externalversions/internalinterfaces" - v1 "github.com/openshift/client-go/config/listers/config/v1" + configv1 "github.com/openshift/client-go/config/listers/config/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" @@ -20,7 +20,7 @@ import ( // Images. 
type ImageInformer interface { Informer() cache.SharedIndexInformer - Lister() v1.ImageLister + Lister() configv1.ImageLister } type imageInformer struct { @@ -54,7 +54,7 @@ func NewFilteredImageInformer(client versioned.Interface, resyncPeriod time.Dura return client.ConfigV1().Images().Watch(context.TODO(), options) }, }, - &configv1.Image{}, + &apiconfigv1.Image{}, resyncPeriod, indexers, ) @@ -65,9 +65,9 @@ func (f *imageInformer) defaultInformer(client versioned.Interface, resyncPeriod } func (f *imageInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&configv1.Image{}, f.defaultInformer) + return f.factory.InformerFor(&apiconfigv1.Image{}, f.defaultInformer) } -func (f *imageInformer) Lister() v1.ImageLister { - return v1.NewImageLister(f.Informer().GetIndexer()) +func (f *imageInformer) Lister() configv1.ImageLister { + return configv1.NewImageLister(f.Informer().GetIndexer()) } diff --git a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/imagecontentpolicy.go b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/imagecontentpolicy.go index c50ea7b1b..e062099ea 100644 --- a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/imagecontentpolicy.go +++ b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/imagecontentpolicy.go @@ -3,13 +3,13 @@ package v1 import ( - "context" + context "context" time "time" - configv1 "github.com/openshift/api/config/v1" + apiconfigv1 "github.com/openshift/api/config/v1" versioned "github.com/openshift/client-go/config/clientset/versioned" internalinterfaces "github.com/openshift/client-go/config/informers/externalversions/internalinterfaces" - v1 "github.com/openshift/client-go/config/listers/config/v1" + configv1 "github.com/openshift/client-go/config/listers/config/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" @@ -20,7 +20,7 @@ import ( // ImageContentPolicies. 
type ImageContentPolicyInformer interface { Informer() cache.SharedIndexInformer - Lister() v1.ImageContentPolicyLister + Lister() configv1.ImageContentPolicyLister } type imageContentPolicyInformer struct { @@ -54,7 +54,7 @@ func NewFilteredImageContentPolicyInformer(client versioned.Interface, resyncPer return client.ConfigV1().ImageContentPolicies().Watch(context.TODO(), options) }, }, - &configv1.ImageContentPolicy{}, + &apiconfigv1.ImageContentPolicy{}, resyncPeriod, indexers, ) @@ -65,9 +65,9 @@ func (f *imageContentPolicyInformer) defaultInformer(client versioned.Interface, } func (f *imageContentPolicyInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&configv1.ImageContentPolicy{}, f.defaultInformer) + return f.factory.InformerFor(&apiconfigv1.ImageContentPolicy{}, f.defaultInformer) } -func (f *imageContentPolicyInformer) Lister() v1.ImageContentPolicyLister { - return v1.NewImageContentPolicyLister(f.Informer().GetIndexer()) +func (f *imageContentPolicyInformer) Lister() configv1.ImageContentPolicyLister { + return configv1.NewImageContentPolicyLister(f.Informer().GetIndexer()) } diff --git a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/imagedigestmirrorset.go b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/imagedigestmirrorset.go index 8953cfd89..0bdadff5b 100644 --- a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/imagedigestmirrorset.go +++ b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/imagedigestmirrorset.go @@ -3,13 +3,13 @@ package v1 import ( - "context" + context "context" time "time" - configv1 "github.com/openshift/api/config/v1" + apiconfigv1 "github.com/openshift/api/config/v1" versioned "github.com/openshift/client-go/config/clientset/versioned" internalinterfaces "github.com/openshift/client-go/config/informers/externalversions/internalinterfaces" - v1 "github.com/openshift/client-go/config/listers/config/v1" + configv1 "github.com/openshift/client-go/config/listers/config/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" @@ -20,7 +20,7 @@ import ( // ImageDigestMirrorSets. 
type ImageDigestMirrorSetInformer interface { Informer() cache.SharedIndexInformer - Lister() v1.ImageDigestMirrorSetLister + Lister() configv1.ImageDigestMirrorSetLister } type imageDigestMirrorSetInformer struct { @@ -54,7 +54,7 @@ func NewFilteredImageDigestMirrorSetInformer(client versioned.Interface, resyncP return client.ConfigV1().ImageDigestMirrorSets().Watch(context.TODO(), options) }, }, - &configv1.ImageDigestMirrorSet{}, + &apiconfigv1.ImageDigestMirrorSet{}, resyncPeriod, indexers, ) @@ -65,9 +65,9 @@ func (f *imageDigestMirrorSetInformer) defaultInformer(client versioned.Interfac } func (f *imageDigestMirrorSetInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&configv1.ImageDigestMirrorSet{}, f.defaultInformer) + return f.factory.InformerFor(&apiconfigv1.ImageDigestMirrorSet{}, f.defaultInformer) } -func (f *imageDigestMirrorSetInformer) Lister() v1.ImageDigestMirrorSetLister { - return v1.NewImageDigestMirrorSetLister(f.Informer().GetIndexer()) +func (f *imageDigestMirrorSetInformer) Lister() configv1.ImageDigestMirrorSetLister { + return configv1.NewImageDigestMirrorSetLister(f.Informer().GetIndexer()) } diff --git a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/imagetagmirrorset.go b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/imagetagmirrorset.go index a0951a190..92bf24f20 100644 --- a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/imagetagmirrorset.go +++ b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/imagetagmirrorset.go @@ -3,13 +3,13 @@ package v1 import ( - "context" + context "context" time "time" - configv1 "github.com/openshift/api/config/v1" + apiconfigv1 "github.com/openshift/api/config/v1" versioned "github.com/openshift/client-go/config/clientset/versioned" internalinterfaces "github.com/openshift/client-go/config/informers/externalversions/internalinterfaces" - v1 "github.com/openshift/client-go/config/listers/config/v1" + configv1 "github.com/openshift/client-go/config/listers/config/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" @@ -20,7 +20,7 @@ import ( // ImageTagMirrorSets. 
type ImageTagMirrorSetInformer interface { Informer() cache.SharedIndexInformer - Lister() v1.ImageTagMirrorSetLister + Lister() configv1.ImageTagMirrorSetLister } type imageTagMirrorSetInformer struct { @@ -54,7 +54,7 @@ func NewFilteredImageTagMirrorSetInformer(client versioned.Interface, resyncPeri return client.ConfigV1().ImageTagMirrorSets().Watch(context.TODO(), options) }, }, - &configv1.ImageTagMirrorSet{}, + &apiconfigv1.ImageTagMirrorSet{}, resyncPeriod, indexers, ) @@ -65,9 +65,9 @@ func (f *imageTagMirrorSetInformer) defaultInformer(client versioned.Interface, } func (f *imageTagMirrorSetInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&configv1.ImageTagMirrorSet{}, f.defaultInformer) + return f.factory.InformerFor(&apiconfigv1.ImageTagMirrorSet{}, f.defaultInformer) } -func (f *imageTagMirrorSetInformer) Lister() v1.ImageTagMirrorSetLister { - return v1.NewImageTagMirrorSetLister(f.Informer().GetIndexer()) +func (f *imageTagMirrorSetInformer) Lister() configv1.ImageTagMirrorSetLister { + return configv1.NewImageTagMirrorSetLister(f.Informer().GetIndexer()) } diff --git a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/infrastructure.go b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/infrastructure.go index 150ee6fe8..4891bd249 100644 --- a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/infrastructure.go +++ b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/infrastructure.go @@ -3,13 +3,13 @@ package v1 import ( - "context" + context "context" time "time" - configv1 "github.com/openshift/api/config/v1" + apiconfigv1 "github.com/openshift/api/config/v1" versioned "github.com/openshift/client-go/config/clientset/versioned" internalinterfaces "github.com/openshift/client-go/config/informers/externalversions/internalinterfaces" - v1 "github.com/openshift/client-go/config/listers/config/v1" + configv1 "github.com/openshift/client-go/config/listers/config/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" @@ -20,7 +20,7 @@ import ( // Infrastructures. 
type InfrastructureInformer interface { Informer() cache.SharedIndexInformer - Lister() v1.InfrastructureLister + Lister() configv1.InfrastructureLister } type infrastructureInformer struct { @@ -54,7 +54,7 @@ func NewFilteredInfrastructureInformer(client versioned.Interface, resyncPeriod return client.ConfigV1().Infrastructures().Watch(context.TODO(), options) }, }, - &configv1.Infrastructure{}, + &apiconfigv1.Infrastructure{}, resyncPeriod, indexers, ) @@ -65,9 +65,9 @@ func (f *infrastructureInformer) defaultInformer(client versioned.Interface, res } func (f *infrastructureInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&configv1.Infrastructure{}, f.defaultInformer) + return f.factory.InformerFor(&apiconfigv1.Infrastructure{}, f.defaultInformer) } -func (f *infrastructureInformer) Lister() v1.InfrastructureLister { - return v1.NewInfrastructureLister(f.Informer().GetIndexer()) +func (f *infrastructureInformer) Lister() configv1.InfrastructureLister { + return configv1.NewInfrastructureLister(f.Informer().GetIndexer()) } diff --git a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/ingress.go b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/ingress.go index 4452b1022..59ca11638 100644 --- a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/ingress.go +++ b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/ingress.go @@ -3,13 +3,13 @@ package v1 import ( - "context" + context "context" time "time" - configv1 "github.com/openshift/api/config/v1" + apiconfigv1 "github.com/openshift/api/config/v1" versioned "github.com/openshift/client-go/config/clientset/versioned" internalinterfaces "github.com/openshift/client-go/config/informers/externalversions/internalinterfaces" - v1 "github.com/openshift/client-go/config/listers/config/v1" + configv1 "github.com/openshift/client-go/config/listers/config/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" @@ -20,7 +20,7 @@ import ( // Ingresses. 
type IngressInformer interface { Informer() cache.SharedIndexInformer - Lister() v1.IngressLister + Lister() configv1.IngressLister } type ingressInformer struct { @@ -54,7 +54,7 @@ func NewFilteredIngressInformer(client versioned.Interface, resyncPeriod time.Du return client.ConfigV1().Ingresses().Watch(context.TODO(), options) }, }, - &configv1.Ingress{}, + &apiconfigv1.Ingress{}, resyncPeriod, indexers, ) @@ -65,9 +65,9 @@ func (f *ingressInformer) defaultInformer(client versioned.Interface, resyncPeri } func (f *ingressInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&configv1.Ingress{}, f.defaultInformer) + return f.factory.InformerFor(&apiconfigv1.Ingress{}, f.defaultInformer) } -func (f *ingressInformer) Lister() v1.IngressLister { - return v1.NewIngressLister(f.Informer().GetIndexer()) +func (f *ingressInformer) Lister() configv1.IngressLister { + return configv1.NewIngressLister(f.Informer().GetIndexer()) } diff --git a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/network.go b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/network.go index d05980759..48e4896de 100644 --- a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/network.go +++ b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/network.go @@ -3,13 +3,13 @@ package v1 import ( - "context" + context "context" time "time" - configv1 "github.com/openshift/api/config/v1" + apiconfigv1 "github.com/openshift/api/config/v1" versioned "github.com/openshift/client-go/config/clientset/versioned" internalinterfaces "github.com/openshift/client-go/config/informers/externalversions/internalinterfaces" - v1 "github.com/openshift/client-go/config/listers/config/v1" + configv1 "github.com/openshift/client-go/config/listers/config/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" @@ -20,7 +20,7 @@ import ( // Networks. 
type NetworkInformer interface { Informer() cache.SharedIndexInformer - Lister() v1.NetworkLister + Lister() configv1.NetworkLister } type networkInformer struct { @@ -54,7 +54,7 @@ func NewFilteredNetworkInformer(client versioned.Interface, resyncPeriod time.Du return client.ConfigV1().Networks().Watch(context.TODO(), options) }, }, - &configv1.Network{}, + &apiconfigv1.Network{}, resyncPeriod, indexers, ) @@ -65,9 +65,9 @@ func (f *networkInformer) defaultInformer(client versioned.Interface, resyncPeri } func (f *networkInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&configv1.Network{}, f.defaultInformer) + return f.factory.InformerFor(&apiconfigv1.Network{}, f.defaultInformer) } -func (f *networkInformer) Lister() v1.NetworkLister { - return v1.NewNetworkLister(f.Informer().GetIndexer()) +func (f *networkInformer) Lister() configv1.NetworkLister { + return configv1.NewNetworkLister(f.Informer().GetIndexer()) } diff --git a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/node.go b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/node.go index 6a9f806df..2cb791b00 100644 --- a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/node.go +++ b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/node.go @@ -3,13 +3,13 @@ package v1 import ( - "context" + context "context" time "time" - configv1 "github.com/openshift/api/config/v1" + apiconfigv1 "github.com/openshift/api/config/v1" versioned "github.com/openshift/client-go/config/clientset/versioned" internalinterfaces "github.com/openshift/client-go/config/informers/externalversions/internalinterfaces" - v1 "github.com/openshift/client-go/config/listers/config/v1" + configv1 "github.com/openshift/client-go/config/listers/config/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" @@ -20,7 +20,7 @@ import ( // Nodes. 
type NodeInformer interface { Informer() cache.SharedIndexInformer - Lister() v1.NodeLister + Lister() configv1.NodeLister } type nodeInformer struct { @@ -54,7 +54,7 @@ func NewFilteredNodeInformer(client versioned.Interface, resyncPeriod time.Durat return client.ConfigV1().Nodes().Watch(context.TODO(), options) }, }, - &configv1.Node{}, + &apiconfigv1.Node{}, resyncPeriod, indexers, ) @@ -65,9 +65,9 @@ func (f *nodeInformer) defaultInformer(client versioned.Interface, resyncPeriod } func (f *nodeInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&configv1.Node{}, f.defaultInformer) + return f.factory.InformerFor(&apiconfigv1.Node{}, f.defaultInformer) } -func (f *nodeInformer) Lister() v1.NodeLister { - return v1.NewNodeLister(f.Informer().GetIndexer()) +func (f *nodeInformer) Lister() configv1.NodeLister { + return configv1.NewNodeLister(f.Informer().GetIndexer()) } diff --git a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/oauth.go b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/oauth.go index 31b37b793..75128769f 100644 --- a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/oauth.go +++ b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/oauth.go @@ -3,13 +3,13 @@ package v1 import ( - "context" + context "context" time "time" - configv1 "github.com/openshift/api/config/v1" + apiconfigv1 "github.com/openshift/api/config/v1" versioned "github.com/openshift/client-go/config/clientset/versioned" internalinterfaces "github.com/openshift/client-go/config/informers/externalversions/internalinterfaces" - v1 "github.com/openshift/client-go/config/listers/config/v1" + configv1 "github.com/openshift/client-go/config/listers/config/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" @@ -20,7 +20,7 @@ import ( // OAuths. 
type OAuthInformer interface { Informer() cache.SharedIndexInformer - Lister() v1.OAuthLister + Lister() configv1.OAuthLister } type oAuthInformer struct { @@ -54,7 +54,7 @@ func NewFilteredOAuthInformer(client versioned.Interface, resyncPeriod time.Dura return client.ConfigV1().OAuths().Watch(context.TODO(), options) }, }, - &configv1.OAuth{}, + &apiconfigv1.OAuth{}, resyncPeriod, indexers, ) @@ -65,9 +65,9 @@ func (f *oAuthInformer) defaultInformer(client versioned.Interface, resyncPeriod } func (f *oAuthInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&configv1.OAuth{}, f.defaultInformer) + return f.factory.InformerFor(&apiconfigv1.OAuth{}, f.defaultInformer) } -func (f *oAuthInformer) Lister() v1.OAuthLister { - return v1.NewOAuthLister(f.Informer().GetIndexer()) +func (f *oAuthInformer) Lister() configv1.OAuthLister { + return configv1.NewOAuthLister(f.Informer().GetIndexer()) } diff --git a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/operatorhub.go b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/operatorhub.go index a2c8757fc..d2196b225 100644 --- a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/operatorhub.go +++ b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/operatorhub.go @@ -3,13 +3,13 @@ package v1 import ( - "context" + context "context" time "time" - configv1 "github.com/openshift/api/config/v1" + apiconfigv1 "github.com/openshift/api/config/v1" versioned "github.com/openshift/client-go/config/clientset/versioned" internalinterfaces "github.com/openshift/client-go/config/informers/externalversions/internalinterfaces" - v1 "github.com/openshift/client-go/config/listers/config/v1" + configv1 "github.com/openshift/client-go/config/listers/config/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" @@ -20,7 +20,7 @@ import ( // OperatorHubs. 
type OperatorHubInformer interface { Informer() cache.SharedIndexInformer - Lister() v1.OperatorHubLister + Lister() configv1.OperatorHubLister } type operatorHubInformer struct { @@ -54,7 +54,7 @@ func NewFilteredOperatorHubInformer(client versioned.Interface, resyncPeriod tim return client.ConfigV1().OperatorHubs().Watch(context.TODO(), options) }, }, - &configv1.OperatorHub{}, + &apiconfigv1.OperatorHub{}, resyncPeriod, indexers, ) @@ -65,9 +65,9 @@ func (f *operatorHubInformer) defaultInformer(client versioned.Interface, resync } func (f *operatorHubInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&configv1.OperatorHub{}, f.defaultInformer) + return f.factory.InformerFor(&apiconfigv1.OperatorHub{}, f.defaultInformer) } -func (f *operatorHubInformer) Lister() v1.OperatorHubLister { - return v1.NewOperatorHubLister(f.Informer().GetIndexer()) +func (f *operatorHubInformer) Lister() configv1.OperatorHubLister { + return configv1.NewOperatorHubLister(f.Informer().GetIndexer()) } diff --git a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/project.go b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/project.go index c9f5af1ec..0c5604e1e 100644 --- a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/project.go +++ b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/project.go @@ -3,13 +3,13 @@ package v1 import ( - "context" + context "context" time "time" - configv1 "github.com/openshift/api/config/v1" + apiconfigv1 "github.com/openshift/api/config/v1" versioned "github.com/openshift/client-go/config/clientset/versioned" internalinterfaces "github.com/openshift/client-go/config/informers/externalversions/internalinterfaces" - v1 "github.com/openshift/client-go/config/listers/config/v1" + configv1 "github.com/openshift/client-go/config/listers/config/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" @@ -20,7 +20,7 @@ import ( // Projects. 
type ProjectInformer interface { Informer() cache.SharedIndexInformer - Lister() v1.ProjectLister + Lister() configv1.ProjectLister } type projectInformer struct { @@ -54,7 +54,7 @@ func NewFilteredProjectInformer(client versioned.Interface, resyncPeriod time.Du return client.ConfigV1().Projects().Watch(context.TODO(), options) }, }, - &configv1.Project{}, + &apiconfigv1.Project{}, resyncPeriod, indexers, ) @@ -65,9 +65,9 @@ func (f *projectInformer) defaultInformer(client versioned.Interface, resyncPeri } func (f *projectInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&configv1.Project{}, f.defaultInformer) + return f.factory.InformerFor(&apiconfigv1.Project{}, f.defaultInformer) } -func (f *projectInformer) Lister() v1.ProjectLister { - return v1.NewProjectLister(f.Informer().GetIndexer()) +func (f *projectInformer) Lister() configv1.ProjectLister { + return configv1.NewProjectLister(f.Informer().GetIndexer()) } diff --git a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/proxy.go b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/proxy.go index cfbcd029e..aa1c2c551 100644 --- a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/proxy.go +++ b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/proxy.go @@ -3,13 +3,13 @@ package v1 import ( - "context" + context "context" time "time" - configv1 "github.com/openshift/api/config/v1" + apiconfigv1 "github.com/openshift/api/config/v1" versioned "github.com/openshift/client-go/config/clientset/versioned" internalinterfaces "github.com/openshift/client-go/config/informers/externalversions/internalinterfaces" - v1 "github.com/openshift/client-go/config/listers/config/v1" + configv1 "github.com/openshift/client-go/config/listers/config/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" @@ -20,7 +20,7 @@ import ( // Proxies. 
type ProxyInformer interface { Informer() cache.SharedIndexInformer - Lister() v1.ProxyLister + Lister() configv1.ProxyLister } type proxyInformer struct { @@ -54,7 +54,7 @@ func NewFilteredProxyInformer(client versioned.Interface, resyncPeriod time.Dura return client.ConfigV1().Proxies().Watch(context.TODO(), options) }, }, - &configv1.Proxy{}, + &apiconfigv1.Proxy{}, resyncPeriod, indexers, ) @@ -65,9 +65,9 @@ func (f *proxyInformer) defaultInformer(client versioned.Interface, resyncPeriod } func (f *proxyInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&configv1.Proxy{}, f.defaultInformer) + return f.factory.InformerFor(&apiconfigv1.Proxy{}, f.defaultInformer) } -func (f *proxyInformer) Lister() v1.ProxyLister { - return v1.NewProxyLister(f.Informer().GetIndexer()) +func (f *proxyInformer) Lister() configv1.ProxyLister { + return configv1.NewProxyLister(f.Informer().GetIndexer()) } diff --git a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/scheduler.go b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/scheduler.go index 104cdd76c..0117f2941 100644 --- a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/scheduler.go +++ b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/scheduler.go @@ -3,13 +3,13 @@ package v1 import ( - "context" + context "context" time "time" - configv1 "github.com/openshift/api/config/v1" + apiconfigv1 "github.com/openshift/api/config/v1" versioned "github.com/openshift/client-go/config/clientset/versioned" internalinterfaces "github.com/openshift/client-go/config/informers/externalversions/internalinterfaces" - v1 "github.com/openshift/client-go/config/listers/config/v1" + configv1 "github.com/openshift/client-go/config/listers/config/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" @@ -20,7 +20,7 @@ import ( // Schedulers. type SchedulerInformer interface { Informer() cache.SharedIndexInformer - Lister() v1.SchedulerLister + Lister() configv1.SchedulerLister } type schedulerInformer struct { @@ -54,7 +54,7 @@ func NewFilteredSchedulerInformer(client versioned.Interface, resyncPeriod time. 
return client.ConfigV1().Schedulers().Watch(context.TODO(), options) }, }, - &configv1.Scheduler{}, + &apiconfigv1.Scheduler{}, resyncPeriod, indexers, ) @@ -65,9 +65,9 @@ func (f *schedulerInformer) defaultInformer(client versioned.Interface, resyncPe } func (f *schedulerInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&configv1.Scheduler{}, f.defaultInformer) + return f.factory.InformerFor(&apiconfigv1.Scheduler{}, f.defaultInformer) } -func (f *schedulerInformer) Lister() v1.SchedulerLister { - return v1.NewSchedulerLister(f.Informer().GetIndexer()) +func (f *schedulerInformer) Lister() configv1.SchedulerLister { + return configv1.NewSchedulerLister(f.Informer().GetIndexer()) } diff --git a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1alpha1/backup.go b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1alpha1/backup.go index ae3013d42..bed1857ee 100644 --- a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1alpha1/backup.go +++ b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1alpha1/backup.go @@ -3,13 +3,13 @@ package v1alpha1 import ( - "context" + context "context" time "time" - configv1alpha1 "github.com/openshift/api/config/v1alpha1" + apiconfigv1alpha1 "github.com/openshift/api/config/v1alpha1" versioned "github.com/openshift/client-go/config/clientset/versioned" internalinterfaces "github.com/openshift/client-go/config/informers/externalversions/internalinterfaces" - v1alpha1 "github.com/openshift/client-go/config/listers/config/v1alpha1" + configv1alpha1 "github.com/openshift/client-go/config/listers/config/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" @@ -20,7 +20,7 @@ import ( // Backups. 
type BackupInformer interface { Informer() cache.SharedIndexInformer - Lister() v1alpha1.BackupLister + Lister() configv1alpha1.BackupLister } type backupInformer struct { @@ -54,7 +54,7 @@ func NewFilteredBackupInformer(client versioned.Interface, resyncPeriod time.Dur return client.ConfigV1alpha1().Backups().Watch(context.TODO(), options) }, }, - &configv1alpha1.Backup{}, + &apiconfigv1alpha1.Backup{}, resyncPeriod, indexers, ) @@ -65,9 +65,9 @@ func (f *backupInformer) defaultInformer(client versioned.Interface, resyncPerio } func (f *backupInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&configv1alpha1.Backup{}, f.defaultInformer) + return f.factory.InformerFor(&apiconfigv1alpha1.Backup{}, f.defaultInformer) } -func (f *backupInformer) Lister() v1alpha1.BackupLister { - return v1alpha1.NewBackupLister(f.Informer().GetIndexer()) +func (f *backupInformer) Lister() configv1alpha1.BackupLister { + return configv1alpha1.NewBackupLister(f.Informer().GetIndexer()) } diff --git a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1alpha1/clusterimagepolicy.go b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1alpha1/clusterimagepolicy.go index 2e71741a7..b11866c35 100644 --- a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1alpha1/clusterimagepolicy.go +++ b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1alpha1/clusterimagepolicy.go @@ -3,13 +3,13 @@ package v1alpha1 import ( - "context" + context "context" time "time" - configv1alpha1 "github.com/openshift/api/config/v1alpha1" + apiconfigv1alpha1 "github.com/openshift/api/config/v1alpha1" versioned "github.com/openshift/client-go/config/clientset/versioned" internalinterfaces "github.com/openshift/client-go/config/informers/externalversions/internalinterfaces" - v1alpha1 "github.com/openshift/client-go/config/listers/config/v1alpha1" + configv1alpha1 "github.com/openshift/client-go/config/listers/config/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" @@ -20,7 +20,7 @@ import ( // ClusterImagePolicies. 
type ClusterImagePolicyInformer interface { Informer() cache.SharedIndexInformer - Lister() v1alpha1.ClusterImagePolicyLister + Lister() configv1alpha1.ClusterImagePolicyLister } type clusterImagePolicyInformer struct { @@ -54,7 +54,7 @@ func NewFilteredClusterImagePolicyInformer(client versioned.Interface, resyncPer return client.ConfigV1alpha1().ClusterImagePolicies().Watch(context.TODO(), options) }, }, - &configv1alpha1.ClusterImagePolicy{}, + &apiconfigv1alpha1.ClusterImagePolicy{}, resyncPeriod, indexers, ) @@ -65,9 +65,9 @@ func (f *clusterImagePolicyInformer) defaultInformer(client versioned.Interface, } func (f *clusterImagePolicyInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&configv1alpha1.ClusterImagePolicy{}, f.defaultInformer) + return f.factory.InformerFor(&apiconfigv1alpha1.ClusterImagePolicy{}, f.defaultInformer) } -func (f *clusterImagePolicyInformer) Lister() v1alpha1.ClusterImagePolicyLister { - return v1alpha1.NewClusterImagePolicyLister(f.Informer().GetIndexer()) +func (f *clusterImagePolicyInformer) Lister() configv1alpha1.ClusterImagePolicyLister { + return configv1alpha1.NewClusterImagePolicyLister(f.Informer().GetIndexer()) } diff --git a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1alpha1/clustermonitoring.go b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1alpha1/clustermonitoring.go new file mode 100644 index 000000000..94a2ec3e4 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1alpha1/clustermonitoring.go @@ -0,0 +1,73 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + context "context" + time "time" + + apiconfigv1alpha1 "github.com/openshift/api/config/v1alpha1" + versioned "github.com/openshift/client-go/config/clientset/versioned" + internalinterfaces "github.com/openshift/client-go/config/informers/externalversions/internalinterfaces" + configv1alpha1 "github.com/openshift/client-go/config/listers/config/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// ClusterMonitoringInformer provides access to a shared informer and lister for +// ClusterMonitorings. +type ClusterMonitoringInformer interface { + Informer() cache.SharedIndexInformer + Lister() configv1alpha1.ClusterMonitoringLister +} + +type clusterMonitoringInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewClusterMonitoringInformer constructs a new informer for ClusterMonitoring type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewClusterMonitoringInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredClusterMonitoringInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredClusterMonitoringInformer constructs a new informer for ClusterMonitoring type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredClusterMonitoringInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ConfigV1alpha1().ClusterMonitorings().List(context.TODO(), options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ConfigV1alpha1().ClusterMonitorings().Watch(context.TODO(), options) + }, + }, + &apiconfigv1alpha1.ClusterMonitoring{}, + resyncPeriod, + indexers, + ) +} + +func (f *clusterMonitoringInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredClusterMonitoringInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *clusterMonitoringInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&apiconfigv1alpha1.ClusterMonitoring{}, f.defaultInformer) +} + +func (f *clusterMonitoringInformer) Lister() configv1alpha1.ClusterMonitoringLister { + return configv1alpha1.NewClusterMonitoringLister(f.Informer().GetIndexer()) +} diff --git a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1alpha1/imagepolicy.go b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1alpha1/imagepolicy.go index ba72caedf..d6ab02a00 100644 --- a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1alpha1/imagepolicy.go +++ b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1alpha1/imagepolicy.go @@ -3,13 +3,13 @@ package v1alpha1 import ( - "context" + context "context" time "time" - configv1alpha1 "github.com/openshift/api/config/v1alpha1" + apiconfigv1alpha1 "github.com/openshift/api/config/v1alpha1" versioned "github.com/openshift/client-go/config/clientset/versioned" internalinterfaces "github.com/openshift/client-go/config/informers/externalversions/internalinterfaces" - v1alpha1 "github.com/openshift/client-go/config/listers/config/v1alpha1" + configv1alpha1 "github.com/openshift/client-go/config/listers/config/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" @@ -20,7 +20,7 @@ import ( // ImagePolicies. 
type ImagePolicyInformer interface { Informer() cache.SharedIndexInformer - Lister() v1alpha1.ImagePolicyLister + Lister() configv1alpha1.ImagePolicyLister } type imagePolicyInformer struct { @@ -55,7 +55,7 @@ func NewFilteredImagePolicyInformer(client versioned.Interface, namespace string return client.ConfigV1alpha1().ImagePolicies(namespace).Watch(context.TODO(), options) }, }, - &configv1alpha1.ImagePolicy{}, + &apiconfigv1alpha1.ImagePolicy{}, resyncPeriod, indexers, ) @@ -66,9 +66,9 @@ func (f *imagePolicyInformer) defaultInformer(client versioned.Interface, resync } func (f *imagePolicyInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&configv1alpha1.ImagePolicy{}, f.defaultInformer) + return f.factory.InformerFor(&apiconfigv1alpha1.ImagePolicy{}, f.defaultInformer) } -func (f *imagePolicyInformer) Lister() v1alpha1.ImagePolicyLister { - return v1alpha1.NewImagePolicyLister(f.Informer().GetIndexer()) +func (f *imagePolicyInformer) Lister() configv1alpha1.ImagePolicyLister { + return configv1alpha1.NewImagePolicyLister(f.Informer().GetIndexer()) } diff --git a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1alpha1/insightsdatagather.go b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1alpha1/insightsdatagather.go index 22a41d363..51f09bad2 100644 --- a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1alpha1/insightsdatagather.go +++ b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1alpha1/insightsdatagather.go @@ -3,13 +3,13 @@ package v1alpha1 import ( - "context" + context "context" time "time" - configv1alpha1 "github.com/openshift/api/config/v1alpha1" + apiconfigv1alpha1 "github.com/openshift/api/config/v1alpha1" versioned "github.com/openshift/client-go/config/clientset/versioned" internalinterfaces "github.com/openshift/client-go/config/informers/externalversions/internalinterfaces" - v1alpha1 "github.com/openshift/client-go/config/listers/config/v1alpha1" + configv1alpha1 "github.com/openshift/client-go/config/listers/config/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" @@ -20,7 +20,7 @@ import ( // InsightsDataGathers. 
type InsightsDataGatherInformer interface { Informer() cache.SharedIndexInformer - Lister() v1alpha1.InsightsDataGatherLister + Lister() configv1alpha1.InsightsDataGatherLister } type insightsDataGatherInformer struct { @@ -54,7 +54,7 @@ func NewFilteredInsightsDataGatherInformer(client versioned.Interface, resyncPer return client.ConfigV1alpha1().InsightsDataGathers().Watch(context.TODO(), options) }, }, - &configv1alpha1.InsightsDataGather{}, + &apiconfigv1alpha1.InsightsDataGather{}, resyncPeriod, indexers, ) @@ -65,9 +65,9 @@ func (f *insightsDataGatherInformer) defaultInformer(client versioned.Interface, } func (f *insightsDataGatherInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&configv1alpha1.InsightsDataGather{}, f.defaultInformer) + return f.factory.InformerFor(&apiconfigv1alpha1.InsightsDataGather{}, f.defaultInformer) } -func (f *insightsDataGatherInformer) Lister() v1alpha1.InsightsDataGatherLister { - return v1alpha1.NewInsightsDataGatherLister(f.Informer().GetIndexer()) +func (f *insightsDataGatherInformer) Lister() configv1alpha1.InsightsDataGatherLister { + return configv1alpha1.NewInsightsDataGatherLister(f.Informer().GetIndexer()) } diff --git a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1alpha1/interface.go b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1alpha1/interface.go index 69b5569fa..893d2db0a 100644 --- a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1alpha1/interface.go +++ b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1alpha1/interface.go @@ -12,6 +12,8 @@ type Interface interface { Backups() BackupInformer // ClusterImagePolicies returns a ClusterImagePolicyInformer. ClusterImagePolicies() ClusterImagePolicyInformer + // ClusterMonitorings returns a ClusterMonitoringInformer. + ClusterMonitorings() ClusterMonitoringInformer // ImagePolicies returns a ImagePolicyInformer. ImagePolicies() ImagePolicyInformer // InsightsDataGathers returns a InsightsDataGatherInformer. @@ -39,6 +41,11 @@ func (v *version) ClusterImagePolicies() ClusterImagePolicyInformer { return &clusterImagePolicyInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} } +// ClusterMonitorings returns a ClusterMonitoringInformer. +func (v *version) ClusterMonitorings() ClusterMonitoringInformer { + return &clusterMonitoringInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} + // ImagePolicies returns a ImagePolicyInformer. 
func (v *version) ImagePolicies() ImagePolicyInformer { return &imagePolicyInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} diff --git a/vendor/github.com/openshift/client-go/config/informers/externalversions/generic.go b/vendor/github.com/openshift/client-go/config/informers/externalversions/generic.go index 20b3b4e60..9135d1fcc 100644 --- a/vendor/github.com/openshift/client-go/config/informers/externalversions/generic.go +++ b/vendor/github.com/openshift/client-go/config/informers/externalversions/generic.go @@ -3,7 +3,7 @@ package externalversions import ( - "fmt" + fmt "fmt" v1 "github.com/openshift/api/config/v1" v1alpha1 "github.com/openshift/api/config/v1alpha1" @@ -86,6 +86,8 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource return &genericInformer{resource: resource.GroupResource(), informer: f.Config().V1alpha1().Backups().Informer()}, nil case v1alpha1.SchemeGroupVersion.WithResource("clusterimagepolicies"): return &genericInformer{resource: resource.GroupResource(), informer: f.Config().V1alpha1().ClusterImagePolicies().Informer()}, nil + case v1alpha1.SchemeGroupVersion.WithResource("clustermonitorings"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Config().V1alpha1().ClusterMonitorings().Informer()}, nil case v1alpha1.SchemeGroupVersion.WithResource("imagepolicies"): return &genericInformer{resource: resource.GroupResource(), informer: f.Config().V1alpha1().ImagePolicies().Informer()}, nil case v1alpha1.SchemeGroupVersion.WithResource("insightsdatagathers"): diff --git a/vendor/github.com/openshift/client-go/config/listers/config/v1/apiserver.go b/vendor/github.com/openshift/client-go/config/listers/config/v1/apiserver.go index 456d54406..59c5faa8a 100644 --- a/vendor/github.com/openshift/client-go/config/listers/config/v1/apiserver.go +++ b/vendor/github.com/openshift/client-go/config/listers/config/v1/apiserver.go @@ -3,10 +3,10 @@ package v1 import ( - v1 "github.com/openshift/api/config/v1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/listers" - "k8s.io/client-go/tools/cache" + configv1 "github.com/openshift/api/config/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // APIServerLister helps list APIServers. @@ -14,19 +14,19 @@ import ( type APIServerLister interface { // List lists all APIServers in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.APIServer, err error) + List(selector labels.Selector) (ret []*configv1.APIServer, err error) // Get retrieves the APIServer from the index for a given name. // Objects returned here must be treated as read-only. - Get(name string) (*v1.APIServer, error) + Get(name string) (*configv1.APIServer, error) APIServerListerExpansion } // aPIServerLister implements the APIServerLister interface. type aPIServerLister struct { - listers.ResourceIndexer[*v1.APIServer] + listers.ResourceIndexer[*configv1.APIServer] } // NewAPIServerLister returns a new APIServerLister. 
func NewAPIServerLister(indexer cache.Indexer) APIServerLister { - return &aPIServerLister{listers.New[*v1.APIServer](indexer, v1.Resource("apiserver"))} + return &aPIServerLister{listers.New[*configv1.APIServer](indexer, configv1.Resource("apiserver"))} } diff --git a/vendor/github.com/openshift/client-go/config/listers/config/v1/authentication.go b/vendor/github.com/openshift/client-go/config/listers/config/v1/authentication.go index c048da0e1..242930e68 100644 --- a/vendor/github.com/openshift/client-go/config/listers/config/v1/authentication.go +++ b/vendor/github.com/openshift/client-go/config/listers/config/v1/authentication.go @@ -3,10 +3,10 @@ package v1 import ( - v1 "github.com/openshift/api/config/v1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/listers" - "k8s.io/client-go/tools/cache" + configv1 "github.com/openshift/api/config/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // AuthenticationLister helps list Authentications. @@ -14,19 +14,19 @@ import ( type AuthenticationLister interface { // List lists all Authentications in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.Authentication, err error) + List(selector labels.Selector) (ret []*configv1.Authentication, err error) // Get retrieves the Authentication from the index for a given name. // Objects returned here must be treated as read-only. - Get(name string) (*v1.Authentication, error) + Get(name string) (*configv1.Authentication, error) AuthenticationListerExpansion } // authenticationLister implements the AuthenticationLister interface. type authenticationLister struct { - listers.ResourceIndexer[*v1.Authentication] + listers.ResourceIndexer[*configv1.Authentication] } // NewAuthenticationLister returns a new AuthenticationLister. func NewAuthenticationLister(indexer cache.Indexer) AuthenticationLister { - return &authenticationLister{listers.New[*v1.Authentication](indexer, v1.Resource("authentication"))} + return &authenticationLister{listers.New[*configv1.Authentication](indexer, configv1.Resource("authentication"))} } diff --git a/vendor/github.com/openshift/client-go/config/listers/config/v1/build.go b/vendor/github.com/openshift/client-go/config/listers/config/v1/build.go index c4fcae5d9..b98accfee 100644 --- a/vendor/github.com/openshift/client-go/config/listers/config/v1/build.go +++ b/vendor/github.com/openshift/client-go/config/listers/config/v1/build.go @@ -3,10 +3,10 @@ package v1 import ( - v1 "github.com/openshift/api/config/v1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/listers" - "k8s.io/client-go/tools/cache" + configv1 "github.com/openshift/api/config/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // BuildLister helps list Builds. @@ -14,19 +14,19 @@ import ( type BuildLister interface { // List lists all Builds in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.Build, err error) + List(selector labels.Selector) (ret []*configv1.Build, err error) // Get retrieves the Build from the index for a given name. // Objects returned here must be treated as read-only. - Get(name string) (*v1.Build, error) + Get(name string) (*configv1.Build, error) BuildListerExpansion } // buildLister implements the BuildLister interface. 
type buildLister struct { - listers.ResourceIndexer[*v1.Build] + listers.ResourceIndexer[*configv1.Build] } // NewBuildLister returns a new BuildLister. func NewBuildLister(indexer cache.Indexer) BuildLister { - return &buildLister{listers.New[*v1.Build](indexer, v1.Resource("build"))} + return &buildLister{listers.New[*configv1.Build](indexer, configv1.Resource("build"))} } diff --git a/vendor/github.com/openshift/client-go/config/listers/config/v1/clusteroperator.go b/vendor/github.com/openshift/client-go/config/listers/config/v1/clusteroperator.go index d9ba7e8aa..a8eaacf78 100644 --- a/vendor/github.com/openshift/client-go/config/listers/config/v1/clusteroperator.go +++ b/vendor/github.com/openshift/client-go/config/listers/config/v1/clusteroperator.go @@ -3,10 +3,10 @@ package v1 import ( - v1 "github.com/openshift/api/config/v1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/listers" - "k8s.io/client-go/tools/cache" + configv1 "github.com/openshift/api/config/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // ClusterOperatorLister helps list ClusterOperators. @@ -14,19 +14,19 @@ import ( type ClusterOperatorLister interface { // List lists all ClusterOperators in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.ClusterOperator, err error) + List(selector labels.Selector) (ret []*configv1.ClusterOperator, err error) // Get retrieves the ClusterOperator from the index for a given name. // Objects returned here must be treated as read-only. - Get(name string) (*v1.ClusterOperator, error) + Get(name string) (*configv1.ClusterOperator, error) ClusterOperatorListerExpansion } // clusterOperatorLister implements the ClusterOperatorLister interface. type clusterOperatorLister struct { - listers.ResourceIndexer[*v1.ClusterOperator] + listers.ResourceIndexer[*configv1.ClusterOperator] } // NewClusterOperatorLister returns a new ClusterOperatorLister. func NewClusterOperatorLister(indexer cache.Indexer) ClusterOperatorLister { - return &clusterOperatorLister{listers.New[*v1.ClusterOperator](indexer, v1.Resource("clusteroperator"))} + return &clusterOperatorLister{listers.New[*configv1.ClusterOperator](indexer, configv1.Resource("clusteroperator"))} } diff --git a/vendor/github.com/openshift/client-go/config/listers/config/v1/clusterversion.go b/vendor/github.com/openshift/client-go/config/listers/config/v1/clusterversion.go index d05248090..9f466ccb9 100644 --- a/vendor/github.com/openshift/client-go/config/listers/config/v1/clusterversion.go +++ b/vendor/github.com/openshift/client-go/config/listers/config/v1/clusterversion.go @@ -3,10 +3,10 @@ package v1 import ( - v1 "github.com/openshift/api/config/v1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/listers" - "k8s.io/client-go/tools/cache" + configv1 "github.com/openshift/api/config/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // ClusterVersionLister helps list ClusterVersions. @@ -14,19 +14,19 @@ import ( type ClusterVersionLister interface { // List lists all ClusterVersions in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.ClusterVersion, err error) + List(selector labels.Selector) (ret []*configv1.ClusterVersion, err error) // Get retrieves the ClusterVersion from the index for a given name. 
// Objects returned here must be treated as read-only. - Get(name string) (*v1.ClusterVersion, error) + Get(name string) (*configv1.ClusterVersion, error) ClusterVersionListerExpansion } // clusterVersionLister implements the ClusterVersionLister interface. type clusterVersionLister struct { - listers.ResourceIndexer[*v1.ClusterVersion] + listers.ResourceIndexer[*configv1.ClusterVersion] } // NewClusterVersionLister returns a new ClusterVersionLister. func NewClusterVersionLister(indexer cache.Indexer) ClusterVersionLister { - return &clusterVersionLister{listers.New[*v1.ClusterVersion](indexer, v1.Resource("clusterversion"))} + return &clusterVersionLister{listers.New[*configv1.ClusterVersion](indexer, configv1.Resource("clusterversion"))} } diff --git a/vendor/github.com/openshift/client-go/config/listers/config/v1/console.go b/vendor/github.com/openshift/client-go/config/listers/config/v1/console.go index 192798555..e9d9558e7 100644 --- a/vendor/github.com/openshift/client-go/config/listers/config/v1/console.go +++ b/vendor/github.com/openshift/client-go/config/listers/config/v1/console.go @@ -3,10 +3,10 @@ package v1 import ( - v1 "github.com/openshift/api/config/v1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/listers" - "k8s.io/client-go/tools/cache" + configv1 "github.com/openshift/api/config/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // ConsoleLister helps list Consoles. @@ -14,19 +14,19 @@ import ( type ConsoleLister interface { // List lists all Consoles in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.Console, err error) + List(selector labels.Selector) (ret []*configv1.Console, err error) // Get retrieves the Console from the index for a given name. // Objects returned here must be treated as read-only. - Get(name string) (*v1.Console, error) + Get(name string) (*configv1.Console, error) ConsoleListerExpansion } // consoleLister implements the ConsoleLister interface. type consoleLister struct { - listers.ResourceIndexer[*v1.Console] + listers.ResourceIndexer[*configv1.Console] } // NewConsoleLister returns a new ConsoleLister. func NewConsoleLister(indexer cache.Indexer) ConsoleLister { - return &consoleLister{listers.New[*v1.Console](indexer, v1.Resource("console"))} + return &consoleLister{listers.New[*configv1.Console](indexer, configv1.Resource("console"))} } diff --git a/vendor/github.com/openshift/client-go/config/listers/config/v1/dns.go b/vendor/github.com/openshift/client-go/config/listers/config/v1/dns.go index 85dd5e4ba..95dbcd082 100644 --- a/vendor/github.com/openshift/client-go/config/listers/config/v1/dns.go +++ b/vendor/github.com/openshift/client-go/config/listers/config/v1/dns.go @@ -3,10 +3,10 @@ package v1 import ( - v1 "github.com/openshift/api/config/v1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/listers" - "k8s.io/client-go/tools/cache" + configv1 "github.com/openshift/api/config/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // DNSLister helps list DNSes. @@ -14,19 +14,19 @@ import ( type DNSLister interface { // List lists all DNSes in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.DNS, err error) + List(selector labels.Selector) (ret []*configv1.DNS, err error) // Get retrieves the DNS from the index for a given name. 
// Objects returned here must be treated as read-only. - Get(name string) (*v1.DNS, error) + Get(name string) (*configv1.DNS, error) DNSListerExpansion } // dNSLister implements the DNSLister interface. type dNSLister struct { - listers.ResourceIndexer[*v1.DNS] + listers.ResourceIndexer[*configv1.DNS] } // NewDNSLister returns a new DNSLister. func NewDNSLister(indexer cache.Indexer) DNSLister { - return &dNSLister{listers.New[*v1.DNS](indexer, v1.Resource("dns"))} + return &dNSLister{listers.New[*configv1.DNS](indexer, configv1.Resource("dns"))} } diff --git a/vendor/github.com/openshift/client-go/config/listers/config/v1/featuregate.go b/vendor/github.com/openshift/client-go/config/listers/config/v1/featuregate.go index a2bcacff6..7cedf7948 100644 --- a/vendor/github.com/openshift/client-go/config/listers/config/v1/featuregate.go +++ b/vendor/github.com/openshift/client-go/config/listers/config/v1/featuregate.go @@ -3,10 +3,10 @@ package v1 import ( - v1 "github.com/openshift/api/config/v1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/listers" - "k8s.io/client-go/tools/cache" + configv1 "github.com/openshift/api/config/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // FeatureGateLister helps list FeatureGates. @@ -14,19 +14,19 @@ import ( type FeatureGateLister interface { // List lists all FeatureGates in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.FeatureGate, err error) + List(selector labels.Selector) (ret []*configv1.FeatureGate, err error) // Get retrieves the FeatureGate from the index for a given name. // Objects returned here must be treated as read-only. - Get(name string) (*v1.FeatureGate, error) + Get(name string) (*configv1.FeatureGate, error) FeatureGateListerExpansion } // featureGateLister implements the FeatureGateLister interface. type featureGateLister struct { - listers.ResourceIndexer[*v1.FeatureGate] + listers.ResourceIndexer[*configv1.FeatureGate] } // NewFeatureGateLister returns a new FeatureGateLister. func NewFeatureGateLister(indexer cache.Indexer) FeatureGateLister { - return &featureGateLister{listers.New[*v1.FeatureGate](indexer, v1.Resource("featuregate"))} + return &featureGateLister{listers.New[*configv1.FeatureGate](indexer, configv1.Resource("featuregate"))} } diff --git a/vendor/github.com/openshift/client-go/config/listers/config/v1/image.go b/vendor/github.com/openshift/client-go/config/listers/config/v1/image.go index 908753f35..407415393 100644 --- a/vendor/github.com/openshift/client-go/config/listers/config/v1/image.go +++ b/vendor/github.com/openshift/client-go/config/listers/config/v1/image.go @@ -3,10 +3,10 @@ package v1 import ( - v1 "github.com/openshift/api/config/v1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/listers" - "k8s.io/client-go/tools/cache" + configv1 "github.com/openshift/api/config/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // ImageLister helps list Images. @@ -14,19 +14,19 @@ import ( type ImageLister interface { // List lists all Images in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.Image, err error) + List(selector labels.Selector) (ret []*configv1.Image, err error) // Get retrieves the Image from the index for a given name. // Objects returned here must be treated as read-only. 
- Get(name string) (*v1.Image, error) + Get(name string) (*configv1.Image, error) ImageListerExpansion } // imageLister implements the ImageLister interface. type imageLister struct { - listers.ResourceIndexer[*v1.Image] + listers.ResourceIndexer[*configv1.Image] } // NewImageLister returns a new ImageLister. func NewImageLister(indexer cache.Indexer) ImageLister { - return &imageLister{listers.New[*v1.Image](indexer, v1.Resource("image"))} + return &imageLister{listers.New[*configv1.Image](indexer, configv1.Resource("image"))} } diff --git a/vendor/github.com/openshift/client-go/config/listers/config/v1/imagecontentpolicy.go b/vendor/github.com/openshift/client-go/config/listers/config/v1/imagecontentpolicy.go index edad83a7b..75607f918 100644 --- a/vendor/github.com/openshift/client-go/config/listers/config/v1/imagecontentpolicy.go +++ b/vendor/github.com/openshift/client-go/config/listers/config/v1/imagecontentpolicy.go @@ -3,10 +3,10 @@ package v1 import ( - v1 "github.com/openshift/api/config/v1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/listers" - "k8s.io/client-go/tools/cache" + configv1 "github.com/openshift/api/config/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // ImageContentPolicyLister helps list ImageContentPolicies. @@ -14,19 +14,19 @@ import ( type ImageContentPolicyLister interface { // List lists all ImageContentPolicies in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.ImageContentPolicy, err error) + List(selector labels.Selector) (ret []*configv1.ImageContentPolicy, err error) // Get retrieves the ImageContentPolicy from the index for a given name. // Objects returned here must be treated as read-only. - Get(name string) (*v1.ImageContentPolicy, error) + Get(name string) (*configv1.ImageContentPolicy, error) ImageContentPolicyListerExpansion } // imageContentPolicyLister implements the ImageContentPolicyLister interface. type imageContentPolicyLister struct { - listers.ResourceIndexer[*v1.ImageContentPolicy] + listers.ResourceIndexer[*configv1.ImageContentPolicy] } // NewImageContentPolicyLister returns a new ImageContentPolicyLister. func NewImageContentPolicyLister(indexer cache.Indexer) ImageContentPolicyLister { - return &imageContentPolicyLister{listers.New[*v1.ImageContentPolicy](indexer, v1.Resource("imagecontentpolicy"))} + return &imageContentPolicyLister{listers.New[*configv1.ImageContentPolicy](indexer, configv1.Resource("imagecontentpolicy"))} } diff --git a/vendor/github.com/openshift/client-go/config/listers/config/v1/imagedigestmirrorset.go b/vendor/github.com/openshift/client-go/config/listers/config/v1/imagedigestmirrorset.go index 6639b7cc0..027ded8bb 100644 --- a/vendor/github.com/openshift/client-go/config/listers/config/v1/imagedigestmirrorset.go +++ b/vendor/github.com/openshift/client-go/config/listers/config/v1/imagedigestmirrorset.go @@ -3,10 +3,10 @@ package v1 import ( - v1 "github.com/openshift/api/config/v1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/listers" - "k8s.io/client-go/tools/cache" + configv1 "github.com/openshift/api/config/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // ImageDigestMirrorSetLister helps list ImageDigestMirrorSets. @@ -14,19 +14,19 @@ import ( type ImageDigestMirrorSetLister interface { // List lists all ImageDigestMirrorSets in the indexer. 
// Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.ImageDigestMirrorSet, err error) + List(selector labels.Selector) (ret []*configv1.ImageDigestMirrorSet, err error) // Get retrieves the ImageDigestMirrorSet from the index for a given name. // Objects returned here must be treated as read-only. - Get(name string) (*v1.ImageDigestMirrorSet, error) + Get(name string) (*configv1.ImageDigestMirrorSet, error) ImageDigestMirrorSetListerExpansion } // imageDigestMirrorSetLister implements the ImageDigestMirrorSetLister interface. type imageDigestMirrorSetLister struct { - listers.ResourceIndexer[*v1.ImageDigestMirrorSet] + listers.ResourceIndexer[*configv1.ImageDigestMirrorSet] } // NewImageDigestMirrorSetLister returns a new ImageDigestMirrorSetLister. func NewImageDigestMirrorSetLister(indexer cache.Indexer) ImageDigestMirrorSetLister { - return &imageDigestMirrorSetLister{listers.New[*v1.ImageDigestMirrorSet](indexer, v1.Resource("imagedigestmirrorset"))} + return &imageDigestMirrorSetLister{listers.New[*configv1.ImageDigestMirrorSet](indexer, configv1.Resource("imagedigestmirrorset"))} } diff --git a/vendor/github.com/openshift/client-go/config/listers/config/v1/imagetagmirrorset.go b/vendor/github.com/openshift/client-go/config/listers/config/v1/imagetagmirrorset.go index 8605b7e78..d390bc14e 100644 --- a/vendor/github.com/openshift/client-go/config/listers/config/v1/imagetagmirrorset.go +++ b/vendor/github.com/openshift/client-go/config/listers/config/v1/imagetagmirrorset.go @@ -3,10 +3,10 @@ package v1 import ( - v1 "github.com/openshift/api/config/v1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/listers" - "k8s.io/client-go/tools/cache" + configv1 "github.com/openshift/api/config/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // ImageTagMirrorSetLister helps list ImageTagMirrorSets. @@ -14,19 +14,19 @@ import ( type ImageTagMirrorSetLister interface { // List lists all ImageTagMirrorSets in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.ImageTagMirrorSet, err error) + List(selector labels.Selector) (ret []*configv1.ImageTagMirrorSet, err error) // Get retrieves the ImageTagMirrorSet from the index for a given name. // Objects returned here must be treated as read-only. - Get(name string) (*v1.ImageTagMirrorSet, error) + Get(name string) (*configv1.ImageTagMirrorSet, error) ImageTagMirrorSetListerExpansion } // imageTagMirrorSetLister implements the ImageTagMirrorSetLister interface. type imageTagMirrorSetLister struct { - listers.ResourceIndexer[*v1.ImageTagMirrorSet] + listers.ResourceIndexer[*configv1.ImageTagMirrorSet] } // NewImageTagMirrorSetLister returns a new ImageTagMirrorSetLister. 
func NewImageTagMirrorSetLister(indexer cache.Indexer) ImageTagMirrorSetLister { - return &imageTagMirrorSetLister{listers.New[*v1.ImageTagMirrorSet](indexer, v1.Resource("imagetagmirrorset"))} + return &imageTagMirrorSetLister{listers.New[*configv1.ImageTagMirrorSet](indexer, configv1.Resource("imagetagmirrorset"))} } diff --git a/vendor/github.com/openshift/client-go/config/listers/config/v1/infrastructure.go b/vendor/github.com/openshift/client-go/config/listers/config/v1/infrastructure.go index db1e7725a..48d592a29 100644 --- a/vendor/github.com/openshift/client-go/config/listers/config/v1/infrastructure.go +++ b/vendor/github.com/openshift/client-go/config/listers/config/v1/infrastructure.go @@ -3,10 +3,10 @@ package v1 import ( - v1 "github.com/openshift/api/config/v1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/listers" - "k8s.io/client-go/tools/cache" + configv1 "github.com/openshift/api/config/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // InfrastructureLister helps list Infrastructures. @@ -14,19 +14,19 @@ import ( type InfrastructureLister interface { // List lists all Infrastructures in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.Infrastructure, err error) + List(selector labels.Selector) (ret []*configv1.Infrastructure, err error) // Get retrieves the Infrastructure from the index for a given name. // Objects returned here must be treated as read-only. - Get(name string) (*v1.Infrastructure, error) + Get(name string) (*configv1.Infrastructure, error) InfrastructureListerExpansion } // infrastructureLister implements the InfrastructureLister interface. type infrastructureLister struct { - listers.ResourceIndexer[*v1.Infrastructure] + listers.ResourceIndexer[*configv1.Infrastructure] } // NewInfrastructureLister returns a new InfrastructureLister. func NewInfrastructureLister(indexer cache.Indexer) InfrastructureLister { - return &infrastructureLister{listers.New[*v1.Infrastructure](indexer, v1.Resource("infrastructure"))} + return &infrastructureLister{listers.New[*configv1.Infrastructure](indexer, configv1.Resource("infrastructure"))} } diff --git a/vendor/github.com/openshift/client-go/config/listers/config/v1/ingress.go b/vendor/github.com/openshift/client-go/config/listers/config/v1/ingress.go index de29c3f1a..81538435f 100644 --- a/vendor/github.com/openshift/client-go/config/listers/config/v1/ingress.go +++ b/vendor/github.com/openshift/client-go/config/listers/config/v1/ingress.go @@ -3,10 +3,10 @@ package v1 import ( - v1 "github.com/openshift/api/config/v1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/listers" - "k8s.io/client-go/tools/cache" + configv1 "github.com/openshift/api/config/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // IngressLister helps list Ingresses. @@ -14,19 +14,19 @@ import ( type IngressLister interface { // List lists all Ingresses in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.Ingress, err error) + List(selector labels.Selector) (ret []*configv1.Ingress, err error) // Get retrieves the Ingress from the index for a given name. // Objects returned here must be treated as read-only. 
- Get(name string) (*v1.Ingress, error) + Get(name string) (*configv1.Ingress, error) IngressListerExpansion } // ingressLister implements the IngressLister interface. type ingressLister struct { - listers.ResourceIndexer[*v1.Ingress] + listers.ResourceIndexer[*configv1.Ingress] } // NewIngressLister returns a new IngressLister. func NewIngressLister(indexer cache.Indexer) IngressLister { - return &ingressLister{listers.New[*v1.Ingress](indexer, v1.Resource("ingress"))} + return &ingressLister{listers.New[*configv1.Ingress](indexer, configv1.Resource("ingress"))} } diff --git a/vendor/github.com/openshift/client-go/config/listers/config/v1/network.go b/vendor/github.com/openshift/client-go/config/listers/config/v1/network.go index d71892d6c..3376a46b1 100644 --- a/vendor/github.com/openshift/client-go/config/listers/config/v1/network.go +++ b/vendor/github.com/openshift/client-go/config/listers/config/v1/network.go @@ -3,10 +3,10 @@ package v1 import ( - v1 "github.com/openshift/api/config/v1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/listers" - "k8s.io/client-go/tools/cache" + configv1 "github.com/openshift/api/config/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // NetworkLister helps list Networks. @@ -14,19 +14,19 @@ import ( type NetworkLister interface { // List lists all Networks in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.Network, err error) + List(selector labels.Selector) (ret []*configv1.Network, err error) // Get retrieves the Network from the index for a given name. // Objects returned here must be treated as read-only. - Get(name string) (*v1.Network, error) + Get(name string) (*configv1.Network, error) NetworkListerExpansion } // networkLister implements the NetworkLister interface. type networkLister struct { - listers.ResourceIndexer[*v1.Network] + listers.ResourceIndexer[*configv1.Network] } // NewNetworkLister returns a new NetworkLister. func NewNetworkLister(indexer cache.Indexer) NetworkLister { - return &networkLister{listers.New[*v1.Network](indexer, v1.Resource("network"))} + return &networkLister{listers.New[*configv1.Network](indexer, configv1.Resource("network"))} } diff --git a/vendor/github.com/openshift/client-go/config/listers/config/v1/node.go b/vendor/github.com/openshift/client-go/config/listers/config/v1/node.go index 08e323670..2520016a5 100644 --- a/vendor/github.com/openshift/client-go/config/listers/config/v1/node.go +++ b/vendor/github.com/openshift/client-go/config/listers/config/v1/node.go @@ -3,10 +3,10 @@ package v1 import ( - v1 "github.com/openshift/api/config/v1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/listers" - "k8s.io/client-go/tools/cache" + configv1 "github.com/openshift/api/config/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // NodeLister helps list Nodes. @@ -14,19 +14,19 @@ import ( type NodeLister interface { // List lists all Nodes in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.Node, err error) + List(selector labels.Selector) (ret []*configv1.Node, err error) // Get retrieves the Node from the index for a given name. // Objects returned here must be treated as read-only. 
- Get(name string) (*v1.Node, error) + Get(name string) (*configv1.Node, error) NodeListerExpansion } // nodeLister implements the NodeLister interface. type nodeLister struct { - listers.ResourceIndexer[*v1.Node] + listers.ResourceIndexer[*configv1.Node] } // NewNodeLister returns a new NodeLister. func NewNodeLister(indexer cache.Indexer) NodeLister { - return &nodeLister{listers.New[*v1.Node](indexer, v1.Resource("node"))} + return &nodeLister{listers.New[*configv1.Node](indexer, configv1.Resource("node"))} } diff --git a/vendor/github.com/openshift/client-go/config/listers/config/v1/oauth.go b/vendor/github.com/openshift/client-go/config/listers/config/v1/oauth.go index abfa6cc2a..5cffcd7bf 100644 --- a/vendor/github.com/openshift/client-go/config/listers/config/v1/oauth.go +++ b/vendor/github.com/openshift/client-go/config/listers/config/v1/oauth.go @@ -3,10 +3,10 @@ package v1 import ( - v1 "github.com/openshift/api/config/v1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/listers" - "k8s.io/client-go/tools/cache" + configv1 "github.com/openshift/api/config/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // OAuthLister helps list OAuths. @@ -14,19 +14,19 @@ import ( type OAuthLister interface { // List lists all OAuths in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.OAuth, err error) + List(selector labels.Selector) (ret []*configv1.OAuth, err error) // Get retrieves the OAuth from the index for a given name. // Objects returned here must be treated as read-only. - Get(name string) (*v1.OAuth, error) + Get(name string) (*configv1.OAuth, error) OAuthListerExpansion } // oAuthLister implements the OAuthLister interface. type oAuthLister struct { - listers.ResourceIndexer[*v1.OAuth] + listers.ResourceIndexer[*configv1.OAuth] } // NewOAuthLister returns a new OAuthLister. func NewOAuthLister(indexer cache.Indexer) OAuthLister { - return &oAuthLister{listers.New[*v1.OAuth](indexer, v1.Resource("oauth"))} + return &oAuthLister{listers.New[*configv1.OAuth](indexer, configv1.Resource("oauth"))} } diff --git a/vendor/github.com/openshift/client-go/config/listers/config/v1/operatorhub.go b/vendor/github.com/openshift/client-go/config/listers/config/v1/operatorhub.go index e4b0e4712..a28f63f79 100644 --- a/vendor/github.com/openshift/client-go/config/listers/config/v1/operatorhub.go +++ b/vendor/github.com/openshift/client-go/config/listers/config/v1/operatorhub.go @@ -3,10 +3,10 @@ package v1 import ( - v1 "github.com/openshift/api/config/v1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/listers" - "k8s.io/client-go/tools/cache" + configv1 "github.com/openshift/api/config/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // OperatorHubLister helps list OperatorHubs. @@ -14,19 +14,19 @@ import ( type OperatorHubLister interface { // List lists all OperatorHubs in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.OperatorHub, err error) + List(selector labels.Selector) (ret []*configv1.OperatorHub, err error) // Get retrieves the OperatorHub from the index for a given name. // Objects returned here must be treated as read-only. 
- Get(name string) (*v1.OperatorHub, error) + Get(name string) (*configv1.OperatorHub, error) OperatorHubListerExpansion } // operatorHubLister implements the OperatorHubLister interface. type operatorHubLister struct { - listers.ResourceIndexer[*v1.OperatorHub] + listers.ResourceIndexer[*configv1.OperatorHub] } // NewOperatorHubLister returns a new OperatorHubLister. func NewOperatorHubLister(indexer cache.Indexer) OperatorHubLister { - return &operatorHubLister{listers.New[*v1.OperatorHub](indexer, v1.Resource("operatorhub"))} + return &operatorHubLister{listers.New[*configv1.OperatorHub](indexer, configv1.Resource("operatorhub"))} } diff --git a/vendor/github.com/openshift/client-go/config/listers/config/v1/project.go b/vendor/github.com/openshift/client-go/config/listers/config/v1/project.go index 67a1ee323..fbc57217f 100644 --- a/vendor/github.com/openshift/client-go/config/listers/config/v1/project.go +++ b/vendor/github.com/openshift/client-go/config/listers/config/v1/project.go @@ -3,10 +3,10 @@ package v1 import ( - v1 "github.com/openshift/api/config/v1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/listers" - "k8s.io/client-go/tools/cache" + configv1 "github.com/openshift/api/config/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // ProjectLister helps list Projects. @@ -14,19 +14,19 @@ import ( type ProjectLister interface { // List lists all Projects in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.Project, err error) + List(selector labels.Selector) (ret []*configv1.Project, err error) // Get retrieves the Project from the index for a given name. // Objects returned here must be treated as read-only. - Get(name string) (*v1.Project, error) + Get(name string) (*configv1.Project, error) ProjectListerExpansion } // projectLister implements the ProjectLister interface. type projectLister struct { - listers.ResourceIndexer[*v1.Project] + listers.ResourceIndexer[*configv1.Project] } // NewProjectLister returns a new ProjectLister. func NewProjectLister(indexer cache.Indexer) ProjectLister { - return &projectLister{listers.New[*v1.Project](indexer, v1.Resource("project"))} + return &projectLister{listers.New[*configv1.Project](indexer, configv1.Resource("project"))} } diff --git a/vendor/github.com/openshift/client-go/config/listers/config/v1/proxy.go b/vendor/github.com/openshift/client-go/config/listers/config/v1/proxy.go index 39837c2f6..8edbd0fab 100644 --- a/vendor/github.com/openshift/client-go/config/listers/config/v1/proxy.go +++ b/vendor/github.com/openshift/client-go/config/listers/config/v1/proxy.go @@ -3,10 +3,10 @@ package v1 import ( - v1 "github.com/openshift/api/config/v1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/listers" - "k8s.io/client-go/tools/cache" + configv1 "github.com/openshift/api/config/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // ProxyLister helps list Proxies. @@ -14,19 +14,19 @@ import ( type ProxyLister interface { // List lists all Proxies in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.Proxy, err error) + List(selector labels.Selector) (ret []*configv1.Proxy, err error) // Get retrieves the Proxy from the index for a given name. // Objects returned here must be treated as read-only. 
- Get(name string) (*v1.Proxy, error) + Get(name string) (*configv1.Proxy, error) ProxyListerExpansion } // proxyLister implements the ProxyLister interface. type proxyLister struct { - listers.ResourceIndexer[*v1.Proxy] + listers.ResourceIndexer[*configv1.Proxy] } // NewProxyLister returns a new ProxyLister. func NewProxyLister(indexer cache.Indexer) ProxyLister { - return &proxyLister{listers.New[*v1.Proxy](indexer, v1.Resource("proxy"))} + return &proxyLister{listers.New[*configv1.Proxy](indexer, configv1.Resource("proxy"))} } diff --git a/vendor/github.com/openshift/client-go/config/listers/config/v1/scheduler.go b/vendor/github.com/openshift/client-go/config/listers/config/v1/scheduler.go index 6f95e0254..a90829c8d 100644 --- a/vendor/github.com/openshift/client-go/config/listers/config/v1/scheduler.go +++ b/vendor/github.com/openshift/client-go/config/listers/config/v1/scheduler.go @@ -3,10 +3,10 @@ package v1 import ( - v1 "github.com/openshift/api/config/v1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/listers" - "k8s.io/client-go/tools/cache" + configv1 "github.com/openshift/api/config/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // SchedulerLister helps list Schedulers. @@ -14,19 +14,19 @@ import ( type SchedulerLister interface { // List lists all Schedulers in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.Scheduler, err error) + List(selector labels.Selector) (ret []*configv1.Scheduler, err error) // Get retrieves the Scheduler from the index for a given name. // Objects returned here must be treated as read-only. - Get(name string) (*v1.Scheduler, error) + Get(name string) (*configv1.Scheduler, error) SchedulerListerExpansion } // schedulerLister implements the SchedulerLister interface. type schedulerLister struct { - listers.ResourceIndexer[*v1.Scheduler] + listers.ResourceIndexer[*configv1.Scheduler] } // NewSchedulerLister returns a new SchedulerLister. func NewSchedulerLister(indexer cache.Indexer) SchedulerLister { - return &schedulerLister{listers.New[*v1.Scheduler](indexer, v1.Resource("scheduler"))} + return &schedulerLister{listers.New[*configv1.Scheduler](indexer, configv1.Resource("scheduler"))} } diff --git a/vendor/github.com/openshift/client-go/config/listers/config/v1alpha1/backup.go b/vendor/github.com/openshift/client-go/config/listers/config/v1alpha1/backup.go index c7d3ba546..6b992e0d0 100644 --- a/vendor/github.com/openshift/client-go/config/listers/config/v1alpha1/backup.go +++ b/vendor/github.com/openshift/client-go/config/listers/config/v1alpha1/backup.go @@ -3,10 +3,10 @@ package v1alpha1 import ( - v1alpha1 "github.com/openshift/api/config/v1alpha1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/listers" - "k8s.io/client-go/tools/cache" + configv1alpha1 "github.com/openshift/api/config/v1alpha1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // BackupLister helps list Backups. @@ -14,19 +14,19 @@ import ( type BackupLister interface { // List lists all Backups in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1alpha1.Backup, err error) + List(selector labels.Selector) (ret []*configv1alpha1.Backup, err error) // Get retrieves the Backup from the index for a given name. // Objects returned here must be treated as read-only. 
- Get(name string) (*v1alpha1.Backup, error) + Get(name string) (*configv1alpha1.Backup, error) BackupListerExpansion } // backupLister implements the BackupLister interface. type backupLister struct { - listers.ResourceIndexer[*v1alpha1.Backup] + listers.ResourceIndexer[*configv1alpha1.Backup] } // NewBackupLister returns a new BackupLister. func NewBackupLister(indexer cache.Indexer) BackupLister { - return &backupLister{listers.New[*v1alpha1.Backup](indexer, v1alpha1.Resource("backup"))} + return &backupLister{listers.New[*configv1alpha1.Backup](indexer, configv1alpha1.Resource("backup"))} } diff --git a/vendor/github.com/openshift/client-go/config/listers/config/v1alpha1/clusterimagepolicy.go b/vendor/github.com/openshift/client-go/config/listers/config/v1alpha1/clusterimagepolicy.go index ab909818e..0512d3682 100644 --- a/vendor/github.com/openshift/client-go/config/listers/config/v1alpha1/clusterimagepolicy.go +++ b/vendor/github.com/openshift/client-go/config/listers/config/v1alpha1/clusterimagepolicy.go @@ -3,10 +3,10 @@ package v1alpha1 import ( - v1alpha1 "github.com/openshift/api/config/v1alpha1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/listers" - "k8s.io/client-go/tools/cache" + configv1alpha1 "github.com/openshift/api/config/v1alpha1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // ClusterImagePolicyLister helps list ClusterImagePolicies. @@ -14,19 +14,19 @@ import ( type ClusterImagePolicyLister interface { // List lists all ClusterImagePolicies in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1alpha1.ClusterImagePolicy, err error) + List(selector labels.Selector) (ret []*configv1alpha1.ClusterImagePolicy, err error) // Get retrieves the ClusterImagePolicy from the index for a given name. // Objects returned here must be treated as read-only. - Get(name string) (*v1alpha1.ClusterImagePolicy, error) + Get(name string) (*configv1alpha1.ClusterImagePolicy, error) ClusterImagePolicyListerExpansion } // clusterImagePolicyLister implements the ClusterImagePolicyLister interface. type clusterImagePolicyLister struct { - listers.ResourceIndexer[*v1alpha1.ClusterImagePolicy] + listers.ResourceIndexer[*configv1alpha1.ClusterImagePolicy] } // NewClusterImagePolicyLister returns a new ClusterImagePolicyLister. func NewClusterImagePolicyLister(indexer cache.Indexer) ClusterImagePolicyLister { - return &clusterImagePolicyLister{listers.New[*v1alpha1.ClusterImagePolicy](indexer, v1alpha1.Resource("clusterimagepolicy"))} + return &clusterImagePolicyLister{listers.New[*configv1alpha1.ClusterImagePolicy](indexer, configv1alpha1.Resource("clusterimagepolicy"))} } diff --git a/vendor/github.com/openshift/client-go/config/listers/config/v1alpha1/clustermonitoring.go b/vendor/github.com/openshift/client-go/config/listers/config/v1alpha1/clustermonitoring.go new file mode 100644 index 000000000..50beb3f98 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/listers/config/v1alpha1/clustermonitoring.go @@ -0,0 +1,32 @@ +// Code generated by lister-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + configv1alpha1 "github.com/openshift/api/config/v1alpha1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" +) + +// ClusterMonitoringLister helps list ClusterMonitorings. +// All objects returned here must be treated as read-only. 
+type ClusterMonitoringLister interface { + // List lists all ClusterMonitorings in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*configv1alpha1.ClusterMonitoring, err error) + // Get retrieves the ClusterMonitoring from the index for a given name. + // Objects returned here must be treated as read-only. + Get(name string) (*configv1alpha1.ClusterMonitoring, error) + ClusterMonitoringListerExpansion +} + +// clusterMonitoringLister implements the ClusterMonitoringLister interface. +type clusterMonitoringLister struct { + listers.ResourceIndexer[*configv1alpha1.ClusterMonitoring] +} + +// NewClusterMonitoringLister returns a new ClusterMonitoringLister. +func NewClusterMonitoringLister(indexer cache.Indexer) ClusterMonitoringLister { + return &clusterMonitoringLister{listers.New[*configv1alpha1.ClusterMonitoring](indexer, configv1alpha1.Resource("clustermonitoring"))} +} diff --git a/vendor/github.com/openshift/client-go/config/listers/config/v1alpha1/expansion_generated.go b/vendor/github.com/openshift/client-go/config/listers/config/v1alpha1/expansion_generated.go index 97e64a7cc..09b4d206d 100644 --- a/vendor/github.com/openshift/client-go/config/listers/config/v1alpha1/expansion_generated.go +++ b/vendor/github.com/openshift/client-go/config/listers/config/v1alpha1/expansion_generated.go @@ -10,6 +10,10 @@ type BackupListerExpansion interface{} // ClusterImagePolicyLister. type ClusterImagePolicyListerExpansion interface{} +// ClusterMonitoringListerExpansion allows custom methods to be added to +// ClusterMonitoringLister. +type ClusterMonitoringListerExpansion interface{} + // ImagePolicyListerExpansion allows custom methods to be added to // ImagePolicyLister. type ImagePolicyListerExpansion interface{} diff --git a/vendor/github.com/openshift/client-go/config/listers/config/v1alpha1/imagepolicy.go b/vendor/github.com/openshift/client-go/config/listers/config/v1alpha1/imagepolicy.go index e5aa27161..7050c5771 100644 --- a/vendor/github.com/openshift/client-go/config/listers/config/v1alpha1/imagepolicy.go +++ b/vendor/github.com/openshift/client-go/config/listers/config/v1alpha1/imagepolicy.go @@ -3,10 +3,10 @@ package v1alpha1 import ( - v1alpha1 "github.com/openshift/api/config/v1alpha1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/listers" - "k8s.io/client-go/tools/cache" + configv1alpha1 "github.com/openshift/api/config/v1alpha1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // ImagePolicyLister helps list ImagePolicies. @@ -14,7 +14,7 @@ import ( type ImagePolicyLister interface { // List lists all ImagePolicies in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1alpha1.ImagePolicy, err error) + List(selector labels.Selector) (ret []*configv1alpha1.ImagePolicy, err error) // ImagePolicies returns an object that can list and get ImagePolicies. ImagePolicies(namespace string) ImagePolicyNamespaceLister ImagePolicyListerExpansion @@ -22,17 +22,17 @@ type ImagePolicyLister interface { // imagePolicyLister implements the ImagePolicyLister interface. type imagePolicyLister struct { - listers.ResourceIndexer[*v1alpha1.ImagePolicy] + listers.ResourceIndexer[*configv1alpha1.ImagePolicy] } // NewImagePolicyLister returns a new ImagePolicyLister. 
func NewImagePolicyLister(indexer cache.Indexer) ImagePolicyLister { - return &imagePolicyLister{listers.New[*v1alpha1.ImagePolicy](indexer, v1alpha1.Resource("imagepolicy"))} + return &imagePolicyLister{listers.New[*configv1alpha1.ImagePolicy](indexer, configv1alpha1.Resource("imagepolicy"))} } // ImagePolicies returns an object that can list and get ImagePolicies. func (s *imagePolicyLister) ImagePolicies(namespace string) ImagePolicyNamespaceLister { - return imagePolicyNamespaceLister{listers.NewNamespaced[*v1alpha1.ImagePolicy](s.ResourceIndexer, namespace)} + return imagePolicyNamespaceLister{listers.NewNamespaced[*configv1alpha1.ImagePolicy](s.ResourceIndexer, namespace)} } // ImagePolicyNamespaceLister helps list and get ImagePolicies. @@ -40,15 +40,15 @@ func (s *imagePolicyLister) ImagePolicies(namespace string) ImagePolicyNamespace type ImagePolicyNamespaceLister interface { // List lists all ImagePolicies in the indexer for a given namespace. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1alpha1.ImagePolicy, err error) + List(selector labels.Selector) (ret []*configv1alpha1.ImagePolicy, err error) // Get retrieves the ImagePolicy from the indexer for a given namespace and name. // Objects returned here must be treated as read-only. - Get(name string) (*v1alpha1.ImagePolicy, error) + Get(name string) (*configv1alpha1.ImagePolicy, error) ImagePolicyNamespaceListerExpansion } // imagePolicyNamespaceLister implements the ImagePolicyNamespaceLister // interface. type imagePolicyNamespaceLister struct { - listers.ResourceIndexer[*v1alpha1.ImagePolicy] + listers.ResourceIndexer[*configv1alpha1.ImagePolicy] } diff --git a/vendor/github.com/openshift/client-go/config/listers/config/v1alpha1/insightsdatagather.go b/vendor/github.com/openshift/client-go/config/listers/config/v1alpha1/insightsdatagather.go index 4885030b9..9328022a4 100644 --- a/vendor/github.com/openshift/client-go/config/listers/config/v1alpha1/insightsdatagather.go +++ b/vendor/github.com/openshift/client-go/config/listers/config/v1alpha1/insightsdatagather.go @@ -3,10 +3,10 @@ package v1alpha1 import ( - v1alpha1 "github.com/openshift/api/config/v1alpha1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/listers" - "k8s.io/client-go/tools/cache" + configv1alpha1 "github.com/openshift/api/config/v1alpha1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // InsightsDataGatherLister helps list InsightsDataGathers. @@ -14,19 +14,19 @@ import ( type InsightsDataGatherLister interface { // List lists all InsightsDataGathers in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1alpha1.InsightsDataGather, err error) + List(selector labels.Selector) (ret []*configv1alpha1.InsightsDataGather, err error) // Get retrieves the InsightsDataGather from the index for a given name. // Objects returned here must be treated as read-only. - Get(name string) (*v1alpha1.InsightsDataGather, error) + Get(name string) (*configv1alpha1.InsightsDataGather, error) InsightsDataGatherListerExpansion } // insightsDataGatherLister implements the InsightsDataGatherLister interface. type insightsDataGatherLister struct { - listers.ResourceIndexer[*v1alpha1.InsightsDataGather] + listers.ResourceIndexer[*configv1alpha1.InsightsDataGather] } // NewInsightsDataGatherLister returns a new InsightsDataGatherLister. 
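Aside: the regenerated v1alpha1 config listers above keep the same call pattern, and a lister for the newly added ClusterMonitoring resource comes with them. Below is a minimal sketch (not part of the diff) of exercising that lister against an in-memory indexer, assuming the module versions pinned by this bump; in a real controller the indexer would come from a shared informer rather than being built by hand.

package main

import (
	"fmt"

	configv1alpha1 "github.com/openshift/api/config/v1alpha1"
	configlistersv1alpha1 "github.com/openshift/client-go/config/listers/config/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/client-go/tools/cache"
)

func main() {
	// Backing store the lister reads from; shared informers expose the same interface.
	indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{})

	// ClusterMonitoring is cluster-scoped, so the cache key is just the object name.
	if err := indexer.Add(&configv1alpha1.ClusterMonitoring{
		ObjectMeta: metav1.ObjectMeta{Name: "cluster"},
	}); err != nil {
		panic(err)
	}

	lister := configlistersv1alpha1.NewClusterMonitoringLister(indexer)
	items, err := lister.List(labels.Everything())
	if err != nil {
		panic(err)
	}
	for _, cm := range items {
		fmt.Println("found ClusterMonitoring:", cm.Name)
	}
}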
func NewInsightsDataGatherLister(indexer cache.Indexer) InsightsDataGatherLister { - return &insightsDataGatherLister{listers.New[*v1alpha1.InsightsDataGather](indexer, v1alpha1.Resource("insightsdatagather"))} + return &insightsDataGatherLister{listers.New[*configv1alpha1.InsightsDataGather](indexer, configv1alpha1.Resource("insightsdatagather"))} } diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/internal/internal.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/internal/internal.go index 60be55fbb..1c88fc776 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/internal/internal.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/internal/internal.go @@ -3,8 +3,8 @@ package internal import ( - "fmt" - "sync" + fmt "fmt" + sync "sync" typed "sigs.k8s.io/structured-merge-diff/v4/typed" ) @@ -1542,9 +1542,23 @@ var schemaYAML = typed.YAMLObject(`types: - name: com.github.openshift.api.operator.v1.IPsecConfig map: fields: + - name: full + type: + namedType: com.github.openshift.api.operator.v1.IPsecFullModeConfig - name: mode type: scalar: string + unions: + - discriminator: mode + fields: + - fieldName: full + discriminatorValue: Full +- name: com.github.openshift.api.operator.v1.IPsecFullModeConfig + map: + fields: + - name: encapsulation + type: + scalar: string - name: com.github.openshift.api.operator.v1.IPv4GatewayConfig map: fields: @@ -1775,6 +1789,10 @@ var schemaYAML = typed.YAMLObject(`types: - name: httpHeaders type: namedType: com.github.openshift.api.operator.v1.IngressControllerHTTPHeaders + - name: idleConnectionTerminationPolicy + type: + scalar: string + default: Immediate - name: logging type: namedType: com.github.openshift.api.operator.v1.IngressControllerLogging @@ -2897,6 +2915,81 @@ var schemaYAML = typed.YAMLObject(`types: - name: latestAvailableRevision type: scalar: numeric +- name: com.github.openshift.api.operator.v1.OLM + map: + fields: + - name: apiVersion + type: + scalar: string + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: spec + type: + namedType: com.github.openshift.api.operator.v1.OLMSpec + default: {} + - name: status + type: + namedType: com.github.openshift.api.operator.v1.OLMStatus + default: {} +- name: com.github.openshift.api.operator.v1.OLMSpec + map: + fields: + - name: logLevel + type: + scalar: string + - name: managementState + type: + scalar: string + default: "" + - name: observedConfig + type: + namedType: __untyped_atomic_ + - name: operatorLogLevel + type: + scalar: string + - name: unsupportedConfigOverrides + type: + namedType: __untyped_atomic_ +- name: com.github.openshift.api.operator.v1.OLMStatus + map: + fields: + - name: conditions + type: + list: + elementType: + namedType: com.github.openshift.api.operator.v1.OperatorCondition + elementRelationship: associative + keys: + - type + - name: generations + type: + list: + elementType: + namedType: com.github.openshift.api.operator.v1.GenerationStatus + elementRelationship: associative + keys: + - group + - resource + - namespace + - name + - name: latestAvailableRevision + type: + scalar: numeric + - name: observedGeneration + type: + scalar: numeric + - name: readyReplicas + type: + scalar: numeric + default: 0 + - name: version + type: + scalar: string - name: com.github.openshift.api.operator.v1.OVNKubernetesConfig map: fields: @@ -3111,7 +3204,7 @@ var schemaYAML = 
typed.YAMLObject(`types: - name: com.github.openshift.api.operator.v1.OpenStackLoadBalancerParameters map: fields: - - name: loadBalancerIP + - name: floatingIP type: scalar: string - name: com.github.openshift.api.operator.v1.OperatorCondition @@ -3811,6 +3904,39 @@ var schemaYAML = typed.YAMLObject(`types: type: scalar: string default: "" +- name: com.github.openshift.api.operator.v1alpha1.ClusterVersionOperator + map: + fields: + - name: apiVersion + type: + scalar: string + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: spec + type: + namedType: com.github.openshift.api.operator.v1alpha1.ClusterVersionOperatorSpec + default: {} + - name: status + type: + namedType: com.github.openshift.api.operator.v1alpha1.ClusterVersionOperatorStatus + default: {} +- name: com.github.openshift.api.operator.v1alpha1.ClusterVersionOperatorSpec + map: + fields: + - name: operatorLogLevel + type: + scalar: string +- name: com.github.openshift.api.operator.v1alpha1.ClusterVersionOperatorStatus + map: + fields: + - name: observedGeneration + type: + scalar: numeric - name: com.github.openshift.api.operator.v1alpha1.EtcdBackup map: fields: diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/additionalnetworkdefinition.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/additionalnetworkdefinition.go index 24508e916..a43b86656 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/additionalnetworkdefinition.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/additionalnetworkdefinition.go @@ -3,13 +3,13 @@ package v1 import ( - v1 "github.com/openshift/api/operator/v1" + operatorv1 "github.com/openshift/api/operator/v1" ) // AdditionalNetworkDefinitionApplyConfiguration represents a declarative configuration of the AdditionalNetworkDefinition type for use // with apply. type AdditionalNetworkDefinitionApplyConfiguration struct { - Type *v1.NetworkType `json:"type,omitempty"` + Type *operatorv1.NetworkType `json:"type,omitempty"` Name *string `json:"name,omitempty"` Namespace *string `json:"namespace,omitempty"` RawCNIConfig *string `json:"rawCNIConfig,omitempty"` @@ -25,7 +25,7 @@ func AdditionalNetworkDefinition() *AdditionalNetworkDefinitionApplyConfiguratio // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. 
-func (b *AdditionalNetworkDefinitionApplyConfiguration) WithType(value v1.NetworkType) *AdditionalNetworkDefinitionApplyConfiguration { +func (b *AdditionalNetworkDefinitionApplyConfiguration) WithType(value operatorv1.NetworkType) *AdditionalNetworkDefinitionApplyConfiguration { b.Type = &value return b } diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/additionalroutingcapabilities.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/additionalroutingcapabilities.go index 9707fc73b..95a77d116 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/additionalroutingcapabilities.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/additionalroutingcapabilities.go @@ -3,13 +3,13 @@ package v1 import ( - v1 "github.com/openshift/api/operator/v1" + operatorv1 "github.com/openshift/api/operator/v1" ) // AdditionalRoutingCapabilitiesApplyConfiguration represents a declarative configuration of the AdditionalRoutingCapabilities type for use // with apply. type AdditionalRoutingCapabilitiesApplyConfiguration struct { - Providers []v1.RoutingCapabilitiesProvider `json:"providers,omitempty"` + Providers []operatorv1.RoutingCapabilitiesProvider `json:"providers,omitempty"` } // AdditionalRoutingCapabilitiesApplyConfiguration constructs a declarative configuration of the AdditionalRoutingCapabilities type for use with @@ -21,7 +21,7 @@ func AdditionalRoutingCapabilities() *AdditionalRoutingCapabilitiesApplyConfigur // WithProviders adds the given value to the Providers field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the Providers field. -func (b *AdditionalRoutingCapabilitiesApplyConfiguration) WithProviders(values ...v1.RoutingCapabilitiesProvider) *AdditionalRoutingCapabilitiesApplyConfiguration { +func (b *AdditionalRoutingCapabilitiesApplyConfiguration) WithProviders(values ...operatorv1.RoutingCapabilitiesProvider) *AdditionalRoutingCapabilitiesApplyConfiguration { for i := range values { b.Providers = append(b.Providers, values[i]) } diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/authentication.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/authentication.go index c9ac2464b..ec839a2ff 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/authentication.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/authentication.go @@ -3,21 +3,21 @@ package v1 import ( - apioperatorv1 "github.com/openshift/api/operator/v1" + operatorv1 "github.com/openshift/api/operator/v1" internal "github.com/openshift/client-go/operator/applyconfigurations/internal" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) // AuthenticationApplyConfiguration represents a declarative configuration of the Authentication type for use // with apply. 
type AuthenticationApplyConfiguration struct { - v1.TypeMetaApplyConfiguration `json:",inline"` - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Spec *AuthenticationSpecApplyConfiguration `json:"spec,omitempty"` - Status *AuthenticationStatusApplyConfiguration `json:"status,omitempty"` + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *AuthenticationSpecApplyConfiguration `json:"spec,omitempty"` + Status *AuthenticationStatusApplyConfiguration `json:"status,omitempty"` } // Authentication constructs a declarative configuration of the Authentication type for use with @@ -41,18 +41,18 @@ func Authentication(name string) *AuthenticationApplyConfiguration { // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! -func ExtractAuthentication(authentication *apioperatorv1.Authentication, fieldManager string) (*AuthenticationApplyConfiguration, error) { +func ExtractAuthentication(authentication *operatorv1.Authentication, fieldManager string) (*AuthenticationApplyConfiguration, error) { return extractAuthentication(authentication, fieldManager, "") } // ExtractAuthenticationStatus is the same as ExtractAuthentication except // that it extracts the status subresource applied configuration. // Experimental! -func ExtractAuthenticationStatus(authentication *apioperatorv1.Authentication, fieldManager string) (*AuthenticationApplyConfiguration, error) { +func ExtractAuthenticationStatus(authentication *operatorv1.Authentication, fieldManager string) (*AuthenticationApplyConfiguration, error) { return extractAuthentication(authentication, fieldManager, "status") } -func extractAuthentication(authentication *apioperatorv1.Authentication, fieldManager string, subresource string) (*AuthenticationApplyConfiguration, error) { +func extractAuthentication(authentication *operatorv1.Authentication, fieldManager string, subresource string) (*AuthenticationApplyConfiguration, error) { b := &AuthenticationApplyConfiguration{} err := managedfields.ExtractInto(authentication, internal.Parser().Type("com.github.openshift.api.operator.v1.Authentication"), fieldManager, b, subresource) if err != nil { @@ -69,7 +69,7 @@ func extractAuthentication(authentication *apioperatorv1.Authentication, fieldMa // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *AuthenticationApplyConfiguration) WithKind(value string) *AuthenticationApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -77,7 +77,7 @@ func (b *AuthenticationApplyConfiguration) WithKind(value string) *Authenticatio // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *AuthenticationApplyConfiguration) WithAPIVersion(value string) *AuthenticationApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -86,7 +86,7 @@ func (b *AuthenticationApplyConfiguration) WithAPIVersion(value string) *Authent // If called multiple times, the Name field is set to the value of the last call. 
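Aside: the Extract helpers and metadata setters above follow the standard apply-configuration pattern. The sketch below (not part of the diff) shows a server-side apply round trip with them; it assumes the generated operator clientset under github.com/openshift/client-go/operator/clientset/versioned exposes the usual Apply method, and the kubeconfig path and field-manager name are hypothetical.

package main

import (
	"context"
	"fmt"

	operatorv1 "github.com/openshift/api/operator/v1"
	applyoperatorv1 "github.com/openshift/client-go/operator/applyconfigurations/operator/v1"
	operatorclient "github.com/openshift/client-go/operator/clientset/versioned"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Hypothetical kubeconfig location; any rest.Config source works here.
	cfg, err := clientcmd.BuildConfigFromFlags("", "/tmp/kubeconfig")
	if err != nil {
		panic(err)
	}
	client, err := operatorclient.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}

	// Declare only the fields this manager cares about.
	desired := applyoperatorv1.Authentication("cluster").
		WithSpec(applyoperatorv1.AuthenticationSpec().
			WithManagementState(operatorv1.Managed))

	applied, err := client.OperatorV1().Authentications().Apply(
		context.TODO(), desired, metav1.ApplyOptions{FieldManager: "example-manager", Force: true})
	if err != nil {
		panic(err)
	}

	// ExtractAuthentication returns only the fields owned by this field manager,
	// which is the intended input for computing the next apply.
	owned, err := applyoperatorv1.ExtractAuthentication(applied, "example-manager")
	if err != nil {
		panic(err)
	}
	if name := owned.GetName(); name != nil {
		fmt.Println("extracted apply configuration for:", *name)
	}
}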
func (b *AuthenticationApplyConfiguration) WithName(value string) *AuthenticationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -95,7 +95,7 @@ func (b *AuthenticationApplyConfiguration) WithName(value string) *Authenticatio // If called multiple times, the GenerateName field is set to the value of the last call. func (b *AuthenticationApplyConfiguration) WithGenerateName(value string) *AuthenticationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -104,7 +104,7 @@ func (b *AuthenticationApplyConfiguration) WithGenerateName(value string) *Authe // If called multiple times, the Namespace field is set to the value of the last call. func (b *AuthenticationApplyConfiguration) WithNamespace(value string) *AuthenticationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -113,7 +113,7 @@ func (b *AuthenticationApplyConfiguration) WithNamespace(value string) *Authenti // If called multiple times, the UID field is set to the value of the last call. func (b *AuthenticationApplyConfiguration) WithUID(value types.UID) *AuthenticationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -122,7 +122,7 @@ func (b *AuthenticationApplyConfiguration) WithUID(value types.UID) *Authenticat // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *AuthenticationApplyConfiguration) WithResourceVersion(value string) *AuthenticationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -131,25 +131,25 @@ func (b *AuthenticationApplyConfiguration) WithResourceVersion(value string) *Au // If called multiple times, the Generation field is set to the value of the last call. func (b *AuthenticationApplyConfiguration) WithGeneration(value int64) *AuthenticationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CreationTimestamp field is set to the value of the last call. -func (b *AuthenticationApplyConfiguration) WithCreationTimestamp(value metav1.Time) *AuthenticationApplyConfiguration { +func (b *AuthenticationApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *AuthenticationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. 
-func (b *AuthenticationApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *AuthenticationApplyConfiguration { +func (b *AuthenticationApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *AuthenticationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -158,7 +158,7 @@ func (b *AuthenticationApplyConfiguration) WithDeletionTimestamp(value metav1.Ti // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *AuthenticationApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *AuthenticationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -168,11 +168,11 @@ func (b *AuthenticationApplyConfiguration) WithDeletionGracePeriodSeconds(value // overwriting an existing map entries in Labels field with the same key. func (b *AuthenticationApplyConfiguration) WithLabels(entries map[string]string) *AuthenticationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -183,11 +183,11 @@ func (b *AuthenticationApplyConfiguration) WithLabels(entries map[string]string) // overwriting an existing map entries in Annotations field with the same key. func (b *AuthenticationApplyConfiguration) WithAnnotations(entries map[string]string) *AuthenticationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -195,13 +195,13 @@ func (b *AuthenticationApplyConfiguration) WithAnnotations(entries map[string]st // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the OwnerReferences field. 
-func (b *AuthenticationApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *AuthenticationApplyConfiguration { +func (b *AuthenticationApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *AuthenticationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -212,14 +212,14 @@ func (b *AuthenticationApplyConfiguration) WithOwnerReferences(values ...*v1.Own func (b *AuthenticationApplyConfiguration) WithFinalizers(values ...string) *AuthenticationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } func (b *AuthenticationApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} } } @@ -242,5 +242,5 @@ func (b *AuthenticationApplyConfiguration) WithStatus(value *AuthenticationStatu // GetName retrieves the value of the Name field in the declarative configuration. func (b *AuthenticationApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/authenticationspec.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/authenticationspec.go index 572a5332a..ac90816bc 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/authenticationspec.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/authenticationspec.go @@ -23,7 +23,7 @@ func AuthenticationSpec() *AuthenticationSpecApplyConfiguration { // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the ManagementState field is set to the value of the last call. func (b *AuthenticationSpecApplyConfiguration) WithManagementState(value operatorv1.ManagementState) *AuthenticationSpecApplyConfiguration { - b.ManagementState = &value + b.OperatorSpecApplyConfiguration.ManagementState = &value return b } @@ -31,7 +31,7 @@ func (b *AuthenticationSpecApplyConfiguration) WithManagementState(value operato // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the LogLevel field is set to the value of the last call. func (b *AuthenticationSpecApplyConfiguration) WithLogLevel(value operatorv1.LogLevel) *AuthenticationSpecApplyConfiguration { - b.LogLevel = &value + b.OperatorSpecApplyConfiguration.LogLevel = &value return b } @@ -39,7 +39,7 @@ func (b *AuthenticationSpecApplyConfiguration) WithLogLevel(value operatorv1.Log // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the OperatorLogLevel field is set to the value of the last call. 
func (b *AuthenticationSpecApplyConfiguration) WithOperatorLogLevel(value operatorv1.LogLevel) *AuthenticationSpecApplyConfiguration { - b.OperatorLogLevel = &value + b.OperatorSpecApplyConfiguration.OperatorLogLevel = &value return b } @@ -47,7 +47,7 @@ func (b *AuthenticationSpecApplyConfiguration) WithOperatorLogLevel(value operat // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the UnsupportedConfigOverrides field is set to the value of the last call. func (b *AuthenticationSpecApplyConfiguration) WithUnsupportedConfigOverrides(value runtime.RawExtension) *AuthenticationSpecApplyConfiguration { - b.UnsupportedConfigOverrides = &value + b.OperatorSpecApplyConfiguration.UnsupportedConfigOverrides = &value return b } @@ -55,6 +55,6 @@ func (b *AuthenticationSpecApplyConfiguration) WithUnsupportedConfigOverrides(va // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the ObservedConfig field is set to the value of the last call. func (b *AuthenticationSpecApplyConfiguration) WithObservedConfig(value runtime.RawExtension) *AuthenticationSpecApplyConfiguration { - b.ObservedConfig = &value + b.OperatorSpecApplyConfiguration.ObservedConfig = &value return b } diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/authenticationstatus.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/authenticationstatus.go index 4f64c5ad3..ee84050a4 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/authenticationstatus.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/authenticationstatus.go @@ -27,7 +27,7 @@ func (b *AuthenticationStatusApplyConfiguration) WithOAuthAPIServer(value *OAuth // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the ObservedGeneration field is set to the value of the last call. func (b *AuthenticationStatusApplyConfiguration) WithObservedGeneration(value int64) *AuthenticationStatusApplyConfiguration { - b.ObservedGeneration = &value + b.OperatorStatusApplyConfiguration.ObservedGeneration = &value return b } @@ -39,7 +39,7 @@ func (b *AuthenticationStatusApplyConfiguration) WithConditions(values ...*Opera if values[i] == nil { panic("nil value passed to WithConditions") } - b.Conditions = append(b.Conditions, *values[i]) + b.OperatorStatusApplyConfiguration.Conditions = append(b.OperatorStatusApplyConfiguration.Conditions, *values[i]) } return b } @@ -48,7 +48,7 @@ func (b *AuthenticationStatusApplyConfiguration) WithConditions(values ...*Opera // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Version field is set to the value of the last call. func (b *AuthenticationStatusApplyConfiguration) WithVersion(value string) *AuthenticationStatusApplyConfiguration { - b.Version = &value + b.OperatorStatusApplyConfiguration.Version = &value return b } @@ -56,7 +56,7 @@ func (b *AuthenticationStatusApplyConfiguration) WithVersion(value string) *Auth // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the ReadyReplicas field is set to the value of the last call. 
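Aside: the spec and status setters above now assign through the embedded OperatorSpecApplyConfiguration/OperatorStatusApplyConfiguration fields by name instead of relying on Go field promotion; the change appears purely mechanical, and chained usage is unchanged. A small sketch (not part of the diff) built only from setters that appear in these hunks:

package main

import (
	"fmt"

	applyoperatorv1 "github.com/openshift/client-go/operator/applyconfigurations/operator/v1"
	"sigs.k8s.io/yaml"
)

func main() {
	// Each With* call still returns the receiver, so chaining works exactly as before;
	// only the generated bodies now spell out the embedded OperatorStatusApplyConfiguration.
	status := applyoperatorv1.AuthenticationStatus().
		WithObservedGeneration(7).
		WithVersion("example-version").
		WithReadyReplicas(1)

	out, err := yaml.Marshal(status)
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out))
}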
func (b *AuthenticationStatusApplyConfiguration) WithReadyReplicas(value int32) *AuthenticationStatusApplyConfiguration { - b.ReadyReplicas = &value + b.OperatorStatusApplyConfiguration.ReadyReplicas = &value return b } @@ -64,7 +64,7 @@ func (b *AuthenticationStatusApplyConfiguration) WithReadyReplicas(value int32) // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the LatestAvailableRevision field is set to the value of the last call. func (b *AuthenticationStatusApplyConfiguration) WithLatestAvailableRevision(value int32) *AuthenticationStatusApplyConfiguration { - b.LatestAvailableRevision = &value + b.OperatorStatusApplyConfiguration.LatestAvailableRevision = &value return b } @@ -76,7 +76,7 @@ func (b *AuthenticationStatusApplyConfiguration) WithGenerations(values ...*Gene if values[i] == nil { panic("nil value passed to WithGenerations") } - b.Generations = append(b.Generations, *values[i]) + b.OperatorStatusApplyConfiguration.Generations = append(b.OperatorStatusApplyConfiguration.Generations, *values[i]) } return b } diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/awsclassicloadbalancerparameters.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/awsclassicloadbalancerparameters.go index c0b3f0d09..b490ac0e5 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/awsclassicloadbalancerparameters.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/awsclassicloadbalancerparameters.go @@ -3,13 +3,13 @@ package v1 import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) // AWSClassicLoadBalancerParametersApplyConfiguration represents a declarative configuration of the AWSClassicLoadBalancerParameters type for use // with apply. type AWSClassicLoadBalancerParametersApplyConfiguration struct { - ConnectionIdleTimeout *v1.Duration `json:"connectionIdleTimeout,omitempty"` + ConnectionIdleTimeout *metav1.Duration `json:"connectionIdleTimeout,omitempty"` Subnets *AWSSubnetsApplyConfiguration `json:"subnets,omitempty"` } @@ -22,7 +22,7 @@ func AWSClassicLoadBalancerParameters() *AWSClassicLoadBalancerParametersApplyCo // WithConnectionIdleTimeout sets the ConnectionIdleTimeout field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the ConnectionIdleTimeout field is set to the value of the last call. 
-func (b *AWSClassicLoadBalancerParametersApplyConfiguration) WithConnectionIdleTimeout(value v1.Duration) *AWSClassicLoadBalancerParametersApplyConfiguration { +func (b *AWSClassicLoadBalancerParametersApplyConfiguration) WithConnectionIdleTimeout(value metav1.Duration) *AWSClassicLoadBalancerParametersApplyConfiguration { b.ConnectionIdleTimeout = &value return b } diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/awsefsvolumemetrics.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/awsefsvolumemetrics.go index 16891541f..011bbeaff 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/awsefsvolumemetrics.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/awsefsvolumemetrics.go @@ -3,13 +3,13 @@ package v1 import ( - v1 "github.com/openshift/api/operator/v1" + operatorv1 "github.com/openshift/api/operator/v1" ) // AWSEFSVolumeMetricsApplyConfiguration represents a declarative configuration of the AWSEFSVolumeMetrics type for use // with apply. type AWSEFSVolumeMetricsApplyConfiguration struct { - State *v1.AWSEFSVolumeMetricsState `json:"state,omitempty"` + State *operatorv1.AWSEFSVolumeMetricsState `json:"state,omitempty"` RecursiveWalk *AWSEFSVolumeMetricsRecursiveWalkConfigApplyConfiguration `json:"recursiveWalk,omitempty"` } @@ -22,7 +22,7 @@ func AWSEFSVolumeMetrics() *AWSEFSVolumeMetricsApplyConfiguration { // WithState sets the State field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the State field is set to the value of the last call. -func (b *AWSEFSVolumeMetricsApplyConfiguration) WithState(value v1.AWSEFSVolumeMetricsState) *AWSEFSVolumeMetricsApplyConfiguration { +func (b *AWSEFSVolumeMetricsApplyConfiguration) WithState(value operatorv1.AWSEFSVolumeMetricsState) *AWSEFSVolumeMetricsApplyConfiguration { b.State = &value return b } diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/awsloadbalancerparameters.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/awsloadbalancerparameters.go index 39599f59a..8805b7eec 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/awsloadbalancerparameters.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/awsloadbalancerparameters.go @@ -3,13 +3,13 @@ package v1 import ( - v1 "github.com/openshift/api/operator/v1" + operatorv1 "github.com/openshift/api/operator/v1" ) // AWSLoadBalancerParametersApplyConfiguration represents a declarative configuration of the AWSLoadBalancerParameters type for use // with apply. type AWSLoadBalancerParametersApplyConfiguration struct { - Type *v1.AWSLoadBalancerType `json:"type,omitempty"` + Type *operatorv1.AWSLoadBalancerType `json:"type,omitempty"` ClassicLoadBalancerParameters *AWSClassicLoadBalancerParametersApplyConfiguration `json:"classicLoadBalancer,omitempty"` NetworkLoadBalancerParameters *AWSNetworkLoadBalancerParametersApplyConfiguration `json:"networkLoadBalancer,omitempty"` } @@ -23,7 +23,7 @@ func AWSLoadBalancerParameters() *AWSLoadBalancerParametersApplyConfiguration { // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. 
// If called multiple times, the Type field is set to the value of the last call. -func (b *AWSLoadBalancerParametersApplyConfiguration) WithType(value v1.AWSLoadBalancerType) *AWSLoadBalancerParametersApplyConfiguration { +func (b *AWSLoadBalancerParametersApplyConfiguration) WithType(value operatorv1.AWSLoadBalancerType) *AWSLoadBalancerParametersApplyConfiguration { b.Type = &value return b } diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/awssubnets.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/awssubnets.go index 8dbc4f688..f127ac4ca 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/awssubnets.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/awssubnets.go @@ -3,14 +3,14 @@ package v1 import ( - v1 "github.com/openshift/api/operator/v1" + operatorv1 "github.com/openshift/api/operator/v1" ) // AWSSubnetsApplyConfiguration represents a declarative configuration of the AWSSubnets type for use // with apply. type AWSSubnetsApplyConfiguration struct { - IDs []v1.AWSSubnetID `json:"ids,omitempty"` - Names []v1.AWSSubnetName `json:"names,omitempty"` + IDs []operatorv1.AWSSubnetID `json:"ids,omitempty"` + Names []operatorv1.AWSSubnetName `json:"names,omitempty"` } // AWSSubnetsApplyConfiguration constructs a declarative configuration of the AWSSubnets type for use with @@ -22,7 +22,7 @@ func AWSSubnets() *AWSSubnetsApplyConfiguration { // WithIDs adds the given value to the IDs field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the IDs field. -func (b *AWSSubnetsApplyConfiguration) WithIDs(values ...v1.AWSSubnetID) *AWSSubnetsApplyConfiguration { +func (b *AWSSubnetsApplyConfiguration) WithIDs(values ...operatorv1.AWSSubnetID) *AWSSubnetsApplyConfiguration { for i := range values { b.IDs = append(b.IDs, values[i]) } @@ -32,7 +32,7 @@ func (b *AWSSubnetsApplyConfiguration) WithIDs(values ...v1.AWSSubnetID) *AWSSub // WithNames adds the given value to the Names field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the Names field. -func (b *AWSSubnetsApplyConfiguration) WithNames(values ...v1.AWSSubnetName) *AWSSubnetsApplyConfiguration { +func (b *AWSSubnetsApplyConfiguration) WithNames(values ...operatorv1.AWSSubnetName) *AWSSubnetsApplyConfiguration { for i := range values { b.Names = append(b.Names, values[i]) } diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/capability.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/capability.go index 01d8544b8..ce7ca886f 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/capability.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/capability.go @@ -3,13 +3,13 @@ package v1 import ( - v1 "github.com/openshift/api/operator/v1" + operatorv1 "github.com/openshift/api/operator/v1" ) // CapabilityApplyConfiguration represents a declarative configuration of the Capability type for use // with apply. 
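Aside: the AWS-related builders above now take the operatorv1 type aliases directly. A sketch (not part of the diff) combining them; AWSSubnetID and AWSSubnetName are string types so untyped literals convert, the subnet identifiers are hypothetical, and WithClassicLoadBalancerParameters is assumed from the usual generated-setter naming rather than shown in these hunks.

package main

import (
	"fmt"
	"time"

	operatorv1 "github.com/openshift/api/operator/v1"
	applyoperatorv1 "github.com/openshift/client-go/operator/applyconfigurations/operator/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// Hypothetical subnet identifiers.
	subnets := applyoperatorv1.AWSSubnets().
		WithIDs("subnet-0123456789abcdef0").
		WithNames("private-a")

	classic := applyoperatorv1.AWSClassicLoadBalancerParameters().
		WithConnectionIdleTimeout(metav1.Duration{Duration: 2 * time.Minute})

	// Explicit conversion avoids depending on a named constant for the type value.
	lb := applyoperatorv1.AWSLoadBalancerParameters().
		WithType(operatorv1.AWSLoadBalancerType("Classic")).
		WithClassicLoadBalancerParameters(classic) // assumed setter, see note above

	fmt.Println("type:", *lb.Type,
		"idle timeout:", classic.ConnectionIdleTimeout.Duration,
		"subnet ids:", len(subnets.IDs))
}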
type CapabilityApplyConfiguration struct { - Name *v1.ConsoleCapabilityName `json:"name,omitempty"` + Name *operatorv1.ConsoleCapabilityName `json:"name,omitempty"` Visibility *CapabilityVisibilityApplyConfiguration `json:"visibility,omitempty"` } @@ -22,7 +22,7 @@ func Capability() *CapabilityApplyConfiguration { // WithName sets the Name field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Name field is set to the value of the last call. -func (b *CapabilityApplyConfiguration) WithName(value v1.ConsoleCapabilityName) *CapabilityApplyConfiguration { +func (b *CapabilityApplyConfiguration) WithName(value operatorv1.ConsoleCapabilityName) *CapabilityApplyConfiguration { b.Name = &value return b } diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/capabilityvisibility.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/capabilityvisibility.go index a5779ead3..9e86b3d38 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/capabilityvisibility.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/capabilityvisibility.go @@ -3,13 +3,13 @@ package v1 import ( - v1 "github.com/openshift/api/operator/v1" + operatorv1 "github.com/openshift/api/operator/v1" ) // CapabilityVisibilityApplyConfiguration represents a declarative configuration of the CapabilityVisibility type for use // with apply. type CapabilityVisibilityApplyConfiguration struct { - State *v1.CapabilityState `json:"state,omitempty"` + State *operatorv1.CapabilityState `json:"state,omitempty"` } // CapabilityVisibilityApplyConfiguration constructs a declarative configuration of the CapabilityVisibility type for use with @@ -21,7 +21,7 @@ func CapabilityVisibility() *CapabilityVisibilityApplyConfiguration { // WithState sets the State field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the State field is set to the value of the last call. -func (b *CapabilityVisibilityApplyConfiguration) WithState(value v1.CapabilityState) *CapabilityVisibilityApplyConfiguration { +func (b *CapabilityVisibilityApplyConfiguration) WithState(value operatorv1.CapabilityState) *CapabilityVisibilityApplyConfiguration { b.State = &value return b } diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/clienttls.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/clienttls.go index 5617c1b0b..4b6d55997 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/clienttls.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/clienttls.go @@ -4,15 +4,15 @@ package v1 import ( configv1 "github.com/openshift/api/config/v1" - v1 "github.com/openshift/api/operator/v1" + operatorv1 "github.com/openshift/api/operator/v1" ) // ClientTLSApplyConfiguration represents a declarative configuration of the ClientTLS type for use // with apply. 
type ClientTLSApplyConfiguration struct { - ClientCertificatePolicy *v1.ClientCertificatePolicy `json:"clientCertificatePolicy,omitempty"` - ClientCA *configv1.ConfigMapNameReference `json:"clientCA,omitempty"` - AllowedSubjectPatterns []string `json:"allowedSubjectPatterns,omitempty"` + ClientCertificatePolicy *operatorv1.ClientCertificatePolicy `json:"clientCertificatePolicy,omitempty"` + ClientCA *configv1.ConfigMapNameReference `json:"clientCA,omitempty"` + AllowedSubjectPatterns []string `json:"allowedSubjectPatterns,omitempty"` } // ClientTLSApplyConfiguration constructs a declarative configuration of the ClientTLS type for use with @@ -24,7 +24,7 @@ func ClientTLS() *ClientTLSApplyConfiguration { // WithClientCertificatePolicy sets the ClientCertificatePolicy field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the ClientCertificatePolicy field is set to the value of the last call. -func (b *ClientTLSApplyConfiguration) WithClientCertificatePolicy(value v1.ClientCertificatePolicy) *ClientTLSApplyConfiguration { +func (b *ClientTLSApplyConfiguration) WithClientCertificatePolicy(value operatorv1.ClientCertificatePolicy) *ClientTLSApplyConfiguration { b.ClientCertificatePolicy = &value return b } diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/cloudcredential.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/cloudcredential.go index 80824b539..148c6a440 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/cloudcredential.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/cloudcredential.go @@ -3,21 +3,21 @@ package v1 import ( - apioperatorv1 "github.com/openshift/api/operator/v1" + operatorv1 "github.com/openshift/api/operator/v1" internal "github.com/openshift/client-go/operator/applyconfigurations/internal" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) // CloudCredentialApplyConfiguration represents a declarative configuration of the CloudCredential type for use // with apply. type CloudCredentialApplyConfiguration struct { - v1.TypeMetaApplyConfiguration `json:",inline"` - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Spec *CloudCredentialSpecApplyConfiguration `json:"spec,omitempty"` - Status *CloudCredentialStatusApplyConfiguration `json:"status,omitempty"` + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *CloudCredentialSpecApplyConfiguration `json:"spec,omitempty"` + Status *CloudCredentialStatusApplyConfiguration `json:"status,omitempty"` } // CloudCredential constructs a declarative configuration of the CloudCredential type for use with @@ -41,18 +41,18 @@ func CloudCredential(name string) *CloudCredentialApplyConfiguration { // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! 
-func ExtractCloudCredential(cloudCredential *apioperatorv1.CloudCredential, fieldManager string) (*CloudCredentialApplyConfiguration, error) { +func ExtractCloudCredential(cloudCredential *operatorv1.CloudCredential, fieldManager string) (*CloudCredentialApplyConfiguration, error) { return extractCloudCredential(cloudCredential, fieldManager, "") } // ExtractCloudCredentialStatus is the same as ExtractCloudCredential except // that it extracts the status subresource applied configuration. // Experimental! -func ExtractCloudCredentialStatus(cloudCredential *apioperatorv1.CloudCredential, fieldManager string) (*CloudCredentialApplyConfiguration, error) { +func ExtractCloudCredentialStatus(cloudCredential *operatorv1.CloudCredential, fieldManager string) (*CloudCredentialApplyConfiguration, error) { return extractCloudCredential(cloudCredential, fieldManager, "status") } -func extractCloudCredential(cloudCredential *apioperatorv1.CloudCredential, fieldManager string, subresource string) (*CloudCredentialApplyConfiguration, error) { +func extractCloudCredential(cloudCredential *operatorv1.CloudCredential, fieldManager string, subresource string) (*CloudCredentialApplyConfiguration, error) { b := &CloudCredentialApplyConfiguration{} err := managedfields.ExtractInto(cloudCredential, internal.Parser().Type("com.github.openshift.api.operator.v1.CloudCredential"), fieldManager, b, subresource) if err != nil { @@ -69,7 +69,7 @@ func extractCloudCredential(cloudCredential *apioperatorv1.CloudCredential, fiel // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *CloudCredentialApplyConfiguration) WithKind(value string) *CloudCredentialApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -77,7 +77,7 @@ func (b *CloudCredentialApplyConfiguration) WithKind(value string) *CloudCredent // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *CloudCredentialApplyConfiguration) WithAPIVersion(value string) *CloudCredentialApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -86,7 +86,7 @@ func (b *CloudCredentialApplyConfiguration) WithAPIVersion(value string) *CloudC // If called multiple times, the Name field is set to the value of the last call. func (b *CloudCredentialApplyConfiguration) WithName(value string) *CloudCredentialApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -95,7 +95,7 @@ func (b *CloudCredentialApplyConfiguration) WithName(value string) *CloudCredent // If called multiple times, the GenerateName field is set to the value of the last call. func (b *CloudCredentialApplyConfiguration) WithGenerateName(value string) *CloudCredentialApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -104,7 +104,7 @@ func (b *CloudCredentialApplyConfiguration) WithGenerateName(value string) *Clou // If called multiple times, the Namespace field is set to the value of the last call. 
func (b *CloudCredentialApplyConfiguration) WithNamespace(value string) *CloudCredentialApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -113,7 +113,7 @@ func (b *CloudCredentialApplyConfiguration) WithNamespace(value string) *CloudCr // If called multiple times, the UID field is set to the value of the last call. func (b *CloudCredentialApplyConfiguration) WithUID(value types.UID) *CloudCredentialApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -122,7 +122,7 @@ func (b *CloudCredentialApplyConfiguration) WithUID(value types.UID) *CloudCrede // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *CloudCredentialApplyConfiguration) WithResourceVersion(value string) *CloudCredentialApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -131,25 +131,25 @@ func (b *CloudCredentialApplyConfiguration) WithResourceVersion(value string) *C // If called multiple times, the Generation field is set to the value of the last call. func (b *CloudCredentialApplyConfiguration) WithGeneration(value int64) *CloudCredentialApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CreationTimestamp field is set to the value of the last call. -func (b *CloudCredentialApplyConfiguration) WithCreationTimestamp(value metav1.Time) *CloudCredentialApplyConfiguration { +func (b *CloudCredentialApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *CloudCredentialApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. -func (b *CloudCredentialApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *CloudCredentialApplyConfiguration { +func (b *CloudCredentialApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *CloudCredentialApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -158,7 +158,7 @@ func (b *CloudCredentialApplyConfiguration) WithDeletionTimestamp(value metav1.T // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. 
func (b *CloudCredentialApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *CloudCredentialApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -168,11 +168,11 @@ func (b *CloudCredentialApplyConfiguration) WithDeletionGracePeriodSeconds(value // overwriting an existing map entries in Labels field with the same key. func (b *CloudCredentialApplyConfiguration) WithLabels(entries map[string]string) *CloudCredentialApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -183,11 +183,11 @@ func (b *CloudCredentialApplyConfiguration) WithLabels(entries map[string]string // overwriting an existing map entries in Annotations field with the same key. func (b *CloudCredentialApplyConfiguration) WithAnnotations(entries map[string]string) *CloudCredentialApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -195,13 +195,13 @@ func (b *CloudCredentialApplyConfiguration) WithAnnotations(entries map[string]s // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the OwnerReferences field. 
-func (b *CloudCredentialApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *CloudCredentialApplyConfiguration { +func (b *CloudCredentialApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *CloudCredentialApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -212,14 +212,14 @@ func (b *CloudCredentialApplyConfiguration) WithOwnerReferences(values ...*v1.Ow func (b *CloudCredentialApplyConfiguration) WithFinalizers(values ...string) *CloudCredentialApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } func (b *CloudCredentialApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} } } @@ -242,5 +242,5 @@ func (b *CloudCredentialApplyConfiguration) WithStatus(value *CloudCredentialSta // GetName retrieves the value of the Name field in the declarative configuration. func (b *CloudCredentialApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/cloudcredentialspec.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/cloudcredentialspec.go index 98f36a12b..60712afa2 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/cloudcredentialspec.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/cloudcredentialspec.go @@ -24,7 +24,7 @@ func CloudCredentialSpec() *CloudCredentialSpecApplyConfiguration { // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the ManagementState field is set to the value of the last call. func (b *CloudCredentialSpecApplyConfiguration) WithManagementState(value operatorv1.ManagementState) *CloudCredentialSpecApplyConfiguration { - b.ManagementState = &value + b.OperatorSpecApplyConfiguration.ManagementState = &value return b } @@ -32,7 +32,7 @@ func (b *CloudCredentialSpecApplyConfiguration) WithManagementState(value operat // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the LogLevel field is set to the value of the last call. func (b *CloudCredentialSpecApplyConfiguration) WithLogLevel(value operatorv1.LogLevel) *CloudCredentialSpecApplyConfiguration { - b.LogLevel = &value + b.OperatorSpecApplyConfiguration.LogLevel = &value return b } @@ -40,7 +40,7 @@ func (b *CloudCredentialSpecApplyConfiguration) WithLogLevel(value operatorv1.Lo // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the OperatorLogLevel field is set to the value of the last call. 
func (b *CloudCredentialSpecApplyConfiguration) WithOperatorLogLevel(value operatorv1.LogLevel) *CloudCredentialSpecApplyConfiguration { - b.OperatorLogLevel = &value + b.OperatorSpecApplyConfiguration.OperatorLogLevel = &value return b } @@ -48,7 +48,7 @@ func (b *CloudCredentialSpecApplyConfiguration) WithOperatorLogLevel(value opera // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the UnsupportedConfigOverrides field is set to the value of the last call. func (b *CloudCredentialSpecApplyConfiguration) WithUnsupportedConfigOverrides(value runtime.RawExtension) *CloudCredentialSpecApplyConfiguration { - b.UnsupportedConfigOverrides = &value + b.OperatorSpecApplyConfiguration.UnsupportedConfigOverrides = &value return b } @@ -56,7 +56,7 @@ func (b *CloudCredentialSpecApplyConfiguration) WithUnsupportedConfigOverrides(v // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the ObservedConfig field is set to the value of the last call. func (b *CloudCredentialSpecApplyConfiguration) WithObservedConfig(value runtime.RawExtension) *CloudCredentialSpecApplyConfiguration { - b.ObservedConfig = &value + b.OperatorSpecApplyConfiguration.ObservedConfig = &value return b } diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/cloudcredentialstatus.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/cloudcredentialstatus.go index f02c542c4..fa6a6f0f6 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/cloudcredentialstatus.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/cloudcredentialstatus.go @@ -18,7 +18,7 @@ func CloudCredentialStatus() *CloudCredentialStatusApplyConfiguration { // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the ObservedGeneration field is set to the value of the last call. func (b *CloudCredentialStatusApplyConfiguration) WithObservedGeneration(value int64) *CloudCredentialStatusApplyConfiguration { - b.ObservedGeneration = &value + b.OperatorStatusApplyConfiguration.ObservedGeneration = &value return b } @@ -30,7 +30,7 @@ func (b *CloudCredentialStatusApplyConfiguration) WithConditions(values ...*Oper if values[i] == nil { panic("nil value passed to WithConditions") } - b.Conditions = append(b.Conditions, *values[i]) + b.OperatorStatusApplyConfiguration.Conditions = append(b.OperatorStatusApplyConfiguration.Conditions, *values[i]) } return b } @@ -39,7 +39,7 @@ func (b *CloudCredentialStatusApplyConfiguration) WithConditions(values ...*Oper // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Version field is set to the value of the last call. func (b *CloudCredentialStatusApplyConfiguration) WithVersion(value string) *CloudCredentialStatusApplyConfiguration { - b.Version = &value + b.OperatorStatusApplyConfiguration.Version = &value return b } @@ -47,7 +47,7 @@ func (b *CloudCredentialStatusApplyConfiguration) WithVersion(value string) *Clo // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the ReadyReplicas field is set to the value of the last call. 
func (b *CloudCredentialStatusApplyConfiguration) WithReadyReplicas(value int32) *CloudCredentialStatusApplyConfiguration { - b.ReadyReplicas = &value + b.OperatorStatusApplyConfiguration.ReadyReplicas = &value return b } @@ -55,7 +55,7 @@ func (b *CloudCredentialStatusApplyConfiguration) WithReadyReplicas(value int32) // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the LatestAvailableRevision field is set to the value of the last call. func (b *CloudCredentialStatusApplyConfiguration) WithLatestAvailableRevision(value int32) *CloudCredentialStatusApplyConfiguration { - b.LatestAvailableRevision = &value + b.OperatorStatusApplyConfiguration.LatestAvailableRevision = &value return b } @@ -67,7 +67,7 @@ func (b *CloudCredentialStatusApplyConfiguration) WithGenerations(values ...*Gen if values[i] == nil { panic("nil value passed to WithGenerations") } - b.Generations = append(b.Generations, *values[i]) + b.OperatorStatusApplyConfiguration.Generations = append(b.OperatorStatusApplyConfiguration.Generations, *values[i]) } return b } diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/clustercsidriver.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/clustercsidriver.go index 3df2ac8f6..ed2dbb9c1 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/clustercsidriver.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/clustercsidriver.go @@ -3,21 +3,21 @@ package v1 import ( - apioperatorv1 "github.com/openshift/api/operator/v1" + operatorv1 "github.com/openshift/api/operator/v1" internal "github.com/openshift/client-go/operator/applyconfigurations/internal" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) // ClusterCSIDriverApplyConfiguration represents a declarative configuration of the ClusterCSIDriver type for use // with apply. type ClusterCSIDriverApplyConfiguration struct { - v1.TypeMetaApplyConfiguration `json:",inline"` - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Spec *ClusterCSIDriverSpecApplyConfiguration `json:"spec,omitempty"` - Status *ClusterCSIDriverStatusApplyConfiguration `json:"status,omitempty"` + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *ClusterCSIDriverSpecApplyConfiguration `json:"spec,omitempty"` + Status *ClusterCSIDriverStatusApplyConfiguration `json:"status,omitempty"` } // ClusterCSIDriver constructs a declarative configuration of the ClusterCSIDriver type for use with @@ -41,18 +41,18 @@ func ClusterCSIDriver(name string) *ClusterCSIDriverApplyConfiguration { // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! 
-func ExtractClusterCSIDriver(clusterCSIDriver *apioperatorv1.ClusterCSIDriver, fieldManager string) (*ClusterCSIDriverApplyConfiguration, error) { +func ExtractClusterCSIDriver(clusterCSIDriver *operatorv1.ClusterCSIDriver, fieldManager string) (*ClusterCSIDriverApplyConfiguration, error) { return extractClusterCSIDriver(clusterCSIDriver, fieldManager, "") } // ExtractClusterCSIDriverStatus is the same as ExtractClusterCSIDriver except // that it extracts the status subresource applied configuration. // Experimental! -func ExtractClusterCSIDriverStatus(clusterCSIDriver *apioperatorv1.ClusterCSIDriver, fieldManager string) (*ClusterCSIDriverApplyConfiguration, error) { +func ExtractClusterCSIDriverStatus(clusterCSIDriver *operatorv1.ClusterCSIDriver, fieldManager string) (*ClusterCSIDriverApplyConfiguration, error) { return extractClusterCSIDriver(clusterCSIDriver, fieldManager, "status") } -func extractClusterCSIDriver(clusterCSIDriver *apioperatorv1.ClusterCSIDriver, fieldManager string, subresource string) (*ClusterCSIDriverApplyConfiguration, error) { +func extractClusterCSIDriver(clusterCSIDriver *operatorv1.ClusterCSIDriver, fieldManager string, subresource string) (*ClusterCSIDriverApplyConfiguration, error) { b := &ClusterCSIDriverApplyConfiguration{} err := managedfields.ExtractInto(clusterCSIDriver, internal.Parser().Type("com.github.openshift.api.operator.v1.ClusterCSIDriver"), fieldManager, b, subresource) if err != nil { @@ -69,7 +69,7 @@ func extractClusterCSIDriver(clusterCSIDriver *apioperatorv1.ClusterCSIDriver, f // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *ClusterCSIDriverApplyConfiguration) WithKind(value string) *ClusterCSIDriverApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -77,7 +77,7 @@ func (b *ClusterCSIDriverApplyConfiguration) WithKind(value string) *ClusterCSID // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *ClusterCSIDriverApplyConfiguration) WithAPIVersion(value string) *ClusterCSIDriverApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -86,7 +86,7 @@ func (b *ClusterCSIDriverApplyConfiguration) WithAPIVersion(value string) *Clust // If called multiple times, the Name field is set to the value of the last call. func (b *ClusterCSIDriverApplyConfiguration) WithName(value string) *ClusterCSIDriverApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -95,7 +95,7 @@ func (b *ClusterCSIDriverApplyConfiguration) WithName(value string) *ClusterCSID // If called multiple times, the GenerateName field is set to the value of the last call. func (b *ClusterCSIDriverApplyConfiguration) WithGenerateName(value string) *ClusterCSIDriverApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -104,7 +104,7 @@ func (b *ClusterCSIDriverApplyConfiguration) WithGenerateName(value string) *Clu // If called multiple times, the Namespace field is set to the value of the last call. 
func (b *ClusterCSIDriverApplyConfiguration) WithNamespace(value string) *ClusterCSIDriverApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -113,7 +113,7 @@ func (b *ClusterCSIDriverApplyConfiguration) WithNamespace(value string) *Cluste // If called multiple times, the UID field is set to the value of the last call. func (b *ClusterCSIDriverApplyConfiguration) WithUID(value types.UID) *ClusterCSIDriverApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -122,7 +122,7 @@ func (b *ClusterCSIDriverApplyConfiguration) WithUID(value types.UID) *ClusterCS // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *ClusterCSIDriverApplyConfiguration) WithResourceVersion(value string) *ClusterCSIDriverApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -131,25 +131,25 @@ func (b *ClusterCSIDriverApplyConfiguration) WithResourceVersion(value string) * // If called multiple times, the Generation field is set to the value of the last call. func (b *ClusterCSIDriverApplyConfiguration) WithGeneration(value int64) *ClusterCSIDriverApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CreationTimestamp field is set to the value of the last call. -func (b *ClusterCSIDriverApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ClusterCSIDriverApplyConfiguration { +func (b *ClusterCSIDriverApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *ClusterCSIDriverApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. -func (b *ClusterCSIDriverApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ClusterCSIDriverApplyConfiguration { +func (b *ClusterCSIDriverApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *ClusterCSIDriverApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -158,7 +158,7 @@ func (b *ClusterCSIDriverApplyConfiguration) WithDeletionTimestamp(value metav1. // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. 
func (b *ClusterCSIDriverApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ClusterCSIDriverApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -168,11 +168,11 @@ func (b *ClusterCSIDriverApplyConfiguration) WithDeletionGracePeriodSeconds(valu // overwriting an existing map entries in Labels field with the same key. func (b *ClusterCSIDriverApplyConfiguration) WithLabels(entries map[string]string) *ClusterCSIDriverApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -183,11 +183,11 @@ func (b *ClusterCSIDriverApplyConfiguration) WithLabels(entries map[string]strin // overwriting an existing map entries in Annotations field with the same key. func (b *ClusterCSIDriverApplyConfiguration) WithAnnotations(entries map[string]string) *ClusterCSIDriverApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -195,13 +195,13 @@ func (b *ClusterCSIDriverApplyConfiguration) WithAnnotations(entries map[string] // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the OwnerReferences field. 
-func (b *ClusterCSIDriverApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ClusterCSIDriverApplyConfiguration { +func (b *ClusterCSIDriverApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *ClusterCSIDriverApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -212,14 +212,14 @@ func (b *ClusterCSIDriverApplyConfiguration) WithOwnerReferences(values ...*v1.O func (b *ClusterCSIDriverApplyConfiguration) WithFinalizers(values ...string) *ClusterCSIDriverApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } func (b *ClusterCSIDriverApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} } } @@ -242,5 +242,5 @@ func (b *ClusterCSIDriverApplyConfiguration) WithStatus(value *ClusterCSIDriverS // GetName retrieves the value of the Name field in the declarative configuration. func (b *ClusterCSIDriverApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/clustercsidriverspec.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/clustercsidriverspec.go index a5837a95c..a008180f4 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/clustercsidriverspec.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/clustercsidriverspec.go @@ -25,7 +25,7 @@ func ClusterCSIDriverSpec() *ClusterCSIDriverSpecApplyConfiguration { // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the ManagementState field is set to the value of the last call. func (b *ClusterCSIDriverSpecApplyConfiguration) WithManagementState(value operatorv1.ManagementState) *ClusterCSIDriverSpecApplyConfiguration { - b.ManagementState = &value + b.OperatorSpecApplyConfiguration.ManagementState = &value return b } @@ -33,7 +33,7 @@ func (b *ClusterCSIDriverSpecApplyConfiguration) WithManagementState(value opera // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the LogLevel field is set to the value of the last call. func (b *ClusterCSIDriverSpecApplyConfiguration) WithLogLevel(value operatorv1.LogLevel) *ClusterCSIDriverSpecApplyConfiguration { - b.LogLevel = &value + b.OperatorSpecApplyConfiguration.LogLevel = &value return b } @@ -41,7 +41,7 @@ func (b *ClusterCSIDriverSpecApplyConfiguration) WithLogLevel(value operatorv1.L // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the OperatorLogLevel field is set to the value of the last call. 
func (b *ClusterCSIDriverSpecApplyConfiguration) WithOperatorLogLevel(value operatorv1.LogLevel) *ClusterCSIDriverSpecApplyConfiguration { - b.OperatorLogLevel = &value + b.OperatorSpecApplyConfiguration.OperatorLogLevel = &value return b } @@ -49,7 +49,7 @@ func (b *ClusterCSIDriverSpecApplyConfiguration) WithOperatorLogLevel(value oper // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the UnsupportedConfigOverrides field is set to the value of the last call. func (b *ClusterCSIDriverSpecApplyConfiguration) WithUnsupportedConfigOverrides(value runtime.RawExtension) *ClusterCSIDriverSpecApplyConfiguration { - b.UnsupportedConfigOverrides = &value + b.OperatorSpecApplyConfiguration.UnsupportedConfigOverrides = &value return b } @@ -57,7 +57,7 @@ func (b *ClusterCSIDriverSpecApplyConfiguration) WithUnsupportedConfigOverrides( // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the ObservedConfig field is set to the value of the last call. func (b *ClusterCSIDriverSpecApplyConfiguration) WithObservedConfig(value runtime.RawExtension) *ClusterCSIDriverSpecApplyConfiguration { - b.ObservedConfig = &value + b.OperatorSpecApplyConfiguration.ObservedConfig = &value return b } diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/clustercsidriverstatus.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/clustercsidriverstatus.go index ba2c1ee9d..f5e2221b8 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/clustercsidriverstatus.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/clustercsidriverstatus.go @@ -18,7 +18,7 @@ func ClusterCSIDriverStatus() *ClusterCSIDriverStatusApplyConfiguration { // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the ObservedGeneration field is set to the value of the last call. func (b *ClusterCSIDriverStatusApplyConfiguration) WithObservedGeneration(value int64) *ClusterCSIDriverStatusApplyConfiguration { - b.ObservedGeneration = &value + b.OperatorStatusApplyConfiguration.ObservedGeneration = &value return b } @@ -30,7 +30,7 @@ func (b *ClusterCSIDriverStatusApplyConfiguration) WithConditions(values ...*Ope if values[i] == nil { panic("nil value passed to WithConditions") } - b.Conditions = append(b.Conditions, *values[i]) + b.OperatorStatusApplyConfiguration.Conditions = append(b.OperatorStatusApplyConfiguration.Conditions, *values[i]) } return b } @@ -39,7 +39,7 @@ func (b *ClusterCSIDriverStatusApplyConfiguration) WithConditions(values ...*Ope // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Version field is set to the value of the last call. func (b *ClusterCSIDriverStatusApplyConfiguration) WithVersion(value string) *ClusterCSIDriverStatusApplyConfiguration { - b.Version = &value + b.OperatorStatusApplyConfiguration.Version = &value return b } @@ -47,7 +47,7 @@ func (b *ClusterCSIDriverStatusApplyConfiguration) WithVersion(value string) *Cl // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the ReadyReplicas field is set to the value of the last call. 
func (b *ClusterCSIDriverStatusApplyConfiguration) WithReadyReplicas(value int32) *ClusterCSIDriverStatusApplyConfiguration { - b.ReadyReplicas = &value + b.OperatorStatusApplyConfiguration.ReadyReplicas = &value return b } @@ -55,7 +55,7 @@ func (b *ClusterCSIDriverStatusApplyConfiguration) WithReadyReplicas(value int32 // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the LatestAvailableRevision field is set to the value of the last call. func (b *ClusterCSIDriverStatusApplyConfiguration) WithLatestAvailableRevision(value int32) *ClusterCSIDriverStatusApplyConfiguration { - b.LatestAvailableRevision = &value + b.OperatorStatusApplyConfiguration.LatestAvailableRevision = &value return b } @@ -67,7 +67,7 @@ func (b *ClusterCSIDriverStatusApplyConfiguration) WithGenerations(values ...*Ge if values[i] == nil { panic("nil value passed to WithGenerations") } - b.Generations = append(b.Generations, *values[i]) + b.OperatorStatusApplyConfiguration.Generations = append(b.OperatorStatusApplyConfiguration.Generations, *values[i]) } return b } diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/config.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/config.go index 7dd7d64d5..b884322ae 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/config.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/config.go @@ -3,21 +3,21 @@ package v1 import ( - apioperatorv1 "github.com/openshift/api/operator/v1" + operatorv1 "github.com/openshift/api/operator/v1" internal "github.com/openshift/client-go/operator/applyconfigurations/internal" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) // ConfigApplyConfiguration represents a declarative configuration of the Config type for use // with apply. type ConfigApplyConfiguration struct { - v1.TypeMetaApplyConfiguration `json:",inline"` - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Spec *ConfigSpecApplyConfiguration `json:"spec,omitempty"` - Status *ConfigStatusApplyConfiguration `json:"status,omitempty"` + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *ConfigSpecApplyConfiguration `json:"spec,omitempty"` + Status *ConfigStatusApplyConfiguration `json:"status,omitempty"` } // Config constructs a declarative configuration of the Config type for use with @@ -41,18 +41,18 @@ func Config(name string) *ConfigApplyConfiguration { // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! -func ExtractConfig(config *apioperatorv1.Config, fieldManager string) (*ConfigApplyConfiguration, error) { +func ExtractConfig(config *operatorv1.Config, fieldManager string) (*ConfigApplyConfiguration, error) { return extractConfig(config, fieldManager, "") } // ExtractConfigStatus is the same as ExtractConfig except // that it extracts the status subresource applied configuration. // Experimental! 
-func ExtractConfigStatus(config *apioperatorv1.Config, fieldManager string) (*ConfigApplyConfiguration, error) { +func ExtractConfigStatus(config *operatorv1.Config, fieldManager string) (*ConfigApplyConfiguration, error) { return extractConfig(config, fieldManager, "status") } -func extractConfig(config *apioperatorv1.Config, fieldManager string, subresource string) (*ConfigApplyConfiguration, error) { +func extractConfig(config *operatorv1.Config, fieldManager string, subresource string) (*ConfigApplyConfiguration, error) { b := &ConfigApplyConfiguration{} err := managedfields.ExtractInto(config, internal.Parser().Type("com.github.openshift.api.operator.v1.Config"), fieldManager, b, subresource) if err != nil { @@ -69,7 +69,7 @@ func extractConfig(config *apioperatorv1.Config, fieldManager string, subresourc // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *ConfigApplyConfiguration) WithKind(value string) *ConfigApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -77,7 +77,7 @@ func (b *ConfigApplyConfiguration) WithKind(value string) *ConfigApplyConfigurat // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *ConfigApplyConfiguration) WithAPIVersion(value string) *ConfigApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -86,7 +86,7 @@ func (b *ConfigApplyConfiguration) WithAPIVersion(value string) *ConfigApplyConf // If called multiple times, the Name field is set to the value of the last call. func (b *ConfigApplyConfiguration) WithName(value string) *ConfigApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -95,7 +95,7 @@ func (b *ConfigApplyConfiguration) WithName(value string) *ConfigApplyConfigurat // If called multiple times, the GenerateName field is set to the value of the last call. func (b *ConfigApplyConfiguration) WithGenerateName(value string) *ConfigApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -104,7 +104,7 @@ func (b *ConfigApplyConfiguration) WithGenerateName(value string) *ConfigApplyCo // If called multiple times, the Namespace field is set to the value of the last call. func (b *ConfigApplyConfiguration) WithNamespace(value string) *ConfigApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -113,7 +113,7 @@ func (b *ConfigApplyConfiguration) WithNamespace(value string) *ConfigApplyConfi // If called multiple times, the UID field is set to the value of the last call. func (b *ConfigApplyConfiguration) WithUID(value types.UID) *ConfigApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -122,7 +122,7 @@ func (b *ConfigApplyConfiguration) WithUID(value types.UID) *ConfigApplyConfigur // If called multiple times, the ResourceVersion field is set to the value of the last call. 
func (b *ConfigApplyConfiguration) WithResourceVersion(value string) *ConfigApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -131,25 +131,25 @@ func (b *ConfigApplyConfiguration) WithResourceVersion(value string) *ConfigAppl // If called multiple times, the Generation field is set to the value of the last call. func (b *ConfigApplyConfiguration) WithGeneration(value int64) *ConfigApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CreationTimestamp field is set to the value of the last call. -func (b *ConfigApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ConfigApplyConfiguration { +func (b *ConfigApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *ConfigApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. -func (b *ConfigApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ConfigApplyConfiguration { +func (b *ConfigApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *ConfigApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -158,7 +158,7 @@ func (b *ConfigApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *Con // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *ConfigApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ConfigApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -168,11 +168,11 @@ func (b *ConfigApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) * // overwriting an existing map entries in Labels field with the same key. func (b *ConfigApplyConfiguration) WithLabels(entries map[string]string) *ConfigApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -183,11 +183,11 @@ func (b *ConfigApplyConfiguration) WithLabels(entries map[string]string) *Config // overwriting an existing map entries in Annotations field with the same key. 
func (b *ConfigApplyConfiguration) WithAnnotations(entries map[string]string) *ConfigApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -195,13 +195,13 @@ func (b *ConfigApplyConfiguration) WithAnnotations(entries map[string]string) *C // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the OwnerReferences field. -func (b *ConfigApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ConfigApplyConfiguration { +func (b *ConfigApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *ConfigApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -212,14 +212,14 @@ func (b *ConfigApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerRefere func (b *ConfigApplyConfiguration) WithFinalizers(values ...string) *ConfigApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } func (b *ConfigApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} } } @@ -242,5 +242,5 @@ func (b *ConfigApplyConfiguration) WithStatus(value *ConfigStatusApplyConfigurat // GetName retrieves the value of the Name field in the declarative configuration. func (b *ConfigApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/configspec.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/configspec.go index b5c044dce..c7d3e93c3 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/configspec.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/configspec.go @@ -23,7 +23,7 @@ func ConfigSpec() *ConfigSpecApplyConfiguration { // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the ManagementState field is set to the value of the last call. 
func (b *ConfigSpecApplyConfiguration) WithManagementState(value operatorv1.ManagementState) *ConfigSpecApplyConfiguration { - b.ManagementState = &value + b.OperatorSpecApplyConfiguration.ManagementState = &value return b } @@ -31,7 +31,7 @@ func (b *ConfigSpecApplyConfiguration) WithManagementState(value operatorv1.Mana // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the LogLevel field is set to the value of the last call. func (b *ConfigSpecApplyConfiguration) WithLogLevel(value operatorv1.LogLevel) *ConfigSpecApplyConfiguration { - b.LogLevel = &value + b.OperatorSpecApplyConfiguration.LogLevel = &value return b } @@ -39,7 +39,7 @@ func (b *ConfigSpecApplyConfiguration) WithLogLevel(value operatorv1.LogLevel) * // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the OperatorLogLevel field is set to the value of the last call. func (b *ConfigSpecApplyConfiguration) WithOperatorLogLevel(value operatorv1.LogLevel) *ConfigSpecApplyConfiguration { - b.OperatorLogLevel = &value + b.OperatorSpecApplyConfiguration.OperatorLogLevel = &value return b } @@ -47,7 +47,7 @@ func (b *ConfigSpecApplyConfiguration) WithOperatorLogLevel(value operatorv1.Log // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the UnsupportedConfigOverrides field is set to the value of the last call. func (b *ConfigSpecApplyConfiguration) WithUnsupportedConfigOverrides(value runtime.RawExtension) *ConfigSpecApplyConfiguration { - b.UnsupportedConfigOverrides = &value + b.OperatorSpecApplyConfiguration.UnsupportedConfigOverrides = &value return b } @@ -55,6 +55,6 @@ func (b *ConfigSpecApplyConfiguration) WithUnsupportedConfigOverrides(value runt // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the ObservedConfig field is set to the value of the last call. func (b *ConfigSpecApplyConfiguration) WithObservedConfig(value runtime.RawExtension) *ConfigSpecApplyConfiguration { - b.ObservedConfig = &value + b.OperatorSpecApplyConfiguration.ObservedConfig = &value return b } diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/configstatus.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/configstatus.go index b151b4e8a..38e52420c 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/configstatus.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/configstatus.go @@ -18,7 +18,7 @@ func ConfigStatus() *ConfigStatusApplyConfiguration { // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the ObservedGeneration field is set to the value of the last call. 
func (b *ConfigStatusApplyConfiguration) WithObservedGeneration(value int64) *ConfigStatusApplyConfiguration { - b.ObservedGeneration = &value + b.OperatorStatusApplyConfiguration.ObservedGeneration = &value return b } @@ -30,7 +30,7 @@ func (b *ConfigStatusApplyConfiguration) WithConditions(values ...*OperatorCondi if values[i] == nil { panic("nil value passed to WithConditions") } - b.Conditions = append(b.Conditions, *values[i]) + b.OperatorStatusApplyConfiguration.Conditions = append(b.OperatorStatusApplyConfiguration.Conditions, *values[i]) } return b } @@ -39,7 +39,7 @@ func (b *ConfigStatusApplyConfiguration) WithConditions(values ...*OperatorCondi // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Version field is set to the value of the last call. func (b *ConfigStatusApplyConfiguration) WithVersion(value string) *ConfigStatusApplyConfiguration { - b.Version = &value + b.OperatorStatusApplyConfiguration.Version = &value return b } @@ -47,7 +47,7 @@ func (b *ConfigStatusApplyConfiguration) WithVersion(value string) *ConfigStatus // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the ReadyReplicas field is set to the value of the last call. func (b *ConfigStatusApplyConfiguration) WithReadyReplicas(value int32) *ConfigStatusApplyConfiguration { - b.ReadyReplicas = &value + b.OperatorStatusApplyConfiguration.ReadyReplicas = &value return b } @@ -55,7 +55,7 @@ func (b *ConfigStatusApplyConfiguration) WithReadyReplicas(value int32) *ConfigS // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the LatestAvailableRevision field is set to the value of the last call. func (b *ConfigStatusApplyConfiguration) WithLatestAvailableRevision(value int32) *ConfigStatusApplyConfiguration { - b.LatestAvailableRevision = &value + b.OperatorStatusApplyConfiguration.LatestAvailableRevision = &value return b } @@ -67,7 +67,7 @@ func (b *ConfigStatusApplyConfiguration) WithGenerations(values ...*GenerationSt if values[i] == nil { panic("nil value passed to WithGenerations") } - b.Generations = append(b.Generations, *values[i]) + b.OperatorStatusApplyConfiguration.Generations = append(b.OperatorStatusApplyConfiguration.Generations, *values[i]) } return b } diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/console.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/console.go index dd7bf4a3d..aaa69f64c 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/console.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/console.go @@ -3,21 +3,21 @@ package v1 import ( - apioperatorv1 "github.com/openshift/api/operator/v1" + operatorv1 "github.com/openshift/api/operator/v1" internal "github.com/openshift/client-go/operator/applyconfigurations/internal" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) // ConsoleApplyConfiguration represents a declarative configuration of the Console type for use // with apply. 
type ConsoleApplyConfiguration struct { - v1.TypeMetaApplyConfiguration `json:",inline"` - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Spec *ConsoleSpecApplyConfiguration `json:"spec,omitempty"` - Status *ConsoleStatusApplyConfiguration `json:"status,omitempty"` + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *ConsoleSpecApplyConfiguration `json:"spec,omitempty"` + Status *ConsoleStatusApplyConfiguration `json:"status,omitempty"` } // Console constructs a declarative configuration of the Console type for use with @@ -41,18 +41,18 @@ func Console(name string) *ConsoleApplyConfiguration { // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! -func ExtractConsole(console *apioperatorv1.Console, fieldManager string) (*ConsoleApplyConfiguration, error) { +func ExtractConsole(console *operatorv1.Console, fieldManager string) (*ConsoleApplyConfiguration, error) { return extractConsole(console, fieldManager, "") } // ExtractConsoleStatus is the same as ExtractConsole except // that it extracts the status subresource applied configuration. // Experimental! -func ExtractConsoleStatus(console *apioperatorv1.Console, fieldManager string) (*ConsoleApplyConfiguration, error) { +func ExtractConsoleStatus(console *operatorv1.Console, fieldManager string) (*ConsoleApplyConfiguration, error) { return extractConsole(console, fieldManager, "status") } -func extractConsole(console *apioperatorv1.Console, fieldManager string, subresource string) (*ConsoleApplyConfiguration, error) { +func extractConsole(console *operatorv1.Console, fieldManager string, subresource string) (*ConsoleApplyConfiguration, error) { b := &ConsoleApplyConfiguration{} err := managedfields.ExtractInto(console, internal.Parser().Type("com.github.openshift.api.operator.v1.Console"), fieldManager, b, subresource) if err != nil { @@ -69,7 +69,7 @@ func extractConsole(console *apioperatorv1.Console, fieldManager string, subreso // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *ConsoleApplyConfiguration) WithKind(value string) *ConsoleApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -77,7 +77,7 @@ func (b *ConsoleApplyConfiguration) WithKind(value string) *ConsoleApplyConfigur // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *ConsoleApplyConfiguration) WithAPIVersion(value string) *ConsoleApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -86,7 +86,7 @@ func (b *ConsoleApplyConfiguration) WithAPIVersion(value string) *ConsoleApplyCo // If called multiple times, the Name field is set to the value of the last call. 
func (b *ConsoleApplyConfiguration) WithName(value string) *ConsoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -95,7 +95,7 @@ func (b *ConsoleApplyConfiguration) WithName(value string) *ConsoleApplyConfigur // If called multiple times, the GenerateName field is set to the value of the last call. func (b *ConsoleApplyConfiguration) WithGenerateName(value string) *ConsoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -104,7 +104,7 @@ func (b *ConsoleApplyConfiguration) WithGenerateName(value string) *ConsoleApply // If called multiple times, the Namespace field is set to the value of the last call. func (b *ConsoleApplyConfiguration) WithNamespace(value string) *ConsoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -113,7 +113,7 @@ func (b *ConsoleApplyConfiguration) WithNamespace(value string) *ConsoleApplyCon // If called multiple times, the UID field is set to the value of the last call. func (b *ConsoleApplyConfiguration) WithUID(value types.UID) *ConsoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -122,7 +122,7 @@ func (b *ConsoleApplyConfiguration) WithUID(value types.UID) *ConsoleApplyConfig // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *ConsoleApplyConfiguration) WithResourceVersion(value string) *ConsoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -131,25 +131,25 @@ func (b *ConsoleApplyConfiguration) WithResourceVersion(value string) *ConsoleAp // If called multiple times, the Generation field is set to the value of the last call. func (b *ConsoleApplyConfiguration) WithGeneration(value int64) *ConsoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CreationTimestamp field is set to the value of the last call. -func (b *ConsoleApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ConsoleApplyConfiguration { +func (b *ConsoleApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *ConsoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. 
-func (b *ConsoleApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ConsoleApplyConfiguration { +func (b *ConsoleApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *ConsoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -158,7 +158,7 @@ func (b *ConsoleApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *Co // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *ConsoleApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ConsoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -168,11 +168,11 @@ func (b *ConsoleApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) // overwriting an existing map entries in Labels field with the same key. func (b *ConsoleApplyConfiguration) WithLabels(entries map[string]string) *ConsoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -183,11 +183,11 @@ func (b *ConsoleApplyConfiguration) WithLabels(entries map[string]string) *Conso // overwriting an existing map entries in Annotations field with the same key. func (b *ConsoleApplyConfiguration) WithAnnotations(entries map[string]string) *ConsoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -195,13 +195,13 @@ func (b *ConsoleApplyConfiguration) WithAnnotations(entries map[string]string) * // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the OwnerReferences field. 
-func (b *ConsoleApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ConsoleApplyConfiguration {
+func (b *ConsoleApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *ConsoleApplyConfiguration {
 	b.ensureObjectMetaApplyConfigurationExists()
 	for i := range values {
 		if values[i] == nil {
 			panic("nil value passed to WithOwnerReferences")
 		}
-		b.OwnerReferences = append(b.OwnerReferences, *values[i])
+		b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
 	}
 	return b
 }
@@ -212,14 +212,14 @@ func (b *ConsoleApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerRefer
 func (b *ConsoleApplyConfiguration) WithFinalizers(values ...string) *ConsoleApplyConfiguration {
 	b.ensureObjectMetaApplyConfigurationExists()
 	for i := range values {
-		b.Finalizers = append(b.Finalizers, values[i])
+		b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i])
 	}
 	return b
 }
 
 func (b *ConsoleApplyConfiguration) ensureObjectMetaApplyConfigurationExists() {
 	if b.ObjectMetaApplyConfiguration == nil {
-		b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{}
+		b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{}
 	}
 }
 
@@ -242,5 +242,5 @@ func (b *ConsoleApplyConfiguration) WithStatus(value *ConsoleStatusApplyConfigur
 // GetName retrieves the value of the Name field in the declarative configuration.
 func (b *ConsoleApplyConfiguration) GetName() *string {
 	b.ensureObjectMetaApplyConfigurationExists()
-	return b.Name
+	return b.ObjectMetaApplyConfiguration.Name
 }
diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/consoleconfigroute.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/consoleconfigroute.go
index d8a9310f6..b71ac9f3a 100644
--- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/consoleconfigroute.go
+++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/consoleconfigroute.go
@@ -3,14 +3,14 @@
 package v1
 
 import (
-	v1 "github.com/openshift/api/config/v1"
+	configv1 "github.com/openshift/api/config/v1"
 )
 
 // ConsoleConfigRouteApplyConfiguration represents a declarative configuration of the ConsoleConfigRoute type for use
 // with apply.
 type ConsoleConfigRouteApplyConfiguration struct {
-	Hostname *string                 `json:"hostname,omitempty"`
-	Secret   *v1.SecretNameReference `json:"secret,omitempty"`
+	Hostname *string                       `json:"hostname,omitempty"`
+	Secret   *configv1.SecretNameReference `json:"secret,omitempty"`
 }
 
 // ConsoleConfigRouteApplyConfiguration constructs a declarative configuration of the ConsoleConfigRoute type for use with
@@ -30,7 +30,7 @@ func (b *ConsoleConfigRouteApplyConfiguration) WithHostname(value string) *Conso
 // WithSecret sets the Secret field in the declarative configuration to the given value
 // and returns the receiver, so that objects can be built by chaining "With" function invocations.
 // If called multiple times, the Secret field is set to the value of the last call.
-func (b *ConsoleConfigRouteApplyConfiguration) WithSecret(value v1.SecretNameReference) *ConsoleConfigRouteApplyConfiguration {
+func (b *ConsoleConfigRouteApplyConfiguration) WithSecret(value configv1.SecretNameReference) *ConsoleConfigRouteApplyConfiguration {
 	b.Secret = &value
 	return b
 }
diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/consolespec.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/consolespec.go
index 421ff84e2..0155ffef3 100644
--- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/consolespec.go
+++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/consolespec.go
@@ -28,7 +28,7 @@ func ConsoleSpec() *ConsoleSpecApplyConfiguration {
 // and returns the receiver, so that objects can be built by chaining "With" function invocations.
 // If called multiple times, the ManagementState field is set to the value of the last call.
 func (b *ConsoleSpecApplyConfiguration) WithManagementState(value operatorv1.ManagementState) *ConsoleSpecApplyConfiguration {
-	b.ManagementState = &value
+	b.OperatorSpecApplyConfiguration.ManagementState = &value
 	return b
 }
 
@@ -36,7 +36,7 @@ func (b *ConsoleSpecApplyConfiguration) WithManagementState(value operatorv1.Man
 // and returns the receiver, so that objects can be built by chaining "With" function invocations.
 // If called multiple times, the LogLevel field is set to the value of the last call.
 func (b *ConsoleSpecApplyConfiguration) WithLogLevel(value operatorv1.LogLevel) *ConsoleSpecApplyConfiguration {
-	b.LogLevel = &value
+	b.OperatorSpecApplyConfiguration.LogLevel = &value
 	return b
 }
 
@@ -44,7 +44,7 @@ func (b *ConsoleSpecApplyConfiguration) WithLogLevel(value operatorv1.LogLevel)
 // and returns the receiver, so that objects can be built by chaining "With" function invocations.
 // If called multiple times, the OperatorLogLevel field is set to the value of the last call.
 func (b *ConsoleSpecApplyConfiguration) WithOperatorLogLevel(value operatorv1.LogLevel) *ConsoleSpecApplyConfiguration {
-	b.OperatorLogLevel = &value
+	b.OperatorSpecApplyConfiguration.OperatorLogLevel = &value
 	return b
 }
 
@@ -52,7 +52,7 @@ func (b *ConsoleSpecApplyConfiguration) WithOperatorLogLevel(value operatorv1.Lo
 // and returns the receiver, so that objects can be built by chaining "With" function invocations.
 // If called multiple times, the UnsupportedConfigOverrides field is set to the value of the last call.
 func (b *ConsoleSpecApplyConfiguration) WithUnsupportedConfigOverrides(value runtime.RawExtension) *ConsoleSpecApplyConfiguration {
-	b.UnsupportedConfigOverrides = &value
+	b.OperatorSpecApplyConfiguration.UnsupportedConfigOverrides = &value
 	return b
 }
 
@@ -60,7 +60,7 @@ func (b *ConsoleSpecApplyConfiguration) WithUnsupportedConfigOverrides(value run
 // and returns the receiver, so that objects can be built by chaining "With" function invocations.
 // If called multiple times, the ObservedConfig field is set to the value of the last call.
 func (b *ConsoleSpecApplyConfiguration) WithObservedConfig(value runtime.RawExtension) *ConsoleSpecApplyConfiguration {
-	b.ObservedConfig = &value
+	b.OperatorSpecApplyConfiguration.ObservedConfig = &value
 	return b
 }
 
diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/consolestatus.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/consolestatus.go
index c6330be35..ad3493477 100644
--- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/consolestatus.go
+++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/consolestatus.go
@@ -18,7 +18,7 @@ func ConsoleStatus() *ConsoleStatusApplyConfiguration {
 // and returns the receiver, so that objects can be built by chaining "With" function invocations.
 // If called multiple times, the ObservedGeneration field is set to the value of the last call.
 func (b *ConsoleStatusApplyConfiguration) WithObservedGeneration(value int64) *ConsoleStatusApplyConfiguration {
-	b.ObservedGeneration = &value
+	b.OperatorStatusApplyConfiguration.ObservedGeneration = &value
 	return b
 }
 
@@ -30,7 +30,7 @@ func (b *ConsoleStatusApplyConfiguration) WithConditions(values ...*OperatorCond
 		if values[i] == nil {
 			panic("nil value passed to WithConditions")
 		}
-		b.Conditions = append(b.Conditions, *values[i])
+		b.OperatorStatusApplyConfiguration.Conditions = append(b.OperatorStatusApplyConfiguration.Conditions, *values[i])
 	}
 	return b
 }
 
@@ -39,7 +39,7 @@ func (b *ConsoleStatusApplyConfiguration) WithConditions(values ...*OperatorCond
 // and returns the receiver, so that objects can be built by chaining "With" function invocations.
 // If called multiple times, the Version field is set to the value of the last call.
 func (b *ConsoleStatusApplyConfiguration) WithVersion(value string) *ConsoleStatusApplyConfiguration {
-	b.Version = &value
+	b.OperatorStatusApplyConfiguration.Version = &value
 	return b
 }
 
@@ -47,7 +47,7 @@ func (b *ConsoleStatusApplyConfiguration) WithVersion(value string) *ConsoleStat
 // and returns the receiver, so that objects can be built by chaining "With" function invocations.
 // If called multiple times, the ReadyReplicas field is set to the value of the last call.
 func (b *ConsoleStatusApplyConfiguration) WithReadyReplicas(value int32) *ConsoleStatusApplyConfiguration {
-	b.ReadyReplicas = &value
+	b.OperatorStatusApplyConfiguration.ReadyReplicas = &value
 	return b
 }
 
@@ -55,7 +55,7 @@ func (b *ConsoleStatusApplyConfiguration) WithReadyReplicas(value int32) *Consol
 // and returns the receiver, so that objects can be built by chaining "With" function invocations.
 // If called multiple times, the LatestAvailableRevision field is set to the value of the last call.
func (b *ConsoleStatusApplyConfiguration) WithLatestAvailableRevision(value int32) *ConsoleStatusApplyConfiguration { - b.LatestAvailableRevision = &value + b.OperatorStatusApplyConfiguration.LatestAvailableRevision = &value return b } @@ -67,7 +67,7 @@ func (b *ConsoleStatusApplyConfiguration) WithGenerations(values ...*GenerationS if values[i] == nil { panic("nil value passed to WithGenerations") } - b.Generations = append(b.Generations, *values[i]) + b.OperatorStatusApplyConfiguration.Generations = append(b.OperatorStatusApplyConfiguration.Generations, *values[i]) } return b } diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/csidriverconfigspec.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/csidriverconfigspec.go index 887cf604e..15b793452 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/csidriverconfigspec.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/csidriverconfigspec.go @@ -3,13 +3,13 @@ package v1 import ( - v1 "github.com/openshift/api/operator/v1" + operatorv1 "github.com/openshift/api/operator/v1" ) // CSIDriverConfigSpecApplyConfiguration represents a declarative configuration of the CSIDriverConfigSpec type for use // with apply. type CSIDriverConfigSpecApplyConfiguration struct { - DriverType *v1.CSIDriverType `json:"driverType,omitempty"` + DriverType *operatorv1.CSIDriverType `json:"driverType,omitempty"` AWS *AWSCSIDriverConfigSpecApplyConfiguration `json:"aws,omitempty"` Azure *AzureCSIDriverConfigSpecApplyConfiguration `json:"azure,omitempty"` GCP *GCPCSIDriverConfigSpecApplyConfiguration `json:"gcp,omitempty"` @@ -26,7 +26,7 @@ func CSIDriverConfigSpec() *CSIDriverConfigSpecApplyConfiguration { // WithDriverType sets the DriverType field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DriverType field is set to the value of the last call. -func (b *CSIDriverConfigSpecApplyConfiguration) WithDriverType(value v1.CSIDriverType) *CSIDriverConfigSpecApplyConfiguration { +func (b *CSIDriverConfigSpecApplyConfiguration) WithDriverType(value operatorv1.CSIDriverType) *CSIDriverConfigSpecApplyConfiguration { b.DriverType = &value return b } diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/csisnapshotcontroller.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/csisnapshotcontroller.go index 035aee886..d9b93f620 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/csisnapshotcontroller.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/csisnapshotcontroller.go @@ -3,21 +3,21 @@ package v1 import ( - apioperatorv1 "github.com/openshift/api/operator/v1" + operatorv1 "github.com/openshift/api/operator/v1" internal "github.com/openshift/client-go/operator/applyconfigurations/internal" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) // CSISnapshotControllerApplyConfiguration represents a declarative configuration of the CSISnapshotController type for use // with apply. 
type CSISnapshotControllerApplyConfiguration struct { - v1.TypeMetaApplyConfiguration `json:",inline"` - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Spec *CSISnapshotControllerSpecApplyConfiguration `json:"spec,omitempty"` - Status *CSISnapshotControllerStatusApplyConfiguration `json:"status,omitempty"` + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *CSISnapshotControllerSpecApplyConfiguration `json:"spec,omitempty"` + Status *CSISnapshotControllerStatusApplyConfiguration `json:"status,omitempty"` } // CSISnapshotController constructs a declarative configuration of the CSISnapshotController type for use with @@ -41,18 +41,18 @@ func CSISnapshotController(name string) *CSISnapshotControllerApplyConfiguration // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! -func ExtractCSISnapshotController(cSISnapshotController *apioperatorv1.CSISnapshotController, fieldManager string) (*CSISnapshotControllerApplyConfiguration, error) { +func ExtractCSISnapshotController(cSISnapshotController *operatorv1.CSISnapshotController, fieldManager string) (*CSISnapshotControllerApplyConfiguration, error) { return extractCSISnapshotController(cSISnapshotController, fieldManager, "") } // ExtractCSISnapshotControllerStatus is the same as ExtractCSISnapshotController except // that it extracts the status subresource applied configuration. // Experimental! -func ExtractCSISnapshotControllerStatus(cSISnapshotController *apioperatorv1.CSISnapshotController, fieldManager string) (*CSISnapshotControllerApplyConfiguration, error) { +func ExtractCSISnapshotControllerStatus(cSISnapshotController *operatorv1.CSISnapshotController, fieldManager string) (*CSISnapshotControllerApplyConfiguration, error) { return extractCSISnapshotController(cSISnapshotController, fieldManager, "status") } -func extractCSISnapshotController(cSISnapshotController *apioperatorv1.CSISnapshotController, fieldManager string, subresource string) (*CSISnapshotControllerApplyConfiguration, error) { +func extractCSISnapshotController(cSISnapshotController *operatorv1.CSISnapshotController, fieldManager string, subresource string) (*CSISnapshotControllerApplyConfiguration, error) { b := &CSISnapshotControllerApplyConfiguration{} err := managedfields.ExtractInto(cSISnapshotController, internal.Parser().Type("com.github.openshift.api.operator.v1.CSISnapshotController"), fieldManager, b, subresource) if err != nil { @@ -69,7 +69,7 @@ func extractCSISnapshotController(cSISnapshotController *apioperatorv1.CSISnapsh // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *CSISnapshotControllerApplyConfiguration) WithKind(value string) *CSISnapshotControllerApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -77,7 +77,7 @@ func (b *CSISnapshotControllerApplyConfiguration) WithKind(value string) *CSISna // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. 
func (b *CSISnapshotControllerApplyConfiguration) WithAPIVersion(value string) *CSISnapshotControllerApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -86,7 +86,7 @@ func (b *CSISnapshotControllerApplyConfiguration) WithAPIVersion(value string) * // If called multiple times, the Name field is set to the value of the last call. func (b *CSISnapshotControllerApplyConfiguration) WithName(value string) *CSISnapshotControllerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -95,7 +95,7 @@ func (b *CSISnapshotControllerApplyConfiguration) WithName(value string) *CSISna // If called multiple times, the GenerateName field is set to the value of the last call. func (b *CSISnapshotControllerApplyConfiguration) WithGenerateName(value string) *CSISnapshotControllerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -104,7 +104,7 @@ func (b *CSISnapshotControllerApplyConfiguration) WithGenerateName(value string) // If called multiple times, the Namespace field is set to the value of the last call. func (b *CSISnapshotControllerApplyConfiguration) WithNamespace(value string) *CSISnapshotControllerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -113,7 +113,7 @@ func (b *CSISnapshotControllerApplyConfiguration) WithNamespace(value string) *C // If called multiple times, the UID field is set to the value of the last call. func (b *CSISnapshotControllerApplyConfiguration) WithUID(value types.UID) *CSISnapshotControllerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -122,7 +122,7 @@ func (b *CSISnapshotControllerApplyConfiguration) WithUID(value types.UID) *CSIS // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *CSISnapshotControllerApplyConfiguration) WithResourceVersion(value string) *CSISnapshotControllerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -131,25 +131,25 @@ func (b *CSISnapshotControllerApplyConfiguration) WithResourceVersion(value stri // If called multiple times, the Generation field is set to the value of the last call. func (b *CSISnapshotControllerApplyConfiguration) WithGeneration(value int64) *CSISnapshotControllerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CreationTimestamp field is set to the value of the last call. 
-func (b *CSISnapshotControllerApplyConfiguration) WithCreationTimestamp(value metav1.Time) *CSISnapshotControllerApplyConfiguration { +func (b *CSISnapshotControllerApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *CSISnapshotControllerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. -func (b *CSISnapshotControllerApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *CSISnapshotControllerApplyConfiguration { +func (b *CSISnapshotControllerApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *CSISnapshotControllerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -158,7 +158,7 @@ func (b *CSISnapshotControllerApplyConfiguration) WithDeletionTimestamp(value me // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *CSISnapshotControllerApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *CSISnapshotControllerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -168,11 +168,11 @@ func (b *CSISnapshotControllerApplyConfiguration) WithDeletionGracePeriodSeconds // overwriting an existing map entries in Labels field with the same key. func (b *CSISnapshotControllerApplyConfiguration) WithLabels(entries map[string]string) *CSISnapshotControllerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -183,11 +183,11 @@ func (b *CSISnapshotControllerApplyConfiguration) WithLabels(entries map[string] // overwriting an existing map entries in Annotations field with the same key. func (b *CSISnapshotControllerApplyConfiguration) WithAnnotations(entries map[string]string) *CSISnapshotControllerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -195,13 +195,13 @@ func (b *CSISnapshotControllerApplyConfiguration) WithAnnotations(entries map[st // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the OwnerReferences field. 
-func (b *CSISnapshotControllerApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *CSISnapshotControllerApplyConfiguration { +func (b *CSISnapshotControllerApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *CSISnapshotControllerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -212,14 +212,14 @@ func (b *CSISnapshotControllerApplyConfiguration) WithOwnerReferences(values ... func (b *CSISnapshotControllerApplyConfiguration) WithFinalizers(values ...string) *CSISnapshotControllerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } func (b *CSISnapshotControllerApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} } } @@ -242,5 +242,5 @@ func (b *CSISnapshotControllerApplyConfiguration) WithStatus(value *CSISnapshotC // GetName retrieves the value of the Name field in the declarative configuration. func (b *CSISnapshotControllerApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/csisnapshotcontrollerspec.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/csisnapshotcontrollerspec.go index e614041cb..fd90faa2d 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/csisnapshotcontrollerspec.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/csisnapshotcontrollerspec.go @@ -23,7 +23,7 @@ func CSISnapshotControllerSpec() *CSISnapshotControllerSpecApplyConfiguration { // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the ManagementState field is set to the value of the last call. func (b *CSISnapshotControllerSpecApplyConfiguration) WithManagementState(value operatorv1.ManagementState) *CSISnapshotControllerSpecApplyConfiguration { - b.ManagementState = &value + b.OperatorSpecApplyConfiguration.ManagementState = &value return b } @@ -31,7 +31,7 @@ func (b *CSISnapshotControllerSpecApplyConfiguration) WithManagementState(value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the LogLevel field is set to the value of the last call. func (b *CSISnapshotControllerSpecApplyConfiguration) WithLogLevel(value operatorv1.LogLevel) *CSISnapshotControllerSpecApplyConfiguration { - b.LogLevel = &value + b.OperatorSpecApplyConfiguration.LogLevel = &value return b } @@ -39,7 +39,7 @@ func (b *CSISnapshotControllerSpecApplyConfiguration) WithLogLevel(value operato // and returns the receiver, so that objects can be built by chaining "With" function invocations. 
// If called multiple times, the OperatorLogLevel field is set to the value of the last call. func (b *CSISnapshotControllerSpecApplyConfiguration) WithOperatorLogLevel(value operatorv1.LogLevel) *CSISnapshotControllerSpecApplyConfiguration { - b.OperatorLogLevel = &value + b.OperatorSpecApplyConfiguration.OperatorLogLevel = &value return b } @@ -47,7 +47,7 @@ func (b *CSISnapshotControllerSpecApplyConfiguration) WithOperatorLogLevel(value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the UnsupportedConfigOverrides field is set to the value of the last call. func (b *CSISnapshotControllerSpecApplyConfiguration) WithUnsupportedConfigOverrides(value runtime.RawExtension) *CSISnapshotControllerSpecApplyConfiguration { - b.UnsupportedConfigOverrides = &value + b.OperatorSpecApplyConfiguration.UnsupportedConfigOverrides = &value return b } @@ -55,6 +55,6 @@ func (b *CSISnapshotControllerSpecApplyConfiguration) WithUnsupportedConfigOverr // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the ObservedConfig field is set to the value of the last call. func (b *CSISnapshotControllerSpecApplyConfiguration) WithObservedConfig(value runtime.RawExtension) *CSISnapshotControllerSpecApplyConfiguration { - b.ObservedConfig = &value + b.OperatorSpecApplyConfiguration.ObservedConfig = &value return b } diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/csisnapshotcontrollerstatus.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/csisnapshotcontrollerstatus.go index d3fa5d05c..5b6d30d8d 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/csisnapshotcontrollerstatus.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/csisnapshotcontrollerstatus.go @@ -18,7 +18,7 @@ func CSISnapshotControllerStatus() *CSISnapshotControllerStatusApplyConfiguratio // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the ObservedGeneration field is set to the value of the last call. func (b *CSISnapshotControllerStatusApplyConfiguration) WithObservedGeneration(value int64) *CSISnapshotControllerStatusApplyConfiguration { - b.ObservedGeneration = &value + b.OperatorStatusApplyConfiguration.ObservedGeneration = &value return b } @@ -30,7 +30,7 @@ func (b *CSISnapshotControllerStatusApplyConfiguration) WithConditions(values .. if values[i] == nil { panic("nil value passed to WithConditions") } - b.Conditions = append(b.Conditions, *values[i]) + b.OperatorStatusApplyConfiguration.Conditions = append(b.OperatorStatusApplyConfiguration.Conditions, *values[i]) } return b } @@ -39,7 +39,7 @@ func (b *CSISnapshotControllerStatusApplyConfiguration) WithConditions(values .. // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Version field is set to the value of the last call. func (b *CSISnapshotControllerStatusApplyConfiguration) WithVersion(value string) *CSISnapshotControllerStatusApplyConfiguration { - b.Version = &value + b.OperatorStatusApplyConfiguration.Version = &value return b } @@ -47,7 +47,7 @@ func (b *CSISnapshotControllerStatusApplyConfiguration) WithVersion(value string // and returns the receiver, so that objects can be built by chaining "With" function invocations. 
// If called multiple times, the ReadyReplicas field is set to the value of the last call. func (b *CSISnapshotControllerStatusApplyConfiguration) WithReadyReplicas(value int32) *CSISnapshotControllerStatusApplyConfiguration { - b.ReadyReplicas = &value + b.OperatorStatusApplyConfiguration.ReadyReplicas = &value return b } @@ -55,7 +55,7 @@ func (b *CSISnapshotControllerStatusApplyConfiguration) WithReadyReplicas(value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the LatestAvailableRevision field is set to the value of the last call. func (b *CSISnapshotControllerStatusApplyConfiguration) WithLatestAvailableRevision(value int32) *CSISnapshotControllerStatusApplyConfiguration { - b.LatestAvailableRevision = &value + b.OperatorStatusApplyConfiguration.LatestAvailableRevision = &value return b } @@ -67,7 +67,7 @@ func (b *CSISnapshotControllerStatusApplyConfiguration) WithGenerations(values . if values[i] == nil { panic("nil value passed to WithGenerations") } - b.Generations = append(b.Generations, *values[i]) + b.OperatorStatusApplyConfiguration.Generations = append(b.OperatorStatusApplyConfiguration.Generations, *values[i]) } return b } diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/defaultnetworkdefinition.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/defaultnetworkdefinition.go index c8689ee18..b62419ac4 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/defaultnetworkdefinition.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/defaultnetworkdefinition.go @@ -3,13 +3,13 @@ package v1 import ( - v1 "github.com/openshift/api/operator/v1" + operatorv1 "github.com/openshift/api/operator/v1" ) // DefaultNetworkDefinitionApplyConfiguration represents a declarative configuration of the DefaultNetworkDefinition type for use // with apply. type DefaultNetworkDefinitionApplyConfiguration struct { - Type *v1.NetworkType `json:"type,omitempty"` + Type *operatorv1.NetworkType `json:"type,omitempty"` OpenShiftSDNConfig *OpenShiftSDNConfigApplyConfiguration `json:"openshiftSDNConfig,omitempty"` OVNKubernetesConfig *OVNKubernetesConfigApplyConfiguration `json:"ovnKubernetesConfig,omitempty"` } @@ -23,7 +23,7 @@ func DefaultNetworkDefinition() *DefaultNetworkDefinitionApplyConfiguration { // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. 
-func (b *DefaultNetworkDefinitionApplyConfiguration) WithType(value v1.NetworkType) *DefaultNetworkDefinitionApplyConfiguration { +func (b *DefaultNetworkDefinitionApplyConfiguration) WithType(value operatorv1.NetworkType) *DefaultNetworkDefinitionApplyConfiguration { b.Type = &value return b } diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/developerconsolecatalogcategory.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/developerconsolecatalogcategory.go index 3aa286ff1..2a296d731 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/developerconsolecatalogcategory.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/developerconsolecatalogcategory.go @@ -19,7 +19,7 @@ func DeveloperConsoleCatalogCategory() *DeveloperConsoleCatalogCategoryApplyConf // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the ID field is set to the value of the last call. func (b *DeveloperConsoleCatalogCategoryApplyConfiguration) WithID(value string) *DeveloperConsoleCatalogCategoryApplyConfiguration { - b.ID = &value + b.DeveloperConsoleCatalogCategoryMetaApplyConfiguration.ID = &value return b } @@ -27,7 +27,7 @@ func (b *DeveloperConsoleCatalogCategoryApplyConfiguration) WithID(value string) // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Label field is set to the value of the last call. func (b *DeveloperConsoleCatalogCategoryApplyConfiguration) WithLabel(value string) *DeveloperConsoleCatalogCategoryApplyConfiguration { - b.Label = &value + b.DeveloperConsoleCatalogCategoryMetaApplyConfiguration.Label = &value return b } @@ -36,7 +36,7 @@ func (b *DeveloperConsoleCatalogCategoryApplyConfiguration) WithLabel(value stri // If called multiple times, values provided by each call will be appended to the Tags field. func (b *DeveloperConsoleCatalogCategoryApplyConfiguration) WithTags(values ...string) *DeveloperConsoleCatalogCategoryApplyConfiguration { for i := range values { - b.Tags = append(b.Tags, values[i]) + b.DeveloperConsoleCatalogCategoryMetaApplyConfiguration.Tags = append(b.DeveloperConsoleCatalogCategoryMetaApplyConfiguration.Tags, values[i]) } return b } diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/developerconsolecatalogtypes.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/developerconsolecatalogtypes.go index 439dafbf4..d847d2065 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/developerconsolecatalogtypes.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/developerconsolecatalogtypes.go @@ -3,15 +3,15 @@ package v1 import ( - v1 "github.com/openshift/api/operator/v1" + operatorv1 "github.com/openshift/api/operator/v1" ) // DeveloperConsoleCatalogTypesApplyConfiguration represents a declarative configuration of the DeveloperConsoleCatalogTypes type for use // with apply. 
type DeveloperConsoleCatalogTypesApplyConfiguration struct { - State *v1.CatalogTypesState `json:"state,omitempty"` - Enabled *[]string `json:"enabled,omitempty"` - Disabled *[]string `json:"disabled,omitempty"` + State *operatorv1.CatalogTypesState `json:"state,omitempty"` + Enabled *[]string `json:"enabled,omitempty"` + Disabled *[]string `json:"disabled,omitempty"` } // DeveloperConsoleCatalogTypesApplyConfiguration constructs a declarative configuration of the DeveloperConsoleCatalogTypes type for use with @@ -23,7 +23,7 @@ func DeveloperConsoleCatalogTypes() *DeveloperConsoleCatalogTypesApplyConfigurat // WithState sets the State field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the State field is set to the value of the last call. -func (b *DeveloperConsoleCatalogTypesApplyConfiguration) WithState(value v1.CatalogTypesState) *DeveloperConsoleCatalogTypesApplyConfiguration { +func (b *DeveloperConsoleCatalogTypesApplyConfiguration) WithState(value operatorv1.CatalogTypesState) *DeveloperConsoleCatalogTypesApplyConfiguration { b.State = &value return b } diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/dns.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/dns.go index 0e31c5af9..7b2cb3d36 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/dns.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/dns.go @@ -3,21 +3,21 @@ package v1 import ( - apioperatorv1 "github.com/openshift/api/operator/v1" + operatorv1 "github.com/openshift/api/operator/v1" internal "github.com/openshift/client-go/operator/applyconfigurations/internal" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) // DNSApplyConfiguration represents a declarative configuration of the DNS type for use // with apply. type DNSApplyConfiguration struct { - v1.TypeMetaApplyConfiguration `json:",inline"` - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Spec *DNSSpecApplyConfiguration `json:"spec,omitempty"` - Status *DNSStatusApplyConfiguration `json:"status,omitempty"` + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *DNSSpecApplyConfiguration `json:"spec,omitempty"` + Status *DNSStatusApplyConfiguration `json:"status,omitempty"` } // DNS constructs a declarative configuration of the DNS type for use with @@ -41,18 +41,18 @@ func DNS(name string) *DNSApplyConfiguration { // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! -func ExtractDNS(dNS *apioperatorv1.DNS, fieldManager string) (*DNSApplyConfiguration, error) { +func ExtractDNS(dNS *operatorv1.DNS, fieldManager string) (*DNSApplyConfiguration, error) { return extractDNS(dNS, fieldManager, "") } // ExtractDNSStatus is the same as ExtractDNS except // that it extracts the status subresource applied configuration. // Experimental! 
-func ExtractDNSStatus(dNS *apioperatorv1.DNS, fieldManager string) (*DNSApplyConfiguration, error) { +func ExtractDNSStatus(dNS *operatorv1.DNS, fieldManager string) (*DNSApplyConfiguration, error) { return extractDNS(dNS, fieldManager, "status") } -func extractDNS(dNS *apioperatorv1.DNS, fieldManager string, subresource string) (*DNSApplyConfiguration, error) { +func extractDNS(dNS *operatorv1.DNS, fieldManager string, subresource string) (*DNSApplyConfiguration, error) { b := &DNSApplyConfiguration{} err := managedfields.ExtractInto(dNS, internal.Parser().Type("com.github.openshift.api.operator.v1.DNS"), fieldManager, b, subresource) if err != nil { @@ -69,7 +69,7 @@ func extractDNS(dNS *apioperatorv1.DNS, fieldManager string, subresource string) // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *DNSApplyConfiguration) WithKind(value string) *DNSApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -77,7 +77,7 @@ func (b *DNSApplyConfiguration) WithKind(value string) *DNSApplyConfiguration { // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *DNSApplyConfiguration) WithAPIVersion(value string) *DNSApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -86,7 +86,7 @@ func (b *DNSApplyConfiguration) WithAPIVersion(value string) *DNSApplyConfigurat // If called multiple times, the Name field is set to the value of the last call. func (b *DNSApplyConfiguration) WithName(value string) *DNSApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -95,7 +95,7 @@ func (b *DNSApplyConfiguration) WithName(value string) *DNSApplyConfiguration { // If called multiple times, the GenerateName field is set to the value of the last call. func (b *DNSApplyConfiguration) WithGenerateName(value string) *DNSApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -104,7 +104,7 @@ func (b *DNSApplyConfiguration) WithGenerateName(value string) *DNSApplyConfigur // If called multiple times, the Namespace field is set to the value of the last call. func (b *DNSApplyConfiguration) WithNamespace(value string) *DNSApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -113,7 +113,7 @@ func (b *DNSApplyConfiguration) WithNamespace(value string) *DNSApplyConfigurati // If called multiple times, the UID field is set to the value of the last call. func (b *DNSApplyConfiguration) WithUID(value types.UID) *DNSApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -122,7 +122,7 @@ func (b *DNSApplyConfiguration) WithUID(value types.UID) *DNSApplyConfiguration // If called multiple times, the ResourceVersion field is set to the value of the last call. 
func (b *DNSApplyConfiguration) WithResourceVersion(value string) *DNSApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -131,25 +131,25 @@ func (b *DNSApplyConfiguration) WithResourceVersion(value string) *DNSApplyConfi // If called multiple times, the Generation field is set to the value of the last call. func (b *DNSApplyConfiguration) WithGeneration(value int64) *DNSApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CreationTimestamp field is set to the value of the last call. -func (b *DNSApplyConfiguration) WithCreationTimestamp(value metav1.Time) *DNSApplyConfiguration { +func (b *DNSApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *DNSApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. -func (b *DNSApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *DNSApplyConfiguration { +func (b *DNSApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *DNSApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -158,7 +158,7 @@ func (b *DNSApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *DNSApp // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *DNSApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *DNSApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -168,11 +168,11 @@ func (b *DNSApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *DNS // overwriting an existing map entries in Labels field with the same key. func (b *DNSApplyConfiguration) WithLabels(entries map[string]string) *DNSApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -183,11 +183,11 @@ func (b *DNSApplyConfiguration) WithLabels(entries map[string]string) *DNSApplyC // overwriting an existing map entries in Annotations field with the same key. 
func (b *DNSApplyConfiguration) WithAnnotations(entries map[string]string) *DNSApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -195,13 +195,13 @@ func (b *DNSApplyConfiguration) WithAnnotations(entries map[string]string) *DNSA // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the OwnerReferences field. -func (b *DNSApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *DNSApplyConfiguration { +func (b *DNSApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *DNSApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -212,14 +212,14 @@ func (b *DNSApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReference func (b *DNSApplyConfiguration) WithFinalizers(values ...string) *DNSApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } func (b *DNSApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} } } @@ -242,5 +242,5 @@ func (b *DNSApplyConfiguration) WithStatus(value *DNSStatusApplyConfiguration) * // GetName retrieves the value of the Name field in the declarative configuration. func (b *DNSApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/dnscache.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/dnscache.go index 4a387f634..09244ed90 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/dnscache.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/dnscache.go @@ -3,14 +3,14 @@ package v1 import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) // DNSCacheApplyConfiguration represents a declarative configuration of the DNSCache type for use // with apply. 
type DNSCacheApplyConfiguration struct { - PositiveTTL *v1.Duration `json:"positiveTTL,omitempty"` - NegativeTTL *v1.Duration `json:"negativeTTL,omitempty"` + PositiveTTL *metav1.Duration `json:"positiveTTL,omitempty"` + NegativeTTL *metav1.Duration `json:"negativeTTL,omitempty"` } // DNSCacheApplyConfiguration constructs a declarative configuration of the DNSCache type for use with @@ -22,7 +22,7 @@ func DNSCache() *DNSCacheApplyConfiguration { // WithPositiveTTL sets the PositiveTTL field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the PositiveTTL field is set to the value of the last call. -func (b *DNSCacheApplyConfiguration) WithPositiveTTL(value v1.Duration) *DNSCacheApplyConfiguration { +func (b *DNSCacheApplyConfiguration) WithPositiveTTL(value metav1.Duration) *DNSCacheApplyConfiguration { b.PositiveTTL = &value return b } @@ -30,7 +30,7 @@ func (b *DNSCacheApplyConfiguration) WithPositiveTTL(value v1.Duration) *DNSCach // WithNegativeTTL sets the NegativeTTL field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the NegativeTTL field is set to the value of the last call. -func (b *DNSCacheApplyConfiguration) WithNegativeTTL(value v1.Duration) *DNSCacheApplyConfiguration { +func (b *DNSCacheApplyConfiguration) WithNegativeTTL(value metav1.Duration) *DNSCacheApplyConfiguration { b.NegativeTTL = &value return b } diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/dnsnodeplacement.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/dnsnodeplacement.go index 1c941d66e..b82c86dd3 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/dnsnodeplacement.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/dnsnodeplacement.go @@ -3,14 +3,14 @@ package v1 import ( - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" ) // DNSNodePlacementApplyConfiguration represents a declarative configuration of the DNSNodePlacement type for use // with apply. type DNSNodePlacementApplyConfiguration struct { - NodeSelector map[string]string `json:"nodeSelector,omitempty"` - Tolerations []v1.Toleration `json:"tolerations,omitempty"` + NodeSelector map[string]string `json:"nodeSelector,omitempty"` + Tolerations []corev1.Toleration `json:"tolerations,omitempty"` } // DNSNodePlacementApplyConfiguration constructs a declarative configuration of the DNSNodePlacement type for use with @@ -36,7 +36,7 @@ func (b *DNSNodePlacementApplyConfiguration) WithNodeSelector(entries map[string // WithTolerations adds the given value to the Tolerations field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the Tolerations field. 
-func (b *DNSNodePlacementApplyConfiguration) WithTolerations(values ...v1.Toleration) *DNSNodePlacementApplyConfiguration { +func (b *DNSNodePlacementApplyConfiguration) WithTolerations(values ...corev1.Toleration) *DNSNodePlacementApplyConfiguration { for i := range values { b.Tolerations = append(b.Tolerations, values[i]) } diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/dnsovertlsconfig.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/dnsovertlsconfig.go index 7e6ff33d6..7267bca62 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/dnsovertlsconfig.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/dnsovertlsconfig.go @@ -3,14 +3,14 @@ package v1 import ( - v1 "github.com/openshift/api/config/v1" + configv1 "github.com/openshift/api/config/v1" ) // DNSOverTLSConfigApplyConfiguration represents a declarative configuration of the DNSOverTLSConfig type for use // with apply. type DNSOverTLSConfigApplyConfiguration struct { - ServerName *string `json:"serverName,omitempty"` - CABundle *v1.ConfigMapNameReference `json:"caBundle,omitempty"` + ServerName *string `json:"serverName,omitempty"` + CABundle *configv1.ConfigMapNameReference `json:"caBundle,omitempty"` } // DNSOverTLSConfigApplyConfiguration constructs a declarative configuration of the DNSOverTLSConfig type for use with @@ -30,7 +30,7 @@ func (b *DNSOverTLSConfigApplyConfiguration) WithServerName(value string) *DNSOv // WithCABundle sets the CABundle field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CABundle field is set to the value of the last call. -func (b *DNSOverTLSConfigApplyConfiguration) WithCABundle(value v1.ConfigMapNameReference) *DNSOverTLSConfigApplyConfiguration { +func (b *DNSOverTLSConfigApplyConfiguration) WithCABundle(value configv1.ConfigMapNameReference) *DNSOverTLSConfigApplyConfiguration { b.CABundle = &value return b } diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/dnstransportconfig.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/dnstransportconfig.go index 52bc39563..1b689670c 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/dnstransportconfig.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/dnstransportconfig.go @@ -3,13 +3,13 @@ package v1 import ( - v1 "github.com/openshift/api/operator/v1" + operatorv1 "github.com/openshift/api/operator/v1" ) // DNSTransportConfigApplyConfiguration represents a declarative configuration of the DNSTransportConfig type for use // with apply. type DNSTransportConfigApplyConfiguration struct { - Transport *v1.DNSTransport `json:"transport,omitempty"` + Transport *operatorv1.DNSTransport `json:"transport,omitempty"` TLS *DNSOverTLSConfigApplyConfiguration `json:"tls,omitempty"` } @@ -22,7 +22,7 @@ func DNSTransportConfig() *DNSTransportConfigApplyConfiguration { // WithTransport sets the Transport field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Transport field is set to the value of the last call. 
-func (b *DNSTransportConfigApplyConfiguration) WithTransport(value v1.DNSTransport) *DNSTransportConfigApplyConfiguration { +func (b *DNSTransportConfigApplyConfiguration) WithTransport(value operatorv1.DNSTransport) *DNSTransportConfigApplyConfiguration { b.Transport = &value return b } diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/endpointpublishingstrategy.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/endpointpublishingstrategy.go index 876f91e97..f4006d50c 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/endpointpublishingstrategy.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/endpointpublishingstrategy.go @@ -3,17 +3,17 @@ package v1 import ( - v1 "github.com/openshift/api/operator/v1" + operatorv1 "github.com/openshift/api/operator/v1" ) // EndpointPublishingStrategyApplyConfiguration represents a declarative configuration of the EndpointPublishingStrategy type for use // with apply. type EndpointPublishingStrategyApplyConfiguration struct { - Type *v1.EndpointPublishingStrategyType `json:"type,omitempty"` - LoadBalancer *LoadBalancerStrategyApplyConfiguration `json:"loadBalancer,omitempty"` - HostNetwork *HostNetworkStrategyApplyConfiguration `json:"hostNetwork,omitempty"` - Private *PrivateStrategyApplyConfiguration `json:"private,omitempty"` - NodePort *NodePortStrategyApplyConfiguration `json:"nodePort,omitempty"` + Type *operatorv1.EndpointPublishingStrategyType `json:"type,omitempty"` + LoadBalancer *LoadBalancerStrategyApplyConfiguration `json:"loadBalancer,omitempty"` + HostNetwork *HostNetworkStrategyApplyConfiguration `json:"hostNetwork,omitempty"` + Private *PrivateStrategyApplyConfiguration `json:"private,omitempty"` + NodePort *NodePortStrategyApplyConfiguration `json:"nodePort,omitempty"` } // EndpointPublishingStrategyApplyConfiguration constructs a declarative configuration of the EndpointPublishingStrategy type for use with @@ -25,7 +25,7 @@ func EndpointPublishingStrategy() *EndpointPublishingStrategyApplyConfiguration // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. 
-func (b *EndpointPublishingStrategyApplyConfiguration) WithType(value v1.EndpointPublishingStrategyType) *EndpointPublishingStrategyApplyConfiguration { +func (b *EndpointPublishingStrategyApplyConfiguration) WithType(value operatorv1.EndpointPublishingStrategyType) *EndpointPublishingStrategyApplyConfiguration { b.Type = &value return b } diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/etcd.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/etcd.go index bf091f4e6..de118401e 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/etcd.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/etcd.go @@ -3,21 +3,21 @@ package v1 import ( - apioperatorv1 "github.com/openshift/api/operator/v1" + operatorv1 "github.com/openshift/api/operator/v1" internal "github.com/openshift/client-go/operator/applyconfigurations/internal" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) // EtcdApplyConfiguration represents a declarative configuration of the Etcd type for use // with apply. type EtcdApplyConfiguration struct { - v1.TypeMetaApplyConfiguration `json:",inline"` - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Spec *EtcdSpecApplyConfiguration `json:"spec,omitempty"` - Status *EtcdStatusApplyConfiguration `json:"status,omitempty"` + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *EtcdSpecApplyConfiguration `json:"spec,omitempty"` + Status *EtcdStatusApplyConfiguration `json:"status,omitempty"` } // Etcd constructs a declarative configuration of the Etcd type for use with @@ -41,18 +41,18 @@ func Etcd(name string) *EtcdApplyConfiguration { // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! -func ExtractEtcd(etcd *apioperatorv1.Etcd, fieldManager string) (*EtcdApplyConfiguration, error) { +func ExtractEtcd(etcd *operatorv1.Etcd, fieldManager string) (*EtcdApplyConfiguration, error) { return extractEtcd(etcd, fieldManager, "") } // ExtractEtcdStatus is the same as ExtractEtcd except // that it extracts the status subresource applied configuration. // Experimental! -func ExtractEtcdStatus(etcd *apioperatorv1.Etcd, fieldManager string) (*EtcdApplyConfiguration, error) { +func ExtractEtcdStatus(etcd *operatorv1.Etcd, fieldManager string) (*EtcdApplyConfiguration, error) { return extractEtcd(etcd, fieldManager, "status") } -func extractEtcd(etcd *apioperatorv1.Etcd, fieldManager string, subresource string) (*EtcdApplyConfiguration, error) { +func extractEtcd(etcd *operatorv1.Etcd, fieldManager string, subresource string) (*EtcdApplyConfiguration, error) { b := &EtcdApplyConfiguration{} err := managedfields.ExtractInto(etcd, internal.Parser().Type("com.github.openshift.api.operator.v1.Etcd"), fieldManager, b, subresource) if err != nil { @@ -69,7 +69,7 @@ func extractEtcd(etcd *apioperatorv1.Etcd, fieldManager string, subresource stri // and returns the receiver, so that objects can be built by chaining "With" function invocations. 
// If called multiple times, the Kind field is set to the value of the last call. func (b *EtcdApplyConfiguration) WithKind(value string) *EtcdApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -77,7 +77,7 @@ func (b *EtcdApplyConfiguration) WithKind(value string) *EtcdApplyConfiguration // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *EtcdApplyConfiguration) WithAPIVersion(value string) *EtcdApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -86,7 +86,7 @@ func (b *EtcdApplyConfiguration) WithAPIVersion(value string) *EtcdApplyConfigur // If called multiple times, the Name field is set to the value of the last call. func (b *EtcdApplyConfiguration) WithName(value string) *EtcdApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -95,7 +95,7 @@ func (b *EtcdApplyConfiguration) WithName(value string) *EtcdApplyConfiguration // If called multiple times, the GenerateName field is set to the value of the last call. func (b *EtcdApplyConfiguration) WithGenerateName(value string) *EtcdApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -104,7 +104,7 @@ func (b *EtcdApplyConfiguration) WithGenerateName(value string) *EtcdApplyConfig // If called multiple times, the Namespace field is set to the value of the last call. func (b *EtcdApplyConfiguration) WithNamespace(value string) *EtcdApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -113,7 +113,7 @@ func (b *EtcdApplyConfiguration) WithNamespace(value string) *EtcdApplyConfigura // If called multiple times, the UID field is set to the value of the last call. func (b *EtcdApplyConfiguration) WithUID(value types.UID) *EtcdApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -122,7 +122,7 @@ func (b *EtcdApplyConfiguration) WithUID(value types.UID) *EtcdApplyConfiguratio // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *EtcdApplyConfiguration) WithResourceVersion(value string) *EtcdApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -131,25 +131,25 @@ func (b *EtcdApplyConfiguration) WithResourceVersion(value string) *EtcdApplyCon // If called multiple times, the Generation field is set to the value of the last call. func (b *EtcdApplyConfiguration) WithGeneration(value int64) *EtcdApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CreationTimestamp field is set to the value of the last call. 
-func (b *EtcdApplyConfiguration) WithCreationTimestamp(value metav1.Time) *EtcdApplyConfiguration { +func (b *EtcdApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *EtcdApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. -func (b *EtcdApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *EtcdApplyConfiguration { +func (b *EtcdApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *EtcdApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -158,7 +158,7 @@ func (b *EtcdApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *EtcdA // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *EtcdApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *EtcdApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -168,11 +168,11 @@ func (b *EtcdApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *Et // overwriting an existing map entries in Labels field with the same key. func (b *EtcdApplyConfiguration) WithLabels(entries map[string]string) *EtcdApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -183,11 +183,11 @@ func (b *EtcdApplyConfiguration) WithLabels(entries map[string]string) *EtcdAppl // overwriting an existing map entries in Annotations field with the same key. func (b *EtcdApplyConfiguration) WithAnnotations(entries map[string]string) *EtcdApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -195,13 +195,13 @@ func (b *EtcdApplyConfiguration) WithAnnotations(entries map[string]string) *Etc // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the OwnerReferences field. 
-func (b *EtcdApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *EtcdApplyConfiguration { +func (b *EtcdApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *EtcdApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -212,14 +212,14 @@ func (b *EtcdApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenc func (b *EtcdApplyConfiguration) WithFinalizers(values ...string) *EtcdApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } func (b *EtcdApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} } } @@ -242,5 +242,5 @@ func (b *EtcdApplyConfiguration) WithStatus(value *EtcdStatusApplyConfiguration) // GetName retrieves the value of the Name field in the declarative configuration. func (b *EtcdApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/etcdspec.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/etcdspec.go index 49a3055c2..6588c0922 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/etcdspec.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/etcdspec.go @@ -25,7 +25,7 @@ func EtcdSpec() *EtcdSpecApplyConfiguration { // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the ManagementState field is set to the value of the last call. func (b *EtcdSpecApplyConfiguration) WithManagementState(value operatorv1.ManagementState) *EtcdSpecApplyConfiguration { - b.ManagementState = &value + b.OperatorSpecApplyConfiguration.ManagementState = &value return b } @@ -33,7 +33,7 @@ func (b *EtcdSpecApplyConfiguration) WithManagementState(value operatorv1.Manage // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the LogLevel field is set to the value of the last call. func (b *EtcdSpecApplyConfiguration) WithLogLevel(value operatorv1.LogLevel) *EtcdSpecApplyConfiguration { - b.LogLevel = &value + b.OperatorSpecApplyConfiguration.LogLevel = &value return b } @@ -41,7 +41,7 @@ func (b *EtcdSpecApplyConfiguration) WithLogLevel(value operatorv1.LogLevel) *Et // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the OperatorLogLevel field is set to the value of the last call. 
func (b *EtcdSpecApplyConfiguration) WithOperatorLogLevel(value operatorv1.LogLevel) *EtcdSpecApplyConfiguration { - b.OperatorLogLevel = &value + b.OperatorSpecApplyConfiguration.OperatorLogLevel = &value return b } @@ -49,7 +49,7 @@ func (b *EtcdSpecApplyConfiguration) WithOperatorLogLevel(value operatorv1.LogLe // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the UnsupportedConfigOverrides field is set to the value of the last call. func (b *EtcdSpecApplyConfiguration) WithUnsupportedConfigOverrides(value runtime.RawExtension) *EtcdSpecApplyConfiguration { - b.UnsupportedConfigOverrides = &value + b.OperatorSpecApplyConfiguration.UnsupportedConfigOverrides = &value return b } @@ -57,7 +57,7 @@ func (b *EtcdSpecApplyConfiguration) WithUnsupportedConfigOverrides(value runtim // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the ObservedConfig field is set to the value of the last call. func (b *EtcdSpecApplyConfiguration) WithObservedConfig(value runtime.RawExtension) *EtcdSpecApplyConfiguration { - b.ObservedConfig = &value + b.OperatorSpecApplyConfiguration.ObservedConfig = &value return b } @@ -65,7 +65,7 @@ func (b *EtcdSpecApplyConfiguration) WithObservedConfig(value runtime.RawExtensi // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the ForceRedeploymentReason field is set to the value of the last call. func (b *EtcdSpecApplyConfiguration) WithForceRedeploymentReason(value string) *EtcdSpecApplyConfiguration { - b.ForceRedeploymentReason = &value + b.StaticPodOperatorSpecApplyConfiguration.ForceRedeploymentReason = &value return b } @@ -73,7 +73,7 @@ func (b *EtcdSpecApplyConfiguration) WithForceRedeploymentReason(value string) * // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the FailedRevisionLimit field is set to the value of the last call. func (b *EtcdSpecApplyConfiguration) WithFailedRevisionLimit(value int32) *EtcdSpecApplyConfiguration { - b.FailedRevisionLimit = &value + b.StaticPodOperatorSpecApplyConfiguration.FailedRevisionLimit = &value return b } @@ -81,7 +81,7 @@ func (b *EtcdSpecApplyConfiguration) WithFailedRevisionLimit(value int32) *EtcdS // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the SucceededRevisionLimit field is set to the value of the last call. func (b *EtcdSpecApplyConfiguration) WithSucceededRevisionLimit(value int32) *EtcdSpecApplyConfiguration { - b.SucceededRevisionLimit = &value + b.StaticPodOperatorSpecApplyConfiguration.SucceededRevisionLimit = &value return b } diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/etcdstatus.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/etcdstatus.go index 7d545a900..a6fa6f07d 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/etcdstatus.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/etcdstatus.go @@ -23,7 +23,7 @@ func EtcdStatus() *EtcdStatusApplyConfiguration { // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the ObservedGeneration field is set to the value of the last call. 
func (b *EtcdStatusApplyConfiguration) WithObservedGeneration(value int64) *EtcdStatusApplyConfiguration { - b.ObservedGeneration = &value + b.OperatorStatusApplyConfiguration.ObservedGeneration = &value return b } @@ -35,7 +35,7 @@ func (b *EtcdStatusApplyConfiguration) WithConditions(values ...*OperatorConditi if values[i] == nil { panic("nil value passed to WithConditions") } - b.Conditions = append(b.Conditions, *values[i]) + b.OperatorStatusApplyConfiguration.Conditions = append(b.OperatorStatusApplyConfiguration.Conditions, *values[i]) } return b } @@ -44,7 +44,7 @@ func (b *EtcdStatusApplyConfiguration) WithConditions(values ...*OperatorConditi // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Version field is set to the value of the last call. func (b *EtcdStatusApplyConfiguration) WithVersion(value string) *EtcdStatusApplyConfiguration { - b.Version = &value + b.OperatorStatusApplyConfiguration.Version = &value return b } @@ -52,7 +52,7 @@ func (b *EtcdStatusApplyConfiguration) WithVersion(value string) *EtcdStatusAppl // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the ReadyReplicas field is set to the value of the last call. func (b *EtcdStatusApplyConfiguration) WithReadyReplicas(value int32) *EtcdStatusApplyConfiguration { - b.ReadyReplicas = &value + b.OperatorStatusApplyConfiguration.ReadyReplicas = &value return b } @@ -60,7 +60,7 @@ func (b *EtcdStatusApplyConfiguration) WithReadyReplicas(value int32) *EtcdStatu // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the LatestAvailableRevision field is set to the value of the last call. func (b *EtcdStatusApplyConfiguration) WithLatestAvailableRevision(value int32) *EtcdStatusApplyConfiguration { - b.LatestAvailableRevision = &value + b.OperatorStatusApplyConfiguration.LatestAvailableRevision = &value return b } @@ -72,7 +72,7 @@ func (b *EtcdStatusApplyConfiguration) WithGenerations(values ...*GenerationStat if values[i] == nil { panic("nil value passed to WithGenerations") } - b.Generations = append(b.Generations, *values[i]) + b.OperatorStatusApplyConfiguration.Generations = append(b.OperatorStatusApplyConfiguration.Generations, *values[i]) } return b } @@ -81,7 +81,7 @@ func (b *EtcdStatusApplyConfiguration) WithGenerations(values ...*GenerationStat // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the LatestAvailableRevisionReason field is set to the value of the last call. 
func (b *EtcdStatusApplyConfiguration) WithLatestAvailableRevisionReason(value string) *EtcdStatusApplyConfiguration { - b.LatestAvailableRevisionReason = &value + b.StaticPodOperatorStatusApplyConfiguration.LatestAvailableRevisionReason = &value return b } @@ -93,7 +93,7 @@ func (b *EtcdStatusApplyConfiguration) WithNodeStatuses(values ...*NodeStatusApp if values[i] == nil { panic("nil value passed to WithNodeStatuses") } - b.NodeStatuses = append(b.NodeStatuses, *values[i]) + b.StaticPodOperatorStatusApplyConfiguration.NodeStatuses = append(b.StaticPodOperatorStatusApplyConfiguration.NodeStatuses, *values[i]) } return b } diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/forwardplugin.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/forwardplugin.go index d6c8d3bbc..5d0112b46 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/forwardplugin.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/forwardplugin.go @@ -3,16 +3,16 @@ package v1 import ( - v1 "github.com/openshift/api/operator/v1" + operatorv1 "github.com/openshift/api/operator/v1" ) // ForwardPluginApplyConfiguration represents a declarative configuration of the ForwardPlugin type for use // with apply. type ForwardPluginApplyConfiguration struct { Upstreams []string `json:"upstreams,omitempty"` - Policy *v1.ForwardingPolicy `json:"policy,omitempty"` + Policy *operatorv1.ForwardingPolicy `json:"policy,omitempty"` TransportConfig *DNSTransportConfigApplyConfiguration `json:"transportConfig,omitempty"` - ProtocolStrategy *v1.ProtocolStrategy `json:"protocolStrategy,omitempty"` + ProtocolStrategy *operatorv1.ProtocolStrategy `json:"protocolStrategy,omitempty"` } // ForwardPluginApplyConfiguration constructs a declarative configuration of the ForwardPlugin type for use with @@ -34,7 +34,7 @@ func (b *ForwardPluginApplyConfiguration) WithUpstreams(values ...string) *Forwa // WithPolicy sets the Policy field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Policy field is set to the value of the last call. -func (b *ForwardPluginApplyConfiguration) WithPolicy(value v1.ForwardingPolicy) *ForwardPluginApplyConfiguration { +func (b *ForwardPluginApplyConfiguration) WithPolicy(value operatorv1.ForwardingPolicy) *ForwardPluginApplyConfiguration { b.Policy = &value return b } @@ -50,7 +50,7 @@ func (b *ForwardPluginApplyConfiguration) WithTransportConfig(value *DNSTranspor // WithProtocolStrategy sets the ProtocolStrategy field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the ProtocolStrategy field is set to the value of the last call. 
-func (b *ForwardPluginApplyConfiguration) WithProtocolStrategy(value v1.ProtocolStrategy) *ForwardPluginApplyConfiguration { +func (b *ForwardPluginApplyConfiguration) WithProtocolStrategy(value operatorv1.ProtocolStrategy) *ForwardPluginApplyConfiguration { b.ProtocolStrategy = &value return b } diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/gatewayconfig.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/gatewayconfig.go index 54fe27431..a18f0400c 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/gatewayconfig.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/gatewayconfig.go @@ -3,14 +3,14 @@ package v1 import ( - v1 "github.com/openshift/api/operator/v1" + operatorv1 "github.com/openshift/api/operator/v1" ) // GatewayConfigApplyConfiguration represents a declarative configuration of the GatewayConfig type for use // with apply. type GatewayConfigApplyConfiguration struct { RoutingViaHost *bool `json:"routingViaHost,omitempty"` - IPForwarding *v1.IPForwardingMode `json:"ipForwarding,omitempty"` + IPForwarding *operatorv1.IPForwardingMode `json:"ipForwarding,omitempty"` IPv4 *IPv4GatewayConfigApplyConfiguration `json:"ipv4,omitempty"` IPv6 *IPv6GatewayConfigApplyConfiguration `json:"ipv6,omitempty"` } @@ -32,7 +32,7 @@ func (b *GatewayConfigApplyConfiguration) WithRoutingViaHost(value bool) *Gatewa // WithIPForwarding sets the IPForwarding field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the IPForwarding field is set to the value of the last call. -func (b *GatewayConfigApplyConfiguration) WithIPForwarding(value v1.IPForwardingMode) *GatewayConfigApplyConfiguration { +func (b *GatewayConfigApplyConfiguration) WithIPForwarding(value operatorv1.IPForwardingMode) *GatewayConfigApplyConfiguration { b.IPForwarding = &value return b } diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/gathererstatus.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/gathererstatus.go index a0d62445e..b2fd36c26 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/gathererstatus.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/gathererstatus.go @@ -3,16 +3,16 @@ package v1 import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) // GathererStatusApplyConfiguration represents a declarative configuration of the GathererStatus type for use // with apply. 
type GathererStatusApplyConfiguration struct { - Conditions []v1.ConditionApplyConfiguration `json:"conditions,omitempty"` - Name *string `json:"name,omitempty"` - LastGatherDuration *metav1.Duration `json:"lastGatherDuration,omitempty"` + Conditions []metav1.ConditionApplyConfiguration `json:"conditions,omitempty"` + Name *string `json:"name,omitempty"` + LastGatherDuration *apismetav1.Duration `json:"lastGatherDuration,omitempty"` } // GathererStatusApplyConfiguration constructs a declarative configuration of the GathererStatus type for use with @@ -24,7 +24,7 @@ func GathererStatus() *GathererStatusApplyConfiguration { // WithConditions adds the given value to the Conditions field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the Conditions field. -func (b *GathererStatusApplyConfiguration) WithConditions(values ...*v1.ConditionApplyConfiguration) *GathererStatusApplyConfiguration { +func (b *GathererStatusApplyConfiguration) WithConditions(values ...*metav1.ConditionApplyConfiguration) *GathererStatusApplyConfiguration { for i := range values { if values[i] == nil { panic("nil value passed to WithConditions") @@ -45,7 +45,7 @@ func (b *GathererStatusApplyConfiguration) WithName(value string) *GathererStatu // WithLastGatherDuration sets the LastGatherDuration field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the LastGatherDuration field is set to the value of the last call. -func (b *GathererStatusApplyConfiguration) WithLastGatherDuration(value metav1.Duration) *GathererStatusApplyConfiguration { +func (b *GathererStatusApplyConfiguration) WithLastGatherDuration(value apismetav1.Duration) *GathererStatusApplyConfiguration { b.LastGatherDuration = &value return b } diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/gatherstatus.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/gatherstatus.go index 413967546..e2601419a 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/gatherstatus.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/gatherstatus.go @@ -3,14 +3,14 @@ package v1 import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) // GatherStatusApplyConfiguration represents a declarative configuration of the GatherStatus type for use // with apply. type GatherStatusApplyConfiguration struct { - LastGatherTime *v1.Time `json:"lastGatherTime,omitempty"` - LastGatherDuration *v1.Duration `json:"lastGatherDuration,omitempty"` + LastGatherTime *metav1.Time `json:"lastGatherTime,omitempty"` + LastGatherDuration *metav1.Duration `json:"lastGatherDuration,omitempty"` Gatherers []GathererStatusApplyConfiguration `json:"gatherers,omitempty"` } @@ -23,7 +23,7 @@ func GatherStatus() *GatherStatusApplyConfiguration { // WithLastGatherTime sets the LastGatherTime field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the LastGatherTime field is set to the value of the last call. 
-func (b *GatherStatusApplyConfiguration) WithLastGatherTime(value v1.Time) *GatherStatusApplyConfiguration { +func (b *GatherStatusApplyConfiguration) WithLastGatherTime(value metav1.Time) *GatherStatusApplyConfiguration { b.LastGatherTime = &value return b } @@ -31,7 +31,7 @@ func (b *GatherStatusApplyConfiguration) WithLastGatherTime(value v1.Time) *Gath // WithLastGatherDuration sets the LastGatherDuration field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the LastGatherDuration field is set to the value of the last call. -func (b *GatherStatusApplyConfiguration) WithLastGatherDuration(value v1.Duration) *GatherStatusApplyConfiguration { +func (b *GatherStatusApplyConfiguration) WithLastGatherDuration(value metav1.Duration) *GatherStatusApplyConfiguration { b.LastGatherDuration = &value return b } diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/gcploadbalancerparameters.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/gcploadbalancerparameters.go index a5e0306c4..dbb621720 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/gcploadbalancerparameters.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/gcploadbalancerparameters.go @@ -3,13 +3,13 @@ package v1 import ( - v1 "github.com/openshift/api/operator/v1" + operatorv1 "github.com/openshift/api/operator/v1" ) // GCPLoadBalancerParametersApplyConfiguration represents a declarative configuration of the GCPLoadBalancerParameters type for use // with apply. type GCPLoadBalancerParametersApplyConfiguration struct { - ClientAccess *v1.GCPClientAccess `json:"clientAccess,omitempty"` + ClientAccess *operatorv1.GCPClientAccess `json:"clientAccess,omitempty"` } // GCPLoadBalancerParametersApplyConfiguration constructs a declarative configuration of the GCPLoadBalancerParameters type for use with @@ -21,7 +21,7 @@ func GCPLoadBalancerParameters() *GCPLoadBalancerParametersApplyConfiguration { // WithClientAccess sets the ClientAccess field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the ClientAccess field is set to the value of the last call. -func (b *GCPLoadBalancerParametersApplyConfiguration) WithClientAccess(value v1.GCPClientAccess) *GCPLoadBalancerParametersApplyConfiguration { +func (b *GCPLoadBalancerParametersApplyConfiguration) WithClientAccess(value operatorv1.GCPClientAccess) *GCPLoadBalancerParametersApplyConfiguration { b.ClientAccess = &value return b } diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/healthcheck.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/healthcheck.go index 4b8d600ba..0f4cfac5a 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/healthcheck.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/healthcheck.go @@ -3,16 +3,16 @@ package v1 import ( - v1 "github.com/openshift/api/operator/v1" + operatorv1 "github.com/openshift/api/operator/v1" ) // HealthCheckApplyConfiguration represents a declarative configuration of the HealthCheck type for use // with apply. 
type HealthCheckApplyConfiguration struct { - Description *string `json:"description,omitempty"` - TotalRisk *int32 `json:"totalRisk,omitempty"` - AdvisorURI *string `json:"advisorURI,omitempty"` - State *v1.HealthCheckState `json:"state,omitempty"` + Description *string `json:"description,omitempty"` + TotalRisk *int32 `json:"totalRisk,omitempty"` + AdvisorURI *string `json:"advisorURI,omitempty"` + State *operatorv1.HealthCheckState `json:"state,omitempty"` } // HealthCheckApplyConfiguration constructs a declarative configuration of the HealthCheck type for use with @@ -48,7 +48,7 @@ func (b *HealthCheckApplyConfiguration) WithAdvisorURI(value string) *HealthChec // WithState sets the State field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the State field is set to the value of the last call. -func (b *HealthCheckApplyConfiguration) WithState(value v1.HealthCheckState) *HealthCheckApplyConfiguration { +func (b *HealthCheckApplyConfiguration) WithState(value operatorv1.HealthCheckState) *HealthCheckApplyConfiguration { b.State = &value return b } diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/hostnetworkstrategy.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/hostnetworkstrategy.go index 2b7e1aadb..a667c16f3 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/hostnetworkstrategy.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/hostnetworkstrategy.go @@ -3,16 +3,16 @@ package v1 import ( - v1 "github.com/openshift/api/operator/v1" + operatorv1 "github.com/openshift/api/operator/v1" ) // HostNetworkStrategyApplyConfiguration represents a declarative configuration of the HostNetworkStrategy type for use // with apply. type HostNetworkStrategyApplyConfiguration struct { - Protocol *v1.IngressControllerProtocol `json:"protocol,omitempty"` - HTTPPort *int32 `json:"httpPort,omitempty"` - HTTPSPort *int32 `json:"httpsPort,omitempty"` - StatsPort *int32 `json:"statsPort,omitempty"` + Protocol *operatorv1.IngressControllerProtocol `json:"protocol,omitempty"` + HTTPPort *int32 `json:"httpPort,omitempty"` + HTTPSPort *int32 `json:"httpsPort,omitempty"` + StatsPort *int32 `json:"statsPort,omitempty"` } // HostNetworkStrategyApplyConfiguration constructs a declarative configuration of the HostNetworkStrategy type for use with @@ -24,7 +24,7 @@ func HostNetworkStrategy() *HostNetworkStrategyApplyConfiguration { // WithProtocol sets the Protocol field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Protocol field is set to the value of the last call. 
-func (b *HostNetworkStrategyApplyConfiguration) WithProtocol(value v1.IngressControllerProtocol) *HostNetworkStrategyApplyConfiguration { +func (b *HostNetworkStrategyApplyConfiguration) WithProtocol(value operatorv1.IngressControllerProtocol) *HostNetworkStrategyApplyConfiguration { b.Protocol = &value return b } diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/httpcompressionpolicy.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/httpcompressionpolicy.go index 2a1b2022a..cd83a0461 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/httpcompressionpolicy.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/httpcompressionpolicy.go @@ -3,13 +3,13 @@ package v1 import ( - v1 "github.com/openshift/api/operator/v1" + operatorv1 "github.com/openshift/api/operator/v1" ) // HTTPCompressionPolicyApplyConfiguration represents a declarative configuration of the HTTPCompressionPolicy type for use // with apply. type HTTPCompressionPolicyApplyConfiguration struct { - MimeTypes []v1.CompressionMIMEType `json:"mimeTypes,omitempty"` + MimeTypes []operatorv1.CompressionMIMEType `json:"mimeTypes,omitempty"` } // HTTPCompressionPolicyApplyConfiguration constructs a declarative configuration of the HTTPCompressionPolicy type for use with @@ -21,7 +21,7 @@ func HTTPCompressionPolicy() *HTTPCompressionPolicyApplyConfiguration { // WithMimeTypes adds the given value to the MimeTypes field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the MimeTypes field. -func (b *HTTPCompressionPolicyApplyConfiguration) WithMimeTypes(values ...v1.CompressionMIMEType) *HTTPCompressionPolicyApplyConfiguration { +func (b *HTTPCompressionPolicyApplyConfiguration) WithMimeTypes(values ...operatorv1.CompressionMIMEType) *HTTPCompressionPolicyApplyConfiguration { for i := range values { b.MimeTypes = append(b.MimeTypes, values[i]) } diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ibmloadbalancerparameters.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ibmloadbalancerparameters.go index e60ab354e..065c61554 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ibmloadbalancerparameters.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ibmloadbalancerparameters.go @@ -3,13 +3,13 @@ package v1 import ( - v1 "github.com/openshift/api/operator/v1" + operatorv1 "github.com/openshift/api/operator/v1" ) // IBMLoadBalancerParametersApplyConfiguration represents a declarative configuration of the IBMLoadBalancerParameters type for use // with apply. type IBMLoadBalancerParametersApplyConfiguration struct { - Protocol *v1.IngressControllerProtocol `json:"protocol,omitempty"` + Protocol *operatorv1.IngressControllerProtocol `json:"protocol,omitempty"` } // IBMLoadBalancerParametersApplyConfiguration constructs a declarative configuration of the IBMLoadBalancerParameters type for use with @@ -21,7 +21,7 @@ func IBMLoadBalancerParameters() *IBMLoadBalancerParametersApplyConfiguration { // WithProtocol sets the Protocol field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. 
// If called multiple times, the Protocol field is set to the value of the last call. -func (b *IBMLoadBalancerParametersApplyConfiguration) WithProtocol(value v1.IngressControllerProtocol) *IBMLoadBalancerParametersApplyConfiguration { +func (b *IBMLoadBalancerParametersApplyConfiguration) WithProtocol(value operatorv1.IngressControllerProtocol) *IBMLoadBalancerParametersApplyConfiguration { b.Protocol = &value return b } diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ingresscontroller.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ingresscontroller.go index a37177b67..e23139014 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ingresscontroller.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ingresscontroller.go @@ -3,21 +3,21 @@ package v1 import ( - apioperatorv1 "github.com/openshift/api/operator/v1" + operatorv1 "github.com/openshift/api/operator/v1" internal "github.com/openshift/client-go/operator/applyconfigurations/internal" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) // IngressControllerApplyConfiguration represents a declarative configuration of the IngressController type for use // with apply. type IngressControllerApplyConfiguration struct { - v1.TypeMetaApplyConfiguration `json:",inline"` - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Spec *IngressControllerSpecApplyConfiguration `json:"spec,omitempty"` - Status *IngressControllerStatusApplyConfiguration `json:"status,omitempty"` + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *IngressControllerSpecApplyConfiguration `json:"spec,omitempty"` + Status *IngressControllerStatusApplyConfiguration `json:"status,omitempty"` } // IngressController constructs a declarative configuration of the IngressController type for use with @@ -42,18 +42,18 @@ func IngressController(name, namespace string) *IngressControllerApplyConfigurat // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! -func ExtractIngressController(ingressController *apioperatorv1.IngressController, fieldManager string) (*IngressControllerApplyConfiguration, error) { +func ExtractIngressController(ingressController *operatorv1.IngressController, fieldManager string) (*IngressControllerApplyConfiguration, error) { return extractIngressController(ingressController, fieldManager, "") } // ExtractIngressControllerStatus is the same as ExtractIngressController except // that it extracts the status subresource applied configuration. // Experimental! 
-func ExtractIngressControllerStatus(ingressController *apioperatorv1.IngressController, fieldManager string) (*IngressControllerApplyConfiguration, error) { +func ExtractIngressControllerStatus(ingressController *operatorv1.IngressController, fieldManager string) (*IngressControllerApplyConfiguration, error) { return extractIngressController(ingressController, fieldManager, "status") } -func extractIngressController(ingressController *apioperatorv1.IngressController, fieldManager string, subresource string) (*IngressControllerApplyConfiguration, error) { +func extractIngressController(ingressController *operatorv1.IngressController, fieldManager string, subresource string) (*IngressControllerApplyConfiguration, error) { b := &IngressControllerApplyConfiguration{} err := managedfields.ExtractInto(ingressController, internal.Parser().Type("com.github.openshift.api.operator.v1.IngressController"), fieldManager, b, subresource) if err != nil { @@ -71,7 +71,7 @@ func extractIngressController(ingressController *apioperatorv1.IngressController // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *IngressControllerApplyConfiguration) WithKind(value string) *IngressControllerApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -79,7 +79,7 @@ func (b *IngressControllerApplyConfiguration) WithKind(value string) *IngressCon // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *IngressControllerApplyConfiguration) WithAPIVersion(value string) *IngressControllerApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -88,7 +88,7 @@ func (b *IngressControllerApplyConfiguration) WithAPIVersion(value string) *Ingr // If called multiple times, the Name field is set to the value of the last call. func (b *IngressControllerApplyConfiguration) WithName(value string) *IngressControllerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -97,7 +97,7 @@ func (b *IngressControllerApplyConfiguration) WithName(value string) *IngressCon // If called multiple times, the GenerateName field is set to the value of the last call. func (b *IngressControllerApplyConfiguration) WithGenerateName(value string) *IngressControllerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -106,7 +106,7 @@ func (b *IngressControllerApplyConfiguration) WithGenerateName(value string) *In // If called multiple times, the Namespace field is set to the value of the last call. func (b *IngressControllerApplyConfiguration) WithNamespace(value string) *IngressControllerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -115,7 +115,7 @@ func (b *IngressControllerApplyConfiguration) WithNamespace(value string) *Ingre // If called multiple times, the UID field is set to the value of the last call. 
func (b *IngressControllerApplyConfiguration) WithUID(value types.UID) *IngressControllerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -124,7 +124,7 @@ func (b *IngressControllerApplyConfiguration) WithUID(value types.UID) *IngressC // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *IngressControllerApplyConfiguration) WithResourceVersion(value string) *IngressControllerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -133,25 +133,25 @@ func (b *IngressControllerApplyConfiguration) WithResourceVersion(value string) // If called multiple times, the Generation field is set to the value of the last call. func (b *IngressControllerApplyConfiguration) WithGeneration(value int64) *IngressControllerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CreationTimestamp field is set to the value of the last call. -func (b *IngressControllerApplyConfiguration) WithCreationTimestamp(value metav1.Time) *IngressControllerApplyConfiguration { +func (b *IngressControllerApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *IngressControllerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. -func (b *IngressControllerApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *IngressControllerApplyConfiguration { +func (b *IngressControllerApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *IngressControllerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -160,7 +160,7 @@ func (b *IngressControllerApplyConfiguration) WithDeletionTimestamp(value metav1 // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *IngressControllerApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *IngressControllerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -170,11 +170,11 @@ func (b *IngressControllerApplyConfiguration) WithDeletionGracePeriodSeconds(val // overwriting an existing map entries in Labels field with the same key. 
func (b *IngressControllerApplyConfiguration) WithLabels(entries map[string]string) *IngressControllerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -185,11 +185,11 @@ func (b *IngressControllerApplyConfiguration) WithLabels(entries map[string]stri // overwriting an existing map entries in Annotations field with the same key. func (b *IngressControllerApplyConfiguration) WithAnnotations(entries map[string]string) *IngressControllerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -197,13 +197,13 @@ func (b *IngressControllerApplyConfiguration) WithAnnotations(entries map[string // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the OwnerReferences field. -func (b *IngressControllerApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *IngressControllerApplyConfiguration { +func (b *IngressControllerApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *IngressControllerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -214,14 +214,14 @@ func (b *IngressControllerApplyConfiguration) WithOwnerReferences(values ...*v1. func (b *IngressControllerApplyConfiguration) WithFinalizers(values ...string) *IngressControllerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } func (b *IngressControllerApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} } } @@ -244,5 +244,5 @@ func (b *IngressControllerApplyConfiguration) WithStatus(value *IngressControlle // GetName retrieves the value of the Name field in the declarative configuration. 
func (b *IngressControllerApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ingresscontrollercapturehttpcookie.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ingresscontrollercapturehttpcookie.go index fd345b643..dbcd3d9e5 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ingresscontrollercapturehttpcookie.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ingresscontrollercapturehttpcookie.go @@ -23,7 +23,7 @@ func IngressControllerCaptureHTTPCookie() *IngressControllerCaptureHTTPCookieApp // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the MatchType field is set to the value of the last call. func (b *IngressControllerCaptureHTTPCookieApplyConfiguration) WithMatchType(value operatorv1.CookieMatchType) *IngressControllerCaptureHTTPCookieApplyConfiguration { - b.MatchType = &value + b.IngressControllerCaptureHTTPCookieUnionApplyConfiguration.MatchType = &value return b } @@ -31,7 +31,7 @@ func (b *IngressControllerCaptureHTTPCookieApplyConfiguration) WithMatchType(val // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Name field is set to the value of the last call. func (b *IngressControllerCaptureHTTPCookieApplyConfiguration) WithName(value string) *IngressControllerCaptureHTTPCookieApplyConfiguration { - b.Name = &value + b.IngressControllerCaptureHTTPCookieUnionApplyConfiguration.Name = &value return b } @@ -39,7 +39,7 @@ func (b *IngressControllerCaptureHTTPCookieApplyConfiguration) WithName(value st // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the NamePrefix field is set to the value of the last call. func (b *IngressControllerCaptureHTTPCookieApplyConfiguration) WithNamePrefix(value string) *IngressControllerCaptureHTTPCookieApplyConfiguration { - b.NamePrefix = &value + b.IngressControllerCaptureHTTPCookieUnionApplyConfiguration.NamePrefix = &value return b } diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ingresscontrollercapturehttpcookieunion.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ingresscontrollercapturehttpcookieunion.go index 57f9fb590..374621a87 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ingresscontrollercapturehttpcookieunion.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ingresscontrollercapturehttpcookieunion.go @@ -3,15 +3,15 @@ package v1 import ( - v1 "github.com/openshift/api/operator/v1" + operatorv1 "github.com/openshift/api/operator/v1" ) // IngressControllerCaptureHTTPCookieUnionApplyConfiguration represents a declarative configuration of the IngressControllerCaptureHTTPCookieUnion type for use // with apply. 
type IngressControllerCaptureHTTPCookieUnionApplyConfiguration struct { - MatchType *v1.CookieMatchType `json:"matchType,omitempty"` - Name *string `json:"name,omitempty"` - NamePrefix *string `json:"namePrefix,omitempty"` + MatchType *operatorv1.CookieMatchType `json:"matchType,omitempty"` + Name *string `json:"name,omitempty"` + NamePrefix *string `json:"namePrefix,omitempty"` } // IngressControllerCaptureHTTPCookieUnionApplyConfiguration constructs a declarative configuration of the IngressControllerCaptureHTTPCookieUnion type for use with @@ -23,7 +23,7 @@ func IngressControllerCaptureHTTPCookieUnion() *IngressControllerCaptureHTTPCook // WithMatchType sets the MatchType field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the MatchType field is set to the value of the last call. -func (b *IngressControllerCaptureHTTPCookieUnionApplyConfiguration) WithMatchType(value v1.CookieMatchType) *IngressControllerCaptureHTTPCookieUnionApplyConfiguration { +func (b *IngressControllerCaptureHTTPCookieUnionApplyConfiguration) WithMatchType(value operatorv1.CookieMatchType) *IngressControllerCaptureHTTPCookieUnionApplyConfiguration { b.MatchType = &value return b } diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ingresscontrollerhttpheaderactionunion.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ingresscontrollerhttpheaderactionunion.go index 2c5a51ffe..f6b146106 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ingresscontrollerhttpheaderactionunion.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ingresscontrollerhttpheaderactionunion.go @@ -3,13 +3,13 @@ package v1 import ( - v1 "github.com/openshift/api/operator/v1" + operatorv1 "github.com/openshift/api/operator/v1" ) // IngressControllerHTTPHeaderActionUnionApplyConfiguration represents a declarative configuration of the IngressControllerHTTPHeaderActionUnion type for use // with apply. type IngressControllerHTTPHeaderActionUnionApplyConfiguration struct { - Type *v1.IngressControllerHTTPHeaderActionType `json:"type,omitempty"` + Type *operatorv1.IngressControllerHTTPHeaderActionType `json:"type,omitempty"` Set *IngressControllerSetHTTPHeaderApplyConfiguration `json:"set,omitempty"` } @@ -22,7 +22,7 @@ func IngressControllerHTTPHeaderActionUnion() *IngressControllerHTTPHeaderAction // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. 
-func (b *IngressControllerHTTPHeaderActionUnionApplyConfiguration) WithType(value v1.IngressControllerHTTPHeaderActionType) *IngressControllerHTTPHeaderActionUnionApplyConfiguration { +func (b *IngressControllerHTTPHeaderActionUnionApplyConfiguration) WithType(value operatorv1.IngressControllerHTTPHeaderActionType) *IngressControllerHTTPHeaderActionUnionApplyConfiguration { b.Type = &value return b } diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ingresscontrollerhttpheaders.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ingresscontrollerhttpheaders.go index f1381d7a7..a972c1fcb 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ingresscontrollerhttpheaders.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ingresscontrollerhttpheaders.go @@ -3,15 +3,15 @@ package v1 import ( - v1 "github.com/openshift/api/operator/v1" + operatorv1 "github.com/openshift/api/operator/v1" ) // IngressControllerHTTPHeadersApplyConfiguration represents a declarative configuration of the IngressControllerHTTPHeaders type for use // with apply. type IngressControllerHTTPHeadersApplyConfiguration struct { - ForwardedHeaderPolicy *v1.IngressControllerHTTPHeaderPolicy `json:"forwardedHeaderPolicy,omitempty"` + ForwardedHeaderPolicy *operatorv1.IngressControllerHTTPHeaderPolicy `json:"forwardedHeaderPolicy,omitempty"` UniqueId *IngressControllerHTTPUniqueIdHeaderPolicyApplyConfiguration `json:"uniqueId,omitempty"` - HeaderNameCaseAdjustments []v1.IngressControllerHTTPHeaderNameCaseAdjustment `json:"headerNameCaseAdjustments,omitempty"` + HeaderNameCaseAdjustments []operatorv1.IngressControllerHTTPHeaderNameCaseAdjustment `json:"headerNameCaseAdjustments,omitempty"` Actions *IngressControllerHTTPHeaderActionsApplyConfiguration `json:"actions,omitempty"` } @@ -24,7 +24,7 @@ func IngressControllerHTTPHeaders() *IngressControllerHTTPHeadersApplyConfigurat // WithForwardedHeaderPolicy sets the ForwardedHeaderPolicy field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the ForwardedHeaderPolicy field is set to the value of the last call. -func (b *IngressControllerHTTPHeadersApplyConfiguration) WithForwardedHeaderPolicy(value v1.IngressControllerHTTPHeaderPolicy) *IngressControllerHTTPHeadersApplyConfiguration { +func (b *IngressControllerHTTPHeadersApplyConfiguration) WithForwardedHeaderPolicy(value operatorv1.IngressControllerHTTPHeaderPolicy) *IngressControllerHTTPHeadersApplyConfiguration { b.ForwardedHeaderPolicy = &value return b } @@ -40,7 +40,7 @@ func (b *IngressControllerHTTPHeadersApplyConfiguration) WithUniqueId(value *Ing // WithHeaderNameCaseAdjustments adds the given value to the HeaderNameCaseAdjustments field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the HeaderNameCaseAdjustments field. 
-func (b *IngressControllerHTTPHeadersApplyConfiguration) WithHeaderNameCaseAdjustments(values ...v1.IngressControllerHTTPHeaderNameCaseAdjustment) *IngressControllerHTTPHeadersApplyConfiguration { +func (b *IngressControllerHTTPHeadersApplyConfiguration) WithHeaderNameCaseAdjustments(values ...operatorv1.IngressControllerHTTPHeaderNameCaseAdjustment) *IngressControllerHTTPHeadersApplyConfiguration { for i := range values { b.HeaderNameCaseAdjustments = append(b.HeaderNameCaseAdjustments, values[i]) } diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ingresscontrollerspec.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ingresscontrollerspec.go index fc57030c7..ae23fe636 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ingresscontrollerspec.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ingresscontrollerspec.go @@ -3,8 +3,8 @@ package v1 import ( - v1 "github.com/openshift/api/config/v1" - apioperatorv1 "github.com/openshift/api/operator/v1" + configv1 "github.com/openshift/api/config/v1" + operatorv1 "github.com/openshift/api/operator/v1" corev1 "k8s.io/api/core/v1" runtime "k8s.io/apimachinery/pkg/runtime" metav1 "k8s.io/client-go/applyconfigurations/meta/v1" @@ -13,23 +13,24 @@ import ( // IngressControllerSpecApplyConfiguration represents a declarative configuration of the IngressControllerSpec type for use // with apply. type IngressControllerSpecApplyConfiguration struct { - Domain *string `json:"domain,omitempty"` - HttpErrorCodePages *v1.ConfigMapNameReference `json:"httpErrorCodePages,omitempty"` - Replicas *int32 `json:"replicas,omitempty"` - EndpointPublishingStrategy *EndpointPublishingStrategyApplyConfiguration `json:"endpointPublishingStrategy,omitempty"` - DefaultCertificate *corev1.LocalObjectReference `json:"defaultCertificate,omitempty"` - NamespaceSelector *metav1.LabelSelectorApplyConfiguration `json:"namespaceSelector,omitempty"` - RouteSelector *metav1.LabelSelectorApplyConfiguration `json:"routeSelector,omitempty"` - NodePlacement *NodePlacementApplyConfiguration `json:"nodePlacement,omitempty"` - TLSSecurityProfile *v1.TLSSecurityProfile `json:"tlsSecurityProfile,omitempty"` - ClientTLS *ClientTLSApplyConfiguration `json:"clientTLS,omitempty"` - RouteAdmission *RouteAdmissionPolicyApplyConfiguration `json:"routeAdmission,omitempty"` - Logging *IngressControllerLoggingApplyConfiguration `json:"logging,omitempty"` - HTTPHeaders *IngressControllerHTTPHeadersApplyConfiguration `json:"httpHeaders,omitempty"` - HTTPEmptyRequestsPolicy *apioperatorv1.HTTPEmptyRequestsPolicy `json:"httpEmptyRequestsPolicy,omitempty"` - TuningOptions *IngressControllerTuningOptionsApplyConfiguration `json:"tuningOptions,omitempty"` - UnsupportedConfigOverrides *runtime.RawExtension `json:"unsupportedConfigOverrides,omitempty"` - HTTPCompression *HTTPCompressionPolicyApplyConfiguration `json:"httpCompression,omitempty"` + Domain *string `json:"domain,omitempty"` + HttpErrorCodePages *configv1.ConfigMapNameReference `json:"httpErrorCodePages,omitempty"` + Replicas *int32 `json:"replicas,omitempty"` + EndpointPublishingStrategy *EndpointPublishingStrategyApplyConfiguration `json:"endpointPublishingStrategy,omitempty"` + DefaultCertificate *corev1.LocalObjectReference `json:"defaultCertificate,omitempty"` + NamespaceSelector *metav1.LabelSelectorApplyConfiguration `json:"namespaceSelector,omitempty"` + RouteSelector 
*metav1.LabelSelectorApplyConfiguration `json:"routeSelector,omitempty"` + NodePlacement *NodePlacementApplyConfiguration `json:"nodePlacement,omitempty"` + TLSSecurityProfile *configv1.TLSSecurityProfile `json:"tlsSecurityProfile,omitempty"` + ClientTLS *ClientTLSApplyConfiguration `json:"clientTLS,omitempty"` + RouteAdmission *RouteAdmissionPolicyApplyConfiguration `json:"routeAdmission,omitempty"` + Logging *IngressControllerLoggingApplyConfiguration `json:"logging,omitempty"` + HTTPHeaders *IngressControllerHTTPHeadersApplyConfiguration `json:"httpHeaders,omitempty"` + HTTPEmptyRequestsPolicy *operatorv1.HTTPEmptyRequestsPolicy `json:"httpEmptyRequestsPolicy,omitempty"` + TuningOptions *IngressControllerTuningOptionsApplyConfiguration `json:"tuningOptions,omitempty"` + UnsupportedConfigOverrides *runtime.RawExtension `json:"unsupportedConfigOverrides,omitempty"` + HTTPCompression *HTTPCompressionPolicyApplyConfiguration `json:"httpCompression,omitempty"` + IdleConnectionTerminationPolicy *operatorv1.IngressControllerConnectionTerminationPolicy `json:"idleConnectionTerminationPolicy,omitempty"` } // IngressControllerSpecApplyConfiguration constructs a declarative configuration of the IngressControllerSpec type for use with @@ -49,7 +50,7 @@ func (b *IngressControllerSpecApplyConfiguration) WithDomain(value string) *Ingr // WithHttpErrorCodePages sets the HttpErrorCodePages field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the HttpErrorCodePages field is set to the value of the last call. -func (b *IngressControllerSpecApplyConfiguration) WithHttpErrorCodePages(value v1.ConfigMapNameReference) *IngressControllerSpecApplyConfiguration { +func (b *IngressControllerSpecApplyConfiguration) WithHttpErrorCodePages(value configv1.ConfigMapNameReference) *IngressControllerSpecApplyConfiguration { b.HttpErrorCodePages = &value return b } @@ -105,7 +106,7 @@ func (b *IngressControllerSpecApplyConfiguration) WithNodePlacement(value *NodeP // WithTLSSecurityProfile sets the TLSSecurityProfile field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the TLSSecurityProfile field is set to the value of the last call. -func (b *IngressControllerSpecApplyConfiguration) WithTLSSecurityProfile(value v1.TLSSecurityProfile) *IngressControllerSpecApplyConfiguration { +func (b *IngressControllerSpecApplyConfiguration) WithTLSSecurityProfile(value configv1.TLSSecurityProfile) *IngressControllerSpecApplyConfiguration { b.TLSSecurityProfile = &value return b } @@ -145,7 +146,7 @@ func (b *IngressControllerSpecApplyConfiguration) WithHTTPHeaders(value *Ingress // WithHTTPEmptyRequestsPolicy sets the HTTPEmptyRequestsPolicy field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the HTTPEmptyRequestsPolicy field is set to the value of the last call. 
-func (b *IngressControllerSpecApplyConfiguration) WithHTTPEmptyRequestsPolicy(value apioperatorv1.HTTPEmptyRequestsPolicy) *IngressControllerSpecApplyConfiguration { +func (b *IngressControllerSpecApplyConfiguration) WithHTTPEmptyRequestsPolicy(value operatorv1.HTTPEmptyRequestsPolicy) *IngressControllerSpecApplyConfiguration { b.HTTPEmptyRequestsPolicy = &value return b } @@ -173,3 +174,11 @@ func (b *IngressControllerSpecApplyConfiguration) WithHTTPCompression(value *HTT b.HTTPCompression = value return b } + +// WithIdleConnectionTerminationPolicy sets the IdleConnectionTerminationPolicy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the IdleConnectionTerminationPolicy field is set to the value of the last call. +func (b *IngressControllerSpecApplyConfiguration) WithIdleConnectionTerminationPolicy(value operatorv1.IngressControllerConnectionTerminationPolicy) *IngressControllerSpecApplyConfiguration { + b.IdleConnectionTerminationPolicy = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ingresscontrollertuningoptions.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ingresscontrollertuningoptions.go index 39a4ef113..122801cf1 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ingresscontrollertuningoptions.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ingresscontrollertuningoptions.go @@ -3,25 +3,25 @@ package v1 import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) // IngressControllerTuningOptionsApplyConfiguration represents a declarative configuration of the IngressControllerTuningOptions type for use // with apply. 
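Usage sketch (illustrative, not part of the vendored files): the ingresscontrollerspec.go hunks above add an IdleConnectionTerminationPolicy field and a WithIdleConnectionTerminationPolicy setter to IngressControllerSpecApplyConfiguration. A minimal caller-side sketch, assuming the usual generated IngressControllerSpec() constructor; the "Deferred" value is a placeholder for the constants declared in github.com/openshift/api/operator/v1:

package main

import (
	"fmt"

	operatorv1 "github.com/openshift/api/operator/v1"
	applyoperatorv1 "github.com/openshift/client-go/operator/applyconfigurations/operator/v1"
)

func main() {
	// Build an IngressControllerSpec apply configuration that also sets the
	// newly added idleConnectionTerminationPolicy field. "Deferred" is an
	// illustrative value; real code should use the constants from
	// github.com/openshift/api/operator/v1.
	spec := applyoperatorv1.IngressControllerSpec().
		WithDomain("apps.example.com").
		WithIdleConnectionTerminationPolicy(operatorv1.IngressControllerConnectionTerminationPolicy("Deferred"))

	fmt.Println(*spec.Domain, *spec.IdleConnectionTerminationPolicy)
}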
type IngressControllerTuningOptionsApplyConfiguration struct { - HeaderBufferBytes *int32 `json:"headerBufferBytes,omitempty"` - HeaderBufferMaxRewriteBytes *int32 `json:"headerBufferMaxRewriteBytes,omitempty"` - ThreadCount *int32 `json:"threadCount,omitempty"` - ClientTimeout *v1.Duration `json:"clientTimeout,omitempty"` - ClientFinTimeout *v1.Duration `json:"clientFinTimeout,omitempty"` - ServerTimeout *v1.Duration `json:"serverTimeout,omitempty"` - ServerFinTimeout *v1.Duration `json:"serverFinTimeout,omitempty"` - TunnelTimeout *v1.Duration `json:"tunnelTimeout,omitempty"` - ConnectTimeout *v1.Duration `json:"connectTimeout,omitempty"` - TLSInspectDelay *v1.Duration `json:"tlsInspectDelay,omitempty"` - HealthCheckInterval *v1.Duration `json:"healthCheckInterval,omitempty"` - MaxConnections *int32 `json:"maxConnections,omitempty"` - ReloadInterval *v1.Duration `json:"reloadInterval,omitempty"` + HeaderBufferBytes *int32 `json:"headerBufferBytes,omitempty"` + HeaderBufferMaxRewriteBytes *int32 `json:"headerBufferMaxRewriteBytes,omitempty"` + ThreadCount *int32 `json:"threadCount,omitempty"` + ClientTimeout *metav1.Duration `json:"clientTimeout,omitempty"` + ClientFinTimeout *metav1.Duration `json:"clientFinTimeout,omitempty"` + ServerTimeout *metav1.Duration `json:"serverTimeout,omitempty"` + ServerFinTimeout *metav1.Duration `json:"serverFinTimeout,omitempty"` + TunnelTimeout *metav1.Duration `json:"tunnelTimeout,omitempty"` + ConnectTimeout *metav1.Duration `json:"connectTimeout,omitempty"` + TLSInspectDelay *metav1.Duration `json:"tlsInspectDelay,omitempty"` + HealthCheckInterval *metav1.Duration `json:"healthCheckInterval,omitempty"` + MaxConnections *int32 `json:"maxConnections,omitempty"` + ReloadInterval *metav1.Duration `json:"reloadInterval,omitempty"` } // IngressControllerTuningOptionsApplyConfiguration constructs a declarative configuration of the IngressControllerTuningOptions type for use with @@ -57,7 +57,7 @@ func (b *IngressControllerTuningOptionsApplyConfiguration) WithThreadCount(value // WithClientTimeout sets the ClientTimeout field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the ClientTimeout field is set to the value of the last call. -func (b *IngressControllerTuningOptionsApplyConfiguration) WithClientTimeout(value v1.Duration) *IngressControllerTuningOptionsApplyConfiguration { +func (b *IngressControllerTuningOptionsApplyConfiguration) WithClientTimeout(value metav1.Duration) *IngressControllerTuningOptionsApplyConfiguration { b.ClientTimeout = &value return b } @@ -65,7 +65,7 @@ func (b *IngressControllerTuningOptionsApplyConfiguration) WithClientTimeout(val // WithClientFinTimeout sets the ClientFinTimeout field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the ClientFinTimeout field is set to the value of the last call. 
-func (b *IngressControllerTuningOptionsApplyConfiguration) WithClientFinTimeout(value v1.Duration) *IngressControllerTuningOptionsApplyConfiguration { +func (b *IngressControllerTuningOptionsApplyConfiguration) WithClientFinTimeout(value metav1.Duration) *IngressControllerTuningOptionsApplyConfiguration { b.ClientFinTimeout = &value return b } @@ -73,7 +73,7 @@ func (b *IngressControllerTuningOptionsApplyConfiguration) WithClientFinTimeout( // WithServerTimeout sets the ServerTimeout field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the ServerTimeout field is set to the value of the last call. -func (b *IngressControllerTuningOptionsApplyConfiguration) WithServerTimeout(value v1.Duration) *IngressControllerTuningOptionsApplyConfiguration { +func (b *IngressControllerTuningOptionsApplyConfiguration) WithServerTimeout(value metav1.Duration) *IngressControllerTuningOptionsApplyConfiguration { b.ServerTimeout = &value return b } @@ -81,7 +81,7 @@ func (b *IngressControllerTuningOptionsApplyConfiguration) WithServerTimeout(val // WithServerFinTimeout sets the ServerFinTimeout field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the ServerFinTimeout field is set to the value of the last call. -func (b *IngressControllerTuningOptionsApplyConfiguration) WithServerFinTimeout(value v1.Duration) *IngressControllerTuningOptionsApplyConfiguration { +func (b *IngressControllerTuningOptionsApplyConfiguration) WithServerFinTimeout(value metav1.Duration) *IngressControllerTuningOptionsApplyConfiguration { b.ServerFinTimeout = &value return b } @@ -89,7 +89,7 @@ func (b *IngressControllerTuningOptionsApplyConfiguration) WithServerFinTimeout( // WithTunnelTimeout sets the TunnelTimeout field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the TunnelTimeout field is set to the value of the last call. -func (b *IngressControllerTuningOptionsApplyConfiguration) WithTunnelTimeout(value v1.Duration) *IngressControllerTuningOptionsApplyConfiguration { +func (b *IngressControllerTuningOptionsApplyConfiguration) WithTunnelTimeout(value metav1.Duration) *IngressControllerTuningOptionsApplyConfiguration { b.TunnelTimeout = &value return b } @@ -97,7 +97,7 @@ func (b *IngressControllerTuningOptionsApplyConfiguration) WithTunnelTimeout(val // WithConnectTimeout sets the ConnectTimeout field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the ConnectTimeout field is set to the value of the last call. 
-func (b *IngressControllerTuningOptionsApplyConfiguration) WithConnectTimeout(value v1.Duration) *IngressControllerTuningOptionsApplyConfiguration { +func (b *IngressControllerTuningOptionsApplyConfiguration) WithConnectTimeout(value metav1.Duration) *IngressControllerTuningOptionsApplyConfiguration { b.ConnectTimeout = &value return b } @@ -105,7 +105,7 @@ func (b *IngressControllerTuningOptionsApplyConfiguration) WithConnectTimeout(va // WithTLSInspectDelay sets the TLSInspectDelay field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the TLSInspectDelay field is set to the value of the last call. -func (b *IngressControllerTuningOptionsApplyConfiguration) WithTLSInspectDelay(value v1.Duration) *IngressControllerTuningOptionsApplyConfiguration { +func (b *IngressControllerTuningOptionsApplyConfiguration) WithTLSInspectDelay(value metav1.Duration) *IngressControllerTuningOptionsApplyConfiguration { b.TLSInspectDelay = &value return b } @@ -113,7 +113,7 @@ func (b *IngressControllerTuningOptionsApplyConfiguration) WithTLSInspectDelay(v // WithHealthCheckInterval sets the HealthCheckInterval field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the HealthCheckInterval field is set to the value of the last call. -func (b *IngressControllerTuningOptionsApplyConfiguration) WithHealthCheckInterval(value v1.Duration) *IngressControllerTuningOptionsApplyConfiguration { +func (b *IngressControllerTuningOptionsApplyConfiguration) WithHealthCheckInterval(value metav1.Duration) *IngressControllerTuningOptionsApplyConfiguration { b.HealthCheckInterval = &value return b } @@ -129,7 +129,7 @@ func (b *IngressControllerTuningOptionsApplyConfiguration) WithMaxConnections(va // WithReloadInterval sets the ReloadInterval field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the ReloadInterval field is set to the value of the last call. 
-func (b *IngressControllerTuningOptionsApplyConfiguration) WithReloadInterval(value v1.Duration) *IngressControllerTuningOptionsApplyConfiguration { +func (b *IngressControllerTuningOptionsApplyConfiguration) WithReloadInterval(value metav1.Duration) *IngressControllerTuningOptionsApplyConfiguration { b.ReloadInterval = &value return b } diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/insightsoperator.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/insightsoperator.go index 1b3d80040..b694f1ca3 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/insightsoperator.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/insightsoperator.go @@ -3,21 +3,21 @@ package v1 import ( - apioperatorv1 "github.com/openshift/api/operator/v1" + operatorv1 "github.com/openshift/api/operator/v1" internal "github.com/openshift/client-go/operator/applyconfigurations/internal" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) // InsightsOperatorApplyConfiguration represents a declarative configuration of the InsightsOperator type for use // with apply. type InsightsOperatorApplyConfiguration struct { - v1.TypeMetaApplyConfiguration `json:",inline"` - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Spec *InsightsOperatorSpecApplyConfiguration `json:"spec,omitempty"` - Status *InsightsOperatorStatusApplyConfiguration `json:"status,omitempty"` + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *InsightsOperatorSpecApplyConfiguration `json:"spec,omitempty"` + Status *InsightsOperatorStatusApplyConfiguration `json:"status,omitempty"` } // InsightsOperator constructs a declarative configuration of the InsightsOperator type for use with @@ -41,18 +41,18 @@ func InsightsOperator(name string) *InsightsOperatorApplyConfiguration { // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! -func ExtractInsightsOperator(insightsOperator *apioperatorv1.InsightsOperator, fieldManager string) (*InsightsOperatorApplyConfiguration, error) { +func ExtractInsightsOperator(insightsOperator *operatorv1.InsightsOperator, fieldManager string) (*InsightsOperatorApplyConfiguration, error) { return extractInsightsOperator(insightsOperator, fieldManager, "") } // ExtractInsightsOperatorStatus is the same as ExtractInsightsOperator except // that it extracts the status subresource applied configuration. // Experimental! 
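Usage sketch (illustrative, not part of the vendored files): the ingresscontrollertuningoptions.go hunks above only rename the meta/v1 import alias from v1 to metav1, so callers keep passing metav1.Duration values to the generated setters. Assuming the usual generated IngressControllerTuningOptions() constructor:

package main

import (
	"time"

	applyoperatorv1 "github.com/openshift/client-go/operator/applyconfigurations/operator/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// The alias change is cosmetic for callers: the builder still takes
	// metav1.Duration values exactly as before the regeneration.
	_ = applyoperatorv1.IngressControllerTuningOptions().
		WithClientTimeout(metav1.Duration{Duration: 30 * time.Second}).
		WithReloadInterval(metav1.Duration{Duration: 5 * time.Second})
}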
-func ExtractInsightsOperatorStatus(insightsOperator *apioperatorv1.InsightsOperator, fieldManager string) (*InsightsOperatorApplyConfiguration, error) { +func ExtractInsightsOperatorStatus(insightsOperator *operatorv1.InsightsOperator, fieldManager string) (*InsightsOperatorApplyConfiguration, error) { return extractInsightsOperator(insightsOperator, fieldManager, "status") } -func extractInsightsOperator(insightsOperator *apioperatorv1.InsightsOperator, fieldManager string, subresource string) (*InsightsOperatorApplyConfiguration, error) { +func extractInsightsOperator(insightsOperator *operatorv1.InsightsOperator, fieldManager string, subresource string) (*InsightsOperatorApplyConfiguration, error) { b := &InsightsOperatorApplyConfiguration{} err := managedfields.ExtractInto(insightsOperator, internal.Parser().Type("com.github.openshift.api.operator.v1.InsightsOperator"), fieldManager, b, subresource) if err != nil { @@ -69,7 +69,7 @@ func extractInsightsOperator(insightsOperator *apioperatorv1.InsightsOperator, f // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *InsightsOperatorApplyConfiguration) WithKind(value string) *InsightsOperatorApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -77,7 +77,7 @@ func (b *InsightsOperatorApplyConfiguration) WithKind(value string) *InsightsOpe // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *InsightsOperatorApplyConfiguration) WithAPIVersion(value string) *InsightsOperatorApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -86,7 +86,7 @@ func (b *InsightsOperatorApplyConfiguration) WithAPIVersion(value string) *Insig // If called multiple times, the Name field is set to the value of the last call. func (b *InsightsOperatorApplyConfiguration) WithName(value string) *InsightsOperatorApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -95,7 +95,7 @@ func (b *InsightsOperatorApplyConfiguration) WithName(value string) *InsightsOpe // If called multiple times, the GenerateName field is set to the value of the last call. func (b *InsightsOperatorApplyConfiguration) WithGenerateName(value string) *InsightsOperatorApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -104,7 +104,7 @@ func (b *InsightsOperatorApplyConfiguration) WithGenerateName(value string) *Ins // If called multiple times, the Namespace field is set to the value of the last call. func (b *InsightsOperatorApplyConfiguration) WithNamespace(value string) *InsightsOperatorApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -113,7 +113,7 @@ func (b *InsightsOperatorApplyConfiguration) WithNamespace(value string) *Insigh // If called multiple times, the UID field is set to the value of the last call. 
func (b *InsightsOperatorApplyConfiguration) WithUID(value types.UID) *InsightsOperatorApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -122,7 +122,7 @@ func (b *InsightsOperatorApplyConfiguration) WithUID(value types.UID) *InsightsO // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *InsightsOperatorApplyConfiguration) WithResourceVersion(value string) *InsightsOperatorApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -131,25 +131,25 @@ func (b *InsightsOperatorApplyConfiguration) WithResourceVersion(value string) * // If called multiple times, the Generation field is set to the value of the last call. func (b *InsightsOperatorApplyConfiguration) WithGeneration(value int64) *InsightsOperatorApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CreationTimestamp field is set to the value of the last call. -func (b *InsightsOperatorApplyConfiguration) WithCreationTimestamp(value metav1.Time) *InsightsOperatorApplyConfiguration { +func (b *InsightsOperatorApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *InsightsOperatorApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. -func (b *InsightsOperatorApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *InsightsOperatorApplyConfiguration { +func (b *InsightsOperatorApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *InsightsOperatorApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -158,7 +158,7 @@ func (b *InsightsOperatorApplyConfiguration) WithDeletionTimestamp(value metav1. // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *InsightsOperatorApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *InsightsOperatorApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -168,11 +168,11 @@ func (b *InsightsOperatorApplyConfiguration) WithDeletionGracePeriodSeconds(valu // overwriting an existing map entries in Labels field with the same key. 
func (b *InsightsOperatorApplyConfiguration) WithLabels(entries map[string]string) *InsightsOperatorApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -183,11 +183,11 @@ func (b *InsightsOperatorApplyConfiguration) WithLabels(entries map[string]strin // overwriting an existing map entries in Annotations field with the same key. func (b *InsightsOperatorApplyConfiguration) WithAnnotations(entries map[string]string) *InsightsOperatorApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -195,13 +195,13 @@ func (b *InsightsOperatorApplyConfiguration) WithAnnotations(entries map[string] // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the OwnerReferences field. -func (b *InsightsOperatorApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *InsightsOperatorApplyConfiguration { +func (b *InsightsOperatorApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *InsightsOperatorApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -212,14 +212,14 @@ func (b *InsightsOperatorApplyConfiguration) WithOwnerReferences(values ...*v1.O func (b *InsightsOperatorApplyConfiguration) WithFinalizers(values ...string) *InsightsOperatorApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } func (b *InsightsOperatorApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} } } @@ -242,5 +242,5 @@ func (b *InsightsOperatorApplyConfiguration) WithStatus(value *InsightsOperatorS // GetName retrieves the value of the Name field in the declarative configuration. 
func (b *InsightsOperatorApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/insightsoperatorspec.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/insightsoperatorspec.go index 4f9b356a9..c6085db4a 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/insightsoperatorspec.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/insightsoperatorspec.go @@ -23,7 +23,7 @@ func InsightsOperatorSpec() *InsightsOperatorSpecApplyConfiguration { // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the ManagementState field is set to the value of the last call. func (b *InsightsOperatorSpecApplyConfiguration) WithManagementState(value operatorv1.ManagementState) *InsightsOperatorSpecApplyConfiguration { - b.ManagementState = &value + b.OperatorSpecApplyConfiguration.ManagementState = &value return b } @@ -31,7 +31,7 @@ func (b *InsightsOperatorSpecApplyConfiguration) WithManagementState(value opera // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the LogLevel field is set to the value of the last call. func (b *InsightsOperatorSpecApplyConfiguration) WithLogLevel(value operatorv1.LogLevel) *InsightsOperatorSpecApplyConfiguration { - b.LogLevel = &value + b.OperatorSpecApplyConfiguration.LogLevel = &value return b } @@ -39,7 +39,7 @@ func (b *InsightsOperatorSpecApplyConfiguration) WithLogLevel(value operatorv1.L // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the OperatorLogLevel field is set to the value of the last call. func (b *InsightsOperatorSpecApplyConfiguration) WithOperatorLogLevel(value operatorv1.LogLevel) *InsightsOperatorSpecApplyConfiguration { - b.OperatorLogLevel = &value + b.OperatorSpecApplyConfiguration.OperatorLogLevel = &value return b } @@ -47,7 +47,7 @@ func (b *InsightsOperatorSpecApplyConfiguration) WithOperatorLogLevel(value oper // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the UnsupportedConfigOverrides field is set to the value of the last call. func (b *InsightsOperatorSpecApplyConfiguration) WithUnsupportedConfigOverrides(value runtime.RawExtension) *InsightsOperatorSpecApplyConfiguration { - b.UnsupportedConfigOverrides = &value + b.OperatorSpecApplyConfiguration.UnsupportedConfigOverrides = &value return b } @@ -55,6 +55,6 @@ func (b *InsightsOperatorSpecApplyConfiguration) WithUnsupportedConfigOverrides( // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the ObservedConfig field is set to the value of the last call. 
func (b *InsightsOperatorSpecApplyConfiguration) WithObservedConfig(value runtime.RawExtension) *InsightsOperatorSpecApplyConfiguration { - b.ObservedConfig = &value + b.OperatorSpecApplyConfiguration.ObservedConfig = &value return b } diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/insightsoperatorstatus.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/insightsoperatorstatus.go index 2cb67f8da..2c679168d 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/insightsoperatorstatus.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/insightsoperatorstatus.go @@ -20,7 +20,7 @@ func InsightsOperatorStatus() *InsightsOperatorStatusApplyConfiguration { // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the ObservedGeneration field is set to the value of the last call. func (b *InsightsOperatorStatusApplyConfiguration) WithObservedGeneration(value int64) *InsightsOperatorStatusApplyConfiguration { - b.ObservedGeneration = &value + b.OperatorStatusApplyConfiguration.ObservedGeneration = &value return b } @@ -32,7 +32,7 @@ func (b *InsightsOperatorStatusApplyConfiguration) WithConditions(values ...*Ope if values[i] == nil { panic("nil value passed to WithConditions") } - b.Conditions = append(b.Conditions, *values[i]) + b.OperatorStatusApplyConfiguration.Conditions = append(b.OperatorStatusApplyConfiguration.Conditions, *values[i]) } return b } @@ -41,7 +41,7 @@ func (b *InsightsOperatorStatusApplyConfiguration) WithConditions(values ...*Ope // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Version field is set to the value of the last call. func (b *InsightsOperatorStatusApplyConfiguration) WithVersion(value string) *InsightsOperatorStatusApplyConfiguration { - b.Version = &value + b.OperatorStatusApplyConfiguration.Version = &value return b } @@ -49,7 +49,7 @@ func (b *InsightsOperatorStatusApplyConfiguration) WithVersion(value string) *In // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the ReadyReplicas field is set to the value of the last call. func (b *InsightsOperatorStatusApplyConfiguration) WithReadyReplicas(value int32) *InsightsOperatorStatusApplyConfiguration { - b.ReadyReplicas = &value + b.OperatorStatusApplyConfiguration.ReadyReplicas = &value return b } @@ -57,7 +57,7 @@ func (b *InsightsOperatorStatusApplyConfiguration) WithReadyReplicas(value int32 // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the LatestAvailableRevision field is set to the value of the last call. 
func (b *InsightsOperatorStatusApplyConfiguration) WithLatestAvailableRevision(value int32) *InsightsOperatorStatusApplyConfiguration { - b.LatestAvailableRevision = &value + b.OperatorStatusApplyConfiguration.LatestAvailableRevision = &value return b } @@ -69,7 +69,7 @@ func (b *InsightsOperatorStatusApplyConfiguration) WithGenerations(values ...*Ge if values[i] == nil { panic("nil value passed to WithGenerations") } - b.Generations = append(b.Generations, *values[i]) + b.OperatorStatusApplyConfiguration.Generations = append(b.OperatorStatusApplyConfiguration.Generations, *values[i]) } return b } diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/insightsreport.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/insightsreport.go index 63de379da..ce89fca0f 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/insightsreport.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/insightsreport.go @@ -3,13 +3,13 @@ package v1 import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) // InsightsReportApplyConfiguration represents a declarative configuration of the InsightsReport type for use // with apply. type InsightsReportApplyConfiguration struct { - DownloadedAt *v1.Time `json:"downloadedAt,omitempty"` + DownloadedAt *metav1.Time `json:"downloadedAt,omitempty"` HealthChecks []HealthCheckApplyConfiguration `json:"healthChecks,omitempty"` } @@ -22,7 +22,7 @@ func InsightsReport() *InsightsReportApplyConfiguration { // WithDownloadedAt sets the DownloadedAt field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DownloadedAt field is set to the value of the last call. -func (b *InsightsReportApplyConfiguration) WithDownloadedAt(value v1.Time) *InsightsReportApplyConfiguration { +func (b *InsightsReportApplyConfiguration) WithDownloadedAt(value metav1.Time) *InsightsReportApplyConfiguration { b.DownloadedAt = &value return b } diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ipamconfig.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ipamconfig.go index ad8f7bfc9..c2cbc3069 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ipamconfig.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ipamconfig.go @@ -3,13 +3,13 @@ package v1 import ( - v1 "github.com/openshift/api/operator/v1" + operatorv1 "github.com/openshift/api/operator/v1" ) // IPAMConfigApplyConfiguration represents a declarative configuration of the IPAMConfig type for use // with apply. type IPAMConfigApplyConfiguration struct { - Type *v1.IPAMType `json:"type,omitempty"` + Type *operatorv1.IPAMType `json:"type,omitempty"` StaticIPAMConfig *StaticIPAMConfigApplyConfiguration `json:"staticIPAMConfig,omitempty"` } @@ -22,7 +22,7 @@ func IPAMConfig() *IPAMConfigApplyConfiguration { // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. 
-func (b *IPAMConfigApplyConfiguration) WithType(value v1.IPAMType) *IPAMConfigApplyConfiguration { +func (b *IPAMConfigApplyConfiguration) WithType(value operatorv1.IPAMType) *IPAMConfigApplyConfiguration { b.Type = &value return b } diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ipfixconfig.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ipfixconfig.go index 52334e2ac..c9bee3327 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ipfixconfig.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ipfixconfig.go @@ -3,13 +3,13 @@ package v1 import ( - v1 "github.com/openshift/api/operator/v1" + operatorv1 "github.com/openshift/api/operator/v1" ) // IPFIXConfigApplyConfiguration represents a declarative configuration of the IPFIXConfig type for use // with apply. type IPFIXConfigApplyConfiguration struct { - Collectors []v1.IPPort `json:"collectors,omitempty"` + Collectors []operatorv1.IPPort `json:"collectors,omitempty"` } // IPFIXConfigApplyConfiguration constructs a declarative configuration of the IPFIXConfig type for use with @@ -21,7 +21,7 @@ func IPFIXConfig() *IPFIXConfigApplyConfiguration { // WithCollectors adds the given value to the Collectors field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the Collectors field. -func (b *IPFIXConfigApplyConfiguration) WithCollectors(values ...v1.IPPort) *IPFIXConfigApplyConfiguration { +func (b *IPFIXConfigApplyConfiguration) WithCollectors(values ...operatorv1.IPPort) *IPFIXConfigApplyConfiguration { for i := range values { b.Collectors = append(b.Collectors, values[i]) } diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ipsecconfig.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ipsecconfig.go index ebe738b9f..eb4fc9207 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ipsecconfig.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ipsecconfig.go @@ -3,13 +3,14 @@ package v1 import ( - v1 "github.com/openshift/api/operator/v1" + operatorv1 "github.com/openshift/api/operator/v1" ) // IPsecConfigApplyConfiguration represents a declarative configuration of the IPsecConfig type for use // with apply. type IPsecConfigApplyConfiguration struct { - Mode *v1.IPsecMode `json:"mode,omitempty"` + Mode *operatorv1.IPsecMode `json:"mode,omitempty"` + Full *IPsecFullModeConfigApplyConfiguration `json:"full,omitempty"` } // IPsecConfigApplyConfiguration constructs a declarative configuration of the IPsecConfig type for use with @@ -21,7 +22,15 @@ func IPsecConfig() *IPsecConfigApplyConfiguration { // WithMode sets the Mode field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Mode field is set to the value of the last call. 
-func (b *IPsecConfigApplyConfiguration) WithMode(value v1.IPsecMode) *IPsecConfigApplyConfiguration { +func (b *IPsecConfigApplyConfiguration) WithMode(value operatorv1.IPsecMode) *IPsecConfigApplyConfiguration { b.Mode = &value return b } + +// WithFull sets the Full field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Full field is set to the value of the last call. +func (b *IPsecConfigApplyConfiguration) WithFull(value *IPsecFullModeConfigApplyConfiguration) *IPsecConfigApplyConfiguration { + b.Full = value + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ipsecfullmodeconfig.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ipsecfullmodeconfig.go new file mode 100644 index 000000000..208a4229c --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ipsecfullmodeconfig.go @@ -0,0 +1,27 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + operatorv1 "github.com/openshift/api/operator/v1" +) + +// IPsecFullModeConfigApplyConfiguration represents a declarative configuration of the IPsecFullModeConfig type for use +// with apply. +type IPsecFullModeConfigApplyConfiguration struct { + Encapsulation *operatorv1.Encapsulation `json:"encapsulation,omitempty"` +} + +// IPsecFullModeConfigApplyConfiguration constructs a declarative configuration of the IPsecFullModeConfig type for use with +// apply. +func IPsecFullModeConfig() *IPsecFullModeConfigApplyConfiguration { + return &IPsecFullModeConfigApplyConfiguration{} +} + +// WithEncapsulation sets the Encapsulation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Encapsulation field is set to the value of the last call. +func (b *IPsecFullModeConfigApplyConfiguration) WithEncapsulation(value operatorv1.Encapsulation) *IPsecFullModeConfigApplyConfiguration { + b.Encapsulation = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/kubeapiserver.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/kubeapiserver.go index d4ee463df..8ff4292bc 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/kubeapiserver.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/kubeapiserver.go @@ -3,21 +3,21 @@ package v1 import ( - apioperatorv1 "github.com/openshift/api/operator/v1" + operatorv1 "github.com/openshift/api/operator/v1" internal "github.com/openshift/client-go/operator/applyconfigurations/internal" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) // KubeAPIServerApplyConfiguration represents a declarative configuration of the KubeAPIServer type for use // with apply. 
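Usage sketch (illustrative, not part of the vendored files): the ipsecconfig.go and ipsecfullmodeconfig.go hunks above introduce a Full field, a WithFull setter, and a new IPsecFullModeConfig builder. Chaining them could look like the following; the "Full" and "Always" strings are placeholders for the IPsecMode and Encapsulation constants defined in github.com/openshift/api/operator/v1:

package main

import (
	operatorv1 "github.com/openshift/api/operator/v1"
	applyoperatorv1 "github.com/openshift/client-go/operator/applyconfigurations/operator/v1"
)

func main() {
	// Configure full-mode IPsec with an explicit encapsulation preference
	// using the newly generated builders.
	_ = applyoperatorv1.IPsecConfig().
		WithMode(operatorv1.IPsecMode("Full")).
		WithFull(applyoperatorv1.IPsecFullModeConfig().
			WithEncapsulation(operatorv1.Encapsulation("Always")))
}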
type KubeAPIServerApplyConfiguration struct { - v1.TypeMetaApplyConfiguration `json:",inline"` - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Spec *KubeAPIServerSpecApplyConfiguration `json:"spec,omitempty"` - Status *KubeAPIServerStatusApplyConfiguration `json:"status,omitempty"` + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *KubeAPIServerSpecApplyConfiguration `json:"spec,omitempty"` + Status *KubeAPIServerStatusApplyConfiguration `json:"status,omitempty"` } // KubeAPIServer constructs a declarative configuration of the KubeAPIServer type for use with @@ -41,18 +41,18 @@ func KubeAPIServer(name string) *KubeAPIServerApplyConfiguration { // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! -func ExtractKubeAPIServer(kubeAPIServer *apioperatorv1.KubeAPIServer, fieldManager string) (*KubeAPIServerApplyConfiguration, error) { +func ExtractKubeAPIServer(kubeAPIServer *operatorv1.KubeAPIServer, fieldManager string) (*KubeAPIServerApplyConfiguration, error) { return extractKubeAPIServer(kubeAPIServer, fieldManager, "") } // ExtractKubeAPIServerStatus is the same as ExtractKubeAPIServer except // that it extracts the status subresource applied configuration. // Experimental! -func ExtractKubeAPIServerStatus(kubeAPIServer *apioperatorv1.KubeAPIServer, fieldManager string) (*KubeAPIServerApplyConfiguration, error) { +func ExtractKubeAPIServerStatus(kubeAPIServer *operatorv1.KubeAPIServer, fieldManager string) (*KubeAPIServerApplyConfiguration, error) { return extractKubeAPIServer(kubeAPIServer, fieldManager, "status") } -func extractKubeAPIServer(kubeAPIServer *apioperatorv1.KubeAPIServer, fieldManager string, subresource string) (*KubeAPIServerApplyConfiguration, error) { +func extractKubeAPIServer(kubeAPIServer *operatorv1.KubeAPIServer, fieldManager string, subresource string) (*KubeAPIServerApplyConfiguration, error) { b := &KubeAPIServerApplyConfiguration{} err := managedfields.ExtractInto(kubeAPIServer, internal.Parser().Type("com.github.openshift.api.operator.v1.KubeAPIServer"), fieldManager, b, subresource) if err != nil { @@ -69,7 +69,7 @@ func extractKubeAPIServer(kubeAPIServer *apioperatorv1.KubeAPIServer, fieldManag // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *KubeAPIServerApplyConfiguration) WithKind(value string) *KubeAPIServerApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -77,7 +77,7 @@ func (b *KubeAPIServerApplyConfiguration) WithKind(value string) *KubeAPIServerA // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *KubeAPIServerApplyConfiguration) WithAPIVersion(value string) *KubeAPIServerApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -86,7 +86,7 @@ func (b *KubeAPIServerApplyConfiguration) WithAPIVersion(value string) *KubeAPIS // If called multiple times, the Name field is set to the value of the last call. 
func (b *KubeAPIServerApplyConfiguration) WithName(value string) *KubeAPIServerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -95,7 +95,7 @@ func (b *KubeAPIServerApplyConfiguration) WithName(value string) *KubeAPIServerA // If called multiple times, the GenerateName field is set to the value of the last call. func (b *KubeAPIServerApplyConfiguration) WithGenerateName(value string) *KubeAPIServerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -104,7 +104,7 @@ func (b *KubeAPIServerApplyConfiguration) WithGenerateName(value string) *KubeAP // If called multiple times, the Namespace field is set to the value of the last call. func (b *KubeAPIServerApplyConfiguration) WithNamespace(value string) *KubeAPIServerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -113,7 +113,7 @@ func (b *KubeAPIServerApplyConfiguration) WithNamespace(value string) *KubeAPISe // If called multiple times, the UID field is set to the value of the last call. func (b *KubeAPIServerApplyConfiguration) WithUID(value types.UID) *KubeAPIServerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -122,7 +122,7 @@ func (b *KubeAPIServerApplyConfiguration) WithUID(value types.UID) *KubeAPIServe // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *KubeAPIServerApplyConfiguration) WithResourceVersion(value string) *KubeAPIServerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -131,25 +131,25 @@ func (b *KubeAPIServerApplyConfiguration) WithResourceVersion(value string) *Kub // If called multiple times, the Generation field is set to the value of the last call. func (b *KubeAPIServerApplyConfiguration) WithGeneration(value int64) *KubeAPIServerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CreationTimestamp field is set to the value of the last call. -func (b *KubeAPIServerApplyConfiguration) WithCreationTimestamp(value metav1.Time) *KubeAPIServerApplyConfiguration { +func (b *KubeAPIServerApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *KubeAPIServerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. 
-func (b *KubeAPIServerApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *KubeAPIServerApplyConfiguration { +func (b *KubeAPIServerApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *KubeAPIServerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -158,7 +158,7 @@ func (b *KubeAPIServerApplyConfiguration) WithDeletionTimestamp(value metav1.Tim // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *KubeAPIServerApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *KubeAPIServerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -168,11 +168,11 @@ func (b *KubeAPIServerApplyConfiguration) WithDeletionGracePeriodSeconds(value i // overwriting an existing map entries in Labels field with the same key. func (b *KubeAPIServerApplyConfiguration) WithLabels(entries map[string]string) *KubeAPIServerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -183,11 +183,11 @@ func (b *KubeAPIServerApplyConfiguration) WithLabels(entries map[string]string) // overwriting an existing map entries in Annotations field with the same key. func (b *KubeAPIServerApplyConfiguration) WithAnnotations(entries map[string]string) *KubeAPIServerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -195,13 +195,13 @@ func (b *KubeAPIServerApplyConfiguration) WithAnnotations(entries map[string]str // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the OwnerReferences field. 
-func (b *KubeAPIServerApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *KubeAPIServerApplyConfiguration { +func (b *KubeAPIServerApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *KubeAPIServerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -212,14 +212,14 @@ func (b *KubeAPIServerApplyConfiguration) WithOwnerReferences(values ...*v1.Owne func (b *KubeAPIServerApplyConfiguration) WithFinalizers(values ...string) *KubeAPIServerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } func (b *KubeAPIServerApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} } } @@ -242,5 +242,5 @@ func (b *KubeAPIServerApplyConfiguration) WithStatus(value *KubeAPIServerStatusA // GetName retrieves the value of the Name field in the declarative configuration. func (b *KubeAPIServerApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/kubeapiserverspec.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/kubeapiserverspec.go index dba25a5cd..71b60a95b 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/kubeapiserverspec.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/kubeapiserverspec.go @@ -23,7 +23,7 @@ func KubeAPIServerSpec() *KubeAPIServerSpecApplyConfiguration { // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the ManagementState field is set to the value of the last call. func (b *KubeAPIServerSpecApplyConfiguration) WithManagementState(value operatorv1.ManagementState) *KubeAPIServerSpecApplyConfiguration { - b.ManagementState = &value + b.OperatorSpecApplyConfiguration.ManagementState = &value return b } @@ -31,7 +31,7 @@ func (b *KubeAPIServerSpecApplyConfiguration) WithManagementState(value operator // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the LogLevel field is set to the value of the last call. func (b *KubeAPIServerSpecApplyConfiguration) WithLogLevel(value operatorv1.LogLevel) *KubeAPIServerSpecApplyConfiguration { - b.LogLevel = &value + b.OperatorSpecApplyConfiguration.LogLevel = &value return b } @@ -39,7 +39,7 @@ func (b *KubeAPIServerSpecApplyConfiguration) WithLogLevel(value operatorv1.LogL // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the OperatorLogLevel field is set to the value of the last call. 
func (b *KubeAPIServerSpecApplyConfiguration) WithOperatorLogLevel(value operatorv1.LogLevel) *KubeAPIServerSpecApplyConfiguration { - b.OperatorLogLevel = &value + b.OperatorSpecApplyConfiguration.OperatorLogLevel = &value return b } @@ -47,7 +47,7 @@ func (b *KubeAPIServerSpecApplyConfiguration) WithOperatorLogLevel(value operato // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the UnsupportedConfigOverrides field is set to the value of the last call. func (b *KubeAPIServerSpecApplyConfiguration) WithUnsupportedConfigOverrides(value runtime.RawExtension) *KubeAPIServerSpecApplyConfiguration { - b.UnsupportedConfigOverrides = &value + b.OperatorSpecApplyConfiguration.UnsupportedConfigOverrides = &value return b } @@ -55,7 +55,7 @@ func (b *KubeAPIServerSpecApplyConfiguration) WithUnsupportedConfigOverrides(val // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the ObservedConfig field is set to the value of the last call. func (b *KubeAPIServerSpecApplyConfiguration) WithObservedConfig(value runtime.RawExtension) *KubeAPIServerSpecApplyConfiguration { - b.ObservedConfig = &value + b.OperatorSpecApplyConfiguration.ObservedConfig = &value return b } @@ -63,7 +63,7 @@ func (b *KubeAPIServerSpecApplyConfiguration) WithObservedConfig(value runtime.R // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the ForceRedeploymentReason field is set to the value of the last call. func (b *KubeAPIServerSpecApplyConfiguration) WithForceRedeploymentReason(value string) *KubeAPIServerSpecApplyConfiguration { - b.ForceRedeploymentReason = &value + b.StaticPodOperatorSpecApplyConfiguration.ForceRedeploymentReason = &value return b } @@ -71,7 +71,7 @@ func (b *KubeAPIServerSpecApplyConfiguration) WithForceRedeploymentReason(value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the FailedRevisionLimit field is set to the value of the last call. func (b *KubeAPIServerSpecApplyConfiguration) WithFailedRevisionLimit(value int32) *KubeAPIServerSpecApplyConfiguration { - b.FailedRevisionLimit = &value + b.StaticPodOperatorSpecApplyConfiguration.FailedRevisionLimit = &value return b } @@ -79,6 +79,6 @@ func (b *KubeAPIServerSpecApplyConfiguration) WithFailedRevisionLimit(value int3 // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the SucceededRevisionLimit field is set to the value of the last call. 
func (b *KubeAPIServerSpecApplyConfiguration) WithSucceededRevisionLimit(value int32) *KubeAPIServerSpecApplyConfiguration { - b.SucceededRevisionLimit = &value + b.StaticPodOperatorSpecApplyConfiguration.SucceededRevisionLimit = &value return b } diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/kubeapiserverstatus.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/kubeapiserverstatus.go index 033be6dba..ff65c5113 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/kubeapiserverstatus.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/kubeapiserverstatus.go @@ -19,7 +19,7 @@ func KubeAPIServerStatus() *KubeAPIServerStatusApplyConfiguration { // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the ObservedGeneration field is set to the value of the last call. func (b *KubeAPIServerStatusApplyConfiguration) WithObservedGeneration(value int64) *KubeAPIServerStatusApplyConfiguration { - b.ObservedGeneration = &value + b.OperatorStatusApplyConfiguration.ObservedGeneration = &value return b } @@ -31,7 +31,7 @@ func (b *KubeAPIServerStatusApplyConfiguration) WithConditions(values ...*Operat if values[i] == nil { panic("nil value passed to WithConditions") } - b.Conditions = append(b.Conditions, *values[i]) + b.OperatorStatusApplyConfiguration.Conditions = append(b.OperatorStatusApplyConfiguration.Conditions, *values[i]) } return b } @@ -40,7 +40,7 @@ func (b *KubeAPIServerStatusApplyConfiguration) WithConditions(values ...*Operat // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Version field is set to the value of the last call. func (b *KubeAPIServerStatusApplyConfiguration) WithVersion(value string) *KubeAPIServerStatusApplyConfiguration { - b.Version = &value + b.OperatorStatusApplyConfiguration.Version = &value return b } @@ -48,7 +48,7 @@ func (b *KubeAPIServerStatusApplyConfiguration) WithVersion(value string) *KubeA // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the ReadyReplicas field is set to the value of the last call. func (b *KubeAPIServerStatusApplyConfiguration) WithReadyReplicas(value int32) *KubeAPIServerStatusApplyConfiguration { - b.ReadyReplicas = &value + b.OperatorStatusApplyConfiguration.ReadyReplicas = &value return b } @@ -56,7 +56,7 @@ func (b *KubeAPIServerStatusApplyConfiguration) WithReadyReplicas(value int32) * // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the LatestAvailableRevision field is set to the value of the last call. 
func (b *KubeAPIServerStatusApplyConfiguration) WithLatestAvailableRevision(value int32) *KubeAPIServerStatusApplyConfiguration { - b.LatestAvailableRevision = &value + b.OperatorStatusApplyConfiguration.LatestAvailableRevision = &value return b } @@ -68,7 +68,7 @@ func (b *KubeAPIServerStatusApplyConfiguration) WithGenerations(values ...*Gener if values[i] == nil { panic("nil value passed to WithGenerations") } - b.Generations = append(b.Generations, *values[i]) + b.OperatorStatusApplyConfiguration.Generations = append(b.OperatorStatusApplyConfiguration.Generations, *values[i]) } return b } @@ -77,7 +77,7 @@ func (b *KubeAPIServerStatusApplyConfiguration) WithGenerations(values ...*Gener // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the LatestAvailableRevisionReason field is set to the value of the last call. func (b *KubeAPIServerStatusApplyConfiguration) WithLatestAvailableRevisionReason(value string) *KubeAPIServerStatusApplyConfiguration { - b.LatestAvailableRevisionReason = &value + b.StaticPodOperatorStatusApplyConfiguration.LatestAvailableRevisionReason = &value return b } @@ -89,7 +89,7 @@ func (b *KubeAPIServerStatusApplyConfiguration) WithNodeStatuses(values ...*Node if values[i] == nil { panic("nil value passed to WithNodeStatuses") } - b.NodeStatuses = append(b.NodeStatuses, *values[i]) + b.StaticPodOperatorStatusApplyConfiguration.NodeStatuses = append(b.StaticPodOperatorStatusApplyConfiguration.NodeStatuses, *values[i]) } return b } diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/kubecontrollermanager.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/kubecontrollermanager.go index d67f73dff..731b6793a 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/kubecontrollermanager.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/kubecontrollermanager.go @@ -3,21 +3,21 @@ package v1 import ( - apioperatorv1 "github.com/openshift/api/operator/v1" + operatorv1 "github.com/openshift/api/operator/v1" internal "github.com/openshift/client-go/operator/applyconfigurations/internal" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) // KubeControllerManagerApplyConfiguration represents a declarative configuration of the KubeControllerManager type for use // with apply. 
type KubeControllerManagerApplyConfiguration struct { - v1.TypeMetaApplyConfiguration `json:",inline"` - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Spec *KubeControllerManagerSpecApplyConfiguration `json:"spec,omitempty"` - Status *KubeControllerManagerStatusApplyConfiguration `json:"status,omitempty"` + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *KubeControllerManagerSpecApplyConfiguration `json:"spec,omitempty"` + Status *KubeControllerManagerStatusApplyConfiguration `json:"status,omitempty"` } // KubeControllerManager constructs a declarative configuration of the KubeControllerManager type for use with @@ -41,18 +41,18 @@ func KubeControllerManager(name string) *KubeControllerManagerApplyConfiguration // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! -func ExtractKubeControllerManager(kubeControllerManager *apioperatorv1.KubeControllerManager, fieldManager string) (*KubeControllerManagerApplyConfiguration, error) { +func ExtractKubeControllerManager(kubeControllerManager *operatorv1.KubeControllerManager, fieldManager string) (*KubeControllerManagerApplyConfiguration, error) { return extractKubeControllerManager(kubeControllerManager, fieldManager, "") } // ExtractKubeControllerManagerStatus is the same as ExtractKubeControllerManager except // that it extracts the status subresource applied configuration. // Experimental! -func ExtractKubeControllerManagerStatus(kubeControllerManager *apioperatorv1.KubeControllerManager, fieldManager string) (*KubeControllerManagerApplyConfiguration, error) { +func ExtractKubeControllerManagerStatus(kubeControllerManager *operatorv1.KubeControllerManager, fieldManager string) (*KubeControllerManagerApplyConfiguration, error) { return extractKubeControllerManager(kubeControllerManager, fieldManager, "status") } -func extractKubeControllerManager(kubeControllerManager *apioperatorv1.KubeControllerManager, fieldManager string, subresource string) (*KubeControllerManagerApplyConfiguration, error) { +func extractKubeControllerManager(kubeControllerManager *operatorv1.KubeControllerManager, fieldManager string, subresource string) (*KubeControllerManagerApplyConfiguration, error) { b := &KubeControllerManagerApplyConfiguration{} err := managedfields.ExtractInto(kubeControllerManager, internal.Parser().Type("com.github.openshift.api.operator.v1.KubeControllerManager"), fieldManager, b, subresource) if err != nil { @@ -69,7 +69,7 @@ func extractKubeControllerManager(kubeControllerManager *apioperatorv1.KubeContr // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *KubeControllerManagerApplyConfiguration) WithKind(value string) *KubeControllerManagerApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -77,7 +77,7 @@ func (b *KubeControllerManagerApplyConfiguration) WithKind(value string) *KubeCo // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. 
func (b *KubeControllerManagerApplyConfiguration) WithAPIVersion(value string) *KubeControllerManagerApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -86,7 +86,7 @@ func (b *KubeControllerManagerApplyConfiguration) WithAPIVersion(value string) * // If called multiple times, the Name field is set to the value of the last call. func (b *KubeControllerManagerApplyConfiguration) WithName(value string) *KubeControllerManagerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -95,7 +95,7 @@ func (b *KubeControllerManagerApplyConfiguration) WithName(value string) *KubeCo // If called multiple times, the GenerateName field is set to the value of the last call. func (b *KubeControllerManagerApplyConfiguration) WithGenerateName(value string) *KubeControllerManagerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -104,7 +104,7 @@ func (b *KubeControllerManagerApplyConfiguration) WithGenerateName(value string) // If called multiple times, the Namespace field is set to the value of the last call. func (b *KubeControllerManagerApplyConfiguration) WithNamespace(value string) *KubeControllerManagerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -113,7 +113,7 @@ func (b *KubeControllerManagerApplyConfiguration) WithNamespace(value string) *K // If called multiple times, the UID field is set to the value of the last call. func (b *KubeControllerManagerApplyConfiguration) WithUID(value types.UID) *KubeControllerManagerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -122,7 +122,7 @@ func (b *KubeControllerManagerApplyConfiguration) WithUID(value types.UID) *Kube // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *KubeControllerManagerApplyConfiguration) WithResourceVersion(value string) *KubeControllerManagerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -131,25 +131,25 @@ func (b *KubeControllerManagerApplyConfiguration) WithResourceVersion(value stri // If called multiple times, the Generation field is set to the value of the last call. func (b *KubeControllerManagerApplyConfiguration) WithGeneration(value int64) *KubeControllerManagerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CreationTimestamp field is set to the value of the last call. 
-func (b *KubeControllerManagerApplyConfiguration) WithCreationTimestamp(value metav1.Time) *KubeControllerManagerApplyConfiguration { +func (b *KubeControllerManagerApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *KubeControllerManagerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. -func (b *KubeControllerManagerApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *KubeControllerManagerApplyConfiguration { +func (b *KubeControllerManagerApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *KubeControllerManagerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -158,7 +158,7 @@ func (b *KubeControllerManagerApplyConfiguration) WithDeletionTimestamp(value me // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *KubeControllerManagerApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *KubeControllerManagerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -168,11 +168,11 @@ func (b *KubeControllerManagerApplyConfiguration) WithDeletionGracePeriodSeconds // overwriting an existing map entries in Labels field with the same key. func (b *KubeControllerManagerApplyConfiguration) WithLabels(entries map[string]string) *KubeControllerManagerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -183,11 +183,11 @@ func (b *KubeControllerManagerApplyConfiguration) WithLabels(entries map[string] // overwriting an existing map entries in Annotations field with the same key. func (b *KubeControllerManagerApplyConfiguration) WithAnnotations(entries map[string]string) *KubeControllerManagerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -195,13 +195,13 @@ func (b *KubeControllerManagerApplyConfiguration) WithAnnotations(entries map[st // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the OwnerReferences field. 
-func (b *KubeControllerManagerApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *KubeControllerManagerApplyConfiguration { +func (b *KubeControllerManagerApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *KubeControllerManagerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -212,14 +212,14 @@ func (b *KubeControllerManagerApplyConfiguration) WithOwnerReferences(values ... func (b *KubeControllerManagerApplyConfiguration) WithFinalizers(values ...string) *KubeControllerManagerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } func (b *KubeControllerManagerApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} } } @@ -242,5 +242,5 @@ func (b *KubeControllerManagerApplyConfiguration) WithStatus(value *KubeControll // GetName retrieves the value of the Name field in the declarative configuration. func (b *KubeControllerManagerApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/kubecontrollermanagerspec.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/kubecontrollermanagerspec.go index 2e2923af0..8a5181578 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/kubecontrollermanagerspec.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/kubecontrollermanagerspec.go @@ -24,7 +24,7 @@ func KubeControllerManagerSpec() *KubeControllerManagerSpecApplyConfiguration { // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the ManagementState field is set to the value of the last call. func (b *KubeControllerManagerSpecApplyConfiguration) WithManagementState(value operatorv1.ManagementState) *KubeControllerManagerSpecApplyConfiguration { - b.ManagementState = &value + b.OperatorSpecApplyConfiguration.ManagementState = &value return b } @@ -32,7 +32,7 @@ func (b *KubeControllerManagerSpecApplyConfiguration) WithManagementState(value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the LogLevel field is set to the value of the last call. func (b *KubeControllerManagerSpecApplyConfiguration) WithLogLevel(value operatorv1.LogLevel) *KubeControllerManagerSpecApplyConfiguration { - b.LogLevel = &value + b.OperatorSpecApplyConfiguration.LogLevel = &value return b } @@ -40,7 +40,7 @@ func (b *KubeControllerManagerSpecApplyConfiguration) WithLogLevel(value operato // and returns the receiver, so that objects can be built by chaining "With" function invocations. 
// If called multiple times, the OperatorLogLevel field is set to the value of the last call. func (b *KubeControllerManagerSpecApplyConfiguration) WithOperatorLogLevel(value operatorv1.LogLevel) *KubeControllerManagerSpecApplyConfiguration { - b.OperatorLogLevel = &value + b.OperatorSpecApplyConfiguration.OperatorLogLevel = &value return b } @@ -48,7 +48,7 @@ func (b *KubeControllerManagerSpecApplyConfiguration) WithOperatorLogLevel(value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the UnsupportedConfigOverrides field is set to the value of the last call. func (b *KubeControllerManagerSpecApplyConfiguration) WithUnsupportedConfigOverrides(value runtime.RawExtension) *KubeControllerManagerSpecApplyConfiguration { - b.UnsupportedConfigOverrides = &value + b.OperatorSpecApplyConfiguration.UnsupportedConfigOverrides = &value return b } @@ -56,7 +56,7 @@ func (b *KubeControllerManagerSpecApplyConfiguration) WithUnsupportedConfigOverr // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the ObservedConfig field is set to the value of the last call. func (b *KubeControllerManagerSpecApplyConfiguration) WithObservedConfig(value runtime.RawExtension) *KubeControllerManagerSpecApplyConfiguration { - b.ObservedConfig = &value + b.OperatorSpecApplyConfiguration.ObservedConfig = &value return b } @@ -64,7 +64,7 @@ func (b *KubeControllerManagerSpecApplyConfiguration) WithObservedConfig(value r // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the ForceRedeploymentReason field is set to the value of the last call. func (b *KubeControllerManagerSpecApplyConfiguration) WithForceRedeploymentReason(value string) *KubeControllerManagerSpecApplyConfiguration { - b.ForceRedeploymentReason = &value + b.StaticPodOperatorSpecApplyConfiguration.ForceRedeploymentReason = &value return b } @@ -72,7 +72,7 @@ func (b *KubeControllerManagerSpecApplyConfiguration) WithForceRedeploymentReaso // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the FailedRevisionLimit field is set to the value of the last call. func (b *KubeControllerManagerSpecApplyConfiguration) WithFailedRevisionLimit(value int32) *KubeControllerManagerSpecApplyConfiguration { - b.FailedRevisionLimit = &value + b.StaticPodOperatorSpecApplyConfiguration.FailedRevisionLimit = &value return b } @@ -80,7 +80,7 @@ func (b *KubeControllerManagerSpecApplyConfiguration) WithFailedRevisionLimit(va // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the SucceededRevisionLimit field is set to the value of the last call. 
func (b *KubeControllerManagerSpecApplyConfiguration) WithSucceededRevisionLimit(value int32) *KubeControllerManagerSpecApplyConfiguration { - b.SucceededRevisionLimit = &value + b.StaticPodOperatorSpecApplyConfiguration.SucceededRevisionLimit = &value return b } diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/kubecontrollermanagerstatus.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/kubecontrollermanagerstatus.go index 5c11f6622..1c72dff26 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/kubecontrollermanagerstatus.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/kubecontrollermanagerstatus.go @@ -18,7 +18,7 @@ func KubeControllerManagerStatus() *KubeControllerManagerStatusApplyConfiguratio // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the ObservedGeneration field is set to the value of the last call. func (b *KubeControllerManagerStatusApplyConfiguration) WithObservedGeneration(value int64) *KubeControllerManagerStatusApplyConfiguration { - b.ObservedGeneration = &value + b.OperatorStatusApplyConfiguration.ObservedGeneration = &value return b } @@ -30,7 +30,7 @@ func (b *KubeControllerManagerStatusApplyConfiguration) WithConditions(values .. if values[i] == nil { panic("nil value passed to WithConditions") } - b.Conditions = append(b.Conditions, *values[i]) + b.OperatorStatusApplyConfiguration.Conditions = append(b.OperatorStatusApplyConfiguration.Conditions, *values[i]) } return b } @@ -39,7 +39,7 @@ func (b *KubeControllerManagerStatusApplyConfiguration) WithConditions(values .. // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Version field is set to the value of the last call. func (b *KubeControllerManagerStatusApplyConfiguration) WithVersion(value string) *KubeControllerManagerStatusApplyConfiguration { - b.Version = &value + b.OperatorStatusApplyConfiguration.Version = &value return b } @@ -47,7 +47,7 @@ func (b *KubeControllerManagerStatusApplyConfiguration) WithVersion(value string // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the ReadyReplicas field is set to the value of the last call. func (b *KubeControllerManagerStatusApplyConfiguration) WithReadyReplicas(value int32) *KubeControllerManagerStatusApplyConfiguration { - b.ReadyReplicas = &value + b.OperatorStatusApplyConfiguration.ReadyReplicas = &value return b } @@ -55,7 +55,7 @@ func (b *KubeControllerManagerStatusApplyConfiguration) WithReadyReplicas(value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the LatestAvailableRevision field is set to the value of the last call. func (b *KubeControllerManagerStatusApplyConfiguration) WithLatestAvailableRevision(value int32) *KubeControllerManagerStatusApplyConfiguration { - b.LatestAvailableRevision = &value + b.OperatorStatusApplyConfiguration.LatestAvailableRevision = &value return b } @@ -67,7 +67,7 @@ func (b *KubeControllerManagerStatusApplyConfiguration) WithGenerations(values . 
if values[i] == nil { panic("nil value passed to WithGenerations") } - b.Generations = append(b.Generations, *values[i]) + b.OperatorStatusApplyConfiguration.Generations = append(b.OperatorStatusApplyConfiguration.Generations, *values[i]) } return b } @@ -76,7 +76,7 @@ func (b *KubeControllerManagerStatusApplyConfiguration) WithGenerations(values . // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the LatestAvailableRevisionReason field is set to the value of the last call. func (b *KubeControllerManagerStatusApplyConfiguration) WithLatestAvailableRevisionReason(value string) *KubeControllerManagerStatusApplyConfiguration { - b.LatestAvailableRevisionReason = &value + b.StaticPodOperatorStatusApplyConfiguration.LatestAvailableRevisionReason = &value return b } @@ -88,7 +88,7 @@ func (b *KubeControllerManagerStatusApplyConfiguration) WithNodeStatuses(values if values[i] == nil { panic("nil value passed to WithNodeStatuses") } - b.NodeStatuses = append(b.NodeStatuses, *values[i]) + b.StaticPodOperatorStatusApplyConfiguration.NodeStatuses = append(b.StaticPodOperatorStatusApplyConfiguration.NodeStatuses, *values[i]) } return b } diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/kubescheduler.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/kubescheduler.go index 27f969980..77e6ca343 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/kubescheduler.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/kubescheduler.go @@ -3,21 +3,21 @@ package v1 import ( - apioperatorv1 "github.com/openshift/api/operator/v1" + operatorv1 "github.com/openshift/api/operator/v1" internal "github.com/openshift/client-go/operator/applyconfigurations/internal" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) // KubeSchedulerApplyConfiguration represents a declarative configuration of the KubeScheduler type for use // with apply. type KubeSchedulerApplyConfiguration struct { - v1.TypeMetaApplyConfiguration `json:",inline"` - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Spec *KubeSchedulerSpecApplyConfiguration `json:"spec,omitempty"` - Status *KubeSchedulerStatusApplyConfiguration `json:"status,omitempty"` + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *KubeSchedulerSpecApplyConfiguration `json:"spec,omitempty"` + Status *KubeSchedulerStatusApplyConfiguration `json:"status,omitempty"` } // KubeScheduler constructs a declarative configuration of the KubeScheduler type for use with @@ -41,18 +41,18 @@ func KubeScheduler(name string) *KubeSchedulerApplyConfiguration { // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! 
-func ExtractKubeScheduler(kubeScheduler *apioperatorv1.KubeScheduler, fieldManager string) (*KubeSchedulerApplyConfiguration, error) { +func ExtractKubeScheduler(kubeScheduler *operatorv1.KubeScheduler, fieldManager string) (*KubeSchedulerApplyConfiguration, error) { return extractKubeScheduler(kubeScheduler, fieldManager, "") } // ExtractKubeSchedulerStatus is the same as ExtractKubeScheduler except // that it extracts the status subresource applied configuration. // Experimental! -func ExtractKubeSchedulerStatus(kubeScheduler *apioperatorv1.KubeScheduler, fieldManager string) (*KubeSchedulerApplyConfiguration, error) { +func ExtractKubeSchedulerStatus(kubeScheduler *operatorv1.KubeScheduler, fieldManager string) (*KubeSchedulerApplyConfiguration, error) { return extractKubeScheduler(kubeScheduler, fieldManager, "status") } -func extractKubeScheduler(kubeScheduler *apioperatorv1.KubeScheduler, fieldManager string, subresource string) (*KubeSchedulerApplyConfiguration, error) { +func extractKubeScheduler(kubeScheduler *operatorv1.KubeScheduler, fieldManager string, subresource string) (*KubeSchedulerApplyConfiguration, error) { b := &KubeSchedulerApplyConfiguration{} err := managedfields.ExtractInto(kubeScheduler, internal.Parser().Type("com.github.openshift.api.operator.v1.KubeScheduler"), fieldManager, b, subresource) if err != nil { @@ -69,7 +69,7 @@ func extractKubeScheduler(kubeScheduler *apioperatorv1.KubeScheduler, fieldManag // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *KubeSchedulerApplyConfiguration) WithKind(value string) *KubeSchedulerApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -77,7 +77,7 @@ func (b *KubeSchedulerApplyConfiguration) WithKind(value string) *KubeSchedulerA // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *KubeSchedulerApplyConfiguration) WithAPIVersion(value string) *KubeSchedulerApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -86,7 +86,7 @@ func (b *KubeSchedulerApplyConfiguration) WithAPIVersion(value string) *KubeSche // If called multiple times, the Name field is set to the value of the last call. func (b *KubeSchedulerApplyConfiguration) WithName(value string) *KubeSchedulerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -95,7 +95,7 @@ func (b *KubeSchedulerApplyConfiguration) WithName(value string) *KubeSchedulerA // If called multiple times, the GenerateName field is set to the value of the last call. func (b *KubeSchedulerApplyConfiguration) WithGenerateName(value string) *KubeSchedulerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -104,7 +104,7 @@ func (b *KubeSchedulerApplyConfiguration) WithGenerateName(value string) *KubeSc // If called multiple times, the Namespace field is set to the value of the last call. 
func (b *KubeSchedulerApplyConfiguration) WithNamespace(value string) *KubeSchedulerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -113,7 +113,7 @@ func (b *KubeSchedulerApplyConfiguration) WithNamespace(value string) *KubeSched // If called multiple times, the UID field is set to the value of the last call. func (b *KubeSchedulerApplyConfiguration) WithUID(value types.UID) *KubeSchedulerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -122,7 +122,7 @@ func (b *KubeSchedulerApplyConfiguration) WithUID(value types.UID) *KubeSchedule // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *KubeSchedulerApplyConfiguration) WithResourceVersion(value string) *KubeSchedulerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -131,25 +131,25 @@ func (b *KubeSchedulerApplyConfiguration) WithResourceVersion(value string) *Kub // If called multiple times, the Generation field is set to the value of the last call. func (b *KubeSchedulerApplyConfiguration) WithGeneration(value int64) *KubeSchedulerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CreationTimestamp field is set to the value of the last call. -func (b *KubeSchedulerApplyConfiguration) WithCreationTimestamp(value metav1.Time) *KubeSchedulerApplyConfiguration { +func (b *KubeSchedulerApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *KubeSchedulerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. -func (b *KubeSchedulerApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *KubeSchedulerApplyConfiguration { +func (b *KubeSchedulerApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *KubeSchedulerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -158,7 +158,7 @@ func (b *KubeSchedulerApplyConfiguration) WithDeletionTimestamp(value metav1.Tim // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. 
func (b *KubeSchedulerApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *KubeSchedulerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -168,11 +168,11 @@ func (b *KubeSchedulerApplyConfiguration) WithDeletionGracePeriodSeconds(value i // overwriting an existing map entries in Labels field with the same key. func (b *KubeSchedulerApplyConfiguration) WithLabels(entries map[string]string) *KubeSchedulerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -183,11 +183,11 @@ func (b *KubeSchedulerApplyConfiguration) WithLabels(entries map[string]string) // overwriting an existing map entries in Annotations field with the same key. func (b *KubeSchedulerApplyConfiguration) WithAnnotations(entries map[string]string) *KubeSchedulerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -195,13 +195,13 @@ func (b *KubeSchedulerApplyConfiguration) WithAnnotations(entries map[string]str // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the OwnerReferences field. 
-func (b *KubeSchedulerApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *KubeSchedulerApplyConfiguration { +func (b *KubeSchedulerApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *KubeSchedulerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -212,14 +212,14 @@ func (b *KubeSchedulerApplyConfiguration) WithOwnerReferences(values ...*v1.Owne func (b *KubeSchedulerApplyConfiguration) WithFinalizers(values ...string) *KubeSchedulerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } func (b *KubeSchedulerApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} } } @@ -242,5 +242,5 @@ func (b *KubeSchedulerApplyConfiguration) WithStatus(value *KubeSchedulerStatusA // GetName retrieves the value of the Name field in the declarative configuration. func (b *KubeSchedulerApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/kubeschedulerspec.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/kubeschedulerspec.go index 44d8eeab2..94bd1d61f 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/kubeschedulerspec.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/kubeschedulerspec.go @@ -23,7 +23,7 @@ func KubeSchedulerSpec() *KubeSchedulerSpecApplyConfiguration { // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the ManagementState field is set to the value of the last call. func (b *KubeSchedulerSpecApplyConfiguration) WithManagementState(value operatorv1.ManagementState) *KubeSchedulerSpecApplyConfiguration { - b.ManagementState = &value + b.OperatorSpecApplyConfiguration.ManagementState = &value return b } @@ -31,7 +31,7 @@ func (b *KubeSchedulerSpecApplyConfiguration) WithManagementState(value operator // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the LogLevel field is set to the value of the last call. func (b *KubeSchedulerSpecApplyConfiguration) WithLogLevel(value operatorv1.LogLevel) *KubeSchedulerSpecApplyConfiguration { - b.LogLevel = &value + b.OperatorSpecApplyConfiguration.LogLevel = &value return b } @@ -39,7 +39,7 @@ func (b *KubeSchedulerSpecApplyConfiguration) WithLogLevel(value operatorv1.LogL // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the OperatorLogLevel field is set to the value of the last call. 
func (b *KubeSchedulerSpecApplyConfiguration) WithOperatorLogLevel(value operatorv1.LogLevel) *KubeSchedulerSpecApplyConfiguration { - b.OperatorLogLevel = &value + b.OperatorSpecApplyConfiguration.OperatorLogLevel = &value return b } @@ -47,7 +47,7 @@ func (b *KubeSchedulerSpecApplyConfiguration) WithOperatorLogLevel(value operato // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the UnsupportedConfigOverrides field is set to the value of the last call. func (b *KubeSchedulerSpecApplyConfiguration) WithUnsupportedConfigOverrides(value runtime.RawExtension) *KubeSchedulerSpecApplyConfiguration { - b.UnsupportedConfigOverrides = &value + b.OperatorSpecApplyConfiguration.UnsupportedConfigOverrides = &value return b } @@ -55,7 +55,7 @@ func (b *KubeSchedulerSpecApplyConfiguration) WithUnsupportedConfigOverrides(val // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the ObservedConfig field is set to the value of the last call. func (b *KubeSchedulerSpecApplyConfiguration) WithObservedConfig(value runtime.RawExtension) *KubeSchedulerSpecApplyConfiguration { - b.ObservedConfig = &value + b.OperatorSpecApplyConfiguration.ObservedConfig = &value return b } @@ -63,7 +63,7 @@ func (b *KubeSchedulerSpecApplyConfiguration) WithObservedConfig(value runtime.R // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the ForceRedeploymentReason field is set to the value of the last call. func (b *KubeSchedulerSpecApplyConfiguration) WithForceRedeploymentReason(value string) *KubeSchedulerSpecApplyConfiguration { - b.ForceRedeploymentReason = &value + b.StaticPodOperatorSpecApplyConfiguration.ForceRedeploymentReason = &value return b } @@ -71,7 +71,7 @@ func (b *KubeSchedulerSpecApplyConfiguration) WithForceRedeploymentReason(value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the FailedRevisionLimit field is set to the value of the last call. func (b *KubeSchedulerSpecApplyConfiguration) WithFailedRevisionLimit(value int32) *KubeSchedulerSpecApplyConfiguration { - b.FailedRevisionLimit = &value + b.StaticPodOperatorSpecApplyConfiguration.FailedRevisionLimit = &value return b } @@ -79,6 +79,6 @@ func (b *KubeSchedulerSpecApplyConfiguration) WithFailedRevisionLimit(value int3 // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the SucceededRevisionLimit field is set to the value of the last call. 
func (b *KubeSchedulerSpecApplyConfiguration) WithSucceededRevisionLimit(value int32) *KubeSchedulerSpecApplyConfiguration { - b.SucceededRevisionLimit = &value + b.StaticPodOperatorSpecApplyConfiguration.SucceededRevisionLimit = &value return b } diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/kubeschedulerstatus.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/kubeschedulerstatus.go index e6784996f..821b4c3f9 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/kubeschedulerstatus.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/kubeschedulerstatus.go @@ -18,7 +18,7 @@ func KubeSchedulerStatus() *KubeSchedulerStatusApplyConfiguration { // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the ObservedGeneration field is set to the value of the last call. func (b *KubeSchedulerStatusApplyConfiguration) WithObservedGeneration(value int64) *KubeSchedulerStatusApplyConfiguration { - b.ObservedGeneration = &value + b.OperatorStatusApplyConfiguration.ObservedGeneration = &value return b } @@ -30,7 +30,7 @@ func (b *KubeSchedulerStatusApplyConfiguration) WithConditions(values ...*Operat if values[i] == nil { panic("nil value passed to WithConditions") } - b.Conditions = append(b.Conditions, *values[i]) + b.OperatorStatusApplyConfiguration.Conditions = append(b.OperatorStatusApplyConfiguration.Conditions, *values[i]) } return b } @@ -39,7 +39,7 @@ func (b *KubeSchedulerStatusApplyConfiguration) WithConditions(values ...*Operat // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Version field is set to the value of the last call. func (b *KubeSchedulerStatusApplyConfiguration) WithVersion(value string) *KubeSchedulerStatusApplyConfiguration { - b.Version = &value + b.OperatorStatusApplyConfiguration.Version = &value return b } @@ -47,7 +47,7 @@ func (b *KubeSchedulerStatusApplyConfiguration) WithVersion(value string) *KubeS // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the ReadyReplicas field is set to the value of the last call. func (b *KubeSchedulerStatusApplyConfiguration) WithReadyReplicas(value int32) *KubeSchedulerStatusApplyConfiguration { - b.ReadyReplicas = &value + b.OperatorStatusApplyConfiguration.ReadyReplicas = &value return b } @@ -55,7 +55,7 @@ func (b *KubeSchedulerStatusApplyConfiguration) WithReadyReplicas(value int32) * // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the LatestAvailableRevision field is set to the value of the last call. 
func (b *KubeSchedulerStatusApplyConfiguration) WithLatestAvailableRevision(value int32) *KubeSchedulerStatusApplyConfiguration { - b.LatestAvailableRevision = &value + b.OperatorStatusApplyConfiguration.LatestAvailableRevision = &value return b } @@ -67,7 +67,7 @@ func (b *KubeSchedulerStatusApplyConfiguration) WithGenerations(values ...*Gener if values[i] == nil { panic("nil value passed to WithGenerations") } - b.Generations = append(b.Generations, *values[i]) + b.OperatorStatusApplyConfiguration.Generations = append(b.OperatorStatusApplyConfiguration.Generations, *values[i]) } return b } @@ -76,7 +76,7 @@ func (b *KubeSchedulerStatusApplyConfiguration) WithGenerations(values ...*Gener // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the LatestAvailableRevisionReason field is set to the value of the last call. func (b *KubeSchedulerStatusApplyConfiguration) WithLatestAvailableRevisionReason(value string) *KubeSchedulerStatusApplyConfiguration { - b.LatestAvailableRevisionReason = &value + b.StaticPodOperatorStatusApplyConfiguration.LatestAvailableRevisionReason = &value return b } @@ -88,7 +88,7 @@ func (b *KubeSchedulerStatusApplyConfiguration) WithNodeStatuses(values ...*Node if values[i] == nil { panic("nil value passed to WithNodeStatuses") } - b.NodeStatuses = append(b.NodeStatuses, *values[i]) + b.StaticPodOperatorStatusApplyConfiguration.NodeStatuses = append(b.StaticPodOperatorStatusApplyConfiguration.NodeStatuses, *values[i]) } return b } diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/kubestorageversionmigrator.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/kubestorageversionmigrator.go index d0f693f9d..5c84a133f 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/kubestorageversionmigrator.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/kubestorageversionmigrator.go @@ -3,21 +3,21 @@ package v1 import ( - apioperatorv1 "github.com/openshift/api/operator/v1" + operatorv1 "github.com/openshift/api/operator/v1" internal "github.com/openshift/client-go/operator/applyconfigurations/internal" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) // KubeStorageVersionMigratorApplyConfiguration represents a declarative configuration of the KubeStorageVersionMigrator type for use // with apply. 
type KubeStorageVersionMigratorApplyConfiguration struct { - v1.TypeMetaApplyConfiguration `json:",inline"` - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Spec *KubeStorageVersionMigratorSpecApplyConfiguration `json:"spec,omitempty"` - Status *KubeStorageVersionMigratorStatusApplyConfiguration `json:"status,omitempty"` + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *KubeStorageVersionMigratorSpecApplyConfiguration `json:"spec,omitempty"` + Status *KubeStorageVersionMigratorStatusApplyConfiguration `json:"status,omitempty"` } // KubeStorageVersionMigrator constructs a declarative configuration of the KubeStorageVersionMigrator type for use with @@ -41,18 +41,18 @@ func KubeStorageVersionMigrator(name string) *KubeStorageVersionMigratorApplyCon // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! -func ExtractKubeStorageVersionMigrator(kubeStorageVersionMigrator *apioperatorv1.KubeStorageVersionMigrator, fieldManager string) (*KubeStorageVersionMigratorApplyConfiguration, error) { +func ExtractKubeStorageVersionMigrator(kubeStorageVersionMigrator *operatorv1.KubeStorageVersionMigrator, fieldManager string) (*KubeStorageVersionMigratorApplyConfiguration, error) { return extractKubeStorageVersionMigrator(kubeStorageVersionMigrator, fieldManager, "") } // ExtractKubeStorageVersionMigratorStatus is the same as ExtractKubeStorageVersionMigrator except // that it extracts the status subresource applied configuration. // Experimental! -func ExtractKubeStorageVersionMigratorStatus(kubeStorageVersionMigrator *apioperatorv1.KubeStorageVersionMigrator, fieldManager string) (*KubeStorageVersionMigratorApplyConfiguration, error) { +func ExtractKubeStorageVersionMigratorStatus(kubeStorageVersionMigrator *operatorv1.KubeStorageVersionMigrator, fieldManager string) (*KubeStorageVersionMigratorApplyConfiguration, error) { return extractKubeStorageVersionMigrator(kubeStorageVersionMigrator, fieldManager, "status") } -func extractKubeStorageVersionMigrator(kubeStorageVersionMigrator *apioperatorv1.KubeStorageVersionMigrator, fieldManager string, subresource string) (*KubeStorageVersionMigratorApplyConfiguration, error) { +func extractKubeStorageVersionMigrator(kubeStorageVersionMigrator *operatorv1.KubeStorageVersionMigrator, fieldManager string, subresource string) (*KubeStorageVersionMigratorApplyConfiguration, error) { b := &KubeStorageVersionMigratorApplyConfiguration{} err := managedfields.ExtractInto(kubeStorageVersionMigrator, internal.Parser().Type("com.github.openshift.api.operator.v1.KubeStorageVersionMigrator"), fieldManager, b, subresource) if err != nil { @@ -69,7 +69,7 @@ func extractKubeStorageVersionMigrator(kubeStorageVersionMigrator *apioperatorv1 // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *KubeStorageVersionMigratorApplyConfiguration) WithKind(value string) *KubeStorageVersionMigratorApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -77,7 +77,7 @@ func (b *KubeStorageVersionMigratorApplyConfiguration) WithKind(value string) *K // and returns the receiver, so that objects can be built by chaining "With" function invocations. 
// If called multiple times, the APIVersion field is set to the value of the last call. func (b *KubeStorageVersionMigratorApplyConfiguration) WithAPIVersion(value string) *KubeStorageVersionMigratorApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -86,7 +86,7 @@ func (b *KubeStorageVersionMigratorApplyConfiguration) WithAPIVersion(value stri // If called multiple times, the Name field is set to the value of the last call. func (b *KubeStorageVersionMigratorApplyConfiguration) WithName(value string) *KubeStorageVersionMigratorApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -95,7 +95,7 @@ func (b *KubeStorageVersionMigratorApplyConfiguration) WithName(value string) *K // If called multiple times, the GenerateName field is set to the value of the last call. func (b *KubeStorageVersionMigratorApplyConfiguration) WithGenerateName(value string) *KubeStorageVersionMigratorApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -104,7 +104,7 @@ func (b *KubeStorageVersionMigratorApplyConfiguration) WithGenerateName(value st // If called multiple times, the Namespace field is set to the value of the last call. func (b *KubeStorageVersionMigratorApplyConfiguration) WithNamespace(value string) *KubeStorageVersionMigratorApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -113,7 +113,7 @@ func (b *KubeStorageVersionMigratorApplyConfiguration) WithNamespace(value strin // If called multiple times, the UID field is set to the value of the last call. func (b *KubeStorageVersionMigratorApplyConfiguration) WithUID(value types.UID) *KubeStorageVersionMigratorApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -122,7 +122,7 @@ func (b *KubeStorageVersionMigratorApplyConfiguration) WithUID(value types.UID) // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *KubeStorageVersionMigratorApplyConfiguration) WithResourceVersion(value string) *KubeStorageVersionMigratorApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -131,25 +131,25 @@ func (b *KubeStorageVersionMigratorApplyConfiguration) WithResourceVersion(value // If called multiple times, the Generation field is set to the value of the last call. func (b *KubeStorageVersionMigratorApplyConfiguration) WithGeneration(value int64) *KubeStorageVersionMigratorApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CreationTimestamp field is set to the value of the last call. 
-func (b *KubeStorageVersionMigratorApplyConfiguration) WithCreationTimestamp(value metav1.Time) *KubeStorageVersionMigratorApplyConfiguration { +func (b *KubeStorageVersionMigratorApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *KubeStorageVersionMigratorApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. -func (b *KubeStorageVersionMigratorApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *KubeStorageVersionMigratorApplyConfiguration { +func (b *KubeStorageVersionMigratorApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *KubeStorageVersionMigratorApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -158,7 +158,7 @@ func (b *KubeStorageVersionMigratorApplyConfiguration) WithDeletionTimestamp(val // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *KubeStorageVersionMigratorApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *KubeStorageVersionMigratorApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -168,11 +168,11 @@ func (b *KubeStorageVersionMigratorApplyConfiguration) WithDeletionGracePeriodSe // overwriting an existing map entries in Labels field with the same key. func (b *KubeStorageVersionMigratorApplyConfiguration) WithLabels(entries map[string]string) *KubeStorageVersionMigratorApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -183,11 +183,11 @@ func (b *KubeStorageVersionMigratorApplyConfiguration) WithLabels(entries map[st // overwriting an existing map entries in Annotations field with the same key. func (b *KubeStorageVersionMigratorApplyConfiguration) WithAnnotations(entries map[string]string) *KubeStorageVersionMigratorApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -195,13 +195,13 @@ func (b *KubeStorageVersionMigratorApplyConfiguration) WithAnnotations(entries m // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. 
// If called multiple times, values provided by each call will be appended to the OwnerReferences field. -func (b *KubeStorageVersionMigratorApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *KubeStorageVersionMigratorApplyConfiguration { +func (b *KubeStorageVersionMigratorApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *KubeStorageVersionMigratorApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -212,14 +212,14 @@ func (b *KubeStorageVersionMigratorApplyConfiguration) WithOwnerReferences(value func (b *KubeStorageVersionMigratorApplyConfiguration) WithFinalizers(values ...string) *KubeStorageVersionMigratorApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } func (b *KubeStorageVersionMigratorApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} } } @@ -242,5 +242,5 @@ func (b *KubeStorageVersionMigratorApplyConfiguration) WithStatus(value *KubeSto // GetName retrieves the value of the Name field in the declarative configuration. func (b *KubeStorageVersionMigratorApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/kubestorageversionmigratorspec.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/kubestorageversionmigratorspec.go index 94e557659..6acfcb82b 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/kubestorageversionmigratorspec.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/kubestorageversionmigratorspec.go @@ -23,7 +23,7 @@ func KubeStorageVersionMigratorSpec() *KubeStorageVersionMigratorSpecApplyConfig // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the ManagementState field is set to the value of the last call. func (b *KubeStorageVersionMigratorSpecApplyConfiguration) WithManagementState(value operatorv1.ManagementState) *KubeStorageVersionMigratorSpecApplyConfiguration { - b.ManagementState = &value + b.OperatorSpecApplyConfiguration.ManagementState = &value return b } @@ -31,7 +31,7 @@ func (b *KubeStorageVersionMigratorSpecApplyConfiguration) WithManagementState(v // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the LogLevel field is set to the value of the last call. 
func (b *KubeStorageVersionMigratorSpecApplyConfiguration) WithLogLevel(value operatorv1.LogLevel) *KubeStorageVersionMigratorSpecApplyConfiguration { - b.LogLevel = &value + b.OperatorSpecApplyConfiguration.LogLevel = &value return b } @@ -39,7 +39,7 @@ func (b *KubeStorageVersionMigratorSpecApplyConfiguration) WithLogLevel(value op // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the OperatorLogLevel field is set to the value of the last call. func (b *KubeStorageVersionMigratorSpecApplyConfiguration) WithOperatorLogLevel(value operatorv1.LogLevel) *KubeStorageVersionMigratorSpecApplyConfiguration { - b.OperatorLogLevel = &value + b.OperatorSpecApplyConfiguration.OperatorLogLevel = &value return b } @@ -47,7 +47,7 @@ func (b *KubeStorageVersionMigratorSpecApplyConfiguration) WithOperatorLogLevel( // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the UnsupportedConfigOverrides field is set to the value of the last call. func (b *KubeStorageVersionMigratorSpecApplyConfiguration) WithUnsupportedConfigOverrides(value runtime.RawExtension) *KubeStorageVersionMigratorSpecApplyConfiguration { - b.UnsupportedConfigOverrides = &value + b.OperatorSpecApplyConfiguration.UnsupportedConfigOverrides = &value return b } @@ -55,6 +55,6 @@ func (b *KubeStorageVersionMigratorSpecApplyConfiguration) WithUnsupportedConfig // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the ObservedConfig field is set to the value of the last call. func (b *KubeStorageVersionMigratorSpecApplyConfiguration) WithObservedConfig(value runtime.RawExtension) *KubeStorageVersionMigratorSpecApplyConfiguration { - b.ObservedConfig = &value + b.OperatorSpecApplyConfiguration.ObservedConfig = &value return b } diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/kubestorageversionmigratorstatus.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/kubestorageversionmigratorstatus.go index 438067c25..cad8e2f76 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/kubestorageversionmigratorstatus.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/kubestorageversionmigratorstatus.go @@ -18,7 +18,7 @@ func KubeStorageVersionMigratorStatus() *KubeStorageVersionMigratorStatusApplyCo // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the ObservedGeneration field is set to the value of the last call. func (b *KubeStorageVersionMigratorStatusApplyConfiguration) WithObservedGeneration(value int64) *KubeStorageVersionMigratorStatusApplyConfiguration { - b.ObservedGeneration = &value + b.OperatorStatusApplyConfiguration.ObservedGeneration = &value return b } @@ -30,7 +30,7 @@ func (b *KubeStorageVersionMigratorStatusApplyConfiguration) WithConditions(valu if values[i] == nil { panic("nil value passed to WithConditions") } - b.Conditions = append(b.Conditions, *values[i]) + b.OperatorStatusApplyConfiguration.Conditions = append(b.OperatorStatusApplyConfiguration.Conditions, *values[i]) } return b } @@ -39,7 +39,7 @@ func (b *KubeStorageVersionMigratorStatusApplyConfiguration) WithConditions(valu // and returns the receiver, so that objects can be built by chaining "With" function invocations. 
// If called multiple times, the Version field is set to the value of the last call. func (b *KubeStorageVersionMigratorStatusApplyConfiguration) WithVersion(value string) *KubeStorageVersionMigratorStatusApplyConfiguration { - b.Version = &value + b.OperatorStatusApplyConfiguration.Version = &value return b } @@ -47,7 +47,7 @@ func (b *KubeStorageVersionMigratorStatusApplyConfiguration) WithVersion(value s // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the ReadyReplicas field is set to the value of the last call. func (b *KubeStorageVersionMigratorStatusApplyConfiguration) WithReadyReplicas(value int32) *KubeStorageVersionMigratorStatusApplyConfiguration { - b.ReadyReplicas = &value + b.OperatorStatusApplyConfiguration.ReadyReplicas = &value return b } @@ -55,7 +55,7 @@ func (b *KubeStorageVersionMigratorStatusApplyConfiguration) WithReadyReplicas(v // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the LatestAvailableRevision field is set to the value of the last call. func (b *KubeStorageVersionMigratorStatusApplyConfiguration) WithLatestAvailableRevision(value int32) *KubeStorageVersionMigratorStatusApplyConfiguration { - b.LatestAvailableRevision = &value + b.OperatorStatusApplyConfiguration.LatestAvailableRevision = &value return b } @@ -67,7 +67,7 @@ func (b *KubeStorageVersionMigratorStatusApplyConfiguration) WithGenerations(val if values[i] == nil { panic("nil value passed to WithGenerations") } - b.Generations = append(b.Generations, *values[i]) + b.OperatorStatusApplyConfiguration.Generations = append(b.OperatorStatusApplyConfiguration.Generations, *values[i]) } return b } diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/loadbalancerstrategy.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/loadbalancerstrategy.go index 46941714c..b8e83a02c 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/loadbalancerstrategy.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/loadbalancerstrategy.go @@ -3,16 +3,16 @@ package v1 import ( - v1 "github.com/openshift/api/operator/v1" + operatorv1 "github.com/openshift/api/operator/v1" ) // LoadBalancerStrategyApplyConfiguration represents a declarative configuration of the LoadBalancerStrategy type for use // with apply. type LoadBalancerStrategyApplyConfiguration struct { - Scope *v1.LoadBalancerScope `json:"scope,omitempty"` - AllowedSourceRanges []v1.CIDR `json:"allowedSourceRanges,omitempty"` + Scope *operatorv1.LoadBalancerScope `json:"scope,omitempty"` + AllowedSourceRanges []operatorv1.CIDR `json:"allowedSourceRanges,omitempty"` ProviderParameters *ProviderLoadBalancerParametersApplyConfiguration `json:"providerParameters,omitempty"` - DNSManagementPolicy *v1.LoadBalancerDNSManagementPolicy `json:"dnsManagementPolicy,omitempty"` + DNSManagementPolicy *operatorv1.LoadBalancerDNSManagementPolicy `json:"dnsManagementPolicy,omitempty"` } // LoadBalancerStrategyApplyConfiguration constructs a declarative configuration of the LoadBalancerStrategy type for use with @@ -24,7 +24,7 @@ func LoadBalancerStrategy() *LoadBalancerStrategyApplyConfiguration { // WithScope sets the Scope field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. 
// If called multiple times, the Scope field is set to the value of the last call. -func (b *LoadBalancerStrategyApplyConfiguration) WithScope(value v1.LoadBalancerScope) *LoadBalancerStrategyApplyConfiguration { +func (b *LoadBalancerStrategyApplyConfiguration) WithScope(value operatorv1.LoadBalancerScope) *LoadBalancerStrategyApplyConfiguration { b.Scope = &value return b } @@ -32,7 +32,7 @@ func (b *LoadBalancerStrategyApplyConfiguration) WithScope(value v1.LoadBalancer // WithAllowedSourceRanges adds the given value to the AllowedSourceRanges field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the AllowedSourceRanges field. -func (b *LoadBalancerStrategyApplyConfiguration) WithAllowedSourceRanges(values ...v1.CIDR) *LoadBalancerStrategyApplyConfiguration { +func (b *LoadBalancerStrategyApplyConfiguration) WithAllowedSourceRanges(values ...operatorv1.CIDR) *LoadBalancerStrategyApplyConfiguration { for i := range values { b.AllowedSourceRanges = append(b.AllowedSourceRanges, values[i]) } @@ -50,7 +50,7 @@ func (b *LoadBalancerStrategyApplyConfiguration) WithProviderParameters(value *P // WithDNSManagementPolicy sets the DNSManagementPolicy field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DNSManagementPolicy field is set to the value of the last call. -func (b *LoadBalancerStrategyApplyConfiguration) WithDNSManagementPolicy(value v1.LoadBalancerDNSManagementPolicy) *LoadBalancerStrategyApplyConfiguration { +func (b *LoadBalancerStrategyApplyConfiguration) WithDNSManagementPolicy(value operatorv1.LoadBalancerDNSManagementPolicy) *LoadBalancerStrategyApplyConfiguration { b.DNSManagementPolicy = &value return b } diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/loggingdestination.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/loggingdestination.go index de06e9cc4..36a7bd5c5 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/loggingdestination.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/loggingdestination.go @@ -3,13 +3,13 @@ package v1 import ( - v1 "github.com/openshift/api/operator/v1" + operatorv1 "github.com/openshift/api/operator/v1" ) // LoggingDestinationApplyConfiguration represents a declarative configuration of the LoggingDestination type for use // with apply. type LoggingDestinationApplyConfiguration struct { - Type *v1.LoggingDestinationType `json:"type,omitempty"` + Type *operatorv1.LoggingDestinationType `json:"type,omitempty"` Syslog *SyslogLoggingDestinationParametersApplyConfiguration `json:"syslog,omitempty"` Container *ContainerLoggingDestinationParametersApplyConfiguration `json:"container,omitempty"` } @@ -23,7 +23,7 @@ func LoggingDestination() *LoggingDestinationApplyConfiguration { // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. 
-func (b *LoggingDestinationApplyConfiguration) WithType(value v1.LoggingDestinationType) *LoggingDestinationApplyConfiguration { +func (b *LoggingDestinationApplyConfiguration) WithType(value operatorv1.LoggingDestinationType) *LoggingDestinationApplyConfiguration { b.Type = &value return b } diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/machineconfiguration.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/machineconfiguration.go index 36b1d6d23..35d2b867e 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/machineconfiguration.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/machineconfiguration.go @@ -3,21 +3,21 @@ package v1 import ( - apioperatorv1 "github.com/openshift/api/operator/v1" + operatorv1 "github.com/openshift/api/operator/v1" internal "github.com/openshift/client-go/operator/applyconfigurations/internal" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) // MachineConfigurationApplyConfiguration represents a declarative configuration of the MachineConfiguration type for use // with apply. type MachineConfigurationApplyConfiguration struct { - v1.TypeMetaApplyConfiguration `json:",inline"` - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Spec *MachineConfigurationSpecApplyConfiguration `json:"spec,omitempty"` - Status *MachineConfigurationStatusApplyConfiguration `json:"status,omitempty"` + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *MachineConfigurationSpecApplyConfiguration `json:"spec,omitempty"` + Status *MachineConfigurationStatusApplyConfiguration `json:"status,omitempty"` } // MachineConfiguration constructs a declarative configuration of the MachineConfiguration type for use with @@ -41,18 +41,18 @@ func MachineConfiguration(name string) *MachineConfigurationApplyConfiguration { // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! -func ExtractMachineConfiguration(machineConfiguration *apioperatorv1.MachineConfiguration, fieldManager string) (*MachineConfigurationApplyConfiguration, error) { +func ExtractMachineConfiguration(machineConfiguration *operatorv1.MachineConfiguration, fieldManager string) (*MachineConfigurationApplyConfiguration, error) { return extractMachineConfiguration(machineConfiguration, fieldManager, "") } // ExtractMachineConfigurationStatus is the same as ExtractMachineConfiguration except // that it extracts the status subresource applied configuration. // Experimental! 
-func ExtractMachineConfigurationStatus(machineConfiguration *apioperatorv1.MachineConfiguration, fieldManager string) (*MachineConfigurationApplyConfiguration, error) { +func ExtractMachineConfigurationStatus(machineConfiguration *operatorv1.MachineConfiguration, fieldManager string) (*MachineConfigurationApplyConfiguration, error) { return extractMachineConfiguration(machineConfiguration, fieldManager, "status") } -func extractMachineConfiguration(machineConfiguration *apioperatorv1.MachineConfiguration, fieldManager string, subresource string) (*MachineConfigurationApplyConfiguration, error) { +func extractMachineConfiguration(machineConfiguration *operatorv1.MachineConfiguration, fieldManager string, subresource string) (*MachineConfigurationApplyConfiguration, error) { b := &MachineConfigurationApplyConfiguration{} err := managedfields.ExtractInto(machineConfiguration, internal.Parser().Type("com.github.openshift.api.operator.v1.MachineConfiguration"), fieldManager, b, subresource) if err != nil { @@ -69,7 +69,7 @@ func extractMachineConfiguration(machineConfiguration *apioperatorv1.MachineConf // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *MachineConfigurationApplyConfiguration) WithKind(value string) *MachineConfigurationApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -77,7 +77,7 @@ func (b *MachineConfigurationApplyConfiguration) WithKind(value string) *Machine // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *MachineConfigurationApplyConfiguration) WithAPIVersion(value string) *MachineConfigurationApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -86,7 +86,7 @@ func (b *MachineConfigurationApplyConfiguration) WithAPIVersion(value string) *M // If called multiple times, the Name field is set to the value of the last call. func (b *MachineConfigurationApplyConfiguration) WithName(value string) *MachineConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -95,7 +95,7 @@ func (b *MachineConfigurationApplyConfiguration) WithName(value string) *Machine // If called multiple times, the GenerateName field is set to the value of the last call. func (b *MachineConfigurationApplyConfiguration) WithGenerateName(value string) *MachineConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -104,7 +104,7 @@ func (b *MachineConfigurationApplyConfiguration) WithGenerateName(value string) // If called multiple times, the Namespace field is set to the value of the last call. func (b *MachineConfigurationApplyConfiguration) WithNamespace(value string) *MachineConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -113,7 +113,7 @@ func (b *MachineConfigurationApplyConfiguration) WithNamespace(value string) *Ma // If called multiple times, the UID field is set to the value of the last call. 
func (b *MachineConfigurationApplyConfiguration) WithUID(value types.UID) *MachineConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -122,7 +122,7 @@ func (b *MachineConfigurationApplyConfiguration) WithUID(value types.UID) *Machi // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *MachineConfigurationApplyConfiguration) WithResourceVersion(value string) *MachineConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -131,25 +131,25 @@ func (b *MachineConfigurationApplyConfiguration) WithResourceVersion(value strin // If called multiple times, the Generation field is set to the value of the last call. func (b *MachineConfigurationApplyConfiguration) WithGeneration(value int64) *MachineConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CreationTimestamp field is set to the value of the last call. -func (b *MachineConfigurationApplyConfiguration) WithCreationTimestamp(value metav1.Time) *MachineConfigurationApplyConfiguration { +func (b *MachineConfigurationApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *MachineConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. -func (b *MachineConfigurationApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *MachineConfigurationApplyConfiguration { +func (b *MachineConfigurationApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *MachineConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -158,7 +158,7 @@ func (b *MachineConfigurationApplyConfiguration) WithDeletionTimestamp(value met // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *MachineConfigurationApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *MachineConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -168,11 +168,11 @@ func (b *MachineConfigurationApplyConfiguration) WithDeletionGracePeriodSeconds( // overwriting an existing map entries in Labels field with the same key. 
func (b *MachineConfigurationApplyConfiguration) WithLabels(entries map[string]string) *MachineConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -183,11 +183,11 @@ func (b *MachineConfigurationApplyConfiguration) WithLabels(entries map[string]s // overwriting an existing map entries in Annotations field with the same key. func (b *MachineConfigurationApplyConfiguration) WithAnnotations(entries map[string]string) *MachineConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -195,13 +195,13 @@ func (b *MachineConfigurationApplyConfiguration) WithAnnotations(entries map[str // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the OwnerReferences field. -func (b *MachineConfigurationApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *MachineConfigurationApplyConfiguration { +func (b *MachineConfigurationApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *MachineConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -212,14 +212,14 @@ func (b *MachineConfigurationApplyConfiguration) WithOwnerReferences(values ...* func (b *MachineConfigurationApplyConfiguration) WithFinalizers(values ...string) *MachineConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } func (b *MachineConfigurationApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} } } @@ -242,5 +242,5 @@ func (b *MachineConfigurationApplyConfiguration) WithStatus(value *MachineConfig // GetName retrieves the value of the Name field in the declarative configuration. 
func (b *MachineConfigurationApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/machineconfigurationspec.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/machineconfigurationspec.go index 416406329..cee3c69fc 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/machineconfigurationspec.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/machineconfigurationspec.go @@ -25,7 +25,7 @@ func MachineConfigurationSpec() *MachineConfigurationSpecApplyConfiguration { // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the ManagementState field is set to the value of the last call. func (b *MachineConfigurationSpecApplyConfiguration) WithManagementState(value operatorv1.ManagementState) *MachineConfigurationSpecApplyConfiguration { - b.ManagementState = &value + b.OperatorSpecApplyConfiguration.ManagementState = &value return b } @@ -33,7 +33,7 @@ func (b *MachineConfigurationSpecApplyConfiguration) WithManagementState(value o // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the LogLevel field is set to the value of the last call. func (b *MachineConfigurationSpecApplyConfiguration) WithLogLevel(value operatorv1.LogLevel) *MachineConfigurationSpecApplyConfiguration { - b.LogLevel = &value + b.OperatorSpecApplyConfiguration.LogLevel = &value return b } @@ -41,7 +41,7 @@ func (b *MachineConfigurationSpecApplyConfiguration) WithLogLevel(value operator // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the OperatorLogLevel field is set to the value of the last call. func (b *MachineConfigurationSpecApplyConfiguration) WithOperatorLogLevel(value operatorv1.LogLevel) *MachineConfigurationSpecApplyConfiguration { - b.OperatorLogLevel = &value + b.OperatorSpecApplyConfiguration.OperatorLogLevel = &value return b } @@ -49,7 +49,7 @@ func (b *MachineConfigurationSpecApplyConfiguration) WithOperatorLogLevel(value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the UnsupportedConfigOverrides field is set to the value of the last call. func (b *MachineConfigurationSpecApplyConfiguration) WithUnsupportedConfigOverrides(value runtime.RawExtension) *MachineConfigurationSpecApplyConfiguration { - b.UnsupportedConfigOverrides = &value + b.OperatorSpecApplyConfiguration.UnsupportedConfigOverrides = &value return b } @@ -57,7 +57,7 @@ func (b *MachineConfigurationSpecApplyConfiguration) WithUnsupportedConfigOverri // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the ObservedConfig field is set to the value of the last call. 
func (b *MachineConfigurationSpecApplyConfiguration) WithObservedConfig(value runtime.RawExtension) *MachineConfigurationSpecApplyConfiguration { - b.ObservedConfig = &value + b.OperatorSpecApplyConfiguration.ObservedConfig = &value return b } @@ -65,7 +65,7 @@ func (b *MachineConfigurationSpecApplyConfiguration) WithObservedConfig(value ru // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the ForceRedeploymentReason field is set to the value of the last call. func (b *MachineConfigurationSpecApplyConfiguration) WithForceRedeploymentReason(value string) *MachineConfigurationSpecApplyConfiguration { - b.ForceRedeploymentReason = &value + b.StaticPodOperatorSpecApplyConfiguration.ForceRedeploymentReason = &value return b } @@ -73,7 +73,7 @@ func (b *MachineConfigurationSpecApplyConfiguration) WithForceRedeploymentReason // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the FailedRevisionLimit field is set to the value of the last call. func (b *MachineConfigurationSpecApplyConfiguration) WithFailedRevisionLimit(value int32) *MachineConfigurationSpecApplyConfiguration { - b.FailedRevisionLimit = &value + b.StaticPodOperatorSpecApplyConfiguration.FailedRevisionLimit = &value return b } @@ -81,7 +81,7 @@ func (b *MachineConfigurationSpecApplyConfiguration) WithFailedRevisionLimit(val // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the SucceededRevisionLimit field is set to the value of the last call. func (b *MachineConfigurationSpecApplyConfiguration) WithSucceededRevisionLimit(value int32) *MachineConfigurationSpecApplyConfiguration { - b.SucceededRevisionLimit = &value + b.StaticPodOperatorSpecApplyConfiguration.SucceededRevisionLimit = &value return b } diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/machineconfigurationstatus.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/machineconfigurationstatus.go index 95956ac09..ad3884788 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/machineconfigurationstatus.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/machineconfigurationstatus.go @@ -3,14 +3,14 @@ package v1 import ( - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) // MachineConfigurationStatusApplyConfiguration represents a declarative configuration of the MachineConfigurationStatus type for use // with apply. type MachineConfigurationStatusApplyConfiguration struct { ObservedGeneration *int64 `json:"observedGeneration,omitempty"` - Conditions []v1.ConditionApplyConfiguration `json:"conditions,omitempty"` + Conditions []metav1.ConditionApplyConfiguration `json:"conditions,omitempty"` NodeDisruptionPolicyStatus *NodeDisruptionPolicyStatusApplyConfiguration `json:"nodeDisruptionPolicyStatus,omitempty"` } @@ -31,7 +31,7 @@ func (b *MachineConfigurationStatusApplyConfiguration) WithObservedGeneration(va // WithConditions adds the given value to the Conditions field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the Conditions field. 
-func (b *MachineConfigurationStatusApplyConfiguration) WithConditions(values ...*v1.ConditionApplyConfiguration) *MachineConfigurationStatusApplyConfiguration { +func (b *MachineConfigurationStatusApplyConfiguration) WithConditions(values ...*metav1.ConditionApplyConfiguration) *MachineConfigurationStatusApplyConfiguration { for i := range values { if values[i] == nil { panic("nil value passed to WithConditions") diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/machinemanager.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/machinemanager.go index 2764cecdc..d4a9f3c2c 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/machinemanager.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/machinemanager.go @@ -3,15 +3,15 @@ package v1 import ( - v1 "github.com/openshift/api/operator/v1" + operatorv1 "github.com/openshift/api/operator/v1" ) // MachineManagerApplyConfiguration represents a declarative configuration of the MachineManager type for use // with apply. type MachineManagerApplyConfiguration struct { - Resource *v1.MachineManagerMachineSetsResourceType `json:"resource,omitempty"` - APIGroup *v1.MachineManagerMachineSetsAPIGroupType `json:"apiGroup,omitempty"` - Selection *MachineManagerSelectorApplyConfiguration `json:"selection,omitempty"` + Resource *operatorv1.MachineManagerMachineSetsResourceType `json:"resource,omitempty"` + APIGroup *operatorv1.MachineManagerMachineSetsAPIGroupType `json:"apiGroup,omitempty"` + Selection *MachineManagerSelectorApplyConfiguration `json:"selection,omitempty"` } // MachineManagerApplyConfiguration constructs a declarative configuration of the MachineManager type for use with @@ -23,7 +23,7 @@ func MachineManager() *MachineManagerApplyConfiguration { // WithResource sets the Resource field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Resource field is set to the value of the last call. -func (b *MachineManagerApplyConfiguration) WithResource(value v1.MachineManagerMachineSetsResourceType) *MachineManagerApplyConfiguration { +func (b *MachineManagerApplyConfiguration) WithResource(value operatorv1.MachineManagerMachineSetsResourceType) *MachineManagerApplyConfiguration { b.Resource = &value return b } @@ -31,7 +31,7 @@ func (b *MachineManagerApplyConfiguration) WithResource(value v1.MachineManagerM // WithAPIGroup sets the APIGroup field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIGroup field is set to the value of the last call. 
-func (b *MachineManagerApplyConfiguration) WithAPIGroup(value v1.MachineManagerMachineSetsAPIGroupType) *MachineManagerApplyConfiguration { +func (b *MachineManagerApplyConfiguration) WithAPIGroup(value operatorv1.MachineManagerMachineSetsAPIGroupType) *MachineManagerApplyConfiguration { b.APIGroup = &value return b } diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/machinemanagerselector.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/machinemanagerselector.go index ef88c772f..3bb44f21c 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/machinemanagerselector.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/machinemanagerselector.go @@ -3,14 +3,14 @@ package v1 import ( - v1 "github.com/openshift/api/operator/v1" + operatorv1 "github.com/openshift/api/operator/v1" ) // MachineManagerSelectorApplyConfiguration represents a declarative configuration of the MachineManagerSelector type for use // with apply. type MachineManagerSelectorApplyConfiguration struct { - Mode *v1.MachineManagerSelectorMode `json:"mode,omitempty"` - Partial *PartialSelectorApplyConfiguration `json:"partial,omitempty"` + Mode *operatorv1.MachineManagerSelectorMode `json:"mode,omitempty"` + Partial *PartialSelectorApplyConfiguration `json:"partial,omitempty"` } // MachineManagerSelectorApplyConfiguration constructs a declarative configuration of the MachineManagerSelector type for use with @@ -22,7 +22,7 @@ func MachineManagerSelector() *MachineManagerSelectorApplyConfiguration { // WithMode sets the Mode field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Mode field is set to the value of the last call. -func (b *MachineManagerSelectorApplyConfiguration) WithMode(value v1.MachineManagerSelectorMode) *MachineManagerSelectorApplyConfiguration { +func (b *MachineManagerSelectorApplyConfiguration) WithMode(value operatorv1.MachineManagerSelectorMode) *MachineManagerSelectorApplyConfiguration { b.Mode = &value return b } diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/netflowconfig.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/netflowconfig.go index 34d2bb365..868906043 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/netflowconfig.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/netflowconfig.go @@ -3,13 +3,13 @@ package v1 import ( - v1 "github.com/openshift/api/operator/v1" + operatorv1 "github.com/openshift/api/operator/v1" ) // NetFlowConfigApplyConfiguration represents a declarative configuration of the NetFlowConfig type for use // with apply. type NetFlowConfigApplyConfiguration struct { - Collectors []v1.IPPort `json:"collectors,omitempty"` + Collectors []operatorv1.IPPort `json:"collectors,omitempty"` } // NetFlowConfigApplyConfiguration constructs a declarative configuration of the NetFlowConfig type for use with @@ -21,7 +21,7 @@ func NetFlowConfig() *NetFlowConfigApplyConfiguration { // WithCollectors adds the given value to the Collectors field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. 
// If called multiple times, values provided by each call will be appended to the Collectors field. -func (b *NetFlowConfigApplyConfiguration) WithCollectors(values ...v1.IPPort) *NetFlowConfigApplyConfiguration { +func (b *NetFlowConfigApplyConfiguration) WithCollectors(values ...operatorv1.IPPort) *NetFlowConfigApplyConfiguration { for i := range values { b.Collectors = append(b.Collectors, values[i]) } diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/network.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/network.go index fd66e0065..0bdf453af 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/network.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/network.go @@ -3,21 +3,21 @@ package v1 import ( - apioperatorv1 "github.com/openshift/api/operator/v1" + operatorv1 "github.com/openshift/api/operator/v1" internal "github.com/openshift/client-go/operator/applyconfigurations/internal" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) // NetworkApplyConfiguration represents a declarative configuration of the Network type for use // with apply. type NetworkApplyConfiguration struct { - v1.TypeMetaApplyConfiguration `json:",inline"` - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Spec *NetworkSpecApplyConfiguration `json:"spec,omitempty"` - Status *NetworkStatusApplyConfiguration `json:"status,omitempty"` + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *NetworkSpecApplyConfiguration `json:"spec,omitempty"` + Status *NetworkStatusApplyConfiguration `json:"status,omitempty"` } // Network constructs a declarative configuration of the Network type for use with @@ -41,18 +41,18 @@ func Network(name string) *NetworkApplyConfiguration { // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! -func ExtractNetwork(network *apioperatorv1.Network, fieldManager string) (*NetworkApplyConfiguration, error) { +func ExtractNetwork(network *operatorv1.Network, fieldManager string) (*NetworkApplyConfiguration, error) { return extractNetwork(network, fieldManager, "") } // ExtractNetworkStatus is the same as ExtractNetwork except // that it extracts the status subresource applied configuration. // Experimental! 
-func ExtractNetworkStatus(network *apioperatorv1.Network, fieldManager string) (*NetworkApplyConfiguration, error) { +func ExtractNetworkStatus(network *operatorv1.Network, fieldManager string) (*NetworkApplyConfiguration, error) { return extractNetwork(network, fieldManager, "status") } -func extractNetwork(network *apioperatorv1.Network, fieldManager string, subresource string) (*NetworkApplyConfiguration, error) { +func extractNetwork(network *operatorv1.Network, fieldManager string, subresource string) (*NetworkApplyConfiguration, error) { b := &NetworkApplyConfiguration{} err := managedfields.ExtractInto(network, internal.Parser().Type("com.github.openshift.api.operator.v1.Network"), fieldManager, b, subresource) if err != nil { @@ -69,7 +69,7 @@ func extractNetwork(network *apioperatorv1.Network, fieldManager string, subreso // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *NetworkApplyConfiguration) WithKind(value string) *NetworkApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -77,7 +77,7 @@ func (b *NetworkApplyConfiguration) WithKind(value string) *NetworkApplyConfigur // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *NetworkApplyConfiguration) WithAPIVersion(value string) *NetworkApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -86,7 +86,7 @@ func (b *NetworkApplyConfiguration) WithAPIVersion(value string) *NetworkApplyCo // If called multiple times, the Name field is set to the value of the last call. func (b *NetworkApplyConfiguration) WithName(value string) *NetworkApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -95,7 +95,7 @@ func (b *NetworkApplyConfiguration) WithName(value string) *NetworkApplyConfigur // If called multiple times, the GenerateName field is set to the value of the last call. func (b *NetworkApplyConfiguration) WithGenerateName(value string) *NetworkApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -104,7 +104,7 @@ func (b *NetworkApplyConfiguration) WithGenerateName(value string) *NetworkApply // If called multiple times, the Namespace field is set to the value of the last call. func (b *NetworkApplyConfiguration) WithNamespace(value string) *NetworkApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -113,7 +113,7 @@ func (b *NetworkApplyConfiguration) WithNamespace(value string) *NetworkApplyCon // If called multiple times, the UID field is set to the value of the last call. func (b *NetworkApplyConfiguration) WithUID(value types.UID) *NetworkApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -122,7 +122,7 @@ func (b *NetworkApplyConfiguration) WithUID(value types.UID) *NetworkApplyConfig // If called multiple times, the ResourceVersion field is set to the value of the last call. 
func (b *NetworkApplyConfiguration) WithResourceVersion(value string) *NetworkApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -131,25 +131,25 @@ func (b *NetworkApplyConfiguration) WithResourceVersion(value string) *NetworkAp // If called multiple times, the Generation field is set to the value of the last call. func (b *NetworkApplyConfiguration) WithGeneration(value int64) *NetworkApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CreationTimestamp field is set to the value of the last call. -func (b *NetworkApplyConfiguration) WithCreationTimestamp(value metav1.Time) *NetworkApplyConfiguration { +func (b *NetworkApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *NetworkApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. -func (b *NetworkApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *NetworkApplyConfiguration { +func (b *NetworkApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *NetworkApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -158,7 +158,7 @@ func (b *NetworkApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *Ne // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *NetworkApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *NetworkApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -168,11 +168,11 @@ func (b *NetworkApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) // overwriting an existing map entries in Labels field with the same key. func (b *NetworkApplyConfiguration) WithLabels(entries map[string]string) *NetworkApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -183,11 +183,11 @@ func (b *NetworkApplyConfiguration) WithLabels(entries map[string]string) *Netwo // overwriting an existing map entries in Annotations field with the same key. 
func (b *NetworkApplyConfiguration) WithAnnotations(entries map[string]string) *NetworkApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -195,13 +195,13 @@ func (b *NetworkApplyConfiguration) WithAnnotations(entries map[string]string) * // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the OwnerReferences field. -func (b *NetworkApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *NetworkApplyConfiguration { +func (b *NetworkApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *NetworkApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -212,14 +212,14 @@ func (b *NetworkApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerRefer func (b *NetworkApplyConfiguration) WithFinalizers(values ...string) *NetworkApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } func (b *NetworkApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} } } @@ -242,5 +242,5 @@ func (b *NetworkApplyConfiguration) WithStatus(value *NetworkStatusApplyConfigur // GetName retrieves the value of the Name field in the declarative configuration. func (b *NetworkApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/networkspec.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/networkspec.go index a13439542..66803aa95 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/networkspec.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/networkspec.go @@ -35,7 +35,7 @@ func NetworkSpec() *NetworkSpecApplyConfiguration { // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the ManagementState field is set to the value of the last call. 
func (b *NetworkSpecApplyConfiguration) WithManagementState(value operatorv1.ManagementState) *NetworkSpecApplyConfiguration { - b.ManagementState = &value + b.OperatorSpecApplyConfiguration.ManagementState = &value return b } @@ -43,7 +43,7 @@ func (b *NetworkSpecApplyConfiguration) WithManagementState(value operatorv1.Man // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the LogLevel field is set to the value of the last call. func (b *NetworkSpecApplyConfiguration) WithLogLevel(value operatorv1.LogLevel) *NetworkSpecApplyConfiguration { - b.LogLevel = &value + b.OperatorSpecApplyConfiguration.LogLevel = &value return b } @@ -51,7 +51,7 @@ func (b *NetworkSpecApplyConfiguration) WithLogLevel(value operatorv1.LogLevel) // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the OperatorLogLevel field is set to the value of the last call. func (b *NetworkSpecApplyConfiguration) WithOperatorLogLevel(value operatorv1.LogLevel) *NetworkSpecApplyConfiguration { - b.OperatorLogLevel = &value + b.OperatorSpecApplyConfiguration.OperatorLogLevel = &value return b } @@ -59,7 +59,7 @@ func (b *NetworkSpecApplyConfiguration) WithOperatorLogLevel(value operatorv1.Lo // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the UnsupportedConfigOverrides field is set to the value of the last call. func (b *NetworkSpecApplyConfiguration) WithUnsupportedConfigOverrides(value runtime.RawExtension) *NetworkSpecApplyConfiguration { - b.UnsupportedConfigOverrides = &value + b.OperatorSpecApplyConfiguration.UnsupportedConfigOverrides = &value return b } @@ -67,7 +67,7 @@ func (b *NetworkSpecApplyConfiguration) WithUnsupportedConfigOverrides(value run // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the ObservedConfig field is set to the value of the last call. func (b *NetworkSpecApplyConfiguration) WithObservedConfig(value runtime.RawExtension) *NetworkSpecApplyConfiguration { - b.ObservedConfig = &value + b.OperatorSpecApplyConfiguration.ObservedConfig = &value return b } diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/networkstatus.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/networkstatus.go index 85d74e0db..9753b2161 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/networkstatus.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/networkstatus.go @@ -18,7 +18,7 @@ func NetworkStatus() *NetworkStatusApplyConfiguration { // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the ObservedGeneration field is set to the value of the last call. 
func (b *NetworkStatusApplyConfiguration) WithObservedGeneration(value int64) *NetworkStatusApplyConfiguration { - b.ObservedGeneration = &value + b.OperatorStatusApplyConfiguration.ObservedGeneration = &value return b } @@ -30,7 +30,7 @@ func (b *NetworkStatusApplyConfiguration) WithConditions(values ...*OperatorCond if values[i] == nil { panic("nil value passed to WithConditions") } - b.Conditions = append(b.Conditions, *values[i]) + b.OperatorStatusApplyConfiguration.Conditions = append(b.OperatorStatusApplyConfiguration.Conditions, *values[i]) } return b } @@ -39,7 +39,7 @@ func (b *NetworkStatusApplyConfiguration) WithConditions(values ...*OperatorCond // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Version field is set to the value of the last call. func (b *NetworkStatusApplyConfiguration) WithVersion(value string) *NetworkStatusApplyConfiguration { - b.Version = &value + b.OperatorStatusApplyConfiguration.Version = &value return b } @@ -47,7 +47,7 @@ func (b *NetworkStatusApplyConfiguration) WithVersion(value string) *NetworkStat // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the ReadyReplicas field is set to the value of the last call. func (b *NetworkStatusApplyConfiguration) WithReadyReplicas(value int32) *NetworkStatusApplyConfiguration { - b.ReadyReplicas = &value + b.OperatorStatusApplyConfiguration.ReadyReplicas = &value return b } @@ -55,7 +55,7 @@ func (b *NetworkStatusApplyConfiguration) WithReadyReplicas(value int32) *Networ // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the LatestAvailableRevision field is set to the value of the last call. func (b *NetworkStatusApplyConfiguration) WithLatestAvailableRevision(value int32) *NetworkStatusApplyConfiguration { - b.LatestAvailableRevision = &value + b.OperatorStatusApplyConfiguration.LatestAvailableRevision = &value return b } @@ -67,7 +67,7 @@ func (b *NetworkStatusApplyConfiguration) WithGenerations(values ...*GenerationS if values[i] == nil { panic("nil value passed to WithGenerations") } - b.Generations = append(b.Generations, *values[i]) + b.OperatorStatusApplyConfiguration.Generations = append(b.OperatorStatusApplyConfiguration.Generations, *values[i]) } return b } diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/nodedisruptionpolicyspecaction.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/nodedisruptionpolicyspecaction.go index fb5fb5443..2421469d8 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/nodedisruptionpolicyspecaction.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/nodedisruptionpolicyspecaction.go @@ -3,15 +3,15 @@ package v1 import ( - v1 "github.com/openshift/api/operator/v1" + operatorv1 "github.com/openshift/api/operator/v1" ) // NodeDisruptionPolicySpecActionApplyConfiguration represents a declarative configuration of the NodeDisruptionPolicySpecAction type for use // with apply. 
type NodeDisruptionPolicySpecActionApplyConfiguration struct { - Type *v1.NodeDisruptionPolicySpecActionType `json:"type,omitempty"` - Reload *ReloadServiceApplyConfiguration `json:"reload,omitempty"` - Restart *RestartServiceApplyConfiguration `json:"restart,omitempty"` + Type *operatorv1.NodeDisruptionPolicySpecActionType `json:"type,omitempty"` + Reload *ReloadServiceApplyConfiguration `json:"reload,omitempty"` + Restart *RestartServiceApplyConfiguration `json:"restart,omitempty"` } // NodeDisruptionPolicySpecActionApplyConfiguration constructs a declarative configuration of the NodeDisruptionPolicySpecAction type for use with @@ -23,7 +23,7 @@ func NodeDisruptionPolicySpecAction() *NodeDisruptionPolicySpecActionApplyConfig // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. -func (b *NodeDisruptionPolicySpecActionApplyConfiguration) WithType(value v1.NodeDisruptionPolicySpecActionType) *NodeDisruptionPolicySpecActionApplyConfiguration { +func (b *NodeDisruptionPolicySpecActionApplyConfiguration) WithType(value operatorv1.NodeDisruptionPolicySpecActionType) *NodeDisruptionPolicySpecActionApplyConfiguration { b.Type = &value return b } diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/nodedisruptionpolicyspecunit.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/nodedisruptionpolicyspecunit.go index 6f62f343a..d368f1c0c 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/nodedisruptionpolicyspecunit.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/nodedisruptionpolicyspecunit.go @@ -3,13 +3,13 @@ package v1 import ( - v1 "github.com/openshift/api/operator/v1" + operatorv1 "github.com/openshift/api/operator/v1" ) // NodeDisruptionPolicySpecUnitApplyConfiguration represents a declarative configuration of the NodeDisruptionPolicySpecUnit type for use // with apply. type NodeDisruptionPolicySpecUnitApplyConfiguration struct { - Name *v1.NodeDisruptionPolicyServiceName `json:"name,omitempty"` + Name *operatorv1.NodeDisruptionPolicyServiceName `json:"name,omitempty"` Actions []NodeDisruptionPolicySpecActionApplyConfiguration `json:"actions,omitempty"` } @@ -22,7 +22,7 @@ func NodeDisruptionPolicySpecUnit() *NodeDisruptionPolicySpecUnitApplyConfigurat // WithName sets the Name field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Name field is set to the value of the last call. 
-func (b *NodeDisruptionPolicySpecUnitApplyConfiguration) WithName(value v1.NodeDisruptionPolicyServiceName) *NodeDisruptionPolicySpecUnitApplyConfiguration { +func (b *NodeDisruptionPolicySpecUnitApplyConfiguration) WithName(value operatorv1.NodeDisruptionPolicyServiceName) *NodeDisruptionPolicySpecUnitApplyConfiguration { b.Name = &value return b } diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/nodedisruptionpolicystatusaction.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/nodedisruptionpolicystatusaction.go index ec621575c..05afe97a6 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/nodedisruptionpolicystatusaction.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/nodedisruptionpolicystatusaction.go @@ -3,15 +3,15 @@ package v1 import ( - v1 "github.com/openshift/api/operator/v1" + operatorv1 "github.com/openshift/api/operator/v1" ) // NodeDisruptionPolicyStatusActionApplyConfiguration represents a declarative configuration of the NodeDisruptionPolicyStatusAction type for use // with apply. type NodeDisruptionPolicyStatusActionApplyConfiguration struct { - Type *v1.NodeDisruptionPolicyStatusActionType `json:"type,omitempty"` - Reload *ReloadServiceApplyConfiguration `json:"reload,omitempty"` - Restart *RestartServiceApplyConfiguration `json:"restart,omitempty"` + Type *operatorv1.NodeDisruptionPolicyStatusActionType `json:"type,omitempty"` + Reload *ReloadServiceApplyConfiguration `json:"reload,omitempty"` + Restart *RestartServiceApplyConfiguration `json:"restart,omitempty"` } // NodeDisruptionPolicyStatusActionApplyConfiguration constructs a declarative configuration of the NodeDisruptionPolicyStatusAction type for use with @@ -23,7 +23,7 @@ func NodeDisruptionPolicyStatusAction() *NodeDisruptionPolicyStatusActionApplyCo // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. -func (b *NodeDisruptionPolicyStatusActionApplyConfiguration) WithType(value v1.NodeDisruptionPolicyStatusActionType) *NodeDisruptionPolicyStatusActionApplyConfiguration { +func (b *NodeDisruptionPolicyStatusActionApplyConfiguration) WithType(value operatorv1.NodeDisruptionPolicyStatusActionType) *NodeDisruptionPolicyStatusActionApplyConfiguration { b.Type = &value return b } diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/nodedisruptionpolicystatusunit.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/nodedisruptionpolicystatusunit.go index f4dd5fac7..5d97a2661 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/nodedisruptionpolicystatusunit.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/nodedisruptionpolicystatusunit.go @@ -3,13 +3,13 @@ package v1 import ( - v1 "github.com/openshift/api/operator/v1" + operatorv1 "github.com/openshift/api/operator/v1" ) // NodeDisruptionPolicyStatusUnitApplyConfiguration represents a declarative configuration of the NodeDisruptionPolicyStatusUnit type for use // with apply. 
type NodeDisruptionPolicyStatusUnitApplyConfiguration struct { - Name *v1.NodeDisruptionPolicyServiceName `json:"name,omitempty"` + Name *operatorv1.NodeDisruptionPolicyServiceName `json:"name,omitempty"` Actions []NodeDisruptionPolicyStatusActionApplyConfiguration `json:"actions,omitempty"` } @@ -22,7 +22,7 @@ func NodeDisruptionPolicyStatusUnit() *NodeDisruptionPolicyStatusUnitApplyConfig // WithName sets the Name field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Name field is set to the value of the last call. -func (b *NodeDisruptionPolicyStatusUnitApplyConfiguration) WithName(value v1.NodeDisruptionPolicyServiceName) *NodeDisruptionPolicyStatusUnitApplyConfiguration { +func (b *NodeDisruptionPolicyStatusUnitApplyConfiguration) WithName(value operatorv1.NodeDisruptionPolicyServiceName) *NodeDisruptionPolicyStatusUnitApplyConfiguration { b.Name = &value return b } diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/nodeplacement.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/nodeplacement.go index 5f387ac71..a9fca2963 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/nodeplacement.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/nodeplacement.go @@ -4,14 +4,14 @@ package v1 import ( corev1 "k8s.io/api/core/v1" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) // NodePlacementApplyConfiguration represents a declarative configuration of the NodePlacement type for use // with apply. type NodePlacementApplyConfiguration struct { - NodeSelector *v1.LabelSelectorApplyConfiguration `json:"nodeSelector,omitempty"` - Tolerations []corev1.Toleration `json:"tolerations,omitempty"` + NodeSelector *metav1.LabelSelectorApplyConfiguration `json:"nodeSelector,omitempty"` + Tolerations []corev1.Toleration `json:"tolerations,omitempty"` } // NodePlacementApplyConfiguration constructs a declarative configuration of the NodePlacement type for use with @@ -23,7 +23,7 @@ func NodePlacement() *NodePlacementApplyConfiguration { // WithNodeSelector sets the NodeSelector field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the NodeSelector field is set to the value of the last call. 
-func (b *NodePlacementApplyConfiguration) WithNodeSelector(value *v1.LabelSelectorApplyConfiguration) *NodePlacementApplyConfiguration { +func (b *NodePlacementApplyConfiguration) WithNodeSelector(value *metav1.LabelSelectorApplyConfiguration) *NodePlacementApplyConfiguration { b.NodeSelector = value return b } diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/nodeportstrategy.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/nodeportstrategy.go index 01167311f..cb3f65687 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/nodeportstrategy.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/nodeportstrategy.go @@ -3,13 +3,13 @@ package v1 import ( - v1 "github.com/openshift/api/operator/v1" + operatorv1 "github.com/openshift/api/operator/v1" ) // NodePortStrategyApplyConfiguration represents a declarative configuration of the NodePortStrategy type for use // with apply. type NodePortStrategyApplyConfiguration struct { - Protocol *v1.IngressControllerProtocol `json:"protocol,omitempty"` + Protocol *operatorv1.IngressControllerProtocol `json:"protocol,omitempty"` } // NodePortStrategyApplyConfiguration constructs a declarative configuration of the NodePortStrategy type for use with @@ -21,7 +21,7 @@ func NodePortStrategy() *NodePortStrategyApplyConfiguration { // WithProtocol sets the Protocol field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Protocol field is set to the value of the last call. -func (b *NodePortStrategyApplyConfiguration) WithProtocol(value v1.IngressControllerProtocol) *NodePortStrategyApplyConfiguration { +func (b *NodePortStrategyApplyConfiguration) WithProtocol(value operatorv1.IngressControllerProtocol) *NodePortStrategyApplyConfiguration { b.Protocol = &value return b } diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/nodestatus.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/nodestatus.go index b5a3cac33..3c53a88f0 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/nodestatus.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/nodestatus.go @@ -3,21 +3,21 @@ package v1 import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) // NodeStatusApplyConfiguration represents a declarative configuration of the NodeStatus type for use // with apply. 
type NodeStatusApplyConfiguration struct { - NodeName *string `json:"nodeName,omitempty"` - CurrentRevision *int32 `json:"currentRevision,omitempty"` - TargetRevision *int32 `json:"targetRevision,omitempty"` - LastFailedRevision *int32 `json:"lastFailedRevision,omitempty"` - LastFailedTime *v1.Time `json:"lastFailedTime,omitempty"` - LastFailedReason *string `json:"lastFailedReason,omitempty"` - LastFailedCount *int `json:"lastFailedCount,omitempty"` - LastFallbackCount *int `json:"lastFallbackCount,omitempty"` - LastFailedRevisionErrors []string `json:"lastFailedRevisionErrors,omitempty"` + NodeName *string `json:"nodeName,omitempty"` + CurrentRevision *int32 `json:"currentRevision,omitempty"` + TargetRevision *int32 `json:"targetRevision,omitempty"` + LastFailedRevision *int32 `json:"lastFailedRevision,omitempty"` + LastFailedTime *metav1.Time `json:"lastFailedTime,omitempty"` + LastFailedReason *string `json:"lastFailedReason,omitempty"` + LastFailedCount *int `json:"lastFailedCount,omitempty"` + LastFallbackCount *int `json:"lastFallbackCount,omitempty"` + LastFailedRevisionErrors []string `json:"lastFailedRevisionErrors,omitempty"` } // NodeStatusApplyConfiguration constructs a declarative configuration of the NodeStatus type for use with @@ -61,7 +61,7 @@ func (b *NodeStatusApplyConfiguration) WithLastFailedRevision(value int32) *Node // WithLastFailedTime sets the LastFailedTime field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the LastFailedTime field is set to the value of the last call. -func (b *NodeStatusApplyConfiguration) WithLastFailedTime(value v1.Time) *NodeStatusApplyConfiguration { +func (b *NodeStatusApplyConfiguration) WithLastFailedTime(value metav1.Time) *NodeStatusApplyConfiguration { b.LastFailedTime = &value return b } diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/olm.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/olm.go new file mode 100644 index 000000000..93b3c4e79 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/olm.go @@ -0,0 +1,246 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + operatorv1 "github.com/openshift/api/operator/v1" + internal "github.com/openshift/client-go/operator/applyconfigurations/internal" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + managedfields "k8s.io/apimachinery/pkg/util/managedfields" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// OLMApplyConfiguration represents a declarative configuration of the OLM type for use +// with apply. +type OLMApplyConfiguration struct { + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *OLMSpecApplyConfiguration `json:"spec,omitempty"` + Status *OLMStatusApplyConfiguration `json:"status,omitempty"` +} + +// OLM constructs a declarative configuration of the OLM type for use with +// apply. +func OLM(name string) *OLMApplyConfiguration { + b := &OLMApplyConfiguration{} + b.WithName(name) + b.WithKind("OLM") + b.WithAPIVersion("operator.openshift.io/v1") + return b +} + +// ExtractOLM extracts the applied configuration owned by fieldManager from +// oLM. 
If no managedFields are found in oLM for fieldManager, a +// OLMApplyConfiguration is returned with only the Name, Namespace (if applicable), +// APIVersion and Kind populated. It is possible that no managed fields were found for because other +// field managers have taken ownership of all the fields previously owned by fieldManager, or because +// the fieldManager never owned fields any fields. +// oLM must be a unmodified OLM API object that was retrieved from the Kubernetes API. +// ExtractOLM provides a way to perform a extract/modify-in-place/apply workflow. +// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously +// applied if another fieldManager has updated or force applied any of the previously applied fields. +// Experimental! +func ExtractOLM(oLM *operatorv1.OLM, fieldManager string) (*OLMApplyConfiguration, error) { + return extractOLM(oLM, fieldManager, "") +} + +// ExtractOLMStatus is the same as ExtractOLM except +// that it extracts the status subresource applied configuration. +// Experimental! +func ExtractOLMStatus(oLM *operatorv1.OLM, fieldManager string) (*OLMApplyConfiguration, error) { + return extractOLM(oLM, fieldManager, "status") +} + +func extractOLM(oLM *operatorv1.OLM, fieldManager string, subresource string) (*OLMApplyConfiguration, error) { + b := &OLMApplyConfiguration{} + err := managedfields.ExtractInto(oLM, internal.Parser().Type("com.github.openshift.api.operator.v1.OLM"), fieldManager, b, subresource) + if err != nil { + return nil, err + } + b.WithName(oLM.Name) + + b.WithKind("OLM") + b.WithAPIVersion("operator.openshift.io/v1") + return b, nil +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. +func (b *OLMApplyConfiguration) WithKind(value string) *OLMApplyConfiguration { + b.TypeMetaApplyConfiguration.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *OLMApplyConfiguration) WithAPIVersion(value string) *OLMApplyConfiguration { + b.TypeMetaApplyConfiguration.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *OLMApplyConfiguration) WithName(value string) *OLMApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. 
+func (b *OLMApplyConfiguration) WithGenerateName(value string) *OLMApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *OLMApplyConfiguration) WithNamespace(value string) *OLMApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. +func (b *OLMApplyConfiguration) WithUID(value types.UID) *OLMApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *OLMApplyConfiguration) WithResourceVersion(value string) *OLMApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *OLMApplyConfiguration) WithGeneration(value int64) *OLMApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. +func (b *OLMApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *OLMApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. +func (b *OLMApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *OLMApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. 
+func (b *OLMApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *OLMApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting an existing map entries in Labels field with the same key. +func (b *OLMApplyConfiguration) WithLabels(entries map[string]string) *OLMApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.ObjectMetaApplyConfiguration.Labels[k] = v + } + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting an existing map entries in Annotations field with the same key. +func (b *OLMApplyConfiguration) WithAnnotations(entries map[string]string) *OLMApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.ObjectMetaApplyConfiguration.Annotations[k] = v + } + return b +} + +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the OwnerReferences field. +func (b *OLMApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *OLMApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOwnerReferences") + } + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) + } + return b +} + +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Finalizers field. +func (b *OLMApplyConfiguration) WithFinalizers(values ...string) *OLMApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) + } + return b +} + +func (b *OLMApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} + } +} + +// WithSpec sets the Spec field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
+// If called multiple times, the Spec field is set to the value of the last call. +func (b *OLMApplyConfiguration) WithSpec(value *OLMSpecApplyConfiguration) *OLMApplyConfiguration { + b.Spec = value + return b +} + +// WithStatus sets the Status field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Status field is set to the value of the last call. +func (b *OLMApplyConfiguration) WithStatus(value *OLMStatusApplyConfiguration) *OLMApplyConfiguration { + b.Status = value + return b +} + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *OLMApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/olmspec.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/olmspec.go new file mode 100644 index 000000000..69309aa49 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/olmspec.go @@ -0,0 +1,60 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + operatorv1 "github.com/openshift/api/operator/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// OLMSpecApplyConfiguration represents a declarative configuration of the OLMSpec type for use +// with apply. +type OLMSpecApplyConfiguration struct { + OperatorSpecApplyConfiguration `json:",inline"` +} + +// OLMSpecApplyConfiguration constructs a declarative configuration of the OLMSpec type for use with +// apply. +func OLMSpec() *OLMSpecApplyConfiguration { + return &OLMSpecApplyConfiguration{} +} + +// WithManagementState sets the ManagementState field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ManagementState field is set to the value of the last call. +func (b *OLMSpecApplyConfiguration) WithManagementState(value operatorv1.ManagementState) *OLMSpecApplyConfiguration { + b.OperatorSpecApplyConfiguration.ManagementState = &value + return b +} + +// WithLogLevel sets the LogLevel field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LogLevel field is set to the value of the last call. +func (b *OLMSpecApplyConfiguration) WithLogLevel(value operatorv1.LogLevel) *OLMSpecApplyConfiguration { + b.OperatorSpecApplyConfiguration.LogLevel = &value + return b +} + +// WithOperatorLogLevel sets the OperatorLogLevel field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the OperatorLogLevel field is set to the value of the last call. +func (b *OLMSpecApplyConfiguration) WithOperatorLogLevel(value operatorv1.LogLevel) *OLMSpecApplyConfiguration { + b.OperatorSpecApplyConfiguration.OperatorLogLevel = &value + return b +} + +// WithUnsupportedConfigOverrides sets the UnsupportedConfigOverrides field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
+// If called multiple times, the UnsupportedConfigOverrides field is set to the value of the last call. +func (b *OLMSpecApplyConfiguration) WithUnsupportedConfigOverrides(value runtime.RawExtension) *OLMSpecApplyConfiguration { + b.OperatorSpecApplyConfiguration.UnsupportedConfigOverrides = &value + return b +} + +// WithObservedConfig sets the ObservedConfig field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ObservedConfig field is set to the value of the last call. +func (b *OLMSpecApplyConfiguration) WithObservedConfig(value runtime.RawExtension) *OLMSpecApplyConfiguration { + b.OperatorSpecApplyConfiguration.ObservedConfig = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/olmstatus.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/olmstatus.go new file mode 100644 index 000000000..3c2d4a184 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/olmstatus.go @@ -0,0 +1,73 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// OLMStatusApplyConfiguration represents a declarative configuration of the OLMStatus type for use +// with apply. +type OLMStatusApplyConfiguration struct { + OperatorStatusApplyConfiguration `json:",inline"` +} + +// OLMStatusApplyConfiguration constructs a declarative configuration of the OLMStatus type for use with +// apply. +func OLMStatus() *OLMStatusApplyConfiguration { + return &OLMStatusApplyConfiguration{} +} + +// WithObservedGeneration sets the ObservedGeneration field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ObservedGeneration field is set to the value of the last call. +func (b *OLMStatusApplyConfiguration) WithObservedGeneration(value int64) *OLMStatusApplyConfiguration { + b.OperatorStatusApplyConfiguration.ObservedGeneration = &value + return b +} + +// WithConditions adds the given value to the Conditions field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Conditions field. +func (b *OLMStatusApplyConfiguration) WithConditions(values ...*OperatorConditionApplyConfiguration) *OLMStatusApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithConditions") + } + b.OperatorStatusApplyConfiguration.Conditions = append(b.OperatorStatusApplyConfiguration.Conditions, *values[i]) + } + return b +} + +// WithVersion sets the Version field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Version field is set to the value of the last call. +func (b *OLMStatusApplyConfiguration) WithVersion(value string) *OLMStatusApplyConfiguration { + b.OperatorStatusApplyConfiguration.Version = &value + return b +} + +// WithReadyReplicas sets the ReadyReplicas field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
+// If called multiple times, the ReadyReplicas field is set to the value of the last call. +func (b *OLMStatusApplyConfiguration) WithReadyReplicas(value int32) *OLMStatusApplyConfiguration { + b.OperatorStatusApplyConfiguration.ReadyReplicas = &value + return b +} + +// WithLatestAvailableRevision sets the LatestAvailableRevision field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LatestAvailableRevision field is set to the value of the last call. +func (b *OLMStatusApplyConfiguration) WithLatestAvailableRevision(value int32) *OLMStatusApplyConfiguration { + b.OperatorStatusApplyConfiguration.LatestAvailableRevision = &value + return b +} + +// WithGenerations adds the given value to the Generations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Generations field. +func (b *OLMStatusApplyConfiguration) WithGenerations(values ...*GenerationStatusApplyConfiguration) *OLMStatusApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithGenerations") + } + b.OperatorStatusApplyConfiguration.Generations = append(b.OperatorStatusApplyConfiguration.Generations, *values[i]) + } + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/openshiftapiserver.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/openshiftapiserver.go index 34bc2a1b1..f78286043 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/openshiftapiserver.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/openshiftapiserver.go @@ -3,21 +3,21 @@ package v1 import ( - apioperatorv1 "github.com/openshift/api/operator/v1" + operatorv1 "github.com/openshift/api/operator/v1" internal "github.com/openshift/client-go/operator/applyconfigurations/internal" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) // OpenShiftAPIServerApplyConfiguration represents a declarative configuration of the OpenShiftAPIServer type for use // with apply. 
type OpenShiftAPIServerApplyConfiguration struct { - v1.TypeMetaApplyConfiguration `json:",inline"` - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Spec *OpenShiftAPIServerSpecApplyConfiguration `json:"spec,omitempty"` - Status *OpenShiftAPIServerStatusApplyConfiguration `json:"status,omitempty"` + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *OpenShiftAPIServerSpecApplyConfiguration `json:"spec,omitempty"` + Status *OpenShiftAPIServerStatusApplyConfiguration `json:"status,omitempty"` } // OpenShiftAPIServer constructs a declarative configuration of the OpenShiftAPIServer type for use with @@ -41,18 +41,18 @@ func OpenShiftAPIServer(name string) *OpenShiftAPIServerApplyConfiguration { // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! -func ExtractOpenShiftAPIServer(openShiftAPIServer *apioperatorv1.OpenShiftAPIServer, fieldManager string) (*OpenShiftAPIServerApplyConfiguration, error) { +func ExtractOpenShiftAPIServer(openShiftAPIServer *operatorv1.OpenShiftAPIServer, fieldManager string) (*OpenShiftAPIServerApplyConfiguration, error) { return extractOpenShiftAPIServer(openShiftAPIServer, fieldManager, "") } // ExtractOpenShiftAPIServerStatus is the same as ExtractOpenShiftAPIServer except // that it extracts the status subresource applied configuration. // Experimental! -func ExtractOpenShiftAPIServerStatus(openShiftAPIServer *apioperatorv1.OpenShiftAPIServer, fieldManager string) (*OpenShiftAPIServerApplyConfiguration, error) { +func ExtractOpenShiftAPIServerStatus(openShiftAPIServer *operatorv1.OpenShiftAPIServer, fieldManager string) (*OpenShiftAPIServerApplyConfiguration, error) { return extractOpenShiftAPIServer(openShiftAPIServer, fieldManager, "status") } -func extractOpenShiftAPIServer(openShiftAPIServer *apioperatorv1.OpenShiftAPIServer, fieldManager string, subresource string) (*OpenShiftAPIServerApplyConfiguration, error) { +func extractOpenShiftAPIServer(openShiftAPIServer *operatorv1.OpenShiftAPIServer, fieldManager string, subresource string) (*OpenShiftAPIServerApplyConfiguration, error) { b := &OpenShiftAPIServerApplyConfiguration{} err := managedfields.ExtractInto(openShiftAPIServer, internal.Parser().Type("com.github.openshift.api.operator.v1.OpenShiftAPIServer"), fieldManager, b, subresource) if err != nil { @@ -69,7 +69,7 @@ func extractOpenShiftAPIServer(openShiftAPIServer *apioperatorv1.OpenShiftAPISer // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *OpenShiftAPIServerApplyConfiguration) WithKind(value string) *OpenShiftAPIServerApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -77,7 +77,7 @@ func (b *OpenShiftAPIServerApplyConfiguration) WithKind(value string) *OpenShift // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. 
func (b *OpenShiftAPIServerApplyConfiguration) WithAPIVersion(value string) *OpenShiftAPIServerApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -86,7 +86,7 @@ func (b *OpenShiftAPIServerApplyConfiguration) WithAPIVersion(value string) *Ope // If called multiple times, the Name field is set to the value of the last call. func (b *OpenShiftAPIServerApplyConfiguration) WithName(value string) *OpenShiftAPIServerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -95,7 +95,7 @@ func (b *OpenShiftAPIServerApplyConfiguration) WithName(value string) *OpenShift // If called multiple times, the GenerateName field is set to the value of the last call. func (b *OpenShiftAPIServerApplyConfiguration) WithGenerateName(value string) *OpenShiftAPIServerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -104,7 +104,7 @@ func (b *OpenShiftAPIServerApplyConfiguration) WithGenerateName(value string) *O // If called multiple times, the Namespace field is set to the value of the last call. func (b *OpenShiftAPIServerApplyConfiguration) WithNamespace(value string) *OpenShiftAPIServerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -113,7 +113,7 @@ func (b *OpenShiftAPIServerApplyConfiguration) WithNamespace(value string) *Open // If called multiple times, the UID field is set to the value of the last call. func (b *OpenShiftAPIServerApplyConfiguration) WithUID(value types.UID) *OpenShiftAPIServerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -122,7 +122,7 @@ func (b *OpenShiftAPIServerApplyConfiguration) WithUID(value types.UID) *OpenShi // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *OpenShiftAPIServerApplyConfiguration) WithResourceVersion(value string) *OpenShiftAPIServerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -131,25 +131,25 @@ func (b *OpenShiftAPIServerApplyConfiguration) WithResourceVersion(value string) // If called multiple times, the Generation field is set to the value of the last call. func (b *OpenShiftAPIServerApplyConfiguration) WithGeneration(value int64) *OpenShiftAPIServerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CreationTimestamp field is set to the value of the last call. 
-func (b *OpenShiftAPIServerApplyConfiguration) WithCreationTimestamp(value metav1.Time) *OpenShiftAPIServerApplyConfiguration { +func (b *OpenShiftAPIServerApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *OpenShiftAPIServerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. -func (b *OpenShiftAPIServerApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *OpenShiftAPIServerApplyConfiguration { +func (b *OpenShiftAPIServerApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *OpenShiftAPIServerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -158,7 +158,7 @@ func (b *OpenShiftAPIServerApplyConfiguration) WithDeletionTimestamp(value metav // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *OpenShiftAPIServerApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *OpenShiftAPIServerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -168,11 +168,11 @@ func (b *OpenShiftAPIServerApplyConfiguration) WithDeletionGracePeriodSeconds(va // overwriting an existing map entries in Labels field with the same key. func (b *OpenShiftAPIServerApplyConfiguration) WithLabels(entries map[string]string) *OpenShiftAPIServerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -183,11 +183,11 @@ func (b *OpenShiftAPIServerApplyConfiguration) WithLabels(entries map[string]str // overwriting an existing map entries in Annotations field with the same key. func (b *OpenShiftAPIServerApplyConfiguration) WithAnnotations(entries map[string]string) *OpenShiftAPIServerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -195,13 +195,13 @@ func (b *OpenShiftAPIServerApplyConfiguration) WithAnnotations(entries map[strin // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the OwnerReferences field. 
-func (b *OpenShiftAPIServerApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *OpenShiftAPIServerApplyConfiguration { +func (b *OpenShiftAPIServerApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *OpenShiftAPIServerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -212,14 +212,14 @@ func (b *OpenShiftAPIServerApplyConfiguration) WithOwnerReferences(values ...*v1 func (b *OpenShiftAPIServerApplyConfiguration) WithFinalizers(values ...string) *OpenShiftAPIServerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } func (b *OpenShiftAPIServerApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} } } @@ -242,5 +242,5 @@ func (b *OpenShiftAPIServerApplyConfiguration) WithStatus(value *OpenShiftAPISer // GetName retrieves the value of the Name field in the declarative configuration. func (b *OpenShiftAPIServerApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/openshiftapiserverspec.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/openshiftapiserverspec.go index 1e5545164..562b43032 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/openshiftapiserverspec.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/openshiftapiserverspec.go @@ -23,7 +23,7 @@ func OpenShiftAPIServerSpec() *OpenShiftAPIServerSpecApplyConfiguration { // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the ManagementState field is set to the value of the last call. func (b *OpenShiftAPIServerSpecApplyConfiguration) WithManagementState(value operatorv1.ManagementState) *OpenShiftAPIServerSpecApplyConfiguration { - b.ManagementState = &value + b.OperatorSpecApplyConfiguration.ManagementState = &value return b } @@ -31,7 +31,7 @@ func (b *OpenShiftAPIServerSpecApplyConfiguration) WithManagementState(value ope // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the LogLevel field is set to the value of the last call. func (b *OpenShiftAPIServerSpecApplyConfiguration) WithLogLevel(value operatorv1.LogLevel) *OpenShiftAPIServerSpecApplyConfiguration { - b.LogLevel = &value + b.OperatorSpecApplyConfiguration.LogLevel = &value return b } @@ -39,7 +39,7 @@ func (b *OpenShiftAPIServerSpecApplyConfiguration) WithLogLevel(value operatorv1 // and returns the receiver, so that objects can be built by chaining "With" function invocations. 
// If called multiple times, the OperatorLogLevel field is set to the value of the last call. func (b *OpenShiftAPIServerSpecApplyConfiguration) WithOperatorLogLevel(value operatorv1.LogLevel) *OpenShiftAPIServerSpecApplyConfiguration { - b.OperatorLogLevel = &value + b.OperatorSpecApplyConfiguration.OperatorLogLevel = &value return b } @@ -47,7 +47,7 @@ func (b *OpenShiftAPIServerSpecApplyConfiguration) WithOperatorLogLevel(value op // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the UnsupportedConfigOverrides field is set to the value of the last call. func (b *OpenShiftAPIServerSpecApplyConfiguration) WithUnsupportedConfigOverrides(value runtime.RawExtension) *OpenShiftAPIServerSpecApplyConfiguration { - b.UnsupportedConfigOverrides = &value + b.OperatorSpecApplyConfiguration.UnsupportedConfigOverrides = &value return b } @@ -55,6 +55,6 @@ func (b *OpenShiftAPIServerSpecApplyConfiguration) WithUnsupportedConfigOverride // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the ObservedConfig field is set to the value of the last call. func (b *OpenShiftAPIServerSpecApplyConfiguration) WithObservedConfig(value runtime.RawExtension) *OpenShiftAPIServerSpecApplyConfiguration { - b.ObservedConfig = &value + b.OperatorSpecApplyConfiguration.ObservedConfig = &value return b } diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/openshiftapiserverstatus.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/openshiftapiserverstatus.go index 8abe09c0a..776701d54 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/openshiftapiserverstatus.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/openshiftapiserverstatus.go @@ -18,7 +18,7 @@ func OpenShiftAPIServerStatus() *OpenShiftAPIServerStatusApplyConfiguration { // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the ObservedGeneration field is set to the value of the last call. func (b *OpenShiftAPIServerStatusApplyConfiguration) WithObservedGeneration(value int64) *OpenShiftAPIServerStatusApplyConfiguration { - b.ObservedGeneration = &value + b.OperatorStatusApplyConfiguration.ObservedGeneration = &value return b } @@ -30,7 +30,7 @@ func (b *OpenShiftAPIServerStatusApplyConfiguration) WithConditions(values ...*O if values[i] == nil { panic("nil value passed to WithConditions") } - b.Conditions = append(b.Conditions, *values[i]) + b.OperatorStatusApplyConfiguration.Conditions = append(b.OperatorStatusApplyConfiguration.Conditions, *values[i]) } return b } @@ -39,7 +39,7 @@ func (b *OpenShiftAPIServerStatusApplyConfiguration) WithConditions(values ...*O // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Version field is set to the value of the last call. func (b *OpenShiftAPIServerStatusApplyConfiguration) WithVersion(value string) *OpenShiftAPIServerStatusApplyConfiguration { - b.Version = &value + b.OperatorStatusApplyConfiguration.Version = &value return b } @@ -47,7 +47,7 @@ func (b *OpenShiftAPIServerStatusApplyConfiguration) WithVersion(value string) * // and returns the receiver, so that objects can be built by chaining "With" function invocations. 
// If called multiple times, the ReadyReplicas field is set to the value of the last call. func (b *OpenShiftAPIServerStatusApplyConfiguration) WithReadyReplicas(value int32) *OpenShiftAPIServerStatusApplyConfiguration { - b.ReadyReplicas = &value + b.OperatorStatusApplyConfiguration.ReadyReplicas = &value return b } @@ -55,7 +55,7 @@ func (b *OpenShiftAPIServerStatusApplyConfiguration) WithReadyReplicas(value int // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the LatestAvailableRevision field is set to the value of the last call. func (b *OpenShiftAPIServerStatusApplyConfiguration) WithLatestAvailableRevision(value int32) *OpenShiftAPIServerStatusApplyConfiguration { - b.LatestAvailableRevision = &value + b.OperatorStatusApplyConfiguration.LatestAvailableRevision = &value return b } @@ -67,7 +67,7 @@ func (b *OpenShiftAPIServerStatusApplyConfiguration) WithGenerations(values ...* if values[i] == nil { panic("nil value passed to WithGenerations") } - b.Generations = append(b.Generations, *values[i]) + b.OperatorStatusApplyConfiguration.Generations = append(b.OperatorStatusApplyConfiguration.Generations, *values[i]) } return b } diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/openshiftcontrollermanager.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/openshiftcontrollermanager.go index d72eb42c7..d8dbb4848 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/openshiftcontrollermanager.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/openshiftcontrollermanager.go @@ -3,21 +3,21 @@ package v1 import ( - apioperatorv1 "github.com/openshift/api/operator/v1" + operatorv1 "github.com/openshift/api/operator/v1" internal "github.com/openshift/client-go/operator/applyconfigurations/internal" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) // OpenShiftControllerManagerApplyConfiguration represents a declarative configuration of the OpenShiftControllerManager type for use // with apply. type OpenShiftControllerManagerApplyConfiguration struct { - v1.TypeMetaApplyConfiguration `json:",inline"` - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Spec *OpenShiftControllerManagerSpecApplyConfiguration `json:"spec,omitempty"` - Status *OpenShiftControllerManagerStatusApplyConfiguration `json:"status,omitempty"` + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *OpenShiftControllerManagerSpecApplyConfiguration `json:"spec,omitempty"` + Status *OpenShiftControllerManagerStatusApplyConfiguration `json:"status,omitempty"` } // OpenShiftControllerManager constructs a declarative configuration of the OpenShiftControllerManager type for use with @@ -41,18 +41,18 @@ func OpenShiftControllerManager(name string) *OpenShiftControllerManagerApplyCon // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! 
-func ExtractOpenShiftControllerManager(openShiftControllerManager *apioperatorv1.OpenShiftControllerManager, fieldManager string) (*OpenShiftControllerManagerApplyConfiguration, error) { +func ExtractOpenShiftControllerManager(openShiftControllerManager *operatorv1.OpenShiftControllerManager, fieldManager string) (*OpenShiftControllerManagerApplyConfiguration, error) { return extractOpenShiftControllerManager(openShiftControllerManager, fieldManager, "") } // ExtractOpenShiftControllerManagerStatus is the same as ExtractOpenShiftControllerManager except // that it extracts the status subresource applied configuration. // Experimental! -func ExtractOpenShiftControllerManagerStatus(openShiftControllerManager *apioperatorv1.OpenShiftControllerManager, fieldManager string) (*OpenShiftControllerManagerApplyConfiguration, error) { +func ExtractOpenShiftControllerManagerStatus(openShiftControllerManager *operatorv1.OpenShiftControllerManager, fieldManager string) (*OpenShiftControllerManagerApplyConfiguration, error) { return extractOpenShiftControllerManager(openShiftControllerManager, fieldManager, "status") } -func extractOpenShiftControllerManager(openShiftControllerManager *apioperatorv1.OpenShiftControllerManager, fieldManager string, subresource string) (*OpenShiftControllerManagerApplyConfiguration, error) { +func extractOpenShiftControllerManager(openShiftControllerManager *operatorv1.OpenShiftControllerManager, fieldManager string, subresource string) (*OpenShiftControllerManagerApplyConfiguration, error) { b := &OpenShiftControllerManagerApplyConfiguration{} err := managedfields.ExtractInto(openShiftControllerManager, internal.Parser().Type("com.github.openshift.api.operator.v1.OpenShiftControllerManager"), fieldManager, b, subresource) if err != nil { @@ -69,7 +69,7 @@ func extractOpenShiftControllerManager(openShiftControllerManager *apioperatorv1 // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *OpenShiftControllerManagerApplyConfiguration) WithKind(value string) *OpenShiftControllerManagerApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -77,7 +77,7 @@ func (b *OpenShiftControllerManagerApplyConfiguration) WithKind(value string) *O // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *OpenShiftControllerManagerApplyConfiguration) WithAPIVersion(value string) *OpenShiftControllerManagerApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -86,7 +86,7 @@ func (b *OpenShiftControllerManagerApplyConfiguration) WithAPIVersion(value stri // If called multiple times, the Name field is set to the value of the last call. func (b *OpenShiftControllerManagerApplyConfiguration) WithName(value string) *OpenShiftControllerManagerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -95,7 +95,7 @@ func (b *OpenShiftControllerManagerApplyConfiguration) WithName(value string) *O // If called multiple times, the GenerateName field is set to the value of the last call. 
func (b *OpenShiftControllerManagerApplyConfiguration) WithGenerateName(value string) *OpenShiftControllerManagerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -104,7 +104,7 @@ func (b *OpenShiftControllerManagerApplyConfiguration) WithGenerateName(value st // If called multiple times, the Namespace field is set to the value of the last call. func (b *OpenShiftControllerManagerApplyConfiguration) WithNamespace(value string) *OpenShiftControllerManagerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -113,7 +113,7 @@ func (b *OpenShiftControllerManagerApplyConfiguration) WithNamespace(value strin // If called multiple times, the UID field is set to the value of the last call. func (b *OpenShiftControllerManagerApplyConfiguration) WithUID(value types.UID) *OpenShiftControllerManagerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -122,7 +122,7 @@ func (b *OpenShiftControllerManagerApplyConfiguration) WithUID(value types.UID) // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *OpenShiftControllerManagerApplyConfiguration) WithResourceVersion(value string) *OpenShiftControllerManagerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -131,25 +131,25 @@ func (b *OpenShiftControllerManagerApplyConfiguration) WithResourceVersion(value // If called multiple times, the Generation field is set to the value of the last call. func (b *OpenShiftControllerManagerApplyConfiguration) WithGeneration(value int64) *OpenShiftControllerManagerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CreationTimestamp field is set to the value of the last call. -func (b *OpenShiftControllerManagerApplyConfiguration) WithCreationTimestamp(value metav1.Time) *OpenShiftControllerManagerApplyConfiguration { +func (b *OpenShiftControllerManagerApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *OpenShiftControllerManagerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. 
-func (b *OpenShiftControllerManagerApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *OpenShiftControllerManagerApplyConfiguration { +func (b *OpenShiftControllerManagerApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *OpenShiftControllerManagerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -158,7 +158,7 @@ func (b *OpenShiftControllerManagerApplyConfiguration) WithDeletionTimestamp(val // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *OpenShiftControllerManagerApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *OpenShiftControllerManagerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -168,11 +168,11 @@ func (b *OpenShiftControllerManagerApplyConfiguration) WithDeletionGracePeriodSe // overwriting an existing map entries in Labels field with the same key. func (b *OpenShiftControllerManagerApplyConfiguration) WithLabels(entries map[string]string) *OpenShiftControllerManagerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -183,11 +183,11 @@ func (b *OpenShiftControllerManagerApplyConfiguration) WithLabels(entries map[st // overwriting an existing map entries in Annotations field with the same key. func (b *OpenShiftControllerManagerApplyConfiguration) WithAnnotations(entries map[string]string) *OpenShiftControllerManagerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -195,13 +195,13 @@ func (b *OpenShiftControllerManagerApplyConfiguration) WithAnnotations(entries m // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the OwnerReferences field. 
-func (b *OpenShiftControllerManagerApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *OpenShiftControllerManagerApplyConfiguration { +func (b *OpenShiftControllerManagerApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *OpenShiftControllerManagerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -212,14 +212,14 @@ func (b *OpenShiftControllerManagerApplyConfiguration) WithOwnerReferences(value func (b *OpenShiftControllerManagerApplyConfiguration) WithFinalizers(values ...string) *OpenShiftControllerManagerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } func (b *OpenShiftControllerManagerApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} } } @@ -242,5 +242,5 @@ func (b *OpenShiftControllerManagerApplyConfiguration) WithStatus(value *OpenShi // GetName retrieves the value of the Name field in the declarative configuration. func (b *OpenShiftControllerManagerApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/openshiftcontrollermanagerspec.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/openshiftcontrollermanagerspec.go index feea2556f..5c6a0ff50 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/openshiftcontrollermanagerspec.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/openshiftcontrollermanagerspec.go @@ -23,7 +23,7 @@ func OpenShiftControllerManagerSpec() *OpenShiftControllerManagerSpecApplyConfig // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the ManagementState field is set to the value of the last call. func (b *OpenShiftControllerManagerSpecApplyConfiguration) WithManagementState(value operatorv1.ManagementState) *OpenShiftControllerManagerSpecApplyConfiguration { - b.ManagementState = &value + b.OperatorSpecApplyConfiguration.ManagementState = &value return b } @@ -31,7 +31,7 @@ func (b *OpenShiftControllerManagerSpecApplyConfiguration) WithManagementState(v // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the LogLevel field is set to the value of the last call. 
func (b *OpenShiftControllerManagerSpecApplyConfiguration) WithLogLevel(value operatorv1.LogLevel) *OpenShiftControllerManagerSpecApplyConfiguration { - b.LogLevel = &value + b.OperatorSpecApplyConfiguration.LogLevel = &value return b } @@ -39,7 +39,7 @@ func (b *OpenShiftControllerManagerSpecApplyConfiguration) WithLogLevel(value op // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the OperatorLogLevel field is set to the value of the last call. func (b *OpenShiftControllerManagerSpecApplyConfiguration) WithOperatorLogLevel(value operatorv1.LogLevel) *OpenShiftControllerManagerSpecApplyConfiguration { - b.OperatorLogLevel = &value + b.OperatorSpecApplyConfiguration.OperatorLogLevel = &value return b } @@ -47,7 +47,7 @@ func (b *OpenShiftControllerManagerSpecApplyConfiguration) WithOperatorLogLevel( // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the UnsupportedConfigOverrides field is set to the value of the last call. func (b *OpenShiftControllerManagerSpecApplyConfiguration) WithUnsupportedConfigOverrides(value runtime.RawExtension) *OpenShiftControllerManagerSpecApplyConfiguration { - b.UnsupportedConfigOverrides = &value + b.OperatorSpecApplyConfiguration.UnsupportedConfigOverrides = &value return b } @@ -55,6 +55,6 @@ func (b *OpenShiftControllerManagerSpecApplyConfiguration) WithUnsupportedConfig // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the ObservedConfig field is set to the value of the last call. func (b *OpenShiftControllerManagerSpecApplyConfiguration) WithObservedConfig(value runtime.RawExtension) *OpenShiftControllerManagerSpecApplyConfiguration { - b.ObservedConfig = &value + b.OperatorSpecApplyConfiguration.ObservedConfig = &value return b } diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/openshiftcontrollermanagerstatus.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/openshiftcontrollermanagerstatus.go index 77cffb09f..c5b960e23 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/openshiftcontrollermanagerstatus.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/openshiftcontrollermanagerstatus.go @@ -18,7 +18,7 @@ func OpenShiftControllerManagerStatus() *OpenShiftControllerManagerStatusApplyCo // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the ObservedGeneration field is set to the value of the last call. func (b *OpenShiftControllerManagerStatusApplyConfiguration) WithObservedGeneration(value int64) *OpenShiftControllerManagerStatusApplyConfiguration { - b.ObservedGeneration = &value + b.OperatorStatusApplyConfiguration.ObservedGeneration = &value return b } @@ -30,7 +30,7 @@ func (b *OpenShiftControllerManagerStatusApplyConfiguration) WithConditions(valu if values[i] == nil { panic("nil value passed to WithConditions") } - b.Conditions = append(b.Conditions, *values[i]) + b.OperatorStatusApplyConfiguration.Conditions = append(b.OperatorStatusApplyConfiguration.Conditions, *values[i]) } return b } @@ -39,7 +39,7 @@ func (b *OpenShiftControllerManagerStatusApplyConfiguration) WithConditions(valu // and returns the receiver, so that objects can be built by chaining "With" function invocations. 
// If called multiple times, the Version field is set to the value of the last call. func (b *OpenShiftControllerManagerStatusApplyConfiguration) WithVersion(value string) *OpenShiftControllerManagerStatusApplyConfiguration { - b.Version = &value + b.OperatorStatusApplyConfiguration.Version = &value return b } @@ -47,7 +47,7 @@ func (b *OpenShiftControllerManagerStatusApplyConfiguration) WithVersion(value s // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the ReadyReplicas field is set to the value of the last call. func (b *OpenShiftControllerManagerStatusApplyConfiguration) WithReadyReplicas(value int32) *OpenShiftControllerManagerStatusApplyConfiguration { - b.ReadyReplicas = &value + b.OperatorStatusApplyConfiguration.ReadyReplicas = &value return b } @@ -55,7 +55,7 @@ func (b *OpenShiftControllerManagerStatusApplyConfiguration) WithReadyReplicas(v // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the LatestAvailableRevision field is set to the value of the last call. func (b *OpenShiftControllerManagerStatusApplyConfiguration) WithLatestAvailableRevision(value int32) *OpenShiftControllerManagerStatusApplyConfiguration { - b.LatestAvailableRevision = &value + b.OperatorStatusApplyConfiguration.LatestAvailableRevision = &value return b } @@ -67,7 +67,7 @@ func (b *OpenShiftControllerManagerStatusApplyConfiguration) WithGenerations(val if values[i] == nil { panic("nil value passed to WithGenerations") } - b.Generations = append(b.Generations, *values[i]) + b.OperatorStatusApplyConfiguration.Generations = append(b.OperatorStatusApplyConfiguration.Generations, *values[i]) } return b } diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/openshiftsdnconfig.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/openshiftsdnconfig.go index 416f9099c..b1dd640f6 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/openshiftsdnconfig.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/openshiftsdnconfig.go @@ -3,17 +3,17 @@ package v1 import ( - v1 "github.com/openshift/api/operator/v1" + operatorv1 "github.com/openshift/api/operator/v1" ) // OpenShiftSDNConfigApplyConfiguration represents a declarative configuration of the OpenShiftSDNConfig type for use // with apply. type OpenShiftSDNConfigApplyConfiguration struct { - Mode *v1.SDNMode `json:"mode,omitempty"` - VXLANPort *uint32 `json:"vxlanPort,omitempty"` - MTU *uint32 `json:"mtu,omitempty"` - UseExternalOpenvswitch *bool `json:"useExternalOpenvswitch,omitempty"` - EnableUnidling *bool `json:"enableUnidling,omitempty"` + Mode *operatorv1.SDNMode `json:"mode,omitempty"` + VXLANPort *uint32 `json:"vxlanPort,omitempty"` + MTU *uint32 `json:"mtu,omitempty"` + UseExternalOpenvswitch *bool `json:"useExternalOpenvswitch,omitempty"` + EnableUnidling *bool `json:"enableUnidling,omitempty"` } // OpenShiftSDNConfigApplyConfiguration constructs a declarative configuration of the OpenShiftSDNConfig type for use with @@ -25,7 +25,7 @@ func OpenShiftSDNConfig() *OpenShiftSDNConfigApplyConfiguration { // WithMode sets the Mode field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Mode field is set to the value of the last call. 
-func (b *OpenShiftSDNConfigApplyConfiguration) WithMode(value v1.SDNMode) *OpenShiftSDNConfigApplyConfiguration { +func (b *OpenShiftSDNConfigApplyConfiguration) WithMode(value operatorv1.SDNMode) *OpenShiftSDNConfigApplyConfiguration { b.Mode = &value return b } diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/openstackloadbalancerparameters.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/openstackloadbalancerparameters.go index 07d523555..811b2330b 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/openstackloadbalancerparameters.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/openstackloadbalancerparameters.go @@ -5,7 +5,7 @@ package v1 // OpenStackLoadBalancerParametersApplyConfiguration represents a declarative configuration of the OpenStackLoadBalancerParameters type for use // with apply. type OpenStackLoadBalancerParametersApplyConfiguration struct { - LoadBalancerIP *string `json:"loadBalancerIP,omitempty"` + FloatingIP *string `json:"floatingIP,omitempty"` } // OpenStackLoadBalancerParametersApplyConfiguration constructs a declarative configuration of the OpenStackLoadBalancerParameters type for use with @@ -14,10 +14,10 @@ func OpenStackLoadBalancerParameters() *OpenStackLoadBalancerParametersApplyConf return &OpenStackLoadBalancerParametersApplyConfiguration{} } -// WithLoadBalancerIP sets the LoadBalancerIP field in the declarative configuration to the given value +// WithFloatingIP sets the FloatingIP field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the LoadBalancerIP field is set to the value of the last call. -func (b *OpenStackLoadBalancerParametersApplyConfiguration) WithLoadBalancerIP(value string) *OpenStackLoadBalancerParametersApplyConfiguration { - b.LoadBalancerIP = &value +// If called multiple times, the FloatingIP field is set to the value of the last call. +func (b *OpenStackLoadBalancerParametersApplyConfiguration) WithFloatingIP(value string) *OpenStackLoadBalancerParametersApplyConfiguration { + b.FloatingIP = &value return b } diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/operatorcondition.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/operatorcondition.go index 974f12d16..57bffabd2 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/operatorcondition.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/operatorcondition.go @@ -3,18 +3,18 @@ package v1 import ( - v1 "github.com/openshift/api/operator/v1" + operatorv1 "github.com/openshift/api/operator/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) // OperatorConditionApplyConfiguration represents a declarative configuration of the OperatorCondition type for use // with apply. 
type OperatorConditionApplyConfiguration struct { - Type *string `json:"type,omitempty"` - Status *v1.ConditionStatus `json:"status,omitempty"` - LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"` - Reason *string `json:"reason,omitempty"` - Message *string `json:"message,omitempty"` + Type *string `json:"type,omitempty"` + Status *operatorv1.ConditionStatus `json:"status,omitempty"` + LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"` + Reason *string `json:"reason,omitempty"` + Message *string `json:"message,omitempty"` } // OperatorConditionApplyConfiguration constructs a declarative configuration of the OperatorCondition type for use with @@ -34,7 +34,7 @@ func (b *OperatorConditionApplyConfiguration) WithType(value string) *OperatorCo // WithStatus sets the Status field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Status field is set to the value of the last call. -func (b *OperatorConditionApplyConfiguration) WithStatus(value v1.ConditionStatus) *OperatorConditionApplyConfiguration { +func (b *OperatorConditionApplyConfiguration) WithStatus(value operatorv1.ConditionStatus) *OperatorConditionApplyConfiguration { b.Status = &value return b } diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/operatorspec.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/operatorspec.go index acbba63c5..6be07d417 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/operatorspec.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/operatorspec.go @@ -3,18 +3,18 @@ package v1 import ( - v1 "github.com/openshift/api/operator/v1" + operatorv1 "github.com/openshift/api/operator/v1" runtime "k8s.io/apimachinery/pkg/runtime" ) // OperatorSpecApplyConfiguration represents a declarative configuration of the OperatorSpec type for use // with apply. type OperatorSpecApplyConfiguration struct { - ManagementState *v1.ManagementState `json:"managementState,omitempty"` - LogLevel *v1.LogLevel `json:"logLevel,omitempty"` - OperatorLogLevel *v1.LogLevel `json:"operatorLogLevel,omitempty"` - UnsupportedConfigOverrides *runtime.RawExtension `json:"unsupportedConfigOverrides,omitempty"` - ObservedConfig *runtime.RawExtension `json:"observedConfig,omitempty"` + ManagementState *operatorv1.ManagementState `json:"managementState,omitempty"` + LogLevel *operatorv1.LogLevel `json:"logLevel,omitempty"` + OperatorLogLevel *operatorv1.LogLevel `json:"operatorLogLevel,omitempty"` + UnsupportedConfigOverrides *runtime.RawExtension `json:"unsupportedConfigOverrides,omitempty"` + ObservedConfig *runtime.RawExtension `json:"observedConfig,omitempty"` } // OperatorSpecApplyConfiguration constructs a declarative configuration of the OperatorSpec type for use with @@ -26,7 +26,7 @@ func OperatorSpec() *OperatorSpecApplyConfiguration { // WithManagementState sets the ManagementState field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the ManagementState field is set to the value of the last call. 
-func (b *OperatorSpecApplyConfiguration) WithManagementState(value v1.ManagementState) *OperatorSpecApplyConfiguration { +func (b *OperatorSpecApplyConfiguration) WithManagementState(value operatorv1.ManagementState) *OperatorSpecApplyConfiguration { b.ManagementState = &value return b } @@ -34,7 +34,7 @@ func (b *OperatorSpecApplyConfiguration) WithManagementState(value v1.Management // WithLogLevel sets the LogLevel field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the LogLevel field is set to the value of the last call. -func (b *OperatorSpecApplyConfiguration) WithLogLevel(value v1.LogLevel) *OperatorSpecApplyConfiguration { +func (b *OperatorSpecApplyConfiguration) WithLogLevel(value operatorv1.LogLevel) *OperatorSpecApplyConfiguration { b.LogLevel = &value return b } @@ -42,7 +42,7 @@ func (b *OperatorSpecApplyConfiguration) WithLogLevel(value v1.LogLevel) *Operat // WithOperatorLogLevel sets the OperatorLogLevel field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the OperatorLogLevel field is set to the value of the last call. -func (b *OperatorSpecApplyConfiguration) WithOperatorLogLevel(value v1.LogLevel) *OperatorSpecApplyConfiguration { +func (b *OperatorSpecApplyConfiguration) WithOperatorLogLevel(value operatorv1.LogLevel) *OperatorSpecApplyConfiguration { b.OperatorLogLevel = &value return b } diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/partialselector.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/partialselector.go index 040357597..885c40279 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/partialselector.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/partialselector.go @@ -3,13 +3,13 @@ package v1 import ( - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) // PartialSelectorApplyConfiguration represents a declarative configuration of the PartialSelector type for use // with apply. type PartialSelectorApplyConfiguration struct { - MachineResourceSelector *v1.LabelSelectorApplyConfiguration `json:"machineResourceSelector,omitempty"` + MachineResourceSelector *metav1.LabelSelectorApplyConfiguration `json:"machineResourceSelector,omitempty"` } // PartialSelectorApplyConfiguration constructs a declarative configuration of the PartialSelector type for use with @@ -21,7 +21,7 @@ func PartialSelector() *PartialSelectorApplyConfiguration { // WithMachineResourceSelector sets the MachineResourceSelector field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the MachineResourceSelector field is set to the value of the last call. 
-func (b *PartialSelectorApplyConfiguration) WithMachineResourceSelector(value *v1.LabelSelectorApplyConfiguration) *PartialSelectorApplyConfiguration { +func (b *PartialSelectorApplyConfiguration) WithMachineResourceSelector(value *metav1.LabelSelectorApplyConfiguration) *PartialSelectorApplyConfiguration { b.MachineResourceSelector = value return b } diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/perspectivevisibility.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/perspectivevisibility.go index 1dd11f800..222557419 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/perspectivevisibility.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/perspectivevisibility.go @@ -3,13 +3,13 @@ package v1 import ( - v1 "github.com/openshift/api/operator/v1" + operatorv1 "github.com/openshift/api/operator/v1" ) // PerspectiveVisibilityApplyConfiguration represents a declarative configuration of the PerspectiveVisibility type for use // with apply. type PerspectiveVisibilityApplyConfiguration struct { - State *v1.PerspectiveState `json:"state,omitempty"` + State *operatorv1.PerspectiveState `json:"state,omitempty"` AccessReview *ResourceAttributesAccessReviewApplyConfiguration `json:"accessReview,omitempty"` } @@ -22,7 +22,7 @@ func PerspectiveVisibility() *PerspectiveVisibilityApplyConfiguration { // WithState sets the State field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the State field is set to the value of the last call. -func (b *PerspectiveVisibilityApplyConfiguration) WithState(value v1.PerspectiveState) *PerspectiveVisibilityApplyConfiguration { +func (b *PerspectiveVisibilityApplyConfiguration) WithState(value operatorv1.PerspectiveState) *PerspectiveVisibilityApplyConfiguration { b.State = &value return b } diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/privatestrategy.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/privatestrategy.go index 4154e999d..411571380 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/privatestrategy.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/privatestrategy.go @@ -3,13 +3,13 @@ package v1 import ( - v1 "github.com/openshift/api/operator/v1" + operatorv1 "github.com/openshift/api/operator/v1" ) // PrivateStrategyApplyConfiguration represents a declarative configuration of the PrivateStrategy type for use // with apply. type PrivateStrategyApplyConfiguration struct { - Protocol *v1.IngressControllerProtocol `json:"protocol,omitempty"` + Protocol *operatorv1.IngressControllerProtocol `json:"protocol,omitempty"` } // PrivateStrategyApplyConfiguration constructs a declarative configuration of the PrivateStrategy type for use with @@ -21,7 +21,7 @@ func PrivateStrategy() *PrivateStrategyApplyConfiguration { // WithProtocol sets the Protocol field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Protocol field is set to the value of the last call. 
-func (b *PrivateStrategyApplyConfiguration) WithProtocol(value v1.IngressControllerProtocol) *PrivateStrategyApplyConfiguration { +func (b *PrivateStrategyApplyConfiguration) WithProtocol(value operatorv1.IngressControllerProtocol) *PrivateStrategyApplyConfiguration { b.Protocol = &value return b } diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/providerloadbalancerparameters.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/providerloadbalancerparameters.go index 6f8675764..0812e6974 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/providerloadbalancerparameters.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/providerloadbalancerparameters.go @@ -3,13 +3,13 @@ package v1 import ( - v1 "github.com/openshift/api/operator/v1" + operatorv1 "github.com/openshift/api/operator/v1" ) // ProviderLoadBalancerParametersApplyConfiguration represents a declarative configuration of the ProviderLoadBalancerParameters type for use // with apply. type ProviderLoadBalancerParametersApplyConfiguration struct { - Type *v1.LoadBalancerProviderType `json:"type,omitempty"` + Type *operatorv1.LoadBalancerProviderType `json:"type,omitempty"` AWS *AWSLoadBalancerParametersApplyConfiguration `json:"aws,omitempty"` GCP *GCPLoadBalancerParametersApplyConfiguration `json:"gcp,omitempty"` IBM *IBMLoadBalancerParametersApplyConfiguration `json:"ibm,omitempty"` @@ -25,7 +25,7 @@ func ProviderLoadBalancerParameters() *ProviderLoadBalancerParametersApplyConfig // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. -func (b *ProviderLoadBalancerParametersApplyConfiguration) WithType(value v1.LoadBalancerProviderType) *ProviderLoadBalancerParametersApplyConfiguration { +func (b *ProviderLoadBalancerParametersApplyConfiguration) WithType(value operatorv1.LoadBalancerProviderType) *ProviderLoadBalancerParametersApplyConfiguration { b.Type = &value return b } diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/proxyconfig.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/proxyconfig.go index 334264d97..2565e3876 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/proxyconfig.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/proxyconfig.go @@ -3,15 +3,15 @@ package v1 import ( - v1 "github.com/openshift/api/operator/v1" + operatorv1 "github.com/openshift/api/operator/v1" ) // ProxyConfigApplyConfiguration represents a declarative configuration of the ProxyConfig type for use // with apply. 
type ProxyConfigApplyConfiguration struct { - IptablesSyncPeriod *string `json:"iptablesSyncPeriod,omitempty"` - BindAddress *string `json:"bindAddress,omitempty"` - ProxyArguments map[string]v1.ProxyArgumentList `json:"proxyArguments,omitempty"` + IptablesSyncPeriod *string `json:"iptablesSyncPeriod,omitempty"` + BindAddress *string `json:"bindAddress,omitempty"` + ProxyArguments map[string]operatorv1.ProxyArgumentList `json:"proxyArguments,omitempty"` } // ProxyConfigApplyConfiguration constructs a declarative configuration of the ProxyConfig type for use with @@ -40,9 +40,9 @@ func (b *ProxyConfigApplyConfiguration) WithBindAddress(value string) *ProxyConf // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, the entries provided by each call will be put on the ProxyArguments field, // overwriting an existing map entries in ProxyArguments field with the same key. -func (b *ProxyConfigApplyConfiguration) WithProxyArguments(entries map[string]v1.ProxyArgumentList) *ProxyConfigApplyConfiguration { +func (b *ProxyConfigApplyConfiguration) WithProxyArguments(entries map[string]operatorv1.ProxyArgumentList) *ProxyConfigApplyConfiguration { if b.ProxyArguments == nil && len(entries) > 0 { - b.ProxyArguments = make(map[string]v1.ProxyArgumentList, len(entries)) + b.ProxyArguments = make(map[string]operatorv1.ProxyArgumentList, len(entries)) } for k, v := range entries { b.ProxyArguments[k] = v diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/reloadservice.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/reloadservice.go index 5eb52c9f0..aef55a1b1 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/reloadservice.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/reloadservice.go @@ -3,13 +3,13 @@ package v1 import ( - v1 "github.com/openshift/api/operator/v1" + operatorv1 "github.com/openshift/api/operator/v1" ) // ReloadServiceApplyConfiguration represents a declarative configuration of the ReloadService type for use // with apply. type ReloadServiceApplyConfiguration struct { - ServiceName *v1.NodeDisruptionPolicyServiceName `json:"serviceName,omitempty"` + ServiceName *operatorv1.NodeDisruptionPolicyServiceName `json:"serviceName,omitempty"` } // ReloadServiceApplyConfiguration constructs a declarative configuration of the ReloadService type for use with @@ -21,7 +21,7 @@ func ReloadService() *ReloadServiceApplyConfiguration { // WithServiceName sets the ServiceName field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the ServiceName field is set to the value of the last call. 
-func (b *ReloadServiceApplyConfiguration) WithServiceName(value v1.NodeDisruptionPolicyServiceName) *ReloadServiceApplyConfiguration { +func (b *ReloadServiceApplyConfiguration) WithServiceName(value operatorv1.NodeDisruptionPolicyServiceName) *ReloadServiceApplyConfiguration { b.ServiceName = &value return b } diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/resourceattributesaccessreview.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/resourceattributesaccessreview.go index 8e4b24966..96e749c5f 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/resourceattributesaccessreview.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/resourceattributesaccessreview.go @@ -3,14 +3,14 @@ package v1 import ( - v1 "k8s.io/api/authorization/v1" + authorizationv1 "k8s.io/api/authorization/v1" ) // ResourceAttributesAccessReviewApplyConfiguration represents a declarative configuration of the ResourceAttributesAccessReview type for use // with apply. type ResourceAttributesAccessReviewApplyConfiguration struct { - Required []v1.ResourceAttributes `json:"required,omitempty"` - Missing []v1.ResourceAttributes `json:"missing,omitempty"` + Required []authorizationv1.ResourceAttributes `json:"required,omitempty"` + Missing []authorizationv1.ResourceAttributes `json:"missing,omitempty"` } // ResourceAttributesAccessReviewApplyConfiguration constructs a declarative configuration of the ResourceAttributesAccessReview type for use with @@ -22,7 +22,7 @@ func ResourceAttributesAccessReview() *ResourceAttributesAccessReviewApplyConfig // WithRequired adds the given value to the Required field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the Required field. -func (b *ResourceAttributesAccessReviewApplyConfiguration) WithRequired(values ...v1.ResourceAttributes) *ResourceAttributesAccessReviewApplyConfiguration { +func (b *ResourceAttributesAccessReviewApplyConfiguration) WithRequired(values ...authorizationv1.ResourceAttributes) *ResourceAttributesAccessReviewApplyConfiguration { for i := range values { b.Required = append(b.Required, values[i]) } @@ -32,7 +32,7 @@ func (b *ResourceAttributesAccessReviewApplyConfiguration) WithRequired(values . // WithMissing adds the given value to the Missing field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the Missing field. 
-func (b *ResourceAttributesAccessReviewApplyConfiguration) WithMissing(values ...v1.ResourceAttributes) *ResourceAttributesAccessReviewApplyConfiguration { +func (b *ResourceAttributesAccessReviewApplyConfiguration) WithMissing(values ...authorizationv1.ResourceAttributes) *ResourceAttributesAccessReviewApplyConfiguration { for i := range values { b.Missing = append(b.Missing, values[i]) } diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/restartservice.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/restartservice.go index 54b9051a3..36c43a116 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/restartservice.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/restartservice.go @@ -3,13 +3,13 @@ package v1 import ( - v1 "github.com/openshift/api/operator/v1" + operatorv1 "github.com/openshift/api/operator/v1" ) // RestartServiceApplyConfiguration represents a declarative configuration of the RestartService type for use // with apply. type RestartServiceApplyConfiguration struct { - ServiceName *v1.NodeDisruptionPolicyServiceName `json:"serviceName,omitempty"` + ServiceName *operatorv1.NodeDisruptionPolicyServiceName `json:"serviceName,omitempty"` } // RestartServiceApplyConfiguration constructs a declarative configuration of the RestartService type for use with @@ -21,7 +21,7 @@ func RestartService() *RestartServiceApplyConfiguration { // WithServiceName sets the ServiceName field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the ServiceName field is set to the value of the last call. -func (b *RestartServiceApplyConfiguration) WithServiceName(value v1.NodeDisruptionPolicyServiceName) *RestartServiceApplyConfiguration { +func (b *RestartServiceApplyConfiguration) WithServiceName(value operatorv1.NodeDisruptionPolicyServiceName) *RestartServiceApplyConfiguration { b.ServiceName = &value return b } diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/routeadmissionpolicy.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/routeadmissionpolicy.go index d51355123..5faa3c56b 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/routeadmissionpolicy.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/routeadmissionpolicy.go @@ -3,14 +3,14 @@ package v1 import ( - v1 "github.com/openshift/api/operator/v1" + operatorv1 "github.com/openshift/api/operator/v1" ) // RouteAdmissionPolicyApplyConfiguration represents a declarative configuration of the RouteAdmissionPolicy type for use // with apply. 
type RouteAdmissionPolicyApplyConfiguration struct { - NamespaceOwnership *v1.NamespaceOwnershipCheck `json:"namespaceOwnership,omitempty"` - WildcardPolicy *v1.WildcardPolicy `json:"wildcardPolicy,omitempty"` + NamespaceOwnership *operatorv1.NamespaceOwnershipCheck `json:"namespaceOwnership,omitempty"` + WildcardPolicy *operatorv1.WildcardPolicy `json:"wildcardPolicy,omitempty"` } // RouteAdmissionPolicyApplyConfiguration constructs a declarative configuration of the RouteAdmissionPolicy type for use with @@ -22,7 +22,7 @@ func RouteAdmissionPolicy() *RouteAdmissionPolicyApplyConfiguration { // WithNamespaceOwnership sets the NamespaceOwnership field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the NamespaceOwnership field is set to the value of the last call. -func (b *RouteAdmissionPolicyApplyConfiguration) WithNamespaceOwnership(value v1.NamespaceOwnershipCheck) *RouteAdmissionPolicyApplyConfiguration { +func (b *RouteAdmissionPolicyApplyConfiguration) WithNamespaceOwnership(value operatorv1.NamespaceOwnershipCheck) *RouteAdmissionPolicyApplyConfiguration { b.NamespaceOwnership = &value return b } @@ -30,7 +30,7 @@ func (b *RouteAdmissionPolicyApplyConfiguration) WithNamespaceOwnership(value v1 // WithWildcardPolicy sets the WildcardPolicy field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the WildcardPolicy field is set to the value of the last call. -func (b *RouteAdmissionPolicyApplyConfiguration) WithWildcardPolicy(value v1.WildcardPolicy) *RouteAdmissionPolicyApplyConfiguration { +func (b *RouteAdmissionPolicyApplyConfiguration) WithWildcardPolicy(value operatorv1.WildcardPolicy) *RouteAdmissionPolicyApplyConfiguration { b.WildcardPolicy = &value return b } diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/serviceaccountissuerstatus.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/serviceaccountissuerstatus.go index b8962296f..f4a6de0b6 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/serviceaccountissuerstatus.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/serviceaccountissuerstatus.go @@ -3,14 +3,14 @@ package v1 import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) // ServiceAccountIssuerStatusApplyConfiguration represents a declarative configuration of the ServiceAccountIssuerStatus type for use // with apply. type ServiceAccountIssuerStatusApplyConfiguration struct { - Name *string `json:"name,omitempty"` - ExpirationTime *v1.Time `json:"expirationTime,omitempty"` + Name *string `json:"name,omitempty"` + ExpirationTime *metav1.Time `json:"expirationTime,omitempty"` } // ServiceAccountIssuerStatusApplyConfiguration constructs a declarative configuration of the ServiceAccountIssuerStatus type for use with @@ -30,7 +30,7 @@ func (b *ServiceAccountIssuerStatusApplyConfiguration) WithName(value string) *S // WithExpirationTime sets the ExpirationTime field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the ExpirationTime field is set to the value of the last call. 
-func (b *ServiceAccountIssuerStatusApplyConfiguration) WithExpirationTime(value v1.Time) *ServiceAccountIssuerStatusApplyConfiguration { +func (b *ServiceAccountIssuerStatusApplyConfiguration) WithExpirationTime(value metav1.Time) *ServiceAccountIssuerStatusApplyConfiguration { b.ExpirationTime = &value return b } diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/serviceca.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/serviceca.go index cceba5d9d..bb0757381 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/serviceca.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/serviceca.go @@ -3,21 +3,21 @@ package v1 import ( - apioperatorv1 "github.com/openshift/api/operator/v1" + operatorv1 "github.com/openshift/api/operator/v1" internal "github.com/openshift/client-go/operator/applyconfigurations/internal" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) // ServiceCAApplyConfiguration represents a declarative configuration of the ServiceCA type for use // with apply. type ServiceCAApplyConfiguration struct { - v1.TypeMetaApplyConfiguration `json:",inline"` - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Spec *ServiceCASpecApplyConfiguration `json:"spec,omitempty"` - Status *ServiceCAStatusApplyConfiguration `json:"status,omitempty"` + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *ServiceCASpecApplyConfiguration `json:"spec,omitempty"` + Status *ServiceCAStatusApplyConfiguration `json:"status,omitempty"` } // ServiceCA constructs a declarative configuration of the ServiceCA type for use with @@ -41,18 +41,18 @@ func ServiceCA(name string) *ServiceCAApplyConfiguration { // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! -func ExtractServiceCA(serviceCA *apioperatorv1.ServiceCA, fieldManager string) (*ServiceCAApplyConfiguration, error) { +func ExtractServiceCA(serviceCA *operatorv1.ServiceCA, fieldManager string) (*ServiceCAApplyConfiguration, error) { return extractServiceCA(serviceCA, fieldManager, "") } // ExtractServiceCAStatus is the same as ExtractServiceCA except // that it extracts the status subresource applied configuration. // Experimental! 
-func ExtractServiceCAStatus(serviceCA *apioperatorv1.ServiceCA, fieldManager string) (*ServiceCAApplyConfiguration, error) { +func ExtractServiceCAStatus(serviceCA *operatorv1.ServiceCA, fieldManager string) (*ServiceCAApplyConfiguration, error) { return extractServiceCA(serviceCA, fieldManager, "status") } -func extractServiceCA(serviceCA *apioperatorv1.ServiceCA, fieldManager string, subresource string) (*ServiceCAApplyConfiguration, error) { +func extractServiceCA(serviceCA *operatorv1.ServiceCA, fieldManager string, subresource string) (*ServiceCAApplyConfiguration, error) { b := &ServiceCAApplyConfiguration{} err := managedfields.ExtractInto(serviceCA, internal.Parser().Type("com.github.openshift.api.operator.v1.ServiceCA"), fieldManager, b, subresource) if err != nil { @@ -69,7 +69,7 @@ func extractServiceCA(serviceCA *apioperatorv1.ServiceCA, fieldManager string, s // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *ServiceCAApplyConfiguration) WithKind(value string) *ServiceCAApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -77,7 +77,7 @@ func (b *ServiceCAApplyConfiguration) WithKind(value string) *ServiceCAApplyConf // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *ServiceCAApplyConfiguration) WithAPIVersion(value string) *ServiceCAApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -86,7 +86,7 @@ func (b *ServiceCAApplyConfiguration) WithAPIVersion(value string) *ServiceCAApp // If called multiple times, the Name field is set to the value of the last call. func (b *ServiceCAApplyConfiguration) WithName(value string) *ServiceCAApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -95,7 +95,7 @@ func (b *ServiceCAApplyConfiguration) WithName(value string) *ServiceCAApplyConf // If called multiple times, the GenerateName field is set to the value of the last call. func (b *ServiceCAApplyConfiguration) WithGenerateName(value string) *ServiceCAApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -104,7 +104,7 @@ func (b *ServiceCAApplyConfiguration) WithGenerateName(value string) *ServiceCAA // If called multiple times, the Namespace field is set to the value of the last call. func (b *ServiceCAApplyConfiguration) WithNamespace(value string) *ServiceCAApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -113,7 +113,7 @@ func (b *ServiceCAApplyConfiguration) WithNamespace(value string) *ServiceCAAppl // If called multiple times, the UID field is set to the value of the last call. func (b *ServiceCAApplyConfiguration) WithUID(value types.UID) *ServiceCAApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -122,7 +122,7 @@ func (b *ServiceCAApplyConfiguration) WithUID(value types.UID) *ServiceCAApplyCo // If called multiple times, the ResourceVersion field is set to the value of the last call. 
func (b *ServiceCAApplyConfiguration) WithResourceVersion(value string) *ServiceCAApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -131,25 +131,25 @@ func (b *ServiceCAApplyConfiguration) WithResourceVersion(value string) *Service // If called multiple times, the Generation field is set to the value of the last call. func (b *ServiceCAApplyConfiguration) WithGeneration(value int64) *ServiceCAApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CreationTimestamp field is set to the value of the last call. -func (b *ServiceCAApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ServiceCAApplyConfiguration { +func (b *ServiceCAApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *ServiceCAApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. -func (b *ServiceCAApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ServiceCAApplyConfiguration { +func (b *ServiceCAApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *ServiceCAApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -158,7 +158,7 @@ func (b *ServiceCAApplyConfiguration) WithDeletionTimestamp(value metav1.Time) * // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *ServiceCAApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ServiceCAApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -168,11 +168,11 @@ func (b *ServiceCAApplyConfiguration) WithDeletionGracePeriodSeconds(value int64 // overwriting an existing map entries in Labels field with the same key. func (b *ServiceCAApplyConfiguration) WithLabels(entries map[string]string) *ServiceCAApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -183,11 +183,11 @@ func (b *ServiceCAApplyConfiguration) WithLabels(entries map[string]string) *Ser // overwriting an existing map entries in Annotations field with the same key. 
func (b *ServiceCAApplyConfiguration) WithAnnotations(entries map[string]string) *ServiceCAApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -195,13 +195,13 @@ func (b *ServiceCAApplyConfiguration) WithAnnotations(entries map[string]string) // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the OwnerReferences field. -func (b *ServiceCAApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ServiceCAApplyConfiguration { +func (b *ServiceCAApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *ServiceCAApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -212,14 +212,14 @@ func (b *ServiceCAApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerRef func (b *ServiceCAApplyConfiguration) WithFinalizers(values ...string) *ServiceCAApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } func (b *ServiceCAApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} } } @@ -242,5 +242,5 @@ func (b *ServiceCAApplyConfiguration) WithStatus(value *ServiceCAStatusApplyConf // GetName retrieves the value of the Name field in the declarative configuration. func (b *ServiceCAApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/servicecaspec.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/servicecaspec.go index c083e932e..844041ef3 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/servicecaspec.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/servicecaspec.go @@ -23,7 +23,7 @@ func ServiceCASpec() *ServiceCASpecApplyConfiguration { // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the ManagementState field is set to the value of the last call. 
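Illustrative aside: the WithLabels and WithAnnotations setters above merge rather than replace, allocating the map lazily and overwriting entries that share a key. A standalone sketch of that merge behavior, with a hypothetical helper not taken from the repository:

package main

import "fmt"

// mergeEntries mirrors the generated WithLabels/WithAnnotations logic:
// allocate the destination map only when needed, then overwrite same-key entries.
func mergeEntries(dst, entries map[string]string) map[string]string {
	if dst == nil && len(entries) > 0 {
		dst = make(map[string]string, len(entries))
	}
	for k, v := range entries {
		dst[k] = v
	}
	return dst
}

func main() {
	labels := mergeEntries(nil, map[string]string{"app": "cert-manager-operator"})
	labels = mergeEntries(labels, map[string]string{"app": "override", "tier": "operator"})
	fmt.Println(labels) // map[app:override tier:operator]
}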
func (b *ServiceCASpecApplyConfiguration) WithManagementState(value operatorv1.ManagementState) *ServiceCASpecApplyConfiguration { - b.ManagementState = &value + b.OperatorSpecApplyConfiguration.ManagementState = &value return b } @@ -31,7 +31,7 @@ func (b *ServiceCASpecApplyConfiguration) WithManagementState(value operatorv1.M // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the LogLevel field is set to the value of the last call. func (b *ServiceCASpecApplyConfiguration) WithLogLevel(value operatorv1.LogLevel) *ServiceCASpecApplyConfiguration { - b.LogLevel = &value + b.OperatorSpecApplyConfiguration.LogLevel = &value return b } @@ -39,7 +39,7 @@ func (b *ServiceCASpecApplyConfiguration) WithLogLevel(value operatorv1.LogLevel // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the OperatorLogLevel field is set to the value of the last call. func (b *ServiceCASpecApplyConfiguration) WithOperatorLogLevel(value operatorv1.LogLevel) *ServiceCASpecApplyConfiguration { - b.OperatorLogLevel = &value + b.OperatorSpecApplyConfiguration.OperatorLogLevel = &value return b } @@ -47,7 +47,7 @@ func (b *ServiceCASpecApplyConfiguration) WithOperatorLogLevel(value operatorv1. // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the UnsupportedConfigOverrides field is set to the value of the last call. func (b *ServiceCASpecApplyConfiguration) WithUnsupportedConfigOverrides(value runtime.RawExtension) *ServiceCASpecApplyConfiguration { - b.UnsupportedConfigOverrides = &value + b.OperatorSpecApplyConfiguration.UnsupportedConfigOverrides = &value return b } @@ -55,6 +55,6 @@ func (b *ServiceCASpecApplyConfiguration) WithUnsupportedConfigOverrides(value r // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the ObservedConfig field is set to the value of the last call. func (b *ServiceCASpecApplyConfiguration) WithObservedConfig(value runtime.RawExtension) *ServiceCASpecApplyConfiguration { - b.ObservedConfig = &value + b.OperatorSpecApplyConfiguration.ObservedConfig = &value return b } diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/servicecastatus.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/servicecastatus.go index 91fdbaf2d..957190e8b 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/servicecastatus.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/servicecastatus.go @@ -18,7 +18,7 @@ func ServiceCAStatus() *ServiceCAStatusApplyConfiguration { // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the ObservedGeneration field is set to the value of the last call. 
func (b *ServiceCAStatusApplyConfiguration) WithObservedGeneration(value int64) *ServiceCAStatusApplyConfiguration { - b.ObservedGeneration = &value + b.OperatorStatusApplyConfiguration.ObservedGeneration = &value return b } @@ -30,7 +30,7 @@ func (b *ServiceCAStatusApplyConfiguration) WithConditions(values ...*OperatorCo if values[i] == nil { panic("nil value passed to WithConditions") } - b.Conditions = append(b.Conditions, *values[i]) + b.OperatorStatusApplyConfiguration.Conditions = append(b.OperatorStatusApplyConfiguration.Conditions, *values[i]) } return b } @@ -39,7 +39,7 @@ func (b *ServiceCAStatusApplyConfiguration) WithConditions(values ...*OperatorCo // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Version field is set to the value of the last call. func (b *ServiceCAStatusApplyConfiguration) WithVersion(value string) *ServiceCAStatusApplyConfiguration { - b.Version = &value + b.OperatorStatusApplyConfiguration.Version = &value return b } @@ -47,7 +47,7 @@ func (b *ServiceCAStatusApplyConfiguration) WithVersion(value string) *ServiceCA // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the ReadyReplicas field is set to the value of the last call. func (b *ServiceCAStatusApplyConfiguration) WithReadyReplicas(value int32) *ServiceCAStatusApplyConfiguration { - b.ReadyReplicas = &value + b.OperatorStatusApplyConfiguration.ReadyReplicas = &value return b } @@ -55,7 +55,7 @@ func (b *ServiceCAStatusApplyConfiguration) WithReadyReplicas(value int32) *Serv // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the LatestAvailableRevision field is set to the value of the last call. 
func (b *ServiceCAStatusApplyConfiguration) WithLatestAvailableRevision(value int32) *ServiceCAStatusApplyConfiguration { - b.LatestAvailableRevision = &value + b.OperatorStatusApplyConfiguration.LatestAvailableRevision = &value return b } @@ -67,7 +67,7 @@ func (b *ServiceCAStatusApplyConfiguration) WithGenerations(values ...*Generatio if values[i] == nil { panic("nil value passed to WithGenerations") } - b.Generations = append(b.Generations, *values[i]) + b.OperatorStatusApplyConfiguration.Generations = append(b.OperatorStatusApplyConfiguration.Generations, *values[i]) } return b } diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/servicecatalogapiserver.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/servicecatalogapiserver.go index 62bbc1637..52981ca86 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/servicecatalogapiserver.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/servicecatalogapiserver.go @@ -3,21 +3,21 @@ package v1 import ( - apioperatorv1 "github.com/openshift/api/operator/v1" + operatorv1 "github.com/openshift/api/operator/v1" internal "github.com/openshift/client-go/operator/applyconfigurations/internal" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) // ServiceCatalogAPIServerApplyConfiguration represents a declarative configuration of the ServiceCatalogAPIServer type for use // with apply. type ServiceCatalogAPIServerApplyConfiguration struct { - v1.TypeMetaApplyConfiguration `json:",inline"` - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Spec *ServiceCatalogAPIServerSpecApplyConfiguration `json:"spec,omitempty"` - Status *ServiceCatalogAPIServerStatusApplyConfiguration `json:"status,omitempty"` + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *ServiceCatalogAPIServerSpecApplyConfiguration `json:"spec,omitempty"` + Status *ServiceCatalogAPIServerStatusApplyConfiguration `json:"status,omitempty"` } // ServiceCatalogAPIServer constructs a declarative configuration of the ServiceCatalogAPIServer type for use with @@ -41,18 +41,18 @@ func ServiceCatalogAPIServer(name string) *ServiceCatalogAPIServerApplyConfigura // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! -func ExtractServiceCatalogAPIServer(serviceCatalogAPIServer *apioperatorv1.ServiceCatalogAPIServer, fieldManager string) (*ServiceCatalogAPIServerApplyConfiguration, error) { +func ExtractServiceCatalogAPIServer(serviceCatalogAPIServer *operatorv1.ServiceCatalogAPIServer, fieldManager string) (*ServiceCatalogAPIServerApplyConfiguration, error) { return extractServiceCatalogAPIServer(serviceCatalogAPIServer, fieldManager, "") } // ExtractServiceCatalogAPIServerStatus is the same as ExtractServiceCatalogAPIServer except // that it extracts the status subresource applied configuration. // Experimental! 
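Illustrative aside: the Extract* helpers in these files return an apply configuration holding only the fields a given field manager owns, via managedfields.ExtractInto. A hedged sketch of the call shape for ServiceCA, assuming the object was fetched from the API server elsewhere and that "cert-manager-operator" is the field manager of interest:

package main

import (
	"fmt"

	operatorv1 "github.com/openshift/api/operator/v1"
	applyoperatorv1 "github.com/openshift/client-go/operator/applyconfigurations/operator/v1"
)

func main() {
	// Placeholder object; real code would use a lister or a Get call.
	sc := &operatorv1.ServiceCA{}

	// Extract only the status fields owned by this field manager.
	cfg, err := applyoperatorv1.ExtractServiceCAStatus(sc, "cert-manager-operator")
	if err != nil {
		fmt.Println("extract failed:", err)
		return
	}
	if name := cfg.GetName(); name != nil {
		fmt.Printf("extracted apply configuration for %q\n", *name)
	}
}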
-func ExtractServiceCatalogAPIServerStatus(serviceCatalogAPIServer *apioperatorv1.ServiceCatalogAPIServer, fieldManager string) (*ServiceCatalogAPIServerApplyConfiguration, error) { +func ExtractServiceCatalogAPIServerStatus(serviceCatalogAPIServer *operatorv1.ServiceCatalogAPIServer, fieldManager string) (*ServiceCatalogAPIServerApplyConfiguration, error) { return extractServiceCatalogAPIServer(serviceCatalogAPIServer, fieldManager, "status") } -func extractServiceCatalogAPIServer(serviceCatalogAPIServer *apioperatorv1.ServiceCatalogAPIServer, fieldManager string, subresource string) (*ServiceCatalogAPIServerApplyConfiguration, error) { +func extractServiceCatalogAPIServer(serviceCatalogAPIServer *operatorv1.ServiceCatalogAPIServer, fieldManager string, subresource string) (*ServiceCatalogAPIServerApplyConfiguration, error) { b := &ServiceCatalogAPIServerApplyConfiguration{} err := managedfields.ExtractInto(serviceCatalogAPIServer, internal.Parser().Type("com.github.openshift.api.operator.v1.ServiceCatalogAPIServer"), fieldManager, b, subresource) if err != nil { @@ -69,7 +69,7 @@ func extractServiceCatalogAPIServer(serviceCatalogAPIServer *apioperatorv1.Servi // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *ServiceCatalogAPIServerApplyConfiguration) WithKind(value string) *ServiceCatalogAPIServerApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -77,7 +77,7 @@ func (b *ServiceCatalogAPIServerApplyConfiguration) WithKind(value string) *Serv // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *ServiceCatalogAPIServerApplyConfiguration) WithAPIVersion(value string) *ServiceCatalogAPIServerApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -86,7 +86,7 @@ func (b *ServiceCatalogAPIServerApplyConfiguration) WithAPIVersion(value string) // If called multiple times, the Name field is set to the value of the last call. func (b *ServiceCatalogAPIServerApplyConfiguration) WithName(value string) *ServiceCatalogAPIServerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -95,7 +95,7 @@ func (b *ServiceCatalogAPIServerApplyConfiguration) WithName(value string) *Serv // If called multiple times, the GenerateName field is set to the value of the last call. func (b *ServiceCatalogAPIServerApplyConfiguration) WithGenerateName(value string) *ServiceCatalogAPIServerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -104,7 +104,7 @@ func (b *ServiceCatalogAPIServerApplyConfiguration) WithGenerateName(value strin // If called multiple times, the Namespace field is set to the value of the last call. func (b *ServiceCatalogAPIServerApplyConfiguration) WithNamespace(value string) *ServiceCatalogAPIServerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -113,7 +113,7 @@ func (b *ServiceCatalogAPIServerApplyConfiguration) WithNamespace(value string) // If called multiple times, the UID field is set to the value of the last call. 
func (b *ServiceCatalogAPIServerApplyConfiguration) WithUID(value types.UID) *ServiceCatalogAPIServerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -122,7 +122,7 @@ func (b *ServiceCatalogAPIServerApplyConfiguration) WithUID(value types.UID) *Se // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *ServiceCatalogAPIServerApplyConfiguration) WithResourceVersion(value string) *ServiceCatalogAPIServerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -131,25 +131,25 @@ func (b *ServiceCatalogAPIServerApplyConfiguration) WithResourceVersion(value st // If called multiple times, the Generation field is set to the value of the last call. func (b *ServiceCatalogAPIServerApplyConfiguration) WithGeneration(value int64) *ServiceCatalogAPIServerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CreationTimestamp field is set to the value of the last call. -func (b *ServiceCatalogAPIServerApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ServiceCatalogAPIServerApplyConfiguration { +func (b *ServiceCatalogAPIServerApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *ServiceCatalogAPIServerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. -func (b *ServiceCatalogAPIServerApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ServiceCatalogAPIServerApplyConfiguration { +func (b *ServiceCatalogAPIServerApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *ServiceCatalogAPIServerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -158,7 +158,7 @@ func (b *ServiceCatalogAPIServerApplyConfiguration) WithDeletionTimestamp(value // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *ServiceCatalogAPIServerApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ServiceCatalogAPIServerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -168,11 +168,11 @@ func (b *ServiceCatalogAPIServerApplyConfiguration) WithDeletionGracePeriodSecon // overwriting an existing map entries in Labels field with the same key. 
func (b *ServiceCatalogAPIServerApplyConfiguration) WithLabels(entries map[string]string) *ServiceCatalogAPIServerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -183,11 +183,11 @@ func (b *ServiceCatalogAPIServerApplyConfiguration) WithLabels(entries map[strin // overwriting an existing map entries in Annotations field with the same key. func (b *ServiceCatalogAPIServerApplyConfiguration) WithAnnotations(entries map[string]string) *ServiceCatalogAPIServerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -195,13 +195,13 @@ func (b *ServiceCatalogAPIServerApplyConfiguration) WithAnnotations(entries map[ // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the OwnerReferences field. -func (b *ServiceCatalogAPIServerApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ServiceCatalogAPIServerApplyConfiguration { +func (b *ServiceCatalogAPIServerApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *ServiceCatalogAPIServerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -212,14 +212,14 @@ func (b *ServiceCatalogAPIServerApplyConfiguration) WithOwnerReferences(values . func (b *ServiceCatalogAPIServerApplyConfiguration) WithFinalizers(values ...string) *ServiceCatalogAPIServerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } func (b *ServiceCatalogAPIServerApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} } } @@ -242,5 +242,5 @@ func (b *ServiceCatalogAPIServerApplyConfiguration) WithStatus(value *ServiceCat // GetName retrieves the value of the Name field in the declarative configuration. 
func (b *ServiceCatalogAPIServerApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/servicecatalogapiserverspec.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/servicecatalogapiserverspec.go index c43a956eb..b5271a409 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/servicecatalogapiserverspec.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/servicecatalogapiserverspec.go @@ -23,7 +23,7 @@ func ServiceCatalogAPIServerSpec() *ServiceCatalogAPIServerSpecApplyConfiguratio // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the ManagementState field is set to the value of the last call. func (b *ServiceCatalogAPIServerSpecApplyConfiguration) WithManagementState(value operatorv1.ManagementState) *ServiceCatalogAPIServerSpecApplyConfiguration { - b.ManagementState = &value + b.OperatorSpecApplyConfiguration.ManagementState = &value return b } @@ -31,7 +31,7 @@ func (b *ServiceCatalogAPIServerSpecApplyConfiguration) WithManagementState(valu // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the LogLevel field is set to the value of the last call. func (b *ServiceCatalogAPIServerSpecApplyConfiguration) WithLogLevel(value operatorv1.LogLevel) *ServiceCatalogAPIServerSpecApplyConfiguration { - b.LogLevel = &value + b.OperatorSpecApplyConfiguration.LogLevel = &value return b } @@ -39,7 +39,7 @@ func (b *ServiceCatalogAPIServerSpecApplyConfiguration) WithLogLevel(value opera // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the OperatorLogLevel field is set to the value of the last call. func (b *ServiceCatalogAPIServerSpecApplyConfiguration) WithOperatorLogLevel(value operatorv1.LogLevel) *ServiceCatalogAPIServerSpecApplyConfiguration { - b.OperatorLogLevel = &value + b.OperatorSpecApplyConfiguration.OperatorLogLevel = &value return b } @@ -47,7 +47,7 @@ func (b *ServiceCatalogAPIServerSpecApplyConfiguration) WithOperatorLogLevel(val // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the UnsupportedConfigOverrides field is set to the value of the last call. func (b *ServiceCatalogAPIServerSpecApplyConfiguration) WithUnsupportedConfigOverrides(value runtime.RawExtension) *ServiceCatalogAPIServerSpecApplyConfiguration { - b.UnsupportedConfigOverrides = &value + b.OperatorSpecApplyConfiguration.UnsupportedConfigOverrides = &value return b } @@ -55,6 +55,6 @@ func (b *ServiceCatalogAPIServerSpecApplyConfiguration) WithUnsupportedConfigOve // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the ObservedConfig field is set to the value of the last call. 
func (b *ServiceCatalogAPIServerSpecApplyConfiguration) WithObservedConfig(value runtime.RawExtension) *ServiceCatalogAPIServerSpecApplyConfiguration { - b.ObservedConfig = &value + b.OperatorSpecApplyConfiguration.ObservedConfig = &value return b } diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/servicecatalogapiserverstatus.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/servicecatalogapiserverstatus.go index e7e16aebd..a82e4e5f0 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/servicecatalogapiserverstatus.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/servicecatalogapiserverstatus.go @@ -18,7 +18,7 @@ func ServiceCatalogAPIServerStatus() *ServiceCatalogAPIServerStatusApplyConfigur // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the ObservedGeneration field is set to the value of the last call. func (b *ServiceCatalogAPIServerStatusApplyConfiguration) WithObservedGeneration(value int64) *ServiceCatalogAPIServerStatusApplyConfiguration { - b.ObservedGeneration = &value + b.OperatorStatusApplyConfiguration.ObservedGeneration = &value return b } @@ -30,7 +30,7 @@ func (b *ServiceCatalogAPIServerStatusApplyConfiguration) WithConditions(values if values[i] == nil { panic("nil value passed to WithConditions") } - b.Conditions = append(b.Conditions, *values[i]) + b.OperatorStatusApplyConfiguration.Conditions = append(b.OperatorStatusApplyConfiguration.Conditions, *values[i]) } return b } @@ -39,7 +39,7 @@ func (b *ServiceCatalogAPIServerStatusApplyConfiguration) WithConditions(values // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Version field is set to the value of the last call. func (b *ServiceCatalogAPIServerStatusApplyConfiguration) WithVersion(value string) *ServiceCatalogAPIServerStatusApplyConfiguration { - b.Version = &value + b.OperatorStatusApplyConfiguration.Version = &value return b } @@ -47,7 +47,7 @@ func (b *ServiceCatalogAPIServerStatusApplyConfiguration) WithVersion(value stri // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the ReadyReplicas field is set to the value of the last call. func (b *ServiceCatalogAPIServerStatusApplyConfiguration) WithReadyReplicas(value int32) *ServiceCatalogAPIServerStatusApplyConfiguration { - b.ReadyReplicas = &value + b.OperatorStatusApplyConfiguration.ReadyReplicas = &value return b } @@ -55,7 +55,7 @@ func (b *ServiceCatalogAPIServerStatusApplyConfiguration) WithReadyReplicas(valu // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the LatestAvailableRevision field is set to the value of the last call. 
func (b *ServiceCatalogAPIServerStatusApplyConfiguration) WithLatestAvailableRevision(value int32) *ServiceCatalogAPIServerStatusApplyConfiguration { - b.LatestAvailableRevision = &value + b.OperatorStatusApplyConfiguration.LatestAvailableRevision = &value return b } @@ -67,7 +67,7 @@ func (b *ServiceCatalogAPIServerStatusApplyConfiguration) WithGenerations(values if values[i] == nil { panic("nil value passed to WithGenerations") } - b.Generations = append(b.Generations, *values[i]) + b.OperatorStatusApplyConfiguration.Generations = append(b.OperatorStatusApplyConfiguration.Generations, *values[i]) } return b } diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/servicecatalogcontrollermanager.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/servicecatalogcontrollermanager.go index 3755bd1c4..f01957710 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/servicecatalogcontrollermanager.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/servicecatalogcontrollermanager.go @@ -3,21 +3,21 @@ package v1 import ( - apioperatorv1 "github.com/openshift/api/operator/v1" + operatorv1 "github.com/openshift/api/operator/v1" internal "github.com/openshift/client-go/operator/applyconfigurations/internal" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) // ServiceCatalogControllerManagerApplyConfiguration represents a declarative configuration of the ServiceCatalogControllerManager type for use // with apply. type ServiceCatalogControllerManagerApplyConfiguration struct { - v1.TypeMetaApplyConfiguration `json:",inline"` - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Spec *ServiceCatalogControllerManagerSpecApplyConfiguration `json:"spec,omitempty"` - Status *ServiceCatalogControllerManagerStatusApplyConfiguration `json:"status,omitempty"` + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *ServiceCatalogControllerManagerSpecApplyConfiguration `json:"spec,omitempty"` + Status *ServiceCatalogControllerManagerStatusApplyConfiguration `json:"status,omitempty"` } // ServiceCatalogControllerManager constructs a declarative configuration of the ServiceCatalogControllerManager type for use with @@ -41,18 +41,18 @@ func ServiceCatalogControllerManager(name string) *ServiceCatalogControllerManag // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! 
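Illustrative aside: the alias shuffle in these import blocks is easy to misread, so a sketch of what each renamed package now provides may help: apismetav1 is the apimachinery API types package (Time and friends), while metav1 now refers to the client-go apply-configuration builders. Hypothetical usage, not taken from the patch:

package main

import (
	"fmt"
	"time"

	apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	metav1 "k8s.io/client-go/applyconfigurations/meta/v1"
)

func main() {
	// apismetav1: concrete API types such as Time, used by WithCreationTimestamp.
	created := apismetav1.NewTime(time.Now())

	// metav1: apply-configuration builders such as OwnerReferenceApplyConfiguration,
	// used by WithOwnerReferences and ensureObjectMetaApplyConfigurationExists.
	ref := metav1.OwnerReference().
		WithAPIVersion("operator.openshift.io/v1").
		WithKind("ServiceCA").
		WithName("cluster")

	fmt.Println(created.IsZero(), *ref.Name)
}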
-func ExtractServiceCatalogControllerManager(serviceCatalogControllerManager *apioperatorv1.ServiceCatalogControllerManager, fieldManager string) (*ServiceCatalogControllerManagerApplyConfiguration, error) { +func ExtractServiceCatalogControllerManager(serviceCatalogControllerManager *operatorv1.ServiceCatalogControllerManager, fieldManager string) (*ServiceCatalogControllerManagerApplyConfiguration, error) { return extractServiceCatalogControllerManager(serviceCatalogControllerManager, fieldManager, "") } // ExtractServiceCatalogControllerManagerStatus is the same as ExtractServiceCatalogControllerManager except // that it extracts the status subresource applied configuration. // Experimental! -func ExtractServiceCatalogControllerManagerStatus(serviceCatalogControllerManager *apioperatorv1.ServiceCatalogControllerManager, fieldManager string) (*ServiceCatalogControllerManagerApplyConfiguration, error) { +func ExtractServiceCatalogControllerManagerStatus(serviceCatalogControllerManager *operatorv1.ServiceCatalogControllerManager, fieldManager string) (*ServiceCatalogControllerManagerApplyConfiguration, error) { return extractServiceCatalogControllerManager(serviceCatalogControllerManager, fieldManager, "status") } -func extractServiceCatalogControllerManager(serviceCatalogControllerManager *apioperatorv1.ServiceCatalogControllerManager, fieldManager string, subresource string) (*ServiceCatalogControllerManagerApplyConfiguration, error) { +func extractServiceCatalogControllerManager(serviceCatalogControllerManager *operatorv1.ServiceCatalogControllerManager, fieldManager string, subresource string) (*ServiceCatalogControllerManagerApplyConfiguration, error) { b := &ServiceCatalogControllerManagerApplyConfiguration{} err := managedfields.ExtractInto(serviceCatalogControllerManager, internal.Parser().Type("com.github.openshift.api.operator.v1.ServiceCatalogControllerManager"), fieldManager, b, subresource) if err != nil { @@ -69,7 +69,7 @@ func extractServiceCatalogControllerManager(serviceCatalogControllerManager *api // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *ServiceCatalogControllerManagerApplyConfiguration) WithKind(value string) *ServiceCatalogControllerManagerApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -77,7 +77,7 @@ func (b *ServiceCatalogControllerManagerApplyConfiguration) WithKind(value strin // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *ServiceCatalogControllerManagerApplyConfiguration) WithAPIVersion(value string) *ServiceCatalogControllerManagerApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -86,7 +86,7 @@ func (b *ServiceCatalogControllerManagerApplyConfiguration) WithAPIVersion(value // If called multiple times, the Name field is set to the value of the last call. 
func (b *ServiceCatalogControllerManagerApplyConfiguration) WithName(value string) *ServiceCatalogControllerManagerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -95,7 +95,7 @@ func (b *ServiceCatalogControllerManagerApplyConfiguration) WithName(value strin // If called multiple times, the GenerateName field is set to the value of the last call. func (b *ServiceCatalogControllerManagerApplyConfiguration) WithGenerateName(value string) *ServiceCatalogControllerManagerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -104,7 +104,7 @@ func (b *ServiceCatalogControllerManagerApplyConfiguration) WithGenerateName(val // If called multiple times, the Namespace field is set to the value of the last call. func (b *ServiceCatalogControllerManagerApplyConfiguration) WithNamespace(value string) *ServiceCatalogControllerManagerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -113,7 +113,7 @@ func (b *ServiceCatalogControllerManagerApplyConfiguration) WithNamespace(value // If called multiple times, the UID field is set to the value of the last call. func (b *ServiceCatalogControllerManagerApplyConfiguration) WithUID(value types.UID) *ServiceCatalogControllerManagerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -122,7 +122,7 @@ func (b *ServiceCatalogControllerManagerApplyConfiguration) WithUID(value types. // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *ServiceCatalogControllerManagerApplyConfiguration) WithResourceVersion(value string) *ServiceCatalogControllerManagerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -131,25 +131,25 @@ func (b *ServiceCatalogControllerManagerApplyConfiguration) WithResourceVersion( // If called multiple times, the Generation field is set to the value of the last call. func (b *ServiceCatalogControllerManagerApplyConfiguration) WithGeneration(value int64) *ServiceCatalogControllerManagerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CreationTimestamp field is set to the value of the last call. -func (b *ServiceCatalogControllerManagerApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ServiceCatalogControllerManagerApplyConfiguration { +func (b *ServiceCatalogControllerManagerApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *ServiceCatalogControllerManagerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. 
// If called multiple times, the DeletionTimestamp field is set to the value of the last call. -func (b *ServiceCatalogControllerManagerApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ServiceCatalogControllerManagerApplyConfiguration { +func (b *ServiceCatalogControllerManagerApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *ServiceCatalogControllerManagerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -158,7 +158,7 @@ func (b *ServiceCatalogControllerManagerApplyConfiguration) WithDeletionTimestam // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *ServiceCatalogControllerManagerApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ServiceCatalogControllerManagerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -168,11 +168,11 @@ func (b *ServiceCatalogControllerManagerApplyConfiguration) WithDeletionGracePer // overwriting an existing map entries in Labels field with the same key. func (b *ServiceCatalogControllerManagerApplyConfiguration) WithLabels(entries map[string]string) *ServiceCatalogControllerManagerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -183,11 +183,11 @@ func (b *ServiceCatalogControllerManagerApplyConfiguration) WithLabels(entries m // overwriting an existing map entries in Annotations field with the same key. func (b *ServiceCatalogControllerManagerApplyConfiguration) WithAnnotations(entries map[string]string) *ServiceCatalogControllerManagerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -195,13 +195,13 @@ func (b *ServiceCatalogControllerManagerApplyConfiguration) WithAnnotations(entr // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the OwnerReferences field. 
-func (b *ServiceCatalogControllerManagerApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ServiceCatalogControllerManagerApplyConfiguration { +func (b *ServiceCatalogControllerManagerApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *ServiceCatalogControllerManagerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -212,14 +212,14 @@ func (b *ServiceCatalogControllerManagerApplyConfiguration) WithOwnerReferences( func (b *ServiceCatalogControllerManagerApplyConfiguration) WithFinalizers(values ...string) *ServiceCatalogControllerManagerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } func (b *ServiceCatalogControllerManagerApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} } } @@ -242,5 +242,5 @@ func (b *ServiceCatalogControllerManagerApplyConfiguration) WithStatus(value *Se // GetName retrieves the value of the Name field in the declarative configuration. func (b *ServiceCatalogControllerManagerApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/servicecatalogcontrollermanagerspec.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/servicecatalogcontrollermanagerspec.go index 301fd67b5..83df00b3b 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/servicecatalogcontrollermanagerspec.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/servicecatalogcontrollermanagerspec.go @@ -23,7 +23,7 @@ func ServiceCatalogControllerManagerSpec() *ServiceCatalogControllerManagerSpecA // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the ManagementState field is set to the value of the last call. func (b *ServiceCatalogControllerManagerSpecApplyConfiguration) WithManagementState(value operatorv1.ManagementState) *ServiceCatalogControllerManagerSpecApplyConfiguration { - b.ManagementState = &value + b.OperatorSpecApplyConfiguration.ManagementState = &value return b } @@ -31,7 +31,7 @@ func (b *ServiceCatalogControllerManagerSpecApplyConfiguration) WithManagementSt // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the LogLevel field is set to the value of the last call. 
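Illustrative aside: the list-adders above (WithOwnerReferences, WithConditions, WithGenerations) all share one shape: variadic pointers, an explicit panic on nil so a bad element fails loudly instead of being dereferenced later, and an append of the dereferenced values. A self-contained sketch of that shape with hypothetical types:

package main

import "fmt"

// element stands in for an apply-configuration item such as an owner reference.
type element struct{ name string }

type builder struct{ elements []element }

// withElements mirrors the generated list-adders: nil-check each pointer,
// then append the dereferenced value and return the receiver for chaining.
func (b *builder) withElements(values ...*element) *builder {
	for i := range values {
		if values[i] == nil {
			panic("nil value passed to withElements")
		}
		b.elements = append(b.elements, *values[i])
	}
	return b
}

func main() {
	b := (&builder{}).withElements(&element{name: "a"}, &element{name: "b"})
	fmt.Println(len(b.elements)) // 2
}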
func (b *ServiceCatalogControllerManagerSpecApplyConfiguration) WithLogLevel(value operatorv1.LogLevel) *ServiceCatalogControllerManagerSpecApplyConfiguration { - b.LogLevel = &value + b.OperatorSpecApplyConfiguration.LogLevel = &value return b } @@ -39,7 +39,7 @@ func (b *ServiceCatalogControllerManagerSpecApplyConfiguration) WithLogLevel(val // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the OperatorLogLevel field is set to the value of the last call. func (b *ServiceCatalogControllerManagerSpecApplyConfiguration) WithOperatorLogLevel(value operatorv1.LogLevel) *ServiceCatalogControllerManagerSpecApplyConfiguration { - b.OperatorLogLevel = &value + b.OperatorSpecApplyConfiguration.OperatorLogLevel = &value return b } @@ -47,7 +47,7 @@ func (b *ServiceCatalogControllerManagerSpecApplyConfiguration) WithOperatorLogL // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the UnsupportedConfigOverrides field is set to the value of the last call. func (b *ServiceCatalogControllerManagerSpecApplyConfiguration) WithUnsupportedConfigOverrides(value runtime.RawExtension) *ServiceCatalogControllerManagerSpecApplyConfiguration { - b.UnsupportedConfigOverrides = &value + b.OperatorSpecApplyConfiguration.UnsupportedConfigOverrides = &value return b } @@ -55,6 +55,6 @@ func (b *ServiceCatalogControllerManagerSpecApplyConfiguration) WithUnsupportedC // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the ObservedConfig field is set to the value of the last call. func (b *ServiceCatalogControllerManagerSpecApplyConfiguration) WithObservedConfig(value runtime.RawExtension) *ServiceCatalogControllerManagerSpecApplyConfiguration { - b.ObservedConfig = &value + b.OperatorSpecApplyConfiguration.ObservedConfig = &value return b } diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/servicecatalogcontrollermanagerstatus.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/servicecatalogcontrollermanagerstatus.go index f01f6f796..d15370217 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/servicecatalogcontrollermanagerstatus.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/servicecatalogcontrollermanagerstatus.go @@ -18,7 +18,7 @@ func ServiceCatalogControllerManagerStatus() *ServiceCatalogControllerManagerSta // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the ObservedGeneration field is set to the value of the last call. 
func (b *ServiceCatalogControllerManagerStatusApplyConfiguration) WithObservedGeneration(value int64) *ServiceCatalogControllerManagerStatusApplyConfiguration { - b.ObservedGeneration = &value + b.OperatorStatusApplyConfiguration.ObservedGeneration = &value return b } @@ -30,7 +30,7 @@ func (b *ServiceCatalogControllerManagerStatusApplyConfiguration) WithConditions if values[i] == nil { panic("nil value passed to WithConditions") } - b.Conditions = append(b.Conditions, *values[i]) + b.OperatorStatusApplyConfiguration.Conditions = append(b.OperatorStatusApplyConfiguration.Conditions, *values[i]) } return b } @@ -39,7 +39,7 @@ func (b *ServiceCatalogControllerManagerStatusApplyConfiguration) WithConditions // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Version field is set to the value of the last call. func (b *ServiceCatalogControllerManagerStatusApplyConfiguration) WithVersion(value string) *ServiceCatalogControllerManagerStatusApplyConfiguration { - b.Version = &value + b.OperatorStatusApplyConfiguration.Version = &value return b } @@ -47,7 +47,7 @@ func (b *ServiceCatalogControllerManagerStatusApplyConfiguration) WithVersion(va // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the ReadyReplicas field is set to the value of the last call. func (b *ServiceCatalogControllerManagerStatusApplyConfiguration) WithReadyReplicas(value int32) *ServiceCatalogControllerManagerStatusApplyConfiguration { - b.ReadyReplicas = &value + b.OperatorStatusApplyConfiguration.ReadyReplicas = &value return b } @@ -55,7 +55,7 @@ func (b *ServiceCatalogControllerManagerStatusApplyConfiguration) WithReadyRepli // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the LatestAvailableRevision field is set to the value of the last call. func (b *ServiceCatalogControllerManagerStatusApplyConfiguration) WithLatestAvailableRevision(value int32) *ServiceCatalogControllerManagerStatusApplyConfiguration { - b.LatestAvailableRevision = &value + b.OperatorStatusApplyConfiguration.LatestAvailableRevision = &value return b } @@ -67,7 +67,7 @@ func (b *ServiceCatalogControllerManagerStatusApplyConfiguration) WithGeneration if values[i] == nil { panic("nil value passed to WithGenerations") } - b.Generations = append(b.Generations, *values[i]) + b.OperatorStatusApplyConfiguration.Generations = append(b.OperatorStatusApplyConfiguration.Generations, *values[i]) } return b } diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/sflowconfig.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/sflowconfig.go index 3b03dda22..350bfbd98 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/sflowconfig.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/sflowconfig.go @@ -3,13 +3,13 @@ package v1 import ( - v1 "github.com/openshift/api/operator/v1" + operatorv1 "github.com/openshift/api/operator/v1" ) // SFlowConfigApplyConfiguration represents a declarative configuration of the SFlowConfig type for use // with apply. 
type SFlowConfigApplyConfiguration struct { - Collectors []v1.IPPort `json:"collectors,omitempty"` + Collectors []operatorv1.IPPort `json:"collectors,omitempty"` } // SFlowConfigApplyConfiguration constructs a declarative configuration of the SFlowConfig type for use with @@ -21,7 +21,7 @@ func SFlowConfig() *SFlowConfigApplyConfiguration { // WithCollectors adds the given value to the Collectors field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the Collectors field. -func (b *SFlowConfigApplyConfiguration) WithCollectors(values ...v1.IPPort) *SFlowConfigApplyConfiguration { +func (b *SFlowConfigApplyConfiguration) WithCollectors(values ...operatorv1.IPPort) *SFlowConfigApplyConfiguration { for i := range values { b.Collectors = append(b.Collectors, values[i]) } diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/staticpodoperatorspec.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/staticpodoperatorspec.go index b033f0d9a..b2434f8d7 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/staticpodoperatorspec.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/staticpodoperatorspec.go @@ -26,7 +26,7 @@ func StaticPodOperatorSpec() *StaticPodOperatorSpecApplyConfiguration { // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the ManagementState field is set to the value of the last call. func (b *StaticPodOperatorSpecApplyConfiguration) WithManagementState(value operatorv1.ManagementState) *StaticPodOperatorSpecApplyConfiguration { - b.ManagementState = &value + b.OperatorSpecApplyConfiguration.ManagementState = &value return b } @@ -34,7 +34,7 @@ func (b *StaticPodOperatorSpecApplyConfiguration) WithManagementState(value oper // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the LogLevel field is set to the value of the last call. func (b *StaticPodOperatorSpecApplyConfiguration) WithLogLevel(value operatorv1.LogLevel) *StaticPodOperatorSpecApplyConfiguration { - b.LogLevel = &value + b.OperatorSpecApplyConfiguration.LogLevel = &value return b } @@ -42,7 +42,7 @@ func (b *StaticPodOperatorSpecApplyConfiguration) WithLogLevel(value operatorv1. // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the OperatorLogLevel field is set to the value of the last call. func (b *StaticPodOperatorSpecApplyConfiguration) WithOperatorLogLevel(value operatorv1.LogLevel) *StaticPodOperatorSpecApplyConfiguration { - b.OperatorLogLevel = &value + b.OperatorSpecApplyConfiguration.OperatorLogLevel = &value return b } @@ -50,7 +50,7 @@ func (b *StaticPodOperatorSpecApplyConfiguration) WithOperatorLogLevel(value ope // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the UnsupportedConfigOverrides field is set to the value of the last call. 
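Illustrative aside: the spec setters regenerated here are meant to be chained. A hedged usage sketch for StaticPodOperatorSpec, using the ManagementState and LogLevel constants from openshift/api (values chosen purely for illustration):

package main

import (
	"fmt"

	operatorv1 "github.com/openshift/api/operator/v1"
	applyoperatorv1 "github.com/openshift/client-go/operator/applyconfigurations/operator/v1"
)

func main() {
	// Build a spec by chaining the "With" setters; each call returns the receiver.
	spec := applyoperatorv1.StaticPodOperatorSpec().
		WithManagementState(operatorv1.Managed).
		WithLogLevel(operatorv1.Normal).
		WithOperatorLogLevel(operatorv1.Debug)

	// The values land on the embedded OperatorSpecApplyConfiguration fields.
	fmt.Println(*spec.ManagementState, *spec.LogLevel, *spec.OperatorLogLevel)
}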
func (b *StaticPodOperatorSpecApplyConfiguration) WithUnsupportedConfigOverrides(value runtime.RawExtension) *StaticPodOperatorSpecApplyConfiguration { - b.UnsupportedConfigOverrides = &value + b.OperatorSpecApplyConfiguration.UnsupportedConfigOverrides = &value return b } @@ -58,7 +58,7 @@ func (b *StaticPodOperatorSpecApplyConfiguration) WithUnsupportedConfigOverrides // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the ObservedConfig field is set to the value of the last call. func (b *StaticPodOperatorSpecApplyConfiguration) WithObservedConfig(value runtime.RawExtension) *StaticPodOperatorSpecApplyConfiguration { - b.ObservedConfig = &value + b.OperatorSpecApplyConfiguration.ObservedConfig = &value return b } diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/staticpodoperatorstatus.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/staticpodoperatorstatus.go index d805f0a63..0067b78c7 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/staticpodoperatorstatus.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/staticpodoperatorstatus.go @@ -20,7 +20,7 @@ func StaticPodOperatorStatus() *StaticPodOperatorStatusApplyConfiguration { // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the ObservedGeneration field is set to the value of the last call. func (b *StaticPodOperatorStatusApplyConfiguration) WithObservedGeneration(value int64) *StaticPodOperatorStatusApplyConfiguration { - b.ObservedGeneration = &value + b.OperatorStatusApplyConfiguration.ObservedGeneration = &value return b } @@ -32,7 +32,7 @@ func (b *StaticPodOperatorStatusApplyConfiguration) WithConditions(values ...*Op if values[i] == nil { panic("nil value passed to WithConditions") } - b.Conditions = append(b.Conditions, *values[i]) + b.OperatorStatusApplyConfiguration.Conditions = append(b.OperatorStatusApplyConfiguration.Conditions, *values[i]) } return b } @@ -41,7 +41,7 @@ func (b *StaticPodOperatorStatusApplyConfiguration) WithConditions(values ...*Op // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Version field is set to the value of the last call. func (b *StaticPodOperatorStatusApplyConfiguration) WithVersion(value string) *StaticPodOperatorStatusApplyConfiguration { - b.Version = &value + b.OperatorStatusApplyConfiguration.Version = &value return b } @@ -49,7 +49,7 @@ func (b *StaticPodOperatorStatusApplyConfiguration) WithVersion(value string) *S // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the ReadyReplicas field is set to the value of the last call. func (b *StaticPodOperatorStatusApplyConfiguration) WithReadyReplicas(value int32) *StaticPodOperatorStatusApplyConfiguration { - b.ReadyReplicas = &value + b.OperatorStatusApplyConfiguration.ReadyReplicas = &value return b } @@ -57,7 +57,7 @@ func (b *StaticPodOperatorStatusApplyConfiguration) WithReadyReplicas(value int3 // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the LatestAvailableRevision field is set to the value of the last call. 
func (b *StaticPodOperatorStatusApplyConfiguration) WithLatestAvailableRevision(value int32) *StaticPodOperatorStatusApplyConfiguration { - b.LatestAvailableRevision = &value + b.OperatorStatusApplyConfiguration.LatestAvailableRevision = &value return b } @@ -69,7 +69,7 @@ func (b *StaticPodOperatorStatusApplyConfiguration) WithGenerations(values ...*G if values[i] == nil { panic("nil value passed to WithGenerations") } - b.Generations = append(b.Generations, *values[i]) + b.OperatorStatusApplyConfiguration.Generations = append(b.OperatorStatusApplyConfiguration.Generations, *values[i]) } return b } diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/storage.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/storage.go index 418d1dcc2..fe464c41e 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/storage.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/storage.go @@ -3,21 +3,21 @@ package v1 import ( - apioperatorv1 "github.com/openshift/api/operator/v1" + operatorv1 "github.com/openshift/api/operator/v1" internal "github.com/openshift/client-go/operator/applyconfigurations/internal" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) // StorageApplyConfiguration represents a declarative configuration of the Storage type for use // with apply. type StorageApplyConfiguration struct { - v1.TypeMetaApplyConfiguration `json:",inline"` - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Spec *StorageSpecApplyConfiguration `json:"spec,omitempty"` - Status *StorageStatusApplyConfiguration `json:"status,omitempty"` + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *StorageSpecApplyConfiguration `json:"spec,omitempty"` + Status *StorageStatusApplyConfiguration `json:"status,omitempty"` } // Storage constructs a declarative configuration of the Storage type for use with @@ -41,18 +41,18 @@ func Storage(name string) *StorageApplyConfiguration { // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! -func ExtractStorage(storage *apioperatorv1.Storage, fieldManager string) (*StorageApplyConfiguration, error) { +func ExtractStorage(storage *operatorv1.Storage, fieldManager string) (*StorageApplyConfiguration, error) { return extractStorage(storage, fieldManager, "") } // ExtractStorageStatus is the same as ExtractStorage except // that it extracts the status subresource applied configuration. // Experimental! 
-func ExtractStorageStatus(storage *apioperatorv1.Storage, fieldManager string) (*StorageApplyConfiguration, error) { +func ExtractStorageStatus(storage *operatorv1.Storage, fieldManager string) (*StorageApplyConfiguration, error) { return extractStorage(storage, fieldManager, "status") } -func extractStorage(storage *apioperatorv1.Storage, fieldManager string, subresource string) (*StorageApplyConfiguration, error) { +func extractStorage(storage *operatorv1.Storage, fieldManager string, subresource string) (*StorageApplyConfiguration, error) { b := &StorageApplyConfiguration{} err := managedfields.ExtractInto(storage, internal.Parser().Type("com.github.openshift.api.operator.v1.Storage"), fieldManager, b, subresource) if err != nil { @@ -69,7 +69,7 @@ func extractStorage(storage *apioperatorv1.Storage, fieldManager string, subreso // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *StorageApplyConfiguration) WithKind(value string) *StorageApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -77,7 +77,7 @@ func (b *StorageApplyConfiguration) WithKind(value string) *StorageApplyConfigur // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *StorageApplyConfiguration) WithAPIVersion(value string) *StorageApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -86,7 +86,7 @@ func (b *StorageApplyConfiguration) WithAPIVersion(value string) *StorageApplyCo // If called multiple times, the Name field is set to the value of the last call. func (b *StorageApplyConfiguration) WithName(value string) *StorageApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -95,7 +95,7 @@ func (b *StorageApplyConfiguration) WithName(value string) *StorageApplyConfigur // If called multiple times, the GenerateName field is set to the value of the last call. func (b *StorageApplyConfiguration) WithGenerateName(value string) *StorageApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -104,7 +104,7 @@ func (b *StorageApplyConfiguration) WithGenerateName(value string) *StorageApply // If called multiple times, the Namespace field is set to the value of the last call. func (b *StorageApplyConfiguration) WithNamespace(value string) *StorageApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -113,7 +113,7 @@ func (b *StorageApplyConfiguration) WithNamespace(value string) *StorageApplyCon // If called multiple times, the UID field is set to the value of the last call. func (b *StorageApplyConfiguration) WithUID(value types.UID) *StorageApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -122,7 +122,7 @@ func (b *StorageApplyConfiguration) WithUID(value types.UID) *StorageApplyConfig // If called multiple times, the ResourceVersion field is set to the value of the last call. 
func (b *StorageApplyConfiguration) WithResourceVersion(value string) *StorageApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -131,25 +131,25 @@ func (b *StorageApplyConfiguration) WithResourceVersion(value string) *StorageAp // If called multiple times, the Generation field is set to the value of the last call. func (b *StorageApplyConfiguration) WithGeneration(value int64) *StorageApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CreationTimestamp field is set to the value of the last call. -func (b *StorageApplyConfiguration) WithCreationTimestamp(value metav1.Time) *StorageApplyConfiguration { +func (b *StorageApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *StorageApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. -func (b *StorageApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *StorageApplyConfiguration { +func (b *StorageApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *StorageApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -158,7 +158,7 @@ func (b *StorageApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *St // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *StorageApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *StorageApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -168,11 +168,11 @@ func (b *StorageApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) // overwriting an existing map entries in Labels field with the same key. func (b *StorageApplyConfiguration) WithLabels(entries map[string]string) *StorageApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -183,11 +183,11 @@ func (b *StorageApplyConfiguration) WithLabels(entries map[string]string) *Stora // overwriting an existing map entries in Annotations field with the same key. 
func (b *StorageApplyConfiguration) WithAnnotations(entries map[string]string) *StorageApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -195,13 +195,13 @@ func (b *StorageApplyConfiguration) WithAnnotations(entries map[string]string) * // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the OwnerReferences field. -func (b *StorageApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *StorageApplyConfiguration { +func (b *StorageApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *StorageApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -212,14 +212,14 @@ func (b *StorageApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerRefer func (b *StorageApplyConfiguration) WithFinalizers(values ...string) *StorageApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } func (b *StorageApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} } } @@ -242,5 +242,5 @@ func (b *StorageApplyConfiguration) WithStatus(value *StorageStatusApplyConfigur // GetName retrieves the value of the Name field in the declarative configuration. func (b *StorageApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/storagespec.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/storagespec.go index 735723c11..152ea2fe0 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/storagespec.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/storagespec.go @@ -24,7 +24,7 @@ func StorageSpec() *StorageSpecApplyConfiguration { // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the ManagementState field is set to the value of the last call. 
func (b *StorageSpecApplyConfiguration) WithManagementState(value operatorv1.ManagementState) *StorageSpecApplyConfiguration { - b.ManagementState = &value + b.OperatorSpecApplyConfiguration.ManagementState = &value return b } @@ -32,7 +32,7 @@ func (b *StorageSpecApplyConfiguration) WithManagementState(value operatorv1.Man // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the LogLevel field is set to the value of the last call. func (b *StorageSpecApplyConfiguration) WithLogLevel(value operatorv1.LogLevel) *StorageSpecApplyConfiguration { - b.LogLevel = &value + b.OperatorSpecApplyConfiguration.LogLevel = &value return b } @@ -40,7 +40,7 @@ func (b *StorageSpecApplyConfiguration) WithLogLevel(value operatorv1.LogLevel) // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the OperatorLogLevel field is set to the value of the last call. func (b *StorageSpecApplyConfiguration) WithOperatorLogLevel(value operatorv1.LogLevel) *StorageSpecApplyConfiguration { - b.OperatorLogLevel = &value + b.OperatorSpecApplyConfiguration.OperatorLogLevel = &value return b } @@ -48,7 +48,7 @@ func (b *StorageSpecApplyConfiguration) WithOperatorLogLevel(value operatorv1.Lo // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the UnsupportedConfigOverrides field is set to the value of the last call. func (b *StorageSpecApplyConfiguration) WithUnsupportedConfigOverrides(value runtime.RawExtension) *StorageSpecApplyConfiguration { - b.UnsupportedConfigOverrides = &value + b.OperatorSpecApplyConfiguration.UnsupportedConfigOverrides = &value return b } @@ -56,7 +56,7 @@ func (b *StorageSpecApplyConfiguration) WithUnsupportedConfigOverrides(value run // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the ObservedConfig field is set to the value of the last call. func (b *StorageSpecApplyConfiguration) WithObservedConfig(value runtime.RawExtension) *StorageSpecApplyConfiguration { - b.ObservedConfig = &value + b.OperatorSpecApplyConfiguration.ObservedConfig = &value return b } diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/storagestatus.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/storagestatus.go index 13f796797..f6a034906 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/storagestatus.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/storagestatus.go @@ -18,7 +18,7 @@ func StorageStatus() *StorageStatusApplyConfiguration { // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the ObservedGeneration field is set to the value of the last call. 
func (b *StorageStatusApplyConfiguration) WithObservedGeneration(value int64) *StorageStatusApplyConfiguration { - b.ObservedGeneration = &value + b.OperatorStatusApplyConfiguration.ObservedGeneration = &value return b } @@ -30,7 +30,7 @@ func (b *StorageStatusApplyConfiguration) WithConditions(values ...*OperatorCond if values[i] == nil { panic("nil value passed to WithConditions") } - b.Conditions = append(b.Conditions, *values[i]) + b.OperatorStatusApplyConfiguration.Conditions = append(b.OperatorStatusApplyConfiguration.Conditions, *values[i]) } return b } @@ -39,7 +39,7 @@ func (b *StorageStatusApplyConfiguration) WithConditions(values ...*OperatorCond // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Version field is set to the value of the last call. func (b *StorageStatusApplyConfiguration) WithVersion(value string) *StorageStatusApplyConfiguration { - b.Version = &value + b.OperatorStatusApplyConfiguration.Version = &value return b } @@ -47,7 +47,7 @@ func (b *StorageStatusApplyConfiguration) WithVersion(value string) *StorageStat // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the ReadyReplicas field is set to the value of the last call. func (b *StorageStatusApplyConfiguration) WithReadyReplicas(value int32) *StorageStatusApplyConfiguration { - b.ReadyReplicas = &value + b.OperatorStatusApplyConfiguration.ReadyReplicas = &value return b } @@ -55,7 +55,7 @@ func (b *StorageStatusApplyConfiguration) WithReadyReplicas(value int32) *Storag // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the LatestAvailableRevision field is set to the value of the last call. func (b *StorageStatusApplyConfiguration) WithLatestAvailableRevision(value int32) *StorageStatusApplyConfiguration { - b.LatestAvailableRevision = &value + b.OperatorStatusApplyConfiguration.LatestAvailableRevision = &value return b } @@ -67,7 +67,7 @@ func (b *StorageStatusApplyConfiguration) WithGenerations(values ...*GenerationS if values[i] == nil { panic("nil value passed to WithGenerations") } - b.Generations = append(b.Generations, *values[i]) + b.OperatorStatusApplyConfiguration.Generations = append(b.OperatorStatusApplyConfiguration.Generations, *values[i]) } return b } diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/upstream.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/upstream.go index 07eca5ee3..8f666cd18 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/upstream.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/upstream.go @@ -3,15 +3,15 @@ package v1 import ( - v1 "github.com/openshift/api/operator/v1" + operatorv1 "github.com/openshift/api/operator/v1" ) // UpstreamApplyConfiguration represents a declarative configuration of the Upstream type for use // with apply. 
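The regenerated setters above only switch to explicit references through the embedded OperatorSpec/OperatorStatus/ObjectMeta apply configurations, so builder-style callers are unaffected. A minimal usage sketch for reference (the label key and chosen values are illustrative, not taken from this change):

package main

import (
	"fmt"

	operatorv1 "github.com/openshift/api/operator/v1"
	applyoperatorv1 "github.com/openshift/client-go/operator/applyconfigurations/operator/v1"
	"sigs.k8s.io/yaml"
)

func main() {
	// Chained setters still write through the embedded apply configurations,
	// so existing call sites compile and behave exactly as before the regeneration.
	storage := applyoperatorv1.Storage("cluster").
		WithLabels(map[string]string{"example.com/owner": "sample"}).
		WithSpec(applyoperatorv1.StorageSpec().
			WithManagementState(operatorv1.Managed).
			WithLogLevel(operatorv1.Normal))

	out, err := yaml.Marshal(storage)
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out))
}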
type UpstreamApplyConfiguration struct { - Type *v1.UpstreamType `json:"type,omitempty"` - Address *string `json:"address,omitempty"` - Port *uint32 `json:"port,omitempty"` + Type *operatorv1.UpstreamType `json:"type,omitempty"` + Address *string `json:"address,omitempty"` + Port *uint32 `json:"port,omitempty"` } // UpstreamApplyConfiguration constructs a declarative configuration of the Upstream type for use with @@ -23,7 +23,7 @@ func Upstream() *UpstreamApplyConfiguration { // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. -func (b *UpstreamApplyConfiguration) WithType(value v1.UpstreamType) *UpstreamApplyConfiguration { +func (b *UpstreamApplyConfiguration) WithType(value operatorv1.UpstreamType) *UpstreamApplyConfiguration { b.Type = &value return b } diff --git a/vendor/github.com/openshift/library-go/pkg/config/clusteroperator/v1helpers/status.go b/vendor/github.com/openshift/library-go/pkg/config/clusteroperator/v1helpers/status.go index 8491bc9e9..1709b2395 100644 --- a/vendor/github.com/openshift/library-go/pkg/config/clusteroperator/v1helpers/status.go +++ b/vendor/github.com/openshift/library-go/pkg/config/clusteroperator/v1helpers/status.go @@ -3,32 +3,31 @@ package v1helpers import ( "bytes" "fmt" - "strings" - "time" - "k8s.io/apimachinery/pkg/api/equality" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/diff" "k8s.io/apimachinery/pkg/util/json" + "k8s.io/utils/clock" + "strings" configv1 "github.com/openshift/api/config/v1" ) // SetStatusCondition sets the corresponding condition in conditions to newCondition. 
-func SetStatusCondition(conditions *[]configv1.ClusterOperatorStatusCondition, newCondition configv1.ClusterOperatorStatusCondition) { +func SetStatusCondition(conditions *[]configv1.ClusterOperatorStatusCondition, newCondition configv1.ClusterOperatorStatusCondition, clock clock.PassiveClock) { if conditions == nil { conditions = &[]configv1.ClusterOperatorStatusCondition{} } existingCondition := FindStatusCondition(*conditions, newCondition.Type) if existingCondition == nil { - newCondition.LastTransitionTime = metav1.NewTime(time.Now()) + newCondition.LastTransitionTime = metav1.NewTime(clock.Now()) *conditions = append(*conditions, newCondition) return } if existingCondition.Status != newCondition.Status { existingCondition.Status = newCondition.Status - existingCondition.LastTransitionTime = metav1.NewTime(time.Now()) + existingCondition.LastTransitionTime = metav1.NewTime(clock.Now()) } existingCondition.Reason = newCondition.Reason diff --git a/vendor/github.com/openshift/library-go/pkg/config/serving/server.go b/vendor/github.com/openshift/library-go/pkg/config/serving/server.go index 0b406b35b..b1bac1e27 100644 --- a/vendor/github.com/openshift/library-go/pkg/config/serving/server.go +++ b/vendor/github.com/openshift/library-go/pkg/config/serving/server.go @@ -14,8 +14,8 @@ import ( "k8s.io/apimachinery/pkg/version" genericapiserver "k8s.io/apiserver/pkg/server" genericapiserveroptions "k8s.io/apiserver/pkg/server/options" - kasversion "k8s.io/apiserver/pkg/util/version" "k8s.io/client-go/kubernetes" + kasversion "k8s.io/component-base/version" "k8s.io/klog/v2" configv1 "github.com/openshift/api/config/v1" diff --git a/vendor/github.com/openshift/library-go/pkg/controller/factory/factory.go b/vendor/github.com/openshift/library-go/pkg/controller/factory/factory.go index 1f9b15a72..8b693784f 100644 --- a/vendor/github.com/openshift/library-go/pkg/controller/factory/factory.go +++ b/vendor/github.com/openshift/library-go/pkg/controller/factory/factory.go @@ -3,11 +3,13 @@ package factory import ( "context" "fmt" + "reflect" "time" "github.com/robfig/cron" "k8s.io/apimachinery/pkg/runtime" errorutil "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/apimachinery/pkg/util/sets" "k8s.io/client-go/tools/cache" "github.com/openshift/library-go/pkg/operator/events" @@ -245,6 +247,11 @@ func (f *Factory) WithControllerInstanceName(controllerInstanceName string) *Fac return f } +type informerHandleTuple struct { + informer Informer + filter uintptr +} + // Controller produce a runnable controller. 
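The informerHandleTuple added above, and the sets keyed on it in ToController below, exist because Go functions are not comparable: the filter is reduced to its code pointer via reflect so the same informer/filter pair is only registered once. A standalone sketch of that idea (the filter type and helper are illustrative, not the factory's real wiring):

package main

import (
	"fmt"
	"reflect"
)

// eventFilter stands in for the factory's filter function type; Go funcs cannot be
// used as map keys, so de-duplication keys on reflect.ValueOf(fn).Pointer() instead.
type eventFilter func(obj interface{}) bool

// registerOnce reports whether this filter has not been seen before and records it.
func registerOnce(seen map[uintptr]bool, filter eventFilter) bool {
	key := reflect.ValueOf(filter).Pointer()
	if seen[key] {
		return false // same handler/filter pair was already added
	}
	seen[key] = true
	return true
}

func main() {
	keep := eventFilter(func(obj interface{}) bool { return obj != nil })

	seen := map[uintptr]bool{}
	fmt.Println(registerOnce(seen, keep)) // true: first registration
	fmt.Println(registerOnce(seen, keep)) // false: duplicate skipped
}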
func (f *Factory) ToController(name string, eventRecorder events.Recorder) Controller { if f.sync == nil { @@ -286,19 +293,37 @@ func (f *Factory) ToController(name string, eventRecorder events.Recorder) Contr cacheSyncTimeout: defaultCacheSyncTimeout, } + // avoid adding an informer more than once + informerQueueKeySet := sets.New[informerHandleTuple]() for i := range f.informerQueueKeys { for d := range f.informerQueueKeys[i].informers { informer := f.informerQueueKeys[i].informers[d] queueKeyFn := f.informerQueueKeys[i].queueKeyFn - informer.AddEventHandler(c.syncContext.(syncContext).eventHandler(queueKeyFn, f.informerQueueKeys[i].filter)) + tuple := informerHandleTuple{ + informer: informer, + filter: reflect.ValueOf(f.informerQueueKeys[i].filter).Pointer(), + } + if !informerQueueKeySet.Has(tuple) { + sets.Insert(informerQueueKeySet, tuple) + informer.AddEventHandler(c.syncContext.(syncContext).eventHandler(queueKeyFn, f.informerQueueKeys[i].filter)) + } c.cachesToSync = append(c.cachesToSync, informer.HasSynced) } } + // avoid adding an informer more than once + informerSet := sets.New[informerHandleTuple]() for i := range f.informers { for d := range f.informers[i].informers { informer := f.informers[i].informers[d] - informer.AddEventHandler(c.syncContext.(syncContext).eventHandler(DefaultQueueKeysFunc, f.informers[i].filter)) + tuple := informerHandleTuple{ + informer: informer, + filter: reflect.ValueOf(f.informers[i].filter).Pointer(), + } + if !informerSet.Has(tuple) { + sets.Insert(informerSet, tuple) + informer.AddEventHandler(c.syncContext.(syncContext).eventHandler(DefaultQueueKeysFunc, f.informers[i].filter)) + } c.cachesToSync = append(c.cachesToSync, informer.HasSynced) } } diff --git a/vendor/github.com/openshift/library-go/pkg/crypto/crypto.go b/vendor/github.com/openshift/library-go/pkg/crypto/crypto.go index e6651fecc..80f5efc2c 100644 --- a/vendor/github.com/openshift/library-go/pkg/crypto/crypto.go +++ b/vendor/github.com/openshift/library-go/pkg/crypto/crypto.go @@ -110,15 +110,6 @@ func DefaultTLSVersion() uint16 { return tls.VersionTLS12 } -// ciphersTLS13 copies golang 1.13 implementation, where TLS1.3 suites are not -// configurable (cipherSuites field is ignored for TLS1.3 flows and all of the -// below three - and none other - are used) -var ciphersTLS13 = map[string]uint16{ - "TLS_AES_128_GCM_SHA256": tls.TLS_AES_128_GCM_SHA256, - "TLS_AES_256_GCM_SHA384": tls.TLS_AES_256_GCM_SHA384, - "TLS_CHACHA20_POLY1305_SHA256": tls.TLS_CHACHA20_POLY1305_SHA256, -} - var ciphers = map[string]uint16{ "TLS_RSA_WITH_RC4_128_SHA": tls.TLS_RSA_WITH_RC4_128_SHA, "TLS_RSA_WITH_3DES_EDE_CBC_SHA": tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA, @@ -144,6 +135,9 @@ var ciphers = map[string]uint16{ "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305": tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256, "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256": tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256, "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256": tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256, + "TLS_AES_128_GCM_SHA256": tls.TLS_AES_128_GCM_SHA256, + "TLS_AES_256_GCM_SHA384": tls.TLS_AES_256_GCM_SHA384, + "TLS_CHACHA20_POLY1305_SHA256": tls.TLS_CHACHA20_POLY1305_SHA256, } // openSSLToIANACiphersMap maps OpenSSL cipher suite names to IANA names @@ -223,10 +217,6 @@ func CipherSuite(cipherName string) (uint16, error) { return cipher, nil } - if _, ok := ciphersTLS13[cipherName]; ok { - return 0, fmt.Errorf("all golang TLSv1.3 ciphers are always used for TLSv1.3 flows") - } - return 0, fmt.Errorf("unknown 
cipher name %q", cipherName) } @@ -281,6 +271,9 @@ func DefaultCiphers() []uint16 { // tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA, // forbidden by http/2, disabled to mitigate SWEET32 attack tls.TLS_RSA_WITH_AES_128_CBC_SHA, // forbidden by http/2 tls.TLS_RSA_WITH_AES_256_CBC_SHA, // forbidden by http/2 + tls.TLS_AES_128_GCM_SHA256, + tls.TLS_AES_256_GCM_SHA384, + tls.TLS_CHACHA20_POLY1305_SHA256, } } @@ -393,7 +386,7 @@ func GetTLSCertificateConfig(certFile, keyFile string) (*TLSCertificateConfig, e } certs, err := cert.ParseCertsPEM(certPEMBlock) if err != nil { - return nil, fmt.Errorf("Error reading %s: %s", certFile, err) + return nil, fmt.Errorf("error reading %s: %s", certFile, err) } keyPEMBlock, err := os.ReadFile(keyFile) @@ -419,7 +412,7 @@ func GetTLSCertificateConfigFromBytes(certBytes, keyBytes []byte) (*TLSCertifica certs, err := cert.ParseCertsPEM(certBytes) if err != nil { - return nil, fmt.Errorf("Error reading cert: %s", err) + return nil, fmt.Errorf("error reading cert: %s", err) } keyPairCert, err := tls.X509KeyPair(certBytes, keyBytes) @@ -432,8 +425,8 @@ func GetTLSCertificateConfigFromBytes(certBytes, keyBytes []byte) (*TLSCertifica } const ( - DefaultCertificateLifetimeInDays = 365 * 2 // 2 years - DefaultCACertificateLifetimeInDays = 365 * 5 // 5 years + DefaultCertificateLifetimeDuration = time.Hour * 24 * 365 * 2 // 2 years + DefaultCACertificateLifetimeDuration = time.Hour * 24 * 365 * 5 // 5 years // Default keys are 2048 bits keyBits = 2048 @@ -553,11 +546,11 @@ func randomSerialNumber() int64 { // EnsureCA returns a CA, whether it was created (as opposed to pre-existing), and any error // if serialFile is empty, a RandomSerialGenerator will be used -func EnsureCA(certFile, keyFile, serialFile, name string, expireDays int) (*CA, bool, error) { +func EnsureCA(certFile, keyFile, serialFile, name string, lifetime time.Duration) (*CA, bool, error) { if ca, err := GetCA(certFile, keyFile, serialFile); err == nil { return ca, false, err } - ca, err := MakeSelfSignedCA(certFile, keyFile, serialFile, name, expireDays) + ca, err := MakeSelfSignedCA(certFile, keyFile, serialFile, name, lifetime) return ca, true, err } @@ -597,10 +590,10 @@ func GetCAFromBytes(certBytes, keyBytes []byte) (*CA, error) { } // if serialFile is empty, a RandomSerialGenerator will be used -func MakeSelfSignedCA(certFile, keyFile, serialFile, name string, expireDays int) (*CA, error) { +func MakeSelfSignedCA(certFile, keyFile, serialFile, name string, lifetime time.Duration) (*CA, error) { klog.V(2).Infof("Generating new CA for %s cert, and key in %s, %s", name, certFile, keyFile) - caConfig, err := MakeSelfSignedCAConfig(name, expireDays) + caConfig, err := MakeSelfSignedCAConfig(name, lifetime) if err != nil { return nil, err } @@ -628,23 +621,21 @@ func MakeSelfSignedCA(certFile, keyFile, serialFile, name string, expireDays int }, nil } -func MakeSelfSignedCAConfig(name string, expireDays int) (*TLSCertificateConfig, error) { +func MakeSelfSignedCAConfig(name string, lifetime time.Duration) (*TLSCertificateConfig, error) { subject := pkix.Name{CommonName: name} - return MakeSelfSignedCAConfigForSubject(subject, expireDays) + return MakeSelfSignedCAConfigForSubject(subject, lifetime) } -func MakeSelfSignedCAConfigForSubject(subject pkix.Name, expireDays int) (*TLSCertificateConfig, error) { - var caLifetimeInDays = DefaultCACertificateLifetimeInDays - if expireDays > 0 { - caLifetimeInDays = expireDays +func MakeSelfSignedCAConfigForSubject(subject pkix.Name, lifetime time.Duration) 
(*TLSCertificateConfig, error) { + if lifetime <= 0 { + lifetime = DefaultCACertificateLifetimeDuration + fmt.Fprintf(os.Stderr, "Validity period of the certificate for %q is unset, resetting to %d years!\n", subject.CommonName, lifetime) } - if caLifetimeInDays > DefaultCACertificateLifetimeInDays { - warnAboutCertificateLifeTime(subject.CommonName, DefaultCACertificateLifetimeInDays) + if lifetime > DefaultCACertificateLifetimeDuration { + warnAboutCertificateLifeTime(subject.CommonName, DefaultCACertificateLifetimeDuration) } - - caLifetime := time.Duration(caLifetimeInDays) * 24 * time.Hour - return makeSelfSignedCAConfigForSubjectAndDuration(subject, time.Now, caLifetime) + return makeSelfSignedCAConfigForSubjectAndDuration(subject, time.Now, lifetime) } func MakeSelfSignedCAConfigForDuration(name string, caLifetime time.Duration) (*TLSCertificateConfig, error) { @@ -702,21 +693,21 @@ func MakeCAConfigForDuration(name string, caLifetime time.Duration, issuer *CA) // (as opposed to pre-existing), and any error that might occur during the subCA // creation. // If serialFile is an empty string, a RandomSerialGenerator will be used. -func (ca *CA) EnsureSubCA(certFile, keyFile, serialFile, name string, expireDays int) (*CA, bool, error) { +func (ca *CA) EnsureSubCA(certFile, keyFile, serialFile, name string, lifetime time.Duration) (*CA, bool, error) { if subCA, err := GetCA(certFile, keyFile, serialFile); err == nil { return subCA, false, err } - subCA, err := ca.MakeAndWriteSubCA(certFile, keyFile, serialFile, name, expireDays) + subCA, err := ca.MakeAndWriteSubCA(certFile, keyFile, serialFile, name, lifetime) return subCA, true, err } // MakeAndWriteSubCA returns a new sub-CA configuration. New cert/key pair is generated // while using this function. // If serialFile is an empty string, a RandomSerialGenerator will be used. 
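The crypto helpers in this hunk move from expireDays integers to time.Duration lifetimes, so callers now pass an explicit duration where they previously passed a day count. A minimal sketch of an updated call site, assuming illustrative file paths and signer name:

package main

import (
	"fmt"
	"time"

	"github.com/openshift/library-go/pkg/crypto"
)

func main() {
	// Old signature: EnsureCA(certFile, keyFile, serialFile, name string, expireDays int)
	// The new signature takes a time.Duration, so a former 2-year day count becomes:
	lifetime := 2 * 365 * 24 * time.Hour

	// File names and signer name are placeholders; an empty serialFile selects the
	// RandomSerialGenerator, as documented above.
	ca, created, err := crypto.EnsureCA("ca.crt", "ca.key", "", "example-signer", lifetime)
	if err != nil {
		panic(err)
	}
	fmt.Printf("CA created=%v, notAfter=%v\n", created, ca.Config.Certs[0].NotAfter)
}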
-func (ca *CA) MakeAndWriteSubCA(certFile, keyFile, serialFile, name string, expireDays int) (*CA, error) { +func (ca *CA) MakeAndWriteSubCA(certFile, keyFile, serialFile, name string, lifetime time.Duration) (*CA, error) { klog.V(4).Infof("Generating sub-CA certificate in %s, key in %s, serial in %s", certFile, keyFile, serialFile) - subCAConfig, err := MakeCAConfigForDuration(name, time.Duration(expireDays)*time.Hour*24, ca) + subCAConfig, err := MakeCAConfigForDuration(name, lifetime, ca) if err != nil { return nil, err } @@ -746,10 +737,10 @@ func (ca *CA) MakeAndWriteSubCA(certFile, keyFile, serialFile, name string, expi }, nil } -func (ca *CA) EnsureServerCert(certFile, keyFile string, hostnames sets.Set[string], expireDays int) (*TLSCertificateConfig, bool, error) { +func (ca *CA) EnsureServerCert(certFile, keyFile string, hostnames sets.Set[string], lifetime time.Duration) (*TLSCertificateConfig, bool, error) { certConfig, err := GetServerCert(certFile, keyFile, hostnames) if err != nil { - certConfig, err = ca.MakeAndWriteServerCert(certFile, keyFile, hostnames, expireDays) + certConfig, err = ca.MakeAndWriteServerCert(certFile, keyFile, hostnames, lifetime) return certConfig, true, err } @@ -773,13 +764,13 @@ func GetServerCert(certFile, keyFile string, hostnames sets.Set[string]) (*TLSCe return server, nil } - return nil, fmt.Errorf("Existing server certificate in %s does not match required hostnames.", certFile) + return nil, fmt.Errorf("existing server certificate in %s does not match required hostnames", certFile) } -func (ca *CA) MakeAndWriteServerCert(certFile, keyFile string, hostnames sets.Set[string], expireDays int) (*TLSCertificateConfig, error) { +func (ca *CA) MakeAndWriteServerCert(certFile, keyFile string, hostnames sets.Set[string], lifetime time.Duration) (*TLSCertificateConfig, error) { klog.V(4).Infof("Generating server certificate in %s, key in %s", certFile, keyFile) - server, err := ca.MakeServerCert(hostnames, expireDays) + server, err := ca.MakeServerCert(hostnames, lifetime) if err != nil { return nil, err } @@ -793,11 +784,11 @@ func (ca *CA) MakeAndWriteServerCert(certFile, keyFile string, hostnames sets.Se // if the extension attempt failed. 
type CertificateExtensionFunc func(*x509.Certificate) error -func (ca *CA) MakeServerCert(hostnames sets.Set[string], expireDays int, fns ...CertificateExtensionFunc) (*TLSCertificateConfig, error) { +func (ca *CA) MakeServerCert(hostnames sets.Set[string], lifetime time.Duration, fns ...CertificateExtensionFunc) (*TLSCertificateConfig, error) { serverPublicKey, serverPrivateKey, publicKeyHash, _ := newKeyPairWithHash() authorityKeyId := ca.Config.Certs[0].SubjectKeyId subjectKeyId := publicKeyHash - serverTemplate := newServerCertificateTemplate(pkix.Name{CommonName: sets.List(hostnames)[0]}, sets.List(hostnames), expireDays, time.Now, authorityKeyId, subjectKeyId) + serverTemplate := newServerCertificateTemplate(pkix.Name{CommonName: sets.List(hostnames)[0]}, sets.List(hostnames), lifetime, time.Now, authorityKeyId, subjectKeyId) for _, fn := range fns { if err := fn(serverTemplate); err != nil { return nil, err @@ -835,10 +826,10 @@ func (ca *CA) MakeServerCertForDuration(hostnames sets.Set[string], lifetime tim return server, nil } -func (ca *CA) EnsureClientCertificate(certFile, keyFile string, u user.Info, expireDays int) (*TLSCertificateConfig, bool, error) { +func (ca *CA) EnsureClientCertificate(certFile, keyFile string, u user.Info, lifetime time.Duration) (*TLSCertificateConfig, bool, error) { certConfig, err := GetClientCertificate(certFile, keyFile, u) if err != nil { - certConfig, err = ca.MakeClientCertificate(certFile, keyFile, u, expireDays) + certConfig, err = ca.MakeClientCertificate(certFile, keyFile, u, lifetime) return certConfig, true, err // true indicates we wrote the files. } return certConfig, false, nil @@ -867,7 +858,7 @@ func subjectChanged(existing, expected pkix.Name) bool { !reflect.DeepEqual(existing.Organization, expected.Organization) } -func (ca *CA) MakeClientCertificate(certFile, keyFile string, u user.Info, expireDays int) (*TLSCertificateConfig, error) { +func (ca *CA) MakeClientCertificate(certFile, keyFile string, u user.Info, lifetime time.Duration) (*TLSCertificateConfig, error) { klog.V(4).Infof("Generating client cert in %s and key in %s", certFile, keyFile) // ensure parent dirs if err := os.MkdirAll(filepath.Dir(certFile), os.FileMode(0755)); err != nil { @@ -878,7 +869,7 @@ func (ca *CA) MakeClientCertificate(certFile, keyFile string, u user.Info, expir } clientPublicKey, clientPrivateKey, _ := NewKeyPair() - clientTemplate := NewClientCertificateTemplate(UserToSubject(u), expireDays, time.Now) + clientTemplate := NewClientCertificateTemplate(UserToSubject(u), lifetime, time.Now) clientCrt, err := ca.SignCertificate(clientTemplate, clientPublicKey) if err != nil { return nil, err @@ -1024,18 +1015,16 @@ func newSigningCertificateTemplateForDuration(subject pkix.Name, caLifetime time } // Can be used for ListenAndServeTLS -func newServerCertificateTemplate(subject pkix.Name, hosts []string, expireDays int, currentTime func() time.Time, authorityKeyId, subjectKeyId []byte) *x509.Certificate { - var lifetimeInDays = DefaultCertificateLifetimeInDays - if expireDays > 0 { - lifetimeInDays = expireDays +func newServerCertificateTemplate(subject pkix.Name, hosts []string, lifetime time.Duration, currentTime func() time.Time, authorityKeyId, subjectKeyId []byte) *x509.Certificate { + if lifetime <= 0 { + lifetime = DefaultCertificateLifetimeDuration + fmt.Fprintf(os.Stderr, "Validity period of the certificate for %q is unset, resetting to %d years!\n", subject.CommonName, lifetime) } - if lifetimeInDays > DefaultCertificateLifetimeInDays { - 
warnAboutCertificateLifeTime(subject.CommonName, DefaultCertificateLifetimeInDays) + if lifetime > DefaultCertificateLifetimeDuration { + warnAboutCertificateLifeTime(subject.CommonName, DefaultCertificateLifetimeDuration) } - lifetime := time.Duration(lifetimeInDays) * 24 * time.Hour - return newServerCertificateTemplateForDuration(subject, hosts, lifetime, currentTime, authorityKeyId, subjectKeyId) } @@ -1107,24 +1096,22 @@ func CertsFromPEM(pemCerts []byte) ([]*x509.Certificate, error) { } if !ok { - return certs, errors.New("Could not read any certificates") + return certs, errors.New("could not read any certificates") } return certs, nil } // Can be used as a certificate in http.Transport TLSClientConfig -func NewClientCertificateTemplate(subject pkix.Name, expireDays int, currentTime func() time.Time) *x509.Certificate { - var lifetimeInDays = DefaultCertificateLifetimeInDays - if expireDays > 0 { - lifetimeInDays = expireDays +func NewClientCertificateTemplate(subject pkix.Name, lifetime time.Duration, currentTime func() time.Time) *x509.Certificate { + if lifetime <= 0 { + lifetime = DefaultCertificateLifetimeDuration + fmt.Fprintf(os.Stderr, "Validity period of the certificate for %q is unset, resetting to %d years!\n", subject.CommonName, lifetime) } - if lifetimeInDays > DefaultCertificateLifetimeInDays { - warnAboutCertificateLifeTime(subject.CommonName, DefaultCertificateLifetimeInDays) + if lifetime > DefaultCertificateLifetimeDuration { + warnAboutCertificateLifeTime(subject.CommonName, DefaultCertificateLifetimeDuration) } - lifetime := time.Duration(lifetimeInDays) * 24 * time.Hour - return NewClientCertificateTemplateForDuration(subject, lifetime, currentTime) } @@ -1145,8 +1132,8 @@ func NewClientCertificateTemplateForDuration(subject pkix.Name, lifetime time.Du } } -func warnAboutCertificateLifeTime(name string, defaultLifetimeInDays int) { - defaultLifetimeInYears := defaultLifetimeInDays / 365 +func warnAboutCertificateLifeTime(name string, defaultLifetimeDuration time.Duration) { + defaultLifetimeInYears := defaultLifetimeDuration / 365 / 24 fmt.Fprintf(os.Stderr, "WARNING: Validity period of the certificate for %q is greater than %d years!\n", name, defaultLifetimeInYears) fmt.Fprintln(os.Stderr, "WARNING: By security reasons it is strongly recommended to change this period and make it smaller!") } @@ -1161,7 +1148,7 @@ func signCertificate(template *x509.Certificate, requestKey crypto.PublicKey, is return nil, err } if len(certs) != 1 { - return nil, errors.New("Expected a single certificate") + return nil, errors.New("expected a single certificate") } return certs[0], nil } @@ -1191,7 +1178,7 @@ func EncodeKey(key crypto.PrivateKey) ([]byte, error) { return []byte{}, err } default: - return []byte{}, errors.New("Unrecognized key type") + return []byte{}, errors.New("unrecognized key type") } return b.Bytes(), nil diff --git a/vendor/github.com/openshift/library-go/pkg/operator/deploymentcontroller/deployment_controller.go b/vendor/github.com/openshift/library-go/pkg/operator/deploymentcontroller/deployment_controller.go index fbb78603b..4a4f9bbc5 100644 --- a/vendor/github.com/openshift/library-go/pkg/operator/deploymentcontroller/deployment_controller.go +++ b/vendor/github.com/openshift/library-go/pkg/operator/deploymentcontroller/deployment_controller.go @@ -272,7 +272,11 @@ func (c *DeploymentController) syncManaged(ctx context.Context, opSpec *opv1.Ope availableCondition := applyoperatorv1. 
OperatorCondition().WithType(c.instanceName + opv1.OperatorStatusTypeAvailable) if deployment.Status.AvailableReplicas > 0 { - availableCondition = availableCondition.WithStatus(opv1.ConditionTrue) + availableCondition = availableCondition. + WithStatus(opv1.ConditionTrue). + WithMessage("Deployment is available"). + WithReason("AsExpected") + } else { availableCondition = availableCondition. WithStatus(opv1.ConditionFalse). @@ -286,13 +290,23 @@ func (c *DeploymentController) syncManaged(ctx context.Context, opSpec *opv1.Ope if slices.Contains(c.conditions, opv1.OperatorStatusTypeProgressing) { progressingCondition := applyoperatorv1.OperatorCondition(). WithType(c.instanceName + opv1.OperatorStatusTypeProgressing). - WithStatus(opv1.ConditionFalse) + WithStatus(opv1.ConditionFalse). + WithMessage("Deployment is not progressing"). + WithReason("AsExpected") + if ok, msg := isProgressing(deployment); ok { progressingCondition = progressingCondition. WithStatus(opv1.ConditionTrue). WithMessage(msg). WithReason("Deploying") + + // Degrade when operator is progressing too long. + // Only do this if we would continue to be in the Progressing state, otherwise, we'll never get out + if v1helpers.IsUpdatingTooLong(opStatus, c.instanceName+opv1.OperatorStatusTypeProgressing) { + return fmt.Errorf("Deployment was progressing too long") + } } + status = status.WithConditions(progressingCondition) } diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/generic.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/generic.go index 357efad61..9105464bd 100644 --- a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/generic.go +++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/generic.go @@ -9,6 +9,7 @@ import ( appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" + networkingv1 "k8s.io/api/networking/v1" policyv1 "k8s.io/api/policy/v1" rbacv1 "k8s.io/api/rbac/v1" storagev1 "k8s.io/api/storage/v1" @@ -142,6 +143,12 @@ func ApplyDirectly(ctx context.Context, clients *ClientHolder, recorder events.R } else { result.Result, result.Changed, result.Error = ApplySecretImproved(ctx, client, recorder, t, cache) } + case *networkingv1.NetworkPolicy: + if clients.kubeClient == nil { + result.Error = fmt.Errorf("missing kubeClient") + } else { + result.Result, result.Changed, result.Error = ApplyNetworkPolicy(ctx, clients.kubeClient.NetworkingV1(), recorder, t) + } case *rbacv1.ClusterRole: if clients.kubeClient == nil { result.Error = fmt.Errorf("missing kubeClient") @@ -208,6 +215,18 @@ func ApplyDirectly(ctx context.Context, clients *ClientHolder, recorder events.R } else { result.Result, result.Changed, result.Error = ApplyValidatingAdmissionPolicyBindingV1beta1(ctx, clients.kubeClient.AdmissionregistrationV1beta1(), recorder, t, cache) } + case *admissionregistrationv1.ValidatingAdmissionPolicy: + if clients.kubeClient == nil { + result.Error = fmt.Errorf("missing kubeClient") + } else { + result.Result, result.Changed, result.Error = ApplyValidatingAdmissionPolicyV1(ctx, clients.kubeClient.AdmissionregistrationV1(), recorder, t, cache) + } + case *admissionregistrationv1.ValidatingAdmissionPolicyBinding: + if clients.kubeClient == nil { + result.Error = fmt.Errorf("missing kubeClient") + } else { + result.Result, result.Changed, result.Error = ApplyValidatingAdmissionPolicyBindingV1(ctx, clients.kubeClient.AdmissionregistrationV1(), recorder, t, cache) + } case *storagev1.CSIDriver: if 
clients.kubeClient == nil { result.Error = fmt.Errorf("missing kubeClient") @@ -295,6 +314,12 @@ func DeleteAll(ctx context.Context, clients *ClientHolder, recorder events.Recor } else { _, result.Changed, result.Error = DeleteSecret(ctx, client, recorder, t) } + case *networkingv1.NetworkPolicy: + if clients.kubeClient == nil { + result.Error = fmt.Errorf("missing kubeClient") + } else { + _, result.Changed, result.Error = DeleteNetworkPolicy(ctx, clients.kubeClient.NetworkingV1(), recorder, t) + } case *rbacv1.ClusterRole: if clients.kubeClient == nil { result.Error = fmt.Errorf("missing kubeClient") diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/json_patch_helpers.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/json_patch_helpers.go index ac9699aff..84b50fde1 100644 --- a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/json_patch_helpers.go +++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/json_patch_helpers.go @@ -3,7 +3,7 @@ package resourceapply import ( "fmt" - patch "github.com/evanphx/json-patch" + patch "gopkg.in/evanphx/json-patch.v4" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/equality" diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/networking.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/networking.go new file mode 100644 index 000000000..0a3df326e --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/networking.go @@ -0,0 +1,59 @@ +package resourceapply + +import ( + "context" + + networkingv1 "k8s.io/api/networking/v1" + "k8s.io/apimachinery/pkg/api/equality" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + networkingclientv1 "k8s.io/client-go/kubernetes/typed/networking/v1" + "k8s.io/klog/v2" + + "github.com/openshift/library-go/pkg/operator/events" + "github.com/openshift/library-go/pkg/operator/resource/resourcehelper" + "github.com/openshift/library-go/pkg/operator/resource/resourcemerge" +) + +// ApplyClusterRole merges objectmeta, does not worry about anything else +func ApplyNetworkPolicy(ctx context.Context, client networkingclientv1.NetworkPoliciesGetter, recorder events.Recorder, required *networkingv1.NetworkPolicy) (*networkingv1.NetworkPolicy, bool, error) { + existing, err := client.NetworkPolicies(required.Namespace).Get(ctx, required.Name, metav1.GetOptions{}) + if apierrors.IsNotFound(err) { + requiredCopy := required.DeepCopy() + actual, err := client.NetworkPolicies(required.Namespace).Create( + ctx, resourcemerge.WithCleanLabelsAndAnnotations(requiredCopy).(*networkingv1.NetworkPolicy), metav1.CreateOptions{}) + resourcehelper.ReportCreateEvent(recorder, required, err) + return actual, true, err + } + if err != nil { + return nil, false, err + } + + modified := false + existingCopy := existing.DeepCopy() + + resourcemerge.EnsureObjectMeta(&modified, &existingCopy.ObjectMeta, required.ObjectMeta) + if equality.Semantic.DeepEqual(existingCopy.Spec, required.Spec) && !modified { + return existingCopy, false, nil + } + + if klog.V(2).Enabled() { + klog.Infof("NetworkPolicy %q changes: %v", required.Name, JSONPatchNoError(existing, existingCopy)) + } + + actual, err := client.NetworkPolicies(existingCopy.Namespace).Update(ctx, existingCopy, metav1.UpdateOptions{}) + resourcehelper.ReportUpdateEvent(recorder, required, err) + return actual, true, err +} + 
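The new resourceapply NetworkPolicy support pairs with a resourceread decoder further below, which is how static NetworkPolicy manifests can flow through ApplyDirectly. A minimal sketch of driving the helper directly; the package and function names here are illustrative glue, only ReadNetworkPolicyV1OrDie and ApplyNetworkPolicy come from library-go:

package example

import (
	"context"

	"k8s.io/client-go/kubernetes"

	"github.com/openshift/library-go/pkg/operator/events"
	"github.com/openshift/library-go/pkg/operator/resource/resourceapply"
	"github.com/openshift/library-go/pkg/operator/resource/resourceread"
)

// applyNetworkPolicyAsset decodes one NetworkPolicy manifest and ensures it exists
// in its namespace, reporting whether the call changed anything on the cluster.
func applyNetworkPolicyAsset(ctx context.Context, kubeClient kubernetes.Interface, recorder events.Recorder, manifest []byte) (bool, error) {
	required := resourceread.ReadNetworkPolicyV1OrDie(manifest)
	_, changed, err := resourceapply.ApplyNetworkPolicy(ctx, kubeClient.NetworkingV1(), recorder, required)
	return changed, err
}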
+func DeleteNetworkPolicy(ctx context.Context, client networkingclientv1.NetworkPoliciesGetter, recorder events.Recorder, required *networkingv1.NetworkPolicy) (*networkingv1.NetworkPolicy, bool, error) { + err := client.NetworkPolicies(required.Namespace).Delete(ctx, required.Name, metav1.DeleteOptions{}) + if err != nil && apierrors.IsNotFound(err) { + return nil, false, nil + } + if err != nil { + return nil, false, err + } + resourcehelper.ReportDeleteEvent(recorder, required, err) + return nil, true, nil +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceread/apiregistration.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceread/apiregistration.go new file mode 100644 index 000000000..05a4146ec --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceread/apiregistration.go @@ -0,0 +1,26 @@ +package resourceread + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/serializer" + apiregistrationv1 "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1" +) + +var ( + apiRegisterScheme = runtime.NewScheme() + apiRegisterCodec = serializer.NewCodecFactory(apiRegisterScheme) +) + +func init() { + if err := apiregistrationv1.AddToScheme(apiRegisterScheme); err != nil { + panic(err) + } +} + +func ReadAPIServiceOrDie(objBytes []byte) *apiregistrationv1.APIService { + requiredObj, err := runtime.Decode(apiRegisterCodec.UniversalDecoder(apiregistrationv1.SchemeGroupVersion), objBytes) + if err != nil { + panic(err) + } + return requiredObj.(*apiregistrationv1.APIService) +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceread/networking.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceread/networking.go new file mode 100644 index 000000000..9832ede71 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceread/networking.go @@ -0,0 +1,26 @@ +package resourceread + +import ( + networkingv1 "k8s.io/api/networking/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/serializer" +) + +var ( + netScheme = runtime.NewScheme() + netCodecs = serializer.NewCodecFactory(netScheme) +) + +func init() { + if err := networkingv1.AddToScheme(netScheme); err != nil { + panic(err) + } +} + +func ReadNetworkPolicyV1OrDie(objBytes []byte) *networkingv1.NetworkPolicy { + requiredObj, err := runtime.Decode(coreCodecs.UniversalDecoder(networkingv1.SchemeGroupVersion), objBytes) + if err != nil { + panic(err) + } + return requiredObj.(*networkingv1.NetworkPolicy) +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/staticresourcecontroller/static_resource_controller.go b/vendor/github.com/openshift/library-go/pkg/operator/staticresourcecontroller/static_resource_controller.go index 234cc08eb..22a377a7c 100644 --- a/vendor/github.com/openshift/library-go/pkg/operator/staticresourcecontroller/static_resource_controller.go +++ b/vendor/github.com/openshift/library-go/pkg/operator/staticresourcecontroller/static_resource_controller.go @@ -17,6 +17,7 @@ import ( "k8s.io/client-go/restmapper" corev1 "k8s.io/api/core/v1" + networkingv1 "k8s.io/api/networking/v1" policyv1 "k8s.io/api/policy/v1" rbacv1 "k8s.io/api/rbac/v1" storagev1 "k8s.io/api/storage/v1" @@ -236,6 +237,8 @@ func (c *StaticResourceController) AddKubeInformers(kubeInformersByNamespace v1h ret = ret.AddInformer(informer.Core().V1().ConfigMaps().Informer()) case *corev1.Secret: ret = 
ret.AddInformer(informer.Core().V1().Secrets().Informer()) + case *networkingv1.NetworkPolicy: + ret = ret.AddInformer(informer.Networking().V1().NetworkPolicies().Informer()) case *rbacv1.ClusterRole: ret = ret.AddInformer(informer.Rbac().V1().ClusterRoles().Informer()) case *rbacv1.ClusterRoleBinding: diff --git a/vendor/github.com/openshift/library-go/pkg/operator/status/status_controller.go b/vendor/github.com/openshift/library-go/pkg/operator/status/status_controller.go index 2f7bf95d7..c27055152 100644 --- a/vendor/github.com/openshift/library-go/pkg/operator/status/status_controller.go +++ b/vendor/github.com/openshift/library-go/pkg/operator/status/status_controller.go @@ -2,6 +2,7 @@ package status import ( "context" + "k8s.io/utils/clock" "strings" "time" @@ -40,6 +41,7 @@ type StatusSyncer struct { clusterOperatorName string relatedObjects []configv1.ObjectReference relatedObjectsFunc RelatedObjectsFunc + clock clock.PassiveClock versionGetter VersionGetter operatorClient operatorv1helpers.OperatorClient @@ -67,10 +69,12 @@ func NewClusterOperatorStatusController( operatorClient operatorv1helpers.OperatorClient, versionGetter VersionGetter, recorder events.Recorder, + clock clock.PassiveClock, ) *StatusSyncer { return &StatusSyncer{ clusterOperatorName: name, relatedObjects: relatedObjects, + clock: clock, versionGetter: versionGetter, clusterOperatorClient: clusterOperatorClient, clusterOperatorLister: clusterOperatorInformer.Lister(), @@ -168,11 +172,11 @@ func (c StatusSyncer) Sync(ctx context.Context, syncCtx factory.SyncContext) err clusterOperatorObj := originalClusterOperatorObj.DeepCopy() if detailedSpec.ManagementState == operatorv1.Unmanaged && !management.IsOperatorAlwaysManaged() { - configv1helpers.SetStatusCondition(&clusterOperatorObj.Status.Conditions, configv1.ClusterOperatorStatusCondition{Type: configv1.OperatorAvailable, Status: configv1.ConditionUnknown, Reason: "Unmanaged"}) - configv1helpers.SetStatusCondition(&clusterOperatorObj.Status.Conditions, configv1.ClusterOperatorStatusCondition{Type: configv1.OperatorProgressing, Status: configv1.ConditionUnknown, Reason: "Unmanaged"}) - configv1helpers.SetStatusCondition(&clusterOperatorObj.Status.Conditions, configv1.ClusterOperatorStatusCondition{Type: configv1.OperatorDegraded, Status: configv1.ConditionUnknown, Reason: "Unmanaged"}) - configv1helpers.SetStatusCondition(&clusterOperatorObj.Status.Conditions, configv1.ClusterOperatorStatusCondition{Type: configv1.OperatorUpgradeable, Status: configv1.ConditionUnknown, Reason: "Unmanaged"}) - configv1helpers.SetStatusCondition(&clusterOperatorObj.Status.Conditions, configv1.ClusterOperatorStatusCondition{Type: configv1.EvaluationConditionsDetected, Status: configv1.ConditionUnknown, Reason: "Unmanaged"}) + configv1helpers.SetStatusCondition(&clusterOperatorObj.Status.Conditions, configv1.ClusterOperatorStatusCondition{Type: configv1.OperatorAvailable, Status: configv1.ConditionUnknown, Reason: "Unmanaged"}, c.clock) + configv1helpers.SetStatusCondition(&clusterOperatorObj.Status.Conditions, configv1.ClusterOperatorStatusCondition{Type: configv1.OperatorProgressing, Status: configv1.ConditionUnknown, Reason: "Unmanaged"}, c.clock) + configv1helpers.SetStatusCondition(&clusterOperatorObj.Status.Conditions, configv1.ClusterOperatorStatusCondition{Type: configv1.OperatorDegraded, Status: configv1.ConditionUnknown, Reason: "Unmanaged"}, c.clock) + configv1helpers.SetStatusCondition(&clusterOperatorObj.Status.Conditions, configv1.ClusterOperatorStatusCondition{Type: 
configv1.OperatorUpgradeable, Status: configv1.ConditionUnknown, Reason: "Unmanaged"}, c.clock) + configv1helpers.SetStatusCondition(&clusterOperatorObj.Status.Conditions, configv1.ClusterOperatorStatusCondition{Type: configv1.EvaluationConditionsDetected, Status: configv1.ConditionUnknown, Reason: "Unmanaged"}, c.clock) if equality.Semantic.DeepEqual(clusterOperatorObj, originalClusterOperatorObj) { return nil @@ -210,11 +214,11 @@ func (c StatusSyncer) Sync(ctx context.Context, syncCtx factory.SyncContext) err clusterOperatorObj.Status.RelatedObjects = c.relatedObjects } - configv1helpers.SetStatusCondition(&clusterOperatorObj.Status.Conditions, UnionClusterCondition(configv1.OperatorDegraded, operatorv1.ConditionFalse, c.degradedInertia, currentDetailedStatus.Conditions...)) - configv1helpers.SetStatusCondition(&clusterOperatorObj.Status.Conditions, UnionClusterCondition(configv1.OperatorProgressing, operatorv1.ConditionFalse, nil, currentDetailedStatus.Conditions...)) - configv1helpers.SetStatusCondition(&clusterOperatorObj.Status.Conditions, UnionClusterCondition(configv1.OperatorAvailable, operatorv1.ConditionTrue, nil, currentDetailedStatus.Conditions...)) - configv1helpers.SetStatusCondition(&clusterOperatorObj.Status.Conditions, UnionClusterCondition(configv1.OperatorUpgradeable, operatorv1.ConditionTrue, nil, currentDetailedStatus.Conditions...)) - configv1helpers.SetStatusCondition(&clusterOperatorObj.Status.Conditions, UnionClusterCondition(configv1.EvaluationConditionsDetected, operatorv1.ConditionFalse, nil, currentDetailedStatus.Conditions...)) + configv1helpers.SetStatusCondition(&clusterOperatorObj.Status.Conditions, UnionClusterCondition(configv1.OperatorDegraded, operatorv1.ConditionFalse, c.degradedInertia, currentDetailedStatus.Conditions...), c.clock) + configv1helpers.SetStatusCondition(&clusterOperatorObj.Status.Conditions, UnionClusterCondition(configv1.OperatorProgressing, operatorv1.ConditionFalse, nil, currentDetailedStatus.Conditions...), c.clock) + configv1helpers.SetStatusCondition(&clusterOperatorObj.Status.Conditions, UnionClusterCondition(configv1.OperatorAvailable, operatorv1.ConditionTrue, nil, currentDetailedStatus.Conditions...), c.clock) + configv1helpers.SetStatusCondition(&clusterOperatorObj.Status.Conditions, UnionClusterCondition(configv1.OperatorUpgradeable, operatorv1.ConditionTrue, nil, currentDetailedStatus.Conditions...), c.clock) + configv1helpers.SetStatusCondition(&clusterOperatorObj.Status.Conditions, UnionClusterCondition(configv1.EvaluationConditionsDetected, operatorv1.ConditionFalse, nil, currentDetailedStatus.Conditions...), c.clock) c.syncStatusVersions(clusterOperatorObj, syncCtx) diff --git a/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/helpers.go b/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/helpers.go index e69ec34af..fd34ec620 100644 --- a/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/helpers.go +++ b/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/helpers.go @@ -23,6 +23,10 @@ import ( "sigs.k8s.io/yaml" ) +const ( + progressingConditionTimeout = 15 * time.Minute +) + // SetOperandVersion sets the new version and returns the previous value. func SetOperandVersion(versions *[]configv1.OperandVersion, operandVersion configv1.OperandVersion) string { if versions == nil { @@ -352,6 +356,10 @@ func NewMultiLineAggregate(errList []error) error { if len(errs) == 0 { return nil } + // We sort errors to allow for consistent output for testing. 
+ sort.SliceStable(errs, func(i, j int) bool { + return errs[i].Error() < errs[j].Error() + }) return aggregate(errs) } @@ -550,3 +558,10 @@ func IsConditionPresentAndEqual(conditions []metav1.Condition, conditionType str } return false } + +// IsUpdatingTooLong determines if updating operands condition takes too long. +// It returns true if the given condition was found and has been set to True longer than progressingConditionTimeout. +func IsUpdatingTooLong(operatorStatus *operatorv1.OperatorStatus, progressingConditionType string) bool { + progressing := FindOperatorCondition(operatorStatus.Conditions, progressingConditionType) + return progressing != nil && progressing.Status == operatorv1.ConditionTrue && time.Now().After(progressing.LastTransitionTime.Add(progressingConditionTimeout)) +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/collectorfunc.go b/vendor/github.com/prometheus/client_golang/prometheus/collectorfunc.go new file mode 100644 index 000000000..9a71a15db --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/collectorfunc.go @@ -0,0 +1,30 @@ +// Copyright 2025 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +// CollectorFunc is a convenient way to implement a Prometheus Collector +// without interface boilerplate. +// This implementation is based on DescribeByCollect method. +// familiarize yourself to it before using. 
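CollectorFunc, defined just below, lets a plain function act as a prometheus.Collector, with Describe derived through DescribeByCollect. A short usage sketch (the metric name and value are made up for illustration):

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	desc := prometheus.NewDesc("example_build_info", "Illustrative constant metric exposed through CollectorFunc.", nil, nil)

	// No struct or method set needed: the function itself satisfies Collector.
	collector := prometheus.CollectorFunc(func(ch chan<- prometheus.Metric) {
		ch <- prometheus.MustNewConstMetric(desc, prometheus.GaugeValue, 1)
	})

	reg := prometheus.NewRegistry()
	if err := reg.Register(collector); err != nil {
		panic(err)
	}

	families, err := reg.Gather()
	if err != nil {
		panic(err)
	}
	fmt.Println("gathered", len(families), "metric family")
}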
+type CollectorFunc func(chan<- Metric) + +// Collect calls the defined CollectorFunc function with the provided Metrics channel +func (f CollectorFunc) Collect(ch chan<- Metric) { + f(ch) +} + +// Describe sends the descriptor information using DescribeByCollect +func (f CollectorFunc) Describe(ch chan<- *Desc) { + DescribeByCollect(f, ch) +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/desc.go b/vendor/github.com/prometheus/client_golang/prometheus/desc.go index 68ffe3c24..ad347113c 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/desc.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/desc.go @@ -189,12 +189,15 @@ func (d *Desc) String() string { fmt.Sprintf("%s=%q", lp.GetName(), lp.GetValue()), ) } - vlStrings := make([]string, 0, len(d.variableLabels.names)) - for _, vl := range d.variableLabels.names { - if fn, ok := d.variableLabels.labelConstraints[vl]; ok && fn != nil { - vlStrings = append(vlStrings, fmt.Sprintf("c(%s)", vl)) - } else { - vlStrings = append(vlStrings, vl) + vlStrings := []string{} + if d.variableLabels != nil { + vlStrings = make([]string, 0, len(d.variableLabels.names)) + for _, vl := range d.variableLabels.names { + if fn, ok := d.variableLabels.labelConstraints[vl]; ok && fn != nil { + vlStrings = append(vlStrings, fmt.Sprintf("c(%s)", vl)) + } else { + vlStrings = append(vlStrings, vl) + } } } return fmt.Sprintf( diff --git a/vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go b/vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go index 511746417..6b8684731 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go @@ -288,7 +288,7 @@ func NewGoCollector(opts ...func(o *internal.GoCollectorOptions)) Collector { } func attachOriginalName(desc, origName string) string { - return fmt.Sprintf("%s Sourced from %s", desc, origName) + return fmt.Sprintf("%s Sourced from %s.", desc, origName) } // Describe returns all descriptions of the collector. diff --git a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go index 519db348a..c453b754a 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go @@ -14,6 +14,7 @@ package prometheus import ( + "errors" "fmt" "math" "runtime" @@ -28,6 +29,11 @@ import ( "google.golang.org/protobuf/types/known/timestamppb" ) +const ( + nativeHistogramSchemaMaximum = 8 + nativeHistogramSchemaMinimum = -4 +) + // nativeHistogramBounds for the frac of observed values. Only relevant for // schema > 0. The position in the slice is the schema. (0 is never used, just // here for convenience of using the schema directly as the index.) @@ -330,11 +336,11 @@ func ExponentialBuckets(start, factor float64, count int) []float64 { // used for the Buckets field of HistogramOpts. // // The function panics if 'count' is 0 or negative, if 'min' is 0 or negative. 
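
A brief sketch of typical ExponentialBucketsRange use (metric name and bounds are illustrative); the returned slice feeds HistogramOpts.Buckets:

	hist := prometheus.NewHistogram(prometheus.HistogramOpts{
		Name: "example_request_duration_seconds",
		Help: "Illustrative histogram.",
		// 20 buckets spaced exponentially between 1ms and 10s.
		Buckets: prometheus.ExponentialBucketsRange(0.001, 10, 20),
	})
	prometheus.MustRegister(hist)
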
-func ExponentialBucketsRange(min, max float64, count int) []float64 { +func ExponentialBucketsRange(minBucket, maxBucket float64, count int) []float64 { if count < 1 { panic("ExponentialBucketsRange count needs a positive count") } - if min <= 0 { + if minBucket <= 0 { panic("ExponentialBucketsRange min needs to be greater than 0") } @@ -342,12 +348,12 @@ func ExponentialBucketsRange(min, max float64, count int) []float64 { // max = min*growthFactor^(bucketCount-1) // We know max/min and highest bucket. Solve for growthFactor. - growthFactor := math.Pow(max/min, 1.0/float64(count-1)) + growthFactor := math.Pow(maxBucket/minBucket, 1.0/float64(count-1)) // Now that we know growthFactor, solve for each bucket. buckets := make([]float64, count) for i := 1; i <= count; i++ { - buckets[i-1] = min * math.Pow(growthFactor, float64(i-1)) + buckets[i-1] = minBucket * math.Pow(growthFactor, float64(i-1)) } return buckets } @@ -858,15 +864,35 @@ func (h *histogram) Write(out *dto.Metric) error { // findBucket returns the index of the bucket for the provided value, or // len(h.upperBounds) for the +Inf bucket. func (h *histogram) findBucket(v float64) int { - // TODO(beorn7): For small numbers of buckets (<30), a linear search is - // slightly faster than the binary search. If we really care, we could - // switch from one search strategy to the other depending on the number - // of buckets. - // - // Microbenchmarks (BenchmarkHistogramNoLabels): - // 11 buckets: 38.3 ns/op linear - binary 48.7 ns/op - // 100 buckets: 78.1 ns/op linear - binary 54.9 ns/op - // 300 buckets: 154 ns/op linear - binary 61.6 ns/op + n := len(h.upperBounds) + if n == 0 { + return 0 + } + + // Early exit: if v is less than or equal to the first upper bound, return 0 + if v <= h.upperBounds[0] { + return 0 + } + + // Early exit: if v is greater than the last upper bound, return len(h.upperBounds) + if v > h.upperBounds[n-1] { + return n + } + + // For small arrays, use simple linear search + // "magic number" 35 is result of tests on couple different (AWS and baremetal) servers + // see more details here: https://github.com/prometheus/client_golang/pull/1662 + if n < 35 { + for i, bound := range h.upperBounds { + if v <= bound { + return i + } + } + // If v is greater than all upper bounds, return len(h.upperBounds) + return n + } + + // For larger arrays, use stdlib's binary search return sort.SearchFloat64s(h.upperBounds, v) } @@ -1440,9 +1466,9 @@ func pickSchema(bucketFactor float64) int32 { floor := math.Floor(math.Log2(math.Log2(bucketFactor))) switch { case floor <= -8: - return 8 + return nativeHistogramSchemaMaximum case floor >= 4: - return -4 + return nativeHistogramSchemaMinimum default: return -int32(floor) } @@ -1835,3 +1861,196 @@ func (n *nativeExemplars) addExemplar(e *dto.Exemplar) { n.exemplars = append(n.exemplars[:nIdx], append([]*dto.Exemplar{e}, append(n.exemplars[nIdx:rIdx], n.exemplars[rIdx+1:]...)...)...) } } + +type constNativeHistogram struct { + desc *Desc + dto.Histogram + labelPairs []*dto.LabelPair +} + +func validateCount(sum float64, count uint64, negativeBuckets, positiveBuckets map[int]int64, zeroBucket uint64) error { + var bucketPopulationSum int64 + for _, v := range positiveBuckets { + bucketPopulationSum += v + } + for _, v := range negativeBuckets { + bucketPopulationSum += v + } + bucketPopulationSum += int64(zeroBucket) + + // If the sum of observations is NaN, the number of observations must be greater or equal to the sum of all bucket counts. 
+ // Otherwise, the number of observations must be equal to the sum of all bucket counts . + + if math.IsNaN(sum) && bucketPopulationSum > int64(count) || + !math.IsNaN(sum) && bucketPopulationSum != int64(count) { + return errors.New("the sum of all bucket populations exceeds the count of observations") + } + return nil +} + +// NewConstNativeHistogram returns a metric representing a Prometheus native histogram with +// fixed values for the count, sum, and positive/negative/zero bucket counts. As those parameters +// cannot be changed, the returned value does not implement the Histogram +// interface (but only the Metric interface). Users of this package will not +// have much use for it in regular operations. However, when implementing custom +// OpenTelemetry Collectors, it is useful as a throw-away metric that is generated on the fly +// to send it to Prometheus in the Collect method. +// +// zeroBucket counts all (positive and negative) +// observations in the zero bucket (with an absolute value less or equal +// the current threshold). +// positiveBuckets and negativeBuckets are separate maps for negative and positive +// observations. The map's value is an int64, counting observations in +// that bucket. The map's key is the +// index of the bucket according to the used +// Schema. Index 0 is for an upper bound of 1 in positive buckets and for a lower bound of -1 in negative buckets. +// NewConstNativeHistogram returns an error if +// - the length of labelValues is not consistent with the variable labels in Desc or if Desc is invalid. +// - the schema passed is not between 8 and -4 +// - the sum of counts in all buckets including the zero bucket does not equal the count if sum is not NaN (or exceeds the count if sum is NaN) +// +// See https://opentelemetry.io/docs/specs/otel/compatibility/prometheus_and_openmetrics/#exponential-histograms for more details about the conversion from OTel to Prometheus. 
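
A hedged sketch of a NewConstNativeHistogram call that satisfies the consistency check above (all numbers and the descriptor are illustrative):

	desc := prometheus.NewDesc("example_latency_seconds", "Illustrative native histogram.", nil, nil)
	// 2+3 observations in positive buckets plus 1 in the zero bucket matches count=6.
	m, err := prometheus.NewConstNativeHistogram(
		desc,
		6,                         // count
		7.5,                       // sum
		map[int]int64{0: 2, 1: 3}, // positive buckets
		nil,                       // negative buckets
		1,                         // zero bucket
		0,                         // schema 0: bucket bounds grow by a factor of 2
		1e-128,                    // zero threshold
		time.Now(),                // created timestamp
	)
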
+func NewConstNativeHistogram( + desc *Desc, + count uint64, + sum float64, + positiveBuckets, negativeBuckets map[int]int64, + zeroBucket uint64, + schema int32, + zeroThreshold float64, + createdTimestamp time.Time, + labelValues ...string, +) (Metric, error) { + if desc.err != nil { + return nil, desc.err + } + if err := validateLabelValues(labelValues, len(desc.variableLabels.names)); err != nil { + return nil, err + } + if schema > nativeHistogramSchemaMaximum || schema < nativeHistogramSchemaMinimum { + return nil, errors.New("invalid native histogram schema") + } + if err := validateCount(sum, count, negativeBuckets, positiveBuckets, zeroBucket); err != nil { + return nil, err + } + + NegativeSpan, NegativeDelta := makeBucketsFromMap(negativeBuckets) + PositiveSpan, PositiveDelta := makeBucketsFromMap(positiveBuckets) + ret := &constNativeHistogram{ + desc: desc, + Histogram: dto.Histogram{ + CreatedTimestamp: timestamppb.New(createdTimestamp), + Schema: &schema, + ZeroThreshold: &zeroThreshold, + SampleCount: &count, + SampleSum: &sum, + + NegativeSpan: NegativeSpan, + NegativeDelta: NegativeDelta, + + PositiveSpan: PositiveSpan, + PositiveDelta: PositiveDelta, + + ZeroCount: proto.Uint64(zeroBucket), + }, + labelPairs: MakeLabelPairs(desc, labelValues), + } + if *ret.ZeroThreshold == 0 && *ret.ZeroCount == 0 && len(ret.PositiveSpan) == 0 && len(ret.NegativeSpan) == 0 { + ret.PositiveSpan = []*dto.BucketSpan{{ + Offset: proto.Int32(0), + Length: proto.Uint32(0), + }} + } + return ret, nil +} + +// MustNewConstNativeHistogram is a version of NewConstNativeHistogram that panics where +// NewConstNativeHistogram would have returned an error. +func MustNewConstNativeHistogram( + desc *Desc, + count uint64, + sum float64, + positiveBuckets, negativeBuckets map[int]int64, + zeroBucket uint64, + nativeHistogramSchema int32, + nativeHistogramZeroThreshold float64, + createdTimestamp time.Time, + labelValues ...string, +) Metric { + nativehistogram, err := NewConstNativeHistogram(desc, + count, + sum, + positiveBuckets, + negativeBuckets, + zeroBucket, + nativeHistogramSchema, + nativeHistogramZeroThreshold, + createdTimestamp, + labelValues...) + if err != nil { + panic(err) + } + return nativehistogram +} + +func (h *constNativeHistogram) Desc() *Desc { + return h.desc +} + +func (h *constNativeHistogram) Write(out *dto.Metric) error { + out.Histogram = &h.Histogram + out.Label = h.labelPairs + return nil +} + +func makeBucketsFromMap(buckets map[int]int64) ([]*dto.BucketSpan, []int64) { + if len(buckets) == 0 { + return nil, nil + } + var ii []int + for k := range buckets { + ii = append(ii, k) + } + sort.Ints(ii) + + var ( + spans []*dto.BucketSpan + deltas []int64 + prevCount int64 + nextI int + ) + + appendDelta := func(count int64) { + *spans[len(spans)-1].Length++ + deltas = append(deltas, count-prevCount) + prevCount = count + } + + for n, i := range ii { + count := buckets[i] + // Multiple spans with only small gaps in between are probably + // encoded more efficiently as one larger span with a few empty + // buckets. Needs some research to find the sweet spot. For now, + // we assume that gaps of one or two buckets should not create + // a new span. + iDelta := int32(i - nextI) + if n == 0 || iDelta > 2 { + // We have to create a new span, either because we are + // at the very beginning, or because we have found a gap + // of more than two buckets. 
+ spans = append(spans, &dto.BucketSpan{ + Offset: proto.Int32(iDelta), + Length: proto.Uint32(0), + }) + } else { + // We have found a small gap (or no gap at all). + // Insert empty buckets as needed. + for j := int32(0); j < iDelta; j++ { + appendDelta(0) + } + } + appendDelta(count) + nextI = i + 1 + } + return spans, deltas +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go b/vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go index a595a2036..8b016355a 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go @@ -22,17 +22,18 @@ import ( "bytes" "fmt" "io" + "strconv" "strings" ) -func min(a, b int) int { +func minInt(a, b int) int { if a < b { return a } return b } -func max(a, b int) int { +func maxInt(a, b int) int { if a > b { return a } @@ -427,12 +428,12 @@ func (m *SequenceMatcher) GetGroupedOpCodes(n int) [][]OpCode { if codes[0].Tag == 'e' { c := codes[0] i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2 - codes[0] = OpCode{c.Tag, max(i1, i2-n), i2, max(j1, j2-n), j2} + codes[0] = OpCode{c.Tag, maxInt(i1, i2-n), i2, maxInt(j1, j2-n), j2} } if codes[len(codes)-1].Tag == 'e' { c := codes[len(codes)-1] i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2 - codes[len(codes)-1] = OpCode{c.Tag, i1, min(i2, i1+n), j1, min(j2, j1+n)} + codes[len(codes)-1] = OpCode{c.Tag, i1, minInt(i2, i1+n), j1, minInt(j2, j1+n)} } nn := n + n groups := [][]OpCode{} @@ -443,12 +444,12 @@ func (m *SequenceMatcher) GetGroupedOpCodes(n int) [][]OpCode { // there is a large range with no changes. if c.Tag == 'e' && i2-i1 > nn { group = append(group, OpCode{ - c.Tag, i1, min(i2, i1+n), - j1, min(j2, j1+n), + c.Tag, i1, minInt(i2, i1+n), + j1, minInt(j2, j1+n), }) groups = append(groups, group) group = []OpCode{} - i1, j1 = max(i1, i2-n), max(j1, j2-n) + i1, j1 = maxInt(i1, i2-n), maxInt(j1, j2-n) } group = append(group, OpCode{c.Tag, i1, i2, j1, j2}) } @@ -515,7 +516,7 @@ func (m *SequenceMatcher) QuickRatio() float64 { // is faster to compute than either .Ratio() or .QuickRatio(). func (m *SequenceMatcher) RealQuickRatio() float64 { la, lb := len(m.a), len(m.b) - return calculateRatio(min(la, lb), la+lb) + return calculateRatio(minInt(la, lb), la+lb) } // Convert range to the "ed" format @@ -524,7 +525,7 @@ func formatRangeUnified(start, stop int) string { beginning := start + 1 // lines start numbering with one length := stop - start if length == 1 { - return fmt.Sprintf("%d", beginning) + return strconv.Itoa(beginning) } if length == 0 { beginning-- // empty ranges begin at line just before the range diff --git a/vendor/github.com/prometheus/client_golang/prometheus/internal/go_runtime_metrics.go b/vendor/github.com/prometheus/client_golang/prometheus/internal/go_runtime_metrics.go index 97d17d6cb..f7f97ef92 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/internal/go_runtime_metrics.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/internal/go_runtime_metrics.go @@ -66,7 +66,8 @@ func RuntimeMetricsToProm(d *metrics.Description) (string, string, string, bool) name += "_total" } - valid := model.IsValidMetricName(model.LabelValue(namespace + "_" + subsystem + "_" + name)) + // Our current conversion moves to legacy naming, so use legacy validation. 
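
(For instance, an underscore-joined name such as "go_example_wait_seconds_total" passes the legacy check, while the raw runtime/metrics names, which contain '/' separators, are only representable under the newer UTF-8 naming rules.)
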
+ valid := model.IsValidLegacyMetricName(namespace + "_" + subsystem + "_" + name) switch d.Kind { case metrics.KindUint64: case metrics.KindFloat64: diff --git a/vendor/github.com/prometheus/client_golang/prometheus/metric.go b/vendor/github.com/prometheus/client_golang/prometheus/metric.go index 9d9b81ab4..592eec3e2 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/metric.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/metric.go @@ -108,15 +108,23 @@ func BuildFQName(namespace, subsystem, name string) string { if name == "" { return "" } - switch { - case namespace != "" && subsystem != "": - return strings.Join([]string{namespace, subsystem, name}, "_") - case namespace != "": - return strings.Join([]string{namespace, name}, "_") - case subsystem != "": - return strings.Join([]string{subsystem, name}, "_") + + sb := strings.Builder{} + sb.Grow(len(namespace) + len(subsystem) + len(name) + 2) + + if namespace != "" { + sb.WriteString(namespace) + sb.WriteString("_") } - return name + + if subsystem != "" { + sb.WriteString(subsystem) + sb.WriteString("_") + } + + sb.WriteString(name) + + return sb.String() } type invalidMetric struct { diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go index 62a4e7ad9..e7bce8b58 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go @@ -23,6 +23,7 @@ import ( type processCollector struct { collectFn func(chan<- Metric) + describeFn func(chan<- *Desc) pidFn func() (int, error) reportErrors bool cpuTotal *Desc @@ -122,26 +123,23 @@ func NewProcessCollector(opts ProcessCollectorOpts) Collector { // Set up process metric collection if supported by the runtime. if canCollectProcess() { c.collectFn = c.processCollect + c.describeFn = c.describe } else { - c.collectFn = func(ch chan<- Metric) { - c.reportError(ch, nil, errors.New("process metrics not supported on this platform")) - } + c.collectFn = c.errorCollectFn + c.describeFn = c.errorDescribeFn } return c } -// Describe returns all descriptions of the collector. -func (c *processCollector) Describe(ch chan<- *Desc) { - ch <- c.cpuTotal - ch <- c.openFDs - ch <- c.maxFDs - ch <- c.vsize - ch <- c.maxVsize - ch <- c.rss - ch <- c.startTime - ch <- c.inBytes - ch <- c.outBytes +func (c *processCollector) errorCollectFn(ch chan<- Metric) { + c.reportError(ch, nil, errors.New("process metrics not supported on this platform")) +} + +func (c *processCollector) errorDescribeFn(ch chan<- *Desc) { + if c.reportErrors { + ch <- NewInvalidDesc(errors.New("process metrics not supported on this platform")) + } } // Collect returns the current state of all metrics of the collector. @@ -149,6 +147,11 @@ func (c *processCollector) Collect(ch chan<- Metric) { c.collectFn(ch) } +// Describe returns all descriptions of the collector. 
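
(Splitting collectFn and describeFn keeps Describe consistent with Collect on platforms without process-metric support: such builds now describe at most a single invalid descriptor, mirroring what errorCollectFn reports, instead of advertising descriptors that are never populated.)
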
+func (c *processCollector) Describe(ch chan<- *Desc) { + c.describeFn(ch) +} + func (c *processCollector) reportError(ch chan<- Metric, desc *Desc, err error) { if !c.reportErrors { return diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_darwin.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_darwin.go new file mode 100644 index 000000000..0a61b9846 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_darwin.go @@ -0,0 +1,130 @@ +// Copyright 2024 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build darwin && !ios + +package prometheus + +import ( + "errors" + "fmt" + "os" + "syscall" + "time" + + "golang.org/x/sys/unix" +) + +// notImplementedErr is returned by stub functions that replace cgo functions, when cgo +// isn't available. +var notImplementedErr = errors.New("not implemented") + +type memoryInfo struct { + vsize uint64 // Virtual memory size in bytes + rss uint64 // Resident memory size in bytes +} + +func canCollectProcess() bool { + return true +} + +func getSoftLimit(which int) (uint64, error) { + rlimit := syscall.Rlimit{} + + if err := syscall.Getrlimit(which, &rlimit); err != nil { + return 0, err + } + + return rlimit.Cur, nil +} + +func getOpenFileCount() (float64, error) { + // Alternately, the undocumented proc_pidinfo(PROC_PIDLISTFDS) can be used to + // return a list of open fds, but that requires a way to call C APIs. The + // benefits, however, include fewer system calls and not failing when at the + // open file soft limit. + + if dir, err := os.Open("/dev/fd"); err != nil { + return 0.0, err + } else { + defer dir.Close() + + // Avoid ReadDir(), as it calls stat(2) on each descriptor. Not only is + // that info not used, but KQUEUE descriptors fail stat(2), which causes + // the whole method to fail. + if names, err := dir.Readdirnames(0); err != nil { + return 0.0, err + } else { + // Subtract 1 to ignore the open /dev/fd descriptor above. + return float64(len(names) - 1), nil + } + } +} + +func (c *processCollector) processCollect(ch chan<- Metric) { + if procs, err := unix.SysctlKinfoProcSlice("kern.proc.pid", os.Getpid()); err == nil { + if len(procs) == 1 { + startTime := float64(procs[0].Proc.P_starttime.Nano() / 1e9) + ch <- MustNewConstMetric(c.startTime, GaugeValue, startTime) + } else { + err = fmt.Errorf("sysctl() returned %d proc structs (expected 1)", len(procs)) + c.reportError(ch, c.startTime, err) + } + } else { + c.reportError(ch, c.startTime, err) + } + + // The proc structure returned by kern.proc.pid above has an Rusage member, + // but it is not filled in, so it needs to be fetched by getrusage(2). For + // that call, the UTime, STime, and Maxrss members are filled out, but not + // Ixrss, Idrss, or Isrss for the memory usage. Memory stats will require + // access to the C API to call task_info(TASK_BASIC_INFO). 
+ rusage := unix.Rusage{} + + if err := unix.Getrusage(syscall.RUSAGE_SELF, &rusage); err == nil { + cpuTime := time.Duration(rusage.Stime.Nano() + rusage.Utime.Nano()).Seconds() + ch <- MustNewConstMetric(c.cpuTotal, CounterValue, cpuTime) + } else { + c.reportError(ch, c.cpuTotal, err) + } + + if memInfo, err := getMemory(); err == nil { + ch <- MustNewConstMetric(c.rss, GaugeValue, float64(memInfo.rss)) + ch <- MustNewConstMetric(c.vsize, GaugeValue, float64(memInfo.vsize)) + } else if !errors.Is(err, notImplementedErr) { + // Don't report an error when support is not compiled in. + c.reportError(ch, c.rss, err) + c.reportError(ch, c.vsize, err) + } + + if fds, err := getOpenFileCount(); err == nil { + ch <- MustNewConstMetric(c.openFDs, GaugeValue, fds) + } else { + c.reportError(ch, c.openFDs, err) + } + + if openFiles, err := getSoftLimit(syscall.RLIMIT_NOFILE); err == nil { + ch <- MustNewConstMetric(c.maxFDs, GaugeValue, float64(openFiles)) + } else { + c.reportError(ch, c.maxFDs, err) + } + + if addressSpace, err := getSoftLimit(syscall.RLIMIT_AS); err == nil { + ch <- MustNewConstMetric(c.maxVsize, GaugeValue, float64(addressSpace)) + } else { + c.reportError(ch, c.maxVsize, err) + } + + // TODO: socket(PF_SYSTEM) to fetch "com.apple.network.statistics" might + // be able to get the per-process network send/receive counts. +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_mem_cgo_darwin.c b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_mem_cgo_darwin.c new file mode 100644 index 000000000..d00a24315 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_mem_cgo_darwin.c @@ -0,0 +1,84 @@ +// Copyright 2024 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build darwin && !ios && cgo + +#include +#include +#include + +// The compiler warns that mach/shared_memory_server.h is deprecated, and to use +// mach/shared_region.h instead. But that doesn't define +// SHARED_DATA_REGION_SIZE or SHARED_TEXT_REGION_SIZE, so redefine them here and +// avoid a warning message when running tests. +#define GLOBAL_SHARED_TEXT_SEGMENT 0x90000000U +#define SHARED_DATA_REGION_SIZE 0x10000000 +#define SHARED_TEXT_REGION_SIZE 0x10000000 + + +int get_memory_info(unsigned long long *rss, unsigned long long *vsize) +{ + // This is lightly adapted from how ps(1) obtains its memory info. 
+ // https://github.com/apple-oss-distributions/adv_cmds/blob/8744084ea0ff41ca4bb96b0f9c22407d0e48e9b7/ps/tasks.c#L109 + + kern_return_t error; + task_t task = MACH_PORT_NULL; + mach_task_basic_info_data_t info; + mach_msg_type_number_t info_count = MACH_TASK_BASIC_INFO_COUNT; + + error = task_info( + mach_task_self(), + MACH_TASK_BASIC_INFO, + (task_info_t) &info, + &info_count ); + + if( error != KERN_SUCCESS ) + { + return error; + } + + *rss = info.resident_size; + *vsize = info.virtual_size; + + { + vm_region_basic_info_data_64_t b_info; + mach_vm_address_t address = GLOBAL_SHARED_TEXT_SEGMENT; + mach_vm_size_t size; + mach_port_t object_name; + + /* + * try to determine if this task has the split libraries + * mapped in... if so, adjust its virtual size down by + * the 2 segments that are used for split libraries + */ + info_count = VM_REGION_BASIC_INFO_COUNT_64; + + error = mach_vm_region( + mach_task_self(), + &address, + &size, + VM_REGION_BASIC_INFO_64, + (vm_region_info_t) &b_info, + &info_count, + &object_name); + + if (error == KERN_SUCCESS) { + if (b_info.reserved && size == (SHARED_TEXT_REGION_SIZE) && + *vsize > (SHARED_TEXT_REGION_SIZE + SHARED_DATA_REGION_SIZE)) { + *vsize -= (SHARED_TEXT_REGION_SIZE + SHARED_DATA_REGION_SIZE); + } + } + } + + return 0; +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_mem_cgo_darwin.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_mem_cgo_darwin.go new file mode 100644 index 000000000..9ac53f999 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_mem_cgo_darwin.go @@ -0,0 +1,51 @@ +// Copyright 2024 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build darwin && !ios && cgo + +package prometheus + +/* +int get_memory_info(unsigned long long *rss, unsigned long long *vs); +*/ +import "C" +import "fmt" + +func getMemory() (*memoryInfo, error) { + var rss, vsize C.ulonglong + + if err := C.get_memory_info(&rss, &vsize); err != 0 { + return nil, fmt.Errorf("task_info() failed with 0x%x", int(err)) + } + + return &memoryInfo{vsize: uint64(vsize), rss: uint64(rss)}, nil +} + +// describe returns all descriptions of the collector for Darwin. +// Ensure that this list of descriptors is kept in sync with the metrics collected +// in the processCollect method. Any changes to the metrics in processCollect +// (such as adding or removing metrics) should be reflected in this list of descriptors. 
+func (c *processCollector) describe(ch chan<- *Desc) { + ch <- c.cpuTotal + ch <- c.openFDs + ch <- c.maxFDs + ch <- c.maxVsize + ch <- c.startTime + ch <- c.rss + ch <- c.vsize + + /* the process could be collected but not implemented yet + ch <- c.inBytes + ch <- c.outBytes + */ +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_mem_nocgo_darwin.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_mem_nocgo_darwin.go new file mode 100644 index 000000000..8ddb0995d --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_mem_nocgo_darwin.go @@ -0,0 +1,39 @@ +// Copyright 2024 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build darwin && !ios && !cgo + +package prometheus + +func getMemory() (*memoryInfo, error) { + return nil, notImplementedErr +} + +// describe returns all descriptions of the collector for Darwin. +// Ensure that this list of descriptors is kept in sync with the metrics collected +// in the processCollect method. Any changes to the metrics in processCollect +// (such as adding or removing metrics) should be reflected in this list of descriptors. +func (c *processCollector) describe(ch chan<- *Desc) { + ch <- c.cpuTotal + ch <- c.openFDs + ch <- c.maxFDs + ch <- c.maxVsize + ch <- c.startTime + + /* the process could be collected but not implemented yet + ch <- c.rss + ch <- c.vsize + ch <- c.inBytes + ch <- c.outBytes + */ +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_wasip1.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_not_supported.go similarity index 55% rename from vendor/github.com/prometheus/client_golang/prometheus/process_collector_wasip1.go rename to vendor/github.com/prometheus/client_golang/prometheus/process_collector_not_supported.go index d8d9a6d7a..7732b7f37 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_wasip1.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_not_supported.go @@ -11,8 +11,8 @@ // See the License for the specific language governing permissions and // limitations under the License. -//go:build wasip1 -// +build wasip1 +//go:build wasip1 || js || ios +// +build wasip1 js ios package prometheus @@ -20,7 +20,14 @@ func canCollectProcess() bool { return false } -func (*processCollector) processCollect(chan<- Metric) { - // noop on this platform - return +func (c *processCollector) processCollect(ch chan<- Metric) { + c.errorCollectFn(ch) +} + +// describe returns all descriptions of the collector for wasip1 and js. +// Ensure that this list of descriptors is kept in sync with the metrics collected +// in the processCollect method. Any changes to the metrics in processCollect +// (such as adding or removing metrics) should be reflected in this list of descriptors. 
+func (c *processCollector) describe(ch chan<- *Desc) { + c.errorDescribeFn(ch) } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_procfsenabled.go similarity index 77% rename from vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go rename to vendor/github.com/prometheus/client_golang/prometheus/process_collector_procfsenabled.go index 14d56d2d0..9f4b130be 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_procfsenabled.go @@ -11,8 +11,8 @@ // See the License for the specific language governing permissions and // limitations under the License. -//go:build !windows && !js && !wasip1 -// +build !windows,!js,!wasip1 +//go:build !windows && !js && !wasip1 && !darwin +// +build !windows,!js,!wasip1,!darwin package prometheus @@ -78,3 +78,19 @@ func (c *processCollector) processCollect(ch chan<- Metric) { c.reportError(ch, nil, err) } } + +// describe returns all descriptions of the collector for others than windows, js, wasip1 and darwin. +// Ensure that this list of descriptors is kept in sync with the metrics collected +// in the processCollect method. Any changes to the metrics in processCollect +// (such as adding or removing metrics) should be reflected in this list of descriptors. +func (c *processCollector) describe(ch chan<- *Desc) { + ch <- c.cpuTotal + ch <- c.openFDs + ch <- c.maxFDs + ch <- c.vsize + ch <- c.maxVsize + ch <- c.rss + ch <- c.startTime + ch <- c.inBytes + ch <- c.outBytes +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_windows.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_windows.go index f973398df..fa474289e 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_windows.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_windows.go @@ -79,14 +79,10 @@ func getProcessHandleCount(handle windows.Handle) (uint32, error) { } func (c *processCollector) processCollect(ch chan<- Metric) { - h, err := windows.GetCurrentProcess() - if err != nil { - c.reportError(ch, nil, err) - return - } + h := windows.CurrentProcess() var startTime, exitTime, kernelTime, userTime windows.Filetime - err = windows.GetProcessTimes(h, &startTime, &exitTime, &kernelTime, &userTime) + err := windows.GetProcessTimes(h, &startTime, &exitTime, &kernelTime, &userTime) if err != nil { c.reportError(ch, nil, err) return @@ -111,6 +107,19 @@ func (c *processCollector) processCollect(ch chan<- Metric) { ch <- MustNewConstMetric(c.maxFDs, GaugeValue, float64(16*1024*1024)) // Windows has a hard-coded max limit, not per-process. } +// describe returns all descriptions of the collector for windows. +// Ensure that this list of descriptors is kept in sync with the metrics collected +// in the processCollect method. Any changes to the metrics in processCollect +// (such as adding or removing metrics) should be reflected in this list of descriptors. 
+func (c *processCollector) describe(ch chan<- *Desc) { + ch <- c.cpuTotal + ch <- c.openFDs + ch <- c.maxFDs + ch <- c.vsize + ch <- c.rss + ch <- c.startTime +} + func fileTimeToSeconds(ft windows.Filetime) float64 { return float64(uint64(ft.HighDateTime)<<32+uint64(ft.LowDateTime)) / 1e7 } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go index e598e66e6..763d99e36 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go @@ -41,11 +41,11 @@ import ( "sync" "time" - "github.com/klauspost/compress/zstd" "github.com/prometheus/common/expfmt" "github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil" "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promhttp/internal" ) const ( @@ -65,7 +65,13 @@ const ( Zstd Compression = "zstd" ) -var defaultCompressionFormats = []Compression{Identity, Gzip, Zstd} +func defaultCompressionFormats() []Compression { + if internal.NewZstdWriter != nil { + return []Compression{Identity, Gzip, Zstd} + } else { + return []Compression{Identity, Gzip} + } +} var gzipPool = sync.Pool{ New: func() interface{} { @@ -138,7 +144,7 @@ func HandlerForTransactional(reg prometheus.TransactionalGatherer, opts HandlerO // Select compression formats to offer based on default or user choice. var compressions []string if !opts.DisableCompression { - offers := defaultCompressionFormats + offers := defaultCompressionFormats() if len(opts.OfferedCompressions) > 0 { offers = opts.OfferedCompressions } @@ -207,7 +213,13 @@ func HandlerForTransactional(reg prometheus.TransactionalGatherer, opts HandlerO if encodingHeader != string(Identity) { rsp.Header().Set(contentEncodingHeader, encodingHeader) } - enc := expfmt.NewEncoder(w, contentType) + + var enc expfmt.Encoder + if opts.EnableOpenMetricsTextCreatedSamples { + enc = expfmt.NewEncoder(w, contentType, expfmt.WithCreatedLines()) + } else { + enc = expfmt.NewEncoder(w, contentType) + } // handleError handles the error according to opts.ErrorHandling // and returns true if we have to abort after the handling. @@ -408,6 +420,21 @@ type HandlerOpts struct { // (which changes the identity of the resulting series on the Prometheus // server). EnableOpenMetrics bool + // EnableOpenMetricsTextCreatedSamples specifies if this handler should add, extra, synthetic + // Created Timestamps for counters, histograms and summaries, which for the current + // version of OpenMetrics are defined as extra series with the same name and "_created" + // suffix. See also the OpenMetrics specification for more details + // https://github.com/prometheus/OpenMetrics/blob/v1.0.0/specification/OpenMetrics.md#counter-1 + // + // Created timestamps are used to improve the accuracy of reset detection, + // but the way it's designed in OpenMetrics 1.0 it also dramatically increases cardinality + // if the scraper does not handle those metrics correctly (converting to created timestamp + // instead of leaving those series as-is). New OpenMetrics versions might improve + // this situation. + // + // Prometheus introduced the feature flag 'created-timestamp-zero-ingestion' + // in version 2.50.0 to handle this situation. 
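
A minimal opt-in sketch from application code (handler wiring and registry are illustrative):

	http.Handle("/metrics", promhttp.HandlerFor(prometheus.DefaultGatherer, promhttp.HandlerOpts{
		EnableOpenMetrics:                   true,
		EnableOpenMetricsTextCreatedSamples: true,
	}))
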
+ EnableOpenMetricsTextCreatedSamples bool // ProcessStartTime allows setting process start timevalue that will be exposed // with "Process-Start-Time-Unix" response header along with the metrics // payload. This allow callers to have efficient transformations to cumulative @@ -445,14 +472,12 @@ func negotiateEncodingWriter(r *http.Request, rw io.Writer, compressions []strin switch selected { case "zstd": - // TODO(mrueg): Replace klauspost/compress with stdlib implementation once https://github.com/golang/go/issues/62513 is implemented. - z, err := zstd.NewWriter(rw, zstd.WithEncoderLevel(zstd.SpeedFastest)) - if err != nil { - return nil, "", func() {}, err + if internal.NewZstdWriter == nil { + // The content encoding was not implemented yet. + return nil, "", func() {}, fmt.Errorf("content compression format not recognized: %s. Valid formats are: %s", selected, defaultCompressionFormats()) } - - z.Reset(rw) - return z, selected, func() { _ = z.Close() }, nil + writer, closeWriter, err := internal.NewZstdWriter(rw) + return writer, selected, closeWriter, err case "gzip": gz := gzipPool.Get().(*gzip.Writer) gz.Reset(rw) @@ -462,6 +487,6 @@ func negotiateEncodingWriter(r *http.Request, rw io.Writer, compressions []strin return rw, selected, func() {}, nil default: // The content encoding was not implemented yet. - return nil, "", func() {}, fmt.Errorf("content compression format not recognized: %s. Valid formats are: %s", selected, defaultCompressionFormats) + return nil, "", func() {}, fmt.Errorf("content compression format not recognized: %s. Valid formats are: %s", selected, defaultCompressionFormats()) } } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_js.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/internal/compression.go similarity index 70% rename from vendor/github.com/prometheus/client_golang/prometheus/process_collector_js.go rename to vendor/github.com/prometheus/client_golang/prometheus/promhttp/internal/compression.go index b1e363d6c..c5039590f 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_js.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/internal/compression.go @@ -1,4 +1,4 @@ -// Copyright 2019 The Prometheus Authors +// Copyright 2025 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at @@ -11,16 +11,11 @@ // See the License for the specific language governing permissions and // limitations under the License. -//go:build js -// +build js +package internal -package prometheus +import ( + "io" +) -func canCollectProcess() bool { - return false -} - -func (c *processCollector) processCollect(ch chan<- Metric) { - // noop on this platform - return -} +// NewZstdWriter enables zstd write support if non-nil. 
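
(Keeping this a nil-able hook means importing promhttp no longer pulls in a zstd encoder by default: when the hook is left unset, defaultCompressionFormats offers only identity and gzip, and a negotiated "zstd" falls through to the "format not recognized" error path shown above.)
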
+var NewZstdWriter func(rw io.Writer) (_ io.Writer, closeWriter func(), _ error) diff --git a/vendor/github.com/prometheus/client_golang/prometheus/summary.go b/vendor/github.com/prometheus/client_golang/prometheus/summary.go index 1ab0e4796..ac5203c6f 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/summary.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/summary.go @@ -243,6 +243,7 @@ func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary { s := &summary{ desc: desc, + now: opts.now, objectives: opts.Objectives, sortedObjectives: make([]float64, 0, len(opts.Objectives)), @@ -280,6 +281,8 @@ type summary struct { desc *Desc + now func() time.Time + objectives map[float64]float64 sortedObjectives []float64 @@ -307,7 +310,7 @@ func (s *summary) Observe(v float64) { s.bufMtx.Lock() defer s.bufMtx.Unlock() - now := time.Now() + now := s.now() if now.After(s.hotBufExpTime) { s.asyncFlush(now) } @@ -326,7 +329,7 @@ func (s *summary) Write(out *dto.Metric) error { s.bufMtx.Lock() s.mtx.Lock() // Swap bufs even if hotBuf is empty to set new hotBufExpTime. - s.swapBufs(time.Now()) + s.swapBufs(s.now()) s.bufMtx.Unlock() s.flushColdBuf() diff --git a/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/duplicate_validations.go b/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/duplicate_validations.go index fdc1e6239..68645ed0a 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/duplicate_validations.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/duplicate_validations.go @@ -14,7 +14,7 @@ package validations import ( - "fmt" + "errors" "reflect" dto "github.com/prometheus/client_model/go" @@ -27,7 +27,7 @@ func LintDuplicateMetric(mf *dto.MetricFamily) []error { for i, m := range mf.Metric { for _, k := range mf.Metric[i+1:] { if reflect.DeepEqual(m.Label, k.Label) { - problems = append(problems, fmt.Errorf("metric not unique")) + problems = append(problems, errors.New("metric not unique")) break } } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/testutil/testutil.go b/vendor/github.com/prometheus/client_golang/prometheus/testutil/testutil.go index e0ac34666..1258508e4 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/testutil/testutil.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/testutil/testutil.go @@ -39,6 +39,7 @@ package testutil import ( "bytes" + "errors" "fmt" "io" "net/http" @@ -46,6 +47,7 @@ import ( "github.com/kylelemons/godebug/diff" dto "github.com/prometheus/client_model/go" "github.com/prometheus/common/expfmt" + "github.com/prometheus/common/model" "google.golang.org/protobuf/proto" "github.com/prometheus/client_golang/prometheus" @@ -158,6 +160,9 @@ func GatherAndCount(g prometheus.Gatherer, metricNames ...string) (int, error) { // ScrapeAndCompare calls a remote exporter's endpoint which is expected to return some metrics in // plain text format. Then it compares it with the results that the `expected` would return. // If the `metricNames` is not empty it would filter the comparison only to the given metric names. +// +// NOTE: Be mindful of accidental discrepancies between expected and metricNames; metricNames filter +// both expected and scraped metrics. See https://github.com/prometheus/client_golang/issues/1351. 
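
A short sketch of the pitfall these notes warn about (the collector and metric names are illustrative):

	expected := strings.NewReader(
		"# HELP example_foo_total Illustrative counter.\n" +
			"# TYPE example_foo_total counter\n" +
			"example_foo_total 1\n")
	// Only example_foo_total is compared; an example_bar_total present on either
	// side but missing from metricNames is filtered out, not reported as missing.
	err := testutil.CollectAndCompare(exampleCollector, expected, "example_foo_total")
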
func ScrapeAndCompare(url string, expected io.Reader, metricNames ...string) error { resp, err := http.Get(url) if err != nil { @@ -185,6 +190,9 @@ func ScrapeAndCompare(url string, expected io.Reader, metricNames ...string) err // CollectAndCompare collects the metrics identified by `metricNames` and compares them in the Prometheus text // exposition format to the data read from expected. +// +// NOTE: Be mindful of accidental discrepancies between expected and metricNames; metricNames filter +// both expected and collected metrics. See https://github.com/prometheus/client_golang/issues/1351. func CollectAndCompare(c prometheus.Collector, expected io.Reader, metricNames ...string) error { reg := prometheus.NewPedanticRegistry() if err := reg.Register(c); err != nil { @@ -197,6 +205,9 @@ func CollectAndCompare(c prometheus.Collector, expected io.Reader, metricNames . // it to an expected output read from the provided Reader in the Prometheus text // exposition format. If any metricNames are provided, only metrics with those // names are compared. +// +// NOTE: Be mindful of accidental discrepancies between expected and metricNames; metricNames filter +// both expected and gathered metrics. See https://github.com/prometheus/client_golang/issues/1351. func GatherAndCompare(g prometheus.Gatherer, expected io.Reader, metricNames ...string) error { return TransactionalGatherAndCompare(prometheus.ToTransactionalGatherer(g), expected, metricNames...) } @@ -205,6 +216,9 @@ func GatherAndCompare(g prometheus.Gatherer, expected io.Reader, metricNames ... // it to an expected output read from the provided Reader in the Prometheus text // exposition format. If any metricNames are provided, only metrics with those // names are compared. +// +// NOTE: Be mindful of accidental discrepancies between expected and metricNames; metricNames filter +// both expected and gathered metrics. See https://github.com/prometheus/client_golang/issues/1351. func TransactionalGatherAndCompare(g prometheus.TransactionalGatherer, expected io.Reader, metricNames ...string) error { got, done, err := g.Gather() defer done() @@ -277,15 +291,6 @@ func compareMetricFamilies(got, expected []*dto.MetricFamily, metricNames ...str if metricNames != nil { got = filterMetrics(got, metricNames) expected = filterMetrics(expected, metricNames) - if len(metricNames) > len(got) { - var missingMetricNames []string - for _, name := range metricNames { - if ok := hasMetricByName(got, name); !ok { - missingMetricNames = append(missingMetricNames, name) - } - } - return fmt.Errorf("expected metric name(s) not found: %v", missingMetricNames) - } } return compare(got, expected) @@ -297,20 +302,20 @@ func compareMetricFamilies(got, expected []*dto.MetricFamily, metricNames ...str // result. 
func compare(got, want []*dto.MetricFamily) error { var gotBuf, wantBuf bytes.Buffer - enc := expfmt.NewEncoder(&gotBuf, expfmt.NewFormat(expfmt.TypeTextPlain)) + enc := expfmt.NewEncoder(&gotBuf, expfmt.NewFormat(expfmt.TypeTextPlain).WithEscapingScheme(model.NoEscaping)) for _, mf := range got { if err := enc.Encode(mf); err != nil { return fmt.Errorf("encoding gathered metrics failed: %w", err) } } - enc = expfmt.NewEncoder(&wantBuf, expfmt.NewFormat(expfmt.TypeTextPlain)) + enc = expfmt.NewEncoder(&wantBuf, expfmt.NewFormat(expfmt.TypeTextPlain).WithEscapingScheme(model.NoEscaping)) for _, mf := range want { if err := enc.Encode(mf); err != nil { return fmt.Errorf("encoding expected metrics failed: %w", err) } } if diffErr := diff.Diff(gotBuf.String(), wantBuf.String()); diffErr != "" { - return fmt.Errorf(diffErr) + return errors.New(diffErr) } return nil } @@ -327,12 +332,3 @@ func filterMetrics(metrics []*dto.MetricFamily, names []string) []*dto.MetricFam } return filtered } - -func hasMetricByName(metrics []*dto.MetricFamily, name string) bool { - for _, mf := range metrics { - if mf.GetName() == name { - return true - } - } - return false -} diff --git a/vendor/github.com/prometheus/common/expfmt/decode.go b/vendor/github.com/prometheus/common/expfmt/decode.go index 25cfaa216..1448439b7 100644 --- a/vendor/github.com/prometheus/common/expfmt/decode.go +++ b/vendor/github.com/prometheus/common/expfmt/decode.go @@ -45,7 +45,7 @@ func ResponseFormat(h http.Header) Format { mediatype, params, err := mime.ParseMediaType(ct) if err != nil { - return fmtUnknown + return FmtUnknown } const textType = "text/plain" @@ -53,21 +53,21 @@ func ResponseFormat(h http.Header) Format { switch mediatype { case ProtoType: if p, ok := params["proto"]; ok && p != ProtoProtocol { - return fmtUnknown + return FmtUnknown } if e, ok := params["encoding"]; ok && e != "delimited" { - return fmtUnknown + return FmtUnknown } - return fmtProtoDelim + return FmtProtoDelim case textType: if v, ok := params["version"]; ok && v != TextVersion { - return fmtUnknown + return FmtUnknown } - return fmtText + return FmtText } - return fmtUnknown + return FmtUnknown } // NewDecoder returns a new decoder based on the given input format. diff --git a/vendor/github.com/prometheus/common/expfmt/encode.go b/vendor/github.com/prometheus/common/expfmt/encode.go index ff5ef7a9d..d7f3d76f5 100644 --- a/vendor/github.com/prometheus/common/expfmt/encode.go +++ b/vendor/github.com/prometheus/common/expfmt/encode.go @@ -68,7 +68,7 @@ func Negotiate(h http.Header) Format { if escapeParam := ac.Params[model.EscapingKey]; escapeParam != "" { switch Format(escapeParam) { case model.AllowUTF8, model.EscapeUnderscores, model.EscapeDots, model.EscapeValues: - escapingScheme = Format(fmt.Sprintf("; escaping=%s", escapeParam)) + escapingScheme = Format("; escaping=" + escapeParam) default: // If the escaping parameter is unknown, ignore it. 
} @@ -77,18 +77,18 @@ func Negotiate(h http.Header) Format { if ac.Type+"/"+ac.SubType == ProtoType && ac.Params["proto"] == ProtoProtocol { switch ac.Params["encoding"] { case "delimited": - return fmtProtoDelim + escapingScheme + return FmtProtoDelim + escapingScheme case "text": - return fmtProtoText + escapingScheme + return FmtProtoText + escapingScheme case "compact-text": - return fmtProtoCompact + escapingScheme + return FmtProtoCompact + escapingScheme } } if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") { - return fmtText + escapingScheme + return FmtText + escapingScheme } } - return fmtText + escapingScheme + return FmtText + escapingScheme } // NegotiateIncludingOpenMetrics works like Negotiate but includes @@ -101,7 +101,7 @@ func NegotiateIncludingOpenMetrics(h http.Header) Format { if escapeParam := ac.Params[model.EscapingKey]; escapeParam != "" { switch Format(escapeParam) { case model.AllowUTF8, model.EscapeUnderscores, model.EscapeDots, model.EscapeValues: - escapingScheme = Format(fmt.Sprintf("; escaping=%s", escapeParam)) + escapingScheme = Format("; escaping=" + escapeParam) default: // If the escaping parameter is unknown, ignore it. } @@ -110,26 +110,26 @@ func NegotiateIncludingOpenMetrics(h http.Header) Format { if ac.Type+"/"+ac.SubType == ProtoType && ac.Params["proto"] == ProtoProtocol { switch ac.Params["encoding"] { case "delimited": - return fmtProtoDelim + escapingScheme + return FmtProtoDelim + escapingScheme case "text": - return fmtProtoText + escapingScheme + return FmtProtoText + escapingScheme case "compact-text": - return fmtProtoCompact + escapingScheme + return FmtProtoCompact + escapingScheme } } if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") { - return fmtText + escapingScheme + return FmtText + escapingScheme } if ac.Type+"/"+ac.SubType == OpenMetricsType && (ver == OpenMetricsVersion_0_0_1 || ver == OpenMetricsVersion_1_0_0 || ver == "") { switch ver { case OpenMetricsVersion_1_0_0: - return fmtOpenMetrics_1_0_0 + escapingScheme + return FmtOpenMetrics_1_0_0 + escapingScheme default: - return fmtOpenMetrics_0_0_1 + escapingScheme + return FmtOpenMetrics_0_0_1 + escapingScheme } } } - return fmtText + escapingScheme + return FmtText + escapingScheme } // NewEncoder returns a new encoder based on content type negotiation. All diff --git a/vendor/github.com/prometheus/common/expfmt/expfmt.go b/vendor/github.com/prometheus/common/expfmt/expfmt.go index 051b38cd1..b26886560 100644 --- a/vendor/github.com/prometheus/common/expfmt/expfmt.go +++ b/vendor/github.com/prometheus/common/expfmt/expfmt.go @@ -15,7 +15,7 @@ package expfmt import ( - "fmt" + "errors" "strings" "github.com/prometheus/common/model" @@ -32,24 +32,31 @@ type Format string // it on the wire, new content-type strings will have to be agreed upon and // added here. const ( - TextVersion = "0.0.4" - ProtoType = `application/vnd.google.protobuf` - ProtoProtocol = `io.prometheus.client.MetricFamily` - protoFmt = ProtoType + "; proto=" + ProtoProtocol + ";" + TextVersion = "0.0.4" + ProtoType = `application/vnd.google.protobuf` + ProtoProtocol = `io.prometheus.client.MetricFamily` + // Deprecated: Use expfmt.NewFormat(expfmt.TypeProtoCompact) instead. + ProtoFmt = ProtoType + "; proto=" + ProtoProtocol + ";" OpenMetricsType = `application/openmetrics-text` OpenMetricsVersion_0_0_1 = "0.0.1" OpenMetricsVersion_1_0_0 = "1.0.0" - // The Content-Type values for the different wire protocols. 
Note that these - // values are now unexported. If code was relying on comparisons to these - // constants, instead use FormatType(). - fmtUnknown Format = `` - fmtText Format = `text/plain; version=` + TextVersion + `; charset=utf-8` - fmtProtoDelim Format = protoFmt + ` encoding=delimited` - fmtProtoText Format = protoFmt + ` encoding=text` - fmtProtoCompact Format = protoFmt + ` encoding=compact-text` - fmtOpenMetrics_1_0_0 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_1_0_0 + `; charset=utf-8` - fmtOpenMetrics_0_0_1 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_0_0_1 + `; charset=utf-8` + // The Content-Type values for the different wire protocols. Do not do direct + // comparisons to these constants, instead use the comparison functions. + // Deprecated: Use expfmt.NewFormat(expfmt.TypeUnknown) instead. + FmtUnknown Format = `` + // Deprecated: Use expfmt.NewFormat(expfmt.TypeTextPlain) instead. + FmtText Format = `text/plain; version=` + TextVersion + `; charset=utf-8` + // Deprecated: Use expfmt.NewFormat(expfmt.TypeProtoDelim) instead. + FmtProtoDelim Format = ProtoFmt + ` encoding=delimited` + // Deprecated: Use expfmt.NewFormat(expfmt.TypeProtoText) instead. + FmtProtoText Format = ProtoFmt + ` encoding=text` + // Deprecated: Use expfmt.NewFormat(expfmt.TypeProtoCompact) instead. + FmtProtoCompact Format = ProtoFmt + ` encoding=compact-text` + // Deprecated: Use expfmt.NewFormat(expfmt.TypeOpenMetrics) instead. + FmtOpenMetrics_1_0_0 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_1_0_0 + `; charset=utf-8` + // Deprecated: Use expfmt.NewFormat(expfmt.TypeOpenMetrics) instead. + FmtOpenMetrics_0_0_1 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_0_0_1 + `; charset=utf-8` ) const ( @@ -79,17 +86,17 @@ const ( func NewFormat(t FormatType) Format { switch t { case TypeProtoCompact: - return fmtProtoCompact + return FmtProtoCompact case TypeProtoDelim: - return fmtProtoDelim + return FmtProtoDelim case TypeProtoText: - return fmtProtoText + return FmtProtoText case TypeTextPlain: - return fmtText + return FmtText case TypeOpenMetrics: - return fmtOpenMetrics_1_0_0 + return FmtOpenMetrics_1_0_0 default: - return fmtUnknown + return FmtUnknown } } @@ -97,12 +104,35 @@ func NewFormat(t FormatType) Format { // specified version number. func NewOpenMetricsFormat(version string) (Format, error) { if version == OpenMetricsVersion_0_0_1 { - return fmtOpenMetrics_0_0_1, nil + return FmtOpenMetrics_0_0_1, nil } if version == OpenMetricsVersion_1_0_0 { - return fmtOpenMetrics_1_0_0, nil + return FmtOpenMetrics_1_0_0, nil } - return fmtUnknown, fmt.Errorf("unknown open metrics version string") + return FmtUnknown, errors.New("unknown open metrics version string") +} + +// WithEscapingScheme returns a copy of Format with the specified escaping +// scheme appended to the end. If an escaping scheme already exists it is +// removed. +func (f Format) WithEscapingScheme(s model.EscapingScheme) Format { + var terms []string + for _, p := range strings.Split(string(f), ";") { + toks := strings.Split(p, "=") + if len(toks) != 2 { + trimmed := strings.TrimSpace(p) + if len(trimmed) > 0 { + terms = append(terms, trimmed) + } + continue + } + key := strings.TrimSpace(toks[0]) + if key != model.EscapingKey { + terms = append(terms, strings.TrimSpace(p)) + } + } + terms = append(terms, model.EscapingKey+"="+s.String()) + return Format(strings.Join(terms, "; ")) } // FormatType deduces an overall FormatType for the given format. 
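
The WithEscapingScheme helper added above is what the testutil change earlier in this patch relies on; a small sketch (buffer and format choice are illustrative):

	var buf bytes.Buffer
	format := expfmt.NewFormat(expfmt.TypeTextPlain).WithEscapingScheme(model.NoEscaping)
	// The escaping parameter travels in the content type, so the encoder emits
	// metric names as-is instead of underscore-escaping them.
	enc := expfmt.NewEncoder(&buf, format)
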
diff --git a/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go b/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go index 353c5e93f..a21ed4ec1 100644 --- a/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go +++ b/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go @@ -38,7 +38,7 @@ type EncoderOption func(*encoderOption) // WithCreatedLines is an EncoderOption that configures the OpenMetrics encoder // to include _created lines (See -// https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#counter-1). +// https://github.com/prometheus/OpenMetrics/blob/v1.0.0/specification/OpenMetrics.md#counter-1). // Created timestamps can improve the accuracy of series reset detection, but // come with a bandwidth cost. // @@ -102,7 +102,7 @@ func WithUnit() EncoderOption { // // - According to the OM specs, the `# UNIT` line is optional, but if populated, // the unit has to be present in the metric name as its suffix: -// (see https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#unit). +// (see https://github.com/prometheus/OpenMetrics/blob/v1.0.0/specification/OpenMetrics.md#unit). // However, in order to accommodate any potential scenario where such a change in the // metric name is not desirable, the users are here given the choice of either explicitly // opt in, in case they wish for the unit to be included in the output AND in the metric name @@ -152,8 +152,8 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily, options ...E if metricType == dto.MetricType_COUNTER && strings.HasSuffix(compliantName, "_total") { compliantName = name[:len(name)-6] } - if toOM.withUnit && in.Unit != nil && !strings.HasSuffix(compliantName, fmt.Sprintf("_%s", *in.Unit)) { - compliantName = compliantName + fmt.Sprintf("_%s", *in.Unit) + if toOM.withUnit && in.Unit != nil && !strings.HasSuffix(compliantName, "_"+*in.Unit) { + compliantName = compliantName + "_" + *in.Unit } // Comments, first HELP, then TYPE. @@ -477,7 +477,7 @@ func writeOpenMetricsNameAndLabelPairs( if name != "" { // If the name does not pass the legacy validity check, we must put the // metric name inside the braces, quoted. - if !model.IsValidLegacyMetricName(model.LabelValue(name)) { + if !model.IsValidLegacyMetricName(name) { metricInsideBraces = true err := w.WriteByte(separator) written++ diff --git a/vendor/github.com/prometheus/common/expfmt/text_create.go b/vendor/github.com/prometheus/common/expfmt/text_create.go index f9b8265a9..4b86434b3 100644 --- a/vendor/github.com/prometheus/common/expfmt/text_create.go +++ b/vendor/github.com/prometheus/common/expfmt/text_create.go @@ -354,7 +354,7 @@ func writeNameAndLabelPairs( if name != "" { // If the name does not pass the legacy validity check, we must put the // metric name inside the braces. - if !model.IsValidLegacyMetricName(model.LabelValue(name)) { + if !model.IsValidLegacyMetricName(name) { metricInsideBraces = true err := w.WriteByte(separator) written++ @@ -498,7 +498,7 @@ func writeInt(w enhancedWriter, i int64) (int, error) { // writeName writes a string as-is if it complies with the legacy naming // scheme, or escapes it in double quotes if not. 
func writeName(w enhancedWriter, name string) (int, error) { - if model.IsValidLegacyMetricName(model.LabelValue(name)) { + if model.IsValidLegacyMetricName(name) { return w.WriteString(name) } var written int diff --git a/vendor/github.com/prometheus/common/expfmt/text_parse.go b/vendor/github.com/prometheus/common/expfmt/text_parse.go index 26490211a..b4607fe4d 100644 --- a/vendor/github.com/prometheus/common/expfmt/text_parse.go +++ b/vendor/github.com/prometheus/common/expfmt/text_parse.go @@ -22,9 +22,9 @@ import ( "math" "strconv" "strings" + "unicode/utf8" dto "github.com/prometheus/client_model/go" - "google.golang.org/protobuf/proto" "github.com/prometheus/common/model" @@ -60,6 +60,7 @@ type TextParser struct { currentMF *dto.MetricFamily currentMetric *dto.Metric currentLabelPair *dto.LabelPair + currentLabelPairs []*dto.LabelPair // Temporarily stores label pairs while parsing a metric line. // The remaining member variables are only used for summaries/histograms. currentLabels map[string]string // All labels including '__name__' but excluding 'quantile'/'le' @@ -74,6 +75,9 @@ type TextParser struct { // count and sum of that summary/histogram. currentIsSummaryCount, currentIsSummarySum bool currentIsHistogramCount, currentIsHistogramSum bool + // These indicate if the metric name from the current line being parsed is inside + // braces and if that metric name was found respectively. + currentMetricIsInsideBraces, currentMetricInsideBracesIsPresent bool } // TextToMetricFamilies reads 'in' as the simple and flat text-based exchange @@ -137,12 +141,15 @@ func (p *TextParser) reset(in io.Reader) { } p.currentQuantile = math.NaN() p.currentBucket = math.NaN() + p.currentMF = nil } // startOfLine represents the state where the next byte read from p.buf is the // start of a line (or whitespace leading up to it). func (p *TextParser) startOfLine() stateFn { p.lineCount++ + p.currentMetricIsInsideBraces = false + p.currentMetricInsideBracesIsPresent = false if p.skipBlankTab(); p.err != nil { // This is the only place that we expect to see io.EOF, // which is not an error but the signal that we are done. @@ -158,6 +165,9 @@ func (p *TextParser) startOfLine() stateFn { return p.startComment case '\n': return p.startOfLine // Empty line, start the next one. + case '{': + p.currentMetricIsInsideBraces = true + return p.readingLabels } return p.readingMetricName } @@ -275,6 +285,8 @@ func (p *TextParser) startLabelName() stateFn { return nil // Unexpected end of input. } if p.currentByte == '}' { + p.currentMetric.Label = append(p.currentMetric.Label, p.currentLabelPairs...) + p.currentLabelPairs = nil if p.skipBlankTab(); p.err != nil { return nil // Unexpected end of input. } @@ -287,6 +299,45 @@ func (p *TextParser) startLabelName() stateFn { p.parseError(fmt.Sprintf("invalid label name for metric %q", p.currentMF.GetName())) return nil } + if p.skipBlankTabIfCurrentBlankTab(); p.err != nil { + return nil // Unexpected end of input. 
+ } + if p.currentByte != '=' { + if p.currentMetricIsInsideBraces { + if p.currentMetricInsideBracesIsPresent { + p.parseError(fmt.Sprintf("multiple metric names for metric %q", p.currentMF.GetName())) + return nil + } + switch p.currentByte { + case ',': + p.setOrCreateCurrentMF() + if p.currentMF.Type == nil { + p.currentMF.Type = dto.MetricType_UNTYPED.Enum() + } + p.currentMetric = &dto.Metric{} + p.currentMetricInsideBracesIsPresent = true + return p.startLabelName + case '}': + p.setOrCreateCurrentMF() + if p.currentMF.Type == nil { + p.currentMF.Type = dto.MetricType_UNTYPED.Enum() + } + p.currentMetric = &dto.Metric{} + p.currentMetric.Label = append(p.currentMetric.Label, p.currentLabelPairs...) + p.currentLabelPairs = nil + if p.skipBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + return p.readingValue + default: + p.parseError(fmt.Sprintf("unexpected end of metric name %q", p.currentByte)) + return nil + } + } + p.parseError(fmt.Sprintf("expected '=' after label name, found %q", p.currentByte)) + p.currentLabelPairs = nil + return nil + } p.currentLabelPair = &dto.LabelPair{Name: proto.String(p.currentToken.String())} if p.currentLabelPair.GetName() == string(model.MetricNameLabel) { p.parseError(fmt.Sprintf("label name %q is reserved", model.MetricNameLabel)) @@ -296,23 +347,17 @@ func (p *TextParser) startLabelName() stateFn { // labels to 'real' labels. if !(p.currentMF.GetType() == dto.MetricType_SUMMARY && p.currentLabelPair.GetName() == model.QuantileLabel) && !(p.currentMF.GetType() == dto.MetricType_HISTOGRAM && p.currentLabelPair.GetName() == model.BucketLabel) { - p.currentMetric.Label = append(p.currentMetric.Label, p.currentLabelPair) - } - if p.skipBlankTabIfCurrentBlankTab(); p.err != nil { - return nil // Unexpected end of input. - } - if p.currentByte != '=' { - p.parseError(fmt.Sprintf("expected '=' after label name, found %q", p.currentByte)) - return nil + p.currentLabelPairs = append(p.currentLabelPairs, p.currentLabelPair) } // Check for duplicate label names. labels := make(map[string]struct{}) - for _, l := range p.currentMetric.Label { + for _, l := range p.currentLabelPairs { lName := l.GetName() if _, exists := labels[lName]; !exists { labels[lName] = struct{}{} } else { p.parseError(fmt.Sprintf("duplicate label names for metric %q", p.currentMF.GetName())) + p.currentLabelPairs = nil return nil } } @@ -345,6 +390,7 @@ func (p *TextParser) startLabelValue() stateFn { if p.currentQuantile, p.err = parseFloat(p.currentLabelPair.GetValue()); p.err != nil { // Create a more helpful error message. p.parseError(fmt.Sprintf("expected float as value for 'quantile' label, got %q", p.currentLabelPair.GetValue())) + p.currentLabelPairs = nil return nil } } else { @@ -371,12 +417,19 @@ func (p *TextParser) startLabelValue() stateFn { return p.startLabelName case '}': + if p.currentMF == nil { + p.parseError("invalid metric name") + return nil + } + p.currentMetric.Label = append(p.currentMetric.Label, p.currentLabelPairs...) + p.currentLabelPairs = nil if p.skipBlankTab(); p.err != nil { return nil // Unexpected end of input. 
} return p.readingValue default: p.parseError(fmt.Sprintf("unexpected end of label value %q", p.currentLabelPair.GetValue())) + p.currentLabelPairs = nil return nil } } @@ -585,6 +638,8 @@ func (p *TextParser) readTokenUntilNewline(recognizeEscapeSequence bool) { p.currentToken.WriteByte(p.currentByte) case 'n': p.currentToken.WriteByte('\n') + case '"': + p.currentToken.WriteByte('"') default: p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte)) return @@ -610,13 +665,45 @@ func (p *TextParser) readTokenUntilNewline(recognizeEscapeSequence bool) { // but not into p.currentToken. func (p *TextParser) readTokenAsMetricName() { p.currentToken.Reset() + // A UTF-8 metric name must be quoted and may have escaped characters. + quoted := false + escaped := false if !isValidMetricNameStart(p.currentByte) { return } - for { - p.currentToken.WriteByte(p.currentByte) + for p.err == nil { + if escaped { + switch p.currentByte { + case '\\': + p.currentToken.WriteByte(p.currentByte) + case 'n': + p.currentToken.WriteByte('\n') + case '"': + p.currentToken.WriteByte('"') + default: + p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte)) + return + } + escaped = false + } else { + switch p.currentByte { + case '"': + quoted = !quoted + if !quoted { + p.currentByte, p.err = p.buf.ReadByte() + return + } + case '\n': + p.parseError(fmt.Sprintf("metric name %q contains unescaped new-line", p.currentToken.String())) + return + case '\\': + escaped = true + default: + p.currentToken.WriteByte(p.currentByte) + } + } p.currentByte, p.err = p.buf.ReadByte() - if p.err != nil || !isValidMetricNameContinuation(p.currentByte) { + if !isValidMetricNameContinuation(p.currentByte, quoted) || (!quoted && p.currentByte == ' ') { return } } @@ -628,13 +715,45 @@ func (p *TextParser) readTokenAsMetricName() { // but not into p.currentToken. func (p *TextParser) readTokenAsLabelName() { p.currentToken.Reset() + // A UTF-8 label name must be quoted and may have escaped characters. 
+ quoted := false + escaped := false if !isValidLabelNameStart(p.currentByte) { return } - for { - p.currentToken.WriteByte(p.currentByte) + for p.err == nil { + if escaped { + switch p.currentByte { + case '\\': + p.currentToken.WriteByte(p.currentByte) + case 'n': + p.currentToken.WriteByte('\n') + case '"': + p.currentToken.WriteByte('"') + default: + p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte)) + return + } + escaped = false + } else { + switch p.currentByte { + case '"': + quoted = !quoted + if !quoted { + p.currentByte, p.err = p.buf.ReadByte() + return + } + case '\n': + p.parseError(fmt.Sprintf("label name %q contains unescaped new-line", p.currentToken.String())) + return + case '\\': + escaped = true + default: + p.currentToken.WriteByte(p.currentByte) + } + } p.currentByte, p.err = p.buf.ReadByte() - if p.err != nil || !isValidLabelNameContinuation(p.currentByte) { + if !isValidLabelNameContinuation(p.currentByte, quoted) || (!quoted && p.currentByte == '=') { return } } @@ -660,6 +779,7 @@ func (p *TextParser) readTokenAsLabelValue() { p.currentToken.WriteByte('\n') default: p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte)) + p.currentLabelPairs = nil return } escaped = false @@ -718,19 +838,19 @@ func (p *TextParser) setOrCreateCurrentMF() { } func isValidLabelNameStart(b byte) bool { - return (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' + return (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || b == '"' } -func isValidLabelNameContinuation(b byte) bool { - return isValidLabelNameStart(b) || (b >= '0' && b <= '9') +func isValidLabelNameContinuation(b byte, quoted bool) bool { + return isValidLabelNameStart(b) || (b >= '0' && b <= '9') || (quoted && utf8.ValidString(string(b))) } func isValidMetricNameStart(b byte) bool { return isValidLabelNameStart(b) || b == ':' } -func isValidMetricNameContinuation(b byte) bool { - return isValidLabelNameContinuation(b) || b == ':' +func isValidMetricNameContinuation(b byte, quoted bool) bool { + return isValidLabelNameContinuation(b, quoted) || b == ':' } func isBlankOrTab(b byte) bool { @@ -775,7 +895,7 @@ func histogramMetricName(name string) string { func parseFloat(s string) (float64, error) { if strings.ContainsAny(s, "pP_") { - return 0, fmt.Errorf("unsupported character in float") + return 0, errors.New("unsupported character in float") } return strconv.ParseFloat(s, 64) } diff --git a/vendor/github.com/prometheus/common/model/alert.go b/vendor/github.com/prometheus/common/model/alert.go index 80d1fe944..bd3a39e3e 100644 --- a/vendor/github.com/prometheus/common/model/alert.go +++ b/vendor/github.com/prometheus/common/model/alert.go @@ -14,6 +14,7 @@ package model import ( + "errors" "fmt" "time" ) @@ -89,16 +90,16 @@ func (a *Alert) StatusAt(ts time.Time) AlertStatus { // Validate checks whether the alert data is inconsistent. 
func (a *Alert) Validate() error { if a.StartsAt.IsZero() { - return fmt.Errorf("start time missing") + return errors.New("start time missing") } if !a.EndsAt.IsZero() && a.EndsAt.Before(a.StartsAt) { - return fmt.Errorf("start time must be before end time") + return errors.New("start time must be before end time") } if err := a.Labels.Validate(); err != nil { return fmt.Errorf("invalid label set: %w", err) } if len(a.Labels) == 0 { - return fmt.Errorf("at least one label pair required") + return errors.New("at least one label pair required") } if err := a.Annotations.Validate(); err != nil { return fmt.Errorf("invalid annotations: %w", err) diff --git a/vendor/github.com/prometheus/common/model/labels.go b/vendor/github.com/prometheus/common/model/labels.go index 3317ce22f..73b7aa3e6 100644 --- a/vendor/github.com/prometheus/common/model/labels.go +++ b/vendor/github.com/prometheus/common/model/labels.go @@ -97,26 +97,35 @@ var LabelNameRE = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]*$") // therewith. type LabelName string -// IsValid returns true iff name matches the pattern of LabelNameRE for legacy -// names, and iff it's valid UTF-8 if NameValidationScheme is set to -// UTF8Validation. For the legacy matching, it does not use LabelNameRE for the -// check but a much faster hardcoded implementation. +// IsValid returns true iff the name matches the pattern of LabelNameRE when +// NameValidationScheme is set to LegacyValidation, or valid UTF-8 if +// NameValidationScheme is set to UTF8Validation. func (ln LabelName) IsValid() bool { if len(ln) == 0 { return false } switch NameValidationScheme { case LegacyValidation: - for i, b := range ln { - if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0)) { - return false - } - } + return ln.IsValidLegacy() case UTF8Validation: return utf8.ValidString(string(ln)) default: panic(fmt.Sprintf("Invalid name validation scheme requested: %d", NameValidationScheme)) } +} + +// IsValidLegacy returns true iff name matches the pattern of LabelNameRE for +// legacy names. It does not use LabelNameRE for the check but a much faster +// hardcoded implementation. +func (ln LabelName) IsValidLegacy() bool { + if len(ln) == 0 { + return false + } + for i, b := range ln { + if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0)) { + return false + } + } return true } diff --git a/vendor/github.com/prometheus/common/model/labelset_string.go b/vendor/github.com/prometheus/common/model/labelset_string.go index 481c47b46..abb2c9001 100644 --- a/vendor/github.com/prometheus/common/model/labelset_string.go +++ b/vendor/github.com/prometheus/common/model/labelset_string.go @@ -11,8 +11,6 @@ // See the License for the specific language governing permissions and // limitations under the License. -//go:build go1.21 - package model import ( diff --git a/vendor/github.com/prometheus/common/model/labelset_string_go120.go b/vendor/github.com/prometheus/common/model/labelset_string_go120.go deleted file mode 100644 index c4212685e..000000000 --- a/vendor/github.com/prometheus/common/model/labelset_string_go120.go +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright 2024 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build !go1.21 - -package model - -import ( - "fmt" - "sort" - "strings" -) - -// String was optimized using functions not available for go 1.20 -// or lower. We keep the old implementation for compatibility with client_golang. -// Once client golang drops support for go 1.20 (scheduled for August 2024), this -// file can be removed. -func (l LabelSet) String() string { - labelNames := make([]string, 0, len(l)) - for name := range l { - labelNames = append(labelNames, string(name)) - } - sort.Strings(labelNames) - lstrs := make([]string, 0, len(l)) - for _, name := range labelNames { - lstrs = append(lstrs, fmt.Sprintf("%s=%q", name, l[LabelName(name)])) - } - return fmt.Sprintf("{%s}", strings.Join(lstrs, ", ")) -} diff --git a/vendor/github.com/prometheus/common/model/metric.go b/vendor/github.com/prometheus/common/model/metric.go index eb865e5a5..5766107cf 100644 --- a/vendor/github.com/prometheus/common/model/metric.go +++ b/vendor/github.com/prometheus/common/model/metric.go @@ -14,9 +14,11 @@ package model import ( + "errors" "fmt" "regexp" "sort" + "strconv" "strings" "unicode/utf8" @@ -26,18 +28,21 @@ import ( var ( // NameValidationScheme determines the method of name validation to be used by - // all calls to IsValidMetricName() and LabelName IsValid(). Setting UTF-8 mode - // in isolation from other components that don't support UTF-8 may result in - // bugs or other undefined behavior. This value is intended to be set by - // UTF-8-aware binaries as part of their startup. To avoid need for locking, - // this value should be set once, ideally in an init(), before multiple - // goroutines are started. - NameValidationScheme = LegacyValidation - - // NameEscapingScheme defines the default way that names will be - // escaped when presented to systems that do not support UTF-8 names. If the - // Content-Type "escaping" term is specified, that will override this value. - NameEscapingScheme = ValueEncodingEscaping + // all calls to IsValidMetricName() and LabelName IsValid(). Setting UTF-8 + // mode in isolation from other components that don't support UTF-8 may result + // in bugs or other undefined behavior. This value can be set to + // LegacyValidation during startup if a binary is not UTF-8-aware binaries. To + // avoid need for locking, this value should be set once, ideally in an + // init(), before multiple goroutines are started. + NameValidationScheme = UTF8Validation + + // NameEscapingScheme defines the default way that names will be escaped when + // presented to systems that do not support UTF-8 names. If the Content-Type + // "escaping" term is specified, that will override this value. + // NameEscapingScheme should not be set to the NoEscaping value. That string + // is used in content negotiation to indicate that a system supports UTF-8 and + // has that feature enabled. 
+ NameEscapingScheme = UnderscoreEscaping ) // ValidationScheme is a Go enum for determining how metric and label names will @@ -161,7 +166,7 @@ func (m Metric) FastFingerprint() Fingerprint { func IsValidMetricName(n LabelValue) bool { switch NameValidationScheme { case LegacyValidation: - return IsValidLegacyMetricName(n) + return IsValidLegacyMetricName(string(n)) case UTF8Validation: if len(n) == 0 { return false @@ -176,7 +181,7 @@ func IsValidMetricName(n LabelValue) bool { // legacy validation scheme regardless of the value of NameValidationScheme. // This function, however, does not use MetricNameRE for the check but a much // faster hardcoded implementation. -func IsValidLegacyMetricName(n LabelValue) bool { +func IsValidLegacyMetricName(n string) bool { if len(n) == 0 { return false } @@ -208,7 +213,7 @@ func EscapeMetricFamily(v *dto.MetricFamily, scheme EscapingScheme) *dto.MetricF } // If the name is nil, copy as-is, don't try to escape. - if v.Name == nil || IsValidLegacyMetricName(LabelValue(v.GetName())) { + if v.Name == nil || IsValidLegacyMetricName(v.GetName()) { out.Name = v.Name } else { out.Name = proto.String(EscapeName(v.GetName(), scheme)) @@ -230,7 +235,7 @@ func EscapeMetricFamily(v *dto.MetricFamily, scheme EscapingScheme) *dto.MetricF for _, l := range m.Label { if l.GetName() == MetricNameLabel { - if l.Value == nil || IsValidLegacyMetricName(LabelValue(l.GetValue())) { + if l.Value == nil || IsValidLegacyMetricName(l.GetValue()) { escaped.Label = append(escaped.Label, l) continue } @@ -240,7 +245,7 @@ func EscapeMetricFamily(v *dto.MetricFamily, scheme EscapingScheme) *dto.MetricF }) continue } - if l.Name == nil || IsValidLegacyMetricName(LabelValue(l.GetName())) { + if l.Name == nil || IsValidLegacyMetricName(l.GetName()) { escaped.Label = append(escaped.Label, l) continue } @@ -256,20 +261,16 @@ func EscapeMetricFamily(v *dto.MetricFamily, scheme EscapingScheme) *dto.MetricF func metricNeedsEscaping(m *dto.Metric) bool { for _, l := range m.Label { - if l.GetName() == MetricNameLabel && !IsValidLegacyMetricName(LabelValue(l.GetValue())) { + if l.GetName() == MetricNameLabel && !IsValidLegacyMetricName(l.GetValue()) { return true } - if !IsValidLegacyMetricName(LabelValue(l.GetName())) { + if !IsValidLegacyMetricName(l.GetName()) { return true } } return false } -const ( - lowerhex = "0123456789abcdef" -) - // EscapeName escapes the incoming name according to the provided escaping // scheme. Depending on the rules of escaping, this may cause no change in the // string that is returned. 
(Especially NoEscaping, which by definition is a @@ -283,7 +284,7 @@ func EscapeName(name string, scheme EscapingScheme) string { case NoEscaping: return name case UnderscoreEscaping: - if IsValidLegacyMetricName(LabelValue(name)) { + if IsValidLegacyMetricName(name) { return name } for i, b := range name { @@ -304,31 +305,25 @@ func EscapeName(name string, scheme EscapingScheme) string { } else if isValidLegacyRune(b, i) { escaped.WriteRune(b) } else { - escaped.WriteRune('_') + escaped.WriteString("__") } } return escaped.String() case ValueEncodingEscaping: - if IsValidLegacyMetricName(LabelValue(name)) { + if IsValidLegacyMetricName(name) { return name } escaped.WriteString("U__") for i, b := range name { - if isValidLegacyRune(b, i) { + if b == '_' { + escaped.WriteString("__") + } else if isValidLegacyRune(b, i) { escaped.WriteRune(b) } else if !utf8.ValidRune(b) { escaped.WriteString("_FFFD_") - } else if b < 0x100 { - escaped.WriteRune('_') - for s := 4; s >= 0; s -= 4 { - escaped.WriteByte(lowerhex[b>>uint(s)&0xF]) - } - escaped.WriteRune('_') - } else if b < 0x10000 { + } else { escaped.WriteRune('_') - for s := 12; s >= 0; s -= 4 { - escaped.WriteByte(lowerhex[b>>uint(s)&0xF]) - } + escaped.WriteString(strconv.FormatInt(int64(b), 16)) escaped.WriteRune('_') } } @@ -386,8 +381,9 @@ func UnescapeName(name string, scheme EscapingScheme) string { // We think we are in a UTF-8 code, process it. var utf8Val uint for j := 0; i < len(escapedName); j++ { - // This is too many characters for a utf8 value. - if j > 4 { + // This is too many characters for a utf8 value based on the MaxRune + // value of '\U0010FFFF'. + if j >= 6 { return name } // Found a closing underscore, convert to a rune, check validity, and append. @@ -440,7 +436,7 @@ func (e EscapingScheme) String() string { func ToEscapingScheme(s string) (EscapingScheme, error) { if s == "" { - return NoEscaping, fmt.Errorf("got empty string instead of escaping scheme") + return NoEscaping, errors.New("got empty string instead of escaping scheme") } switch s { case AllowUTF8: @@ -452,6 +448,6 @@ func ToEscapingScheme(s string) (EscapingScheme, error) { case EscapeValues: return ValueEncodingEscaping, nil default: - return NoEscaping, fmt.Errorf("unknown format scheme " + s) + return NoEscaping, fmt.Errorf("unknown format scheme %s", s) } } diff --git a/vendor/github.com/prometheus/common/model/silence.go b/vendor/github.com/prometheus/common/model/silence.go index 910b0b71f..8f91a9702 100644 --- a/vendor/github.com/prometheus/common/model/silence.go +++ b/vendor/github.com/prometheus/common/model/silence.go @@ -15,6 +15,7 @@ package model import ( "encoding/json" + "errors" "fmt" "regexp" "time" @@ -34,7 +35,7 @@ func (m *Matcher) UnmarshalJSON(b []byte) error { } if len(m.Name) == 0 { - return fmt.Errorf("label name in matcher must not be empty") + return errors.New("label name in matcher must not be empty") } if m.IsRegex { if _, err := regexp.Compile(m.Value); err != nil { @@ -77,7 +78,7 @@ type Silence struct { // Validate returns true iff all fields of the silence have valid values. 
func (s *Silence) Validate() error { if len(s.Matchers) == 0 { - return fmt.Errorf("at least one matcher required") + return errors.New("at least one matcher required") } for _, m := range s.Matchers { if err := m.Validate(); err != nil { @@ -85,22 +86,22 @@ func (s *Silence) Validate() error { } } if s.StartsAt.IsZero() { - return fmt.Errorf("start time missing") + return errors.New("start time missing") } if s.EndsAt.IsZero() { - return fmt.Errorf("end time missing") + return errors.New("end time missing") } if s.EndsAt.Before(s.StartsAt) { - return fmt.Errorf("start time must be before end time") + return errors.New("start time must be before end time") } if s.CreatedBy == "" { - return fmt.Errorf("creator information missing") + return errors.New("creator information missing") } if s.Comment == "" { - return fmt.Errorf("comment missing") + return errors.New("comment missing") } if s.CreatedAt.IsZero() { - return fmt.Errorf("creation timestamp missing") + return errors.New("creation timestamp missing") } return nil } diff --git a/vendor/github.com/prometheus/common/model/value_float.go b/vendor/github.com/prometheus/common/model/value_float.go index ae35cc2ab..6bfc757d1 100644 --- a/vendor/github.com/prometheus/common/model/value_float.go +++ b/vendor/github.com/prometheus/common/model/value_float.go @@ -15,6 +15,7 @@ package model import ( "encoding/json" + "errors" "fmt" "math" "strconv" @@ -39,7 +40,7 @@ func (v SampleValue) MarshalJSON() ([]byte, error) { // UnmarshalJSON implements json.Unmarshaler. func (v *SampleValue) UnmarshalJSON(b []byte) error { if len(b) < 2 || b[0] != '"' || b[len(b)-1] != '"' { - return fmt.Errorf("sample value must be a quoted string") + return errors.New("sample value must be a quoted string") } f, err := strconv.ParseFloat(string(b[1:len(b)-1]), 64) if err != nil { diff --git a/vendor/github.com/prometheus/common/model/value_histogram.go b/vendor/github.com/prometheus/common/model/value_histogram.go index 54bb038cf..895e6a3e8 100644 --- a/vendor/github.com/prometheus/common/model/value_histogram.go +++ b/vendor/github.com/prometheus/common/model/value_histogram.go @@ -15,6 +15,7 @@ package model import ( "encoding/json" + "errors" "fmt" "strconv" "strings" @@ -32,7 +33,7 @@ func (v FloatString) MarshalJSON() ([]byte, error) { func (v *FloatString) UnmarshalJSON(b []byte) error { if len(b) < 2 || b[0] != '"' || b[len(b)-1] != '"' { - return fmt.Errorf("float value must be a quoted string") + return errors.New("float value must be a quoted string") } f, err := strconv.ParseFloat(string(b[1:len(b)-1]), 64) if err != nil { @@ -141,7 +142,7 @@ type SampleHistogramPair struct { func (s SampleHistogramPair) MarshalJSON() ([]byte, error) { if s.Histogram == nil { - return nil, fmt.Errorf("histogram is nil") + return nil, errors.New("histogram is nil") } t, err := json.Marshal(s.Timestamp) if err != nil { @@ -164,7 +165,7 @@ func (s *SampleHistogramPair) UnmarshalJSON(buf []byte) error { return fmt.Errorf("wrong number of fields: %d != %d", gotLen, wantLen) } if s.Histogram == nil { - return fmt.Errorf("histogram is null") + return errors.New("histogram is null") } return nil } diff --git a/vendor/github.com/sergi/go-diff/AUTHORS b/vendor/github.com/sergi/go-diff/AUTHORS new file mode 100644 index 000000000..2d7bb2bf5 --- /dev/null +++ b/vendor/github.com/sergi/go-diff/AUTHORS @@ -0,0 +1,25 @@ +# This is the official list of go-diff authors for copyright purposes. +# This file is distinct from the CONTRIBUTORS files. +# See the latter for an explanation. 
+ +# Names should be added to this file as +# Name or Organization +# The email address is not required for organizations. + +# Please keep the list sorted. + +Danny Yoo +James Kolb +Jonathan Amsterdam +Markus Zimmermann +Matt Kovars +Örjan Persson +Osman Masood +Robert Carlsen +Rory Flynn +Sergi Mansilla +Shatrugna Sadhu +Shawn Smith +Stas Maksimov +Tor Arvid Lund +Zac Bergquist diff --git a/vendor/github.com/sergi/go-diff/CONTRIBUTORS b/vendor/github.com/sergi/go-diff/CONTRIBUTORS new file mode 100644 index 000000000..369e3d551 --- /dev/null +++ b/vendor/github.com/sergi/go-diff/CONTRIBUTORS @@ -0,0 +1,32 @@ +# This is the official list of people who can contribute +# (and typically have contributed) code to the go-diff +# repository. +# +# The AUTHORS file lists the copyright holders; this file +# lists people. For example, ACME Inc. employees would be listed here +# but not in AUTHORS, because ACME Inc. would hold the copyright. +# +# When adding J Random Contributor's name to this file, +# either J's name or J's organization's name should be +# added to the AUTHORS file. +# +# Names should be added to this file like so: +# Name +# +# Please keep the list sorted. + +Danny Yoo +James Kolb +Jonathan Amsterdam +Markus Zimmermann +Matt Kovars +Örjan Persson +Osman Masood +Robert Carlsen +Rory Flynn +Sergi Mansilla +Shatrugna Sadhu +Shawn Smith +Stas Maksimov +Tor Arvid Lund +Zac Bergquist diff --git a/vendor/github.com/sergi/go-diff/LICENSE b/vendor/github.com/sergi/go-diff/LICENSE new file mode 100644 index 000000000..937942c2b --- /dev/null +++ b/vendor/github.com/sergi/go-diff/LICENSE @@ -0,0 +1,20 @@ +Copyright (c) 2012-2016 The go-diff Authors. All rights reserved. + +Permission is hereby granted, free of charge, to any person obtaining a +copy of this software and associated documentation files (the "Software"), +to deal in the Software without restriction, including without limitation +the rights to use, copy, modify, merge, publish, distribute, sublicense, +and/or sell copies of the Software, and to permit persons to whom the +Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included +in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. + diff --git a/vendor/github.com/sergi/go-diff/diffmatchpatch/diff.go b/vendor/github.com/sergi/go-diff/diffmatchpatch/diff.go new file mode 100644 index 000000000..2a9f2dc3b --- /dev/null +++ b/vendor/github.com/sergi/go-diff/diffmatchpatch/diff.go @@ -0,0 +1,1352 @@ +// Copyright (c) 2012-2016 The go-diff authors. All rights reserved. +// https://github.com/sergi/go-diff +// See the included LICENSE file for license details. +// +// go-diff is a Go implementation of Google's Diff, Match, and Patch library +// Original library is Copyright (c) 2006 Google Inc. 
+// http://code.google.com/p/google-diff-match-patch/ + +package diffmatchpatch + +import ( + "bytes" + "errors" + "fmt" + "html" + "math" + "net/url" + "regexp" + "strconv" + "strings" + "time" + "unicode/utf8" +) + +// Operation defines the operation of a diff item. +type Operation int8 + +//go:generate stringer -type=Operation -trimprefix=Diff + +const ( + // DiffDelete item represents a delete diff. + DiffDelete Operation = -1 + // DiffInsert item represents an insert diff. + DiffInsert Operation = 1 + // DiffEqual item represents an equal diff. + DiffEqual Operation = 0 + //IndexSeparator is used to seperate the array indexes in an index string + IndexSeparator = "," +) + +// Diff represents one diff operation +type Diff struct { + Type Operation + Text string +} + +// splice removes amount elements from slice at index index, replacing them with elements. +func splice(slice []Diff, index int, amount int, elements ...Diff) []Diff { + if len(elements) == amount { + // Easy case: overwrite the relevant items. + copy(slice[index:], elements) + return slice + } + if len(elements) < amount { + // Fewer new items than old. + // Copy in the new items. + copy(slice[index:], elements) + // Shift the remaining items left. + copy(slice[index+len(elements):], slice[index+amount:]) + // Calculate the new end of the slice. + end := len(slice) - amount + len(elements) + // Zero stranded elements at end so that they can be garbage collected. + tail := slice[end:] + for i := range tail { + tail[i] = Diff{} + } + return slice[:end] + } + // More new items than old. + // Make room in slice for new elements. + // There's probably an even more efficient way to do this, + // but this is simple and clear. + need := len(slice) - amount + len(elements) + for len(slice) < need { + slice = append(slice, Diff{}) + } + // Shift slice elements right to make room for new elements. + copy(slice[index+len(elements):], slice[index+amount:]) + // Copy in new elements. + copy(slice[index:], elements) + return slice +} + +// DiffMain finds the differences between two texts. +// If an invalid UTF-8 sequence is encountered, it will be replaced by the Unicode replacement character. +func (dmp *DiffMatchPatch) DiffMain(text1, text2 string, checklines bool) []Diff { + return dmp.DiffMainRunes([]rune(text1), []rune(text2), checklines) +} + +// DiffMainRunes finds the differences between two rune sequences. +// If an invalid UTF-8 sequence is encountered, it will be replaced by the Unicode replacement character. +func (dmp *DiffMatchPatch) DiffMainRunes(text1, text2 []rune, checklines bool) []Diff { + var deadline time.Time + if dmp.DiffTimeout > 0 { + deadline = time.Now().Add(dmp.DiffTimeout) + } + return dmp.diffMainRunes(text1, text2, checklines, deadline) +} + +func (dmp *DiffMatchPatch) diffMainRunes(text1, text2 []rune, checklines bool, deadline time.Time) []Diff { + if runesEqual(text1, text2) { + var diffs []Diff + if len(text1) > 0 { + diffs = append(diffs, Diff{DiffEqual, string(text1)}) + } + return diffs + } + // Trim off common prefix (speedup). + commonlength := commonPrefixLength(text1, text2) + commonprefix := text1[:commonlength] + text1 = text1[commonlength:] + text2 = text2[commonlength:] + + // Trim off common suffix (speedup). + commonlength = commonSuffixLength(text1, text2) + commonsuffix := text1[len(text1)-commonlength:] + text1 = text1[:len(text1)-commonlength] + text2 = text2[:len(text2)-commonlength] + + // Compute the diff on the middle block. 
+ diffs := dmp.diffCompute(text1, text2, checklines, deadline) + + // Restore the prefix and suffix. + if len(commonprefix) != 0 { + diffs = append([]Diff{{DiffEqual, string(commonprefix)}}, diffs...) + } + if len(commonsuffix) != 0 { + diffs = append(diffs, Diff{DiffEqual, string(commonsuffix)}) + } + + return dmp.DiffCleanupMerge(diffs) +} + +// diffCompute finds the differences between two rune slices. Assumes that the texts do not have any common prefix or suffix. +func (dmp *DiffMatchPatch) diffCompute(text1, text2 []rune, checklines bool, deadline time.Time) []Diff { + diffs := []Diff{} + if len(text1) == 0 { + // Just add some text (speedup). + return append(diffs, Diff{DiffInsert, string(text2)}) + } else if len(text2) == 0 { + // Just delete some text (speedup). + return append(diffs, Diff{DiffDelete, string(text1)}) + } + + var longtext, shorttext []rune + if len(text1) > len(text2) { + longtext = text1 + shorttext = text2 + } else { + longtext = text2 + shorttext = text1 + } + + if i := runesIndex(longtext, shorttext); i != -1 { + op := DiffInsert + // Swap insertions for deletions if diff is reversed. + if len(text1) > len(text2) { + op = DiffDelete + } + // Shorter text is inside the longer text (speedup). + return []Diff{ + Diff{op, string(longtext[:i])}, + Diff{DiffEqual, string(shorttext)}, + Diff{op, string(longtext[i+len(shorttext):])}, + } + } else if len(shorttext) == 1 { + // Single character string. + // After the previous speedup, the character can't be an equality. + return []Diff{ + {DiffDelete, string(text1)}, + {DiffInsert, string(text2)}, + } + // Check to see if the problem can be split in two. + } else if hm := dmp.diffHalfMatch(text1, text2); hm != nil { + // A half-match was found, sort out the return data. + text1A := hm[0] + text1B := hm[1] + text2A := hm[2] + text2B := hm[3] + midCommon := hm[4] + // Send both pairs off for separate processing. + diffsA := dmp.diffMainRunes(text1A, text2A, checklines, deadline) + diffsB := dmp.diffMainRunes(text1B, text2B, checklines, deadline) + // Merge the results. + diffs := diffsA + diffs = append(diffs, Diff{DiffEqual, string(midCommon)}) + diffs = append(diffs, diffsB...) + return diffs + } else if checklines && len(text1) > 100 && len(text2) > 100 { + return dmp.diffLineMode(text1, text2, deadline) + } + return dmp.diffBisect(text1, text2, deadline) +} + +// diffLineMode does a quick line-level diff on both []runes, then rediff the parts for greater accuracy. This speedup can produce non-minimal diffs. +func (dmp *DiffMatchPatch) diffLineMode(text1, text2 []rune, deadline time.Time) []Diff { + // Scan the text on a line-by-line basis first. + text1, text2, linearray := dmp.DiffLinesToRunes(string(text1), string(text2)) + + diffs := dmp.diffMainRunes(text1, text2, false, deadline) + + // Convert the diff back to original text. + diffs = dmp.DiffCharsToLines(diffs, linearray) + // Eliminate freak matches (e.g. blank lines) + diffs = dmp.DiffCleanupSemantic(diffs) + + // Rediff any replacement blocks, this time character-by-character. + // Add a dummy entry at the end. + diffs = append(diffs, Diff{DiffEqual, ""}) + + pointer := 0 + countDelete := 0 + countInsert := 0 + + // NOTE: Rune slices are slower than using strings in this case. 
+ textDelete := "" + textInsert := "" + + for pointer < len(diffs) { + switch diffs[pointer].Type { + case DiffInsert: + countInsert++ + textInsert += diffs[pointer].Text + case DiffDelete: + countDelete++ + textDelete += diffs[pointer].Text + case DiffEqual: + // Upon reaching an equality, check for prior redundancies. + if countDelete >= 1 && countInsert >= 1 { + // Delete the offending records and add the merged ones. + diffs = splice(diffs, pointer-countDelete-countInsert, + countDelete+countInsert) + + pointer = pointer - countDelete - countInsert + a := dmp.diffMainRunes([]rune(textDelete), []rune(textInsert), false, deadline) + for j := len(a) - 1; j >= 0; j-- { + diffs = splice(diffs, pointer, 0, a[j]) + } + pointer = pointer + len(a) + } + + countInsert = 0 + countDelete = 0 + textDelete = "" + textInsert = "" + } + pointer++ + } + + return diffs[:len(diffs)-1] // Remove the dummy entry at the end. +} + +// DiffBisect finds the 'middle snake' of a diff, split the problem in two and return the recursively constructed diff. +// If an invalid UTF-8 sequence is encountered, it will be replaced by the Unicode replacement character. +// See Myers 1986 paper: An O(ND) Difference Algorithm and Its Variations. +func (dmp *DiffMatchPatch) DiffBisect(text1, text2 string, deadline time.Time) []Diff { + // Unused in this code, but retained for interface compatibility. + return dmp.diffBisect([]rune(text1), []rune(text2), deadline) +} + +// diffBisect finds the 'middle snake' of a diff, splits the problem in two and returns the recursively constructed diff. +// See Myers's 1986 paper: An O(ND) Difference Algorithm and Its Variations. +func (dmp *DiffMatchPatch) diffBisect(runes1, runes2 []rune, deadline time.Time) []Diff { + // Cache the text lengths to prevent multiple calls. + runes1Len, runes2Len := len(runes1), len(runes2) + + maxD := (runes1Len + runes2Len + 1) / 2 + vOffset := maxD + vLength := 2 * maxD + + v1 := make([]int, vLength) + v2 := make([]int, vLength) + for i := range v1 { + v1[i] = -1 + v2[i] = -1 + } + v1[vOffset+1] = 0 + v2[vOffset+1] = 0 + + delta := runes1Len - runes2Len + // If the total number of characters is odd, then the front path will collide with the reverse path. + front := (delta%2 != 0) + // Offsets for start and end of k loop. Prevents mapping of space beyond the grid. + k1start := 0 + k1end := 0 + k2start := 0 + k2end := 0 + for d := 0; d < maxD; d++ { + // Bail out if deadline is reached. + if !deadline.IsZero() && d%16 == 0 && time.Now().After(deadline) { + break + } + + // Walk the front path one step. + for k1 := -d + k1start; k1 <= d-k1end; k1 += 2 { + k1Offset := vOffset + k1 + var x1 int + + if k1 == -d || (k1 != d && v1[k1Offset-1] < v1[k1Offset+1]) { + x1 = v1[k1Offset+1] + } else { + x1 = v1[k1Offset-1] + 1 + } + + y1 := x1 - k1 + for x1 < runes1Len && y1 < runes2Len { + if runes1[x1] != runes2[y1] { + break + } + x1++ + y1++ + } + v1[k1Offset] = x1 + if x1 > runes1Len { + // Ran off the right of the graph. + k1end += 2 + } else if y1 > runes2Len { + // Ran off the bottom of the graph. + k1start += 2 + } else if front { + k2Offset := vOffset + delta - k1 + if k2Offset >= 0 && k2Offset < vLength && v2[k2Offset] != -1 { + // Mirror x2 onto top-left coordinate system. + x2 := runes1Len - v2[k2Offset] + if x1 >= x2 { + // Overlap detected. + return dmp.diffBisectSplit(runes1, runes2, x1, y1, deadline) + } + } + } + } + // Walk the reverse path one step. 
+ for k2 := -d + k2start; k2 <= d-k2end; k2 += 2 { + k2Offset := vOffset + k2 + var x2 int + if k2 == -d || (k2 != d && v2[k2Offset-1] < v2[k2Offset+1]) { + x2 = v2[k2Offset+1] + } else { + x2 = v2[k2Offset-1] + 1 + } + var y2 = x2 - k2 + for x2 < runes1Len && y2 < runes2Len { + if runes1[runes1Len-x2-1] != runes2[runes2Len-y2-1] { + break + } + x2++ + y2++ + } + v2[k2Offset] = x2 + if x2 > runes1Len { + // Ran off the left of the graph. + k2end += 2 + } else if y2 > runes2Len { + // Ran off the top of the graph. + k2start += 2 + } else if !front { + k1Offset := vOffset + delta - k2 + if k1Offset >= 0 && k1Offset < vLength && v1[k1Offset] != -1 { + x1 := v1[k1Offset] + y1 := vOffset + x1 - k1Offset + // Mirror x2 onto top-left coordinate system. + x2 = runes1Len - x2 + if x1 >= x2 { + // Overlap detected. + return dmp.diffBisectSplit(runes1, runes2, x1, y1, deadline) + } + } + } + } + } + // Diff took too long and hit the deadline or number of diffs equals number of characters, no commonality at all. + return []Diff{ + {DiffDelete, string(runes1)}, + {DiffInsert, string(runes2)}, + } +} + +func (dmp *DiffMatchPatch) diffBisectSplit(runes1, runes2 []rune, x, y int, + deadline time.Time) []Diff { + runes1a := runes1[:x] + runes2a := runes2[:y] + runes1b := runes1[x:] + runes2b := runes2[y:] + + // Compute both diffs serially. + diffs := dmp.diffMainRunes(runes1a, runes2a, false, deadline) + diffsb := dmp.diffMainRunes(runes1b, runes2b, false, deadline) + + return append(diffs, diffsb...) +} + +// DiffLinesToChars splits two texts into a list of strings, and educes the texts to a string of hashes where each Unicode character represents one line. +// It's slightly faster to call DiffLinesToRunes first, followed by DiffMainRunes. +func (dmp *DiffMatchPatch) DiffLinesToChars(text1, text2 string) (string, string, []string) { + chars1, chars2, lineArray := dmp.diffLinesToStrings(text1, text2) + return chars1, chars2, lineArray +} + +// DiffLinesToRunes splits two texts into a list of runes. +func (dmp *DiffMatchPatch) DiffLinesToRunes(text1, text2 string) ([]rune, []rune, []string) { + chars1, chars2, lineArray := dmp.diffLinesToStrings(text1, text2) + return []rune(chars1), []rune(chars2), lineArray +} + +// DiffCharsToLines rehydrates the text in a diff from a string of line hashes to real lines of text. +func (dmp *DiffMatchPatch) DiffCharsToLines(diffs []Diff, lineArray []string) []Diff { + hydrated := make([]Diff, 0, len(diffs)) + for _, aDiff := range diffs { + chars := strings.Split(aDiff.Text, IndexSeparator) + text := make([]string, len(chars)) + + for i, r := range chars { + i1, err := strconv.Atoi(r) + if err == nil { + text[i] = lineArray[i1] + } + } + + aDiff.Text = strings.Join(text, "") + hydrated = append(hydrated, aDiff) + } + return hydrated +} + +// DiffCommonPrefix determines the common prefix length of two strings. +func (dmp *DiffMatchPatch) DiffCommonPrefix(text1, text2 string) int { + // Unused in this code, but retained for interface compatibility. + return commonPrefixLength([]rune(text1), []rune(text2)) +} + +// DiffCommonSuffix determines the common suffix length of two strings. +func (dmp *DiffMatchPatch) DiffCommonSuffix(text1, text2 string) int { + // Unused in this code, but retained for interface compatibility. + return commonSuffixLength([]rune(text1), []rune(text2)) +} + +// commonPrefixLength returns the length of the common prefix of two rune slices. +func commonPrefixLength(text1, text2 []rune) int { + // Linear search. See comment in commonSuffixLength. 
+ n := 0 + for ; n < len(text1) && n < len(text2); n++ { + if text1[n] != text2[n] { + return n + } + } + return n +} + +// commonSuffixLength returns the length of the common suffix of two rune slices. +func commonSuffixLength(text1, text2 []rune) int { + // Use linear search rather than the binary search discussed at https://neil.fraser.name/news/2007/10/09/. + // See discussion at https://github.com/sergi/go-diff/issues/54. + i1 := len(text1) + i2 := len(text2) + for n := 0; ; n++ { + i1-- + i2-- + if i1 < 0 || i2 < 0 || text1[i1] != text2[i2] { + return n + } + } +} + +// DiffCommonOverlap determines if the suffix of one string is the prefix of another. +func (dmp *DiffMatchPatch) DiffCommonOverlap(text1 string, text2 string) int { + // Cache the text lengths to prevent multiple calls. + text1Length := len(text1) + text2Length := len(text2) + // Eliminate the null case. + if text1Length == 0 || text2Length == 0 { + return 0 + } + // Truncate the longer string. + if text1Length > text2Length { + text1 = text1[text1Length-text2Length:] + } else if text1Length < text2Length { + text2 = text2[0:text1Length] + } + textLength := int(math.Min(float64(text1Length), float64(text2Length))) + // Quick check for the worst case. + if text1 == text2 { + return textLength + } + + // Start by looking for a single character match and increase length until no match is found. Performance analysis: http://neil.fraser.name/news/2010/11/04/ + best := 0 + length := 1 + for { + pattern := text1[textLength-length:] + found := strings.Index(text2, pattern) + if found == -1 { + break + } + length += found + if found == 0 || text1[textLength-length:] == text2[0:length] { + best = length + length++ + } + } + + return best +} + +// DiffHalfMatch checks whether the two texts share a substring which is at least half the length of the longer text. This speedup can produce non-minimal diffs. +func (dmp *DiffMatchPatch) DiffHalfMatch(text1, text2 string) []string { + // Unused in this code, but retained for interface compatibility. + runeSlices := dmp.diffHalfMatch([]rune(text1), []rune(text2)) + if runeSlices == nil { + return nil + } + + result := make([]string, len(runeSlices)) + for i, r := range runeSlices { + result[i] = string(r) + } + return result +} + +func (dmp *DiffMatchPatch) diffHalfMatch(text1, text2 []rune) [][]rune { + if dmp.DiffTimeout <= 0 { + // Don't risk returning a non-optimal diff if we have unlimited time. + return nil + } + + var longtext, shorttext []rune + if len(text1) > len(text2) { + longtext = text1 + shorttext = text2 + } else { + longtext = text2 + shorttext = text1 + } + + if len(longtext) < 4 || len(shorttext)*2 < len(longtext) { + return nil // Pointless. + } + + // First check if the second quarter is the seed for a half-match. + hm1 := dmp.diffHalfMatchI(longtext, shorttext, int(float64(len(longtext)+3)/4)) + + // Check again based on the third quarter. + hm2 := dmp.diffHalfMatchI(longtext, shorttext, int(float64(len(longtext)+1)/2)) + + hm := [][]rune{} + if hm1 == nil && hm2 == nil { + return nil + } else if hm2 == nil { + hm = hm1 + } else if hm1 == nil { + hm = hm2 + } else { + // Both matched. Select the longest. + if len(hm1[4]) > len(hm2[4]) { + hm = hm1 + } else { + hm = hm2 + } + } + + // A half-match was found, sort out the return data. 
+ if len(text1) > len(text2) { + return hm + } + + return [][]rune{hm[2], hm[3], hm[0], hm[1], hm[4]} +} + +// diffHalfMatchI checks if a substring of shorttext exist within longtext such that the substring is at least half the length of longtext? +// Returns a slice containing the prefix of longtext, the suffix of longtext, the prefix of shorttext, the suffix of shorttext and the common middle, or null if there was no match. +func (dmp *DiffMatchPatch) diffHalfMatchI(l, s []rune, i int) [][]rune { + var bestCommonA []rune + var bestCommonB []rune + var bestCommonLen int + var bestLongtextA []rune + var bestLongtextB []rune + var bestShorttextA []rune + var bestShorttextB []rune + + // Start with a 1/4 length substring at position i as a seed. + seed := l[i : i+len(l)/4] + + for j := runesIndexOf(s, seed, 0); j != -1; j = runesIndexOf(s, seed, j+1) { + prefixLength := commonPrefixLength(l[i:], s[j:]) + suffixLength := commonSuffixLength(l[:i], s[:j]) + + if bestCommonLen < suffixLength+prefixLength { + bestCommonA = s[j-suffixLength : j] + bestCommonB = s[j : j+prefixLength] + bestCommonLen = len(bestCommonA) + len(bestCommonB) + bestLongtextA = l[:i-suffixLength] + bestLongtextB = l[i+prefixLength:] + bestShorttextA = s[:j-suffixLength] + bestShorttextB = s[j+prefixLength:] + } + } + + if bestCommonLen*2 < len(l) { + return nil + } + + return [][]rune{ + bestLongtextA, + bestLongtextB, + bestShorttextA, + bestShorttextB, + append(bestCommonA, bestCommonB...), + } +} + +// DiffCleanupSemantic reduces the number of edits by eliminating semantically trivial equalities. +func (dmp *DiffMatchPatch) DiffCleanupSemantic(diffs []Diff) []Diff { + changes := false + // Stack of indices where equalities are found. + equalities := make([]int, 0, len(diffs)) + + var lastequality string + // Always equal to diffs[equalities[equalitiesLength - 1]][1] + var pointer int // Index of current position. + // Number of characters that changed prior to the equality. + var lengthInsertions1, lengthDeletions1 int + // Number of characters that changed after the equality. + var lengthInsertions2, lengthDeletions2 int + + for pointer < len(diffs) { + if diffs[pointer].Type == DiffEqual { + // Equality found. + equalities = append(equalities, pointer) + lengthInsertions1 = lengthInsertions2 + lengthDeletions1 = lengthDeletions2 + lengthInsertions2 = 0 + lengthDeletions2 = 0 + lastequality = diffs[pointer].Text + } else { + // An insertion or deletion. + + if diffs[pointer].Type == DiffInsert { + lengthInsertions2 += utf8.RuneCountInString(diffs[pointer].Text) + } else { + lengthDeletions2 += utf8.RuneCountInString(diffs[pointer].Text) + } + // Eliminate an equality that is smaller or equal to the edits on both sides of it. + difference1 := int(math.Max(float64(lengthInsertions1), float64(lengthDeletions1))) + difference2 := int(math.Max(float64(lengthInsertions2), float64(lengthDeletions2))) + if utf8.RuneCountInString(lastequality) > 0 && + (utf8.RuneCountInString(lastequality) <= difference1) && + (utf8.RuneCountInString(lastequality) <= difference2) { + // Duplicate record. + insPoint := equalities[len(equalities)-1] + diffs = splice(diffs, insPoint, 0, Diff{DiffDelete, lastequality}) + + // Change second copy to insert. + diffs[insPoint+1].Type = DiffInsert + // Throw away the equality we just deleted. 
+ equalities = equalities[:len(equalities)-1] + + if len(equalities) > 0 { + equalities = equalities[:len(equalities)-1] + } + pointer = -1 + if len(equalities) > 0 { + pointer = equalities[len(equalities)-1] + } + + lengthInsertions1 = 0 // Reset the counters. + lengthDeletions1 = 0 + lengthInsertions2 = 0 + lengthDeletions2 = 0 + lastequality = "" + changes = true + } + } + pointer++ + } + + // Normalize the diff. + if changes { + diffs = dmp.DiffCleanupMerge(diffs) + } + diffs = dmp.DiffCleanupSemanticLossless(diffs) + // Find any overlaps between deletions and insertions. + // e.g: abcxxxxxxdef + // -> abcxxxdef + // e.g: xxxabcdefxxx + // -> defxxxabc + // Only extract an overlap if it is as big as the edit ahead or behind it. + pointer = 1 + for pointer < len(diffs) { + if diffs[pointer-1].Type == DiffDelete && + diffs[pointer].Type == DiffInsert { + deletion := diffs[pointer-1].Text + insertion := diffs[pointer].Text + overlapLength1 := dmp.DiffCommonOverlap(deletion, insertion) + overlapLength2 := dmp.DiffCommonOverlap(insertion, deletion) + if overlapLength1 >= overlapLength2 { + if float64(overlapLength1) >= float64(utf8.RuneCountInString(deletion))/2 || + float64(overlapLength1) >= float64(utf8.RuneCountInString(insertion))/2 { + + // Overlap found. Insert an equality and trim the surrounding edits. + diffs = splice(diffs, pointer, 0, Diff{DiffEqual, insertion[:overlapLength1]}) + diffs[pointer-1].Text = + deletion[0 : len(deletion)-overlapLength1] + diffs[pointer+1].Text = insertion[overlapLength1:] + pointer++ + } + } else { + if float64(overlapLength2) >= float64(utf8.RuneCountInString(deletion))/2 || + float64(overlapLength2) >= float64(utf8.RuneCountInString(insertion))/2 { + // Reverse overlap found. Insert an equality and swap and trim the surrounding edits. + overlap := Diff{DiffEqual, deletion[:overlapLength2]} + diffs = splice(diffs, pointer, 0, overlap) + diffs[pointer-1].Type = DiffInsert + diffs[pointer-1].Text = insertion[0 : len(insertion)-overlapLength2] + diffs[pointer+1].Type = DiffDelete + diffs[pointer+1].Text = deletion[overlapLength2:] + pointer++ + } + } + pointer++ + } + pointer++ + } + + return diffs +} + +// Define some regex patterns for matching boundaries. +var ( + nonAlphaNumericRegex = regexp.MustCompile(`[^a-zA-Z0-9]`) + whitespaceRegex = regexp.MustCompile(`\s`) + linebreakRegex = regexp.MustCompile(`[\r\n]`) + blanklineEndRegex = regexp.MustCompile(`\n\r?\n$`) + blanklineStartRegex = regexp.MustCompile(`^\r?\n\r?\n`) +) + +// diffCleanupSemanticScore computes a score representing whether the internal boundary falls on logical boundaries. +// Scores range from 6 (best) to 0 (worst). Closure, but does not reference any external variables. +func diffCleanupSemanticScore(one, two string) int { + if len(one) == 0 || len(two) == 0 { + // Edges are the best. + return 6 + } + + // Each port of this function behaves slightly differently due to subtle differences in each language's definition of things like 'whitespace'. Since this function's purpose is largely cosmetic, the choice has been made to use each language's native features rather than force total conformity. 
+ rune1, _ := utf8.DecodeLastRuneInString(one) + rune2, _ := utf8.DecodeRuneInString(two) + char1 := string(rune1) + char2 := string(rune2) + + nonAlphaNumeric1 := nonAlphaNumericRegex.MatchString(char1) + nonAlphaNumeric2 := nonAlphaNumericRegex.MatchString(char2) + whitespace1 := nonAlphaNumeric1 && whitespaceRegex.MatchString(char1) + whitespace2 := nonAlphaNumeric2 && whitespaceRegex.MatchString(char2) + lineBreak1 := whitespace1 && linebreakRegex.MatchString(char1) + lineBreak2 := whitespace2 && linebreakRegex.MatchString(char2) + blankLine1 := lineBreak1 && blanklineEndRegex.MatchString(one) + blankLine2 := lineBreak2 && blanklineEndRegex.MatchString(two) + + if blankLine1 || blankLine2 { + // Five points for blank lines. + return 5 + } else if lineBreak1 || lineBreak2 { + // Four points for line breaks. + return 4 + } else if nonAlphaNumeric1 && !whitespace1 && whitespace2 { + // Three points for end of sentences. + return 3 + } else if whitespace1 || whitespace2 { + // Two points for whitespace. + return 2 + } else if nonAlphaNumeric1 || nonAlphaNumeric2 { + // One point for non-alphanumeric. + return 1 + } + return 0 +} + +// DiffCleanupSemanticLossless looks for single edits surrounded on both sides by equalities which can be shifted sideways to align the edit to a word boundary. +// E.g: The cat came. -> The cat came. +func (dmp *DiffMatchPatch) DiffCleanupSemanticLossless(diffs []Diff) []Diff { + pointer := 1 + + // Intentionally ignore the first and last element (don't need checking). + for pointer < len(diffs)-1 { + if diffs[pointer-1].Type == DiffEqual && + diffs[pointer+1].Type == DiffEqual { + + // This is a single edit surrounded by equalities. + equality1 := diffs[pointer-1].Text + edit := diffs[pointer].Text + equality2 := diffs[pointer+1].Text + + // First, shift the edit as far left as possible. + commonOffset := dmp.DiffCommonSuffix(equality1, edit) + if commonOffset > 0 { + commonString := edit[len(edit)-commonOffset:] + equality1 = equality1[0 : len(equality1)-commonOffset] + edit = commonString + edit[:len(edit)-commonOffset] + equality2 = commonString + equality2 + } + + // Second, step character by character right, looking for the best fit. + bestEquality1 := equality1 + bestEdit := edit + bestEquality2 := equality2 + bestScore := diffCleanupSemanticScore(equality1, edit) + + diffCleanupSemanticScore(edit, equality2) + + for len(edit) != 0 && len(equality2) != 0 { + _, sz := utf8.DecodeRuneInString(edit) + if len(equality2) < sz || edit[:sz] != equality2[:sz] { + break + } + equality1 += edit[:sz] + edit = edit[sz:] + equality2[:sz] + equality2 = equality2[sz:] + score := diffCleanupSemanticScore(equality1, edit) + + diffCleanupSemanticScore(edit, equality2) + // The >= encourages trailing rather than leading whitespace on edits. + if score >= bestScore { + bestScore = score + bestEquality1 = equality1 + bestEdit = edit + bestEquality2 = equality2 + } + } + + if diffs[pointer-1].Text != bestEquality1 { + // We have an improvement, save it back to the diff. + if len(bestEquality1) != 0 { + diffs[pointer-1].Text = bestEquality1 + } else { + diffs = splice(diffs, pointer-1, 1) + pointer-- + } + + diffs[pointer].Text = bestEdit + if len(bestEquality2) != 0 { + diffs[pointer+1].Text = bestEquality2 + } else { + diffs = append(diffs[:pointer+1], diffs[pointer+2:]...) + pointer-- + } + } + } + pointer++ + } + + return diffs +} + +// DiffCleanupEfficiency reduces the number of edits by eliminating operationally trivial equalities. 
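The semantic cleanup passes above are normally run on the output of DiffMain before a diff is rendered. The following is a minimal, hedged usage sketch of that flow against the vendored package (import path taken from the vendor directory); the sample strings are invented:

```go
package main

import (
	"fmt"

	"github.com/sergi/go-diff/diffmatchpatch"
)

func main() {
	dmp := diffmatchpatch.New()

	// Character-level diff first, then collapse semantically trivial
	// equalities so the result reads better for humans.
	diffs := dmp.DiffMain("The quick brown fox.", "The quick red fox.", false)
	diffs = dmp.DiffCleanupSemantic(diffs)

	for _, d := range diffs {
		fmt.Printf("%-6s %q\n", d.Type, d.Text)
	}
}
```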
+func (dmp *DiffMatchPatch) DiffCleanupEfficiency(diffs []Diff) []Diff { + changes := false + // Stack of indices where equalities are found. + type equality struct { + data int + next *equality + } + var equalities *equality + // Always equal to equalities[equalitiesLength-1][1] + lastequality := "" + pointer := 0 // Index of current position. + // Is there an insertion operation before the last equality. + preIns := false + // Is there a deletion operation before the last equality. + preDel := false + // Is there an insertion operation after the last equality. + postIns := false + // Is there a deletion operation after the last equality. + postDel := false + for pointer < len(diffs) { + if diffs[pointer].Type == DiffEqual { // Equality found. + if len(diffs[pointer].Text) < dmp.DiffEditCost && + (postIns || postDel) { + // Candidate found. + equalities = &equality{ + data: pointer, + next: equalities, + } + preIns = postIns + preDel = postDel + lastequality = diffs[pointer].Text + } else { + // Not a candidate, and can never become one. + equalities = nil + lastequality = "" + } + postIns = false + postDel = false + } else { // An insertion or deletion. + if diffs[pointer].Type == DiffDelete { + postDel = true + } else { + postIns = true + } + + // Five types to be split: + // ABXYCD + // AXCD + // ABXC + // AXCD + // ABXC + var sumPres int + if preIns { + sumPres++ + } + if preDel { + sumPres++ + } + if postIns { + sumPres++ + } + if postDel { + sumPres++ + } + if len(lastequality) > 0 && + ((preIns && preDel && postIns && postDel) || + ((len(lastequality) < dmp.DiffEditCost/2) && sumPres == 3)) { + + insPoint := equalities.data + + // Duplicate record. + diffs = splice(diffs, insPoint, 0, Diff{DiffDelete, lastequality}) + + // Change second copy to insert. + diffs[insPoint+1].Type = DiffInsert + // Throw away the equality we just deleted. + equalities = equalities.next + lastequality = "" + + if preIns && preDel { + // No changes made which could affect previous entry, keep going. + postIns = true + postDel = true + equalities = nil + } else { + if equalities != nil { + equalities = equalities.next + } + if equalities != nil { + pointer = equalities.data + } else { + pointer = -1 + } + postIns = false + postDel = false + } + changes = true + } + } + pointer++ + } + + if changes { + diffs = dmp.DiffCleanupMerge(diffs) + } + + return diffs +} + +// DiffCleanupMerge reorders and merges like edit sections. Merge equalities. +// Any edit section can move as long as it doesn't cross an equality. +func (dmp *DiffMatchPatch) DiffCleanupMerge(diffs []Diff) []Diff { + // Add a dummy entry at the end. + diffs = append(diffs, Diff{DiffEqual, ""}) + pointer := 0 + countDelete := 0 + countInsert := 0 + commonlength := 0 + textDelete := []rune(nil) + textInsert := []rune(nil) + + for pointer < len(diffs) { + switch diffs[pointer].Type { + case DiffInsert: + countInsert++ + textInsert = append(textInsert, []rune(diffs[pointer].Text)...) + pointer++ + break + case DiffDelete: + countDelete++ + textDelete = append(textDelete, []rune(diffs[pointer].Text)...) + pointer++ + break + case DiffEqual: + // Upon reaching an equality, check for prior redundancies. + if countDelete+countInsert > 1 { + if countDelete != 0 && countInsert != 0 { + // Factor out any common prefixies. 
+ commonlength = commonPrefixLength(textInsert, textDelete) + if commonlength != 0 { + x := pointer - countDelete - countInsert + if x > 0 && diffs[x-1].Type == DiffEqual { + diffs[x-1].Text += string(textInsert[:commonlength]) + } else { + diffs = append([]Diff{{DiffEqual, string(textInsert[:commonlength])}}, diffs...) + pointer++ + } + textInsert = textInsert[commonlength:] + textDelete = textDelete[commonlength:] + } + // Factor out any common suffixies. + commonlength = commonSuffixLength(textInsert, textDelete) + if commonlength != 0 { + insertIndex := len(textInsert) - commonlength + deleteIndex := len(textDelete) - commonlength + diffs[pointer].Text = string(textInsert[insertIndex:]) + diffs[pointer].Text + textInsert = textInsert[:insertIndex] + textDelete = textDelete[:deleteIndex] + } + } + // Delete the offending records and add the merged ones. + if countDelete == 0 { + diffs = splice(diffs, pointer-countInsert, + countDelete+countInsert, + Diff{DiffInsert, string(textInsert)}) + } else if countInsert == 0 { + diffs = splice(diffs, pointer-countDelete, + countDelete+countInsert, + Diff{DiffDelete, string(textDelete)}) + } else { + diffs = splice(diffs, pointer-countDelete-countInsert, + countDelete+countInsert, + Diff{DiffDelete, string(textDelete)}, + Diff{DiffInsert, string(textInsert)}) + } + + pointer = pointer - countDelete - countInsert + 1 + if countDelete != 0 { + pointer++ + } + if countInsert != 0 { + pointer++ + } + } else if pointer != 0 && diffs[pointer-1].Type == DiffEqual { + // Merge this equality with the previous one. + diffs[pointer-1].Text += diffs[pointer].Text + diffs = append(diffs[:pointer], diffs[pointer+1:]...) + } else { + pointer++ + } + countInsert = 0 + countDelete = 0 + textDelete = nil + textInsert = nil + break + } + } + + if len(diffs[len(diffs)-1].Text) == 0 { + diffs = diffs[0 : len(diffs)-1] // Remove the dummy entry at the end. + } + + // Second pass: look for single edits surrounded on both sides by equalities which can be shifted sideways to eliminate an equality. E.g: ABAC -> ABAC + changes := false + pointer = 1 + // Intentionally ignore the first and last element (don't need checking). + for pointer < (len(diffs) - 1) { + if diffs[pointer-1].Type == DiffEqual && + diffs[pointer+1].Type == DiffEqual { + // This is a single edit surrounded by equalities. + if strings.HasSuffix(diffs[pointer].Text, diffs[pointer-1].Text) { + // Shift the edit over the previous equality. + diffs[pointer].Text = diffs[pointer-1].Text + + diffs[pointer].Text[:len(diffs[pointer].Text)-len(diffs[pointer-1].Text)] + diffs[pointer+1].Text = diffs[pointer-1].Text + diffs[pointer+1].Text + diffs = splice(diffs, pointer-1, 1) + changes = true + } else if strings.HasPrefix(diffs[pointer].Text, diffs[pointer+1].Text) { + // Shift the edit over the next equality. + diffs[pointer-1].Text += diffs[pointer+1].Text + diffs[pointer].Text = + diffs[pointer].Text[len(diffs[pointer+1].Text):] + diffs[pointer+1].Text + diffs = splice(diffs, pointer+1, 1) + changes = true + } + } + pointer++ + } + + // If shifts were made, the diff needs reordering and another shift sweep. + if changes { + diffs = dmp.DiffCleanupMerge(diffs) + } + + return diffs +} + +// DiffXIndex returns the equivalent location in s2. +func (dmp *DiffMatchPatch) DiffXIndex(diffs []Diff, loc int) int { + chars1 := 0 + chars2 := 0 + lastChars1 := 0 + lastChars2 := 0 + lastDiff := Diff{} + for i := 0; i < len(diffs); i++ { + aDiff := diffs[i] + if aDiff.Type != DiffInsert { + // Equality or deletion. 
+ chars1 += len(aDiff.Text) + } + if aDiff.Type != DiffDelete { + // Equality or insertion. + chars2 += len(aDiff.Text) + } + if chars1 > loc { + // Overshot the location. + lastDiff = aDiff + break + } + lastChars1 = chars1 + lastChars2 = chars2 + } + if lastDiff.Type == DiffDelete { + // The location was deleted. + return lastChars2 + } + // Add the remaining character length. + return lastChars2 + (loc - lastChars1) +} + +// DiffPrettyHtml converts a []Diff into a pretty HTML report. +// It is intended as an example from which to write one's own display functions. +func (dmp *DiffMatchPatch) DiffPrettyHtml(diffs []Diff) string { + var buff bytes.Buffer + for _, diff := range diffs { + text := strings.Replace(html.EscapeString(diff.Text), "\n", "&para;<br>", -1)
", -1) + switch diff.Type { + case DiffInsert: + _, _ = buff.WriteString("") + _, _ = buff.WriteString(text) + _, _ = buff.WriteString("") + case DiffDelete: + _, _ = buff.WriteString("") + _, _ = buff.WriteString(text) + _, _ = buff.WriteString("") + case DiffEqual: + _, _ = buff.WriteString("") + _, _ = buff.WriteString(text) + _, _ = buff.WriteString("") + } + } + return buff.String() +} + +// DiffPrettyText converts a []Diff into a colored text report. +func (dmp *DiffMatchPatch) DiffPrettyText(diffs []Diff) string { + var buff bytes.Buffer + for _, diff := range diffs { + text := diff.Text + + switch diff.Type { + case DiffInsert: + _, _ = buff.WriteString("\x1b[32m") + _, _ = buff.WriteString(text) + _, _ = buff.WriteString("\x1b[0m") + case DiffDelete: + _, _ = buff.WriteString("\x1b[31m") + _, _ = buff.WriteString(text) + _, _ = buff.WriteString("\x1b[0m") + case DiffEqual: + _, _ = buff.WriteString(text) + } + } + + return buff.String() +} + +// DiffText1 computes and returns the source text (all equalities and deletions). +func (dmp *DiffMatchPatch) DiffText1(diffs []Diff) string { + //StringBuilder text = new StringBuilder() + var text bytes.Buffer + + for _, aDiff := range diffs { + if aDiff.Type != DiffInsert { + _, _ = text.WriteString(aDiff.Text) + } + } + return text.String() +} + +// DiffText2 computes and returns the destination text (all equalities and insertions). +func (dmp *DiffMatchPatch) DiffText2(diffs []Diff) string { + var text bytes.Buffer + + for _, aDiff := range diffs { + if aDiff.Type != DiffDelete { + _, _ = text.WriteString(aDiff.Text) + } + } + return text.String() +} + +// DiffLevenshtein computes the Levenshtein distance that is the number of inserted, deleted or substituted characters. +func (dmp *DiffMatchPatch) DiffLevenshtein(diffs []Diff) int { + levenshtein := 0 + insertions := 0 + deletions := 0 + + for _, aDiff := range diffs { + switch aDiff.Type { + case DiffInsert: + insertions += utf8.RuneCountInString(aDiff.Text) + case DiffDelete: + deletions += utf8.RuneCountInString(aDiff.Text) + case DiffEqual: + // A deletion and an insertion is one substitution. + levenshtein += max(insertions, deletions) + insertions = 0 + deletions = 0 + } + } + + levenshtein += max(insertions, deletions) + return levenshtein +} + +// DiffToDelta crushes the diff into an encoded string which describes the operations required to transform text1 into text2. +// E.g. =3\t-2\t+ing -> Keep 3 chars, delete 2 chars, insert 'ing'. Operations are tab-separated. Inserted text is escaped using %xx notation. +func (dmp *DiffMatchPatch) DiffToDelta(diffs []Diff) string { + var text bytes.Buffer + for _, aDiff := range diffs { + switch aDiff.Type { + case DiffInsert: + _, _ = text.WriteString("+") + _, _ = text.WriteString(strings.Replace(url.QueryEscape(aDiff.Text), "+", " ", -1)) + _, _ = text.WriteString("\t") + break + case DiffDelete: + _, _ = text.WriteString("-") + _, _ = text.WriteString(strconv.Itoa(utf8.RuneCountInString(aDiff.Text))) + _, _ = text.WriteString("\t") + break + case DiffEqual: + _, _ = text.WriteString("=") + _, _ = text.WriteString(strconv.Itoa(utf8.RuneCountInString(aDiff.Text))) + _, _ = text.WriteString("\t") + break + } + } + delta := text.String() + if len(delta) != 0 { + // Strip off trailing tab character. 
+ delta = delta[0 : utf8.RuneCountInString(delta)-1] + delta = unescaper.Replace(delta) + } + return delta +} + +// DiffFromDelta given the original text1, and an encoded string which describes the operations required to transform text1 into text2, comAdde the full diff. +func (dmp *DiffMatchPatch) DiffFromDelta(text1 string, delta string) (diffs []Diff, err error) { + i := 0 + runes := []rune(text1) + + for _, token := range strings.Split(delta, "\t") { + if len(token) == 0 { + // Blank tokens are ok (from a trailing \t). + continue + } + + // Each token begins with a one character parameter which specifies the operation of this token (delete, insert, equality). + param := token[1:] + + switch op := token[0]; op { + case '+': + // Decode would Diff all "+" to " " + param = strings.Replace(param, "+", "%2b", -1) + param, err = url.QueryUnescape(param) + if err != nil { + return nil, err + } + if !utf8.ValidString(param) { + return nil, fmt.Errorf("invalid UTF-8 token: %q", param) + } + + diffs = append(diffs, Diff{DiffInsert, param}) + case '=', '-': + n, err := strconv.ParseInt(param, 10, 0) + if err != nil { + return nil, err + } else if n < 0 { + return nil, errors.New("Negative number in DiffFromDelta: " + param) + } + + i += int(n) + // Break out if we are out of bounds, go1.6 can't handle this very well + if i > len(runes) { + break + } + // Remember that string slicing is by byte - we want by rune here. + text := string(runes[i-int(n) : i]) + + if op == '=' { + diffs = append(diffs, Diff{DiffEqual, text}) + } else { + diffs = append(diffs, Diff{DiffDelete, text}) + } + default: + // Anything else is an error. + return nil, errors.New("Invalid diff operation in DiffFromDelta: " + string(token[0])) + } + } + + if i != len(runes) { + return nil, fmt.Errorf("Delta length (%v) is different from source text length (%v)", i, len(text1)) + } + + return diffs, nil +} + +// diffLinesToStrings splits two texts into a list of strings. Each string represents one line. +func (dmp *DiffMatchPatch) diffLinesToStrings(text1, text2 string) (string, string, []string) { + // '\x00' is a valid character, but various debuggers don't like it. So we'll insert a junk entry to avoid generating a null character. + lineArray := []string{""} // e.g. lineArray[4] == 'Hello\n' + + //Each string has the index of lineArray which it points to + strIndexArray1 := dmp.diffLinesToStringsMunge(text1, &lineArray) + strIndexArray2 := dmp.diffLinesToStringsMunge(text2, &lineArray) + + return intArrayToString(strIndexArray1), intArrayToString(strIndexArray2), lineArray +} + +// diffLinesToStringsMunge splits a text into an array of strings, and reduces the texts to a []string. +func (dmp *DiffMatchPatch) diffLinesToStringsMunge(text string, lineArray *[]string) []uint32 { + // Walk the text, pulling out a substring for each line. text.split('\n') would would temporarily double our memory footprint. Modifying text would create many large strings to garbage collect. + lineHash := map[string]int{} // e.g. 
lineHash['Hello\n'] == 4 + lineStart := 0 + lineEnd := -1 + strs := []uint32{} + + for lineEnd < len(text)-1 { + lineEnd = indexOf(text, "\n", lineStart) + + if lineEnd == -1 { + lineEnd = len(text) - 1 + } + + line := text[lineStart : lineEnd+1] + lineStart = lineEnd + 1 + lineValue, ok := lineHash[line] + + if ok { + strs = append(strs, uint32(lineValue)) + } else { + *lineArray = append(*lineArray, line) + lineHash[line] = len(*lineArray) - 1 + strs = append(strs, uint32(len(*lineArray)-1)) + } + } + + return strs +} diff --git a/vendor/github.com/sergi/go-diff/diffmatchpatch/diffmatchpatch.go b/vendor/github.com/sergi/go-diff/diffmatchpatch/diffmatchpatch.go new file mode 100644 index 000000000..d3acc32ce --- /dev/null +++ b/vendor/github.com/sergi/go-diff/diffmatchpatch/diffmatchpatch.go @@ -0,0 +1,46 @@ +// Copyright (c) 2012-2016 The go-diff authors. All rights reserved. +// https://github.com/sergi/go-diff +// See the included LICENSE file for license details. +// +// go-diff is a Go implementation of Google's Diff, Match, and Patch library +// Original library is Copyright (c) 2006 Google Inc. +// http://code.google.com/p/google-diff-match-patch/ + +// Package diffmatchpatch offers robust algorithms to perform the operations required for synchronizing plain text. +package diffmatchpatch + +import ( + "time" +) + +// DiffMatchPatch holds the configuration for diff-match-patch operations. +type DiffMatchPatch struct { + // Number of seconds to map a diff before giving up (0 for infinity). + DiffTimeout time.Duration + // Cost of an empty edit operation in terms of edit characters. + DiffEditCost int + // How far to search for a match (0 = exact location, 1000+ = broad match). A match this many characters away from the expected location will add 1.0 to the score (0.0 is a perfect match). + MatchDistance int + // When deleting a large block of text (over ~64 characters), how close do the contents have to be to match the expected contents. (0.0 = perfection, 1.0 = very loose). Note that MatchThreshold controls how closely the end points of a delete need to match. + PatchDeleteThreshold float64 + // Chunk size for context length. + PatchMargin int + // The number of bits in an int. + MatchMaxBits int + // At what point is no match declared (0.0 = perfection, 1.0 = very loose). + MatchThreshold float64 +} + +// New creates a new DiffMatchPatch object with default parameters. +func New() *DiffMatchPatch { + // Defaults. + return &DiffMatchPatch{ + DiffTimeout: time.Second, + DiffEditCost: 4, + MatchThreshold: 0.5, + MatchDistance: 1000, + PatchDeleteThreshold: 0.5, + PatchMargin: 4, + MatchMaxBits: 32, + } +} diff --git a/vendor/github.com/sergi/go-diff/diffmatchpatch/match.go b/vendor/github.com/sergi/go-diff/diffmatchpatch/match.go new file mode 100644 index 000000000..17374e109 --- /dev/null +++ b/vendor/github.com/sergi/go-diff/diffmatchpatch/match.go @@ -0,0 +1,160 @@ +// Copyright (c) 2012-2016 The go-diff authors. All rights reserved. +// https://github.com/sergi/go-diff +// See the included LICENSE file for license details. +// +// go-diff is a Go implementation of Google's Diff, Match, and Patch library +// Original library is Copyright (c) 2006 Google Inc. +// http://code.google.com/p/google-diff-match-patch/ + +package diffmatchpatch + +import ( + "math" +) + +// MatchMain locates the best instance of 'pattern' in 'text' near 'loc'. +// Returns -1 if no match found. 
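New above only fills in defaults; callers are expected to adjust the exported fields on the returned struct before use. A small sketch of that, assuming nothing beyond the fields declared in this file (the particular values are arbitrary):

```go
package main

import (
	"fmt"
	"time"

	"github.com/sergi/go-diff/diffmatchpatch"
)

func main() {
	dmp := diffmatchpatch.New()

	// Stop searching for an optimal diff after 100ms instead of the
	// default one second (a zero value means no timeout).
	dmp.DiffTimeout = 100 * time.Millisecond

	// Be slightly stricter than the default 0.5 when accepting fuzzy
	// matches (0.0 = perfect match required, 1.0 = very loose).
	dmp.MatchThreshold = 0.4

	fmt.Println(dmp.DiffTimeout, dmp.MatchThreshold)
}
```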
+func (dmp *DiffMatchPatch) MatchMain(text, pattern string, loc int) int { + // Check for null inputs not needed since null can't be passed in C#. + + loc = int(math.Max(0, math.Min(float64(loc), float64(len(text))))) + if text == pattern { + // Shortcut (potentially not guaranteed by the algorithm) + return 0 + } else if len(text) == 0 { + // Nothing to match. + return -1 + } else if loc+len(pattern) <= len(text) && text[loc:loc+len(pattern)] == pattern { + // Perfect match at the perfect spot! (Includes case of null pattern) + return loc + } + // Do a fuzzy compare. + return dmp.MatchBitap(text, pattern, loc) +} + +// MatchBitap locates the best instance of 'pattern' in 'text' near 'loc' using the Bitap algorithm. +// Returns -1 if no match was found. +func (dmp *DiffMatchPatch) MatchBitap(text, pattern string, loc int) int { + // Initialise the alphabet. + s := dmp.MatchAlphabet(pattern) + + // Highest score beyond which we give up. + scoreThreshold := dmp.MatchThreshold + // Is there a nearby exact match? (speedup) + bestLoc := indexOf(text, pattern, loc) + if bestLoc != -1 { + scoreThreshold = math.Min(dmp.matchBitapScore(0, bestLoc, loc, + pattern), scoreThreshold) + // What about in the other direction? (speedup) + bestLoc = lastIndexOf(text, pattern, loc+len(pattern)) + if bestLoc != -1 { + scoreThreshold = math.Min(dmp.matchBitapScore(0, bestLoc, loc, + pattern), scoreThreshold) + } + } + + // Initialise the bit arrays. + matchmask := 1 << uint((len(pattern) - 1)) + bestLoc = -1 + + var binMin, binMid int + binMax := len(pattern) + len(text) + lastRd := []int{} + for d := 0; d < len(pattern); d++ { + // Scan for the best match; each iteration allows for one more error. Run a binary search to determine how far from 'loc' we can stray at this error level. + binMin = 0 + binMid = binMax + for binMin < binMid { + if dmp.matchBitapScore(d, loc+binMid, loc, pattern) <= scoreThreshold { + binMin = binMid + } else { + binMax = binMid + } + binMid = (binMax-binMin)/2 + binMin + } + // Use the result from this iteration as the maximum for the next. + binMax = binMid + start := int(math.Max(1, float64(loc-binMid+1))) + finish := int(math.Min(float64(loc+binMid), float64(len(text))) + float64(len(pattern))) + + rd := make([]int, finish+2) + rd[finish+1] = (1 << uint(d)) - 1 + + for j := finish; j >= start; j-- { + var charMatch int + if len(text) <= j-1 { + // Out of range. + charMatch = 0 + } else if _, ok := s[text[j-1]]; !ok { + charMatch = 0 + } else { + charMatch = s[text[j-1]] + } + + if d == 0 { + // First pass: exact match. + rd[j] = ((rd[j+1] << 1) | 1) & charMatch + } else { + // Subsequent passes: fuzzy match. + rd[j] = ((rd[j+1]<<1)|1)&charMatch | (((lastRd[j+1] | lastRd[j]) << 1) | 1) | lastRd[j+1] + } + if (rd[j] & matchmask) != 0 { + score := dmp.matchBitapScore(d, j-1, loc, pattern) + // This match will almost certainly be better than any existing match. But check anyway. + if score <= scoreThreshold { + // Told you so. + scoreThreshold = score + bestLoc = j - 1 + if bestLoc > loc { + // When passing loc, don't exceed our current distance from loc. + start = int(math.Max(1, float64(2*loc-bestLoc))) + } else { + // Already passed loc, downhill from here on in. + break + } + } + } + } + if dmp.matchBitapScore(d+1, loc, loc, pattern) > scoreThreshold { + // No hope for a (better) match at greater error levels. + break + } + lastRd = rd + } + return bestLoc +} + +// matchBitapScore computes and returns the score for a match with e errors and x location. 
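MatchBitap is normally reached through MatchMain, which tries the cheap exact-match shortcuts first and only falls back to the fuzzy bitap search when they fail. A hedged sketch of a call; the text, pattern and expected offset are invented:

```go
package main

import (
	"fmt"

	"github.com/sergi/go-diff/diffmatchpatch"
)

func main() {
	dmp := diffmatchpatch.New()

	// Look for "hello" near byte offset 10; the closest candidate in the
	// text is the misspelled "helo", which the bitap search can still find.
	loc := dmp.MatchMain("some text with helo in it", "hello", 10)
	if loc == -1 {
		fmt.Println("no acceptable match")
		return
	}
	fmt.Println("best match at byte offset", loc)
}
```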
+func (dmp *DiffMatchPatch) matchBitapScore(e, x, loc int, pattern string) float64 { + accuracy := float64(e) / float64(len(pattern)) + proximity := math.Abs(float64(loc - x)) + if dmp.MatchDistance == 0 { + // Dodge divide by zero error. + if proximity == 0 { + return accuracy + } + + return 1.0 + } + return accuracy + (proximity / float64(dmp.MatchDistance)) +} + +// MatchAlphabet initialises the alphabet for the Bitap algorithm. +func (dmp *DiffMatchPatch) MatchAlphabet(pattern string) map[byte]int { + s := map[byte]int{} + charPattern := []byte(pattern) + for _, c := range charPattern { + _, ok := s[c] + if !ok { + s[c] = 0 + } + } + i := 0 + + for _, c := range charPattern { + value := s[c] | int(uint(1)< y { + return x + } + return y +} diff --git a/vendor/github.com/sergi/go-diff/diffmatchpatch/operation_string.go b/vendor/github.com/sergi/go-diff/diffmatchpatch/operation_string.go new file mode 100644 index 000000000..533ec0da7 --- /dev/null +++ b/vendor/github.com/sergi/go-diff/diffmatchpatch/operation_string.go @@ -0,0 +1,17 @@ +// Code generated by "stringer -type=Operation -trimprefix=Diff"; DO NOT EDIT. + +package diffmatchpatch + +import "fmt" + +const _Operation_name = "DeleteEqualInsert" + +var _Operation_index = [...]uint8{0, 6, 11, 17} + +func (i Operation) String() string { + i -= -1 + if i < 0 || i >= Operation(len(_Operation_index)-1) { + return fmt.Sprintf("Operation(%d)", i+-1) + } + return _Operation_name[_Operation_index[i]:_Operation_index[i+1]] +} diff --git a/vendor/github.com/sergi/go-diff/diffmatchpatch/patch.go b/vendor/github.com/sergi/go-diff/diffmatchpatch/patch.go new file mode 100644 index 000000000..0dbe3bdd7 --- /dev/null +++ b/vendor/github.com/sergi/go-diff/diffmatchpatch/patch.go @@ -0,0 +1,556 @@ +// Copyright (c) 2012-2016 The go-diff authors. All rights reserved. +// https://github.com/sergi/go-diff +// See the included LICENSE file for license details. +// +// go-diff is a Go implementation of Google's Diff, Match, and Patch library +// Original library is Copyright (c) 2006 Google Inc. +// http://code.google.com/p/google-diff-match-patch/ + +package diffmatchpatch + +import ( + "bytes" + "errors" + "math" + "net/url" + "regexp" + "strconv" + "strings" +) + +// Patch represents one patch operation. +type Patch struct { + diffs []Diff + Start1 int + Start2 int + Length1 int + Length2 int +} + +// String emulates GNU diff's format. +// Header: @@ -382,8 +481,9 @@ +// Indices are printed as 1-based, not 0-based. +func (p *Patch) String() string { + var coords1, coords2 string + + if p.Length1 == 0 { + coords1 = strconv.Itoa(p.Start1) + ",0" + } else if p.Length1 == 1 { + coords1 = strconv.Itoa(p.Start1 + 1) + } else { + coords1 = strconv.Itoa(p.Start1+1) + "," + strconv.Itoa(p.Length1) + } + + if p.Length2 == 0 { + coords2 = strconv.Itoa(p.Start2) + ",0" + } else if p.Length2 == 1 { + coords2 = strconv.Itoa(p.Start2 + 1) + } else { + coords2 = strconv.Itoa(p.Start2+1) + "," + strconv.Itoa(p.Length2) + } + + var text bytes.Buffer + _, _ = text.WriteString("@@ -" + coords1 + " +" + coords2 + " @@\n") + + // Escape the body of the patch with %xx notation. 
+ for _, aDiff := range p.diffs { + switch aDiff.Type { + case DiffInsert: + _, _ = text.WriteString("+") + case DiffDelete: + _, _ = text.WriteString("-") + case DiffEqual: + _, _ = text.WriteString(" ") + } + + _, _ = text.WriteString(strings.Replace(url.QueryEscape(aDiff.Text), "+", " ", -1)) + _, _ = text.WriteString("\n") + } + + return unescaper.Replace(text.String()) +} + +// PatchAddContext increases the context until it is unique, but doesn't let the pattern expand beyond MatchMaxBits. +func (dmp *DiffMatchPatch) PatchAddContext(patch Patch, text string) Patch { + if len(text) == 0 { + return patch + } + + pattern := text[patch.Start2 : patch.Start2+patch.Length1] + padding := 0 + + // Look for the first and last matches of pattern in text. If two different matches are found, increase the pattern length. + for strings.Index(text, pattern) != strings.LastIndex(text, pattern) && + len(pattern) < dmp.MatchMaxBits-2*dmp.PatchMargin { + padding += dmp.PatchMargin + maxStart := max(0, patch.Start2-padding) + minEnd := min(len(text), patch.Start2+patch.Length1+padding) + pattern = text[maxStart:minEnd] + } + // Add one chunk for good luck. + padding += dmp.PatchMargin + + // Add the prefix. + prefix := text[max(0, patch.Start2-padding):patch.Start2] + if len(prefix) != 0 { + patch.diffs = append([]Diff{Diff{DiffEqual, prefix}}, patch.diffs...) + } + // Add the suffix. + suffix := text[patch.Start2+patch.Length1 : min(len(text), patch.Start2+patch.Length1+padding)] + if len(suffix) != 0 { + patch.diffs = append(patch.diffs, Diff{DiffEqual, suffix}) + } + + // Roll back the start points. + patch.Start1 -= len(prefix) + patch.Start2 -= len(prefix) + // Extend the lengths. + patch.Length1 += len(prefix) + len(suffix) + patch.Length2 += len(prefix) + len(suffix) + + return patch +} + +// PatchMake computes a list of patches. +func (dmp *DiffMatchPatch) PatchMake(opt ...interface{}) []Patch { + if len(opt) == 1 { + diffs, _ := opt[0].([]Diff) + text1 := dmp.DiffText1(diffs) + return dmp.PatchMake(text1, diffs) + } else if len(opt) == 2 { + text1 := opt[0].(string) + switch t := opt[1].(type) { + case string: + diffs := dmp.DiffMain(text1, t, true) + if len(diffs) > 2 { + diffs = dmp.DiffCleanupSemantic(diffs) + diffs = dmp.DiffCleanupEfficiency(diffs) + } + return dmp.PatchMake(text1, diffs) + case []Diff: + return dmp.patchMake2(text1, t) + } + } else if len(opt) == 3 { + return dmp.PatchMake(opt[0], opt[2]) + } + return []Patch{} +} + +// patchMake2 computes a list of patches to turn text1 into text2. +// text2 is not provided, diffs are the delta between text1 and text2. +func (dmp *DiffMatchPatch) patchMake2(text1 string, diffs []Diff) []Patch { + // Check for null inputs not needed since null can't be passed in C#. + patches := []Patch{} + if len(diffs) == 0 { + return patches // Get rid of the null case. + } + + patch := Patch{} + charCount1 := 0 // Number of characters into the text1 string. + charCount2 := 0 // Number of characters into the text2 string. + // Start with text1 (prepatchText) and apply the diffs until we arrive at text2 (postpatchText). We recreate the patches one by one to determine context info. + prepatchText := text1 + postpatchText := text1 + + for i, aDiff := range diffs { + if len(patch.diffs) == 0 && aDiff.Type != DiffEqual { + // A new patch starts here. 
+ patch.Start1 = charCount1 + patch.Start2 = charCount2 + } + + switch aDiff.Type { + case DiffInsert: + patch.diffs = append(patch.diffs, aDiff) + patch.Length2 += len(aDiff.Text) + postpatchText = postpatchText[:charCount2] + + aDiff.Text + postpatchText[charCount2:] + case DiffDelete: + patch.Length1 += len(aDiff.Text) + patch.diffs = append(patch.diffs, aDiff) + postpatchText = postpatchText[:charCount2] + postpatchText[charCount2+len(aDiff.Text):] + case DiffEqual: + if len(aDiff.Text) <= 2*dmp.PatchMargin && + len(patch.diffs) != 0 && i != len(diffs)-1 { + // Small equality inside a patch. + patch.diffs = append(patch.diffs, aDiff) + patch.Length1 += len(aDiff.Text) + patch.Length2 += len(aDiff.Text) + } + if len(aDiff.Text) >= 2*dmp.PatchMargin { + // Time for a new patch. + if len(patch.diffs) != 0 { + patch = dmp.PatchAddContext(patch, prepatchText) + patches = append(patches, patch) + patch = Patch{} + // Unlike Unidiff, our patch lists have a rolling context. http://code.google.com/p/google-diff-match-patch/wiki/Unidiff Update prepatch text & pos to reflect the application of the just completed patch. + prepatchText = postpatchText + charCount1 = charCount2 + } + } + } + + // Update the current character count. + if aDiff.Type != DiffInsert { + charCount1 += len(aDiff.Text) + } + if aDiff.Type != DiffDelete { + charCount2 += len(aDiff.Text) + } + } + + // Pick up the leftover patch if not empty. + if len(patch.diffs) != 0 { + patch = dmp.PatchAddContext(patch, prepatchText) + patches = append(patches, patch) + } + + return patches +} + +// PatchDeepCopy returns an array that is identical to a given an array of patches. +func (dmp *DiffMatchPatch) PatchDeepCopy(patches []Patch) []Patch { + patchesCopy := []Patch{} + for _, aPatch := range patches { + patchCopy := Patch{} + for _, aDiff := range aPatch.diffs { + patchCopy.diffs = append(patchCopy.diffs, Diff{ + aDiff.Type, + aDiff.Text, + }) + } + patchCopy.Start1 = aPatch.Start1 + patchCopy.Start2 = aPatch.Start2 + patchCopy.Length1 = aPatch.Length1 + patchCopy.Length2 = aPatch.Length2 + patchesCopy = append(patchesCopy, patchCopy) + } + return patchesCopy +} + +// PatchApply merges a set of patches onto the text. Returns a patched text, as well as an array of true/false values indicating which patches were applied. +func (dmp *DiffMatchPatch) PatchApply(patches []Patch, text string) (string, []bool) { + if len(patches) == 0 { + return text, []bool{} + } + + // Deep copy the patches so that no changes are made to originals. + patches = dmp.PatchDeepCopy(patches) + + nullPadding := dmp.PatchAddPadding(patches) + text = nullPadding + text + nullPadding + patches = dmp.PatchSplitMax(patches) + + x := 0 + // delta keeps track of the offset between the expected and actual location of the previous patch. If there are patches expected at positions 10 and 20, but the first patch was found at 12, delta is 2 and the second patch has an effective expected position of 22. + delta := 0 + results := make([]bool, len(patches)) + for _, aPatch := range patches { + expectedLoc := aPatch.Start2 + delta + text1 := dmp.DiffText1(aPatch.diffs) + var startLoc int + endLoc := -1 + if len(text1) > dmp.MatchMaxBits { + // PatchSplitMax will only provide an oversized pattern in the case of a monster delete. 
+ startLoc = dmp.MatchMain(text, text1[:dmp.MatchMaxBits], expectedLoc) + if startLoc != -1 { + endLoc = dmp.MatchMain(text, + text1[len(text1)-dmp.MatchMaxBits:], expectedLoc+len(text1)-dmp.MatchMaxBits) + if endLoc == -1 || startLoc >= endLoc { + // Can't find valid trailing context. Drop this patch. + startLoc = -1 + } + } + } else { + startLoc = dmp.MatchMain(text, text1, expectedLoc) + } + if startLoc == -1 { + // No match found. :( + results[x] = false + // Subtract the delta for this failed patch from subsequent patches. + delta -= aPatch.Length2 - aPatch.Length1 + } else { + // Found a match. :) + results[x] = true + delta = startLoc - expectedLoc + var text2 string + if endLoc == -1 { + text2 = text[startLoc:int(math.Min(float64(startLoc+len(text1)), float64(len(text))))] + } else { + text2 = text[startLoc:int(math.Min(float64(endLoc+dmp.MatchMaxBits), float64(len(text))))] + } + if text1 == text2 { + // Perfect match, just shove the Replacement text in. + text = text[:startLoc] + dmp.DiffText2(aPatch.diffs) + text[startLoc+len(text1):] + } else { + // Imperfect match. Run a diff to get a framework of equivalent indices. + diffs := dmp.DiffMain(text1, text2, false) + if len(text1) > dmp.MatchMaxBits && float64(dmp.DiffLevenshtein(diffs))/float64(len(text1)) > dmp.PatchDeleteThreshold { + // The end points match, but the content is unacceptably bad. + results[x] = false + } else { + diffs = dmp.DiffCleanupSemanticLossless(diffs) + index1 := 0 + for _, aDiff := range aPatch.diffs { + if aDiff.Type != DiffEqual { + index2 := dmp.DiffXIndex(diffs, index1) + if aDiff.Type == DiffInsert { + // Insertion + text = text[:startLoc+index2] + aDiff.Text + text[startLoc+index2:] + } else if aDiff.Type == DiffDelete { + // Deletion + startIndex := startLoc + index2 + text = text[:startIndex] + + text[startIndex+dmp.DiffXIndex(diffs, index1+len(aDiff.Text))-index2:] + } + } + if aDiff.Type != DiffDelete { + index1 += len(aDiff.Text) + } + } + } + } + } + x++ + } + // Strip the padding off. + text = text[len(nullPadding) : len(nullPadding)+(len(text)-2*len(nullPadding))] + return text, results +} + +// PatchAddPadding adds some padding on text start and end so that edges can match something. +// Intended to be called only from within patchApply. +func (dmp *DiffMatchPatch) PatchAddPadding(patches []Patch) string { + paddingLength := dmp.PatchMargin + nullPadding := "" + for x := 1; x <= paddingLength; x++ { + nullPadding += string(rune(x)) + } + + // Bump all the patches forward. + for i := range patches { + patches[i].Start1 += paddingLength + patches[i].Start2 += paddingLength + } + + // Add some padding on start of first diff. + if len(patches[0].diffs) == 0 || patches[0].diffs[0].Type != DiffEqual { + // Add nullPadding equality. + patches[0].diffs = append([]Diff{Diff{DiffEqual, nullPadding}}, patches[0].diffs...) + patches[0].Start1 -= paddingLength // Should be 0. + patches[0].Start2 -= paddingLength // Should be 0. + patches[0].Length1 += paddingLength + patches[0].Length2 += paddingLength + } else if paddingLength > len(patches[0].diffs[0].Text) { + // Grow first equality. + extraLength := paddingLength - len(patches[0].diffs[0].Text) + patches[0].diffs[0].Text = nullPadding[len(patches[0].diffs[0].Text):] + patches[0].diffs[0].Text + patches[0].Start1 -= extraLength + patches[0].Start2 -= extraLength + patches[0].Length1 += extraLength + patches[0].Length2 += extraLength + } + + // Add some padding on end of last diff. 
+ last := len(patches) - 1 + if len(patches[last].diffs) == 0 || patches[last].diffs[len(patches[last].diffs)-1].Type != DiffEqual { + // Add nullPadding equality. + patches[last].diffs = append(patches[last].diffs, Diff{DiffEqual, nullPadding}) + patches[last].Length1 += paddingLength + patches[last].Length2 += paddingLength + } else if paddingLength > len(patches[last].diffs[len(patches[last].diffs)-1].Text) { + // Grow last equality. + lastDiff := patches[last].diffs[len(patches[last].diffs)-1] + extraLength := paddingLength - len(lastDiff.Text) + patches[last].diffs[len(patches[last].diffs)-1].Text += nullPadding[:extraLength] + patches[last].Length1 += extraLength + patches[last].Length2 += extraLength + } + + return nullPadding +} + +// PatchSplitMax looks through the patches and breaks up any which are longer than the maximum limit of the match algorithm. +// Intended to be called only from within patchApply. +func (dmp *DiffMatchPatch) PatchSplitMax(patches []Patch) []Patch { + patchSize := dmp.MatchMaxBits + for x := 0; x < len(patches); x++ { + if patches[x].Length1 <= patchSize { + continue + } + bigpatch := patches[x] + // Remove the big old patch. + patches = append(patches[:x], patches[x+1:]...) + x-- + + Start1 := bigpatch.Start1 + Start2 := bigpatch.Start2 + precontext := "" + for len(bigpatch.diffs) != 0 { + // Create one of several smaller patches. + patch := Patch{} + empty := true + patch.Start1 = Start1 - len(precontext) + patch.Start2 = Start2 - len(precontext) + if len(precontext) != 0 { + patch.Length1 = len(precontext) + patch.Length2 = len(precontext) + patch.diffs = append(patch.diffs, Diff{DiffEqual, precontext}) + } + for len(bigpatch.diffs) != 0 && patch.Length1 < patchSize-dmp.PatchMargin { + diffType := bigpatch.diffs[0].Type + diffText := bigpatch.diffs[0].Text + if diffType == DiffInsert { + // Insertions are harmless. + patch.Length2 += len(diffText) + Start2 += len(diffText) + patch.diffs = append(patch.diffs, bigpatch.diffs[0]) + bigpatch.diffs = bigpatch.diffs[1:] + empty = false + } else if diffType == DiffDelete && len(patch.diffs) == 1 && patch.diffs[0].Type == DiffEqual && len(diffText) > 2*patchSize { + // This is a large deletion. Let it pass in one chunk. + patch.Length1 += len(diffText) + Start1 += len(diffText) + empty = false + patch.diffs = append(patch.diffs, Diff{diffType, diffText}) + bigpatch.diffs = bigpatch.diffs[1:] + } else { + // Deletion or equality. Only take as much as we can stomach. + diffText = diffText[:min(len(diffText), patchSize-patch.Length1-dmp.PatchMargin)] + + patch.Length1 += len(diffText) + Start1 += len(diffText) + if diffType == DiffEqual { + patch.Length2 += len(diffText) + Start2 += len(diffText) + } else { + empty = false + } + patch.diffs = append(patch.diffs, Diff{diffType, diffText}) + if diffText == bigpatch.diffs[0].Text { + bigpatch.diffs = bigpatch.diffs[1:] + } else { + bigpatch.diffs[0].Text = + bigpatch.diffs[0].Text[len(diffText):] + } + } + } + // Compute the head context for the next patch. + precontext = dmp.DiffText2(patch.diffs) + precontext = precontext[max(0, len(precontext)-dmp.PatchMargin):] + + postcontext := "" + // Append the end context for this patch. 
+ if len(dmp.DiffText1(bigpatch.diffs)) > dmp.PatchMargin { + postcontext = dmp.DiffText1(bigpatch.diffs)[:dmp.PatchMargin] + } else { + postcontext = dmp.DiffText1(bigpatch.diffs) + } + + if len(postcontext) != 0 { + patch.Length1 += len(postcontext) + patch.Length2 += len(postcontext) + if len(patch.diffs) != 0 && patch.diffs[len(patch.diffs)-1].Type == DiffEqual { + patch.diffs[len(patch.diffs)-1].Text += postcontext + } else { + patch.diffs = append(patch.diffs, Diff{DiffEqual, postcontext}) + } + } + if !empty { + x++ + patches = append(patches[:x], append([]Patch{patch}, patches[x:]...)...) + } + } + } + return patches +} + +// PatchToText takes a list of patches and returns a textual representation. +func (dmp *DiffMatchPatch) PatchToText(patches []Patch) string { + var text bytes.Buffer + for _, aPatch := range patches { + _, _ = text.WriteString(aPatch.String()) + } + return text.String() +} + +// PatchFromText parses a textual representation of patches and returns a List of Patch objects. +func (dmp *DiffMatchPatch) PatchFromText(textline string) ([]Patch, error) { + patches := []Patch{} + if len(textline) == 0 { + return patches, nil + } + text := strings.Split(textline, "\n") + textPointer := 0 + patchHeader := regexp.MustCompile("^@@ -(\\d+),?(\\d*) \\+(\\d+),?(\\d*) @@$") + + var patch Patch + var sign uint8 + var line string + for textPointer < len(text) { + + if !patchHeader.MatchString(text[textPointer]) { + return patches, errors.New("Invalid patch string: " + text[textPointer]) + } + + patch = Patch{} + m := patchHeader.FindStringSubmatch(text[textPointer]) + + patch.Start1, _ = strconv.Atoi(m[1]) + if len(m[2]) == 0 { + patch.Start1-- + patch.Length1 = 1 + } else if m[2] == "0" { + patch.Length1 = 0 + } else { + patch.Start1-- + patch.Length1, _ = strconv.Atoi(m[2]) + } + + patch.Start2, _ = strconv.Atoi(m[3]) + + if len(m[4]) == 0 { + patch.Start2-- + patch.Length2 = 1 + } else if m[4] == "0" { + patch.Length2 = 0 + } else { + patch.Start2-- + patch.Length2, _ = strconv.Atoi(m[4]) + } + textPointer++ + + for textPointer < len(text) { + if len(text[textPointer]) > 0 { + sign = text[textPointer][0] + } else { + textPointer++ + continue + } + + line = text[textPointer][1:] + line = strings.Replace(line, "+", "%2b", -1) + line, _ = url.QueryUnescape(line) + if sign == '-' { + // Deletion. + patch.diffs = append(patch.diffs, Diff{DiffDelete, line}) + } else if sign == '+' { + // Insertion. + patch.diffs = append(patch.diffs, Diff{DiffInsert, line}) + } else if sign == ' ' { + // Minor equality. + patch.diffs = append(patch.diffs, Diff{DiffEqual, line}) + } else if sign == '@' { + // Start of next patch. + break + } else { + // WTF? + return patches, errors.New("Invalid patch mode '" + string(sign) + "' in: " + string(line)) + } + textPointer++ + } + + patches = append(patches, patch) + } + return patches, nil +} diff --git a/vendor/github.com/sergi/go-diff/diffmatchpatch/stringutil.go b/vendor/github.com/sergi/go-diff/diffmatchpatch/stringutil.go new file mode 100644 index 000000000..44c435954 --- /dev/null +++ b/vendor/github.com/sergi/go-diff/diffmatchpatch/stringutil.go @@ -0,0 +1,106 @@ +// Copyright (c) 2012-2016 The go-diff authors. All rights reserved. +// https://github.com/sergi/go-diff +// See the included LICENSE file for license details. +// +// go-diff is a Go implementation of Google's Diff, Match, and Patch library +// Original library is Copyright (c) 2006 Google Inc. 
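PatchMake, PatchToText/PatchFromText and PatchApply are typically used together: build patches from two versions of a text, serialize them, and later apply them to a possibly drifted copy. A minimal sketch of that round trip with invented sample texts and deliberately terse error handling:

```go
package main

import (
	"fmt"
	"log"

	"github.com/sergi/go-diff/diffmatchpatch"
)

func main() {
	dmp := diffmatchpatch.New()

	original := "The quick brown fox jumps over the lazy dog."
	edited := "The quick red fox leaps over the lazy dog."

	// Build patches from the two texts and serialize them.
	patches := dmp.PatchMake(original, edited)
	serialized := dmp.PatchToText(patches)

	// Later, or on another machine: parse and apply the patches.
	parsed, err := dmp.PatchFromText(serialized)
	if err != nil {
		log.Fatal(err)
	}
	result, applied := dmp.PatchApply(parsed, original)

	fmt.Println(result)  // should now match `edited`
	fmt.Println(applied) // one bool per patch, true if it applied cleanly
}
```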
+// http://code.google.com/p/google-diff-match-patch/ + +package diffmatchpatch + +import ( + "strconv" + "strings" + "unicode/utf8" +) + +// unescaper unescapes selected chars for compatibility with JavaScript's encodeURI. +// In speed critical applications this could be dropped since the receiving application will certainly decode these fine. Note that this function is case-sensitive. Thus "%3F" would not be unescaped. But this is ok because it is only called with the output of HttpUtility.UrlEncode which returns lowercase hex. Example: "%3f" -> "?", "%24" -> "$", etc. +var unescaper = strings.NewReplacer( + "%21", "!", "%7E", "~", "%27", "'", + "%28", "(", "%29", ")", "%3B", ";", + "%2F", "/", "%3F", "?", "%3A", ":", + "%40", "@", "%26", "&", "%3D", "=", + "%2B", "+", "%24", "$", "%2C", ",", "%23", "#", "%2A", "*") + +// indexOf returns the first index of pattern in str, starting at str[i]. +func indexOf(str string, pattern string, i int) int { + if i > len(str)-1 { + return -1 + } + if i <= 0 { + return strings.Index(str, pattern) + } + ind := strings.Index(str[i:], pattern) + if ind == -1 { + return -1 + } + return ind + i +} + +// lastIndexOf returns the last index of pattern in str, starting at str[i]. +func lastIndexOf(str string, pattern string, i int) int { + if i < 0 { + return -1 + } + if i >= len(str) { + return strings.LastIndex(str, pattern) + } + _, size := utf8.DecodeRuneInString(str[i:]) + return strings.LastIndex(str[:i+size], pattern) +} + +// runesIndexOf returns the index of pattern in target, starting at target[i]. +func runesIndexOf(target, pattern []rune, i int) int { + if i > len(target)-1 { + return -1 + } + if i <= 0 { + return runesIndex(target, pattern) + } + ind := runesIndex(target[i:], pattern) + if ind == -1 { + return -1 + } + return ind + i +} + +func runesEqual(r1, r2 []rune) bool { + if len(r1) != len(r2) { + return false + } + for i, c := range r1 { + if c != r2[i] { + return false + } + } + return true +} + +// runesIndex is the equivalent of strings.Index for rune slices. +func runesIndex(r1, r2 []rune) int { + last := len(r1) - len(r2) + for i := 0; i <= last; i++ { + if runesEqual(r1[i:i+len(r2)], r2) { + return i + } + } + return -1 +} + +func intArrayToString(ns []uint32) string { + if len(ns) == 0 { + return "" + } + + indexSeparator := IndexSeparator[0] + + // Appr. 3 chars per num plus the comma. + b := []byte{} + for _, n := range ns { + b = strconv.AppendInt(b, int64(n), 10) + b = append(b, indexSeparator) + } + b = b[:len(b)-1] + return string(b) +} diff --git a/vendor/go.etcd.io/etcd/api/v3/version/version.go b/vendor/go.etcd.io/etcd/api/v3/version/version.go index 6b5177fc3..03449b523 100644 --- a/vendor/go.etcd.io/etcd/api/v3/version/version.go +++ b/vendor/go.etcd.io/etcd/api/v3/version/version.go @@ -26,7 +26,7 @@ import ( var ( // MinClusterVersion is the min cluster version this etcd binary is compatible with. 
MinClusterVersion = "3.0.0" - Version = "3.5.14" + Version = "3.5.21" APIVersion = "unknown" // Git SHA Value will be set during build diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/logutil/zap.go b/vendor/go.etcd.io/etcd/client/pkg/v3/logutil/zap.go index 34f35b9f2..f0f3739aa 100644 --- a/vendor/go.etcd.io/etcd/client/pkg/v3/logutil/zap.go +++ b/vendor/go.etcd.io/etcd/client/pkg/v3/logutil/zap.go @@ -58,7 +58,7 @@ var DefaultZapLoggerConfig = zap.Config{ // Custom EncodeTime function to ensure we match format and precision of historic capnslog timestamps EncodeTime: func(t time.Time, enc zapcore.PrimitiveArrayEncoder) { - enc.AppendString(t.Format("2006-01-02T15:04:05.999999Z0700")) + enc.AppendString(t.Format("2006-01-02T15:04:05.000000Z0700")) }, EncodeDuration: zapcore.StringDurationEncoder, diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/transport/listener.go b/vendor/go.etcd.io/etcd/client/pkg/v3/transport/listener.go index 150545d08..a7d37688d 100644 --- a/vendor/go.etcd.io/etcd/client/pkg/v3/transport/listener.go +++ b/vendor/go.etcd.io/etcd/client/pkg/v3/transport/listener.go @@ -180,12 +180,23 @@ type TLSInfo struct { parseFunc func([]byte, []byte) (tls.Certificate, error) // AllowedCN is a CN which must be provided by a client. + // + // Deprecated: use AllowedCNs instead. AllowedCN string // AllowedHostname is an IP address or hostname that must match the TLS // certificate provided by a client. + // + // Deprecated: use AllowedHostnames instead. AllowedHostname string + // AllowedCNs is a list of acceptable CNs which must be provided by a client. + AllowedCNs []string + + // AllowedHostnames is a list of acceptable IP addresses or hostnames that must match the + // TLS certificate provided by a client. + AllowedHostnames []string + // Logger logs TLS errors. // If nil, all logs are discarded. Logger *zap.Logger @@ -407,19 +418,52 @@ func (info TLSInfo) baseConfig() (*tls.Config, error) { // Client certificates may be verified by either an exact match on the CN, // or a more general check of the CN and SANs. 
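The new AllowedCNs and AllowedHostnames fields replace the deprecated singular variants and, as the checks below show, must not be combined with them. A hedged sketch of how a caller might populate the list form when building a server-side TLS config; the certificate paths and common names are placeholders, and CertFile/KeyFile/TrustedCAFile and ServerConfig are pre-existing parts of TLSInfo not shown in this hunk:

```go
package main

import (
	"log"

	"go.etcd.io/etcd/client/pkg/v3/transport"
)

func main() {
	tlsInfo := transport.TLSInfo{
		CertFile:      "/etc/etcd/pki/server.crt", // placeholder paths
		KeyFile:       "/etc/etcd/pki/server.key",
		TrustedCAFile: "/etc/etcd/pki/ca.crt",

		// Prefer the plural form; setting AllowedCN at the same time
		// would be rejected as mutually exclusive.
		AllowedCNs: []string{"etcd-client", "kube-apiserver"},
	}

	cfg, err := tlsInfo.ServerConfig()
	if err != nil {
		log.Fatal(err)
	}
	_ = cfg // hand the *tls.Config to the gRPC/HTTP listener as needed
}
```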
var verifyCertificate func(*x509.Certificate) bool + + if info.AllowedCN != "" && len(info.AllowedCNs) > 0 { + return nil, fmt.Errorf("AllowedCN and AllowedCNs are mutually exclusive (cn=%q, cns=%q)", info.AllowedCN, info.AllowedCNs) + } + if info.AllowedHostname != "" && len(info.AllowedHostnames) > 0 { + return nil, fmt.Errorf("AllowedHostname and AllowedHostnames are mutually exclusive (hostname=%q, hostnames=%q)", info.AllowedHostname, info.AllowedHostnames) + } + if info.AllowedCN != "" && info.AllowedHostname != "" { + return nil, fmt.Errorf("AllowedCN and AllowedHostname are mutually exclusive (cn=%q, hostname=%q)", info.AllowedCN, info.AllowedHostname) + } + if len(info.AllowedCNs) > 0 && len(info.AllowedHostnames) > 0 { + return nil, fmt.Errorf("AllowedCNs and AllowedHostnames are mutually exclusive (cns=%q, hostnames=%q)", info.AllowedCNs, info.AllowedHostnames) + } + if info.AllowedCN != "" { - if info.AllowedHostname != "" { - return nil, fmt.Errorf("AllowedCN and AllowedHostname are mutually exclusive (cn=%q, hostname=%q)", info.AllowedCN, info.AllowedHostname) - } + info.Logger.Warn("AllowedCN is deprecated, use AllowedCNs instead") verifyCertificate = func(cert *x509.Certificate) bool { return info.AllowedCN == cert.Subject.CommonName } } if info.AllowedHostname != "" { + info.Logger.Warn("AllowedHostname is deprecated, use AllowedHostnames instead") verifyCertificate = func(cert *x509.Certificate) bool { return cert.VerifyHostname(info.AllowedHostname) == nil } } + if len(info.AllowedCNs) > 0 { + verifyCertificate = func(cert *x509.Certificate) bool { + for _, allowedCN := range info.AllowedCNs { + if allowedCN == cert.Subject.CommonName { + return true + } + } + return false + } + } + if len(info.AllowedHostnames) > 0 { + verifyCertificate = func(cert *x509.Certificate) bool { + for _, allowedHostname := range info.AllowedHostnames { + if cert.VerifyHostname(allowedHostname) == nil { + return true + } + } + return false + } + } if verifyCertificate != nil { cfg.VerifyPeerCertificate = func(rawCerts [][]byte, verifiedChains [][]*x509.Certificate) error { for _, chains := range verifiedChains { diff --git a/vendor/go.etcd.io/etcd/client/v3/README.md b/vendor/go.etcd.io/etcd/client/v3/README.md index 1e037d7eb..16c0fe888 100644 --- a/vendor/go.etcd.io/etcd/client/v3/README.md +++ b/vendor/go.etcd.io/etcd/client/v3/README.md @@ -11,13 +11,6 @@ go get go.etcd.io/etcd/client/v3 ``` -Warning: As etcd 3.5.0 was not yet released, the command above does not work. -After first pre-release of 3.5.0 [#12498](https://github.com/etcd-io/etcd/issues/12498), -etcd can be referenced using: -``` -go get go.etcd.io/etcd/client/v3@v3.5.0-pre -``` - ## Get started Create client using `clientv3.New`: diff --git a/vendor/go.etcd.io/etcd/client/v3/auth.go b/vendor/go.etcd.io/etcd/client/v3/auth.go index a6f75d321..110918a4c 100644 --- a/vendor/go.etcd.io/etcd/client/v3/auth.go +++ b/vendor/go.etcd.io/etcd/client/v3/auth.go @@ -134,67 +134,67 @@ func NewAuthFromAuthClient(remote pb.AuthClient, c *Client) Auth { func (auth *authClient) Authenticate(ctx context.Context, name string, password string) (*AuthenticateResponse, error) { resp, err := auth.remote.Authenticate(ctx, &pb.AuthenticateRequest{Name: name, Password: password}, auth.callOpts...) 
- return (*AuthenticateResponse)(resp), toErr(ctx, err) + return (*AuthenticateResponse)(resp), ContextError(ctx, err) } func (auth *authClient) AuthEnable(ctx context.Context) (*AuthEnableResponse, error) { resp, err := auth.remote.AuthEnable(ctx, &pb.AuthEnableRequest{}, auth.callOpts...) - return (*AuthEnableResponse)(resp), toErr(ctx, err) + return (*AuthEnableResponse)(resp), ContextError(ctx, err) } func (auth *authClient) AuthDisable(ctx context.Context) (*AuthDisableResponse, error) { resp, err := auth.remote.AuthDisable(ctx, &pb.AuthDisableRequest{}, auth.callOpts...) - return (*AuthDisableResponse)(resp), toErr(ctx, err) + return (*AuthDisableResponse)(resp), ContextError(ctx, err) } func (auth *authClient) AuthStatus(ctx context.Context) (*AuthStatusResponse, error) { resp, err := auth.remote.AuthStatus(ctx, &pb.AuthStatusRequest{}, auth.callOpts...) - return (*AuthStatusResponse)(resp), toErr(ctx, err) + return (*AuthStatusResponse)(resp), ContextError(ctx, err) } func (auth *authClient) UserAdd(ctx context.Context, name string, password string) (*AuthUserAddResponse, error) { resp, err := auth.remote.UserAdd(ctx, &pb.AuthUserAddRequest{Name: name, Password: password, Options: &authpb.UserAddOptions{NoPassword: false}}, auth.callOpts...) - return (*AuthUserAddResponse)(resp), toErr(ctx, err) + return (*AuthUserAddResponse)(resp), ContextError(ctx, err) } func (auth *authClient) UserAddWithOptions(ctx context.Context, name string, password string, options *UserAddOptions) (*AuthUserAddResponse, error) { resp, err := auth.remote.UserAdd(ctx, &pb.AuthUserAddRequest{Name: name, Password: password, Options: (*authpb.UserAddOptions)(options)}, auth.callOpts...) - return (*AuthUserAddResponse)(resp), toErr(ctx, err) + return (*AuthUserAddResponse)(resp), ContextError(ctx, err) } func (auth *authClient) UserDelete(ctx context.Context, name string) (*AuthUserDeleteResponse, error) { resp, err := auth.remote.UserDelete(ctx, &pb.AuthUserDeleteRequest{Name: name}, auth.callOpts...) - return (*AuthUserDeleteResponse)(resp), toErr(ctx, err) + return (*AuthUserDeleteResponse)(resp), ContextError(ctx, err) } func (auth *authClient) UserChangePassword(ctx context.Context, name string, password string) (*AuthUserChangePasswordResponse, error) { resp, err := auth.remote.UserChangePassword(ctx, &pb.AuthUserChangePasswordRequest{Name: name, Password: password}, auth.callOpts...) - return (*AuthUserChangePasswordResponse)(resp), toErr(ctx, err) + return (*AuthUserChangePasswordResponse)(resp), ContextError(ctx, err) } func (auth *authClient) UserGrantRole(ctx context.Context, user string, role string) (*AuthUserGrantRoleResponse, error) { resp, err := auth.remote.UserGrantRole(ctx, &pb.AuthUserGrantRoleRequest{User: user, Role: role}, auth.callOpts...) - return (*AuthUserGrantRoleResponse)(resp), toErr(ctx, err) + return (*AuthUserGrantRoleResponse)(resp), ContextError(ctx, err) } func (auth *authClient) UserGet(ctx context.Context, name string) (*AuthUserGetResponse, error) { resp, err := auth.remote.UserGet(ctx, &pb.AuthUserGetRequest{Name: name}, auth.callOpts...) - return (*AuthUserGetResponse)(resp), toErr(ctx, err) + return (*AuthUserGetResponse)(resp), ContextError(ctx, err) } func (auth *authClient) UserList(ctx context.Context) (*AuthUserListResponse, error) { resp, err := auth.remote.UserList(ctx, &pb.AuthUserListRequest{}, auth.callOpts...) 
- return (*AuthUserListResponse)(resp), toErr(ctx, err) + return (*AuthUserListResponse)(resp), ContextError(ctx, err) } func (auth *authClient) UserRevokeRole(ctx context.Context, name string, role string) (*AuthUserRevokeRoleResponse, error) { resp, err := auth.remote.UserRevokeRole(ctx, &pb.AuthUserRevokeRoleRequest{Name: name, Role: role}, auth.callOpts...) - return (*AuthUserRevokeRoleResponse)(resp), toErr(ctx, err) + return (*AuthUserRevokeRoleResponse)(resp), ContextError(ctx, err) } func (auth *authClient) RoleAdd(ctx context.Context, name string) (*AuthRoleAddResponse, error) { resp, err := auth.remote.RoleAdd(ctx, &pb.AuthRoleAddRequest{Name: name}, auth.callOpts...) - return (*AuthRoleAddResponse)(resp), toErr(ctx, err) + return (*AuthRoleAddResponse)(resp), ContextError(ctx, err) } func (auth *authClient) RoleGrantPermission(ctx context.Context, name string, key, rangeEnd string, permType PermissionType) (*AuthRoleGrantPermissionResponse, error) { @@ -204,27 +204,27 @@ func (auth *authClient) RoleGrantPermission(ctx context.Context, name string, ke PermType: authpb.Permission_Type(permType), } resp, err := auth.remote.RoleGrantPermission(ctx, &pb.AuthRoleGrantPermissionRequest{Name: name, Perm: perm}, auth.callOpts...) - return (*AuthRoleGrantPermissionResponse)(resp), toErr(ctx, err) + return (*AuthRoleGrantPermissionResponse)(resp), ContextError(ctx, err) } func (auth *authClient) RoleGet(ctx context.Context, role string) (*AuthRoleGetResponse, error) { resp, err := auth.remote.RoleGet(ctx, &pb.AuthRoleGetRequest{Role: role}, auth.callOpts...) - return (*AuthRoleGetResponse)(resp), toErr(ctx, err) + return (*AuthRoleGetResponse)(resp), ContextError(ctx, err) } func (auth *authClient) RoleList(ctx context.Context) (*AuthRoleListResponse, error) { resp, err := auth.remote.RoleList(ctx, &pb.AuthRoleListRequest{}, auth.callOpts...) - return (*AuthRoleListResponse)(resp), toErr(ctx, err) + return (*AuthRoleListResponse)(resp), ContextError(ctx, err) } func (auth *authClient) RoleRevokePermission(ctx context.Context, role string, key, rangeEnd string) (*AuthRoleRevokePermissionResponse, error) { resp, err := auth.remote.RoleRevokePermission(ctx, &pb.AuthRoleRevokePermissionRequest{Role: role, Key: []byte(key), RangeEnd: []byte(rangeEnd)}, auth.callOpts...) - return (*AuthRoleRevokePermissionResponse)(resp), toErr(ctx, err) + return (*AuthRoleRevokePermissionResponse)(resp), ContextError(ctx, err) } func (auth *authClient) RoleDelete(ctx context.Context, role string) (*AuthRoleDeleteResponse, error) { resp, err := auth.remote.RoleDelete(ctx, &pb.AuthRoleDeleteRequest{Role: role}, auth.callOpts...) 
- return (*AuthRoleDeleteResponse)(resp), toErr(ctx, err) + return (*AuthRoleDeleteResponse)(resp), ContextError(ctx, err) } func StrToPermissionType(s string) (PermissionType, error) { diff --git a/vendor/go.etcd.io/etcd/client/v3/client.go b/vendor/go.etcd.io/etcd/client/v3/client.go index 312d03e7a..f7aa65a0a 100644 --- a/vendor/go.etcd.io/etcd/client/v3/client.go +++ b/vendor/go.etcd.io/etcd/client/v3/client.go @@ -148,7 +148,7 @@ func (c *Client) Close() error { c.Lease.Close() } if c.conn != nil { - return toErr(c.ctx, c.conn.Close()) + return ContextError(c.ctx, c.conn.Close()) } return c.ctx.Err() } @@ -573,7 +573,9 @@ func isUnavailableErr(ctx context.Context, err error) bool { return false } -func toErr(ctx context.Context, err error) error { +// ContextError converts the error into an EtcdError if the error message matches one of +// the defined messages; otherwise, it tries to retrieve the context error. +func ContextError(ctx context.Context, err error) error { if err == nil { return nil } diff --git a/vendor/go.etcd.io/etcd/client/v3/cluster.go b/vendor/go.etcd.io/etcd/client/v3/cluster.go index 92d7cdb56..1815c1c96 100644 --- a/vendor/go.etcd.io/etcd/client/v3/cluster.go +++ b/vendor/go.etcd.io/etcd/client/v3/cluster.go @@ -93,7 +93,7 @@ func (c *cluster) memberAdd(ctx context.Context, peerAddrs []string, isLearner b } resp, err := c.remote.MemberAdd(ctx, r, c.callOpts...) if err != nil { - return nil, toErr(ctx, err) + return nil, ContextError(ctx, err) } return (*MemberAddResponse)(resp), nil } @@ -102,7 +102,7 @@ func (c *cluster) MemberRemove(ctx context.Context, id uint64) (*MemberRemoveRes r := &pb.MemberRemoveRequest{ID: id} resp, err := c.remote.MemberRemove(ctx, r, c.callOpts...) if err != nil { - return nil, toErr(ctx, err) + return nil, ContextError(ctx, err) } return (*MemberRemoveResponse)(resp), nil } @@ -119,7 +119,7 @@ func (c *cluster) MemberUpdate(ctx context.Context, id uint64, peerAddrs []strin if err == nil { return (*MemberUpdateResponse)(resp), nil } - return nil, toErr(ctx, err) + return nil, ContextError(ctx, err) } func (c *cluster) MemberList(ctx context.Context) (*MemberListResponse, error) { @@ -128,14 +128,14 @@ func (c *cluster) MemberList(ctx context.Context) (*MemberListResponse, error) { if err == nil { return (*MemberListResponse)(resp), nil } - return nil, toErr(ctx, err) + return nil, ContextError(ctx, err) } func (c *cluster) MemberPromote(ctx context.Context, id uint64) (*MemberPromoteResponse, error) { r := &pb.MemberPromoteRequest{ID: id} resp, err := c.remote.MemberPromote(ctx, r, c.callOpts...) if err != nil { - return nil, toErr(ctx, err) + return nil, ContextError(ctx, err) } return (*MemberPromoteResponse)(resp), nil } diff --git a/vendor/go.etcd.io/etcd/client/v3/kubernetes/client.go b/vendor/go.etcd.io/etcd/client/v3/kubernetes/client.go new file mode 100644 index 000000000..11f2a4564 --- /dev/null +++ b/vendor/go.etcd.io/etcd/client/v3/kubernetes/client.go @@ -0,0 +1,136 @@ +// Copyright 2024 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
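The hunk above renames the unexported `toErr` helper to the exported `ContextError` and gives it a doc comment. For context, here is a minimal sketch of how downstream code might use the newly exported helper; the endpoint, key, and timeout values are illustrative assumptions, not taken from this repository.

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"time"

	clientv3 "go.etcd.io/etcd/client/v3"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"127.0.0.1:2379"}, // assumed endpoint
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()

	_, err = cli.Get(ctx, "example-key") // assumed key
	// ContextError maps gRPC errors onto etcd's typed errors, or onto the
	// context error when the context was canceled or timed out, so callers
	// can compare against sentinel values.
	if cerr := clientv3.ContextError(ctx, err); cerr != nil {
		if errors.Is(cerr, context.DeadlineExceeded) {
			fmt.Println("request timed out")
			return
		}
		fmt.Println("get failed:", cerr)
	}
}
```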
+// See the License for the specific language governing permissions and +// limitations under the License. + +package kubernetes + +import ( + "context" + "fmt" + + pb "go.etcd.io/etcd/api/v3/etcdserverpb" + "go.etcd.io/etcd/api/v3/mvccpb" + clientv3 "go.etcd.io/etcd/client/v3" +) + +// New creates Client from config. +// Caller is responsible to call Close() to clean up client. +func New(cfg clientv3.Config) (*Client, error) { + c, err := clientv3.New(cfg) + if err != nil { + return nil, err + } + kc := &Client{ + Client: c, + } + kc.Kubernetes = kc + return kc, nil +} + +type Client struct { + *clientv3.Client + Kubernetes Interface +} + +var _ Interface = (*Client)(nil) + +func (k Client) Get(ctx context.Context, key string, opts GetOptions) (resp GetResponse, err error) { + rangeResp, err := k.KV.Get(ctx, key, clientv3.WithRev(opts.Revision), clientv3.WithLimit(1)) + if err != nil { + return resp, err + } + resp.Revision = rangeResp.Header.Revision + if len(rangeResp.Kvs) == 1 { + resp.KV = rangeResp.Kvs[0] + } + return resp, nil +} + +func (k Client) List(ctx context.Context, prefix string, opts ListOptions) (resp ListResponse, err error) { + rangeStart := prefix + if opts.Continue != "" { + rangeStart = opts.Continue + } + rangeEnd := clientv3.GetPrefixRangeEnd(prefix) + rangeResp, err := k.KV.Get(ctx, rangeStart, clientv3.WithRange(rangeEnd), clientv3.WithLimit(opts.Limit), clientv3.WithRev(opts.Revision)) + if err != nil { + return resp, err + } + resp.Kvs = rangeResp.Kvs + resp.Count = rangeResp.Count + resp.Revision = rangeResp.Header.Revision + return resp, nil +} + +func (k Client) Count(ctx context.Context, prefix string, _ CountOptions) (int64, error) { + resp, err := k.KV.Get(ctx, prefix, clientv3.WithPrefix(), clientv3.WithCountOnly()) + if err != nil { + return 0, err + } + return resp.Count, nil +} + +func (k Client) OptimisticPut(ctx context.Context, key string, value []byte, expectedRevision int64, opts PutOptions) (resp PutResponse, err error) { + txn := k.KV.Txn(ctx).If( + clientv3.Compare(clientv3.ModRevision(key), "=", expectedRevision), + ).Then( + clientv3.OpPut(key, string(value), clientv3.WithLease(opts.LeaseID)), + ) + + if opts.GetOnFailure { + txn = txn.Else(clientv3.OpGet(key)) + } + + txnResp, err := txn.Commit() + if err != nil { + return resp, err + } + resp.Succeeded = txnResp.Succeeded + resp.Revision = txnResp.Header.Revision + if opts.GetOnFailure && !txnResp.Succeeded { + if len(txnResp.Responses) == 0 { + return resp, fmt.Errorf("invalid OptimisticPut response: %v", txnResp.Responses) + } + resp.KV = kvFromTxnResponse(txnResp.Responses[0]) + } + return resp, nil +} + +func (k Client) OptimisticDelete(ctx context.Context, key string, expectedRevision int64, opts DeleteOptions) (resp DeleteResponse, err error) { + txn := k.KV.Txn(ctx).If( + clientv3.Compare(clientv3.ModRevision(key), "=", expectedRevision), + ).Then( + clientv3.OpDelete(key), + ) + if opts.GetOnFailure { + txn = txn.Else(clientv3.OpGet(key)) + } + txnResp, err := txn.Commit() + if err != nil { + return resp, err + } + resp.Succeeded = txnResp.Succeeded + resp.Revision = txnResp.Header.Revision + if opts.GetOnFailure && !txnResp.Succeeded { + resp.KV = kvFromTxnResponse(txnResp.Responses[0]) + } + return resp, nil +} + +func kvFromTxnResponse(resp *pb.ResponseOp) *mvccpb.KeyValue { + getResponse := resp.GetResponseRange() + if len(getResponse.Kvs) == 1 { + return getResponse.Kvs[0] + } + return nil +} diff --git a/vendor/go.etcd.io/etcd/client/v3/kubernetes/interface.go 
b/vendor/go.etcd.io/etcd/client/v3/kubernetes/interface.go new file mode 100644 index 000000000..19b82a629 --- /dev/null +++ b/vendor/go.etcd.io/etcd/client/v3/kubernetes/interface.go @@ -0,0 +1,140 @@ +// Copyright 2024 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package kubernetes + +import ( + "context" + + "go.etcd.io/etcd/api/v3/mvccpb" + clientv3 "go.etcd.io/etcd/client/v3" +) + +// Interface defines the minimal client-side interface that Kubernetes requires +// to interact with etcd. Methods below are standard etcd operations with +// semantics adjusted to better suit Kubernetes' needs. +type Interface interface { + // Get retrieves a single key-value pair from etcd. + // + // If opts.Revision is set to a non-zero value, the key-value pair is retrieved at the specified revision. + // If the required revision has been compacted, the request will fail with ErrCompacted. + Get(ctx context.Context, key string, opts GetOptions) (GetResponse, error) + + // List retrieves key-value pairs with the specified prefix, ordered lexicographically by key. + // + // If opts.Revision is non-zero, the key-value pairs are retrieved at the specified revision. + // If the required revision has been compacted, the request will fail with ErrCompacted. + // If opts.Limit is greater than zero, the number of returned key-value pairs is bounded by the limit. + // If opts.Continue is not empty, the listing will start from the key immediately after the one specified by Continue. + // The Continue value should be the last key returned in a previous paginated ListResponse. + List(ctx context.Context, prefix string, opts ListOptions) (ListResponse, error) + + // Count returns the number of keys with the specified prefix. + // + // Currently, there are no options for the Count operation. However, a placeholder options struct (CountOptions) + // is provided for future extensibility in case options become necessary. + Count(ctx context.Context, prefix string, opts CountOptions) (int64, error) + + // OptimisticPut creates or updates a key-value pair if the key has not been modified or created + // since the revision specified in expectedRevision. + // + // An OptimisticPut fails if the key has been modified since expectedRevision. + OptimisticPut(ctx context.Context, key string, value []byte, expectedRevision int64, opts PutOptions) (PutResponse, error) + + // OptimisticDelete deletes the key-value pair if it hasn't been modified since the revision + // specified in expectedRevision. + // + // An OptimisticDelete fails if the key has been modified since expectedRevision. + OptimisticDelete(ctx context.Context, key string, expectedRevision int64, opts DeleteOptions) (DeleteResponse, error) +} + +type GetOptions struct { + // Revision is the point-in-time of the etcd key-value store to use for the Get operation. + // If Revision is 0, it gets the latest value. 
+ Revision int64 +} + +type ListOptions struct { + // Revision is the point-in-time of the etcd key-value store to use for the List operation. + // If Revision is 0, it gets the latest values. + Revision int64 + + // Limit is the maximum number of keys to return for a List operation. + // 0 means no limitation. + Limit int64 + + // Continue is a key from which to resume the List operation, excluding the given key. + // It should be set to the last key from a previous ListResponse when paginating. + Continue string +} + +// CountOptions is a placeholder for potential future options for the Count operation. +type CountOptions struct{} + +type PutOptions struct { + // GetOnFailure specifies whether to return the modified key-value pair if the Put operation fails due to a revision mismatch. + GetOnFailure bool + + // LeaseID is the ID of a lease to associate with the key allowing for automatic deletion after lease expires after it's TTL (time to live). + // Deprecated: Should be replaced with TTL when Interface starts using one lease per object. + LeaseID clientv3.LeaseID +} + +type DeleteOptions struct { + // GetOnFailure specifies whether to return the modified key-value pair if the Delete operation fails due to a revision mismatch. + GetOnFailure bool +} + +type GetResponse struct { + // KV is the key-value pair retrieved from etcd. + KV *mvccpb.KeyValue + + // Revision is the revision of the key-value store at the time of the Get operation. + Revision int64 +} + +type ListResponse struct { + // Kvs is the list of key-value pairs retrieved from etcd, ordered lexicographically by key. + Kvs []*mvccpb.KeyValue + + // Count is the total number of keys with the specified prefix, even if not all were returned due to a limit. + Count int64 + + // Revision is the revision of the key-value store at the time of the List operation. + Revision int64 +} + +type PutResponse struct { + // KV is the created or updated key-value pair. If the Put operation failed and GetOnFailure was true, this + // will be the modified key-value pair that caused the failure. + KV *mvccpb.KeyValue + + // Succeeded indicates whether the Put operation was successful. + Succeeded bool + + // Revision is the revision of the key-value store after the Put operation. + Revision int64 +} + +type DeleteResponse struct { + // KV is the deleted key-value pair. If the Delete operation failed and GetOnFailure was true, this + // will be the modified key-value pair that caused the failure. + KV *mvccpb.KeyValue + + // Succeeded indicates whether the Delete operation was successful. + Succeeded bool + + // Revision is the revision of the key-value store after the Delete operation. 
+ Revision int64 +} diff --git a/vendor/go.etcd.io/etcd/client/v3/kv.go b/vendor/go.etcd.io/etcd/client/v3/kv.go index 5e9fb7d45..be5b508dd 100644 --- a/vendor/go.etcd.io/etcd/client/v3/kv.go +++ b/vendor/go.etcd.io/etcd/client/v3/kv.go @@ -112,23 +112,23 @@ func NewKVFromKVClient(remote pb.KVClient, c *Client) KV { func (kv *kv) Put(ctx context.Context, key, val string, opts ...OpOption) (*PutResponse, error) { r, err := kv.Do(ctx, OpPut(key, val, opts...)) - return r.put, toErr(ctx, err) + return r.put, ContextError(ctx, err) } func (kv *kv) Get(ctx context.Context, key string, opts ...OpOption) (*GetResponse, error) { r, err := kv.Do(ctx, OpGet(key, opts...)) - return r.get, toErr(ctx, err) + return r.get, ContextError(ctx, err) } func (kv *kv) Delete(ctx context.Context, key string, opts ...OpOption) (*DeleteResponse, error) { r, err := kv.Do(ctx, OpDelete(key, opts...)) - return r.del, toErr(ctx, err) + return r.del, ContextError(ctx, err) } func (kv *kv) Compact(ctx context.Context, rev int64, opts ...CompactOption) (*CompactResponse, error) { resp, err := kv.remote.Compact(ctx, OpCompact(rev, opts...).toRequest(), kv.callOpts...) if err != nil { - return nil, toErr(ctx, err) + return nil, ContextError(ctx, err) } return (*CompactResponse)(resp), err } @@ -173,5 +173,5 @@ func (kv *kv) Do(ctx context.Context, op Op) (OpResponse, error) { default: panic("Unknown op") } - return OpResponse{}, toErr(ctx, err) + return OpResponse{}, ContextError(ctx, err) } diff --git a/vendor/go.etcd.io/etcd/client/v3/lease.go b/vendor/go.etcd.io/etcd/client/v3/lease.go index 19af9c093..4877ee949 100644 --- a/vendor/go.etcd.io/etcd/client/v3/lease.go +++ b/vendor/go.etcd.io/etcd/client/v3/lease.go @@ -223,7 +223,7 @@ func (l *lessor) Grant(ctx context.Context, ttl int64) (*LeaseGrantResponse, err } return gresp, nil } - return nil, toErr(ctx, err) + return nil, ContextError(ctx, err) } func (l *lessor) Revoke(ctx context.Context, id LeaseID) (*LeaseRevokeResponse, error) { @@ -232,14 +232,14 @@ func (l *lessor) Revoke(ctx context.Context, id LeaseID) (*LeaseRevokeResponse, if err == nil { return (*LeaseRevokeResponse)(resp), nil } - return nil, toErr(ctx, err) + return nil, ContextError(ctx, err) } func (l *lessor) TimeToLive(ctx context.Context, id LeaseID, opts ...LeaseOption) (*LeaseTimeToLiveResponse, error) { r := toLeaseTimeToLiveRequest(id, opts...) resp, err := l.remote.LeaseTimeToLive(ctx, r, l.callOpts...) if err != nil { - return nil, toErr(ctx, err) + return nil, ContextError(ctx, err) } gresp := &LeaseTimeToLiveResponse{ ResponseHeader: resp.GetHeader(), @@ -260,9 +260,15 @@ func (l *lessor) Leases(ctx context.Context) (*LeaseLeasesResponse, error) { } return &LeaseLeasesResponse{ResponseHeader: resp.GetHeader(), Leases: leases}, nil } - return nil, toErr(ctx, err) + return nil, ContextError(ctx, err) } +// To identify the context passed to `KeepAlive`, a key/value pair is +// attached to the context. The key is a `keepAliveCtxKey` object, and +// the value is the pointer to the context object itself, ensuring +// uniqueness as each context has a unique memory address. 
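Taken together, the new `kubernetes/client.go` and `kubernetes/interface.go` above add a thin, Kubernetes-oriented wrapper around the etcd v3 client: plain reads plus compare-and-swap style `OptimisticPut`/`OptimisticDelete`. A brief usage sketch of that wrapper follows; the endpoint and the `/registry/...` key are assumptions made for illustration.

```go
package main

import (
	"context"
	"fmt"

	clientv3 "go.etcd.io/etcd/client/v3"
	"go.etcd.io/etcd/client/v3/kubernetes"
)

func main() {
	kc, err := kubernetes.New(clientv3.Config{Endpoints: []string{"127.0.0.1:2379"}})
	if err != nil {
		panic(err)
	}
	defer kc.Close()

	ctx := context.Background()

	// Read the current value (and its revision) of a single key.
	get, err := kc.Kubernetes.Get(ctx, "/registry/pods/default/foo", kubernetes.GetOptions{})
	if err != nil {
		panic(err)
	}

	// Update the key only if it has not changed since the revision we just
	// observed; if the key did not exist, expectedRevision stays 0 (create).
	var expected int64
	if get.KV != nil {
		expected = get.KV.ModRevision
	}
	put, err := kc.Kubernetes.OptimisticPut(ctx, "/registry/pods/default/foo",
		[]byte("updated"), expected, kubernetes.PutOptions{GetOnFailure: true})
	if err != nil {
		panic(err)
	}
	fmt.Println("put succeeded:", put.Succeeded, "store revision:", put.Revision)
}
```

When `Succeeded` is false and `GetOnFailure` was set, `put.KV` holds the conflicting key-value pair, which lets a caller retry with the newer revision.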
+type keepAliveCtxKey struct{} + func (l *lessor) KeepAlive(ctx context.Context, id LeaseID) (<-chan *LeaseKeepAliveResponse, error) { ch := make(chan *LeaseKeepAliveResponse, LeaseResponseChSize) @@ -277,6 +283,10 @@ func (l *lessor) KeepAlive(ctx context.Context, id LeaseID) (<-chan *LeaseKeepAl default: } ka, ok := l.keepAlives[id] + + if ctx.Done() != nil { + ctx = context.WithValue(ctx, keepAliveCtxKey{}, &ctx) + } if !ok { // create fresh keep alive ka = &keepAlive{ @@ -315,7 +325,7 @@ func (l *lessor) KeepAliveOnce(ctx context.Context, id LeaseID) (*LeaseKeepAlive return resp, err } if isHaltErr(ctx, err) { - return nil, toErr(ctx, err) + return nil, ContextError(ctx, err) } } } @@ -347,7 +357,7 @@ func (l *lessor) keepAliveCtxCloser(ctx context.Context, id LeaseID, donec <-cha // close channel and remove context if still associated with keep alive for i, c := range ka.ctxs { - if c == ctx { + if c.Value(keepAliveCtxKey{}) == ctx.Value(keepAliveCtxKey{}) { close(ka.chs[i]) ka.ctxs = append(ka.ctxs[:i], ka.ctxs[i+1:]...) ka.chs = append(ka.chs[:i], ka.chs[i+1:]...) @@ -405,13 +415,13 @@ func (l *lessor) keepAliveOnce(ctx context.Context, id LeaseID) (karesp *LeaseKe stream, err := l.remote.LeaseKeepAlive(cctx, l.callOpts...) if err != nil { - return nil, toErr(ctx, err) + return nil, ContextError(ctx, err) } defer func() { if err := stream.CloseSend(); err != nil { if ferr == nil { - ferr = toErr(ctx, err) + ferr = ContextError(ctx, err) } return } @@ -419,12 +429,12 @@ func (l *lessor) keepAliveOnce(ctx context.Context, id LeaseID) (karesp *LeaseKe err = stream.Send(&pb.LeaseKeepAliveRequest{ID: int64(id)}) if err != nil { - return nil, toErr(ctx, err) + return nil, ContextError(ctx, err) } resp, rerr := stream.Recv() if rerr != nil { - return nil, toErr(ctx, rerr) + return nil, ContextError(ctx, rerr) } karesp = &LeaseKeepAliveResponse{ @@ -461,7 +471,7 @@ func (l *lessor) recvKeepAliveLoop() (gerr error) { return err } - if toErr(l.stopCtx, err) == rpctypes.ErrNoLeader { + if ContextError(l.stopCtx, err) == rpctypes.ErrNoLeader { l.closeRequireLeader() } break diff --git a/vendor/go.etcd.io/etcd/client/v3/maintenance.go b/vendor/go.etcd.io/etcd/client/v3/maintenance.go index a98b8ca51..71b28e6dc 100644 --- a/vendor/go.etcd.io/etcd/client/v3/maintenance.go +++ b/vendor/go.etcd.io/etcd/client/v3/maintenance.go @@ -130,7 +130,7 @@ func (m *maintenance) AlarmList(ctx context.Context) (*AlarmResponse, error) { if err == nil { return (*AlarmResponse)(resp), nil } - return nil, toErr(ctx, err) + return nil, ContextError(ctx, err) } func (m *maintenance) AlarmDisarm(ctx context.Context, am *AlarmMember) (*AlarmResponse, error) { @@ -143,13 +143,13 @@ func (m *maintenance) AlarmDisarm(ctx context.Context, am *AlarmMember) (*AlarmR if req.MemberID == 0 && req.Alarm == pb.AlarmType_NONE { ar, err := m.AlarmList(ctx) if err != nil { - return nil, toErr(ctx, err) + return nil, ContextError(ctx, err) } ret := AlarmResponse{} for _, am := range ar.Alarms { dresp, derr := m.AlarmDisarm(ctx, (*AlarmMember)(am)) if derr != nil { - return nil, toErr(ctx, derr) + return nil, ContextError(ctx, derr) } ret.Alarms = append(ret.Alarms, dresp.Alarms...) 
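The `keepAliveCtxKey` comment and the changed comparison in the lease hunk above rely on a general technique: stashing a per-context value under a private key type so that two contexts derived from the same parent can be told apart later. A standalone sketch of that pattern, independent of etcd and with invented names:

```go
package main

import (
	"context"
	"fmt"
)

// ctxKey is a private key type so no other package can collide with it.
type ctxKey struct{}

func main() {
	parent := context.Background()
	// Each context gets its own pointer value, giving it a unique identity.
	ctx1 := context.WithValue(parent, ctxKey{}, new(int))
	ctx2 := context.WithValue(parent, ctxKey{}, new(int))

	fmt.Println(ctx1.Value(ctxKey{}) == ctx1.Value(ctxKey{})) // true: same context
	fmt.Println(ctx1.Value(ctxKey{}) == ctx2.Value(ctxKey{})) // false: different contexts
}
```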
} @@ -160,18 +160,18 @@ func (m *maintenance) AlarmDisarm(ctx context.Context, am *AlarmMember) (*AlarmR if err == nil { return (*AlarmResponse)(resp), nil } - return nil, toErr(ctx, err) + return nil, ContextError(ctx, err) } func (m *maintenance) Defragment(ctx context.Context, endpoint string) (*DefragmentResponse, error) { remote, cancel, err := m.dial(endpoint) if err != nil { - return nil, toErr(ctx, err) + return nil, ContextError(ctx, err) } defer cancel() resp, err := remote.Defragment(ctx, &pb.DefragmentRequest{}, m.callOpts...) if err != nil { - return nil, toErr(ctx, err) + return nil, ContextError(ctx, err) } return (*DefragmentResponse)(resp), nil } @@ -179,12 +179,12 @@ func (m *maintenance) Defragment(ctx context.Context, endpoint string) (*Defragm func (m *maintenance) Status(ctx context.Context, endpoint string) (*StatusResponse, error) { remote, cancel, err := m.dial(endpoint) if err != nil { - return nil, toErr(ctx, err) + return nil, ContextError(ctx, err) } defer cancel() resp, err := remote.Status(ctx, &pb.StatusRequest{}, m.callOpts...) if err != nil { - return nil, toErr(ctx, err) + return nil, ContextError(ctx, err) } return (*StatusResponse)(resp), nil } @@ -193,12 +193,12 @@ func (m *maintenance) HashKV(ctx context.Context, endpoint string, rev int64) (* remote, cancel, err := m.dial(endpoint) if err != nil { - return nil, toErr(ctx, err) + return nil, ContextError(ctx, err) } defer cancel() resp, err := remote.HashKV(ctx, &pb.HashKVRequest{Revision: rev}, m.callOpts...) if err != nil { - return nil, toErr(ctx, err) + return nil, ContextError(ctx, err) } return (*HashKVResponse)(resp), nil } @@ -206,7 +206,7 @@ func (m *maintenance) HashKV(ctx context.Context, endpoint string, rev int64) (* func (m *maintenance) Snapshot(ctx context.Context) (io.ReadCloser, error) { ss, err := m.remote.Snapshot(ctx, &pb.SnapshotRequest{}, append(m.callOpts, withMax(defaultStreamMaxRetries))...) if err != nil { - return nil, toErr(ctx, err) + return nil, ContextError(ctx, err) } m.lg.Info("opened snapshot stream; downloading") @@ -246,10 +246,10 @@ type snapshotReadCloser struct { func (rc *snapshotReadCloser) Read(p []byte) (n int, err error) { n, err = rc.ReadCloser.Read(p) - return n, toErr(rc.ctx, err) + return n, ContextError(rc.ctx, err) } func (m *maintenance) MoveLeader(ctx context.Context, transfereeID uint64) (*MoveLeaderResponse, error) { resp, err := m.remote.MoveLeader(ctx, &pb.MoveLeaderRequest{TargetID: transfereeID}, m.callOpts...) - return (*MoveLeaderResponse)(resp), toErr(ctx, err) + return (*MoveLeaderResponse)(resp), ContextError(ctx, err) } diff --git a/vendor/go.etcd.io/etcd/client/v3/txn.go b/vendor/go.etcd.io/etcd/client/v3/txn.go index 3f6a953cf..e31bfe0b9 100644 --- a/vendor/go.etcd.io/etcd/client/v3/txn.go +++ b/vendor/go.etcd.io/etcd/client/v3/txn.go @@ -144,7 +144,7 @@ func (txn *txn) Commit() (*TxnResponse, error) { var err error resp, err = txn.kv.remote.Txn(txn.ctx, r, txn.callOpts...) 
if err != nil { - return nil, toErr(txn.ctx, err) + return nil, ContextError(txn.ctx, err) } return (*TxnResponse)(resp), nil } diff --git a/vendor/go.etcd.io/etcd/client/v3/watch.go b/vendor/go.etcd.io/etcd/client/v3/watch.go index 41a6ec976..725e8a869 100644 --- a/vendor/go.etcd.io/etcd/client/v3/watch.go +++ b/vendor/go.etcd.io/etcd/client/v3/watch.go @@ -442,7 +442,7 @@ func (w *watchGrpcStream) close() (err error) { case err = <-w.errc: default: } - return toErr(w.ctx, err) + return ContextError(w.ctx, err) } func (w *watcher) closeStream(wgs *watchGrpcStream) { @@ -653,7 +653,7 @@ func (w *watchGrpcStream) run() { // watch client failed on Recv; spawn another if possible case err := <-w.errc: - if isHaltErr(w.ctx, err) || toErr(w.ctx, err) == v3rpc.ErrNoLeader { + if isHaltErr(w.ctx, err) || ContextError(w.ctx, err) == v3rpc.ErrNoLeader { closeErr = err return } @@ -1036,7 +1036,7 @@ func (pr *progressRequest) toPB() *pb.WatchRequest { func streamKeyFromCtx(ctx context.Context) string { if md, ok := metadata.FromOutgoingContext(ctx); ok { - return fmt.Sprintf("%+v", md) + return fmt.Sprintf("%+v", map[string][]string(md)) } return "" } diff --git a/vendor/go.opentelemetry.io/auto/sdk/CONTRIBUTING.md b/vendor/go.opentelemetry.io/auto/sdk/CONTRIBUTING.md new file mode 100644 index 000000000..773c9b643 --- /dev/null +++ b/vendor/go.opentelemetry.io/auto/sdk/CONTRIBUTING.md @@ -0,0 +1,27 @@ +# Contributing to go.opentelemetry.io/auto/sdk + +The `go.opentelemetry.io/auto/sdk` module is a purpose built OpenTelemetry SDK. +It is designed to be: + +0. An OpenTelemetry compliant SDK +1. Instrumented by auto-instrumentation (serializable into OTLP JSON) +2. Lightweight +3. User-friendly + +These design choices are listed in the order of their importance. + +The primary design goal of this module is to be an OpenTelemetry SDK. +This means that it needs to implement the Go APIs found in `go.opentelemetry.io/otel`. + +Having met the requirement of SDK compliance, this module needs to provide code that the `go.opentelemetry.io/auto` module can instrument. +The chosen approach to meet this goal is to ensure the telemetry from the SDK is serializable into JSON encoded OTLP. +This ensures then that the serialized form is compatible with other OpenTelemetry systems, and the auto-instrumentation can use these systems to deserialize any telemetry it is sent. + +Outside of these first two goals, the intended use becomes relevant. +This package is intended to be used in the `go.opentelemetry.io/otel` global API as a default when the auto-instrumentation is running. +Because of this, this package needs to not add unnecessary dependencies to that API. +Ideally, it adds none. +It also needs to operate efficiently. + +Finally, this module is designed to be user-friendly to Go development. +It hides complexity in order to provide simpler APIs when the previous goals can all still be met. diff --git a/vendor/go.opentelemetry.io/auto/sdk/LICENSE b/vendor/go.opentelemetry.io/auto/sdk/LICENSE new file mode 100644 index 000000000..261eeb9e9 --- /dev/null +++ b/vendor/go.opentelemetry.io/auto/sdk/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. 
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/go.opentelemetry.io/auto/sdk/VERSIONING.md b/vendor/go.opentelemetry.io/auto/sdk/VERSIONING.md new file mode 100644 index 000000000..088d19a6c --- /dev/null +++ b/vendor/go.opentelemetry.io/auto/sdk/VERSIONING.md @@ -0,0 +1,15 @@ +# Versioning + +This document describes the versioning policy for this module. +This policy is designed so the following goals can be achieved. + +**Users are provided a codebase of value that is stable and secure.** + +## Policy + +* Versioning of this module will be idiomatic of a Go project using [Go modules](https://github.com/golang/go/wiki/Modules). + * [Semantic import versioning](https://github.com/golang/go/wiki/Modules#semantic-import-versioning) will be used. + * Versions will comply with [semver 2.0](https://semver.org/spec/v2.0.0.html). + * Any `v2` or higher version of this module will be included as a `/vN` at the end of the module path used in `go.mod` files and in the package import path. + +* GitHub releases will be made for all releases. diff --git a/vendor/go.opentelemetry.io/auto/sdk/doc.go b/vendor/go.opentelemetry.io/auto/sdk/doc.go new file mode 100644 index 000000000..ad73d8cb9 --- /dev/null +++ b/vendor/go.opentelemetry.io/auto/sdk/doc.go @@ -0,0 +1,14 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +/* +Package sdk provides an auto-instrumentable OpenTelemetry SDK. + +An [go.opentelemetry.io/auto.Instrumentation] can be configured to target the +process running this SDK. In that case, all telemetry the SDK produces will be +processed and handled by that [go.opentelemetry.io/auto.Instrumentation]. + +By default, if there is no [go.opentelemetry.io/auto.Instrumentation] set to +auto-instrument the SDK, the SDK will not generate any telemetry. +*/ +package sdk diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/attr.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/attr.go new file mode 100644 index 000000000..af6ef171f --- /dev/null +++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/attr.go @@ -0,0 +1,58 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package telemetry + +// Attr is a key-value pair. +type Attr struct { + Key string `json:"key,omitempty"` + Value Value `json:"value,omitempty"` +} + +// String returns an Attr for a string value. +func String(key, value string) Attr { + return Attr{key, StringValue(value)} +} + +// Int64 returns an Attr for an int64 value. +func Int64(key string, value int64) Attr { + return Attr{key, Int64Value(value)} +} + +// Int returns an Attr for an int value. +func Int(key string, value int) Attr { + return Int64(key, int64(value)) +} + +// Float64 returns an Attr for a float64 value. +func Float64(key string, value float64) Attr { + return Attr{key, Float64Value(value)} +} + +// Bool returns an Attr for a bool value. +func Bool(key string, value bool) Attr { + return Attr{key, BoolValue(value)} +} + +// Bytes returns an Attr for a []byte value. +// The passed slice must not be changed after it is passed. +func Bytes(key string, value []byte) Attr { + return Attr{key, BytesValue(value)} +} + +// Slice returns an Attr for a []Value value. +// The passed slice must not be changed after it is passed. +func Slice(key string, value ...Value) Attr { + return Attr{key, SliceValue(value...)} +} + +// Map returns an Attr for a map value. +// The passed slice must not be changed after it is passed. 
+func Map(key string, value ...Attr) Attr { + return Attr{key, MapValue(value...)} +} + +// Equal reports whether a is equal to b. +func (a Attr) Equal(b Attr) bool { + return a.Key == b.Key && a.Value.Equal(b.Value) +} diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/doc.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/doc.go new file mode 100644 index 000000000..949e2165c --- /dev/null +++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/doc.go @@ -0,0 +1,8 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +/* +Package telemetry provides lightweight representations of OpenTelemetry +telemetry that is compatible with the OTLP JSON protobuf encoding. +*/ +package telemetry diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/id.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/id.go new file mode 100644 index 000000000..e854d7e84 --- /dev/null +++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/id.go @@ -0,0 +1,103 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package telemetry + +import ( + "encoding/hex" + "errors" + "fmt" +) + +const ( + traceIDSize = 16 + spanIDSize = 8 +) + +// TraceID is a custom data type that is used for all trace IDs. +type TraceID [traceIDSize]byte + +// String returns the hex string representation form of a TraceID. +func (tid TraceID) String() string { + return hex.EncodeToString(tid[:]) +} + +// IsEmpty returns true if the trace ID contains only zero bytes. +func (tid TraceID) IsEmpty() bool { + return tid == [traceIDSize]byte{} +} + +// MarshalJSON converts the trace ID into a hex string enclosed in quotes. +func (tid TraceID) MarshalJSON() ([]byte, error) { + if tid.IsEmpty() { + return []byte(`""`), nil + } + return marshalJSON(tid[:]) } + +// UnmarshalJSON inflates the trace ID from hex string, possibly enclosed in +// quotes. +func (tid *TraceID) UnmarshalJSON(data []byte) error { + *tid = [traceIDSize]byte{} + return unmarshalJSON(tid[:], data) +} + +// SpanID is a custom data type that is used for all span IDs. +type SpanID [spanIDSize]byte + +// String returns the hex string representation form of a SpanID. +func (sid SpanID) String() string { + return hex.EncodeToString(sid[:]) +} + +// IsEmpty returns true if the span ID contains only zero bytes. +func (sid SpanID) IsEmpty() bool { + return sid == [spanIDSize]byte{} +} + +// MarshalJSON converts span ID into a hex string enclosed in quotes. +func (sid SpanID) MarshalJSON() ([]byte, error) { + if sid.IsEmpty() { + return []byte(`""`), nil + } + return marshalJSON(sid[:]) +} + +// UnmarshalJSON decodes span ID from hex string, possibly enclosed in quotes. +func (sid *SpanID) UnmarshalJSON(data []byte) error { + *sid = [spanIDSize]byte{} + return unmarshalJSON(sid[:], data) +} + +// marshalJSON converts id into a hex string enclosed in quotes. +func marshalJSON(id []byte) ([]byte, error) { + // Plus 2 quote chars at the start and end. + hexLen := hex.EncodedLen(len(id)) + 2 + + b := make([]byte, hexLen) + hex.Encode(b[1:hexLen-1], id) + b[0], b[hexLen-1] = '"', '"' + + return b, nil +} + +// unmarshalJSON inflates trace id from hex string, possibly enclosed in quotes.
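The `attr.go` constructors added above are small wrappers that pair a key with a typed `Value`. Because the package is internal to the `go.opentelemetry.io/auto/sdk` module, the sketch below would only compile inside that module (for example as an external test file alongside the package); the attribute names are invented for illustration.

```go
package telemetry_test

import (
	"testing"

	"go.opentelemetry.io/auto/sdk/internal/telemetry"
)

func TestAttrConstructors(t *testing.T) {
	// Build a few attributes using the typed constructors.
	attrs := []telemetry.Attr{
		telemetry.String("http.method", "GET"),
		telemetry.Int("http.status_code", 200),
		telemetry.Bool("cache.hit", false),
		// Map nests attributes under a single key.
		telemetry.Map("peer", telemetry.String("host", "example.com")),
	}
	if !attrs[0].Equal(telemetry.String("http.method", "GET")) {
		t.Fatal("expected equal attributes")
	}
}
```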
+func unmarshalJSON(dst []byte, src []byte) error { + if l := len(src); l >= 2 && src[0] == '"' && src[l-1] == '"' { + src = src[1 : l-1] + } + nLen := len(src) + if nLen == 0 { + return nil + } + + if len(dst) != hex.DecodedLen(nLen) { + return errors.New("invalid length for ID") + } + + _, err := hex.Decode(dst, src) + if err != nil { + return fmt.Errorf("cannot unmarshal ID from string '%s': %w", string(src), err) + } + return nil +} diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/number.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/number.go new file mode 100644 index 000000000..29e629d66 --- /dev/null +++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/number.go @@ -0,0 +1,67 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package telemetry + +import ( + "encoding/json" + "strconv" +) + +// protoInt64 represents the protobuf encoding of integers which can be either +// strings or integers. +type protoInt64 int64 + +// Int64 returns the protoInt64 as an int64. +func (i *protoInt64) Int64() int64 { return int64(*i) } + +// UnmarshalJSON decodes both strings and integers. +func (i *protoInt64) UnmarshalJSON(data []byte) error { + if data[0] == '"' { + var str string + if err := json.Unmarshal(data, &str); err != nil { + return err + } + parsedInt, err := strconv.ParseInt(str, 10, 64) + if err != nil { + return err + } + *i = protoInt64(parsedInt) + } else { + var parsedInt int64 + if err := json.Unmarshal(data, &parsedInt); err != nil { + return err + } + *i = protoInt64(parsedInt) + } + return nil +} + +// protoUint64 represents the protobuf encoding of integers which can be either +// strings or integers. +type protoUint64 uint64 + +// Uint64 returns the protoUint64 as a uint64. +func (i *protoUint64) Uint64() uint64 { return uint64(*i) } + +// UnmarshalJSON decodes both strings and integers. +func (i *protoUint64) UnmarshalJSON(data []byte) error { + if data[0] == '"' { + var str string + if err := json.Unmarshal(data, &str); err != nil { + return err + } + parsedUint, err := strconv.ParseUint(str, 10, 64) + if err != nil { + return err + } + *i = protoUint64(parsedUint) + } else { + var parsedUint uint64 + if err := json.Unmarshal(data, &parsedUint); err != nil { + return err + } + *i = protoUint64(parsedUint) + } + return nil +} diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/resource.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/resource.go new file mode 100644 index 000000000..cecad8bae --- /dev/null +++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/resource.go @@ -0,0 +1,66 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package telemetry + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// Resource information. +type Resource struct { + // Attrs are the set of attributes that describe the resource. Attribute + // keys MUST be unique (it is not allowed to have more than one attribute + // with the same key). + Attrs []Attr `json:"attributes,omitempty"` + // DroppedAttrs is the number of dropped attributes. If the value + // is 0, then no attributes were dropped. + DroppedAttrs uint32 `json:"droppedAttributesCount,omitempty"` +} + +// UnmarshalJSON decodes the OTLP formatted JSON contained in data into r.
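The `protoInt64`/`protoUint64` helpers above exist because OTLP JSON may carry 64-bit integers either as JSON numbers or as decimal strings. The same accept-both decoding pattern, reduced to a standalone sketch with an invented type name:

```go
package main

import (
	"encoding/json"
	"fmt"
	"strconv"
)

// flexInt64 accepts both `123` and `"123"`, mirroring the dual decoding used
// by the vendored telemetry package's protoInt64 type.
type flexInt64 int64

func (i *flexInt64) UnmarshalJSON(data []byte) error {
	if len(data) > 0 && data[0] == '"' {
		// Quoted form: strip the quotes via string decoding, then parse.
		var s string
		if err := json.Unmarshal(data, &s); err != nil {
			return err
		}
		v, err := strconv.ParseInt(s, 10, 64)
		if err != nil {
			return err
		}
		*i = flexInt64(v)
		return nil
	}
	// Plain JSON number.
	var v int64
	if err := json.Unmarshal(data, &v); err != nil {
		return err
	}
	*i = flexInt64(v)
	return nil
}

func main() {
	var a, b flexInt64
	_ = json.Unmarshal([]byte(`1730000000000000000`), &a)   // number form
	_ = json.Unmarshal([]byte(`"1730000000000000000"`), &b) // string form
	fmt.Println(a == b) // true
}
```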
+func (r *Resource) UnmarshalJSON(data []byte) error { + decoder := json.NewDecoder(bytes.NewReader(data)) + + t, err := decoder.Token() + if err != nil { + return err + } + if t != json.Delim('{') { + return errors.New("invalid Resource type") + } + + for decoder.More() { + keyIface, err := decoder.Token() + if err != nil { + if errors.Is(err, io.EOF) { + // Empty. + return nil + } + return err + } + + key, ok := keyIface.(string) + if !ok { + return fmt.Errorf("invalid Resource field: %#v", keyIface) + } + + switch key { + case "attributes": + err = decoder.Decode(&r.Attrs) + case "droppedAttributesCount", "dropped_attributes_count": + err = decoder.Decode(&r.DroppedAttrs) + default: + // Skip unknown. + } + + if err != nil { + return err + } + } + return nil +} diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/scope.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/scope.go new file mode 100644 index 000000000..b6f2e28d4 --- /dev/null +++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/scope.go @@ -0,0 +1,67 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package telemetry + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// Scope is the identifying values of the instrumentation scope. +type Scope struct { + Name string `json:"name,omitempty"` + Version string `json:"version,omitempty"` + Attrs []Attr `json:"attributes,omitempty"` + DroppedAttrs uint32 `json:"droppedAttributesCount,omitempty"` +} + +// UnmarshalJSON decodes the OTLP formatted JSON contained in data into r. +func (s *Scope) UnmarshalJSON(data []byte) error { + decoder := json.NewDecoder(bytes.NewReader(data)) + + t, err := decoder.Token() + if err != nil { + return err + } + if t != json.Delim('{') { + return errors.New("invalid Scope type") + } + + for decoder.More() { + keyIface, err := decoder.Token() + if err != nil { + if errors.Is(err, io.EOF) { + // Empty. + return nil + } + return err + } + + key, ok := keyIface.(string) + if !ok { + return fmt.Errorf("invalid Scope field: %#v", keyIface) + } + + switch key { + case "name": + err = decoder.Decode(&s.Name) + case "version": + err = decoder.Decode(&s.Version) + case "attributes": + err = decoder.Decode(&s.Attrs) + case "droppedAttributesCount", "dropped_attributes_count": + err = decoder.Decode(&s.DroppedAttrs) + default: + // Skip unknown. + } + + if err != nil { + return err + } + } + return nil +} diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/span.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/span.go new file mode 100644 index 000000000..a13a6b733 --- /dev/null +++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/span.go @@ -0,0 +1,456 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package telemetry + +import ( + "bytes" + "encoding/hex" + "encoding/json" + "errors" + "fmt" + "io" + "time" +) + +// A Span represents a single operation performed by a single component of the +// system. +type Span struct { + // A unique identifier for a trace. All spans from the same trace share + // the same `trace_id`. The ID is a 16-byte array. An ID with all zeroes OR + // of length other than 16 bytes is considered invalid (empty string in OTLP/JSON + // is zero-length and thus is also invalid). + // + // This field is required. + TraceID TraceID `json:"traceId,omitempty"` + // A unique identifier for a span within a trace, assigned when the span + // is created. The ID is an 8-byte array. 
An ID with all zeroes OR of length + // other than 8 bytes is considered invalid (empty string in OTLP/JSON + // is zero-length and thus is also invalid). + // + // This field is required. + SpanID SpanID `json:"spanId,omitempty"` + // trace_state conveys information about request position in multiple distributed tracing graphs. + // It is a trace_state in w3c-trace-context format: https://www.w3.org/TR/trace-context/#tracestate-header + // See also https://github.com/w3c/distributed-tracing for more details about this field. + TraceState string `json:"traceState,omitempty"` + // The `span_id` of this span's parent span. If this is a root span, then this + // field must be empty. The ID is an 8-byte array. + ParentSpanID SpanID `json:"parentSpanId,omitempty"` + // Flags, a bit field. + // + // Bits 0-7 (8 least significant bits) are the trace flags as defined in W3C Trace + // Context specification. To read the 8-bit W3C trace flag, use + // `flags & SPAN_FLAGS_TRACE_FLAGS_MASK`. + // + // See https://www.w3.org/TR/trace-context-2/#trace-flags for the flag definitions. + // + // Bits 8 and 9 represent the 3 states of whether a span's parent + // is remote. The states are (unknown, is not remote, is remote). + // To read whether the value is known, use `(flags & SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK) != 0`. + // To read whether the span is remote, use `(flags & SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK) != 0`. + // + // When creating span messages, if the message is logically forwarded from another source + // with an equivalent flags fields (i.e., usually another OTLP span message), the field SHOULD + // be copied as-is. If creating from a source that does not have an equivalent flags field + // (such as a runtime representation of an OpenTelemetry span), the high 22 bits MUST + // be set to zero. + // Readers MUST NOT assume that bits 10-31 (22 most significant bits) will be zero. + // + // [Optional]. + Flags uint32 `json:"flags,omitempty"` + // A description of the span's operation. + // + // For example, the name can be a qualified method name or a file name + // and a line number where the operation is called. A best practice is to use + // the same display name at the same call point in an application. + // This makes it easier to correlate spans in different traces. + // + // This field is semantically required to be set to non-empty string. + // Empty value is equivalent to an unknown span name. + // + // This field is required. + Name string `json:"name"` + // Distinguishes between spans generated in a particular context. For example, + // two spans with the same name may be distinguished using `CLIENT` (caller) + // and `SERVER` (callee) to identify queueing latency associated with the span. + Kind SpanKind `json:"kind,omitempty"` + // start_time_unix_nano is the start time of the span. On the client side, this is the time + // kept by the local machine where the span execution starts. On the server side, this + // is the time when the server's application handler starts running. + // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970. + // + // This field is semantically required and it is expected that end_time >= start_time. + StartTime time.Time `json:"startTimeUnixNano,omitempty"` + // end_time_unix_nano is the end time of the span. On the client side, this is the time + // kept by the local machine where the span execution ends. On the server side, this + // is the time when the server application handler stops running. 
+ // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970. + // + // This field is semantically required and it is expected that end_time >= start_time. + EndTime time.Time `json:"endTimeUnixNano,omitempty"` + // attributes is a collection of key/value pairs. Note, global attributes + // like server name can be set using the resource API. Examples of attributes: + // + // "/http/user_agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36" + // "/http/server_latency": 300 + // "example.com/myattribute": true + // "example.com/score": 10.239 + // + // The OpenTelemetry API specification further restricts the allowed value types: + // https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/common/README.md#attribute + // Attribute keys MUST be unique (it is not allowed to have more than one + // attribute with the same key). + Attrs []Attr `json:"attributes,omitempty"` + // dropped_attributes_count is the number of attributes that were discarded. Attributes + // can be discarded because their keys are too long or because there are too many + // attributes. If this value is 0, then no attributes were dropped. + DroppedAttrs uint32 `json:"droppedAttributesCount,omitempty"` + // events is a collection of Event items. + Events []*SpanEvent `json:"events,omitempty"` + // dropped_events_count is the number of dropped events. If the value is 0, then no + // events were dropped. + DroppedEvents uint32 `json:"droppedEventsCount,omitempty"` + // links is a collection of Links, which are references from this span to a span + // in the same or different trace. + Links []*SpanLink `json:"links,omitempty"` + // dropped_links_count is the number of dropped links after the maximum size was + // enforced. If this value is 0, then no links were dropped. + DroppedLinks uint32 `json:"droppedLinksCount,omitempty"` + // An optional final status for this span. Semantically when Status isn't set, it means + // span's status code is unset, i.e. assume STATUS_CODE_UNSET (code = 0). + Status *Status `json:"status,omitempty"` +} + +// MarshalJSON encodes s into OTLP formatted JSON. +func (s Span) MarshalJSON() ([]byte, error) { + startT := s.StartTime.UnixNano() + if s.StartTime.IsZero() || startT < 0 { + startT = 0 + } + + endT := s.EndTime.UnixNano() + if s.EndTime.IsZero() || endT < 0 { + endT = 0 + } + + // Override non-empty default SpanID marshal and omitempty. + var parentSpanId string + if !s.ParentSpanID.IsEmpty() { + b := make([]byte, hex.EncodedLen(spanIDSize)) + hex.Encode(b, s.ParentSpanID[:]) + parentSpanId = string(b) + } + + type Alias Span + return json.Marshal(struct { + Alias + ParentSpanID string `json:"parentSpanId,omitempty"` + StartTime uint64 `json:"startTimeUnixNano,omitempty"` + EndTime uint64 `json:"endTimeUnixNano,omitempty"` + }{ + Alias: Alias(s), + ParentSpanID: parentSpanId, + StartTime: uint64(startT), + EndTime: uint64(endT), + }) +} + +// UnmarshalJSON decodes the OTLP formatted JSON contained in data into s. +func (s *Span) UnmarshalJSON(data []byte) error { + decoder := json.NewDecoder(bytes.NewReader(data)) + + t, err := decoder.Token() + if err != nil { + return err + } + if t != json.Delim('{') { + return errors.New("invalid Span type") + } + + for decoder.More() { + keyIface, err := decoder.Token() + if err != nil { + if errors.Is(err, io.EOF) { + // Empty. 
+ return nil + } + return err + } + + key, ok := keyIface.(string) + if !ok { + return fmt.Errorf("invalid Span field: %#v", keyIface) + } + + switch key { + case "traceId", "trace_id": + err = decoder.Decode(&s.TraceID) + case "spanId", "span_id": + err = decoder.Decode(&s.SpanID) + case "traceState", "trace_state": + err = decoder.Decode(&s.TraceState) + case "parentSpanId", "parent_span_id": + err = decoder.Decode(&s.ParentSpanID) + case "flags": + err = decoder.Decode(&s.Flags) + case "name": + err = decoder.Decode(&s.Name) + case "kind": + err = decoder.Decode(&s.Kind) + case "startTimeUnixNano", "start_time_unix_nano": + var val protoUint64 + err = decoder.Decode(&val) + s.StartTime = time.Unix(0, int64(val.Uint64())) + case "endTimeUnixNano", "end_time_unix_nano": + var val protoUint64 + err = decoder.Decode(&val) + s.EndTime = time.Unix(0, int64(val.Uint64())) + case "attributes": + err = decoder.Decode(&s.Attrs) + case "droppedAttributesCount", "dropped_attributes_count": + err = decoder.Decode(&s.DroppedAttrs) + case "events": + err = decoder.Decode(&s.Events) + case "droppedEventsCount", "dropped_events_count": + err = decoder.Decode(&s.DroppedEvents) + case "links": + err = decoder.Decode(&s.Links) + case "droppedLinksCount", "dropped_links_count": + err = decoder.Decode(&s.DroppedLinks) + case "status": + err = decoder.Decode(&s.Status) + default: + // Skip unknown. + } + + if err != nil { + return err + } + } + return nil +} + +// SpanFlags represents constants used to interpret the +// Span.flags field, which is protobuf 'fixed32' type and is to +// be used as bit-fields. Each non-zero value defined in this enum is +// a bit-mask. To extract the bit-field, for example, use an +// expression like: +// +// (span.flags & SPAN_FLAGS_TRACE_FLAGS_MASK) +// +// See https://www.w3.org/TR/trace-context-2/#trace-flags for the flag definitions. +// +// Note that Span flags were introduced in version 1.1 of the +// OpenTelemetry protocol. Older Span producers do not set this +// field, consequently consumers should not rely on the absence of a +// particular flag bit to indicate the presence of a particular feature. +type SpanFlags int32 + +const ( + // Bits 0-7 are used for trace flags. + SpanFlagsTraceFlagsMask SpanFlags = 255 + // Bits 8 and 9 are used to indicate that the parent span or link span is remote. + // Bit 8 (`HAS_IS_REMOTE`) indicates whether the value is known. + // Bit 9 (`IS_REMOTE`) indicates whether the span or link is remote. + SpanFlagsContextHasIsRemoteMask SpanFlags = 256 + // SpanFlagsContextHasIsRemoteMask indicates the Span is remote. + SpanFlagsContextIsRemoteMask SpanFlags = 512 +) + +// SpanKind is the type of span. Can be used to specify additional relationships between spans +// in addition to a parent/child relationship. +type SpanKind int32 + +const ( + // Indicates that the span represents an internal operation within an application, + // as opposed to an operation happening at the boundaries. Default value. + SpanKindInternal SpanKind = 1 + // Indicates that the span covers server-side handling of an RPC or other + // remote network request. + SpanKindServer SpanKind = 2 + // Indicates that the span describes a request to some remote service. + SpanKindClient SpanKind = 3 + // Indicates that the span describes a producer sending a message to a broker. + // Unlike CLIENT and SERVER, there is often no direct critical path latency relationship + // between producer and consumer spans. 
A PRODUCER span ends when the message was accepted + // by the broker while the logical processing of the message might span a much longer time. + SpanKindProducer SpanKind = 4 + // Indicates that the span describes consumer receiving a message from a broker. + // Like the PRODUCER kind, there is often no direct critical path latency relationship + // between producer and consumer spans. + SpanKindConsumer SpanKind = 5 +) + +// Event is a time-stamped annotation of the span, consisting of user-supplied +// text description and key-value pairs. +type SpanEvent struct { + // time_unix_nano is the time the event occurred. + Time time.Time `json:"timeUnixNano,omitempty"` + // name of the event. + // This field is semantically required to be set to non-empty string. + Name string `json:"name,omitempty"` + // attributes is a collection of attribute key/value pairs on the event. + // Attribute keys MUST be unique (it is not allowed to have more than one + // attribute with the same key). + Attrs []Attr `json:"attributes,omitempty"` + // dropped_attributes_count is the number of dropped attributes. If the value is 0, + // then no attributes were dropped. + DroppedAttrs uint32 `json:"droppedAttributesCount,omitempty"` +} + +// MarshalJSON encodes e into OTLP formatted JSON. +func (e SpanEvent) MarshalJSON() ([]byte, error) { + t := e.Time.UnixNano() + if e.Time.IsZero() || t < 0 { + t = 0 + } + + type Alias SpanEvent + return json.Marshal(struct { + Alias + Time uint64 `json:"timeUnixNano,omitempty"` + }{ + Alias: Alias(e), + Time: uint64(t), + }) +} + +// UnmarshalJSON decodes the OTLP formatted JSON contained in data into se. +func (se *SpanEvent) UnmarshalJSON(data []byte) error { + decoder := json.NewDecoder(bytes.NewReader(data)) + + t, err := decoder.Token() + if err != nil { + return err + } + if t != json.Delim('{') { + return errors.New("invalid SpanEvent type") + } + + for decoder.More() { + keyIface, err := decoder.Token() + if err != nil { + if errors.Is(err, io.EOF) { + // Empty. + return nil + } + return err + } + + key, ok := keyIface.(string) + if !ok { + return fmt.Errorf("invalid SpanEvent field: %#v", keyIface) + } + + switch key { + case "timeUnixNano", "time_unix_nano": + var val protoUint64 + err = decoder.Decode(&val) + se.Time = time.Unix(0, int64(val.Uint64())) + case "name": + err = decoder.Decode(&se.Name) + case "attributes": + err = decoder.Decode(&se.Attrs) + case "droppedAttributesCount", "dropped_attributes_count": + err = decoder.Decode(&se.DroppedAttrs) + default: + // Skip unknown. + } + + if err != nil { + return err + } + } + return nil +} + +// A pointer from the current span to another span in the same trace or in a +// different trace. For example, this can be used in batching operations, +// where a single batch handler processes multiple requests from different +// traces or when the handler receives a request from a different project. +type SpanLink struct { + // A unique identifier of a trace that this linked span is part of. The ID is a + // 16-byte array. + TraceID TraceID `json:"traceId,omitempty"` + // A unique identifier for the linked span. The ID is an 8-byte array. + SpanID SpanID `json:"spanId,omitempty"` + // The trace_state associated with the link. + TraceState string `json:"traceState,omitempty"` + // attributes is a collection of attribute key/value pairs on the link. + // Attribute keys MUST be unique (it is not allowed to have more than one + // attribute with the same key). 
+ Attrs []Attr `json:"attributes,omitempty"` + // dropped_attributes_count is the number of dropped attributes. If the value is 0, + // then no attributes were dropped. + DroppedAttrs uint32 `json:"droppedAttributesCount,omitempty"` + // Flags, a bit field. + // + // Bits 0-7 (8 least significant bits) are the trace flags as defined in W3C Trace + // Context specification. To read the 8-bit W3C trace flag, use + // `flags & SPAN_FLAGS_TRACE_FLAGS_MASK`. + // + // See https://www.w3.org/TR/trace-context-2/#trace-flags for the flag definitions. + // + // Bits 8 and 9 represent the 3 states of whether the link is remote. + // The states are (unknown, is not remote, is remote). + // To read whether the value is known, use `(flags & SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK) != 0`. + // To read whether the link is remote, use `(flags & SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK) != 0`. + // + // Readers MUST NOT assume that bits 10-31 (22 most significant bits) will be zero. + // When creating new spans, bits 10-31 (most-significant 22-bits) MUST be zero. + // + // [Optional]. + Flags uint32 `json:"flags,omitempty"` +} + +// UnmarshalJSON decodes the OTLP formatted JSON contained in data into sl. +func (sl *SpanLink) UnmarshalJSON(data []byte) error { + decoder := json.NewDecoder(bytes.NewReader(data)) + + t, err := decoder.Token() + if err != nil { + return err + } + if t != json.Delim('{') { + return errors.New("invalid SpanLink type") + } + + for decoder.More() { + keyIface, err := decoder.Token() + if err != nil { + if errors.Is(err, io.EOF) { + // Empty. + return nil + } + return err + } + + key, ok := keyIface.(string) + if !ok { + return fmt.Errorf("invalid SpanLink field: %#v", keyIface) + } + + switch key { + case "traceId", "trace_id": + err = decoder.Decode(&sl.TraceID) + case "spanId", "span_id": + err = decoder.Decode(&sl.SpanID) + case "traceState", "trace_state": + err = decoder.Decode(&sl.TraceState) + case "attributes": + err = decoder.Decode(&sl.Attrs) + case "droppedAttributesCount", "dropped_attributes_count": + err = decoder.Decode(&sl.DroppedAttrs) + case "flags": + err = decoder.Decode(&sl.Flags) + default: + // Skip unknown. + } + + if err != nil { + return err + } + } + return nil +} diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/status.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/status.go new file mode 100644 index 000000000..1217776ea --- /dev/null +++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/status.go @@ -0,0 +1,40 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package telemetry + +// For the semantics of status codes see +// https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/api.md#set-status +type StatusCode int32 + +const ( + // The default status. + StatusCodeUnset StatusCode = 0 + // The Span has been validated by an Application developer or Operator to + // have completed successfully. + StatusCodeOK StatusCode = 1 + // The Span contains an error. + StatusCodeError StatusCode = 2 +) + +var statusCodeStrings = []string{ + "Unset", + "OK", + "Error", +} + +func (s StatusCode) String() string { + if s >= 0 && int(s) < len(statusCodeStrings) { + return statusCodeStrings[s] + } + return "" +} + +// The Status type defines a logical error model that is suitable for different +// programming environments, including REST APIs and RPC APIs. +type Status struct { + // A developer-facing human readable error message. 
+ Message string `json:"message,omitempty"` + // The status code. + Code StatusCode `json:"code,omitempty"` +} diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/traces.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/traces.go new file mode 100644 index 000000000..69a348f0f --- /dev/null +++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/traces.go @@ -0,0 +1,189 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package telemetry + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// Traces represents the traces data that can be stored in a persistent storage, +// OR can be embedded by other protocols that transfer OTLP traces data but do +// not implement the OTLP protocol. +// +// The main difference between this message and collector protocol is that +// in this message there will not be any "control" or "metadata" specific to +// OTLP protocol. +// +// When new fields are added into this message, the OTLP request MUST be updated +// as well. +type Traces struct { + // An array of ResourceSpans. + // For data coming from a single resource this array will typically contain + // one element. Intermediary nodes that receive data from multiple origins + // typically batch the data before forwarding further and in that case this + // array will contain multiple elements. + ResourceSpans []*ResourceSpans `json:"resourceSpans,omitempty"` +} + +// UnmarshalJSON decodes the OTLP formatted JSON contained in data into td. +func (td *Traces) UnmarshalJSON(data []byte) error { + decoder := json.NewDecoder(bytes.NewReader(data)) + + t, err := decoder.Token() + if err != nil { + return err + } + if t != json.Delim('{') { + return errors.New("invalid TracesData type") + } + + for decoder.More() { + keyIface, err := decoder.Token() + if err != nil { + if errors.Is(err, io.EOF) { + // Empty. + return nil + } + return err + } + + key, ok := keyIface.(string) + if !ok { + return fmt.Errorf("invalid TracesData field: %#v", keyIface) + } + + switch key { + case "resourceSpans", "resource_spans": + err = decoder.Decode(&td.ResourceSpans) + default: + // Skip unknown. + } + + if err != nil { + return err + } + } + return nil +} + +// A collection of ScopeSpans from a Resource. +type ResourceSpans struct { + // The resource for the spans in this message. + // If this field is not set then no resource info is known. + Resource Resource `json:"resource"` + // A list of ScopeSpans that originate from a resource. + ScopeSpans []*ScopeSpans `json:"scopeSpans,omitempty"` + // This schema_url applies to the data in the "resource" field. It does not apply + // to the data in the "scope_spans" field which have their own schema_url field. + SchemaURL string `json:"schemaUrl,omitempty"` +} + +// UnmarshalJSON decodes the OTLP formatted JSON contained in data into rs. +func (rs *ResourceSpans) UnmarshalJSON(data []byte) error { + decoder := json.NewDecoder(bytes.NewReader(data)) + + t, err := decoder.Token() + if err != nil { + return err + } + if t != json.Delim('{') { + return errors.New("invalid ResourceSpans type") + } + + for decoder.More() { + keyIface, err := decoder.Token() + if err != nil { + if errors.Is(err, io.EOF) { + // Empty. 
+ return nil + } + return err + } + + key, ok := keyIface.(string) + if !ok { + return fmt.Errorf("invalid ResourceSpans field: %#v", keyIface) + } + + switch key { + case "resource": + err = decoder.Decode(&rs.Resource) + case "scopeSpans", "scope_spans": + err = decoder.Decode(&rs.ScopeSpans) + case "schemaUrl", "schema_url": + err = decoder.Decode(&rs.SchemaURL) + default: + // Skip unknown. + } + + if err != nil { + return err + } + } + return nil +} + +// A collection of Spans produced by an InstrumentationScope. +type ScopeSpans struct { + // The instrumentation scope information for the spans in this message. + // Semantically when InstrumentationScope isn't set, it is equivalent with + // an empty instrumentation scope name (unknown). + Scope *Scope `json:"scope"` + // A list of Spans that originate from an instrumentation scope. + Spans []*Span `json:"spans,omitempty"` + // The Schema URL, if known. This is the identifier of the Schema that the span data + // is recorded in. To learn more about Schema URL see + // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url + // This schema_url applies to all spans and span events in the "spans" field. + SchemaURL string `json:"schemaUrl,omitempty"` +} + +// UnmarshalJSON decodes the OTLP formatted JSON contained in data into ss. +func (ss *ScopeSpans) UnmarshalJSON(data []byte) error { + decoder := json.NewDecoder(bytes.NewReader(data)) + + t, err := decoder.Token() + if err != nil { + return err + } + if t != json.Delim('{') { + return errors.New("invalid ScopeSpans type") + } + + for decoder.More() { + keyIface, err := decoder.Token() + if err != nil { + if errors.Is(err, io.EOF) { + // Empty. + return nil + } + return err + } + + key, ok := keyIface.(string) + if !ok { + return fmt.Errorf("invalid ScopeSpans field: %#v", keyIface) + } + + switch key { + case "scope": + err = decoder.Decode(&ss.Scope) + case "spans": + err = decoder.Decode(&ss.Spans) + case "schemaUrl", "schema_url": + err = decoder.Decode(&ss.SchemaURL) + default: + // Skip unknown. + } + + if err != nil { + return err + } + } + return nil +} diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/value.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/value.go new file mode 100644 index 000000000..0dd01b063 --- /dev/null +++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/value.go @@ -0,0 +1,452 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +//go:generate stringer -type=ValueKind -trimprefix=ValueKind + +package telemetry + +import ( + "bytes" + "cmp" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "io" + "math" + "slices" + "strconv" + "unsafe" +) + +// A Value represents a structured value. +// A zero value is valid and represents an empty value. +type Value struct { + // Ensure forward compatibility by explicitly making this not comparable. + noCmp [0]func() //nolint: unused // This is indeed used. + + // num holds the value for Int64, Float64, and Bool. It holds the length + // for String, Bytes, Slice, Map. + num uint64 + // any holds either the KindBool, KindInt64, KindFloat64, stringptr, + // bytesptr, sliceptr, or mapptr. If KindBool, KindInt64, or KindFloat64 + // then the value of Value is in num as described above. Otherwise, it + // contains the value wrapped in the appropriate type. + any any +} + +type ( + // sliceptr represents a value in Value.any for KindString Values. + stringptr *byte + // bytesptr represents a value in Value.any for KindBytes Values. 
+ bytesptr *byte + // sliceptr represents a value in Value.any for KindSlice Values. + sliceptr *Value + // mapptr represents a value in Value.any for KindMap Values. + mapptr *Attr +) + +// ValueKind is the kind of a [Value]. +type ValueKind int + +// ValueKind values. +const ( + ValueKindEmpty ValueKind = iota + ValueKindBool + ValueKindFloat64 + ValueKindInt64 + ValueKindString + ValueKindBytes + ValueKindSlice + ValueKindMap +) + +var valueKindStrings = []string{ + "Empty", + "Bool", + "Float64", + "Int64", + "String", + "Bytes", + "Slice", + "Map", +} + +func (k ValueKind) String() string { + if k >= 0 && int(k) < len(valueKindStrings) { + return valueKindStrings[k] + } + return "" +} + +// StringValue returns a new [Value] for a string. +func StringValue(v string) Value { + return Value{ + num: uint64(len(v)), + any: stringptr(unsafe.StringData(v)), + } +} + +// IntValue returns a [Value] for an int. +func IntValue(v int) Value { return Int64Value(int64(v)) } + +// Int64Value returns a [Value] for an int64. +func Int64Value(v int64) Value { + return Value{num: uint64(v), any: ValueKindInt64} +} + +// Float64Value returns a [Value] for a float64. +func Float64Value(v float64) Value { + return Value{num: math.Float64bits(v), any: ValueKindFloat64} +} + +// BoolValue returns a [Value] for a bool. +func BoolValue(v bool) Value { //nolint:revive // Not a control flag. + var n uint64 + if v { + n = 1 + } + return Value{num: n, any: ValueKindBool} +} + +// BytesValue returns a [Value] for a byte slice. The passed slice must not be +// changed after it is passed. +func BytesValue(v []byte) Value { + return Value{ + num: uint64(len(v)), + any: bytesptr(unsafe.SliceData(v)), + } +} + +// SliceValue returns a [Value] for a slice of [Value]. The passed slice must +// not be changed after it is passed. +func SliceValue(vs ...Value) Value { + return Value{ + num: uint64(len(vs)), + any: sliceptr(unsafe.SliceData(vs)), + } +} + +// MapValue returns a new [Value] for a slice of key-value pairs. The passed +// slice must not be changed after it is passed. +func MapValue(kvs ...Attr) Value { + return Value{ + num: uint64(len(kvs)), + any: mapptr(unsafe.SliceData(kvs)), + } +} + +// AsString returns the value held by v as a string. +func (v Value) AsString() string { + if sp, ok := v.any.(stringptr); ok { + return unsafe.String(sp, v.num) + } + // TODO: error handle + return "" +} + +// asString returns the value held by v as a string. It will panic if the Value +// is not KindString. +func (v Value) asString() string { + return unsafe.String(v.any.(stringptr), v.num) +} + +// AsInt64 returns the value held by v as an int64. +func (v Value) AsInt64() int64 { + if v.Kind() != ValueKindInt64 { + // TODO: error handle + return 0 + } + return v.asInt64() +} + +// asInt64 returns the value held by v as an int64. If v is not of KindInt64, +// this will return garbage. +func (v Value) asInt64() int64 { + // Assumes v.num was a valid int64 (overflow not checked). + return int64(v.num) // nolint: gosec +} + +// AsBool returns the value held by v as a bool. +func (v Value) AsBool() bool { + if v.Kind() != ValueKindBool { + // TODO: error handle + return false + } + return v.asBool() +} + +// asBool returns the value held by v as a bool. If v is not of KindBool, this +// will return garbage. +func (v Value) asBool() bool { return v.num == 1 } + +// AsFloat64 returns the value held by v as a float64. 
+func (v Value) AsFloat64() float64 { + if v.Kind() != ValueKindFloat64 { + // TODO: error handle + return 0 + } + return v.asFloat64() +} + +// asFloat64 returns the value held by v as a float64. If v is not of +// KindFloat64, this will return garbage. +func (v Value) asFloat64() float64 { return math.Float64frombits(v.num) } + +// AsBytes returns the value held by v as a []byte. +func (v Value) AsBytes() []byte { + if sp, ok := v.any.(bytesptr); ok { + return unsafe.Slice((*byte)(sp), v.num) + } + // TODO: error handle + return nil +} + +// asBytes returns the value held by v as a []byte. It will panic if the Value +// is not KindBytes. +func (v Value) asBytes() []byte { + return unsafe.Slice((*byte)(v.any.(bytesptr)), v.num) +} + +// AsSlice returns the value held by v as a []Value. +func (v Value) AsSlice() []Value { + if sp, ok := v.any.(sliceptr); ok { + return unsafe.Slice((*Value)(sp), v.num) + } + // TODO: error handle + return nil +} + +// asSlice returns the value held by v as a []Value. It will panic if the Value +// is not KindSlice. +func (v Value) asSlice() []Value { + return unsafe.Slice((*Value)(v.any.(sliceptr)), v.num) +} + +// AsMap returns the value held by v as a []Attr. +func (v Value) AsMap() []Attr { + if sp, ok := v.any.(mapptr); ok { + return unsafe.Slice((*Attr)(sp), v.num) + } + // TODO: error handle + return nil +} + +// asMap returns the value held by v as a []Attr. It will panic if the +// Value is not KindMap. +func (v Value) asMap() []Attr { + return unsafe.Slice((*Attr)(v.any.(mapptr)), v.num) +} + +// Kind returns the Kind of v. +func (v Value) Kind() ValueKind { + switch x := v.any.(type) { + case ValueKind: + return x + case stringptr: + return ValueKindString + case bytesptr: + return ValueKindBytes + case sliceptr: + return ValueKindSlice + case mapptr: + return ValueKindMap + default: + return ValueKindEmpty + } +} + +// Empty returns if v does not hold any value. +func (v Value) Empty() bool { return v.Kind() == ValueKindEmpty } + +// Equal returns if v is equal to w. +func (v Value) Equal(w Value) bool { + k1 := v.Kind() + k2 := w.Kind() + if k1 != k2 { + return false + } + switch k1 { + case ValueKindInt64, ValueKindBool: + return v.num == w.num + case ValueKindString: + return v.asString() == w.asString() + case ValueKindFloat64: + return v.asFloat64() == w.asFloat64() + case ValueKindSlice: + return slices.EqualFunc(v.asSlice(), w.asSlice(), Value.Equal) + case ValueKindMap: + sv := sortMap(v.asMap()) + sw := sortMap(w.asMap()) + return slices.EqualFunc(sv, sw, Attr.Equal) + case ValueKindBytes: + return bytes.Equal(v.asBytes(), w.asBytes()) + case ValueKindEmpty: + return true + default: + // TODO: error handle + return false + } +} + +func sortMap(m []Attr) []Attr { + sm := make([]Attr, len(m)) + copy(sm, m) + slices.SortFunc(sm, func(a, b Attr) int { + return cmp.Compare(a.Key, b.Key) + }) + + return sm +} + +// String returns Value's value as a string, formatted like [fmt.Sprint]. +// +// The returned string is meant for debugging; +// the string representation is not stable. +func (v Value) String() string { + switch v.Kind() { + case ValueKindString: + return v.asString() + case ValueKindInt64: + // Assumes v.num was a valid int64 (overflow not checked). 
+ return strconv.FormatInt(int64(v.num), 10) // nolint: gosec + case ValueKindFloat64: + return strconv.FormatFloat(v.asFloat64(), 'g', -1, 64) + case ValueKindBool: + return strconv.FormatBool(v.asBool()) + case ValueKindBytes: + return fmt.Sprint(v.asBytes()) + case ValueKindMap: + return fmt.Sprint(v.asMap()) + case ValueKindSlice: + return fmt.Sprint(v.asSlice()) + case ValueKindEmpty: + return "" + default: + // Try to handle this as gracefully as possible. + // + // Don't panic here. The goal here is to have developers find this + // first if a slog.Kind is not handled. It is + // preferable to have users open issues asking why their attributes + // have an "unhandled: " prefix than to say that their code is panicking. + return fmt.Sprintf("<unhandled: %s>", v.Kind()) + } +} + +// MarshalJSON encodes v into OTLP formatted JSON. +func (v *Value) MarshalJSON() ([]byte, error) { + switch v.Kind() { + case ValueKindString: + return json.Marshal(struct { + Value string `json:"stringValue"` + }{v.asString()}) + case ValueKindInt64: + return json.Marshal(struct { + Value string `json:"intValue"` + }{strconv.FormatInt(int64(v.num), 10)}) + case ValueKindFloat64: + return json.Marshal(struct { + Value float64 `json:"doubleValue"` + }{v.asFloat64()}) + case ValueKindBool: + return json.Marshal(struct { + Value bool `json:"boolValue"` + }{v.asBool()}) + case ValueKindBytes: + return json.Marshal(struct { + Value []byte `json:"bytesValue"` + }{v.asBytes()}) + case ValueKindMap: + return json.Marshal(struct { + Value struct { + Values []Attr `json:"values"` + } `json:"kvlistValue"` + }{struct { + Values []Attr `json:"values"` + }{v.asMap()}}) + case ValueKindSlice: + return json.Marshal(struct { + Value struct { + Values []Value `json:"values"` + } `json:"arrayValue"` + }{struct { + Values []Value `json:"values"` + }{v.asSlice()}}) + case ValueKindEmpty: + return nil, nil + default: + return nil, fmt.Errorf("unknown Value kind: %s", v.Kind().String()) + } +} + +// UnmarshalJSON decodes the OTLP formatted JSON contained in data into v. +func (v *Value) UnmarshalJSON(data []byte) error { + decoder := json.NewDecoder(bytes.NewReader(data)) + + t, err := decoder.Token() + if err != nil { + return err + } + if t != json.Delim('{') { + return errors.New("invalid Value type") + } + + for decoder.More() { + keyIface, err := decoder.Token() + if err != nil { + if errors.Is(err, io.EOF) { + // Empty. + return nil + } + return err + } + + key, ok := keyIface.(string) + if !ok { + return fmt.Errorf("invalid Value key: %#v", keyIface) + } + + switch key { + case "stringValue", "string_value": + var val string + err = decoder.Decode(&val) + *v = StringValue(val) + case "boolValue", "bool_value": + var val bool + err = decoder.Decode(&val) + *v = BoolValue(val) + case "intValue", "int_value": + var val protoInt64 + err = decoder.Decode(&val) + *v = Int64Value(val.Int64()) + case "doubleValue", "double_value": + var val float64 + err = decoder.Decode(&val) + *v = Float64Value(val) + case "bytesValue", "bytes_value": + var val64 string + if err := decoder.Decode(&val64); err != nil { + return err + } + var val []byte + val, err = base64.StdEncoding.DecodeString(val64) + *v = BytesValue(val) + case "arrayValue", "array_value": + var val struct{ Values []Value } + err = decoder.Decode(&val) + *v = SliceValue(val.Values...) + case "kvlistValue", "kvlist_value": + var val struct{ Values []Attr } + err = decoder.Decode(&val) + *v = MapValue(val.Values...) + default: + // Skip unknown. + continue + } + // Use first valid.
Ignore the rest. + return err + } + + // Only unknown fields. Return nil without unmarshaling any value. + return nil +} diff --git a/vendor/go.opentelemetry.io/auto/sdk/limit.go b/vendor/go.opentelemetry.io/auto/sdk/limit.go new file mode 100644 index 000000000..86babf1a8 --- /dev/null +++ b/vendor/go.opentelemetry.io/auto/sdk/limit.go @@ -0,0 +1,94 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package sdk + +import ( + "log/slog" + "os" + "strconv" +) + +// maxSpan are the span limits resolved during startup. +var maxSpan = newSpanLimits() + +type spanLimits struct { + // Attrs is the number of allowed attributes for a span. + // + // This is resolved from the environment variable value for the + // OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT key if it exists. Otherwise, the + // environment variable value for OTEL_ATTRIBUTE_COUNT_LIMIT, or 128 if + // that is not set, is used. + Attrs int + // AttrValueLen is the maximum attribute value length allowed for a span. + // + // This is resolved from the environment variable value for the + // OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT key if it exists. Otherwise, the + // environment variable value for OTEL_ATTRIBUTE_VALUE_LENGTH_LIMIT, or -1 + // if that is not set, is used. + AttrValueLen int + // Events is the number of allowed events for a span. + // + // This is resolved from the environment variable value for the + // OTEL_SPAN_EVENT_COUNT_LIMIT key, or 128 is used if that is not set. + Events int + // EventAttrs is the number of allowed attributes for a span event. + // + // The is resolved from the environment variable value for the + // OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT key, or 128 is used if that is not set. + EventAttrs int + // Links is the number of allowed Links for a span. + // + // This is resolved from the environment variable value for the + // OTEL_SPAN_LINK_COUNT_LIMIT, or 128 is used if that is not set. + Links int + // LinkAttrs is the number of allowed attributes for a span link. + // + // This is resolved from the environment variable value for the + // OTEL_LINK_ATTRIBUTE_COUNT_LIMIT, or 128 is used if that is not set. + LinkAttrs int +} + +func newSpanLimits() spanLimits { + return spanLimits{ + Attrs: firstEnv( + 128, + "OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT", + "OTEL_ATTRIBUTE_COUNT_LIMIT", + ), + AttrValueLen: firstEnv( + -1, // Unlimited. + "OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT", + "OTEL_ATTRIBUTE_VALUE_LENGTH_LIMIT", + ), + Events: firstEnv(128, "OTEL_SPAN_EVENT_COUNT_LIMIT"), + EventAttrs: firstEnv(128, "OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT"), + Links: firstEnv(128, "OTEL_SPAN_LINK_COUNT_LIMIT"), + LinkAttrs: firstEnv(128, "OTEL_LINK_ATTRIBUTE_COUNT_LIMIT"), + } +} + +// firstEnv returns the parsed integer value of the first matching environment +// variable from keys. The defaultVal is returned if the value is not an +// integer or no match is found. 
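As a rough stand-alone sketch of the precedence the spanLimits documentation above describes (an editor illustration, not part of the vendored file): the span-specific environment variable is consulted first, then the generic one, and the default applies only if neither yields an integer. The real firstEnv below additionally logs a warning for values that do not parse.

package main

import (
	"fmt"
	"os"
	"strconv"
)

// firstInt is a simplified stand-in for the unexported firstEnv helper:
// it returns the value of the first key that parses as an integer,
// otherwise defaultVal. (Non-integer values are skipped silently here.)
func firstInt(defaultVal int, keys ...string) int {
	for _, key := range keys {
		if v, err := strconv.Atoi(os.Getenv(key)); err == nil {
			return v
		}
	}
	return defaultVal
}

func main() {
	os.Setenv("OTEL_ATTRIBUTE_COUNT_LIMIT", "64")
	os.Setenv("OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT", "32")

	// The span-specific limit wins over the generic one; 128 is the fallback.
	fmt.Println(firstInt(128, "OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT", "OTEL_ATTRIBUTE_COUNT_LIMIT")) // 32
}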
+func firstEnv(defaultVal int, keys ...string) int { + for _, key := range keys { + strV := os.Getenv(key) + if strV == "" { + continue + } + + v, err := strconv.Atoi(strV) + if err == nil { + return v + } + slog.Warn( + "invalid limit environment variable", + "error", err, + "key", key, + "value", strV, + ) + } + + return defaultVal +} diff --git a/vendor/go.opentelemetry.io/auto/sdk/span.go b/vendor/go.opentelemetry.io/auto/sdk/span.go new file mode 100644 index 000000000..6ebea12a9 --- /dev/null +++ b/vendor/go.opentelemetry.io/auto/sdk/span.go @@ -0,0 +1,432 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package sdk + +import ( + "encoding/json" + "fmt" + "reflect" + "runtime" + "strings" + "sync" + "sync/atomic" + "time" + "unicode/utf8" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" + semconv "go.opentelemetry.io/otel/semconv/v1.26.0" + "go.opentelemetry.io/otel/trace" + "go.opentelemetry.io/otel/trace/noop" + + "go.opentelemetry.io/auto/sdk/internal/telemetry" +) + +type span struct { + noop.Span + + spanContext trace.SpanContext + sampled atomic.Bool + + mu sync.Mutex + traces *telemetry.Traces + span *telemetry.Span +} + +func (s *span) SpanContext() trace.SpanContext { + if s == nil { + return trace.SpanContext{} + } + // s.spanContext is immutable, do not acquire lock s.mu. + return s.spanContext +} + +func (s *span) IsRecording() bool { + if s == nil { + return false + } + + return s.sampled.Load() +} + +func (s *span) SetStatus(c codes.Code, msg string) { + if s == nil || !s.sampled.Load() { + return + } + + s.mu.Lock() + defer s.mu.Unlock() + + if s.span.Status == nil { + s.span.Status = new(telemetry.Status) + } + + s.span.Status.Message = msg + + switch c { + case codes.Unset: + s.span.Status.Code = telemetry.StatusCodeUnset + case codes.Error: + s.span.Status.Code = telemetry.StatusCodeError + case codes.Ok: + s.span.Status.Code = telemetry.StatusCodeOK + } +} + +func (s *span) SetAttributes(attrs ...attribute.KeyValue) { + if s == nil || !s.sampled.Load() { + return + } + + s.mu.Lock() + defer s.mu.Unlock() + + limit := maxSpan.Attrs + if limit == 0 { + // No attributes allowed. + s.span.DroppedAttrs += uint32(len(attrs)) + return + } + + m := make(map[string]int) + for i, a := range s.span.Attrs { + m[a.Key] = i + } + + for _, a := range attrs { + val := convAttrValue(a.Value) + if val.Empty() { + s.span.DroppedAttrs++ + continue + } + + if idx, ok := m[string(a.Key)]; ok { + s.span.Attrs[idx] = telemetry.Attr{ + Key: string(a.Key), + Value: val, + } + } else if limit < 0 || len(s.span.Attrs) < limit { + s.span.Attrs = append(s.span.Attrs, telemetry.Attr{ + Key: string(a.Key), + Value: val, + }) + m[string(a.Key)] = len(s.span.Attrs) - 1 + } else { + s.span.DroppedAttrs++ + } + } +} + +// convCappedAttrs converts up to limit attrs into a []telemetry.Attr. The +// number of dropped attributes is also returned. +func convCappedAttrs(limit int, attrs []attribute.KeyValue) ([]telemetry.Attr, uint32) { + if limit == 0 { + return nil, uint32(len(attrs)) + } + + if limit < 0 { + // Unlimited. + return convAttrs(attrs), 0 + } + + limit = min(len(attrs), limit) + return convAttrs(attrs[:limit]), uint32(len(attrs) - limit) +} + +func convAttrs(attrs []attribute.KeyValue) []telemetry.Attr { + if len(attrs) == 0 { + // Avoid allocations if not necessary. 
+ return nil + } + + out := make([]telemetry.Attr, 0, len(attrs)) + for _, attr := range attrs { + key := string(attr.Key) + val := convAttrValue(attr.Value) + if val.Empty() { + continue + } + out = append(out, telemetry.Attr{Key: key, Value: val}) + } + return out +} + +func convAttrValue(value attribute.Value) telemetry.Value { + switch value.Type() { + case attribute.BOOL: + return telemetry.BoolValue(value.AsBool()) + case attribute.INT64: + return telemetry.Int64Value(value.AsInt64()) + case attribute.FLOAT64: + return telemetry.Float64Value(value.AsFloat64()) + case attribute.STRING: + v := truncate(maxSpan.AttrValueLen, value.AsString()) + return telemetry.StringValue(v) + case attribute.BOOLSLICE: + slice := value.AsBoolSlice() + out := make([]telemetry.Value, 0, len(slice)) + for _, v := range slice { + out = append(out, telemetry.BoolValue(v)) + } + return telemetry.SliceValue(out...) + case attribute.INT64SLICE: + slice := value.AsInt64Slice() + out := make([]telemetry.Value, 0, len(slice)) + for _, v := range slice { + out = append(out, telemetry.Int64Value(v)) + } + return telemetry.SliceValue(out...) + case attribute.FLOAT64SLICE: + slice := value.AsFloat64Slice() + out := make([]telemetry.Value, 0, len(slice)) + for _, v := range slice { + out = append(out, telemetry.Float64Value(v)) + } + return telemetry.SliceValue(out...) + case attribute.STRINGSLICE: + slice := value.AsStringSlice() + out := make([]telemetry.Value, 0, len(slice)) + for _, v := range slice { + v = truncate(maxSpan.AttrValueLen, v) + out = append(out, telemetry.StringValue(v)) + } + return telemetry.SliceValue(out...) + } + return telemetry.Value{} +} + +// truncate returns a truncated version of s such that it contains less than +// the limit number of characters. Truncation is applied by returning the limit +// number of valid characters contained in s. +// +// If limit is negative, it returns the original string. +// +// UTF-8 is supported. When truncating, all invalid characters are dropped +// before applying truncation. +// +// If s already contains less than the limit number of bytes, it is returned +// unchanged. No invalid characters are removed. +func truncate(limit int, s string) string { + // This prioritize performance in the following order based on the most + // common expected use-cases. + // + // - Short values less than the default limit (128). + // - Strings with valid encodings that exceed the limit. + // - No limit. + // - Strings with invalid encodings that exceed the limit. + if limit < 0 || len(s) <= limit { + return s + } + + // Optimistically, assume all valid UTF-8. + var b strings.Builder + count := 0 + for i, c := range s { + if c != utf8.RuneError { + count++ + if count > limit { + return s[:i] + } + continue + } + + _, size := utf8.DecodeRuneInString(s[i:]) + if size == 1 { + // Invalid encoding. + b.Grow(len(s) - 1) + _, _ = b.WriteString(s[:i]) + s = s[i:] + break + } + } + + // Fast-path, no invalid input. + if b.Cap() == 0 { + return s + } + + // Truncate while validating UTF-8. + for i := 0; i < len(s) && count < limit; { + c := s[i] + if c < utf8.RuneSelf { + // Optimization for single byte runes (common case). + _ = b.WriteByte(c) + i++ + count++ + continue + } + + _, size := utf8.DecodeRuneInString(s[i:]) + if size == 1 { + // We checked for all 1-byte runes above, this is a RuneError. 
+ i++ + continue + } + + _, _ = b.WriteString(s[i : i+size]) + i += size + count++ + } + + return b.String() +} + +func (s *span) End(opts ...trace.SpanEndOption) { + if s == nil || !s.sampled.Swap(false) { + return + } + + // s.end exists so the lock (s.mu) is not held while s.ended is called. + s.ended(s.end(opts)) +} + +func (s *span) end(opts []trace.SpanEndOption) []byte { + s.mu.Lock() + defer s.mu.Unlock() + + cfg := trace.NewSpanEndConfig(opts...) + if t := cfg.Timestamp(); !t.IsZero() { + s.span.EndTime = cfg.Timestamp() + } else { + s.span.EndTime = time.Now() + } + + b, _ := json.Marshal(s.traces) // TODO: do not ignore this error. + return b +} + +// Expected to be implemented in eBPF. +// +//go:noinline +func (*span) ended(buf []byte) { ended(buf) } + +// ended is used for testing. +var ended = func([]byte) {} + +func (s *span) RecordError(err error, opts ...trace.EventOption) { + if s == nil || err == nil || !s.sampled.Load() { + return + } + + cfg := trace.NewEventConfig(opts...) + + attrs := cfg.Attributes() + attrs = append(attrs, + semconv.ExceptionType(typeStr(err)), + semconv.ExceptionMessage(err.Error()), + ) + if cfg.StackTrace() { + buf := make([]byte, 2048) + n := runtime.Stack(buf, false) + attrs = append(attrs, semconv.ExceptionStacktrace(string(buf[0:n]))) + } + + s.mu.Lock() + defer s.mu.Unlock() + + s.addEvent(semconv.ExceptionEventName, cfg.Timestamp(), attrs) +} + +func typeStr(i any) string { + t := reflect.TypeOf(i) + if t.PkgPath() == "" && t.Name() == "" { + // Likely a builtin type. + return t.String() + } + return fmt.Sprintf("%s.%s", t.PkgPath(), t.Name()) +} + +func (s *span) AddEvent(name string, opts ...trace.EventOption) { + if s == nil || !s.sampled.Load() { + return + } + + cfg := trace.NewEventConfig(opts...) + + s.mu.Lock() + defer s.mu.Unlock() + + s.addEvent(name, cfg.Timestamp(), cfg.Attributes()) +} + +// addEvent adds an event with name and attrs at tStamp to the span. The span +// lock (s.mu) needs to be held by the caller. +func (s *span) addEvent(name string, tStamp time.Time, attrs []attribute.KeyValue) { + limit := maxSpan.Events + + if limit == 0 { + s.span.DroppedEvents++ + return + } + + if limit > 0 && len(s.span.Events) == limit { + // Drop head while avoiding allocation of more capacity. + copy(s.span.Events[:limit-1], s.span.Events[1:]) + s.span.Events = s.span.Events[:limit-1] + s.span.DroppedEvents++ + } + + e := &telemetry.SpanEvent{Time: tStamp, Name: name} + e.Attrs, e.DroppedAttrs = convCappedAttrs(maxSpan.EventAttrs, attrs) + + s.span.Events = append(s.span.Events, e) +} + +func (s *span) AddLink(link trace.Link) { + if s == nil || !s.sampled.Load() { + return + } + + l := maxSpan.Links + + s.mu.Lock() + defer s.mu.Unlock() + + if l == 0 { + s.span.DroppedLinks++ + return + } + + if l > 0 && len(s.span.Links) == l { + // Drop head while avoiding allocation of more capacity. 
+ copy(s.span.Links[:l-1], s.span.Links[1:]) + s.span.Links = s.span.Links[:l-1] + s.span.DroppedLinks++ + } + + s.span.Links = append(s.span.Links, convLink(link)) +} + +func convLinks(links []trace.Link) []*telemetry.SpanLink { + out := make([]*telemetry.SpanLink, 0, len(links)) + for _, link := range links { + out = append(out, convLink(link)) + } + return out +} + +func convLink(link trace.Link) *telemetry.SpanLink { + l := &telemetry.SpanLink{ + TraceID: telemetry.TraceID(link.SpanContext.TraceID()), + SpanID: telemetry.SpanID(link.SpanContext.SpanID()), + TraceState: link.SpanContext.TraceState().String(), + Flags: uint32(link.SpanContext.TraceFlags()), + } + l.Attrs, l.DroppedAttrs = convCappedAttrs(maxSpan.LinkAttrs, link.Attributes) + + return l +} + +func (s *span) SetName(name string) { + if s == nil || !s.sampled.Load() { + return + } + + s.mu.Lock() + defer s.mu.Unlock() + + s.span.Name = name +} + +func (*span) TracerProvider() trace.TracerProvider { return TracerProvider() } diff --git a/vendor/go.opentelemetry.io/auto/sdk/tracer.go b/vendor/go.opentelemetry.io/auto/sdk/tracer.go new file mode 100644 index 000000000..cbcfabde3 --- /dev/null +++ b/vendor/go.opentelemetry.io/auto/sdk/tracer.go @@ -0,0 +1,124 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package sdk + +import ( + "context" + "time" + + "go.opentelemetry.io/otel/trace" + "go.opentelemetry.io/otel/trace/noop" + + "go.opentelemetry.io/auto/sdk/internal/telemetry" +) + +type tracer struct { + noop.Tracer + + name, schemaURL, version string +} + +var _ trace.Tracer = tracer{} + +func (t tracer) Start(ctx context.Context, name string, opts ...trace.SpanStartOption) (context.Context, trace.Span) { + var psc trace.SpanContext + sampled := true + span := new(span) + + // Ask eBPF for sampling decision and span context info. + t.start(ctx, span, &psc, &sampled, &span.spanContext) + + span.sampled.Store(sampled) + + ctx = trace.ContextWithSpan(ctx, span) + + if sampled { + // Only build traces if sampled. + cfg := trace.NewSpanStartConfig(opts...) + span.traces, span.span = t.traces(name, cfg, span.spanContext, psc) + } + + return ctx, span +} + +// Expected to be implemented in eBPF. +// +//go:noinline +func (t *tracer) start( + ctx context.Context, + spanPtr *span, + psc *trace.SpanContext, + sampled *bool, + sc *trace.SpanContext, +) { + start(ctx, spanPtr, psc, sampled, sc) +} + +// start is used for testing. 
+var start = func(context.Context, *span, *trace.SpanContext, *bool, *trace.SpanContext) {} + +func (t tracer) traces(name string, cfg trace.SpanConfig, sc, psc trace.SpanContext) (*telemetry.Traces, *telemetry.Span) { + span := &telemetry.Span{ + TraceID: telemetry.TraceID(sc.TraceID()), + SpanID: telemetry.SpanID(sc.SpanID()), + Flags: uint32(sc.TraceFlags()), + TraceState: sc.TraceState().String(), + ParentSpanID: telemetry.SpanID(psc.SpanID()), + Name: name, + Kind: spanKind(cfg.SpanKind()), + } + + span.Attrs, span.DroppedAttrs = convCappedAttrs(maxSpan.Attrs, cfg.Attributes()) + + links := cfg.Links() + if limit := maxSpan.Links; limit == 0 { + span.DroppedLinks = uint32(len(links)) + } else { + if limit > 0 { + n := max(len(links)-limit, 0) + span.DroppedLinks = uint32(n) + links = links[n:] + } + span.Links = convLinks(links) + } + + if t := cfg.Timestamp(); !t.IsZero() { + span.StartTime = cfg.Timestamp() + } else { + span.StartTime = time.Now() + } + + return &telemetry.Traces{ + ResourceSpans: []*telemetry.ResourceSpans{ + { + ScopeSpans: []*telemetry.ScopeSpans{ + { + Scope: &telemetry.Scope{ + Name: t.name, + Version: t.version, + }, + Spans: []*telemetry.Span{span}, + SchemaURL: t.schemaURL, + }, + }, + }, + }, + }, span +} + +func spanKind(kind trace.SpanKind) telemetry.SpanKind { + switch kind { + case trace.SpanKindInternal: + return telemetry.SpanKindInternal + case trace.SpanKindServer: + return telemetry.SpanKindServer + case trace.SpanKindClient: + return telemetry.SpanKindClient + case trace.SpanKindProducer: + return telemetry.SpanKindProducer + case trace.SpanKindConsumer: + return telemetry.SpanKindConsumer + } + return telemetry.SpanKind(0) // undefined. +} diff --git a/vendor/go.opentelemetry.io/auto/sdk/tracer_provider.go b/vendor/go.opentelemetry.io/auto/sdk/tracer_provider.go new file mode 100644 index 000000000..dbc477a59 --- /dev/null +++ b/vendor/go.opentelemetry.io/auto/sdk/tracer_provider.go @@ -0,0 +1,33 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package sdk + +import ( + "go.opentelemetry.io/otel/trace" + "go.opentelemetry.io/otel/trace/noop" +) + +// TracerProvider returns an auto-instrumentable [trace.TracerProvider]. +// +// If an [go.opentelemetry.io/auto.Instrumentation] is configured to instrument +// the process using the returned TracerProvider, all of the telemetry it +// produces will be processed and handled by that Instrumentation. By default, +// if no Instrumentation instruments the TracerProvider it will not generate +// any trace telemetry. +func TracerProvider() trace.TracerProvider { return tracerProviderInstance } + +var tracerProviderInstance = new(tracerProvider) + +type tracerProvider struct{ noop.TracerProvider } + +var _ trace.TracerProvider = tracerProvider{} + +func (p tracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.Tracer { + cfg := trace.NewTracerConfig(opts...) 
+ return tracer{ + name: name, + version: cfg.InstrumentationVersion(), + schemaURL: cfg.SchemaURL(), + } +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/config.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/config.go index 18436eaed..9e87fb4bb 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/config.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/config.go @@ -51,11 +51,11 @@ type config struct { tracer trace.Tracer meter metric.Meter - rpcDuration metric.Float64Histogram - rpcRequestSize metric.Int64Histogram - rpcResponseSize metric.Int64Histogram - rpcRequestsPerRPC metric.Int64Histogram - rpcResponsesPerRPC metric.Int64Histogram + rpcDuration metric.Float64Histogram + rpcInBytes metric.Int64Histogram + rpcOutBytes metric.Int64Histogram + rpcInMessages metric.Int64Histogram + rpcOutMessages metric.Int64Histogram } // Option applies an option value for a config. @@ -96,46 +96,64 @@ func newConfig(opts []Option, role string) *config { } } - c.rpcRequestSize, err = c.meter.Int64Histogram("rpc."+role+".request.size", + rpcRequestSize, err := c.meter.Int64Histogram("rpc."+role+".request.size", metric.WithDescription("Measures size of RPC request messages (uncompressed)."), metric.WithUnit("By")) if err != nil { otel.Handle(err) - if c.rpcRequestSize == nil { - c.rpcRequestSize = noop.Int64Histogram{} + if rpcRequestSize == nil { + rpcRequestSize = noop.Int64Histogram{} } } - c.rpcResponseSize, err = c.meter.Int64Histogram("rpc."+role+".response.size", + rpcResponseSize, err := c.meter.Int64Histogram("rpc."+role+".response.size", metric.WithDescription("Measures size of RPC response messages (uncompressed)."), metric.WithUnit("By")) if err != nil { otel.Handle(err) - if c.rpcResponseSize == nil { - c.rpcResponseSize = noop.Int64Histogram{} + if rpcResponseSize == nil { + rpcResponseSize = noop.Int64Histogram{} } } - c.rpcRequestsPerRPC, err = c.meter.Int64Histogram("rpc."+role+".requests_per_rpc", + rpcRequestsPerRPC, err := c.meter.Int64Histogram("rpc."+role+".requests_per_rpc", metric.WithDescription("Measures the number of messages received per RPC. Should be 1 for all non-streaming RPCs."), metric.WithUnit("{count}")) if err != nil { otel.Handle(err) - if c.rpcRequestsPerRPC == nil { - c.rpcRequestsPerRPC = noop.Int64Histogram{} + if rpcRequestsPerRPC == nil { + rpcRequestsPerRPC = noop.Int64Histogram{} } } - c.rpcResponsesPerRPC, err = c.meter.Int64Histogram("rpc."+role+".responses_per_rpc", + rpcResponsesPerRPC, err := c.meter.Int64Histogram("rpc."+role+".responses_per_rpc", metric.WithDescription("Measures the number of messages received per RPC. 
Should be 1 for all non-streaming RPCs."), metric.WithUnit("{count}")) if err != nil { otel.Handle(err) - if c.rpcResponsesPerRPC == nil { - c.rpcResponsesPerRPC = noop.Int64Histogram{} + if rpcResponsesPerRPC == nil { + rpcResponsesPerRPC = noop.Int64Histogram{} } } + switch role { + case "client": + c.rpcInBytes = rpcResponseSize + c.rpcInMessages = rpcResponsesPerRPC + c.rpcOutBytes = rpcRequestSize + c.rpcOutMessages = rpcRequestsPerRPC + case "server": + c.rpcInBytes = rpcRequestSize + c.rpcInMessages = rpcRequestsPerRPC + c.rpcOutBytes = rpcResponseSize + c.rpcOutMessages = rpcResponsesPerRPC + default: + c.rpcInBytes = noop.Int64Histogram{} + c.rpcInMessages = noop.Int64Histogram{} + c.rpcOutBytes = noop.Int64Histogram{} + c.rpcOutMessages = noop.Int64Histogram{} + } + return c } diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/stats_handler.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/stats_handler.go index fbcbfb84e..c01cb897c 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/stats_handler.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/stats_handler.go @@ -13,21 +13,22 @@ import ( "google.golang.org/grpc/stats" "google.golang.org/grpc/status" - "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" "go.opentelemetry.io/otel/metric" semconv "go.opentelemetry.io/otel/semconv/v1.17.0" "go.opentelemetry.io/otel/trace" + + "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal" ) type gRPCContextKey struct{} type gRPCContext struct { - messagesReceived int64 - messagesSent int64 - metricAttrs []attribute.KeyValue - record bool + inMessages int64 + outMessages int64 + metricAttrs []attribute.KeyValue + record bool } type serverHandler struct { @@ -150,8 +151,8 @@ func (c *config) handleRPC(ctx context.Context, rs stats.RPCStats, isServer bool case *stats.Begin: case *stats.InPayload: if gctx != nil { - messageId = atomic.AddInt64(&gctx.messagesReceived, 1) - c.rpcRequestSize.Record(ctx, int64(rs.Length), metric.WithAttributeSet(attribute.NewSet(metricAttrs...))) + messageId = atomic.AddInt64(&gctx.inMessages, 1) + c.rpcInBytes.Record(ctx, int64(rs.Length), metric.WithAttributeSet(attribute.NewSet(metricAttrs...))) } if c.ReceivedEvent { @@ -166,8 +167,8 @@ func (c *config) handleRPC(ctx context.Context, rs stats.RPCStats, isServer bool } case *stats.OutPayload: if gctx != nil { - messageId = atomic.AddInt64(&gctx.messagesSent, 1) - c.rpcResponseSize.Record(ctx, int64(rs.Length), metric.WithAttributeSet(attribute.NewSet(metricAttrs...))) + messageId = atomic.AddInt64(&gctx.outMessages, 1) + c.rpcOutBytes.Record(ctx, int64(rs.Length), metric.WithAttributeSet(attribute.NewSet(metricAttrs...))) } if c.SentEvent { @@ -213,8 +214,8 @@ func (c *config) handleRPC(ctx context.Context, rs stats.RPCStats, isServer bool c.rpcDuration.Record(ctx, elapsedTime, recordOpts...) if gctx != nil { - c.rpcRequestsPerRPC.Record(ctx, atomic.LoadInt64(&gctx.messagesReceived), recordOpts...) - c.rpcResponsesPerRPC.Record(ctx, atomic.LoadInt64(&gctx.messagesSent), recordOpts...) + c.rpcInMessages.Record(ctx, atomic.LoadInt64(&gctx.inMessages), recordOpts...) + c.rpcOutMessages.Record(ctx, atomic.LoadInt64(&gctx.outMessages), recordOpts...) 
} default: return diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/version.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/version.go index 04f425edf..25a3a8629 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/version.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/version.go @@ -5,7 +5,7 @@ package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.g // Version is the current release version of the gRPC instrumentation. func Version() string { - return "0.54.0" + return "0.58.0" // This string is updated by the pre_release.sh script during release } diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go index 6aae83bfd..b25641c55 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go @@ -18,7 +18,7 @@ var DefaultClient = &http.Client{Transport: NewTransport(http.DefaultTransport)} // Get is a convenient replacement for http.Get that adds a span around the request. func Get(ctx context.Context, targetURL string) (resp *http.Response, err error) { - req, err := http.NewRequestWithContext(ctx, "GET", targetURL, nil) + req, err := http.NewRequestWithContext(ctx, http.MethodGet, targetURL, nil) if err != nil { return nil, err } @@ -27,7 +27,7 @@ func Get(ctx context.Context, targetURL string) (resp *http.Response, err error) // Head is a convenient replacement for http.Head that adds a span around the request. func Head(ctx context.Context, targetURL string) (resp *http.Response, err error) { - req, err := http.NewRequestWithContext(ctx, "HEAD", targetURL, nil) + req, err := http.NewRequestWithContext(ctx, http.MethodHead, targetURL, nil) if err != nil { return nil, err } @@ -36,7 +36,7 @@ func Head(ctx context.Context, targetURL string) (resp *http.Response, err error // Post is a convenient replacement for http.Post that adds a span around the request. func Post(ctx context.Context, targetURL, contentType string, body io.Reader) (resp *http.Response, err error) { - req, err := http.NewRequestWithContext(ctx, "POST", targetURL, body) + req, err := http.NewRequestWithContext(ctx, http.MethodPost, targetURL, body) if err != nil { return nil, err } diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go index 5d6e6156b..a83a02627 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go @@ -18,13 +18,6 @@ const ( WriteErrorKey = attribute.Key("http.write_error") // if an error occurred while writing a reply, the string of the error (io.EOF is not recorded) ) -// Client HTTP metrics. -const ( - clientRequestSize = "http.client.request.size" // Outgoing request bytes total - clientResponseSize = "http.client.response.size" // Outgoing response bytes total - clientDuration = "http.client.duration" // Outgoing end to end duration, milliseconds -) - // Filter is a predicate used to determine whether a given http.request should // be traced. A Filter must return true if the request should be traced. 
type Filter func(*http.Request) bool diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go index 33580a35b..e555a475f 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go @@ -81,12 +81,6 @@ func (h *middleware) configure(c *config) { h.semconv = semconv.NewHTTPServer(c.Meter) } -func handleErr(err error) { - if err != nil { - otel.Handle(err) - } -} - // serveHTTP sets up tracing and calls the given next http.Handler with the span // context injected into the request context. func (h *middleware) serveHTTP(w http.ResponseWriter, r *http.Request, next http.Handler) { @@ -123,6 +117,11 @@ func (h *middleware) serveHTTP(w http.ResponseWriter, r *http.Request, next http } } + if startTime := StartTimeFromContext(ctx); !startTime.IsZero() { + opts = append(opts, trace.WithTimestamp(startTime)) + requestStartTime = startTime + } + ctx, span := tracer.Start(ctx, h.spanNameFormatter(h.operation, r), opts...) defer span.End() @@ -190,14 +189,18 @@ func (h *middleware) serveHTTP(w http.ResponseWriter, r *http.Request, next http // Use floating point division here for higher precision (instead of Millisecond method). elapsedTime := float64(time.Since(requestStartTime)) / float64(time.Millisecond) - h.semconv.RecordMetrics(ctx, semconv.MetricData{ - ServerName: h.server, - Req: r, - StatusCode: statusCode, - AdditionalAttributes: labeler.Get(), - RequestSize: bw.BytesRead(), - ResponseSize: bytesWritten, - ElapsedTime: elapsedTime, + h.semconv.RecordMetrics(ctx, semconv.ServerMetricData{ + ServerName: h.server, + ResponseSize: bytesWritten, + MetricAttributes: semconv.MetricAttributes{ + Req: r, + StatusCode: statusCode, + AdditionalAttributes: labeler.Get(), + }, + MetricData: semconv.MetricData{ + RequestSize: bw.BytesRead(), + ElapsedTime: elapsedTime, + }, }) } diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/resp_writer_wrapper.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/resp_writer_wrapper.go index aea171fb2..fbc344cbd 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/resp_writer_wrapper.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/resp_writer_wrapper.go @@ -44,7 +44,9 @@ func (w *RespWriterWrapper) Write(p []byte) (int, error) { w.mu.Lock() defer w.mu.Unlock() - w.writeHeader(http.StatusOK) + if !w.wroteHeader { + w.writeHeader(http.StatusOK) + } n, err := w.ResponseWriter.Write(p) n1 := int64(n) @@ -80,7 +82,12 @@ func (w *RespWriterWrapper) writeHeader(statusCode int) { // Flush implements [http.Flusher]. 
func (w *RespWriterWrapper) Flush() { - w.WriteHeader(http.StatusOK) + w.mu.Lock() + defer w.mu.Unlock() + + if !w.wroteHeader { + w.writeHeader(http.StatusOK) + } if f, ok := w.ResponseWriter.(http.Flusher); ok { f.Flush() diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go index 9cae4cab8..3b036f8a3 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go @@ -9,6 +9,7 @@ import ( "net/http" "os" "strings" + "sync" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" @@ -50,9 +51,9 @@ type HTTPServer struct { // The req Host will be used to determine the server instead. func (s HTTPServer) RequestTraceAttrs(server string, req *http.Request) []attribute.KeyValue { if s.duplicate { - return append(oldHTTPServer{}.RequestTraceAttrs(server, req), newHTTPServer{}.RequestTraceAttrs(server, req)...) + return append(OldHTTPServer{}.RequestTraceAttrs(server, req), CurrentHTTPServer{}.RequestTraceAttrs(server, req)...) } - return oldHTTPServer{}.RequestTraceAttrs(server, req) + return OldHTTPServer{}.RequestTraceAttrs(server, req) } // ResponseTraceAttrs returns trace attributes for telemetry from an HTTP response. @@ -60,14 +61,14 @@ func (s HTTPServer) RequestTraceAttrs(server string, req *http.Request) []attrib // If any of the fields in the ResponseTelemetry are not set the attribute will be omitted. func (s HTTPServer) ResponseTraceAttrs(resp ResponseTelemetry) []attribute.KeyValue { if s.duplicate { - return append(oldHTTPServer{}.ResponseTraceAttrs(resp), newHTTPServer{}.ResponseTraceAttrs(resp)...) + return append(OldHTTPServer{}.ResponseTraceAttrs(resp), CurrentHTTPServer{}.ResponseTraceAttrs(resp)...) } - return oldHTTPServer{}.ResponseTraceAttrs(resp) + return OldHTTPServer{}.ResponseTraceAttrs(resp) } // Route returns the attribute for the route. func (s HTTPServer) Route(route string) attribute.KeyValue { - return oldHTTPServer{}.Route(route) + return OldHTTPServer{}.Route(route) } // Status returns a span status code and message for an HTTP status code @@ -83,29 +84,46 @@ func (s HTTPServer) Status(code int) (codes.Code, string) { return codes.Unset, "" } -type MetricData struct { - ServerName string +type ServerMetricData struct { + ServerName string + ResponseSize int64 + + MetricData + MetricAttributes +} + +type MetricAttributes struct { Req *http.Request StatusCode int AdditionalAttributes []attribute.KeyValue +} - RequestSize int64 - ResponseSize int64 - ElapsedTime float64 +type MetricData struct { + RequestSize int64 + ElapsedTime float64 +} + +var metricAddOptionPool = &sync.Pool{ + New: func() interface{} { + return &[]metric.AddOption{} + }, } -func (s HTTPServer) RecordMetrics(ctx context.Context, md MetricData) { +func (s HTTPServer) RecordMetrics(ctx context.Context, md ServerMetricData) { if s.requestBytesCounter == nil || s.responseBytesCounter == nil || s.serverLatencyMeasure == nil { - // This will happen if an HTTPServer{} is used insted of NewHTTPServer. + // This will happen if an HTTPServer{} is used instead of NewHTTPServer. 
return } - attributes := oldHTTPServer{}.MetricAttributes(md.ServerName, md.Req, md.StatusCode, md.AdditionalAttributes) + attributes := OldHTTPServer{}.MetricAttributes(md.ServerName, md.Req, md.StatusCode, md.AdditionalAttributes) o := metric.WithAttributeSet(attribute.NewSet(attributes...)) - addOpts := []metric.AddOption{o} // Allocate vararg slice once. - s.requestBytesCounter.Add(ctx, md.RequestSize, addOpts...) - s.responseBytesCounter.Add(ctx, md.ResponseSize, addOpts...) + addOpts := metricAddOptionPool.Get().(*[]metric.AddOption) + *addOpts = append(*addOpts, o) + s.requestBytesCounter.Add(ctx, md.RequestSize, *addOpts...) + s.responseBytesCounter.Add(ctx, md.ResponseSize, *addOpts...) s.serverLatencyMeasure.Record(ctx, md.ElapsedTime, o) + *addOpts = (*addOpts)[:0] + metricAddOptionPool.Put(addOpts) // TODO: Duplicate Metrics } @@ -116,34 +134,43 @@ func NewHTTPServer(meter metric.Meter) HTTPServer { server := HTTPServer{ duplicate: duplicate, } - server.requestBytesCounter, server.responseBytesCounter, server.serverLatencyMeasure = oldHTTPServer{}.createMeasures(meter) + server.requestBytesCounter, server.responseBytesCounter, server.serverLatencyMeasure = OldHTTPServer{}.createMeasures(meter) return server } type HTTPClient struct { duplicate bool + + // old metrics + requestBytesCounter metric.Int64Counter + responseBytesCounter metric.Int64Counter + latencyMeasure metric.Float64Histogram } -func NewHTTPClient() HTTPClient { +func NewHTTPClient(meter metric.Meter) HTTPClient { env := strings.ToLower(os.Getenv("OTEL_SEMCONV_STABILITY_OPT_IN")) - return HTTPClient{duplicate: env == "http/dup"} + client := HTTPClient{ + duplicate: env == "http/dup", + } + client.requestBytesCounter, client.responseBytesCounter, client.latencyMeasure = OldHTTPClient{}.createMeasures(meter) + return client } // RequestTraceAttrs returns attributes for an HTTP request made by a client. func (c HTTPClient) RequestTraceAttrs(req *http.Request) []attribute.KeyValue { if c.duplicate { - return append(oldHTTPClient{}.RequestTraceAttrs(req), newHTTPClient{}.RequestTraceAttrs(req)...) + return append(OldHTTPClient{}.RequestTraceAttrs(req), CurrentHTTPClient{}.RequestTraceAttrs(req)...) } - return oldHTTPClient{}.RequestTraceAttrs(req) + return OldHTTPClient{}.RequestTraceAttrs(req) } // ResponseTraceAttrs returns metric attributes for an HTTP request made by a client. func (c HTTPClient) ResponseTraceAttrs(resp *http.Response) []attribute.KeyValue { if c.duplicate { - return append(oldHTTPClient{}.ResponseTraceAttrs(resp), newHTTPClient{}.ResponseTraceAttrs(resp)...) + return append(OldHTTPClient{}.ResponseTraceAttrs(resp), CurrentHTTPClient{}.ResponseTraceAttrs(resp)...) 
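The HTTPServer.RecordMetrics hunk above replaces a per-call `[]metric.AddOption` allocation with a `sync.Pool` of reusable slices. A self-contained sketch of that pattern, assuming a global MeterProvider is registered elsewhere; `addOptPool`, `record`, and the `example.bytes` counter name are illustrative, not otelhttp API.

```go
package main

import (
	"context"
	"log"
	"sync"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/metric"
)

// addOptPool reuses the variadic option slice across calls so each
// measurement does not allocate a fresh []metric.AddOption.
var addOptPool = sync.Pool{
	New: func() any { return &[]metric.AddOption{} },
}

func record(ctx context.Context, c metric.Int64Counter, n int64, attrs []attribute.KeyValue) {
	o := metric.WithAttributeSet(attribute.NewSet(attrs...))

	opts := addOptPool.Get().(*[]metric.AddOption)
	*opts = append(*opts, o)
	c.Add(ctx, n, *opts...)

	*opts = (*opts)[:0] // reset length, keep capacity for reuse
	addOptPool.Put(opts)
}

func main() {
	counter, err := otel.Meter("example").Int64Counter("example.bytes")
	if err != nil {
		log.Fatal(err)
	}
	record(context.Background(), counter, 42,
		[]attribute.KeyValue{attribute.String("k", "v")})
}
```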
} - return oldHTTPClient{}.ResponseTraceAttrs(resp) + return OldHTTPClient{}.ResponseTraceAttrs(resp) } func (c HTTPClient) Status(code int) (codes.Code, string) { @@ -158,8 +185,53 @@ func (c HTTPClient) Status(code int) (codes.Code, string) { func (c HTTPClient) ErrorType(err error) attribute.KeyValue { if c.duplicate { - return newHTTPClient{}.ErrorType(err) + return CurrentHTTPClient{}.ErrorType(err) } return attribute.KeyValue{} } + +type MetricOpts struct { + measurement metric.MeasurementOption + addOptions metric.AddOption +} + +func (o MetricOpts) MeasurementOption() metric.MeasurementOption { + return o.measurement +} + +func (o MetricOpts) AddOptions() metric.AddOption { + return o.addOptions +} + +func (c HTTPClient) MetricOptions(ma MetricAttributes) MetricOpts { + attributes := OldHTTPClient{}.MetricAttributes(ma.Req, ma.StatusCode, ma.AdditionalAttributes) + // TODO: Duplicate Metrics + set := metric.WithAttributeSet(attribute.NewSet(attributes...)) + return MetricOpts{ + measurement: set, + addOptions: set, + } +} + +func (s HTTPClient) RecordMetrics(ctx context.Context, md MetricData, opts MetricOpts) { + if s.requestBytesCounter == nil || s.latencyMeasure == nil { + // This will happen if an HTTPClient{} is used instead of NewHTTPClient(). + return + } + + s.requestBytesCounter.Add(ctx, md.RequestSize, opts.AddOptions()) + s.latencyMeasure.Record(ctx, md.ElapsedTime, opts.MeasurementOption()) + + // TODO: Duplicate Metrics +} + +func (s HTTPClient) RecordResponseSize(ctx context.Context, responseData int64, opts metric.AddOption) { + if s.responseBytesCounter == nil { + // This will happen if an HTTPClient{} is used instead of NewHTTPClient(). + return + } + + s.responseBytesCounter.Add(ctx, responseData, opts) + // TODO: Duplicate Metrics +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/httpconv.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/httpconv.go index 745b8c67b..dc9ec7bc3 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/httpconv.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/httpconv.go @@ -14,7 +14,7 @@ import ( semconvNew "go.opentelemetry.io/otel/semconv/v1.26.0" ) -type newHTTPServer struct{} +type CurrentHTTPServer struct{} // TraceRequest returns trace attributes for an HTTP request received by a // server. @@ -32,18 +32,18 @@ type newHTTPServer struct{} // // If the primary server name is not known, server should be an empty string. // The req Host will be used to determine the server instead. -func (n newHTTPServer) RequestTraceAttrs(server string, req *http.Request) []attribute.KeyValue { +func (n CurrentHTTPServer) RequestTraceAttrs(server string, req *http.Request) []attribute.KeyValue { count := 3 // ServerAddress, Method, Scheme var host string var p int if server == "" { - host, p = splitHostPort(req.Host) + host, p = SplitHostPort(req.Host) } else { // Prioritize the primary server name. 
- host, p = splitHostPort(server) + host, p = SplitHostPort(server) if p < 0 { - _, p = splitHostPort(req.Host) + _, p = SplitHostPort(req.Host) } } @@ -59,7 +59,7 @@ func (n newHTTPServer) RequestTraceAttrs(server string, req *http.Request) []att scheme := n.scheme(req.TLS != nil) - if peer, peerPort := splitHostPort(req.RemoteAddr); peer != "" { + if peer, peerPort := SplitHostPort(req.RemoteAddr); peer != "" { // The Go HTTP server sets RemoteAddr to "IP:port", this will not be a // file-path that would be interpreted with a sock family. count++ @@ -104,7 +104,7 @@ func (n newHTTPServer) RequestTraceAttrs(server string, req *http.Request) []att attrs = append(attrs, methodOriginal) } - if peer, peerPort := splitHostPort(req.RemoteAddr); peer != "" { + if peer, peerPort := SplitHostPort(req.RemoteAddr); peer != "" { // The Go HTTP server sets RemoteAddr to "IP:port", this will not be a // file-path that would be interpreted with a sock family. attrs = append(attrs, semconvNew.NetworkPeerAddress(peer)) @@ -135,7 +135,7 @@ func (n newHTTPServer) RequestTraceAttrs(server string, req *http.Request) []att return attrs } -func (n newHTTPServer) method(method string) (attribute.KeyValue, attribute.KeyValue) { +func (n CurrentHTTPServer) method(method string) (attribute.KeyValue, attribute.KeyValue) { if method == "" { return semconvNew.HTTPRequestMethodGet, attribute.KeyValue{} } @@ -150,7 +150,7 @@ func (n newHTTPServer) method(method string) (attribute.KeyValue, attribute.KeyV return semconvNew.HTTPRequestMethodGet, orig } -func (n newHTTPServer) scheme(https bool) attribute.KeyValue { // nolint:revive +func (n CurrentHTTPServer) scheme(https bool) attribute.KeyValue { // nolint:revive if https { return semconvNew.URLScheme("https") } @@ -160,7 +160,7 @@ func (n newHTTPServer) scheme(https bool) attribute.KeyValue { // nolint:revive // TraceResponse returns trace attributes for telemetry from an HTTP response. // // If any of the fields in the ResponseTelemetry are not set the attribute will be omitted. -func (n newHTTPServer) ResponseTraceAttrs(resp ResponseTelemetry) []attribute.KeyValue { +func (n CurrentHTTPServer) ResponseTraceAttrs(resp ResponseTelemetry) []attribute.KeyValue { var count int if resp.ReadBytes > 0 { @@ -195,14 +195,14 @@ func (n newHTTPServer) ResponseTraceAttrs(resp ResponseTelemetry) []attribute.Ke } // Route returns the attribute for the route. -func (n newHTTPServer) Route(route string) attribute.KeyValue { +func (n CurrentHTTPServer) Route(route string) attribute.KeyValue { return semconvNew.HTTPRoute(route) } -type newHTTPClient struct{} +type CurrentHTTPClient struct{} // RequestTraceAttrs returns trace attributes for an HTTP request made by a client. -func (n newHTTPClient) RequestTraceAttrs(req *http.Request) []attribute.KeyValue { +func (n CurrentHTTPClient) RequestTraceAttrs(req *http.Request) []attribute.KeyValue { /* below attributes are returned: - http.request.method @@ -222,7 +222,7 @@ func (n newHTTPClient) RequestTraceAttrs(req *http.Request) []attribute.KeyValue var requestHost string var requestPort int for _, hostport := range []string{urlHost, req.Header.Get("Host")} { - requestHost, requestPort = splitHostPort(hostport) + requestHost, requestPort = SplitHostPort(hostport) if requestHost != "" || requestPort > 0 { break } @@ -284,7 +284,7 @@ func (n newHTTPClient) RequestTraceAttrs(req *http.Request) []attribute.KeyValue } // ResponseTraceAttrs returns trace attributes for an HTTP response made by a client. 
-func (n newHTTPClient) ResponseTraceAttrs(resp *http.Response) []attribute.KeyValue { +func (n CurrentHTTPClient) ResponseTraceAttrs(resp *http.Response) []attribute.KeyValue { /* below attributes are returned: - http.response.status_code @@ -311,7 +311,7 @@ func (n newHTTPClient) ResponseTraceAttrs(resp *http.Response) []attribute.KeyVa return attrs } -func (n newHTTPClient) ErrorType(err error) attribute.KeyValue { +func (n CurrentHTTPClient) ErrorType(err error) attribute.KeyValue { t := reflect.TypeOf(err) var value string if t.PkgPath() == "" && t.Name() == "" { @@ -328,7 +328,7 @@ func (n newHTTPClient) ErrorType(err error) attribute.KeyValue { return semconvNew.ErrorTypeKey.String(value) } -func (n newHTTPClient) method(method string) (attribute.KeyValue, attribute.KeyValue) { +func (n CurrentHTTPClient) method(method string) (attribute.KeyValue, attribute.KeyValue) { if method == "" { return semconvNew.HTTPRequestMethodGet, attribute.KeyValue{} } diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/util.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/util.go index e6e14924f..93e8d0f94 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/util.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/util.go @@ -14,14 +14,14 @@ import ( semconvNew "go.opentelemetry.io/otel/semconv/v1.26.0" ) -// splitHostPort splits a network address hostport of the form "host", +// SplitHostPort splits a network address hostport of the form "host", // "host%zone", "[host]", "[host%zone], "host:port", "host%zone:port", // "[host]:port", "[host%zone]:port", or ":port" into host or host%zone and // port. // // An empty host is returned if it is not provided or unparsable. A negative // port is returned if it is not provided or unparsable. -func splitHostPort(hostport string) (host string, port int) { +func SplitHostPort(hostport string) (host string, port int) { port = -1 if strings.HasPrefix(hostport, "[") { diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.20.0.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.20.0.go index c999b05e6..c042249dd 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.20.0.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.20.0.go @@ -17,7 +17,7 @@ import ( semconv "go.opentelemetry.io/otel/semconv/v1.20.0" ) -type oldHTTPServer struct{} +type OldHTTPServer struct{} // RequestTraceAttrs returns trace attributes for an HTTP request received by a // server. @@ -35,14 +35,14 @@ type oldHTTPServer struct{} // // If the primary server name is not known, server should be an empty string. // The req Host will be used to determine the server instead. -func (o oldHTTPServer) RequestTraceAttrs(server string, req *http.Request) []attribute.KeyValue { +func (o OldHTTPServer) RequestTraceAttrs(server string, req *http.Request) []attribute.KeyValue { return semconvutil.HTTPServerRequest(server, req) } // ResponseTraceAttrs returns trace attributes for telemetry from an HTTP response. // // If any of the fields in the ResponseTelemetry are not set the attribute will be omitted. 
-func (o oldHTTPServer) ResponseTraceAttrs(resp ResponseTelemetry) []attribute.KeyValue { +func (o OldHTTPServer) ResponseTraceAttrs(resp ResponseTelemetry) []attribute.KeyValue { attributes := []attribute.KeyValue{} if resp.ReadBytes > 0 { @@ -67,7 +67,7 @@ func (o oldHTTPServer) ResponseTraceAttrs(resp ResponseTelemetry) []attribute.Ke } // Route returns the attribute for the route. -func (o oldHTTPServer) Route(route string) attribute.KeyValue { +func (o OldHTTPServer) Route(route string) attribute.KeyValue { return semconv.HTTPRoute(route) } @@ -84,7 +84,7 @@ const ( serverDuration = "http.server.duration" // Incoming end to end duration, milliseconds ) -func (h oldHTTPServer) createMeasures(meter metric.Meter) (metric.Int64Counter, metric.Int64Counter, metric.Float64Histogram) { +func (h OldHTTPServer) createMeasures(meter metric.Meter) (metric.Int64Counter, metric.Int64Counter, metric.Float64Histogram) { if meter == nil { return noop.Int64Counter{}, noop.Int64Counter{}, noop.Float64Histogram{} } @@ -113,17 +113,17 @@ func (h oldHTTPServer) createMeasures(meter metric.Meter) (metric.Int64Counter, return requestBytesCounter, responseBytesCounter, serverLatencyMeasure } -func (o oldHTTPServer) MetricAttributes(server string, req *http.Request, statusCode int, additionalAttributes []attribute.KeyValue) []attribute.KeyValue { +func (o OldHTTPServer) MetricAttributes(server string, req *http.Request, statusCode int, additionalAttributes []attribute.KeyValue) []attribute.KeyValue { n := len(additionalAttributes) + 3 var host string var p int if server == "" { - host, p = splitHostPort(req.Host) + host, p = SplitHostPort(req.Host) } else { // Prioritize the primary server name. - host, p = splitHostPort(server) + host, p = SplitHostPort(server) if p < 0 { - _, p = splitHostPort(req.Host) + _, p = SplitHostPort(req.Host) } } hostPort := requiredHTTPPort(req.TLS != nil, p) @@ -144,7 +144,7 @@ func (o oldHTTPServer) MetricAttributes(server string, req *http.Request, status attributes := slices.Grow(additionalAttributes, n) attributes = append(attributes, - o.methodMetric(req.Method), + standardizeHTTPMethodMetric(req.Method), o.scheme(req.TLS != nil), semconv.NetHostName(host)) @@ -164,29 +164,111 @@ func (o oldHTTPServer) MetricAttributes(server string, req *http.Request, status return attributes } -func (o oldHTTPServer) methodMetric(method string) attribute.KeyValue { - method = strings.ToUpper(method) - switch method { - case http.MethodConnect, http.MethodDelete, http.MethodGet, http.MethodHead, http.MethodOptions, http.MethodPatch, http.MethodPost, http.MethodPut, http.MethodTrace: - default: - method = "_OTHER" - } - return semconv.HTTPMethod(method) -} - -func (o oldHTTPServer) scheme(https bool) attribute.KeyValue { // nolint:revive +func (o OldHTTPServer) scheme(https bool) attribute.KeyValue { // nolint:revive if https { return semconv.HTTPSchemeHTTPS } return semconv.HTTPSchemeHTTP } -type oldHTTPClient struct{} +type OldHTTPClient struct{} -func (o oldHTTPClient) RequestTraceAttrs(req *http.Request) []attribute.KeyValue { +func (o OldHTTPClient) RequestTraceAttrs(req *http.Request) []attribute.KeyValue { return semconvutil.HTTPClientRequest(req) } -func (o oldHTTPClient) ResponseTraceAttrs(resp *http.Response) []attribute.KeyValue { +func (o OldHTTPClient) ResponseTraceAttrs(resp *http.Response) []attribute.KeyValue { return semconvutil.HTTPClientResponse(resp) } + +func (o OldHTTPClient) MetricAttributes(req *http.Request, statusCode int, additionalAttributes []attribute.KeyValue) 
[]attribute.KeyValue { + /* The following semantic conventions are returned if present: + http.method string + http.status_code int + net.peer.name string + net.peer.port int + */ + + n := 2 // method, peer name. + var h string + if req.URL != nil { + h = req.URL.Host + } + var requestHost string + var requestPort int + for _, hostport := range []string{h, req.Header.Get("Host")} { + requestHost, requestPort = SplitHostPort(hostport) + if requestHost != "" || requestPort > 0 { + break + } + } + + port := requiredHTTPPort(req.URL != nil && req.URL.Scheme == "https", requestPort) + if port > 0 { + n++ + } + + if statusCode > 0 { + n++ + } + + attributes := slices.Grow(additionalAttributes, n) + attributes = append(attributes, + standardizeHTTPMethodMetric(req.Method), + semconv.NetPeerName(requestHost), + ) + + if port > 0 { + attributes = append(attributes, semconv.NetPeerPort(port)) + } + + if statusCode > 0 { + attributes = append(attributes, semconv.HTTPStatusCode(statusCode)) + } + return attributes +} + +// Client HTTP metrics. +const ( + clientRequestSize = "http.client.request.size" // Incoming request bytes total + clientResponseSize = "http.client.response.size" // Incoming response bytes total + clientDuration = "http.client.duration" // Incoming end to end duration, milliseconds +) + +func (o OldHTTPClient) createMeasures(meter metric.Meter) (metric.Int64Counter, metric.Int64Counter, metric.Float64Histogram) { + if meter == nil { + return noop.Int64Counter{}, noop.Int64Counter{}, noop.Float64Histogram{} + } + requestBytesCounter, err := meter.Int64Counter( + clientRequestSize, + metric.WithUnit("By"), + metric.WithDescription("Measures the size of HTTP request messages."), + ) + handleErr(err) + + responseBytesCounter, err := meter.Int64Counter( + clientResponseSize, + metric.WithUnit("By"), + metric.WithDescription("Measures the size of HTTP response messages."), + ) + handleErr(err) + + latencyMeasure, err := meter.Float64Histogram( + clientDuration, + metric.WithUnit("ms"), + metric.WithDescription("Measures the duration of outbound HTTP requests."), + ) + handleErr(err) + + return requestBytesCounter, responseBytesCounter, latencyMeasure +} + +func standardizeHTTPMethodMetric(method string) attribute.KeyValue { + method = strings.ToUpper(method) + switch method { + case http.MethodConnect, http.MethodDelete, http.MethodGet, http.MethodHead, http.MethodOptions, http.MethodPatch, http.MethodPost, http.MethodPut, http.MethodTrace: + default: + method = "_OTHER" + } + return semconv.HTTPMethod(method) +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/start_time_context.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/start_time_context.go new file mode 100644 index 000000000..9476ef01b --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/start_time_context.go @@ -0,0 +1,29 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" + +import ( + "context" + "time" +) + +type startTimeContextKeyType int + +const startTimeContextKey startTimeContextKeyType = 0 + +// ContextWithStartTime returns a new context with the provided start time. The +// start time will be used for metrics and traces emitted by the +// instrumentation. Only one labeller can be injected into the context. +// Injecting it multiple times will override the previous calls. 
+func ContextWithStartTime(parent context.Context, start time.Time) context.Context { + return context.WithValue(parent, startTimeContextKey, start) +} + +// StartTimeFromContext retrieves a time.Time from the provided context if one +// is available. If no start time was found in the provided context, a new, +// zero start time is returned and the second return value is false. +func StartTimeFromContext(ctx context.Context) time.Time { + t, _ := ctx.Value(startTimeContextKey).(time.Time) + return t +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go index b4119d343..39681ad4b 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go @@ -13,11 +13,9 @@ import ( "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request" "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv" - "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil" "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" - "go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/propagation" "go.opentelemetry.io/otel/trace" @@ -29,7 +27,6 @@ type Transport struct { rt http.RoundTripper tracer trace.Tracer - meter metric.Meter propagators propagation.TextMapPropagator spanStartOptions []trace.SpanStartOption filters []Filter @@ -37,10 +34,7 @@ type Transport struct { clientTrace func(context.Context) *httptrace.ClientTrace metricAttributesFn func(*http.Request) []attribute.KeyValue - semconv semconv.HTTPClient - requestBytesCounter metric.Int64Counter - responseBytesCounter metric.Int64Counter - latencyMeasure metric.Float64Histogram + semconv semconv.HTTPClient } var _ http.RoundTripper = &Transport{} @@ -57,8 +51,7 @@ func NewTransport(base http.RoundTripper, opts ...Option) *Transport { } t := Transport{ - rt: base, - semconv: semconv.NewHTTPClient(), + rt: base, } defaultOpts := []Option{ @@ -68,46 +61,21 @@ func NewTransport(base http.RoundTripper, opts ...Option) *Transport { c := newConfig(append(defaultOpts, opts...)...) 
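The new start_time_context.go above exports ContextWithStartTime and StartTimeFromContext, and the handler.go hunk earlier uses the stored time as the span start timestamp. A hedged usage sketch, assuming trace/metric providers are configured elsewhere; the wrapping handler, the "demo" operation name, and the ":8080" address are illustrative.

```go
package main

import (
	"net/http"
	"time"

	"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
)

func main() {
	inner := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusNoContent)
	})
	traced := otelhttp.NewHandler(inner, "demo")

	// Capture the moment the request was first seen (e.g. before auth or
	// body buffering) and expose it to otelhttp via the request context,
	// so the middleware backdates the span to this instant.
	withStart := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		accepted := time.Now()
		ctx := otelhttp.ContextWithStartTime(r.Context(), accepted)
		traced.ServeHTTP(w, r.WithContext(ctx))
	})

	_ = http.ListenAndServe(":8080", withStart)
}
```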
t.applyConfig(c) - t.createMeasures() return &t } func (t *Transport) applyConfig(c *config) { t.tracer = c.Tracer - t.meter = c.Meter t.propagators = c.Propagators t.spanStartOptions = c.SpanStartOptions t.filters = c.Filters t.spanNameFormatter = c.SpanNameFormatter t.clientTrace = c.ClientTrace + t.semconv = semconv.NewHTTPClient(c.Meter) t.metricAttributesFn = c.MetricAttributesFn } -func (t *Transport) createMeasures() { - var err error - t.requestBytesCounter, err = t.meter.Int64Counter( - clientRequestSize, - metric.WithUnit("By"), - metric.WithDescription("Measures the size of HTTP request messages."), - ) - handleErr(err) - - t.responseBytesCounter, err = t.meter.Int64Counter( - clientResponseSize, - metric.WithUnit("By"), - metric.WithDescription("Measures the size of HTTP response messages."), - ) - handleErr(err) - - t.latencyMeasure, err = t.meter.Float64Histogram( - clientDuration, - metric.WithUnit("ms"), - metric.WithDescription("Measures the duration of outbound HTTP requests."), - ) - handleErr(err) -} - func defaultTransportFormatter(_ string, r *http.Request) string { return "HTTP " + r.Method } @@ -177,16 +145,15 @@ func (t *Transport) RoundTrip(r *http.Request) (*http.Response, error) { } // metrics - metricAttrs := append(append(labeler.Get(), semconvutil.HTTPClientRequestMetrics(r)...), t.metricAttributesFromRequest(r)...) - if res.StatusCode > 0 { - metricAttrs = append(metricAttrs, semconv.HTTPStatusCode(res.StatusCode)) - } - o := metric.WithAttributeSet(attribute.NewSet(metricAttrs...)) + metricOpts := t.semconv.MetricOptions(semconv.MetricAttributes{ + Req: r, + StatusCode: res.StatusCode, + AdditionalAttributes: append(labeler.Get(), t.metricAttributesFromRequest(r)...), + }) - t.requestBytesCounter.Add(ctx, bw.BytesRead(), o) // For handling response bytes we leverage a callback when the client reads the http response readRecordFunc := func(n int64) { - t.responseBytesCounter.Add(ctx, n, o) + t.semconv.RecordResponseSize(ctx, n, metricOpts.AddOptions()) } // traces @@ -198,9 +165,12 @@ func (t *Transport) RoundTrip(r *http.Request) (*http.Response, error) { // Use floating point division here for higher precision (instead of Millisecond method). elapsedTime := float64(time.Since(requestStartTime)) / float64(time.Millisecond) - t.latencyMeasure.Record(ctx, elapsedTime, o) + t.semconv.RecordMetrics(ctx, semconv.MetricData{ + RequestSize: bw.BytesRead(), + ElapsedTime: elapsedTime, + }, metricOpts) - return res, err + return res, nil } func (t *Transport) metricAttributesFromRequest(r *http.Request) []attribute.KeyValue { diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go index 502c1bdaf..353e43b91 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go @@ -5,7 +5,7 @@ package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http // Version is the current release version of the otelhttp instrumentation. 
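With the transport.go changes above, client metrics are created via semconv.NewHTTPClient inside applyConfig rather than held on the Transport itself; callers still just wrap a RoundTripper. A minimal wiring sketch; the request URL is illustrative and telemetry goes to whatever global providers are registered.

```go
package main

import (
	"context"
	"fmt"
	"net/http"

	"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
)

func main() {
	client := http.Client{
		// NewTransport instruments each outgoing request with spans and
		// the client metrics described in the hunks above.
		Transport: otelhttp.NewTransport(http.DefaultTransport),
	}

	req, err := http.NewRequestWithContext(context.Background(), http.MethodGet, "https://example.com/", nil)
	if err != nil {
		fmt.Println("build request:", err)
		return
	}
	resp, err := client.Do(req)
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status)
}
```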
func Version() string { - return "0.54.0" + return "0.58.0" // This string is updated by the pre_release.sh script during release } diff --git a/vendor/go.opentelemetry.io/otel/.gitignore b/vendor/go.opentelemetry.io/otel/.gitignore index 895c7664b..ae8577ef3 100644 --- a/vendor/go.opentelemetry.io/otel/.gitignore +++ b/vendor/go.opentelemetry.io/otel/.gitignore @@ -12,11 +12,3 @@ go.work go.work.sum gen/ - -/example/dice/dice -/example/namedtracer/namedtracer -/example/otel-collector/otel-collector -/example/opencensus/opencensus -/example/passthrough/passthrough -/example/prometheus/prometheus -/example/zipkin/zipkin diff --git a/vendor/go.opentelemetry.io/otel/.golangci.yml b/vendor/go.opentelemetry.io/otel/.golangci.yml index d9abe194d..ce3f40b60 100644 --- a/vendor/go.opentelemetry.io/otel/.golangci.yml +++ b/vendor/go.opentelemetry.io/otel/.golangci.yml @@ -22,13 +22,16 @@ linters: - govet - ineffassign - misspell + - perfsprint - revive - staticcheck - tenv + - testifylint - typecheck - unconvert - unused - unparam + - usestdlibvars issues: # Maximum issues count per one linter. @@ -60,16 +63,17 @@ issues: text: "calls to (.+) only in main[(][)] or init[(][)] functions" linters: - revive - # It's okay to not run gosec in a test. + # It's okay to not run gosec and perfsprint in a test. - path: _test\.go linters: - gosec - # Igonoring gosec G404: Use of weak random number generator (math/rand instead of crypto/rand) + - perfsprint + # Ignoring gosec G404: Use of weak random number generator (math/rand instead of crypto/rand) # as we commonly use it in tests and examples. - text: "G404:" linters: - gosec - # Igonoring gosec G402: TLS MinVersion too low + # Ignoring gosec G402: TLS MinVersion too low # as the https://pkg.go.dev/crypto/tls#Config handles MinVersion default well. - text: "G402: TLS MinVersion too low." linters: @@ -94,6 +98,13 @@ linters-settings: - pkg: "crypto/md5" - pkg: "crypto/sha1" - pkg: "crypto/**/pkix" + auto/sdk: + files: + - "!internal/global/trace.go" + - "~internal/global/trace_test.go" + deny: + - pkg: "go.opentelemetry.io/auto/sdk" + desc: Do not use SDK from automatic instrumentation. otlp-internal: files: - "!**/exporters/otlp/internal/**/*.go" @@ -126,8 +137,6 @@ linters-settings: - "**/metric/**/*.go" - "**/bridge/*.go" - "**/bridge/**/*.go" - - "**/example/*.go" - - "**/example/**/*.go" - "**/trace/*.go" - "**/trace/**/*.go" - "**/log/*.go" @@ -155,6 +164,12 @@ linters-settings: locale: US ignore-words: - cancelled + perfsprint: + err-error: true + errorf: true + int-conversion: true + sprintf1: true + strconcat: true revive: # Sets the default failure confidence. # This means that linting errors with less than 0.8 confidence will be ignored. @@ -302,3 +317,9 @@ linters-settings: # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#waitgroup-by-value - name: waitgroup-by-value disabled: false + testifylint: + enable-all: true + disable: + - float-compare + - go-require + - require-error diff --git a/vendor/go.opentelemetry.io/otel/CHANGELOG.md b/vendor/go.opentelemetry.io/otel/CHANGELOG.md index 6107c17b8..a30988f25 100644 --- a/vendor/go.opentelemetry.io/otel/CHANGELOG.md +++ b/vendor/go.opentelemetry.io/otel/CHANGELOG.md @@ -8,9 +8,132 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm ## [Unreleased] +## [1.33.0/0.55.0/0.9.0/0.0.12] 2024-12-12 + +### Added + +- Add `Reset` method to `SpanRecorder` in `go.opentelemetry.io/otel/sdk/trace/tracetest`. 
(#5994) +- Add `EnabledInstrument` interface in `go.opentelemetry.io/otel/sdk/metric/internal/x`. + This is an experimental interface that is implemented by synchronous instruments provided by `go.opentelemetry.io/otel/sdk/metric`. + Users can use it to avoid performing computationally expensive operations when recording measurements. + It does not fall within the scope of the OpenTelemetry Go versioning and stability [policy](./VERSIONING.md) and it may be changed in backwards incompatible ways or removed in feature releases. (#6016) + +### Changed + +- The default global API now supports full auto-instrumentation from the `go.opentelemetry.io/auto` package. + See that package for more information. (#5920) +- Propagate non-retryable error messages to client in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#5929) +- Propagate non-retryable error messages to client in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#5929) +- Propagate non-retryable error messages to client in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#5929) +- Performance improvements for attribute value `AsStringSlice`, `AsFloat64Slice`, `AsInt64Slice`, `AsBoolSlice`. (#6011) +- Change `EnabledParameters` to have a `Severity` field instead of a getter and setter in `go.opentelemetry.io/otel/log`. (#6009) + +### Fixed + +- Fix inconsistent request body closing in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#5954) +- Fix inconsistent request body closing in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#5954) +- Fix inconsistent request body closing in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#5954) +- Fix invalid exemplar keys in `go.opentelemetry.io/otel/exporters/prometheus`. (#5995) +- Fix attribute value truncation in `go.opentelemetry.io/otel/sdk/trace`. (#5997) +- Fix attribute value truncation in `go.opentelemetry.io/otel/sdk/log`. (#6032) + +## [1.32.0/0.54.0/0.8.0/0.0.11] 2024-11-08 + +### Added + +- Add `go.opentelemetry.io/otel/sdk/metric/exemplar.AlwaysOffFilter`, which can be used to disable exemplar recording. (#5850) +- Add `go.opentelemetry.io/otel/sdk/metric.WithExemplarFilter`, which can be used to configure the exemplar filter used by the metrics SDK. (#5850) +- Add `ExemplarReservoirProviderSelector` and `DefaultExemplarReservoirProviderSelector` to `go.opentelemetry.io/otel/sdk/metric`, which defines the exemplar reservoir to use based on the aggregation of the metric. (#5861) +- Add `ExemplarReservoirProviderSelector` to `go.opentelemetry.io/otel/sdk/metric.Stream` to allow using views to configure the exemplar reservoir to use for a metric. (#5861) +- Add `ReservoirProvider`, `HistogramReservoirProvider` and `FixedSizeReservoirProvider` to `go.opentelemetry.io/otel/sdk/metric/exemplar` to make it convenient to use providers of Reservoirs. (#5861) +- The `go.opentelemetry.io/otel/semconv/v1.27.0` package. + The package contains semantic conventions from the `v1.27.0` version of the OpenTelemetry Semantic Conventions. (#5894) +- Add `Attributes attribute.Set` field to `Scope` in `go.opentelemetry.io/otel/sdk/instrumentation`. (#5903) +- Add `Attributes attribute.Set` field to `ScopeRecords` in `go.opentelemetry.io/otel/log/logtest`. (#5927) +- `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc` adds instrumentation scope attributes. (#5934) +- `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp` adds instrumentation scope attributes. 
(#5934) +- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` adds instrumentation scope attributes. (#5935) +- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` adds instrumentation scope attributes. (#5935) +- `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc` adds instrumentation scope attributes. (#5933) +- `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp` adds instrumentation scope attributes. (#5933) +- `go.opentelemetry.io/otel/exporters/prometheus` adds instrumentation scope attributes in `otel_scope_info` metric as labels. (#5932) + +### Changed + +- Support scope attributes and make them as identifying for `Tracer` in `go.opentelemetry.io/otel` and `go.opentelemetry.io/otel/sdk/trace`. (#5924) +- Support scope attributes and make them as identifying for `Meter` in `go.opentelemetry.io/otel` and `go.opentelemetry.io/otel/sdk/metric`. (#5926) +- Support scope attributes and make them as identifying for `Logger` in `go.opentelemetry.io/otel` and `go.opentelemetry.io/otel/sdk/log`. (#5925) +- Make schema URL and scope attributes as identifying for `Tracer` in `go.opentelemetry.io/otel/bridge/opentracing`. (#5931) +- Clear unneeded slice elements to allow GC to collect the objects in `go.opentelemetry.io/otel/sdk/metric` and `go.opentelemetry.io/otel/sdk/trace`. (#5804) + +### Fixed + +- Global MeterProvider registration unwraps global instrument Observers, the undocumented Unwrap() methods are now private. (#5881) +- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` now keeps the metadata already present in the context when `WithHeaders` is used. (#5892) +- `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc` now keeps the metadata already present in the context when `WithHeaders` is used. (#5911) +- `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc` now keeps the metadata already present in the context when `WithHeaders` is used. (#5915) +- Fix `go.opentelemetry.io/otel/exporters/prometheus` trying to add exemplars to Gauge metrics, which is unsupported. (#5912) +- Fix `WithEndpointURL` to always use a secure connection when an https URL is passed in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`. (#5944) +- Fix `WithEndpointURL` to always use a secure connection when an https URL is passed in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#5944) +- Fix `WithEndpointURL` to always use a secure connection when an https URL is passed in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`. (#5944) +- Fix `WithEndpointURL` to always use a secure connection when an https URL is passed in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#5944) +- Fix incorrect metrics generated from callbacks when multiple readers are used in `go.opentelemetry.io/otel/sdk/metric`. (#5900) + +### Removed + +- Remove all examples under `go.opentelemetry.io/otel/example` as they are moved to [Contrib repository](https://github.com/open-telemetry/opentelemetry-go-contrib/tree/main/examples). (#5930) + +## [1.31.0/0.53.0/0.7.0/0.0.10] 2024-10-11 + +### Added + +- Add `go.opentelemetry.io/otel/sdk/metric/exemplar` package which includes `Exemplar`, `Filter`, `TraceBasedFilter`, `AlwaysOnFilter`, `HistogramReservoir`, `FixedSizeReservoir`, `Reservoir`, `Value` and `ValueType` types. These will be used for configuring the exemplar reservoir for the metrics sdk. 
(#5747, #5862) +- Add `WithExportBufferSize` option to log batch processor.(#5877) + +### Changed + +- Enable exemplars by default in `go.opentelemetry.io/otel/sdk/metric`. Exemplars can be disabled by setting `OTEL_METRICS_EXEMPLAR_FILTER=always_off` (#5778) +- `Logger.Enabled` in `go.opentelemetry.io/otel/log` now accepts a newly introduced `EnabledParameters` type instead of `Record`. (#5791) +- `FilterProcessor.Enabled` in `go.opentelemetry.io/otel/sdk/log/internal/x` now accepts `EnabledParameters` instead of `Record`. (#5791) +- The `Record` type in `go.opentelemetry.io/otel/log` is no longer comparable. (#5847) +- Performance improvements for the trace SDK `SetAttributes` method in `Span`. (#5864) +- Reduce memory allocations for the `Event` and `Link` lists in `Span`. (#5858) +- Performance improvements for the trace SDK `AddEvent`, `AddLink`, `RecordError` and `End` methods in `Span`. (#5874) + +### Deprecated + +- Deprecate all examples under `go.opentelemetry.io/otel/example` as they are moved to [Contrib repository](https://github.com/open-telemetry/opentelemetry-go-contrib/tree/main/examples). (#5854) + +### Fixed + +- The race condition for multiple `FixedSize` exemplar reservoirs identified in #5814 is resolved. (#5819) +- Fix log records duplication in case of heterogeneous resource attributes by correctly mapping each log record to it's resource and scope. (#5803) +- Fix timer channel drain to avoid hanging on Go 1.23. (#5868) +- Fix delegation for global meter providers, and panic when calling otel.SetMeterProvider. (#5827) +- Change the `reflect.TypeOf` to use a nil pointer to not allocate on the heap unless necessary. (#5827) + +## [1.30.0/0.52.0/0.6.0/0.0.9] 2024-09-09 + +### Added + +- Support `OTEL_EXPORTER_OTLP_LOGS_INSECURE` and `OTEL_EXPORTER_OTLP_INSECURE` environments in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc`. (#5739) +- The `WithResource` option for `NewMeterProvider` now merges the provided resources with the ones from environment variables. (#5773) +- The `WithResource` option for `NewLoggerProvider` now merges the provided resources with the ones from environment variables. (#5773) +- Add UTF-8 support to `go.opentelemetry.io/otel/exporters/prometheus`. (#5755) + +### Fixed + +- Fix memory leak in the global `MeterProvider` when identical instruments are repeatedly created. (#5754) +- Fix panic on instruments creation when setting meter provider. (#5758) +- Fix an issue where `SetMeterProvider` in `go.opentelemetry.io/otel` might miss the delegation for instruments and registries. (#5780) + +### Removed + +- Drop support for [Go 1.21]. (#5736, #5740, #5800) + ## [1.29.0/0.51.0/0.5.0] 2024-08-23 This release is the last to support [Go 1.21]. @@ -1895,7 +2018,7 @@ with major version 0. - Setting error status while recording error with Span from oteltest package. (#1729) - The concept of a remote and local Span stored in a context is unified to just the current Span. Because of this `"go.opentelemetry.io/otel/trace".RemoteSpanContextFromContext` is removed as it is no longer needed. - Instead, `"go.opentelemetry.io/otel/trace".SpanContextFromContex` can be used to return the current Span. + Instead, `"go.opentelemetry.io/otel/trace".SpanContextFromContext` can be used to return the current Span. If needed, that Span's `SpanContext.IsRemote()` can then be used to determine if it is remote or not. (#1731) - The `HasRemoteParent` field of the `"go.opentelemetry.io/otel/sdk/trace".SamplingParameters` is removed. 
This field is redundant to the information returned from the `Remote` method of the `SpanContext` held in the `ParentContext` field. (#1749) @@ -2469,7 +2592,7 @@ This release migrates the default OpenTelemetry SDK into its own Go module, deco - Prometheus exporter will not apply stale updates or forget inactive metrics. (#903) - Add test for api.standard `HTTPClientAttributesFromHTTPRequest`. (#905) - Bump github.com/golangci/golangci-lint from 1.27.0 to 1.28.1 in /tools. (#901, #913) -- Update otel-colector example to use the v0.5.0 collector. (#915) +- Update otel-collector example to use the v0.5.0 collector. (#915) - The `grpctrace` instrumentation uses a span name conforming to the OpenTelemetry semantic conventions (does not contain a leading slash (`/`)). (#922) - The `grpctrace` instrumentation includes an `rpc.method` attribute now set to the gRPC method name. (#900, #922) - The `grpctrace` instrumentation `rpc.service` attribute now contains the package name if one exists. @@ -3062,7 +3185,11 @@ It contains api and sdk for trace and meter. - CircleCI build CI manifest files. - CODEOWNERS file to track owners of this project. -[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.29.0...HEAD +[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.33.0...HEAD +[1.33.0/0.55.0/0.9.0/0.0.12]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.33.0 +[1.32.0/0.54.0/0.8.0/0.0.11]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.32.0 +[1.31.0/0.53.0/0.7.0/0.0.10]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.31.0 +[1.30.0/0.52.0/0.6.0/0.0.9]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.30.0 [1.29.0/0.51.0/0.5.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.29.0 [1.28.0/0.50.0/0.4.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.28.0 [1.27.0/0.49.0/0.3.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.27.0 diff --git a/vendor/go.opentelemetry.io/otel/CODEOWNERS b/vendor/go.opentelemetry.io/otel/CODEOWNERS index 5904bb707..945a07d2b 100644 --- a/vendor/go.opentelemetry.io/otel/CODEOWNERS +++ b/vendor/go.opentelemetry.io/otel/CODEOWNERS @@ -12,6 +12,6 @@ # https://help.github.com/en/articles/about-code-owners # -* @MrAlias @XSAM @dashpole @MadVikingGod @pellared @hanyuancheung @dmathieu +* @MrAlias @XSAM @dashpole @pellared @dmathieu -CODEOWNERS @MrAlias @MadVikingGod @pellared @dashpole @XSAM @dmathieu +CODEOWNERS @MrAlias @pellared @dashpole @XSAM @dmathieu diff --git a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md index b7402576f..22a2e9dbd 100644 --- a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md +++ b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md @@ -578,7 +578,10 @@ See also: The tests should never leak goroutines. Use the term `ConcurrentSafe` in the test name when it aims to verify the -absence of race conditions. +absence of race conditions. The top-level tests with this term will be run +many times in the `test-concurrent-safe` CI job to increase the chance of +catching concurrency issues. This does not apply to subtests when this term +is not in their root name. ### Internal packages @@ -626,13 +629,14 @@ should be canceled. 
## Approvers and Maintainers -### Approvers +### Triagers + +- [Cheng-Zhen Yang](https://github.com/scorpionknifes), Independent -- [Chester Cheung](https://github.com/hanyuancheung), Tencent +### Approvers ### Maintainers -- [Aaron Clawson](https://github.com/MadVikingGod), LightStep - [Damien Mathieu](https://github.com/dmathieu), Elastic - [David Ashpole](https://github.com/dashpole), Google - [Robert Pająk](https://github.com/pellared), Splunk @@ -641,11 +645,13 @@ should be canceled. ### Emeritus -- [Liz Fong-Jones](https://github.com/lizthegrey), Honeycomb -- [Gustavo Silva Paiva](https://github.com/paivagustavo), LightStep -- [Josh MacDonald](https://github.com/jmacd), LightStep -- [Anthony Mirabella](https://github.com/Aneurysm9), AWS -- [Evan Torrie](https://github.com/evantorrie), Yahoo +- [Aaron Clawson](https://github.com/MadVikingGod) +- [Anthony Mirabella](https://github.com/Aneurysm9) +- [Chester Cheung](https://github.com/hanyuancheung) +- [Evan Torrie](https://github.com/evantorrie) +- [Gustavo Silva Paiva](https://github.com/paivagustavo) +- [Josh MacDonald](https://github.com/jmacd) +- [Liz Fong-Jones](https://github.com/lizthegrey) ### Become an Approver or a Maintainer diff --git a/vendor/go.opentelemetry.io/otel/Makefile b/vendor/go.opentelemetry.io/otel/Makefile index 070b1e57d..a7f6d8cc6 100644 --- a/vendor/go.opentelemetry.io/otel/Makefile +++ b/vendor/go.opentelemetry.io/otel/Makefile @@ -14,8 +14,8 @@ TIMEOUT = 60 .DEFAULT_GOAL := precommit .PHONY: precommit ci -precommit: generate license-check misspell go-mod-tidy golangci-lint-fix verify-readmes verify-mods test-default -ci: generate license-check lint vanity-import-check verify-readmes verify-mods build test-default check-clean-work-tree test-coverage +precommit: generate toolchain-check license-check misspell go-mod-tidy golangci-lint-fix verify-readmes verify-mods test-default +ci: generate toolchain-check license-check lint vanity-import-check verify-readmes verify-mods build test-default check-clean-work-tree test-coverage # Tools @@ -54,9 +54,6 @@ $(TOOLS)/stringer: PACKAGE=golang.org/x/tools/cmd/stringer PORTO = $(TOOLS)/porto $(TOOLS)/porto: PACKAGE=github.com/jcchavezs/porto/cmd/porto -GOJQ = $(TOOLS)/gojq -$(TOOLS)/gojq: PACKAGE=github.com/itchyny/gojq/cmd/gojq - GOTMPL = $(TOOLS)/gotmpl $(GOTMPL): PACKAGE=go.opentelemetry.io/build-tools/gotmpl @@ -67,7 +64,7 @@ GOVULNCHECK = $(TOOLS)/govulncheck $(TOOLS)/govulncheck: PACKAGE=golang.org/x/vuln/cmd/govulncheck .PHONY: tools -tools: $(CROSSLINK) $(GOLANGCI_LINT) $(MISSPELL) $(GOCOVMERGE) $(STRINGER) $(PORTO) $(GOJQ) $(SEMCONVGEN) $(MULTIMOD) $(SEMCONVKIT) $(GOTMPL) $(GORELEASE) +tools: $(CROSSLINK) $(GOLANGCI_LINT) $(MISSPELL) $(GOCOVMERGE) $(STRINGER) $(PORTO) $(SEMCONVGEN) $(MULTIMOD) $(SEMCONVKIT) $(GOTMPL) $(GORELEASE) # Virtualized python tools via docker @@ -145,12 +142,14 @@ build-tests/%: # Tests -TEST_TARGETS := test-default test-bench test-short test-verbose test-race +TEST_TARGETS := test-default test-bench test-short test-verbose test-race test-concurrent-safe .PHONY: $(TEST_TARGETS) test test-default test-race: ARGS=-race test-bench: ARGS=-run=xxxxxMatchNothingxxxxx -test.benchtime=1ms -bench=. 
test-short: ARGS=-short test-verbose: ARGS=-v -race +test-concurrent-safe: ARGS=-run=ConcurrentSafe -count=100 -race +test-concurrent-safe: TIMEOUT=120 $(TEST_TARGETS): test test: $(OTEL_GO_MOD_DIRS:%=test/%) test/%: DIR=$* @@ -236,6 +235,16 @@ govulncheck/%: $(GOVULNCHECK) codespell: $(CODESPELL) @$(DOCKERPY) $(CODESPELL) +.PHONY: toolchain-check +toolchain-check: + @toolchainRes=$$(for f in $(ALL_GO_MOD_DIRS); do \ + awk '/^toolchain/ { found=1; next } END { if (found) print FILENAME }' $$f/go.mod; \ + done); \ + if [ -n "$${toolchainRes}" ]; then \ + echo "toolchain checking failed:"; echo "$${toolchainRes}"; \ + exit 1; \ + fi + .PHONY: license-check license-check: @licRes=$$(for f in $$(find . -type f \( -iname '*.go' -o -iname '*.sh' \) ! -path '**/third_party/*' ! -path './.git/*' ) ; do \ @@ -261,7 +270,7 @@ SEMCONVPKG ?= "semconv/" semconv-generate: $(SEMCONVGEN) $(SEMCONVKIT) [ "$(TAG)" ] || ( echo "TAG unset: missing opentelemetry semantic-conventions tag"; exit 1 ) [ "$(OTEL_SEMCONV_REPO)" ] || ( echo "OTEL_SEMCONV_REPO unset: missing path to opentelemetry semantic-conventions repo"; exit 1 ) - $(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=attribute_group -p conventionType=trace -f attribute_group.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)" + $(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=attribute_group -p conventionType=trace -f attribute_group.go -z "$(SEMCONVPKG)/capitalizations.txt" -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)" $(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=metric -f metric.go -t "$(SEMCONVPKG)/metric_template.j2" -s "$(TAG)" $(SEMCONVKIT) -output "$(SEMCONVPKG)/$(TAG)" -tag "$(TAG)" diff --git a/vendor/go.opentelemetry.io/otel/README.md b/vendor/go.opentelemetry.io/otel/README.md index 657df3471..efec27890 100644 --- a/vendor/go.opentelemetry.io/otel/README.md +++ b/vendor/go.opentelemetry.io/otel/README.md @@ -51,25 +51,18 @@ Currently, this project supports the following environments. |----------|------------|--------------| | Ubuntu | 1.23 | amd64 | | Ubuntu | 1.22 | amd64 | -| Ubuntu | 1.21 | amd64 | | Ubuntu | 1.23 | 386 | | Ubuntu | 1.22 | 386 | -| Ubuntu | 1.21 | 386 | | Linux | 1.23 | arm64 | | Linux | 1.22 | arm64 | -| Linux | 1.21 | arm64 | | macOS 13 | 1.23 | amd64 | | macOS 13 | 1.22 | amd64 | -| macOS 13 | 1.21 | amd64 | | macOS | 1.23 | arm64 | | macOS | 1.22 | arm64 | -| macOS | 1.21 | arm64 | | Windows | 1.23 | amd64 | | Windows | 1.22 | amd64 | -| Windows | 1.21 | amd64 | | Windows | 1.23 | 386 | | Windows | 1.22 | 386 | -| Windows | 1.21 | 386 | While this project should work for other systems, no compatibility guarantees are made for those systems currently. @@ -96,8 +89,8 @@ If you need to extend the telemetry an instrumentation library provides or want to build your own instrumentation for your application directly you will need to use the [Go otel](https://pkg.go.dev/go.opentelemetry.io/otel) -package. The included [examples](./example/) are a good way to see some -practical uses of this process. +package. The [examples](https://github.com/open-telemetry/opentelemetry-go-contrib/tree/main/examples) +are a good way to see some practical uses of this process. ### Export diff --git a/vendor/go.opentelemetry.io/otel/RELEASING.md b/vendor/go.opentelemetry.io/otel/RELEASING.md index 59992984d..ffa9b6125 100644 --- a/vendor/go.opentelemetry.io/otel/RELEASING.md +++ b/vendor/go.opentelemetry.io/otel/RELEASING.md @@ -111,17 +111,6 @@ It is critical you make sure the version you push upstream is correct. 
Finally create a Release for the new `` on GitHub. The release body should include all the release notes from the Changelog for this release. -## Verify Examples - -After releasing verify that examples build outside of the repository. - -``` -./verify_examples.sh -``` - -The script copies examples into a different directory removes any `replace` declarations in `go.mod` and builds them. -This ensures they build with the published release, not the local copy. - ## Post-Release ### Contrib Repository diff --git a/vendor/go.opentelemetry.io/otel/VERSIONING.md b/vendor/go.opentelemetry.io/otel/VERSIONING.md index 412f1e362..b8cb605c1 100644 --- a/vendor/go.opentelemetry.io/otel/VERSIONING.md +++ b/vendor/go.opentelemetry.io/otel/VERSIONING.md @@ -26,7 +26,7 @@ is designed so the following goals can be achieved. go.opentelemetry.io/otel/v2 v2.0.1`) and in the package import path (e.g., `import "go.opentelemetry.io/otel/v2/trace"`). This includes the paths used in `go get` commands (e.g., `go get - go.opentelemetry.io/otel/v2@v2.0.1`. Note there is both a `/v2` and a + go.opentelemetry.io/otel/v2@v2.0.1`). Note there is both a `/v2` and a `@v2.0.1` in that example. One way to think about it is that the module name now includes the `/v2`, so include `/v2` whenever you are using the module name). diff --git a/vendor/go.opentelemetry.io/otel/attribute/set.go b/vendor/go.opentelemetry.io/otel/attribute/set.go index bff9c7fdb..6cbefcead 100644 --- a/vendor/go.opentelemetry.io/otel/attribute/set.go +++ b/vendor/go.opentelemetry.io/otel/attribute/set.go @@ -347,45 +347,25 @@ func computeDistinct(kvs []KeyValue) Distinct { func computeDistinctFixed(kvs []KeyValue) interface{} { switch len(kvs) { case 1: - ptr := new([1]KeyValue) - copy((*ptr)[:], kvs) - return *ptr + return [1]KeyValue(kvs) case 2: - ptr := new([2]KeyValue) - copy((*ptr)[:], kvs) - return *ptr + return [2]KeyValue(kvs) case 3: - ptr := new([3]KeyValue) - copy((*ptr)[:], kvs) - return *ptr + return [3]KeyValue(kvs) case 4: - ptr := new([4]KeyValue) - copy((*ptr)[:], kvs) - return *ptr + return [4]KeyValue(kvs) case 5: - ptr := new([5]KeyValue) - copy((*ptr)[:], kvs) - return *ptr + return [5]KeyValue(kvs) case 6: - ptr := new([6]KeyValue) - copy((*ptr)[:], kvs) - return *ptr + return [6]KeyValue(kvs) case 7: - ptr := new([7]KeyValue) - copy((*ptr)[:], kvs) - return *ptr + return [7]KeyValue(kvs) case 8: - ptr := new([8]KeyValue) - copy((*ptr)[:], kvs) - return *ptr + return [8]KeyValue(kvs) case 9: - ptr := new([9]KeyValue) - copy((*ptr)[:], kvs) - return *ptr + return [9]KeyValue(kvs) case 10: - ptr := new([10]KeyValue) - copy((*ptr)[:], kvs) - return *ptr + return [10]KeyValue(kvs) default: return nil } diff --git a/vendor/go.opentelemetry.io/otel/baggage/baggage.go b/vendor/go.opentelemetry.io/otel/baggage/baggage.go index b3569e95e..0e1fe2422 100644 --- a/vendor/go.opentelemetry.io/otel/baggage/baggage.go +++ b/vendor/go.opentelemetry.io/otel/baggage/baggage.go @@ -50,7 +50,7 @@ type Property struct { // component boundaries may impose their own restrictions on Property key. // For example, the W3C Baggage specification restricts the Property keys to strings that // satisfy the token definition from RFC7230, Section 3.2.6. -// For maximum compatibility, alpha-numeric value are strongly recommended to be used as Property key. +// For maximum compatibility, alphanumeric value are strongly recommended to be used as Property key. 
func NewKeyProperty(key string) (Property, error) { if !validateBaggageName(key) { return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidKey, key) @@ -90,7 +90,7 @@ func NewKeyValueProperty(key, value string) (Property, error) { // component boundaries may impose their own restrictions on Property key. // For example, the W3C Baggage specification restricts the Property keys to strings that // satisfy the token definition from RFC7230, Section 3.2.6. -// For maximum compatibility, alpha-numeric value are strongly recommended to be used as Property key. +// For maximum compatibility, alphanumeric value are strongly recommended to be used as Property key. func NewKeyValuePropertyRaw(key, value string) (Property, error) { if !validateBaggageName(key) { return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidKey, key) @@ -287,7 +287,7 @@ func NewMember(key, value string, props ...Property) (Member, error) { // component boundaries may impose their own restrictions on baggage key. // For example, the W3C Baggage specification restricts the baggage keys to strings that // satisfy the token definition from RFC7230, Section 3.2.6. -// For maximum compatibility, alpha-numeric value are strongly recommended to be used as baggage key. +// For maximum compatibility, alphanumeric value are strongly recommended to be used as baggage key. func NewMemberRaw(key, value string, props ...Property) (Member, error) { m := Member{ key: key, @@ -355,7 +355,7 @@ func parseMember(member string) (Member, error) { } // replaceInvalidUTF8Sequences replaces invalid UTF-8 sequences with '�'. -func replaceInvalidUTF8Sequences(cap int, unescapeVal string) string { +func replaceInvalidUTF8Sequences(c int, unescapeVal string) string { if utf8.ValidString(unescapeVal) { return unescapeVal } @@ -363,7 +363,7 @@ func replaceInvalidUTF8Sequences(cap int, unescapeVal string) string { // https://github.com/w3c/baggage/blob/8c215efbeebd3fa4b1aceb937a747e56444f22f3/baggage/HTTP_HEADER_FORMAT.md?plain=1#L69 var b strings.Builder - b.Grow(cap) + b.Grow(c) for i := 0; i < len(unescapeVal); { r, size := utf8.DecodeRuneInString(unescapeVal[i:]) if r == utf8.RuneError && size == 1 { diff --git a/vendor/go.opentelemetry.io/otel/codes/codes.go b/vendor/go.opentelemetry.io/otel/codes/codes.go index 2acbac354..49a35b122 100644 --- a/vendor/go.opentelemetry.io/otel/codes/codes.go +++ b/vendor/go.opentelemetry.io/otel/codes/codes.go @@ -5,6 +5,7 @@ package codes // import "go.opentelemetry.io/otel/codes" import ( "encoding/json" + "errors" "fmt" "strconv" ) @@ -63,7 +64,7 @@ func (c *Code) UnmarshalJSON(b []byte) error { return nil } if c == nil { - return fmt.Errorf("nil receiver passed to UnmarshalJSON") + return errors.New("nil receiver passed to UnmarshalJSON") } var x interface{} diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/instrumentation.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/instrumentation.go index f6dd3decc..2e7690e43 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/instrumentation.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/instrumentation.go @@ -13,7 +13,8 @@ func InstrumentationScope(il instrumentation.Scope) *commonpb.InstrumentationSco return nil } return &commonpb.InstrumentationScope{ - Name: il.Name, - Version: il.Version, + Name: il.Name, + Version: il.Version, + Attributes: Iterator(il.Attributes.Iter()), } } diff --git 
a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/span.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/span.go index c3c69c5a0..bf27ef022 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/span.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/span.go @@ -4,6 +4,8 @@ package tracetransform // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform" import ( + "math" + "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" "go.opentelemetry.io/otel/sdk/instrumentation" @@ -95,16 +97,16 @@ func span(sd tracesdk.ReadOnlySpan) *tracepb.Span { SpanId: sid[:], TraceState: sd.SpanContext().TraceState().String(), Status: status(sd.Status().Code, sd.Status().Description), - StartTimeUnixNano: uint64(sd.StartTime().UnixNano()), - EndTimeUnixNano: uint64(sd.EndTime().UnixNano()), + StartTimeUnixNano: uint64(max(0, sd.StartTime().UnixNano())), // nolint:gosec // Overflow checked. + EndTimeUnixNano: uint64(max(0, sd.EndTime().UnixNano())), // nolint:gosec // Overflow checked. Links: links(sd.Links()), Kind: spanKind(sd.SpanKind()), Name: sd.Name(), Attributes: KeyValues(sd.Attributes()), Events: spanEvents(sd.Events()), - DroppedAttributesCount: uint32(sd.DroppedAttributes()), - DroppedEventsCount: uint32(sd.DroppedEvents()), - DroppedLinksCount: uint32(sd.DroppedLinks()), + DroppedAttributesCount: clampUint32(sd.DroppedAttributes()), + DroppedEventsCount: clampUint32(sd.DroppedEvents()), + DroppedLinksCount: clampUint32(sd.DroppedLinks()), } if psid := sd.Parent().SpanID(); psid.IsValid() { @@ -115,6 +117,16 @@ func span(sd tracesdk.ReadOnlySpan) *tracepb.Span { return s } +func clampUint32(v int) uint32 { + if v < 0 { + return 0 + } + if int64(v) > math.MaxUint32 { + return math.MaxUint32 + } + return uint32(v) // nolint: gosec // Overflow/Underflow checked. +} + // status transform a span code and message into an OTLP span status. func status(status codes.Code, message string) *tracepb.Status { var c tracepb.Status_StatusCode @@ -153,7 +165,7 @@ func links(links []tracesdk.Link) []*tracepb.Span_Link { TraceId: tid[:], SpanId: sid[:], Attributes: KeyValues(otLink.Attributes), - DroppedAttributesCount: uint32(otLink.DroppedAttributeCount), + DroppedAttributesCount: clampUint32(otLink.DroppedAttributeCount), Flags: flags, }) } @@ -166,7 +178,7 @@ func buildSpanFlags(sc trace.SpanContext) uint32 { flags |= tracepb.SpanFlags_SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK } - return uint32(flags) + return uint32(flags) // nolint:gosec // Flags is a bitmask and can't be negative } // spanEvents transforms span Events to an OTLP span events. @@ -180,9 +192,9 @@ func spanEvents(es []tracesdk.Event) []*tracepb.Span_Event { for i := 0; i < len(es); i++ { events[i] = &tracepb.Span_Event{ Name: es[i].Name, - TimeUnixNano: uint64(es[i].Time.UnixNano()), + TimeUnixNano: uint64(max(0, es[i].Time.UnixNano())), // nolint:gosec // Overflow checked. 
Attributes: KeyValues(es[i].Attributes), - DroppedAttributesCount: uint32(es[i].DroppedAttributeCount), + DroppedAttributesCount: clampUint32(es[i].DroppedAttributeCount), } } return events diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go index 3993df927..2171bee3c 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go @@ -229,7 +229,12 @@ func (c *client) exportContext(parent context.Context) (context.Context, context } if c.metadata.Len() > 0 { - ctx = metadata.NewOutgoingContext(ctx, c.metadata) + md := c.metadata + if outMD, ok := metadata.FromOutgoingContext(ctx); ok { + md = metadata.Join(md, outMD) + } + + ctx = metadata.NewOutgoingContext(ctx, md) } // Unify the client stopCtx with the parent. diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/doc.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/doc.go index e783b57ac..b7bd429ff 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/doc.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/doc.go @@ -12,9 +12,8 @@ The environment variables described below can be used for configuration. OTEL_EXPORTER_OTLP_ENDPOINT, OTEL_EXPORTER_OTLP_TRACES_ENDPOINT (default: "https://localhost:4317") - target to which the exporter sends telemetry. The target syntax is defined in https://github.com/grpc/grpc/blob/master/doc/naming.md. -The value must contain a host. -The value may additionally a port, a scheme, and a path. -The value accepts "http" and "https" scheme. +The value must contain a scheme ("http" or "https") and host. +The value may additionally contain a port, and a path. The value should not contain a query string or fragment. OTEL_EXPORTER_OTLP_TRACES_ENDPOINT takes precedence over OTEL_EXPORTER_OTLP_ENDPOINT. The configuration can be overridden by [WithEndpoint], [WithEndpointURL], [WithInsecure], and [WithGRPCConn] options. diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig/envconfig.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig/envconfig.go index 9513c0a57..4abf48d1f 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig/envconfig.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig/envconfig.go @@ -15,6 +15,7 @@ import ( "strconv" "strings" "time" + "unicode" "go.opentelemetry.io/otel/internal/global" ) @@ -163,12 +164,16 @@ func stringToHeader(value string) map[string]string { global.Error(errors.New("missing '="), "parse headers", "input", header) continue } - name, err := url.PathUnescape(n) - if err != nil { - global.Error(err, "escape header key", "key", n) + + trimmedName := strings.TrimSpace(n) + + // Validate the key. + if !isValidHeaderKey(trimmedName) { + global.Error(errors.New("invalid header key"), "parse headers", "key", trimmedName) continue } - trimmedName := strings.TrimSpace(name) + + // Only decode the value. 
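// Illustrative sketch, not part of the vendored patch: the otlptracegrpc client.go
// hunk above now merges the exporter's configured headers with any metadata the
// caller already attached to the outgoing context, instead of overwriting it. The
// same pattern in isolation, assuming only google.golang.org/grpc/metadata:
package main

import (
	"context"
	"fmt"

	"google.golang.org/grpc/metadata"
)

func main() {
	// Metadata the caller already put on the context.
	ctx := metadata.AppendToOutgoingContext(context.Background(), "tenant", "a")

	// Headers configured on the exporter.
	md := metadata.Pairs("authorization", "Bearer example")

	// Join keeps both sets rather than dropping the caller's values.
	if outMD, ok := metadata.FromOutgoingContext(ctx); ok {
		md = metadata.Join(md, outMD)
	}
	ctx = metadata.NewOutgoingContext(ctx, md)

	got, _ := metadata.FromOutgoingContext(ctx)
	fmt.Println(got) // map[authorization:[Bearer example] tenant:[a]]
}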
value, err := url.PathUnescape(v) if err != nil { global.Error(err, "escape header value", "value", v) @@ -189,3 +194,22 @@ func createCertPool(certBytes []byte) (*x509.CertPool, error) { } return cp, nil } + +func isValidHeaderKey(key string) bool { + if key == "" { + return false + } + for _, c := range key { + if !isTokenChar(c) { + return false + } + } + return true +} + +func isTokenChar(c rune) bool { + return c <= unicode.MaxASCII && (unicode.IsLetter(c) || + unicode.IsDigit(c) || + c == '!' || c == '#' || c == '$' || c == '%' || c == '&' || c == '\'' || c == '*' || + c == '+' || c == '-' || c == '.' || c == '^' || c == '_' || c == '`' || c == '|' || c == '~') +} diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/options.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/options.go index 8f84a7996..0a317d926 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/options.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/options.go @@ -98,7 +98,7 @@ func cleanPath(urlPath string, defaultPath string) string { return defaultPath } if !path.IsAbs(tmp) { - tmp = fmt.Sprintf("/%s", tmp) + tmp = "/" + tmp } return tmp } @@ -125,7 +125,7 @@ func NewGRPCConfig(opts ...GRPCOption) Config { if cfg.ServiceConfig != "" { cfg.DialOptions = append(cfg.DialOptions, grpc.WithDefaultServiceConfig(cfg.ServiceConfig)) } - // Priroritize GRPCCredentials over Insecure (passing both is an error). + // Prioritize GRPCCredentials over Insecure (passing both is an error). if cfg.Traces.GRPCCredentials != nil { cfg.DialOptions = append(cfg.DialOptions, grpc.WithTransportCredentials(cfg.Traces.GRPCCredentials)) } else if cfg.Traces.Insecure { @@ -278,9 +278,7 @@ func WithEndpointURL(v string) GenericOption { cfg.Traces.Endpoint = u.Host cfg.Traces.URLPath = u.Path - if u.Scheme != "https" { - cfg.Traces.Insecure = true - } + cfg.Traces.Insecure = u.Scheme != "https" return cfg }) diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry/retry.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry/retry.go index 4f2113ae2..1c5450ab6 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry/retry.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry/retry.go @@ -112,7 +112,7 @@ func (c Config) RequestFunc(evaluate EvaluateFunc) RequestFunc { } if ctxErr := waitFunc(ctx, delay); ctxErr != nil { - return fmt.Errorf("%w: %s", ctxErr, err) + return fmt.Errorf("%w: %w", ctxErr, err) } } } diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/options.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/options.go index bbad0e6d0..00ab1f20c 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/options.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/options.go @@ -59,8 +59,9 @@ func WithInsecure() Option { // // If the OTEL_EXPORTER_OTLP_ENDPOINT or OTEL_EXPORTER_OTLP_TRACES_ENDPOINT // environment variable is set, and this option is not passed, that variable -// value will be used. If both are set, OTEL_EXPORTER_OTLP_TRACES_ENDPOINT -// will take precedence. +// value will be used. 
If both environment variables are set, +// OTEL_EXPORTER_OTLP_TRACES_ENDPOINT will take precedence. If an environment +// variable is set, and this option is passed, this option will take precedence. // // If both this option and WithEndpointURL are used, the last used option will // take precedence. @@ -79,8 +80,9 @@ func WithEndpoint(endpoint string) Option { // // If the OTEL_EXPORTER_OTLP_ENDPOINT or OTEL_EXPORTER_OTLP_TRACES_ENDPOINT // environment variable is set, and this option is not passed, that variable -// value will be used. If both are set, OTEL_EXPORTER_OTLP_TRACES_ENDPOINT -// will take precedence. +// value will be used. If both environment variables are set, +// OTEL_EXPORTER_OTLP_TRACES_ENDPOINT will take precedence. If an environment +// variable is set, and this option is passed, this option will take precedence. // // If both this option and WithEndpoint are used, the last used option will // take precedence. diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go index 14ad8c33b..8ea156a09 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go @@ -5,5 +5,5 @@ package otlptrace // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace" // Version is the current release version of the OpenTelemetry OTLP trace exporter in use. func Version() string { - return "1.28.0" + return "1.33.0" } diff --git a/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go b/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go index 822d84794..691d96c75 100644 --- a/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go +++ b/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go @@ -49,12 +49,11 @@ func AsBoolSlice(v interface{}) []bool { if rv.Type().Kind() != reflect.Array { return nil } - var zero bool - correctLen := rv.Len() - correctType := reflect.ArrayOf(correctLen, reflect.TypeOf(zero)) - cpy := reflect.New(correctType) - _ = reflect.Copy(cpy.Elem(), rv) - return cpy.Elem().Slice(0, correctLen).Interface().([]bool) + cpy := make([]bool, rv.Len()) + if len(cpy) > 0 { + _ = reflect.Copy(reflect.ValueOf(cpy), rv) + } + return cpy } // AsInt64Slice converts an int64 array into a slice into with same elements as array. @@ -63,12 +62,11 @@ func AsInt64Slice(v interface{}) []int64 { if rv.Type().Kind() != reflect.Array { return nil } - var zero int64 - correctLen := rv.Len() - correctType := reflect.ArrayOf(correctLen, reflect.TypeOf(zero)) - cpy := reflect.New(correctType) - _ = reflect.Copy(cpy.Elem(), rv) - return cpy.Elem().Slice(0, correctLen).Interface().([]int64) + cpy := make([]int64, rv.Len()) + if len(cpy) > 0 { + _ = reflect.Copy(reflect.ValueOf(cpy), rv) + } + return cpy } // AsFloat64Slice converts a float64 array into a slice into with same elements as array. @@ -77,12 +75,11 @@ func AsFloat64Slice(v interface{}) []float64 { if rv.Type().Kind() != reflect.Array { return nil } - var zero float64 - correctLen := rv.Len() - correctType := reflect.ArrayOf(correctLen, reflect.TypeOf(zero)) - cpy := reflect.New(correctType) - _ = reflect.Copy(cpy.Elem(), rv) - return cpy.Elem().Slice(0, correctLen).Interface().([]float64) + cpy := make([]float64, rv.Len()) + if len(cpy) > 0 { + _ = reflect.Copy(reflect.ValueOf(cpy), rv) + } + return cpy } // AsStringSlice converts a string array into a slice into with same elements as array. 
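// Illustrative sketch, not part of the vendored patch: the internal/attribute hunks
// above (and the matching AsStringSlice hunk that follows) replace the
// reflect.ArrayOf/reflect.New construction with a plain make plus reflect.Copy. The
// simplified shape for one element type, assuming only the standard library:
package main

import (
	"fmt"
	"reflect"
)

// asInt64Slice copies an [N]int64 array passed as interface{} into a freshly
// allocated []int64, mirroring the rewritten AsInt64Slice above.
func asInt64Slice(v interface{}) []int64 {
	rv := reflect.ValueOf(v)
	if rv.Type().Kind() != reflect.Array {
		return nil
	}
	cpy := make([]int64, rv.Len())
	if len(cpy) > 0 {
		// reflect.Copy accepts a slice destination and an array source.
		_ = reflect.Copy(reflect.ValueOf(cpy), rv)
	}
	return cpy
}

func main() {
	arr := [3]int64{1, 2, 3}
	fmt.Println(asInt64Slice(arr)) // [1 2 3]
}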
@@ -91,10 +88,9 @@ func AsStringSlice(v interface{}) []string { if rv.Type().Kind() != reflect.Array { return nil } - var zero string - correctLen := rv.Len() - correctType := reflect.ArrayOf(correctLen, reflect.TypeOf(zero)) - cpy := reflect.New(correctType) - _ = reflect.Copy(cpy.Elem(), rv) - return cpy.Elem().Slice(0, correctLen).Interface().([]string) + cpy := make([]string, rv.Len()) + if len(cpy) > 0 { + _ = reflect.Copy(reflect.ValueOf(cpy), rv) + } + return cpy } diff --git a/vendor/go.opentelemetry.io/otel/internal/global/instruments.go b/vendor/go.opentelemetry.io/otel/internal/global/instruments.go index 3a0cc42f6..ae92a4251 100644 --- a/vendor/go.opentelemetry.io/otel/internal/global/instruments.go +++ b/vendor/go.opentelemetry.io/otel/internal/global/instruments.go @@ -13,7 +13,7 @@ import ( // unwrapper unwraps to return the underlying instrument implementation. type unwrapper interface { - Unwrap() metric.Observable + unwrap() metric.Observable } type afCounter struct { @@ -40,7 +40,7 @@ func (i *afCounter) setDelegate(m metric.Meter) { i.delegate.Store(ctr) } -func (i *afCounter) Unwrap() metric.Observable { +func (i *afCounter) unwrap() metric.Observable { if ctr := i.delegate.Load(); ctr != nil { return ctr.(metric.Float64ObservableCounter) } @@ -71,7 +71,7 @@ func (i *afUpDownCounter) setDelegate(m metric.Meter) { i.delegate.Store(ctr) } -func (i *afUpDownCounter) Unwrap() metric.Observable { +func (i *afUpDownCounter) unwrap() metric.Observable { if ctr := i.delegate.Load(); ctr != nil { return ctr.(metric.Float64ObservableUpDownCounter) } @@ -102,7 +102,7 @@ func (i *afGauge) setDelegate(m metric.Meter) { i.delegate.Store(ctr) } -func (i *afGauge) Unwrap() metric.Observable { +func (i *afGauge) unwrap() metric.Observable { if ctr := i.delegate.Load(); ctr != nil { return ctr.(metric.Float64ObservableGauge) } @@ -133,7 +133,7 @@ func (i *aiCounter) setDelegate(m metric.Meter) { i.delegate.Store(ctr) } -func (i *aiCounter) Unwrap() metric.Observable { +func (i *aiCounter) unwrap() metric.Observable { if ctr := i.delegate.Load(); ctr != nil { return ctr.(metric.Int64ObservableCounter) } @@ -164,7 +164,7 @@ func (i *aiUpDownCounter) setDelegate(m metric.Meter) { i.delegate.Store(ctr) } -func (i *aiUpDownCounter) Unwrap() metric.Observable { +func (i *aiUpDownCounter) unwrap() metric.Observable { if ctr := i.delegate.Load(); ctr != nil { return ctr.(metric.Int64ObservableUpDownCounter) } @@ -195,7 +195,7 @@ func (i *aiGauge) setDelegate(m metric.Meter) { i.delegate.Store(ctr) } -func (i *aiGauge) Unwrap() metric.Observable { +func (i *aiGauge) unwrap() metric.Observable { if ctr := i.delegate.Load(); ctr != nil { return ctr.(metric.Int64ObservableGauge) } diff --git a/vendor/go.opentelemetry.io/otel/internal/global/meter.go b/vendor/go.opentelemetry.io/otel/internal/global/meter.go index cfd1df9bf..a6acd8dca 100644 --- a/vendor/go.opentelemetry.io/otel/internal/global/meter.go +++ b/vendor/go.opentelemetry.io/otel/internal/global/meter.go @@ -5,8 +5,9 @@ package global // import "go.opentelemetry.io/otel/internal/global" import ( "container/list" + "context" + "reflect" "sync" - "sync/atomic" "go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/metric/embedded" @@ -66,6 +67,7 @@ func (p *meterProvider) Meter(name string, opts ...metric.MeterOption) metric.Me name: name, version: c.InstrumentationVersion(), schema: c.SchemaURL(), + attrs: c.InstrumentationAttributes(), } if p.meters == nil { @@ -76,7 +78,7 @@ func (p *meterProvider) Meter(name string, opts 
...metric.MeterOption) metric.Me return val } - t := &meter{name: name, opts: opts} + t := &meter{name: name, opts: opts, instruments: make(map[instID]delegatedInstrument)} p.meters[key] = t return t } @@ -92,17 +94,29 @@ type meter struct { opts []metric.MeterOption mtx sync.Mutex - instruments []delegatedInstrument + instruments map[instID]delegatedInstrument registry list.List - delegate atomic.Value // metric.Meter + delegate metric.Meter } type delegatedInstrument interface { setDelegate(metric.Meter) } +// instID are the identifying properties of a instrument. +type instID struct { + // name is the name of the stream. + name string + // description is the description of the stream. + description string + // kind defines the functional group of the instrument. + kind reflect.Type + // unit is the unit of the stream. + unit string +} + // setDelegate configures m to delegate all Meter functionality to Meters // created by provider. // @@ -110,12 +124,12 @@ type delegatedInstrument interface { // // It is guaranteed by the caller that this happens only once. func (m *meter) setDelegate(provider metric.MeterProvider) { - meter := provider.Meter(m.name, m.opts...) - m.delegate.Store(meter) - m.mtx.Lock() defer m.mtx.Unlock() + meter := provider.Meter(m.name, m.opts...) + m.delegate = meter + for _, inst := range m.instruments { inst.setDelegate(meter) } @@ -133,169 +147,336 @@ func (m *meter) setDelegate(provider metric.MeterProvider) { } func (m *meter) Int64Counter(name string, options ...metric.Int64CounterOption) (metric.Int64Counter, error) { - if del, ok := m.delegate.Load().(metric.Meter); ok { - return del.Int64Counter(name, options...) - } m.mtx.Lock() defer m.mtx.Unlock() + + if m.delegate != nil { + return m.delegate.Int64Counter(name, options...) + } + + cfg := metric.NewInt64CounterConfig(options...) + id := instID{ + name: name, + kind: reflect.TypeOf((*siCounter)(nil)), + description: cfg.Description(), + unit: cfg.Unit(), + } + if f, ok := m.instruments[id]; ok { + return f.(metric.Int64Counter), nil + } i := &siCounter{name: name, opts: options} - m.instruments = append(m.instruments, i) + m.instruments[id] = i return i, nil } func (m *meter) Int64UpDownCounter(name string, options ...metric.Int64UpDownCounterOption) (metric.Int64UpDownCounter, error) { - if del, ok := m.delegate.Load().(metric.Meter); ok { - return del.Int64UpDownCounter(name, options...) - } m.mtx.Lock() defer m.mtx.Unlock() + + if m.delegate != nil { + return m.delegate.Int64UpDownCounter(name, options...) + } + + cfg := metric.NewInt64UpDownCounterConfig(options...) + id := instID{ + name: name, + kind: reflect.TypeOf((*siUpDownCounter)(nil)), + description: cfg.Description(), + unit: cfg.Unit(), + } + if f, ok := m.instruments[id]; ok { + return f.(metric.Int64UpDownCounter), nil + } i := &siUpDownCounter{name: name, opts: options} - m.instruments = append(m.instruments, i) + m.instruments[id] = i return i, nil } func (m *meter) Int64Histogram(name string, options ...metric.Int64HistogramOption) (metric.Int64Histogram, error) { - if del, ok := m.delegate.Load().(metric.Meter); ok { - return del.Int64Histogram(name, options...) - } m.mtx.Lock() defer m.mtx.Unlock() + + if m.delegate != nil { + return m.delegate.Int64Histogram(name, options...) + } + + cfg := metric.NewInt64HistogramConfig(options...) 
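// Illustrative sketch, not part of the vendored patch: the global meter rewrite above
// (and the analogous hunks for the remaining instrument kinds below) keys each
// placeholder instrument by an instID value, so asking for the "same" instrument
// twice returns the cached one instead of appending a duplicate. The
// cache-by-comparable-key pattern in isolation, with hypothetical counter/registry
// types standing in for the OpenTelemetry ones:
package main

import (
	"fmt"
	"sync"
)

type instID struct {
	name, description, unit string
}

type counter struct{ id instID }

type registry struct {
	mu          sync.Mutex
	instruments map[instID]*counter
}

// counterFor returns the cached counter for id, creating it on first use.
func (r *registry) counterFor(id instID) *counter {
	r.mu.Lock()
	defer r.mu.Unlock()
	if c, ok := r.instruments[id]; ok {
		return c
	}
	c := &counter{id: id}
	r.instruments[id] = c
	return c
}

func main() {
	r := &registry{instruments: make(map[instID]*counter)}
	a := r.counterFor(instID{name: "requests", unit: "1"})
	b := r.counterFor(instID{name: "requests", unit: "1"})
	fmt.Println(a == b) // true: the second lookup reuses the first placeholder
}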
+ id := instID{ + name: name, + kind: reflect.TypeOf((*siHistogram)(nil)), + description: cfg.Description(), + unit: cfg.Unit(), + } + if f, ok := m.instruments[id]; ok { + return f.(metric.Int64Histogram), nil + } i := &siHistogram{name: name, opts: options} - m.instruments = append(m.instruments, i) + m.instruments[id] = i return i, nil } func (m *meter) Int64Gauge(name string, options ...metric.Int64GaugeOption) (metric.Int64Gauge, error) { - if del, ok := m.delegate.Load().(metric.Meter); ok { - return del.Int64Gauge(name, options...) - } m.mtx.Lock() defer m.mtx.Unlock() + + if m.delegate != nil { + return m.delegate.Int64Gauge(name, options...) + } + + cfg := metric.NewInt64GaugeConfig(options...) + id := instID{ + name: name, + kind: reflect.TypeOf((*siGauge)(nil)), + description: cfg.Description(), + unit: cfg.Unit(), + } + if f, ok := m.instruments[id]; ok { + return f.(metric.Int64Gauge), nil + } i := &siGauge{name: name, opts: options} - m.instruments = append(m.instruments, i) + m.instruments[id] = i return i, nil } func (m *meter) Int64ObservableCounter(name string, options ...metric.Int64ObservableCounterOption) (metric.Int64ObservableCounter, error) { - if del, ok := m.delegate.Load().(metric.Meter); ok { - return del.Int64ObservableCounter(name, options...) - } m.mtx.Lock() defer m.mtx.Unlock() + + if m.delegate != nil { + return m.delegate.Int64ObservableCounter(name, options...) + } + + cfg := metric.NewInt64ObservableCounterConfig(options...) + id := instID{ + name: name, + kind: reflect.TypeOf((*aiCounter)(nil)), + description: cfg.Description(), + unit: cfg.Unit(), + } + if f, ok := m.instruments[id]; ok { + return f.(metric.Int64ObservableCounter), nil + } i := &aiCounter{name: name, opts: options} - m.instruments = append(m.instruments, i) + m.instruments[id] = i return i, nil } func (m *meter) Int64ObservableUpDownCounter(name string, options ...metric.Int64ObservableUpDownCounterOption) (metric.Int64ObservableUpDownCounter, error) { - if del, ok := m.delegate.Load().(metric.Meter); ok { - return del.Int64ObservableUpDownCounter(name, options...) - } m.mtx.Lock() defer m.mtx.Unlock() + + if m.delegate != nil { + return m.delegate.Int64ObservableUpDownCounter(name, options...) + } + + cfg := metric.NewInt64ObservableUpDownCounterConfig(options...) + id := instID{ + name: name, + kind: reflect.TypeOf((*aiUpDownCounter)(nil)), + description: cfg.Description(), + unit: cfg.Unit(), + } + if f, ok := m.instruments[id]; ok { + return f.(metric.Int64ObservableUpDownCounter), nil + } i := &aiUpDownCounter{name: name, opts: options} - m.instruments = append(m.instruments, i) + m.instruments[id] = i return i, nil } func (m *meter) Int64ObservableGauge(name string, options ...metric.Int64ObservableGaugeOption) (metric.Int64ObservableGauge, error) { - if del, ok := m.delegate.Load().(metric.Meter); ok { - return del.Int64ObservableGauge(name, options...) - } m.mtx.Lock() defer m.mtx.Unlock() + + if m.delegate != nil { + return m.delegate.Int64ObservableGauge(name, options...) + } + + cfg := metric.NewInt64ObservableGaugeConfig(options...) 
+ id := instID{ + name: name, + kind: reflect.TypeOf((*aiGauge)(nil)), + description: cfg.Description(), + unit: cfg.Unit(), + } + if f, ok := m.instruments[id]; ok { + return f.(metric.Int64ObservableGauge), nil + } i := &aiGauge{name: name, opts: options} - m.instruments = append(m.instruments, i) + m.instruments[id] = i return i, nil } func (m *meter) Float64Counter(name string, options ...metric.Float64CounterOption) (metric.Float64Counter, error) { - if del, ok := m.delegate.Load().(metric.Meter); ok { - return del.Float64Counter(name, options...) - } m.mtx.Lock() defer m.mtx.Unlock() + + if m.delegate != nil { + return m.delegate.Float64Counter(name, options...) + } + + cfg := metric.NewFloat64CounterConfig(options...) + id := instID{ + name: name, + kind: reflect.TypeOf((*sfCounter)(nil)), + description: cfg.Description(), + unit: cfg.Unit(), + } + if f, ok := m.instruments[id]; ok { + return f.(metric.Float64Counter), nil + } i := &sfCounter{name: name, opts: options} - m.instruments = append(m.instruments, i) + m.instruments[id] = i return i, nil } func (m *meter) Float64UpDownCounter(name string, options ...metric.Float64UpDownCounterOption) (metric.Float64UpDownCounter, error) { - if del, ok := m.delegate.Load().(metric.Meter); ok { - return del.Float64UpDownCounter(name, options...) - } m.mtx.Lock() defer m.mtx.Unlock() + + if m.delegate != nil { + return m.delegate.Float64UpDownCounter(name, options...) + } + + cfg := metric.NewFloat64UpDownCounterConfig(options...) + id := instID{ + name: name, + kind: reflect.TypeOf((*sfUpDownCounter)(nil)), + description: cfg.Description(), + unit: cfg.Unit(), + } + if f, ok := m.instruments[id]; ok { + return f.(metric.Float64UpDownCounter), nil + } i := &sfUpDownCounter{name: name, opts: options} - m.instruments = append(m.instruments, i) + m.instruments[id] = i return i, nil } func (m *meter) Float64Histogram(name string, options ...metric.Float64HistogramOption) (metric.Float64Histogram, error) { - if del, ok := m.delegate.Load().(metric.Meter); ok { - return del.Float64Histogram(name, options...) - } m.mtx.Lock() defer m.mtx.Unlock() + + if m.delegate != nil { + return m.delegate.Float64Histogram(name, options...) + } + + cfg := metric.NewFloat64HistogramConfig(options...) + id := instID{ + name: name, + kind: reflect.TypeOf((*sfHistogram)(nil)), + description: cfg.Description(), + unit: cfg.Unit(), + } + if f, ok := m.instruments[id]; ok { + return f.(metric.Float64Histogram), nil + } i := &sfHistogram{name: name, opts: options} - m.instruments = append(m.instruments, i) + m.instruments[id] = i return i, nil } func (m *meter) Float64Gauge(name string, options ...metric.Float64GaugeOption) (metric.Float64Gauge, error) { - if del, ok := m.delegate.Load().(metric.Meter); ok { - return del.Float64Gauge(name, options...) - } m.mtx.Lock() defer m.mtx.Unlock() + + if m.delegate != nil { + return m.delegate.Float64Gauge(name, options...) + } + + cfg := metric.NewFloat64GaugeConfig(options...) 
+ id := instID{ + name: name, + kind: reflect.TypeOf((*sfGauge)(nil)), + description: cfg.Description(), + unit: cfg.Unit(), + } + if f, ok := m.instruments[id]; ok { + return f.(metric.Float64Gauge), nil + } i := &sfGauge{name: name, opts: options} - m.instruments = append(m.instruments, i) + m.instruments[id] = i return i, nil } func (m *meter) Float64ObservableCounter(name string, options ...metric.Float64ObservableCounterOption) (metric.Float64ObservableCounter, error) { - if del, ok := m.delegate.Load().(metric.Meter); ok { - return del.Float64ObservableCounter(name, options...) - } m.mtx.Lock() defer m.mtx.Unlock() + + if m.delegate != nil { + return m.delegate.Float64ObservableCounter(name, options...) + } + + cfg := metric.NewFloat64ObservableCounterConfig(options...) + id := instID{ + name: name, + kind: reflect.TypeOf((*afCounter)(nil)), + description: cfg.Description(), + unit: cfg.Unit(), + } + if f, ok := m.instruments[id]; ok { + return f.(metric.Float64ObservableCounter), nil + } i := &afCounter{name: name, opts: options} - m.instruments = append(m.instruments, i) + m.instruments[id] = i return i, nil } func (m *meter) Float64ObservableUpDownCounter(name string, options ...metric.Float64ObservableUpDownCounterOption) (metric.Float64ObservableUpDownCounter, error) { - if del, ok := m.delegate.Load().(metric.Meter); ok { - return del.Float64ObservableUpDownCounter(name, options...) - } m.mtx.Lock() defer m.mtx.Unlock() + + if m.delegate != nil { + return m.delegate.Float64ObservableUpDownCounter(name, options...) + } + + cfg := metric.NewFloat64ObservableUpDownCounterConfig(options...) + id := instID{ + name: name, + kind: reflect.TypeOf((*afUpDownCounter)(nil)), + description: cfg.Description(), + unit: cfg.Unit(), + } + if f, ok := m.instruments[id]; ok { + return f.(metric.Float64ObservableUpDownCounter), nil + } i := &afUpDownCounter{name: name, opts: options} - m.instruments = append(m.instruments, i) + m.instruments[id] = i return i, nil } func (m *meter) Float64ObservableGauge(name string, options ...metric.Float64ObservableGaugeOption) (metric.Float64ObservableGauge, error) { - if del, ok := m.delegate.Load().(metric.Meter); ok { - return del.Float64ObservableGauge(name, options...) - } m.mtx.Lock() defer m.mtx.Unlock() + + if m.delegate != nil { + return m.delegate.Float64ObservableGauge(name, options...) + } + + cfg := metric.NewFloat64ObservableGaugeConfig(options...) + id := instID{ + name: name, + kind: reflect.TypeOf((*afGauge)(nil)), + description: cfg.Description(), + unit: cfg.Unit(), + } + if f, ok := m.instruments[id]; ok { + return f.(metric.Float64ObservableGauge), nil + } i := &afGauge{name: name, opts: options} - m.instruments = append(m.instruments, i) + m.instruments[id] = i return i, nil } // RegisterCallback captures the function that will be called during Collect. func (m *meter) RegisterCallback(f metric.Callback, insts ...metric.Observable) (metric.Registration, error) { - if del, ok := m.delegate.Load().(metric.Meter); ok { - insts = unwrapInstruments(insts) - return del.RegisterCallback(f, insts...) - } - m.mtx.Lock() defer m.mtx.Unlock() + if m.delegate != nil { + return m.delegate.RegisterCallback(unwrapCallback(f), unwrapInstruments(insts)...) 
+ } + reg := &registration{instruments: insts, function: f} e := m.registry.PushBack(reg) reg.unreg = func() error { @@ -307,15 +488,11 @@ func (m *meter) RegisterCallback(f metric.Callback, insts ...metric.Observable) return reg, nil } -type wrapped interface { - unwrap() metric.Observable -} - func unwrapInstruments(instruments []metric.Observable) []metric.Observable { out := make([]metric.Observable, 0, len(instruments)) for _, inst := range instruments { - if in, ok := inst.(wrapped); ok { + if in, ok := inst.(unwrapper); ok { out = append(out, in.unwrap()) } else { out = append(out, inst) @@ -335,9 +512,61 @@ type registration struct { unregMu sync.Mutex } -func (c *registration) setDelegate(m metric.Meter) { - insts := unwrapInstruments(c.instruments) +type unwrapObs struct { + embedded.Observer + obs metric.Observer +} +// unwrapFloat64Observable returns an expected metric.Float64Observable after +// unwrapping the global object. +func unwrapFloat64Observable(inst metric.Float64Observable) metric.Float64Observable { + if unwrapped, ok := inst.(unwrapper); ok { + if floatObs, ok := unwrapped.unwrap().(metric.Float64Observable); ok { + // Note: if the unwrapped object does not + // unwrap as an observable for either of the + // predicates here, it means an internal bug in + // this package. We avoid logging an error in + // this case, because the SDK has to try its + // own type conversion on the object. The SDK + // will see this and be forced to respond with + // its own error. + // + // This code uses a double-nested if statement + // to avoid creating a branch that is + // impossible to cover. + inst = floatObs + } + } + return inst +} + +// unwrapInt64Observable returns an expected metric.Int64Observable after +// unwrapping the global object. +func unwrapInt64Observable(inst metric.Int64Observable) metric.Int64Observable { + if unwrapped, ok := inst.(unwrapper); ok { + if unint, ok := unwrapped.unwrap().(metric.Int64Observable); ok { + // See the comment in unwrapFloat64Observable(). + inst = unint + } + } + return inst +} + +func (uo *unwrapObs) ObserveFloat64(inst metric.Float64Observable, value float64, opts ...metric.ObserveOption) { + uo.obs.ObserveFloat64(unwrapFloat64Observable(inst), value, opts...) +} + +func (uo *unwrapObs) ObserveInt64(inst metric.Int64Observable, value int64, opts ...metric.ObserveOption) { + uo.obs.ObserveInt64(unwrapInt64Observable(inst), value, opts...) +} + +func unwrapCallback(f metric.Callback) metric.Callback { + return func(ctx context.Context, obs metric.Observer) error { + return f(ctx, &unwrapObs{obs: obs}) + } +} + +func (c *registration) setDelegate(m metric.Meter) { c.unregMu.Lock() defer c.unregMu.Unlock() @@ -346,9 +575,10 @@ func (c *registration) setDelegate(m metric.Meter) { return } - reg, err := m.RegisterCallback(c.function, insts...) + reg, err := m.RegisterCallback(unwrapCallback(c.function), unwrapInstruments(c.instruments)...)
if err != nil { GetErrorHandler().Handle(err) + return } c.unreg = reg.Unregister diff --git a/vendor/go.opentelemetry.io/otel/internal/global/trace.go b/vendor/go.opentelemetry.io/otel/internal/global/trace.go index e31f442b4..8982aa0dc 100644 --- a/vendor/go.opentelemetry.io/otel/internal/global/trace.go +++ b/vendor/go.opentelemetry.io/otel/internal/global/trace.go @@ -25,6 +25,7 @@ import ( "sync" "sync/atomic" + "go.opentelemetry.io/auto/sdk" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" "go.opentelemetry.io/otel/trace" @@ -87,6 +88,7 @@ func (p *tracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.T name: name, version: c.InstrumentationVersion(), schema: c.SchemaURL(), + attrs: c.InstrumentationAttributes(), } if p.tracers == nil { @@ -102,7 +104,12 @@ func (p *tracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.T return t } -type il struct{ name, version, schema string } +type il struct { + name string + version string + schema string + attrs attribute.Set +} // tracer is a placeholder for a trace.Tracer. // @@ -139,6 +146,30 @@ func (t *tracer) Start(ctx context.Context, name string, opts ...trace.SpanStart return delegate.(trace.Tracer).Start(ctx, name, opts...) } + return t.newSpan(ctx, autoInstEnabled, name, opts) +} + +// autoInstEnabled determines if the auto-instrumentation SDK span is returned +// from the tracer when not backed by a delegate and auto-instrumentation has +// attached to this process. +// +// The auto-instrumentation is expected to overwrite this value to true when it +// attaches. By default, this will point to false and mean a tracer will return +// a nonRecordingSpan by default. +var autoInstEnabled = new(bool) + +func (t *tracer) newSpan(ctx context.Context, autoSpan *bool, name string, opts []trace.SpanStartOption) (context.Context, trace.Span) { + // autoInstEnabled is passed to newSpan via the autoSpan parameter. This is + // so the auto-instrumentation can define a uprobe for (*t).newSpan and be + // provided with the address of the bool autoInstEnabled points to. It + // needs to be a parameter so that pointer can be reliably determined, it + // should not be read from the global. + + if *autoSpan { + tracer := sdk.TracerProvider().Tracer(t.name, t.opts...) + return tracer.Start(ctx, name, opts...) + } + s := nonRecordingSpan{sc: trace.SpanContextFromContext(ctx), tracer: t} ctx = trace.ContextWithSpan(ctx, s) return ctx, s diff --git a/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go b/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go index 9b1da2c02..b2fe3e41d 100644 --- a/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go +++ b/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go @@ -20,7 +20,8 @@ func RawToBool(r uint64) bool { } func Int64ToRaw(i int64) uint64 { - return uint64(i) + // Assumes original was a valid int64 (overflow not checked). + return uint64(i) // nolint: gosec } func RawToInt64(r uint64) int64 { diff --git a/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go b/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go index cf23db778..f8435d8f2 100644 --- a/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go +++ b/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go @@ -213,7 +213,7 @@ type Float64Observer interface { } // Float64Callback is a function registered with a Meter that makes -// observations for a Float64Observerable instrument it is registered with. +// observations for a Float64Observable instrument it is registered with. 
// Calls to the Float64Observer record measurement values for the // Float64Observable. // diff --git a/vendor/go.opentelemetry.io/otel/metric/asyncint64.go b/vendor/go.opentelemetry.io/otel/metric/asyncint64.go index c82ba5324..e079aaef1 100644 --- a/vendor/go.opentelemetry.io/otel/metric/asyncint64.go +++ b/vendor/go.opentelemetry.io/otel/metric/asyncint64.go @@ -212,7 +212,7 @@ type Int64Observer interface { } // Int64Callback is a function registered with a Meter that makes observations -// for an Int64Observerable instrument it is registered with. Calls to the +// for an Int64Observable instrument it is registered with. Calls to the // Int64Observer record measurement values for the Int64Observable. // // The function needs to complete in a finite amount of time and the deadline diff --git a/vendor/go.opentelemetry.io/otel/metric/instrument.go b/vendor/go.opentelemetry.io/otel/metric/instrument.go index ea52e4023..a535782e1 100644 --- a/vendor/go.opentelemetry.io/otel/metric/instrument.go +++ b/vendor/go.opentelemetry.io/otel/metric/instrument.go @@ -351,7 +351,7 @@ func WithAttributeSet(attributes attribute.Set) MeasurementOption { // // cp := make([]attribute.KeyValue, len(attributes)) // copy(cp, attributes) -// WithAttributes(attribute.NewSet(cp...)) +// WithAttributeSet(attribute.NewSet(cp...)) // // [attribute.NewSet] may modify the passed attributes so this will make a copy // of attributes before creating a set in order to ensure this function is diff --git a/vendor/go.opentelemetry.io/otel/renovate.json b/vendor/go.opentelemetry.io/otel/renovate.json index 8c5ac55ca..0a29a2f13 100644 --- a/vendor/go.opentelemetry.io/otel/renovate.json +++ b/vendor/go.opentelemetry.io/otel/renovate.json @@ -19,6 +19,14 @@ "matchManagers": ["gomod"], "matchDepTypes": ["indirect"], "enabled": false + }, + { + "matchPackageNames": ["google.golang.org/genproto/googleapis/**"], + "groupName": "googleapis" + }, + { + "matchPackageNames": ["golang.org/x/**"], + "groupName": "golang.org/x" } ] } diff --git a/vendor/go.opentelemetry.io/otel/sdk/instrumentation/library.go b/vendor/go.opentelemetry.io/otel/sdk/instrumentation/library.go index f4d1857c4..f2cdf3c65 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/instrumentation/library.go +++ b/vendor/go.opentelemetry.io/otel/sdk/instrumentation/library.go @@ -4,5 +4,6 @@ package instrumentation // import "go.opentelemetry.io/otel/sdk/instrumentation" // Library represents the instrumentation library. -// Deprecated: please use Scope instead. +// +// Deprecated: use [Scope] instead. type Library = Scope diff --git a/vendor/go.opentelemetry.io/otel/sdk/instrumentation/scope.go b/vendor/go.opentelemetry.io/otel/sdk/instrumentation/scope.go index 728115045..34852a47b 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/instrumentation/scope.go +++ b/vendor/go.opentelemetry.io/otel/sdk/instrumentation/scope.go @@ -3,6 +3,8 @@ package instrumentation // import "go.opentelemetry.io/otel/sdk/instrumentation" +import "go.opentelemetry.io/otel/attribute" + // Scope represents the instrumentation scope. type Scope struct { // Name is the name of the instrumentation scope. This should be the @@ -12,4 +14,6 @@ type Scope struct { Version string // SchemaURL of the telemetry emitted by the scope. SchemaURL string + // Attributes of the telemetry emitted by the scope. 
+ Attributes attribute.Set } diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/auto.go b/vendor/go.opentelemetry.io/otel/sdk/resource/auto.go index 95a61d61d..c02aeefdd 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/auto.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/auto.go @@ -7,7 +7,6 @@ import ( "context" "errors" "fmt" - "strings" ) // ErrPartialResource is returned by a detector when complete source @@ -57,62 +56,37 @@ func Detect(ctx context.Context, detectors ...Detector) (*Resource, error) { // these errors will be returned. Otherwise, nil is returned. func detect(ctx context.Context, res *Resource, detectors []Detector) error { var ( - r *Resource - errs detectErrs - err error + r *Resource + err error + e error ) for _, detector := range detectors { if detector == nil { continue } - r, err = detector.Detect(ctx) - if err != nil { - errs = append(errs, err) - if !errors.Is(err, ErrPartialResource) { + r, e = detector.Detect(ctx) + if e != nil { + err = errors.Join(err, e) + if !errors.Is(e, ErrPartialResource) { continue } } - r, err = Merge(res, r) - if err != nil { - errs = append(errs, err) + r, e = Merge(res, r) + if e != nil { + err = errors.Join(err, e) } *res = *r } - if len(errs) == 0 { - return nil - } - if errors.Is(errs, ErrSchemaURLConflict) { - // If there has been a merge conflict, ensure the resource has no - // schema URL. - res.schemaURL = "" - } - return errs -} - -type detectErrs []error - -func (e detectErrs) Error() string { - errStr := make([]string, len(e)) - for i, err := range e { - errStr[i] = fmt.Sprintf("* %s", err) - } - - format := "%d errors occurred detecting resource:\n\t%s" - return fmt.Sprintf(format, len(e), strings.Join(errStr, "\n\t")) -} + if err != nil { + if errors.Is(err, ErrSchemaURLConflict) { + // If there has been a merge conflict, ensure the resource has no + // schema URL. + res.schemaURL = "" + } -func (e detectErrs) Unwrap() error { - switch len(e) { - case 0: - return nil - case 1: - return e[0] + err = fmt.Errorf("error detecting resource: %w", err) } - return e[1:] -} - -func (e detectErrs) Is(target error) bool { - return len(e) != 0 && errors.Is(e[0], target) + return err } diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go b/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go index 6ac1cdbf7..cf3c88e15 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go @@ -20,15 +20,13 @@ type ( // telemetrySDK is a Detector that provides information about // the OpenTelemetry SDK used. This Detector is included as a // builtin. If these resource attributes are not wanted, use - // the WithTelemetrySDK(nil) or WithoutBuiltin() options to - // explicitly disable them. + // resource.New() to explicitly disable them. telemetrySDK struct{} // host is a Detector that provides information about the host // being run on. This Detector is included as a builtin. If // these resource attributes are not wanted, use the - // WithHost(nil) or WithoutBuiltin() options to explicitly - // disable them. + // resource.New() to explicitly disable them. 
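// Illustrative sketch, not part of the vendored patch: the resource/auto.go hunk above
// drops the hand-rolled detectErrs type in favor of errors.Join; errors.Is still
// matches any of the joined errors, which is what the ErrSchemaURLConflict check
// relies on. A minimal illustration with stand-in sentinel errors:
package main

import (
	"errors"
	"fmt"
)

var errConflict = errors.New("schema URL conflict")

func main() {
	var err error
	err = errors.Join(err, errors.New("detector A failed"))
	err = errors.Join(err, fmt.Errorf("merge: %w", errConflict))

	fmt.Println(errors.Is(err, errConflict)) // true
	fmt.Println(err)
	// detector A failed
	// merge: schema URL conflict
}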
host struct{} stringDetector struct { diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_windows.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_windows.go index 71386e2da..3677c83d7 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_windows.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_windows.go @@ -10,17 +10,16 @@ import ( "golang.org/x/sys/windows/registry" ) -// implements hostIDReader +// implements hostIDReader. type hostIDReaderWindows struct{} -// read reads MachineGuid from the windows registry key: -// SOFTWARE\Microsoft\Cryptography +// read reads MachineGuid from the Windows registry key: +// SOFTWARE\Microsoft\Cryptography. func (*hostIDReaderWindows) read() (string, error) { k, err := registry.OpenKey( registry.LOCAL_MACHINE, `SOFTWARE\Microsoft\Cryptography`, registry.QUERY_VALUE|registry.WOW64_64KEY, ) - if err != nil { return "", err } diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/os_windows.go b/vendor/go.opentelemetry.io/otel/sdk/resource/os_windows.go index 5e3d199d7..a6a5a53c0 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/os_windows.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/os_windows.go @@ -17,7 +17,6 @@ import ( func platformOSDescription() (string, error) { k, err := registry.OpenKey( registry.LOCAL_MACHINE, `SOFTWARE\Microsoft\Windows NT\CurrentVersion`, registry.QUERY_VALUE) - if err != nil { return "", err } diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go b/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go index 1d399a75d..ccc97e1b6 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go @@ -280,6 +280,7 @@ func (bsp *batchSpanProcessor) exportSpans(ctx context.Context) error { // // It is up to the exporter to implement any type of retry logic if a batch is failing // to be exported, since it is specific to the protocol and backend being sent to. + clear(bsp.batch) // Erase elements to let GC collect objects bsp.batch = bsp.batch[:0] if err != nil { @@ -316,7 +317,11 @@ func (bsp *batchSpanProcessor) processQueue() { bsp.batchMutex.Unlock() if shouldExport { if !bsp.timer.Stop() { - <-bsp.timer.C + // Handle both GODEBUG=asynctimerchan=[0|1] properly. + select { + case <-bsp.timer.C: + default: + } } if err := bsp.exportSpans(ctx); err != nil { otel.Handle(err) diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/evictedqueue.go b/vendor/go.opentelemetry.io/otel/sdk/trace/evictedqueue.go index 821c83faa..8c308dd60 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/evictedqueue.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/evictedqueue.go @@ -12,25 +12,26 @@ import ( // evictedQueue is a FIFO queue with a configurable capacity. type evictedQueue[T any] struct { - queue []T - capacity int - droppedCount int - logDropped func() + queue []T + capacity int + droppedCount int + logDroppedMsg string + logDroppedOnce sync.Once } func newEvictedQueueEvent(capacity int) evictedQueue[Event] { // Do not pre-allocate queue, do this lazily. return evictedQueue[Event]{ - capacity: capacity, - logDropped: sync.OnceFunc(func() { global.Warn("limit reached: dropping trace trace.Event") }), + capacity: capacity, + logDroppedMsg: "limit reached: dropping trace trace.Event", } } func newEvictedQueueLink(capacity int) evictedQueue[Link] { // Do not pre-allocate queue, do this lazily. 
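// Illustrative sketch, not part of the vendored patch: the batch_span_processor.go
// hunk above replaces the blocking <-bsp.timer.C drain with a select/default, because
// under the Go 1.23 timer semantics (GODEBUG=asynctimerchan=0) Stop can leave the
// channel empty and a bare receive would block forever. The non-blocking drain in
// isolation, assuming only the standard library:
package main

import (
	"fmt"
	"time"
)

func main() {
	t := time.NewTimer(time.Millisecond)
	time.Sleep(5 * time.Millisecond) // let the timer fire without receiving from it

	if !t.Stop() {
		// The timer already fired. Depending on the timer implementation the
		// channel may or may not hold a value, so never block on the receive.
		select {
		case <-t.C:
		default:
		}
	}
	fmt.Println("timer stopped; any pending tick drained")
}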
return evictedQueue[Link]{ - capacity: capacity, - logDropped: sync.OnceFunc(func() { global.Warn("limit reached: dropping trace trace.Link") }), + capacity: capacity, + logDroppedMsg: "limit reached: dropping trace trace.Link", } } @@ -53,6 +54,10 @@ func (eq *evictedQueue[T]) add(value T) { eq.queue = append(eq.queue, value) } +func (eq *evictedQueue[T]) logDropped() { + eq.logDroppedOnce.Do(func() { global.Warn(eq.logDroppedMsg) }) +} + // copy returns a copy of the evictedQueue. func (eq *evictedQueue[T]) copy() []T { return slices.Clone(eq.queue) diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go b/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go index 14c2e5beb..185aa7c08 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go @@ -139,9 +139,10 @@ func (p *TracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.T name = defaultTracerName } is := instrumentation.Scope{ - Name: name, - Version: c.InstrumentationVersion(), - SchemaURL: c.SchemaURL(), + Name: name, + Version: c.InstrumentationVersion(), + SchemaURL: c.SchemaURL(), + Attributes: c.InstrumentationAttributes(), } t, ok := func() (trace.Tracer, bool) { @@ -168,7 +169,7 @@ func (p *TracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.T // slowing down all tracing consumers. // - Logging code may be instrumented with tracing and deadlock because it could try // acquiring the same non-reentrant mutex. - global.Info("Tracer created", "name", name, "version", is.Version, "schemaURL", is.SchemaURL) + global.Info("Tracer created", "name", name, "version", is.Version, "schemaURL", is.SchemaURL, "attributes", is.Attributes) } return t } diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/sampler_env.go b/vendor/go.opentelemetry.io/otel/sdk/trace/sampler_env.go index d2d1f7246..9b672a1d7 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/sampler_env.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/sampler_env.go @@ -5,7 +5,6 @@ package trace // import "go.opentelemetry.io/otel/sdk/trace" import ( "errors" - "fmt" "os" "strconv" "strings" @@ -26,7 +25,7 @@ const ( type errUnsupportedSampler string func (e errUnsupportedSampler) Error() string { - return fmt.Sprintf("unsupported sampler: %s", string(e)) + return "unsupported sampler: " + string(e) } var ( @@ -39,7 +38,7 @@ type samplerArgParseError struct { } func (e samplerArgParseError) Error() string { - return fmt.Sprintf("parsing sampler argument: %s", e.parseErr.Error()) + return "parsing sampler argument: " + e.parseErr.Error() } func (e samplerArgParseError) Unwrap() error { diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/snapshot.go b/vendor/go.opentelemetry.io/otel/sdk/trace/snapshot.go index 32f862790..d511d0f27 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/snapshot.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/snapshot.go @@ -99,7 +99,7 @@ func (s snapshot) InstrumentationScope() instrumentation.Scope { // InstrumentationLibrary returns information about the instrumentation // library that created the span. 
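// Illustrative sketch, not part of the vendored patch: several hunks in this update
// (instrumentation.Scope, the tracer provider, and the OTLP transform) now carry
// c.InstrumentationAttributes() through to the exported scope. Assuming the
// trace.WithInstrumentationAttributes option available in recent otel releases,
// setting scope-level attributes on a tracer looks roughly like this:
package main

import (
	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/trace"
)

func main() {
	tracer := otel.Tracer(
		"github.com/example/instrumentation", // hypothetical scope name
		trace.WithInstrumentationVersion("0.1.0"),
		trace.WithInstrumentationAttributes(attribute.String("deployment.tier", "test")),
	)
	_ = tracer // spans started from this tracer report the scope-level attributes
}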
-func (s snapshot) InstrumentationLibrary() instrumentation.Library { +func (s snapshot) InstrumentationLibrary() instrumentation.Library { //nolint:staticcheck // This method needs to be define for backwards compatibility return s.instrumentationScope } diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/span.go b/vendor/go.opentelemetry.io/otel/sdk/trace/span.go index ac90f1a26..8f4fc3850 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/span.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/span.go @@ -62,7 +62,7 @@ type ReadOnlySpan interface { // InstrumentationLibrary returns information about the instrumentation // library that created the span. // Deprecated: please use InstrumentationScope instead. - InstrumentationLibrary() instrumentation.Library + InstrumentationLibrary() instrumentation.Library //nolint:staticcheck // This method needs to be define for backwards compatibility // Resource returns information about the entity that produced the span. Resource() *resource.Resource // DroppedAttributes returns the number of attributes dropped by the span @@ -174,6 +174,17 @@ func (s *recordingSpan) IsRecording() bool { s.mu.Lock() defer s.mu.Unlock() + return s.isRecording() +} + +// isRecording returns if this span is being recorded. If this span has ended +// this will return false. +// +// This method assumes s.mu.Lock is held by the caller. +func (s *recordingSpan) isRecording() bool { + if s == nil { + return false + } return s.endTime.IsZero() } @@ -182,11 +193,15 @@ func (s *recordingSpan) IsRecording() bool { // included in the set status when the code is for an error. If this span is // not being recorded than this method does nothing. func (s *recordingSpan) SetStatus(code codes.Code, description string) { - if !s.IsRecording() { + if s == nil { return } + s.mu.Lock() defer s.mu.Unlock() + if !s.isRecording() { + return + } if s.status.Code > code { return } @@ -210,12 +225,15 @@ func (s *recordingSpan) SetStatus(code codes.Code, description string) { // attributes the span is configured to have, the last added attributes will // be dropped. func (s *recordingSpan) SetAttributes(attributes ...attribute.KeyValue) { - if !s.IsRecording() { + if s == nil || len(attributes) == 0 { return } s.mu.Lock() defer s.mu.Unlock() + if !s.isRecording() { + return + } limit := s.tracer.provider.spanLimits.AttributeCountLimit if limit == 0 { @@ -233,7 +251,7 @@ func (s *recordingSpan) SetAttributes(attributes ...attribute.KeyValue) { // Otherwise, add without deduplication. When attributes are read they // will be deduplicated, optimizing the operation. - s.attributes = slices.Grow(s.attributes, len(s.attributes)+len(attributes)) + s.attributes = slices.Grow(s.attributes, len(attributes)) for _, a := range attributes { if !a.Valid() { // Drop all invalid attributes. @@ -280,13 +298,17 @@ func (s *recordingSpan) addOverCapAttrs(limit int, attrs []attribute.KeyValue) { // Do not set a capacity when creating this map. Benchmark testing has // showed this to only add unused memory allocations in general use. - exists := make(map[attribute.Key]int) - s.dedupeAttrsFromRecord(&exists) + exists := make(map[attribute.Key]int, len(s.attributes)) + s.dedupeAttrsFromRecord(exists) // Now that s.attributes is deduplicated, adding unique attributes up to // the capacity of s will not over allocate s.attributes. 
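// Illustrative sketch, not part of the vendored patch: the recordingSpan hunks in this
// file move the "is the span still recording?" check inside the same mutex that guards
// the mutation, and add nil-receiver guards, closing the window in which a span could
// end between the check and the write. The shape of that pattern with a toy type
// (names here are illustrative):
package main

import (
	"fmt"
	"sync"
)

type toySpan struct {
	mu    sync.Mutex
	ended bool
	name  string
}

func (s *toySpan) SetName(name string) {
	if s == nil {
		return
	}
	s.mu.Lock()
	defer s.mu.Unlock()
	if s.ended { // state is checked under the lock that also guards the write
		return
	}
	s.name = name
}

func main() {
	s := &toySpan{}
	s.SetName("operation")
	fmt.Println(s.name) // operation
}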
- sum := len(attrs) + len(s.attributes) - s.attributes = slices.Grow(s.attributes, min(sum, limit)) + + // max size = limit + maxCap := min(len(attrs)+len(s.attributes), limit) + if cap(s.attributes) < maxCap { + s.attributes = slices.Grow(s.attributes, maxCap-cap(s.attributes)) + } for _, a := range attrs { if !a.Valid() { // Drop all invalid attributes. @@ -296,6 +318,7 @@ func (s *recordingSpan) addOverCapAttrs(limit int, attrs []attribute.KeyValue) { if idx, ok := exists[a.Key]; ok { // Perform all updates before dropping, even when at capacity. + a = truncateAttr(s.tracer.provider.spanLimits.AttributeValueLengthLimit, a) s.attributes[idx] = a continue } @@ -324,54 +347,99 @@ func truncateAttr(limit int, attr attribute.KeyValue) attribute.KeyValue { } switch attr.Value.Type() { case attribute.STRING: - if v := attr.Value.AsString(); len(v) > limit { - return attr.Key.String(safeTruncate(v, limit)) - } + v := attr.Value.AsString() + return attr.Key.String(truncate(limit, v)) case attribute.STRINGSLICE: v := attr.Value.AsStringSlice() for i := range v { - if len(v[i]) > limit { - v[i] = safeTruncate(v[i], limit) - } + v[i] = truncate(limit, v[i]) } return attr.Key.StringSlice(v) } return attr } -// safeTruncate truncates the string and guarantees valid UTF-8 is returned. -func safeTruncate(input string, limit int) string { - if trunc, ok := safeTruncateValidUTF8(input, limit); ok { - return trunc +// truncate returns a truncated version of s such that it contains less than +// the limit number of characters. Truncation is applied by returning the limit +// number of valid characters contained in s. +// +// If limit is negative, it returns the original string. +// +// UTF-8 is supported. When truncating, all invalid characters are dropped +// before applying truncation. +// +// If s already contains less than the limit number of bytes, it is returned +// unchanged. No invalid characters are removed. +func truncate(limit int, s string) string { + // This prioritize performance in the following order based on the most + // common expected use-cases. + // + // - Short values less than the default limit (128). + // - Strings with valid encodings that exceed the limit. + // - No limit. + // - Strings with invalid encodings that exceed the limit. + if limit < 0 || len(s) <= limit { + return s + } + + // Optimistically, assume all valid UTF-8. + var b strings.Builder + count := 0 + for i, c := range s { + if c != utf8.RuneError { + count++ + if count > limit { + return s[:i] + } + continue + } + + _, size := utf8.DecodeRuneInString(s[i:]) + if size == 1 { + // Invalid encoding. + b.Grow(len(s) - 1) + _, _ = b.WriteString(s[:i]) + s = s[i:] + break + } + } + + // Fast-path, no invalid input. + if b.Cap() == 0 { + return s } - trunc, _ := safeTruncateValidUTF8(strings.ToValidUTF8(input, ""), limit) - return trunc -} -// safeTruncateValidUTF8 returns a copy of the input string safely truncated to -// limit. The truncation is ensured to occur at the bounds of complete UTF-8 -// characters. If invalid encoding of UTF-8 is encountered, input is returned -// with false, otherwise, the truncated input will be returned with true. -func safeTruncateValidUTF8(input string, limit int) (string, bool) { - for cnt := 0; cnt <= limit; { - r, size := utf8.DecodeRuneInString(input[cnt:]) - if r == utf8.RuneError { - return input, false + // Truncate while validating UTF-8. + for i := 0; i < len(s) && count < limit; { + c := s[i] + if c < utf8.RuneSelf { + // Optimization for single byte runes (common case). 
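// Illustrative sketch, not part of the vendored patch: the rewritten truncate above
// leans on the unicode/utf8 convention that an invalid byte decodes as
// (utf8.RuneError, size 1), whereas a literal U+FFFD in the input decodes with size 3.
// That distinction in isolation, assuming only the standard library:
package main

import (
	"fmt"
	"unicode/utf8"
)

func main() {
	bad := "a\xffb"    // contains one invalid byte
	good := "a\uFFFDb" // contains a real replacement character

	r, size := utf8.DecodeRuneInString(bad[1:])
	fmt.Println(r == utf8.RuneError, size) // true 1 -> invalid encoding

	r, size = utf8.DecodeRuneInString(good[1:])
	fmt.Println(r == utf8.RuneError, size) // true 3 -> valid U+FFFD rune
}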
+ _ = b.WriteByte(c) + i++ + count++ + continue } - if cnt+size > limit { - return input[:cnt], true + _, size := utf8.DecodeRuneInString(s[i:]) + if size == 1 { + // We checked for all 1-byte runes above, this is a RuneError. + i++ + continue } - cnt += size + + _, _ = b.WriteString(s[i : i+size]) + i += size + count++ } - return input, true + + return b.String() } // End ends the span. This method does nothing if the span is already ended or // is not being recorded. // -// The only SpanOption currently supported is WithTimestamp which will set the -// end time for a Span's life-cycle. +// The only SpanEndOption currently supported are [trace.WithTimestamp], and +// [trace.WithStackTrace]. // // If this method is called while panicking an error event is added to the // Span before ending it and the panic is continued. @@ -386,9 +454,10 @@ func (s *recordingSpan) End(options ...trace.SpanEndOption) { // the span's duration in case some operation below takes a while. et := monotonicEndTime(s.startTime) - // Do relative expensive check now that we have an end time and see if we - // need to do any more processing. - if !s.IsRecording() { + // Lock the span now that we have an end time and see if we need to do any more processing. + s.mu.Lock() + if !s.isRecording() { + s.mu.Unlock() return } @@ -413,10 +482,11 @@ func (s *recordingSpan) End(options ...trace.SpanEndOption) { } if s.executionTracerTaskEnd != nil { + s.mu.Unlock() s.executionTracerTaskEnd() + s.mu.Lock() } - s.mu.Lock() // Setting endTime to non-zero marks the span as ended and not recording. if config.Timestamp().IsZero() { s.endTime = et @@ -450,7 +520,13 @@ func monotonicEndTime(start time.Time) time.Time { // does not change the Span status. If this span is not being recorded or err is nil // than this method does nothing. func (s *recordingSpan) RecordError(err error, opts ...trace.EventOption) { - if s == nil || err == nil || !s.IsRecording() { + if s == nil || err == nil { + return + } + + s.mu.Lock() + defer s.mu.Unlock() + if !s.isRecording() { return } @@ -486,14 +562,23 @@ func recordStackTrace() string { } // AddEvent adds an event with the provided name and options. If this span is -// not being recorded than this method does nothing. +// not being recorded then this method does nothing. func (s *recordingSpan) AddEvent(name string, o ...trace.EventOption) { - if !s.IsRecording() { + if s == nil { + return + } + + s.mu.Lock() + defer s.mu.Unlock() + if !s.isRecording() { return } s.addEvent(name, o...) } +// addEvent adds an event with the provided name and options. +// +// This method assumes s.mu.Lock is held by the caller. func (s *recordingSpan) addEvent(name string, o ...trace.EventOption) { c := trace.NewEventConfig(o...) e := Event{Name: name, Attributes: c.Attributes(), Time: c.Timestamp()} @@ -510,20 +595,21 @@ func (s *recordingSpan) addEvent(name string, o ...trace.EventOption) { e.Attributes = e.Attributes[:limit] } - s.mu.Lock() s.events.add(e) - s.mu.Unlock() } // SetName sets the name of this span. If this span is not being recorded than // this method does nothing. func (s *recordingSpan) SetName(name string) { - if !s.IsRecording() { + if s == nil { return } s.mu.Lock() defer s.mu.Unlock() + if !s.isRecording() { + return + } s.name = name } @@ -579,29 +665,26 @@ func (s *recordingSpan) Attributes() []attribute.KeyValue { func (s *recordingSpan) dedupeAttrs() { // Do not set a capacity when creating this map. 
Benchmark testing has // showed this to only add unused memory allocations in general use. - exists := make(map[attribute.Key]int) - s.dedupeAttrsFromRecord(&exists) + exists := make(map[attribute.Key]int, len(s.attributes)) + s.dedupeAttrsFromRecord(exists) } // dedupeAttrsFromRecord deduplicates the attributes of s to fit capacity // using record as the record of unique attribute keys to their index. // // This method assumes s.mu.Lock is held by the caller. -func (s *recordingSpan) dedupeAttrsFromRecord(record *map[attribute.Key]int) { +func (s *recordingSpan) dedupeAttrsFromRecord(record map[attribute.Key]int) { // Use the fact that slices share the same backing array. unique := s.attributes[:0] for _, a := range s.attributes { - if idx, ok := (*record)[a.Key]; ok { + if idx, ok := record[a.Key]; ok { unique[idx] = a } else { unique = append(unique, a) - (*record)[a.Key] = len(unique) - 1 + record[a.Key] = len(unique) - 1 } } - // s.attributes have element types of attribute.KeyValue. These types are - // not pointers and they themselves do not contain pointer fields, - // therefore the duplicate values do not need to be zeroed for them to be - // garbage collected. + clear(s.attributes[len(unique):]) // Erase unneeded elements to let GC collect objects. s.attributes = unique } @@ -642,7 +725,7 @@ func (s *recordingSpan) InstrumentationScope() instrumentation.Scope { // InstrumentationLibrary returns the instrumentation.Library associated with // the Tracer that created this span. -func (s *recordingSpan) InstrumentationLibrary() instrumentation.Library { +func (s *recordingSpan) InstrumentationLibrary() instrumentation.Library { //nolint:staticcheck // This method needs to be define for backwards compatibility s.mu.Lock() defer s.mu.Unlock() return s.tracer.instrumentationScope @@ -657,7 +740,7 @@ func (s *recordingSpan) Resource() *resource.Resource { } func (s *recordingSpan) AddLink(link trace.Link) { - if !s.IsRecording() { + if s == nil { return } if !link.SpanContext.IsValid() && len(link.Attributes) == 0 && @@ -665,6 +748,12 @@ func (s *recordingSpan) AddLink(link trace.Link) { return } + s.mu.Lock() + defer s.mu.Unlock() + if !s.isRecording() { + return + } + l := Link{SpanContext: link.SpanContext, Attributes: link.Attributes} // Discard attributes over limit. @@ -678,9 +767,7 @@ func (s *recordingSpan) AddLink(link trace.Link) { l.Attributes = l.Attributes[:limit] } - s.mu.Lock() s.links.add(l) - s.mu.Unlock() } // DroppedAttributes returns the number of attributes dropped by the span @@ -755,12 +842,16 @@ func (s *recordingSpan) snapshot() ReadOnlySpan { } func (s *recordingSpan) addChild() { - if !s.IsRecording() { + if s == nil { return } + s.mu.Lock() + defer s.mu.Unlock() + if !s.isRecording() { + return + } s.childSpanCount++ - s.mu.Unlock() } func (*recordingSpan) private() {} diff --git a/vendor/go.opentelemetry.io/otel/sdk/version.go b/vendor/go.opentelemetry.io/otel/sdk/version.go index 33d065a7c..ba7db4889 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/version.go +++ b/vendor/go.opentelemetry.io/otel/sdk/version.go @@ -5,5 +5,5 @@ package sdk // import "go.opentelemetry.io/otel/sdk" // Version is the current release version of the OpenTelemetry SDK in use. 
func Version() string { - return "1.28.0" + return "1.33.0" } diff --git a/vendor/go.opentelemetry.io/otel/trace/config.go b/vendor/go.opentelemetry.io/otel/trace/config.go index 273d58e00..9c0b720a4 100644 --- a/vendor/go.opentelemetry.io/otel/trace/config.go +++ b/vendor/go.opentelemetry.io/otel/trace/config.go @@ -213,7 +213,7 @@ var _ SpanStartEventOption = attributeOption{} // WithAttributes adds the attributes related to a span life-cycle event. // These attributes are used to describe the work a Span represents when this -// option is provided to a Span's start or end events. Otherwise, these +// option is provided to a Span's start event. Otherwise, these // attributes provide additional information about the event being recorded // (e.g. error, state change, processing progress, system event). // diff --git a/vendor/go.opentelemetry.io/otel/trace/context.go b/vendor/go.opentelemetry.io/otel/trace/context.go index 5650a174b..8c45a7107 100644 --- a/vendor/go.opentelemetry.io/otel/trace/context.go +++ b/vendor/go.opentelemetry.io/otel/trace/context.go @@ -22,7 +22,7 @@ func ContextWithSpanContext(parent context.Context, sc SpanContext) context.Cont return ContextWithSpan(parent, nonRecordingSpan{sc: sc}) } -// ContextWithRemoteSpanContext returns a copy of parent with rsc set explicly +// ContextWithRemoteSpanContext returns a copy of parent with rsc set explicitly // as a remote SpanContext and as the current Span. The Span implementation // that wraps rsc is non-recording and performs no operations other than to // return rsc as the SpanContext from the SpanContext method. diff --git a/vendor/go.opentelemetry.io/otel/trace/doc.go b/vendor/go.opentelemetry.io/otel/trace/doc.go index d661c5d10..cdbf41d6d 100644 --- a/vendor/go.opentelemetry.io/otel/trace/doc.go +++ b/vendor/go.opentelemetry.io/otel/trace/doc.go @@ -96,7 +96,7 @@ can embed the API interface directly. This option is not recommended. It will lead to publishing packages that contain runtime panics when users update to newer versions of -[go.opentelemetry.io/otel/trace], which may be done with a trasitive +[go.opentelemetry.io/otel/trace], which may be done with a transitive dependency. Finally, an author can embed another implementation in theirs. The embedded diff --git a/vendor/go.opentelemetry.io/otel/verify_examples.sh b/vendor/go.opentelemetry.io/otel/verify_examples.sh deleted file mode 100644 index e57bf57fc..000000000 --- a/vendor/go.opentelemetry.io/otel/verify_examples.sh +++ /dev/null @@ -1,74 +0,0 @@ -#!/bin/bash - -# Copyright The OpenTelemetry Authors -# SPDX-License-Identifier: Apache-2.0 - -set -euo pipefail - -cd $(dirname $0) -TOOLS_DIR=$(pwd)/.tools - -if [ -z "${GOPATH}" ] ; then - printf "GOPATH is not defined.\n" - exit -1 -fi - -if [ ! -d "${GOPATH}" ] ; then - printf "GOPATH ${GOPATH} is invalid \n" - exit -1 -fi - -# Pre-requisites -if ! git diff --quiet; then \ - git status - printf "\n\nError: working tree is not clean\n" - exit -1 -fi - -if [ "$(git tag --contains $(git log -1 --pretty=format:"%H"))" = "" ] ; then - printf "$(git log -1)" - printf "\n\nError: HEAD is not pointing to a tagged version" -fi - -make ${TOOLS_DIR}/gojq - -DIR_TMP="${GOPATH}/src/oteltmp/" -rm -rf $DIR_TMP -mkdir -p $DIR_TMP - -printf "Copy examples to ${DIR_TMP}\n" -cp -a ./example ${DIR_TMP} - -# Update go.mod files -printf "Update go.mod: rename module and remove replace\n" - -PACKAGE_DIRS=$(find . 
-mindepth 2 -type f -name 'go.mod' -exec dirname {} \; | egrep 'example' | sed 's/^\.\///' | sort) - -for dir in $PACKAGE_DIRS; do - printf " Update go.mod for $dir\n" - (cd "${DIR_TMP}/${dir}" && \ - # replaces is ("mod1" "mod2" …) - replaces=($(go mod edit -json | ${TOOLS_DIR}/gojq '.Replace[].Old.Path')) && \ - # strip double quotes - replaces=("${replaces[@]%\"}") && \ - replaces=("${replaces[@]#\"}") && \ - # make an array (-dropreplace=mod1 -dropreplace=mod2 …) - dropreplaces=("${replaces[@]/#/-dropreplace=}") && \ - go mod edit -module "oteltmp/${dir}" "${dropreplaces[@]}" && \ - go mod tidy) -done -printf "Update done:\n\n" - -# Build directories that contain main package. These directories are different than -# directories that contain go.mod files. -printf "Build examples:\n" -EXAMPLES=$(./get_main_pkgs.sh ./example) -for ex in $EXAMPLES; do - printf " Build $ex in ${DIR_TMP}/${ex}\n" - (cd "${DIR_TMP}/${ex}" && \ - go build .) -done - -# Cleanup -printf "Remove copied files.\n" -rm -rf $DIR_TMP diff --git a/vendor/go.opentelemetry.io/otel/version.go b/vendor/go.opentelemetry.io/otel/version.go index f67039ed1..fb7d12673 100644 --- a/vendor/go.opentelemetry.io/otel/version.go +++ b/vendor/go.opentelemetry.io/otel/version.go @@ -5,5 +5,5 @@ package otel // import "go.opentelemetry.io/otel" // Version is the current release version of OpenTelemetry in use. func Version() string { - return "1.29.0" + return "1.33.0" } diff --git a/vendor/go.opentelemetry.io/otel/versions.yaml b/vendor/go.opentelemetry.io/otel/versions.yaml index 3ba611d71..9f878cd1f 100644 --- a/vendor/go.opentelemetry.io/otel/versions.yaml +++ b/vendor/go.opentelemetry.io/otel/versions.yaml @@ -3,19 +3,13 @@ module-sets: stable-v1: - version: v1.29.0 + version: v1.33.0 modules: - go.opentelemetry.io/otel - go.opentelemetry.io/otel/bridge/opencensus - go.opentelemetry.io/otel/bridge/opencensus/test - go.opentelemetry.io/otel/bridge/opentracing - go.opentelemetry.io/otel/bridge/opentracing/test - - go.opentelemetry.io/otel/example/dice - - go.opentelemetry.io/otel/example/namedtracer - - go.opentelemetry.io/otel/example/opencensus - - go.opentelemetry.io/otel/example/otel-collector - - go.opentelemetry.io/otel/example/passthrough - - go.opentelemetry.io/otel/example/zipkin - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp - go.opentelemetry.io/otel/exporters/otlp/otlptrace @@ -29,12 +23,11 @@ module-sets: - go.opentelemetry.io/otel/sdk/metric - go.opentelemetry.io/otel/trace experimental-metrics: - version: v0.51.0 + version: v0.55.0 modules: - - go.opentelemetry.io/otel/example/prometheus - go.opentelemetry.io/otel/exporters/prometheus experimental-logs: - version: v0.5.0 + version: v0.9.0 modules: - go.opentelemetry.io/otel/log - go.opentelemetry.io/otel/sdk/log @@ -42,7 +35,7 @@ module-sets: - go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp - go.opentelemetry.io/otel/exporters/stdout/stdoutlog experimental-schema: - version: v0.0.8 + version: v0.0.12 modules: - go.opentelemetry.io/otel/schema excluded-modules: diff --git a/vendor/go.starlark.net/LICENSE b/vendor/go.starlark.net/LICENSE deleted file mode 100644 index a6609a143..000000000 --- a/vendor/go.starlark.net/LICENSE +++ /dev/null @@ -1,29 +0,0 @@ -Copyright (c) 2017 The Bazel Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - -1. 
Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - -2. Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the - distribution. - -3. Neither the name of the copyright holder nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/go.starlark.net/internal/compile/compile.go b/vendor/go.starlark.net/internal/compile/compile.go deleted file mode 100644 index 888d95c56..000000000 --- a/vendor/go.starlark.net/internal/compile/compile.go +++ /dev/null @@ -1,1924 +0,0 @@ -// Package compile defines the Starlark bytecode compiler. -// It is an internal package of the Starlark interpreter and is not directly accessible to clients. -// -// The compiler generates byte code with optional uint32 operands for a -// virtual machine with the following components: -// - a program counter, which is an index into the byte code array. -// - an operand stack, whose maximum size is computed for each function by the compiler. -// - an stack of active iterators. -// - an array of local variables. -// The number of local variables and their indices are computed by the resolver. -// Locals (possibly including parameters) that are shared with nested functions -// are 'cells': their locals array slot will contain a value of type 'cell', -// an indirect value in a box that is explicitly read/updated by instructions. -// - an array of free variables, for nested functions. -// Free variables are a subset of the ancestors' cell variables. -// As with locals and cells, these are computed by the resolver. -// - an array of global variables, shared among all functions in the same module. -// All elements are initially nil. -// - two maps of predeclared and universal identifiers. -// -// Each function has a line number table that maps each program counter -// offset to a source position, including the column number. -// -// Operands, logically uint32s, are encoded using little-endian 7-bit -// varints, the top bit indicating that more bytes follow. -// -package compile // import "go.starlark.net/internal/compile" - -import ( - "bytes" - "fmt" - "log" - "os" - "path/filepath" - "strconv" - "strings" - "sync" - - "go.starlark.net/resolve" - "go.starlark.net/syntax" -) - -// Disassemble causes the assembly code for each function -// to be printed to stderr as it is generated. -var Disassemble = false - -const debug = false // make code generation verbose, for debugging the compiler - -// Increment this to force recompilation of saved bytecode files. 
-const Version = 13 - -type Opcode uint8 - -// "x DUP x x" is a "stack picture" that describes the state of the -// stack before and after execution of the instruction. -// -// OP indicates an immediate operand that is an index into the -// specified table: locals, names, freevars, constants. -const ( - NOP Opcode = iota // - NOP - - - // stack operations - DUP // x DUP x x - DUP2 // x y DUP2 x y x y - POP // x POP - - EXCH // x y EXCH y x - - // binary comparisons - // (order must match Token) - LT - GT - GE - LE - EQL - NEQ - - // binary arithmetic - // (order must match Token) - PLUS - MINUS - STAR - SLASH - SLASHSLASH - PERCENT - AMP - PIPE - CIRCUMFLEX - LTLT - GTGT - - IN - - // unary operators - UPLUS // x UPLUS x - UMINUS // x UMINUS -x - TILDE // x TILDE ~x - - NONE // - NONE None - TRUE // - TRUE True - FALSE // - FALSE False - MANDATORY // - MANDATORY Mandatory [sentinel value for required kwonly args] - - ITERPUSH // iterable ITERPUSH - [pushes the iterator stack] - ITERPOP // - ITERPOP - [pops the iterator stack] - NOT // value NOT bool - RETURN // value RETURN - - SETINDEX // a i new SETINDEX - - INDEX // a i INDEX elem - SETDICT // dict key value SETDICT - - SETDICTUNIQ // dict key value SETDICTUNIQ - - APPEND // list elem APPEND - - SLICE // x lo hi step SLICE slice - INPLACE_ADD // x y INPLACE_ADD z where z is x+y or x.extend(y) - INPLACE_PIPE // x y INPLACE_PIPE z where z is x|y - MAKEDICT // - MAKEDICT dict - - // --- opcodes with an argument must go below this line --- - - // control flow - JMP // - JMP - - CJMP // cond CJMP - - ITERJMP // - ITERJMP elem (and fall through) [acts on topmost iterator] - // or: - ITERJMP - (and jump) - - CONSTANT // - CONSTANT value - MAKETUPLE // x1 ... xn MAKETUPLE tuple - MAKELIST // x1 ... xn MAKELIST list - MAKEFUNC // defaults+freevars MAKEFUNC fn - LOAD // from1 ... fromN module LOAD v1 ... vN - SETLOCAL // value SETLOCAL - - SETGLOBAL // value SETGLOBAL - - LOCAL // - LOCAL value - FREE // - FREE cell - FREECELL // - FREECELL value (content of FREE cell) - LOCALCELL // - LOCALCELL value (content of LOCAL cell) - SETLOCALCELL // value SETLOCALCELL - (set content of LOCAL cell) - GLOBAL // - GLOBAL value - PREDECLARED // - PREDECLARED value - UNIVERSAL // - UNIVERSAL value - ATTR // x ATTR y y = x.name - SETFIELD // x y SETFIELD - x.name = y - UNPACK // iterable UNPACK vn ... v1 - - // n>>8 is #positional args and n&0xff is #named args (pairs). - CALL // fn positional named CALL result - CALL_VAR // fn positional named *args CALL_VAR result - CALL_KW // fn positional named **kwargs CALL_KW result - CALL_VAR_KW // fn positional named *args **kwargs CALL_VAR_KW result - - OpcodeArgMin = JMP - OpcodeMax = CALL_VAR_KW -) - -// TODO(adonovan): add dynamic checks for missing opcodes in the tables below. 
- -var opcodeNames = [...]string{ - AMP: "amp", - APPEND: "append", - ATTR: "attr", - CALL: "call", - CALL_KW: "call_kw ", - CALL_VAR: "call_var", - CALL_VAR_KW: "call_var_kw", - CIRCUMFLEX: "circumflex", - CJMP: "cjmp", - CONSTANT: "constant", - DUP2: "dup2", - DUP: "dup", - EQL: "eql", - EXCH: "exch", - FALSE: "false", - FREE: "free", - FREECELL: "freecell", - GE: "ge", - GLOBAL: "global", - GT: "gt", - GTGT: "gtgt", - IN: "in", - INDEX: "index", - INPLACE_ADD: "inplace_add", - INPLACE_PIPE: "inplace_pipe", - ITERJMP: "iterjmp", - ITERPOP: "iterpop", - ITERPUSH: "iterpush", - JMP: "jmp", - LE: "le", - LOAD: "load", - LOCAL: "local", - LOCALCELL: "localcell", - LT: "lt", - LTLT: "ltlt", - MAKEDICT: "makedict", - MAKEFUNC: "makefunc", - MAKELIST: "makelist", - MAKETUPLE: "maketuple", - MANDATORY: "mandatory", - MINUS: "minus", - NEQ: "neq", - NONE: "none", - NOP: "nop", - NOT: "not", - PERCENT: "percent", - PIPE: "pipe", - PLUS: "plus", - POP: "pop", - PREDECLARED: "predeclared", - RETURN: "return", - SETDICT: "setdict", - SETDICTUNIQ: "setdictuniq", - SETFIELD: "setfield", - SETGLOBAL: "setglobal", - SETINDEX: "setindex", - SETLOCAL: "setlocal", - SETLOCALCELL: "setlocalcell", - SLASH: "slash", - SLASHSLASH: "slashslash", - SLICE: "slice", - STAR: "star", - TILDE: "tilde", - TRUE: "true", - UMINUS: "uminus", - UNIVERSAL: "universal", - UNPACK: "unpack", - UPLUS: "uplus", -} - -const variableStackEffect = 0x7f - -// stackEffect records the effect on the size of the operand stack of -// each kind of instruction. For some instructions this requires computation. -var stackEffect = [...]int8{ - AMP: -1, - APPEND: -2, - ATTR: 0, - CALL: variableStackEffect, - CALL_KW: variableStackEffect, - CALL_VAR: variableStackEffect, - CALL_VAR_KW: variableStackEffect, - CIRCUMFLEX: -1, - CJMP: -1, - CONSTANT: +1, - DUP2: +2, - DUP: +1, - EQL: -1, - FALSE: +1, - FREE: +1, - FREECELL: +1, - GE: -1, - GLOBAL: +1, - GT: -1, - GTGT: -1, - IN: -1, - INDEX: -1, - INPLACE_ADD: -1, - INPLACE_PIPE: -1, - ITERJMP: variableStackEffect, - ITERPOP: 0, - ITERPUSH: -1, - JMP: 0, - LE: -1, - LOAD: -1, - LOCAL: +1, - LOCALCELL: +1, - LT: -1, - LTLT: -1, - MAKEDICT: +1, - MAKEFUNC: 0, - MAKELIST: variableStackEffect, - MAKETUPLE: variableStackEffect, - MANDATORY: +1, - MINUS: -1, - NEQ: -1, - NONE: +1, - NOP: 0, - NOT: 0, - PERCENT: -1, - PIPE: -1, - PLUS: -1, - POP: -1, - PREDECLARED: +1, - RETURN: -1, - SETLOCALCELL: -1, - SETDICT: -3, - SETDICTUNIQ: -3, - SETFIELD: -2, - SETGLOBAL: -1, - SETINDEX: -3, - SETLOCAL: -1, - SLASH: -1, - SLASHSLASH: -1, - SLICE: -3, - STAR: -1, - TRUE: +1, - UMINUS: 0, - UNIVERSAL: +1, - UNPACK: variableStackEffect, - UPLUS: 0, -} - -func (op Opcode) String() string { - if op < OpcodeMax { - if name := opcodeNames[op]; name != "" { - return name - } - } - return fmt.Sprintf("illegal op (%d)", op) -} - -// A Program is a Starlark file in executable form. -// -// Programs are serialized by the Program.Encode method, -// which must be updated whenever this declaration is changed. -type Program struct { - Loads []Binding // name (really, string) and position of each load stmt - Names []string // names of attributes and predeclared variables - Constants []interface{} // = string | int64 | float64 | *big.Int | Bytes - Functions []*Funcode - Globals []Binding // for error messages and tracing - Toplevel *Funcode // module initialization function -} - -// The type of a bytes literal value, to distinguish from text string. -type Bytes string - -// A Funcode is the code of a compiled Starlark function. 
-// -// Funcodes are serialized by the encoder.function method, -// which must be updated whenever this declaration is changed. -type Funcode struct { - Prog *Program - Pos syntax.Position // position of def or lambda token - Name string // name of this function - Doc string // docstring of this function - Code []byte // the byte code - pclinetab []uint16 // mapping from pc to linenum - Locals []Binding // locals, parameters first - Cells []int // indices of Locals that require cells - Freevars []Binding // for tracing - MaxStack int - NumParams int - NumKwonlyParams int - HasVarargs, HasKwargs bool - - // -- transient state -- - - lntOnce sync.Once - lnt []pclinecol // decoded line number table -} - -type pclinecol struct { - pc uint32 - line, col int32 -} - -// A Binding is the name and position of a binding identifier. -type Binding struct { - Name string - Pos syntax.Position -} - -// A pcomp holds the compiler state for a Program. -type pcomp struct { - prog *Program // what we're building - - names map[string]uint32 - constants map[interface{}]uint32 - functions map[*Funcode]uint32 -} - -// An fcomp holds the compiler state for a Funcode. -type fcomp struct { - fn *Funcode // what we're building - - pcomp *pcomp - pos syntax.Position // current position of generated code - loops []loop - block *block -} - -type loop struct { - break_, continue_ *block -} - -type block struct { - insns []insn - - // If the last insn is a RETURN, jmp and cjmp are nil. - // If the last insn is a CJMP or ITERJMP, - // cjmp and jmp are the "true" and "false" successors. - // Otherwise, jmp is the sole successor. - jmp, cjmp *block - - initialstack int // for stack depth computation - - // Used during encoding - index int // -1 => not encoded yet - addr uint32 -} - -type insn struct { - op Opcode - arg uint32 - line, col int32 -} - -// Position returns the source position for program counter pc. -func (fn *Funcode) Position(pc uint32) syntax.Position { - fn.lntOnce.Do(fn.decodeLNT) - - // Binary search to find last LNT entry not greater than pc. - // To avoid dynamic dispatch, this is a specialization of - // sort.Search using this predicate: - // !(i < len(fn.lnt)-1 && fn.lnt[i+1].pc <= pc) - n := len(fn.lnt) - i, j := 0, n - for i < j { - h := int(uint(i+j) >> 1) - if !(h >= n-1 || fn.lnt[h+1].pc > pc) { - i = h + 1 - } else { - j = h - } - } - - var line, col int32 - if i < n { - line = fn.lnt[i].line - col = fn.lnt[i].col - } - - pos := fn.Pos // copy the (annoyingly inaccessible) filename - pos.Col = col - pos.Line = line - return pos -} - -// decodeLNT decodes the line number table and populates fn.lnt. -// It is called at most once. -func (fn *Funcode) decodeLNT() { - // Conceptually the table contains rows of the form - // (pc uint32, line int32, col int32), sorted by pc. - // We use a delta encoding, since the differences - // between successive pc, line, and column values - // are typically small and positive (though line and - // especially column differences may be negative). - // The delta encoding starts from - // {pc: 0, line: fn.Pos.Line, col: fn.Pos.Col}. - // - // Each entry is packed into one or more 16-bit values: - // Δpc uint4 - // Δline int5 - // Δcol int6 - // incomplete uint1 - // The top 4 bits are the unsigned delta pc. - // The next 5 bits are the signed line number delta. - // The next 6 bits are the signed column number delta. - // The bottom bit indicates that more rows follow because - // one of the deltas was maxed out. 
- // These field widths were chosen from a sample of real programs, - // and allow >97% of rows to be encoded in a single uint16. - - fn.lnt = make([]pclinecol, 0, len(fn.pclinetab)) // a minor overapproximation - entry := pclinecol{ - pc: 0, - line: fn.Pos.Line, - col: fn.Pos.Col, - } - for _, x := range fn.pclinetab { - entry.pc += uint32(x) >> 12 - entry.line += int32((int16(x) << 4) >> (16 - 5)) // sign extend Δline - entry.col += int32((int16(x) << 9) >> (16 - 6)) // sign extend Δcol - if (x & 1) == 0 { - fn.lnt = append(fn.lnt, entry) - } - } -} - -// bindings converts resolve.Bindings to compiled form. -func bindings(bindings []*resolve.Binding) []Binding { - res := make([]Binding, len(bindings)) - for i, bind := range bindings { - res[i].Name = bind.First.Name - res[i].Pos = bind.First.NamePos - } - return res -} - -// Expr compiles an expression to a program whose toplevel function evaluates it. -func Expr(expr syntax.Expr, name string, locals []*resolve.Binding) *Program { - pos := syntax.Start(expr) - stmts := []syntax.Stmt{&syntax.ReturnStmt{Result: expr}} - return File(stmts, pos, name, locals, nil) -} - -// File compiles the statements of a file into a program. -func File(stmts []syntax.Stmt, pos syntax.Position, name string, locals, globals []*resolve.Binding) *Program { - pcomp := &pcomp{ - prog: &Program{ - Globals: bindings(globals), - }, - names: make(map[string]uint32), - constants: make(map[interface{}]uint32), - functions: make(map[*Funcode]uint32), - } - pcomp.prog.Toplevel = pcomp.function(name, pos, stmts, locals, nil) - - return pcomp.prog -} - -func (pcomp *pcomp) function(name string, pos syntax.Position, stmts []syntax.Stmt, locals, freevars []*resolve.Binding) *Funcode { - fcomp := &fcomp{ - pcomp: pcomp, - pos: pos, - fn: &Funcode{ - Prog: pcomp.prog, - Pos: pos, - Name: name, - Doc: docStringFromBody(stmts), - Locals: bindings(locals), - Freevars: bindings(freevars), - }, - } - - // Record indices of locals that require cells. - for i, local := range locals { - if local.Scope == resolve.Cell { - fcomp.fn.Cells = append(fcomp.fn.Cells, i) - } - } - - if debug { - fmt.Fprintf(os.Stderr, "start function(%s @ %s)\n", name, pos) - } - - // Convert AST to a CFG of instructions. - entry := fcomp.newBlock() - fcomp.block = entry - fcomp.stmts(stmts) - if fcomp.block != nil { - fcomp.emit(NONE) - fcomp.emit(RETURN) - } - - var oops bool // something bad happened - - setinitialstack := func(b *block, depth int) { - if b.initialstack == -1 { - b.initialstack = depth - } else if b.initialstack != depth { - fmt.Fprintf(os.Stderr, "%d: setinitialstack: depth mismatch: %d vs %d\n", - b.index, b.initialstack, depth) - oops = true - } - } - - // Linearize the CFG: - // compute order, address, and initial - // stack depth of each reachable block. - var pc uint32 - var blocks []*block - var maxstack int - var visit func(b *block) - visit = func(b *block) { - if b.index >= 0 { - return // already visited - } - b.index = len(blocks) - b.addr = pc - blocks = append(blocks, b) - - stack := b.initialstack - if debug { - fmt.Fprintf(os.Stderr, "%s block %d: (stack = %d)\n", name, b.index, stack) - } - var cjmpAddr *uint32 - var isiterjmp int - for i, insn := range b.insns { - pc++ - - // Compute size of argument. - if insn.op >= OpcodeArgMin { - switch insn.op { - case ITERJMP: - isiterjmp = 1 - fallthrough - case CJMP: - cjmpAddr = &b.insns[i].arg - pc += 4 - default: - pc += uint32(argLen(insn.arg)) - } - } - - // Compute effect on stack. 
- se := insn.stackeffect() - if debug { - fmt.Fprintln(os.Stderr, "\t", insn.op, stack, stack+se) - } - stack += se - if stack < 0 { - fmt.Fprintf(os.Stderr, "After pc=%d: stack underflow\n", pc) - oops = true - } - if stack+isiterjmp > maxstack { - maxstack = stack + isiterjmp - } - } - - if debug { - fmt.Fprintf(os.Stderr, "successors of block %d (start=%d):\n", - b.addr, b.index) - if b.jmp != nil { - fmt.Fprintf(os.Stderr, "jmp to %d\n", b.jmp.index) - } - if b.cjmp != nil { - fmt.Fprintf(os.Stderr, "cjmp to %d\n", b.cjmp.index) - } - } - - // Place the jmp block next. - if b.jmp != nil { - // jump threading (empty cycles are impossible) - for b.jmp.insns == nil { - b.jmp = b.jmp.jmp - } - - setinitialstack(b.jmp, stack+isiterjmp) - if b.jmp.index < 0 { - // Successor is not yet visited: - // place it next and fall through. - visit(b.jmp) - } else { - // Successor already visited; - // explicit backward jump required. - pc += 5 - } - } - - // Then the cjmp block. - if b.cjmp != nil { - // jump threading (empty cycles are impossible) - for b.cjmp.insns == nil { - b.cjmp = b.cjmp.jmp - } - - setinitialstack(b.cjmp, stack) - visit(b.cjmp) - - // Patch the CJMP/ITERJMP, if present. - if cjmpAddr != nil { - *cjmpAddr = b.cjmp.addr - } - } - } - setinitialstack(entry, 0) - visit(entry) - - fn := fcomp.fn - fn.MaxStack = maxstack - - // Emit bytecode (and position table). - if Disassemble { - fmt.Fprintf(os.Stderr, "Function %s: (%d blocks, %d bytes)\n", name, len(blocks), pc) - } - fcomp.generate(blocks, pc) - - if debug { - fmt.Fprintf(os.Stderr, "code=%d maxstack=%d\n", fn.Code, fn.MaxStack) - } - - // Don't panic until we've completed printing of the function. - if oops { - panic("internal error") - } - - if debug { - fmt.Fprintf(os.Stderr, "end function(%s @ %s)\n", name, pos) - } - - return fn -} - -func docStringFromBody(body []syntax.Stmt) string { - if len(body) == 0 { - return "" - } - expr, ok := body[0].(*syntax.ExprStmt) - if !ok { - return "" - } - lit, ok := expr.X.(*syntax.Literal) - if !ok { - return "" - } - if lit.Token != syntax.STRING { - return "" - } - return lit.Value.(string) -} - -func (insn *insn) stackeffect() int { - se := int(stackEffect[insn.op]) - if se == variableStackEffect { - arg := int(insn.arg) - switch insn.op { - case CALL, CALL_KW, CALL_VAR, CALL_VAR_KW: - se = -int(2*(insn.arg&0xff) + insn.arg>>8) - if insn.op != CALL { - se-- - } - if insn.op == CALL_VAR_KW { - se-- - } - case ITERJMP: - // Stack effect differs by successor: - // +1 for jmp/false/ok - // 0 for cjmp/true/exhausted - // Handled specially in caller. - se = 0 - case MAKELIST, MAKETUPLE: - se = 1 - arg - case UNPACK: - se = arg - 1 - default: - panic(insn.op) - } - } - return se -} - -// generate emits the linear instruction stream from the CFG, -// and builds the PC-to-line number table. -func (fcomp *fcomp) generate(blocks []*block, codelen uint32) { - code := make([]byte, 0, codelen) - var pclinetab []uint16 - prev := pclinecol{ - pc: 0, - line: fcomp.fn.Pos.Line, - col: fcomp.fn.Pos.Col, - } - - for _, b := range blocks { - if Disassemble { - fmt.Fprintf(os.Stderr, "%d:\n", b.index) - } - pc := b.addr - for _, insn := range b.insns { - if insn.line != 0 { - // Instruction has a source position. Delta-encode it. - // See Funcode.Position for the encoding. 
- for { - var incomplete uint16 - - // Δpc, uint4 - deltapc := pc - prev.pc - if deltapc > 0x0f { - deltapc = 0x0f - incomplete = 1 - } - prev.pc += deltapc - - // Δline, int5 - deltaline, ok := clip(insn.line-prev.line, -0x10, 0x0f) - if !ok { - incomplete = 1 - } - prev.line += deltaline - - // Δcol, int6 - deltacol, ok := clip(insn.col-prev.col, -0x20, 0x1f) - if !ok { - incomplete = 1 - } - prev.col += deltacol - - entry := uint16(deltapc<<12) | uint16(deltaline&0x1f)<<7 | uint16(deltacol&0x3f)<<1 | incomplete - pclinetab = append(pclinetab, entry) - if incomplete == 0 { - break - } - } - - if Disassemble { - fmt.Fprintf(os.Stderr, "\t\t\t\t\t; %s:%d:%d\n", - filepath.Base(fcomp.fn.Pos.Filename()), insn.line, insn.col) - } - } - if Disassemble { - PrintOp(fcomp.fn, pc, insn.op, insn.arg) - } - code = append(code, byte(insn.op)) - pc++ - if insn.op >= OpcodeArgMin { - if insn.op == CJMP || insn.op == ITERJMP { - code = addUint32(code, insn.arg, 4) // pad arg to 4 bytes - } else { - code = addUint32(code, insn.arg, 0) - } - pc = uint32(len(code)) - } - } - - if b.jmp != nil && b.jmp.index != b.index+1 { - addr := b.jmp.addr - if Disassemble { - fmt.Fprintf(os.Stderr, "\t%d\tjmp\t\t%d\t; block %d\n", - pc, addr, b.jmp.index) - } - code = append(code, byte(JMP)) - code = addUint32(code, addr, 4) - } - } - if len(code) != int(codelen) { - panic("internal error: wrong code length") - } - - fcomp.fn.pclinetab = pclinetab - fcomp.fn.Code = code -} - -// clip returns the value nearest x in the range [min...max], -// and whether it equals x. -func clip(x, min, max int32) (int32, bool) { - if x > max { - return max, false - } else if x < min { - return min, false - } else { - return x, true - } -} - -// addUint32 encodes x as 7-bit little-endian varint. -// TODO(adonovan): opt: steal top two bits of opcode -// to encode the number of complete bytes that follow. -func addUint32(code []byte, x uint32, min int) []byte { - end := len(code) + min - for x >= 0x80 { - code = append(code, byte(x)|0x80) - x >>= 7 - } - code = append(code, byte(x)) - // Pad the operand with NOPs to exactly min bytes. - for len(code) < end { - code = append(code, byte(NOP)) - } - return code -} - -func argLen(x uint32) int { - n := 0 - for x >= 0x80 { - n++ - x >>= 7 - } - return n + 1 -} - -// PrintOp prints an instruction. -// It is provided for debugging. -func PrintOp(fn *Funcode, pc uint32, op Opcode, arg uint32) { - if op < OpcodeArgMin { - fmt.Fprintf(os.Stderr, "\t%d\t%s\n", pc, op) - return - } - - var comment string - switch op { - case CONSTANT: - switch x := fn.Prog.Constants[arg].(type) { - case string: - comment = strconv.Quote(x) - case Bytes: - comment = "b" + strconv.Quote(string(x)) - default: - comment = fmt.Sprint(x) - } - case MAKEFUNC: - comment = fn.Prog.Functions[arg].Name - case SETLOCAL, LOCAL: - comment = fn.Locals[arg].Name - case SETGLOBAL, GLOBAL: - comment = fn.Prog.Globals[arg].Name - case ATTR, SETFIELD, PREDECLARED, UNIVERSAL: - comment = fn.Prog.Names[arg] - case FREE: - comment = fn.Freevars[arg].Name - case CALL, CALL_VAR, CALL_KW, CALL_VAR_KW: - comment = fmt.Sprintf("%d pos, %d named", arg>>8, arg&0xff) - default: - // JMP, CJMP, ITERJMP, MAKETUPLE, MAKELIST, LOAD, UNPACK: - // arg is just a number - } - var buf bytes.Buffer - fmt.Fprintf(&buf, "\t%d\t%-10s\t%d", pc, op, arg) - if comment != "" { - fmt.Fprint(&buf, "\t; ", comment) - } - fmt.Fprintln(&buf) - os.Stderr.Write(buf.Bytes()) -} - -// newBlock returns a new block. 
-func (fcomp) newBlock() *block { - return &block{index: -1, initialstack: -1} -} - -// emit emits an instruction to the current block. -func (fcomp *fcomp) emit(op Opcode) { - if op >= OpcodeArgMin { - panic("missing arg: " + op.String()) - } - insn := insn{op: op, line: fcomp.pos.Line, col: fcomp.pos.Col} - fcomp.block.insns = append(fcomp.block.insns, insn) - fcomp.pos.Line = 0 - fcomp.pos.Col = 0 -} - -// emit1 emits an instruction with an immediate operand. -func (fcomp *fcomp) emit1(op Opcode, arg uint32) { - if op < OpcodeArgMin { - panic("unwanted arg: " + op.String()) - } - insn := insn{op: op, arg: arg, line: fcomp.pos.Line, col: fcomp.pos.Col} - fcomp.block.insns = append(fcomp.block.insns, insn) - fcomp.pos.Line = 0 - fcomp.pos.Col = 0 -} - -// jump emits a jump to the specified block. -// On return, the current block is unset. -func (fcomp *fcomp) jump(b *block) { - if b == fcomp.block { - panic("self-jump") // unreachable: Starlark has no arbitrary looping constructs - } - fcomp.block.jmp = b - fcomp.block = nil -} - -// condjump emits a conditional jump (CJMP or ITERJMP) -// to the specified true/false blocks. -// (For ITERJMP, the cases are jmp/f/ok and cjmp/t/exhausted.) -// On return, the current block is unset. -func (fcomp *fcomp) condjump(op Opcode, t, f *block) { - if !(op == CJMP || op == ITERJMP) { - panic("not a conditional jump: " + op.String()) - } - fcomp.emit1(op, 0) // fill in address later - fcomp.block.cjmp = t - fcomp.jump(f) -} - -// nameIndex returns the index of the specified name -// within the name pool, adding it if necessary. -func (pcomp *pcomp) nameIndex(name string) uint32 { - index, ok := pcomp.names[name] - if !ok { - index = uint32(len(pcomp.prog.Names)) - pcomp.names[name] = index - pcomp.prog.Names = append(pcomp.prog.Names, name) - } - return index -} - -// constantIndex returns the index of the specified constant -// within the constant pool, adding it if necessary. -func (pcomp *pcomp) constantIndex(v interface{}) uint32 { - index, ok := pcomp.constants[v] - if !ok { - index = uint32(len(pcomp.prog.Constants)) - pcomp.constants[v] = index - pcomp.prog.Constants = append(pcomp.prog.Constants, v) - } - return index -} - -// functionIndex returns the index of the specified function -// AST the nestedfun pool, adding it if necessary. -func (pcomp *pcomp) functionIndex(fn *Funcode) uint32 { - index, ok := pcomp.functions[fn] - if !ok { - index = uint32(len(pcomp.prog.Functions)) - pcomp.functions[fn] = index - pcomp.prog.Functions = append(pcomp.prog.Functions, fn) - } - return index -} - -// string emits code to push the specified string. -func (fcomp *fcomp) string(s string) { - fcomp.emit1(CONSTANT, fcomp.pcomp.constantIndex(s)) -} - -// setPos sets the current source position. -// It should be called prior to any operation that can fail dynamically. -// All positions are assumed to belong to the same file. -func (fcomp *fcomp) setPos(pos syntax.Position) { - fcomp.pos = pos -} - -// set emits code to store the top-of-stack value -// to the specified local, cell, or global variable. 
-func (fcomp *fcomp) set(id *syntax.Ident) { - bind := id.Binding.(*resolve.Binding) - switch bind.Scope { - case resolve.Local: - fcomp.emit1(SETLOCAL, uint32(bind.Index)) - case resolve.Cell: - fcomp.emit1(SETLOCALCELL, uint32(bind.Index)) - case resolve.Global: - fcomp.emit1(SETGLOBAL, uint32(bind.Index)) - default: - log.Panicf("%s: set(%s): not global/local/cell (%d)", id.NamePos, id.Name, bind.Scope) - } -} - -// lookup emits code to push the value of the specified variable. -func (fcomp *fcomp) lookup(id *syntax.Ident) { - bind := id.Binding.(*resolve.Binding) - if bind.Scope != resolve.Universal { // (universal lookup can't fail) - fcomp.setPos(id.NamePos) - } - switch bind.Scope { - case resolve.Local: - fcomp.emit1(LOCAL, uint32(bind.Index)) - case resolve.Free: - fcomp.emit1(FREECELL, uint32(bind.Index)) - case resolve.Cell: - fcomp.emit1(LOCALCELL, uint32(bind.Index)) - case resolve.Global: - fcomp.emit1(GLOBAL, uint32(bind.Index)) - case resolve.Predeclared: - fcomp.emit1(PREDECLARED, fcomp.pcomp.nameIndex(id.Name)) - case resolve.Universal: - fcomp.emit1(UNIVERSAL, fcomp.pcomp.nameIndex(id.Name)) - default: - log.Panicf("%s: compiler.lookup(%s): scope = %d", id.NamePos, id.Name, bind.Scope) - } -} - -func (fcomp *fcomp) stmts(stmts []syntax.Stmt) { - for _, stmt := range stmts { - fcomp.stmt(stmt) - } -} - -func (fcomp *fcomp) stmt(stmt syntax.Stmt) { - switch stmt := stmt.(type) { - case *syntax.ExprStmt: - if _, ok := stmt.X.(*syntax.Literal); ok { - // Opt: don't compile doc comments only to pop them. - return - } - fcomp.expr(stmt.X) - fcomp.emit(POP) - - case *syntax.BranchStmt: - // Resolver invariant: break/continue appear only within loops. - switch stmt.Token { - case syntax.PASS: - // no-op - case syntax.BREAK: - b := fcomp.loops[len(fcomp.loops)-1].break_ - fcomp.jump(b) - fcomp.block = fcomp.newBlock() // dead code - case syntax.CONTINUE: - b := fcomp.loops[len(fcomp.loops)-1].continue_ - fcomp.jump(b) - fcomp.block = fcomp.newBlock() // dead code - } - - case *syntax.IfStmt: - // Keep consistent with CondExpr. - t := fcomp.newBlock() - f := fcomp.newBlock() - done := fcomp.newBlock() - - fcomp.ifelse(stmt.Cond, t, f) - - fcomp.block = t - fcomp.stmts(stmt.True) - fcomp.jump(done) - - fcomp.block = f - fcomp.stmts(stmt.False) - fcomp.jump(done) - - fcomp.block = done - - case *syntax.AssignStmt: - switch stmt.Op { - case syntax.EQ: - // simple assignment: x = y - fcomp.expr(stmt.RHS) - fcomp.assign(stmt.OpPos, stmt.LHS) - - case syntax.PLUS_EQ, - syntax.MINUS_EQ, - syntax.STAR_EQ, - syntax.SLASH_EQ, - syntax.SLASHSLASH_EQ, - syntax.PERCENT_EQ, - syntax.AMP_EQ, - syntax.PIPE_EQ, - syntax.CIRCUMFLEX_EQ, - syntax.LTLT_EQ, - syntax.GTGT_EQ: - // augmented assignment: x += y - - var set func() - - // Evaluate "address" of x exactly once to avoid duplicate side-effects. - switch lhs := unparen(stmt.LHS).(type) { - case *syntax.Ident: - // x = ... - fcomp.lookup(lhs) - set = func() { - fcomp.set(lhs) - } - - case *syntax.IndexExpr: - // x[y] = ... - fcomp.expr(lhs.X) - fcomp.expr(lhs.Y) - fcomp.emit(DUP2) - fcomp.setPos(lhs.Lbrack) - fcomp.emit(INDEX) - set = func() { - fcomp.setPos(lhs.Lbrack) - fcomp.emit(SETINDEX) - } - - case *syntax.DotExpr: - // x.f = ... 
- fcomp.expr(lhs.X) - fcomp.emit(DUP) - name := fcomp.pcomp.nameIndex(lhs.Name.Name) - fcomp.setPos(lhs.Dot) - fcomp.emit1(ATTR, name) - set = func() { - fcomp.setPos(lhs.Dot) - fcomp.emit1(SETFIELD, name) - } - - default: - panic(lhs) - } - - fcomp.expr(stmt.RHS) - - // In-place x+=y and x|=y have special semantics: - // the resulting x aliases the original x. - switch stmt.Op { - case syntax.PLUS_EQ: - fcomp.setPos(stmt.OpPos) - fcomp.emit(INPLACE_ADD) - case syntax.PIPE_EQ: - fcomp.setPos(stmt.OpPos) - fcomp.emit(INPLACE_PIPE) - default: - fcomp.binop(stmt.OpPos, stmt.Op-syntax.PLUS_EQ+syntax.PLUS) - } - set() - } - - case *syntax.DefStmt: - fcomp.function(stmt.Function.(*resolve.Function)) - fcomp.set(stmt.Name) - - case *syntax.ForStmt: - // Keep consistent with ForClause. - head := fcomp.newBlock() - body := fcomp.newBlock() - tail := fcomp.newBlock() - - fcomp.expr(stmt.X) - fcomp.setPos(stmt.For) - fcomp.emit(ITERPUSH) - fcomp.jump(head) - - fcomp.block = head - fcomp.condjump(ITERJMP, tail, body) - - fcomp.block = body - fcomp.assign(stmt.For, stmt.Vars) - fcomp.loops = append(fcomp.loops, loop{break_: tail, continue_: head}) - fcomp.stmts(stmt.Body) - fcomp.loops = fcomp.loops[:len(fcomp.loops)-1] - fcomp.jump(head) - - fcomp.block = tail - fcomp.emit(ITERPOP) - - case *syntax.WhileStmt: - head := fcomp.newBlock() - body := fcomp.newBlock() - done := fcomp.newBlock() - - fcomp.jump(head) - fcomp.block = head - fcomp.ifelse(stmt.Cond, body, done) - - fcomp.block = body - fcomp.loops = append(fcomp.loops, loop{break_: done, continue_: head}) - fcomp.stmts(stmt.Body) - fcomp.loops = fcomp.loops[:len(fcomp.loops)-1] - fcomp.jump(head) - - fcomp.block = done - - case *syntax.ReturnStmt: - if stmt.Result != nil { - fcomp.expr(stmt.Result) - } else { - fcomp.emit(NONE) - } - fcomp.emit(RETURN) - fcomp.block = fcomp.newBlock() // dead code - - case *syntax.LoadStmt: - for i := range stmt.From { - fcomp.string(stmt.From[i].Name) - } - module := stmt.Module.Value.(string) - fcomp.pcomp.prog.Loads = append(fcomp.pcomp.prog.Loads, Binding{ - Name: module, - Pos: stmt.Module.TokenPos, - }) - fcomp.string(module) - fcomp.setPos(stmt.Load) - fcomp.emit1(LOAD, uint32(len(stmt.From))) - for i := range stmt.To { - fcomp.set(stmt.To[len(stmt.To)-1-i]) - } - - default: - start, _ := stmt.Span() - log.Panicf("%s: exec: unexpected statement %T", start, stmt) - } -} - -// assign implements lhs = rhs for arbitrary expressions lhs. -// RHS is on top of stack, consumed. 
-func (fcomp *fcomp) assign(pos syntax.Position, lhs syntax.Expr) { - switch lhs := lhs.(type) { - case *syntax.ParenExpr: - // (lhs) = rhs - fcomp.assign(pos, lhs.X) - - case *syntax.Ident: - // x = rhs - fcomp.set(lhs) - - case *syntax.TupleExpr: - // x, y = rhs - fcomp.assignSequence(pos, lhs.List) - - case *syntax.ListExpr: - // [x, y] = rhs - fcomp.assignSequence(pos, lhs.List) - - case *syntax.IndexExpr: - // x[y] = rhs - fcomp.expr(lhs.X) - fcomp.emit(EXCH) - fcomp.expr(lhs.Y) - fcomp.emit(EXCH) - fcomp.setPos(lhs.Lbrack) - fcomp.emit(SETINDEX) - - case *syntax.DotExpr: - // x.f = rhs - fcomp.expr(lhs.X) - fcomp.emit(EXCH) - fcomp.setPos(lhs.Dot) - fcomp.emit1(SETFIELD, fcomp.pcomp.nameIndex(lhs.Name.Name)) - - default: - panic(lhs) - } -} - -func (fcomp *fcomp) assignSequence(pos syntax.Position, lhs []syntax.Expr) { - fcomp.setPos(pos) - fcomp.emit1(UNPACK, uint32(len(lhs))) - for i := range lhs { - fcomp.assign(pos, lhs[i]) - } -} - -func (fcomp *fcomp) expr(e syntax.Expr) { - switch e := e.(type) { - case *syntax.ParenExpr: - fcomp.expr(e.X) - - case *syntax.Ident: - fcomp.lookup(e) - - case *syntax.Literal: - // e.Value is int64, float64, *bigInt, string - v := e.Value - if e.Token == syntax.BYTES { - v = Bytes(v.(string)) - } - fcomp.emit1(CONSTANT, fcomp.pcomp.constantIndex(v)) - - case *syntax.ListExpr: - for _, x := range e.List { - fcomp.expr(x) - } - fcomp.emit1(MAKELIST, uint32(len(e.List))) - - case *syntax.CondExpr: - // Keep consistent with IfStmt. - t := fcomp.newBlock() - f := fcomp.newBlock() - done := fcomp.newBlock() - - fcomp.ifelse(e.Cond, t, f) - - fcomp.block = t - fcomp.expr(e.True) - fcomp.jump(done) - - fcomp.block = f - fcomp.expr(e.False) - fcomp.jump(done) - - fcomp.block = done - - case *syntax.IndexExpr: - fcomp.expr(e.X) - fcomp.expr(e.Y) - fcomp.setPos(e.Lbrack) - fcomp.emit(INDEX) - - case *syntax.SliceExpr: - fcomp.setPos(e.Lbrack) - fcomp.expr(e.X) - if e.Lo != nil { - fcomp.expr(e.Lo) - } else { - fcomp.emit(NONE) - } - if e.Hi != nil { - fcomp.expr(e.Hi) - } else { - fcomp.emit(NONE) - } - if e.Step != nil { - fcomp.expr(e.Step) - } else { - fcomp.emit(NONE) - } - fcomp.emit(SLICE) - - case *syntax.Comprehension: - if e.Curly { - fcomp.emit(MAKEDICT) - } else { - fcomp.emit1(MAKELIST, 0) - } - fcomp.comprehension(e, 0) - - case *syntax.TupleExpr: - fcomp.tuple(e.List) - - case *syntax.DictExpr: - fcomp.emit(MAKEDICT) - for _, entry := range e.List { - entry := entry.(*syntax.DictEntry) - fcomp.emit(DUP) - fcomp.expr(entry.Key) - fcomp.expr(entry.Value) - fcomp.setPos(entry.Colon) - fcomp.emit(SETDICTUNIQ) - } - - case *syntax.UnaryExpr: - fcomp.expr(e.X) - fcomp.setPos(e.OpPos) - switch e.Op { - case syntax.MINUS: - fcomp.emit(UMINUS) - case syntax.PLUS: - fcomp.emit(UPLUS) - case syntax.NOT: - fcomp.emit(NOT) - case syntax.TILDE: - fcomp.emit(TILDE) - default: - log.Panicf("%s: unexpected unary op: %s", e.OpPos, e.Op) - } - - case *syntax.BinaryExpr: - switch e.Op { - // short-circuit operators - // TODO(adonovan): use ifelse to simplify conditions. 
- case syntax.OR: - // x or y => if x then x else y - done := fcomp.newBlock() - y := fcomp.newBlock() - - fcomp.expr(e.X) - fcomp.emit(DUP) - fcomp.condjump(CJMP, done, y) - - fcomp.block = y - fcomp.emit(POP) // discard X - fcomp.expr(e.Y) - fcomp.jump(done) - - fcomp.block = done - - case syntax.AND: - // x and y => if x then y else x - done := fcomp.newBlock() - y := fcomp.newBlock() - - fcomp.expr(e.X) - fcomp.emit(DUP) - fcomp.condjump(CJMP, y, done) - - fcomp.block = y - fcomp.emit(POP) // discard X - fcomp.expr(e.Y) - fcomp.jump(done) - - fcomp.block = done - - case syntax.PLUS: - fcomp.plus(e) - - default: - // all other strict binary operator (includes comparisons) - fcomp.expr(e.X) - fcomp.expr(e.Y) - fcomp.binop(e.OpPos, e.Op) - } - - case *syntax.DotExpr: - fcomp.expr(e.X) - fcomp.setPos(e.Dot) - fcomp.emit1(ATTR, fcomp.pcomp.nameIndex(e.Name.Name)) - - case *syntax.CallExpr: - fcomp.call(e) - - case *syntax.LambdaExpr: - fcomp.function(e.Function.(*resolve.Function)) - - default: - start, _ := e.Span() - log.Panicf("%s: unexpected expr %T", start, e) - } -} - -type summand struct { - x syntax.Expr - plusPos syntax.Position -} - -// plus emits optimized code for ((a+b)+...)+z that avoids naive -// quadratic behavior for strings, tuples, and lists, -// and folds together adjacent literals of the same type. -func (fcomp *fcomp) plus(e *syntax.BinaryExpr) { - // Gather all the right operands of the left tree of plusses. - // A tree (((a+b)+c)+d) becomes args=[a +b +c +d]. - args := make([]summand, 0, 2) // common case: 2 operands - for plus := e; ; { - args = append(args, summand{unparen(plus.Y), plus.OpPos}) - left := unparen(plus.X) - x, ok := left.(*syntax.BinaryExpr) - if !ok || x.Op != syntax.PLUS { - args = append(args, summand{x: left}) - break - } - plus = x - } - // Reverse args to syntactic order. - for i, n := 0, len(args)/2; i < n; i++ { - j := len(args) - 1 - i - args[i], args[j] = args[j], args[i] - } - - // Fold sums of adjacent literals of the same type: ""+"", []+[], ()+(). - out := args[:0] // compact in situ - for i := 0; i < len(args); { - j := i + 1 - if code := addable(args[i].x); code != 0 { - for j < len(args) && addable(args[j].x) == code { - j++ - } - if j > i+1 { - args[i].x = add(code, args[i:j]) - } - } - out = append(out, args[i]) - i = j - } - args = out - - // Emit code for an n-ary sum (n > 0). - fcomp.expr(args[0].x) - for _, summand := range args[1:] { - fcomp.expr(summand.x) - fcomp.setPos(summand.plusPos) - fcomp.emit(PLUS) - } - - // If len(args) > 2, use of an accumulator instead of a chain of - // PLUS operations may be more efficient. - // However, no gain was measured on a workload analogous to Bazel loading; - // TODO(adonovan): opt: re-evaluate on a Bazel analysis-like workload. - // - // We cannot use a single n-ary SUM operation - // a b c SUM<3> - // because we need to report a distinct error for each - // individual '+' operation, so three additional operations are - // needed: - // - // ACCSTART => create buffer and append to it - // ACCUM => append to buffer - // ACCEND => get contents of buffer - // - // For string, list, and tuple values, the interpreter can - // optimize these operations by using a mutable buffer. - // For all other types, ACCSTART and ACCEND would behave like - // the identity function and ACCUM behaves like PLUS. - // ACCUM must correctly support user-defined operations - // such as list+foo. 
- // - // fcomp.emit(ACCSTART) - // for _, summand := range args[1:] { - // fcomp.expr(summand.x) - // fcomp.setPos(summand.plusPos) - // fcomp.emit(ACCUM) - // } - // fcomp.emit(ACCEND) -} - -// addable reports whether e is a statically addable -// expression: a [s]tring, [b]ytes, [l]ist, or [t]uple. -func addable(e syntax.Expr) rune { - switch e := e.(type) { - case *syntax.Literal: - // TODO(adonovan): opt: support INT/FLOAT/BIGINT constant folding. - switch e.Token { - case syntax.STRING: - return 's' - case syntax.BYTES: - return 'b' - } - case *syntax.ListExpr: - return 'l' - case *syntax.TupleExpr: - return 't' - } - return 0 -} - -// add returns an expression denoting the sum of args, -// which are all addable values of the type indicated by code. -// The resulting syntax is degenerate, lacking position, etc. -func add(code rune, args []summand) syntax.Expr { - switch code { - case 's', 'b': - var buf strings.Builder - for _, arg := range args { - buf.WriteString(arg.x.(*syntax.Literal).Value.(string)) - } - tok := syntax.STRING - if code == 'b' { - tok = syntax.BYTES - } - return &syntax.Literal{Token: tok, Value: buf.String()} - case 'l': - var elems []syntax.Expr - for _, arg := range args { - elems = append(elems, arg.x.(*syntax.ListExpr).List...) - } - return &syntax.ListExpr{List: elems} - case 't': - var elems []syntax.Expr - for _, arg := range args { - elems = append(elems, arg.x.(*syntax.TupleExpr).List...) - } - return &syntax.TupleExpr{List: elems} - } - panic(code) -} - -func unparen(e syntax.Expr) syntax.Expr { - if p, ok := e.(*syntax.ParenExpr); ok { - return unparen(p.X) - } - return e -} - -func (fcomp *fcomp) binop(pos syntax.Position, op syntax.Token) { - // TODO(adonovan): simplify by assuming syntax and compiler constants align. - fcomp.setPos(pos) - switch op { - // arithmetic - case syntax.PLUS: - fcomp.emit(PLUS) - case syntax.MINUS: - fcomp.emit(MINUS) - case syntax.STAR: - fcomp.emit(STAR) - case syntax.SLASH: - fcomp.emit(SLASH) - case syntax.SLASHSLASH: - fcomp.emit(SLASHSLASH) - case syntax.PERCENT: - fcomp.emit(PERCENT) - case syntax.AMP: - fcomp.emit(AMP) - case syntax.PIPE: - fcomp.emit(PIPE) - case syntax.CIRCUMFLEX: - fcomp.emit(CIRCUMFLEX) - case syntax.LTLT: - fcomp.emit(LTLT) - case syntax.GTGT: - fcomp.emit(GTGT) - case syntax.IN: - fcomp.emit(IN) - case syntax.NOT_IN: - fcomp.emit(IN) - fcomp.emit(NOT) - - // comparisons - case syntax.EQL, - syntax.NEQ, - syntax.GT, - syntax.LT, - syntax.LE, - syntax.GE: - fcomp.emit(Opcode(op-syntax.EQL) + EQL) - - default: - log.Panicf("%s: unexpected binary op: %s", pos, op) - } -} - -func (fcomp *fcomp) call(call *syntax.CallExpr) { - // TODO(adonovan): opt: Use optimized path for calling methods - // of built-ins: x.f(...) to avoid materializing a closure. - // if dot, ok := call.Fcomp.(*syntax.DotExpr); ok { - // fcomp.expr(dot.X) - // fcomp.args(call) - // fcomp.emit1(CALL_ATTR, fcomp.name(dot.Name.Name)) - // return - // } - - // usual case - fcomp.expr(call.Fn) - op, arg := fcomp.args(call) - fcomp.setPos(call.Lparen) - fcomp.emit1(op, arg) -} - -// args emits code to push a tuple of positional arguments -// and a tuple of named arguments containing alternating keys and values. -// Either or both tuples may be empty (TODO(adonovan): optimize). -func (fcomp *fcomp) args(call *syntax.CallExpr) (op Opcode, arg uint32) { - var callmode int - // Compute the number of each kind of parameter. 
- var p, n int // number of positional, named arguments - var varargs, kwargs syntax.Expr - for _, arg := range call.Args { - if binary, ok := arg.(*syntax.BinaryExpr); ok && binary.Op == syntax.EQ { - - // named argument (name, value) - fcomp.string(binary.X.(*syntax.Ident).Name) - fcomp.expr(binary.Y) - n++ - continue - } - if unary, ok := arg.(*syntax.UnaryExpr); ok { - if unary.Op == syntax.STAR { - callmode |= 1 - varargs = unary.X - continue - } else if unary.Op == syntax.STARSTAR { - callmode |= 2 - kwargs = unary.X - continue - } - } - - // positional argument - fcomp.expr(arg) - p++ - } - - // Python2 and Python3 both permit named arguments - // to appear both before and after a *args argument: - // f(1, 2, x=3, *[4], y=5, **dict(z=6)) - // - // They also differ in their evaluation order: - // Python2: 1 2 3 5 4 6 (*args and **kwargs evaluated last) - // Python3: 1 2 4 3 5 6 (positional args evaluated before named args) - // Starlark-in-Java historically used a third order: - // Lexical: 1 2 3 4 5 6 (all args evaluated left-to-right) - // - // After discussion in github.com/bazelbuild/starlark#13, the - // spec now requires Starlark to statically reject named - // arguments after *args (e.g. y=5), and to use Python2-style - // evaluation order. This is both easy to implement and - // consistent with lexical order: - // - // f(1, 2, x=3, *[4], **dict(z=6)) # 1 2 3 4 6 - - // *args - if varargs != nil { - fcomp.expr(varargs) - } - - // **kwargs - if kwargs != nil { - fcomp.expr(kwargs) - } - - // TODO(adonovan): avoid this with a more flexible encoding. - if p >= 256 || n >= 256 { - // resolve already checked this; should be unreachable - panic("too many arguments in call") - } - - return CALL + Opcode(callmode), uint32(p<<8 | n) -} - -func (fcomp *fcomp) tuple(elems []syntax.Expr) { - for _, elem := range elems { - fcomp.expr(elem) - } - fcomp.emit1(MAKETUPLE, uint32(len(elems))) -} - -func (fcomp *fcomp) comprehension(comp *syntax.Comprehension, clauseIndex int) { - if clauseIndex == len(comp.Clauses) { - fcomp.emit(DUP) // accumulator - if comp.Curly { - // dict: {k:v for ...} - // Parser ensures that body is of form k:v. - // Python-style set comprehensions {body for vars in x} - // are not supported. - entry := comp.Body.(*syntax.DictEntry) - fcomp.expr(entry.Key) - fcomp.expr(entry.Value) - fcomp.setPos(entry.Colon) - fcomp.emit(SETDICT) - } else { - // list: [body for vars in x] - fcomp.expr(comp.Body) - fcomp.emit(APPEND) - } - return - } - - clause := comp.Clauses[clauseIndex] - switch clause := clause.(type) { - case *syntax.IfClause: - t := fcomp.newBlock() - done := fcomp.newBlock() - fcomp.ifelse(clause.Cond, t, done) - - fcomp.block = t - fcomp.comprehension(comp, clauseIndex+1) - fcomp.jump(done) - - fcomp.block = done - return - - case *syntax.ForClause: - // Keep consistent with ForStmt. - head := fcomp.newBlock() - body := fcomp.newBlock() - tail := fcomp.newBlock() - - fcomp.expr(clause.X) - fcomp.setPos(clause.For) - fcomp.emit(ITERPUSH) - fcomp.jump(head) - - fcomp.block = head - fcomp.condjump(ITERJMP, tail, body) - - fcomp.block = body - fcomp.assign(clause.For, clause.Vars) - fcomp.comprehension(comp, clauseIndex+1) - fcomp.jump(head) - - fcomp.block = tail - fcomp.emit(ITERPOP) - return - } - - start, _ := clause.Span() - log.Panicf("%s: unexpected comprehension clause %T", start, clause) -} - -func (fcomp *fcomp) function(f *resolve.Function) { - // Evaluation of the defaults may fail, so record the position. 
- fcomp.setPos(f.Pos) - - // To reduce allocation, we emit a combined tuple - // for the defaults and the freevars. - // The function knows where to split it at run time. - - // Generate tuple of parameter defaults. For: - // def f(p1, p2=dp2, p3=dp3, *, k1, k2=dk2, k3, **kwargs) - // the tuple is: - // (dp2, dp3, MANDATORY, dk2, MANDATORY). - ndefaults := 0 - seenStar := false - for _, param := range f.Params { - switch param := param.(type) { - case *syntax.BinaryExpr: - fcomp.expr(param.Y) - ndefaults++ - case *syntax.UnaryExpr: - seenStar = true // * or *args (also **kwargs) - case *syntax.Ident: - if seenStar { - fcomp.emit(MANDATORY) - ndefaults++ - } - } - } - - // Capture the cells of the function's - // free variables from the lexical environment. - for _, freevar := range f.FreeVars { - // Don't call fcomp.lookup because we want - // the cell itself, not its content. - switch freevar.Scope { - case resolve.Free: - fcomp.emit1(FREE, uint32(freevar.Index)) - case resolve.Cell: - fcomp.emit1(LOCAL, uint32(freevar.Index)) - } - } - - fcomp.emit1(MAKETUPLE, uint32(ndefaults+len(f.FreeVars))) - - funcode := fcomp.pcomp.function(f.Name, f.Pos, f.Body, f.Locals, f.FreeVars) - - if debug { - // TODO(adonovan): do compilations sequentially not as a tree, - // to make the log easier to read. - // Simplify by identifying Toplevel and functionIndex 0. - fmt.Fprintf(os.Stderr, "resuming %s @ %s\n", fcomp.fn.Name, fcomp.pos) - } - - // def f(a, *, b=1) has only 2 parameters. - numParams := len(f.Params) - if f.NumKwonlyParams > 0 && !f.HasVarargs { - numParams-- - } - - funcode.NumParams = numParams - funcode.NumKwonlyParams = f.NumKwonlyParams - funcode.HasVarargs = f.HasVarargs - funcode.HasKwargs = f.HasKwargs - fcomp.emit1(MAKEFUNC, fcomp.pcomp.functionIndex(funcode)) -} - -// ifelse emits a Boolean control flow decision. -// On return, the current block is unset. -func (fcomp *fcomp) ifelse(cond syntax.Expr, t, f *block) { - switch cond := cond.(type) { - case *syntax.UnaryExpr: - if cond.Op == syntax.NOT { - // if not x then goto t else goto f - // => - // if x then goto f else goto t - fcomp.ifelse(cond.X, f, t) - return - } - - case *syntax.BinaryExpr: - switch cond.Op { - case syntax.AND: - // if x and y then goto t else goto f - // => - // if x then ifelse(y, t, f) else goto f - fcomp.expr(cond.X) - y := fcomp.newBlock() - fcomp.condjump(CJMP, y, f) - - fcomp.block = y - fcomp.ifelse(cond.Y, t, f) - return - - case syntax.OR: - // if x or y then goto t else goto f - // => - // if x then goto t else ifelse(y, t, f) - fcomp.expr(cond.X) - y := fcomp.newBlock() - fcomp.condjump(CJMP, t, y) - - fcomp.block = y - fcomp.ifelse(cond.Y, t, f) - return - case syntax.NOT_IN: - // if x not in y then goto t else goto f - // => - // if x in y then goto f else goto t - copy := *cond - copy.Op = syntax.IN - fcomp.expr(&copy) - fcomp.condjump(CJMP, f, t) - return - } - } - - // general case - fcomp.expr(cond) - fcomp.condjump(CJMP, t, f) -} diff --git a/vendor/go.starlark.net/internal/compile/serial.go b/vendor/go.starlark.net/internal/compile/serial.go deleted file mode 100644 index adadabfc2..000000000 --- a/vendor/go.starlark.net/internal/compile/serial.go +++ /dev/null @@ -1,395 +0,0 @@ -package compile - -// This file defines functions to read and write a compile.Program to a file. -// -// It is the client's responsibility to avoid version skew between the -// compiler used to produce a file and the interpreter that consumes it. -// The version number is provided as a constant. 
-// Incompatible protocol changes should also increment the version number. -// -// Encoding -// -// Program: -// "sky!" [4]byte # magic number -// str uint32le # offset of section -// version varint # must match Version -// filename string -// numloads varint -// loads []Ident -// numnames varint -// names []string -// numconsts varint -// consts []Constant -// numglobals varint -// globals []Ident -// toplevel Funcode -// numfuncs varint -// funcs []Funcode -// []byte # concatenation of all referenced strings -// EOF -// -// Funcode: -// id Ident -// code []byte -// pclinetablen varint -// pclinetab []varint -// numlocals varint -// locals []Ident -// numcells varint -// cells []int -// numfreevars varint -// freevar []Ident -// maxstack varint -// numparams varint -// numkwonlyparams varint -// hasvarargs varint (0 or 1) -// haskwargs varint (0 or 1) -// -// Ident: -// filename string -// line, col varint -// -// Constant: # type data -// type varint # 0=string string -// data ... # 1=bytes string -// # 2=int varint -// # 3=float varint (bits as uint64) -// # 4=bigint string (decimal ASCII text) -// -// The encoding starts with a four-byte magic number. -// The next four bytes are a little-endian uint32 -// that provides the offset of the string section -// at the end of the file, which contains the ordered -// concatenation of all strings referenced by the -// program. This design permits the decoder to read -// the first and second parts of the file into different -// memory allocations: the first (the encoded program) -// is transient, but the second (the strings) persists -// for the life of the Program. -// -// Within the encoded program, all strings are referred -// to by their length. As the encoder and decoder process -// the entire file sequentially, they are in lock step, -// so the start offset of each string is implicit. -// -// Program.Code is represented as a []byte slice to permit -// modification when breakpoints are set. All other strings -// are represented as strings. They all (unsafely) share the -// same backing byte slice. -// -// Aside from the str field, all integers are encoded as varints. - -import ( - "encoding/binary" - "fmt" - "math" - "math/big" - debugpkg "runtime/debug" - "unsafe" - - "go.starlark.net/syntax" -) - -const magic = "!sky" - -// Encode encodes a compiled Starlark program. -func (prog *Program) Encode() []byte { - var e encoder - e.p = append(e.p, magic...) - e.p = append(e.p, "????"...) // string data offset; filled in later - e.int(Version) - e.string(prog.Toplevel.Pos.Filename()) - e.bindings(prog.Loads) - e.int(len(prog.Names)) - for _, name := range prog.Names { - e.string(name) - } - e.int(len(prog.Constants)) - for _, c := range prog.Constants { - switch c := c.(type) { - case string: - e.int(0) - e.string(c) - case Bytes: - e.int(1) - e.string(string(c)) - case int64: - e.int(2) - e.int64(c) - case float64: - e.int(3) - e.uint64(math.Float64bits(c)) - case *big.Int: - e.int(4) - e.string(c.Text(10)) - } - } - e.bindings(prog.Globals) - e.function(prog.Toplevel) - e.int(len(prog.Functions)) - for _, fn := range prog.Functions { - e.function(fn) - } - - // Patch in the offset of the string data section. - binary.LittleEndian.PutUint32(e.p[4:8], uint32(len(e.p))) - - return append(e.p, e.s...) 
-} - -type encoder struct { - p []byte // encoded program - s []byte // strings - tmp [binary.MaxVarintLen64]byte -} - -func (e *encoder) int(x int) { - e.int64(int64(x)) -} - -func (e *encoder) int64(x int64) { - n := binary.PutVarint(e.tmp[:], x) - e.p = append(e.p, e.tmp[:n]...) -} - -func (e *encoder) uint64(x uint64) { - n := binary.PutUvarint(e.tmp[:], x) - e.p = append(e.p, e.tmp[:n]...) -} - -func (e *encoder) string(s string) { - e.int(len(s)) - e.s = append(e.s, s...) -} - -func (e *encoder) bytes(b []byte) { - e.int(len(b)) - e.s = append(e.s, b...) -} - -func (e *encoder) binding(bind Binding) { - e.string(bind.Name) - e.int(int(bind.Pos.Line)) - e.int(int(bind.Pos.Col)) -} - -func (e *encoder) bindings(binds []Binding) { - e.int(len(binds)) - for _, bind := range binds { - e.binding(bind) - } -} - -func (e *encoder) function(fn *Funcode) { - e.binding(Binding{fn.Name, fn.Pos}) - e.string(fn.Doc) - e.bytes(fn.Code) - e.int(len(fn.pclinetab)) - for _, x := range fn.pclinetab { - e.int64(int64(x)) - } - e.bindings(fn.Locals) - e.int(len(fn.Cells)) - for _, index := range fn.Cells { - e.int(index) - } - e.bindings(fn.Freevars) - e.int(fn.MaxStack) - e.int(fn.NumParams) - e.int(fn.NumKwonlyParams) - e.int(b2i(fn.HasVarargs)) - e.int(b2i(fn.HasKwargs)) -} - -func b2i(b bool) int { - if b { - return 1 - } else { - return 0 - } -} - -// DecodeProgram decodes a compiled Starlark program from data. -func DecodeProgram(data []byte) (_ *Program, err error) { - if len(data) < len(magic) { - return nil, fmt.Errorf("not a compiled module: no magic number") - } - if got := string(data[:4]); got != magic { - return nil, fmt.Errorf("not a compiled module: got magic number %q, want %q", - got, magic) - } - defer func() { - if x := recover(); x != nil { - debugpkg.PrintStack() - err = fmt.Errorf("internal error while decoding program: %v", x) - } - }() - - offset := binary.LittleEndian.Uint32(data[4:8]) - d := decoder{ - p: data[8:offset], - s: append([]byte(nil), data[offset:]...), // allocate a copy, which will persist - } - - if v := d.int(); v != Version { - return nil, fmt.Errorf("version mismatch: read %d, want %d", v, Version) - } - - filename := d.string() - d.filename = &filename - - loads := d.bindings() - - names := make([]string, d.int()) - for i := range names { - names[i] = d.string() - } - - // constants - constants := make([]interface{}, d.int()) - for i := range constants { - var c interface{} - switch d.int() { - case 0: - c = d.string() - case 1: - c = Bytes(d.string()) - case 2: - c = d.int64() - case 3: - c = math.Float64frombits(d.uint64()) - case 4: - c, _ = new(big.Int).SetString(d.string(), 10) - } - constants[i] = c - } - - globals := d.bindings() - toplevel := d.function() - funcs := make([]*Funcode, d.int()) - for i := range funcs { - funcs[i] = d.function() - } - - prog := &Program{ - Loads: loads, - Names: names, - Constants: constants, - Globals: globals, - Functions: funcs, - Toplevel: toplevel, - } - toplevel.Prog = prog - for _, f := range funcs { - f.Prog = prog - } - - if len(d.p)+len(d.s) > 0 { - return nil, fmt.Errorf("internal error: unconsumed data during decoding") - } - - return prog, nil -} - -type decoder struct { - p []byte // encoded program - s []byte // strings - filename *string // (indirect to avoid keeping decoder live) -} - -func (d *decoder) int() int { - return int(d.int64()) -} - -func (d *decoder) int64() int64 { - x, len := binary.Varint(d.p[:]) - d.p = d.p[len:] - return x -} - -func (d *decoder) uint64() uint64 { - x, len := 
binary.Uvarint(d.p[:]) - d.p = d.p[len:] - return x -} - -func (d *decoder) string() (s string) { - if slice := d.bytes(); len(slice) > 0 { - // Avoid a memory allocation for each string - // by unsafely aliasing slice. - type string struct { - data *byte - len int - } - ptr := (*string)(unsafe.Pointer(&s)) - ptr.data = &slice[0] - ptr.len = len(slice) - } - return s -} - -func (d *decoder) bytes() []byte { - len := d.int() - r := d.s[:len:len] - d.s = d.s[len:] - return r -} - -func (d *decoder) binding() Binding { - name := d.string() - line := int32(d.int()) - col := int32(d.int()) - return Binding{Name: name, Pos: syntax.MakePosition(d.filename, line, col)} -} - -func (d *decoder) bindings() []Binding { - bindings := make([]Binding, d.int()) - for i := range bindings { - bindings[i] = d.binding() - } - return bindings -} - -func (d *decoder) ints() []int { - ints := make([]int, d.int()) - for i := range ints { - ints[i] = d.int() - } - return ints -} - -func (d *decoder) bool() bool { return d.int() != 0 } - -func (d *decoder) function() *Funcode { - id := d.binding() - doc := d.string() - code := d.bytes() - pclinetab := make([]uint16, d.int()) - for i := range pclinetab { - pclinetab[i] = uint16(d.int()) - } - locals := d.bindings() - cells := d.ints() - freevars := d.bindings() - maxStack := d.int() - numParams := d.int() - numKwonlyParams := d.int() - hasVarargs := d.int() != 0 - hasKwargs := d.int() != 0 - return &Funcode{ - // Prog is filled in later. - Pos: id.Pos, - Name: id.Name, - Doc: doc, - Code: code, - pclinetab: pclinetab, - Locals: locals, - Cells: cells, - Freevars: freevars, - MaxStack: maxStack, - NumParams: numParams, - NumKwonlyParams: numKwonlyParams, - HasVarargs: hasVarargs, - HasKwargs: hasKwargs, - } -} diff --git a/vendor/go.starlark.net/internal/spell/spell.go b/vendor/go.starlark.net/internal/spell/spell.go deleted file mode 100644 index 7739fabaa..000000000 --- a/vendor/go.starlark.net/internal/spell/spell.go +++ /dev/null @@ -1,115 +0,0 @@ -// Package spell file defines a simple spelling checker for use in attribute errors -// such as "no such field .foo; did you mean .food?". -package spell - -import ( - "strings" - "unicode" -) - -// Nearest returns the element of candidates -// nearest to x using the Levenshtein metric, -// or "" if none were promising. -func Nearest(x string, candidates []string) string { - // Ignore underscores and case when matching. - fold := func(s string) string { - return strings.Map(func(r rune) rune { - if r == '_' { - return -1 - } - return unicode.ToLower(r) - }, s) - } - - x = fold(x) - - var best string - bestD := (len(x) + 1) / 2 // allow up to 50% typos - for _, c := range candidates { - d := levenshtein(x, fold(c), bestD) - if d < bestD { - bestD = d - best = c - } - } - return best -} - -// levenshtein returns the non-negative Levenshtein edit distance -// between the byte strings x and y. -// -// If the computed distance exceeds max, -// the function may return early with an approximate value > max. -func levenshtein(x, y string, max int) int { - // This implementation is derived from one by Laurent Le Brun in - // Bazel that uses the single-row space efficiency trick - // described at bitbucket.org/clearer/iosifovich. - - // Let x be the shorter string. - if len(x) > len(y) { - x, y = y, x - } - - // Remove common prefix. 
- for i := 0; i < len(x); i++ { - if x[i] != y[i] { - x = x[i:] - y = y[i:] - break - } - } - if x == "" { - return len(y) - } - - if d := abs(len(x) - len(y)); d > max { - return d // excessive length divergence - } - - row := make([]int, len(y)+1) - for i := range row { - row[i] = i - } - - for i := 1; i <= len(x); i++ { - row[0] = i - best := i - prev := i - 1 - for j := 1; j <= len(y); j++ { - a := prev + b2i(x[i-1] != y[j-1]) // substitution - b := 1 + row[j-1] // deletion - c := 1 + row[j] // insertion - k := min(a, min(b, c)) - prev, row[j] = row[j], k - best = min(best, k) - } - if best > max { - return best - } - } - return row[len(y)] -} - -func b2i(b bool) int { - if b { - return 1 - } else { - return 0 - } -} - -func min(x, y int) int { - if x < y { - return x - } else { - return y - } -} - -func abs(x int) int { - if x >= 0 { - return x - } else { - return -x - } -} diff --git a/vendor/go.starlark.net/resolve/binding.go b/vendor/go.starlark.net/resolve/binding.go deleted file mode 100644 index 6b99f4b97..000000000 --- a/vendor/go.starlark.net/resolve/binding.go +++ /dev/null @@ -1,74 +0,0 @@ -// Copyright 2019 The Bazel Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package resolve - -import "go.starlark.net/syntax" - -// This file defines resolver data types saved in the syntax tree. -// We cannot guarantee API stability for these types -// as they are closely tied to the implementation. - -// A Binding contains resolver information about an identifer. -// The resolver populates the Binding field of each syntax.Identifier. -// The Binding ties together all identifiers that denote the same variable. -type Binding struct { - Scope Scope - - // Index records the index into the enclosing - // - {DefStmt,File}.Locals, if Scope==Local - // - DefStmt.FreeVars, if Scope==Free - // - File.Globals, if Scope==Global. - // It is zero if Scope is Predeclared, Universal, or Undefined. - Index int - - First *syntax.Ident // first binding use (iff Scope==Local/Free/Global) -} - -// The Scope of Binding indicates what kind of scope it has. -type Scope uint8 - -const ( - Undefined Scope = iota // name is not defined - Local // name is local to its function or file - Cell // name is function-local but shared with a nested function - Free // name is cell of some enclosing function - Global // name is global to module - Predeclared // name is predeclared for this module (e.g. glob) - Universal // name is universal (e.g. len) -) - -var scopeNames = [...]string{ - Undefined: "undefined", - Local: "local", - Cell: "cell", - Free: "free", - Global: "global", - Predeclared: "predeclared", - Universal: "universal", -} - -func (scope Scope) String() string { return scopeNames[scope] } - -// A Module contains resolver information about a file. -// The resolver populates the Module field of each syntax.File. -type Module struct { - Locals []*Binding // the file's (comprehension-)local variables - Globals []*Binding // the file's global variables -} - -// A Function contains resolver information about a named or anonymous function. -// The resolver populates the Function field of each syntax.DefStmt and syntax.LambdaExpr. 
-type Function struct { - Pos syntax.Position // of DEF or LAMBDA - Name string // name of def, or "lambda" - Params []syntax.Expr // param = ident | ident=expr | * | *ident | **ident - Body []syntax.Stmt // contains synthetic 'return expr' for lambda - - HasVarargs bool // whether params includes *args (convenience) - HasKwargs bool // whether params includes **kwargs (convenience) - NumKwonlyParams int // number of keyword-only optional parameters - Locals []*Binding // this function's local/cell variables, parameters first - FreeVars []*Binding // enclosing cells to capture in closure -} diff --git a/vendor/go.starlark.net/resolve/resolve.go b/vendor/go.starlark.net/resolve/resolve.go deleted file mode 100644 index 09b9acdea..000000000 --- a/vendor/go.starlark.net/resolve/resolve.go +++ /dev/null @@ -1,969 +0,0 @@ -// Copyright 2017 The Bazel Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package resolve defines a name-resolution pass for Starlark abstract -// syntax trees. -// -// The resolver sets the Locals and FreeVars arrays of each DefStmt and -// the LocalIndex field of each syntax.Ident that refers to a local or -// free variable. It also sets the Locals array of a File for locals -// bound by top-level comprehensions and load statements. -// Identifiers for global variables do not get an index. -package resolve // import "go.starlark.net/resolve" - -// All references to names are statically resolved. Names may be -// predeclared, global, or local to a function or file. -// File-local variables include those bound by top-level comprehensions -// and by load statements. ("Top-level" means "outside of any function".) -// The resolver maps each global name to a small integer and each local -// name to a small integer; these integers enable a fast and compact -// representation of globals and locals in the evaluator. -// -// As an optimization, the resolver classifies each predeclared name as -// either universal (e.g. None, len) or per-module (e.g. glob in Bazel's -// build language), enabling the evaluator to share the representation -// of the universal environment across all modules. -// -// The lexical environment is a tree of blocks with the file block at -// its root. The file's child blocks may be of two kinds: functions -// and comprehensions, and these may have further children of either -// kind. -// -// Python-style resolution requires multiple passes because a name is -// determined to be local to a function only if the function contains a -// "binding" use of it; similarly, a name is determined to be global (as -// opposed to predeclared) if the module contains a top-level binding use. -// Unlike ordinary top-level assignments, the bindings created by load -// statements are local to the file block. -// A non-binding use may lexically precede the binding to which it is resolved. -// In the first pass, we inspect each function, recording in -// 'uses' each identifier and the environment block in which it occurs. -// If a use of a name is binding, such as a function parameter or -// assignment, we add the name to the block's bindings mapping and add a -// local variable to the enclosing function. -// -// As we finish resolving each function, we inspect all the uses within -// that function and discard ones that were found to be function-local. 
The -// remaining ones must be either free (local to some lexically enclosing -// function), or top-level (global, predeclared, or file-local), but we cannot tell -// which until we have finished inspecting the outermost enclosing -// function. At that point, we can distinguish local from top-level names -// (and this is when Python would compute free variables). -// -// However, Starlark additionally requires that all references to global -// names are satisfied by some declaration in the current module; -// Starlark permits a function to forward-reference a global or file-local -// that has not -// been declared yet so long as it is declared before the end of the -// module. So, instead of re-resolving the unresolved references after -// each top-level function, we defer this until the end of the module -// and ensure that all such references are satisfied by some definition. -// -// At the end of the module, we visit each of the nested function blocks -// in bottom-up order, doing a recursive lexical lookup for each -// unresolved name. If the name is found to be local to some enclosing -// function, we must create a DefStmt.FreeVar (capture) parameter for -// each intervening function. We enter these synthetic bindings into -// the bindings map so that we create at most one freevar per name. If -// the name was not local, we check that it was defined at module level. -// -// We resolve all uses of locals in the module (due to load statements -// and comprehensions) in a similar way and compute the file's set of -// local variables. -// -// Starlark enforces that all global names are assigned at most once on -// all control flow paths by forbidding if/else statements and loops at -// top level. A global may be used before it is defined, leading to a -// dynamic error. However, the AllowGlobalReassign flag (really: allow -// top-level reassign) makes the resolver allow multiple to a variable -// at top-level. It also allows if-, for-, and while-loops at top-level, -// which in turn may make the evaluator dynamically assign multiple -// values to a variable at top-level. (These two roles should be separated.) - -import ( - "fmt" - "log" - "sort" - "strings" - - "go.starlark.net/internal/spell" - "go.starlark.net/syntax" -) - -const debug = false -const doesnt = "this Starlark dialect does not " - -// global options -// These features are either not standard Starlark (yet), or deprecated -// features of the BUILD language, so we put them behind flags. -var ( - AllowSet = false // allow the 'set' built-in - AllowGlobalReassign = false // allow reassignment to top-level names; also, allow if/for/while at top-level - AllowRecursion = false // allow while statements and recursive functions - LoadBindsGlobally = false // load creates global not file-local bindings (deprecated) - - // obsolete flags for features that are now standard. No effect. - AllowNestedDef = true - AllowLambda = true - AllowFloat = true - AllowBitwise = true -) - -// File resolves the specified file and records information about the -// module in file.Module. -// -// The isPredeclared and isUniversal predicates report whether a name is -// a pre-declared identifier (visible in the current module) or a -// universal identifier (visible in every module). -// Clients should typically pass predeclared.Has for the first and -// starlark.Universe.Has for the second, where predeclared is the -// module's StringDict of predeclared names and starlark.Universe is the -// standard set of built-ins. 
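[editor's aside, not part of the change] Roughly how a host drives this resolver with the two predicates just described, against the API as it appears in the file being removed; the file name, source text, and the "glob" predeclared name are invented for illustration:

package main

import (
	"fmt"

	"go.starlark.net/resolve"
	"go.starlark.net/syntax"
)

func main() {
	f, err := syntax.Parse("example.star", "x = len([1, 2, 3])\ny = x + 1\n", 0)
	if err != nil {
		panic(err)
	}

	// isPredeclared: names the host injects into this module only.
	// isUniversal: names available in every module (len, None, ...).
	isPredeclared := func(name string) bool { return name == "glob" } // hypothetical host builtin
	isUniversal := func(name string) bool {
		switch name {
		case "len", "None", "True", "False", "print":
			return true
		}
		return false
	}

	if err := resolve.File(f, isPredeclared, isUniversal); err != nil {
		panic(err)
	}

	mod := f.Module.(*resolve.Module)
	fmt.Println("globals:", len(mod.Globals), "locals:", len(mod.Locals))
}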
-// The isUniverse predicate is supplied a parameter to avoid a cyclic -// dependency upon starlark.Universe, not because users should ever need -// to redefine it. -func File(file *syntax.File, isPredeclared, isUniversal func(name string) bool) error { - return REPLChunk(file, nil, isPredeclared, isUniversal) -} - -// REPLChunk is a generalization of the File function that supports a -// non-empty initial global block, as occurs in a REPL. -func REPLChunk(file *syntax.File, isGlobal, isPredeclared, isUniversal func(name string) bool) error { - r := newResolver(isGlobal, isPredeclared, isUniversal) - r.stmts(file.Stmts) - - r.env.resolveLocalUses() - - // At the end of the module, resolve all non-local variable references, - // computing closures. - // Function bodies may contain forward references to later global declarations. - r.resolveNonLocalUses(r.env) - - file.Module = &Module{ - Locals: r.moduleLocals, - Globals: r.moduleGlobals, - } - - if len(r.errors) > 0 { - return r.errors - } - return nil -} - -// Expr resolves the specified expression. -// It returns the local variables bound within the expression. -// -// The isPredeclared and isUniversal predicates behave as for the File function. -func Expr(expr syntax.Expr, isPredeclared, isUniversal func(name string) bool) ([]*Binding, error) { - r := newResolver(nil, isPredeclared, isUniversal) - r.expr(expr) - r.env.resolveLocalUses() - r.resolveNonLocalUses(r.env) // globals & universals - if len(r.errors) > 0 { - return nil, r.errors - } - return r.moduleLocals, nil -} - -// An ErrorList is a non-empty list of resolver error messages. -type ErrorList []Error // len > 0 - -func (e ErrorList) Error() string { return e[0].Error() } - -// An Error describes the nature and position of a resolver error. -type Error struct { - Pos syntax.Position - Msg string -} - -func (e Error) Error() string { return e.Pos.String() + ": " + e.Msg } - -func newResolver(isGlobal, isPredeclared, isUniversal func(name string) bool) *resolver { - file := new(block) - return &resolver{ - file: file, - env: file, - isGlobal: isGlobal, - isPredeclared: isPredeclared, - isUniversal: isUniversal, - globals: make(map[string]*Binding), - predeclared: make(map[string]*Binding), - } -} - -type resolver struct { - // env is the current local environment: - // a linked list of blocks, innermost first. - // The tail of the list is the file block. - env *block - file *block // file block (contains load bindings) - - // moduleLocals contains the local variables of the module - // (due to load statements and comprehensions outside any function). - // moduleGlobals contains the global variables of the module. - moduleLocals []*Binding - moduleGlobals []*Binding - - // globals maps each global name in the module to its binding. - // predeclared does the same for predeclared and universal names. - globals map[string]*Binding - predeclared map[string]*Binding - - // These predicates report whether a name is - // pre-declared, either in this module or universally, - // or already declared in the module globals (as in a REPL). - // isGlobal may be nil. - isGlobal, isPredeclared, isUniversal func(name string) bool - - loops int // number of enclosing for/while loops - ifstmts int // number of enclosing if statements loops - - errors ErrorList -} - -// container returns the innermost enclosing "container" block: -// a function (function != nil) or file (function == nil). -// Container blocks accumulate local variable bindings. 
-func (r *resolver) container() *block { - for b := r.env; ; b = b.parent { - if b.function != nil || b == r.file { - return b - } - } -} - -func (r *resolver) push(b *block) { - r.env.children = append(r.env.children, b) - b.parent = r.env - r.env = b -} - -func (r *resolver) pop() { r.env = r.env.parent } - -type block struct { - parent *block // nil for file block - - // In the file (root) block, both these fields are nil. - function *Function // only for function blocks - comp *syntax.Comprehension // only for comprehension blocks - - // bindings maps a name to its binding. - // A local binding has an index into its innermost enclosing container's locals array. - // A free binding has an index into its innermost enclosing function's freevars array. - bindings map[string]*Binding - - // children records the child blocks of the current one. - children []*block - - // uses records all identifiers seen in this container (function or file), - // and a reference to the environment in which they appear. - // As we leave each container block, we resolve them, - // so that only free and global ones remain. - // At the end of each top-level function we compute closures. - uses []use -} - -func (b *block) bind(name string, bind *Binding) { - if b.bindings == nil { - b.bindings = make(map[string]*Binding) - } - b.bindings[name] = bind -} - -func (b *block) String() string { - if b.function != nil { - return "function block at " + fmt.Sprint(b.function.Pos) - } - if b.comp != nil { - return "comprehension block at " + fmt.Sprint(b.comp.Span()) - } - return "file block" -} - -func (r *resolver) errorf(posn syntax.Position, format string, args ...interface{}) { - r.errors = append(r.errors, Error{posn, fmt.Sprintf(format, args...)}) -} - -// A use records an identifier and the environment in which it appears. -type use struct { - id *syntax.Ident - env *block -} - -// bind creates a binding for id: a global (not file-local) -// binding at top-level, a local binding otherwise. -// At top-level, it reports an error if a global or file-local -// binding already exists, unless AllowGlobalReassign. -// It sets id.Binding to the binding (whether old or new), -// and returns whether a binding already existed. -func (r *resolver) bind(id *syntax.Ident) bool { - // Binding outside any local (comprehension/function) block? - if r.env == r.file { - bind, ok := r.file.bindings[id.Name] - if !ok { - bind, ok = r.globals[id.Name] - if !ok { - // first global binding of this name - bind = &Binding{ - First: id, - Scope: Global, - Index: len(r.moduleGlobals), - } - r.globals[id.Name] = bind - r.moduleGlobals = append(r.moduleGlobals, bind) - } - } - if ok && !AllowGlobalReassign { - r.errorf(id.NamePos, "cannot reassign %s %s declared at %s", - bind.Scope, id.Name, bind.First.NamePos) - } - id.Binding = bind - return ok - } - - return r.bindLocal(id) -} - -func (r *resolver) bindLocal(id *syntax.Ident) bool { - // Mark this name as local to current block. - // Assign it a new local (positive) index in the current container. 
- _, ok := r.env.bindings[id.Name] - if !ok { - var locals *[]*Binding - if fn := r.container().function; fn != nil { - locals = &fn.Locals - } else { - locals = &r.moduleLocals - } - bind := &Binding{ - First: id, - Scope: Local, - Index: len(*locals), - } - r.env.bind(id.Name, bind) - *locals = append(*locals, bind) - } - - r.use(id) - return ok -} - -func (r *resolver) use(id *syntax.Ident) { - use := use{id, r.env} - - // The spec says that if there is a global binding of a name - // then all references to that name in that block refer to the - // global, even if the use precedes the def---just as for locals. - // For example, in this code, - // - // print(len); len=1; print(len) - // - // both occurrences of len refer to the len=1 binding, which - // completely shadows the predeclared len function. - // - // The rationale for these semantics, which differ from Python, - // is that the static meaning of len (a reference to a global) - // does not change depending on where it appears in the file. - // Of course, its dynamic meaning does change, from an error - // into a valid reference, so it's not clear these semantics - // have any practical advantage. - // - // In any case, the Bazel implementation lags behind the spec - // and follows Python behavior, so the first use of len refers - // to the predeclared function. This typically used in a BUILD - // file that redefines a predeclared name half way through, - // for example: - // - // proto_library(...) # built-in rule - // load("myproto.bzl", "proto_library") - // proto_library(...) # user-defined rule - // - // We will piggyback support for the legacy semantics on the - // AllowGlobalReassign flag, which is loosely related and also - // required for Bazel. - if AllowGlobalReassign && r.env == r.file { - r.useToplevel(use) - return - } - - b := r.container() - b.uses = append(b.uses, use) -} - -// useToplevel resolves use.id as a reference to a name visible at top-level. -// The use.env field captures the original environment for error reporting. -func (r *resolver) useToplevel(use use) (bind *Binding) { - id := use.id - - if prev, ok := r.file.bindings[id.Name]; ok { - // use of load-defined name in file block - bind = prev - } else if prev, ok := r.globals[id.Name]; ok { - // use of global declared by module - bind = prev - } else if r.isGlobal != nil && r.isGlobal(id.Name) { - // use of global defined in a previous REPL chunk - bind = &Binding{ - First: id, // wrong: this is not even a binding use - Scope: Global, - Index: len(r.moduleGlobals), - } - r.globals[id.Name] = bind - r.moduleGlobals = append(r.moduleGlobals, bind) - } else if prev, ok := r.predeclared[id.Name]; ok { - // repeated use of predeclared or universal - bind = prev - } else if r.isPredeclared(id.Name) { - // use of pre-declared name - bind = &Binding{Scope: Predeclared} - r.predeclared[id.Name] = bind // save it - } else if r.isUniversal(id.Name) { - // use of universal name - if !AllowSet && id.Name == "set" { - r.errorf(id.NamePos, doesnt+"support sets") - } - bind = &Binding{Scope: Universal} - r.predeclared[id.Name] = bind // save it - } else { - bind = &Binding{Scope: Undefined} - var hint string - if n := r.spellcheck(use); n != "" { - hint = fmt.Sprintf(" (did you mean %s?)", n) - } - r.errorf(id.NamePos, "undefined: %s%s", id.Name, hint) - } - id.Binding = bind - return bind -} - -// spellcheck returns the most likely misspelling of -// the name use.id in the environment use.env. 
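[editor's aside, not part of the change] The spellcheck hook above only draws candidates from enumerable sets (locals and module globals), which is what produces hints like the one below for a misspelled global. A small sketch through the public API; the file name is invented and the exact message wording is not guaranteed:

package main

import (
	"fmt"

	"go.starlark.net/starlark"
)

const src = `
def greeting(name):
    return "hello " + name

x = greetng("world")
`

func main() {
	thread := &starlark.Thread{Name: "typo"}
	_, err := starlark.ExecFile(thread, "typo.star", src, nil)
	fmt.Println(err)
	// Expected: a resolve error along the lines of
	//   typo.star:5:5: undefined: greetng (did you mean greeting?)
}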
-func (r *resolver) spellcheck(use use) string { - var names []string - - // locals - for b := use.env; b != nil; b = b.parent { - for name := range b.bindings { - names = append(names, name) - } - } - - // globals - // - // We have no way to enumerate the sets whose membership - // tests are isPredeclared, isUniverse, and isGlobal, - // which includes prior names in the REPL session. - for _, bind := range r.moduleGlobals { - names = append(names, bind.First.Name) - } - - sort.Strings(names) - return spell.Nearest(use.id.Name, names) -} - -// resolveLocalUses is called when leaving a container (function/module) -// block. It resolves all uses of locals/cells within that block. -func (b *block) resolveLocalUses() { - unresolved := b.uses[:0] - for _, use := range b.uses { - if bind := lookupLocal(use); bind != nil && (bind.Scope == Local || bind.Scope == Cell) { - use.id.Binding = bind - } else { - unresolved = append(unresolved, use) - } - } - b.uses = unresolved -} - -func (r *resolver) stmts(stmts []syntax.Stmt) { - for _, stmt := range stmts { - r.stmt(stmt) - } -} - -func (r *resolver) stmt(stmt syntax.Stmt) { - switch stmt := stmt.(type) { - case *syntax.ExprStmt: - r.expr(stmt.X) - - case *syntax.BranchStmt: - if r.loops == 0 && (stmt.Token == syntax.BREAK || stmt.Token == syntax.CONTINUE) { - r.errorf(stmt.TokenPos, "%s not in a loop", stmt.Token) - } - - case *syntax.IfStmt: - if !AllowGlobalReassign && r.container().function == nil { - r.errorf(stmt.If, "if statement not within a function") - } - r.expr(stmt.Cond) - r.ifstmts++ - r.stmts(stmt.True) - r.stmts(stmt.False) - r.ifstmts-- - - case *syntax.AssignStmt: - r.expr(stmt.RHS) - isAugmented := stmt.Op != syntax.EQ - r.assign(stmt.LHS, isAugmented) - - case *syntax.DefStmt: - r.bind(stmt.Name) - fn := &Function{ - Name: stmt.Name.Name, - Pos: stmt.Def, - Params: stmt.Params, - Body: stmt.Body, - } - stmt.Function = fn - r.function(fn, stmt.Def) - - case *syntax.ForStmt: - if !AllowGlobalReassign && r.container().function == nil { - r.errorf(stmt.For, "for loop not within a function") - } - r.expr(stmt.X) - const isAugmented = false - r.assign(stmt.Vars, isAugmented) - r.loops++ - r.stmts(stmt.Body) - r.loops-- - - case *syntax.WhileStmt: - if !AllowRecursion { - r.errorf(stmt.While, doesnt+"support while loops") - } - if !AllowGlobalReassign && r.container().function == nil { - r.errorf(stmt.While, "while loop not within a function") - } - r.expr(stmt.Cond) - r.loops++ - r.stmts(stmt.Body) - r.loops-- - - case *syntax.ReturnStmt: - if r.container().function == nil { - r.errorf(stmt.Return, "return statement not within a function") - } - if stmt.Result != nil { - r.expr(stmt.Result) - } - - case *syntax.LoadStmt: - // A load statement may not be nested in any other statement. - if r.container().function != nil { - r.errorf(stmt.Load, "load statement within a function") - } else if r.loops > 0 { - r.errorf(stmt.Load, "load statement within a loop") - } else if r.ifstmts > 0 { - r.errorf(stmt.Load, "load statement within a conditional") - } - - for i, from := range stmt.From { - if from.Name == "" { - r.errorf(from.NamePos, "load: empty identifier") - continue - } - if from.Name[0] == '_' { - r.errorf(from.NamePos, "load: names with leading underscores are not exported: %s", from.Name) - } - - id := stmt.To[i] - if LoadBindsGlobally { - r.bind(id) - } else if r.bindLocal(id) && !AllowGlobalReassign { - // "Global" in AllowGlobalReassign is a misnomer for "toplevel". 
- // Sadly we can't report the previous declaration - // as id.Binding may not be set yet. - r.errorf(id.NamePos, "cannot reassign top-level %s", id.Name) - } - } - - default: - log.Panicf("unexpected stmt %T", stmt) - } -} - -func (r *resolver) assign(lhs syntax.Expr, isAugmented bool) { - switch lhs := lhs.(type) { - case *syntax.Ident: - // x = ... - r.bind(lhs) - - case *syntax.IndexExpr: - // x[i] = ... - r.expr(lhs.X) - r.expr(lhs.Y) - - case *syntax.DotExpr: - // x.f = ... - r.expr(lhs.X) - - case *syntax.TupleExpr: - // (x, y) = ... - if isAugmented { - r.errorf(syntax.Start(lhs), "can't use tuple expression in augmented assignment") - } - for _, elem := range lhs.List { - r.assign(elem, isAugmented) - } - - case *syntax.ListExpr: - // [x, y, z] = ... - if isAugmented { - r.errorf(syntax.Start(lhs), "can't use list expression in augmented assignment") - } - for _, elem := range lhs.List { - r.assign(elem, isAugmented) - } - - case *syntax.ParenExpr: - r.assign(lhs.X, isAugmented) - - default: - name := strings.ToLower(strings.TrimPrefix(fmt.Sprintf("%T", lhs), "*syntax.")) - r.errorf(syntax.Start(lhs), "can't assign to %s", name) - } -} - -func (r *resolver) expr(e syntax.Expr) { - switch e := e.(type) { - case *syntax.Ident: - r.use(e) - - case *syntax.Literal: - - case *syntax.ListExpr: - for _, x := range e.List { - r.expr(x) - } - - case *syntax.CondExpr: - r.expr(e.Cond) - r.expr(e.True) - r.expr(e.False) - - case *syntax.IndexExpr: - r.expr(e.X) - r.expr(e.Y) - - case *syntax.DictEntry: - r.expr(e.Key) - r.expr(e.Value) - - case *syntax.SliceExpr: - r.expr(e.X) - if e.Lo != nil { - r.expr(e.Lo) - } - if e.Hi != nil { - r.expr(e.Hi) - } - if e.Step != nil { - r.expr(e.Step) - } - - case *syntax.Comprehension: - // The 'in' operand of the first clause (always a ForClause) - // is resolved in the outer block; consider: [x for x in x]. - clause := e.Clauses[0].(*syntax.ForClause) - r.expr(clause.X) - - // A list/dict comprehension defines a new lexical block. - // Locals defined within the block will be allotted - // distinct slots in the locals array of the innermost - // enclosing container (function/module) block. 
- r.push(&block{comp: e}) - - const isAugmented = false - r.assign(clause.Vars, isAugmented) - - for _, clause := range e.Clauses[1:] { - switch clause := clause.(type) { - case *syntax.IfClause: - r.expr(clause.Cond) - case *syntax.ForClause: - r.assign(clause.Vars, isAugmented) - r.expr(clause.X) - } - } - r.expr(e.Body) // body may be *DictEntry - r.pop() - - case *syntax.TupleExpr: - for _, x := range e.List { - r.expr(x) - } - - case *syntax.DictExpr: - for _, entry := range e.List { - entry := entry.(*syntax.DictEntry) - r.expr(entry.Key) - r.expr(entry.Value) - } - - case *syntax.UnaryExpr: - r.expr(e.X) - - case *syntax.BinaryExpr: - r.expr(e.X) - r.expr(e.Y) - - case *syntax.DotExpr: - r.expr(e.X) - // ignore e.Name - - case *syntax.CallExpr: - r.expr(e.Fn) - var seenVarargs, seenKwargs bool - var seenName map[string]bool - var n, p int - for _, arg := range e.Args { - pos, _ := arg.Span() - if unop, ok := arg.(*syntax.UnaryExpr); ok && unop.Op == syntax.STARSTAR { - // **kwargs - if seenKwargs { - r.errorf(pos, "multiple **kwargs not allowed") - } - seenKwargs = true - r.expr(arg) - } else if ok && unop.Op == syntax.STAR { - // *args - if seenKwargs { - r.errorf(pos, "*args may not follow **kwargs") - } else if seenVarargs { - r.errorf(pos, "multiple *args not allowed") - } - seenVarargs = true - r.expr(arg) - } else if binop, ok := arg.(*syntax.BinaryExpr); ok && binop.Op == syntax.EQ { - // k=v - n++ - if seenKwargs { - r.errorf(pos, "keyword argument may not follow **kwargs") - } else if seenVarargs { - r.errorf(pos, "keyword argument may not follow *args") - } - x := binop.X.(*syntax.Ident) - if seenName[x.Name] { - r.errorf(x.NamePos, "keyword argument %q is repeated", x.Name) - } else { - if seenName == nil { - seenName = make(map[string]bool) - } - seenName[x.Name] = true - } - r.expr(binop.Y) - } else { - // positional argument - p++ - if seenVarargs { - r.errorf(pos, "positional argument may not follow *args") - } else if seenKwargs { - r.errorf(pos, "positional argument may not follow **kwargs") - } else if len(seenName) > 0 { - r.errorf(pos, "positional argument may not follow named") - } - r.expr(arg) - } - } - - // Fail gracefully if compiler-imposed limit is exceeded. - if p >= 256 { - pos, _ := e.Span() - r.errorf(pos, "%v positional arguments in call, limit is 255", p) - } - if n >= 256 { - pos, _ := e.Span() - r.errorf(pos, "%v keyword arguments in call, limit is 255", n) - } - - case *syntax.LambdaExpr: - fn := &Function{ - Name: "lambda", - Pos: e.Lambda, - Params: e.Params, - Body: []syntax.Stmt{&syntax.ReturnStmt{Result: e.Body}}, - } - e.Function = fn - r.function(fn, e.Lambda) - - case *syntax.ParenExpr: - r.expr(e.X) - - default: - log.Panicf("unexpected expr %T", e) - } -} - -func (r *resolver) function(function *Function, pos syntax.Position) { - // Resolve defaults in enclosing environment. - for _, param := range function.Params { - if binary, ok := param.(*syntax.BinaryExpr); ok { - r.expr(binary.Y) - } - } - - // Enter function block. - b := &block{function: function} - r.push(b) - - var seenOptional bool - var star *syntax.UnaryExpr // * or *args param - var starStar *syntax.Ident // **kwargs ident - var numKwonlyParams int - for _, param := range function.Params { - switch param := param.(type) { - case *syntax.Ident: - // e.g. 
x - if starStar != nil { - r.errorf(param.NamePos, "required parameter may not follow **%s", starStar.Name) - } else if star != nil { - numKwonlyParams++ - } else if seenOptional { - r.errorf(param.NamePos, "required parameter may not follow optional") - } - if r.bind(param) { - r.errorf(param.NamePos, "duplicate parameter: %s", param.Name) - } - - case *syntax.BinaryExpr: - // e.g. y=dflt - if starStar != nil { - r.errorf(param.OpPos, "optional parameter may not follow **%s", starStar.Name) - } else if star != nil { - numKwonlyParams++ - } - if id := param.X.(*syntax.Ident); r.bind(id) { - r.errorf(param.OpPos, "duplicate parameter: %s", id.Name) - } - seenOptional = true - - case *syntax.UnaryExpr: - // * or *args or **kwargs - if param.Op == syntax.STAR { - if starStar != nil { - r.errorf(param.OpPos, "* parameter may not follow **%s", starStar.Name) - } else if star != nil { - r.errorf(param.OpPos, "multiple * parameters not allowed") - } else { - star = param - } - } else { - if starStar != nil { - r.errorf(param.OpPos, "multiple ** parameters not allowed") - } - starStar = param.X.(*syntax.Ident) - } - } - } - - // Bind the *args and **kwargs parameters at the end, - // so that regular parameters a/b/c are contiguous and - // there is no hole for the "*": - // def f(a, b, *args, c=0, **kwargs) - // def f(a, b, *, c=0, **kwargs) - if star != nil { - if id, _ := star.X.(*syntax.Ident); id != nil { - // *args - if r.bind(id) { - r.errorf(id.NamePos, "duplicate parameter: %s", id.Name) - } - function.HasVarargs = true - } else if numKwonlyParams == 0 { - r.errorf(star.OpPos, "bare * must be followed by keyword-only parameters") - } - } - if starStar != nil { - if r.bind(starStar) { - r.errorf(starStar.NamePos, "duplicate parameter: %s", starStar.Name) - } - function.HasKwargs = true - } - - function.NumKwonlyParams = numKwonlyParams - r.stmts(function.Body) - - // Resolve all uses of this function's local vars, - // and keep just the remaining uses of free/global vars. - b.resolveLocalUses() - - // Leave function block. - r.pop() - - // References within the function body to globals are not - // resolved until the end of the module. -} - -func (r *resolver) resolveNonLocalUses(b *block) { - // First resolve inner blocks. - for _, child := range b.children { - r.resolveNonLocalUses(child) - } - for _, use := range b.uses { - use.id.Binding = r.lookupLexical(use, use.env) - } -} - -// lookupLocal looks up an identifier within its immediately enclosing function. -func lookupLocal(use use) *Binding { - for env := use.env; env != nil; env = env.parent { - if bind, ok := env.bindings[use.id.Name]; ok { - if bind.Scope == Free { - // shouldn't exist till later - log.Panicf("%s: internal error: %s, %v", use.id.NamePos, use.id.Name, bind) - } - return bind // found - } - if env.function != nil { - break - } - } - return nil // not found in this function -} - -// lookupLexical looks up an identifier use.id within its lexically enclosing environment. -// The use.env field captures the original environment for error reporting. -func (r *resolver) lookupLexical(use use, env *block) (bind *Binding) { - if debug { - fmt.Printf("lookupLexical %s in %s = ...\n", use.id.Name, env) - defer func() { fmt.Printf("= %v\n", bind) }() - } - - // Is this the file block? - if env == r.file { - return r.useToplevel(use) // file-local, global, predeclared, or not found - } - - // Defined in this block? - bind, ok := env.bindings[use.id.Name] - if !ok { - // Defined in parent block? 
- bind = r.lookupLexical(use, env.parent) - if env.function != nil && (bind.Scope == Local || bind.Scope == Free || bind.Scope == Cell) { - // Found in parent block, which belongs to enclosing function. - // Add the parent's binding to the function's freevars, - // and add a new 'free' binding to the inner function's block, - // and turn the parent's local into cell. - if bind.Scope == Local { - bind.Scope = Cell - } - index := len(env.function.FreeVars) - env.function.FreeVars = append(env.function.FreeVars, bind) - bind = &Binding{ - First: bind.First, - Scope: Free, - Index: index, - } - if debug { - fmt.Printf("creating freevar %v in function at %s: %s\n", - len(env.function.FreeVars), env.function.Pos, use.id.Name) - } - } - - // Memoize, to avoid duplicate free vars - // and redundant global (failing) lookups. - env.bind(use.id.Name, bind) - } - return bind -} diff --git a/vendor/go.starlark.net/starlark/debug.go b/vendor/go.starlark.net/starlark/debug.go deleted file mode 100644 index 22a21240f..000000000 --- a/vendor/go.starlark.net/starlark/debug.go +++ /dev/null @@ -1,42 +0,0 @@ -package starlark - -import "go.starlark.net/syntax" - -// This file defines an experimental API for the debugging tools. -// Some of these declarations expose details of internal packages. -// (The debugger makes liberal use of exported fields of unexported types.) -// Breaking changes may occur without notice. - -// Local returns the value of the i'th local variable. -// It may be nil if not yet assigned. -// -// Local may be called only for frames whose Callable is a *Function (a -// function defined by Starlark source code), and only while the frame -// is active; it will panic otherwise. -// -// This function is provided only for debugging tools. -// -// THIS API IS EXPERIMENTAL AND MAY CHANGE WITHOUT NOTICE. -func (fr *frame) Local(i int) Value { return fr.locals[i] } - -// DebugFrame is the debugger API for a frame of the interpreter's call stack. -// -// Most applications have no need for this API; use CallFrame instead. -// -// Clients must not retain a DebugFrame nor call any of its methods once -// the current built-in call has returned or execution has resumed -// after a breakpoint as this may have unpredictable effects, including -// but not limited to retention of object that would otherwise be garbage. -type DebugFrame interface { - Callable() Callable // returns the frame's function - Local(i int) Value // returns the value of the (Starlark) frame's ith local variable - Position() syntax.Position // returns the current position of execution in this frame -} - -// DebugFrame returns the debugger interface for -// the specified frame of the interpreter's call stack. -// Frame numbering is as for Thread.CallFrame. -// -// This function is intended for use in debugging tools. -// Most applications should have no need for it; use CallFrame instead. -func (thread *Thread) DebugFrame(depth int) DebugFrame { return thread.frameAt(depth) } diff --git a/vendor/go.starlark.net/starlark/empty.s b/vendor/go.starlark.net/starlark/empty.s deleted file mode 100644 index 3b8216999..000000000 --- a/vendor/go.starlark.net/starlark/empty.s +++ /dev/null @@ -1,3 +0,0 @@ -// The presence of this file allows the package to use the -// "go:linkname" hack to call non-exported functions in the -// Go runtime, such as hardware-accelerated string hashing. 
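[editor's aside, not part of the change] The cell/freevar plumbing removed above (resolver plus compiler) is what backs ordinary Starlark closures. A minimal sketch through the public API of the package being removed; file names are invented and the printed result assumes the semantics documented in these files:

package main

import (
	"fmt"

	"go.starlark.net/starlark"
)

const src = `
def make_counter():
    count = [0]              # local of make_counter, captured as a cell
    def inc():
        count[0] += 1        # free-variable reference to that cell
        return count[0]
    return inc

c = make_counter()
result = [c(), c(), c()]
`

func main() {
	thread := &starlark.Thread{Name: "closures"}
	globals, err := starlark.ExecFile(thread, "closure.star", src, nil)
	if err != nil {
		panic(err)
	}
	fmt.Println(globals["result"]) // [1, 2, 3]
}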
diff --git a/vendor/go.starlark.net/starlark/eval.go b/vendor/go.starlark.net/starlark/eval.go deleted file mode 100644 index 949cb934d..000000000 --- a/vendor/go.starlark.net/starlark/eval.go +++ /dev/null @@ -1,1648 +0,0 @@ -// Copyright 2017 The Bazel Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package starlark - -import ( - "fmt" - "io" - "io/ioutil" - "log" - "math/big" - "sort" - "strings" - "sync/atomic" - "time" - "unicode" - "unicode/utf8" - "unsafe" - - "go.starlark.net/internal/compile" - "go.starlark.net/internal/spell" - "go.starlark.net/resolve" - "go.starlark.net/syntax" -) - -// A Thread contains the state of a Starlark thread, -// such as its call stack and thread-local storage. -// The Thread is threaded throughout the evaluator. -type Thread struct { - // Name is an optional name that describes the thread, for debugging. - Name string - - // stack is the stack of (internal) call frames. - stack []*frame - - // Print is the client-supplied implementation of the Starlark - // 'print' function. If nil, fmt.Fprintln(os.Stderr, msg) is - // used instead. - Print func(thread *Thread, msg string) - - // Load is the client-supplied implementation of module loading. - // Repeated calls with the same module name must return the same - // module environment or error. - // The error message need not include the module name. - // - // See example_test.go for some example implementations of Load. - Load func(thread *Thread, module string) (StringDict, error) - - // OnMaxSteps is called when the thread reaches the limit set by SetMaxExecutionSteps. - // The default behavior is to call thread.Cancel("too many steps"). - OnMaxSteps func(thread *Thread) - - // Steps a count of abstract computation steps executed - // by this thread. It is incremented by the interpreter. It may be used - // as a measure of the approximate cost of Starlark execution, by - // computing the difference in its value before and after a computation. - // - // The precise meaning of "step" is not specified and may change. - Steps, maxSteps uint64 - - // cancelReason records the reason from the first call to Cancel. - cancelReason *string - - // locals holds arbitrary "thread-local" Go values belonging to the client. - // They are accessible to the client but not to any Starlark program. - locals map[string]interface{} - - // proftime holds the accumulated execution time since the last profile event. - proftime time.Duration -} - -// ExecutionSteps returns the current value of Steps. -func (thread *Thread) ExecutionSteps() uint64 { - return thread.Steps -} - -// SetMaxExecutionSteps sets a limit on the number of Starlark -// computation steps that may be executed by this thread. If the -// thread's step counter exceeds this limit, the interpreter calls -// the optional OnMaxSteps function or the default behavior -// of calling thread.Cancel("too many steps"). -func (thread *Thread) SetMaxExecutionSteps(max uint64) { - thread.maxSteps = max -} - -// Uncancel resets the cancellation state. -// -// Unlike most methods of Thread, it is safe to call Uncancel from any -// goroutine, even if the thread is actively executing. -func (thread *Thread) Uncancel() { - atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(&thread.cancelReason)), nil) -} - -// Cancel causes execution of Starlark code in the specified thread to -// promptly fail with an EvalError that includes the specified reason. 
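[editor's aside, not part of the change] Sketch of the step limit and cancellation behaviour described here; the source text and file name are invented, and the exact error wording may differ:

package main

import (
	"fmt"

	"go.starlark.net/starlark"
)

const src = `
def busy():
    total = 0
    for i in range(1000000):
        total += i
    return total

x = busy()
`

func main() {
	thread := &starlark.Thread{Name: "limited"}
	thread.SetMaxExecutionSteps(1000) // small budget; default OnMaxSteps cancels the thread

	_, err := starlark.ExecFile(thread, "busy.star", src, nil)
	fmt.Println(err)                                    // expected to mention "too many steps"
	fmt.Println("steps used:", thread.ExecutionSteps()) // roughly the configured budget
}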
-// There may be a delay before the interpreter observes the cancellation -// if the thread is currently in a call to a built-in function. -// -// Call [Uncancel] to reset the cancellation state. -// -// Unlike most methods of Thread, it is safe to call Cancel from any -// goroutine, even if the thread is actively executing. -func (thread *Thread) Cancel(reason string) { - // Atomically set cancelReason, preserving earlier reason if any. - atomic.CompareAndSwapPointer((*unsafe.Pointer)(unsafe.Pointer(&thread.cancelReason)), nil, unsafe.Pointer(&reason)) -} - -// SetLocal sets the thread-local value associated with the specified key. -// It must not be called after execution begins. -func (thread *Thread) SetLocal(key string, value interface{}) { - if thread.locals == nil { - thread.locals = make(map[string]interface{}) - } - thread.locals[key] = value -} - -// Local returns the thread-local value associated with the specified key. -func (thread *Thread) Local(key string) interface{} { - return thread.locals[key] -} - -// CallFrame returns a copy of the specified frame of the callstack. -// It should only be used in built-ins called from Starlark code. -// Depth 0 means the frame of the built-in itself, 1 is its caller, and so on. -// -// It is equivalent to CallStack().At(depth), but more efficient. -func (thread *Thread) CallFrame(depth int) CallFrame { - return thread.frameAt(depth).asCallFrame() -} - -func (thread *Thread) frameAt(depth int) *frame { - return thread.stack[len(thread.stack)-1-depth] -} - -// CallStack returns a new slice containing the thread's stack of call frames. -func (thread *Thread) CallStack() CallStack { - frames := make([]CallFrame, len(thread.stack)) - for i, fr := range thread.stack { - frames[i] = fr.asCallFrame() - } - return frames -} - -// CallStackDepth returns the number of frames in the current call stack. -func (thread *Thread) CallStackDepth() int { return len(thread.stack) } - -// A StringDict is a mapping from names to values, and represents -// an environment such as the global variables of a module. -// It is not a true starlark.Value. -type StringDict map[string]Value - -// Keys returns a new sorted slice of d's keys. -func (d StringDict) Keys() []string { - names := make([]string, 0, len(d)) - for name := range d { - names = append(names, name) - } - sort.Strings(names) - return names -} - -func (d StringDict) String() string { - buf := new(strings.Builder) - buf.WriteByte('{') - sep := "" - for _, name := range d.Keys() { - buf.WriteString(sep) - buf.WriteString(name) - buf.WriteString(": ") - writeValue(buf, d[name], nil) - sep = ", " - } - buf.WriteByte('}') - return buf.String() -} - -func (d StringDict) Freeze() { - for _, v := range d { - v.Freeze() - } -} - -// Has reports whether the dictionary contains the specified key. -func (d StringDict) Has(key string) bool { _, ok := d[key]; return ok } - -// A frame records a call to a Starlark function (including module toplevel) -// or a built-in function or method. -type frame struct { - callable Callable // current function (or toplevel) or built-in - pc uint32 // program counter (Starlark frames only) - locals []Value // local variables (Starlark frames only) - spanStart int64 // start time of current profiler span -} - -// Position returns the source position of the current point of execution in this frame. 
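[editor's aside, not part of the change] How these frame positions surface to callers, assuming the EvalError/Backtrace API defined a few lines further down; trace layout and message wording are approximate:

package main

import (
	"errors"
	"fmt"

	"go.starlark.net/starlark"
)

const src = `
def inner():
    return 1 // 0   # floored division by zero fails at run time

def outer():
    return inner()

x = outer()
`

func main() {
	thread := &starlark.Thread{Name: "bt"}
	if _, err := starlark.ExecFile(thread, "boom.star", src, nil); err != nil {
		var evalErr *starlark.EvalError
		if errors.As(err, &evalErr) {
			// A "Traceback (most recent call last)" listing the toplevel,
			// then outer, then inner, followed by the error message.
			fmt.Print(evalErr.Backtrace())
		}
	}
}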
-func (fr *frame) Position() syntax.Position { - switch c := fr.callable.(type) { - case *Function: - // Starlark function - return c.funcode.Position(fr.pc) - case callableWithPosition: - // If a built-in Callable defines - // a Position method, use it. - return c.Position() - } - return syntax.MakePosition(&builtinFilename, 0, 0) -} - -var builtinFilename = "" - -// Function returns the frame's function or built-in. -func (fr *frame) Callable() Callable { return fr.callable } - -// A CallStack is a stack of call frames, outermost first. -type CallStack []CallFrame - -// At returns a copy of the frame at depth i. -// At(0) returns the topmost frame. -func (stack CallStack) At(i int) CallFrame { return stack[len(stack)-1-i] } - -// Pop removes and returns the topmost frame. -func (stack *CallStack) Pop() CallFrame { - last := len(*stack) - 1 - top := (*stack)[last] - *stack = (*stack)[:last] - return top -} - -// String returns a user-friendly description of the stack. -func (stack CallStack) String() string { - out := new(strings.Builder) - if len(stack) > 0 { - fmt.Fprintf(out, "Traceback (most recent call last):\n") - } - for _, fr := range stack { - fmt.Fprintf(out, " %s: in %s\n", fr.Pos, fr.Name) - } - return out.String() -} - -// An EvalError is a Starlark evaluation error and -// a copy of the thread's stack at the moment of the error. -type EvalError struct { - Msg string - CallStack CallStack - cause error -} - -// A CallFrame represents the function name and current -// position of execution of an enclosing call frame. -type CallFrame struct { - Name string - Pos syntax.Position -} - -func (fr *frame) asCallFrame() CallFrame { - return CallFrame{ - Name: fr.Callable().Name(), - Pos: fr.Position(), - } -} - -func (thread *Thread) evalError(err error) *EvalError { - return &EvalError{ - Msg: err.Error(), - CallStack: thread.CallStack(), - cause: err, - } -} - -func (e *EvalError) Error() string { return e.Msg } - -// Backtrace returns a user-friendly error message describing the stack -// of calls that led to this error. -func (e *EvalError) Backtrace() string { - // If the topmost stack frame is a built-in function, - // remove it from the stack and add print "Error in fn:". - stack := e.CallStack - suffix := "" - if last := len(stack) - 1; last >= 0 && stack[last].Pos.Filename() == builtinFilename { - suffix = " in " + stack[last].Name - stack = stack[:last] - } - return fmt.Sprintf("%sError%s: %s", stack, suffix, e.Msg) -} - -func (e *EvalError) Unwrap() error { return e.cause } - -// A Program is a compiled Starlark program. -// -// Programs are immutable, and contain no Values. -// A Program may be created by parsing a source file (see SourceProgram) -// or by loading a previously saved compiled program (see CompiledProgram). -type Program struct { - compiled *compile.Program -} - -// CompilerVersion is the version number of the protocol for compiled -// files. Applications must not run programs compiled by one version -// with an interpreter at another version, and should thus incorporate -// the compiler version into the cache key when reusing compiled code. -const CompilerVersion = compile.Version - -// Filename returns the name of the file from which this program was loaded. -func (prog *Program) Filename() string { return prog.compiled.Toplevel.Pos.Filename() } - -func (prog *Program) String() string { return prog.Filename() } - -// NumLoads returns the number of load statements in the compiled program. 
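[editor's aside, not part of the change] Usage sketch for the load-introspection accessors documented here; the module names are invented, and the loaded file need not exist because nothing is executed:

package main

import (
	"fmt"

	"go.starlark.net/starlark"
)

func main() {
	const src = `
load("helpers.star", "greet")

x = greet("world")
`
	var predeclared starlark.StringDict // empty: only universal names are visible

	// Parse, resolve and compile without executing.
	_, prog, err := starlark.SourceProgram("main.star", src, predeclared.Has)
	if err != nil {
		panic(err)
	}

	for i := 0; i < prog.NumLoads(); i++ {
		name, pos := prog.Load(i)
		fmt.Printf("%s: loads %q\n", pos, name)
	}
}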
-func (prog *Program) NumLoads() int { return len(prog.compiled.Loads) } - -// Load(i) returns the name and position of the i'th module directly -// loaded by this one, where 0 <= i < NumLoads(). -// The name is unresolved---exactly as it appears in the source. -func (prog *Program) Load(i int) (string, syntax.Position) { - id := prog.compiled.Loads[i] - return id.Name, id.Pos -} - -// WriteTo writes the compiled module to the specified output stream. -func (prog *Program) Write(out io.Writer) error { - data := prog.compiled.Encode() - _, err := out.Write(data) - return err -} - -// ExecFile parses, resolves, and executes a Starlark file in the -// specified global environment, which may be modified during execution. -// -// Thread is the state associated with the Starlark thread. -// -// The filename and src parameters are as for syntax.Parse: -// filename is the name of the file to execute, -// and the name that appears in error messages; -// src is an optional source of bytes to use -// instead of filename. -// -// predeclared defines the predeclared names specific to this module. -// Execution does not modify this dictionary, though it may mutate -// its values. -// -// If ExecFile fails during evaluation, it returns an *EvalError -// containing a backtrace. -func ExecFile(thread *Thread, filename string, src interface{}, predeclared StringDict) (StringDict, error) { - // Parse, resolve, and compile a Starlark source file. - _, mod, err := SourceProgram(filename, src, predeclared.Has) - if err != nil { - return nil, err - } - - g, err := mod.Init(thread, predeclared) - g.Freeze() - return g, err -} - -// SourceProgram produces a new program by parsing, resolving, -// and compiling a Starlark source file. -// On success, it returns the parsed file and the compiled program. -// The filename and src parameters are as for syntax.Parse. -// -// The isPredeclared predicate reports whether a name is -// a pre-declared identifier of the current module. -// Its typical value is predeclared.Has, -// where predeclared is a StringDict of pre-declared values. -func SourceProgram(filename string, src interface{}, isPredeclared func(string) bool) (*syntax.File, *Program, error) { - f, err := syntax.Parse(filename, src, 0) - if err != nil { - return nil, nil, err - } - prog, err := FileProgram(f, isPredeclared) - return f, prog, err -} - -// FileProgram produces a new program by resolving, -// and compiling the Starlark source file syntax tree. -// On success, it returns the compiled program. -// -// Resolving a syntax tree mutates it. -// Do not call FileProgram more than once on the same file. -// -// The isPredeclared predicate reports whether a name is -// a pre-declared identifier of the current module. -// Its typical value is predeclared.Has, -// where predeclared is a StringDict of pre-declared values. -func FileProgram(f *syntax.File, isPredeclared func(string) bool) (*Program, error) { - if err := resolve.File(f, isPredeclared, Universe.Has); err != nil { - return nil, err - } - - var pos syntax.Position - if len(f.Stmts) > 0 { - pos = syntax.Start(f.Stmts[0]) - } else { - pos = syntax.MakePosition(&f.Path, 1, 1) - } - - module := f.Module.(*resolve.Module) - compiled := compile.File(f.Stmts, pos, "", module.Locals, module.Globals) - - return &Program{compiled}, nil -} - -// CompiledProgram produces a new program from the representation -// of a compiled program previously saved by Program.Write. 
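[editor's aside, not part of the change] The Write/CompiledProgram pair documented here is the basis for compilation caches keyed by CompilerVersion. A round-trip sketch using an in-memory buffer instead of a file; names and the source snippet are invented:

package main

import (
	"bytes"
	"fmt"

	"go.starlark.net/starlark"
)

func main() {
	var predeclared starlark.StringDict

	// Compile once (parse + resolve + compile, no execution).
	_, prog, err := starlark.SourceProgram("cached.star", "answer = 6 * 7\n", predeclared.Has)
	if err != nil {
		panic(err)
	}

	// Serialize, e.g. into a cache keyed by starlark.CompilerVersion.
	var buf bytes.Buffer
	if err := prog.Write(&buf); err != nil {
		panic(err)
	}

	// Later: reload and run without re-parsing the source.
	prog2, err := starlark.CompiledProgram(&buf)
	if err != nil {
		panic(err)
	}
	globals, err := prog2.Init(&starlark.Thread{Name: "cache"}, predeclared)
	if err != nil {
		panic(err)
	}
	fmt.Println(globals["answer"]) // 42
}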
-func CompiledProgram(in io.Reader) (*Program, error) { - data, err := ioutil.ReadAll(in) - if err != nil { - return nil, err - } - compiled, err := compile.DecodeProgram(data) - if err != nil { - return nil, err - } - return &Program{compiled}, nil -} - -// Init creates a set of global variables for the program, -// executes the toplevel code of the specified program, -// and returns a new, unfrozen dictionary of the globals. -func (prog *Program) Init(thread *Thread, predeclared StringDict) (StringDict, error) { - toplevel := makeToplevelFunction(prog.compiled, predeclared) - - _, err := Call(thread, toplevel, nil, nil) - - // Convert the global environment to a map. - // We return a (partial) map even in case of error. - return toplevel.Globals(), err -} - -// ExecREPLChunk compiles and executes file f in the specified thread -// and global environment. This is a variant of ExecFile specialized to -// the needs of a REPL, in which a sequence of input chunks, each -// syntactically a File, manipulates the same set of module globals, -// which are not frozen after execution. -// -// This function is intended to support only go.starlark.net/repl. -// Its API stability is not guaranteed. -func ExecREPLChunk(f *syntax.File, thread *Thread, globals StringDict) error { - var predeclared StringDict - - // -- variant of FileProgram -- - - if err := resolve.REPLChunk(f, globals.Has, predeclared.Has, Universe.Has); err != nil { - return err - } - - var pos syntax.Position - if len(f.Stmts) > 0 { - pos = syntax.Start(f.Stmts[0]) - } else { - pos = syntax.MakePosition(&f.Path, 1, 1) - } - - module := f.Module.(*resolve.Module) - compiled := compile.File(f.Stmts, pos, "", module.Locals, module.Globals) - prog := &Program{compiled} - - // -- variant of Program.Init -- - - toplevel := makeToplevelFunction(prog.compiled, predeclared) - - // Initialize module globals from parameter. - for i, id := range prog.compiled.Globals { - if v := globals[id.Name]; v != nil { - toplevel.module.globals[i] = v - } - } - - _, err := Call(thread, toplevel, nil, nil) - - // Reflect changes to globals back to parameter, even after an error. - for i, id := range prog.compiled.Globals { - if v := toplevel.module.globals[i]; v != nil { - globals[id.Name] = v - } - } - - return err -} - -func makeToplevelFunction(prog *compile.Program, predeclared StringDict) *Function { - // Create the Starlark value denoted by each program constant c. - constants := make([]Value, len(prog.Constants)) - for i, c := range prog.Constants { - var v Value - switch c := c.(type) { - case int64: - v = MakeInt64(c) - case *big.Int: - v = MakeBigInt(c) - case string: - v = String(c) - case compile.Bytes: - v = Bytes(c) - case float64: - v = Float(c) - default: - log.Panicf("unexpected constant %T: %v", c, c) - } - constants[i] = v - } - - return &Function{ - funcode: prog.Toplevel, - module: &module{ - program: prog, - predeclared: predeclared, - globals: make([]Value, len(prog.Globals)), - constants: constants, - }, - } -} - -// Eval parses, resolves, and evaluates an expression within the -// specified (predeclared) environment. -// -// Evaluation cannot mutate the environment dictionary itself, -// though it may modify variables reachable from the dictionary. -// -// The filename and src parameters are as for syntax.Parse. -// -// If Eval fails during evaluation, it returns an *EvalError -// containing a backtrace. 
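[editor's aside, not part of the change] Sketch of Eval over a host-supplied environment, using only constructors that appear in this file; the environment values and expression are arbitrary:

package main

import (
	"fmt"

	"go.starlark.net/starlark"
)

func main() {
	thread := &starlark.Thread{Name: "expr"}
	env := starlark.StringDict{
		"greeting": starlark.String("hello"),
		"n":        starlark.MakeInt64(3),
	}
	v, err := starlark.Eval(thread, "<expr>", "greeting.upper() + '!' * n", env)
	if err != nil {
		panic(err)
	}
	fmt.Println(v) // "HELLO!!!"
}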
-func Eval(thread *Thread, filename string, src interface{}, env StringDict) (Value, error) { - expr, err := syntax.ParseExpr(filename, src, 0) - if err != nil { - return nil, err - } - f, err := makeExprFunc(expr, env) - if err != nil { - return nil, err - } - return Call(thread, f, nil, nil) -} - -// EvalExpr resolves and evaluates an expression within the -// specified (predeclared) environment. -// Evaluating a comma-separated list of expressions yields a tuple value. -// -// Resolving an expression mutates it. -// Do not call EvalExpr more than once for the same expression. -// -// Evaluation cannot mutate the environment dictionary itself, -// though it may modify variables reachable from the dictionary. -// -// If Eval fails during evaluation, it returns an *EvalError -// containing a backtrace. -func EvalExpr(thread *Thread, expr syntax.Expr, env StringDict) (Value, error) { - fn, err := makeExprFunc(expr, env) - if err != nil { - return nil, err - } - return Call(thread, fn, nil, nil) -} - -// ExprFunc returns a no-argument function -// that evaluates the expression whose source is src. -func ExprFunc(filename string, src interface{}, env StringDict) (*Function, error) { - expr, err := syntax.ParseExpr(filename, src, 0) - if err != nil { - return nil, err - } - return makeExprFunc(expr, env) -} - -// makeExprFunc returns a no-argument function whose body is expr. -func makeExprFunc(expr syntax.Expr, env StringDict) (*Function, error) { - locals, err := resolve.Expr(expr, env.Has, Universe.Has) - if err != nil { - return nil, err - } - - return makeToplevelFunction(compile.Expr(expr, "", locals), env), nil -} - -// The following functions are primitive operations of the byte code interpreter. - -// list += iterable -func listExtend(x *List, y Iterable) { - if ylist, ok := y.(*List); ok { - // fast path: list += list - x.elems = append(x.elems, ylist.elems...) - } else { - iter := y.Iterate() - defer iter.Done() - var z Value - for iter.Next(&z) { - x.elems = append(x.elems, z) - } - } -} - -// getAttr implements x.dot. -func getAttr(x Value, name string) (Value, error) { - hasAttr, ok := x.(HasAttrs) - if !ok { - return nil, fmt.Errorf("%s has no .%s field or method", x.Type(), name) - } - - var errmsg string - v, err := hasAttr.Attr(name) - if err == nil { - if v != nil { - return v, nil // success - } - // (nil, nil) => generic error - errmsg = fmt.Sprintf("%s has no .%s field or method", x.Type(), name) - } else if nsa, ok := err.(NoSuchAttrError); ok { - errmsg = string(nsa) - } else { - return nil, err // return error as is - } - - // add spelling hint - if n := spell.Nearest(name, hasAttr.AttrNames()); n != "" { - errmsg = fmt.Sprintf("%s (did you mean .%s?)", errmsg, n) - } - - return nil, fmt.Errorf("%s", errmsg) -} - -// setField implements x.name = y. -func setField(x Value, name string, y Value) error { - if x, ok := x.(HasSetField); ok { - err := x.SetField(name, y) - if _, ok := err.(NoSuchAttrError); ok { - // No such field: check spelling. - if n := spell.Nearest(name, x.AttrNames()); n != "" { - err = fmt.Errorf("%s (did you mean .%s?)", err, n) - } - } - return err - } - - return fmt.Errorf("can't assign to .%s field of %s", name, x.Type()) -} - -// getIndex implements x[y]. 
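getAttr above resolves x.name through the HasAttrs interface and only attaches the spelling hint when the value reports that the attribute is missing. A minimal Go-defined value that plugs into that path could look like the following; the point type is hypothetical and exists only for illustration (fmt and go.starlark.net/starlark imports assumed):

// point is a hypothetical application value exposing .x and .y to Starlark.
type point struct{ x, y int }

func (p point) String() string        { return fmt.Sprintf("point(%d, %d)", p.x, p.y) }
func (p point) Type() string          { return "point" }
func (p point) Freeze()               {} // immutable
func (p point) Truth() starlark.Bool  { return starlark.True }
func (p point) Hash() (uint32, error) { return 0, fmt.Errorf("unhashable type: point") }

// Attr and AttrNames satisfy starlark.HasAttrs, so p.x works in scripts and a
// typo such as p.z gets the "did you mean" hint added by getAttr.
func (p point) Attr(name string) (starlark.Value, error) {
	switch name {
	case "x":
		return starlark.MakeInt(p.x), nil
	case "y":
		return starlark.MakeInt(p.y), nil
	}
	return nil, nil // (nil, nil) => no such field or method
}

func (p point) AttrNames() []string { return []string{"x", "y"} }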
-func getIndex(x, y Value) (Value, error) { - switch x := x.(type) { - case Mapping: // dict - z, found, err := x.Get(y) - if err != nil { - return nil, err - } - if !found { - return nil, fmt.Errorf("key %v not in %s", y, x.Type()) - } - return z, nil - - case Indexable: // string, list, tuple - n := x.Len() - i, err := AsInt32(y) - if err != nil { - return nil, fmt.Errorf("%s index: %s", x.Type(), err) - } - origI := i - if i < 0 { - i += n - } - if i < 0 || i >= n { - return nil, outOfRange(origI, n, x) - } - return x.Index(i), nil - } - return nil, fmt.Errorf("unhandled index operation %s[%s]", x.Type(), y.Type()) -} - -func outOfRange(i, n int, x Value) error { - if n == 0 { - return fmt.Errorf("index %d out of range: empty %s", i, x.Type()) - } else { - return fmt.Errorf("%s index %d out of range [%d:%d]", x.Type(), i, -n, n-1) - } -} - -// setIndex implements x[y] = z. -func setIndex(x, y, z Value) error { - switch x := x.(type) { - case HasSetKey: - if err := x.SetKey(y, z); err != nil { - return err - } - - case HasSetIndex: - n := x.Len() - i, err := AsInt32(y) - if err != nil { - return err - } - origI := i - if i < 0 { - i += n - } - if i < 0 || i >= n { - return outOfRange(origI, n, x) - } - return x.SetIndex(i, z) - - default: - return fmt.Errorf("%s value does not support item assignment", x.Type()) - } - return nil -} - -// Unary applies a unary operator (+, -, ~, not) to its operand. -func Unary(op syntax.Token, x Value) (Value, error) { - // The NOT operator is not customizable. - if op == syntax.NOT { - return !x.Truth(), nil - } - - // Int, Float, and user-defined types - if x, ok := x.(HasUnary); ok { - // (nil, nil) => unhandled - y, err := x.Unary(op) - if y != nil || err != nil { - return y, err - } - } - - return nil, fmt.Errorf("unknown unary op: %s %s", op, x.Type()) -} - -// Binary applies a strict binary operator (not AND or OR) to its operands. -// For equality tests or ordered comparisons, use Compare instead. -func Binary(op syntax.Token, x, y Value) (Value, error) { - switch op { - case syntax.PLUS: - switch x := x.(type) { - case String: - if y, ok := y.(String); ok { - return x + y, nil - } - case Int: - switch y := y.(type) { - case Int: - return x.Add(y), nil - case Float: - xf, err := x.finiteFloat() - if err != nil { - return nil, err - } - return xf + y, nil - } - case Float: - switch y := y.(type) { - case Float: - return x + y, nil - case Int: - yf, err := y.finiteFloat() - if err != nil { - return nil, err - } - return x + yf, nil - } - case *List: - if y, ok := y.(*List); ok { - z := make([]Value, 0, x.Len()+y.Len()) - z = append(z, x.elems...) - z = append(z, y.elems...) - return NewList(z), nil - } - case Tuple: - if y, ok := y.(Tuple); ok { - z := make(Tuple, 0, len(x)+len(y)) - z = append(z, x...) - z = append(z, y...) 
- return z, nil - } - } - - case syntax.MINUS: - switch x := x.(type) { - case Int: - switch y := y.(type) { - case Int: - return x.Sub(y), nil - case Float: - xf, err := x.finiteFloat() - if err != nil { - return nil, err - } - return xf - y, nil - } - case Float: - switch y := y.(type) { - case Float: - return x - y, nil - case Int: - yf, err := y.finiteFloat() - if err != nil { - return nil, err - } - return x - yf, nil - } - } - - case syntax.STAR: - switch x := x.(type) { - case Int: - switch y := y.(type) { - case Int: - return x.Mul(y), nil - case Float: - xf, err := x.finiteFloat() - if err != nil { - return nil, err - } - return xf * y, nil - case String: - return stringRepeat(y, x) - case Bytes: - return bytesRepeat(y, x) - case *List: - elems, err := tupleRepeat(Tuple(y.elems), x) - if err != nil { - return nil, err - } - return NewList(elems), nil - case Tuple: - return tupleRepeat(y, x) - } - case Float: - switch y := y.(type) { - case Float: - return x * y, nil - case Int: - yf, err := y.finiteFloat() - if err != nil { - return nil, err - } - return x * yf, nil - } - case String: - if y, ok := y.(Int); ok { - return stringRepeat(x, y) - } - case Bytes: - if y, ok := y.(Int); ok { - return bytesRepeat(x, y) - } - case *List: - if y, ok := y.(Int); ok { - elems, err := tupleRepeat(Tuple(x.elems), y) - if err != nil { - return nil, err - } - return NewList(elems), nil - } - case Tuple: - if y, ok := y.(Int); ok { - return tupleRepeat(x, y) - } - - } - - case syntax.SLASH: - switch x := x.(type) { - case Int: - xf, err := x.finiteFloat() - if err != nil { - return nil, err - } - switch y := y.(type) { - case Int: - yf, err := y.finiteFloat() - if err != nil { - return nil, err - } - if yf == 0.0 { - return nil, fmt.Errorf("floating-point division by zero") - } - return xf / yf, nil - case Float: - if y == 0.0 { - return nil, fmt.Errorf("floating-point division by zero") - } - return xf / y, nil - } - case Float: - switch y := y.(type) { - case Float: - if y == 0.0 { - return nil, fmt.Errorf("floating-point division by zero") - } - return x / y, nil - case Int: - yf, err := y.finiteFloat() - if err != nil { - return nil, err - } - if yf == 0.0 { - return nil, fmt.Errorf("floating-point division by zero") - } - return x / yf, nil - } - } - - case syntax.SLASHSLASH: - switch x := x.(type) { - case Int: - switch y := y.(type) { - case Int: - if y.Sign() == 0 { - return nil, fmt.Errorf("floored division by zero") - } - return x.Div(y), nil - case Float: - xf, err := x.finiteFloat() - if err != nil { - return nil, err - } - if y == 0.0 { - return nil, fmt.Errorf("floored division by zero") - } - return floor(xf / y), nil - } - case Float: - switch y := y.(type) { - case Float: - if y == 0.0 { - return nil, fmt.Errorf("floored division by zero") - } - return floor(x / y), nil - case Int: - yf, err := y.finiteFloat() - if err != nil { - return nil, err - } - if yf == 0.0 { - return nil, fmt.Errorf("floored division by zero") - } - return floor(x / yf), nil - } - } - - case syntax.PERCENT: - switch x := x.(type) { - case Int: - switch y := y.(type) { - case Int: - if y.Sign() == 0 { - return nil, fmt.Errorf("integer modulo by zero") - } - return x.Mod(y), nil - case Float: - xf, err := x.finiteFloat() - if err != nil { - return nil, err - } - if y == 0 { - return nil, fmt.Errorf("floating-point modulo by zero") - } - return xf.Mod(y), nil - } - case Float: - switch y := y.(type) { - case Float: - if y == 0.0 { - return nil, fmt.Errorf("floating-point modulo by zero") - } - return 
x.Mod(y), nil - case Int: - if y.Sign() == 0 { - return nil, fmt.Errorf("floating-point modulo by zero") - } - yf, err := y.finiteFloat() - if err != nil { - return nil, err - } - return x.Mod(yf), nil - } - case String: - return interpolate(string(x), y) - } - - case syntax.NOT_IN: - z, err := Binary(syntax.IN, x, y) - if err != nil { - return nil, err - } - return !z.Truth(), nil - - case syntax.IN: - switch y := y.(type) { - case *List: - for _, elem := range y.elems { - if eq, err := Equal(elem, x); err != nil { - return nil, err - } else if eq { - return True, nil - } - } - return False, nil - case Tuple: - for _, elem := range y { - if eq, err := Equal(elem, x); err != nil { - return nil, err - } else if eq { - return True, nil - } - } - return False, nil - case Mapping: // e.g. dict - // Ignore error from Get as we cannot distinguish true - // errors (value cycle, type error) from "key not found". - _, found, _ := y.Get(x) - return Bool(found), nil - case *Set: - ok, err := y.Has(x) - return Bool(ok), err - case String: - needle, ok := x.(String) - if !ok { - return nil, fmt.Errorf("'in ' requires string as left operand, not %s", x.Type()) - } - return Bool(strings.Contains(string(y), string(needle))), nil - case Bytes: - switch needle := x.(type) { - case Bytes: - return Bool(strings.Contains(string(y), string(needle))), nil - case Int: - var b byte - if err := AsInt(needle, &b); err != nil { - return nil, fmt.Errorf("int in bytes: %s", err) - } - return Bool(strings.IndexByte(string(y), b) >= 0), nil - default: - return nil, fmt.Errorf("'in bytes' requires bytes or int as left operand, not %s", x.Type()) - } - case rangeValue: - i, err := NumberToInt(x) - if err != nil { - return nil, fmt.Errorf("'in ' requires integer as left operand, not %s", x.Type()) - } - return Bool(y.contains(i)), nil - } - - case syntax.PIPE: - switch x := x.(type) { - case Int: - if y, ok := y.(Int); ok { - return x.Or(y), nil - } - - case *Dict: // union - if y, ok := y.(*Dict); ok { - return x.Union(y), nil - } - - case *Set: // union - if y, ok := y.(*Set); ok { - iter := Iterate(y) - defer iter.Done() - return x.Union(iter) - } - } - - case syntax.AMP: - switch x := x.(type) { - case Int: - if y, ok := y.(Int); ok { - return x.And(y), nil - } - case *Set: // intersection - if y, ok := y.(*Set); ok { - set := new(Set) - if x.Len() > y.Len() { - x, y = y, x // opt: range over smaller set - } - for xe := x.ht.head; xe != nil; xe = xe.next { - // Has, Insert cannot fail here. 
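The *Set intersection case above swaps its operands so the loop ranges over the smaller set and probes the larger one. The same trick in plain Go, with ordinary maps standing in for Starlark sets:

// intersect returns the keys present in both a and b, iterating over the
// smaller map so the loop count is min(len(a), len(b)).
func intersect(a, b map[string]struct{}) map[string]struct{} {
	if len(a) > len(b) {
		a, b = b, a
	}
	out := make(map[string]struct{})
	for k := range a {
		if _, ok := b[k]; ok {
			out[k] = struct{}{}
		}
	}
	return out
}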
- if found, _ := y.Has(xe.key); found { - set.Insert(xe.key) - } - } - return set, nil - } - } - - case syntax.CIRCUMFLEX: - switch x := x.(type) { - case Int: - if y, ok := y.(Int); ok { - return x.Xor(y), nil - } - case *Set: // symmetric difference - if y, ok := y.(*Set); ok { - set := new(Set) - for xe := x.ht.head; xe != nil; xe = xe.next { - if found, _ := y.Has(xe.key); !found { - set.Insert(xe.key) - } - } - for ye := y.ht.head; ye != nil; ye = ye.next { - if found, _ := x.Has(ye.key); !found { - set.Insert(ye.key) - } - } - return set, nil - } - } - - case syntax.LTLT, syntax.GTGT: - if x, ok := x.(Int); ok { - y, err := AsInt32(y) - if err != nil { - return nil, err - } - if y < 0 { - return nil, fmt.Errorf("negative shift count: %v", y) - } - if op == syntax.LTLT { - if y >= 512 { - return nil, fmt.Errorf("shift count too large: %v", y) - } - return x.Lsh(uint(y)), nil - } else { - return x.Rsh(uint(y)), nil - } - } - - default: - // unknown operator - goto unknown - } - - // user-defined types - // (nil, nil) => unhandled - if x, ok := x.(HasBinary); ok { - z, err := x.Binary(op, y, Left) - if z != nil || err != nil { - return z, err - } - } - if y, ok := y.(HasBinary); ok { - z, err := y.Binary(op, x, Right) - if z != nil || err != nil { - return z, err - } - } - - // unsupported operand types -unknown: - return nil, fmt.Errorf("unknown binary op: %s %s %s", x.Type(), op, y.Type()) -} - -// It's always possible to overeat in small bites but we'll -// try to stop someone swallowing the world in one gulp. -const maxAlloc = 1 << 30 - -func tupleRepeat(elems Tuple, n Int) (Tuple, error) { - if len(elems) == 0 { - return nil, nil - } - i, err := AsInt32(n) - if err != nil { - return nil, fmt.Errorf("repeat count %s too large", n) - } - if i < 1 { - return nil, nil - } - // Inv: i > 0, len > 0 - sz := len(elems) * i - if sz < 0 || sz >= maxAlloc { // sz < 0 => overflow - // Don't print sz. - return nil, fmt.Errorf("excessive repeat (%d * %d elements)", len(elems), i) - } - res := make([]Value, sz) - // copy elems into res, doubling each time - x := copy(res, elems) - for x < len(res) { - copy(res[x:], res[:x]) - x *= 2 - } - return res, nil -} - -func bytesRepeat(b Bytes, n Int) (Bytes, error) { - res, err := stringRepeat(String(b), n) - return Bytes(res), err -} - -func stringRepeat(s String, n Int) (String, error) { - if s == "" { - return "", nil - } - i, err := AsInt32(n) - if err != nil { - return "", fmt.Errorf("repeat count %s too large", n) - } - if i < 1 { - return "", nil - } - // Inv: i > 0, len > 0 - sz := len(s) * i - if sz < 0 || sz >= maxAlloc { // sz < 0 => overflow - // Don't print sz. - return "", fmt.Errorf("excessive repeat (%d * %d elements)", len(s), i) - } - return String(strings.Repeat(string(s), i)), nil -} - -// Call calls the function fn with the specified positional and keyword arguments. -func Call(thread *Thread, fn Value, args Tuple, kwargs []Tuple) (Value, error) { - c, ok := fn.(Callable) - if !ok { - return nil, fmt.Errorf("invalid call of non-function (%s)", fn.Type()) - } - - // Allocate and push a new frame. - var fr *frame - // Optimization: use slack portion of thread.stack - // slice as a freelist of empty frames. 
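Earlier in this file, tupleRepeat and stringRepeat cap the result size at maxAlloc and then fill the output by repeatedly copying the prefix that is already in place, so only O(log n) copy calls are needed. The same pattern on a plain slice (the overflow guard is omitted in this sketch):

// repeatSlice returns n concatenated copies of elems (n >= 1), filling the
// result by copying the already-filled prefix, which at most doubles each time.
func repeatSlice(elems []int, n int) []int {
	if len(elems) == 0 || n < 1 {
		return nil
	}
	res := make([]int, len(elems)*n)
	filled := copy(res, elems)
	for filled < len(res) {
		filled += copy(res[filled:], res[:filled])
	}
	return res
}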
- if n := len(thread.stack); n < cap(thread.stack) { - fr = thread.stack[n : n+1][0] - } - if fr == nil { - fr = new(frame) - } - - if thread.stack == nil { - // one-time initialization of thread - if thread.maxSteps == 0 { - thread.maxSteps-- // (MaxUint64) - } - } - - thread.stack = append(thread.stack, fr) // push - - fr.callable = c - - thread.beginProfSpan() - - // Use defer to ensure that panics from built-ins - // pass through the interpreter without leaving - // it in a bad state. - defer func() { - thread.endProfSpan() - - // clear out any references - // TODO(adonovan): opt: zero fr.Locals and - // reuse it if it is large enough. - *fr = frame{} - - thread.stack = thread.stack[:len(thread.stack)-1] // pop - }() - - result, err := c.CallInternal(thread, args, kwargs) - - // Sanity check: nil is not a valid Starlark value. - if result == nil && err == nil { - err = fmt.Errorf("internal error: nil (not None) returned from %s", fn) - } - - // Always return an EvalError with an accurate frame. - if err != nil { - if _, ok := err.(*EvalError); !ok { - err = thread.evalError(err) - } - } - - return result, err -} - -func slice(x, lo, hi, step_ Value) (Value, error) { - sliceable, ok := x.(Sliceable) - if !ok { - return nil, fmt.Errorf("invalid slice operand %s", x.Type()) - } - - n := sliceable.Len() - step := 1 - if step_ != None { - var err error - step, err = AsInt32(step_) - if err != nil { - return nil, fmt.Errorf("invalid slice step: %s", err) - } - if step == 0 { - return nil, fmt.Errorf("zero is not a valid slice step") - } - } - - // TODO(adonovan): opt: preallocate result array. - - var start, end int - if step > 0 { - // positive stride - // default indices are [0:n]. - var err error - start, end, err = indices(lo, hi, n) - if err != nil { - return nil, err - } - - if end < start { - end = start // => empty result - } - } else { - // negative stride - // default indices are effectively [n-1:-1], though to - // get this effect using explicit indices requires - // [n-1:-1-n:-1] because of the treatment of -ve values. - start = n - 1 - if err := asIndex(lo, n, &start); err != nil { - return nil, fmt.Errorf("invalid start index: %s", err) - } - if start >= n { - start = n - 1 - } - - end = -1 - if err := asIndex(hi, n, &end); err != nil { - return nil, fmt.Errorf("invalid end index: %s", err) - } - if end < -1 { - end = -1 - } - - if start < end { - start = end // => empty result - } - } - - return sliceable.Slice(start, end, step), nil -} - -// From Hacker's Delight, section 2.8. -func signum64(x int64) int { return int(uint64(x>>63) | uint64(-x)>>63) } -func signum(x int) int { return signum64(int64(x)) } - -// indices converts start_ and end_ to indices in the range [0:len]. -// The start index defaults to 0 and the end index defaults to len. -// An index -len < i < 0 is treated like i+len. -// All other indices outside the range are clamped to the nearest value in the range. -// Beware: start may be greater than end. -// This function is suitable only for slices with positive strides. -func indices(start_, end_ Value, len int) (start, end int, err error) { - start = 0 - if err := asIndex(start_, len, &start); err != nil { - return 0, 0, fmt.Errorf("invalid start index: %s", err) - } - // Clamp to [0:len]. - if start < 0 { - start = 0 - } else if start > len { - start = len - } - - end = len - if err := asIndex(end_, len, &end); err != nil { - return 0, 0, fmt.Errorf("invalid end index: %s", err) - } - // Clamp to [0:len]. 
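indices and asIndex implement the Starlark (Python-style) rules for slice bounds: a negative index counts from the end of the sequence, and out-of-range values are clamped rather than rejected. Condensed into a single helper for the positive-stride case:

// clampIndex normalizes index i for a sequence of length n: negative values
// count from the end, then the result is clamped to the range [0, n].
func clampIndex(i, n int) int {
	if i < 0 {
		i += n
	}
	if i < 0 {
		return 0
	}
	if i > n {
		return n
	}
	return i
}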
- if end < 0 { - end = 0 - } else if end > len { - end = len - } - - return start, end, nil -} - -// asIndex sets *result to the integer value of v, adding len to it -// if it is negative. If v is nil or None, *result is unchanged. -func asIndex(v Value, len int, result *int) error { - if v != nil && v != None { - var err error - *result, err = AsInt32(v) - if err != nil { - return err - } - if *result < 0 { - *result += len - } - } - return nil -} - -// setArgs sets the values of the formal parameters of function fn in -// based on the actual parameter values in args and kwargs. -func setArgs(locals []Value, fn *Function, args Tuple, kwargs []Tuple) error { - - // This is the general schema of a function: - // - // def f(p1, p2=dp2, p3=dp3, *args, k1, k2=dk2, k3, **kwargs) - // - // The p parameters are non-kwonly, and may be specified positionally. - // The k parameters are kwonly, and must be specified by name. - // The defaults tuple is (dp2, dp3, mandatory, dk2, mandatory). - // - // Arguments are processed as follows: - // - positional arguments are bound to a prefix of [p1, p2, p3]. - // - surplus positional arguments are bound to *args. - // - keyword arguments are bound to any of {p1, p2, p3, k1, k2, k3}; - // duplicate bindings are rejected. - // - surplus keyword arguments are bound to **kwargs. - // - defaults are bound to each parameter from p2 to k3 if no value was set. - // default values come from the tuple above. - // It is an error if the tuple entry for an unset parameter is 'mandatory'. - - // Nullary function? - if fn.NumParams() == 0 { - if nactual := len(args) + len(kwargs); nactual > 0 { - return fmt.Errorf("function %s accepts no arguments (%d given)", fn.Name(), nactual) - } - return nil - } - - cond := func(x bool, y, z interface{}) interface{} { - if x { - return y - } - return z - } - - // nparams is the number of ordinary parameters (sans *args and **kwargs). - nparams := fn.NumParams() - var kwdict *Dict - if fn.HasKwargs() { - nparams-- - kwdict = new(Dict) - locals[nparams] = kwdict - } - if fn.HasVarargs() { - nparams-- - } - - // nonkwonly is the number of non-kwonly parameters. - nonkwonly := nparams - fn.NumKwonlyParams() - - // Too many positional args? - n := len(args) - if len(args) > nonkwonly { - if !fn.HasVarargs() { - return fmt.Errorf("function %s accepts %s%d positional argument%s (%d given)", - fn.Name(), - cond(len(fn.defaults) > fn.NumKwonlyParams(), "at most ", ""), - nonkwonly, - cond(nonkwonly == 1, "", "s"), - len(args)) - } - n = nonkwonly - } - - // Bind positional arguments to non-kwonly parameters. - for i := 0; i < n; i++ { - locals[i] = args[i] - } - - // Bind surplus positional arguments to *args parameter. - if fn.HasVarargs() { - tuple := make(Tuple, len(args)-n) - for i := n; i < len(args); i++ { - tuple[i-n] = args[i] - } - locals[nparams] = tuple - } - - // Bind keyword arguments to parameters. - paramIdents := fn.funcode.Locals[:nparams] - for _, pair := range kwargs { - k, v := pair[0].(String), pair[1] - if i := findParam(paramIdents, string(k)); i >= 0 { - if locals[i] != nil { - return fmt.Errorf("function %s got multiple values for parameter %s", fn.Name(), k) - } - locals[i] = v - continue - } - if kwdict == nil { - return fmt.Errorf("function %s got an unexpected keyword argument %s", fn.Name(), k) - } - oldlen := kwdict.Len() - kwdict.SetKey(k, v) - if kwdict.Len() == oldlen { - return fmt.Errorf("function %s got multiple values for parameter %s", fn.Name(), k) - } - } - - // Are defaults required? 
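From the embedder's side, the args and kwargs that setArgs binds come straight from starlark.Call: positional arguments as a Tuple and keyword arguments as a slice of (name, value) pairs. A sketch with an invented script; fmt, log, and go.starlark.net/starlark are assumed to be imported:

thread := &starlark.Thread{Name: "call"}
globals, err := starlark.ExecFile(thread, "greet.star", `
def greet(name, punct="!"):
    return "hi " + name + punct
`, nil)
if err != nil {
	log.Fatal(err)
}

v, err := starlark.Call(thread, globals["greet"],
	starlark.Tuple{starlark.String("starlark")},                        // positional: name
	[]starlark.Tuple{{starlark.String("punct"), starlark.String("?")}}, // keyword: punct="?"
)
if err != nil {
	log.Fatal(err)
}
fmt.Println(v) // "hi starlark?"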
- if n < nparams || fn.NumKwonlyParams() > 0 { - m := nparams - len(fn.defaults) // first default - - // Report errors for missing required arguments. - var missing []string - var i int - for i = n; i < m; i++ { - if locals[i] == nil { - missing = append(missing, paramIdents[i].Name) - } - } - - // Bind default values to parameters. - for ; i < nparams; i++ { - if locals[i] == nil { - dflt := fn.defaults[i-m] - if _, ok := dflt.(mandatory); ok { - missing = append(missing, paramIdents[i].Name) - continue - } - locals[i] = dflt - } - } - - if missing != nil { - return fmt.Errorf("function %s missing %d argument%s (%s)", - fn.Name(), len(missing), cond(len(missing) > 1, "s", ""), strings.Join(missing, ", ")) - } - } - return nil -} - -func findParam(params []compile.Binding, name string) int { - for i, param := range params { - if param.Name == name { - return i - } - } - return -1 -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#string-interpolation -func interpolate(format string, x Value) (Value, error) { - buf := new(strings.Builder) - index := 0 - nargs := 1 - if tuple, ok := x.(Tuple); ok { - nargs = len(tuple) - } - for { - i := strings.IndexByte(format, '%') - if i < 0 { - buf.WriteString(format) - break - } - buf.WriteString(format[:i]) - format = format[i+1:] - - if format != "" && format[0] == '%' { - buf.WriteByte('%') - format = format[1:] - continue - } - - var arg Value - if format != "" && format[0] == '(' { - // keyword argument: %(name)s. - format = format[1:] - j := strings.IndexByte(format, ')') - if j < 0 { - return nil, fmt.Errorf("incomplete format key") - } - key := format[:j] - if dict, ok := x.(Mapping); !ok { - return nil, fmt.Errorf("format requires a mapping") - } else if v, found, _ := dict.Get(String(key)); found { - arg = v - } else { - return nil, fmt.Errorf("key not found: %s", key) - } - format = format[j+1:] - } else { - // positional argument: %s. - if index >= nargs { - return nil, fmt.Errorf("not enough arguments for format string") - } - if tuple, ok := x.(Tuple); ok { - arg = tuple[index] - } else { - arg = x - } - } - - // NOTE: Starlark does not support any of these optional Python features: - // - optional conversion flags: [#0- +], etc. - // - optional minimum field width (number or *). 
- // - optional precision (.123 or *) - // - optional length modifier - - // conversion type - if format == "" { - return nil, fmt.Errorf("incomplete format") - } - switch c := format[0]; c { - case 's', 'r': - if str, ok := AsString(arg); ok && c == 's' { - buf.WriteString(str) - } else { - writeValue(buf, arg, nil) - } - case 'd', 'i', 'o', 'x', 'X': - i, err := NumberToInt(arg) - if err != nil { - return nil, fmt.Errorf("%%%c format requires integer: %v", c, err) - } - switch c { - case 'd', 'i': - fmt.Fprintf(buf, "%d", i) - case 'o': - fmt.Fprintf(buf, "%o", i) - case 'x': - fmt.Fprintf(buf, "%x", i) - case 'X': - fmt.Fprintf(buf, "%X", i) - } - case 'e', 'f', 'g', 'E', 'F', 'G': - f, ok := AsFloat(arg) - if !ok { - return nil, fmt.Errorf("%%%c format requires float, not %s", c, arg.Type()) - } - Float(f).format(buf, c) - case 'c': - switch arg := arg.(type) { - case Int: - // chr(int) - r, err := AsInt32(arg) - if err != nil || r < 0 || r > unicode.MaxRune { - return nil, fmt.Errorf("%%c format requires a valid Unicode code point, got %s", arg) - } - buf.WriteRune(rune(r)) - case String: - r, size := utf8.DecodeRuneInString(string(arg)) - if size != len(arg) || len(arg) == 0 { - return nil, fmt.Errorf("%%c format requires a single-character string") - } - buf.WriteRune(r) - default: - return nil, fmt.Errorf("%%c format requires int or single-character string, not %s", arg.Type()) - } - case '%': - buf.WriteByte('%') - default: - return nil, fmt.Errorf("unknown conversion %%%c", c) - } - format = format[1:] - index++ - } - - if index < nargs { - return nil, fmt.Errorf("too many arguments for format string") - } - - return String(buf.String()), nil -} diff --git a/vendor/go.starlark.net/starlark/hashtable.go b/vendor/go.starlark.net/starlark/hashtable.go deleted file mode 100644 index 252d21d12..000000000 --- a/vendor/go.starlark.net/starlark/hashtable.go +++ /dev/null @@ -1,390 +0,0 @@ -// Copyright 2017 The Bazel Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package starlark - -import ( - "fmt" - _ "unsafe" // for go:linkname hack -) - -// hashtable is used to represent Starlark dict and set values. -// It is a hash table whose key/value entries form a doubly-linked list -// in the order the entries were inserted. -// -// Initialized instances of hashtable must not be copied. -type hashtable struct { - table []bucket // len is zero or a power of two - bucket0 [1]bucket // inline allocation for small maps. - len uint32 - itercount uint32 // number of active iterators (ignored if frozen) - head *entry // insertion order doubly-linked list; may be nil - tailLink **entry // address of nil link at end of list (perhaps &head) - frozen bool - - _ noCopy // triggers vet copylock check on this type. -} - -// noCopy is zero-sized type that triggers vet's copylock check. -// See https://github.com/golang/go/issues/8005#issuecomment-190753527. 
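The hashtable type below pairs a bucketed hash index with an insertion-order doubly-linked list, which is what makes dict and set iteration deterministic while keeping lookups O(1). A toy version of the same idea using a Go map plus a slice (deletion, the reason the real code uses a linked list, is left out of this sketch):

type kv struct{ key, val string }

// orderedMap keeps insertion order for iteration while using a map for lookups.
type orderedMap struct {
	index   map[string]int // key -> position in entries
	entries []kv           // in insertion order
}

func (m *orderedMap) set(k, v string) {
	if m.index == nil {
		m.index = make(map[string]int)
	}
	if i, ok := m.index[k]; ok {
		m.entries[i].val = v // existing key: update in place, keep position
		return
	}
	m.index[k] = len(m.entries)
	m.entries = append(m.entries, kv{k, v})
}

func (m *orderedMap) get(k string) (string, bool) {
	i, ok := m.index[k]
	if !ok {
		return "", false
	}
	return m.entries[i].val, true
}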
-type noCopy struct{} - -func (*noCopy) Lock() {} -func (*noCopy) Unlock() {} - -const bucketSize = 8 - -type bucket struct { - entries [bucketSize]entry - next *bucket // linked list of buckets -} - -type entry struct { - hash uint32 // nonzero => in use - key, value Value - next *entry // insertion order doubly-linked list; may be nil - prevLink **entry // address of link to this entry (perhaps &head) -} - -func (ht *hashtable) init(size int) { - if size < 0 { - panic("size < 0") - } - nb := 1 - for overloaded(size, nb) { - nb = nb << 1 - } - if nb < 2 { - ht.table = ht.bucket0[:1] - } else { - ht.table = make([]bucket, nb) - } - ht.tailLink = &ht.head -} - -func (ht *hashtable) freeze() { - if !ht.frozen { - ht.frozen = true - for e := ht.head; e != nil; e = e.next { - e.key.Freeze() - e.value.Freeze() - } - } -} - -func (ht *hashtable) insert(k, v Value) error { - if err := ht.checkMutable("insert into"); err != nil { - return err - } - if ht.table == nil { - ht.init(1) - } - h, err := k.Hash() - if err != nil { - return err - } - if h == 0 { - h = 1 // zero is reserved - } - -retry: - var insert *entry - - // Inspect each bucket in the bucket list. - p := &ht.table[h&(uint32(len(ht.table)-1))] - for { - for i := range p.entries { - e := &p.entries[i] - if e.hash != h { - if e.hash == 0 { - // Found empty entry; make a note. - insert = e - } - continue - } - if eq, err := Equal(k, e.key); err != nil { - return err // e.g. excessively recursive tuple - } else if !eq { - continue - } - // Key already present; update value. - e.value = v - return nil - } - if p.next == nil { - break - } - p = p.next - } - - // Key not found. p points to the last bucket. - - // Does the number of elements exceed the buckets' load factor? - if overloaded(int(ht.len), len(ht.table)) { - ht.grow() - goto retry - } - - if insert == nil { - // No space in existing buckets. Add a new one to the bucket list. - b := new(bucket) - p.next = b - insert = &b.entries[0] - } - - // Insert key/value pair. - insert.hash = h - insert.key = k - insert.value = v - - // Append entry to doubly-linked list. - insert.prevLink = ht.tailLink - *ht.tailLink = insert - ht.tailLink = &insert.next - - ht.len++ - - return nil -} - -func overloaded(elems, buckets int) bool { - const loadFactor = 6.5 // just a guess - return elems >= bucketSize && float64(elems) >= loadFactor*float64(buckets) -} - -func (ht *hashtable) grow() { - // Double the number of buckets and rehash. - // - // Even though this makes reentrant calls to ht.insert, - // calls Equals unnecessarily (since there can't be duplicate keys), - // and recomputes the hash unnecessarily, the gains from - // avoiding these steps were found to be too small to justify - // the extra logic: -2% on hashtable benchmark. - ht.table = make([]bucket, len(ht.table)<<1) - oldhead := ht.head - ht.head = nil - ht.tailLink = &ht.head - ht.len = 0 - for e := oldhead; e != nil; e = e.next { - ht.insert(e.key, e.value) - } - ht.bucket0[0] = bucket{} // clear out unused initial bucket -} - -func (ht *hashtable) lookup(k Value) (v Value, found bool, err error) { - h, err := k.Hash() - if err != nil { - return nil, false, err // unhashable - } - if h == 0 { - h = 1 // zero is reserved - } - if ht.table == nil { - return None, false, nil // empty - } - - // Inspect each bucket in the bucket list. 
- for p := &ht.table[h&(uint32(len(ht.table)-1))]; p != nil; p = p.next { - for i := range p.entries { - e := &p.entries[i] - if e.hash == h { - if eq, err := Equal(k, e.key); err != nil { - return nil, false, err // e.g. excessively recursive tuple - } else if eq { - return e.value, true, nil // found - } - } - } - } - return None, false, nil // not found -} - -// Items returns all the items in the map (as key/value pairs) in insertion order. -func (ht *hashtable) items() []Tuple { - items := make([]Tuple, 0, ht.len) - array := make([]Value, ht.len*2) // allocate a single backing array - for e := ht.head; e != nil; e = e.next { - pair := Tuple(array[:2:2]) - array = array[2:] - pair[0] = e.key - pair[1] = e.value - items = append(items, pair) - } - return items -} - -func (ht *hashtable) first() (Value, bool) { - if ht.head != nil { - return ht.head.key, true - } - return None, false -} - -func (ht *hashtable) keys() []Value { - keys := make([]Value, 0, ht.len) - for e := ht.head; e != nil; e = e.next { - keys = append(keys, e.key) - } - return keys -} - -func (ht *hashtable) delete(k Value) (v Value, found bool, err error) { - if err := ht.checkMutable("delete from"); err != nil { - return nil, false, err - } - if ht.table == nil { - return None, false, nil // empty - } - h, err := k.Hash() - if err != nil { - return nil, false, err // unhashable - } - if h == 0 { - h = 1 // zero is reserved - } - - // Inspect each bucket in the bucket list. - for p := &ht.table[h&(uint32(len(ht.table)-1))]; p != nil; p = p.next { - for i := range p.entries { - e := &p.entries[i] - if e.hash == h { - if eq, err := Equal(k, e.key); err != nil { - return nil, false, err - } else if eq { - // Remove e from doubly-linked list. - *e.prevLink = e.next - if e.next == nil { - ht.tailLink = e.prevLink // deletion of last entry - } else { - e.next.prevLink = e.prevLink - } - - v := e.value - *e = entry{} - ht.len-- - return v, true, nil // found - } - } - } - } - - // TODO(adonovan): opt: remove completely empty bucket from bucket list. - - return None, false, nil // not found -} - -// checkMutable reports an error if the hash table should not be mutated. -// verb+" dict" should describe the operation. -func (ht *hashtable) checkMutable(verb string) error { - if ht.frozen { - return fmt.Errorf("cannot %s frozen hash table", verb) - } - if ht.itercount > 0 { - return fmt.Errorf("cannot %s hash table during iteration", verb) - } - return nil -} - -func (ht *hashtable) clear() error { - if err := ht.checkMutable("clear"); err != nil { - return err - } - if ht.table != nil { - for i := range ht.table { - ht.table[i] = bucket{} - } - } - ht.head = nil - ht.tailLink = &ht.head - ht.len = 0 - return nil -} - -func (ht *hashtable) addAll(other *hashtable) error { - for e := other.head; e != nil; e = e.next { - if err := ht.insert(e.key, e.value); err != nil { - return err - } - } - return nil -} - -// dump is provided as an aid to debugging. 
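checkMutable above is the source of the runtime errors seen when a frozen value, or a value with active iterators, is mutated. The frozen case is observable from Go through the public Dict API; fmt and go.starlark.net/starlark are assumed to be imported, and NewDict is the ordinary constructor from that package:

d := starlark.NewDict(1)
_ = d.SetKey(starlark.String("k"), starlark.MakeInt(1))

d.Freeze()
if err := d.SetKey(starlark.String("k"), starlark.MakeInt(2)); err != nil {
	fmt.Println(err) // cannot insert into frozen hash table
}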
-func (ht *hashtable) dump() { - fmt.Printf("hashtable %p len=%d head=%p tailLink=%p", - ht, ht.len, ht.head, ht.tailLink) - if ht.tailLink != nil { - fmt.Printf(" *tailLink=%p", *ht.tailLink) - } - fmt.Println() - for j := range ht.table { - fmt.Printf("bucket chain %d\n", j) - for p := &ht.table[j]; p != nil; p = p.next { - fmt.Printf("bucket %p\n", p) - for i := range p.entries { - e := &p.entries[i] - fmt.Printf("\tentry %d @ %p hash=%d key=%v value=%v\n", - i, e, e.hash, e.key, e.value) - fmt.Printf("\t\tnext=%p &next=%p prev=%p", - e.next, &e.next, e.prevLink) - if e.prevLink != nil { - fmt.Printf(" *prev=%p", *e.prevLink) - } - fmt.Println() - } - } - } -} - -func (ht *hashtable) iterate() *keyIterator { - if !ht.frozen { - ht.itercount++ - } - return &keyIterator{ht: ht, e: ht.head} -} - -type keyIterator struct { - ht *hashtable - e *entry -} - -func (it *keyIterator) Next(k *Value) bool { - if it.e != nil { - *k = it.e.key - it.e = it.e.next - return true - } - return false -} - -func (it *keyIterator) Done() { - if !it.ht.frozen { - it.ht.itercount-- - } -} - -// TODO(adonovan): use go1.19's maphash.String. - -// hashString computes the hash of s. -func hashString(s string) uint32 { - if len(s) >= 12 { - // Call the Go runtime's optimized hash implementation, - // which uses the AESENC instruction on amd64 machines. - return uint32(goStringHash(s, 0)) - } - return softHashString(s) -} - -//go:linkname goStringHash runtime.stringHash -func goStringHash(s string, seed uintptr) uintptr - -// softHashString computes the 32-bit FNV-1a hash of s in software. -func softHashString(s string) uint32 { - var h uint32 = 2166136261 - for i := 0; i < len(s); i++ { - h ^= uint32(s[i]) - h *= 16777619 - } - return h -} diff --git a/vendor/go.starlark.net/starlark/int.go b/vendor/go.starlark.net/starlark/int.go deleted file mode 100644 index a264e9d22..000000000 --- a/vendor/go.starlark.net/starlark/int.go +++ /dev/null @@ -1,452 +0,0 @@ -// Copyright 2017 The Bazel Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package starlark - -import ( - "fmt" - "math" - "math/big" - "reflect" - "strconv" - - "go.starlark.net/syntax" -) - -// Int is the type of a Starlark int. -// -// The zero value is not a legal value; use MakeInt(0). -type Int struct{ impl intImpl } - -// --- high-level accessors --- - -// MakeInt returns a Starlark int for the specified signed integer. -func MakeInt(x int) Int { return MakeInt64(int64(x)) } - -// MakeInt64 returns a Starlark int for the specified int64. -func MakeInt64(x int64) Int { - if math.MinInt32 <= x && x <= math.MaxInt32 { - return makeSmallInt(x) - } - return makeBigInt(big.NewInt(x)) -} - -// MakeUint returns a Starlark int for the specified unsigned integer. -func MakeUint(x uint) Int { return MakeUint64(uint64(x)) } - -// MakeUint64 returns a Starlark int for the specified uint64. -func MakeUint64(x uint64) Int { - if x <= math.MaxInt32 { - return makeSmallInt(int64(x)) - } - return makeBigInt(new(big.Int).SetUint64(x)) -} - -// MakeBigInt returns a Starlark int for the specified big.Int. -// The new Int value will contain a copy of x. The caller is safe to modify x. 
-func MakeBigInt(x *big.Int) Int { - if isSmall(x) { - return makeSmallInt(x.Int64()) - } - z := new(big.Int).Set(x) - return makeBigInt(z) -} - -func isSmall(x *big.Int) bool { - n := x.BitLen() - return n < 32 || n == 32 && x.Int64() == math.MinInt32 -} - -var ( - zero, one = makeSmallInt(0), makeSmallInt(1) - oneBig = big.NewInt(1) - - _ HasUnary = Int{} -) - -// Unary implements the operations +int, -int, and ~int. -func (i Int) Unary(op syntax.Token) (Value, error) { - switch op { - case syntax.MINUS: - return zero.Sub(i), nil - case syntax.PLUS: - return i, nil - case syntax.TILDE: - return i.Not(), nil - } - return nil, nil -} - -// Int64 returns the value as an int64. -// If it is not exactly representable the result is undefined and ok is false. -func (i Int) Int64() (_ int64, ok bool) { - iSmall, iBig := i.get() - if iBig != nil { - x, acc := bigintToInt64(iBig) - if acc != big.Exact { - return // inexact - } - return x, true - } - return iSmall, true -} - -// BigInt returns a new big.Int with the same value as the Int. -func (i Int) BigInt() *big.Int { - iSmall, iBig := i.get() - if iBig != nil { - return new(big.Int).Set(iBig) - } - return big.NewInt(iSmall) -} - -// bigInt returns the value as a big.Int. -// It differs from BigInt in that this method returns the actual -// reference and any modification will change the state of i. -func (i Int) bigInt() *big.Int { - iSmall, iBig := i.get() - if iBig != nil { - return iBig - } - return big.NewInt(iSmall) -} - -// Uint64 returns the value as a uint64. -// If it is not exactly representable the result is undefined and ok is false. -func (i Int) Uint64() (_ uint64, ok bool) { - iSmall, iBig := i.get() - if iBig != nil { - x, acc := bigintToUint64(iBig) - if acc != big.Exact { - return // inexact - } - return x, true - } - if iSmall < 0 { - return // inexact - } - return uint64(iSmall), true -} - -// The math/big API should provide this function. -func bigintToInt64(i *big.Int) (int64, big.Accuracy) { - sign := i.Sign() - if sign > 0 { - if i.Cmp(maxint64) > 0 { - return math.MaxInt64, big.Below - } - } else if sign < 0 { - if i.Cmp(minint64) < 0 { - return math.MinInt64, big.Above - } - } - return i.Int64(), big.Exact -} - -// The math/big API should provide this function. 
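bigintToInt64 above (and bigintToUint64 just below) clamp an out-of-range big.Int to the nearest representable bound and report the direction through big.Accuracy. With only the standard math/big predicates, the fits-or-clamps check reduces to the following (math and math/big imports assumed):

// toInt64 returns b as an int64 when it fits exactly; otherwise it clamps to
// the nearest bound and reports ok=false.
func toInt64(b *big.Int) (v int64, ok bool) {
	if b.IsInt64() {
		return b.Int64(), true
	}
	if b.Sign() > 0 {
		return math.MaxInt64, false
	}
	return math.MinInt64, false
}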
-func bigintToUint64(i *big.Int) (uint64, big.Accuracy) { - sign := i.Sign() - if sign > 0 { - if i.BitLen() > 64 { - return math.MaxUint64, big.Below - } - } else if sign < 0 { - return 0, big.Above - } - return i.Uint64(), big.Exact -} - -var ( - minint64 = new(big.Int).SetInt64(math.MinInt64) - maxint64 = new(big.Int).SetInt64(math.MaxInt64) -) - -func (i Int) Format(s fmt.State, ch rune) { - iSmall, iBig := i.get() - if iBig != nil { - iBig.Format(s, ch) - return - } - big.NewInt(iSmall).Format(s, ch) -} -func (i Int) String() string { - iSmall, iBig := i.get() - if iBig != nil { - return iBig.Text(10) - } - return strconv.FormatInt(iSmall, 10) -} -func (i Int) Type() string { return "int" } -func (i Int) Freeze() {} // immutable -func (i Int) Truth() Bool { return i.Sign() != 0 } -func (i Int) Hash() (uint32, error) { - iSmall, iBig := i.get() - var lo big.Word - if iBig != nil { - lo = iBig.Bits()[0] - } else { - lo = big.Word(iSmall) - } - return 12582917 * uint32(lo+3), nil -} - -// Required by the TotallyOrdered interface -func (x Int) Cmp(v Value, depth int) (int, error) { - y := v.(Int) - xSmall, xBig := x.get() - ySmall, yBig := y.get() - if xBig != nil || yBig != nil { - return x.bigInt().Cmp(y.bigInt()), nil - } - return signum64(xSmall - ySmall), nil // safe: int32 operands -} - -// Float returns the float value nearest i. -func (i Int) Float() Float { - iSmall, iBig := i.get() - if iBig != nil { - // Fast path for hardware int-to-float conversions. - if iBig.IsUint64() { - return Float(iBig.Uint64()) - } else if iBig.IsInt64() { - return Float(iBig.Int64()) - } - - f, _ := new(big.Float).SetInt(iBig).Float64() - return Float(f) - } - return Float(iSmall) -} - -// finiteFloat returns the finite float value nearest i, -// or an error if the magnitude is too large. 
-func (i Int) finiteFloat() (Float, error) { - f := i.Float() - if math.IsInf(float64(f), 0) { - return 0, fmt.Errorf("int too large to convert to float") - } - return f, nil -} - -func (x Int) Sign() int { - xSmall, xBig := x.get() - if xBig != nil { - return xBig.Sign() - } - return signum64(xSmall) -} - -func (x Int) Add(y Int) Int { - xSmall, xBig := x.get() - ySmall, yBig := y.get() - if xBig != nil || yBig != nil { - return MakeBigInt(new(big.Int).Add(x.bigInt(), y.bigInt())) - } - return MakeInt64(xSmall + ySmall) -} -func (x Int) Sub(y Int) Int { - xSmall, xBig := x.get() - ySmall, yBig := y.get() - if xBig != nil || yBig != nil { - return MakeBigInt(new(big.Int).Sub(x.bigInt(), y.bigInt())) - } - return MakeInt64(xSmall - ySmall) -} -func (x Int) Mul(y Int) Int { - xSmall, xBig := x.get() - ySmall, yBig := y.get() - if xBig != nil || yBig != nil { - return MakeBigInt(new(big.Int).Mul(x.bigInt(), y.bigInt())) - } - return MakeInt64(xSmall * ySmall) -} -func (x Int) Or(y Int) Int { - xSmall, xBig := x.get() - ySmall, yBig := y.get() - if xBig != nil || yBig != nil { - return MakeBigInt(new(big.Int).Or(x.bigInt(), y.bigInt())) - } - return makeSmallInt(xSmall | ySmall) -} -func (x Int) And(y Int) Int { - xSmall, xBig := x.get() - ySmall, yBig := y.get() - if xBig != nil || yBig != nil { - return MakeBigInt(new(big.Int).And(x.bigInt(), y.bigInt())) - } - return makeSmallInt(xSmall & ySmall) -} -func (x Int) Xor(y Int) Int { - xSmall, xBig := x.get() - ySmall, yBig := y.get() - if xBig != nil || yBig != nil { - return MakeBigInt(new(big.Int).Xor(x.bigInt(), y.bigInt())) - } - return makeSmallInt(xSmall ^ ySmall) -} -func (x Int) Not() Int { - xSmall, xBig := x.get() - if xBig != nil { - return MakeBigInt(new(big.Int).Not(xBig)) - } - return makeSmallInt(^xSmall) -} -func (x Int) Lsh(y uint) Int { return MakeBigInt(new(big.Int).Lsh(x.bigInt(), y)) } -func (x Int) Rsh(y uint) Int { return MakeBigInt(new(big.Int).Rsh(x.bigInt(), y)) } - -// Precondition: y is nonzero. -func (x Int) Div(y Int) Int { - xSmall, xBig := x.get() - ySmall, yBig := y.get() - // http://python-history.blogspot.com/2010/08/why-pythons-integer-division-floors.html - if xBig != nil || yBig != nil { - xb, yb := x.bigInt(), y.bigInt() - - var quo, rem big.Int - quo.QuoRem(xb, yb, &rem) - if (xb.Sign() < 0) != (yb.Sign() < 0) && rem.Sign() != 0 { - quo.Sub(&quo, oneBig) - } - return MakeBigInt(&quo) - } - quo := xSmall / ySmall - rem := xSmall % ySmall - if (xSmall < 0) != (ySmall < 0) && rem != 0 { - quo -= 1 - } - return MakeInt64(quo) -} - -// Precondition: y is nonzero. -func (x Int) Mod(y Int) Int { - xSmall, xBig := x.get() - ySmall, yBig := y.get() - if xBig != nil || yBig != nil { - xb, yb := x.bigInt(), y.bigInt() - - var quo, rem big.Int - quo.QuoRem(xb, yb, &rem) - if (xb.Sign() < 0) != (yb.Sign() < 0) && rem.Sign() != 0 { - rem.Add(&rem, yb) - } - return MakeBigInt(&rem) - } - rem := xSmall % ySmall - if (xSmall < 0) != (ySmall < 0) && rem != 0 { - rem += ySmall - } - return makeSmallInt(rem) -} - -func (i Int) rational() *big.Rat { - iSmall, iBig := i.get() - if iBig != nil { - return new(big.Rat).SetInt(iBig) - } - return new(big.Rat).SetInt64(iSmall) -} - -// AsInt32 returns the value of x if is representable as an int32. 
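Div and Mod above implement floored (Python-style) division, while Go's built-in / and % truncate toward zero; the sign adjustment is the entire difference. The same adjustment written out for plain int64 operands:

// floorDivMod returns the floored quotient and modulo of x and y (y != 0),
// e.g. floorDivMod(-7, 2) == (-4, 1) while Go's -7/2, -7%2 == (-3, -1).
func floorDivMod(x, y int64) (quo, rem int64) {
	quo, rem = x/y, x%y
	if (x < 0) != (y < 0) && rem != 0 {
		quo--
		rem += y
	}
	return quo, rem
}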
-func AsInt32(x Value) (int, error) { - i, ok := x.(Int) - if !ok { - return 0, fmt.Errorf("got %s, want int", x.Type()) - } - iSmall, iBig := i.get() - if iBig != nil { - return 0, fmt.Errorf("%s out of range", i) - } - return int(iSmall), nil -} - -// AsInt sets *ptr to the value of Starlark int x, if it is exactly representable, -// otherwise it returns an error. -// The type of ptr must be one of the pointer types *int, *int8, *int16, *int32, or *int64, -// or one of their unsigned counterparts including *uintptr. -func AsInt(x Value, ptr interface{}) error { - xint, ok := x.(Int) - if !ok { - return fmt.Errorf("got %s, want int", x.Type()) - } - - bits := reflect.TypeOf(ptr).Elem().Size() * 8 - switch ptr.(type) { - case *int, *int8, *int16, *int32, *int64: - i, ok := xint.Int64() - if !ok || bits < 64 && !(-1<<(bits-1) <= i && i < 1<<(bits-1)) { - return fmt.Errorf("%s out of range (want value in signed %d-bit range)", xint, bits) - } - switch ptr := ptr.(type) { - case *int: - *ptr = int(i) - case *int8: - *ptr = int8(i) - case *int16: - *ptr = int16(i) - case *int32: - *ptr = int32(i) - case *int64: - *ptr = int64(i) - } - - case *uint, *uint8, *uint16, *uint32, *uint64, *uintptr: - i, ok := xint.Uint64() - if !ok || bits < 64 && i >= 1< value is not representable as int32 -} - -// --- low-level accessors --- - -// get returns the small and big components of the Int. -// small is defined only if big is nil. -// small is sign-extended to 64 bits for ease of subsequent arithmetic. -func (i Int) get() (small int64, big *big.Int) { - return i.impl.small_, i.impl.big_ -} - -// Precondition: math.MinInt32 <= x && x <= math.MaxInt32 -func makeSmallInt(x int64) Int { - return Int{intImpl{small_: x}} -} - -// Precondition: x cannot be represented as int32. -func makeBigInt(x *big.Int) Int { - return Int{intImpl{big_: x}} -} diff --git a/vendor/go.starlark.net/starlark/int_posix64.go b/vendor/go.starlark.net/starlark/int_posix64.go deleted file mode 100644 index 2ab0beda3..000000000 --- a/vendor/go.starlark.net/starlark/int_posix64.go +++ /dev/null @@ -1,91 +0,0 @@ -//go:build (linux || darwin || dragonfly || freebsd || netbsd || solaris) && (amd64 || arm64 || mips64x || ppc64x || loong64) -// +build linux darwin dragonfly freebsd netbsd solaris -// +build amd64 arm64 mips64x ppc64x loong64 - -package starlark - -// This file defines an optimized Int implementation for 64-bit machines -// running POSIX. It reserves a 4GB portion of the address space using -// mmap and represents int32 values as addresses within that range. This -// disambiguates int32 values from *big.Int pointers, letting all Int -// values be represented as an unsafe.Pointer, so that Int-to-Value -// interface conversion need not allocate. - -// Although iOS (which, like macOS, appears as darwin/arm64) is -// POSIX-compliant, it limits each process to about 700MB of virtual -// address space, which defeats the optimization. Similarly, -// OpenBSD's default ulimit for virtual memory is a measly GB or so. -// On both those platforms the attempted optimization will fail and -// fall back to the slow implementation. - -// An alternative approach to this optimization would be to embed the -// int32 values in pointers using odd values, which can be distinguished -// from (even) *big.Int pointers. 
However, the Go runtime does not allow -// user programs to manufacture pointers to arbitrary locations such as -// within the zero page, or non-span, non-mmap, non-stack locations, -// and it may panic if it encounters them; see Issue #382. - -import ( - "log" - "math" - "math/big" - "unsafe" - - "golang.org/x/sys/unix" -) - -// intImpl represents a union of (int32, *big.Int) in a single pointer, -// so that Int-to-Value conversions need not allocate. -// -// The pointer is either a *big.Int, if the value is big, or a pointer into a -// reserved portion of the address space (smallints), if the value is small -// and the address space allocation succeeded. -// -// See int_generic.go for the basic representation concepts. -type intImpl unsafe.Pointer - -// get returns the (small, big) arms of the union. -func (i Int) get() (int64, *big.Int) { - if smallints == 0 { - // optimization disabled - if x := (*big.Int)(i.impl); isSmall(x) { - return x.Int64(), nil - } else { - return 0, x - } - } - - if ptr := uintptr(i.impl); ptr >= smallints && ptr < smallints+1<<32 { - return math.MinInt32 + int64(ptr-smallints), nil - } - return 0, (*big.Int)(i.impl) -} - -// Precondition: math.MinInt32 <= x && x <= math.MaxInt32 -func makeSmallInt(x int64) Int { - if smallints == 0 { - // optimization disabled - return Int{intImpl(big.NewInt(x))} - } - - return Int{intImpl(uintptr(x-math.MinInt32) + smallints)} -} - -// Precondition: x cannot be represented as int32. -func makeBigInt(x *big.Int) Int { return Int{intImpl(x)} } - -// smallints is the base address of a 2^32 byte memory region. -// Pointers to addresses in this region represent int32 values. -// We assume smallints is not at the very top of the address space. -// -// Zero means the optimization is disabled and all Ints allocate a big.Int. -var smallints = reserveAddresses(1 << 32) - -func reserveAddresses(len int) uintptr { - b, err := unix.Mmap(-1, 0, len, unix.PROT_READ, unix.MAP_PRIVATE|unix.MAP_ANON) - if err != nil { - log.Printf("Starlark failed to allocate 4GB address space: %v. Integer performance may suffer.", err) - return 0 // optimization disabled - } - return uintptr(unsafe.Pointer(&b[0])) -} diff --git a/vendor/go.starlark.net/starlark/interp.go b/vendor/go.starlark.net/starlark/interp.go deleted file mode 100644 index b41905a0b..000000000 --- a/vendor/go.starlark.net/starlark/interp.go +++ /dev/null @@ -1,705 +0,0 @@ -package starlark - -// This file defines the bytecode interpreter. - -import ( - "fmt" - "os" - "sync/atomic" - "unsafe" - - "go.starlark.net/internal/compile" - "go.starlark.net/internal/spell" - "go.starlark.net/resolve" - "go.starlark.net/syntax" -) - -const vmdebug = false // TODO(adonovan): use a bitfield of specific kinds of error. - -// TODO(adonovan): -// - optimize position table. -// - opt: record MaxIterStack during compilation and preallocate the stack. - -func (fn *Function) CallInternal(thread *Thread, args Tuple, kwargs []Tuple) (Value, error) { - // Postcondition: args is not mutated. This is stricter than required by Callable, - // but allows CALL to avoid a copy. - - if !resolve.AllowRecursion { - // detect recursion - for _, fr := range thread.stack[:len(thread.stack)-1] { - // We look for the same function code, - // not function value, otherwise the user could - // defeat the check by writing the Y combinator. 
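The check below compares function code rather than function values, so wrapping or re-binding a function cannot hide a recursive cycle. In this vendored snapshot the related knobs are the resolve.AllowRecursion global and the per-thread step budget; treat the setter name as belonging to this version of the API (newer go.starlark.net releases expose recursion through syntax.FileOptions instead). A sketch:

resolve.AllowRecursion = true // opt in to recursive Starlark functions, process-wide

thread := &starlark.Thread{Name: "limited"}
thread.SetMaxExecutionSteps(1_000_000) // past this budget the loop below calls Cancel("too many steps")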
- if frfn, ok := fr.Callable().(*Function); ok && frfn.funcode == fn.funcode { - return nil, fmt.Errorf("function %s called recursively", fn.Name()) - } - } - } - - f := fn.funcode - fr := thread.frameAt(0) - - // Allocate space for stack and locals. - // Logically these do not escape from this frame - // (See https://github.com/golang/go/issues/20533.) - // - // This heap allocation looks expensive, but I was unable to get - // more than 1% real time improvement in a large alloc-heavy - // benchmark (in which this alloc was 8% of alloc-bytes) - // by allocating space for 8 Values in each frame, or - // by allocating stack by slicing an array held by the Thread - // that is expanded in chunks of min(k, nspace), for k=256 or 1024. - nlocals := len(f.Locals) - nspace := nlocals + f.MaxStack - space := make([]Value, nspace) - locals := space[:nlocals:nlocals] // local variables, starting with parameters - stack := space[nlocals:] // operand stack - - // Digest arguments and set parameters. - err := setArgs(locals, fn, args, kwargs) - if err != nil { - return nil, thread.evalError(err) - } - - fr.locals = locals - - if vmdebug { - fmt.Printf("Entering %s @ %s\n", f.Name, f.Position(0)) - fmt.Printf("%d stack, %d locals\n", len(stack), len(locals)) - defer fmt.Println("Leaving ", f.Name) - } - - // Spill indicated locals to cells. - // Each cell is a separate alloc to avoid spurious liveness. - for _, index := range f.Cells { - locals[index] = &cell{locals[index]} - } - - // TODO(adonovan): add static check that beneath this point - // - there is exactly one return statement - // - there is no redefinition of 'err'. - - var iterstack []Iterator // stack of active iterators - - // Use defer so that application panics can pass through - // interpreter without leaving thread in a bad state. - defer func() { - // ITERPOP the rest of the iterator stack. - for _, iter := range iterstack { - iter.Done() - } - - fr.locals = nil - }() - - sp := 0 - var pc uint32 - var result Value - code := f.Code -loop: - for { - thread.Steps++ - if thread.Steps >= thread.maxSteps { - if thread.OnMaxSteps != nil { - thread.OnMaxSteps(thread) - } else { - thread.Cancel("too many steps") - } - } - if reason := atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&thread.cancelReason))); reason != nil { - err = fmt.Errorf("Starlark computation cancelled: %s", *(*string)(reason)) - break loop - } - - fr.pc = pc - - op := compile.Opcode(code[pc]) - pc++ - var arg uint32 - if op >= compile.OpcodeArgMin { - // TODO(adonovan): opt: profile this. - // Perhaps compiling big endian would be less work to decode? - for s := uint(0); ; s += 7 { - b := code[pc] - pc++ - arg |= uint32(b&0x7f) << s - if b < 0x80 { - break - } - } - } - if vmdebug { - fmt.Fprintln(os.Stderr, stack[:sp]) // very verbose! 
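Opcode operands in the loop above are encoded as little-endian base-128 varints: seven payload bits per byte, with the high bit set on every byte except the last. The decode step, pulled out into a standalone helper:

// decodeVarint decodes one uint32 operand starting at code[pc] and returns it
// along with the index of the next undecoded byte.
func decodeVarint(code []byte, pc int) (arg uint32, next int) {
	for s := uint(0); ; s += 7 {
		b := code[pc]
		pc++
		arg |= uint32(b&0x7f) << s
		if b < 0x80 {
			break
		}
	}
	return arg, pc
}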
- compile.PrintOp(f, fr.pc, op, arg) - } - - switch op { - case compile.NOP: - // nop - - case compile.DUP: - stack[sp] = stack[sp-1] - sp++ - - case compile.DUP2: - stack[sp] = stack[sp-2] - stack[sp+1] = stack[sp-1] - sp += 2 - - case compile.POP: - sp-- - - case compile.EXCH: - stack[sp-2], stack[sp-1] = stack[sp-1], stack[sp-2] - - case compile.EQL, compile.NEQ, compile.GT, compile.LT, compile.LE, compile.GE: - op := syntax.Token(op-compile.EQL) + syntax.EQL - y := stack[sp-1] - x := stack[sp-2] - sp -= 2 - ok, err2 := Compare(op, x, y) - if err2 != nil { - err = err2 - break loop - } - stack[sp] = Bool(ok) - sp++ - - case compile.PLUS, - compile.MINUS, - compile.STAR, - compile.SLASH, - compile.SLASHSLASH, - compile.PERCENT, - compile.AMP, - compile.PIPE, - compile.CIRCUMFLEX, - compile.LTLT, - compile.GTGT, - compile.IN: - binop := syntax.Token(op-compile.PLUS) + syntax.PLUS - if op == compile.IN { - binop = syntax.IN // IN token is out of order - } - y := stack[sp-1] - x := stack[sp-2] - sp -= 2 - z, err2 := Binary(binop, x, y) - if err2 != nil { - err = err2 - break loop - } - stack[sp] = z - sp++ - - case compile.UPLUS, compile.UMINUS, compile.TILDE: - var unop syntax.Token - if op == compile.TILDE { - unop = syntax.TILDE - } else { - unop = syntax.Token(op-compile.UPLUS) + syntax.PLUS - } - x := stack[sp-1] - y, err2 := Unary(unop, x) - if err2 != nil { - err = err2 - break loop - } - stack[sp-1] = y - - case compile.INPLACE_ADD: - y := stack[sp-1] - x := stack[sp-2] - sp -= 2 - - // It's possible that y is not Iterable but - // nonetheless defines x+y, in which case we - // should fall back to the general case. - var z Value - if xlist, ok := x.(*List); ok { - if yiter, ok := y.(Iterable); ok { - if err = xlist.checkMutable("apply += to"); err != nil { - break loop - } - listExtend(xlist, yiter) - z = xlist - } - } - if z == nil { - z, err = Binary(syntax.PLUS, x, y) - if err != nil { - break loop - } - } - - stack[sp] = z - sp++ - - case compile.INPLACE_PIPE: - y := stack[sp-1] - x := stack[sp-2] - sp -= 2 - - // It's possible that y is not Dict but - // nonetheless defines x|y, in which case we - // should fall back to the general case. 
- var z Value - if xdict, ok := x.(*Dict); ok { - if ydict, ok := y.(*Dict); ok { - if err = xdict.ht.checkMutable("apply |= to"); err != nil { - break loop - } - xdict.ht.addAll(&ydict.ht) // can't fail - z = xdict - } - } - if z == nil { - z, err = Binary(syntax.PIPE, x, y) - if err != nil { - break loop - } - } - - stack[sp] = z - sp++ - - case compile.NONE: - stack[sp] = None - sp++ - - case compile.TRUE: - stack[sp] = True - sp++ - - case compile.FALSE: - stack[sp] = False - sp++ - - case compile.MANDATORY: - stack[sp] = mandatory{} - sp++ - - case compile.JMP: - pc = arg - - case compile.CALL, compile.CALL_VAR, compile.CALL_KW, compile.CALL_VAR_KW: - var kwargs Value - if op == compile.CALL_KW || op == compile.CALL_VAR_KW { - kwargs = stack[sp-1] - sp-- - } - - var args Value - if op == compile.CALL_VAR || op == compile.CALL_VAR_KW { - args = stack[sp-1] - sp-- - } - - // named args (pairs) - var kvpairs []Tuple - if nkvpairs := int(arg & 0xff); nkvpairs > 0 { - kvpairs = make([]Tuple, 0, nkvpairs) - kvpairsAlloc := make(Tuple, 2*nkvpairs) // allocate a single backing array - sp -= 2 * nkvpairs - for i := 0; i < nkvpairs; i++ { - pair := kvpairsAlloc[:2:2] - kvpairsAlloc = kvpairsAlloc[2:] - pair[0] = stack[sp+2*i] // name - pair[1] = stack[sp+2*i+1] // value - kvpairs = append(kvpairs, pair) - } - } - if kwargs != nil { - // Add key/value items from **kwargs dictionary. - dict, ok := kwargs.(IterableMapping) - if !ok { - err = fmt.Errorf("argument after ** must be a mapping, not %s", kwargs.Type()) - break loop - } - items := dict.Items() - for _, item := range items { - if _, ok := item[0].(String); !ok { - err = fmt.Errorf("keywords must be strings, not %s", item[0].Type()) - break loop - } - } - if len(kvpairs) == 0 { - kvpairs = items - } else { - kvpairs = append(kvpairs, items...) - } - } - - // positional args - var positional Tuple - if npos := int(arg >> 8); npos > 0 { - positional = stack[sp-npos : sp] - sp -= npos - - // Copy positional arguments into a new array, - // unless the callee is another Starlark function, - // in which case it can be trusted not to mutate them. - if _, ok := stack[sp-1].(*Function); !ok || args != nil { - positional = append(Tuple(nil), positional...) - } - } - if args != nil { - // Add elements from *args sequence. 
- iter := Iterate(args) - if iter == nil { - err = fmt.Errorf("argument after * must be iterable, not %s", args.Type()) - break loop - } - var elem Value - for iter.Next(&elem) { - positional = append(positional, elem) - } - iter.Done() - } - - function := stack[sp-1] - - if vmdebug { - fmt.Printf("VM call %s args=%s kwargs=%s @%s\n", - function, positional, kvpairs, f.Position(fr.pc)) - } - - thread.endProfSpan() - z, err2 := Call(thread, function, positional, kvpairs) - thread.beginProfSpan() - if err2 != nil { - err = err2 - break loop - } - if vmdebug { - fmt.Printf("Resuming %s @ %s\n", f.Name, f.Position(0)) - } - stack[sp-1] = z - - case compile.ITERPUSH: - x := stack[sp-1] - sp-- - iter := Iterate(x) - if iter == nil { - err = fmt.Errorf("%s value is not iterable", x.Type()) - break loop - } - iterstack = append(iterstack, iter) - - case compile.ITERJMP: - iter := iterstack[len(iterstack)-1] - if iter.Next(&stack[sp]) { - sp++ - } else { - pc = arg - } - - case compile.ITERPOP: - n := len(iterstack) - 1 - iterstack[n].Done() - iterstack = iterstack[:n] - - case compile.NOT: - stack[sp-1] = !stack[sp-1].Truth() - - case compile.RETURN: - result = stack[sp-1] - break loop - - case compile.SETINDEX: - z := stack[sp-1] - y := stack[sp-2] - x := stack[sp-3] - sp -= 3 - err = setIndex(x, y, z) - if err != nil { - break loop - } - - case compile.INDEX: - y := stack[sp-1] - x := stack[sp-2] - sp -= 2 - z, err2 := getIndex(x, y) - if err2 != nil { - err = err2 - break loop - } - stack[sp] = z - sp++ - - case compile.ATTR: - x := stack[sp-1] - name := f.Prog.Names[arg] - y, err2 := getAttr(x, name) - if err2 != nil { - err = err2 - break loop - } - stack[sp-1] = y - - case compile.SETFIELD: - y := stack[sp-1] - x := stack[sp-2] - sp -= 2 - name := f.Prog.Names[arg] - if err2 := setField(x, name, y); err2 != nil { - err = err2 - break loop - } - - case compile.MAKEDICT: - stack[sp] = new(Dict) - sp++ - - case compile.SETDICT, compile.SETDICTUNIQ: - dict := stack[sp-3].(*Dict) - k := stack[sp-2] - v := stack[sp-1] - sp -= 3 - oldlen := dict.Len() - if err2 := dict.SetKey(k, v); err2 != nil { - err = err2 - break loop - } - if op == compile.SETDICTUNIQ && dict.Len() == oldlen { - err = fmt.Errorf("duplicate key: %v", k) - break loop - } - - case compile.APPEND: - elem := stack[sp-1] - list := stack[sp-2].(*List) - sp -= 2 - list.elems = append(list.elems, elem) - - case compile.SLICE: - x := stack[sp-4] - lo := stack[sp-3] - hi := stack[sp-2] - step := stack[sp-1] - sp -= 4 - res, err2 := slice(x, lo, hi, step) - if err2 != nil { - err = err2 - break loop - } - stack[sp] = res - sp++ - - case compile.UNPACK: - n := int(arg) - iterable := stack[sp-1] - sp-- - iter := Iterate(iterable) - if iter == nil { - err = fmt.Errorf("got %s in sequence assignment", iterable.Type()) - break loop - } - i := 0 - sp += n - for i < n && iter.Next(&stack[sp-1-i]) { - i++ - } - var dummy Value - if iter.Next(&dummy) { - // NB: Len may return -1 here in obscure cases. 
- err = fmt.Errorf("too many values to unpack (got %d, want %d)", Len(iterable), n) - break loop - } - iter.Done() - if i < n { - err = fmt.Errorf("too few values to unpack (got %d, want %d)", i, n) - break loop - } - - case compile.CJMP: - if stack[sp-1].Truth() { - pc = arg - } - sp-- - - case compile.CONSTANT: - stack[sp] = fn.module.constants[arg] - sp++ - - case compile.MAKETUPLE: - n := int(arg) - tuple := make(Tuple, n) - sp -= n - copy(tuple, stack[sp:]) - stack[sp] = tuple - sp++ - - case compile.MAKELIST: - n := int(arg) - elems := make([]Value, n) - sp -= n - copy(elems, stack[sp:]) - stack[sp] = NewList(elems) - sp++ - - case compile.MAKEFUNC: - funcode := f.Prog.Functions[arg] - tuple := stack[sp-1].(Tuple) - n := len(tuple) - len(funcode.Freevars) - defaults := tuple[:n:n] - freevars := tuple[n:] - stack[sp-1] = &Function{ - funcode: funcode, - module: fn.module, - defaults: defaults, - freevars: freevars, - } - - case compile.LOAD: - n := int(arg) - module := string(stack[sp-1].(String)) - sp-- - - if thread.Load == nil { - err = fmt.Errorf("load not implemented by this application") - break loop - } - - thread.endProfSpan() - dict, err2 := thread.Load(thread, module) - thread.beginProfSpan() - if err2 != nil { - err = wrappedError{ - msg: fmt.Sprintf("cannot load %s: %v", module, err2), - cause: err2, - } - break loop - } - - for i := 0; i < n; i++ { - from := string(stack[sp-1-i].(String)) - v, ok := dict[from] - if !ok { - err = fmt.Errorf("load: name %s not found in module %s", from, module) - if n := spell.Nearest(from, dict.Keys()); n != "" { - err = fmt.Errorf("%s (did you mean %s?)", err, n) - } - break loop - } - stack[sp-1-i] = v - } - - case compile.SETLOCAL: - locals[arg] = stack[sp-1] - sp-- - - case compile.SETLOCALCELL: - locals[arg].(*cell).v = stack[sp-1] - sp-- - - case compile.SETGLOBAL: - fn.module.globals[arg] = stack[sp-1] - sp-- - - case compile.LOCAL: - x := locals[arg] - if x == nil { - err = fmt.Errorf("local variable %s referenced before assignment", f.Locals[arg].Name) - break loop - } - stack[sp] = x - sp++ - - case compile.FREE: - stack[sp] = fn.freevars[arg] - sp++ - - case compile.LOCALCELL: - v := locals[arg].(*cell).v - if v == nil { - err = fmt.Errorf("local variable %s referenced before assignment", f.Locals[arg].Name) - break loop - } - stack[sp] = v - sp++ - - case compile.FREECELL: - v := fn.freevars[arg].(*cell).v - if v == nil { - err = fmt.Errorf("local variable %s referenced before assignment", f.Freevars[arg].Name) - break loop - } - stack[sp] = v - sp++ - - case compile.GLOBAL: - x := fn.module.globals[arg] - if x == nil { - err = fmt.Errorf("global variable %s referenced before assignment", f.Prog.Globals[arg].Name) - break loop - } - stack[sp] = x - sp++ - - case compile.PREDECLARED: - name := f.Prog.Names[arg] - x := fn.module.predeclared[name] - if x == nil { - err = fmt.Errorf("internal error: predeclared variable %s is uninitialized", name) - break loop - } - stack[sp] = x - sp++ - - case compile.UNIVERSAL: - stack[sp] = Universe[f.Prog.Names[arg]] - sp++ - - default: - err = fmt.Errorf("unimplemented: %s", op) - break loop - } - } - // (deferred cleanup runs here) - return result, err -} - -type wrappedError struct { - msg string - cause error -} - -func (e wrappedError) Error() string { - return e.msg -} - -// Implements the xerrors.Wrapper interface -// https://godoc.org/golang.org/x/xerrors#Wrapper -func (e wrappedError) Unwrap() error { - return e.cause -} - -// mandatory is a sentinel value used in a function's 
defaults tuple -// to indicate that a (keyword-only) parameter is mandatory. -type mandatory struct{} - -func (mandatory) String() string { return "mandatory" } -func (mandatory) Type() string { return "mandatory" } -func (mandatory) Freeze() {} // immutable -func (mandatory) Truth() Bool { return False } -func (mandatory) Hash() (uint32, error) { return 0, nil } - -// A cell is a box containing a Value. -// Local variables marked as cells hold their value indirectly -// so that they may be shared by outer and inner nested functions. -// Cells are always accessed using indirect {FREE,LOCAL,SETLOCAL}CELL instructions. -// The FreeVars tuple contains only cells. -// The FREE instruction always yields a cell. -type cell struct{ v Value } - -func (c *cell) String() string { return "cell" } -func (c *cell) Type() string { return "cell" } -func (c *cell) Freeze() { - if c.v != nil { - c.v.Freeze() - } -} -func (c *cell) Truth() Bool { panic("unreachable") } -func (c *cell) Hash() (uint32, error) { panic("unreachable") } diff --git a/vendor/go.starlark.net/starlark/library.go b/vendor/go.starlark.net/starlark/library.go deleted file mode 100644 index 1c801be64..000000000 --- a/vendor/go.starlark.net/starlark/library.go +++ /dev/null @@ -1,2289 +0,0 @@ -// Copyright 2017 The Bazel Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package starlark - -// This file defines the library of built-ins. -// -// Built-ins must explicitly check the "frozen" flag before updating -// mutable types such as lists and dicts. - -import ( - "errors" - "fmt" - "math" - "math/big" - "os" - "sort" - "strconv" - "strings" - "unicode" - "unicode/utf16" - "unicode/utf8" - - "go.starlark.net/syntax" -) - -// Universe defines the set of universal built-ins, such as None, True, and len. -// -// The Go application may add or remove items from the -// universe dictionary before Starlark evaluation begins. -// All values in the dictionary must be immutable. -// Starlark programs cannot modify the dictionary. 
-var Universe StringDict - -func init() { - // https://github.com/google/starlark-go/blob/master/doc/spec.md#built-in-constants-and-functions - Universe = StringDict{ - "None": None, - "True": True, - "False": False, - "abs": NewBuiltin("abs", abs), - "any": NewBuiltin("any", any), - "all": NewBuiltin("all", all), - "bool": NewBuiltin("bool", bool_), - "bytes": NewBuiltin("bytes", bytes_), - "chr": NewBuiltin("chr", chr), - "dict": NewBuiltin("dict", dict), - "dir": NewBuiltin("dir", dir), - "enumerate": NewBuiltin("enumerate", enumerate), - "fail": NewBuiltin("fail", fail), - "float": NewBuiltin("float", float), - "getattr": NewBuiltin("getattr", getattr), - "hasattr": NewBuiltin("hasattr", hasattr), - "hash": NewBuiltin("hash", hash), - "int": NewBuiltin("int", int_), - "len": NewBuiltin("len", len_), - "list": NewBuiltin("list", list), - "max": NewBuiltin("max", minmax), - "min": NewBuiltin("min", minmax), - "ord": NewBuiltin("ord", ord), - "print": NewBuiltin("print", print), - "range": NewBuiltin("range", range_), - "repr": NewBuiltin("repr", repr), - "reversed": NewBuiltin("reversed", reversed), - "set": NewBuiltin("set", set), // requires resolve.AllowSet - "sorted": NewBuiltin("sorted", sorted), - "str": NewBuiltin("str", str), - "tuple": NewBuiltin("tuple", tuple), - "type": NewBuiltin("type", type_), - "zip": NewBuiltin("zip", zip), - } -} - -// methods of built-in types -// https://github.com/google/starlark-go/blob/master/doc/spec.md#built-in-methods -var ( - bytesMethods = map[string]*Builtin{ - "elems": NewBuiltin("elems", bytes_elems), - } - - dictMethods = map[string]*Builtin{ - "clear": NewBuiltin("clear", dict_clear), - "get": NewBuiltin("get", dict_get), - "items": NewBuiltin("items", dict_items), - "keys": NewBuiltin("keys", dict_keys), - "pop": NewBuiltin("pop", dict_pop), - "popitem": NewBuiltin("popitem", dict_popitem), - "setdefault": NewBuiltin("setdefault", dict_setdefault), - "update": NewBuiltin("update", dict_update), - "values": NewBuiltin("values", dict_values), - } - - listMethods = map[string]*Builtin{ - "append": NewBuiltin("append", list_append), - "clear": NewBuiltin("clear", list_clear), - "extend": NewBuiltin("extend", list_extend), - "index": NewBuiltin("index", list_index), - "insert": NewBuiltin("insert", list_insert), - "pop": NewBuiltin("pop", list_pop), - "remove": NewBuiltin("remove", list_remove), - } - - stringMethods = map[string]*Builtin{ - "capitalize": NewBuiltin("capitalize", string_capitalize), - "codepoint_ords": NewBuiltin("codepoint_ords", string_iterable), - "codepoints": NewBuiltin("codepoints", string_iterable), // sic - "count": NewBuiltin("count", string_count), - "elem_ords": NewBuiltin("elem_ords", string_iterable), - "elems": NewBuiltin("elems", string_iterable), // sic - "endswith": NewBuiltin("endswith", string_startswith), // sic - "find": NewBuiltin("find", string_find), - "format": NewBuiltin("format", string_format), - "index": NewBuiltin("index", string_index), - "isalnum": NewBuiltin("isalnum", string_isalnum), - "isalpha": NewBuiltin("isalpha", string_isalpha), - "isdigit": NewBuiltin("isdigit", string_isdigit), - "islower": NewBuiltin("islower", string_islower), - "isspace": NewBuiltin("isspace", string_isspace), - "istitle": NewBuiltin("istitle", string_istitle), - "isupper": NewBuiltin("isupper", string_isupper), - "join": NewBuiltin("join", string_join), - "lower": NewBuiltin("lower", string_lower), - "lstrip": NewBuiltin("lstrip", string_strip), // sic - "partition": NewBuiltin("partition", string_partition), - 
"removeprefix": NewBuiltin("removeprefix", string_removefix), - "removesuffix": NewBuiltin("removesuffix", string_removefix), - "replace": NewBuiltin("replace", string_replace), - "rfind": NewBuiltin("rfind", string_rfind), - "rindex": NewBuiltin("rindex", string_rindex), - "rpartition": NewBuiltin("rpartition", string_partition), // sic - "rsplit": NewBuiltin("rsplit", string_split), // sic - "rstrip": NewBuiltin("rstrip", string_strip), // sic - "split": NewBuiltin("split", string_split), - "splitlines": NewBuiltin("splitlines", string_splitlines), - "startswith": NewBuiltin("startswith", string_startswith), - "strip": NewBuiltin("strip", string_strip), - "title": NewBuiltin("title", string_title), - "upper": NewBuiltin("upper", string_upper), - } - - setMethods = map[string]*Builtin{ - "union": NewBuiltin("union", set_union), - } -) - -func builtinAttr(recv Value, name string, methods map[string]*Builtin) (Value, error) { - b := methods[name] - if b == nil { - return nil, nil // no such method - } - return b.BindReceiver(recv), nil -} - -func builtinAttrNames(methods map[string]*Builtin) []string { - names := make([]string, 0, len(methods)) - for name := range methods { - names = append(names, name) - } - sort.Strings(names) - return names -} - -// ---- built-in functions ---- - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#abs -func abs(thread *Thread, _ *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - var x Value - if err := UnpackPositionalArgs("abs", args, kwargs, 1, &x); err != nil { - return nil, err - } - switch x := x.(type) { - case Float: - return Float(math.Abs(float64(x))), nil - case Int: - if x.Sign() >= 0 { - return x, nil - } - return zero.Sub(x), nil - default: - return nil, fmt.Errorf("got %s, want int or float", x.Type()) - } -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#all -func all(thread *Thread, _ *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - var iterable Iterable - if err := UnpackPositionalArgs("all", args, kwargs, 1, &iterable); err != nil { - return nil, err - } - iter := iterable.Iterate() - defer iter.Done() - var x Value - for iter.Next(&x) { - if !x.Truth() { - return False, nil - } - } - return True, nil -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#any -func any(thread *Thread, _ *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - var iterable Iterable - if err := UnpackPositionalArgs("any", args, kwargs, 1, &iterable); err != nil { - return nil, err - } - iter := iterable.Iterate() - defer iter.Done() - var x Value - for iter.Next(&x) { - if x.Truth() { - return True, nil - } - } - return False, nil -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#bool -func bool_(thread *Thread, _ *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - var x Value = False - if err := UnpackPositionalArgs("bool", args, kwargs, 0, &x); err != nil { - return nil, err - } - return x.Truth(), nil -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#bytes -func bytes_(thread *Thread, _ *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - if len(kwargs) > 0 { - return nil, fmt.Errorf("bytes does not accept keyword arguments") - } - if len(args) != 1 { - return nil, fmt.Errorf("bytes: got %d arguments, want exactly 1", len(args)) - } - switch x := args[0].(type) { - case Bytes: - return x, nil - case String: - // Invalid encodings are replaced by that of U+FFFD. 
- return Bytes(utf8Transcode(string(x))), nil - case Iterable: - // iterable of numeric byte values - var buf strings.Builder - if n := Len(x); n >= 0 { - // common case: known length - buf.Grow(n) - } - iter := x.Iterate() - defer iter.Done() - var elem Value - var b byte - for i := 0; iter.Next(&elem); i++ { - if err := AsInt(elem, &b); err != nil { - return nil, fmt.Errorf("bytes: at index %d, %s", i, err) - } - buf.WriteByte(b) - } - return Bytes(buf.String()), nil - - default: - // Unlike string(foo), which stringifies it, bytes(foo) is an error. - return nil, fmt.Errorf("bytes: got %s, want string, bytes, or iterable of ints", x.Type()) - } -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#chr -func chr(thread *Thread, _ *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - if len(kwargs) > 0 { - return nil, fmt.Errorf("chr does not accept keyword arguments") - } - if len(args) != 1 { - return nil, fmt.Errorf("chr: got %d arguments, want 1", len(args)) - } - i, err := AsInt32(args[0]) - if err != nil { - return nil, fmt.Errorf("chr: %s", err) - } - if i < 0 { - return nil, fmt.Errorf("chr: Unicode code point %d out of range (<0)", i) - } - if i > unicode.MaxRune { - return nil, fmt.Errorf("chr: Unicode code point U+%X out of range (>0x10FFFF)", i) - } - return String(string(rune(i))), nil -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#dict -func dict(thread *Thread, _ *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - if len(args) > 1 { - return nil, fmt.Errorf("dict: got %d arguments, want at most 1", len(args)) - } - dict := new(Dict) - if err := updateDict(dict, args, kwargs); err != nil { - return nil, fmt.Errorf("dict: %v", err) - } - return dict, nil -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#dir -func dir(thread *Thread, _ *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - if len(kwargs) > 0 { - return nil, fmt.Errorf("dir does not accept keyword arguments") - } - if len(args) != 1 { - return nil, fmt.Errorf("dir: got %d arguments, want 1", len(args)) - } - - var names []string - if x, ok := args[0].(HasAttrs); ok { - names = x.AttrNames() - } - sort.Strings(names) - elems := make([]Value, len(names)) - for i, name := range names { - elems[i] = String(name) - } - return NewList(elems), nil -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#enumerate -func enumerate(thread *Thread, _ *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - var iterable Iterable - var start int - if err := UnpackPositionalArgs("enumerate", args, kwargs, 1, &iterable, &start); err != nil { - return nil, err - } - - iter := iterable.Iterate() - defer iter.Done() - - var pairs []Value - var x Value - - if n := Len(iterable); n >= 0 { - // common case: known length - pairs = make([]Value, 0, n) - array := make(Tuple, 2*n) // allocate a single backing array - for i := 0; iter.Next(&x); i++ { - pair := array[:2:2] - array = array[2:] - pair[0] = MakeInt(start + i) - pair[1] = x - pairs = append(pairs, pair) - } - } else { - // non-sequence (unknown length) - for i := 0; iter.Next(&x); i++ { - pair := Tuple{MakeInt(start + i), x} - pairs = append(pairs, pair) - } - } - - return NewList(pairs), nil -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#fail -func fail(thread *Thread, b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - sep := " " - if err := UnpackArgs("fail", nil, kwargs, "sep?", &sep); err != nil { - return nil, err - } - buf := new(strings.Builder) - 
buf.WriteString("fail: ") - for i, v := range args { - if i > 0 { - buf.WriteString(sep) - } - if s, ok := AsString(v); ok { - buf.WriteString(s) - } else { - writeValue(buf, v, nil) - } - } - - return nil, errors.New(buf.String()) -} - -func float(thread *Thread, b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - if len(kwargs) > 0 { - return nil, fmt.Errorf("float does not accept keyword arguments") - } - if len(args) == 0 { - return Float(0.0), nil - } - if len(args) != 1 { - return nil, fmt.Errorf("float got %d arguments, wants 1", len(args)) - } - switch x := args[0].(type) { - case Bool: - if x { - return Float(1.0), nil - } else { - return Float(0.0), nil - } - case Int: - return x.finiteFloat() - case Float: - return x, nil - case String: - if x == "" { - return nil, fmt.Errorf("float: empty string") - } - // +/- NaN or Inf or Infinity (case insensitive)? - s := string(x) - switch x[len(x)-1] { - case 'y', 'Y': - if strings.EqualFold(s, "infinity") || strings.EqualFold(s, "+infinity") { - return inf, nil - } else if strings.EqualFold(s, "-infinity") { - return neginf, nil - } - case 'f', 'F': - if strings.EqualFold(s, "inf") || strings.EqualFold(s, "+inf") { - return inf, nil - } else if strings.EqualFold(s, "-inf") { - return neginf, nil - } - case 'n', 'N': - if strings.EqualFold(s, "nan") || strings.EqualFold(s, "+nan") || strings.EqualFold(s, "-nan") { - return nan, nil - } - } - f, err := strconv.ParseFloat(s, 64) - if math.IsInf(f, 0) { - return nil, fmt.Errorf("floating-point number too large") - } - if err != nil { - return nil, fmt.Errorf("invalid float literal: %s", s) - } - return Float(f), nil - default: - return nil, fmt.Errorf("float got %s, want number or string", x.Type()) - } -} - -var ( - inf = Float(math.Inf(+1)) - neginf = Float(math.Inf(-1)) - nan = Float(math.NaN()) -) - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#getattr -func getattr(thread *Thread, b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - var object, dflt Value - var name string - if err := UnpackPositionalArgs("getattr", args, kwargs, 2, &object, &name, &dflt); err != nil { - return nil, err - } - if object, ok := object.(HasAttrs); ok { - v, err := object.Attr(name) - if err != nil { - // An error could mean the field doesn't exist, - // or it exists but could not be computed. - if dflt != nil { - return dflt, nil - } - return nil, nameErr(b, err) - } - if v != nil { - return v, nil - } - // (nil, nil) => no such field - } - if dflt != nil { - return dflt, nil - } - return nil, fmt.Errorf("getattr: %s has no .%s field or method", object.Type(), name) -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#hasattr -func hasattr(thread *Thread, _ *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - var object Value - var name string - if err := UnpackPositionalArgs("hasattr", args, kwargs, 2, &object, &name); err != nil { - return nil, err - } - if object, ok := object.(HasAttrs); ok { - v, err := object.Attr(name) - if err == nil { - return Bool(v != nil), nil - } - - // An error does not conclusively indicate presence or - // absence of a field: it could occur while computing - // the value of a present attribute, or it could be a - // "no such attribute" error with details. 
- for _, x := range object.AttrNames() { - if x == name { - return True, nil - } - } - } - return False, nil -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#hash -func hash(thread *Thread, _ *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - var x Value - if err := UnpackPositionalArgs("hash", args, kwargs, 1, &x); err != nil { - return nil, err - } - - var h int64 - switch x := x.(type) { - case String: - // The Starlark spec requires that the hash function be - // deterministic across all runs, motivated by the need - // for reproducibility of builds. Thus we cannot call - // String.Hash, which uses the fastest implementation - // available, because as varies across process restarts, - // and may evolve with the implementation. - h = int64(javaStringHash(string(x))) - case Bytes: - h = int64(softHashString(string(x))) // FNV32 - default: - return nil, fmt.Errorf("hash: got %s, want string or bytes", x.Type()) - } - return MakeInt64(h), nil -} - -// javaStringHash returns the same hash as would be produced by -// java.lang.String.hashCode. This requires transcoding the string to -// UTF-16; transcoding may introduce Unicode replacement characters -// U+FFFD if s does not contain valid UTF-8. -func javaStringHash(s string) (h int32) { - for _, r := range s { - if utf16.IsSurrogate(r) { - c1, c2 := utf16.EncodeRune(r) - h = 31*h + c1 - h = 31*h + c2 - } else { - h = 31*h + r // r may be U+FFFD - } - } - return h -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#int -func int_(thread *Thread, _ *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - var x Value = zero - var base Value - if err := UnpackArgs("int", args, kwargs, "x", &x, "base?", &base); err != nil { - return nil, err - } - - if s, ok := AsString(x); ok { - b := 10 - if base != nil { - var err error - b, err = AsInt32(base) - if err != nil { - return nil, fmt.Errorf("int: for base, got %s, want int", base.Type()) - } - if b != 0 && (b < 2 || b > 36) { - return nil, fmt.Errorf("int: base must be an integer >= 2 && <= 36") - } - } - res := parseInt(s, b) - if res == nil { - return nil, fmt.Errorf("int: invalid literal with base %d: %s", b, s) - } - return res, nil - } - - if base != nil { - return nil, fmt.Errorf("int: can't convert non-string with explicit base") - } - - if b, ok := x.(Bool); ok { - if b { - return one, nil - } else { - return zero, nil - } - } - - i, err := NumberToInt(x) - if err != nil { - return nil, fmt.Errorf("int: %s", err) - } - return i, nil -} - -// parseInt defines the behavior of int(string, base=int). It returns nil on error. -func parseInt(s string, base int) Value { - // remove sign - var neg bool - if s != "" { - if s[0] == '+' { - s = s[1:] - } else if s[0] == '-' { - neg = true - s = s[1:] - } - } - - // remove optional base prefix - baseprefix := 0 - if len(s) > 1 && s[0] == '0' { - if len(s) > 2 { - switch s[1] { - case 'o', 'O': - baseprefix = 8 - case 'x', 'X': - baseprefix = 16 - case 'b', 'B': - baseprefix = 2 - } - } - if baseprefix != 0 { - // Remove the base prefix if it matches - // the explicit base, or if base=0. - if base == 0 || baseprefix == base { - base = baseprefix - s = s[2:] - } - } else { - // For automatic base detection, - // a string starting with zero - // must be all zeros. - // Thus we reject int("0755", 0). - if base == 0 { - for i := 1; i < len(s); i++ { - if s[i] != '0' { - return nil - } - } - return zero - } - } - } - if base == 0 { - base = 10 - } - - // we explicitly handled sign above. 
- // if a sign remains, it is invalid. - if s != "" && (s[0] == '-' || s[0] == '+') { - return nil - } - - // s has no sign or base prefix. - if i, ok := new(big.Int).SetString(s, base); ok { - res := MakeBigInt(i) - if neg { - res = zero.Sub(res) - } - return res - } - - return nil -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#len -func len_(thread *Thread, _ *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - var x Value - if err := UnpackPositionalArgs("len", args, kwargs, 1, &x); err != nil { - return nil, err - } - len := Len(x) - if len < 0 { - return nil, fmt.Errorf("len: value of type %s has no len", x.Type()) - } - return MakeInt(len), nil -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#list -func list(thread *Thread, _ *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - var iterable Iterable - if err := UnpackPositionalArgs("list", args, kwargs, 0, &iterable); err != nil { - return nil, err - } - var elems []Value - if iterable != nil { - iter := iterable.Iterate() - defer iter.Done() - if n := Len(iterable); n > 0 { - elems = make([]Value, 0, n) // preallocate if length known - } - var x Value - for iter.Next(&x) { - elems = append(elems, x) - } - } - return NewList(elems), nil -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#min -func minmax(thread *Thread, b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - if len(args) == 0 { - return nil, fmt.Errorf("%s requires at least one positional argument", b.Name()) - } - var keyFunc Callable - if err := UnpackArgs(b.Name(), nil, kwargs, "key?", &keyFunc); err != nil { - return nil, err - } - var op syntax.Token - if b.Name() == "max" { - op = syntax.GT - } else { - op = syntax.LT - } - var iterable Value - if len(args) == 1 { - iterable = args[0] - } else { - iterable = args - } - iter := Iterate(iterable) - if iter == nil { - return nil, fmt.Errorf("%s: %s value is not iterable", b.Name(), iterable.Type()) - } - defer iter.Done() - var extremum Value - if !iter.Next(&extremum) { - return nil, nameErr(b, "argument is an empty sequence") - } - - var extremeKey Value - var keyargs Tuple - if keyFunc == nil { - extremeKey = extremum - } else { - keyargs = Tuple{extremum} - res, err := Call(thread, keyFunc, keyargs, nil) - if err != nil { - return nil, err // to preserve backtrace, don't modify error - } - extremeKey = res - } - - var x Value - for iter.Next(&x) { - var key Value - if keyFunc == nil { - key = x - } else { - keyargs[0] = x - res, err := Call(thread, keyFunc, keyargs, nil) - if err != nil { - return nil, err // to preserve backtrace, don't modify error - } - key = res - } - - if ok, err := Compare(op, key, extremeKey); err != nil { - return nil, nameErr(b, err) - } else if ok { - extremum = x - extremeKey = key - } - } - return extremum, nil -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#ord -func ord(thread *Thread, _ *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - if len(kwargs) > 0 { - return nil, fmt.Errorf("ord does not accept keyword arguments") - } - if len(args) != 1 { - return nil, fmt.Errorf("ord: got %d arguments, want 1", len(args)) - } - switch x := args[0].(type) { - case String: - // ord(string) returns int value of sole rune. 
- s := string(x) - r, sz := utf8.DecodeRuneInString(s) - if sz == 0 || sz != len(s) { - n := utf8.RuneCountInString(s) - return nil, fmt.Errorf("ord: string encodes %d Unicode code points, want 1", n) - } - return MakeInt(int(r)), nil - - case Bytes: - // ord(bytes) returns int value of sole byte. - if len(x) != 1 { - return nil, fmt.Errorf("ord: bytes has length %d, want 1", len(x)) - } - return MakeInt(int(x[0])), nil - default: - return nil, fmt.Errorf("ord: got %s, want string or bytes", x.Type()) - } -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#print -func print(thread *Thread, b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - sep := " " - if err := UnpackArgs("print", nil, kwargs, "sep?", &sep); err != nil { - return nil, err - } - buf := new(strings.Builder) - for i, v := range args { - if i > 0 { - buf.WriteString(sep) - } - if s, ok := AsString(v); ok { - buf.WriteString(s) - } else if b, ok := v.(Bytes); ok { - buf.WriteString(string(b)) - } else { - writeValue(buf, v, nil) - } - } - - s := buf.String() - if thread.Print != nil { - thread.Print(thread, s) - } else { - fmt.Fprintln(os.Stderr, s) - } - return None, nil -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#range -func range_(thread *Thread, b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - var start, stop, step int - step = 1 - if err := UnpackPositionalArgs("range", args, kwargs, 1, &start, &stop, &step); err != nil { - return nil, err - } - - if len(args) == 1 { - // range(stop) - start, stop = 0, start - } - if step == 0 { - // we were given range(start, stop, 0) - return nil, nameErr(b, "step argument must not be zero") - } - - return rangeValue{start: start, stop: stop, step: step, len: rangeLen(start, stop, step)}, nil -} - -// A rangeValue is a comparable, immutable, indexable sequence of integers -// defined by the three parameters to a range(...) call. -// Invariant: step != 0. -type rangeValue struct{ start, stop, step, len int } - -var ( - _ Indexable = rangeValue{} - _ Sequence = rangeValue{} - _ Comparable = rangeValue{} - _ Sliceable = rangeValue{} -) - -func (r rangeValue) Len() int { return r.len } -func (r rangeValue) Index(i int) Value { return MakeInt(r.start + i*r.step) } -func (r rangeValue) Iterate() Iterator { return &rangeIterator{r, 0} } - -// rangeLen calculates the length of a range with the provided start, stop, and step. -// caller must ensure that step is non-zero. 
-func rangeLen(start, stop, step int) int { - switch { - case step > 0: - if stop > start { - return (stop-1-start)/step + 1 - } - case step < 0: - if start > stop { - return (start-1-stop)/-step + 1 - } - default: - panic("rangeLen: zero step") - } - return 0 -} - -func (r rangeValue) Slice(start, end, step int) Value { - newStart := r.start + r.step*start - newStop := r.start + r.step*end - newStep := r.step * step - return rangeValue{ - start: newStart, - stop: newStop, - step: newStep, - len: rangeLen(newStart, newStop, newStep), - } -} - -func (r rangeValue) Freeze() {} // immutable -func (r rangeValue) String() string { - if r.step != 1 { - return fmt.Sprintf("range(%d, %d, %d)", r.start, r.stop, r.step) - } else if r.start != 0 { - return fmt.Sprintf("range(%d, %d)", r.start, r.stop) - } else { - return fmt.Sprintf("range(%d)", r.stop) - } -} -func (r rangeValue) Type() string { return "range" } -func (r rangeValue) Truth() Bool { return r.len > 0 } -func (r rangeValue) Hash() (uint32, error) { return 0, fmt.Errorf("unhashable: range") } - -func (x rangeValue) CompareSameType(op syntax.Token, y_ Value, depth int) (bool, error) { - y := y_.(rangeValue) - switch op { - case syntax.EQL: - return rangeEqual(x, y), nil - case syntax.NEQ: - return !rangeEqual(x, y), nil - default: - return false, fmt.Errorf("%s %s %s not implemented", x.Type(), op, y.Type()) - } -} - -func rangeEqual(x, y rangeValue) bool { - // Two ranges compare equal if they denote the same sequence. - if x.len != y.len { - return false // sequences differ in length - } - if x.len == 0 { - return true // both sequences are empty - } - if x.start != y.start { - return false // first element differs - } - return x.len == 1 || x.step == y.step -} - -func (r rangeValue) contains(x Int) bool { - x32, err := AsInt32(x) - if err != nil { - return false // out of range - } - delta := x32 - r.start - quo, rem := delta/r.step, delta%r.step - return rem == 0 && 0 <= quo && quo < r.len -} - -type rangeIterator struct { - r rangeValue - i int -} - -func (it *rangeIterator) Next(p *Value) bool { - if it.i < it.r.len { - *p = it.r.Index(it.i) - it.i++ - return true - } - return false -} -func (*rangeIterator) Done() {} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#repr -func repr(thread *Thread, _ *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - var x Value - if err := UnpackPositionalArgs("repr", args, kwargs, 1, &x); err != nil { - return nil, err - } - return String(x.String()), nil -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#reversed -func reversed(thread *Thread, _ *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - var iterable Iterable - if err := UnpackPositionalArgs("reversed", args, kwargs, 1, &iterable); err != nil { - return nil, err - } - iter := iterable.Iterate() - defer iter.Done() - var elems []Value - if n := Len(args[0]); n >= 0 { - elems = make([]Value, 0, n) // preallocate if length known - } - var x Value - for iter.Next(&x) { - elems = append(elems, x) - } - n := len(elems) - for i := 0; i < n>>1; i++ { - elems[i], elems[n-1-i] = elems[n-1-i], elems[i] - } - return NewList(elems), nil -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#set -func set(thread *Thread, b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - var iterable Iterable - if err := UnpackPositionalArgs("set", args, kwargs, 0, &iterable); err != nil { - return nil, err - } - set := new(Set) - if iterable != nil { - iter := iterable.Iterate() - defer 
iter.Done() - var x Value - for iter.Next(&x) { - if err := set.Insert(x); err != nil { - return nil, nameErr(b, err) - } - } - } - return set, nil -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#sorted -func sorted(thread *Thread, _ *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - // Oddly, Python's sorted permits all arguments to be positional, thus so do we. - var iterable Iterable - var key Callable - var reverse bool - if err := UnpackArgs("sorted", args, kwargs, - "iterable", &iterable, - "key?", &key, - "reverse?", &reverse, - ); err != nil { - return nil, err - } - - iter := iterable.Iterate() - defer iter.Done() - var values []Value - if n := Len(iterable); n > 0 { - values = make(Tuple, 0, n) // preallocate if length is known - } - var x Value - for iter.Next(&x) { - values = append(values, x) - } - - // Derive keys from values by applying key function. - var keys []Value - if key != nil { - keys = make([]Value, len(values)) - for i, v := range values { - k, err := Call(thread, key, Tuple{v}, nil) - if err != nil { - return nil, err // to preserve backtrace, don't modify error - } - keys[i] = k - } - } - - slice := &sortSlice{keys: keys, values: values} - if reverse { - sort.Stable(sort.Reverse(slice)) - } else { - sort.Stable(slice) - } - return NewList(slice.values), slice.err -} - -type sortSlice struct { - keys []Value // nil => values[i] is key - values []Value - err error -} - -func (s *sortSlice) Len() int { return len(s.values) } -func (s *sortSlice) Less(i, j int) bool { - keys := s.keys - if s.keys == nil { - keys = s.values - } - ok, err := Compare(syntax.LT, keys[i], keys[j]) - if err != nil { - s.err = err - } - return ok -} -func (s *sortSlice) Swap(i, j int) { - if s.keys != nil { - s.keys[i], s.keys[j] = s.keys[j], s.keys[i] - } - s.values[i], s.values[j] = s.values[j], s.values[i] -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#str -func str(thread *Thread, _ *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - if len(kwargs) > 0 { - return nil, fmt.Errorf("str does not accept keyword arguments") - } - if len(args) != 1 { - return nil, fmt.Errorf("str: got %d arguments, want exactly 1", len(args)) - } - switch x := args[0].(type) { - case String: - return x, nil - case Bytes: - // Invalid encodings are replaced by that of U+FFFD. - return String(utf8Transcode(string(x))), nil - default: - return String(x.String()), nil - } -} - -// utf8Transcode returns the UTF-8-to-UTF-8 transcoding of s. -// The effect is that each code unit that is part of an -// invalid sequence is replaced by U+FFFD. 
-func utf8Transcode(s string) string { - if utf8.ValidString(s) { - return s - } - var out strings.Builder - for _, r := range s { - out.WriteRune(r) - } - return out.String() -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#tuple -func tuple(thread *Thread, _ *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - var iterable Iterable - if err := UnpackPositionalArgs("tuple", args, kwargs, 0, &iterable); err != nil { - return nil, err - } - if len(args) == 0 { - return Tuple(nil), nil - } - iter := iterable.Iterate() - defer iter.Done() - var elems Tuple - if n := Len(iterable); n > 0 { - elems = make(Tuple, 0, n) // preallocate if length is known - } - var x Value - for iter.Next(&x) { - elems = append(elems, x) - } - return elems, nil -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#type -func type_(thread *Thread, _ *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - if len(kwargs) > 0 { - return nil, fmt.Errorf("type does not accept keyword arguments") - } - if len(args) != 1 { - return nil, fmt.Errorf("type: got %d arguments, want exactly 1", len(args)) - } - return String(args[0].Type()), nil -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#zip -func zip(thread *Thread, _ *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - if len(kwargs) > 0 { - return nil, fmt.Errorf("zip does not accept keyword arguments") - } - rows, cols := 0, len(args) - iters := make([]Iterator, cols) - defer func() { - for _, iter := range iters { - if iter != nil { - iter.Done() - } - } - }() - for i, seq := range args { - it := Iterate(seq) - if it == nil { - return nil, fmt.Errorf("zip: argument #%d is not iterable: %s", i+1, seq.Type()) - } - iters[i] = it - n := Len(seq) - if i == 0 || n < rows { - rows = n // possibly -1 - } - } - var result []Value - if rows >= 0 { - // length known - result = make([]Value, rows) - array := make(Tuple, cols*rows) // allocate a single backing array - for i := 0; i < rows; i++ { - tuple := array[:cols:cols] - array = array[cols:] - for j, iter := range iters { - iter.Next(&tuple[j]) - } - result[i] = tuple - } - } else { - // length not known - outer: - for { - tuple := make(Tuple, cols) - for i, iter := range iters { - if !iter.Next(&tuple[i]) { - break outer - } - } - result = append(result, tuple) - } - } - return NewList(result), nil -} - -// ---- methods of built-in types --- - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#dict·get -func dict_get(_ *Thread, b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - var key, dflt Value - if err := UnpackPositionalArgs(b.Name(), args, kwargs, 1, &key, &dflt); err != nil { - return nil, err - } - if v, ok, err := b.Receiver().(*Dict).Get(key); err != nil { - return nil, nameErr(b, err) - } else if ok { - return v, nil - } else if dflt != nil { - return dflt, nil - } - return None, nil -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#dict·clear -func dict_clear(_ *Thread, b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - if err := UnpackPositionalArgs(b.Name(), args, kwargs, 0); err != nil { - return nil, err - } - return None, b.Receiver().(*Dict).Clear() -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#dict·items -func dict_items(_ *Thread, b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - if err := UnpackPositionalArgs(b.Name(), args, kwargs, 0); err != nil { - return nil, err - } - items := b.Receiver().(*Dict).Items() - res := make([]Value, len(items)) - for 
i, item := range items { - res[i] = item // convert [2]Value to Value - } - return NewList(res), nil -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#dict·keys -func dict_keys(_ *Thread, b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - if err := UnpackPositionalArgs(b.Name(), args, kwargs, 0); err != nil { - return nil, err - } - return NewList(b.Receiver().(*Dict).Keys()), nil -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#dict·pop -func dict_pop(_ *Thread, b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - var k, d Value - if err := UnpackPositionalArgs(b.Name(), args, kwargs, 1, &k, &d); err != nil { - return nil, err - } - if v, found, err := b.Receiver().(*Dict).Delete(k); err != nil { - return nil, nameErr(b, err) // dict is frozen or key is unhashable - } else if found { - return v, nil - } else if d != nil { - return d, nil - } - return nil, nameErr(b, "missing key") -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#dict·popitem -func dict_popitem(_ *Thread, b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - if err := UnpackPositionalArgs(b.Name(), args, kwargs, 0); err != nil { - return nil, err - } - recv := b.Receiver().(*Dict) - k, ok := recv.ht.first() - if !ok { - return nil, nameErr(b, "empty dict") - } - v, _, err := recv.Delete(k) - if err != nil { - return nil, nameErr(b, err) // dict is frozen - } - return Tuple{k, v}, nil -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#dict·setdefault -func dict_setdefault(_ *Thread, b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - var key, dflt Value = nil, None - if err := UnpackPositionalArgs(b.Name(), args, kwargs, 1, &key, &dflt); err != nil { - return nil, err - } - dict := b.Receiver().(*Dict) - if v, ok, err := dict.Get(key); err != nil { - return nil, nameErr(b, err) - } else if ok { - return v, nil - } else if err := dict.SetKey(key, dflt); err != nil { - return nil, nameErr(b, err) - } else { - return dflt, nil - } -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#dict·update -func dict_update(_ *Thread, b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - if len(args) > 1 { - return nil, fmt.Errorf("update: got %d arguments, want at most 1", len(args)) - } - if err := updateDict(b.Receiver().(*Dict), args, kwargs); err != nil { - return nil, fmt.Errorf("update: %v", err) - } - return None, nil -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#dict·update -func dict_values(_ *Thread, b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - if err := UnpackPositionalArgs(b.Name(), args, kwargs, 0); err != nil { - return nil, err - } - items := b.Receiver().(*Dict).Items() - res := make([]Value, len(items)) - for i, item := range items { - res[i] = item[1] - } - return NewList(res), nil -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#list·append -func list_append(_ *Thread, b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - var object Value - if err := UnpackPositionalArgs(b.Name(), args, kwargs, 1, &object); err != nil { - return nil, err - } - recv := b.Receiver().(*List) - if err := recv.checkMutable("append to"); err != nil { - return nil, nameErr(b, err) - } - recv.elems = append(recv.elems, object) - return None, nil -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#list·clear -func list_clear(_ *Thread, b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - if err := 
UnpackPositionalArgs(b.Name(), args, kwargs, 0); err != nil { - return nil, err - } - if err := b.Receiver().(*List).Clear(); err != nil { - return nil, nameErr(b, err) - } - return None, nil -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#list·extend -func list_extend(_ *Thread, b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - recv := b.Receiver().(*List) - var iterable Iterable - if err := UnpackPositionalArgs(b.Name(), args, kwargs, 1, &iterable); err != nil { - return nil, err - } - if err := recv.checkMutable("extend"); err != nil { - return nil, nameErr(b, err) - } - listExtend(recv, iterable) - return None, nil -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#list·index -func list_index(_ *Thread, b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - var value, start_, end_ Value - if err := UnpackPositionalArgs(b.Name(), args, kwargs, 1, &value, &start_, &end_); err != nil { - return nil, err - } - - recv := b.Receiver().(*List) - start, end, err := indices(start_, end_, recv.Len()) - if err != nil { - return nil, nameErr(b, err) - } - - for i := start; i < end; i++ { - if eq, err := Equal(recv.elems[i], value); err != nil { - return nil, nameErr(b, err) - } else if eq { - return MakeInt(i), nil - } - } - return nil, nameErr(b, "value not in list") -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#list·insert -func list_insert(_ *Thread, b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - recv := b.Receiver().(*List) - var index int - var object Value - if err := UnpackPositionalArgs(b.Name(), args, kwargs, 2, &index, &object); err != nil { - return nil, err - } - if err := recv.checkMutable("insert into"); err != nil { - return nil, nameErr(b, err) - } - - if index < 0 { - index += recv.Len() - } - - if index >= recv.Len() { - // end - recv.elems = append(recv.elems, object) - } else { - if index < 0 { - index = 0 // start - } - recv.elems = append(recv.elems, nil) - copy(recv.elems[index+1:], recv.elems[index:]) // slide up one - recv.elems[index] = object - } - return None, nil -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#list·remove -func list_remove(_ *Thread, b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - recv := b.Receiver().(*List) - var value Value - if err := UnpackPositionalArgs(b.Name(), args, kwargs, 1, &value); err != nil { - return nil, err - } - if err := recv.checkMutable("remove from"); err != nil { - return nil, nameErr(b, err) - } - for i, elem := range recv.elems { - if eq, err := Equal(elem, value); err != nil { - return nil, fmt.Errorf("remove: %v", err) - } else if eq { - recv.elems = append(recv.elems[:i], recv.elems[i+1:]...) - return None, nil - } - } - return nil, fmt.Errorf("remove: element not found") -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#list·pop -func list_pop(_ *Thread, b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - recv := b.Receiver() - list := recv.(*List) - n := list.Len() - i := n - 1 - if err := UnpackPositionalArgs(b.Name(), args, kwargs, 0, &i); err != nil { - return nil, err - } - origI := i - if i < 0 { - i += n - } - if i < 0 || i >= n { - return nil, nameErr(b, outOfRange(origI, n, list)) - } - if err := list.checkMutable("pop from"); err != nil { - return nil, nameErr(b, err) - } - res := list.elems[i] - list.elems = append(list.elems[:i], list.elems[i+1:]...) 
- return res, nil -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#string·capitalize -func string_capitalize(_ *Thread, b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - if err := UnpackPositionalArgs(b.Name(), args, kwargs, 0); err != nil { - return nil, err - } - s := string(b.Receiver().(String)) - res := new(strings.Builder) - res.Grow(len(s)) - for i, r := range s { - if i == 0 { - r = unicode.ToTitle(r) - } else { - r = unicode.ToLower(r) - } - res.WriteRune(r) - } - return String(res.String()), nil -} - -// string_iterable returns an unspecified iterable value whose iterator yields: -// - elems: successive 1-byte substrings -// - codepoints: successive substrings that encode a single Unicode code point. -// - elem_ords: numeric values of successive bytes -// - codepoint_ords: numeric values of successive Unicode code points -func string_iterable(_ *Thread, b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - if err := UnpackPositionalArgs(b.Name(), args, kwargs, 0); err != nil { - return nil, err - } - s := b.Receiver().(String) - ords := b.Name()[len(b.Name())-2] == 'd' - codepoints := b.Name()[0] == 'c' - if codepoints { - return stringCodepoints{s, ords}, nil - } else { - return stringElems{s, ords}, nil - } -} - -// bytes_elems returns an unspecified iterable value whose -// iterator yields the int values of successive elements. -func bytes_elems(_ *Thread, b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - if err := UnpackPositionalArgs(b.Name(), args, kwargs, 0); err != nil { - return nil, err - } - return bytesIterable{b.Receiver().(Bytes)}, nil -} - -// A bytesIterable is an iterable returned by bytes.elems(), -// whose iterator yields a sequence of numeric bytes values. -type bytesIterable struct{ bytes Bytes } - -var _ Iterable = (*bytesIterable)(nil) - -func (bi bytesIterable) String() string { return bi.bytes.String() + ".elems()" } -func (bi bytesIterable) Type() string { return "bytes.elems" } -func (bi bytesIterable) Freeze() {} // immutable -func (bi bytesIterable) Truth() Bool { return True } -func (bi bytesIterable) Hash() (uint32, error) { return 0, fmt.Errorf("unhashable: %s", bi.Type()) } -func (bi bytesIterable) Iterate() Iterator { return &bytesIterator{bi.bytes} } - -type bytesIterator struct{ bytes Bytes } - -func (it *bytesIterator) Next(p *Value) bool { - if it.bytes == "" { - return false - } - *p = MakeInt(int(it.bytes[0])) - it.bytes = it.bytes[1:] - return true -} - -func (*bytesIterator) Done() {} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#string·count -func string_count(_ *Thread, b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - var sub string - var start_, end_ Value - if err := UnpackPositionalArgs(b.Name(), args, kwargs, 1, &sub, &start_, &end_); err != nil { - return nil, err - } - - recv := string(b.Receiver().(String)) - start, end, err := indices(start_, end_, len(recv)) - if err != nil { - return nil, nameErr(b, err) - } - - var slice string - if start < end { - slice = recv[start:end] - } - return MakeInt(strings.Count(slice, sub)), nil -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#string·isalnum -func string_isalnum(_ *Thread, b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - if err := UnpackPositionalArgs(b.Name(), args, kwargs, 0); err != nil { - return nil, err - } - recv := string(b.Receiver().(String)) - for _, r := range recv { - if !unicode.IsLetter(r) && !unicode.IsDigit(r) { - return False, nil - } - } - return 
Bool(recv != ""), nil -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#string·isalpha -func string_isalpha(_ *Thread, b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - if err := UnpackPositionalArgs(b.Name(), args, kwargs, 0); err != nil { - return nil, err - } - recv := string(b.Receiver().(String)) - for _, r := range recv { - if !unicode.IsLetter(r) { - return False, nil - } - } - return Bool(recv != ""), nil -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#string·isdigit -func string_isdigit(_ *Thread, b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - if err := UnpackPositionalArgs(b.Name(), args, kwargs, 0); err != nil { - return nil, err - } - recv := string(b.Receiver().(String)) - for _, r := range recv { - if !unicode.IsDigit(r) { - return False, nil - } - } - return Bool(recv != ""), nil -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#string·islower -func string_islower(_ *Thread, b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - if err := UnpackPositionalArgs(b.Name(), args, kwargs, 0); err != nil { - return nil, err - } - recv := string(b.Receiver().(String)) - return Bool(isCasedString(recv) && recv == strings.ToLower(recv)), nil -} - -// isCasedString reports whether its argument contains any cased code points. -func isCasedString(s string) bool { - for _, r := range s { - if isCasedRune(r) { - return true - } - } - return false -} - -func isCasedRune(r rune) bool { - // It's unclear what the correct behavior is for a rune such as 'ffi', - // a lowercase letter with no upper or title case and no SimpleFold. - return 'a' <= r && r <= 'z' || 'A' <= r && r <= 'Z' || unicode.SimpleFold(r) != r -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#string·isspace -func string_isspace(_ *Thread, b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - if err := UnpackPositionalArgs(b.Name(), args, kwargs, 0); err != nil { - return nil, err - } - recv := string(b.Receiver().(String)) - for _, r := range recv { - if !unicode.IsSpace(r) { - return False, nil - } - } - return Bool(recv != ""), nil -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#string·istitle -func string_istitle(_ *Thread, b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - if err := UnpackPositionalArgs(b.Name(), args, kwargs, 0); err != nil { - return nil, err - } - recv := string(b.Receiver().(String)) - - // Python semantics differ from x==strings.{To,}Title(x) in Go: - // "uppercase characters may only follow uncased characters and - // lowercase characters only cased ones." - var cased, prevCased bool - for _, r := range recv { - if 'A' <= r && r <= 'Z' || unicode.IsTitle(r) { // e.g. 
"Dž" - if prevCased { - return False, nil - } - prevCased = true - cased = true - } else if unicode.IsLower(r) { - if !prevCased { - return False, nil - } - prevCased = true - cased = true - } else if unicode.IsUpper(r) { - return False, nil - } else { - prevCased = false - } - } - return Bool(cased), nil -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#string·isupper -func string_isupper(_ *Thread, b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - if err := UnpackPositionalArgs(b.Name(), args, kwargs, 0); err != nil { - return nil, err - } - recv := string(b.Receiver().(String)) - return Bool(isCasedString(recv) && recv == strings.ToUpper(recv)), nil -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#string·find -func string_find(_ *Thread, b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - return string_find_impl(b, args, kwargs, true, false) -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#string·format -func string_format(_ *Thread, b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - format := string(b.Receiver().(String)) - var auto, manual bool // kinds of positional indexing used - buf := new(strings.Builder) - index := 0 - for { - literal := format - i := strings.IndexByte(format, '{') - if i >= 0 { - literal = format[:i] - } - - // Replace "}}" with "}" in non-field portion, rejecting a lone '}'. - for { - j := strings.IndexByte(literal, '}') - if j < 0 { - buf.WriteString(literal) - break - } - if len(literal) == j+1 || literal[j+1] != '}' { - return nil, fmt.Errorf("format: single '}' in format") - } - buf.WriteString(literal[:j+1]) - literal = literal[j+2:] - } - - if i < 0 { - break // end of format string - } - - if i+1 < len(format) && format[i+1] == '{' { - // "{{" means a literal '{' - buf.WriteByte('{') - format = format[i+2:] - continue - } - - format = format[i+1:] - i = strings.IndexByte(format, '}') - if i < 0 { - return nil, fmt.Errorf("format: unmatched '{' in format") - } - - var arg Value - conv := "s" - var spec string - - field := format[:i] - format = format[i+1:] - - var name string - if i := strings.IndexByte(field, '!'); i < 0 { - // "name" or "name:spec" - if i := strings.IndexByte(field, ':'); i < 0 { - name = field - } else { - name = field[:i] - spec = field[i+1:] - } - } else { - // "name!conv" or "name!conv:spec" - name = field[:i] - field = field[i+1:] - // "conv" or "conv:spec" - if i := strings.IndexByte(field, ':'); i < 0 { - conv = field - } else { - conv = field[:i] - spec = field[i+1:] - } - } - - if name == "" { - // "{}": automatic indexing - if manual { - return nil, fmt.Errorf("format: cannot switch from manual field specification to automatic field numbering") - } - auto = true - if index >= len(args) { - return nil, fmt.Errorf("format: tuple index out of range") - } - arg = args[index] - index++ - } else if num, ok := decimal(name); ok { - // positional argument - if auto { - return nil, fmt.Errorf("format: cannot switch from automatic field numbering to manual field specification") - } - manual = true - if num >= len(args) { - return nil, fmt.Errorf("format: tuple index out of range") - } else { - arg = args[num] - } - } else { - // keyword argument - for _, kv := range kwargs { - if string(kv[0].(String)) == name { - arg = kv[1] - break - } - } - if arg == nil { - // Starlark does not support Python's x.y or a[i] syntaxes, - // or nested use of {...}. 
- if strings.Contains(name, ".") { - return nil, fmt.Errorf("format: attribute syntax x.y is not supported in replacement fields: %s", name) - } - if strings.Contains(name, "[") { - return nil, fmt.Errorf("format: element syntax a[i] is not supported in replacement fields: %s", name) - } - if strings.Contains(name, "{") { - return nil, fmt.Errorf("format: nested replacement fields not supported") - } - return nil, fmt.Errorf("format: keyword %s not found", name) - } - } - - if spec != "" { - // Starlark does not support Python's format_spec features. - return nil, fmt.Errorf("format spec features not supported in replacement fields: %s", spec) - } - - switch conv { - case "s": - if str, ok := AsString(arg); ok { - buf.WriteString(str) - } else { - writeValue(buf, arg, nil) - } - case "r": - writeValue(buf, arg, nil) - default: - return nil, fmt.Errorf("format: unknown conversion %q", conv) - } - } - return String(buf.String()), nil -} - -// decimal interprets s as a sequence of decimal digits. -func decimal(s string) (x int, ok bool) { - n := len(s) - for i := 0; i < n; i++ { - digit := s[i] - '0' - if digit > 9 { - return 0, false - } - x = x*10 + int(digit) - if x < 0 { - return 0, false // underflow - } - } - return x, true -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#string·index -func string_index(_ *Thread, b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - return string_find_impl(b, args, kwargs, false, false) -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#string·join -func string_join(_ *Thread, b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - recv := string(b.Receiver().(String)) - var iterable Iterable - if err := UnpackPositionalArgs(b.Name(), args, kwargs, 1, &iterable); err != nil { - return nil, err - } - iter := iterable.Iterate() - defer iter.Done() - buf := new(strings.Builder) - var x Value - for i := 0; iter.Next(&x); i++ { - if i > 0 { - buf.WriteString(recv) - } - s, ok := AsString(x) - if !ok { - return nil, fmt.Errorf("join: in list, want string, got %s", x.Type()) - } - buf.WriteString(s) - } - return String(buf.String()), nil -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#string·lower -func string_lower(_ *Thread, b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - if err := UnpackPositionalArgs(b.Name(), args, kwargs, 0); err != nil { - return nil, err - } - return String(strings.ToLower(string(b.Receiver().(String)))), nil -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#string·partition -func string_partition(_ *Thread, b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - recv := string(b.Receiver().(String)) - var sep string - if err := UnpackPositionalArgs(b.Name(), args, kwargs, 1, &sep); err != nil { - return nil, err - } - if sep == "" { - return nil, nameErr(b, "empty separator") - } - var i int - if b.Name()[0] == 'p' { - i = strings.Index(recv, sep) // partition - } else { - i = strings.LastIndex(recv, sep) // rpartition - } - tuple := make(Tuple, 0, 3) - if i < 0 { - if b.Name()[0] == 'p' { - tuple = append(tuple, String(recv), String(""), String("")) - } else { - tuple = append(tuple, String(""), String(""), String(recv)) - } - } else { - tuple = append(tuple, String(recv[:i]), String(sep), String(recv[i+len(sep):])) - } - return tuple, nil -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#string·removeprefix -// https://github.com/google/starlark-go/blob/master/doc/spec.md#string·removesuffix -func 
string_removefix(_ *Thread, b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - recv := string(b.Receiver().(String)) - var fix string - if err := UnpackPositionalArgs(b.Name(), args, kwargs, 1, &fix); err != nil { - return nil, err - } - if b.name[len("remove")] == 'p' { - recv = strings.TrimPrefix(recv, fix) - } else { - recv = strings.TrimSuffix(recv, fix) - } - return String(recv), nil -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#string·replace -func string_replace(_ *Thread, b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - recv := string(b.Receiver().(String)) - var old, new string - count := -1 - if err := UnpackPositionalArgs(b.Name(), args, kwargs, 2, &old, &new, &count); err != nil { - return nil, err - } - return String(strings.Replace(recv, old, new, count)), nil -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#string·rfind -func string_rfind(_ *Thread, b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - return string_find_impl(b, args, kwargs, true, true) -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#string·rindex -func string_rindex(_ *Thread, b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - return string_find_impl(b, args, kwargs, false, true) -} - -// https://github.com/google/starlark-go/starlark/blob/master/doc/spec.md#string·startswith -// https://github.com/google/starlark-go/starlark/blob/master/doc/spec.md#string·endswith -func string_startswith(_ *Thread, b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - var x Value - var start, end Value = None, None - if err := UnpackPositionalArgs(b.Name(), args, kwargs, 1, &x, &start, &end); err != nil { - return nil, err - } - - // compute effective substring. - s := string(b.Receiver().(String)) - if start, end, err := indices(start, end, len(s)); err != nil { - return nil, nameErr(b, err) - } else { - if end < start { - end = start // => empty result - } - s = s[start:end] - } - - f := strings.HasPrefix - if b.Name()[0] == 'e' { // endswith - f = strings.HasSuffix - } - - switch x := x.(type) { - case Tuple: - for i, x := range x { - prefix, ok := AsString(x) - if !ok { - return nil, fmt.Errorf("%s: want string, got %s, for element %d", - b.Name(), x.Type(), i) - } - if f(s, prefix) { - return True, nil - } - } - return False, nil - case String: - return Bool(f(s, string(x))), nil - } - return nil, fmt.Errorf("%s: got %s, want string or tuple of string", b.Name(), x.Type()) -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#string·strip -// https://github.com/google/starlark-go/blob/master/doc/spec.md#string·lstrip -// https://github.com/google/starlark-go/blob/master/doc/spec.md#string·rstrip -func string_strip(_ *Thread, b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - var chars string - if err := UnpackPositionalArgs(b.Name(), args, kwargs, 0, &chars); err != nil { - return nil, err - } - recv := string(b.Receiver().(String)) - var s string - switch b.Name()[0] { - case 's': // strip - if chars != "" { - s = strings.Trim(recv, chars) - } else { - s = strings.TrimSpace(recv) - } - case 'l': // lstrip - if chars != "" { - s = strings.TrimLeft(recv, chars) - } else { - s = strings.TrimLeftFunc(recv, unicode.IsSpace) - } - case 'r': // rstrip - if chars != "" { - s = strings.TrimRight(recv, chars) - } else { - s = strings.TrimRightFunc(recv, unicode.IsSpace) - } - } - return String(s), nil -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#string·title -func 
string_title(_ *Thread, b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - if err := UnpackPositionalArgs(b.Name(), args, kwargs, 0); err != nil { - return nil, err - } - - s := string(b.Receiver().(String)) - - // Python semantics differ from x==strings.{To,}Title(x) in Go: - // "uppercase characters may only follow uncased characters and - // lowercase characters only cased ones." - buf := new(strings.Builder) - buf.Grow(len(s)) - var prevCased bool - for _, r := range s { - if prevCased { - r = unicode.ToLower(r) - } else { - r = unicode.ToTitle(r) - } - prevCased = isCasedRune(r) - buf.WriteRune(r) - } - return String(buf.String()), nil -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#string·upper -func string_upper(_ *Thread, b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - if err := UnpackPositionalArgs(b.Name(), args, kwargs, 0); err != nil { - return nil, err - } - return String(strings.ToUpper(string(b.Receiver().(String)))), nil -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#string·split -// https://github.com/google/starlark-go/blob/master/doc/spec.md#string·rsplit -func string_split(_ *Thread, b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - recv := string(b.Receiver().(String)) - var sep_ Value - maxsplit := -1 - if err := UnpackPositionalArgs(b.Name(), args, kwargs, 0, &sep_, &maxsplit); err != nil { - return nil, err - } - - var res []string - - if sep_ == nil || sep_ == None { - // special case: split on whitespace - if maxsplit < 0 { - res = strings.Fields(recv) - } else if b.Name() == "split" { - res = splitspace(recv, maxsplit) - } else { // rsplit - res = rsplitspace(recv, maxsplit) - } - - } else if sep, ok := AsString(sep_); ok { - if sep == "" { - return nil, fmt.Errorf("split: empty separator") - } - // usual case: split on non-empty separator - if maxsplit < 0 { - res = strings.Split(recv, sep) - } else if b.Name() == "split" { - res = strings.SplitN(recv, sep, maxsplit+1) - } else { // rsplit - res = strings.Split(recv, sep) - if excess := len(res) - maxsplit; excess > 0 { - res[0] = strings.Join(res[:excess], sep) - res = append(res[:1], res[excess:]...) - } - } - - } else { - return nil, fmt.Errorf("split: got %s for separator, want string", sep_.Type()) - } - - list := make([]Value, len(res)) - for i, x := range res { - list[i] = String(x) - } - return NewList(list), nil -} - -// Precondition: max >= 0. -func rsplitspace(s string, max int) []string { - res := make([]string, 0, max+1) - end := -1 // index of field end, or -1 in a region of spaces. - for i := len(s); i > 0; { - r, sz := utf8.DecodeLastRuneInString(s[:i]) - if unicode.IsSpace(r) { - if end >= 0 { - if len(res) == max { - break // let this field run to the start - } - res = append(res, s[i:end]) - end = -1 - } - } else if end < 0 { - end = i - } - i -= sz - } - if end >= 0 { - res = append(res, s[:end]) - } - - resLen := len(res) - for i := 0; i < resLen/2; i++ { - res[i], res[resLen-1-i] = res[resLen-1-i], res[i] - } - - return res -} - -// Precondition: max >= 0. 
-func splitspace(s string, max int) []string { - var res []string - start := -1 // index of field start, or -1 in a region of spaces - for i, r := range s { - if unicode.IsSpace(r) { - if start >= 0 { - if len(res) == max { - break // let this field run to the end - } - res = append(res, s[start:i]) - start = -1 - } - } else if start == -1 { - start = i - } - } - if start >= 0 { - res = append(res, s[start:]) - } - return res -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#string·splitlines -func string_splitlines(_ *Thread, b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - var keepends bool - if err := UnpackPositionalArgs(b.Name(), args, kwargs, 0, &keepends); err != nil { - return nil, err - } - var lines []string - if s := string(b.Receiver().(String)); s != "" { - // TODO(adonovan): handle CRLF correctly. - if keepends { - lines = strings.SplitAfter(s, "\n") - } else { - lines = strings.Split(s, "\n") - } - if strings.HasSuffix(s, "\n") { - lines = lines[:len(lines)-1] - } - } - list := make([]Value, len(lines)) - for i, x := range lines { - list[i] = String(x) - } - return NewList(list), nil -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#set·union. -func set_union(_ *Thread, b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - var iterable Iterable - if err := UnpackPositionalArgs(b.Name(), args, kwargs, 0, &iterable); err != nil { - return nil, err - } - iter := iterable.Iterate() - defer iter.Done() - union, err := b.Receiver().(*Set).Union(iter) - if err != nil { - return nil, nameErr(b, err) - } - return union, nil -} - -// Common implementation of string_{r}{find,index}. -func string_find_impl(b *Builtin, args Tuple, kwargs []Tuple, allowError, last bool) (Value, error) { - var sub string - var start_, end_ Value - if err := UnpackPositionalArgs(b.Name(), args, kwargs, 1, &sub, &start_, &end_); err != nil { - return nil, err - } - - s := string(b.Receiver().(String)) - start, end, err := indices(start_, end_, len(s)) - if err != nil { - return nil, nameErr(b, err) - } - var slice string - if start < end { - slice = s[start:end] - } - - var i int - if last { - i = strings.LastIndex(slice, sub) - } else { - i = strings.Index(slice, sub) - } - if i < 0 { - if !allowError { - return nil, nameErr(b, "substring not found") - } - return MakeInt(-1), nil - } - return MakeInt(i + start), nil -} - -// Common implementation of builtin dict function and dict.update method. -// Precondition: len(updates) == 0 or 1. -func updateDict(dict *Dict, updates Tuple, kwargs []Tuple) error { - if len(updates) == 1 { - switch updates := updates[0].(type) { - case IterableMapping: - // Iterate over dict's key/value pairs, not just keys. 
- for _, item := range updates.Items() { - if err := dict.SetKey(item[0], item[1]); err != nil { - return err // dict is frozen - } - } - default: - // all other sequences - iter := Iterate(updates) - if iter == nil { - return fmt.Errorf("got %s, want iterable", updates.Type()) - } - defer iter.Done() - var pair Value - for i := 0; iter.Next(&pair); i++ { - iter2 := Iterate(pair) - if iter2 == nil { - return fmt.Errorf("dictionary update sequence element #%d is not iterable (%s)", i, pair.Type()) - - } - defer iter2.Done() - len := Len(pair) - if len < 0 { - return fmt.Errorf("dictionary update sequence element #%d has unknown length (%s)", i, pair.Type()) - } else if len != 2 { - return fmt.Errorf("dictionary update sequence element #%d has length %d, want 2", i, len) - } - var k, v Value - iter2.Next(&k) - iter2.Next(&v) - if err := dict.SetKey(k, v); err != nil { - return err - } - } - } - } - - // Then add the kwargs. - before := dict.Len() - for _, pair := range kwargs { - if err := dict.SetKey(pair[0], pair[1]); err != nil { - return err // dict is frozen - } - } - // In the common case, each kwarg will add another dict entry. - // If that's not so, check whether it is because there was a duplicate kwarg. - if dict.Len() < before+len(kwargs) { - keys := make(map[String]bool, len(kwargs)) - for _, kv := range kwargs { - k := kv[0].(String) - if keys[k] { - return fmt.Errorf("duplicate keyword arg: %v", k) - } - keys[k] = true - } - } - - return nil -} - -// nameErr returns an error message of the form "name: msg" -// where name is b.Name() and msg is a string or error. -func nameErr(b *Builtin, msg interface{}) error { - return fmt.Errorf("%s: %v", b.Name(), msg) -} diff --git a/vendor/go.starlark.net/starlark/profile.go b/vendor/go.starlark.net/starlark/profile.go deleted file mode 100644 index 38da2b2e9..000000000 --- a/vendor/go.starlark.net/starlark/profile.go +++ /dev/null @@ -1,449 +0,0 @@ -// Copyright 2019 The Bazel Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package starlark - -// This file defines a simple execution-time profiler for Starlark. -// It measures the wall time spent executing Starlark code, and emits a -// gzipped protocol message in pprof format (github.com/google/pprof). -// -// When profiling is enabled, the interpreter calls the profiler to -// indicate the start and end of each "span" or time interval. A leaf -// function (whether Go or Starlark) has a single span. A function that -// calls another function has spans for each interval in which it is the -// top of the stack. (A LOAD instruction also ends a span.) -// -// At the start of a span, the interpreter records the current time in -// the thread's topmost frame. At the end of the span, it obtains the -// time again and subtracts the span start time. The difference is added -// to an accumulator variable in the thread. If the accumulator exceeds -// some fixed quantum (10ms, say), the profiler records the current call -// stack and sends it to the profiler goroutine, along with the number -// of quanta, which are subtracted. For example, if the accumulator -// holds 3ms and then a completed span adds 25ms to it, its value is 28ms, -// which exceeeds 10ms. The profiler records a stack with the value 20ms -// (2 quanta), and the accumulator is left with 8ms. 
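To make the quantum accounting described in the comment above concrete, here is a small standalone sketch (not part of the profiler; recordSpan is an illustrative name) of how a completed span is folded into the accumulator and only whole quanta are emitted:

package main

import (
	"fmt"
	"time"
)

const quantum = 10 * time.Millisecond

// recordSpan folds a completed span into the accumulator and returns the
// duration attributed to the current stack (whole quanta only) together
// with the remainder left in the accumulator.
func recordSpan(acc, span time.Duration) (recorded, rest time.Duration) {
	acc += span
	n := acc / quantum // number of complete quanta
	return n * quantum, acc - n*quantum
}

func main() {
	// The example from the comment above: 3ms accumulated, a 25ms span ends.
	recorded, rest := recordSpan(3*time.Millisecond, 25*time.Millisecond)
	fmt.Println(recorded, rest) // 20ms 8ms
}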
-// -// The profiler goroutine converts the stacks into the pprof format and -// emits a gzip-compressed protocol message to the designated output -// file. We use a hand-written streaming proto encoder to avoid -// dependencies on pprof and proto, and to avoid the need to -// materialize the profile data structure in memory. -// -// A limitation of this profiler is that it measures wall time, which -// does not necessarily correspond to CPU time. A CPU profiler requires -// that only running (not runnable) threads are sampled; this is -// commonly achieved by having the kernel deliver a (PROF) signal to an -// arbitrary running thread, through setitimer(2). The CPU profiler in the -// Go runtime uses this mechanism, but it is not possible for a Go -// application to register a SIGPROF handler, nor is it possible for a -// Go handler for some other signal to read the stack pointer of -// the interrupted thread. -// -// Two caveats: -// (1) it is tempting to send the leaf Frame directly to the profiler -// goroutine instead of making a copy of the stack, since a Frame is a -// spaghetti stack--a linked list. However, as soon as execution -// resumes, the stack's Frame.pc values may be mutated, so Frames are -// not safe to share with the asynchronous profiler goroutine. -// (2) it is tempting to use Callables as keys in a map when tabulating -// the pprof protocols's Function entities. However, we cannot assume -// that Callables are valid map keys, and furthermore we must not -// pin function values in memory indefinitely as this may cause lambda -// values to keep their free variables live much longer than necessary. - -// TODO(adonovan): -// - make Start/Stop fully thread-safe. -// - fix the pc hack. -// - experiment with other values of quantum. - -import ( - "bufio" - "bytes" - "compress/gzip" - "encoding/binary" - "fmt" - "io" - "log" - "reflect" - "sync/atomic" - "time" - "unsafe" - - "go.starlark.net/syntax" -) - -// StartProfile enables time profiling of all Starlark threads, -// and writes a profile in pprof format to w. -// It must be followed by a call to StopProfiler to stop -// the profiler and finalize the profile. -// -// StartProfile returns an error if profiling was already enabled. -// -// StartProfile must not be called concurrently with Starlark execution. -func StartProfile(w io.Writer) error { - if !atomic.CompareAndSwapUint32(&profiler.on, 0, 1) { - return fmt.Errorf("profiler already running") - } - - // TODO(adonovan): make the API fully concurrency-safe. - // The main challenge is racy reads/writes of profiler.events, - // and of send/close races on the channel it refers to. - // It's easy to solve them with a mutex but harder to do - // it efficiently. - - profiler.events = make(chan *profEvent, 1) - profiler.done = make(chan error) - - go profile(w) - - return nil -} - -// StopProfiler stops the profiler started by a prior call to -// StartProfile and finalizes the profile. It returns an error if the -// profile could not be completed. -// -// StopProfiler must not be called concurrently with Starlark execution. -func StopProfile() error { - // Terminate the profiler goroutine and get its result. 
- close(profiler.events) - err := <-profiler.done - - profiler.done = nil - profiler.events = nil - atomic.StoreUint32(&profiler.on, 0) - - return err -} - -// globals -var profiler struct { - on uint32 // nonzero => profiler running - events chan *profEvent // profile events from interpreter threads - done chan error // indicates profiler goroutine is ready -} - -func (thread *Thread) beginProfSpan() { - if profiler.events == nil { - return // profiling not enabled - } - - thread.frameAt(0).spanStart = nanotime() -} - -// TODO(adonovan): experiment with smaller values, -// which trade space and time for greater precision. -const quantum = 10 * time.Millisecond - -func (thread *Thread) endProfSpan() { - if profiler.events == nil { - return // profiling not enabled - } - - // Add the span to the thread's accumulator. - thread.proftime += time.Duration(nanotime() - thread.frameAt(0).spanStart) - if thread.proftime < quantum { - return - } - - // Only record complete quanta. - n := thread.proftime / quantum - thread.proftime -= n * quantum - - // Copy the stack. - // (We can't save thread.frame because its pc will change.) - ev := &profEvent{ - thread: thread, - time: n * quantum, - } - ev.stack = ev.stackSpace[:0] - for i := range thread.stack { - fr := thread.frameAt(i) - ev.stack = append(ev.stack, profFrame{ - pos: fr.Position(), - fn: fr.Callable(), - pc: fr.pc, - }) - } - - profiler.events <- ev -} - -type profEvent struct { - thread *Thread // currently unused - time time.Duration - stack []profFrame - stackSpace [8]profFrame // initial space for stack -} - -type profFrame struct { - fn Callable // don't hold this live for too long (prevents GC of lambdas) - pc uint32 // program counter (Starlark frames only) - pos syntax.Position // position of pc within this frame -} - -// profile is the profiler goroutine. -// It runs until StopProfiler is called. -func profile(w io.Writer) { - // Field numbers from pprof protocol. 
- // See https://github.com/google/pprof/blob/master/proto/profile.proto - const ( - Profile_sample_type = 1 // repeated ValueType - Profile_sample = 2 // repeated Sample - Profile_mapping = 3 // repeated Mapping - Profile_location = 4 // repeated Location - Profile_function = 5 // repeated Function - Profile_string_table = 6 // repeated string - Profile_time_nanos = 9 // int64 - Profile_duration_nanos = 10 // int64 - Profile_period_type = 11 // ValueType - Profile_period = 12 // int64 - - ValueType_type = 1 // int64 - ValueType_unit = 2 // int64 - - Sample_location_id = 1 // repeated uint64 - Sample_value = 2 // repeated int64 - Sample_label = 3 // repeated Label - - Label_key = 1 // int64 - Label_str = 2 // int64 - Label_num = 3 // int64 - Label_num_unit = 4 // int64 - - Location_id = 1 // uint64 - Location_mapping_id = 2 // uint64 - Location_address = 3 // uint64 - Location_line = 4 // repeated Line - - Line_function_id = 1 // uint64 - Line_line = 2 // int64 - - Function_id = 1 // uint64 - Function_name = 2 // int64 - Function_system_name = 3 // int64 - Function_filename = 4 // int64 - Function_start_line = 5 // int64 - ) - - bufw := bufio.NewWriter(w) // write file in 4KB (not 240B flate-sized) chunks - gz := gzip.NewWriter(bufw) - enc := protoEncoder{w: gz} - - // strings - stringIndex := make(map[string]int64) - str := func(s string) int64 { - i, ok := stringIndex[s] - if !ok { - i = int64(len(stringIndex)) - enc.string(Profile_string_table, s) - stringIndex[s] = i - } - return i - } - str("") // entry 0 - - // functions - // - // function returns the ID of a Callable for use in Line.FunctionId. - // The ID is the same as the function's logical address, - // which is supplied by the caller to avoid the need to recompute it. - functionId := make(map[uintptr]uint64) - function := func(fn Callable, addr uintptr) uint64 { - id, ok := functionId[addr] - if !ok { - id = uint64(addr) - - var pos syntax.Position - if fn, ok := fn.(callableWithPosition); ok { - pos = fn.Position() - } - - name := fn.Name() - if name == "" { - name = pos.Filename() - } - - nameIndex := str(name) - - fun := new(bytes.Buffer) - funenc := protoEncoder{w: fun} - funenc.uint(Function_id, id) - funenc.int(Function_name, nameIndex) - funenc.int(Function_system_name, nameIndex) - funenc.int(Function_filename, str(pos.Filename())) - funenc.int(Function_start_line, int64(pos.Line)) - enc.bytes(Profile_function, fun.Bytes()) - - functionId[addr] = id - } - return id - } - - // locations - // - // location returns the ID of the location denoted by fr. - // For Starlark frames, this is the Frame pc. - locationId := make(map[uintptr]uint64) - location := func(fr profFrame) uint64 { - fnAddr := profFuncAddr(fr.fn) - - // For Starlark functions, the frame position - // represents the current PC value. - // Mix it into the low bits of the address. - // This is super hacky and may result in collisions - // in large functions or if functions are numerous. - // TODO(adonovan): fix: try making this cleaner by treating - // each bytecode segment as a Profile.Mapping. 
- pcAddr := fnAddr - if _, ok := fr.fn.(*Function); ok { - pcAddr = (pcAddr << 16) ^ uintptr(fr.pc) - } - - id, ok := locationId[pcAddr] - if !ok { - id = uint64(pcAddr) - - line := new(bytes.Buffer) - lineenc := protoEncoder{w: line} - lineenc.uint(Line_function_id, function(fr.fn, fnAddr)) - lineenc.int(Line_line, int64(fr.pos.Line)) - loc := new(bytes.Buffer) - locenc := protoEncoder{w: loc} - locenc.uint(Location_id, id) - locenc.uint(Location_address, uint64(pcAddr)) - locenc.bytes(Location_line, line.Bytes()) - enc.bytes(Profile_location, loc.Bytes()) - - locationId[pcAddr] = id - } - return id - } - - wallNanos := new(bytes.Buffer) - wnenc := protoEncoder{w: wallNanos} - wnenc.int(ValueType_type, str("wall")) - wnenc.int(ValueType_unit, str("nanoseconds")) - - // informational fields of Profile - enc.bytes(Profile_sample_type, wallNanos.Bytes()) - enc.int(Profile_period, quantum.Nanoseconds()) // magnitude of sampling period - enc.bytes(Profile_period_type, wallNanos.Bytes()) // dimension and unit of period - enc.int(Profile_time_nanos, time.Now().UnixNano()) // start (real) time of profile - - startNano := nanotime() - - // Read profile events from the channel - // until it is closed by StopProfiler. - for e := range profiler.events { - sample := new(bytes.Buffer) - sampleenc := protoEncoder{w: sample} - sampleenc.int(Sample_value, e.time.Nanoseconds()) // wall nanoseconds - for _, fr := range e.stack { - sampleenc.uint(Sample_location_id, location(fr)) - } - enc.bytes(Profile_sample, sample.Bytes()) - } - - endNano := nanotime() - enc.int(Profile_duration_nanos, endNano-startNano) - - err := gz.Close() // Close reports any prior write error - if flushErr := bufw.Flush(); err == nil { - err = flushErr - } - profiler.done <- err -} - -// nanotime returns the time in nanoseconds since epoch. -// It is implemented by runtime.nanotime using the linkname hack; -// runtime.nanotime is defined for all OSs/ARCHS and uses the -// monotonic system clock, which there is no portable way to access. -// Should that function ever go away, these alternatives exist: -// -// // POSIX only. REALTIME not MONOTONIC. 17ns. -// var tv syscall.Timeval -// syscall.Gettimeofday(&tv) // can't fail -// return tv.Nano() -// -// // Portable. REALTIME not MONOTONIC. 46ns. -// return time.Now().Nanoseconds() -// -// // POSIX only. Adds a dependency. -// import "golang.org/x/sys/unix" -// var ts unix.Timespec -// unix.ClockGettime(CLOCK_MONOTONIC, &ts) // can't fail -// return unix.TimespecToNsec(ts) -// -//go:linkname nanotime runtime.nanotime -func nanotime() int64 - -// profFuncAddr returns the canonical "address" -// of a Callable for use by the profiler. -func profFuncAddr(fn Callable) uintptr { - switch fn := fn.(type) { - case *Builtin: - return reflect.ValueOf(fn.fn).Pointer() - case *Function: - return uintptr(unsafe.Pointer(fn.funcode)) - } - - // User-defined callable types are typically of - // of kind pointer-to-struct. Handle them specially. - if v := reflect.ValueOf(fn); v.Type().Kind() == reflect.Ptr { - return v.Pointer() - } - - // Address zero is reserved by the protocol. - // Use 1 for callables we don't recognize. - log.Printf("Starlark profiler: no address for Callable %T", fn) - return 1 -} - -// We encode the protocol message by hand to avoid making -// the interpreter depend on both github.com/google/pprof -// and github.com/golang/protobuf. -// -// This also avoids the need to materialize a protocol message object -// tree of unbounded size and serialize it all at the end. 
-// The pprof format appears to have been designed to -// permit streaming implementations such as this one. -// -// See https://developers.google.com/protocol-buffers/docs/encoding. -type protoEncoder struct { - w io.Writer // *bytes.Buffer or *gzip.Writer - tmp [binary.MaxVarintLen64]byte -} - -func (e *protoEncoder) uvarint(x uint64) { - n := binary.PutUvarint(e.tmp[:], x) - e.w.Write(e.tmp[:n]) -} - -func (e *protoEncoder) tag(field, wire uint) { - e.uvarint(uint64(field<<3 | wire)) -} - -func (e *protoEncoder) string(field uint, s string) { - e.tag(field, 2) // length-delimited - e.uvarint(uint64(len(s))) - io.WriteString(e.w, s) -} - -func (e *protoEncoder) bytes(field uint, b []byte) { - e.tag(field, 2) // length-delimited - e.uvarint(uint64(len(b))) - e.w.Write(b) -} - -func (e *protoEncoder) uint(field uint, x uint64) { - e.tag(field, 0) // varint - e.uvarint(x) -} - -func (e *protoEncoder) int(field uint, x int64) { - e.tag(field, 0) // varint - e.uvarint(uint64(x)) -} diff --git a/vendor/go.starlark.net/starlark/unpack.go b/vendor/go.starlark.net/starlark/unpack.go deleted file mode 100644 index 316858992..000000000 --- a/vendor/go.starlark.net/starlark/unpack.go +++ /dev/null @@ -1,355 +0,0 @@ -package starlark - -// This file defines the Unpack helper functions used by -// built-in functions to interpret their call arguments. - -import ( - "fmt" - "log" - "reflect" - "strings" - - "go.starlark.net/internal/spell" -) - -// An Unpacker defines custom argument unpacking behavior. -// See UnpackArgs. -type Unpacker interface { - Unpack(v Value) error -} - -// UnpackArgs unpacks the positional and keyword arguments into the -// supplied parameter variables. pairs is an alternating list of names -// and pointers to variables. -// -// If the variable is a bool, integer, string, *List, *Dict, Callable, -// Iterable, or user-defined implementation of Value, -// UnpackArgs performs the appropriate type check. -// Predeclared Go integer types uses the AsInt check. -// -// If the parameter name ends with "?", it is optional. -// -// If the parameter name ends with "??", it is optional and treats the None value -// as if the argument was absent. -// -// If a parameter is marked optional, then all following parameters are -// implicitly optional where or not they are marked. -// -// If the variable implements Unpacker, its Unpack argument -// is called with the argument value, allowing an application -// to define its own argument validation and conversion. -// -// If the variable implements Value, UnpackArgs may call -// its Type() method while constructing the error message. -// -// Examples: -// -// var ( -// a Value -// b = MakeInt(42) -// c Value = starlark.None -// ) -// -// // 1. mixed parameters, like def f(a, b=42, c=None). -// err := UnpackArgs("f", args, kwargs, "a", &a, "b?", &b, "c?", &c) -// -// // 2. keyword parameters only, like def f(*, a, b, c=None). -// if len(args) > 0 { -// return fmt.Errorf("f: unexpected positional arguments") -// } -// err := UnpackArgs("f", args, kwargs, "a", &a, "b?", &b, "c?", &c) -// -// // 3. positional parameters only, like def f(a, b=42, c=None, /) in Python 3.8. -// err := UnpackPositionalArgs("f", args, kwargs, 1, &a, &b, &c) -// -// More complex forms such as def f(a, b=42, *args, c, d=123, **kwargs) -// require additional logic, but their need in built-ins is exceedingly rare. -// -// In the examples above, the declaration of b with type Int causes UnpackArgs -// to require that b's argument value, if provided, is also an int. 
-// To allow arguments of any type, while retaining the default value of 42, -// declare b as a Value: -// -// var b Value = MakeInt(42) -// -// The zero value of a variable of type Value, such as 'a' in the -// examples above, is not a valid Starlark value, so if the parameter is -// optional, the caller must explicitly handle the default case by -// interpreting nil as None or some computed default. The same is true -// for the zero values of variables of type *List, *Dict, Callable, or -// Iterable. For example: -// -// // def myfunc(d=None, e=[], f={}) -// var ( -// d Value -// e *List -// f *Dict -// ) -// err := UnpackArgs("myfunc", args, kwargs, "d?", &d, "e?", &e, "f?", &f) -// if d == nil { d = None; } -// if e == nil { e = new(List); } -// if f == nil { f = new(Dict); } -// -func UnpackArgs(fnname string, args Tuple, kwargs []Tuple, pairs ...interface{}) error { - nparams := len(pairs) / 2 - var defined intset - defined.init(nparams) - - paramName := func(x interface{}) (name string, skipNone bool) { // (no free variables) - name = x.(string) - if strings.HasSuffix(name, "??") { - name = strings.TrimSuffix(name, "??") - skipNone = true - } else if name[len(name)-1] == '?' { - name = name[:len(name)-1] - } - - return name, skipNone - } - - // positional arguments - if len(args) > nparams { - return fmt.Errorf("%s: got %d arguments, want at most %d", - fnname, len(args), nparams) - } - for i, arg := range args { - defined.set(i) - name, skipNone := paramName(pairs[2*i]) - if skipNone { - if _, isNone := arg.(NoneType); isNone { - continue - } - } - if err := unpackOneArg(arg, pairs[2*i+1]); err != nil { - return fmt.Errorf("%s: for parameter %s: %s", fnname, name, err) - } - } - - // keyword arguments -kwloop: - for _, item := range kwargs { - name, arg := item[0].(String), item[1] - for i := 0; i < nparams; i++ { - pName, skipNone := paramName(pairs[2*i]) - if pName == string(name) { - // found it - if defined.set(i) { - return fmt.Errorf("%s: got multiple values for keyword argument %s", - fnname, name) - } - - if skipNone { - if _, isNone := arg.(NoneType); isNone { - continue kwloop - } - } - - ptr := pairs[2*i+1] - if err := unpackOneArg(arg, ptr); err != nil { - return fmt.Errorf("%s: for parameter %s: %s", fnname, name, err) - } - continue kwloop - } - } - err := fmt.Errorf("%s: unexpected keyword argument %s", fnname, name) - names := make([]string, 0, nparams) - for i := 0; i < nparams; i += 2 { - param, _ := paramName(pairs[i]) - names = append(names, param) - } - if n := spell.Nearest(string(name), names); n != "" { - err = fmt.Errorf("%s (did you mean %s?)", err.Error(), n) - } - return err - } - - // Check that all non-optional parameters are defined. - // (We needn't check the first len(args).) - for i := len(args); i < nparams; i++ { - name := pairs[2*i].(string) - if strings.HasSuffix(name, "?") { - break // optional - } - if !defined.get(i) { - return fmt.Errorf("%s: missing argument for %s", fnname, name) - } - } - - return nil -} - -// UnpackPositionalArgs unpacks the positional arguments into -// corresponding variables. Each element of vars is a pointer; see -// UnpackArgs for allowed types and conversions. -// -// UnpackPositionalArgs reports an error if the number of arguments is -// less than min or greater than len(vars), if kwargs is nonempty, or if -// any conversion fails. -// -// See UnpackArgs for general comments. 
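As a usage illustration of the unpacking helpers documented above (the builtin name repeat and its parameters are invented for this sketch), a Go-defined builtin typically unpacks its arguments like this:

package example

import (
	"fmt"
	"strings"

	"go.starlark.net/starlark"
)

// repeat(s, count=1): a hypothetical builtin returning s repeated count times.
var repeat = starlark.NewBuiltin("repeat",
	func(thread *starlark.Thread, b *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) {
		var s string
		count := 1
		// "count?" marks the parameter as optional, as described above.
		if err := starlark.UnpackArgs(b.Name(), args, kwargs, "s", &s, "count?", &count); err != nil {
			return nil, err
		}
		if count < 0 {
			return nil, fmt.Errorf("%s: negative count", b.Name())
		}
		return starlark.String(strings.Repeat(s, count)), nil
	})

The same builtin could call UnpackPositionalArgs(b.Name(), args, kwargs, 1, &s, &count) instead to reject keyword arguments entirely.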
-func UnpackPositionalArgs(fnname string, args Tuple, kwargs []Tuple, min int, vars ...interface{}) error { - if len(kwargs) > 0 { - return fmt.Errorf("%s: unexpected keyword arguments", fnname) - } - max := len(vars) - if len(args) < min { - var atleast string - if min < max { - atleast = "at least " - } - return fmt.Errorf("%s: got %d arguments, want %s%d", fnname, len(args), atleast, min) - } - if len(args) > max { - var atmost string - if max > min { - atmost = "at most " - } - return fmt.Errorf("%s: got %d arguments, want %s%d", fnname, len(args), atmost, max) - } - for i, arg := range args { - if err := unpackOneArg(arg, vars[i]); err != nil { - return fmt.Errorf("%s: for parameter %d: %s", fnname, i+1, err) - } - } - return nil -} - -func unpackOneArg(v Value, ptr interface{}) error { - // On failure, don't clobber *ptr. - switch ptr := ptr.(type) { - case Unpacker: - return ptr.Unpack(v) - case *Value: - *ptr = v - case *string: - s, ok := AsString(v) - if !ok { - return fmt.Errorf("got %s, want string", v.Type()) - } - *ptr = s - case *bool: - b, ok := v.(Bool) - if !ok { - return fmt.Errorf("got %s, want bool", v.Type()) - } - *ptr = bool(b) - case *int, *int8, *int16, *int32, *int64, - *uint, *uint8, *uint16, *uint32, *uint64, *uintptr: - return AsInt(v, ptr) - case *float64: - f, ok := v.(Float) - if !ok { - return fmt.Errorf("got %s, want float", v.Type()) - } - *ptr = float64(f) - case **List: - list, ok := v.(*List) - if !ok { - return fmt.Errorf("got %s, want list", v.Type()) - } - *ptr = list - case **Dict: - dict, ok := v.(*Dict) - if !ok { - return fmt.Errorf("got %s, want dict", v.Type()) - } - *ptr = dict - case *Callable: - f, ok := v.(Callable) - if !ok { - return fmt.Errorf("got %s, want callable", v.Type()) - } - *ptr = f - case *Iterable: - it, ok := v.(Iterable) - if !ok { - return fmt.Errorf("got %s, want iterable", v.Type()) - } - *ptr = it - default: - // v must have type *V, where V is some subtype of starlark.Value. - ptrv := reflect.ValueOf(ptr) - if ptrv.Kind() != reflect.Ptr { - log.Panicf("internal error: not a pointer: %T", ptr) - } - paramVar := ptrv.Elem() - if !reflect.TypeOf(v).AssignableTo(paramVar.Type()) { - // The value is not assignable to the variable. - - // Detect a possible bug in the Go program that called Unpack: - // If the variable *ptr is not a subtype of Value, - // no value of v can possibly work. - if !paramVar.Type().AssignableTo(reflect.TypeOf(new(Value)).Elem()) { - log.Panicf("pointer element type does not implement Value: %T", ptr) - } - - // Report Starlark dynamic type error. - // - // We prefer the Starlark Value.Type name over - // its Go reflect.Type name, but calling the - // Value.Type method on the variable is not safe - // in general. If the variable is an interface, - // the call will fail. Even if the variable has - // a concrete type, it might not be safe to call - // Type() on a zero instance. Thus we must use - // recover. - - // Default to Go reflect.Type name - paramType := paramVar.Type().String() - - // Attempt to call Value.Type method. 
- func() { - defer func() { recover() }() - if typer, _ := paramVar.Interface().(interface{ Type() string }); typer != nil { - paramType = typer.Type() - } - }() - return fmt.Errorf("got %s, want %s", v.Type(), paramType) - } - paramVar.Set(reflect.ValueOf(v)) - } - return nil -} - -type intset struct { - small uint64 // bitset, used if n < 64 - large map[int]bool // set, used if n >= 64 -} - -func (is *intset) init(n int) { - if n >= 64 { - is.large = make(map[int]bool) - } -} - -func (is *intset) set(i int) (prev bool) { - if is.large == nil { - prev = is.small&(1< Hash(x) == Hash(y). - // Hash may fail if the value's type is not hashable, or if the value - // contains a non-hashable value. The hash is used only by dictionaries and - // is not exposed to the Starlark program. - Hash() (uint32, error) -} - -// A Comparable is a value that defines its own equivalence relation and -// perhaps ordered comparisons. -type Comparable interface { - Value - // CompareSameType compares one value to another of the same Type(). - // The comparison operation must be one of EQL, NEQ, LT, LE, GT, or GE. - // CompareSameType returns an error if an ordered comparison was - // requested for a type that does not support it. - // - // Implementations that recursively compare subcomponents of - // the value should use the CompareDepth function, not Compare, to - // avoid infinite recursion on cyclic structures. - // - // The depth parameter is used to bound comparisons of cyclic - // data structures. Implementations should decrement depth - // before calling CompareDepth and should return an error if depth - // < 1. - // - // Client code should not call this method. Instead, use the - // standalone Compare or Equals functions, which are defined for - // all pairs of operands. - CompareSameType(op syntax.Token, y Value, depth int) (bool, error) -} - -// A TotallyOrdered is a type whose values form a total order: -// if x and y are of the same TotallyOrdered type, then x must be less than y, -// greater than y, or equal to y. -// -// It is simpler than Comparable and should be preferred in new code, -// but if a type implements both interfaces, Comparable takes precedence. -type TotallyOrdered interface { - Value - // Cmp compares two values x and y of the same totally ordered type. - // It returns negative if x < y, positive if x > y, and zero if the values are equal. - // - // Implementations that recursively compare subcomponents of - // the value should use the CompareDepth function, not Cmp, to - // avoid infinite recursion on cyclic structures. - // - // The depth parameter is used to bound comparisons of cyclic - // data structures. Implementations should decrement depth - // before calling CompareDepth and should return an error if depth - // < 1. - // - // Client code should not call this method. Instead, use the - // standalone Compare or Equals functions, which are defined for - // all pairs of operands. - Cmp(y Value, depth int) (int, error) -} - -var ( - _ TotallyOrdered = Int{} - _ TotallyOrdered = Float(0) - _ Comparable = False - _ Comparable = String("") - _ Comparable = (*Dict)(nil) - _ Comparable = (*List)(nil) - _ Comparable = Tuple(nil) - _ Comparable = (*Set)(nil) -) - -// A Callable value f may be the operand of a function call, f(x). -// -// Clients should use the Call function, never the CallInternal method. 
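For context on the "use Call, never CallInternal" advice above, a minimal sketch of invoking a Starlark function from Go (the file name and the twice function are invented; starlark.ExecFile is assumed as the loading entry point):

package main

import (
	"fmt"
	"log"

	"go.starlark.net/starlark"
)

func main() {
	thread := &starlark.Thread{Name: "example"}
	const src = `
def twice(x):
    return 2 * x
`
	globals, err := starlark.ExecFile(thread, "example.star", src, nil)
	if err != nil {
		log.Fatal(err)
	}

	// globals["twice"] is a *starlark.Function, i.e. a Callable.
	v, err := starlark.Call(thread, globals["twice"], starlark.Tuple{starlark.MakeInt(21)}, nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(v) // 42
}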
-type Callable interface { - Value - Name() string - CallInternal(thread *Thread, args Tuple, kwargs []Tuple) (Value, error) -} - -type callableWithPosition interface { - Callable - Position() syntax.Position -} - -var ( - _ Callable = (*Builtin)(nil) - _ Callable = (*Function)(nil) - _ callableWithPosition = (*Function)(nil) -) - -// An Iterable abstracts a sequence of values. -// An iterable value may be iterated over by a 'for' loop or used where -// any other Starlark iterable is allowed. Unlike a Sequence, the length -// of an Iterable is not necessarily known in advance of iteration. -type Iterable interface { - Value - Iterate() Iterator // must be followed by call to Iterator.Done -} - -// A Sequence is a sequence of values of known length. -type Sequence interface { - Iterable - Len() int -} - -var ( - _ Sequence = (*Dict)(nil) - _ Sequence = (*Set)(nil) -) - -// An Indexable is a sequence of known length that supports efficient random access. -// It is not necessarily iterable. -type Indexable interface { - Value - Index(i int) Value // requires 0 <= i < Len() - Len() int -} - -// A Sliceable is a sequence that can be cut into pieces with the slice operator (x[i:j:step]). -// -// All native indexable objects are sliceable. -// This is a separate interface for backwards-compatibility. -type Sliceable interface { - Indexable - // For positive strides (step > 0), 0 <= start <= end <= n. - // For negative strides (step < 0), -1 <= end <= start < n. - // The caller must ensure that the start and end indices are valid - // and that step is non-zero. - Slice(start, end, step int) Value -} - -// A HasSetIndex is an Indexable value whose elements may be assigned (x[i] = y). -// -// The implementation should not add Len to a negative index as the -// evaluator does this before the call. -type HasSetIndex interface { - Indexable - SetIndex(index int, v Value) error -} - -var ( - _ HasSetIndex = (*List)(nil) - _ Indexable = Tuple(nil) - _ Indexable = String("") - _ Sliceable = Tuple(nil) - _ Sliceable = String("") - _ Sliceable = (*List)(nil) -) - -// An Iterator provides a sequence of values to the caller. -// -// The caller must call Done when the iterator is no longer needed. -// Operations that modify a sequence will fail if it has active iterators. -// -// Example usage: -// -// iter := iterable.Iterator() -// defer iter.Done() -// var x Value -// for iter.Next(&x) { -// ... -// } -type Iterator interface { - // If the iterator is exhausted, Next returns false. - // Otherwise it sets *p to the current element of the sequence, - // advances the iterator, and returns true. - Next(p *Value) bool - Done() -} - -// A Mapping is a mapping from keys to values, such as a dictionary. -// -// If a type satisfies both Mapping and Iterable, the iterator yields -// the keys of the mapping. -type Mapping interface { - Value - // Get returns the value corresponding to the specified key, - // or !found if the mapping does not contain the key. - // - // Get also defines the behavior of "v in mapping". - // The 'in' operator reports the 'found' component, ignoring errors. - Get(Value) (v Value, found bool, err error) -} - -// An IterableMapping is a mapping that supports key enumeration. -type IterableMapping interface { - Mapping - Iterate() Iterator // see Iterable interface - Items() []Tuple // a new slice containing all key/value pairs -} - -var _ IterableMapping = (*Dict)(nil) - -// A HasSetKey supports map update using x[k]=v syntax, like a dictionary. 
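A short sketch of the Iterator contract shown above, applied from Go to a *List (every Iterate is paired with Done so the list is not left locked against mutation):

package main

import (
	"fmt"

	"go.starlark.net/starlark"
)

func main() {
	list := starlark.NewList([]starlark.Value{
		starlark.MakeInt(1), starlark.MakeInt(2), starlark.MakeInt(3),
	})

	iter := list.Iterate()
	defer iter.Done() // mutating operations fail while iterators are active

	var x starlark.Value
	for iter.Next(&x) {
		fmt.Println(x)
	}
}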
-type HasSetKey interface { - Mapping - SetKey(k, v Value) error -} - -var _ HasSetKey = (*Dict)(nil) - -// A HasBinary value may be used as either operand of these binary operators: -// + - * / // % in not in | & ^ << >> -// -// The Side argument indicates whether the receiver is the left or right operand. -// -// An implementation may decline to handle an operation by returning (nil, nil). -// For this reason, clients should always call the standalone Binary(op, x, y) -// function rather than calling the method directly. -type HasBinary interface { - Value - Binary(op syntax.Token, y Value, side Side) (Value, error) -} - -type Side bool - -const ( - Left Side = false - Right Side = true -) - -// A HasUnary value may be used as the operand of these unary operators: -// + - ~ -// -// An implementation may decline to handle an operation by returning (nil, nil). -// For this reason, clients should always call the standalone Unary(op, x) -// function rather than calling the method directly. -type HasUnary interface { - Value - Unary(op syntax.Token) (Value, error) -} - -// A HasAttrs value has fields or methods that may be read by a dot expression (y = x.f). -// Attribute names may be listed using the built-in 'dir' function. -// -// For implementation convenience, a result of (nil, nil) from Attr is -// interpreted as a "no such field or method" error. Implementations are -// free to return a more precise error. -type HasAttrs interface { - Value - Attr(name string) (Value, error) // returns (nil, nil) if attribute not present - AttrNames() []string // callers must not modify the result. -} - -var ( - _ HasAttrs = String("") - _ HasAttrs = new(List) - _ HasAttrs = new(Dict) - _ HasAttrs = new(Set) -) - -// A HasSetField value has fields that may be written by a dot expression (x.f = y). -// -// An implementation of SetField may return a NoSuchAttrError, -// in which case the runtime may augment the error message to -// warn of possible misspelling. -type HasSetField interface { - HasAttrs - SetField(name string, val Value) error -} - -// A NoSuchAttrError may be returned by an implementation of -// HasAttrs.Attr or HasSetField.SetField to indicate that no such field -// exists. In that case the runtime may augment the error message to -// warn of possible misspelling. -type NoSuchAttrError string - -func (e NoSuchAttrError) Error() string { return string(e) } - -// NoneType is the type of None. Its only legal value is None. -// (We represent it as a number, not struct{}, so that None may be constant.) -type NoneType byte - -const None = NoneType(0) - -func (NoneType) String() string { return "None" } -func (NoneType) Type() string { return "NoneType" } -func (NoneType) Freeze() {} // immutable -func (NoneType) Truth() Bool { return False } -func (NoneType) Hash() (uint32, error) { return 0, nil } - -// Bool is the type of a Starlark bool. -type Bool bool - -const ( - False Bool = false - True Bool = true -) - -func (b Bool) String() string { - if b { - return "True" - } else { - return "False" - } -} -func (b Bool) Type() string { return "bool" } -func (b Bool) Freeze() {} // immutable -func (b Bool) Truth() Bool { return b } -func (b Bool) Hash() (uint32, error) { return uint32(b2i(bool(b))), nil } -func (x Bool) CompareSameType(op syntax.Token, y_ Value, depth int) (bool, error) { - y := y_.(Bool) - return threeway(op, b2i(bool(x))-b2i(bool(y))), nil -} - -// Float is the type of a Starlark float. 
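As an illustration of the HasAttrs contract described earlier in this file, here is a sketch of a hypothetical application type (point) exposing read-only attributes to Starlark; Attr returns (nil, nil) for unknown names, per the interface comment:

package example

import (
	"fmt"

	"go.starlark.net/starlark"
)

// point is a hypothetical application value with two readable attributes.
type point struct{ x, y int }

var _ starlark.HasAttrs = point{}

func (p point) String() string        { return fmt.Sprintf("point(%d, %d)", p.x, p.y) }
func (p point) Type() string          { return "point" }
func (p point) Freeze()               {} // immutable
func (p point) Truth() starlark.Bool  { return starlark.True }
func (p point) Hash() (uint32, error) { return uint32(p.x)*131 + uint32(p.y), nil }

// Attr returns the named attribute, or (nil, nil) if it does not exist.
func (p point) Attr(name string) (starlark.Value, error) {
	switch name {
	case "x":
		return starlark.MakeInt(p.x), nil
	case "y":
		return starlark.MakeInt(p.y), nil
	}
	return nil, nil
}

// AttrNames lists the attributes reported by the built-in dir() function.
func (p point) AttrNames() []string { return []string{"x", "y"} }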
-type Float float64 - -func (f Float) String() string { - var buf strings.Builder - f.format(&buf, 'g') - return buf.String() -} - -func (f Float) format(buf *strings.Builder, conv byte) { - ff := float64(f) - if !isFinite(ff) { - if math.IsInf(ff, +1) { - buf.WriteString("+inf") - } else if math.IsInf(ff, -1) { - buf.WriteString("-inf") - } else { - buf.WriteString("nan") - } - return - } - - // %g is the default format used by str. - // It uses the minimum precision to avoid ambiguity, - // and always includes a '.' or an 'e' so that the value - // is self-evidently a float, not an int. - if conv == 'g' || conv == 'G' { - s := strconv.FormatFloat(ff, conv, -1, 64) - buf.WriteString(s) - // Ensure result always has a decimal point if no exponent. - // "123" -> "123.0" - if strings.IndexByte(s, conv-'g'+'e') < 0 && strings.IndexByte(s, '.') < 0 { - buf.WriteString(".0") - } - return - } - - // %[eEfF] use 6-digit precision - buf.WriteString(strconv.FormatFloat(ff, conv, 6, 64)) -} - -func (f Float) Type() string { return "float" } -func (f Float) Freeze() {} // immutable -func (f Float) Truth() Bool { return f != 0.0 } -func (f Float) Hash() (uint32, error) { - // Equal float and int values must yield the same hash. - // TODO(adonovan): opt: if f is non-integral, and thus not equal - // to any Int, we can avoid the Int conversion and use a cheaper hash. - if isFinite(float64(f)) { - return finiteFloatToInt(f).Hash() - } - return 1618033, nil // NaN, +/-Inf -} - -func floor(f Float) Float { return Float(math.Floor(float64(f))) } - -// isFinite reports whether f represents a finite rational value. -// It is equivalent to !math.IsNan(f) && !math.IsInf(f, 0). -func isFinite(f float64) bool { - return math.Abs(f) <= math.MaxFloat64 -} - -func (x Float) Cmp(y_ Value, depth int) (int, error) { - y := y_.(Float) - return floatCmp(x, y), nil -} - -// floatCmp performs a three-valued comparison on floats, -// which are totally ordered with NaN > +Inf. -func floatCmp(x, y Float) int { - if x > y { - return +1 - } else if x < y { - return -1 - } else if x == y { - return 0 - } - - // At least one operand is NaN. - if x == x { - return -1 // y is NaN - } else if y == y { - return +1 // x is NaN - } - return 0 // both NaN -} - -func (f Float) rational() *big.Rat { return new(big.Rat).SetFloat64(float64(f)) } - -// AsFloat returns the float64 value closest to x. -// The f result is undefined if x is not a float or Int. -// The result may be infinite if x is a very large Int. -func AsFloat(x Value) (f float64, ok bool) { - switch x := x.(type) { - case Float: - return float64(x), true - case Int: - return float64(x.Float()), true - } - return 0, false -} - -func (x Float) Mod(y Float) Float { - z := Float(math.Mod(float64(x), float64(y))) - if (x < 0) != (y < 0) && z != 0 { - z += y - } - return z -} - -// Unary implements the operations +float and -float. -func (f Float) Unary(op syntax.Token) (Value, error) { - switch op { - case syntax.MINUS: - return -f, nil - case syntax.PLUS: - return +f, nil - } - return nil, nil -} - -// String is the type of a Starlark text string. -// -// A String encapsulates an an immutable sequence of bytes, -// but strings are not directly iterable. Instead, iterate -// over the result of calling one of these four methods: -// codepoints, codepoint_ords, elems, elem_ords. -// -// Strings typically contain text; use Bytes for binary strings. -// The Starlark spec defines text strings as sequences of UTF-k -// codes that encode Unicode code points. 
In this Go implementation, -// k=8, whereas in a Java implementation, k=16. For portability, -// operations on strings should aim to avoid assumptions about -// the value of k. -// -// Warning: the contract of the Value interface's String method is that -// it returns the value printed in Starlark notation, -// so s.String() or fmt.Sprintf("%s", s) returns a quoted string. -// Use string(s) or s.GoString() or fmt.Sprintf("%#v", s) to obtain the raw contents -// of a Starlark string as a Go string. -type String string - -func (s String) String() string { return syntax.Quote(string(s), false) } -func (s String) GoString() string { return string(s) } -func (s String) Type() string { return "string" } -func (s String) Freeze() {} // immutable -func (s String) Truth() Bool { return len(s) > 0 } -func (s String) Hash() (uint32, error) { return hashString(string(s)), nil } -func (s String) Len() int { return len(s) } // bytes -func (s String) Index(i int) Value { return s[i : i+1] } - -func (s String) Slice(start, end, step int) Value { - if step == 1 { - return s[start:end] - } - - sign := signum(step) - var str []byte - for i := start; signum(end-i) == sign; i += step { - str = append(str, s[i]) - } - return String(str) -} - -func (s String) Attr(name string) (Value, error) { return builtinAttr(s, name, stringMethods) } -func (s String) AttrNames() []string { return builtinAttrNames(stringMethods) } - -func (x String) CompareSameType(op syntax.Token, y_ Value, depth int) (bool, error) { - y := y_.(String) - return threeway(op, strings.Compare(string(x), string(y))), nil -} - -func AsString(x Value) (string, bool) { v, ok := x.(String); return string(v), ok } - -// A stringElems is an iterable whose iterator yields a sequence of -// elements (bytes), either numerically or as successive substrings. -// It is an indexable sequence. -type stringElems struct { - s String - ords bool -} - -var ( - _ Iterable = (*stringElems)(nil) - _ Indexable = (*stringElems)(nil) -) - -func (si stringElems) String() string { - if si.ords { - return si.s.String() + ".elem_ords()" - } else { - return si.s.String() + ".elems()" - } -} -func (si stringElems) Type() string { return "string.elems" } -func (si stringElems) Freeze() {} // immutable -func (si stringElems) Truth() Bool { return True } -func (si stringElems) Hash() (uint32, error) { return 0, fmt.Errorf("unhashable: %s", si.Type()) } -func (si stringElems) Iterate() Iterator { return &stringElemsIterator{si, 0} } -func (si stringElems) Len() int { return len(si.s) } -func (si stringElems) Index(i int) Value { - if si.ords { - return MakeInt(int(si.s[i])) - } else { - // TODO(adonovan): opt: preallocate canonical 1-byte strings - // to avoid interface allocation. - return si.s[i : i+1] - } -} - -type stringElemsIterator struct { - si stringElems - i int -} - -func (it *stringElemsIterator) Next(p *Value) bool { - if it.i == len(it.si.s) { - return false - } - *p = it.si.Index(it.i) - it.i++ - return true -} - -func (*stringElemsIterator) Done() {} - -// A stringCodepoints is an iterable whose iterator yields a sequence of -// Unicode code points, either numerically or as successive substrings. -// It is not indexable. 
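A tiny sketch of the String-versus-raw-contents warning above: s.String() yields Starlark notation (quoted), while string(s) or s.GoString() yields the underlying text:

package main

import (
	"fmt"

	"go.starlark.net/starlark"
)

func main() {
	s := starlark.String("it's")
	fmt.Println(s.String())          // "it's"  (quoted Starlark literal)
	fmt.Println(s.GoString())        // it's    (raw contents)
	fmt.Println(string(s) == "it's") // true
}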
-type stringCodepoints struct { - s String - ords bool -} - -var _ Iterable = (*stringCodepoints)(nil) - -func (si stringCodepoints) String() string { - if si.ords { - return si.s.String() + ".codepoint_ords()" - } else { - return si.s.String() + ".codepoints()" - } -} -func (si stringCodepoints) Type() string { return "string.codepoints" } -func (si stringCodepoints) Freeze() {} // immutable -func (si stringCodepoints) Truth() Bool { return True } -func (si stringCodepoints) Hash() (uint32, error) { return 0, fmt.Errorf("unhashable: %s", si.Type()) } -func (si stringCodepoints) Iterate() Iterator { return &stringCodepointsIterator{si, 0} } - -type stringCodepointsIterator struct { - si stringCodepoints - i int -} - -func (it *stringCodepointsIterator) Next(p *Value) bool { - s := it.si.s[it.i:] - if s == "" { - return false - } - r, sz := utf8.DecodeRuneInString(string(s)) - if !it.si.ords { - if r == utf8.RuneError { - *p = String(r) - } else { - *p = s[:sz] - } - } else { - *p = MakeInt(int(r)) - } - it.i += sz - return true -} - -func (*stringCodepointsIterator) Done() {} - -// A Function is a function defined by a Starlark def statement or lambda expression. -// The initialization behavior of a Starlark module is also represented by a Function. -type Function struct { - funcode *compile.Funcode - module *module - defaults Tuple - freevars Tuple -} - -// A module is the dynamic counterpart to a Program. -// All functions in the same program share a module. -type module struct { - program *compile.Program - predeclared StringDict - globals []Value - constants []Value -} - -// makeGlobalDict returns a new, unfrozen StringDict containing all global -// variables so far defined in the module. -func (m *module) makeGlobalDict() StringDict { - r := make(StringDict, len(m.program.Globals)) - for i, id := range m.program.Globals { - if v := m.globals[i]; v != nil { - r[id.Name] = v - } - } - return r -} - -func (fn *Function) Name() string { return fn.funcode.Name } // "lambda" for anonymous functions -func (fn *Function) Doc() string { return fn.funcode.Doc } -func (fn *Function) Hash() (uint32, error) { return hashString(fn.funcode.Name), nil } -func (fn *Function) Freeze() { fn.defaults.Freeze(); fn.freevars.Freeze() } -func (fn *Function) String() string { return toString(fn) } -func (fn *Function) Type() string { return "function" } -func (fn *Function) Truth() Bool { return true } - -// Globals returns a new, unfrozen StringDict containing all global -// variables so far defined in the function's module. -func (fn *Function) Globals() StringDict { return fn.module.makeGlobalDict() } - -func (fn *Function) Position() syntax.Position { return fn.funcode.Pos } -func (fn *Function) NumParams() int { return fn.funcode.NumParams } -func (fn *Function) NumKwonlyParams() int { return fn.funcode.NumKwonlyParams } - -// Param returns the name and position of the ith parameter, -// where 0 <= i < NumParams(). -// The *args and **kwargs parameters are at the end -// even if there were optional parameters after *args. -func (fn *Function) Param(i int) (string, syntax.Position) { - if i >= fn.NumParams() { - panic(i) - } - id := fn.funcode.Locals[i] - return id.Name, id.Pos -} - -// ParamDefault returns the default value of the specified parameter -// (0 <= i < NumParams()), or nil if the parameter is not optional. -func (fn *Function) ParamDefault(i int) Value { - if i < 0 || i >= fn.NumParams() { - panic(i) - } - - // fn.defaults omits all required params up to the first optional param. 
It - // also does not include *args or **kwargs at the end. - firstOptIdx := fn.NumParams() - len(fn.defaults) - if fn.HasVarargs() { - firstOptIdx-- - } - if fn.HasKwargs() { - firstOptIdx-- - } - if i < firstOptIdx || i >= firstOptIdx+len(fn.defaults) { - return nil - } - - dflt := fn.defaults[i-firstOptIdx] - if _, ok := dflt.(mandatory); ok { - return nil - } - return dflt -} - -func (fn *Function) HasVarargs() bool { return fn.funcode.HasVarargs } -func (fn *Function) HasKwargs() bool { return fn.funcode.HasKwargs } - -// A Builtin is a function implemented in Go. -type Builtin struct { - name string - fn func(thread *Thread, fn *Builtin, args Tuple, kwargs []Tuple) (Value, error) - recv Value // for bound methods (e.g. "".startswith) -} - -func (b *Builtin) Name() string { return b.name } -func (b *Builtin) Freeze() { - if b.recv != nil { - b.recv.Freeze() - } -} -func (b *Builtin) Hash() (uint32, error) { - h := hashString(b.name) - if b.recv != nil { - h ^= 5521 - } - return h, nil -} -func (b *Builtin) Receiver() Value { return b.recv } -func (b *Builtin) String() string { return toString(b) } -func (b *Builtin) Type() string { return "builtin_function_or_method" } -func (b *Builtin) CallInternal(thread *Thread, args Tuple, kwargs []Tuple) (Value, error) { - return b.fn(thread, b, args, kwargs) -} -func (b *Builtin) Truth() Bool { return true } - -// NewBuiltin returns a new 'builtin_function_or_method' value with the specified name -// and implementation. It compares unequal with all other values. -func NewBuiltin(name string, fn func(thread *Thread, fn *Builtin, args Tuple, kwargs []Tuple) (Value, error)) *Builtin { - return &Builtin{name: name, fn: fn} -} - -// BindReceiver returns a new Builtin value representing a method -// closure, that is, a built-in function bound to a receiver value. -// -// In the example below, the value of f is the string.index -// built-in method bound to the receiver value "abc": -// -// f = "abc".index; f("a"); f("b") -// -// In the common case, the receiver is bound only during the call, -// but this still results in the creation of a temporary method closure: -// -// "abc".index("a") -func (b *Builtin) BindReceiver(recv Value) *Builtin { - return &Builtin{name: b.name, fn: b.fn, recv: recv} -} - -// A *Dict represents a Starlark dictionary. -// The zero value of Dict is a valid empty dictionary. -// If you know the exact final number of entries, -// it is more efficient to call NewDict. -type Dict struct { - ht hashtable -} - -// NewDict returns a set with initial space for -// at least size insertions before rehashing. 
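For orientation, a minimal usage sketch of the *Dict API defined here; it relies only on exported names visible in this file (NewDict, SetKey, Get, String, MakeInt) and the upstream import path go.starlark.net/starlark, with an arbitrary illustrative key and value:

package main

import (
	"fmt"

	"go.starlark.net/starlark"
)

func main() {
	// Pre-size the hashtable for two entries, then insert and look up a key.
	d := starlark.NewDict(2)
	if err := d.SetKey(starlark.String("answer"), starlark.MakeInt(42)); err != nil {
		panic(err)
	}
	v, found, err := d.Get(starlark.String("answer"))
	fmt.Println(v, found, err) // 42 true <nil>
}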
-func NewDict(size int) *Dict { - dict := new(Dict) - dict.ht.init(size) - return dict -} - -func (d *Dict) Clear() error { return d.ht.clear() } -func (d *Dict) Delete(k Value) (v Value, found bool, err error) { return d.ht.delete(k) } -func (d *Dict) Get(k Value) (v Value, found bool, err error) { return d.ht.lookup(k) } -func (d *Dict) Items() []Tuple { return d.ht.items() } -func (d *Dict) Keys() []Value { return d.ht.keys() } -func (d *Dict) Len() int { return int(d.ht.len) } -func (d *Dict) Iterate() Iterator { return d.ht.iterate() } -func (d *Dict) SetKey(k, v Value) error { return d.ht.insert(k, v) } -func (d *Dict) String() string { return toString(d) } -func (d *Dict) Type() string { return "dict" } -func (d *Dict) Freeze() { d.ht.freeze() } -func (d *Dict) Truth() Bool { return d.Len() > 0 } -func (d *Dict) Hash() (uint32, error) { return 0, fmt.Errorf("unhashable type: dict") } - -func (x *Dict) Union(y *Dict) *Dict { - z := new(Dict) - z.ht.init(x.Len()) // a lower bound - z.ht.addAll(&x.ht) // can't fail - z.ht.addAll(&y.ht) // can't fail - return z -} - -func (d *Dict) Attr(name string) (Value, error) { return builtinAttr(d, name, dictMethods) } -func (d *Dict) AttrNames() []string { return builtinAttrNames(dictMethods) } - -func (x *Dict) CompareSameType(op syntax.Token, y_ Value, depth int) (bool, error) { - y := y_.(*Dict) - switch op { - case syntax.EQL: - ok, err := dictsEqual(x, y, depth) - return ok, err - case syntax.NEQ: - ok, err := dictsEqual(x, y, depth) - return !ok, err - default: - return false, fmt.Errorf("%s %s %s not implemented", x.Type(), op, y.Type()) - } -} - -func dictsEqual(x, y *Dict, depth int) (bool, error) { - if x.Len() != y.Len() { - return false, nil - } - for e := x.ht.head; e != nil; e = e.next { - key, xval := e.key, e.value - - if yval, found, _ := y.Get(key); !found { - return false, nil - } else if eq, err := EqualDepth(xval, yval, depth-1); err != nil { - return false, err - } else if !eq { - return false, nil - } - } - return true, nil -} - -// A *List represents a Starlark list value. -type List struct { - elems []Value - frozen bool - itercount uint32 // number of active iterators (ignored if frozen) -} - -// NewList returns a list containing the specified elements. -// Callers should not subsequently modify elems. -func NewList(elems []Value) *List { return &List{elems: elems} } - -func (l *List) Freeze() { - if !l.frozen { - l.frozen = true - for _, elem := range l.elems { - elem.Freeze() - } - } -} - -// checkMutable reports an error if the list should not be mutated. -// verb+" list" should describe the operation. -func (l *List) checkMutable(verb string) error { - if l.frozen { - return fmt.Errorf("cannot %s frozen list", verb) - } - if l.itercount > 0 { - return fmt.Errorf("cannot %s list during iteration", verb) - } - return nil -} - -func (l *List) String() string { return toString(l) } -func (l *List) Type() string { return "list" } -func (l *List) Hash() (uint32, error) { return 0, fmt.Errorf("unhashable type: list") } -func (l *List) Truth() Bool { return l.Len() > 0 } -func (l *List) Len() int { return len(l.elems) } -func (l *List) Index(i int) Value { return l.elems[i] } - -func (l *List) Slice(start, end, step int) Value { - if step == 1 { - elems := append([]Value{}, l.elems[start:end]...) 
- return NewList(elems) - } - - sign := signum(step) - var list []Value - for i := start; signum(end-i) == sign; i += step { - list = append(list, l.elems[i]) - } - return NewList(list) -} - -func (l *List) Attr(name string) (Value, error) { return builtinAttr(l, name, listMethods) } -func (l *List) AttrNames() []string { return builtinAttrNames(listMethods) } - -func (l *List) Iterate() Iterator { - if !l.frozen { - l.itercount++ - } - return &listIterator{l: l} -} - -func (x *List) CompareSameType(op syntax.Token, y_ Value, depth int) (bool, error) { - y := y_.(*List) - // It's tempting to check x == y as an optimization here, - // but wrong because a list containing NaN is not equal to itself. - return sliceCompare(op, x.elems, y.elems, depth) -} - -func sliceCompare(op syntax.Token, x, y []Value, depth int) (bool, error) { - // Fast path: check length. - if len(x) != len(y) && (op == syntax.EQL || op == syntax.NEQ) { - return op == syntax.NEQ, nil - } - - // Find first element that is not equal in both lists. - for i := 0; i < len(x) && i < len(y); i++ { - if eq, err := EqualDepth(x[i], y[i], depth-1); err != nil { - return false, err - } else if !eq { - switch op { - case syntax.EQL: - return false, nil - case syntax.NEQ: - return true, nil - default: - return CompareDepth(op, x[i], y[i], depth-1) - } - } - } - - return threeway(op, len(x)-len(y)), nil -} - -type listIterator struct { - l *List - i int -} - -func (it *listIterator) Next(p *Value) bool { - if it.i < it.l.Len() { - *p = it.l.elems[it.i] - it.i++ - return true - } - return false -} - -func (it *listIterator) Done() { - if !it.l.frozen { - it.l.itercount-- - } -} - -func (l *List) SetIndex(i int, v Value) error { - if err := l.checkMutable("assign to element of"); err != nil { - return err - } - l.elems[i] = v - return nil -} - -func (l *List) Append(v Value) error { - if err := l.checkMutable("append to"); err != nil { - return err - } - l.elems = append(l.elems, v) - return nil -} - -func (l *List) Clear() error { - if err := l.checkMutable("clear"); err != nil { - return err - } - for i := range l.elems { - l.elems[i] = nil // aid GC - } - l.elems = l.elems[:0] - return nil -} - -// A Tuple represents a Starlark tuple value. -type Tuple []Value - -func (t Tuple) Len() int { return len(t) } -func (t Tuple) Index(i int) Value { return t[i] } - -func (t Tuple) Slice(start, end, step int) Value { - if step == 1 { - return t[start:end] - } - - sign := signum(step) - var tuple Tuple - for i := start; signum(end-i) == sign; i += step { - tuple = append(tuple, t[i]) - } - return tuple -} - -func (t Tuple) Iterate() Iterator { return &tupleIterator{elems: t} } -func (t Tuple) Freeze() { - for _, elem := range t { - elem.Freeze() - } -} -func (t Tuple) String() string { return toString(t) } -func (t Tuple) Type() string { return "tuple" } -func (t Tuple) Truth() Bool { return len(t) > 0 } - -func (x Tuple) CompareSameType(op syntax.Token, y_ Value, depth int) (bool, error) { - y := y_.(Tuple) - return sliceCompare(op, x, y, depth) -} - -func (t Tuple) Hash() (uint32, error) { - // Use same algorithm as Python. 
- var x, mult uint32 = 0x345678, 1000003 - for _, elem := range t { - y, err := elem.Hash() - if err != nil { - return 0, err - } - x = x ^ y*mult - mult += 82520 + uint32(len(t)+len(t)) - } - return x, nil -} - -type tupleIterator struct{ elems Tuple } - -func (it *tupleIterator) Next(p *Value) bool { - if len(it.elems) > 0 { - *p = it.elems[0] - it.elems = it.elems[1:] - return true - } - return false -} - -func (it *tupleIterator) Done() {} - -// A Set represents a Starlark set value. -// The zero value of Set is a valid empty set. -// If you know the exact final number of elements, -// it is more efficient to call NewSet. -type Set struct { - ht hashtable // values are all None -} - -// NewSet returns a set with initial space for -// at least size insertions before rehashing. -func NewSet(size int) *Set { - set := new(Set) - set.ht.init(size) - return set -} - -func (s *Set) Delete(k Value) (found bool, err error) { _, found, err = s.ht.delete(k); return } -func (s *Set) Clear() error { return s.ht.clear() } -func (s *Set) Has(k Value) (found bool, err error) { _, found, err = s.ht.lookup(k); return } -func (s *Set) Insert(k Value) error { return s.ht.insert(k, None) } -func (s *Set) Len() int { return int(s.ht.len) } -func (s *Set) Iterate() Iterator { return s.ht.iterate() } -func (s *Set) String() string { return toString(s) } -func (s *Set) Type() string { return "set" } -func (s *Set) Freeze() { s.ht.freeze() } -func (s *Set) Hash() (uint32, error) { return 0, fmt.Errorf("unhashable type: set") } -func (s *Set) Truth() Bool { return s.Len() > 0 } - -func (s *Set) Attr(name string) (Value, error) { return builtinAttr(s, name, setMethods) } -func (s *Set) AttrNames() []string { return builtinAttrNames(setMethods) } - -func (x *Set) CompareSameType(op syntax.Token, y_ Value, depth int) (bool, error) { - y := y_.(*Set) - switch op { - case syntax.EQL: - ok, err := setsEqual(x, y, depth) - return ok, err - case syntax.NEQ: - ok, err := setsEqual(x, y, depth) - return !ok, err - default: - return false, fmt.Errorf("%s %s %s not implemented", x.Type(), op, y.Type()) - } -} - -func setsEqual(x, y *Set, depth int) (bool, error) { - if x.Len() != y.Len() { - return false, nil - } - for e := x.ht.head; e != nil; e = e.next { - if found, _ := y.Has(e.key); !found { - return false, nil - } - } - return true, nil -} - -func (s *Set) Union(iter Iterator) (Value, error) { - set := new(Set) - for e := s.ht.head; e != nil; e = e.next { - set.Insert(e.key) // can't fail - } - var x Value - for iter.Next(&x) { - if err := set.Insert(x); err != nil { - return nil, err - } - } - return set, nil -} - -// toString returns the string form of value v. -// It may be more efficient than v.String() for larger values. -func toString(v Value) string { - buf := new(strings.Builder) - writeValue(buf, v, nil) - return buf.String() -} - -// writeValue writes x to out. -// -// path is used to detect cycles. -// It contains the list of *List and *Dict values we're currently printing. -// (These are the only potentially cyclic structures.) -// Callers should generally pass nil for path. -// It is safe to re-use the same path slice for multiple calls. -func writeValue(out *strings.Builder, x Value, path []Value) { - switch x := x.(type) { - case nil: - out.WriteString("<nil>") // indicates a bug - - // These four cases are duplicates of T.String(), for efficiency.
- case NoneType: - out.WriteString("None") - - case Int: - out.WriteString(x.String()) - - case Bool: - if x { - out.WriteString("True") - } else { - out.WriteString("False") - } - - case String: - out.WriteString(syntax.Quote(string(x), false)) - - case *List: - out.WriteByte('[') - if pathContains(path, x) { - out.WriteString("...") // list contains itself - } else { - for i, elem := range x.elems { - if i > 0 { - out.WriteString(", ") - } - writeValue(out, elem, append(path, x)) - } - } - out.WriteByte(']') - - case Tuple: - out.WriteByte('(') - for i, elem := range x { - if i > 0 { - out.WriteString(", ") - } - writeValue(out, elem, path) - } - if len(x) == 1 { - out.WriteByte(',') - } - out.WriteByte(')') - - case *Function: - fmt.Fprintf(out, "<function %s>", x.Name()) - - case *Builtin: - if x.recv != nil { - fmt.Fprintf(out, "<built-in method %s of %s value>", x.Name(), x.recv.Type()) - } else { - fmt.Fprintf(out, "<built-in function %s>", x.Name()) - } - - case *Dict: - out.WriteByte('{') - if pathContains(path, x) { - out.WriteString("...") // dict contains itself - } else { - sep := "" - for e := x.ht.head; e != nil; e = e.next { - k, v := e.key, e.value - out.WriteString(sep) - writeValue(out, k, path) - out.WriteString(": ") - writeValue(out, v, append(path, x)) // cycle check - sep = ", " - } - } - out.WriteByte('}') - - case *Set: - out.WriteString("set([") - for e := x.ht.head; e != nil; e = e.next { - if e != x.ht.head { - out.WriteString(", ") - } - writeValue(out, e.key, path) - } - out.WriteString("])") - - default: - out.WriteString(x.String()) - } -} - -func pathContains(path []Value, x Value) bool { - for _, y := range path { - if x == y { - return true - } - } - return false -} - -// CompareLimit is the depth limit on recursive comparison operations such as == and <. -// Comparison of data structures deeper than this limit may fail. -var CompareLimit = 10 - -// Equal reports whether two Starlark values are equal. -func Equal(x, y Value) (bool, error) { - if x, ok := x.(String); ok { - return x == y, nil // fast path for an important special case - } - return EqualDepth(x, y, CompareLimit) -} - -// EqualDepth reports whether two Starlark values are equal. -// -// Recursive comparisons by implementations of Value.CompareSameType -// should use EqualDepth to prevent infinite recursion. -func EqualDepth(x, y Value, depth int) (bool, error) { - return CompareDepth(syntax.EQL, x, y, depth) -} - -// Compare compares two Starlark values. -// The comparison operation must be one of EQL, NEQ, LT, LE, GT, or GE. -// Compare returns an error if an ordered comparison was -// requested for a type that does not support it. -// -// Recursive comparisons by implementations of Value.CompareSameType -// should use CompareDepth to prevent infinite recursion. -func Compare(op syntax.Token, x, y Value) (bool, error) { - return CompareDepth(op, x, y, CompareLimit) -} - -// CompareDepth compares two Starlark values. -// The comparison operation must be one of EQL, NEQ, LT, LE, GT, or GE. -// CompareDepth returns an error if an ordered comparison was -// requested for a pair of values that do not support it. -// -// The depth parameter limits the maximum depth of recursion -// in cyclic data structures.
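A brief sketch of the comparison entry points documented above (Equal and Compare, bounded by CompareLimit); it uses only exported names that appear in this file plus the syntax.LT token from the sibling syntax package, and is an illustration of the removed API rather than a prescribed usage:

package main

import (
	"fmt"

	"go.starlark.net/starlark"
	"go.starlark.net/syntax"
)

func main() {
	x := starlark.NewList([]starlark.Value{starlark.MakeInt(1), starlark.MakeInt(2)})
	y := starlark.NewList([]starlark.Value{starlark.MakeInt(1), starlark.MakeInt(3)})

	eq, _ := starlark.Equal(x, y)              // recursive, bounded by CompareLimit
	lt, _ := starlark.Compare(syntax.LT, x, y) // ordered, element-wise comparison
	fmt.Println(eq, lt)                        // false true
}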
-func CompareDepth(op syntax.Token, x, y Value, depth int) (bool, error) { - if depth < 1 { - return false, fmt.Errorf("comparison exceeded maximum recursion depth") - } - if sameType(x, y) { - if xcomp, ok := x.(Comparable); ok { - return xcomp.CompareSameType(op, y, depth) - } - - if xcomp, ok := x.(TotallyOrdered); ok { - t, err := xcomp.Cmp(y, depth) - if err != nil { - return false, err - } - return threeway(op, t), nil - } - - // use identity comparison - switch op { - case syntax.EQL: - return x == y, nil - case syntax.NEQ: - return x != y, nil - } - return false, fmt.Errorf("%s %s %s not implemented", x.Type(), op, y.Type()) - } - - // different types - - // int/float ordered comparisons - switch x := x.(type) { - case Int: - if y, ok := y.(Float); ok { - var cmp int - if y != y { - cmp = -1 // y is NaN - } else if !math.IsInf(float64(y), 0) { - cmp = x.rational().Cmp(y.rational()) // y is finite - } else if y > 0 { - cmp = -1 // y is +Inf - } else { - cmp = +1 // y is -Inf - } - return threeway(op, cmp), nil - } - case Float: - if y, ok := y.(Int); ok { - var cmp int - if x != x { - cmp = +1 // x is NaN - } else if !math.IsInf(float64(x), 0) { - cmp = x.rational().Cmp(y.rational()) // x is finite - } else if x > 0 { - cmp = +1 // x is +Inf - } else { - cmp = -1 // x is -Inf - } - return threeway(op, cmp), nil - } - } - - // All other values of different types compare unequal. - switch op { - case syntax.EQL: - return false, nil - case syntax.NEQ: - return true, nil - } - return false, fmt.Errorf("%s %s %s not implemented", x.Type(), op, y.Type()) -} - -func sameType(x, y Value) bool { - return reflect.TypeOf(x) == reflect.TypeOf(y) || x.Type() == y.Type() -} - -// threeway interprets a three-way comparison value cmp (-1, 0, +1) -// as a boolean comparison (e.g. x < y). -func threeway(op syntax.Token, cmp int) bool { - switch op { - case syntax.EQL: - return cmp == 0 - case syntax.NEQ: - return cmp != 0 - case syntax.LE: - return cmp <= 0 - case syntax.LT: - return cmp < 0 - case syntax.GE: - return cmp >= 0 - case syntax.GT: - return cmp > 0 - } - panic(op) -} - -func b2i(b bool) int { - if b { - return 1 - } else { - return 0 - } -} - -// Len returns the length of a string or sequence value, -// and -1 for all others. -// -// Warning: Len(x) >= 0 does not imply Iterate(x) != nil. -// A string has a known length but is not directly iterable. -func Len(x Value) int { - switch x := x.(type) { - case String: - return x.Len() - case Indexable: - return x.Len() - case Sequence: - return x.Len() - } - return -1 -} - -// Iterate return a new iterator for the value if iterable, nil otherwise. -// If the result is non-nil, the caller must call Done when finished with it. -// -// Warning: Iterate(x) != nil does not imply Len(x) >= 0. -// Some iterables may have unknown length. -func Iterate(x Value) Iterator { - if x, ok := x.(Iterable); ok { - return x.Iterate() - } - return nil -} - -// Bytes is the type of a Starlark binary string. -// -// A Bytes encapsulates an immutable sequence of bytes. -// It is comparable, indexable, and sliceable, but not direcly iterable; -// use bytes.elems() for an iterable view. -// -// In this Go implementation, the elements of 'string' and 'bytes' are -// both bytes, but in other implementations, notably Java, the elements -// of a 'string' are UTF-16 codes (Java chars). 
The spec abstracts text -// strings as sequences of UTF-k codes that encode Unicode code points, -// and operations that convert from text to binary incur UTF-k-to-UTF-8 -// transcoding; conversely, conversion from binary to text incurs -// UTF-8-to-UTF-k transcoding. Because k=8 for Go, these operations -// are the identity function, at least for valid encodings of text. -type Bytes string - -var ( - _ Comparable = Bytes("") - _ Sliceable = Bytes("") - _ Indexable = Bytes("") -) - -func (b Bytes) String() string { return syntax.Quote(string(b), true) } -func (b Bytes) Type() string { return "bytes" } -func (b Bytes) Freeze() {} // immutable -func (b Bytes) Truth() Bool { return len(b) > 0 } -func (b Bytes) Hash() (uint32, error) { return String(b).Hash() } -func (b Bytes) Len() int { return len(b) } -func (b Bytes) Index(i int) Value { return b[i : i+1] } - -func (b Bytes) Attr(name string) (Value, error) { return builtinAttr(b, name, bytesMethods) } -func (b Bytes) AttrNames() []string { return builtinAttrNames(bytesMethods) } - -func (b Bytes) Slice(start, end, step int) Value { - if step == 1 { - return b[start:end] - } - - sign := signum(step) - var str []byte - for i := start; signum(end-i) == sign; i += step { - str = append(str, b[i]) - } - return Bytes(str) -} - -func (x Bytes) CompareSameType(op syntax.Token, y_ Value, depth int) (bool, error) { - y := y_.(Bytes) - return threeway(op, strings.Compare(string(x), string(y))), nil -} diff --git a/vendor/go.starlark.net/starlarkstruct/module.go b/vendor/go.starlark.net/starlarkstruct/module.go deleted file mode 100644 index 735c98ae3..000000000 --- a/vendor/go.starlark.net/starlarkstruct/module.go +++ /dev/null @@ -1,43 +0,0 @@ -package starlarkstruct - -import ( - "fmt" - - "go.starlark.net/starlark" -) - -// A Module is a named collection of values, -// typically a suite of functions imported by a load statement. -// -// It differs from Struct primarily in that its string representation -// does not enumerate its fields. -type Module struct { - Name string - Members starlark.StringDict -} - -var _ starlark.HasAttrs = (*Module)(nil) - -func (m *Module) Attr(name string) (starlark.Value, error) { return m.Members[name], nil } -func (m *Module) AttrNames() []string { return m.Members.Keys() } -func (m *Module) Freeze() { m.Members.Freeze() } -func (m *Module) Hash() (uint32, error) { return 0, fmt.Errorf("unhashable: %s", m.Type()) } -func (m *Module) String() string { return fmt.Sprintf("<module %q>", m.Name) } -func (m *Module) Truth() starlark.Bool { return true } -func (m *Module) Type() string { return "module" } - -// MakeModule may be used as the implementation of a Starlark built-in -// function, module(name, **kwargs). It returns a new module with the -// specified name and members. -func MakeModule(thread *starlark.Thread, b *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) { - var name string - if err := starlark.UnpackPositionalArgs(b.Name(), args, nil, 1, &name); err != nil { - return nil, err - } - members := make(starlark.StringDict, len(kwargs)) - for _, kwarg := range kwargs { - k := string(kwarg[0].(starlark.String)) - members[k] = kwarg[1] - } - return &Module{name, members}, nil -} diff --git a/vendor/go.starlark.net/starlarkstruct/struct.go b/vendor/go.starlark.net/starlarkstruct/struct.go deleted file mode 100644 index ea2b1f639..000000000 --- a/vendor/go.starlark.net/starlarkstruct/struct.go +++ /dev/null @@ -1,282 +0,0 @@ -// Copyright 2017 The Bazel Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package starlarkstruct defines the Starlark types 'struct' and -// 'module', both optional language extensions. -// -package starlarkstruct // import "go.starlark.net/starlarkstruct" - -// It is tempting to introduce a variant of Struct that is a wrapper -// around a Go struct value, for stronger typing guarantees and more -// efficient and convenient field lookup. However: -// 1) all fields of Starlark structs are optional, so we cannot represent -// them using more specific types such as String, Int, *Depset, and -// *File, as such types give no way to represent missing fields. -// 2) the efficiency gain of direct struct field access is rather -// marginal: finding the index of a field by binary searching on the -// sorted list of field names is quite fast compared to the other -// overheads. -// 3) the gains in compactness and spatial locality are also rather -// marginal: the array behind the []entry slice is (due to field name -// strings) only a factor of 2 larger than the corresponding Go struct -// would be, and, like the Go struct, requires only a single allocation. - -import ( - "fmt" - "sort" - "strings" - - "go.starlark.net/starlark" - "go.starlark.net/syntax" -) - -// Make is the implementation of a built-in function that instantiates -// an immutable struct from the specified keyword arguments. -// -// An application can add 'struct' to the Starlark environment like so: -// -// globals := starlark.StringDict{ -// "struct": starlark.NewBuiltin("struct", starlarkstruct.Make), -// } -// -func Make(_ *starlark.Thread, _ *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) { - if len(args) > 0 { - return nil, fmt.Errorf("struct: unexpected positional arguments") - } - return FromKeywords(Default, kwargs), nil -} - -// FromKeywords returns a new struct instance whose fields are specified by the -// key/value pairs in kwargs. (Each kwargs[i][0] must be a starlark.String.) -func FromKeywords(constructor starlark.Value, kwargs []starlark.Tuple) *Struct { - if constructor == nil { - panic("nil constructor") - } - s := &Struct{ - constructor: constructor, - entries: make(entries, 0, len(kwargs)), - } - for _, kwarg := range kwargs { - k := string(kwarg[0].(starlark.String)) - v := kwarg[1] - s.entries = append(s.entries, entry{k, v}) - } - sort.Sort(s.entries) - return s -} - -// FromStringDict returns a new struct instance whose elements are those of d. -// The constructor parameter specifies the constructor; use Default for an ordinary struct. -func FromStringDict(constructor starlark.Value, d starlark.StringDict) *Struct { - if constructor == nil { - panic("nil constructor") - } - s := &Struct{ - constructor: constructor, - entries: make(entries, 0, len(d)), - } - for k, v := range d { - s.entries = append(s.entries, entry{k, v}) - } - sort.Sort(s.entries) - return s -} - -// Struct is an immutable Starlark type that maps field names to values. -// It is not iterable and does not support len. -// -// A struct has a constructor, a distinct value that identifies a class -// of structs, and which appears in the struct's string representation. -// -// Operations such as x+y fail if the constructors of the two operands -// are not equal. -// -// The default constructor, Default, is the string "struct", but -// clients may wish to 'brand' structs for their own purposes. 
-// The constructor value appears in the printed form of the value, -// and is accessible using the Constructor method. -// -// Use Attr to access its fields and AttrNames to enumerate them. -type Struct struct { - constructor starlark.Value - entries entries // sorted by name -} - -// Default is the default constructor for structs. -// It is merely the string "struct". -const Default = starlark.String("struct") - -type entries []entry - -func (a entries) Len() int { return len(a) } -func (a entries) Less(i, j int) bool { return a[i].name < a[j].name } -func (a entries) Swap(i, j int) { a[i], a[j] = a[j], a[i] } - -type entry struct { - name string - value starlark.Value -} - -var ( - _ starlark.HasAttrs = (*Struct)(nil) - _ starlark.HasBinary = (*Struct)(nil) -) - -// ToStringDict adds a name/value entry to d for each field of the struct. -func (s *Struct) ToStringDict(d starlark.StringDict) { - for _, e := range s.entries { - d[e.name] = e.value - } -} - -func (s *Struct) String() string { - buf := new(strings.Builder) - switch constructor := s.constructor.(type) { - case starlark.String: - // NB: The Java implementation always prints struct - // even for Bazel provider instances. - buf.WriteString(constructor.GoString()) // avoid String()'s quotation - default: - buf.WriteString(s.constructor.String()) - } - buf.WriteByte('(') - for i, e := range s.entries { - if i > 0 { - buf.WriteString(", ") - } - buf.WriteString(e.name) - buf.WriteString(" = ") - buf.WriteString(e.value.String()) - } - buf.WriteByte(')') - return buf.String() -} - -// Constructor returns the constructor used to create this struct. -func (s *Struct) Constructor() starlark.Value { return s.constructor } - -func (s *Struct) Type() string { return "struct" } -func (s *Struct) Truth() starlark.Bool { return true } // even when empty -func (s *Struct) Hash() (uint32, error) { - // Same algorithm as Tuple.hash, but with different primes. - var x, m uint32 = 8731, 9839 - for _, e := range s.entries { - namehash, _ := starlark.String(e.name).Hash() - x = x ^ 3*namehash - y, err := e.value.Hash() - if err != nil { - return 0, err - } - x = x ^ y*m - m += 7349 - } - return x, nil -} -func (s *Struct) Freeze() { - for _, e := range s.entries { - e.value.Freeze() - } -} - -func (x *Struct) Binary(op syntax.Token, y starlark.Value, side starlark.Side) (starlark.Value, error) { - if y, ok := y.(*Struct); ok && op == syntax.PLUS { - if side == starlark.Right { - x, y = y, x - } - - if eq, err := starlark.Equal(x.constructor, y.constructor); err != nil { - return nil, fmt.Errorf("in %s + %s: error comparing constructors: %v", - x.constructor, y.constructor, err) - } else if !eq { - return nil, fmt.Errorf("cannot add structs of different constructors: %s + %s", - x.constructor, y.constructor) - } - - z := make(starlark.StringDict, x.len()+y.len()) - for _, e := range x.entries { - z[e.name] = e.value - } - for _, e := range y.entries { - z[e.name] = e.value - } - - return FromStringDict(x.constructor, z), nil - } - return nil, nil // unhandled -} - -// Attr returns the value of the specified field. -func (s *Struct) Attr(name string) (starlark.Value, error) { - // Binary search the entries. - // This implementation is a specialization of - // sort.Search that avoids dynamic dispatch. 
- n := len(s.entries) - i, j := 0, n - for i < j { - h := int(uint(i+j) >> 1) - if s.entries[h].name < name { - i = h + 1 - } else { - j = h - } - } - if i < n && s.entries[i].name == name { - return s.entries[i].value, nil - } - - var ctor string - if s.constructor != Default { - ctor = s.constructor.String() + " " - } - return nil, starlark.NoSuchAttrError( - fmt.Sprintf("%sstruct has no .%s attribute", ctor, name)) -} - -func (s *Struct) len() int { return len(s.entries) } - -// AttrNames returns a new sorted list of the struct fields. -func (s *Struct) AttrNames() []string { - names := make([]string, len(s.entries)) - for i, e := range s.entries { - names[i] = e.name - } - return names -} - -func (x *Struct) CompareSameType(op syntax.Token, y_ starlark.Value, depth int) (bool, error) { - y := y_.(*Struct) - switch op { - case syntax.EQL: - return structsEqual(x, y, depth) - case syntax.NEQ: - eq, err := structsEqual(x, y, depth) - return !eq, err - default: - return false, fmt.Errorf("%s %s %s not implemented", x.Type(), op, y.Type()) - } -} - -func structsEqual(x, y *Struct, depth int) (bool, error) { - if x.len() != y.len() { - return false, nil - } - - if eq, err := starlark.Equal(x.constructor, y.constructor); err != nil { - return false, fmt.Errorf("error comparing struct constructors %v and %v: %v", - x.constructor, y.constructor, err) - } else if !eq { - return false, nil - } - - for i, n := 0, x.len(); i < n; i++ { - if x.entries[i].name != y.entries[i].name { - return false, nil - } else if eq, err := starlark.EqualDepth(x.entries[i].value, y.entries[i].value, depth-1); err != nil { - return false, err - } else if !eq { - return false, nil - } - } - return true, nil -} diff --git a/vendor/go.starlark.net/syntax/grammar.txt b/vendor/go.starlark.net/syntax/grammar.txt deleted file mode 100644 index 7f5dfc811..000000000 --- a/vendor/go.starlark.net/syntax/grammar.txt +++ /dev/null @@ -1,129 +0,0 @@ - -Grammar of Starlark -================== - -File = {Statement | newline} eof . - -Statement = DefStmt | IfStmt | ForStmt | WhileStmt | SimpleStmt . - -DefStmt = 'def' identifier '(' [Parameters [',']] ')' ':' Suite . - -Parameters = Parameter {',' Parameter}. - -Parameter = identifier | identifier '=' Test | '*' | '*' identifier | '**' identifier . - -IfStmt = 'if' Test ':' Suite {'elif' Test ':' Suite} ['else' ':' Suite] . - -ForStmt = 'for' LoopVariables 'in' Expression ':' Suite . - -WhileStmt = 'while' Test ':' Suite . - -Suite = [newline indent {Statement} outdent] | SimpleStmt . - -SimpleStmt = SmallStmt {';' SmallStmt} [';'] '\n' . -# NOTE: '\n' optional at EOF - -SmallStmt = ReturnStmt - | BreakStmt | ContinueStmt | PassStmt - | AssignStmt - | ExprStmt - | LoadStmt - . - -ReturnStmt = 'return' [Expression] . -BreakStmt = 'break' . -ContinueStmt = 'continue' . -PassStmt = 'pass' . -AssignStmt = Expression ('=' | '+=' | '-=' | '*=' | '/=' | '//=' | '%=' | '&=' | '|=' | '^=' | '<<=' | '>>=') Expression . -ExprStmt = Expression . - -LoadStmt = 'load' '(' string {',' [identifier '='] string} [','] ')' . - -Test = LambdaExpr - | IfExpr - | PrimaryExpr - | UnaryExpr - | BinaryExpr - . - -LambdaExpr = 'lambda' [Parameters] ':' Test . - -IfExpr = Test 'if' Test 'else' Test . - -PrimaryExpr = Operand - | PrimaryExpr DotSuffix - | PrimaryExpr CallSuffix - | PrimaryExpr SliceSuffix - . - -Operand = identifier - | int | float | string - | ListExpr | ListComp - | DictExpr | DictComp - | '(' [Expression [',']] ')' - | ('-' | '+') PrimaryExpr - . - -DotSuffix = '.' identifier . 
-CallSuffix = '(' [Arguments [',']] ')' . -SliceSuffix = '[' [Expression] [':' Test [':' Test]] ']' . - -Arguments = Argument {',' Argument} . -Argument = Test | identifier '=' Test | '*' Test | '**' Test . - -ListExpr = '[' [Expression [',']] ']' . -ListComp = '[' Test {CompClause} ']'. - -DictExpr = '{' [Entries [',']] '}' . -DictComp = '{' Entry {CompClause} '}' . -Entries = Entry {',' Entry} . -Entry = Test ':' Test . - -CompClause = 'for' LoopVariables 'in' Test | 'if' Test . - -UnaryExpr = 'not' Test . - -BinaryExpr = Test {Binop Test} . - -Binop = 'or' - | 'and' - | '==' | '!=' | '<' | '>' | '<=' | '>=' | 'in' | 'not' 'in' - | '|' - | '^' - | '&' - | '-' | '+' - | '*' | '%' | '/' | '//' - . - -Expression = Test {',' Test} . -# NOTE: trailing comma permitted only when within [...] or (...). - -LoopVariables = PrimaryExpr {',' PrimaryExpr} . - - -# Notation (similar to Go spec): -- lowercase and 'quoted' items are lexical tokens. -- Capitalized names denote grammar productions. -- (...) implies grouping -- x | y means either x or y. -- [x] means x is optional -- {x} means x is repeated zero or more times -- The end of each declaration is marked with a period. - -# Tokens -- spaces: newline, eof, indent, outdent. -- identifier. -- literals: string, int, float. -- plus all quoted tokens such as '+=', 'return'. - -# Notes: -- Ambiguity is resolved using operator precedence. -- The grammar does not enforce the legal order of params and args, - nor that the first compclause must be a 'for'. - -TODO: -- explain how the lexer generates indent, outdent, and newline tokens. -- why is unary NOT separated from unary - and +? -- the grammar is (mostly) in LL(1) style so, for example, - dot expressions are formed suffixes, not complete expressions, - which makes the spec harder to read. Reorganize into non-LL(1) form? diff --git a/vendor/go.starlark.net/syntax/parse.go b/vendor/go.starlark.net/syntax/parse.go deleted file mode 100644 index f4c8fff4d..000000000 --- a/vendor/go.starlark.net/syntax/parse.go +++ /dev/null @@ -1,1028 +0,0 @@ -// Copyright 2017 The Bazel Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package syntax - -// This file defines a recursive-descent parser for Starlark. -// The LL(1) grammar of Starlark and the names of many productions follow Python 2.7. -// -// TODO(adonovan): use syntax.Error more systematically throughout the -// package. Verify that error positions are correct using the -// chunkedfile mechanism. - -import "log" - -// Enable this flag to print the token stream and log.Fatal on the first error. -const debug = false - -// A Mode value is a set of flags (or 0) that controls optional parser functionality. -type Mode uint - -const ( - RetainComments Mode = 1 << iota // retain comments in AST; see Node.Comments -) - -// Parse parses the input data and returns the corresponding parse tree. -// -// If src != nil, ParseFile parses the source from src and the filename -// is only used when recording position information. -// The type of the argument for the src parameter must be string, -// []byte, io.Reader, or FilePortion. -// If src == nil, ParseFile parses the file specified by filename. 
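For reference, a minimal sketch of the exported entry points of this parser, matching the Parse signature documented above and the ParseExpr variant defined further below; the file name and source text are illustrative placeholders:

package main

import (
	"fmt"

	"go.starlark.net/syntax"
)

func main() {
	// Parse a whole file from an in-memory source; mode 0 enables no optional behavior.
	f, err := syntax.Parse("example.star", "x = 1\nprint(x)\n", 0)
	if err != nil {
		panic(err)
	}
	fmt.Println(len(f.Stmts)) // 2

	// Parse a single expression; a comma-separated list would become a tuple.
	e, err := syntax.ParseExpr("<expr>", "1 + 2 * 3", 0)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%T\n", e) // *syntax.BinaryExpr
}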
-func Parse(filename string, src interface{}, mode Mode) (f *File, err error) { - in, err := newScanner(filename, src, mode&RetainComments != 0) - if err != nil { - return nil, err - } - p := parser{in: in} - defer p.in.recover(&err) - - p.nextToken() // read first lookahead token - f = p.parseFile() - if f != nil { - f.Path = filename - } - p.assignComments(f) - return f, nil -} - -// ParseCompoundStmt parses a single compound statement: -// a blank line, a def, for, while, or if statement, or a -// semicolon-separated list of simple statements followed -// by a newline. These are the units on which the REPL operates. -// ParseCompoundStmt does not consume any following input. -// The parser calls the readline function each -// time it needs a new line of input. -func ParseCompoundStmt(filename string, readline func() ([]byte, error)) (f *File, err error) { - in, err := newScanner(filename, readline, false) - if err != nil { - return nil, err - } - - p := parser{in: in} - defer p.in.recover(&err) - - p.nextToken() // read first lookahead token - - var stmts []Stmt - switch p.tok { - case DEF, IF, FOR, WHILE: - stmts = p.parseStmt(stmts) - case NEWLINE: - // blank line - default: - stmts = p.parseSimpleStmt(stmts, false) - // Require but don't consume newline, to avoid blocking again. - if p.tok != NEWLINE { - p.in.errorf(p.in.pos, "invalid syntax") - } - } - - return &File{Path: filename, Stmts: stmts}, nil -} - -// ParseExpr parses a Starlark expression. -// A comma-separated list of expressions is parsed as a tuple. -// See Parse for explanation of parameters. -func ParseExpr(filename string, src interface{}, mode Mode) (expr Expr, err error) { - in, err := newScanner(filename, src, mode&RetainComments != 0) - if err != nil { - return nil, err - } - p := parser{in: in} - defer p.in.recover(&err) - - p.nextToken() // read first lookahead token - - // Use parseExpr, not parseTest, to permit an unparenthesized tuple. - expr = p.parseExpr(false) - - // A following newline (e.g. "f()\n") appears outside any brackets, - // on a non-blank line, and thus results in a NEWLINE token. - if p.tok == NEWLINE { - p.nextToken() - } - - if p.tok != EOF { - p.in.errorf(p.in.pos, "got %#v after expression, want EOF", p.tok) - } - p.assignComments(expr) - return expr, nil -} - -type parser struct { - in *scanner - tok Token - tokval tokenValue -} - -// nextToken advances the scanner and returns the position of the -// previous token. 
-func (p *parser) nextToken() Position { - oldpos := p.tokval.pos - p.tok = p.in.nextToken(&p.tokval) - // enable to see the token stream - if debug { - log.Printf("nextToken: %-20s%+v\n", p.tok, p.tokval.pos) - } - return oldpos -} - -// file_input = (NEWLINE | stmt)* EOF -func (p *parser) parseFile() *File { - var stmts []Stmt - for p.tok != EOF { - if p.tok == NEWLINE { - p.nextToken() - continue - } - stmts = p.parseStmt(stmts) - } - return &File{Stmts: stmts} -} - -func (p *parser) parseStmt(stmts []Stmt) []Stmt { - if p.tok == DEF { - return append(stmts, p.parseDefStmt()) - } else if p.tok == IF { - return append(stmts, p.parseIfStmt()) - } else if p.tok == FOR { - return append(stmts, p.parseForStmt()) - } else if p.tok == WHILE { - return append(stmts, p.parseWhileStmt()) - } - return p.parseSimpleStmt(stmts, true) -} - -func (p *parser) parseDefStmt() Stmt { - defpos := p.nextToken() // consume DEF - id := p.parseIdent() - p.consume(LPAREN) - params := p.parseParams() - p.consume(RPAREN) - p.consume(COLON) - body := p.parseSuite() - return &DefStmt{ - Def: defpos, - Name: id, - Params: params, - Body: body, - } -} - -func (p *parser) parseIfStmt() Stmt { - ifpos := p.nextToken() // consume IF - cond := p.parseTest() - p.consume(COLON) - body := p.parseSuite() - ifStmt := &IfStmt{ - If: ifpos, - Cond: cond, - True: body, - } - tail := ifStmt - for p.tok == ELIF { - elifpos := p.nextToken() // consume ELIF - cond := p.parseTest() - p.consume(COLON) - body := p.parseSuite() - elif := &IfStmt{ - If: elifpos, - Cond: cond, - True: body, - } - tail.ElsePos = elifpos - tail.False = []Stmt{elif} - tail = elif - } - if p.tok == ELSE { - tail.ElsePos = p.nextToken() // consume ELSE - p.consume(COLON) - tail.False = p.parseSuite() - } - return ifStmt -} - -func (p *parser) parseForStmt() Stmt { - forpos := p.nextToken() // consume FOR - vars := p.parseForLoopVariables() - p.consume(IN) - x := p.parseExpr(false) - p.consume(COLON) - body := p.parseSuite() - return &ForStmt{ - For: forpos, - Vars: vars, - X: x, - Body: body, - } -} - -func (p *parser) parseWhileStmt() Stmt { - whilepos := p.nextToken() // consume WHILE - cond := p.parseTest() - p.consume(COLON) - body := p.parseSuite() - return &WhileStmt{ - While: whilepos, - Cond: cond, - Body: body, - } -} - -// Equivalent to 'exprlist' production in Python grammar. -// -// loop_variables = primary_with_suffix (COMMA primary_with_suffix)* COMMA? -func (p *parser) parseForLoopVariables() Expr { - // Avoid parseExpr because it would consume the IN token - // following x in "for x in y: ...". - v := p.parsePrimaryWithSuffix() - if p.tok != COMMA { - return v - } - - list := []Expr{v} - for p.tok == COMMA { - p.nextToken() - if terminatesExprList(p.tok) { - break - } - list = append(list, p.parsePrimaryWithSuffix()) - } - return &TupleExpr{List: list} -} - -// simple_stmt = small_stmt (SEMI small_stmt)* SEMI? NEWLINE -// In REPL mode, it does not consume the NEWLINE. -func (p *parser) parseSimpleStmt(stmts []Stmt, consumeNL bool) []Stmt { - for { - stmts = append(stmts, p.parseSmallStmt()) - if p.tok != SEMI { - break - } - p.nextToken() // consume SEMI - if p.tok == NEWLINE || p.tok == EOF { - break - } - } - // EOF without NEWLINE occurs in `if x: pass`, for example. - if p.tok != EOF && consumeNL { - p.consume(NEWLINE) - } - - return stmts -} - -// small_stmt = RETURN expr? -// | PASS | BREAK | CONTINUE -// | LOAD ... 
-// | expr ('=' | '+=' | '-=' | '*=' | '/=' | '%=' | '&=' | '|=' | '^=' | '<<=' | '>>=') expr // assign -// | expr -func (p *parser) parseSmallStmt() Stmt { - switch p.tok { - case RETURN: - pos := p.nextToken() // consume RETURN - var result Expr - if p.tok != EOF && p.tok != NEWLINE && p.tok != SEMI { - result = p.parseExpr(false) - } - return &ReturnStmt{Return: pos, Result: result} - - case BREAK, CONTINUE, PASS: - tok := p.tok - pos := p.nextToken() // consume it - return &BranchStmt{Token: tok, TokenPos: pos} - - case LOAD: - return p.parseLoadStmt() - } - - // Assignment - x := p.parseExpr(false) - switch p.tok { - case EQ, PLUS_EQ, MINUS_EQ, STAR_EQ, SLASH_EQ, SLASHSLASH_EQ, PERCENT_EQ, AMP_EQ, PIPE_EQ, CIRCUMFLEX_EQ, LTLT_EQ, GTGT_EQ: - op := p.tok - pos := p.nextToken() // consume op - rhs := p.parseExpr(false) - return &AssignStmt{OpPos: pos, Op: op, LHS: x, RHS: rhs} - } - - // Expression statement (e.g. function call, doc string). - return &ExprStmt{X: x} -} - -// stmt = LOAD '(' STRING {',' (IDENT '=')? STRING} [','] ')' -func (p *parser) parseLoadStmt() *LoadStmt { - loadPos := p.nextToken() // consume LOAD - lparen := p.consume(LPAREN) - - if p.tok != STRING { - p.in.errorf(p.in.pos, "first operand of load statement must be a string literal") - } - module := p.parsePrimary().(*Literal) - - var from, to []*Ident - for p.tok != RPAREN && p.tok != EOF { - p.consume(COMMA) - if p.tok == RPAREN { - break // allow trailing comma - } - switch p.tok { - case STRING: - // load("module", "id") - // To name is same as original. - lit := p.parsePrimary().(*Literal) - id := &Ident{ - NamePos: lit.TokenPos.add(`"`), - Name: lit.Value.(string), - } - to = append(to, id) - from = append(from, id) - - case IDENT: - // load("module", to="from") - id := p.parseIdent() - to = append(to, id) - if p.tok != EQ { - p.in.errorf(p.in.pos, `load operand must be "%[1]s" or %[1]s="originalname" (want '=' after %[1]s)`, id.Name) - } - p.consume(EQ) - if p.tok != STRING { - p.in.errorf(p.in.pos, `original name of loaded symbol must be quoted: %s="originalname"`, id.Name) - } - lit := p.parsePrimary().(*Literal) - from = append(from, &Ident{ - NamePos: lit.TokenPos.add(`"`), - Name: lit.Value.(string), - }) - - case RPAREN: - p.in.errorf(p.in.pos, "trailing comma in load statement") - - default: - p.in.errorf(p.in.pos, `load operand must be "name" or localname="name" (got %#v)`, p.tok) - } - } - rparen := p.consume(RPAREN) - - if len(to) == 0 { - p.in.errorf(lparen, "load statement must import at least 1 symbol") - } - return &LoadStmt{ - Load: loadPos, - Module: module, - To: to, - From: from, - Rparen: rparen, - } -} - -// suite is typically what follows a COLON (e.g. after DEF or FOR). -// suite = simple_stmt | NEWLINE INDENT stmt+ OUTDENT -func (p *parser) parseSuite() []Stmt { - if p.tok == NEWLINE { - p.nextToken() // consume NEWLINE - p.consume(INDENT) - var stmts []Stmt - for p.tok != OUTDENT && p.tok != EOF { - stmts = p.parseStmt(stmts) - } - p.consume(OUTDENT) - return stmts - } - - return p.parseSimpleStmt(nil, true) -} - -func (p *parser) parseIdent() *Ident { - if p.tok != IDENT { - p.in.error(p.in.pos, "not an identifier") - } - id := &Ident{ - NamePos: p.tokval.pos, - Name: p.tokval.raw, - } - p.nextToken() - return id -} - -func (p *parser) consume(t Token) Position { - if p.tok != t { - p.in.errorf(p.in.pos, "got %#v, want %#v", p.tok, t) - } - return p.nextToken() -} - -// params = (param COMMA)* param COMMA? 
-// | -// -// param = IDENT -// | IDENT EQ test -// | STAR -// | STAR IDENT -// | STARSTAR IDENT -// -// parseParams parses a parameter list. The resulting expressions are of the form: -// -// *Ident x -// *Binary{Op: EQ, X: *Ident, Y: Expr} x=y -// *Unary{Op: STAR} * -// *Unary{Op: STAR, X: *Ident} *args -// *Unary{Op: STARSTAR, X: *Ident} **kwargs -func (p *parser) parseParams() []Expr { - var params []Expr - for p.tok != RPAREN && p.tok != COLON && p.tok != EOF { - if len(params) > 0 { - p.consume(COMMA) - } - if p.tok == RPAREN { - break - } - - // * or *args or **kwargs - if p.tok == STAR || p.tok == STARSTAR { - op := p.tok - pos := p.nextToken() - var x Expr - if op == STARSTAR || p.tok == IDENT { - x = p.parseIdent() - } - params = append(params, &UnaryExpr{ - OpPos: pos, - Op: op, - X: x, - }) - continue - } - - // IDENT - // IDENT = test - id := p.parseIdent() - if p.tok == EQ { // default value - eq := p.nextToken() - dflt := p.parseTest() - params = append(params, &BinaryExpr{ - X: id, - OpPos: eq, - Op: EQ, - Y: dflt, - }) - continue - } - - params = append(params, id) - } - return params -} - -// parseExpr parses an expression, possible consisting of a -// comma-separated list of 'test' expressions. -// -// In many cases we must use parseTest to avoid ambiguity such as -// f(x, y) vs. f((x, y)). -func (p *parser) parseExpr(inParens bool) Expr { - x := p.parseTest() - if p.tok != COMMA { - return x - } - - // tuple - exprs := p.parseExprs([]Expr{x}, inParens) - return &TupleExpr{List: exprs} -} - -// parseExprs parses a comma-separated list of expressions, starting with the comma. -// It is used to parse tuples and list elements. -// expr_list = (',' expr)* ','? -func (p *parser) parseExprs(exprs []Expr, allowTrailingComma bool) []Expr { - for p.tok == COMMA { - pos := p.nextToken() - if terminatesExprList(p.tok) { - if !allowTrailingComma { - p.in.error(pos, "unparenthesized tuple with trailing comma") - } - break - } - exprs = append(exprs, p.parseTest()) - } - return exprs -} - -// parseTest parses a 'test', a single-component expression. -func (p *parser) parseTest() Expr { - if p.tok == LAMBDA { - return p.parseLambda(true) - } - - x := p.parseTestPrec(0) - - // conditional expression (t IF cond ELSE f) - if p.tok == IF { - ifpos := p.nextToken() - cond := p.parseTestPrec(0) - if p.tok != ELSE { - p.in.error(ifpos, "conditional expression without else clause") - } - elsepos := p.nextToken() - else_ := p.parseTest() - return &CondExpr{If: ifpos, Cond: cond, True: x, ElsePos: elsepos, False: else_} - } - - return x -} - -// parseTestNoCond parses a a single-component expression without -// consuming a trailing 'if expr else expr'. -func (p *parser) parseTestNoCond() Expr { - if p.tok == LAMBDA { - return p.parseLambda(false) - } - return p.parseTestPrec(0) -} - -// parseLambda parses a lambda expression. -// The allowCond flag allows the body to be an 'a if b else c' conditional. 
-func (p *parser) parseLambda(allowCond bool) Expr { - lambda := p.nextToken() - var params []Expr - if p.tok != COLON { - params = p.parseParams() - } - p.consume(COLON) - - var body Expr - if allowCond { - body = p.parseTest() - } else { - body = p.parseTestNoCond() - } - - return &LambdaExpr{ - Lambda: lambda, - Params: params, - Body: body, - } -} - -func (p *parser) parseTestPrec(prec int) Expr { - if prec >= len(preclevels) { - return p.parsePrimaryWithSuffix() - } - - // expr = NOT expr - if p.tok == NOT && prec == int(precedence[NOT]) { - pos := p.nextToken() - x := p.parseTestPrec(prec) - return &UnaryExpr{ - OpPos: pos, - Op: NOT, - X: x, - } - } - - return p.parseBinopExpr(prec) -} - -// expr = test (OP test)* -// Uses precedence climbing; see http://www.engr.mun.ca/~theo/Misc/exp_parsing.htm#climbing. -func (p *parser) parseBinopExpr(prec int) Expr { - x := p.parseTestPrec(prec + 1) - for first := true; ; first = false { - if p.tok == NOT { - p.nextToken() // consume NOT - // In this context, NOT must be followed by IN. - // Replace NOT IN by a single NOT_IN token. - if p.tok != IN { - p.in.errorf(p.in.pos, "got %#v, want in", p.tok) - } - p.tok = NOT_IN - } - - // Binary operator of specified precedence? - opprec := int(precedence[p.tok]) - if opprec < prec { - return x - } - - // Comparisons are non-associative. - if !first && opprec == int(precedence[EQL]) { - p.in.errorf(p.in.pos, "%s does not associate with %s (use parens)", - x.(*BinaryExpr).Op, p.tok) - } - - op := p.tok - pos := p.nextToken() - y := p.parseTestPrec(opprec + 1) - x = &BinaryExpr{OpPos: pos, Op: op, X: x, Y: y} - } -} - -// precedence maps each operator to its precedence (0-7), or -1 for other tokens. -var precedence [maxToken]int8 - -// preclevels groups operators of equal precedence. -// Comparisons are nonassociative; other binary operators associate to the left. -// Unary MINUS, unary PLUS, and TILDE have higher precedence so are handled in parsePrimary. -// See https://github.com/google/starlark-go/blob/master/doc/spec.md#binary-operators -var preclevels = [...][]Token{ - {OR}, // or - {AND}, // and - {NOT}, // not (unary) - {EQL, NEQ, LT, GT, LE, GE, IN, NOT_IN}, // == != < > <= >= in not in - {PIPE}, // | - {CIRCUMFLEX}, // ^ - {AMP}, // & - {LTLT, GTGT}, // << >> - {MINUS, PLUS}, // - - {STAR, PERCENT, SLASH, SLASHSLASH}, // * % / // -} - -func init() { - // populate precedence table - for i := range precedence { - precedence[i] = -1 - } - for level, tokens := range preclevels { - for _, tok := range tokens { - precedence[tok] = int8(level) - } - } -} - -// primary_with_suffix = primary -// | primary '.' IDENT -// | primary slice_suffix -// | primary call_suffix -func (p *parser) parsePrimaryWithSuffix() Expr { - x := p.parsePrimary() - for { - switch p.tok { - case DOT: - dot := p.nextToken() - id := p.parseIdent() - x = &DotExpr{Dot: dot, X: x, Name: id} - case LBRACK: - x = p.parseSliceSuffix(x) - case LPAREN: - x = p.parseCallSuffix(x) - default: - return x - } - } -} - -// slice_suffix = '[' expr? ':' expr? ':' expr? 
']' -func (p *parser) parseSliceSuffix(x Expr) Expr { - lbrack := p.nextToken() - var lo, hi, step Expr - if p.tok != COLON { - y := p.parseExpr(false) - - // index x[y] - if p.tok == RBRACK { - rbrack := p.nextToken() - return &IndexExpr{X: x, Lbrack: lbrack, Y: y, Rbrack: rbrack} - } - - lo = y - } - - // slice or substring x[lo:hi:step] - if p.tok == COLON { - p.nextToken() - if p.tok != COLON && p.tok != RBRACK { - hi = p.parseTest() - } - } - if p.tok == COLON { - p.nextToken() - if p.tok != RBRACK { - step = p.parseTest() - } - } - rbrack := p.consume(RBRACK) - return &SliceExpr{X: x, Lbrack: lbrack, Lo: lo, Hi: hi, Step: step, Rbrack: rbrack} -} - -// call_suffix = '(' arg_list? ')' -func (p *parser) parseCallSuffix(fn Expr) Expr { - lparen := p.consume(LPAREN) - var rparen Position - var args []Expr - if p.tok == RPAREN { - rparen = p.nextToken() - } else { - args = p.parseArgs() - rparen = p.consume(RPAREN) - } - return &CallExpr{Fn: fn, Lparen: lparen, Args: args, Rparen: rparen} -} - -// parseArgs parses a list of actual parameter values (arguments). -// It mirrors the structure of parseParams. -// arg_list = ((arg COMMA)* arg COMMA?)? -func (p *parser) parseArgs() []Expr { - var args []Expr - for p.tok != RPAREN && p.tok != EOF { - if len(args) > 0 { - p.consume(COMMA) - } - if p.tok == RPAREN { - break - } - - // *args or **kwargs - if p.tok == STAR || p.tok == STARSTAR { - op := p.tok - pos := p.nextToken() - x := p.parseTest() - args = append(args, &UnaryExpr{ - OpPos: pos, - Op: op, - X: x, - }) - continue - } - - // We use a different strategy from Bazel here to stay within LL(1). - // Instead of looking ahead two tokens (IDENT, EQ) we parse - // 'test = test' then check that the first was an IDENT. - x := p.parseTest() - - if p.tok == EQ { - // name = value - if _, ok := x.(*Ident); !ok { - p.in.errorf(p.in.pos, "keyword argument must have form name=expr") - } - eq := p.nextToken() - y := p.parseTest() - x = &BinaryExpr{ - X: x, - OpPos: eq, - Op: EQ, - Y: y, - } - } - - args = append(args, x) - } - return args -} - -// primary = IDENT -// | INT | FLOAT | STRING | BYTES -// | '[' ... // list literal or comprehension -// | '{' ... // dict literal or comprehension -// | '(' ... 
// tuple or parenthesized expression -// | ('-'|'+'|'~') primary_with_suffix -func (p *parser) parsePrimary() Expr { - switch p.tok { - case IDENT: - return p.parseIdent() - - case INT, FLOAT, STRING, BYTES: - var val interface{} - tok := p.tok - switch tok { - case INT: - if p.tokval.bigInt != nil { - val = p.tokval.bigInt - } else { - val = p.tokval.int - } - case FLOAT: - val = p.tokval.float - case STRING, BYTES: - val = p.tokval.string - } - raw := p.tokval.raw - pos := p.nextToken() - return &Literal{Token: tok, TokenPos: pos, Raw: raw, Value: val} - - case LBRACK: - return p.parseList() - - case LBRACE: - return p.parseDict() - - case LPAREN: - lparen := p.nextToken() - if p.tok == RPAREN { - // empty tuple - rparen := p.nextToken() - return &TupleExpr{Lparen: lparen, Rparen: rparen} - } - e := p.parseExpr(true) // allow trailing comma - rparen := p.consume(RPAREN) - return &ParenExpr{ - Lparen: lparen, - X: e, - Rparen: rparen, - } - - case MINUS, PLUS, TILDE: // unary - tok := p.tok - pos := p.nextToken() - x := p.parsePrimaryWithSuffix() - return &UnaryExpr{ - OpPos: pos, - Op: tok, - X: x, - } - } - p.in.errorf(p.in.pos, "got %#v, want primary expression", p.tok) - panic("unreachable") -} - -// list = '[' ']' -// | '[' expr ']' -// | '[' expr expr_list ']' -// | '[' expr (FOR loop_variables IN expr)+ ']' -func (p *parser) parseList() Expr { - lbrack := p.nextToken() - if p.tok == RBRACK { - // empty List - rbrack := p.nextToken() - return &ListExpr{Lbrack: lbrack, Rbrack: rbrack} - } - - x := p.parseTest() - - if p.tok == FOR { - // list comprehension - return p.parseComprehensionSuffix(lbrack, x, RBRACK) - } - - exprs := []Expr{x} - if p.tok == COMMA { - // multi-item list literal - exprs = p.parseExprs(exprs, true) // allow trailing comma - } - - rbrack := p.consume(RBRACK) - return &ListExpr{Lbrack: lbrack, List: exprs, Rbrack: rbrack} -} - -// dict = '{' '}' -// | '{' dict_entry_list '}' -// | '{' dict_entry FOR loop_variables IN expr '}' -func (p *parser) parseDict() Expr { - lbrace := p.nextToken() - if p.tok == RBRACE { - // empty dict - rbrace := p.nextToken() - return &DictExpr{Lbrace: lbrace, Rbrace: rbrace} - } - - x := p.parseDictEntry() - - if p.tok == FOR { - // dict comprehension - return p.parseComprehensionSuffix(lbrace, x, RBRACE) - } - - entries := []Expr{x} - for p.tok == COMMA { - p.nextToken() - if p.tok == RBRACE { - break - } - entries = append(entries, p.parseDictEntry()) - } - - rbrace := p.consume(RBRACE) - return &DictExpr{Lbrace: lbrace, List: entries, Rbrace: rbrace} -} - -// dict_entry = test ':' test -func (p *parser) parseDictEntry() *DictEntry { - k := p.parseTest() - colon := p.consume(COLON) - v := p.parseTest() - return &DictEntry{Key: k, Colon: colon, Value: v} -} - -// comp_suffix = FOR loopvars IN expr comp_suffix -// | IF expr comp_suffix -// | ']' or ')' (end) -// -// There can be multiple FOR/IF clauses; the first is always a FOR. -func (p *parser) parseComprehensionSuffix(lbrace Position, body Expr, endBrace Token) Expr { - var clauses []Node - for p.tok != endBrace { - if p.tok == FOR { - pos := p.nextToken() - vars := p.parseForLoopVariables() - in := p.consume(IN) - // Following Python 3, the operand of IN cannot be: - // - a conditional expression ('x if y else z'), - // due to conflicts in Python grammar - // ('if' is used by the comprehension); - // - a lambda expression - // - an unparenthesized tuple. 
- x := p.parseTestPrec(0) - clauses = append(clauses, &ForClause{For: pos, Vars: vars, In: in, X: x}) - } else if p.tok == IF { - pos := p.nextToken() - cond := p.parseTestNoCond() - clauses = append(clauses, &IfClause{If: pos, Cond: cond}) - } else { - p.in.errorf(p.in.pos, "got %#v, want '%s', for, or if", p.tok, endBrace) - } - } - rbrace := p.nextToken() - - return &Comprehension{ - Curly: endBrace == RBRACE, - Lbrack: lbrace, - Body: body, - Clauses: clauses, - Rbrack: rbrace, - } -} - -func terminatesExprList(tok Token) bool { - switch tok { - case EOF, NEWLINE, EQ, RBRACE, RBRACK, RPAREN, SEMI: - return true - } - return false -} - -// Comment assignment. -// We build two lists of all subnodes, preorder and postorder. -// The preorder list is ordered by start location, with outer nodes first. -// The postorder list is ordered by end location, with outer nodes last. -// We use the preorder list to assign each whole-line comment to the syntax -// immediately following it, and we use the postorder list to assign each -// end-of-line comment to the syntax immediately preceding it. - -// flattenAST returns the list of AST nodes, both in prefix order and in postfix -// order. -func flattenAST(root Node) (pre, post []Node) { - stack := []Node{} - Walk(root, func(n Node) bool { - if n != nil { - pre = append(pre, n) - stack = append(stack, n) - } else { - post = append(post, stack[len(stack)-1]) - stack = stack[:len(stack)-1] - } - return true - }) - return pre, post -} - -// assignComments attaches comments to nearby syntax. -func (p *parser) assignComments(n Node) { - // Leave early if there are no comments - if len(p.in.lineComments)+len(p.in.suffixComments) == 0 { - return - } - - pre, post := flattenAST(n) - - // Assign line comments to syntax immediately following. - line := p.in.lineComments - for _, x := range pre { - start, _ := x.Span() - - switch x.(type) { - case *File: - continue - } - - for len(line) > 0 && !start.isBefore(line[0].Start) { - x.AllocComments() - x.Comments().Before = append(x.Comments().Before, line[0]) - line = line[1:] - } - } - - // Remaining line comments go at end of file. - if len(line) > 0 { - n.AllocComments() - n.Comments().After = append(n.Comments().After, line...) - } - - // Assign suffix comments to syntax immediately before. - suffix := p.in.suffixComments - for i := len(post) - 1; i >= 0; i-- { - x := post[i] - - // Do not assign suffix comments to file - switch x.(type) { - case *File: - continue - } - - _, end := x.Span() - if len(suffix) > 0 && end.isBefore(suffix[len(suffix)-1].Start) { - x.AllocComments() - x.Comments().Suffix = append(x.Comments().Suffix, suffix[len(suffix)-1]) - suffix = suffix[:len(suffix)-1] - } - } -} diff --git a/vendor/go.starlark.net/syntax/quote.go b/vendor/go.starlark.net/syntax/quote.go deleted file mode 100644 index 741e106ad..000000000 --- a/vendor/go.starlark.net/syntax/quote.go +++ /dev/null @@ -1,309 +0,0 @@ -// Copyright 2017 The Bazel Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package syntax - -// Starlark quoted string utilities. - -import ( - "fmt" - "strconv" - "strings" - "unicode" - "unicode/utf8" -) - -// unesc maps single-letter chars following \ to their actual values. -var unesc = [256]byte{ - 'a': '\a', - 'b': '\b', - 'f': '\f', - 'n': '\n', - 'r': '\r', - 't': '\t', - 'v': '\v', - '\\': '\\', - '\'': '\'', - '"': '"', -} - -// esc maps escape-worthy bytes to the char that should follow \. 
-var esc = [256]byte{ - '\a': 'a', - '\b': 'b', - '\f': 'f', - '\n': 'n', - '\r': 'r', - '\t': 't', - '\v': 'v', - '\\': '\\', - '\'': '\'', - '"': '"', -} - -// unquote unquotes the quoted string, returning the actual -// string value, whether the original was triple-quoted, -// whether it was a byte string, and an error describing invalid input. -func unquote(quoted string) (s string, triple, isByte bool, err error) { - // Check for raw prefix: means don't interpret the inner \. - raw := false - if strings.HasPrefix(quoted, "r") { - raw = true - quoted = quoted[1:] - } - // Check for bytes prefix. - if strings.HasPrefix(quoted, "b") { - isByte = true - quoted = quoted[1:] - } - - if len(quoted) < 2 { - err = fmt.Errorf("string literal too short") - return - } - - if quoted[0] != '"' && quoted[0] != '\'' || quoted[0] != quoted[len(quoted)-1] { - err = fmt.Errorf("string literal has invalid quotes") - return - } - - // Check for triple quoted string. - quote := quoted[0] - if len(quoted) >= 6 && quoted[1] == quote && quoted[2] == quote && quoted[:3] == quoted[len(quoted)-3:] { - triple = true - quoted = quoted[3 : len(quoted)-3] - } else { - quoted = quoted[1 : len(quoted)-1] - } - - // Now quoted is the quoted data, but no quotes. - // If we're in raw mode or there are no escapes or - // carriage returns, we're done. - var unquoteChars string - if raw { - unquoteChars = "\r" - } else { - unquoteChars = "\\\r" - } - if !strings.ContainsAny(quoted, unquoteChars) { - s = quoted - return - } - - // Otherwise process quoted string. - // Each iteration processes one escape sequence along with the - // plain text leading up to it. - buf := new(strings.Builder) - for { - // Remove prefix before escape sequence. - i := strings.IndexAny(quoted, unquoteChars) - if i < 0 { - i = len(quoted) - } - buf.WriteString(quoted[:i]) - quoted = quoted[i:] - - if len(quoted) == 0 { - break - } - - // Process carriage return. - if quoted[0] == '\r' { - buf.WriteByte('\n') - if len(quoted) > 1 && quoted[1] == '\n' { - quoted = quoted[2:] - } else { - quoted = quoted[1:] - } - continue - } - - // Process escape sequence. - if len(quoted) == 1 { - err = fmt.Errorf(`truncated escape sequence \`) - return - } - - switch quoted[1] { - default: - // In Starlark, like Go, a backslash must escape something. - // (Python still treats unnecessary backslashes literally, - // but since 3.6 has emitted a deprecation warning.) - err = fmt.Errorf("invalid escape sequence \\%c", quoted[1]) - return - - case '\n': - // Ignore the escape and the line break. - quoted = quoted[2:] - - case 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\', '\'', '"': - // One-char escape. - // Escapes are allowed for both kinds of quotation - // mark, not just the kind in use. - buf.WriteByte(unesc[quoted[1]]) - quoted = quoted[2:] - - case '0', '1', '2', '3', '4', '5', '6', '7': - // Octal escape, up to 3 digits, \OOO. - n := int(quoted[1] - '0') - quoted = quoted[2:] - for i := 1; i < 3; i++ { - if len(quoted) == 0 || quoted[0] < '0' || '7' < quoted[0] { - break - } - n = n*8 + int(quoted[0]-'0') - quoted = quoted[1:] - } - if !isByte && n > 127 { - err = fmt.Errorf(`non-ASCII octal escape \%o (use \u%04X for the UTF-8 encoding of U+%04X)`, n, n, n) - return - } - if n >= 256 { - // NOTE: Python silently discards the high bit, - // so that '\541' == '\141' == 'a'. - // Let's see if we can avoid doing that in BUILD files. 
- err = fmt.Errorf(`invalid escape sequence \%03o`, n) - return - } - buf.WriteByte(byte(n)) - - case 'x': - // Hexadecimal escape, exactly 2 digits, \xXX. [0-127] - if len(quoted) < 4 { - err = fmt.Errorf(`truncated escape sequence %s`, quoted) - return - } - n, err1 := strconv.ParseUint(quoted[2:4], 16, 0) - if err1 != nil { - err = fmt.Errorf(`invalid escape sequence %s`, quoted[:4]) - return - } - if !isByte && n > 127 { - err = fmt.Errorf(`non-ASCII hex escape %s (use \u%04X for the UTF-8 encoding of U+%04X)`, - quoted[:4], n, n) - return - } - buf.WriteByte(byte(n)) - quoted = quoted[4:] - - case 'u', 'U': - // Unicode code point, 4 (\uXXXX) or 8 (\UXXXXXXXX) hex digits. - sz := 6 - if quoted[1] == 'U' { - sz = 10 - } - if len(quoted) < sz { - err = fmt.Errorf(`truncated escape sequence %s`, quoted) - return - } - n, err1 := strconv.ParseUint(quoted[2:sz], 16, 0) - if err1 != nil { - err = fmt.Errorf(`invalid escape sequence %s`, quoted[:sz]) - return - } - if n > unicode.MaxRune { - err = fmt.Errorf(`code point out of range: %s (max \U%08x)`, - quoted[:sz], n) - return - } - // As in Go, surrogates are disallowed. - if 0xD800 <= n && n < 0xE000 { - err = fmt.Errorf(`invalid Unicode code point U+%04X`, n) - return - } - buf.WriteRune(rune(n)) - quoted = quoted[sz:] - } - } - - s = buf.String() - return -} - -// indexByte returns the index of the first instance of b in s, or else -1. -func indexByte(s string, b byte) int { - for i := 0; i < len(s); i++ { - if s[i] == b { - return i - } - } - return -1 -} - -// Quote returns a Starlark literal that denotes s. -// If b, it returns a bytes literal. -func Quote(s string, b bool) string { - const hex = "0123456789abcdef" - var runeTmp [utf8.UTFMax]byte - - buf := make([]byte, 0, 3*len(s)/2) - if b { - buf = append(buf, 'b') - } - buf = append(buf, '"') - for width := 0; len(s) > 0; s = s[width:] { - r := rune(s[0]) - width = 1 - if r >= utf8.RuneSelf { - r, width = utf8.DecodeRuneInString(s) - } - if width == 1 && r == utf8.RuneError { - // String (!b) literals accept \xXX escapes only for ASCII, - // but we must use them here to represent invalid bytes. - // The result is not a legal literal. - buf = append(buf, `\x`...) - buf = append(buf, hex[s[0]>>4]) - buf = append(buf, hex[s[0]&0xF]) - continue - } - if r == '"' || r == '\\' { // always backslashed - buf = append(buf, '\\') - buf = append(buf, byte(r)) - continue - } - if strconv.IsPrint(r) { - n := utf8.EncodeRune(runeTmp[:], r) - buf = append(buf, runeTmp[:n]...) - continue - } - switch r { - case '\a': - buf = append(buf, `\a`...) - case '\b': - buf = append(buf, `\b`...) - case '\f': - buf = append(buf, `\f`...) - case '\n': - buf = append(buf, `\n`...) - case '\r': - buf = append(buf, `\r`...) - case '\t': - buf = append(buf, `\t`...) - case '\v': - buf = append(buf, `\v`...) - default: - switch { - case r < ' ' || r == 0x7f: - buf = append(buf, `\x`...) - buf = append(buf, hex[byte(r)>>4]) - buf = append(buf, hex[byte(r)&0xF]) - case r > utf8.MaxRune: - r = 0xFFFD - fallthrough - case r < 0x10000: - buf = append(buf, `\u`...) - for s := 12; s >= 0; s -= 4 { - buf = append(buf, hex[r>>uint(s)&0xF]) - } - default: - buf = append(buf, `\U`...) 
- for s := 28; s >= 0; s -= 4 { - buf = append(buf, hex[r>>uint(s)&0xF]) - } - } - } - } - buf = append(buf, '"') - return string(buf) -} diff --git a/vendor/go.starlark.net/syntax/scan.go b/vendor/go.starlark.net/syntax/scan.go deleted file mode 100644 index bb4165e9d..000000000 --- a/vendor/go.starlark.net/syntax/scan.go +++ /dev/null @@ -1,1123 +0,0 @@ -// Copyright 2017 The Bazel Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package syntax - -// A lexical scanner for Starlark. - -import ( - "fmt" - "io" - "io/ioutil" - "log" - "math/big" - "os" - "strconv" - "strings" - "unicode" - "unicode/utf8" -) - -// A Token represents a Starlark lexical token. -type Token int8 - -const ( - ILLEGAL Token = iota - EOF - - NEWLINE - INDENT - OUTDENT - - // Tokens with values - IDENT // x - INT // 123 - FLOAT // 1.23e45 - STRING // "foo" or 'foo' or '''foo''' or r'foo' or r"foo" - BYTES // b"foo", etc - - // Punctuation - PLUS // + - MINUS // - - STAR // * - SLASH // / - SLASHSLASH // // - PERCENT // % - AMP // & - PIPE // | - CIRCUMFLEX // ^ - LTLT // << - GTGT // >> - TILDE // ~ - DOT // . - COMMA // , - EQ // = - SEMI // ; - COLON // : - LPAREN // ( - RPAREN // ) - LBRACK // [ - RBRACK // ] - LBRACE // { - RBRACE // } - LT // < - GT // > - GE // >= - LE // <= - EQL // == - NEQ // != - PLUS_EQ // += (keep order consistent with PLUS..GTGT) - MINUS_EQ // -= - STAR_EQ // *= - SLASH_EQ // /= - SLASHSLASH_EQ // //= - PERCENT_EQ // %= - AMP_EQ // &= - PIPE_EQ // |= - CIRCUMFLEX_EQ // ^= - LTLT_EQ // <<= - GTGT_EQ // >>= - STARSTAR // ** - - // Keywords - AND - BREAK - CONTINUE - DEF - ELIF - ELSE - FOR - IF - IN - LAMBDA - LOAD - NOT - NOT_IN // synthesized by parser from NOT IN - OR - PASS - RETURN - WHILE - - maxToken -) - -func (tok Token) String() string { return tokenNames[tok] } - -// GoString is like String but quotes punctuation tokens. -// Use Sprintf("%#v", tok) when constructing error messages. -func (tok Token) GoString() string { - if tok >= PLUS && tok <= STARSTAR { - return "'" + tokenNames[tok] + "'" - } - return tokenNames[tok] -} - -var tokenNames = [...]string{ - ILLEGAL: "illegal token", - EOF: "end of file", - NEWLINE: "newline", - INDENT: "indent", - OUTDENT: "outdent", - IDENT: "identifier", - INT: "int literal", - FLOAT: "float literal", - STRING: "string literal", - PLUS: "+", - MINUS: "-", - STAR: "*", - SLASH: "/", - SLASHSLASH: "//", - PERCENT: "%", - AMP: "&", - PIPE: "|", - CIRCUMFLEX: "^", - LTLT: "<<", - GTGT: ">>", - TILDE: "~", - DOT: ".", - COMMA: ",", - EQ: "=", - SEMI: ";", - COLON: ":", - LPAREN: "(", - RPAREN: ")", - LBRACK: "[", - RBRACK: "]", - LBRACE: "{", - RBRACE: "}", - LT: "<", - GT: ">", - GE: ">=", - LE: "<=", - EQL: "==", - NEQ: "!=", - PLUS_EQ: "+=", - MINUS_EQ: "-=", - STAR_EQ: "*=", - SLASH_EQ: "/=", - SLASHSLASH_EQ: "//=", - PERCENT_EQ: "%=", - AMP_EQ: "&=", - PIPE_EQ: "|=", - CIRCUMFLEX_EQ: "^=", - LTLT_EQ: "<<=", - GTGT_EQ: ">>=", - STARSTAR: "**", - AND: "and", - BREAK: "break", - CONTINUE: "continue", - DEF: "def", - ELIF: "elif", - ELSE: "else", - FOR: "for", - IF: "if", - IN: "in", - LAMBDA: "lambda", - LOAD: "load", - NOT: "not", - NOT_IN: "not in", - OR: "or", - PASS: "pass", - RETURN: "return", - WHILE: "while", -} - -// A FilePortion describes the content of a portion of a file. 
-// Callers may provide a FilePortion for the src argument of Parse -// when the desired initial line and column numbers are not (1, 1), -// such as when an expression is parsed from within larger file. -type FilePortion struct { - Content []byte - FirstLine, FirstCol int32 -} - -// A Position describes the location of a rune of input. -type Position struct { - file *string // filename (indirect for compactness) - Line int32 // 1-based line number; 0 if line unknown - Col int32 // 1-based column (rune) number; 0 if column unknown -} - -// IsValid reports whether the position is valid. -func (p Position) IsValid() bool { return p.file != nil } - -// Filename returns the name of the file containing this position. -func (p Position) Filename() string { - if p.file != nil { - return *p.file - } - return "" -} - -// MakePosition returns position with the specified components. -func MakePosition(file *string, line, col int32) Position { return Position{file, line, col} } - -// add returns the position at the end of s, assuming it starts at p. -func (p Position) add(s string) Position { - if n := strings.Count(s, "\n"); n > 0 { - p.Line += int32(n) - s = s[strings.LastIndex(s, "\n")+1:] - p.Col = 1 - } - p.Col += int32(utf8.RuneCountInString(s)) - return p -} - -func (p Position) String() string { - file := p.Filename() - if p.Line > 0 { - if p.Col > 0 { - return fmt.Sprintf("%s:%d:%d", file, p.Line, p.Col) - } - return fmt.Sprintf("%s:%d", file, p.Line) - } - return file -} - -func (p Position) isBefore(q Position) bool { - if p.Line != q.Line { - return p.Line < q.Line - } - return p.Col < q.Col -} - -// An scanner represents a single input file being parsed. -type scanner struct { - rest []byte // rest of input (in REPL, a line of input) - token []byte // token being scanned - pos Position // current input position - depth int // nesting of [ ] { } ( ) - indentstk []int // stack of indentation levels - dents int // number of saved INDENT (>0) or OUTDENT (<0) tokens to return - lineStart bool // after NEWLINE; convert spaces to indentation tokens - keepComments bool // accumulate comments in slice - lineComments []Comment // list of full line comments (if keepComments) - suffixComments []Comment // list of suffix comments (if keepComments) - - readline func() ([]byte, error) // read next line of input (REPL only) -} - -func newScanner(filename string, src interface{}, keepComments bool) (*scanner, error) { - var firstLine, firstCol int32 = 1, 1 - if portion, ok := src.(FilePortion); ok { - firstLine, firstCol = portion.FirstLine, portion.FirstCol - } - sc := &scanner{ - pos: MakePosition(&filename, firstLine, firstCol), - indentstk: make([]int, 1, 10), // []int{0} + spare capacity - lineStart: true, - keepComments: keepComments, - } - sc.readline, _ = src.(func() ([]byte, error)) // ParseCompoundStmt (REPL) only - if sc.readline == nil { - data, err := readSource(filename, src) - if err != nil { - return nil, err - } - sc.rest = data - } - return sc, nil -} - -func readSource(filename string, src interface{}) ([]byte, error) { - switch src := src.(type) { - case string: - return []byte(src), nil - case []byte: - return src, nil - case io.Reader: - data, err := ioutil.ReadAll(src) - if err != nil { - err = &os.PathError{Op: "read", Path: filename, Err: err} - return nil, err - } - return data, nil - case FilePortion: - return src.Content, nil - case nil: - return ioutil.ReadFile(filename) - default: - return nil, fmt.Errorf("invalid source: %T", src) - } -} - -// An Error describes the nature 
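For reference on the quote.go helpers removed a little above: the exported Quote function renders a Go string as a Starlark string or bytes literal, escaping quotes, control characters, and invalid UTF-8. A minimal usage sketch, assuming go.starlark.net is now consumed from upstream rather than from this repository's vendor/ tree; the sample strings are arbitrary:

```go
package main

import (
	"fmt"

	"go.starlark.net/syntax"
)

func main() {
	// Quote escapes quotes and control characters in a string literal.
	fmt.Println(syntax.Quote("say \"hi\"\n", false)) // "say \"hi\"\n"

	// With b=true the result is a bytes literal; bytes that are not
	// valid UTF-8 come out as \xXX escapes.
	fmt.Println(syntax.Quote("\xff\xfe", true)) // b"\xff\xfe"
}
```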
and position of a scanner or parser error. -type Error struct { - Pos Position - Msg string -} - -func (e Error) Error() string { return e.Pos.String() + ": " + e.Msg } - -// errorf is called to report an error. -// errorf does not return: it panics. -func (sc *scanner) error(pos Position, s string) { - panic(Error{pos, s}) -} - -func (sc *scanner) errorf(pos Position, format string, args ...interface{}) { - sc.error(pos, fmt.Sprintf(format, args...)) -} - -func (sc *scanner) recover(err *error) { - // The scanner and parser panic both for routine errors like - // syntax errors and for programmer bugs like array index - // errors. Turn both into error returns. Catching bug panics - // is especially important when processing many files. - switch e := recover().(type) { - case nil: - // no panic - case Error: - *err = e - default: - *err = Error{sc.pos, fmt.Sprintf("internal error: %v", e)} - if debug { - log.Fatal(*err) - } - } -} - -// eof reports whether the input has reached end of file. -func (sc *scanner) eof() bool { - return len(sc.rest) == 0 && !sc.readLine() -} - -// readLine attempts to read another line of input. -// Precondition: len(sc.rest)==0. -func (sc *scanner) readLine() bool { - if sc.readline != nil { - var err error - sc.rest, err = sc.readline() - if err != nil { - sc.errorf(sc.pos, "%v", err) // EOF or ErrInterrupt - } - return len(sc.rest) > 0 - } - return false -} - -// peekRune returns the next rune in the input without consuming it. -// Newlines in Unix, DOS, or Mac format are treated as one rune, '\n'. -func (sc *scanner) peekRune() rune { - // TODO(adonovan): opt: measure and perhaps inline eof. - if sc.eof() { - return 0 - } - - // fast path: ASCII - if b := sc.rest[0]; b < utf8.RuneSelf { - if b == '\r' { - return '\n' - } - return rune(b) - } - - r, _ := utf8.DecodeRune(sc.rest) - return r -} - -// readRune consumes and returns the next rune in the input. -// Newlines in Unix, DOS, or Mac format are treated as one rune, '\n'. -func (sc *scanner) readRune() rune { - // eof() has been inlined here, both to avoid a call - // and to establish len(rest)>0 to avoid a bounds check. - if len(sc.rest) == 0 { - if !sc.readLine() { - sc.error(sc.pos, "internal scanner error: readRune at EOF") - } - // Redundant, but eliminates the bounds-check below. - if len(sc.rest) == 0 { - return 0 - } - } - - // fast path: ASCII - if b := sc.rest[0]; b < utf8.RuneSelf { - r := rune(b) - sc.rest = sc.rest[1:] - if r == '\r' { - if len(sc.rest) > 0 && sc.rest[0] == '\n' { - sc.rest = sc.rest[1:] - } - r = '\n' - } - if r == '\n' { - sc.pos.Line++ - sc.pos.Col = 1 - } else { - sc.pos.Col++ - } - return r - } - - r, size := utf8.DecodeRune(sc.rest) - sc.rest = sc.rest[size:] - sc.pos.Col++ - return r -} - -// tokenValue records the position and value associated with each token. -type tokenValue struct { - raw string // raw text of token - int int64 // decoded int - bigInt *big.Int // decoded integers > int64 - float float64 // decoded float - string string // decoded string or bytes - pos Position // start position of token -} - -// startToken marks the beginning of the next input token. -// It must be followed by a call to endToken once the token has -// been consumed using readRune. -func (sc *scanner) startToken(val *tokenValue) { - sc.token = sc.rest - val.raw = "" - val.pos = sc.pos -} - -// endToken marks the end of an input token. -// It records the actual token string in val.raw if the caller -// has not done that already. 
-func (sc *scanner) endToken(val *tokenValue) { - if val.raw == "" { - val.raw = string(sc.token[:len(sc.token)-len(sc.rest)]) - } -} - -// nextToken is called by the parser to obtain the next input token. -// It returns the token value and sets val to the data associated with -// the token. -// -// For all our input tokens, the associated data is val.pos (the -// position where the token begins), val.raw (the input string -// corresponding to the token). For string and int tokens, the string -// and int fields additionally contain the token's interpreted value. -func (sc *scanner) nextToken(val *tokenValue) Token { - - // The following distribution of tokens guides case ordering: - // - // COMMA 27 % - // STRING 23 % - // IDENT 15 % - // EQL 11 % - // LBRACK 5.5 % - // RBRACK 5.5 % - // NEWLINE 3 % - // LPAREN 2.9 % - // RPAREN 2.9 % - // INT 2 % - // others < 1 % - // - // Although NEWLINE tokens are infrequent, and lineStart is - // usually (~97%) false on entry, skipped newlines account for - // about 50% of all iterations of the 'start' loop. - -start: - var c rune - - // Deal with leading spaces and indentation. - blank := false - savedLineStart := sc.lineStart - if sc.lineStart { - sc.lineStart = false - col := 0 - for { - c = sc.peekRune() - if c == ' ' { - col++ - sc.readRune() - } else if c == '\t' { - const tab = 8 - col += int(tab - (sc.pos.Col-1)%tab) - sc.readRune() - } else { - break - } - } - - // The third clause matches EOF. - if c == '#' || c == '\n' || c == 0 { - blank = true - } - - // Compute indentation level for non-blank lines not - // inside an expression. This is not the common case. - if !blank && sc.depth == 0 { - cur := sc.indentstk[len(sc.indentstk)-1] - if col > cur { - // indent - sc.dents++ - sc.indentstk = append(sc.indentstk, col) - } else if col < cur { - // outdent(s) - for len(sc.indentstk) > 0 && col < sc.indentstk[len(sc.indentstk)-1] { - sc.dents-- - sc.indentstk = sc.indentstk[:len(sc.indentstk)-1] // pop - } - if col != sc.indentstk[len(sc.indentstk)-1] { - sc.error(sc.pos, "unindent does not match any outer indentation level") - } - } - } - } - - // Return saved indentation tokens. - if sc.dents != 0 { - sc.startToken(val) - sc.endToken(val) - if sc.dents < 0 { - sc.dents++ - return OUTDENT - } else { - sc.dents-- - return INDENT - } - } - - // start of line proper - c = sc.peekRune() - - // Skip spaces. - for c == ' ' || c == '\t' { - sc.readRune() - c = sc.peekRune() - } - - // comment - if c == '#' { - if sc.keepComments { - sc.startToken(val) - } - // Consume up to newline (included). - for c != 0 && c != '\n' { - sc.readRune() - c = sc.peekRune() - } - if sc.keepComments { - sc.endToken(val) - if blank { - sc.lineComments = append(sc.lineComments, Comment{val.pos, val.raw}) - } else { - sc.suffixComments = append(sc.suffixComments, Comment{val.pos, val.raw}) - } - } - } - - // newline - if c == '\n' { - sc.lineStart = true - - // Ignore newlines within expressions (common case). - if sc.depth > 0 { - sc.readRune() - goto start - } - - // Ignore blank lines, except in the REPL, - // where they emit OUTDENTs and NEWLINE. - if blank { - if sc.readline == nil { - sc.readRune() - goto start - } else if len(sc.indentstk) > 1 { - sc.dents = 1 - len(sc.indentstk) - sc.indentstk = sc.indentstk[:1] - goto start - } - } - - // At top-level (not in an expression). 
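The scanner reports problems by panicking with a syntax.Error and recovering at the API boundary, as the recover helper above shows, so callers see an ordinary error carrying a Position. A sketch of what that looks like from the outside, assuming Parse surfaces these failures as syntax.Error values and using a made-up file name:

```go
package main

import (
	"fmt"

	"go.starlark.net/syntax"
)

func main() {
	// A deliberately truncated expression triggers a parse error.
	_, err := syntax.Parse("example.star", "x = (1 +", 0)
	if serr, ok := err.(syntax.Error); ok {
		// Position.String() formats as "file:line:col".
		fmt.Printf("%s: %s\n", serr.Pos, serr.Msg)
	} else if err != nil {
		fmt.Println(err)
	}
}
```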
- sc.startToken(val) - sc.readRune() - val.raw = "\n" - return NEWLINE - } - - // end of file - if c == 0 { - // Emit OUTDENTs for unfinished indentation, - // preceded by a NEWLINE if we haven't just emitted one. - if len(sc.indentstk) > 1 { - if savedLineStart { - sc.dents = 1 - len(sc.indentstk) - sc.indentstk = sc.indentstk[:1] - goto start - } else { - sc.lineStart = true - sc.startToken(val) - val.raw = "\n" - return NEWLINE - } - } - - sc.startToken(val) - sc.endToken(val) - return EOF - } - - // line continuation - if c == '\\' { - sc.readRune() - if sc.peekRune() != '\n' { - sc.errorf(sc.pos, "stray backslash in program") - } - sc.readRune() - goto start - } - - // start of the next token - sc.startToken(val) - - // comma (common case) - if c == ',' { - sc.readRune() - sc.endToken(val) - return COMMA - } - - // string literal - if c == '"' || c == '\'' { - return sc.scanString(val, c) - } - - // identifier or keyword - if isIdentStart(c) { - if (c == 'r' || c == 'b') && len(sc.rest) > 1 && (sc.rest[1] == '"' || sc.rest[1] == '\'') { - // r"..." - // b"..." - sc.readRune() - c = sc.peekRune() - return sc.scanString(val, c) - } else if c == 'r' && len(sc.rest) > 2 && sc.rest[1] == 'b' && (sc.rest[2] == '"' || sc.rest[2] == '\'') { - // rb"..." - sc.readRune() - sc.readRune() - c = sc.peekRune() - return sc.scanString(val, c) - } - - for isIdent(c) { - sc.readRune() - c = sc.peekRune() - } - sc.endToken(val) - if k, ok := keywordToken[val.raw]; ok { - return k - } - - return IDENT - } - - // brackets - switch c { - case '[', '(', '{': - sc.depth++ - sc.readRune() - sc.endToken(val) - switch c { - case '[': - return LBRACK - case '(': - return LPAREN - case '{': - return LBRACE - } - panic("unreachable") - - case ']', ')', '}': - if sc.depth == 0 { - sc.errorf(sc.pos, "unexpected %q", c) - } else { - sc.depth-- - } - sc.readRune() - sc.endToken(val) - switch c { - case ']': - return RBRACK - case ')': - return RPAREN - case '}': - return RBRACE - } - panic("unreachable") - } - - // int or float literal, or period - if isdigit(c) || c == '.' 
{ - return sc.scanNumber(val, c) - } - - // other punctuation - defer sc.endToken(val) - switch c { - case '=', '<', '>', '!', '+', '-', '%', '/', '&', '|', '^': // possibly followed by '=' - start := sc.pos - sc.readRune() - if sc.peekRune() == '=' { - sc.readRune() - switch c { - case '<': - return LE - case '>': - return GE - case '=': - return EQL - case '!': - return NEQ - case '+': - return PLUS_EQ - case '-': - return MINUS_EQ - case '/': - return SLASH_EQ - case '%': - return PERCENT_EQ - case '&': - return AMP_EQ - case '|': - return PIPE_EQ - case '^': - return CIRCUMFLEX_EQ - } - } - switch c { - case '=': - return EQ - case '<': - if sc.peekRune() == '<' { - sc.readRune() - if sc.peekRune() == '=' { - sc.readRune() - return LTLT_EQ - } else { - return LTLT - } - } - return LT - case '>': - if sc.peekRune() == '>' { - sc.readRune() - if sc.peekRune() == '=' { - sc.readRune() - return GTGT_EQ - } else { - return GTGT - } - } - return GT - case '!': - sc.error(start, "unexpected input character '!'") - case '+': - return PLUS - case '-': - return MINUS - case '/': - if sc.peekRune() == '/' { - sc.readRune() - if sc.peekRune() == '=' { - sc.readRune() - return SLASHSLASH_EQ - } else { - return SLASHSLASH - } - } - return SLASH - case '%': - return PERCENT - case '&': - return AMP - case '|': - return PIPE - case '^': - return CIRCUMFLEX - } - panic("unreachable") - - case ':', ';', '~': // single-char tokens (except comma) - sc.readRune() - switch c { - case ':': - return COLON - case ';': - return SEMI - case '~': - return TILDE - } - panic("unreachable") - - case '*': // possibly followed by '*' or '=' - sc.readRune() - switch sc.peekRune() { - case '*': - sc.readRune() - return STARSTAR - case '=': - sc.readRune() - return STAR_EQ - } - return STAR - } - - sc.errorf(sc.pos, "unexpected input character %#q", c) - panic("unreachable") -} - -func (sc *scanner) scanString(val *tokenValue, quote rune) Token { - start := sc.pos - triple := len(sc.rest) >= 3 && sc.rest[0] == byte(quote) && sc.rest[1] == byte(quote) && sc.rest[2] == byte(quote) - sc.readRune() - - // String literals may contain escaped or unescaped newlines, - // causing them to span multiple lines (gulps) of REPL input; - // they are the only such token. Thus we cannot call endToken, - // as it assumes sc.rest is unchanged since startToken. - // Instead, buffer the token here. - // TODO(adonovan): opt: buffer only if we encounter a newline. - raw := new(strings.Builder) - - // Copy the prefix, e.g. r' or " (see startToken). 
- raw.Write(sc.token[:len(sc.token)-len(sc.rest)]) - - if !triple { - // single-quoted string literal - for { - if sc.eof() { - sc.error(val.pos, "unexpected EOF in string") - } - c := sc.readRune() - raw.WriteRune(c) - if c == quote { - break - } - if c == '\n' { - sc.error(val.pos, "unexpected newline in string") - } - if c == '\\' { - if sc.eof() { - sc.error(val.pos, "unexpected EOF in string") - } - c = sc.readRune() - raw.WriteRune(c) - } - } - } else { - // triple-quoted string literal - sc.readRune() - raw.WriteRune(quote) - sc.readRune() - raw.WriteRune(quote) - - quoteCount := 0 - for { - if sc.eof() { - sc.error(val.pos, "unexpected EOF in string") - } - c := sc.readRune() - raw.WriteRune(c) - if c == quote { - quoteCount++ - if quoteCount == 3 { - break - } - } else { - quoteCount = 0 - } - if c == '\\' { - if sc.eof() { - sc.error(val.pos, "unexpected EOF in string") - } - c = sc.readRune() - raw.WriteRune(c) - } - } - } - val.raw = raw.String() - - s, _, isByte, err := unquote(val.raw) - if err != nil { - sc.error(start, err.Error()) - } - val.string = s - if isByte { - return BYTES - } else { - return STRING - } -} - -func (sc *scanner) scanNumber(val *tokenValue, c rune) Token { - // https://github.com/google/starlark-go/blob/master/doc/spec.md#lexical-elements - // - // Python features not supported: - // - integer literals of >64 bits of precision - // - 123L or 123l long suffix - // - traditional octal: 0755 - // https://docs.python.org/2/reference/lexical_analysis.html#integer-and-long-integer-literals - - start := sc.pos - fraction, exponent := false, false - - if c == '.' { - // dot or start of fraction - sc.readRune() - c = sc.peekRune() - if !isdigit(c) { - sc.endToken(val) - return DOT - } - fraction = true - } else if c == '0' { - // hex, octal, binary or float - sc.readRune() - c = sc.peekRune() - - if c == '.' { - fraction = true - } else if c == 'x' || c == 'X' { - // hex - sc.readRune() - c = sc.peekRune() - if !isxdigit(c) { - sc.error(start, "invalid hex literal") - } - for isxdigit(c) { - sc.readRune() - c = sc.peekRune() - } - } else if c == 'o' || c == 'O' { - // octal - sc.readRune() - c = sc.peekRune() - if !isodigit(c) { - sc.error(sc.pos, "invalid octal literal") - } - for isodigit(c) { - sc.readRune() - c = sc.peekRune() - } - } else if c == 'b' || c == 'B' { - // binary - sc.readRune() - c = sc.peekRune() - if !isbdigit(c) { - sc.error(sc.pos, "invalid binary literal") - } - for isbdigit(c) { - sc.readRune() - c = sc.peekRune() - } - } else { - // float (or obsolete octal "0755") - allzeros, octal := true, true - for isdigit(c) { - if c != '0' { - allzeros = false - } - if c > '7' { - octal = false - } - sc.readRune() - c = sc.peekRune() - } - if c == '.' { - fraction = true - } else if c == 'e' || c == 'E' { - exponent = true - } else if octal && !allzeros { - sc.endToken(val) - sc.errorf(sc.pos, "obsolete form of octal literal; use 0o%s", val.raw[1:]) - } - } - } else { - // decimal - for isdigit(c) { - sc.readRune() - c = sc.peekRune() - } - - if c == '.' { - fraction = true - } else if c == 'e' || c == 'E' { - exponent = true - } - } - - if fraction { - sc.readRune() // consume '.' 
- c = sc.peekRune() - for isdigit(c) { - sc.readRune() - c = sc.peekRune() - } - - if c == 'e' || c == 'E' { - exponent = true - } - } - - if exponent { - sc.readRune() // consume [eE] - c = sc.peekRune() - if c == '+' || c == '-' { - sc.readRune() - c = sc.peekRune() - if !isdigit(c) { - sc.error(sc.pos, "invalid float literal") - } - } - for isdigit(c) { - sc.readRune() - c = sc.peekRune() - } - } - - sc.endToken(val) - if fraction || exponent { - var err error - val.float, err = strconv.ParseFloat(val.raw, 64) - if err != nil { - sc.error(sc.pos, "invalid float literal") - } - return FLOAT - } else { - var err error - s := val.raw - val.bigInt = nil - if len(s) > 2 && s[0] == '0' && (s[1] == 'o' || s[1] == 'O') { - val.int, err = strconv.ParseInt(s[2:], 8, 64) - } else if len(s) > 2 && s[0] == '0' && (s[1] == 'b' || s[1] == 'B') { - val.int, err = strconv.ParseInt(s[2:], 2, 64) - } else { - val.int, err = strconv.ParseInt(s, 0, 64) - if err != nil { - num := new(big.Int) - var ok bool - val.bigInt, ok = num.SetString(s, 0) - if ok { - err = nil - } - } - } - if err != nil { - sc.error(start, "invalid int literal") - } - return INT - } -} - -// isIdent reports whether c is an identifier rune. -func isIdent(c rune) bool { - return isdigit(c) || isIdentStart(c) -} - -func isIdentStart(c rune) bool { - return 'a' <= c && c <= 'z' || - 'A' <= c && c <= 'Z' || - c == '_' || - unicode.IsLetter(c) -} - -func isdigit(c rune) bool { return '0' <= c && c <= '9' } -func isodigit(c rune) bool { return '0' <= c && c <= '7' } -func isxdigit(c rune) bool { return isdigit(c) || 'A' <= c && c <= 'F' || 'a' <= c && c <= 'f' } -func isbdigit(c rune) bool { return '0' == c || c == '1' } - -// keywordToken records the special tokens for -// strings that should not be treated as ordinary identifiers. -var keywordToken = map[string]Token{ - "and": AND, - "break": BREAK, - "continue": CONTINUE, - "def": DEF, - "elif": ELIF, - "else": ELSE, - "for": FOR, - "if": IF, - "in": IN, - "lambda": LAMBDA, - "load": LOAD, - "not": NOT, - "or": OR, - "pass": PASS, - "return": RETURN, - "while": WHILE, - - // reserved words: - "as": ILLEGAL, - // "assert": ILLEGAL, // heavily used by our tests - "class": ILLEGAL, - "del": ILLEGAL, - "except": ILLEGAL, - "finally": ILLEGAL, - "from": ILLEGAL, - "global": ILLEGAL, - "import": ILLEGAL, - "is": ILLEGAL, - "nonlocal": ILLEGAL, - "raise": ILLEGAL, - "try": ILLEGAL, - "with": ILLEGAL, - "yield": ILLEGAL, -} diff --git a/vendor/go.starlark.net/syntax/syntax.go b/vendor/go.starlark.net/syntax/syntax.go deleted file mode 100644 index 375663758..000000000 --- a/vendor/go.starlark.net/syntax/syntax.go +++ /dev/null @@ -1,525 +0,0 @@ -// Copyright 2017 The Bazel Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package syntax provides a Starlark parser and abstract syntax tree. -package syntax // import "go.starlark.net/syntax" - -// A Node is a node in a Starlark syntax tree. -type Node interface { - // Span returns the start and end position of the expression. - Span() (start, end Position) - - // Comments returns the comments associated with this node. - // It returns nil if RetainComments was not specified during parsing, - // or if AllocComments was not called. - Comments() *Comments - - // AllocComments allocates a new Comments node if there was none. - // This makes possible to add new comments using Comments() method. - AllocComments() -} - -// A Comment represents a single # comment. 
-type Comment struct { - Start Position - Text string // without trailing newline -} - -// Comments collects the comments associated with an expression. -type Comments struct { - Before []Comment // whole-line comments before this expression - Suffix []Comment // end-of-line comments after this expression (up to 1) - - // For top-level expressions only, After lists whole-line - // comments following the expression. - After []Comment -} - -// A commentsRef is a possibly-nil reference to a set of comments. -// A commentsRef is embedded in each type of syntax node, -// and provides its Comments and AllocComments methods. -type commentsRef struct{ ref *Comments } - -// Comments returns the comments associated with a syntax node, -// or nil if AllocComments has not yet been called. -func (cr commentsRef) Comments() *Comments { return cr.ref } - -// AllocComments enables comments to be associated with a syntax node. -func (cr *commentsRef) AllocComments() { - if cr.ref == nil { - cr.ref = new(Comments) - } -} - -// Start returns the start position of the expression. -func Start(n Node) Position { - start, _ := n.Span() - return start -} - -// End returns the end position of the expression. -func End(n Node) Position { - _, end := n.Span() - return end -} - -// A File represents a Starlark file. -type File struct { - commentsRef - Path string - Stmts []Stmt - - Module interface{} // a *resolve.Module, set by resolver -} - -func (x *File) Span() (start, end Position) { - if len(x.Stmts) == 0 { - return - } - start, _ = x.Stmts[0].Span() - _, end = x.Stmts[len(x.Stmts)-1].Span() - return start, end -} - -// A Stmt is a Starlark statement. -type Stmt interface { - Node - stmt() -} - -func (*AssignStmt) stmt() {} -func (*BranchStmt) stmt() {} -func (*DefStmt) stmt() {} -func (*ExprStmt) stmt() {} -func (*ForStmt) stmt() {} -func (*WhileStmt) stmt() {} -func (*IfStmt) stmt() {} -func (*LoadStmt) stmt() {} -func (*ReturnStmt) stmt() {} - -// An AssignStmt represents an assignment: -// x = 0 -// x, y = y, x -// x += 1 -type AssignStmt struct { - commentsRef - OpPos Position - Op Token // = EQ | {PLUS,MINUS,STAR,PERCENT}_EQ - LHS Expr - RHS Expr -} - -func (x *AssignStmt) Span() (start, end Position) { - start, _ = x.LHS.Span() - _, end = x.RHS.Span() - return -} - -// A DefStmt represents a function definition. -type DefStmt struct { - commentsRef - Def Position - Name *Ident - Params []Expr // param = ident | ident=expr | * | *ident | **ident - Body []Stmt - - Function interface{} // a *resolve.Function, set by resolver -} - -func (x *DefStmt) Span() (start, end Position) { - _, end = x.Body[len(x.Body)-1].Span() - return x.Def, end -} - -// An ExprStmt is an expression evaluated for side effects. -type ExprStmt struct { - commentsRef - X Expr -} - -func (x *ExprStmt) Span() (start, end Position) { - return x.X.Span() -} - -// An IfStmt is a conditional: If Cond: True; else: False. -// 'elseif' is desugared into a chain of IfStmts. -type IfStmt struct { - commentsRef - If Position // IF or ELIF - Cond Expr - True []Stmt - ElsePos Position // ELSE or ELIF - False []Stmt // optional -} - -func (x *IfStmt) Span() (start, end Position) { - body := x.False - if body == nil { - body = x.True - } - _, end = body[len(body)-1].Span() - return x.If, end -} - -// A LoadStmt loads another module and binds names from it: -// load(Module, "x", y="foo"). 
-// -// The AST is slightly unfaithful to the concrete syntax here because -// Starlark's load statement, so that it can be implemented in Python, -// binds some names (like y above) with an identifier and some (like x) -// without. For consistency we create fake identifiers for all the -// strings. -type LoadStmt struct { - commentsRef - Load Position - Module *Literal // a string - From []*Ident // name defined in loading module - To []*Ident // name in loaded module - Rparen Position -} - -func (x *LoadStmt) Span() (start, end Position) { - return x.Load, x.Rparen -} - -// ModuleName returns the name of the module loaded by this statement. -func (x *LoadStmt) ModuleName() string { return x.Module.Value.(string) } - -// A BranchStmt changes the flow of control: break, continue, pass. -type BranchStmt struct { - commentsRef - Token Token // = BREAK | CONTINUE | PASS - TokenPos Position -} - -func (x *BranchStmt) Span() (start, end Position) { - return x.TokenPos, x.TokenPos.add(x.Token.String()) -} - -// A ReturnStmt returns from a function. -type ReturnStmt struct { - commentsRef - Return Position - Result Expr // may be nil -} - -func (x *ReturnStmt) Span() (start, end Position) { - if x.Result == nil { - return x.Return, x.Return.add("return") - } - _, end = x.Result.Span() - return x.Return, end -} - -// An Expr is a Starlark expression. -type Expr interface { - Node - expr() -} - -func (*BinaryExpr) expr() {} -func (*CallExpr) expr() {} -func (*Comprehension) expr() {} -func (*CondExpr) expr() {} -func (*DictEntry) expr() {} -func (*DictExpr) expr() {} -func (*DotExpr) expr() {} -func (*Ident) expr() {} -func (*IndexExpr) expr() {} -func (*LambdaExpr) expr() {} -func (*ListExpr) expr() {} -func (*Literal) expr() {} -func (*ParenExpr) expr() {} -func (*SliceExpr) expr() {} -func (*TupleExpr) expr() {} -func (*UnaryExpr) expr() {} - -// An Ident represents an identifier. -type Ident struct { - commentsRef - NamePos Position - Name string - - Binding interface{} // a *resolver.Binding, set by resolver -} - -func (x *Ident) Span() (start, end Position) { - return x.NamePos, x.NamePos.add(x.Name) -} - -// A Literal represents a literal string or number. -type Literal struct { - commentsRef - Token Token // = STRING | BYTES | INT | FLOAT - TokenPos Position - Raw string // uninterpreted text - Value interface{} // = string | int64 | *big.Int | float64 -} - -func (x *Literal) Span() (start, end Position) { - return x.TokenPos, x.TokenPos.add(x.Raw) -} - -// A ParenExpr represents a parenthesized expression: (X). -type ParenExpr struct { - commentsRef - Lparen Position - X Expr - Rparen Position -} - -func (x *ParenExpr) Span() (start, end Position) { - return x.Lparen, x.Rparen.add(")") -} - -// A CallExpr represents a function call expression: Fn(Args). -type CallExpr struct { - commentsRef - Fn Expr - Lparen Position - Args []Expr // arg = expr | ident=expr | *expr | **expr - Rparen Position -} - -func (x *CallExpr) Span() (start, end Position) { - start, _ = x.Fn.Span() - return start, x.Rparen.add(")") -} - -// A DotExpr represents a field or method selector: X.Name. -type DotExpr struct { - commentsRef - X Expr - Dot Position - NamePos Position - Name *Ident -} - -func (x *DotExpr) Span() (start, end Position) { - start, _ = x.X.Span() - _, end = x.Name.Span() - return -} - -// A Comprehension represents a list or dict comprehension: -// [Body for ... if ...] or {Body for ... 
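To make the AST types above concrete: a short sketch that parses a small snippet and inspects the top-level statements. It assumes the package's Parse entry point and RetainComments mode (the mode that feeds the lineComments/suffixComments slices seen in the scanner earlier); the snippet and file name are illustrative:

```go
package main

import (
	"fmt"

	"go.starlark.net/syntax"
)

const src = `
# helpers for the build
load("helpers.star", "greet")

def hello(name):
    return greet(name)
`

func main() {
	// RetainComments keeps # comments attached to AST nodes.
	f, err := syntax.Parse("example.star", src, syntax.RetainComments)
	if err != nil {
		panic(err)
	}
	for _, stmt := range f.Stmts {
		switch s := stmt.(type) {
		case *syntax.LoadStmt:
			fmt.Println("loads from", s.ModuleName())
		case *syntax.DefStmt:
			fmt.Println("defines", s.Name.Name)
		default:
			fmt.Printf("%T\n", s)
		}
	}
}
```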
if ...} -type Comprehension struct { - commentsRef - Curly bool // {x:y for ...} or {x for ...}, not [x for ...] - Lbrack Position - Body Expr - Clauses []Node // = *ForClause | *IfClause - Rbrack Position -} - -func (x *Comprehension) Span() (start, end Position) { - return x.Lbrack, x.Rbrack.add("]") -} - -// A ForStmt represents a loop: for Vars in X: Body. -type ForStmt struct { - commentsRef - For Position - Vars Expr // name, or tuple of names - X Expr - Body []Stmt -} - -func (x *ForStmt) Span() (start, end Position) { - _, end = x.Body[len(x.Body)-1].Span() - return x.For, end -} - -// A WhileStmt represents a while loop: while X: Body. -type WhileStmt struct { - commentsRef - While Position - Cond Expr - Body []Stmt -} - -func (x *WhileStmt) Span() (start, end Position) { - _, end = x.Body[len(x.Body)-1].Span() - return x.While, end -} - -// A ForClause represents a for clause in a list comprehension: for Vars in X. -type ForClause struct { - commentsRef - For Position - Vars Expr // name, or tuple of names - In Position - X Expr -} - -func (x *ForClause) Span() (start, end Position) { - _, end = x.X.Span() - return x.For, end -} - -// An IfClause represents an if clause in a list comprehension: if Cond. -type IfClause struct { - commentsRef - If Position - Cond Expr -} - -func (x *IfClause) Span() (start, end Position) { - _, end = x.Cond.Span() - return x.If, end -} - -// A DictExpr represents a dictionary literal: { List }. -type DictExpr struct { - commentsRef - Lbrace Position - List []Expr // all *DictEntrys - Rbrace Position -} - -func (x *DictExpr) Span() (start, end Position) { - return x.Lbrace, x.Rbrace.add("}") -} - -// A DictEntry represents a dictionary entry: Key: Value. -// Used only within a DictExpr. -type DictEntry struct { - commentsRef - Key Expr - Colon Position - Value Expr -} - -func (x *DictEntry) Span() (start, end Position) { - start, _ = x.Key.Span() - _, end = x.Value.Span() - return start, end -} - -// A LambdaExpr represents an inline function abstraction. -type LambdaExpr struct { - commentsRef - Lambda Position - Params []Expr // param = ident | ident=expr | * | *ident | **ident - Body Expr - - Function interface{} // a *resolve.Function, set by resolver -} - -func (x *LambdaExpr) Span() (start, end Position) { - _, end = x.Body.Span() - return x.Lambda, end -} - -// A ListExpr represents a list literal: [ List ]. -type ListExpr struct { - commentsRef - Lbrack Position - List []Expr - Rbrack Position -} - -func (x *ListExpr) Span() (start, end Position) { - return x.Lbrack, x.Rbrack.add("]") -} - -// CondExpr represents the conditional: X if COND else ELSE. -type CondExpr struct { - commentsRef - If Position - Cond Expr - True Expr - ElsePos Position - False Expr -} - -func (x *CondExpr) Span() (start, end Position) { - start, _ = x.True.Span() - _, end = x.False.Span() - return start, end -} - -// A TupleExpr represents a tuple literal: (List). -type TupleExpr struct { - commentsRef - Lparen Position // optional (e.g. in x, y = 0, 1), but required if List is empty - List []Expr - Rparen Position -} - -func (x *TupleExpr) Span() (start, end Position) { - if x.Lparen.IsValid() { - return x.Lparen, x.Rparen - } else { - return Start(x.List[0]), End(x.List[len(x.List)-1]) - } -} - -// A UnaryExpr represents a unary expression: Op X. -// -// As a special case, UnaryOp{Op:Star} may also represent -// the star parameter in def f(*args) or def f(*, x). 
-type UnaryExpr struct { - commentsRef - OpPos Position - Op Token - X Expr // may be nil if Op==STAR -} - -func (x *UnaryExpr) Span() (start, end Position) { - if x.X != nil { - _, end = x.X.Span() - } else { - end = x.OpPos.add("*") - } - return x.OpPos, end -} - -// A BinaryExpr represents a binary expression: X Op Y. -// -// As a special case, BinaryExpr{Op:EQ} may also -// represent a named argument in a call f(k=v) -// or a named parameter in a function declaration -// def f(param=default). -type BinaryExpr struct { - commentsRef - X Expr - OpPos Position - Op Token - Y Expr -} - -func (x *BinaryExpr) Span() (start, end Position) { - start, _ = x.X.Span() - _, end = x.Y.Span() - return start, end -} - -// A SliceExpr represents a slice or substring expression: X[Lo:Hi:Step]. -type SliceExpr struct { - commentsRef - X Expr - Lbrack Position - Lo, Hi, Step Expr // all optional - Rbrack Position -} - -func (x *SliceExpr) Span() (start, end Position) { - start, _ = x.X.Span() - return start, x.Rbrack -} - -// An IndexExpr represents an index expression: X[Y]. -type IndexExpr struct { - commentsRef - X Expr - Lbrack Position - Y Expr - Rbrack Position -} - -func (x *IndexExpr) Span() (start, end Position) { - start, _ = x.X.Span() - return start, x.Rbrack -} diff --git a/vendor/go.starlark.net/syntax/walk.go b/vendor/go.starlark.net/syntax/walk.go deleted file mode 100644 index 5e6c805c2..000000000 --- a/vendor/go.starlark.net/syntax/walk.go +++ /dev/null @@ -1,161 +0,0 @@ -// Copyright 2017 The Bazel Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package syntax - -// Walk traverses a syntax tree in depth-first order. -// It starts by calling f(n); n must not be nil. -// If f returns true, Walk calls itself -// recursively for each non-nil child of n. -// Walk then calls f(nil). -func Walk(n Node, f func(Node) bool) { - if n == nil { - panic("nil") - } - if !f(n) { - return - } - - // TODO(adonovan): opt: order cases using profile data. 
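walk.go, also dropped from vendor/ in this hunk, provides the depth-first Walk traversal that the comment-assignment code (flattenAST) relies on. A rough sketch of using it to tally identifiers, again assuming the module is pulled from upstream; note that Walk also invokes the callback with nil after each subtree, which the type assertion below skips safely:

```go
package main

import (
	"fmt"

	"go.starlark.net/syntax"
)

func main() {
	f, err := syntax.Parse("example.star", "total = x + y + x", 0)
	if err != nil {
		panic(err)
	}
	// Count how often each identifier appears anywhere in the tree.
	counts := map[string]int{}
	syntax.Walk(f, func(n syntax.Node) bool {
		if id, ok := n.(*syntax.Ident); ok {
			counts[id.Name]++
		}
		return true // descend into children
	})
	fmt.Println(counts) // map[total:1 x:2 y:1]
}
```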
- switch n := n.(type) { - case *File: - walkStmts(n.Stmts, f) - - case *ExprStmt: - Walk(n.X, f) - - case *BranchStmt: - // no-op - - case *IfStmt: - Walk(n.Cond, f) - walkStmts(n.True, f) - walkStmts(n.False, f) - - case *AssignStmt: - Walk(n.LHS, f) - Walk(n.RHS, f) - - case *DefStmt: - Walk(n.Name, f) - for _, param := range n.Params { - Walk(param, f) - } - walkStmts(n.Body, f) - - case *ForStmt: - Walk(n.Vars, f) - Walk(n.X, f) - walkStmts(n.Body, f) - - case *ReturnStmt: - if n.Result != nil { - Walk(n.Result, f) - } - - case *LoadStmt: - Walk(n.Module, f) - for _, from := range n.From { - Walk(from, f) - } - for _, to := range n.To { - Walk(to, f) - } - - case *Ident, *Literal: - // no-op - - case *ListExpr: - for _, x := range n.List { - Walk(x, f) - } - - case *ParenExpr: - Walk(n.X, f) - - case *CondExpr: - Walk(n.Cond, f) - Walk(n.True, f) - Walk(n.False, f) - - case *IndexExpr: - Walk(n.X, f) - Walk(n.Y, f) - - case *DictEntry: - Walk(n.Key, f) - Walk(n.Value, f) - - case *SliceExpr: - Walk(n.X, f) - if n.Lo != nil { - Walk(n.Lo, f) - } - if n.Hi != nil { - Walk(n.Hi, f) - } - if n.Step != nil { - Walk(n.Step, f) - } - - case *Comprehension: - Walk(n.Body, f) - for _, clause := range n.Clauses { - Walk(clause, f) - } - - case *IfClause: - Walk(n.Cond, f) - - case *ForClause: - Walk(n.Vars, f) - Walk(n.X, f) - - case *TupleExpr: - for _, x := range n.List { - Walk(x, f) - } - - case *DictExpr: - for _, entry := range n.List { - Walk(entry, f) - } - - case *UnaryExpr: - if n.X != nil { - Walk(n.X, f) - } - - case *BinaryExpr: - Walk(n.X, f) - Walk(n.Y, f) - - case *DotExpr: - Walk(n.X, f) - Walk(n.Name, f) - - case *CallExpr: - Walk(n.Fn, f) - for _, arg := range n.Args { - Walk(arg, f) - } - - case *LambdaExpr: - for _, param := range n.Params { - Walk(param, f) - } - Walk(n.Body, f) - - default: - panic(n) - } - - f(nil) -} - -func walkStmts(stmts []Stmt, f func(Node) bool) { - for _, stmt := range stmts { - Walk(stmt, f) - } -} diff --git a/vendor/golang.org/x/net/context/context.go b/vendor/golang.org/x/net/context/context.go index cf66309c4..db1c95fab 100644 --- a/vendor/golang.org/x/net/context/context.go +++ b/vendor/golang.org/x/net/context/context.go @@ -3,29 +3,31 @@ // license that can be found in the LICENSE file. // Package context defines the Context type, which carries deadlines, -// cancelation signals, and other request-scoped values across API boundaries +// cancellation signals, and other request-scoped values across API boundaries // and between processes. // As of Go 1.7 this package is available in the standard library under the -// name context. https://golang.org/pkg/context. +// name [context], and migrating to it can be done automatically with [go fix]. // -// Incoming requests to a server should create a Context, and outgoing calls to -// servers should accept a Context. The chain of function calls between must -// propagate the Context, optionally replacing it with a modified copy created -// using WithDeadline, WithTimeout, WithCancel, or WithValue. +// Incoming requests to a server should create a [Context], and outgoing +// calls to servers should accept a Context. The chain of function +// calls between them must propagate the Context, optionally replacing +// it with a derived Context created using [WithCancel], [WithDeadline], +// [WithTimeout], or [WithValue]. 
// // Programs that use Contexts should follow these rules to keep interfaces // consistent across packages and enable static analysis tools to check context // propagation: // // Do not store Contexts inside a struct type; instead, pass a Context -// explicitly to each function that needs it. The Context should be the first +// explicitly to each function that needs it. This is discussed further in +// https://go.dev/blog/context-and-structs. The Context should be the first // parameter, typically named ctx: // // func DoSomething(ctx context.Context, arg Arg) error { // // ... use ctx ... // } // -// Do not pass a nil Context, even if a function permits it. Pass context.TODO +// Do not pass a nil [Context], even if a function permits it. Pass [context.TODO] // if you are unsure about which Context to use. // // Use context Values only for request-scoped data that transits processes and @@ -34,9 +36,30 @@ // The same Context may be passed to functions running in different goroutines; // Contexts are safe for simultaneous use by multiple goroutines. // -// See http://blog.golang.org/context for example code for a server that uses +// See https://go.dev/blog/context for example code for a server that uses // Contexts. -package context // import "golang.org/x/net/context" +// +// [go fix]: https://go.dev/cmd/go#hdr-Update_packages_to_use_new_APIs +package context + +import ( + "context" // standard library's context, as of Go 1.7 + "time" +) + +// A Context carries a deadline, a cancellation signal, and other values across +// API boundaries. +// +// Context's methods may be called by multiple goroutines simultaneously. +type Context = context.Context + +// Canceled is the error returned by [Context.Err] when the context is canceled +// for some reason other than its deadline passing. +var Canceled = context.Canceled + +// DeadlineExceeded is the error returned by [Context.Err] when the context is canceled +// due to its deadline passing. +var DeadlineExceeded = context.DeadlineExceeded // Background returns a non-nil, empty Context. It is never canceled, has no // values, and has no deadline. It is typically used by the main function, @@ -49,8 +72,73 @@ func Background() Context { // TODO returns a non-nil, empty Context. Code should use context.TODO when // it's unclear which Context to use or it is not yet available (because the // surrounding function has not yet been extended to accept a Context -// parameter). TODO is recognized by static analysis tools that determine -// whether Contexts are propagated correctly in a program. +// parameter). func TODO() Context { return todo } + +var ( + background = context.Background() + todo = context.TODO() +) + +// A CancelFunc tells an operation to abandon its work. +// A CancelFunc does not wait for the work to stop. +// A CancelFunc may be called by multiple goroutines simultaneously. +// After the first call, subsequent calls to a CancelFunc do nothing. +type CancelFunc = context.CancelFunc + +// WithCancel returns a derived context that points to the parent context +// but has a new Done channel. The returned context's Done channel is closed +// when the returned cancel function is called or when the parent context's +// Done channel is closed, whichever happens first. +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this [Context] complete. 
+func WithCancel(parent Context) (ctx Context, cancel CancelFunc) { + return context.WithCancel(parent) +} + +// WithDeadline returns a derived context that points to the parent context +// but has the deadline adjusted to be no later than d. If the parent's +// deadline is already earlier than d, WithDeadline(parent, d) is semantically +// equivalent to parent. The returned [Context.Done] channel is closed when +// the deadline expires, when the returned cancel function is called, +// or when the parent context's Done channel is closed, whichever happens first. +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this [Context] complete. +func WithDeadline(parent Context, d time.Time) (Context, CancelFunc) { + return context.WithDeadline(parent, d) +} + +// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)). +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this [Context] complete: +// +// func slowOperationWithTimeout(ctx context.Context) (Result, error) { +// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) +// defer cancel() // releases resources if slowOperation completes before timeout elapses +// return slowOperation(ctx) +// } +func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) { + return context.WithTimeout(parent, timeout) +} + +// WithValue returns a derived context that points to the parent Context. +// In the derived context, the value associated with key is val. +// +// Use context Values only for request-scoped data that transits processes and +// APIs, not for passing optional parameters to functions. +// +// The provided key must be comparable and should not be of type +// string or any other built-in type to avoid collisions between +// packages using context. Users of WithValue should define their own +// types for keys. To avoid allocating when assigning to an +// interface{}, context keys often have concrete type +// struct{}. Alternatively, exported context key variables' static +// type should be a pointer or interface. +func WithValue(parent Context, key, val interface{}) Context { + return context.WithValue(parent, key, val) +} diff --git a/vendor/golang.org/x/net/context/go17.go b/vendor/golang.org/x/net/context/go17.go deleted file mode 100644 index 0c1b86793..000000000 --- a/vendor/golang.org/x/net/context/go17.go +++ /dev/null @@ -1,72 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.7 - -package context - -import ( - "context" // standard library's context, as of Go 1.7 - "time" -) - -var ( - todo = context.TODO() - background = context.Background() -) - -// Canceled is the error returned by Context.Err when the context is canceled. -var Canceled = context.Canceled - -// DeadlineExceeded is the error returned by Context.Err when the context's -// deadline passes. -var DeadlineExceeded = context.DeadlineExceeded - -// WithCancel returns a copy of parent with a new Done channel. The returned -// context's Done channel is closed when the returned cancel function is called -// or when the parent context's Done channel is closed, whichever happens first. 
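The rewritten golang.org/x/net/context above now defines Context and CancelFunc as aliases of the standard library's types and forwards WithCancel, WithDeadline, WithTimeout, and WithValue straight to package context, so values from either import path are interchangeable. A small interop sketch; the wait helper and the durations are illustrative:

```go
package main

import (
	"context"
	"fmt"
	"time"

	xcontext "golang.org/x/net/context"
)

// wait accepts the legacy alias type; because xcontext.Context is an
// alias for context.Context, a stdlib context satisfies it directly.
func wait(ctx xcontext.Context) error {
	select {
	case <-time.After(50 * time.Millisecond):
		return nil
	case <-ctx.Done():
		return ctx.Err()
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond)
	defer cancel()
	fmt.Println(wait(ctx)) // context deadline exceeded
}
```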
-// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete. -func WithCancel(parent Context) (ctx Context, cancel CancelFunc) { - ctx, f := context.WithCancel(parent) - return ctx, f -} - -// WithDeadline returns a copy of the parent context with the deadline adjusted -// to be no later than d. If the parent's deadline is already earlier than d, -// WithDeadline(parent, d) is semantically equivalent to parent. The returned -// context's Done channel is closed when the deadline expires, when the returned -// cancel function is called, or when the parent context's Done channel is -// closed, whichever happens first. -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete. -func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) { - ctx, f := context.WithDeadline(parent, deadline) - return ctx, f -} - -// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)). -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete: -// -// func slowOperationWithTimeout(ctx context.Context) (Result, error) { -// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) -// defer cancel() // releases resources if slowOperation completes before timeout elapses -// return slowOperation(ctx) -// } -func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) { - return WithDeadline(parent, time.Now().Add(timeout)) -} - -// WithValue returns a copy of parent in which the value associated with key is -// val. -// -// Use context Values only for request-scoped data that transits processes and -// APIs, not for passing optional parameters to functions. -func WithValue(parent Context, key interface{}, val interface{}) Context { - return context.WithValue(parent, key, val) -} diff --git a/vendor/golang.org/x/net/context/go19.go b/vendor/golang.org/x/net/context/go19.go deleted file mode 100644 index e31e35a90..000000000 --- a/vendor/golang.org/x/net/context/go19.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.9 - -package context - -import "context" // standard library's context, as of Go 1.7 - -// A Context carries a deadline, a cancelation signal, and other values across -// API boundaries. -// -// Context's methods may be called by multiple goroutines simultaneously. -type Context = context.Context - -// A CancelFunc tells an operation to abandon its work. -// A CancelFunc does not wait for the work to stop. -// After the first call, subsequent calls to a CancelFunc do nothing. -type CancelFunc = context.CancelFunc diff --git a/vendor/golang.org/x/net/context/pre_go17.go b/vendor/golang.org/x/net/context/pre_go17.go deleted file mode 100644 index 065ff3dfa..000000000 --- a/vendor/golang.org/x/net/context/pre_go17.go +++ /dev/null @@ -1,300 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.7 - -package context - -import ( - "errors" - "fmt" - "sync" - "time" -) - -// An emptyCtx is never canceled, has no values, and has no deadline. 
It is not -// struct{}, since vars of this type must have distinct addresses. -type emptyCtx int - -func (*emptyCtx) Deadline() (deadline time.Time, ok bool) { - return -} - -func (*emptyCtx) Done() <-chan struct{} { - return nil -} - -func (*emptyCtx) Err() error { - return nil -} - -func (*emptyCtx) Value(key interface{}) interface{} { - return nil -} - -func (e *emptyCtx) String() string { - switch e { - case background: - return "context.Background" - case todo: - return "context.TODO" - } - return "unknown empty Context" -} - -var ( - background = new(emptyCtx) - todo = new(emptyCtx) -) - -// Canceled is the error returned by Context.Err when the context is canceled. -var Canceled = errors.New("context canceled") - -// DeadlineExceeded is the error returned by Context.Err when the context's -// deadline passes. -var DeadlineExceeded = errors.New("context deadline exceeded") - -// WithCancel returns a copy of parent with a new Done channel. The returned -// context's Done channel is closed when the returned cancel function is called -// or when the parent context's Done channel is closed, whichever happens first. -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete. -func WithCancel(parent Context) (ctx Context, cancel CancelFunc) { - c := newCancelCtx(parent) - propagateCancel(parent, c) - return c, func() { c.cancel(true, Canceled) } -} - -// newCancelCtx returns an initialized cancelCtx. -func newCancelCtx(parent Context) *cancelCtx { - return &cancelCtx{ - Context: parent, - done: make(chan struct{}), - } -} - -// propagateCancel arranges for child to be canceled when parent is. -func propagateCancel(parent Context, child canceler) { - if parent.Done() == nil { - return // parent is never canceled - } - if p, ok := parentCancelCtx(parent); ok { - p.mu.Lock() - if p.err != nil { - // parent has already been canceled - child.cancel(false, p.err) - } else { - if p.children == nil { - p.children = make(map[canceler]bool) - } - p.children[child] = true - } - p.mu.Unlock() - } else { - go func() { - select { - case <-parent.Done(): - child.cancel(false, parent.Err()) - case <-child.Done(): - } - }() - } -} - -// parentCancelCtx follows a chain of parent references until it finds a -// *cancelCtx. This function understands how each of the concrete types in this -// package represents its parent. -func parentCancelCtx(parent Context) (*cancelCtx, bool) { - for { - switch c := parent.(type) { - case *cancelCtx: - return c, true - case *timerCtx: - return c.cancelCtx, true - case *valueCtx: - parent = c.Context - default: - return nil, false - } - } -} - -// removeChild removes a context from its parent. -func removeChild(parent Context, child canceler) { - p, ok := parentCancelCtx(parent) - if !ok { - return - } - p.mu.Lock() - if p.children != nil { - delete(p.children, child) - } - p.mu.Unlock() -} - -// A canceler is a context type that can be canceled directly. The -// implementations are *cancelCtx and *timerCtx. -type canceler interface { - cancel(removeFromParent bool, err error) - Done() <-chan struct{} -} - -// A cancelCtx can be canceled. When canceled, it also cancels any children -// that implement canceler. -type cancelCtx struct { - Context - - done chan struct{} // closed by the first cancel call. 
- - mu sync.Mutex - children map[canceler]bool // set to nil by the first cancel call - err error // set to non-nil by the first cancel call -} - -func (c *cancelCtx) Done() <-chan struct{} { - return c.done -} - -func (c *cancelCtx) Err() error { - c.mu.Lock() - defer c.mu.Unlock() - return c.err -} - -func (c *cancelCtx) String() string { - return fmt.Sprintf("%v.WithCancel", c.Context) -} - -// cancel closes c.done, cancels each of c's children, and, if -// removeFromParent is true, removes c from its parent's children. -func (c *cancelCtx) cancel(removeFromParent bool, err error) { - if err == nil { - panic("context: internal error: missing cancel error") - } - c.mu.Lock() - if c.err != nil { - c.mu.Unlock() - return // already canceled - } - c.err = err - close(c.done) - for child := range c.children { - // NOTE: acquiring the child's lock while holding parent's lock. - child.cancel(false, err) - } - c.children = nil - c.mu.Unlock() - - if removeFromParent { - removeChild(c.Context, c) - } -} - -// WithDeadline returns a copy of the parent context with the deadline adjusted -// to be no later than d. If the parent's deadline is already earlier than d, -// WithDeadline(parent, d) is semantically equivalent to parent. The returned -// context's Done channel is closed when the deadline expires, when the returned -// cancel function is called, or when the parent context's Done channel is -// closed, whichever happens first. -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete. -func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) { - if cur, ok := parent.Deadline(); ok && cur.Before(deadline) { - // The current deadline is already sooner than the new one. - return WithCancel(parent) - } - c := &timerCtx{ - cancelCtx: newCancelCtx(parent), - deadline: deadline, - } - propagateCancel(parent, c) - d := deadline.Sub(time.Now()) - if d <= 0 { - c.cancel(true, DeadlineExceeded) // deadline has already passed - return c, func() { c.cancel(true, Canceled) } - } - c.mu.Lock() - defer c.mu.Unlock() - if c.err == nil { - c.timer = time.AfterFunc(d, func() { - c.cancel(true, DeadlineExceeded) - }) - } - return c, func() { c.cancel(true, Canceled) } -} - -// A timerCtx carries a timer and a deadline. It embeds a cancelCtx to -// implement Done and Err. It implements cancel by stopping its timer then -// delegating to cancelCtx.cancel. -type timerCtx struct { - *cancelCtx - timer *time.Timer // Under cancelCtx.mu. - - deadline time.Time -} - -func (c *timerCtx) Deadline() (deadline time.Time, ok bool) { - return c.deadline, true -} - -func (c *timerCtx) String() string { - return fmt.Sprintf("%v.WithDeadline(%s [%s])", c.cancelCtx.Context, c.deadline, c.deadline.Sub(time.Now())) -} - -func (c *timerCtx) cancel(removeFromParent bool, err error) { - c.cancelCtx.cancel(false, err) - if removeFromParent { - // Remove this timerCtx from its parent cancelCtx's children. - removeChild(c.cancelCtx.Context, c) - } - c.mu.Lock() - if c.timer != nil { - c.timer.Stop() - c.timer = nil - } - c.mu.Unlock() -} - -// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)). 
-// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete: -// -// func slowOperationWithTimeout(ctx context.Context) (Result, error) { -// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) -// defer cancel() // releases resources if slowOperation completes before timeout elapses -// return slowOperation(ctx) -// } -func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) { - return WithDeadline(parent, time.Now().Add(timeout)) -} - -// WithValue returns a copy of parent in which the value associated with key is -// val. -// -// Use context Values only for request-scoped data that transits processes and -// APIs, not for passing optional parameters to functions. -func WithValue(parent Context, key interface{}, val interface{}) Context { - return &valueCtx{parent, key, val} -} - -// A valueCtx carries a key-value pair. It implements Value for that key and -// delegates all other calls to the embedded Context. -type valueCtx struct { - Context - key, val interface{} -} - -func (c *valueCtx) String() string { - return fmt.Sprintf("%v.WithValue(%#v, %#v)", c.Context, c.key, c.val) -} - -func (c *valueCtx) Value(key interface{}) interface{} { - if c.key == key { - return c.val - } - return c.Context.Value(key) -} diff --git a/vendor/golang.org/x/net/context/pre_go19.go b/vendor/golang.org/x/net/context/pre_go19.go deleted file mode 100644 index ec5a63803..000000000 --- a/vendor/golang.org/x/net/context/pre_go19.go +++ /dev/null @@ -1,109 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.9 - -package context - -import "time" - -// A Context carries a deadline, a cancelation signal, and other values across -// API boundaries. -// -// Context's methods may be called by multiple goroutines simultaneously. -type Context interface { - // Deadline returns the time when work done on behalf of this context - // should be canceled. Deadline returns ok==false when no deadline is - // set. Successive calls to Deadline return the same results. - Deadline() (deadline time.Time, ok bool) - - // Done returns a channel that's closed when work done on behalf of this - // context should be canceled. Done may return nil if this context can - // never be canceled. Successive calls to Done return the same value. - // - // WithCancel arranges for Done to be closed when cancel is called; - // WithDeadline arranges for Done to be closed when the deadline - // expires; WithTimeout arranges for Done to be closed when the timeout - // elapses. - // - // Done is provided for use in select statements: - // - // // Stream generates values with DoSomething and sends them to out - // // until DoSomething returns an error or ctx.Done is closed. - // func Stream(ctx context.Context, out chan<- Value) error { - // for { - // v, err := DoSomething(ctx) - // if err != nil { - // return err - // } - // select { - // case <-ctx.Done(): - // return ctx.Err() - // case out <- v: - // } - // } - // } - // - // See http://blog.golang.org/pipelines for more examples of how to use - // a Done channel for cancelation. - Done() <-chan struct{} - - // Err returns a non-nil error value after Done is closed. Err returns - // Canceled if the context was canceled or DeadlineExceeded if the - // context's deadline passed. No other values for Err are defined. 
- // After Done is closed, successive calls to Err return the same value. - Err() error - - // Value returns the value associated with this context for key, or nil - // if no value is associated with key. Successive calls to Value with - // the same key returns the same result. - // - // Use context values only for request-scoped data that transits - // processes and API boundaries, not for passing optional parameters to - // functions. - // - // A key identifies a specific value in a Context. Functions that wish - // to store values in Context typically allocate a key in a global - // variable then use that key as the argument to context.WithValue and - // Context.Value. A key can be any type that supports equality; - // packages should define keys as an unexported type to avoid - // collisions. - // - // Packages that define a Context key should provide type-safe accessors - // for the values stores using that key: - // - // // Package user defines a User type that's stored in Contexts. - // package user - // - // import "golang.org/x/net/context" - // - // // User is the type of value stored in the Contexts. - // type User struct {...} - // - // // key is an unexported type for keys defined in this package. - // // This prevents collisions with keys defined in other packages. - // type key int - // - // // userKey is the key for user.User values in Contexts. It is - // // unexported; clients use user.NewContext and user.FromContext - // // instead of using this key directly. - // var userKey key = 0 - // - // // NewContext returns a new Context that carries value u. - // func NewContext(ctx context.Context, u *User) context.Context { - // return context.WithValue(ctx, userKey, u) - // } - // - // // FromContext returns the User value stored in ctx, if any. - // func FromContext(ctx context.Context) (*User, bool) { - // u, ok := ctx.Value(userKey).(*User) - // return u, ok - // } - Value(key interface{}) interface{} -} - -// A CancelFunc tells an operation to abandon its work. -// A CancelFunc does not wait for the work to stop. -// After the first call, subsequent calls to a CancelFunc do nothing. 
-type CancelFunc func() diff --git a/vendor/golang.org/x/net/html/atom/table.go b/vendor/golang.org/x/net/html/atom/table.go index 2a938864c..b460e6f72 100644 --- a/vendor/golang.org/x/net/html/atom/table.go +++ b/vendor/golang.org/x/net/html/atom/table.go @@ -11,23 +11,23 @@ const ( AcceptCharset Atom = 0x1a0e Accesskey Atom = 0x2c09 Acronym Atom = 0xaa07 - Action Atom = 0x27206 - Address Atom = 0x6f307 + Action Atom = 0x26506 + Address Atom = 0x6f107 Align Atom = 0xb105 - Allowfullscreen Atom = 0x2080f + Allowfullscreen Atom = 0x3280f Allowpaymentrequest Atom = 0xc113 Allowusermedia Atom = 0xdd0e Alt Atom = 0xf303 Annotation Atom = 0x1c90a AnnotationXml Atom = 0x1c90e - Applet Atom = 0x31906 - Area Atom = 0x35604 - Article Atom = 0x3fc07 + Applet Atom = 0x30806 + Area Atom = 0x35004 + Article Atom = 0x3f607 As Atom = 0x3c02 Aside Atom = 0x10705 Async Atom = 0xff05 Audio Atom = 0x11505 - Autocomplete Atom = 0x2780c + Autocomplete Atom = 0x26b0c Autofocus Atom = 0x12109 Autoplay Atom = 0x13c08 B Atom = 0x101 @@ -43,34 +43,34 @@ const ( Br Atom = 0x202 Button Atom = 0x19106 Canvas Atom = 0x10306 - Caption Atom = 0x23107 - Center Atom = 0x22006 - Challenge Atom = 0x29b09 + Caption Atom = 0x22407 + Center Atom = 0x21306 + Challenge Atom = 0x28e09 Charset Atom = 0x2107 - Checked Atom = 0x47907 + Checked Atom = 0x5b507 Cite Atom = 0x19c04 - Class Atom = 0x56405 - Code Atom = 0x5c504 + Class Atom = 0x55805 + Code Atom = 0x5ee04 Col Atom = 0x1ab03 Colgroup Atom = 0x1ab08 Color Atom = 0x1bf05 Cols Atom = 0x1c404 Colspan Atom = 0x1c407 Command Atom = 0x1d707 - Content Atom = 0x58b07 - Contenteditable Atom = 0x58b0f - Contextmenu Atom = 0x3800b + Content Atom = 0x57b07 + Contenteditable Atom = 0x57b0f + Contextmenu Atom = 0x37a0b Controls Atom = 0x1de08 - Coords Atom = 0x1ea06 - Crossorigin Atom = 0x1fb0b - Data Atom = 0x4a504 - Datalist Atom = 0x4a508 - Datetime Atom = 0x2b808 - Dd Atom = 0x2d702 + Coords Atom = 0x1f006 + Crossorigin Atom = 0x1fa0b + Data Atom = 0x49904 + Datalist Atom = 0x49908 + Datetime Atom = 0x2ab08 + Dd Atom = 0x2bf02 Default Atom = 0x10a07 - Defer Atom = 0x5c705 - Del Atom = 0x45203 - Desc Atom = 0x56104 + Defer Atom = 0x5f005 + Del Atom = 0x44c03 + Desc Atom = 0x55504 Details Atom = 0x7207 Dfn Atom = 0x8703 Dialog Atom = 0xbb06 @@ -78,106 +78,106 @@ const ( Dirname Atom = 0x9307 Disabled Atom = 0x16408 Div Atom = 0x16b03 - Dl Atom = 0x5e602 - Download Atom = 0x46308 + Dl Atom = 0x5d602 + Download Atom = 0x45d08 Draggable Atom = 0x17a09 - Dropzone Atom = 0x40508 - Dt Atom = 0x64b02 + Dropzone Atom = 0x3ff08 + Dt Atom = 0x64002 Em Atom = 0x6e02 Embed Atom = 0x6e05 - Enctype Atom = 0x28d07 - Face Atom = 0x21e04 - Fieldset Atom = 0x22608 - Figcaption Atom = 0x22e0a - Figure Atom = 0x24806 + Enctype Atom = 0x28007 + Face Atom = 0x21104 + Fieldset Atom = 0x21908 + Figcaption Atom = 0x2210a + Figure Atom = 0x23b06 Font Atom = 0x3f04 Footer Atom = 0xf606 - For Atom = 0x25403 - ForeignObject Atom = 0x2540d - Foreignobject Atom = 0x2610d - Form Atom = 0x26e04 - Formaction Atom = 0x26e0a - Formenctype Atom = 0x2890b - Formmethod Atom = 0x2a40a - Formnovalidate Atom = 0x2ae0e - Formtarget Atom = 0x2c00a + For Atom = 0x24703 + ForeignObject Atom = 0x2470d + Foreignobject Atom = 0x2540d + Form Atom = 0x26104 + Formaction Atom = 0x2610a + Formenctype Atom = 0x27c0b + Formmethod Atom = 0x2970a + Formnovalidate Atom = 0x2a10e + Formtarget Atom = 0x2b30a Frame Atom = 0x8b05 Frameset Atom = 0x8b08 H1 Atom = 0x15c02 - H2 Atom = 0x2de02 - H3 Atom = 0x30d02 - H4 Atom = 0x34502 - H5 Atom = 
0x34f02 - H6 Atom = 0x64d02 - Head Atom = 0x33104 - Header Atom = 0x33106 - Headers Atom = 0x33107 + H2 Atom = 0x56102 + H3 Atom = 0x2cd02 + H4 Atom = 0x2fc02 + H5 Atom = 0x33f02 + H6 Atom = 0x34902 + Head Atom = 0x32004 + Header Atom = 0x32006 + Headers Atom = 0x32007 Height Atom = 0x5206 - Hgroup Atom = 0x2ca06 - Hidden Atom = 0x2d506 - High Atom = 0x2db04 + Hgroup Atom = 0x64206 + Hidden Atom = 0x2bd06 + High Atom = 0x2ca04 Hr Atom = 0x15702 - Href Atom = 0x2e004 - Hreflang Atom = 0x2e008 + Href Atom = 0x2cf04 + Hreflang Atom = 0x2cf08 Html Atom = 0x5604 - HttpEquiv Atom = 0x2e80a + HttpEquiv Atom = 0x2d70a I Atom = 0x601 - Icon Atom = 0x58a04 + Icon Atom = 0x57a04 Id Atom = 0x10902 - Iframe Atom = 0x2fc06 - Image Atom = 0x30205 - Img Atom = 0x30703 - Input Atom = 0x44b05 - Inputmode Atom = 0x44b09 - Ins Atom = 0x20403 - Integrity Atom = 0x23f09 + Iframe Atom = 0x2eb06 + Image Atom = 0x2f105 + Img Atom = 0x2f603 + Input Atom = 0x44505 + Inputmode Atom = 0x44509 + Ins Atom = 0x20303 + Integrity Atom = 0x23209 Is Atom = 0x16502 - Isindex Atom = 0x30f07 - Ismap Atom = 0x31605 - Itemid Atom = 0x38b06 + Isindex Atom = 0x2fe07 + Ismap Atom = 0x30505 + Itemid Atom = 0x38506 Itemprop Atom = 0x19d08 - Itemref Atom = 0x3cd07 - Itemscope Atom = 0x67109 - Itemtype Atom = 0x31f08 + Itemref Atom = 0x3c707 + Itemscope Atom = 0x66f09 + Itemtype Atom = 0x30e08 Kbd Atom = 0xb903 Keygen Atom = 0x3206 Keytype Atom = 0xd607 Kind Atom = 0x17704 Label Atom = 0x5905 - Lang Atom = 0x2e404 + Lang Atom = 0x2d304 Legend Atom = 0x18106 Li Atom = 0xb202 Link Atom = 0x17404 - List Atom = 0x4a904 - Listing Atom = 0x4a907 + List Atom = 0x49d04 + Listing Atom = 0x49d07 Loop Atom = 0x5d04 Low Atom = 0xc303 Main Atom = 0x1004 Malignmark Atom = 0xb00a - Manifest Atom = 0x6d708 - Map Atom = 0x31803 + Manifest Atom = 0x6d508 + Map Atom = 0x30703 Mark Atom = 0xb604 - Marquee Atom = 0x32707 - Math Atom = 0x32e04 - Max Atom = 0x33d03 - Maxlength Atom = 0x33d09 + Marquee Atom = 0x31607 + Math Atom = 0x31d04 + Max Atom = 0x33703 + Maxlength Atom = 0x33709 Media Atom = 0xe605 Mediagroup Atom = 0xe60a - Menu Atom = 0x38704 - Menuitem Atom = 0x38708 - Meta Atom = 0x4b804 + Menu Atom = 0x38104 + Menuitem Atom = 0x38108 + Meta Atom = 0x4ac04 Meter Atom = 0x9805 - Method Atom = 0x2a806 - Mglyph Atom = 0x30806 - Mi Atom = 0x34702 - Min Atom = 0x34703 - Minlength Atom = 0x34709 - Mn Atom = 0x2b102 + Method Atom = 0x29b06 + Mglyph Atom = 0x2f706 + Mi Atom = 0x34102 + Min Atom = 0x34103 + Minlength Atom = 0x34109 + Mn Atom = 0x2a402 Mo Atom = 0xa402 - Ms Atom = 0x67402 - Mtext Atom = 0x35105 - Multiple Atom = 0x35f08 - Muted Atom = 0x36705 + Ms Atom = 0x67202 + Mtext Atom = 0x34b05 + Multiple Atom = 0x35908 + Muted Atom = 0x36105 Name Atom = 0x9604 Nav Atom = 0x1303 Nobr Atom = 0x3704 @@ -185,101 +185,101 @@ const ( Noframes Atom = 0x8908 Nomodule Atom = 0xa208 Nonce Atom = 0x1a605 - Noscript Atom = 0x21608 - Novalidate Atom = 0x2b20a - Object Atom = 0x26806 + Noscript Atom = 0x2c208 + Novalidate Atom = 0x2a50a + Object Atom = 0x25b06 Ol Atom = 0x13702 Onabort Atom = 0x19507 - Onafterprint Atom = 0x2360c - Onautocomplete Atom = 0x2760e - Onautocompleteerror Atom = 0x27613 - Onauxclick Atom = 0x61f0a - Onbeforeprint Atom = 0x69e0d - Onbeforeunload Atom = 0x6e70e - Onblur Atom = 0x56d06 + Onafterprint Atom = 0x2290c + Onautocomplete Atom = 0x2690e + Onautocompleteerror Atom = 0x26913 + Onauxclick Atom = 0x6140a + Onbeforeprint Atom = 0x69c0d + Onbeforeunload Atom = 0x6e50e + Onblur Atom = 0x1ea06 Oncancel Atom = 0x11908 Oncanplay Atom = 
0x14d09 Oncanplaythrough Atom = 0x14d10 - Onchange Atom = 0x41b08 - Onclick Atom = 0x2f507 - Onclose Atom = 0x36c07 - Oncontextmenu Atom = 0x37e0d - Oncopy Atom = 0x39106 - Oncuechange Atom = 0x3970b - Oncut Atom = 0x3a205 - Ondblclick Atom = 0x3a70a - Ondrag Atom = 0x3b106 - Ondragend Atom = 0x3b109 - Ondragenter Atom = 0x3ba0b - Ondragexit Atom = 0x3c50a - Ondragleave Atom = 0x3df0b - Ondragover Atom = 0x3ea0a - Ondragstart Atom = 0x3f40b - Ondrop Atom = 0x40306 - Ondurationchange Atom = 0x41310 - Onemptied Atom = 0x40a09 - Onended Atom = 0x42307 - Onerror Atom = 0x42a07 - Onfocus Atom = 0x43107 - Onhashchange Atom = 0x43d0c - Oninput Atom = 0x44907 - Oninvalid Atom = 0x45509 - Onkeydown Atom = 0x45e09 - Onkeypress Atom = 0x46b0a - Onkeyup Atom = 0x48007 - Onlanguagechange Atom = 0x48d10 - Onload Atom = 0x49d06 - Onloadeddata Atom = 0x49d0c - Onloadedmetadata Atom = 0x4b010 - Onloadend Atom = 0x4c609 - Onloadstart Atom = 0x4cf0b - Onmessage Atom = 0x4da09 - Onmessageerror Atom = 0x4da0e - Onmousedown Atom = 0x4e80b - Onmouseenter Atom = 0x4f30c - Onmouseleave Atom = 0x4ff0c - Onmousemove Atom = 0x50b0b - Onmouseout Atom = 0x5160a - Onmouseover Atom = 0x5230b - Onmouseup Atom = 0x52e09 - Onmousewheel Atom = 0x53c0c - Onoffline Atom = 0x54809 - Ononline Atom = 0x55108 - Onpagehide Atom = 0x5590a - Onpageshow Atom = 0x5730a - Onpaste Atom = 0x57f07 - Onpause Atom = 0x59a07 - Onplay Atom = 0x5a406 - Onplaying Atom = 0x5a409 - Onpopstate Atom = 0x5ad0a - Onprogress Atom = 0x5b70a - Onratechange Atom = 0x5cc0c - Onrejectionhandled Atom = 0x5d812 - Onreset Atom = 0x5ea07 - Onresize Atom = 0x5f108 - Onscroll Atom = 0x60008 - Onsecuritypolicyviolation Atom = 0x60819 - Onseeked Atom = 0x62908 - Onseeking Atom = 0x63109 - Onselect Atom = 0x63a08 - Onshow Atom = 0x64406 - Onsort Atom = 0x64f06 - Onstalled Atom = 0x65909 - Onstorage Atom = 0x66209 - Onsubmit Atom = 0x66b08 - Onsuspend Atom = 0x67b09 + Onchange Atom = 0x41508 + Onclick Atom = 0x2e407 + Onclose Atom = 0x36607 + Oncontextmenu Atom = 0x3780d + Oncopy Atom = 0x38b06 + Oncuechange Atom = 0x3910b + Oncut Atom = 0x39c05 + Ondblclick Atom = 0x3a10a + Ondrag Atom = 0x3ab06 + Ondragend Atom = 0x3ab09 + Ondragenter Atom = 0x3b40b + Ondragexit Atom = 0x3bf0a + Ondragleave Atom = 0x3d90b + Ondragover Atom = 0x3e40a + Ondragstart Atom = 0x3ee0b + Ondrop Atom = 0x3fd06 + Ondurationchange Atom = 0x40d10 + Onemptied Atom = 0x40409 + Onended Atom = 0x41d07 + Onerror Atom = 0x42407 + Onfocus Atom = 0x42b07 + Onhashchange Atom = 0x4370c + Oninput Atom = 0x44307 + Oninvalid Atom = 0x44f09 + Onkeydown Atom = 0x45809 + Onkeypress Atom = 0x4650a + Onkeyup Atom = 0x47407 + Onlanguagechange Atom = 0x48110 + Onload Atom = 0x49106 + Onloadeddata Atom = 0x4910c + Onloadedmetadata Atom = 0x4a410 + Onloadend Atom = 0x4ba09 + Onloadstart Atom = 0x4c30b + Onmessage Atom = 0x4ce09 + Onmessageerror Atom = 0x4ce0e + Onmousedown Atom = 0x4dc0b + Onmouseenter Atom = 0x4e70c + Onmouseleave Atom = 0x4f30c + Onmousemove Atom = 0x4ff0b + Onmouseout Atom = 0x50a0a + Onmouseover Atom = 0x5170b + Onmouseup Atom = 0x52209 + Onmousewheel Atom = 0x5300c + Onoffline Atom = 0x53c09 + Ononline Atom = 0x54508 + Onpagehide Atom = 0x54d0a + Onpageshow Atom = 0x5630a + Onpaste Atom = 0x56f07 + Onpause Atom = 0x58a07 + Onplay Atom = 0x59406 + Onplaying Atom = 0x59409 + Onpopstate Atom = 0x59d0a + Onprogress Atom = 0x5a70a + Onratechange Atom = 0x5bc0c + Onrejectionhandled Atom = 0x5c812 + Onreset Atom = 0x5da07 + Onresize Atom = 0x5e108 + Onscroll Atom = 0x5f508 + Onsecuritypolicyviolation 
Atom = 0x5fd19 + Onseeked Atom = 0x61e08 + Onseeking Atom = 0x62609 + Onselect Atom = 0x62f08 + Onshow Atom = 0x63906 + Onsort Atom = 0x64d06 + Onstalled Atom = 0x65709 + Onstorage Atom = 0x66009 + Onsubmit Atom = 0x66908 + Onsuspend Atom = 0x67909 Ontimeupdate Atom = 0x400c - Ontoggle Atom = 0x68408 - Onunhandledrejection Atom = 0x68c14 - Onunload Atom = 0x6ab08 - Onvolumechange Atom = 0x6b30e - Onwaiting Atom = 0x6c109 - Onwheel Atom = 0x6ca07 + Ontoggle Atom = 0x68208 + Onunhandledrejection Atom = 0x68a14 + Onunload Atom = 0x6a908 + Onvolumechange Atom = 0x6b10e + Onwaiting Atom = 0x6bf09 + Onwheel Atom = 0x6c807 Open Atom = 0x1a304 Optgroup Atom = 0x5f08 - Optimum Atom = 0x6d107 - Option Atom = 0x6e306 - Output Atom = 0x51d06 + Optimum Atom = 0x6cf07 + Option Atom = 0x6e106 + Output Atom = 0x51106 P Atom = 0xc01 Param Atom = 0xc05 Pattern Atom = 0x6607 @@ -288,466 +288,468 @@ const ( Placeholder Atom = 0x1310b Plaintext Atom = 0x1b209 Playsinline Atom = 0x1400b - Poster Atom = 0x2cf06 - Pre Atom = 0x47003 - Preload Atom = 0x48607 - Progress Atom = 0x5b908 - Prompt Atom = 0x53606 - Public Atom = 0x58606 + Poster Atom = 0x64706 + Pre Atom = 0x46a03 + Preload Atom = 0x47a07 + Progress Atom = 0x5a908 + Prompt Atom = 0x52a06 + Public Atom = 0x57606 Q Atom = 0xcf01 Radiogroup Atom = 0x30a Rb Atom = 0x3a02 - Readonly Atom = 0x35708 - Referrerpolicy Atom = 0x3d10e - Rel Atom = 0x48703 - Required Atom = 0x24c08 + Readonly Atom = 0x35108 + Referrerpolicy Atom = 0x3cb0e + Rel Atom = 0x47b03 + Required Atom = 0x23f08 Reversed Atom = 0x8008 Rows Atom = 0x9c04 Rowspan Atom = 0x9c07 - Rp Atom = 0x23c02 + Rp Atom = 0x22f02 Rt Atom = 0x19a02 Rtc Atom = 0x19a03 Ruby Atom = 0xfb04 S Atom = 0x2501 Samp Atom = 0x7804 Sandbox Atom = 0x12907 - Scope Atom = 0x67505 - Scoped Atom = 0x67506 - Script Atom = 0x21806 - Seamless Atom = 0x37108 - Section Atom = 0x56807 - Select Atom = 0x63c06 - Selected Atom = 0x63c08 - Shape Atom = 0x1e505 - Size Atom = 0x5f504 - Sizes Atom = 0x5f505 - Slot Atom = 0x1ef04 - Small Atom = 0x20605 - Sortable Atom = 0x65108 - Sorted Atom = 0x33706 - Source Atom = 0x37806 - Spacer Atom = 0x43706 + Scope Atom = 0x67305 + Scoped Atom = 0x67306 + Script Atom = 0x2c406 + Seamless Atom = 0x36b08 + Search Atom = 0x55c06 + Section Atom = 0x1e507 + Select Atom = 0x63106 + Selected Atom = 0x63108 + Shape Atom = 0x1f505 + Size Atom = 0x5e504 + Sizes Atom = 0x5e505 + Slot Atom = 0x20504 + Small Atom = 0x32605 + Sortable Atom = 0x64f08 + Sorted Atom = 0x37206 + Source Atom = 0x43106 + Spacer Atom = 0x46e06 Span Atom = 0x9f04 - Spellcheck Atom = 0x4740a - Src Atom = 0x5c003 - Srcdoc Atom = 0x5c006 - Srclang Atom = 0x5f907 - Srcset Atom = 0x6f906 - Start Atom = 0x3fa05 - Step Atom = 0x58304 + Spellcheck Atom = 0x5b00a + Src Atom = 0x5e903 + Srcdoc Atom = 0x5e906 + Srclang Atom = 0x6f707 + Srcset Atom = 0x6fe06 + Start Atom = 0x3f405 + Step Atom = 0x57304 Strike Atom = 0xd206 - Strong Atom = 0x6dd06 - Style Atom = 0x6ff05 - Sub Atom = 0x66d03 - Summary Atom = 0x70407 - Sup Atom = 0x70b03 - Svg Atom = 0x70e03 - System Atom = 0x71106 - Tabindex Atom = 0x4be08 - Table Atom = 0x59505 - Target Atom = 0x2c406 + Strong Atom = 0x6db06 + Style Atom = 0x70405 + Sub Atom = 0x66b03 + Summary Atom = 0x70907 + Sup Atom = 0x71003 + Svg Atom = 0x71303 + System Atom = 0x71606 + Tabindex Atom = 0x4b208 + Table Atom = 0x58505 + Target Atom = 0x2b706 Tbody Atom = 0x2705 Td Atom = 0x9202 - Template Atom = 0x71408 - Textarea Atom = 0x35208 + Template Atom = 0x71908 + Textarea Atom = 0x34c08 Tfoot Atom = 0xf505 Th Atom = 
0x15602 - Thead Atom = 0x33005 + Thead Atom = 0x31f05 Time Atom = 0x4204 Title Atom = 0x11005 Tr Atom = 0xcc02 Track Atom = 0x1ba05 - Translate Atom = 0x1f209 + Translate Atom = 0x20809 Tt Atom = 0x6802 Type Atom = 0xd904 - Typemustmatch Atom = 0x2900d + Typemustmatch Atom = 0x2830d U Atom = 0xb01 Ul Atom = 0xa702 Updateviacache Atom = 0x460e - Usemap Atom = 0x59e06 + Usemap Atom = 0x58e06 Value Atom = 0x1505 Var Atom = 0x16d03 - Video Atom = 0x2f105 - Wbr Atom = 0x57c03 - Width Atom = 0x64905 - Workertype Atom = 0x71c0a - Wrap Atom = 0x72604 + Video Atom = 0x2e005 + Wbr Atom = 0x56c03 + Width Atom = 0x63e05 + Workertype Atom = 0x7210a + Wrap Atom = 0x72b04 Xmp Atom = 0x12f03 ) -const hash0 = 0x81cdf10e +const hash0 = 0x84f70e16 const maxAtomLen = 25 var table = [1 << 9]Atom{ - 0x1: 0xe60a, // mediagroup - 0x2: 0x2e404, // lang - 0x4: 0x2c09, // accesskey - 0x5: 0x8b08, // frameset - 0x7: 0x63a08, // onselect - 0x8: 0x71106, // system - 0xa: 0x64905, // width - 0xc: 0x2890b, // formenctype - 0xd: 0x13702, // ol - 0xe: 0x3970b, // oncuechange - 0x10: 0x14b03, // bdo - 0x11: 0x11505, // audio - 0x12: 0x17a09, // draggable - 0x14: 0x2f105, // video - 0x15: 0x2b102, // mn - 0x16: 0x38704, // menu - 0x17: 0x2cf06, // poster - 0x19: 0xf606, // footer - 0x1a: 0x2a806, // method - 0x1b: 0x2b808, // datetime - 0x1c: 0x19507, // onabort - 0x1d: 0x460e, // updateviacache - 0x1e: 0xff05, // async - 0x1f: 0x49d06, // onload - 0x21: 0x11908, // oncancel - 0x22: 0x62908, // onseeked - 0x23: 0x30205, // image - 0x24: 0x5d812, // onrejectionhandled - 0x26: 0x17404, // link - 0x27: 0x51d06, // output - 0x28: 0x33104, // head - 0x29: 0x4ff0c, // onmouseleave - 0x2a: 0x57f07, // onpaste - 0x2b: 0x5a409, // onplaying - 0x2c: 0x1c407, // colspan - 0x2f: 0x1bf05, // color - 0x30: 0x5f504, // size - 0x31: 0x2e80a, // http-equiv - 0x33: 0x601, // i - 0x34: 0x5590a, // onpagehide - 0x35: 0x68c14, // onunhandledrejection - 0x37: 0x42a07, // onerror - 0x3a: 0x3b08, // basefont - 0x3f: 0x1303, // nav - 0x40: 0x17704, // kind - 0x41: 0x35708, // readonly - 0x42: 0x30806, // mglyph - 0x44: 0xb202, // li - 0x46: 0x2d506, // hidden - 0x47: 0x70e03, // svg - 0x48: 0x58304, // step - 0x49: 0x23f09, // integrity - 0x4a: 0x58606, // public - 0x4c: 0x1ab03, // col - 0x4d: 0x1870a, // blockquote - 0x4e: 0x34f02, // h5 - 0x50: 0x5b908, // progress - 0x51: 0x5f505, // sizes - 0x52: 0x34502, // h4 - 0x56: 0x33005, // thead - 0x57: 0xd607, // keytype - 0x58: 0x5b70a, // onprogress - 0x59: 0x44b09, // inputmode - 0x5a: 0x3b109, // ondragend - 0x5d: 0x3a205, // oncut - 0x5e: 0x43706, // spacer - 0x5f: 0x1ab08, // colgroup - 0x62: 0x16502, // is - 0x65: 0x3c02, // as - 0x66: 0x54809, // onoffline - 0x67: 0x33706, // sorted - 0x69: 0x48d10, // onlanguagechange - 0x6c: 0x43d0c, // onhashchange - 0x6d: 0x9604, // name - 0x6e: 0xf505, // tfoot - 0x6f: 0x56104, // desc - 0x70: 0x33d03, // max - 0x72: 0x1ea06, // coords - 0x73: 0x30d02, // h3 - 0x74: 0x6e70e, // onbeforeunload - 0x75: 0x9c04, // rows - 0x76: 0x63c06, // select - 0x77: 0x9805, // meter - 0x78: 0x38b06, // itemid - 0x79: 0x53c0c, // onmousewheel - 0x7a: 0x5c006, // srcdoc - 0x7d: 0x1ba05, // track - 0x7f: 0x31f08, // itemtype - 0x82: 0xa402, // mo - 0x83: 0x41b08, // onchange - 0x84: 0x33107, // headers - 0x85: 0x5cc0c, // onratechange - 0x86: 0x60819, // onsecuritypolicyviolation - 0x88: 0x4a508, // datalist - 0x89: 0x4e80b, // onmousedown - 0x8a: 0x1ef04, // slot - 0x8b: 0x4b010, // onloadedmetadata - 0x8c: 0x1a06, // accept - 0x8d: 0x26806, // object - 0x91: 0x6b30e, // 
onvolumechange - 0x92: 0x2107, // charset - 0x93: 0x27613, // onautocompleteerror - 0x94: 0xc113, // allowpaymentrequest - 0x95: 0x2804, // body - 0x96: 0x10a07, // default - 0x97: 0x63c08, // selected - 0x98: 0x21e04, // face - 0x99: 0x1e505, // shape - 0x9b: 0x68408, // ontoggle - 0x9e: 0x64b02, // dt - 0x9f: 0xb604, // mark - 0xa1: 0xb01, // u - 0xa4: 0x6ab08, // onunload - 0xa5: 0x5d04, // loop - 0xa6: 0x16408, // disabled - 0xaa: 0x42307, // onended - 0xab: 0xb00a, // malignmark - 0xad: 0x67b09, // onsuspend - 0xae: 0x35105, // mtext - 0xaf: 0x64f06, // onsort - 0xb0: 0x19d08, // itemprop - 0xb3: 0x67109, // itemscope - 0xb4: 0x17305, // blink - 0xb6: 0x3b106, // ondrag - 0xb7: 0xa702, // ul - 0xb8: 0x26e04, // form - 0xb9: 0x12907, // sandbox - 0xba: 0x8b05, // frame - 0xbb: 0x1505, // value - 0xbc: 0x66209, // onstorage - 0xbf: 0xaa07, // acronym - 0xc0: 0x19a02, // rt - 0xc2: 0x202, // br - 0xc3: 0x22608, // fieldset - 0xc4: 0x2900d, // typemustmatch - 0xc5: 0xa208, // nomodule - 0xc6: 0x6c07, // noembed - 0xc7: 0x69e0d, // onbeforeprint - 0xc8: 0x19106, // button - 0xc9: 0x2f507, // onclick - 0xca: 0x70407, // summary - 0xcd: 0xfb04, // ruby - 0xce: 0x56405, // class - 0xcf: 0x3f40b, // ondragstart - 0xd0: 0x23107, // caption - 0xd4: 0xdd0e, // allowusermedia - 0xd5: 0x4cf0b, // onloadstart - 0xd9: 0x16b03, // div - 0xda: 0x4a904, // list - 0xdb: 0x32e04, // math - 0xdc: 0x44b05, // input - 0xdf: 0x3ea0a, // ondragover - 0xe0: 0x2de02, // h2 - 0xe2: 0x1b209, // plaintext - 0xe4: 0x4f30c, // onmouseenter - 0xe7: 0x47907, // checked - 0xe8: 0x47003, // pre - 0xea: 0x35f08, // multiple - 0xeb: 0xba03, // bdi - 0xec: 0x33d09, // maxlength - 0xed: 0xcf01, // q - 0xee: 0x61f0a, // onauxclick - 0xf0: 0x57c03, // wbr - 0xf2: 0x3b04, // base - 0xf3: 0x6e306, // option - 0xf5: 0x41310, // ondurationchange - 0xf7: 0x8908, // noframes - 0xf9: 0x40508, // dropzone - 0xfb: 0x67505, // scope - 0xfc: 0x8008, // reversed - 0xfd: 0x3ba0b, // ondragenter - 0xfe: 0x3fa05, // start - 0xff: 0x12f03, // xmp - 0x100: 0x5f907, // srclang - 0x101: 0x30703, // img - 0x104: 0x101, // b - 0x105: 0x25403, // for - 0x106: 0x10705, // aside - 0x107: 0x44907, // oninput - 0x108: 0x35604, // area - 0x109: 0x2a40a, // formmethod - 0x10a: 0x72604, // wrap - 0x10c: 0x23c02, // rp - 0x10d: 0x46b0a, // onkeypress - 0x10e: 0x6802, // tt - 0x110: 0x34702, // mi - 0x111: 0x36705, // muted - 0x112: 0xf303, // alt - 0x113: 0x5c504, // code - 0x114: 0x6e02, // em - 0x115: 0x3c50a, // ondragexit - 0x117: 0x9f04, // span - 0x119: 0x6d708, // manifest - 0x11a: 0x38708, // menuitem - 0x11b: 0x58b07, // content - 0x11d: 0x6c109, // onwaiting - 0x11f: 0x4c609, // onloadend - 0x121: 0x37e0d, // oncontextmenu - 0x123: 0x56d06, // onblur - 0x124: 0x3fc07, // article - 0x125: 0x9303, // dir - 0x126: 0xef04, // ping - 0x127: 0x24c08, // required - 0x128: 0x45509, // oninvalid - 0x129: 0xb105, // align - 0x12b: 0x58a04, // icon - 0x12c: 0x64d02, // h6 - 0x12d: 0x1c404, // cols - 0x12e: 0x22e0a, // figcaption - 0x12f: 0x45e09, // onkeydown - 0x130: 0x66b08, // onsubmit - 0x131: 0x14d09, // oncanplay - 0x132: 0x70b03, // sup - 0x133: 0xc01, // p - 0x135: 0x40a09, // onemptied - 0x136: 0x39106, // oncopy - 0x137: 0x19c04, // cite - 0x138: 0x3a70a, // ondblclick - 0x13a: 0x50b0b, // onmousemove - 0x13c: 0x66d03, // sub - 0x13d: 0x48703, // rel - 0x13e: 0x5f08, // optgroup - 0x142: 0x9c07, // rowspan - 0x143: 0x37806, // source - 0x144: 0x21608, // noscript - 0x145: 0x1a304, // open - 0x146: 0x20403, // ins - 0x147: 0x2540d, // foreignObject 
- 0x148: 0x5ad0a, // onpopstate - 0x14a: 0x28d07, // enctype - 0x14b: 0x2760e, // onautocomplete - 0x14c: 0x35208, // textarea - 0x14e: 0x2780c, // autocomplete - 0x14f: 0x15702, // hr - 0x150: 0x1de08, // controls - 0x151: 0x10902, // id - 0x153: 0x2360c, // onafterprint - 0x155: 0x2610d, // foreignobject - 0x156: 0x32707, // marquee - 0x157: 0x59a07, // onpause - 0x158: 0x5e602, // dl - 0x159: 0x5206, // height - 0x15a: 0x34703, // min - 0x15b: 0x9307, // dirname - 0x15c: 0x1f209, // translate - 0x15d: 0x5604, // html - 0x15e: 0x34709, // minlength - 0x15f: 0x48607, // preload - 0x160: 0x71408, // template - 0x161: 0x3df0b, // ondragleave - 0x162: 0x3a02, // rb - 0x164: 0x5c003, // src - 0x165: 0x6dd06, // strong - 0x167: 0x7804, // samp - 0x168: 0x6f307, // address - 0x169: 0x55108, // ononline - 0x16b: 0x1310b, // placeholder - 0x16c: 0x2c406, // target - 0x16d: 0x20605, // small - 0x16e: 0x6ca07, // onwheel - 0x16f: 0x1c90a, // annotation - 0x170: 0x4740a, // spellcheck - 0x171: 0x7207, // details - 0x172: 0x10306, // canvas - 0x173: 0x12109, // autofocus - 0x174: 0xc05, // param - 0x176: 0x46308, // download - 0x177: 0x45203, // del - 0x178: 0x36c07, // onclose - 0x179: 0xb903, // kbd - 0x17a: 0x31906, // applet - 0x17b: 0x2e004, // href - 0x17c: 0x5f108, // onresize - 0x17e: 0x49d0c, // onloadeddata - 0x180: 0xcc02, // tr - 0x181: 0x2c00a, // formtarget - 0x182: 0x11005, // title - 0x183: 0x6ff05, // style - 0x184: 0xd206, // strike - 0x185: 0x59e06, // usemap - 0x186: 0x2fc06, // iframe - 0x187: 0x1004, // main - 0x189: 0x7b07, // picture - 0x18c: 0x31605, // ismap - 0x18e: 0x4a504, // data - 0x18f: 0x5905, // label - 0x191: 0x3d10e, // referrerpolicy - 0x192: 0x15602, // th - 0x194: 0x53606, // prompt - 0x195: 0x56807, // section - 0x197: 0x6d107, // optimum - 0x198: 0x2db04, // high - 0x199: 0x15c02, // h1 - 0x19a: 0x65909, // onstalled - 0x19b: 0x16d03, // var - 0x19c: 0x4204, // time - 0x19e: 0x67402, // ms - 0x19f: 0x33106, // header - 0x1a0: 0x4da09, // onmessage - 0x1a1: 0x1a605, // nonce - 0x1a2: 0x26e0a, // formaction - 0x1a3: 0x22006, // center - 0x1a4: 0x3704, // nobr - 0x1a5: 0x59505, // table - 0x1a6: 0x4a907, // listing - 0x1a7: 0x18106, // legend - 0x1a9: 0x29b09, // challenge - 0x1aa: 0x24806, // figure - 0x1ab: 0xe605, // media - 0x1ae: 0xd904, // type - 0x1af: 0x3f04, // font - 0x1b0: 0x4da0e, // onmessageerror - 0x1b1: 0x37108, // seamless - 0x1b2: 0x8703, // dfn - 0x1b3: 0x5c705, // defer - 0x1b4: 0xc303, // low - 0x1b5: 0x19a03, // rtc - 0x1b6: 0x5230b, // onmouseover - 0x1b7: 0x2b20a, // novalidate - 0x1b8: 0x71c0a, // workertype - 0x1ba: 0x3cd07, // itemref - 0x1bd: 0x1, // a - 0x1be: 0x31803, // map - 0x1bf: 0x400c, // ontimeupdate - 0x1c0: 0x15e07, // bgsound - 0x1c1: 0x3206, // keygen - 0x1c2: 0x2705, // tbody - 0x1c5: 0x64406, // onshow - 0x1c7: 0x2501, // s - 0x1c8: 0x6607, // pattern - 0x1cc: 0x14d10, // oncanplaythrough - 0x1ce: 0x2d702, // dd - 0x1cf: 0x6f906, // srcset - 0x1d0: 0x17003, // big - 0x1d2: 0x65108, // sortable - 0x1d3: 0x48007, // onkeyup - 0x1d5: 0x5a406, // onplay - 0x1d7: 0x4b804, // meta - 0x1d8: 0x40306, // ondrop - 0x1da: 0x60008, // onscroll - 0x1db: 0x1fb0b, // crossorigin - 0x1dc: 0x5730a, // onpageshow - 0x1dd: 0x4, // abbr - 0x1de: 0x9202, // td - 0x1df: 0x58b0f, // contenteditable - 0x1e0: 0x27206, // action - 0x1e1: 0x1400b, // playsinline - 0x1e2: 0x43107, // onfocus - 0x1e3: 0x2e008, // hreflang - 0x1e5: 0x5160a, // onmouseout - 0x1e6: 0x5ea07, // onreset - 0x1e7: 0x13c08, // autoplay - 0x1e8: 0x63109, // onseeking - 
0x1ea: 0x67506, // scoped - 0x1ec: 0x30a, // radiogroup - 0x1ee: 0x3800b, // contextmenu - 0x1ef: 0x52e09, // onmouseup - 0x1f1: 0x2ca06, // hgroup - 0x1f2: 0x2080f, // allowfullscreen - 0x1f3: 0x4be08, // tabindex - 0x1f6: 0x30f07, // isindex - 0x1f7: 0x1a0e, // accept-charset - 0x1f8: 0x2ae0e, // formnovalidate - 0x1fb: 0x1c90e, // annotation-xml - 0x1fc: 0x6e05, // embed - 0x1fd: 0x21806, // script - 0x1fe: 0xbb06, // dialog - 0x1ff: 0x1d707, // command + 0x1: 0x3ff08, // dropzone + 0x2: 0x3b08, // basefont + 0x3: 0x23209, // integrity + 0x4: 0x43106, // source + 0x5: 0x2c09, // accesskey + 0x6: 0x1a06, // accept + 0x7: 0x6c807, // onwheel + 0xb: 0x47407, // onkeyup + 0xc: 0x32007, // headers + 0xd: 0x67306, // scoped + 0xe: 0x67909, // onsuspend + 0xf: 0x8908, // noframes + 0x10: 0x1fa0b, // crossorigin + 0x11: 0x2e407, // onclick + 0x12: 0x3f405, // start + 0x13: 0x37a0b, // contextmenu + 0x14: 0x5e903, // src + 0x15: 0x1c404, // cols + 0x16: 0xbb06, // dialog + 0x17: 0x47a07, // preload + 0x18: 0x3c707, // itemref + 0x1b: 0x2f105, // image + 0x1d: 0x4ba09, // onloadend + 0x1e: 0x45d08, // download + 0x1f: 0x46a03, // pre + 0x23: 0x2970a, // formmethod + 0x24: 0x71303, // svg + 0x25: 0xcf01, // q + 0x26: 0x64002, // dt + 0x27: 0x1de08, // controls + 0x2a: 0x2804, // body + 0x2b: 0xd206, // strike + 0x2c: 0x3910b, // oncuechange + 0x2d: 0x4c30b, // onloadstart + 0x2e: 0x2fe07, // isindex + 0x2f: 0xb202, // li + 0x30: 0x1400b, // playsinline + 0x31: 0x34102, // mi + 0x32: 0x30806, // applet + 0x33: 0x4ce09, // onmessage + 0x35: 0x13702, // ol + 0x36: 0x1a304, // open + 0x39: 0x14d09, // oncanplay + 0x3a: 0x6bf09, // onwaiting + 0x3b: 0x11908, // oncancel + 0x3c: 0x6a908, // onunload + 0x3e: 0x53c09, // onoffline + 0x3f: 0x1a0e, // accept-charset + 0x40: 0x32004, // head + 0x42: 0x3ab09, // ondragend + 0x43: 0x1310b, // placeholder + 0x44: 0x2b30a, // formtarget + 0x45: 0x2540d, // foreignobject + 0x47: 0x400c, // ontimeupdate + 0x48: 0xdd0e, // allowusermedia + 0x4a: 0x69c0d, // onbeforeprint + 0x4b: 0x5604, // html + 0x4c: 0x9f04, // span + 0x4d: 0x64206, // hgroup + 0x4e: 0x16408, // disabled + 0x4f: 0x4204, // time + 0x51: 0x42b07, // onfocus + 0x53: 0xb00a, // malignmark + 0x55: 0x4650a, // onkeypress + 0x56: 0x55805, // class + 0x57: 0x1ab08, // colgroup + 0x58: 0x33709, // maxlength + 0x59: 0x5a908, // progress + 0x5b: 0x70405, // style + 0x5c: 0x2a10e, // formnovalidate + 0x5e: 0x38b06, // oncopy + 0x60: 0x26104, // form + 0x61: 0xf606, // footer + 0x64: 0x30a, // radiogroup + 0x66: 0xfb04, // ruby + 0x67: 0x4ff0b, // onmousemove + 0x68: 0x19d08, // itemprop + 0x69: 0x2d70a, // http-equiv + 0x6a: 0x15602, // th + 0x6c: 0x6e02, // em + 0x6d: 0x38108, // menuitem + 0x6e: 0x63106, // select + 0x6f: 0x48110, // onlanguagechange + 0x70: 0x31f05, // thead + 0x71: 0x15c02, // h1 + 0x72: 0x5e906, // srcdoc + 0x75: 0x9604, // name + 0x76: 0x19106, // button + 0x77: 0x55504, // desc + 0x78: 0x17704, // kind + 0x79: 0x1bf05, // color + 0x7c: 0x58e06, // usemap + 0x7d: 0x30e08, // itemtype + 0x7f: 0x6d508, // manifest + 0x81: 0x5300c, // onmousewheel + 0x82: 0x4dc0b, // onmousedown + 0x84: 0xc05, // param + 0x85: 0x2e005, // video + 0x86: 0x4910c, // onloadeddata + 0x87: 0x6f107, // address + 0x8c: 0xef04, // ping + 0x8d: 0x24703, // for + 0x8f: 0x62f08, // onselect + 0x90: 0x30703, // map + 0x92: 0xc01, // p + 0x93: 0x8008, // reversed + 0x94: 0x54d0a, // onpagehide + 0x95: 0x3206, // keygen + 0x96: 0x34109, // minlength + 0x97: 0x3e40a, // ondragover + 0x98: 0x42407, // onerror + 0x9a: 
0x2107, // charset + 0x9b: 0x29b06, // method + 0x9c: 0x101, // b + 0x9d: 0x68208, // ontoggle + 0x9e: 0x2bd06, // hidden + 0xa0: 0x3f607, // article + 0xa2: 0x63906, // onshow + 0xa3: 0x64d06, // onsort + 0xa5: 0x57b0f, // contenteditable + 0xa6: 0x66908, // onsubmit + 0xa8: 0x44f09, // oninvalid + 0xaa: 0x202, // br + 0xab: 0x10902, // id + 0xac: 0x5d04, // loop + 0xad: 0x5630a, // onpageshow + 0xb0: 0x2cf04, // href + 0xb2: 0x2210a, // figcaption + 0xb3: 0x2690e, // onautocomplete + 0xb4: 0x49106, // onload + 0xb6: 0x9c04, // rows + 0xb7: 0x1a605, // nonce + 0xb8: 0x68a14, // onunhandledrejection + 0xbb: 0x21306, // center + 0xbc: 0x59406, // onplay + 0xbd: 0x33f02, // h5 + 0xbe: 0x49d07, // listing + 0xbf: 0x57606, // public + 0xc2: 0x23b06, // figure + 0xc3: 0x57a04, // icon + 0xc4: 0x1ab03, // col + 0xc5: 0x47b03, // rel + 0xc6: 0xe605, // media + 0xc7: 0x12109, // autofocus + 0xc8: 0x19a02, // rt + 0xca: 0x2d304, // lang + 0xcc: 0x49908, // datalist + 0xce: 0x2eb06, // iframe + 0xcf: 0x36105, // muted + 0xd0: 0x6140a, // onauxclick + 0xd2: 0x3c02, // as + 0xd6: 0x3fd06, // ondrop + 0xd7: 0x1c90a, // annotation + 0xd8: 0x21908, // fieldset + 0xdb: 0x2cf08, // hreflang + 0xdc: 0x4e70c, // onmouseenter + 0xdd: 0x2a402, // mn + 0xde: 0xe60a, // mediagroup + 0xdf: 0x9805, // meter + 0xe0: 0x56c03, // wbr + 0xe2: 0x63e05, // width + 0xe3: 0x2290c, // onafterprint + 0xe4: 0x30505, // ismap + 0xe5: 0x1505, // value + 0xe7: 0x1303, // nav + 0xe8: 0x54508, // ononline + 0xe9: 0xb604, // mark + 0xea: 0xc303, // low + 0xeb: 0x3ee0b, // ondragstart + 0xef: 0x12f03, // xmp + 0xf0: 0x22407, // caption + 0xf1: 0xd904, // type + 0xf2: 0x70907, // summary + 0xf3: 0x6802, // tt + 0xf4: 0x20809, // translate + 0xf5: 0x1870a, // blockquote + 0xf8: 0x15702, // hr + 0xfa: 0x2705, // tbody + 0xfc: 0x7b07, // picture + 0xfd: 0x5206, // height + 0xfe: 0x19c04, // cite + 0xff: 0x2501, // s + 0x101: 0xff05, // async + 0x102: 0x56f07, // onpaste + 0x103: 0x19507, // onabort + 0x104: 0x2b706, // target + 0x105: 0x14b03, // bdo + 0x106: 0x1f006, // coords + 0x107: 0x5e108, // onresize + 0x108: 0x71908, // template + 0x10a: 0x3a02, // rb + 0x10b: 0x2a50a, // novalidate + 0x10c: 0x460e, // updateviacache + 0x10d: 0x71003, // sup + 0x10e: 0x6c07, // noembed + 0x10f: 0x16b03, // div + 0x110: 0x6f707, // srclang + 0x111: 0x17a09, // draggable + 0x112: 0x67305, // scope + 0x113: 0x5905, // label + 0x114: 0x22f02, // rp + 0x115: 0x23f08, // required + 0x116: 0x3780d, // oncontextmenu + 0x117: 0x5e504, // size + 0x118: 0x5b00a, // spellcheck + 0x119: 0x3f04, // font + 0x11a: 0x9c07, // rowspan + 0x11b: 0x10a07, // default + 0x11d: 0x44307, // oninput + 0x11e: 0x38506, // itemid + 0x11f: 0x5ee04, // code + 0x120: 0xaa07, // acronym + 0x121: 0x3b04, // base + 0x125: 0x2470d, // foreignObject + 0x126: 0x2ca04, // high + 0x127: 0x3cb0e, // referrerpolicy + 0x128: 0x33703, // max + 0x129: 0x59d0a, // onpopstate + 0x12a: 0x2fc02, // h4 + 0x12b: 0x4ac04, // meta + 0x12c: 0x17305, // blink + 0x12e: 0x5f508, // onscroll + 0x12f: 0x59409, // onplaying + 0x130: 0xc113, // allowpaymentrequest + 0x131: 0x19a03, // rtc + 0x132: 0x72b04, // wrap + 0x134: 0x8b08, // frameset + 0x135: 0x32605, // small + 0x137: 0x32006, // header + 0x138: 0x40409, // onemptied + 0x139: 0x34902, // h6 + 0x13a: 0x35908, // multiple + 0x13c: 0x52a06, // prompt + 0x13f: 0x28e09, // challenge + 0x141: 0x4370c, // onhashchange + 0x142: 0x57b07, // content + 0x143: 0x1c90e, // annotation-xml + 0x144: 0x36607, // onclose + 0x145: 0x14d10, // oncanplaythrough + 
0x148: 0x5170b, // onmouseover + 0x149: 0x64f08, // sortable + 0x14a: 0xa402, // mo + 0x14b: 0x2cd02, // h3 + 0x14c: 0x2c406, // script + 0x14d: 0x41d07, // onended + 0x14f: 0x64706, // poster + 0x150: 0x7210a, // workertype + 0x153: 0x1f505, // shape + 0x154: 0x4, // abbr + 0x155: 0x1, // a + 0x156: 0x2bf02, // dd + 0x157: 0x71606, // system + 0x158: 0x4ce0e, // onmessageerror + 0x159: 0x36b08, // seamless + 0x15a: 0x2610a, // formaction + 0x15b: 0x6e106, // option + 0x15c: 0x31d04, // math + 0x15d: 0x62609, // onseeking + 0x15e: 0x39c05, // oncut + 0x15f: 0x44c03, // del + 0x160: 0x11005, // title + 0x161: 0x11505, // audio + 0x162: 0x63108, // selected + 0x165: 0x3b40b, // ondragenter + 0x166: 0x46e06, // spacer + 0x167: 0x4a410, // onloadedmetadata + 0x168: 0x44505, // input + 0x16a: 0x58505, // table + 0x16b: 0x41508, // onchange + 0x16e: 0x5f005, // defer + 0x171: 0x50a0a, // onmouseout + 0x172: 0x20504, // slot + 0x175: 0x3704, // nobr + 0x177: 0x1d707, // command + 0x17a: 0x7207, // details + 0x17b: 0x38104, // menu + 0x17c: 0xb903, // kbd + 0x17d: 0x57304, // step + 0x17e: 0x20303, // ins + 0x17f: 0x13c08, // autoplay + 0x182: 0x34103, // min + 0x183: 0x17404, // link + 0x185: 0x40d10, // ondurationchange + 0x186: 0x9202, // td + 0x187: 0x8b05, // frame + 0x18a: 0x2ab08, // datetime + 0x18b: 0x44509, // inputmode + 0x18c: 0x35108, // readonly + 0x18d: 0x21104, // face + 0x18f: 0x5e505, // sizes + 0x191: 0x4b208, // tabindex + 0x192: 0x6db06, // strong + 0x193: 0xba03, // bdi + 0x194: 0x6fe06, // srcset + 0x196: 0x67202, // ms + 0x197: 0x5b507, // checked + 0x198: 0xb105, // align + 0x199: 0x1e507, // section + 0x19b: 0x6e05, // embed + 0x19d: 0x15e07, // bgsound + 0x1a2: 0x49d04, // list + 0x1a3: 0x61e08, // onseeked + 0x1a4: 0x66009, // onstorage + 0x1a5: 0x2f603, // img + 0x1a6: 0xf505, // tfoot + 0x1a9: 0x26913, // onautocompleteerror + 0x1aa: 0x5fd19, // onsecuritypolicyviolation + 0x1ad: 0x9303, // dir + 0x1ae: 0x9307, // dirname + 0x1b0: 0x5a70a, // onprogress + 0x1b2: 0x65709, // onstalled + 0x1b5: 0x66f09, // itemscope + 0x1b6: 0x49904, // data + 0x1b7: 0x3d90b, // ondragleave + 0x1b8: 0x56102, // h2 + 0x1b9: 0x2f706, // mglyph + 0x1ba: 0x16502, // is + 0x1bb: 0x6e50e, // onbeforeunload + 0x1bc: 0x2830d, // typemustmatch + 0x1bd: 0x3ab06, // ondrag + 0x1be: 0x5da07, // onreset + 0x1c0: 0x51106, // output + 0x1c1: 0x12907, // sandbox + 0x1c2: 0x1b209, // plaintext + 0x1c4: 0x34c08, // textarea + 0x1c7: 0xd607, // keytype + 0x1c8: 0x34b05, // mtext + 0x1c9: 0x6b10e, // onvolumechange + 0x1ca: 0x1ea06, // onblur + 0x1cb: 0x58a07, // onpause + 0x1cd: 0x5bc0c, // onratechange + 0x1ce: 0x10705, // aside + 0x1cf: 0x6cf07, // optimum + 0x1d1: 0x45809, // onkeydown + 0x1d2: 0x1c407, // colspan + 0x1d3: 0x1004, // main + 0x1d4: 0x66b03, // sub + 0x1d5: 0x25b06, // object + 0x1d6: 0x55c06, // search + 0x1d7: 0x37206, // sorted + 0x1d8: 0x17003, // big + 0x1d9: 0xb01, // u + 0x1db: 0x26b0c, // autocomplete + 0x1dc: 0xcc02, // tr + 0x1dd: 0xf303, // alt + 0x1df: 0x7804, // samp + 0x1e0: 0x5c812, // onrejectionhandled + 0x1e1: 0x4f30c, // onmouseleave + 0x1e2: 0x28007, // enctype + 0x1e3: 0xa208, // nomodule + 0x1e5: 0x3280f, // allowfullscreen + 0x1e6: 0x5f08, // optgroup + 0x1e8: 0x27c0b, // formenctype + 0x1e9: 0x18106, // legend + 0x1ea: 0x10306, // canvas + 0x1eb: 0x6607, // pattern + 0x1ec: 0x2c208, // noscript + 0x1ed: 0x601, // i + 0x1ee: 0x5d602, // dl + 0x1ef: 0xa702, // ul + 0x1f2: 0x52209, // onmouseup + 0x1f4: 0x1ba05, // track + 0x1f7: 0x3a10a, // ondblclick + 0x1f8: 
0x3bf0a, // ondragexit + 0x1fa: 0x8703, // dfn + 0x1fc: 0x26506, // action + 0x1fd: 0x35004, // area + 0x1fe: 0x31607, // marquee + 0x1ff: 0x16d03, // var } const atomText = "abbradiogrouparamainavalueaccept-charsetbodyaccesskeygenobrb" + @@ -758,26 +760,26 @@ const atomText = "abbradiogrouparamainavalueaccept-charsetbodyaccesskeygenobrb" "dboxmplaceholderautoplaysinlinebdoncanplaythrough1bgsoundisa" + "bledivarbigblinkindraggablegendblockquotebuttonabortcitempro" + "penoncecolgrouplaintextrackcolorcolspannotation-xmlcommandco" + - "ntrolshapecoordslotranslatecrossoriginsmallowfullscreenoscri" + - "ptfacenterfieldsetfigcaptionafterprintegrityfigurequiredfore" + - "ignObjectforeignobjectformactionautocompleteerrorformenctype" + - "mustmatchallengeformmethodformnovalidatetimeformtargethgroup" + - "osterhiddenhigh2hreflanghttp-equivideonclickiframeimageimgly" + - "ph3isindexismappletitemtypemarqueematheadersortedmaxlength4m" + - "inlength5mtextareadonlymultiplemutedoncloseamlessourceoncont" + - "extmenuitemidoncopyoncuechangeoncutondblclickondragendondrag" + - "enterondragexitemreferrerpolicyondragleaveondragoverondragst" + - "articleondropzonemptiedondurationchangeonendedonerroronfocus" + - "paceronhashchangeoninputmodeloninvalidonkeydownloadonkeypres" + - "spellcheckedonkeyupreloadonlanguagechangeonloadeddatalisting" + - "onloadedmetadatabindexonloadendonloadstartonmessageerroronmo" + - "usedownonmouseenteronmouseleaveonmousemoveonmouseoutputonmou" + - "seoveronmouseupromptonmousewheelonofflineononlineonpagehides" + - "classectionbluronpageshowbronpastepublicontenteditableonpaus" + - "emaponplayingonpopstateonprogressrcdocodeferonratechangeonre" + - "jectionhandledonresetonresizesrclangonscrollonsecuritypolicy" + - "violationauxclickonseekedonseekingonselectedonshowidth6onsor" + - "tableonstalledonstorageonsubmitemscopedonsuspendontoggleonun" + - "handledrejectionbeforeprintonunloadonvolumechangeonwaitingon" + - "wheeloptimumanifestrongoptionbeforeunloaddressrcsetstylesumm" + - "arysupsvgsystemplateworkertypewrap" + "ntrolsectionblurcoordshapecrossoriginslotranslatefacenterfie" + + "ldsetfigcaptionafterprintegrityfigurequiredforeignObjectfore" + + "ignobjectformactionautocompleteerrorformenctypemustmatchalle" + + "ngeformmethodformnovalidatetimeformtargethiddenoscripthigh3h" + + "reflanghttp-equivideonclickiframeimageimglyph4isindexismappl" + + "etitemtypemarqueematheadersmallowfullscreenmaxlength5minleng" + + "th6mtextareadonlymultiplemutedoncloseamlessortedoncontextmen" + + "uitemidoncopyoncuechangeoncutondblclickondragendondragentero" + + "ndragexitemreferrerpolicyondragleaveondragoverondragstarticl" + + "eondropzonemptiedondurationchangeonendedonerroronfocusourceo" + + "nhashchangeoninputmodeloninvalidonkeydownloadonkeypresspacer" + + "onkeyupreloadonlanguagechangeonloadeddatalistingonloadedmeta" + + "databindexonloadendonloadstartonmessageerroronmousedownonmou" + + "seenteronmouseleaveonmousemoveonmouseoutputonmouseoveronmous" + + "eupromptonmousewheelonofflineononlineonpagehidesclassearch2o" + + "npageshowbronpastepublicontenteditableonpausemaponplayingonp" + + "opstateonprogresspellcheckedonratechangeonrejectionhandledon" + + "resetonresizesrcdocodeferonscrollonsecuritypolicyviolationau" + + "xclickonseekedonseekingonselectedonshowidthgrouposteronsorta" + + "bleonstalledonstorageonsubmitemscopedonsuspendontoggleonunha" + + "ndledrejectionbeforeprintonunloadonvolumechangeonwaitingonwh" + + "eeloptimumanifestrongoptionbeforeunloaddressrclangsrcsetstyl" + + 
"esummarysupsvgsystemplateworkertypewrap" diff --git a/vendor/golang.org/x/net/html/parse.go b/vendor/golang.org/x/net/html/parse.go index 643c674e3..518ee4c94 100644 --- a/vendor/golang.org/x/net/html/parse.go +++ b/vendor/golang.org/x/net/html/parse.go @@ -924,7 +924,7 @@ func inBodyIM(p *parser) bool { p.addElement() p.im = inFramesetIM return true - case a.Address, a.Article, a.Aside, a.Blockquote, a.Center, a.Details, a.Dialog, a.Dir, a.Div, a.Dl, a.Fieldset, a.Figcaption, a.Figure, a.Footer, a.Header, a.Hgroup, a.Main, a.Menu, a.Nav, a.Ol, a.P, a.Section, a.Summary, a.Ul: + case a.Address, a.Article, a.Aside, a.Blockquote, a.Center, a.Details, a.Dialog, a.Dir, a.Div, a.Dl, a.Fieldset, a.Figcaption, a.Figure, a.Footer, a.Header, a.Hgroup, a.Main, a.Menu, a.Nav, a.Ol, a.P, a.Search, a.Section, a.Summary, a.Ul: p.popUntil(buttonScope, a.P) p.addElement() case a.H1, a.H2, a.H3, a.H4, a.H5, a.H6: @@ -1136,7 +1136,7 @@ func inBodyIM(p *parser) bool { return false } return true - case a.Address, a.Article, a.Aside, a.Blockquote, a.Button, a.Center, a.Details, a.Dialog, a.Dir, a.Div, a.Dl, a.Fieldset, a.Figcaption, a.Figure, a.Footer, a.Header, a.Hgroup, a.Listing, a.Main, a.Menu, a.Nav, a.Ol, a.Pre, a.Section, a.Summary, a.Ul: + case a.Address, a.Article, a.Aside, a.Blockquote, a.Button, a.Center, a.Details, a.Dialog, a.Dir, a.Div, a.Dl, a.Fieldset, a.Figcaption, a.Figure, a.Footer, a.Header, a.Hgroup, a.Listing, a.Main, a.Menu, a.Nav, a.Ol, a.Pre, a.Search, a.Section, a.Summary, a.Ul: p.popUntil(defaultScope, p.tok.DataAtom) case a.Form: if p.oe.contains(a.Template) { diff --git a/vendor/golang.org/x/net/html/token.go b/vendor/golang.org/x/net/html/token.go index 3c57880d6..6598c1f7b 100644 --- a/vendor/golang.org/x/net/html/token.go +++ b/vendor/golang.org/x/net/html/token.go @@ -839,8 +839,22 @@ func (z *Tokenizer) readStartTag() TokenType { if raw { z.rawTag = strings.ToLower(string(z.buf[z.data.start:z.data.end])) } - // Look for a self-closing token like "
". - if z.err == nil && z.buf[z.raw.end-2] == '/' { + // Look for a self-closing token (e.g.
). + // + // Originally, we did this by just checking that the last character of the + // tag (ignoring the closing bracket) was a solidus (/) character, but this + // is not always accurate. + // + // We need to be careful that we don't misinterpret a non-self-closing tag + // as self-closing, as can happen if the tag contains unquoted attribute + // values (i.e. <p a=/>
). + // + // To avoid this, we check that the last non-bracket character of the tag + // (z.raw.end-2) isn't the same character as the last non-quote character of + // the last attribute of the tag (z.pendingAttr[1].end-1), if the tag has + // attributes. + nAttrs := len(z.attr) + if z.err == nil && z.buf[z.raw.end-2] == '/' && (nAttrs == 0 || z.raw.end-2 != z.attr[nAttrs-1][1].end-1) { return SelfClosingTagToken } return StartTagToken diff --git a/vendor/golang.org/x/net/http2/frame.go b/vendor/golang.org/x/net/http2/frame.go index 81faec7e7..97bd8b06f 100644 --- a/vendor/golang.org/x/net/http2/frame.go +++ b/vendor/golang.org/x/net/http2/frame.go @@ -225,6 +225,11 @@ var fhBytes = sync.Pool{ }, } +func invalidHTTP1LookingFrameHeader() FrameHeader { + fh, _ := readFrameHeader(make([]byte, frameHeaderLen), strings.NewReader("HTTP/1.1 ")) + return fh +} + // ReadFrameHeader reads 9 bytes from r and returns a FrameHeader. // Most users should use Framer.ReadFrame instead. func ReadFrameHeader(r io.Reader) (FrameHeader, error) { @@ -503,10 +508,16 @@ func (fr *Framer) ReadFrame() (Frame, error) { return nil, err } if fh.Length > fr.maxReadSize { + if fh == invalidHTTP1LookingFrameHeader() { + return nil, fmt.Errorf("http2: failed reading the frame payload: %w, note that the frame header looked like an HTTP/1.1 header", err) + } return nil, ErrFrameTooLarge } payload := fr.getReadBuf(fh.Length) if _, err := io.ReadFull(fr.r, payload); err != nil { + if fh == invalidHTTP1LookingFrameHeader() { + return nil, fmt.Errorf("http2: failed reading the frame payload: %w, note that the frame header looked like an HTTP/1.1 header", err) + } return nil, err } f, err := typeFrameParser(fh.Type)(fr.frameCache, fh, fr.countError, payload) diff --git a/vendor/golang.org/x/net/http2/http2.go b/vendor/golang.org/x/net/http2/http2.go index c7601c909..6c18ea230 100644 --- a/vendor/golang.org/x/net/http2/http2.go +++ b/vendor/golang.org/x/net/http2/http2.go @@ -34,11 +34,19 @@ import ( ) var ( - VerboseLogs bool - logFrameWrites bool - logFrameReads bool - inTests bool - disableExtendedConnectProtocol bool + VerboseLogs bool + logFrameWrites bool + logFrameReads bool + inTests bool + + // Enabling extended CONNECT by causes browsers to attempt to use + // WebSockets-over-HTTP/2. This results in problems when the server's websocket + // package doesn't support extended CONNECT. + // + // Disable extended CONNECT by default for now. + // + // Issue #71128. + disableExtendedConnectProtocol = true ) func init() { @@ -51,8 +59,8 @@ func init() { logFrameWrites = true logFrameReads = true } - if strings.Contains(e, "http2xconnect=0") { - disableExtendedConnectProtocol = true + if strings.Contains(e, "http2xconnect=1") { + disableExtendedConnectProtocol = false } } @@ -407,23 +415,6 @@ func (s *sorter) SortStrings(ss []string) { s.v = save } -// validPseudoPath reports whether v is a valid :path pseudo-header -// value. It must be either: -// -// - a non-empty string starting with '/' -// - the string '*', for OPTIONS requests. -// -// For now this is only used a quick check for deciding when to clean -// up Opaque URLs before sending requests from the Transport. -// See golang.org/issue/16847 -// -// We used to enforce that the path also didn't start with "//", but -// Google's GFE accepts such paths and Chrome sends them, so ignore -// that part of the spec. See golang.org/issue/19103. 
-func validPseudoPath(v string) bool { - return (len(v) > 0 && v[0] == '/') || v == "*" -} - // incomparable is a zero-width, non-comparable type. Adding it to a struct // makes that struct also non-comparable, and generally doesn't add // any size (as long as it's first). diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go index b55547aec..51fca38f6 100644 --- a/vendor/golang.org/x/net/http2/server.go +++ b/vendor/golang.org/x/net/http2/server.go @@ -50,6 +50,7 @@ import ( "golang.org/x/net/http/httpguts" "golang.org/x/net/http2/hpack" + "golang.org/x/net/internal/httpcommon" ) const ( @@ -812,8 +813,7 @@ const maxCachedCanonicalHeadersKeysSize = 2048 func (sc *serverConn) canonicalHeader(v string) string { sc.serveG.check() - buildCommonHeaderMapsOnce() - cv, ok := commonCanonHeader[v] + cv, ok := httpcommon.CachedCanonicalHeader(v) if ok { return cv } @@ -1068,7 +1068,10 @@ func (sc *serverConn) serve(conf http2Config) { func (sc *serverConn) handlePingTimer(lastFrameReadTime time.Time) { if sc.pingSent { - sc.vlogf("timeout waiting for PING response") + sc.logf("timeout waiting for PING response") + if f := sc.countErrorFunc; f != nil { + f("conn_close_lost_ping") + } sc.conn.Close() return } @@ -2233,25 +2236,25 @@ func (sc *serverConn) newStream(id, pusherID uint32, state streamState) *stream func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*responseWriter, *http.Request, error) { sc.serveG.check() - rp := requestParam{ - method: f.PseudoValue("method"), - scheme: f.PseudoValue("scheme"), - authority: f.PseudoValue("authority"), - path: f.PseudoValue("path"), - protocol: f.PseudoValue("protocol"), + rp := httpcommon.ServerRequestParam{ + Method: f.PseudoValue("method"), + Scheme: f.PseudoValue("scheme"), + Authority: f.PseudoValue("authority"), + Path: f.PseudoValue("path"), + Protocol: f.PseudoValue("protocol"), } // extended connect is disabled, so we should not see :protocol - if disableExtendedConnectProtocol && rp.protocol != "" { + if disableExtendedConnectProtocol && rp.Protocol != "" { return nil, nil, sc.countError("bad_connect", streamError(f.StreamID, ErrCodeProtocol)) } - isConnect := rp.method == "CONNECT" + isConnect := rp.Method == "CONNECT" if isConnect { - if rp.protocol == "" && (rp.path != "" || rp.scheme != "" || rp.authority == "") { + if rp.Protocol == "" && (rp.Path != "" || rp.Scheme != "" || rp.Authority == "") { return nil, nil, sc.countError("bad_connect", streamError(f.StreamID, ErrCodeProtocol)) } - } else if rp.method == "" || rp.path == "" || (rp.scheme != "https" && rp.scheme != "http") { + } else if rp.Method == "" || rp.Path == "" || (rp.Scheme != "https" && rp.Scheme != "http") { // See 8.1.2.6 Malformed Requests and Responses: // // Malformed requests or responses that are detected @@ -2265,15 +2268,16 @@ func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*res return nil, nil, sc.countError("bad_path_method", streamError(f.StreamID, ErrCodeProtocol)) } - rp.header = make(http.Header) + header := make(http.Header) + rp.Header = header for _, hf := range f.RegularFields() { - rp.header.Add(sc.canonicalHeader(hf.Name), hf.Value) + header.Add(sc.canonicalHeader(hf.Name), hf.Value) } - if rp.authority == "" { - rp.authority = rp.header.Get("Host") + if rp.Authority == "" { + rp.Authority = header.Get("Host") } - if rp.protocol != "" { - rp.header.Set(":protocol", rp.protocol) + if rp.Protocol != "" { + header.Set(":protocol", rp.Protocol) } rw, req, err := 
sc.newWriterAndRequestNoBody(st, rp) @@ -2282,7 +2286,7 @@ func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*res } bodyOpen := !f.StreamEnded() if bodyOpen { - if vv, ok := rp.header["Content-Length"]; ok { + if vv, ok := rp.Header["Content-Length"]; ok { if cl, err := strconv.ParseUint(vv[0], 10, 63); err == nil { req.ContentLength = int64(cl) } else { @@ -2298,84 +2302,38 @@ func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*res return rw, req, nil } -type requestParam struct { - method string - scheme, authority, path string - protocol string - header http.Header -} - -func (sc *serverConn) newWriterAndRequestNoBody(st *stream, rp requestParam) (*responseWriter, *http.Request, error) { +func (sc *serverConn) newWriterAndRequestNoBody(st *stream, rp httpcommon.ServerRequestParam) (*responseWriter, *http.Request, error) { sc.serveG.check() var tlsState *tls.ConnectionState // nil if not scheme https - if rp.scheme == "https" { + if rp.Scheme == "https" { tlsState = sc.tlsState } - needsContinue := httpguts.HeaderValuesContainsToken(rp.header["Expect"], "100-continue") - if needsContinue { - rp.header.Del("Expect") - } - // Merge Cookie headers into one "; "-delimited value. - if cookies := rp.header["Cookie"]; len(cookies) > 1 { - rp.header.Set("Cookie", strings.Join(cookies, "; ")) - } - - // Setup Trailers - var trailer http.Header - for _, v := range rp.header["Trailer"] { - for _, key := range strings.Split(v, ",") { - key = http.CanonicalHeaderKey(textproto.TrimString(key)) - switch key { - case "Transfer-Encoding", "Trailer", "Content-Length": - // Bogus. (copy of http1 rules) - // Ignore. - default: - if trailer == nil { - trailer = make(http.Header) - } - trailer[key] = nil - } - } - } - delete(rp.header, "Trailer") - - var url_ *url.URL - var requestURI string - if rp.method == "CONNECT" && rp.protocol == "" { - url_ = &url.URL{Host: rp.authority} - requestURI = rp.authority // mimic HTTP/1 server behavior - } else { - var err error - url_, err = url.ParseRequestURI(rp.path) - if err != nil { - return nil, nil, sc.countError("bad_path", streamError(st.id, ErrCodeProtocol)) - } - requestURI = rp.path + res := httpcommon.NewServerRequest(rp) + if res.InvalidReason != "" { + return nil, nil, sc.countError(res.InvalidReason, streamError(st.id, ErrCodeProtocol)) } body := &requestBody{ conn: sc, stream: st, - needsContinue: needsContinue, + needsContinue: res.NeedsContinue, } - req := &http.Request{ - Method: rp.method, - URL: url_, + req := (&http.Request{ + Method: rp.Method, + URL: res.URL, RemoteAddr: sc.remoteAddrStr, - Header: rp.header, - RequestURI: requestURI, + Header: rp.Header, + RequestURI: res.RequestURI, Proto: "HTTP/2.0", ProtoMajor: 2, ProtoMinor: 0, TLS: tlsState, - Host: rp.authority, + Host: rp.Authority, Body: body, - Trailer: trailer, - } - req = req.WithContext(st.ctx) - + Trailer: res.Trailer, + }).WithContext(st.ctx) rw := sc.newResponseWriter(st, req) return rw, req, nil } @@ -3270,12 +3228,12 @@ func (sc *serverConn) startPush(msg *startPushRequest) { // we start in "half closed (remote)" for simplicity. // See further comments at the definition of stateHalfClosedRemote. 
promised := sc.newStream(promisedID, msg.parent.id, stateHalfClosedRemote) - rw, req, err := sc.newWriterAndRequestNoBody(promised, requestParam{ - method: msg.method, - scheme: msg.url.Scheme, - authority: msg.url.Host, - path: msg.url.RequestURI(), - header: cloneHeader(msg.header), // clone since handler runs concurrently with writing the PUSH_PROMISE + rw, req, err := sc.newWriterAndRequestNoBody(promised, httpcommon.ServerRequestParam{ + Method: msg.method, + Scheme: msg.url.Scheme, + Authority: msg.url.Host, + Path: msg.url.RequestURI(), + Header: cloneHeader(msg.header), // clone since handler runs concurrently with writing the PUSH_PROMISE }) if err != nil { // Should not happen, since we've already validated msg.url. diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go index b2e2ed337..f26356b9c 100644 --- a/vendor/golang.org/x/net/http2/transport.go +++ b/vendor/golang.org/x/net/http2/transport.go @@ -25,7 +25,6 @@ import ( "net/http" "net/http/httptrace" "net/textproto" - "sort" "strconv" "strings" "sync" @@ -35,6 +34,7 @@ import ( "golang.org/x/net/http/httpguts" "golang.org/x/net/http2/hpack" "golang.org/x/net/idna" + "golang.org/x/net/internal/httpcommon" ) const ( @@ -1275,23 +1275,6 @@ func (cc *ClientConn) closeForLostPing() { // exported. At least they'll be DeepEqual for h1-vs-h2 comparisons tests. var errRequestCanceled = errors.New("net/http: request canceled") -func commaSeparatedTrailers(req *http.Request) (string, error) { - keys := make([]string, 0, len(req.Trailer)) - for k := range req.Trailer { - k = canonicalHeader(k) - switch k { - case "Transfer-Encoding", "Trailer", "Content-Length": - return "", fmt.Errorf("invalid Trailer key %q", k) - } - keys = append(keys, k) - } - if len(keys) > 0 { - sort.Strings(keys) - return strings.Join(keys, ","), nil - } - return "", nil -} - func (cc *ClientConn) responseHeaderTimeout() time.Duration { if cc.t.t1 != nil { return cc.t.t1.ResponseHeaderTimeout @@ -1303,22 +1286,6 @@ func (cc *ClientConn) responseHeaderTimeout() time.Duration { return 0 } -// checkConnHeaders checks whether req has any invalid connection-level headers. -// per RFC 7540 section 8.1.2.2: Connection-Specific Header Fields. -// Certain headers are special-cased as okay but not transmitted later. -func checkConnHeaders(req *http.Request) error { - if v := req.Header.Get("Upgrade"); v != "" { - return fmt.Errorf("http2: invalid Upgrade request header: %q", req.Header["Upgrade"]) - } - if vv := req.Header["Transfer-Encoding"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && vv[0] != "chunked") { - return fmt.Errorf("http2: invalid Transfer-Encoding request header: %q", vv) - } - if vv := req.Header["Connection"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && !asciiEqualFold(vv[0], "close") && !asciiEqualFold(vv[0], "keep-alive")) { - return fmt.Errorf("http2: invalid Connection request header: %q", vv) - } - return nil -} - // actualContentLength returns a sanitized version of // req.ContentLength, where 0 actually means zero (not unknown) and -1 // means unknown. @@ -1364,25 +1331,7 @@ func (cc *ClientConn) roundTrip(req *http.Request, streamf func(*clientStream)) donec: make(chan struct{}), } - // TODO(bradfitz): this is a copy of the logic in net/http. Unify somewhere? - if !cc.t.disableCompression() && - req.Header.Get("Accept-Encoding") == "" && - req.Header.Get("Range") == "" && - !cs.isHead { - // Request gzip only, not deflate. Deflate is ambiguous and - // not as universally supported anyway. 
- // See: https://zlib.net/zlib_faq.html#faq39 - // - // Note that we don't request this for HEAD requests, - // due to a bug in nginx: - // http://trac.nginx.org/nginx/ticket/358 - // https://golang.org/issue/5522 - // - // We don't request gzip if the request is for a range, since - // auto-decoding a portion of a gzipped document will just fail - // anyway. See https://golang.org/issue/8923 - cs.requestedGzip = true - } + cs.requestedGzip = httpcommon.IsRequestGzip(req.Method, req.Header, cc.t.disableCompression()) go cs.doRequest(req, streamf) @@ -1496,10 +1445,6 @@ func (cs *clientStream) writeRequest(req *http.Request, streamf func(*clientStre cc := cs.cc ctx := cs.ctx - if err := checkConnHeaders(req); err != nil { - return err - } - // wait for setting frames to be received, a server can change this value later, // but we just wait for the first settings frame var isExtendedConnect bool @@ -1663,26 +1608,39 @@ func (cs *clientStream) encodeAndWriteHeaders(req *http.Request) error { // we send: HEADERS{1}, CONTINUATION{0,} + DATA{0,} (DATA is // sent by writeRequestBody below, along with any Trailers, // again in form HEADERS{1}, CONTINUATION{0,}) - trailers, err := commaSeparatedTrailers(req) - if err != nil { - return err - } - hasTrailers := trailers != "" - contentLen := actualContentLength(req) - hasBody := contentLen != 0 - hdrs, err := cc.encodeHeaders(req, cs.requestedGzip, trailers, contentLen) + cc.hbuf.Reset() + res, err := encodeRequestHeaders(req, cs.requestedGzip, cc.peerMaxHeaderListSize, func(name, value string) { + cc.writeHeader(name, value) + }) if err != nil { - return err + return fmt.Errorf("http2: %w", err) } + hdrs := cc.hbuf.Bytes() // Write the request. - endStream := !hasBody && !hasTrailers + endStream := !res.HasBody && !res.HasTrailers cs.sentHeaders = true err = cc.writeHeaders(cs.ID, endStream, int(cc.maxFrameSize), hdrs) traceWroteHeaders(cs.trace) return err } +func encodeRequestHeaders(req *http.Request, addGzipHeader bool, peerMaxHeaderListSize uint64, headerf func(name, value string)) (httpcommon.EncodeHeadersResult, error) { + return httpcommon.EncodeHeaders(req.Context(), httpcommon.EncodeHeadersParam{ + Request: httpcommon.Request{ + Header: req.Header, + Trailer: req.Trailer, + URL: req.URL, + Host: req.Host, + Method: req.Method, + ActualContentLength: actualContentLength(req), + }, + AddGzipHeader: addGzipHeader, + PeerMaxHeaderListSize: peerMaxHeaderListSize, + DefaultUserAgent: defaultUserAgent, + }, headerf) +} + // cleanupWriteRequest performs post-request tasks. // // If err (the result of writeRequest) is non-nil and the stream is not closed, @@ -2070,218 +2028,6 @@ func (cs *clientStream) awaitFlowControl(maxBytes int) (taken int32, err error) } } -func validateHeaders(hdrs http.Header) string { - for k, vv := range hdrs { - if !httpguts.ValidHeaderFieldName(k) && k != ":protocol" { - return fmt.Sprintf("name %q", k) - } - for _, v := range vv { - if !httpguts.ValidHeaderFieldValue(v) { - // Don't include the value in the error, - // because it may be sensitive. - return fmt.Sprintf("value for header %q", k) - } - } - } - return "" -} - -var errNilRequestURL = errors.New("http2: Request.URI is nil") - -func isNormalConnect(req *http.Request) bool { - return req.Method == "CONNECT" && req.Header.Get(":protocol") == "" -} - -// requires cc.wmu be held. 
-func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trailers string, contentLength int64) ([]byte, error) { - cc.hbuf.Reset() - if req.URL == nil { - return nil, errNilRequestURL - } - - host := req.Host - if host == "" { - host = req.URL.Host - } - host, err := httpguts.PunycodeHostPort(host) - if err != nil { - return nil, err - } - if !httpguts.ValidHostHeader(host) { - return nil, errors.New("http2: invalid Host header") - } - - var path string - if !isNormalConnect(req) { - path = req.URL.RequestURI() - if !validPseudoPath(path) { - orig := path - path = strings.TrimPrefix(path, req.URL.Scheme+"://"+host) - if !validPseudoPath(path) { - if req.URL.Opaque != "" { - return nil, fmt.Errorf("invalid request :path %q from URL.Opaque = %q", orig, req.URL.Opaque) - } else { - return nil, fmt.Errorf("invalid request :path %q", orig) - } - } - } - } - - // Check for any invalid headers+trailers and return an error before we - // potentially pollute our hpack state. (We want to be able to - // continue to reuse the hpack encoder for future requests) - if err := validateHeaders(req.Header); err != "" { - return nil, fmt.Errorf("invalid HTTP header %s", err) - } - if err := validateHeaders(req.Trailer); err != "" { - return nil, fmt.Errorf("invalid HTTP trailer %s", err) - } - - enumerateHeaders := func(f func(name, value string)) { - // 8.1.2.3 Request Pseudo-Header Fields - // The :path pseudo-header field includes the path and query parts of the - // target URI (the path-absolute production and optionally a '?' character - // followed by the query production, see Sections 3.3 and 3.4 of - // [RFC3986]). - f(":authority", host) - m := req.Method - if m == "" { - m = http.MethodGet - } - f(":method", m) - if !isNormalConnect(req) { - f(":path", path) - f(":scheme", req.URL.Scheme) - } - if trailers != "" { - f("trailer", trailers) - } - - var didUA bool - for k, vv := range req.Header { - if asciiEqualFold(k, "host") || asciiEqualFold(k, "content-length") { - // Host is :authority, already sent. - // Content-Length is automatic, set below. - continue - } else if asciiEqualFold(k, "connection") || - asciiEqualFold(k, "proxy-connection") || - asciiEqualFold(k, "transfer-encoding") || - asciiEqualFold(k, "upgrade") || - asciiEqualFold(k, "keep-alive") { - // Per 8.1.2.2 Connection-Specific Header - // Fields, don't send connection-specific - // fields. We have already checked if any - // are error-worthy so just ignore the rest. - continue - } else if asciiEqualFold(k, "user-agent") { - // Match Go's http1 behavior: at most one - // User-Agent. If set to nil or empty string, - // then omit it. Otherwise if not mentioned, - // include the default (below). - didUA = true - if len(vv) < 1 { - continue - } - vv = vv[:1] - if vv[0] == "" { - continue - } - } else if asciiEqualFold(k, "cookie") { - // Per 8.1.2.5 To allow for better compression efficiency, the - // Cookie header field MAY be split into separate header fields, - // each with one or more cookie-pairs. - for _, v := range vv { - for { - p := strings.IndexByte(v, ';') - if p < 0 { - break - } - f("cookie", v[:p]) - p++ - // strip space after semicolon if any. 
- for p+1 <= len(v) && v[p] == ' ' { - p++ - } - v = v[p:] - } - if len(v) > 0 { - f("cookie", v) - } - } - continue - } - - for _, v := range vv { - f(k, v) - } - } - if shouldSendReqContentLength(req.Method, contentLength) { - f("content-length", strconv.FormatInt(contentLength, 10)) - } - if addGzipHeader { - f("accept-encoding", "gzip") - } - if !didUA { - f("user-agent", defaultUserAgent) - } - } - - // Do a first pass over the headers counting bytes to ensure - // we don't exceed cc.peerMaxHeaderListSize. This is done as a - // separate pass before encoding the headers to prevent - // modifying the hpack state. - hlSize := uint64(0) - enumerateHeaders(func(name, value string) { - hf := hpack.HeaderField{Name: name, Value: value} - hlSize += uint64(hf.Size()) - }) - - if hlSize > cc.peerMaxHeaderListSize { - return nil, errRequestHeaderListSize - } - - trace := httptrace.ContextClientTrace(req.Context()) - traceHeaders := traceHasWroteHeaderField(trace) - - // Header list size is ok. Write the headers. - enumerateHeaders(func(name, value string) { - name, ascii := lowerHeader(name) - if !ascii { - // Skip writing invalid headers. Per RFC 7540, Section 8.1.2, header - // field names have to be ASCII characters (just as in HTTP/1.x). - return - } - cc.writeHeader(name, value) - if traceHeaders { - traceWroteHeaderField(trace, name, value) - } - }) - - return cc.hbuf.Bytes(), nil -} - -// shouldSendReqContentLength reports whether the http2.Transport should send -// a "content-length" request header. This logic is basically a copy of the net/http -// transferWriter.shouldSendContentLength. -// The contentLength is the corrected contentLength (so 0 means actually 0, not unknown). -// -1 means unknown. -func shouldSendReqContentLength(method string, contentLength int64) bool { - if contentLength > 0 { - return true - } - if contentLength < 0 { - return false - } - // For zero bodies, whether we send a content-length depends on the method. - // It also kinda doesn't matter for http2 either way, with END_STREAM. - switch method { - case "POST", "PUT", "PATCH": - return true - default: - return false - } -} - // requires cc.wmu be held. func (cc *ClientConn) encodeTrailers(trailer http.Header) ([]byte, error) { cc.hbuf.Reset() @@ -2298,7 +2044,7 @@ func (cc *ClientConn) encodeTrailers(trailer http.Header) ([]byte, error) { } for k, vv := range trailer { - lowKey, ascii := lowerHeader(k) + lowKey, ascii := httpcommon.LowerHeader(k) if !ascii { // Skip writing invalid headers. Per RFC 7540, Section 8.1.2, header // field names have to be ASCII characters (just as in HTTP/1.x). @@ -2464,6 +2210,13 @@ func (rl *clientConnReadLoop) cleanup() { } cc.cond.Broadcast() cc.mu.Unlock() + + if !cc.seenSettings { + // If we have a pending request that wants extended CONNECT, + // let it continue and fail with the connection error. 
+ cc.extendedConnectAllowed = true + close(cc.seenSettingsChan) + } } // countReadFrameError calls Transport.CountError with a string @@ -2556,9 +2309,6 @@ func (rl *clientConnReadLoop) run() error { if VerboseLogs { cc.vlogf("http2: Transport conn %p received error from processing frame %v: %v", cc, summarizeFrame(f), err) } - if !cc.seenSettings { - close(cc.seenSettingsChan) - } return err } } @@ -2653,7 +2403,7 @@ func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFra Status: status + " " + http.StatusText(statusCode), } for _, hf := range regularFields { - key := canonicalHeader(hf.Name) + key := httpcommon.CanonicalHeader(hf.Name) if key == "Trailer" { t := res.Trailer if t == nil { @@ -2661,7 +2411,7 @@ func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFra res.Trailer = t } foreachHeaderElement(hf.Value, func(v string) { - t[canonicalHeader(v)] = nil + t[httpcommon.CanonicalHeader(v)] = nil }) } else { vv := header[key] @@ -2785,7 +2535,7 @@ func (rl *clientConnReadLoop) processTrailers(cs *clientStream, f *MetaHeadersFr trailer := make(http.Header) for _, hf := range f.RegularFields() { - key := canonicalHeader(hf.Name) + key := httpcommon.CanonicalHeader(hf.Name) trailer[key] = append(trailer[key], hf.Value) } cs.trailer = trailer @@ -3331,7 +3081,7 @@ func (cc *ClientConn) writeStreamReset(streamID uint32, code ErrCode, ping bool, var ( errResponseHeaderListSize = errors.New("http2: response header list larger than advertised limit") - errRequestHeaderListSize = errors.New("http2: request header list larger than peer's advertised limit") + errRequestHeaderListSize = httpcommon.ErrRequestHeaderListSize ) func (cc *ClientConn) logf(format string, args ...interface{}) { @@ -3515,16 +3265,6 @@ func traceFirstResponseByte(trace *httptrace.ClientTrace) { } } -func traceHasWroteHeaderField(trace *httptrace.ClientTrace) bool { - return trace != nil && trace.WroteHeaderField != nil -} - -func traceWroteHeaderField(trace *httptrace.ClientTrace, k, v string) { - if trace != nil && trace.WroteHeaderField != nil { - trace.WroteHeaderField(k, []string{v}) - } -} - func traceGot1xxResponseFunc(trace *httptrace.ClientTrace) func(int, textproto.MIMEHeader) error { if trace != nil { return trace.Got1xxResponse diff --git a/vendor/golang.org/x/net/http2/write.go b/vendor/golang.org/x/net/http2/write.go index 6ff6bee7e..fdb35b947 100644 --- a/vendor/golang.org/x/net/http2/write.go +++ b/vendor/golang.org/x/net/http2/write.go @@ -13,6 +13,7 @@ import ( "golang.org/x/net/http/httpguts" "golang.org/x/net/http2/hpack" + "golang.org/x/net/internal/httpcommon" ) // writeFramer is implemented by any type that is used to write frames. @@ -351,7 +352,7 @@ func encodeHeaders(enc *hpack.Encoder, h http.Header, keys []string) { } for _, k := range keys { vv := h[k] - k, ascii := lowerHeader(k) + k, ascii := httpcommon.LowerHeader(k) if !ascii { // Skip writing invalid headers. Per RFC 7540, Section 8.1.2, header // field names have to be ASCII characters (just as in HTTP/1.x). diff --git a/vendor/golang.org/x/net/internal/httpcommon/ascii.go b/vendor/golang.org/x/net/internal/httpcommon/ascii.go new file mode 100644 index 000000000..ed14da5af --- /dev/null +++ b/vendor/golang.org/x/net/internal/httpcommon/ascii.go @@ -0,0 +1,53 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package httpcommon + +import "strings" + +// The HTTP protocols are defined in terms of ASCII, not Unicode. This file +// contains helper functions which may use Unicode-aware functions which would +// otherwise be unsafe and could introduce vulnerabilities if used improperly. + +// asciiEqualFold is strings.EqualFold, ASCII only. It reports whether s and t +// are equal, ASCII-case-insensitively. +func asciiEqualFold(s, t string) bool { + if len(s) != len(t) { + return false + } + for i := 0; i < len(s); i++ { + if lower(s[i]) != lower(t[i]) { + return false + } + } + return true +} + +// lower returns the ASCII lowercase version of b. +func lower(b byte) byte { + if 'A' <= b && b <= 'Z' { + return b + ('a' - 'A') + } + return b +} + +// isASCIIPrint returns whether s is ASCII and printable according to +// https://tools.ietf.org/html/rfc20#section-4.2. +func isASCIIPrint(s string) bool { + for i := 0; i < len(s); i++ { + if s[i] < ' ' || s[i] > '~' { + return false + } + } + return true +} + +// asciiToLower returns the lowercase version of s if s is ASCII and printable, +// and whether or not it was. +func asciiToLower(s string) (lower string, ok bool) { + if !isASCIIPrint(s) { + return "", false + } + return strings.ToLower(s), true +} diff --git a/vendor/golang.org/x/net/http2/headermap.go b/vendor/golang.org/x/net/internal/httpcommon/headermap.go similarity index 74% rename from vendor/golang.org/x/net/http2/headermap.go rename to vendor/golang.org/x/net/internal/httpcommon/headermap.go index 149b3dd20..92483d8e4 100644 --- a/vendor/golang.org/x/net/http2/headermap.go +++ b/vendor/golang.org/x/net/internal/httpcommon/headermap.go @@ -1,11 +1,11 @@ -// Copyright 2014 The Go Authors. All rights reserved. +// Copyright 2025 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -package http2 +package httpcommon import ( - "net/http" + "net/textproto" "sync" ) @@ -82,13 +82,15 @@ func buildCommonHeaderMaps() { commonLowerHeader = make(map[string]string, len(common)) commonCanonHeader = make(map[string]string, len(common)) for _, v := range common { - chk := http.CanonicalHeaderKey(v) + chk := textproto.CanonicalMIMEHeaderKey(v) commonLowerHeader[chk] = v commonCanonHeader[v] = chk } } -func lowerHeader(v string) (lower string, ascii bool) { +// LowerHeader returns the lowercase form of a header name, +// used on the wire for HTTP/2 and HTTP/3 requests. +func LowerHeader(v string) (lower string, ascii bool) { buildCommonHeaderMapsOnce() if s, ok := commonLowerHeader[v]; ok { return s, true @@ -96,10 +98,18 @@ func lowerHeader(v string) (lower string, ascii bool) { return asciiToLower(v) } -func canonicalHeader(v string) string { +// CanonicalHeader canonicalizes a header name. (For example, "host" becomes "Host".) +func CanonicalHeader(v string) string { buildCommonHeaderMapsOnce() if s, ok := commonCanonHeader[v]; ok { return s } - return http.CanonicalHeaderKey(v) + return textproto.CanonicalMIMEHeaderKey(v) +} + +// CachedCanonicalHeader returns the canonical form of a well-known header name. 
+func CachedCanonicalHeader(v string) (string, bool) { + buildCommonHeaderMapsOnce() + s, ok := commonCanonHeader[v] + return s, ok } diff --git a/vendor/golang.org/x/net/internal/httpcommon/request.go b/vendor/golang.org/x/net/internal/httpcommon/request.go new file mode 100644 index 000000000..4b7055317 --- /dev/null +++ b/vendor/golang.org/x/net/internal/httpcommon/request.go @@ -0,0 +1,467 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package httpcommon + +import ( + "context" + "errors" + "fmt" + "net/http/httptrace" + "net/textproto" + "net/url" + "sort" + "strconv" + "strings" + + "golang.org/x/net/http/httpguts" + "golang.org/x/net/http2/hpack" +) + +var ( + ErrRequestHeaderListSize = errors.New("request header list larger than peer's advertised limit") +) + +// Request is a subset of http.Request. +// It'd be simpler to pass an *http.Request, of course, but we can't depend on net/http +// without creating a dependency cycle. +type Request struct { + URL *url.URL + Method string + Host string + Header map[string][]string + Trailer map[string][]string + ActualContentLength int64 // 0 means 0, -1 means unknown +} + +// EncodeHeadersParam is parameters to EncodeHeaders. +type EncodeHeadersParam struct { + Request Request + + // AddGzipHeader indicates that an "accept-encoding: gzip" header should be + // added to the request. + AddGzipHeader bool + + // PeerMaxHeaderListSize, when non-zero, is the peer's MAX_HEADER_LIST_SIZE setting. + PeerMaxHeaderListSize uint64 + + // DefaultUserAgent is the User-Agent header to send when the request + // neither contains a User-Agent nor disables it. + DefaultUserAgent string +} + +// EncodeHeadersParam is the result of EncodeHeaders. +type EncodeHeadersResult struct { + HasBody bool + HasTrailers bool +} + +// EncodeHeaders constructs request headers common to HTTP/2 and HTTP/3. +// It validates a request and calls headerf with each pseudo-header and header +// for the request. +// The headerf function is called with the validated, canonicalized header name. +func EncodeHeaders(ctx context.Context, param EncodeHeadersParam, headerf func(name, value string)) (res EncodeHeadersResult, _ error) { + req := param.Request + + // Check for invalid connection-level headers. + if err := checkConnHeaders(req.Header); err != nil { + return res, err + } + + if req.URL == nil { + return res, errors.New("Request.URL is nil") + } + + host := req.Host + if host == "" { + host = req.URL.Host + } + host, err := httpguts.PunycodeHostPort(host) + if err != nil { + return res, err + } + if !httpguts.ValidHostHeader(host) { + return res, errors.New("invalid Host header") + } + + // isNormalConnect is true if this is a non-extended CONNECT request. + isNormalConnect := false + var protocol string + if vv := req.Header[":protocol"]; len(vv) > 0 { + protocol = vv[0] + } + if req.Method == "CONNECT" && protocol == "" { + isNormalConnect = true + } else if protocol != "" && req.Method != "CONNECT" { + return res, errors.New("invalid :protocol header in non-CONNECT request") + } + + // Validate the path, except for non-extended CONNECT requests which have no path. 
+ var path string + if !isNormalConnect { + path = req.URL.RequestURI() + if !validPseudoPath(path) { + orig := path + path = strings.TrimPrefix(path, req.URL.Scheme+"://"+host) + if !validPseudoPath(path) { + if req.URL.Opaque != "" { + return res, fmt.Errorf("invalid request :path %q from URL.Opaque = %q", orig, req.URL.Opaque) + } else { + return res, fmt.Errorf("invalid request :path %q", orig) + } + } + } + } + + // Check for any invalid headers+trailers and return an error before we + // potentially pollute our hpack state. (We want to be able to + // continue to reuse the hpack encoder for future requests) + if err := validateHeaders(req.Header); err != "" { + return res, fmt.Errorf("invalid HTTP header %s", err) + } + if err := validateHeaders(req.Trailer); err != "" { + return res, fmt.Errorf("invalid HTTP trailer %s", err) + } + + trailers, err := commaSeparatedTrailers(req.Trailer) + if err != nil { + return res, err + } + + enumerateHeaders := func(f func(name, value string)) { + // 8.1.2.3 Request Pseudo-Header Fields + // The :path pseudo-header field includes the path and query parts of the + // target URI (the path-absolute production and optionally a '?' character + // followed by the query production, see Sections 3.3 and 3.4 of + // [RFC3986]). + f(":authority", host) + m := req.Method + if m == "" { + m = "GET" + } + f(":method", m) + if !isNormalConnect { + f(":path", path) + f(":scheme", req.URL.Scheme) + } + if protocol != "" { + f(":protocol", protocol) + } + if trailers != "" { + f("trailer", trailers) + } + + var didUA bool + for k, vv := range req.Header { + if asciiEqualFold(k, "host") || asciiEqualFold(k, "content-length") { + // Host is :authority, already sent. + // Content-Length is automatic, set below. + continue + } else if asciiEqualFold(k, "connection") || + asciiEqualFold(k, "proxy-connection") || + asciiEqualFold(k, "transfer-encoding") || + asciiEqualFold(k, "upgrade") || + asciiEqualFold(k, "keep-alive") { + // Per 8.1.2.2 Connection-Specific Header + // Fields, don't send connection-specific + // fields. We have already checked if any + // are error-worthy so just ignore the rest. + continue + } else if asciiEqualFold(k, "user-agent") { + // Match Go's http1 behavior: at most one + // User-Agent. If set to nil or empty string, + // then omit it. Otherwise if not mentioned, + // include the default (below). + didUA = true + if len(vv) < 1 { + continue + } + vv = vv[:1] + if vv[0] == "" { + continue + } + } else if asciiEqualFold(k, "cookie") { + // Per 8.1.2.5 To allow for better compression efficiency, the + // Cookie header field MAY be split into separate header fields, + // each with one or more cookie-pairs. + for _, v := range vv { + for { + p := strings.IndexByte(v, ';') + if p < 0 { + break + } + f("cookie", v[:p]) + p++ + // strip space after semicolon if any. + for p+1 <= len(v) && v[p] == ' ' { + p++ + } + v = v[p:] + } + if len(v) > 0 { + f("cookie", v) + } + } + continue + } else if k == ":protocol" { + // :protocol pseudo-header was already sent above. + continue + } + + for _, v := range vv { + f(k, v) + } + } + if shouldSendReqContentLength(req.Method, req.ActualContentLength) { + f("content-length", strconv.FormatInt(req.ActualContentLength, 10)) + } + if param.AddGzipHeader { + f("accept-encoding", "gzip") + } + if !didUA { + f("user-agent", param.DefaultUserAgent) + } + } + + // Do a first pass over the headers counting bytes to ensure + // we don't exceed cc.peerMaxHeaderListSize. 
This is done as a + // separate pass before encoding the headers to prevent + // modifying the hpack state. + if param.PeerMaxHeaderListSize > 0 { + hlSize := uint64(0) + enumerateHeaders(func(name, value string) { + hf := hpack.HeaderField{Name: name, Value: value} + hlSize += uint64(hf.Size()) + }) + + if hlSize > param.PeerMaxHeaderListSize { + return res, ErrRequestHeaderListSize + } + } + + trace := httptrace.ContextClientTrace(ctx) + + // Header list size is ok. Write the headers. + enumerateHeaders(func(name, value string) { + name, ascii := LowerHeader(name) + if !ascii { + // Skip writing invalid headers. Per RFC 7540, Section 8.1.2, header + // field names have to be ASCII characters (just as in HTTP/1.x). + return + } + + headerf(name, value) + + if trace != nil && trace.WroteHeaderField != nil { + trace.WroteHeaderField(name, []string{value}) + } + }) + + res.HasBody = req.ActualContentLength != 0 + res.HasTrailers = trailers != "" + return res, nil +} + +// IsRequestGzip reports whether we should add an Accept-Encoding: gzip header +// for a request. +func IsRequestGzip(method string, header map[string][]string, disableCompression bool) bool { + // TODO(bradfitz): this is a copy of the logic in net/http. Unify somewhere? + if !disableCompression && + len(header["Accept-Encoding"]) == 0 && + len(header["Range"]) == 0 && + method != "HEAD" { + // Request gzip only, not deflate. Deflate is ambiguous and + // not as universally supported anyway. + // See: https://zlib.net/zlib_faq.html#faq39 + // + // Note that we don't request this for HEAD requests, + // due to a bug in nginx: + // http://trac.nginx.org/nginx/ticket/358 + // https://golang.org/issue/5522 + // + // We don't request gzip if the request is for a range, since + // auto-decoding a portion of a gzipped document will just fail + // anyway. See https://golang.org/issue/8923 + return true + } + return false +} + +// checkConnHeaders checks whether req has any invalid connection-level headers. +// +// https://www.rfc-editor.org/rfc/rfc9114.html#section-4.2-3 +// https://www.rfc-editor.org/rfc/rfc9113.html#section-8.2.2-1 +// +// Certain headers are special-cased as okay but not transmitted later. +// For example, we allow "Transfer-Encoding: chunked", but drop the header when encoding. +func checkConnHeaders(h map[string][]string) error { + if vv := h["Upgrade"]; len(vv) > 0 && (vv[0] != "" && vv[0] != "chunked") { + return fmt.Errorf("invalid Upgrade request header: %q", vv) + } + if vv := h["Transfer-Encoding"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && vv[0] != "chunked") { + return fmt.Errorf("invalid Transfer-Encoding request header: %q", vv) + } + if vv := h["Connection"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && !asciiEqualFold(vv[0], "close") && !asciiEqualFold(vv[0], "keep-alive")) { + return fmt.Errorf("invalid Connection request header: %q", vv) + } + return nil +} + +func commaSeparatedTrailers(trailer map[string][]string) (string, error) { + keys := make([]string, 0, len(trailer)) + for k := range trailer { + k = CanonicalHeader(k) + switch k { + case "Transfer-Encoding", "Trailer", "Content-Length": + return "", fmt.Errorf("invalid Trailer key %q", k) + } + keys = append(keys, k) + } + if len(keys) > 0 { + sort.Strings(keys) + return strings.Join(keys, ","), nil + } + return "", nil +} + +// validPseudoPath reports whether v is a valid :path pseudo-header +// value. It must be either: +// +// - a non-empty string starting with '/' +// - the string '*', for OPTIONS requests. 
+// +// For now this is only used a quick check for deciding when to clean +// up Opaque URLs before sending requests from the Transport. +// See golang.org/issue/16847 +// +// We used to enforce that the path also didn't start with "//", but +// Google's GFE accepts such paths and Chrome sends them, so ignore +// that part of the spec. See golang.org/issue/19103. +func validPseudoPath(v string) bool { + return (len(v) > 0 && v[0] == '/') || v == "*" +} + +func validateHeaders(hdrs map[string][]string) string { + for k, vv := range hdrs { + if !httpguts.ValidHeaderFieldName(k) && k != ":protocol" { + return fmt.Sprintf("name %q", k) + } + for _, v := range vv { + if !httpguts.ValidHeaderFieldValue(v) { + // Don't include the value in the error, + // because it may be sensitive. + return fmt.Sprintf("value for header %q", k) + } + } + } + return "" +} + +// shouldSendReqContentLength reports whether we should send +// a "content-length" request header. This logic is basically a copy of the net/http +// transferWriter.shouldSendContentLength. +// The contentLength is the corrected contentLength (so 0 means actually 0, not unknown). +// -1 means unknown. +func shouldSendReqContentLength(method string, contentLength int64) bool { + if contentLength > 0 { + return true + } + if contentLength < 0 { + return false + } + // For zero bodies, whether we send a content-length depends on the method. + // It also kinda doesn't matter for http2 either way, with END_STREAM. + switch method { + case "POST", "PUT", "PATCH": + return true + default: + return false + } +} + +// ServerRequestParam is parameters to NewServerRequest. +type ServerRequestParam struct { + Method string + Scheme, Authority, Path string + Protocol string + Header map[string][]string +} + +// ServerRequestResult is the result of NewServerRequest. +type ServerRequestResult struct { + // Various http.Request fields. + URL *url.URL + RequestURI string + Trailer map[string][]string + + NeedsContinue bool // client provided an "Expect: 100-continue" header + + // If the request should be rejected, this is a short string suitable for passing + // to the http2 package's CountError function. + // It might be a bit odd to return errors this way rather than returing an error, + // but this ensures we don't forget to include a CountError reason. + InvalidReason string +} + +func NewServerRequest(rp ServerRequestParam) ServerRequestResult { + needsContinue := httpguts.HeaderValuesContainsToken(rp.Header["Expect"], "100-continue") + if needsContinue { + delete(rp.Header, "Expect") + } + // Merge Cookie headers into one "; "-delimited value. + if cookies := rp.Header["Cookie"]; len(cookies) > 1 { + rp.Header["Cookie"] = []string{strings.Join(cookies, "; ")} + } + + // Setup Trailers + var trailer map[string][]string + for _, v := range rp.Header["Trailer"] { + for _, key := range strings.Split(v, ",") { + key = textproto.CanonicalMIMEHeaderKey(textproto.TrimString(key)) + switch key { + case "Transfer-Encoding", "Trailer", "Content-Length": + // Bogus. (copy of http1 rules) + // Ignore. + default: + if trailer == nil { + trailer = make(map[string][]string) + } + trailer[key] = nil + } + } + } + delete(rp.Header, "Trailer") + + // "':authority' MUST NOT include the deprecated userinfo subcomponent + // for "http" or "https" schemed URIs." 
+ // https://www.rfc-editor.org/rfc/rfc9113.html#section-8.3.1-2.3.8 + if strings.IndexByte(rp.Authority, '@') != -1 && (rp.Scheme == "http" || rp.Scheme == "https") { + return ServerRequestResult{ + InvalidReason: "userinfo_in_authority", + } + } + + var url_ *url.URL + var requestURI string + if rp.Method == "CONNECT" && rp.Protocol == "" { + url_ = &url.URL{Host: rp.Authority} + requestURI = rp.Authority // mimic HTTP/1 server behavior + } else { + var err error + url_, err = url.ParseRequestURI(rp.Path) + if err != nil { + return ServerRequestResult{ + InvalidReason: "bad_path", + } + } + requestURI = rp.Path + } + + return ServerRequestResult{ + URL: url_, + NeedsContinue: needsContinue, + RequestURI: requestURI, + Trailer: trailer, + } +} diff --git a/vendor/golang.org/x/net/websocket/websocket.go b/vendor/golang.org/x/net/websocket/websocket.go index ac76165ce..3448d2039 100644 --- a/vendor/golang.org/x/net/websocket/websocket.go +++ b/vendor/golang.org/x/net/websocket/websocket.go @@ -6,9 +6,10 @@ // as specified in RFC 6455. // // This package currently lacks some features found in an alternative -// and more actively maintained WebSocket package: +// and more actively maintained WebSocket packages: // -// https://pkg.go.dev/github.com/coder/websocket +// - [github.com/gorilla/websocket] +// - [github.com/coder/websocket] package websocket // import "golang.org/x/net/websocket" import ( diff --git a/vendor/golang.org/x/oauth2/README.md b/vendor/golang.org/x/oauth2/README.md index 781770c20..48dbb9d84 100644 --- a/vendor/golang.org/x/oauth2/README.md +++ b/vendor/golang.org/x/oauth2/README.md @@ -5,15 +5,6 @@ oauth2 package contains a client implementation for OAuth 2.0 spec. -## Installation - -~~~~ -go get golang.org/x/oauth2 -~~~~ - -Or you can manually git clone the repository to -`$(go env GOPATH)/src/golang.org/x/oauth2`. - See pkg.go.dev for further documentation and examples. * [pkg.go.dev/golang.org/x/oauth2](https://pkg.go.dev/golang.org/x/oauth2) @@ -33,7 +24,11 @@ The main issue tracker for the oauth2 repository is located at https://github.com/golang/oauth2/issues. This repository uses Gerrit for code changes. To learn how to submit changes to -this repository, see https://golang.org/doc/contribute.html. In particular: +this repository, see https://go.dev/doc/contribute. + +The git repository is https://go.googlesource.com/oauth2. + +Note: * Excluding trivial changes, all contributions should be connected to an existing issue. * API changes must go through the [change proposal process](https://go.dev/s/proposal-process) before they can be accepted. diff --git a/vendor/golang.org/x/oauth2/oauth2.go b/vendor/golang.org/x/oauth2/oauth2.go index 09f6a49b8..74f052aa9 100644 --- a/vendor/golang.org/x/oauth2/oauth2.go +++ b/vendor/golang.org/x/oauth2/oauth2.go @@ -56,7 +56,7 @@ type Config struct { // the OAuth flow, after the resource owner's URLs. RedirectURL string - // Scope specifies optional requested permissions. + // Scopes specifies optional requested permissions. Scopes []string // authStyleCache caches which auth style to use when Endpoint.AuthStyle is diff --git a/vendor/golang.org/x/oauth2/pkce.go b/vendor/golang.org/x/oauth2/pkce.go index 50593b6df..6a95da975 100644 --- a/vendor/golang.org/x/oauth2/pkce.go +++ b/vendor/golang.org/x/oauth2/pkce.go @@ -21,7 +21,7 @@ const ( // // A fresh verifier should be generated for each authorization. 
// S256ChallengeOption(verifier) should then be passed to Config.AuthCodeURL -// (or Config.DeviceAccess) and VerifierOption(verifier) to Config.Exchange +// (or Config.DeviceAuth) and VerifierOption(verifier) to Config.Exchange // (or Config.DeviceAccessToken). func GenerateVerifier() string { // "RECOMMENDED that the output of a suitable random number generator be @@ -51,7 +51,7 @@ func S256ChallengeFromVerifier(verifier string) string { } // S256ChallengeOption derives a PKCE code challenge derived from verifier with -// method S256. It should be passed to Config.AuthCodeURL or Config.DeviceAccess +// method S256. It should be passed to Config.AuthCodeURL or Config.DeviceAuth // only. func S256ChallengeOption(verifier string) AuthCodeOption { return challengeOption{ diff --git a/vendor/golang.org/x/sync/errgroup/errgroup.go b/vendor/golang.org/x/sync/errgroup/errgroup.go index 948a3ee63..a4ea5d14f 100644 --- a/vendor/golang.org/x/sync/errgroup/errgroup.go +++ b/vendor/golang.org/x/sync/errgroup/errgroup.go @@ -46,7 +46,7 @@ func (g *Group) done() { // returns a non-nil error or the first time Wait returns, whichever occurs // first. func WithContext(ctx context.Context) (*Group, context.Context) { - ctx, cancel := withCancelCause(ctx) + ctx, cancel := context.WithCancelCause(ctx) return &Group{cancel: cancel}, ctx } @@ -118,6 +118,7 @@ func (g *Group) TryGo(f func() error) bool { // SetLimit limits the number of active goroutines in this group to at most n. // A negative value indicates no limit. +// A limit of zero will prevent any new goroutines from being added. // // Any subsequent call to the Go method will block until it can add an active // goroutine without exceeding the configured limit. diff --git a/vendor/golang.org/x/sync/errgroup/go120.go b/vendor/golang.org/x/sync/errgroup/go120.go deleted file mode 100644 index f93c740b6..000000000 --- a/vendor/golang.org/x/sync/errgroup/go120.go +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright 2023 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.20 - -package errgroup - -import "context" - -func withCancelCause(parent context.Context) (context.Context, func(error)) { - return context.WithCancelCause(parent) -} diff --git a/vendor/golang.org/x/sync/errgroup/pre_go120.go b/vendor/golang.org/x/sync/errgroup/pre_go120.go deleted file mode 100644 index 88ce33434..000000000 --- a/vendor/golang.org/x/sync/errgroup/pre_go120.go +++ /dev/null @@ -1,14 +0,0 @@ -// Copyright 2023 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build !go1.20 - -package errgroup - -import "context" - -func withCancelCause(parent context.Context) (context.Context, func(error)) { - ctx, cancel := context.WithCancel(parent) - return ctx, func(error) { cancel() } -} diff --git a/vendor/golang.org/x/sys/cpu/cpu.go b/vendor/golang.org/x/sys/cpu/cpu.go index 02609d5b2..9c105f23a 100644 --- a/vendor/golang.org/x/sys/cpu/cpu.go +++ b/vendor/golang.org/x/sys/cpu/cpu.go @@ -72,6 +72,9 @@ var X86 struct { HasSSSE3 bool // Supplemental streaming SIMD extension 3 HasSSE41 bool // Streaming SIMD extension 4 and 4.1 HasSSE42 bool // Streaming SIMD extension 4 and 4.2 + HasAVXIFMA bool // Advanced vector extension Integer Fused Multiply Add + HasAVXVNNI bool // Advanced vector extension Vector Neural Network Instructions + HasAVXVNNIInt8 bool // Advanced vector extension Vector Neural Network Int8 instructions _ CacheLinePad } diff --git a/vendor/golang.org/x/sys/cpu/cpu_x86.go b/vendor/golang.org/x/sys/cpu/cpu_x86.go index 600a68078..1e642f330 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_x86.go +++ b/vendor/golang.org/x/sys/cpu/cpu_x86.go @@ -53,6 +53,9 @@ func initOptions() { {Name: "sse41", Feature: &X86.HasSSE41}, {Name: "sse42", Feature: &X86.HasSSE42}, {Name: "ssse3", Feature: &X86.HasSSSE3}, + {Name: "avxifma", Feature: &X86.HasAVXIFMA}, + {Name: "avxvnni", Feature: &X86.HasAVXVNNI}, + {Name: "avxvnniint8", Feature: &X86.HasAVXVNNIInt8}, // These capabilities should always be enabled on amd64: {Name: "sse2", Feature: &X86.HasSSE2, Required: runtime.GOARCH == "amd64"}, @@ -106,7 +109,7 @@ func archInit() { return } - _, ebx7, ecx7, edx7 := cpuid(7, 0) + eax7, ebx7, ecx7, edx7 := cpuid(7, 0) X86.HasBMI1 = isSet(3, ebx7) X86.HasAVX2 = isSet(5, ebx7) && osSupportsAVX X86.HasBMI2 = isSet(8, ebx7) @@ -134,14 +137,24 @@ func archInit() { X86.HasAVX512VAES = isSet(9, ecx7) X86.HasAVX512VBMI2 = isSet(6, ecx7) X86.HasAVX512BITALG = isSet(12, ecx7) - - eax71, _, _, _ := cpuid(7, 1) - X86.HasAVX512BF16 = isSet(5, eax71) } X86.HasAMXTile = isSet(24, edx7) X86.HasAMXInt8 = isSet(25, edx7) X86.HasAMXBF16 = isSet(22, edx7) + + // These features depend on the second level of extended features. + if eax7 >= 1 { + eax71, _, _, edx71 := cpuid(7, 1) + if X86.HasAVX512 { + X86.HasAVX512BF16 = isSet(5, eax71) + } + if X86.HasAVX { + X86.HasAVXIFMA = isSet(23, eax71) + X86.HasAVXVNNI = isSet(4, eax71) + X86.HasAVXVNNIInt8 = isSet(4, edx71) + } + } } func isSet(bitpos uint, value uint32) bool { diff --git a/vendor/golang.org/x/sys/unix/auxv.go b/vendor/golang.org/x/sys/unix/auxv.go new file mode 100644 index 000000000..37a82528f --- /dev/null +++ b/vendor/golang.org/x/sys/unix/auxv.go @@ -0,0 +1,36 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.21 && (aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos) + +package unix + +import ( + "syscall" + "unsafe" +) + +//go:linkname runtime_getAuxv runtime.getAuxv +func runtime_getAuxv() []uintptr + +// Auxv returns the ELF auxiliary vector as a sequence of key/value pairs. +// The returned slice is always a fresh copy, owned by the caller. +// It returns an error on non-ELF platforms, or if the auxiliary vector cannot be accessed, +// which happens in some locked-down environments and build modes. 
+func Auxv() ([][2]uintptr, error) { + vec := runtime_getAuxv() + vecLen := len(vec) + + if vecLen == 0 { + return nil, syscall.ENOENT + } + + if vecLen%2 != 0 { + return nil, syscall.EINVAL + } + + result := make([]uintptr, vecLen) + copy(result, vec) + return unsafe.Slice((*[2]uintptr)(unsafe.Pointer(&result[0])), vecLen/2), nil +} diff --git a/vendor/golang.org/x/sys/unix/auxv_unsupported.go b/vendor/golang.org/x/sys/unix/auxv_unsupported.go new file mode 100644 index 000000000..1200487f2 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/auxv_unsupported.go @@ -0,0 +1,13 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !go1.21 && (aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos) + +package unix + +import "syscall" + +func Auxv() ([][2]uintptr, error) { + return nil, syscall.ENOTSUP +} diff --git a/vendor/golang.org/x/sys/unix/syscall_solaris.go b/vendor/golang.org/x/sys/unix/syscall_solaris.go index 21974af06..abc395547 100644 --- a/vendor/golang.org/x/sys/unix/syscall_solaris.go +++ b/vendor/golang.org/x/sys/unix/syscall_solaris.go @@ -1102,3 +1102,90 @@ func (s *Strioctl) SetInt(i int) { func IoctlSetStrioctlRetInt(fd int, req int, s *Strioctl) (int, error) { return ioctlPtrRet(fd, req, unsafe.Pointer(s)) } + +// Ucred Helpers +// See ucred(3c) and getpeerucred(3c) + +//sys getpeerucred(fd uintptr, ucred *uintptr) (err error) +//sys ucredFree(ucred uintptr) = ucred_free +//sys ucredGet(pid int) (ucred uintptr, err error) = ucred_get +//sys ucredGeteuid(ucred uintptr) (uid int) = ucred_geteuid +//sys ucredGetegid(ucred uintptr) (gid int) = ucred_getegid +//sys ucredGetruid(ucred uintptr) (uid int) = ucred_getruid +//sys ucredGetrgid(ucred uintptr) (gid int) = ucred_getrgid +//sys ucredGetsuid(ucred uintptr) (uid int) = ucred_getsuid +//sys ucredGetsgid(ucred uintptr) (gid int) = ucred_getsgid +//sys ucredGetpid(ucred uintptr) (pid int) = ucred_getpid + +// Ucred is an opaque struct that holds user credentials. +type Ucred struct { + ucred uintptr +} + +// We need to ensure that ucredFree is called on the underlying ucred +// when the Ucred is garbage collected. 
+func ucredFinalizer(u *Ucred) { + ucredFree(u.ucred) +} + +func GetPeerUcred(fd uintptr) (*Ucred, error) { + var ucred uintptr + err := getpeerucred(fd, &ucred) + if err != nil { + return nil, err + } + result := &Ucred{ + ucred: ucred, + } + // set the finalizer on the result so that the ucred will be freed + runtime.SetFinalizer(result, ucredFinalizer) + return result, nil +} + +func UcredGet(pid int) (*Ucred, error) { + ucred, err := ucredGet(pid) + if err != nil { + return nil, err + } + result := &Ucred{ + ucred: ucred, + } + // set the finalizer on the result so that the ucred will be freed + runtime.SetFinalizer(result, ucredFinalizer) + return result, nil +} + +func (u *Ucred) Geteuid() int { + defer runtime.KeepAlive(u) + return ucredGeteuid(u.ucred) +} + +func (u *Ucred) Getruid() int { + defer runtime.KeepAlive(u) + return ucredGetruid(u.ucred) +} + +func (u *Ucred) Getsuid() int { + defer runtime.KeepAlive(u) + return ucredGetsuid(u.ucred) +} + +func (u *Ucred) Getegid() int { + defer runtime.KeepAlive(u) + return ucredGetegid(u.ucred) +} + +func (u *Ucred) Getrgid() int { + defer runtime.KeepAlive(u) + return ucredGetrgid(u.ucred) +} + +func (u *Ucred) Getsgid() int { + defer runtime.KeepAlive(u) + return ucredGetsgid(u.ucred) +} + +func (u *Ucred) Getpid() int { + defer runtime.KeepAlive(u) + return ucredGetpid(u.ucred) +} diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux.go b/vendor/golang.org/x/sys/unix/zerrors_linux.go index 6ebc48b3f..4f432bfe8 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux.go @@ -1245,6 +1245,7 @@ const ( FAN_REPORT_DFID_NAME = 0xc00 FAN_REPORT_DFID_NAME_TARGET = 0x1e00 FAN_REPORT_DIR_FID = 0x400 + FAN_REPORT_FD_ERROR = 0x2000 FAN_REPORT_FID = 0x200 FAN_REPORT_NAME = 0x800 FAN_REPORT_PIDFD = 0x80 @@ -1330,8 +1331,10 @@ const ( FUSE_SUPER_MAGIC = 0x65735546 FUTEXFS_SUPER_MAGIC = 0xbad1dea F_ADD_SEALS = 0x409 + F_CREATED_QUERY = 0x404 F_DUPFD = 0x0 F_DUPFD_CLOEXEC = 0x406 + F_DUPFD_QUERY = 0x403 F_EXLCK = 0x4 F_GETFD = 0x1 F_GETFL = 0x3 @@ -1551,6 +1554,7 @@ const ( IPPROTO_ROUTING = 0x2b IPPROTO_RSVP = 0x2e IPPROTO_SCTP = 0x84 + IPPROTO_SMC = 0x100 IPPROTO_TCP = 0x6 IPPROTO_TP = 0x1d IPPROTO_UDP = 0x11 @@ -1623,6 +1627,8 @@ const ( IPV6_UNICAST_IF = 0x4c IPV6_USER_FLOW = 0xe IPV6_V6ONLY = 0x1a + IPV6_VERSION = 0x60 + IPV6_VERSION_MASK = 0xf0 IPV6_XFRM_POLICY = 0x23 IP_ADD_MEMBERSHIP = 0x23 IP_ADD_SOURCE_MEMBERSHIP = 0x27 @@ -1867,6 +1873,7 @@ const ( MADV_UNMERGEABLE = 0xd MADV_WILLNEED = 0x3 MADV_WIPEONFORK = 0x12 + MAP_DROPPABLE = 0x8 MAP_FILE = 0x0 MAP_FIXED = 0x10 MAP_FIXED_NOREPLACE = 0x100000 @@ -1967,6 +1974,7 @@ const ( MSG_PEEK = 0x2 MSG_PROXY = 0x10 MSG_RST = 0x1000 + MSG_SOCK_DEVMEM = 0x2000000 MSG_SYN = 0x400 MSG_TRUNC = 0x20 MSG_TRYHARD = 0x4 @@ -2083,6 +2091,7 @@ const ( NFC_ATR_REQ_MAXSIZE = 0x40 NFC_ATR_RES_GB_MAXSIZE = 0x2f NFC_ATR_RES_MAXSIZE = 0x40 + NFC_ATS_MAXSIZE = 0x14 NFC_COMM_ACTIVE = 0x0 NFC_COMM_PASSIVE = 0x1 NFC_DEVICE_NAME_MAXSIZE = 0x8 @@ -2163,6 +2172,7 @@ const ( NFNL_SUBSYS_QUEUE = 0x3 NFNL_SUBSYS_ULOG = 0x4 NFS_SUPER_MAGIC = 0x6969 + NFT_BITWISE_BOOL = 0x0 NFT_CHAIN_FLAGS = 0x7 NFT_CHAIN_MAXNAMELEN = 0x100 NFT_CT_MAX = 0x17 @@ -2491,6 +2501,7 @@ const ( PR_GET_PDEATHSIG = 0x2 PR_GET_SECCOMP = 0x15 PR_GET_SECUREBITS = 0x1b + PR_GET_SHADOW_STACK_STATUS = 0x4a PR_GET_SPECULATION_CTRL = 0x34 PR_GET_TAGGED_ADDR_CTRL = 0x38 PR_GET_THP_DISABLE = 0x2a @@ -2499,6 +2510,7 @@ const ( PR_GET_TIMING = 0xd PR_GET_TSC = 0x19 PR_GET_UNALIGN = 0x5 + 
PR_LOCK_SHADOW_STACK_STATUS = 0x4c PR_MCE_KILL = 0x21 PR_MCE_KILL_CLEAR = 0x0 PR_MCE_KILL_DEFAULT = 0x2 @@ -2525,6 +2537,8 @@ const ( PR_PAC_GET_ENABLED_KEYS = 0x3d PR_PAC_RESET_KEYS = 0x36 PR_PAC_SET_ENABLED_KEYS = 0x3c + PR_PMLEN_MASK = 0x7f000000 + PR_PMLEN_SHIFT = 0x18 PR_PPC_DEXCR_CTRL_CLEAR = 0x4 PR_PPC_DEXCR_CTRL_CLEAR_ONEXEC = 0x10 PR_PPC_DEXCR_CTRL_EDITABLE = 0x1 @@ -2592,6 +2606,7 @@ const ( PR_SET_PTRACER = 0x59616d61 PR_SET_SECCOMP = 0x16 PR_SET_SECUREBITS = 0x1c + PR_SET_SHADOW_STACK_STATUS = 0x4b PR_SET_SPECULATION_CTRL = 0x35 PR_SET_SYSCALL_USER_DISPATCH = 0x3b PR_SET_TAGGED_ADDR_CTRL = 0x37 @@ -2602,6 +2617,9 @@ const ( PR_SET_UNALIGN = 0x6 PR_SET_VMA = 0x53564d41 PR_SET_VMA_ANON_NAME = 0x0 + PR_SHADOW_STACK_ENABLE = 0x1 + PR_SHADOW_STACK_PUSH = 0x4 + PR_SHADOW_STACK_WRITE = 0x2 PR_SME_GET_VL = 0x40 PR_SME_SET_VL = 0x3f PR_SME_SET_VL_ONEXEC = 0x40000 @@ -2911,7 +2929,6 @@ const ( RTM_NEWNEXTHOP = 0x68 RTM_NEWNEXTHOPBUCKET = 0x74 RTM_NEWNSID = 0x58 - RTM_NEWNVLAN = 0x70 RTM_NEWPREFIX = 0x34 RTM_NEWQDISC = 0x24 RTM_NEWROUTE = 0x18 @@ -2920,6 +2937,7 @@ const ( RTM_NEWTCLASS = 0x28 RTM_NEWTFILTER = 0x2c RTM_NEWTUNNEL = 0x78 + RTM_NEWVLAN = 0x70 RTM_NR_FAMILIES = 0x1b RTM_NR_MSGTYPES = 0x6c RTM_SETDCB = 0x4f diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go index c0d45e320..75207613c 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go @@ -116,6 +116,8 @@ const ( IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 + IPV6_FLOWINFO_MASK = 0xffffff0f + IPV6_FLOWLABEL_MASK = 0xffff0f00 ISIG = 0x1 IUCLC = 0x200 IXOFF = 0x1000 @@ -304,6 +306,7 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go index c731d24f0..c68acda53 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go @@ -116,6 +116,8 @@ const ( IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 + IPV6_FLOWINFO_MASK = 0xffffff0f + IPV6_FLOWLABEL_MASK = 0xffff0f00 ISIG = 0x1 IUCLC = 0x200 IXOFF = 0x1000 @@ -305,6 +307,7 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go index 680018a4a..a8c607ab8 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go @@ -115,6 +115,8 @@ const ( IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 + IPV6_FLOWINFO_MASK = 0xffffff0f + IPV6_FLOWLABEL_MASK = 0xffff0f00 ISIG = 0x1 IUCLC = 0x200 IXOFF = 0x1000 @@ -310,6 +312,7 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go index a63909f30..18563dd8d 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go 
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go @@ -109,6 +109,7 @@ const ( F_SETOWN = 0x8 F_UNLCK = 0x2 F_WRLCK = 0x1 + GCS_MAGIC = 0x47435300 HIDIOCGRAWINFO = 0x80084803 HIDIOCGRDESC = 0x90044802 HIDIOCGRDESCSIZE = 0x80044801 @@ -119,6 +120,8 @@ const ( IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 + IPV6_FLOWINFO_MASK = 0xffffff0f + IPV6_FLOWLABEL_MASK = 0xffff0f00 ISIG = 0x1 IUCLC = 0x200 IXOFF = 0x1000 @@ -302,6 +305,7 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go index 9b0a2573f..22912cdaa 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go @@ -116,6 +116,8 @@ const ( IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 + IPV6_FLOWINFO_MASK = 0xffffff0f + IPV6_FLOWLABEL_MASK = 0xffff0f00 ISIG = 0x1 IUCLC = 0x200 IXOFF = 0x1000 @@ -297,6 +299,7 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go index 958e6e064..29344eb37 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go @@ -115,6 +115,8 @@ const ( IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x80 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 + IPV6_FLOWINFO_MASK = 0xfffffff + IPV6_FLOWLABEL_MASK = 0xfffff ISIG = 0x1 IUCLC = 0x200 IXOFF = 0x1000 @@ -303,6 +305,7 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go index 50c7f25bd..20d51fb96 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go @@ -115,6 +115,8 @@ const ( IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x80 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 + IPV6_FLOWINFO_MASK = 0xfffffff + IPV6_FLOWLABEL_MASK = 0xfffff ISIG = 0x1 IUCLC = 0x200 IXOFF = 0x1000 @@ -303,6 +305,7 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go index ced21d66d..321b60902 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go @@ -115,6 +115,8 @@ const ( IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x80 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 + IPV6_FLOWINFO_MASK = 0xffffff0f + IPV6_FLOWLABEL_MASK = 0xffff0f00 ISIG = 0x1 IUCLC = 0x200 IXOFF = 0x1000 @@ -303,6 +305,7 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 diff --git 
a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go index 226c04419..9bacdf1e2 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go @@ -115,6 +115,8 @@ const ( IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x80 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 + IPV6_FLOWINFO_MASK = 0xffffff0f + IPV6_FLOWLABEL_MASK = 0xffff0f00 ISIG = 0x1 IUCLC = 0x200 IXOFF = 0x1000 @@ -303,6 +305,7 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go index 3122737cd..c22427261 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go @@ -115,6 +115,8 @@ const ( IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 + IPV6_FLOWINFO_MASK = 0xfffffff + IPV6_FLOWLABEL_MASK = 0xfffff ISIG = 0x80 IUCLC = 0x1000 IXOFF = 0x400 @@ -358,6 +360,7 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go index eb5d3467e..6270c8ee1 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go @@ -115,6 +115,8 @@ const ( IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 + IPV6_FLOWINFO_MASK = 0xfffffff + IPV6_FLOWLABEL_MASK = 0xfffff ISIG = 0x80 IUCLC = 0x1000 IXOFF = 0x400 @@ -362,6 +364,7 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go index e921ebc60..9966c1941 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go @@ -115,6 +115,8 @@ const ( IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 + IPV6_FLOWINFO_MASK = 0xffffff0f + IPV6_FLOWLABEL_MASK = 0xffff0f00 ISIG = 0x80 IUCLC = 0x1000 IXOFF = 0x400 @@ -362,6 +364,7 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go index 38ba81c55..848e5fcc4 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go @@ -115,6 +115,8 @@ const ( IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 + IPV6_FLOWINFO_MASK = 0xffffff0f + IPV6_FLOWLABEL_MASK = 0xffff0f00 ISIG = 0x1 IUCLC = 0x200 IXOFF = 0x1000 @@ -294,6 +296,7 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 diff 
--git a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go index 71f040097..669b2adb8 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go @@ -115,6 +115,8 @@ const ( IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 + IPV6_FLOWINFO_MASK = 0xfffffff + IPV6_FLOWLABEL_MASK = 0xfffff ISIG = 0x1 IUCLC = 0x200 IXOFF = 0x1000 @@ -366,6 +368,7 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go index c44a31332..4834e5751 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go @@ -119,6 +119,8 @@ const ( IN_CLOEXEC = 0x400000 IN_NONBLOCK = 0x4000 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 + IPV6_FLOWINFO_MASK = 0xfffffff + IPV6_FLOWLABEL_MASK = 0xfffff ISIG = 0x1 IUCLC = 0x200 IXOFF = 0x1000 @@ -357,6 +359,7 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x38 SCM_TIMESTAMPING_PKTINFO = 0x3c SCM_TIMESTAMPNS = 0x21 + SCM_TS_OPT_ID = 0x5a SCM_TXTIME = 0x3f SCM_WIFI_STATUS = 0x25 SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go index 829b87feb..c6545413c 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go @@ -141,6 +141,16 @@ import ( //go:cgo_import_dynamic libc_getpeername getpeername "libsocket.so" //go:cgo_import_dynamic libc_setsockopt setsockopt "libsocket.so" //go:cgo_import_dynamic libc_recvfrom recvfrom "libsocket.so" +//go:cgo_import_dynamic libc_getpeerucred getpeerucred "libc.so" +//go:cgo_import_dynamic libc_ucred_get ucred_get "libc.so" +//go:cgo_import_dynamic libc_ucred_geteuid ucred_geteuid "libc.so" +//go:cgo_import_dynamic libc_ucred_getegid ucred_getegid "libc.so" +//go:cgo_import_dynamic libc_ucred_getruid ucred_getruid "libc.so" +//go:cgo_import_dynamic libc_ucred_getrgid ucred_getrgid "libc.so" +//go:cgo_import_dynamic libc_ucred_getsuid ucred_getsuid "libc.so" +//go:cgo_import_dynamic libc_ucred_getsgid ucred_getsgid "libc.so" +//go:cgo_import_dynamic libc_ucred_getpid ucred_getpid "libc.so" +//go:cgo_import_dynamic libc_ucred_free ucred_free "libc.so" //go:cgo_import_dynamic libc_port_create port_create "libc.so" //go:cgo_import_dynamic libc_port_associate port_associate "libc.so" //go:cgo_import_dynamic libc_port_dissociate port_dissociate "libc.so" @@ -280,6 +290,16 @@ import ( //go:linkname procgetpeername libc_getpeername //go:linkname procsetsockopt libc_setsockopt //go:linkname procrecvfrom libc_recvfrom +//go:linkname procgetpeerucred libc_getpeerucred +//go:linkname procucred_get libc_ucred_get +//go:linkname procucred_geteuid libc_ucred_geteuid +//go:linkname procucred_getegid libc_ucred_getegid +//go:linkname procucred_getruid libc_ucred_getruid +//go:linkname procucred_getrgid libc_ucred_getrgid +//go:linkname procucred_getsuid libc_ucred_getsuid +//go:linkname procucred_getsgid libc_ucred_getsgid +//go:linkname procucred_getpid libc_ucred_getpid +//go:linkname procucred_free libc_ucred_free //go:linkname procport_create libc_port_create //go:linkname procport_associate libc_port_associate 
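The cgo_import_dynamic/linkname plumbing here backs the exported ucred helpers added earlier in syscall_solaris.go (GetPeerUcred, UcredGet and the Get* accessors). A minimal, Solaris-only sketch of how a caller might use them; the fd handling below is an assumption for illustration, not part of this change:

//go:build solaris

package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

// logPeer prints the credentials of the peer on a connected AF_UNIX socket,
// using the new wrappers around getpeerucred(3c).
func logPeer(fd uintptr) {
	uc, err := unix.GetPeerUcred(fd)
	if err != nil {
		fmt.Fprintln(os.Stderr, "getpeerucred:", err)
		return
	}
	// The underlying ucred_t is released by a finalizer; each accessor keeps
	// the wrapper alive for the duration of the call.
	fmt.Printf("peer pid=%d euid=%d egid=%d ruid=%d\n",
		uc.Getpid(), uc.Geteuid(), uc.Getegid(), uc.Getruid())
}

func main() {
	// In real code fd would come from an accepted AF_UNIX connection; using
	// os.Stdin here only keeps the sketch self-contained (it will normally
	// just print an error).
	logPeer(os.Stdin.Fd())
}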
//go:linkname procport_dissociate libc_port_dissociate @@ -420,6 +440,16 @@ var ( procgetpeername, procsetsockopt, procrecvfrom, + procgetpeerucred, + procucred_get, + procucred_geteuid, + procucred_getegid, + procucred_getruid, + procucred_getrgid, + procucred_getsuid, + procucred_getsgid, + procucred_getpid, + procucred_free, procport_create, procport_associate, procport_dissociate, @@ -2029,6 +2059,90 @@ func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Sockl // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func getpeerucred(fd uintptr, ucred *uintptr) (err error) { + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procgetpeerucred)), 2, uintptr(fd), uintptr(unsafe.Pointer(ucred)), 0, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ucredGet(pid int) (ucred uintptr, err error) { + r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procucred_get)), 1, uintptr(pid), 0, 0, 0, 0, 0) + ucred = uintptr(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ucredGeteuid(ucred uintptr) (uid int) { + r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procucred_geteuid)), 1, uintptr(ucred), 0, 0, 0, 0, 0) + uid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ucredGetegid(ucred uintptr) (gid int) { + r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procucred_getegid)), 1, uintptr(ucred), 0, 0, 0, 0, 0) + gid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ucredGetruid(ucred uintptr) (uid int) { + r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procucred_getruid)), 1, uintptr(ucred), 0, 0, 0, 0, 0) + uid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ucredGetrgid(ucred uintptr) (gid int) { + r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procucred_getrgid)), 1, uintptr(ucred), 0, 0, 0, 0, 0) + gid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ucredGetsuid(ucred uintptr) (uid int) { + r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procucred_getsuid)), 1, uintptr(ucred), 0, 0, 0, 0, 0) + uid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ucredGetsgid(ucred uintptr) (gid int) { + r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procucred_getsgid)), 1, uintptr(ucred), 0, 0, 0, 0, 0) + gid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ucredGetpid(ucred uintptr) (pid int) { + r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procucred_getpid)), 1, uintptr(ucred), 0, 0, 0, 0, 0) + pid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ucredFree(ucred uintptr) { + sysvicall6(uintptr(unsafe.Pointer(&procucred_free)), 1, uintptr(ucred), 0, 0, 0, 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func port_create() (n int, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procport_create)), 0, 0, 0, 0, 0, 0, 0) n = int(r0) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go index 524b0820c..c79aaff30 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go @@ -458,4 +458,8 @@ const ( SYS_LSM_SET_SELF_ATTR 
= 460 SYS_LSM_LIST_MODULES = 461 SYS_MSEAL = 462 + SYS_SETXATTRAT = 463 + SYS_GETXATTRAT = 464 + SYS_LISTXATTRAT = 465 + SYS_REMOVEXATTRAT = 466 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go index f485dbf45..5eb450695 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go @@ -381,4 +381,8 @@ const ( SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 SYS_MSEAL = 462 + SYS_SETXATTRAT = 463 + SYS_GETXATTRAT = 464 + SYS_LISTXATTRAT = 465 + SYS_REMOVEXATTRAT = 466 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go index 70b35bf3b..05e502974 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go @@ -422,4 +422,8 @@ const ( SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 SYS_MSEAL = 462 + SYS_SETXATTRAT = 463 + SYS_GETXATTRAT = 464 + SYS_LISTXATTRAT = 465 + SYS_REMOVEXATTRAT = 466 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go index 1893e2fe8..38c53ec51 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go @@ -325,4 +325,8 @@ const ( SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 SYS_MSEAL = 462 + SYS_SETXATTRAT = 463 + SYS_GETXATTRAT = 464 + SYS_LISTXATTRAT = 465 + SYS_REMOVEXATTRAT = 466 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go index 16a4017da..31d2e71a1 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go @@ -321,4 +321,8 @@ const ( SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 SYS_MSEAL = 462 + SYS_SETXATTRAT = 463 + SYS_GETXATTRAT = 464 + SYS_LISTXATTRAT = 465 + SYS_REMOVEXATTRAT = 466 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go index 7e567f1ef..f4184a336 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go @@ -442,4 +442,8 @@ const ( SYS_LSM_SET_SELF_ATTR = 4460 SYS_LSM_LIST_MODULES = 4461 SYS_MSEAL = 4462 + SYS_SETXATTRAT = 4463 + SYS_GETXATTRAT = 4464 + SYS_LISTXATTRAT = 4465 + SYS_REMOVEXATTRAT = 4466 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go index 38ae55e5e..05b996227 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go @@ -372,4 +372,8 @@ const ( SYS_LSM_SET_SELF_ATTR = 5460 SYS_LSM_LIST_MODULES = 5461 SYS_MSEAL = 5462 + SYS_SETXATTRAT = 5463 + SYS_GETXATTRAT = 5464 + SYS_LISTXATTRAT = 5465 + SYS_REMOVEXATTRAT = 5466 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go index 55e92e60a..43a256e9e 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go @@ -372,4 +372,8 @@ const ( SYS_LSM_SET_SELF_ATTR = 5460 SYS_LSM_LIST_MODULES = 5461 SYS_MSEAL = 5462 + SYS_SETXATTRAT = 5463 + SYS_GETXATTRAT = 5464 + SYS_LISTXATTRAT = 5465 + SYS_REMOVEXATTRAT = 5466 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go 
b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go index 60658d6a0..eea5ddfc2 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go @@ -442,4 +442,8 @@ const ( SYS_LSM_SET_SELF_ATTR = 4460 SYS_LSM_LIST_MODULES = 4461 SYS_MSEAL = 4462 + SYS_SETXATTRAT = 4463 + SYS_GETXATTRAT = 4464 + SYS_LISTXATTRAT = 4465 + SYS_REMOVEXATTRAT = 4466 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go index e203e8a7e..0d777bfbb 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go @@ -449,4 +449,8 @@ const ( SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 SYS_MSEAL = 462 + SYS_SETXATTRAT = 463 + SYS_GETXATTRAT = 464 + SYS_LISTXATTRAT = 465 + SYS_REMOVEXATTRAT = 466 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go index 5944b97d5..b44636502 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go @@ -421,4 +421,8 @@ const ( SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 SYS_MSEAL = 462 + SYS_SETXATTRAT = 463 + SYS_GETXATTRAT = 464 + SYS_LISTXATTRAT = 465 + SYS_REMOVEXATTRAT = 466 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go index c66d416da..0c7d21c18 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go @@ -421,4 +421,8 @@ const ( SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 SYS_MSEAL = 462 + SYS_SETXATTRAT = 463 + SYS_GETXATTRAT = 464 + SYS_LISTXATTRAT = 465 + SYS_REMOVEXATTRAT = 466 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go index a5459e766..840539169 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go @@ -326,4 +326,8 @@ const ( SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 SYS_MSEAL = 462 + SYS_SETXATTRAT = 463 + SYS_GETXATTRAT = 464 + SYS_LISTXATTRAT = 465 + SYS_REMOVEXATTRAT = 466 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go index 01d86825b..fcf1b790d 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go @@ -387,4 +387,8 @@ const ( SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 SYS_MSEAL = 462 + SYS_SETXATTRAT = 463 + SYS_GETXATTRAT = 464 + SYS_LISTXATTRAT = 465 + SYS_REMOVEXATTRAT = 466 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go index 7b703e77c..52d15b5f9 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go @@ -400,4 +400,8 @@ const ( SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 SYS_MSEAL = 462 + SYS_SETXATTRAT = 463 + SYS_GETXATTRAT = 464 + SYS_LISTXATTRAT = 465 + SYS_REMOVEXATTRAT = 466 ) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux.go b/vendor/golang.org/x/sys/unix/ztypes_linux.go index 5537148dc..a46abe647 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux.go @@ -4747,7 +4747,7 @@ const ( NL80211_ATTR_MAC_HINT = 0xc8 NL80211_ATTR_MAC_MASK = 0xd7 
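The zsysnum hunks above register the new *xattrat syscall numbers for every supported architecture (463-466 on most ABIs, with the usual 4000/5000 offsets on MIPS o32/n64). The update adds only the numbers; no typed wrappers appear in this diff, so a hypothetical caller would go through the raw syscall interface. A hedged sketch of a feature probe, deliberately passing zero arguments so the only information extracted is ENOSYS versus anything else:

//go:build linux

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	// A kernel without setxattrat(2) fails with ENOSYS before looking at the
	// arguments; a kernel that implements it fails differently (e.g. EFAULT
	// for the nil path), which is enough to detect support.
	_, _, errno := unix.Syscall6(unix.SYS_SETXATTRAT, 0, 0, 0, 0, 0, 0)
	if errno == unix.ENOSYS {
		fmt.Println("setxattrat(2) not implemented by this kernel")
	} else {
		fmt.Printf("setxattrat(2) present (probe returned %v)\n", errno)
	}
}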
NL80211_ATTR_MAX_AP_ASSOC_STA = 0xca - NL80211_ATTR_MAX = 0x14c + NL80211_ATTR_MAX = 0x14d NL80211_ATTR_MAX_CRIT_PROT_DURATION = 0xb4 NL80211_ATTR_MAX_CSA_COUNTERS = 0xce NL80211_ATTR_MAX_MATCH_SETS = 0x85 @@ -5519,7 +5519,7 @@ const ( NL80211_MNTR_FLAG_CONTROL = 0x3 NL80211_MNTR_FLAG_COOK_FRAMES = 0x5 NL80211_MNTR_FLAG_FCSFAIL = 0x1 - NL80211_MNTR_FLAG_MAX = 0x6 + NL80211_MNTR_FLAG_MAX = 0x7 NL80211_MNTR_FLAG_OTHER_BSS = 0x4 NL80211_MNTR_FLAG_PLCPFAIL = 0x2 NL80211_MPATH_FLAG_ACTIVE = 0x1 @@ -6174,3 +6174,5 @@ type SockDiagReq struct { Family uint8 Protocol uint8 } + +const RTM_NEWNVLAN = 0x70 diff --git a/vendor/golang.org/x/text/internal/number/format.go b/vendor/golang.org/x/text/internal/number/format.go index cd94c5dc4..1aadcf407 100644 --- a/vendor/golang.org/x/text/internal/number/format.go +++ b/vendor/golang.org/x/text/internal/number/format.go @@ -394,9 +394,7 @@ func appendScientific(dst []byte, f *Formatter, n *Digits) (b []byte, postPre, p exp := n.Exp - int32(n.Comma) exponential := f.Symbol(SymExponential) if exponential == "E" { - dst = append(dst, "\u202f"...) // NARROW NO-BREAK SPACE dst = append(dst, f.Symbol(SymSuperscriptingExponent)...) - dst = append(dst, "\u202f"...) // NARROW NO-BREAK SPACE dst = f.AppendDigit(dst, 1) dst = f.AppendDigit(dst, 0) switch { diff --git a/vendor/golang.org/x/text/language/parse.go b/vendor/golang.org/x/text/language/parse.go index 4d57222e7..053336e28 100644 --- a/vendor/golang.org/x/text/language/parse.go +++ b/vendor/golang.org/x/text/language/parse.go @@ -59,7 +59,7 @@ func (c CanonType) Parse(s string) (t Tag, err error) { if changed { tt.RemakeString() } - return makeTag(tt), err + return makeTag(tt), nil } // Compose creates a Tag from individual parts, which may be of type Tag, Base, diff --git a/vendor/golang.org/x/time/rate/rate.go b/vendor/golang.org/x/time/rate/rate.go index 8f6c7f493..93a798ab6 100644 --- a/vendor/golang.org/x/time/rate/rate.go +++ b/vendor/golang.org/x/time/rate/rate.go @@ -99,8 +99,9 @@ func (lim *Limiter) Tokens() float64 { // bursts of at most b tokens. func NewLimiter(r Limit, b int) *Limiter { return &Limiter{ - limit: r, - burst: b, + limit: r, + burst: b, + tokens: float64(b), } } @@ -344,18 +345,6 @@ func (lim *Limiter) reserveN(t time.Time, n int, maxFutureReserve time.Duration) tokens: n, timeToAct: t, } - } else if lim.limit == 0 { - var ok bool - if lim.burst >= n { - ok = true - lim.burst -= n - } - return Reservation{ - ok: ok, - lim: lim, - tokens: lim.burst, - timeToAct: t, - } } t, tokens := lim.advance(t) diff --git a/vendor/golang.org/x/tools/go/analysis/passes/assign/assign.go b/vendor/golang.org/x/tools/go/analysis/passes/assign/assign.go index 3bfd50122..0d95fefcb 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/assign/assign.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/assign/assign.go @@ -18,7 +18,6 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" "golang.org/x/tools/go/analysis/passes/internal/analysisutil" - "golang.org/x/tools/go/ast/astutil" "golang.org/x/tools/go/ast/inspector" ) @@ -78,7 +77,7 @@ func run(pass *analysis.Pass) (interface{}, error) { // isMapIndex returns true if e is a map index expression. 
func isMapIndex(info *types.Info, e ast.Expr) bool { - if idx, ok := astutil.Unparen(e).(*ast.IndexExpr); ok { + if idx, ok := ast.Unparen(e).(*ast.IndexExpr); ok { if typ := info.Types[idx.X].Type; typ != nil { _, ok := typ.Underlying().(*types.Map) return ok diff --git a/vendor/golang.org/x/tools/go/analysis/passes/bools/bools.go b/vendor/golang.org/x/tools/go/analysis/passes/bools/bools.go index 564329774..8cec6e822 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/bools/bools.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/bools/bools.go @@ -14,7 +14,6 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" "golang.org/x/tools/go/analysis/passes/internal/analysisutil" - "golang.org/x/tools/go/ast/astutil" "golang.org/x/tools/go/ast/inspector" ) @@ -169,7 +168,7 @@ func (op boolOp) checkSuspect(pass *analysis.Pass, exprs []ast.Expr) { // seen[e] is already true; any newly processed exprs are added to seen. func (op boolOp) split(e ast.Expr, seen map[*ast.BinaryExpr]bool) (exprs []ast.Expr) { for { - e = astutil.Unparen(e) + e = ast.Unparen(e) if b, ok := e.(*ast.BinaryExpr); ok && b.Op == op.tok { seen[b] = true exprs = append(exprs, op.split(b.Y, seen)...) diff --git a/vendor/golang.org/x/tools/go/analysis/passes/cgocall/cgocall.go b/vendor/golang.org/x/tools/go/analysis/passes/cgocall/cgocall.go index 4e8643975..26ec06831 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/cgocall/cgocall.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/cgocall/cgocall.go @@ -19,7 +19,6 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/internal/analysisutil" - "golang.org/x/tools/go/ast/astutil" ) const debug = false @@ -65,7 +64,7 @@ func checkCgo(fset *token.FileSet, f *ast.File, info *types.Info, reportf func(t // Is this a C.f() call? var name string - if sel, ok := astutil.Unparen(call.Fun).(*ast.SelectorExpr); ok { + if sel, ok := ast.Unparen(call.Fun).(*ast.SelectorExpr); ok { if id, ok := sel.X.(*ast.Ident); ok && id.Name == "C" { name = sel.Sel.Name } diff --git a/vendor/golang.org/x/tools/go/analysis/passes/composite/composite.go b/vendor/golang.org/x/tools/go/analysis/passes/composite/composite.go index 8cc6c4a05..f56c3e622 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/composite/composite.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/composite/composite.go @@ -15,7 +15,6 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" "golang.org/x/tools/go/ast/inspector" - "golang.org/x/tools/internal/aliases" "golang.org/x/tools/internal/typeparams" ) @@ -72,7 +71,7 @@ func run(pass *analysis.Pass) (interface{}, error) { return } var structuralTypes []types.Type - switch typ := aliases.Unalias(typ).(type) { + switch typ := types.Unalias(typ).(type) { case *types.TypeParam: terms, err := typeparams.StructuralTerms(typ) if err != nil { @@ -146,7 +145,7 @@ func run(pass *analysis.Pass) (interface{}, error) { // isLocalType reports whether typ belongs to the same package as pass. // TODO(adonovan): local means "internal to a function"; rename to isSamePackageType. 
func isLocalType(pass *analysis.Pass, typ types.Type) bool { - switch x := aliases.Unalias(typ).(type) { + switch x := types.Unalias(typ).(type) { case *types.Struct: // struct literals are local types return true diff --git a/vendor/golang.org/x/tools/go/analysis/passes/copylock/copylock.go b/vendor/golang.org/x/tools/go/analysis/passes/copylock/copylock.go index 8f6e7db6a..03496cb30 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/copylock/copylock.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/copylock/copylock.go @@ -16,10 +16,9 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" "golang.org/x/tools/go/analysis/passes/internal/analysisutil" - "golang.org/x/tools/go/ast/astutil" "golang.org/x/tools/go/ast/inspector" - "golang.org/x/tools/internal/aliases" "golang.org/x/tools/internal/typeparams" + "golang.org/x/tools/internal/versions" ) const Doc = `check for locks erroneously passed by value @@ -40,18 +39,25 @@ var Analyzer = &analysis.Analyzer{ func run(pass *analysis.Pass) (interface{}, error) { inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) + var goversion string // effective file version ("" => unknown) nodeFilter := []ast.Node{ (*ast.AssignStmt)(nil), (*ast.CallExpr)(nil), (*ast.CompositeLit)(nil), + (*ast.File)(nil), (*ast.FuncDecl)(nil), (*ast.FuncLit)(nil), (*ast.GenDecl)(nil), (*ast.RangeStmt)(nil), (*ast.ReturnStmt)(nil), } - inspect.Preorder(nodeFilter, func(node ast.Node) { + inspect.WithStack(nodeFilter, func(node ast.Node, push bool, stack []ast.Node) bool { + if !push { + return false + } switch node := node.(type) { + case *ast.File: + goversion = versions.FileVersion(pass.TypesInfo, node) case *ast.RangeStmt: checkCopyLocksRange(pass, node) case *ast.FuncDecl: @@ -61,7 +67,7 @@ func run(pass *analysis.Pass) (interface{}, error) { case *ast.CallExpr: checkCopyLocksCallExpr(pass, node) case *ast.AssignStmt: - checkCopyLocksAssign(pass, node) + checkCopyLocksAssign(pass, node, goversion, parent(stack)) case *ast.GenDecl: checkCopyLocksGenDecl(pass, node) case *ast.CompositeLit: @@ -69,16 +75,36 @@ func run(pass *analysis.Pass) (interface{}, error) { case *ast.ReturnStmt: checkCopyLocksReturnStmt(pass, node) } + return true }) return nil, nil } // checkCopyLocksAssign checks whether an assignment // copies a lock. -func checkCopyLocksAssign(pass *analysis.Pass, as *ast.AssignStmt) { - for i, x := range as.Rhs { +func checkCopyLocksAssign(pass *analysis.Pass, assign *ast.AssignStmt, goversion string, parent ast.Node) { + lhs := assign.Lhs + for i, x := range assign.Rhs { if path := lockPathRhs(pass, x); path != nil { - pass.ReportRangef(x, "assignment copies lock value to %v: %v", analysisutil.Format(pass.Fset, as.Lhs[i]), path) + pass.ReportRangef(x, "assignment copies lock value to %v: %v", analysisutil.Format(pass.Fset, assign.Lhs[i]), path) + lhs = nil // An lhs has been reported. We prefer the assignment warning and do not report twice. + } + } + + // After GoVersion 1.22, loop variables are implicitly copied on each iteration. + // So a for statement may inadvertently copy a lock when any of the + // iteration variables contain locks. 
+ if assign.Tok == token.DEFINE && versions.AtLeast(goversion, versions.Go1_22) { + if parent, _ := parent.(*ast.ForStmt); parent != nil && parent.Init == assign { + for _, l := range lhs { + if id, ok := l.(*ast.Ident); ok && id.Name != "_" { + if obj := pass.TypesInfo.Defs[id]; obj != nil && obj.Type() != nil { + if path := lockPath(pass.Pkg, obj.Type(), nil); path != nil { + pass.ReportRangef(l, "for loop iteration copies lock value to %v: %v", analysisutil.Format(pass.Fset, l), path) + } + } + } + } } } } @@ -225,7 +251,7 @@ func (path typePath) String() string { } func lockPathRhs(pass *analysis.Pass, x ast.Expr) typePath { - x = astutil.Unparen(x) // ignore parens on rhs + x = ast.Unparen(x) // ignore parens on rhs if _, ok := x.(*ast.CompositeLit); ok { return nil @@ -235,7 +261,7 @@ func lockPathRhs(pass *analysis.Pass, x ast.Expr) typePath { return nil } if star, ok := x.(*ast.StarExpr); ok { - if _, ok := astutil.Unparen(star.X).(*ast.CallExpr); ok { + if _, ok := ast.Unparen(star.X).(*ast.CallExpr); ok { // A call may return a pointer to a zero value. return nil } @@ -259,7 +285,7 @@ func lockPath(tpkg *types.Package, typ types.Type, seen map[types.Type]bool) typ } seen[typ] = true - if tpar, ok := aliases.Unalias(typ).(*types.TypeParam); ok { + if tpar, ok := types.Unalias(typ).(*types.TypeParam); ok { terms, err := typeparams.StructuralTerms(tpar) if err != nil { return nil // invalid type @@ -340,6 +366,14 @@ func lockPath(tpkg *types.Package, typ types.Type, seen map[types.Type]bool) typ return nil } +// parent returns the second from the last node on stack if it exists. +func parent(stack []ast.Node) ast.Node { + if len(stack) >= 2 { + return stack[len(stack)-2] + } + return nil +} + var lockerType *types.Interface // Construct a sync.Locker interface type. diff --git a/vendor/golang.org/x/tools/go/analysis/passes/deepequalerrors/deepequalerrors.go b/vendor/golang.org/x/tools/go/analysis/passes/deepequalerrors/deepequalerrors.go index 95cd9a061..70b5e39ec 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/deepequalerrors/deepequalerrors.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/deepequalerrors/deepequalerrors.go @@ -15,7 +15,6 @@ import ( "golang.org/x/tools/go/analysis/passes/internal/analysisutil" "golang.org/x/tools/go/ast/inspector" "golang.org/x/tools/go/types/typeutil" - "golang.org/x/tools/internal/aliases" ) const Doc = `check for calls of reflect.DeepEqual on error values @@ -102,7 +101,7 @@ func containsError(typ types.Type) bool { return true } } - case *types.Named, *aliases.Alias: + case *types.Named, *types.Alias: return check(t.Underlying()) // We list the remaining valid type kinds for completeness. diff --git a/vendor/golang.org/x/tools/go/analysis/passes/fieldalignment/fieldalignment.go b/vendor/golang.org/x/tools/go/analysis/passes/fieldalignment/fieldalignment.go index 8af717b4c..93fa39140 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/fieldalignment/fieldalignment.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/fieldalignment/fieldalignment.go @@ -53,7 +53,7 @@ so the analyzer is not included in typical suites such as vet or gopls. 
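Returning to the copylock change a few hunks up: with Go 1.22 loop-variable semantics, variables declared in a for statement's init clause are copied on every iteration, so declaring a lock there is now reported. A hypothetical input that the updated analyzer would flag (the message wording follows the format string above):

package p

import "sync"

func f() {
	// With an effective language version of go1.22 or newer, vet reports
	// roughly: "for loop iteration copies lock value to mu: sync.Mutex".
	for mu := (sync.Mutex{}); ; {
		mu.Lock()
		mu.Unlock()
		break
	}
}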
Use this standalone command to run it on your code: $ go install golang.org/x/tools/go/analysis/passes/fieldalignment/cmd/fieldalignment@latest - $ go fieldalignment [packages] + $ fieldalignment [packages] ` diff --git a/vendor/golang.org/x/tools/go/analysis/passes/httpresponse/httpresponse.go b/vendor/golang.org/x/tools/go/analysis/passes/httpresponse/httpresponse.go index e1ca9b2f5..91ebe29de 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/httpresponse/httpresponse.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/httpresponse/httpresponse.go @@ -14,7 +14,6 @@ import ( "golang.org/x/tools/go/analysis/passes/inspect" "golang.org/x/tools/go/analysis/passes/internal/analysisutil" "golang.org/x/tools/go/ast/inspector" - "golang.org/x/tools/internal/aliases" "golang.org/x/tools/internal/typesinternal" ) @@ -137,7 +136,7 @@ func isHTTPFuncOrMethodOnClient(info *types.Info, expr *ast.CallExpr) bool { if analysisutil.IsNamedType(typ, "net/http", "Client") { return true // method on http.Client. } - ptr, ok := aliases.Unalias(typ).(*types.Pointer) + ptr, ok := types.Unalias(typ).(*types.Pointer) return ok && analysisutil.IsNamedType(ptr.Elem(), "net/http", "Client") // method on *http.Client. } diff --git a/vendor/golang.org/x/tools/go/analysis/passes/internal/analysisutil/util.go b/vendor/golang.org/x/tools/go/analysis/passes/internal/analysisutil/util.go index f7f071dc8..a4fa8d31c 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/internal/analysisutil/util.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/internal/analysisutil/util.go @@ -15,7 +15,6 @@ import ( "os" "golang.org/x/tools/go/analysis" - "golang.org/x/tools/internal/aliases" "golang.org/x/tools/internal/analysisinternal" ) @@ -121,7 +120,7 @@ func Imports(pkg *types.Package, path string) bool { // This function avoids allocating the concatenation of "pkg.Name", // which is important for the performance of syntax matching. func IsNamedType(t types.Type, pkgPath string, names ...string) bool { - n, ok := aliases.Unalias(t).(*types.Named) + n, ok := types.Unalias(t).(*types.Named) if !ok { return false } diff --git a/vendor/golang.org/x/tools/go/analysis/passes/nilness/nilness.go b/vendor/golang.org/x/tools/go/analysis/passes/nilness/nilness.go index 0c7e9c5d0..faaf12a93 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/nilness/nilness.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/nilness/nilness.go @@ -14,7 +14,6 @@ import ( "golang.org/x/tools/go/analysis/passes/buildssa" "golang.org/x/tools/go/analysis/passes/internal/analysisutil" "golang.org/x/tools/go/ssa" - "golang.org/x/tools/internal/aliases" "golang.org/x/tools/internal/typeparams" ) @@ -300,7 +299,7 @@ func nilnessOf(stack []fact, v ssa.Value) nilness { } case *ssa.MakeInterface: // A MakeInterface is non-nil unless its operand is a type parameter. 
- tparam, ok := aliases.Unalias(v.X.Type()).(*types.TypeParam) + tparam, ok := types.Unalias(v.X.Type()).(*types.TypeParam) if !ok { return isnonnil } diff --git a/vendor/golang.org/x/tools/go/analysis/passes/printf/printf.go b/vendor/golang.org/x/tools/go/analysis/passes/printf/printf.go index b3453f8fc..2d79d0b03 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/printf/printf.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/printf/printf.go @@ -24,7 +24,6 @@ import ( "golang.org/x/tools/go/analysis/passes/internal/analysisutil" "golang.org/x/tools/go/ast/inspector" "golang.org/x/tools/go/types/typeutil" - "golang.org/x/tools/internal/aliases" "golang.org/x/tools/internal/typeparams" ) @@ -159,10 +158,11 @@ func maybePrintfWrapper(info *types.Info, decl ast.Decl) *printfWrapper { params := sig.Params() nparams := params.Len() // variadic => nonzero + // Check final parameter is "args ...interface{}". args := params.At(nparams - 1) - iface, ok := args.Type().(*types.Slice).Elem().(*types.Interface) + iface, ok := types.Unalias(args.Type().(*types.Slice).Elem()).(*types.Interface) if !ok || !iface.Empty() { - return nil // final (args) param is not ...interface{} + return nil } // Is second last param 'format string'? @@ -514,7 +514,7 @@ func checkPrintf(pass *analysis.Pass, kind Kind, call *ast.CallExpr, fn *types.F // non-constant format string and no arguments: // if msg contains "%", misformatting occurs. // Report the problem and suggest a fix: fmt.Printf("%s", msg). - if idx == len(call.Args)-1 { + if !suppressNonconstants && idx == len(call.Args)-1 { pass.Report(analysis.Diagnostic{ Pos: formatArg.Pos(), End: formatArg.End(), @@ -1012,7 +1012,7 @@ func checkPrint(pass *analysis.Pass, call *ast.CallExpr, fn *types.Func) { typ := params.At(firstArg).Type() typ = typ.(*types.Slice).Elem() - it, ok := aliases.Unalias(typ).(*types.Interface) + it, ok := types.Unalias(typ).(*types.Interface) if !ok || !it.Empty() { // Skip variadic functions accepting non-interface{} args. return @@ -1100,3 +1100,12 @@ func (ss stringSet) Set(flag string) error { } return nil } + +// suppressNonconstants suppresses reporting printf calls with +// non-constant formatting strings (proposal #60529) when true. +// +// This variable is to allow for staging the transition to newer +// versions of x/tools by vendoring. +// +// Remove this after the 1.24 release. +var suppressNonconstants bool diff --git a/vendor/golang.org/x/tools/go/analysis/passes/printf/types.go b/vendor/golang.org/x/tools/go/analysis/passes/printf/types.go index 017c8a247..f7e50f98a 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/printf/types.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/printf/types.go @@ -10,7 +10,6 @@ import ( "go/types" "golang.org/x/tools/go/analysis" - "golang.org/x/tools/internal/aliases" "golang.org/x/tools/internal/typeparams" ) @@ -73,7 +72,7 @@ func (m *argMatcher) match(typ types.Type, topLevel bool) bool { return true } - if typ, _ := aliases.Unalias(typ).(*types.TypeParam); typ != nil { + if typ, _ := types.Unalias(typ).(*types.TypeParam); typ != nil { // Avoid infinite recursion through type parameters. 
if m.seen[typ] { return true @@ -276,7 +275,7 @@ func (m *argMatcher) match(typ types.Type, topLevel bool) bool { } func isConvertibleToString(typ types.Type) bool { - if bt, ok := aliases.Unalias(typ).(*types.Basic); ok && bt.Kind() == types.UntypedNil { + if bt, ok := types.Unalias(typ).(*types.Basic); ok && bt.Kind() == types.UntypedNil { // We explicitly don't want untyped nil, which is // convertible to both of the interfaces below, as it // would just panic anyway. diff --git a/vendor/golang.org/x/tools/go/analysis/passes/shift/shift.go b/vendor/golang.org/x/tools/go/analysis/passes/shift/shift.go index d01eb1eeb..759ed0043 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/shift/shift.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/shift/shift.go @@ -21,7 +21,6 @@ import ( "golang.org/x/tools/go/analysis/passes/inspect" "golang.org/x/tools/go/analysis/passes/internal/analysisutil" "golang.org/x/tools/go/ast/inspector" - "golang.org/x/tools/internal/aliases" "golang.org/x/tools/internal/typeparams" ) @@ -100,7 +99,7 @@ func checkLongShift(pass *analysis.Pass, node ast.Node, x, y ast.Expr) { return } var structuralTypes []types.Type - switch t := aliases.Unalias(t).(type) { + switch t := types.Unalias(t).(type) { case *types.TypeParam: terms, err := typeparams.StructuralTerms(t) if err != nil { diff --git a/vendor/golang.org/x/tools/go/analysis/passes/stringintconv/string.go b/vendor/golang.org/x/tools/go/analysis/passes/stringintconv/string.go index a3afbf696..108600a2b 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/stringintconv/string.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/stringintconv/string.go @@ -15,7 +15,6 @@ import ( "golang.org/x/tools/go/analysis/passes/inspect" "golang.org/x/tools/go/analysis/passes/internal/analysisutil" "golang.org/x/tools/go/ast/inspector" - "golang.org/x/tools/internal/aliases" "golang.org/x/tools/internal/analysisinternal" "golang.org/x/tools/internal/typeparams" ) @@ -248,7 +247,7 @@ func run(pass *analysis.Pass) (interface{}, error) { func structuralTypes(t types.Type) ([]types.Type, error) { var structuralTypes []types.Type - if tp, ok := aliases.Unalias(t).(*types.TypeParam); ok { + if tp, ok := types.Unalias(t).(*types.TypeParam); ok { terms, err := typeparams.StructuralTerms(tp) if err != nil { return nil, err diff --git a/vendor/golang.org/x/tools/go/analysis/passes/testinggoroutine/testinggoroutine.go b/vendor/golang.org/x/tools/go/analysis/passes/testinggoroutine/testinggoroutine.go index 828f95bc8..effcdc570 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/testinggoroutine/testinggoroutine.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/testinggoroutine/testinggoroutine.go @@ -14,10 +14,8 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" "golang.org/x/tools/go/analysis/passes/internal/analysisutil" - "golang.org/x/tools/go/ast/astutil" "golang.org/x/tools/go/ast/inspector" "golang.org/x/tools/go/types/typeutil" - "golang.org/x/tools/internal/aliases" ) //go:embed doc.go @@ -186,7 +184,7 @@ func withinScope(scope ast.Node, x *types.Var) bool { func goAsyncCall(info *types.Info, goStmt *ast.GoStmt, toDecl func(*types.Func) *ast.FuncDecl) *asyncCall { call := goStmt.Call - fun := astutil.Unparen(call.Fun) + fun := ast.Unparen(call.Fun) if id := funcIdent(fun); id != nil { if lit := funcLitInScope(id); lit != nil { return &asyncCall{region: lit, async: goStmt, scope: nil, fun: fun} @@ -213,7 +211,7 @@ func tRunAsyncCall(info *types.Info, call 
*ast.CallExpr) *asyncCall { return nil } - fun := astutil.Unparen(call.Args[1]) + fun := ast.Unparen(call.Args[1]) if lit, ok := fun.(*ast.FuncLit); ok { // function lit? return &asyncCall{region: lit, async: call, scope: lit, fun: fun} } @@ -243,7 +241,7 @@ var forbidden = []string{ // Returns (nil, nil, nil) if call is not of this form. func forbiddenMethod(info *types.Info, call *ast.CallExpr) (*types.Var, *types.Selection, *types.Func) { // Compare to typeutil.StaticCallee. - fun := astutil.Unparen(call.Fun) + fun := ast.Unparen(call.Fun) selExpr, ok := fun.(*ast.SelectorExpr) if !ok { return nil, nil, nil @@ -254,7 +252,7 @@ func forbiddenMethod(info *types.Info, call *ast.CallExpr) (*types.Var, *types.S } var x *types.Var - if id, ok := astutil.Unparen(selExpr.X).(*ast.Ident); ok { + if id, ok := ast.Unparen(selExpr.X).(*ast.Ident); ok { x, _ = info.Uses[id].(*types.Var) } if x == nil { @@ -271,7 +269,7 @@ func forbiddenMethod(info *types.Info, call *ast.CallExpr) (*types.Var, *types.S func formatMethod(sel *types.Selection, fn *types.Func) string { var ptr string rtype := sel.Recv() - if p, ok := aliases.Unalias(rtype).(*types.Pointer); ok { + if p, ok := types.Unalias(rtype).(*types.Pointer); ok { ptr = "*" rtype = p.Elem() } diff --git a/vendor/golang.org/x/tools/go/analysis/passes/testinggoroutine/util.go b/vendor/golang.org/x/tools/go/analysis/passes/testinggoroutine/util.go index ad815f190..8c7a51ca5 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/testinggoroutine/util.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/testinggoroutine/util.go @@ -8,7 +8,6 @@ import ( "go/ast" "go/types" - "golang.org/x/tools/go/ast/astutil" "golang.org/x/tools/internal/typeparams" ) @@ -56,7 +55,7 @@ func isMethodNamed(f *types.Func, pkgPath string, names ...string) bool { } func funcIdent(fun ast.Expr) *ast.Ident { - switch fun := astutil.Unparen(fun).(type) { + switch fun := ast.Unparen(fun).(type) { case *ast.IndexExpr, *ast.IndexListExpr: x, _, _, _ := typeparams.UnpackIndexExpr(fun) // necessary? id, _ := x.(*ast.Ident) diff --git a/vendor/golang.org/x/tools/go/analysis/passes/unsafeptr/unsafeptr.go b/vendor/golang.org/x/tools/go/analysis/passes/unsafeptr/unsafeptr.go index 14e4a6c1e..272ae7fe0 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/unsafeptr/unsafeptr.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/unsafeptr/unsafeptr.go @@ -15,9 +15,7 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" "golang.org/x/tools/go/analysis/passes/internal/analysisutil" - "golang.org/x/tools/go/ast/astutil" "golang.org/x/tools/go/ast/inspector" - "golang.org/x/tools/internal/aliases" ) //go:embed doc.go @@ -70,7 +68,7 @@ func isSafeUintptr(info *types.Info, x ast.Expr) bool { // Check unsafe.Pointer safety rules according to // https://golang.org/pkg/unsafe/#Pointer. - switch x := astutil.Unparen(x).(type) { + switch x := ast.Unparen(x).(type) { case *ast.SelectorExpr: // "(6) Conversion of a reflect.SliceHeader or // reflect.StringHeader Data field to or from Pointer." @@ -89,7 +87,7 @@ func isSafeUintptr(info *types.Info, x ast.Expr) bool { // by the time we get to the conversion at the end. // For now approximate by saying that *Header is okay // but Header is not. 
- pt, ok := aliases.Unalias(info.Types[x.X].Type).(*types.Pointer) + pt, ok := types.Unalias(info.Types[x.X].Type).(*types.Pointer) if ok && isReflectHeader(pt.Elem()) { return true } @@ -119,7 +117,7 @@ func isSafeUintptr(info *types.Info, x ast.Expr) bool { // isSafeArith reports whether x is a pointer arithmetic expression that is safe // to convert to unsafe.Pointer. func isSafeArith(info *types.Info, x ast.Expr) bool { - switch x := astutil.Unparen(x).(type) { + switch x := ast.Unparen(x).(type) { case *ast.CallExpr: // Base case: initial conversion from unsafe.Pointer to uintptr. return len(x.Args) == 1 && diff --git a/vendor/golang.org/x/tools/go/analysis/passes/unusedresult/unusedresult.go b/vendor/golang.org/x/tools/go/analysis/passes/unusedresult/unusedresult.go index 76f42b052..c27d26dd6 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/unusedresult/unusedresult.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/unusedresult/unusedresult.go @@ -24,7 +24,6 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" "golang.org/x/tools/go/analysis/passes/internal/analysisutil" - "golang.org/x/tools/go/ast/astutil" "golang.org/x/tools/go/ast/inspector" "golang.org/x/tools/go/types/typeutil" ) @@ -101,7 +100,7 @@ func run(pass *analysis.Pass) (interface{}, error) { (*ast.ExprStmt)(nil), } inspect.Preorder(nodeFilter, func(n ast.Node) { - call, ok := astutil.Unparen(n.(*ast.ExprStmt).X).(*ast.CallExpr) + call, ok := ast.Unparen(n.(*ast.ExprStmt).X).(*ast.CallExpr) if !ok { return // not a call statement } diff --git a/vendor/golang.org/x/tools/go/analysis/passes/unusedwrite/unusedwrite.go b/vendor/golang.org/x/tools/go/analysis/passes/unusedwrite/unusedwrite.go index a99c54833..3f651fc26 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/unusedwrite/unusedwrite.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/unusedwrite/unusedwrite.go @@ -12,7 +12,6 @@ import ( "golang.org/x/tools/go/analysis/passes/buildssa" "golang.org/x/tools/go/analysis/passes/internal/analysisutil" "golang.org/x/tools/go/ssa" - "golang.org/x/tools/internal/aliases" "golang.org/x/tools/internal/typeparams" ) @@ -32,7 +31,6 @@ var Analyzer = &analysis.Analyzer{ func run(pass *analysis.Pass) (interface{}, error) { ssainput := pass.ResultOf[buildssa.Analyzer].(*buildssa.SSA) for _, fn := range ssainput.SrcFuncs { - // TODO(taking): Iterate over fn._Instantiations() once exported. If so, have 1 report per Pos(). reports := checkStores(fn) for _, store := range reports { switch addr := store.Addr.(type) { @@ -143,7 +141,7 @@ func hasStructOrArrayType(v ssa.Value) bool { // func (t T) f() { ...} // the receiver object is of type *T: // t0 = local T (t) *T - if tp, ok := aliases.Unalias(alloc.Type()).(*types.Pointer); ok { + if tp, ok := types.Unalias(alloc.Type()).(*types.Pointer); ok { return isStructOrArray(tp.Elem()) } return false diff --git a/vendor/golang.org/x/tools/go/ast/astutil/util.go b/vendor/golang.org/x/tools/go/ast/astutil/util.go index 6bdcf70ac..ca71e3e10 100644 --- a/vendor/golang.org/x/tools/go/ast/astutil/util.go +++ b/vendor/golang.org/x/tools/go/ast/astutil/util.go @@ -7,13 +7,5 @@ package astutil import "go/ast" // Unparen returns e with any enclosing parentheses stripped. -// TODO(adonovan): use go1.22's ast.Unparen. -func Unparen(e ast.Expr) ast.Expr { - for { - p, ok := e.(*ast.ParenExpr) - if !ok { - return e - } - e = p.X - } -} +// Deprecated: use [ast.Unparen]. 
+func Unparen(e ast.Expr) ast.Expr { return ast.Unparen(e) } diff --git a/vendor/golang.org/x/tools/go/ast/inspector/inspector.go b/vendor/golang.org/x/tools/go/ast/inspector/inspector.go index 1fc1de0bd..0e0ba4c03 100644 --- a/vendor/golang.org/x/tools/go/ast/inspector/inspector.go +++ b/vendor/golang.org/x/tools/go/ast/inspector/inspector.go @@ -73,6 +73,15 @@ func (in *Inspector) Preorder(types []ast.Node, f func(ast.Node)) { // check, Preorder is almost twice as fast as Nodes. The two // features seem to contribute similar slowdowns (~1.4x each). + // This function is equivalent to the PreorderSeq call below, + // but to avoid the additional dynamic call (which adds 13-35% + // to the benchmarks), we expand it out. + // + // in.PreorderSeq(types...)(func(n ast.Node) bool { + // f(n) + // return true + // }) + mask := maskOf(types) for i := 0; i < len(in.events); { ev := in.events[i] diff --git a/vendor/golang.org/x/tools/go/ast/inspector/iter.go b/vendor/golang.org/x/tools/go/ast/inspector/iter.go new file mode 100644 index 000000000..b7e959114 --- /dev/null +++ b/vendor/golang.org/x/tools/go/ast/inspector/iter.go @@ -0,0 +1,85 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.23 + +package inspector + +import ( + "go/ast" + "iter" +) + +// PreorderSeq returns an iterator that visits all the +// nodes of the files supplied to New in depth-first order. +// It visits each node n before n's children. +// The complete traversal sequence is determined by ast.Inspect. +// +// The types argument, if non-empty, enables type-based +// filtering of events: only nodes whose type matches an +// element of the types slice are included in the sequence. +func (in *Inspector) PreorderSeq(types ...ast.Node) iter.Seq[ast.Node] { + + // This implementation is identical to Preorder, + // except that it supports breaking out of the loop. + + return func(yield func(ast.Node) bool) { + mask := maskOf(types) + for i := 0; i < len(in.events); { + ev := in.events[i] + if ev.index > i { + // push + if ev.typ&mask != 0 { + if !yield(ev.node) { + break + } + } + pop := ev.index + if in.events[pop].typ&mask == 0 { + // Subtrees do not contain types: skip them and pop. + i = pop + 1 + continue + } + } + i++ + } + } +} + +// All[N] returns an iterator over all the nodes of type N. +// N must be a pointer-to-struct type that implements ast.Node. +// +// Example: +// +// for call := range All[*ast.CallExpr](in) { ... } +func All[N interface { + *S + ast.Node +}, S any](in *Inspector) iter.Seq[N] { + + // To avoid additional dynamic call overheads, + // we duplicate rather than call the logic of PreorderSeq. + + mask := typeOf((N)(nil)) + return func(yield func(N) bool) { + for i := 0; i < len(in.events); { + ev := in.events[i] + if ev.index > i { + // push + if ev.typ&mask != 0 { + if !yield(ev.node.(N)) { + break + } + } + pop := ev.index + if in.events[pop].typ&mask == 0 { + // Subtrees do not contain types: skip them and pop. + i = pop + 1 + continue + } + } + i++ + } + } +} diff --git a/vendor/golang.org/x/tools/go/packages/doc.go b/vendor/golang.org/x/tools/go/packages/doc.go index 3531ac8f5..f1931d10e 100644 --- a/vendor/golang.org/x/tools/go/packages/doc.go +++ b/vendor/golang.org/x/tools/go/packages/doc.go @@ -64,7 +64,7 @@ graph using the Imports fields. The Load function can be configured by passing a pointer to a Config as the first argument. 
A nil Config is equivalent to the zero Config, which -causes Load to run in LoadFiles mode, collecting minimal information. +causes Load to run in [LoadFiles] mode, collecting minimal information. See the documentation for type Config for details. As noted earlier, the Config.Mode controls the amount of detail @@ -72,14 +72,14 @@ reported about the loaded packages. See the documentation for type LoadMode for details. Most tools should pass their command-line arguments (after any flags) -uninterpreted to [Load], so that it can interpret them +uninterpreted to Load, so that it can interpret them according to the conventions of the underlying build system. See the Example function for typical usage. # The driver protocol -[Load] may be used to load Go packages even in Go projects that use +Load may be used to load Go packages even in Go projects that use alternative build systems, by installing an appropriate "driver" program for the build system and specifying its location in the GOPACKAGESDRIVER environment variable. @@ -97,6 +97,15 @@ JSON-encoded [DriverRequest] message providing additional information is written to the driver's standard input. The driver must write a JSON-encoded [DriverResponse] message to its standard output. (This message differs from the JSON schema produced by 'go list'.) + +The value of the PWD environment variable seen by the driver process +is the preferred name of its working directory. (The working directory +may have other aliases due to symbolic links; see the comment on the +Dir field of [exec.Cmd] for related information.) +When the driver process emits in its response the name of a file +that is a descendant of this directory, it must use an absolute path +that has the value of PWD as a prefix, to ensure that the returned +filenames satisfy the original query. */ package packages // import "golang.org/x/tools/go/packages" diff --git a/vendor/golang.org/x/tools/go/packages/external.go b/vendor/golang.org/x/tools/go/packages/external.go index c2b4b711b..8f7afcb5d 100644 --- a/vendor/golang.org/x/tools/go/packages/external.go +++ b/vendor/golang.org/x/tools/go/packages/external.go @@ -82,7 +82,7 @@ type DriverResponse struct { type driver func(cfg *Config, patterns ...string) (*DriverResponse, error) // findExternalDriver returns the file path of a tool that supplies -// the build system package structure, or "" if not found." +// the build system package structure, or "" if not found. // If GOPACKAGESDRIVER is set in the environment findExternalTool returns its // value, otherwise it searches for a binary named gopackagesdriver on the PATH. 
func findExternalDriver(cfg *Config) driver { diff --git a/vendor/golang.org/x/tools/go/packages/loadmode_string.go b/vendor/golang.org/x/tools/go/packages/loadmode_string.go index 5c080d21b..5fcad6ea6 100644 --- a/vendor/golang.org/x/tools/go/packages/loadmode_string.go +++ b/vendor/golang.org/x/tools/go/packages/loadmode_string.go @@ -9,49 +9,46 @@ import ( "strings" ) -var allModes = []LoadMode{ - NeedName, - NeedFiles, - NeedCompiledGoFiles, - NeedImports, - NeedDeps, - NeedExportFile, - NeedTypes, - NeedSyntax, - NeedTypesInfo, - NeedTypesSizes, +var modes = [...]struct { + mode LoadMode + name string +}{ + {NeedName, "NeedName"}, + {NeedFiles, "NeedFiles"}, + {NeedCompiledGoFiles, "NeedCompiledGoFiles"}, + {NeedImports, "NeedImports"}, + {NeedDeps, "NeedDeps"}, + {NeedExportFile, "NeedExportFile"}, + {NeedTypes, "NeedTypes"}, + {NeedSyntax, "NeedSyntax"}, + {NeedTypesInfo, "NeedTypesInfo"}, + {NeedTypesSizes, "NeedTypesSizes"}, + {NeedModule, "NeedModule"}, + {NeedEmbedFiles, "NeedEmbedFiles"}, + {NeedEmbedPatterns, "NeedEmbedPatterns"}, } -var modeStrings = []string{ - "NeedName", - "NeedFiles", - "NeedCompiledGoFiles", - "NeedImports", - "NeedDeps", - "NeedExportFile", - "NeedTypes", - "NeedSyntax", - "NeedTypesInfo", - "NeedTypesSizes", -} - -func (mod LoadMode) String() string { - m := mod - if m == 0 { +func (mode LoadMode) String() string { + if mode == 0 { return "LoadMode(0)" } var out []string - for i, x := range allModes { - if x > m { - break + // named bits + for _, item := range modes { + if (mode & item.mode) != 0 { + mode ^= item.mode + out = append(out, item.name) } - if (m & x) != 0 { - out = append(out, modeStrings[i]) - m = m ^ x + } + // unnamed residue + if mode != 0 { + if out == nil { + return fmt.Sprintf("LoadMode(%#x)", int(mode)) } + out = append(out, fmt.Sprintf("%#x", int(mode))) } - if m != 0 { - out = append(out, "Unknown") + if len(out) == 1 { + return out[0] } - return fmt.Sprintf("LoadMode(%s)", strings.Join(out, "|")) + return "(" + strings.Join(out, "|") + ")" } diff --git a/vendor/golang.org/x/tools/go/packages/packages.go b/vendor/golang.org/x/tools/go/packages/packages.go index 0b6bfaff8..f227f1bab 100644 --- a/vendor/golang.org/x/tools/go/packages/packages.go +++ b/vendor/golang.org/x/tools/go/packages/packages.go @@ -46,10 +46,10 @@ import ( // // Unfortunately there are a number of open bugs related to // interactions among the LoadMode bits: -// - https://github.com/golang/go/issues/56633 -// - https://github.com/golang/go/issues/56677 -// - https://github.com/golang/go/issues/58726 -// - https://github.com/golang/go/issues/63517 +// - https://github.com/golang/go/issues/56633 +// - https://github.com/golang/go/issues/56677 +// - https://github.com/golang/go/issues/58726 +// - https://github.com/golang/go/issues/63517 type LoadMode int const ( @@ -103,25 +103,37 @@ const ( // NeedEmbedPatterns adds EmbedPatterns. NeedEmbedPatterns + + // Be sure to update loadmode_string.go when adding new items! ) const ( + // LoadFiles loads the name and file names for the initial packages. + // // Deprecated: LoadFiles exists for historical compatibility // and should not be used. Please directly specify the needed fields using the Need values. LoadFiles = NeedName | NeedFiles | NeedCompiledGoFiles + // LoadImports loads the name, file names, and import mapping for the initial packages. + // // Deprecated: LoadImports exists for historical compatibility // and should not be used. Please directly specify the needed fields using the Need values. 
LoadImports = LoadFiles | NeedImports + // LoadTypes loads exported type information for the initial packages. + // // Deprecated: LoadTypes exists for historical compatibility // and should not be used. Please directly specify the needed fields using the Need values. LoadTypes = LoadImports | NeedTypes | NeedTypesSizes + // LoadSyntax loads typed syntax for the initial packages. + // // Deprecated: LoadSyntax exists for historical compatibility // and should not be used. Please directly specify the needed fields using the Need values. LoadSyntax = LoadTypes | NeedSyntax | NeedTypesInfo + // LoadAllSyntax loads typed syntax for the initial packages and all dependencies. + // // Deprecated: LoadAllSyntax exists for historical compatibility // and should not be used. Please directly specify the needed fields using the Need values. LoadAllSyntax = LoadSyntax | NeedDeps @@ -236,14 +248,13 @@ type Config struct { // Load loads and returns the Go packages named by the given patterns. // -// Config specifies loading options; -// nil behaves the same as an empty Config. +// The cfg parameter specifies loading options; nil behaves the same as an empty [Config]. // // The [Config.Mode] field is a set of bits that determine what kinds // of information should be computed and returned. Modes that require // more information tend to be slower. See [LoadMode] for details // and important caveats. Its zero value is equivalent to -// NeedName | NeedFiles | NeedCompiledGoFiles. +// [NeedName] | [NeedFiles] | [NeedCompiledGoFiles]. // // Each call to Load returns a new set of [Package] instances. // The Packages and their Imports form a directed acyclic graph. @@ -260,7 +271,7 @@ type Config struct { // Errors associated with a particular package are recorded in the // corresponding Package's Errors list, and do not cause Load to // return an error. Clients may need to handle such errors before -// proceeding with further analysis. The PrintErrors function is +// proceeding with further analysis. The [PrintErrors] function is // provided for convenient display of all errors. func Load(cfg *Config, patterns ...string) ([]*Package, error) { ld := newLoader(cfg) @@ -763,6 +774,7 @@ func newLoader(cfg *Config) *loader { // because we load source if export data is missing. if ld.ParseFile == nil { ld.ParseFile = func(fset *token.FileSet, filename string, src []byte) (*ast.File, error) { + // We implicitly promise to keep doing ast.Object resolution. :( const mode = parser.AllErrors | parser.ParseComments return parser.ParseFile(fset, filename, src, mode) } diff --git a/vendor/golang.org/x/tools/go/ssa/builder.go b/vendor/golang.org/x/tools/go/ssa/builder.go index 55943e45d..b109fbf3c 100644 --- a/vendor/golang.org/x/tools/go/ssa/builder.go +++ b/vendor/golang.org/x/tools/go/ssa/builder.go @@ -82,7 +82,6 @@ import ( "runtime" "sync" - "golang.org/x/tools/internal/aliases" "golang.org/x/tools/internal/typeparams" "golang.org/x/tools/internal/versions" ) @@ -854,7 +853,7 @@ func (b *builder) expr0(fn *Function, e ast.Expr, tv types.TypeAndValue) Value { if types.IsInterface(rt) { // If v may be an interface type I (after instantiating), // we must emit a check that v is non-nil. - if recv, ok := aliases.Unalias(sel.recv).(*types.TypeParam); ok { + if recv, ok := types.Unalias(sel.recv).(*types.TypeParam); ok { // Emit a nil check if any possible instantiation of the // type parameter is an interface type. 
if typeSetOf(recv).Len() > 0 { @@ -2508,7 +2507,7 @@ func (b *builder) rangeFunc(fn *Function, x Value, tk, tv types.Type, rng *ast.R name: fmt.Sprintf("%s$%d", fn.Name(), anonIdx+1), Signature: ysig, Synthetic: "range-over-func yield", - pos: rangePosition(rng), + pos: rng.Range, parent: fn, anonIdx: int32(len(fn.AnonFuncs)), Pkg: fn.Pkg, @@ -2566,6 +2565,8 @@ func (b *builder) rangeFunc(fn *Function, x Value, tk, tv types.Type, rng *ast.R emitJump(fn, done) fn.currentBlock = done + // pop the stack for the range-over-func + fn.targets = fn.targets.tail } // buildYieldResume emits to fn code for how to resume execution once a call to @@ -2967,7 +2968,7 @@ func (b *builder) buildFromSyntax(fn *Function) { func (b *builder) buildYieldFunc(fn *Function) { // See builder.rangeFunc for detailed documentation on how fn is set up. // - // In psuedo-Go this roughly builds: + // In pseudo-Go this roughly builds: // func yield(_k tk, _v tv) bool { // if jump != READY { panic("yield function called after range loop exit") } // jump = BUSY @@ -2998,6 +2999,7 @@ func (b *builder) buildYieldFunc(fn *Function) { } } fn.targets = &targets{ + tail: fn.targets, _continue: ycont, // `break` statement targets fn.parent.targets._break. } @@ -3075,6 +3077,8 @@ func (b *builder) buildYieldFunc(fn *Function) { // unreachable. emitJump(fn, ycont) } + // pop the stack for the yield function + fn.targets = fn.targets.tail // Clean up exits and promote any unresolved exits to fn.parent. for _, e := range fn.exits { @@ -3104,17 +3108,17 @@ func (b *builder) buildYieldFunc(fn *Function) { fn.finishBody() } -// addRuntimeType records t as a runtime type, -// along with all types derivable from it using reflection. +// addMakeInterfaceType records non-interface type t as the type of +// the operand a MakeInterface operation, for [Program.RuntimeTypes]. // -// Acquires prog.runtimeTypesMu. -func addRuntimeType(prog *Program, t types.Type) { - prog.runtimeTypesMu.Lock() - defer prog.runtimeTypesMu.Unlock() - forEachReachable(&prog.MethodSets, t, func(t types.Type) bool { - prev, _ := prog.runtimeTypes.Set(t, true).(bool) - return !prev // already seen? - }) +// Acquires prog.makeInterfaceTypesMu. +func addMakeInterfaceType(prog *Program, t types.Type) { + prog.makeInterfaceTypesMu.Lock() + defer prog.makeInterfaceTypesMu.Unlock() + if prog.makeInterfaceTypes == nil { + prog.makeInterfaceTypes = make(map[types.Type]unit) + } + prog.makeInterfaceTypes[t] = unit{} } // Build calls Package.Build for each package in prog. 
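The builder.go hunk above swaps the eager reachable-type walk for a plain operand set: addMakeInterfaceType now only records the concrete operand type of each MakeInterface under a mutex, and deriving the full set of runtime types is deferred until Program.RuntimeTypes is actually queried (see the methods.go change further down, which notes that many SSA clients never call it). A minimal sketch of that record-now/derive-on-demand shape, using hypothetical names rather than the real ssa types:

// Minimal sketch (hypothetical names, not the real ssa API): record operands
// cheaply while "building", derive the de-duplicated result only on demand.
package main

import (
	"fmt"
	"sort"
	"sync"
)

type program struct {
	mu       sync.Mutex
	operands map[string]struct{} // cheap set filled during building
}

// recordOperand is the hot write path, analogous to addMakeInterfaceType.
func (p *program) recordOperand(t string) {
	p.mu.Lock()
	defer p.mu.Unlock()
	if p.operands == nil {
		p.operands = make(map[string]struct{})
	}
	p.operands[t] = struct{}{}
}

// runtimeTypes is the on-demand read path, analogous to RuntimeTypes:
// callers that never ask never pay for the derivation.
func (p *program) runtimeTypes() []string {
	p.mu.Lock()
	defer p.mu.Unlock()
	var out []string
	for t := range p.operands {
		out = append(out, t, "*"+t) // stand-in for deriving element types
	}
	sort.Strings(out)
	return out
}

func main() {
	var p program
	p.recordOperand("T")
	p.recordOperand("T") // duplicates are harmless; the map de-dupes
	fmt.Println(p.runtimeTypes()) // [*T T]
}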
diff --git a/vendor/golang.org/x/tools/go/ssa/const.go b/vendor/golang.org/x/tools/go/ssa/const.go index 2a4e0dde2..865329bfd 100644 --- a/vendor/golang.org/x/tools/go/ssa/const.go +++ b/vendor/golang.org/x/tools/go/ssa/const.go @@ -14,7 +14,6 @@ import ( "strconv" "strings" - "golang.org/x/tools/internal/aliases" "golang.org/x/tools/internal/typeparams" ) @@ -114,7 +113,7 @@ func zeroString(t types.Type, from *types.Package) string { } case *types.Pointer, *types.Slice, *types.Interface, *types.Chan, *types.Map, *types.Signature: return "nil" - case *types.Named, *aliases.Alias: + case *types.Named, *types.Alias: return zeroString(t.Underlying(), from) case *types.Array, *types.Struct: return relType(t, from) + "{}" diff --git a/vendor/golang.org/x/tools/go/ssa/coretype.go b/vendor/golang.org/x/tools/go/ssa/coretype.go index 8c218f919..d93713422 100644 --- a/vendor/golang.org/x/tools/go/ssa/coretype.go +++ b/vendor/golang.org/x/tools/go/ssa/coretype.go @@ -7,7 +7,6 @@ package ssa import ( "go/types" - "golang.org/x/tools/internal/aliases" "golang.org/x/tools/internal/typeparams" ) @@ -51,7 +50,7 @@ func typeSetOf(typ types.Type) termList { var terms []*types.Term var err error // typeSetOf(t) == typeSetOf(Unalias(t)) - switch typ := aliases.Unalias(typ).(type) { + switch typ := types.Unalias(typ).(type) { case *types.TypeParam: terms, err = typeparams.StructuralTerms(typ) case *types.Union: diff --git a/vendor/golang.org/x/tools/go/ssa/create.go b/vendor/golang.org/x/tools/go/ssa/create.go index 423bce871..2fa3d0757 100644 --- a/vendor/golang.org/x/tools/go/ssa/create.go +++ b/vendor/golang.org/x/tools/go/ssa/create.go @@ -193,11 +193,7 @@ func membersFromDecl(pkg *Package, decl ast.Decl, goversion string) { // // The real work of building SSA form for each function is not done // until a subsequent call to Package.Build. -// -// CreatePackage should not be called after building any package in -// the program. func (prog *Program) CreatePackage(pkg *types.Package, files []*ast.File, info *types.Info, importable bool) *Package { - // TODO(adonovan): assert that no package has yet been built. if pkg == nil { panic("nil pkg") // otherwise pkg.Scope below returns types.Universe! } diff --git a/vendor/golang.org/x/tools/go/ssa/emit.go b/vendor/golang.org/x/tools/go/ssa/emit.go index c664ff85a..176c1e1a7 100644 --- a/vendor/golang.org/x/tools/go/ssa/emit.go +++ b/vendor/golang.org/x/tools/go/ssa/emit.go @@ -249,7 +249,7 @@ func emitConv(f *Function, val Value, typ types.Type) Value { // non-parameterized, as they are the set of runtime types. t := val.Type() if f.typeparams.Len() == 0 || !f.Prog.isParameterized(t) { - addRuntimeType(f.Prog, t) + addMakeInterfaceType(f.Prog, t) } mi := &MakeInterface{X: val} diff --git a/vendor/golang.org/x/tools/go/ssa/func.go b/vendor/golang.org/x/tools/go/ssa/func.go index 2ed63bfd5..010c128a9 100644 --- a/vendor/golang.org/x/tools/go/ssa/func.go +++ b/vendor/golang.org/x/tools/go/ssa/func.go @@ -186,6 +186,20 @@ func targetedBlock(f *Function, tok token.Token) *BasicBlock { return targetedBlock(f.parent, tok) } +// instrs returns an iterator that returns each reachable instruction of the SSA function. 
+// TODO: return an iter.Seq once x/tools is on 1.23 +func (f *Function) instrs() func(yield func(i Instruction) bool) { + return func(yield func(i Instruction) bool) { + for _, block := range f.Blocks { + for _, instr := range block.Instrs { + if !yield(instr) { + return + } + } + } + } +} + // addResultVar adds a result for a variable v to f.results and v to f.returnVars. func (f *Function) addResultVar(v *types.Var) { result := emitLocalVar(f, v) diff --git a/vendor/golang.org/x/tools/go/ssa/methods.go b/vendor/golang.org/x/tools/go/ssa/methods.go index b9560183a..4b116f430 100644 --- a/vendor/golang.org/x/tools/go/ssa/methods.go +++ b/vendor/golang.org/x/tools/go/ssa/methods.go @@ -11,7 +11,7 @@ import ( "go/types" "golang.org/x/tools/go/types/typeutil" - "golang.org/x/tools/internal/aliases" + "golang.org/x/tools/internal/typesinternal" ) // MethodValue returns the Function implementing method sel, building @@ -158,124 +158,23 @@ type methodSet struct { // // Thread-safe. // -// Acquires prog.runtimeTypesMu. +// Acquires prog.makeInterfaceTypesMu. func (prog *Program) RuntimeTypes() []types.Type { - prog.runtimeTypesMu.Lock() - defer prog.runtimeTypesMu.Unlock() - return prog.runtimeTypes.Keys() -} - -// forEachReachable calls f for type T and each type reachable from -// its type through reflection. -// -// The function f must use memoization to break cycles and -// return false when the type has already been visited. -// -// TODO(adonovan): publish in typeutil and share with go/callgraph/rta. -func forEachReachable(msets *typeutil.MethodSetCache, T types.Type, f func(types.Type) bool) { - var visit func(T types.Type, skip bool) - visit = func(T types.Type, skip bool) { - if !skip { - if !f(T) { - return - } - } - - // Recursion over signatures of each method. - tmset := msets.MethodSet(T) - for i := 0; i < tmset.Len(); i++ { - sig := tmset.At(i).Type().(*types.Signature) - // It is tempting to call visit(sig, false) - // but, as noted in golang.org/cl/65450043, - // the Signature.Recv field is ignored by - // types.Identical and typeutil.Map, which - // is confusing at best. - // - // More importantly, the true signature rtype - // reachable from a method using reflection - // has no receiver but an extra ordinary parameter. - // For the Read method of io.Reader we want: - // func(Reader, []byte) (int, error) - // but here sig is: - // func([]byte) (int, error) - // with .Recv = Reader (though it is hard to - // notice because it doesn't affect Signature.String - // or types.Identical). - // - // TODO(adonovan): construct and visit the correct - // non-method signature with an extra parameter - // (though since unnamed func types have no methods - // there is essentially no actual demand for this). - // - // TODO(adonovan): document whether or not it is - // safe to skip non-exported methods (as RTA does). - visit(sig.Params(), true) // skip the Tuple - visit(sig.Results(), true) // skip the Tuple - } - - switch T := T.(type) { - case *aliases.Alias: - visit(aliases.Unalias(T), skip) // emulates the pre-Alias behavior - - case *types.Basic: - // nop - - case *types.Interface: - // nop---handled by recursion over method set. 
- - case *types.Pointer: - visit(T.Elem(), false) - - case *types.Slice: - visit(T.Elem(), false) - - case *types.Chan: - visit(T.Elem(), false) - - case *types.Map: - visit(T.Key(), false) - visit(T.Elem(), false) - - case *types.Signature: - if T.Recv() != nil { - panic(fmt.Sprintf("Signature %s has Recv %s", T, T.Recv())) - } - visit(T.Params(), true) // skip the Tuple - visit(T.Results(), true) // skip the Tuple - - case *types.Named: - // A pointer-to-named type can be derived from a named - // type via reflection. It may have methods too. - visit(types.NewPointer(T), false) - - // Consider 'type T struct{S}' where S has methods. - // Reflection provides no way to get from T to struct{S}, - // only to S, so the method set of struct{S} is unwanted, - // so set 'skip' flag during recursion. - visit(T.Underlying(), true) // skip the unnamed type - - case *types.Array: - visit(T.Elem(), false) - - case *types.Struct: - for i, n := 0, T.NumFields(); i < n; i++ { - // TODO(adonovan): document whether or not - // it is safe to skip non-exported fields. - visit(T.Field(i).Type(), false) - } - - case *types.Tuple: - for i, n := 0, T.Len(); i < n; i++ { - visit(T.At(i).Type(), false) - } - - case *types.TypeParam, *types.Union: - // forEachReachable must not be called on parameterized types. - panic(T) - - default: - panic(T) - } + prog.makeInterfaceTypesMu.Lock() + defer prog.makeInterfaceTypesMu.Unlock() + + // Compute the derived types on demand, since many SSA clients + // never call RuntimeTypes, and those that do typically call + // it once (often within ssautil.AllFunctions, which will + // eventually not use it; see Go issue #69291.) This + // eliminates the need to eagerly compute all the element + // types during SSA building. + var runtimeTypes []types.Type + add := func(t types.Type) { runtimeTypes = append(runtimeTypes, t) } + var set typeutil.Map // for de-duping identical types + for t := range prog.makeInterfaceTypes { + typesinternal.ForEachElement(&set, &prog.MethodSets, t, add) } - visit(T, false) + + return runtimeTypes } diff --git a/vendor/golang.org/x/tools/go/ssa/print.go b/vendor/golang.org/x/tools/go/ssa/print.go index c890d7ee5..ef32672a2 100644 --- a/vendor/golang.org/x/tools/go/ssa/print.go +++ b/vendor/golang.org/x/tools/go/ssa/print.go @@ -39,16 +39,8 @@ func relName(v Value, i Instruction) string { return v.Name() } -// normalizeAnyForTesting controls whether we replace occurrences of -// interface{} with any. It is only used for normalizing test output. -var normalizeAnyForTesting bool - func relType(t types.Type, from *types.Package) string { - s := types.TypeString(t, types.RelativeTo(from)) - if normalizeAnyForTesting { - s = strings.ReplaceAll(s, "interface{}", "any") - } - return s + return types.TypeString(t, types.RelativeTo(from)) } func relTerm(term *types.Term, from *types.Package) string { diff --git a/vendor/golang.org/x/tools/go/ssa/sanity.go b/vendor/golang.org/x/tools/go/ssa/sanity.go index 285cba04a..ef2928e3b 100644 --- a/vendor/golang.org/x/tools/go/ssa/sanity.go +++ b/vendor/golang.org/x/tools/go/ssa/sanity.go @@ -407,14 +407,87 @@ func (s *sanity) checkReferrerList(v Value) { } } +func (s *sanity) checkFunctionParams() { + signature := s.fn.Signature + params := s.fn.Params + + // startSigParams is the start of signature.Params() within params. 
+ startSigParams := 0 + if signature.Recv() != nil { + startSigParams = 1 + } + + if startSigParams+signature.Params().Len() != len(params) { + s.errorf("function has %d parameters in signature but has %d after building", + startSigParams+signature.Params().Len(), len(params)) + return + } + + for i, param := range params { + var sigType types.Type + si := i - startSigParams + if si < 0 { + sigType = signature.Recv().Type() + } else { + sigType = signature.Params().At(si).Type() + } + + if !types.Identical(sigType, param.Type()) { + s.errorf("expect type %s in signature but got type %s in param %d", param.Type(), sigType, i) + } + } +} + +// checkTransientFields checks whether all transient fields of Function are cleared. +func (s *sanity) checkTransientFields() { + fn := s.fn + if fn.build != nil { + s.errorf("function transient field 'build' is not nil") + } + if fn.currentBlock != nil { + s.errorf("function transient field 'currentBlock' is not nil") + } + if fn.vars != nil { + s.errorf("function transient field 'vars' is not nil") + } + if fn.results != nil { + s.errorf("function transient field 'results' is not nil") + } + if fn.returnVars != nil { + s.errorf("function transient field 'returnVars' is not nil") + } + if fn.targets != nil { + s.errorf("function transient field 'targets' is not nil") + } + if fn.lblocks != nil { + s.errorf("function transient field 'lblocks' is not nil") + } + if fn.subst != nil { + s.errorf("function transient field 'subst' is not nil") + } + if fn.jump != nil { + s.errorf("function transient field 'jump' is not nil") + } + if fn.deferstack != nil { + s.errorf("function transient field 'deferstack' is not nil") + } + if fn.source != nil { + s.errorf("function transient field 'source' is not nil") + } + if fn.exits != nil { + s.errorf("function transient field 'exits' is not nil") + } + if fn.uniq != 0 { + s.errorf("function transient field 'uniq' is not zero") + } +} + func (s *sanity) checkFunction(fn *Function) bool { - // TODO(adonovan): check Function invariants: - // - check params match signature - // - check transient fields are nil - // - warn if any fn.Locals do not appear among block instructions. + s.fn = fn + s.checkFunctionParams() + s.checkTransientFields() // TODO(taking): Sanity check origin, typeparams, and typeargs. - s.fn = fn if fn.Prog == nil { s.errorf("nil Prog") } @@ -452,7 +525,23 @@ func (s *sanity) checkFunction(fn *Function) bool { s.errorf("got fromSource=%t, hasSyntax=%t; want same values", src, syn) } } + + // Build the set of valid referrers. + s.instrs = make(map[Instruction]unit) + + // TODO: switch to range-over-func when x/tools updates to 1.23. + // instrs are the instructions that are present in the function. + fn.instrs()(func(instr Instruction) bool { + s.instrs[instr] = unit{} + return true + }) + + // Check all Locals allocations appear in the function instruction. for i, l := range fn.Locals { + if _, present := s.instrs[l]; !present { + s.warnf("function doesn't contain Local alloc %s", l.Name()) + } + if l.Parent() != fn { s.errorf("Local %s at index %d has wrong parent", l.Name(), i) } @@ -460,13 +549,6 @@ func (s *sanity) checkFunction(fn *Function) bool { s.errorf("Local %s at index %d has Heap flag set", l.Name(), i) } } - // Build the set of valid referrers. 
- s.instrs = make(map[Instruction]unit) - for _, b := range fn.Blocks { - for _, instr := range b.Instrs { - s.instrs[instr] = unit{} - } - } for i, p := range fn.Params { if p.Parent() != fn { s.errorf("Param %s at index %d has wrong parent", p.Name(), i) @@ -527,6 +609,19 @@ func sanityCheckPackage(pkg *Package) { if pkg.Pkg == nil { panic(fmt.Sprintf("Package %s has no Object", pkg)) } + if pkg.info != nil { + panic(fmt.Sprintf("package %s field 'info' is not cleared", pkg)) + } + if pkg.files != nil { + panic(fmt.Sprintf("package %s field 'files' is not cleared", pkg)) + } + if pkg.created != nil { + panic(fmt.Sprintf("package %s field 'created' is not cleared", pkg)) + } + if pkg.initVersion != nil { + panic(fmt.Sprintf("package %s field 'initVersion' is not cleared", pkg)) + } + _ = pkg.String() // must not crash for name, mem := range pkg.Members { diff --git a/vendor/golang.org/x/tools/go/ssa/ssa.go b/vendor/golang.org/x/tools/go/ssa/ssa.go index 1231afd9e..4fa983107 100644 --- a/vendor/golang.org/x/tools/go/ssa/ssa.go +++ b/vendor/golang.org/x/tools/go/ssa/ssa.go @@ -37,8 +37,9 @@ type Program struct { hasParamsMu sync.Mutex hasParams typeparams.Free - runtimeTypesMu sync.Mutex - runtimeTypes typeutil.Map // set of runtime types (from MakeInterface) + // set of concrete types used as MakeInterface operands + makeInterfaceTypesMu sync.Mutex + makeInterfaceTypes map[types.Type]unit // (may contain redundant identical types) // objectMethods is a memoization of objectMethod // to avoid creation of duplicate methods from type information. @@ -341,7 +342,7 @@ type Function struct { // source information Synthetic string // provenance of synthetic function; "" for true source functions syntax ast.Node // *ast.Func{Decl,Lit}, if from syntax (incl. generic instances) or (*ast.RangeStmt if a yield function) - info *types.Info // type annotations (iff syntax != nil) + info *types.Info // type annotations (if syntax != nil) goversion string // Go version of syntax (NB: init is special) parent *Function // enclosing function if anon; nil if global diff --git a/vendor/golang.org/x/tools/go/ssa/ssautil/deprecated.go b/vendor/golang.org/x/tools/go/ssa/ssautil/deprecated.go new file mode 100644 index 000000000..4feff7131 --- /dev/null +++ b/vendor/golang.org/x/tools/go/ssa/ssautil/deprecated.go @@ -0,0 +1,36 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssautil + +// This file contains deprecated public APIs. +// We discourage their use. + +import ( + "golang.org/x/tools/go/loader" + "golang.org/x/tools/go/ssa" +) + +// CreateProgram returns a new program in SSA form, given a program +// loaded from source. An SSA package is created for each transitively +// error-free package of lprog. +// +// Code for bodies of functions is not built until Build is called +// on the result. +// +// The mode parameter controls diagnostics and checking during SSA construction. +// +// Deprecated: Use [golang.org/x/tools/go/packages] and the [Packages] +// function instead; see ssa.Example_loadPackages. 
+func CreateProgram(lprog *loader.Program, mode ssa.BuilderMode) *ssa.Program { + prog := ssa.NewProgram(lprog.Fset, mode) + + for _, info := range lprog.AllPackages { + if info.TransitivelyErrorFree { + prog.CreatePackage(info.Pkg, info.Files, &info.Info, info.Importable) + } + } + + return prog +} diff --git a/vendor/golang.org/x/tools/go/ssa/ssautil/load.go b/vendor/golang.org/x/tools/go/ssa/ssautil/load.go index 3daa67a07..51fba0545 100644 --- a/vendor/golang.org/x/tools/go/ssa/ssautil/load.go +++ b/vendor/golang.org/x/tools/go/ssa/ssautil/load.go @@ -11,7 +11,6 @@ import ( "go/token" "go/types" - "golang.org/x/tools/go/loader" "golang.org/x/tools/go/packages" "golang.org/x/tools/go/ssa" "golang.org/x/tools/internal/versions" @@ -111,29 +110,6 @@ func doPackages(initial []*packages.Package, mode ssa.BuilderMode, deps bool) (* return prog, ssapkgs } -// CreateProgram returns a new program in SSA form, given a program -// loaded from source. An SSA package is created for each transitively -// error-free package of lprog. -// -// Code for bodies of functions is not built until Build is called -// on the result. -// -// The mode parameter controls diagnostics and checking during SSA construction. -// -// Deprecated: Use [golang.org/x/tools/go/packages] and the [Packages] -// function instead; see ssa.Example_loadPackages. -func CreateProgram(lprog *loader.Program, mode ssa.BuilderMode) *ssa.Program { - prog := ssa.NewProgram(lprog.Fset, mode) - - for _, info := range lprog.AllPackages { - if info.TransitivelyErrorFree { - prog.CreatePackage(info.Pkg, info.Files, &info.Info, info.Importable) - } - } - - return prog -} - // BuildPackage builds an SSA program with SSA intermediate // representation (IR) for all functions of a single package. // diff --git a/vendor/golang.org/x/tools/go/ssa/subst.go b/vendor/golang.org/x/tools/go/ssa/subst.go index 4dcb87157..fc870235c 100644 --- a/vendor/golang.org/x/tools/go/ssa/subst.go +++ b/vendor/golang.org/x/tools/go/ssa/subst.go @@ -144,7 +144,7 @@ func (subst *subster) typ(t types.Type) (res types.Type) { case *types.Interface: return subst.interface_(t) - case *aliases.Alias: + case *types.Alias: return subst.alias(t) case *types.Named: @@ -317,7 +317,7 @@ func (subst *subster) interface_(iface *types.Interface) *types.Interface { return types.NewInterfaceType(methods, embeds).Complete() } -func (subst *subster) alias(t *aliases.Alias) types.Type { +func (subst *subster) alias(t *types.Alias) types.Type { // See subster.named. This follows the same strategy. tparams := aliases.TypeParams(t) targs := aliases.TypeArgs(t) @@ -365,19 +365,19 @@ func (subst *subster) alias(t *aliases.Alias) types.Type { rhs := subst.typ(aliases.Rhs(t)) // Create the fresh alias. - obj := aliases.NewAlias(true, tname.Pos(), tname.Pkg(), tname.Name(), rhs) - fresh := obj.Type() - if fresh, ok := fresh.(*aliases.Alias); ok { - // TODO: assume ok when aliases are always materialized (go1.27). - aliases.SetTypeParams(fresh, newTParams) - } + // + // Until 1.27, the result of aliases.NewAlias(...).Type() cannot guarantee it is a *types.Alias. + // However, as t is an *alias.Alias and t is well-typed, then aliases must have been enabled. + // Follow this decision, and always enable aliases here. + const enabled = true + obj := aliases.NewAlias(enabled, tname.Pos(), tname.Pkg(), tname.Name(), rhs, newTParams) // Substitute into all of the constraints after they are created. 
for i, ntp := range newTParams { bound := tparams.At(i).Constraint() ntp.SetConstraint(subst.typ(bound)) } - return fresh + return obj.Type() } // t is declared within the function origin and has type arguments. @@ -633,7 +633,7 @@ func reaches(t types.Type, c map[types.Type]bool) (res bool) { return true } } - case *types.Named, *aliases.Alias: + case *types.Named, *types.Alias: return reaches(t.Underlying(), c) default: panic("unreachable") diff --git a/vendor/golang.org/x/tools/go/ssa/util.go b/vendor/golang.org/x/tools/go/ssa/util.go index 549c9c819..cdc46209e 100644 --- a/vendor/golang.org/x/tools/go/ssa/util.go +++ b/vendor/golang.org/x/tools/go/ssa/util.go @@ -15,9 +15,7 @@ import ( "os" "sync" - "golang.org/x/tools/go/ast/astutil" "golang.org/x/tools/go/types/typeutil" - "golang.org/x/tools/internal/aliases" "golang.org/x/tools/internal/typeparams" "golang.org/x/tools/internal/typesinternal" ) @@ -36,7 +34,7 @@ func assert(p bool, msg string) { //// AST utilities -func unparen(e ast.Expr) ast.Expr { return astutil.Unparen(e) } +func unparen(e ast.Expr) ast.Expr { return ast.Unparen(e) } // isBlankIdent returns true iff e is an Ident with name "_". // They have no associated types.Object, and thus no type. @@ -45,13 +43,6 @@ func isBlankIdent(e ast.Expr) bool { return ok && id.Name == "_" } -// rangePosition is the position to give for the `range` token in a RangeStmt. -var rangePosition = func(rng *ast.RangeStmt) token.Pos { - // Before 1.20, this is unreachable. - // rng.For is a close, but incorrect position. - return rng.For -} - //// Type utilities. Some of these belong in go/types. // isNonTypeParamInterface reports whether t is an interface type but not a type parameter. @@ -268,7 +259,7 @@ func instanceArgs(info *types.Info, id *ast.Ident) []types.Type { return targs } -// Mapping of a type T to a canonical instance C s.t. types.Indentical(T, C). +// Mapping of a type T to a canonical instance C s.t. types.Identical(T, C). // Thread-safe. type canonizer struct { mu sync.Mutex @@ -295,7 +286,7 @@ func (c *canonizer) List(ts []types.Type) *typeList { // Is there some top level alias? var found bool for _, t := range ts { - if _, ok := t.(*aliases.Alias); ok { + if _, ok := t.(*types.Alias); ok { found = true break } @@ -306,7 +297,7 @@ func (c *canonizer) List(ts []types.Type) *typeList { cp := make([]types.Type, len(ts)) // copy with top level aliases removed. for i, t := range ts { - cp[i] = aliases.Unalias(t) + cp[i] = types.Unalias(t) } return cp } @@ -323,7 +314,7 @@ func (c *canonizer) List(ts []types.Type) *typeList { // For performance, reasons the canonical instance is order-dependent, // and may contain deeply nested aliases. func (c *canonizer) Type(T types.Type) types.Type { - T = aliases.Unalias(T) // remove the top level alias. + T = types.Unalias(T) // remove the top level alias. c.mu.Lock() defer c.mu.Unlock() @@ -403,10 +394,10 @@ func (m *typeListMap) hash(ts []types.Type) uint32 { // instantiateMethod instantiates m with targs and returns a canonical representative for this method. 
func (canon *canonizer) instantiateMethod(m *types.Func, targs []types.Type, ctxt *types.Context) *types.Func { recv := recvType(m) - if p, ok := aliases.Unalias(recv).(*types.Pointer); ok { + if p, ok := types.Unalias(recv).(*types.Pointer); ok { recv = p.Elem() } - named := aliases.Unalias(recv).(*types.Named) + named := types.Unalias(recv).(*types.Named) inst, err := types.Instantiate(ctxt, named.Origin(), targs, false) if err != nil { panic(err) diff --git a/vendor/golang.org/x/tools/go/ssa/util_go120.go b/vendor/golang.org/x/tools/go/ssa/util_go120.go deleted file mode 100644 index 9e8ea874e..000000000 --- a/vendor/golang.org/x/tools/go/ssa/util_go120.go +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright 2024 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.20 -// +build go1.20 - -package ssa - -import ( - "go/ast" - "go/token" -) - -func init() { - rangePosition = func(rng *ast.RangeStmt) token.Pos { return rng.Range } -} diff --git a/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go b/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go index 9ada17775..a70b727f2 100644 --- a/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go +++ b/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go @@ -228,7 +228,7 @@ func (enc *Encoder) For(obj types.Object) (Path, error) { // Reject obviously non-viable cases. switch obj := obj.(type) { case *types.TypeName: - if _, ok := aliases.Unalias(obj.Type()).(*types.TypeParam); !ok { + if _, ok := types.Unalias(obj.Type()).(*types.TypeParam); !ok { // With the exception of type parameters, only package-level type names // have a path. return "", fmt.Errorf("no path for %v", obj) @@ -280,7 +280,7 @@ func (enc *Encoder) For(obj types.Object) (Path, error) { path = append(path, opType) T := o.Type() - if alias, ok := T.(*aliases.Alias); ok { + if alias, ok := T.(*types.Alias); ok { if r := findTypeParam(obj, aliases.TypeParams(alias), path, opTypeParam, nil); r != nil { return Path(r), nil } @@ -320,7 +320,7 @@ func (enc *Encoder) For(obj types.Object) (Path, error) { } // Inspect declared methods of defined types. - if T, ok := aliases.Unalias(o.Type()).(*types.Named); ok { + if T, ok := types.Unalias(o.Type()).(*types.Named); ok { path = append(path, opType) // The method index here is always with respect // to the underlying go/types data structures, @@ -449,8 +449,8 @@ func (enc *Encoder) concreteMethod(meth *types.Func) (Path, bool) { // nil, it will be allocated as necessary. func find(obj types.Object, T types.Type, path []byte, seen map[*types.TypeName]bool) []byte { switch T := T.(type) { - case *aliases.Alias: - return find(obj, aliases.Unalias(T), path, seen) + case *types.Alias: + return find(obj, types.Unalias(T), path, seen) case *types.Basic, *types.Named: // Named types belonging to pkg were handled already, // so T must belong to another package. No path. 
@@ -626,7 +626,7 @@ func Object(pkg *types.Package, p Path) (types.Object, error) { // Inv: t != nil, obj == nil - t = aliases.Unalias(t) + t = types.Unalias(t) switch code { case opElem: hasElem, ok := t.(hasElem) // Pointer, Slice, Array, Chan, Map @@ -664,7 +664,7 @@ func Object(pkg *types.Package, p Path) (types.Object, error) { t = named.Underlying() case opRhs: - if alias, ok := t.(*aliases.Alias); ok { + if alias, ok := t.(*types.Alias); ok { t = aliases.Rhs(alias) } else if false && aliases.Enabled() { // The Enabled check is too expensive, so for now we diff --git a/vendor/golang.org/x/tools/go/types/typeutil/callee.go b/vendor/golang.org/x/tools/go/types/typeutil/callee.go index 90dc541ad..754380351 100644 --- a/vendor/golang.org/x/tools/go/types/typeutil/callee.go +++ b/vendor/golang.org/x/tools/go/types/typeutil/callee.go @@ -8,7 +8,6 @@ import ( "go/ast" "go/types" - "golang.org/x/tools/go/ast/astutil" "golang.org/x/tools/internal/typeparams" ) @@ -17,7 +16,7 @@ import ( // // Functions and methods may potentially have type parameters. func Callee(info *types.Info, call *ast.CallExpr) types.Object { - fun := astutil.Unparen(call.Fun) + fun := ast.Unparen(call.Fun) // Look through type instantiation if necessary. isInstance := false diff --git a/vendor/golang.org/x/tools/go/types/typeutil/map.go b/vendor/golang.org/x/tools/go/types/typeutil/map.go index a92f80dd2..8d824f714 100644 --- a/vendor/golang.org/x/tools/go/types/typeutil/map.go +++ b/vendor/golang.org/x/tools/go/types/typeutil/map.go @@ -12,7 +12,6 @@ import ( "go/types" "reflect" - "golang.org/x/tools/internal/aliases" "golang.org/x/tools/internal/typeparams" ) @@ -260,8 +259,8 @@ func (h Hasher) hashFor(t types.Type) uint32 { case *types.Basic: return uint32(t.Kind()) - case *aliases.Alias: - return h.Hash(aliases.Unalias(t)) + case *types.Alias: + return h.Hash(types.Unalias(t)) case *types.Array: return 9043 + 2*uint32(t.Len()) + 3*h.Hash(t.Elem()) @@ -461,8 +460,8 @@ func (h Hasher) shallowHash(t types.Type) uint32 { // elements (mostly Slice, Pointer, Basic, Named), // so there's no need to optimize anything else. 
switch t := t.(type) { - case *aliases.Alias: - return h.shallowHash(aliases.Unalias(t)) + case *types.Alias: + return h.shallowHash(types.Unalias(t)) case *types.Signature: var hash uint32 = 604171 diff --git a/vendor/golang.org/x/tools/go/types/typeutil/methodsetcache.go b/vendor/golang.org/x/tools/go/types/typeutil/methodsetcache.go index bd71aafaa..f7666028f 100644 --- a/vendor/golang.org/x/tools/go/types/typeutil/methodsetcache.go +++ b/vendor/golang.org/x/tools/go/types/typeutil/methodsetcache.go @@ -9,8 +9,6 @@ package typeutil import ( "go/types" "sync" - - "golang.org/x/tools/internal/aliases" ) // A MethodSetCache records the method set of each type T for which @@ -34,12 +32,12 @@ func (cache *MethodSetCache) MethodSet(T types.Type) *types.MethodSet { cache.mu.Lock() defer cache.mu.Unlock() - switch T := aliases.Unalias(T).(type) { + switch T := types.Unalias(T).(type) { case *types.Named: return cache.lookupNamed(T).value case *types.Pointer: - if N, ok := aliases.Unalias(T.Elem()).(*types.Named); ok { + if N, ok := types.Unalias(T.Elem()).(*types.Named); ok { return cache.lookupNamed(N).pointer } } diff --git a/vendor/golang.org/x/tools/go/types/typeutil/ui.go b/vendor/golang.org/x/tools/go/types/typeutil/ui.go index a0c1a60ac..9dda6a25d 100644 --- a/vendor/golang.org/x/tools/go/types/typeutil/ui.go +++ b/vendor/golang.org/x/tools/go/types/typeutil/ui.go @@ -8,8 +8,6 @@ package typeutil import ( "go/types" - - "golang.org/x/tools/internal/aliases" ) // IntuitiveMethodSet returns the intuitive method set of a type T, @@ -28,7 +26,7 @@ import ( // The order of the result is as for types.MethodSet(T). func IntuitiveMethodSet(T types.Type, msets *MethodSetCache) []*types.Selection { isPointerToConcrete := func(T types.Type) bool { - ptr, ok := aliases.Unalias(T).(*types.Pointer) + ptr, ok := types.Unalias(T).(*types.Pointer) return ok && !types.IsInterface(ptr.Elem()) } diff --git a/vendor/golang.org/x/tools/internal/aliases/aliases.go b/vendor/golang.org/x/tools/internal/aliases/aliases.go index c24c2eee4..b9425f5a2 100644 --- a/vendor/golang.org/x/tools/internal/aliases/aliases.go +++ b/vendor/golang.org/x/tools/internal/aliases/aliases.go @@ -22,11 +22,17 @@ import ( // GODEBUG=gotypesalias=... by invoking the type checker. The Enabled // function is expensive and should be called once per task (e.g. // package import), not once per call to NewAlias. -func NewAlias(enabled bool, pos token.Pos, pkg *types.Package, name string, rhs types.Type) *types.TypeName { +// +// Precondition: enabled || len(tparams)==0. +// If materialized aliases are disabled, there must not be any type parameters. +func NewAlias(enabled bool, pos token.Pos, pkg *types.Package, name string, rhs types.Type, tparams []*types.TypeParam) *types.TypeName { if enabled { tname := types.NewTypeName(pos, pkg, name, nil) - newAlias(tname, rhs) + SetTypeParams(types.NewAlias(tname, rhs), tparams) return tname } + if len(tparams) > 0 { + panic("cannot create an alias with type parameters when gotypesalias is not enabled") + } return types.NewTypeName(pos, pkg, name, rhs) } diff --git a/vendor/golang.org/x/tools/internal/aliases/aliases_go121.go b/vendor/golang.org/x/tools/internal/aliases/aliases_go121.go deleted file mode 100644 index 6652f7db0..000000000 --- a/vendor/golang.org/x/tools/internal/aliases/aliases_go121.go +++ /dev/null @@ -1,35 +0,0 @@ -// Copyright 2024 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build !go1.22 -// +build !go1.22 - -package aliases - -import ( - "go/types" -) - -// Alias is a placeholder for a go/types.Alias for <=1.21. -// It will never be created by go/types. -type Alias struct{} - -func (*Alias) String() string { panic("unreachable") } -func (*Alias) Underlying() types.Type { panic("unreachable") } -func (*Alias) Obj() *types.TypeName { panic("unreachable") } -func Rhs(alias *Alias) types.Type { panic("unreachable") } -func TypeParams(alias *Alias) *types.TypeParamList { panic("unreachable") } -func SetTypeParams(alias *Alias, tparams []*types.TypeParam) { panic("unreachable") } -func TypeArgs(alias *Alias) *types.TypeList { panic("unreachable") } -func Origin(alias *Alias) *Alias { panic("unreachable") } - -// Unalias returns the type t for go <=1.21. -func Unalias(t types.Type) types.Type { return t } - -func newAlias(name *types.TypeName, rhs types.Type) *Alias { panic("unreachable") } - -// Enabled reports whether [NewAlias] should create [types.Alias] types. -// -// Before go1.22, this function always returns false. -func Enabled() bool { return false } diff --git a/vendor/golang.org/x/tools/internal/aliases/aliases_go122.go b/vendor/golang.org/x/tools/internal/aliases/aliases_go122.go index 3ef1afeb4..7716a3331 100644 --- a/vendor/golang.org/x/tools/internal/aliases/aliases_go122.go +++ b/vendor/golang.org/x/tools/internal/aliases/aliases_go122.go @@ -2,9 +2,6 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build go1.22 -// +build go1.22 - package aliases import ( @@ -14,22 +11,19 @@ import ( "go/types" ) -// Alias is an alias of types.Alias. -type Alias = types.Alias - // Rhs returns the type on the right-hand side of the alias declaration. -func Rhs(alias *Alias) types.Type { +func Rhs(alias *types.Alias) types.Type { if alias, ok := any(alias).(interface{ Rhs() types.Type }); ok { return alias.Rhs() // go1.23+ } // go1.22's Alias didn't have the Rhs method, // so Unalias is the best we can do. - return Unalias(alias) + return types.Unalias(alias) } // TypeParams returns the type parameter list of the alias. -func TypeParams(alias *Alias) *types.TypeParamList { +func TypeParams(alias *types.Alias) *types.TypeParamList { if alias, ok := any(alias).(interface{ TypeParams() *types.TypeParamList }); ok { return alias.TypeParams() // go1.23+ } @@ -37,7 +31,7 @@ func TypeParams(alias *Alias) *types.TypeParamList { } // SetTypeParams sets the type parameters of the alias type. -func SetTypeParams(alias *Alias, tparams []*types.TypeParam) { +func SetTypeParams(alias *types.Alias, tparams []*types.TypeParam) { if alias, ok := any(alias).(interface { SetTypeParams(tparams []*types.TypeParam) }); ok { @@ -48,7 +42,7 @@ func SetTypeParams(alias *Alias, tparams []*types.TypeParam) { } // TypeArgs returns the type arguments used to instantiate the Alias type. -func TypeArgs(alias *Alias) *types.TypeList { +func TypeArgs(alias *types.Alias) *types.TypeList { if alias, ok := any(alias).(interface{ TypeArgs() *types.TypeList }); ok { return alias.TypeArgs() // go1.23+ } @@ -57,26 +51,13 @@ func TypeArgs(alias *Alias) *types.TypeList { // Origin returns the generic Alias type of which alias is an instance. // If alias is not an instance of a generic alias, Origin returns alias. 
-func Origin(alias *Alias) *Alias { +func Origin(alias *types.Alias) *types.Alias { if alias, ok := any(alias).(interface{ Origin() *types.Alias }); ok { return alias.Origin() // go1.23+ } return alias // not an instance of a generic alias (go1.22) } -// Unalias is a wrapper of types.Unalias. -func Unalias(t types.Type) types.Type { return types.Unalias(t) } - -// newAlias is an internal alias around types.NewAlias. -// Direct usage is discouraged as the moment. -// Try to use NewAlias instead. -func newAlias(tname *types.TypeName, rhs types.Type) *Alias { - a := types.NewAlias(tname, rhs) - // TODO(go.dev/issue/65455): Remove kludgy workaround to set a.actual as a side-effect. - Unalias(a) - return a -} - // Enabled reports whether [NewAlias] should create [types.Alias] types. // // This function is expensive! Call it sparingly. @@ -92,7 +73,7 @@ func Enabled() bool { // many tests. Therefore any attempt to cache the result // is just incorrect. fset := token.NewFileSet() - f, _ := parser.ParseFile(fset, "a.go", "package p; type A = int", 0) + f, _ := parser.ParseFile(fset, "a.go", "package p; type A = int", parser.SkipObjectResolution) pkg, _ := new(types.Config).Check("p", fset, []*ast.File{f}, nil) _, enabled := pkg.Scope().Lookup("A").Type().(*types.Alias) return enabled diff --git a/vendor/golang.org/x/tools/internal/analysisinternal/analysis.go b/vendor/golang.org/x/tools/internal/analysisinternal/analysis.go index e0b13e70a..24755b412 100644 --- a/vendor/golang.org/x/tools/internal/analysisinternal/analysis.go +++ b/vendor/golang.org/x/tools/internal/analysisinternal/analysis.go @@ -17,7 +17,6 @@ import ( "strconv" "golang.org/x/tools/go/analysis" - "golang.org/x/tools/internal/aliases" ) func TypeErrorEndPos(fset *token.FileSet, src []byte, start token.Pos) token.Pos { @@ -34,7 +33,7 @@ func TypeErrorEndPos(fset *token.FileSet, src []byte, start token.Pos) token.Pos func ZeroValue(f *ast.File, pkg *types.Package, typ types.Type) ast.Expr { // TODO(adonovan): think about generics, and also generic aliases. - under := aliases.Unalias(typ) + under := types.Unalias(typ) // Don't call Underlying unconditionally: although it removes // Named and Alias, it also removes TypeParam. 
if n, ok := under.(*types.Named); ok { @@ -416,8 +415,7 @@ func CheckReadable(pass *analysis.Pass, filename string) error { return nil } for _, f := range pass.Files { - // TODO(adonovan): use go1.20 f.FileStart - if pass.Fset.File(f.Pos()).Name() == filename { + if pass.Fset.File(f.FileStart).Name() == filename { return nil } } diff --git a/vendor/golang.org/x/tools/internal/gcimporter/bimport.go b/vendor/golang.org/x/tools/internal/gcimporter/bimport.go index d98b0db2a..d79a605ed 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/bimport.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/bimport.go @@ -87,64 +87,3 @@ func chanDir(d int) types.ChanDir { return 0 } } - -var predeclOnce sync.Once -var predecl []types.Type // initialized lazily - -func predeclared() []types.Type { - predeclOnce.Do(func() { - // initialize lazily to be sure that all - // elements have been initialized before - predecl = []types.Type{ // basic types - types.Typ[types.Bool], - types.Typ[types.Int], - types.Typ[types.Int8], - types.Typ[types.Int16], - types.Typ[types.Int32], - types.Typ[types.Int64], - types.Typ[types.Uint], - types.Typ[types.Uint8], - types.Typ[types.Uint16], - types.Typ[types.Uint32], - types.Typ[types.Uint64], - types.Typ[types.Uintptr], - types.Typ[types.Float32], - types.Typ[types.Float64], - types.Typ[types.Complex64], - types.Typ[types.Complex128], - types.Typ[types.String], - - // basic type aliases - types.Universe.Lookup("byte").Type(), - types.Universe.Lookup("rune").Type(), - - // error - types.Universe.Lookup("error").Type(), - - // untyped types - types.Typ[types.UntypedBool], - types.Typ[types.UntypedInt], - types.Typ[types.UntypedRune], - types.Typ[types.UntypedFloat], - types.Typ[types.UntypedComplex], - types.Typ[types.UntypedString], - types.Typ[types.UntypedNil], - - // package unsafe - types.Typ[types.UnsafePointer], - - // invalid type - types.Typ[types.Invalid], // only appears in packages with errors - - // used internally by gc; never used by this package or in .a files - anyType{}, - } - predecl = append(predecl, additionalPredeclared()...) - }) - return predecl -} - -type anyType struct{} - -func (t anyType) Underlying() types.Type { return t } -func (t anyType) String() string { return "any" } diff --git a/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go b/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go index 39df91124..e6c5d51f8 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go @@ -232,14 +232,19 @@ func Import(packages map[string]*types.Package, path, srcDir string, lookup func // Select appropriate importer. if len(data) > 0 { switch data[0] { - case 'v', 'c', 'd': // binary, till go1.10 + case 'v', 'c', 'd': + // binary: emitted by cmd/compile till go1.10; obsolete. return nil, fmt.Errorf("binary (%c) import format is no longer supported", data[0]) - case 'i': // indexed, till go1.19 + case 'i': + // indexed: emitted by cmd/compile till go1.19; + // now used only for serializing go/types. + // See https://github.com/golang/go/issues/69491. _, pkg, err := IImportData(fset, packages, data[1:], id) return pkg, err - case 'u': // unified, from go1.20 + case 'u': + // unified: emitted by cmd/compile since go1.20. 
_, pkg, err := UImportData(fset, packages, data[1:size], id) return pkg, err diff --git a/vendor/golang.org/x/tools/internal/gcimporter/iexport.go b/vendor/golang.org/x/tools/internal/gcimporter/iexport.go index deeb67f31..1e19fbed8 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/iexport.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/iexport.go @@ -2,9 +2,227 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// Indexed binary package export. -// This file was derived from $GOROOT/src/cmd/compile/internal/gc/iexport.go; -// see that file for specification of the format. +// Indexed package export. +// +// The indexed export data format is an evolution of the previous +// binary export data format. Its chief contribution is introducing an +// index table, which allows efficient random access of individual +// declarations and inline function bodies. In turn, this allows +// avoiding unnecessary work for compilation units that import large +// packages. +// +// +// The top-level data format is structured as: +// +// Header struct { +// Tag byte // 'i' +// Version uvarint +// StringSize uvarint +// DataSize uvarint +// } +// +// Strings [StringSize]byte +// Data [DataSize]byte +// +// MainIndex []struct{ +// PkgPath stringOff +// PkgName stringOff +// PkgHeight uvarint +// +// Decls []struct{ +// Name stringOff +// Offset declOff +// } +// } +// +// Fingerprint [8]byte +// +// uvarint means a uint64 written out using uvarint encoding. +// +// []T means a uvarint followed by that many T objects. In other +// words: +// +// Len uvarint +// Elems [Len]T +// +// stringOff means a uvarint that indicates an offset within the +// Strings section. At that offset is another uvarint, followed by +// that many bytes, which form the string value. +// +// declOff means a uvarint that indicates an offset within the Data +// section where the associated declaration can be found. +// +// +// There are five kinds of declarations, distinguished by their first +// byte: +// +// type Var struct { +// Tag byte // 'V' +// Pos Pos +// Type typeOff +// } +// +// type Func struct { +// Tag byte // 'F' or 'G' +// Pos Pos +// TypeParams []typeOff // only present if Tag == 'G' +// Signature Signature +// } +// +// type Const struct { +// Tag byte // 'C' +// Pos Pos +// Value Value +// } +// +// type Type struct { +// Tag byte // 'T' or 'U' +// Pos Pos +// TypeParams []typeOff // only present if Tag == 'U' +// Underlying typeOff +// +// Methods []struct{ // omitted if Underlying is an interface type +// Pos Pos +// Name stringOff +// Recv Param +// Signature Signature +// } +// } +// +// type Alias struct { +// Tag byte // 'A' or 'B' +// Pos Pos +// TypeParams []typeOff // only present if Tag == 'B' +// Type typeOff +// } +// +// // "Automatic" declaration of each typeparam +// type TypeParam struct { +// Tag byte // 'P' +// Pos Pos +// Implicit bool +// Constraint typeOff +// } +// +// typeOff means a uvarint that either indicates a predeclared type, +// or an offset into the Data section. If the uvarint is less than +// predeclReserved, then it indicates the index into the predeclared +// types list (see predeclared in bexport.go for order). Otherwise, +// subtracting predeclReserved yields the offset of a type descriptor. +// +// Value means a type, kind, and type-specific value. See +// (*exportWriter).value for details. 
+// +// +// There are twelve kinds of type descriptors, distinguished by an itag: +// +// type DefinedType struct { +// Tag itag // definedType +// Name stringOff +// PkgPath stringOff +// } +// +// type PointerType struct { +// Tag itag // pointerType +// Elem typeOff +// } +// +// type SliceType struct { +// Tag itag // sliceType +// Elem typeOff +// } +// +// type ArrayType struct { +// Tag itag // arrayType +// Len uint64 +// Elem typeOff +// } +// +// type ChanType struct { +// Tag itag // chanType +// Dir uint64 // 1 RecvOnly; 2 SendOnly; 3 SendRecv +// Elem typeOff +// } +// +// type MapType struct { +// Tag itag // mapType +// Key typeOff +// Elem typeOff +// } +// +// type FuncType struct { +// Tag itag // signatureType +// PkgPath stringOff +// Signature Signature +// } +// +// type StructType struct { +// Tag itag // structType +// PkgPath stringOff +// Fields []struct { +// Pos Pos +// Name stringOff +// Type typeOff +// Embedded bool +// Note stringOff +// } +// } +// +// type InterfaceType struct { +// Tag itag // interfaceType +// PkgPath stringOff +// Embeddeds []struct { +// Pos Pos +// Type typeOff +// } +// Methods []struct { +// Pos Pos +// Name stringOff +// Signature Signature +// } +// } +// +// // Reference to a type param declaration +// type TypeParamType struct { +// Tag itag // typeParamType +// Name stringOff +// PkgPath stringOff +// } +// +// // Instantiation of a generic type (like List[T2] or List[int]) +// type InstanceType struct { +// Tag itag // instanceType +// Pos pos +// TypeArgs []typeOff +// BaseType typeOff +// } +// +// type UnionType struct { +// Tag itag // interfaceType +// Terms []struct { +// tilde bool +// Type typeOff +// } +// } +// +// +// +// type Signature struct { +// Params []Param +// Results []Param +// Variadic bool // omitted if Results is empty +// } +// +// type Param struct { +// Pos Pos +// Name stringOff +// Type typOff +// } +// +// +// Pos encodes a file:line:column triple, incorporating a simple delta +// encoding scheme within a data object. See exportWriter.pos for +// details. package gcimporter @@ -24,7 +242,6 @@ import ( "golang.org/x/tools/go/types/objectpath" "golang.org/x/tools/internal/aliases" - "golang.org/x/tools/internal/tokeninternal" ) // IExportShallow encodes "shallow" export data for the specified package. @@ -223,7 +440,7 @@ func (p *iexporter) encodeFile(w *intWriter, file *token.File, needed []uint64) // Sort the set of needed offsets. Duplicates are harmless. 
sort.Slice(needed, func(i, j int) bool { return needed[i] < needed[j] }) - lines := tokeninternal.GetLines(file) // byte offset of each line start + lines := file.Lines() // byte offset of each line start w.uint64(uint64(len(lines))) // Rather than record the entire array of line start offsets, @@ -507,13 +724,13 @@ func (p *iexporter) doDecl(obj types.Object) { case *types.TypeName: t := obj.Type() - if tparam, ok := aliases.Unalias(t).(*types.TypeParam); ok { + if tparam, ok := types.Unalias(t).(*types.TypeParam); ok { w.tag(typeParamTag) w.pos(obj.Pos()) constraint := tparam.Constraint() if p.version >= iexportVersionGo1_18 { implicit := false - if iface, _ := aliases.Unalias(constraint).(*types.Interface); iface != nil { + if iface, _ := types.Unalias(constraint).(*types.Interface); iface != nil { implicit = iface.IsImplicit() } w.bool(implicit) @@ -523,9 +740,22 @@ func (p *iexporter) doDecl(obj types.Object) { } if obj.IsAlias() { - w.tag(aliasTag) + alias, materialized := t.(*types.Alias) // may fail when aliases are not enabled + + var tparams *types.TypeParamList + if materialized { + tparams = aliases.TypeParams(alias) + } + if tparams.Len() == 0 { + w.tag(aliasTag) + } else { + w.tag(genericAliasTag) + } w.pos(obj.Pos()) - if alias, ok := t.(*aliases.Alias); ok { + if tparams.Len() > 0 { + w.tparamList(obj.Name(), tparams, obj.Pkg()) + } + if materialized { // Preserve materialized aliases, // even of non-exported types. t = aliases.Rhs(alias) @@ -744,8 +974,14 @@ func (w *exportWriter) doTyp(t types.Type, pkg *types.Package) { }() } switch t := t.(type) { - case *aliases.Alias: - // TODO(adonovan): support parameterized aliases, following *types.Named. + case *types.Alias: + if targs := aliases.TypeArgs(t); targs.Len() > 0 { + w.startType(instanceType) + w.pos(t.Obj().Pos()) + w.typeList(targs, pkg) + w.typ(aliases.Origin(t), pkg) + return + } w.startType(aliasType) w.qualifiedType(t.Obj()) @@ -854,7 +1090,7 @@ func (w *exportWriter) doTyp(t types.Type, pkg *types.Package) { for i := 0; i < n; i++ { ft := t.EmbeddedType(i) tPkg := pkg - if named, _ := aliases.Unalias(ft).(*types.Named); named != nil { + if named, _ := types.Unalias(ft).(*types.Named); named != nil { w.pos(named.Obj().Pos()) } else { w.pos(token.NoPos) diff --git a/vendor/golang.org/x/tools/internal/gcimporter/iimport.go b/vendor/golang.org/x/tools/internal/gcimporter/iimport.go index 136aa0365..21908a158 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/iimport.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/iimport.go @@ -3,7 +3,7 @@ // license that can be found in the LICENSE file. // Indexed package import. -// See cmd/compile/internal/gc/iexport.go for the export data format. +// See iexport.go for the export data format. // This file is a copy of $GOROOT/src/go/internal/gcimporter/iimport.go. 
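Aside (illustrative, not part of the vendored patch): the header layout documented in the iexport.go comment above — a tag byte 'i' followed by uvarint Version, StringSize, and DataSize fields — can be decoded with the standard library alone. The helper name and error handling below are assumptions; this is a minimal sketch of reading those four header fields, not gcimporter's actual importer.

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

// decodeIExportHeader is a hypothetical helper that reads the header fields
// described in the format comment above: a tag byte 'i' followed by three
// uvarints (Version, StringSize, DataSize).
func decodeIExportHeader(data []byte) (version, stringSize, dataSize uint64, err error) {
	r := bytes.NewReader(data)
	tag, err := r.ReadByte()
	if err != nil {
		return 0, 0, 0, err
	}
	if tag != 'i' {
		return 0, 0, 0, fmt.Errorf("not indexed export data: tag %q", tag)
	}
	if version, err = binary.ReadUvarint(r); err != nil {
		return 0, 0, 0, err
	}
	if stringSize, err = binary.ReadUvarint(r); err != nil {
		return 0, 0, 0, err
	}
	if dataSize, err = binary.ReadUvarint(r); err != nil {
		return 0, 0, 0, err
	}
	return version, stringSize, dataSize, nil
}

func main() {
	// A handcrafted header: tag 'i', Version=2, StringSize=5, DataSize=7.
	// (Uvarint encoding of values below 128 is a single byte.)
	hdr := []byte{'i', 2, 5, 7}
	v, ss, ds, err := decodeIExportHeader(hdr)
	fmt.Println(v, ss, ds, err) // 2 5 7 <nil>
}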
@@ -53,6 +53,7 @@ const ( iexportVersionPosCol = 1 iexportVersionGo1_18 = 2 iexportVersionGenerics = 2 + iexportVersion = iexportVersionGenerics iexportVersionCurrent = 2 ) @@ -540,7 +541,7 @@ func canReuse(def *types.Named, rhs types.Type) bool { if def == nil { return true } - iface, _ := aliases.Unalias(rhs).(*types.Interface) + iface, _ := types.Unalias(rhs).(*types.Interface) if iface == nil { return true } @@ -562,14 +563,14 @@ func (r *importReader) obj(name string) { pos := r.pos() switch tag { - case aliasTag: + case aliasTag, genericAliasTag: + var tparams []*types.TypeParam + if tag == genericAliasTag { + tparams = r.tparamList() + } typ := r.typ() - // TODO(adonovan): support generic aliases: - // if tag == genericAliasTag { - // tparams := r.tparamList() - // alias.SetTypeParams(tparams) - // } - r.declare(aliases.NewAlias(r.p.aliases, pos, r.currPkg, name, typ)) + obj := aliases.NewAlias(r.p.aliases, pos, r.currPkg, name, typ, tparams) + r.declare(obj) case constTag: typ, val := r.value() @@ -615,7 +616,7 @@ func (r *importReader) obj(name string) { if targs.Len() > 0 { rparams = make([]*types.TypeParam, targs.Len()) for i := range rparams { - rparams[i] = aliases.Unalias(targs.At(i)).(*types.TypeParam) + rparams[i] = types.Unalias(targs.At(i)).(*types.TypeParam) } } msig := r.signature(recv, rparams, nil) @@ -645,7 +646,7 @@ func (r *importReader) obj(name string) { } constraint := r.typ() if implicit { - iface, _ := aliases.Unalias(constraint).(*types.Interface) + iface, _ := types.Unalias(constraint).(*types.Interface) if iface == nil { errorf("non-interface constraint marked implicit") } @@ -852,7 +853,7 @@ func (r *importReader) typ() types.Type { } func isInterface(t types.Type) bool { - _, ok := aliases.Unalias(t).(*types.Interface) + _, ok := types.Unalias(t).(*types.Interface) return ok } @@ -862,7 +863,7 @@ func (r *importReader) string() string { return r.p.stringAt(r.uint64()) } func (r *importReader) doType(base *types.Named) (res types.Type) { k := r.kind() if debug { - r.p.trace("importing type %d (base: %s)", k, base) + r.p.trace("importing type %d (base: %v)", k, base) r.p.indent++ defer func() { r.p.indent-- @@ -959,7 +960,7 @@ func (r *importReader) doType(base *types.Named) (res types.Type) { methods[i] = method } - typ := newInterface(methods, embeddeds) + typ := types.NewInterfaceType(methods, embeddeds) r.p.interfaceList = append(r.p.interfaceList, typ) return typ @@ -1051,7 +1052,7 @@ func (r *importReader) tparamList() []*types.TypeParam { for i := range xs { // Note: the standard library importer is tolerant of nil types here, // though would panic in SetTypeParams. - xs[i] = aliases.Unalias(r.typ()).(*types.TypeParam) + xs[i] = types.Unalias(r.typ()).(*types.TypeParam) } return xs } diff --git a/vendor/golang.org/x/tools/internal/gcimporter/newInterface10.go b/vendor/golang.org/x/tools/internal/gcimporter/newInterface10.go deleted file mode 100644 index 8b163e3d0..000000000 --- a/vendor/golang.org/x/tools/internal/gcimporter/newInterface10.go +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build !go1.11 -// +build !go1.11 - -package gcimporter - -import "go/types" - -func newInterface(methods []*types.Func, embeddeds []types.Type) *types.Interface { - named := make([]*types.Named, len(embeddeds)) - for i, e := range embeddeds { - var ok bool - named[i], ok = e.(*types.Named) - if !ok { - panic("embedding of non-defined interfaces in interfaces is not supported before Go 1.11") - } - } - return types.NewInterface(methods, named) -} diff --git a/vendor/golang.org/x/tools/internal/gcimporter/newInterface11.go b/vendor/golang.org/x/tools/internal/gcimporter/newInterface11.go deleted file mode 100644 index 49984f40f..000000000 --- a/vendor/golang.org/x/tools/internal/gcimporter/newInterface11.go +++ /dev/null @@ -1,14 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.11 -// +build go1.11 - -package gcimporter - -import "go/types" - -func newInterface(methods []*types.Func, embeddeds []types.Type) *types.Interface { - return types.NewInterfaceType(methods, embeddeds) -} diff --git a/vendor/golang.org/x/tools/internal/gcimporter/predeclared.go b/vendor/golang.org/x/tools/internal/gcimporter/predeclared.go new file mode 100644 index 000000000..907c8557a --- /dev/null +++ b/vendor/golang.org/x/tools/internal/gcimporter/predeclared.go @@ -0,0 +1,91 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gcimporter + +import ( + "go/types" + "sync" +) + +// predecl is a cache for the predeclared types in types.Universe. +// +// Cache a distinct result based on the runtime value of any. +// The pointer value of the any type varies based on GODEBUG settings. 
+var predeclMu sync.Mutex +var predecl map[types.Type][]types.Type + +func predeclared() []types.Type { + anyt := types.Universe.Lookup("any").Type() + + predeclMu.Lock() + defer predeclMu.Unlock() + + if pre, ok := predecl[anyt]; ok { + return pre + } + + if predecl == nil { + predecl = make(map[types.Type][]types.Type) + } + + decls := []types.Type{ // basic types + types.Typ[types.Bool], + types.Typ[types.Int], + types.Typ[types.Int8], + types.Typ[types.Int16], + types.Typ[types.Int32], + types.Typ[types.Int64], + types.Typ[types.Uint], + types.Typ[types.Uint8], + types.Typ[types.Uint16], + types.Typ[types.Uint32], + types.Typ[types.Uint64], + types.Typ[types.Uintptr], + types.Typ[types.Float32], + types.Typ[types.Float64], + types.Typ[types.Complex64], + types.Typ[types.Complex128], + types.Typ[types.String], + + // basic type aliases + types.Universe.Lookup("byte").Type(), + types.Universe.Lookup("rune").Type(), + + // error + types.Universe.Lookup("error").Type(), + + // untyped types + types.Typ[types.UntypedBool], + types.Typ[types.UntypedInt], + types.Typ[types.UntypedRune], + types.Typ[types.UntypedFloat], + types.Typ[types.UntypedComplex], + types.Typ[types.UntypedString], + types.Typ[types.UntypedNil], + + // package unsafe + types.Typ[types.UnsafePointer], + + // invalid type + types.Typ[types.Invalid], // only appears in packages with errors + + // used internally by gc; never used by this package or in .a files + anyType{}, + + // comparable + types.Universe.Lookup("comparable").Type(), + + // any + anyt, + } + + predecl[anyt] = decls + return decls +} + +type anyType struct{} + +func (t anyType) Underlying() types.Type { return t } +func (t anyType) String() string { return "any" } diff --git a/vendor/golang.org/x/tools/internal/gcimporter/support_go118.go b/vendor/golang.org/x/tools/internal/gcimporter/support_go118.go deleted file mode 100644 index 0cd3b91b6..000000000 --- a/vendor/golang.org/x/tools/internal/gcimporter/support_go118.go +++ /dev/null @@ -1,34 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gcimporter - -import "go/types" - -const iexportVersion = iexportVersionGenerics - -// additionalPredeclared returns additional predeclared types in go.1.18. -func additionalPredeclared() []types.Type { - return []types.Type{ - // comparable - types.Universe.Lookup("comparable").Type(), - - // any - types.Universe.Lookup("any").Type(), - } -} - -// See cmd/compile/internal/types.SplitVargenSuffix. -func splitVargenSuffix(name string) (base, suffix string) { - i := len(name) - for i > 0 && name[i-1] >= '0' && name[i-1] <= '9' { - i-- - } - const dot = "·" - if i >= len(dot) && name[i-len(dot):i] == dot { - i -= len(dot) - return name[:i], name[i:] - } - return name, "" -} diff --git a/vendor/golang.org/x/tools/internal/gcimporter/unified_no.go b/vendor/golang.org/x/tools/internal/gcimporter/unified_no.go deleted file mode 100644 index 38b624cad..000000000 --- a/vendor/golang.org/x/tools/internal/gcimporter/unified_no.go +++ /dev/null @@ -1,10 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build !goexperiment.unified -// +build !goexperiment.unified - -package gcimporter - -const unifiedIR = false diff --git a/vendor/golang.org/x/tools/internal/gcimporter/unified_yes.go b/vendor/golang.org/x/tools/internal/gcimporter/unified_yes.go deleted file mode 100644 index b5118d0b3..000000000 --- a/vendor/golang.org/x/tools/internal/gcimporter/unified_yes.go +++ /dev/null @@ -1,10 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build goexperiment.unified -// +build goexperiment.unified - -package gcimporter - -const unifiedIR = true diff --git a/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go b/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go index 2c0770688..1db408613 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go @@ -52,8 +52,7 @@ func (pr *pkgReader) later(fn func()) { // See cmd/compile/internal/noder.derivedInfo. type derivedInfo struct { - idx pkgbits.Index - needed bool + idx pkgbits.Index } // See cmd/compile/internal/noder.typeInfo. @@ -110,13 +109,17 @@ func readUnifiedPackage(fset *token.FileSet, ctxt *types.Context, imports map[st r := pr.newReader(pkgbits.RelocMeta, pkgbits.PublicRootIdx, pkgbits.SyncPublic) pkg := r.pkg() - r.Bool() // has init + if r.Version().Has(pkgbits.HasInit) { + r.Bool() + } for i, n := 0, r.Len(); i < n; i++ { // As if r.obj(), but avoiding the Scope.Lookup call, // to avoid eager loading of imports. r.Sync(pkgbits.SyncObject) - assert(!r.Bool()) + if r.Version().Has(pkgbits.DerivedFuncInstance) { + assert(!r.Bool()) + } r.p.objIdx(r.Reloc(pkgbits.RelocObj)) assert(r.Len() == 0) } @@ -165,7 +168,7 @@ type readerDict struct { // tparams is a slice of the constructed TypeParams for the element. tparams []*types.TypeParam - // devived is a slice of types derived from tparams, which may be + // derived is a slice of types derived from tparams, which may be // instantiated while reading the current element. derived []derivedInfo derivedTypes []types.Type // lazily instantiated from derived @@ -471,7 +474,9 @@ func (r *reader) param() *types.Var { func (r *reader) obj() (types.Object, []types.Type) { r.Sync(pkgbits.SyncObject) - assert(!r.Bool()) + if r.Version().Has(pkgbits.DerivedFuncInstance) { + assert(!r.Bool()) + } pkg, name := r.p.objIdx(r.Reloc(pkgbits.RelocObj)) obj := pkgScope(pkg).Lookup(name) @@ -525,8 +530,12 @@ func (pr *pkgReader) objIdx(idx pkgbits.Index) (*types.Package, string) { case pkgbits.ObjAlias: pos := r.pos() + var tparams []*types.TypeParam + if r.Version().Has(pkgbits.AliasTypeParamNames) { + tparams = r.typeParamNames() + } typ := r.typ() - declare(aliases.NewAlias(r.p.aliases, pos, objPkg, objName, typ)) + declare(aliases.NewAlias(r.p.aliases, pos, objPkg, objName, typ, tparams)) case pkgbits.ObjConst: pos := r.pos() @@ -553,7 +562,7 @@ func (pr *pkgReader) objIdx(idx pkgbits.Index) (*types.Package, string) { // If the underlying type is an interface, we need to // duplicate its methods so we can replace the receiver // parameter's type (#49906). 
- if iface, ok := aliases.Unalias(underlying).(*types.Interface); ok && iface.NumExplicitMethods() != 0 { + if iface, ok := types.Unalias(underlying).(*types.Interface); ok && iface.NumExplicitMethods() != 0 { methods := make([]*types.Func, iface.NumExplicitMethods()) for i := range methods { fn := iface.ExplicitMethod(i) @@ -632,7 +641,10 @@ func (pr *pkgReader) objDictIdx(idx pkgbits.Index) *readerDict { dict.derived = make([]derivedInfo, r.Len()) dict.derivedTypes = make([]types.Type, len(dict.derived)) for i := range dict.derived { - dict.derived[i] = derivedInfo{r.Reloc(pkgbits.RelocType), r.Bool()} + dict.derived[i] = derivedInfo{idx: r.Reloc(pkgbits.RelocType)} + if r.Version().Has(pkgbits.DerivedInfoNeeded) { + assert(!r.Bool()) + } } pr.retireReader(r) @@ -726,3 +738,17 @@ func pkgScope(pkg *types.Package) *types.Scope { } return types.Universe } + +// See cmd/compile/internal/types.SplitVargenSuffix. +func splitVargenSuffix(name string) (base, suffix string) { + i := len(name) + for i > 0 && name[i-1] >= '0' && name[i-1] <= '9' { + i-- + } + const dot = "·" + if i >= len(dot) && name[i-len(dot):i] == dot { + i -= len(dot) + return name[:i], name[i:] + } + return name, "" +} diff --git a/vendor/golang.org/x/tools/internal/gocommand/invoke.go b/vendor/golang.org/x/tools/internal/gocommand/invoke.go index 2e59ff855..e333efc87 100644 --- a/vendor/golang.org/x/tools/internal/gocommand/invoke.go +++ b/vendor/golang.org/x/tools/internal/gocommand/invoke.go @@ -16,7 +16,6 @@ import ( "os" "os/exec" "path/filepath" - "reflect" "regexp" "runtime" "strconv" @@ -250,16 +249,13 @@ func (i *Invocation) run(ctx context.Context, stdout, stderr io.Writer) error { cmd.Stdout = stdout cmd.Stderr = stderr - // cmd.WaitDelay was added only in go1.20 (see #50436). - if waitDelay := reflect.ValueOf(cmd).Elem().FieldByName("WaitDelay"); waitDelay.IsValid() { - // https://go.dev/issue/59541: don't wait forever copying stderr - // after the command has exited. - // After CL 484741 we copy stdout manually, so we we'll stop reading that as - // soon as ctx is done. However, we also don't want to wait around forever - // for stderr. Give a much-longer-than-reasonable delay and then assume that - // something has wedged in the kernel or runtime. - waitDelay.Set(reflect.ValueOf(30 * time.Second)) - } + // https://go.dev/issue/59541: don't wait forever copying stderr + // after the command has exited. + // After CL 484741 we copy stdout manually, so we we'll stop reading that as + // soon as ctx is done. However, we also don't want to wait around forever + // for stderr. Give a much-longer-than-reasonable delay and then assume that + // something has wedged in the kernel or runtime. + cmd.WaitDelay = 30 * time.Second // The cwd gets resolved to the real path. 
On Darwin, where // /tmp is a symlink, this breaks anything that expects the diff --git a/vendor/golang.org/x/tools/internal/imports/fix.go b/vendor/golang.org/x/tools/internal/imports/fix.go index dc7d50a7a..c15108178 100644 --- a/vendor/golang.org/x/tools/internal/imports/fix.go +++ b/vendor/golang.org/x/tools/internal/imports/fix.go @@ -131,7 +131,7 @@ func parseOtherFiles(ctx context.Context, fset *token.FileSet, srcDir, filename continue } - f, err := parser.ParseFile(fset, filepath.Join(srcDir, fi.Name()), nil, 0) + f, err := parser.ParseFile(fset, filepath.Join(srcDir, fi.Name()), nil, parser.SkipObjectResolution) if err != nil { continue } @@ -1620,6 +1620,7 @@ func loadExportsFromFiles(ctx context.Context, env *ProcessEnv, dir string, incl } fullFile := filepath.Join(dir, fi.Name()) + // Legacy ast.Object resolution is needed here. f, err := parser.ParseFile(fset, fullFile, nil, 0) if err != nil { env.logf("error parsing %v: %v", fullFile, err) diff --git a/vendor/golang.org/x/tools/internal/imports/imports.go b/vendor/golang.org/x/tools/internal/imports/imports.go index f83465520..ff6b59a58 100644 --- a/vendor/golang.org/x/tools/internal/imports/imports.go +++ b/vendor/golang.org/x/tools/internal/imports/imports.go @@ -86,7 +86,7 @@ func ApplyFixes(fixes []*ImportFix, filename string, src []byte, opt *Options, e // Don't use parse() -- we don't care about fragments or statement lists // here, and we need to work with unparseable files. fileSet := token.NewFileSet() - parserMode := parser.Mode(0) + parserMode := parser.SkipObjectResolution if opt.Comments { parserMode |= parser.ParseComments } @@ -165,7 +165,7 @@ func formatFile(fset *token.FileSet, file *ast.File, src []byte, adjust func(ori // parse parses src, which was read from filename, // as a Go source file or statement list. func parse(fset *token.FileSet, filename string, src []byte, opt *Options) (*ast.File, func(orig, src []byte) []byte, error) { - parserMode := parser.Mode(0) + var parserMode parser.Mode // legacy ast.Object resolution is required here if opt.Comments { parserMode |= parser.ParseComments } diff --git a/vendor/golang.org/x/tools/internal/imports/mod.go b/vendor/golang.org/x/tools/internal/imports/mod.go index 91221fda3..8555e3f83 100644 --- a/vendor/golang.org/x/tools/internal/imports/mod.go +++ b/vendor/golang.org/x/tools/internal/imports/mod.go @@ -245,7 +245,10 @@ func newModuleResolver(e *ProcessEnv, moduleCacheCache *DirInfoCache) (*ModuleRe // 2. Use this to separate module cache scanning from other scanning. func gomodcacheForEnv(goenv map[string]string) string { if gmc := goenv["GOMODCACHE"]; gmc != "" { - return gmc + // golang/go#67156: ensure that the module cache is clean, since it is + // assumed as a prefix to directories scanned by gopathwalk, which are + // themselves clean. 
+ return filepath.Clean(gmc) } gopaths := filepath.SplitList(goenv["GOPATH"]) if len(gopaths) == 0 { @@ -740,8 +743,8 @@ func (r *ModuleResolver) loadExports(ctx context.Context, pkg *pkg, includeTest func (r *ModuleResolver) scanDirForPackage(root gopathwalk.Root, dir string) directoryPackageInfo { subdir := "" - if dir != root.Path { - subdir = dir[len(root.Path)+len("/"):] + if prefix := root.Path + string(filepath.Separator); strings.HasPrefix(dir, prefix) { + subdir = dir[len(prefix):] } importPath := filepath.ToSlash(subdir) if strings.HasPrefix(importPath, "vendor/") { diff --git a/vendor/golang.org/x/tools/internal/pkgbits/decoder.go b/vendor/golang.org/x/tools/internal/pkgbits/decoder.go index b92e8e6eb..f6cb37c5c 100644 --- a/vendor/golang.org/x/tools/internal/pkgbits/decoder.go +++ b/vendor/golang.org/x/tools/internal/pkgbits/decoder.go @@ -21,7 +21,7 @@ import ( // export data. type PkgDecoder struct { // version is the file format version. - version uint32 + version Version // sync indicates whether the file uses sync markers. sync bool @@ -68,8 +68,6 @@ func (pr *PkgDecoder) SyncMarkers() bool { return pr.sync } // NewPkgDecoder returns a PkgDecoder initialized to read the Unified // IR export data from input. pkgPath is the package path for the // compilation unit that produced the export data. -// -// TODO(mdempsky): Remove pkgPath parameter; unneeded since CL 391014. func NewPkgDecoder(pkgPath, input string) PkgDecoder { pr := PkgDecoder{ pkgPath: pkgPath, @@ -80,14 +78,15 @@ func NewPkgDecoder(pkgPath, input string) PkgDecoder { r := strings.NewReader(input) - assert(binary.Read(r, binary.LittleEndian, &pr.version) == nil) + var ver uint32 + assert(binary.Read(r, binary.LittleEndian, &ver) == nil) + pr.version = Version(ver) - switch pr.version { - default: - panic(fmt.Errorf("unsupported version: %v", pr.version)) - case 0: - // no flags - case 1: + if pr.version >= numVersions { + panic(fmt.Errorf("cannot decode %q, export data version %d is greater than maximum supported version %d", pkgPath, pr.version, numVersions-1)) + } + + if pr.version.Has(Flags) { var flags uint32 assert(binary.Read(r, binary.LittleEndian, &flags) == nil) pr.sync = flags&flagSyncMarkers != 0 @@ -102,7 +101,9 @@ func NewPkgDecoder(pkgPath, input string) PkgDecoder { assert(err == nil) pr.elemData = input[pos:] - assert(len(pr.elemData)-8 == int(pr.elemEnds[len(pr.elemEnds)-1])) + + const fingerprintSize = 8 + assert(len(pr.elemData)-fingerprintSize == int(pr.elemEnds[len(pr.elemEnds)-1])) return pr } @@ -136,7 +137,7 @@ func (pr *PkgDecoder) AbsIdx(k RelocKind, idx Index) int { absIdx += int(pr.elemEndsEnds[k-1]) } if absIdx >= int(pr.elemEndsEnds[k]) { - errorf("%v:%v is out of bounds; %v", k, idx, pr.elemEndsEnds) + panicf("%v:%v is out of bounds; %v", k, idx, pr.elemEndsEnds) } return absIdx } @@ -193,9 +194,7 @@ func (pr *PkgDecoder) NewDecoderRaw(k RelocKind, idx Index) Decoder { Idx: idx, } - // TODO(mdempsky) r.data.Reset(...) after #44505 is resolved. - r.Data = *strings.NewReader(pr.DataIdx(k, idx)) - + r.Data.Reset(pr.DataIdx(k, idx)) r.Sync(SyncRelocs) r.Relocs = make([]RelocEnt, r.Len()) for i := range r.Relocs { @@ -244,7 +243,7 @@ type Decoder struct { func (r *Decoder) checkErr(err error) { if err != nil { - errorf("unexpected decoding error: %w", err) + panicf("unexpected decoding error: %w", err) } } @@ -515,3 +514,6 @@ func (pr *PkgDecoder) PeekObj(idx Index) (string, string, CodeObj) { return path, name, tag } + +// Version reports the version of the bitstream. 
+func (w *Decoder) Version() Version { return w.common.version } diff --git a/vendor/golang.org/x/tools/internal/pkgbits/encoder.go b/vendor/golang.org/x/tools/internal/pkgbits/encoder.go index 6482617a4..c17a12399 100644 --- a/vendor/golang.org/x/tools/internal/pkgbits/encoder.go +++ b/vendor/golang.org/x/tools/internal/pkgbits/encoder.go @@ -12,18 +12,15 @@ import ( "io" "math/big" "runtime" + "strings" ) -// currentVersion is the current version number. -// -// - v0: initial prototype -// -// - v1: adds the flags uint32 word -const currentVersion uint32 = 1 - // A PkgEncoder provides methods for encoding a package's Unified IR // export data. type PkgEncoder struct { + // version of the bitstream. + version Version + // elems holds the bitstream for previously encoded elements. elems [numRelocs][]string @@ -47,8 +44,9 @@ func (pw *PkgEncoder) SyncMarkers() bool { return pw.syncFrames >= 0 } // export data files, but can help diagnosing desync errors in // higher-level Unified IR reader/writer code. If syncFrames is // negative, then sync markers are omitted entirely. -func NewPkgEncoder(syncFrames int) PkgEncoder { +func NewPkgEncoder(version Version, syncFrames int) PkgEncoder { return PkgEncoder{ + version: version, stringsIdx: make(map[string]Index), syncFrames: syncFrames, } @@ -64,13 +62,15 @@ func (pw *PkgEncoder) DumpTo(out0 io.Writer) (fingerprint [8]byte) { assert(binary.Write(out, binary.LittleEndian, x) == nil) } - writeUint32(currentVersion) + writeUint32(uint32(pw.version)) - var flags uint32 - if pw.SyncMarkers() { - flags |= flagSyncMarkers + if pw.version.Has(Flags) { + var flags uint32 + if pw.SyncMarkers() { + flags |= flagSyncMarkers + } + writeUint32(flags) } - writeUint32(flags) // Write elemEndsEnds. var sum uint32 @@ -159,7 +159,7 @@ type Encoder struct { // Flush finalizes the element's bitstream and returns its Index. func (w *Encoder) Flush() Index { - var sb bytes.Buffer // TODO(mdempsky): strings.Builder after #44505 is resolved + var sb strings.Builder // Backup the data so we write the relocations at the front. var tmp bytes.Buffer @@ -189,7 +189,7 @@ func (w *Encoder) Flush() Index { func (w *Encoder) checkErr(err error) { if err != nil { - errorf("unexpected encoding error: %v", err) + panicf("unexpected encoding error: %v", err) } } @@ -320,8 +320,14 @@ func (w *Encoder) Code(c Code) { // section (if not already present), and then writing a relocation // into the element bitstream. func (w *Encoder) String(s string) { + w.StringRef(w.p.StringIdx(s)) +} + +// StringRef writes a reference to the given index, which must be a +// previously encoded string value. +func (w *Encoder) StringRef(idx Index) { w.Sync(SyncString) - w.Reloc(RelocString, w.p.StringIdx(s)) + w.Reloc(RelocString, idx) } // Strings encodes and writes a variable-length slice of strings into @@ -348,7 +354,7 @@ func (w *Encoder) Value(val constant.Value) { func (w *Encoder) scalar(val constant.Value) { switch v := constant.Val(val).(type) { default: - errorf("unhandled %v (%v)", val, val.Kind()) + panicf("unhandled %v (%v)", val, val.Kind()) case bool: w.Code(ValBool) w.Bool(v) @@ -381,3 +387,6 @@ func (w *Encoder) bigFloat(v *big.Float) { b := v.Append(nil, 'p', -1) w.String(string(b)) // TODO: More efficient encoding. } + +// Version reports the version of the bitstream. 
+func (w *Encoder) Version() Version { return w.p.version } diff --git a/vendor/golang.org/x/tools/internal/pkgbits/frames_go1.go b/vendor/golang.org/x/tools/internal/pkgbits/frames_go1.go deleted file mode 100644 index 5294f6a63..000000000 --- a/vendor/golang.org/x/tools/internal/pkgbits/frames_go1.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.7 -// +build !go1.7 - -// TODO(mdempsky): Remove after #44505 is resolved - -package pkgbits - -import "runtime" - -func walkFrames(pcs []uintptr, visit frameVisitor) { - for _, pc := range pcs { - fn := runtime.FuncForPC(pc) - file, line := fn.FileLine(pc) - - visit(file, line, fn.Name(), pc-fn.Entry()) - } -} diff --git a/vendor/golang.org/x/tools/internal/pkgbits/frames_go17.go b/vendor/golang.org/x/tools/internal/pkgbits/frames_go17.go deleted file mode 100644 index 2324ae7ad..000000000 --- a/vendor/golang.org/x/tools/internal/pkgbits/frames_go17.go +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.7 -// +build go1.7 - -package pkgbits - -import "runtime" - -// walkFrames calls visit for each call frame represented by pcs. -// -// pcs should be a slice of PCs, as returned by runtime.Callers. -func walkFrames(pcs []uintptr, visit frameVisitor) { - if len(pcs) == 0 { - return - } - - frames := runtime.CallersFrames(pcs) - for { - frame, more := frames.Next() - visit(frame.File, frame.Line, frame.Function, frame.PC-frame.Entry) - if !more { - return - } - } -} diff --git a/vendor/golang.org/x/tools/internal/pkgbits/support.go b/vendor/golang.org/x/tools/internal/pkgbits/support.go index ad26d3b28..50534a295 100644 --- a/vendor/golang.org/x/tools/internal/pkgbits/support.go +++ b/vendor/golang.org/x/tools/internal/pkgbits/support.go @@ -12,6 +12,6 @@ func assert(b bool) { } } -func errorf(format string, args ...interface{}) { +func panicf(format string, args ...any) { panic(fmt.Errorf(format, args...)) } diff --git a/vendor/golang.org/x/tools/internal/pkgbits/sync.go b/vendor/golang.org/x/tools/internal/pkgbits/sync.go index 5bd51ef71..1520b73af 100644 --- a/vendor/golang.org/x/tools/internal/pkgbits/sync.go +++ b/vendor/golang.org/x/tools/internal/pkgbits/sync.go @@ -6,6 +6,7 @@ package pkgbits import ( "fmt" + "runtime" "strings" ) @@ -23,6 +24,24 @@ func fmtFrames(pcs ...uintptr) []string { type frameVisitor func(file string, line int, name string, offset uintptr) +// walkFrames calls visit for each call frame represented by pcs. +// +// pcs should be a slice of PCs, as returned by runtime.Callers. +func walkFrames(pcs []uintptr, visit frameVisitor) { + if len(pcs) == 0 { + return + } + + frames := runtime.CallersFrames(pcs) + for { + frame, more := frames.Next() + visit(frame.File, frame.Line, frame.Function, frame.PC-frame.Entry) + if !more { + return + } + } +} + // SyncMarker is an enum type that represents markers that may be // written to export data to ensure the reader and writer stay // synchronized. 
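Aside (illustrative, not part of the vendored patch): the sync markers mentioned in the SyncMarker comment above exist so that the pkgbits reader fails fast, with useful diagnostics, when it drifts out of alignment with the writer. The toy encoder/decoder pair below, using assumed names, shows the idea only; it is not pkgbits' actual wire format.

package main

import "fmt"

// toyMarker mirrors the idea of a sync marker: a small value written into the
// stream solely so the reader can check it is still aligned with the writer.
type toyMarker byte

const (
	markString toyMarker = iota + 1
	markInt
)

type toyWriter struct{ buf []byte }

// sync records which kind of section the writer is about to emit.
func (w *toyWriter) sync(m toyMarker) { w.buf = append(w.buf, byte(m)) }

type toyReader struct {
	buf []byte
	pos int
}

// sync verifies that the next marker matches what the reader expects,
// reporting a desync at the first mismatch instead of misreading later data.
func (r *toyReader) sync(want toyMarker) error {
	if r.pos >= len(r.buf) {
		return fmt.Errorf("desync: stream exhausted, want marker %d", want)
	}
	got := toyMarker(r.buf[r.pos])
	r.pos++
	if got != want {
		return fmt.Errorf("desync: got marker %d, want %d", got, want)
	}
	return nil
}

func main() {
	w := &toyWriter{}
	w.sync(markString) // writer announces a string section

	r := &toyReader{buf: w.buf}
	if err := r.sync(markInt); err != nil { // reader expects an int section
		fmt.Println(err) // desync: got marker 1, want 2
	}
}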
@@ -110,4 +129,8 @@ const ( SyncStmtsEnd SyncLabel SyncOptLabel + + SyncMultiExpr + SyncRType + SyncConvRTTI ) diff --git a/vendor/golang.org/x/tools/internal/pkgbits/syncmarker_string.go b/vendor/golang.org/x/tools/internal/pkgbits/syncmarker_string.go index 4a5b0ca5f..582ad56d3 100644 --- a/vendor/golang.org/x/tools/internal/pkgbits/syncmarker_string.go +++ b/vendor/golang.org/x/tools/internal/pkgbits/syncmarker_string.go @@ -74,11 +74,14 @@ func _() { _ = x[SyncStmtsEnd-64] _ = x[SyncLabel-65] _ = x[SyncOptLabel-66] + _ = x[SyncMultiExpr-67] + _ = x[SyncRType-68] + _ = x[SyncConvRTTI-69] } -const _SyncMarker_name = "EOFBoolInt64Uint64StringValueValRelocsRelocUseRelocPublicPosPosBaseObjectObject1PkgPkgDefMethodTypeTypeIdxTypeParamNamesSignatureParamsParamCodeObjSymLocalIdentSelectorPrivateFuncExtVarExtTypeExtPragmaExprListExprsExprExprTypeAssignOpFuncLitCompLitDeclFuncBodyOpenScopeCloseScopeCloseAnotherScopeDeclNamesDeclNameStmtsBlockStmtIfStmtForStmtSwitchStmtRangeStmtCaseClauseCommClauseSelectStmtDeclsLabeledStmtUseObjLocalAddLocalLinknameStmt1StmtsEndLabelOptLabel" +const _SyncMarker_name = "EOFBoolInt64Uint64StringValueValRelocsRelocUseRelocPublicPosPosBaseObjectObject1PkgPkgDefMethodTypeTypeIdxTypeParamNamesSignatureParamsParamCodeObjSymLocalIdentSelectorPrivateFuncExtVarExtTypeExtPragmaExprListExprsExprExprTypeAssignOpFuncLitCompLitDeclFuncBodyOpenScopeCloseScopeCloseAnotherScopeDeclNamesDeclNameStmtsBlockStmtIfStmtForStmtSwitchStmtRangeStmtCaseClauseCommClauseSelectStmtDeclsLabeledStmtUseObjLocalAddLocalLinknameStmt1StmtsEndLabelOptLabelMultiExprRTypeConvRTTI" -var _SyncMarker_index = [...]uint16{0, 3, 7, 12, 18, 24, 29, 32, 38, 43, 51, 57, 60, 67, 73, 80, 83, 89, 95, 99, 106, 120, 129, 135, 140, 147, 150, 160, 168, 175, 182, 188, 195, 201, 209, 214, 218, 226, 232, 234, 241, 248, 252, 260, 269, 279, 296, 305, 313, 318, 327, 333, 340, 350, 359, 369, 379, 389, 394, 405, 416, 424, 432, 437, 445, 450, 458} +var _SyncMarker_index = [...]uint16{0, 3, 7, 12, 18, 24, 29, 32, 38, 43, 51, 57, 60, 67, 73, 80, 83, 89, 95, 99, 106, 120, 129, 135, 140, 147, 150, 160, 168, 175, 182, 188, 195, 201, 209, 214, 218, 226, 232, 234, 241, 248, 252, 260, 269, 279, 296, 305, 313, 318, 327, 333, 340, 350, 359, 369, 379, 389, 394, 405, 416, 424, 432, 437, 445, 450, 458, 467, 472, 480} func (i SyncMarker) String() string { i -= 1 diff --git a/vendor/golang.org/x/tools/internal/pkgbits/version.go b/vendor/golang.org/x/tools/internal/pkgbits/version.go new file mode 100644 index 000000000..53af9df22 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/pkgbits/version.go @@ -0,0 +1,85 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pkgbits + +// Version indicates a version of a unified IR bitstream. +// Each Version indicates the addition, removal, or change of +// new data in the bitstream. +// +// These are serialized to disk and the interpretation remains fixed. +type Version uint32 + +const ( + // V0: initial prototype. + // + // All data that is not assigned a Field is in version V0 + // and has not been deprecated. + V0 Version = iota + + // V1: adds the Flags uint32 word + V1 + + // V2: removes unused legacy fields and supports type parameters for aliases. 
+ // - remove the legacy "has init" bool from the public root + // - remove obj's "derived func instance" bool + // - add a TypeParamNames field to ObjAlias + // - remove derived info "needed" bool + V2 + + numVersions = iota +) + +// Field denotes a unit of data in the serialized unified IR bitstream. +// It is conceptually a like field in a structure. +// +// We only really need Fields when the data may or may not be present +// in a stream based on the Version of the bitstream. +// +// Unlike much of pkgbits, Fields are not serialized and +// can change values as needed. +type Field int + +const ( + // Flags in a uint32 in the header of a bitstream + // that is used to indicate whether optional features are enabled. + Flags Field = iota + + // Deprecated: HasInit was a bool indicating whether a package + // has any init functions. + HasInit + + // Deprecated: DerivedFuncInstance was a bool indicating + // whether an object was a function instance. + DerivedFuncInstance + + // ObjAlias has a list of TypeParamNames. + AliasTypeParamNames + + // Deprecated: DerivedInfoNeeded was a bool indicating + // whether a type was a derived type. + DerivedInfoNeeded + + numFields = iota +) + +// introduced is the version a field was added. +var introduced = [numFields]Version{ + Flags: V1, + AliasTypeParamNames: V2, +} + +// removed is the version a field was removed in or 0 for fields +// that have not yet been deprecated. +// (So removed[f]-1 is the last version it is included in.) +var removed = [numFields]Version{ + HasInit: V2, + DerivedFuncInstance: V2, + DerivedInfoNeeded: V2, +} + +// Has reports whether field f is present in a bitstream at version v. +func (v Version) Has(f Field) bool { + return introduced[f] <= v && (v < removed[f] || removed[f] == V0) +} diff --git a/vendor/golang.org/x/tools/internal/stdlib/manifest.go b/vendor/golang.org/x/tools/internal/stdlib/manifest.go index a928acf29..cdaac9ab3 100644 --- a/vendor/golang.org/x/tools/internal/stdlib/manifest.go +++ b/vendor/golang.org/x/tools/internal/stdlib/manifest.go @@ -951,7 +951,7 @@ var PackageSymbols = map[string][]Symbol{ {"ParseSessionState", Func, 21}, {"QUICClient", Func, 21}, {"QUICConfig", Type, 21}, - {"QUICConfig.EnableStoreSessionEvent", Field, 23}, + {"QUICConfig.EnableSessionEvents", Field, 23}, {"QUICConfig.TLSConfig", Field, 21}, {"QUICConn", Type, 21}, {"QUICEncryptionLevel", Type, 21}, diff --git a/vendor/golang.org/x/tools/internal/tokeninternal/tokeninternal.go b/vendor/golang.org/x/tools/internal/tokeninternal/tokeninternal.go deleted file mode 100644 index ff9437a36..000000000 --- a/vendor/golang.org/x/tools/internal/tokeninternal/tokeninternal.go +++ /dev/null @@ -1,137 +0,0 @@ -// Copyright 2023 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// package tokeninternal provides access to some internal features of the token -// package. -package tokeninternal - -import ( - "fmt" - "go/token" - "sort" - "sync" - "unsafe" -) - -// GetLines returns the table of line-start offsets from a token.File. -func GetLines(file *token.File) []int { - // token.File has a Lines method on Go 1.21 and later. - if file, ok := (interface{})(file).(interface{ Lines() []int }); ok { - return file.Lines() - } - - // This declaration must match that of token.File. - // This creates a risk of dependency skew. 
- // For now we check that the size of the two - // declarations is the same, on the (fragile) assumption - // that future changes would add fields. - type tokenFile119 struct { - _ string - _ int - _ int - mu sync.Mutex // we're not complete monsters - lines []int - _ []struct{} - } - - if unsafe.Sizeof(*file) != unsafe.Sizeof(tokenFile119{}) { - panic("unexpected token.File size") - } - var ptr *tokenFile119 - type uP = unsafe.Pointer - *(*uP)(uP(&ptr)) = uP(file) - ptr.mu.Lock() - defer ptr.mu.Unlock() - return ptr.lines -} - -// AddExistingFiles adds the specified files to the FileSet if they -// are not already present. It panics if any pair of files in the -// resulting FileSet would overlap. -func AddExistingFiles(fset *token.FileSet, files []*token.File) { - // Punch through the FileSet encapsulation. - type tokenFileSet struct { - // This type remained essentially consistent from go1.16 to go1.21. - mutex sync.RWMutex - base int - files []*token.File - _ *token.File // changed to atomic.Pointer[token.File] in go1.19 - } - - // If the size of token.FileSet changes, this will fail to compile. - const delta = int64(unsafe.Sizeof(tokenFileSet{})) - int64(unsafe.Sizeof(token.FileSet{})) - var _ [-delta * delta]int - - type uP = unsafe.Pointer - var ptr *tokenFileSet - *(*uP)(uP(&ptr)) = uP(fset) - ptr.mutex.Lock() - defer ptr.mutex.Unlock() - - // Merge and sort. - newFiles := append(ptr.files, files...) - sort.Slice(newFiles, func(i, j int) bool { - return newFiles[i].Base() < newFiles[j].Base() - }) - - // Reject overlapping files. - // Discard adjacent identical files. - out := newFiles[:0] - for i, file := range newFiles { - if i > 0 { - prev := newFiles[i-1] - if file == prev { - continue - } - if prev.Base()+prev.Size()+1 > file.Base() { - panic(fmt.Sprintf("file %s (%d-%d) overlaps with file %s (%d-%d)", - prev.Name(), prev.Base(), prev.Base()+prev.Size(), - file.Name(), file.Base(), file.Base()+file.Size())) - } - } - out = append(out, file) - } - newFiles = out - - ptr.files = newFiles - - // Advance FileSet.Base(). - if len(newFiles) > 0 { - last := newFiles[len(newFiles)-1] - newBase := last.Base() + last.Size() + 1 - if ptr.base < newBase { - ptr.base = newBase - } - } -} - -// FileSetFor returns a new FileSet containing a sequence of new Files with -// the same base, size, and line as the input files, for use in APIs that -// require a FileSet. -// -// Precondition: the input files must be non-overlapping, and sorted in order -// of their Base. -func FileSetFor(files ...*token.File) *token.FileSet { - fset := token.NewFileSet() - for _, f := range files { - f2 := fset.AddFile(f.Name(), f.Base(), f.Size()) - lines := GetLines(f) - f2.SetLines(lines) - } - return fset -} - -// CloneFileSet creates a new FileSet holding all files in fset. It does not -// create copies of the token.Files in fset: they are added to the resulting -// FileSet unmodified. 
-func CloneFileSet(fset *token.FileSet) *token.FileSet { - var files []*token.File - fset.Iterate(func(f *token.File) bool { - files = append(files, f) - return true - }) - newFileSet := token.NewFileSet() - AddExistingFiles(newFileSet, files) - return newFileSet -} diff --git a/vendor/golang.org/x/tools/internal/typeparams/common.go b/vendor/golang.org/x/tools/internal/typeparams/common.go index 89bd256dc..0b84acc5c 100644 --- a/vendor/golang.org/x/tools/internal/typeparams/common.go +++ b/vendor/golang.org/x/tools/internal/typeparams/common.go @@ -16,8 +16,6 @@ import ( "go/ast" "go/token" "go/types" - - "golang.org/x/tools/internal/aliases" ) // UnpackIndexExpr extracts data from AST nodes that represent index @@ -65,7 +63,7 @@ func PackIndexExpr(x ast.Expr, lbrack token.Pos, indices []ast.Expr, rbrack toke // IsTypeParam reports whether t is a type parameter (or an alias of one). func IsTypeParam(t types.Type) bool { - _, ok := aliases.Unalias(t).(*types.TypeParam) + _, ok := types.Unalias(t).(*types.TypeParam) return ok } @@ -93,8 +91,8 @@ func IsTypeParam(t types.Type) bool { // In this case, GenericAssignableTo reports that instantiations of Container // are assignable to the corresponding instantiation of Interface. func GenericAssignableTo(ctxt *types.Context, V, T types.Type) bool { - V = aliases.Unalias(V) - T = aliases.Unalias(T) + V = types.Unalias(V) + T = types.Unalias(T) // If V and T are not both named, or do not have matching non-empty type // parameter lists, fall back on types.AssignableTo. diff --git a/vendor/golang.org/x/tools/internal/typeparams/free.go b/vendor/golang.org/x/tools/internal/typeparams/free.go index a1d138226..358108268 100644 --- a/vendor/golang.org/x/tools/internal/typeparams/free.go +++ b/vendor/golang.org/x/tools/internal/typeparams/free.go @@ -6,8 +6,6 @@ package typeparams import ( "go/types" - - "golang.org/x/tools/internal/aliases" ) // Free is a memoization of the set of free type parameters within a @@ -37,8 +35,8 @@ func (w *Free) Has(typ types.Type) (res bool) { case nil, *types.Basic: // TODO(gri) should nil be handled here? break - case *aliases.Alias: - return w.Has(aliases.Unalias(t)) + case *types.Alias: + return w.Has(types.Unalias(t)) case *types.Array: return w.Has(t.Elem()) diff --git a/vendor/golang.org/x/tools/internal/typesinternal/element.go b/vendor/golang.org/x/tools/internal/typesinternal/element.go new file mode 100644 index 000000000..4957f0216 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/typesinternal/element.go @@ -0,0 +1,133 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package typesinternal + +import ( + "fmt" + "go/types" + + "golang.org/x/tools/go/types/typeutil" +) + +// ForEachElement calls f for type T and each type reachable from its +// type through reflection. It does this by recursively stripping off +// type constructors; in addition, for each named type N, the type *N +// is added to the result as it may have additional methods. +// +// The caller must provide an initially empty set used to de-duplicate +// identical types, potentially across multiple calls to ForEachElement. +// (Its final value holds all the elements seen, matching the arguments +// passed to f.) +// +// TODO(adonovan): share/harmonize with go/callgraph/rta. 
+func ForEachElement(rtypes *typeutil.Map, msets *typeutil.MethodSetCache, T types.Type, f func(types.Type)) { + var visit func(T types.Type, skip bool) + visit = func(T types.Type, skip bool) { + if !skip { + if seen, _ := rtypes.Set(T, true).(bool); seen { + return // de-dup + } + + f(T) // notify caller of new element type + } + + // Recursion over signatures of each method. + tmset := msets.MethodSet(T) + for i := 0; i < tmset.Len(); i++ { + sig := tmset.At(i).Type().(*types.Signature) + // It is tempting to call visit(sig, false) + // but, as noted in golang.org/cl/65450043, + // the Signature.Recv field is ignored by + // types.Identical and typeutil.Map, which + // is confusing at best. + // + // More importantly, the true signature rtype + // reachable from a method using reflection + // has no receiver but an extra ordinary parameter. + // For the Read method of io.Reader we want: + // func(Reader, []byte) (int, error) + // but here sig is: + // func([]byte) (int, error) + // with .Recv = Reader (though it is hard to + // notice because it doesn't affect Signature.String + // or types.Identical). + // + // TODO(adonovan): construct and visit the correct + // non-method signature with an extra parameter + // (though since unnamed func types have no methods + // there is essentially no actual demand for this). + // + // TODO(adonovan): document whether or not it is + // safe to skip non-exported methods (as RTA does). + visit(sig.Params(), true) // skip the Tuple + visit(sig.Results(), true) // skip the Tuple + } + + switch T := T.(type) { + case *types.Alias: + visit(types.Unalias(T), skip) // emulates the pre-Alias behavior + + case *types.Basic: + // nop + + case *types.Interface: + // nop---handled by recursion over method set. + + case *types.Pointer: + visit(T.Elem(), false) + + case *types.Slice: + visit(T.Elem(), false) + + case *types.Chan: + visit(T.Elem(), false) + + case *types.Map: + visit(T.Key(), false) + visit(T.Elem(), false) + + case *types.Signature: + if T.Recv() != nil { + panic(fmt.Sprintf("Signature %s has Recv %s", T, T.Recv())) + } + visit(T.Params(), true) // skip the Tuple + visit(T.Results(), true) // skip the Tuple + + case *types.Named: + // A pointer-to-named type can be derived from a named + // type via reflection. It may have methods too. + visit(types.NewPointer(T), false) + + // Consider 'type T struct{S}' where S has methods. + // Reflection provides no way to get from T to struct{S}, + // only to S, so the method set of struct{S} is unwanted, + // so set 'skip' flag during recursion. + visit(T.Underlying(), true) // skip the unnamed type + + case *types.Array: + visit(T.Elem(), false) + + case *types.Struct: + for i, n := 0, T.NumFields(); i < n; i++ { + // TODO(adonovan): document whether or not + // it is safe to skip non-exported fields. + visit(T.Field(i).Type(), false) + } + + case *types.Tuple: + for i, n := 0, T.Len(); i < n; i++ { + visit(T.At(i).Type(), false) + } + + case *types.TypeParam, *types.Union: + // forEachReachable must not be called on parameterized types. 
+ panic(T) + + default: + panic(T) + } + } + visit(T, false) +} diff --git a/vendor/golang.org/x/tools/internal/typesinternal/errorcode.go b/vendor/golang.org/x/tools/internal/typesinternal/errorcode.go index 834e05381..131caab28 100644 --- a/vendor/golang.org/x/tools/internal/typesinternal/errorcode.go +++ b/vendor/golang.org/x/tools/internal/typesinternal/errorcode.go @@ -838,7 +838,7 @@ const ( // InvalidCap occurs when an argument to the cap built-in function is not of // supported type. // - // See https://golang.org/ref/spec#Lengthand_capacity for information on + // See https://golang.org/ref/spec#Length_and_capacity for information on // which underlying types are supported as arguments to cap and len. // // Example: @@ -859,7 +859,7 @@ const ( // InvalidCopy occurs when the arguments are not of slice type or do not // have compatible type. // - // See https://golang.org/ref/spec#Appendingand_copying_slices for more + // See https://golang.org/ref/spec#Appending_and_copying_slices for more // information on the type requirements for the copy built-in. // // Example: @@ -897,7 +897,7 @@ const ( // InvalidLen occurs when an argument to the len built-in function is not of // supported type. // - // See https://golang.org/ref/spec#Lengthand_capacity for information on + // See https://golang.org/ref/spec#Length_and_capacity for information on // which underlying types are supported as arguments to cap and len. // // Example: @@ -914,7 +914,7 @@ const ( // InvalidMake occurs when make is called with an unsupported type argument. // - // See https://golang.org/ref/spec#Makingslices_maps_and_channels for + // See https://golang.org/ref/spec#Making_slices_maps_and_channels for // information on the types that may be created using make. // // Example: diff --git a/vendor/golang.org/x/tools/internal/typesinternal/recv.go b/vendor/golang.org/x/tools/internal/typesinternal/recv.go index fea7c8b75..ba6f4f4eb 100644 --- a/vendor/golang.org/x/tools/internal/typesinternal/recv.go +++ b/vendor/golang.org/x/tools/internal/typesinternal/recv.go @@ -6,8 +6,6 @@ package typesinternal import ( "go/types" - - "golang.org/x/tools/internal/aliases" ) // ReceiverNamed returns the named type (if any) associated with the @@ -15,11 +13,11 @@ import ( // It also reports whether a Pointer was present. func ReceiverNamed(recv *types.Var) (isPtr bool, named *types.Named) { t := recv.Type() - if ptr, ok := aliases.Unalias(t).(*types.Pointer); ok { + if ptr, ok := types.Unalias(t).(*types.Pointer); ok { isPtr = true t = ptr.Elem() } - named, _ = aliases.Unalias(t).(*types.Named) + named, _ = types.Unalias(t).(*types.Named) return } @@ -36,7 +34,7 @@ func ReceiverNamed(recv *types.Var) (isPtr bool, named *types.Named) { // indirection from the type, regardless of named types (analogous to // a LOAD instruction). func Unpointer(t types.Type) types.Type { - if ptr, ok := aliases.Unalias(t).(*types.Pointer); ok { + if ptr, ok := types.Unalias(t).(*types.Pointer); ok { return ptr.Elem() } return t diff --git a/vendor/golang.org/x/tools/internal/versions/toolchain.go b/vendor/golang.org/x/tools/internal/versions/toolchain.go deleted file mode 100644 index 377bf7a53..000000000 --- a/vendor/golang.org/x/tools/internal/versions/toolchain.go +++ /dev/null @@ -1,14 +0,0 @@ -// Copyright 2024 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package versions - -// toolchain is maximum version (<1.22) that the go toolchain used -// to build the current tool is known to support. -// -// When a tool is built with >=1.22, the value of toolchain is unused. -// -// x/tools does not support building with go <1.18. So we take this -// as the minimum possible maximum. -var toolchain string = Go1_18 diff --git a/vendor/golang.org/x/tools/internal/versions/toolchain_go119.go b/vendor/golang.org/x/tools/internal/versions/toolchain_go119.go deleted file mode 100644 index f65beed9d..000000000 --- a/vendor/golang.org/x/tools/internal/versions/toolchain_go119.go +++ /dev/null @@ -1,14 +0,0 @@ -// Copyright 2024 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.19 -// +build go1.19 - -package versions - -func init() { - if Compare(toolchain, Go1_19) < 0 { - toolchain = Go1_19 - } -} diff --git a/vendor/golang.org/x/tools/internal/versions/toolchain_go121.go b/vendor/golang.org/x/tools/internal/versions/toolchain_go121.go deleted file mode 100644 index b7ef216df..000000000 --- a/vendor/golang.org/x/tools/internal/versions/toolchain_go121.go +++ /dev/null @@ -1,14 +0,0 @@ -// Copyright 2024 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.21 -// +build go1.21 - -package versions - -func init() { - if Compare(toolchain, Go1_21) < 0 { - toolchain = Go1_21 - } -} diff --git a/vendor/golang.org/x/tools/internal/versions/types.go b/vendor/golang.org/x/tools/internal/versions/types.go index 562eef21f..f0bb0d15f 100644 --- a/vendor/golang.org/x/tools/internal/versions/types.go +++ b/vendor/golang.org/x/tools/internal/versions/types.go @@ -5,15 +5,34 @@ package versions import ( + "go/ast" "go/types" ) -// GoVersion returns the Go version of the type package. -// It returns zero if no version can be determined. -func GoVersion(pkg *types.Package) string { - // TODO(taking): x/tools can call GoVersion() [from 1.21] after 1.25. - if pkg, ok := any(pkg).(interface{ GoVersion() string }); ok { - return pkg.GoVersion() +// FileVersion returns a file's Go version. +// The reported version is an unknown Future version if a +// version cannot be determined. +func FileVersion(info *types.Info, file *ast.File) string { + // In tools built with Go >= 1.22, the Go version of a file + // follow a cascades of sources: + // 1) types.Info.FileVersion, which follows the cascade: + // 1.a) file version (ast.File.GoVersion), + // 1.b) the package version (types.Config.GoVersion), or + // 2) is some unknown Future version. + // + // File versions require a valid package version to be provided to types + // in Config.GoVersion. Config.GoVersion is either from the package's module + // or the toolchain (go run). This value should be provided by go/packages + // or unitchecker.Config.GoVersion. + if v := info.FileVersions[file]; IsValid(v) { + return v } - return "" + // Note: we could instead return runtime.Version() [if valid]. + // This would act as a max version on what a tool can support. + return Future +} + +// InitFileVersions initializes info to record Go versions for Go files. 
+func InitFileVersions(info *types.Info) { + info.FileVersions = make(map[*ast.File]string) } diff --git a/vendor/golang.org/x/tools/internal/versions/types_go121.go b/vendor/golang.org/x/tools/internal/versions/types_go121.go deleted file mode 100644 index b4345d334..000000000 --- a/vendor/golang.org/x/tools/internal/versions/types_go121.go +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright 2023 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.22 -// +build !go1.22 - -package versions - -import ( - "go/ast" - "go/types" -) - -// FileVersion returns a language version (<=1.21) derived from runtime.Version() -// or an unknown future version. -func FileVersion(info *types.Info, file *ast.File) string { - // In x/tools built with Go <= 1.21, we do not have Info.FileVersions - // available. We use a go version derived from the toolchain used to - // compile the tool by default. - // This will be <= go1.21. We take this as the maximum version that - // this tool can support. - // - // There are no features currently in x/tools that need to tell fine grained - // differences for versions <1.22. - return toolchain -} - -// InitFileVersions is a noop when compiled with this Go version. -func InitFileVersions(*types.Info) {} diff --git a/vendor/golang.org/x/tools/internal/versions/types_go122.go b/vendor/golang.org/x/tools/internal/versions/types_go122.go deleted file mode 100644 index aac5db62c..000000000 --- a/vendor/golang.org/x/tools/internal/versions/types_go122.go +++ /dev/null @@ -1,41 +0,0 @@ -// Copyright 2023 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.22 -// +build go1.22 - -package versions - -import ( - "go/ast" - "go/types" -) - -// FileVersion returns a file's Go version. -// The reported version is an unknown Future version if a -// version cannot be determined. -func FileVersion(info *types.Info, file *ast.File) string { - // In tools built with Go >= 1.22, the Go version of a file - // follow a cascades of sources: - // 1) types.Info.FileVersion, which follows the cascade: - // 1.a) file version (ast.File.GoVersion), - // 1.b) the package version (types.Config.GoVersion), or - // 2) is some unknown Future version. - // - // File versions require a valid package version to be provided to types - // in Config.GoVersion. Config.GoVersion is either from the package's module - // or the toolchain (go run). This value should be provided by go/packages - // or unitchecker.Config.GoVersion. - if v := info.FileVersions[file]; IsValid(v) { - return v - } - // Note: we could instead return runtime.Version() [if valid]. - // This would act as a max version on what a tool can support. - return Future -} - -// InitFileVersions initializes info to record Go versions for Go files. 
-func InitFileVersions(info *types.Info) { - info.FileVersions = make(map[*ast.File]string) -} diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go index aa69fb4d5..4a9fce53c 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go @@ -180,6 +180,8 @@ type CommonLanguageSettings struct { ReferenceDocsUri string `protobuf:"bytes,1,opt,name=reference_docs_uri,json=referenceDocsUri,proto3" json:"reference_docs_uri,omitempty"` // The destination where API teams want this client library to be published. Destinations []ClientLibraryDestination `protobuf:"varint,2,rep,packed,name=destinations,proto3,enum=google.api.ClientLibraryDestination" json:"destinations,omitempty"` + // Configuration for which RPCs should be generated in the GAPIC client. + SelectiveGapicGeneration *SelectiveGapicGeneration `protobuf:"bytes,3,opt,name=selective_gapic_generation,json=selectiveGapicGeneration,proto3" json:"selective_gapic_generation,omitempty"` } func (x *CommonLanguageSettings) Reset() { @@ -229,6 +231,13 @@ func (x *CommonLanguageSettings) GetDestinations() []ClientLibraryDestination { return nil } +func (x *CommonLanguageSettings) GetSelectiveGapicGeneration() *SelectiveGapicGeneration { + if x != nil { + return x.SelectiveGapicGeneration + } + return nil +} + // Details about how and where to publish client libraries. type ClientLibrarySettings struct { state protoimpl.MessageState @@ -984,6 +993,16 @@ type GoSettings struct { // Some settings. Common *CommonLanguageSettings `protobuf:"bytes,1,opt,name=common,proto3" json:"common,omitempty"` + // Map of service names to renamed services. Keys are the package relative + // service names and values are the name to be used for the service client + // and call options. + // + // publishing: + // + // go_settings: + // renamed_services: + // Publisher: TopicAdmin + RenamedServices map[string]string `protobuf:"bytes,2,rep,name=renamed_services,json=renamedServices,proto3" json:"renamed_services,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` } func (x *GoSettings) Reset() { @@ -1025,6 +1044,13 @@ func (x *GoSettings) GetCommon() *CommonLanguageSettings { return nil } +func (x *GoSettings) GetRenamedServices() map[string]string { + if x != nil { + return x.RenamedServices + } + return nil +} + // Describes the generator configuration for a method. type MethodSettings struct { state protoimpl.MessageState @@ -1123,6 +1149,57 @@ func (x *MethodSettings) GetAutoPopulatedFields() []string { return nil } +// This message is used to configure the generation of a subset of the RPCs in +// a service for client libraries. +type SelectiveGapicGeneration struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // An allowlist of the fully qualified names of RPCs that should be included + // on public client surfaces. 
+ Methods []string `protobuf:"bytes,1,rep,name=methods,proto3" json:"methods,omitempty"` +} + +func (x *SelectiveGapicGeneration) Reset() { + *x = SelectiveGapicGeneration{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_client_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SelectiveGapicGeneration) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SelectiveGapicGeneration) ProtoMessage() {} + +func (x *SelectiveGapicGeneration) ProtoReflect() protoreflect.Message { + mi := &file_google_api_client_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SelectiveGapicGeneration.ProtoReflect.Descriptor instead. +func (*SelectiveGapicGeneration) Descriptor() ([]byte, []int) { + return file_google_api_client_proto_rawDescGZIP(), []int{12} +} + +func (x *SelectiveGapicGeneration) GetMethods() []string { + if x != nil { + return x.Methods + } + return nil +} + // Experimental features to be included during client library generation. // These fields will be deprecated once the feature graduates and is enabled // by default. @@ -1136,12 +1213,17 @@ type PythonSettings_ExperimentalFeatures struct { // This feature will be enabled by default 1 month after launching the // feature in preview packages. RestAsyncIoEnabled bool `protobuf:"varint,1,opt,name=rest_async_io_enabled,json=restAsyncIoEnabled,proto3" json:"rest_async_io_enabled,omitempty"` + // Enables generation of protobuf code using new types that are more + // Pythonic which are included in `protobuf>=5.29.x`. This feature will be + // enabled by default 1 month after launching the feature in preview + // packages. + ProtobufPythonicTypesEnabled bool `protobuf:"varint,2,opt,name=protobuf_pythonic_types_enabled,json=protobufPythonicTypesEnabled,proto3" json:"protobuf_pythonic_types_enabled,omitempty"` } func (x *PythonSettings_ExperimentalFeatures) Reset() { *x = PythonSettings_ExperimentalFeatures{} if protoimpl.UnsafeEnabled { - mi := &file_google_api_client_proto_msgTypes[13] + mi := &file_google_api_client_proto_msgTypes[14] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1154,7 +1236,7 @@ func (x *PythonSettings_ExperimentalFeatures) String() string { func (*PythonSettings_ExperimentalFeatures) ProtoMessage() {} func (x *PythonSettings_ExperimentalFeatures) ProtoReflect() protoreflect.Message { - mi := &file_google_api_client_proto_msgTypes[13] + mi := &file_google_api_client_proto_msgTypes[14] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1177,6 +1259,13 @@ func (x *PythonSettings_ExperimentalFeatures) GetRestAsyncIoEnabled() bool { return false } +func (x *PythonSettings_ExperimentalFeatures) GetProtobufPythonicTypesEnabled() bool { + if x != nil { + return x.ProtobufPythonicTypesEnabled + } + return false +} + // Describes settings to use when generating API methods that use the // long-running operation pattern. 
// All default values below are from those used in the client library @@ -1205,7 +1294,7 @@ type MethodSettings_LongRunning struct { func (x *MethodSettings_LongRunning) Reset() { *x = MethodSettings_LongRunning{} if protoimpl.UnsafeEnabled { - mi := &file_google_api_client_proto_msgTypes[16] + mi := &file_google_api_client_proto_msgTypes[18] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1218,7 +1307,7 @@ func (x *MethodSettings_LongRunning) String() string { func (*MethodSettings_LongRunning) ProtoMessage() {} func (x *MethodSettings_LongRunning) ProtoReflect() protoreflect.Message { - mi := &file_google_api_client_proto_msgTypes[16] + mi := &file_google_api_client_proto_msgTypes[18] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1406,7 +1495,7 @@ var file_google_api_client_proto_rawDesc = []byte{ 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x94, 0x01, 0x0a, 0x16, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xf8, 0x01, 0x0a, 0x16, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x30, 0x0a, 0x12, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x5f, 0x64, 0x6f, 0x63, 0x73, 0x5f, 0x75, 0x72, 0x69, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, 0x18, @@ -1415,251 +1504,275 @@ var file_google_api_client_proto_rawDesc = []byte{ 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x44, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, - 0x0c, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x93, 0x05, - 0x0a, 0x15, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x53, - 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, - 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, - 0x6e, 0x12, 0x3a, 0x0a, 0x0c, 0x6c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x5f, 0x73, 0x74, 0x61, 0x67, - 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x53, 0x74, 0x61, 0x67, 0x65, - 0x52, 0x0b, 0x6c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x53, 0x74, 0x61, 0x67, 0x65, 0x12, 0x2c, 0x0a, - 0x12, 0x72, 0x65, 0x73, 0x74, 0x5f, 0x6e, 0x75, 0x6d, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x65, 0x6e, - 0x75, 0x6d, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x72, 0x65, 0x73, 0x74, 0x4e, - 0x75, 0x6d, 0x65, 0x72, 0x69, 0x63, 0x45, 0x6e, 0x75, 0x6d, 0x73, 0x12, 0x3d, 0x0a, 0x0d, 0x6a, - 0x61, 0x76, 0x61, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x15, 0x20, 0x01, + 0x0c, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x62, 0x0a, + 0x1a, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x67, 0x61, 0x70, 0x69, 0x63, + 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0b, 
0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x53, + 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x47, 0x61, 0x70, 0x69, 0x63, 0x47, 0x65, 0x6e, + 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x18, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, + 0x76, 0x65, 0x47, 0x61, 0x70, 0x69, 0x63, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x22, 0x93, 0x05, 0x0a, 0x15, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, + 0x61, 0x72, 0x79, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x76, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x3a, 0x0a, 0x0c, 0x6c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x5f, + 0x73, 0x74, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x17, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x53, + 0x74, 0x61, 0x67, 0x65, 0x52, 0x0b, 0x6c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x53, 0x74, 0x61, 0x67, + 0x65, 0x12, 0x2c, 0x0a, 0x12, 0x72, 0x65, 0x73, 0x74, 0x5f, 0x6e, 0x75, 0x6d, 0x65, 0x72, 0x69, + 0x63, 0x5f, 0x65, 0x6e, 0x75, 0x6d, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x72, + 0x65, 0x73, 0x74, 0x4e, 0x75, 0x6d, 0x65, 0x72, 0x69, 0x63, 0x45, 0x6e, 0x75, 0x6d, 0x73, 0x12, + 0x3d, 0x0a, 0x0d, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, + 0x18, 0x15, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x61, 0x70, 0x69, 0x2e, 0x4a, 0x61, 0x76, 0x61, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, + 0x52, 0x0c, 0x6a, 0x61, 0x76, 0x61, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, + 0x0a, 0x0c, 0x63, 0x70, 0x70, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x16, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, + 0x69, 0x2e, 0x43, 0x70, 0x70, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0b, 0x63, + 0x70, 0x70, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x0c, 0x70, 0x68, + 0x70, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x17, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x50, 0x68, + 0x70, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0b, 0x70, 0x68, 0x70, 0x53, 0x65, + 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x43, 0x0a, 0x0f, 0x70, 0x79, 0x74, 0x68, 0x6f, 0x6e, + 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x18, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x50, 0x79, 0x74, + 0x68, 0x6f, 0x6e, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0e, 0x70, 0x79, 0x74, + 0x68, 0x6f, 0x6e, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3d, 0x0a, 0x0d, 0x6e, + 0x6f, 0x64, 0x65, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x19, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, - 0x4a, 0x61, 0x76, 0x61, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0c, 0x6a, 0x61, - 0x76, 0x61, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x0c, 0x63, 0x70, - 0x70, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x16, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x70, - 0x70, 0x53, 0x65, 0x74, 0x74, 
0x69, 0x6e, 0x67, 0x73, 0x52, 0x0b, 0x63, 0x70, 0x70, 0x53, 0x65, - 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x0c, 0x70, 0x68, 0x70, 0x5f, 0x73, 0x65, - 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x17, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x50, 0x68, 0x70, 0x53, 0x65, 0x74, - 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0b, 0x70, 0x68, 0x70, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, - 0x67, 0x73, 0x12, 0x43, 0x0a, 0x0f, 0x70, 0x79, 0x74, 0x68, 0x6f, 0x6e, 0x5f, 0x73, 0x65, 0x74, - 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x18, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x50, 0x79, 0x74, 0x68, 0x6f, 0x6e, 0x53, - 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0e, 0x70, 0x79, 0x74, 0x68, 0x6f, 0x6e, 0x53, - 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3d, 0x0a, 0x0d, 0x6e, 0x6f, 0x64, 0x65, 0x5f, - 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x19, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4e, 0x6f, 0x64, 0x65, - 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0c, 0x6e, 0x6f, 0x64, 0x65, 0x53, 0x65, - 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x43, 0x0a, 0x0f, 0x64, 0x6f, 0x74, 0x6e, 0x65, 0x74, - 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x1a, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x44, 0x6f, 0x74, - 0x6e, 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0e, 0x64, 0x6f, 0x74, - 0x6e, 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3d, 0x0a, 0x0d, 0x72, - 0x75, 0x62, 0x79, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x1b, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, - 0x52, 0x75, 0x62, 0x79, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0c, 0x72, 0x75, - 0x62, 0x79, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x37, 0x0a, 0x0b, 0x67, 0x6f, - 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x1c, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x47, 0x6f, 0x53, - 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0a, 0x67, 0x6f, 0x53, 0x65, 0x74, 0x74, 0x69, - 0x6e, 0x67, 0x73, 0x22, 0xf4, 0x04, 0x0a, 0x0a, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x69, - 0x6e, 0x67, 0x12, 0x43, 0x0a, 0x0f, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x5f, 0x73, 0x65, 0x74, - 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, - 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0e, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, - 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x22, 0x0a, 0x0d, 0x6e, 0x65, 0x77, 0x5f, 0x69, - 0x73, 0x73, 0x75, 0x65, 0x5f, 0x75, 0x72, 0x69, 0x18, 0x65, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, - 0x6e, 0x65, 0x77, 0x49, 0x73, 0x73, 0x75, 0x65, 0x55, 0x72, 0x69, 0x12, 0x2b, 0x0a, 0x11, 0x64, - 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x75, 0x72, 0x69, - 0x18, 0x66, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x55, 0x72, 0x69, 0x12, 0x24, 0x0a, 0x0e, 0x61, 0x70, 0x69, 0x5f, - 0x73, 0x68, 0x6f, 0x72, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 
0x65, 0x18, 0x67, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0c, 0x61, 0x70, 0x69, 0x53, 0x68, 0x6f, 0x72, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x21, - 0x0a, 0x0c, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x5f, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x18, 0x68, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x4c, 0x61, 0x62, 0x65, - 0x6c, 0x12, 0x34, 0x0a, 0x16, 0x63, 0x6f, 0x64, 0x65, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x5f, 0x67, - 0x69, 0x74, 0x68, 0x75, 0x62, 0x5f, 0x74, 0x65, 0x61, 0x6d, 0x73, 0x18, 0x69, 0x20, 0x03, 0x28, - 0x09, 0x52, 0x14, 0x63, 0x6f, 0x64, 0x65, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x47, 0x69, 0x74, 0x68, - 0x75, 0x62, 0x54, 0x65, 0x61, 0x6d, 0x73, 0x12, 0x24, 0x0a, 0x0e, 0x64, 0x6f, 0x63, 0x5f, 0x74, - 0x61, 0x67, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x6a, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x0c, 0x64, 0x6f, 0x63, 0x54, 0x61, 0x67, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x49, 0x0a, - 0x0c, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x6b, 0x20, - 0x01, 0x28, 0x0e, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, - 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x4f, 0x72, - 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x6f, 0x72, 0x67, 0x61, - 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4c, 0x0a, 0x10, 0x6c, 0x69, 0x62, 0x72, - 0x61, 0x72, 0x79, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x6d, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, - 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x53, 0x65, 0x74, - 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0f, 0x6c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x53, 0x65, - 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x49, 0x0a, 0x21, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x5f, - 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x5f, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65, - 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x75, 0x72, 0x69, 0x18, 0x6e, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x1e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, - 0x65, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x55, 0x72, - 0x69, 0x12, 0x47, 0x0a, 0x20, 0x72, 0x65, 0x73, 0x74, 0x5f, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, - 0x6e, 0x63, 0x65, 0x5f, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x5f, 0x75, 0x72, 0x69, 0x18, 0x6f, 0x20, 0x01, 0x28, 0x09, 0x52, 0x1d, 0x72, 0x65, 0x73, - 0x74, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, - 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x55, 0x72, 0x69, 0x22, 0x9a, 0x02, 0x0a, 0x0c, 0x4a, - 0x61, 0x76, 0x61, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x27, 0x0a, 0x0f, 0x6c, - 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x6c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x50, 0x61, 0x63, - 0x6b, 0x61, 0x67, 0x65, 0x12, 0x5f, 0x0a, 0x13, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, - 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4a, - 0x61, 0x76, 0x61, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x53, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x4e, 0x61, 0x6d, 0x65, 0x73, 
0x45, 0x6e, 0x74, - 0x72, 0x79, 0x52, 0x11, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, - 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, + 0x4e, 0x6f, 0x64, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0c, 0x6e, 0x6f, + 0x64, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x43, 0x0a, 0x0f, 0x64, 0x6f, + 0x74, 0x6e, 0x65, 0x74, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x1a, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, + 0x2e, 0x44, 0x6f, 0x74, 0x6e, 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, + 0x0e, 0x64, 0x6f, 0x74, 0x6e, 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, + 0x3d, 0x0a, 0x0d, 0x72, 0x75, 0x62, 0x79, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, + 0x18, 0x1b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x61, 0x70, 0x69, 0x2e, 0x52, 0x75, 0x62, 0x79, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, + 0x52, 0x0c, 0x72, 0x75, 0x62, 0x79, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x37, + 0x0a, 0x0b, 0x67, 0x6f, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x1c, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, + 0x2e, 0x47, 0x6f, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0a, 0x67, 0x6f, 0x53, + 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x22, 0xf4, 0x04, 0x0a, 0x0a, 0x50, 0x75, 0x62, 0x6c, + 0x69, 0x73, 0x68, 0x69, 0x6e, 0x67, 0x12, 0x43, 0x0a, 0x0f, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, + 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x65, 0x74, + 0x68, 0x6f, 0x64, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0e, 0x6d, 0x65, 0x74, + 0x68, 0x6f, 0x64, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x22, 0x0a, 0x0d, 0x6e, + 0x65, 0x77, 0x5f, 0x69, 0x73, 0x73, 0x75, 0x65, 0x5f, 0x75, 0x72, 0x69, 0x18, 0x65, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0b, 0x6e, 0x65, 0x77, 0x49, 0x73, 0x73, 0x75, 0x65, 0x55, 0x72, 0x69, 0x12, + 0x2b, 0x0a, 0x11, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x75, 0x72, 0x69, 0x18, 0x66, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x64, 0x6f, 0x63, 0x75, + 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x55, 0x72, 0x69, 0x12, 0x24, 0x0a, 0x0e, + 0x61, 0x70, 0x69, 0x5f, 0x73, 0x68, 0x6f, 0x72, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x67, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x61, 0x70, 0x69, 0x53, 0x68, 0x6f, 0x72, 0x74, 0x4e, 0x61, + 0x6d, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x5f, 0x6c, 0x61, 0x62, + 0x65, 0x6c, 0x18, 0x68, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, + 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x34, 0x0a, 0x16, 0x63, 0x6f, 0x64, 0x65, 0x6f, 0x77, 0x6e, + 0x65, 0x72, 0x5f, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x5f, 0x74, 0x65, 0x61, 0x6d, 0x73, 0x18, + 0x69, 0x20, 0x03, 0x28, 0x09, 0x52, 0x14, 0x63, 0x6f, 0x64, 0x65, 0x6f, 0x77, 0x6e, 0x65, 0x72, + 0x47, 0x69, 0x74, 0x68, 0x75, 0x62, 0x54, 0x65, 0x61, 0x6d, 0x73, 0x12, 0x24, 0x0a, 0x0e, 0x64, + 0x6f, 0x63, 0x5f, 0x74, 0x61, 0x67, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x6a, 0x20, + 0x01, 
0x28, 0x09, 0x52, 0x0c, 0x64, 0x6f, 0x63, 0x54, 0x61, 0x67, 0x50, 0x72, 0x65, 0x66, 0x69, + 0x78, 0x12, 0x49, 0x0a, 0x0c, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0x6b, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, + 0x72, 0x79, 0x4f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, + 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4c, 0x0a, 0x10, + 0x6c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, + 0x18, 0x6d, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, + 0x79, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0f, 0x6c, 0x69, 0x62, 0x72, 0x61, + 0x72, 0x79, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x49, 0x0a, 0x21, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x5f, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x5f, 0x64, 0x6f, + 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x75, 0x72, 0x69, 0x18, + 0x6e, 0x20, 0x01, 0x28, 0x09, 0x52, 0x1e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x65, 0x66, 0x65, + 0x72, 0x65, 0x6e, 0x63, 0x65, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x55, 0x72, 0x69, 0x12, 0x47, 0x0a, 0x20, 0x72, 0x65, 0x73, 0x74, 0x5f, 0x72, 0x65, + 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x5f, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x75, 0x72, 0x69, 0x18, 0x6f, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x1d, 0x72, 0x65, 0x73, 0x74, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x44, 0x6f, + 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x55, 0x72, 0x69, 0x22, 0x9a, + 0x02, 0x0a, 0x0c, 0x4a, 0x61, 0x76, 0x61, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, + 0x27, 0x0a, 0x0f, 0x6c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, + 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x6c, 0x69, 0x62, 0x72, 0x61, 0x72, + 0x79, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x5f, 0x0a, 0x13, 0x73, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, + 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, + 0x70, 0x69, 0x2e, 0x4a, 0x61, 0x76, 0x61, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, + 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x4e, 0x61, 0x6d, 0x65, + 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x11, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, + 0x6c, 0x61, 0x73, 0x73, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, + 0x6d, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, + 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, + 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x1a, 0x44, 0x0a, 0x16, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x43, 0x6c, 0x61, 0x73, 0x73, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, + 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, + 0x79, 0x12, 0x14, 0x0a, 0x05, 
0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x49, 0x0a, 0x0b, 0x43, + 0x70, 0x70, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, + 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, + 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, + 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0x49, 0x0a, 0x0b, 0x50, 0x68, 0x70, 0x53, 0x65, 0x74, + 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, - 0x6e, 0x1a, 0x44, 0x0a, 0x16, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x6c, 0x61, 0x73, - 0x73, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, - 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, - 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, - 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x49, 0x0a, 0x0b, 0x43, 0x70, 0x70, 0x53, 0x65, + 0x6e, 0x22, 0xc5, 0x02, 0x0a, 0x0e, 0x50, 0x79, 0x74, 0x68, 0x6f, 0x6e, 0x53, 0x65, 0x74, 0x74, + 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, + 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, + 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, + 0x12, 0x64, 0x0a, 0x15, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, + 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x50, 0x79, 0x74, + 0x68, 0x6f, 0x6e, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x45, 0x78, 0x70, 0x65, + 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, + 0x52, 0x14, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x46, 0x65, + 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x1a, 0x90, 0x01, 0x0a, 0x14, 0x45, 0x78, 0x70, 0x65, 0x72, + 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, + 0x31, 0x0a, 0x15, 0x72, 0x65, 0x73, 0x74, 0x5f, 0x61, 0x73, 0x79, 0x6e, 0x63, 0x5f, 0x69, 0x6f, + 0x5f, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, + 0x72, 0x65, 0x73, 0x74, 0x41, 0x73, 0x79, 0x6e, 0x63, 0x49, 0x6f, 0x45, 0x6e, 0x61, 0x62, 0x6c, + 0x65, 0x64, 0x12, 0x45, 0x0a, 0x1f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x5f, 0x70, + 0x79, 0x74, 0x68, 0x6f, 0x6e, 0x69, 0x63, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x5f, 0x65, 0x6e, + 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x1c, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x50, 0x79, 0x74, 0x68, 0x6f, 0x6e, 0x69, 0x63, 0x54, 0x79, 0x70, + 0x65, 0x73, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x22, 
0x4a, 0x0a, 0x0c, 0x4e, 0x6f, 0x64, + 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, + 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, + 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, + 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0xae, 0x04, 0x0a, 0x0e, 0x44, 0x6f, 0x74, 0x6e, 0x65, 0x74, + 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, + 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, + 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, + 0x6d, 0x6d, 0x6f, 0x6e, 0x12, 0x5a, 0x0a, 0x10, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x5f, + 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x44, 0x6f, 0x74, 0x6e, + 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x52, 0x65, 0x6e, 0x61, 0x6d, + 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, + 0x0f, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, + 0x12, 0x5d, 0x0a, 0x11, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x44, 0x6f, 0x74, 0x6e, 0x65, 0x74, 0x53, + 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x52, + 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x10, 0x72, + 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, + 0x2b, 0x0a, 0x11, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x10, 0x69, 0x67, 0x6e, 0x6f, + 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x38, 0x0a, 0x18, + 0x66, 0x6f, 0x72, 0x63, 0x65, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, + 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x16, + 0x66, 0x6f, 0x72, 0x63, 0x65, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x41, + 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x12, 0x35, 0x0a, 0x16, 0x68, 0x61, 0x6e, 0x64, 0x77, 0x72, + 0x69, 0x74, 0x74, 0x65, 0x6e, 0x5f, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, + 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x15, 0x68, 0x61, 0x6e, 0x64, 0x77, 0x72, 0x69, 0x74, + 0x74, 0x65, 0x6e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x1a, 0x42, 0x0a, + 0x14, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, + 0x01, 0x1a, 0x43, 0x0a, 0x15, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x52, 0x65, 
0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, + 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x4a, 0x0a, 0x0c, 0x52, 0x75, 0x62, 0x79, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, - 0x6f, 0x6e, 0x22, 0x49, 0x0a, 0x0b, 0x50, 0x68, 0x70, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, + 0x6f, 0x6e, 0x22, 0xe4, 0x01, 0x0a, 0x0a, 0x47, 0x6f, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, - 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0xfd, 0x01, - 0x0a, 0x0e, 0x50, 0x79, 0x74, 0x68, 0x6f, 0x6e, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, - 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, - 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, - 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x12, 0x64, 0x0a, 0x15, - 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x5f, 0x66, 0x65, 0x61, - 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x50, 0x79, 0x74, 0x68, 0x6f, 0x6e, 0x53, - 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x45, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, - 0x6e, 0x74, 0x61, 0x6c, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x52, 0x14, 0x65, 0x78, - 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, - 0x65, 0x73, 0x1a, 0x49, 0x0a, 0x14, 0x45, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, - 0x61, 0x6c, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x31, 0x0a, 0x15, 0x72, 0x65, - 0x73, 0x74, 0x5f, 0x61, 0x73, 0x79, 0x6e, 0x63, 0x5f, 0x69, 0x6f, 0x5f, 0x65, 0x6e, 0x61, 0x62, - 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, 0x72, 0x65, 0x73, 0x74, 0x41, - 0x73, 0x79, 0x6e, 0x63, 0x49, 0x6f, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x22, 0x4a, 0x0a, - 0x0c, 0x4e, 0x6f, 0x64, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, - 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, - 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, - 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0xae, 0x04, 0x0a, 0x0e, 0x44, 0x6f, - 0x74, 0x6e, 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, - 0x63, 0x6f, 0x6d, 0x6d, 
0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, - 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, - 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x12, 0x5a, 0x0a, 0x10, 0x72, 0x65, 0x6e, 0x61, - 0x6d, 0x65, 0x64, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, - 0x44, 0x6f, 0x74, 0x6e, 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x52, - 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x45, 0x6e, - 0x74, 0x72, 0x79, 0x52, 0x0f, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x73, 0x12, 0x5d, 0x0a, 0x11, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x5f, - 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x30, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x44, 0x6f, 0x74, - 0x6e, 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x52, 0x65, 0x6e, 0x61, - 0x6d, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, - 0x79, 0x52, 0x10, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x64, 0x5f, 0x72, - 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x10, - 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, - 0x12, 0x38, 0x0a, 0x18, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, - 0x70, 0x61, 0x63, 0x65, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, - 0x28, 0x09, 0x52, 0x16, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, - 0x61, 0x63, 0x65, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x12, 0x35, 0x0a, 0x16, 0x68, 0x61, - 0x6e, 0x64, 0x77, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x5f, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, - 0x75, 0x72, 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x15, 0x68, 0x61, 0x6e, 0x64, - 0x77, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, - 0x73, 0x1a, 0x42, 0x0a, 0x14, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x43, 0x0a, 0x15, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, - 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, - 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, - 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x4a, 0x0a, 0x0c, 0x52, 0x75, - 0x62, 0x79, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, - 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 
0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, - 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, - 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0x48, 0x0a, 0x0a, 0x47, 0x6f, 0x53, 0x65, 0x74, 0x74, - 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, - 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, - 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, - 0x22, 0xc2, 0x03, 0x0a, 0x0e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x65, 0x74, 0x74, 0x69, - 0x6e, 0x67, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x12, - 0x49, 0x0a, 0x0c, 0x6c, 0x6f, 0x6e, 0x67, 0x5f, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, - 0x70, 0x69, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, - 0x73, 0x2e, 0x4c, 0x6f, 0x6e, 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x52, 0x0b, 0x6c, - 0x6f, 0x6e, 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x32, 0x0a, 0x15, 0x61, 0x75, - 0x74, 0x6f, 0x5f, 0x70, 0x6f, 0x70, 0x75, 0x6c, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x66, 0x69, 0x65, - 0x6c, 0x64, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x13, 0x61, 0x75, 0x74, 0x6f, 0x50, - 0x6f, 0x70, 0x75, 0x6c, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x1a, 0x94, - 0x02, 0x0a, 0x0b, 0x4c, 0x6f, 0x6e, 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x47, - 0x0a, 0x12, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x64, - 0x65, 0x6c, 0x61, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x10, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x50, 0x6f, - 0x6c, 0x6c, 0x44, 0x65, 0x6c, 0x61, 0x79, 0x12, 0x32, 0x0a, 0x15, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, - 0x64, 0x65, 0x6c, 0x61, 0x79, 0x5f, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x69, 0x65, 0x72, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x02, 0x52, 0x13, 0x70, 0x6f, 0x6c, 0x6c, 0x44, 0x65, 0x6c, 0x61, - 0x79, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x12, 0x3f, 0x0a, 0x0e, 0x6d, - 0x61, 0x78, 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x64, 0x65, 0x6c, 0x61, 0x79, 0x18, 0x03, 0x20, + 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x12, 0x56, 0x0a, + 0x10, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x47, 0x6f, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, + 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x45, + 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0f, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x73, 0x1a, 0x42, 0x0a, 0x14, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, + 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, + 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 
0x6b, 0x65, 0x79, 0x12, + 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xc2, 0x03, 0x0a, 0x0e, 0x4d, 0x65, + 0x74, 0x68, 0x6f, 0x64, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x1a, 0x0a, 0x08, + 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, + 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x12, 0x49, 0x0a, 0x0c, 0x6c, 0x6f, 0x6e, 0x67, + 0x5f, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x65, 0x74, 0x68, + 0x6f, 0x64, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x4c, 0x6f, 0x6e, 0x67, 0x52, + 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x52, 0x0b, 0x6c, 0x6f, 0x6e, 0x67, 0x52, 0x75, 0x6e, 0x6e, + 0x69, 0x6e, 0x67, 0x12, 0x32, 0x0a, 0x15, 0x61, 0x75, 0x74, 0x6f, 0x5f, 0x70, 0x6f, 0x70, 0x75, + 0x6c, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x18, 0x03, 0x20, 0x03, + 0x28, 0x09, 0x52, 0x13, 0x61, 0x75, 0x74, 0x6f, 0x50, 0x6f, 0x70, 0x75, 0x6c, 0x61, 0x74, 0x65, + 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x1a, 0x94, 0x02, 0x0a, 0x0b, 0x4c, 0x6f, 0x6e, 0x67, + 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x47, 0x0a, 0x12, 0x69, 0x6e, 0x69, 0x74, 0x69, + 0x61, 0x6c, 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x64, 0x65, 0x6c, 0x61, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, - 0x6d, 0x61, 0x78, 0x50, 0x6f, 0x6c, 0x6c, 0x44, 0x65, 0x6c, 0x61, 0x79, 0x12, 0x47, 0x0a, 0x12, - 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, - 0x75, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x52, 0x10, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x50, 0x6f, 0x6c, 0x6c, 0x54, 0x69, - 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x2a, 0xa3, 0x01, 0x0a, 0x19, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, - 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x4f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x12, 0x2b, 0x0a, 0x27, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x4c, 0x49, - 0x42, 0x52, 0x41, 0x52, 0x59, 0x5f, 0x4f, 0x52, 0x47, 0x41, 0x4e, 0x49, 0x5a, 0x41, 0x54, 0x49, - 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, - 0x12, 0x09, 0x0a, 0x05, 0x43, 0x4c, 0x4f, 0x55, 0x44, 0x10, 0x01, 0x12, 0x07, 0x0a, 0x03, 0x41, - 0x44, 0x53, 0x10, 0x02, 0x12, 0x0a, 0x0a, 0x06, 0x50, 0x48, 0x4f, 0x54, 0x4f, 0x53, 0x10, 0x03, - 0x12, 0x0f, 0x0a, 0x0b, 0x53, 0x54, 0x52, 0x45, 0x45, 0x54, 0x5f, 0x56, 0x49, 0x45, 0x57, 0x10, - 0x04, 0x12, 0x0c, 0x0a, 0x08, 0x53, 0x48, 0x4f, 0x50, 0x50, 0x49, 0x4e, 0x47, 0x10, 0x05, 0x12, - 0x07, 0x0a, 0x03, 0x47, 0x45, 0x4f, 0x10, 0x06, 0x12, 0x11, 0x0a, 0x0d, 0x47, 0x45, 0x4e, 0x45, - 0x52, 0x41, 0x54, 0x49, 0x56, 0x45, 0x5f, 0x41, 0x49, 0x10, 0x07, 0x2a, 0x67, 0x0a, 0x18, 0x43, - 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x44, 0x65, 0x73, 0x74, - 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2a, 0x0a, 0x26, 0x43, 0x4c, 0x49, 0x45, 0x4e, - 0x54, 0x5f, 0x4c, 0x49, 0x42, 0x52, 0x41, 0x52, 0x59, 0x5f, 0x44, 0x45, 0x53, 0x54, 0x49, 0x4e, - 
0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, - 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x47, 0x49, 0x54, 0x48, 0x55, 0x42, 0x10, 0x0a, 0x12, - 0x13, 0x0a, 0x0f, 0x50, 0x41, 0x43, 0x4b, 0x41, 0x47, 0x45, 0x5f, 0x4d, 0x41, 0x4e, 0x41, 0x47, - 0x45, 0x52, 0x10, 0x14, 0x3a, 0x4a, 0x0a, 0x10, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x5f, 0x73, - 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x12, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, - 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9b, 0x08, 0x20, 0x03, 0x28, 0x09, 0x52, - 0x0f, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, - 0x3a, 0x43, 0x0a, 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x68, 0x6f, 0x73, 0x74, - 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x18, 0x99, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, - 0x74, 0x48, 0x6f, 0x73, 0x74, 0x3a, 0x43, 0x0a, 0x0c, 0x6f, 0x61, 0x75, 0x74, 0x68, 0x5f, 0x73, - 0x63, 0x6f, 0x70, 0x65, 0x73, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9a, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6f, - 0x61, 0x75, 0x74, 0x68, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x3a, 0x44, 0x0a, 0x0b, 0x61, 0x70, - 0x69, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xc1, 0xba, 0xab, 0xfa, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x61, 0x70, 0x69, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, - 0x42, 0x69, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, - 0x70, 0x69, 0x42, 0x0b, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, - 0x01, 0x5a, 0x41, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, - 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, - 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0xa2, 0x02, 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x33, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x10, + 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x50, 0x6f, 0x6c, 0x6c, 0x44, 0x65, 0x6c, 0x61, 0x79, + 0x12, 0x32, 0x0a, 0x15, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x64, 0x65, 0x6c, 0x61, 0x79, 0x5f, 0x6d, + 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x02, 0x52, + 0x13, 0x70, 0x6f, 0x6c, 0x6c, 0x44, 0x65, 0x6c, 0x61, 0x79, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, + 0x6c, 0x69, 0x65, 0x72, 0x12, 0x3f, 0x0a, 0x0e, 0x6d, 0x61, 0x78, 0x5f, 0x70, 0x6f, 0x6c, 0x6c, + 0x5f, 0x64, 0x65, 0x6c, 0x61, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, + 0x75, 
0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x6d, 0x61, 0x78, 0x50, 0x6f, 0x6c, 0x6c, + 0x44, 0x65, 0x6c, 0x61, 0x79, 0x12, 0x47, 0x0a, 0x12, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x70, + 0x6f, 0x6c, 0x6c, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x10, 0x74, 0x6f, + 0x74, 0x61, 0x6c, 0x50, 0x6f, 0x6c, 0x6c, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x22, 0x34, + 0x0a, 0x18, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x47, 0x61, 0x70, 0x69, 0x63, + 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, + 0x74, 0x68, 0x6f, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x74, + 0x68, 0x6f, 0x64, 0x73, 0x2a, 0xa3, 0x01, 0x0a, 0x19, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, + 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x4f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x12, 0x2b, 0x0a, 0x27, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x4c, 0x49, 0x42, + 0x52, 0x41, 0x52, 0x59, 0x5f, 0x4f, 0x52, 0x47, 0x41, 0x4e, 0x49, 0x5a, 0x41, 0x54, 0x49, 0x4f, + 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, + 0x09, 0x0a, 0x05, 0x43, 0x4c, 0x4f, 0x55, 0x44, 0x10, 0x01, 0x12, 0x07, 0x0a, 0x03, 0x41, 0x44, + 0x53, 0x10, 0x02, 0x12, 0x0a, 0x0a, 0x06, 0x50, 0x48, 0x4f, 0x54, 0x4f, 0x53, 0x10, 0x03, 0x12, + 0x0f, 0x0a, 0x0b, 0x53, 0x54, 0x52, 0x45, 0x45, 0x54, 0x5f, 0x56, 0x49, 0x45, 0x57, 0x10, 0x04, + 0x12, 0x0c, 0x0a, 0x08, 0x53, 0x48, 0x4f, 0x50, 0x50, 0x49, 0x4e, 0x47, 0x10, 0x05, 0x12, 0x07, + 0x0a, 0x03, 0x47, 0x45, 0x4f, 0x10, 0x06, 0x12, 0x11, 0x0a, 0x0d, 0x47, 0x45, 0x4e, 0x45, 0x52, + 0x41, 0x54, 0x49, 0x56, 0x45, 0x5f, 0x41, 0x49, 0x10, 0x07, 0x2a, 0x67, 0x0a, 0x18, 0x43, 0x6c, + 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x44, 0x65, 0x73, 0x74, 0x69, + 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2a, 0x0a, 0x26, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, + 0x5f, 0x4c, 0x49, 0x42, 0x52, 0x41, 0x52, 0x59, 0x5f, 0x44, 0x45, 0x53, 0x54, 0x49, 0x4e, 0x41, + 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, + 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x47, 0x49, 0x54, 0x48, 0x55, 0x42, 0x10, 0x0a, 0x12, 0x13, + 0x0a, 0x0f, 0x50, 0x41, 0x43, 0x4b, 0x41, 0x47, 0x45, 0x5f, 0x4d, 0x41, 0x4e, 0x41, 0x47, 0x45, + 0x52, 0x10, 0x14, 0x3a, 0x4a, 0x0a, 0x10, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x5f, 0x73, 0x69, + 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x12, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, + 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9b, 0x08, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0f, + 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x3a, + 0x43, 0x0a, 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x12, + 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x18, 0x99, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, + 0x48, 0x6f, 0x73, 0x74, 0x3a, 0x43, 0x0a, 0x0c, 0x6f, 0x61, 0x75, 0x74, 0x68, 0x5f, 0x73, 0x63, + 0x6f, 0x70, 0x65, 0x73, 0x12, 
0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9a, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6f, 0x61, + 0x75, 0x74, 0x68, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x3a, 0x44, 0x0a, 0x0b, 0x61, 0x70, 0x69, + 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xc1, 0xba, 0xab, 0xfa, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0a, 0x61, 0x70, 0x69, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x42, + 0x69, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, + 0x69, 0x42, 0x0b, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, + 0x5a, 0x41, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, + 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, + 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0xa2, 0x02, 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, } var ( @@ -1675,7 +1788,7 @@ func file_google_api_client_proto_rawDescGZIP() []byte { } var file_google_api_client_proto_enumTypes = make([]protoimpl.EnumInfo, 2) -var file_google_api_client_proto_msgTypes = make([]protoimpl.MessageInfo, 17) +var file_google_api_client_proto_msgTypes = make([]protoimpl.MessageInfo, 19) var file_google_api_client_proto_goTypes = []interface{}{ (ClientLibraryOrganization)(0), // 0: google.api.ClientLibraryOrganization (ClientLibraryDestination)(0), // 1: google.api.ClientLibraryDestination @@ -1691,55 +1804,59 @@ var file_google_api_client_proto_goTypes = []interface{}{ (*RubySettings)(nil), // 11: google.api.RubySettings (*GoSettings)(nil), // 12: google.api.GoSettings (*MethodSettings)(nil), // 13: google.api.MethodSettings - nil, // 14: google.api.JavaSettings.ServiceClassNamesEntry - (*PythonSettings_ExperimentalFeatures)(nil), // 15: google.api.PythonSettings.ExperimentalFeatures - nil, // 16: google.api.DotnetSettings.RenamedServicesEntry - nil, // 17: google.api.DotnetSettings.RenamedResourcesEntry - (*MethodSettings_LongRunning)(nil), // 18: google.api.MethodSettings.LongRunning - (api.LaunchStage)(0), // 19: google.api.LaunchStage - (*durationpb.Duration)(nil), // 20: google.protobuf.Duration - (*descriptorpb.MethodOptions)(nil), // 21: google.protobuf.MethodOptions - (*descriptorpb.ServiceOptions)(nil), // 22: google.protobuf.ServiceOptions + (*SelectiveGapicGeneration)(nil), // 14: google.api.SelectiveGapicGeneration + nil, // 15: google.api.JavaSettings.ServiceClassNamesEntry + (*PythonSettings_ExperimentalFeatures)(nil), // 16: google.api.PythonSettings.ExperimentalFeatures + nil, // 17: google.api.DotnetSettings.RenamedServicesEntry + nil, // 18: google.api.DotnetSettings.RenamedResourcesEntry + nil, // 19: google.api.GoSettings.RenamedServicesEntry + (*MethodSettings_LongRunning)(nil), // 20: google.api.MethodSettings.LongRunning + (api.LaunchStage)(0), // 21: google.api.LaunchStage + (*durationpb.Duration)(nil), // 22: google.protobuf.Duration + (*descriptorpb.MethodOptions)(nil), // 23: google.protobuf.MethodOptions 
+ (*descriptorpb.ServiceOptions)(nil), // 24: google.protobuf.ServiceOptions } var file_google_api_client_proto_depIdxs = []int32{ 1, // 0: google.api.CommonLanguageSettings.destinations:type_name -> google.api.ClientLibraryDestination - 19, // 1: google.api.ClientLibrarySettings.launch_stage:type_name -> google.api.LaunchStage - 5, // 2: google.api.ClientLibrarySettings.java_settings:type_name -> google.api.JavaSettings - 6, // 3: google.api.ClientLibrarySettings.cpp_settings:type_name -> google.api.CppSettings - 7, // 4: google.api.ClientLibrarySettings.php_settings:type_name -> google.api.PhpSettings - 8, // 5: google.api.ClientLibrarySettings.python_settings:type_name -> google.api.PythonSettings - 9, // 6: google.api.ClientLibrarySettings.node_settings:type_name -> google.api.NodeSettings - 10, // 7: google.api.ClientLibrarySettings.dotnet_settings:type_name -> google.api.DotnetSettings - 11, // 8: google.api.ClientLibrarySettings.ruby_settings:type_name -> google.api.RubySettings - 12, // 9: google.api.ClientLibrarySettings.go_settings:type_name -> google.api.GoSettings - 13, // 10: google.api.Publishing.method_settings:type_name -> google.api.MethodSettings - 0, // 11: google.api.Publishing.organization:type_name -> google.api.ClientLibraryOrganization - 3, // 12: google.api.Publishing.library_settings:type_name -> google.api.ClientLibrarySettings - 14, // 13: google.api.JavaSettings.service_class_names:type_name -> google.api.JavaSettings.ServiceClassNamesEntry - 2, // 14: google.api.JavaSettings.common:type_name -> google.api.CommonLanguageSettings - 2, // 15: google.api.CppSettings.common:type_name -> google.api.CommonLanguageSettings - 2, // 16: google.api.PhpSettings.common:type_name -> google.api.CommonLanguageSettings - 2, // 17: google.api.PythonSettings.common:type_name -> google.api.CommonLanguageSettings - 15, // 18: google.api.PythonSettings.experimental_features:type_name -> google.api.PythonSettings.ExperimentalFeatures - 2, // 19: google.api.NodeSettings.common:type_name -> google.api.CommonLanguageSettings - 2, // 20: google.api.DotnetSettings.common:type_name -> google.api.CommonLanguageSettings - 16, // 21: google.api.DotnetSettings.renamed_services:type_name -> google.api.DotnetSettings.RenamedServicesEntry - 17, // 22: google.api.DotnetSettings.renamed_resources:type_name -> google.api.DotnetSettings.RenamedResourcesEntry - 2, // 23: google.api.RubySettings.common:type_name -> google.api.CommonLanguageSettings - 2, // 24: google.api.GoSettings.common:type_name -> google.api.CommonLanguageSettings - 18, // 25: google.api.MethodSettings.long_running:type_name -> google.api.MethodSettings.LongRunning - 20, // 26: google.api.MethodSettings.LongRunning.initial_poll_delay:type_name -> google.protobuf.Duration - 20, // 27: google.api.MethodSettings.LongRunning.max_poll_delay:type_name -> google.protobuf.Duration - 20, // 28: google.api.MethodSettings.LongRunning.total_poll_timeout:type_name -> google.protobuf.Duration - 21, // 29: google.api.method_signature:extendee -> google.protobuf.MethodOptions - 22, // 30: google.api.default_host:extendee -> google.protobuf.ServiceOptions - 22, // 31: google.api.oauth_scopes:extendee -> google.protobuf.ServiceOptions - 22, // 32: google.api.api_version:extendee -> google.protobuf.ServiceOptions - 33, // [33:33] is the sub-list for method output_type - 33, // [33:33] is the sub-list for method input_type - 33, // [33:33] is the sub-list for extension type_name - 29, // [29:33] is the sub-list for extension extendee - 0, // [0:29] 
is the sub-list for field type_name + 14, // 1: google.api.CommonLanguageSettings.selective_gapic_generation:type_name -> google.api.SelectiveGapicGeneration + 21, // 2: google.api.ClientLibrarySettings.launch_stage:type_name -> google.api.LaunchStage + 5, // 3: google.api.ClientLibrarySettings.java_settings:type_name -> google.api.JavaSettings + 6, // 4: google.api.ClientLibrarySettings.cpp_settings:type_name -> google.api.CppSettings + 7, // 5: google.api.ClientLibrarySettings.php_settings:type_name -> google.api.PhpSettings + 8, // 6: google.api.ClientLibrarySettings.python_settings:type_name -> google.api.PythonSettings + 9, // 7: google.api.ClientLibrarySettings.node_settings:type_name -> google.api.NodeSettings + 10, // 8: google.api.ClientLibrarySettings.dotnet_settings:type_name -> google.api.DotnetSettings + 11, // 9: google.api.ClientLibrarySettings.ruby_settings:type_name -> google.api.RubySettings + 12, // 10: google.api.ClientLibrarySettings.go_settings:type_name -> google.api.GoSettings + 13, // 11: google.api.Publishing.method_settings:type_name -> google.api.MethodSettings + 0, // 12: google.api.Publishing.organization:type_name -> google.api.ClientLibraryOrganization + 3, // 13: google.api.Publishing.library_settings:type_name -> google.api.ClientLibrarySettings + 15, // 14: google.api.JavaSettings.service_class_names:type_name -> google.api.JavaSettings.ServiceClassNamesEntry + 2, // 15: google.api.JavaSettings.common:type_name -> google.api.CommonLanguageSettings + 2, // 16: google.api.CppSettings.common:type_name -> google.api.CommonLanguageSettings + 2, // 17: google.api.PhpSettings.common:type_name -> google.api.CommonLanguageSettings + 2, // 18: google.api.PythonSettings.common:type_name -> google.api.CommonLanguageSettings + 16, // 19: google.api.PythonSettings.experimental_features:type_name -> google.api.PythonSettings.ExperimentalFeatures + 2, // 20: google.api.NodeSettings.common:type_name -> google.api.CommonLanguageSettings + 2, // 21: google.api.DotnetSettings.common:type_name -> google.api.CommonLanguageSettings + 17, // 22: google.api.DotnetSettings.renamed_services:type_name -> google.api.DotnetSettings.RenamedServicesEntry + 18, // 23: google.api.DotnetSettings.renamed_resources:type_name -> google.api.DotnetSettings.RenamedResourcesEntry + 2, // 24: google.api.RubySettings.common:type_name -> google.api.CommonLanguageSettings + 2, // 25: google.api.GoSettings.common:type_name -> google.api.CommonLanguageSettings + 19, // 26: google.api.GoSettings.renamed_services:type_name -> google.api.GoSettings.RenamedServicesEntry + 20, // 27: google.api.MethodSettings.long_running:type_name -> google.api.MethodSettings.LongRunning + 22, // 28: google.api.MethodSettings.LongRunning.initial_poll_delay:type_name -> google.protobuf.Duration + 22, // 29: google.api.MethodSettings.LongRunning.max_poll_delay:type_name -> google.protobuf.Duration + 22, // 30: google.api.MethodSettings.LongRunning.total_poll_timeout:type_name -> google.protobuf.Duration + 23, // 31: google.api.method_signature:extendee -> google.protobuf.MethodOptions + 24, // 32: google.api.default_host:extendee -> google.protobuf.ServiceOptions + 24, // 33: google.api.oauth_scopes:extendee -> google.protobuf.ServiceOptions + 24, // 34: google.api.api_version:extendee -> google.protobuf.ServiceOptions + 35, // [35:35] is the sub-list for method output_type + 35, // [35:35] is the sub-list for method input_type + 35, // [35:35] is the sub-list for extension type_name + 31, // [31:35] is the sub-list for 
extension extendee + 0, // [0:31] is the sub-list for field type_name } func init() { file_google_api_client_proto_init() } @@ -1892,7 +2009,19 @@ func file_google_api_client_proto_init() { return nil } } - file_google_api_client_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + file_google_api_client_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SelectiveGapicGeneration); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_client_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*PythonSettings_ExperimentalFeatures); i { case 0: return &v.state @@ -1904,7 +2033,7 @@ func file_google_api_client_proto_init() { return nil } } - file_google_api_client_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + file_google_api_client_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*MethodSettings_LongRunning); i { case 0: return &v.state @@ -1923,7 +2052,7 @@ func file_google_api_client_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_google_api_client_proto_rawDesc, NumEnums: 2, - NumMessages: 17, + NumMessages: 19, NumExtensions: 4, NumServices: 0, }, diff --git a/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go b/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go index e7d3805e3..f388426b0 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go @@ -159,14 +159,14 @@ var file_google_api_httpbody_proto_rawDesc = []byte{ 0x04, 0x64, 0x61, 0x74, 0x61, 0x12, 0x34, 0x0a, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, - 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x42, 0x68, 0x0a, 0x0e, 0x63, + 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x42, 0x65, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x42, 0x0d, 0x48, 0x74, 0x74, 0x70, 0x42, 0x6f, 0x64, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x68, 0x74, 0x74, 0x70, 0x62, 0x6f, - 0x64, 0x79, 0x3b, 0x68, 0x74, 0x74, 0x70, 0x62, 0x6f, 0x64, 0x79, 0xf8, 0x01, 0x01, 0xa2, 0x02, - 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x64, 0x79, 0x3b, 0x68, 0x74, 0x74, 0x70, 0x62, 0x6f, 0x64, 0x79, 0xa2, 0x02, 0x04, 0x47, 0x41, + 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/vendor/google.golang.org/grpc/CONTRIBUTING.md b/vendor/google.golang.org/grpc/CONTRIBUTING.md index 0854d298e..d9bfa6e1e 100644 --- a/vendor/google.golang.org/grpc/CONTRIBUTING.md +++ b/vendor/google.golang.org/grpc/CONTRIBUTING.md @@ -4,7 +4,7 @@ We definitely welcome your patches and contributions to gRPC! 
Please read the gR organization's [governance rules](https://github.com/grpc/grpc-community/blob/master/governance.md) and [contribution guidelines](https://github.com/grpc/grpc-community/blob/master/CONTRIBUTING.md) before proceeding. -If you are new to github, please start by reading [Pull Request howto](https://help.github.com/articles/about-pull-requests/) +If you are new to GitHub, please start by reading [Pull Request howto](https://help.github.com/articles/about-pull-requests/) ## Legal requirements @@ -25,8 +25,8 @@ How to get your contributions merged smoothly and quickly. is a great place to start. These issues are well-documented and usually can be resolved with a single pull request. -- If you are adding a new file, make sure it has the copyright message template - at the top as a comment. You can copy over the message from an existing file +- If you are adding a new file, make sure it has the copyright message template + at the top as a comment. You can copy over the message from an existing file and update the year. - The grpc package should only depend on standard Go packages and a small number @@ -39,12 +39,12 @@ How to get your contributions merged smoothly and quickly. proposal](https://github.com/grpc/proposal). - Provide a good **PR description** as a record of **what** change is being made - and **why** it was made. Link to a github issue if it exists. + and **why** it was made. Link to a GitHub issue if it exists. -- If you want to fix formatting or style, consider whether your changes are an - obvious improvement or might be considered a personal preference. If a style - change is based on preference, it likely will not be accepted. If it corrects - widely agreed-upon anti-patterns, then please do create a PR and explain the +- If you want to fix formatting or style, consider whether your changes are an + obvious improvement or might be considered a personal preference. If a style + change is based on preference, it likely will not be accepted. If it corrects + widely agreed-upon anti-patterns, then please do create a PR and explain the benefits of the change. - Unless your PR is trivial, you should expect there will be reviewer comments diff --git a/vendor/google.golang.org/grpc/balancer/balancer.go b/vendor/google.golang.org/grpc/balancer/balancer.go index b181f386a..3a2092f10 100644 --- a/vendor/google.golang.org/grpc/balancer/balancer.go +++ b/vendor/google.golang.org/grpc/balancer/balancer.go @@ -130,7 +130,7 @@ type SubConn interface { // UpdateAddresses updates the addresses used in this SubConn. // gRPC checks if currently-connected address is still in the new list. // If it's in the list, the connection will be kept. - // If it's not in the list, the connection will gracefully closed, and + // If it's not in the list, the connection will gracefully close, and // a new connection will be created. // // This will trigger a state transition for the SubConn. @@ -142,8 +142,11 @@ type SubConn interface { Connect() // GetOrBuildProducer returns a reference to the existing Producer for this // ProducerBuilder in this SubConn, or, if one does not currently exist, - // creates a new one and returns it. Returns a close function which must - // be called when the Producer is no longer needed. + // creates a new one and returns it. Returns a close function which may be + // called when the Producer is no longer needed. Otherwise the producer + // will automatically be closed upon connection loss or subchannel close. 
+ // Should only be called on a SubConn in state Ready. Otherwise the + // producer will be unable to create streams. GetOrBuildProducer(ProducerBuilder) (p Producer, close func()) // Shutdown shuts down the SubConn gracefully. Any started RPCs will be // allowed to complete. No future calls should be made on the SubConn. @@ -452,8 +455,10 @@ type ProducerBuilder interface { // Build creates a Producer. The first parameter is always a // grpc.ClientConnInterface (a type to allow creating RPCs/streams on the // associated SubConn), but is declared as `any` to avoid a dependency - // cycle. Should also return a close function that will be called when all - // references to the Producer have been given up. + // cycle. Build also returns a close function that will be called when all + // references to the Producer have been given up for a SubConn, or when a + // connectivity state change occurs on the SubConn. The close function + // should always block until all asynchronous cleanup work is completed. Build(grpcClientConnInterface any) (p Producer, close func()) } diff --git a/vendor/google.golang.org/grpc/balancer/base/balancer.go b/vendor/google.golang.org/grpc/balancer/base/balancer.go index a7f1eeec8..d5ed172ae 100644 --- a/vendor/google.golang.org/grpc/balancer/base/balancer.go +++ b/vendor/google.golang.org/grpc/balancer/base/balancer.go @@ -36,7 +36,7 @@ type baseBuilder struct { config Config } -func (bb *baseBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer { +func (bb *baseBuilder) Build(cc balancer.ClientConn, _ balancer.BuildOptions) balancer.Balancer { bal := &baseBalancer{ cc: cc, pickerBuilder: bb.pickerBuilder, @@ -133,7 +133,7 @@ func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) error { } } // If resolver state contains no addresses, return an error so ClientConn - // will trigger re-resolve. Also records this as an resolver error, so when + // will trigger re-resolve. Also records this as a resolver error, so when // the overall state turns transient failure, the error message will have // the zero address information. if len(s.ResolverState.Addresses) == 0 { @@ -259,6 +259,6 @@ type errPicker struct { err error // Pick() always returns this err. } -func (p *errPicker) Pick(info balancer.PickInfo) (balancer.PickResult, error) { +func (p *errPicker) Pick(balancer.PickInfo) (balancer.PickResult, error) { return balancer.PickResult{}, p.err } diff --git a/vendor/google.golang.org/grpc/balancer/pickfirst/internal/internal.go b/vendor/google.golang.org/grpc/balancer/pickfirst/internal/internal.go new file mode 100644 index 000000000..c51978945 --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/pickfirst/internal/internal.go @@ -0,0 +1,24 @@ +/* + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package internal contains code internal to the pickfirst package. +package internal + +import "math/rand" + +// RandShuffle pseudo-randomizes the order of addresses. 
+var RandShuffle = rand.Shuffle diff --git a/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go b/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go index 5b592f48a..e069346a7 100644 --- a/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go +++ b/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go @@ -26,18 +26,23 @@ import ( "math/rand" "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/pickfirst/internal" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/internal" + "google.golang.org/grpc/internal/envconfig" internalgrpclog "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/pretty" "google.golang.org/grpc/resolver" "google.golang.org/grpc/serviceconfig" + + _ "google.golang.org/grpc/balancer/pickfirst/pickfirstleaf" // For automatically registering the new pickfirst if required. ) func init() { + if envconfig.NewPickFirstEnabled { + return + } balancer.Register(pickfirstBuilder{}) - internal.ShuffleAddressListForTesting = func(n int, swap func(i, j int)) { rand.Shuffle(n, swap) } } var logger = grpclog.Component("pick-first-lb") @@ -50,7 +55,7 @@ const ( type pickfirstBuilder struct{} -func (pickfirstBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer { +func (pickfirstBuilder) Build(cc balancer.ClientConn, _ balancer.BuildOptions) balancer.Balancer { b := &pickfirstBalancer{cc: cc} b.logger = internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(logPrefix, b)) return b @@ -103,10 +108,13 @@ func (b *pickfirstBalancer) ResolverError(err error) { }) } +// Shuffler is an interface for shuffling an address list. type Shuffler interface { ShuffleAddressListForTesting(n int, swap func(i, j int)) } +// ShuffleAddressListForTesting pseudo-randomizes the order of addresses. n +// is the number of elements. swap swaps the elements with indexes i and j. func ShuffleAddressListForTesting(n int, swap func(i, j int)) { rand.Shuffle(n, swap) } func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState) error { @@ -140,7 +148,7 @@ func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState // within each endpoint. - A61 if cfg.ShuffleAddressList { endpoints = append([]resolver.Endpoint{}, endpoints...) - internal.ShuffleAddressListForTesting.(func(int, func(int, int)))(len(endpoints), func(i, j int) { endpoints[i], endpoints[j] = endpoints[j], endpoints[i] }) + internal.RandShuffle(len(endpoints), func(i, j int) { endpoints[i], endpoints[j] = endpoints[j], endpoints[i] }) } // "Flatten the list by concatenating the ordered list of addresses for each diff --git a/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go b/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go new file mode 100644 index 000000000..985b6edc7 --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go @@ -0,0 +1,625 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package pickfirstleaf contains the pick_first load balancing policy which +// will be the universal leaf policy after dualstack changes are implemented. +// +// # Experimental +// +// Notice: This package is EXPERIMENTAL and may be changed or removed in a +// later release. +package pickfirstleaf + +import ( + "encoding/json" + "errors" + "fmt" + "sync" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/pickfirst/internal" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal/envconfig" + internalgrpclog "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/internal/pretty" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/serviceconfig" +) + +func init() { + if envconfig.NewPickFirstEnabled { + // Register as the default pick_first balancer. + Name = "pick_first" + } + balancer.Register(pickfirstBuilder{}) +} + +var ( + logger = grpclog.Component("pick-first-leaf-lb") + // Name is the name of the pick_first_leaf balancer. + // It is changed to "pick_first" in init() if this balancer is to be + // registered as the default pickfirst. + Name = "pick_first_leaf" +) + +// TODO: change to pick-first when this becomes the default pick_first policy. +const logPrefix = "[pick-first-leaf-lb %p] " + +type pickfirstBuilder struct{} + +func (pickfirstBuilder) Build(cc balancer.ClientConn, _ balancer.BuildOptions) balancer.Balancer { + b := &pickfirstBalancer{ + cc: cc, + addressList: addressList{}, + subConns: resolver.NewAddressMap(), + state: connectivity.Connecting, + mu: sync.Mutex{}, + } + b.logger = internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(logPrefix, b)) + return b +} + +func (b pickfirstBuilder) Name() string { + return Name +} + +func (pickfirstBuilder) ParseConfig(js json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { + var cfg pfConfig + if err := json.Unmarshal(js, &cfg); err != nil { + return nil, fmt.Errorf("pickfirst: unable to unmarshal LB policy config: %s, error: %v", string(js), err) + } + return cfg, nil +} + +type pfConfig struct { + serviceconfig.LoadBalancingConfig `json:"-"` + + // If set to true, instructs the LB policy to shuffle the order of the list + // of endpoints received from the name resolver before attempting to + // connect to them. + ShuffleAddressList bool `json:"shuffleAddressList"` +} + +// scData keeps track of the current state of the subConn. +// It is not safe for concurrent access. +type scData struct { + // The following fields are initialized at build time and read-only after + // that. + subConn balancer.SubConn + addr resolver.Address + + state connectivity.State + lastErr error +} + +func (b *pickfirstBalancer) newSCData(addr resolver.Address) (*scData, error) { + sd := &scData{ + state: connectivity.Idle, + addr: addr, + } + sc, err := b.cc.NewSubConn([]resolver.Address{addr}, balancer.NewSubConnOptions{ + StateListener: func(state balancer.SubConnState) { + b.updateSubConnState(sd, state) + }, + }) + if err != nil { + return nil, err + } + sd.subConn = sc + return sd, nil +} + +type pickfirstBalancer struct { + // The following fields are initialized at build time and read-only after + // that and therefore do not need to be guarded by a mutex. 
+ logger *internalgrpclog.PrefixLogger + cc balancer.ClientConn + + // The mutex is used to ensure synchronization of updates triggered + // from the idle picker and the already serialized resolver, + // SubConn state updates. + mu sync.Mutex + state connectivity.State + // scData for active subonns mapped by address. + subConns *resolver.AddressMap + addressList addressList + firstPass bool + numTF int +} + +// ResolverError is called by the ClientConn when the name resolver produces +// an error or when pickfirst determined the resolver update to be invalid. +func (b *pickfirstBalancer) ResolverError(err error) { + b.mu.Lock() + defer b.mu.Unlock() + b.resolverErrorLocked(err) +} + +func (b *pickfirstBalancer) resolverErrorLocked(err error) { + if b.logger.V(2) { + b.logger.Infof("Received error from the name resolver: %v", err) + } + + // The picker will not change since the balancer does not currently + // report an error. If the balancer hasn't received a single good resolver + // update yet, transition to TRANSIENT_FAILURE. + if b.state != connectivity.TransientFailure && b.addressList.size() > 0 { + if b.logger.V(2) { + b.logger.Infof("Ignoring resolver error because balancer is using a previous good update.") + } + return + } + + b.cc.UpdateState(balancer.State{ + ConnectivityState: connectivity.TransientFailure, + Picker: &picker{err: fmt.Errorf("name resolver error: %v", err)}, + }) +} + +func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState) error { + b.mu.Lock() + defer b.mu.Unlock() + if len(state.ResolverState.Addresses) == 0 && len(state.ResolverState.Endpoints) == 0 { + // Cleanup state pertaining to the previous resolver state. + // Treat an empty address list like an error by calling b.ResolverError. + b.state = connectivity.TransientFailure + b.closeSubConnsLocked() + b.addressList.updateAddrs(nil) + b.resolverErrorLocked(errors.New("produced zero addresses")) + return balancer.ErrBadResolverState + } + cfg, ok := state.BalancerConfig.(pfConfig) + if state.BalancerConfig != nil && !ok { + return fmt.Errorf("pickfirst: received illegal BalancerConfig (type %T): %v: %w", state.BalancerConfig, state.BalancerConfig, balancer.ErrBadResolverState) + } + + if b.logger.V(2) { + b.logger.Infof("Received new config %s, resolver state %s", pretty.ToJSON(cfg), pretty.ToJSON(state.ResolverState)) + } + + var newAddrs []resolver.Address + if endpoints := state.ResolverState.Endpoints; len(endpoints) != 0 { + // Perform the optional shuffling described in gRFC A62. The shuffling + // will change the order of endpoints but not touch the order of the + // addresses within each endpoint. - A61 + if cfg.ShuffleAddressList { + endpoints = append([]resolver.Endpoint{}, endpoints...) + internal.RandShuffle(len(endpoints), func(i, j int) { endpoints[i], endpoints[j] = endpoints[j], endpoints[i] }) + } + + // "Flatten the list by concatenating the ordered list of addresses for + // each of the endpoints, in order." - A61 + for _, endpoint := range endpoints { + // "In the flattened list, interleave addresses from the two address + // families, as per RFC-8305 section 4." - A61 + // TODO: support the above language. + newAddrs = append(newAddrs, endpoint.Addresses...) + } + } else { + // Endpoints not set, process addresses until we migrate resolver + // emissions fully to Endpoints. 
The top channel does wrap emitted + // addresses with endpoints, however some balancers such as weighted + // target do not forward the corresponding correct endpoints down/split + // endpoints properly. Once all balancers correctly forward endpoints + // down, can delete this else conditional. + newAddrs = state.ResolverState.Addresses + if cfg.ShuffleAddressList { + newAddrs = append([]resolver.Address{}, newAddrs...) + internal.RandShuffle(len(endpoints), func(i, j int) { endpoints[i], endpoints[j] = endpoints[j], endpoints[i] }) + } + } + + // If an address appears in multiple endpoints or in the same endpoint + // multiple times, we keep it only once. We will create only one SubConn + // for the address because an AddressMap is used to store SubConns. + // Not de-duplicating would result in attempting to connect to the same + // SubConn multiple times in the same pass. We don't want this. + newAddrs = deDupAddresses(newAddrs) + + // Since we have a new set of addresses, we are again at first pass. + b.firstPass = true + + // If the previous ready SubConn exists in new address list, + // keep this connection and don't create new SubConns. + prevAddr := b.addressList.currentAddress() + prevAddrsCount := b.addressList.size() + b.addressList.updateAddrs(newAddrs) + if b.state == connectivity.Ready && b.addressList.seekTo(prevAddr) { + return nil + } + + b.reconcileSubConnsLocked(newAddrs) + // If it's the first resolver update or the balancer was already READY + // (but the new address list does not contain the ready SubConn) or + // CONNECTING, enter CONNECTING. + // We may be in TRANSIENT_FAILURE due to a previous empty address list, + // we should still enter CONNECTING because the sticky TF behaviour + // mentioned in A62 applies only when the TRANSIENT_FAILURE is reported + // due to connectivity failures. + if b.state == connectivity.Ready || b.state == connectivity.Connecting || prevAddrsCount == 0 { + // Start connection attempt at first address. + b.state = connectivity.Connecting + b.cc.UpdateState(balancer.State{ + ConnectivityState: connectivity.Connecting, + Picker: &picker{err: balancer.ErrNoSubConnAvailable}, + }) + b.requestConnectionLocked() + } else if b.state == connectivity.TransientFailure { + // If we're in TRANSIENT_FAILURE, we stay in TRANSIENT_FAILURE until + // we're READY. See A62. + b.requestConnectionLocked() + } + return nil +} + +// UpdateSubConnState is unused as a StateListener is always registered when +// creating SubConns. +func (b *pickfirstBalancer) UpdateSubConnState(subConn balancer.SubConn, state balancer.SubConnState) { + b.logger.Errorf("UpdateSubConnState(%v, %+v) called unexpectedly", subConn, state) +} + +func (b *pickfirstBalancer) Close() { + b.mu.Lock() + defer b.mu.Unlock() + b.closeSubConnsLocked() + b.state = connectivity.Shutdown +} + +// ExitIdle moves the balancer out of idle state. It can be called concurrently +// by the idlePicker and clientConn so access to variables should be +// synchronized. +func (b *pickfirstBalancer) ExitIdle() { + b.mu.Lock() + defer b.mu.Unlock() + if b.state == connectivity.Idle && b.addressList.currentAddress() == b.addressList.first() { + b.firstPass = true + b.requestConnectionLocked() + } +} + +func (b *pickfirstBalancer) closeSubConnsLocked() { + for _, sd := range b.subConns.Values() { + sd.(*scData).subConn.Shutdown() + } + b.subConns = resolver.NewAddressMap() +} + +// deDupAddresses ensures that each address appears only once in the slice. 
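The shuffling performed above is opt-in: it only happens when the pick_first service config sets shuffleAddressList (gRFC A62), which is what the pfConfig struct earlier in this file deserializes. A rough sketch of a client enabling it — the target, the credentials choice, and the use of a default service config are illustrative, not part of this change:

package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// shuffleAddressList maps onto pfConfig.ShuffleAddressList; when true,
	// pick_first shuffles the resolved endpoints before its first pass.
	const svcConfig = `{"loadBalancingConfig": [{"pick_first": {"shuffleAddressList": true}}]}`
	conn, err := grpc.NewClient("dns:///example.internal:50051", // hypothetical target
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithDefaultServiceConfig(svcConfig),
	)
	if err != nil {
		log.Fatalf("grpc.NewClient: %v", err)
	}
	defer conn.Close()
}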
+func deDupAddresses(addrs []resolver.Address) []resolver.Address { + seenAddrs := resolver.NewAddressMap() + retAddrs := []resolver.Address{} + + for _, addr := range addrs { + if _, ok := seenAddrs.Get(addr); ok { + continue + } + retAddrs = append(retAddrs, addr) + } + return retAddrs +} + +// reconcileSubConnsLocked updates the active subchannels based on a new address +// list from the resolver. It does this by: +// - closing subchannels: any existing subchannels associated with addresses +// that are no longer in the updated list are shut down. +// - removing subchannels: entries for these closed subchannels are removed +// from the subchannel map. +// +// This ensures that the subchannel map accurately reflects the current set of +// addresses received from the name resolver. +func (b *pickfirstBalancer) reconcileSubConnsLocked(newAddrs []resolver.Address) { + newAddrsMap := resolver.NewAddressMap() + for _, addr := range newAddrs { + newAddrsMap.Set(addr, true) + } + + for _, oldAddr := range b.subConns.Keys() { + if _, ok := newAddrsMap.Get(oldAddr); ok { + continue + } + val, _ := b.subConns.Get(oldAddr) + val.(*scData).subConn.Shutdown() + b.subConns.Delete(oldAddr) + } +} + +// shutdownRemainingLocked shuts down remaining subConns. Called when a subConn +// becomes ready, which means that all other subConn must be shutdown. +func (b *pickfirstBalancer) shutdownRemainingLocked(selected *scData) { + for _, v := range b.subConns.Values() { + sd := v.(*scData) + if sd.subConn != selected.subConn { + sd.subConn.Shutdown() + } + } + b.subConns = resolver.NewAddressMap() + b.subConns.Set(selected.addr, selected) +} + +// requestConnectionLocked starts connecting on the subchannel corresponding to +// the current address. If no subchannel exists, one is created. If the current +// subchannel is in TransientFailure, a connection to the next address is +// attempted until a subchannel is found. +func (b *pickfirstBalancer) requestConnectionLocked() { + if !b.addressList.isValid() { + return + } + var lastErr error + for valid := true; valid; valid = b.addressList.increment() { + curAddr := b.addressList.currentAddress() + sd, ok := b.subConns.Get(curAddr) + if !ok { + var err error + // We want to assign the new scData to sd from the outer scope, + // hence we can't use := below. + sd, err = b.newSCData(curAddr) + if err != nil { + // This should never happen, unless the clientConn is being shut + // down. + if b.logger.V(2) { + b.logger.Infof("Failed to create a subConn for address %v: %v", curAddr.String(), err) + } + // Do nothing, the LB policy will be closed soon. + return + } + b.subConns.Set(curAddr, sd) + } + + scd := sd.(*scData) + switch scd.state { + case connectivity.Idle: + scd.subConn.Connect() + case connectivity.TransientFailure: + // Try the next address. + lastErr = scd.lastErr + continue + case connectivity.Ready: + // Should never happen. + b.logger.Errorf("Requesting a connection even though we have a READY SubConn") + case connectivity.Shutdown: + // Should never happen. + b.logger.Errorf("SubConn with state SHUTDOWN present in SubConns map") + case connectivity.Connecting: + // Wait for the SubConn to report success or failure. + } + return + } + // All the remaining addresses in the list are in TRANSIENT_FAILURE, end the + // first pass. 
+ b.endFirstPassLocked(lastErr) +} + +func (b *pickfirstBalancer) updateSubConnState(sd *scData, newState balancer.SubConnState) { + b.mu.Lock() + defer b.mu.Unlock() + oldState := sd.state + sd.state = newState.ConnectivityState + // Previously relevant SubConns can still callback with state updates. + // To prevent pickers from returning these obsolete SubConns, this logic + // is included to check if the current list of active SubConns includes this + // SubConn. + if activeSD, found := b.subConns.Get(sd.addr); !found || activeSD != sd { + return + } + if newState.ConnectivityState == connectivity.Shutdown { + return + } + + if newState.ConnectivityState == connectivity.Ready { + b.shutdownRemainingLocked(sd) + if !b.addressList.seekTo(sd.addr) { + // This should not fail as we should have only one SubConn after + // entering READY. The SubConn should be present in the addressList. + b.logger.Errorf("Address %q not found address list in %v", sd.addr, b.addressList.addresses) + return + } + b.state = connectivity.Ready + b.cc.UpdateState(balancer.State{ + ConnectivityState: connectivity.Ready, + Picker: &picker{result: balancer.PickResult{SubConn: sd.subConn}}, + }) + return + } + + // If the LB policy is READY, and it receives a subchannel state change, + // it means that the READY subchannel has failed. + // A SubConn can also transition from CONNECTING directly to IDLE when + // a transport is successfully created, but the connection fails + // before the SubConn can send the notification for READY. We treat + // this as a successful connection and transition to IDLE. + if (b.state == connectivity.Ready && newState.ConnectivityState != connectivity.Ready) || (oldState == connectivity.Connecting && newState.ConnectivityState == connectivity.Idle) { + // Once a transport fails, the balancer enters IDLE and starts from + // the first address when the picker is used. + b.shutdownRemainingLocked(sd) + b.state = connectivity.Idle + b.addressList.reset() + b.cc.UpdateState(balancer.State{ + ConnectivityState: connectivity.Idle, + Picker: &idlePicker{exitIdle: sync.OnceFunc(b.ExitIdle)}, + }) + return + } + + if b.firstPass { + switch newState.ConnectivityState { + case connectivity.Connecting: + // The balancer can be in either IDLE, CONNECTING or + // TRANSIENT_FAILURE. If it's in TRANSIENT_FAILURE, stay in + // TRANSIENT_FAILURE until it's READY. See A62. + // If the balancer is already in CONNECTING, no update is needed. + if b.state == connectivity.Idle { + b.state = connectivity.Connecting + b.cc.UpdateState(balancer.State{ + ConnectivityState: connectivity.Connecting, + Picker: &picker{err: balancer.ErrNoSubConnAvailable}, + }) + } + case connectivity.TransientFailure: + sd.lastErr = newState.ConnectionError + // Since we're re-using common SubConns while handling resolver + // updates, we could receive an out of turn TRANSIENT_FAILURE from + // a pass over the previous address list. We ignore such updates. + + if curAddr := b.addressList.currentAddress(); !equalAddressIgnoringBalAttributes(&curAddr, &sd.addr) { + return + } + if b.addressList.increment() { + b.requestConnectionLocked() + return + } + // End of the first pass. + b.endFirstPassLocked(newState.ConnectionError) + } + return + } + + // We have finished the first pass, keep re-connecting failing SubConns. 
+ switch newState.ConnectivityState { + case connectivity.TransientFailure: + b.numTF = (b.numTF + 1) % b.subConns.Len() + sd.lastErr = newState.ConnectionError + if b.numTF%b.subConns.Len() == 0 { + b.cc.UpdateState(balancer.State{ + ConnectivityState: connectivity.TransientFailure, + Picker: &picker{err: newState.ConnectionError}, + }) + } + // We don't need to request re-resolution since the SubConn already + // does that before reporting TRANSIENT_FAILURE. + // TODO: #7534 - Move re-resolution requests from SubConn into + // pick_first. + case connectivity.Idle: + sd.subConn.Connect() + } +} + +func (b *pickfirstBalancer) endFirstPassLocked(lastErr error) { + b.firstPass = false + b.numTF = 0 + b.state = connectivity.TransientFailure + + b.cc.UpdateState(balancer.State{ + ConnectivityState: connectivity.TransientFailure, + Picker: &picker{err: lastErr}, + }) + // Start re-connecting all the SubConns that are already in IDLE. + for _, v := range b.subConns.Values() { + sd := v.(*scData) + if sd.state == connectivity.Idle { + sd.subConn.Connect() + } + } +} + +type picker struct { + result balancer.PickResult + err error +} + +func (p *picker) Pick(balancer.PickInfo) (balancer.PickResult, error) { + return p.result, p.err +} + +// idlePicker is used when the SubConn is IDLE and kicks the SubConn into +// CONNECTING when Pick is called. +type idlePicker struct { + exitIdle func() +} + +func (i *idlePicker) Pick(balancer.PickInfo) (balancer.PickResult, error) { + i.exitIdle() + return balancer.PickResult{}, balancer.ErrNoSubConnAvailable +} + +// addressList manages sequentially iterating over addresses present in a list +// of endpoints. It provides a 1 dimensional view of the addresses present in +// the endpoints. +// This type is not safe for concurrent access. +type addressList struct { + addresses []resolver.Address + idx int +} + +func (al *addressList) isValid() bool { + return al.idx < len(al.addresses) +} + +func (al *addressList) size() int { + return len(al.addresses) +} + +// increment moves to the next index in the address list. +// This method returns false if it went off the list, true otherwise. +func (al *addressList) increment() bool { + if !al.isValid() { + return false + } + al.idx++ + return al.idx < len(al.addresses) +} + +// currentAddress returns the current address pointed to in the addressList. +// If the list is in an invalid state, it returns an empty address instead. +func (al *addressList) currentAddress() resolver.Address { + if !al.isValid() { + return resolver.Address{} + } + return al.addresses[al.idx] +} + +// first returns the first address in the list. If the list is empty, it returns +// an empty address instead. +func (al *addressList) first() resolver.Address { + if len(al.addresses) == 0 { + return resolver.Address{} + } + return al.addresses[0] +} + +func (al *addressList) reset() { + al.idx = 0 +} + +func (al *addressList) updateAddrs(addrs []resolver.Address) { + al.addresses = addrs + al.reset() +} + +// seekTo returns false if the needle was not found and the current index was +// left unchanged. +func (al *addressList) seekTo(needle resolver.Address) bool { + for ai, addr := range al.addresses { + if !equalAddressIgnoringBalAttributes(&addr, &needle) { + continue + } + al.idx = ai + return true + } + return false +} + +// equalAddressIgnoringBalAttributes returns true is a and b are considered +// equal. This is different from the Equal method on the resolver.Address type +// which considers all fields to determine equality. 
Here, we only consider +// fields that are meaningful to the SubConn. +func equalAddressIgnoringBalAttributes(a, b *resolver.Address) bool { + return a.Addr == b.Addr && a.ServerName == b.ServerName && + a.Attributes.Equal(b.Attributes) && + a.Metadata == b.Metadata +} diff --git a/vendor/google.golang.org/grpc/balancer_wrapper.go b/vendor/google.golang.org/grpc/balancer_wrapper.go index 6561b769e..2a4f2878a 100644 --- a/vendor/google.golang.org/grpc/balancer_wrapper.go +++ b/vendor/google.golang.org/grpc/balancer_wrapper.go @@ -24,12 +24,14 @@ import ( "sync" "google.golang.org/grpc/balancer" + "google.golang.org/grpc/codes" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/balancer/gracefulswitch" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/resolver" + "google.golang.org/grpc/status" ) var setConnectedAddress = internal.SetConnectedAddress.(func(*balancer.SubConnState, resolver.Address)) @@ -192,7 +194,7 @@ func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer return acbw, nil } -func (ccb *ccBalancerWrapper) RemoveSubConn(sc balancer.SubConn) { +func (ccb *ccBalancerWrapper) RemoveSubConn(balancer.SubConn) { // The graceful switch balancer will never call this. logger.Errorf("ccb RemoveSubConn(%v) called unexpectedly, sc") } @@ -256,8 +258,8 @@ type acBalancerWrapper struct { ccb *ccBalancerWrapper // read-only stateListener func(balancer.SubConnState) - mu sync.Mutex - producers map[balancer.ProducerBuilder]*refCountedProducer + producersMu sync.Mutex + producers map[balancer.ProducerBuilder]*refCountedProducer } // updateState is invoked by grpc to push a subConn state update to the @@ -267,6 +269,9 @@ func (acbw *acBalancerWrapper) updateState(s connectivity.State, curAddr resolve if ctx.Err() != nil || acbw.ccb.balancer == nil { return } + // Invalidate all producers on any state change. + acbw.closeProducers() + // Even though it is optional for balancers, gracefulswitch ensures // opts.StateListener is set, so this cannot ever be nil. // TODO: delete this comment when UpdateSubConnState is removed. @@ -275,16 +280,6 @@ func (acbw *acBalancerWrapper) updateState(s connectivity.State, curAddr resolve setConnectedAddress(&scs, curAddr) } acbw.stateListener(scs) - acbw.ac.mu.Lock() - defer acbw.ac.mu.Unlock() - if s == connectivity.Ready { - // When changing states to READY, reset stateReadyChan. Wait until - // after we notify the LB policy's listener(s) in order to prevent - // ac.getTransport() from unblocking before the LB policy starts - // tracking the subchannel as READY. - close(acbw.ac.stateReadyChan) - acbw.ac.stateReadyChan = make(chan struct{}) - } }) } @@ -301,6 +296,7 @@ func (acbw *acBalancerWrapper) Connect() { } func (acbw *acBalancerWrapper) Shutdown() { + acbw.closeProducers() acbw.ccb.cc.removeAddrConn(acbw.ac, errConnDrain) } @@ -308,9 +304,10 @@ func (acbw *acBalancerWrapper) Shutdown() { // ready, blocks until it is or ctx expires. Returns an error when the context // expires or the addrConn is shut down. 
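The producer changes in this wrapper implement the contract described in balancer/balancer.go above: every producer is invalidated on a subchannel state change, Build's close function must block until asynchronous cleanup is done, and creating streams on a SubConn that is not READY now fails fast with codes.Unavailable instead of blocking. A minimal sketch of a ProducerBuilder written against that contract — the pollerBuilder and poller types are illustrative, not part of gRPC:

package pollerexample // hypothetical package, for illustration only

import (
	"context"

	"google.golang.org/grpc"
	"google.golang.org/grpc/balancer"
)

type pollerBuilder struct{} // hypothetical ProducerBuilder

type poller struct {
	cc   grpc.ClientConnInterface // RPCs here only succeed while the SubConn is READY
	done chan struct{}
}

// Build starts background work on the SubConn and returns a close function
// that blocks until that work has fully stopped, as the revised contract
// requires; the wrapper calls it on any state change or shutdown.
func (pollerBuilder) Build(cci any) (balancer.Producer, func()) {
	ctx, cancel := context.WithCancel(context.Background())
	p := &poller{cc: cci.(grpc.ClientConnInterface), done: make(chan struct{})}
	go p.run(ctx)
	return p, func() {
		cancel()
		<-p.done // block until asynchronous cleanup completes
	}
}

func (p *poller) run(ctx context.Context) {
	defer close(p.done)
	// Issue RPCs via p.cc here; a non-READY SubConn now surfaces as a
	// codes.Unavailable error rather than a blocked stream creation.
	<-ctx.Done()
}

A balancer would obtain such a producer through SubConn.GetOrBuildProducer(pollerBuilder{}) while the SubConn is READY.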
func (acbw *acBalancerWrapper) NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) { - transport, err := acbw.ac.getTransport(ctx) - if err != nil { - return nil, err + transport := acbw.ac.getReadyTransport() + if transport == nil { + return nil, status.Errorf(codes.Unavailable, "SubConn state is not Ready") + } return newNonRetryClientStream(ctx, desc, method, transport, acbw.ac, opts...) } @@ -335,15 +332,15 @@ type refCountedProducer struct { } func (acbw *acBalancerWrapper) GetOrBuildProducer(pb balancer.ProducerBuilder) (balancer.Producer, func()) { - acbw.mu.Lock() - defer acbw.mu.Unlock() + acbw.producersMu.Lock() + defer acbw.producersMu.Unlock() // Look up existing producer from this builder. pData := acbw.producers[pb] if pData == nil { // Not found; create a new one and add it to the producers map. - p, close := pb.Build(acbw) - pData = &refCountedProducer{producer: p, close: close} + p, closeFn := pb.Build(acbw) + pData = &refCountedProducer{producer: p, close: closeFn} acbw.producers[pb] = pData } // Account for this new reference. @@ -353,13 +350,26 @@ func (acbw *acBalancerWrapper) GetOrBuildProducer(pb balancer.ProducerBuilder) ( // and delete the refCountedProducer from the map if the total reference // count goes to zero. unref := func() { - acbw.mu.Lock() + acbw.producersMu.Lock() + // If closeProducers has already closed this producer instance, refs is + // set to 0, so the check after decrementing will never pass, and the + // producer will not be double-closed. pData.refs-- if pData.refs == 0 { defer pData.close() // Run outside the acbw mutex delete(acbw.producers, pb) } - acbw.mu.Unlock() + acbw.producersMu.Unlock() } return pData.producer, grpcsync.OnceFunc(unref) } + +func (acbw *acBalancerWrapper) closeProducers() { + acbw.producersMu.Lock() + defer acbw.producersMu.Unlock() + for pb, pData := range acbw.producers { + pData.refs = 0 + pData.close() + delete(acbw.producers, pb) + } +} diff --git a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go index fcd1cfe80..55bffaa77 100644 --- a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go +++ b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go @@ -18,7 +18,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.34.1 +// protoc-gen-go v1.34.2 // protoc v5.27.1 // source: grpc/binlog/v1/binarylog.proto @@ -1015,7 +1015,7 @@ func file_grpc_binlog_v1_binarylog_proto_rawDescGZIP() []byte { var file_grpc_binlog_v1_binarylog_proto_enumTypes = make([]protoimpl.EnumInfo, 3) var file_grpc_binlog_v1_binarylog_proto_msgTypes = make([]protoimpl.MessageInfo, 8) -var file_grpc_binlog_v1_binarylog_proto_goTypes = []interface{}{ +var file_grpc_binlog_v1_binarylog_proto_goTypes = []any{ (GrpcLogEntry_EventType)(0), // 0: grpc.binarylog.v1.GrpcLogEntry.EventType (GrpcLogEntry_Logger)(0), // 1: grpc.binarylog.v1.GrpcLogEntry.Logger (Address_Type)(0), // 2: grpc.binarylog.v1.Address.Type @@ -1058,7 +1058,7 @@ func file_grpc_binlog_v1_binarylog_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_grpc_binlog_v1_binarylog_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_grpc_binlog_v1_binarylog_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*GrpcLogEntry); i { case 0: return &v.state @@ -1070,7 +1070,7 @@ func file_grpc_binlog_v1_binarylog_proto_init() { return nil } } - file_grpc_binlog_v1_binarylog_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_grpc_binlog_v1_binarylog_proto_msgTypes[1].Exporter = func(v any, i int) any { switch v := v.(*ClientHeader); i { case 0: return &v.state @@ -1082,7 +1082,7 @@ func file_grpc_binlog_v1_binarylog_proto_init() { return nil } } - file_grpc_binlog_v1_binarylog_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + file_grpc_binlog_v1_binarylog_proto_msgTypes[2].Exporter = func(v any, i int) any { switch v := v.(*ServerHeader); i { case 0: return &v.state @@ -1094,7 +1094,7 @@ func file_grpc_binlog_v1_binarylog_proto_init() { return nil } } - file_grpc_binlog_v1_binarylog_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + file_grpc_binlog_v1_binarylog_proto_msgTypes[3].Exporter = func(v any, i int) any { switch v := v.(*Trailer); i { case 0: return &v.state @@ -1106,7 +1106,7 @@ func file_grpc_binlog_v1_binarylog_proto_init() { return nil } } - file_grpc_binlog_v1_binarylog_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + file_grpc_binlog_v1_binarylog_proto_msgTypes[4].Exporter = func(v any, i int) any { switch v := v.(*Message); i { case 0: return &v.state @@ -1118,7 +1118,7 @@ func file_grpc_binlog_v1_binarylog_proto_init() { return nil } } - file_grpc_binlog_v1_binarylog_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + file_grpc_binlog_v1_binarylog_proto_msgTypes[5].Exporter = func(v any, i int) any { switch v := v.(*Metadata); i { case 0: return &v.state @@ -1130,7 +1130,7 @@ func file_grpc_binlog_v1_binarylog_proto_init() { return nil } } - file_grpc_binlog_v1_binarylog_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + file_grpc_binlog_v1_binarylog_proto_msgTypes[6].Exporter = func(v any, i int) any { switch v := v.(*MetadataEntry); i { case 0: return &v.state @@ -1142,7 +1142,7 @@ func file_grpc_binlog_v1_binarylog_proto_init() { return nil } } - file_grpc_binlog_v1_binarylog_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + file_grpc_binlog_v1_binarylog_proto_msgTypes[7].Exporter = func(v any, i int) any { switch v := v.(*Address); i { case 0: return &v.state @@ -1155,7 +1155,7 @@ func file_grpc_binlog_v1_binarylog_proto_init() { } } } - file_grpc_binlog_v1_binarylog_proto_msgTypes[0].OneofWrappers = []interface{}{ + 
file_grpc_binlog_v1_binarylog_proto_msgTypes[0].OneofWrappers = []any{ (*GrpcLogEntry_ClientHeader)(nil), (*GrpcLogEntry_ServerHeader)(nil), (*GrpcLogEntry_Message)(nil), diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go index 9c8850e3f..19763f8ed 100644 --- a/vendor/google.golang.org/grpc/clientconn.go +++ b/vendor/google.golang.org/grpc/clientconn.go @@ -825,14 +825,13 @@ func (cc *ClientConn) newAddrConnLocked(addrs []resolver.Address, opts balancer. } ac := &addrConn{ - state: connectivity.Idle, - cc: cc, - addrs: copyAddresses(addrs), - scopts: opts, - dopts: cc.dopts, - channelz: channelz.RegisterSubChannel(cc.channelz, ""), - resetBackoff: make(chan struct{}), - stateReadyChan: make(chan struct{}), + state: connectivity.Idle, + cc: cc, + addrs: copyAddresses(addrs), + scopts: opts, + dopts: cc.dopts, + channelz: channelz.RegisterSubChannel(cc.channelz, ""), + resetBackoff: make(chan struct{}), } ac.ctx, ac.cancel = context.WithCancel(cc.ctx) // Start with our address set to the first address; this may be updated if @@ -1141,10 +1140,15 @@ func (cc *ClientConn) Close() error { <-cc.resolverWrapper.serializer.Done() <-cc.balancerWrapper.serializer.Done() - + var wg sync.WaitGroup for ac := range conns { - ac.tearDown(ErrClientConnClosing) + wg.Add(1) + go func(ac *addrConn) { + defer wg.Done() + ac.tearDown(ErrClientConnClosing) + }(ac) } + wg.Wait() cc.addTraceEvent("deleted") // TraceEvent needs to be called before RemoveEntry, as TraceEvent may add // trace reference to the entity being deleted, and thus prevent it from being @@ -1179,8 +1183,7 @@ type addrConn struct { addrs []resolver.Address // All addresses that the resolver resolved to. // Use updateConnectivityState for updating addrConn's connectivity state. - state connectivity.State - stateReadyChan chan struct{} // closed and recreated on every READY state change. + state connectivity.State backoffIdx int // Needs to be stateful for resetConnectBackoff. resetBackoff chan struct{} @@ -1251,6 +1254,8 @@ func (ac *addrConn) resetTransportAndUnlock() { ac.mu.Unlock() if err := ac.tryAllAddrs(acCtx, addrs, connectDeadline); err != nil { + // TODO: #7534 - Move re-resolution requests into the pick_first LB policy + // to ensure one resolution request per pass instead of per subconn failure. ac.cc.resolveNow(resolver.ResolveNowOptions{}) ac.mu.Lock() if acCtx.Err() != nil { @@ -1292,7 +1297,7 @@ func (ac *addrConn) resetTransportAndUnlock() { ac.mu.Unlock() } -// tryAllAddrs tries to creates a connection to the addresses, and stop when at +// tryAllAddrs tries to create a connection to the addresses, and stop when at // the first successful one. It returns an error if no address was successfully // connected, or updates ac appropriately with the new transport. func (ac *addrConn) tryAllAddrs(ctx context.Context, addrs []resolver.Address, connectDeadline time.Time) error { @@ -1504,29 +1509,6 @@ func (ac *addrConn) getReadyTransport() transport.ClientTransport { return nil } -// getTransport waits until the addrconn is ready and returns the transport. -// If the context expires first, returns an appropriate status. If the -// addrConn is stopped first, returns an Unavailable status error. 
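The Close change above replaces a sequential loop with per-connection goroutines, so shutdown latency is bounded by the slowest tearDown rather than by their sum. A standalone sketch of the same pattern, using io.Closer in place of the unexported addrConn type:

package shutdownsketch // hypothetical package, for illustration only

import (
	"io"
	"sync"
)

// teardownAll mirrors what ClientConn.Close now does: close every connection
// in its own goroutine and wait for all of them before returning.
func teardownAll(conns []io.Closer) {
	var wg sync.WaitGroup
	for _, c := range conns {
		wg.Add(1)
		go func(c io.Closer) {
			defer wg.Done()
			_ = c.Close() // per-connection errors are ignored, as tearDown's are
		}(c)
	}
	wg.Wait()
}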
-func (ac *addrConn) getTransport(ctx context.Context) (transport.ClientTransport, error) { - for ctx.Err() == nil { - ac.mu.Lock() - t, state, sc := ac.transport, ac.state, ac.stateReadyChan - ac.mu.Unlock() - if state == connectivity.Ready { - return t, nil - } - if state == connectivity.Shutdown { - return nil, status.Errorf(codes.Unavailable, "SubConn shutting down") - } - - select { - case <-ctx.Done(): - case <-sc: - } - } - return nil, status.FromContextError(ctx.Err()).Err() -} - // tearDown starts to tear down the addrConn. // // Note that tearDown doesn't remove ac from ac.cc.conns, so the addrConn struct diff --git a/vendor/google.golang.org/grpc/credentials/insecure/insecure.go b/vendor/google.golang.org/grpc/credentials/insecure/insecure.go index 82bee1443..4c805c644 100644 --- a/vendor/google.golang.org/grpc/credentials/insecure/insecure.go +++ b/vendor/google.golang.org/grpc/credentials/insecure/insecure.go @@ -40,7 +40,7 @@ func NewCredentials() credentials.TransportCredentials { // NoSecurity. type insecureTC struct{} -func (insecureTC) ClientHandshake(ctx context.Context, _ string, conn net.Conn) (net.Conn, credentials.AuthInfo, error) { +func (insecureTC) ClientHandshake(_ context.Context, _ string, conn net.Conn) (net.Conn, credentials.AuthInfo, error) { return conn, info{credentials.CommonAuthInfo{SecurityLevel: credentials.NoSecurity}}, nil } diff --git a/vendor/google.golang.org/grpc/credentials/tls.go b/vendor/google.golang.org/grpc/credentials/tls.go index 411435854..e163a473d 100644 --- a/vendor/google.golang.org/grpc/credentials/tls.go +++ b/vendor/google.golang.org/grpc/credentials/tls.go @@ -200,25 +200,40 @@ var tls12ForbiddenCipherSuites = map[uint16]struct{}{ // NewTLS uses c to construct a TransportCredentials based on TLS. 
func NewTLS(c *tls.Config) TransportCredentials { - tc := &tlsCreds{credinternal.CloneTLSConfig(c)} - tc.config.NextProtos = credinternal.AppendH2ToNextProtos(tc.config.NextProtos) + config := applyDefaults(c) + if config.GetConfigForClient != nil { + oldFn := config.GetConfigForClient + config.GetConfigForClient = func(hello *tls.ClientHelloInfo) (*tls.Config, error) { + cfgForClient, err := oldFn(hello) + if err != nil || cfgForClient == nil { + return cfgForClient, err + } + return applyDefaults(cfgForClient), nil + } + } + return &tlsCreds{config: config} +} + +func applyDefaults(c *tls.Config) *tls.Config { + config := credinternal.CloneTLSConfig(c) + config.NextProtos = credinternal.AppendH2ToNextProtos(config.NextProtos) // If the user did not configure a MinVersion and did not configure a // MaxVersion < 1.2, use MinVersion=1.2, which is required by // https://datatracker.ietf.org/doc/html/rfc7540#section-9.2 - if tc.config.MinVersion == 0 && (tc.config.MaxVersion == 0 || tc.config.MaxVersion >= tls.VersionTLS12) { - tc.config.MinVersion = tls.VersionTLS12 + if config.MinVersion == 0 && (config.MaxVersion == 0 || config.MaxVersion >= tls.VersionTLS12) { + config.MinVersion = tls.VersionTLS12 } // If the user did not configure CipherSuites, use all "secure" cipher // suites reported by the TLS package, but remove some explicitly forbidden // by https://datatracker.ietf.org/doc/html/rfc7540#appendix-A - if tc.config.CipherSuites == nil { + if config.CipherSuites == nil { for _, cs := range tls.CipherSuites() { if _, ok := tls12ForbiddenCipherSuites[cs.ID]; !ok { - tc.config.CipherSuites = append(tc.config.CipherSuites, cs.ID) + config.CipherSuites = append(config.CipherSuites, cs.ID) } } } - return tc + return config } // NewClientTLSFromCert constructs TLS credentials from the provided root diff --git a/vendor/google.golang.org/grpc/dialoptions.go b/vendor/google.golang.org/grpc/dialoptions.go index 27c1b9bb6..518692c3a 100644 --- a/vendor/google.golang.org/grpc/dialoptions.go +++ b/vendor/google.golang.org/grpc/dialoptions.go @@ -436,7 +436,7 @@ func WithTimeout(d time.Duration) DialOption { // option to true from the Control field. For a concrete example of how to do // this, see internal.NetDialerWithTCPKeepalive(). // -// For more information, please see [issue 23459] in the Go github repo. +// For more information, please see [issue 23459] in the Go GitHub repo. // // [issue 23459]: https://github.com/golang/go/issues/23459 func WithContextDialer(f func(context.Context, string) (net.Conn, error)) DialOption { @@ -518,6 +518,8 @@ func WithUserAgent(s string) DialOption { // WithKeepaliveParams returns a DialOption that specifies keepalive parameters // for the client transport. +// +// Keepalive is disabled by default. func WithKeepaliveParams(kp keepalive.ClientParameters) DialOption { if kp.Time < internal.KeepaliveMinPingTime { logger.Warningf("Adjusting keepalive ping interval to minimum period of %v", internal.KeepaliveMinPingTime) diff --git a/vendor/google.golang.org/grpc/grpclog/internal/logger.go b/vendor/google.golang.org/grpc/grpclog/internal/logger.go index 0d9a824ce..e524fdd40 100644 --- a/vendor/google.golang.org/grpc/grpclog/internal/logger.go +++ b/vendor/google.golang.org/grpc/grpclog/internal/logger.go @@ -81,7 +81,7 @@ func (l *LoggerWrapper) Errorf(format string, args ...any) { } // V reports whether verbosity level l is at least the requested verbose level. 
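The credentials and dial-option changes above are both about defaults: NewTLS now applies the HTTP/2 requirements (TLS 1.2 floor, h2 ALPN, the RFC 7540 cipher-suite filter) even to configs returned from a GetConfigForClient callback, and keepalive stays disabled unless the client asks for it. A rough illustration of a client relying on those TLS defaults while opting into keepalive — the target and timing values are illustrative:

package main

import (
	"crypto/tls"
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials"
	"google.golang.org/grpc/keepalive"
)

func main() {
	// An empty tls.Config is enough: NewTLS fills in MinVersion TLS 1.2,
	// the h2 ALPN entry, and a filtered cipher-suite list as shown above.
	creds := credentials.NewTLS(&tls.Config{})

	conn, err := grpc.NewClient("dns:///example.internal:443", // hypothetical target
		grpc.WithTransportCredentials(creds),
		// Keepalive is disabled by default; this opts in explicitly.
		grpc.WithKeepaliveParams(keepalive.ClientParameters{
			Time:                30 * time.Second, // ping after 30s of inactivity
			Timeout:             10 * time.Second, // wait 10s for the ping ack
			PermitWithoutStream: false,
		}),
	)
	if err != nil {
		log.Fatalf("grpc.NewClient: %v", err)
	}
	defer conn.Close()
}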
-func (*LoggerWrapper) V(l int) bool { +func (*LoggerWrapper) V(int) bool { // Returns true for all verbose level. return true } diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go index e65cf0ea1..d92335445 100644 --- a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go +++ b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go @@ -17,7 +17,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.34.1 +// protoc-gen-go v1.34.2 // protoc v5.27.1 // source: grpc/health/v1/health.proto @@ -237,7 +237,7 @@ func file_grpc_health_v1_health_proto_rawDescGZIP() []byte { var file_grpc_health_v1_health_proto_enumTypes = make([]protoimpl.EnumInfo, 1) var file_grpc_health_v1_health_proto_msgTypes = make([]protoimpl.MessageInfo, 2) -var file_grpc_health_v1_health_proto_goTypes = []interface{}{ +var file_grpc_health_v1_health_proto_goTypes = []any{ (HealthCheckResponse_ServingStatus)(0), // 0: grpc.health.v1.HealthCheckResponse.ServingStatus (*HealthCheckRequest)(nil), // 1: grpc.health.v1.HealthCheckRequest (*HealthCheckResponse)(nil), // 2: grpc.health.v1.HealthCheckResponse @@ -261,7 +261,7 @@ func file_grpc_health_v1_health_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_grpc_health_v1_health_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_grpc_health_v1_health_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*HealthCheckRequest); i { case 0: return &v.state @@ -273,7 +273,7 @@ func file_grpc_health_v1_health_proto_init() { return nil } } - file_grpc_health_v1_health_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_grpc_health_v1_health_proto_msgTypes[1].Exporter = func(v any, i int) any { switch v := v.(*HealthCheckResponse); i { case 0: return &v.state diff --git a/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/config.go b/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/config.go index 13821a926..85540f86a 100644 --- a/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/config.go +++ b/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/config.go @@ -33,6 +33,8 @@ type lbConfig struct { childConfig serviceconfig.LoadBalancingConfig } +// ChildName returns the name of the child balancer of the gracefulswitch +// Balancer. func ChildName(l serviceconfig.LoadBalancingConfig) string { return l.(*lbConfig).childBuilder.Name() } diff --git a/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go index aa4505a87..966932891 100644 --- a/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go +++ b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go @@ -106,7 +106,7 @@ func (ml *TruncatingMethodLogger) Build(c LogEntryConfig) *binlogpb.GrpcLogEntry } // Log creates a proto binary log entry, and logs it to the sink. 
-func (ml *TruncatingMethodLogger) Log(ctx context.Context, c LogEntryConfig) { +func (ml *TruncatingMethodLogger) Log(_ context.Context, c LogEntryConfig) { ml.sink.Write(ml.Build(c)) } diff --git a/vendor/google.golang.org/grpc/internal/channelz/channel.go b/vendor/google.golang.org/grpc/internal/channelz/channel.go index d7e9e1d54..3ec662799 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/channel.go +++ b/vendor/google.golang.org/grpc/internal/channelz/channel.go @@ -43,6 +43,8 @@ type Channel struct { // Non-zero traceRefCount means the trace of this channel cannot be deleted. traceRefCount int32 + // ChannelMetrics holds connectivity state, target and call metrics for the + // channel within channelz. ChannelMetrics ChannelMetrics } @@ -50,6 +52,8 @@ type Channel struct { // nesting. func (c *Channel) channelzIdentifier() {} +// String returns a string representation of the Channel, including its parent +// entity and ID. func (c *Channel) String() string { if c.Parent == nil { return fmt.Sprintf("Channel #%d", c.ID) @@ -61,24 +65,31 @@ func (c *Channel) id() int64 { return c.ID } +// SubChans returns a copy of the map of sub-channels associated with the +// Channel. func (c *Channel) SubChans() map[int64]string { db.mu.RLock() defer db.mu.RUnlock() return copyMap(c.subChans) } +// NestedChans returns a copy of the map of nested channels associated with the +// Channel. func (c *Channel) NestedChans() map[int64]string { db.mu.RLock() defer db.mu.RUnlock() return copyMap(c.nestedChans) } +// Trace returns a copy of the Channel's trace data. func (c *Channel) Trace() *ChannelTrace { db.mu.RLock() defer db.mu.RUnlock() return c.trace.copy() } +// ChannelMetrics holds connectivity state, target and call metrics for the +// channel within channelz. type ChannelMetrics struct { // The current connectivity state of the channel. State atomic.Pointer[connectivity.State] @@ -136,12 +147,16 @@ func strFromPointer(s *string) string { return *s } +// String returns a string representation of the ChannelMetrics, including its +// state, target, and call metrics. func (c *ChannelMetrics) String() string { return fmt.Sprintf("State: %v, Target: %s, CallsStarted: %v, CallsSucceeded: %v, CallsFailed: %v, LastCallStartedTimestamp: %v", c.State.Load(), strFromPointer(c.Target.Load()), c.CallsStarted.Load(), c.CallsSucceeded.Load(), c.CallsFailed.Load(), c.LastCallStartedTimestamp.Load(), ) } +// NewChannelMetricForTesting creates a new instance of ChannelMetrics with +// specified initial values for testing purposes. 
func NewChannelMetricForTesting(state connectivity.State, target string, started, succeeded, failed, timestamp int64) *ChannelMetrics { c := &ChannelMetrics{} c.State.Store(&state) diff --git a/vendor/google.golang.org/grpc/internal/channelz/channelmap.go b/vendor/google.golang.org/grpc/internal/channelz/channelmap.go index bb531225d..64c791953 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/channelmap.go +++ b/vendor/google.golang.org/grpc/internal/channelz/channelmap.go @@ -234,13 +234,6 @@ func copyMap(m map[int64]string) map[int64]string { return n } -func min(a, b int) int { - if a < b { - return a - } - return b -} - func (c *channelMap) getTopChannels(id int64, maxResults int) ([]*Channel, bool) { if maxResults <= 0 { maxResults = EntriesPerPage diff --git a/vendor/google.golang.org/grpc/internal/channelz/funcs.go b/vendor/google.golang.org/grpc/internal/channelz/funcs.go index 03e24e150..078bb8123 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/funcs.go +++ b/vendor/google.golang.org/grpc/internal/channelz/funcs.go @@ -33,7 +33,7 @@ var ( // outside this package except by tests. IDGen IDGenerator - db *channelMap = newChannelMap() + db = newChannelMap() // EntriesPerPage defines the number of channelz entries to be shown on a web page. EntriesPerPage = 50 curState int32 diff --git a/vendor/google.golang.org/grpc/internal/channelz/server.go b/vendor/google.golang.org/grpc/internal/channelz/server.go index cdfc49d6e..b5a824992 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/server.go +++ b/vendor/google.golang.org/grpc/internal/channelz/server.go @@ -59,6 +59,8 @@ func NewServerMetricsForTesting(started, succeeded, failed, timestamp int64) *Se return sm } +// CopyFrom copies the metrics data from the provided ServerMetrics +// instance into the current instance. func (sm *ServerMetrics) CopyFrom(o *ServerMetrics) { sm.CallsStarted.Store(o.CallsStarted.Load()) sm.CallsSucceeded.Store(o.CallsSucceeded.Load()) diff --git a/vendor/google.golang.org/grpc/internal/channelz/socket.go b/vendor/google.golang.org/grpc/internal/channelz/socket.go index fa64834b2..90103847c 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/socket.go +++ b/vendor/google.golang.org/grpc/internal/channelz/socket.go @@ -70,13 +70,18 @@ type EphemeralSocketMetrics struct { RemoteFlowControlWindow int64 } +// SocketType represents the type of socket. type SocketType string +// SocketType can be one of these. const ( SocketTypeNormal = "NormalSocket" SocketTypeListen = "ListenSocket" ) +// Socket represents a socket within channelz which includes socket +// metrics and data related to socket activity and provides methods +// for managing and interacting with sockets. type Socket struct { Entity SocketType SocketType @@ -100,6 +105,8 @@ type Socket struct { Security credentials.ChannelzSecurityValue } +// String returns a string representation of the Socket, including its parent +// entity, socket type, and ID. func (ls *Socket) String() string { return fmt.Sprintf("%s %s #%d", ls.Parent, ls.SocketType, ls.ID) } diff --git a/vendor/google.golang.org/grpc/internal/channelz/subchannel.go b/vendor/google.golang.org/grpc/internal/channelz/subchannel.go index 3b88e4cba..b20802e6e 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/subchannel.go +++ b/vendor/google.golang.org/grpc/internal/channelz/subchannel.go @@ -47,12 +47,14 @@ func (sc *SubChannel) id() int64 { return sc.ID } +// Sockets returns a copy of the sockets map associated with the SubChannel. 
func (sc *SubChannel) Sockets() map[int64]string { db.mu.RLock() defer db.mu.RUnlock() return copyMap(sc.sockets) } +// Trace returns a copy of the ChannelTrace associated with the SubChannel. func (sc *SubChannel) Trace() *ChannelTrace { db.mu.RLock() defer db.mu.RUnlock() diff --git a/vendor/google.golang.org/grpc/internal/channelz/syscall_nonlinux.go b/vendor/google.golang.org/grpc/internal/channelz/syscall_nonlinux.go index d1ed8df6a..0e6e18e18 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/syscall_nonlinux.go +++ b/vendor/google.golang.org/grpc/internal/channelz/syscall_nonlinux.go @@ -35,13 +35,13 @@ type SocketOptionData struct { // Getsockopt defines the function to get socket options requested by channelz. // It is to be passed to syscall.RawConn.Control(). // Windows OS doesn't support Socket Option -func (s *SocketOptionData) Getsockopt(fd uintptr) { +func (s *SocketOptionData) Getsockopt(uintptr) { once.Do(func() { logger.Warning("Channelz: socket options are not supported on non-linux environments") }) } // GetSocketOption gets the socket option info of the conn. -func GetSocketOption(c any) *SocketOptionData { +func GetSocketOption(any) *SocketOptionData { return nil } diff --git a/vendor/google.golang.org/grpc/internal/channelz/trace.go b/vendor/google.golang.org/grpc/internal/channelz/trace.go index 36b867403..2bffe4777 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/trace.go +++ b/vendor/google.golang.org/grpc/internal/channelz/trace.go @@ -79,13 +79,21 @@ type TraceEvent struct { Parent *TraceEvent } +// ChannelTrace provides tracing information for a channel. +// It tracks various events and metadata related to the channel's lifecycle +// and operations. type ChannelTrace struct { - cm *channelMap - clearCalled bool + cm *channelMap + clearCalled bool + // The time when the trace was created. CreationTime time.Time - EventNum int64 - mu sync.Mutex - Events []*traceEvent + // A counter for the number of events recorded in the + // trace. + EventNum int64 + mu sync.Mutex + // A slice of traceEvent pointers representing the events recorded for + // this channel. + Events []*traceEvent } func (c *ChannelTrace) copy() *ChannelTrace { @@ -175,6 +183,7 @@ var refChannelTypeToString = map[RefChannelType]string{ RefNormalSocket: "NormalSocket", } +// String returns a string representation of the RefChannelType func (r RefChannelType) String() string { return refChannelTypeToString[r] } diff --git a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go index 00abc7c2b..6e7dd6b77 100644 --- a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go +++ b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go @@ -45,11 +45,16 @@ var ( // option is present for backward compatibility. This option may be overridden // by setting the environment variable "GRPC_ENFORCE_ALPN_ENABLED" to "true" // or "false". - EnforceALPNEnabled = boolFromEnv("GRPC_ENFORCE_ALPN_ENABLED", false) + EnforceALPNEnabled = boolFromEnv("GRPC_ENFORCE_ALPN_ENABLED", true) // XDSFallbackSupport is the env variable that controls whether support for // xDS fallback is turned on. If this is unset or is false, only the first // xDS server in the list of server configs will be used. XDSFallbackSupport = boolFromEnv("GRPC_EXPERIMENTAL_XDS_FALLBACK", false) + // NewPickFirstEnabled is set if the new pickfirst leaf policy is to be used + // instead of the exiting pickfirst implementation. 
This can be enabled by + // setting the environment variable "GRPC_EXPERIMENTAL_ENABLE_NEW_PICK_FIRST" + // to "true". + NewPickFirstEnabled = boolFromEnv("GRPC_EXPERIMENTAL_ENABLE_NEW_PICK_FIRST", false) ) func boolFromEnv(envVar string, def bool) bool { diff --git a/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go b/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go index 19b9d6392..8e8e86128 100644 --- a/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go +++ b/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go @@ -53,7 +53,7 @@ func NewCallbackSerializer(ctx context.Context) *CallbackSerializer { return cs } -// TrySchedule tries to schedules the provided callback function f to be +// TrySchedule tries to schedule the provided callback function f to be // executed in the order it was added. This is a best-effort operation. If the // context passed to NewCallbackSerializer was canceled before this method is // called, the callback will not be scheduled. diff --git a/vendor/google.golang.org/grpc/internal/grpcutil/method.go b/vendor/google.golang.org/grpc/internal/grpcutil/method.go index ec62b4775..683d1955c 100644 --- a/vendor/google.golang.org/grpc/internal/grpcutil/method.go +++ b/vendor/google.golang.org/grpc/internal/grpcutil/method.go @@ -39,7 +39,7 @@ func ParseMethod(methodName string) (service, method string, _ error) { } // baseContentType is the base content-type for gRPC. This is a valid -// content-type on it's own, but can also include a content-subtype such as +// content-type on its own, but can also include a content-subtype such as // "proto" as a suffix after "+" or ";". See // https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests // for more details. diff --git a/vendor/google.golang.org/grpc/internal/idle/idle.go b/vendor/google.golang.org/grpc/internal/idle/idle.go index fe49cb74c..2c13ee9da 100644 --- a/vendor/google.golang.org/grpc/internal/idle/idle.go +++ b/vendor/google.golang.org/grpc/internal/idle/idle.go @@ -182,6 +182,7 @@ func (m *Manager) tryEnterIdleMode() bool { return true } +// EnterIdleModeForTesting instructs the channel to enter idle mode. func (m *Manager) EnterIdleModeForTesting() { m.tryEnterIdleMode() } @@ -225,7 +226,7 @@ func (m *Manager) ExitIdleMode() error { // came in and OnCallBegin() noticed that the calls count is negative. // - Channel is in idle mode, and multiple new RPCs come in at the same // time, all of them notice a negative calls count in OnCallBegin and get - // here. The first one to get the lock would got the channel to exit idle. + // here. The first one to get the lock would get the channel to exit idle. // - Channel is not in idle mode, and the user calls Connect which calls // m.ExitIdleMode. // @@ -266,6 +267,7 @@ func (m *Manager) isClosed() bool { return atomic.LoadInt32(&m.closed) == 1 } +// Close stops the timer associated with the Manager, if it exists. func (m *Manager) Close() { atomic.StoreInt32(&m.closed, 1) diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go index 73fa407b6..20b4dc3d3 100644 --- a/vendor/google.golang.org/grpc/internal/internal.go +++ b/vendor/google.golang.org/grpc/internal/internal.go @@ -183,7 +183,7 @@ var ( // GRPCResolverSchemeExtraMetadata determines when gRPC will add extra // metadata to RPCs. 
- GRPCResolverSchemeExtraMetadata string = "xds" + GRPCResolverSchemeExtraMetadata = "xds" // EnterIdleModeForTesting gets the ClientConn to enter IDLE mode. EnterIdleModeForTesting any // func(*grpc.ClientConn) @@ -191,6 +191,8 @@ var ( // ExitIdleModeForTesting gets the ClientConn to exit IDLE mode. ExitIdleModeForTesting any // func(*grpc.ClientConn) error + // ChannelzTurnOffForTesting disables the Channelz service for testing + // purposes. ChannelzTurnOffForTesting func() // TriggerXDSResourceNotFoundForTesting causes the provided xDS Client to @@ -203,11 +205,7 @@ var ( // UserSetDefaultScheme is set to true if the user has overridden the // default resolver scheme. - UserSetDefaultScheme bool = false - - // ShuffleAddressListForTesting pseudo-randomizes the order of addresses. n - // is the number of elements. swap swaps the elements with indexes i and j. - ShuffleAddressListForTesting any // func(n int, swap func(i, j int)) + UserSetDefaultScheme = false // ConnectedAddress returns the connected address for a SubConnState. The // address is only valid if the state is READY. @@ -235,7 +233,7 @@ var ( // // The implementation is expected to create a health checking RPC stream by // calling newStream(), watch for the health status of serviceName, and report -// it's health back by calling setConnectivityState(). +// its health back by calling setConnectivityState(). // // The health checking protocol is defined at: // https://github.com/grpc/grpc/blob/master/doc/health-checking.md diff --git a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go index 4552db16b..374c12fb7 100644 --- a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go +++ b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go @@ -177,7 +177,7 @@ type dnsResolver struct { // finished. Otherwise, data race will be possible. [Race Example] in // dns_resolver_test we replace the real lookup functions with mocked ones to // facilitate testing. If Close() doesn't wait for watcher() goroutine - // finishes, race detector sometimes will warns lookup (READ the lookup + // finishes, race detector sometimes will warn lookup (READ the lookup // function pointers) inside watcher() goroutine has data race with // replaceNetFunc (WRITE the lookup function pointers). wg sync.WaitGroup @@ -237,7 +237,9 @@ func (d *dnsResolver) watcher() { } func (d *dnsResolver) lookupSRV(ctx context.Context) ([]resolver.Address, error) { - if !EnableSRVLookups { + // Skip this particular host to avoid timeouts with some versions of + // systemd-resolved. + if !EnableSRVLookups || d.host == "metadata.google.internal." 
{ return nil, nil } var newAddrs []resolver.Address diff --git a/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go b/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go index afac56572..b901c7bac 100644 --- a/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go +++ b/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go @@ -55,7 +55,7 @@ func (r *passthroughResolver) start() { r.cc.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: r.target.Endpoint()}}}) } -func (*passthroughResolver) ResolveNow(o resolver.ResolveNowOptions) {} +func (*passthroughResolver) ResolveNow(resolver.ResolveNowOptions) {} func (*passthroughResolver) Close() {} diff --git a/vendor/google.golang.org/grpc/internal/stats/metrics_recorder_list.go b/vendor/google.golang.org/grpc/internal/stats/metrics_recorder_list.go index be110d41f..79044657b 100644 --- a/vendor/google.golang.org/grpc/internal/stats/metrics_recorder_list.go +++ b/vendor/google.golang.org/grpc/internal/stats/metrics_recorder_list.go @@ -54,6 +54,8 @@ func verifyLabels(desc *estats.MetricDescriptor, labelsRecv ...string) { } } +// RecordInt64Count records the measurement alongside labels on the int +// count associated with the provided handle. func (l *MetricsRecorderList) RecordInt64Count(handle *estats.Int64CountHandle, incr int64, labels ...string) { verifyLabels(handle.Descriptor(), labels...) @@ -62,6 +64,8 @@ func (l *MetricsRecorderList) RecordInt64Count(handle *estats.Int64CountHandle, } } +// RecordFloat64Count records the measurement alongside labels on the float +// count associated with the provided handle. func (l *MetricsRecorderList) RecordFloat64Count(handle *estats.Float64CountHandle, incr float64, labels ...string) { verifyLabels(handle.Descriptor(), labels...) @@ -70,6 +74,8 @@ func (l *MetricsRecorderList) RecordFloat64Count(handle *estats.Float64CountHand } } +// RecordInt64Histo records the measurement alongside labels on the int +// histo associated with the provided handle. func (l *MetricsRecorderList) RecordInt64Histo(handle *estats.Int64HistoHandle, incr int64, labels ...string) { verifyLabels(handle.Descriptor(), labels...) @@ -78,6 +84,8 @@ func (l *MetricsRecorderList) RecordInt64Histo(handle *estats.Int64HistoHandle, } } +// RecordFloat64Histo records the measurement alongside labels on the float +// histo associated with the provided handle. func (l *MetricsRecorderList) RecordFloat64Histo(handle *estats.Float64HistoHandle, incr float64, labels ...string) { verifyLabels(handle.Descriptor(), labels...) @@ -86,6 +94,8 @@ func (l *MetricsRecorderList) RecordFloat64Histo(handle *estats.Float64HistoHand } } +// RecordInt64Gauge records the measurement alongside labels on the int +// gauge associated with the provided handle. func (l *MetricsRecorderList) RecordInt64Gauge(handle *estats.Int64GaugeHandle, incr int64, labels ...string) { verifyLabels(handle.Descriptor(), labels...) diff --git a/vendor/google.golang.org/grpc/internal/status/status.go b/vendor/google.golang.org/grpc/internal/status/status.go index c7dbc8205..1186f1e9a 100644 --- a/vendor/google.golang.org/grpc/internal/status/status.go +++ b/vendor/google.golang.org/grpc/internal/status/status.go @@ -138,17 +138,19 @@ func (s *Status) WithDetails(details ...protoadapt.MessageV1) (*Status, error) { // s.Code() != OK implies that s.Proto() != nil. 
p := s.Proto() for _, detail := range details { - any, err := anypb.New(protoadapt.MessageV2Of(detail)) + m, err := anypb.New(protoadapt.MessageV2Of(detail)) if err != nil { return nil, err } - p.Details = append(p.Details, any) + p.Details = append(p.Details, m) } return &Status{s: p}, nil } // Details returns a slice of details messages attached to the status. // If a detail cannot be decoded, the error is returned in place of the detail. +// If the detail can be decoded, the proto message returned is of the same +// type that was given to WithDetails(). func (s *Status) Details() []any { if s == nil || s.s == nil { return nil @@ -160,7 +162,38 @@ func (s *Status) Details() []any { details = append(details, err) continue } - details = append(details, detail) + // The call to MessageV1Of is required to unwrap the proto message if + // it implemented only the MessageV1 API. The proto message would have + // been wrapped in a V2 wrapper in Status.WithDetails. V2 messages are + // added to a global registry used by any.UnmarshalNew(). + // MessageV1Of has the following behaviour: + // 1. If the given message is a wrapped MessageV1, it returns the + // unwrapped value. + // 2. If the given message already implements MessageV1, it returns it + // as is. + // 3. Else, it wraps the MessageV2 in a MessageV1 wrapper. + // + // Since the Status.WithDetails() API only accepts MessageV1, calling + // MessageV1Of ensures we return the same type that was given to + // WithDetails: + // * If the give type implemented only MessageV1, the unwrapping from + // point 1 above will restore the type. + // * If the given type implemented both MessageV1 and MessageV2, point 2 + // above will ensure no wrapping is performed. + // * If the given type implemented only MessageV2 and was wrapped using + // MessageV1Of before passing to WithDetails(), it would be unwrapped + // in WithDetails by calling MessageV2Of(). Point 3 above will ensure + // that the type is wrapped in a MessageV1 wrapper again before + // returning. Note that protoc-gen-go doesn't generate code which + // implements ONLY MessageV2 at the time of writing. + // + // NOTE: Status details can also be added using the FromProto method. + // This could theoretically allow passing a Detail message that only + // implements the V2 API. In such a case the message will be wrapped in + // a MessageV1 wrapper when fetched using Details(). + // Since protoc-gen-go generates only code that implements both V1 and + // V2 APIs for backward compatibility, this is not a concern. + details = append(details, protoadapt.MessageV1Of(detail)) } return details } diff --git a/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go b/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go index 999f52cd7..54c24c2ff 100644 --- a/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go +++ b/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go @@ -58,20 +58,20 @@ func GetRusage() *Rusage { // CPUTimeDiff returns the differences of user CPU time and system CPU time used // between two Rusage structs. It a no-op function for non-linux environments. -func CPUTimeDiff(first *Rusage, latest *Rusage) (float64, float64) { +func CPUTimeDiff(*Rusage, *Rusage) (float64, float64) { log() return 0, 0 } // SetTCPUserTimeout is a no-op function under non-linux environments. 
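The status change above makes Details() hand back each decodable detail as the same concrete type that was passed to WithDetails(), unwrapping the MessageV1/MessageV2 adapters internally. A hedged sketch of the round trip using the public status package and the genproto errdetails types (an illustrative choice, not something this diff introduces):

package main

import (
	"fmt"

	"google.golang.org/genproto/googleapis/rpc/errdetails"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

func main() {
	st := status.New(codes.InvalidArgument, "bad request")
	st, err := st.WithDetails(&errdetails.BadRequest{
		FieldViolations: []*errdetails.BadRequest_FieldViolation{
			{Field: "name", Description: "must not be empty"},
		},
	})
	if err != nil {
		panic(err)
	}
	for _, d := range st.Details() {
		switch d := d.(type) {
		case *errdetails.BadRequest:
			// Per the change above, d has the same concrete type that was
			// passed to WithDetails().
			fmt.Println("bad request field:", d.FieldViolations[0].Field)
		case error:
			// A detail that could not be decoded is returned as an error.
			fmt.Println("undecodable detail:", d)
		}
	}
}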
-func SetTCPUserTimeout(conn net.Conn, timeout time.Duration) error { +func SetTCPUserTimeout(net.Conn, time.Duration) error { log() return nil } // GetTCPUserTimeout is a no-op function under non-linux environments. // A negative return value indicates the operation is not supported -func GetTCPUserTimeout(conn net.Conn) (int, error) { +func GetTCPUserTimeout(net.Conn) (int, error) { log() return -1, nil } diff --git a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go index ea0633bbd..ef72fbb3a 100644 --- a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go +++ b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go @@ -1033,10 +1033,3 @@ func (l *loopyWriter) processData() (bool, error) { } return false, nil } - -func min(a, b int) int { - if a < b { - return a - } - return b -} diff --git a/vendor/google.golang.org/grpc/internal/transport/handler_server.go b/vendor/google.golang.org/grpc/internal/transport/handler_server.go index e1cd86b2f..ce878693b 100644 --- a/vendor/google.golang.org/grpc/internal/transport/handler_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/handler_server.go @@ -333,7 +333,7 @@ func (ht *serverHandlerTransport) writeCustomHeaders(s *Stream) { s.hdrMu.Unlock() } -func (ht *serverHandlerTransport) Write(s *Stream, hdr []byte, data mem.BufferSlice, opts *Options) error { +func (ht *serverHandlerTransport) Write(s *Stream, hdr []byte, data mem.BufferSlice, _ *Options) error { // Always take a reference because otherwise there is no guarantee the data will // be available after this function returns. This is what callers to Write // expect. @@ -475,7 +475,7 @@ func (ht *serverHandlerTransport) IncrMsgSent() {} func (ht *serverHandlerTransport) IncrMsgRecv() {} -func (ht *serverHandlerTransport) Drain(debugData string) { +func (ht *serverHandlerTransport) Drain(string) { panic("Drain() is not implemented") } diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/vendor/google.golang.org/grpc/internal/transport/http2_client.go index f46194fdc..62b81885d 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_client.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_client.go @@ -86,9 +86,9 @@ type http2Client struct { writerDone chan struct{} // sync point to enable testing. // goAway is closed to notify the upper layer (i.e., addrConn.transportMonitor) // that the server sent GoAway on this transport. - goAway chan struct{} - - framer *framer + goAway chan struct{} + keepaliveDone chan struct{} // Closed when the keepalive goroutine exits. + framer *framer // controlBuf delivers all the control related tasks (e.g., window // updates, reset streams, and various settings) to the controller. // Do not access controlBuf with mu held. @@ -335,6 +335,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts readerDone: make(chan struct{}), writerDone: make(chan struct{}), goAway: make(chan struct{}), + keepaliveDone: make(chan struct{}), framer: newFramer(conn, writeBufSize, readBufSize, opts.SharedWriteBuffer, maxHeaderListSize), fc: &trInFlow{limit: uint32(icwz)}, scheme: scheme, @@ -527,8 +528,9 @@ func (t *http2Client) getPeer() *peer.Peer { // to be the last frame loopy writes to the transport. 
func (t *http2Client) outgoingGoAwayHandler(g *goAway) (bool, error) { t.mu.Lock() - defer t.mu.Unlock() - if err := t.framer.fr.WriteGoAway(t.nextID-2, http2.ErrCodeNo, g.debugData); err != nil { + maxStreamID := t.nextID - 2 + t.mu.Unlock() + if err := t.framer.fr.WriteGoAway(maxStreamID, http2.ErrCodeNo, g.debugData); err != nil { return false, err } return false, g.closeConn @@ -772,7 +774,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, hdr := &headerFrame{ hf: headerFields, endStream: false, - initStream: func(id uint32) error { + initStream: func(uint32) error { t.mu.Lock() // TODO: handle transport closure in loopy instead and remove this // initStream is never called when transport is draining. @@ -1008,6 +1010,9 @@ func (t *http2Client) Close(err error) { // should unblock it so that the goroutine eventually exits. t.kpDormancyCond.Signal() } + // Append info about previous goaways if there were any, since this may be important + // for understanding the root cause for this connection to be closed. + goAwayDebugMessage := t.goAwayDebugMessage t.mu.Unlock() // Per HTTP/2 spec, a GOAWAY frame must be sent before closing the @@ -1025,11 +1030,13 @@ func (t *http2Client) Close(err error) { } t.cancel() t.conn.Close() + // Waits for the reader and keepalive goroutines to exit before returning to + // ensure all resources are cleaned up before Close can return. + <-t.readerDone + if t.keepaliveEnabled { + <-t.keepaliveDone + } channelz.RemoveEntry(t.channelz.ID) - // Append info about previous goaways if there were any, since this may be important - // for understanding the root cause for this connection to be closed. - _, goAwayDebugMessage := t.GetGoAwayReason() - var st *status.Status if len(goAwayDebugMessage) > 0 { st = status.Newf(codes.Unavailable, "closing transport due to: %v, received prior goaway: %v", err, goAwayDebugMessage) @@ -1316,11 +1323,11 @@ func (t *http2Client) handlePing(f *http2.PingFrame) { t.controlBuf.put(pingAck) } -func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { +func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) error { t.mu.Lock() if t.state == closing { t.mu.Unlock() - return + return nil } if f.ErrCode == http2.ErrCodeEnhanceYourCalm && string(f.DebugData()) == "too_many_pings" { // When a client receives a GOAWAY with error code ENHANCE_YOUR_CALM and debug @@ -1332,8 +1339,7 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { id := f.LastStreamID if id > 0 && id%2 == 0 { t.mu.Unlock() - t.Close(connectionErrorf(true, nil, "received goaway with non-zero even-numbered stream id: %v", id)) - return + return connectionErrorf(true, nil, "received goaway with non-zero even-numbered stream id: %v", id) } // A client can receive multiple GoAways from the server (see // https://github.com/grpc/grpc-go/issues/1387). The idea is that the first @@ -1350,8 +1356,7 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { // If there are multiple GoAways the first one should always have an ID greater than the following ones. 
if id > t.prevGoAwayID { t.mu.Unlock() - t.Close(connectionErrorf(true, nil, "received goaway with stream id: %v, which exceeds stream id of previous goaway: %v", id, t.prevGoAwayID)) - return + return connectionErrorf(true, nil, "received goaway with stream id: %v, which exceeds stream id of previous goaway: %v", id, t.prevGoAwayID) } default: t.setGoAwayReason(f) @@ -1375,8 +1380,7 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { t.prevGoAwayID = id if len(t.activeStreams) == 0 { t.mu.Unlock() - t.Close(connectionErrorf(true, nil, "received goaway and there are no active streams")) - return + return connectionErrorf(true, nil, "received goaway and there are no active streams") } streamsToClose := make([]*Stream, 0) @@ -1393,6 +1397,7 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { for _, stream := range streamsToClose { t.closeStream(stream, errStreamDrain, false, http2.ErrCodeNo, statusGoAway, nil, false) } + return nil } // setGoAwayReason sets the value of t.goAwayReason based @@ -1628,7 +1633,13 @@ func (t *http2Client) readServerPreface() error { // network connection. If the server preface is not read successfully, an // error is pushed to errCh; otherwise errCh is closed with no error. func (t *http2Client) reader(errCh chan<- error) { - defer close(t.readerDone) + var errClose error + defer func() { + close(t.readerDone) + if errClose != nil { + t.Close(errClose) + } + }() if err := t.readServerPreface(); err != nil { errCh <- err @@ -1667,11 +1678,10 @@ func (t *http2Client) reader(errCh chan<- error) { t.closeStream(s, status.Error(code, msg), true, http2.ErrCodeProtocol, status.New(code, msg), nil, false) } continue - } else { - // Transport error. - t.Close(connectionErrorf(true, err, "error reading from server: %v", err)) - return } + // Transport error. + errClose = connectionErrorf(true, err, "error reading from server: %v", err) + return } switch frame := frame.(type) { case *http2.MetaHeadersFrame: @@ -1685,7 +1695,7 @@ func (t *http2Client) reader(errCh chan<- error) { case *http2.PingFrame: t.handlePing(frame) case *http2.GoAwayFrame: - t.handleGoAway(frame) + errClose = t.handleGoAway(frame) case *http2.WindowUpdateFrame: t.handleWindowUpdate(frame) default: @@ -1696,15 +1706,15 @@ func (t *http2Client) reader(errCh chan<- error) { } } -func minTime(a, b time.Duration) time.Duration { - if a < b { - return a - } - return b -} - // keepalive running in a separate goroutine makes sure the connection is alive by sending pings. func (t *http2Client) keepalive() { + var err error + defer func() { + close(t.keepaliveDone) + if err != nil { + t.Close(err) + } + }() p := &ping{data: [8]byte{}} // True iff a ping has been sent, and no data has been received since then. outstandingPing := false @@ -1728,7 +1738,7 @@ func (t *http2Client) keepalive() { continue } if outstandingPing && timeoutLeft <= 0 { - t.Close(connectionErrorf(true, nil, "keepalive ping failed to receive ACK within timeout")) + err = connectionErrorf(true, nil, "keepalive ping failed to receive ACK within timeout") return } t.mu.Lock() @@ -1770,7 +1780,7 @@ func (t *http2Client) keepalive() { // timeoutLeft. This will ensure that we wait only for kp.Time // before sending out the next ping (for cases where the ping is // acked). 
- sleepDuration := minTime(t.kp.Time, timeoutLeft) + sleepDuration := min(t.kp.Time, timeoutLeft) timeoutLeft -= sleepDuration timer.Reset(sleepDuration) case <-t.ctx.Done(): diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/vendor/google.golang.org/grpc/internal/transport/http2_server.go index f5163f770..584b50fe5 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_server.go @@ -1117,7 +1117,7 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { // Write converts the data into HTTP2 data frame and sends it out. Non-nil error // is returns if it fails (e.g., framing error, transport error). -func (t *http2Server) Write(s *Stream, hdr []byte, data mem.BufferSlice, opts *Options) error { +func (t *http2Server) Write(s *Stream, hdr []byte, data mem.BufferSlice, _ *Options) error { reader := data.Reader() if !s.isHeaderSent() { // Headers haven't been written yet. @@ -1238,7 +1238,7 @@ func (t *http2Server) keepalive() { // timeoutLeft. This will ensure that we wait only for kp.Time // before sending out the next ping (for cases where the ping is // acked). - sleepDuration := minTime(t.kp.Time, kpTimeoutLeft) + sleepDuration := min(t.kp.Time, kpTimeoutLeft) kpTimeoutLeft -= sleepDuration kpTimer.Reset(sleepDuration) case <-t.done: diff --git a/vendor/google.golang.org/grpc/internal/transport/http_util.go b/vendor/google.golang.org/grpc/internal/transport/http_util.go index f609c6c66..3613d7b64 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http_util.go +++ b/vendor/google.golang.org/grpc/internal/transport/http_util.go @@ -393,7 +393,7 @@ type framer struct { fr *http2.Framer } -var writeBufferPoolMap map[int]*sync.Pool = make(map[int]*sync.Pool) +var writeBufferPoolMap = make(map[int]*sync.Pool) var writeBufferMutex sync.Mutex func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, sharedWriteBuffer bool, maxHeaderListSize uint32) *framer { diff --git a/vendor/google.golang.org/grpc/internal/transport/transport.go b/vendor/google.golang.org/grpc/internal/transport/transport.go index fdd6fa86c..e12cb0bc9 100644 --- a/vendor/google.golang.org/grpc/internal/transport/transport.go +++ b/vendor/google.golang.org/grpc/internal/transport/transport.go @@ -547,6 +547,15 @@ func (s *Stream) write(m recvMsg) { s.buf.put(m) } +// ReadHeader reads data into the provided header slice from the stream. It +// first checks if there was an error during a previous read operation and +// returns it if present. It then requests a read operation for the length of +// the header. It continues to read from the stream until the entire header +// slice is filled or an error occurs. If an `io.EOF` error is encountered +// with partially read data, it is converted to `io.ErrUnexpectedEOF` to +// indicate an unexpected end of the stream. The method returns any error +// encountered during the read process or nil if the header was successfully +// read. 
func (s *Stream) ReadHeader(header []byte) (err error) { // Don't request a read if there was an error earlier if er := s.trReader.er; er != nil { @@ -616,7 +625,7 @@ func (t *transportReader) ReadHeader(header []byte) (int, error) { t.er = err return 0, err } - t.windowHandler(len(header)) + t.windowHandler(n) return n, nil } diff --git a/vendor/google.golang.org/grpc/keepalive/keepalive.go b/vendor/google.golang.org/grpc/keepalive/keepalive.go index 34d31b5e7..eb42b19fb 100644 --- a/vendor/google.golang.org/grpc/keepalive/keepalive.go +++ b/vendor/google.golang.org/grpc/keepalive/keepalive.go @@ -34,15 +34,29 @@ type ClientParameters struct { // After a duration of this time if the client doesn't see any activity it // pings the server to see if the transport is still alive. // If set below 10s, a minimum value of 10s will be used instead. - Time time.Duration // The current default value is infinity. + // + // Note that gRPC servers have a default EnforcementPolicy.MinTime of 5 + // minutes (which means the client shouldn't ping more frequently than every + // 5 minutes). + // + // Though not ideal, it's not a strong requirement for Time to be less than + // EnforcementPolicy.MinTime. Time will automatically double if the server + // disconnects due to its enforcement policy. + // + // For more details, see + // https://github.com/grpc/proposal/blob/master/A8-client-side-keepalive.md + Time time.Duration // After having pinged for keepalive check, the client waits for a duration // of Timeout and if no activity is seen even after that the connection is // closed. - Timeout time.Duration // The current default value is 20 seconds. + // + // If keepalive is enabled, and this value is not explicitly set, the default + // is 20 seconds. + Timeout time.Duration // If true, client sends keepalive pings even with no active RPCs. If false, // when there are no active RPCs, Time and Timeout will be ignored and no // keepalive pings will be sent. - PermitWithoutStream bool // false by default. + PermitWithoutStream bool } // ServerParameters is used to set keepalive and max-age parameters on the diff --git a/vendor/google.golang.org/grpc/mem/buffers.go b/vendor/google.golang.org/grpc/mem/buffers.go index 975ceb718..ecbf0b9a7 100644 --- a/vendor/google.golang.org/grpc/mem/buffers.go +++ b/vendor/google.golang.org/grpc/mem/buffers.go @@ -65,6 +65,9 @@ var ( refObjectPool = sync.Pool{New: func() any { return new(atomic.Int32) }} ) +// IsBelowBufferPoolingThreshold returns true if the given size is less than or +// equal to the threshold for buffer pooling. This is used to determine whether +// to pool buffers or allocate them directly. func IsBelowBufferPoolingThreshold(size int) bool { return size <= bufferPoolingThreshold } @@ -89,7 +92,11 @@ func newBuffer() *buffer { // // Note that the backing array of the given data is not copied. func NewBuffer(data *[]byte, pool BufferPool) Buffer { - if pool == nil || IsBelowBufferPoolingThreshold(len(*data)) { + // Use the buffer's capacity instead of the length, otherwise buffers may + // not be reused under certain conditions. For example, if a large buffer + // is acquired from the pool, but fewer bytes than the buffering threshold + // are written to it, the buffer will not be returned to the pool. + if pool == nil || IsBelowBufferPoolingThreshold(cap(*data)) { return (SliceBuffer)(*data) } b := newBuffer() @@ -194,19 +201,19 @@ func (b *buffer) read(buf []byte) (int, Buffer) { return n, b } -// String returns a string representation of the buffer. 
May be used for -// debugging purposes. func (b *buffer) String() string { return fmt.Sprintf("mem.Buffer(%p, data: %p, length: %d)", b, b.ReadOnlyData(), len(b.ReadOnlyData())) } +// ReadUnsafe reads bytes from the given Buffer into the provided slice. +// It does not perform safety checks. func ReadUnsafe(dst []byte, buf Buffer) (int, Buffer) { return buf.read(dst) } // SplitUnsafe modifies the receiver to point to the first n bytes while it -// returns a new reference to the remaining bytes. The returned Buffer functions -// just like a normal reference acquired using Ref(). +// returns a new reference to the remaining bytes. The returned Buffer +// functions just like a normal reference acquired using Ref(). func SplitUnsafe(buf Buffer, n int) (left, right Buffer) { return buf.split(n) } @@ -224,20 +231,29 @@ func (e emptyBuffer) Len() int { return 0 } -func (e emptyBuffer) split(n int) (left, right Buffer) { +func (e emptyBuffer) split(int) (left, right Buffer) { return e, e } -func (e emptyBuffer) read(buf []byte) (int, Buffer) { +func (e emptyBuffer) read([]byte) (int, Buffer) { return 0, e } +// SliceBuffer is a Buffer implementation that wraps a byte slice. It provides +// methods for reading, splitting, and managing the byte slice. type SliceBuffer []byte +// ReadOnlyData returns the byte slice. func (s SliceBuffer) ReadOnlyData() []byte { return s } -func (s SliceBuffer) Ref() {} -func (s SliceBuffer) Free() {} -func (s SliceBuffer) Len() int { return len(s) } + +// Ref is a noop implementation of Ref. +func (s SliceBuffer) Ref() {} + +// Free is a noop implementation of Free. +func (s SliceBuffer) Free() {} + +// Len is a noop implementation of Len. +func (s SliceBuffer) Len() int { return len(s) } func (s SliceBuffer) split(n int) (left, right Buffer) { return s[:n], s[n:] diff --git a/vendor/google.golang.org/grpc/resolver/manual/manual.go b/vendor/google.golang.org/grpc/resolver/manual/manual.go index f2efa2a2c..09e864a89 100644 --- a/vendor/google.golang.org/grpc/resolver/manual/manual.go +++ b/vendor/google.golang.org/grpc/resolver/manual/manual.go @@ -76,9 +76,11 @@ func (r *Resolver) InitialState(s resolver.State) { // Build returns itself for Resolver, because it's both a builder and a resolver. func (r *Resolver) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) { - r.BuildCallback(target, cc, opts) r.mu.Lock() defer r.mu.Unlock() + // Call BuildCallback after locking to avoid a race when UpdateState + // or ReportError is called before Build returns. 
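The rewritten keepalive.ClientParameters comments above (client keepalive is disabled by default, Time is clamped to a 10s minimum, Timeout defaults to 20s once enabled, and servers enforce a 5-minute MinTime unless configured otherwise) are easiest to read next to a concrete dial. A minimal sketch, assuming only the public grpc and keepalive packages; the target is a placeholder:

package main

import (
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	"google.golang.org/grpc/keepalive"
)

func main() {
	kp := keepalive.ClientParameters{
		Time:                5 * time.Minute,  // ping after 5 minutes of inactivity
		Timeout:             20 * time.Second, // wait 20s for the ping ack
		PermitWithoutStream: true,             // ping even with no active RPCs
	}
	// "example.internal:50051" is a placeholder target; insecure credentials
	// keep the sketch self-contained.
	cc, err := grpc.NewClient("example.internal:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithKeepaliveParams(kp))
	if err != nil {
		log.Fatal(err)
	}
	defer cc.Close()
}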
+ r.BuildCallback(target, cc, opts) r.CC = cc if r.lastSeenState != nil { err := r.CC.UpdateState(*r.lastSeenState) diff --git a/vendor/google.golang.org/grpc/rpc_util.go b/vendor/google.golang.org/grpc/rpc_util.go index db8865ec3..aba1ae3e6 100644 --- a/vendor/google.golang.org/grpc/rpc_util.go +++ b/vendor/google.golang.org/grpc/rpc_util.go @@ -220,8 +220,8 @@ type HeaderCallOption struct { HeaderAddr *metadata.MD } -func (o HeaderCallOption) before(c *callInfo) error { return nil } -func (o HeaderCallOption) after(c *callInfo, attempt *csAttempt) { +func (o HeaderCallOption) before(*callInfo) error { return nil } +func (o HeaderCallOption) after(_ *callInfo, attempt *csAttempt) { *o.HeaderAddr, _ = attempt.s.Header() } @@ -242,8 +242,8 @@ type TrailerCallOption struct { TrailerAddr *metadata.MD } -func (o TrailerCallOption) before(c *callInfo) error { return nil } -func (o TrailerCallOption) after(c *callInfo, attempt *csAttempt) { +func (o TrailerCallOption) before(*callInfo) error { return nil } +func (o TrailerCallOption) after(_ *callInfo, attempt *csAttempt) { *o.TrailerAddr = attempt.s.Trailer() } @@ -264,8 +264,8 @@ type PeerCallOption struct { PeerAddr *peer.Peer } -func (o PeerCallOption) before(c *callInfo) error { return nil } -func (o PeerCallOption) after(c *callInfo, attempt *csAttempt) { +func (o PeerCallOption) before(*callInfo) error { return nil } +func (o PeerCallOption) after(_ *callInfo, attempt *csAttempt) { if x, ok := peer.FromContext(attempt.s.Context()); ok { *o.PeerAddr = *x } @@ -304,7 +304,7 @@ func (o FailFastCallOption) before(c *callInfo) error { c.failFast = o.FailFast return nil } -func (o FailFastCallOption) after(c *callInfo, attempt *csAttempt) {} +func (o FailFastCallOption) after(*callInfo, *csAttempt) {} // OnFinish returns a CallOption that configures a callback to be called when // the call completes. The error passed to the callback is the status of the @@ -339,7 +339,7 @@ func (o OnFinishCallOption) before(c *callInfo) error { return nil } -func (o OnFinishCallOption) after(c *callInfo, attempt *csAttempt) {} +func (o OnFinishCallOption) after(*callInfo, *csAttempt) {} // MaxCallRecvMsgSize returns a CallOption which sets the maximum message size // in bytes the client can receive. If this is not set, gRPC uses the default @@ -363,7 +363,7 @@ func (o MaxRecvMsgSizeCallOption) before(c *callInfo) error { c.maxReceiveMessageSize = &o.MaxRecvMsgSize return nil } -func (o MaxRecvMsgSizeCallOption) after(c *callInfo, attempt *csAttempt) {} +func (o MaxRecvMsgSizeCallOption) after(*callInfo, *csAttempt) {} // MaxCallSendMsgSize returns a CallOption which sets the maximum message size // in bytes the client can send. If this is not set, gRPC uses the default @@ -387,7 +387,7 @@ func (o MaxSendMsgSizeCallOption) before(c *callInfo) error { c.maxSendMessageSize = &o.MaxSendMsgSize return nil } -func (o MaxSendMsgSizeCallOption) after(c *callInfo, attempt *csAttempt) {} +func (o MaxSendMsgSizeCallOption) after(*callInfo, *csAttempt) {} // PerRPCCredentials returns a CallOption that sets credentials.PerRPCCredentials // for a call. @@ -410,7 +410,7 @@ func (o PerRPCCredsCallOption) before(c *callInfo) error { c.creds = o.Creds return nil } -func (o PerRPCCredsCallOption) after(c *callInfo, attempt *csAttempt) {} +func (o PerRPCCredsCallOption) after(*callInfo, *csAttempt) {} // UseCompressor returns a CallOption which sets the compressor used when // sending the request. 
If WithCompressor is also set, UseCompressor has @@ -438,7 +438,7 @@ func (o CompressorCallOption) before(c *callInfo) error { c.compressorType = o.CompressorType return nil } -func (o CompressorCallOption) after(c *callInfo, attempt *csAttempt) {} +func (o CompressorCallOption) after(*callInfo, *csAttempt) {} // CallContentSubtype returns a CallOption that will set the content-subtype // for a call. For example, if content-subtype is "json", the Content-Type over @@ -475,7 +475,7 @@ func (o ContentSubtypeCallOption) before(c *callInfo) error { c.contentSubtype = o.ContentSubtype return nil } -func (o ContentSubtypeCallOption) after(c *callInfo, attempt *csAttempt) {} +func (o ContentSubtypeCallOption) after(*callInfo, *csAttempt) {} // ForceCodec returns a CallOption that will set codec to be used for all // request and response messages for a call. The result of calling Name() will @@ -514,7 +514,7 @@ func (o ForceCodecCallOption) before(c *callInfo) error { c.codec = newCodecV1Bridge(o.Codec) return nil } -func (o ForceCodecCallOption) after(c *callInfo, attempt *csAttempt) {} +func (o ForceCodecCallOption) after(*callInfo, *csAttempt) {} // ForceCodecV2 returns a CallOption that will set codec to be used for all // request and response messages for a call. The result of calling Name() will @@ -554,7 +554,7 @@ func (o ForceCodecV2CallOption) before(c *callInfo) error { return nil } -func (o ForceCodecV2CallOption) after(c *callInfo, attempt *csAttempt) {} +func (o ForceCodecV2CallOption) after(*callInfo, *csAttempt) {} // CallCustomCodec behaves like ForceCodec, but accepts a grpc.Codec instead of // an encoding.Codec. @@ -579,7 +579,7 @@ func (o CustomCodecCallOption) before(c *callInfo) error { c.codec = newCodecV0Bridge(o.Codec) return nil } -func (o CustomCodecCallOption) after(c *callInfo, attempt *csAttempt) {} +func (o CustomCodecCallOption) after(*callInfo, *csAttempt) {} // MaxRetryRPCBufferSize returns a CallOption that limits the amount of memory // used for buffering this RPC's requests for retry purposes. @@ -607,7 +607,7 @@ func (o MaxRetryRPCBufferSizeCallOption) before(c *callInfo) error { c.maxRetryRPCBufferSize = o.MaxRetryRPCBufferSize return nil } -func (o MaxRetryRPCBufferSizeCallOption) after(c *callInfo, attempt *csAttempt) {} +func (o MaxRetryRPCBufferSizeCallOption) after(*callInfo, *csAttempt) {} // The format of the payload: compressed or not? type payloadFormat uint8 @@ -791,9 +791,8 @@ func checkRecvPayload(pf payloadFormat, recvCompress string, haveCompressor bool if !haveCompressor { if isServer { return status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", recvCompress) - } else { - return status.Newf(codes.Internal, "grpc: Decompressor is not installed for grpc-encoding %q", recvCompress) } + return status.Newf(codes.Internal, "grpc: Decompressor is not installed for grpc-encoding %q", recvCompress) } default: return status.Newf(codes.Internal, "grpc: received unexpected payload format %d", pf) diff --git a/vendor/google.golang.org/grpc/stream_interfaces.go b/vendor/google.golang.org/grpc/stream_interfaces.go index 8b813529c..0037fee0b 100644 --- a/vendor/google.golang.org/grpc/stream_interfaces.go +++ b/vendor/google.golang.org/grpc/stream_interfaces.go @@ -22,15 +22,35 @@ package grpc // request, many responses) RPC. It is generic over the type of the response // message. It is used in generated code. type ServerStreamingClient[Res any] interface { + // Recv receives the next response message from the server. 
The client may + // repeatedly call Recv to read messages from the response stream. If + // io.EOF is returned, the stream has terminated with an OK status. Any + // other error is compatible with the status package and indicates the + // RPC's status code and message. Recv() (*Res, error) + + // ClientStream is embedded to provide Context, Header, and Trailer + // functionality. No other methods in the ClientStream should be called + // directly. ClientStream } // ServerStreamingServer represents the server side of a server-streaming (one // request, many responses) RPC. It is generic over the type of the response // message. It is used in generated code. +// +// To terminate the response stream, return from the handler method and return +// an error from the status package, or use nil to indicate an OK status code. type ServerStreamingServer[Res any] interface { + // Send sends a response message to the client. The server handler may + // call Send multiple times to send multiple messages to the client. An + // error is returned if the stream was terminated unexpectedly, and the + // handler method should return, as the stream is no longer usable. Send(*Res) error + + // ServerStream is embedded to provide Context, SetHeader, SendHeader, and + // SetTrailer functionality. No other methods in the ServerStream should + // be called directly. ServerStream } @@ -39,8 +59,22 @@ type ServerStreamingServer[Res any] interface { // message stream and the type of the unary response message. It is used in // generated code. type ClientStreamingClient[Req any, Res any] interface { + // Send sends a request message to the server. The client may call Send + // multiple times to send multiple messages to the server. On error, Send + // aborts the stream. If the error was generated by the client, the status + // is returned directly. Otherwise, io.EOF is returned, and the status of + // the stream may be discovered using CloseAndRecv(). Send(*Req) error + + // CloseAndRecv closes the request stream and waits for the server's + // response. This method must be called once and only once after sending + // all request messages. Any error returned is implemented by the status + // package. CloseAndRecv() (*Res, error) + + // ClientStream is embedded to provide Context, Header, and Trailer + // functionality. No other methods in the ClientStream should be called + // directly. ClientStream } @@ -48,9 +82,28 @@ type ClientStreamingClient[Req any, Res any] interface { // requests, one response) RPC. It is generic over both the type of the request // message stream and the type of the unary response message. It is used in // generated code. +// +// To terminate the RPC, call SendAndClose and return nil from the method +// handler or do not call SendAndClose and return an error from the status +// package. type ClientStreamingServer[Req any, Res any] interface { + // Recv receives the next request message from the client. The server may + // repeatedly call Recv to read messages from the request stream. If + // io.EOF is returned, it indicates the client called CloseAndRecv on its + // ClientStreamingClient. Any other error indicates the stream was + // terminated unexpectedly, and the handler method should return, as the + // stream is no longer usable. Recv() (*Req, error) + + // SendAndClose sends a single response message to the client and closes + // the stream. This method must be called once and only once after all + // request messages have been processed. 
Recv should not be called after + // calling SendAndClose. SendAndClose(*Res) error + + // ServerStream is embedded to provide Context, SetHeader, SendHeader, and + // SetTrailer functionality. No other methods in the ServerStream should + // be called directly. ServerStream } @@ -59,8 +112,23 @@ type ClientStreamingServer[Req any, Res any] interface { // request message stream and the type of the response message stream. It is // used in generated code. type BidiStreamingClient[Req any, Res any] interface { + // Send sends a request message to the server. The client may call Send + // multiple times to send multiple messages to the server. On error, Send + // aborts the stream. If the error was generated by the client, the status + // is returned directly. Otherwise, io.EOF is returned, and the status of + // the stream may be discovered using Recv(). Send(*Req) error + + // Recv receives the next response message from the server. The client may + // repeatedly call Recv to read messages from the response stream. If + // io.EOF is returned, the stream has terminated with an OK status. Any + // other error is compatible with the status package and indicates the + // RPC's status code and message. Recv() (*Res, error) + + // ClientStream is embedded to provide Context, Header, Trailer, and + // CloseSend functionality. No other methods in the ClientStream should be + // called directly. ClientStream } @@ -68,9 +136,27 @@ type BidiStreamingClient[Req any, Res any] interface { // (many requests, many responses) RPC. It is generic over both the type of the // request message stream and the type of the response message stream. It is // used in generated code. +// +// To terminate the stream, return from the handler method and return +// an error from the status package, or use nil to indicate an OK status code. type BidiStreamingServer[Req any, Res any] interface { + // Recv receives the next request message from the client. The server may + // repeatedly call Recv to read messages from the request stream. If + // io.EOF is returned, it indicates the client called CloseSend on its + // BidiStreamingClient. Any other error indicates the stream was + // terminated unexpectedly, and the handler method should return, as the + // stream is no longer usable. Recv() (*Req, error) + + // Send sends a response message to the client. The server handler may + // call Send multiple times to send multiple messages to the client. An + // error is returned if the stream was terminated unexpectedly, and the + // handler method should return, as the stream is no longer usable. Send(*Res) error + + // ServerStream is embedded to provide Context, SetHeader, SendHeader, and + // SetTrailer functionality. No other methods in the ServerStream should + // be called directly. ServerStream } diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go index 7c70005d0..5a47094ae 100644 --- a/vendor/google.golang.org/grpc/version.go +++ b/vendor/google.golang.org/grpc/version.go @@ -19,4 +19,4 @@ package grpc // Version is the current grpc version. 
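The doc comments added to the generic streaming interfaces above spell out the Recv/Send/io.EOF contract. A hedged sketch of a bidirectional handler written against grpc.BidiStreamingServer; EchoRequest and EchoResponse are hypothetical stand-ins for protoc-generated types, and wiring the handler into a server still requires generated service code:

package main

import (
	"errors"
	"io"

	"google.golang.org/grpc"
)

// EchoRequest and EchoResponse are hypothetical message types standing in for
// generated structs; the generic interfaces only constrain the pointer types
// passed to Recv and Send.
type EchoRequest struct{ Msg string }
type EchoResponse struct{ Msg string }

// echoHandler follows the contract described above: Recv until io.EOF (the
// client called CloseSend), Send one response per request, and return nil for
// an OK status.
func echoHandler(stream grpc.BidiStreamingServer[EchoRequest, EchoResponse]) error {
	for {
		req, err := stream.Recv()
		if errors.Is(err, io.EOF) {
			return nil // client finished sending; terminate with OK
		}
		if err != nil {
			return err // stream terminated unexpectedly; stop using it
		}
		if err := stream.Send(&EchoResponse{Msg: req.Msg}); err != nil {
			return err
		}
	}
}

func main() {} // compilation stub; registering echoHandler needs generated service code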
-const Version = "1.66.2" +const Version = "1.68.1" diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/decode.go b/vendor/google.golang.org/protobuf/encoding/protojson/decode.go index bb2966e3b..737d6876d 100644 --- a/vendor/google.golang.org/protobuf/encoding/protojson/decode.go +++ b/vendor/google.golang.org/protobuf/encoding/protojson/decode.go @@ -192,11 +192,6 @@ func (d decoder) unmarshalMessage(m protoreflect.Message, skipTypeURL bool) erro fd = fieldDescs.ByTextName(name) } } - if flags.ProtoLegacy { - if fd != nil && fd.IsWeak() && fd.Message().IsPlaceholder() { - fd = nil // reset since the weak reference is not linked in - } - } if fd == nil { // Field is unknown. @@ -351,7 +346,7 @@ func (d decoder) unmarshalScalar(fd protoreflect.FieldDescriptor) (protoreflect. panic(fmt.Sprintf("unmarshalScalar: invalid scalar kind %v", kind)) } - return protoreflect.Value{}, d.newError(tok.Pos(), "invalid value for %v type: %v", kind, tok.RawString()) + return protoreflect.Value{}, d.newError(tok.Pos(), "invalid value for %v field %v: %v", kind, fd.JSONName(), tok.RawString()) } func unmarshalInt(tok json.Token, bitSize int) (protoreflect.Value, bool) { diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/encode.go b/vendor/google.golang.org/protobuf/encoding/protojson/encode.go index 29846df22..0e72d8537 100644 --- a/vendor/google.golang.org/protobuf/encoding/protojson/encode.go +++ b/vendor/google.golang.org/protobuf/encoding/protojson/encode.go @@ -216,9 +216,7 @@ func (m unpopulatedFieldRanger) Range(f func(protoreflect.FieldDescriptor, proto } v := m.Get(fd) - isProto2Scalar := fd.Syntax() == protoreflect.Proto2 && fd.Default().IsValid() - isSingularMessage := fd.Cardinality() != protoreflect.Repeated && fd.Message() != nil - if isProto2Scalar || isSingularMessage { + if fd.HasPresence() { if m.skipNull { continue } diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go b/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go index 4b177c820..e9fe10394 100644 --- a/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go +++ b/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go @@ -348,7 +348,11 @@ func (d decoder) unmarshalAnyValue(unmarshal unmarshalFunc, m protoreflect.Messa switch tok.Kind() { case json.ObjectClose: if !found { - return d.newError(tok.Pos(), `missing "value" field`) + // We tolerate an omitted `value` field with the google.protobuf.Empty Well-Known-Type, + // for compatibility with other proto runtimes that have interpreted the spec differently. + if m.Descriptor().FullName() != genid.Empty_message_fullname { + return d.newError(tok.Pos(), `missing "value" field`) + } } return nil diff --git a/vendor/google.golang.org/protobuf/encoding/prototext/decode.go b/vendor/google.golang.org/protobuf/encoding/prototext/decode.go index 24bc98ac4..b53805056 100644 --- a/vendor/google.golang.org/protobuf/encoding/prototext/decode.go +++ b/vendor/google.golang.org/protobuf/encoding/prototext/decode.go @@ -185,11 +185,6 @@ func (d decoder) unmarshalMessage(m protoreflect.Message, checkDelims bool) erro } else if xtErr != nil && xtErr != protoregistry.NotFound { return d.newError(tok.Pos(), "unable to resolve [%s]: %v", tok.RawString(), xtErr) } - if flags.ProtoLegacy { - if fd != nil && fd.IsWeak() && fd.Message().IsPlaceholder() { - fd = nil // reset since the weak reference is not linked in - } - } // Handle unknown fields. 
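The protojson change above relaxes Any decoding so an embedded google.protobuf.Empty may omit its "value" field instead of failing with `missing "value" field`. A hedged sketch of the now-accepted input, assuming emptypb is linked in so the type URL resolves via the global registry:

package main

import (
	"fmt"
	"log"

	"google.golang.org/protobuf/encoding/protojson"
	"google.golang.org/protobuf/types/known/anypb"
	_ "google.golang.org/protobuf/types/known/emptypb" // registers google.protobuf.Empty
)

func main() {
	// Earlier protojson releases rejected this input because "value" is
	// absent; with the change above it unmarshals cleanly.
	in := []byte(`{"@type": "type.googleapis.com/google.protobuf.Empty"}`)
	var a anypb.Any
	if err := protojson.Unmarshal(in, &a); err != nil {
		log.Fatalf("unmarshal: %v", err)
	}
	fmt.Println("type URL:", a.GetTypeUrl())
}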
if fd == nil { diff --git a/vendor/google.golang.org/protobuf/internal/descopts/options.go b/vendor/google.golang.org/protobuf/internal/descopts/options.go index 8401be8c8..024ffebd3 100644 --- a/vendor/google.golang.org/protobuf/internal/descopts/options.go +++ b/vendor/google.golang.org/protobuf/internal/descopts/options.go @@ -9,7 +9,7 @@ // dependency on the descriptor proto package). package descopts -import pref "google.golang.org/protobuf/reflect/protoreflect" +import "google.golang.org/protobuf/reflect/protoreflect" // These variables are set by the init function in descriptor.pb.go via logic // in internal/filetype. In other words, so long as the descriptor proto package @@ -17,13 +17,13 @@ import pref "google.golang.org/protobuf/reflect/protoreflect" // // Each variable is populated with a nil pointer to the options struct. var ( - File pref.ProtoMessage - Enum pref.ProtoMessage - EnumValue pref.ProtoMessage - Message pref.ProtoMessage - Field pref.ProtoMessage - Oneof pref.ProtoMessage - ExtensionRange pref.ProtoMessage - Service pref.ProtoMessage - Method pref.ProtoMessage + File protoreflect.ProtoMessage + Enum protoreflect.ProtoMessage + EnumValue protoreflect.ProtoMessage + Message protoreflect.ProtoMessage + Field protoreflect.ProtoMessage + Oneof protoreflect.ProtoMessage + ExtensionRange protoreflect.ProtoMessage + Service protoreflect.ProtoMessage + Method protoreflect.ProtoMessage ) diff --git a/vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb b/vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb index ff6a38360..5a57ef6f3 100644 Binary files a/vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb and b/vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb differ diff --git a/vendor/google.golang.org/protobuf/internal/editionssupport/editions.go b/vendor/google.golang.org/protobuf/internal/editionssupport/editions.go index 029a6a12d..bf1aba0e8 100644 --- a/vendor/google.golang.org/protobuf/internal/editionssupport/editions.go +++ b/vendor/google.golang.org/protobuf/internal/editionssupport/editions.go @@ -5,9 +5,14 @@ // Package editionssupport defines constants for editions that are supported. package editionssupport -import descriptorpb "google.golang.org/protobuf/types/descriptorpb" +import "google.golang.org/protobuf/types/descriptorpb" const ( Minimum = descriptorpb.Edition_EDITION_PROTO2 Maximum = descriptorpb.Edition_EDITION_2023 + + // MaximumKnown is the maximum edition that is known to Go Protobuf, but not + // declared as supported. In other words: end users cannot use it, but + // testprotos inside Go Protobuf can. + MaximumKnown = descriptorpb.Edition_EDITION_2024 ) diff --git a/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go b/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go index 7e87c7604..669133d04 100644 --- a/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go +++ b/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go @@ -26,7 +26,7 @@ var byteType = reflect.TypeOf(byte(0)) // The type is the underlying field type (e.g., a repeated field may be // represented by []T, but the Go type passed in is just T). // A list of enum value descriptors must be provided for enum fields. -// This does not populate the Enum or Message (except for weak message). +// This does not populate the Enum or Message. // // This function is a best effort attempt; parsing errors are ignored. 
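The internal/encoding/tag doc change above reflects the removal of weak-field support from the struct-tag parser and generator; the tag being parsed is the protobuf struct tag on generated fields. An illustrative (hypothetical, not from this repository) example of the tag shape it consumes, minus any weak= entry:

package example

// ExampleMessage shows a generated-style field with the protobuf struct
// tag that internal/encoding/tag parses: wire type, field number,
// cardinality, name and syntax entries.
type ExampleMessage struct {
	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
}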
func Unmarshal(tag string, goType reflect.Type, evs protoreflect.EnumValueDescriptors) protoreflect.FieldDescriptor { @@ -109,9 +109,6 @@ func Unmarshal(tag string, goType reflect.Type, evs protoreflect.EnumValueDescri } case s == "packed": f.L1.EditionFeatures.IsPacked = true - case strings.HasPrefix(s, "weak="): - f.L1.IsWeak = true - f.L1.Message = filedesc.PlaceholderMessage(protoreflect.FullName(s[len("weak="):])) case strings.HasPrefix(s, "def="): // The default tag is special in that everything afterwards is the // default regardless of the presence of commas. @@ -183,9 +180,6 @@ func Marshal(fd protoreflect.FieldDescriptor, enumName string) string { // the exact same semantics from the previous generator. tag = append(tag, "json="+jsonName) } - if fd.IsWeak() { - tag = append(tag, "weak="+string(fd.Message().FullName())) - } // The previous implementation does not tag extension fields as proto3, // even when the field is defined in a proto3 file. Match that behavior // for consistency. diff --git a/vendor/google.golang.org/protobuf/internal/errors/is_go112.go b/vendor/google.golang.org/protobuf/internal/errors/is_go112.go deleted file mode 100644 index fbcd34920..000000000 --- a/vendor/google.golang.org/protobuf/internal/errors/is_go112.go +++ /dev/null @@ -1,40 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.13 -// +build !go1.13 - -package errors - -import "reflect" - -// Is is a copy of Go 1.13's errors.Is for use with older Go versions. -func Is(err, target error) bool { - if target == nil { - return err == target - } - - isComparable := reflect.TypeOf(target).Comparable() - for { - if isComparable && err == target { - return true - } - if x, ok := err.(interface{ Is(error) bool }); ok && x.Is(target) { - return true - } - if err = unwrap(err); err == nil { - return false - } - } -} - -func unwrap(err error) error { - u, ok := err.(interface { - Unwrap() error - }) - if !ok { - return nil - } - return u.Unwrap() -} diff --git a/vendor/google.golang.org/protobuf/internal/errors/is_go113.go b/vendor/google.golang.org/protobuf/internal/errors/is_go113.go deleted file mode 100644 index 5e72f1cde..000000000 --- a/vendor/google.golang.org/protobuf/internal/errors/is_go113.go +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.13 -// +build go1.13 - -package errors - -import "errors" - -// Is is errors.Is. 
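The two deleted files above were pre-Go 1.13 shims providing errors.Is; the vendored module now uses the standard library directly. For reference, the behavior the shim reproduced:

package main

import (
	"errors"
	"fmt"
	"io/fs"
	"os"
)

func main() {
	// errors.Is walks the Unwrap chain built by %w and compares against a
	// sentinel, which is what the deleted copy emulated for Go before 1.13.
	_, err := os.Open("definitely-missing-file")
	wrapped := fmt.Errorf("loading config: %w", err)
	fmt.Println(errors.Is(wrapped, fs.ErrNotExist)) // true
}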
-func Is(err, target error) bool { return errors.Is(err, target) } diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go index df53ff40b..688aabe43 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go @@ -19,7 +19,6 @@ import ( "google.golang.org/protobuf/internal/pragma" "google.golang.org/protobuf/internal/strs" "google.golang.org/protobuf/reflect/protoreflect" - "google.golang.org/protobuf/reflect/protoregistry" ) // Edition is an Enum for proto2.Edition @@ -32,6 +31,7 @@ const ( EditionProto2 Edition = 998 EditionProto3 Edition = 999 Edition2023 Edition = 1000 + Edition2024 Edition = 1001 EditionUnsupported Edition = 100000 ) @@ -77,31 +77,48 @@ type ( Locations SourceLocations } + // EditionFeatures is a frequently-instantiated struct, so please take care + // to minimize padding when adding new fields to this struct (add them in + // the right place/order). EditionFeatures struct { + // StripEnumPrefix determines if the plugin generates enum value + // constants as-is, with their prefix stripped, or both variants. + StripEnumPrefix int + // IsFieldPresence is true if field_presence is EXPLICIT // https://protobuf.dev/editions/features/#field_presence IsFieldPresence bool + // IsFieldPresence is true if field_presence is LEGACY_REQUIRED // https://protobuf.dev/editions/features/#field_presence IsLegacyRequired bool + // IsOpenEnum is true if enum_type is OPEN // https://protobuf.dev/editions/features/#enum_type IsOpenEnum bool + // IsPacked is true if repeated_field_encoding is PACKED // https://protobuf.dev/editions/features/#repeated_field_encoding IsPacked bool + // IsUTF8Validated is true if utf_validation is VERIFY // https://protobuf.dev/editions/features/#utf8_validation IsUTF8Validated bool + // IsDelimitedEncoded is true if message_encoding is DELIMITED // https://protobuf.dev/editions/features/#message_encoding IsDelimitedEncoded bool + // IsJSONCompliant is true if json_format is ALLOW // https://protobuf.dev/editions/features/#json_format IsJSONCompliant bool + // GenerateLegacyUnmarshalJSON determines if the plugin generates the // UnmarshalJSON([]byte) error method for enums. GenerateLegacyUnmarshalJSON bool + // APILevel controls which API (Open, Hybrid or Opaque) should be used + // for generated code (.pb.go files). 
+ APILevel int } ) @@ -257,7 +274,7 @@ type ( Kind protoreflect.Kind StringName stringName IsProto3Optional bool // promoted from google.protobuf.FieldDescriptorProto - IsWeak bool // promoted from google.protobuf.FieldOptions + IsLazy bool // promoted from google.protobuf.FieldOptions Default defaultValue ContainingOneof protoreflect.OneofDescriptor // must be consistent with Message.Oneofs.Fields Enum protoreflect.EnumDescriptor @@ -350,7 +367,8 @@ func (fd *Field) IsPacked() bool { return fd.L1.EditionFeatures.IsPacked } func (fd *Field) IsExtension() bool { return false } -func (fd *Field) IsWeak() bool { return fd.L1.IsWeak } +func (fd *Field) IsWeak() bool { return false } +func (fd *Field) IsLazy() bool { return fd.L1.IsLazy } func (fd *Field) IsList() bool { return fd.Cardinality() == protoreflect.Repeated && !fd.IsMap() } func (fd *Field) IsMap() bool { return fd.Message() != nil && fd.Message().IsMapEntry() } func (fd *Field) MapKey() protoreflect.FieldDescriptor { @@ -376,11 +394,6 @@ func (fd *Field) Enum() protoreflect.EnumDescriptor { return fd.L1.Enum } func (fd *Field) Message() protoreflect.MessageDescriptor { - if fd.L1.IsWeak { - if d, _ := protoregistry.GlobalFiles.FindDescriptorByName(fd.L1.Message.FullName()); d != nil { - return d.(protoreflect.MessageDescriptor) - } - } return fd.L1.Message } func (fd *Field) IsMapEntry() bool { @@ -425,6 +438,7 @@ type ( Extendee protoreflect.MessageDescriptor Cardinality protoreflect.Cardinality Kind protoreflect.Kind + IsLazy bool EditionFeatures EditionFeatures } ExtensionL2 struct { @@ -465,6 +479,7 @@ func (xd *Extension) IsPacked() bool { } func (xd *Extension) IsExtension() bool { return true } func (xd *Extension) IsWeak() bool { return false } +func (xd *Extension) IsLazy() bool { return xd.L1.IsLazy } func (xd *Extension) IsList() bool { return xd.Cardinality() == protoreflect.Repeated } func (xd *Extension) IsMap() bool { return false } func (xd *Extension) MapKey() protoreflect.FieldDescriptor { return nil } diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go index 8a57d60b0..d2f549497 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go @@ -495,6 +495,8 @@ func (xd *Extension) unmarshalOptions(b []byte) { switch num { case genid.FieldOptions_Packed_field_number: xd.L1.EditionFeatures.IsPacked = protowire.DecodeBool(v) + case genid.FieldOptions_Lazy_field_number: + xd.L1.IsLazy = protowire.DecodeBool(v) } case protowire.BytesType: v, m := protowire.ConsumeBytes(b) diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go index e56c91a8d..d4c94458b 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go @@ -32,11 +32,6 @@ func (file *File) resolveMessages() { for j := range md.L2.Fields.List { fd := &md.L2.Fields.List[j] - // Weak fields are resolved upon actual use. - if fd.L1.IsWeak { - continue - } - // Resolve message field dependency. 
switch fd.L1.Kind { case protoreflect.EnumKind: @@ -150,8 +145,6 @@ func (fd *File) unmarshalFull(b []byte) { switch num { case genid.FileDescriptorProto_PublicDependency_field_number: fd.L2.Imports[v].IsPublic = true - case genid.FileDescriptorProto_WeakDependency_field_number: - fd.L2.Imports[v].IsWeak = true } case protowire.BytesType: v, m := protowire.ConsumeBytes(b) @@ -502,8 +495,8 @@ func (fd *Field) unmarshalOptions(b []byte) { switch num { case genid.FieldOptions_Packed_field_number: fd.L1.EditionFeatures.IsPacked = protowire.DecodeBool(v) - case genid.FieldOptions_Weak_field_number: - fd.L1.IsWeak = protowire.DecodeBool(v) + case genid.FieldOptions_Lazy_field_number: + fd.L1.IsLazy = protowire.DecodeBool(v) case FieldOptions_EnforceUTF8: fd.L1.EditionFeatures.IsUTF8Validated = protowire.DecodeBool(v) } diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/editions.go b/vendor/google.golang.org/protobuf/internal/filedesc/editions.go index 11f5f356b..10132c9b3 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/editions.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/editions.go @@ -32,6 +32,14 @@ func unmarshalGoFeature(b []byte, parent EditionFeatures) EditionFeatures { v, m := protowire.ConsumeVarint(b) b = b[m:] parent.GenerateLegacyUnmarshalJSON = protowire.DecodeBool(v) + case genid.GoFeatures_ApiLevel_field_number: + v, m := protowire.ConsumeVarint(b) + b = b[m:] + parent.APILevel = int(v) + case genid.GoFeatures_StripEnumPrefix_field_number: + v, m := protowire.ConsumeVarint(b) + b = b[m:] + parent.StripEnumPrefix = int(v) default: panic(fmt.Sprintf("unkown field number %d while unmarshalling GoFeatures", num)) } @@ -68,7 +76,7 @@ func unmarshalFeatureSet(b []byte, parent EditionFeatures) EditionFeatures { v, m := protowire.ConsumeBytes(b) b = b[m:] switch num { - case genid.GoFeatures_LegacyUnmarshalJsonEnum_field_number: + case genid.FeatureSet_Go_ext_number: parent = unmarshalGoFeature(v, parent) } } diff --git a/vendor/google.golang.org/protobuf/internal/filetype/build.go b/vendor/google.golang.org/protobuf/internal/filetype/build.go index ba83fea44..e1b4130bd 100644 --- a/vendor/google.golang.org/protobuf/internal/filetype/build.go +++ b/vendor/google.golang.org/protobuf/internal/filetype/build.go @@ -63,7 +63,7 @@ type Builder struct { // message declarations in "flattened ordering". // // Dependencies are Go types for enums or messages referenced by - // message fields (excluding weak fields), for parent extended messages of + // message fields, for parent extended messages of // extension fields, for enums or messages referenced by extension fields, // and for input and output messages referenced by service methods. // Dependencies must come after declarations, but the ordering of diff --git a/vendor/google.golang.org/protobuf/internal/flags/flags.go b/vendor/google.golang.org/protobuf/internal/flags/flags.go index 58372dd34..a06ccabc2 100644 --- a/vendor/google.golang.org/protobuf/internal/flags/flags.go +++ b/vendor/google.golang.org/protobuf/internal/flags/flags.go @@ -6,7 +6,7 @@ package flags // ProtoLegacy specifies whether to enable support for legacy functionality -// such as MessageSets, weak fields, and various other obscure behavior +// such as MessageSets, and various other obscure behavior // that is necessary to maintain backwards compatibility with proto1 or // the pre-release variants of proto2 and proto3. 
// diff --git a/vendor/google.golang.org/protobuf/internal/genid/doc.go b/vendor/google.golang.org/protobuf/internal/genid/doc.go index 45ccd0121..d9b9d916a 100644 --- a/vendor/google.golang.org/protobuf/internal/genid/doc.go +++ b/vendor/google.golang.org/protobuf/internal/genid/doc.go @@ -6,6 +6,6 @@ // and the well-known types. package genid -import protoreflect "google.golang.org/protobuf/reflect/protoreflect" +import "google.golang.org/protobuf/reflect/protoreflect" const GoogleProtobuf_package protoreflect.FullName = "google.protobuf" diff --git a/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go b/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go index 9a652a2b4..f5ee7f5c2 100644 --- a/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go +++ b/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go @@ -12,20 +12,59 @@ import ( const File_google_protobuf_go_features_proto = "google/protobuf/go_features.proto" -// Names for google.protobuf.GoFeatures. +// Names for pb.GoFeatures. const ( GoFeatures_message_name protoreflect.Name = "GoFeatures" - GoFeatures_message_fullname protoreflect.FullName = "google.protobuf.GoFeatures" + GoFeatures_message_fullname protoreflect.FullName = "pb.GoFeatures" ) -// Field names for google.protobuf.GoFeatures. +// Field names for pb.GoFeatures. const ( GoFeatures_LegacyUnmarshalJsonEnum_field_name protoreflect.Name = "legacy_unmarshal_json_enum" + GoFeatures_ApiLevel_field_name protoreflect.Name = "api_level" + GoFeatures_StripEnumPrefix_field_name protoreflect.Name = "strip_enum_prefix" - GoFeatures_LegacyUnmarshalJsonEnum_field_fullname protoreflect.FullName = "google.protobuf.GoFeatures.legacy_unmarshal_json_enum" + GoFeatures_LegacyUnmarshalJsonEnum_field_fullname protoreflect.FullName = "pb.GoFeatures.legacy_unmarshal_json_enum" + GoFeatures_ApiLevel_field_fullname protoreflect.FullName = "pb.GoFeatures.api_level" + GoFeatures_StripEnumPrefix_field_fullname protoreflect.FullName = "pb.GoFeatures.strip_enum_prefix" ) -// Field numbers for google.protobuf.GoFeatures. +// Field numbers for pb.GoFeatures. const ( GoFeatures_LegacyUnmarshalJsonEnum_field_number protoreflect.FieldNumber = 1 + GoFeatures_ApiLevel_field_number protoreflect.FieldNumber = 2 + GoFeatures_StripEnumPrefix_field_number protoreflect.FieldNumber = 3 +) + +// Full and short names for pb.GoFeatures.APILevel. +const ( + GoFeatures_APILevel_enum_fullname = "pb.GoFeatures.APILevel" + GoFeatures_APILevel_enum_name = "APILevel" +) + +// Enum values for pb.GoFeatures.APILevel. +const ( + GoFeatures_API_LEVEL_UNSPECIFIED_enum_value = 0 + GoFeatures_API_OPEN_enum_value = 1 + GoFeatures_API_HYBRID_enum_value = 2 + GoFeatures_API_OPAQUE_enum_value = 3 +) + +// Full and short names for pb.GoFeatures.StripEnumPrefix. +const ( + GoFeatures_StripEnumPrefix_enum_fullname = "pb.GoFeatures.StripEnumPrefix" + GoFeatures_StripEnumPrefix_enum_name = "StripEnumPrefix" +) + +// Enum values for pb.GoFeatures.StripEnumPrefix. 
+const ( + GoFeatures_STRIP_ENUM_PREFIX_UNSPECIFIED_enum_value = 0 + GoFeatures_STRIP_ENUM_PREFIX_KEEP_enum_value = 1 + GoFeatures_STRIP_ENUM_PREFIX_GENERATE_BOTH_enum_value = 2 + GoFeatures_STRIP_ENUM_PREFIX_STRIP_enum_value = 3 +) + +// Extension numbers +const ( + FeatureSet_Go_ext_number protoreflect.FieldNumber = 1002 ) diff --git a/vendor/google.golang.org/protobuf/internal/genid/goname.go b/vendor/google.golang.org/protobuf/internal/genid/goname.go index 693d2e9e1..99bb95baf 100644 --- a/vendor/google.golang.org/protobuf/internal/genid/goname.go +++ b/vendor/google.golang.org/protobuf/internal/genid/goname.go @@ -11,15 +11,10 @@ const ( SizeCache_goname = "sizeCache" SizeCacheA_goname = "XXX_sizecache" - WeakFields_goname = "weakFields" - WeakFieldsA_goname = "XXX_weak" - UnknownFields_goname = "unknownFields" UnknownFieldsA_goname = "XXX_unrecognized" ExtensionFields_goname = "extensionFields" ExtensionFieldsA_goname = "XXX_InternalExtensions" ExtensionFieldsB_goname = "XXX_extensions" - - WeakFieldPrefix_goname = "XXX_weak_" ) diff --git a/vendor/google.golang.org/protobuf/internal/genid/map_entry.go b/vendor/google.golang.org/protobuf/internal/genid/map_entry.go index 8f9ea02ff..bef5a25fb 100644 --- a/vendor/google.golang.org/protobuf/internal/genid/map_entry.go +++ b/vendor/google.golang.org/protobuf/internal/genid/map_entry.go @@ -4,7 +4,7 @@ package genid -import protoreflect "google.golang.org/protobuf/reflect/protoreflect" +import "google.golang.org/protobuf/reflect/protoreflect" // Generic field names and numbers for synthetic map entry messages. const ( diff --git a/vendor/golang.org/x/tools/internal/versions/toolchain_go120.go b/vendor/google.golang.org/protobuf/internal/genid/name.go similarity index 50% rename from vendor/golang.org/x/tools/internal/versions/toolchain_go120.go rename to vendor/google.golang.org/protobuf/internal/genid/name.go index 1a9efa126..224f33930 100644 --- a/vendor/golang.org/x/tools/internal/versions/toolchain_go120.go +++ b/vendor/google.golang.org/protobuf/internal/genid/name.go @@ -2,13 +2,11 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build go1.20 -// +build go1.20 +package genid -package versions +const ( + NoUnkeyedLiteral_goname = "noUnkeyedLiteral" + NoUnkeyedLiteralA_goname = "XXX_NoUnkeyedLiteral" -func init() { - if Compare(toolchain, Go1_20) < 0 { - toolchain = Go1_20 - } -} + BuilderSuffix_goname = "_builder" +) diff --git a/vendor/google.golang.org/protobuf/internal/genid/wrappers.go b/vendor/google.golang.org/protobuf/internal/genid/wrappers.go index 429384b85..9404270de 100644 --- a/vendor/google.golang.org/protobuf/internal/genid/wrappers.go +++ b/vendor/google.golang.org/protobuf/internal/genid/wrappers.go @@ -4,7 +4,7 @@ package genid -import protoreflect "google.golang.org/protobuf/reflect/protoreflect" +import "google.golang.org/protobuf/reflect/protoreflect" // Generic field name and number for messages in wrappers.proto. const ( diff --git a/vendor/google.golang.org/protobuf/internal/impl/api_export_opaque.go b/vendor/google.golang.org/protobuf/internal/impl/api_export_opaque.go new file mode 100644 index 000000000..6075d6f69 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/api_export_opaque.go @@ -0,0 +1,128 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package impl + +import ( + "strconv" + "sync/atomic" + "unsafe" + + "google.golang.org/protobuf/reflect/protoreflect" +) + +func (Export) UnmarshalField(msg any, fieldNum int32) { + UnmarshalField(msg.(protoreflect.ProtoMessage).ProtoReflect(), protoreflect.FieldNumber(fieldNum)) +} + +// Present checks the presence set for a certain field number (zero +// based, ordered by appearance in original proto file). part is +// a pointer to the correct element in the bitmask array, num is the +// field number unaltered. Example (field number 70 -> part = +// &m.XXX_presence[1], num = 70) +func (Export) Present(part *uint32, num uint32) bool { + // This hook will read an unprotected shadow presence set if + // we're unning under the race detector + raceDetectHookPresent(part, num) + return atomic.LoadUint32(part)&(1<<(num%32)) > 0 +} + +// SetPresent adds a field to the presence set. part is a pointer to +// the relevant element in the array and num is the field number +// unaltered. size is the number of fields in the protocol +// buffer. +func (Export) SetPresent(part *uint32, num uint32, size uint32) { + // This hook will mutate an unprotected shadow presence set if + // we're running under the race detector + raceDetectHookSetPresent(part, num, presenceSize(size)) + for { + old := atomic.LoadUint32(part) + if atomic.CompareAndSwapUint32(part, old, old|(1<<(num%32))) { + return + } + } +} + +// SetPresentNonAtomic is like SetPresent, but operates non-atomically. +// It is meant for use by builder methods, where the message is known not +// to be accessible yet by other goroutines. +func (Export) SetPresentNonAtomic(part *uint32, num uint32, size uint32) { + // This hook will mutate an unprotected shadow presence set if + // we're running under the race detector + raceDetectHookSetPresent(part, num, presenceSize(size)) + *part |= 1 << (num % 32) +} + +// ClearPresence removes a field from the presence set. part is a +// pointer to the relevant element in the presence array and num is +// the field number unaltered. +func (Export) ClearPresent(part *uint32, num uint32) { + // This hook will mutate an unprotected shadow presence set if + // we're running under the race detector + raceDetectHookClearPresent(part, num) + for { + old := atomic.LoadUint32(part) + if atomic.CompareAndSwapUint32(part, old, old&^(1<<(num%32))) { + return + } + } +} + +// interfaceToPointer takes a pointer to an empty interface whose value is a +// pointer type, and converts it into a "pointer" that points to the same +// target +func interfaceToPointer(i *any) pointer { + return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]} +} + +func (p pointer) atomicGetPointer() pointer { + return pointer{p: atomic.LoadPointer((*unsafe.Pointer)(p.p))} +} + +func (p pointer) atomicSetPointer(q pointer) { + atomic.StorePointer((*unsafe.Pointer)(p.p), q.p) +} + +// AtomicCheckPointerIsNil takes an interface (which is a pointer to a +// pointer) and returns true if the pointed-to pointer is nil (using an +// atomic load). This function is inlineable and, on x86, just becomes a +// simple load and compare. +func (Export) AtomicCheckPointerIsNil(ptr any) bool { + return interfaceToPointer(&ptr).atomicGetPointer().IsNil() +} + +// AtomicSetPointer takes two interfaces (first is a pointer to a pointer, +// second is a pointer) and atomically sets the second pointer into location +// referenced by first pointer. Unfortunately, atomicSetPointer() does not inline +// (even on x86), so this does not become a simple store on x86. 
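The Present/SetPresent/ClearPresent exports added above operate on a presence bitmap packed 32 fields per uint32 word: the caller passes a pointer to the word that holds the field plus the raw index, and bit index%32 is read or updated atomically. A standalone sketch of that word/bit arithmetic, assuming the caller maps field index i to word i/32 (helper names here are hypothetical, not part of the vendored API):

package main

import (
	"fmt"
	"sync/atomic"
)

// present reports whether bit i%32 of word i/32 is set, mirroring the
// atomic read in the vendored Present export.
func present(bitmap []uint32, i uint32) bool {
	return atomic.LoadUint32(&bitmap[i/32])&(1<<(i%32)) != 0
}

// setPresent sets the same bit with a CAS loop, mirroring the update
// style of the vendored SetPresent export.
func setPresent(bitmap []uint32, i uint32) {
	word := &bitmap[i/32]
	for {
		old := atomic.LoadUint32(word)
		if atomic.CompareAndSwapUint32(word, old, old|(1<<(i%32))) {
			return
		}
	}
}

func main() {
	bitmap := make([]uint32, 2) // presence bits for up to 64 fields
	setPresent(bitmap, 40)      // index 40 -> word 1, bit 8
	fmt.Println(present(bitmap, 40), present(bitmap, 41)) // true false
}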
+func (Export) AtomicSetPointer(dstPtr, valPtr any) { + interfaceToPointer(&dstPtr).atomicSetPointer(interfaceToPointer(&valPtr)) +} + +// AtomicLoadPointer loads the pointer at the location pointed at by src, +// and stores that pointer value into the location pointed at by dst. +func (Export) AtomicLoadPointer(ptr Pointer, dst Pointer) { + *(*unsafe.Pointer)(unsafe.Pointer(dst)) = atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(ptr))) +} + +// AtomicInitializePointer makes ptr and dst point to the same value. +// +// If *ptr is a nil pointer, it sets *ptr = *dst. +// +// If *ptr is a non-nil pointer, it sets *dst = *ptr. +func (Export) AtomicInitializePointer(ptr Pointer, dst Pointer) { + if !atomic.CompareAndSwapPointer((*unsafe.Pointer)(ptr), unsafe.Pointer(nil), *(*unsafe.Pointer)(dst)) { + *(*unsafe.Pointer)(unsafe.Pointer(dst)) = atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(ptr))) + } +} + +// MessageFieldStringOf returns the field formatted as a string, +// either as the field name if resolvable otherwise as a decimal string. +func (Export) MessageFieldStringOf(md protoreflect.MessageDescriptor, n protoreflect.FieldNumber) string { + fd := md.Fields().ByNumber(n) + if fd != nil { + return string(fd.Name()) + } + return strconv.Itoa(int(n)) +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/bitmap.go b/vendor/google.golang.org/protobuf/internal/impl/bitmap.go new file mode 100644 index 000000000..ea276547c --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/bitmap.go @@ -0,0 +1,34 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !race + +package impl + +// There is no additional data as we're not running under race detector. +type RaceDetectHookData struct{} + +// Empty stubs for when not using the race detector. Calls to these from index.go should be optimized away. +func (presence) raceDetectHookPresent(num uint32) {} +func (presence) raceDetectHookSetPresent(num uint32, size presenceSize) {} +func (presence) raceDetectHookClearPresent(num uint32) {} +func (presence) raceDetectHookAllocAndCopy(src presence) {} + +// raceDetectHookPresent is called by the generated file interface +// (*proto.internalFuncs) Present to optionally read an unprotected +// shadow bitmap when race detection is enabled. In regular code it is +// a noop. +func raceDetectHookPresent(field *uint32, num uint32) {} + +// raceDetectHookSetPresent is called by the generated file interface +// (*proto.internalFuncs) SetPresent to optionally write an unprotected +// shadow bitmap when race detection is enabled. In regular code it is +// a noop. +func raceDetectHookSetPresent(field *uint32, num uint32, size presenceSize) {} + +// raceDetectHookClearPresent is called by the generated file interface +// (*proto.internalFuncs) ClearPresent to optionally write an unprotected +// shadow bitmap when race detection is enabled. In regular code it is +// a noop. +func raceDetectHookClearPresent(field *uint32, num uint32) {} diff --git a/vendor/google.golang.org/protobuf/internal/impl/bitmap_race.go b/vendor/google.golang.org/protobuf/internal/impl/bitmap_race.go new file mode 100644 index 000000000..e9a27583a --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/bitmap_race.go @@ -0,0 +1,126 @@ +// Copyright 2024 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build race + +package impl + +// When running under race detector, we add a presence map of bytes, that we can access +// in the hook functions so that we trigger the race detection whenever we have concurrent +// Read-Writes or Write-Writes. The race detector does not otherwise detect invalid concurrent +// access to lazy fields as all updates of bitmaps and pointers are done using atomic operations. +type RaceDetectHookData struct { + shadowPresence *[]byte +} + +// Hooks for presence bitmap operations that allocate, read and write the shadowPresence +// using non-atomic operations. +func (data *RaceDetectHookData) raceDetectHookAlloc(size presenceSize) { + sp := make([]byte, size) + atomicStoreShadowPresence(&data.shadowPresence, &sp) +} + +func (p presence) raceDetectHookPresent(num uint32) { + data := p.toRaceDetectData() + if data == nil { + return + } + sp := atomicLoadShadowPresence(&data.shadowPresence) + if sp != nil { + _ = (*sp)[num] + } +} + +func (p presence) raceDetectHookSetPresent(num uint32, size presenceSize) { + data := p.toRaceDetectData() + if data == nil { + return + } + sp := atomicLoadShadowPresence(&data.shadowPresence) + if sp == nil { + data.raceDetectHookAlloc(size) + sp = atomicLoadShadowPresence(&data.shadowPresence) + } + (*sp)[num] = 1 +} + +func (p presence) raceDetectHookClearPresent(num uint32) { + data := p.toRaceDetectData() + if data == nil { + return + } + sp := atomicLoadShadowPresence(&data.shadowPresence) + if sp != nil { + (*sp)[num] = 0 + + } +} + +// raceDetectHookAllocAndCopy allocates a new shadowPresence slice at lazy and copies +// shadowPresence bytes from src to lazy. +func (p presence) raceDetectHookAllocAndCopy(q presence) { + sData := q.toRaceDetectData() + dData := p.toRaceDetectData() + if sData == nil { + return + } + srcSp := atomicLoadShadowPresence(&sData.shadowPresence) + if srcSp == nil { + atomicStoreShadowPresence(&dData.shadowPresence, nil) + return + } + n := len(*srcSp) + dSlice := make([]byte, n) + atomicStoreShadowPresence(&dData.shadowPresence, &dSlice) + for i := 0; i < n; i++ { + dSlice[i] = (*srcSp)[i] + } +} + +// raceDetectHookPresent is called by the generated file interface +// (*proto.internalFuncs) Present to optionally read an unprotected +// shadow bitmap when race detection is enabled. In regular code it is +// a noop. +func raceDetectHookPresent(field *uint32, num uint32) { + data := findPointerToRaceDetectData(field, num) + if data == nil { + return + } + sp := atomicLoadShadowPresence(&data.shadowPresence) + if sp != nil { + _ = (*sp)[num] + } +} + +// raceDetectHookSetPresent is called by the generated file interface +// (*proto.internalFuncs) SetPresent to optionally write an unprotected +// shadow bitmap when race detection is enabled. In regular code it is +// a noop. +func raceDetectHookSetPresent(field *uint32, num uint32, size presenceSize) { + data := findPointerToRaceDetectData(field, num) + if data == nil { + return + } + sp := atomicLoadShadowPresence(&data.shadowPresence) + if sp == nil { + data.raceDetectHookAlloc(size) + sp = atomicLoadShadowPresence(&data.shadowPresence) + } + (*sp)[num] = 1 +} + +// raceDetectHookClearPresent is called by the generated file interface +// (*proto.internalFuncs) ClearPresent to optionally write an unprotected +// shadow bitmap when race detection is enabled. In regular code it is +// a noop. 
+func raceDetectHookClearPresent(field *uint32, num uint32) { + data := findPointerToRaceDetectData(field, num) + if data == nil { + return + } + sp := atomicLoadShadowPresence(&data.shadowPresence) + if sp != nil { + (*sp)[num] = 0 + } +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/checkinit.go b/vendor/google.golang.org/protobuf/internal/impl/checkinit.go index f29e6a8fa..fe2c719ce 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/checkinit.go +++ b/vendor/google.golang.org/protobuf/internal/impl/checkinit.go @@ -35,6 +35,12 @@ func (mi *MessageInfo) checkInitializedPointer(p pointer) error { } return nil } + + var presence presence + if mi.presenceOffset.IsValid() { + presence = p.Apply(mi.presenceOffset).PresenceInfo() + } + if mi.extensionOffset.IsValid() { e := p.Apply(mi.extensionOffset).Extensions() if err := mi.isInitExtensions(e); err != nil { @@ -45,6 +51,33 @@ func (mi *MessageInfo) checkInitializedPointer(p pointer) error { if !f.isRequired && f.funcs.isInit == nil { continue } + + if f.presenceIndex != noPresence { + if !presence.Present(f.presenceIndex) { + if f.isRequired { + return errors.RequiredNotSet(string(mi.Desc.Fields().ByNumber(f.num).FullName())) + } + continue + } + if f.funcs.isInit != nil { + f.mi.init() + if f.mi.needsInitCheck { + if f.isLazy && p.Apply(f.offset).AtomicGetPointer().IsNil() { + lazy := *p.Apply(mi.lazyOffset).LazyInfoPtr() + if !lazy.AllowedPartial() { + // Nothing to see here, it was checked on unmarshal + continue + } + mi.lazyUnmarshal(p, f.num) + } + if err := f.funcs.isInit(p.Apply(f.offset), f); err != nil { + return err + } + } + } + continue + } + fptr := p.Apply(f.offset) if f.isPointer && fptr.Elem().IsNil() { if f.isRequired { diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go b/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go index 4bb0a7a20..0d5b546e0 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go @@ -67,7 +67,6 @@ type lazyExtensionValue struct { xi *extensionFieldInfo value protoreflect.Value b []byte - fn func() protoreflect.Value } type ExtensionField struct { @@ -158,10 +157,9 @@ func (f *ExtensionField) lazyInit() { } f.lazy.value = val } else { - f.lazy.value = f.lazy.fn() + panic("No support for lazy fns for ExtensionField") } f.lazy.xi = nil - f.lazy.fn = nil f.lazy.b = nil atomic.StoreUint32(&f.lazy.atomicOnce, 1) } @@ -174,13 +172,6 @@ func (f *ExtensionField) Set(t protoreflect.ExtensionType, v protoreflect.Value) f.lazy = nil } -// SetLazy sets the type and a value that is to be lazily evaluated upon first use. -// This must not be called concurrently. -func (f *ExtensionField) SetLazy(t protoreflect.ExtensionType, fn func() protoreflect.Value) { - f.typ = t - f.lazy = &lazyExtensionValue{fn: fn} -} - // Value returns the value of the extension field. // This may be called concurrently. 
func (f *ExtensionField) Value() protoreflect.Value { diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_field.go b/vendor/google.golang.org/protobuf/internal/impl/codec_field.go index 78ee47e44..d14d7d93c 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_field.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_field.go @@ -5,15 +5,12 @@ package impl import ( - "fmt" "reflect" - "sync" "google.golang.org/protobuf/encoding/protowire" "google.golang.org/protobuf/internal/errors" "google.golang.org/protobuf/proto" "google.golang.org/protobuf/reflect/protoreflect" - "google.golang.org/protobuf/reflect/protoregistry" "google.golang.org/protobuf/runtime/protoiface" ) @@ -65,6 +62,9 @@ func (mi *MessageInfo) initOneofFieldCoders(od protoreflect.OneofDescriptor, si if err != nil { return out, err } + if cf.funcs.isInit == nil { + out.initialized = true + } vi.Set(vw) return out, nil } @@ -118,78 +118,6 @@ func (mi *MessageInfo) initOneofFieldCoders(od protoreflect.OneofDescriptor, si } } -func makeWeakMessageFieldCoder(fd protoreflect.FieldDescriptor) pointerCoderFuncs { - var once sync.Once - var messageType protoreflect.MessageType - lazyInit := func() { - once.Do(func() { - messageName := fd.Message().FullName() - messageType, _ = protoregistry.GlobalTypes.FindMessageByName(messageName) - }) - } - - return pointerCoderFuncs{ - size: func(p pointer, f *coderFieldInfo, opts marshalOptions) int { - m, ok := p.WeakFields().get(f.num) - if !ok { - return 0 - } - lazyInit() - if messageType == nil { - panic(fmt.Sprintf("weak message %v is not linked in", fd.Message().FullName())) - } - return sizeMessage(m, f.tagsize, opts) - }, - marshal: func(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - m, ok := p.WeakFields().get(f.num) - if !ok { - return b, nil - } - lazyInit() - if messageType == nil { - panic(fmt.Sprintf("weak message %v is not linked in", fd.Message().FullName())) - } - return appendMessage(b, m, f.wiretag, opts) - }, - unmarshal: func(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (unmarshalOutput, error) { - fs := p.WeakFields() - m, ok := fs.get(f.num) - if !ok { - lazyInit() - if messageType == nil { - return unmarshalOutput{}, errUnknown - } - m = messageType.New().Interface() - fs.set(f.num, m) - } - return consumeMessage(b, m, wtyp, opts) - }, - isInit: func(p pointer, f *coderFieldInfo) error { - m, ok := p.WeakFields().get(f.num) - if !ok { - return nil - } - return proto.CheckInitialized(m) - }, - merge: func(dst, src pointer, f *coderFieldInfo, opts mergeOptions) { - sm, ok := src.WeakFields().get(f.num) - if !ok { - return - } - dm, ok := dst.WeakFields().get(f.num) - if !ok { - lazyInit() - if messageType == nil { - panic(fmt.Sprintf("weak message %v is not linked in", fd.Message().FullName())) - } - dm = messageType.New().Interface() - dst.WeakFields().set(f.num, dm) - } - opts.Merge(dm, sm) - }, - } -} - func makeMessageFieldCoder(fd protoreflect.FieldDescriptor, ft reflect.Type) pointerCoderFuncs { if mi := getMessageInfo(ft); mi != nil { funcs := pointerCoderFuncs{ diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_field_opaque.go b/vendor/google.golang.org/protobuf/internal/impl/codec_field_opaque.go new file mode 100644 index 000000000..76818ea25 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_field_opaque.go @@ -0,0 +1,264 @@ +// Copyright 2024 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impl + +import ( + "fmt" + "reflect" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/reflect/protoreflect" +) + +func makeOpaqueMessageFieldCoder(fd protoreflect.FieldDescriptor, ft reflect.Type) (*MessageInfo, pointerCoderFuncs) { + mi := getMessageInfo(ft) + if mi == nil { + panic(fmt.Sprintf("invalid field: %v: unsupported message type %v", fd.FullName(), ft)) + } + switch fd.Kind() { + case protoreflect.MessageKind: + return mi, pointerCoderFuncs{ + size: sizeOpaqueMessage, + marshal: appendOpaqueMessage, + unmarshal: consumeOpaqueMessage, + isInit: isInitOpaqueMessage, + merge: mergeOpaqueMessage, + } + case protoreflect.GroupKind: + return mi, pointerCoderFuncs{ + size: sizeOpaqueGroup, + marshal: appendOpaqueGroup, + unmarshal: consumeOpaqueGroup, + isInit: isInitOpaqueMessage, + merge: mergeOpaqueMessage, + } + } + panic("unexpected field kind") +} + +func sizeOpaqueMessage(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + return protowire.SizeBytes(f.mi.sizePointer(p.AtomicGetPointer(), opts)) + f.tagsize +} + +func appendOpaqueMessage(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + mp := p.AtomicGetPointer() + calculatedSize := f.mi.sizePointer(mp, opts) + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, uint64(calculatedSize)) + before := len(b) + b, err := f.mi.marshalAppendPointer(b, mp, opts) + if measuredSize := len(b) - before; calculatedSize != measuredSize && err == nil { + return nil, errors.MismatchedSizeCalculation(calculatedSize, measuredSize) + } + return b, err +} + +func consumeOpaqueMessage(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.BytesType { + return out, errUnknown + } + v, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, errDecode + } + mp := p.AtomicGetPointer() + if mp.IsNil() { + mp = p.AtomicSetPointerIfNil(pointerOfValue(reflect.New(f.mi.GoReflectType.Elem()))) + } + o, err := f.mi.unmarshalPointer(v, mp, 0, opts) + if err != nil { + return out, err + } + out.n = n + out.initialized = o.initialized + return out, nil +} + +func isInitOpaqueMessage(p pointer, f *coderFieldInfo) error { + mp := p.AtomicGetPointer() + if mp.IsNil() { + return nil + } + return f.mi.checkInitializedPointer(mp) +} + +func mergeOpaqueMessage(dst, src pointer, f *coderFieldInfo, opts mergeOptions) { + dstmp := dst.AtomicGetPointer() + if dstmp.IsNil() { + dstmp = dst.AtomicSetPointerIfNil(pointerOfValue(reflect.New(f.mi.GoReflectType.Elem()))) + } + f.mi.mergePointer(dstmp, src.AtomicGetPointer(), opts) +} + +func sizeOpaqueGroup(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + return 2*f.tagsize + f.mi.sizePointer(p.AtomicGetPointer(), opts) +} + +func appendOpaqueGroup(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + b = protowire.AppendVarint(b, f.wiretag) // start group + b, err := f.mi.marshalAppendPointer(b, p.AtomicGetPointer(), opts) + b = protowire.AppendVarint(b, f.wiretag+1) // end group + return b, err +} + +func consumeOpaqueGroup(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.StartGroupType { + return out, errUnknown + } + mp := p.AtomicGetPointer() + 
if mp.IsNil() { + mp = p.AtomicSetPointerIfNil(pointerOfValue(reflect.New(f.mi.GoReflectType.Elem()))) + } + o, e := f.mi.unmarshalPointer(b, mp, f.num, opts) + return o, e +} + +func makeOpaqueRepeatedMessageFieldCoder(fd protoreflect.FieldDescriptor, ft reflect.Type) (*MessageInfo, pointerCoderFuncs) { + if ft.Kind() != reflect.Ptr || ft.Elem().Kind() != reflect.Slice { + panic(fmt.Sprintf("invalid field: %v: unsupported type for opaque repeated message: %v", fd.FullName(), ft)) + } + mt := ft.Elem().Elem() // *[]*T -> *T + mi := getMessageInfo(mt) + if mi == nil { + panic(fmt.Sprintf("invalid field: %v: unsupported message type %v", fd.FullName(), mt)) + } + switch fd.Kind() { + case protoreflect.MessageKind: + return mi, pointerCoderFuncs{ + size: sizeOpaqueMessageSlice, + marshal: appendOpaqueMessageSlice, + unmarshal: consumeOpaqueMessageSlice, + isInit: isInitOpaqueMessageSlice, + merge: mergeOpaqueMessageSlice, + } + case protoreflect.GroupKind: + return mi, pointerCoderFuncs{ + size: sizeOpaqueGroupSlice, + marshal: appendOpaqueGroupSlice, + unmarshal: consumeOpaqueGroupSlice, + isInit: isInitOpaqueMessageSlice, + merge: mergeOpaqueMessageSlice, + } + } + panic("unexpected field kind") +} + +func sizeOpaqueMessageSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + s := p.AtomicGetPointer().PointerSlice() + n := 0 + for _, v := range s { + n += protowire.SizeBytes(f.mi.sizePointer(v, opts)) + f.tagsize + } + return n +} + +func appendOpaqueMessageSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + s := p.AtomicGetPointer().PointerSlice() + var err error + for _, v := range s { + b = protowire.AppendVarint(b, f.wiretag) + siz := f.mi.sizePointer(v, opts) + b = protowire.AppendVarint(b, uint64(siz)) + before := len(b) + b, err = f.mi.marshalAppendPointer(b, v, opts) + if err != nil { + return b, err + } + if measuredSize := len(b) - before; siz != measuredSize { + return nil, errors.MismatchedSizeCalculation(siz, measuredSize) + } + } + return b, nil +} + +func consumeOpaqueMessageSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.BytesType { + return out, errUnknown + } + v, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, errDecode + } + mp := pointerOfValue(reflect.New(f.mi.GoReflectType.Elem())) + o, err := f.mi.unmarshalPointer(v, mp, 0, opts) + if err != nil { + return out, err + } + sp := p.AtomicGetPointer() + if sp.IsNil() { + sp = p.AtomicSetPointerIfNil(pointerOfValue(reflect.New(f.ft.Elem()))) + } + sp.AppendPointerSlice(mp) + out.n = n + out.initialized = o.initialized + return out, nil +} + +func isInitOpaqueMessageSlice(p pointer, f *coderFieldInfo) error { + sp := p.AtomicGetPointer() + if sp.IsNil() { + return nil + } + s := sp.PointerSlice() + for _, v := range s { + if err := f.mi.checkInitializedPointer(v); err != nil { + return err + } + } + return nil +} + +func mergeOpaqueMessageSlice(dst, src pointer, f *coderFieldInfo, opts mergeOptions) { + ds := dst.AtomicGetPointer() + if ds.IsNil() { + ds = dst.AtomicSetPointerIfNil(pointerOfValue(reflect.New(f.ft.Elem()))) + } + for _, sp := range src.AtomicGetPointer().PointerSlice() { + dm := pointerOfValue(reflect.New(f.mi.GoReflectType.Elem())) + f.mi.mergePointer(dm, sp, opts) + ds.AppendPointerSlice(dm) + } +} + +func sizeOpaqueGroupSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + s := p.AtomicGetPointer().PointerSlice() + n := 0 + 
for _, v := range s { + n += 2*f.tagsize + f.mi.sizePointer(v, opts) + } + return n +} + +func appendOpaqueGroupSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + s := p.AtomicGetPointer().PointerSlice() + var err error + for _, v := range s { + b = protowire.AppendVarint(b, f.wiretag) // start group + b, err = f.mi.marshalAppendPointer(b, v, opts) + if err != nil { + return b, err + } + b = protowire.AppendVarint(b, f.wiretag+1) // end group + } + return b, nil +} + +func consumeOpaqueGroupSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.StartGroupType { + return out, errUnknown + } + mp := pointerOfValue(reflect.New(f.mi.GoReflectType.Elem())) + out, err = f.mi.unmarshalPointer(b, mp, f.num, opts) + if err != nil { + return out, err + } + sp := p.AtomicGetPointer() + if sp.IsNil() { + sp = p.AtomicSetPointerIfNil(pointerOfValue(reflect.New(f.ft.Elem()))) + } + sp.AppendPointerSlice(mp) + return out, err +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_map.go b/vendor/google.golang.org/protobuf/internal/impl/codec_map.go index fb35f0bae..229c69801 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_map.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_map.go @@ -94,7 +94,7 @@ func sizeMap(mapv reflect.Value, mapi *mapInfo, f *coderFieldInfo, opts marshalO return 0 } n := 0 - iter := mapRange(mapv) + iter := mapv.MapRange() for iter.Next() { key := mapi.conv.keyConv.PBValueOf(iter.Key()).MapKey() keySize := mapi.keyFuncs.size(key.Value(), mapKeyTagSize, opts) @@ -281,7 +281,7 @@ func appendMap(b []byte, mapv reflect.Value, mapi *mapInfo, f *coderFieldInfo, o if opts.Deterministic() { return appendMapDeterministic(b, mapv, mapi, f, opts) } - iter := mapRange(mapv) + iter := mapv.MapRange() for iter.Next() { var err error b = protowire.AppendVarint(b, f.wiretag) @@ -328,7 +328,7 @@ func isInitMap(mapv reflect.Value, mapi *mapInfo, f *coderFieldInfo) error { if !mi.needsInitCheck { return nil } - iter := mapRange(mapv) + iter := mapv.MapRange() for iter.Next() { val := pointerOfValue(iter.Value()) if err := mi.checkInitializedPointer(val); err != nil { @@ -336,7 +336,7 @@ func isInitMap(mapv reflect.Value, mapi *mapInfo, f *coderFieldInfo) error { } } } else { - iter := mapRange(mapv) + iter := mapv.MapRange() for iter.Next() { val := mapi.conv.valConv.PBValueOf(iter.Value()) if err := mapi.valFuncs.isInit(val); err != nil { @@ -356,7 +356,7 @@ func mergeMap(dst, src pointer, f *coderFieldInfo, opts mergeOptions) { if dstm.IsNil() { dstm.Set(reflect.MakeMap(f.ft)) } - iter := mapRange(srcm) + iter := srcm.MapRange() for iter.Next() { dstm.SetMapIndex(iter.Key(), iter.Value()) } @@ -371,7 +371,7 @@ func mergeMapOfBytes(dst, src pointer, f *coderFieldInfo, opts mergeOptions) { if dstm.IsNil() { dstm.Set(reflect.MakeMap(f.ft)) } - iter := mapRange(srcm) + iter := srcm.MapRange() for iter.Next() { dstm.SetMapIndex(iter.Key(), reflect.ValueOf(append(emptyBuf[:], iter.Value().Bytes()...))) } @@ -386,7 +386,7 @@ func mergeMapOfMessage(dst, src pointer, f *coderFieldInfo, opts mergeOptions) { if dstm.IsNil() { dstm.Set(reflect.MakeMap(f.ft)) } - iter := mapRange(srcm) + iter := srcm.MapRange() for iter.Next() { val := reflect.New(f.ft.Elem().Elem()) if f.mi != nil { diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_map_go111.go b/vendor/google.golang.org/protobuf/internal/impl/codec_map_go111.go deleted 
file mode 100644 index 4b15493f2..000000000 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_map_go111.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.12 -// +build !go1.12 - -package impl - -import "reflect" - -type mapIter struct { - v reflect.Value - keys []reflect.Value -} - -// mapRange provides a less-efficient equivalent to -// the Go 1.12 reflect.Value.MapRange method. -func mapRange(v reflect.Value) *mapIter { - return &mapIter{v: v} -} - -func (i *mapIter) Next() bool { - if i.keys == nil { - i.keys = i.v.MapKeys() - } else { - i.keys = i.keys[1:] - } - return len(i.keys) > 0 -} - -func (i *mapIter) Key() reflect.Value { - return i.keys[0] -} - -func (i *mapIter) Value() reflect.Value { - return i.v.MapIndex(i.keys[0]) -} diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_map_go112.go b/vendor/google.golang.org/protobuf/internal/impl/codec_map_go112.go deleted file mode 100644 index 0b31b66ea..000000000 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_map_go112.go +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.12 -// +build go1.12 - -package impl - -import "reflect" - -func mapRange(v reflect.Value) *reflect.MapIter { return v.MapRange() } diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_message.go b/vendor/google.golang.org/protobuf/internal/impl/codec_message.go index 6b2fdbb73..f78b57b04 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_message.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_message.go @@ -32,6 +32,10 @@ type coderMessageInfo struct { needsInitCheck bool isMessageSet bool numRequiredFields uint8 + + lazyOffset offset + presenceOffset offset + presenceSize presenceSize } type coderFieldInfo struct { @@ -45,12 +49,19 @@ type coderFieldInfo struct { tagsize int // size of the varint-encoded tag isPointer bool // true if IsNil may be called on the struct field isRequired bool // true if field is required + + isLazy bool + presenceIndex uint32 } +const noPresence = 0xffffffff + func (mi *MessageInfo) makeCoderMethods(t reflect.Type, si structInfo) { mi.sizecacheOffset = invalidOffset mi.unknownOffset = invalidOffset mi.extensionOffset = invalidOffset + mi.lazyOffset = invalidOffset + mi.presenceOffset = si.presenceOffset if si.sizecacheOffset.IsValid() && si.sizecacheType == sizecacheType { mi.sizecacheOffset = si.sizecacheOffset @@ -107,12 +118,9 @@ func (mi *MessageInfo) makeCoderMethods(t reflect.Type, si structInfo) { }, } case isOneof: - fieldOffset = offsetOf(fs, mi.Exporter) - case fd.IsWeak(): - fieldOffset = si.weakOffset - funcs = makeWeakMessageFieldCoder(fd) + fieldOffset = offsetOf(fs) default: - fieldOffset = offsetOf(fs, mi.Exporter) + fieldOffset = offsetOf(fs) childMessage, funcs = fieldCoder(fd, ft) } cf := &preallocFields[i] @@ -127,6 +135,8 @@ func (mi *MessageInfo) makeCoderMethods(t reflect.Type, si structInfo) { validation: newFieldValidationInfo(mi, si, fd, ft), isPointer: fd.Cardinality() == protoreflect.Repeated || fd.HasPresence(), isRequired: fd.Cardinality() == protoreflect.Required, + + presenceIndex: noPresence, } mi.orderedCoderFields = append(mi.orderedCoderFields, cf) mi.coderFields[cf.num] = cf @@ -189,6 +199,9 @@ func 
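The codec_map changes above replace the internal mapRange helper with direct calls to reflect.Value.MapRange, and the build-tagged shim files (codec_map_go111.go, codec_map_go112.go) are deleted. For reference, the standard-library iterator that takes over:

package main

import (
	"fmt"
	"reflect"
)

func main() {
	m := map[string]int{"a": 1, "b": 2}

	// reflect.Value.MapRange (Go 1.12+) returns a *reflect.MapIter, the
	// iterator the deleted shim emulated on older Go versions.
	iter := reflect.ValueOf(m).MapRange()
	for iter.Next() {
		fmt.Println(iter.Key().String(), iter.Value().Int())
	}
}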
(mi *MessageInfo) makeCoderMethods(t reflect.Type, si structInfo) { if mi.methods.Merge == nil { mi.methods.Merge = mi.merge } + if mi.methods.Equal == nil { + mi.methods.Equal = equal + } } // getUnknownBytes returns a *[]byte for the unknown fields. diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_message_opaque.go b/vendor/google.golang.org/protobuf/internal/impl/codec_message_opaque.go new file mode 100644 index 000000000..41c1f74ef --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_message_opaque.go @@ -0,0 +1,153 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impl + +import ( + "fmt" + "reflect" + "sort" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/encoding/messageset" + "google.golang.org/protobuf/internal/order" + "google.golang.org/protobuf/reflect/protoreflect" + piface "google.golang.org/protobuf/runtime/protoiface" +) + +func (mi *MessageInfo) makeOpaqueCoderMethods(t reflect.Type, si opaqueStructInfo) { + mi.sizecacheOffset = si.sizecacheOffset + mi.unknownOffset = si.unknownOffset + mi.unknownPtrKind = si.unknownType.Kind() == reflect.Ptr + mi.extensionOffset = si.extensionOffset + mi.lazyOffset = si.lazyOffset + mi.presenceOffset = si.presenceOffset + + mi.coderFields = make(map[protowire.Number]*coderFieldInfo) + fields := mi.Desc.Fields() + for i := 0; i < fields.Len(); i++ { + fd := fields.Get(i) + + fs := si.fieldsByNumber[fd.Number()] + if fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic() { + fs = si.oneofsByName[fd.ContainingOneof().Name()] + } + ft := fs.Type + var wiretag uint64 + if !fd.IsPacked() { + wiretag = protowire.EncodeTag(fd.Number(), wireTypes[fd.Kind()]) + } else { + wiretag = protowire.EncodeTag(fd.Number(), protowire.BytesType) + } + var fieldOffset offset + var funcs pointerCoderFuncs + var childMessage *MessageInfo + switch { + case fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic(): + fieldOffset = offsetOf(fs) + case fd.Message() != nil && !fd.IsMap(): + fieldOffset = offsetOf(fs) + if fd.IsList() { + childMessage, funcs = makeOpaqueRepeatedMessageFieldCoder(fd, ft) + } else { + childMessage, funcs = makeOpaqueMessageFieldCoder(fd, ft) + } + default: + fieldOffset = offsetOf(fs) + childMessage, funcs = fieldCoder(fd, ft) + } + cf := &coderFieldInfo{ + num: fd.Number(), + offset: fieldOffset, + wiretag: wiretag, + ft: ft, + tagsize: protowire.SizeVarint(wiretag), + funcs: funcs, + mi: childMessage, + validation: newFieldValidationInfo(mi, si.structInfo, fd, ft), + isPointer: (fd.Cardinality() == protoreflect.Repeated || + fd.Kind() == protoreflect.MessageKind || + fd.Kind() == protoreflect.GroupKind), + isRequired: fd.Cardinality() == protoreflect.Required, + presenceIndex: noPresence, + } + + // TODO: Use presence for all fields. + // + // In some cases, such as maps, presence means only "might be set" rather + // than "is definitely set", but every field should have a presence bit to + // permit us to skip over definitely-unset fields at marshal time. 
+ + var hasPresence bool + hasPresence, cf.isLazy = usePresenceForField(si, fd) + + if hasPresence { + cf.presenceIndex, mi.presenceSize = presenceIndex(mi.Desc, fd) + } + + mi.orderedCoderFields = append(mi.orderedCoderFields, cf) + mi.coderFields[cf.num] = cf + } + for i, oneofs := 0, mi.Desc.Oneofs(); i < oneofs.Len(); i++ { + if od := oneofs.Get(i); !od.IsSynthetic() { + mi.initOneofFieldCoders(od, si.structInfo) + } + } + if messageset.IsMessageSet(mi.Desc) { + if !mi.extensionOffset.IsValid() { + panic(fmt.Sprintf("%v: MessageSet with no extensions field", mi.Desc.FullName())) + } + if !mi.unknownOffset.IsValid() { + panic(fmt.Sprintf("%v: MessageSet with no unknown field", mi.Desc.FullName())) + } + mi.isMessageSet = true + } + sort.Slice(mi.orderedCoderFields, func(i, j int) bool { + return mi.orderedCoderFields[i].num < mi.orderedCoderFields[j].num + }) + + var maxDense protoreflect.FieldNumber + for _, cf := range mi.orderedCoderFields { + if cf.num >= 16 && cf.num >= 2*maxDense { + break + } + maxDense = cf.num + } + mi.denseCoderFields = make([]*coderFieldInfo, maxDense+1) + for _, cf := range mi.orderedCoderFields { + if int(cf.num) > len(mi.denseCoderFields) { + break + } + mi.denseCoderFields[cf.num] = cf + } + + // To preserve compatibility with historic wire output, marshal oneofs last. + if mi.Desc.Oneofs().Len() > 0 { + sort.Slice(mi.orderedCoderFields, func(i, j int) bool { + fi := fields.ByNumber(mi.orderedCoderFields[i].num) + fj := fields.ByNumber(mi.orderedCoderFields[j].num) + return order.LegacyFieldOrder(fi, fj) + }) + } + + mi.needsInitCheck = needsInitCheck(mi.Desc) + if mi.methods.Marshal == nil && mi.methods.Size == nil { + mi.methods.Flags |= piface.SupportMarshalDeterministic + mi.methods.Marshal = mi.marshal + mi.methods.Size = mi.size + } + if mi.methods.Unmarshal == nil { + mi.methods.Flags |= piface.SupportUnmarshalDiscardUnknown + mi.methods.Unmarshal = mi.unmarshal + } + if mi.methods.CheckInitialized == nil { + mi.methods.CheckInitialized = mi.checkInitialized + } + if mi.methods.Merge == nil { + mi.methods.Merge = mi.merge + } + if mi.methods.Equal == nil { + mi.methods.Equal = equal + } +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go b/vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go deleted file mode 100644 index 145c577bd..000000000 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go +++ /dev/null @@ -1,210 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build purego || appengine -// +build purego appengine - -package impl - -import ( - "reflect" - - "google.golang.org/protobuf/encoding/protowire" -) - -func sizeEnum(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { - v := p.v.Elem().Int() - return f.tagsize + protowire.SizeVarint(uint64(v)) -} - -func appendEnum(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - v := p.v.Elem().Int() - b = protowire.AppendVarint(b, f.wiretag) - b = protowire.AppendVarint(b, uint64(v)) - return b, nil -} - -func consumeEnum(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { - if wtyp != protowire.VarintType { - return out, errUnknown - } - v, n := protowire.ConsumeVarint(b) - if n < 0 { - return out, errDecode - } - p.v.Elem().SetInt(int64(v)) - out.n = n - return out, nil -} - -func mergeEnum(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { - dst.v.Elem().Set(src.v.Elem()) -} - -var coderEnum = pointerCoderFuncs{ - size: sizeEnum, - marshal: appendEnum, - unmarshal: consumeEnum, - merge: mergeEnum, -} - -func sizeEnumNoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { - if p.v.Elem().Int() == 0 { - return 0 - } - return sizeEnum(p, f, opts) -} - -func appendEnumNoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - if p.v.Elem().Int() == 0 { - return b, nil - } - return appendEnum(b, p, f, opts) -} - -func mergeEnumNoZero(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { - if src.v.Elem().Int() != 0 { - dst.v.Elem().Set(src.v.Elem()) - } -} - -var coderEnumNoZero = pointerCoderFuncs{ - size: sizeEnumNoZero, - marshal: appendEnumNoZero, - unmarshal: consumeEnum, - merge: mergeEnumNoZero, -} - -func sizeEnumPtr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { - return sizeEnum(pointer{p.v.Elem()}, f, opts) -} - -func appendEnumPtr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - return appendEnum(b, pointer{p.v.Elem()}, f, opts) -} - -func consumeEnumPtr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { - if wtyp != protowire.VarintType { - return out, errUnknown - } - if p.v.Elem().IsNil() { - p.v.Elem().Set(reflect.New(p.v.Elem().Type().Elem())) - } - return consumeEnum(b, pointer{p.v.Elem()}, wtyp, f, opts) -} - -func mergeEnumPtr(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { - if !src.v.Elem().IsNil() { - v := reflect.New(dst.v.Type().Elem().Elem()) - v.Elem().Set(src.v.Elem().Elem()) - dst.v.Elem().Set(v) - } -} - -var coderEnumPtr = pointerCoderFuncs{ - size: sizeEnumPtr, - marshal: appendEnumPtr, - unmarshal: consumeEnumPtr, - merge: mergeEnumPtr, -} - -func sizeEnumSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { - s := p.v.Elem() - for i, llen := 0, s.Len(); i < llen; i++ { - size += protowire.SizeVarint(uint64(s.Index(i).Int())) + f.tagsize - } - return size -} - -func appendEnumSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - s := p.v.Elem() - for i, llen := 0, s.Len(); i < llen; i++ { - b = protowire.AppendVarint(b, f.wiretag) - b = protowire.AppendVarint(b, uint64(s.Index(i).Int())) - } - return b, nil -} - -func consumeEnumSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { - s := p.v.Elem() - if wtyp == protowire.BytesType { - b, n := protowire.ConsumeBytes(b) - if n 
< 0 { - return out, errDecode - } - for len(b) > 0 { - v, n := protowire.ConsumeVarint(b) - if n < 0 { - return out, errDecode - } - rv := reflect.New(s.Type().Elem()).Elem() - rv.SetInt(int64(v)) - s.Set(reflect.Append(s, rv)) - b = b[n:] - } - out.n = n - return out, nil - } - if wtyp != protowire.VarintType { - return out, errUnknown - } - v, n := protowire.ConsumeVarint(b) - if n < 0 { - return out, errDecode - } - rv := reflect.New(s.Type().Elem()).Elem() - rv.SetInt(int64(v)) - s.Set(reflect.Append(s, rv)) - out.n = n - return out, nil -} - -func mergeEnumSlice(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { - dst.v.Elem().Set(reflect.AppendSlice(dst.v.Elem(), src.v.Elem())) -} - -var coderEnumSlice = pointerCoderFuncs{ - size: sizeEnumSlice, - marshal: appendEnumSlice, - unmarshal: consumeEnumSlice, - merge: mergeEnumSlice, -} - -func sizeEnumPackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { - s := p.v.Elem() - llen := s.Len() - if llen == 0 { - return 0 - } - n := 0 - for i := 0; i < llen; i++ { - n += protowire.SizeVarint(uint64(s.Index(i).Int())) - } - return f.tagsize + protowire.SizeBytes(n) -} - -func appendEnumPackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - s := p.v.Elem() - llen := s.Len() - if llen == 0 { - return b, nil - } - b = protowire.AppendVarint(b, f.wiretag) - n := 0 - for i := 0; i < llen; i++ { - n += protowire.SizeVarint(uint64(s.Index(i).Int())) - } - b = protowire.AppendVarint(b, uint64(n)) - for i := 0; i < llen; i++ { - b = protowire.AppendVarint(b, uint64(s.Index(i).Int())) - } - return b, nil -} - -var coderEnumPackedSlice = pointerCoderFuncs{ - size: sizeEnumPackedSlice, - marshal: appendEnumPackedSlice, - unmarshal: consumeEnumSlice, - merge: mergeEnumSlice, -} diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go b/vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go index 757642e23..077712c2c 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go @@ -2,9 +2,6 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build !purego && !appengine -// +build !purego,!appengine - package impl // When using unsafe pointers, we can just treat enum values as int32s. diff --git a/vendor/google.golang.org/protobuf/internal/impl/convert.go b/vendor/google.golang.org/protobuf/internal/impl/convert.go index e06ece55a..f72ddd882 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/convert.go +++ b/vendor/google.golang.org/protobuf/internal/impl/convert.go @@ -322,7 +322,7 @@ func (c *stringConverter) PBValueOf(v reflect.Value) protoreflect.Value { return protoreflect.ValueOfString(v.Convert(stringType).String()) } func (c *stringConverter) GoValueOf(v protoreflect.Value) reflect.Value { - // pref.Value.String never panics, so we go through an interface + // protoreflect.Value.String never panics, so we go through an interface // conversion here to check the type. 
s := v.Interface().(string) if c.goType.Kind() == reflect.Slice && s == "" { diff --git a/vendor/google.golang.org/protobuf/internal/impl/convert_map.go b/vendor/google.golang.org/protobuf/internal/impl/convert_map.go index 304244a65..e4580b3ac 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/convert_map.go +++ b/vendor/google.golang.org/protobuf/internal/impl/convert_map.go @@ -101,7 +101,7 @@ func (ms *mapReflect) Mutable(k protoreflect.MapKey) protoreflect.Value { return v } func (ms *mapReflect) Range(f func(protoreflect.MapKey, protoreflect.Value) bool) { - iter := mapRange(ms.v) + iter := ms.v.MapRange() for iter.Next() { k := ms.keyConv.PBValueOf(iter.Key()).MapKey() v := ms.valConv.PBValueOf(iter.Value()) diff --git a/vendor/google.golang.org/protobuf/internal/impl/decode.go b/vendor/google.golang.org/protobuf/internal/impl/decode.go index cda0520c2..e0dd21fa5 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/decode.go +++ b/vendor/google.golang.org/protobuf/internal/impl/decode.go @@ -34,6 +34,8 @@ func (o unmarshalOptions) Options() proto.UnmarshalOptions { AllowPartial: true, DiscardUnknown: o.DiscardUnknown(), Resolver: o.resolver, + + NoLazyDecoding: o.NoLazyDecoding(), } } @@ -41,13 +43,26 @@ func (o unmarshalOptions) DiscardUnknown() bool { return o.flags&protoiface.UnmarshalDiscardUnknown != 0 } -func (o unmarshalOptions) IsDefault() bool { - return o.flags == 0 && o.resolver == protoregistry.GlobalTypes +func (o unmarshalOptions) AliasBuffer() bool { return o.flags&protoiface.UnmarshalAliasBuffer != 0 } +func (o unmarshalOptions) Validated() bool { return o.flags&protoiface.UnmarshalValidated != 0 } +func (o unmarshalOptions) NoLazyDecoding() bool { + return o.flags&protoiface.UnmarshalNoLazyDecoding != 0 +} + +func (o unmarshalOptions) CanBeLazy() bool { + if o.resolver != protoregistry.GlobalTypes { + return false + } + // We ignore the UnmarshalInvalidateSizeCache even though it's not in the default set + return (o.flags & ^(protoiface.UnmarshalAliasBuffer | protoiface.UnmarshalValidated | protoiface.UnmarshalCheckRequired)) == 0 } var lazyUnmarshalOptions = unmarshalOptions{ resolver: protoregistry.GlobalTypes, - depth: protowire.DefaultRecursionLimit, + + flags: protoiface.UnmarshalAliasBuffer | protoiface.UnmarshalValidated, + + depth: protowire.DefaultRecursionLimit, } type unmarshalOutput struct { @@ -94,9 +109,30 @@ func (mi *MessageInfo) unmarshalPointer(b []byte, p pointer, groupTag protowire. if flags.ProtoLegacy && mi.isMessageSet { return unmarshalMessageSet(mi, b, p, opts) } + + lazyDecoding := LazyEnabled() // default + if opts.NoLazyDecoding() { + lazyDecoding = false // explicitly disabled + } + if mi.lazyOffset.IsValid() && lazyDecoding { + return mi.unmarshalPointerLazy(b, p, groupTag, opts) + } + return mi.unmarshalPointerEager(b, p, groupTag, opts) +} + +// unmarshalPointerEager is the message unmarshalling function for all messages that are not lazy. +// The corresponding function for Lazy is in google_lazy.go. +func (mi *MessageInfo) unmarshalPointerEager(b []byte, p pointer, groupTag protowire.Number, opts unmarshalOptions) (out unmarshalOutput, err error) { + initialized := true var requiredMask uint64 var exts *map[int32]ExtensionField + + var presence presence + if mi.presenceOffset.IsValid() { + presence = p.Apply(mi.presenceOffset).PresenceInfo() + } + start := len(b) for len(b) > 0 { // Parse the tag (field number and wire type). 
@@ -154,6 +190,11 @@ func (mi *MessageInfo) unmarshalPointer(b []byte, p pointer, groupTag protowire. if f.funcs.isInit != nil && !o.initialized { initialized = false } + + if f.presenceIndex != noPresence { + presence.SetPresentUnatomic(f.presenceIndex, mi.presenceSize) + } + default: // Possible extension. if exts == nil && mi.extensionOffset.IsValid() { @@ -222,7 +263,7 @@ func (mi *MessageInfo) unmarshalExtension(b []byte, num protowire.Number, wtyp p return out, errUnknown } if flags.LazyUnmarshalExtensions { - if opts.IsDefault() && x.canLazy(xt) { + if opts.CanBeLazy() && x.canLazy(xt) { out, valid := skipExtension(b, xi, num, wtyp, opts) switch valid { case ValidationValid: @@ -270,6 +311,13 @@ func skipExtension(b []byte, xi *extensionFieldInfo, num protowire.Number, wtyp if n < 0 { return out, ValidationUnknown } + + if opts.Validated() { + out.initialized = true + out.n = n + return out, ValidationValid + } + out, st := xi.validation.mi.validate(v, 0, opts) out.n = n return out, st diff --git a/vendor/google.golang.org/protobuf/internal/impl/encode.go b/vendor/google.golang.org/protobuf/internal/impl/encode.go index febd21224..b2e212291 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/encode.go +++ b/vendor/google.golang.org/protobuf/internal/impl/encode.go @@ -10,7 +10,8 @@ import ( "sync/atomic" "google.golang.org/protobuf/internal/flags" - proto "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/internal/protolazy" + "google.golang.org/protobuf/proto" piface "google.golang.org/protobuf/runtime/protoiface" ) @@ -71,11 +72,39 @@ func (mi *MessageInfo) sizePointerSlow(p pointer, opts marshalOptions) (size int e := p.Apply(mi.extensionOffset).Extensions() size += mi.sizeExtensions(e, opts) } + + var lazy **protolazy.XXX_lazyUnmarshalInfo + var presence presence + if mi.presenceOffset.IsValid() { + presence = p.Apply(mi.presenceOffset).PresenceInfo() + if mi.lazyOffset.IsValid() { + lazy = p.Apply(mi.lazyOffset).LazyInfoPtr() + } + } + for _, f := range mi.orderedCoderFields { if f.funcs.size == nil { continue } fptr := p.Apply(f.offset) + + if f.presenceIndex != noPresence { + if !presence.Present(f.presenceIndex) { + continue + } + + if f.isLazy && fptr.AtomicGetPointer().IsNil() { + if lazyFields(opts) { + size += (*lazy).SizeField(uint32(f.num)) + continue + } else { + mi.lazyUnmarshal(p, f.num) + } + } + size += f.funcs.size(fptr, f, opts) + continue + } + if f.isPointer && fptr.Elem().IsNil() { continue } @@ -134,11 +163,52 @@ func (mi *MessageInfo) marshalAppendPointer(b []byte, p pointer, opts marshalOpt return b, err } } + + var lazy **protolazy.XXX_lazyUnmarshalInfo + var presence presence + if mi.presenceOffset.IsValid() { + presence = p.Apply(mi.presenceOffset).PresenceInfo() + if mi.lazyOffset.IsValid() { + lazy = p.Apply(mi.lazyOffset).LazyInfoPtr() + } + } + for _, f := range mi.orderedCoderFields { if f.funcs.marshal == nil { continue } fptr := p.Apply(f.offset) + + if f.presenceIndex != noPresence { + if !presence.Present(f.presenceIndex) { + continue + } + if f.isLazy { + // Be careful, this field needs to be read atomically, like for a get + if f.isPointer && fptr.AtomicGetPointer().IsNil() { + if lazyFields(opts) { + b, _ = (*lazy).AppendField(b, uint32(f.num)) + continue + } else { + mi.lazyUnmarshal(p, f.num) + } + } + + b, err = f.funcs.marshal(b, fptr, f, opts) + if err != nil { + return b, err + } + continue + } else if f.isPointer && fptr.Elem().IsNil() { + continue + } + b, err = f.funcs.marshal(b, fptr, f, opts) + if err != nil 
{ + return b, err + } + continue + } + if f.isPointer && fptr.Elem().IsNil() { continue } @@ -163,6 +233,14 @@ func fullyLazyExtensions(opts marshalOptions) bool { return opts.flags&piface.MarshalDeterministic == 0 } +// lazyFields returns true if we should attempt to keep fields lazy over size and marshal. +func lazyFields(opts marshalOptions) bool { + // When deterministic marshaling is requested, force an unmarshal for lazy + // fields to produce a deterministic result, instead of passing through + // bytes lazily that may or may not match what Go Protobuf would produce. + return opts.flags&piface.MarshalDeterministic == 0 +} + func (mi *MessageInfo) sizeExtensions(ext *map[int32]ExtensionField, opts marshalOptions) (n int) { if ext == nil { return 0 diff --git a/vendor/google.golang.org/protobuf/internal/impl/equal.go b/vendor/google.golang.org/protobuf/internal/impl/equal.go new file mode 100644 index 000000000..9f6c32a7d --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/equal.go @@ -0,0 +1,224 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impl + +import ( + "bytes" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/runtime/protoiface" +) + +func equal(in protoiface.EqualInput) protoiface.EqualOutput { + return protoiface.EqualOutput{Equal: equalMessage(in.MessageA, in.MessageB)} +} + +// equalMessage is a fast-path variant of protoreflect.equalMessage. +// It takes advantage of the internal messageState type to avoid +// unnecessary allocations, type assertions. +func equalMessage(mx, my protoreflect.Message) bool { + if mx == nil || my == nil { + return mx == my + } + if mx.Descriptor() != my.Descriptor() { + return false + } + + msx, ok := mx.(*messageState) + if !ok { + return protoreflect.ValueOfMessage(mx).Equal(protoreflect.ValueOfMessage(my)) + } + msy, ok := my.(*messageState) + if !ok { + return protoreflect.ValueOfMessage(mx).Equal(protoreflect.ValueOfMessage(my)) + } + + mi := msx.messageInfo() + miy := msy.messageInfo() + if mi != miy { + return protoreflect.ValueOfMessage(mx).Equal(protoreflect.ValueOfMessage(my)) + } + mi.init() + // Compares regular fields + // Modified Message.Range code that compares two messages of the same type + // while going over the fields. + for _, ri := range mi.rangeInfos { + var fd protoreflect.FieldDescriptor + var vx, vy protoreflect.Value + + switch ri := ri.(type) { + case *fieldInfo: + hx := ri.has(msx.pointer()) + hy := ri.has(msy.pointer()) + if hx != hy { + return false + } + if !hx { + continue + } + fd = ri.fieldDesc + vx = ri.get(msx.pointer()) + vy = ri.get(msy.pointer()) + case *oneofInfo: + fnx := ri.which(msx.pointer()) + fny := ri.which(msy.pointer()) + if fnx != fny { + return false + } + if fnx <= 0 { + continue + } + fi := mi.fields[fnx] + fd = fi.fieldDesc + vx = fi.get(msx.pointer()) + vy = fi.get(msy.pointer()) + } + + if !equalValue(fd, vx, vy) { + return false + } + } + + // Compare extensions. + // This is more complicated because mx or my could have empty/nil extension maps, + // however some populated extension map values are equal to nil extension maps. 
+ emx := mi.extensionMap(msx.pointer()) + emy := mi.extensionMap(msy.pointer()) + if emx != nil { + for k, x := range *emx { + xd := x.Type().TypeDescriptor() + xv := x.Value() + var y ExtensionField + ok := false + if emy != nil { + y, ok = (*emy)[k] + } + // We need to treat empty lists as equal to nil values + if emy == nil || !ok { + if xd.IsList() && xv.List().Len() == 0 { + continue + } + return false + } + + if !equalValue(xd, xv, y.Value()) { + return false + } + } + } + if emy != nil { + // emy may have extensions emx does not have, need to check them as well + for k, y := range *emy { + if emx != nil { + // emx has the field, so we already checked it + if _, ok := (*emx)[k]; ok { + continue + } + } + // Empty lists are equal to nil + if y.Type().TypeDescriptor().IsList() && y.Value().List().Len() == 0 { + continue + } + + // Cant be equal if the extension is populated + return false + } + } + + return equalUnknown(mx.GetUnknown(), my.GetUnknown()) +} + +func equalValue(fd protoreflect.FieldDescriptor, vx, vy protoreflect.Value) bool { + // slow path + if fd.Kind() != protoreflect.MessageKind { + return vx.Equal(vy) + } + + // fast path special cases + if fd.IsMap() { + if fd.MapValue().Kind() == protoreflect.MessageKind { + return equalMessageMap(vx.Map(), vy.Map()) + } + return vx.Equal(vy) + } + + if fd.IsList() { + return equalMessageList(vx.List(), vy.List()) + } + + return equalMessage(vx.Message(), vy.Message()) +} + +// Mostly copied from protoreflect.equalMap. +// This variant only works for messages as map types. +// All other map types should be handled via Value.Equal. +func equalMessageMap(mx, my protoreflect.Map) bool { + if mx.Len() != my.Len() { + return false + } + equal := true + mx.Range(func(k protoreflect.MapKey, vx protoreflect.Value) bool { + if !my.Has(k) { + equal = false + return false + } + vy := my.Get(k) + equal = equalMessage(vx.Message(), vy.Message()) + return equal + }) + return equal +} + +// Mostly copied from protoreflect.equalList. +// The only change is the usage of equalImpl instead of protoreflect.equalValue. +func equalMessageList(lx, ly protoreflect.List) bool { + if lx.Len() != ly.Len() { + return false + } + for i := 0; i < lx.Len(); i++ { + // We only operate on messages here since equalImpl will not call us in any other case. + if !equalMessage(lx.Get(i).Message(), ly.Get(i).Message()) { + return false + } + } + return true +} + +// equalUnknown compares unknown fields by direct comparison on the raw bytes +// of each individual field number. +// Copied from protoreflect.equalUnknown. +func equalUnknown(x, y protoreflect.RawFields) bool { + if len(x) != len(y) { + return false + } + if bytes.Equal([]byte(x), []byte(y)) { + return true + } + + mx := make(map[protoreflect.FieldNumber]protoreflect.RawFields) + my := make(map[protoreflect.FieldNumber]protoreflect.RawFields) + for len(x) > 0 { + fnum, _, n := protowire.ConsumeField(x) + mx[fnum] = append(mx[fnum], x[:n]...) + x = x[n:] + } + for len(y) > 0 { + fnum, _, n := protowire.ConsumeField(y) + my[fnum] = append(my[fnum], y[:n]...) 
+ y = y[n:] + } + if len(mx) != len(my) { + return false + } + + for k, v1 := range mx { + if v2, ok := my[k]; !ok || !bytes.Equal([]byte(v1), []byte(v2)) { + return false + } + } + + return true +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/lazy.go b/vendor/google.golang.org/protobuf/internal/impl/lazy.go new file mode 100644 index 000000000..c7de31e24 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/lazy.go @@ -0,0 +1,433 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impl + +import ( + "fmt" + "math/bits" + "os" + "reflect" + "sort" + "sync/atomic" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/internal/protolazy" + "google.golang.org/protobuf/reflect/protoreflect" + preg "google.golang.org/protobuf/reflect/protoregistry" + piface "google.golang.org/protobuf/runtime/protoiface" +) + +var enableLazy int32 = func() int32 { + if os.Getenv("GOPROTODEBUG") == "nolazy" { + return 0 + } + return 1 +}() + +// EnableLazyUnmarshal enables lazy unmarshaling. +func EnableLazyUnmarshal(enable bool) { + if enable { + atomic.StoreInt32(&enableLazy, 1) + return + } + atomic.StoreInt32(&enableLazy, 0) +} + +// LazyEnabled reports whether lazy unmarshalling is currently enabled. +func LazyEnabled() bool { + return atomic.LoadInt32(&enableLazy) != 0 +} + +// UnmarshalField unmarshals a field in a message. +func UnmarshalField(m interface{}, num protowire.Number) { + switch m := m.(type) { + case *messageState: + m.messageInfo().lazyUnmarshal(m.pointer(), num) + case *messageReflectWrapper: + m.messageInfo().lazyUnmarshal(m.pointer(), num) + default: + panic(fmt.Sprintf("unsupported wrapper type %T", m)) + } +} + +func (mi *MessageInfo) lazyUnmarshal(p pointer, num protoreflect.FieldNumber) { + var f *coderFieldInfo + if int(num) < len(mi.denseCoderFields) { + f = mi.denseCoderFields[num] + } else { + f = mi.coderFields[num] + } + if f == nil { + panic(fmt.Sprintf("lazyUnmarshal: field info for %v.%v", mi.Desc.FullName(), num)) + } + lazy := *p.Apply(mi.lazyOffset).LazyInfoPtr() + start, end, found, _, multipleEntries := lazy.FindFieldInProto(uint32(num)) + if !found && multipleEntries == nil { + panic(fmt.Sprintf("lazyUnmarshal: can't find field data for %v.%v", mi.Desc.FullName(), num)) + } + // The actual pointer in the message can not be set until the whole struct is filled in, otherwise we will have races. + // Create another pointer and set it atomically, if we won the race and the pointer in the original message is still nil. + fp := pointerOfValue(reflect.New(f.ft)) + if multipleEntries != nil { + for _, entry := range multipleEntries { + mi.unmarshalField(lazy.Buffer()[entry.Start:entry.End], fp, f, lazy, lazy.UnmarshalFlags()) + } + } else { + mi.unmarshalField(lazy.Buffer()[start:end], fp, f, lazy, lazy.UnmarshalFlags()) + } + p.Apply(f.offset).AtomicSetPointerIfNil(fp.Elem()) +} + +func (mi *MessageInfo) unmarshalField(b []byte, p pointer, f *coderFieldInfo, lazyInfo *protolazy.XXX_lazyUnmarshalInfo, flags piface.UnmarshalInputFlags) error { + opts := lazyUnmarshalOptions + opts.flags |= flags + for len(b) > 0 { + // Parse the tag (field number and wire type). 
+ var tag uint64 + if b[0] < 0x80 { + tag = uint64(b[0]) + b = b[1:] + } else if len(b) >= 2 && b[1] < 128 { + tag = uint64(b[0]&0x7f) + uint64(b[1])<<7 + b = b[2:] + } else { + var n int + tag, n = protowire.ConsumeVarint(b) + if n < 0 { + return errors.New("invalid wire data") + } + b = b[n:] + } + var num protowire.Number + if n := tag >> 3; n < uint64(protowire.MinValidNumber) || n > uint64(protowire.MaxValidNumber) { + return errors.New("invalid wire data") + } else { + num = protowire.Number(n) + } + wtyp := protowire.Type(tag & 7) + if num == f.num { + o, err := f.funcs.unmarshal(b, p, wtyp, f, opts) + if err == nil { + b = b[o.n:] + continue + } + if err != errUnknown { + return err + } + } + n := protowire.ConsumeFieldValue(num, wtyp, b) + if n < 0 { + return errors.New("invalid wire data") + } + b = b[n:] + } + return nil +} + +func (mi *MessageInfo) skipField(b []byte, f *coderFieldInfo, wtyp protowire.Type, opts unmarshalOptions) (out unmarshalOutput, _ ValidationStatus) { + fmi := f.validation.mi + if fmi == nil { + fd := mi.Desc.Fields().ByNumber(f.num) + if fd == nil { + return out, ValidationUnknown + } + messageName := fd.Message().FullName() + messageType, err := preg.GlobalTypes.FindMessageByName(messageName) + if err != nil { + return out, ValidationUnknown + } + var ok bool + fmi, ok = messageType.(*MessageInfo) + if !ok { + return out, ValidationUnknown + } + } + fmi.init() + switch f.validation.typ { + case validationTypeMessage: + if wtyp != protowire.BytesType { + return out, ValidationWrongWireType + } + v, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, ValidationInvalid + } + out, st := fmi.validate(v, 0, opts) + out.n = n + return out, st + case validationTypeGroup: + if wtyp != protowire.StartGroupType { + return out, ValidationWrongWireType + } + out, st := fmi.validate(b, f.num, opts) + return out, st + default: + return out, ValidationUnknown + } +} + +// unmarshalPointerLazy is similar to unmarshalPointerEager, but it +// specifically handles lazy unmarshalling. it expects lazyOffset and +// presenceOffset to both be valid. +func (mi *MessageInfo) unmarshalPointerLazy(b []byte, p pointer, groupTag protowire.Number, opts unmarshalOptions) (out unmarshalOutput, err error) { + initialized := true + var requiredMask uint64 + var lazy **protolazy.XXX_lazyUnmarshalInfo + var presence presence + var lazyIndex []protolazy.IndexEntry + var lastNum protowire.Number + outOfOrder := false + lazyDecode := false + presence = p.Apply(mi.presenceOffset).PresenceInfo() + lazy = p.Apply(mi.lazyOffset).LazyInfoPtr() + if !presence.AnyPresent(mi.presenceSize) { + if opts.CanBeLazy() { + // If the message contains existing data, we need to merge into it. + // Lazy unmarshaling doesn't merge, so only enable it when the + // message is empty (has no presence bitmap). + lazyDecode = true + if *lazy == nil { + *lazy = &protolazy.XXX_lazyUnmarshalInfo{} + } + (*lazy).SetUnmarshalFlags(opts.flags) + if !opts.AliasBuffer() { + // Make a copy of the buffer for lazy unmarshaling. + // Set the AliasBuffer flag so recursive unmarshal + // operations reuse the copy. + b = append([]byte{}, b...) + opts.flags |= piface.UnmarshalAliasBuffer + } + (*lazy).SetBuffer(b) + } + } + // Track special handling of lazy fields. + // + // In the common case, all fields are lazyValidateOnly (and lazyFields remains nil). + // In the event that validation for a field fails, this map tracks handling of the field. 
+ type lazyAction uint8 + const ( + lazyValidateOnly lazyAction = iota // validate the field only + lazyUnmarshalNow // eagerly unmarshal the field + lazyUnmarshalLater // unmarshal the field after the message is fully processed + ) + var lazyFields map[*coderFieldInfo]lazyAction + var exts *map[int32]ExtensionField + start := len(b) + pos := 0 + for len(b) > 0 { + // Parse the tag (field number and wire type). + var tag uint64 + if b[0] < 0x80 { + tag = uint64(b[0]) + b = b[1:] + } else if len(b) >= 2 && b[1] < 128 { + tag = uint64(b[0]&0x7f) + uint64(b[1])<<7 + b = b[2:] + } else { + var n int + tag, n = protowire.ConsumeVarint(b) + if n < 0 { + return out, errDecode + } + b = b[n:] + } + var num protowire.Number + if n := tag >> 3; n < uint64(protowire.MinValidNumber) || n > uint64(protowire.MaxValidNumber) { + return out, errors.New("invalid field number") + } else { + num = protowire.Number(n) + } + wtyp := protowire.Type(tag & 7) + + if wtyp == protowire.EndGroupType { + if num != groupTag { + return out, errors.New("mismatching end group marker") + } + groupTag = 0 + break + } + + var f *coderFieldInfo + if int(num) < len(mi.denseCoderFields) { + f = mi.denseCoderFields[num] + } else { + f = mi.coderFields[num] + } + var n int + err := errUnknown + discardUnknown := false + Field: + switch { + case f != nil: + if f.funcs.unmarshal == nil { + break + } + if f.isLazy && lazyDecode { + switch { + case lazyFields == nil || lazyFields[f] == lazyValidateOnly: + // Attempt to validate this field and leave it for later lazy unmarshaling. + o, valid := mi.skipField(b, f, wtyp, opts) + switch valid { + case ValidationValid: + // Skip over the valid field and continue. + err = nil + presence.SetPresentUnatomic(f.presenceIndex, mi.presenceSize) + requiredMask |= f.validation.requiredBit + if !o.initialized { + initialized = false + } + n = o.n + break Field + case ValidationInvalid: + return out, errors.New("invalid proto wire format") + case ValidationWrongWireType: + break Field + case ValidationUnknown: + if lazyFields == nil { + lazyFields = make(map[*coderFieldInfo]lazyAction) + } + if presence.Present(f.presenceIndex) { + // We were unable to determine if the field is valid or not, + // and we've already skipped over at least one instance of this + // field. Clear the presence bit (so if we stop decoding early, + // we don't leave a partially-initialized field around) and flag + // the field for unmarshaling before we return. + presence.ClearPresent(f.presenceIndex) + lazyFields[f] = lazyUnmarshalLater + discardUnknown = true + break Field + } else { + // We were unable to determine if the field is valid or not, + // but this is the first time we've seen it. Flag it as needing + // eager unmarshaling and fall through to the eager unmarshal case below. + lazyFields[f] = lazyUnmarshalNow + } + } + case lazyFields[f] == lazyUnmarshalLater: + // This field will be unmarshaled in a separate pass below. + // Skip over it here. + discardUnknown = true + break Field + default: + // Eagerly unmarshal the field. 
+ } + } + if f.isLazy && !lazyDecode && presence.Present(f.presenceIndex) { + if p.Apply(f.offset).AtomicGetPointer().IsNil() { + mi.lazyUnmarshal(p, f.num) + } + } + var o unmarshalOutput + o, err = f.funcs.unmarshal(b, p.Apply(f.offset), wtyp, f, opts) + n = o.n + if err != nil { + break + } + requiredMask |= f.validation.requiredBit + if f.funcs.isInit != nil && !o.initialized { + initialized = false + } + if f.presenceIndex != noPresence { + presence.SetPresentUnatomic(f.presenceIndex, mi.presenceSize) + } + default: + // Possible extension. + if exts == nil && mi.extensionOffset.IsValid() { + exts = p.Apply(mi.extensionOffset).Extensions() + if *exts == nil { + *exts = make(map[int32]ExtensionField) + } + } + if exts == nil { + break + } + var o unmarshalOutput + o, err = mi.unmarshalExtension(b, num, wtyp, *exts, opts) + if err != nil { + break + } + n = o.n + if !o.initialized { + initialized = false + } + } + if err != nil { + if err != errUnknown { + return out, err + } + n = protowire.ConsumeFieldValue(num, wtyp, b) + if n < 0 { + return out, errDecode + } + if !discardUnknown && !opts.DiscardUnknown() && mi.unknownOffset.IsValid() { + u := mi.mutableUnknownBytes(p) + *u = protowire.AppendTag(*u, num, wtyp) + *u = append(*u, b[:n]...) + } + } + b = b[n:] + end := start - len(b) + if lazyDecode && f != nil && f.isLazy { + if num != lastNum { + lazyIndex = append(lazyIndex, protolazy.IndexEntry{ + FieldNum: uint32(num), + Start: uint32(pos), + End: uint32(end), + }) + } else { + i := len(lazyIndex) - 1 + lazyIndex[i].End = uint32(end) + lazyIndex[i].MultipleContiguous = true + } + } + if num < lastNum { + outOfOrder = true + } + pos = end + lastNum = num + } + if groupTag != 0 { + return out, errors.New("missing end group marker") + } + if lazyFields != nil { + // Some fields failed validation, and now need to be unmarshaled. 
+ for f, action := range lazyFields { + if action != lazyUnmarshalLater { + continue + } + initialized = false + if *lazy == nil { + *lazy = &protolazy.XXX_lazyUnmarshalInfo{} + } + if err := mi.unmarshalField((*lazy).Buffer(), p.Apply(f.offset), f, *lazy, opts.flags); err != nil { + return out, err + } + presence.SetPresentUnatomic(f.presenceIndex, mi.presenceSize) + } + } + if lazyDecode { + if outOfOrder { + sort.Slice(lazyIndex, func(i, j int) bool { + return lazyIndex[i].FieldNum < lazyIndex[j].FieldNum || + (lazyIndex[i].FieldNum == lazyIndex[j].FieldNum && + lazyIndex[i].Start < lazyIndex[j].Start) + }) + } + if *lazy == nil { + *lazy = &protolazy.XXX_lazyUnmarshalInfo{} + } + + (*lazy).SetIndex(lazyIndex) + } + if mi.numRequiredFields > 0 && bits.OnesCount64(requiredMask) != int(mi.numRequiredFields) { + initialized = false + } + if initialized { + out.initialized = true + } + out.n = start - len(b) + return out, nil +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go index 6e8677ee6..b6849d669 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go +++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go @@ -160,6 +160,7 @@ func (x placeholderExtension) HasPresence() bool func (x placeholderExtension) HasOptionalKeyword() bool { return false } func (x placeholderExtension) IsExtension() bool { return true } func (x placeholderExtension) IsWeak() bool { return false } +func (x placeholderExtension) IsLazy() bool { return false } func (x placeholderExtension) IsPacked() bool { return false } func (x placeholderExtension) IsList() bool { return false } func (x placeholderExtension) IsMap() bool { return false } diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go index bf0b6049b..a51dffbe2 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go +++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go @@ -310,12 +310,9 @@ func aberrantAppendField(md *filedesc.Message, goType reflect.Type, tag, tagKey, fd.L0.Parent = md fd.L0.Index = n - if fd.L1.IsWeak || fd.L1.EditionFeatures.IsPacked { + if fd.L1.EditionFeatures.IsPacked { fd.L1.Options = func() protoreflect.ProtoMessage { opts := descopts.Field.ProtoReflect().New() - if fd.L1.IsWeak { - opts.Set(opts.Descriptor().Fields().ByName("weak"), protoreflect.ValueOfBool(true)) - } if fd.L1.EditionFeatures.IsPacked { opts.Set(opts.Descriptor().Fields().ByName("packed"), protoreflect.ValueOfBool(fd.L1.EditionFeatures.IsPacked)) } diff --git a/vendor/google.golang.org/protobuf/internal/impl/merge.go b/vendor/google.golang.org/protobuf/internal/impl/merge.go index 7e65f64f2..8ffdce67d 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/merge.go +++ b/vendor/google.golang.org/protobuf/internal/impl/merge.go @@ -41,11 +41,38 @@ func (mi *MessageInfo) mergePointer(dst, src pointer, opts mergeOptions) { if src.IsNil() { return } + + var presenceSrc presence + var presenceDst presence + if mi.presenceOffset.IsValid() { + presenceSrc = src.Apply(mi.presenceOffset).PresenceInfo() + presenceDst = dst.Apply(mi.presenceOffset).PresenceInfo() + } + for _, f := range mi.orderedCoderFields { if f.funcs.merge == nil { continue } sfptr := src.Apply(f.offset) + + if f.presenceIndex != noPresence { + if !presenceSrc.Present(f.presenceIndex) { + continue + } + dfptr := 
dst.Apply(f.offset) + if f.isLazy { + if sfptr.AtomicGetPointer().IsNil() { + mi.lazyUnmarshal(src, f.num) + } + if presenceDst.Present(f.presenceIndex) && dfptr.AtomicGetPointer().IsNil() { + mi.lazyUnmarshal(dst, f.num) + } + } + f.funcs.merge(dst.Apply(f.offset), sfptr, f, opts) + presenceDst.SetPresentUnatomic(f.presenceIndex, mi.presenceSize) + continue + } + if f.isPointer && sfptr.Elem().IsNil() { continue } diff --git a/vendor/google.golang.org/protobuf/internal/impl/message.go b/vendor/google.golang.org/protobuf/internal/impl/message.go index 019399d45..d50423dcb 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/message.go +++ b/vendor/google.golang.org/protobuf/internal/impl/message.go @@ -14,7 +14,6 @@ import ( "google.golang.org/protobuf/internal/genid" "google.golang.org/protobuf/reflect/protoreflect" - "google.golang.org/protobuf/reflect/protoregistry" ) // MessageInfo provides protobuf related functionality for a given Go type @@ -30,8 +29,8 @@ type MessageInfo struct { // Desc is the underlying message descriptor type and must be populated. Desc protoreflect.MessageDescriptor - // Exporter must be provided in a purego environment in order to provide - // access to unexported fields. + // Deprecated: Exporter will be removed the next time we bump + // protoimpl.GenVersion. See https://github.com/golang/protobuf/issues/1640 Exporter exporter // OneofWrappers is list of pointers to oneof wrapper struct types. @@ -79,6 +78,9 @@ func (mi *MessageInfo) initOnce() { if mi.initDone == 1 { return } + if opaqueInitHook(mi) { + return + } t := mi.GoReflectType if t.Kind() != reflect.Ptr && t.Elem().Kind() != reflect.Struct { @@ -117,7 +119,6 @@ type ( var ( sizecacheType = reflect.TypeOf(SizeCache(0)) - weakFieldsType = reflect.TypeOf(WeakFields(nil)) unknownFieldsAType = reflect.TypeOf(unknownFieldsA(nil)) unknownFieldsBType = reflect.TypeOf(unknownFieldsB(nil)) extensionFieldsType = reflect.TypeOf(ExtensionFields(nil)) @@ -126,13 +127,14 @@ var ( type structInfo struct { sizecacheOffset offset sizecacheType reflect.Type - weakOffset offset - weakType reflect.Type unknownOffset offset unknownType reflect.Type extensionOffset offset extensionType reflect.Type + lazyOffset offset + presenceOffset offset + fieldsByNumber map[protoreflect.FieldNumber]reflect.StructField oneofsByName map[protoreflect.Name]reflect.StructField oneofWrappersByType map[reflect.Type]protoreflect.FieldNumber @@ -142,9 +144,10 @@ type structInfo struct { func (mi *MessageInfo) makeStructInfo(t reflect.Type) structInfo { si := structInfo{ sizecacheOffset: invalidOffset, - weakOffset: invalidOffset, unknownOffset: invalidOffset, extensionOffset: invalidOffset, + lazyOffset: invalidOffset, + presenceOffset: invalidOffset, fieldsByNumber: map[protoreflect.FieldNumber]reflect.StructField{}, oneofsByName: map[protoreflect.Name]reflect.StructField{}, @@ -157,24 +160,23 @@ fieldLoop: switch f := t.Field(i); f.Name { case genid.SizeCache_goname, genid.SizeCacheA_goname: if f.Type == sizecacheType { - si.sizecacheOffset = offsetOf(f, mi.Exporter) + si.sizecacheOffset = offsetOf(f) si.sizecacheType = f.Type } - case genid.WeakFields_goname, genid.WeakFieldsA_goname: - if f.Type == weakFieldsType { - si.weakOffset = offsetOf(f, mi.Exporter) - si.weakType = f.Type - } case genid.UnknownFields_goname, genid.UnknownFieldsA_goname: if f.Type == unknownFieldsAType || f.Type == unknownFieldsBType { - si.unknownOffset = offsetOf(f, mi.Exporter) + si.unknownOffset = offsetOf(f) si.unknownType = f.Type } case 
genid.ExtensionFields_goname, genid.ExtensionFieldsA_goname, genid.ExtensionFieldsB_goname: if f.Type == extensionFieldsType { - si.extensionOffset = offsetOf(f, mi.Exporter) + si.extensionOffset = offsetOf(f) si.extensionType = f.Type } + case "lazyFields", "XXX_lazyUnmarshalInfo": + si.lazyOffset = offsetOf(f) + case "XXX_presence": + si.presenceOffset = offsetOf(f) default: for _, s := range strings.Split(f.Tag.Get("protobuf"), ",") { if len(s) > 0 && strings.Trim(s, "0123456789") == "" { @@ -244,9 +246,6 @@ func (mi *MessageInfo) Message(i int) protoreflect.MessageType { mi.init() fd := mi.Desc.Fields().Get(i) switch { - case fd.IsWeak(): - mt, _ := protoregistry.GlobalTypes.FindMessageByName(fd.Message().FullName()) - return mt case fd.IsMap(): return mapEntryType{fd.Message(), mi.fieldTypes[fd.Number()]} default: diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_opaque.go b/vendor/google.golang.org/protobuf/internal/impl/message_opaque.go new file mode 100644 index 000000000..dd55e8e00 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/message_opaque.go @@ -0,0 +1,627 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impl + +import ( + "fmt" + "math" + "reflect" + "strings" + "sync/atomic" + + "google.golang.org/protobuf/reflect/protoreflect" +) + +type opaqueStructInfo struct { + structInfo +} + +// isOpaque determines whether a protobuf message type is on the Opaque API. It +// checks whether the type is a Go struct that protoc-gen-go would generate. +// +// This function only detects newly generated messages from the v2 +// implementation of protoc-gen-go. It is unable to classify generated messages +// that are too old or those that are generated by a different generator +// such as protoc-gen-gogo. +func isOpaque(t reflect.Type) bool { + // The current detection mechanism is to simply check the first field + // for a struct tag with the "protogen" key. + if t.Kind() == reflect.Struct && t.NumField() > 0 { + pgt := t.Field(0).Tag.Get("protogen") + return strings.HasPrefix(pgt, "opaque.") + } + return false +} + +func opaqueInitHook(mi *MessageInfo) bool { + mt := mi.GoReflectType.Elem() + si := opaqueStructInfo{ + structInfo: mi.makeStructInfo(mt), + } + + if !isOpaque(mt) { + return false + } + + defer atomic.StoreUint32(&mi.initDone, 1) + + mi.fields = map[protoreflect.FieldNumber]*fieldInfo{} + fds := mi.Desc.Fields() + for i := 0; i < fds.Len(); i++ { + fd := fds.Get(i) + fs := si.fieldsByNumber[fd.Number()] + var fi fieldInfo + usePresence, _ := usePresenceForField(si, fd) + + switch { + case fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic(): + // Oneofs are no different for opaque. + fi = fieldInfoForOneof(fd, si.oneofsByName[fd.ContainingOneof().Name()], mi.Exporter, si.oneofWrappersByNumber[fd.Number()]) + case fd.IsMap(): + fi = mi.fieldInfoForMapOpaque(si, fd, fs) + case fd.IsList() && fd.Message() == nil && usePresence: + fi = mi.fieldInfoForScalarListOpaque(si, fd, fs) + case fd.IsList() && fd.Message() == nil: + // Proto3 lists without presence can use same access methods as open + fi = fieldInfoForList(fd, fs, mi.Exporter) + case fd.IsList() && usePresence: + fi = mi.fieldInfoForMessageListOpaque(si, fd, fs) + case fd.IsList(): + // Proto3 opaque messages that does not need presence bitmap. 
+ // Different representation than open struct, but same logic + fi = mi.fieldInfoForMessageListOpaqueNoPresence(si, fd, fs) + case fd.Message() != nil && usePresence: + fi = mi.fieldInfoForMessageOpaque(si, fd, fs) + case fd.Message() != nil: + // Proto3 messages without presence can use same access methods as open + fi = fieldInfoForMessage(fd, fs, mi.Exporter) + default: + fi = mi.fieldInfoForScalarOpaque(si, fd, fs) + } + mi.fields[fd.Number()] = &fi + } + mi.oneofs = map[protoreflect.Name]*oneofInfo{} + for i := 0; i < mi.Desc.Oneofs().Len(); i++ { + od := mi.Desc.Oneofs().Get(i) + mi.oneofs[od.Name()] = makeOneofInfoOpaque(mi, od, si.structInfo, mi.Exporter) + } + + mi.denseFields = make([]*fieldInfo, fds.Len()*2) + for i := 0; i < fds.Len(); i++ { + if fd := fds.Get(i); int(fd.Number()) < len(mi.denseFields) { + mi.denseFields[fd.Number()] = mi.fields[fd.Number()] + } + } + + for i := 0; i < fds.Len(); { + fd := fds.Get(i) + if od := fd.ContainingOneof(); od != nil && !fd.ContainingOneof().IsSynthetic() { + mi.rangeInfos = append(mi.rangeInfos, mi.oneofs[od.Name()]) + i += od.Fields().Len() + } else { + mi.rangeInfos = append(mi.rangeInfos, mi.fields[fd.Number()]) + i++ + } + } + + mi.makeExtensionFieldsFunc(mt, si.structInfo) + mi.makeUnknownFieldsFunc(mt, si.structInfo) + mi.makeOpaqueCoderMethods(mt, si) + mi.makeFieldTypes(si.structInfo) + + return true +} + +func makeOneofInfoOpaque(mi *MessageInfo, od protoreflect.OneofDescriptor, si structInfo, x exporter) *oneofInfo { + oi := &oneofInfo{oneofDesc: od} + if od.IsSynthetic() { + fd := od.Fields().Get(0) + index, _ := presenceIndex(mi.Desc, fd) + oi.which = func(p pointer) protoreflect.FieldNumber { + if p.IsNil() { + return 0 + } + if !mi.present(p, index) { + return 0 + } + return od.Fields().Get(0).Number() + } + return oi + } + // Dispatch to non-opaque oneof implementation for non-synthetic oneofs. + return makeOneofInfo(od, si, x) +} + +func (mi *MessageInfo) fieldInfoForMapOpaque(si opaqueStructInfo, fd protoreflect.FieldDescriptor, fs reflect.StructField) fieldInfo { + ft := fs.Type + if ft.Kind() != reflect.Map { + panic(fmt.Sprintf("invalid type: got %v, want map kind", ft)) + } + fieldOffset := offsetOf(fs) + conv := NewConverter(ft, fd) + return fieldInfo{ + fieldDesc: fd, + has: func(p pointer) bool { + if p.IsNil() { + return false + } + // Don't bother checking presence bits, since we need to + // look at the map length even if the presence bit is set. 
+ rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + return rv.Len() > 0 + }, + clear: func(p pointer) { + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + rv.Set(reflect.Zero(rv.Type())) + }, + get: func(p pointer) protoreflect.Value { + if p.IsNil() { + return conv.Zero() + } + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + if rv.Len() == 0 { + return conv.Zero() + } + return conv.PBValueOf(rv) + }, + set: func(p pointer, v protoreflect.Value) { + pv := conv.GoValueOf(v) + if pv.IsNil() { + panic(fmt.Sprintf("invalid value: setting map field to read-only value")) + } + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + rv.Set(pv) + }, + mutable: func(p pointer) protoreflect.Value { + v := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + if v.IsNil() { + v.Set(reflect.MakeMap(fs.Type)) + } + return conv.PBValueOf(v) + }, + newField: func() protoreflect.Value { + return conv.New() + }, + } +} + +func (mi *MessageInfo) fieldInfoForScalarListOpaque(si opaqueStructInfo, fd protoreflect.FieldDescriptor, fs reflect.StructField) fieldInfo { + ft := fs.Type + if ft.Kind() != reflect.Slice { + panic(fmt.Sprintf("invalid type: got %v, want slice kind", ft)) + } + conv := NewConverter(reflect.PtrTo(ft), fd) + fieldOffset := offsetOf(fs) + index, _ := presenceIndex(mi.Desc, fd) + return fieldInfo{ + fieldDesc: fd, + has: func(p pointer) bool { + if p.IsNil() { + return false + } + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + return rv.Len() > 0 + }, + clear: func(p pointer) { + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + rv.Set(reflect.Zero(rv.Type())) + }, + get: func(p pointer) protoreflect.Value { + if p.IsNil() { + return conv.Zero() + } + rv := p.Apply(fieldOffset).AsValueOf(fs.Type) + if rv.Elem().Len() == 0 { + return conv.Zero() + } + return conv.PBValueOf(rv) + }, + set: func(p pointer, v protoreflect.Value) { + pv := conv.GoValueOf(v) + if pv.IsNil() { + panic(fmt.Sprintf("invalid value: setting repeated field to read-only value")) + } + mi.setPresent(p, index) + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + rv.Set(pv.Elem()) + }, + mutable: func(p pointer) protoreflect.Value { + mi.setPresent(p, index) + return conv.PBValueOf(p.Apply(fieldOffset).AsValueOf(fs.Type)) + }, + newField: func() protoreflect.Value { + return conv.New() + }, + } +} + +func (mi *MessageInfo) fieldInfoForMessageListOpaque(si opaqueStructInfo, fd protoreflect.FieldDescriptor, fs reflect.StructField) fieldInfo { + ft := fs.Type + if ft.Kind() != reflect.Ptr || ft.Elem().Kind() != reflect.Slice { + panic(fmt.Sprintf("invalid type: got %v, want slice kind", ft)) + } + conv := NewConverter(ft, fd) + fieldOffset := offsetOf(fs) + index, _ := presenceIndex(mi.Desc, fd) + fieldNumber := fd.Number() + return fieldInfo{ + fieldDesc: fd, + has: func(p pointer) bool { + if p.IsNil() { + return false + } + if !mi.present(p, index) { + return false + } + sp := p.Apply(fieldOffset).AtomicGetPointer() + if sp.IsNil() { + // Lazily unmarshal this field. 
+ mi.lazyUnmarshal(p, fieldNumber) + sp = p.Apply(fieldOffset).AtomicGetPointer() + } + rv := sp.AsValueOf(fs.Type.Elem()) + return rv.Elem().Len() > 0 + }, + clear: func(p pointer) { + fp := p.Apply(fieldOffset) + sp := fp.AtomicGetPointer() + if sp.IsNil() { + sp = fp.AtomicSetPointerIfNil(pointerOfValue(reflect.New(fs.Type.Elem()))) + mi.setPresent(p, index) + } + rv := sp.AsValueOf(fs.Type.Elem()) + rv.Elem().Set(reflect.Zero(rv.Type().Elem())) + }, + get: func(p pointer) protoreflect.Value { + if p.IsNil() { + return conv.Zero() + } + if !mi.present(p, index) { + return conv.Zero() + } + sp := p.Apply(fieldOffset).AtomicGetPointer() + if sp.IsNil() { + // Lazily unmarshal this field. + mi.lazyUnmarshal(p, fieldNumber) + sp = p.Apply(fieldOffset).AtomicGetPointer() + } + rv := sp.AsValueOf(fs.Type.Elem()) + if rv.Elem().Len() == 0 { + return conv.Zero() + } + return conv.PBValueOf(rv) + }, + set: func(p pointer, v protoreflect.Value) { + fp := p.Apply(fieldOffset) + sp := fp.AtomicGetPointer() + if sp.IsNil() { + sp = fp.AtomicSetPointerIfNil(pointerOfValue(reflect.New(fs.Type.Elem()))) + mi.setPresent(p, index) + } + rv := sp.AsValueOf(fs.Type.Elem()) + val := conv.GoValueOf(v) + if val.IsNil() { + panic(fmt.Sprintf("invalid value: setting repeated field to read-only value")) + } else { + rv.Elem().Set(val.Elem()) + } + }, + mutable: func(p pointer) protoreflect.Value { + fp := p.Apply(fieldOffset) + sp := fp.AtomicGetPointer() + if sp.IsNil() { + if mi.present(p, index) { + // Lazily unmarshal this field. + mi.lazyUnmarshal(p, fieldNumber) + sp = p.Apply(fieldOffset).AtomicGetPointer() + } else { + sp = fp.AtomicSetPointerIfNil(pointerOfValue(reflect.New(fs.Type.Elem()))) + mi.setPresent(p, index) + } + } + rv := sp.AsValueOf(fs.Type.Elem()) + return conv.PBValueOf(rv) + }, + newField: func() protoreflect.Value { + return conv.New() + }, + } +} + +func (mi *MessageInfo) fieldInfoForMessageListOpaqueNoPresence(si opaqueStructInfo, fd protoreflect.FieldDescriptor, fs reflect.StructField) fieldInfo { + ft := fs.Type + if ft.Kind() != reflect.Ptr || ft.Elem().Kind() != reflect.Slice { + panic(fmt.Sprintf("invalid type: got %v, want slice kind", ft)) + } + conv := NewConverter(ft, fd) + fieldOffset := offsetOf(fs) + return fieldInfo{ + fieldDesc: fd, + has: func(p pointer) bool { + if p.IsNil() { + return false + } + sp := p.Apply(fieldOffset).AtomicGetPointer() + if sp.IsNil() { + return false + } + rv := sp.AsValueOf(fs.Type.Elem()) + return rv.Elem().Len() > 0 + }, + clear: func(p pointer) { + sp := p.Apply(fieldOffset).AtomicGetPointer() + if !sp.IsNil() { + rv := sp.AsValueOf(fs.Type.Elem()) + rv.Elem().Set(reflect.Zero(rv.Type().Elem())) + } + }, + get: func(p pointer) protoreflect.Value { + if p.IsNil() { + return conv.Zero() + } + sp := p.Apply(fieldOffset).AtomicGetPointer() + if sp.IsNil() { + return conv.Zero() + } + rv := sp.AsValueOf(fs.Type.Elem()) + if rv.Elem().Len() == 0 { + return conv.Zero() + } + return conv.PBValueOf(rv) + }, + set: func(p pointer, v protoreflect.Value) { + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + if rv.IsNil() { + rv.Set(reflect.New(fs.Type.Elem())) + } + val := conv.GoValueOf(v) + if val.IsNil() { + panic(fmt.Sprintf("invalid value: setting repeated field to read-only value")) + } else { + rv.Elem().Set(val.Elem()) + } + }, + mutable: func(p pointer) protoreflect.Value { + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + if rv.IsNil() { + rv.Set(reflect.New(fs.Type.Elem())) + } + return conv.PBValueOf(rv) + }, + newField: func() 
protoreflect.Value { + return conv.New() + }, + } +} + +func (mi *MessageInfo) fieldInfoForScalarOpaque(si opaqueStructInfo, fd protoreflect.FieldDescriptor, fs reflect.StructField) fieldInfo { + ft := fs.Type + nullable := fd.HasPresence() + if oneof := fd.ContainingOneof(); oneof != nil && oneof.IsSynthetic() { + nullable = true + } + deref := false + if nullable && ft.Kind() == reflect.Ptr { + ft = ft.Elem() + deref = true + } + conv := NewConverter(ft, fd) + fieldOffset := offsetOf(fs) + index, _ := presenceIndex(mi.Desc, fd) + var getter func(p pointer) protoreflect.Value + if !nullable { + getter = getterForDirectScalar(fd, fs, conv, fieldOffset) + } else { + getter = getterForOpaqueNullableScalar(mi, index, fd, fs, conv, fieldOffset) + } + return fieldInfo{ + fieldDesc: fd, + has: func(p pointer) bool { + if p.IsNil() { + return false + } + if nullable { + return mi.present(p, index) + } + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + switch rv.Kind() { + case reflect.Bool: + return rv.Bool() + case reflect.Int32, reflect.Int64: + return rv.Int() != 0 + case reflect.Uint32, reflect.Uint64: + return rv.Uint() != 0 + case reflect.Float32, reflect.Float64: + return rv.Float() != 0 || math.Signbit(rv.Float()) + case reflect.String, reflect.Slice: + return rv.Len() > 0 + default: + panic(fmt.Sprintf("invalid type: %v", rv.Type())) // should never happen + } + }, + clear: func(p pointer) { + if nullable { + mi.clearPresent(p, index) + } + // This is only valuable for bytes and strings, but we do it unconditionally. + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + rv.Set(reflect.Zero(rv.Type())) + }, + get: getter, + // TODO: Implement unsafe fast path for set? + set: func(p pointer, v protoreflect.Value) { + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + if deref { + if rv.IsNil() { + rv.Set(reflect.New(ft)) + } + rv = rv.Elem() + } + + rv.Set(conv.GoValueOf(v)) + if nullable && rv.Kind() == reflect.Slice && rv.IsNil() { + rv.Set(emptyBytes) + } + if nullable { + mi.setPresent(p, index) + } + }, + newField: func() protoreflect.Value { + return conv.New() + }, + } +} + +func (mi *MessageInfo) fieldInfoForMessageOpaque(si opaqueStructInfo, fd protoreflect.FieldDescriptor, fs reflect.StructField) fieldInfo { + ft := fs.Type + conv := NewConverter(ft, fd) + fieldOffset := offsetOf(fs) + index, _ := presenceIndex(mi.Desc, fd) + fieldNumber := fd.Number() + elemType := fs.Type.Elem() + return fieldInfo{ + fieldDesc: fd, + has: func(p pointer) bool { + if p.IsNil() { + return false + } + return mi.present(p, index) + }, + clear: func(p pointer) { + mi.clearPresent(p, index) + p.Apply(fieldOffset).AtomicSetNilPointer() + }, + get: func(p pointer) protoreflect.Value { + if p.IsNil() || !mi.present(p, index) { + return conv.Zero() + } + fp := p.Apply(fieldOffset) + mp := fp.AtomicGetPointer() + if mp.IsNil() { + // Lazily unmarshal this field. + mi.lazyUnmarshal(p, fieldNumber) + mp = fp.AtomicGetPointer() + } + rv := mp.AsValueOf(elemType) + return conv.PBValueOf(rv) + }, + set: func(p pointer, v protoreflect.Value) { + val := pointerOfValue(conv.GoValueOf(v)) + if val.IsNil() { + panic("invalid nil pointer") + } + p.Apply(fieldOffset).AtomicSetPointer(val) + mi.setPresent(p, index) + }, + mutable: func(p pointer) protoreflect.Value { + fp := p.Apply(fieldOffset) + mp := fp.AtomicGetPointer() + if mp.IsNil() { + if mi.present(p, index) { + // Lazily unmarshal this field. 
+ mi.lazyUnmarshal(p, fieldNumber) + mp = fp.AtomicGetPointer() + } else { + mp = pointerOfValue(conv.GoValueOf(conv.New())) + fp.AtomicSetPointer(mp) + mi.setPresent(p, index) + } + } + return conv.PBValueOf(mp.AsValueOf(fs.Type.Elem())) + }, + newMessage: func() protoreflect.Message { + return conv.New().Message() + }, + newField: func() protoreflect.Value { + return conv.New() + }, + } +} + +// A presenceList wraps a List, updating presence bits as necessary when the +// list contents change. +type presenceList struct { + pvalueList + setPresence func(bool) +} +type pvalueList interface { + protoreflect.List + //Unwrapper +} + +func (list presenceList) Append(v protoreflect.Value) { + list.pvalueList.Append(v) + list.setPresence(true) +} +func (list presenceList) Truncate(i int) { + list.pvalueList.Truncate(i) + list.setPresence(i > 0) +} + +// presenceIndex returns the index to pass to presence functions. +// +// TODO: field.Desc.Index() would be simpler, and would give space to record the presence of oneof fields. +func presenceIndex(md protoreflect.MessageDescriptor, fd protoreflect.FieldDescriptor) (uint32, presenceSize) { + found := false + var index, numIndices uint32 + for i := 0; i < md.Fields().Len(); i++ { + f := md.Fields().Get(i) + if f == fd { + found = true + index = numIndices + } + if f.ContainingOneof() == nil || isLastOneofField(f) { + numIndices++ + } + } + if !found { + panic(fmt.Sprintf("BUG: %v not in %v", fd.Name(), md.FullName())) + } + return index, presenceSize(numIndices) +} + +func isLastOneofField(fd protoreflect.FieldDescriptor) bool { + fields := fd.ContainingOneof().Fields() + return fields.Get(fields.Len()-1) == fd +} + +func (mi *MessageInfo) setPresent(p pointer, index uint32) { + p.Apply(mi.presenceOffset).PresenceInfo().SetPresent(index, mi.presenceSize) +} + +func (mi *MessageInfo) clearPresent(p pointer, index uint32) { + p.Apply(mi.presenceOffset).PresenceInfo().ClearPresent(index) +} + +func (mi *MessageInfo) present(p pointer, index uint32) bool { + return p.Apply(mi.presenceOffset).PresenceInfo().Present(index) +} + +// usePresenceForField implements the somewhat intricate logic of when +// the presence bitmap is used for a field. The main logic is that a +// field that is optional or that can be lazy will use the presence +// bit, but for proto2, also maps have a presence bit. It also records +// if the field can ever be lazy, which is true if we have a +// lazyOffset and the field is a message or a slice of messages. A +// field that is lazy will always need a presence bit. Oneofs are not +// lazy and do not use presence, unless they are a synthetic oneof, +// which is a proto3 optional field. For proto3 optionals, we use the +// presence and they can also be lazy when applicable (a message). +func usePresenceForField(si opaqueStructInfo, fd protoreflect.FieldDescriptor) (usePresence, canBeLazy bool) { + hasLazyField := fd.(interface{ IsLazy() bool }).IsLazy() + + // Non-oneof scalar fields with explicit field presence use the presence array. 
+ usesPresenceArray := fd.HasPresence() && fd.Message() == nil && (fd.ContainingOneof() == nil || fd.ContainingOneof().IsSynthetic()) + switch { + case fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic(): + return false, false + case fd.IsMap(): + return false, false + case fd.Kind() == protoreflect.MessageKind || fd.Kind() == protoreflect.GroupKind: + return hasLazyField, hasLazyField + default: + return usesPresenceArray || (hasLazyField && fd.HasPresence()), false + } +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_opaque_gen.go b/vendor/google.golang.org/protobuf/internal/impl/message_opaque_gen.go new file mode 100644 index 000000000..a69825699 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/message_opaque_gen.go @@ -0,0 +1,132 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-types. DO NOT EDIT. + +package impl + +import ( + "reflect" + + "google.golang.org/protobuf/reflect/protoreflect" +) + +func getterForOpaqueNullableScalar(mi *MessageInfo, index uint32, fd protoreflect.FieldDescriptor, fs reflect.StructField, conv Converter, fieldOffset offset) func(p pointer) protoreflect.Value { + ft := fs.Type + if ft.Kind() == reflect.Ptr { + ft = ft.Elem() + } + if fd.Kind() == protoreflect.EnumKind { + // Enums for nullable opaque types. + return func(p pointer) protoreflect.Value { + if p.IsNil() || !mi.present(p, index) { + return conv.Zero() + } + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + return conv.PBValueOf(rv) + } + } + switch ft.Kind() { + case reflect.Bool: + return func(p pointer) protoreflect.Value { + if p.IsNil() || !mi.present(p, index) { + return conv.Zero() + } + x := p.Apply(fieldOffset).Bool() + return protoreflect.ValueOfBool(*x) + } + case reflect.Int32: + return func(p pointer) protoreflect.Value { + if p.IsNil() || !mi.present(p, index) { + return conv.Zero() + } + x := p.Apply(fieldOffset).Int32() + return protoreflect.ValueOfInt32(*x) + } + case reflect.Uint32: + return func(p pointer) protoreflect.Value { + if p.IsNil() || !mi.present(p, index) { + return conv.Zero() + } + x := p.Apply(fieldOffset).Uint32() + return protoreflect.ValueOfUint32(*x) + } + case reflect.Int64: + return func(p pointer) protoreflect.Value { + if p.IsNil() || !mi.present(p, index) { + return conv.Zero() + } + x := p.Apply(fieldOffset).Int64() + return protoreflect.ValueOfInt64(*x) + } + case reflect.Uint64: + return func(p pointer) protoreflect.Value { + if p.IsNil() || !mi.present(p, index) { + return conv.Zero() + } + x := p.Apply(fieldOffset).Uint64() + return protoreflect.ValueOfUint64(*x) + } + case reflect.Float32: + return func(p pointer) protoreflect.Value { + if p.IsNil() || !mi.present(p, index) { + return conv.Zero() + } + x := p.Apply(fieldOffset).Float32() + return protoreflect.ValueOfFloat32(*x) + } + case reflect.Float64: + return func(p pointer) protoreflect.Value { + if p.IsNil() || !mi.present(p, index) { + return conv.Zero() + } + x := p.Apply(fieldOffset).Float64() + return protoreflect.ValueOfFloat64(*x) + } + case reflect.String: + if fd.Kind() == protoreflect.BytesKind { + return func(p pointer) protoreflect.Value { + if p.IsNil() || !mi.present(p, index) { + return conv.Zero() + } + x := p.Apply(fieldOffset).StringPtr() + if *x == nil { + return conv.Zero() + } + if len(**x) == 0 { + return protoreflect.ValueOfBytes(nil) + } + return 
protoreflect.ValueOfBytes([]byte(**x)) + } + } + return func(p pointer) protoreflect.Value { + if p.IsNil() || !mi.present(p, index) { + return conv.Zero() + } + x := p.Apply(fieldOffset).StringPtr() + if *x == nil { + return conv.Zero() + } + return protoreflect.ValueOfString(**x) + } + case reflect.Slice: + if fd.Kind() == protoreflect.StringKind { + return func(p pointer) protoreflect.Value { + if p.IsNil() || !mi.present(p, index) { + return conv.Zero() + } + x := p.Apply(fieldOffset).Bytes() + return protoreflect.ValueOfString(string(*x)) + } + } + return func(p pointer) protoreflect.Value { + if p.IsNil() || !mi.present(p, index) { + return conv.Zero() + } + x := p.Apply(fieldOffset).Bytes() + return protoreflect.ValueOfBytes(*x) + } + } + panic("unexpected protobuf kind: " + ft.Kind().String()) +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go b/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go index ecb4623d7..0d20132fa 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go +++ b/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go @@ -72,8 +72,6 @@ func (mi *MessageInfo) makeKnownFieldsFunc(si structInfo) { fi = fieldInfoForMap(fd, fs, mi.Exporter) case fd.IsList(): fi = fieldInfoForList(fd, fs, mi.Exporter) - case fd.IsWeak(): - fi = fieldInfoForWeakMessage(fd, si.weakOffset) case fd.Message() != nil: fi = fieldInfoForMessage(fd, fs, mi.Exporter) default: @@ -205,6 +203,11 @@ func (mi *MessageInfo) makeFieldTypes(si structInfo) { case fd.IsList(): if fd.Enum() != nil || fd.Message() != nil { ft = fs.Type.Elem() + + if ft.Kind() == reflect.Slice { + ft = ft.Elem() + } + } isMessage = fd.Message() != nil case fd.Enum() != nil: @@ -214,9 +217,6 @@ func (mi *MessageInfo) makeFieldTypes(si structInfo) { } case fd.Message() != nil: ft = fs.Type - if fd.IsWeak() { - ft = nil - } isMessage = true } if isMessage && ft != nil && ft.Kind() != reflect.Ptr { diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go index 986322b19..68d4ae32e 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go +++ b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go @@ -8,11 +8,8 @@ import ( "fmt" "math" "reflect" - "sync" - "google.golang.org/protobuf/internal/flags" "google.golang.org/protobuf/reflect/protoreflect" - "google.golang.org/protobuf/reflect/protoregistry" ) type fieldInfo struct { @@ -76,7 +73,7 @@ func fieldInfoForOneof(fd protoreflect.FieldDescriptor, fs reflect.StructField, isMessage := fd.Message() != nil // TODO: Implement unsafe fast path? - fieldOffset := offsetOf(fs, x) + fieldOffset := offsetOf(fs) return fieldInfo{ // NOTE: The logic below intentionally assumes that oneof fields are // well-formatted. That is, the oneof interface never contains a @@ -152,7 +149,7 @@ func fieldInfoForMap(fd protoreflect.FieldDescriptor, fs reflect.StructField, x conv := NewConverter(ft, fd) // TODO: Implement unsafe fast path? - fieldOffset := offsetOf(fs, x) + fieldOffset := offsetOf(fs) return fieldInfo{ fieldDesc: fd, has: func(p pointer) bool { @@ -205,7 +202,7 @@ func fieldInfoForList(fd protoreflect.FieldDescriptor, fs reflect.StructField, x conv := NewConverter(reflect.PtrTo(ft), fd) // TODO: Implement unsafe fast path? 
- fieldOffset := offsetOf(fs, x) + fieldOffset := offsetOf(fs) return fieldInfo{ fieldDesc: fd, has: func(p pointer) bool { @@ -256,6 +253,7 @@ func fieldInfoForScalar(fd protoreflect.FieldDescriptor, fs reflect.StructField, ft := fs.Type nullable := fd.HasPresence() isBytes := ft.Kind() == reflect.Slice && ft.Elem().Kind() == reflect.Uint8 + var getter func(p pointer) protoreflect.Value if nullable { if ft.Kind() != reflect.Ptr && ft.Kind() != reflect.Slice { // This never occurs for generated message types. @@ -268,19 +266,25 @@ func fieldInfoForScalar(fd protoreflect.FieldDescriptor, fs reflect.StructField, } } conv := NewConverter(ft, fd) + fieldOffset := offsetOf(fs) + + // Generate specialized getter functions to avoid going through reflect.Value + if nullable { + getter = getterForNullableScalar(fd, fs, conv, fieldOffset) + } else { + getter = getterForDirectScalar(fd, fs, conv, fieldOffset) + } - // TODO: Implement unsafe fast path? - fieldOffset := offsetOf(fs, x) return fieldInfo{ fieldDesc: fd, has: func(p pointer) bool { if p.IsNil() { return false } - rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() if nullable { - return !rv.IsNil() + return !p.Apply(fieldOffset).Elem().IsNil() } + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() switch rv.Kind() { case reflect.Bool: return rv.Bool() @@ -300,21 +304,8 @@ func fieldInfoForScalar(fd protoreflect.FieldDescriptor, fs reflect.StructField, rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() rv.Set(reflect.Zero(rv.Type())) }, - get: func(p pointer) protoreflect.Value { - if p.IsNil() { - return conv.Zero() - } - rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() - if nullable { - if rv.IsNil() { - return conv.Zero() - } - if rv.Kind() == reflect.Ptr { - rv = rv.Elem() - } - } - return conv.PBValueOf(rv) - }, + get: getter, + // TODO: Implement unsafe fast path for set? 
set: func(p pointer, v protoreflect.Value) { rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() if nullable && rv.Kind() == reflect.Ptr { @@ -338,85 +329,12 @@ func fieldInfoForScalar(fd protoreflect.FieldDescriptor, fs reflect.StructField, } } -func fieldInfoForWeakMessage(fd protoreflect.FieldDescriptor, weakOffset offset) fieldInfo { - if !flags.ProtoLegacy { - panic("no support for proto1 weak fields") - } - - var once sync.Once - var messageType protoreflect.MessageType - lazyInit := func() { - once.Do(func() { - messageName := fd.Message().FullName() - messageType, _ = protoregistry.GlobalTypes.FindMessageByName(messageName) - if messageType == nil { - panic(fmt.Sprintf("weak message %v for field %v is not linked in", messageName, fd.FullName())) - } - }) - } - - num := fd.Number() - return fieldInfo{ - fieldDesc: fd, - has: func(p pointer) bool { - if p.IsNil() { - return false - } - _, ok := p.Apply(weakOffset).WeakFields().get(num) - return ok - }, - clear: func(p pointer) { - p.Apply(weakOffset).WeakFields().clear(num) - }, - get: func(p pointer) protoreflect.Value { - lazyInit() - if p.IsNil() { - return protoreflect.ValueOfMessage(messageType.Zero()) - } - m, ok := p.Apply(weakOffset).WeakFields().get(num) - if !ok { - return protoreflect.ValueOfMessage(messageType.Zero()) - } - return protoreflect.ValueOfMessage(m.ProtoReflect()) - }, - set: func(p pointer, v protoreflect.Value) { - lazyInit() - m := v.Message() - if m.Descriptor() != messageType.Descriptor() { - if got, want := m.Descriptor().FullName(), messageType.Descriptor().FullName(); got != want { - panic(fmt.Sprintf("field %v has mismatching message descriptor: got %v, want %v", fd.FullName(), got, want)) - } - panic(fmt.Sprintf("field %v has mismatching message descriptor: %v", fd.FullName(), m.Descriptor().FullName())) - } - p.Apply(weakOffset).WeakFields().set(num, m.Interface()) - }, - mutable: func(p pointer) protoreflect.Value { - lazyInit() - fs := p.Apply(weakOffset).WeakFields() - m, ok := fs.get(num) - if !ok { - m = messageType.New().Interface() - fs.set(num, m) - } - return protoreflect.ValueOfMessage(m.ProtoReflect()) - }, - newMessage: func() protoreflect.Message { - lazyInit() - return messageType.New() - }, - newField: func() protoreflect.Value { - lazyInit() - return protoreflect.ValueOfMessage(messageType.New()) - }, - } -} - func fieldInfoForMessage(fd protoreflect.FieldDescriptor, fs reflect.StructField, x exporter) fieldInfo { ft := fs.Type conv := NewConverter(ft, fd) // TODO: Implement unsafe fast path? 
- fieldOffset := offsetOf(fs, x) + fieldOffset := offsetOf(fs) return fieldInfo{ fieldDesc: fd, has: func(p pointer) bool { @@ -425,7 +343,7 @@ func fieldInfoForMessage(fd protoreflect.FieldDescriptor, fs reflect.StructField } rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() if fs.Type.Kind() != reflect.Ptr { - return !isZero(rv) + return !rv.IsZero() } return !rv.IsNil() }, @@ -472,7 +390,7 @@ func makeOneofInfo(od protoreflect.OneofDescriptor, si structInfo, x exporter) * oi := &oneofInfo{oneofDesc: od} if od.IsSynthetic() { fs := si.fieldsByNumber[od.Fields().Get(0).Number()] - fieldOffset := offsetOf(fs, x) + fieldOffset := offsetOf(fs) oi.which = func(p pointer) protoreflect.FieldNumber { if p.IsNil() { return 0 @@ -485,7 +403,7 @@ func makeOneofInfo(od protoreflect.OneofDescriptor, si structInfo, x exporter) * } } else { fs := si.oneofsByName[od.Name()] - fieldOffset := offsetOf(fs, x) + fieldOffset := offsetOf(fs) oi.which = func(p pointer) protoreflect.FieldNumber { if p.IsNil() { return 0 @@ -503,41 +421,3 @@ func makeOneofInfo(od protoreflect.OneofDescriptor, si structInfo, x exporter) * } return oi } - -// isZero is identical to reflect.Value.IsZero. -// TODO: Remove this when Go1.13 is the minimally supported Go version. -func isZero(v reflect.Value) bool { - switch v.Kind() { - case reflect.Bool: - return !v.Bool() - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return v.Int() == 0 - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return v.Uint() == 0 - case reflect.Float32, reflect.Float64: - return math.Float64bits(v.Float()) == 0 - case reflect.Complex64, reflect.Complex128: - c := v.Complex() - return math.Float64bits(real(c)) == 0 && math.Float64bits(imag(c)) == 0 - case reflect.Array: - for i := 0; i < v.Len(); i++ { - if !isZero(v.Index(i)) { - return false - } - } - return true - case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice, reflect.UnsafePointer: - return v.IsNil() - case reflect.String: - return v.Len() == 0 - case reflect.Struct: - for i := 0; i < v.NumField(); i++ { - if !isZero(v.Field(i)) { - return false - } - } - return true - default: - panic(&reflect.ValueError{Method: "reflect.Value.IsZero", Kind: v.Kind()}) - } -} diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field_gen.go b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field_gen.go new file mode 100644 index 000000000..af5e063a1 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field_gen.go @@ -0,0 +1,273 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-types. DO NOT EDIT. + +package impl + +import ( + "reflect" + + "google.golang.org/protobuf/reflect/protoreflect" +) + +func getterForNullableScalar(fd protoreflect.FieldDescriptor, fs reflect.StructField, conv Converter, fieldOffset offset) func(p pointer) protoreflect.Value { + ft := fs.Type + if ft.Kind() == reflect.Ptr { + ft = ft.Elem() + } + if fd.Kind() == protoreflect.EnumKind { + elemType := fs.Type.Elem() + // Enums for nullable types. 
+ return func(p pointer) protoreflect.Value { + if p.IsNil() { + return conv.Zero() + } + rv := p.Apply(fieldOffset).Elem().AsValueOf(elemType) + if rv.IsNil() { + return conv.Zero() + } + return conv.PBValueOf(rv.Elem()) + } + } + switch ft.Kind() { + case reflect.Bool: + return func(p pointer) protoreflect.Value { + if p.IsNil() { + return conv.Zero() + } + x := p.Apply(fieldOffset).BoolPtr() + if *x == nil { + return conv.Zero() + } + return protoreflect.ValueOfBool(**x) + } + case reflect.Int32: + return func(p pointer) protoreflect.Value { + if p.IsNil() { + return conv.Zero() + } + x := p.Apply(fieldOffset).Int32Ptr() + if *x == nil { + return conv.Zero() + } + return protoreflect.ValueOfInt32(**x) + } + case reflect.Uint32: + return func(p pointer) protoreflect.Value { + if p.IsNil() { + return conv.Zero() + } + x := p.Apply(fieldOffset).Uint32Ptr() + if *x == nil { + return conv.Zero() + } + return protoreflect.ValueOfUint32(**x) + } + case reflect.Int64: + return func(p pointer) protoreflect.Value { + if p.IsNil() { + return conv.Zero() + } + x := p.Apply(fieldOffset).Int64Ptr() + if *x == nil { + return conv.Zero() + } + return protoreflect.ValueOfInt64(**x) + } + case reflect.Uint64: + return func(p pointer) protoreflect.Value { + if p.IsNil() { + return conv.Zero() + } + x := p.Apply(fieldOffset).Uint64Ptr() + if *x == nil { + return conv.Zero() + } + return protoreflect.ValueOfUint64(**x) + } + case reflect.Float32: + return func(p pointer) protoreflect.Value { + if p.IsNil() { + return conv.Zero() + } + x := p.Apply(fieldOffset).Float32Ptr() + if *x == nil { + return conv.Zero() + } + return protoreflect.ValueOfFloat32(**x) + } + case reflect.Float64: + return func(p pointer) protoreflect.Value { + if p.IsNil() { + return conv.Zero() + } + x := p.Apply(fieldOffset).Float64Ptr() + if *x == nil { + return conv.Zero() + } + return protoreflect.ValueOfFloat64(**x) + } + case reflect.String: + if fd.Kind() == protoreflect.BytesKind { + return func(p pointer) protoreflect.Value { + if p.IsNil() { + return conv.Zero() + } + x := p.Apply(fieldOffset).StringPtr() + if *x == nil { + return conv.Zero() + } + if len(**x) == 0 { + return protoreflect.ValueOfBytes(nil) + } + return protoreflect.ValueOfBytes([]byte(**x)) + } + } + return func(p pointer) protoreflect.Value { + if p.IsNil() { + return conv.Zero() + } + x := p.Apply(fieldOffset).StringPtr() + if *x == nil { + return conv.Zero() + } + return protoreflect.ValueOfString(**x) + } + case reflect.Slice: + if fd.Kind() == protoreflect.StringKind { + return func(p pointer) protoreflect.Value { + if p.IsNil() { + return conv.Zero() + } + x := p.Apply(fieldOffset).Bytes() + if len(*x) == 0 { + return conv.Zero() + } + return protoreflect.ValueOfString(string(*x)) + } + } + return func(p pointer) protoreflect.Value { + if p.IsNil() { + return conv.Zero() + } + x := p.Apply(fieldOffset).Bytes() + if *x == nil { + return conv.Zero() + } + return protoreflect.ValueOfBytes(*x) + } + } + panic("unexpected protobuf kind: " + ft.Kind().String()) +} + +func getterForDirectScalar(fd protoreflect.FieldDescriptor, fs reflect.StructField, conv Converter, fieldOffset offset) func(p pointer) protoreflect.Value { + ft := fs.Type + if fd.Kind() == protoreflect.EnumKind { + // Enums for non nullable types. 
+ return func(p pointer) protoreflect.Value { + if p.IsNil() { + return conv.Zero() + } + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + return conv.PBValueOf(rv) + } + } + switch ft.Kind() { + case reflect.Bool: + return func(p pointer) protoreflect.Value { + if p.IsNil() { + return conv.Zero() + } + x := p.Apply(fieldOffset).Bool() + return protoreflect.ValueOfBool(*x) + } + case reflect.Int32: + return func(p pointer) protoreflect.Value { + if p.IsNil() { + return conv.Zero() + } + x := p.Apply(fieldOffset).Int32() + return protoreflect.ValueOfInt32(*x) + } + case reflect.Uint32: + return func(p pointer) protoreflect.Value { + if p.IsNil() { + return conv.Zero() + } + x := p.Apply(fieldOffset).Uint32() + return protoreflect.ValueOfUint32(*x) + } + case reflect.Int64: + return func(p pointer) protoreflect.Value { + if p.IsNil() { + return conv.Zero() + } + x := p.Apply(fieldOffset).Int64() + return protoreflect.ValueOfInt64(*x) + } + case reflect.Uint64: + return func(p pointer) protoreflect.Value { + if p.IsNil() { + return conv.Zero() + } + x := p.Apply(fieldOffset).Uint64() + return protoreflect.ValueOfUint64(*x) + } + case reflect.Float32: + return func(p pointer) protoreflect.Value { + if p.IsNil() { + return conv.Zero() + } + x := p.Apply(fieldOffset).Float32() + return protoreflect.ValueOfFloat32(*x) + } + case reflect.Float64: + return func(p pointer) protoreflect.Value { + if p.IsNil() { + return conv.Zero() + } + x := p.Apply(fieldOffset).Float64() + return protoreflect.ValueOfFloat64(*x) + } + case reflect.String: + if fd.Kind() == protoreflect.BytesKind { + return func(p pointer) protoreflect.Value { + if p.IsNil() { + return conv.Zero() + } + x := p.Apply(fieldOffset).String() + if len(*x) == 0 { + return protoreflect.ValueOfBytes(nil) + } + return protoreflect.ValueOfBytes([]byte(*x)) + } + } + return func(p pointer) protoreflect.Value { + if p.IsNil() { + return conv.Zero() + } + x := p.Apply(fieldOffset).String() + return protoreflect.ValueOfString(*x) + } + case reflect.Slice: + if fd.Kind() == protoreflect.StringKind { + return func(p pointer) protoreflect.Value { + if p.IsNil() { + return conv.Zero() + } + x := p.Apply(fieldOffset).Bytes() + return protoreflect.ValueOfString(string(*x)) + } + } + return func(p pointer) protoreflect.Value { + if p.IsNil() { + return conv.Zero() + } + x := p.Apply(fieldOffset).Bytes() + return protoreflect.ValueOfBytes(*x) + } + } + panic("unexpected protobuf kind: " + ft.Kind().String()) +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go b/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go deleted file mode 100644 index da685e8a2..000000000 --- a/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go +++ /dev/null @@ -1,215 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build purego || appengine -// +build purego appengine - -package impl - -import ( - "fmt" - "reflect" - "sync" -) - -const UnsafeEnabled = false - -// Pointer is an opaque pointer type. -type Pointer any - -// offset represents the offset to a struct field, accessible from a pointer. -// The offset is the field index into a struct. -type offset struct { - index int - export exporter -} - -// offsetOf returns a field offset for the struct field. 
-func offsetOf(f reflect.StructField, x exporter) offset { - if len(f.Index) != 1 { - panic("embedded structs are not supported") - } - if f.PkgPath == "" { - return offset{index: f.Index[0]} // field is already exported - } - if x == nil { - panic("exporter must be provided for unexported field") - } - return offset{index: f.Index[0], export: x} -} - -// IsValid reports whether the offset is valid. -func (f offset) IsValid() bool { return f.index >= 0 } - -// invalidOffset is an invalid field offset. -var invalidOffset = offset{index: -1} - -// zeroOffset is a noop when calling pointer.Apply. -var zeroOffset = offset{index: 0} - -// pointer is an abstract representation of a pointer to a struct or field. -type pointer struct{ v reflect.Value } - -// pointerOf returns p as a pointer. -func pointerOf(p Pointer) pointer { - return pointerOfIface(p) -} - -// pointerOfValue returns v as a pointer. -func pointerOfValue(v reflect.Value) pointer { - return pointer{v: v} -} - -// pointerOfIface returns the pointer portion of an interface. -func pointerOfIface(v any) pointer { - return pointer{v: reflect.ValueOf(v)} -} - -// IsNil reports whether the pointer is nil. -func (p pointer) IsNil() bool { - return p.v.IsNil() -} - -// Apply adds an offset to the pointer to derive a new pointer -// to a specified field. The current pointer must be pointing at a struct. -func (p pointer) Apply(f offset) pointer { - if f.export != nil { - if v := reflect.ValueOf(f.export(p.v.Interface(), f.index)); v.IsValid() { - return pointer{v: v} - } - } - return pointer{v: p.v.Elem().Field(f.index).Addr()} -} - -// AsValueOf treats p as a pointer to an object of type t and returns the value. -// It is equivalent to reflect.ValueOf(p.AsIfaceOf(t)) -func (p pointer) AsValueOf(t reflect.Type) reflect.Value { - if got := p.v.Type().Elem(); got != t { - panic(fmt.Sprintf("invalid type: got %v, want %v", got, t)) - } - return p.v -} - -// AsIfaceOf treats p as a pointer to an object of type t and returns the value. 
-// It is equivalent to p.AsValueOf(t).Interface() -func (p pointer) AsIfaceOf(t reflect.Type) any { - return p.AsValueOf(t).Interface() -} - -func (p pointer) Bool() *bool { return p.v.Interface().(*bool) } -func (p pointer) BoolPtr() **bool { return p.v.Interface().(**bool) } -func (p pointer) BoolSlice() *[]bool { return p.v.Interface().(*[]bool) } -func (p pointer) Int32() *int32 { return p.v.Interface().(*int32) } -func (p pointer) Int32Ptr() **int32 { return p.v.Interface().(**int32) } -func (p pointer) Int32Slice() *[]int32 { return p.v.Interface().(*[]int32) } -func (p pointer) Int64() *int64 { return p.v.Interface().(*int64) } -func (p pointer) Int64Ptr() **int64 { return p.v.Interface().(**int64) } -func (p pointer) Int64Slice() *[]int64 { return p.v.Interface().(*[]int64) } -func (p pointer) Uint32() *uint32 { return p.v.Interface().(*uint32) } -func (p pointer) Uint32Ptr() **uint32 { return p.v.Interface().(**uint32) } -func (p pointer) Uint32Slice() *[]uint32 { return p.v.Interface().(*[]uint32) } -func (p pointer) Uint64() *uint64 { return p.v.Interface().(*uint64) } -func (p pointer) Uint64Ptr() **uint64 { return p.v.Interface().(**uint64) } -func (p pointer) Uint64Slice() *[]uint64 { return p.v.Interface().(*[]uint64) } -func (p pointer) Float32() *float32 { return p.v.Interface().(*float32) } -func (p pointer) Float32Ptr() **float32 { return p.v.Interface().(**float32) } -func (p pointer) Float32Slice() *[]float32 { return p.v.Interface().(*[]float32) } -func (p pointer) Float64() *float64 { return p.v.Interface().(*float64) } -func (p pointer) Float64Ptr() **float64 { return p.v.Interface().(**float64) } -func (p pointer) Float64Slice() *[]float64 { return p.v.Interface().(*[]float64) } -func (p pointer) String() *string { return p.v.Interface().(*string) } -func (p pointer) StringPtr() **string { return p.v.Interface().(**string) } -func (p pointer) StringSlice() *[]string { return p.v.Interface().(*[]string) } -func (p pointer) Bytes() *[]byte { return p.v.Interface().(*[]byte) } -func (p pointer) BytesPtr() **[]byte { return p.v.Interface().(**[]byte) } -func (p pointer) BytesSlice() *[][]byte { return p.v.Interface().(*[][]byte) } -func (p pointer) WeakFields() *weakFields { return (*weakFields)(p.v.Interface().(*WeakFields)) } -func (p pointer) Extensions() *map[int32]ExtensionField { - return p.v.Interface().(*map[int32]ExtensionField) -} - -func (p pointer) Elem() pointer { - return pointer{v: p.v.Elem()} -} - -// PointerSlice copies []*T from p as a new []pointer. -// This behavior differs from the implementation in pointer_unsafe.go. -func (p pointer) PointerSlice() []pointer { - // TODO: reconsider this - if p.v.IsNil() { - return nil - } - n := p.v.Elem().Len() - s := make([]pointer, n) - for i := 0; i < n; i++ { - s[i] = pointer{v: p.v.Elem().Index(i)} - } - return s -} - -// AppendPointerSlice appends v to p, which must be a []*T. -func (p pointer) AppendPointerSlice(v pointer) { - sp := p.v.Elem() - sp.Set(reflect.Append(sp, v.v)) -} - -// SetPointer sets *p to v. -func (p pointer) SetPointer(v pointer) { - p.v.Elem().Set(v.v) -} - -func growSlice(p pointer, addCap int) { - // TODO: Once we only support Go 1.20 and newer, use reflect.Grow. 
- in := p.v.Elem() - out := reflect.MakeSlice(in.Type(), in.Len(), in.Len()+addCap) - reflect.Copy(out, in) - p.v.Elem().Set(out) -} - -func (p pointer) growBoolSlice(addCap int) { - growSlice(p, addCap) -} - -func (p pointer) growInt32Slice(addCap int) { - growSlice(p, addCap) -} - -func (p pointer) growUint32Slice(addCap int) { - growSlice(p, addCap) -} - -func (p pointer) growInt64Slice(addCap int) { - growSlice(p, addCap) -} - -func (p pointer) growUint64Slice(addCap int) { - growSlice(p, addCap) -} - -func (p pointer) growFloat64Slice(addCap int) { - growSlice(p, addCap) -} - -func (p pointer) growFloat32Slice(addCap int) { - growSlice(p, addCap) -} - -func (Export) MessageStateOf(p Pointer) *messageState { panic("not supported") } -func (ms *messageState) pointer() pointer { panic("not supported") } -func (ms *messageState) messageInfo() *MessageInfo { panic("not supported") } -func (ms *messageState) LoadMessageInfo() *MessageInfo { panic("not supported") } -func (ms *messageState) StoreMessageInfo(mi *MessageInfo) { panic("not supported") } - -type atomicNilMessage struct { - once sync.Once - m messageReflectWrapper -} - -func (m *atomicNilMessage) Init(mi *MessageInfo) *messageReflectWrapper { - m.once.Do(func() { - m.m.p = pointerOfIface(reflect.Zero(mi.GoReflectType).Interface()) - m.m.mi = mi - }) - return &m.m -} diff --git a/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go b/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go index 5f20ca5d8..62f8bf663 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go +++ b/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go @@ -2,15 +2,14 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build !purego && !appengine -// +build !purego,!appengine - package impl import ( "reflect" "sync/atomic" "unsafe" + + "google.golang.org/protobuf/internal/protolazy" ) const UnsafeEnabled = true @@ -23,7 +22,7 @@ type Pointer unsafe.Pointer type offset uintptr // offsetOf returns a field offset for the struct field. -func offsetOf(f reflect.StructField, x exporter) offset { +func offsetOf(f reflect.StructField) offset { return offset(f.Offset) } @@ -112,8 +111,14 @@ func (p pointer) StringSlice() *[]string { return (*[]string)(p.p func (p pointer) Bytes() *[]byte { return (*[]byte)(p.p) } func (p pointer) BytesPtr() **[]byte { return (**[]byte)(p.p) } func (p pointer) BytesSlice() *[][]byte { return (*[][]byte)(p.p) } -func (p pointer) WeakFields() *weakFields { return (*weakFields)(p.p) } func (p pointer) Extensions() *map[int32]ExtensionField { return (*map[int32]ExtensionField)(p.p) } +func (p pointer) LazyInfoPtr() **protolazy.XXX_lazyUnmarshalInfo { + return (**protolazy.XXX_lazyUnmarshalInfo)(p.p) +} + +func (p pointer) PresenceInfo() presence { + return presence{P: p.p} +} func (p pointer) Elem() pointer { return pointer{p: *(*unsafe.Pointer)(p.p)} diff --git a/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe_opaque.go b/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe_opaque.go new file mode 100644 index 000000000..38aa7b7dc --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe_opaque.go @@ -0,0 +1,42 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package impl + +import ( + "sync/atomic" + "unsafe" +) + +func (p pointer) AtomicGetPointer() pointer { + return pointer{p: atomic.LoadPointer((*unsafe.Pointer)(p.p))} +} + +func (p pointer) AtomicSetPointer(v pointer) { + atomic.StorePointer((*unsafe.Pointer)(p.p), v.p) +} + +func (p pointer) AtomicSetNilPointer() { + atomic.StorePointer((*unsafe.Pointer)(p.p), unsafe.Pointer(nil)) +} + +func (p pointer) AtomicSetPointerIfNil(v pointer) pointer { + if atomic.CompareAndSwapPointer((*unsafe.Pointer)(p.p), unsafe.Pointer(nil), v.p) { + return v + } + return pointer{p: atomic.LoadPointer((*unsafe.Pointer)(p.p))} +} + +type atomicV1MessageInfo struct{ p Pointer } + +func (mi *atomicV1MessageInfo) Get() Pointer { + return Pointer(atomic.LoadPointer((*unsafe.Pointer)(&mi.p))) +} + +func (mi *atomicV1MessageInfo) SetIfNil(p Pointer) Pointer { + if atomic.CompareAndSwapPointer((*unsafe.Pointer)(&mi.p), nil, unsafe.Pointer(p)) { + return p + } + return mi.Get() +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/presence.go b/vendor/google.golang.org/protobuf/internal/impl/presence.go new file mode 100644 index 000000000..914cb1ded --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/presence.go @@ -0,0 +1,142 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impl + +import ( + "sync/atomic" + "unsafe" +) + +// presenceSize represents the size of a presence set, which should be the largest index of the set+1 +type presenceSize uint32 + +// presence is the internal representation of the bitmap array in a generated protobuf +type presence struct { + // This is a pointer to the beginning of an array of uint32 + P unsafe.Pointer +} + +func (p presence) toElem(num uint32) (ret *uint32) { + const ( + bitsPerByte = 8 + siz = unsafe.Sizeof(*ret) + ) + // p.P points to an array of uint32, num is the bit in this array that the + // caller wants to check/manipulate. Calculate the index in the array that + // contains this specific bit. E.g.: 76 / 32 = 2 (integer division). + offset := uintptr(num) / (siz * bitsPerByte) * siz + return (*uint32)(unsafe.Pointer(uintptr(p.P) + offset)) +} + +// Present checks for the presence of a specific field number in a presence set. +func (p presence) Present(num uint32) bool { + if p.P == nil { + return false + } + return Export{}.Present(p.toElem(num), num) +} + +// SetPresent adds presence for a specific field number in a presence set. +func (p presence) SetPresent(num uint32, size presenceSize) { + Export{}.SetPresent(p.toElem(num), num, uint32(size)) +} + +// SetPresentUnatomic adds presence for a specific field number in a presence set without using +// atomic operations. Only to be called during unmarshaling. +func (p presence) SetPresentUnatomic(num uint32, size presenceSize) { + Export{}.SetPresentNonAtomic(p.toElem(num), num, uint32(size)) +} + +// ClearPresent removes presence for a specific field number in a presence set. +func (p presence) ClearPresent(num uint32) { + Export{}.ClearPresent(p.toElem(num), num) +} + +// LoadPresenceCache (together with PresentInCache) allows for a +// cached version of checking for presence without re-reading the word +// for every field. It is optimized for efficiency and assumes no +// simltaneous mutation of the presence set (or at least does not have +// a problem with simultaneous mutation giving inconsistent results). 
+func (p presence) LoadPresenceCache() (current uint32) { + if p.P == nil { + return 0 + } + return atomic.LoadUint32((*uint32)(p.P)) +} + +// PresentInCache reads presence from a cached word in the presence +// bitmap. It caches up a new word if the bit is outside the +// word. This is for really fast iteration through bitmaps in cases +// where we either know that the bitmap will not be altered, or we +// don't care about inconsistencies caused by simultaneous writes. +func (p presence) PresentInCache(num uint32, cachedElement *uint32, current *uint32) bool { + if num/32 != *cachedElement { + o := uintptr(num/32) * unsafe.Sizeof(uint32(0)) + q := (*uint32)(unsafe.Pointer(uintptr(p.P) + o)) + *current = atomic.LoadUint32(q) + *cachedElement = num / 32 + } + return (*current & (1 << (num % 32))) > 0 +} + +// AnyPresent checks if any field is marked as present in the bitmap. +func (p presence) AnyPresent(size presenceSize) bool { + n := uintptr((size + 31) / 32) + for j := uintptr(0); j < n; j++ { + o := j * unsafe.Sizeof(uint32(0)) + q := (*uint32)(unsafe.Pointer(uintptr(p.P) + o)) + b := atomic.LoadUint32(q) + if b > 0 { + return true + } + } + return false +} + +// toRaceDetectData finds the preceding RaceDetectHookData in a +// message by using pointer arithmetic. As the type of the presence +// set (bitmap) varies with the number of fields in the protobuf, we +// can not have a struct type containing the array and the +// RaceDetectHookData. instead the RaceDetectHookData is placed +// immediately before the bitmap array, and we find it by walking +// backwards in the struct. +// +// This method is only called from the race-detect version of the code, +// so RaceDetectHookData is never an empty struct. +func (p presence) toRaceDetectData() *RaceDetectHookData { + var template struct { + d RaceDetectHookData + a [1]uint32 + } + o := (uintptr(unsafe.Pointer(&template.a)) - uintptr(unsafe.Pointer(&template.d))) + return (*RaceDetectHookData)(unsafe.Pointer(uintptr(p.P) - o)) +} + +func atomicLoadShadowPresence(p **[]byte) *[]byte { + return (*[]byte)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) +} +func atomicStoreShadowPresence(p **[]byte, v *[]byte) { + atomic.CompareAndSwapPointer((*unsafe.Pointer)(unsafe.Pointer(p)), nil, unsafe.Pointer(v)) +} + +// findPointerToRaceDetectData finds the preceding RaceDetectHookData +// in a message by using pointer arithmetic. For the methods called +// directy from generated code, we don't have a pointer to the +// beginning of the presence set, but a pointer inside the array. As +// we know the index of the bit we're manipulating (num), we can +// calculate which element of the array ptr is pointing to. With that +// information we find the preceding RaceDetectHookData and can +// manipulate the shadow bitmap. +// +// This method is only called from the race-detect version of the +// code, so RaceDetectHookData is never an empty struct. 
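
Editor's note: the presence set added above is a bare array of uint32 words reached through unsafe pointer arithmetic, and the word/bit math is easy to miss inside the diff. The following standalone sketch (not part of the vendored file; a safe []uint32 slice stands in for the unsafe.Pointer, and no atomics are used) shows the same arithmetic that Present, SetPresent and PresentInCache rely on: field index 76 lands in word 76/32 = 2, bit 76%32 = 12.

package main

import "fmt"

// present reports whether bit num is set in a presence bitmap stored as a
// plain []uint32, mirroring the check in presence.Present / PresentInCache.
func present(words []uint32, num uint32) bool {
	return words[num/32]&(1<<(num%32)) != 0
}

// setPresent sets bit num, mirroring the non-atomic SetPresentUnatomic path.
func setPresent(words []uint32, num uint32) {
	words[num/32] |= 1 << (num % 32)
}

func main() {
	// A message with up to 96 presence-tracked fields needs ceil(96/32) = 3 words.
	words := make([]uint32, 3)
	setPresent(words, 76)           // field index 76 -> word 2, bit 12
	fmt.Println(present(words, 76)) // true
	fmt.Println(present(words, 12)) // false (word 0, bit 12 was never set)
}
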
+func findPointerToRaceDetectData(ptr *uint32, num uint32) *RaceDetectHookData { + var template struct { + d RaceDetectHookData + a [1]uint32 + } + o := (uintptr(unsafe.Pointer(&template.a)) - uintptr(unsafe.Pointer(&template.d))) + uintptr(num/32)*unsafe.Sizeof(uint32(0)) + return (*RaceDetectHookData)(unsafe.Pointer(uintptr(unsafe.Pointer(ptr)) - o)) +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/validate.go b/vendor/google.golang.org/protobuf/internal/impl/validate.go index a24e6bbd7..7b2995dde 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/validate.go +++ b/vendor/google.golang.org/protobuf/internal/impl/validate.go @@ -37,6 +37,10 @@ const ( // ValidationValid indicates that unmarshaling the message will succeed. ValidationValid + + // ValidationWrongWireType indicates that a validated field does not have + // the expected wire type. + ValidationWrongWireType ) func (v ValidationStatus) String() string { @@ -149,11 +153,23 @@ func newValidationInfo(fd protoreflect.FieldDescriptor, ft reflect.Type) validat switch fd.Kind() { case protoreflect.MessageKind: vi.typ = validationTypeMessage + + if ft.Kind() == reflect.Ptr { + // Repeated opaque message fields are *[]*T. + ft = ft.Elem() + } + if ft.Kind() == reflect.Slice { vi.mi = getMessageInfo(ft.Elem()) } case protoreflect.GroupKind: vi.typ = validationTypeGroup + + if ft.Kind() == reflect.Ptr { + // Repeated opaque message fields are *[]*T. + ft = ft.Elem() + } + if ft.Kind() == reflect.Slice { vi.mi = getMessageInfo(ft.Elem()) } @@ -195,9 +211,7 @@ func newValidationInfo(fd protoreflect.FieldDescriptor, ft reflect.Type) validat switch fd.Kind() { case protoreflect.MessageKind: vi.typ = validationTypeMessage - if !fd.IsWeak() { - vi.mi = getMessageInfo(ft) - } + vi.mi = getMessageInfo(ft) case protoreflect.GroupKind: vi.typ = validationTypeGroup vi.mi = getMessageInfo(ft) @@ -304,26 +318,6 @@ State: } if f != nil { vi = f.validation - if vi.typ == validationTypeMessage && vi.mi == nil { - // Probable weak field. - // - // TODO: Consider storing the results of this lookup somewhere - // rather than recomputing it on every validation. - fd := st.mi.Desc.Fields().ByNumber(num) - if fd == nil || !fd.IsWeak() { - break - } - messageName := fd.Message().FullName() - messageType, err := protoregistry.GlobalTypes.FindMessageByName(messageName) - switch err { - case nil: - vi.mi, _ = messageType.(*MessageInfo) - case protoregistry.NotFound: - vi.typ = validationTypeBytes - default: - return out, ValidationUnknown - } - } break } // Possible extension field. diff --git a/vendor/google.golang.org/protobuf/internal/impl/weak.go b/vendor/google.golang.org/protobuf/internal/impl/weak.go deleted file mode 100644 index eb79a7ba9..000000000 --- a/vendor/google.golang.org/protobuf/internal/impl/weak.go +++ /dev/null @@ -1,74 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package impl - -import ( - "fmt" - - "google.golang.org/protobuf/reflect/protoreflect" - "google.golang.org/protobuf/reflect/protoregistry" -) - -// weakFields adds methods to the exported WeakFields type for internal use. -// -// The exported type is an alias to an unnamed type, so methods can't be -// defined directly on it. 
-type weakFields WeakFields - -func (w weakFields) get(num protoreflect.FieldNumber) (protoreflect.ProtoMessage, bool) { - m, ok := w[int32(num)] - return m, ok -} - -func (w *weakFields) set(num protoreflect.FieldNumber, m protoreflect.ProtoMessage) { - if *w == nil { - *w = make(weakFields) - } - (*w)[int32(num)] = m -} - -func (w *weakFields) clear(num protoreflect.FieldNumber) { - delete(*w, int32(num)) -} - -func (Export) HasWeak(w WeakFields, num protoreflect.FieldNumber) bool { - _, ok := w[int32(num)] - return ok -} - -func (Export) ClearWeak(w *WeakFields, num protoreflect.FieldNumber) { - delete(*w, int32(num)) -} - -func (Export) GetWeak(w WeakFields, num protoreflect.FieldNumber, name protoreflect.FullName) protoreflect.ProtoMessage { - if m, ok := w[int32(num)]; ok { - return m - } - mt, _ := protoregistry.GlobalTypes.FindMessageByName(name) - if mt == nil { - panic(fmt.Sprintf("message %v for weak field is not linked in", name)) - } - return mt.Zero().Interface() -} - -func (Export) SetWeak(w *WeakFields, num protoreflect.FieldNumber, name protoreflect.FullName, m protoreflect.ProtoMessage) { - if m != nil { - mt, _ := protoregistry.GlobalTypes.FindMessageByName(name) - if mt == nil { - panic(fmt.Sprintf("message %v for weak field is not linked in", name)) - } - if mt != m.ProtoReflect().Type() { - panic(fmt.Sprintf("invalid message type for weak field: got %T, want %T", m, mt.Zero().Interface())) - } - } - if m == nil || !m.ProtoReflect().IsValid() { - delete(*w, int32(num)) - return - } - if *w == nil { - *w = make(weakFields) - } - (*w)[int32(num)] = m -} diff --git a/vendor/google.golang.org/protobuf/internal/protolazy/bufferreader.go b/vendor/google.golang.org/protobuf/internal/protolazy/bufferreader.go new file mode 100644 index 000000000..82e5cab4a --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/protolazy/bufferreader.go @@ -0,0 +1,364 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Helper code for parsing a protocol buffer + +package protolazy + +import ( + "errors" + "fmt" + "io" + + "google.golang.org/protobuf/encoding/protowire" +) + +// BufferReader is a structure encapsulating a protobuf and a current position +type BufferReader struct { + Buf []byte + Pos int +} + +// NewBufferReader creates a new BufferRead from a protobuf +func NewBufferReader(buf []byte) BufferReader { + return BufferReader{Buf: buf, Pos: 0} +} + +var errOutOfBounds = errors.New("protobuf decoding: out of bounds") +var errOverflow = errors.New("proto: integer overflow") + +func (b *BufferReader) DecodeVarintSlow() (x uint64, err error) { + i := b.Pos + l := len(b.Buf) + + for shift := uint(0); shift < 64; shift += 7 { + if i >= l { + err = io.ErrUnexpectedEOF + return + } + v := b.Buf[i] + i++ + x |= (uint64(v) & 0x7F) << shift + if v < 0x80 { + b.Pos = i + return + } + } + + // The number is too large to represent in a 64-bit value. 
+ err = errOverflow + return +} + +// decodeVarint decodes a varint at the current position +func (b *BufferReader) DecodeVarint() (x uint64, err error) { + i := b.Pos + buf := b.Buf + + if i >= len(buf) { + return 0, io.ErrUnexpectedEOF + } else if buf[i] < 0x80 { + b.Pos++ + return uint64(buf[i]), nil + } else if len(buf)-i < 10 { + return b.DecodeVarintSlow() + } + + var v uint64 + // we already checked the first byte + x = uint64(buf[i]) & 127 + i++ + + v = uint64(buf[i]) + i++ + x |= (v & 127) << 7 + if v < 128 { + goto done + } + + v = uint64(buf[i]) + i++ + x |= (v & 127) << 14 + if v < 128 { + goto done + } + + v = uint64(buf[i]) + i++ + x |= (v & 127) << 21 + if v < 128 { + goto done + } + + v = uint64(buf[i]) + i++ + x |= (v & 127) << 28 + if v < 128 { + goto done + } + + v = uint64(buf[i]) + i++ + x |= (v & 127) << 35 + if v < 128 { + goto done + } + + v = uint64(buf[i]) + i++ + x |= (v & 127) << 42 + if v < 128 { + goto done + } + + v = uint64(buf[i]) + i++ + x |= (v & 127) << 49 + if v < 128 { + goto done + } + + v = uint64(buf[i]) + i++ + x |= (v & 127) << 56 + if v < 128 { + goto done + } + + v = uint64(buf[i]) + i++ + x |= (v & 127) << 63 + if v < 128 { + goto done + } + + return 0, errOverflow + +done: + b.Pos = i + return +} + +// decodeVarint32 decodes a varint32 at the current position +func (b *BufferReader) DecodeVarint32() (x uint32, err error) { + i := b.Pos + buf := b.Buf + + if i >= len(buf) { + return 0, io.ErrUnexpectedEOF + } else if buf[i] < 0x80 { + b.Pos++ + return uint32(buf[i]), nil + } else if len(buf)-i < 5 { + v, err := b.DecodeVarintSlow() + return uint32(v), err + } + + var v uint32 + // we already checked the first byte + x = uint32(buf[i]) & 127 + i++ + + v = uint32(buf[i]) + i++ + x |= (v & 127) << 7 + if v < 128 { + goto done + } + + v = uint32(buf[i]) + i++ + x |= (v & 127) << 14 + if v < 128 { + goto done + } + + v = uint32(buf[i]) + i++ + x |= (v & 127) << 21 + if v < 128 { + goto done + } + + v = uint32(buf[i]) + i++ + x |= (v & 127) << 28 + if v < 128 { + goto done + } + + return 0, errOverflow + +done: + b.Pos = i + return +} + +// skipValue skips a value in the protobuf, based on the specified tag +func (b *BufferReader) SkipValue(tag uint32) (err error) { + wireType := tag & 0x7 + switch protowire.Type(wireType) { + case protowire.VarintType: + err = b.SkipVarint() + case protowire.Fixed64Type: + err = b.SkipFixed64() + case protowire.BytesType: + var n uint32 + n, err = b.DecodeVarint32() + if err == nil { + err = b.Skip(int(n)) + } + case protowire.StartGroupType: + err = b.SkipGroup(tag) + case protowire.Fixed32Type: + err = b.SkipFixed32() + default: + err = fmt.Errorf("Unexpected wire type (%d)", wireType) + } + return +} + +// skipGroup skips a group with the specified tag. 
It executes efficiently using a tag stack +func (b *BufferReader) SkipGroup(tag uint32) (err error) { + tagStack := make([]uint32, 0, 16) + tagStack = append(tagStack, tag) + var n uint32 + for len(tagStack) > 0 { + tag, err = b.DecodeVarint32() + if err != nil { + return err + } + switch protowire.Type(tag & 0x7) { + case protowire.VarintType: + err = b.SkipVarint() + case protowire.Fixed64Type: + err = b.Skip(8) + case protowire.BytesType: + n, err = b.DecodeVarint32() + if err == nil { + err = b.Skip(int(n)) + } + case protowire.StartGroupType: + tagStack = append(tagStack, tag) + case protowire.Fixed32Type: + err = b.SkipFixed32() + case protowire.EndGroupType: + if protoFieldNumber(tagStack[len(tagStack)-1]) == protoFieldNumber(tag) { + tagStack = tagStack[:len(tagStack)-1] + } else { + err = fmt.Errorf("end group tag %d does not match begin group tag %d at pos %d", + protoFieldNumber(tag), protoFieldNumber(tagStack[len(tagStack)-1]), b.Pos) + } + } + if err != nil { + return err + } + } + return nil +} + +// skipVarint effiently skips a varint +func (b *BufferReader) SkipVarint() (err error) { + i := b.Pos + + if len(b.Buf)-i < 10 { + // Use DecodeVarintSlow() to check for buffer overflow, but ignore result + if _, err := b.DecodeVarintSlow(); err != nil { + return err + } + return nil + } + + if b.Buf[i] < 0x80 { + goto out + } + i++ + + if b.Buf[i] < 0x80 { + goto out + } + i++ + + if b.Buf[i] < 0x80 { + goto out + } + i++ + + if b.Buf[i] < 0x80 { + goto out + } + i++ + + if b.Buf[i] < 0x80 { + goto out + } + i++ + + if b.Buf[i] < 0x80 { + goto out + } + i++ + + if b.Buf[i] < 0x80 { + goto out + } + i++ + + if b.Buf[i] < 0x80 { + goto out + } + i++ + + if b.Buf[i] < 0x80 { + goto out + } + i++ + + if b.Buf[i] < 0x80 { + goto out + } + return errOverflow + +out: + b.Pos = i + 1 + return nil +} + +// skip skips the specified number of bytes +func (b *BufferReader) Skip(n int) (err error) { + if len(b.Buf) < b.Pos+n { + return io.ErrUnexpectedEOF + } + b.Pos += n + return +} + +// skipFixed64 skips a fixed64 +func (b *BufferReader) SkipFixed64() (err error) { + return b.Skip(8) +} + +// skipFixed32 skips a fixed32 +func (b *BufferReader) SkipFixed32() (err error) { + return b.Skip(4) +} + +// skipBytes skips a set of bytes +func (b *BufferReader) SkipBytes() (err error) { + n, err := b.DecodeVarint32() + if err != nil { + return err + } + return b.Skip(int(n)) +} + +// Done returns whether we are at the end of the protobuf +func (b *BufferReader) Done() bool { + return b.Pos == len(b.Buf) +} + +// Remaining returns how many bytes remain +func (b *BufferReader) Remaining() int { + return len(b.Buf) - b.Pos +} diff --git a/vendor/google.golang.org/protobuf/internal/protolazy/lazy.go b/vendor/google.golang.org/protobuf/internal/protolazy/lazy.go new file mode 100644 index 000000000..ff4d4834b --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/protolazy/lazy.go @@ -0,0 +1,359 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package protolazy contains internal data structures for lazy message decoding. 
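
Editor's note: the unrolled DecodeVarint/DecodeVarint32/SkipVarint bodies above all implement the standard protobuf base-128 varint rule: each byte carries 7 payload bits, the 0x80 bit marks continuation, and groups are little-endian. Below is a compact loop-form sketch of the same rule (illustrative only, with simplified overflow handling; it is not the vendored implementation), plus the usual worked example 300 -> 0xAC 0x02.

package main

import (
	"errors"
	"fmt"
)

// decodeVarint accumulates 7 bits per byte until it sees a byte without the
// 0x80 continuation bit, the loop form of BufferReader.DecodeVarintSlow.
func decodeVarint(buf []byte) (uint64, int, error) {
	var x uint64
	for i := 0; i < len(buf) && i < 10; i++ {
		b := buf[i]
		x |= uint64(b&0x7F) << (7 * uint(i))
		if b < 0x80 {
			return x, i + 1, nil
		}
	}
	return 0, 0, errors.New("varint is truncated or exceeds 10 bytes")
}

func main() {
	// 300 = 0b1_0010_1100 encodes as 0xAC 0x02:
	// low 7 bits 0101100 with continuation bit -> 0xAC, remaining bits 10 -> 0x02.
	v, n, err := decodeVarint([]byte{0xAC, 0x02})
	fmt.Println(v, n, err) // 300 2 <nil>
}
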
+package protolazy + +import ( + "fmt" + "sort" + + "google.golang.org/protobuf/encoding/protowire" + piface "google.golang.org/protobuf/runtime/protoiface" +) + +// IndexEntry is the structure for an index of the fields in a message of a +// proto (not descending to sub-messages) +type IndexEntry struct { + FieldNum uint32 + // first byte of this tag/field + Start uint32 + // first byte after a contiguous sequence of bytes for this tag/field, which could + // include a single encoding of the field, or multiple encodings for the field + End uint32 + // True if this protobuf segment includes multiple encodings of the field + MultipleContiguous bool +} + +// XXX_lazyUnmarshalInfo has information about a particular lazily decoded message +// +// Deprecated: Do not use. This will be deleted in the near future. +type XXX_lazyUnmarshalInfo struct { + // Index of fields and their positions in the protobuf for this + // message. Make index be a pointer to a slice so it can be updated + // atomically. The index pointer is only set once (lazily when/if + // the index is first needed), and must always be SET and LOADED + // ATOMICALLY. + index *[]IndexEntry + // The protobuf associated with this lazily decoded message. It is + // only set during proto.Unmarshal(). It doesn't need to be set and + // loaded atomically, since any simultaneous set (Unmarshal) and read + // (during a get) would already be a race in the app code. + Protobuf []byte + // The flags present when Unmarshal was originally called for this particular message + unmarshalFlags piface.UnmarshalInputFlags +} + +// The Buffer and SetBuffer methods let v2/internal/impl interact with +// XXX_lazyUnmarshalInfo via an interface, to avoid an import cycle. + +// Buffer returns the lazy unmarshal buffer. +// +// Deprecated: Do not use. This will be deleted in the near future. +func (lazy *XXX_lazyUnmarshalInfo) Buffer() []byte { + return lazy.Protobuf +} + +// SetBuffer sets the lazy unmarshal buffer. +// +// Deprecated: Do not use. This will be deleted in the near future. +func (lazy *XXX_lazyUnmarshalInfo) SetBuffer(b []byte) { + lazy.Protobuf = b +} + +// SetUnmarshalFlags is called to set a copy of the original unmarshalInputFlags. +// The flags should reflect how Unmarshal was called. +func (lazy *XXX_lazyUnmarshalInfo) SetUnmarshalFlags(f piface.UnmarshalInputFlags) { + lazy.unmarshalFlags = f +} + +// UnmarshalFlags returns the original unmarshalInputFlags. +func (lazy *XXX_lazyUnmarshalInfo) UnmarshalFlags() piface.UnmarshalInputFlags { + return lazy.unmarshalFlags +} + +// AllowedPartial returns true if the user originally unmarshalled this message with +// AllowPartial set to true +func (lazy *XXX_lazyUnmarshalInfo) AllowedPartial() bool { + return (lazy.unmarshalFlags & piface.UnmarshalCheckRequired) == 0 +} + +func protoFieldNumber(tag uint32) uint32 { + return tag >> 3 +} + +// buildIndex builds an index of the specified protobuf, return the index +// array and an error. 
+func buildIndex(buf []byte) ([]IndexEntry, error) { + index := make([]IndexEntry, 0, 16) + var lastProtoFieldNum uint32 + var outOfOrder bool + + var r BufferReader = NewBufferReader(buf) + + for !r.Done() { + var tag uint32 + var err error + var curPos = r.Pos + // INLINED: tag, err = r.DecodeVarint32() + { + i := r.Pos + buf := r.Buf + + if i >= len(buf) { + return nil, errOutOfBounds + } else if buf[i] < 0x80 { + r.Pos++ + tag = uint32(buf[i]) + } else if r.Remaining() < 5 { + var v uint64 + v, err = r.DecodeVarintSlow() + tag = uint32(v) + } else { + var v uint32 + // we already checked the first byte + tag = uint32(buf[i]) & 127 + i++ + + v = uint32(buf[i]) + i++ + tag |= (v & 127) << 7 + if v < 128 { + goto done + } + + v = uint32(buf[i]) + i++ + tag |= (v & 127) << 14 + if v < 128 { + goto done + } + + v = uint32(buf[i]) + i++ + tag |= (v & 127) << 21 + if v < 128 { + goto done + } + + v = uint32(buf[i]) + i++ + tag |= (v & 127) << 28 + if v < 128 { + goto done + } + + return nil, errOutOfBounds + + done: + r.Pos = i + } + } + // DONE: tag, err = r.DecodeVarint32() + + fieldNum := protoFieldNumber(tag) + if fieldNum < lastProtoFieldNum { + outOfOrder = true + } + + // Skip the current value -- will skip over an entire group as well. + // INLINED: err = r.SkipValue(tag) + wireType := tag & 0x7 + switch protowire.Type(wireType) { + case protowire.VarintType: + // INLINED: err = r.SkipVarint() + i := r.Pos + + if len(r.Buf)-i < 10 { + // Use DecodeVarintSlow() to skip while + // checking for buffer overflow, but ignore result + _, err = r.DecodeVarintSlow() + goto out2 + } + if r.Buf[i] < 0x80 { + goto out + } + i++ + + if r.Buf[i] < 0x80 { + goto out + } + i++ + + if r.Buf[i] < 0x80 { + goto out + } + i++ + + if r.Buf[i] < 0x80 { + goto out + } + i++ + + if r.Buf[i] < 0x80 { + goto out + } + i++ + + if r.Buf[i] < 0x80 { + goto out + } + i++ + + if r.Buf[i] < 0x80 { + goto out + } + i++ + + if r.Buf[i] < 0x80 { + goto out + } + i++ + + if r.Buf[i] < 0x80 { + goto out + } + i++ + + if r.Buf[i] < 0x80 { + goto out + } + return nil, errOverflow + out: + r.Pos = i + 1 + // DONE: err = r.SkipVarint() + case protowire.Fixed64Type: + err = r.SkipFixed64() + case protowire.BytesType: + var n uint32 + n, err = r.DecodeVarint32() + if err == nil { + err = r.Skip(int(n)) + } + case protowire.StartGroupType: + err = r.SkipGroup(tag) + case protowire.Fixed32Type: + err = r.SkipFixed32() + default: + err = fmt.Errorf("Unexpected wire type (%d)", wireType) + } + // DONE: err = r.SkipValue(tag) + + out2: + if err != nil { + return nil, err + } + if fieldNum != lastProtoFieldNum { + index = append(index, IndexEntry{FieldNum: fieldNum, + Start: uint32(curPos), + End: uint32(r.Pos)}, + ) + } else { + index[len(index)-1].End = uint32(r.Pos) + index[len(index)-1].MultipleContiguous = true + } + lastProtoFieldNum = fieldNum + } + if outOfOrder { + sort.Slice(index, func(i, j int) bool { + return index[i].FieldNum < index[j].FieldNum || + (index[i].FieldNum == index[j].FieldNum && + index[i].Start < index[j].Start) + }) + } + return index, nil +} + +func (lazy *XXX_lazyUnmarshalInfo) SizeField(num uint32) (size int) { + start, end, found, _, multipleEntries := lazy.FindFieldInProto(num) + if multipleEntries != nil { + for _, entry := range multipleEntries { + size += int(entry.End - entry.Start) + } + return size + } + if !found { + return 0 + } + return int(end - start) +} + +func (lazy *XXX_lazyUnmarshalInfo) AppendField(b []byte, num uint32) ([]byte, bool) { + start, end, found, _, multipleEntries := 
lazy.FindFieldInProto(num) + if multipleEntries != nil { + for _, entry := range multipleEntries { + b = append(b, lazy.Protobuf[entry.Start:entry.End]...) + } + return b, true + } + if !found { + return nil, false + } + b = append(b, lazy.Protobuf[start:end]...) + return b, true +} + +func (lazy *XXX_lazyUnmarshalInfo) SetIndex(index []IndexEntry) { + atomicStoreIndex(&lazy.index, &index) +} + +// FindFieldInProto looks for field fieldNum in lazyUnmarshalInfo information +// (including protobuf), returns startOffset/endOffset/found. +func (lazy *XXX_lazyUnmarshalInfo) FindFieldInProto(fieldNum uint32) (start, end uint32, found, multipleContiguous bool, multipleEntries []IndexEntry) { + if lazy.Protobuf == nil { + // There is no backing protobuf for this message -- it was made from a builder + return 0, 0, false, false, nil + } + index := atomicLoadIndex(&lazy.index) + if index == nil { + r, err := buildIndex(lazy.Protobuf) + if err != nil { + panic(fmt.Sprintf("findFieldInfo: error building index when looking for field %d: %v", fieldNum, err)) + } + // lazy.index is a pointer to the slice returned by BuildIndex + index = &r + atomicStoreIndex(&lazy.index, index) + } + return lookupField(index, fieldNum) +} + +// lookupField returns the offset at which the indicated field starts using +// the index, offset immediately after field ends (including all instances of +// a repeated field), and bools indicating if field was found and if there +// are multiple encodings of the field in the byte range. +// +// To hande the uncommon case where there are repeated encodings for the same +// field which are not consecutive in the protobuf (so we need to returns +// multiple start/end offsets), we also return a slice multipleEntries. If +// multipleEntries is non-nil, then multiple entries were found, and the +// values in the slice should be used, rather than start/end/found. +func lookupField(indexp *[]IndexEntry, fieldNum uint32) (start, end uint32, found bool, multipleContiguous bool, multipleEntries []IndexEntry) { + // The pointer indexp to the index was already loaded atomically. + // The slice is uniquely associated with the pointer, so it doesn't + // need to be loaded atomically. + index := *indexp + for i, entry := range index { + if fieldNum == entry.FieldNum { + if i < len(index)-1 && entry.FieldNum == index[i+1].FieldNum { + // Handle the uncommon case where there are + // repeated entries for the same field which + // are not contiguous in the protobuf. + multiple := make([]IndexEntry, 1, 2) + multiple[0] = IndexEntry{fieldNum, entry.Start, entry.End, entry.MultipleContiguous} + i++ + for i < len(index) && index[i].FieldNum == fieldNum { + multiple = append(multiple, IndexEntry{fieldNum, index[i].Start, index[i].End, index[i].MultipleContiguous}) + i++ + } + return 0, 0, false, false, multiple + + } + return entry.Start, entry.End, true, entry.MultipleContiguous, nil + } + if fieldNum < entry.FieldNum { + return 0, 0, false, false, nil + } + } + return 0, 0, false, false, nil +} diff --git a/vendor/google.golang.org/protobuf/internal/protolazy/pointer_unsafe.go b/vendor/google.golang.org/protobuf/internal/protolazy/pointer_unsafe.go new file mode 100644 index 000000000..dc2a64ca6 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/protolazy/pointer_unsafe.go @@ -0,0 +1,17 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
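
Editor's note: for a concrete picture of what buildIndex and lookupField above operate on, consider a hypothetical wire-format message (the byte offsets below are made up for illustration) whose field 1 occupies bytes [0,2) and whose field 3 occupies bytes [2,9). The index is small and sorted by field number, so a field's raw bytes can be located without decoding the rest of the message. The real IndexEntry lives in an internal package and cannot be imported, so this sketch mirrors it with a local type and a simplified, single-range lookup.

package main

import "fmt"

// indexEntry mirrors protolazy.IndexEntry for illustration only.
type indexEntry struct {
	FieldNum   uint32
	Start, End uint32
}

// lookup is a simplified version of lookupField: it returns the byte range
// recorded for fieldNum, relying on the index being sorted by field number.
func lookup(index []indexEntry, fieldNum uint32) (start, end uint32, found bool) {
	for _, e := range index {
		if e.FieldNum == fieldNum {
			return e.Start, e.End, true
		}
		if e.FieldNum > fieldNum {
			break
		}
	}
	return 0, 0, false
}

func main() {
	// Hypothetical layout: field 1 at bytes [0,2), field 3 at bytes [2,9).
	index := []indexEntry{
		{FieldNum: 1, Start: 0, End: 2},
		{FieldNum: 3, Start: 2, End: 9},
	}
	fmt.Println(lookup(index, 3)) // 2 9 true
	fmt.Println(lookup(index, 2)) // 0 0 false
}
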
+ +package protolazy + +import ( + "sync/atomic" + "unsafe" +) + +func atomicLoadIndex(p **[]IndexEntry) *[]IndexEntry { + return (*[]IndexEntry)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) +} +func atomicStoreIndex(p **[]IndexEntry, v *[]IndexEntry) { + atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) +} diff --git a/vendor/google.golang.org/protobuf/internal/strs/strings_pure.go b/vendor/google.golang.org/protobuf/internal/strs/strings_pure.go deleted file mode 100644 index a1f6f3338..000000000 --- a/vendor/google.golang.org/protobuf/internal/strs/strings_pure.go +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build purego || appengine -// +build purego appengine - -package strs - -import pref "google.golang.org/protobuf/reflect/protoreflect" - -func UnsafeString(b []byte) string { - return string(b) -} - -func UnsafeBytes(s string) []byte { - return []byte(s) -} - -type Builder struct{} - -func (*Builder) AppendFullName(prefix pref.FullName, name pref.Name) pref.FullName { - return prefix.Append(name) -} - -func (*Builder) MakeString(b []byte) string { - return string(b) -} diff --git a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go index a008acd09..832a7988f 100644 --- a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go +++ b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go @@ -2,8 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build !purego && !appengine && !go1.21 -// +build !purego,!appengine,!go1.21 +//go:build !go1.21 package strs diff --git a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go index 60166f2ba..1ffddf687 100644 --- a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go +++ b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go @@ -2,8 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build !purego && !appengine && go1.21 -// +build !purego,!appengine,go1.21 +//go:build go1.21 package strs diff --git a/vendor/google.golang.org/protobuf/internal/version/version.go b/vendor/google.golang.org/protobuf/internal/version/version.go index dbbf1f686..01efc3303 100644 --- a/vendor/google.golang.org/protobuf/internal/version/version.go +++ b/vendor/google.golang.org/protobuf/internal/version/version.go @@ -51,8 +51,8 @@ import ( // 10. Send out the CL for review and submit it. 
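The new pointer_unsafe.go helpers publish that index once and let later readers load it without locks, via atomic.LoadPointer/StorePointer on an unsafe-cast pointer. A sketch of the same publish-once pattern written with the generics-based sync/atomic.Pointer available since Go 1.19 (an alternative formulation for illustration, not the vendored code; the IndexEntry fields are abbreviated):

// Illustrative sketch only, not part of the vendored diff.
package main

import (
	"fmt"
	"sync/atomic"
)

type IndexEntry struct{ FieldNum, Start, End uint32 }

type lazyInfo struct {
	index atomic.Pointer[[]IndexEntry] // nil until the index has been built
}

func (l *lazyInfo) loadIndex() *[]IndexEntry   { return l.index.Load() }
func (l *lazyInfo) storeIndex(v *[]IndexEntry) { l.index.Store(v) }

func main() {
	var l lazyInfo
	fmt.Println(l.loadIndex() == nil) // true: not built yet
	idx := []IndexEntry{{FieldNum: 1, Start: 0, End: 7}}
	l.storeIndex(&idx)
	fmt.Println((*l.loadIndex())[0].FieldNum) // 1
}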
const ( Major = 1 - Minor = 34 - Patch = 2 + Minor = 36 + Patch = 5 PreRelease = "" ) diff --git a/vendor/google.golang.org/protobuf/proto/decode.go b/vendor/google.golang.org/protobuf/proto/decode.go index d75a6534c..4cbf1aeaf 100644 --- a/vendor/google.golang.org/protobuf/proto/decode.go +++ b/vendor/google.golang.org/protobuf/proto/decode.go @@ -8,7 +8,6 @@ import ( "google.golang.org/protobuf/encoding/protowire" "google.golang.org/protobuf/internal/encoding/messageset" "google.golang.org/protobuf/internal/errors" - "google.golang.org/protobuf/internal/flags" "google.golang.org/protobuf/internal/genid" "google.golang.org/protobuf/internal/pragma" "google.golang.org/protobuf/reflect/protoreflect" @@ -47,6 +46,12 @@ type UnmarshalOptions struct { // RecursionLimit limits how deeply messages may be nested. // If zero, a default limit is applied. RecursionLimit int + + // + // NoLazyDecoding turns off lazy decoding, which otherwise is enabled by + // default. Lazy decoding only affects submessages (annotated with [lazy = + // true] in the .proto file) within messages that use the Opaque API. + NoLazyDecoding bool } // Unmarshal parses the wire-format message in b and places the result in m. @@ -104,6 +109,16 @@ func (o UnmarshalOptions) unmarshal(b []byte, m protoreflect.Message) (out proto if o.DiscardUnknown { in.Flags |= protoiface.UnmarshalDiscardUnknown } + + if !allowPartial { + // This does not affect how current unmarshal functions work, it just allows them + // to record this for lazy the decoding case. + in.Flags |= protoiface.UnmarshalCheckRequired + } + if o.NoLazyDecoding { + in.Flags |= protoiface.UnmarshalNoLazyDecoding + } + out, err = methods.Unmarshal(in) } else { o.RecursionLimit-- @@ -156,10 +171,6 @@ func (o UnmarshalOptions) unmarshalMessageSlow(b []byte, m protoreflect.Message) var err error if fd == nil { err = errUnknown - } else if flags.ProtoLegacy { - if fd.IsWeak() && fd.Message().IsPlaceholder() { - err = errUnknown // weak referent is not linked in - } } // Parse the field value. diff --git a/vendor/google.golang.org/protobuf/proto/encode.go b/vendor/google.golang.org/protobuf/proto/encode.go index 1f847bcc3..f0473c586 100644 --- a/vendor/google.golang.org/protobuf/proto/encode.go +++ b/vendor/google.golang.org/protobuf/proto/encode.go @@ -63,7 +63,8 @@ type MarshalOptions struct { // options (except for UseCachedSize itself). // // 2. The message and all its submessages have not changed in any - // way since the Size call. + // way since the Size call. For lazily decoded messages, accessing + // a message results in decoding the message, which is a change. // // If either of these invariants is violated, // the results are undefined and may include panics or corrupted output. diff --git a/vendor/google.golang.org/protobuf/proto/equal.go b/vendor/google.golang.org/protobuf/proto/equal.go index 1a0be1b03..c36d4a9cd 100644 --- a/vendor/google.golang.org/protobuf/proto/equal.go +++ b/vendor/google.golang.org/protobuf/proto/equal.go @@ -8,6 +8,7 @@ import ( "reflect" "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/runtime/protoiface" ) // Equal reports whether two messages are equal, @@ -51,6 +52,14 @@ func Equal(x, y Message) bool { if mx.IsValid() != my.IsValid() { return false } + + // Only one of the messages needs to implement the fast-path for it to work. 
+ pmx := protoMethods(mx) + pmy := protoMethods(my) + if pmx != nil && pmy != nil && pmx.Equal != nil && pmy.Equal != nil { + return pmx.Equal(protoiface.EqualInput{MessageA: mx, MessageB: my}).Equal + } + vx := protoreflect.ValueOfMessage(mx) vy := protoreflect.ValueOfMessage(my) return vx.Equal(vy) diff --git a/vendor/google.golang.org/protobuf/proto/extension.go b/vendor/google.golang.org/protobuf/proto/extension.go index d248f2928..78445d116 100644 --- a/vendor/google.golang.org/protobuf/proto/extension.go +++ b/vendor/google.golang.org/protobuf/proto/extension.go @@ -39,6 +39,48 @@ func ClearExtension(m Message, xt protoreflect.ExtensionType) { // If the field is unpopulated, it returns the default value for // scalars and an immutable, empty value for lists or messages. // It panics if xt does not extend m. +// +// The type of the value is dependent on the field type of the extension. +// For extensions generated by protoc-gen-go, the Go type is as follows: +// +// ╔═══════════════════╤═════════════════════════╗ +// ║ Go type │ Protobuf kind ║ +// ╠═══════════════════╪═════════════════════════╣ +// ║ bool │ bool ║ +// ║ int32 │ int32, sint32, sfixed32 ║ +// ║ int64 │ int64, sint64, sfixed64 ║ +// ║ uint32 │ uint32, fixed32 ║ +// ║ uint64 │ uint64, fixed64 ║ +// ║ float32 │ float ║ +// ║ float64 │ double ║ +// ║ string │ string ║ +// ║ []byte │ bytes ║ +// ║ protoreflect.Enum │ enum ║ +// ║ proto.Message │ message, group ║ +// ╚═══════════════════╧═════════════════════════╝ +// +// The protoreflect.Enum and proto.Message types are the concrete Go type +// associated with the named enum or message. Repeated fields are represented +// using a Go slice of the base element type. +// +// If a generated extension descriptor variable is directly passed to +// GetExtension, then the call should be followed immediately by a +// type assertion to the expected output value. For example: +// +// mm := proto.GetExtension(m, foopb.E_MyExtension).(*foopb.MyMessage) +// +// This pattern enables static analysis tools to verify that the asserted type +// matches the Go type associated with the extension field and +// also enables a possible future migration to a type-safe extension API. +// +// Since singular messages are the most common extension type, the pattern of +// calling HasExtension followed by GetExtension may be simplified to: +// +// if mm := proto.GetExtension(m, foopb.E_MyExtension).(*foopb.MyMessage); mm != nil { +// ... // make use of mm +// } +// +// The mm variable is non-nil if and only if HasExtension reports true. func GetExtension(m Message, xt protoreflect.ExtensionType) any { // Treat nil message interface as an empty message; return the default. if m == nil { @@ -51,6 +93,35 @@ func GetExtension(m Message, xt protoreflect.ExtensionType) any { // SetExtension stores the value of an extension field. // It panics if m is invalid, xt does not extend m, or if type of v // is invalid for the specified extension field. +// +// The type of the value is dependent on the field type of the extension. 
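Two of the user-visible additions in these hunks are the UnmarshalOptions.NoLazyDecoding flag and the generated Equal fast path that proto.Equal now dispatches to when it is available. A minimal usage sketch, using wrapperspb.StringValue purely as a convenient concrete message:

// Illustrative sketch only, not part of the vendored diff.
package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
	msg := wrapperspb.String("hello")
	b, err := proto.Marshal(msg)
	if err != nil {
		panic(err)
	}

	// NoLazyDecoding forces submessages annotated [lazy = true] (Opaque API
	// messages only) to be decoded eagerly; for an ordinary message like this
	// one it is effectively a no-op.
	got := &wrapperspb.StringValue{}
	if err := (proto.UnmarshalOptions{NoLazyDecoding: true}).Unmarshal(b, got); err != nil {
		panic(err)
	}

	// proto.Equal behaves as before; the change above only adds an internal
	// fast path when the messages expose an Equal method via protoiface.
	fmt.Println(proto.Equal(msg, got)) // true
}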
+// For extensions generated by protoc-gen-go, the Go type is as follows: +// +// ╔═══════════════════╤═════════════════════════╗ +// ║ Go type │ Protobuf kind ║ +// ╠═══════════════════╪═════════════════════════╣ +// ║ bool │ bool ║ +// ║ int32 │ int32, sint32, sfixed32 ║ +// ║ int64 │ int64, sint64, sfixed64 ║ +// ║ uint32 │ uint32, fixed32 ║ +// ║ uint64 │ uint64, fixed64 ║ +// ║ float32 │ float ║ +// ║ float64 │ double ║ +// ║ string │ string ║ +// ║ []byte │ bytes ║ +// ║ protoreflect.Enum │ enum ║ +// ║ proto.Message │ message, group ║ +// ╚═══════════════════╧═════════════════════════╝ +// +// The protoreflect.Enum and proto.Message types are the concrete Go type +// associated with the named enum or message. Repeated fields are represented +// using a Go slice of the base element type. +// +// If a generated extension descriptor variable is directly passed to +// SetExtension (e.g., foopb.E_MyExtension), then the value should be a +// concrete type that matches the expected Go type for the extension descriptor +// so that static analysis tools can verify type correctness. +// This also enables a possible future migration to a type-safe extension API. func SetExtension(m Message, xt protoreflect.ExtensionType, v any) { xd := xt.TypeDescriptor() pv := xt.ValueOf(v) diff --git a/vendor/google.golang.org/protobuf/proto/size.go b/vendor/google.golang.org/protobuf/proto/size.go index 052fb5ae3..c8675806c 100644 --- a/vendor/google.golang.org/protobuf/proto/size.go +++ b/vendor/google.golang.org/protobuf/proto/size.go @@ -12,11 +12,19 @@ import ( ) // Size returns the size in bytes of the wire-format encoding of m. +// +// Note that Size might return more bytes than Marshal will write in the case of +// lazily decoded messages that arrive in non-minimal wire format: see +// https://protobuf.dev/reference/go/size/ for more details. func Size(m Message) int { return MarshalOptions{}.Size(m) } // Size returns the size in bytes of the wire-format encoding of m. +// +// Note that Size might return more bytes than Marshal will write in the case of +// lazily decoded messages that arrive in non-minimal wire format: see +// https://protobuf.dev/reference/go/size/ for more details. func (o MarshalOptions) Size(m Message) int { // Treat a nil message interface as an empty message; nothing to output. if m == nil { diff --git a/vendor/google.golang.org/protobuf/proto/wrapperopaque.go b/vendor/google.golang.org/protobuf/proto/wrapperopaque.go new file mode 100644 index 000000000..267fd0f1f --- /dev/null +++ b/vendor/google.golang.org/protobuf/proto/wrapperopaque.go @@ -0,0 +1,80 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proto + +// ValueOrNil returns nil if has is false, or a pointer to a new variable +// containing the value returned by the specified getter. +// +// This function is similar to the wrappers (proto.Int32(), proto.String(), +// etc.), but is generic (works for any field type) and works with the hasser +// and getter of a field, as opposed to a value. +// +// This is convenient when populating builder fields. 
+// +// Example: +// +// hop := attr.GetDirectHop() +// injectedRoute := ripb.InjectedRoute_builder{ +// Prefixes: route.GetPrefixes(), +// NextHop: proto.ValueOrNil(hop.HasAddress(), hop.GetAddress), +// } +func ValueOrNil[T any](has bool, getter func() T) *T { + if !has { + return nil + } + v := getter() + return &v +} + +// ValueOrDefault returns the protobuf message val if val is not nil, otherwise +// it returns a pointer to an empty val message. +// +// This function allows for translating code from the old Open Struct API to the +// new Opaque API. +// +// The old Open Struct API represented oneof fields with a wrapper struct: +// +// var signedImg *accountpb.SignedImage +// profile := &accountpb.Profile{ +// // The Avatar oneof will be set, with an empty SignedImage. +// Avatar: &accountpb.Profile_SignedImage{signedImg}, +// } +// +// The new Opaque API treats oneof fields like regular fields, there are no more +// wrapper structs: +// +// var signedImg *accountpb.SignedImage +// profile := &accountpb.Profile{} +// profile.SetSignedImage(signedImg) +// +// For convenience, the Opaque API also offers Builders, which allow for a +// direct translation of struct initialization. However, because Builders use +// nilness to represent field presence (but there is no non-nil wrapper struct +// anymore), Builders cannot distinguish between an unset oneof and a set oneof +// with nil message. The above code would need to be translated with help of the +// ValueOrDefault function to retain the same behavior: +// +// var signedImg *accountpb.SignedImage +// return &accountpb.Profile_builder{ +// SignedImage: proto.ValueOrDefault(signedImg), +// }.Build() +func ValueOrDefault[T interface { + *P + Message +}, P any](val T) T { + if val == nil { + return T(new(P)) + } + return val +} + +// ValueOrDefaultBytes is like ValueOrDefault but for working with fields of +// type []byte. +func ValueOrDefaultBytes(val []byte) []byte { + if val == nil { + return []byte{} + } + return val +} diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go index 8fbecb4f5..823dbf3ba 100644 --- a/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go @@ -13,6 +13,8 @@ package protodesc import ( + "strings" + "google.golang.org/protobuf/internal/editionssupport" "google.golang.org/protobuf/internal/errors" "google.golang.org/protobuf/internal/filedesc" @@ -102,13 +104,17 @@ func (o FileOptions) New(fd *descriptorpb.FileDescriptorProto, r Resolver) (prot default: return nil, errors.New("invalid syntax: %q", fd.GetSyntax()) } - if f.L1.Syntax == protoreflect.Editions && (fd.GetEdition() < editionssupport.Minimum || fd.GetEdition() > editionssupport.Maximum) { - return nil, errors.New("use of edition %v not yet supported by the Go Protobuf runtime", fd.GetEdition()) - } f.L1.Path = fd.GetName() if f.L1.Path == "" { return nil, errors.New("file path must be populated") } + if f.L1.Syntax == protoreflect.Editions && (fd.GetEdition() < editionssupport.Minimum || fd.GetEdition() > editionssupport.Maximum) { + // Allow cmd/protoc-gen-go/testdata to use any edition for easier + // testing of upcoming edition features. 
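Because ValueOrNil is generic and ValueOrDefault/ValueOrDefaultBytes only need a message type or byte slice, the new helpers can be exercised without opaque-API generated code; in real use the bool/getter pair would come from a field's Has*/Get* methods as in the doc comment above, so the plain bool and closure here are only stand-ins:

// Illustrative sketch only, not part of the vendored diff.
package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
	// In real code the arguments would be a hasser/getter pair, e.g.
	// proto.ValueOrNil(hop.HasAddress(), hop.GetAddress).
	set := proto.ValueOrNil(true, func() int32 { return 42 })
	unset := proto.ValueOrNil(false, func() int32 { return 42 })
	fmt.Println(*set, unset == nil) // 42 true

	// ValueOrDefault maps a nil message pointer to an empty message.
	var sv *wrapperspb.StringValue
	fmt.Println(proto.ValueOrDefault(sv) != nil) // true

	// ValueOrDefaultBytes maps a nil slice (unset) to an empty, non-nil one.
	fmt.Println(proto.ValueOrDefaultBytes(nil) != nil) // true
}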
+ if !strings.HasPrefix(fd.GetName(), "cmd/protoc-gen-go/testdata/") { + return nil, errors.New("use of edition %v not yet supported by the Go Protobuf runtime", fd.GetEdition()) + } + } f.L1.Package = protoreflect.FullName(fd.GetPackage()) if !f.L1.Package.IsValid() && f.L1.Package != "" { return nil, errors.New("invalid package: %q", f.L1.Package) @@ -126,17 +132,11 @@ func (o FileOptions) New(fd *descriptorpb.FileDescriptorProto, r Resolver) (prot } f.L2.Imports[i].IsPublic = true } - for _, i := range fd.GetWeakDependency() { - if !(0 <= i && int(i) < len(f.L2.Imports)) || f.L2.Imports[i].IsWeak { - return nil, errors.New("invalid or duplicate weak import index: %d", i) - } - f.L2.Imports[i].IsWeak = true - } imps := importSet{f.Path(): true} for i, path := range fd.GetDependency() { imp := &f.L2.Imports[i] f, err := r.FindFileByPath(path) - if err == protoregistry.NotFound && (o.AllowUnresolvable || imp.IsWeak) { + if err == protoregistry.NotFound && o.AllowUnresolvable { f = filedesc.PlaceholderFile(path) } else if err != nil { return nil, errors.New("could not resolve import %q: %v", path, err) diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go index 856175542..9da34998b 100644 --- a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go @@ -149,7 +149,7 @@ func (r descsByName) initFieldsFromDescriptorProto(fds []*descriptorpb.FieldDesc if opts := fd.GetOptions(); opts != nil { opts = proto.Clone(opts).(*descriptorpb.FieldOptions) f.L1.Options = func() protoreflect.ProtoMessage { return opts } - f.L1.IsWeak = opts.GetWeak() + f.L1.IsLazy = opts.GetLazy() if opts.Packed != nil { f.L1.EditionFeatures.IsPacked = opts.GetPacked() } @@ -214,6 +214,9 @@ func (r descsByName) initExtensionDeclarations(xds []*descriptorpb.FieldDescript if xd.JsonName != nil { x.L2.StringName.InitJSON(xd.GetJsonName()) } + if x.L1.Kind == protoreflect.MessageKind && x.L1.EditionFeatures.IsDelimitedEncoded { + x.L1.Kind = protoreflect.GroupKind + } } return xs, nil } diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go index f3cebab29..ff692436e 100644 --- a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go @@ -43,7 +43,7 @@ func (r *resolver) resolveMessageDependencies(ms []filedesc.Message, mds []*desc o.L1.Fields.List = append(o.L1.Fields.List, f) } - if f.L1.Kind, f.L1.Enum, f.L1.Message, err = r.findTarget(f.Kind(), f.Parent().FullName(), partialName(fd.GetTypeName()), f.IsWeak()); err != nil { + if f.L1.Kind, f.L1.Enum, f.L1.Message, err = r.findTarget(f.Kind(), f.Parent().FullName(), partialName(fd.GetTypeName())); err != nil { return errors.New("message field %q cannot resolve type: %v", f.FullName(), err) } if f.L1.Kind == protoreflect.GroupKind && (f.IsMap() || f.IsMapEntry()) { @@ -73,10 +73,10 @@ func (r *resolver) resolveMessageDependencies(ms []filedesc.Message, mds []*desc func (r *resolver) resolveExtensionDependencies(xs []filedesc.Extension, xds []*descriptorpb.FieldDescriptorProto) (err error) { for i, xd := range xds { x := &xs[i] - if x.L1.Extendee, err = r.findMessageDescriptor(x.Parent().FullName(), partialName(xd.GetExtendee()), false); err != nil { + if x.L1.Extendee, err = r.findMessageDescriptor(x.Parent().FullName(), 
partialName(xd.GetExtendee())); err != nil { return errors.New("extension field %q cannot resolve extendee: %v", x.FullName(), err) } - if x.L1.Kind, x.L2.Enum, x.L2.Message, err = r.findTarget(x.Kind(), x.Parent().FullName(), partialName(xd.GetTypeName()), false); err != nil { + if x.L1.Kind, x.L2.Enum, x.L2.Message, err = r.findTarget(x.Kind(), x.Parent().FullName(), partialName(xd.GetTypeName())); err != nil { return errors.New("extension field %q cannot resolve type: %v", x.FullName(), err) } if xd.DefaultValue != nil { @@ -95,11 +95,11 @@ func (r *resolver) resolveServiceDependencies(ss []filedesc.Service, sds []*desc s := &ss[i] for j, md := range sd.GetMethod() { m := &s.L2.Methods.List[j] - m.L1.Input, err = r.findMessageDescriptor(m.Parent().FullName(), partialName(md.GetInputType()), false) + m.L1.Input, err = r.findMessageDescriptor(m.Parent().FullName(), partialName(md.GetInputType())) if err != nil { return errors.New("service method %q cannot resolve input: %v", m.FullName(), err) } - m.L1.Output, err = r.findMessageDescriptor(s.FullName(), partialName(md.GetOutputType()), false) + m.L1.Output, err = r.findMessageDescriptor(s.FullName(), partialName(md.GetOutputType())) if err != nil { return errors.New("service method %q cannot resolve output: %v", m.FullName(), err) } @@ -111,16 +111,16 @@ func (r *resolver) resolveServiceDependencies(ss []filedesc.Service, sds []*desc // findTarget finds an enum or message descriptor if k is an enum, message, // group, or unknown. If unknown, and the name could be resolved, the kind // returned kind is set based on the type of the resolved descriptor. -func (r *resolver) findTarget(k protoreflect.Kind, scope protoreflect.FullName, ref partialName, isWeak bool) (protoreflect.Kind, protoreflect.EnumDescriptor, protoreflect.MessageDescriptor, error) { +func (r *resolver) findTarget(k protoreflect.Kind, scope protoreflect.FullName, ref partialName) (protoreflect.Kind, protoreflect.EnumDescriptor, protoreflect.MessageDescriptor, error) { switch k { case protoreflect.EnumKind: - ed, err := r.findEnumDescriptor(scope, ref, isWeak) + ed, err := r.findEnumDescriptor(scope, ref) if err != nil { return 0, nil, nil, err } return k, ed, nil, nil case protoreflect.MessageKind, protoreflect.GroupKind: - md, err := r.findMessageDescriptor(scope, ref, isWeak) + md, err := r.findMessageDescriptor(scope, ref) if err != nil { return 0, nil, nil, err } @@ -129,7 +129,7 @@ func (r *resolver) findTarget(k protoreflect.Kind, scope protoreflect.FullName, // Handle unspecified kinds (possible with parsers that operate // on a per-file basis without knowledge of dependencies). 
d, err := r.findDescriptor(scope, ref) - if err == protoregistry.NotFound && (r.allowUnresolvable || isWeak) { + if err == protoregistry.NotFound && r.allowUnresolvable { return k, filedesc.PlaceholderEnum(ref.FullName()), filedesc.PlaceholderMessage(ref.FullName()), nil } else if err == protoregistry.NotFound { return 0, nil, nil, errors.New("%q not found", ref.FullName()) @@ -206,9 +206,9 @@ func (r *resolver) findDescriptor(scope protoreflect.FullName, ref partialName) } } -func (r *resolver) findEnumDescriptor(scope protoreflect.FullName, ref partialName, isWeak bool) (protoreflect.EnumDescriptor, error) { +func (r *resolver) findEnumDescriptor(scope protoreflect.FullName, ref partialName) (protoreflect.EnumDescriptor, error) { d, err := r.findDescriptor(scope, ref) - if err == protoregistry.NotFound && (r.allowUnresolvable || isWeak) { + if err == protoregistry.NotFound && r.allowUnresolvable { return filedesc.PlaceholderEnum(ref.FullName()), nil } else if err == protoregistry.NotFound { return nil, errors.New("%q not found", ref.FullName()) @@ -222,9 +222,9 @@ func (r *resolver) findEnumDescriptor(scope protoreflect.FullName, ref partialNa return ed, nil } -func (r *resolver) findMessageDescriptor(scope protoreflect.FullName, ref partialName, isWeak bool) (protoreflect.MessageDescriptor, error) { +func (r *resolver) findMessageDescriptor(scope protoreflect.FullName, ref partialName) (protoreflect.MessageDescriptor, error) { d, err := r.findDescriptor(scope, ref) - if err == protoregistry.NotFound && (r.allowUnresolvable || isWeak) { + if err == protoregistry.NotFound && r.allowUnresolvable { return filedesc.PlaceholderMessage(ref.FullName()), nil } else if err == protoregistry.NotFound { return nil, errors.New("%q not found", ref.FullName()) diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go index 6de31c2eb..c343d9227 100644 --- a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go @@ -149,12 +149,6 @@ func validateMessageDeclarations(file *filedesc.File, ms []filedesc.Message, mds return errors.New("message field %q under proto3 optional semantics must be within a single element oneof", f.FullName()) } } - if f.IsWeak() && !flags.ProtoLegacy { - return errors.New("message field %q is a weak field, which is a legacy proto1 feature that is no longer supported", f.FullName()) - } - if f.IsWeak() && (!f.HasPresence() || !isOptionalMessage(f) || f.ContainingOneof() != nil) { - return errors.New("message field %q may only be weak for an optional message", f.FullName()) - } if f.IsPacked() && !isPackable(f) { return errors.New("message field %q is not packable", f.FullName()) } @@ -199,9 +193,6 @@ func validateMessageDeclarations(file *filedesc.File, ms []filedesc.Message, mds if f.Cardinality() != protoreflect.Optional { return errors.New("message field %q belongs in a oneof and must be optional", f.FullName()) } - if f.IsWeak() { - return errors.New("message field %q belongs in a oneof and must not be a weak reference", f.FullName()) - } } } @@ -254,9 +245,6 @@ func validateExtensionDeclarations(f *filedesc.File, xs []filedesc.Extension, xd return errors.New("extension field %q has an invalid number: %d", x.FullName(), x.Number()) } } - if xd.GetOptions().GetWeak() { - return errors.New("extension field %q cannot be a weak reference", x.FullName()) - } if x.IsPacked() && !isPackable(x) { return 
errors.New("extension field %q is not packable", x.FullName()) } diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go b/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go index 804830eda..697a61b29 100644 --- a/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go @@ -11,10 +11,11 @@ import ( "google.golang.org/protobuf/internal/editiondefaults" "google.golang.org/protobuf/internal/filedesc" + "google.golang.org/protobuf/internal/genid" "google.golang.org/protobuf/proto" "google.golang.org/protobuf/reflect/protoreflect" "google.golang.org/protobuf/types/descriptorpb" - gofeaturespb "google.golang.org/protobuf/types/gofeaturespb" + "google.golang.org/protobuf/types/gofeaturespb" ) var defaults = &descriptorpb.FeatureSetDefaults{} @@ -43,6 +44,8 @@ func toEditionProto(ed filedesc.Edition) descriptorpb.Edition { return descriptorpb.Edition_EDITION_PROTO3 case filedesc.Edition2023: return descriptorpb.Edition_EDITION_2023 + case filedesc.Edition2024: + return descriptorpb.Edition_EDITION_2024 default: panic(fmt.Sprintf("unknown value for edition: %v", ed)) } @@ -123,10 +126,43 @@ func mergeEditionFeatures(parentDesc protoreflect.Descriptor, child *descriptorp parentFS.IsJSONCompliant = *jf == descriptorpb.FeatureSet_ALLOW } - if goFeatures, ok := proto.GetExtension(child, gofeaturespb.E_Go).(*gofeaturespb.GoFeatures); ok && goFeatures != nil { - if luje := goFeatures.LegacyUnmarshalJsonEnum; luje != nil { - parentFS.GenerateLegacyUnmarshalJSON = *luje - } + // We must not use proto.GetExtension(child, gofeaturespb.E_Go) + // because that only works for messages we generated, but not for + // dynamicpb messages. See golang/protobuf#1669. + // + // Further, we harden this code against adversarial inputs: a + // service which accepts descriptors from a possibly malicious + // source shouldn't crash. + goFeatures := child.ProtoReflect().Get(gofeaturespb.E_Go.TypeDescriptor()) + if !goFeatures.IsValid() { + return parentFS + } + gf, ok := goFeatures.Interface().(protoreflect.Message) + if !ok { + return parentFS + } + // gf.Interface() could be *dynamicpb.Message or *gofeaturespb.GoFeatures. 
+ fields := gf.Descriptor().Fields() + + if fd := fields.ByNumber(genid.GoFeatures_LegacyUnmarshalJsonEnum_field_number); fd != nil && + !fd.IsList() && + fd.Kind() == protoreflect.BoolKind && + gf.Has(fd) { + parentFS.GenerateLegacyUnmarshalJSON = gf.Get(fd).Bool() + } + + if fd := fields.ByNumber(genid.GoFeatures_StripEnumPrefix_field_number); fd != nil && + !fd.IsList() && + fd.Kind() == protoreflect.EnumKind && + gf.Has(fd) { + parentFS.StripEnumPrefix = int(gf.Get(fd).Enum()) + } + + if fd := fields.ByNumber(genid.GoFeatures_ApiLevel_field_number); fd != nil && + !fd.IsList() && + fd.Kind() == protoreflect.EnumKind && + gf.Has(fd) { + parentFS.APILevel = int(gf.Get(fd).Enum()) } return parentFS diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go b/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go index a5de8d400..9b880aa8c 100644 --- a/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go @@ -32,9 +32,6 @@ func ToFileDescriptorProto(file protoreflect.FileDescriptor) *descriptorpb.FileD if imp.IsPublic { p.PublicDependency = append(p.PublicDependency, int32(i)) } - if imp.IsWeak { - p.WeakDependency = append(p.WeakDependency, int32(i)) - } } for i, locs := 0, file.SourceLocations(); i < locs.Len(); i++ { loc := locs.Get(i) diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go index d5d5af6eb..742cb518c 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go @@ -23,6 +23,7 @@ type ( Unmarshal func(unmarshalInput) (unmarshalOutput, error) Merge func(mergeInput) mergeOutput CheckInitialized func(checkInitializedInput) (checkInitializedOutput, error) + Equal func(equalInput) equalOutput } supportFlags = uint64 sizeInput = struct { @@ -75,4 +76,13 @@ type ( checkInitializedOutput = struct { pragma.NoUnkeyedLiterals } + equalInput = struct { + pragma.NoUnkeyedLiterals + MessageA Message + MessageB Message + } + equalOutput = struct { + pragma.NoUnkeyedLiterals + Equal bool + } ) diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go index cd8fadbaf..cd7fbc87a 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go @@ -68,7 +68,7 @@ type Descriptor interface { // dependency is not resolved, in which case only name information is known. // // Placeholder types may only be returned by the following accessors - // as a result of unresolved dependencies or weak imports: + // as a result of unresolved dependencies: // // ╔═══════════════════════════════════╤═════════════════════╗ // ║ Accessor │ Descriptor ║ @@ -168,11 +168,7 @@ type FileImport struct { // The current file and the imported file must be within proto package. IsPublic bool - // IsWeak reports whether this is a weak import, which does not impose - // a direct dependency on the target file. - // - // Weak imports are a legacy proto1 feature. Equivalent behavior is - // achieved using proto2 extension fields or proto3 Any messages. + // Deprecated: support for weak fields has been removed. IsWeak bool } @@ -325,9 +321,7 @@ type FieldDescriptor interface { // specified in the source .proto file. 
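The mergeEditionFeatures change above deliberately avoids proto.GetExtension so that GoFeatures carried by a dynamicpb message, or by an adversarial descriptor, cannot trip a typed assertion; it resolves fields by number and checks their kind before reading them. A sketch of that defensive pattern on an arbitrary protoreflect.Message (field number 1 of BoolValue is chosen only for illustration):

// Illustrative sketch only, not part of the vendored diff.
package main

import (
	"fmt"

	"google.golang.org/protobuf/reflect/protoreflect"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

// readBoolByNumber looks the field up by number on the reflective descriptor
// and verifies its kind and cardinality before reading, instead of relying on
// a concrete generated type.
func readBoolByNumber(m protoreflect.Message, num protoreflect.FieldNumber) (bool, bool) {
	fd := m.Descriptor().Fields().ByNumber(num)
	if fd == nil || fd.IsList() || fd.Kind() != protoreflect.BoolKind || !m.Has(fd) {
		return false, false
	}
	return m.Get(fd).Bool(), true
}

func main() {
	m := wrapperspb.Bool(true).ProtoReflect()
	v, ok := readBoolByNumber(m, 1) // field 1 is BoolValue's "value" field
	fmt.Println(v, ok)              // true true
}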
HasOptionalKeyword() bool - // IsWeak reports whether this is a weak field, which does not impose a - // direct dependency on the target type. - // If true, then Message returns a placeholder type. + // Deprecated: support for weak fields has been removed. IsWeak() bool // IsPacked reports whether repeated primitive numeric kinds should be diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go index a7b0d06ff..a4b78acef 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go @@ -152,7 +152,7 @@ type Message interface { // This method may return nil. // // The returned methods type is identical to - // google.golang.org/protobuf/runtime/protoiface.Methods. + // [google.golang.org/protobuf/runtime/protoiface.Methods]. // Consult the protoiface package documentation for details. ProtoMethods() *methods } diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go deleted file mode 100644 index 75f83a2af..000000000 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go +++ /dev/null @@ -1,60 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build purego || appengine -// +build purego appengine - -package protoreflect - -import "google.golang.org/protobuf/internal/pragma" - -type valueType int - -const ( - nilType valueType = iota - boolType - int32Type - int64Type - uint32Type - uint64Type - float32Type - float64Type - stringType - bytesType - enumType - ifaceType -) - -// value is a union where only one type can be represented at a time. -// This uses a distinct field for each type. This is type safe in Go, but -// occupies more memory than necessary (72B). -type value struct { - pragma.DoNotCompare // 0B - - typ valueType // 8B - num uint64 // 8B - str string // 16B - bin []byte // 24B - iface any // 16B -} - -func valueOfString(v string) Value { - return Value{typ: stringType, str: v} -} -func valueOfBytes(v []byte) Value { - return Value{typ: bytesType, bin: v} -} -func valueOfIface(v any) Value { - return Value{typ: ifaceType, iface: v} -} - -func (v Value) getString() string { - return v.str -} -func (v Value) getBytes() []byte { - return v.bin -} -func (v Value) getIface() any { - return v.iface -} diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go index 7f3583ead..0015fcb35 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go @@ -2,8 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-//go:build !purego && !appengine && !go1.21 -// +build !purego,!appengine,!go1.21 +//go:build !go1.21 package protoreflect diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go index f7d386990..479527b58 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go @@ -2,8 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build !purego && !appengine && go1.21 -// +build !purego,!appengine,go1.21 +//go:build go1.21 package protoreflect diff --git a/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go b/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go index 44cf467d8..28e9e9f03 100644 --- a/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go +++ b/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go @@ -39,6 +39,9 @@ type Methods = struct { // CheckInitialized returns an error if any required fields in the message are not set. CheckInitialized func(CheckInitializedInput) (CheckInitializedOutput, error) + + // Equal compares two messages and returns EqualOutput.Equal == true if they are equal. + Equal func(EqualInput) EqualOutput } // SupportFlags indicate support for optional features. @@ -119,6 +122,22 @@ type UnmarshalInputFlags = uint8 const ( UnmarshalDiscardUnknown UnmarshalInputFlags = 1 << iota + + // UnmarshalAliasBuffer permits unmarshal operations to alias the input buffer. + // The unmarshaller must not modify the contents of the buffer. + UnmarshalAliasBuffer + + // UnmarshalValidated indicates that validation has already been + // performed on the input buffer. + UnmarshalValidated + + // UnmarshalCheckRequired is set if this unmarshal operation ultimately will care if required fields are + // initialized. + UnmarshalCheckRequired + + // UnmarshalNoLazyDecoding is set if this unmarshal operation should not use + // lazy decoding, even when otherwise available. + UnmarshalNoLazyDecoding ) // UnmarshalOutputFlags are output from the Unmarshal method. @@ -166,3 +185,18 @@ type CheckInitializedInput = struct { type CheckInitializedOutput = struct { pragma.NoUnkeyedLiterals } + +// EqualInput is input to the Equal method. +type EqualInput = struct { + pragma.NoUnkeyedLiterals + + MessageA protoreflect.Message + MessageB protoreflect.Message +} + +// EqualOutput is output from the Equal method. +type EqualOutput = struct { + pragma.NoUnkeyedLiterals + + Equal bool +} diff --git a/vendor/google.golang.org/protobuf/runtime/protoimpl/impl.go b/vendor/google.golang.org/protobuf/runtime/protoimpl/impl.go index 4a1ab7fb3..93df1b569 100644 --- a/vendor/google.golang.org/protobuf/runtime/protoimpl/impl.go +++ b/vendor/google.golang.org/protobuf/runtime/protoimpl/impl.go @@ -15,6 +15,7 @@ import ( "google.golang.org/protobuf/internal/filedesc" "google.golang.org/protobuf/internal/filetype" "google.golang.org/protobuf/internal/impl" + "google.golang.org/protobuf/internal/protolazy" ) // UnsafeEnabled specifies whether package unsafe can be used. 
@@ -39,6 +40,9 @@ type ( ExtensionFieldV1 = impl.ExtensionField Pointer = impl.Pointer + + LazyUnmarshalInfo = *protolazy.XXX_lazyUnmarshalInfo + RaceDetectHookData = impl.RaceDetectHookData ) var X impl.Export diff --git a/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go index 9403eb075..a51633767 100644 --- a/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go +++ b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go @@ -46,6 +46,7 @@ import ( protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" sync "sync" + unsafe "unsafe" ) // The full set of known editions. @@ -69,7 +70,7 @@ const ( Edition_EDITION_2023 Edition = 1000 Edition_EDITION_2024 Edition = 1001 // Placeholder editions for testing feature resolution. These should not be - // used or relyed on outside of tests. + // used or relied on outside of tests. Edition_EDITION_1_TEST_ONLY Edition = 1 Edition_EDITION_2_TEST_ONLY Edition = 2 Edition_EDITION_99997_TEST_ONLY Edition = 99997 @@ -577,8 +578,6 @@ func (FieldOptions_JSType) EnumDescriptor() ([]byte, []int) { } // If set to RETENTION_SOURCE, the option will be omitted from the binary. -// Note: as of January 2023, support for this is in progress and does not yet -// have an effect (b/264593489). type FieldOptions_OptionRetention int32 const ( @@ -640,8 +639,7 @@ func (FieldOptions_OptionRetention) EnumDescriptor() ([]byte, []int) { // This indicates the types of entities that the field may apply to when used // as an option. If it is unset, then the field may be freely used as an -// option on any kind of entity. Note: as of January 2023, support for this is -// in progress and does not yet have an effect (b/264593489). +// option on any kind of entity. type FieldOptions_OptionTargetType int32 const ( @@ -1208,20 +1206,18 @@ func (GeneratedCodeInfo_Annotation_Semantic) EnumDescriptor() ([]byte, []int) { // The protocol compiler can output a FileDescriptorSet containing the .proto // files it parses. type FileDescriptorSet struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - File []*FileDescriptorProto `protobuf:"bytes,1,rep,name=file" json:"file,omitempty"` + state protoimpl.MessageState `protogen:"open.v1"` + File []*FileDescriptorProto `protobuf:"bytes,1,rep,name=file" json:"file,omitempty"` + extensionFields protoimpl.ExtensionFields + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *FileDescriptorSet) Reset() { *x = FileDescriptorSet{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FileDescriptorSet) String() string { @@ -1232,7 +1228,7 @@ func (*FileDescriptorSet) ProtoMessage() {} func (x *FileDescriptorSet) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1256,12 +1252,9 @@ func (x *FileDescriptorSet) GetFile() []*FileDescriptorProto { // Describes a complete .proto file. 
type FileDescriptorProto struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` // file name, relative to root of source tree - Package *string `protobuf:"bytes,2,opt,name=package" json:"package,omitempty"` // e.g. "foo", "foo.bar", etc. + state protoimpl.MessageState `protogen:"open.v1"` + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` // file name, relative to root of source tree + Package *string `protobuf:"bytes,2,opt,name=package" json:"package,omitempty"` // e.g. "foo", "foo.bar", etc. // Names of files imported by this file. Dependency []string `protobuf:"bytes,3,rep,name=dependency" json:"dependency,omitempty"` // Indexes of the public imported files in the dependency list above. @@ -1286,16 +1279,16 @@ type FileDescriptorProto struct { // If `edition` is present, this value must be "editions". Syntax *string `protobuf:"bytes,12,opt,name=syntax" json:"syntax,omitempty"` // The edition of the proto file. - Edition *Edition `protobuf:"varint,14,opt,name=edition,enum=google.protobuf.Edition" json:"edition,omitempty"` + Edition *Edition `protobuf:"varint,14,opt,name=edition,enum=google.protobuf.Edition" json:"edition,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *FileDescriptorProto) Reset() { *x = FileDescriptorProto{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FileDescriptorProto) String() string { @@ -1306,7 +1299,7 @@ func (*FileDescriptorProto) ProtoMessage() {} func (x *FileDescriptorProto) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1414,10 +1407,7 @@ func (x *FileDescriptorProto) GetEdition() Edition { // Describes a message type. type DescriptorProto struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` Field []*FieldDescriptorProto `protobuf:"bytes,2,rep,name=field" json:"field,omitempty"` Extension []*FieldDescriptorProto `protobuf:"bytes,6,rep,name=extension" json:"extension,omitempty"` @@ -1429,16 +1419,16 @@ type DescriptorProto struct { ReservedRange []*DescriptorProto_ReservedRange `protobuf:"bytes,9,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"` // Reserved field names, which may not be used by fields in the same message. // A given name may only be reserved once. 
- ReservedName []string `protobuf:"bytes,10,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"` + ReservedName []string `protobuf:"bytes,10,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *DescriptorProto) Reset() { *x = DescriptorProto{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *DescriptorProto) String() string { @@ -1449,7 +1439,7 @@ func (*DescriptorProto) ProtoMessage() {} func (x *DescriptorProto) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1535,11 +1525,7 @@ func (x *DescriptorProto) GetReservedName() []string { } type ExtensionRangeOptions struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - extensionFields protoimpl.ExtensionFields - + state protoimpl.MessageState `protogen:"open.v1"` // The parser stores options it doesn't recognize here. See above. UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` // For external users: DO NOT USE. We are in the process of open sourcing @@ -1551,7 +1537,10 @@ type ExtensionRangeOptions struct { // The verification state of the range. // TODO: flip the default to DECLARATION once all empty ranges // are marked as UNVERIFIED. - Verification *ExtensionRangeOptions_VerificationState `protobuf:"varint,3,opt,name=verification,enum=google.protobuf.ExtensionRangeOptions_VerificationState,def=1" json:"verification,omitempty"` + Verification *ExtensionRangeOptions_VerificationState `protobuf:"varint,3,opt,name=verification,enum=google.protobuf.ExtensionRangeOptions_VerificationState,def=1" json:"verification,omitempty"` + extensionFields protoimpl.ExtensionFields + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } // Default values for ExtensionRangeOptions fields. @@ -1561,11 +1550,9 @@ const ( func (x *ExtensionRangeOptions) Reset() { *x = ExtensionRangeOptions{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ExtensionRangeOptions) String() string { @@ -1576,7 +1563,7 @@ func (*ExtensionRangeOptions) ProtoMessage() {} func (x *ExtensionRangeOptions) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1621,10 +1608,7 @@ func (x *ExtensionRangeOptions) GetVerification() ExtensionRangeOptions_Verifica // Describes a field within a message. 
type FieldDescriptorProto struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` Number *int32 `protobuf:"varint,3,opt,name=number" json:"number,omitempty"` Label *FieldDescriptorProto_Label `protobuf:"varint,4,opt,name=label,enum=google.protobuf.FieldDescriptorProto_Label" json:"label,omitempty"` @@ -1676,15 +1660,15 @@ type FieldDescriptorProto struct { // Proto2 optional fields do not set this flag, because they already indicate // optional with `LABEL_OPTIONAL`. Proto3Optional *bool `protobuf:"varint,17,opt,name=proto3_optional,json=proto3Optional" json:"proto3_optional,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *FieldDescriptorProto) Reset() { *x = FieldDescriptorProto{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FieldDescriptorProto) String() string { @@ -1695,7 +1679,7 @@ func (*FieldDescriptorProto) ProtoMessage() {} func (x *FieldDescriptorProto) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1789,21 +1773,18 @@ func (x *FieldDescriptorProto) GetProto3Optional() bool { // Describes a oneof. type OneofDescriptorProto struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Options *OneofOptions `protobuf:"bytes,2,opt,name=options" json:"options,omitempty"` unknownFields protoimpl.UnknownFields - - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Options *OneofOptions `protobuf:"bytes,2,opt,name=options" json:"options,omitempty"` + sizeCache protoimpl.SizeCache } func (x *OneofDescriptorProto) Reset() { *x = OneofDescriptorProto{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *OneofDescriptorProto) String() string { @@ -1814,7 +1795,7 @@ func (*OneofDescriptorProto) ProtoMessage() {} func (x *OneofDescriptorProto) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1845,10 +1826,7 @@ func (x *OneofDescriptorProto) GetOptions() *OneofOptions { // Describes an enum type. 
type EnumDescriptorProto struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` Value []*EnumValueDescriptorProto `protobuf:"bytes,2,rep,name=value" json:"value,omitempty"` Options *EnumOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` @@ -1858,16 +1836,16 @@ type EnumDescriptorProto struct { ReservedRange []*EnumDescriptorProto_EnumReservedRange `protobuf:"bytes,4,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"` // Reserved enum value names, which may not be reused. A given name may only // be reserved once. - ReservedName []string `protobuf:"bytes,5,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"` + ReservedName []string `protobuf:"bytes,5,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *EnumDescriptorProto) Reset() { *x = EnumDescriptorProto{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *EnumDescriptorProto) String() string { @@ -1878,7 +1856,7 @@ func (*EnumDescriptorProto) ProtoMessage() {} func (x *EnumDescriptorProto) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[6] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1930,22 +1908,19 @@ func (x *EnumDescriptorProto) GetReservedName() []string { // Describes a value within an enum. 
type EnumValueDescriptorProto struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Number *int32 `protobuf:"varint,2,opt,name=number" json:"number,omitempty"` + Options *EnumValueOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` unknownFields protoimpl.UnknownFields - - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Number *int32 `protobuf:"varint,2,opt,name=number" json:"number,omitempty"` - Options *EnumValueOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + sizeCache protoimpl.SizeCache } func (x *EnumValueDescriptorProto) Reset() { *x = EnumValueDescriptorProto{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *EnumValueDescriptorProto) String() string { @@ -1956,7 +1931,7 @@ func (*EnumValueDescriptorProto) ProtoMessage() {} func (x *EnumValueDescriptorProto) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[7] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1994,22 +1969,19 @@ func (x *EnumValueDescriptorProto) GetOptions() *EnumValueOptions { // Describes a service. type ServiceDescriptorProto struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Method []*MethodDescriptorProto `protobuf:"bytes,2,rep,name=method" json:"method,omitempty"` + Options *ServiceOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` unknownFields protoimpl.UnknownFields - - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Method []*MethodDescriptorProto `protobuf:"bytes,2,rep,name=method" json:"method,omitempty"` - Options *ServiceOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + sizeCache protoimpl.SizeCache } func (x *ServiceDescriptorProto) Reset() { *x = ServiceDescriptorProto{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[8] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ServiceDescriptorProto) String() string { @@ -2020,7 +1992,7 @@ func (*ServiceDescriptorProto) ProtoMessage() {} func (x *ServiceDescriptorProto) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[8] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2058,11 +2030,8 @@ func (x *ServiceDescriptorProto) GetOptions() *ServiceOptions { // Describes a method of a service. 
type MethodDescriptorProto struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + state protoimpl.MessageState `protogen:"open.v1"` + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` // Input and output type names. These are resolved in the same way as // FieldDescriptorProto.type_name, but must refer to a message type. InputType *string `protobuf:"bytes,2,opt,name=input_type,json=inputType" json:"input_type,omitempty"` @@ -2072,6 +2041,8 @@ type MethodDescriptorProto struct { ClientStreaming *bool `protobuf:"varint,5,opt,name=client_streaming,json=clientStreaming,def=0" json:"client_streaming,omitempty"` // Identifies if server streams multiple server messages ServerStreaming *bool `protobuf:"varint,6,opt,name=server_streaming,json=serverStreaming,def=0" json:"server_streaming,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } // Default values for MethodDescriptorProto fields. @@ -2082,11 +2053,9 @@ const ( func (x *MethodDescriptorProto) Reset() { *x = MethodDescriptorProto{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[9] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *MethodDescriptorProto) String() string { @@ -2097,7 +2066,7 @@ func (*MethodDescriptorProto) ProtoMessage() {} func (x *MethodDescriptorProto) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[9] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2155,11 +2124,7 @@ func (x *MethodDescriptorProto) GetServerStreaming() bool { } type FileOptions struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - extensionFields protoimpl.ExtensionFields - + state protoimpl.MessageState `protogen:"open.v1"` // Sets the Java package where classes generated from this .proto will be // placed. By default, the proto package is used, but this is often // inappropriate because proto packages do not normally start with backwards @@ -2251,6 +2216,9 @@ type FileOptions struct { // The parser stores options it doesn't recognize here. // See the documentation for the "Options" section above. UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + extensionFields protoimpl.ExtensionFields + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } // Default values for FileOptions fields. 
@@ -2267,11 +2235,9 @@ const ( func (x *FileOptions) Reset() { *x = FileOptions{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[10] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FileOptions) String() string { @@ -2282,7 +2248,7 @@ func (*FileOptions) ProtoMessage() {} func (x *FileOptions) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[10] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2446,11 +2412,7 @@ func (x *FileOptions) GetUninterpretedOption() []*UninterpretedOption { } type MessageOptions struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - extensionFields protoimpl.ExtensionFields - + state protoimpl.MessageState `protogen:"open.v1"` // Set true to use the old proto1 MessageSet wire format for extensions. // This is provided for backwards-compatibility with the MessageSet wire // format. You should not use this for any other reason: It's less @@ -2523,6 +2485,9 @@ type MessageOptions struct { Features *FeatureSet `protobuf:"bytes,12,opt,name=features" json:"features,omitempty"` // The parser stores options it doesn't recognize here. See above. UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + extensionFields protoimpl.ExtensionFields + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } // Default values for MessageOptions fields. @@ -2534,11 +2499,9 @@ const ( func (x *MessageOptions) Reset() { *x = MessageOptions{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[11] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *MessageOptions) String() string { @@ -2549,7 +2512,7 @@ func (*MessageOptions) ProtoMessage() {} func (x *MessageOptions) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[11] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2615,17 +2578,14 @@ func (x *MessageOptions) GetUninterpretedOption() []*UninterpretedOption { } type FieldOptions struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - extensionFields protoimpl.ExtensionFields - + state protoimpl.MessageState `protogen:"open.v1"` + // NOTE: ctype is deprecated. Use `features.(pb.cpp).string_type` instead. // The ctype option instructs the C++ code generator to use a different // representation of the field than it normally would. See the specific // options below. This option is only implemented to support use of // [ctype=CORD] and [ctype=STRING] (the default) on non-repeated fields of - // type "bytes" in the open source release -- sorry, we'll try to include - // other types in a future version! + // type "bytes" in the open source release. 
+ // TODO: make ctype actually deprecated. Ctype *FieldOptions_CType `protobuf:"varint,1,opt,name=ctype,enum=google.protobuf.FieldOptions_CType,def=0" json:"ctype,omitempty"` // The packed option can be enabled for repeated primitive fields to enable // a more efficient representation on the wire. Rather than repeatedly @@ -2692,6 +2652,9 @@ type FieldOptions struct { FeatureSupport *FieldOptions_FeatureSupport `protobuf:"bytes,22,opt,name=feature_support,json=featureSupport" json:"feature_support,omitempty"` // The parser stores options it doesn't recognize here. See above. UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + extensionFields protoimpl.ExtensionFields + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } // Default values for FieldOptions fields. @@ -2707,11 +2670,9 @@ const ( func (x *FieldOptions) Reset() { *x = FieldOptions{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[12] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FieldOptions) String() string { @@ -2722,7 +2683,7 @@ func (*FieldOptions) ProtoMessage() {} func (x *FieldOptions) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[12] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2836,24 +2797,21 @@ func (x *FieldOptions) GetUninterpretedOption() []*UninterpretedOption { } type OneofOptions struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - extensionFields protoimpl.ExtensionFields - + state protoimpl.MessageState `protogen:"open.v1"` // Any features defined in the specific edition. Features *FeatureSet `protobuf:"bytes,1,opt,name=features" json:"features,omitempty"` // The parser stores options it doesn't recognize here. See above. 
UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + extensionFields protoimpl.ExtensionFields + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *OneofOptions) Reset() { *x = OneofOptions{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[13] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *OneofOptions) String() string { @@ -2864,7 +2822,7 @@ func (*OneofOptions) ProtoMessage() {} func (x *OneofOptions) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[13] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2894,11 +2852,7 @@ func (x *OneofOptions) GetUninterpretedOption() []*UninterpretedOption { } type EnumOptions struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - extensionFields protoimpl.ExtensionFields - + state protoimpl.MessageState `protogen:"open.v1"` // Set this option to true to allow mapping different tag names to the same // value. AllowAlias *bool `protobuf:"varint,2,opt,name=allow_alias,json=allowAlias" json:"allow_alias,omitempty"` @@ -2920,6 +2874,9 @@ type EnumOptions struct { Features *FeatureSet `protobuf:"bytes,7,opt,name=features" json:"features,omitempty"` // The parser stores options it doesn't recognize here. See above. UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + extensionFields protoimpl.ExtensionFields + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } // Default values for EnumOptions fields. @@ -2929,11 +2886,9 @@ const ( func (x *EnumOptions) Reset() { *x = EnumOptions{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[14] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *EnumOptions) String() string { @@ -2944,7 +2899,7 @@ func (*EnumOptions) ProtoMessage() {} func (x *EnumOptions) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[14] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2996,11 +2951,7 @@ func (x *EnumOptions) GetUninterpretedOption() []*UninterpretedOption { } type EnumValueOptions struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - extensionFields protoimpl.ExtensionFields - + state protoimpl.MessageState `protogen:"open.v1"` // Is this enum value deprecated? 
// Depending on the target platform, this can emit Deprecated annotations // for the enum value, or it will be completely ignored; in the very least, @@ -3016,6 +2967,9 @@ type EnumValueOptions struct { FeatureSupport *FieldOptions_FeatureSupport `protobuf:"bytes,4,opt,name=feature_support,json=featureSupport" json:"feature_support,omitempty"` // The parser stores options it doesn't recognize here. See above. UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + extensionFields protoimpl.ExtensionFields + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } // Default values for EnumValueOptions fields. @@ -3026,11 +2980,9 @@ const ( func (x *EnumValueOptions) Reset() { *x = EnumValueOptions{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[15] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *EnumValueOptions) String() string { @@ -3041,7 +2993,7 @@ func (*EnumValueOptions) ProtoMessage() {} func (x *EnumValueOptions) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[15] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3092,11 +3044,7 @@ func (x *EnumValueOptions) GetUninterpretedOption() []*UninterpretedOption { } type ServiceOptions struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - extensionFields protoimpl.ExtensionFields - + state protoimpl.MessageState `protogen:"open.v1"` // Any features defined in the specific edition. Features *FeatureSet `protobuf:"bytes,34,opt,name=features" json:"features,omitempty"` // Is this service deprecated? @@ -3106,6 +3054,9 @@ type ServiceOptions struct { Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"` // The parser stores options it doesn't recognize here. See above. UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + extensionFields protoimpl.ExtensionFields + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } // Default values for ServiceOptions fields. 
@@ -3115,11 +3066,9 @@ const ( func (x *ServiceOptions) Reset() { *x = ServiceOptions{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[16] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ServiceOptions) String() string { @@ -3130,7 +3079,7 @@ func (*ServiceOptions) ProtoMessage() {} func (x *ServiceOptions) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[16] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3167,11 +3116,7 @@ func (x *ServiceOptions) GetUninterpretedOption() []*UninterpretedOption { } type MethodOptions struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - extensionFields protoimpl.ExtensionFields - + state protoimpl.MessageState `protogen:"open.v1"` // Is this method deprecated? // Depending on the target platform, this can emit Deprecated annotations // for the method, or it will be completely ignored; in the very least, @@ -3182,6 +3127,9 @@ type MethodOptions struct { Features *FeatureSet `protobuf:"bytes,35,opt,name=features" json:"features,omitempty"` // The parser stores options it doesn't recognize here. See above. UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + extensionFields protoimpl.ExtensionFields + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } // Default values for MethodOptions fields. @@ -3192,11 +3140,9 @@ const ( func (x *MethodOptions) Reset() { *x = MethodOptions{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[17] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *MethodOptions) String() string { @@ -3207,7 +3153,7 @@ func (*MethodOptions) ProtoMessage() {} func (x *MethodOptions) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[17] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3257,11 +3203,8 @@ func (x *MethodOptions) GetUninterpretedOption() []*UninterpretedOption { // or produced by Descriptor::CopyTo()) will never have UninterpretedOptions // in them. type UninterpretedOption struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Name []*UninterpretedOption_NamePart `protobuf:"bytes,2,rep,name=name" json:"name,omitempty"` + state protoimpl.MessageState `protogen:"open.v1"` + Name []*UninterpretedOption_NamePart `protobuf:"bytes,2,rep,name=name" json:"name,omitempty"` // The value of the uninterpreted option, in whatever type the tokenizer // identified it as during parsing. Exactly one of these should be set. 
IdentifierValue *string `protobuf:"bytes,3,opt,name=identifier_value,json=identifierValue" json:"identifier_value,omitempty"` @@ -3270,15 +3213,15 @@ type UninterpretedOption struct { DoubleValue *float64 `protobuf:"fixed64,6,opt,name=double_value,json=doubleValue" json:"double_value,omitempty"` StringValue []byte `protobuf:"bytes,7,opt,name=string_value,json=stringValue" json:"string_value,omitempty"` AggregateValue *string `protobuf:"bytes,8,opt,name=aggregate_value,json=aggregateValue" json:"aggregate_value,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *UninterpretedOption) Reset() { *x = UninterpretedOption{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[18] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *UninterpretedOption) String() string { @@ -3289,7 +3232,7 @@ func (*UninterpretedOption) ProtoMessage() {} func (x *UninterpretedOption) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[18] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3360,26 +3303,23 @@ func (x *UninterpretedOption) GetAggregateValue() string { // be designed and implemented to handle this, hopefully before we ever hit a // conflict here. type FeatureSet struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - extensionFields protoimpl.ExtensionFields - + state protoimpl.MessageState `protogen:"open.v1"` FieldPresence *FeatureSet_FieldPresence `protobuf:"varint,1,opt,name=field_presence,json=fieldPresence,enum=google.protobuf.FeatureSet_FieldPresence" json:"field_presence,omitempty"` EnumType *FeatureSet_EnumType `protobuf:"varint,2,opt,name=enum_type,json=enumType,enum=google.protobuf.FeatureSet_EnumType" json:"enum_type,omitempty"` RepeatedFieldEncoding *FeatureSet_RepeatedFieldEncoding `protobuf:"varint,3,opt,name=repeated_field_encoding,json=repeatedFieldEncoding,enum=google.protobuf.FeatureSet_RepeatedFieldEncoding" json:"repeated_field_encoding,omitempty"` Utf8Validation *FeatureSet_Utf8Validation `protobuf:"varint,4,opt,name=utf8_validation,json=utf8Validation,enum=google.protobuf.FeatureSet_Utf8Validation" json:"utf8_validation,omitempty"` MessageEncoding *FeatureSet_MessageEncoding `protobuf:"varint,5,opt,name=message_encoding,json=messageEncoding,enum=google.protobuf.FeatureSet_MessageEncoding" json:"message_encoding,omitempty"` JsonFormat *FeatureSet_JsonFormat `protobuf:"varint,6,opt,name=json_format,json=jsonFormat,enum=google.protobuf.FeatureSet_JsonFormat" json:"json_format,omitempty"` + extensionFields protoimpl.ExtensionFields + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *FeatureSet) Reset() { *x = FeatureSet{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[19] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[19] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FeatureSet) String() string { @@ -3390,7 +3330,7 @@ func (*FeatureSet) ProtoMessage() {} func (x *FeatureSet) ProtoReflect() 
protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[19] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3452,10 +3392,7 @@ func (x *FeatureSet) GetJsonFormat() FeatureSet_JsonFormat { // feature resolution. The resolution with this object becomes a simple search // for the closest matching edition, followed by proto merges. type FeatureSetDefaults struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` Defaults []*FeatureSetDefaults_FeatureSetEditionDefault `protobuf:"bytes,1,rep,name=defaults" json:"defaults,omitempty"` // The minimum supported edition (inclusive) when this was constructed. // Editions before this will not have defaults. @@ -3463,15 +3400,15 @@ type FeatureSetDefaults struct { // The maximum known edition (inclusive) when this was constructed. Editions // after this will not have reliable defaults. MaximumEdition *Edition `protobuf:"varint,5,opt,name=maximum_edition,json=maximumEdition,enum=google.protobuf.Edition" json:"maximum_edition,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *FeatureSetDefaults) Reset() { *x = FeatureSetDefaults{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[20] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[20] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FeatureSetDefaults) String() string { @@ -3482,7 +3419,7 @@ func (*FeatureSetDefaults) ProtoMessage() {} func (x *FeatureSetDefaults) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[20] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3521,10 +3458,7 @@ func (x *FeatureSetDefaults) GetMaximumEdition() Edition { // Encapsulates information about the original source file from which a // FileDescriptorProto was generated. type SourceCodeInfo struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // A Location identifies a piece of source code in a .proto file which // corresponds to a particular definition. This information is intended // to be useful to IDEs, code indexers, documentation generators, and similar @@ -3573,16 +3507,17 @@ type SourceCodeInfo struct { // - Code which tries to interpret locations should probably be designed to // ignore those that it doesn't understand, as more types of locations could // be recorded in the future. 
- Location []*SourceCodeInfo_Location `protobuf:"bytes,1,rep,name=location" json:"location,omitempty"` + Location []*SourceCodeInfo_Location `protobuf:"bytes,1,rep,name=location" json:"location,omitempty"` + extensionFields protoimpl.ExtensionFields + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *SourceCodeInfo) Reset() { *x = SourceCodeInfo{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[21] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[21] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *SourceCodeInfo) String() string { @@ -3593,7 +3528,7 @@ func (*SourceCodeInfo) ProtoMessage() {} func (x *SourceCodeInfo) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[21] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3619,22 +3554,19 @@ func (x *SourceCodeInfo) GetLocation() []*SourceCodeInfo_Location { // file. A GeneratedCodeInfo message is associated with only one generated // source file, but may contain references to different source .proto files. type GeneratedCodeInfo struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // An Annotation connects some span of text in generated code to an element // of its generating .proto file. - Annotation []*GeneratedCodeInfo_Annotation `protobuf:"bytes,1,rep,name=annotation" json:"annotation,omitempty"` + Annotation []*GeneratedCodeInfo_Annotation `protobuf:"bytes,1,rep,name=annotation" json:"annotation,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *GeneratedCodeInfo) Reset() { *x = GeneratedCodeInfo{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[22] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[22] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *GeneratedCodeInfo) String() string { @@ -3645,7 +3577,7 @@ func (*GeneratedCodeInfo) ProtoMessage() {} func (x *GeneratedCodeInfo) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[22] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3668,22 +3600,19 @@ func (x *GeneratedCodeInfo) GetAnnotation() []*GeneratedCodeInfo_Annotation { } type DescriptorProto_ExtensionRange struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` // Inclusive. + End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` // Exclusive. + Options *ExtensionRangeOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` unknownFields protoimpl.UnknownFields - - Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` // Inclusive. - End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` // Exclusive. 
- Options *ExtensionRangeOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + sizeCache protoimpl.SizeCache } func (x *DescriptorProto_ExtensionRange) Reset() { *x = DescriptorProto_ExtensionRange{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[23] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[23] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *DescriptorProto_ExtensionRange) String() string { @@ -3694,7 +3623,7 @@ func (*DescriptorProto_ExtensionRange) ProtoMessage() {} func (x *DescriptorProto_ExtensionRange) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[23] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3734,21 +3663,18 @@ func (x *DescriptorProto_ExtensionRange) GetOptions() *ExtensionRangeOptions { // fields or extension ranges in the same message. Reserved ranges may // not overlap. type DescriptorProto_ReservedRange struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` // Inclusive. + End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` // Exclusive. unknownFields protoimpl.UnknownFields - - Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` // Inclusive. - End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` // Exclusive. + sizeCache protoimpl.SizeCache } func (x *DescriptorProto_ReservedRange) Reset() { *x = DescriptorProto_ReservedRange{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[24] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[24] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *DescriptorProto_ReservedRange) String() string { @@ -3759,7 +3685,7 @@ func (*DescriptorProto_ReservedRange) ProtoMessage() {} func (x *DescriptorProto_ReservedRange) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[24] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3789,10 +3715,7 @@ func (x *DescriptorProto_ReservedRange) GetEnd() int32 { } type ExtensionRangeOptions_Declaration struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // The extension number declared within the extension range. Number *int32 `protobuf:"varint,1,opt,name=number" json:"number,omitempty"` // The fully-qualified name of the extension field. There must be a leading @@ -3808,16 +3731,16 @@ type ExtensionRangeOptions_Declaration struct { Reserved *bool `protobuf:"varint,5,opt,name=reserved" json:"reserved,omitempty"` // If true, indicates that the extension must be defined as repeated. // Otherwise the extension must be defined as optional. 
- Repeated *bool `protobuf:"varint,6,opt,name=repeated" json:"repeated,omitempty"` + Repeated *bool `protobuf:"varint,6,opt,name=repeated" json:"repeated,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *ExtensionRangeOptions_Declaration) Reset() { *x = ExtensionRangeOptions_Declaration{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[25] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[25] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ExtensionRangeOptions_Declaration) String() string { @@ -3828,7 +3751,7 @@ func (*ExtensionRangeOptions_Declaration) ProtoMessage() {} func (x *ExtensionRangeOptions_Declaration) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[25] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3885,21 +3808,18 @@ func (x *ExtensionRangeOptions_Declaration) GetRepeated() bool { // is inclusive such that it can appropriately represent the entire int32 // domain. type EnumDescriptorProto_EnumReservedRange struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` // Inclusive. + End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` // Inclusive. unknownFields protoimpl.UnknownFields - - Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` // Inclusive. - End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` // Inclusive. + sizeCache protoimpl.SizeCache } func (x *EnumDescriptorProto_EnumReservedRange) Reset() { *x = EnumDescriptorProto_EnumReservedRange{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[26] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[26] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *EnumDescriptorProto_EnumReservedRange) String() string { @@ -3910,7 +3830,7 @@ func (*EnumDescriptorProto_EnumReservedRange) ProtoMessage() {} func (x *EnumDescriptorProto_EnumReservedRange) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[26] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3940,21 +3860,18 @@ func (x *EnumDescriptorProto_EnumReservedRange) GetEnd() int32 { } type FieldOptions_EditionDefault struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Edition *Edition `protobuf:"varint,3,opt,name=edition,enum=google.protobuf.Edition" json:"edition,omitempty"` + Value *string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` // Textproto value. unknownFields protoimpl.UnknownFields - - Edition *Edition `protobuf:"varint,3,opt,name=edition,enum=google.protobuf.Edition" json:"edition,omitempty"` - Value *string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` // Textproto value. 
+ sizeCache protoimpl.SizeCache } func (x *FieldOptions_EditionDefault) Reset() { *x = FieldOptions_EditionDefault{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[27] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[27] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FieldOptions_EditionDefault) String() string { @@ -3965,7 +3882,7 @@ func (*FieldOptions_EditionDefault) ProtoMessage() {} func (x *FieldOptions_EditionDefault) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[27] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3996,10 +3913,7 @@ func (x *FieldOptions_EditionDefault) GetValue() string { // Information about the support window of a feature. type FieldOptions_FeatureSupport struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // The edition that this feature was first available in. In editions // earlier than this one, the default assigned to EDITION_LEGACY will be // used, and proto files will not be able to override it. @@ -4014,15 +3928,15 @@ type FieldOptions_FeatureSupport struct { // this one, the last default assigned will be used, and proto files will // not be able to override it. EditionRemoved *Edition `protobuf:"varint,4,opt,name=edition_removed,json=editionRemoved,enum=google.protobuf.Edition" json:"edition_removed,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *FieldOptions_FeatureSupport) Reset() { *x = FieldOptions_FeatureSupport{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[28] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[28] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FieldOptions_FeatureSupport) String() string { @@ -4033,7 +3947,7 @@ func (*FieldOptions_FeatureSupport) ProtoMessage() {} func (x *FieldOptions_FeatureSupport) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[28] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4082,21 +3996,18 @@ func (x *FieldOptions_FeatureSupport) GetEditionRemoved() Edition { // E.g.,{ ["foo", false], ["bar.baz", true], ["moo", false] } represents // "foo.(bar.baz).moo". 
type UninterpretedOption_NamePart struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + NamePart *string `protobuf:"bytes,1,req,name=name_part,json=namePart" json:"name_part,omitempty"` + IsExtension *bool `protobuf:"varint,2,req,name=is_extension,json=isExtension" json:"is_extension,omitempty"` unknownFields protoimpl.UnknownFields - - NamePart *string `protobuf:"bytes,1,req,name=name_part,json=namePart" json:"name_part,omitempty"` - IsExtension *bool `protobuf:"varint,2,req,name=is_extension,json=isExtension" json:"is_extension,omitempty"` + sizeCache protoimpl.SizeCache } func (x *UninterpretedOption_NamePart) Reset() { *x = UninterpretedOption_NamePart{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[29] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[29] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *UninterpretedOption_NamePart) String() string { @@ -4107,7 +4018,7 @@ func (*UninterpretedOption_NamePart) ProtoMessage() {} func (x *UninterpretedOption_NamePart) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[29] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4141,24 +4052,21 @@ func (x *UninterpretedOption_NamePart) GetIsExtension() bool { // the defaults at the closest matching edition ordered at or before it should // be used. This field must be in strict ascending order by edition. type FeatureSetDefaults_FeatureSetEditionDefault struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Edition *Edition `protobuf:"varint,3,opt,name=edition,enum=google.protobuf.Edition" json:"edition,omitempty"` + state protoimpl.MessageState `protogen:"open.v1"` + Edition *Edition `protobuf:"varint,3,opt,name=edition,enum=google.protobuf.Edition" json:"edition,omitempty"` // Defaults of features that can be overridden in this edition. OverridableFeatures *FeatureSet `protobuf:"bytes,4,opt,name=overridable_features,json=overridableFeatures" json:"overridable_features,omitempty"` // Defaults of features that can't be overridden in this edition. 
FixedFeatures *FeatureSet `protobuf:"bytes,5,opt,name=fixed_features,json=fixedFeatures" json:"fixed_features,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *FeatureSetDefaults_FeatureSetEditionDefault) Reset() { *x = FeatureSetDefaults_FeatureSetEditionDefault{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[30] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[30] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FeatureSetDefaults_FeatureSetEditionDefault) String() string { @@ -4169,7 +4077,7 @@ func (*FeatureSetDefaults_FeatureSetEditionDefault) ProtoMessage() {} func (x *FeatureSetDefaults_FeatureSetEditionDefault) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[30] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4206,10 +4114,7 @@ func (x *FeatureSetDefaults_FeatureSetEditionDefault) GetFixedFeatures() *Featur } type SourceCodeInfo_Location struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // Identifies which part of the FileDescriptorProto was defined at this // location. // @@ -4301,15 +4206,15 @@ type SourceCodeInfo_Location struct { LeadingComments *string `protobuf:"bytes,3,opt,name=leading_comments,json=leadingComments" json:"leading_comments,omitempty"` TrailingComments *string `protobuf:"bytes,4,opt,name=trailing_comments,json=trailingComments" json:"trailing_comments,omitempty"` LeadingDetachedComments []string `protobuf:"bytes,6,rep,name=leading_detached_comments,json=leadingDetachedComments" json:"leading_detached_comments,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *SourceCodeInfo_Location) Reset() { *x = SourceCodeInfo_Location{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[31] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[31] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *SourceCodeInfo_Location) String() string { @@ -4320,7 +4225,7 @@ func (*SourceCodeInfo_Location) ProtoMessage() {} func (x *SourceCodeInfo_Location) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[31] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4371,10 +4276,7 @@ func (x *SourceCodeInfo_Location) GetLeadingDetachedComments() []string { } type GeneratedCodeInfo_Annotation struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // Identifies the element in the original source .proto file. This field // is formatted the same as SourceCodeInfo.Location.path. 
Path []int32 `protobuf:"varint,1,rep,packed,name=path" json:"path,omitempty"` @@ -4386,17 +4288,17 @@ type GeneratedCodeInfo_Annotation struct { // Identifies the ending offset in bytes in the generated code that // relates to the identified object. The end offset should be one past // the last relevant byte (so the length of the text = end - begin). - End *int32 `protobuf:"varint,4,opt,name=end" json:"end,omitempty"` - Semantic *GeneratedCodeInfo_Annotation_Semantic `protobuf:"varint,5,opt,name=semantic,enum=google.protobuf.GeneratedCodeInfo_Annotation_Semantic" json:"semantic,omitempty"` + End *int32 `protobuf:"varint,4,opt,name=end" json:"end,omitempty"` + Semantic *GeneratedCodeInfo_Annotation_Semantic `protobuf:"varint,5,opt,name=semantic,enum=google.protobuf.GeneratedCodeInfo_Annotation_Semantic" json:"semantic,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *GeneratedCodeInfo_Annotation) Reset() { *x = GeneratedCodeInfo_Annotation{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[32] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[32] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *GeneratedCodeInfo_Annotation) String() string { @@ -4407,7 +4309,7 @@ func (*GeneratedCodeInfo_Annotation) ProtoMessage() {} func (x *GeneratedCodeInfo_Annotation) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[32] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4459,498 +4361,478 @@ func (x *GeneratedCodeInfo_Annotation) GetSemantic() GeneratedCodeInfo_Annotatio var File_google_protobuf_descriptor_proto protoreflect.FileDescriptor -var file_google_protobuf_descriptor_proto_rawDesc = []byte{ +var file_google_protobuf_descriptor_proto_rawDesc = string([]byte{ 0x0a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x22, 0x4d, 0x0a, 0x11, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, + 0x62, 0x75, 0x66, 0x22, 0x5b, 0x0a, 0x11, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x53, 0x65, 0x74, 0x12, 0x38, 0x0a, 0x04, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x04, 0x66, 0x69, - 0x6c, 0x65, 0x22, 0x98, 0x05, 0x0a, 0x13, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, - 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, - 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x18, - 0x0a, 0x07, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x07, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x65, - 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x64, 0x65, - 0x70, 0x65, 0x6e, 0x64, 
0x65, 0x6e, 0x63, 0x79, 0x12, 0x2b, 0x0a, 0x11, 0x70, 0x75, 0x62, 0x6c, - 0x69, 0x63, 0x5f, 0x64, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x0a, 0x20, - 0x03, 0x28, 0x05, 0x52, 0x10, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x44, 0x65, 0x70, 0x65, 0x6e, - 0x64, 0x65, 0x6e, 0x63, 0x79, 0x12, 0x27, 0x0a, 0x0f, 0x77, 0x65, 0x61, 0x6b, 0x5f, 0x64, 0x65, - 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x05, 0x52, 0x0e, - 0x77, 0x65, 0x61, 0x6b, 0x44, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x79, 0x12, 0x43, - 0x0a, 0x0c, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, - 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x0b, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x54, - 0x79, 0x70, 0x65, 0x12, 0x41, 0x0a, 0x09, 0x65, 0x6e, 0x75, 0x6d, 0x5f, 0x74, 0x79, 0x70, 0x65, - 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, - 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x08, 0x65, 0x6e, - 0x75, 0x6d, 0x54, 0x79, 0x70, 0x65, 0x12, 0x41, 0x0a, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, - 0x52, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x43, 0x0a, 0x09, 0x65, 0x78, 0x74, - 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, - 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, - 0x6f, 0x74, 0x6f, 0x52, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x36, - 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x49, 0x0a, 0x10, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, - 0x6f, 0x52, 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, - 0x6f, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x18, 0x0c, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x06, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x12, 0x32, 0x0a, 0x07, 0x65, 0x64, 0x69, - 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, - 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xb9, 0x06, - 0x0a, 0x0f, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 
0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, - 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3b, 0x0a, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x02, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, - 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x05, 0x66, 0x69, 0x65, - 0x6c, 0x64, 0x12, 0x43, 0x0a, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, - 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, - 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x09, 0x65, 0x78, - 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x0b, 0x6e, 0x65, 0x73, 0x74, 0x65, - 0x64, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, - 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x0a, - 0x6e, 0x65, 0x73, 0x74, 0x65, 0x64, 0x54, 0x79, 0x70, 0x65, 0x12, 0x41, 0x0a, 0x09, 0x65, 0x6e, - 0x75, 0x6d, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, + 0x6c, 0x65, 0x2a, 0x0c, 0x08, 0x80, 0xec, 0xca, 0xff, 0x01, 0x10, 0x81, 0xec, 0xca, 0xff, 0x01, + 0x22, 0x98, 0x05, 0x0a, 0x13, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x18, 0x0a, 0x07, + 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, + 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x65, 0x6e, 0x64, + 0x65, 0x6e, 0x63, 0x79, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x65, + 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x79, 0x12, 0x2b, 0x0a, 0x11, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, + 0x5f, 0x64, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x0a, 0x20, 0x03, 0x28, + 0x05, 0x52, 0x10, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x44, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, + 0x6e, 0x63, 0x79, 0x12, 0x27, 0x0a, 0x0f, 0x77, 0x65, 0x61, 0x6b, 0x5f, 0x64, 0x65, 0x70, 0x65, + 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x05, 0x52, 0x0e, 0x77, 0x65, + 0x61, 0x6b, 0x44, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x79, 0x12, 0x43, 0x0a, 0x0c, + 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x0b, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x54, 0x79, 0x70, + 0x65, 0x12, 0x41, 0x0a, 0x09, 0x65, 0x6e, 0x75, 0x6d, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x08, 
0x65, 0x6e, 0x75, 0x6d, + 0x54, 0x79, 0x70, 0x65, 0x12, 0x41, 0x0a, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, + 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x44, + 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x07, + 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x43, 0x0a, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, + 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, + 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, + 0x6f, 0x52, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x36, 0x0a, 0x07, + 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, - 0x6f, 0x74, 0x6f, 0x52, 0x08, 0x65, 0x6e, 0x75, 0x6d, 0x54, 0x79, 0x70, 0x65, 0x12, 0x58, 0x0a, - 0x0f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, - 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, - 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, - 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, - 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x44, 0x0a, 0x0a, 0x6f, 0x6e, 0x65, 0x6f, 0x66, - 0x5f, 0x64, 0x65, 0x63, 0x6c, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4f, 0x6e, - 0x65, 0x6f, 0x66, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, - 0x74, 0x6f, 0x52, 0x09, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x44, 0x65, 0x63, 0x6c, 0x12, 0x39, 0x0a, - 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, + 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x49, 0x0a, 0x10, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x63, + 0x6f, 0x64, 0x65, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, - 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x55, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x65, - 0x72, 0x76, 0x65, 0x64, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, - 0x74, 0x6f, 0x2e, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, - 0x52, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, - 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, - 
0x18, 0x0a, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, - 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0x7a, 0x0a, 0x0e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, - 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, - 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x12, 0x40, - 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, - 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x1a, 0x37, 0x0a, 0x0d, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, - 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, - 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x22, 0xcc, 0x04, 0x0a, 0x15, 0x45, 0x78, - 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, - 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, - 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, - 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x59, 0x0a, - 0x0b, 0x64, 0x65, 0x63, 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, - 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x44, 0x65, 0x63, 0x6c, 0x61, - 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x03, 0x88, 0x01, 0x02, 0x52, 0x0b, 0x64, 0x65, 0x63, - 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, - 0x75, 0x72, 0x65, 0x73, 0x18, 0x32, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, - 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, - 0x73, 0x12, 0x6d, 0x0a, 0x0c, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x38, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, - 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, - 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, - 0x65, 0x3a, 0x0a, 0x55, 0x4e, 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, 0x45, 0x44, 0x42, 0x03, 0x88, - 0x01, 0x02, 0x52, 0x0c, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x1a, 0x94, 0x01, 0x0a, 
0x0b, 0x44, 0x65, 0x63, 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, - 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x1b, 0x0a, 0x09, 0x66, 0x75, 0x6c, 0x6c, - 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x66, 0x75, 0x6c, - 0x6c, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x73, - 0x65, 0x72, 0x76, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x73, - 0x65, 0x72, 0x76, 0x65, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, - 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, - 0x64, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x22, 0x34, 0x0a, 0x11, 0x56, 0x65, 0x72, 0x69, 0x66, - 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x0f, 0x0a, 0x0b, - 0x44, 0x45, 0x43, 0x4c, 0x41, 0x52, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x00, 0x12, 0x0e, 0x0a, - 0x0a, 0x55, 0x4e, 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x01, 0x2a, 0x09, 0x08, - 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0xc1, 0x06, 0x0a, 0x14, 0x46, 0x69, 0x65, + 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, + 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, + 0x16, 0x0a, 0x06, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x06, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x12, 0x32, 0x0a, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, + 0x6f, 0x6e, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xb9, 0x06, 0x0a, 0x0f, + 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, + 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x12, 0x3b, 0x0a, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x02, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, + 0x12, 0x43, 0x0a, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x09, 0x65, 0x78, 0x74, 0x65, + 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x0b, 0x6e, 0x65, 0x73, 0x74, 0x65, 0x64, 0x5f, + 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x0a, 0x6e, 0x65, + 0x73, 0x74, 0x65, 0x64, 0x54, 0x79, 0x70, 0x65, 0x12, 0x41, 0x0a, 0x09, 0x65, 0x6e, 0x75, 0x6d, + 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 
0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, + 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, + 0x6f, 0x52, 0x08, 0x65, 0x6e, 0x75, 0x6d, 0x54, 0x79, 0x70, 0x65, 0x12, 0x58, 0x0a, 0x0f, 0x65, + 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x05, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, + 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x44, 0x0a, 0x0a, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x5f, 0x64, + 0x65, 0x63, 0x6c, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4f, 0x6e, 0x65, 0x6f, + 0x66, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, + 0x52, 0x09, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x44, 0x65, 0x63, 0x6c, 0x12, 0x39, 0x0a, 0x07, 0x6f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, + 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x55, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, + 0x65, 0x64, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2e, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, + 0x2e, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0d, + 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x23, 0x0a, + 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0a, + 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x4e, 0x61, + 0x6d, 0x65, 0x1a, 0x7a, 0x0a, 0x0e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, + 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, + 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x12, 0x40, 0x0a, 0x07, + 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x37, + 0x0a, 0x0d, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, + 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, + 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x22, 0xcc, 0x04, 0x0a, 0x15, 
0x45, 0x78, 0x74, 0x65, + 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, + 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, + 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, + 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x59, 0x0a, 0x0b, 0x64, + 0x65, 0x63, 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, + 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x44, 0x65, 0x63, 0x6c, 0x61, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x03, 0x88, 0x01, 0x02, 0x52, 0x0b, 0x64, 0x65, 0x63, 0x6c, 0x61, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, + 0x65, 0x73, 0x18, 0x32, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, + 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, + 0x6d, 0x0a, 0x0c, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x38, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x56, 0x65, + 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x3a, + 0x0a, 0x55, 0x4e, 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, 0x45, 0x44, 0x42, 0x03, 0x88, 0x01, 0x02, + 0x52, 0x0c, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x94, + 0x01, 0x0a, 0x0b, 0x44, 0x65, 0x63, 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, + 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, + 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x1b, 0x0a, 0x09, 0x66, 0x75, 0x6c, 0x6c, 0x5f, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x66, 0x75, 0x6c, 0x6c, 0x4e, + 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x65, 0x72, + 0x76, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x73, 0x65, 0x72, + 0x76, 0x65, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x18, + 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x4a, + 0x04, 0x08, 0x04, 0x10, 0x05, 0x22, 0x34, 0x0a, 0x11, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x44, 0x45, + 0x43, 0x4c, 0x41, 0x52, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x00, 0x12, 0x0e, 0x0a, 0x0a, 0x55, + 0x4e, 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x01, 0x2a, 0x09, 0x08, 0xe8, 0x07, + 
0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0xc1, 0x06, 0x0a, 0x14, 0x46, 0x69, 0x65, 0x6c, 0x64, + 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, + 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x05, 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x41, 0x0a, 0x05, 0x6c, + 0x61, 0x62, 0x65, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, - 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x41, 0x0a, - 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2b, 0x2e, 0x67, + 0x6f, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x52, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x3e, + 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, - 0x6f, 0x74, 0x6f, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x52, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, - 0x12, 0x3e, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2a, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, - 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, - 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, - 0x08, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x08, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x65, 0x66, - 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1f, - 0x0a, 0x0b, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x09, 0x20, - 0x01, 0x28, 0x05, 0x52, 0x0a, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, - 0x1b, 0x0a, 0x09, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0a, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x08, 0x6a, 0x73, 0x6f, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x07, - 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, + 0x6f, 0x74, 0x6f, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x1b, + 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x65, + 0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, + 0x78, 0x74, 0x65, 0x6e, 0x64, 
0x65, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x65, 0x66, 0x61, 0x75, + 0x6c, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, + 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1f, 0x0a, 0x0b, + 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x09, 0x20, 0x01, 0x28, + 0x05, 0x52, 0x0a, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x1b, 0x0a, + 0x09, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x08, 0x6a, 0x73, 0x6f, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x6f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, + 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x12, 0x27, 0x0a, 0x0f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, 0x5f, 0x6f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x18, 0x11, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x33, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x22, 0xb6, 0x02, 0x0a, + 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x44, 0x4f, + 0x55, 0x42, 0x4c, 0x45, 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, + 0x4c, 0x4f, 0x41, 0x54, 0x10, 0x02, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, + 0x4e, 0x54, 0x36, 0x34, 0x10, 0x03, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, + 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x04, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, + 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x05, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, + 0x46, 0x49, 0x58, 0x45, 0x44, 0x36, 0x34, 0x10, 0x06, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, + 0x45, 0x5f, 0x46, 0x49, 0x58, 0x45, 0x44, 0x33, 0x32, 0x10, 0x07, 0x12, 0x0d, 0x0a, 0x09, 0x54, + 0x59, 0x50, 0x45, 0x5f, 0x42, 0x4f, 0x4f, 0x4c, 0x10, 0x08, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, + 0x50, 0x45, 0x5f, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x09, 0x12, 0x0e, 0x0a, 0x0a, 0x54, + 0x59, 0x50, 0x45, 0x5f, 0x47, 0x52, 0x4f, 0x55, 0x50, 0x10, 0x0a, 0x12, 0x10, 0x0a, 0x0c, 0x54, + 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x0b, 0x12, 0x0e, 0x0a, + 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x0c, 0x12, 0x0f, 0x0a, + 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x0d, 0x12, 0x0d, + 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x10, 0x0e, 0x12, 0x11, 0x0a, + 0x0d, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x46, 0x49, 0x58, 0x45, 0x44, 0x33, 0x32, 0x10, 0x0f, + 0x12, 0x11, 0x0a, 0x0d, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x46, 0x49, 0x58, 0x45, 0x44, 0x36, + 0x34, 0x10, 0x10, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x49, 0x4e, 0x54, + 0x33, 0x32, 0x10, 0x11, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x49, 0x4e, + 0x54, 0x36, 0x34, 0x10, 0x12, 0x22, 0x43, 0x0a, 0x05, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x12, + 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x4f, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x41, 0x4c, + 0x10, 0x01, 0x12, 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x52, 0x45, 0x50, 0x45, + 0x41, 0x54, 0x45, 0x44, 0x10, 0x03, 0x12, 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, + 0x52, 0x45, 0x51, 0x55, 0x49, 0x52, 0x45, 0x44, 0x10, 
0x02, 0x22, 0x63, 0x0a, 0x14, 0x4f, 0x6e, + 0x65, 0x6f, 0x66, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, + 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x4f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, + 0xe3, 0x02, 0x0a, 0x13, 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3f, 0x0a, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, + 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, + 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x36, 0x0a, 0x07, + 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x27, 0x0a, 0x0f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, 0x5f, - 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x18, 0x11, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x22, 0xb6, - 0x02, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, - 0x44, 0x4f, 0x55, 0x42, 0x4c, 0x45, 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, - 0x5f, 0x46, 0x4c, 0x4f, 0x41, 0x54, 0x10, 0x02, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, - 0x5f, 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x03, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, - 0x5f, 0x55, 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x04, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, - 0x45, 0x5f, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x05, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, - 0x45, 0x5f, 0x46, 0x49, 0x58, 0x45, 0x44, 0x36, 0x34, 0x10, 0x06, 0x12, 0x10, 0x0a, 0x0c, 0x54, - 0x59, 0x50, 0x45, 0x5f, 0x46, 0x49, 0x58, 0x45, 0x44, 0x33, 0x32, 0x10, 0x07, 0x12, 0x0d, 0x0a, - 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x42, 0x4f, 0x4f, 0x4c, 0x10, 0x08, 0x12, 0x0f, 0x0a, 0x0b, - 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x09, 0x12, 0x0e, 0x0a, - 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x47, 0x52, 0x4f, 0x55, 0x50, 0x10, 0x0a, 0x12, 0x10, 0x0a, - 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x0b, 0x12, - 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x0c, 0x12, - 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x0d, - 0x12, 0x0d, 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x10, 0x0e, 0x12, - 0x11, 0x0a, 0x0d, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x46, 0x49, 0x58, 0x45, 0x44, 0x33, 0x32, - 0x10, 0x0f, 0x12, 0x11, 0x0a, 0x0d, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x46, 
0x49, 0x58, 0x45, - 0x44, 0x36, 0x34, 0x10, 0x10, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x49, - 0x4e, 0x54, 0x33, 0x32, 0x10, 0x11, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, - 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x12, 0x22, 0x43, 0x0a, 0x05, 0x4c, 0x61, 0x62, 0x65, 0x6c, - 0x12, 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x4f, 0x50, 0x54, 0x49, 0x4f, 0x4e, - 0x41, 0x4c, 0x10, 0x01, 0x12, 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x52, 0x45, - 0x50, 0x45, 0x41, 0x54, 0x45, 0x44, 0x10, 0x03, 0x12, 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, - 0x4c, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x49, 0x52, 0x45, 0x44, 0x10, 0x02, 0x22, 0x63, 0x0a, 0x14, - 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, - 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4f, 0x6e, 0x65, 0x6f, - 0x66, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x22, 0xe3, 0x02, 0x0a, 0x13, 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, - 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, - 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3f, 0x0a, - 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, + 0x45, 0x6e, 0x75, 0x6d, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x5d, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, + 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, - 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, - 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x36, + 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, + 0x74, 0x6f, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, + 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, + 0x6e, 0x67, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x65, + 0x72, 0x76, 0x65, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0x3b, 0x0a, 0x11, 0x45, 0x6e, 0x75, 0x6d, + 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, + 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74, + 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, + 0x52, 0x03, 0x65, 0x6e, 0x64, 0x22, 0x83, 0x01, 0x0a, 0x18, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, + 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, + 0x18, 
0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x3b, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x5d, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, - 0x65, 0x64, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x36, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, - 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, - 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, - 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, - 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, - 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0x3b, 0x0a, 0x11, 0x45, 0x6e, - 0x75, 0x6d, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, - 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, - 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x22, 0x83, 0x01, 0x0a, 0x18, 0x45, 0x6e, 0x75, 0x6d, - 0x56, 0x61, 0x6c, 0x75, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, - 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, - 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, - 0x12, 0x3b, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xa7, 0x01, - 0x0a, 0x16, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, - 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3e, 0x0a, 0x06, - 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, - 0x65, 0x74, 0x68, 0x6f, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, - 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x39, 0x0a, 0x07, - 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, + 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xa7, 0x01, 0x0a, 0x16, + 0x53, 0x65, 0x72, 0x76, 0x69, 
0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, + 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3e, 0x0a, 0x06, 0x6d, 0x65, + 0x74, 0x68, 0x6f, 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, + 0x68, 0x6f, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, + 0x74, 0x6f, 0x52, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x39, 0x0a, 0x07, 0x6f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x89, 0x02, 0x0a, 0x15, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, + 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, + 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x5f, 0x74, 0x79, 0x70, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x54, 0x79, + 0x70, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, 0x74, 0x79, 0x70, + 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x54, + 0x79, 0x70, 0x65, 0x12, 0x38, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x30, 0x0a, + 0x10, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, + 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0f, + 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x12, + 0x30, 0x0a, 0x10, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, + 0x69, 0x6e, 0x67, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, + 0x52, 0x0f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, + 0x67, 0x22, 0xad, 0x09, 0x0a, 0x0b, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6a, 0x61, 0x76, 0x61, 0x50, 0x61, 0x63, + 0x6b, 0x61, 0x67, 0x65, 0x12, 0x30, 0x0a, 0x14, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x6f, 0x75, 0x74, + 0x65, 0x72, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x08, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x12, 0x6a, 0x61, 0x76, 0x61, 0x4f, 0x75, 0x74, 0x65, 0x72, 0x43, 0x6c, 0x61, + 0x73, 0x73, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x35, 0x0a, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x6d, + 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x18, 0x0a, 0x20, + 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x6a, 0x61, 0x76, 0x61, + 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x46, 
0x69, 0x6c, 0x65, 0x73, 0x12, 0x44, 0x0a, + 0x1d, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x5f, 0x65, + 0x71, 0x75, 0x61, 0x6c, 0x73, 0x5f, 0x61, 0x6e, 0x64, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x14, + 0x20, 0x01, 0x28, 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x19, 0x6a, 0x61, 0x76, 0x61, 0x47, 0x65, + 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x45, 0x71, 0x75, 0x61, 0x6c, 0x73, 0x41, 0x6e, 0x64, 0x48, + 0x61, 0x73, 0x68, 0x12, 0x3a, 0x0a, 0x16, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x73, 0x74, 0x72, 0x69, + 0x6e, 0x67, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x75, 0x74, 0x66, 0x38, 0x18, 0x1b, 0x20, + 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x13, 0x6a, 0x61, 0x76, 0x61, + 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x55, 0x74, 0x66, 0x38, 0x12, + 0x53, 0x0a, 0x0c, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x18, + 0x09, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x4d, 0x6f, 0x64, 0x65, + 0x3a, 0x05, 0x53, 0x50, 0x45, 0x45, 0x44, 0x52, 0x0b, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, + 0x65, 0x46, 0x6f, 0x72, 0x12, 0x1d, 0x0a, 0x0a, 0x67, 0x6f, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, + 0x67, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x67, 0x6f, 0x50, 0x61, 0x63, 0x6b, + 0x61, 0x67, 0x65, 0x12, 0x35, 0x0a, 0x13, 0x63, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, + 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x10, 0x20, 0x01, 0x28, 0x08, + 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x63, 0x63, 0x47, 0x65, 0x6e, 0x65, 0x72, + 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x39, 0x0a, 0x15, 0x6a, 0x61, + 0x76, 0x61, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x73, 0x18, 0x11, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, + 0x52, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x35, 0x0a, 0x13, 0x70, 0x79, 0x5f, 0x67, 0x65, 0x6e, 0x65, + 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x12, 0x20, 0x01, + 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x70, 0x79, 0x47, 0x65, 0x6e, + 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0a, + 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x17, 0x20, 0x01, 0x28, 0x08, + 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, + 0x74, 0x65, 0x64, 0x12, 0x2e, 0x0a, 0x10, 0x63, 0x63, 0x5f, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, + 0x5f, 0x61, 0x72, 0x65, 0x6e, 0x61, 0x73, 0x18, 0x1f, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x04, 0x74, + 0x72, 0x75, 0x65, 0x52, 0x0e, 0x63, 0x63, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x41, 0x72, 0x65, + 0x6e, 0x61, 0x73, 0x12, 0x2a, 0x0a, 0x11, 0x6f, 0x62, 0x6a, 0x63, 0x5f, 0x63, 0x6c, 0x61, 0x73, + 0x73, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x24, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, + 0x6f, 0x62, 0x6a, 0x63, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, + 0x29, 0x0a, 0x10, 0x63, 0x73, 0x68, 0x61, 0x72, 0x70, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, + 0x61, 0x63, 0x65, 0x18, 0x25, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x63, 0x73, 
0x68, 0x61, 0x72, + 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x77, + 0x69, 0x66, 0x74, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x27, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0b, 0x73, 0x77, 0x69, 0x66, 0x74, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x28, 0x0a, + 0x10, 0x70, 0x68, 0x70, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, + 0x78, 0x18, 0x28, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x70, 0x68, 0x70, 0x43, 0x6c, 0x61, 0x73, + 0x73, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x23, 0x0a, 0x0d, 0x70, 0x68, 0x70, 0x5f, 0x6e, + 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x29, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, + 0x70, 0x68, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x34, 0x0a, 0x16, + 0x70, 0x68, 0x70, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x6e, 0x61, 0x6d, + 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x2c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x14, 0x70, 0x68, + 0x70, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, + 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x72, 0x75, 0x62, 0x79, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, + 0x67, 0x65, 0x18, 0x2d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, 0x75, 0x62, 0x79, 0x50, 0x61, + 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, + 0x73, 0x18, 0x32, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, + 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, + 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, + 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, - 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x89, 0x02, 0x0a, 0x15, 0x4d, 0x65, 0x74, 0x68, - 0x6f, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, - 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x5f, 0x74, - 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x69, 0x6e, 0x70, 0x75, 0x74, - 0x54, 0x79, 0x70, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, 0x74, - 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x6f, 0x75, 0x74, 0x70, 0x75, - 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x38, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, - 0x30, 0x0a, 0x10, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, - 0x69, 0x6e, 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, - 0x52, 0x0f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, - 0x67, 0x12, 0x30, 0x0a, 0x10, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x73, 0x74, 0x72, 0x65, - 0x61, 
0x6d, 0x69, 0x6e, 0x67, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, - 0x73, 0x65, 0x52, 0x0f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, - 0x69, 0x6e, 0x67, 0x22, 0xad, 0x09, 0x0a, 0x0b, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x70, 0x61, 0x63, 0x6b, - 0x61, 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6a, 0x61, 0x76, 0x61, 0x50, - 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x30, 0x0a, 0x14, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x6f, - 0x75, 0x74, 0x65, 0x72, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x08, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x6a, 0x61, 0x76, 0x61, 0x4f, 0x75, 0x74, 0x65, 0x72, 0x43, - 0x6c, 0x61, 0x73, 0x73, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x35, 0x0a, 0x13, 0x6a, 0x61, 0x76, 0x61, - 0x5f, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x18, - 0x0a, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x6a, 0x61, - 0x76, 0x61, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x12, - 0x44, 0x0a, 0x1d, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, - 0x5f, 0x65, 0x71, 0x75, 0x61, 0x6c, 0x73, 0x5f, 0x61, 0x6e, 0x64, 0x5f, 0x68, 0x61, 0x73, 0x68, - 0x18, 0x14, 0x20, 0x01, 0x28, 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x19, 0x6a, 0x61, 0x76, 0x61, - 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x45, 0x71, 0x75, 0x61, 0x6c, 0x73, 0x41, 0x6e, - 0x64, 0x48, 0x61, 0x73, 0x68, 0x12, 0x3a, 0x0a, 0x16, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x73, 0x74, - 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x75, 0x74, 0x66, 0x38, 0x18, - 0x1b, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x13, 0x6a, 0x61, - 0x76, 0x61, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x55, 0x74, 0x66, - 0x38, 0x12, 0x53, 0x0a, 0x0c, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x5f, 0x66, 0x6f, - 0x72, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x4d, 0x6f, - 0x64, 0x65, 0x3a, 0x05, 0x53, 0x50, 0x45, 0x45, 0x44, 0x52, 0x0b, 0x6f, 0x70, 0x74, 0x69, 0x6d, - 0x69, 0x7a, 0x65, 0x46, 0x6f, 0x72, 0x12, 0x1d, 0x0a, 0x0a, 0x67, 0x6f, 0x5f, 0x70, 0x61, 0x63, - 0x6b, 0x61, 0x67, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x67, 0x6f, 0x50, 0x61, - 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x35, 0x0a, 0x13, 0x63, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x65, - 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x10, 0x20, 0x01, - 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x63, 0x63, 0x47, 0x65, 0x6e, - 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x39, 0x0a, 0x15, - 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x11, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, - 0x73, 0x65, 0x52, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x35, 0x0a, 0x13, 0x70, 0x79, 0x5f, 0x67, 0x65, - 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x12, - 0x20, 0x01, 0x28, 0x08, 0x3a, 
0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x70, 0x79, 0x47, - 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x25, - 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x17, 0x20, 0x01, - 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, - 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x2e, 0x0a, 0x10, 0x63, 0x63, 0x5f, 0x65, 0x6e, 0x61, 0x62, - 0x6c, 0x65, 0x5f, 0x61, 0x72, 0x65, 0x6e, 0x61, 0x73, 0x18, 0x1f, 0x20, 0x01, 0x28, 0x08, 0x3a, - 0x04, 0x74, 0x72, 0x75, 0x65, 0x52, 0x0e, 0x63, 0x63, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x41, - 0x72, 0x65, 0x6e, 0x61, 0x73, 0x12, 0x2a, 0x0a, 0x11, 0x6f, 0x62, 0x6a, 0x63, 0x5f, 0x63, 0x6c, - 0x61, 0x73, 0x73, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x24, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0f, 0x6f, 0x62, 0x6a, 0x63, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x50, 0x72, 0x65, 0x66, 0x69, - 0x78, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x73, 0x68, 0x61, 0x72, 0x70, 0x5f, 0x6e, 0x61, 0x6d, 0x65, - 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x25, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x63, 0x73, 0x68, - 0x61, 0x72, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, - 0x73, 0x77, 0x69, 0x66, 0x74, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x27, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x0b, 0x73, 0x77, 0x69, 0x66, 0x74, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, - 0x28, 0x0a, 0x10, 0x70, 0x68, 0x70, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x70, 0x72, 0x65, - 0x66, 0x69, 0x78, 0x18, 0x28, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x70, 0x68, 0x70, 0x43, 0x6c, - 0x61, 0x73, 0x73, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x23, 0x0a, 0x0d, 0x70, 0x68, 0x70, - 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x29, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0c, 0x70, 0x68, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x34, - 0x0a, 0x16, 0x70, 0x68, 0x70, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x6e, - 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x2c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x14, - 0x70, 0x68, 0x70, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4e, 0x61, 0x6d, 0x65, 0x73, - 0x70, 0x61, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x72, 0x75, 0x62, 0x79, 0x5f, 0x70, 0x61, 0x63, - 0x6b, 0x61, 0x67, 0x65, 0x18, 0x2d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, 0x75, 0x62, 0x79, - 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, - 0x72, 0x65, 0x73, 0x18, 0x32, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, - 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, - 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, - 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, - 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x3a, 0x0a, 0x0c, 0x4f, 0x70, - 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x09, 0x0a, 0x05, 0x53, 0x50, - 0x45, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 
0x43, 0x4f, 0x44, 0x45, 0x5f, 0x53, 0x49, - 0x5a, 0x45, 0x10, 0x02, 0x12, 0x10, 0x0a, 0x0c, 0x4c, 0x49, 0x54, 0x45, 0x5f, 0x52, 0x55, 0x4e, - 0x54, 0x49, 0x4d, 0x45, 0x10, 0x03, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, - 0x02, 0x4a, 0x04, 0x08, 0x2a, 0x10, 0x2b, 0x4a, 0x04, 0x08, 0x26, 0x10, 0x27, 0x52, 0x14, 0x70, - 0x68, 0x70, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x73, 0x22, 0xf4, 0x03, 0x0a, 0x0e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x3c, 0x0a, 0x17, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, - 0x65, 0x5f, 0x73, 0x65, 0x74, 0x5f, 0x77, 0x69, 0x72, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, - 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x14, - 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x57, 0x69, 0x72, 0x65, 0x46, 0x6f, - 0x72, 0x6d, 0x61, 0x74, 0x12, 0x4c, 0x0a, 0x1f, 0x6e, 0x6f, 0x5f, 0x73, 0x74, 0x61, 0x6e, 0x64, - 0x61, 0x72, 0x64, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x61, - 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, - 0x61, 0x6c, 0x73, 0x65, 0x52, 0x1c, 0x6e, 0x6f, 0x53, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, - 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, - 0x6f, 0x72, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, - 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x70, - 0x5f, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x6d, 0x61, - 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x56, 0x0a, 0x26, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, - 0x61, 0x74, 0x65, 0x64, 0x5f, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, - 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, - 0x18, 0x0b, 0x20, 0x01, 0x28, 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x22, 0x64, 0x65, 0x70, 0x72, - 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x4c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x4a, 0x73, 0x6f, 0x6e, - 0x46, 0x69, 0x65, 0x6c, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x12, 0x37, - 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, - 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, - 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, - 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, - 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, - 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x04, - 0x10, 0x05, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x4a, 0x04, 0x08, 0x06, 0x10, 0x07, 0x4a, 0x04, - 0x08, 0x08, 0x10, 0x09, 0x4a, 0x04, 0x08, 0x09, 0x10, 0x0a, 0x22, 0x9d, 0x0d, 
0x0a, 0x0c, 0x46, - 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x41, 0x0a, 0x05, 0x63, - 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, - 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x43, 0x54, 0x79, 0x70, 0x65, 0x3a, - 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x52, 0x05, 0x63, 0x74, 0x79, 0x70, 0x65, 0x12, 0x16, - 0x0a, 0x06, 0x70, 0x61, 0x63, 0x6b, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, - 0x70, 0x61, 0x63, 0x6b, 0x65, 0x64, 0x12, 0x47, 0x0a, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, - 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x09, 0x4a, 0x53, - 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x52, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, 0x12, - 0x19, 0x0a, 0x04, 0x6c, 0x61, 0x7a, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, - 0x61, 0x6c, 0x73, 0x65, 0x52, 0x04, 0x6c, 0x61, 0x7a, 0x79, 0x12, 0x2e, 0x0a, 0x0f, 0x75, 0x6e, - 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x5f, 0x6c, 0x61, 0x7a, 0x79, 0x18, 0x0f, 0x20, - 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0e, 0x75, 0x6e, 0x76, 0x65, - 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x4c, 0x61, 0x7a, 0x79, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, - 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, - 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, - 0x64, 0x12, 0x19, 0x0a, 0x04, 0x77, 0x65, 0x61, 0x6b, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x3a, - 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x04, 0x77, 0x65, 0x61, 0x6b, 0x12, 0x28, 0x0a, 0x0c, - 0x64, 0x65, 0x62, 0x75, 0x67, 0x5f, 0x72, 0x65, 0x64, 0x61, 0x63, 0x74, 0x18, 0x10, 0x20, 0x01, - 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0b, 0x64, 0x65, 0x62, 0x75, 0x67, - 0x52, 0x65, 0x64, 0x61, 0x63, 0x74, 0x12, 0x4b, 0x0a, 0x09, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, - 0x69, 0x6f, 0x6e, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, - 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, - 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, - 0x69, 0x6f, 0x6e, 0x12, 0x48, 0x0a, 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x18, 0x13, - 0x20, 0x03, 0x28, 0x0e, 0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, + 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x3a, 0x0a, 0x0c, 0x4f, 0x70, 0x74, 0x69, + 0x6d, 0x69, 0x7a, 0x65, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x09, 0x0a, 0x05, 0x53, 0x50, 0x45, 0x45, + 0x44, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x53, 0x49, 0x5a, 0x45, + 0x10, 0x02, 0x12, 0x10, 0x0a, 0x0c, 0x4c, 0x49, 0x54, 0x45, 0x5f, 0x52, 0x55, 0x4e, 0x54, 0x49, + 0x4d, 0x45, 0x10, 0x03, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, + 0x04, 
0x08, 0x2a, 0x10, 0x2b, 0x4a, 0x04, 0x08, 0x26, 0x10, 0x27, 0x52, 0x14, 0x70, 0x68, 0x70, + 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x73, 0x22, 0xf4, 0x03, 0x0a, 0x0e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x3c, 0x0a, 0x17, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, + 0x73, 0x65, 0x74, 0x5f, 0x77, 0x69, 0x72, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x14, 0x6d, 0x65, + 0x73, 0x73, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x57, 0x69, 0x72, 0x65, 0x46, 0x6f, 0x72, 0x6d, + 0x61, 0x74, 0x12, 0x4c, 0x0a, 0x1f, 0x6e, 0x6f, 0x5f, 0x73, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, + 0x64, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x61, 0x63, 0x63, + 0x65, 0x73, 0x73, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, + 0x73, 0x65, 0x52, 0x1c, 0x6e, 0x6f, 0x53, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x44, 0x65, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, + 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, + 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x70, 0x5f, 0x65, + 0x6e, 0x74, 0x72, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x6d, 0x61, 0x70, 0x45, + 0x6e, 0x74, 0x72, 0x79, 0x12, 0x56, 0x0a, 0x26, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, + 0x65, 0x64, 0x5f, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66, + 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x18, 0x0b, + 0x20, 0x01, 0x28, 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x22, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, + 0x61, 0x74, 0x65, 0x64, 0x4c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x69, + 0x65, 0x6c, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x12, 0x37, 0x0a, 0x08, + 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, + 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, + 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, + 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, + 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, + 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, + 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x4a, 0x04, 0x08, 0x06, 0x10, 0x07, 0x4a, 0x04, 0x08, 0x08, + 0x10, 0x09, 0x4a, 0x04, 0x08, 0x09, 0x10, 0x0a, 0x22, 0x9d, 0x0d, 0x0a, 0x0c, 0x46, 0x69, 0x65, + 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x41, 0x0a, 0x05, 0x63, 0x74, 0x79, + 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 
0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, + 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x43, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x06, 0x53, + 0x54, 0x52, 0x49, 0x4e, 0x47, 0x52, 0x05, 0x63, 0x74, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06, + 0x70, 0x61, 0x63, 0x6b, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x70, 0x61, + 0x63, 0x6b, 0x65, 0x64, 0x12, 0x47, 0x0a, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, 0x18, 0x06, + 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, - 0x54, 0x79, 0x70, 0x65, 0x52, 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x12, 0x57, 0x0a, - 0x10, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, - 0x73, 0x18, 0x14, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x6f, 0x6e, 0x73, 0x2e, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x09, 0x4a, 0x53, 0x5f, 0x4e, + 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x52, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, 0x12, 0x19, 0x0a, + 0x04, 0x6c, 0x61, 0x7a, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, + 0x73, 0x65, 0x52, 0x04, 0x6c, 0x61, 0x7a, 0x79, 0x12, 0x2e, 0x0a, 0x0f, 0x75, 0x6e, 0x76, 0x65, + 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x5f, 0x6c, 0x61, 0x7a, 0x79, 0x18, 0x0f, 0x20, 0x01, 0x28, + 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0e, 0x75, 0x6e, 0x76, 0x65, 0x72, 0x69, + 0x66, 0x69, 0x65, 0x64, 0x4c, 0x61, 0x7a, 0x79, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, + 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, + 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, + 0x19, 0x0a, 0x04, 0x77, 0x65, 0x61, 0x6b, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, + 0x61, 0x6c, 0x73, 0x65, 0x52, 0x04, 0x77, 0x65, 0x61, 0x6b, 0x12, 0x28, 0x0a, 0x0c, 0x64, 0x65, + 0x62, 0x75, 0x67, 0x5f, 0x72, 0x65, 0x64, 0x61, 0x63, 0x74, 0x18, 0x10, 0x20, 0x01, 0x28, 0x08, + 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0b, 0x64, 0x65, 0x62, 0x75, 0x67, 0x52, 0x65, + 0x64, 0x61, 0x63, 0x74, 0x12, 0x4b, 0x0a, 0x09, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, - 0x66, 0x61, 0x75, 0x6c, 0x74, 0x52, 0x0f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, - 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, - 0x65, 0x73, 0x18, 0x15, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, - 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, - 0x55, 0x0a, 0x0f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x5f, 0x73, 0x75, 0x70, 0x70, 0x6f, - 0x72, 0x74, 0x18, 0x16, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, - 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x46, 0x65, 
0x61, 0x74, 0x75, 0x72, 0x65, 0x53, - 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x0e, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, - 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, - 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, - 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, - 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, - 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x1a, 0x5a, 0x0a, 0x0e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, - 0x6c, 0x74, 0x12, 0x32, 0x0a, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x65, - 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x1a, 0x96, 0x02, 0x0a, - 0x0e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x12, - 0x47, 0x0a, 0x12, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x74, 0x72, 0x6f, - 0x64, 0x75, 0x63, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, - 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x11, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, - 0x74, 0x72, 0x6f, 0x64, 0x75, 0x63, 0x65, 0x64, 0x12, 0x47, 0x0a, 0x12, 0x65, 0x64, 0x69, 0x74, - 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x11, - 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, - 0x64, 0x12, 0x2f, 0x0a, 0x13, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x5f, 0x77, 0x61, 0x72, 0x6e, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, - 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x61, 0x72, 0x6e, 0x69, - 0x6e, 0x67, 0x12, 0x41, 0x0a, 0x0f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, - 0x6d, 0x6f, 0x76, 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, - 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, - 0x6d, 0x6f, 0x76, 0x65, 0x64, 0x22, 0x2f, 0x0a, 0x05, 0x43, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0a, - 0x0a, 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x43, 0x4f, - 0x52, 0x44, 0x10, 0x01, 0x12, 0x10, 0x0a, 0x0c, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x5f, 0x50, - 0x49, 0x45, 0x43, 0x45, 0x10, 0x02, 0x22, 0x35, 0x0a, 0x06, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, - 0x12, 0x0d, 0x0a, 0x09, 0x4a, 0x53, 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x10, 0x00, 0x12, - 0x0d, 0x0a, 0x09, 0x4a, 0x53, 0x5f, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x01, 
0x12, 0x0d, - 0x0a, 0x09, 0x4a, 0x53, 0x5f, 0x4e, 0x55, 0x4d, 0x42, 0x45, 0x52, 0x10, 0x02, 0x22, 0x55, 0x0a, - 0x0f, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, - 0x12, 0x15, 0x0a, 0x11, 0x52, 0x45, 0x54, 0x45, 0x4e, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, - 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x15, 0x0a, 0x11, 0x52, 0x45, 0x54, 0x45, 0x4e, - 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x52, 0x55, 0x4e, 0x54, 0x49, 0x4d, 0x45, 0x10, 0x01, 0x12, 0x14, - 0x0a, 0x10, 0x52, 0x45, 0x54, 0x45, 0x4e, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x4f, 0x55, 0x52, - 0x43, 0x45, 0x10, 0x02, 0x22, 0x8c, 0x02, 0x0a, 0x10, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, - 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, - 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, - 0x10, 0x00, 0x12, 0x14, 0x0a, 0x10, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, - 0x45, 0x5f, 0x46, 0x49, 0x4c, 0x45, 0x10, 0x01, 0x12, 0x1f, 0x0a, 0x1b, 0x54, 0x41, 0x52, 0x47, - 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x58, 0x54, 0x45, 0x4e, 0x53, 0x49, 0x4f, - 0x4e, 0x5f, 0x52, 0x41, 0x4e, 0x47, 0x45, 0x10, 0x02, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, - 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, - 0x10, 0x03, 0x12, 0x15, 0x0a, 0x11, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, - 0x45, 0x5f, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x10, 0x04, 0x12, 0x15, 0x0a, 0x11, 0x54, 0x41, 0x52, - 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4f, 0x4e, 0x45, 0x4f, 0x46, 0x10, 0x05, - 0x12, 0x14, 0x0a, 0x10, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, - 0x45, 0x4e, 0x55, 0x4d, 0x10, 0x06, 0x12, 0x1a, 0x0a, 0x16, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, - 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x45, 0x4e, 0x54, 0x52, 0x59, - 0x10, 0x07, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, - 0x45, 0x5f, 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x10, 0x08, 0x12, 0x16, 0x0a, 0x12, 0x54, - 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x54, 0x48, 0x4f, - 0x44, 0x10, 0x09, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, - 0x08, 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x12, 0x10, 0x13, 0x22, 0xac, 0x01, 0x0a, 0x0c, 0x4f, - 0x6e, 0x65, 0x6f, 0x66, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, - 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, - 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x74, + 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, + 0x6e, 0x12, 0x48, 0x0a, 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x18, 0x13, 0x20, 0x03, + 0x28, 0x0e, 0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, 0x79, + 0x70, 0x65, 
0x52, 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x12, 0x57, 0x0a, 0x10, 0x65, + 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x18, + 0x14, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, + 0x75, 0x6c, 0x74, 0x52, 0x0f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, + 0x75, 0x6c, 0x74, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, + 0x18, 0x15, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, + 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x55, 0x0a, + 0x0f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x5f, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, + 0x18, 0x16, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, 0x70, + 0x70, 0x6f, 0x72, 0x74, 0x52, 0x0e, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, 0x70, + 0x70, 0x6f, 0x72, 0x74, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, - 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, - 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0xd1, 0x02, 0x0a, 0x0b, 0x45, 0x6e, - 0x75, 0x6d, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x6c, 0x6c, - 0x6f, 0x77, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, - 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, - 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, - 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, - 0x64, 0x12, 0x56, 0x0a, 0x26, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, - 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66, 0x69, 0x65, 0x6c, - 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, - 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x22, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, - 0x64, 0x4c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x69, 0x65, 0x6c, 0x64, - 0x43, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, - 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, + 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x5a, + 0x0a, 0x0e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, + 0x12, 0x32, 0x0a, 0x07, 0x65, 0x64, 0x69, 
0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x65, 0x64, 0x69, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x1a, 0x96, 0x02, 0x0a, 0x0e, 0x46, + 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x47, 0x0a, + 0x12, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x74, 0x72, 0x6f, 0x64, 0x75, + 0x63, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x11, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x74, 0x72, + 0x6f, 0x64, 0x75, 0x63, 0x65, 0x64, 0x12, 0x47, 0x0a, 0x12, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x11, 0x65, 0x64, + 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, + 0x2f, 0x0a, 0x13, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x77, + 0x61, 0x72, 0x6e, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x64, 0x65, + 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x61, 0x72, 0x6e, 0x69, 0x6e, 0x67, + 0x12, 0x41, 0x0a, 0x0f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x6d, 0x6f, + 0x76, 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x6d, 0x6f, + 0x76, 0x65, 0x64, 0x22, 0x2f, 0x0a, 0x05, 0x43, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0a, 0x0a, 0x06, + 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x43, 0x4f, 0x52, 0x44, + 0x10, 0x01, 0x12, 0x10, 0x0a, 0x0c, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x5f, 0x50, 0x49, 0x45, + 0x43, 0x45, 0x10, 0x02, 0x22, 0x35, 0x0a, 0x06, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0d, + 0x0a, 0x09, 0x4a, 0x53, 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x10, 0x00, 0x12, 0x0d, 0x0a, + 0x09, 0x4a, 0x53, 0x5f, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, + 0x4a, 0x53, 0x5f, 0x4e, 0x55, 0x4d, 0x42, 0x45, 0x52, 0x10, 0x02, 0x22, 0x55, 0x0a, 0x0f, 0x4f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x15, + 0x0a, 0x11, 0x52, 0x45, 0x54, 0x45, 0x4e, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, + 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x15, 0x0a, 0x11, 0x52, 0x45, 0x54, 0x45, 0x4e, 0x54, 0x49, + 0x4f, 0x4e, 0x5f, 0x52, 0x55, 0x4e, 0x54, 0x49, 0x4d, 0x45, 0x10, 0x01, 0x12, 0x14, 0x0a, 0x10, + 0x52, 0x45, 0x54, 0x45, 0x4e, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45, + 0x10, 0x02, 0x22, 0x8c, 0x02, 0x0a, 0x10, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x72, + 0x67, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, 0x47, 0x45, + 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 
0x4f, 0x57, 0x4e, 0x10, 0x00, + 0x12, 0x14, 0x0a, 0x10, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, + 0x46, 0x49, 0x4c, 0x45, 0x10, 0x01, 0x12, 0x1f, 0x0a, 0x1b, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, + 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x58, 0x54, 0x45, 0x4e, 0x53, 0x49, 0x4f, 0x4e, 0x5f, + 0x52, 0x41, 0x4e, 0x47, 0x45, 0x10, 0x02, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, 0x47, 0x45, + 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x03, + 0x12, 0x15, 0x0a, 0x11, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, + 0x46, 0x49, 0x45, 0x4c, 0x44, 0x10, 0x04, 0x12, 0x15, 0x0a, 0x11, 0x54, 0x41, 0x52, 0x47, 0x45, + 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4f, 0x4e, 0x45, 0x4f, 0x46, 0x10, 0x05, 0x12, 0x14, + 0x0a, 0x10, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, + 0x55, 0x4d, 0x10, 0x06, 0x12, 0x1a, 0x0a, 0x16, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, + 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x45, 0x4e, 0x54, 0x52, 0x59, 0x10, 0x07, + 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, + 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x10, 0x08, 0x12, 0x16, 0x0a, 0x12, 0x54, 0x41, 0x52, + 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x54, 0x48, 0x4f, 0x44, 0x10, + 0x09, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x04, + 0x10, 0x05, 0x4a, 0x04, 0x08, 0x12, 0x10, 0x13, 0x22, 0xac, 0x01, 0x0a, 0x0c, 0x4f, 0x6e, 0x65, + 0x6f, 0x66, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, + 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, @@ -4959,284 +4841,306 @@ var file_google_protobuf_descriptor_proto_rawDesc = []byte{ 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, - 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x22, 0xd8, 0x02, - 0x0a, 0x10, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, - 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, - 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, + 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0xd1, 0x02, 0x0a, 0x0b, 0x45, 0x6e, 0x75, 0x6d, + 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x6c, 0x6c, 0x6f, 0x77, + 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x61, 0x6c, + 0x6c, 0x6f, 0x77, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, + 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, + 0x6c, 0x73, 0x65, 
0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, + 0x56, 0x0a, 0x26, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x6c, 0x65, + 0x67, 0x61, 0x63, 0x79, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, + 0x63, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x42, + 0x02, 0x18, 0x01, 0x52, 0x22, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x4c, + 0x65, 0x67, 0x61, 0x63, 0x79, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x43, 0x6f, + 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, + 0x72, 0x65, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, + 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, + 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, + 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, + 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, + 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x22, 0xd8, 0x02, 0x0a, 0x10, + 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, + 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, + 0x72, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, + 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, + 0x12, 0x28, 0x0a, 0x0c, 0x64, 0x65, 0x62, 0x75, 0x67, 0x5f, 0x72, 0x65, 0x64, 0x61, 0x63, 0x74, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0b, 0x64, + 0x65, 0x62, 0x75, 0x67, 0x52, 0x65, 0x64, 0x61, 0x63, 0x74, 0x12, 0x55, 0x0a, 0x0f, 0x66, 0x65, + 0x61, 0x74, 0x75, 0x72, 0x65, 0x5f, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, + 0x74, 0x52, 0x0e, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, + 0x74, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, + 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, + 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 
0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, + 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, + 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0xd5, 0x01, 0x0a, 0x0e, 0x53, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, + 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x22, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, - 0x65, 0x73, 0x12, 0x28, 0x0a, 0x0c, 0x64, 0x65, 0x62, 0x75, 0x67, 0x5f, 0x72, 0x65, 0x64, 0x61, - 0x63, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, - 0x0b, 0x64, 0x65, 0x62, 0x75, 0x67, 0x52, 0x65, 0x64, 0x61, 0x63, 0x74, 0x12, 0x55, 0x0a, 0x0f, - 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x5f, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x18, - 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, 0x70, 0x70, - 0x6f, 0x72, 0x74, 0x52, 0x0e, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, 0x70, 0x70, - 0x6f, 0x72, 0x74, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, + 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, + 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, + 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, + 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, + 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, + 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0x99, + 0x03, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x21, + 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, + 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x71, 0x0a, 0x11, 0x69, 0x64, 0x65, 0x6d, 0x70, + 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, 0x22, 0x20, 0x01, + 0x28, 0x0e, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x2e, 0x49, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, + 0x76, 0x65, 0x6c, 0x3a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, 0x43, 0x59, + 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x52, 0x10, 0x69, 0x64, 0x65, 0x6d, 0x70, 0x6f, + 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, + 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x23, 0x20, 0x01, 0x28, 0x0b, 
0x32, 0x1b, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, + 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, + 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, - 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, - 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0xd5, 0x01, 0x0a, 0x0e, 0x53, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, - 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x22, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, + 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x50, 0x0a, + 0x10, 0x49, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, + 0x6c, 0x12, 0x17, 0x0a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, 0x43, 0x59, + 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, 0x4e, 0x4f, + 0x5f, 0x53, 0x49, 0x44, 0x45, 0x5f, 0x45, 0x46, 0x46, 0x45, 0x43, 0x54, 0x53, 0x10, 0x01, 0x12, + 0x0e, 0x0a, 0x0a, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, 0x54, 0x10, 0x02, 0x2a, + 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0x9a, 0x03, 0x0a, 0x13, 0x55, + 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, + 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, 0x52, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, + 0x69, 0x65, 0x72, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x12, 0x2c, 0x0a, 0x12, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x69, 0x6e, 0x74, + 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x10, 0x70, 0x6f, + 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x2c, + 0x0a, 0x12, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x69, 0x6e, 0x74, 0x5f, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x10, 0x6e, 0x65, 0x67, 0x61, + 0x74, 0x69, 0x76, 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0c, + 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x06, 0x20, 0x01, + 0x28, 0x01, 0x52, 0x0b, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, + 0x21, 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, + 0x07, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, + 0x75, 
0x65, 0x12, 0x27, 0x0a, 0x0f, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x5f, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x61, 0x67, 0x67, + 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x1a, 0x4a, 0x0a, 0x08, 0x4e, + 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x5f, + 0x70, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x02, 0x28, 0x09, 0x52, 0x08, 0x6e, 0x61, 0x6d, 0x65, + 0x50, 0x61, 0x72, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x69, 0x73, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, + 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x02, 0x28, 0x08, 0x52, 0x0b, 0x69, 0x73, 0x45, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xa7, 0x0a, 0x0a, 0x0a, 0x46, 0x65, 0x61, 0x74, + 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x12, 0x91, 0x01, 0x0a, 0x0e, 0x66, 0x69, 0x65, 0x6c, 0x64, + 0x5f, 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, + 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x46, 0x69, 0x65, + 0x6c, 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x42, 0x3f, 0x88, 0x01, 0x01, 0x98, + 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, 0x58, 0x50, 0x4c, 0x49, 0x43, + 0x49, 0x54, 0x18, 0x84, 0x07, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x49, 0x4d, 0x50, 0x4c, 0x49, 0x43, + 0x49, 0x54, 0x18, 0xe7, 0x07, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, 0x58, 0x50, 0x4c, 0x49, 0x43, + 0x49, 0x54, 0x18, 0xe8, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x0d, 0x66, 0x69, 0x65, + 0x6c, 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x6c, 0x0a, 0x09, 0x65, 0x6e, + 0x75, 0x6d, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, - 0x75, 0x72, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, - 0x65, 0x64, 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, - 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x58, 0x0a, 0x14, 0x75, - 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, - 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, - 0x22, 0x99, 0x03, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, - 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, - 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x71, 0x0a, 0x11, 0x69, 0x64, 0x65, - 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, 0x22, - 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 
0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x49, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, - 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x3a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, - 0x43, 0x59, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x52, 0x10, 0x69, 0x64, 0x65, 0x6d, - 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x37, 0x0a, 0x08, - 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x23, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, - 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, - 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, - 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, - 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, - 0x50, 0x0a, 0x10, 0x49, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, - 0x76, 0x65, 0x6c, 0x12, 0x17, 0x0a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, - 0x43, 0x59, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, - 0x4e, 0x4f, 0x5f, 0x53, 0x49, 0x44, 0x45, 0x5f, 0x45, 0x46, 0x46, 0x45, 0x43, 0x54, 0x53, 0x10, - 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, 0x54, 0x10, - 0x02, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0x9a, 0x03, 0x0a, - 0x13, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, - 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, - 0x74, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x69, 0x64, 0x65, 0x6e, 0x74, - 0x69, 0x66, 0x69, 0x65, 0x72, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x0f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x56, 0x61, 0x6c, - 0x75, 0x65, 0x12, 0x2c, 0x0a, 0x12, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x69, - 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x10, - 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, - 0x12, 0x2c, 0x0a, 0x12, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x69, 0x6e, 0x74, - 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x10, 0x6e, 0x65, - 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, - 0x0a, 0x0c, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x06, - 0x20, 0x01, 0x28, 0x01, 0x52, 0x0b, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, - 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 
0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, - 0x61, 0x6c, 0x75, 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, - 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x61, - 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x1a, 0x4a, 0x0a, - 0x08, 0x4e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x61, 0x6d, - 0x65, 0x5f, 0x70, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x02, 0x28, 0x09, 0x52, 0x08, 0x6e, 0x61, - 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x69, 0x73, 0x5f, 0x65, 0x78, 0x74, - 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x02, 0x28, 0x08, 0x52, 0x0b, 0x69, 0x73, - 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xa7, 0x0a, 0x0a, 0x0a, 0x46, 0x65, - 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x12, 0x91, 0x01, 0x0a, 0x0e, 0x66, 0x69, 0x65, - 0x6c, 0x64, 0x5f, 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x0e, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x46, - 0x69, 0x65, 0x6c, 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x42, 0x3f, 0x88, 0x01, - 0x01, 0x98, 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, 0x58, 0x50, 0x4c, - 0x49, 0x43, 0x49, 0x54, 0x18, 0xe6, 0x07, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x49, 0x4d, 0x50, 0x4c, - 0x49, 0x43, 0x49, 0x54, 0x18, 0xe7, 0x07, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, 0x58, 0x50, 0x4c, - 0x49, 0x43, 0x49, 0x54, 0x18, 0xe8, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x0d, 0x66, - 0x69, 0x65, 0x6c, 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x6c, 0x0a, 0x09, - 0x65, 0x6e, 0x75, 0x6d, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, - 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x45, 0x6e, 0x75, - 0x6d, 0x54, 0x79, 0x70, 0x65, 0x42, 0x29, 0x88, 0x01, 0x01, 0x98, 0x01, 0x06, 0x98, 0x01, 0x01, - 0xa2, 0x01, 0x0b, 0x12, 0x06, 0x43, 0x4c, 0x4f, 0x53, 0x45, 0x44, 0x18, 0xe6, 0x07, 0xa2, 0x01, - 0x09, 0x12, 0x04, 0x4f, 0x50, 0x45, 0x4e, 0x18, 0xe7, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, - 0x52, 0x08, 0x65, 0x6e, 0x75, 0x6d, 0x54, 0x79, 0x70, 0x65, 0x12, 0x98, 0x01, 0x0a, 0x17, 0x72, - 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x65, 0x6e, - 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x31, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, - 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x52, 0x65, 0x70, 0x65, 0x61, 0x74, - 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x42, - 0x2d, 0x88, 0x01, 0x01, 0x98, 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, - 0x58, 0x50, 0x41, 0x4e, 0x44, 0x45, 0x44, 0x18, 0xe6, 0x07, 0xa2, 0x01, 0x0b, 0x12, 0x06, 0x50, - 0x41, 0x43, 0x4b, 0x45, 0x44, 0x18, 0xe7, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x15, - 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x6e, 0x63, - 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x7e, 0x0a, 0x0f, 0x75, 0x74, 0x66, 0x38, 
0x5f, 0x76, 0x61, - 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2a, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x55, 0x74, 0x66, 0x38, - 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x29, 0x88, 0x01, 0x01, 0x98, - 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x09, 0x12, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x18, 0xe6, - 0x07, 0xa2, 0x01, 0x0b, 0x12, 0x06, 0x56, 0x45, 0x52, 0x49, 0x46, 0x59, 0x18, 0xe7, 0x07, 0xb2, - 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x0e, 0x75, 0x74, 0x66, 0x38, 0x56, 0x61, 0x6c, 0x69, 0x64, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x7e, 0x0a, 0x10, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, - 0x5f, 0x65, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, - 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x4d, 0x65, 0x73, - 0x73, 0x61, 0x67, 0x65, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x42, 0x26, 0x88, 0x01, - 0x01, 0x98, 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x14, 0x12, 0x0f, 0x4c, 0x45, 0x4e, 0x47, - 0x54, 0x48, 0x5f, 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x45, 0x44, 0x18, 0xe6, 0x07, 0xb2, 0x01, - 0x03, 0x08, 0xe8, 0x07, 0x52, 0x0f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x45, 0x6e, 0x63, - 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x82, 0x01, 0x0a, 0x0b, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66, - 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x26, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, - 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x6f, 0x72, - 0x6d, 0x61, 0x74, 0x42, 0x39, 0x88, 0x01, 0x01, 0x98, 0x01, 0x03, 0x98, 0x01, 0x06, 0x98, 0x01, - 0x01, 0xa2, 0x01, 0x17, 0x12, 0x12, 0x4c, 0x45, 0x47, 0x41, 0x43, 0x59, 0x5f, 0x42, 0x45, 0x53, - 0x54, 0x5f, 0x45, 0x46, 0x46, 0x4f, 0x52, 0x54, 0x18, 0xe6, 0x07, 0xa2, 0x01, 0x0a, 0x12, 0x05, - 0x41, 0x4c, 0x4c, 0x4f, 0x57, 0x18, 0xe7, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x0a, - 0x6a, 0x73, 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x22, 0x5c, 0x0a, 0x0d, 0x46, 0x69, - 0x65, 0x6c, 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x1a, 0x0a, 0x16, 0x46, - 0x49, 0x45, 0x4c, 0x44, 0x5f, 0x50, 0x52, 0x45, 0x53, 0x45, 0x4e, 0x43, 0x45, 0x5f, 0x55, 0x4e, - 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x45, 0x58, 0x50, 0x4c, 0x49, - 0x43, 0x49, 0x54, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x49, 0x4d, 0x50, 0x4c, 0x49, 0x43, 0x49, - 0x54, 0x10, 0x02, 0x12, 0x13, 0x0a, 0x0f, 0x4c, 0x45, 0x47, 0x41, 0x43, 0x59, 0x5f, 0x52, 0x45, - 0x51, 0x55, 0x49, 0x52, 0x45, 0x44, 0x10, 0x03, 0x22, 0x37, 0x0a, 0x08, 0x45, 0x6e, 0x75, 0x6d, - 0x54, 0x79, 0x70, 0x65, 0x12, 0x15, 0x0a, 0x11, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x54, 0x59, 0x50, - 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x4f, - 0x50, 0x45, 0x4e, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x43, 0x4c, 0x4f, 0x53, 0x45, 0x44, 0x10, - 0x02, 0x22, 0x56, 0x0a, 0x15, 0x52, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, - 0x6c, 0x64, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x23, 0x0a, 0x1f, 0x52, 0x45, - 0x50, 0x45, 0x41, 0x54, 0x45, 0x44, 0x5f, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x5f, 0x45, 0x4e, 0x43, - 0x4f, 
0x44, 0x49, 0x4e, 0x47, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, - 0x0a, 0x0a, 0x06, 0x50, 0x41, 0x43, 0x4b, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x45, - 0x58, 0x50, 0x41, 0x4e, 0x44, 0x45, 0x44, 0x10, 0x02, 0x22, 0x49, 0x0a, 0x0e, 0x55, 0x74, 0x66, - 0x38, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1b, 0x0a, 0x17, 0x55, - 0x54, 0x46, 0x38, 0x5f, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, - 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x56, 0x45, 0x52, 0x49, - 0x46, 0x59, 0x10, 0x02, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x03, 0x22, 0x04, - 0x08, 0x01, 0x10, 0x01, 0x22, 0x53, 0x0a, 0x0f, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x45, - 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x1c, 0x0a, 0x18, 0x4d, 0x45, 0x53, 0x53, 0x41, - 0x47, 0x45, 0x5f, 0x45, 0x4e, 0x43, 0x4f, 0x44, 0x49, 0x4e, 0x47, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, - 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, 0x4c, 0x45, 0x4e, 0x47, 0x54, 0x48, 0x5f, - 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x44, 0x45, - 0x4c, 0x49, 0x4d, 0x49, 0x54, 0x45, 0x44, 0x10, 0x02, 0x22, 0x48, 0x0a, 0x0a, 0x4a, 0x73, 0x6f, - 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x17, 0x0a, 0x13, 0x4a, 0x53, 0x4f, 0x4e, 0x5f, - 0x46, 0x4f, 0x52, 0x4d, 0x41, 0x54, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, - 0x12, 0x09, 0x0a, 0x05, 0x41, 0x4c, 0x4c, 0x4f, 0x57, 0x10, 0x01, 0x12, 0x16, 0x0a, 0x12, 0x4c, - 0x45, 0x47, 0x41, 0x43, 0x59, 0x5f, 0x42, 0x45, 0x53, 0x54, 0x5f, 0x45, 0x46, 0x46, 0x4f, 0x52, - 0x54, 0x10, 0x02, 0x2a, 0x06, 0x08, 0xe8, 0x07, 0x10, 0x8b, 0x4e, 0x2a, 0x06, 0x08, 0x8b, 0x4e, - 0x10, 0x90, 0x4e, 0x2a, 0x06, 0x08, 0x90, 0x4e, 0x10, 0x91, 0x4e, 0x4a, 0x06, 0x08, 0xe7, 0x07, - 0x10, 0xe8, 0x07, 0x22, 0xef, 0x03, 0x0a, 0x12, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, - 0x65, 0x74, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x58, 0x0a, 0x08, 0x64, 0x65, - 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x67, + 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x54, + 0x79, 0x70, 0x65, 0x42, 0x29, 0x88, 0x01, 0x01, 0x98, 0x01, 0x06, 0x98, 0x01, 0x01, 0xa2, 0x01, + 0x0b, 0x12, 0x06, 0x43, 0x4c, 0x4f, 0x53, 0x45, 0x44, 0x18, 0x84, 0x07, 0xa2, 0x01, 0x09, 0x12, + 0x04, 0x4f, 0x50, 0x45, 0x4e, 0x18, 0xe7, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x08, + 0x65, 0x6e, 0x75, 0x6d, 0x54, 0x79, 0x70, 0x65, 0x12, 0x98, 0x01, 0x0a, 0x17, 0x72, 0x65, 0x70, + 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x65, 0x6e, 0x63, 0x6f, + 0x64, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x31, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, + 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x52, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, + 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x42, 0x2d, 0x88, + 0x01, 0x01, 0x98, 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, 0x58, 0x50, + 0x41, 0x4e, 0x44, 0x45, 0x44, 0x18, 0x84, 0x07, 0xa2, 0x01, 0x0b, 0x12, 0x06, 0x50, 0x41, 0x43, + 0x4b, 0x45, 0x44, 0x18, 0xe7, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x15, 0x72, 0x65, + 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x6e, 0x63, 0x6f, 0x64, + 0x69, 0x6e, 0x67, 0x12, 0x7e, 
0x0a, 0x0f, 0x75, 0x74, 0x66, 0x38, 0x5f, 0x76, 0x61, 0x6c, 0x69, + 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, - 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, - 0x73, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x45, 0x64, 0x69, 0x74, - 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x52, 0x08, 0x64, 0x65, 0x66, 0x61, - 0x75, 0x6c, 0x74, 0x73, 0x12, 0x41, 0x0a, 0x0f, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x5f, - 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, + 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x55, 0x74, 0x66, 0x38, 0x56, 0x61, + 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x29, 0x88, 0x01, 0x01, 0x98, 0x01, 0x04, + 0x98, 0x01, 0x01, 0xa2, 0x01, 0x09, 0x12, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x18, 0x84, 0x07, 0xa2, + 0x01, 0x0b, 0x12, 0x06, 0x56, 0x45, 0x52, 0x49, 0x46, 0x59, 0x18, 0xe7, 0x07, 0xb2, 0x01, 0x03, + 0x08, 0xe8, 0x07, 0x52, 0x0e, 0x75, 0x74, 0x66, 0x38, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x7e, 0x0a, 0x10, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x65, + 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, - 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x0f, 0x6d, 0x61, 0x78, 0x69, 0x6d, - 0x75, 0x6d, 0x5f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, - 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x6d, 0x61, 0x78, 0x69, - 0x6d, 0x75, 0x6d, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xf8, 0x01, 0x0a, 0x18, 0x46, - 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, - 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x12, 0x32, 0x0a, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, - 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, - 0x6f, 0x6e, 0x52, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4e, 0x0a, 0x14, 0x6f, - 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, - 0x72, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x42, 0x26, 0x88, 0x01, 0x01, 0x98, + 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x14, 0x12, 0x0f, 0x4c, 0x45, 0x4e, 0x47, 0x54, 0x48, + 0x5f, 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x45, 0x44, 0x18, 0x84, 0x07, 0xb2, 0x01, 0x03, 0x08, + 0xe8, 0x07, 0x52, 0x0f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x45, 0x6e, 0x63, 0x6f, 0x64, + 0x69, 0x6e, 0x67, 0x12, 0x82, 0x01, 0x0a, 0x0b, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66, 0x6f, 0x72, + 0x6d, 0x61, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 
0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, - 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x13, 0x6f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x61, - 0x62, 0x6c, 0x65, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x42, 0x0a, 0x0e, 0x66, - 0x69, 0x78, 0x65, 0x64, 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x05, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, - 0x52, 0x0d, 0x66, 0x69, 0x78, 0x65, 0x64, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x4a, - 0x04, 0x08, 0x01, 0x10, 0x02, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x52, 0x08, 0x66, 0x65, 0x61, - 0x74, 0x75, 0x72, 0x65, 0x73, 0x22, 0xa7, 0x02, 0x0a, 0x0e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x44, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x6f, 0x75, - 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x4c, 0x6f, 0x63, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xce, - 0x01, 0x0a, 0x08, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x04, 0x70, - 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, - 0x61, 0x74, 0x68, 0x12, 0x16, 0x0a, 0x04, 0x73, 0x70, 0x61, 0x6e, 0x18, 0x02, 0x20, 0x03, 0x28, - 0x05, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x73, 0x70, 0x61, 0x6e, 0x12, 0x29, 0x0a, 0x10, 0x6c, - 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x43, 0x6f, - 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, - 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x10, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6d, 0x6d, 0x65, - 0x6e, 0x74, 0x73, 0x12, 0x3a, 0x0a, 0x19, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x64, - 0x65, 0x74, 0x61, 0x63, 0x68, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, - 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x17, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x44, - 0x65, 0x74, 0x61, 0x63, 0x68, 0x65, 0x64, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x22, - 0xd0, 0x02, 0x0a, 0x11, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, - 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x4d, 0x0a, 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x65, 0x6e, 0x65, - 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x41, 0x6e, - 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xeb, 0x01, 0x0a, 0x0a, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, - 0x05, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x1f, 0x0a, 0x0b, 0x73, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 
0x28, 0x09, - 0x52, 0x0a, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x14, 0x0a, 0x05, - 0x62, 0x65, 0x67, 0x69, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x62, 0x65, 0x67, - 0x69, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, - 0x03, 0x65, 0x6e, 0x64, 0x12, 0x52, 0x0a, 0x08, 0x73, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, - 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, - 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x53, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x52, 0x08, - 0x73, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x22, 0x28, 0x0a, 0x08, 0x53, 0x65, 0x6d, 0x61, - 0x6e, 0x74, 0x69, 0x63, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x00, 0x12, 0x07, - 0x0a, 0x03, 0x53, 0x45, 0x54, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x41, 0x4c, 0x49, 0x41, 0x53, - 0x10, 0x02, 0x2a, 0xa7, 0x02, 0x0a, 0x07, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x13, - 0x0a, 0x0f, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, - 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0e, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4c, - 0x45, 0x47, 0x41, 0x43, 0x59, 0x10, 0x84, 0x07, 0x12, 0x13, 0x0a, 0x0e, 0x45, 0x44, 0x49, 0x54, - 0x49, 0x4f, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x32, 0x10, 0xe6, 0x07, 0x12, 0x13, 0x0a, - 0x0e, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x33, 0x10, - 0xe7, 0x07, 0x12, 0x11, 0x0a, 0x0c, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x32, 0x30, - 0x32, 0x33, 0x10, 0xe8, 0x07, 0x12, 0x11, 0x0a, 0x0c, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, - 0x5f, 0x32, 0x30, 0x32, 0x34, 0x10, 0xe9, 0x07, 0x12, 0x17, 0x0a, 0x13, 0x45, 0x44, 0x49, 0x54, - 0x49, 0x4f, 0x4e, 0x5f, 0x31, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, - 0x01, 0x12, 0x17, 0x0a, 0x13, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x32, 0x5f, 0x54, - 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x02, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, - 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x39, 0x39, 0x39, 0x39, 0x37, 0x5f, 0x54, 0x45, 0x53, 0x54, - 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x9d, 0x8d, 0x06, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, 0x49, - 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x39, 0x39, 0x39, 0x39, 0x38, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, - 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x9e, 0x8d, 0x06, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, 0x49, 0x54, - 0x49, 0x4f, 0x4e, 0x5f, 0x39, 0x39, 0x39, 0x39, 0x39, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, - 0x4e, 0x4c, 0x59, 0x10, 0x9f, 0x8d, 0x06, 0x12, 0x13, 0x0a, 0x0b, 0x45, 0x44, 0x49, 0x54, 0x49, - 0x4f, 0x4e, 0x5f, 0x4d, 0x41, 0x58, 0x10, 0xff, 0xff, 0xff, 0xff, 0x07, 0x42, 0x7e, 0x0a, 0x13, - 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x42, 0x10, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, - 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x48, 0x01, 0x5a, 0x2d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, - 0x70, 0x74, 0x6f, 0x72, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, - 0x02, 0x1a, 
0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, -} + 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, + 0x74, 0x42, 0x39, 0x88, 0x01, 0x01, 0x98, 0x01, 0x03, 0x98, 0x01, 0x06, 0x98, 0x01, 0x01, 0xa2, + 0x01, 0x17, 0x12, 0x12, 0x4c, 0x45, 0x47, 0x41, 0x43, 0x59, 0x5f, 0x42, 0x45, 0x53, 0x54, 0x5f, + 0x45, 0x46, 0x46, 0x4f, 0x52, 0x54, 0x18, 0x84, 0x07, 0xa2, 0x01, 0x0a, 0x12, 0x05, 0x41, 0x4c, + 0x4c, 0x4f, 0x57, 0x18, 0xe7, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x0a, 0x6a, 0x73, + 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x22, 0x5c, 0x0a, 0x0d, 0x46, 0x69, 0x65, 0x6c, + 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x1a, 0x0a, 0x16, 0x46, 0x49, 0x45, + 0x4c, 0x44, 0x5f, 0x50, 0x52, 0x45, 0x53, 0x45, 0x4e, 0x43, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, + 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x45, 0x58, 0x50, 0x4c, 0x49, 0x43, 0x49, + 0x54, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x49, 0x4d, 0x50, 0x4c, 0x49, 0x43, 0x49, 0x54, 0x10, + 0x02, 0x12, 0x13, 0x0a, 0x0f, 0x4c, 0x45, 0x47, 0x41, 0x43, 0x59, 0x5f, 0x52, 0x45, 0x51, 0x55, + 0x49, 0x52, 0x45, 0x44, 0x10, 0x03, 0x22, 0x37, 0x0a, 0x08, 0x45, 0x6e, 0x75, 0x6d, 0x54, 0x79, + 0x70, 0x65, 0x12, 0x15, 0x0a, 0x11, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, + 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x4f, 0x50, 0x45, + 0x4e, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x43, 0x4c, 0x4f, 0x53, 0x45, 0x44, 0x10, 0x02, 0x22, + 0x56, 0x0a, 0x15, 0x52, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, + 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x23, 0x0a, 0x1f, 0x52, 0x45, 0x50, 0x45, + 0x41, 0x54, 0x45, 0x44, 0x5f, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x5f, 0x45, 0x4e, 0x43, 0x4f, 0x44, + 0x49, 0x4e, 0x47, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0a, 0x0a, + 0x06, 0x50, 0x41, 0x43, 0x4b, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x45, 0x58, 0x50, + 0x41, 0x4e, 0x44, 0x45, 0x44, 0x10, 0x02, 0x22, 0x49, 0x0a, 0x0e, 0x55, 0x74, 0x66, 0x38, 0x56, + 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1b, 0x0a, 0x17, 0x55, 0x54, 0x46, + 0x38, 0x5f, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x4b, + 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x56, 0x45, 0x52, 0x49, 0x46, 0x59, + 0x10, 0x02, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x03, 0x22, 0x04, 0x08, 0x01, + 0x10, 0x01, 0x22, 0x53, 0x0a, 0x0f, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x45, 0x6e, 0x63, + 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x1c, 0x0a, 0x18, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, + 0x5f, 0x45, 0x4e, 0x43, 0x4f, 0x44, 0x49, 0x4e, 0x47, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, + 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, 0x4c, 0x45, 0x4e, 0x47, 0x54, 0x48, 0x5f, 0x50, 0x52, + 0x45, 0x46, 0x49, 0x58, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x44, 0x45, 0x4c, 0x49, + 0x4d, 0x49, 0x54, 0x45, 0x44, 0x10, 0x02, 0x22, 0x48, 0x0a, 0x0a, 0x4a, 0x73, 0x6f, 0x6e, 0x46, + 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x17, 0x0a, 0x13, 0x4a, 0x53, 0x4f, 0x4e, 0x5f, 0x46, 0x4f, + 0x52, 0x4d, 0x41, 0x54, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x09, + 0x0a, 0x05, 0x41, 0x4c, 0x4c, 0x4f, 0x57, 0x10, 0x01, 0x12, 0x16, 0x0a, 0x12, 0x4c, 0x45, 0x47, + 0x41, 0x43, 0x59, 0x5f, 0x42, 0x45, 0x53, 0x54, 0x5f, 0x45, 
0x46, 0x46, 0x4f, 0x52, 0x54, 0x10, + 0x02, 0x2a, 0x06, 0x08, 0xe8, 0x07, 0x10, 0x8b, 0x4e, 0x2a, 0x06, 0x08, 0x8b, 0x4e, 0x10, 0x90, + 0x4e, 0x2a, 0x06, 0x08, 0x90, 0x4e, 0x10, 0x91, 0x4e, 0x4a, 0x06, 0x08, 0xe7, 0x07, 0x10, 0xe8, + 0x07, 0x22, 0xef, 0x03, 0x0a, 0x12, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, + 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x58, 0x0a, 0x08, 0x64, 0x65, 0x66, 0x61, + 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, + 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x2e, + 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, + 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x52, 0x08, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, + 0x74, 0x73, 0x12, 0x41, 0x0a, 0x0f, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x5f, 0x65, 0x64, + 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, + 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x45, 0x64, + 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x0f, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, + 0x5f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, + 0x6d, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xf8, 0x01, 0x0a, 0x18, 0x46, 0x65, 0x61, + 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, + 0x66, 0x61, 0x75, 0x6c, 0x74, 0x12, 0x32, 0x0a, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4e, 0x0a, 0x14, 0x6f, 0x76, 0x65, + 0x72, 0x72, 0x69, 0x64, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, + 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, + 0x65, 0x53, 0x65, 0x74, 0x52, 0x13, 0x6f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x61, 0x62, 0x6c, + 0x65, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x42, 0x0a, 0x0e, 0x66, 0x69, 0x78, + 0x65, 0x64, 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x0d, + 0x66, 0x69, 0x78, 0x65, 0x64, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x4a, 0x04, 0x08, + 0x01, 0x10, 0x02, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, + 0x72, 0x65, 0x73, 0x22, 0xb5, 0x02, 0x0a, 0x0e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, + 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x44, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 
0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xce, 0x01, 0x0a, + 0x08, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x04, 0x70, 0x61, 0x74, + 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, 0x74, + 0x68, 0x12, 0x16, 0x0a, 0x04, 0x73, 0x70, 0x61, 0x6e, 0x18, 0x02, 0x20, 0x03, 0x28, 0x05, 0x42, + 0x02, 0x10, 0x01, 0x52, 0x04, 0x73, 0x70, 0x61, 0x6e, 0x12, 0x29, 0x0a, 0x10, 0x6c, 0x65, 0x61, + 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6d, 0x6d, + 0x65, 0x6e, 0x74, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, + 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x10, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, + 0x73, 0x12, 0x3a, 0x0a, 0x19, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x65, 0x74, + 0x61, 0x63, 0x68, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x06, + 0x20, 0x03, 0x28, 0x09, 0x52, 0x17, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x44, 0x65, 0x74, + 0x61, 0x63, 0x68, 0x65, 0x64, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x2a, 0x0c, 0x08, + 0x80, 0xec, 0xca, 0xff, 0x01, 0x10, 0x81, 0xec, 0xca, 0xff, 0x01, 0x22, 0xd0, 0x02, 0x0a, 0x11, + 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, + 0x6f, 0x12, 0x4d, 0x0a, 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, + 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x1a, 0xeb, 0x01, 0x0a, 0x0a, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, + 0x16, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, 0x10, + 0x01, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x62, 0x65, 0x67, 0x69, + 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x62, 0x65, 0x67, 0x69, 0x6e, 0x12, 0x10, + 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, + 0x12, 0x52, 0x0a, 0x08, 0x73, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x0e, 0x32, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, + 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x2e, 0x53, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x52, 0x08, 0x73, 0x65, 0x6d, 0x61, + 0x6e, 0x74, 0x69, 0x63, 0x22, 0x28, 0x0a, 0x08, 0x53, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, + 0x12, 0x08, 
0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x53, 0x45, + 0x54, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x41, 0x4c, 0x49, 0x41, 0x53, 0x10, 0x02, 0x2a, 0xa7, + 0x02, 0x0a, 0x07, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x13, 0x0a, 0x0f, 0x45, 0x44, + 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, + 0x13, 0x0a, 0x0e, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4c, 0x45, 0x47, 0x41, 0x43, + 0x59, 0x10, 0x84, 0x07, 0x12, 0x13, 0x0a, 0x0e, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, + 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x32, 0x10, 0xe6, 0x07, 0x12, 0x13, 0x0a, 0x0e, 0x45, 0x44, 0x49, + 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x33, 0x10, 0xe7, 0x07, 0x12, 0x11, + 0x0a, 0x0c, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x32, 0x30, 0x32, 0x33, 0x10, 0xe8, + 0x07, 0x12, 0x11, 0x0a, 0x0c, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x32, 0x30, 0x32, + 0x34, 0x10, 0xe9, 0x07, 0x12, 0x17, 0x0a, 0x13, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, + 0x31, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x01, 0x12, 0x17, 0x0a, + 0x13, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x32, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, + 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x02, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, + 0x4e, 0x5f, 0x39, 0x39, 0x39, 0x39, 0x37, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, + 0x59, 0x10, 0x9d, 0x8d, 0x06, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, + 0x5f, 0x39, 0x39, 0x39, 0x39, 0x38, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, + 0x10, 0x9e, 0x8d, 0x06, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, + 0x39, 0x39, 0x39, 0x39, 0x39, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, + 0x9f, 0x8d, 0x06, 0x12, 0x13, 0x0a, 0x0b, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4d, + 0x41, 0x58, 0x10, 0xff, 0xff, 0xff, 0xff, 0x07, 0x42, 0x7e, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, + 0x10, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, + 0x73, 0x48, 0x01, 0x5a, 0x2d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, + 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, + 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, + 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1a, 0x47, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x52, 0x65, + 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, +}) var ( file_google_protobuf_descriptor_proto_rawDescOnce sync.Once - file_google_protobuf_descriptor_proto_rawDescData = file_google_protobuf_descriptor_proto_rawDesc + file_google_protobuf_descriptor_proto_rawDescData []byte ) func file_google_protobuf_descriptor_proto_rawDescGZIP() []byte { file_google_protobuf_descriptor_proto_rawDescOnce.Do(func() { - file_google_protobuf_descriptor_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_descriptor_proto_rawDescData) + file_google_protobuf_descriptor_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_google_protobuf_descriptor_proto_rawDesc), len(file_google_protobuf_descriptor_proto_rawDesc))) }) return file_google_protobuf_descriptor_proto_rawDescData } @@ -5385,429 
+5289,11 @@ func file_google_protobuf_descriptor_proto_init() { if File_google_protobuf_descriptor_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_google_protobuf_descriptor_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*FileDescriptorSet); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[1].Exporter = func(v any, i int) any { - switch v := v.(*FileDescriptorProto); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[2].Exporter = func(v any, i int) any { - switch v := v.(*DescriptorProto); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[3].Exporter = func(v any, i int) any { - switch v := v.(*ExtensionRangeOptions); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - case 3: - return &v.extensionFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[4].Exporter = func(v any, i int) any { - switch v := v.(*FieldDescriptorProto); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[5].Exporter = func(v any, i int) any { - switch v := v.(*OneofDescriptorProto); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[6].Exporter = func(v any, i int) any { - switch v := v.(*EnumDescriptorProto); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[7].Exporter = func(v any, i int) any { - switch v := v.(*EnumValueDescriptorProto); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[8].Exporter = func(v any, i int) any { - switch v := v.(*ServiceDescriptorProto); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[9].Exporter = func(v any, i int) any { - switch v := v.(*MethodDescriptorProto); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[10].Exporter = func(v any, i int) any { - switch v := v.(*FileOptions); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - case 3: - return &v.extensionFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[11].Exporter = func(v any, i int) any { - switch v := v.(*MessageOptions); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - case 3: - return &v.extensionFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[12].Exporter = func(v any, i int) any { - switch v := v.(*FieldOptions); i { - case 0: - return &v.state - case 1: - 
return &v.sizeCache - case 2: - return &v.unknownFields - case 3: - return &v.extensionFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[13].Exporter = func(v any, i int) any { - switch v := v.(*OneofOptions); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - case 3: - return &v.extensionFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[14].Exporter = func(v any, i int) any { - switch v := v.(*EnumOptions); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - case 3: - return &v.extensionFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[15].Exporter = func(v any, i int) any { - switch v := v.(*EnumValueOptions); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - case 3: - return &v.extensionFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[16].Exporter = func(v any, i int) any { - switch v := v.(*ServiceOptions); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - case 3: - return &v.extensionFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[17].Exporter = func(v any, i int) any { - switch v := v.(*MethodOptions); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - case 3: - return &v.extensionFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[18].Exporter = func(v any, i int) any { - switch v := v.(*UninterpretedOption); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[19].Exporter = func(v any, i int) any { - switch v := v.(*FeatureSet); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - case 3: - return &v.extensionFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[20].Exporter = func(v any, i int) any { - switch v := v.(*FeatureSetDefaults); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[21].Exporter = func(v any, i int) any { - switch v := v.(*SourceCodeInfo); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[22].Exporter = func(v any, i int) any { - switch v := v.(*GeneratedCodeInfo); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[23].Exporter = func(v any, i int) any { - switch v := v.(*DescriptorProto_ExtensionRange); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[24].Exporter = func(v any, i int) any { - switch v := v.(*DescriptorProto_ReservedRange); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[25].Exporter = func(v any, i int) any { - 
switch v := v.(*ExtensionRangeOptions_Declaration); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[26].Exporter = func(v any, i int) any { - switch v := v.(*EnumDescriptorProto_EnumReservedRange); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[27].Exporter = func(v any, i int) any { - switch v := v.(*FieldOptions_EditionDefault); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[28].Exporter = func(v any, i int) any { - switch v := v.(*FieldOptions_FeatureSupport); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[29].Exporter = func(v any, i int) any { - switch v := v.(*UninterpretedOption_NamePart); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[30].Exporter = func(v any, i int) any { - switch v := v.(*FeatureSetDefaults_FeatureSetEditionDefault); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[31].Exporter = func(v any, i int) any { - switch v := v.(*SourceCodeInfo_Location); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[32].Exporter = func(v any, i int) any { - switch v := v.(*GeneratedCodeInfo_Annotation); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_google_protobuf_descriptor_proto_rawDesc, + RawDescriptor: unsafe.Slice(unsafe.StringData(file_google_protobuf_descriptor_proto_rawDesc), len(file_google_protobuf_descriptor_proto_rawDesc)), NumEnums: 17, NumMessages: 33, NumExtensions: 0, @@ -5819,7 +5305,6 @@ func file_google_protobuf_descriptor_proto_init() { MessageInfos: file_google_protobuf_descriptor_proto_msgTypes, }.Build() File_google_protobuf_descriptor_proto = out.File - file_google_protobuf_descriptor_proto_rawDesc = nil file_google_protobuf_descriptor_proto_goTypes = nil file_google_protobuf_descriptor_proto_depIdxs = nil } diff --git a/vendor/google.golang.org/protobuf/types/dynamicpb/types.go b/vendor/google.golang.org/protobuf/types/dynamicpb/types.go index c432817bb..8e759fc9f 100644 --- a/vendor/google.golang.org/protobuf/types/dynamicpb/types.go +++ b/vendor/google.golang.org/protobuf/types/dynamicpb/types.go @@ -28,11 +28,7 @@ type extField struct { type Types struct { // atomicExtFiles is used with sync/atomic and hence must be the first word // of the struct to guarantee 64-bit alignment. - // - // TODO(stapelberg): once we only support Go 1.19 and newer, switch this - // field to be of type atomic.Uint64 to guarantee alignment on - // stack-allocated values, too. 
- atomicExtFiles uint64 + atomicExtFiles atomic.Uint64 extMu sync.Mutex files *protoregistry.Files @@ -90,7 +86,7 @@ func (t *Types) FindExtensionByName(name protoreflect.FullName) (protoreflect.Ex func (t *Types) FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) { // Construct the extension number map lazily, since not every user will need it. // Update the map if new files are added to the registry. - if atomic.LoadUint64(&t.atomicExtFiles) != uint64(t.files.NumFiles()) { + if t.atomicExtFiles.Load() != uint64(t.files.NumFiles()) { t.updateExtensions() } xd := t.extensionsByMessage[extField{message, field}] @@ -133,10 +129,10 @@ func (t *Types) FindMessageByURL(url string) (protoreflect.MessageType, error) { func (t *Types) updateExtensions() { t.extMu.Lock() defer t.extMu.Unlock() - if atomic.LoadUint64(&t.atomicExtFiles) == uint64(t.files.NumFiles()) { + if t.atomicExtFiles.Load() == uint64(t.files.NumFiles()) { return } - defer atomic.StoreUint64(&t.atomicExtFiles, uint64(t.files.NumFiles())) + defer t.atomicExtFiles.Store(uint64(t.files.NumFiles())) t.files.RangeFiles(func(fd protoreflect.FileDescriptor) bool { t.registerExtensions(fd.Extensions()) t.registerExtensionsInMessages(fd.Messages()) diff --git a/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go b/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go index a2ca940c5..28d24bad7 100644 --- a/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go +++ b/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go @@ -16,24 +16,153 @@ import ( descriptorpb "google.golang.org/protobuf/types/descriptorpb" reflect "reflect" sync "sync" + unsafe "unsafe" ) -type GoFeatures struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields +type GoFeatures_APILevel int32 + +const ( + // API_LEVEL_UNSPECIFIED results in selecting the OPEN API, + // but needs to be a separate value to distinguish between + // an explicitly set api level or a missing api level. + GoFeatures_API_LEVEL_UNSPECIFIED GoFeatures_APILevel = 0 + GoFeatures_API_OPEN GoFeatures_APILevel = 1 + GoFeatures_API_HYBRID GoFeatures_APILevel = 2 + GoFeatures_API_OPAQUE GoFeatures_APILevel = 3 +) + +// Enum value maps for GoFeatures_APILevel. +var ( + GoFeatures_APILevel_name = map[int32]string{ + 0: "API_LEVEL_UNSPECIFIED", + 1: "API_OPEN", + 2: "API_HYBRID", + 3: "API_OPAQUE", + } + GoFeatures_APILevel_value = map[string]int32{ + "API_LEVEL_UNSPECIFIED": 0, + "API_OPEN": 1, + "API_HYBRID": 2, + "API_OPAQUE": 3, + } +) + +func (x GoFeatures_APILevel) Enum() *GoFeatures_APILevel { + p := new(GoFeatures_APILevel) + *p = x + return p +} + +func (x GoFeatures_APILevel) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (GoFeatures_APILevel) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_go_features_proto_enumTypes[0].Descriptor() +} + +func (GoFeatures_APILevel) Type() protoreflect.EnumType { + return &file_google_protobuf_go_features_proto_enumTypes[0] +} + +func (x GoFeatures_APILevel) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. 
+func (x *GoFeatures_APILevel) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = GoFeatures_APILevel(num) + return nil +} + +// Deprecated: Use GoFeatures_APILevel.Descriptor instead. +func (GoFeatures_APILevel) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_go_features_proto_rawDescGZIP(), []int{0, 0} +} +type GoFeatures_StripEnumPrefix int32 + +const ( + GoFeatures_STRIP_ENUM_PREFIX_UNSPECIFIED GoFeatures_StripEnumPrefix = 0 + GoFeatures_STRIP_ENUM_PREFIX_KEEP GoFeatures_StripEnumPrefix = 1 + GoFeatures_STRIP_ENUM_PREFIX_GENERATE_BOTH GoFeatures_StripEnumPrefix = 2 + GoFeatures_STRIP_ENUM_PREFIX_STRIP GoFeatures_StripEnumPrefix = 3 +) + +// Enum value maps for GoFeatures_StripEnumPrefix. +var ( + GoFeatures_StripEnumPrefix_name = map[int32]string{ + 0: "STRIP_ENUM_PREFIX_UNSPECIFIED", + 1: "STRIP_ENUM_PREFIX_KEEP", + 2: "STRIP_ENUM_PREFIX_GENERATE_BOTH", + 3: "STRIP_ENUM_PREFIX_STRIP", + } + GoFeatures_StripEnumPrefix_value = map[string]int32{ + "STRIP_ENUM_PREFIX_UNSPECIFIED": 0, + "STRIP_ENUM_PREFIX_KEEP": 1, + "STRIP_ENUM_PREFIX_GENERATE_BOTH": 2, + "STRIP_ENUM_PREFIX_STRIP": 3, + } +) + +func (x GoFeatures_StripEnumPrefix) Enum() *GoFeatures_StripEnumPrefix { + p := new(GoFeatures_StripEnumPrefix) + *p = x + return p +} + +func (x GoFeatures_StripEnumPrefix) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (GoFeatures_StripEnumPrefix) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_go_features_proto_enumTypes[1].Descriptor() +} + +func (GoFeatures_StripEnumPrefix) Type() protoreflect.EnumType { + return &file_google_protobuf_go_features_proto_enumTypes[1] +} + +func (x GoFeatures_StripEnumPrefix) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. +func (x *GoFeatures_StripEnumPrefix) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = GoFeatures_StripEnumPrefix(num) + return nil +} + +// Deprecated: Use GoFeatures_StripEnumPrefix.Descriptor instead. +func (GoFeatures_StripEnumPrefix) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_go_features_proto_rawDescGZIP(), []int{0, 1} +} + +type GoFeatures struct { + state protoimpl.MessageState `protogen:"open.v1"` // Whether or not to generate the deprecated UnmarshalJSON method for enums. + // Can only be true for proto using the Open Struct api. LegacyUnmarshalJsonEnum *bool `protobuf:"varint,1,opt,name=legacy_unmarshal_json_enum,json=legacyUnmarshalJsonEnum" json:"legacy_unmarshal_json_enum,omitempty"` + // One of OPEN, HYBRID or OPAQUE. 
+ ApiLevel *GoFeatures_APILevel `protobuf:"varint,2,opt,name=api_level,json=apiLevel,enum=pb.GoFeatures_APILevel" json:"api_level,omitempty"` + StripEnumPrefix *GoFeatures_StripEnumPrefix `protobuf:"varint,3,opt,name=strip_enum_prefix,json=stripEnumPrefix,enum=pb.GoFeatures_StripEnumPrefix" json:"strip_enum_prefix,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *GoFeatures) Reset() { *x = GoFeatures{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_go_features_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_go_features_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *GoFeatures) String() string { @@ -44,7 +173,7 @@ func (*GoFeatures) ProtoMessage() {} func (x *GoFeatures) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_go_features_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -66,6 +195,20 @@ func (x *GoFeatures) GetLegacyUnmarshalJsonEnum() bool { return false } +func (x *GoFeatures) GetApiLevel() GoFeatures_APILevel { + if x != nil && x.ApiLevel != nil { + return *x.ApiLevel + } + return GoFeatures_API_LEVEL_UNSPECIFIED +} + +func (x *GoFeatures) GetStripEnumPrefix() GoFeatures_StripEnumPrefix { + if x != nil && x.StripEnumPrefix != nil { + return *x.StripEnumPrefix + } + return GoFeatures_STRIP_ENUM_PREFIX_UNSPECIFIED +} + var file_google_protobuf_go_features_proto_extTypes = []protoimpl.ExtensionInfo{ { ExtendedType: (*descriptorpb.FeatureSet)(nil), @@ -85,12 +228,12 @@ var ( var File_google_protobuf_go_features_proto protoreflect.FileDescriptor -var file_google_protobuf_go_features_proto_rawDesc = []byte{ +var file_google_protobuf_go_features_proto_rawDesc = string([]byte{ 0x0a, 0x21, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x67, 0x6f, 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x02, 0x70, 0x62, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, - 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xcd, 0x01, 0x0a, 0x0a, 0x47, 0x6f, + 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xab, 0x05, 0x0a, 0x0a, 0x47, 0x6f, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0xbe, 0x01, 0x0a, 0x1a, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x5f, 0x75, 0x6e, 0x6d, 0x61, 0x72, 0x73, 0x68, 0x61, 0x6c, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x65, 0x6e, 0x75, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x42, 0x80, 0x01, @@ -103,41 +246,76 @@ var file_google_protobuf_go_features_proto_rawDesc = []byte{ 0x20, 0x62, 0x65, 0x20, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x64, 0x20, 0x69, 0x6e, 0x20, 0x61, 0x20, 0x66, 0x75, 0x74, 0x75, 0x72, 0x65, 0x20, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x17, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x55, 0x6e, 0x6d, 0x61, 0x72, 0x73, 0x68, 0x61, - 0x6c, 0x4a, 0x73, 0x6f, 0x6e, 0x45, 0x6e, 0x75, 0x6d, 0x3a, 0x3c, 0x0a, 0x02, 0x67, 0x6f, 0x12, - 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x18, 0xea, 0x07, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x70, 
0x62, 0x2e, 0x47, 0x6f, 0x46, 0x65, 0x61, 0x74, 0x75, - 0x72, 0x65, 0x73, 0x52, 0x02, 0x67, 0x6f, 0x42, 0x2f, 0x5a, 0x2d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x67, 0x6f, 0x66, 0x65, - 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x70, 0x62, -} + 0x6c, 0x4a, 0x73, 0x6f, 0x6e, 0x45, 0x6e, 0x75, 0x6d, 0x12, 0x74, 0x0a, 0x09, 0x61, 0x70, 0x69, + 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x17, 0x2e, 0x70, + 0x62, 0x2e, 0x47, 0x6f, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x2e, 0x41, 0x50, 0x49, + 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x42, 0x3e, 0x88, 0x01, 0x01, 0x98, 0x01, 0x03, 0x98, 0x01, 0x01, + 0xa2, 0x01, 0x1a, 0x12, 0x15, 0x41, 0x50, 0x49, 0x5f, 0x4c, 0x45, 0x56, 0x45, 0x4c, 0x5f, 0x55, + 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x18, 0x84, 0x07, 0xa2, 0x01, 0x0f, + 0x12, 0x0a, 0x41, 0x50, 0x49, 0x5f, 0x4f, 0x50, 0x41, 0x51, 0x55, 0x45, 0x18, 0xe9, 0x07, 0xb2, + 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x08, 0x61, 0x70, 0x69, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, + 0x7c, 0x0a, 0x11, 0x73, 0x74, 0x72, 0x69, 0x70, 0x5f, 0x65, 0x6e, 0x75, 0x6d, 0x5f, 0x70, 0x72, + 0x65, 0x66, 0x69, 0x78, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1e, 0x2e, 0x70, 0x62, 0x2e, + 0x47, 0x6f, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x70, + 0x45, 0x6e, 0x75, 0x6d, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x42, 0x30, 0x88, 0x01, 0x01, 0x98, + 0x01, 0x06, 0x98, 0x01, 0x07, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x1b, 0x12, 0x16, 0x53, 0x54, 0x52, + 0x49, 0x50, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x5f, 0x4b, + 0x45, 0x45, 0x50, 0x18, 0x84, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe9, 0x07, 0x52, 0x0f, 0x73, 0x74, + 0x72, 0x69, 0x70, 0x45, 0x6e, 0x75, 0x6d, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x22, 0x53, 0x0a, + 0x08, 0x41, 0x50, 0x49, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x19, 0x0a, 0x15, 0x41, 0x50, 0x49, + 0x5f, 0x4c, 0x45, 0x56, 0x45, 0x4c, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, + 0x45, 0x44, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x41, 0x50, 0x49, 0x5f, 0x4f, 0x50, 0x45, 0x4e, + 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x41, 0x50, 0x49, 0x5f, 0x48, 0x59, 0x42, 0x52, 0x49, 0x44, + 0x10, 0x02, 0x12, 0x0e, 0x0a, 0x0a, 0x41, 0x50, 0x49, 0x5f, 0x4f, 0x50, 0x41, 0x51, 0x55, 0x45, + 0x10, 0x03, 0x22, 0x92, 0x01, 0x0a, 0x0f, 0x53, 0x74, 0x72, 0x69, 0x70, 0x45, 0x6e, 0x75, 0x6d, + 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x21, 0x0a, 0x1d, 0x53, 0x54, 0x52, 0x49, 0x50, 0x5f, + 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x5f, 0x55, 0x4e, 0x53, 0x50, + 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x1a, 0x0a, 0x16, 0x53, 0x54, 0x52, + 0x49, 0x50, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x5f, 0x4b, + 0x45, 0x45, 0x50, 0x10, 0x01, 0x12, 0x23, 0x0a, 0x1f, 0x53, 0x54, 0x52, 0x49, 0x50, 0x5f, 0x45, + 0x4e, 0x55, 0x4d, 0x5f, 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x5f, 0x47, 0x45, 0x4e, 0x45, 0x52, + 0x41, 0x54, 0x45, 0x5f, 0x42, 0x4f, 0x54, 0x48, 0x10, 0x02, 0x12, 0x1b, 0x0a, 0x17, 0x53, 0x54, + 0x52, 0x49, 0x50, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x5f, + 0x53, 0x54, 0x52, 0x49, 0x50, 0x10, 0x03, 0x3a, 0x3c, 0x0a, 0x02, 0x67, 0x6f, 0x12, 0x1b, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x46, 0x65, 
0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x18, 0xea, 0x07, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x0e, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x6f, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, + 0x73, 0x52, 0x02, 0x67, 0x6f, 0x42, 0x2f, 0x5a, 0x2d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x67, 0x6f, 0x66, 0x65, 0x61, 0x74, + 0x75, 0x72, 0x65, 0x73, 0x70, 0x62, +}) var ( file_google_protobuf_go_features_proto_rawDescOnce sync.Once - file_google_protobuf_go_features_proto_rawDescData = file_google_protobuf_go_features_proto_rawDesc + file_google_protobuf_go_features_proto_rawDescData []byte ) func file_google_protobuf_go_features_proto_rawDescGZIP() []byte { file_google_protobuf_go_features_proto_rawDescOnce.Do(func() { - file_google_protobuf_go_features_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_go_features_proto_rawDescData) + file_google_protobuf_go_features_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_google_protobuf_go_features_proto_rawDesc), len(file_google_protobuf_go_features_proto_rawDesc))) }) return file_google_protobuf_go_features_proto_rawDescData } +var file_google_protobuf_go_features_proto_enumTypes = make([]protoimpl.EnumInfo, 2) var file_google_protobuf_go_features_proto_msgTypes = make([]protoimpl.MessageInfo, 1) var file_google_protobuf_go_features_proto_goTypes = []any{ - (*GoFeatures)(nil), // 0: pb.GoFeatures - (*descriptorpb.FeatureSet)(nil), // 1: google.protobuf.FeatureSet + (GoFeatures_APILevel)(0), // 0: pb.GoFeatures.APILevel + (GoFeatures_StripEnumPrefix)(0), // 1: pb.GoFeatures.StripEnumPrefix + (*GoFeatures)(nil), // 2: pb.GoFeatures + (*descriptorpb.FeatureSet)(nil), // 3: google.protobuf.FeatureSet } var file_google_protobuf_go_features_proto_depIdxs = []int32{ - 1, // 0: pb.go:extendee -> google.protobuf.FeatureSet - 0, // 1: pb.go:type_name -> pb.GoFeatures - 2, // [2:2] is the sub-list for method output_type - 2, // [2:2] is the sub-list for method input_type - 1, // [1:2] is the sub-list for extension type_name - 0, // [0:1] is the sub-list for extension extendee - 0, // [0:0] is the sub-list for field type_name + 0, // 0: pb.GoFeatures.api_level:type_name -> pb.GoFeatures.APILevel + 1, // 1: pb.GoFeatures.strip_enum_prefix:type_name -> pb.GoFeatures.StripEnumPrefix + 3, // 2: pb.go:extendee -> google.protobuf.FeatureSet + 2, // 3: pb.go:type_name -> pb.GoFeatures + 4, // [4:4] is the sub-list for method output_type + 4, // [4:4] is the sub-list for method input_type + 3, // [3:4] is the sub-list for extension type_name + 2, // [2:3] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name } func init() { file_google_protobuf_go_features_proto_init() } @@ -145,37 +323,23 @@ func file_google_protobuf_go_features_proto_init() { if File_google_protobuf_go_features_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_google_protobuf_go_features_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*GoFeatures); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_google_protobuf_go_features_proto_rawDesc, - NumEnums: 0, + RawDescriptor: 
unsafe.Slice(unsafe.StringData(file_google_protobuf_go_features_proto_rawDesc), len(file_google_protobuf_go_features_proto_rawDesc)), + NumEnums: 2, NumMessages: 1, NumExtensions: 1, NumServices: 0, }, GoTypes: file_google_protobuf_go_features_proto_goTypes, DependencyIndexes: file_google_protobuf_go_features_proto_depIdxs, + EnumInfos: file_google_protobuf_go_features_proto_enumTypes, MessageInfos: file_google_protobuf_go_features_proto_msgTypes, ExtensionInfos: file_google_protobuf_go_features_proto_extTypes, }.Build() File_google_protobuf_go_features_proto = out.File - file_google_protobuf_go_features_proto_rawDesc = nil file_google_protobuf_go_features_proto_goTypes = nil file_google_protobuf_go_features_proto_depIdxs = nil } diff --git a/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go b/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go index 7172b43d3..497da66e9 100644 --- a/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go @@ -122,6 +122,7 @@ import ( reflect "reflect" strings "strings" sync "sync" + unsafe "unsafe" ) // `Any` contains an arbitrary serialized protocol buffer message along with a @@ -210,10 +211,7 @@ import ( // "value": "1.212s" // } type Any struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // A URL/resource name that uniquely identifies the type of the serialized // protocol buffer message. This string must contain at least // one "/" character. The last segment of the URL's path must represent @@ -244,7 +242,9 @@ type Any struct { // used with implementation specific semantics. TypeUrl string `protobuf:"bytes,1,opt,name=type_url,json=typeUrl,proto3" json:"type_url,omitempty"` // Must be a valid serialized protocol buffer of the above specified type. - Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } // New marshals src into a new Any instance. 
@@ -368,11 +368,9 @@ func (x *Any) UnmarshalNew() (proto.Message, error) { func (x *Any) Reset() { *x = Any{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_any_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_any_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Any) String() string { @@ -383,7 +381,7 @@ func (*Any) ProtoMessage() {} func (x *Any) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_any_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -414,7 +412,7 @@ func (x *Any) GetValue() []byte { var File_google_protobuf_any_proto protoreflect.FileDescriptor -var file_google_protobuf_any_proto_rawDesc = []byte{ +var file_google_protobuf_any_proto_rawDesc = string([]byte{ 0x0a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x22, 0x36, 0x0a, 0x03, @@ -430,16 +428,16 @@ var file_google_protobuf_any_proto_rawDesc = []byte{ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} +}) var ( file_google_protobuf_any_proto_rawDescOnce sync.Once - file_google_protobuf_any_proto_rawDescData = file_google_protobuf_any_proto_rawDesc + file_google_protobuf_any_proto_rawDescData []byte ) func file_google_protobuf_any_proto_rawDescGZIP() []byte { file_google_protobuf_any_proto_rawDescOnce.Do(func() { - file_google_protobuf_any_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_any_proto_rawDescData) + file_google_protobuf_any_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_google_protobuf_any_proto_rawDesc), len(file_google_protobuf_any_proto_rawDesc))) }) return file_google_protobuf_any_proto_rawDescData } @@ -461,25 +459,11 @@ func file_google_protobuf_any_proto_init() { if File_google_protobuf_any_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_google_protobuf_any_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*Any); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_google_protobuf_any_proto_rawDesc, + RawDescriptor: unsafe.Slice(unsafe.StringData(file_google_protobuf_any_proto_rawDesc), len(file_google_protobuf_any_proto_rawDesc)), NumEnums: 0, NumMessages: 1, NumExtensions: 0, @@ -490,7 +474,6 @@ func file_google_protobuf_any_proto_init() { MessageInfos: file_google_protobuf_any_proto_msgTypes, }.Build() File_google_protobuf_any_proto = out.File - file_google_protobuf_any_proto_rawDesc = nil file_google_protobuf_any_proto_goTypes = nil file_google_protobuf_any_proto_depIdxs = nil } diff --git a/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go b/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go index 1b71bcd91..193880d18 100644 --- 
a/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go @@ -80,6 +80,7 @@ import ( reflect "reflect" sync "sync" time "time" + unsafe "unsafe" ) // A Duration represents a signed, fixed-length span of time represented @@ -141,10 +142,7 @@ import ( // be expressed in JSON format as "3.000000001s", and 3 seconds and 1 // microsecond should be expressed in JSON format as "3.000001s". type Duration struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // Signed seconds of the span of time. Must be from -315,576,000,000 // to +315,576,000,000 inclusive. Note: these bounds are computed from: // 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years @@ -155,7 +153,9 @@ type Duration struct { // of one second or more, a non-zero value for the `nanos` field must be // of the same sign as the `seconds` field. Must be from -999,999,999 // to +999,999,999 inclusive. - Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` + Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } // New constructs a new Duration from the provided time.Duration. @@ -245,11 +245,9 @@ func (x *Duration) check() uint { func (x *Duration) Reset() { *x = Duration{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_duration_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_duration_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Duration) String() string { @@ -260,7 +258,7 @@ func (*Duration) ProtoMessage() {} func (x *Duration) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_duration_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -291,7 +289,7 @@ func (x *Duration) GetNanos() int32 { var File_google_protobuf_duration_proto protoreflect.FileDescriptor -var file_google_protobuf_duration_proto_rawDesc = []byte{ +var file_google_protobuf_duration_proto_rawDesc = string([]byte{ 0x0a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, @@ -308,16 +306,16 @@ var file_google_protobuf_duration_proto_rawDesc = []byte{ 0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} +}) var ( file_google_protobuf_duration_proto_rawDescOnce sync.Once - file_google_protobuf_duration_proto_rawDescData = file_google_protobuf_duration_proto_rawDesc + file_google_protobuf_duration_proto_rawDescData []byte ) func file_google_protobuf_duration_proto_rawDescGZIP() []byte { file_google_protobuf_duration_proto_rawDescOnce.Do(func() { - file_google_protobuf_duration_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_duration_proto_rawDescData) + 
file_google_protobuf_duration_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_google_protobuf_duration_proto_rawDesc), len(file_google_protobuf_duration_proto_rawDesc))) }) return file_google_protobuf_duration_proto_rawDescData } @@ -339,25 +337,11 @@ func file_google_protobuf_duration_proto_init() { if File_google_protobuf_duration_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_google_protobuf_duration_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*Duration); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_google_protobuf_duration_proto_rawDesc, + RawDescriptor: unsafe.Slice(unsafe.StringData(file_google_protobuf_duration_proto_rawDesc), len(file_google_protobuf_duration_proto_rawDesc)), NumEnums: 0, NumMessages: 1, NumExtensions: 0, @@ -368,7 +352,6 @@ func file_google_protobuf_duration_proto_init() { MessageInfos: file_google_protobuf_duration_proto_msgTypes, }.Build() File_google_protobuf_duration_proto = out.File - file_google_protobuf_duration_proto_rawDesc = nil file_google_protobuf_duration_proto_goTypes = nil file_google_protobuf_duration_proto_depIdxs = nil } diff --git a/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go b/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go index d87b4fb82..a5b8657c4 100644 --- a/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go @@ -38,6 +38,7 @@ import ( protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" sync "sync" + unsafe "unsafe" ) // A generic empty message that you can re-use to avoid defining duplicated @@ -48,18 +49,16 @@ import ( // rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); // } type Empty struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *Empty) Reset() { *x = Empty{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_empty_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_empty_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Empty) String() string { @@ -70,7 +69,7 @@ func (*Empty) ProtoMessage() {} func (x *Empty) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_empty_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -87,7 +86,7 @@ func (*Empty) Descriptor() ([]byte, []int) { var File_google_protobuf_empty_proto protoreflect.FileDescriptor -var file_google_protobuf_empty_proto_rawDesc = []byte{ +var file_google_protobuf_empty_proto_rawDesc = string([]byte{ 0x0a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x22, 0x07, @@ -100,16 +99,16 @@ var file_google_protobuf_empty_proto_rawDesc = 
[]byte{ 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} +}) var ( file_google_protobuf_empty_proto_rawDescOnce sync.Once - file_google_protobuf_empty_proto_rawDescData = file_google_protobuf_empty_proto_rawDesc + file_google_protobuf_empty_proto_rawDescData []byte ) func file_google_protobuf_empty_proto_rawDescGZIP() []byte { file_google_protobuf_empty_proto_rawDescOnce.Do(func() { - file_google_protobuf_empty_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_empty_proto_rawDescData) + file_google_protobuf_empty_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_google_protobuf_empty_proto_rawDesc), len(file_google_protobuf_empty_proto_rawDesc))) }) return file_google_protobuf_empty_proto_rawDescData } @@ -131,25 +130,11 @@ func file_google_protobuf_empty_proto_init() { if File_google_protobuf_empty_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_google_protobuf_empty_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*Empty); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_google_protobuf_empty_proto_rawDesc, + RawDescriptor: unsafe.Slice(unsafe.StringData(file_google_protobuf_empty_proto_rawDesc), len(file_google_protobuf_empty_proto_rawDesc)), NumEnums: 0, NumMessages: 1, NumExtensions: 0, @@ -160,7 +145,6 @@ func file_google_protobuf_empty_proto_init() { MessageInfos: file_google_protobuf_empty_proto_msgTypes, }.Build() File_google_protobuf_empty_proto = out.File - file_google_protobuf_empty_proto_rawDesc = nil file_google_protobuf_empty_proto_goTypes = nil file_google_protobuf_empty_proto_depIdxs = nil } diff --git a/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go b/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go index ac1e91bb6..041feb0f3 100644 --- a/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go @@ -83,6 +83,7 @@ import ( sort "sort" strings "strings" sync "sync" + unsafe "unsafe" ) // `FieldMask` represents a set of symbolic field paths, for example: @@ -284,12 +285,11 @@ import ( // request should verify the included field paths, and return an // `INVALID_ARGUMENT` error if any path is unmappable. type FieldMask struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // The set of field mask paths. 
- Paths []string `protobuf:"bytes,1,rep,name=paths,proto3" json:"paths,omitempty"` + Paths []string `protobuf:"bytes,1,rep,name=paths,proto3" json:"paths,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } // New constructs a field mask from a list of paths and verifies that @@ -467,11 +467,9 @@ func rangeFields(path string, f func(field string) bool) bool { func (x *FieldMask) Reset() { *x = FieldMask{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_field_mask_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_field_mask_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FieldMask) String() string { @@ -482,7 +480,7 @@ func (*FieldMask) ProtoMessage() {} func (x *FieldMask) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_field_mask_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -506,7 +504,7 @@ func (x *FieldMask) GetPaths() []string { var File_google_protobuf_field_mask_proto protoreflect.FileDescriptor -var file_google_protobuf_field_mask_proto_rawDesc = []byte{ +var file_google_protobuf_field_mask_proto_rawDesc = string([]byte{ 0x0a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, @@ -522,16 +520,16 @@ var file_google_protobuf_field_mask_proto_rawDesc = []byte{ 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} +}) var ( file_google_protobuf_field_mask_proto_rawDescOnce sync.Once - file_google_protobuf_field_mask_proto_rawDescData = file_google_protobuf_field_mask_proto_rawDesc + file_google_protobuf_field_mask_proto_rawDescData []byte ) func file_google_protobuf_field_mask_proto_rawDescGZIP() []byte { file_google_protobuf_field_mask_proto_rawDescOnce.Do(func() { - file_google_protobuf_field_mask_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_field_mask_proto_rawDescData) + file_google_protobuf_field_mask_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_google_protobuf_field_mask_proto_rawDesc), len(file_google_protobuf_field_mask_proto_rawDesc))) }) return file_google_protobuf_field_mask_proto_rawDescData } @@ -553,25 +551,11 @@ func file_google_protobuf_field_mask_proto_init() { if File_google_protobuf_field_mask_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_google_protobuf_field_mask_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*FieldMask); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_google_protobuf_field_mask_proto_rawDesc, + RawDescriptor: unsafe.Slice(unsafe.StringData(file_google_protobuf_field_mask_proto_rawDesc), len(file_google_protobuf_field_mask_proto_rawDesc)), NumEnums: 0, NumMessages: 1, 
NumExtensions: 0, @@ -582,7 +566,6 @@ func file_google_protobuf_field_mask_proto_init() { MessageInfos: file_google_protobuf_field_mask_proto_msgTypes, }.Build() File_google_protobuf_field_mask_proto = out.File - file_google_protobuf_field_mask_proto_rawDesc = nil file_google_protobuf_field_mask_proto_goTypes = nil file_google_protobuf_field_mask_proto_depIdxs = nil } diff --git a/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go b/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go index d45361cbc..ecdd31ab5 100644 --- a/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go @@ -120,6 +120,7 @@ package structpb import ( base64 "encoding/base64" + json "encoding/json" protojson "google.golang.org/protobuf/encoding/protojson" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" @@ -127,6 +128,7 @@ import ( reflect "reflect" sync "sync" utf8 "unicode/utf8" + unsafe "unsafe" ) // `NullValue` is a singleton enumeration to represent the null value for the @@ -186,12 +188,11 @@ func (NullValue) EnumDescriptor() ([]byte, []int) { // // The JSON representation for `Struct` is JSON object. type Struct struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // Unordered map of dynamically typed values. - Fields map[string]*Value `protobuf:"bytes,1,rep,name=fields,proto3" json:"fields,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Fields map[string]*Value `protobuf:"bytes,1,rep,name=fields,proto3" json:"fields,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } // NewStruct constructs a Struct from a general-purpose Go map. @@ -233,11 +234,9 @@ func (x *Struct) UnmarshalJSON(b []byte) error { func (x *Struct) Reset() { *x = Struct{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_struct_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_struct_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Struct) String() string { @@ -248,7 +247,7 @@ func (*Struct) ProtoMessage() {} func (x *Struct) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_struct_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -277,13 +276,10 @@ func (x *Struct) GetFields() map[string]*Value { // // The JSON representation for `Value` is JSON value. type Value struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // The kind of value. 
// - // Types that are assignable to Kind: + // Types that are valid to be assigned to Kind: // // *Value_NullValue // *Value_NumberValue @@ -291,24 +287,27 @@ type Value struct { // *Value_BoolValue // *Value_StructValue // *Value_ListValue - Kind isValue_Kind `protobuf_oneof:"kind"` + Kind isValue_Kind `protobuf_oneof:"kind"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } // NewValue constructs a Value from a general-purpose Go interface. // -// ╔════════════════════════╤════════════════════════════════════════════╗ -// ║ Go type │ Conversion ║ -// ╠════════════════════════╪════════════════════════════════════════════╣ -// ║ nil │ stored as NullValue ║ -// ║ bool │ stored as BoolValue ║ -// ║ int, int32, int64 │ stored as NumberValue ║ -// ║ uint, uint32, uint64 │ stored as NumberValue ║ -// ║ float32, float64 │ stored as NumberValue ║ -// ║ string │ stored as StringValue; must be valid UTF-8 ║ -// ║ []byte │ stored as StringValue; base64-encoded ║ -// ║ map[string]any │ stored as StructValue ║ -// ║ []any │ stored as ListValue ║ -// ╚════════════════════════╧════════════════════════════════════════════╝ +// ╔═══════════════════════════════════════╤════════════════════════════════════════════╗ +// ║ Go type │ Conversion ║ +// ╠═══════════════════════════════════════╪════════════════════════════════════════════╣ +// ║ nil │ stored as NullValue ║ +// ║ bool │ stored as BoolValue ║ +// ║ int, int8, int16, int32, int64 │ stored as NumberValue ║ +// ║ uint, uint8, uint16, uint32, uint64 │ stored as NumberValue ║ +// ║ float32, float64 │ stored as NumberValue ║ +// ║ json.Number │ stored as NumberValue ║ +// ║ string │ stored as StringValue; must be valid UTF-8 ║ +// ║ []byte │ stored as StringValue; base64-encoded ║ +// ║ map[string]any │ stored as StructValue ║ +// ║ []any │ stored as ListValue ║ +// ╚═══════════════════════════════════════╧════════════════════════════════════════════╝ // // When converting an int64 or uint64 to a NumberValue, numeric precision loss // is possible since they are stored as a float64. 
@@ -320,12 +319,20 @@ func NewValue(v any) (*Value, error) { return NewBoolValue(v), nil case int: return NewNumberValue(float64(v)), nil + case int8: + return NewNumberValue(float64(v)), nil + case int16: + return NewNumberValue(float64(v)), nil case int32: return NewNumberValue(float64(v)), nil case int64: return NewNumberValue(float64(v)), nil case uint: return NewNumberValue(float64(v)), nil + case uint8: + return NewNumberValue(float64(v)), nil + case uint16: + return NewNumberValue(float64(v)), nil case uint32: return NewNumberValue(float64(v)), nil case uint64: @@ -334,6 +341,12 @@ func NewValue(v any) (*Value, error) { return NewNumberValue(float64(v)), nil case float64: return NewNumberValue(float64(v)), nil + case json.Number: + n, err := v.Float64() + if err != nil { + return nil, protoimpl.X.NewError("invalid number format %q, expected a float64: %v", v, err) + } + return NewNumberValue(n), nil case string: if !utf8.ValidString(v) { return nil, protoimpl.X.NewError("invalid UTF-8 in string: %q", v) @@ -441,11 +454,9 @@ func (x *Value) UnmarshalJSON(b []byte) error { func (x *Value) Reset() { *x = Value{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_struct_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_struct_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Value) String() string { @@ -456,7 +467,7 @@ func (*Value) ProtoMessage() {} func (x *Value) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_struct_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -471,51 +482,63 @@ func (*Value) Descriptor() ([]byte, []int) { return file_google_protobuf_struct_proto_rawDescGZIP(), []int{1} } -func (m *Value) GetKind() isValue_Kind { - if m != nil { - return m.Kind +func (x *Value) GetKind() isValue_Kind { + if x != nil { + return x.Kind } return nil } func (x *Value) GetNullValue() NullValue { - if x, ok := x.GetKind().(*Value_NullValue); ok { - return x.NullValue + if x != nil { + if x, ok := x.Kind.(*Value_NullValue); ok { + return x.NullValue + } } return NullValue_NULL_VALUE } func (x *Value) GetNumberValue() float64 { - if x, ok := x.GetKind().(*Value_NumberValue); ok { - return x.NumberValue + if x != nil { + if x, ok := x.Kind.(*Value_NumberValue); ok { + return x.NumberValue + } } return 0 } func (x *Value) GetStringValue() string { - if x, ok := x.GetKind().(*Value_StringValue); ok { - return x.StringValue + if x != nil { + if x, ok := x.Kind.(*Value_StringValue); ok { + return x.StringValue + } } return "" } func (x *Value) GetBoolValue() bool { - if x, ok := x.GetKind().(*Value_BoolValue); ok { - return x.BoolValue + if x != nil { + if x, ok := x.Kind.(*Value_BoolValue); ok { + return x.BoolValue + } } return false } func (x *Value) GetStructValue() *Struct { - if x, ok := x.GetKind().(*Value_StructValue); ok { - return x.StructValue + if x != nil { + if x, ok := x.Kind.(*Value_StructValue); ok { + return x.StructValue + } } return nil } func (x *Value) GetListValue() *ListValue { - if x, ok := x.GetKind().(*Value_ListValue); ok { - return x.ListValue + if x != nil { + if x, ok := x.Kind.(*Value_ListValue); ok { + return x.ListValue + } } return nil } @@ -570,12 +593,11 @@ func (*Value_ListValue) isValue_Kind() {} // // The JSON representation for `ListValue` 
is JSON array. type ListValue struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // Repeated field of dynamically typed values. - Values []*Value `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"` + Values []*Value `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } // NewList constructs a ListValue from a general-purpose Go slice. @@ -613,11 +635,9 @@ func (x *ListValue) UnmarshalJSON(b []byte) error { func (x *ListValue) Reset() { *x = ListValue{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_struct_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_struct_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ListValue) String() string { @@ -628,7 +648,7 @@ func (*ListValue) ProtoMessage() {} func (x *ListValue) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_struct_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -652,7 +672,7 @@ func (x *ListValue) GetValues() []*Value { var File_google_protobuf_struct_proto protoreflect.FileDescriptor -var file_google_protobuf_struct_proto_rawDesc = []byte{ +var file_google_protobuf_struct_proto_rawDesc = string([]byte{ 0x0a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x22, @@ -700,16 +720,16 @@ var file_google_protobuf_struct_proto_rawDesc = []byte{ 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} +}) var ( file_google_protobuf_struct_proto_rawDescOnce sync.Once - file_google_protobuf_struct_proto_rawDescData = file_google_protobuf_struct_proto_rawDesc + file_google_protobuf_struct_proto_rawDescData []byte ) func file_google_protobuf_struct_proto_rawDescGZIP() []byte { file_google_protobuf_struct_proto_rawDescOnce.Do(func() { - file_google_protobuf_struct_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_struct_proto_rawDescData) + file_google_protobuf_struct_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_google_protobuf_struct_proto_rawDesc), len(file_google_protobuf_struct_proto_rawDesc))) }) return file_google_protobuf_struct_proto_rawDescData } @@ -742,44 +762,6 @@ func file_google_protobuf_struct_proto_init() { if File_google_protobuf_struct_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_google_protobuf_struct_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*Struct); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_struct_proto_msgTypes[1].Exporter = func(v any, i int) any { - switch v := v.(*Value); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - 
file_google_protobuf_struct_proto_msgTypes[2].Exporter = func(v any, i int) any { - switch v := v.(*ListValue); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } file_google_protobuf_struct_proto_msgTypes[1].OneofWrappers = []any{ (*Value_NullValue)(nil), (*Value_NumberValue)(nil), @@ -792,7 +774,7 @@ func file_google_protobuf_struct_proto_init() { out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_google_protobuf_struct_proto_rawDesc, + RawDescriptor: unsafe.Slice(unsafe.StringData(file_google_protobuf_struct_proto_rawDesc), len(file_google_protobuf_struct_proto_rawDesc)), NumEnums: 1, NumMessages: 4, NumExtensions: 0, @@ -804,7 +786,6 @@ func file_google_protobuf_struct_proto_init() { MessageInfos: file_google_protobuf_struct_proto_msgTypes, }.Build() File_google_protobuf_struct_proto = out.File - file_google_protobuf_struct_proto_rawDesc = nil file_google_protobuf_struct_proto_goTypes = nil file_google_protobuf_struct_proto_depIdxs = nil } diff --git a/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go b/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go index 83a5a645b..00ac835c0 100644 --- a/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go @@ -78,6 +78,7 @@ import ( reflect "reflect" sync "sync" time "time" + unsafe "unsafe" ) // A Timestamp represents a point in time independent of any time zone or local @@ -170,10 +171,7 @@ import ( // http://joda-time.sourceforge.net/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime() // ) to obtain a formatter capable of generating timestamps in this format. type Timestamp struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // Represents seconds of UTC time since Unix epoch // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to // 9999-12-31T23:59:59Z inclusive. @@ -182,7 +180,9 @@ type Timestamp struct { // second values with fractions must still have non-negative nanos values // that count forward in time. Must be from 0 to 999,999,999 // inclusive. - Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` + Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } // Now constructs a new Timestamp from the current time. 
@@ -254,11 +254,9 @@ func (x *Timestamp) check() uint { func (x *Timestamp) Reset() { *x = Timestamp{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_timestamp_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_timestamp_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Timestamp) String() string { @@ -269,7 +267,7 @@ func (*Timestamp) ProtoMessage() {} func (x *Timestamp) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_timestamp_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -300,7 +298,7 @@ func (x *Timestamp) GetNanos() int32 { var File_google_protobuf_timestamp_proto protoreflect.FileDescriptor -var file_google_protobuf_timestamp_proto_rawDesc = []byte{ +var file_google_protobuf_timestamp_proto_rawDesc = string([]byte{ 0x0a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, @@ -317,16 +315,16 @@ var file_google_protobuf_timestamp_proto_rawDesc = []byte{ 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} +}) var ( file_google_protobuf_timestamp_proto_rawDescOnce sync.Once - file_google_protobuf_timestamp_proto_rawDescData = file_google_protobuf_timestamp_proto_rawDesc + file_google_protobuf_timestamp_proto_rawDescData []byte ) func file_google_protobuf_timestamp_proto_rawDescGZIP() []byte { file_google_protobuf_timestamp_proto_rawDescOnce.Do(func() { - file_google_protobuf_timestamp_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_timestamp_proto_rawDescData) + file_google_protobuf_timestamp_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_google_protobuf_timestamp_proto_rawDesc), len(file_google_protobuf_timestamp_proto_rawDesc))) }) return file_google_protobuf_timestamp_proto_rawDescData } @@ -348,25 +346,11 @@ func file_google_protobuf_timestamp_proto_init() { if File_google_protobuf_timestamp_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_google_protobuf_timestamp_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*Timestamp); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_google_protobuf_timestamp_proto_rawDesc, + RawDescriptor: unsafe.Slice(unsafe.StringData(file_google_protobuf_timestamp_proto_rawDesc), len(file_google_protobuf_timestamp_proto_rawDesc)), NumEnums: 0, NumMessages: 1, NumExtensions: 0, @@ -377,7 +361,6 @@ func file_google_protobuf_timestamp_proto_init() { MessageInfos: file_google_protobuf_timestamp_proto_msgTypes, }.Build() File_google_protobuf_timestamp_proto = out.File - file_google_protobuf_timestamp_proto_rawDesc = nil file_google_protobuf_timestamp_proto_goTypes = nil 
file_google_protobuf_timestamp_proto_depIdxs = nil } diff --git a/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go b/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go index e473f826a..5de530106 100644 --- a/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go @@ -48,18 +48,18 @@ import ( protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" sync "sync" + unsafe "unsafe" ) // Wrapper message for `double`. // // The JSON representation for `DoubleValue` is JSON number. type DoubleValue struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // The double value. - Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"` + Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } // Double stores v in a new DoubleValue and returns a pointer to it. @@ -69,11 +69,9 @@ func Double(v float64) *DoubleValue { func (x *DoubleValue) Reset() { *x = DoubleValue{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_wrappers_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_wrappers_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *DoubleValue) String() string { @@ -84,7 +82,7 @@ func (*DoubleValue) ProtoMessage() {} func (x *DoubleValue) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_wrappers_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -110,12 +108,11 @@ func (x *DoubleValue) GetValue() float64 { // // The JSON representation for `FloatValue` is JSON number. type FloatValue struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // The float value. - Value float32 `protobuf:"fixed32,1,opt,name=value,proto3" json:"value,omitempty"` + Value float32 `protobuf:"fixed32,1,opt,name=value,proto3" json:"value,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } // Float stores v in a new FloatValue and returns a pointer to it. @@ -125,11 +122,9 @@ func Float(v float32) *FloatValue { func (x *FloatValue) Reset() { *x = FloatValue{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_wrappers_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_wrappers_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FloatValue) String() string { @@ -140,7 +135,7 @@ func (*FloatValue) ProtoMessage() {} func (x *FloatValue) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_wrappers_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -166,12 +161,11 @@ func (x *FloatValue) GetValue() float32 { // // The JSON representation for `Int64Value` is JSON string. 
type Int64Value struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // The int64 value. - Value int64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` + Value int64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } // Int64 stores v in a new Int64Value and returns a pointer to it. @@ -181,11 +175,9 @@ func Int64(v int64) *Int64Value { func (x *Int64Value) Reset() { *x = Int64Value{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_wrappers_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_wrappers_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Int64Value) String() string { @@ -196,7 +188,7 @@ func (*Int64Value) ProtoMessage() {} func (x *Int64Value) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_wrappers_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -222,12 +214,11 @@ func (x *Int64Value) GetValue() int64 { // // The JSON representation for `UInt64Value` is JSON string. type UInt64Value struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // The uint64 value. - Value uint64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` + Value uint64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } // UInt64 stores v in a new UInt64Value and returns a pointer to it. @@ -237,11 +228,9 @@ func UInt64(v uint64) *UInt64Value { func (x *UInt64Value) Reset() { *x = UInt64Value{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_wrappers_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_wrappers_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *UInt64Value) String() string { @@ -252,7 +241,7 @@ func (*UInt64Value) ProtoMessage() {} func (x *UInt64Value) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_wrappers_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -278,12 +267,11 @@ func (x *UInt64Value) GetValue() uint64 { // // The JSON representation for `Int32Value` is JSON number. type Int32Value struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // The int32 value. - Value int32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` + Value int32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } // Int32 stores v in a new Int32Value and returns a pointer to it. 
@@ -293,11 +281,9 @@ func Int32(v int32) *Int32Value { func (x *Int32Value) Reset() { *x = Int32Value{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_wrappers_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_wrappers_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Int32Value) String() string { @@ -308,7 +294,7 @@ func (*Int32Value) ProtoMessage() {} func (x *Int32Value) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_wrappers_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -334,12 +320,11 @@ func (x *Int32Value) GetValue() int32 { // // The JSON representation for `UInt32Value` is JSON number. type UInt32Value struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // The uint32 value. - Value uint32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` + Value uint32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } // UInt32 stores v in a new UInt32Value and returns a pointer to it. @@ -349,11 +334,9 @@ func UInt32(v uint32) *UInt32Value { func (x *UInt32Value) Reset() { *x = UInt32Value{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_wrappers_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_wrappers_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *UInt32Value) String() string { @@ -364,7 +347,7 @@ func (*UInt32Value) ProtoMessage() {} func (x *UInt32Value) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_wrappers_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -390,12 +373,11 @@ func (x *UInt32Value) GetValue() uint32 { // // The JSON representation for `BoolValue` is JSON `true` and `false`. type BoolValue struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // The bool value. - Value bool `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` + Value bool `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } // Bool stores v in a new BoolValue and returns a pointer to it. 
@@ -405,11 +387,9 @@ func Bool(v bool) *BoolValue { func (x *BoolValue) Reset() { *x = BoolValue{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_wrappers_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_wrappers_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *BoolValue) String() string { @@ -420,7 +400,7 @@ func (*BoolValue) ProtoMessage() {} func (x *BoolValue) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_wrappers_proto_msgTypes[6] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -446,12 +426,11 @@ func (x *BoolValue) GetValue() bool { // // The JSON representation for `StringValue` is JSON string. type StringValue struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // The string value. - Value string `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` + Value string `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } // String stores v in a new StringValue and returns a pointer to it. @@ -461,11 +440,9 @@ func String(v string) *StringValue { func (x *StringValue) Reset() { *x = StringValue{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_wrappers_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_wrappers_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *StringValue) String() string { @@ -476,7 +453,7 @@ func (*StringValue) ProtoMessage() {} func (x *StringValue) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_wrappers_proto_msgTypes[7] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -502,12 +479,11 @@ func (x *StringValue) GetValue() string { // // The JSON representation for `BytesValue` is JSON string. type BytesValue struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // The bytes value. - Value []byte `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` + Value []byte `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } // Bytes stores v in a new BytesValue and returns a pointer to it. 
@@ -517,11 +493,9 @@ func Bytes(v []byte) *BytesValue { func (x *BytesValue) Reset() { *x = BytesValue{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_wrappers_proto_msgTypes[8] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_wrappers_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *BytesValue) String() string { @@ -532,7 +506,7 @@ func (*BytesValue) ProtoMessage() {} func (x *BytesValue) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_wrappers_proto_msgTypes[8] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -556,7 +530,7 @@ func (x *BytesValue) GetValue() []byte { var File_google_protobuf_wrappers_proto protoreflect.FileDescriptor -var file_google_protobuf_wrappers_proto_rawDesc = []byte{ +var file_google_protobuf_wrappers_proto_rawDesc = string([]byte{ 0x0a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, @@ -590,16 +564,16 @@ var file_google_protobuf_wrappers_proto_rawDesc = []byte{ 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} +}) var ( file_google_protobuf_wrappers_proto_rawDescOnce sync.Once - file_google_protobuf_wrappers_proto_rawDescData = file_google_protobuf_wrappers_proto_rawDesc + file_google_protobuf_wrappers_proto_rawDescData []byte ) func file_google_protobuf_wrappers_proto_rawDescGZIP() []byte { file_google_protobuf_wrappers_proto_rawDescOnce.Do(func() { - file_google_protobuf_wrappers_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_wrappers_proto_rawDescData) + file_google_protobuf_wrappers_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_google_protobuf_wrappers_proto_rawDesc), len(file_google_protobuf_wrappers_proto_rawDesc))) }) return file_google_protobuf_wrappers_proto_rawDescData } @@ -629,121 +603,11 @@ func file_google_protobuf_wrappers_proto_init() { if File_google_protobuf_wrappers_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_google_protobuf_wrappers_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*DoubleValue); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_wrappers_proto_msgTypes[1].Exporter = func(v any, i int) any { - switch v := v.(*FloatValue); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_wrappers_proto_msgTypes[2].Exporter = func(v any, i int) any { - switch v := v.(*Int64Value); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_wrappers_proto_msgTypes[3].Exporter = func(v any, i int) any { - switch v := v.(*UInt64Value); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } 
- file_google_protobuf_wrappers_proto_msgTypes[4].Exporter = func(v any, i int) any { - switch v := v.(*Int32Value); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_wrappers_proto_msgTypes[5].Exporter = func(v any, i int) any { - switch v := v.(*UInt32Value); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_wrappers_proto_msgTypes[6].Exporter = func(v any, i int) any { - switch v := v.(*BoolValue); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_wrappers_proto_msgTypes[7].Exporter = func(v any, i int) any { - switch v := v.(*StringValue); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_wrappers_proto_msgTypes[8].Exporter = func(v any, i int) any { - switch v := v.(*BytesValue); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_google_protobuf_wrappers_proto_rawDesc, + RawDescriptor: unsafe.Slice(unsafe.StringData(file_google_protobuf_wrappers_proto_rawDesc), len(file_google_protobuf_wrappers_proto_rawDesc)), NumEnums: 0, NumMessages: 9, NumExtensions: 0, @@ -754,7 +618,6 @@ func file_google_protobuf_wrappers_proto_init() { MessageInfos: file_google_protobuf_wrappers_proto_msgTypes, }.Build() File_google_protobuf_wrappers_proto = out.File - file_google_protobuf_wrappers_proto_rawDesc = nil file_google_protobuf_wrappers_proto_goTypes = nil file_google_protobuf_wrappers_proto_depIdxs = nil } diff --git a/vendor/k8s.io/api/admissionregistration/v1alpha1/doc.go b/vendor/k8s.io/api/admissionregistration/v1alpha1/doc.go index 385c60e0d..98066211d 100644 --- a/vendor/k8s.io/api/admissionregistration/v1alpha1/doc.go +++ b/vendor/k8s.io/api/admissionregistration/v1alpha1/doc.go @@ -17,6 +17,7 @@ limitations under the License. // +k8s:deepcopy-gen=package // +k8s:protobuf-gen=package // +k8s:openapi-gen=true +// +k8s:prerelease-lifecycle-gen=true // +groupName=admissionregistration.k8s.io // Package v1alpha1 is the v1alpha1 version of the API. diff --git a/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.pb.go b/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.pb.go index 111cc7287..993ff6f20 100644 --- a/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.pb.go +++ b/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.pb.go @@ -25,6 +25,7 @@ import ( io "io" proto "github.com/gogo/protobuf/proto" + k8s_io_api_admissionregistration_v1 "k8s.io/api/admissionregistration/v1" k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -45,10 +46,38 @@ var _ = math.Inf // proto package needs to be updated. 
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +func (m *ApplyConfiguration) Reset() { *m = ApplyConfiguration{} } +func (*ApplyConfiguration) ProtoMessage() {} +func (*ApplyConfiguration) Descriptor() ([]byte, []int) { + return fileDescriptor_2c49182728ae0af5, []int{0} +} +func (m *ApplyConfiguration) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ApplyConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ApplyConfiguration) XXX_Merge(src proto.Message) { + xxx_messageInfo_ApplyConfiguration.Merge(m, src) +} +func (m *ApplyConfiguration) XXX_Size() int { + return m.Size() +} +func (m *ApplyConfiguration) XXX_DiscardUnknown() { + xxx_messageInfo_ApplyConfiguration.DiscardUnknown(m) +} + +var xxx_messageInfo_ApplyConfiguration proto.InternalMessageInfo + func (m *AuditAnnotation) Reset() { *m = AuditAnnotation{} } func (*AuditAnnotation) ProtoMessage() {} func (*AuditAnnotation) Descriptor() ([]byte, []int) { - return fileDescriptor_2c49182728ae0af5, []int{0} + return fileDescriptor_2c49182728ae0af5, []int{1} } func (m *AuditAnnotation) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -76,7 +105,7 @@ var xxx_messageInfo_AuditAnnotation proto.InternalMessageInfo func (m *ExpressionWarning) Reset() { *m = ExpressionWarning{} } func (*ExpressionWarning) ProtoMessage() {} func (*ExpressionWarning) Descriptor() ([]byte, []int) { - return fileDescriptor_2c49182728ae0af5, []int{1} + return fileDescriptor_2c49182728ae0af5, []int{2} } func (m *ExpressionWarning) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -101,10 +130,38 @@ func (m *ExpressionWarning) XXX_DiscardUnknown() { var xxx_messageInfo_ExpressionWarning proto.InternalMessageInfo +func (m *JSONPatch) Reset() { *m = JSONPatch{} } +func (*JSONPatch) ProtoMessage() {} +func (*JSONPatch) Descriptor() ([]byte, []int) { + return fileDescriptor_2c49182728ae0af5, []int{3} +} +func (m *JSONPatch) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *JSONPatch) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *JSONPatch) XXX_Merge(src proto.Message) { + xxx_messageInfo_JSONPatch.Merge(m, src) +} +func (m *JSONPatch) XXX_Size() int { + return m.Size() +} +func (m *JSONPatch) XXX_DiscardUnknown() { + xxx_messageInfo_JSONPatch.DiscardUnknown(m) +} + +var xxx_messageInfo_JSONPatch proto.InternalMessageInfo + func (m *MatchCondition) Reset() { *m = MatchCondition{} } func (*MatchCondition) ProtoMessage() {} func (*MatchCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_2c49182728ae0af5, []int{2} + return fileDescriptor_2c49182728ae0af5, []int{4} } func (m *MatchCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -132,7 +189,7 @@ var xxx_messageInfo_MatchCondition proto.InternalMessageInfo func (m *MatchResources) Reset() { *m = MatchResources{} } func (*MatchResources) ProtoMessage() {} func (*MatchResources) Descriptor() ([]byte, []int) { - return fileDescriptor_2c49182728ae0af5, []int{3} + return fileDescriptor_2c49182728ae0af5, []int{5} } func (m *MatchResources) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -157,10 +214,206 @@ func (m *MatchResources) XXX_DiscardUnknown() { var xxx_messageInfo_MatchResources proto.InternalMessageInfo +func (m 
*MutatingAdmissionPolicy) Reset() { *m = MutatingAdmissionPolicy{} } +func (*MutatingAdmissionPolicy) ProtoMessage() {} +func (*MutatingAdmissionPolicy) Descriptor() ([]byte, []int) { + return fileDescriptor_2c49182728ae0af5, []int{6} +} +func (m *MutatingAdmissionPolicy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MutatingAdmissionPolicy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *MutatingAdmissionPolicy) XXX_Merge(src proto.Message) { + xxx_messageInfo_MutatingAdmissionPolicy.Merge(m, src) +} +func (m *MutatingAdmissionPolicy) XXX_Size() int { + return m.Size() +} +func (m *MutatingAdmissionPolicy) XXX_DiscardUnknown() { + xxx_messageInfo_MutatingAdmissionPolicy.DiscardUnknown(m) +} + +var xxx_messageInfo_MutatingAdmissionPolicy proto.InternalMessageInfo + +func (m *MutatingAdmissionPolicyBinding) Reset() { *m = MutatingAdmissionPolicyBinding{} } +func (*MutatingAdmissionPolicyBinding) ProtoMessage() {} +func (*MutatingAdmissionPolicyBinding) Descriptor() ([]byte, []int) { + return fileDescriptor_2c49182728ae0af5, []int{7} +} +func (m *MutatingAdmissionPolicyBinding) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MutatingAdmissionPolicyBinding) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *MutatingAdmissionPolicyBinding) XXX_Merge(src proto.Message) { + xxx_messageInfo_MutatingAdmissionPolicyBinding.Merge(m, src) +} +func (m *MutatingAdmissionPolicyBinding) XXX_Size() int { + return m.Size() +} +func (m *MutatingAdmissionPolicyBinding) XXX_DiscardUnknown() { + xxx_messageInfo_MutatingAdmissionPolicyBinding.DiscardUnknown(m) +} + +var xxx_messageInfo_MutatingAdmissionPolicyBinding proto.InternalMessageInfo + +func (m *MutatingAdmissionPolicyBindingList) Reset() { *m = MutatingAdmissionPolicyBindingList{} } +func (*MutatingAdmissionPolicyBindingList) ProtoMessage() {} +func (*MutatingAdmissionPolicyBindingList) Descriptor() ([]byte, []int) { + return fileDescriptor_2c49182728ae0af5, []int{8} +} +func (m *MutatingAdmissionPolicyBindingList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MutatingAdmissionPolicyBindingList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *MutatingAdmissionPolicyBindingList) XXX_Merge(src proto.Message) { + xxx_messageInfo_MutatingAdmissionPolicyBindingList.Merge(m, src) +} +func (m *MutatingAdmissionPolicyBindingList) XXX_Size() int { + return m.Size() +} +func (m *MutatingAdmissionPolicyBindingList) XXX_DiscardUnknown() { + xxx_messageInfo_MutatingAdmissionPolicyBindingList.DiscardUnknown(m) +} + +var xxx_messageInfo_MutatingAdmissionPolicyBindingList proto.InternalMessageInfo + +func (m *MutatingAdmissionPolicyBindingSpec) Reset() { *m = MutatingAdmissionPolicyBindingSpec{} } +func (*MutatingAdmissionPolicyBindingSpec) ProtoMessage() {} +func (*MutatingAdmissionPolicyBindingSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_2c49182728ae0af5, []int{9} +} +func (m *MutatingAdmissionPolicyBindingSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MutatingAdmissionPolicyBindingSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + 
b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *MutatingAdmissionPolicyBindingSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_MutatingAdmissionPolicyBindingSpec.Merge(m, src) +} +func (m *MutatingAdmissionPolicyBindingSpec) XXX_Size() int { + return m.Size() +} +func (m *MutatingAdmissionPolicyBindingSpec) XXX_DiscardUnknown() { + xxx_messageInfo_MutatingAdmissionPolicyBindingSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_MutatingAdmissionPolicyBindingSpec proto.InternalMessageInfo + +func (m *MutatingAdmissionPolicyList) Reset() { *m = MutatingAdmissionPolicyList{} } +func (*MutatingAdmissionPolicyList) ProtoMessage() {} +func (*MutatingAdmissionPolicyList) Descriptor() ([]byte, []int) { + return fileDescriptor_2c49182728ae0af5, []int{10} +} +func (m *MutatingAdmissionPolicyList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MutatingAdmissionPolicyList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *MutatingAdmissionPolicyList) XXX_Merge(src proto.Message) { + xxx_messageInfo_MutatingAdmissionPolicyList.Merge(m, src) +} +func (m *MutatingAdmissionPolicyList) XXX_Size() int { + return m.Size() +} +func (m *MutatingAdmissionPolicyList) XXX_DiscardUnknown() { + xxx_messageInfo_MutatingAdmissionPolicyList.DiscardUnknown(m) +} + +var xxx_messageInfo_MutatingAdmissionPolicyList proto.InternalMessageInfo + +func (m *MutatingAdmissionPolicySpec) Reset() { *m = MutatingAdmissionPolicySpec{} } +func (*MutatingAdmissionPolicySpec) ProtoMessage() {} +func (*MutatingAdmissionPolicySpec) Descriptor() ([]byte, []int) { + return fileDescriptor_2c49182728ae0af5, []int{11} +} +func (m *MutatingAdmissionPolicySpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MutatingAdmissionPolicySpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *MutatingAdmissionPolicySpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_MutatingAdmissionPolicySpec.Merge(m, src) +} +func (m *MutatingAdmissionPolicySpec) XXX_Size() int { + return m.Size() +} +func (m *MutatingAdmissionPolicySpec) XXX_DiscardUnknown() { + xxx_messageInfo_MutatingAdmissionPolicySpec.DiscardUnknown(m) +} + +var xxx_messageInfo_MutatingAdmissionPolicySpec proto.InternalMessageInfo + +func (m *Mutation) Reset() { *m = Mutation{} } +func (*Mutation) ProtoMessage() {} +func (*Mutation) Descriptor() ([]byte, []int) { + return fileDescriptor_2c49182728ae0af5, []int{12} +} +func (m *Mutation) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Mutation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Mutation) XXX_Merge(src proto.Message) { + xxx_messageInfo_Mutation.Merge(m, src) +} +func (m *Mutation) XXX_Size() int { + return m.Size() +} +func (m *Mutation) XXX_DiscardUnknown() { + xxx_messageInfo_Mutation.DiscardUnknown(m) +} + +var xxx_messageInfo_Mutation proto.InternalMessageInfo + func (m *NamedRuleWithOperations) Reset() { *m = NamedRuleWithOperations{} } func (*NamedRuleWithOperations) ProtoMessage() {} func (*NamedRuleWithOperations) Descriptor() ([]byte, []int) { - return 
fileDescriptor_2c49182728ae0af5, []int{4} + return fileDescriptor_2c49182728ae0af5, []int{13} } func (m *NamedRuleWithOperations) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -188,7 +441,7 @@ var xxx_messageInfo_NamedRuleWithOperations proto.InternalMessageInfo func (m *ParamKind) Reset() { *m = ParamKind{} } func (*ParamKind) ProtoMessage() {} func (*ParamKind) Descriptor() ([]byte, []int) { - return fileDescriptor_2c49182728ae0af5, []int{5} + return fileDescriptor_2c49182728ae0af5, []int{14} } func (m *ParamKind) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -216,7 +469,7 @@ var xxx_messageInfo_ParamKind proto.InternalMessageInfo func (m *ParamRef) Reset() { *m = ParamRef{} } func (*ParamRef) ProtoMessage() {} func (*ParamRef) Descriptor() ([]byte, []int) { - return fileDescriptor_2c49182728ae0af5, []int{6} + return fileDescriptor_2c49182728ae0af5, []int{15} } func (m *ParamRef) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -244,7 +497,7 @@ var xxx_messageInfo_ParamRef proto.InternalMessageInfo func (m *TypeChecking) Reset() { *m = TypeChecking{} } func (*TypeChecking) ProtoMessage() {} func (*TypeChecking) Descriptor() ([]byte, []int) { - return fileDescriptor_2c49182728ae0af5, []int{7} + return fileDescriptor_2c49182728ae0af5, []int{16} } func (m *TypeChecking) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -272,7 +525,7 @@ var xxx_messageInfo_TypeChecking proto.InternalMessageInfo func (m *ValidatingAdmissionPolicy) Reset() { *m = ValidatingAdmissionPolicy{} } func (*ValidatingAdmissionPolicy) ProtoMessage() {} func (*ValidatingAdmissionPolicy) Descriptor() ([]byte, []int) { - return fileDescriptor_2c49182728ae0af5, []int{8} + return fileDescriptor_2c49182728ae0af5, []int{17} } func (m *ValidatingAdmissionPolicy) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -300,7 +553,7 @@ var xxx_messageInfo_ValidatingAdmissionPolicy proto.InternalMessageInfo func (m *ValidatingAdmissionPolicyBinding) Reset() { *m = ValidatingAdmissionPolicyBinding{} } func (*ValidatingAdmissionPolicyBinding) ProtoMessage() {} func (*ValidatingAdmissionPolicyBinding) Descriptor() ([]byte, []int) { - return fileDescriptor_2c49182728ae0af5, []int{9} + return fileDescriptor_2c49182728ae0af5, []int{18} } func (m *ValidatingAdmissionPolicyBinding) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -328,7 +581,7 @@ var xxx_messageInfo_ValidatingAdmissionPolicyBinding proto.InternalMessageInfo func (m *ValidatingAdmissionPolicyBindingList) Reset() { *m = ValidatingAdmissionPolicyBindingList{} } func (*ValidatingAdmissionPolicyBindingList) ProtoMessage() {} func (*ValidatingAdmissionPolicyBindingList) Descriptor() ([]byte, []int) { - return fileDescriptor_2c49182728ae0af5, []int{10} + return fileDescriptor_2c49182728ae0af5, []int{19} } func (m *ValidatingAdmissionPolicyBindingList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -356,7 +609,7 @@ var xxx_messageInfo_ValidatingAdmissionPolicyBindingList proto.InternalMessageIn func (m *ValidatingAdmissionPolicyBindingSpec) Reset() { *m = ValidatingAdmissionPolicyBindingSpec{} } func (*ValidatingAdmissionPolicyBindingSpec) ProtoMessage() {} func (*ValidatingAdmissionPolicyBindingSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_2c49182728ae0af5, []int{11} + return fileDescriptor_2c49182728ae0af5, []int{20} } func (m *ValidatingAdmissionPolicyBindingSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -384,7 +637,7 @@ var xxx_messageInfo_ValidatingAdmissionPolicyBindingSpec 
proto.InternalMessageIn func (m *ValidatingAdmissionPolicyList) Reset() { *m = ValidatingAdmissionPolicyList{} } func (*ValidatingAdmissionPolicyList) ProtoMessage() {} func (*ValidatingAdmissionPolicyList) Descriptor() ([]byte, []int) { - return fileDescriptor_2c49182728ae0af5, []int{12} + return fileDescriptor_2c49182728ae0af5, []int{21} } func (m *ValidatingAdmissionPolicyList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -412,7 +665,7 @@ var xxx_messageInfo_ValidatingAdmissionPolicyList proto.InternalMessageInfo func (m *ValidatingAdmissionPolicySpec) Reset() { *m = ValidatingAdmissionPolicySpec{} } func (*ValidatingAdmissionPolicySpec) ProtoMessage() {} func (*ValidatingAdmissionPolicySpec) Descriptor() ([]byte, []int) { - return fileDescriptor_2c49182728ae0af5, []int{13} + return fileDescriptor_2c49182728ae0af5, []int{22} } func (m *ValidatingAdmissionPolicySpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -440,7 +693,7 @@ var xxx_messageInfo_ValidatingAdmissionPolicySpec proto.InternalMessageInfo func (m *ValidatingAdmissionPolicyStatus) Reset() { *m = ValidatingAdmissionPolicyStatus{} } func (*ValidatingAdmissionPolicyStatus) ProtoMessage() {} func (*ValidatingAdmissionPolicyStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_2c49182728ae0af5, []int{14} + return fileDescriptor_2c49182728ae0af5, []int{23} } func (m *ValidatingAdmissionPolicyStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -468,7 +721,7 @@ var xxx_messageInfo_ValidatingAdmissionPolicyStatus proto.InternalMessageInfo func (m *Validation) Reset() { *m = Validation{} } func (*Validation) ProtoMessage() {} func (*Validation) Descriptor() ([]byte, []int) { - return fileDescriptor_2c49182728ae0af5, []int{15} + return fileDescriptor_2c49182728ae0af5, []int{24} } func (m *Validation) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -496,7 +749,7 @@ var xxx_messageInfo_Validation proto.InternalMessageInfo func (m *Variable) Reset() { *m = Variable{} } func (*Variable) ProtoMessage() {} func (*Variable) Descriptor() ([]byte, []int) { - return fileDescriptor_2c49182728ae0af5, []int{16} + return fileDescriptor_2c49182728ae0af5, []int{25} } func (m *Variable) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -522,10 +775,19 @@ func (m *Variable) XXX_DiscardUnknown() { var xxx_messageInfo_Variable proto.InternalMessageInfo func init() { + proto.RegisterType((*ApplyConfiguration)(nil), "k8s.io.api.admissionregistration.v1alpha1.ApplyConfiguration") proto.RegisterType((*AuditAnnotation)(nil), "k8s.io.api.admissionregistration.v1alpha1.AuditAnnotation") proto.RegisterType((*ExpressionWarning)(nil), "k8s.io.api.admissionregistration.v1alpha1.ExpressionWarning") + proto.RegisterType((*JSONPatch)(nil), "k8s.io.api.admissionregistration.v1alpha1.JSONPatch") proto.RegisterType((*MatchCondition)(nil), "k8s.io.api.admissionregistration.v1alpha1.MatchCondition") proto.RegisterType((*MatchResources)(nil), "k8s.io.api.admissionregistration.v1alpha1.MatchResources") + proto.RegisterType((*MutatingAdmissionPolicy)(nil), "k8s.io.api.admissionregistration.v1alpha1.MutatingAdmissionPolicy") + proto.RegisterType((*MutatingAdmissionPolicyBinding)(nil), "k8s.io.api.admissionregistration.v1alpha1.MutatingAdmissionPolicyBinding") + proto.RegisterType((*MutatingAdmissionPolicyBindingList)(nil), "k8s.io.api.admissionregistration.v1alpha1.MutatingAdmissionPolicyBindingList") + proto.RegisterType((*MutatingAdmissionPolicyBindingSpec)(nil), 
"k8s.io.api.admissionregistration.v1alpha1.MutatingAdmissionPolicyBindingSpec") + proto.RegisterType((*MutatingAdmissionPolicyList)(nil), "k8s.io.api.admissionregistration.v1alpha1.MutatingAdmissionPolicyList") + proto.RegisterType((*MutatingAdmissionPolicySpec)(nil), "k8s.io.api.admissionregistration.v1alpha1.MutatingAdmissionPolicySpec") + proto.RegisterType((*Mutation)(nil), "k8s.io.api.admissionregistration.v1alpha1.Mutation") proto.RegisterType((*NamedRuleWithOperations)(nil), "k8s.io.api.admissionregistration.v1alpha1.NamedRuleWithOperations") proto.RegisterType((*ParamKind)(nil), "k8s.io.api.admissionregistration.v1alpha1.ParamKind") proto.RegisterType((*ParamRef)(nil), "k8s.io.api.admissionregistration.v1alpha1.ParamRef") @@ -546,101 +808,147 @@ func init() { } var fileDescriptor_2c49182728ae0af5 = []byte{ - // 1498 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x58, 0x5b, 0x6f, 0x1b, 0xc5, - 0x17, 0xcf, 0xc6, 0x6e, 0x12, 0x8f, 0x73, 0xb1, 0xe7, 0xdf, 0x2a, 0x6e, 0xfe, 0xd4, 0x1b, 0xad, - 0x2a, 0xd4, 0x48, 0xb0, 0x26, 0x69, 0xa1, 0xb4, 0x42, 0x42, 0xd9, 0xde, 0xe8, 0x25, 0x17, 0x4d, - 0x51, 0x22, 0x21, 0x90, 0x98, 0xec, 0x4e, 0xec, 0x69, 0xbc, 0x17, 0x76, 0xd6, 0xa1, 0x11, 0x48, - 0x54, 0xe2, 0x05, 0xde, 0x78, 0xe0, 0x85, 0x57, 0x3e, 0x02, 0xdf, 0x80, 0xb7, 0x3e, 0xf6, 0xb1, - 0x3c, 0x60, 0x51, 0xf3, 0xc2, 0x27, 0x00, 0x29, 0x2f, 0xa0, 0x99, 0x9d, 0xbd, 0xda, 0x26, 0x76, - 0x09, 0xbc, 0x79, 0xce, 0x9c, 0xf3, 0xfb, 0xcd, 0x39, 0x73, 0xce, 0xd9, 0x33, 0x06, 0xd7, 0x0e, - 0xde, 0x66, 0x3a, 0x75, 0x1b, 0xd8, 0xa3, 0x0d, 0x6c, 0xd9, 0x94, 0x31, 0xea, 0x3a, 0x3e, 0x69, - 0x52, 0x16, 0xf8, 0x38, 0xa0, 0xae, 0xd3, 0x38, 0x5c, 0xc5, 0x6d, 0xaf, 0x85, 0x57, 0x1b, 0x4d, - 0xe2, 0x10, 0x1f, 0x07, 0xc4, 0xd2, 0x3d, 0xdf, 0x0d, 0x5c, 0xb8, 0x12, 0x9a, 0xea, 0xd8, 0xa3, - 0xfa, 0x40, 0x53, 0x3d, 0x32, 0x5d, 0x7a, 0xbd, 0x49, 0x83, 0x56, 0x67, 0x4f, 0x37, 0x5d, 0xbb, - 0xd1, 0x74, 0x9b, 0x6e, 0x43, 0x20, 0xec, 0x75, 0xf6, 0xc5, 0x4a, 0x2c, 0xc4, 0xaf, 0x10, 0x79, - 0xe9, 0xf2, 0x08, 0x87, 0xca, 0x1f, 0x67, 0xe9, 0x4a, 0x62, 0x64, 0x63, 0xb3, 0x45, 0x1d, 0xe2, - 0x1f, 0x35, 0xbc, 0x83, 0x26, 0x17, 0xb0, 0x86, 0x4d, 0x02, 0x3c, 0xc8, 0xaa, 0x31, 0xcc, 0xca, - 0xef, 0x38, 0x01, 0xb5, 0x49, 0x9f, 0xc1, 0x5b, 0x27, 0x19, 0x30, 0xb3, 0x45, 0x6c, 0x9c, 0xb7, - 0xd3, 0x18, 0x58, 0x58, 0xef, 0x58, 0x34, 0x58, 0x77, 0x1c, 0x37, 0x10, 0x4e, 0xc0, 0x0b, 0xa0, - 0x70, 0x40, 0x8e, 0x6a, 0xca, 0xb2, 0x72, 0xa9, 0x64, 0x94, 0x9f, 0x76, 0xd5, 0x89, 0x5e, 0x57, - 0x2d, 0xdc, 0x27, 0x47, 0x88, 0xcb, 0xe1, 0x3a, 0x58, 0x38, 0xc4, 0xed, 0x0e, 0xb9, 0xf5, 0xd8, - 0xf3, 0x89, 0x08, 0x41, 0x6d, 0x52, 0xa8, 0x2e, 0x4a, 0xd5, 0x85, 0x9d, 0xec, 0x36, 0xca, 0xeb, - 0x6b, 0x6d, 0x50, 0x4d, 0x56, 0xbb, 0xd8, 0x77, 0xa8, 0xd3, 0x84, 0xaf, 0x81, 0x99, 0x7d, 0x4a, - 0xda, 0x16, 0x22, 0xfb, 0x12, 0xb0, 0x22, 0x01, 0x67, 0x6e, 0x4b, 0x39, 0x8a, 0x35, 0xe0, 0x0a, - 0x98, 0xfe, 0x34, 0x34, 0xac, 0x15, 0x84, 0xf2, 0x82, 0x54, 0x9e, 0x96, 0x78, 0x28, 0xda, 0xd7, - 0xf6, 0xc1, 0xfc, 0x06, 0x0e, 0xcc, 0xd6, 0x0d, 0xd7, 0xb1, 0xa8, 0xf0, 0x70, 0x19, 0x14, 0x1d, - 0x6c, 0x13, 0xe9, 0xe2, 0xac, 0xb4, 0x2c, 0x6e, 0x62, 0x9b, 0x20, 0xb1, 0x03, 0xd7, 0x00, 0x20, - 0x79, 0xff, 0xa0, 0xd4, 0x03, 0x29, 0xd7, 0x52, 0x5a, 0xda, 0x4f, 0x45, 0x49, 0x84, 0x08, 0x73, - 0x3b, 0xbe, 0x49, 0x18, 0x7c, 0x0c, 0xaa, 0x1c, 0x8e, 0x79, 0xd8, 0x24, 0x0f, 0x49, 0x9b, 0x98, - 0x81, 0xeb, 0x0b, 0xd6, 0xf2, 0xda, 0x65, 0x3d, 0xc9, 0xd3, 0xf8, 0xc6, 0x74, 0xef, 0xa0, 0xc9, - 0x05, 0x4c, 0xe7, 0x89, 0xa1, 
0x1f, 0xae, 0xea, 0x0f, 0xf0, 0x1e, 0x69, 0x47, 0xa6, 0xc6, 0xb9, - 0x5e, 0x57, 0xad, 0x6e, 0xe6, 0x11, 0x51, 0x3f, 0x09, 0x74, 0xc1, 0xbc, 0xbb, 0xf7, 0x88, 0x98, - 0x41, 0x4c, 0x3b, 0xf9, 0xf2, 0xb4, 0xb0, 0xd7, 0x55, 0xe7, 0xb7, 0x32, 0x70, 0x28, 0x07, 0x0f, - 0xbf, 0x00, 0x73, 0xbe, 0xf4, 0x1b, 0x75, 0xda, 0x84, 0xd5, 0x0a, 0xcb, 0x85, 0x4b, 0xe5, 0x35, - 0x43, 0x1f, 0xb9, 0x1c, 0x75, 0xee, 0x98, 0xc5, 0x8d, 0x77, 0x69, 0xd0, 0xda, 0xf2, 0x48, 0xb8, - 0xcf, 0x8c, 0x73, 0x32, 0xf0, 0x73, 0x28, 0x4d, 0x80, 0xb2, 0x7c, 0xf0, 0x5b, 0x05, 0x9c, 0x25, - 0x8f, 0xcd, 0x76, 0xc7, 0x22, 0x19, 0xbd, 0x5a, 0xf1, 0xd4, 0x0e, 0xf2, 0x8a, 0x3c, 0xc8, 0xd9, - 0x5b, 0x03, 0x78, 0xd0, 0x40, 0x76, 0x78, 0x13, 0x94, 0x6d, 0x9e, 0x14, 0xdb, 0x6e, 0x9b, 0x9a, - 0x47, 0xb5, 0x69, 0x91, 0x4a, 0x5a, 0xaf, 0xab, 0x96, 0x37, 0x12, 0xf1, 0x71, 0x57, 0x5d, 0x48, - 0x2d, 0xdf, 0x3f, 0xf2, 0x08, 0x4a, 0x9b, 0x69, 0xcf, 0x15, 0xb0, 0x38, 0xe4, 0x54, 0xf0, 0x6a, - 0x12, 0x79, 0x91, 0x1a, 0x35, 0x65, 0xb9, 0x70, 0xa9, 0x64, 0x54, 0xd3, 0x11, 0x13, 0x1b, 0x28, - 0xab, 0x07, 0xbf, 0x54, 0x00, 0xf4, 0xfb, 0xf0, 0x64, 0xa2, 0x5c, 0x1d, 0x25, 0x5e, 0xfa, 0x80, - 0x20, 0x2d, 0xc9, 0x20, 0xc1, 0xfe, 0x3d, 0x34, 0x80, 0x4e, 0xc3, 0xa0, 0xb4, 0x8d, 0x7d, 0x6c, - 0xdf, 0xa7, 0x8e, 0xc5, 0xeb, 0x0e, 0x7b, 0x74, 0x87, 0xf8, 0xa2, 0xee, 0x94, 0x6c, 0xdd, 0xad, - 0x6f, 0xdf, 0x95, 0x3b, 0x28, 0xa5, 0xc5, 0xab, 0xf9, 0x80, 0x3a, 0x96, 0xac, 0xd2, 0xb8, 0x9a, - 0x39, 0x1e, 0x12, 0x3b, 0xda, 0x0f, 0x93, 0x60, 0x46, 0x70, 0xf0, 0xce, 0x71, 0x72, 0xf1, 0x37, - 0x40, 0x29, 0x2e, 0x28, 0x89, 0x5a, 0x95, 0x6a, 0xa5, 0xb8, 0xf8, 0x50, 0xa2, 0x03, 0x3f, 0x02, - 0x33, 0x2c, 0x2a, 0xb3, 0xc2, 0xcb, 0x97, 0xd9, 0x2c, 0xef, 0x75, 0x71, 0x81, 0xc5, 0x90, 0x30, - 0x00, 0x8b, 0x1e, 0x3f, 0x3d, 0x09, 0x88, 0xbf, 0xe9, 0x06, 0xb7, 0xdd, 0x8e, 0x63, 0xad, 0x9b, - 0x3c, 0x7a, 0xb5, 0xa2, 0x38, 0xdd, 0xf5, 0x5e, 0x57, 0x5d, 0xdc, 0x1e, 0xac, 0x72, 0xdc, 0x55, - 0xff, 0x3f, 0x64, 0x4b, 0xa4, 0xd9, 0x30, 0x68, 0xed, 0x3b, 0x05, 0xcc, 0x72, 0x8d, 0x1b, 0x2d, - 0x62, 0x1e, 0xf0, 0x06, 0xfd, 0x95, 0x02, 0x20, 0xc9, 0xb7, 0xed, 0x30, 0xdb, 0xca, 0x6b, 0xef, - 0x8c, 0x51, 0x5e, 0x7d, 0xbd, 0x3f, 0xc9, 0x99, 0xbe, 0x2d, 0x86, 0x06, 0x70, 0x6a, 0x3f, 0x4f, - 0x82, 0xf3, 0x3b, 0xb8, 0x4d, 0x2d, 0x1c, 0x50, 0xa7, 0xb9, 0x1e, 0xd1, 0x85, 0xc5, 0x02, 0x3f, - 0x06, 0x33, 0x3c, 0xc0, 0x16, 0x0e, 0xb0, 0x6c, 0xb6, 0x6f, 0x8c, 0x76, 0x1d, 0x61, 0x8b, 0xdb, - 0x20, 0x01, 0x4e, 0x92, 0x2e, 0x91, 0xa1, 0x18, 0x15, 0x3e, 0x02, 0x45, 0xe6, 0x11, 0x53, 0x96, - 0xca, 0x7b, 0x63, 0xf8, 0x3e, 0xf4, 0xd4, 0x0f, 0x3d, 0x62, 0x26, 0xd9, 0xc8, 0x57, 0x48, 0x70, - 0x40, 0x1f, 0x4c, 0xb1, 0x00, 0x07, 0x1d, 0x26, 0x53, 0xeb, 0xde, 0xa9, 0xb0, 0x09, 0x44, 0x63, - 0x5e, 0xf2, 0x4d, 0x85, 0x6b, 0x24, 0x99, 0xb4, 0x3f, 0x14, 0xb0, 0x3c, 0xd4, 0xd6, 0xa0, 0x8e, - 0xc5, 0xf3, 0xe1, 0xdf, 0x0f, 0xf3, 0x27, 0x99, 0x30, 0x6f, 0x9d, 0x86, 0xe3, 0xf2, 0xf0, 0xc3, - 0xa2, 0xad, 0xfd, 0xae, 0x80, 0x8b, 0x27, 0x19, 0x3f, 0xa0, 0x2c, 0x80, 0x1f, 0xf6, 0x79, 0xaf, - 0x8f, 0x58, 0xf3, 0x94, 0x85, 0xbe, 0xc7, 0xe3, 0x4d, 0x24, 0x49, 0x79, 0xee, 0x81, 0x33, 0x34, - 0x20, 0x36, 0x6f, 0xc6, 0xbc, 0xba, 0xee, 0x9f, 0xa2, 0xeb, 0xc6, 0x9c, 0xe4, 0x3d, 0x73, 0x97, - 0x33, 0xa0, 0x90, 0x48, 0xfb, 0xba, 0x70, 0xb2, 0xe3, 0x3c, 0x4e, 0xbc, 0x45, 0x7b, 0x42, 0xb8, - 0x99, 0x74, 0xd1, 0xf8, 0x1a, 0xb7, 0xe3, 0x1d, 0x94, 0xd2, 0xe2, 0x0d, 0xd2, 0x93, 0xfd, 0x77, - 0xc0, 0x1c, 0x72, 0x92, 0x47, 0x51, 0xeb, 0x0e, 0x1b, 0x64, 0xb4, 0x42, 0x31, 0x24, 0xec, 0x80, - 0x79, 0x3b, 0x33, 0x78, 0xc9, 0x52, 0xb9, 0x36, 0x06, 
0x49, 0x76, 0x72, 0x0b, 0x47, 0x9e, 0xac, - 0x0c, 0xe5, 0x48, 0xe0, 0x2e, 0xa8, 0x1e, 0xca, 0x88, 0xb9, 0x4e, 0xd8, 0x35, 0xc3, 0x69, 0xa3, - 0x64, 0xac, 0xf0, 0x41, 0x6d, 0x27, 0xbf, 0x79, 0xdc, 0x55, 0x2b, 0x79, 0x21, 0xea, 0xc7, 0xd0, - 0x7e, 0x53, 0xc0, 0x85, 0xa1, 0x77, 0xf1, 0x1f, 0x64, 0x1f, 0xcd, 0x66, 0xdf, 0xcd, 0x53, 0xc9, - 0xbe, 0xc1, 0x69, 0xf7, 0xfd, 0xd4, 0xdf, 0xb8, 0x2a, 0xf2, 0x0d, 0x83, 0x92, 0x17, 0xcd, 0x07, - 0xd2, 0xd7, 0x2b, 0xe3, 0x26, 0x0f, 0xb7, 0x35, 0xe6, 0xf8, 0xf7, 0x3b, 0x5e, 0xa2, 0x04, 0x15, - 0x7e, 0x06, 0x2a, 0xb6, 0x7c, 0x21, 0x70, 0x00, 0xea, 0x04, 0xd1, 0x14, 0xf4, 0x0f, 0x32, 0xe8, - 0x6c, 0xaf, 0xab, 0x56, 0x36, 0x72, 0xb0, 0xa8, 0x8f, 0x08, 0xb6, 0x41, 0x39, 0xc9, 0x80, 0x68, - 0x6c, 0x7e, 0xf3, 0x25, 0x42, 0xee, 0x3a, 0xc6, 0xff, 0x64, 0x8c, 0xcb, 0x89, 0x8c, 0xa1, 0x34, - 0x3c, 0x7c, 0x00, 0xe6, 0xf6, 0x31, 0x6d, 0x77, 0x7c, 0x22, 0x07, 0xd2, 0x70, 0x82, 0x78, 0x95, - 0x0f, 0x8b, 0xb7, 0xd3, 0x1b, 0xc7, 0x5d, 0xb5, 0x9a, 0x11, 0x88, 0x69, 0x21, 0x6b, 0x0c, 0x9f, - 0x28, 0xa0, 0x82, 0xb3, 0xcf, 0x47, 0x56, 0x3b, 0x23, 0x3c, 0xb8, 0x3e, 0x86, 0x07, 0xb9, 0x17, - 0xa8, 0x51, 0x93, 0x6e, 0x54, 0x72, 0x1b, 0x0c, 0xf5, 0xb1, 0xc1, 0xcf, 0xc1, 0x82, 0x9d, 0x79, - 0xdd, 0xb1, 0xda, 0x94, 0x38, 0xc0, 0xd8, 0x57, 0x17, 0x23, 0x24, 0x2f, 0xd9, 0xac, 0x9c, 0xa1, - 0x3c, 0x15, 0xb4, 0x40, 0xe9, 0x10, 0xfb, 0x14, 0xef, 0xf1, 0x87, 0xc6, 0xb4, 0xe0, 0xbd, 0x3c, - 0xd6, 0xd5, 0x85, 0xb6, 0xc9, 0x7c, 0x19, 0x49, 0x18, 0x4a, 0x80, 0xb5, 0x1f, 0x27, 0x81, 0x7a, - 0xc2, 0xa7, 0x1c, 0xde, 0x03, 0xd0, 0xdd, 0x63, 0xc4, 0x3f, 0x24, 0xd6, 0x9d, 0xf0, 0x8d, 0x1f, - 0x4d, 0xd0, 0x85, 0x64, 0xbc, 0xda, 0xea, 0xd3, 0x40, 0x03, 0xac, 0xa0, 0x0d, 0x66, 0x83, 0xd4, - 0xe4, 0x37, 0xce, 0x8b, 0x40, 0x3a, 0x96, 0x1e, 0x1c, 0x8d, 0x4a, 0xaf, 0xab, 0x66, 0x46, 0x49, - 0x94, 0x81, 0x87, 0x26, 0x00, 0x66, 0x72, 0x7b, 0x61, 0x01, 0x34, 0x46, 0x6b, 0x67, 0xc9, 0x9d, - 0xc5, 0x9f, 0xa0, 0xd4, 0x75, 0xa5, 0x60, 0xb5, 0x3f, 0x15, 0x00, 0x92, 0xaa, 0x80, 0x17, 0x41, - 0xea, 0x19, 0x2f, 0xbf, 0x62, 0x45, 0x0e, 0x81, 0x52, 0x72, 0xb8, 0x02, 0xa6, 0x6d, 0xc2, 0x18, - 0x6e, 0x46, 0xef, 0x80, 0xf8, 0x5f, 0x86, 0x8d, 0x50, 0x8c, 0xa2, 0x7d, 0xb8, 0x0b, 0xa6, 0x7c, - 0x82, 0x99, 0xeb, 0xc8, 0xff, 0x23, 0xde, 0xe5, 0x63, 0x15, 0x12, 0x92, 0xe3, 0xae, 0xba, 0x3a, - 0xca, 0xbf, 0x40, 0xba, 0x9c, 0xc2, 0x84, 0x11, 0x92, 0x70, 0xf0, 0x0e, 0xa8, 0x4a, 0x8e, 0xd4, - 0x81, 0xc3, 0xaa, 0x3d, 0x2f, 0x4f, 0x53, 0xdd, 0xc8, 0x2b, 0xa0, 0x7e, 0x1b, 0xed, 0x1e, 0x98, - 0x89, 0xb2, 0x0b, 0xd6, 0x40, 0x31, 0xf5, 0xf9, 0x0e, 0x1d, 0x17, 0x92, 0x5c, 0x60, 0x26, 0x07, - 0x07, 0xc6, 0xd8, 0x7a, 0xfa, 0xa2, 0x3e, 0xf1, 0xec, 0x45, 0x7d, 0xe2, 0xf9, 0x8b, 0xfa, 0xc4, - 0x93, 0x5e, 0x5d, 0x79, 0xda, 0xab, 0x2b, 0xcf, 0x7a, 0x75, 0xe5, 0x79, 0xaf, 0xae, 0xfc, 0xd2, - 0xab, 0x2b, 0xdf, 0xfc, 0x5a, 0x9f, 0xf8, 0x60, 0x65, 0xe4, 0x7f, 0xf1, 0xfe, 0x0a, 0x00, 0x00, - 0xff, 0xff, 0x22, 0xbd, 0xc5, 0xc7, 0xf1, 0x13, 0x00, 0x00, + // 1783 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x59, 0xdd, 0x6f, 0x1b, 0x4b, + 0x15, 0xcf, 0xda, 0xce, 0x87, 0xc7, 0xf9, 0xf2, 0xd0, 0x12, 0x37, 0xa5, 0xde, 0x68, 0x55, 0xa1, + 0x46, 0x82, 0x35, 0x49, 0x0b, 0xa5, 0x55, 0x51, 0x95, 0x6d, 0x9b, 0xb6, 0x69, 0x9d, 0x44, 0x53, + 0x94, 0x20, 0x04, 0x12, 0x93, 0xf5, 0xc4, 0xde, 0xc6, 0xfb, 0xc1, 0xce, 0x3a, 0x34, 0x02, 0x89, + 0x4a, 0x08, 0x09, 0xde, 0x78, 0xe0, 0x85, 0x37, 0xc4, 0x1f, 0xc0, 0x03, 0xfc, 0x05, 0xbc, 0xf5, + 0xb1, 0x8f, 0xe5, 0x81, 0x15, 0x35, 0x20, 0xf1, 0x0c, 0xd2, 0xbd, 
0x52, 0x5e, 0xee, 0xd5, 0xcc, + 0xce, 0x7e, 0x79, 0xed, 0xc6, 0x4e, 0xd3, 0xf4, 0xe1, 0xde, 0x37, 0xcf, 0xf9, 0xf8, 0x9d, 0x39, + 0x67, 0xce, 0x9c, 0x39, 0xc7, 0x0b, 0x6e, 0x1d, 0x7c, 0x97, 0xaa, 0x86, 0x5d, 0xc3, 0x8e, 0x51, + 0xc3, 0x0d, 0xd3, 0xa0, 0xd4, 0xb0, 0x2d, 0x97, 0x34, 0x0d, 0xea, 0xb9, 0xd8, 0x33, 0x6c, 0xab, + 0x76, 0xb8, 0x82, 0xdb, 0x4e, 0x0b, 0xaf, 0xd4, 0x9a, 0xc4, 0x22, 0x2e, 0xf6, 0x48, 0x43, 0x75, + 0x5c, 0xdb, 0xb3, 0xe1, 0x72, 0xa0, 0xaa, 0x62, 0xc7, 0x50, 0xfb, 0xaa, 0xaa, 0xa1, 0xea, 0xe2, + 0x37, 0x9b, 0x86, 0xd7, 0xea, 0xec, 0xa9, 0xba, 0x6d, 0xd6, 0x9a, 0x76, 0xd3, 0xae, 0x71, 0x84, + 0xbd, 0xce, 0x3e, 0x5f, 0xf1, 0x05, 0xff, 0x15, 0x20, 0x2f, 0x5e, 0x1f, 0x62, 0x53, 0xbd, 0xdb, + 0x59, 0xbc, 0x11, 0x2b, 0x99, 0x58, 0x6f, 0x19, 0x16, 0x71, 0x8f, 0x6a, 0xce, 0x41, 0x93, 0x11, + 0x68, 0xcd, 0x24, 0x1e, 0xee, 0xa7, 0x55, 0x1b, 0xa4, 0xe5, 0x76, 0x2c, 0xcf, 0x30, 0x49, 0x46, + 0xe1, 0x3b, 0x27, 0x29, 0x50, 0xbd, 0x45, 0x4c, 0xdc, 0xab, 0xa7, 0x3c, 0x02, 0x70, 0xcd, 0x71, + 0xda, 0x47, 0xf7, 0x6c, 0x6b, 0xdf, 0x68, 0x76, 0x02, 0x3f, 0xe0, 0x2a, 0x00, 0xe4, 0x85, 0xe3, + 0x12, 0xee, 0x61, 0x45, 0x5a, 0x92, 0xae, 0x15, 0x35, 0xf8, 0xca, 0x97, 0xc7, 0xba, 0xbe, 0x0c, + 0x1e, 0x44, 0x1c, 0x94, 0x90, 0x52, 0x28, 0x98, 0x5b, 0xeb, 0x34, 0x0c, 0x6f, 0xcd, 0xb2, 0x6c, + 0x2f, 0x80, 0xb9, 0x02, 0xf2, 0x07, 0xe4, 0x48, 0xe8, 0x97, 0x84, 0x7e, 0xfe, 0x09, 0x39, 0x42, + 0x8c, 0x0e, 0xd7, 0xc0, 0xdc, 0x21, 0x6e, 0x77, 0x48, 0x0c, 0x58, 0xc9, 0x71, 0xd1, 0x05, 0x21, + 0x3a, 0xb7, 0x93, 0x66, 0xa3, 0x5e, 0x79, 0xa5, 0x0d, 0xca, 0xf1, 0x6a, 0x17, 0xbb, 0x96, 0x61, + 0x35, 0xe1, 0x37, 0xc0, 0xd4, 0xbe, 0x41, 0xda, 0x0d, 0x44, 0xf6, 0x05, 0xe0, 0xbc, 0x00, 0x9c, + 0x5a, 0x17, 0x74, 0x14, 0x49, 0xc0, 0x65, 0x30, 0xf9, 0xb3, 0x40, 0xb1, 0x92, 0xe7, 0xc2, 0x73, + 0x42, 0x78, 0x52, 0xe0, 0xa1, 0x90, 0xaf, 0xdc, 0x05, 0xc5, 0x8d, 0x67, 0x5b, 0x9b, 0xdb, 0xd8, + 0xd3, 0x5b, 0xa7, 0x8a, 0xd1, 0x3e, 0x98, 0xad, 0x33, 0xe5, 0x7b, 0xb6, 0xd5, 0x30, 0x78, 0x88, + 0x96, 0x40, 0xc1, 0xc2, 0x26, 0x11, 0xfa, 0xd3, 0x42, 0xbf, 0xb0, 0x89, 0x4d, 0x82, 0x38, 0xa7, + 0xc7, 0x4e, 0x6e, 0x28, 0x3b, 0x7f, 0x2f, 0x08, 0x43, 0x88, 0x50, 0xbb, 0xe3, 0xea, 0x84, 0xc2, + 0x17, 0xa0, 0xcc, 0xe0, 0xa8, 0x83, 0x75, 0xf2, 0x8c, 0xb4, 0x89, 0xee, 0xd9, 0x2e, 0xb7, 0x5a, + 0x5a, 0xbd, 0xae, 0xc6, 0x57, 0x26, 0x4a, 0x1e, 0xd5, 0x39, 0x68, 0x32, 0x02, 0x55, 0x59, 0x8e, + 0xaa, 0x87, 0x2b, 0xea, 0x53, 0xbc, 0x47, 0xda, 0xa1, 0xaa, 0x76, 0xb1, 0xeb, 0xcb, 0xe5, 0xcd, + 0x5e, 0x44, 0x94, 0x35, 0x02, 0x6d, 0x30, 0x6b, 0xef, 0x3d, 0x27, 0xba, 0x17, 0x99, 0xcd, 0x9d, + 0xde, 0x2c, 0xec, 0xfa, 0xf2, 0xec, 0x56, 0x0a, 0x0e, 0xf5, 0xc0, 0xc3, 0x5f, 0x82, 0x19, 0x57, + 0xf8, 0x8d, 0x3a, 0x6d, 0x42, 0x2b, 0xf9, 0xa5, 0xfc, 0xb5, 0xd2, 0xaa, 0xa6, 0x0e, 0x5d, 0x19, + 0x54, 0xe6, 0x58, 0x83, 0x29, 0xef, 0x1a, 0x5e, 0x6b, 0xcb, 0x21, 0x01, 0x9f, 0x6a, 0x17, 0x45, + 0xe0, 0x67, 0x50, 0xd2, 0x00, 0x4a, 0xdb, 0x83, 0xbf, 0x97, 0xc0, 0x05, 0xf2, 0x42, 0x6f, 0x77, + 0x1a, 0x24, 0x25, 0x57, 0x29, 0x9c, 0xd9, 0x46, 0xbe, 0x26, 0x36, 0x72, 0xe1, 0x41, 0x1f, 0x3b, + 0xa8, 0xaf, 0x75, 0x78, 0x1f, 0x94, 0x4c, 0x96, 0x14, 0xdb, 0x76, 0xdb, 0xd0, 0x8f, 0x2a, 0x93, + 0x3c, 0x95, 0x94, 0xae, 0x2f, 0x97, 0xea, 0x31, 0xf9, 0xd8, 0x97, 0xe7, 0x12, 0xcb, 0xef, 0x1f, + 0x39, 0x04, 0x25, 0xd5, 0x94, 0xff, 0x48, 0x60, 0xa1, 0xde, 0x61, 0x37, 0xdc, 0x6a, 0xae, 0x85, + 0x9b, 0x0f, 0x78, 0xf0, 0x27, 0x60, 0x8a, 0x1d, 0x5b, 0x03, 0x7b, 0x58, 0xe4, 0xd6, 0xb7, 0x86, + 0x3b, 0xe4, 0xe0, 0x44, 0xeb, 0xc4, 0xc3, 0x71, 0x6e, 0xc7, 0x34, 0x14, 0xa1, 0xc2, 0x16, 
0x28, + 0x50, 0x87, 0xe8, 0x22, 0x85, 0xd6, 0x47, 0x88, 0xe4, 0x80, 0x3d, 0x3f, 0x73, 0x88, 0x1e, 0xdf, + 0x3b, 0xb6, 0x42, 0xdc, 0x82, 0xf2, 0x7f, 0x09, 0x54, 0x07, 0xe8, 0x68, 0x86, 0xd5, 0x60, 0x85, + 0xe6, 0xc3, 0xbb, 0x6b, 0xa7, 0xdc, 0xad, 0xbf, 0xbf, 0xbb, 0x62, 0xeb, 0x03, 0xbd, 0xfe, 0x9f, + 0x04, 0x94, 0x77, 0xab, 0x3e, 0x35, 0xa8, 0x07, 0x7f, 0x94, 0xf1, 0x5c, 0x1d, 0xf2, 0x36, 0x1b, + 0x34, 0xf0, 0x3b, 0x2a, 0xc9, 0x21, 0x25, 0xe1, 0xb5, 0x05, 0xc6, 0x0d, 0x8f, 0x98, 0xb4, 0x92, + 0xe3, 0xf7, 0xe5, 0xf1, 0x99, 0xb9, 0xad, 0xcd, 0x08, 0xab, 0xe3, 0x8f, 0x19, 0x3e, 0x0a, 0xcc, + 0x28, 0x7f, 0xce, 0x9d, 0xe4, 0x34, 0x8b, 0x10, 0xab, 0xc4, 0x0e, 0x27, 0x6e, 0xc6, 0x15, 0x3b, + 0x3a, 0xbe, 0xed, 0x88, 0x83, 0x12, 0x52, 0xf0, 0xc7, 0x60, 0xca, 0xc1, 0x2e, 0x36, 0xc3, 0xb7, + 0x28, 0x5d, 0xf6, 0x4e, 0xf2, 0x66, 0x5b, 0xa8, 0x6a, 0xd3, 0x2c, 0x52, 0xe1, 0x0a, 0x45, 0x90, + 0xb0, 0x03, 0x66, 0xcd, 0x54, 0x9d, 0xe7, 0x6f, 0x58, 0x69, 0xf5, 0xd6, 0x28, 0x21, 0x4b, 0x01, + 0x04, 0x15, 0x36, 0x4d, 0x43, 0x3d, 0x46, 0x94, 0x7f, 0x4b, 0xe0, 0xf2, 0x80, 0x80, 0x9d, 0x43, + 0x7a, 0x34, 0xd3, 0xe9, 0xa1, 0x9d, 0x41, 0x7a, 0xf4, 0xcf, 0x8b, 0x3f, 0x4e, 0x0c, 0x74, 0x93, + 0x27, 0x04, 0x06, 0x45, 0x7e, 0x12, 0x4f, 0x0c, 0xab, 0x21, 0xfc, 0xbc, 0x31, 0xea, 0xe9, 0x32, + 0x5d, 0x6d, 0xa6, 0xeb, 0xcb, 0xc5, 0x68, 0x89, 0x62, 0x54, 0xf8, 0x73, 0x30, 0x6f, 0x8a, 0x8e, + 0x81, 0x01, 0x18, 0x96, 0x47, 0x45, 0x1e, 0xbd, 0xc7, 0x11, 0x5f, 0xe8, 0xfa, 0xf2, 0x7c, 0xbd, + 0x07, 0x16, 0x65, 0x0c, 0xc1, 0x06, 0x28, 0x1e, 0x62, 0xd7, 0xc0, 0x7b, 0xf1, 0x23, 0x3a, 0x4a, + 0xf6, 0xee, 0x08, 0x5d, 0xad, 0x2c, 0xa2, 0x5b, 0x0c, 0x29, 0x14, 0xc5, 0xc0, 0xcc, 0x8a, 0xd9, + 0x09, 0x3a, 0xc6, 0xf0, 0x85, 0xbc, 0x3e, 0xf2, 0x91, 0xda, 0x56, 0x6c, 0x25, 0xa4, 0x50, 0x14, + 0x03, 0xc3, 0xa7, 0x60, 0x66, 0x1f, 0x1b, 0xed, 0x8e, 0x4b, 0xc4, 0xf3, 0x37, 0xce, 0xef, 0xef, + 0xd7, 0xd9, 0x63, 0xbe, 0x9e, 0x64, 0x1c, 0xfb, 0x72, 0x39, 0x45, 0xe0, 0x4f, 0x60, 0x5a, 0x19, + 0xfe, 0x02, 0xcc, 0x99, 0xa9, 0x46, 0x8e, 0x56, 0x26, 0xf8, 0xce, 0x47, 0x3e, 0x95, 0x08, 0x21, + 0xee, 0x7a, 0xd3, 0x74, 0x8a, 0x7a, 0x4d, 0xc1, 0xdf, 0x48, 0x00, 0xba, 0xc4, 0xb0, 0x0e, 0x6d, + 0x9d, 0x43, 0xa6, 0x1e, 0xf4, 0x1f, 0x08, 0x18, 0x88, 0x32, 0x12, 0xc7, 0xbe, 0x7c, 0x7b, 0x88, + 0x19, 0x46, 0xcd, 0x6a, 0xf2, 0x18, 0xf4, 0xb1, 0xa9, 0xfc, 0x35, 0x07, 0xa6, 0xc2, 0x78, 0xc3, + 0x3b, 0xec, 0x3e, 0x78, 0x7a, 0x8b, 0x49, 0x8b, 0x4e, 0xb5, 0x1a, 0x1e, 0xca, 0x76, 0xc8, 0x38, + 0x4e, 0x2e, 0x50, 0xac, 0x00, 0x7f, 0x2d, 0x01, 0x88, 0x33, 0xb3, 0x88, 0x28, 0x68, 0xdf, 0x1b, + 0x21, 0xae, 0xd9, 0x81, 0x46, 0xfb, 0x2a, 0x0b, 0x48, 0x96, 0x8e, 0xfa, 0x18, 0x64, 0xb7, 0xfa, + 0x39, 0xb5, 0x2d, 0xbe, 0xc7, 0x4a, 0x61, 0xe4, 0x5b, 0x1d, 0x4d, 0x08, 0xc1, 0xad, 0x8e, 0x96, + 0x28, 0x46, 0x55, 0xde, 0x48, 0x60, 0x61, 0x40, 0x67, 0x07, 0x6f, 0xc6, 0xdd, 0x2b, 0x6f, 0xaf, + 0x2b, 0xd2, 0x52, 0xfe, 0x5a, 0x51, 0x2b, 0x27, 0xbb, 0x4e, 0xce, 0x40, 0x69, 0x39, 0xf8, 0x2b, + 0x96, 0x15, 0x19, 0x3c, 0x51, 0x2d, 0x6e, 0x0e, 0xe3, 0x81, 0xda, 0xa7, 0xd1, 0x5c, 0x8c, 0xd2, + 0x29, 0xc3, 0x43, 0x7d, 0xcc, 0x29, 0x18, 0xc4, 0x85, 0x8c, 0xbd, 0x98, 0xd8, 0x31, 0x76, 0x88, + 0xdb, 0x6f, 0x46, 0x5a, 0xdb, 0x7e, 0x2c, 0x38, 0x28, 0x21, 0xc5, 0x26, 0xa2, 0x03, 0x56, 0x4f, + 0x73, 0xe9, 0x89, 0x88, 0x17, 0x46, 0xce, 0x51, 0xfe, 0x92, 0x03, 0xd1, 0x5b, 0x38, 0xc4, 0x00, + 0x55, 0x03, 0xc5, 0x68, 0x28, 0x11, 0xa8, 0x51, 0xa9, 0x88, 0x06, 0x18, 0x14, 0xcb, 0xb0, 0x37, + 0x9b, 0x86, 0xa3, 0x4a, 0xfe, 0xf4, 0xa3, 0x0a, 0x7f, 0xb3, 0xa3, 0x21, 0x25, 0x82, 0x84, 0x1e, + 0x58, 0xe0, 0xf5, 
0x9d, 0x78, 0xc4, 0xdd, 0xb4, 0xbd, 0x75, 0xbb, 0x63, 0x35, 0xd6, 0x74, 0x9e, + 0xeb, 0x05, 0xbe, 0xbb, 0xdb, 0x5d, 0x5f, 0x5e, 0xd8, 0xee, 0x2f, 0x72, 0xec, 0xcb, 0x97, 0x07, + 0xb0, 0xf8, 0x7d, 0x1a, 0x04, 0xad, 0xfc, 0x41, 0x02, 0xd3, 0x4c, 0xe2, 0x5e, 0x8b, 0xe8, 0x07, + 0xac, 0x79, 0x65, 0x45, 0x84, 0xf4, 0xce, 0xce, 0x41, 0xb6, 0x95, 0x56, 0xef, 0x8c, 0x90, 0xf0, + 0x99, 0x01, 0x3c, 0xce, 0x99, 0x0c, 0x8b, 0xa2, 0x3e, 0x36, 0x95, 0x7f, 0xe4, 0xc0, 0xa5, 0x1d, + 0xdc, 0x36, 0x1a, 0x1f, 0x69, 0xa8, 0x78, 0x9e, 0xea, 0xb2, 0x1f, 0x8d, 0xf4, 0xc4, 0x0d, 0xd8, + 0xf5, 0xa0, 0x06, 0x1b, 0xba, 0x60, 0x82, 0x7a, 0xd8, 0xeb, 0x84, 0x9d, 0xda, 0xc6, 0x99, 0x58, + 0xe3, 0x88, 0xda, 0xac, 0xb0, 0x37, 0x11, 0xac, 0x91, 0xb0, 0xa4, 0x7c, 0x2a, 0x81, 0xa5, 0x81, + 0xba, 0xe7, 0x37, 0xcc, 0xfc, 0x34, 0x15, 0xe6, 0xad, 0xb3, 0x70, 0xfc, 0xa4, 0x71, 0xe6, 0x13, + 0x09, 0x5c, 0x3d, 0x49, 0xf9, 0x1c, 0x3a, 0x56, 0x27, 0xdd, 0xb1, 0x3e, 0x39, 0x43, 0xd7, 0x07, + 0xb4, 0xae, 0xbf, 0xcd, 0x9f, 0xec, 0xf8, 0x97, 0x43, 0x4d, 0xea, 0x1f, 0xb2, 0x5d, 0x50, 0x3e, + 0x14, 0x11, 0xb3, 0xad, 0xa0, 0x6a, 0x06, 0xfd, 0x68, 0x51, 0x5b, 0xee, 0xfa, 0x72, 0x79, 0xa7, + 0x97, 0x79, 0xec, 0xcb, 0xf3, 0xbd, 0x44, 0x94, 0xc5, 0x50, 0xfe, 0x2b, 0x81, 0x2b, 0x03, 0xcf, + 0xe2, 0x1c, 0xb2, 0xcf, 0x48, 0x67, 0xdf, 0xfd, 0x33, 0xc9, 0xbe, 0xfe, 0x69, 0xf7, 0xa7, 0x89, + 0x77, 0xb8, 0xfa, 0x85, 0x98, 0x99, 0xda, 0xa0, 0x14, 0x67, 0x40, 0x38, 0x35, 0x7d, 0xfb, 0x14, + 0x21, 0xb7, 0x2d, 0xed, 0x2b, 0x22, 0xc6, 0xa5, 0x98, 0x46, 0x51, 0x12, 0x3e, 0x3b, 0xd5, 0x14, + 0xde, 0x67, 0xaa, 0x79, 0x29, 0x81, 0x79, 0x9c, 0xfe, 0x0f, 0x9f, 0x56, 0xc6, 0xb9, 0x07, 0xb7, + 0x47, 0xe9, 0xbf, 0xd3, 0x10, 0x5a, 0x45, 0xb8, 0x31, 0xdf, 0xc3, 0xa0, 0x28, 0x63, 0xed, 0x23, + 0x0f, 0x56, 0xa9, 0x81, 0x77, 0xf2, 0x03, 0x0d, 0xbc, 0xca, 0xdf, 0x72, 0x40, 0x3e, 0xe1, 0x29, + 0x87, 0x1b, 0x00, 0xda, 0x7b, 0x94, 0xb8, 0x87, 0xa4, 0xf1, 0x30, 0xf8, 0x64, 0x13, 0x76, 0xd0, + 0xf9, 0xb8, 0xbd, 0xda, 0xca, 0x48, 0xa0, 0x3e, 0x5a, 0xd0, 0x04, 0xd3, 0x5e, 0xa2, 0xf3, 0x1b, + 0x65, 0x22, 0x10, 0x8e, 0x25, 0x1b, 0x47, 0x6d, 0xbe, 0xeb, 0xcb, 0xa9, 0x56, 0x12, 0xa5, 0xe0, + 0xa1, 0x0e, 0x80, 0x1e, 0x9f, 0x5e, 0x70, 0x01, 0x6a, 0xc3, 0x95, 0xb3, 0xf8, 0xcc, 0xa2, 0x27, + 0x28, 0x71, 0x5c, 0x09, 0x58, 0xe5, 0x33, 0x09, 0x80, 0xf8, 0x56, 0xc0, 0xab, 0x20, 0xf1, 0x29, + 0x44, 0xbc, 0x62, 0x05, 0x06, 0x81, 0x12, 0x74, 0xb8, 0x0c, 0x26, 0x4d, 0x42, 0x29, 0x6e, 0x86, + 0x73, 0x40, 0xf4, 0xa9, 0xa7, 0x1e, 0x90, 0x51, 0xc8, 0x87, 0xbb, 0x60, 0xc2, 0x25, 0x98, 0x8a, + 0xf9, 0xb3, 0xa8, 0xdd, 0x65, 0x6d, 0x15, 0xe2, 0x94, 0x63, 0x5f, 0x5e, 0x19, 0xe6, 0xa3, 0x9e, + 0x2a, 0xba, 0x30, 0xae, 0x84, 0x04, 0x1c, 0x7c, 0x08, 0xca, 0xc2, 0x46, 0x62, 0xc3, 0xc1, 0xad, + 0xbd, 0x24, 0x76, 0x53, 0xae, 0xf7, 0x0a, 0xa0, 0xac, 0x8e, 0xb2, 0x01, 0xa6, 0xc2, 0xec, 0x82, + 0x15, 0x50, 0x48, 0x3c, 0xdf, 0x81, 0xe3, 0x9c, 0xd2, 0x13, 0x98, 0x5c, 0xff, 0xc0, 0x68, 0x5b, + 0xaf, 0xde, 0x56, 0xc7, 0x5e, 0xbf, 0xad, 0x8e, 0xbd, 0x79, 0x5b, 0x1d, 0x7b, 0xd9, 0xad, 0x4a, + 0xaf, 0xba, 0x55, 0xe9, 0x75, 0xb7, 0x2a, 0xbd, 0xe9, 0x56, 0xa5, 0x7f, 0x76, 0xab, 0xd2, 0xef, + 0xfe, 0x55, 0x1d, 0xfb, 0xe1, 0xf2, 0xd0, 0x1f, 0x65, 0x3f, 0x0f, 0x00, 0x00, 0xff, 0xff, 0xac, + 0xc8, 0x8c, 0x78, 0xc0, 0x1d, 0x00, 0x00, +} + +func (m *ApplyConfiguration) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ApplyConfiguration) MarshalTo(dAtA []byte) (int, 
error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ApplyConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Expression) + copy(dAtA[i:], m.Expression) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Expression))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *AuditAnnotation) Marshal() (dAtA []byte, err error) { @@ -709,6 +1017,34 @@ func (m *ExpressionWarning) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *JSONPatch) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *JSONPatch) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *JSONPatch) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Expression) + copy(dAtA[i:], m.Expression) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Expression))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + func (m *MatchCondition) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -824,7 +1160,7 @@ func (m *MatchResources) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *NamedRuleWithOperations) Marshal() (dAtA []byte, err error) { +func (m *MutatingAdmissionPolicy) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -834,18 +1170,18 @@ func (m *NamedRuleWithOperations) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *NamedRuleWithOperations) MarshalTo(dAtA []byte) (int, error) { +func (m *MutatingAdmissionPolicy) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *NamedRuleWithOperations) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *MutatingAdmissionPolicy) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l { - size, err := m.RuleWithOperations.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -854,19 +1190,20 @@ func (m *NamedRuleWithOperations) MarshalToSizedBuffer(dAtA []byte) (int, error) } i-- dAtA[i] = 0x12 - if len(m.ResourceNames) > 0 { - for iNdEx := len(m.ResourceNames) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.ResourceNames[iNdEx]) - copy(dAtA[i:], m.ResourceNames[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceNames[iNdEx]))) - i-- - dAtA[i] = 0xa + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0xa return len(dAtA) - i, nil } -func (m *ParamKind) Marshal() (dAtA []byte, err error) { +func (m *MutatingAdmissionPolicyBinding) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -876,187 +1213,12 @@ func (m *ParamKind) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ParamKind) MarshalTo(dAtA []byte) (int, error) { +func (m *MutatingAdmissionPolicyBinding) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ParamKind) MarshalToSizedBuffer(dAtA []byte) (int, error) { 
- i := len(dAtA) - _ = i - var l int - _ = l - i -= len(m.Kind) - copy(dAtA[i:], m.Kind) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) - i-- - dAtA[i] = 0x12 - i -= len(m.APIVersion) - copy(dAtA[i:], m.APIVersion) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *ParamRef) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ParamRef) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ParamRef) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.ParameterNotFoundAction != nil { - i -= len(*m.ParameterNotFoundAction) - copy(dAtA[i:], *m.ParameterNotFoundAction) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ParameterNotFoundAction))) - i-- - dAtA[i] = 0x22 - } - if m.Selector != nil { - { - size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - i -= len(m.Namespace) - copy(dAtA[i:], m.Namespace) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) - i-- - dAtA[i] = 0x12 - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *TypeChecking) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *TypeChecking) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *TypeChecking) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.ExpressionWarnings) > 0 { - for iNdEx := len(m.ExpressionWarnings) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.ExpressionWarnings[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *ValidatingAdmissionPolicy) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ValidatingAdmissionPolicy) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ValidatingAdmissionPolicy) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - { - size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *ValidatingAdmissionPolicyBinding) Marshal() (dAtA []byte, err 
error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ValidatingAdmissionPolicyBinding) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ValidatingAdmissionPolicyBinding) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *MutatingAdmissionPolicyBinding) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -1084,7 +1246,7 @@ func (m *ValidatingAdmissionPolicyBinding) MarshalToSizedBuffer(dAtA []byte) (in return len(dAtA) - i, nil } -func (m *ValidatingAdmissionPolicyBindingList) Marshal() (dAtA []byte, err error) { +func (m *MutatingAdmissionPolicyBindingList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -1094,12 +1256,12 @@ func (m *ValidatingAdmissionPolicyBindingList) Marshal() (dAtA []byte, err error return dAtA[:n], nil } -func (m *ValidatingAdmissionPolicyBindingList) MarshalTo(dAtA []byte) (int, error) { +func (m *MutatingAdmissionPolicyBindingList) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ValidatingAdmissionPolicyBindingList) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *MutatingAdmissionPolicyBindingList) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -1131,7 +1293,7 @@ func (m *ValidatingAdmissionPolicyBindingList) MarshalToSizedBuffer(dAtA []byte) return len(dAtA) - i, nil } -func (m *ValidatingAdmissionPolicyBindingSpec) Marshal() (dAtA []byte, err error) { +func (m *MutatingAdmissionPolicyBindingSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -1141,25 +1303,16 @@ func (m *ValidatingAdmissionPolicyBindingSpec) Marshal() (dAtA []byte, err error return dAtA[:n], nil } -func (m *ValidatingAdmissionPolicyBindingSpec) MarshalTo(dAtA []byte) (int, error) { +func (m *MutatingAdmissionPolicyBindingSpec) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ValidatingAdmissionPolicyBindingSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *MutatingAdmissionPolicyBindingSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if len(m.ValidationActions) > 0 { - for iNdEx := len(m.ValidationActions) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.ValidationActions[iNdEx]) - copy(dAtA[i:], m.ValidationActions[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ValidationActions[iNdEx]))) - i-- - dAtA[i] = 0x22 - } - } if m.MatchResources != nil { { size, err := m.MatchResources.MarshalToSizedBuffer(dAtA[:i]) @@ -1192,7 +1345,7 @@ func (m *ValidatingAdmissionPolicyBindingSpec) MarshalToSizedBuffer(dAtA []byte) return len(dAtA) - i, nil } -func (m *ValidatingAdmissionPolicyList) Marshal() (dAtA []byte, err error) { +func (m *MutatingAdmissionPolicyList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -1202,12 +1355,12 @@ func (m *ValidatingAdmissionPolicyList) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ValidatingAdmissionPolicyList) MarshalTo(dAtA []byte) (int, error) { +func (m *MutatingAdmissionPolicyList) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return 
m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ValidatingAdmissionPolicyList) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *MutatingAdmissionPolicyList) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -1239,7 +1392,7 @@ func (m *ValidatingAdmissionPolicyList) MarshalToSizedBuffer(dAtA []byte) (int, return len(dAtA) - i, nil } -func (m *ValidatingAdmissionPolicySpec) Marshal() (dAtA []byte, err error) { +func (m *MutatingAdmissionPolicySpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -1249,30 +1402,21 @@ func (m *ValidatingAdmissionPolicySpec) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ValidatingAdmissionPolicySpec) MarshalTo(dAtA []byte) (int, error) { +func (m *MutatingAdmissionPolicySpec) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ValidatingAdmissionPolicySpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *MutatingAdmissionPolicySpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if len(m.Variables) > 0 { - for iNdEx := len(m.Variables) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Variables[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x3a - } - } + i -= len(m.ReinvocationPolicy) + copy(dAtA[i:], m.ReinvocationPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ReinvocationPolicy))) + i-- + dAtA[i] = 0x3a if len(m.MatchConditions) > 0 { for iNdEx := len(m.MatchConditions) - 1; iNdEx >= 0; iNdEx-- { { @@ -1287,10 +1431,17 @@ func (m *ValidatingAdmissionPolicySpec) MarshalToSizedBuffer(dAtA []byte) (int, dAtA[i] = 0x32 } } - if len(m.AuditAnnotations) > 0 { - for iNdEx := len(m.AuditAnnotations) - 1; iNdEx >= 0; iNdEx-- { + if m.FailurePolicy != nil { + i -= len(*m.FailurePolicy) + copy(dAtA[i:], *m.FailurePolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.FailurePolicy))) + i-- + dAtA[i] = 0x2a + } + if len(m.Mutations) > 0 { + for iNdEx := len(m.Mutations) - 1; iNdEx >= 0; iNdEx-- { { - size, err := m.AuditAnnotations[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + size, err := m.Mutations[iNdEx].MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -1298,20 +1449,13 @@ func (m *ValidatingAdmissionPolicySpec) MarshalToSizedBuffer(dAtA []byte) (int, i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x2a + dAtA[i] = 0x22 } } - if m.FailurePolicy != nil { - i -= len(*m.FailurePolicy) - copy(dAtA[i:], *m.FailurePolicy) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.FailurePolicy))) - i-- - dAtA[i] = 0x22 - } - if len(m.Validations) > 0 { - for iNdEx := len(m.Validations) - 1; iNdEx >= 0; iNdEx-- { + if len(m.Variables) > 0 { + for iNdEx := len(m.Variables) - 1; iNdEx >= 0; iNdEx-- { { - size, err := m.Validations[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + size, err := m.Variables[iNdEx].MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -1349,7 +1493,7 @@ func (m *ValidatingAdmissionPolicySpec) MarshalToSizedBuffer(dAtA []byte) (int, return len(dAtA) - i, nil } -func (m *ValidatingAdmissionPolicyStatus) Marshal() (dAtA []byte, err error) { +func (m *Mutation) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -1359,33 +1503,31 @@ func (m 
*ValidatingAdmissionPolicyStatus) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ValidatingAdmissionPolicyStatus) MarshalTo(dAtA []byte) (int, error) { +func (m *Mutation) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ValidatingAdmissionPolicyStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *Mutation) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if len(m.Conditions) > 0 { - for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + if m.JSONPatch != nil { + { + size, err := m.JSONPatch.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x1a + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x22 } - if m.TypeChecking != nil { + if m.ApplyConfiguration != nil { { - size, err := m.TypeChecking.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.ApplyConfiguration.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -1393,15 +1535,17 @@ func (m *ValidatingAdmissionPolicyStatus) MarshalToSizedBuffer(dAtA []byte) (int i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x12 + dAtA[i] = 0x1a } - i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) + i -= len(m.PatchType) + copy(dAtA[i:], m.PatchType) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.PatchType))) i-- - dAtA[i] = 0x8 + dAtA[i] = 0x12 return len(dAtA) - i, nil } -func (m *Validation) Marshal() (dAtA []byte, err error) { +func (m *NamedRuleWithOperations) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -1411,42 +1555,72 @@ func (m *Validation) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *Validation) MarshalTo(dAtA []byte) (int, error) { +func (m *NamedRuleWithOperations) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *Validation) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *NamedRuleWithOperations) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - i -= len(m.MessageExpression) - copy(dAtA[i:], m.MessageExpression) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.MessageExpression))) + { + size, err := m.RuleWithOperations.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } i-- - dAtA[i] = 0x22 - if m.Reason != nil { - i -= len(*m.Reason) - copy(dAtA[i:], *m.Reason) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Reason))) - i-- - dAtA[i] = 0x1a + dAtA[i] = 0x12 + if len(m.ResourceNames) > 0 { + for iNdEx := len(m.ResourceNames) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ResourceNames[iNdEx]) + copy(dAtA[i:], m.ResourceNames[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceNames[iNdEx]))) + i-- + dAtA[i] = 0xa + } } - i -= len(m.Message) - copy(dAtA[i:], m.Message) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + return len(dAtA) - i, nil +} + +func (m *ParamKind) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ParamKind) 
MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ParamKind) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Kind) + copy(dAtA[i:], m.Kind) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) i-- dAtA[i] = 0x12 - i -= len(m.Expression) - copy(dAtA[i:], m.Expression) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Expression))) + i -= len(m.APIVersion) + copy(dAtA[i:], m.APIVersion) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion))) i-- dAtA[i] = 0xa return len(dAtA) - i, nil } -func (m *Variable) Marshal() (dAtA []byte, err error) { +func (m *ParamRef) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -1456,19 +1630,38 @@ func (m *Variable) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *Variable) MarshalTo(dAtA []byte) (int, error) { +func (m *ParamRef) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *Variable) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *ParamRef) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - i -= len(m.Expression) - copy(dAtA[i:], m.Expression) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Expression))) + if m.ParameterNotFoundAction != nil { + i -= len(*m.ParameterNotFoundAction) + copy(dAtA[i:], *m.ParameterNotFoundAction) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ParameterNotFoundAction))) + i-- + dAtA[i] = 0x22 + } + if m.Selector != nil { + { + size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) i-- dAtA[i] = 0x12 i -= len(m.Name) @@ -1479,606 +1672,2773 @@ func (m *Variable) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { - offset -= sovGenerated(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ +func (m *TypeChecking) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - dAtA[offset] = uint8(v) - return base + return dAtA[:n], nil } -func (m *AuditAnnotation) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Key) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.ValueExpression) - n += 1 + l + sovGenerated(uint64(l)) - return n + +func (m *TypeChecking) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ExpressionWarning) Size() (n int) { - if m == nil { - return 0 - } +func (m *TypeChecking) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = len(m.FieldRef) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Warning) - n += 1 + l + sovGenerated(uint64(l)) - return n + if len(m.ExpressionWarnings) > 0 { + for iNdEx := len(m.ExpressionWarnings) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ExpressionWarnings[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + 
i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil } -func (m *MatchCondition) Size() (n int) { - if m == nil { - return 0 +func (m *ValidatingAdmissionPolicy) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - var l int - _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Expression) - n += 1 + l + sovGenerated(uint64(l)) - return n + return dAtA[:n], nil } -func (m *MatchResources) Size() (n int) { - if m == nil { - return 0 - } +func (m *ValidatingAdmissionPolicy) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ValidatingAdmissionPolicy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if m.NamespaceSelector != nil { - l = m.NamespaceSelector.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.ObjectSelector != nil { - l = m.ObjectSelector.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if len(m.ResourceRules) > 0 { - for _, e := range m.ResourceRules { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - if len(m.ExcludeResourceRules) > 0 { - for _, e := range m.ExcludeResourceRules { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - if m.MatchPolicy != nil { - l = len(*m.MatchPolicy) - n += 1 + l + sovGenerated(uint64(l)) + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return n + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *NamedRuleWithOperations) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.ResourceNames) > 0 { - for _, s := range m.ResourceNames { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } +func (m *ValidatingAdmissionPolicyBinding) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - l = m.RuleWithOperations.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n + return dAtA[:n], nil } -func (m *ParamKind) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.APIVersion) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Kind) - n += 1 + l + sovGenerated(uint64(l)) - return n +func (m *ValidatingAdmissionPolicyBinding) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ParamRef) Size() (n int) { - if m == nil { - return 0 - } +func (m *ValidatingAdmissionPolicyBinding) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Namespace) - n += 1 + l + sovGenerated(uint64(l)) - if m.Selector != nil { - l = m.Selector.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.ParameterNotFoundAction != nil { - l = len(*m.ParameterNotFoundAction) - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *TypeChecking) Size() (n int) { - if m == nil { - return 
0 + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - var l int - _ = l - if len(m.ExpressionWarnings) > 0 { - for _, e := range m.ExpressionWarnings { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return n + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *ValidatingAdmissionPolicy) Size() (n int) { - if m == nil { - return 0 +func (m *ValidatingAdmissionPolicyBindingList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n + return dAtA[:n], nil } -func (m *ValidatingAdmissionPolicyBinding) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n +func (m *ValidatingAdmissionPolicyBindingList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ValidatingAdmissionPolicyBindingList) Size() (n int) { - if m == nil { - return 0 - } +func (m *ValidatingAdmissionPolicyBindingList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 } } - return n + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *ValidatingAdmissionPolicyBindingSpec) Size() (n int) { - if m == nil { - return 0 +func (m *ValidatingAdmissionPolicyBindingSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } + return dAtA[:n], nil +} + +func (m *ValidatingAdmissionPolicyBindingSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ValidatingAdmissionPolicyBindingSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = len(m.PolicyName) - n += 1 + l + sovGenerated(uint64(l)) - if m.ParamRef != nil { - l = m.ParamRef.Size() - n += 1 + l + sovGenerated(uint64(l)) + if len(m.ValidationActions) > 0 { + for iNdEx := len(m.ValidationActions) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ValidationActions[iNdEx]) + copy(dAtA[i:], m.ValidationActions[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ValidationActions[iNdEx]))) + i-- + dAtA[i] = 0x22 + } } if m.MatchResources != nil { - l = m.MatchResources.Size() - n += 1 + l + 
sovGenerated(uint64(l)) + { + size, err := m.MatchResources.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a } - if len(m.ValidationActions) > 0 { - for _, s := range m.ValidationActions { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) + if m.ParamRef != nil { + { + size, err := m.ParamRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x12 } - return n + i -= len(m.PolicyName) + copy(dAtA[i:], m.PolicyName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.PolicyName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *ValidatingAdmissionPolicyList) Size() (n int) { - if m == nil { - return 0 +func (m *ValidatingAdmissionPolicyList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } + return dAtA[:n], nil +} + +func (m *ValidatingAdmissionPolicyList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ValidatingAdmissionPolicyList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 } } - return n + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *ValidatingAdmissionPolicySpec) Size() (n int) { - if m == nil { - return 0 +func (m *ValidatingAdmissionPolicySpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - var l int - _ = l - if m.ParamKind != nil { - l = m.ParamKind.Size() - n += 1 + l + sovGenerated(uint64(l)) + return dAtA[:n], nil +} + +func (m *ValidatingAdmissionPolicySpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ValidatingAdmissionPolicySpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Variables) > 0 { + for iNdEx := len(m.Variables) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Variables[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } } - if m.MatchConstraints != nil { - l = m.MatchConstraints.Size() - n += 1 + l + sovGenerated(uint64(l)) + if len(m.MatchConditions) > 0 { + for iNdEx := len(m.MatchConditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.MatchConditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } } - if len(m.Validations) > 0 { - for _, e := range m.Validations { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + if 
len(m.AuditAnnotations) > 0 { + for iNdEx := len(m.AuditAnnotations) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.AuditAnnotations[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a } } if m.FailurePolicy != nil { - l = len(*m.FailurePolicy) - n += 1 + l + sovGenerated(uint64(l)) + i -= len(*m.FailurePolicy) + copy(dAtA[i:], *m.FailurePolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.FailurePolicy))) + i-- + dAtA[i] = 0x22 } - if len(m.AuditAnnotations) > 0 { - for _, e := range m.AuditAnnotations { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + if len(m.Validations) > 0 { + for iNdEx := len(m.Validations) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Validations[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a } } - if len(m.MatchConditions) > 0 { - for _, e := range m.MatchConditions { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.MatchConstraints != nil { + { + size, err := m.MatchConstraints.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x12 } - if len(m.Variables) > 0 { - for _, e := range m.Variables { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.ParamKind != nil { + { + size, err := m.ParamKind.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0xa } - return n + return len(dAtA) - i, nil } -func (m *ValidatingAdmissionPolicyStatus) Size() (n int) { - if m == nil { - return 0 +func (m *ValidatingAdmissionPolicyStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } + return dAtA[:n], nil +} + +func (m *ValidatingAdmissionPolicyStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ValidatingAdmissionPolicyStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - n += 1 + sovGenerated(uint64(m.ObservedGeneration)) - if m.TypeChecking != nil { - l = m.TypeChecking.Size() - n += 1 + l + sovGenerated(uint64(l)) - } if len(m.Conditions) > 0 { - for _, e := range m.Conditions { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a } } - return n + if m.TypeChecking != nil { + { + size, err := m.TypeChecking.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } -func (m *Validation) Size() (n int) { - if m == nil { - return 0 +func (m *Validation) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } + return dAtA[:n], nil +} + +func (m *Validation) MarshalTo(dAtA []byte) (int, error) { 
+ size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Validation) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Expression) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Message) - n += 1 + l + sovGenerated(uint64(l)) + i -= len(m.MessageExpression) + copy(dAtA[i:], m.MessageExpression) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.MessageExpression))) + i-- + dAtA[i] = 0x22 if m.Reason != nil { - l = len(*m.Reason) - n += 1 + l + sovGenerated(uint64(l)) + i -= len(*m.Reason) + copy(dAtA[i:], *m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Reason))) + i-- + dAtA[i] = 0x1a } - l = len(m.MessageExpression) - n += 1 + l + sovGenerated(uint64(l)) - return n + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x12 + i -= len(m.Expression) + copy(dAtA[i:], m.Expression) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Expression))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *Variable) Size() (n int) { - if m == nil { - return 0 +func (m *Variable) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } + return dAtA[:n], nil +} + +func (m *Variable) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Variable) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Expression) - n += 1 + l + sovGenerated(uint64(l)) - return n + i -= len(m.Expression) + copy(dAtA[i:], m.Expression) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Expression))) + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func sovGenerated(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozGenerated(x uint64) (n int) { - return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *AuditAnnotation) String() string { - if this == nil { - return "nil" +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ } - s := strings.Join([]string{`&AuditAnnotation{`, - `Key:` + fmt.Sprintf("%v", this.Key) + `,`, - `ValueExpression:` + fmt.Sprintf("%v", this.ValueExpression) + `,`, - `}`, - }, "") - return s + dAtA[offset] = uint8(v) + return base } -func (this *ExpressionWarning) String() string { - if this == nil { - return "nil" +func (m *ApplyConfiguration) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&ExpressionWarning{`, - `FieldRef:` + fmt.Sprintf("%v", this.FieldRef) + `,`, - `Warning:` + fmt.Sprintf("%v", this.Warning) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = len(m.Expression) + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (this *MatchCondition) String() string { - if this == nil { - return "nil" + +func (m *AuditAnnotation) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&MatchCondition{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Expression:` + fmt.Sprintf("%v", this.Expression) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = len(m.Key) + n += 1 + l 
+ sovGenerated(uint64(l)) + l = len(m.ValueExpression) + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (this *MatchResources) String() string { - if this == nil { - return "nil" - } - repeatedStringForResourceRules := "[]NamedRuleWithOperations{" - for _, f := range this.ResourceRules { - repeatedStringForResourceRules += strings.Replace(strings.Replace(f.String(), "NamedRuleWithOperations", "NamedRuleWithOperations", 1), `&`, ``, 1) + "," - } - repeatedStringForResourceRules += "}" - repeatedStringForExcludeResourceRules := "[]NamedRuleWithOperations{" - for _, f := range this.ExcludeResourceRules { - repeatedStringForExcludeResourceRules += strings.Replace(strings.Replace(f.String(), "NamedRuleWithOperations", "NamedRuleWithOperations", 1), `&`, ``, 1) + "," + +func (m *ExpressionWarning) Size() (n int) { + if m == nil { + return 0 } - repeatedStringForExcludeResourceRules += "}" - s := strings.Join([]string{`&MatchResources{`, - `NamespaceSelector:` + strings.Replace(fmt.Sprintf("%v", this.NamespaceSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, - `ObjectSelector:` + strings.Replace(fmt.Sprintf("%v", this.ObjectSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, - `ResourceRules:` + repeatedStringForResourceRules + `,`, - `ExcludeResourceRules:` + repeatedStringForExcludeResourceRules + `,`, - `MatchPolicy:` + valueToStringGenerated(this.MatchPolicy) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = len(m.FieldRef) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Warning) + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (this *NamedRuleWithOperations) String() string { - if this == nil { - return "nil" + +func (m *JSONPatch) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&NamedRuleWithOperations{`, - `ResourceNames:` + fmt.Sprintf("%v", this.ResourceNames) + `,`, - `RuleWithOperations:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.RuleWithOperations), "RuleWithOperations", "v11.RuleWithOperations", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = len(m.Expression) + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (this *ParamKind) String() string { - if this == nil { - return "nil" + +func (m *MatchCondition) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&ParamKind{`, - `APIVersion:` + fmt.Sprintf("%v", this.APIVersion) + `,`, - `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Expression) + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (this *ParamRef) String() string { - if this == nil { - return "nil" + +func (m *MatchResources) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&ParamRef{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, - `ParameterNotFoundAction:` + valueToStringGenerated(this.ParameterNotFoundAction) + `,`, - `}`, - }, "") - return s -} -func (this *TypeChecking) String() string { - if this == nil { - return "nil" + var l int + _ = l + if m.NamespaceSelector != nil { + l = m.NamespaceSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) } - repeatedStringForExpressionWarnings := "[]ExpressionWarning{" - for _, f := range this.ExpressionWarnings { - repeatedStringForExpressionWarnings += 
strings.Replace(strings.Replace(f.String(), "ExpressionWarning", "ExpressionWarning", 1), `&`, ``, 1) + "," + if m.ObjectSelector != nil { + l = m.ObjectSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) } - repeatedStringForExpressionWarnings += "}" - s := strings.Join([]string{`&TypeChecking{`, - `ExpressionWarnings:` + repeatedStringForExpressionWarnings + `,`, - `}`, - }, "") - return s + if len(m.ResourceRules) > 0 { + for _, e := range m.ResourceRules { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.ExcludeResourceRules) > 0 { + for _, e := range m.ExcludeResourceRules { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.MatchPolicy != nil { + l = len(*m.MatchPolicy) + n += 1 + l + sovGenerated(uint64(l)) + } + return n } -func (this *ValidatingAdmissionPolicy) String() string { - if this == nil { - return "nil" + +func (m *MutatingAdmissionPolicy) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&ValidatingAdmissionPolicy{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ValidatingAdmissionPolicySpec", "ValidatingAdmissionPolicySpec", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ValidatingAdmissionPolicyStatus", "ValidatingAdmissionPolicyStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (this *ValidatingAdmissionPolicyBinding) String() string { - if this == nil { - return "nil" + +func (m *MutatingAdmissionPolicyBinding) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&ValidatingAdmissionPolicyBinding{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ValidatingAdmissionPolicyBindingSpec", "ValidatingAdmissionPolicyBindingSpec", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (this *ValidatingAdmissionPolicyBindingList) String() string { - if this == nil { - return "nil" + +func (m *MutatingAdmissionPolicyBindingList) Size() (n int) { + if m == nil { + return 0 } - repeatedStringForItems := "[]ValidatingAdmissionPolicyBinding{" - for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ValidatingAdmissionPolicyBinding", "ValidatingAdmissionPolicyBinding", 1), `&`, ``, 1) + "," + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - repeatedStringForItems += "}" - s := strings.Join([]string{`&ValidatingAdmissionPolicyBindingList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, - `}`, - }, "") - return s + return n } -func (this *ValidatingAdmissionPolicyBindingSpec) String() string { - if this == nil { - return "nil" + +func (m *MutatingAdmissionPolicyBindingSpec) Size() (n int) { + 
if m == nil { + return 0 } - s := strings.Join([]string{`&ValidatingAdmissionPolicyBindingSpec{`, - `PolicyName:` + fmt.Sprintf("%v", this.PolicyName) + `,`, - `ParamRef:` + strings.Replace(this.ParamRef.String(), "ParamRef", "ParamRef", 1) + `,`, - `MatchResources:` + strings.Replace(this.MatchResources.String(), "MatchResources", "MatchResources", 1) + `,`, - `ValidationActions:` + fmt.Sprintf("%v", this.ValidationActions) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = len(m.PolicyName) + n += 1 + l + sovGenerated(uint64(l)) + if m.ParamRef != nil { + l = m.ParamRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.MatchResources != nil { + l = m.MatchResources.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n } -func (this *ValidatingAdmissionPolicyList) String() string { - if this == nil { - return "nil" + +func (m *MutatingAdmissionPolicyList) Size() (n int) { + if m == nil { + return 0 } - repeatedStringForItems := "[]ValidatingAdmissionPolicy{" - for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ValidatingAdmissionPolicy", "ValidatingAdmissionPolicy", 1), `&`, ``, 1) + "," + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - repeatedStringForItems += "}" - s := strings.Join([]string{`&ValidatingAdmissionPolicyList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, - `}`, - }, "") - return s + return n } -func (this *ValidatingAdmissionPolicySpec) String() string { - if this == nil { - return "nil" + +func (m *MutatingAdmissionPolicySpec) Size() (n int) { + if m == nil { + return 0 } - repeatedStringForValidations := "[]Validation{" - for _, f := range this.Validations { - repeatedStringForValidations += strings.Replace(strings.Replace(f.String(), "Validation", "Validation", 1), `&`, ``, 1) + "," + var l int + _ = l + if m.ParamKind != nil { + l = m.ParamKind.Size() + n += 1 + l + sovGenerated(uint64(l)) } - repeatedStringForValidations += "}" - repeatedStringForAuditAnnotations := "[]AuditAnnotation{" - for _, f := range this.AuditAnnotations { - repeatedStringForAuditAnnotations += strings.Replace(strings.Replace(f.String(), "AuditAnnotation", "AuditAnnotation", 1), `&`, ``, 1) + "," + if m.MatchConstraints != nil { + l = m.MatchConstraints.Size() + n += 1 + l + sovGenerated(uint64(l)) } - repeatedStringForAuditAnnotations += "}" - repeatedStringForMatchConditions := "[]MatchCondition{" - for _, f := range this.MatchConditions { - repeatedStringForMatchConditions += strings.Replace(strings.Replace(f.String(), "MatchCondition", "MatchCondition", 1), `&`, ``, 1) + "," + if len(m.Variables) > 0 { + for _, e := range m.Variables { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - repeatedStringForMatchConditions += "}" - repeatedStringForVariables := "[]Variable{" - for _, f := range this.Variables { - repeatedStringForVariables += strings.Replace(strings.Replace(f.String(), "Variable", "Variable", 1), `&`, ``, 1) + "," + if len(m.Mutations) > 0 { + for _, e := range m.Mutations { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - repeatedStringForVariables += "}" - s := strings.Join([]string{`&ValidatingAdmissionPolicySpec{`, - `ParamKind:` + strings.Replace(this.ParamKind.String(), "ParamKind", "ParamKind", 1) 
+ `,`, - `MatchConstraints:` + strings.Replace(this.MatchConstraints.String(), "MatchResources", "MatchResources", 1) + `,`, - `Validations:` + repeatedStringForValidations + `,`, - `FailurePolicy:` + valueToStringGenerated(this.FailurePolicy) + `,`, - `AuditAnnotations:` + repeatedStringForAuditAnnotations + `,`, - `MatchConditions:` + repeatedStringForMatchConditions + `,`, - `Variables:` + repeatedStringForVariables + `,`, - `}`, - }, "") - return s + if m.FailurePolicy != nil { + l = len(*m.FailurePolicy) + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.MatchConditions) > 0 { + for _, e := range m.MatchConditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.ReinvocationPolicy) + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (this *ValidatingAdmissionPolicyStatus) String() string { - if this == nil { - return "nil" + +func (m *Mutation) Size() (n int) { + if m == nil { + return 0 } - repeatedStringForConditions := "[]Condition{" - for _, f := range this.Conditions { - repeatedStringForConditions += fmt.Sprintf("%v", f) + "," + var l int + _ = l + l = len(m.PatchType) + n += 1 + l + sovGenerated(uint64(l)) + if m.ApplyConfiguration != nil { + l = m.ApplyConfiguration.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.JSONPatch != nil { + l = m.JSONPatch.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *NamedRuleWithOperations) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.ResourceNames) > 0 { + for _, s := range m.ResourceNames { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = m.RuleWithOperations.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ParamKind) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.APIVersion) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Kind) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ParamRef) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Namespace) + n += 1 + l + sovGenerated(uint64(l)) + if m.Selector != nil { + l = m.Selector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ParameterNotFoundAction != nil { + l = len(*m.ParameterNotFoundAction) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *TypeChecking) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.ExpressionWarnings) > 0 { + for _, e := range m.ExpressionWarnings { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ValidatingAdmissionPolicy) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ValidatingAdmissionPolicyBinding) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ValidatingAdmissionPolicyBindingList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ValidatingAdmissionPolicyBindingSpec) Size() (n 
int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.PolicyName) + n += 1 + l + sovGenerated(uint64(l)) + if m.ParamRef != nil { + l = m.ParamRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.MatchResources != nil { + l = m.MatchResources.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.ValidationActions) > 0 { + for _, s := range m.ValidationActions { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ValidatingAdmissionPolicyList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ValidatingAdmissionPolicySpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ParamKind != nil { + l = m.ParamKind.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.MatchConstraints != nil { + l = m.MatchConstraints.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.Validations) > 0 { + for _, e := range m.Validations { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.FailurePolicy != nil { + l = len(*m.FailurePolicy) + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.AuditAnnotations) > 0 { + for _, e := range m.AuditAnnotations { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.MatchConditions) > 0 { + for _, e := range m.MatchConditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Variables) > 0 { + for _, e := range m.Variables { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ValidatingAdmissionPolicyStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + sovGenerated(uint64(m.ObservedGeneration)) + if m.TypeChecking != nil { + l = m.TypeChecking.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *Validation) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Expression) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + if m.Reason != nil { + l = len(*m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.MessageExpression) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *Variable) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Expression) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *ApplyConfiguration) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ApplyConfiguration{`, + `Expression:` + fmt.Sprintf("%v", this.Expression) + `,`, + `}`, + }, "") + return s +} +func (this *AuditAnnotation) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AuditAnnotation{`, + `Key:` + fmt.Sprintf("%v", this.Key) + `,`, + `ValueExpression:` + fmt.Sprintf("%v", this.ValueExpression) + `,`, + `}`, + }, "") + return s +} +func (this *ExpressionWarning) String() string { + if this == nil { + return "nil" + } + s := 
strings.Join([]string{`&ExpressionWarning{`, + `FieldRef:` + fmt.Sprintf("%v", this.FieldRef) + `,`, + `Warning:` + fmt.Sprintf("%v", this.Warning) + `,`, + `}`, + }, "") + return s +} +func (this *JSONPatch) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&JSONPatch{`, + `Expression:` + fmt.Sprintf("%v", this.Expression) + `,`, + `}`, + }, "") + return s +} +func (this *MatchCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&MatchCondition{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Expression:` + fmt.Sprintf("%v", this.Expression) + `,`, + `}`, + }, "") + return s +} +func (this *MatchResources) String() string { + if this == nil { + return "nil" + } + repeatedStringForResourceRules := "[]NamedRuleWithOperations{" + for _, f := range this.ResourceRules { + repeatedStringForResourceRules += strings.Replace(strings.Replace(f.String(), "NamedRuleWithOperations", "NamedRuleWithOperations", 1), `&`, ``, 1) + "," + } + repeatedStringForResourceRules += "}" + repeatedStringForExcludeResourceRules := "[]NamedRuleWithOperations{" + for _, f := range this.ExcludeResourceRules { + repeatedStringForExcludeResourceRules += strings.Replace(strings.Replace(f.String(), "NamedRuleWithOperations", "NamedRuleWithOperations", 1), `&`, ``, 1) + "," + } + repeatedStringForExcludeResourceRules += "}" + s := strings.Join([]string{`&MatchResources{`, + `NamespaceSelector:` + strings.Replace(fmt.Sprintf("%v", this.NamespaceSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `ObjectSelector:` + strings.Replace(fmt.Sprintf("%v", this.ObjectSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `ResourceRules:` + repeatedStringForResourceRules + `,`, + `ExcludeResourceRules:` + repeatedStringForExcludeResourceRules + `,`, + `MatchPolicy:` + valueToStringGenerated(this.MatchPolicy) + `,`, + `}`, + }, "") + return s +} +func (this *MutatingAdmissionPolicy) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&MutatingAdmissionPolicy{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "MutatingAdmissionPolicySpec", "MutatingAdmissionPolicySpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *MutatingAdmissionPolicyBinding) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&MutatingAdmissionPolicyBinding{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "MutatingAdmissionPolicyBindingSpec", "MutatingAdmissionPolicyBindingSpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *MutatingAdmissionPolicyBindingList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]MutatingAdmissionPolicyBinding{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "MutatingAdmissionPolicyBinding", "MutatingAdmissionPolicyBinding", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&MutatingAdmissionPolicyBindingList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + 
}, "") + return s +} +func (this *MutatingAdmissionPolicyBindingSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&MutatingAdmissionPolicyBindingSpec{`, + `PolicyName:` + fmt.Sprintf("%v", this.PolicyName) + `,`, + `ParamRef:` + strings.Replace(this.ParamRef.String(), "ParamRef", "ParamRef", 1) + `,`, + `MatchResources:` + strings.Replace(this.MatchResources.String(), "MatchResources", "MatchResources", 1) + `,`, + `}`, + }, "") + return s +} +func (this *MutatingAdmissionPolicyList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]MutatingAdmissionPolicy{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "MutatingAdmissionPolicy", "MutatingAdmissionPolicy", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&MutatingAdmissionPolicyList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *MutatingAdmissionPolicySpec) String() string { + if this == nil { + return "nil" + } + repeatedStringForVariables := "[]Variable{" + for _, f := range this.Variables { + repeatedStringForVariables += strings.Replace(strings.Replace(f.String(), "Variable", "Variable", 1), `&`, ``, 1) + "," + } + repeatedStringForVariables += "}" + repeatedStringForMutations := "[]Mutation{" + for _, f := range this.Mutations { + repeatedStringForMutations += strings.Replace(strings.Replace(f.String(), "Mutation", "Mutation", 1), `&`, ``, 1) + "," + } + repeatedStringForMutations += "}" + repeatedStringForMatchConditions := "[]MatchCondition{" + for _, f := range this.MatchConditions { + repeatedStringForMatchConditions += strings.Replace(strings.Replace(f.String(), "MatchCondition", "MatchCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForMatchConditions += "}" + s := strings.Join([]string{`&MutatingAdmissionPolicySpec{`, + `ParamKind:` + strings.Replace(this.ParamKind.String(), "ParamKind", "ParamKind", 1) + `,`, + `MatchConstraints:` + strings.Replace(this.MatchConstraints.String(), "MatchResources", "MatchResources", 1) + `,`, + `Variables:` + repeatedStringForVariables + `,`, + `Mutations:` + repeatedStringForMutations + `,`, + `FailurePolicy:` + valueToStringGenerated(this.FailurePolicy) + `,`, + `MatchConditions:` + repeatedStringForMatchConditions + `,`, + `ReinvocationPolicy:` + fmt.Sprintf("%v", this.ReinvocationPolicy) + `,`, + `}`, + }, "") + return s +} +func (this *Mutation) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Mutation{`, + `PatchType:` + fmt.Sprintf("%v", this.PatchType) + `,`, + `ApplyConfiguration:` + strings.Replace(this.ApplyConfiguration.String(), "ApplyConfiguration", "ApplyConfiguration", 1) + `,`, + `JSONPatch:` + strings.Replace(this.JSONPatch.String(), "JSONPatch", "JSONPatch", 1) + `,`, + `}`, + }, "") + return s +} +func (this *NamedRuleWithOperations) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NamedRuleWithOperations{`, + `ResourceNames:` + fmt.Sprintf("%v", this.ResourceNames) + `,`, + `RuleWithOperations:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.RuleWithOperations), "RuleWithOperations", "v11.RuleWithOperations", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ParamKind) String() string { + if this == nil { + return "nil" + } 
+ s := strings.Join([]string{`&ParamKind{`, + `APIVersion:` + fmt.Sprintf("%v", this.APIVersion) + `,`, + `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, + `}`, + }, "") + return s +} +func (this *ParamRef) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ParamRef{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `ParameterNotFoundAction:` + valueToStringGenerated(this.ParameterNotFoundAction) + `,`, + `}`, + }, "") + return s +} +func (this *TypeChecking) String() string { + if this == nil { + return "nil" + } + repeatedStringForExpressionWarnings := "[]ExpressionWarning{" + for _, f := range this.ExpressionWarnings { + repeatedStringForExpressionWarnings += strings.Replace(strings.Replace(f.String(), "ExpressionWarning", "ExpressionWarning", 1), `&`, ``, 1) + "," + } + repeatedStringForExpressionWarnings += "}" + s := strings.Join([]string{`&TypeChecking{`, + `ExpressionWarnings:` + repeatedStringForExpressionWarnings + `,`, + `}`, + }, "") + return s +} +func (this *ValidatingAdmissionPolicy) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ValidatingAdmissionPolicy{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ValidatingAdmissionPolicySpec", "ValidatingAdmissionPolicySpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ValidatingAdmissionPolicyStatus", "ValidatingAdmissionPolicyStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ValidatingAdmissionPolicyBinding) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ValidatingAdmissionPolicyBinding{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ValidatingAdmissionPolicyBindingSpec", "ValidatingAdmissionPolicyBindingSpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ValidatingAdmissionPolicyBindingList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]ValidatingAdmissionPolicyBinding{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ValidatingAdmissionPolicyBinding", "ValidatingAdmissionPolicyBinding", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&ValidatingAdmissionPolicyBindingList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *ValidatingAdmissionPolicyBindingSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ValidatingAdmissionPolicyBindingSpec{`, + `PolicyName:` + fmt.Sprintf("%v", this.PolicyName) + `,`, + `ParamRef:` + strings.Replace(this.ParamRef.String(), "ParamRef", "ParamRef", 1) + `,`, + `MatchResources:` + strings.Replace(this.MatchResources.String(), "MatchResources", "MatchResources", 1) + `,`, + `ValidationActions:` + fmt.Sprintf("%v", this.ValidationActions) + `,`, + `}`, + }, "") + return s 
+} +func (this *ValidatingAdmissionPolicyList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]ValidatingAdmissionPolicy{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ValidatingAdmissionPolicy", "ValidatingAdmissionPolicy", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&ValidatingAdmissionPolicyList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *ValidatingAdmissionPolicySpec) String() string { + if this == nil { + return "nil" + } + repeatedStringForValidations := "[]Validation{" + for _, f := range this.Validations { + repeatedStringForValidations += strings.Replace(strings.Replace(f.String(), "Validation", "Validation", 1), `&`, ``, 1) + "," + } + repeatedStringForValidations += "}" + repeatedStringForAuditAnnotations := "[]AuditAnnotation{" + for _, f := range this.AuditAnnotations { + repeatedStringForAuditAnnotations += strings.Replace(strings.Replace(f.String(), "AuditAnnotation", "AuditAnnotation", 1), `&`, ``, 1) + "," + } + repeatedStringForAuditAnnotations += "}" + repeatedStringForMatchConditions := "[]MatchCondition{" + for _, f := range this.MatchConditions { + repeatedStringForMatchConditions += strings.Replace(strings.Replace(f.String(), "MatchCondition", "MatchCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForMatchConditions += "}" + repeatedStringForVariables := "[]Variable{" + for _, f := range this.Variables { + repeatedStringForVariables += strings.Replace(strings.Replace(f.String(), "Variable", "Variable", 1), `&`, ``, 1) + "," + } + repeatedStringForVariables += "}" + s := strings.Join([]string{`&ValidatingAdmissionPolicySpec{`, + `ParamKind:` + strings.Replace(this.ParamKind.String(), "ParamKind", "ParamKind", 1) + `,`, + `MatchConstraints:` + strings.Replace(this.MatchConstraints.String(), "MatchResources", "MatchResources", 1) + `,`, + `Validations:` + repeatedStringForValidations + `,`, + `FailurePolicy:` + valueToStringGenerated(this.FailurePolicy) + `,`, + `AuditAnnotations:` + repeatedStringForAuditAnnotations + `,`, + `MatchConditions:` + repeatedStringForMatchConditions + `,`, + `Variables:` + repeatedStringForVariables + `,`, + `}`, + }, "") + return s +} +func (this *ValidatingAdmissionPolicyStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForConditions := "[]Condition{" + for _, f := range this.Conditions { + repeatedStringForConditions += fmt.Sprintf("%v", f) + "," + } + repeatedStringForConditions += "}" + s := strings.Join([]string{`&ValidatingAdmissionPolicyStatus{`, + `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, + `TypeChecking:` + strings.Replace(this.TypeChecking.String(), "TypeChecking", "TypeChecking", 1) + `,`, + `Conditions:` + repeatedStringForConditions + `,`, + `}`, + }, "") + return s +} +func (this *Validation) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Validation{`, + `Expression:` + fmt.Sprintf("%v", this.Expression) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `Reason:` + valueToStringGenerated(this.Reason) + `,`, + `MessageExpression:` + fmt.Sprintf("%v", this.MessageExpression) + `,`, + `}`, + }, "") + return s +} +func (this *Variable) String() string { + if this == nil { + return "nil" + } + s := 
strings.Join([]string{`&Variable{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Expression:` + fmt.Sprintf("%v", this.Expression) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *ApplyConfiguration) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ApplyConfiguration: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ApplyConfiguration: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Expression", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Expression = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuditAnnotation) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuditAnnotation: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuditAnnotation: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong 
wireType = %d for field ValueExpression", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ValueExpression = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ExpressionWarning) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExpressionWarning: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExpressionWarning: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FieldRef", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FieldRef = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Warning", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Warning = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *JSONPatch) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 
+ for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: JSONPatch: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: JSONPatch: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Expression", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Expression = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MatchCondition) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MatchCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MatchCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Expression", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + 
return io.ErrUnexpectedEOF + } + m.Expression = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MatchResources) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MatchResources: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MatchResources: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NamespaceSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NamespaceSelector == nil { + m.NamespaceSelector = &v1.LabelSelector{} + } + if err := m.NamespaceSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ObjectSelector == nil { + m.ObjectSelector = &v1.LabelSelector{} + } + if err := m.ObjectSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceRules", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResourceRules = append(m.ResourceRules, NamedRuleWithOperations{}) + if err := m.ResourceRules[len(m.ResourceRules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field 
ExcludeResourceRules", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ExcludeResourceRules = append(m.ExcludeResourceRules, NamedRuleWithOperations{}) + if err := m.ExcludeResourceRules[len(m.ExcludeResourceRules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MatchPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := MatchPolicyType(dAtA[iNdEx:postIndex]) + m.MatchPolicy = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MutatingAdmissionPolicy) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MutatingAdmissionPolicy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MutatingAdmissionPolicy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } 
+ } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MutatingAdmissionPolicyBinding) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MutatingAdmissionPolicyBinding: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MutatingAdmissionPolicyBinding: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MutatingAdmissionPolicyBindingList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + 
if wireType == 4 { + return fmt.Errorf("proto: MutatingAdmissionPolicyBindingList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MutatingAdmissionPolicyBindingList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, MutatingAdmissionPolicyBinding{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MutatingAdmissionPolicyBindingSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MutatingAdmissionPolicyBindingSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MutatingAdmissionPolicyBindingSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PolicyName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PolicyName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return 
fmt.Errorf("proto: wrong wireType = %d for field ParamRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ParamRef == nil { + m.ParamRef = &ParamRef{} + } + if err := m.ParamRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MatchResources", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.MatchResources == nil { + m.MatchResources = &MatchResources{} + } + if err := m.MatchResources.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } } - repeatedStringForConditions += "}" - s := strings.Join([]string{`&ValidatingAdmissionPolicyStatus{`, - `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, - `TypeChecking:` + strings.Replace(this.TypeChecking.String(), "TypeChecking", "TypeChecking", 1) + `,`, - `Conditions:` + repeatedStringForConditions + `,`, - `}`, - }, "") - return s -} -func (this *Validation) String() string { - if this == nil { - return "nil" + + if iNdEx > l { + return io.ErrUnexpectedEOF } - s := strings.Join([]string{`&Validation{`, - `Expression:` + fmt.Sprintf("%v", this.Expression) + `,`, - `Message:` + fmt.Sprintf("%v", this.Message) + `,`, - `Reason:` + valueToStringGenerated(this.Reason) + `,`, - `MessageExpression:` + fmt.Sprintf("%v", this.MessageExpression) + `,`, - `}`, - }, "") - return s + return nil } -func (this *Variable) String() string { - if this == nil { - return "nil" +func (m *MutatingAdmissionPolicyList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MutatingAdmissionPolicyList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MutatingAdmissionPolicyList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + 
return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, MutatingAdmissionPolicy{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } } - s := strings.Join([]string{`&Variable{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Expression:` + fmt.Sprintf("%v", this.Expression) + `,`, - `}`, - }, "") - return s -} -func valueToStringGenerated(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" + + if iNdEx > l { + return io.ErrUnexpectedEOF } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) + return nil } -func (m *AuditAnnotation) Unmarshal(dAtA []byte) error { +func (m *MutatingAdmissionPolicySpec) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -2101,17 +4461,17 @@ func (m *AuditAnnotation) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: AuditAnnotation: wiretype end group for non-group") + return fmt.Errorf("proto: MutatingAdmissionPolicySpec: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: AuditAnnotation: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: MutatingAdmissionPolicySpec: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ParamKind", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -2121,29 +4481,69 @@ func (m *AuditAnnotation) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Key = string(dAtA[iNdEx:postIndex]) + if m.ParamKind == nil { + m.ParamKind = 
&ParamKind{} + } + if err := m.ParamKind.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MatchConstraints", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.MatchConstraints == nil { + m.MatchConstraints = &MatchResources{} + } + if err := m.MatchConstraints.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 2: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ValueExpression", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Variables", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -2153,79 +4553,31 @@ func (m *AuditAnnotation) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.ValueExpression = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { + m.Variables = append(m.Variables, Variable{}) + if err := m.Variables[len(m.Variables)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ExpressionWarning) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ExpressionWarning: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ExpressionWarning: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 2: + iNdEx = postIndex + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FieldRef", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Mutations", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -2235,27 +4587,29 @@ func (m *ExpressionWarning) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if 
intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.FieldRef = string(dAtA[iNdEx:postIndex]) + m.Mutations = append(m.Mutations, Mutation{}) + if err := m.Mutations[len(m.Mutations)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 3: + case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Warning", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field FailurePolicy", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -2283,63 +4637,14 @@ func (m *ExpressionWarning) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Warning = string(dAtA[iNdEx:postIndex]) + s := FailurePolicyType(dAtA[iNdEx:postIndex]) + m.FailurePolicy = &s iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MatchCondition) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MatchCondition: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MatchCondition: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + case 6: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field MatchConditions", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -2349,27 +4654,29 @@ func (m *MatchCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(dAtA[iNdEx:postIndex]) + m.MatchConditions = append(m.MatchConditions, MatchCondition{}) + if err := m.MatchConditions[len(m.MatchConditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 2: + case 7: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Expression", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ReinvocationPolicy", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -2397,7 +4704,7 @@ func (m *MatchCondition) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Expression = string(dAtA[iNdEx:postIndex]) + m.ReinvocationPolicy = 
k8s_io_api_admissionregistration_v1.ReinvocationPolicyType(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -2420,7 +4727,7 @@ func (m *MatchCondition) Unmarshal(dAtA []byte) error { } return nil } -func (m *MatchResources) Unmarshal(dAtA []byte) error { +func (m *Mutation) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -2443,53 +4750,17 @@ func (m *MatchResources) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: MatchResources: wiretype end group for non-group") + return fmt.Errorf("proto: Mutation: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: MatchResources: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: Mutation: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NamespaceSelector", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.NamespaceSelector == nil { - m.NamespaceSelector = &v1.LabelSelector{} - } - if err := m.NamespaceSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectSelector", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field PatchType", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -2499,31 +4770,27 @@ func (m *MatchResources) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if m.ObjectSelector == nil { - m.ObjectSelector = &v1.LabelSelector{} - } - if err := m.ObjectSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.PatchType = PatchType(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ResourceRules", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ApplyConfiguration", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -2550,14 +4817,16 @@ func (m *MatchResources) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.ResourceRules = append(m.ResourceRules, NamedRuleWithOperations{}) - if err := m.ResourceRules[len(m.ResourceRules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.ApplyConfiguration == nil { + m.ApplyConfiguration = &ApplyConfiguration{} + } + if err := m.ApplyConfiguration.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field 
ExcludeResourceRules", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field JSONPatch", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -2584,43 +4853,12 @@ func (m *MatchResources) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.ExcludeResourceRules = append(m.ExcludeResourceRules, NamedRuleWithOperations{}) - if err := m.ExcludeResourceRules[len(m.ExcludeResourceRules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MatchPolicy", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated + if m.JSONPatch == nil { + m.JSONPatch = &JSONPatch{} } - if postIndex > l { - return io.ErrUnexpectedEOF + if err := m.JSONPatch.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - s := MatchPolicyType(dAtA[iNdEx:postIndex]) - m.MatchPolicy = &s iNdEx = postIndex default: iNdEx = preIndex diff --git a/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.proto b/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.proto index d5974d5ec..88344ce87 100644 --- a/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.proto +++ b/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.proto @@ -29,6 +29,51 @@ import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; // Package-wide variables from generator "generated". option go_package = "k8s.io/api/admissionregistration/v1alpha1"; +// ApplyConfiguration defines the desired configuration values of an object. +message ApplyConfiguration { + // expression will be evaluated by CEL to create an apply configuration. + // ref: https://github.com/google/cel-spec + // + // Apply configurations are declared in CEL using object initialization. For example, this CEL expression + // returns an apply configuration to set a single field: + // + // Object{ + // spec: Object.spec{ + // serviceAccountName: "example" + // } + // } + // + // Apply configurations may not modify atomic structs, maps or arrays due to the risk of accidental deletion of + // values not included in the apply configuration. + // + // CEL expressions have access to the object types needed to create apply configurations: + // + // - 'Object' - CEL type of the resource object. + // - 'Object.' - CEL type of object field (such as 'Object.spec') + // - 'Object.....` - CEL type of nested field (such as 'Object.spec.containers') + // + // CEL expressions have access to the contents of the API request, organized into CEL variables as well as some other useful variables: + // + // - 'object' - The object from the incoming request. The value is null for DELETE requests. + // - 'oldObject' - The existing object. The value is null for CREATE requests. + // - 'request' - Attributes of the API request([ref](/pkg/apis/admission/types.go#AdmissionRequest)). + // - 'params' - Parameter resource referred to by the policy binding being evaluated. Only populated if the policy has a ParamKind. + // - 'namespaceObject' - The namespace object that the incoming object belongs to. 
The value is null for cluster-scoped resources. + // - 'variables' - Map of composited variables, from its name to its lazily evaluated value. + // For example, a variable named 'foo' can be accessed as 'variables.foo'. + // - 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request. + // See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz + // - 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the + // request resource. + // + // The `apiVersion`, `kind`, `metadata.name` and `metadata.generateName` are always accessible from the root of the + // object. No other metadata properties are accessible. + // + // Only property names of the form `[a-zA-Z_.-/][a-zA-Z0-9_.-/]*` are accessible. + // Required. + optional string expression = 1; +} + // AuditAnnotation describes how to produce an audit annotation for an API request. message AuditAnnotation { // key specifies the audit annotation key. The audit annotation keys of @@ -79,6 +124,75 @@ message ExpressionWarning { optional string warning = 3; } +// JSONPatch defines a JSON Patch. +message JSONPatch { + // expression will be evaluated by CEL to create a [JSON patch](https://jsonpatch.com/). + // ref: https://github.com/google/cel-spec + // + // expression must return an array of JSONPatch values. + // + // For example, this CEL expression returns a JSON patch to conditionally modify a value: + // + // [ + // JSONPatch{op: "test", path: "/spec/example", value: "Red"}, + // JSONPatch{op: "replace", path: "/spec/example", value: "Green"} + // ] + // + // To define an object for the patch value, use Object types. For example: + // + // [ + // JSONPatch{ + // op: "add", + // path: "/spec/selector", + // value: Object.spec.selector{matchLabels: {"environment": "test"}} + // } + // ] + // + // To use strings containing '/' and '~' as JSONPatch path keys, use "jsonpatch.escapeKey". For example: + // + // [ + // JSONPatch{ + // op: "add", + // path: "/metadata/labels/" + jsonpatch.escapeKey("example.com/environment"), + // value: "test" + // }, + // ] + // + // CEL expressions have access to the types needed to create JSON patches and objects: + // + // - 'JSONPatch' - CEL type of JSON Patch operations. JSONPatch has the fields 'op', 'from', 'path' and 'value'. + // See [JSON patch](https://jsonpatch.com/) for more details. The 'value' field may be set to any of: string, + // integer, array, map or object. If set, the 'path' and 'from' fields must be set to a + // [JSON pointer](https://datatracker.ietf.org/doc/html/rfc6901/) string, where the 'jsonpatch.escapeKey()' CEL + // function may be used to escape path keys containing '/' and '~'. + // - 'Object' - CEL type of the resource object. + // - 'Object.' - CEL type of object field (such as 'Object.spec') + // - 'Object.....` - CEL type of nested field (such as 'Object.spec.containers') + // + // CEL expressions have access to the contents of the API request, organized into CEL variables as well as some other useful variables: + // + // - 'object' - The object from the incoming request. The value is null for DELETE requests. + // - 'oldObject' - The existing object. The value is null for CREATE requests. + // - 'request' - Attributes of the API request([ref](/pkg/apis/admission/types.go#AdmissionRequest)). + // - 'params' - Parameter resource referred to by the policy binding being evaluated. Only populated if the policy has a ParamKind. 
+ // - 'namespaceObject' - The namespace object that the incoming object belongs to. The value is null for cluster-scoped resources. + // - 'variables' - Map of composited variables, from its name to its lazily evaluated value. + // For example, a variable named 'foo' can be accessed as 'variables.foo'. + // - 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request. + // See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz + // - 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the + // request resource. + // + // CEL expressions have access to [Kubernetes CEL function libraries](https://kubernetes.io/docs/reference/using-api/cel/#cel-options-language-features-and-libraries) + // as well as: + // + // - 'jsonpatch.escapeKey' - Performs JSONPatch key escaping. '~' and '/' are escaped as '~0' and `~1' respectively). + // + // Only property names of the form `[a-zA-Z_.-/][a-zA-Z0-9_.-/]*` are accessible. + // Required. + optional string expression = 1; +} + message MatchCondition { // Name is an identifier for this match condition, used for strategic merging of MatchConditions, // as well as providing an identifier for logging purposes. A good name should be descriptive of @@ -202,6 +316,193 @@ message MatchResources { optional string matchPolicy = 7; } +// MutatingAdmissionPolicy describes the definition of an admission mutation policy that mutates the object coming into admission chain. +message MutatingAdmissionPolicy { + // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Specification of the desired behavior of the MutatingAdmissionPolicy. + optional MutatingAdmissionPolicySpec spec = 2; +} + +// MutatingAdmissionPolicyBinding binds the MutatingAdmissionPolicy with parametrized resources. +// MutatingAdmissionPolicyBinding and the optional parameter resource together define how cluster administrators +// configure policies for clusters. +// +// For a given admission request, each binding will cause its policy to be +// evaluated N times, where N is 1 for policies/bindings that don't use +// params, otherwise N is the number of parameters selected by the binding. +// Each evaluation is constrained by a [runtime cost budget](https://kubernetes.io/docs/reference/using-api/cel/#runtime-cost-budget). +// +// Adding/removing policies, bindings, or params can not affect whether a +// given (policy, binding, param) combination is within its own CEL budget. +message MutatingAdmissionPolicyBinding { + // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Specification of the desired behavior of the MutatingAdmissionPolicyBinding. + optional MutatingAdmissionPolicyBindingSpec spec = 2; +} + +// MutatingAdmissionPolicyBindingList is a list of MutatingAdmissionPolicyBinding. +message MutatingAdmissionPolicyBindingList { + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // List of PolicyBinding. 
+ repeated MutatingAdmissionPolicyBinding items = 2; +} + +// MutatingAdmissionPolicyBindingSpec is the specification of the MutatingAdmissionPolicyBinding. +message MutatingAdmissionPolicyBindingSpec { + // policyName references a MutatingAdmissionPolicy name which the MutatingAdmissionPolicyBinding binds to. + // If the referenced resource does not exist, this binding is considered invalid and will be ignored + // Required. + optional string policyName = 1; + + // paramRef specifies the parameter resource used to configure the admission control policy. + // It should point to a resource of the type specified in spec.ParamKind of the bound MutatingAdmissionPolicy. + // If the policy specifies a ParamKind and the resource referred to by ParamRef does not exist, this binding is considered mis-configured and the FailurePolicy of the MutatingAdmissionPolicy applied. + // If the policy does not specify a ParamKind then this field is ignored, and the rules are evaluated without a param. + // +optional + optional ParamRef paramRef = 2; + + // matchResources limits what resources match this binding and may be mutated by it. + // Note that if matchResources matches a resource, the resource must also match a policy's matchConstraints and + // matchConditions before the resource may be mutated. + // When matchResources is unset, it does not constrain resource matching, and only the policy's matchConstraints + // and matchConditions must match for the resource to be mutated. + // Additionally, matchResources.resourceRules are optional and do not constraint matching when unset. + // Note that this is differs from MutatingAdmissionPolicy matchConstraints, where resourceRules are required. + // The CREATE, UPDATE and CONNECT operations are allowed. The DELETE operation may not be matched. + // '*' matches CREATE, UPDATE and CONNECT. + // +optional + optional MatchResources matchResources = 3; +} + +// MutatingAdmissionPolicyList is a list of MutatingAdmissionPolicy. +message MutatingAdmissionPolicyList { + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // List of ValidatingAdmissionPolicy. + repeated MutatingAdmissionPolicy items = 2; +} + +// MutatingAdmissionPolicySpec is the specification of the desired behavior of the admission policy. +message MutatingAdmissionPolicySpec { + // paramKind specifies the kind of resources used to parameterize this policy. + // If absent, there are no parameters for this policy and the param CEL variable will not be provided to validation expressions. + // If paramKind refers to a non-existent kind, this policy definition is mis-configured and the FailurePolicy is applied. + // If paramKind is specified but paramRef is unset in MutatingAdmissionPolicyBinding, the params variable will be null. + // +optional + optional ParamKind paramKind = 1; + + // matchConstraints specifies what resources this policy is designed to validate. + // The MutatingAdmissionPolicy cares about a request if it matches _all_ Constraints. + // However, in order to prevent clusters from being put into an unstable state that cannot be recovered from via the API + // MutatingAdmissionPolicy cannot match MutatingAdmissionPolicy and MutatingAdmissionPolicyBinding. + // The CREATE, UPDATE and CONNECT operations are allowed. The DELETE operation may not be matched. + // '*' matches CREATE, UPDATE and CONNECT. 
+ // Required. + optional MatchResources matchConstraints = 2; + + // variables contain definitions of variables that can be used in composition of other expressions. + // Each variable is defined as a named CEL expression. + // The variables defined here will be available under `variables` in other expressions of the policy + // except matchConditions because matchConditions are evaluated before the rest of the policy. + // + // The expression of a variable can refer to other variables defined earlier in the list but not those after. + // Thus, variables must be sorted by the order of first appearance and acyclic. + // +listType=atomic + // +optional + repeated Variable variables = 3; + + // mutations contain operations to perform on matching objects. + // mutations may not be empty; a minimum of one mutation is required. + // mutations are evaluated in order, and are reinvoked according to + // the reinvocationPolicy. + // The mutations of a policy are invoked for each binding of this policy + // and reinvocation of mutations occurs on a per binding basis. + // + // +listType=atomic + // +optional + repeated Mutation mutations = 4; + + // failurePolicy defines how to handle failures for the admission policy. Failures can + // occur from CEL expression parse errors, type check errors, runtime errors and invalid + // or mis-configured policy definitions or bindings. + // + // A policy is invalid if paramKind refers to a non-existent Kind. + // A binding is invalid if paramRef.name refers to a non-existent resource. + // + // failurePolicy does not define how validations that evaluate to false are handled. + // + // Allowed values are Ignore or Fail. Defaults to Fail. + // +optional + optional string failurePolicy = 5; + + // matchConditions is a list of conditions that must be met for a request to be validated. + // Match conditions filter requests that have already been matched by the matchConstraints. + // An empty list of matchConditions matches all requests. + // There are a maximum of 64 match conditions allowed. + // + // If a parameter object is provided, it can be accessed via the `params` handle in the same + // manner as validation expressions. + // + // The exact matching logic is (in order): + // 1. If ANY matchCondition evaluates to FALSE, the policy is skipped. + // 2. If ALL matchConditions evaluate to TRUE, the policy is evaluated. + // 3. If any matchCondition evaluates to an error (but none are FALSE): + // - If failurePolicy=Fail, reject the request + // - If failurePolicy=Ignore, the policy is skipped + // + // +patchMergeKey=name + // +patchStrategy=merge + // +listType=map + // +listMapKey=name + // +optional + repeated MatchCondition matchConditions = 6; + + // reinvocationPolicy indicates whether mutations may be called multiple times per MutatingAdmissionPolicyBinding + // as part of a single admission evaluation. + // Allowed values are "Never" and "IfNeeded". + // + // Never: These mutations will not be called more than once per binding in a single admission evaluation. + // + // IfNeeded: These mutations may be invoked more than once per binding for a single admission request and there is no guarantee of + // order with respect to other admission plugins, admission webhooks, bindings of this policy and admission policies. Mutations are only + // reinvoked when mutations change the object after this mutation is invoked. + // Required. + optional string reinvocationPolicy = 7; +} + +// Mutation specifies the CEL expression which is used to apply the Mutation. 
+message Mutation { + // patchType indicates the patch strategy used. + // Allowed values are "ApplyConfiguration" and "JSONPatch". + // Required. + // + // +unionDiscriminator + optional string patchType = 2; + + // applyConfiguration defines the desired configuration values of an object. + // The configuration is applied to the admission object using + // [structured merge diff](https://github.com/kubernetes-sigs/structured-merge-diff). + // A CEL expression is used to create apply configuration. + optional ApplyConfiguration applyConfiguration = 3; + + // jsonPatch defines a [JSON patch](https://jsonpatch.com/) operation to perform a mutation to the object. + // A CEL expression is used to create the JSON patch. + optional JSONPatch jsonPatch = 4; +} + // NamedRuleWithOperations is a tuple of Operations and Resources with ResourceNames. // +structType=atomic message NamedRuleWithOperations { diff --git a/vendor/k8s.io/api/admissionregistration/v1alpha1/register.go b/vendor/k8s.io/api/admissionregistration/v1alpha1/register.go index d4c2fbe80..eead376cc 100644 --- a/vendor/k8s.io/api/admissionregistration/v1alpha1/register.go +++ b/vendor/k8s.io/api/admissionregistration/v1alpha1/register.go @@ -50,6 +50,10 @@ func addKnownTypes(scheme *runtime.Scheme) error { &ValidatingAdmissionPolicyList{}, &ValidatingAdmissionPolicyBinding{}, &ValidatingAdmissionPolicyBindingList{}, + &MutatingAdmissionPolicy{}, + &MutatingAdmissionPolicyList{}, + &MutatingAdmissionPolicyBinding{}, + &MutatingAdmissionPolicyBindingList{}, ) metav1.AddToGroupVersion(scheme, SchemeGroupVersion) return nil diff --git a/vendor/k8s.io/api/admissionregistration/v1alpha1/types.go b/vendor/k8s.io/api/admissionregistration/v1alpha1/types.go index 78d918bc7..ee50fbe2d 100644 --- a/vendor/k8s.io/api/admissionregistration/v1alpha1/types.go +++ b/vendor/k8s.io/api/admissionregistration/v1alpha1/types.go @@ -663,3 +663,346 @@ const ( Delete OperationType = v1.Delete Connect OperationType = v1.Connect ) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.32 + +// MutatingAdmissionPolicy describes the definition of an admission mutation policy that mutates the object coming into admission chain. +type MutatingAdmissionPolicy struct { + metav1.TypeMeta `json:",inline"` + // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // Specification of the desired behavior of the MutatingAdmissionPolicy. + Spec MutatingAdmissionPolicySpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.32 + +// MutatingAdmissionPolicyList is a list of MutatingAdmissionPolicy. +type MutatingAdmissionPolicyList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // List of ValidatingAdmissionPolicy. + Items []MutatingAdmissionPolicy `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// MutatingAdmissionPolicySpec is the specification of the desired behavior of the admission policy. 
+type MutatingAdmissionPolicySpec struct { + // paramKind specifies the kind of resources used to parameterize this policy. + // If absent, there are no parameters for this policy and the param CEL variable will not be provided to validation expressions. + // If paramKind refers to a non-existent kind, this policy definition is mis-configured and the FailurePolicy is applied. + // If paramKind is specified but paramRef is unset in MutatingAdmissionPolicyBinding, the params variable will be null. + // +optional + ParamKind *ParamKind `json:"paramKind,omitempty" protobuf:"bytes,1,rep,name=paramKind"` + + // matchConstraints specifies what resources this policy is designed to validate. + // The MutatingAdmissionPolicy cares about a request if it matches _all_ Constraints. + // However, in order to prevent clusters from being put into an unstable state that cannot be recovered from via the API + // MutatingAdmissionPolicy cannot match MutatingAdmissionPolicy and MutatingAdmissionPolicyBinding. + // The CREATE, UPDATE and CONNECT operations are allowed. The DELETE operation may not be matched. + // '*' matches CREATE, UPDATE and CONNECT. + // Required. + MatchConstraints *MatchResources `json:"matchConstraints,omitempty" protobuf:"bytes,2,rep,name=matchConstraints"` + + // variables contain definitions of variables that can be used in composition of other expressions. + // Each variable is defined as a named CEL expression. + // The variables defined here will be available under `variables` in other expressions of the policy + // except matchConditions because matchConditions are evaluated before the rest of the policy. + // + // The expression of a variable can refer to other variables defined earlier in the list but not those after. + // Thus, variables must be sorted by the order of first appearance and acyclic. + // +listType=atomic + // +optional + Variables []Variable `json:"variables,omitempty" protobuf:"bytes,3,rep,name=variables"` + + // mutations contain operations to perform on matching objects. + // mutations may not be empty; a minimum of one mutation is required. + // mutations are evaluated in order, and are reinvoked according to + // the reinvocationPolicy. + // The mutations of a policy are invoked for each binding of this policy + // and reinvocation of mutations occurs on a per binding basis. + // + // +listType=atomic + // +optional + Mutations []Mutation `json:"mutations,omitempty" protobuf:"bytes,4,rep,name=mutations"` + + // failurePolicy defines how to handle failures for the admission policy. Failures can + // occur from CEL expression parse errors, type check errors, runtime errors and invalid + // or mis-configured policy definitions or bindings. + // + // A policy is invalid if paramKind refers to a non-existent Kind. + // A binding is invalid if paramRef.name refers to a non-existent resource. + // + // failurePolicy does not define how validations that evaluate to false are handled. + // + // Allowed values are Ignore or Fail. Defaults to Fail. + // +optional + FailurePolicy *FailurePolicyType `json:"failurePolicy,omitempty" protobuf:"bytes,5,opt,name=failurePolicy,casttype=FailurePolicyType"` + + // matchConditions is a list of conditions that must be met for a request to be validated. + // Match conditions filter requests that have already been matched by the matchConstraints. + // An empty list of matchConditions matches all requests. + // There are a maximum of 64 match conditions allowed. 
+ // + // If a parameter object is provided, it can be accessed via the `params` handle in the same + // manner as validation expressions. + // + // The exact matching logic is (in order): + // 1. If ANY matchCondition evaluates to FALSE, the policy is skipped. + // 2. If ALL matchConditions evaluate to TRUE, the policy is evaluated. + // 3. If any matchCondition evaluates to an error (but none are FALSE): + // - If failurePolicy=Fail, reject the request + // - If failurePolicy=Ignore, the policy is skipped + // + // +patchMergeKey=name + // +patchStrategy=merge + // +listType=map + // +listMapKey=name + // +optional + MatchConditions []MatchCondition `json:"matchConditions,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,6,rep,name=matchConditions"` + + // reinvocationPolicy indicates whether mutations may be called multiple times per MutatingAdmissionPolicyBinding + // as part of a single admission evaluation. + // Allowed values are "Never" and "IfNeeded". + // + // Never: These mutations will not be called more than once per binding in a single admission evaluation. + // + // IfNeeded: These mutations may be invoked more than once per binding for a single admission request and there is no guarantee of + // order with respect to other admission plugins, admission webhooks, bindings of this policy and admission policies. Mutations are only + // reinvoked when mutations change the object after this mutation is invoked. + // Required. + ReinvocationPolicy ReinvocationPolicyType `json:"reinvocationPolicy,omitempty" protobuf:"bytes,7,opt,name=reinvocationPolicy,casttype=ReinvocationPolicyType"` +} + +// Mutation specifies the CEL expression which is used to apply the Mutation. +type Mutation struct { + // patchType indicates the patch strategy used. + // Allowed values are "ApplyConfiguration" and "JSONPatch". + // Required. + // + // +unionDiscriminator + PatchType PatchType `json:"patchType" protobuf:"bytes,2,opt,name=patchType,casttype=PatchType"` + + // applyConfiguration defines the desired configuration values of an object. + // The configuration is applied to the admission object using + // [structured merge diff](https://github.com/kubernetes-sigs/structured-merge-diff). + // A CEL expression is used to create apply configuration. + ApplyConfiguration *ApplyConfiguration `json:"applyConfiguration,omitempty" protobuf:"bytes,3,opt,name=applyConfiguration"` + + // jsonPatch defines a [JSON patch](https://jsonpatch.com/) operation to perform a mutation to the object. + // A CEL expression is used to create the JSON patch. + JSONPatch *JSONPatch `json:"jsonPatch,omitempty" protobuf:"bytes,4,opt,name=jsonPatch"` +} + +// PatchType specifies the type of patch operation for a mutation. +// +enum +type PatchType string + +const ( + // ApplyConfiguration indicates that the mutation is using apply configuration to mutate the object. + PatchTypeApplyConfiguration PatchType = "ApplyConfiguration" + // JSONPatch indicates that the object is mutated through JSON Patch. + PatchTypeJSONPatch PatchType = "JSONPatch" +) + +// ApplyConfiguration defines the desired configuration values of an object. +type ApplyConfiguration struct { + // expression will be evaluated by CEL to create an apply configuration. + // ref: https://github.com/google/cel-spec + // + // Apply configurations are declared in CEL using object initialization. 
For example, this CEL expression + // returns an apply configuration to set a single field: + // + // Object{ + // spec: Object.spec{ + // serviceAccountName: "example" + // } + // } + // + // Apply configurations may not modify atomic structs, maps or arrays due to the risk of accidental deletion of + // values not included in the apply configuration. + // + // CEL expressions have access to the object types needed to create apply configurations: + // + // - 'Object' - CEL type of the resource object. + // - 'Object.' - CEL type of object field (such as 'Object.spec') + // - 'Object.....` - CEL type of nested field (such as 'Object.spec.containers') + // + // CEL expressions have access to the contents of the API request, organized into CEL variables as well as some other useful variables: + // + // - 'object' - The object from the incoming request. The value is null for DELETE requests. + // - 'oldObject' - The existing object. The value is null for CREATE requests. + // - 'request' - Attributes of the API request([ref](/pkg/apis/admission/types.go#AdmissionRequest)). + // - 'params' - Parameter resource referred to by the policy binding being evaluated. Only populated if the policy has a ParamKind. + // - 'namespaceObject' - The namespace object that the incoming object belongs to. The value is null for cluster-scoped resources. + // - 'variables' - Map of composited variables, from its name to its lazily evaluated value. + // For example, a variable named 'foo' can be accessed as 'variables.foo'. + // - 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request. + // See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz + // - 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the + // request resource. + // + // The `apiVersion`, `kind`, `metadata.name` and `metadata.generateName` are always accessible from the root of the + // object. No other metadata properties are accessible. + // + // Only property names of the form `[a-zA-Z_.-/][a-zA-Z0-9_.-/]*` are accessible. + // Required. + Expression string `json:"expression,omitempty" protobuf:"bytes,1,opt,name=expression"` +} + +// JSONPatch defines a JSON Patch. +type JSONPatch struct { + // expression will be evaluated by CEL to create a [JSON patch](https://jsonpatch.com/). + // ref: https://github.com/google/cel-spec + // + // expression must return an array of JSONPatch values. + // + // For example, this CEL expression returns a JSON patch to conditionally modify a value: + // + // [ + // JSONPatch{op: "test", path: "/spec/example", value: "Red"}, + // JSONPatch{op: "replace", path: "/spec/example", value: "Green"} + // ] + // + // To define an object for the patch value, use Object types. For example: + // + // [ + // JSONPatch{ + // op: "add", + // path: "/spec/selector", + // value: Object.spec.selector{matchLabels: {"environment": "test"}} + // } + // ] + // + // To use strings containing '/' and '~' as JSONPatch path keys, use "jsonpatch.escapeKey". For example: + // + // [ + // JSONPatch{ + // op: "add", + // path: "/metadata/labels/" + jsonpatch.escapeKey("example.com/environment"), + // value: "test" + // }, + // ] + // + // CEL expressions have access to the types needed to create JSON patches and objects: + // + // - 'JSONPatch' - CEL type of JSON Patch operations. JSONPatch has the fields 'op', 'from', 'path' and 'value'. + // See [JSON patch](https://jsonpatch.com/) for more details. 
The 'value' field may be set to any of: string, + // integer, array, map or object. If set, the 'path' and 'from' fields must be set to a + // [JSON pointer](https://datatracker.ietf.org/doc/html/rfc6901/) string, where the 'jsonpatch.escapeKey()' CEL + // function may be used to escape path keys containing '/' and '~'. + // - 'Object' - CEL type of the resource object. + // - 'Object.' - CEL type of object field (such as 'Object.spec') + // - 'Object.....` - CEL type of nested field (such as 'Object.spec.containers') + // + // CEL expressions have access to the contents of the API request, organized into CEL variables as well as some other useful variables: + // + // - 'object' - The object from the incoming request. The value is null for DELETE requests. + // - 'oldObject' - The existing object. The value is null for CREATE requests. + // - 'request' - Attributes of the API request([ref](/pkg/apis/admission/types.go#AdmissionRequest)). + // - 'params' - Parameter resource referred to by the policy binding being evaluated. Only populated if the policy has a ParamKind. + // - 'namespaceObject' - The namespace object that the incoming object belongs to. The value is null for cluster-scoped resources. + // - 'variables' - Map of composited variables, from its name to its lazily evaluated value. + // For example, a variable named 'foo' can be accessed as 'variables.foo'. + // - 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request. + // See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz + // - 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the + // request resource. + // + // CEL expressions have access to [Kubernetes CEL function libraries](https://kubernetes.io/docs/reference/using-api/cel/#cel-options-language-features-and-libraries) + // as well as: + // + // - 'jsonpatch.escapeKey' - Performs JSONPatch key escaping. '~' and '/' are escaped as '~0' and `~1' respectively). + // + // + // Only property names of the form `[a-zA-Z_.-/][a-zA-Z0-9_.-/]*` are accessible. + // Required. + Expression string `json:"expression,omitempty" protobuf:"bytes,1,opt,name=expression"` +} + +// ReinvocationPolicyType specifies what type of policy the admission mutation uses. +// +enum +type ReinvocationPolicyType = v1.ReinvocationPolicyType + +const ( + // NeverReinvocationPolicy indicates that the mutation must not be called more than once in a + // single admission evaluation. + NeverReinvocationPolicy ReinvocationPolicyType = v1.NeverReinvocationPolicy + // IfNeededReinvocationPolicy indicates that the mutation may be called at least one + // additional time as part of the admission evaluation if the object being admitted is + // modified by other admission plugins after the initial mutation call. + IfNeededReinvocationPolicy ReinvocationPolicyType = v1.IfNeededReinvocationPolicy +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.32 + +// MutatingAdmissionPolicyBinding binds the MutatingAdmissionPolicy with parametrized resources. +// MutatingAdmissionPolicyBinding and the optional parameter resource together define how cluster administrators +// configure policies for clusters. 
+// +// For a given admission request, each binding will cause its policy to be +// evaluated N times, where N is 1 for policies/bindings that don't use +// params, otherwise N is the number of parameters selected by the binding. +// Each evaluation is constrained by a [runtime cost budget](https://kubernetes.io/docs/reference/using-api/cel/#runtime-cost-budget). +// +// Adding/removing policies, bindings, or params can not affect whether a +// given (policy, binding, param) combination is within its own CEL budget. +type MutatingAdmissionPolicyBinding struct { + metav1.TypeMeta `json:",inline"` + // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // Specification of the desired behavior of the MutatingAdmissionPolicyBinding. + Spec MutatingAdmissionPolicyBindingSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.32 + +// MutatingAdmissionPolicyBindingList is a list of MutatingAdmissionPolicyBinding. +type MutatingAdmissionPolicyBindingList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // List of PolicyBinding. + Items []MutatingAdmissionPolicyBinding `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// MutatingAdmissionPolicyBindingSpec is the specification of the MutatingAdmissionPolicyBinding. +type MutatingAdmissionPolicyBindingSpec struct { + // policyName references a MutatingAdmissionPolicy name which the MutatingAdmissionPolicyBinding binds to. + // If the referenced resource does not exist, this binding is considered invalid and will be ignored + // Required. + PolicyName string `json:"policyName,omitempty" protobuf:"bytes,1,rep,name=policyName"` + + // paramRef specifies the parameter resource used to configure the admission control policy. + // It should point to a resource of the type specified in spec.ParamKind of the bound MutatingAdmissionPolicy. + // If the policy specifies a ParamKind and the resource referred to by ParamRef does not exist, this binding is considered mis-configured and the FailurePolicy of the MutatingAdmissionPolicy applied. + // If the policy does not specify a ParamKind then this field is ignored, and the rules are evaluated without a param. + // +optional + ParamRef *ParamRef `json:"paramRef,omitempty" protobuf:"bytes,2,rep,name=paramRef"` + + // matchResources limits what resources match this binding and may be mutated by it. + // Note that if matchResources matches a resource, the resource must also match a policy's matchConstraints and + // matchConditions before the resource may be mutated. + // When matchResources is unset, it does not constrain resource matching, and only the policy's matchConstraints + // and matchConditions must match for the resource to be mutated. + // Additionally, matchResources.resourceRules are optional and do not constraint matching when unset. + // Note that this is differs from MutatingAdmissionPolicy matchConstraints, where resourceRules are required. + // The CREATE, UPDATE and CONNECT operations are allowed. The DELETE operation may not be matched. 
+ // '*' matches CREATE, UPDATE and CONNECT. + // +optional + MatchResources *MatchResources `json:"matchResources,omitempty" protobuf:"bytes,3,rep,name=matchResources"` +} diff --git a/vendor/k8s.io/api/admissionregistration/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/admissionregistration/v1alpha1/types_swagger_doc_generated.go index dcf46b324..32222a81b 100644 --- a/vendor/k8s.io/api/admissionregistration/v1alpha1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/admissionregistration/v1alpha1/types_swagger_doc_generated.go @@ -27,6 +27,15 @@ package v1alpha1 // Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_ApplyConfiguration = map[string]string{ + "": "ApplyConfiguration defines the desired configuration values of an object.", + "expression": "expression will be evaluated by CEL to create an apply configuration. ref: https://github.com/google/cel-spec\n\nApply configurations are declared in CEL using object initialization. For example, this CEL expression returns an apply configuration to set a single field:\n\n\tObject{\n\t spec: Object.spec{\n\t serviceAccountName: \"example\"\n\t }\n\t}\n\nApply configurations may not modify atomic structs, maps or arrays due to the risk of accidental deletion of values not included in the apply configuration.\n\nCEL expressions have access to the object types needed to create apply configurations:\n\n- 'Object' - CEL type of the resource object. - 'Object.' - CEL type of object field (such as 'Object.spec') - 'Object.....` - CEL type of nested field (such as 'Object.spec.containers')\n\nCEL expressions have access to the contents of the API request, organized into CEL variables as well as some other useful variables:\n\n- 'object' - The object from the incoming request. The value is null for DELETE requests. - 'oldObject' - The existing object. The value is null for CREATE requests. - 'request' - Attributes of the API request([ref](/pkg/apis/admission/types.go#AdmissionRequest)). - 'params' - Parameter resource referred to by the policy binding being evaluated. Only populated if the policy has a ParamKind. - 'namespaceObject' - The namespace object that the incoming object belongs to. The value is null for cluster-scoped resources. - 'variables' - Map of composited variables, from its name to its lazily evaluated value.\n For example, a variable named 'foo' can be accessed as 'variables.foo'.\n- 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request.\n See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz\n- 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the\n request resource.\n\nThe `apiVersion`, `kind`, `metadata.name` and `metadata.generateName` are always accessible from the root of the object. No other metadata properties are accessible.\n\nOnly property names of the form `[a-zA-Z_.-/][a-zA-Z0-9_.-/]*` are accessible. Required.", +} + +func (ApplyConfiguration) SwaggerDoc() map[string]string { + return map_ApplyConfiguration +} + var map_AuditAnnotation = map[string]string{ "": "AuditAnnotation describes how to produce an audit annotation for an API request.", "key": "key specifies the audit annotation key. The audit annotation keys of a ValidatingAdmissionPolicy must be unique. 
The key must be a qualified name ([A-Za-z0-9][-A-Za-z0-9_.]*) no more than 63 bytes in length.\n\nThe key is combined with the resource name of the ValidatingAdmissionPolicy to construct an audit annotation key: \"{ValidatingAdmissionPolicy name}/{key}\".\n\nIf an admission webhook uses the same resource name as this ValidatingAdmissionPolicy and the same audit annotation key, the annotation key will be identical. In this case, the first annotation written with the key will be included in the audit event and all subsequent annotations with the same key will be discarded.\n\nRequired.", @@ -47,6 +56,15 @@ func (ExpressionWarning) SwaggerDoc() map[string]string { return map_ExpressionWarning } +var map_JSONPatch = map[string]string{ + "": "JSONPatch defines a JSON Patch.", + "expression": "expression will be evaluated by CEL to create a [JSON patch](https://jsonpatch.com/). ref: https://github.com/google/cel-spec\n\nexpression must return an array of JSONPatch values.\n\nFor example, this CEL expression returns a JSON patch to conditionally modify a value:\n\n\t [\n\t JSONPatch{op: \"test\", path: \"/spec/example\", value: \"Red\"},\n\t JSONPatch{op: \"replace\", path: \"/spec/example\", value: \"Green\"}\n\t ]\n\nTo define an object for the patch value, use Object types. For example:\n\n\t [\n\t JSONPatch{\n\t op: \"add\",\n\t path: \"/spec/selector\",\n\t value: Object.spec.selector{matchLabels: {\"environment\": \"test\"}}\n\t }\n\t ]\n\nTo use strings containing '/' and '~' as JSONPatch path keys, use \"jsonpatch.escapeKey\". For example:\n\n\t [\n\t JSONPatch{\n\t op: \"add\",\n\t path: \"/metadata/labels/\" + jsonpatch.escapeKey(\"example.com/environment\"),\n\t value: \"test\"\n\t },\n\t ]\n\nCEL expressions have access to the types needed to create JSON patches and objects:\n\n- 'JSONPatch' - CEL type of JSON Patch operations. JSONPatch has the fields 'op', 'from', 'path' and 'value'.\n See [JSON patch](https://jsonpatch.com/) for more details. The 'value' field may be set to any of: string,\n integer, array, map or object. If set, the 'path' and 'from' fields must be set to a\n [JSON pointer](https://datatracker.ietf.org/doc/html/rfc6901/) string, where the 'jsonpatch.escapeKey()' CEL\n function may be used to escape path keys containing '/' and '~'.\n- 'Object' - CEL type of the resource object. - 'Object.' - CEL type of object field (such as 'Object.spec') - 'Object.....` - CEL type of nested field (such as 'Object.spec.containers')\n\nCEL expressions have access to the contents of the API request, organized into CEL variables as well as some other useful variables:\n\n- 'object' - The object from the incoming request. The value is null for DELETE requests. - 'oldObject' - The existing object. The value is null for CREATE requests. - 'request' - Attributes of the API request([ref](/pkg/apis/admission/types.go#AdmissionRequest)). - 'params' - Parameter resource referred to by the policy binding being evaluated. Only populated if the policy has a ParamKind. - 'namespaceObject' - The namespace object that the incoming object belongs to. The value is null for cluster-scoped resources. - 'variables' - Map of composited variables, from its name to its lazily evaluated value.\n For example, a variable named 'foo' can be accessed as 'variables.foo'.\n- 'authorizer' - A CEL Authorizer. 
May be used to perform authorization checks for the principal (user or service account) of the request.\n See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz\n- 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the\n request resource.\n\nCEL expressions have access to [Kubernetes CEL function libraries](https://kubernetes.io/docs/reference/using-api/cel/#cel-options-language-features-and-libraries) as well as:\n\n- 'jsonpatch.escapeKey' - Performs JSONPatch key escaping. '~' and '/' are escaped as '~0' and `~1' respectively).\n\nOnly property names of the form `[a-zA-Z_.-/][a-zA-Z0-9_.-/]*` are accessible. Required.", +} + +func (JSONPatch) SwaggerDoc() map[string]string { + return map_JSONPatch +} + var map_MatchResources = map[string]string{ "": "MatchResources decides whether to run the admission control policy on an object based on whether it meets the match criteria. The exclude rules take precedence over include rules (if a resource matches both, it is excluded)", "namespaceSelector": "NamespaceSelector decides whether to run the admission control policy on an object based on whether the namespace for that object matches the selector. If the object itself is a namespace, the matching is performed on object.metadata.labels. If the object is another cluster scoped resource, it never skips the policy.\n\nFor example, to run the webhook on any objects whose namespace is not associated with \"runlevel\" of \"0\" or \"1\"; you will set the selector as follows: \"namespaceSelector\": {\n \"matchExpressions\": [\n {\n \"key\": \"runlevel\",\n \"operator\": \"NotIn\",\n \"values\": [\n \"0\",\n \"1\"\n ]\n }\n ]\n}\n\nIf instead you want to only run the policy on any objects whose namespace is associated with the \"environment\" of \"prod\" or \"staging\"; you will set the selector as follows: \"namespaceSelector\": {\n \"matchExpressions\": [\n {\n \"key\": \"environment\",\n \"operator\": \"In\",\n \"values\": [\n \"prod\",\n \"staging\"\n ]\n }\n ]\n}\n\nSee https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ for more examples of label selectors.\n\nDefault to the empty LabelSelector, which matches everything.", @@ -60,6 +78,83 @@ func (MatchResources) SwaggerDoc() map[string]string { return map_MatchResources } +var map_MutatingAdmissionPolicy = map[string]string{ + "": "MutatingAdmissionPolicy describes the definition of an admission mutation policy that mutates the object coming into admission chain.", + "metadata": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.", + "spec": "Specification of the desired behavior of the MutatingAdmissionPolicy.", +} + +func (MutatingAdmissionPolicy) SwaggerDoc() map[string]string { + return map_MutatingAdmissionPolicy +} + +var map_MutatingAdmissionPolicyBinding = map[string]string{ + "": "MutatingAdmissionPolicyBinding binds the MutatingAdmissionPolicy with parametrized resources. MutatingAdmissionPolicyBinding and the optional parameter resource together define how cluster administrators configure policies for clusters.\n\nFor a given admission request, each binding will cause its policy to be evaluated N times, where N is 1 for policies/bindings that don't use params, otherwise N is the number of parameters selected by the binding. 
Each evaluation is constrained by a [runtime cost budget](https://kubernetes.io/docs/reference/using-api/cel/#runtime-cost-budget).\n\nAdding/removing policies, bindings, or params can not affect whether a given (policy, binding, param) combination is within its own CEL budget.", + "metadata": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.", + "spec": "Specification of the desired behavior of the MutatingAdmissionPolicyBinding.", +} + +func (MutatingAdmissionPolicyBinding) SwaggerDoc() map[string]string { + return map_MutatingAdmissionPolicyBinding +} + +var map_MutatingAdmissionPolicyBindingList = map[string]string{ + "": "MutatingAdmissionPolicyBindingList is a list of MutatingAdmissionPolicyBinding.", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "items": "List of PolicyBinding.", +} + +func (MutatingAdmissionPolicyBindingList) SwaggerDoc() map[string]string { + return map_MutatingAdmissionPolicyBindingList +} + +var map_MutatingAdmissionPolicyBindingSpec = map[string]string{ + "": "MutatingAdmissionPolicyBindingSpec is the specification of the MutatingAdmissionPolicyBinding.", + "policyName": "policyName references a MutatingAdmissionPolicy name which the MutatingAdmissionPolicyBinding binds to. If the referenced resource does not exist, this binding is considered invalid and will be ignored Required.", + "paramRef": "paramRef specifies the parameter resource used to configure the admission control policy. It should point to a resource of the type specified in spec.ParamKind of the bound MutatingAdmissionPolicy. If the policy specifies a ParamKind and the resource referred to by ParamRef does not exist, this binding is considered mis-configured and the FailurePolicy of the MutatingAdmissionPolicy applied. If the policy does not specify a ParamKind then this field is ignored, and the rules are evaluated without a param.", + "matchResources": "matchResources limits what resources match this binding and may be mutated by it. Note that if matchResources matches a resource, the resource must also match a policy's matchConstraints and matchConditions before the resource may be mutated. When matchResources is unset, it does not constrain resource matching, and only the policy's matchConstraints and matchConditions must match for the resource to be mutated. Additionally, matchResources.resourceRules are optional and do not constraint matching when unset. Note that this is differs from MutatingAdmissionPolicy matchConstraints, where resourceRules are required. The CREATE, UPDATE and CONNECT operations are allowed. The DELETE operation may not be matched. '*' matches CREATE, UPDATE and CONNECT.", +} + +func (MutatingAdmissionPolicyBindingSpec) SwaggerDoc() map[string]string { + return map_MutatingAdmissionPolicyBindingSpec +} + +var map_MutatingAdmissionPolicyList = map[string]string{ + "": "MutatingAdmissionPolicyList is a list of MutatingAdmissionPolicy.", + "metadata": "Standard list metadata. 
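A minimal binding for the sketch above, using the MutatingAdmissionPolicyBindingSpec fields documented here, could look as follows; paramRef is omitted because the hypothetical policy declares no paramKind.

apiVersion: admissionregistration.k8s.io/v1alpha1
kind: MutatingAdmissionPolicyBinding
metadata:
  name: add-environment-label-binding    # hypothetical name
spec:
  policyName: add-environment-label      # must reference an existing MutatingAdmissionPolicy
  matchResources:                        # optional narrowing on top of the policy's matchConstraints
    namespaceSelector:
      matchLabels:
        environment: test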
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "items": "List of ValidatingAdmissionPolicy.", +} + +func (MutatingAdmissionPolicyList) SwaggerDoc() map[string]string { + return map_MutatingAdmissionPolicyList +} + +var map_MutatingAdmissionPolicySpec = map[string]string{ + "": "MutatingAdmissionPolicySpec is the specification of the desired behavior of the admission policy.", + "paramKind": "paramKind specifies the kind of resources used to parameterize this policy. If absent, there are no parameters for this policy and the param CEL variable will not be provided to validation expressions. If paramKind refers to a non-existent kind, this policy definition is mis-configured and the FailurePolicy is applied. If paramKind is specified but paramRef is unset in MutatingAdmissionPolicyBinding, the params variable will be null.", + "matchConstraints": "matchConstraints specifies what resources this policy is designed to validate. The MutatingAdmissionPolicy cares about a request if it matches _all_ Constraints. However, in order to prevent clusters from being put into an unstable state that cannot be recovered from via the API MutatingAdmissionPolicy cannot match MutatingAdmissionPolicy and MutatingAdmissionPolicyBinding. The CREATE, UPDATE and CONNECT operations are allowed. The DELETE operation may not be matched. '*' matches CREATE, UPDATE and CONNECT. Required.", + "variables": "variables contain definitions of variables that can be used in composition of other expressions. Each variable is defined as a named CEL expression. The variables defined here will be available under `variables` in other expressions of the policy except matchConditions because matchConditions are evaluated before the rest of the policy.\n\nThe expression of a variable can refer to other variables defined earlier in the list but not those after. Thus, variables must be sorted by the order of first appearance and acyclic.", + "mutations": "mutations contain operations to perform on matching objects. mutations may not be empty; a minimum of one mutation is required. mutations are evaluated in order, and are reinvoked according to the reinvocationPolicy. The mutations of a policy are invoked for each binding of this policy and reinvocation of mutations occurs on a per binding basis.", + "failurePolicy": "failurePolicy defines how to handle failures for the admission policy. Failures can occur from CEL expression parse errors, type check errors, runtime errors and invalid or mis-configured policy definitions or bindings.\n\nA policy is invalid if paramKind refers to a non-existent Kind. A binding is invalid if paramRef.name refers to a non-existent resource.\n\nfailurePolicy does not define how validations that evaluate to false are handled.\n\nAllowed values are Ignore or Fail. Defaults to Fail.", + "matchConditions": "matchConditions is a list of conditions that must be met for a request to be validated. Match conditions filter requests that have already been matched by the matchConstraints. An empty list of matchConditions matches all requests. There are a maximum of 64 match conditions allowed.\n\nIf a parameter object is provided, it can be accessed via the `params` handle in the same manner as validation expressions.\n\nThe exact matching logic is (in order):\n 1. If ANY matchCondition evaluates to FALSE, the policy is skipped.\n 2. If ALL matchConditions evaluate to TRUE, the policy is evaluated.\n 3. 
If any matchCondition evaluates to an error (but none are FALSE):\n - If failurePolicy=Fail, reject the request\n - If failurePolicy=Ignore, the policy is skipped", + "reinvocationPolicy": "reinvocationPolicy indicates whether mutations may be called multiple times per MutatingAdmissionPolicyBinding as part of a single admission evaluation. Allowed values are \"Never\" and \"IfNeeded\".\n\nNever: These mutations will not be called more than once per binding in a single admission evaluation.\n\nIfNeeded: These mutations may be invoked more than once per binding for a single admission request and there is no guarantee of order with respect to other admission plugins, admission webhooks, bindings of this policy and admission policies. Mutations are only reinvoked when mutations change the object after this mutation is invoked. Required.", +} + +func (MutatingAdmissionPolicySpec) SwaggerDoc() map[string]string { + return map_MutatingAdmissionPolicySpec +} + +var map_Mutation = map[string]string{ + "": "Mutation specifies the CEL expression which is used to apply the Mutation.", + "patchType": "patchType indicates the patch strategy used. Allowed values are \"ApplyConfiguration\" and \"JSONPatch\". Required.", + "applyConfiguration": "applyConfiguration defines the desired configuration values of an object. The configuration is applied to the admission object using [structured merge diff](https://github.com/kubernetes-sigs/structured-merge-diff). A CEL expression is used to create apply configuration.", + "jsonPatch": "jsonPatch defines a [JSON patch](https://jsonpatch.com/) operation to perform a mutation to the object. A CEL expression is used to create the JSON patch.", +} + +func (Mutation) SwaggerDoc() map[string]string { + return map_Mutation +} + var map_NamedRuleWithOperations = map[string]string{ "": "NamedRuleWithOperations is a tuple of Operations and Resources with ResourceNames.", "resourceNames": "ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed.", diff --git a/vendor/k8s.io/api/admissionregistration/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/admissionregistration/v1alpha1/zz_generated.deepcopy.go index 24cd0e4e9..97c159c74 100644 --- a/vendor/k8s.io/api/admissionregistration/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/admissionregistration/v1alpha1/zz_generated.deepcopy.go @@ -26,6 +26,22 @@ import ( runtime "k8s.io/apimachinery/pkg/runtime" ) +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ApplyConfiguration) DeepCopyInto(out *ApplyConfiguration) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApplyConfiguration. +func (in *ApplyConfiguration) DeepCopy() *ApplyConfiguration { + if in == nil { + return nil + } + out := new(ApplyConfiguration) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *AuditAnnotation) DeepCopyInto(out *AuditAnnotation) { *out = *in @@ -58,6 +74,22 @@ func (in *ExpressionWarning) DeepCopy() *ExpressionWarning { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *JSONPatch) DeepCopyInto(out *JSONPatch) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JSONPatch. +func (in *JSONPatch) DeepCopy() *JSONPatch { + if in == nil { + return nil + } + out := new(JSONPatch) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *MatchCondition) DeepCopyInto(out *MatchCondition) { *out = *in @@ -119,6 +151,226 @@ func (in *MatchResources) DeepCopy() *MatchResources { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MutatingAdmissionPolicy) DeepCopyInto(out *MutatingAdmissionPolicy) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MutatingAdmissionPolicy. +func (in *MutatingAdmissionPolicy) DeepCopy() *MutatingAdmissionPolicy { + if in == nil { + return nil + } + out := new(MutatingAdmissionPolicy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *MutatingAdmissionPolicy) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MutatingAdmissionPolicyBinding) DeepCopyInto(out *MutatingAdmissionPolicyBinding) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MutatingAdmissionPolicyBinding. +func (in *MutatingAdmissionPolicyBinding) DeepCopy() *MutatingAdmissionPolicyBinding { + if in == nil { + return nil + } + out := new(MutatingAdmissionPolicyBinding) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *MutatingAdmissionPolicyBinding) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MutatingAdmissionPolicyBindingList) DeepCopyInto(out *MutatingAdmissionPolicyBindingList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]MutatingAdmissionPolicyBinding, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MutatingAdmissionPolicyBindingList. +func (in *MutatingAdmissionPolicyBindingList) DeepCopy() *MutatingAdmissionPolicyBindingList { + if in == nil { + return nil + } + out := new(MutatingAdmissionPolicyBindingList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *MutatingAdmissionPolicyBindingList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MutatingAdmissionPolicyBindingSpec) DeepCopyInto(out *MutatingAdmissionPolicyBindingSpec) { + *out = *in + if in.ParamRef != nil { + in, out := &in.ParamRef, &out.ParamRef + *out = new(ParamRef) + (*in).DeepCopyInto(*out) + } + if in.MatchResources != nil { + in, out := &in.MatchResources, &out.MatchResources + *out = new(MatchResources) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MutatingAdmissionPolicyBindingSpec. +func (in *MutatingAdmissionPolicyBindingSpec) DeepCopy() *MutatingAdmissionPolicyBindingSpec { + if in == nil { + return nil + } + out := new(MutatingAdmissionPolicyBindingSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MutatingAdmissionPolicyList) DeepCopyInto(out *MutatingAdmissionPolicyList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]MutatingAdmissionPolicy, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MutatingAdmissionPolicyList. +func (in *MutatingAdmissionPolicyList) DeepCopy() *MutatingAdmissionPolicyList { + if in == nil { + return nil + } + out := new(MutatingAdmissionPolicyList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *MutatingAdmissionPolicyList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MutatingAdmissionPolicySpec) DeepCopyInto(out *MutatingAdmissionPolicySpec) { + *out = *in + if in.ParamKind != nil { + in, out := &in.ParamKind, &out.ParamKind + *out = new(ParamKind) + **out = **in + } + if in.MatchConstraints != nil { + in, out := &in.MatchConstraints, &out.MatchConstraints + *out = new(MatchResources) + (*in).DeepCopyInto(*out) + } + if in.Variables != nil { + in, out := &in.Variables, &out.Variables + *out = make([]Variable, len(*in)) + copy(*out, *in) + } + if in.Mutations != nil { + in, out := &in.Mutations, &out.Mutations + *out = make([]Mutation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.FailurePolicy != nil { + in, out := &in.FailurePolicy, &out.FailurePolicy + *out = new(FailurePolicyType) + **out = **in + } + if in.MatchConditions != nil { + in, out := &in.MatchConditions, &out.MatchConditions + *out = make([]MatchCondition, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MutatingAdmissionPolicySpec. +func (in *MutatingAdmissionPolicySpec) DeepCopy() *MutatingAdmissionPolicySpec { + if in == nil { + return nil + } + out := new(MutatingAdmissionPolicySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *Mutation) DeepCopyInto(out *Mutation) { + *out = *in + if in.ApplyConfiguration != nil { + in, out := &in.ApplyConfiguration, &out.ApplyConfiguration + *out = new(ApplyConfiguration) + **out = **in + } + if in.JSONPatch != nil { + in, out := &in.JSONPatch, &out.JSONPatch + *out = new(JSONPatch) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Mutation. +func (in *Mutation) DeepCopy() *Mutation { + if in == nil { + return nil + } + out := new(Mutation) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *NamedRuleWithOperations) DeepCopyInto(out *NamedRuleWithOperations) { *out = *in diff --git a/vendor/k8s.io/api/admissionregistration/v1alpha1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/admissionregistration/v1alpha1/zz_generated.prerelease-lifecycle.go new file mode 100644 index 000000000..91c813d5f --- /dev/null +++ b/vendor/k8s.io/api/admissionregistration/v1alpha1/zz_generated.prerelease-lifecycle.go @@ -0,0 +1,166 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by prerelease-lifecycle-gen. DO NOT EDIT. + +package v1alpha1 + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *MutatingAdmissionPolicy) APILifecycleIntroduced() (major, minor int) { + return 1, 32 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *MutatingAdmissionPolicy) APILifecycleDeprecated() (major, minor int) { + return 1, 35 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *MutatingAdmissionPolicy) APILifecycleRemoved() (major, minor int) { + return 1, 38 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. 
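The Mutation deepcopy above covers both patch styles. As a counterpart to the jsonPatch sketch earlier, an applyConfiguration-style mutation might be written roughly like this fragment of a policy spec (an illustrative sketch; the Deployment field being set and its value are hypothetical):

  mutations:
  - patchType: ApplyConfiguration
    applyConfiguration:
      # the expression returns an apply configuration that is merged into the incoming object
      expression: >-
        Object{
          spec: Object.spec{
            template: Object.spec.template{
              spec: Object.spec.template.spec{
                serviceAccountName: "limited-sa"
              }
            }
          }
        }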
+func (in *MutatingAdmissionPolicyBinding) APILifecycleIntroduced() (major, minor int) { + return 1, 32 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *MutatingAdmissionPolicyBinding) APILifecycleDeprecated() (major, minor int) { + return 1, 35 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *MutatingAdmissionPolicyBinding) APILifecycleRemoved() (major, minor int) { + return 1, 38 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *MutatingAdmissionPolicyBindingList) APILifecycleIntroduced() (major, minor int) { + return 1, 32 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *MutatingAdmissionPolicyBindingList) APILifecycleDeprecated() (major, minor int) { + return 1, 35 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *MutatingAdmissionPolicyBindingList) APILifecycleRemoved() (major, minor int) { + return 1, 38 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *MutatingAdmissionPolicyList) APILifecycleIntroduced() (major, minor int) { + return 1, 32 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *MutatingAdmissionPolicyList) APILifecycleDeprecated() (major, minor int) { + return 1, 35 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *MutatingAdmissionPolicyList) APILifecycleRemoved() (major, minor int) { + return 1, 38 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. 
+// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *ValidatingAdmissionPolicy) APILifecycleIntroduced() (major, minor int) { + return 1, 26 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *ValidatingAdmissionPolicy) APILifecycleDeprecated() (major, minor int) { + return 1, 29 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *ValidatingAdmissionPolicy) APILifecycleRemoved() (major, minor int) { + return 1, 32 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *ValidatingAdmissionPolicyBinding) APILifecycleIntroduced() (major, minor int) { + return 1, 26 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *ValidatingAdmissionPolicyBinding) APILifecycleDeprecated() (major, minor int) { + return 1, 29 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *ValidatingAdmissionPolicyBinding) APILifecycleRemoved() (major, minor int) { + return 1, 32 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *ValidatingAdmissionPolicyBindingList) APILifecycleIntroduced() (major, minor int) { + return 1, 26 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *ValidatingAdmissionPolicyBindingList) APILifecycleDeprecated() (major, minor int) { + return 1, 29 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. 
+func (in *ValidatingAdmissionPolicyBindingList) APILifecycleRemoved() (major, minor int) { + return 1, 32 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *ValidatingAdmissionPolicyList) APILifecycleIntroduced() (major, minor int) { + return 1, 26 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *ValidatingAdmissionPolicyList) APILifecycleDeprecated() (major, minor int) { + return 1, 29 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *ValidatingAdmissionPolicyList) APILifecycleRemoved() (major, minor int) { + return 1, 32 +} diff --git a/vendor/k8s.io/api/apps/v1/generated.proto b/vendor/k8s.io/api/apps/v1/generated.proto index d864f2eeb..388e638f4 100644 --- a/vendor/k8s.io/api/apps/v1/generated.proto +++ b/vendor/k8s.io/api/apps/v1/generated.proto @@ -737,8 +737,7 @@ message StatefulSetSpec { // volume claims are created as needed and retained until manually deleted. This // policy allows the lifecycle to be altered, for example by deleting persistent // volume claims when their stateful set is deleted, or when their pod is scaled - // down. This requires the StatefulSetAutoDeletePVC feature gate to be enabled, - // which is beta. + // down. // +optional optional StatefulSetPersistentVolumeClaimRetentionPolicy persistentVolumeClaimRetentionPolicy = 10; diff --git a/vendor/k8s.io/api/apps/v1/types.go b/vendor/k8s.io/api/apps/v1/types.go index e942cd526..a68690b44 100644 --- a/vendor/k8s.io/api/apps/v1/types.go +++ b/vendor/k8s.io/api/apps/v1/types.go @@ -142,7 +142,7 @@ const ( // PersistentVolumeClaims associated with StatefulSet VolumeClaimTemplates // will not be deleted. RetainPersistentVolumeClaimRetentionPolicyType PersistentVolumeClaimRetentionPolicyType = "Retain" - // RetentionPersistentVolumeClaimRetentionPolicyType specifies that + // DeletePersistentVolumeClaimRetentionPolicyType specifies that // PersistentVolumeClaims associated with StatefulSet VolumeClaimTemplates // will be deleted in the scenario specified in // StatefulSetPersistentVolumeClaimRetentionPolicy. @@ -255,8 +255,7 @@ type StatefulSetSpec struct { // volume claims are created as needed and retained until manually deleted. This // policy allows the lifecycle to be altered, for example by deleting persistent // volume claims when their stateful set is deleted, or when their pod is scaled - // down. This requires the StatefulSetAutoDeletePVC feature gate to be enabled, - // which is beta. + // down. 
// +optional PersistentVolumeClaimRetentionPolicy *StatefulSetPersistentVolumeClaimRetentionPolicy `json:"persistentVolumeClaimRetentionPolicy,omitempty" protobuf:"bytes,10,opt,name=persistentVolumeClaimRetentionPolicy"` diff --git a/vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go index f3e221a0e..341ecdadb 100644 --- a/vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go @@ -354,7 +354,7 @@ var map_StatefulSetSpec = map[string]string{ "updateStrategy": "updateStrategy indicates the StatefulSetUpdateStrategy that will be employed to update Pods in the StatefulSet when a revision is made to Template.", "revisionHistoryLimit": "revisionHistoryLimit is the maximum number of revisions that will be maintained in the StatefulSet's revision history. The revision history consists of all revisions not represented by a currently applied StatefulSetSpec version. The default value is 10.", "minReadySeconds": "Minimum number of seconds for which a newly created pod should be ready without any of its container crashing for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)", - "persistentVolumeClaimRetentionPolicy": "persistentVolumeClaimRetentionPolicy describes the lifecycle of persistent volume claims created from volumeClaimTemplates. By default, all persistent volume claims are created as needed and retained until manually deleted. This policy allows the lifecycle to be altered, for example by deleting persistent volume claims when their stateful set is deleted, or when their pod is scaled down. This requires the StatefulSetAutoDeletePVC feature gate to be enabled, which is beta.", + "persistentVolumeClaimRetentionPolicy": "persistentVolumeClaimRetentionPolicy describes the lifecycle of persistent volume claims created from volumeClaimTemplates. By default, all persistent volume claims are created as needed and retained until manually deleted. This policy allows the lifecycle to be altered, for example by deleting persistent volume claims when their stateful set is deleted, or when their pod is scaled down.", "ordinals": "ordinals controls the numbering of replica indices in a StatefulSet. The default ordinals behavior assigns a \"0\" index to the first replica and increments the index by one for each additional replica requested.", } diff --git a/vendor/k8s.io/api/apps/v1beta1/generated.proto b/vendor/k8s.io/api/apps/v1beta1/generated.proto index 4b0fa366c..46d7bfdf9 100644 --- a/vendor/k8s.io/api/apps/v1beta1/generated.proto +++ b/vendor/k8s.io/api/apps/v1beta1/generated.proto @@ -486,8 +486,7 @@ message StatefulSetSpec { optional int32 minReadySeconds = 9; // PersistentVolumeClaimRetentionPolicy describes the policy used for PVCs created from - // the StatefulSet VolumeClaimTemplates. This requires the - // StatefulSetAutoDeletePVC feature gate to be enabled, which is alpha. + // the StatefulSet VolumeClaimTemplates. // +optional optional StatefulSetPersistentVolumeClaimRetentionPolicy persistentVolumeClaimRetentionPolicy = 10; diff --git a/vendor/k8s.io/api/apps/v1beta1/types.go b/vendor/k8s.io/api/apps/v1beta1/types.go index 07bfa88c5..bc4851957 100644 --- a/vendor/k8s.io/api/apps/v1beta1/types.go +++ b/vendor/k8s.io/api/apps/v1beta1/types.go @@ -181,11 +181,11 @@ const ( // PersistentVolumeClaims associated with StatefulSet VolumeClaimTemplates // will not be deleted. 
RetainPersistentVolumeClaimRetentionPolicyType PersistentVolumeClaimRetentionPolicyType = "Retain" - // RetentionPersistentVolumeClaimRetentionPolicyType specifies that + // DeletePersistentVolumeClaimRetentionPolicyType specifies that // PersistentVolumeClaims associated with StatefulSet VolumeClaimTemplates // will be deleted in the scenario specified in // StatefulSetPersistentVolumeClaimRetentionPolicy. - RetentionPersistentVolumeClaimRetentionPolicyType PersistentVolumeClaimRetentionPolicyType = "Delete" + DeletePersistentVolumeClaimRetentionPolicyType PersistentVolumeClaimRetentionPolicyType = "Delete" ) // StatefulSetPersistentVolumeClaimRetentionPolicy describes the policy used for PVCs @@ -290,8 +290,7 @@ type StatefulSetSpec struct { MinReadySeconds int32 `json:"minReadySeconds,omitempty" protobuf:"varint,9,opt,name=minReadySeconds"` // PersistentVolumeClaimRetentionPolicy describes the policy used for PVCs created from - // the StatefulSet VolumeClaimTemplates. This requires the - // StatefulSetAutoDeletePVC feature gate to be enabled, which is alpha. + // the StatefulSet VolumeClaimTemplates. // +optional PersistentVolumeClaimRetentionPolicy *StatefulSetPersistentVolumeClaimRetentionPolicy `json:"persistentVolumeClaimRetentionPolicy,omitempty" protobuf:"bytes,10,opt,name=persistentVolumeClaimRetentionPolicy"` diff --git a/vendor/k8s.io/api/apps/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/apps/v1beta1/types_swagger_doc_generated.go index 9e7fb1adc..1381d75dc 100644 --- a/vendor/k8s.io/api/apps/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/apps/v1beta1/types_swagger_doc_generated.go @@ -258,7 +258,7 @@ var map_StatefulSetSpec = map[string]string{ "updateStrategy": "updateStrategy indicates the StatefulSetUpdateStrategy that will be employed to update Pods in the StatefulSet when a revision is made to Template.", "revisionHistoryLimit": "revisionHistoryLimit is the maximum number of revisions that will be maintained in the StatefulSet's revision history. The revision history consists of all revisions not represented by a currently applied StatefulSetSpec version. The default value is 10.", "minReadySeconds": "minReadySeconds is the minimum number of seconds for which a newly created pod should be ready without any of its container crashing for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)", - "persistentVolumeClaimRetentionPolicy": "PersistentVolumeClaimRetentionPolicy describes the policy used for PVCs created from the StatefulSet VolumeClaimTemplates. This requires the StatefulSetAutoDeletePVC feature gate to be enabled, which is alpha.", + "persistentVolumeClaimRetentionPolicy": "PersistentVolumeClaimRetentionPolicy describes the policy used for PVCs created from the StatefulSet VolumeClaimTemplates.", "ordinals": "ordinals controls the numbering of replica indices in a StatefulSet. The default ordinals behavior assigns a \"0\" index to the first replica and increments the index by one for each additional replica requested.", } diff --git a/vendor/k8s.io/api/apps/v1beta2/generated.proto b/vendor/k8s.io/api/apps/v1beta2/generated.proto index d3db8956e..c08a4c78b 100644 --- a/vendor/k8s.io/api/apps/v1beta2/generated.proto +++ b/vendor/k8s.io/api/apps/v1beta2/generated.proto @@ -778,8 +778,7 @@ message StatefulSetSpec { optional int32 minReadySeconds = 9; // PersistentVolumeClaimRetentionPolicy describes the policy used for PVCs created from - // the StatefulSet VolumeClaimTemplates. 
This requires the - // StatefulSetAutoDeletePVC feature gate to be enabled, which is alpha. + // the StatefulSet VolumeClaimTemplates. // +optional optional StatefulSetPersistentVolumeClaimRetentionPolicy persistentVolumeClaimRetentionPolicy = 10; diff --git a/vendor/k8s.io/api/apps/v1beta2/types.go b/vendor/k8s.io/api/apps/v1beta2/types.go index f93a5bea7..c2624a941 100644 --- a/vendor/k8s.io/api/apps/v1beta2/types.go +++ b/vendor/k8s.io/api/apps/v1beta2/types.go @@ -191,11 +191,11 @@ const ( // PersistentVolumeClaims associated with StatefulSet VolumeClaimTemplates // will not be deleted. RetainPersistentVolumeClaimRetentionPolicyType PersistentVolumeClaimRetentionPolicyType = "Retain" - // RetentionPersistentVolumeClaimRetentionPolicyType specifies that + // DeletePersistentVolumeClaimRetentionPolicyType specifies that // PersistentVolumeClaims associated with StatefulSet VolumeClaimTemplates // will be deleted in the scenario specified in // StatefulSetPersistentVolumeClaimRetentionPolicy. - RetentionPersistentVolumeClaimRetentionPolicyType PersistentVolumeClaimRetentionPolicyType = "Delete" + DeletePersistentVolumeClaimRetentionPolicyType PersistentVolumeClaimRetentionPolicyType = "Delete" ) // StatefulSetPersistentVolumeClaimRetentionPolicy describes the policy used for PVCs @@ -300,8 +300,7 @@ type StatefulSetSpec struct { MinReadySeconds int32 `json:"minReadySeconds,omitempty" protobuf:"varint,9,opt,name=minReadySeconds"` // PersistentVolumeClaimRetentionPolicy describes the policy used for PVCs created from - // the StatefulSet VolumeClaimTemplates. This requires the - // StatefulSetAutoDeletePVC feature gate to be enabled, which is alpha. + // the StatefulSet VolumeClaimTemplates. // +optional PersistentVolumeClaimRetentionPolicy *StatefulSetPersistentVolumeClaimRetentionPolicy `json:"persistentVolumeClaimRetentionPolicy,omitempty" protobuf:"bytes,10,opt,name=persistentVolumeClaimRetentionPolicy"` diff --git a/vendor/k8s.io/api/apps/v1beta2/types_swagger_doc_generated.go b/vendor/k8s.io/api/apps/v1beta2/types_swagger_doc_generated.go index 0b8fe34af..beec4b755 100644 --- a/vendor/k8s.io/api/apps/v1beta2/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/apps/v1beta2/types_swagger_doc_generated.go @@ -382,7 +382,7 @@ var map_StatefulSetSpec = map[string]string{ "updateStrategy": "updateStrategy indicates the StatefulSetUpdateStrategy that will be employed to update Pods in the StatefulSet when a revision is made to Template.", "revisionHistoryLimit": "revisionHistoryLimit is the maximum number of revisions that will be maintained in the StatefulSet's revision history. The revision history consists of all revisions not represented by a currently applied StatefulSetSpec version. The default value is 10.", "minReadySeconds": "Minimum number of seconds for which a newly created pod should be ready without any of its container crashing for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)", - "persistentVolumeClaimRetentionPolicy": "PersistentVolumeClaimRetentionPolicy describes the policy used for PVCs created from the StatefulSet VolumeClaimTemplates. This requires the StatefulSetAutoDeletePVC feature gate to be enabled, which is alpha.", + "persistentVolumeClaimRetentionPolicy": "PersistentVolumeClaimRetentionPolicy describes the policy used for PVCs created from the StatefulSet VolumeClaimTemplates.", "ordinals": "ordinals controls the numbering of replica indices in a StatefulSet. 
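With the feature-gate caveat removed from these doc strings, persistentVolumeClaimRetentionPolicy is simply set on the StatefulSet spec; a sketch using the Retain/Delete values from the constants in this diff (all names, images, and sizes hypothetical):

apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: example-db                        # hypothetical
spec:
  serviceName: example-db
  replicas: 3
  selector:
    matchLabels:
      app: example-db
  persistentVolumeClaimRetentionPolicy:
    whenDeleted: Delete                   # remove PVCs when the StatefulSet is deleted
    whenScaled: Retain                    # keep PVCs belonging to scaled-down replicas
  template:
    metadata:
      labels:
        app: example-db
    spec:
      containers:
      - name: db
        image: registry.example.com/db:1.0   # hypothetical image
  volumeClaimTemplates:
  - metadata:
      name: data
    spec:
      accessModes: ["ReadWriteOnce"]
      resources:
        requests:
          storage: 1Gi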
The default ordinals behavior assigns a \"0\" index to the first replica and increments the index by one for each additional replica requested.", } diff --git a/vendor/k8s.io/api/autoscaling/v1/generated.proto b/vendor/k8s.io/api/autoscaling/v1/generated.proto index 0a961312f..68c35b6b2 100644 --- a/vendor/k8s.io/api/autoscaling/v1/generated.proto +++ b/vendor/k8s.io/api/autoscaling/v1/generated.proto @@ -241,8 +241,6 @@ message HorizontalPodAutoscalerStatus { message MetricSpec { // type is the type of metric source. It should be one of "ContainerResource", // "External", "Object", "Pods" or "Resource", each mapping to a matching field in the object. - // Note: "ContainerResource" type is available on when the feature-gate - // HPAContainerMetrics is enabled optional string type = 1; // object refers to a metric describing a single kubernetes object @@ -269,7 +267,6 @@ message MetricSpec { // current scale target (e.g. CPU or memory). Such metrics are built in to // Kubernetes, and have special scaling options on top of those available // to normal per-pod metrics using the "pods" source. - // This is an alpha feature and can be enabled by the HPAContainerMetrics feature flag. // +optional optional ContainerResourceMetricSource containerResource = 7; @@ -286,8 +283,6 @@ message MetricSpec { message MetricStatus { // type is the type of metric source. It will be one of "ContainerResource", // "External", "Object", "Pods" or "Resource", each corresponds to a matching field in the object. - // Note: "ContainerResource" type is available on when the feature-gate - // HPAContainerMetrics is enabled optional string type = 1; // object refers to a metric describing a single kubernetes object diff --git a/vendor/k8s.io/api/autoscaling/v1/types.go b/vendor/k8s.io/api/autoscaling/v1/types.go index b31425b3b..85c609e5c 100644 --- a/vendor/k8s.io/api/autoscaling/v1/types.go +++ b/vendor/k8s.io/api/autoscaling/v1/types.go @@ -193,8 +193,6 @@ const ( type MetricSpec struct { // type is the type of metric source. It should be one of "ContainerResource", // "External", "Object", "Pods" or "Resource", each mapping to a matching field in the object. - // Note: "ContainerResource" type is available on when the feature-gate - // HPAContainerMetrics is enabled Type MetricSourceType `json:"type" protobuf:"bytes,1,name=type"` // object refers to a metric describing a single kubernetes object @@ -221,7 +219,6 @@ type MetricSpec struct { // current scale target (e.g. CPU or memory). Such metrics are built in to // Kubernetes, and have special scaling options on top of those available // to normal per-pod metrics using the "pods" source. - // This is an alpha feature and can be enabled by the HPAContainerMetrics feature flag. // +optional ContainerResource *ContainerResourceMetricSource `json:"containerResource,omitempty" protobuf:"bytes,7,opt,name=containerResource"` @@ -355,8 +352,6 @@ type ExternalMetricSource struct { type MetricStatus struct { // type is the type of metric source. It will be one of "ContainerResource", // "External", "Object", "Pods" or "Resource", each corresponds to a matching field in the object. 
- // Note: "ContainerResource" type is available on when the feature-gate - // HPAContainerMetrics is enabled Type MetricSourceType `json:"type" protobuf:"bytes,1,name=type"` // object refers to a metric describing a single kubernetes object diff --git a/vendor/k8s.io/api/autoscaling/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/autoscaling/v1/types_swagger_doc_generated.go index 37c2b36a5..ba43d06c1 100644 --- a/vendor/k8s.io/api/autoscaling/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/autoscaling/v1/types_swagger_doc_generated.go @@ -147,11 +147,11 @@ func (HorizontalPodAutoscalerStatus) SwaggerDoc() map[string]string { var map_MetricSpec = map[string]string{ "": "MetricSpec specifies how to scale based on a single metric (only `type` and one other matching field should be set at once).", - "type": "type is the type of metric source. It should be one of \"ContainerResource\", \"External\", \"Object\", \"Pods\" or \"Resource\", each mapping to a matching field in the object. Note: \"ContainerResource\" type is available on when the feature-gate HPAContainerMetrics is enabled", + "type": "type is the type of metric source. It should be one of \"ContainerResource\", \"External\", \"Object\", \"Pods\" or \"Resource\", each mapping to a matching field in the object.", "object": "object refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object).", "pods": "pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value.", "resource": "resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", - "containerResource": "containerResource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing a single container in each pod of the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source. This is an alpha feature and can be enabled by the HPAContainerMetrics feature flag.", + "containerResource": "containerResource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing a single container in each pod of the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", "external": "external refers to a global metric that is not associated with any Kubernetes object. It allows autoscaling based on information coming from components running outside of cluster (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster).", } @@ -161,7 +161,7 @@ func (MetricSpec) SwaggerDoc() map[string]string { var map_MetricStatus = map[string]string{ "": "MetricStatus describes the last-read state of a single metric.", - "type": "type is the type of metric source. It will be one of \"ContainerResource\", \"External\", \"Object\", \"Pods\" or \"Resource\", each corresponds to a matching field in the object. 
Note: \"ContainerResource\" type is available on when the feature-gate HPAContainerMetrics is enabled", + "type": "type is the type of metric source. It will be one of \"ContainerResource\", \"External\", \"Object\", \"Pods\" or \"Resource\", each corresponds to a matching field in the object.", "object": "object refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object).", "pods": "pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value.", "resource": "resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", diff --git a/vendor/k8s.io/api/autoscaling/v2/generated.proto b/vendor/k8s.io/api/autoscaling/v2/generated.proto index 8f2ee5803..4e6dc0592 100644 --- a/vendor/k8s.io/api/autoscaling/v2/generated.proto +++ b/vendor/k8s.io/api/autoscaling/v2/generated.proto @@ -301,8 +301,6 @@ message MetricIdentifier { message MetricSpec { // type is the type of metric source. It should be one of "ContainerResource", "External", // "Object", "Pods" or "Resource", each mapping to a matching field in the object. - // Note: "ContainerResource" type is available on when the feature-gate - // HPAContainerMetrics is enabled optional string type = 1; // object refers to a metric describing a single kubernetes object @@ -329,7 +327,6 @@ message MetricSpec { // each pod of the current scale target (e.g. CPU or memory). Such metrics are // built in to Kubernetes, and have special scaling options on top of those // available to normal per-pod metrics using the "pods" source. - // This is an alpha feature and can be enabled by the HPAContainerMetrics feature flag. // +optional optional ContainerResourceMetricSource containerResource = 7; @@ -346,8 +343,6 @@ message MetricSpec { message MetricStatus { // type is the type of metric source. It will be one of "ContainerResource", "External", // "Object", "Pods" or "Resource", each corresponds to a matching field in the object. - // Note: "ContainerResource" type is available on when the feature-gate - // HPAContainerMetrics is enabled optional string type = 1; // object refers to a metric describing a single kubernetes object diff --git a/vendor/k8s.io/api/autoscaling/v2/types.go b/vendor/k8s.io/api/autoscaling/v2/types.go index 69a7b2701..99e8db09d 100644 --- a/vendor/k8s.io/api/autoscaling/v2/types.go +++ b/vendor/k8s.io/api/autoscaling/v2/types.go @@ -102,8 +102,6 @@ type CrossVersionObjectReference struct { type MetricSpec struct { // type is the type of metric source. It should be one of "ContainerResource", "External", // "Object", "Pods" or "Resource", each mapping to a matching field in the object. - // Note: "ContainerResource" type is available on when the feature-gate - // HPAContainerMetrics is enabled Type MetricSourceType `json:"type" protobuf:"bytes,1,name=type"` // object refers to a metric describing a single kubernetes object @@ -130,7 +128,6 @@ type MetricSpec struct { // each pod of the current scale target (e.g. CPU or memory). Such metrics are // built in to Kubernetes, and have special scaling options on top of those // available to normal per-pod metrics using the "pods" source. 
- // This is an alpha feature and can be enabled by the HPAContainerMetrics feature flag. // +optional ContainerResource *ContainerResourceMetricSource `json:"containerResource,omitempty" protobuf:"bytes,7,opt,name=containerResource"` @@ -453,8 +450,6 @@ type HorizontalPodAutoscalerCondition struct { type MetricStatus struct { // type is the type of metric source. It will be one of "ContainerResource", "External", // "Object", "Pods" or "Resource", each corresponds to a matching field in the object. - // Note: "ContainerResource" type is available on when the feature-gate - // HPAContainerMetrics is enabled Type MetricSourceType `json:"type" protobuf:"bytes,1,name=type"` // object refers to a metric describing a single kubernetes object diff --git a/vendor/k8s.io/api/autoscaling/v2/types_swagger_doc_generated.go b/vendor/k8s.io/api/autoscaling/v2/types_swagger_doc_generated.go index 1941b1ef5..649cd04a0 100644 --- a/vendor/k8s.io/api/autoscaling/v2/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/autoscaling/v2/types_swagger_doc_generated.go @@ -185,11 +185,11 @@ func (MetricIdentifier) SwaggerDoc() map[string]string { var map_MetricSpec = map[string]string{ "": "MetricSpec specifies how to scale based on a single metric (only `type` and one other matching field should be set at once).", - "type": "type is the type of metric source. It should be one of \"ContainerResource\", \"External\", \"Object\", \"Pods\" or \"Resource\", each mapping to a matching field in the object. Note: \"ContainerResource\" type is available on when the feature-gate HPAContainerMetrics is enabled", + "type": "type is the type of metric source. It should be one of \"ContainerResource\", \"External\", \"Object\", \"Pods\" or \"Resource\", each mapping to a matching field in the object.", "object": "object refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object).", "pods": "pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value.", "resource": "resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", - "containerResource": "containerResource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing a single container in each pod of the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source. This is an alpha feature and can be enabled by the HPAContainerMetrics feature flag.", + "containerResource": "containerResource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing a single container in each pod of the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", "external": "external refers to a global metric that is not associated with any Kubernetes object. 
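The same doc cleanup applies to the ContainerResource metric type; on the autoscaling/v2 API it is used roughly as follows (illustrative sketch, target and container names hypothetical):

apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
  name: example-app                       # hypothetical
spec:
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: example-app
  minReplicas: 2
  maxReplicas: 10
  metrics:
  - type: ContainerResource
    containerResource:
      name: cpu
      container: app                      # scale on this container's CPU, not the whole pod
      target:
        type: Utilization
        averageUtilization: 70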
It allows autoscaling based on information coming from components running outside of cluster (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster).", } @@ -199,7 +199,7 @@ func (MetricSpec) SwaggerDoc() map[string]string { var map_MetricStatus = map[string]string{ "": "MetricStatus describes the last-read state of a single metric.", - "type": "type is the type of metric source. It will be one of \"ContainerResource\", \"External\", \"Object\", \"Pods\" or \"Resource\", each corresponds to a matching field in the object. Note: \"ContainerResource\" type is available on when the feature-gate HPAContainerMetrics is enabled", + "type": "type is the type of metric source. It will be one of \"ContainerResource\", \"External\", \"Object\", \"Pods\" or \"Resource\", each corresponds to a matching field in the object.", "object": "object refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object).", "pods": "pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value.", "resource": "resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", diff --git a/vendor/k8s.io/api/autoscaling/v2beta1/generated.proto b/vendor/k8s.io/api/autoscaling/v2beta1/generated.proto index 232a59815..4b71732ab 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta1/generated.proto +++ b/vendor/k8s.io/api/autoscaling/v2beta1/generated.proto @@ -260,8 +260,6 @@ message HorizontalPodAutoscalerStatus { message MetricSpec { // type is the type of metric source. It should be one of "ContainerResource", // "External", "Object", "Pods" or "Resource", each mapping to a matching field in the object. - // Note: "ContainerResource" type is available on when the feature-gate - // HPAContainerMetrics is enabled optional string type = 1; // object refers to a metric describing a single kubernetes object @@ -288,7 +286,6 @@ message MetricSpec { // each pod of the current scale target (e.g. CPU or memory). Such metrics are // built in to Kubernetes, and have special scaling options on top of those // available to normal per-pod metrics using the "pods" source. - // This is an alpha feature and can be enabled by the HPAContainerMetrics feature flag. // +optional optional ContainerResourceMetricSource containerResource = 7; @@ -305,8 +302,6 @@ message MetricSpec { message MetricStatus { // type is the type of metric source. It will be one of "ContainerResource", // "External", "Object", "Pods" or "Resource", each corresponds to a matching field in the object. - // Note: "ContainerResource" type is available on when the feature-gate - // HPAContainerMetrics is enabled optional string type = 1; // object refers to a metric describing a single kubernetes object diff --git a/vendor/k8s.io/api/autoscaling/v2beta1/types.go b/vendor/k8s.io/api/autoscaling/v2beta1/types.go index 193cc4354..c3abdd9bd 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta1/types.go +++ b/vendor/k8s.io/api/autoscaling/v2beta1/types.go @@ -96,8 +96,6 @@ const ( type MetricSpec struct { // type is the type of metric source. 
It should be one of "ContainerResource", // "External", "Object", "Pods" or "Resource", each mapping to a matching field in the object. - // Note: "ContainerResource" type is available on when the feature-gate - // HPAContainerMetrics is enabled Type MetricSourceType `json:"type" protobuf:"bytes,1,name=type"` // object refers to a metric describing a single kubernetes object @@ -121,7 +119,6 @@ type MetricSpec struct { // each pod of the current scale target (e.g. CPU or memory). Such metrics are // built in to Kubernetes, and have special scaling options on top of those // available to normal per-pod metrics using the "pods" source. - // This is an alpha feature and can be enabled by the HPAContainerMetrics feature flag. // +optional ContainerResource *ContainerResourceMetricSource `json:"containerResource,omitempty" protobuf:"bytes,7,opt,name=containerResource"` // external refers to a global metric that is not associated @@ -311,8 +308,6 @@ type HorizontalPodAutoscalerCondition struct { type MetricStatus struct { // type is the type of metric source. It will be one of "ContainerResource", // "External", "Object", "Pods" or "Resource", each corresponds to a matching field in the object. - // Note: "ContainerResource" type is available on when the feature-gate - // HPAContainerMetrics is enabled Type MetricSourceType `json:"type" protobuf:"bytes,1,name=type"` // object refers to a metric describing a single kubernetes object diff --git a/vendor/k8s.io/api/autoscaling/v2beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/autoscaling/v2beta1/types_swagger_doc_generated.go index d656ee416..c7c72bf35 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/autoscaling/v2beta1/types_swagger_doc_generated.go @@ -148,11 +148,11 @@ func (HorizontalPodAutoscalerStatus) SwaggerDoc() map[string]string { var map_MetricSpec = map[string]string{ "": "MetricSpec specifies how to scale based on a single metric (only `type` and one other matching field should be set at once).", - "type": "type is the type of metric source. It should be one of \"ContainerResource\", \"External\", \"Object\", \"Pods\" or \"Resource\", each mapping to a matching field in the object. Note: \"ContainerResource\" type is available on when the feature-gate HPAContainerMetrics is enabled", + "type": "type is the type of metric source. It should be one of \"ContainerResource\", \"External\", \"Object\", \"Pods\" or \"Resource\", each mapping to a matching field in the object.", "object": "object refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object).", "pods": "pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value.", "resource": "resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", - "containerResource": "container resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing a single container in each pod of the current scale target (e.g. CPU or memory). 
Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source. This is an alpha feature and can be enabled by the HPAContainerMetrics feature flag.", + "containerResource": "container resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing a single container in each pod of the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", "external": "external refers to a global metric that is not associated with any Kubernetes object. It allows autoscaling based on information coming from components running outside of cluster (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster).", } @@ -162,7 +162,7 @@ func (MetricSpec) SwaggerDoc() map[string]string { var map_MetricStatus = map[string]string{ "": "MetricStatus describes the last-read state of a single metric.", - "type": "type is the type of metric source. It will be one of \"ContainerResource\", \"External\", \"Object\", \"Pods\" or \"Resource\", each corresponds to a matching field in the object. Note: \"ContainerResource\" type is available on when the feature-gate HPAContainerMetrics is enabled", + "type": "type is the type of metric source. It will be one of \"ContainerResource\", \"External\", \"Object\", \"Pods\" or \"Resource\", each corresponds to a matching field in the object.", "object": "object refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object).", "pods": "pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value.", "resource": "resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", diff --git a/vendor/k8s.io/api/autoscaling/v2beta2/generated.proto b/vendor/k8s.io/api/autoscaling/v2beta2/generated.proto index c88fc1fe2..941d9752a 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta2/generated.proto +++ b/vendor/k8s.io/api/autoscaling/v2beta2/generated.proto @@ -297,8 +297,6 @@ message MetricIdentifier { message MetricSpec { // type is the type of metric source. It should be one of "ContainerResource", "External", // "Object", "Pods" or "Resource", each mapping to a matching field in the object. - // Note: "ContainerResource" type is available on when the feature-gate - // HPAContainerMetrics is enabled optional string type = 1; // object refers to a metric describing a single kubernetes object @@ -325,7 +323,6 @@ message MetricSpec { // each pod of the current scale target (e.g. CPU or memory). Such metrics are // built in to Kubernetes, and have special scaling options on top of those // available to normal per-pod metrics using the "pods" source. - // This is an alpha feature and can be enabled by the HPAContainerMetrics feature flag. // +optional optional ContainerResourceMetricSource containerResource = 7; @@ -342,8 +339,6 @@ message MetricSpec { message MetricStatus { // type is the type of metric source. 
It will be one of "ContainerResource", "External", // "Object", "Pods" or "Resource", each corresponds to a matching field in the object. - // Note: "ContainerResource" type is available on when the feature-gate - // HPAContainerMetrics is enabled optional string type = 1; // object refers to a metric describing a single kubernetes object diff --git a/vendor/k8s.io/api/autoscaling/v2beta2/types.go b/vendor/k8s.io/api/autoscaling/v2beta2/types.go index 2fee0b8a0..bc9677b14 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta2/types.go +++ b/vendor/k8s.io/api/autoscaling/v2beta2/types.go @@ -104,8 +104,6 @@ type CrossVersionObjectReference struct { type MetricSpec struct { // type is the type of metric source. It should be one of "ContainerResource", "External", // "Object", "Pods" or "Resource", each mapping to a matching field in the object. - // Note: "ContainerResource" type is available on when the feature-gate - // HPAContainerMetrics is enabled Type MetricSourceType `json:"type" protobuf:"bytes,1,name=type"` // object refers to a metric describing a single kubernetes object @@ -132,7 +130,6 @@ type MetricSpec struct { // each pod of the current scale target (e.g. CPU or memory). Such metrics are // built in to Kubernetes, and have special scaling options on top of those // available to normal per-pod metrics using the "pods" source. - // This is an alpha feature and can be enabled by the HPAContainerMetrics feature flag. // +optional ContainerResource *ContainerResourceMetricSource `json:"containerResource,omitempty" protobuf:"bytes,7,opt,name=containerResource"` @@ -449,8 +446,6 @@ type HorizontalPodAutoscalerCondition struct { type MetricStatus struct { // type is the type of metric source. It will be one of "ContainerResource", "External", // "Object", "Pods" or "Resource", each corresponds to a matching field in the object. - // Note: "ContainerResource" type is available on when the feature-gate - // HPAContainerMetrics is enabled Type MetricSourceType `json:"type" protobuf:"bytes,1,name=type"` // object refers to a metric describing a single kubernetes object diff --git a/vendor/k8s.io/api/autoscaling/v2beta2/types_swagger_doc_generated.go b/vendor/k8s.io/api/autoscaling/v2beta2/types_swagger_doc_generated.go index 4af7d0ec0..5d4bb86b8 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta2/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/autoscaling/v2beta2/types_swagger_doc_generated.go @@ -185,11 +185,11 @@ func (MetricIdentifier) SwaggerDoc() map[string]string { var map_MetricSpec = map[string]string{ "": "MetricSpec specifies how to scale based on a single metric (only `type` and one other matching field should be set at once).", - "type": "type is the type of metric source. It should be one of \"ContainerResource\", \"External\", \"Object\", \"Pods\" or \"Resource\", each mapping to a matching field in the object. Note: \"ContainerResource\" type is available on when the feature-gate HPAContainerMetrics is enabled", + "type": "type is the type of metric source. It should be one of \"ContainerResource\", \"External\", \"Object\", \"Pods\" or \"Resource\", each mapping to a matching field in the object.", "object": "object refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object).", "pods": "pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second). 
The values will be averaged together before being compared to the target value.", "resource": "resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", - "containerResource": "container resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing a single container in each pod of the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source. This is an alpha feature and can be enabled by the HPAContainerMetrics feature flag.", + "containerResource": "container resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing a single container in each pod of the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", "external": "external refers to a global metric that is not associated with any Kubernetes object. It allows autoscaling based on information coming from components running outside of cluster (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster).", } @@ -199,7 +199,7 @@ func (MetricSpec) SwaggerDoc() map[string]string { var map_MetricStatus = map[string]string{ "": "MetricStatus describes the last-read state of a single metric.", - "type": "type is the type of metric source. It will be one of \"ContainerResource\", \"External\", \"Object\", \"Pods\" or \"Resource\", each corresponds to a matching field in the object. Note: \"ContainerResource\" type is available on when the feature-gate HPAContainerMetrics is enabled", + "type": "type is the type of metric source. It will be one of \"ContainerResource\", \"External\", \"Object\", \"Pods\" or \"Resource\", each corresponds to a matching field in the object.", "object": "object refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object).", "pods": "pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value.", "resource": "resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", diff --git a/vendor/k8s.io/api/batch/v1/generated.proto b/vendor/k8s.io/api/batch/v1/generated.proto index f5a9385f5..361ebdca1 100644 --- a/vendor/k8s.io/api/batch/v1/generated.proto +++ b/vendor/k8s.io/api/batch/v1/generated.proto @@ -350,8 +350,8 @@ message JobSpec { // characters as defined by RFC 3986. The value cannot exceed 63 characters. // This field is immutable. // - // This field is alpha-level. The job controller accepts setting the field - // when the feature gate JobManagedBy is enabled (disabled by default). + // This field is beta-level. 
The job controller accepts setting the field + // when the feature gate JobManagedBy is enabled (enabled by default). // +optional optional string managedBy = 15; } diff --git a/vendor/k8s.io/api/batch/v1/types.go b/vendor/k8s.io/api/batch/v1/types.go index b42ec231e..8e9a761b9 100644 --- a/vendor/k8s.io/api/batch/v1/types.go +++ b/vendor/k8s.io/api/batch/v1/types.go @@ -29,7 +29,6 @@ const ( // CronJobScheduledTimestampAnnotation is the scheduled timestamp annotation for the Job. // It records the original/expected scheduled timestamp for the running job, represented in RFC3339. - // The CronJob controller adds this annotation if the CronJobsScheduledAnnotation feature gate (beta in 1.28) is enabled. CronJobScheduledTimestampAnnotation = labelPrefix + "cronjob-scheduled-timestamp" JobCompletionIndexAnnotation = labelPrefix + "job-completion-index" @@ -480,8 +479,8 @@ type JobSpec struct { // characters as defined by RFC 3986. The value cannot exceed 63 characters. // This field is immutable. // - // This field is alpha-level. The job controller accepts setting the field - // when the feature gate JobManagedBy is enabled (disabled by default). + // This field is beta-level. The job controller accepts setting the field + // when the feature gate JobManagedBy is enabled (enabled by default). // +optional ManagedBy *string `json:"managedBy,omitempty" protobuf:"bytes,15,opt,name=managedBy"` } diff --git a/vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go index d50488788..893f3371f 100644 --- a/vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go @@ -127,7 +127,7 @@ var map_JobSpec = map[string]string{ "completionMode": "completionMode specifies how Pod completions are tracked. It can be `NonIndexed` (default) or `Indexed`.\n\n`NonIndexed` means that the Job is considered complete when there have been .spec.completions successfully completed Pods. Each Pod completion is homologous to each other.\n\n`Indexed` means that the Pods of a Job get an associated completion index from 0 to (.spec.completions - 1), available in the annotation batch.kubernetes.io/job-completion-index. The Job is considered complete when there is one successfully completed Pod for each index. When value is `Indexed`, .spec.completions must be specified and `.spec.parallelism` must be less than or equal to 10^5. In addition, The Pod name takes the form `$(job-name)-$(index)-$(random-string)`, the Pod hostname takes the form `$(job-name)-$(index)`.\n\nMore completion modes can be added in the future. If the Job controller observes a mode that it doesn't recognize, which is possible during upgrades due to version skew, the controller skips updates for the Job.", "suspend": "suspend specifies whether the Job controller should create Pods or not. If a Job is created with suspend set to true, no Pods are created by the Job controller. If a Job is suspended after creation (i.e. the flag goes from false to true), the Job controller will delete all active Pods associated with this Job. Users must design their workload to gracefully handle this. Suspending a Job will reset the StartTime field of the Job, effectively resetting the ActiveDeadlineSeconds timer too. Defaults to false.", "podReplacementPolicy": "podReplacementPolicy specifies when to create replacement Pods. 
Possible values are: - TerminatingOrFailed means that we recreate pods\n when they are terminating (has a metadata.deletionTimestamp) or failed.\n- Failed means to wait until a previously created Pod is fully terminated (has phase\n Failed or Succeeded) before creating a replacement Pod.\n\nWhen using podFailurePolicy, Failed is the the only allowed value. TerminatingOrFailed and Failed are allowed values when podFailurePolicy is not in use. This is an beta field. To use this, enable the JobPodReplacementPolicy feature toggle. This is on by default.", - "managedBy": "ManagedBy field indicates the controller that manages a Job. The k8s Job controller reconciles jobs which don't have this field at all or the field value is the reserved string `kubernetes.io/job-controller`, but skips reconciling Jobs with a custom value for this field. The value must be a valid domain-prefixed path (e.g. acme.io/foo) - all characters before the first \"/\" must be a valid subdomain as defined by RFC 1123. All characters trailing the first \"/\" must be valid HTTP Path characters as defined by RFC 3986. The value cannot exceed 63 characters. This field is immutable.\n\nThis field is alpha-level. The job controller accepts setting the field when the feature gate JobManagedBy is enabled (disabled by default).", + "managedBy": "ManagedBy field indicates the controller that manages a Job. The k8s Job controller reconciles jobs which don't have this field at all or the field value is the reserved string `kubernetes.io/job-controller`, but skips reconciling Jobs with a custom value for this field. The value must be a valid domain-prefixed path (e.g. acme.io/foo) - all characters before the first \"/\" must be a valid subdomain as defined by RFC 1123. All characters trailing the first \"/\" must be valid HTTP Path characters as defined by RFC 3986. The value cannot exceed 63 characters. This field is immutable.\n\nThis field is beta-level. 
The job controller accepts setting the field when the feature gate JobManagedBy is enabled (enabled by default).", } func (JobSpec) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/certificates/v1alpha1/types.go b/vendor/k8s.io/api/certificates/v1alpha1/types.go index 1a9fda011..beef02599 100644 --- a/vendor/k8s.io/api/certificates/v1alpha1/types.go +++ b/vendor/k8s.io/api/certificates/v1alpha1/types.go @@ -23,6 +23,7 @@ import ( // +genclient // +genclient:nonNamespaced // +k8s:prerelease-lifecycle-gen:introduced=1.26 +// +k8s:prerelease-lifecycle-gen:deprecated=1.34 // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // ClusterTrustBundle is a cluster-scoped container for X.509 trust anchors @@ -90,6 +91,7 @@ type ClusterTrustBundleSpec struct { } // +k8s:prerelease-lifecycle-gen:introduced=1.26 +// +k8s:prerelease-lifecycle-gen:deprecated=1.34 // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // ClusterTrustBundleList is a collection of ClusterTrustBundle objects diff --git a/vendor/k8s.io/api/certificates/v1alpha1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/certificates/v1alpha1/zz_generated.prerelease-lifecycle.go index dfafa656c..3121a87d0 100644 --- a/vendor/k8s.io/api/certificates/v1alpha1/zz_generated.prerelease-lifecycle.go +++ b/vendor/k8s.io/api/certificates/v1alpha1/zz_generated.prerelease-lifecycle.go @@ -30,13 +30,13 @@ func (in *ClusterTrustBundle) APILifecycleIntroduced() (major, minor int) { // APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. func (in *ClusterTrustBundle) APILifecycleDeprecated() (major, minor int) { - return 1, 29 + return 1, 34 } // APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. func (in *ClusterTrustBundle) APILifecycleRemoved() (major, minor int) { - return 1, 32 + return 1, 37 } // APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. @@ -48,11 +48,11 @@ func (in *ClusterTrustBundleList) APILifecycleIntroduced() (major, minor int) { // APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. func (in *ClusterTrustBundleList) APILifecycleDeprecated() (major, minor int) { - return 1, 29 + return 1, 34 } // APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. 
func (in *ClusterTrustBundleList) APILifecycleRemoved() (major, minor int) { - return 1, 32 + return 1, 37 } diff --git a/vendor/k8s.io/api/coordination/v1alpha1/doc.go b/vendor/k8s.io/api/coordination/v1alpha2/doc.go similarity index 92% rename from vendor/k8s.io/api/coordination/v1alpha1/doc.go rename to vendor/k8s.io/api/coordination/v1alpha2/doc.go index 33a0b0ea9..5e6d65530 100644 --- a/vendor/k8s.io/api/coordination/v1alpha1/doc.go +++ b/vendor/k8s.io/api/coordination/v1alpha2/doc.go @@ -21,4 +21,4 @@ limitations under the License. // +groupName=coordination.k8s.io -package v1alpha1 // import "k8s.io/api/coordination/v1alpha1" +package v1alpha2 // import "k8s.io/api/coordination/v1alpha2" diff --git a/vendor/k8s.io/api/coordination/v1alpha1/generated.pb.go b/vendor/k8s.io/api/coordination/v1alpha2/generated.pb.go similarity index 82% rename from vendor/k8s.io/api/coordination/v1alpha1/generated.pb.go rename to vendor/k8s.io/api/coordination/v1alpha2/generated.pb.go index 9e072e62d..85ceea1f2 100644 --- a/vendor/k8s.io/api/coordination/v1alpha1/generated.pb.go +++ b/vendor/k8s.io/api/coordination/v1alpha2/generated.pb.go @@ -15,9 +15,9 @@ limitations under the License. */ // Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: k8s.io/api/coordination/v1alpha1/generated.proto +// source: k8s.io/api/coordination/v1alpha2/generated.proto -package v1alpha1 +package v1alpha2 import ( fmt "fmt" @@ -49,7 +49,7 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package func (m *LeaseCandidate) Reset() { *m = LeaseCandidate{} } func (*LeaseCandidate) ProtoMessage() {} func (*LeaseCandidate) Descriptor() ([]byte, []int) { - return fileDescriptor_cb9e87df9da593c2, []int{0} + return fileDescriptor_c1ec5c989d262916, []int{0} } func (m *LeaseCandidate) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -77,7 +77,7 @@ var xxx_messageInfo_LeaseCandidate proto.InternalMessageInfo func (m *LeaseCandidateList) Reset() { *m = LeaseCandidateList{} } func (*LeaseCandidateList) ProtoMessage() {} func (*LeaseCandidateList) Descriptor() ([]byte, []int) { - return fileDescriptor_cb9e87df9da593c2, []int{1} + return fileDescriptor_c1ec5c989d262916, []int{1} } func (m *LeaseCandidateList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -105,7 +105,7 @@ var xxx_messageInfo_LeaseCandidateList proto.InternalMessageInfo func (m *LeaseCandidateSpec) Reset() { *m = LeaseCandidateSpec{} } func (*LeaseCandidateSpec) ProtoMessage() {} func (*LeaseCandidateSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_cb9e87df9da593c2, []int{2} + return fileDescriptor_c1ec5c989d262916, []int{2} } func (m *LeaseCandidateSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -131,53 +131,52 @@ func (m *LeaseCandidateSpec) XXX_DiscardUnknown() { var xxx_messageInfo_LeaseCandidateSpec proto.InternalMessageInfo func init() { - proto.RegisterType((*LeaseCandidate)(nil), "k8s.io.api.coordination.v1alpha1.LeaseCandidate") - proto.RegisterType((*LeaseCandidateList)(nil), "k8s.io.api.coordination.v1alpha1.LeaseCandidateList") - proto.RegisterType((*LeaseCandidateSpec)(nil), "k8s.io.api.coordination.v1alpha1.LeaseCandidateSpec") + proto.RegisterType((*LeaseCandidate)(nil), "k8s.io.api.coordination.v1alpha2.LeaseCandidate") + proto.RegisterType((*LeaseCandidateList)(nil), "k8s.io.api.coordination.v1alpha2.LeaseCandidateList") + proto.RegisterType((*LeaseCandidateSpec)(nil), "k8s.io.api.coordination.v1alpha2.LeaseCandidateSpec") } func init() { - 
proto.RegisterFile("k8s.io/api/coordination/v1alpha1/generated.proto", fileDescriptor_cb9e87df9da593c2) + proto.RegisterFile("k8s.io/api/coordination/v1alpha2/generated.proto", fileDescriptor_c1ec5c989d262916) } -var fileDescriptor_cb9e87df9da593c2 = []byte{ - // 570 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x94, 0xcb, 0x6e, 0xd3, 0x4c, - 0x14, 0xc7, 0xe3, 0x36, 0xf9, 0x94, 0xcc, 0xd7, 0xa0, 0x30, 0x15, 0x92, 0x95, 0x85, 0x13, 0x65, - 0x55, 0x21, 0x31, 0x6e, 0xa0, 0x42, 0x48, 0xec, 0x5c, 0x40, 0x42, 0x4a, 0x4b, 0xe5, 0x42, 0x25, - 0x50, 0x17, 0x4c, 0xec, 0x53, 0x67, 0x48, 0x7c, 0xd1, 0x78, 0x52, 0x94, 0x5d, 0x17, 0x3c, 0x00, - 0x8f, 0x15, 0x58, 0x75, 0xd9, 0x55, 0x44, 0xcc, 0x5b, 0xb0, 0x42, 0x33, 0xb1, 0x73, 0x6d, 0x94, - 0x88, 0x5d, 0xce, 0xe5, 0xf7, 0x3f, 0xe7, 0x7f, 0xac, 0x0c, 0x3a, 0xec, 0xbe, 0x88, 0x09, 0x0b, - 0x4d, 0x1a, 0x31, 0xd3, 0x09, 0x43, 0xee, 0xb2, 0x80, 0x0a, 0x16, 0x06, 0xe6, 0x75, 0x93, 0xf6, - 0xa2, 0x0e, 0x6d, 0x9a, 0x1e, 0x04, 0xc0, 0xa9, 0x00, 0x97, 0x44, 0x3c, 0x14, 0x21, 0xae, 0x4f, - 0x08, 0x42, 0x23, 0x46, 0xe6, 0x09, 0x92, 0x11, 0xd5, 0x27, 0x1e, 0x13, 0x9d, 0x7e, 0x9b, 0x38, - 0xa1, 0x6f, 0x7a, 0xa1, 0x17, 0x9a, 0x0a, 0x6c, 0xf7, 0xaf, 0x54, 0xa4, 0x02, 0xf5, 0x6b, 0x22, - 0x58, 0x7d, 0xbc, 0x7e, 0x85, 0xe5, 0xe1, 0xd5, 0xa3, 0x59, 0xaf, 0x4f, 0x9d, 0x0e, 0x0b, 0x80, - 0x0f, 0xcc, 0xa8, 0xeb, 0xc9, 0x44, 0x6c, 0xfa, 0x20, 0xe8, 0x7d, 0x94, 0xb9, 0x8e, 0xe2, 0xfd, - 0x40, 0x30, 0x1f, 0x56, 0x80, 0xe7, 0x9b, 0x80, 0xd8, 0xe9, 0x80, 0x4f, 0x97, 0xb9, 0xc6, 0x4f, - 0x0d, 0x3d, 0x68, 0x01, 0x8d, 0xe1, 0x98, 0x06, 0x2e, 0x73, 0xa9, 0x00, 0xfc, 0x19, 0x15, 0xe5, - 0x5a, 0x2e, 0x15, 0x54, 0xd7, 0xea, 0xda, 0xc1, 0xff, 0x4f, 0x0f, 0xc9, 0xec, 0x82, 0x53, 0x75, - 0x12, 0x75, 0x3d, 0x99, 0x88, 0x89, 0xec, 0x26, 0xd7, 0x4d, 0xf2, 0xae, 0xfd, 0x05, 0x1c, 0x71, - 0x02, 0x82, 0x5a, 0x78, 0x38, 0xaa, 0xe5, 0x92, 0x51, 0x0d, 0xcd, 0x72, 0xf6, 0x54, 0x15, 0x5f, - 0xa0, 0x7c, 0x1c, 0x81, 0xa3, 0xef, 0x28, 0xf5, 0x23, 0xb2, 0xe9, 0xfb, 0x90, 0xc5, 0x0d, 0xcf, - 0x23, 0x70, 0xac, 0xbd, 0x74, 0x42, 0x5e, 0x46, 0xb6, 0xd2, 0x6b, 0xfc, 0xd0, 0x10, 0x5e, 0x6c, - 0x6d, 0xb1, 0x58, 0xe0, 0xcb, 0x15, 0x43, 0x64, 0x3b, 0x43, 0x92, 0x56, 0x76, 0x2a, 0xe9, 0xb0, - 0x62, 0x96, 0x99, 0x33, 0xf3, 0x01, 0x15, 0x98, 0x00, 0x3f, 0xd6, 0x77, 0xea, 0xbb, 0x4b, 0xb7, - 0xda, 0xca, 0x8d, 0x55, 0x4e, 0xc5, 0x0b, 0x6f, 0xa5, 0x8c, 0x3d, 0x51, 0x6b, 0x7c, 0xcb, 0x2f, - 0x7b, 0x91, 0x46, 0xb1, 0x89, 0x4a, 0x3d, 0x99, 0x3d, 0xa5, 0x3e, 0x28, 0x33, 0x25, 0xeb, 0x61, - 0xca, 0x97, 0x5a, 0x59, 0xc1, 0x9e, 0xf5, 0xe0, 0x8f, 0xa8, 0x18, 0xb1, 0xc0, 0x7b, 0xcf, 0x7c, - 0x48, 0xef, 0x6d, 0x6e, 0x67, 0xfe, 0x84, 0x39, 0x3c, 0x94, 0x98, 0xb5, 0x27, 0x9d, 0x9f, 0xa5, - 0x22, 0xf6, 0x54, 0x0e, 0x5f, 0xa2, 0x12, 0x87, 0x00, 0xbe, 0x2a, 0xed, 0xdd, 0x7f, 0xd3, 0x2e, - 0xcb, 0xc5, 0xed, 0x4c, 0xc5, 0x9e, 0x09, 0xe2, 0x97, 0xa8, 0xdc, 0x66, 0x01, 0xe5, 0x83, 0x0b, - 0xe0, 0x31, 0x0b, 0x03, 0x3d, 0xaf, 0xdc, 0x3e, 0x4a, 0xdd, 0x96, 0xad, 0xf9, 0xa2, 0xbd, 0xd8, - 0x8b, 0x5f, 0xa1, 0x0a, 0xf8, 0xfd, 0x9e, 0x3a, 0x7c, 0xc6, 0x17, 0x14, 0xaf, 0xa7, 0x7c, 0xe5, - 0xf5, 0x52, 0xdd, 0x5e, 0x21, 0xf0, 0x8d, 0x86, 0xf6, 0x23, 0x0e, 0x57, 0xc0, 0x39, 0xb8, 0xe7, - 0x42, 0xfe, 0x6f, 0x3c, 0x06, 0xb1, 0xfe, 0x5f, 0x7d, 0xf7, 0xa0, 0x64, 0x9d, 0x26, 0xa3, 0xda, - 0xfe, 0xd9, 0x6a, 0xf9, 0xcf, 0xa8, 0xf6, 0x6c, 0xfd, 0x03, 0x41, 0x8e, 0xb3, 0x18, 0x5c, 0xf5, - 0xc1, 0x52, 0x70, 0x60, 0xdf, 0x37, 0xca, 0x7a, 0x33, 0x1c, 0x1b, 0xb9, 0xdb, 0xb1, 0x91, 0xbb, - 0x1b, 0x1b, 
0xb9, 0x9b, 0xc4, 0xd0, 0x86, 0x89, 0xa1, 0xdd, 0x26, 0x86, 0x76, 0x97, 0x18, 0xda, - 0xaf, 0xc4, 0xd0, 0xbe, 0xff, 0x36, 0x72, 0x9f, 0xea, 0x9b, 0xde, 0xc4, 0xbf, 0x01, 0x00, 0x00, - 0xff, 0xff, 0x05, 0x28, 0x49, 0xd9, 0x36, 0x05, 0x00, 0x00, +var fileDescriptor_c1ec5c989d262916 = []byte{ + // 555 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x94, 0x4f, 0x8b, 0xd3, 0x4e, + 0x18, 0xc7, 0x9b, 0xdd, 0xf6, 0x47, 0x3b, 0xbf, 0xad, 0xd4, 0x01, 0x21, 0xf4, 0x90, 0x96, 0x9e, + 0x44, 0x70, 0x66, 0x77, 0x5d, 0x44, 0xf0, 0x96, 0xf5, 0x0f, 0x42, 0x57, 0x25, 0xab, 0x0b, 0xca, + 0x1e, 0x9c, 0x26, 0x8f, 0xe9, 0xd8, 0x26, 0x13, 0x92, 0xe9, 0x4a, 0x6f, 0xbe, 0x04, 0x5f, 0x56, + 0xf5, 0xb4, 0xc7, 0x3d, 0x15, 0x1b, 0xc1, 0x17, 0xe1, 0x49, 0x66, 0x9a, 0xf4, 0xaf, 0xa5, 0xc5, + 0x5b, 0xe7, 0x99, 0xe7, 0xf3, 0x99, 0xf9, 0x3e, 0x69, 0x82, 0x0e, 0x7b, 0x8f, 0x12, 0xc2, 0x05, + 0x65, 0x11, 0xa7, 0xae, 0x10, 0xb1, 0xc7, 0x43, 0x26, 0xb9, 0x08, 0xe9, 0xd5, 0x11, 0xeb, 0x47, + 0x5d, 0x76, 0x4c, 0x7d, 0x08, 0x21, 0x66, 0x12, 0x3c, 0x12, 0xc5, 0x42, 0x0a, 0xdc, 0x9c, 0x12, + 0x84, 0x45, 0x9c, 0x2c, 0x12, 0x24, 0x27, 0xea, 0xf7, 0x7d, 0x2e, 0xbb, 0x83, 0x0e, 0x71, 0x45, + 0x40, 0x7d, 0xe1, 0x0b, 0xaa, 0xc1, 0xce, 0xe0, 0xa3, 0x5e, 0xe9, 0x85, 0xfe, 0x35, 0x15, 0xd6, + 0xef, 0x6d, 0xbe, 0xc2, 0xea, 0xe1, 0xf5, 0x93, 0x79, 0x6f, 0xc0, 0xdc, 0x2e, 0x0f, 0x21, 0x1e, + 0xd2, 0xa8, 0xe7, 0xab, 0x42, 0x42, 0x03, 0x90, 0xec, 0x6f, 0x14, 0xdd, 0x44, 0xc5, 0x83, 0x50, + 0xf2, 0x00, 0xd6, 0x80, 0x87, 0xdb, 0x80, 0xc4, 0xed, 0x42, 0xc0, 0x56, 0xb9, 0xd6, 0x77, 0x03, + 0xdd, 0x6a, 0x03, 0x4b, 0xe0, 0x94, 0x85, 0x1e, 0xf7, 0x98, 0x04, 0xfc, 0x01, 0x95, 0xd5, 0xb5, + 0x3c, 0x26, 0x99, 0x69, 0x34, 0x8d, 0xbb, 0xff, 0x1f, 0x1f, 0x92, 0xf9, 0x04, 0x67, 0x76, 0x12, + 0xf5, 0x7c, 0x55, 0x48, 0x88, 0xea, 0x26, 0x57, 0x47, 0xe4, 0x55, 0xe7, 0x13, 0xb8, 0xf2, 0x0c, + 0x24, 0xb3, 0xf1, 0x68, 0xdc, 0x28, 0xa4, 0xe3, 0x06, 0x9a, 0xd7, 0x9c, 0x99, 0x15, 0x5f, 0xa0, + 0x62, 0x12, 0x81, 0x6b, 0xee, 0x69, 0xfb, 0x09, 0xd9, 0xf6, 0x7c, 0xc8, 0xf2, 0x0d, 0xcf, 0x23, + 0x70, 0xed, 0x83, 0xec, 0x84, 0xa2, 0x5a, 0x39, 0xda, 0xd7, 0xfa, 0x66, 0x20, 0xbc, 0xdc, 0xda, + 0xe6, 0x89, 0xc4, 0x97, 0x6b, 0x81, 0xc8, 0x6e, 0x81, 0x14, 0xad, 0xe3, 0xd4, 0xb2, 0xc3, 0xca, + 0x79, 0x65, 0x21, 0xcc, 0x5b, 0x54, 0xe2, 0x12, 0x82, 0xc4, 0xdc, 0x6b, 0xee, 0xaf, 0xcc, 0x6a, + 0xa7, 0x34, 0x76, 0x35, 0x93, 0x97, 0x5e, 0x28, 0x8d, 0x33, 0xb5, 0xb5, 0x7e, 0xed, 0xaf, 0x66, + 0x51, 0x41, 0x31, 0x45, 0x95, 0xbe, 0xaa, 0xbe, 0x64, 0x01, 0xe8, 0x30, 0x15, 0xfb, 0x76, 0xc6, + 0x57, 0xda, 0xf9, 0x86, 0x33, 0xef, 0xc1, 0xef, 0x50, 0x39, 0xe2, 0xa1, 0xff, 0x86, 0x07, 0x90, + 0xcd, 0x9b, 0xee, 0x16, 0xfe, 0x8c, 0xbb, 0xb1, 0x50, 0x98, 0x7d, 0xa0, 0x92, 0xbf, 0xce, 0x24, + 0xce, 0x4c, 0x87, 0x2f, 0x51, 0x25, 0x86, 0x10, 0x3e, 0x6b, 0xf7, 0xfe, 0xbf, 0xb9, 0xab, 0xea, + 0xe2, 0x4e, 0x6e, 0x71, 0xe6, 0x42, 0xfc, 0x18, 0x55, 0x3b, 0x3c, 0x64, 0xf1, 0xf0, 0x02, 0xe2, + 0x84, 0x8b, 0xd0, 0x2c, 0xea, 0xb4, 0x77, 0xb2, 0xb4, 0x55, 0x7b, 0x71, 0xd3, 0x59, 0xee, 0xc5, + 0x4f, 0x50, 0x0d, 0x82, 0x41, 0x5f, 0x0f, 0x3e, 0xe7, 0x4b, 0x9a, 0x37, 0x33, 0xbe, 0xf6, 0x74, + 0x65, 0xdf, 0x59, 0x23, 0xb0, 0x8b, 0xca, 0x89, 0x54, 0x6f, 0x8b, 0x3f, 0x34, 0xff, 0xd3, 0xf4, + 0xf3, 0xfc, 0x8f, 0x70, 0x9e, 0xd5, 0x7f, 0x8f, 0x1b, 0x0f, 0x36, 0x7f, 0x0d, 0xc8, 0x69, 0xbe, + 0x06, 0x4f, 0x3f, 0x9d, 0x1c, 0x73, 0x66, 0x62, 0xfb, 0xd9, 0x68, 0x62, 0x15, 0xae, 0x27, 0x56, + 0xe1, 0x66, 0x62, 0x15, 0xbe, 0xa4, 0x96, 0x31, 0x4a, 0x2d, 0xe3, 0x3a, 0xb5, 
0x8c, 0x9b, 0xd4, + 0x32, 0x7e, 0xa4, 0x96, 0xf1, 0xf5, 0xa7, 0x55, 0x78, 0xdf, 0xdc, 0xf6, 0xd5, 0xfb, 0x13, 0x00, + 0x00, 0xff, 0xff, 0x7f, 0x15, 0x63, 0xd0, 0x18, 0x05, 0x00, 0x00, } func (m *LeaseCandidate) Marshal() (dAtA []byte, err error) { @@ -290,15 +289,11 @@ func (m *LeaseCandidateSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l - if len(m.PreferredStrategies) > 0 { - for iNdEx := len(m.PreferredStrategies) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.PreferredStrategies[iNdEx]) - copy(dAtA[i:], m.PreferredStrategies[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.PreferredStrategies[iNdEx]))) - i-- - dAtA[i] = 0x32 - } - } + i -= len(m.Strategy) + copy(dAtA[i:], m.Strategy) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Strategy))) + i-- + dAtA[i] = 0x32 i -= len(m.EmulationVersion) copy(dAtA[i:], m.EmulationVersion) i = encodeVarintGenerated(dAtA, i, uint64(len(m.EmulationVersion))) @@ -402,12 +397,8 @@ func (m *LeaseCandidateSpec) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) l = len(m.EmulationVersion) n += 1 + l + sovGenerated(uint64(l)) - if len(m.PreferredStrategies) > 0 { - for _, s := range m.PreferredStrategies { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } + l = len(m.Strategy) + n += 1 + l + sovGenerated(uint64(l)) return n } @@ -454,7 +445,7 @@ func (this *LeaseCandidateSpec) String() string { `RenewTime:` + strings.Replace(fmt.Sprintf("%v", this.RenewTime), "MicroTime", "v1.MicroTime", 1) + `,`, `BinaryVersion:` + fmt.Sprintf("%v", this.BinaryVersion) + `,`, `EmulationVersion:` + fmt.Sprintf("%v", this.EmulationVersion) + `,`, - `PreferredStrategies:` + fmt.Sprintf("%v", this.PreferredStrategies) + `,`, + `Strategy:` + fmt.Sprintf("%v", this.Strategy) + `,`, `}`, }, "") return s @@ -899,7 +890,7 @@ func (m *LeaseCandidateSpec) Unmarshal(dAtA []byte) error { iNdEx = postIndex case 6: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PreferredStrategies", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Strategy", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -927,7 +918,7 @@ func (m *LeaseCandidateSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.PreferredStrategies = append(m.PreferredStrategies, k8s_io_api_coordination_v1.CoordinatedLeaseStrategy(dAtA[iNdEx:postIndex])) + m.Strategy = k8s_io_api_coordination_v1.CoordinatedLeaseStrategy(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex diff --git a/vendor/k8s.io/api/coordination/v1alpha1/generated.proto b/vendor/k8s.io/api/coordination/v1alpha2/generated.proto similarity index 79% rename from vendor/k8s.io/api/coordination/v1alpha1/generated.proto rename to vendor/k8s.io/api/coordination/v1alpha2/generated.proto index 57895ad56..7e56cd7f9 100644 --- a/vendor/k8s.io/api/coordination/v1alpha1/generated.proto +++ b/vendor/k8s.io/api/coordination/v1alpha2/generated.proto @@ -19,7 +19,7 @@ limitations under the License. syntax = "proto2"; -package k8s.io.api.coordination.v1alpha1; +package k8s.io.api.coordination.v1alpha2; import "k8s.io/api/coordination/v1/generated.proto"; import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; @@ -27,7 +27,7 @@ import "k8s.io/apimachinery/pkg/runtime/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; // Package-wide variables from generator "generated". 
-option go_package = "k8s.io/api/coordination/v1alpha1"; +option go_package = "k8s.io/api/coordination/v1alpha2"; // LeaseCandidate defines a candidate for a Lease object. // Candidates are created such that coordinated leader election will pick the best leader from the list of candidates. @@ -78,8 +78,8 @@ message LeaseCandidateSpec { optional .k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime renewTime = 3; // BinaryVersion is the binary version. It must be in a semver format without leading `v`. - // This field is required when strategy is "OldestEmulationVersion" - // +optional + // This field is required. + // +required optional string binaryVersion = 4; // EmulationVersion is the emulation version. It must be in a semver format without leading `v`. @@ -88,18 +88,13 @@ message LeaseCandidateSpec { // +optional optional string emulationVersion = 5; - // PreferredStrategies indicates the list of strategies for picking the leader for coordinated leader election. - // The list is ordered, and the first strategy supersedes all other strategies. The list is used by coordinated - // leader election to make a decision about the final election strategy. This follows as - // - If all clients have strategy X as the first element in this list, strategy X will be used. - // - If a candidate has strategy [X] and another candidate has strategy [Y, X], Y supersedes X and strategy Y - // will be used. - // - If a candidate has strategy [X, Y] and another candidate has strategy [Y, X], this is a user error and leader - // election will not operate the Lease until resolved. + // Strategy is the strategy that coordinated leader election will use for picking the leader. + // If multiple candidates for the same Lease return different strategies, the strategy provided + // by the candidate with the latest BinaryVersion will be used. If there is still conflict, + // this is a user error and coordinated leader election will not operate the Lease until resolved. // (Alpha) Using this field requires the CoordinatedLeaderElection feature gate to be enabled. // +featureGate=CoordinatedLeaderElection - // +listType=atomic // +required - repeated string preferredStrategies = 6; + optional string strategy = 6; } diff --git a/vendor/k8s.io/api/coordination/v1alpha1/register.go b/vendor/k8s.io/api/coordination/v1alpha2/register.go similarity index 95% rename from vendor/k8s.io/api/coordination/v1alpha1/register.go rename to vendor/k8s.io/api/coordination/v1alpha2/register.go index 6e57905a1..86bb8e0f2 100644 --- a/vendor/k8s.io/api/coordination/v1alpha1/register.go +++ b/vendor/k8s.io/api/coordination/v1alpha2/register.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Kubernetes Authors. +Copyright 2024 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package v1alpha1 +package v1alpha2 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -26,7 +26,7 @@ import ( const GroupName = "coordination.k8s.io" // SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"} +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha2"} // Resource takes an unqualified resource and returns a Group qualified GroupResource func Resource(resource string) schema.GroupResource { diff --git a/vendor/k8s.io/api/coordination/v1alpha1/types.go b/vendor/k8s.io/api/coordination/v1alpha2/types.go similarity index 75% rename from vendor/k8s.io/api/coordination/v1alpha1/types.go rename to vendor/k8s.io/api/coordination/v1alpha2/types.go index 14066600c..2f53b097a 100644 --- a/vendor/k8s.io/api/coordination/v1alpha1/types.go +++ b/vendor/k8s.io/api/coordination/v1alpha2/types.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Kubernetes Authors. +Copyright 2024 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package v1alpha1 +package v1alpha2 import ( v1 "k8s.io/api/coordination/v1" @@ -23,7 +23,7 @@ import ( // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +k8s:prerelease-lifecycle-gen:introduced=1.31 +// +k8s:prerelease-lifecycle-gen:introduced=1.32 // LeaseCandidate defines a candidate for a Lease object. // Candidates are created such that coordinated leader election will pick the best leader from the list of candidates. @@ -61,31 +61,26 @@ type LeaseCandidateSpec struct { // +optional RenewTime *metav1.MicroTime `json:"renewTime,omitempty" protobuf:"bytes,3,opt,name=renewTime"` // BinaryVersion is the binary version. It must be in a semver format without leading `v`. - // This field is required when strategy is "OldestEmulationVersion" - // +optional - BinaryVersion string `json:"binaryVersion,omitempty" protobuf:"bytes,4,opt,name=binaryVersion"` + // This field is required. + // +required + BinaryVersion string `json:"binaryVersion" protobuf:"bytes,4,name=binaryVersion"` // EmulationVersion is the emulation version. It must be in a semver format without leading `v`. // EmulationVersion must be less than or equal to BinaryVersion. // This field is required when strategy is "OldestEmulationVersion" // +optional EmulationVersion string `json:"emulationVersion,omitempty" protobuf:"bytes,5,opt,name=emulationVersion"` - // PreferredStrategies indicates the list of strategies for picking the leader for coordinated leader election. - // The list is ordered, and the first strategy supersedes all other strategies. The list is used by coordinated - // leader election to make a decision about the final election strategy. This follows as - // - If all clients have strategy X as the first element in this list, strategy X will be used. - // - If a candidate has strategy [X] and another candidate has strategy [Y, X], Y supersedes X and strategy Y - // will be used. - // - If a candidate has strategy [X, Y] and another candidate has strategy [Y, X], this is a user error and leader - // election will not operate the Lease until resolved. + // Strategy is the strategy that coordinated leader election will use for picking the leader. 
+ // If multiple candidates for the same Lease return different strategies, the strategy provided + // by the candidate with the latest BinaryVersion will be used. If there is still conflict, + // this is a user error and coordinated leader election will not operate the Lease until resolved. // (Alpha) Using this field requires the CoordinatedLeaderElection feature gate to be enabled. // +featureGate=CoordinatedLeaderElection - // +listType=atomic // +required - PreferredStrategies []v1.CoordinatedLeaseStrategy `json:"preferredStrategies,omitempty" protobuf:"bytes,6,opt,name=preferredStrategies"` + Strategy v1.CoordinatedLeaseStrategy `json:"strategy,omitempty" protobuf:"bytes,6,opt,name=strategy"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +k8s:prerelease-lifecycle-gen:introduced=1.31 +// +k8s:prerelease-lifecycle-gen:introduced=1.32 // LeaseCandidateList is a list of Lease objects. type LeaseCandidateList struct { diff --git a/vendor/k8s.io/api/coordination/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/coordination/v1alpha2/types_swagger_doc_generated.go similarity index 51% rename from vendor/k8s.io/api/coordination/v1alpha1/types_swagger_doc_generated.go rename to vendor/k8s.io/api/coordination/v1alpha2/types_swagger_doc_generated.go index 0e52809c8..39534e6ad 100644 --- a/vendor/k8s.io/api/coordination/v1alpha1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/coordination/v1alpha2/types_swagger_doc_generated.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package v1alpha1 +package v1alpha2 // This file contains a collection of methods that can be used from go-restful to // generate Swagger API documentation for its models. Please read this PR for more @@ -48,13 +48,13 @@ func (LeaseCandidateList) SwaggerDoc() map[string]string { } var map_LeaseCandidateSpec = map[string]string{ - "": "LeaseCandidateSpec is a specification of a Lease.", - "leaseName": "LeaseName is the name of the lease for which this candidate is contending. This field is immutable.", - "pingTime": "PingTime is the last time that the server has requested the LeaseCandidate to renew. It is only done during leader election to check if any LeaseCandidates have become ineligible. When PingTime is updated, the LeaseCandidate will respond by updating RenewTime.", - "renewTime": "RenewTime is the time that the LeaseCandidate was last updated. Any time a Lease needs to do leader election, the PingTime field is updated to signal to the LeaseCandidate that they should update the RenewTime. Old LeaseCandidate objects are also garbage collected if it has been hours since the last renew. The PingTime field is updated regularly to prevent garbage collection for still active LeaseCandidates.", - "binaryVersion": "BinaryVersion is the binary version. It must be in a semver format without leading `v`. This field is required when strategy is \"OldestEmulationVersion\"", - "emulationVersion": "EmulationVersion is the emulation version. It must be in a semver format without leading `v`. EmulationVersion must be less than or equal to BinaryVersion. This field is required when strategy is \"OldestEmulationVersion\"", - "preferredStrategies": "PreferredStrategies indicates the list of strategies for picking the leader for coordinated leader election. The list is ordered, and the first strategy supersedes all other strategies. 
The list is used by coordinated leader election to make a decision about the final election strategy. This follows as - If all clients have strategy X as the first element in this list, strategy X will be used. - If a candidate has strategy [X] and another candidate has strategy [Y, X], Y supersedes X and strategy Y\n will be used.\n- If a candidate has strategy [X, Y] and another candidate has strategy [Y, X], this is a user error and leader\n election will not operate the Lease until resolved.\n(Alpha) Using this field requires the CoordinatedLeaderElection feature gate to be enabled.", + "": "LeaseCandidateSpec is a specification of a Lease.", + "leaseName": "LeaseName is the name of the lease for which this candidate is contending. This field is immutable.", + "pingTime": "PingTime is the last time that the server has requested the LeaseCandidate to renew. It is only done during leader election to check if any LeaseCandidates have become ineligible. When PingTime is updated, the LeaseCandidate will respond by updating RenewTime.", + "renewTime": "RenewTime is the time that the LeaseCandidate was last updated. Any time a Lease needs to do leader election, the PingTime field is updated to signal to the LeaseCandidate that they should update the RenewTime. Old LeaseCandidate objects are also garbage collected if it has been hours since the last renew. The PingTime field is updated regularly to prevent garbage collection for still active LeaseCandidates.", + "binaryVersion": "BinaryVersion is the binary version. It must be in a semver format without leading `v`. This field is required.", + "emulationVersion": "EmulationVersion is the emulation version. It must be in a semver format without leading `v`. EmulationVersion must be less than or equal to BinaryVersion. This field is required when strategy is \"OldestEmulationVersion\"", + "strategy": "Strategy is the strategy that coordinated leader election will use for picking the leader. If multiple candidates for the same Lease return different strategies, the strategy provided by the candidate with the latest BinaryVersion will be used. If there is still conflict, this is a user error and coordinated leader election will not operate the Lease until resolved. (Alpha) Using this field requires the CoordinatedLeaderElection feature gate to be enabled.", } func (LeaseCandidateSpec) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/coordination/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/coordination/v1alpha2/zz_generated.deepcopy.go similarity index 93% rename from vendor/k8s.io/api/coordination/v1alpha1/zz_generated.deepcopy.go rename to vendor/k8s.io/api/coordination/v1alpha2/zz_generated.deepcopy.go index 9cf15d21d..a20284797 100644 --- a/vendor/k8s.io/api/coordination/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/coordination/v1alpha2/zz_generated.deepcopy.go @@ -19,10 +19,9 @@ limitations under the License. // Code generated by deepcopy-gen. DO NOT EDIT. 
-package v1alpha1 +package v1alpha2 import ( - v1 "k8s.io/api/coordination/v1" runtime "k8s.io/apimachinery/pkg/runtime" ) @@ -97,11 +96,6 @@ func (in *LeaseCandidateSpec) DeepCopyInto(out *LeaseCandidateSpec) { in, out := &in.RenewTime, &out.RenewTime *out = (*in).DeepCopy() } - if in.PreferredStrategies != nil { - in, out := &in.PreferredStrategies, &out.PreferredStrategies - *out = make([]v1.CoordinatedLeaseStrategy, len(*in)) - copy(*out, *in) - } return } diff --git a/vendor/k8s.io/api/coordination/v1alpha1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/coordination/v1alpha2/zz_generated.prerelease-lifecycle.go similarity index 96% rename from vendor/k8s.io/api/coordination/v1alpha1/zz_generated.prerelease-lifecycle.go rename to vendor/k8s.io/api/coordination/v1alpha2/zz_generated.prerelease-lifecycle.go index f42bef65c..a99b9ab5b 100644 --- a/vendor/k8s.io/api/coordination/v1alpha1/zz_generated.prerelease-lifecycle.go +++ b/vendor/k8s.io/api/coordination/v1alpha2/zz_generated.prerelease-lifecycle.go @@ -19,40 +19,40 @@ limitations under the License. // Code generated by prerelease-lifecycle-gen. DO NOT EDIT. -package v1alpha1 +package v1alpha2 // APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. func (in *LeaseCandidate) APILifecycleIntroduced() (major, minor int) { - return 1, 31 + return 1, 32 } // APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. func (in *LeaseCandidate) APILifecycleDeprecated() (major, minor int) { - return 1, 34 + return 1, 35 } // APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. func (in *LeaseCandidate) APILifecycleRemoved() (major, minor int) { - return 1, 37 + return 1, 38 } // APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. func (in *LeaseCandidateList) APILifecycleIntroduced() (major, minor int) { - return 1, 31 + return 1, 32 } // APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. func (in *LeaseCandidateList) APILifecycleDeprecated() (major, minor int) { - return 1, 34 + return 1, 35 } // APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. 
func (in *LeaseCandidateList) APILifecycleRemoved() (major, minor int) { - return 1, 37 + return 1, 38 } diff --git a/vendor/k8s.io/api/core/v1/annotation_key_constants.go b/vendor/k8s.io/api/core/v1/annotation_key_constants.go index 5cf6f329f..62e86402e 100644 --- a/vendor/k8s.io/api/core/v1/annotation_key_constants.go +++ b/vendor/k8s.io/api/core/v1/annotation_key_constants.go @@ -23,7 +23,7 @@ const ( // webhook backend fails. ImagePolicyFailedOpenKey string = "alpha.image-policy.k8s.io/failed-open" - // MirrorAnnotationKey represents the annotation key set by kubelets when creating mirror pods + // MirrorPodAnnotationKey represents the annotation key set by kubelets when creating mirror pods MirrorPodAnnotationKey string = "kubernetes.io/config.mirror" // TolerationsAnnotationKey represents the key of tolerations data (json serialized) @@ -80,7 +80,7 @@ const ( // This annotation can be attached to node. ObjectTTLAnnotationKey string = "node.alpha.kubernetes.io/ttl" - // annotation key prefix used to identify non-convertible json paths. + // NonConvertibleAnnotationPrefix is the annotation key prefix used to identify non-convertible json paths. NonConvertibleAnnotationPrefix = "non-convertible.kubernetes.io" kubectlPrefix = "kubectl.kubernetes.io/" diff --git a/vendor/k8s.io/api/core/v1/generated.pb.go b/vendor/k8s.io/api/core/v1/generated.pb.go index 5654ee482..9d466c6d7 100644 --- a/vendor/k8s.io/api/core/v1/generated.pb.go +++ b/vendor/k8s.io/api/core/v1/generated.pb.go @@ -6758,1011 +6758,1015 @@ func init() { } var fileDescriptor_6c07b07c062484ab = []byte{ - // 16056 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0xbd, 0x79, 0x90, 0x1c, 0xd7, - 0x79, 0x18, 0xae, 0x9e, 0xd9, 0xf3, 0xdb, 0xfb, 0x2d, 0x8e, 0xc5, 0x12, 0xc0, 0x80, 0x4d, 0x12, - 0x04, 0xaf, 0x85, 0xc0, 0x43, 0x84, 0x48, 0x8a, 0xe6, 0x9e, 0xc0, 0x12, 0xd8, 0xc5, 0xf0, 0xcd, - 0x02, 0x90, 0x28, 0x4a, 0x56, 0x63, 0xe6, 0xed, 0x6e, 0x6b, 0x67, 0xba, 0x87, 0xdd, 0x3d, 0x0b, - 0x2c, 0x7e, 0x72, 0xd9, 0x96, 0x7f, 0x96, 0x2d, 0xd9, 0xa9, 0x52, 0xb9, 0x9c, 0x38, 0x25, 0xbb, - 0x5c, 0x29, 0xc7, 0xf1, 0x11, 0xc5, 0x4e, 0x14, 0x39, 0xb6, 0x63, 0xf9, 0xca, 0x55, 0x65, 0x27, - 0x29, 0xc7, 0x71, 0x55, 0x24, 0x57, 0x5c, 0x59, 0x47, 0xeb, 0x54, 0xb9, 0xfc, 0x47, 0x6c, 0x97, - 0x93, 0x3f, 0x92, 0x2d, 0x27, 0x4e, 0xbd, 0xb3, 0xdf, 0xeb, 0x63, 0x66, 0x16, 0x04, 0x56, 0x14, - 0x8b, 0xff, 0xcd, 0xbc, 0xef, 0x7b, 0xdf, 0x7b, 0xfd, 0xce, 0xef, 0x7d, 0x27, 0xd8, 0x5b, 0x17, - 0xc3, 0x19, 0xd7, 0x3f, 0xef, 0x34, 0xdd, 0xf3, 0x55, 0x3f, 0x20, 0xe7, 0xb7, 0x2f, 0x9c, 0xdf, - 0x20, 0x1e, 0x09, 0x9c, 0x88, 0xd4, 0x66, 0x9a, 0x81, 0x1f, 0xf9, 0x08, 0x71, 0x9c, 0x19, 0xa7, - 0xe9, 0xce, 0x50, 0x9c, 0x99, 0xed, 0x0b, 0xd3, 0xcf, 0x6c, 0xb8, 0xd1, 0x66, 0xeb, 0xd6, 0x4c, - 0xd5, 0x6f, 0x9c, 0xdf, 0xf0, 0x37, 0xfc, 0xf3, 0x0c, 0xf5, 0x56, 0x6b, 0x9d, 0xfd, 0x63, 0x7f, - 0xd8, 0x2f, 0x4e, 0x62, 0xfa, 0xf9, 0xb8, 0x99, 0x86, 0x53, 0xdd, 0x74, 0x3d, 0x12, 0xec, 0x9c, - 0x6f, 0x6e, 0x6d, 0xb0, 0x76, 0x03, 0x12, 0xfa, 0xad, 0xa0, 0x4a, 0x92, 0x0d, 0xb7, 0xad, 0x15, - 0x9e, 0x6f, 0x90, 0xc8, 0xc9, 0xe8, 0xee, 0xf4, 0xf9, 0xbc, 0x5a, 0x41, 0xcb, 0x8b, 0xdc, 0x46, - 0xba, 0x99, 0x0f, 0x75, 0xaa, 0x10, 0x56, 0x37, 0x49, 0xc3, 0x49, 0xd5, 0x7b, 0x2e, 0xaf, 0x5e, - 0x2b, 0x72, 0xeb, 0xe7, 0x5d, 0x2f, 0x0a, 0xa3, 0x20, 0x59, 0xc9, 0xfe, 0x86, 0x05, 0x67, 0x66, - 0x6f, 0x56, 0x16, 0xeb, 0x4e, 0x18, 0xb9, 0xd5, 0xb9, 0xba, 0x5f, 0xdd, 0xaa, 0x44, 0x7e, 0x40, - 0x6e, 0xf8, 0xf5, 0x56, 0x83, 0x54, 0xd8, 0x40, 
0xa0, 0xa7, 0x61, 0x60, 0x9b, 0xfd, 0x5f, 0x5e, - 0x98, 0xb2, 0xce, 0x58, 0xe7, 0x06, 0xe7, 0xc6, 0x7f, 0x67, 0xb7, 0xf4, 0x81, 0xbd, 0xdd, 0xd2, - 0xc0, 0x0d, 0x51, 0x8e, 0x15, 0x06, 0x3a, 0x0b, 0x7d, 0xeb, 0xe1, 0xda, 0x4e, 0x93, 0x4c, 0x15, - 0x18, 0xee, 0xa8, 0xc0, 0xed, 0x5b, 0xaa, 0xd0, 0x52, 0x2c, 0xa0, 0xe8, 0x3c, 0x0c, 0x36, 0x9d, - 0x20, 0x72, 0x23, 0xd7, 0xf7, 0xa6, 0x8a, 0x67, 0xac, 0x73, 0xbd, 0x73, 0x13, 0x02, 0x75, 0xb0, - 0x2c, 0x01, 0x38, 0xc6, 0xa1, 0xdd, 0x08, 0x88, 0x53, 0xbb, 0xe6, 0xd5, 0x77, 0xa6, 0x7a, 0xce, - 0x58, 0xe7, 0x06, 0xe2, 0x6e, 0x60, 0x51, 0x8e, 0x15, 0x86, 0xfd, 0xa5, 0x02, 0x0c, 0xcc, 0xae, - 0xaf, 0xbb, 0x9e, 0x1b, 0xed, 0xa0, 0x1b, 0x30, 0xec, 0xf9, 0x35, 0x22, 0xff, 0xb3, 0xaf, 0x18, - 0x7a, 0xf6, 0xcc, 0x4c, 0x7a, 0x29, 0xcd, 0xac, 0x6a, 0x78, 0x73, 0xe3, 0x7b, 0xbb, 0xa5, 0x61, - 0xbd, 0x04, 0x1b, 0x74, 0x10, 0x86, 0xa1, 0xa6, 0x5f, 0x53, 0x64, 0x0b, 0x8c, 0x6c, 0x29, 0x8b, - 0x6c, 0x39, 0x46, 0x9b, 0x1b, 0xdb, 0xdb, 0x2d, 0x0d, 0x69, 0x05, 0x58, 0x27, 0x82, 0x6e, 0xc1, - 0x18, 0xfd, 0xeb, 0x45, 0xae, 0xa2, 0x5b, 0x64, 0x74, 0x1f, 0xc9, 0xa3, 0xab, 0xa1, 0xce, 0x4d, - 0xee, 0xed, 0x96, 0xc6, 0x12, 0x85, 0x38, 0x49, 0xd0, 0xfe, 0x61, 0x0b, 0xc6, 0x66, 0x9b, 0xcd, - 0xd9, 0xa0, 0xe1, 0x07, 0xe5, 0xc0, 0x5f, 0x77, 0xeb, 0x04, 0xbd, 0x08, 0x3d, 0x11, 0x9d, 0x35, - 0x3e, 0xc3, 0x8f, 0x88, 0xa1, 0xed, 0xa1, 0x73, 0xb5, 0xbf, 0x5b, 0x9a, 0x4c, 0xa0, 0xb3, 0xa9, - 0x64, 0x15, 0xd0, 0x6b, 0x30, 0x5e, 0xf7, 0xab, 0x4e, 0x7d, 0xd3, 0x0f, 0x23, 0x01, 0x15, 0x53, - 0x7f, 0x64, 0x6f, 0xb7, 0x34, 0x7e, 0x35, 0x01, 0xc3, 0x29, 0x6c, 0xfb, 0x2e, 0x8c, 0xce, 0x46, - 0x91, 0x53, 0xdd, 0x24, 0x35, 0xbe, 0xa0, 0xd0, 0xf3, 0xd0, 0xe3, 0x39, 0x0d, 0xd9, 0x99, 0x33, - 0xb2, 0x33, 0xab, 0x4e, 0x83, 0x76, 0x66, 0xfc, 0xba, 0xe7, 0xbe, 0xdd, 0x12, 0x8b, 0x94, 0x96, - 0x61, 0x86, 0x8d, 0x9e, 0x05, 0xa8, 0x91, 0x6d, 0xb7, 0x4a, 0xca, 0x4e, 0xb4, 0x29, 0xfa, 0x80, - 0x44, 0x5d, 0x58, 0x50, 0x10, 0xac, 0x61, 0xd9, 0x77, 0x60, 0x70, 0x76, 0xdb, 0x77, 0x6b, 0x65, - 0xbf, 0x16, 0xa2, 0x2d, 0x18, 0x6b, 0x06, 0x64, 0x9d, 0x04, 0xaa, 0x68, 0xca, 0x3a, 0x53, 0x3c, - 0x37, 0xf4, 0xec, 0xb9, 0xcc, 0xb1, 0x37, 0x51, 0x17, 0xbd, 0x28, 0xd8, 0x99, 0x3b, 0x2e, 0xda, - 0x1b, 0x4b, 0x40, 0x71, 0x92, 0xb2, 0xfd, 0xaf, 0x0b, 0x70, 0x74, 0xf6, 0x6e, 0x2b, 0x20, 0x0b, - 0x6e, 0xb8, 0x95, 0xdc, 0x70, 0x35, 0x37, 0xdc, 0x5a, 0x8d, 0x47, 0x40, 0xad, 0xf4, 0x05, 0x51, - 0x8e, 0x15, 0x06, 0x7a, 0x06, 0xfa, 0xe9, 0xef, 0xeb, 0x78, 0x59, 0x7c, 0xf2, 0xa4, 0x40, 0x1e, - 0x5a, 0x70, 0x22, 0x67, 0x81, 0x83, 0xb0, 0xc4, 0x41, 0x2b, 0x30, 0x54, 0x65, 0xe7, 0xc3, 0xc6, - 0x8a, 0x5f, 0x23, 0x6c, 0x6d, 0x0d, 0xce, 0x3d, 0x45, 0xd1, 0xe7, 0xe3, 0xe2, 0xfd, 0xdd, 0xd2, - 0x14, 0xef, 0x9b, 0x20, 0xa1, 0xc1, 0xb0, 0x5e, 0x1f, 0xd9, 0x6a, 0xbb, 0xf7, 0x30, 0x4a, 0x90, - 0xb1, 0xd5, 0xcf, 0x69, 0x3b, 0xb7, 0x97, 0xed, 0xdc, 0xe1, 0xec, 0x5d, 0x8b, 0x2e, 0x40, 0xcf, - 0x96, 0xeb, 0xd5, 0xa6, 0xfa, 0x18, 0xad, 0x53, 0x74, 0xce, 0xaf, 0xb8, 0x5e, 0x6d, 0x7f, 0xb7, - 0x34, 0x61, 0x74, 0x87, 0x16, 0x62, 0x86, 0x6a, 0xff, 0x0f, 0x0b, 0x4a, 0x0c, 0xb6, 0xe4, 0xd6, - 0x49, 0x99, 0x04, 0xa1, 0x1b, 0x46, 0xc4, 0x8b, 0x8c, 0x01, 0x7d, 0x16, 0x20, 0x24, 0xd5, 0x80, - 0x44, 0xda, 0x90, 0xaa, 0x85, 0x51, 0x51, 0x10, 0xac, 0x61, 0xd1, 0xf3, 0x29, 0xdc, 0x74, 0x02, - 0xb6, 0xbe, 0xc4, 0xc0, 0xaa, 0xf3, 0xa9, 0x22, 0x01, 0x38, 0xc6, 0x31, 0xce, 0xa7, 0x62, 0xa7, - 0xf3, 0x09, 0x7d, 0x04, 0xc6, 0xe2, 0xc6, 0xc2, 0xa6, 0x53, 0x95, 0x03, 0xc8, 0x76, 0x70, 0xc5, - 0x04, 0xe1, 0x24, 0xae, 0xfd, 0x0f, 0x2d, 0xb1, 0x78, 0xe8, 0x57, 0xbf, 
0xcb, 0xbf, 0xd5, 0xfe, - 0x55, 0x0b, 0xfa, 0xe7, 0x5c, 0xaf, 0xe6, 0x7a, 0x1b, 0xe8, 0x53, 0x30, 0x40, 0xaf, 0xca, 0x9a, - 0x13, 0x39, 0xe2, 0x18, 0xfe, 0xa0, 0xb6, 0xb7, 0xd4, 0xcd, 0x35, 0xd3, 0xdc, 0xda, 0xa0, 0x05, - 0xe1, 0x0c, 0xc5, 0xa6, 0xbb, 0xed, 0xda, 0xad, 0x4f, 0x93, 0x6a, 0xb4, 0x42, 0x22, 0x27, 0xfe, - 0x9c, 0xb8, 0x0c, 0x2b, 0xaa, 0xe8, 0x0a, 0xf4, 0x45, 0x4e, 0xb0, 0x41, 0x22, 0x71, 0x1e, 0x67, - 0x9e, 0x9b, 0xbc, 0x26, 0xa6, 0x3b, 0x92, 0x78, 0x55, 0x12, 0xdf, 0x52, 0x6b, 0xac, 0x2a, 0x16, - 0x24, 0xec, 0xff, 0xd3, 0x0f, 0x27, 0xe6, 0x2b, 0xcb, 0x39, 0xeb, 0xea, 0x2c, 0xf4, 0xd5, 0x02, - 0x77, 0x9b, 0x04, 0x62, 0x9c, 0x15, 0x95, 0x05, 0x56, 0x8a, 0x05, 0x14, 0x5d, 0x84, 0x61, 0x7e, - 0x3f, 0x5e, 0x76, 0xbc, 0x5a, 0x7c, 0x3c, 0x0a, 0xec, 0xe1, 0x1b, 0x1a, 0x0c, 0x1b, 0x98, 0x07, - 0x5c, 0x54, 0x67, 0x13, 0x9b, 0x31, 0xef, 0xee, 0xfd, 0xbc, 0x05, 0xe3, 0xbc, 0x99, 0xd9, 0x28, - 0x0a, 0xdc, 0x5b, 0xad, 0x88, 0x84, 0x53, 0xbd, 0xec, 0xa4, 0x9b, 0xcf, 0x1a, 0xad, 0xdc, 0x11, - 0x98, 0xb9, 0x91, 0xa0, 0xc2, 0x0f, 0xc1, 0x29, 0xd1, 0xee, 0x78, 0x12, 0x8c, 0x53, 0xcd, 0xa2, - 0xef, 0xb3, 0x60, 0xba, 0xea, 0x7b, 0x51, 0xe0, 0xd7, 0xeb, 0x24, 0x28, 0xb7, 0x6e, 0xd5, 0xdd, - 0x70, 0x93, 0xaf, 0x53, 0x4c, 0xd6, 0xd9, 0x49, 0x90, 0x33, 0x87, 0x0a, 0x49, 0xcc, 0xe1, 0xe9, - 0xbd, 0xdd, 0xd2, 0xf4, 0x7c, 0x2e, 0x29, 0xdc, 0xa6, 0x19, 0xb4, 0x05, 0x88, 0xde, 0xec, 0x95, - 0xc8, 0xd9, 0x20, 0x71, 0xe3, 0xfd, 0xdd, 0x37, 0x7e, 0x6c, 0x6f, 0xb7, 0x84, 0x56, 0x53, 0x24, - 0x70, 0x06, 0x59, 0xf4, 0x36, 0x1c, 0xa1, 0xa5, 0xa9, 0x6f, 0x1d, 0xe8, 0xbe, 0xb9, 0xa9, 0xbd, - 0xdd, 0xd2, 0x91, 0xd5, 0x0c, 0x22, 0x38, 0x93, 0x34, 0xfa, 0x1e, 0x0b, 0x4e, 0xc4, 0x9f, 0xbf, - 0x78, 0xa7, 0xe9, 0x78, 0xb5, 0xb8, 0xe1, 0xc1, 0xee, 0x1b, 0xa6, 0x67, 0xf2, 0x89, 0xf9, 0x3c, - 0x4a, 0x38, 0xbf, 0x11, 0xe4, 0xc1, 0x24, 0xed, 0x5a, 0xb2, 0x6d, 0xe8, 0xbe, 0xed, 0xe3, 0x7b, - 0xbb, 0xa5, 0xc9, 0xd5, 0x34, 0x0d, 0x9c, 0x45, 0x78, 0x7a, 0x1e, 0x8e, 0x66, 0xae, 0x4e, 0x34, - 0x0e, 0xc5, 0x2d, 0xc2, 0x99, 0xc0, 0x41, 0x4c, 0x7f, 0xa2, 0x23, 0xd0, 0xbb, 0xed, 0xd4, 0x5b, - 0x62, 0x63, 0x62, 0xfe, 0xe7, 0xa5, 0xc2, 0x45, 0xcb, 0xfe, 0x37, 0x45, 0x18, 0x9b, 0xaf, 0x2c, - 0xdf, 0xd3, 0xae, 0xd7, 0xaf, 0xbd, 0x42, 0xdb, 0x6b, 0x2f, 0xbe, 0x44, 0x8b, 0xb9, 0x97, 0xe8, - 0x77, 0x67, 0x6c, 0xd9, 0x1e, 0xb6, 0x65, 0x3f, 0x9c, 0xb3, 0x65, 0xef, 0xf3, 0x46, 0xdd, 0xce, - 0x59, 0xb5, 0xbd, 0x6c, 0x02, 0x33, 0x39, 0x24, 0xc6, 0xfb, 0x25, 0x8f, 0xda, 0x03, 0x2e, 0xdd, - 0xfb, 0x33, 0x8f, 0x55, 0x18, 0x9e, 0x77, 0x9a, 0xce, 0x2d, 0xb7, 0xee, 0x46, 0x2e, 0x09, 0xd1, - 0xe3, 0x50, 0x74, 0x6a, 0x35, 0xc6, 0xdd, 0x0d, 0xce, 0x1d, 0xdd, 0xdb, 0x2d, 0x15, 0x67, 0x6b, - 0x94, 0xcd, 0x00, 0x85, 0xb5, 0x83, 0x29, 0x06, 0x7a, 0x12, 0x7a, 0x6a, 0x81, 0xdf, 0x9c, 0x2a, - 0x30, 0x4c, 0xba, 0xcb, 0x7b, 0x16, 0x02, 0xbf, 0x99, 0x40, 0x65, 0x38, 0xf6, 0x6f, 0x17, 0xe0, - 0xe4, 0x3c, 0x69, 0x6e, 0x2e, 0x55, 0x72, 0xee, 0x8b, 0x73, 0x30, 0xd0, 0xf0, 0x3d, 0x37, 0xf2, - 0x83, 0x50, 0x34, 0xcd, 0x56, 0xc4, 0x8a, 0x28, 0xc3, 0x0a, 0x8a, 0xce, 0x40, 0x4f, 0x33, 0x66, - 0x62, 0x87, 0x25, 0x03, 0xcc, 0xd8, 0x57, 0x06, 0xa1, 0x18, 0xad, 0x90, 0x04, 0x62, 0xc5, 0x28, - 0x8c, 0xeb, 0x21, 0x09, 0x30, 0x83, 0xc4, 0x9c, 0x00, 0xe5, 0x11, 0xc4, 0x8d, 0x90, 0xe0, 0x04, - 0x28, 0x04, 0x6b, 0x58, 0xa8, 0x0c, 0x83, 0x61, 0x62, 0x66, 0xbb, 0xda, 0x9a, 0x23, 0x8c, 0x55, - 0x50, 0x33, 0x19, 0x13, 0x31, 0x6e, 0xb0, 0xbe, 0x8e, 0xac, 0xc2, 0xd7, 0x0a, 0x80, 0xf8, 0x10, - 0x7e, 0x9b, 0x0d, 0xdc, 0xf5, 0xf4, 0xc0, 0x75, 0xbf, 0x25, 0xee, 0xd7, 0xe8, 0xfd, 0x4f, 0x0b, - 
0x4e, 0xce, 0xbb, 0x5e, 0x8d, 0x04, 0x39, 0x0b, 0xf0, 0xc1, 0x3c, 0xe5, 0x0f, 0xc6, 0xa4, 0x18, - 0x4b, 0xac, 0xe7, 0x3e, 0x2c, 0x31, 0xfb, 0x2f, 0x2c, 0x40, 0xfc, 0xb3, 0xdf, 0x75, 0x1f, 0x7b, - 0x3d, 0xfd, 0xb1, 0xf7, 0x61, 0x59, 0xd8, 0x57, 0x61, 0x74, 0xbe, 0xee, 0x12, 0x2f, 0x5a, 0x2e, - 0xcf, 0xfb, 0xde, 0xba, 0xbb, 0x81, 0x5e, 0x82, 0xd1, 0xc8, 0x6d, 0x10, 0xbf, 0x15, 0x55, 0x48, - 0xd5, 0xf7, 0xd8, 0xcb, 0xd5, 0x3a, 0xd7, 0x3b, 0x87, 0xf6, 0x76, 0x4b, 0xa3, 0x6b, 0x06, 0x04, - 0x27, 0x30, 0xed, 0x9f, 0xa1, 0xe7, 0x56, 0xbd, 0x15, 0x46, 0x24, 0x58, 0x0b, 0x5a, 0x61, 0x34, - 0xd7, 0xa2, 0xbc, 0x67, 0x39, 0xf0, 0x69, 0x77, 0x5c, 0xdf, 0x43, 0x27, 0x8d, 0xe7, 0xf8, 0x80, - 0x7c, 0x8a, 0x8b, 0x67, 0xf7, 0x0c, 0x40, 0xe8, 0x6e, 0x78, 0x24, 0xd0, 0x9e, 0x0f, 0xa3, 0x6c, - 0xab, 0xa8, 0x52, 0xac, 0x61, 0xa0, 0x3a, 0x8c, 0xd4, 0x9d, 0x5b, 0xa4, 0x5e, 0x21, 0x75, 0x52, - 0x8d, 0xfc, 0x40, 0xc8, 0x37, 0x9e, 0xeb, 0xee, 0x1d, 0x70, 0x55, 0xaf, 0x3a, 0x37, 0xb1, 0xb7, - 0x5b, 0x1a, 0x31, 0x8a, 0xb0, 0x49, 0x9c, 0x1e, 0x1d, 0x7e, 0x93, 0x7e, 0x85, 0x53, 0xd7, 0x1f, - 0x9f, 0xd7, 0x44, 0x19, 0x56, 0x50, 0x75, 0x74, 0xf4, 0xe4, 0x1d, 0x1d, 0xf6, 0x1f, 0xd1, 0x85, - 0xe6, 0x37, 0x9a, 0xbe, 0x47, 0xbc, 0x68, 0xde, 0xf7, 0x6a, 0x5c, 0x32, 0xf5, 0x92, 0x21, 0x3a, - 0x39, 0x9b, 0x10, 0x9d, 0x1c, 0x4b, 0xd7, 0xd0, 0xa4, 0x27, 0x1f, 0x86, 0xbe, 0x30, 0x72, 0xa2, - 0x56, 0x28, 0x06, 0xee, 0x61, 0xb9, 0xec, 0x2a, 0xac, 0x74, 0x7f, 0xb7, 0x34, 0xa6, 0xaa, 0xf1, - 0x22, 0x2c, 0x2a, 0xa0, 0x27, 0xa0, 0xbf, 0x41, 0xc2, 0xd0, 0xd9, 0x90, 0x6c, 0xc3, 0x98, 0xa8, - 0xdb, 0xbf, 0xc2, 0x8b, 0xb1, 0x84, 0xa3, 0x47, 0xa0, 0x97, 0x04, 0x81, 0x1f, 0x88, 0x6f, 0x1b, - 0x11, 0x88, 0xbd, 0x8b, 0xb4, 0x10, 0x73, 0x98, 0xfd, 0x1f, 0x2c, 0x18, 0x53, 0x7d, 0xe5, 0x6d, - 0x1d, 0xc2, 0x73, 0xed, 0x4d, 0x80, 0xaa, 0xfc, 0xc0, 0x90, 0x5d, 0xb3, 0x43, 0xcf, 0x9e, 0xcd, - 0xe4, 0x68, 0x52, 0xc3, 0x18, 0x53, 0x56, 0x45, 0x21, 0xd6, 0xa8, 0xd9, 0xbf, 0x61, 0xc1, 0x64, - 0xe2, 0x8b, 0xae, 0xba, 0x61, 0x84, 0xde, 0x4a, 0x7d, 0xd5, 0x4c, 0x97, 0x8b, 0xcf, 0x0d, 0xf9, - 0x37, 0xa9, 0x3d, 0x2f, 0x4b, 0xb4, 0x2f, 0xba, 0x0c, 0xbd, 0x6e, 0x44, 0x1a, 0xf2, 0x63, 0x1e, - 0x69, 0xfb, 0x31, 0xbc, 0x57, 0xf1, 0x8c, 0x2c, 0xd3, 0x9a, 0x98, 0x13, 0xb0, 0x7f, 0xbb, 0x08, - 0x83, 0x7c, 0x7f, 0xaf, 0x38, 0xcd, 0x43, 0x98, 0x8b, 0xa7, 0x60, 0xd0, 0x6d, 0x34, 0x5a, 0x91, - 0x73, 0x4b, 0xdc, 0x7b, 0x03, 0xfc, 0x0c, 0x5a, 0x96, 0x85, 0x38, 0x86, 0xa3, 0x65, 0xe8, 0x61, - 0x5d, 0xe1, 0x5f, 0xf9, 0x78, 0xf6, 0x57, 0x8a, 0xbe, 0xcf, 0x2c, 0x38, 0x91, 0xc3, 0x59, 0x4e, - 0xb5, 0xaf, 0x68, 0x11, 0x66, 0x24, 0x90, 0x03, 0x70, 0xcb, 0xf5, 0x9c, 0x60, 0x87, 0x96, 0x4d, - 0x15, 0x19, 0xc1, 0x67, 0xda, 0x13, 0x9c, 0x53, 0xf8, 0x9c, 0xac, 0xfa, 0xb0, 0x18, 0x80, 0x35, - 0xa2, 0xd3, 0x2f, 0xc2, 0xa0, 0x42, 0x3e, 0x08, 0xe7, 0x38, 0xfd, 0x11, 0x18, 0x4b, 0xb4, 0xd5, - 0xa9, 0xfa, 0xb0, 0xce, 0x78, 0xfe, 0x1a, 0x3b, 0x32, 0x44, 0xaf, 0x17, 0xbd, 0x6d, 0x71, 0x37, - 0xdd, 0x85, 0x23, 0xf5, 0x8c, 0x23, 0x5f, 0xcc, 0x6b, 0xf7, 0x57, 0xc4, 0x49, 0xf1, 0xd9, 0x47, - 0xb2, 0xa0, 0x38, 0xb3, 0x0d, 0xe3, 0x44, 0x2c, 0xb4, 0x3b, 0x11, 0xe9, 0x79, 0x77, 0x44, 0x75, - 0xfe, 0x0a, 0xd9, 0x51, 0x87, 0xea, 0xb7, 0xb2, 0xfb, 0xa7, 0xf8, 0xe8, 0xf3, 0xe3, 0x72, 0x48, - 0x10, 0x28, 0x5e, 0x21, 0x3b, 0x7c, 0x2a, 0xf4, 0xaf, 0x2b, 0xb6, 0xfd, 0xba, 0xaf, 0x58, 0x30, - 0xa2, 0xbe, 0xee, 0x10, 0xce, 0x85, 0x39, 0xf3, 0x5c, 0x38, 0xd5, 0x76, 0x81, 0xe7, 0x9c, 0x08, - 0x5f, 0x2b, 0xc0, 0x09, 0x85, 0x43, 0x1f, 0x51, 0xfc, 0x8f, 0x58, 0x55, 0xe7, 0x61, 0xd0, 0x53, - 0xe2, 0x44, 0xcb, 0x94, 
0xe3, 0xc5, 0xc2, 0xc4, 0x18, 0x87, 0x5e, 0x79, 0x5e, 0x7c, 0x69, 0x0f, - 0xeb, 0x72, 0x76, 0x71, 0xb9, 0xcf, 0x41, 0xb1, 0xe5, 0xd6, 0xc4, 0x05, 0xf3, 0x41, 0x39, 0xda, - 0xd7, 0x97, 0x17, 0xf6, 0x77, 0x4b, 0x0f, 0xe7, 0xa9, 0x9c, 0xe8, 0xcd, 0x16, 0xce, 0x5c, 0x5f, - 0x5e, 0xc0, 0xb4, 0x32, 0x9a, 0x85, 0x31, 0xa9, 0x55, 0xbb, 0x41, 0xf9, 0x52, 0xdf, 0x13, 0xf7, - 0x90, 0x12, 0x96, 0x63, 0x13, 0x8c, 0x93, 0xf8, 0x68, 0x01, 0xc6, 0xb7, 0x5a, 0xb7, 0x48, 0x9d, - 0x44, 0xfc, 0x83, 0xaf, 0x10, 0x2e, 0x4a, 0x1e, 0x8c, 0x9f, 0xb0, 0x57, 0x12, 0x70, 0x9c, 0xaa, - 0x61, 0xff, 0x0d, 0xbb, 0x0f, 0xc4, 0xe8, 0x69, 0xfc, 0xcd, 0xb7, 0x72, 0x39, 0x77, 0xb3, 0x2a, - 0xae, 0x90, 0x9d, 0x35, 0x9f, 0xf2, 0x21, 0xd9, 0xab, 0xc2, 0x58, 0xf3, 0x3d, 0x6d, 0xd7, 0xfc, - 0x2f, 0x15, 0xe0, 0xa8, 0x1a, 0x01, 0x83, 0x5b, 0xfe, 0x76, 0x1f, 0x83, 0x0b, 0x30, 0x54, 0x23, - 0xeb, 0x4e, 0xab, 0x1e, 0x29, 0xbd, 0x46, 0x2f, 0x57, 0xb5, 0x2d, 0xc4, 0xc5, 0x58, 0xc7, 0x39, - 0xc0, 0xb0, 0xfd, 0xc2, 0x08, 0xbb, 0x88, 0x23, 0x87, 0xae, 0x71, 0xb5, 0x6b, 0xac, 0xdc, 0x5d, - 0xf3, 0x08, 0xf4, 0xba, 0x0d, 0xca, 0x98, 0x15, 0x4c, 0x7e, 0x6b, 0x99, 0x16, 0x62, 0x0e, 0x43, - 0x8f, 0x41, 0x7f, 0xd5, 0x6f, 0x34, 0x1c, 0xaf, 0xc6, 0xae, 0xbc, 0xc1, 0xb9, 0x21, 0xca, 0xbb, - 0xcd, 0xf3, 0x22, 0x2c, 0x61, 0x94, 0xf9, 0x76, 0x82, 0x0d, 0x2e, 0xec, 0x11, 0xcc, 0xf7, 0x6c, - 0xb0, 0x11, 0x62, 0x56, 0x4a, 0xdf, 0xaa, 0xb7, 0xfd, 0x60, 0xcb, 0xf5, 0x36, 0x16, 0xdc, 0x40, - 0x6c, 0x09, 0x75, 0x17, 0xde, 0x54, 0x10, 0xac, 0x61, 0xa1, 0x25, 0xe8, 0x6d, 0xfa, 0x41, 0x14, - 0x4e, 0xf5, 0xb1, 0xe1, 0x7e, 0x38, 0xe7, 0x20, 0xe2, 0x5f, 0x5b, 0xf6, 0x83, 0x28, 0xfe, 0x00, - 0xfa, 0x2f, 0xc4, 0xbc, 0x3a, 0xba, 0x0a, 0xfd, 0xc4, 0xdb, 0x5e, 0x0a, 0xfc, 0xc6, 0xd4, 0x64, - 0x3e, 0xa5, 0x45, 0x8e, 0xc2, 0x97, 0x59, 0xcc, 0xa3, 0x8a, 0x62, 0x2c, 0x49, 0xa0, 0x0f, 0x43, - 0x91, 0x78, 0xdb, 0x53, 0xfd, 0x8c, 0xd2, 0x74, 0x0e, 0xa5, 0x1b, 0x4e, 0x10, 0x9f, 0xf9, 0x8b, - 0xde, 0x36, 0xa6, 0x75, 0xd0, 0xc7, 0x60, 0x50, 0x1e, 0x18, 0xa1, 0x90, 0xa2, 0x66, 0x2e, 0x58, - 0x79, 0xcc, 0x60, 0xf2, 0x76, 0xcb, 0x0d, 0x48, 0x83, 0x78, 0x51, 0x18, 0x9f, 0x90, 0x12, 0x1a, - 0xe2, 0x98, 0x1a, 0xaa, 0xc2, 0x70, 0x40, 0x42, 0xf7, 0x2e, 0x29, 0xfb, 0x75, 0xb7, 0xba, 0x33, - 0x75, 0x9c, 0x75, 0xef, 0x89, 0xb6, 0x43, 0x86, 0xb5, 0x0a, 0xb1, 0x94, 0x5f, 0x2f, 0xc5, 0x06, - 0x51, 0xf4, 0x06, 0x8c, 0x04, 0x24, 0x8c, 0x9c, 0x20, 0x12, 0xad, 0x4c, 0x29, 0xad, 0xdc, 0x08, - 0xd6, 0x01, 0xfc, 0x39, 0x11, 0x37, 0x13, 0x43, 0xb0, 0x49, 0x01, 0x7d, 0x4c, 0xaa, 0x1c, 0x56, - 0xfc, 0x96, 0x17, 0x85, 0x53, 0x83, 0xac, 0xdf, 0x99, 0xba, 0xe9, 0x1b, 0x31, 0x5e, 0x52, 0x27, - 0xc1, 0x2b, 0x63, 0x83, 0x14, 0xfa, 0x04, 0x8c, 0xf0, 0xff, 0x5c, 0xa5, 0x1a, 0x4e, 0x1d, 0x65, - 0xb4, 0xcf, 0xe4, 0xd3, 0xe6, 0x88, 0x73, 0x47, 0x05, 0xf1, 0x11, 0xbd, 0x34, 0xc4, 0x26, 0x35, - 0x84, 0x61, 0xa4, 0xee, 0x6e, 0x13, 0x8f, 0x84, 0x61, 0x39, 0xf0, 0x6f, 0x11, 0x21, 0x21, 0x3e, - 0x91, 0xad, 0x82, 0xf5, 0x6f, 0x11, 0xf1, 0x08, 0xd4, 0xeb, 0x60, 0x93, 0x04, 0xba, 0x0e, 0xa3, - 0xf4, 0x49, 0xee, 0xc6, 0x44, 0x87, 0x3a, 0x11, 0x65, 0x0f, 0x67, 0x6c, 0x54, 0xc2, 0x09, 0x22, - 0xe8, 0x1a, 0x0c, 0xb3, 0x31, 0x6f, 0x35, 0x39, 0xd1, 0x63, 0x9d, 0x88, 0x32, 0x83, 0x82, 0x8a, - 0x56, 0x05, 0x1b, 0x04, 0xd0, 0xeb, 0x30, 0x58, 0x77, 0xd7, 0x49, 0x75, 0xa7, 0x5a, 0x27, 0x53, - 0xc3, 0x8c, 0x5a, 0xe6, 0x61, 0x78, 0x55, 0x22, 0x71, 0xfe, 0x5c, 0xfd, 0xc5, 0x71, 0x75, 0x74, - 0x03, 0x8e, 0x45, 0x24, 0x68, 0xb8, 0x9e, 0x43, 0x0f, 0x31, 0xf1, 0x24, 0x64, 0x9a, 0xf1, 0x11, - 0xb6, 0xba, 0x4e, 0x8b, 0xd9, 0x38, 0xb6, 0x96, 
0x89, 0x85, 0x73, 0x6a, 0xa3, 0x3b, 0x30, 0x95, - 0x01, 0xe1, 0xeb, 0xf6, 0x08, 0xa3, 0xfc, 0x8a, 0xa0, 0x3c, 0xb5, 0x96, 0x83, 0xb7, 0xdf, 0x06, - 0x86, 0x73, 0xa9, 0xa3, 0x6b, 0x30, 0xc6, 0x4e, 0xce, 0x72, 0xab, 0x5e, 0x17, 0x0d, 0x8e, 0xb2, - 0x06, 0x1f, 0x93, 0x7c, 0xc4, 0xb2, 0x09, 0xde, 0xdf, 0x2d, 0x41, 0xfc, 0x0f, 0x27, 0x6b, 0xa3, - 0x5b, 0x4c, 0x09, 0xdb, 0x0a, 0xdc, 0x68, 0x87, 0xee, 0x2a, 0x72, 0x27, 0x9a, 0x1a, 0x6b, 0x2b, - 0x90, 0xd2, 0x51, 0x95, 0xa6, 0x56, 0x2f, 0xc4, 0x49, 0x82, 0xf4, 0x2a, 0x08, 0xa3, 0x9a, 0xeb, - 0x4d, 0x8d, 0xf3, 0xf7, 0x94, 0x3c, 0x49, 0x2b, 0xb4, 0x10, 0x73, 0x18, 0x53, 0xc0, 0xd2, 0x1f, - 0xd7, 0xe8, 0x8d, 0x3b, 0xc1, 0x10, 0x63, 0x05, 0xac, 0x04, 0xe0, 0x18, 0x87, 0x32, 0xc1, 0x51, - 0xb4, 0x33, 0x85, 0x18, 0xaa, 0x3a, 0x10, 0xd7, 0xd6, 0x3e, 0x86, 0x69, 0xb9, 0x7d, 0x0b, 0x46, - 0xd5, 0x31, 0xc1, 0xc6, 0x04, 0x95, 0xa0, 0x97, 0xb1, 0x7d, 0x42, 0x7c, 0x3a, 0x48, 0xbb, 0xc0, - 0x58, 0x42, 0xcc, 0xcb, 0x59, 0x17, 0xdc, 0xbb, 0x64, 0x6e, 0x27, 0x22, 0x5c, 0x16, 0x51, 0xd4, - 0xba, 0x20, 0x01, 0x38, 0xc6, 0xb1, 0xff, 0x2f, 0x67, 0x9f, 0xe3, 0x5b, 0xa2, 0x8b, 0x7b, 0xf1, - 0x69, 0x18, 0x60, 0x86, 0x1f, 0x7e, 0xc0, 0xb5, 0xb3, 0xbd, 0x31, 0xc3, 0x7c, 0x59, 0x94, 0x63, - 0x85, 0x81, 0x5e, 0x86, 0x91, 0xaa, 0xde, 0x80, 0xb8, 0xd4, 0xd5, 0x31, 0x62, 0xb4, 0x8e, 0x4d, - 0x5c, 0x74, 0x11, 0x06, 0x98, 0x8d, 0x53, 0xd5, 0xaf, 0x0b, 0x6e, 0x53, 0x72, 0x26, 0x03, 0x65, - 0x51, 0xbe, 0xaf, 0xfd, 0xc6, 0x0a, 0x1b, 0x9d, 0x85, 0x3e, 0xda, 0x85, 0xe5, 0xb2, 0xb8, 0x4e, - 0x95, 0x24, 0xf0, 0x32, 0x2b, 0xc5, 0x02, 0x6a, 0xff, 0x86, 0xc5, 0x78, 0xa9, 0xf4, 0x99, 0x8f, - 0x2e, 0xb3, 0x4b, 0x83, 0xdd, 0x20, 0x9a, 0x16, 0xfe, 0x51, 0xed, 0x26, 0x50, 0xb0, 0xfd, 0xc4, - 0x7f, 0x6c, 0xd4, 0x44, 0x6f, 0x26, 0x6f, 0x06, 0xce, 0x50, 0x3c, 0x2f, 0x87, 0x20, 0x79, 0x3b, - 0x3c, 0x14, 0x5f, 0x71, 0xb4, 0x3f, 0xed, 0xae, 0x08, 0xfb, 0x47, 0x0a, 0xda, 0x2a, 0xa9, 0x44, - 0x4e, 0x44, 0x50, 0x19, 0xfa, 0x6f, 0x3b, 0x6e, 0xe4, 0x7a, 0x1b, 0x82, 0xef, 0x6b, 0x7f, 0xd1, - 0xb1, 0x4a, 0x37, 0x79, 0x05, 0xce, 0xbd, 0x88, 0x3f, 0x58, 0x92, 0xa1, 0x14, 0x83, 0x96, 0xe7, - 0x51, 0x8a, 0x85, 0x6e, 0x29, 0x62, 0x5e, 0x81, 0x53, 0x14, 0x7f, 0xb0, 0x24, 0x83, 0xde, 0x02, - 0x90, 0x27, 0x04, 0xa9, 0x09, 0xd9, 0xe1, 0xd3, 0x9d, 0x89, 0xae, 0xa9, 0x3a, 0x5c, 0x38, 0x19, - 0xff, 0xc7, 0x1a, 0x3d, 0x3b, 0xd2, 0xe6, 0x54, 0xef, 0x0c, 0xfa, 0x38, 0xdd, 0xa2, 0x4e, 0x10, - 0x91, 0xda, 0x6c, 0x24, 0x06, 0xe7, 0xc9, 0xee, 0x1e, 0x87, 0x6b, 0x6e, 0x83, 0xe8, 0xdb, 0x59, - 0x10, 0xc1, 0x31, 0x3d, 0xfb, 0x57, 0x8a, 0x30, 0x95, 0xd7, 0x5d, 0xba, 0x69, 0xc8, 0x1d, 0x37, - 0x9a, 0xa7, 0x6c, 0xad, 0x65, 0x6e, 0x9a, 0x45, 0x51, 0x8e, 0x15, 0x06, 0x5d, 0xbd, 0xa1, 0xbb, - 0x21, 0xdf, 0xf6, 0xbd, 0xf1, 0xea, 0xad, 0xb0, 0x52, 0x2c, 0xa0, 0x14, 0x2f, 0x20, 0x4e, 0x28, - 0x8c, 0xef, 0xb4, 0x55, 0x8e, 0x59, 0x29, 0x16, 0x50, 0x5d, 0xca, 0xd8, 0xd3, 0x41, 0xca, 0x68, - 0x0c, 0x51, 0xef, 0xfd, 0x1d, 0x22, 0xf4, 0x49, 0x80, 0x75, 0xd7, 0x73, 0xc3, 0x4d, 0x46, 0xbd, - 0xef, 0xc0, 0xd4, 0x15, 0x53, 0xbc, 0xa4, 0xa8, 0x60, 0x8d, 0x22, 0x7a, 0x01, 0x86, 0xd4, 0x01, - 0xb2, 0xbc, 0xc0, 0x54, 0xff, 0x9a, 0x29, 0x55, 0x7c, 0x9a, 0x2e, 0x60, 0x1d, 0xcf, 0xfe, 0x74, - 0x72, 0xbd, 0x88, 0x1d, 0xa0, 0x8d, 0xaf, 0xd5, 0xed, 0xf8, 0x16, 0xda, 0x8f, 0xaf, 0xfd, 0x63, - 0x83, 0x30, 0x66, 0x34, 0xd6, 0x0a, 0xbb, 0x38, 0x73, 0x2f, 0xd1, 0x0b, 0xc8, 0x89, 0x88, 0xd8, - 0x7f, 0x76, 0xe7, 0xad, 0xa2, 0x5f, 0x52, 0x74, 0x07, 0xf0, 0xfa, 0xe8, 0x93, 0x30, 0x58, 0x77, - 0x42, 0x26, 0xb1, 0x24, 0x62, 0xdf, 0x75, 0x43, 0x2c, 0x7e, 0x10, 0x3a, 
0x61, 0xa4, 0xdd, 0xfa, - 0x9c, 0x76, 0x4c, 0x92, 0xde, 0x94, 0x94, 0xbf, 0x92, 0xd6, 0x9d, 0xaa, 0x13, 0x94, 0x09, 0xdb, - 0xc1, 0x1c, 0x86, 0x2e, 0xb2, 0xa3, 0x95, 0xae, 0x8a, 0x79, 0xca, 0x8d, 0xb2, 0x65, 0xd6, 0x6b, - 0x30, 0xd9, 0x0a, 0x86, 0x0d, 0xcc, 0xf8, 0x4d, 0xd6, 0xd7, 0xe6, 0x4d, 0xf6, 0x04, 0xf4, 0xb3, - 0x1f, 0x6a, 0x05, 0xa8, 0xd9, 0x58, 0xe6, 0xc5, 0x58, 0xc2, 0x93, 0x0b, 0x66, 0xa0, 0xbb, 0x05, - 0x43, 0x5f, 0x7d, 0x62, 0x51, 0x33, 0xb3, 0x8b, 0x01, 0x7e, 0xca, 0x89, 0x25, 0x8f, 0x25, 0x0c, - 0xfd, 0xac, 0x05, 0xc8, 0xa9, 0xd3, 0xd7, 0x32, 0x2d, 0x56, 0x8f, 0x1b, 0x60, 0xac, 0xf6, 0xcb, - 0x1d, 0x87, 0xbd, 0x15, 0xce, 0xcc, 0xa6, 0x6a, 0x73, 0x49, 0xe9, 0x4b, 0xa2, 0x8b, 0x28, 0x8d, - 0xa0, 0x5f, 0x46, 0x57, 0xdd, 0x30, 0xfa, 0xec, 0x1f, 0x27, 0x2e, 0xa7, 0x8c, 0x2e, 0xa1, 0xeb, - 0xfa, 0xe3, 0x6b, 0xe8, 0x80, 0x8f, 0xaf, 0x91, 0xdc, 0x87, 0xd7, 0x77, 0x26, 0x1e, 0x30, 0xc3, - 0xec, 0xcb, 0x1f, 0xeb, 0xf0, 0x80, 0x11, 0xe2, 0xf4, 0x6e, 0x9e, 0x31, 0x65, 0xa1, 0x07, 0x1e, - 0x61, 0x5d, 0x6e, 0xff, 0x08, 0xbe, 0x1e, 0x92, 0x60, 0xee, 0x84, 0x54, 0x13, 0xef, 0xeb, 0xbc, - 0x87, 0xa6, 0x37, 0xfe, 0x1e, 0x0b, 0xa6, 0xd2, 0x03, 0xc4, 0xbb, 0x34, 0x35, 0xca, 0xfa, 0x6f, - 0xb7, 0x1b, 0x19, 0xd1, 0x79, 0x69, 0xee, 0x3a, 0x35, 0x9b, 0x43, 0x0b, 0xe7, 0xb6, 0x32, 0xdd, - 0x82, 0xe3, 0x39, 0xf3, 0x9e, 0x21, 0xb5, 0x5e, 0xd0, 0xa5, 0xd6, 0x1d, 0x64, 0x9d, 0x33, 0x72, - 0x66, 0x66, 0xde, 0x68, 0x39, 0x5e, 0xe4, 0x46, 0x3b, 0xba, 0x94, 0xdb, 0x03, 0x73, 0x40, 0xd0, - 0x27, 0xa0, 0xb7, 0xee, 0x7a, 0xad, 0x3b, 0xe2, 0xa6, 0x3c, 0x9b, 0xfd, 0x88, 0xf1, 0x5a, 0x77, - 0xcc, 0x21, 0x2e, 0xd1, 0x0d, 0xc9, 0xca, 0xf7, 0x77, 0x4b, 0x28, 0x8d, 0x80, 0x39, 0x55, 0xfb, - 0x49, 0x18, 0x5d, 0x70, 0x48, 0xc3, 0xf7, 0x16, 0xbd, 0x5a, 0xd3, 0x77, 0xbd, 0x08, 0x4d, 0x41, - 0x0f, 0x63, 0x11, 0xf9, 0x05, 0xd9, 0x43, 0x87, 0x10, 0xb3, 0x12, 0x7b, 0x03, 0x8e, 0x2e, 0xf8, - 0xb7, 0xbd, 0xdb, 0x4e, 0x50, 0x9b, 0x2d, 0x2f, 0x6b, 0x52, 0xbf, 0x55, 0x29, 0x75, 0xb2, 0xf2, - 0xdf, 0xf4, 0x5a, 0x4d, 0xbe, 0x94, 0x96, 0xdc, 0x3a, 0xc9, 0x91, 0xcd, 0xfe, 0x58, 0xc1, 0x68, - 0x29, 0xc6, 0x57, 0x9a, 0x45, 0x2b, 0xd7, 0x28, 0xe1, 0x0d, 0x18, 0x58, 0x77, 0x49, 0xbd, 0x86, - 0xc9, 0xba, 0x98, 0x8d, 0xc7, 0xf3, 0xcd, 0x16, 0x97, 0x28, 0xa6, 0x52, 0x81, 0x32, 0x99, 0xd5, - 0x92, 0xa8, 0x8c, 0x15, 0x19, 0xb4, 0x05, 0xe3, 0x72, 0xce, 0x24, 0x54, 0x9c, 0xda, 0x4f, 0xb4, - 0x5b, 0x84, 0x26, 0x71, 0x66, 0xc2, 0x8d, 0x13, 0x64, 0x70, 0x8a, 0x30, 0x3a, 0x09, 0x3d, 0x0d, - 0xca, 0x9f, 0xf4, 0xb0, 0xe1, 0x67, 0x42, 0x2a, 0x26, 0x6f, 0x63, 0xa5, 0xf6, 0x4f, 0x58, 0x70, - 0x3c, 0x35, 0x32, 0x42, 0xee, 0x78, 0x9f, 0x67, 0x21, 0x29, 0x07, 0x2c, 0x74, 0x96, 0x03, 0xda, - 0xff, 0xc8, 0x82, 0x23, 0x8b, 0x8d, 0x66, 0xb4, 0xb3, 0xe0, 0x9a, 0x16, 0x04, 0x2f, 0x42, 0x5f, - 0x83, 0xd4, 0xdc, 0x56, 0x43, 0xcc, 0x5c, 0x49, 0xde, 0xe1, 0x2b, 0xac, 0x94, 0x9e, 0x03, 0x95, - 0xc8, 0x0f, 0x9c, 0x0d, 0xc2, 0x0b, 0xb0, 0x40, 0x67, 0x9c, 0x90, 0x7b, 0x97, 0x5c, 0x75, 0x1b, - 0x6e, 0x74, 0x6f, 0xbb, 0x4b, 0x28, 0xff, 0x25, 0x11, 0x1c, 0xd3, 0xb3, 0xbf, 0x61, 0xc1, 0x98, - 0x5c, 0xf7, 0xb3, 0xb5, 0x5a, 0x40, 0xc2, 0x10, 0x4d, 0x43, 0xc1, 0x6d, 0x8a, 0x5e, 0x82, 0xe8, - 0x65, 0x61, 0xb9, 0x8c, 0x0b, 0x6e, 0x53, 0x3e, 0xba, 0x18, 0x9b, 0x50, 0x34, 0xed, 0x20, 0x2e, - 0x8b, 0x72, 0xac, 0x30, 0xd0, 0x39, 0x18, 0xf0, 0xfc, 0x1a, 0x7f, 0xb7, 0x08, 0x4d, 0x38, 0xc5, - 0x5c, 0x15, 0x65, 0x58, 0x41, 0x51, 0x19, 0x06, 0xb9, 0x95, 0x6c, 0xbc, 0x68, 0xbb, 0xb2, 0xb5, - 0x65, 0x5f, 0xb6, 0x26, 0x6b, 0xe2, 0x98, 0x88, 0xfd, 0x5b, 0x16, 0x0c, 0xcb, 0x2f, 0xeb, 0xf2, - 
0x45, 0x49, 0xb7, 0x56, 0xfc, 0x9a, 0x8c, 0xb7, 0x16, 0x7d, 0x11, 0x32, 0x88, 0xf1, 0x10, 0x2c, - 0x1e, 0xe8, 0x21, 0x78, 0x01, 0x86, 0x9c, 0x66, 0xb3, 0x6c, 0xbe, 0x22, 0xd9, 0x52, 0x9a, 0x8d, - 0x8b, 0xb1, 0x8e, 0x63, 0xff, 0x78, 0x01, 0x46, 0xe5, 0x17, 0x54, 0x5a, 0xb7, 0x42, 0x12, 0xa1, - 0x35, 0x18, 0x74, 0xf8, 0x2c, 0x11, 0xb9, 0xc8, 0x1f, 0xc9, 0x96, 0x6e, 0x1a, 0x53, 0x1a, 0xb3, - 0xc3, 0xb3, 0xb2, 0x36, 0x8e, 0x09, 0xa1, 0x3a, 0x4c, 0x78, 0x7e, 0xc4, 0x58, 0x23, 0x05, 0x6f, - 0xa7, 0x70, 0x4e, 0x52, 0x3f, 0x21, 0xa8, 0x4f, 0xac, 0x26, 0xa9, 0xe0, 0x34, 0x61, 0xb4, 0x28, - 0x25, 0xc6, 0xc5, 0x7c, 0x51, 0x9f, 0x3e, 0x71, 0xd9, 0x02, 0x63, 0xfb, 0xd7, 0x2d, 0x18, 0x94, - 0x68, 0x87, 0x61, 0x5b, 0xb0, 0x02, 0xfd, 0x21, 0x9b, 0x04, 0x39, 0x34, 0x76, 0xbb, 0x8e, 0xf3, - 0xf9, 0x8a, 0x39, 0x3e, 0xfe, 0x3f, 0xc4, 0x92, 0x06, 0x53, 0x18, 0xaa, 0xee, 0xbf, 0x4b, 0x14, - 0x86, 0xaa, 0x3f, 0x39, 0x97, 0xd2, 0x9f, 0xb2, 0x3e, 0x6b, 0x12, 0x78, 0xfa, 0x30, 0x69, 0x06, - 0x64, 0xdd, 0xbd, 0x93, 0x7c, 0x98, 0x94, 0x59, 0x29, 0x16, 0x50, 0xf4, 0x16, 0x0c, 0x57, 0xa5, - 0xa6, 0x28, 0xde, 0xe1, 0x67, 0xdb, 0x6a, 0x2d, 0x95, 0x82, 0x9b, 0x4b, 0x3a, 0xe7, 0xb5, 0xfa, - 0xd8, 0xa0, 0x66, 0x5a, 0x81, 0x15, 0x3b, 0x59, 0x81, 0xc5, 0x74, 0xf3, 0x6d, 0xa2, 0x7e, 0xd2, - 0x82, 0x3e, 0xae, 0x21, 0xe8, 0x4e, 0x41, 0xa3, 0xe9, 0xfb, 0xe3, 0xb1, 0xbb, 0x41, 0x0b, 0x05, - 0x67, 0x83, 0x56, 0x60, 0x90, 0xfd, 0x60, 0x1a, 0x8e, 0x62, 0xbe, 0xcf, 0x18, 0x6f, 0x55, 0xef, - 0xe0, 0x0d, 0x59, 0x0d, 0xc7, 0x14, 0xec, 0x1f, 0x2d, 0xd2, 0xd3, 0x2d, 0x46, 0x35, 0x2e, 0x7d, - 0xeb, 0xc1, 0x5d, 0xfa, 0x85, 0x07, 0x75, 0xe9, 0x6f, 0xc0, 0x58, 0x55, 0xb3, 0x0e, 0x88, 0x67, - 0xf2, 0x5c, 0xdb, 0x45, 0xa2, 0x19, 0x12, 0x70, 0x19, 0xea, 0xbc, 0x49, 0x04, 0x27, 0xa9, 0xa2, - 0x8f, 0xc3, 0x30, 0x9f, 0x67, 0xd1, 0x0a, 0x37, 0xa4, 0x7b, 0x2c, 0x7f, 0xbd, 0xe8, 0x4d, 0x70, - 0x99, 0xbb, 0x56, 0x1d, 0x1b, 0xc4, 0xec, 0xbf, 0xb4, 0x00, 0x2d, 0x36, 0x37, 0x49, 0x83, 0x04, - 0x4e, 0x3d, 0x56, 0xf2, 0x7d, 0xc1, 0x82, 0x29, 0x92, 0x2a, 0x9e, 0xf7, 0x1b, 0x0d, 0xf1, 0xa4, - 0xcf, 0x91, 0x3a, 0x2d, 0xe6, 0xd4, 0x89, 0xd9, 0xfa, 0x3c, 0x0c, 0x9c, 0xdb, 0x1e, 0x5a, 0x81, - 0x49, 0x7e, 0x4b, 0x2a, 0x80, 0x66, 0x6b, 0xf7, 0x90, 0x20, 0x3c, 0xb9, 0x96, 0x46, 0xc1, 0x59, - 0xf5, 0xec, 0x5f, 0x1f, 0x81, 0xdc, 0x5e, 0xbc, 0xaf, 0xdd, 0x7c, 0x5f, 0xbb, 0xf9, 0xbe, 0x76, - 0xf3, 0x7d, 0xed, 0xe6, 0xfb, 0xda, 0xcd, 0xf7, 0xb5, 0x9b, 0xef, 0x52, 0xed, 0xe6, 0xdf, 0xb6, - 0xe0, 0xa8, 0xba, 0xbe, 0x8c, 0x07, 0xfb, 0x67, 0x60, 0x92, 0x6f, 0xb7, 0xf9, 0xba, 0xe3, 0x36, - 0xd6, 0x48, 0xa3, 0x59, 0x77, 0x22, 0x69, 0xc3, 0x74, 0x21, 0x73, 0xe5, 0x26, 0x1c, 0x25, 0x8c, - 0x8a, 0xdc, 0xe3, 0x2c, 0x03, 0x80, 0xb3, 0x9a, 0xb1, 0x7f, 0x65, 0x00, 0x7a, 0x17, 0xb7, 0x89, - 0x17, 0x1d, 0xc2, 0xd3, 0xa6, 0x0a, 0xa3, 0xae, 0xb7, 0xed, 0xd7, 0xb7, 0x49, 0x8d, 0xc3, 0x0f, - 0xf2, 0x02, 0x3f, 0x26, 0x48, 0x8f, 0x2e, 0x1b, 0x24, 0x70, 0x82, 0xe4, 0x83, 0xd0, 0x11, 0x5d, - 0x82, 0x3e, 0x7e, 0xf9, 0x08, 0x05, 0x51, 0xe6, 0x99, 0xcd, 0x06, 0x51, 0x5c, 0xa9, 0xb1, 0xfe, - 0x8a, 0x5f, 0x6e, 0xa2, 0x3a, 0xfa, 0x34, 0x8c, 0xae, 0xbb, 0x41, 0x18, 0xad, 0xb9, 0x0d, 0x7a, - 0x35, 0x34, 0x9a, 0xf7, 0xa0, 0x13, 0x52, 0xe3, 0xb0, 0x64, 0x50, 0xc2, 0x09, 0xca, 0x68, 0x03, - 0x46, 0xea, 0x8e, 0xde, 0x54, 0xff, 0x81, 0x9b, 0x52, 0xb7, 0xc3, 0x55, 0x9d, 0x10, 0x36, 0xe9, - 0xd2, 0xed, 0x54, 0x65, 0x6a, 0x8d, 0x01, 0x26, 0xce, 0x50, 0xdb, 0x89, 0xeb, 0x33, 0x38, 0x8c, - 0x32, 0x68, 0xcc, 0xdd, 0x60, 0xd0, 0x64, 0xd0, 0x34, 0xa7, 0x82, 0x4f, 0xc1, 0x20, 0xa1, 0x43, - 0x48, 0x09, 0x8b, 0x0b, 
0xe6, 0x7c, 0x77, 0x7d, 0x5d, 0x71, 0xab, 0x81, 0x6f, 0x6a, 0xe3, 0x16, - 0x25, 0x25, 0x1c, 0x13, 0x45, 0xf3, 0xd0, 0x17, 0x92, 0xc0, 0x55, 0x12, 0xff, 0x36, 0xd3, 0xc8, - 0xd0, 0xb8, 0x4b, 0x23, 0xff, 0x8d, 0x45, 0x55, 0xba, 0xbc, 0x1c, 0x26, 0x8a, 0x65, 0x97, 0x81, - 0xb6, 0xbc, 0x66, 0x59, 0x29, 0x16, 0x50, 0xf4, 0x3a, 0xf4, 0x07, 0xa4, 0xce, 0xd4, 0xbd, 0x23, - 0xdd, 0x2f, 0x72, 0xae, 0x3d, 0xe6, 0xf5, 0xb0, 0x24, 0x80, 0xae, 0x00, 0x0a, 0x08, 0x65, 0xf0, - 0x5c, 0x6f, 0x43, 0x19, 0xe1, 0x8b, 0x83, 0x56, 0x31, 0xd2, 0x38, 0xc6, 0x90, 0xde, 0xac, 0x38, - 0xa3, 0x1a, 0xba, 0x04, 0x13, 0xaa, 0x74, 0xd9, 0x0b, 0x23, 0x87, 0x1e, 0x70, 0x63, 0x8c, 0x96, - 0x92, 0xaf, 0xe0, 0x24, 0x02, 0x4e, 0xd7, 0xb1, 0x7f, 0xde, 0x02, 0x3e, 0xce, 0x87, 0x20, 0x55, - 0x78, 0xd5, 0x94, 0x2a, 0x9c, 0xc8, 0x9d, 0xb9, 0x1c, 0x89, 0xc2, 0xcf, 0x5b, 0x30, 0xa4, 0xcd, - 0x6c, 0xbc, 0x66, 0xad, 0x36, 0x6b, 0xb6, 0x05, 0xe3, 0x74, 0xa5, 0x5f, 0xbb, 0x15, 0x92, 0x60, - 0x9b, 0xd4, 0xd8, 0xc2, 0x2c, 0xdc, 0xdb, 0xc2, 0x54, 0x06, 0xbf, 0x57, 0x13, 0x04, 0x71, 0xaa, - 0x09, 0xfb, 0x53, 0xb2, 0xab, 0xca, 0x3e, 0xba, 0xaa, 0xe6, 0x3c, 0x61, 0x1f, 0xad, 0x66, 0x15, - 0xc7, 0x38, 0x74, 0xab, 0x6d, 0xfa, 0x61, 0x94, 0xb4, 0x8f, 0xbe, 0xec, 0x87, 0x11, 0x66, 0x10, - 0xfb, 0x39, 0x80, 0xc5, 0x3b, 0xa4, 0xca, 0x57, 0xac, 0xfe, 0xe8, 0xb1, 0xf2, 0x1f, 0x3d, 0xf6, - 0x1f, 0x58, 0x30, 0xba, 0x34, 0x6f, 0xdc, 0x5c, 0x33, 0x00, 0xfc, 0xa5, 0x76, 0xf3, 0xe6, 0xaa, - 0x34, 0xd2, 0xe1, 0x76, 0x0a, 0xaa, 0x14, 0x6b, 0x18, 0xe8, 0x04, 0x14, 0xeb, 0x2d, 0x4f, 0x88, - 0x3d, 0xfb, 0xe9, 0xf5, 0x78, 0xb5, 0xe5, 0x61, 0x5a, 0xa6, 0x79, 0xb2, 0x15, 0xbb, 0xf6, 0x64, - 0xeb, 0x18, 0x50, 0x07, 0x95, 0xa0, 0xf7, 0xf6, 0x6d, 0xb7, 0xc6, 0xe3, 0x04, 0x08, 0x03, 0xa2, - 0x9b, 0x37, 0x97, 0x17, 0x42, 0xcc, 0xcb, 0xed, 0x2f, 0x16, 0x61, 0x7a, 0xa9, 0x4e, 0xee, 0xbc, - 0xc3, 0x58, 0x09, 0xdd, 0xfa, 0xe1, 0x1d, 0x4c, 0x80, 0x74, 0x50, 0x5f, 0xcb, 0xce, 0xe3, 0xb1, - 0x0e, 0xfd, 0xdc, 0x3c, 0x58, 0x46, 0x4e, 0xc8, 0x54, 0xca, 0xe6, 0x0f, 0xc8, 0x0c, 0x37, 0x33, - 0x16, 0x4a, 0x59, 0x75, 0x61, 0x8a, 0x52, 0x2c, 0x89, 0x4f, 0xbf, 0x04, 0xc3, 0x3a, 0xe6, 0x81, - 0xbc, 0x9e, 0xbf, 0xb7, 0x08, 0xe3, 0xb4, 0x07, 0x0f, 0x74, 0x22, 0xae, 0xa7, 0x27, 0xe2, 0x7e, - 0x7b, 0xbe, 0x76, 0x9e, 0x8d, 0xb7, 0x92, 0xb3, 0x71, 0x21, 0x6f, 0x36, 0x0e, 0x7b, 0x0e, 0xbe, - 0xcf, 0x82, 0xc9, 0xa5, 0xba, 0x5f, 0xdd, 0x4a, 0x78, 0xa7, 0xbe, 0x00, 0x43, 0xf4, 0x38, 0x0e, - 0x8d, 0x40, 0x2d, 0x46, 0xe8, 0x1e, 0x01, 0xc2, 0x3a, 0x9e, 0x56, 0xed, 0xfa, 0xf5, 0xe5, 0x85, - 0xac, 0x88, 0x3f, 0x02, 0x84, 0x75, 0x3c, 0xfb, 0xf7, 0x2c, 0x38, 0x75, 0x69, 0x7e, 0x31, 0x5e, - 0x8a, 0xa9, 0xa0, 0x43, 0x67, 0xa1, 0xaf, 0x59, 0xd3, 0xba, 0x12, 0x8b, 0x85, 0x17, 0x58, 0x2f, - 0x04, 0xf4, 0xdd, 0x12, 0xdf, 0xeb, 0x3a, 0xc0, 0x25, 0x5c, 0x9e, 0x17, 0xe7, 0xae, 0xd4, 0x02, - 0x59, 0xb9, 0x5a, 0xa0, 0xc7, 0xa0, 0x9f, 0xde, 0x0b, 0x6e, 0x55, 0xf6, 0x9b, 0x9b, 0x5d, 0xf0, - 0x22, 0x2c, 0x61, 0xf6, 0xcf, 0x59, 0x30, 0x79, 0xc9, 0x8d, 0xe8, 0xa5, 0x9d, 0x8c, 0xaa, 0x43, - 0x6f, 0xed, 0xd0, 0x8d, 0xfc, 0x60, 0x27, 0x19, 0x55, 0x07, 0x2b, 0x08, 0xd6, 0xb0, 0xf8, 0x07, - 0x6d, 0xbb, 0xcc, 0xdf, 0xa5, 0x60, 0xea, 0xdd, 0xb0, 0x28, 0xc7, 0x0a, 0x83, 0x8e, 0x57, 0xcd, - 0x0d, 0x98, 0xc8, 0x72, 0x47, 0x1c, 0xdc, 0x6a, 0xbc, 0x16, 0x24, 0x00, 0xc7, 0x38, 0xf6, 0x9f, - 0x5b, 0x50, 0xba, 0xc4, 0xbd, 0x76, 0xd7, 0xc3, 0x9c, 0x43, 0xf7, 0x39, 0x18, 0x24, 0x52, 0x41, - 0x20, 0x7a, 0xad, 0x18, 0x51, 0xa5, 0x39, 0xe0, 0xc1, 0x7d, 0x14, 0x5e, 0x17, 0x2e, 0xf4, 0x07, - 0xf3, 0x81, 0x5e, 0x02, 0x44, 0xf4, 0xb6, 0xf4, 
0x68, 0x47, 0x2c, 0x6c, 0xca, 0x62, 0x0a, 0x8a, - 0x33, 0x6a, 0xd8, 0x3f, 0x61, 0xc1, 0x51, 0xf5, 0xc1, 0xef, 0xba, 0xcf, 0xb4, 0xbf, 0x5a, 0x80, - 0x91, 0xcb, 0x6b, 0x6b, 0xe5, 0x4b, 0x24, 0xd2, 0x56, 0x65, 0x7b, 0xb5, 0x3f, 0xd6, 0xb4, 0x97, - 0xed, 0xde, 0x88, 0xad, 0xc8, 0xad, 0xcf, 0xf0, 0x18, 0x7e, 0x33, 0xcb, 0x5e, 0x74, 0x2d, 0xa8, - 0x44, 0x81, 0xeb, 0x6d, 0x64, 0xae, 0x74, 0xc9, 0xb3, 0x14, 0xf3, 0x78, 0x16, 0xf4, 0x1c, 0xf4, - 0xb1, 0x20, 0x82, 0x72, 0x12, 0x1e, 0x52, 0x4f, 0x2c, 0x56, 0xba, 0xbf, 0x5b, 0x1a, 0xbc, 0x8e, - 0x97, 0xf9, 0x1f, 0x2c, 0x50, 0xd1, 0x75, 0x18, 0xda, 0x8c, 0xa2, 0xe6, 0x65, 0xe2, 0xd4, 0x48, - 0x20, 0x4f, 0xd9, 0xd3, 0x59, 0xa7, 0x2c, 0x1d, 0x04, 0x8e, 0x16, 0x1f, 0x4c, 0x71, 0x59, 0x88, - 0x75, 0x3a, 0x76, 0x05, 0x20, 0x86, 0xdd, 0x27, 0xc5, 0x8d, 0xbd, 0x06, 0x83, 0xf4, 0x73, 0x67, - 0xeb, 0xae, 0xd3, 0x5e, 0x35, 0xfe, 0x14, 0x0c, 0x4a, 0xc5, 0x77, 0x28, 0x42, 0x7c, 0xb0, 0x1b, - 0x49, 0xea, 0xc5, 0x43, 0x1c, 0xc3, 0xed, 0x47, 0x41, 0x58, 0x00, 0xb7, 0x23, 0x69, 0xaf, 0xc3, - 0x11, 0x66, 0xca, 0xec, 0x44, 0x9b, 0xc6, 0x1a, 0xed, 0xbc, 0x18, 0x9e, 0x16, 0xef, 0x3a, 0xfe, - 0x65, 0x53, 0x9a, 0x0b, 0xf9, 0xb0, 0xa4, 0x18, 0xbf, 0xf1, 0xec, 0x3f, 0xeb, 0x81, 0x87, 0x96, - 0x2b, 0xf9, 0xb1, 0xa9, 0x2e, 0xc2, 0x30, 0x67, 0x17, 0xe9, 0xd2, 0x70, 0xea, 0xa2, 0x5d, 0x25, - 0x01, 0x5d, 0xd3, 0x60, 0xd8, 0xc0, 0x44, 0xa7, 0xa0, 0xe8, 0xbe, 0xed, 0x25, 0x1d, 0x2c, 0x97, - 0xdf, 0x58, 0xc5, 0xb4, 0x9c, 0x82, 0x29, 0xe7, 0xc9, 0x8f, 0x74, 0x05, 0x56, 0xdc, 0xe7, 0xab, - 0x30, 0xea, 0x86, 0xd5, 0xd0, 0x5d, 0xf6, 0xe8, 0x3e, 0xd5, 0x76, 0xba, 0x92, 0x39, 0xd0, 0x4e, - 0x2b, 0x28, 0x4e, 0x60, 0x6b, 0xf7, 0x4b, 0x6f, 0xd7, 0xdc, 0x6b, 0xc7, 0xc8, 0x18, 0xf4, 0xf8, - 0x6f, 0xb2, 0xaf, 0x0b, 0x99, 0x08, 0x5e, 0x1c, 0xff, 0xfc, 0x83, 0x43, 0x2c, 0x61, 0xf4, 0x41, - 0x57, 0xdd, 0x74, 0x9a, 0xb3, 0xad, 0x68, 0x73, 0xc1, 0x0d, 0xab, 0xfe, 0x36, 0x09, 0x76, 0xd8, - 0x5b, 0x7c, 0x20, 0x7e, 0xd0, 0x29, 0xc0, 0xfc, 0xe5, 0xd9, 0x32, 0xc5, 0xc4, 0xe9, 0x3a, 0x68, - 0x16, 0xc6, 0x64, 0x61, 0x85, 0x84, 0xec, 0x0a, 0x18, 0x62, 0x64, 0x94, 0xcb, 0xa3, 0x28, 0x56, - 0x44, 0x92, 0xf8, 0x26, 0x83, 0x0b, 0xf7, 0x83, 0xc1, 0x7d, 0x11, 0x46, 0x5c, 0xcf, 0x8d, 0x5c, - 0x27, 0xf2, 0xb9, 0xfe, 0x88, 0x3f, 0xbb, 0x99, 0x80, 0x79, 0x59, 0x07, 0x60, 0x13, 0xcf, 0xfe, - 0x6f, 0x3d, 0x30, 0xc1, 0xa6, 0xed, 0xfd, 0x15, 0xf6, 0x5e, 0x5a, 0x61, 0xd7, 0xd3, 0x2b, 0xec, - 0x7e, 0x70, 0xee, 0xf7, 0xbc, 0xcc, 0x3e, 0x67, 0xc1, 0x04, 0x93, 0x71, 0x1b, 0xcb, 0xec, 0x3c, - 0x0c, 0x06, 0x86, 0x37, 0xea, 0xa0, 0xae, 0xd4, 0x92, 0x8e, 0xa5, 0x31, 0x0e, 0x7a, 0x0d, 0xa0, - 0x19, 0xcb, 0xd0, 0x0b, 0x46, 0x08, 0x51, 0xc8, 0x15, 0x9f, 0x6b, 0x75, 0xec, 0x4f, 0xc3, 0xa0, - 0x72, 0x37, 0x95, 0xfe, 0xe6, 0x56, 0x8e, 0xbf, 0x79, 0x67, 0x36, 0x42, 0xda, 0xc6, 0x15, 0x33, - 0x6d, 0xe3, 0xbe, 0x6c, 0x41, 0xac, 0xe1, 0x40, 0x6f, 0xc0, 0x60, 0xd3, 0x67, 0x06, 0xd1, 0x81, - 0xf4, 0x32, 0x78, 0xb4, 0xad, 0x8a, 0x84, 0xc7, 0x09, 0x0c, 0xf8, 0x74, 0x94, 0x65, 0x55, 0x1c, - 0x53, 0x41, 0x57, 0xa0, 0xbf, 0x19, 0x90, 0x4a, 0xc4, 0x82, 0x58, 0x75, 0x4f, 0x90, 0x2f, 0x5f, - 0x5e, 0x11, 0x4b, 0x0a, 0xf6, 0x2f, 0x16, 0x60, 0x3c, 0x89, 0x8a, 0x5e, 0x81, 0x1e, 0x72, 0x87, - 0x54, 0x45, 0x7f, 0x33, 0x79, 0x82, 0x58, 0x46, 0xc2, 0x07, 0x80, 0xfe, 0xc7, 0xac, 0x16, 0xba, - 0x0c, 0xfd, 0x94, 0x21, 0xb8, 0xa4, 0x02, 0x36, 0x3e, 0x9c, 0xc7, 0x54, 0x28, 0xce, 0x8a, 0x77, - 0x4e, 0x14, 0x61, 0x59, 0x9d, 0x19, 0xa4, 0x55, 0x9b, 0x15, 0xfa, 0xd6, 0x8a, 0xda, 0x89, 0x04, - 0xd6, 0xe6, 0xcb, 0x1c, 0x49, 0x50, 0xe3, 0x06, 0x69, 0xb2, 0x10, 0xc7, 
0x44, 0xd0, 0x6b, 0xd0, - 0x1b, 0xd6, 0x09, 0x69, 0x0a, 0x8b, 0x83, 0x4c, 0x29, 0x67, 0x85, 0x22, 0x08, 0x4a, 0x4c, 0x2a, - 0xc2, 0x0a, 0x30, 0xaf, 0x68, 0xff, 0x92, 0x05, 0xc0, 0x2d, 0xf8, 0x1c, 0x6f, 0x83, 0x1c, 0x82, - 0x62, 0x60, 0x01, 0x7a, 0xc2, 0x26, 0xa9, 0xb6, 0xb3, 0xf6, 0x8f, 0xfb, 0x53, 0x69, 0x92, 0x6a, - 0xbc, 0x66, 0xe9, 0x3f, 0xcc, 0x6a, 0xdb, 0xdf, 0x0f, 0x30, 0x1a, 0xa3, 0x2d, 0x47, 0xa4, 0x81, - 0x9e, 0x31, 0xa2, 0xdc, 0x9c, 0x48, 0x44, 0xb9, 0x19, 0x64, 0xd8, 0x9a, 0x0c, 0xfa, 0xd3, 0x50, - 0x6c, 0x38, 0x77, 0x84, 0x90, 0xf1, 0xa9, 0xf6, 0xdd, 0xa0, 0xf4, 0x67, 0x56, 0x9c, 0x3b, 0xfc, - 0x1d, 0xfe, 0x94, 0xdc, 0x63, 0x2b, 0xce, 0x9d, 0x8e, 0x16, 0xe9, 0xb4, 0x11, 0xd6, 0x96, 0xeb, - 0x09, 0xe3, 0xb4, 0xae, 0xda, 0x72, 0xbd, 0x64, 0x5b, 0xae, 0xd7, 0x45, 0x5b, 0xae, 0x87, 0xee, - 0x42, 0xbf, 0xb0, 0x1d, 0x15, 0xe1, 0xf7, 0xce, 0x77, 0xd1, 0x9e, 0x30, 0x3d, 0xe5, 0x6d, 0x9e, - 0x97, 0x72, 0x06, 0x51, 0xda, 0xb1, 0x5d, 0xd9, 0x20, 0xfa, 0x3b, 0x16, 0x8c, 0x8a, 0xdf, 0x98, - 0xbc, 0xdd, 0x22, 0x61, 0x24, 0xf8, 0xf0, 0x0f, 0x75, 0xdf, 0x07, 0x51, 0x91, 0x77, 0xe5, 0x43, - 0xf2, 0xca, 0x34, 0x81, 0x1d, 0x7b, 0x94, 0xe8, 0x05, 0xfa, 0x45, 0x0b, 0x8e, 0x34, 0x9c, 0x3b, - 0xbc, 0x45, 0x5e, 0x86, 0x9d, 0xc8, 0xf5, 0x85, 0x0d, 0xc6, 0x2b, 0xdd, 0x4d, 0x7f, 0xaa, 0x3a, - 0xef, 0xa4, 0x54, 0xb8, 0x1e, 0xc9, 0x42, 0xe9, 0xd8, 0xd5, 0xcc, 0x7e, 0x4d, 0xaf, 0xc3, 0x80, - 0x5c, 0x6f, 0x0f, 0xd2, 0x30, 0x9e, 0xb5, 0x23, 0xd6, 0xda, 0x03, 0x6d, 0xe7, 0xd3, 0x30, 0xac, - 0xaf, 0xb1, 0x07, 0xda, 0xd6, 0xdb, 0x30, 0x99, 0xb1, 0x96, 0x1e, 0x68, 0x93, 0xb7, 0xe1, 0x44, - 0xee, 0xfa, 0x78, 0xa0, 0x8e, 0x0d, 0x5f, 0xb5, 0xf4, 0x73, 0xf0, 0x10, 0xb4, 0x33, 0xf3, 0xa6, - 0x76, 0xe6, 0x74, 0xfb, 0x9d, 0x93, 0xa3, 0xa2, 0x79, 0x4b, 0xef, 0x34, 0x3d, 0xd5, 0xd1, 0xeb, - 0xd0, 0x57, 0xa7, 0x25, 0xd2, 0x02, 0xd9, 0xee, 0xbc, 0x23, 0x63, 0xbe, 0x98, 0x95, 0x87, 0x58, - 0x50, 0xb0, 0xbf, 0x64, 0x41, 0x86, 0x6b, 0x06, 0xe5, 0x93, 0x5a, 0x6e, 0x8d, 0x0d, 0x49, 0x31, - 0xe6, 0x93, 0x54, 0x10, 0x98, 0x53, 0x50, 0xdc, 0x70, 0x6b, 0xc2, 0xb3, 0x58, 0x81, 0x2f, 0x51, - 0xf0, 0x86, 0x5b, 0x43, 0x4b, 0x80, 0xc2, 0x56, 0xb3, 0x59, 0x67, 0x66, 0x4b, 0x4e, 0xfd, 0x52, - 0xe0, 0xb7, 0x9a, 0xdc, 0xdc, 0xb8, 0xc8, 0x85, 0x44, 0x95, 0x14, 0x14, 0x67, 0xd4, 0xb0, 0x7f, - 0xd5, 0x82, 0x9e, 0x43, 0x98, 0x26, 0x6c, 0x4e, 0xd3, 0x33, 0xb9, 0xa4, 0x45, 0xd6, 0x86, 0x19, - 0xec, 0xdc, 0x5e, 0xbc, 0x13, 0x11, 0x2f, 0x64, 0x0c, 0x47, 0xe6, 0xac, 0xed, 0x5a, 0x30, 0x79, - 0xd5, 0x77, 0x6a, 0x73, 0x4e, 0xdd, 0xf1, 0xaa, 0x24, 0x58, 0xf6, 0x36, 0x0e, 0x64, 0xdb, 0x5f, - 0xe8, 0x68, 0xdb, 0x7f, 0x11, 0xfa, 0xdc, 0xa6, 0x16, 0xf6, 0xfd, 0x0c, 0x9d, 0xdd, 0xe5, 0xb2, - 0x88, 0xf8, 0x8e, 0x8c, 0xc6, 0x59, 0x29, 0x16, 0xf8, 0x74, 0x59, 0x72, 0xa3, 0xba, 0x9e, 0xfc, - 0x65, 0x49, 0xdf, 0x3a, 0xc9, 0x70, 0x66, 0x86, 0xf9, 0xf7, 0x26, 0x18, 0x4d, 0x08, 0x0f, 0x46, - 0x0c, 0xfd, 0x2e, 0xff, 0x52, 0xb1, 0x36, 0x1f, 0xcf, 0x7e, 0x83, 0xa4, 0x06, 0x46, 0xf3, 0xcd, - 0xe3, 0x05, 0x58, 0x12, 0xb2, 0x2f, 0x42, 0x66, 0xf8, 0x99, 0xce, 0xf2, 0x25, 0xfb, 0x63, 0x30, - 0xc1, 0x6a, 0x1e, 0x50, 0x76, 0x63, 0x27, 0xa4, 0xe2, 0x19, 0x11, 0x7c, 0xed, 0xff, 0x6c, 0x01, - 0x5a, 0xf1, 0x6b, 0xee, 0xfa, 0x8e, 0x20, 0xce, 0xbf, 0xff, 0x6d, 0x28, 0xf1, 0xc7, 0x71, 0x32, - 0xca, 0xed, 0x7c, 0xdd, 0x09, 0x43, 0x4d, 0x22, 0xff, 0xb8, 0x68, 0xb7, 0xb4, 0xd6, 0x1e, 0x1d, - 0x77, 0xa2, 0x87, 0xde, 0x48, 0x04, 0x1d, 0xfc, 0x70, 0x2a, 0xe8, 0xe0, 0xe3, 0x99, 0x76, 0x31, - 0xe9, 0xde, 0xcb, 0x60, 0x84, 0xf6, 0xe7, 0x2d, 0x18, 0x5b, 0x4d, 0x44, 0x6d, 0x3d, 0xcb, 0x8c, - 
0x04, 0x32, 0x34, 0x4d, 0x15, 0x56, 0x8a, 0x05, 0xf4, 0xbe, 0x4b, 0x62, 0xff, 0xc6, 0x82, 0x38, - 0xdc, 0xd5, 0x21, 0xb0, 0xdc, 0xf3, 0x06, 0xcb, 0x9d, 0xf9, 0x7c, 0x51, 0xdd, 0xc9, 0xe3, 0xb8, - 0xd1, 0x15, 0x35, 0x27, 0x6d, 0x5e, 0x2e, 0x31, 0x19, 0xbe, 0xcf, 0x46, 0xcd, 0x89, 0x53, 0xb3, - 0xf1, 0xf5, 0x02, 0x20, 0x85, 0xdb, 0x75, 0xa0, 0xca, 0x74, 0x8d, 0xfb, 0x13, 0xa8, 0x72, 0x1b, - 0x10, 0x33, 0x73, 0x09, 0x1c, 0x2f, 0xe4, 0x64, 0x5d, 0x21, 0x7b, 0x3e, 0x98, 0x0d, 0xcd, 0xb4, - 0xf4, 0x5c, 0xbd, 0x9a, 0xa2, 0x86, 0x33, 0x5a, 0xd0, 0xcc, 0x97, 0x7a, 0xbb, 0x35, 0x5f, 0xea, - 0xeb, 0xe0, 0x82, 0xfd, 0x15, 0x0b, 0x46, 0xd4, 0x30, 0xbd, 0x4b, 0x5c, 0x40, 0x54, 0x7f, 0x72, - 0xee, 0x95, 0xb2, 0xd6, 0x65, 0xc6, 0x0c, 0x7c, 0x07, 0x73, 0xa5, 0x77, 0xea, 0xee, 0x5d, 0xa2, - 0xe2, 0x29, 0x97, 0x84, 0x6b, 0xbc, 0x28, 0xdd, 0xdf, 0x2d, 0x8d, 0xa8, 0x7f, 0x3c, 0x82, 0x6b, - 0x5c, 0xc5, 0xfe, 0x69, 0xba, 0xd9, 0xcd, 0xa5, 0x88, 0x5e, 0x80, 0xde, 0xe6, 0xa6, 0x13, 0x92, - 0x84, 0xab, 0x5c, 0x6f, 0x99, 0x16, 0xee, 0xef, 0x96, 0x46, 0x55, 0x05, 0x56, 0x82, 0x39, 0x76, - 0xf7, 0xe1, 0x3f, 0xd3, 0x8b, 0xb3, 0x63, 0xf8, 0xcf, 0xbf, 0xb4, 0xa0, 0x67, 0x95, 0xde, 0x5e, - 0x0f, 0xfe, 0x08, 0x78, 0xd5, 0x38, 0x02, 0x4e, 0xe6, 0x65, 0x16, 0xca, 0xdd, 0xfd, 0x4b, 0x89, - 0xdd, 0x7f, 0x3a, 0x97, 0x42, 0xfb, 0x8d, 0xdf, 0x80, 0x21, 0x96, 0xaf, 0x48, 0xb8, 0x05, 0x3e, - 0x67, 0x6c, 0xf8, 0x52, 0x62, 0xc3, 0x8f, 0x69, 0xa8, 0xda, 0x4e, 0x7f, 0x02, 0xfa, 0x85, 0x9f, - 0x59, 0x32, 0x22, 0x81, 0xc0, 0xc5, 0x12, 0x6e, 0xff, 0x64, 0x11, 0x8c, 0xfc, 0x48, 0xe8, 0xd7, - 0x2d, 0x98, 0x09, 0xb8, 0xfd, 0x79, 0x6d, 0xa1, 0x15, 0xb8, 0xde, 0x46, 0xa5, 0xba, 0x49, 0x6a, - 0xad, 0xba, 0xeb, 0x6d, 0x2c, 0x6f, 0x78, 0xbe, 0x2a, 0x5e, 0xbc, 0x43, 0xaa, 0x2d, 0xa6, 0x1b, - 0xee, 0x90, 0x8c, 0x49, 0xf9, 0x71, 0x3c, 0xbb, 0xb7, 0x5b, 0x9a, 0xc1, 0x07, 0xa2, 0x8d, 0x0f, - 0xd8, 0x17, 0xf4, 0x7b, 0x16, 0x9c, 0xe7, 0x79, 0x7a, 0xba, 0xef, 0x7f, 0x1b, 0x09, 0x47, 0x59, - 0x92, 0x8a, 0x89, 0xac, 0x91, 0xa0, 0x31, 0xf7, 0xa2, 0x18, 0xd0, 0xf3, 0xe5, 0x83, 0xb5, 0x85, - 0x0f, 0xda, 0x39, 0xfb, 0x5f, 0x14, 0x61, 0x44, 0x84, 0x89, 0x14, 0x77, 0xc0, 0x0b, 0xc6, 0x92, - 0x78, 0x38, 0xb1, 0x24, 0x26, 0x0c, 0xe4, 0xfb, 0x73, 0xfc, 0x87, 0x30, 0x41, 0x0f, 0xe7, 0xcb, - 0xc4, 0x09, 0xa2, 0x5b, 0xc4, 0xe1, 0x56, 0x89, 0xc5, 0x03, 0x9f, 0xfe, 0x4a, 0x3c, 0x7e, 0x35, - 0x49, 0x0c, 0xa7, 0xe9, 0xbf, 0x97, 0xee, 0x1c, 0x0f, 0xc6, 0x53, 0x91, 0x3e, 0xdf, 0x84, 0x41, - 0xe5, 0x24, 0x25, 0x0e, 0x9d, 0xf6, 0x01, 0x73, 0x93, 0x14, 0xb8, 0xd0, 0x33, 0x76, 0xd0, 0x8b, - 0xc9, 0xd9, 0xff, 0xa4, 0x60, 0x34, 0xc8, 0x27, 0x71, 0x15, 0x06, 0x9c, 0x90, 0x05, 0xf1, 0xae, - 0xb5, 0x93, 0x4b, 0xa7, 0x9a, 0x61, 0x8e, 0x6a, 0xb3, 0xa2, 0x26, 0x56, 0x34, 0xd0, 0x65, 0x6e, - 0xfb, 0xb9, 0x4d, 0xda, 0x09, 0xa5, 0x53, 0xd4, 0x40, 0x5a, 0x87, 0x6e, 0x13, 0x2c, 0xea, 0xa3, - 0x4f, 0x70, 0xe3, 0xdc, 0x2b, 0x9e, 0x7f, 0xdb, 0xbb, 0xe4, 0xfb, 0x32, 0x24, 0x50, 0x77, 0x04, - 0x27, 0xa4, 0x49, 0xae, 0xaa, 0x8e, 0x4d, 0x6a, 0xdd, 0x85, 0xce, 0xfe, 0x0c, 0xb0, 0xbc, 0x24, - 0x66, 0x4c, 0x82, 0x10, 0x11, 0x18, 0x13, 0x31, 0x48, 0x65, 0x99, 0x18, 0xbb, 0xcc, 0xe7, 0xb7, - 0x59, 0x3b, 0xd6, 0xe3, 0x5c, 0x31, 0x49, 0xe0, 0x24, 0x4d, 0x7b, 0x93, 0x1f, 0xc2, 0x4b, 0xc4, - 0x89, 0x5a, 0x01, 0x09, 0xd1, 0x47, 0x61, 0x2a, 0xfd, 0x32, 0x16, 0xea, 0x10, 0x8b, 0x71, 0xcf, - 0x27, 0xf7, 0x76, 0x4b, 0x53, 0x95, 0x1c, 0x1c, 0x9c, 0x5b, 0xdb, 0xfe, 0x59, 0x0b, 0x98, 0x27, - 0xf8, 0x21, 0x70, 0x3e, 0x1f, 0x31, 0x39, 0x9f, 0xa9, 0xbc, 0xe9, 0xcc, 0x61, 0x7a, 0x9e, 0xe7, - 0x6b, 0xb8, 0x1c, 0xf8, 
0x77, 0x76, 0x84, 0xed, 0x56, 0xe7, 0x67, 0x9c, 0xfd, 0x45, 0x0b, 0x58, - 0x12, 0x1f, 0xcc, 0x5f, 0xed, 0x52, 0xc1, 0xd1, 0xd9, 0x2c, 0xe1, 0xa3, 0x30, 0xb0, 0x2e, 0x86, - 0x3f, 0x43, 0xe8, 0x64, 0x74, 0xd8, 0xa4, 0x2d, 0x27, 0x4d, 0x78, 0x74, 0x8a, 0x7f, 0x58, 0x51, - 0xb3, 0xff, 0xb1, 0x05, 0xd3, 0xf9, 0xd5, 0xd0, 0x75, 0x38, 0x1e, 0x90, 0x6a, 0x2b, 0x08, 0xe9, - 0x96, 0x10, 0x0f, 0x20, 0xe1, 0x14, 0xc5, 0xa7, 0xfa, 0xa1, 0xbd, 0xdd, 0xd2, 0x71, 0x9c, 0x8d, - 0x82, 0xf3, 0xea, 0xa2, 0x97, 0x60, 0xb4, 0x15, 0x72, 0xce, 0x8f, 0x31, 0x5d, 0xa1, 0x88, 0x14, - 0xcd, 0xfc, 0x86, 0xae, 0x1b, 0x10, 0x9c, 0xc0, 0xb4, 0xbf, 0x8b, 0x2f, 0x47, 0x15, 0x2c, 0xba, - 0x01, 0x13, 0x9e, 0xf6, 0x9f, 0xde, 0x80, 0xf2, 0xa9, 0xff, 0x68, 0xa7, 0x5b, 0x9f, 0x5d, 0x97, - 0x9a, 0xaf, 0x7a, 0x82, 0x0c, 0x4e, 0x53, 0xb6, 0x7f, 0xca, 0x82, 0xe3, 0x3a, 0xa2, 0xe6, 0x0e, - 0xd7, 0x49, 0x97, 0xb7, 0x00, 0x03, 0x7e, 0x93, 0x04, 0x4e, 0xe4, 0x07, 0xe2, 0x9a, 0x3b, 0x27, - 0x57, 0xe8, 0x35, 0x51, 0xbe, 0x2f, 0x92, 0xd7, 0x48, 0xea, 0xb2, 0x1c, 0xab, 0x9a, 0xc8, 0x86, - 0x3e, 0x26, 0x40, 0x0c, 0x85, 0xe3, 0x23, 0x3b, 0xb4, 0x98, 0x7d, 0x4a, 0x88, 0x05, 0xc4, 0xfe, - 0x33, 0x8b, 0xaf, 0x4f, 0xbd, 0xeb, 0xe8, 0x6d, 0x18, 0x6f, 0x38, 0x51, 0x75, 0x73, 0xf1, 0x4e, - 0x33, 0xe0, 0x2a, 0x5a, 0x39, 0x4e, 0x4f, 0x75, 0x1a, 0x27, 0xed, 0x23, 0x63, 0x03, 0xe9, 0x95, - 0x04, 0x31, 0x9c, 0x22, 0x8f, 0x6e, 0xc1, 0x10, 0x2b, 0x63, 0x3e, 0xbd, 0x61, 0x3b, 0x5e, 0x26, - 0xaf, 0x35, 0x65, 0xe2, 0xb3, 0x12, 0xd3, 0xc1, 0x3a, 0x51, 0xfb, 0xcb, 0x45, 0x7e, 0x68, 0xb0, - 0xb7, 0xc7, 0x13, 0xd0, 0xdf, 0xf4, 0x6b, 0xf3, 0xcb, 0x0b, 0x58, 0xcc, 0x82, 0xba, 0xf7, 0xca, - 0xbc, 0x18, 0x4b, 0x38, 0x3a, 0x07, 0x03, 0xe2, 0xa7, 0x54, 0xa9, 0xb3, 0x3d, 0x22, 0xf0, 0x42, - 0xac, 0xa0, 0xe8, 0x59, 0x80, 0x66, 0xe0, 0x6f, 0xbb, 0x35, 0x16, 0x89, 0xa9, 0x68, 0x5a, 0xe7, - 0x95, 0x15, 0x04, 0x6b, 0x58, 0xe8, 0x65, 0x18, 0x69, 0x79, 0x21, 0xe7, 0x9f, 0xb4, 0x78, 0xf7, - 0xca, 0x6e, 0xec, 0xba, 0x0e, 0xc4, 0x26, 0x2e, 0x9a, 0x85, 0xbe, 0xc8, 0x61, 0xd6, 0x66, 0xbd, - 0xf9, 0x46, 0xf4, 0x6b, 0x14, 0x43, 0xcf, 0x2c, 0x47, 0x2b, 0x60, 0x51, 0x11, 0xbd, 0x29, 0xdd, - 0xeb, 0xf9, 0x4d, 0x24, 0xbc, 0x57, 0xba, 0xbb, 0xb5, 0x34, 0xe7, 0x7a, 0xe1, 0x15, 0x63, 0xd0, - 0x42, 0x2f, 0x01, 0x90, 0x3b, 0x11, 0x09, 0x3c, 0xa7, 0xae, 0x6c, 0x44, 0x15, 0x23, 0xb3, 0xe0, - 0xaf, 0xfa, 0xd1, 0xf5, 0x90, 0x2c, 0x2a, 0x0c, 0xac, 0x61, 0xdb, 0xdf, 0x3f, 0x04, 0x10, 0x3f, - 0x34, 0xd0, 0x5d, 0x18, 0xa8, 0x3a, 0x4d, 0xa7, 0xca, 0xd3, 0xa6, 0x16, 0xf3, 0xbc, 0x9e, 0xe3, - 0x1a, 0x33, 0xf3, 0x02, 0x9d, 0x2b, 0x6f, 0x64, 0xc8, 0xf0, 0x01, 0x59, 0xdc, 0x51, 0x61, 0xa3, - 0xda, 0x43, 0x9f, 0xb3, 0x60, 0x48, 0x44, 0x3a, 0x62, 0x33, 0x54, 0xc8, 0xd7, 0xb7, 0x69, 0xed, - 0xcf, 0xc6, 0x35, 0x78, 0x17, 0x9e, 0x93, 0x2b, 0x54, 0x83, 0x74, 0xec, 0x85, 0xde, 0x30, 0xfa, - 0xa0, 0x7c, 0xdb, 0x16, 0x8d, 0xa1, 0x54, 0x6f, 0xdb, 0x41, 0x76, 0xd5, 0xe8, 0xcf, 0xda, 0xeb, - 0xc6, 0xb3, 0xb6, 0x27, 0xdf, 0x7f, 0xd8, 0xe0, 0xb7, 0x3b, 0xbd, 0x68, 0x51, 0x59, 0x8f, 0x25, - 0xd2, 0x9b, 0xef, 0xf4, 0xaa, 0x3d, 0xec, 0x3a, 0xc4, 0x11, 0xf9, 0x34, 0x8c, 0xd5, 0x4c, 0xae, - 0x45, 0xac, 0xc4, 0xc7, 0xf3, 0xe8, 0x26, 0x98, 0x9c, 0x98, 0x4f, 0x49, 0x00, 0x70, 0x92, 0x30, - 0x2a, 0xf3, 0xd0, 0x32, 0xcb, 0xde, 0xba, 0x2f, 0x3c, 0xa8, 0xec, 0xdc, 0xb9, 0xdc, 0x09, 0x23, - 0xd2, 0xa0, 0x98, 0x31, 0x93, 0xb0, 0x2a, 0xea, 0x62, 0x45, 0x05, 0xbd, 0x0e, 0x7d, 0xcc, 0xeb, - 0x31, 0x9c, 0x1a, 0xc8, 0x57, 0x6b, 0x98, 0x91, 0x50, 0xe3, 0x0d, 0xc9, 0xfe, 0x86, 0x58, 0x50, - 0x40, 0x97, 0xa5, 0x4f, 0x71, 0xb8, 0xec, 0x5d, 
0x0f, 0x09, 0xf3, 0x29, 0x1e, 0x9c, 0x7b, 0x34, - 0x76, 0x17, 0xe6, 0xe5, 0x99, 0xf9, 0x67, 0x8d, 0x9a, 0x94, 0xed, 0x13, 0xff, 0x65, 0x5a, 0x5b, - 0x11, 0xb7, 0x2d, 0xb3, 0x7b, 0x66, 0xea, 0xdb, 0x78, 0x38, 0x6f, 0x98, 0x24, 0x70, 0x92, 0x26, - 0x65, 0xa1, 0xf9, 0xae, 0x17, 0x3e, 0x58, 0x9d, 0xce, 0x0e, 0x2e, 0x39, 0x60, 0xb7, 0x11, 0x2f, - 0xc1, 0xa2, 0x3e, 0x72, 0x61, 0x2c, 0x30, 0xd8, 0x0b, 0x19, 0x6e, 0xed, 0x6c, 0x77, 0x4c, 0x8c, - 0x16, 0xc8, 0xdf, 0x24, 0x83, 0x93, 0x74, 0xd1, 0xeb, 0x1a, 0xa3, 0x34, 0xd2, 0xfe, 0xe5, 0xdf, - 0x89, 0x35, 0x9a, 0xde, 0x82, 0x11, 0xe3, 0xb0, 0x79, 0xa0, 0x2a, 0x48, 0x0f, 0xc6, 0x93, 0x27, - 0xcb, 0x03, 0xd5, 0x3c, 0xfe, 0x49, 0x0f, 0x8c, 0x9a, 0x3b, 0x01, 0x9d, 0x87, 0x41, 0x41, 0x44, - 0x65, 0xb4, 0x52, 0x9b, 0x7b, 0x45, 0x02, 0x70, 0x8c, 0xc3, 0x12, 0x99, 0xb1, 0xea, 0x9a, 0xaf, - 0x40, 0x9c, 0xc8, 0x4c, 0x41, 0xb0, 0x86, 0x45, 0x1f, 0xb0, 0xb7, 0x7c, 0x3f, 0x52, 0xf7, 0xa8, - 0xda, 0x2e, 0x73, 0xac, 0x14, 0x0b, 0x28, 0xbd, 0x3f, 0xb7, 0x48, 0xe0, 0x91, 0xba, 0x99, 0xd2, - 0x41, 0xdd, 0x9f, 0x57, 0x74, 0x20, 0x36, 0x71, 0x29, 0x17, 0xe0, 0x87, 0x6c, 0xff, 0x89, 0x67, - 0x72, 0xec, 0x7b, 0x51, 0xe1, 0x51, 0x24, 0x24, 0x1c, 0x7d, 0x0c, 0x8e, 0xab, 0xf0, 0x89, 0x62, - 0x75, 0xc9, 0x16, 0xfb, 0x0c, 0xa9, 0xd6, 0xf1, 0xf9, 0x6c, 0x34, 0x9c, 0x57, 0x1f, 0xbd, 0x0a, - 0xa3, 0xe2, 0x29, 0x25, 0x29, 0xf6, 0x9b, 0x86, 0x84, 0x57, 0x0c, 0x28, 0x4e, 0x60, 0xcb, 0xa4, - 0x14, 0xec, 0x8d, 0x21, 0x29, 0x0c, 0xa4, 0x93, 0x52, 0xe8, 0x70, 0x9c, 0xaa, 0x81, 0x66, 0x61, - 0x8c, 0xb3, 0x8e, 0xae, 0xb7, 0xc1, 0xe7, 0x44, 0x78, 0x76, 0xaa, 0x4d, 0x75, 0xcd, 0x04, 0xe3, - 0x24, 0x3e, 0xba, 0x08, 0xc3, 0x4e, 0x50, 0xdd, 0x74, 0x23, 0x52, 0xa5, 0x3b, 0x83, 0xd9, 0xf2, - 0x69, 0x96, 0x98, 0xb3, 0x1a, 0x0c, 0x1b, 0x98, 0xf6, 0x5d, 0x98, 0xcc, 0x08, 0x2f, 0x43, 0x17, - 0x8e, 0xd3, 0x74, 0xe5, 0x37, 0x25, 0xdc, 0x1d, 0x66, 0xcb, 0xcb, 0xf2, 0x6b, 0x34, 0x2c, 0xba, - 0x3a, 0x59, 0x18, 0x1a, 0x2d, 0xf9, 0xb6, 0x5a, 0x9d, 0x4b, 0x12, 0x80, 0x63, 0x1c, 0xfb, 0xaf, - 0x0a, 0x30, 0x96, 0xa1, 0xa0, 0x63, 0x09, 0xa0, 0x13, 0x2f, 0xad, 0x38, 0xdf, 0xb3, 0x99, 0xe3, - 0xa4, 0x70, 0x80, 0x1c, 0x27, 0xc5, 0x4e, 0x39, 0x4e, 0x7a, 0xde, 0x49, 0x8e, 0x13, 0x73, 0xc4, - 0x7a, 0xbb, 0x1a, 0xb1, 0x8c, 0xbc, 0x28, 0x7d, 0x07, 0xcc, 0x8b, 0x62, 0x0c, 0x7a, 0x7f, 0x17, - 0x83, 0xfe, 0xa3, 0x05, 0x18, 0x4f, 0xea, 0xf6, 0x0e, 0x41, 0x3e, 0xfe, 0xba, 0x21, 0x1f, 0x3f, - 0xd7, 0x8d, 0x27, 0x7e, 0xae, 0xac, 0x1c, 0x27, 0x64, 0xe5, 0x4f, 0x76, 0x45, 0xad, 0xbd, 0xdc, - 0xfc, 0xef, 0x17, 0xe0, 0x68, 0xa6, 0xca, 0xf3, 0x10, 0xc6, 0xe6, 0x9a, 0x31, 0x36, 0xcf, 0x74, - 0x1d, 0xa5, 0x20, 0x77, 0x80, 0x6e, 0x26, 0x06, 0xe8, 0x7c, 0xf7, 0x24, 0xdb, 0x8f, 0xd2, 0x37, - 0x8a, 0x70, 0x3a, 0xb3, 0x5e, 0x2c, 0x5e, 0x5e, 0x32, 0xc4, 0xcb, 0xcf, 0x26, 0xc4, 0xcb, 0x76, - 0xfb, 0xda, 0xf7, 0x47, 0xde, 0x2c, 0xbc, 0xf5, 0x59, 0xcc, 0x91, 0x7b, 0x94, 0x35, 0x1b, 0xde, - 0xfa, 0x8a, 0x10, 0x36, 0xe9, 0xbe, 0x97, 0x64, 0xcc, 0xbf, 0x6b, 0xc1, 0x89, 0xcc, 0xb9, 0x39, - 0x04, 0x49, 0xdf, 0xaa, 0x29, 0xe9, 0x7b, 0xa2, 0xeb, 0xd5, 0x9a, 0x23, 0xfa, 0xfb, 0x7c, 0x5f, - 0xce, 0xb7, 0x30, 0x01, 0xc4, 0x35, 0x18, 0x72, 0xaa, 0x55, 0x12, 0x86, 0x2b, 0x7e, 0x4d, 0xa5, - 0x43, 0x78, 0x86, 0x3d, 0x0f, 0xe3, 0xe2, 0xfd, 0xdd, 0xd2, 0x74, 0x92, 0x44, 0x0c, 0xc6, 0x3a, - 0x05, 0xf4, 0x09, 0x18, 0x08, 0x65, 0x26, 0xcb, 0x9e, 0x7b, 0xcf, 0x64, 0xc9, 0x98, 0x5c, 0x25, - 0x60, 0x51, 0x24, 0xd1, 0x77, 0xea, 0xd1, 0x9f, 0xda, 0x88, 0x16, 0x79, 0x27, 0xef, 0x21, 0x06, - 0xd4, 0xb3, 0x00, 0xdb, 0xea, 0x25, 0x93, 0x14, 0x9e, 0x68, 0x6f, 0x1c, 
0x0d, 0x0b, 0xbd, 0x06, - 0xe3, 0x21, 0x0f, 0x7c, 0x1a, 0x1b, 0xa9, 0xf0, 0xb5, 0xc8, 0x62, 0xc7, 0x55, 0x12, 0x30, 0x9c, - 0xc2, 0x46, 0x4b, 0xb2, 0x55, 0x66, 0x8e, 0xc4, 0x97, 0xe7, 0xd9, 0xb8, 0x45, 0x61, 0x92, 0x74, - 0x24, 0x39, 0x09, 0x6c, 0xf8, 0xb5, 0x9a, 0xe8, 0x13, 0x00, 0x74, 0x11, 0x09, 0x21, 0x4a, 0x7f, - 0xfe, 0x11, 0x4a, 0xcf, 0x96, 0x5a, 0xa6, 0x27, 0x03, 0x73, 0xb3, 0x5f, 0x50, 0x44, 0xb0, 0x46, - 0x10, 0x39, 0x30, 0x12, 0xff, 0x8b, 0x73, 0xb4, 0x9f, 0xcb, 0x6d, 0x21, 0x49, 0x9c, 0x29, 0x18, - 0x16, 0x74, 0x12, 0xd8, 0xa4, 0x88, 0x3e, 0x0e, 0x27, 0xb6, 0x73, 0x2d, 0x7f, 0x38, 0x27, 0xc8, - 0x92, 0xae, 0xe7, 0xdb, 0xfb, 0xe4, 0xd7, 0xb7, 0xff, 0x2d, 0xc0, 0x43, 0x6d, 0x4e, 0x7a, 0x34, - 0x6b, 0x6a, 0xed, 0x9f, 0x4a, 0x4a, 0x36, 0xa6, 0x33, 0x2b, 0x1b, 0xa2, 0x8e, 0xc4, 0x86, 0x2a, - 0xbc, 0xe3, 0x0d, 0xf5, 0x43, 0x96, 0x26, 0x73, 0xe2, 0x36, 0xdd, 0x1f, 0x39, 0xe0, 0x0d, 0x76, - 0x1f, 0x85, 0x50, 0xeb, 0x19, 0x92, 0x9c, 0x67, 0xbb, 0xee, 0x4e, 0xf7, 0xa2, 0x9d, 0xaf, 0x66, - 0x07, 0x7c, 0xe7, 0x42, 0x9e, 0x4b, 0x07, 0xfd, 0xfe, 0xc3, 0x0a, 0xfe, 0xfe, 0x75, 0x0b, 0x4e, - 0xa4, 0x8a, 0x79, 0x1f, 0x48, 0x28, 0xa2, 0xdd, 0xad, 0xbe, 0xe3, 0xce, 0x4b, 0x82, 0xfc, 0x1b, - 0x2e, 0x8b, 0x6f, 0x38, 0x91, 0x8b, 0x97, 0xec, 0xfa, 0x17, 0xfe, 0xb8, 0x34, 0xc9, 0x1a, 0x30, - 0x11, 0x71, 0x7e, 0xd7, 0x51, 0x13, 0xce, 0x54, 0x5b, 0x41, 0x10, 0x2f, 0xd6, 0x8c, 0xcd, 0xc9, - 0xdf, 0x7a, 0x8f, 0xee, 0xed, 0x96, 0xce, 0xcc, 0x77, 0xc0, 0xc5, 0x1d, 0xa9, 0x21, 0x0f, 0x50, - 0x23, 0x65, 0x5f, 0xc7, 0x0e, 0x80, 0x1c, 0x39, 0x4c, 0xda, 0x1a, 0x8f, 0x5b, 0xca, 0x66, 0x58, - 0xe9, 0x65, 0x50, 0x3e, 0x5c, 0xe9, 0xc9, 0xb7, 0x26, 0x2e, 0xfd, 0xf4, 0x55, 0x38, 0xdd, 0x7e, - 0x31, 0x1d, 0x28, 0x94, 0xc3, 0x1f, 0x58, 0x70, 0xaa, 0x6d, 0xbc, 0xb0, 0x6f, 0xc3, 0xc7, 0x82, - 0xfd, 0x59, 0x0b, 0x1e, 0xce, 0xac, 0x91, 0x74, 0xc2, 0xab, 0xd2, 0x42, 0xcd, 0x1c, 0x35, 0x8e, - 0x9c, 0x23, 0x01, 0x38, 0xc6, 0x31, 0x2c, 0x36, 0x0b, 0x1d, 0x2d, 0x36, 0x7f, 0xcb, 0x82, 0xd4, - 0x55, 0x7f, 0x08, 0x9c, 0xe7, 0xb2, 0xc9, 0x79, 0x3e, 0xda, 0xcd, 0x68, 0xe6, 0x30, 0x9d, 0x7f, - 0x31, 0x06, 0xc7, 0x72, 0x3c, 0xb1, 0xb7, 0x61, 0x62, 0xa3, 0x4a, 0xcc, 0xd0, 0x1b, 0xed, 0x42, - 0xd2, 0xb5, 0x8d, 0xd3, 0x31, 0x77, 0x74, 0x6f, 0xb7, 0x34, 0x91, 0x42, 0xc1, 0xe9, 0x26, 0xd0, - 0x67, 0x2d, 0x38, 0xe2, 0xdc, 0x0e, 0x17, 0xe9, 0x0b, 0xc2, 0xad, 0xce, 0xd5, 0xfd, 0xea, 0x16, - 0x65, 0xcc, 0xe4, 0xb6, 0x7a, 0x3e, 0x53, 0x18, 0x7d, 0xb3, 0x92, 0xc2, 0x37, 0x9a, 0x9f, 0xda, - 0xdb, 0x2d, 0x1d, 0xc9, 0xc2, 0xc2, 0x99, 0x6d, 0x21, 0x2c, 0x32, 0x7e, 0x39, 0xd1, 0x66, 0xbb, - 0xe0, 0x30, 0x59, 0x2e, 0xf3, 0x9c, 0x25, 0x96, 0x10, 0xac, 0xe8, 0xa0, 0x4f, 0xc1, 0xe0, 0x86, - 0x8c, 0x03, 0x91, 0xc1, 0x72, 0xc7, 0x03, 0xd9, 0x3e, 0x3a, 0x06, 0x37, 0x81, 0x51, 0x48, 0x38, - 0x26, 0x8a, 0x5e, 0x85, 0xa2, 0xb7, 0x1e, 0x8a, 0x10, 0x75, 0xd9, 0x96, 0xb8, 0xa6, 0xad, 0x33, - 0x0f, 0xc1, 0xb4, 0xba, 0x54, 0xc1, 0xb4, 0x22, 0xba, 0x0c, 0xc5, 0xe0, 0x56, 0x4d, 0x68, 0x52, - 0x32, 0x37, 0x29, 0x9e, 0x5b, 0xc8, 0xe9, 0x15, 0xa3, 0x84, 0xe7, 0x16, 0x30, 0x25, 0x81, 0xca, - 0xd0, 0xcb, 0xdc, 0x97, 0x05, 0x6b, 0x9b, 0xf9, 0x94, 0x6f, 0x13, 0x06, 0x80, 0x7b, 0x24, 0x32, - 0x04, 0xcc, 0x09, 0xa1, 0x35, 0xe8, 0xab, 0xba, 0x5e, 0x8d, 0x04, 0x82, 0x97, 0xfd, 0x60, 0xa6, - 0xce, 0x84, 0x61, 0xe4, 0xd0, 0xe4, 0x2a, 0x04, 0x86, 0x81, 0x05, 0x2d, 0x46, 0x95, 0x34, 0x37, - 0xd7, 0xe5, 0x8d, 0x95, 0x4d, 0x95, 0x34, 0x37, 0x97, 0x2a, 0x6d, 0xa9, 0x32, 0x0c, 0x2c, 0x68, - 0xa1, 0x97, 0xa0, 0xb0, 0x5e, 0x15, 0xae, 0xc9, 0x99, 0xca, 0x13, 0x33, 0x8a, 0xd6, 0x5c, 0xdf, - 
0xde, 0x6e, 0xa9, 0xb0, 0x34, 0x8f, 0x0b, 0xeb, 0x55, 0xb4, 0x0a, 0xfd, 0xeb, 0x3c, 0xee, 0x8e, - 0xd0, 0x8f, 0x3c, 0x9e, 0x1d, 0x12, 0x28, 0x15, 0x9a, 0x87, 0x7b, 0x97, 0x0a, 0x00, 0x96, 0x44, - 0x58, 0x02, 0x2a, 0x15, 0x3f, 0x48, 0x84, 0x2f, 0x9d, 0x39, 0x58, 0xcc, 0x27, 0xfe, 0xd4, 0x88, - 0xa3, 0x10, 0x61, 0x8d, 0x22, 0x5d, 0xd5, 0xce, 0xdd, 0x56, 0xc0, 0x72, 0x5b, 0x08, 0xd5, 0x48, - 0xe6, 0xaa, 0x9e, 0x95, 0x48, 0xed, 0x56, 0xb5, 0x42, 0xc2, 0x31, 0x51, 0xb4, 0x05, 0x23, 0xdb, - 0x61, 0x73, 0x93, 0xc8, 0x2d, 0xcd, 0xc2, 0xde, 0xe5, 0x70, 0xb3, 0x37, 0x04, 0xa2, 0x1b, 0x44, - 0x2d, 0xa7, 0x9e, 0x3a, 0x85, 0xd8, 0xb3, 0xe6, 0x86, 0x4e, 0x0c, 0x9b, 0xb4, 0xe9, 0xf0, 0xbf, - 0xdd, 0xf2, 0x6f, 0xed, 0x44, 0x44, 0x44, 0x1d, 0xcd, 0x1c, 0xfe, 0x37, 0x38, 0x4a, 0x7a, 0xf8, - 0x05, 0x00, 0x4b, 0x22, 0xe8, 0x86, 0x18, 0x1e, 0x76, 0x7a, 0x8e, 0xe7, 0x87, 0x34, 0x9f, 0x95, - 0x48, 0x39, 0x83, 0xc2, 0x4e, 0xcb, 0x98, 0x14, 0x3b, 0x25, 0x9b, 0x9b, 0x7e, 0xe4, 0x7b, 0x89, - 0x13, 0x7a, 0x22, 0xff, 0x94, 0x2c, 0x67, 0xe0, 0xa7, 0x4f, 0xc9, 0x2c, 0x2c, 0x9c, 0xd9, 0x16, - 0xaa, 0xc1, 0x68, 0xd3, 0x0f, 0xa2, 0xdb, 0x7e, 0x20, 0xd7, 0x17, 0x6a, 0x23, 0x28, 0x35, 0x30, - 0x45, 0x8b, 0xcc, 0x30, 0xc7, 0x84, 0xe0, 0x04, 0x4d, 0xf4, 0x51, 0xe8, 0x0f, 0xab, 0x4e, 0x9d, - 0x2c, 0x5f, 0x9b, 0x9a, 0xcc, 0xbf, 0x7e, 0x2a, 0x1c, 0x25, 0x67, 0x75, 0xf1, 0xb0, 0x49, 0x1c, - 0x05, 0x4b, 0x72, 0x68, 0x09, 0x7a, 0x59, 0x62, 0x67, 0x16, 0x22, 0x37, 0x27, 0x32, 0x7b, 0xca, - 0xad, 0x86, 0x9f, 0x4d, 0xac, 0x18, 0xf3, 0xea, 0x74, 0x0f, 0x08, 0x49, 0x81, 0x1f, 0x4e, 0x1d, - 0xcd, 0xdf, 0x03, 0x42, 0xc0, 0x70, 0xad, 0xd2, 0x6e, 0x0f, 0x28, 0x24, 0x1c, 0x13, 0xa5, 0x27, - 0x33, 0x3d, 0x4d, 0x8f, 0xb5, 0x31, 0x99, 0xcc, 0x3d, 0x4b, 0xd9, 0xc9, 0x4c, 0x4f, 0x52, 0x4a, - 0xc2, 0xfe, 0x8d, 0x81, 0x34, 0xcf, 0xc2, 0x24, 0x4c, 0xff, 0xbf, 0x95, 0xb2, 0x99, 0xf8, 0x50, - 0xb7, 0x02, 0xef, 0xfb, 0xf8, 0x70, 0xfd, 0xac, 0x05, 0xc7, 0x9a, 0x99, 0x1f, 0x22, 0x18, 0x80, - 0xee, 0xe4, 0xe6, 0xfc, 0xd3, 0x55, 0x38, 0xe5, 0x6c, 0x38, 0xce, 0x69, 0x29, 0x29, 0x1c, 0x28, - 0xbe, 0x63, 0xe1, 0xc0, 0x0a, 0x0c, 0x54, 0xf9, 0x4b, 0x4e, 0xa6, 0x01, 0xe8, 0x2a, 0x18, 0x28, - 0x63, 0x25, 0xc4, 0x13, 0x70, 0x1d, 0x2b, 0x12, 0xe8, 0x87, 0x2d, 0x38, 0x95, 0xec, 0x3a, 0x26, - 0x0c, 0x2c, 0x0c, 0x26, 0xb9, 0x58, 0x6b, 0x49, 0x7c, 0x7f, 0x8a, 0xff, 0x37, 0x90, 0xf7, 0x3b, - 0x21, 0xe0, 0xf6, 0x8d, 0xa1, 0x85, 0x0c, 0xb9, 0x5a, 0x9f, 0xa9, 0x51, 0xec, 0x42, 0xb6, 0xf6, - 0x3c, 0x0c, 0x37, 0xfc, 0x96, 0x17, 0x09, 0xbb, 0x47, 0x61, 0x3c, 0xc5, 0x8c, 0x86, 0x56, 0xb4, - 0x72, 0x6c, 0x60, 0x25, 0x24, 0x72, 0x03, 0xf7, 0x2c, 0x91, 0x7b, 0x0b, 0x86, 0x3d, 0xcd, 0x25, - 0xa0, 0xdd, 0x0b, 0x56, 0x48, 0x17, 0x35, 0x6c, 0xde, 0x4b, 0xbd, 0x04, 0x1b, 0xd4, 0xda, 0x4b, - 0xcb, 0xe0, 0x9d, 0x49, 0xcb, 0x0e, 0xf5, 0x49, 0x6c, 0xff, 0x42, 0x21, 0xe3, 0xc5, 0xc0, 0xa5, - 0x72, 0xaf, 0x98, 0x52, 0xb9, 0xb3, 0x49, 0xa9, 0x5c, 0x4a, 0x55, 0x65, 0x08, 0xe4, 0xba, 0xcf, - 0x28, 0xd9, 0x75, 0x80, 0xe7, 0xef, 0xb5, 0xe0, 0x38, 0xd3, 0x7d, 0xd0, 0x06, 0xde, 0xb1, 0xbe, - 0x83, 0x99, 0xa4, 0x5e, 0xcd, 0x26, 0x87, 0xf3, 0xda, 0xb1, 0xeb, 0x70, 0xa6, 0xd3, 0xbd, 0xcb, - 0x2c, 0x7c, 0x6b, 0xca, 0x38, 0x22, 0xb6, 0xf0, 0xad, 0x2d, 0x2f, 0x60, 0x06, 0xe9, 0x36, 0x7c, - 0xa1, 0xfd, 0xdf, 0x2d, 0x28, 0x96, 0xfd, 0xda, 0x21, 0xbc, 0xe8, 0x3f, 0x62, 0xbc, 0xe8, 0x1f, - 0xca, 0xbe, 0xf1, 0x6b, 0xb9, 0xca, 0xbe, 0xc5, 0x84, 0xb2, 0xef, 0x54, 0x1e, 0x81, 0xf6, 0xaa, - 0xbd, 0x9f, 0x2e, 0xc2, 0x50, 0xd9, 0xaf, 0xa9, 0x7d, 0xf6, 0xaf, 0xee, 0xc5, 0x91, 0x27, 0x37, - 0xfb, 0x94, 0x46, 0x99, 
0x59, 0xf4, 0xca, 0xb8, 0x13, 0xdf, 0x66, 0xfe, 0x3c, 0x37, 0x89, 0xbb, - 0xb1, 0x19, 0x91, 0x5a, 0xf2, 0x73, 0x0e, 0xcf, 0x9f, 0xe7, 0x9b, 0x45, 0x18, 0x4b, 0xb4, 0x8e, - 0xea, 0x30, 0x52, 0xd7, 0x55, 0x49, 0x62, 0x9d, 0xde, 0x93, 0x16, 0x4a, 0xf8, 0x43, 0x68, 0x45, - 0xd8, 0x24, 0x8e, 0x66, 0x00, 0x3c, 0xdd, 0x2a, 0x5c, 0x05, 0x2a, 0xd6, 0x2c, 0xc2, 0x35, 0x0c, - 0xf4, 0x02, 0x0c, 0x45, 0x7e, 0xd3, 0xaf, 0xfb, 0x1b, 0x3b, 0x57, 0x88, 0x8c, 0x6c, 0xa9, 0x8c, - 0x86, 0xd7, 0x62, 0x10, 0xd6, 0xf1, 0xd0, 0x1d, 0x98, 0x50, 0x44, 0x2a, 0xf7, 0x41, 0xbd, 0xc6, - 0xc4, 0x26, 0xab, 0x49, 0x8a, 0x38, 0xdd, 0x08, 0x7a, 0x09, 0x46, 0x99, 0xf5, 0x32, 0xab, 0x7f, - 0x85, 0xec, 0xc8, 0x88, 0xc7, 0x8c, 0xc3, 0x5e, 0x31, 0x20, 0x38, 0x81, 0x89, 0xe6, 0x61, 0xa2, - 0xe1, 0x86, 0x89, 0xea, 0x7d, 0xac, 0x3a, 0xeb, 0xc0, 0x4a, 0x12, 0x88, 0xd3, 0xf8, 0xf6, 0xcf, - 0x89, 0x39, 0xf6, 0x22, 0xf7, 0xfd, 0xed, 0xf8, 0xee, 0xde, 0x8e, 0xdf, 0xb0, 0x60, 0x9c, 0xb6, - 0xce, 0x4c, 0x32, 0x25, 0x23, 0xa5, 0x72, 0x62, 0x58, 0x6d, 0x72, 0x62, 0x9c, 0xa5, 0xc7, 0x76, - 0xcd, 0x6f, 0x45, 0x42, 0x3a, 0xaa, 0x9d, 0xcb, 0xb4, 0x14, 0x0b, 0xa8, 0xc0, 0x23, 0x41, 0x20, - 0xfc, 0xde, 0x75, 0x3c, 0x12, 0x04, 0x58, 0x40, 0x65, 0xca, 0x8c, 0x9e, 0xec, 0x94, 0x19, 0x3c, - 0xf2, 0xb9, 0xb0, 0x82, 0x13, 0x2c, 0xad, 0x16, 0xf9, 0x5c, 0x9a, 0xc7, 0xc5, 0x38, 0xf6, 0x57, - 0x8b, 0x30, 0x5c, 0xf6, 0x6b, 0xb1, 0x61, 0xc7, 0xf3, 0x86, 0x61, 0xc7, 0x99, 0x84, 0x61, 0xc7, - 0xb8, 0x8e, 0xfb, 0xbe, 0x19, 0xc7, 0xb7, 0xca, 0x8c, 0xe3, 0x37, 0x2d, 0x36, 0x6b, 0x0b, 0xab, - 0x15, 0x6e, 0xe1, 0x8b, 0x2e, 0xc0, 0x10, 0x3b, 0xe1, 0x58, 0xa0, 0x05, 0x69, 0xed, 0xc0, 0x52, - 0x58, 0xae, 0xc6, 0xc5, 0x58, 0xc7, 0x41, 0xe7, 0x60, 0x20, 0x24, 0x4e, 0x50, 0xdd, 0x54, 0xc7, - 0xbb, 0x30, 0x4d, 0xe0, 0x65, 0x58, 0x41, 0xd1, 0x1b, 0x71, 0xd0, 0xed, 0x62, 0xbe, 0xb9, 0xb0, - 0xde, 0x1f, 0xbe, 0x45, 0xf2, 0x23, 0x6d, 0xdb, 0x37, 0x01, 0xa5, 0xf1, 0xbb, 0xf0, 0xbf, 0x2a, - 0x99, 0x61, 0x61, 0x07, 0x53, 0x21, 0x61, 0xff, 0xda, 0x82, 0xd1, 0xb2, 0x5f, 0xa3, 0x5b, 0xf7, - 0xbd, 0xb4, 0x4f, 0xf5, 0x8c, 0x03, 0x7d, 0x6d, 0x32, 0x0e, 0x3c, 0x02, 0xbd, 0x65, 0xbf, 0xd6, - 0x21, 0x74, 0xed, 0x3f, 0xb0, 0xa0, 0xbf, 0xec, 0xd7, 0x0e, 0x41, 0xf1, 0xf2, 0x8a, 0xa9, 0x78, - 0x39, 0x9e, 0xb3, 0x6e, 0x72, 0x74, 0x2d, 0x7f, 0xaf, 0x07, 0x46, 0x68, 0x3f, 0xfd, 0x0d, 0x39, - 0x95, 0xc6, 0xb0, 0x59, 0x5d, 0x0c, 0x1b, 0x7d, 0x06, 0xf8, 0xf5, 0xba, 0x7f, 0x3b, 0x39, 0xad, - 0x4b, 0xac, 0x14, 0x0b, 0x28, 0x7a, 0x1a, 0x06, 0x9a, 0x01, 0xd9, 0x76, 0x7d, 0xc1, 0x5f, 0x6b, - 0x6a, 0xac, 0xb2, 0x28, 0xc7, 0x0a, 0x83, 0x3e, 0xbc, 0x43, 0xd7, 0xa3, 0xbc, 0x44, 0xd5, 0xf7, - 0x6a, 0x5c, 0x37, 0x51, 0x14, 0x69, 0xb1, 0xb4, 0x72, 0x6c, 0x60, 0xa1, 0x9b, 0x30, 0xc8, 0xfe, - 0xb3, 0x63, 0xa7, 0xf7, 0xc0, 0xc7, 0x8e, 0x48, 0x14, 0x2c, 0x08, 0xe0, 0x98, 0x16, 0x7a, 0x16, - 0x20, 0x92, 0xa9, 0x65, 0x42, 0x11, 0xc2, 0x54, 0xbd, 0x45, 0x54, 0xd2, 0x99, 0x10, 0x6b, 0x58, - 0xe8, 0x29, 0x18, 0x8c, 0x1c, 0xb7, 0x7e, 0xd5, 0xf5, 0x98, 0xfe, 0x9e, 0xf6, 0x5f, 0xe4, 0xeb, - 0x15, 0x85, 0x38, 0x86, 0x53, 0x5e, 0x90, 0xc5, 0x84, 0x9a, 0xdb, 0x89, 0x44, 0x6a, 0xba, 0x22, - 0xe7, 0x05, 0xaf, 0xaa, 0x52, 0xac, 0x61, 0xa0, 0x4d, 0x38, 0xe9, 0x7a, 0x2c, 0x85, 0x14, 0xa9, - 0x6c, 0xb9, 0xcd, 0xb5, 0xab, 0x95, 0x1b, 0x24, 0x70, 0xd7, 0x77, 0xe6, 0x9c, 0xea, 0x16, 0xf1, - 0x64, 0x42, 0xfc, 0x47, 0x45, 0x17, 0x4f, 0x2e, 0xb7, 0xc1, 0xc5, 0x6d, 0x29, 0xd9, 0xcf, 0xb1, - 0xf5, 0x7e, 0xad, 0x82, 0x9e, 0x34, 0x8e, 0x8e, 0x63, 0xfa, 0xd1, 0xb1, 0xbf, 0x5b, 0xea, 0xbb, - 0x56, 0xd1, 0x62, 0xff, 0x5c, 0x84, 0xa3, 0x65, 
0xbf, 0x56, 0xf6, 0x83, 0x68, 0xc9, 0x0f, 0x6e, - 0x3b, 0x41, 0x4d, 0x2e, 0xaf, 0x92, 0x8c, 0x7e, 0x44, 0xcf, 0xcf, 0x5e, 0x7e, 0xba, 0x18, 0x91, - 0x8d, 0x9e, 0x63, 0x1c, 0xdb, 0x01, 0x9d, 0x4d, 0xab, 0x8c, 0x77, 0x50, 0x49, 0xd8, 0x2e, 0x39, - 0x11, 0x41, 0xd7, 0x60, 0xa4, 0xaa, 0x5f, 0xa3, 0xa2, 0xfa, 0x13, 0xf2, 0x22, 0x33, 0xee, 0xd8, - 0xcc, 0x7b, 0xd7, 0xac, 0x6f, 0x7f, 0xdd, 0x12, 0xad, 0x70, 0x49, 0x04, 0xb7, 0x69, 0xed, 0x7c, - 0x9e, 0xce, 0xc3, 0x44, 0xa0, 0x57, 0xd1, 0x6c, 0xc3, 0x8e, 0xf2, 0xac, 0x36, 0x09, 0x20, 0x4e, - 0xe3, 0xa3, 0x8f, 0xc3, 0x09, 0xa3, 0x50, 0xaa, 0xc9, 0xb5, 0xdc, 0xd2, 0x4c, 0x56, 0x83, 0xf3, - 0x90, 0x70, 0x7e, 0x7d, 0xfb, 0xbb, 0xe1, 0x58, 0xf2, 0xbb, 0x84, 0xf4, 0xe4, 0x1e, 0xbf, 0xae, - 0x70, 0xb0, 0xaf, 0xb3, 0x5f, 0x80, 0x09, 0xfa, 0xac, 0x56, 0x2c, 0x22, 0x9b, 0xbf, 0xce, 0x01, - 0xa6, 0xfe, 0x5d, 0x3f, 0xbb, 0xe2, 0x12, 0x99, 0xd5, 0xd0, 0x27, 0x61, 0x34, 0x24, 0x2c, 0xaa, - 0x9a, 0x94, 0xda, 0xb5, 0xf1, 0x14, 0xaf, 0x2c, 0xea, 0x98, 0xfc, 0x65, 0x62, 0x96, 0xe1, 0x04, - 0x35, 0xd4, 0x80, 0xd1, 0xdb, 0xae, 0x57, 0xf3, 0x6f, 0x87, 0x92, 0xfe, 0x40, 0xbe, 0x0a, 0xe0, - 0x26, 0xc7, 0x4c, 0xf4, 0xd1, 0x68, 0xee, 0xa6, 0x41, 0x0c, 0x27, 0x88, 0xd3, 0x63, 0x24, 0x68, - 0x79, 0xb3, 0xe1, 0xf5, 0x90, 0x04, 0x22, 0xe6, 0x1b, 0x3b, 0x46, 0xb0, 0x2c, 0xc4, 0x31, 0x9c, - 0x1e, 0x23, 0xec, 0x0f, 0x73, 0x35, 0x67, 0xe7, 0x94, 0x38, 0x46, 0xb0, 0x2a, 0xc5, 0x1a, 0x06, - 0x3d, 0x66, 0xd9, 0xbf, 0x55, 0xdf, 0xc3, 0xbe, 0x1f, 0xc9, 0x83, 0x99, 0xa5, 0xa1, 0xd4, 0xca, - 0xb1, 0x81, 0x95, 0x13, 0x61, 0xae, 0xe7, 0xa0, 0x11, 0xe6, 0x50, 0xd4, 0xc6, 0xbb, 0x9e, 0x47, - 0x3a, 0xbe, 0xd8, 0xce, 0xbb, 0x7e, 0xff, 0x9e, 0x3c, 0xef, 0xe9, 0x3d, 0xbf, 0x2e, 0x06, 0xa8, - 0x97, 0x87, 0xd0, 0x63, 0x4a, 0xca, 0x0a, 0x1f, 0x1d, 0x09, 0x43, 0x8b, 0xd0, 0x1f, 0xee, 0x84, - 0xd5, 0xa8, 0x1e, 0xb6, 0x4b, 0x35, 0x5a, 0x61, 0x28, 0x5a, 0xa6, 0x6b, 0x5e, 0x05, 0xcb, 0xba, - 0xa8, 0x0a, 0x93, 0x82, 0xe2, 0xfc, 0xa6, 0xe3, 0xa9, 0x04, 0x88, 0xdc, 0x1a, 0xf1, 0xc2, 0xde, - 0x6e, 0x69, 0x52, 0xb4, 0xac, 0x83, 0xf7, 0x77, 0x4b, 0x74, 0x4b, 0x66, 0x40, 0x70, 0x16, 0x35, - 0xbe, 0xe4, 0xab, 0x55, 0xbf, 0xd1, 0x2c, 0x07, 0xfe, 0xba, 0x5b, 0x27, 0xed, 0x14, 0xbd, 0x15, - 0x03, 0x53, 0x2c, 0x79, 0xa3, 0x0c, 0x27, 0xa8, 0xa1, 0x5b, 0x30, 0xe6, 0x34, 0x9b, 0xb3, 0x41, - 0xc3, 0x0f, 0x64, 0x03, 0x43, 0xf9, 0x1a, 0x83, 0x59, 0x13, 0x95, 0xe7, 0x3f, 0x4c, 0x14, 0xe2, - 0x24, 0x41, 0xfb, 0xbb, 0x18, 0xbf, 0x5d, 0x71, 0x37, 0x3c, 0xe6, 0x93, 0x86, 0x1a, 0x30, 0xd2, - 0x64, 0x27, 0xb2, 0x48, 0x1b, 0x26, 0x76, 0xf1, 0xf3, 0x5d, 0xca, 0x0c, 0x6f, 0xb3, 0xc4, 0xa7, - 0x86, 0xed, 0x68, 0x59, 0x27, 0x87, 0x4d, 0xea, 0xf6, 0xbf, 0x3f, 0xc1, 0x38, 0xb6, 0x0a, 0x17, - 0x04, 0xf6, 0x0b, 0x0f, 0x41, 0xf1, 0xf4, 0x9f, 0xce, 0x17, 0xb9, 0xc7, 0x53, 0x2f, 0xbc, 0x0c, - 0xb1, 0xac, 0x8b, 0x3e, 0x01, 0xa3, 0xf4, 0x25, 0xad, 0xb8, 0xa6, 0x70, 0xea, 0x48, 0x7e, 0xe8, - 0x29, 0x85, 0xa5, 0xa7, 0x14, 0xd4, 0x2b, 0xe3, 0x04, 0x31, 0xf4, 0x06, 0x33, 0xa7, 0x94, 0xa4, - 0x0b, 0xdd, 0x90, 0xd6, 0x2d, 0x27, 0x25, 0x59, 0x8d, 0x08, 0x6a, 0xc1, 0x64, 0x3a, 0x71, 0x72, - 0x38, 0x65, 0xe7, 0x3f, 0x49, 0xd2, 0xb9, 0x8f, 0xe3, 0xdc, 0x6f, 0x69, 0x58, 0x88, 0xb3, 0xe8, - 0xa3, 0xab, 0xc9, 0xb4, 0xb6, 0x45, 0x43, 0x58, 0x9f, 0x4a, 0x6d, 0x3b, 0xd2, 0x36, 0xa3, 0xed, - 0x06, 0x9c, 0xd2, 0x32, 0x83, 0x5e, 0x0a, 0x1c, 0x66, 0xce, 0xe3, 0xb2, 0x8b, 0x42, 0xe3, 0x25, - 0x1f, 0xde, 0xdb, 0x2d, 0x9d, 0x5a, 0x6b, 0x87, 0x88, 0xdb, 0xd3, 0x41, 0xd7, 0xe0, 0x28, 0x0f, - 0x9c, 0xb2, 0x40, 0x9c, 0x5a, 0xdd, 0xf5, 0x14, 0xb3, 0xca, 0x8f, 0x95, 
-	// old gzipped FileDescriptorProto byte array (elided)
+	// 16114 bytes of a gzipped FileDescriptorProto
+	// regenerated gzipped FileDescriptorProto byte array (elided)
0xa8, 0xc8, 0xbb, 0xf2, + 0x11, 0x79, 0x65, 0x9a, 0xc0, 0x8e, 0x3d, 0x4a, 0xf4, 0x02, 0xfd, 0x86, 0x05, 0xc7, 0x1a, 0xce, + 0x5d, 0xde, 0x22, 0x2f, 0xc3, 0x4e, 0xe4, 0xfa, 0xc2, 0x06, 0xe3, 0xb5, 0xee, 0xa6, 0x3f, 0x55, + 0x9d, 0x77, 0x52, 0x2a, 0x5c, 0x8f, 0x65, 0xa1, 0x74, 0xec, 0x6a, 0x66, 0xbf, 0xa6, 0x37, 0x60, + 0x40, 0xae, 0xb7, 0x87, 0x69, 0x18, 0xcf, 0xda, 0x11, 0x6b, 0xed, 0xa1, 0xb6, 0xf3, 0x59, 0x18, + 0xd6, 0xd7, 0xd8, 0x43, 0x6d, 0xeb, 0x5d, 0x98, 0xcc, 0x58, 0x4b, 0x0f, 0xb5, 0xc9, 0x3b, 0x70, + 0x2a, 0x77, 0x7d, 0x3c, 0x54, 0xc7, 0x86, 0xaf, 0x5b, 0xfa, 0x39, 0x78, 0x04, 0xda, 0x99, 0x79, + 0x53, 0x3b, 0x73, 0xb6, 0xfd, 0xce, 0xc9, 0x51, 0xd1, 0xbc, 0xa3, 0x77, 0x9a, 0x9e, 0xea, 0xe8, + 0x4d, 0xe8, 0xab, 0xd3, 0x12, 0x69, 0x81, 0x6c, 0x77, 0xde, 0x91, 0x31, 0x5f, 0xcc, 0xca, 0x43, + 0x2c, 0x28, 0xd8, 0x5f, 0xb1, 0x20, 0xc3, 0x35, 0x83, 0xf2, 0x49, 0x2d, 0xb7, 0xc6, 0x86, 0xa4, + 0x18, 0xf3, 0x49, 0x2a, 0x08, 0xcc, 0x19, 0x28, 0x6e, 0xba, 0x35, 0xe1, 0x59, 0xac, 0xc0, 0x57, + 0x28, 0x78, 0xd3, 0xad, 0xa1, 0x25, 0x40, 0x61, 0xab, 0xd9, 0xac, 0x33, 0xb3, 0x25, 0xa7, 0x7e, + 0x25, 0xf0, 0x5b, 0x4d, 0x6e, 0x6e, 0x5c, 0xe4, 0x42, 0xa2, 0x4a, 0x0a, 0x8a, 0x33, 0x6a, 0xd8, + 0xbf, 0x6b, 0x41, 0xcf, 0x11, 0x4c, 0x13, 0x36, 0xa7, 0xe9, 0xb9, 0x5c, 0xd2, 0x22, 0x6b, 0xc3, + 0x0c, 0x76, 0xee, 0x2c, 0xde, 0x8d, 0x88, 0x17, 0x32, 0x86, 0x23, 0x73, 0xd6, 0xf6, 0x2c, 0x98, + 0xbc, 0xee, 0x3b, 0xb5, 0x39, 0xa7, 0xee, 0x78, 0x55, 0x12, 0x2c, 0x7b, 0x9b, 0x87, 0xb2, 0xed, + 0x2f, 0x74, 0xb4, 0xed, 0xbf, 0x0c, 0x7d, 0x6e, 0x53, 0x0b, 0xfb, 0x7e, 0x8e, 0xce, 0xee, 0x72, + 0x59, 0x44, 0x7c, 0x47, 0x46, 0xe3, 0xac, 0x14, 0x0b, 0x7c, 0xba, 0x2c, 0xb9, 0x51, 0x5d, 0x4f, + 0xfe, 0xb2, 0xa4, 0x6f, 0x9d, 0x64, 0x38, 0x33, 0xc3, 0xfc, 0x7b, 0x0b, 0x8c, 0x26, 0x84, 0x07, + 0x23, 0x86, 0x7e, 0x97, 0x7f, 0xa9, 0x58, 0x9b, 0x4f, 0x66, 0xbf, 0x41, 0x52, 0x03, 0xa3, 0xf9, + 0xe6, 0xf1, 0x02, 0x2c, 0x09, 0xd9, 0x97, 0x21, 0x33, 0xfc, 0x4c, 0x67, 0xf9, 0x92, 0xfd, 0x09, + 0x98, 0x60, 0x35, 0x0f, 0x29, 0xbb, 0xb1, 0x13, 0x52, 0xf1, 0x8c, 0x08, 0xbe, 0xf6, 0xff, 0x6d, + 0x01, 0x5a, 0xf1, 0x6b, 0xee, 0xc6, 0xae, 0x20, 0xce, 0xbf, 0xff, 0x5d, 0x28, 0xf1, 0xc7, 0x71, + 0x32, 0xca, 0xed, 0x7c, 0xdd, 0x09, 0x43, 0x4d, 0x22, 0xff, 0xa4, 0x68, 0xb7, 0xb4, 0xd6, 0x1e, + 0x1d, 0x77, 0xa2, 0x87, 0xde, 0x4a, 0x04, 0x1d, 0xfc, 0x68, 0x2a, 0xe8, 0xe0, 0x93, 0x99, 0x76, + 0x31, 0xe9, 0xde, 0xcb, 0x60, 0x84, 0xf6, 0x17, 0x2d, 0x18, 0x5b, 0x4d, 0x44, 0x6d, 0x3d, 0xcf, + 0x8c, 0x04, 0x32, 0x34, 0x4d, 0x15, 0x56, 0x8a, 0x05, 0xf4, 0x81, 0x4b, 0x62, 0xff, 0xd5, 0x82, + 0x38, 0xdc, 0xd5, 0x11, 0xb0, 0xdc, 0xf3, 0x06, 0xcb, 0x9d, 0xf9, 0x7c, 0x51, 0xdd, 0xc9, 0xe3, + 0xb8, 0xd1, 0x35, 0x35, 0x27, 0x6d, 0x5e, 0x2e, 0x31, 0x19, 0xbe, 0xcf, 0x46, 0xcd, 0x89, 0x53, + 0xb3, 0xf1, 0xcd, 0x02, 0x20, 0x85, 0xdb, 0x75, 0xa0, 0xca, 0x74, 0x8d, 0x07, 0x13, 0xa8, 0x72, + 0x07, 0x10, 0x33, 0x73, 0x09, 0x1c, 0x2f, 0xe4, 0x64, 0x5d, 0x21, 0x7b, 0x3e, 0x9c, 0x0d, 0xcd, + 0xb4, 0xf4, 0x5c, 0xbd, 0x9e, 0xa2, 0x86, 0x33, 0x5a, 0xd0, 0xcc, 0x97, 0x7a, 0xbb, 0x35, 0x5f, + 0xea, 0xeb, 0xe0, 0x82, 0xfd, 0x35, 0x0b, 0x46, 0xd4, 0x30, 0xbd, 0x4f, 0x5c, 0x40, 0x54, 0x7f, + 0x72, 0xee, 0x95, 0xb2, 0xd6, 0x65, 0xc6, 0x0c, 0x7c, 0x1f, 0x73, 0xa5, 0x77, 0xea, 0xee, 0x3d, + 0xa2, 0xe2, 0x29, 0x97, 0x84, 0x6b, 0xbc, 0x28, 0x3d, 0xd8, 0x2b, 0x8d, 0xa8, 0x7f, 0x3c, 0x82, + 0x6b, 0x5c, 0xc5, 0xfe, 0x25, 0xba, 0xd9, 0xcd, 0xa5, 0x88, 0x5e, 0x82, 0xde, 0xe6, 0x96, 0x13, + 0x92, 0x84, 0xab, 0x5c, 0x6f, 0x99, 0x16, 0x1e, 0xec, 0x95, 0x46, 0x55, 0x05, 0x56, 0x82, 0x39, + 
0x76, 0xf7, 0xe1, 0x3f, 0xd3, 0x8b, 0xb3, 0x63, 0xf8, 0xcf, 0x7f, 0xb2, 0xa0, 0x67, 0x95, 0xde, + 0x5e, 0x0f, 0xff, 0x08, 0x78, 0xdd, 0x38, 0x02, 0x4e, 0xe7, 0x65, 0x16, 0xca, 0xdd, 0xfd, 0x4b, + 0x89, 0xdd, 0x7f, 0x36, 0x97, 0x42, 0xfb, 0x8d, 0xdf, 0x80, 0x21, 0x96, 0xaf, 0x48, 0xb8, 0x05, + 0xbe, 0x60, 0x6c, 0xf8, 0x52, 0x62, 0xc3, 0x8f, 0x69, 0xa8, 0xda, 0x4e, 0x7f, 0x0a, 0xfa, 0x85, + 0x9f, 0x59, 0x32, 0x22, 0x81, 0xc0, 0xc5, 0x12, 0x6e, 0xff, 0x7c, 0x11, 0x8c, 0xfc, 0x48, 0xe8, + 0xf7, 0x2d, 0x98, 0x09, 0xb8, 0xfd, 0x79, 0x6d, 0xa1, 0x15, 0xb8, 0xde, 0x66, 0xa5, 0xba, 0x45, + 0x6a, 0xad, 0xba, 0xeb, 0x6d, 0x2e, 0x6f, 0x7a, 0xbe, 0x2a, 0x5e, 0xbc, 0x4b, 0xaa, 0x2d, 0xa6, + 0x1b, 0xee, 0x90, 0x8c, 0x49, 0xf9, 0x71, 0x3c, 0xbf, 0xbf, 0x57, 0x9a, 0xc1, 0x87, 0xa2, 0x8d, + 0x0f, 0xd9, 0x17, 0xf4, 0xe7, 0x16, 0x5c, 0xe4, 0x79, 0x7a, 0xba, 0xef, 0x7f, 0x1b, 0x09, 0x47, + 0x59, 0x92, 0x8a, 0x89, 0xac, 0x91, 0xa0, 0x31, 0xf7, 0xb2, 0x18, 0xd0, 0x8b, 0xe5, 0xc3, 0xb5, + 0x85, 0x0f, 0xdb, 0x39, 0xfb, 0x7f, 0x2e, 0xc2, 0x88, 0x08, 0x13, 0x29, 0xee, 0x80, 0x97, 0x8c, + 0x25, 0xf1, 0x68, 0x62, 0x49, 0x4c, 0x18, 0xc8, 0x0f, 0xe6, 0xf8, 0x0f, 0x61, 0x82, 0x1e, 0xce, + 0x57, 0x89, 0x13, 0x44, 0xb7, 0x89, 0xc3, 0xad, 0x12, 0x8b, 0x87, 0x3e, 0xfd, 0x95, 0x78, 0xfc, + 0x7a, 0x92, 0x18, 0x4e, 0xd3, 0xff, 0x5e, 0xba, 0x73, 0x3c, 0x18, 0x4f, 0x45, 0xfa, 0x7c, 0x1b, + 0x06, 0x95, 0x93, 0x94, 0x38, 0x74, 0xda, 0x07, 0xcc, 0x4d, 0x52, 0xe0, 0x42, 0xcf, 0xd8, 0x41, + 0x2f, 0x26, 0x67, 0xff, 0x66, 0xc1, 0x68, 0x90, 0x4f, 0xe2, 0x2a, 0x0c, 0x38, 0x21, 0x0b, 0xe2, + 0x5d, 0x6b, 0x27, 0x97, 0x4e, 0x35, 0xc3, 0x1c, 0xd5, 0x66, 0x45, 0x4d, 0xac, 0x68, 0xa0, 0xab, + 0xdc, 0xf6, 0x73, 0x87, 0xb4, 0x13, 0x4a, 0xa7, 0xa8, 0x81, 0xb4, 0x0e, 0xdd, 0x21, 0x58, 0xd4, + 0x47, 0x9f, 0xe2, 0xc6, 0xb9, 0xd7, 0x3c, 0xff, 0x8e, 0x77, 0xc5, 0xf7, 0x65, 0x48, 0xa0, 0xee, + 0x08, 0x4e, 0x48, 0x93, 0x5c, 0x55, 0x1d, 0x9b, 0xd4, 0xba, 0x0b, 0x9d, 0xfd, 0x39, 0x60, 0x79, + 0x49, 0xcc, 0x98, 0x04, 0x21, 0x22, 0x30, 0x26, 0x62, 0x90, 0xca, 0x32, 0x31, 0x76, 0x99, 0xcf, + 0x6f, 0xb3, 0x76, 0xac, 0xc7, 0xb9, 0x66, 0x92, 0xc0, 0x49, 0x9a, 0xf6, 0x16, 0x3f, 0x84, 0x97, + 0x88, 0x13, 0xb5, 0x02, 0x12, 0xa2, 0x8f, 0xc3, 0x54, 0xfa, 0x65, 0x2c, 0xd4, 0x21, 0x16, 0xe3, + 0x9e, 0x4f, 0xef, 0xef, 0x95, 0xa6, 0x2a, 0x39, 0x38, 0x38, 0xb7, 0xb6, 0xfd, 0x2b, 0x16, 0x30, + 0x4f, 0xf0, 0x23, 0xe0, 0x7c, 0x3e, 0x66, 0x72, 0x3e, 0x53, 0x79, 0xd3, 0x99, 0xc3, 0xf4, 0xbc, + 0xc8, 0xd7, 0x70, 0x39, 0xf0, 0xef, 0xee, 0x0a, 0xdb, 0xad, 0xce, 0xcf, 0x38, 0xfb, 0xcb, 0x16, + 0xb0, 0x24, 0x3e, 0x98, 0xbf, 0xda, 0xa5, 0x82, 0xa3, 0xb3, 0x59, 0xc2, 0xc7, 0x61, 0x60, 0x43, + 0x0c, 0x7f, 0x86, 0xd0, 0xc9, 0xe8, 0xb0, 0x49, 0x5b, 0x4e, 0x9a, 0xf0, 0xe8, 0x14, 0xff, 0xb0, + 0xa2, 0x66, 0xff, 0xf7, 0x16, 0x4c, 0xe7, 0x57, 0x43, 0xeb, 0x70, 0x32, 0x20, 0xd5, 0x56, 0x10, + 0xd2, 0x2d, 0x21, 0x1e, 0x40, 0xc2, 0x29, 0x8a, 0x4f, 0xf5, 0x23, 0xfb, 0x7b, 0xa5, 0x93, 0x38, + 0x1b, 0x05, 0xe7, 0xd5, 0x45, 0xaf, 0xc0, 0x68, 0x2b, 0xe4, 0x9c, 0x1f, 0x63, 0xba, 0x42, 0x11, + 0x29, 0x9a, 0xf9, 0x0d, 0xad, 0x1b, 0x10, 0x9c, 0xc0, 0xb4, 0x7f, 0x80, 0x2f, 0x47, 0x15, 0x2c, + 0xba, 0x01, 0x13, 0x9e, 0xf6, 0x9f, 0xde, 0x80, 0xf2, 0xa9, 0xff, 0x78, 0xa7, 0x5b, 0x9f, 0x5d, + 0x97, 0x9a, 0xaf, 0x7a, 0x82, 0x0c, 0x4e, 0x53, 0xb6, 0x7f, 0xc1, 0x82, 0x93, 0x3a, 0xa2, 0xe6, + 0x0e, 0xd7, 0x49, 0x97, 0xb7, 0x00, 0x03, 0x7e, 0x93, 0x04, 0x4e, 0xe4, 0x07, 0xe2, 0x9a, 0xbb, + 0x20, 0x57, 0xe8, 0x0d, 0x51, 0x7e, 0x20, 0x92, 0xd7, 0x48, 0xea, 0xb2, 0x1c, 0xab, 0x9a, 0xc8, + 0x86, 0x3e, 0x26, 0x40, 
0x0c, 0x85, 0xe3, 0x23, 0x3b, 0xb4, 0x98, 0x7d, 0x4a, 0x88, 0x05, 0xc4, + 0xfe, 0x7b, 0x8b, 0xaf, 0x4f, 0xbd, 0xeb, 0xe8, 0x5d, 0x18, 0x6f, 0x38, 0x51, 0x75, 0x6b, 0xf1, + 0x6e, 0x33, 0xe0, 0x2a, 0x5a, 0x39, 0x4e, 0xcf, 0x74, 0x1a, 0x27, 0xed, 0x23, 0x63, 0x03, 0xe9, + 0x95, 0x04, 0x31, 0x9c, 0x22, 0x8f, 0x6e, 0xc3, 0x10, 0x2b, 0x63, 0x3e, 0xbd, 0x61, 0x3b, 0x5e, + 0x26, 0xaf, 0x35, 0x65, 0xe2, 0xb3, 0x12, 0xd3, 0xc1, 0x3a, 0x51, 0xfb, 0xab, 0x45, 0x7e, 0x68, + 0xb0, 0xb7, 0xc7, 0x53, 0xd0, 0xdf, 0xf4, 0x6b, 0xf3, 0xcb, 0x0b, 0x58, 0xcc, 0x82, 0xba, 0xf7, + 0xca, 0xbc, 0x18, 0x4b, 0x38, 0xba, 0x00, 0x03, 0xe2, 0xa7, 0x54, 0xa9, 0xb3, 0x3d, 0x22, 0xf0, + 0x42, 0xac, 0xa0, 0xe8, 0x79, 0x80, 0x66, 0xe0, 0xef, 0xb8, 0x35, 0x16, 0x89, 0xa9, 0x68, 0x5a, + 0xe7, 0x95, 0x15, 0x04, 0x6b, 0x58, 0xe8, 0x55, 0x18, 0x69, 0x79, 0x21, 0xe7, 0x9f, 0xb4, 0x78, + 0xf7, 0xca, 0x6e, 0x6c, 0x5d, 0x07, 0x62, 0x13, 0x17, 0xcd, 0x42, 0x5f, 0xe4, 0x30, 0x6b, 0xb3, + 0xde, 0x7c, 0x23, 0xfa, 0x35, 0x8a, 0xa1, 0x67, 0x96, 0xa3, 0x15, 0xb0, 0xa8, 0x88, 0xde, 0x96, + 0xee, 0xf5, 0xfc, 0x26, 0x12, 0xde, 0x2b, 0xdd, 0xdd, 0x5a, 0x9a, 0x73, 0xbd, 0xf0, 0x8a, 0x31, + 0x68, 0xa1, 0x57, 0x00, 0xc8, 0xdd, 0x88, 0x04, 0x9e, 0x53, 0x57, 0x36, 0xa2, 0x8a, 0x91, 0x59, + 0xf0, 0x57, 0xfd, 0x68, 0x3d, 0x24, 0x8b, 0x0a, 0x03, 0x6b, 0xd8, 0xf6, 0x8f, 0x0e, 0x01, 0xc4, + 0x0f, 0x0d, 0x74, 0x0f, 0x06, 0xaa, 0x4e, 0xd3, 0xa9, 0xf2, 0xb4, 0xa9, 0xc5, 0x3c, 0xaf, 0xe7, + 0xb8, 0xc6, 0xcc, 0xbc, 0x40, 0xe7, 0xca, 0x1b, 0x19, 0x32, 0x7c, 0x40, 0x16, 0x77, 0x54, 0xd8, + 0xa8, 0xf6, 0xd0, 0x17, 0x2c, 0x18, 0x12, 0x91, 0x8e, 0xd8, 0x0c, 0x15, 0xf2, 0xf5, 0x6d, 0x5a, + 0xfb, 0xb3, 0x71, 0x0d, 0xde, 0x85, 0x17, 0xe4, 0x0a, 0xd5, 0x20, 0x1d, 0x7b, 0xa1, 0x37, 0x8c, + 0x3e, 0x2c, 0xdf, 0xb6, 0x45, 0x63, 0x28, 0xd5, 0xdb, 0x76, 0x90, 0x5d, 0x35, 0xfa, 0xb3, 0x76, + 0xdd, 0x78, 0xd6, 0xf6, 0xe4, 0xfb, 0x0f, 0x1b, 0xfc, 0x76, 0xa7, 0x17, 0x2d, 0x2a, 0xeb, 0xb1, + 0x44, 0x7a, 0xf3, 0x9d, 0x5e, 0xb5, 0x87, 0x5d, 0x87, 0x38, 0x22, 0x9f, 0x85, 0xb1, 0x9a, 0xc9, + 0xb5, 0x88, 0x95, 0xf8, 0x64, 0x1e, 0xdd, 0x04, 0x93, 0x13, 0xf3, 0x29, 0x09, 0x00, 0x4e, 0x12, + 0x46, 0x65, 0x1e, 0x5a, 0x66, 0xd9, 0xdb, 0xf0, 0x85, 0x07, 0x95, 0x9d, 0x3b, 0x97, 0xbb, 0x61, + 0x44, 0x1a, 0x14, 0x33, 0x66, 0x12, 0x56, 0x45, 0x5d, 0xac, 0xa8, 0xa0, 0x37, 0xa1, 0x8f, 0x79, + 0x3d, 0x86, 0x53, 0x03, 0xf9, 0x6a, 0x0d, 0x33, 0x12, 0x6a, 0xbc, 0x21, 0xd9, 0xdf, 0x10, 0x0b, + 0x0a, 0xe8, 0xaa, 0xf4, 0x29, 0x0e, 0x97, 0xbd, 0xf5, 0x90, 0x30, 0x9f, 0xe2, 0xc1, 0xb9, 0xc7, + 0x63, 0x77, 0x61, 0x5e, 0x9e, 0x99, 0x7f, 0xd6, 0xa8, 0x49, 0xd9, 0x3e, 0xf1, 0x5f, 0xa6, 0xb5, + 0x15, 0x71, 0xdb, 0x32, 0xbb, 0x67, 0xa6, 0xbe, 0x8d, 0x87, 0xf3, 0xa6, 0x49, 0x02, 0x27, 0x69, + 0x52, 0x16, 0x9a, 0xef, 0x7a, 0xe1, 0x83, 0xd5, 0xe9, 0xec, 0xe0, 0x92, 0x03, 0x76, 0x1b, 0xf1, + 0x12, 0x2c, 0xea, 0x23, 0x17, 0xc6, 0x02, 0x83, 0xbd, 0x90, 0xe1, 0xd6, 0xce, 0x77, 0xc7, 0xc4, + 0x68, 0x81, 0xfc, 0x4d, 0x32, 0x38, 0x49, 0x17, 0xbd, 0xa9, 0x31, 0x4a, 0x23, 0xed, 0x5f, 0xfe, + 0x9d, 0x58, 0xa3, 0xe9, 0x6d, 0x18, 0x31, 0x0e, 0x9b, 0x87, 0xaa, 0x82, 0xf4, 0x60, 0x3c, 0x79, + 0xb2, 0x3c, 0x54, 0xcd, 0xe3, 0xdf, 0xf6, 0xc0, 0xa8, 0xb9, 0x13, 0xd0, 0x45, 0x18, 0x14, 0x44, + 0x54, 0x46, 0x2b, 0xb5, 0xb9, 0x57, 0x24, 0x00, 0xc7, 0x38, 0x2c, 0x91, 0x19, 0xab, 0xae, 0xf9, + 0x0a, 0xc4, 0x89, 0xcc, 0x14, 0x04, 0x6b, 0x58, 0xf4, 0x01, 0x7b, 0xdb, 0xf7, 0x23, 0x75, 0x8f, + 0xaa, 0xed, 0x32, 0xc7, 0x4a, 0xb1, 0x80, 0xd2, 0xfb, 0x73, 0x9b, 0x04, 0x1e, 0xa9, 0x9b, 0x29, + 0x1d, 0xd4, 0xfd, 0x79, 0x4d, 0x07, 0x62, 0x13, 
0x97, 0x72, 0x01, 0x7e, 0xc8, 0xf6, 0x9f, 0x78, + 0x26, 0xc7, 0xbe, 0x17, 0x15, 0x1e, 0x45, 0x42, 0xc2, 0xd1, 0x27, 0xe0, 0xa4, 0x0a, 0x9f, 0x28, + 0x56, 0x97, 0x6c, 0xb1, 0xcf, 0x90, 0x6a, 0x9d, 0x9c, 0xcf, 0x46, 0xc3, 0x79, 0xf5, 0xd1, 0xeb, + 0x30, 0x2a, 0x9e, 0x52, 0x92, 0x62, 0xbf, 0x69, 0x48, 0x78, 0xcd, 0x80, 0xe2, 0x04, 0xb6, 0x4c, + 0x4a, 0xc1, 0xde, 0x18, 0x92, 0xc2, 0x40, 0x3a, 0x29, 0x85, 0x0e, 0xc7, 0xa9, 0x1a, 0x68, 0x16, + 0xc6, 0x38, 0xeb, 0xe8, 0x7a, 0x9b, 0x7c, 0x4e, 0x84, 0x67, 0xa7, 0xda, 0x54, 0x37, 0x4c, 0x30, + 0x4e, 0xe2, 0xa3, 0xcb, 0x30, 0xec, 0x04, 0xd5, 0x2d, 0x37, 0x22, 0x55, 0xba, 0x33, 0x98, 0x2d, + 0x9f, 0x66, 0x89, 0x39, 0xab, 0xc1, 0xb0, 0x81, 0x69, 0xdf, 0x83, 0xc9, 0x8c, 0xf0, 0x32, 0x74, + 0xe1, 0x38, 0x4d, 0x57, 0x7e, 0x53, 0xc2, 0xdd, 0x61, 0xb6, 0xbc, 0x2c, 0xbf, 0x46, 0xc3, 0xa2, + 0xab, 0x93, 0x85, 0xa1, 0xd1, 0x92, 0x6f, 0xab, 0xd5, 0xb9, 0x24, 0x01, 0x38, 0xc6, 0xb1, 0xff, + 0xb9, 0x00, 0x63, 0x19, 0x0a, 0x3a, 0x96, 0x00, 0x3a, 0xf1, 0xd2, 0x8a, 0xf3, 0x3d, 0x9b, 0x39, + 0x4e, 0x0a, 0x87, 0xc8, 0x71, 0x52, 0xec, 0x94, 0xe3, 0xa4, 0xe7, 0xbd, 0xe4, 0x38, 0x31, 0x47, + 0xac, 0xb7, 0xab, 0x11, 0xcb, 0xc8, 0x8b, 0xd2, 0x77, 0xc8, 0xbc, 0x28, 0xc6, 0xa0, 0xf7, 0x77, + 0x31, 0xe8, 0x3f, 0x5d, 0x80, 0xf1, 0xa4, 0x6e, 0xef, 0x08, 0xe4, 0xe3, 0x6f, 0x1a, 0xf2, 0xf1, + 0x0b, 0xdd, 0x78, 0xe2, 0xe7, 0xca, 0xca, 0x71, 0x42, 0x56, 0xfe, 0x74, 0x57, 0xd4, 0xda, 0xcb, + 0xcd, 0x7f, 0xb1, 0x00, 0xc7, 0x33, 0x55, 0x9e, 0x47, 0x30, 0x36, 0x37, 0x8c, 0xb1, 0x79, 0xae, + 0xeb, 0x28, 0x05, 0xb9, 0x03, 0x74, 0x2b, 0x31, 0x40, 0x17, 0xbb, 0x27, 0xd9, 0x7e, 0x94, 0xbe, + 0x55, 0x84, 0xb3, 0x99, 0xf5, 0x62, 0xf1, 0xf2, 0x92, 0x21, 0x5e, 0x7e, 0x3e, 0x21, 0x5e, 0xb6, + 0xdb, 0xd7, 0x7e, 0x30, 0xf2, 0x66, 0xe1, 0xad, 0xcf, 0x62, 0x8e, 0xdc, 0xa7, 0xac, 0xd9, 0xf0, + 0xd6, 0x57, 0x84, 0xb0, 0x49, 0xf7, 0x7b, 0x49, 0xc6, 0xfc, 0x67, 0x16, 0x9c, 0xca, 0x9c, 0x9b, + 0x23, 0x90, 0xf4, 0xad, 0x9a, 0x92, 0xbe, 0xa7, 0xba, 0x5e, 0xad, 0x39, 0xa2, 0xbf, 0x2f, 0xf6, + 0xe5, 0x7c, 0x0b, 0x13, 0x40, 0xdc, 0x80, 0x21, 0xa7, 0x5a, 0x25, 0x61, 0xb8, 0xe2, 0xd7, 0x54, + 0x3a, 0x84, 0xe7, 0xd8, 0xf3, 0x30, 0x2e, 0x3e, 0xd8, 0x2b, 0x4d, 0x27, 0x49, 0xc4, 0x60, 0xac, + 0x53, 0x40, 0x9f, 0x82, 0x81, 0x50, 0x66, 0xb2, 0xec, 0xb9, 0xff, 0x4c, 0x96, 0x8c, 0xc9, 0x55, + 0x02, 0x16, 0x45, 0x12, 0x7d, 0xbf, 0x1e, 0xfd, 0xa9, 0x8d, 0x68, 0x91, 0x77, 0xf2, 0x3e, 0x62, + 0x40, 0x3d, 0x0f, 0xb0, 0xa3, 0x5e, 0x32, 0x49, 0xe1, 0x89, 0xf6, 0xc6, 0xd1, 0xb0, 0xd0, 0x1b, + 0x30, 0x1e, 0xf2, 0xc0, 0xa7, 0xb1, 0x91, 0x0a, 0x5f, 0x8b, 0x2c, 0x76, 0x5c, 0x25, 0x01, 0xc3, + 0x29, 0x6c, 0xb4, 0x24, 0x5b, 0x65, 0xe6, 0x48, 0x7c, 0x79, 0x9e, 0x8f, 0x5b, 0x14, 0x26, 0x49, + 0xc7, 0x92, 0x93, 0xc0, 0x86, 0x5f, 0xab, 0x89, 0x3e, 0x05, 0x40, 0x17, 0x91, 0x10, 0xa2, 0xf4, + 0xe7, 0x1f, 0xa1, 0xf4, 0x6c, 0xa9, 0x65, 0x7a, 0x32, 0x30, 0x37, 0xfb, 0x05, 0x45, 0x04, 0x6b, + 0x04, 0x91, 0x03, 0x23, 0xf1, 0xbf, 0x38, 0x47, 0xfb, 0x85, 0xdc, 0x16, 0x92, 0xc4, 0x99, 0x82, + 0x61, 0x41, 0x27, 0x81, 0x4d, 0x8a, 0xe8, 0x93, 0x70, 0x6a, 0x27, 0xd7, 0xf2, 0x87, 0x73, 0x82, + 0x2c, 0xe9, 0x7a, 0xbe, 0xbd, 0x4f, 0x7e, 0x7d, 0xfb, 0x7f, 0x07, 0x78, 0xa4, 0xcd, 0x49, 0x8f, + 0x66, 0x4d, 0xad, 0xfd, 0x33, 0x49, 0xc9, 0xc6, 0x74, 0x66, 0x65, 0x43, 0xd4, 0x91, 0xd8, 0x50, + 0x85, 0xf7, 0xbc, 0xa1, 0x7e, 0xc2, 0xd2, 0x64, 0x4e, 0xdc, 0xa6, 0xfb, 0x63, 0x87, 0xbc, 0xc1, + 0x1e, 0xa0, 0x10, 0x6a, 0x23, 0x43, 0x92, 0xf3, 0x7c, 0xd7, 0xdd, 0xe9, 0x5e, 0xb4, 0xf3, 0xf5, + 0xec, 0x80, 0xef, 0x5c, 0xc8, 0x73, 0xe5, 0xb0, 0xdf, 0x7f, 0x54, 0xc1, 
0xdf, 0xbf, 0x69, 0xc1, + 0xa9, 0x54, 0x31, 0xef, 0x03, 0x09, 0x45, 0xb4, 0xbb, 0xd5, 0xf7, 0xdc, 0x79, 0x49, 0x90, 0x7f, + 0xc3, 0x55, 0xf1, 0x0d, 0xa7, 0x72, 0xf1, 0x92, 0x5d, 0xff, 0xd2, 0xdf, 0x94, 0x26, 0x59, 0x03, + 0x26, 0x22, 0xce, 0xef, 0x3a, 0x6a, 0xc2, 0xb9, 0x6a, 0x2b, 0x08, 0xe2, 0xc5, 0x9a, 0xb1, 0x39, + 0xf9, 0x5b, 0xef, 0xf1, 0xfd, 0xbd, 0xd2, 0xb9, 0xf9, 0x0e, 0xb8, 0xb8, 0x23, 0x35, 0xe4, 0x01, + 0x6a, 0xa4, 0xec, 0xeb, 0xd8, 0x01, 0x90, 0x23, 0x87, 0x49, 0x5b, 0xe3, 0x71, 0x4b, 0xd9, 0x0c, + 0x2b, 0xbd, 0x0c, 0xca, 0x47, 0x2b, 0x3d, 0xf9, 0xce, 0xc4, 0xa5, 0x9f, 0xbe, 0x0e, 0x67, 0xdb, + 0x2f, 0xa6, 0x43, 0x85, 0x72, 0xf8, 0x4b, 0x0b, 0xce, 0xb4, 0x8d, 0x17, 0xf6, 0x5d, 0xf8, 0x58, + 0xb0, 0x3f, 0x6f, 0xc1, 0xa3, 0x99, 0x35, 0x92, 0x4e, 0x78, 0x55, 0x5a, 0xa8, 0x99, 0xa3, 0xc6, + 0x91, 0x73, 0x24, 0x00, 0xc7, 0x38, 0x86, 0xc5, 0x66, 0xa1, 0xa3, 0xc5, 0xe6, 0x1f, 0x59, 0x90, + 0xba, 0xea, 0x8f, 0x80, 0xf3, 0x5c, 0x36, 0x39, 0xcf, 0xc7, 0xbb, 0x19, 0xcd, 0x1c, 0xa6, 0xf3, + 0x1f, 0xc7, 0xe0, 0x44, 0x8e, 0x27, 0xf6, 0x0e, 0x4c, 0x6c, 0x56, 0x89, 0x19, 0x7a, 0xa3, 0x5d, + 0x48, 0xba, 0xb6, 0x71, 0x3a, 0xe6, 0x8e, 0xef, 0xef, 0x95, 0x26, 0x52, 0x28, 0x38, 0xdd, 0x04, + 0xfa, 0xbc, 0x05, 0xc7, 0x9c, 0x3b, 0xe1, 0x22, 0x7d, 0x41, 0xb8, 0xd5, 0xb9, 0xba, 0x5f, 0xdd, + 0xa6, 0x8c, 0x99, 0xdc, 0x56, 0x2f, 0x66, 0x0a, 0xa3, 0x6f, 0x55, 0x52, 0xf8, 0x46, 0xf3, 0x53, + 0xfb, 0x7b, 0xa5, 0x63, 0x59, 0x58, 0x38, 0xb3, 0x2d, 0x84, 0x45, 0xc6, 0x2f, 0x27, 0xda, 0x6a, + 0x17, 0x1c, 0x26, 0xcb, 0x65, 0x9e, 0xb3, 0xc4, 0x12, 0x82, 0x15, 0x1d, 0xf4, 0x19, 0x18, 0xdc, + 0x94, 0x71, 0x20, 0x32, 0x58, 0xee, 0x78, 0x20, 0xdb, 0x47, 0xc7, 0xe0, 0x26, 0x30, 0x0a, 0x09, + 0xc7, 0x44, 0xd1, 0xeb, 0x50, 0xf4, 0x36, 0x42, 0x11, 0xa2, 0x2e, 0xdb, 0x12, 0xd7, 0xb4, 0x75, + 0xe6, 0x21, 0x98, 0x56, 0x97, 0x2a, 0x98, 0x56, 0x44, 0x57, 0xa1, 0x18, 0xdc, 0xae, 0x09, 0x4d, + 0x4a, 0xe6, 0x26, 0xc5, 0x73, 0x0b, 0x39, 0xbd, 0x62, 0x94, 0xf0, 0xdc, 0x02, 0xa6, 0x24, 0x50, + 0x19, 0x7a, 0x99, 0xfb, 0xb2, 0x60, 0x6d, 0x33, 0x9f, 0xf2, 0x6d, 0xc2, 0x00, 0x70, 0x8f, 0x44, + 0x86, 0x80, 0x39, 0x21, 0xb4, 0x06, 0x7d, 0x55, 0xd7, 0xab, 0x91, 0x40, 0xf0, 0xb2, 0x1f, 0xce, + 0xd4, 0x99, 0x30, 0x8c, 0x1c, 0x9a, 0x5c, 0x85, 0xc0, 0x30, 0xb0, 0xa0, 0xc5, 0xa8, 0x92, 0xe6, + 0xd6, 0x86, 0xbc, 0xb1, 0xb2, 0xa9, 0x92, 0xe6, 0xd6, 0x52, 0xa5, 0x2d, 0x55, 0x86, 0x81, 0x05, + 0x2d, 0xf4, 0x0a, 0x14, 0x36, 0xaa, 0xc2, 0x35, 0x39, 0x53, 0x79, 0x62, 0x46, 0xd1, 0x9a, 0xeb, + 0xdb, 0xdf, 0x2b, 0x15, 0x96, 0xe6, 0x71, 0x61, 0xa3, 0x8a, 0x56, 0xa1, 0x7f, 0x83, 0xc7, 0xdd, + 0x11, 0xfa, 0x91, 0x27, 0xb3, 0x43, 0x02, 0xa5, 0x42, 0xf3, 0x70, 0xef, 0x52, 0x01, 0xc0, 0x92, + 0x08, 0x4b, 0x40, 0xa5, 0xe2, 0x07, 0x89, 0xf0, 0xa5, 0x33, 0x87, 0x8b, 0xf9, 0xc4, 0x9f, 0x1a, + 0x71, 0x14, 0x22, 0xac, 0x51, 0xa4, 0xab, 0xda, 0xb9, 0xd7, 0x0a, 0x58, 0x6e, 0x0b, 0xa1, 0x1a, + 0xc9, 0x5c, 0xd5, 0xb3, 0x12, 0xa9, 0xdd, 0xaa, 0x56, 0x48, 0x38, 0x26, 0x8a, 0xb6, 0x61, 0x64, + 0x27, 0x6c, 0x6e, 0x11, 0xb9, 0xa5, 0x59, 0xd8, 0xbb, 0x1c, 0x6e, 0xf6, 0xa6, 0x40, 0x74, 0x83, + 0xa8, 0xe5, 0xd4, 0x53, 0xa7, 0x10, 0x7b, 0xd6, 0xdc, 0xd4, 0x89, 0x61, 0x93, 0x36, 0x1d, 0xfe, + 0x77, 0x5b, 0xfe, 0xed, 0xdd, 0x88, 0x88, 0xa8, 0xa3, 0x99, 0xc3, 0xff, 0x16, 0x47, 0x49, 0x0f, + 0xbf, 0x00, 0x60, 0x49, 0x04, 0xdd, 0x14, 0xc3, 0xc3, 0x4e, 0xcf, 0xf1, 0xfc, 0x90, 0xe6, 0xb3, + 0x12, 0x29, 0x67, 0x50, 0xd8, 0x69, 0x19, 0x93, 0x62, 0xa7, 0x64, 0x73, 0xcb, 0x8f, 0x7c, 0x2f, + 0x71, 0x42, 0x4f, 0xe4, 0x9f, 0x92, 0xe5, 0x0c, 0xfc, 0xf4, 0x29, 0x99, 0x85, 0x85, 0x33, 0xdb, + 
0x42, 0x35, 0x18, 0x6d, 0xfa, 0x41, 0x74, 0xc7, 0x0f, 0xe4, 0xfa, 0x42, 0x6d, 0x04, 0xa5, 0x06, + 0xa6, 0x68, 0x91, 0x19, 0xe6, 0x98, 0x10, 0x9c, 0xa0, 0x89, 0x3e, 0x0e, 0xfd, 0x61, 0xd5, 0xa9, + 0x93, 0xe5, 0x1b, 0x53, 0x93, 0xf9, 0xd7, 0x4f, 0x85, 0xa3, 0xe4, 0xac, 0x2e, 0x1e, 0x36, 0x89, + 0xa3, 0x60, 0x49, 0x0e, 0x2d, 0x41, 0x2f, 0x4b, 0xec, 0xcc, 0x42, 0xe4, 0xe6, 0x44, 0x66, 0x4f, + 0xb9, 0xd5, 0xf0, 0xb3, 0x89, 0x15, 0x63, 0x5e, 0x9d, 0xee, 0x01, 0x21, 0x29, 0xf0, 0xc3, 0xa9, + 0xe3, 0xf9, 0x7b, 0x40, 0x08, 0x18, 0x6e, 0x54, 0xda, 0xed, 0x01, 0x85, 0x84, 0x63, 0xa2, 0xf4, + 0x64, 0xa6, 0xa7, 0xe9, 0x89, 0x36, 0x26, 0x93, 0xb9, 0x67, 0x29, 0x3b, 0x99, 0xe9, 0x49, 0x4a, + 0x49, 0xd8, 0x7f, 0x30, 0x90, 0xe6, 0x59, 0x98, 0x84, 0xe9, 0x3f, 0xb7, 0x52, 0x36, 0x13, 0x1f, + 0xe9, 0x56, 0xe0, 0xfd, 0x00, 0x1f, 0xae, 0x9f, 0xb7, 0xe0, 0x44, 0x33, 0xf3, 0x43, 0x04, 0x03, + 0xd0, 0x9d, 0xdc, 0x9c, 0x7f, 0xba, 0x0a, 0xa7, 0x9c, 0x0d, 0xc7, 0x39, 0x2d, 0x25, 0x85, 0x03, + 0xc5, 0xf7, 0x2c, 0x1c, 0x58, 0x81, 0x81, 0x2a, 0x7f, 0xc9, 0xc9, 0x34, 0x00, 0x5d, 0x05, 0x03, + 0x65, 0xac, 0x84, 0x78, 0x02, 0x6e, 0x60, 0x45, 0x02, 0xfd, 0xa4, 0x05, 0x67, 0x92, 0x5d, 0xc7, + 0x84, 0x81, 0x85, 0xc1, 0x24, 0x17, 0x6b, 0x2d, 0x89, 0xef, 0x4f, 0xf1, 0xff, 0x06, 0xf2, 0x41, + 0x27, 0x04, 0xdc, 0xbe, 0x31, 0xb4, 0x90, 0x21, 0x57, 0xeb, 0x33, 0x35, 0x8a, 0x5d, 0xc8, 0xd6, + 0x5e, 0x84, 0xe1, 0x86, 0xdf, 0xf2, 0x22, 0x61, 0xf7, 0x28, 0x8c, 0xa7, 0x98, 0xd1, 0xd0, 0x8a, + 0x56, 0x8e, 0x0d, 0xac, 0x84, 0x44, 0x6e, 0xe0, 0xbe, 0x25, 0x72, 0xef, 0xc0, 0xb0, 0xa7, 0xb9, + 0x04, 0xb4, 0x7b, 0xc1, 0x0a, 0xe9, 0xa2, 0x86, 0xcd, 0x7b, 0xa9, 0x97, 0x60, 0x83, 0x5a, 0x7b, + 0x69, 0x19, 0xbc, 0x37, 0x69, 0xd9, 0x91, 0x3e, 0x89, 0xed, 0x5f, 0x2f, 0x64, 0xbc, 0x18, 0xb8, + 0x54, 0xee, 0x35, 0x53, 0x2a, 0x77, 0x3e, 0x29, 0x95, 0x4b, 0xa9, 0xaa, 0x0c, 0x81, 0x5c, 0xf7, + 0x19, 0x25, 0xbb, 0x0e, 0xf0, 0xfc, 0xc3, 0x16, 0x9c, 0x64, 0xba, 0x0f, 0xda, 0xc0, 0x7b, 0xd6, + 0x77, 0x30, 0x93, 0xd4, 0xeb, 0xd9, 0xe4, 0x70, 0x5e, 0x3b, 0x76, 0x1d, 0xce, 0x75, 0xba, 0x77, + 0x99, 0x85, 0x6f, 0x4d, 0x19, 0x47, 0xc4, 0x16, 0xbe, 0xb5, 0xe5, 0x05, 0xcc, 0x20, 0xdd, 0x86, + 0x2f, 0xb4, 0xff, 0x7f, 0x0b, 0x8a, 0x65, 0xbf, 0x76, 0x04, 0x2f, 0xfa, 0x8f, 0x19, 0x2f, 0xfa, + 0x47, 0xb2, 0x6f, 0xfc, 0x5a, 0xae, 0xb2, 0x6f, 0x31, 0xa1, 0xec, 0x3b, 0x93, 0x47, 0xa0, 0xbd, + 0x6a, 0xef, 0x97, 0x8a, 0x30, 0x54, 0xf6, 0x6b, 0x6a, 0x9f, 0xfd, 0xaf, 0xf7, 0xe3, 0xc8, 0x93, + 0x9b, 0x7d, 0x4a, 0xa3, 0xcc, 0x2c, 0x7a, 0x65, 0xdc, 0x89, 0xef, 0x32, 0x7f, 0x9e, 0x5b, 0xc4, + 0xdd, 0xdc, 0x8a, 0x48, 0x2d, 0xf9, 0x39, 0x47, 0xe7, 0xcf, 0xf3, 0xed, 0x22, 0x8c, 0x25, 0x5a, + 0x47, 0x75, 0x18, 0xa9, 0xeb, 0xaa, 0x24, 0xb1, 0x4e, 0xef, 0x4b, 0x0b, 0x25, 0xfc, 0x21, 0xb4, + 0x22, 0x6c, 0x12, 0x47, 0x33, 0x00, 0x9e, 0x6e, 0x15, 0xae, 0x02, 0x15, 0x6b, 0x16, 0xe1, 0x1a, + 0x06, 0x7a, 0x09, 0x86, 0x22, 0xbf, 0xe9, 0xd7, 0xfd, 0xcd, 0xdd, 0x6b, 0x44, 0x46, 0xb6, 0x54, + 0x46, 0xc3, 0x6b, 0x31, 0x08, 0xeb, 0x78, 0xe8, 0x2e, 0x4c, 0x28, 0x22, 0x95, 0x07, 0xa0, 0x5e, + 0x63, 0x62, 0x93, 0xd5, 0x24, 0x45, 0x9c, 0x6e, 0x04, 0xbd, 0x02, 0xa3, 0xcc, 0x7a, 0x99, 0xd5, + 0xbf, 0x46, 0x76, 0x65, 0xc4, 0x63, 0xc6, 0x61, 0xaf, 0x18, 0x10, 0x9c, 0xc0, 0x44, 0xf3, 0x30, + 0xd1, 0x70, 0xc3, 0x44, 0xf5, 0x3e, 0x56, 0x9d, 0x75, 0x60, 0x25, 0x09, 0xc4, 0x69, 0x7c, 0xfb, + 0x57, 0xc5, 0x1c, 0x7b, 0x91, 0xfb, 0xc1, 0x76, 0x7c, 0x7f, 0x6f, 0xc7, 0x6f, 0x59, 0x30, 0x4e, + 0x5b, 0x67, 0x26, 0x99, 0x92, 0x91, 0x52, 0x39, 0x31, 0xac, 0x36, 0x39, 0x31, 0xce, 0xd3, 0x63, + 0xbb, 0xe6, 0xb7, 0x22, 
0x21, 0x1d, 0xd5, 0xce, 0x65, 0x5a, 0x8a, 0x05, 0x54, 0xe0, 0x91, 0x20, + 0x10, 0x7e, 0xef, 0x3a, 0x1e, 0x09, 0x02, 0x2c, 0xa0, 0x32, 0x65, 0x46, 0x4f, 0x76, 0xca, 0x0c, + 0x1e, 0xf9, 0x5c, 0x58, 0xc1, 0x09, 0x96, 0x56, 0x8b, 0x7c, 0x2e, 0xcd, 0xe3, 0x62, 0x1c, 0xfb, + 0xeb, 0x45, 0x18, 0x2e, 0xfb, 0xb5, 0xd8, 0xb0, 0xe3, 0x45, 0xc3, 0xb0, 0xe3, 0x5c, 0xc2, 0xb0, + 0x63, 0x5c, 0xc7, 0xfd, 0xc0, 0x8c, 0xe3, 0x3b, 0x65, 0xc6, 0xf1, 0x87, 0x16, 0x9b, 0xb5, 0x85, + 0xd5, 0x0a, 0xb7, 0xf0, 0x45, 0x97, 0x60, 0x88, 0x9d, 0x70, 0x2c, 0xd0, 0x82, 0xb4, 0x76, 0x60, + 0x29, 0x2c, 0x57, 0xe3, 0x62, 0xac, 0xe3, 0xa0, 0x0b, 0x30, 0x10, 0x12, 0x27, 0xa8, 0x6e, 0xa9, + 0xe3, 0x5d, 0x98, 0x26, 0xf0, 0x32, 0xac, 0xa0, 0xe8, 0xad, 0x38, 0xe8, 0x76, 0x31, 0xdf, 0x5c, + 0x58, 0xef, 0x0f, 0xdf, 0x22, 0xf9, 0x91, 0xb6, 0xed, 0x5b, 0x80, 0xd2, 0xf8, 0x5d, 0xf8, 0x5f, + 0x95, 0xcc, 0xb0, 0xb0, 0x83, 0xa9, 0x90, 0xb0, 0xff, 0x62, 0xc1, 0x68, 0xd9, 0xaf, 0xd1, 0xad, + 0xfb, 0xbd, 0xb4, 0x4f, 0xf5, 0x8c, 0x03, 0x7d, 0x6d, 0x32, 0x0e, 0x3c, 0x06, 0xbd, 0x65, 0xbf, + 0xd6, 0x21, 0x74, 0xed, 0x7f, 0x63, 0x41, 0x7f, 0xd9, 0xaf, 0x1d, 0x81, 0xe2, 0xe5, 0x35, 0x53, + 0xf1, 0x72, 0x32, 0x67, 0xdd, 0xe4, 0xe8, 0x5a, 0xfe, 0xa4, 0x07, 0x46, 0x68, 0x3f, 0xfd, 0x4d, + 0x39, 0x95, 0xc6, 0xb0, 0x59, 0x5d, 0x0c, 0x1b, 0x7d, 0x06, 0xf8, 0xf5, 0xba, 0x7f, 0x27, 0x39, + 0xad, 0x4b, 0xac, 0x14, 0x0b, 0x28, 0x7a, 0x16, 0x06, 0x9a, 0x01, 0xd9, 0x71, 0x7d, 0xc1, 0x5f, + 0x6b, 0x6a, 0xac, 0xb2, 0x28, 0xc7, 0x0a, 0x83, 0x3e, 0xbc, 0x43, 0xd7, 0xa3, 0xbc, 0x44, 0xd5, + 0xf7, 0x6a, 0x5c, 0x37, 0x51, 0x14, 0x69, 0xb1, 0xb4, 0x72, 0x6c, 0x60, 0xa1, 0x5b, 0x30, 0xc8, + 0xfe, 0xb3, 0x63, 0xa7, 0xf7, 0xd0, 0xc7, 0x8e, 0x48, 0x14, 0x2c, 0x08, 0xe0, 0x98, 0x16, 0x7a, + 0x1e, 0x20, 0x92, 0xa9, 0x65, 0x42, 0x11, 0xc2, 0x54, 0xbd, 0x45, 0x54, 0xd2, 0x99, 0x10, 0x6b, + 0x58, 0xe8, 0x19, 0x18, 0x8c, 0x1c, 0xb7, 0x7e, 0xdd, 0xf5, 0x98, 0xfe, 0x9e, 0xf6, 0x5f, 0xe4, + 0xeb, 0x15, 0x85, 0x38, 0x86, 0x53, 0x5e, 0x90, 0xc5, 0x84, 0x9a, 0xdb, 0x8d, 0x44, 0x6a, 0xba, + 0x22, 0xe7, 0x05, 0xaf, 0xab, 0x52, 0xac, 0x61, 0xa0, 0x2d, 0x38, 0xed, 0x7a, 0x2c, 0x85, 0x14, + 0xa9, 0x6c, 0xbb, 0xcd, 0xb5, 0xeb, 0x95, 0x9b, 0x24, 0x70, 0x37, 0x76, 0xe7, 0x9c, 0xea, 0x36, + 0xf1, 0x64, 0x42, 0xfc, 0xc7, 0x45, 0x17, 0x4f, 0x2f, 0xb7, 0xc1, 0xc5, 0x6d, 0x29, 0x21, 0x9b, + 0x6e, 0xc7, 0x80, 0x38, 0x0d, 0x21, 0x13, 0xe0, 0xe9, 0x67, 0x58, 0x09, 0x16, 0x10, 0xfb, 0x05, + 0xb6, 0x27, 0x6e, 0x54, 0xd0, 0xd3, 0xc6, 0xf1, 0x72, 0x42, 0x3f, 0x5e, 0x0e, 0xf6, 0x4a, 0x7d, + 0x37, 0x2a, 0x5a, 0x7c, 0xa0, 0xcb, 0x70, 0xbc, 0xec, 0xd7, 0xca, 0x7e, 0x10, 0x2d, 0xf9, 0xc1, + 0x1d, 0x27, 0xa8, 0xc9, 0x25, 0x58, 0x92, 0x11, 0x92, 0xe8, 0x19, 0xdb, 0xcb, 0x4f, 0x20, 0x23, + 0xfa, 0xd1, 0x0b, 0x8c, 0xab, 0x3b, 0xa4, 0x43, 0x6a, 0x95, 0xf1, 0x17, 0x2a, 0x51, 0xdb, 0x15, + 0x27, 0x22, 0xe8, 0x06, 0x8c, 0x54, 0xf5, 0xab, 0x56, 0x54, 0x7f, 0x4a, 0x5e, 0x76, 0xc6, 0x3d, + 0x9c, 0x79, 0x37, 0x9b, 0xf5, 0xed, 0x6f, 0x5a, 0xa2, 0x15, 0x2e, 0xad, 0xe0, 0x76, 0xaf, 0x9d, + 0xcf, 0xdc, 0x79, 0x98, 0x08, 0xf4, 0x2a, 0x9a, 0xfd, 0xd8, 0x71, 0x9e, 0xf9, 0x26, 0x01, 0xc4, + 0x69, 0x7c, 0xf4, 0x49, 0x38, 0x65, 0x14, 0x4a, 0x55, 0xba, 0x96, 0x7f, 0x9a, 0xc9, 0x73, 0x70, + 0x1e, 0x12, 0xce, 0xaf, 0x6f, 0xff, 0x20, 0x9c, 0x48, 0x7e, 0x97, 0x90, 0xb0, 0xdc, 0xe7, 0xd7, + 0x15, 0x0e, 0xf7, 0x75, 0xf6, 0x4b, 0x30, 0x41, 0x9f, 0xde, 0x8a, 0x8d, 0x64, 0xf3, 0xd7, 0x39, + 0x08, 0xd5, 0x6f, 0x0e, 0xb0, 0x6b, 0x30, 0x91, 0x7d, 0x0d, 0x7d, 0x1a, 0x46, 0x43, 0xc2, 0x22, + 0xaf, 0x49, 0xc9, 0x5e, 0x1b, 0x6f, 0xf2, 0xca, 
0xa2, 0x8e, 0xc9, 0x5f, 0x2f, 0x66, 0x19, 0x4e, + 0x50, 0x43, 0x0d, 0x18, 0xbd, 0xe3, 0x7a, 0x35, 0xff, 0x4e, 0x28, 0xe9, 0x0f, 0xe4, 0xab, 0x09, + 0x6e, 0x71, 0xcc, 0x44, 0x1f, 0x8d, 0xe6, 0x6e, 0x19, 0xc4, 0x70, 0x82, 0x38, 0x3d, 0x6a, 0x82, + 0x96, 0x37, 0x1b, 0xae, 0x87, 0x24, 0x10, 0x71, 0xe1, 0xd8, 0x51, 0x83, 0x65, 0x21, 0x8e, 0xe1, + 0xf4, 0xa8, 0x61, 0x7f, 0x98, 0x3b, 0x3a, 0x3b, 0xcb, 0xc4, 0x51, 0x83, 0x55, 0x29, 0xd6, 0x30, + 0xe8, 0x51, 0xcc, 0xfe, 0xad, 0xfa, 0x1e, 0xf6, 0xfd, 0x48, 0x1e, 0xde, 0x2c, 0x55, 0xa5, 0x56, + 0x8e, 0x0d, 0xac, 0x9c, 0x28, 0x74, 0x3d, 0x87, 0x8d, 0x42, 0x87, 0xa2, 0x36, 0x1e, 0xf8, 0x3c, + 0x1a, 0xf2, 0xe5, 0x76, 0x1e, 0xf8, 0x07, 0xf7, 0xe5, 0x9d, 0x4f, 0x79, 0x81, 0x0d, 0x31, 0x40, + 0xbd, 0x3c, 0xcc, 0x1e, 0x53, 0x64, 0x56, 0xf8, 0xe8, 0x48, 0x18, 0x5a, 0x84, 0xfe, 0x70, 0x37, + 0xac, 0x46, 0xf5, 0xb0, 0x5d, 0x3a, 0xd2, 0x0a, 0x43, 0xd1, 0xb2, 0x61, 0xf3, 0x2a, 0x58, 0xd6, + 0x45, 0x55, 0x98, 0x14, 0x14, 0xe7, 0xb7, 0x1c, 0x4f, 0x25, 0x49, 0xe4, 0x16, 0x8b, 0x97, 0xf6, + 0xf7, 0x4a, 0x93, 0xa2, 0x65, 0x1d, 0x7c, 0xb0, 0x57, 0xa2, 0x5b, 0x32, 0x03, 0x82, 0xb3, 0xa8, + 0xf1, 0x25, 0x5f, 0xad, 0xfa, 0x8d, 0x66, 0x39, 0xf0, 0x37, 0xdc, 0x3a, 0x69, 0xa7, 0x0c, 0xae, + 0x18, 0x98, 0x62, 0xc9, 0x1b, 0x65, 0x38, 0x41, 0x0d, 0xdd, 0x86, 0x31, 0xa7, 0xd9, 0x9c, 0x0d, + 0x1a, 0x7e, 0x20, 0x1b, 0x18, 0xca, 0xd7, 0x2a, 0xcc, 0x9a, 0xa8, 0x3c, 0x47, 0x62, 0xa2, 0x10, + 0x27, 0x09, 0xd2, 0x81, 0x12, 0x1b, 0xcd, 0x18, 0xa8, 0x91, 0x78, 0xa0, 0xc4, 0xbe, 0xcc, 0x18, + 0xa8, 0x0c, 0x08, 0xce, 0xa2, 0x66, 0xff, 0x00, 0x63, 0xfc, 0x2b, 0xee, 0xa6, 0xc7, 0x9c, 0xe3, + 0x50, 0x03, 0x46, 0x9a, 0xec, 0xd8, 0x17, 0xf9, 0xcb, 0xc4, 0x51, 0xf1, 0x62, 0x97, 0xc2, 0xcb, + 0x3b, 0x2c, 0x03, 0xab, 0x61, 0xc4, 0x5a, 0xd6, 0xc9, 0x61, 0x93, 0xba, 0xfd, 0x8b, 0xd3, 0x8c, + 0x75, 0xac, 0x70, 0x89, 0x64, 0xbf, 0x70, 0x55, 0x14, 0x32, 0x88, 0xe9, 0x7c, 0xd9, 0x7f, 0xbc, + 0xbe, 0x84, 0xbb, 0x23, 0x96, 0x75, 0xd1, 0xa7, 0x60, 0x94, 0x3e, 0xe9, 0x15, 0xfb, 0x16, 0x4e, + 0x1d, 0xcb, 0x8f, 0x81, 0xa5, 0xb0, 0xf4, 0xdc, 0x86, 0x7a, 0x65, 0x9c, 0x20, 0x86, 0xde, 0x62, + 0x76, 0x9d, 0x92, 0x74, 0xa1, 0x1b, 0xd2, 0xba, 0x09, 0xa7, 0x24, 0xab, 0x11, 0x41, 0x2d, 0x98, + 0x4c, 0x67, 0x70, 0x0e, 0xa7, 0xec, 0xfc, 0xb7, 0x51, 0x3a, 0x09, 0x73, 0x9c, 0x84, 0x2e, 0x0d, + 0x0b, 0x71, 0x16, 0x7d, 0x74, 0x3d, 0x99, 0x5f, 0xb7, 0x68, 0x68, 0x0d, 0x52, 0x39, 0x76, 0x47, + 0xda, 0xa6, 0xd6, 0xdd, 0x84, 0x33, 0x5a, 0x8a, 0xd2, 0x2b, 0x81, 0xc3, 0xec, 0x8a, 0x5c, 0x76, + 0x1b, 0x69, 0x4c, 0xed, 0xa3, 0xfb, 0x7b, 0xa5, 0x33, 0x6b, 0xed, 0x10, 0x71, 0x7b, 0x3a, 0xe8, + 0x06, 0x1c, 0xe7, 0x11, 0x5c, 0x16, 0x88, 0x53, 0xab, 0xbb, 0x9e, 0xe2, 0x9a, 0xf9, 0xd9, 0x75, + 0x6a, 0x7f, 0xaf, 0x74, 0x7c, 0x36, 0x0b, 0x01, 0x67, 0xd7, 0x43, 0xaf, 0xc1, 0x60, 0xcd, 0x93, + 0xa7, 0x6c, 0x9f, 0x91, 0x05, 0x76, 0x70, 0x61, 0xb5, 0xa2, 0xbe, 0x3f, 0xfe, 0x83, 0xe3, 0x0a, + 0x68, 0x93, 0xab, 0xad, 0x94, 0xac, 0xb1, 0x3f, 0x15, 0xd8, 0x33, 0x29, 0x8e, 0x37, 0x42, 0x22, + 0x70, 0x7d, 0xad, 0x72, 0xb9, 0x33, 0xa2, 0x25, 0x18, 0x84, 0xd1, 0x9b, 0x80, 0x44, 0xb6, 0xa1, + 0xd9, 0x2a, 0x4b, 0x8e, 0xa7, 0xd9, 0x92, 0x2a, 0x11, 0x42, 0x25, 0x85, 0x81, 0x33, 0x6a, 0xa1, + 0xab, 0xf4, 0x78, 0xd4, 0x4b, 0xc5, 0xf1, 0xab, 0x72, 0x8d, 0x2f, 0x90, 0x66, 0x40, 0x98, 0xf9, + 0xa3, 0x49, 0x11, 0x27, 0xea, 0xa1, 0x1a, 0x9c, 0x76, 0x5a, 0x91, 0xcf, 0x34, 0x82, 0x26, 0xea, + 0x9a, 0xbf, 0x4d, 0x3c, 0xa6, 0x8c, 0x1f, 0x60, 0x01, 0x43, 0x4f, 0xcf, 0xb6, 0xc1, 0xc3, 0x6d, + 0xa9, 0xd0, 0xe7, 0x14, 0x1d, 0x0b, 0x4d, 0x59, 0x67, 0x78, 0x77, 0x73, 
0x0d, 0xb6, 0xc4, 0x40, + 0x2f, 0xc1, 0xd0, 0x96, 0x1f, 0x46, 0xab, 0x24, 0xba, 0xe3, 0x07, 0xdb, 0x22, 0xbd, 0x41, 0x9c, + 0x52, 0x26, 0x06, 0x61, 0x1d, 0x0f, 0x3d, 0x05, 0xfd, 0xcc, 0x54, 0x6c, 0x79, 0x81, 0xdd, 0xb5, + 0x03, 0xf1, 0x19, 0x73, 0x95, 0x17, 0x63, 0x09, 0x97, 0xa8, 0xcb, 0xe5, 0x79, 0x76, 0x1c, 0x27, + 0x50, 0x97, 0xcb, 0xf3, 0x58, 0xc2, 0xe9, 0x72, 0x0d, 0xb7, 0x9c, 0x80, 0x94, 0x03, 0xbf, 0x4a, + 0x42, 0x2d, 0x91, 0xd1, 0x23, 0x3c, 0x79, 0x03, 0x5d, 0xae, 0x95, 0x2c, 0x04, 0x9c, 0x5d, 0x0f, + 0x91, 0x74, 0x7a, 0xde, 0xd1, 0x7c, 0x55, 0x69, 0x9a, 0x1d, 0xec, 0x32, 0x43, 0xaf, 0x07, 0xe3, + 0x2a, 0x31, 0x30, 0x4f, 0xd7, 0x10, 0x4e, 0x8d, 0xb1, 0xb5, 0xdd, 0x7d, 0xae, 0x07, 0xa5, 0x7c, + 0x5e, 0x4e, 0x50, 0xc2, 0x29, 0xda, 0x46, 0x44, 0xda, 0xf1, 0x8e, 0x11, 0x69, 0x2f, 0xc2, 0x60, + 0xd8, 0xba, 0x5d, 0xf3, 0x1b, 0x8e, 0xeb, 0x31, 0x8b, 0x1b, 0xed, 0xe1, 0x5e, 0x91, 0x00, 0x1c, + 0xe3, 0xa0, 0x25, 0x18, 0x70, 0xa4, 0x66, 0x19, 0xe5, 0x07, 0xdb, 0x53, 0xfa, 0x64, 0x1e, 0x7f, + 0x4a, 0xea, 0x92, 0x55, 0x5d, 0xf4, 0x2a, 0x8c, 0x88, 0x80, 0x1e, 0x22, 0x97, 0xfe, 0xa4, 0xe9, + 0xbe, 0x5c, 0xd1, 0x81, 0xd8, 0xc4, 0x45, 0xeb, 0x30, 0x14, 0xf9, 0x75, 0xe6, 0x83, 0x4b, 0xb9, + 0xe4, 0x13, 0xf9, 0x31, 0x71, 0xd7, 0x14, 0x9a, 0xae, 0xf3, 0x50, 0x55, 0xb1, 0x4e, 0x07, 0xad, + 0xf1, 0xf5, 0xce, 0xd2, 0x16, 0x91, 0x50, 0x24, 0x63, 0x3f, 0x93, 0x67, 0x2e, 0xc9, 0xd0, 0xcc, + 0xed, 0x20, 0x6a, 0x62, 0x9d, 0x0c, 0xba, 0x02, 0x13, 0xcd, 0xc0, 0xf5, 0xd9, 0x9a, 0x50, 0x9a, + 0xf2, 0x29, 0x33, 0x49, 0x69, 0x39, 0x89, 0x80, 0xd3, 0x75, 0x58, 0x3c, 0x16, 0x51, 0x38, 0x75, + 0x8a, 0x27, 0x5a, 0xe3, 0x72, 0x10, 0x5e, 0x86, 0x15, 0x14, 0xad, 0xb0, 0x93, 0x98, 0x8b, 0xf0, + 0xa6, 0xa6, 0xf3, 0xbd, 0xfc, 0x75, 0x51, 0x1f, 0xe7, 0xfd, 0xd5, 0x5f, 0x1c, 0x53, 0x40, 0x35, + 0x2d, 0xbf, 0x39, 0x7d, 0x41, 0x85, 0x53, 0xa7, 0xdb, 0xd8, 0xeb, 0x26, 0x9e, 0xcb, 0x31, 0x43, + 0x60, 0x14, 0x87, 0x38, 0x41, 0x13, 0xbd, 0x01, 0xe3, 0x22, 0x58, 0x41, 0x3c, 0x4c, 0x67, 0x62, + 0x9f, 0x26, 0x9c, 0x80, 0xe1, 0x14, 0x36, 0x4f, 0x74, 0xe6, 0xdc, 0xae, 0x13, 0x71, 0xf4, 0x5d, + 0x77, 0xbd, 0xed, 0x70, 0xea, 0x2c, 0x3b, 0x1f, 0x44, 0xa2, 0xb3, 0x24, 0x14, 0x67, 0xd4, 0x40, + 0x6b, 0x30, 0xde, 0x0c, 0x08, 0x69, 0xb0, 0x77, 0x92, 0xb8, 0xcf, 0x4a, 0x3c, 0x1c, 0x11, 0xed, + 0x49, 0x39, 0x01, 0x3b, 0xc8, 0x28, 0xc3, 0x29, 0x0a, 0xe8, 0x0e, 0x0c, 0xf8, 0x3b, 0x24, 0xd8, + 0x22, 0x4e, 0x6d, 0xea, 0x5c, 0x1b, 0x4f, 0x3b, 0x71, 0xb9, 0xdd, 0x10, 0xb8, 0x09, 0x43, 0x24, + 0x59, 0xdc, 0xd9, 0x10, 0x49, 0x36, 0x86, 0xfe, 0x0b, 0x0b, 0x4e, 0x49, 0xd5, 0x5e, 0xa5, 0x49, + 0x47, 0x7d, 0xde, 0xf7, 0xc2, 0x28, 0xe0, 0x01, 0x74, 0x1e, 0xcd, 0x0f, 0x2a, 0xb3, 0x96, 0x53, + 0x49, 0x69, 0x11, 0x4e, 0xe5, 0x61, 0x84, 0x38, 0xbf, 0x45, 0xfa, 0xb2, 0x0f, 0x49, 0x24, 0x0f, + 0xa3, 0xd9, 0x70, 0xe9, 0xad, 0x85, 0xd5, 0xa9, 0xc7, 0x78, 0xf4, 0x1f, 0xba, 0x19, 0x2a, 0x49, + 0x20, 0x4e, 0xe3, 0xa3, 0x4b, 0x50, 0xf0, 0xc3, 0xa9, 0xc7, 0xdb, 0xa4, 0xc4, 0xf7, 0x6b, 0x37, + 0x2a, 0xdc, 0x20, 0xf5, 0x46, 0x05, 0x17, 0xfc, 0x50, 0x26, 0x1b, 0xa3, 0xcf, 0xd9, 0x70, 0xea, + 0x09, 0x2e, 0x73, 0x96, 0xc9, 0xc6, 0x58, 0x21, 0x8e, 0xe1, 0x68, 0x0b, 0xc6, 0x42, 0x43, 0x6c, + 0x10, 0x4e, 0x9d, 0x67, 0x23, 0xf5, 0x44, 0xde, 0xa4, 0x19, 0xd8, 0x5a, 0x16, 0x20, 0x93, 0x0a, + 0x4e, 0x92, 0xe5, 0xbb, 0x4b, 0x13, 0x5c, 0x84, 0x53, 0x4f, 0x76, 0xd8, 0x5d, 0x1a, 0xb2, 0xbe, + 0xbb, 0x74, 0x1a, 0x38, 0x41, 0x13, 0xad, 0xeb, 0x6e, 0x8c, 0x17, 0xf2, 0x8d, 0x1b, 0x33, 0x1d, + 0x18, 0x47, 0xf2, 0x9c, 0x17, 0xa7, 0xbf, 0x0f, 0x26, 0x52, 0x5c, 0xd8, 0x61, 0x7c, 0x3a, 0xa6, + 
0xb7, 0x61, 0xc4, 0x58, 0xe9, 0x0f, 0xd5, 0xe4, 0xe7, 0xcf, 0x06, 0x61, 0x50, 0x99, 0x62, 0xa0, + 0x8b, 0xa6, 0x95, 0xcf, 0xa9, 0xa4, 0x95, 0xcf, 0x40, 0xd9, 0xaf, 0x19, 0x86, 0x3d, 0x6b, 0x19, + 0xb1, 0x72, 0xf3, 0xce, 0xd5, 0xee, 0x1d, 0xcf, 0x34, 0xf5, 0x52, 0xb1, 0x6b, 0x73, 0xa1, 0x9e, + 0xb6, 0x1a, 0xab, 0x2b, 0x30, 0xe1, 0xf9, 0x8c, 0xf5, 0x27, 0x35, 0xc9, 0xd7, 0x31, 0xf6, 0x6d, + 0x50, 0x8f, 0xe5, 0x96, 0x40, 0xc0, 0xe9, 0x3a, 0xb4, 0x41, 0xce, 0x7f, 0x25, 0x55, 0x64, 0x9c, + 0x3d, 0xc3, 0x02, 0x4a, 0x9f, 0x9c, 0xfc, 0x57, 0x38, 0x35, 0x9e, 0xff, 0xe4, 0xe4, 0x95, 0x92, + 0x3c, 0x5e, 0x28, 0x79, 0x3c, 0xa6, 0x11, 0x6a, 0xfa, 0xb5, 0xe5, 0xb2, 0x78, 0x3d, 0x68, 0x51, + 0xec, 0x6b, 0xcb, 0x65, 0xcc, 0x61, 0x68, 0x16, 0xfa, 0xd8, 0x0f, 0x19, 0x23, 0x27, 0x6f, 0xf7, + 0x2f, 0x97, 0xb5, 0x1c, 0xaa, 0xac, 0x02, 0x16, 0x15, 0x99, 0xc4, 0x9f, 0x3e, 0xb9, 0x98, 0xc4, + 0xbf, 0xff, 0x3e, 0x25, 0xfe, 0x92, 0x00, 0x8e, 0x69, 0xa1, 0xbb, 0x70, 0xdc, 0x78, 0xe6, 0x2a, + 0x4f, 0x3c, 0xc8, 0x37, 0x06, 0x48, 0x20, 0xcf, 0x9d, 0x11, 0x9d, 0x3e, 0xbe, 0x9c, 0x45, 0x09, + 0x67, 0x37, 0x80, 0xea, 0x30, 0x51, 0x4d, 0xb5, 0x3a, 0xd0, 0x7d, 0xab, 0x6a, 0x5d, 0xa4, 0x5b, + 0x4c, 0x13, 0x46, 0xaf, 0xc2, 0xc0, 0xbb, 0x3e, 0x37, 0xdc, 0x13, 0x2f, 0x1e, 0x19, 0x05, 0x66, + 0xe0, 0xad, 0x1b, 0x15, 0x56, 0x7e, 0xb0, 0x57, 0x1a, 0x2a, 0xfb, 0x35, 0xf9, 0x17, 0xab, 0x0a, + 0xe8, 0xc7, 0x2c, 0x98, 0x4e, 0xbf, 0xa3, 0x55, 0xa7, 0x47, 0xba, 0xef, 0xb4, 0x2d, 0x1a, 0x9d, + 0x5e, 0xcc, 0x25, 0x87, 0xdb, 0x34, 0x85, 0x3e, 0x4a, 0xf7, 0x53, 0xe8, 0xde, 0x23, 0x22, 0x01, + 0xfd, 0xa3, 0xf1, 0x7e, 0xa2, 0xa5, 0x07, 0x7b, 0xa5, 0x31, 0x7e, 0xe0, 0xba, 0xf7, 0x54, 0xbc, + 0x7d, 0x5e, 0x01, 0xfd, 0x20, 0x1c, 0x0f, 0xd2, 0x72, 0x6d, 0x22, 0x79, 0xfb, 0xa7, 0xbb, 0x39, + 0xbc, 0x93, 0x13, 0x8e, 0xb3, 0x08, 0xe2, 0xec, 0x76, 0xec, 0xdf, 0xb3, 0x98, 0x3e, 0x43, 0x74, + 0x8b, 0x84, 0xad, 0x7a, 0x74, 0x04, 0xc6, 0x72, 0x8b, 0x86, 0x3d, 0xc1, 0x7d, 0x5b, 0xbb, 0xfd, + 0x2f, 0x16, 0xb3, 0x76, 0x3b, 0x42, 0xbf, 0xbd, 0xb7, 0x60, 0x20, 0x12, 0xad, 0x89, 0xae, 0xe7, + 0x59, 0xe6, 0xc8, 0x4e, 0x31, 0x8b, 0x3f, 0xf5, 0x76, 0x92, 0xa5, 0x58, 0x91, 0xb1, 0xff, 0x47, + 0x3e, 0x03, 0x12, 0x72, 0x04, 0x6a, 0xdb, 0x05, 0x53, 0x6d, 0x5b, 0xea, 0xf0, 0x05, 0x39, 0xea, + 0xdb, 0xff, 0xc1, 0xec, 0x37, 0x93, 0x19, 0xbe, 0xdf, 0xcd, 0x2c, 0xed, 0x2f, 0x5a, 0x00, 0x71, + 0x82, 0x93, 0x2e, 0x12, 0x4e, 0x5f, 0xa6, 0xaf, 0x25, 0x3f, 0xf2, 0xab, 0x7e, 0x5d, 0xa8, 0x8d, + 0x4e, 0xc7, 0x9a, 0x63, 0x5e, 0x7e, 0xa0, 0xfd, 0xc6, 0x0a, 0x1b, 0x95, 0x64, 0xc4, 0xe1, 0x62, + 0x6c, 0xcb, 0x60, 0x44, 0x1b, 0xfe, 0x8a, 0x05, 0xc7, 0xb2, 0x9c, 0x40, 0xe8, 0xdb, 0x9b, 0x4b, + 0x4f, 0x95, 0x09, 0xac, 0x9a, 0xcd, 0x9b, 0xa2, 0x1c, 0x2b, 0x8c, 0xae, 0x33, 0x79, 0x1f, 0x2e, + 0xf9, 0xc6, 0x0d, 0x18, 0x29, 0x07, 0x44, 0xe3, 0x2f, 0x5e, 0x8f, 0xf3, 0x02, 0x0d, 0xce, 0x3d, + 0x7b, 0xe8, 0xc8, 0x4a, 0xf6, 0x57, 0x0b, 0x70, 0x8c, 0x1b, 0x72, 0xcd, 0xee, 0xf8, 0x6e, 0xad, + 0xec, 0xd7, 0x84, 0xeb, 0xee, 0xdb, 0x30, 0xdc, 0xd4, 0x44, 0xde, 0xed, 0x02, 0xc9, 0xeb, 0xa2, + 0xf1, 0x58, 0x48, 0xa7, 0x97, 0x62, 0x83, 0x16, 0xaa, 0xc1, 0x30, 0xd9, 0x71, 0xab, 0xca, 0x1a, + 0xa8, 0x70, 0xe8, 0x4b, 0x5a, 0xb5, 0xb2, 0xa8, 0xd1, 0xc1, 0x06, 0xd5, 0xae, 0xcd, 0xaf, 0x35, + 0x16, 0xad, 0xa7, 0x83, 0x05, 0xd0, 0xcf, 0x5a, 0x70, 0x32, 0x27, 0xec, 0x3c, 0x6d, 0xee, 0x0e, + 0x33, 0x99, 0x13, 0xcb, 0x56, 0x35, 0xc7, 0x0d, 0xe9, 0xb0, 0x80, 0xa2, 0x8f, 0x03, 0x34, 0xe3, + 0x94, 0x9b, 0x1d, 0xe2, 0x73, 0x1b, 0x91, 0x7a, 0xb5, 0xa0, 0xab, 0x2a, 0x33, 0xa7, 0x46, 0xcb, + 0xfe, 0x4a, 0x0f, 0xf4, 
0x32, 0xc3, 0x2b, 0x54, 0x86, 0xfe, 0x2d, 0x1e, 0x13, 0xb0, 0xed, 0xbc, + 0x51, 0x5c, 0x19, 0x64, 0x30, 0x9e, 0x37, 0xad, 0x14, 0x4b, 0x32, 0x68, 0x05, 0x26, 0x79, 0x3a, + 0xd1, 0xfa, 0x02, 0xa9, 0x3b, 0xbb, 0x52, 0x9a, 0x5c, 0x60, 0x9f, 0xaa, 0xa4, 0xea, 0xcb, 0x69, + 0x14, 0x9c, 0x55, 0x0f, 0xbd, 0x0e, 0xa3, 0xf4, 0x75, 0xef, 0xb7, 0x22, 0x49, 0x89, 0xe7, 0xef, + 0x54, 0x0f, 0x9e, 0x35, 0x03, 0x8a, 0x13, 0xd8, 0xe8, 0x55, 0x18, 0x69, 0xa6, 0xe4, 0xe6, 0xbd, + 0xb1, 0x80, 0xc9, 0x94, 0x95, 0x9b, 0xb8, 0xcc, 0x0f, 0xa4, 0xc5, 0xbc, 0x5e, 0xd6, 0xb6, 0x02, + 0x12, 0x6e, 0xf9, 0xf5, 0x1a, 0xe3, 0x80, 0x7b, 0x35, 0x3f, 0x90, 0x04, 0x1c, 0xa7, 0x6a, 0x50, + 0x2a, 0x1b, 0x8e, 0x5b, 0x6f, 0x05, 0x24, 0xa6, 0xd2, 0x67, 0x52, 0x59, 0x4a, 0xc0, 0x71, 0xaa, + 0x46, 0x67, 0x85, 0x40, 0xff, 0x83, 0x51, 0x08, 0xd8, 0xbf, 0x5c, 0x00, 0x63, 0x6a, 0xbf, 0x87, + 0xf3, 0x8a, 0xbe, 0x06, 0x3d, 0x9b, 0x41, 0xb3, 0x2a, 0x8c, 0x0c, 0x33, 0xbf, 0xec, 0x0a, 0x2e, + 0xcf, 0xeb, 0x5f, 0x46, 0xff, 0x63, 0x56, 0x8b, 0xee, 0xf1, 0xe3, 0xe5, 0xc0, 0xa7, 0x97, 0x9c, + 0x0c, 0x1b, 0xaa, 0xdc, 0xad, 0xfa, 0xe5, 0x1b, 0xbb, 0x4d, 0x80, 0x6d, 0xe1, 0x33, 0xc2, 0x29, + 0x18, 0xf6, 0x78, 0x15, 0xf1, 0xc2, 0x96, 0x54, 0xd0, 0x25, 0x18, 0x12, 0xa9, 0x1e, 0x99, 0x57, + 0x10, 0xdf, 0x4c, 0xcc, 0x7e, 0x70, 0x21, 0x2e, 0xc6, 0x3a, 0x8e, 0xfd, 0xe3, 0x05, 0x98, 0xcc, + 0x70, 0xeb, 0xe4, 0xd7, 0xc8, 0xa6, 0x1b, 0x46, 0xc1, 0x6e, 0xf2, 0x72, 0xc2, 0xa2, 0x1c, 0x2b, + 0x0c, 0x7a, 0x56, 0xf1, 0x8b, 0x2a, 0x79, 0x39, 0x09, 0xb7, 0x29, 0x01, 0x3d, 0xdc, 0xe5, 0x44, + 0xaf, 0xed, 0x56, 0x48, 0x64, 0x2c, 0x7f, 0x75, 0x6d, 0x33, 0x63, 0x03, 0x06, 0xa1, 0x4f, 0xc0, + 0x4d, 0xa5, 0x41, 0xd7, 0x9e, 0x80, 0x5c, 0x87, 0xce, 0x61, 0xb4, 0x73, 0x11, 0xf1, 0x1c, 0x2f, + 0x12, 0x0f, 0xc5, 0x38, 0xc6, 0x33, 0x2b, 0xc5, 0x02, 0x6a, 0x7f, 0xb9, 0x08, 0xa7, 0x72, 0x1d, + 0xbd, 0x69, 0xd7, 0x1b, 0xbe, 0xe7, 0x46, 0xbe, 0x32, 0xcc, 0xe4, 0x71, 0x9d, 0x49, 0x73, 0x6b, + 0x45, 0x94, 0x63, 0x85, 0x81, 0xce, 0x43, 0x2f, 0x93, 0xb5, 0x27, 0xd3, 0xbc, 0xe1, 0xb9, 0x05, + 0x1e, 0x31, 0x93, 0x83, 0xb5, 0x5b, 0xbd, 0xd8, 0xf6, 0x56, 0x7f, 0x8c, 0x72, 0x30, 0x7e, 0x3d, + 0x79, 0xa1, 0xd0, 0xee, 0xfa, 0x7e, 0x1d, 0x33, 0x20, 0x7a, 0x42, 0x8c, 0x57, 0xc2, 0x12, 0x11, + 0x3b, 0x35, 0x3f, 0xd4, 0x06, 0xed, 0x29, 0xe8, 0xdf, 0x26, 0xbb, 0x81, 0xeb, 0x6d, 0x26, 0x2d, + 0x54, 0xaf, 0xf1, 0x62, 0x2c, 0xe1, 0x66, 0x56, 0xf3, 0xfe, 0x07, 0x91, 0xd5, 0x5c, 0x5f, 0x01, + 0x03, 0x1d, 0xd9, 0x93, 0x9f, 0x28, 0xc2, 0x18, 0x9e, 0x5b, 0xf8, 0x60, 0x22, 0xd6, 0xd3, 0x13, + 0xf1, 0x20, 0x92, 0x7f, 0x1f, 0x6e, 0x36, 0x7e, 0xdb, 0x82, 0x31, 0x96, 0x70, 0x52, 0x44, 0x69, + 0x71, 0x7d, 0xef, 0x08, 0x9e, 0x02, 0x8f, 0x41, 0x6f, 0x40, 0x1b, 0x15, 0x33, 0xa8, 0xf6, 0x38, + 0xeb, 0x09, 0xe6, 0x30, 0x74, 0x1a, 0x7a, 0x58, 0x17, 0xe8, 0xe4, 0x0d, 0xf3, 0x23, 0x78, 0xc1, + 0x89, 0x1c, 0xcc, 0x4a, 0x59, 0xbc, 0x48, 0x4c, 0x9a, 0x75, 0x97, 0x77, 0x3a, 0xb6, 0x84, 0x78, + 0x7f, 0x84, 0x80, 0xc9, 0xec, 0xda, 0x7b, 0x8b, 0x17, 0x99, 0x4d, 0xb2, 0xfd, 0x33, 0xfb, 0x1f, + 0x0a, 0x70, 0x36, 0xb3, 0x5e, 0xd7, 0xf1, 0x22, 0xdb, 0xd7, 0x7e, 0x98, 0xe9, 0xe9, 0x8a, 0x47, + 0x68, 0xff, 0xdf, 0xd3, 0x2d, 0xf7, 0xdf, 0xdb, 0x45, 0x18, 0xc7, 0xcc, 0x21, 0x7b, 0x9f, 0x84, + 0x71, 0xcc, 0xec, 0x5b, 0x8e, 0x98, 0xe0, 0x5f, 0x0b, 0x39, 0xdf, 0xc2, 0x04, 0x06, 0x17, 0xe8, + 0x39, 0xc3, 0x80, 0xa1, 0x7c, 0x84, 0xf3, 0x33, 0x86, 0x97, 0x61, 0x05, 0x45, 0xb3, 0x30, 0xd6, + 0x70, 0x3d, 0x7a, 0xf8, 0xec, 0x9a, 0xac, 0xb8, 0x52, 0x91, 0xac, 0x98, 0x60, 0x9c, 0xc4, 0x47, + 0xae, 0x16, 0xe2, 0x91, 0x7f, 0xdd, 0xab, 0x87, 
0xda, 0x75, 0x33, 0xa6, 0x95, 0x88, 0x1a, 0xc5, + 0x8c, 0x70, 0x8f, 0x2b, 0x9a, 0x9c, 0xa8, 0xd8, 0xbd, 0x9c, 0x68, 0x38, 0x5b, 0x46, 0x34, 0xfd, + 0x2a, 0x8c, 0xdc, 0xb7, 0x6e, 0xc4, 0xfe, 0x56, 0x11, 0x1e, 0x69, 0xb3, 0xed, 0xf9, 0x59, 0x6f, + 0xcc, 0x81, 0x76, 0xd6, 0xa7, 0xe6, 0xa1, 0x0c, 0xc7, 0x36, 0x5a, 0xf5, 0xfa, 0x2e, 0x73, 0x74, + 0x23, 0x35, 0x89, 0x21, 0x78, 0x4a, 0x29, 0x1c, 0x39, 0xb6, 0x94, 0x81, 0x83, 0x33, 0x6b, 0xd2, + 0x27, 0x16, 0xbd, 0x49, 0x76, 0x15, 0xa9, 0xc4, 0x13, 0x0b, 0xeb, 0x40, 0x6c, 0xe2, 0xa2, 0x2b, + 0x30, 0xe1, 0xec, 0x38, 0x2e, 0x4f, 0xef, 0x21, 0x09, 0xf0, 0x37, 0x96, 0x92, 0x45, 0xcf, 0x26, + 0x11, 0x70, 0xba, 0x0e, 0x7a, 0x13, 0x90, 0x7f, 0x9b, 0x39, 0xcf, 0xd4, 0xae, 0x10, 0x4f, 0x28, + 0xf3, 0xd9, 0xdc, 0x15, 0xe3, 0x23, 0xe1, 0x46, 0x0a, 0x03, 0x67, 0xd4, 0x4a, 0x04, 0x1b, 0xec, + 0xcb, 0x0f, 0x36, 0xd8, 0xfe, 0x5c, 0xec, 0x98, 0x19, 0xf1, 0x1d, 0x18, 0x39, 0xac, 0xb5, 0xf7, + 0x53, 0xd0, 0x1f, 0x88, 0x9c, 0xf3, 0x09, 0xaf, 0x72, 0x99, 0x91, 0x5b, 0xc2, 0xed, 0xff, 0xc7, + 0x02, 0x25, 0x4b, 0x36, 0xe3, 0x8a, 0xbf, 0xca, 0x4c, 0xd7, 0xb9, 0x14, 0x5c, 0x0b, 0x25, 0x76, + 0x5c, 0x33, 0x5d, 0x8f, 0x81, 0xd8, 0xc4, 0xe5, 0xcb, 0x2d, 0x8c, 0x23, 0x58, 0x18, 0x0f, 0x08, + 0xa1, 0x35, 0x54, 0x18, 0xe8, 0x13, 0xd0, 0x5f, 0x73, 0x77, 0xdc, 0x50, 0xc8, 0xd1, 0x0e, 0xad, + 0xb7, 0x8b, 0xbf, 0x6f, 0x81, 0x93, 0xc1, 0x92, 0x9e, 0xfd, 0x53, 0x16, 0x28, 0x75, 0xe7, 0x55, + 0xe2, 0xd4, 0xa3, 0x2d, 0xf4, 0x06, 0x80, 0xa4, 0xa0, 0x64, 0x6f, 0xd2, 0x08, 0x0b, 0xb0, 0x82, + 0x1c, 0x18, 0xff, 0xb0, 0x56, 0x07, 0xbd, 0x0e, 0x7d, 0x5b, 0x8c, 0x96, 0xf8, 0xb6, 0xf3, 0x4a, + 0xd5, 0xc5, 0x4a, 0x0f, 0xf6, 0x4a, 0xc7, 0xcc, 0x36, 0xe5, 0x2d, 0xc6, 0x6b, 0xd9, 0x3f, 0x51, + 0x88, 0xe7, 0xf4, 0xad, 0x96, 0x1f, 0x39, 0x47, 0xc0, 0x89, 0x5c, 0x31, 0x38, 0x91, 0x27, 0xda, + 0xe9, 0x73, 0x59, 0x97, 0x72, 0x39, 0x90, 0x1b, 0x09, 0x0e, 0xe4, 0xc9, 0xce, 0xa4, 0xda, 0x73, + 0x1e, 0xff, 0x93, 0x05, 0x13, 0x06, 0xfe, 0x11, 0x5c, 0x80, 0x4b, 0xe6, 0x05, 0xf8, 0x68, 0xc7, + 0x6f, 0xc8, 0xb9, 0xf8, 0x7e, 0xb4, 0x98, 0xe8, 0x3b, 0xbb, 0xf0, 0xde, 0x85, 0x9e, 0x2d, 0x27, + 0xa8, 0x89, 0x77, 0xfd, 0xc5, 0xae, 0xc6, 0x7a, 0xe6, 0xaa, 0x13, 0x08, 0x03, 0x8e, 0x67, 0xe5, + 0xa8, 0xd3, 0xa2, 0x8e, 0xc6, 0x1b, 0xac, 0x29, 0x74, 0x19, 0xfa, 0xc2, 0xaa, 0xdf, 0x54, 0x7e, + 0x80, 0x2c, 0x5d, 0x78, 0x85, 0x95, 0x1c, 0xec, 0x95, 0x90, 0xd9, 0x1c, 0x2d, 0xc6, 0x02, 0x1f, + 0xbd, 0x0d, 0x23, 0xec, 0x97, 0xb2, 0xa6, 0x2c, 0xe6, 0x4b, 0x60, 0x2a, 0x3a, 0x22, 0x37, 0x35, + 0x36, 0x8a, 0xb0, 0x49, 0x6a, 0x7a, 0x13, 0x06, 0xd5, 0x67, 0x3d, 0x54, 0x6d, 0xfd, 0xff, 0x59, + 0x84, 0xc9, 0x8c, 0x35, 0x87, 0x42, 0x63, 0x26, 0x2e, 0x75, 0xb9, 0x54, 0xdf, 0xe3, 0x5c, 0x84, + 0xec, 0x01, 0x58, 0x13, 0x6b, 0xab, 0xeb, 0x46, 0xd7, 0x43, 0x92, 0x6c, 0x94, 0x16, 0x75, 0x6e, + 0x94, 0x36, 0x76, 0x64, 0x43, 0x4d, 0x1b, 0x52, 0x3d, 0x7d, 0xa8, 0x73, 0xfa, 0x87, 0x3d, 0x70, + 0x2c, 0xcb, 0xc4, 0x04, 0x7d, 0x0e, 0xfa, 0x98, 0xa3, 0x9a, 0x14, 0x9c, 0xbd, 0xd8, 0xad, 0x71, + 0xca, 0x0c, 0xf3, 0x75, 0x13, 0xa1, 0x69, 0x67, 0xe4, 0x71, 0xc4, 0x0b, 0x3b, 0x0e, 0xb3, 0x68, + 0x93, 0x85, 0x8c, 0x12, 0xb7, 0xa7, 0x3c, 0x3e, 0x3e, 0xd2, 0x75, 0x07, 0xc4, 0xfd, 0x1b, 0x26, + 0x2c, 0xb5, 0x64, 0x71, 0x67, 0x4b, 0x2d, 0xd9, 0x32, 0x5a, 0x86, 0xbe, 0x2a, 0x37, 0x01, 0x2a, + 0x76, 0x3e, 0xc2, 0xb8, 0xfd, 0x8f, 0x3a, 0x80, 0x85, 0xdd, 0x8f, 0x20, 0x30, 0xed, 0xc2, 0x90, + 0x36, 0x30, 0x0f, 0x75, 0xf1, 0x6c, 0xd3, 0x8b, 0x4f, 0x1b, 0x82, 0x87, 0xba, 0x80, 0x7e, 0x46, + 0xbb, 0xfb, 0xc5, 0x79, 0xf0, 0x61, 0x83, 0x77, 0x3a, 0x9d, 0x70, 0x1f, 
0x4c, 0xec, 0x2b, 0xc6, + 0x4b, 0x55, 0xcc, 0x98, 0xee, 0xb9, 0xa9, 0xa1, 0xcc, 0x0b, 0xbf, 0x7d, 0x1c, 0x77, 0xfb, 0x67, + 0x2d, 0x48, 0x38, 0x78, 0x29, 0x71, 0xa7, 0x95, 0x2b, 0xee, 0x3c, 0x07, 0x3d, 0x81, 0x5f, 0x27, + 0xc9, 0xd4, 0xfb, 0xd8, 0xaf, 0x13, 0xcc, 0x20, 0x14, 0x23, 0x8a, 0x85, 0x58, 0xc3, 0xfa, 0x03, + 0x5d, 0x3c, 0xbd, 0x1f, 0x83, 0xde, 0x3a, 0xd9, 0x21, 0xf5, 0x64, 0x86, 0xd4, 0xeb, 0xb4, 0x10, + 0x73, 0x98, 0xfd, 0xdb, 0x3d, 0x70, 0xa6, 0x6d, 0x64, 0x39, 0xca, 0x60, 0x6e, 0x3a, 0x11, 0xb9, + 0xe3, 0xec, 0x26, 0x33, 0x03, 0x5e, 0xe1, 0xc5, 0x58, 0xc2, 0x99, 0xb3, 0x35, 0xcf, 0x94, 0x93, + 0x10, 0x0e, 0x8b, 0x04, 0x39, 0x02, 0x6a, 0x0a, 0x1b, 0x8b, 0x0f, 0x42, 0xd8, 0xf8, 0x3c, 0x40, + 0x18, 0xd6, 0xb9, 0x1d, 0x67, 0x4d, 0x78, 0x71, 0xc7, 0x19, 0x95, 0x2a, 0xd7, 0x05, 0x04, 0x6b, + 0x58, 0x68, 0x01, 0xc6, 0x9b, 0x81, 0x1f, 0x71, 0x59, 0xfb, 0x02, 0x37, 0x75, 0xee, 0x35, 0x83, + 0x7a, 0x95, 0x13, 0x70, 0x9c, 0xaa, 0x81, 0x5e, 0x82, 0x21, 0x11, 0xe8, 0xab, 0xec, 0xfb, 0x75, + 0x21, 0xde, 0x53, 0xd6, 0xbf, 0x95, 0x18, 0x84, 0x75, 0x3c, 0xad, 0x1a, 0x13, 0xe0, 0xf7, 0x67, + 0x56, 0xe3, 0x42, 0x7c, 0x0d, 0x2f, 0x91, 0x14, 0x60, 0xa0, 0xab, 0xa4, 0x00, 0xb1, 0xc0, 0x73, + 0xb0, 0x6b, 0x7d, 0x32, 0x74, 0x14, 0x11, 0x7e, 0xad, 0x07, 0x26, 0xc5, 0xc2, 0x79, 0xd8, 0xcb, + 0x65, 0x3d, 0xbd, 0x5c, 0x1e, 0x84, 0x48, 0xf4, 0x83, 0x35, 0x73, 0xd4, 0x6b, 0xe6, 0x27, 0x2d, + 0x30, 0x79, 0x48, 0xf4, 0x9f, 0xe5, 0xa6, 0x56, 0x7d, 0x29, 0x97, 0x27, 0x8d, 0x23, 0x86, 0xbf, + 0xb7, 0x24, 0xab, 0xf6, 0xff, 0x65, 0xc1, 0xa3, 0x1d, 0x29, 0xa2, 0x45, 0x18, 0x64, 0x8c, 0xae, + 0xf6, 0x2e, 0x7e, 0x52, 0xb9, 0x42, 0x48, 0x40, 0x0e, 0xdf, 0x1d, 0xd7, 0x44, 0x8b, 0xa9, 0x1c, + 0xb6, 0x4f, 0x65, 0xe4, 0xb0, 0x3d, 0x6e, 0x0c, 0xcf, 0x7d, 0x26, 0xb1, 0xfd, 0x12, 0xbd, 0x71, + 0x4c, 0x7f, 0xca, 0x8f, 0x18, 0xe2, 0x5c, 0x3b, 0x21, 0xce, 0x45, 0x26, 0xb6, 0x76, 0x87, 0xbc, + 0x01, 0xe3, 0x2c, 0x02, 0x28, 0x73, 0xcc, 0x11, 0x8e, 0x98, 0x85, 0xd8, 0xf8, 0xfe, 0x7a, 0x02, + 0x86, 0x53, 0xd8, 0xf6, 0xdf, 0x15, 0xa1, 0x8f, 0x6f, 0xbf, 0x23, 0x78, 0xf8, 0x3e, 0x03, 0x83, + 0x6e, 0xa3, 0xd1, 0xe2, 0x69, 0x49, 0x7b, 0x63, 0x53, 0xee, 0x65, 0x59, 0x88, 0x63, 0x38, 0x5a, + 0x12, 0x9a, 0x84, 0x36, 0x41, 0xc6, 0x79, 0xc7, 0x67, 0x16, 0x9c, 0xc8, 0xe1, 0x5c, 0x9c, 0xba, + 0x67, 0x63, 0x9d, 0x03, 0xfa, 0x34, 0x40, 0x18, 0x05, 0xae, 0xb7, 0x49, 0xcb, 0x44, 0x26, 0x8a, + 0xa7, 0xdb, 0x50, 0xab, 0x28, 0x64, 0x4e, 0x33, 0x3e, 0x73, 0x14, 0x00, 0x6b, 0x14, 0xd1, 0x8c, + 0x71, 0xd3, 0x4f, 0x27, 0xe6, 0x0e, 0x38, 0xd5, 0x78, 0xce, 0xa6, 0x5f, 0x86, 0x41, 0x45, 0xbc, + 0x93, 0x5c, 0x71, 0x58, 0x67, 0xd8, 0x3e, 0x06, 0x63, 0x89, 0xbe, 0x1d, 0x4a, 0x2c, 0xf9, 0x3b, + 0x16, 0x8c, 0xf1, 0xce, 0x2c, 0x7a, 0x3b, 0xe2, 0x36, 0xb8, 0x07, 0xc7, 0xea, 0x19, 0xa7, 0xb2, + 0x98, 0xfe, 0xee, 0x4f, 0x71, 0x25, 0x86, 0xcc, 0x82, 0xe2, 0xcc, 0x36, 0xd0, 0x05, 0xba, 0xe3, + 0xe8, 0xa9, 0xeb, 0xd4, 0x45, 0x34, 0x91, 0x61, 0xbe, 0xdb, 0x78, 0x19, 0x56, 0x50, 0xfb, 0xaf, + 0x2c, 0x98, 0xe0, 0x3d, 0xbf, 0x46, 0x76, 0xd5, 0xd9, 0xf4, 0x9d, 0xec, 0xbb, 0x48, 0x88, 0x5d, + 0xc8, 0x49, 0x88, 0xad, 0x7f, 0x5a, 0xb1, 0xed, 0xa7, 0x7d, 0xd5, 0x02, 0xb1, 0x42, 0x8e, 0x40, + 0xd2, 0xf2, 0x7d, 0xa6, 0xa4, 0x65, 0x3a, 0x7f, 0x13, 0xe4, 0x88, 0x58, 0xfe, 0xc5, 0x82, 0x71, + 0x8e, 0x10, 0x5b, 0x41, 0x7c, 0x47, 0xe7, 0x61, 0xce, 0xfc, 0xa2, 0x4c, 0xb3, 0xd6, 0x6b, 0x64, + 0x77, 0xcd, 0x2f, 0x3b, 0xd1, 0x56, 0xf6, 0x47, 0x19, 0x93, 0xd5, 0xd3, 0x76, 0xb2, 0x6a, 0x72, + 0x03, 0x19, 0x89, 0x17, 0x3b, 0x08, 0x80, 0x0f, 0x9b, 0x78, 0xd1, 0xfe, 0x7b, 0x0b, 0x10, 0x6f, + 
0xc6, 0x60, 0xdc, 0x28, 0x3b, 0xc4, 0x4a, 0xb5, 0x8b, 0x2e, 0x3e, 0x9a, 0x14, 0x04, 0x6b, 0x58, + 0x0f, 0x64, 0x78, 0x12, 0xa6, 0x2c, 0xc5, 0xce, 0xa6, 0x2c, 0x87, 0x18, 0xd1, 0xaf, 0xf6, 0x43, + 0xd2, 0x15, 0x13, 0xdd, 0x84, 0xe1, 0xaa, 0xd3, 0x74, 0x6e, 0xbb, 0x75, 0x37, 0x72, 0x49, 0xd8, + 0xce, 0xce, 0x6d, 0x5e, 0xc3, 0x13, 0xc6, 0x07, 0x5a, 0x09, 0x36, 0xe8, 0xa0, 0x19, 0x80, 0x66, + 0xe0, 0xee, 0xb8, 0x75, 0xb2, 0xc9, 0x04, 0x42, 0x2c, 0x7e, 0x11, 0x37, 0xba, 0x93, 0xa5, 0x58, + 0xc3, 0xc8, 0x08, 0x1b, 0x52, 0x7c, 0xc8, 0x61, 0x43, 0xe0, 0xc8, 0xc2, 0x86, 0xf4, 0x1c, 0x2a, + 0x6c, 0xc8, 0xc0, 0xa1, 0xc3, 0x86, 0xf4, 0x76, 0x15, 0x36, 0x04, 0xc3, 0x09, 0xc9, 0x7b, 0xd2, + 0xff, 0x4b, 0x6e, 0x9d, 0x88, 0x07, 0x07, 0x0f, 0xba, 0x34, 0xbd, 0xbf, 0x57, 0x3a, 0x81, 0x33, + 0x31, 0x70, 0x4e, 0x4d, 0xf4, 0x71, 0x98, 0x72, 0xea, 0x75, 0xff, 0x8e, 0x9a, 0xd4, 0xc5, 0xb0, + 0xea, 0xd4, 0xb9, 0x72, 0xa9, 0x9f, 0x51, 0x3d, 0xbd, 0xbf, 0x57, 0x9a, 0x9a, 0xcd, 0xc1, 0xc1, + 0xb9, 0xb5, 0xd1, 0x6b, 0x30, 0xd8, 0x0c, 0xfc, 0xea, 0x8a, 0xe6, 0x2f, 0x7e, 0x96, 0x0e, 0x60, + 0x59, 0x16, 0x1e, 0xec, 0x95, 0x46, 0xd4, 0x1f, 0x76, 0xe1, 0xc7, 0x15, 0x32, 0x22, 0x72, 0x0c, + 0x3d, 0xec, 0x88, 0x1c, 0xc3, 0x0f, 0x38, 0x22, 0x87, 0xbd, 0x0d, 0x93, 0x15, 0x12, 0xb8, 0x4e, + 0xdd, 0xbd, 0x47, 0x79, 0x72, 0x79, 0x06, 0xae, 0xc1, 0x60, 0x90, 0x38, 0xf5, 0xbb, 0x0a, 0x2e, + 0xae, 0xc9, 0x65, 0xe4, 0x29, 0x1f, 0x13, 0xb2, 0xff, 0xbd, 0x05, 0xfd, 0xc2, 0xbd, 0xf3, 0x08, + 0x38, 0xd3, 0x59, 0x43, 0x25, 0x53, 0xca, 0x9e, 0x14, 0xd6, 0x99, 0x5c, 0x65, 0xcc, 0x72, 0x42, + 0x19, 0xf3, 0x68, 0x3b, 0x22, 0xed, 0xd5, 0x30, 0xff, 0x75, 0x91, 0xbe, 0x10, 0x8c, 0x40, 0x03, + 0x0f, 0x7f, 0x08, 0x56, 0xa1, 0x3f, 0x14, 0x8e, 0xee, 0x85, 0x7c, 0x5f, 0x9e, 0xe4, 0x24, 0xc6, + 0x36, 0x90, 0xc2, 0xb5, 0x5d, 0x12, 0xc9, 0xf4, 0xa0, 0x2f, 0x3e, 0x44, 0x0f, 0xfa, 0x4e, 0xa1, + 0x18, 0x7a, 0x1e, 0x44, 0x28, 0x06, 0xfb, 0x1b, 0xec, 0x76, 0xd6, 0xcb, 0x8f, 0x80, 0x71, 0xbb, + 0x62, 0xde, 0xe3, 0x76, 0x9b, 0x95, 0x25, 0x3a, 0x95, 0xc3, 0xc0, 0xfd, 0x96, 0x05, 0x67, 0x32, + 0xbe, 0x4a, 0xe3, 0xe6, 0x9e, 0x85, 0x01, 0xa7, 0x55, 0x73, 0xd5, 0x5e, 0xd6, 0xb4, 0xc5, 0xb3, + 0xa2, 0x1c, 0x2b, 0x0c, 0x34, 0x0f, 0x13, 0xe4, 0x6e, 0xd3, 0xe5, 0x6a, 0x78, 0xdd, 0x74, 0xbc, + 0xc8, 0x7d, 0x82, 0x17, 0x93, 0x40, 0x9c, 0xc6, 0x57, 0xe1, 0xdc, 0x8a, 0xb9, 0xe1, 0xdc, 0x7e, + 0xdd, 0x82, 0x21, 0xe5, 0xea, 0xfd, 0xd0, 0x47, 0xfb, 0x0d, 0x73, 0xb4, 0x1f, 0x69, 0x33, 0xda, + 0x39, 0xc3, 0xfc, 0x97, 0x05, 0xd5, 0xdf, 0xb2, 0x1f, 0x44, 0x5d, 0x70, 0x89, 0xf7, 0xef, 0xf6, + 0x72, 0x09, 0x86, 0x9c, 0x66, 0x53, 0x02, 0xa4, 0xfd, 0x22, 0x4b, 0x15, 0x11, 0x17, 0x63, 0x1d, + 0x47, 0x79, 0xe1, 0x14, 0x73, 0xbd, 0x70, 0x6a, 0x00, 0x91, 0x13, 0x6c, 0x92, 0x88, 0x96, 0x09, + 0x73, 0xeb, 0xfc, 0xf3, 0xa6, 0x15, 0xb9, 0xf5, 0x19, 0xd7, 0x8b, 0xc2, 0x28, 0x98, 0x59, 0xf6, + 0xa2, 0x1b, 0x01, 0x7f, 0xa6, 0x6a, 0x41, 0x13, 0x15, 0x2d, 0xac, 0xd1, 0x95, 0x61, 0x4d, 0x58, + 0x1b, 0xbd, 0xa6, 0x21, 0xcc, 0xaa, 0x28, 0xc7, 0x0a, 0xc3, 0x7e, 0x99, 0xdd, 0x3e, 0x6c, 0x4c, + 0x0f, 0x17, 0x0c, 0xf0, 0x1f, 0x86, 0xd5, 0x6c, 0x30, 0x95, 0xf0, 0x82, 0x1e, 0x72, 0xb0, 0xfd, + 0x61, 0x4f, 0x1b, 0xd6, 0xfd, 0x59, 0xe3, 0xb8, 0x84, 0xe8, 0x93, 0x29, 0xe3, 0xa6, 0xe7, 0x3a, + 0xdc, 0x1a, 0x87, 0x30, 0x67, 0x62, 0x79, 0xe3, 0x58, 0x56, 0xad, 0xe5, 0xb2, 0xd8, 0x17, 0x5a, + 0xde, 0x38, 0x01, 0xc0, 0x31, 0x0e, 0x65, 0xd8, 0xd4, 0x9f, 0x70, 0x0a, 0xc5, 0xe1, 0xc5, 0x15, + 0x76, 0x88, 0x35, 0x0c, 0x74, 0x51, 0x08, 0x2d, 0xb8, 0xee, 0xe1, 0x91, 0x84, 0xd0, 0x42, 0x0e, + 0x97, 0x26, 0x69, 0xba, 
0x04, 0x43, 0xe4, 0x6e, 0x44, 0x02, 0xcf, 0xa9, 0xd3, 0x16, 0x7a, 0xe3, + 0x88, 0xb8, 0x8b, 0x71, 0x31, 0xd6, 0x71, 0xd0, 0x1a, 0x8c, 0x85, 0x5c, 0x96, 0xa7, 0x92, 0x5a, + 0x70, 0x99, 0xe8, 0xd3, 0xca, 0xc9, 0xde, 0x04, 0x1f, 0xb0, 0x22, 0x7e, 0x3a, 0xc9, 0xd0, 0x23, + 0x49, 0x12, 0xe8, 0x75, 0x18, 0xad, 0xfb, 0x4e, 0x6d, 0xce, 0xa9, 0x3b, 0x5e, 0x95, 0x8d, 0xcf, + 0x80, 0x11, 0x7f, 0x72, 0xf4, 0xba, 0x01, 0xc5, 0x09, 0x6c, 0xca, 0x20, 0xea, 0x25, 0x22, 0x11, + 0x8b, 0xe3, 0x6d, 0x92, 0x70, 0x6a, 0x90, 0x7d, 0x15, 0x63, 0x10, 0xaf, 0xe7, 0xe0, 0xe0, 0xdc, + 0xda, 0xe8, 0x32, 0x0c, 0xcb, 0xcf, 0xd7, 0x22, 0xf5, 0xc4, 0x0e, 0x4d, 0x1a, 0x0c, 0x1b, 0x98, + 0x28, 0x84, 0xe3, 0xf2, 0xff, 0x5a, 0xe0, 0x6c, 0x6c, 0xb8, 0x55, 0x11, 0xbe, 0x82, 0x3b, 0x7f, + 0x7f, 0x4c, 0x7a, 0x9a, 0x2e, 0x66, 0x21, 0x1d, 0xec, 0x95, 0x4e, 0x8b, 0x51, 0xcb, 0x84, 0xe3, + 0x6c, 0xda, 0x68, 0x05, 0x26, 0xb9, 0x0d, 0xcc, 0xfc, 0x16, 0xa9, 0x6e, 0xcb, 0x0d, 0xc7, 0xb8, + 0x46, 0xcd, 0xf1, 0xe7, 0x6a, 0x1a, 0x05, 0x67, 0xd5, 0x43, 0xef, 0xc0, 0x54, 0xb3, 0x75, 0xbb, + 0xee, 0x86, 0x5b, 0xab, 0x7e, 0xc4, 0x4c, 0xc8, 0x66, 0x6b, 0xb5, 0x80, 0x84, 0xdc, 0x37, 0x98, + 0x5d, 0xbd, 0x32, 0xba, 0x52, 0x39, 0x07, 0x0f, 0xe7, 0x52, 0x40, 0xf7, 0xe0, 0x78, 0x62, 0x21, + 0x88, 0x30, 0x29, 0xa3, 0xf9, 0x29, 0xad, 0x2a, 0x59, 0x15, 0x44, 0xc4, 0xa1, 0x2c, 0x10, 0xce, + 0x6e, 0x02, 0xbd, 0x02, 0xe0, 0x36, 0x97, 0x9c, 0x86, 0x5b, 0xa7, 0xcf, 0xd1, 0x49, 0xb6, 0x46, + 0xe8, 0xd3, 0x04, 0x96, 0xcb, 0xb2, 0x94, 0x9e, 0xcd, 0xe2, 0xdf, 0x2e, 0xd6, 0xb0, 0xd1, 0x75, + 0x18, 0x15, 0xff, 0x76, 0xc5, 0x94, 0x4e, 0xa8, 0xec, 0xa7, 0xa3, 0xb2, 0x86, 0x9a, 0xc7, 0x44, + 0x09, 0x4e, 0xd4, 0x45, 0x9b, 0x70, 0x46, 0xa6, 0x5e, 0xd5, 0xd7, 0xa7, 0x9c, 0x83, 0x90, 0xe5, + 0x91, 0x1a, 0xe0, 0x3e, 0x45, 0xb3, 0xed, 0x10, 0x71, 0x7b, 0x3a, 0xf4, 0x5e, 0xd7, 0x97, 0x39, + 0xf7, 0x18, 0x3f, 0x1e, 0x47, 0xf1, 0xbc, 0x9e, 0x04, 0xe2, 0x34, 0x3e, 0xf2, 0xe1, 0xb8, 0xeb, + 0x65, 0xad, 0xea, 0x13, 0x8c, 0xd0, 0x47, 0xb9, 0xb3, 0x7c, 0xfb, 0x15, 0x9d, 0x09, 0xc7, 0xd9, + 0x74, 0xd1, 0x32, 0x4c, 0x46, 0xbc, 0x60, 0xc1, 0x0d, 0x79, 0x9a, 0x1a, 0xfa, 0xec, 0x3b, 0xc9, + 0x9a, 0x3b, 0x49, 0x57, 0xf3, 0x5a, 0x1a, 0x8c, 0xb3, 0xea, 0xbc, 0x37, 0x03, 0xd0, 0x6f, 0x5a, + 0xb4, 0xb6, 0xc6, 0xe8, 0xa3, 0xcf, 0xc0, 0xb0, 0x3e, 0x3e, 0x82, 0x69, 0x39, 0x9f, 0xcd, 0x07, + 0x6b, 0xc7, 0x0b, 0x7f, 0x26, 0xa8, 0x23, 0x44, 0x87, 0x61, 0x83, 0x22, 0xaa, 0x66, 0x04, 0xb9, + 0xb8, 0xd8, 0x1d, 0x53, 0xd4, 0xbd, 0xfd, 0x23, 0x81, 0xec, 0x9d, 0x83, 0xae, 0xc3, 0x40, 0xb5, + 0xee, 0x12, 0x2f, 0x5a, 0x2e, 0xb7, 0x0b, 0xae, 0x3a, 0x2f, 0x70, 0xc4, 0x56, 0x14, 0xd9, 0xa5, + 0x78, 0x19, 0x56, 0x14, 0xec, 0xcb, 0x30, 0x54, 0xa9, 0x13, 0xd2, 0xe4, 0x7e, 0x5c, 0xe8, 0x29, + 0xf6, 0x30, 0x61, 0xac, 0xa5, 0xc5, 0x58, 0x4b, 0xfd, 0xcd, 0xc1, 0x98, 0x4a, 0x09, 0xb7, 0xff, + 0xb8, 0x00, 0xa5, 0x0e, 0x49, 0xce, 0x12, 0xfa, 0x36, 0xab, 0x2b, 0x7d, 0xdb, 0x2c, 0x8c, 0xc5, + 0xff, 0x74, 0x51, 0x9e, 0x32, 0x86, 0xbe, 0x69, 0x82, 0x71, 0x12, 0xbf, 0x6b, 0xbf, 0x16, 0x5d, + 0x65, 0xd7, 0xd3, 0xd1, 0x33, 0xcb, 0x50, 0xd5, 0xf7, 0x76, 0xff, 0xf6, 0xce, 0x55, 0xbb, 0xda, + 0xdf, 0x28, 0xc0, 0x71, 0x35, 0x84, 0xdf, 0xbb, 0x03, 0xb7, 0x9e, 0x1e, 0xb8, 0x07, 0xa0, 0xb4, + 0xb6, 0x6f, 0x40, 0x1f, 0x8f, 0xf8, 0xda, 0x05, 0xcf, 0xff, 0x98, 0x19, 0x7c, 0x5f, 0xb1, 0x99, + 0x46, 0x00, 0xfe, 0x1f, 0xb3, 0x60, 0x2c, 0xe1, 0x20, 0x89, 0xb0, 0xe6, 0x45, 0x7f, 0x3f, 0x7c, + 0x79, 0x16, 0xc7, 0x7f, 0x0e, 0x7a, 0xb6, 0x7c, 0x65, 0xa4, 0xac, 0x30, 0xae, 0xfa, 0x61, 0x84, + 0x19, 0xc4, 0xfe, 0x6b, 0x0b, 0x7a, 0xd7, 0x1c, 
0xd7, 0x8b, 0xa4, 0xf6, 0xc3, 0xca, 0xd1, 0x7e, + 0x74, 0xf3, 0x5d, 0xe8, 0x25, 0xe8, 0x23, 0x1b, 0x1b, 0xa4, 0x1a, 0x89, 0x59, 0x95, 0xd1, 0x34, + 0xfa, 0x16, 0x59, 0x29, 0x65, 0x42, 0x59, 0x63, 0xfc, 0x2f, 0x16, 0xc8, 0xe8, 0x16, 0x0c, 0x46, + 0x6e, 0x83, 0xcc, 0xd6, 0x6a, 0xc2, 0x26, 0xe0, 0x3e, 0x42, 0xc0, 0xac, 0x49, 0x02, 0x38, 0xa6, + 0x65, 0x7f, 0xb9, 0x00, 0x10, 0x47, 0x98, 0xeb, 0xf4, 0x89, 0x73, 0x29, 0x6d, 0xf1, 0xf9, 0x0c, + 0x6d, 0x31, 0x8a, 0x09, 0x66, 0xa8, 0x8a, 0xd5, 0x30, 0x15, 0xbb, 0x1a, 0xa6, 0x9e, 0xc3, 0x0c, + 0xd3, 0x3c, 0x4c, 0xc4, 0x11, 0xf2, 0xcc, 0x00, 0xa1, 0xec, 0xfe, 0x5e, 0x4b, 0x02, 0x71, 0x1a, + 0xdf, 0x26, 0x70, 0x4e, 0x05, 0x0a, 0x13, 0x77, 0x21, 0x73, 0x25, 0xd0, 0xb5, 0xef, 0x1d, 0xc6, + 0x29, 0x56, 0x87, 0x17, 0x72, 0xd5, 0xe1, 0xbf, 0x60, 0xc1, 0xb1, 0x64, 0x3b, 0xcc, 0xef, 0xfe, + 0x8b, 0x16, 0x1c, 0x8f, 0x73, 0xfc, 0xa4, 0x4d, 0x10, 0x5e, 0x6c, 0x1b, 0xfc, 0x2c, 0xa7, 0xc7, + 0x71, 0xd8, 0x96, 0x95, 0x2c, 0xd2, 0x38, 0xbb, 0x45, 0xfb, 0xdf, 0xf5, 0xc0, 0x54, 0x5e, 0xd4, + 0x34, 0xe6, 0x69, 0xe4, 0xdc, 0xad, 0x6c, 0x93, 0x3b, 0xc2, 0x9f, 0x23, 0xf6, 0x34, 0xe2, 0xc5, + 0x58, 0xc2, 0x93, 0x69, 0x9d, 0x0a, 0x5d, 0xa6, 0x75, 0xda, 0x82, 0x89, 0x3b, 0x5b, 0xc4, 0x5b, + 0xf7, 0x42, 0x27, 0x72, 0xc3, 0x0d, 0x97, 0x29, 0xd0, 0xf9, 0xba, 0x79, 0x45, 0x7a, 0x5d, 0xdc, + 0x4a, 0x22, 0x1c, 0xec, 0x95, 0xce, 0x18, 0x05, 0x71, 0x97, 0xf9, 0x41, 0x82, 0xd3, 0x44, 0xd3, + 0x59, 0xb1, 0x7a, 0x1e, 0x72, 0x56, 0xac, 0x86, 0x2b, 0xcc, 0x6e, 0xa4, 0x1b, 0x09, 0x7b, 0xb6, + 0xae, 0xa8, 0x52, 0xac, 0x61, 0xa0, 0x4f, 0x01, 0xd2, 0xd3, 0x1a, 0x1a, 0x41, 0x6b, 0x9f, 0xdb, + 0xdf, 0x2b, 0xa1, 0xd5, 0x14, 0xf4, 0x60, 0xaf, 0x34, 0x49, 0x4b, 0x97, 0x3d, 0xfa, 0xfc, 0x8d, + 0x23, 0xfd, 0x65, 0x10, 0x42, 0xb7, 0x60, 0x9c, 0x96, 0xb2, 0x1d, 0x25, 0x23, 0xe2, 0xf2, 0x27, + 0xeb, 0x33, 0xfb, 0x7b, 0xa5, 0xf1, 0xd5, 0x04, 0x2c, 0x8f, 0x74, 0x8a, 0x48, 0x46, 0x72, 0xac, + 0x81, 0x6e, 0x93, 0x63, 0xd9, 0x5f, 0xb4, 0xe0, 0x14, 0xbd, 0xe0, 0x6a, 0xd7, 0x73, 0xb4, 0xe8, + 0x4e, 0xd3, 0xe5, 0x7a, 0x1a, 0x71, 0xd5, 0x30, 0x59, 0x5d, 0x79, 0x99, 0x6b, 0x69, 0x14, 0x94, + 0x9e, 0xf0, 0xdb, 0xae, 0x57, 0x4b, 0x9e, 0xf0, 0xd7, 0x5c, 0xaf, 0x86, 0x19, 0x44, 0x5d, 0x59, + 0xc5, 0xdc, 0x08, 0xfb, 0x5f, 0xa3, 0x7b, 0x95, 0xf6, 0xe5, 0x3b, 0xda, 0x0d, 0xf4, 0x8c, 0xae, + 0x53, 0x15, 0xe6, 0x93, 0xb9, 0xfa, 0xd4, 0x2f, 0x58, 0x20, 0xbc, 0xdf, 0xbb, 0xb8, 0x93, 0xdf, + 0x86, 0xe1, 0x9d, 0x74, 0xca, 0xd7, 0x73, 0xf9, 0xe1, 0x00, 0x44, 0xa2, 0x57, 0xc5, 0xa2, 0x1b, + 0xe9, 0x5d, 0x0d, 0x5a, 0x76, 0x0d, 0x04, 0x74, 0x81, 0x30, 0xad, 0x46, 0xe7, 0xde, 0x3c, 0x0f, + 0x50, 0x63, 0xb8, 0x2c, 0x0f, 0x7c, 0xc1, 0xe4, 0xb8, 0x16, 0x14, 0x04, 0x6b, 0x58, 0xf6, 0xaf, + 0x16, 0x61, 0x48, 0xa6, 0x18, 0x6d, 0x79, 0xdd, 0xc8, 0x1e, 0x75, 0xc6, 0xa9, 0xd0, 0x91, 0x71, + 0x7a, 0x07, 0x26, 0x02, 0x52, 0x6d, 0x05, 0xa1, 0xbb, 0x43, 0x24, 0x58, 0x6c, 0x92, 0x19, 0x9e, + 0xe0, 0x21, 0x01, 0x3c, 0x60, 0x21, 0xb2, 0x12, 0x85, 0x4c, 0x69, 0x9c, 0x26, 0x84, 0x2e, 0xc2, + 0x20, 0x13, 0xbd, 0x97, 0x63, 0x81, 0xb0, 0x12, 0x7c, 0xad, 0x48, 0x00, 0x8e, 0x71, 0xd8, 0xe3, + 0xa0, 0x75, 0x9b, 0xa1, 0x27, 0x3c, 0xc1, 0x2b, 0xbc, 0x18, 0x4b, 0x38, 0xfa, 0x38, 0x8c, 0xf3, + 0x7a, 0x81, 0xdf, 0x74, 0x36, 0xb9, 0x4a, 0xb0, 0x57, 0x85, 0xd7, 0x19, 0x5f, 0x49, 0xc0, 0x0e, + 0xf6, 0x4a, 0xc7, 0x92, 0x65, 0xac, 0xdb, 0x29, 0x2a, 0xcc, 0xf2, 0x8f, 0x37, 0x42, 0xef, 0x8c, + 0x94, 0xc1, 0x60, 0x0c, 0xc2, 0x3a, 0x9e, 0xfd, 0xcf, 0x16, 0x4c, 0x68, 0x53, 0xd5, 0x75, 0x8e, + 0x0d, 0x63, 0x90, 0x0a, 0x5d, 0x0c, 0xd2, 0xe1, 0xa2, 0x3d, 0x64, 0xce, 
0x70, 0xcf, 0x03, 0x9a, + 0x61, 0xfb, 0x33, 0x80, 0xd2, 0xf9, 0x6b, 0xd1, 0x9b, 0xdc, 0x90, 0xdf, 0x0d, 0x48, 0xad, 0x9d, + 0xc2, 0x5f, 0x8f, 0x9c, 0x23, 0x3d, 0x57, 0x79, 0x2d, 0xac, 0xea, 0xdb, 0x3f, 0xde, 0x03, 0xe3, + 0xc9, 0x58, 0x1d, 0xe8, 0x2a, 0xf4, 0x71, 0x2e, 0x5d, 0x90, 0x6f, 0x63, 0x4f, 0xa6, 0x45, 0xf8, + 0xe0, 0xf9, 0x6f, 0x38, 0x77, 0x2f, 0xea, 0xa3, 0x77, 0x60, 0xa8, 0xe6, 0xdf, 0xf1, 0xee, 0x38, + 0x41, 0x6d, 0xb6, 0xbc, 0x2c, 0x4e, 0x88, 0x4c, 0x01, 0xd4, 0x42, 0x8c, 0xa6, 0x47, 0x0d, 0x61, + 0xb6, 0x13, 0x31, 0x08, 0xeb, 0xe4, 0xd0, 0x1a, 0x4b, 0xc9, 0xb4, 0xe1, 0x6e, 0xae, 0x38, 0xcd, + 0x76, 0x5e, 0x5d, 0xf3, 0x12, 0x49, 0xa3, 0x3c, 0x22, 0xf2, 0x36, 0x71, 0x00, 0x8e, 0x09, 0xa1, + 0xcf, 0xc1, 0x64, 0x98, 0xa3, 0x12, 0xcb, 0x4b, 0x67, 0xde, 0x4e, 0x4b, 0xc4, 0x85, 0x29, 0x59, + 0xca, 0xb3, 0xac, 0x66, 0xd0, 0x5d, 0x40, 0x42, 0xf4, 0xbc, 0x16, 0xb4, 0xc2, 0x68, 0xae, 0xe5, + 0xd5, 0xea, 0x32, 0x65, 0xd3, 0x87, 0xb3, 0xe5, 0x04, 0x49, 0x6c, 0xad, 0x6d, 0x16, 0x12, 0x38, + 0x8d, 0x81, 0x33, 0xda, 0xb0, 0xbf, 0xd0, 0x03, 0xd3, 0x32, 0x61, 0x74, 0x86, 0xf7, 0xca, 0xe7, + 0xad, 0x84, 0xfb, 0xca, 0x2b, 0xf9, 0x07, 0xfd, 0x43, 0x73, 0x62, 0xf9, 0x52, 0xda, 0x89, 0xe5, + 0xb5, 0x43, 0x76, 0xe3, 0x81, 0xb9, 0xb2, 0x7c, 0xcf, 0xfa, 0x9f, 0xec, 0x1f, 0x03, 0xe3, 0x6a, + 0x46, 0x98, 0xc7, 0x5b, 0x2f, 0x4b, 0xd5, 0x51, 0xce, 0xf3, 0xff, 0xaa, 0xc0, 0x31, 0x2e, 0xfb, + 0x61, 0x19, 0x95, 0x9d, 0x9d, 0xb3, 0x8a, 0x0e, 0xa5, 0x49, 0x1a, 0xcd, 0x68, 0x77, 0xc1, 0x0d, + 0x44, 0x8f, 0x33, 0x69, 0x2e, 0x0a, 0x9c, 0x34, 0x4d, 0x09, 0xc1, 0x8a, 0x0e, 0xda, 0x81, 0x89, + 0x4d, 0x16, 0xf1, 0x49, 0xcb, 0xdd, 0x2c, 0xce, 0x85, 0xcc, 0x7d, 0x7b, 0x65, 0x7e, 0x31, 0x3f, + 0xd1, 0x33, 0x7f, 0xfc, 0xa5, 0x50, 0x70, 0xba, 0x09, 0xba, 0x35, 0x8e, 0x39, 0x77, 0xc2, 0xc5, + 0xba, 0x13, 0x46, 0x6e, 0x75, 0xae, 0xee, 0x57, 0xb7, 0x2b, 0x91, 0x1f, 0xc8, 0x04, 0x8f, 0x99, + 0x6f, 0xaf, 0xd9, 0x5b, 0x95, 0x14, 0xbe, 0xd1, 0xfc, 0xd4, 0xfe, 0x5e, 0xe9, 0x58, 0x16, 0x16, + 0xce, 0x6c, 0x0b, 0xad, 0x42, 0xff, 0xa6, 0x1b, 0x61, 0xd2, 0xf4, 0xc5, 0x69, 0x91, 0x79, 0x14, + 0x5e, 0xe1, 0x28, 0x46, 0x4b, 0x2c, 0x22, 0x95, 0x00, 0x60, 0x49, 0x04, 0xbd, 0xa9, 0x2e, 0x81, + 0xbe, 0x7c, 0x01, 0x6c, 0xda, 0xf6, 0x2e, 0xf3, 0x1a, 0x78, 0x1d, 0x8a, 0xde, 0x46, 0xd8, 0x2e, + 0x16, 0xcf, 0xea, 0x92, 0x21, 0x3f, 0x9b, 0xeb, 0xa7, 0x4f, 0xe3, 0xd5, 0xa5, 0x0a, 0xa6, 0x15, + 0x99, 0xdb, 0x6b, 0x58, 0x0d, 0x5d, 0x91, 0x2c, 0x2a, 0xd3, 0x0b, 0x78, 0xb9, 0x32, 0x5f, 0x59, + 0x36, 0x68, 0xb0, 0xa8, 0x86, 0xac, 0x18, 0xf3, 0xea, 0xe8, 0x26, 0x0c, 0x6e, 0xf2, 0x83, 0x6f, + 0x23, 0x14, 0x49, 0xe3, 0x33, 0x2f, 0xa3, 0x2b, 0x12, 0xc9, 0xa0, 0xc7, 0xae, 0x0c, 0x05, 0xc2, + 0x31, 0x29, 0xf4, 0x05, 0x0b, 0x8e, 0x27, 0xb3, 0xee, 0x33, 0x67, 0x35, 0x61, 0xa6, 0x96, 0xe9, + 0x00, 0x50, 0xce, 0xaa, 0x60, 0x34, 0xc8, 0xd4, 0x2f, 0x99, 0x68, 0x38, 0xbb, 0x39, 0x3a, 0xd0, + 0xc1, 0xed, 0x5a, 0xbb, 0xfc, 0x42, 0x89, 0xc0, 0x44, 0x7c, 0xa0, 0xf1, 0xdc, 0x02, 0xa6, 0x15, + 0xd1, 0x1a, 0xc0, 0x46, 0x9d, 0x88, 0x88, 0x8f, 0xc2, 0x28, 0x2a, 0xf3, 0xf6, 0x5f, 0x52, 0x58, + 0x82, 0x0e, 0x7b, 0x89, 0xc6, 0xa5, 0x58, 0xa3, 0x43, 0x97, 0x52, 0xd5, 0xf5, 0x6a, 0x24, 0x60, + 0xca, 0xad, 0x9c, 0xa5, 0x34, 0xcf, 0x30, 0xd2, 0x4b, 0x89, 0x97, 0x63, 0x41, 0x81, 0xd1, 0x22, + 0xcd, 0xad, 0x8d, 0xb0, 0x5d, 0x26, 0x8b, 0x79, 0xd2, 0xdc, 0x4a, 0x2c, 0x28, 0x4e, 0x8b, 0x95, + 0x63, 0x41, 0x81, 0x6e, 0x99, 0x0d, 0xba, 0x81, 0x48, 0x30, 0x35, 0x96, 0xbf, 0x65, 0x96, 0x38, + 0x4a, 0x7a, 0xcb, 0x08, 0x00, 0x96, 0x44, 0xd0, 0xa7, 0x4d, 0x6e, 0x67, 0x9c, 0xd1, 0x7c, 0xa6, + 
0x03, 0xb7, 0x63, 0xd0, 0x6d, 0xcf, 0xef, 0xbc, 0x02, 0x85, 0x8d, 0x2a, 0x53, 0x8a, 0xe5, 0xe8, + 0x0c, 0x96, 0xe6, 0x0d, 0x6a, 0x2c, 0x32, 0xfc, 0xd2, 0x3c, 0x2e, 0x6c, 0x54, 0xe9, 0xd2, 0x77, + 0xee, 0xb5, 0x02, 0xb2, 0xe4, 0xd6, 0x89, 0xc8, 0x6a, 0x91, 0xb9, 0xf4, 0x67, 0x25, 0x52, 0x7a, + 0xe9, 0x2b, 0x10, 0x8e, 0x49, 0x51, 0xba, 0x31, 0x0f, 0x36, 0x99, 0x4f, 0x57, 0xb1, 0x5a, 0x69, + 0xba, 0x99, 0x5c, 0xd8, 0x36, 0x8c, 0xec, 0x84, 0xcd, 0x2d, 0x22, 0x4f, 0x45, 0xa6, 0xae, 0xcb, + 0x89, 0x54, 0x71, 0x53, 0x20, 0xba, 0x41, 0xd4, 0x72, 0xea, 0xa9, 0x83, 0x9c, 0x89, 0x56, 0x6e, + 0xea, 0xc4, 0xb0, 0x49, 0x9b, 0x2e, 0x84, 0x77, 0x79, 0x38, 0x39, 0xa6, 0xb8, 0xcb, 0x59, 0x08, + 0x19, 0x11, 0xe7, 0xf8, 0x42, 0x10, 0x00, 0x2c, 0x89, 0xa8, 0xc1, 0x66, 0x17, 0xd0, 0x89, 0x0e, + 0x83, 0x9d, 0xea, 0x6f, 0x3c, 0xd8, 0xec, 0xc2, 0x89, 0x49, 0xb1, 0x8b, 0xa6, 0xb9, 0xe5, 0x47, + 0xbe, 0x97, 0xb8, 0xe4, 0x4e, 0xe6, 0x5f, 0x34, 0xe5, 0x0c, 0xfc, 0xf4, 0x45, 0x93, 0x85, 0x85, + 0x33, 0xdb, 0xa2, 0x1f, 0xd7, 0x94, 0x91, 0x01, 0x45, 0xe6, 0x8d, 0xa7, 0x72, 0x02, 0x6b, 0xa6, + 0xc3, 0x07, 0xf2, 0x8f, 0x53, 0x20, 0x1c, 0x93, 0x42, 0x35, 0x18, 0x6d, 0x1a, 0x11, 0x67, 0x59, + 0x06, 0x91, 0x1c, 0xbe, 0x20, 0x2b, 0x36, 0x2d, 0x97, 0x10, 0x99, 0x10, 0x9c, 0xa0, 0xc9, 0x2c, + 0xf7, 0xb8, 0xab, 0x1f, 0x4b, 0x30, 0x92, 0x33, 0xd5, 0x19, 0xde, 0x80, 0x7c, 0xaa, 0x05, 0x00, + 0x4b, 0x22, 0x74, 0x34, 0x84, 0x83, 0x9a, 0x1f, 0xb2, 0x3c, 0x3d, 0x79, 0x0a, 0xf6, 0x2c, 0x35, + 0x91, 0x0c, 0xb3, 0x2e, 0x40, 0x38, 0x26, 0x45, 0x4f, 0x72, 0x7a, 0xe1, 0x9d, 0xce, 0x3f, 0xc9, + 0x93, 0xd7, 0x1d, 0x3b, 0xc9, 0xe9, 0x65, 0x57, 0x14, 0x57, 0x9d, 0x8a, 0x0a, 0xce, 0x72, 0x8c, + 0xe4, 0xf4, 0x4b, 0x85, 0x15, 0x4f, 0xf7, 0x4b, 0x81, 0x70, 0x4c, 0x8a, 0x5d, 0xc5, 0x2c, 0x34, + 0xdd, 0xd9, 0x36, 0x57, 0x31, 0x45, 0xc8, 0xb8, 0x8a, 0xb5, 0xd0, 0x75, 0xf6, 0x8f, 0x17, 0xe0, + 0x6c, 0xfb, 0x7d, 0x1b, 0xeb, 0xd0, 0xca, 0xb1, 0xcd, 0x52, 0x42, 0x87, 0xc6, 0x25, 0x3a, 0x31, + 0x56, 0xd7, 0x01, 0x87, 0xaf, 0xc0, 0x84, 0x72, 0x47, 0xac, 0xbb, 0xd5, 0x5d, 0x2d, 0xb1, 0xa8, + 0x0a, 0xcd, 0x53, 0x49, 0x22, 0xe0, 0x74, 0x1d, 0x34, 0x0b, 0x63, 0x46, 0xe1, 0xf2, 0x82, 0x78, + 0xfe, 0xc7, 0xd9, 0x31, 0x4c, 0x30, 0x4e, 0xe2, 0xdb, 0xbf, 0x66, 0xc1, 0xc9, 0x9c, 0x3c, 0xf3, + 0x5d, 0xc7, 0xd3, 0xdd, 0x80, 0xb1, 0xa6, 0x59, 0xb5, 0x43, 0x08, 0x70, 0x23, 0x9b, 0xbd, 0xea, + 0x6b, 0x02, 0x80, 0x93, 0x44, 0xed, 0x5f, 0x29, 0xc0, 0x99, 0xb6, 0xf6, 0xf5, 0x08, 0xc3, 0x89, + 0xcd, 0x46, 0xe8, 0xcc, 0x07, 0xa4, 0x46, 0xbc, 0xc8, 0x75, 0xea, 0x95, 0x26, 0xa9, 0x6a, 0x5a, + 0x50, 0x66, 0xa8, 0x7e, 0x65, 0xa5, 0x32, 0x9b, 0xc6, 0xc0, 0x39, 0x35, 0xd1, 0x12, 0xa0, 0x34, + 0x44, 0xcc, 0x30, 0x7b, 0xe2, 0xa6, 0xe9, 0xe1, 0x8c, 0x1a, 0xe8, 0x65, 0x18, 0x51, 0x76, 0xfb, + 0xda, 0x8c, 0xb3, 0x0b, 0x02, 0xeb, 0x00, 0x6c, 0xe2, 0xa1, 0x4b, 0x3c, 0x6d, 0x92, 0x48, 0xb0, + 0x25, 0x54, 0xa6, 0x63, 0x32, 0x27, 0x92, 0x28, 0xc6, 0x3a, 0xce, 0xdc, 0xe5, 0x3f, 0xfd, 0xf6, + 0xd9, 0x0f, 0xfd, 0xc5, 0xb7, 0xcf, 0x7e, 0xe8, 0xaf, 0xbe, 0x7d, 0xf6, 0x43, 0x3f, 0xb4, 0x7f, + 0xd6, 0xfa, 0xd3, 0xfd, 0xb3, 0xd6, 0x5f, 0xec, 0x9f, 0xb5, 0xfe, 0x6a, 0xff, 0xac, 0xf5, 0xff, + 0xee, 0x9f, 0xb5, 0xbe, 0xfc, 0xb7, 0x67, 0x3f, 0xf4, 0x36, 0x8a, 0x23, 0x54, 0x5f, 0xa4, 0xb3, + 0x73, 0x71, 0xe7, 0xd2, 0x7f, 0x0a, 0x00, 0x00, 0xff, 0xff, 0x60, 0x45, 0x7a, 0xd6, 0xa3, 0x24, + 0x01, 0x00, } func (m *AWSElasticBlockStoreVolumeSource) Marshal() (dAtA []byte, err error) { @@ -16016,6 +16020,13 @@ func (m *PodLogOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if 
m.Stream != nil { + i -= len(*m.Stream) + copy(dAtA[i:], *m.Stream) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Stream))) + i-- + dAtA[i] = 0x52 + } i-- if m.InsecureSkipTLSVerifyBackend { dAtA[i] = 1 @@ -16322,6 +16333,13 @@ func (m *PodSecurityContext) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.SELinuxChangePolicy != nil { + i -= len(*m.SELinuxChangePolicy) + copy(dAtA[i:], *m.SELinuxChangePolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.SELinuxChangePolicy))) + i-- + dAtA[i] = 0x6a + } if m.SupplementalGroupsPolicy != nil { i -= len(*m.SupplementalGroupsPolicy) copy(dAtA[i:], *m.SupplementalGroupsPolicy) @@ -16488,6 +16506,20 @@ func (m *PodSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.Resources != nil { + { + size, err := m.Resources.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xc2 + } if len(m.ResourceClaims) > 0 { for iNdEx := len(m.ResourceClaims) - 1; iNdEx >= 0; iNdEx-- { { @@ -24737,6 +24769,10 @@ func (m *PodLogOptions) Size() (n int) { n += 1 + sovGenerated(uint64(*m.LimitBytes)) } n += 2 + if m.Stream != nil { + l = len(*m.Stream) + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -24885,6 +24921,10 @@ func (m *PodSecurityContext) Size() (n int) { l = len(*m.SupplementalGroupsPolicy) n += 1 + l + sovGenerated(uint64(l)) } + if m.SELinuxChangePolicy != nil { + l = len(*m.SELinuxChangePolicy) + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -25059,6 +25099,10 @@ func (m *PodSpec) Size() (n int) { n += 2 + l + sovGenerated(uint64(l)) } } + if m.Resources != nil { + l = m.Resources.Size() + n += 2 + l + sovGenerated(uint64(l)) + } return n } @@ -29088,6 +29132,7 @@ func (this *PodLogOptions) String() string { `TailLines:` + valueToStringGenerated(this.TailLines) + `,`, `LimitBytes:` + valueToStringGenerated(this.LimitBytes) + `,`, `InsecureSkipTLSVerifyBackend:` + fmt.Sprintf("%v", this.InsecureSkipTLSVerifyBackend) + `,`, + `Stream:` + valueToStringGenerated(this.Stream) + `,`, `}`, }, "") return s @@ -29187,6 +29232,7 @@ func (this *PodSecurityContext) String() string { `SeccompProfile:` + strings.Replace(this.SeccompProfile.String(), "SeccompProfile", "SeccompProfile", 1) + `,`, `AppArmorProfile:` + strings.Replace(this.AppArmorProfile.String(), "AppArmorProfile", "AppArmorProfile", 1) + `,`, `SupplementalGroupsPolicy:` + valueToStringGenerated(this.SupplementalGroupsPolicy) + `,`, + `SELinuxChangePolicy:` + valueToStringGenerated(this.SELinuxChangePolicy) + `,`, `}`, }, "") return s @@ -29320,6 +29366,7 @@ func (this *PodSpec) String() string { `HostUsers:` + valueToStringGenerated(this.HostUsers) + `,`, `SchedulingGates:` + repeatedStringForSchedulingGates + `,`, `ResourceClaims:` + repeatedStringForResourceClaims + `,`, + `Resources:` + strings.Replace(this.Resources.String(), "ResourceRequirements", "ResourceRequirements", 1) + `,`, `}`, }, "") return s @@ -56954,6 +57001,39 @@ func (m *PodLogOptions) Unmarshal(dAtA []byte) error { } } m.InsecureSkipTLSVerifyBackend = bool(v != 0) + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Stream", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + 
break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Stream = &s + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -58122,6 +58202,39 @@ func (m *PodSecurityContext) Unmarshal(dAtA []byte) error { s := SupplementalGroupsPolicy(dAtA[iNdEx:postIndex]) m.SupplementalGroupsPolicy = &s iNdEx = postIndex + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SELinuxChangePolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := PodSELinuxChangePolicy(dAtA[iNdEx:postIndex]) + m.SELinuxChangePolicy = &s + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -59611,6 +59724,42 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 40: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Resources == nil { + m.Resources = &ResourceRequirements{} + } + if err := m.Resources.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) diff --git a/vendor/k8s.io/api/core/v1/generated.proto b/vendor/k8s.io/api/core/v1/generated.proto index 68ac80ed0..08706987c 100644 --- a/vendor/k8s.io/api/core/v1/generated.proto +++ b/vendor/k8s.io/api/core/v1/generated.proto @@ -181,7 +181,6 @@ message AzureFileVolumeSource { } // Binding ties one object to another; for example, a pod is bound to a node by a scheduler. -// Deprecated in 1.7, please use the bindings subresource of pods instead. message Binding { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata @@ -192,7 +191,7 @@ message Binding { optional ObjectReference target = 2; } -// Represents storage that is managed by an external CSI volume driver (Beta feature) +// Represents storage that is managed by an external CSI volume driver message CSIPersistentVolumeSource { // driver is the name of the driver to use for this volume. // Required. @@ -1071,7 +1070,7 @@ message ContainerStatus { // AllocatedResources represents the compute resources allocated for this container by the // node. 
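The generated.pb.go marshal hunks above hard-code the protobuf key bytes for the new fields: 0x52 for PodLogOptions.Stream (field 10), 0x6a for PodSecurityContext.SELinuxChangePolicy (field 13), and 0xc2 0x02 for PodSpec.Resources (field 40; the marshal code fills its buffer back to front, which is why 0x2 is written before 0xc2). Those constants follow from key = (fieldNumber << 3) | wireType with wire type 2 (length-delimited), varint-encoded. A minimal, self-contained Go sketch, not part of the vendored code, that reproduces them:

package main

import "fmt"

// tagBytes returns the varint-encoded protobuf key for a field:
// key = (fieldNumber << 3) | wireType; wire type 2 means length-delimited.
func tagBytes(fieldNumber, wireType uint64) []byte {
	key := fieldNumber<<3 | wireType
	var out []byte
	for key >= 0x80 {
		out = append(out, byte(key)|0x80) // low 7 bits plus continuation bit
		key >>= 7
	}
	return append(out, byte(key))
}

func main() {
	fmt.Printf("%x\n", tagBytes(10, 2)) // PodLogOptions.Stream                   -> 52
	fmt.Printf("%x\n", tagBytes(13, 2)) // PodSecurityContext.SELinuxChangePolicy -> 6a
	fmt.Printf("%x\n", tagBytes(40, 2)) // PodSpec.Resources                      -> c202
}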
Kubelet sets this value to Container.Resources.Requests upon successful pod admission // and after successfully admitting desired pod resize. - // +featureGate=InPlacePodVerticalScaling + // +featureGate=InPlacePodVerticalScalingAllocatedStatus // +optional map allocatedResources = 10; @@ -1870,6 +1869,7 @@ message GCEPersistentDiskVolumeSource { optional bool readOnly = 4; } +// GRPCAction specifies an action involving a GRPC service. message GRPCAction { // Port number of the gRPC service. Number must be in the range 1 to 65535. optional int32 port = 1; @@ -2203,21 +2203,21 @@ message Lifecycle { // LifecycleHandler defines a specific action that should be taken in a lifecycle // hook. One and only one of the fields, except TCPSocket must be specified. message LifecycleHandler { - // Exec specifies the action to take. + // Exec specifies a command to execute in the container. // +optional optional ExecAction exec = 1; - // HTTPGet specifies the http request to perform. + // HTTPGet specifies an HTTP GET request to perform. // +optional optional HTTPGetAction httpGet = 2; // Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept - // for the backward compatibility. There are no validation of this field and - // lifecycle hooks will fail in runtime when tcp handler is specified. + // for backward compatibility. There is no validation of this field and + // lifecycle hooks will fail at runtime when it is specified. // +optional optional TCPSocketAction tcpSocket = 3; - // Sleep represents the duration that the container should sleep before being terminated. + // Sleep represents a duration that the container should sleep. // +featureGate=PodLifecycleSleepAction // +optional optional SleepAction sleep = 4; @@ -2346,13 +2346,23 @@ message LoadBalancerStatus { // LocalObjectReference contains enough information to let you locate the // referenced object inside the same namespace. +// --- +// New uses of this type are discouraged because of difficulty describing its usage when embedded in APIs. +// 1. Invalid usage help. It is impossible to add specific help for individual usage. In most embedded usages, there are particular +// restrictions like, "must refer only to types A and B" or "UID not honored" or "name must be restricted". +// Those cannot be well described when embedded. +// 2. Inconsistent validation. Because the usages are different, the validation rules are different by usage, which makes it hard for users to predict what will happen. +// 3. We cannot easily change it. Because this type is embedded in many locations, updates to this type +// will affect numerous schemas. Don't make new APIs embed an underspecified API type they do not control. +// +// Instead of using this type, create a locally provided and used type that is well-focused on your reference. +// For example, ServiceReferences for admission registration: https://github.com/kubernetes/api/blob/release-1.17/admissionregistration/v1/types.go#L533 . // +structType=atomic message LocalObjectReference { // Name of the referent. // This field is effectively required, but due to backwards compatibility is // allowed to be empty. Instances of this type with an empty value here are // almost certainly wrong. - // TODO: Add other useful fields. apiVersion, kind, uid? 
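The LifecycleHandler hunk a little above now documents the four possible actions (exec, httpGet, the deprecated tcpSocket, and sleep), of which exactly one may be set. For the sleep variant, a hedged client-side sketch of a preStop hook; it assumes the PodLifecycleSleepAction feature gate is enabled on the target cluster, and the image name is a placeholder:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	// Pause the container for five seconds before termination instead of
	// exec'ing an external "sleep 5" binary.
	c := corev1.Container{
		Name:  "web",
		Image: "registry.example.com/web:latest", // placeholder image
		Lifecycle: &corev1.Lifecycle{
			PreStop: &corev1.LifecycleHandler{
				Sleep: &corev1.SleepAction{Seconds: 5},
			},
		},
	}
	fmt.Println(c.Lifecycle.PreStop.Sleep.Seconds)
}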
// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names // +optional // +default="" @@ -2361,7 +2371,7 @@ message LocalObjectReference { optional string name = 1; } -// Local represents directly-attached storage with node affinity (Beta feature) +// Local represents directly-attached storage with node affinity message LocalVolumeSource { // path of the full path to the volume on the node. // It can be either a directory or block device (disk, partition, ...). @@ -2438,12 +2448,15 @@ message NamespaceCondition { // Status of the condition, one of True, False, Unknown. optional string status = 2; + // Last time the condition transitioned from one status to another. // +optional optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 4; + // Unique, one-word, CamelCase reason for the condition's last transition. // +optional optional string reason = 5; + // Human-readable message indicating details about last transition. // +optional optional string message = 6; } @@ -2783,7 +2796,7 @@ message NodeStatus { optional string phase = 3; // Conditions is an array of current observed node conditions. - // More info: https://kubernetes.io/docs/concepts/nodes/node/#condition + // More info: https://kubernetes.io/docs/reference/node/node-status/#condition // +optional // +patchMergeKey=type // +patchStrategy=merge @@ -2793,7 +2806,7 @@ message NodeStatus { // List of addresses reachable to the node. // Queried from cloud provider, if available. - // More info: https://kubernetes.io/docs/concepts/nodes/node/#addresses + // More info: https://kubernetes.io/docs/reference/node/node-status/#addresses // Note: This field is declared as mergeable, but the merge key is not sufficiently // unique, which can cause data corruption when it is merged. Callers should instead // use a full-replacement patch. See https://pr.k8s.io/79391 for an example. @@ -2813,7 +2826,7 @@ message NodeStatus { optional NodeDaemonEndpoints daemonEndpoints = 6; // Set of ids/uuids to uniquely identify the node. - // More info: https://kubernetes.io/docs/concepts/nodes/node/#info + // More info: https://kubernetes.io/docs/reference/node/node-status/#info // +optional optional NodeSystemInfo nodeInfo = 7; @@ -3001,8 +3014,13 @@ message PersistentVolumeClaim { // PersistentVolumeClaimCondition contains details about state of pvc message PersistentVolumeClaimCondition { + // Type is the type of the condition. + // More info: https://kubernetes.io/docs/reference/kubernetes-api/config-and-storage-resources/persistent-volume-claim-v1/#:~:text=set%20to%20%27ResizeStarted%27.-,PersistentVolumeClaimCondition,-contains%20details%20about optional string type = 1; + // Status is the status of the condition. + // Can be True, False, Unknown. + // More info: https://kubernetes.io/docs/reference/kubernetes-api/config-and-storage-resources/persistent-volume-claim-v1/#:~:text=state%20of%20pvc-,conditions.status,-(string)%2C%20required optional string status = 2; // lastProbeTime is the time we probed the condition. @@ -3280,12 +3298,16 @@ message PersistentVolumeList { message PersistentVolumeSource { // gcePersistentDisk represents a GCE Disk resource that is attached to a // kubelet's host machine and then exposed to the pod. Provisioned by an admin. + // Deprecated: GCEPersistentDisk is deprecated. All operations for the in-tree + // gcePersistentDisk type are redirected to the pd.csi.storage.gke.io CSI driver. 
// More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk // +optional optional GCEPersistentDiskVolumeSource gcePersistentDisk = 1; // awsElasticBlockStore represents an AWS Disk resource that is attached to a // kubelet's host machine and then exposed to the pod. + // Deprecated: AWSElasticBlockStore is deprecated. All operations for the in-tree + // awsElasticBlockStore type are redirected to the ebs.csi.aws.com CSI driver. // More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore // +optional optional AWSElasticBlockStoreVolumeSource awsElasticBlockStore = 2; @@ -3300,6 +3322,7 @@ message PersistentVolumeSource { // glusterfs represents a Glusterfs volume that is attached to a host and // exposed to the pod. Provisioned by an admin. + // Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported. // More info: https://examples.k8s.io/volumes/glusterfs/README.md // +optional optional GlusterfsPersistentVolumeSource glusterfs = 4; @@ -3310,6 +3333,7 @@ message PersistentVolumeSource { optional NFSVolumeSource nfs = 5; // rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. + // Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported. // More info: https://examples.k8s.io/volumes/rbd/README.md // +optional optional RBDPersistentVolumeSource rbd = 6; @@ -3320,11 +3344,14 @@ message PersistentVolumeSource { optional ISCSIPersistentVolumeSource iscsi = 7; // cinder represents a cinder volume attached and mounted on kubelets host machine. + // Deprecated: Cinder is deprecated. All operations for the in-tree cinder type + // are redirected to the cinder.csi.openstack.org CSI driver. // More info: https://examples.k8s.io/mysql-cinder-pd/README.md // +optional optional CinderPersistentVolumeSource cinder = 8; - // cephFS represents a Ceph FS mount on the host that shares a pod's lifetime + // cephFS represents a Ceph FS mount on the host that shares a pod's lifetime. + // Deprecated: CephFS is deprecated and the in-tree cephfs type is no longer supported. // +optional optional CephFSPersistentVolumeSource cephfs = 9; @@ -3332,39 +3359,53 @@ message PersistentVolumeSource { // +optional optional FCVolumeSource fc = 10; - // flocker represents a Flocker volume attached to a kubelet's host machine and exposed to the pod for its usage. This depends on the Flocker control service being running + // flocker represents a Flocker volume attached to a kubelet's host machine and exposed to the pod for its usage. This depends on the Flocker control service being running. + // Deprecated: Flocker is deprecated and the in-tree flocker type is no longer supported. // +optional optional FlockerVolumeSource flocker = 11; // flexVolume represents a generic volume resource that is // provisioned/attached using an exec based plugin. + // Deprecated: FlexVolume is deprecated. Consider using a CSIDriver instead. // +optional optional FlexPersistentVolumeSource flexVolume = 12; // azureFile represents an Azure File Service mount on the host and bind mount to the pod. + // Deprecated: AzureFile is deprecated. All operations for the in-tree azureFile type + // are redirected to the file.csi.azure.com CSI driver. // +optional optional AzureFilePersistentVolumeSource azureFile = 13; - // vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine + // vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine. 
+ // Deprecated: VsphereVolume is deprecated. All operations for the in-tree vsphereVolume type + // are redirected to the csi.vsphere.vmware.com CSI driver. // +optional optional VsphereVirtualDiskVolumeSource vsphereVolume = 14; - // quobyte represents a Quobyte mount on the host that shares a pod's lifetime + // quobyte represents a Quobyte mount on the host that shares a pod's lifetime. + // Deprecated: Quobyte is deprecated and the in-tree quobyte type is no longer supported. // +optional optional QuobyteVolumeSource quobyte = 15; // azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. + // Deprecated: AzureDisk is deprecated. All operations for the in-tree azureDisk type + // are redirected to the disk.csi.azure.com CSI driver. // +optional optional AzureDiskVolumeSource azureDisk = 16; - // photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine + // photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine. + // Deprecated: PhotonPersistentDisk is deprecated and the in-tree photonPersistentDisk type is no longer supported. optional PhotonPersistentDiskVolumeSource photonPersistentDisk = 17; - // portworxVolume represents a portworx volume attached and mounted on kubelets host machine + // portworxVolume represents a portworx volume attached and mounted on kubelets host machine. + // Deprecated: PortworxVolume is deprecated. All operations for the in-tree portworxVolume type + // are redirected to the pxd.portworx.com CSI driver when the CSIMigrationPortworx feature-gate + // is on. // +optional optional PortworxVolumeSource portworxVolume = 18; // scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. + // Deprecated: ScaleIO is deprecated and the in-tree scaleIO type is no longer supported. // +optional optional ScaleIOPersistentVolumeSource scaleIO = 19; @@ -3372,12 +3413,13 @@ message PersistentVolumeSource { // +optional optional LocalVolumeSource local = 20; - // storageOS represents a StorageOS volume that is attached to the kubelet's host machine and mounted into the pod + // storageOS represents a StorageOS volume that is attached to the kubelet's host machine and mounted into the pod. + // Deprecated: StorageOS is deprecated and the in-tree storageos type is no longer supported. // More info: https://examples.k8s.io/volumes/storageos/README.md // +optional optional StorageOSPersistentVolumeSource storageos = 21; - // csi represents storage that is handled by an external CSI driver (Beta feature). + // csi represents storage that is handled by an external CSI driver. // +optional optional CSIPersistentVolumeSource csi = 22; } @@ -3710,9 +3752,11 @@ message PodDNSConfig { // PodDNSConfigOption defines DNS resolver options of a pod. message PodDNSConfigOption { + // Name is this DNS resolver option's name. // Required. optional string name = 1; + // Value is this DNS resolver option's value. // +optional optional string value = 2; } @@ -3803,7 +3847,8 @@ message PodLogOptions { optional bool timestamps = 6; // If set, the number of lines from the end of the logs to show. If not specified, - // logs are shown from the creation of the container or sinceSeconds or sinceTime + // logs are shown from the creation of the container or sinceSeconds or sinceTime. + // Note that when "TailLines" is specified, "Stream" can only be set to nil or "All". 
// +optional optional int64 tailLines = 7; @@ -3821,6 +3866,14 @@ message PodLogOptions { // the actual log data coming from the real kubelet). // +optional optional bool insecureSkipTLSVerifyBackend = 9; + + // Specify which container log stream to return to the client. + // Acceptable values are "All", "Stdout" and "Stderr". If not specified, "All" is used, and both stdout and stderr + // are returned interleaved. + // Note that when "TailLines" is specified, "Stream" can only be set to nil or "All". + // +featureGate=PodLogsQuerySplitStreams + // +optional + optional string stream = 10; } // PodOS defines the OS parameters of a pod. @@ -4029,6 +4082,33 @@ message PodSecurityContext { // Note that this field cannot be set when spec.os.name is windows. // +optional optional AppArmorProfile appArmorProfile = 11; + + // seLinuxChangePolicy defines how the container's SELinux label is applied to all volumes used by the Pod. + // It has no effect on nodes that do not support SELinux or to volumes does not support SELinux. + // Valid values are "MountOption" and "Recursive". + // + // "Recursive" means relabeling of all files on all Pod volumes by the container runtime. + // This may be slow for large volumes, but allows mixing privileged and unprivileged Pods sharing the same volume on the same node. + // + // "MountOption" mounts all eligible Pod volumes with `-o context` mount option. + // This requires all Pods that share the same volume to use the same SELinux label. + // It is not possible to share the same volume among privileged and unprivileged Pods. + // Eligible volumes are in-tree FibreChannel and iSCSI volumes, and all CSI volumes + // whose CSI driver announces SELinux support by setting spec.seLinuxMount: true in their + // CSIDriver instance. Other volumes are always re-labelled recursively. + // "MountOption" value is allowed only when SELinuxMount feature gate is enabled. + // + // If not specified and SELinuxMount feature gate is enabled, "MountOption" is used. + // If not specified and SELinuxMount feature gate is disabled, "MountOption" is used for ReadWriteOncePod volumes + // and "Recursive" for all other volumes. + // + // This field affects only Pods that have SELinux label set, either in PodSecurityContext or in SecurityContext of all containers. + // + // All Pods that use the same volume should use the same seLinuxChangePolicy, otherwise some pods can get stuck in ContainerCreating state. + // Note that this field cannot be set when spec.os.name is windows. + // +featureGate=SELinuxChangePolicy + // +optional + optional string seLinuxChangePolicy = 13; } // Describes the class of pods that should avoid this node. @@ -4386,6 +4466,21 @@ message PodSpec { // +featureGate=DynamicResourceAllocation // +optional repeated PodResourceClaim resourceClaims = 39; + + // Resources is the total amount of CPU and Memory resources required by all + // containers in the pod. It supports specifying Requests and Limits for + // "cpu" and "memory" resource names only. ResourceClaims are not supported. + // + // This field enables fine-grained control over resource allocation for the + // entire pod, allowing resource sharing among containers in a pod. + // TODO: For beta graduation, expand this comment with a detailed explanation. + // + // This is an alpha field and requires enabling the PodLevelResources feature + // gate. 
+ // + // +featureGate=PodLevelResources + // +optional + optional ResourceRequirements resources = 40; } // PodStatus represents information about the status of a pod. Status may trail the actual @@ -4477,14 +4572,26 @@ message PodStatus { // +optional optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time startTime = 7; - // The list has one entry per init container in the manifest. The most recent successful + // Statuses of init containers in this pod. The most recent successful non-restartable // init container will have ready = true, the most recently started container will have // startTime set. - // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status + // Each init container in the pod should have at most one status in this list, + // and all statuses should be for containers in the pod. + // However this is not enforced. + // If a status for a non-existent container is present in the list, or the list has duplicate names, + // the behavior of various Kubernetes components is not defined and those statuses might be + // ignored. + // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#pod-and-container-status // +listType=atomic repeated ContainerStatus initContainerStatuses = 10; - // The list has one entry per container in the manifest. + // Statuses of containers in this pod. + // Each container in the pod should have at most one status in this list, + // and all statuses should be for containers in the pod. + // However this is not enforced. + // If a status for a non-existent container is present in the list, or the list has duplicate names, + // the behavior of various Kubernetes components is not defined and those statuses might be + // ignored. // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status // +optional // +listType=atomic @@ -4496,7 +4603,14 @@ message PodStatus { // +optional optional string qosClass = 9; - // Status for any ephemeral containers that have run in this pod. + // Statuses for any ephemeral containers that have run in this pod. + // Each ephemeral container in the pod should have at most one status in this list, + // and all statuses should be for containers in the pod. + // However this is not enforced. + // If a status for a non-existent container is present in the list, or the list has duplicate names, + // the behavior of various Kubernetes components is not defined and those statuses might be + // ignored. + // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status // +optional // +listType=atomic repeated ContainerStatus ephemeralContainerStatuses = 13; @@ -4571,6 +4685,7 @@ message PodTemplateSpec { optional PodSpec spec = 2; } +// PortStatus represents the error condition of a service port message PortStatus { // Port is the port number of the service port of which status is recorded here optional int32 port = 1; @@ -4695,19 +4810,19 @@ message Probe { // ProbeHandler defines a specific action that should be taken in a probe. // One and only one of the fields must be specified. message ProbeHandler { - // Exec specifies the action to take. + // Exec specifies a command to execute in the container. // +optional optional ExecAction exec = 1; - // HTTPGet specifies the http request to perform. + // HTTPGet specifies an HTTP GET request to perform. // +optional optional HTTPGetAction httpGet = 2; - // TCPSocket specifies an action involving a TCP port. 
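Taken together, the hunks above introduce three new core/v1 fields: PodLogOptions.stream, PodSecurityContext.seLinuxChangePolicy, and the pod-level PodSpec.resources. A hedged sketch of how a client would populate them, assuming the PodLogsQuerySplitStreams, SELinuxChangePolicy, and PodLevelResources feature gates are enabled; names and quantities are illustrative only:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	// Request only the stderr stream from the log endpoint
	// (accepted values per generated.proto: "All", "Stdout", "Stderr").
	stderrOnly := "Stderr"
	logOpts := corev1.PodLogOptions{Stream: &stderrOnly}

	// Relabel volumes recursively instead of relying on -o context mounts.
	recursive := corev1.PodSELinuxChangePolicy("Recursive")

	spec := corev1.PodSpec{
		SecurityContext: &corev1.PodSecurityContext{
			SELinuxChangePolicy: &recursive,
		},
		// Pod-level budget shared by all containers; only cpu and memory
		// are accepted here, and ResourceClaims are not supported.
		Resources: &corev1.ResourceRequirements{
			Requests: corev1.ResourceList{
				corev1.ResourceCPU:    resource.MustParse("500m"),
				corev1.ResourceMemory: resource.MustParse("256Mi"),
			},
			Limits: corev1.ResourceList{
				corev1.ResourceCPU:    resource.MustParse("1"),
				corev1.ResourceMemory: resource.MustParse("512Mi"),
			},
		},
	}

	fmt.Println(*logOpts.Stream, *spec.SecurityContext.SELinuxChangePolicy, spec.Resources.Requests)
}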
+ // TCPSocket specifies a connection to a TCP port. // +optional optional TCPSocketAction tcpSocket = 3; - // GRPC specifies an action involving a GRPC port. + // GRPC specifies a GRPC HealthCheckRequest. // +optional optional GRPCAction grpc = 4; } @@ -5036,7 +5151,7 @@ message ResourceFieldSelector { } // ResourceHealth represents the health of a resource. It has the latest device health information. -// This is a part of KEP https://kep.k8s.io/4680 and historical health changes are planned to be added in future iterations of a KEP. +// This is a part of KEP https://kep.k8s.io/4680. message ResourceHealth { // ResourceID is the unique identifier of the resource. See the ResourceID type for more information. optional string resourceID = 1; @@ -5145,15 +5260,18 @@ message ResourceRequirements { repeated ResourceClaim claims = 3; } +// ResourceStatus represents the status of a single resource allocated to a Pod. message ResourceStatus { - // Name of the resource. Must be unique within the pod and match one of the resources from the pod spec. + // Name of the resource. Must be unique within the pod and in case of non-DRA resource, match one of the resources from the pod spec. + // For DRA resources, the value must be "claim:/". + // When this status is reported about a container, the "claim_name" and "request" must match one of the claims of this container. // +required optional string name = 1; - // List of unique Resources health. Each element in the list contains an unique resource ID and resource health. - // At a minimum, ResourceID must uniquely identify the Resource - // allocated to the Pod on the Node for the lifetime of a Pod. - // See ResourceID type for it's definition. + // List of unique resources health. Each element in the list contains an unique resource ID and its health. + // At a minimum, for the lifetime of a Pod, resource ID must uniquely identify the resource allocated to the Pod on the Node. + // If other Pod on the same Node reports the status with the same resource ID, it must be the same resource they share. + // See ResourceID type definition for a specific format it has in various use cases. // +listType=map // +listMapKey=resourceID repeated ResourceHealth resources = 2; @@ -5611,6 +5729,8 @@ message ServiceAccount { // Secrets is a list of the secrets in the same namespace that pods running using this ServiceAccount are allowed to use. // Pods are only limited to this list if this service account has a "kubernetes.io/enforce-mountable-secrets" annotation set to "true". + // The "kubernetes.io/enforce-mountable-secrets" annotation is deprecated since v1.32. + // Prefer separate namespaces to isolate access to mounted secrets. // This field should not be used to find auto-generated service account token secrets for use outside of pods. // Instead, tokens can be requested directly using the TokenRequest API, or service account token secrets can be manually created. // More info: https://kubernetes.io/docs/concepts/configuration/secret @@ -5996,7 +6116,7 @@ message ServiceSpec { // not set, the implementation will apply its default routing strategy. If set // to "PreferClose", implementations should prioritize endpoints that are // topologically close (e.g., same zone). - // This is an alpha field and requires enabling ServiceTrafficDistribution feature. + // This is a beta field and requires enabling ServiceTrafficDistribution feature. 
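For the trafficDistribution promotion noted just above (alpha to beta), a minimal sketch of a Service that opts in; "PreferClose" is the only value the comment names, the selector and port are illustrative, and the ServiceTrafficDistribution feature gate still has to be enabled:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	preferClose := "PreferClose"
	svc := corev1.Service{
		Spec: corev1.ServiceSpec{
			Selector:            map[string]string{"app": "web"}, // illustrative selector
			Ports:               []corev1.ServicePort{{Port: 80}},
			TrafficDistribution: &preferClose, // prefer topologically close endpoints
		},
	}
	fmt.Println(*svc.Spec.TrafficDistribution)
}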
// +featureGate=ServiceTrafficDistribution // +optional optional string trafficDistribution = 23; @@ -6323,6 +6443,20 @@ message TopologySpreadConstraint { // TypedLocalObjectReference contains enough information to let you locate the // typed referenced object inside the same namespace. +// --- +// New uses of this type are discouraged because of difficulty describing its usage when embedded in APIs. +// 1. Invalid usage help. It is impossible to add specific help for individual usage. In most embedded usages, there are particular +// restrictions like, "must refer only to types A and B" or "UID not honored" or "name must be restricted". +// Those cannot be well described when embedded. +// 2. Inconsistent validation. Because the usages are different, the validation rules are different by usage, which makes it hard for users to predict what will happen. +// 3. The fields are both imprecise and overly precise. Kind is not a precise mapping to a URL. This can produce ambiguity +// during interpretation and require a REST mapping. In most cases, the dependency is on the group,resource tuple +// and the version of the actual struct is irrelevant. +// 4. We cannot easily change it. Because this type is embedded in many locations, updates to this type +// will affect numerous schemas. Don't make new APIs embed an underspecified API type they do not control. +// +// Instead of using this type, create a locally provided and used type that is well-focused on your reference. +// For example, ServiceReferences for admission registration: https://github.com/kubernetes/api/blob/release-1.17/admissionregistration/v1/types.go#L533 . // +structType=atomic message TypedLocalObjectReference { // APIGroup is the group for the resource being referenced. @@ -6338,6 +6472,7 @@ message TypedLocalObjectReference { optional string name = 3; } +// TypedObjectReference contains enough information to let you locate the typed referenced object message TypedObjectReference { // APIGroup is the group for the resource being referenced. // If APIGroup is not specified, the specified Kind must be in the core API group. @@ -6538,18 +6673,22 @@ message VolumeSource { // gcePersistentDisk represents a GCE Disk resource that is attached to a // kubelet's host machine and then exposed to the pod. + // Deprecated: GCEPersistentDisk is deprecated. All operations for the in-tree + // gcePersistentDisk type are redirected to the pd.csi.storage.gke.io CSI driver. // More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk // +optional optional GCEPersistentDiskVolumeSource gcePersistentDisk = 3; // awsElasticBlockStore represents an AWS Disk resource that is attached to a // kubelet's host machine and then exposed to the pod. + // Deprecated: AWSElasticBlockStore is deprecated. All operations for the in-tree + // awsElasticBlockStore type are redirected to the ebs.csi.aws.com CSI driver. // More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore // +optional optional AWSElasticBlockStoreVolumeSource awsElasticBlockStore = 4; // gitRepo represents a git repository at a particular revision. - // DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an + // Deprecated: GitRepo is deprecated. To provision a container with a git repo, mount an // EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir // into the Pod's container. 
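The gitRepo note above describes its replacement only in prose; a hypothetical sketch of that exact pattern, an init container cloning into an emptyDir that the main container then mounts (the images and repository URL are placeholders, not anything from this repository):

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	repoVolume := corev1.Volume{
		Name:         "repo",
		VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}},
	}
	pod := corev1.Pod{
		Spec: corev1.PodSpec{
			Volumes: []corev1.Volume{repoVolume},
			InitContainers: []corev1.Container{{
				Name:         "clone",
				Image:        "alpine/git:latest", // placeholder image
				Command:      []string{"git", "clone", "--depth=1", "https://example.com/repo.git", "/workspace"},
				VolumeMounts: []corev1.VolumeMount{{Name: "repo", MountPath: "/workspace"}},
			}},
			Containers: []corev1.Container{{
				Name:         "app",
				Image:        "registry.example.com/app:latest", // placeholder image
				VolumeMounts: []corev1.VolumeMount{{Name: "repo", MountPath: "/workspace", ReadOnly: true}},
			}},
		},
	}
	fmt.Println(len(pod.Spec.InitContainers))
}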
// +optional @@ -6572,6 +6711,7 @@ message VolumeSource { optional ISCSIVolumeSource iscsi = 8; // glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. + // Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported. // More info: https://examples.k8s.io/volumes/glusterfs/README.md // +optional optional GlusterfsVolumeSource glusterfs = 9; @@ -6583,25 +6723,31 @@ message VolumeSource { optional PersistentVolumeClaimVolumeSource persistentVolumeClaim = 10; // rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. + // Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported. // More info: https://examples.k8s.io/volumes/rbd/README.md // +optional optional RBDVolumeSource rbd = 11; // flexVolume represents a generic volume resource that is // provisioned/attached using an exec based plugin. + // Deprecated: FlexVolume is deprecated. Consider using a CSIDriver instead. // +optional optional FlexVolumeSource flexVolume = 12; // cinder represents a cinder volume attached and mounted on kubelets host machine. + // Deprecated: Cinder is deprecated. All operations for the in-tree cinder type + // are redirected to the cinder.csi.openstack.org CSI driver. // More info: https://examples.k8s.io/mysql-cinder-pd/README.md // +optional optional CinderVolumeSource cinder = 13; - // cephFS represents a Ceph FS mount on the host that shares a pod's lifetime + // cephFS represents a Ceph FS mount on the host that shares a pod's lifetime. + // Deprecated: CephFS is deprecated and the in-tree cephfs type is no longer supported. // +optional optional CephFSVolumeSource cephfs = 14; - // flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running + // flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running. + // Deprecated: Flocker is deprecated and the in-tree flocker type is no longer supported. // +optional optional FlockerVolumeSource flocker = 15; @@ -6614,6 +6760,8 @@ message VolumeSource { optional FCVolumeSource fc = 17; // azureFile represents an Azure File Service mount on the host and bind mount to the pod. + // Deprecated: AzureFile is deprecated. All operations for the in-tree azureFile type + // are redirected to the file.csi.azure.com CSI driver. // +optional optional AzureFileVolumeSource azureFile = 18; @@ -6621,37 +6769,48 @@ message VolumeSource { // +optional optional ConfigMapVolumeSource configMap = 19; - // vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine + // vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine. + // Deprecated: VsphereVolume is deprecated. All operations for the in-tree vsphereVolume type + // are redirected to the csi.vsphere.vmware.com CSI driver. // +optional optional VsphereVirtualDiskVolumeSource vsphereVolume = 20; - // quobyte represents a Quobyte mount on the host that shares a pod's lifetime + // quobyte represents a Quobyte mount on the host that shares a pod's lifetime. + // Deprecated: Quobyte is deprecated and the in-tree quobyte type is no longer supported. // +optional optional QuobyteVolumeSource quobyte = 21; // azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. + // Deprecated: AzureDisk is deprecated. 
All operations for the in-tree azureDisk type + // are redirected to the disk.csi.azure.com CSI driver. // +optional optional AzureDiskVolumeSource azureDisk = 22; - // photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine + // photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine. + // Deprecated: PhotonPersistentDisk is deprecated and the in-tree photonPersistentDisk type is no longer supported. optional PhotonPersistentDiskVolumeSource photonPersistentDisk = 23; // projected items for all in one resources secrets, configmaps, and downward API optional ProjectedVolumeSource projected = 26; - // portworxVolume represents a portworx volume attached and mounted on kubelets host machine + // portworxVolume represents a portworx volume attached and mounted on kubelets host machine. + // Deprecated: PortworxVolume is deprecated. All operations for the in-tree portworxVolume type + // are redirected to the pxd.portworx.com CSI driver when the CSIMigrationPortworx feature-gate + // is on. // +optional optional PortworxVolumeSource portworxVolume = 24; // scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. + // Deprecated: ScaleIO is deprecated and the in-tree scaleIO type is no longer supported. // +optional optional ScaleIOVolumeSource scaleIO = 25; // storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes. + // Deprecated: StorageOS is deprecated and the in-tree storageos type is no longer supported. // +optional optional StorageOSVolumeSource storageos = 27; - // csi (Container Storage Interface) represents ephemeral storage that is handled by certain external CSI drivers (Beta feature). + // csi (Container Storage Interface) represents ephemeral storage that is handled by certain external CSI drivers. // +optional optional CSIVolumeSource csi = 28; diff --git a/vendor/k8s.io/api/core/v1/objectreference.go b/vendor/k8s.io/api/core/v1/objectreference.go index ee5335ee8..609cadc7a 100644 --- a/vendor/k8s.io/api/core/v1/objectreference.go +++ b/vendor/k8s.io/api/core/v1/objectreference.go @@ -20,7 +20,7 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" ) -// IsAnAPIObject allows clients to preemptively get a reference to an API object and pass it to places that +// SetGroupVersionKind allows clients to preemptively get a reference to an API object and pass it to places that // intend only to get a reference to that object. This simplifies the event recording interface. func (obj *ObjectReference) SetGroupVersionKind(gvk schema.GroupVersionKind) { obj.APIVersion, obj.Kind = gvk.ToAPIVersionAndKind() diff --git a/vendor/k8s.io/api/core/v1/types.go b/vendor/k8s.io/api/core/v1/types.go index 3a74138ba..fb2c1c745 100644 --- a/vendor/k8s.io/api/core/v1/types.go +++ b/vendor/k8s.io/api/core/v1/types.go @@ -63,16 +63,20 @@ type VolumeSource struct { EmptyDir *EmptyDirVolumeSource `json:"emptyDir,omitempty" protobuf:"bytes,2,opt,name=emptyDir"` // gcePersistentDisk represents a GCE Disk resource that is attached to a // kubelet's host machine and then exposed to the pod. + // Deprecated: GCEPersistentDisk is deprecated. All operations for the in-tree + // gcePersistentDisk type are redirected to the pd.csi.storage.gke.io CSI driver. 
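The objectreference.go hunk a little above only fixes the doc comment to match the method it sits on; for reference, a small sketch of what calling it does (the referent is made up):

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

func main() {
	ref := &corev1.ObjectReference{Name: "cert-manager", Namespace: "cert-manager"} // made-up referent
	ref.SetGroupVersionKind(schema.GroupVersionKind{Group: "apps", Version: "v1", Kind: "Deployment"})
	fmt.Println(ref.APIVersion, ref.Kind) // "apps/v1 Deployment"
}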
// More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk // +optional GCEPersistentDisk *GCEPersistentDiskVolumeSource `json:"gcePersistentDisk,omitempty" protobuf:"bytes,3,opt,name=gcePersistentDisk"` // awsElasticBlockStore represents an AWS Disk resource that is attached to a // kubelet's host machine and then exposed to the pod. + // Deprecated: AWSElasticBlockStore is deprecated. All operations for the in-tree + // awsElasticBlockStore type are redirected to the ebs.csi.aws.com CSI driver. // More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore // +optional AWSElasticBlockStore *AWSElasticBlockStoreVolumeSource `json:"awsElasticBlockStore,omitempty" protobuf:"bytes,4,opt,name=awsElasticBlockStore"` // gitRepo represents a git repository at a particular revision. - // DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an + // Deprecated: GitRepo is deprecated. To provision a container with a git repo, mount an // EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir // into the Pod's container. // +optional @@ -91,6 +95,7 @@ type VolumeSource struct { // +optional ISCSI *ISCSIVolumeSource `json:"iscsi,omitempty" protobuf:"bytes,8,opt,name=iscsi"` // glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. + // Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported. // More info: https://examples.k8s.io/volumes/glusterfs/README.md // +optional Glusterfs *GlusterfsVolumeSource `json:"glusterfs,omitempty" protobuf:"bytes,9,opt,name=glusterfs"` @@ -100,21 +105,27 @@ type VolumeSource struct { // +optional PersistentVolumeClaim *PersistentVolumeClaimVolumeSource `json:"persistentVolumeClaim,omitempty" protobuf:"bytes,10,opt,name=persistentVolumeClaim"` // rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. + // Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported. // More info: https://examples.k8s.io/volumes/rbd/README.md // +optional RBD *RBDVolumeSource `json:"rbd,omitempty" protobuf:"bytes,11,opt,name=rbd"` // flexVolume represents a generic volume resource that is // provisioned/attached using an exec based plugin. + // Deprecated: FlexVolume is deprecated. Consider using a CSIDriver instead. // +optional FlexVolume *FlexVolumeSource `json:"flexVolume,omitempty" protobuf:"bytes,12,opt,name=flexVolume"` // cinder represents a cinder volume attached and mounted on kubelets host machine. + // Deprecated: Cinder is deprecated. All operations for the in-tree cinder type + // are redirected to the cinder.csi.openstack.org CSI driver. // More info: https://examples.k8s.io/mysql-cinder-pd/README.md // +optional Cinder *CinderVolumeSource `json:"cinder,omitempty" protobuf:"bytes,13,opt,name=cinder"` - // cephFS represents a Ceph FS mount on the host that shares a pod's lifetime + // cephFS represents a Ceph FS mount on the host that shares a pod's lifetime. + // Deprecated: CephFS is deprecated and the in-tree cephfs type is no longer supported. // +optional CephFS *CephFSVolumeSource `json:"cephfs,omitempty" protobuf:"bytes,14,opt,name=cephfs"` - // flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running + // flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running. 
+ // Deprecated: Flocker is deprecated and the in-tree flocker type is no longer supported. // +optional Flocker *FlockerVolumeSource `json:"flocker,omitempty" protobuf:"bytes,15,opt,name=flocker"` // downwardAPI represents downward API about the pod that should populate this volume @@ -124,34 +135,47 @@ type VolumeSource struct { // +optional FC *FCVolumeSource `json:"fc,omitempty" protobuf:"bytes,17,opt,name=fc"` // azureFile represents an Azure File Service mount on the host and bind mount to the pod. + // Deprecated: AzureFile is deprecated. All operations for the in-tree azureFile type + // are redirected to the file.csi.azure.com CSI driver. // +optional AzureFile *AzureFileVolumeSource `json:"azureFile,omitempty" protobuf:"bytes,18,opt,name=azureFile"` // configMap represents a configMap that should populate this volume // +optional ConfigMap *ConfigMapVolumeSource `json:"configMap,omitempty" protobuf:"bytes,19,opt,name=configMap"` - // vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine + // vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine. + // Deprecated: VsphereVolume is deprecated. All operations for the in-tree vsphereVolume type + // are redirected to the csi.vsphere.vmware.com CSI driver. // +optional VsphereVolume *VsphereVirtualDiskVolumeSource `json:"vsphereVolume,omitempty" protobuf:"bytes,20,opt,name=vsphereVolume"` - // quobyte represents a Quobyte mount on the host that shares a pod's lifetime + // quobyte represents a Quobyte mount on the host that shares a pod's lifetime. + // Deprecated: Quobyte is deprecated and the in-tree quobyte type is no longer supported. // +optional Quobyte *QuobyteVolumeSource `json:"quobyte,omitempty" protobuf:"bytes,21,opt,name=quobyte"` // azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. + // Deprecated: AzureDisk is deprecated. All operations for the in-tree azureDisk type + // are redirected to the disk.csi.azure.com CSI driver. // +optional AzureDisk *AzureDiskVolumeSource `json:"azureDisk,omitempty" protobuf:"bytes,22,opt,name=azureDisk"` - // photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine + // photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine. + // Deprecated: PhotonPersistentDisk is deprecated and the in-tree photonPersistentDisk type is no longer supported. PhotonPersistentDisk *PhotonPersistentDiskVolumeSource `json:"photonPersistentDisk,omitempty" protobuf:"bytes,23,opt,name=photonPersistentDisk"` // projected items for all in one resources secrets, configmaps, and downward API Projected *ProjectedVolumeSource `json:"projected,omitempty" protobuf:"bytes,26,opt,name=projected"` - // portworxVolume represents a portworx volume attached and mounted on kubelets host machine + // portworxVolume represents a portworx volume attached and mounted on kubelets host machine. + // Deprecated: PortworxVolume is deprecated. All operations for the in-tree portworxVolume type + // are redirected to the pxd.portworx.com CSI driver when the CSIMigrationPortworx feature-gate + // is on. // +optional PortworxVolume *PortworxVolumeSource `json:"portworxVolume,omitempty" protobuf:"bytes,24,opt,name=portworxVolume"` // scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. + // Deprecated: ScaleIO is deprecated and the in-tree scaleIO type is no longer supported. 
// +optional ScaleIO *ScaleIOVolumeSource `json:"scaleIO,omitempty" protobuf:"bytes,25,opt,name=scaleIO"` // storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes. + // Deprecated: StorageOS is deprecated and the in-tree storageos type is no longer supported. // +optional StorageOS *StorageOSVolumeSource `json:"storageos,omitempty" protobuf:"bytes,27,opt,name=storageos"` - // csi (Container Storage Interface) represents ephemeral storage that is handled by certain external CSI drivers (Beta feature). + // csi (Container Storage Interface) represents ephemeral storage that is handled by certain external CSI drivers. // +optional CSI *CSIVolumeSource `json:"csi,omitempty" protobuf:"bytes,28,opt,name=csi"` // ephemeral represents a volume that is handled by a cluster storage driver. @@ -219,11 +243,15 @@ type PersistentVolumeClaimVolumeSource struct { type PersistentVolumeSource struct { // gcePersistentDisk represents a GCE Disk resource that is attached to a // kubelet's host machine and then exposed to the pod. Provisioned by an admin. + // Deprecated: GCEPersistentDisk is deprecated. All operations for the in-tree + // gcePersistentDisk type are redirected to the pd.csi.storage.gke.io CSI driver. // More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk // +optional GCEPersistentDisk *GCEPersistentDiskVolumeSource `json:"gcePersistentDisk,omitempty" protobuf:"bytes,1,opt,name=gcePersistentDisk"` // awsElasticBlockStore represents an AWS Disk resource that is attached to a // kubelet's host machine and then exposed to the pod. + // Deprecated: AWSElasticBlockStore is deprecated. All operations for the in-tree + // awsElasticBlockStore type are redirected to the ebs.csi.aws.com CSI driver. // More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore // +optional AWSElasticBlockStore *AWSElasticBlockStoreVolumeSource `json:"awsElasticBlockStore,omitempty" protobuf:"bytes,2,opt,name=awsElasticBlockStore"` @@ -236,6 +264,7 @@ type PersistentVolumeSource struct { HostPath *HostPathVolumeSource `json:"hostPath,omitempty" protobuf:"bytes,3,opt,name=hostPath"` // glusterfs represents a Glusterfs volume that is attached to a host and // exposed to the pod. Provisioned by an admin. + // Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported. // More info: https://examples.k8s.io/volumes/glusterfs/README.md // +optional Glusterfs *GlusterfsPersistentVolumeSource `json:"glusterfs,omitempty" protobuf:"bytes,4,opt,name=glusterfs"` @@ -244,6 +273,7 @@ type PersistentVolumeSource struct { // +optional NFS *NFSVolumeSource `json:"nfs,omitempty" protobuf:"bytes,5,opt,name=nfs"` // rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. + // Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported. // More info: https://examples.k8s.io/volumes/rbd/README.md // +optional RBD *RBDPersistentVolumeSource `json:"rbd,omitempty" protobuf:"bytes,6,opt,name=rbd"` @@ -252,50 +282,68 @@ type PersistentVolumeSource struct { // +optional ISCSI *ISCSIPersistentVolumeSource `json:"iscsi,omitempty" protobuf:"bytes,7,opt,name=iscsi"` // cinder represents a cinder volume attached and mounted on kubelets host machine. + // Deprecated: Cinder is deprecated. All operations for the in-tree cinder type + // are redirected to the cinder.csi.openstack.org CSI driver. 
// More info: https://examples.k8s.io/mysql-cinder-pd/README.md // +optional Cinder *CinderPersistentVolumeSource `json:"cinder,omitempty" protobuf:"bytes,8,opt,name=cinder"` - // cephFS represents a Ceph FS mount on the host that shares a pod's lifetime + // cephFS represents a Ceph FS mount on the host that shares a pod's lifetime. + // Deprecated: CephFS is deprecated and the in-tree cephfs type is no longer supported. // +optional CephFS *CephFSPersistentVolumeSource `json:"cephfs,omitempty" protobuf:"bytes,9,opt,name=cephfs"` // fc represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod. // +optional FC *FCVolumeSource `json:"fc,omitempty" protobuf:"bytes,10,opt,name=fc"` - // flocker represents a Flocker volume attached to a kubelet's host machine and exposed to the pod for its usage. This depends on the Flocker control service being running + // flocker represents a Flocker volume attached to a kubelet's host machine and exposed to the pod for its usage. This depends on the Flocker control service being running. + // Deprecated: Flocker is deprecated and the in-tree flocker type is no longer supported. // +optional Flocker *FlockerVolumeSource `json:"flocker,omitempty" protobuf:"bytes,11,opt,name=flocker"` // flexVolume represents a generic volume resource that is // provisioned/attached using an exec based plugin. + // Deprecated: FlexVolume is deprecated. Consider using a CSIDriver instead. // +optional FlexVolume *FlexPersistentVolumeSource `json:"flexVolume,omitempty" protobuf:"bytes,12,opt,name=flexVolume"` // azureFile represents an Azure File Service mount on the host and bind mount to the pod. + // Deprecated: AzureFile is deprecated. All operations for the in-tree azureFile type + // are redirected to the file.csi.azure.com CSI driver. // +optional AzureFile *AzureFilePersistentVolumeSource `json:"azureFile,omitempty" protobuf:"bytes,13,opt,name=azureFile"` - // vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine + // vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine. + // Deprecated: VsphereVolume is deprecated. All operations for the in-tree vsphereVolume type + // are redirected to the csi.vsphere.vmware.com CSI driver. // +optional VsphereVolume *VsphereVirtualDiskVolumeSource `json:"vsphereVolume,omitempty" protobuf:"bytes,14,opt,name=vsphereVolume"` - // quobyte represents a Quobyte mount on the host that shares a pod's lifetime + // quobyte represents a Quobyte mount on the host that shares a pod's lifetime. + // Deprecated: Quobyte is deprecated and the in-tree quobyte type is no longer supported. // +optional Quobyte *QuobyteVolumeSource `json:"quobyte,omitempty" protobuf:"bytes,15,opt,name=quobyte"` // azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. + // Deprecated: AzureDisk is deprecated. All operations for the in-tree azureDisk type + // are redirected to the disk.csi.azure.com CSI driver. // +optional AzureDisk *AzureDiskVolumeSource `json:"azureDisk,omitempty" protobuf:"bytes,16,opt,name=azureDisk"` - // photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine + // photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine. + // Deprecated: PhotonPersistentDisk is deprecated and the in-tree photonPersistentDisk type is no longer supported. 
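For readers tracking the CSI-migration notes above: a minimal sketch of a PersistentVolume that targets one of the named CSI drivers directly instead of a deprecated in-tree source. The PV name, volume handle, filesystem and capacity are placeholders, not values from this repository.

// Illustrative only: a PersistentVolume using the csi source instead of a
// deprecated in-tree field such as azureDisk. Driver name follows the comment
// above; the volume handle is a placeholder.
package main

import (
	"encoding/json"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	pv := corev1.PersistentVolume{
		ObjectMeta: metav1.ObjectMeta{Name: "example-pv"},
		Spec: corev1.PersistentVolumeSpec{
			Capacity: corev1.ResourceList{
				corev1.ResourceStorage: resource.MustParse("10Gi"),
			},
			AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce},
			PersistentVolumeSource: corev1.PersistentVolumeSource{
				// The CSI driver that the deprecated in-tree azureDisk field
				// is redirected to, per the deprecation comment above.
				CSI: &corev1.CSIPersistentVolumeSource{
					Driver:       "disk.csi.azure.com",
					VolumeHandle: "example-disk-handle", // placeholder
					FSType:       "ext4",
				},
			},
		},
	}
	out, _ := json.MarshalIndent(pv, "", "  ")
	fmt.Println(string(out))
}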
PhotonPersistentDisk *PhotonPersistentDiskVolumeSource `json:"photonPersistentDisk,omitempty" protobuf:"bytes,17,opt,name=photonPersistentDisk"` - // portworxVolume represents a portworx volume attached and mounted on kubelets host machine + // portworxVolume represents a portworx volume attached and mounted on kubelets host machine. + // Deprecated: PortworxVolume is deprecated. All operations for the in-tree portworxVolume type + // are redirected to the pxd.portworx.com CSI driver when the CSIMigrationPortworx feature-gate + // is on. // +optional PortworxVolume *PortworxVolumeSource `json:"portworxVolume,omitempty" protobuf:"bytes,18,opt,name=portworxVolume"` // scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. + // Deprecated: ScaleIO is deprecated and the in-tree scaleIO type is no longer supported. // +optional ScaleIO *ScaleIOPersistentVolumeSource `json:"scaleIO,omitempty" protobuf:"bytes,19,opt,name=scaleIO"` // local represents directly-attached storage with node affinity // +optional Local *LocalVolumeSource `json:"local,omitempty" protobuf:"bytes,20,opt,name=local"` - // storageOS represents a StorageOS volume that is attached to the kubelet's host machine and mounted into the pod + // storageOS represents a StorageOS volume that is attached to the kubelet's host machine and mounted into the pod. + // Deprecated: StorageOS is deprecated and the in-tree storageos type is no longer supported. // More info: https://examples.k8s.io/volumes/storageos/README.md // +optional StorageOS *StorageOSPersistentVolumeSource `json:"storageos,omitempty" protobuf:"bytes,21,opt,name=storageos"` - // csi represents storage that is handled by an external CSI driver (Beta feature). + // csi represents storage that is handled by an external CSI driver. // +optional CSI *CSIPersistentVolumeSource `json:"csi,omitempty" protobuf:"bytes,22,opt,name=csi"` } @@ -582,6 +630,7 @@ type PersistentVolumeClaimSpec struct { VolumeAttributesClassName *string `json:"volumeAttributesClassName,omitempty" protobuf:"bytes,9,opt,name=volumeAttributesClassName"` } +// TypedObjectReference contains enough information to let you locate the typed referenced object type TypedObjectReference struct { // APIGroup is the group for the resource being referenced. // If APIGroup is not specified, the specified Kind must be in the core API group. @@ -688,8 +737,13 @@ type ModifyVolumeStatus struct { // PersistentVolumeClaimCondition contains details about state of pvc type PersistentVolumeClaimCondition struct { - Type PersistentVolumeClaimConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=PersistentVolumeClaimConditionType"` - Status ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=ConditionStatus"` + // Type is the type of the condition. + // More info: https://kubernetes.io/docs/reference/kubernetes-api/config-and-storage-resources/persistent-volume-claim-v1/#:~:text=set%20to%20%27ResizeStarted%27.-,PersistentVolumeClaimCondition,-contains%20details%20about + Type PersistentVolumeClaimConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=PersistentVolumeClaimConditionType"` + // Status is the status of the condition. + // Can be True, False, Unknown. 
+ // More info: https://kubernetes.io/docs/reference/kubernetes-api/config-and-storage-resources/persistent-volume-claim-v1/#:~:text=state%20of%20pvc-,conditions.status,-(string)%2C%20required + Status ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=ConditionStatus"` // lastProbeTime is the time we probed the condition. // +optional LastProbeTime metav1.Time `json:"lastProbeTime,omitempty" protobuf:"bytes,3,opt,name=lastProbeTime"` @@ -2015,7 +2069,7 @@ type KeyToPath struct { Mode *int32 `json:"mode,omitempty" protobuf:"varint,3,opt,name=mode"` } -// Local represents directly-attached storage with node affinity (Beta feature) +// Local represents directly-attached storage with node affinity type LocalVolumeSource struct { // path of the full path to the volume on the node. // It can be either a directory or block device (disk, partition, ...). @@ -2029,7 +2083,7 @@ type LocalVolumeSource struct { FSType *string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"` } -// Represents storage that is managed by an external CSI volume driver (Beta feature) +// Represents storage that is managed by an external CSI volume driver type CSIPersistentVolumeSource struct { // driver is the name of the driver to use for this volume. // Required. @@ -2476,6 +2530,7 @@ type TCPSocketAction struct { Host string `json:"host,omitempty" protobuf:"bytes,2,opt,name=host"` } +// GRPCAction specifies an action involving a GRPC service. type GRPCAction struct { // Port number of the gRPC service. Number must be in the range 1 to 65535. Port int32 `json:"port" protobuf:"bytes,1,opt,name=port"` @@ -2891,17 +2946,16 @@ type Container struct { // ProbeHandler defines a specific action that should be taken in a probe. // One and only one of the fields must be specified. type ProbeHandler struct { - // Exec specifies the action to take. + // Exec specifies a command to execute in the container. // +optional Exec *ExecAction `json:"exec,omitempty" protobuf:"bytes,1,opt,name=exec"` - // HTTPGet specifies the http request to perform. + // HTTPGet specifies an HTTP GET request to perform. // +optional HTTPGet *HTTPGetAction `json:"httpGet,omitempty" protobuf:"bytes,2,opt,name=httpGet"` - // TCPSocket specifies an action involving a TCP port. + // TCPSocket specifies a connection to a TCP port. // +optional TCPSocket *TCPSocketAction `json:"tcpSocket,omitempty" protobuf:"bytes,3,opt,name=tcpSocket"` - - // GRPC specifies an action involving a GRPC port. + // GRPC specifies a GRPC HealthCheckRequest. // +optional GRPC *GRPCAction `json:"grpc,omitempty" protobuf:"bytes,4,opt,name=grpc"` } @@ -2909,18 +2963,18 @@ type ProbeHandler struct { // LifecycleHandler defines a specific action that should be taken in a lifecycle // hook. One and only one of the fields, except TCPSocket must be specified. type LifecycleHandler struct { - // Exec specifies the action to take. + // Exec specifies a command to execute in the container. // +optional Exec *ExecAction `json:"exec,omitempty" protobuf:"bytes,1,opt,name=exec"` - // HTTPGet specifies the http request to perform. + // HTTPGet specifies an HTTP GET request to perform. // +optional HTTPGet *HTTPGetAction `json:"httpGet,omitempty" protobuf:"bytes,2,opt,name=httpGet"` // Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept - // for the backward compatibility. There are no validation of this field and - // lifecycle hooks will fail in runtime when tcp handler is specified. + // for backward compatibility. 
There is no validation of this field and + // lifecycle hooks will fail at runtime when it is specified. // +optional TCPSocket *TCPSocketAction `json:"tcpSocket,omitempty" protobuf:"bytes,3,opt,name=tcpSocket"` - // Sleep represents the duration that the container should sleep before being terminated. + // Sleep represents a duration that the container should sleep. // +featureGate=PodLifecycleSleepAction // +optional Sleep *SleepAction `json:"sleep,omitempty" protobuf:"bytes,4,opt,name=sleep"` @@ -3071,7 +3125,7 @@ type ContainerStatus struct { // AllocatedResources represents the compute resources allocated for this container by the // node. Kubelet sets this value to Container.Resources.Requests upon successful pod admission // and after successfully admitting desired pod resize. - // +featureGate=InPlacePodVerticalScaling + // +featureGate=InPlacePodVerticalScalingAllocatedStatus // +optional AllocatedResources ResourceList `json:"allocatedResources,omitempty" protobuf:"bytes,10,rep,name=allocatedResources,casttype=ResourceList,castkey=ResourceName"` // Resources represents the compute resource requests and limits that have been successfully @@ -3102,14 +3156,17 @@ type ContainerStatus struct { AllocatedResourcesStatus []ResourceStatus `json:"allocatedResourcesStatus,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,14,rep,name=allocatedResourcesStatus"` } +// ResourceStatus represents the status of a single resource allocated to a Pod. type ResourceStatus struct { - // Name of the resource. Must be unique within the pod and match one of the resources from the pod spec. + // Name of the resource. Must be unique within the pod and in case of non-DRA resource, match one of the resources from the pod spec. + // For DRA resources, the value must be "claim:<claim_name>/<request>". + // When this status is reported about a container, the "claim_name" and "request" must match one of the claims of this container. // +required Name ResourceName `json:"name" protobuf:"bytes,1,opt,name=name"` - // List of unique Resources health. Each element in the list contains an unique resource ID and resource health. - // At a minimum, ResourceID must uniquely identify the Resource - // allocated to the Pod on the Node for the lifetime of a Pod. - // See ResourceID type for it's definition. + // List of unique resources health. Each element in the list contains an unique resource ID and its health. + // At a minimum, for the lifetime of a Pod, resource ID must uniquely identify the resource allocated to the Pod on the Node. + // If other Pod on the same Node reports the status with the same resource ID, it must be the same resource they share. + // See ResourceID type definition for a specific format it has in various use cases. // +listType=map // +listMapKey=resourceID Resources []ResourceHealth `json:"resources,omitempty" protobuf:"bytes,2,rep,name=resources"` @@ -3126,16 +3183,16 @@ const ( // ResourceID is calculated based on the source of this resource health information.
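A minimal sketch of the "claim:<claim_name>/<request>" naming convention described above, populating allocatedResourcesStatus on a ContainerStatus. The claim, request and device names are hypothetical, and the Healthy constant is assumed to be the one exported by core/v1.

// Illustrative only: how a kubelet-style producer might fill in the
// allocatedResourcesStatus field for a DRA-backed resource.
package main

import (
	"encoding/json"
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	status := corev1.ContainerStatus{
		Name: "training", // placeholder container name
		AllocatedResourcesStatus: []corev1.ResourceStatus{
			{
				// DRA-backed resources use the "claim:<claim_name>/<request>" form.
				Name: corev1.ResourceName("claim:gpu-claim/gpu"),
				Resources: []corev1.ResourceHealth{
					{
						ResourceID: corev1.ResourceID("driver.example.com/pool-a/device-0"), // hypothetical device ID
						Health:     corev1.ResourceHealthStatusHealthy,                      // assumed "Healthy" constant from core/v1
					},
				},
			},
		},
	}
	out, _ := json.MarshalIndent(status, "", "  ")
	fmt.Println(string(out))
}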
// For DevicePlugin: // -// deviceplugin:DeviceID, where DeviceID is from the Device structure of DevicePlugin's ListAndWatchResponse type: https://github.com/kubernetes/kubernetes/blob/eda1c780543a27c078450e2f17d674471e00f494/staging/src/k8s.io/kubelet/pkg/apis/deviceplugin/v1alpha/api.proto#L61-L73 +// DeviceID, where DeviceID is from the Device structure of DevicePlugin's ListAndWatchResponse type: https://github.com/kubernetes/kubernetes/blob/eda1c780543a27c078450e2f17d674471e00f494/staging/src/k8s.io/kubelet/pkg/apis/deviceplugin/v1alpha/api.proto#L61-L73 // // DevicePlugin ID is usually a constant for the lifetime of a Node and typically can be used to uniquely identify the device on the node. // For DRA: // -// dra://: such a device can be looked up in the information published by that DRA driver to learn more about it. It is designed to be globally unique in a cluster. +// //: such a device can be looked up in the information published by that DRA driver to learn more about it. It is designed to be globally unique in a cluster. type ResourceID string // ResourceHealth represents the health of a resource. It has the latest device health information. -// This is a part of KEP https://kep.k8s.io/4680 and historical health changes are planned to be added in future iterations of a KEP. +// This is a part of KEP https://kep.k8s.io/4680. type ResourceHealth struct { // ResourceID is the unique identifier of the resource. See the ResourceID type for more information. ResourceID ResourceID `json:"resourceID" protobuf:"bytes,1,opt,name=resourceID"` @@ -3237,7 +3294,7 @@ const ( // during scheduling, for example due to nodeAffinity parsing errors. PodReasonSchedulerError = "SchedulerError" - // TerminationByKubelet reason in DisruptionTarget pod condition indicates that the termination + // PodReasonTerminationByKubelet reason in DisruptionTarget pod condition indicates that the termination // is initiated by kubelet PodReasonTerminationByKubelet = "TerminationByKubelet" @@ -4030,6 +4087,20 @@ type PodSpec struct { // +featureGate=DynamicResourceAllocation // +optional ResourceClaims []PodResourceClaim `json:"resourceClaims,omitempty" patchStrategy:"merge,retainKeys" patchMergeKey:"name" protobuf:"bytes,39,rep,name=resourceClaims"` + // Resources is the total amount of CPU and Memory resources required by all + // containers in the pod. It supports specifying Requests and Limits for + // "cpu" and "memory" resource names only. ResourceClaims are not supported. + // + // This field enables fine-grained control over resource allocation for the + // entire pod, allowing resource sharing among containers in a pod. + // TODO: For beta graduation, expand this comment with a detailed explanation. + // + // This is an alpha field and requires enabling the PodLevelResources feature + // gate. + // + // +featureGate=PodLevelResources + // +optional + Resources *ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,40,opt,name=resources"` } // PodResourceClaim references exactly one ResourceClaim, either directly @@ -4308,6 +4379,22 @@ const ( SupplementalGroupsPolicyStrict SupplementalGroupsPolicy = "Strict" ) +// PodSELinuxChangePolicy defines how the container's SELinux label is applied to all volumes used by the Pod. +type PodSELinuxChangePolicy string + +const ( + // Recursive relabeling of all Pod volumes by the container runtime. + // This may be slow for large volumes, but allows mixing privileged and unprivileged Pods sharing the same volume on the same node. 
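A minimal sketch of the new pod-level resources field (alpha, behind the PodLevelResources feature gate) introduced on PodSpec above. Container names, images and resource values are illustrative only.

// Illustrative only: a PodSpec with a shared CPU/memory budget for all of its
// containers via the new Resources field; only "cpu" and "memory" are supported.
package main

import (
	"encoding/json"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	spec := corev1.PodSpec{
		Containers: []corev1.Container{
			{Name: "app", Image: "registry.example.com/app:latest"},         // placeholder
			{Name: "sidecar", Image: "registry.example.com/sidecar:latest"}, // placeholder
		},
		// Pod-level totals shared by all containers in the pod.
		Resources: &corev1.ResourceRequirements{
			Requests: corev1.ResourceList{
				corev1.ResourceCPU:    resource.MustParse("500m"),
				corev1.ResourceMemory: resource.MustParse("256Mi"),
			},
			Limits: corev1.ResourceList{
				corev1.ResourceCPU:    resource.MustParse("1"),
				corev1.ResourceMemory: resource.MustParse("512Mi"),
			},
		},
	}
	out, _ := json.MarshalIndent(spec, "", "  ")
	fmt.Println(string(out))
}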
+ SELinuxChangePolicyRecursive PodSELinuxChangePolicy = "Recursive" + // MountOption mounts all eligible Pod volumes with `-o context` mount option. + // This requires all Pods that share the same volume to use the same SELinux label. + // It is not possible to share the same volume among privileged and unprivileged Pods. + // Eligible volumes are in-tree FibreChannel and iSCSI volumes, and all CSI volumes + // whose CSI driver announces SELinux support by setting spec.seLinuxMount: true in their + // CSIDriver instance. Other volumes are always re-labelled recursively. + SELinuxChangePolicyMountOption PodSELinuxChangePolicy = "MountOption" +) + // PodSecurityContext holds pod-level security attributes and common container settings. // Some fields are also present in container.securityContext. Field values of // container.securityContext take precedence over field values of PodSecurityContext. @@ -4406,6 +4493,32 @@ type PodSecurityContext struct { // Note that this field cannot be set when spec.os.name is windows. // +optional AppArmorProfile *AppArmorProfile `json:"appArmorProfile,omitempty" protobuf:"bytes,11,opt,name=appArmorProfile"` + // seLinuxChangePolicy defines how the container's SELinux label is applied to all volumes used by the Pod. + // It has no effect on nodes that do not support SELinux or to volumes does not support SELinux. + // Valid values are "MountOption" and "Recursive". + // + // "Recursive" means relabeling of all files on all Pod volumes by the container runtime. + // This may be slow for large volumes, but allows mixing privileged and unprivileged Pods sharing the same volume on the same node. + // + // "MountOption" mounts all eligible Pod volumes with `-o context` mount option. + // This requires all Pods that share the same volume to use the same SELinux label. + // It is not possible to share the same volume among privileged and unprivileged Pods. + // Eligible volumes are in-tree FibreChannel and iSCSI volumes, and all CSI volumes + // whose CSI driver announces SELinux support by setting spec.seLinuxMount: true in their + // CSIDriver instance. Other volumes are always re-labelled recursively. + // "MountOption" value is allowed only when SELinuxMount feature gate is enabled. + // + // If not specified and SELinuxMount feature gate is enabled, "MountOption" is used. + // If not specified and SELinuxMount feature gate is disabled, "MountOption" is used for ReadWriteOncePod volumes + // and "Recursive" for all other volumes. + // + // This field affects only Pods that have SELinux label set, either in PodSecurityContext or in SecurityContext of all containers. + // + // All Pods that use the same volume should use the same seLinuxChangePolicy, otherwise some pods can get stuck in ContainerCreating state. + // Note that this field cannot be set when spec.os.name is windows. + // +featureGate=SELinuxChangePolicy + // +optional + SELinuxChangePolicy *PodSELinuxChangePolicy `json:"seLinuxChangePolicy,omitempty" protobuf:"bytes,13,opt,name=seLinuxChangePolicy"` } // SeccompProfile defines a pod/container's seccomp profile settings. @@ -4513,8 +4626,10 @@ type PodDNSConfig struct { // PodDNSConfigOption defines DNS resolver options of a pod. type PodDNSConfigOption struct { + // Name is this DNS resolver option's name. // Required. Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"` + // Value is this DNS resolver option's value. 
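A minimal sketch of the new seLinuxChangePolicy field on PodSecurityContext described above, opting a pod into recursive relabeling. The SELinux level shown is a placeholder.

// Illustrative only: pairing an SELinux label with the Recursive change policy
// (behind the SELinuxChangePolicy feature gate). Recursive relabeling can be
// slow for large volumes but allows mixing pods with different labels on the
// same volume, per the field documentation above.
package main

import (
	"encoding/json"
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	policy := corev1.SELinuxChangePolicyRecursive
	sc := corev1.PodSecurityContext{
		SELinuxOptions: &corev1.SELinuxOptions{
			Level: "s0:c123,c456", // placeholder MCS label
		},
		SELinuxChangePolicy: &policy,
	}
	out, _ := json.MarshalIndent(sc, "", "  ")
	fmt.Println(string(out))
}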
// +optional Value *string `json:"value,omitempty" protobuf:"bytes,2,opt,name=value"` } @@ -4807,24 +4922,45 @@ type PodStatus struct { // +optional StartTime *metav1.Time `json:"startTime,omitempty" protobuf:"bytes,7,opt,name=startTime"` - // The list has one entry per init container in the manifest. The most recent successful + // Statuses of init containers in this pod. The most recent successful non-restartable // init container will have ready = true, the most recently started container will have // startTime set. - // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status + // Each init container in the pod should have at most one status in this list, + // and all statuses should be for containers in the pod. + // However this is not enforced. + // If a status for a non-existent container is present in the list, or the list has duplicate names, + // the behavior of various Kubernetes components is not defined and those statuses might be + // ignored. + // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#pod-and-container-status // +listType=atomic InitContainerStatuses []ContainerStatus `json:"initContainerStatuses,omitempty" protobuf:"bytes,10,rep,name=initContainerStatuses"` - // The list has one entry per container in the manifest. + // Statuses of containers in this pod. + // Each container in the pod should have at most one status in this list, + // and all statuses should be for containers in the pod. + // However this is not enforced. + // If a status for a non-existent container is present in the list, or the list has duplicate names, + // the behavior of various Kubernetes components is not defined and those statuses might be + // ignored. // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status // +optional // +listType=atomic ContainerStatuses []ContainerStatus `json:"containerStatuses,omitempty" protobuf:"bytes,8,rep,name=containerStatuses"` + // The Quality of Service (QOS) classification assigned to the pod based on resource requirements // See PodQOSClass type for available QOS classes // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-qos/#quality-of-service-classes // +optional QOSClass PodQOSClass `json:"qosClass,omitempty" protobuf:"bytes,9,rep,name=qosClass"` - // Status for any ephemeral containers that have run in this pod. + + // Statuses for any ephemeral containers that have run in this pod. + // Each ephemeral container in the pod should have at most one status in this list, + // and all statuses should be for containers in the pod. + // However this is not enforced. + // If a status for a non-existent container is present in the list, or the list has duplicate names, + // the behavior of various Kubernetes components is not defined and those statuses might be + // ignored. 
+ // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status // +optional // +listType=atomic EphemeralContainerStatuses []ContainerStatus `json:"ephemeralContainerStatuses,omitempty" protobuf:"bytes,13,rep,name=ephemeralContainerStatuses"` @@ -4867,6 +5003,7 @@ type PodStatusResult struct { // +genclient // +genclient:method=UpdateEphemeralContainers,verb=update,subresource=ephemeralcontainers +// +genclient:method=UpdateResize,verb=update,subresource=resize // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:prerelease-lifecycle-gen:introduced=1.0 @@ -5558,7 +5695,7 @@ type ServiceSpec struct { // not set, the implementation will apply its default routing strategy. If set // to "PreferClose", implementations should prioritize endpoints that are // topologically close (e.g., same zone). - // This is an alpha field and requires enabling ServiceTrafficDistribution feature. + // This is a beta field and requires enabling ServiceTrafficDistribution feature. // +featureGate=ServiceTrafficDistribution // +optional TrafficDistribution *string `json:"trafficDistribution,omitempty" protobuf:"bytes,23,opt,name=trafficDistribution"` @@ -5692,6 +5829,8 @@ type ServiceAccount struct { // Secrets is a list of the secrets in the same namespace that pods running using this ServiceAccount are allowed to use. // Pods are only limited to this list if this service account has a "kubernetes.io/enforce-mountable-secrets" annotation set to "true". + // The "kubernetes.io/enforce-mountable-secrets" annotation is deprecated since v1.32. + // Prefer separate namespaces to isolate access to mounted secrets. // This field should not be used to find auto-generated service account token secrets for use outside of pods. // Instead, tokens can be requested directly using the TokenRequest API, or service account token secrets can be manually created. // More info: https://kubernetes.io/docs/concepts/configuration/secret @@ -6092,7 +6231,7 @@ type NodeStatus struct { // +optional Phase NodePhase `json:"phase,omitempty" protobuf:"bytes,3,opt,name=phase,casttype=NodePhase"` // Conditions is an array of current observed node conditions. - // More info: https://kubernetes.io/docs/concepts/nodes/node/#condition + // More info: https://kubernetes.io/docs/reference/node/node-status/#condition // +optional // +patchMergeKey=type // +patchStrategy=merge @@ -6101,7 +6240,7 @@ type NodeStatus struct { Conditions []NodeCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,4,rep,name=conditions"` // List of addresses reachable to the node. // Queried from cloud provider, if available. - // More info: https://kubernetes.io/docs/concepts/nodes/node/#addresses + // More info: https://kubernetes.io/docs/reference/node/node-status/#addresses // Note: This field is declared as mergeable, but the merge key is not sufficiently // unique, which can cause data corruption when it is merged. Callers should instead // use a full-replacement patch. See https://pr.k8s.io/79391 for an example. @@ -6119,7 +6258,7 @@ type NodeStatus struct { // +optional DaemonEndpoints NodeDaemonEndpoints `json:"daemonEndpoints,omitempty" protobuf:"bytes,6,opt,name=daemonEndpoints"` // Set of ids/uuids to uniquely identify the node. 
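Relating to the trafficDistribution graduation to beta noted earlier in this hunk: a minimal sketch of a Service that requests topology-aware routing. Service name, selector and port are placeholders.

// Illustrative only: setting trafficDistribution to "PreferClose", the value
// called out in the field documentation.
package main

import (
	"encoding/json"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	preferClose := "PreferClose"
	svc := corev1.Service{
		ObjectMeta: metav1.ObjectMeta{Name: "example-svc"}, // placeholder
		Spec: corev1.ServiceSpec{
			Selector: map[string]string{"app": "example"}, // placeholder
			Ports: []corev1.ServicePort{
				{Port: 8443, Protocol: corev1.ProtocolTCP},
			},
			// Prefer topologically close (e.g. same-zone) endpoints.
			TrafficDistribution: &preferClose,
		},
	}
	out, _ := json.MarshalIndent(svc, "", "  ")
	fmt.Println(string(out))
}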
- // More info: https://kubernetes.io/docs/concepts/nodes/node/#info + // More info: https://kubernetes.io/docs/reference/node/node-status/#info // +optional NodeInfo NodeSystemInfo `json:"nodeInfo,omitempty" protobuf:"bytes,7,opt,name=nodeInfo"` // List of container images on this node @@ -6454,10 +6593,13 @@ type NamespaceCondition struct { Type NamespaceConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=NamespaceConditionType"` // Status of the condition, one of True, False, Unknown. Status ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=ConditionStatus"` + // Last time the condition transitioned from one status to another. // +optional LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,4,opt,name=lastTransitionTime"` + // Unique, one-word, CamelCase reason for the condition's last transition. // +optional Reason string `json:"reason,omitempty" protobuf:"bytes,5,opt,name=reason"` + // Human-readable message indicating details about last transition. // +optional Message string `json:"message,omitempty" protobuf:"bytes,6,opt,name=message"` } @@ -6508,7 +6650,6 @@ type NamespaceList struct { // +k8s:prerelease-lifecycle-gen:introduced=1.0 // Binding ties one object to another; for example, a pod is bound to a node by a scheduler. -// Deprecated in 1.7, please use the bindings subresource of pods instead. type Binding struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. @@ -6528,6 +6669,15 @@ type Preconditions struct { UID *types.UID `json:"uid,omitempty" protobuf:"bytes,1,opt,name=uid,casttype=k8s.io/apimachinery/pkg/types.UID"` } +const ( + // LogStreamStdout is the stream type for stdout. + LogStreamStdout = "Stdout" + // LogStreamStderr is the stream type for stderr. + LogStreamStderr = "Stderr" + // LogStreamAll represents the combined stdout and stderr. + LogStreamAll = "All" +) + // +k8s:conversion-gen:explicit-from=net/url.Values // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:prerelease-lifecycle-gen:introduced=1.0 @@ -6562,7 +6712,8 @@ type PodLogOptions struct { // +optional Timestamps bool `json:"timestamps,omitempty" protobuf:"varint,6,opt,name=timestamps"` // If set, the number of lines from the end of the logs to show. If not specified, - // logs are shown from the creation of the container or sinceSeconds or sinceTime + // logs are shown from the creation of the container or sinceSeconds or sinceTime. + // Note that when "TailLines" is specified, "Stream" can only be set to nil or "All". // +optional TailLines *int64 `json:"tailLines,omitempty" protobuf:"varint,7,opt,name=tailLines"` // If set, the number of bytes to read from the server before terminating the @@ -6579,6 +6730,14 @@ type PodLogOptions struct { // the actual log data coming from the real kubelet). // +optional InsecureSkipTLSVerifyBackend bool `json:"insecureSkipTLSVerifyBackend,omitempty" protobuf:"varint,9,opt,name=insecureSkipTLSVerifyBackend"` + + // Specify which container log stream to return to the client. + // Acceptable values are "All", "Stdout" and "Stderr". If not specified, "All" is used, and both stdout and stderr + // are returned interleaved. + // Note that when "TailLines" is specified, "Stream" can only be set to nil or "All". 
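A minimal sketch of the new Stream field on PodLogOptions (behind the PodLogsQuerySplitStreams feature gate), using the LogStream constants introduced above. The container name is a placeholder.

// Illustrative only: requesting just the stderr stream of a container's logs.
// Per the documented restriction, TailLines is left unset here because when it
// is set, Stream may only be nil or "All".
package main

import (
	"encoding/json"
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	stream := corev1.LogStreamStderr
	opts := corev1.PodLogOptions{
		Container: "app", // placeholder container name
		Stream:    &stream,
	}
	out, _ := json.MarshalIndent(opts, "", "  ")
	fmt.Println(string(out))
}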
+ // +featureGate=PodLogsQuerySplitStreams + // +optional + Stream *string `json:"stream,omitempty" protobuf:"varint,10,opt,name=stream"` } // +k8s:conversion-gen:explicit-from=net/url.Values @@ -6779,13 +6938,23 @@ type ObjectReference struct { // LocalObjectReference contains enough information to let you locate the // referenced object inside the same namespace. +// --- +// New uses of this type are discouraged because of difficulty describing its usage when embedded in APIs. +// 1. Invalid usage help. It is impossible to add specific help for individual usage. In most embedded usages, there are particular +// restrictions like, "must refer only to types A and B" or "UID not honored" or "name must be restricted". +// Those cannot be well described when embedded. +// 2. Inconsistent validation. Because the usages are different, the validation rules are different by usage, which makes it hard for users to predict what will happen. +// 3. We cannot easily change it. Because this type is embedded in many locations, updates to this type +// will affect numerous schemas. Don't make new APIs embed an underspecified API type they do not control. +// +// Instead of using this type, create a locally provided and used type that is well-focused on your reference. +// For example, ServiceReferences for admission registration: https://github.com/kubernetes/api/blob/release-1.17/admissionregistration/v1/types.go#L533 . // +structType=atomic type LocalObjectReference struct { // Name of the referent. // This field is effectively required, but due to backwards compatibility is // allowed to be empty. Instances of this type with an empty value here are // almost certainly wrong. - // TODO: Add other useful fields. apiVersion, kind, uid? // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names // +optional // +default="" @@ -6796,6 +6965,20 @@ type LocalObjectReference struct { // TypedLocalObjectReference contains enough information to let you locate the // typed referenced object inside the same namespace. +// --- +// New uses of this type are discouraged because of difficulty describing its usage when embedded in APIs. +// 1. Invalid usage help. It is impossible to add specific help for individual usage. In most embedded usages, there are particular +// restrictions like, "must refer only to types A and B" or "UID not honored" or "name must be restricted". +// Those cannot be well described when embedded. +// 2. Inconsistent validation. Because the usages are different, the validation rules are different by usage, which makes it hard for users to predict what will happen. +// 3. The fields are both imprecise and overly precise. Kind is not a precise mapping to a URL. This can produce ambiguity +// during interpretation and require a REST mapping. In most cases, the dependency is on the group,resource tuple +// and the version of the actual struct is irrelevant. +// 4. We cannot easily change it. Because this type is embedded in many locations, updates to this type +// will affect numerous schemas. Don't make new APIs embed an underspecified API type they do not control. +// +// Instead of using this type, create a locally provided and used type that is well-focused on your reference. +// For example, ServiceReferences for admission registration: https://github.com/kubernetes/api/blob/release-1.17/admissionregistration/v1/types.go#L533 . // +structType=atomic type TypedLocalObjectReference struct { // APIGroup is the group for the resource being referenced. 
@@ -7729,7 +7912,6 @@ const ( ) // PortStatus represents the error condition of a service port - type PortStatus struct { // Port is the port number of the service port of which status is recorded here Port int32 `json:"port" protobuf:"varint,1,opt,name=port"` diff --git a/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go index 950806ef8..89ce3d230 100644 --- a/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go @@ -117,7 +117,7 @@ func (AzureFileVolumeSource) SwaggerDoc() map[string]string { } var map_Binding = map[string]string{ - "": "Binding ties one object to another; for example, a pod is bound to a node by a scheduler. Deprecated in 1.7, please use the bindings subresource of pods instead.", + "": "Binding ties one object to another; for example, a pod is bound to a node by a scheduler.", "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "target": "The target object that you want to bind to the standard object.", } @@ -127,7 +127,7 @@ func (Binding) SwaggerDoc() map[string]string { } var map_CSIPersistentVolumeSource = map[string]string{ - "": "Represents storage that is managed by an external CSI volume driver (Beta feature)", + "": "Represents storage that is managed by an external CSI volume driver", "driver": "driver is the name of the driver to use for this volume. Required.", "volumeHandle": "volumeHandle is the unique volume name returned by the CSI volume plugin’s CreateVolume to refer to the volume on all subsequent calls. Required.", "readOnly": "readOnly value to pass to ControllerPublishVolumeRequest. Defaults to false (read/write).", @@ -802,6 +802,7 @@ func (GCEPersistentDiskVolumeSource) SwaggerDoc() map[string]string { } var map_GRPCAction = map[string]string{ + "": "GRPCAction specifies an action involving a GRPC service.", "port": "Port number of the gRPC service. Number must be in the range 1 to 65535.", "service": "Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).\n\nIf this is not specified, the default behavior is defined by gRPC.", } @@ -967,10 +968,10 @@ func (Lifecycle) SwaggerDoc() map[string]string { var map_LifecycleHandler = map[string]string{ "": "LifecycleHandler defines a specific action that should be taken in a lifecycle hook. One and only one of the fields, except TCPSocket must be specified.", - "exec": "Exec specifies the action to take.", - "httpGet": "HTTPGet specifies the http request to perform.", - "tcpSocket": "Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept for the backward compatibility. There are no validation of this field and lifecycle hooks will fail in runtime when tcp handler is specified.", - "sleep": "Sleep represents the duration that the container should sleep before being terminated.", + "exec": "Exec specifies a command to execute in the container.", + "httpGet": "HTTPGet specifies an HTTP GET request to perform.", + "tcpSocket": "Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept for backward compatibility. 
There is no validation of this field and lifecycle hooks will fail at runtime when it is specified.", + "sleep": "Sleep represents a duration that the container should sleep.", } func (LifecycleHandler) SwaggerDoc() map[string]string { @@ -1062,7 +1063,7 @@ func (LocalObjectReference) SwaggerDoc() map[string]string { } var map_LocalVolumeSource = map[string]string{ - "": "Local represents directly-attached storage with node affinity (Beta feature)", + "": "Local represents directly-attached storage with node affinity", "path": "path of the full path to the volume on the node. It can be either a directory or block device (disk, partition, ...).", "fsType": "fsType is the filesystem type to mount. It applies only when the Path is a block device. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". The default value is to auto-select a filesystem if unspecified.", } @@ -1104,9 +1105,12 @@ func (Namespace) SwaggerDoc() map[string]string { } var map_NamespaceCondition = map[string]string{ - "": "NamespaceCondition contains details about state of namespace.", - "type": "Type of namespace controller condition.", - "status": "Status of the condition, one of True, False, Unknown.", + "": "NamespaceCondition contains details about state of namespace.", + "type": "Type of namespace controller condition.", + "status": "Status of the condition, one of True, False, Unknown.", + "lastTransitionTime": "Last time the condition transitioned from one status to another.", + "reason": "Unique, one-word, CamelCase reason for the condition's last transition.", + "message": "Human-readable message indicating details about last transition.", } func (NamespaceCondition) SwaggerDoc() map[string]string { @@ -1315,10 +1319,10 @@ var map_NodeStatus = map[string]string{ "capacity": "Capacity represents the total resources of a node. More info: https://kubernetes.io/docs/reference/node/node-status/#capacity", "allocatable": "Allocatable represents the resources of a node that are available for scheduling. Defaults to Capacity.", "phase": "NodePhase is the recently observed lifecycle phase of the node. More info: https://kubernetes.io/docs/concepts/nodes/node/#phase The field is never populated, and now is deprecated.", - "conditions": "Conditions is an array of current observed node conditions. More info: https://kubernetes.io/docs/concepts/nodes/node/#condition", - "addresses": "List of addresses reachable to the node. Queried from cloud provider, if available. More info: https://kubernetes.io/docs/concepts/nodes/node/#addresses Note: This field is declared as mergeable, but the merge key is not sufficiently unique, which can cause data corruption when it is merged. Callers should instead use a full-replacement patch. See https://pr.k8s.io/79391 for an example. Consumers should assume that addresses can change during the lifetime of a Node. However, there are some exceptions where this may not be possible, such as Pods that inherit a Node's address in its own status or consumers of the downward API (status.hostIP).", + "conditions": "Conditions is an array of current observed node conditions. More info: https://kubernetes.io/docs/reference/node/node-status/#condition", + "addresses": "List of addresses reachable to the node. Queried from cloud provider, if available. 
More info: https://kubernetes.io/docs/reference/node/node-status/#addresses Note: This field is declared as mergeable, but the merge key is not sufficiently unique, which can cause data corruption when it is merged. Callers should instead use a full-replacement patch. See https://pr.k8s.io/79391 for an example. Consumers should assume that addresses can change during the lifetime of a Node. However, there are some exceptions where this may not be possible, such as Pods that inherit a Node's address in its own status or consumers of the downward API (status.hostIP).", "daemonEndpoints": "Endpoints of daemons running on the Node.", - "nodeInfo": "Set of ids/uuids to uniquely identify the node. More info: https://kubernetes.io/docs/concepts/nodes/node/#info", + "nodeInfo": "Set of ids/uuids to uniquely identify the node. More info: https://kubernetes.io/docs/reference/node/node-status/#info", "images": "List of container images on this node", "volumesInUse": "List of attachable volumes in use (mounted) by the node.", "volumesAttached": "List of volumes that are attached to the node.", @@ -1398,6 +1402,8 @@ func (PersistentVolumeClaim) SwaggerDoc() map[string]string { var map_PersistentVolumeClaimCondition = map[string]string{ "": "PersistentVolumeClaimCondition contains details about state of pvc", + "type": "Type is the type of the condition. More info: https://kubernetes.io/docs/reference/kubernetes-api/config-and-storage-resources/persistent-volume-claim-v1/#:~:text=set%20to%20%27ResizeStarted%27.-,PersistentVolumeClaimCondition,-contains%20details%20about", + "status": "Status is the status of the condition. Can be True, False, Unknown. More info: https://kubernetes.io/docs/reference/kubernetes-api/config-and-storage-resources/persistent-volume-claim-v1/#:~:text=state%20of%20pvc-,conditions.status,-(string)%2C%20required", "lastProbeTime": "lastProbeTime is the time we probed the condition.", "lastTransitionTime": "lastTransitionTime is the time the condition transitioned from one status to another.", "reason": "reason is a unique, this should be a short, machine understandable string that gives the reason for condition's last transition. If it reports \"Resizing\" that means the underlying persistent volume is being resized.", @@ -1483,28 +1489,28 @@ func (PersistentVolumeList) SwaggerDoc() map[string]string { var map_PersistentVolumeSource = map[string]string{ "": "PersistentVolumeSource is similar to VolumeSource but meant for the administrator who creates PVs. Exactly one of its members must be set.", - "gcePersistentDisk": "gcePersistentDisk represents a GCE Disk resource that is attached to a kubelet's host machine and then exposed to the pod. Provisioned by an admin. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk", - "awsElasticBlockStore": "awsElasticBlockStore represents an AWS Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore", + "gcePersistentDisk": "gcePersistentDisk represents a GCE Disk resource that is attached to a kubelet's host machine and then exposed to the pod. Provisioned by an admin. Deprecated: GCEPersistentDisk is deprecated. All operations for the in-tree gcePersistentDisk type are redirected to the pd.csi.storage.gke.io CSI driver. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk", + "awsElasticBlockStore": "awsElasticBlockStore represents an AWS Disk resource that is attached to a kubelet's host machine and then exposed to the pod. Deprecated: AWSElasticBlockStore is deprecated. All operations for the in-tree awsElasticBlockStore type are redirected to the ebs.csi.aws.com CSI driver. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore", "hostPath": "hostPath represents a directory on the host. Provisioned by a developer or tester. This is useful for single-node development and testing only! On-host storage is not supported in any way and WILL NOT WORK in a multi-node cluster. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath", - "glusterfs": "glusterfs represents a Glusterfs volume that is attached to a host and exposed to the pod. Provisioned by an admin. More info: https://examples.k8s.io/volumes/glusterfs/README.md", + "glusterfs": "glusterfs represents a Glusterfs volume that is attached to a host and exposed to the pod. Provisioned by an admin. Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported. More info: https://examples.k8s.io/volumes/glusterfs/README.md", "nfs": "nfs represents an NFS mount on the host. Provisioned by an admin. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs", - "rbd": "rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. More info: https://examples.k8s.io/volumes/rbd/README.md", + "rbd": "rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported. More info: https://examples.k8s.io/volumes/rbd/README.md", "iscsi": "iscsi represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. Provisioned by an admin.", - "cinder": "cinder represents a cinder volume attached and mounted on kubelets host machine. More info: https://examples.k8s.io/mysql-cinder-pd/README.md", - "cephfs": "cephFS represents a Ceph FS mount on the host that shares a pod's lifetime", + "cinder": "cinder represents a cinder volume attached and mounted on kubelets host machine. Deprecated: Cinder is deprecated. All operations for the in-tree cinder type are redirected to the cinder.csi.openstack.org CSI driver. More info: https://examples.k8s.io/mysql-cinder-pd/README.md", + "cephfs": "cephFS represents a Ceph FS mount on the host that shares a pod's lifetime. Deprecated: CephFS is deprecated and the in-tree cephfs type is no longer supported.", "fc": "fc represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.", - "flocker": "flocker represents a Flocker volume attached to a kubelet's host machine and exposed to the pod for its usage. 
This depends on the Flocker control service being running", - "flexVolume": "flexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin.", - "azureFile": "azureFile represents an Azure File Service mount on the host and bind mount to the pod.", - "vsphereVolume": "vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine", - "quobyte": "quobyte represents a Quobyte mount on the host that shares a pod's lifetime", - "azureDisk": "azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.", - "photonPersistentDisk": "photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine", - "portworxVolume": "portworxVolume represents a portworx volume attached and mounted on kubelets host machine", - "scaleIO": "scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.", + "flocker": "flocker represents a Flocker volume attached to a kubelet's host machine and exposed to the pod for its usage. This depends on the Flocker control service being running. Deprecated: Flocker is deprecated and the in-tree flocker type is no longer supported.", + "flexVolume": "flexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin. Deprecated: FlexVolume is deprecated. Consider using a CSIDriver instead.", + "azureFile": "azureFile represents an Azure File Service mount on the host and bind mount to the pod. Deprecated: AzureFile is deprecated. All operations for the in-tree azureFile type are redirected to the file.csi.azure.com CSI driver.", + "vsphereVolume": "vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine. Deprecated: VsphereVolume is deprecated. All operations for the in-tree vsphereVolume type are redirected to the csi.vsphere.vmware.com CSI driver.", + "quobyte": "quobyte represents a Quobyte mount on the host that shares a pod's lifetime. Deprecated: Quobyte is deprecated and the in-tree quobyte type is no longer supported.", + "azureDisk": "azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. Deprecated: AzureDisk is deprecated. All operations for the in-tree azureDisk type are redirected to the disk.csi.azure.com CSI driver.", + "photonPersistentDisk": "photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine. Deprecated: PhotonPersistentDisk is deprecated and the in-tree photonPersistentDisk type is no longer supported.", + "portworxVolume": "portworxVolume represents a portworx volume attached and mounted on kubelets host machine. Deprecated: PortworxVolume is deprecated. All operations for the in-tree portworxVolume type are redirected to the pxd.portworx.com CSI driver when the CSIMigrationPortworx feature-gate is on.", + "scaleIO": "scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. 
Deprecated: ScaleIO is deprecated and the in-tree scaleIO type is no longer supported.", "local": "local represents directly-attached storage with node affinity", - "storageos": "storageOS represents a StorageOS volume that is attached to the kubelet's host machine and mounted into the pod More info: https://examples.k8s.io/volumes/storageos/README.md", - "csi": "csi represents storage that is handled by an external CSI driver (Beta feature).", + "storageos": "storageOS represents a StorageOS volume that is attached to the kubelet's host machine and mounted into the pod. Deprecated: StorageOS is deprecated and the in-tree storageos type is no longer supported. More info: https://examples.k8s.io/volumes/storageos/README.md", + "csi": "csi represents storage that is handled by an external CSI driver.", } func (PersistentVolumeSource) SwaggerDoc() map[string]string { @@ -1634,8 +1640,9 @@ func (PodDNSConfig) SwaggerDoc() map[string]string { } var map_PodDNSConfigOption = map[string]string{ - "": "PodDNSConfigOption defines DNS resolver options of a pod.", - "name": "Required.", + "": "PodDNSConfigOption defines DNS resolver options of a pod.", + "name": "Name is this DNS resolver option's name. Required.", + "value": "Value is this DNS resolver option's value.", } func (PodDNSConfigOption) SwaggerDoc() map[string]string { @@ -1683,9 +1690,10 @@ var map_PodLogOptions = map[string]string{ "sinceSeconds": "A relative time in seconds before the current time from which to show logs. If this value precedes the time a pod was started, only logs since the pod start will be returned. If this value is in the future, no logs will be returned. Only one of sinceSeconds or sinceTime may be specified.", "sinceTime": "An RFC3339 timestamp from which to show logs. If this value precedes the time a pod was started, only logs since the pod start will be returned. If this value is in the future, no logs will be returned. Only one of sinceSeconds or sinceTime may be specified.", "timestamps": "If true, add an RFC3339 or RFC3339Nano timestamp at the beginning of every line of log output. Defaults to false.", - "tailLines": "If set, the number of lines from the end of the logs to show. If not specified, logs are shown from the creation of the container or sinceSeconds or sinceTime", + "tailLines": "If set, the number of lines from the end of the logs to show. If not specified, logs are shown from the creation of the container or sinceSeconds or sinceTime. Note that when \"TailLines\" is specified, \"Stream\" can only be set to nil or \"All\".", "limitBytes": "If set, the number of bytes to read from the server before terminating the log output. This may not display a complete final line of logging, and may return slightly more or slightly less than the specified limit.", "insecureSkipTLSVerifyBackend": "insecureSkipTLSVerifyBackend indicates that the apiserver should not confirm the validity of the serving certificate of the backend it is connecting to. This will make the HTTPS connection between the apiserver and the backend insecure. This means the apiserver cannot verify the log data it is receiving came from the real kubelet. If the kubelet is configured to verify the apiserver's TLS credentials, it does not mean the connection to the real kubelet is vulnerable to a man in the middle attack (e.g. an attacker could not intercept the actual log data coming from the real kubelet).", + "stream": "Specify which container log stream to return to the client. Acceptable values are \"All\", \"Stdout\" and \"Stderr\". 
If not specified, \"All\" is used, and both stdout and stderr are returned interleaved. Note that when \"TailLines\" is specified, \"Stream\" can only be set to nil or \"All\".", } func (PodLogOptions) SwaggerDoc() map[string]string { @@ -1772,6 +1780,7 @@ var map_PodSecurityContext = map[string]string{ "fsGroupChangePolicy": "fsGroupChangePolicy defines behavior of changing ownership and permission of the volume before being exposed inside Pod. This field will only apply to volume types which support fsGroup based ownership(and permissions). It will have no effect on ephemeral volume types such as: secret, configmaps and emptydir. Valid values are \"OnRootMismatch\" and \"Always\". If not specified, \"Always\" is used. Note that this field cannot be set when spec.os.name is windows.", "seccompProfile": "The seccomp options to use by the containers in this pod. Note that this field cannot be set when spec.os.name is windows.", "appArmorProfile": "appArmorProfile is the AppArmor options to use by the containers in this pod. Note that this field cannot be set when spec.os.name is windows.", + "seLinuxChangePolicy": "seLinuxChangePolicy defines how the container's SELinux label is applied to all volumes used by the Pod. It has no effect on nodes that do not support SELinux or to volumes does not support SELinux. Valid values are \"MountOption\" and \"Recursive\".\n\n\"Recursive\" means relabeling of all files on all Pod volumes by the container runtime. This may be slow for large volumes, but allows mixing privileged and unprivileged Pods sharing the same volume on the same node.\n\n\"MountOption\" mounts all eligible Pod volumes with `-o context` mount option. This requires all Pods that share the same volume to use the same SELinux label. It is not possible to share the same volume among privileged and unprivileged Pods. Eligible volumes are in-tree FibreChannel and iSCSI volumes, and all CSI volumes whose CSI driver announces SELinux support by setting spec.seLinuxMount: true in their CSIDriver instance. Other volumes are always re-labelled recursively. \"MountOption\" value is allowed only when SELinuxMount feature gate is enabled.\n\nIf not specified and SELinuxMount feature gate is enabled, \"MountOption\" is used. If not specified and SELinuxMount feature gate is disabled, \"MountOption\" is used for ReadWriteOncePod volumes and \"Recursive\" for all other volumes.\n\nThis field affects only Pods that have SELinux label set, either in PodSecurityContext or in SecurityContext of all containers.\n\nAll Pods that use the same volume should use the same seLinuxChangePolicy, otherwise some pods can get stuck in ContainerCreating state. Note that this field cannot be set when spec.os.name is windows.", } func (PodSecurityContext) SwaggerDoc() map[string]string { @@ -1828,6 +1837,7 @@ var map_PodSpec = map[string]string{ "hostUsers": "Use the host's user namespace. Optional: Default to true. If set to true or not present, the pod will be run in the host user namespace, useful for when the pod needs a feature only available to the host user namespace, such as loading a kernel module with CAP_SYS_MODULE. When set to false, a new userns is created for the pod. Setting false is useful for mitigating container breakout vulnerabilities even allowing users to run their containers as root without actually having root privileges on the host. 
This field is alpha-level and is only honored by servers that enable the UserNamespacesSupport feature.", "schedulingGates": "SchedulingGates is an opaque list of values that if specified will block scheduling the pod. If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the scheduler will not attempt to schedule the pod.\n\nSchedulingGates can only be set at pod creation time, and be removed only afterwards.", "resourceClaims": "ResourceClaims defines which ResourceClaims must be allocated and reserved before the Pod is allowed to start. The resources will be made available to those containers which consume them by name.\n\nThis is an alpha field and requires enabling the DynamicResourceAllocation feature gate.\n\nThis field is immutable.", + "resources": "Resources is the total amount of CPU and Memory resources required by all containers in the pod. It supports specifying Requests and Limits for \"cpu\" and \"memory\" resource names only. ResourceClaims are not supported.\n\nThis field enables fine-grained control over resource allocation for the entire pod, allowing resource sharing among containers in a pod.\n\nThis is an alpha field and requires enabling the PodLevelResources feature gate.", } func (PodSpec) SwaggerDoc() map[string]string { @@ -1846,10 +1856,10 @@ var map_PodStatus = map[string]string{ "podIP": "podIP address allocated to the pod. Routable at least within the cluster. Empty if not yet allocated.", "podIPs": "podIPs holds the IP addresses allocated to the pod. If this field is specified, the 0th entry must match the podIP field. Pods may be allocated at most 1 value for each of IPv4 and IPv6. This list is empty if no IPs have been allocated yet.", "startTime": "RFC 3339 date and time at which the object was acknowledged by the Kubelet. This is before the Kubelet pulled the container image(s) for the pod.", - "initContainerStatuses": "The list has one entry per init container in the manifest. The most recent successful init container will have ready = true, the most recently started container will have startTime set. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status", - "containerStatuses": "The list has one entry per container in the manifest. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status", + "initContainerStatuses": "Statuses of init containers in this pod. The most recent successful non-restartable init container will have ready = true, the most recently started container will have startTime set. Each init container in the pod should have at most one status in this list, and all statuses should be for containers in the pod. However this is not enforced. If a status for a non-existent container is present in the list, or the list has duplicate names, the behavior of various Kubernetes components is not defined and those statuses might be ignored. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#pod-and-container-status", + "containerStatuses": "Statuses of containers in this pod. Each container in the pod should have at most one status in this list, and all statuses should be for containers in the pod. However this is not enforced. If a status for a non-existent container is present in the list, or the list has duplicate names, the behavior of various Kubernetes components is not defined and those statuses might be ignored. 
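The new pod-level resources field on PodSpec (documented above, alpha behind the PodLevelResources feature gate) accepts requests and limits for the cpu and memory resource names only and is shared by all containers in the pod. A hedged sketch of such a manifest, with placeholder names and values:

apiVersion: v1
kind: Pod
metadata:
  name: pod-level-resources-demo      # placeholder name
spec:
  resources:                          # pod-level budget shared among the containers below
    requests:
      cpu: "500m"
      memory: 256Mi
    limits:
      cpu: "1"
      memory: 512Mi
  containers:
  - name: app
    image: registry.example.com/app:latest       # placeholder image
  - name: sidecar
    image: registry.example.com/sidecar:latest   # placeholder image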
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status", "qosClass": "The Quality of Service (QOS) classification assigned to the pod based on resource requirements See PodQOSClass type for available QOS classes More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-qos/#quality-of-service-classes", - "ephemeralContainerStatuses": "Status for any ephemeral containers that have run in this pod.", + "ephemeralContainerStatuses": "Statuses for any ephemeral containers that have run in this pod. Each ephemeral container in the pod should have at most one status in this list, and all statuses should be for containers in the pod. However this is not enforced. If a status for a non-existent container is present in the list, or the list has duplicate names, the behavior of various Kubernetes components is not defined and those statuses might be ignored. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status", "resize": "Status of resources resize desired for pod's containers. It is empty if no resources resize is pending. Any changes to container resources will automatically set this to \"Proposed\"", "resourceClaimStatuses": "Status of resource claims.", } @@ -1899,6 +1909,7 @@ func (PodTemplateSpec) SwaggerDoc() map[string]string { } var map_PortStatus = map[string]string{ + "": "PortStatus represents the error condition of a service port", "port": "Port is the port number of the service port of which status is recorded here", "protocol": "Protocol is the protocol of the service port of which status is recorded here The supported values are: \"TCP\", \"UDP\", \"SCTP\"", "error": "Error is to record the problem with the service port The format of the error shall comply with the following rules: - built-in error values shall be specified in this file and those shall use\n CamelCase names\n- cloud provider specific error values must have names that comply with the\n format foo.example.com/CamelCase.", @@ -1966,10 +1977,10 @@ func (Probe) SwaggerDoc() map[string]string { var map_ProbeHandler = map[string]string{ "": "ProbeHandler defines a specific action that should be taken in a probe. One and only one of the fields must be specified.", - "exec": "Exec specifies the action to take.", - "httpGet": "HTTPGet specifies the http request to perform.", - "tcpSocket": "TCPSocket specifies an action involving a TCP port.", - "grpc": "GRPC specifies an action involving a GRPC port.", + "exec": "Exec specifies a command to execute in the container.", + "httpGet": "HTTPGet specifies an HTTP GET request to perform.", + "tcpSocket": "TCPSocket specifies a connection to a TCP port.", + "grpc": "GRPC specifies a GRPC HealthCheckRequest.", } func (ProbeHandler) SwaggerDoc() map[string]string { @@ -2125,7 +2136,7 @@ func (ResourceFieldSelector) SwaggerDoc() map[string]string { } var map_ResourceHealth = map[string]string{ - "": "ResourceHealth represents the health of a resource. It has the latest device health information. This is a part of KEP https://kep.k8s.io/4680 and historical health changes are planned to be added in future iterations of a KEP.", + "": "ResourceHealth represents the health of a resource. It has the latest device health information. This is a part of KEP https://kep.k8s.io/4680.", "resourceID": "ResourceID is the unique identifier of the resource. See the ResourceID type for more information.", "health": "Health of the resource. 
can be one of:\n - Healthy: operates as normal\n - Unhealthy: reported unhealthy. We consider this a temporary health issue\n since we do not have a mechanism today to distinguish\n temporary and permanent issues.\n - Unknown: The status cannot be determined.\n For example, Device Plugin got unregistered and hasn't been re-registered since.\n\nIn future we may want to introduce the PermanentlyUnhealthy Status.", } @@ -2188,8 +2199,9 @@ func (ResourceRequirements) SwaggerDoc() map[string]string { } var map_ResourceStatus = map[string]string{ - "name": "Name of the resource. Must be unique within the pod and match one of the resources from the pod spec.", - "resources": "List of unique Resources health. Each element in the list contains an unique resource ID and resource health. At a minimum, ResourceID must uniquely identify the Resource allocated to the Pod on the Node for the lifetime of a Pod. See ResourceID type for it's definition.", + "": "ResourceStatus represents the status of a single resource allocated to a Pod.", + "name": "Name of the resource. Must be unique within the pod and in case of non-DRA resource, match one of the resources from the pod spec. For DRA resources, the value must be \"claim:/\". When this status is reported about a container, the \"claim_name\" and \"request\" must match one of the claims of this container.", + "resources": "List of unique resources health. Each element in the list contains an unique resource ID and its health. At a minimum, for the lifetime of a Pod, resource ID must uniquely identify the resource allocated to the Pod on the Node. If other Pod on the same Node reports the status with the same resource ID, it must be the same resource they share. See ResourceID type definition for a specific format it has in various use cases.", } func (ResourceStatus) SwaggerDoc() map[string]string { @@ -2391,7 +2403,7 @@ func (Service) SwaggerDoc() map[string]string { var map_ServiceAccount = map[string]string{ "": "ServiceAccount binds together: * a name, understood by users, and perhaps by peripheral systems, for an identity * a principal that can be authenticated and authorized * a set of secrets", "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "secrets": "Secrets is a list of the secrets in the same namespace that pods running using this ServiceAccount are allowed to use. Pods are only limited to this list if this service account has a \"kubernetes.io/enforce-mountable-secrets\" annotation set to \"true\". This field should not be used to find auto-generated service account token secrets for use outside of pods. Instead, tokens can be requested directly using the TokenRequest API, or service account token secrets can be manually created. More info: https://kubernetes.io/docs/concepts/configuration/secret", + "secrets": "Secrets is a list of the secrets in the same namespace that pods running using this ServiceAccount are allowed to use. Pods are only limited to this list if this service account has a \"kubernetes.io/enforce-mountable-secrets\" annotation set to \"true\". The \"kubernetes.io/enforce-mountable-secrets\" annotation is deprecated since v1.32. Prefer separate namespaces to isolate access to mounted secrets. This field should not be used to find auto-generated service account token secrets for use outside of pods. Instead, tokens can be requested directly using the TokenRequest API, or service account token secrets can be manually created. 
More info: https://kubernetes.io/docs/concepts/configuration/secret", "imagePullSecrets": "ImagePullSecrets is a list of references to secrets in the same namespace to use for pulling any images in pods that reference this ServiceAccount. ImagePullSecrets are distinct from Secrets because Secrets can be mounted in the pod, but ImagePullSecrets are only accessed by the kubelet. More info: https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod", "automountServiceAccountToken": "AutomountServiceAccountToken indicates whether pods running as this service account should have an API token automatically mounted. Can be overridden at the pod level.", } @@ -2475,7 +2487,7 @@ var map_ServiceSpec = map[string]string{ "allocateLoadBalancerNodePorts": "allocateLoadBalancerNodePorts defines if NodePorts will be automatically allocated for services with type LoadBalancer. Default is \"true\". It may be set to \"false\" if the cluster load-balancer does not rely on NodePorts. If the caller requests specific NodePorts (by specifying a value), those requests will be respected, regardless of this field. This field may only be set for services with type LoadBalancer and will be cleared if the type is changed to any other type.", "loadBalancerClass": "loadBalancerClass is the class of the load balancer implementation this Service belongs to. If specified, the value of this field must be a label-style identifier, with an optional prefix, e.g. \"internal-vip\" or \"example.com/internal-vip\". Unprefixed names are reserved for end-users. This field can only be set when the Service type is 'LoadBalancer'. If not set, the default load balancer implementation is used, today this is typically done through the cloud provider integration, but should apply for any default implementation. If set, it is assumed that a load balancer implementation is watching for Services with a matching class. Any default load balancer implementation (e.g. cloud providers) should ignore Services that set this field. This field can only be set when creating or updating a Service to type 'LoadBalancer'. Once set, it can not be changed. This field will be wiped when a service is updated to a non 'LoadBalancer' type.", "internalTrafficPolicy": "InternalTrafficPolicy describes how nodes distribute service traffic they receive on the ClusterIP. If set to \"Local\", the proxy will assume that pods only want to talk to endpoints of the service on the same node as the pod, dropping the traffic if there are no local endpoints. The default value, \"Cluster\", uses the standard behavior of routing to all endpoints evenly (possibly modified by topology and other features).", - "trafficDistribution": "TrafficDistribution offers a way to express preferences for how traffic is distributed to Service endpoints. Implementations can use this field as a hint, but are not required to guarantee strict adherence. If the field is not set, the implementation will apply its default routing strategy. If set to \"PreferClose\", implementations should prioritize endpoints that are topologically close (e.g., same zone). This is an alpha field and requires enabling ServiceTrafficDistribution feature.", + "trafficDistribution": "TrafficDistribution offers a way to express preferences for how traffic is distributed to Service endpoints. Implementations can use this field as a hint, but are not required to guarantee strict adherence. If the field is not set, the implementation will apply its default routing strategy. 
If set to \"PreferClose\", implementations should prioritize endpoints that are topologically close (e.g., same zone). This is a beta field and requires enabling ServiceTrafficDistribution feature.", } func (ServiceSpec) SwaggerDoc() map[string]string { @@ -2628,6 +2640,7 @@ func (TypedLocalObjectReference) SwaggerDoc() map[string]string { } var map_TypedObjectReference = map[string]string{ + "": "TypedObjectReference contains enough information to let you locate the typed referenced object", "apiGroup": "APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required.", "kind": "Kind is the type of resource being referenced", "name": "Name is the name of resource being referenced", @@ -2720,32 +2733,32 @@ var map_VolumeSource = map[string]string{ "": "Represents the source of a volume to mount. Only one of its members may be specified.", "hostPath": "hostPath represents a pre-existing file or directory on the host machine that is directly exposed to the container. This is generally used for system agents or other privileged things that are allowed to see the host machine. Most containers will NOT need this. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath", "emptyDir": "emptyDir represents a temporary directory that shares a pod's lifetime. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir", - "gcePersistentDisk": "gcePersistentDisk represents a GCE Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk", - "awsElasticBlockStore": "awsElasticBlockStore represents an AWS Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore", - "gitRepo": "gitRepo represents a git repository at a particular revision. DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod's container.", + "gcePersistentDisk": "gcePersistentDisk represents a GCE Disk resource that is attached to a kubelet's host machine and then exposed to the pod. Deprecated: GCEPersistentDisk is deprecated. All operations for the in-tree gcePersistentDisk type are redirected to the pd.csi.storage.gke.io CSI driver. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk", + "awsElasticBlockStore": "awsElasticBlockStore represents an AWS Disk resource that is attached to a kubelet's host machine and then exposed to the pod. Deprecated: AWSElasticBlockStore is deprecated. All operations for the in-tree awsElasticBlockStore type are redirected to the ebs.csi.aws.com CSI driver. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore", + "gitRepo": "gitRepo represents a git repository at a particular revision. Deprecated: GitRepo is deprecated. To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod's container.", "secret": "secret represents a secret that should populate this volume. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#secret", "nfs": "nfs represents an NFS mount on the host that shares a pod's lifetime More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs", "iscsi": "iscsi represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://examples.k8s.io/volumes/iscsi/README.md", - "glusterfs": "glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. More info: https://examples.k8s.io/volumes/glusterfs/README.md", + "glusterfs": "glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported. More info: https://examples.k8s.io/volumes/glusterfs/README.md", "persistentVolumeClaim": "persistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims", - "rbd": "rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. More info: https://examples.k8s.io/volumes/rbd/README.md", - "flexVolume": "flexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin.", - "cinder": "cinder represents a cinder volume attached and mounted on kubelets host machine. More info: https://examples.k8s.io/mysql-cinder-pd/README.md", - "cephfs": "cephFS represents a Ceph FS mount on the host that shares a pod's lifetime", - "flocker": "flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running", + "rbd": "rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported. More info: https://examples.k8s.io/volumes/rbd/README.md", + "flexVolume": "flexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin. Deprecated: FlexVolume is deprecated. Consider using a CSIDriver instead.", + "cinder": "cinder represents a cinder volume attached and mounted on kubelets host machine. Deprecated: Cinder is deprecated. All operations for the in-tree cinder type are redirected to the cinder.csi.openstack.org CSI driver. More info: https://examples.k8s.io/mysql-cinder-pd/README.md", + "cephfs": "cephFS represents a Ceph FS mount on the host that shares a pod's lifetime. Deprecated: CephFS is deprecated and the in-tree cephfs type is no longer supported.", + "flocker": "flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running. Deprecated: Flocker is deprecated and the in-tree flocker type is no longer supported.", "downwardAPI": "downwardAPI represents downward API about the pod that should populate this volume", "fc": "fc represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.", - "azureFile": "azureFile represents an Azure File Service mount on the host and bind mount to the pod.", + "azureFile": "azureFile represents an Azure File Service mount on the host and bind mount to the pod. Deprecated: AzureFile is deprecated. 
All operations for the in-tree azureFile type are redirected to the file.csi.azure.com CSI driver.", "configMap": "configMap represents a configMap that should populate this volume", - "vsphereVolume": "vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine", - "quobyte": "quobyte represents a Quobyte mount on the host that shares a pod's lifetime", - "azureDisk": "azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.", - "photonPersistentDisk": "photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine", + "vsphereVolume": "vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine. Deprecated: VsphereVolume is deprecated. All operations for the in-tree vsphereVolume type are redirected to the csi.vsphere.vmware.com CSI driver.", + "quobyte": "quobyte represents a Quobyte mount on the host that shares a pod's lifetime. Deprecated: Quobyte is deprecated and the in-tree quobyte type is no longer supported.", + "azureDisk": "azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. Deprecated: AzureDisk is deprecated. All operations for the in-tree azureDisk type are redirected to the disk.csi.azure.com CSI driver.", + "photonPersistentDisk": "photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine. Deprecated: PhotonPersistentDisk is deprecated and the in-tree photonPersistentDisk type is no longer supported.", "projected": "projected items for all in one resources secrets, configmaps, and downward API", - "portworxVolume": "portworxVolume represents a portworx volume attached and mounted on kubelets host machine", - "scaleIO": "scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.", - "storageos": "storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes.", - "csi": "csi (Container Storage Interface) represents ephemeral storage that is handled by certain external CSI drivers (Beta feature).", + "portworxVolume": "portworxVolume represents a portworx volume attached and mounted on kubelets host machine. Deprecated: PortworxVolume is deprecated. All operations for the in-tree portworxVolume type are redirected to the pxd.portworx.com CSI driver when the CSIMigrationPortworx feature-gate is on.", + "scaleIO": "scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. Deprecated: ScaleIO is deprecated and the in-tree scaleIO type is no longer supported.", + "storageos": "storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes. Deprecated: StorageOS is deprecated and the in-tree storageos type is no longer supported.", + "csi": "csi (Container Storage Interface) represents ephemeral storage that is handled by certain external CSI drivers.", "ephemeral": "ephemeral represents a volume that is handled by a cluster storage driver. 
The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts, and deleted when the pod is removed.\n\nUse this if: a) the volume is only needed while the pod runs, b) features of normal volumes like restoring from snapshot or capacity\n tracking are needed,\nc) the storage driver is specified through a storage class, and d) the storage driver supports dynamic volume provisioning through\n a PersistentVolumeClaim (see EphemeralVolumeSource for more\n information on the connection between this volume type\n and PersistentVolumeClaim).\n\nUse PersistentVolumeClaim or one of the vendor-specific APIs for volumes that persist for longer than the lifecycle of an individual pod.\n\nUse CSI for light-weight local ephemeral volumes if the CSI driver is meant to be used that way - see the documentation of the driver for more information.\n\nA pod can use both types of ephemeral volumes and persistent volumes at the same time.", "image": "image represents an OCI object (a container image or artifact) pulled and mounted on the kubelet's host machine. The volume is resolved at pod startup depending on which PullPolicy value is provided:\n\n- Always: the kubelet always attempts to pull the reference. Container creation will fail If the pull fails. - Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present. - IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails.\n\nThe volume gets re-resolved if the pod gets deleted and recreated, which means that new remote content will become available on pod recreation. A failure to resolve or pull the image during pod startup will block containers from starting and may add significant latency. Failures will be retried using normal volume backoff and will be reported on the pod reason and message. The types of objects that may be mounted by this volume are defined by the container runtime implementation on a host machine and at minimum must include all valid types supported by the container image field. The OCI object gets mounted in a single directory (spec.containers[*].volumeMounts.mountPath) by merging the manifest layers in the same way as for container images. The volume will be mounted read-only (ro) and non-executable files (noexec). Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath). 
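The image volume source documented in the VolumeSource hunk above mounts an OCI image or artifact read-only (and noexec) into a pod. A rough sketch, assuming the ImageVolumeSource subfields are named reference and pullPolicy (those names are not shown verbatim in the doc string above), with placeholder names throughout:

apiVersion: v1
kind: Pod
metadata:
  name: image-volume-demo             # placeholder name
spec:
  containers:
  - name: app
    image: registry.example.com/app:latest       # placeholder image
    volumeMounts:
    - name: model-data
      mountPath: /data                # mounted read-only and non-executable per the doc string
  volumes:
  - name: model-data
    image:
      reference: registry.example.com/models/foo:v1   # assumed subfield name; placeholder artifact
      pullPolicy: IfNotPresent                         # one of Always, Never, IfNotPresent per the doc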
The field spec.securityContext.fsGroupChangePolicy has no effect on this volume type.", } diff --git a/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go index 3d23f7f62..3f669092e 100644 --- a/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go @@ -3935,6 +3935,11 @@ func (in *PodLogOptions) DeepCopyInto(out *PodLogOptions) { *out = new(int64) **out = **in } + if in.Stream != nil { + in, out := &in.Stream, &out.Stream + *out = new(string) + **out = **in + } return } @@ -4169,6 +4174,11 @@ func (in *PodSecurityContext) DeepCopyInto(out *PodSecurityContext) { *out = new(AppArmorProfile) (*in).DeepCopyInto(*out) } + if in.SELinuxChangePolicy != nil { + in, out := &in.SELinuxChangePolicy, &out.SELinuxChangePolicy + *out = new(PodSELinuxChangePolicy) + **out = **in + } return } @@ -4361,6 +4371,11 @@ func (in *PodSpec) DeepCopyInto(out *PodSpec) { (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = new(ResourceRequirements) + (*in).DeepCopyInto(*out) + } return } diff --git a/vendor/k8s.io/api/resource/v1alpha3/doc.go b/vendor/k8s.io/api/resource/v1alpha3/doc.go index aeb66561f..ffc21307d 100644 --- a/vendor/k8s.io/api/resource/v1alpha3/doc.go +++ b/vendor/k8s.io/api/resource/v1alpha3/doc.go @@ -17,7 +17,7 @@ limitations under the License. // +k8s:openapi-gen=true // +k8s:deepcopy-gen=package // +k8s:protobuf-gen=package - +// +k8s:prerelease-lifecycle-gen=true // +groupName=resource.k8s.io // Package v1alpha3 is the v1alpha3 version of the resource API. diff --git a/vendor/k8s.io/api/resource/v1alpha3/generated.pb.go b/vendor/k8s.io/api/resource/v1alpha3/generated.pb.go index 4ac01cc6f..540f7b818 100644 --- a/vendor/k8s.io/api/resource/v1alpha3/generated.pb.go +++ b/vendor/k8s.io/api/resource/v1alpha3/generated.pb.go @@ -26,8 +26,9 @@ import ( proto "github.com/gogo/protobuf/proto" github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" - v1 "k8s.io/api/core/v1" + v11 "k8s.io/api/core/v1" resource "k8s.io/apimachinery/pkg/api/resource" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" math "math" math_bits "math/bits" @@ -48,10 +49,38 @@ var _ = math.Inf // proto package needs to be updated. 
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +func (m *AllocatedDeviceStatus) Reset() { *m = AllocatedDeviceStatus{} } +func (*AllocatedDeviceStatus) ProtoMessage() {} +func (*AllocatedDeviceStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_66649ee9bbcd89d2, []int{0} +} +func (m *AllocatedDeviceStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AllocatedDeviceStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *AllocatedDeviceStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_AllocatedDeviceStatus.Merge(m, src) +} +func (m *AllocatedDeviceStatus) XXX_Size() int { + return m.Size() +} +func (m *AllocatedDeviceStatus) XXX_DiscardUnknown() { + xxx_messageInfo_AllocatedDeviceStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_AllocatedDeviceStatus proto.InternalMessageInfo + func (m *AllocationResult) Reset() { *m = AllocationResult{} } func (*AllocationResult) ProtoMessage() {} func (*AllocationResult) Descriptor() ([]byte, []int) { - return fileDescriptor_66649ee9bbcd89d2, []int{0} + return fileDescriptor_66649ee9bbcd89d2, []int{1} } func (m *AllocationResult) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -79,7 +108,7 @@ var xxx_messageInfo_AllocationResult proto.InternalMessageInfo func (m *BasicDevice) Reset() { *m = BasicDevice{} } func (*BasicDevice) ProtoMessage() {} func (*BasicDevice) Descriptor() ([]byte, []int) { - return fileDescriptor_66649ee9bbcd89d2, []int{1} + return fileDescriptor_66649ee9bbcd89d2, []int{2} } func (m *BasicDevice) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -107,7 +136,7 @@ var xxx_messageInfo_BasicDevice proto.InternalMessageInfo func (m *CELDeviceSelector) Reset() { *m = CELDeviceSelector{} } func (*CELDeviceSelector) ProtoMessage() {} func (*CELDeviceSelector) Descriptor() ([]byte, []int) { - return fileDescriptor_66649ee9bbcd89d2, []int{2} + return fileDescriptor_66649ee9bbcd89d2, []int{3} } func (m *CELDeviceSelector) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -135,7 +164,7 @@ var xxx_messageInfo_CELDeviceSelector proto.InternalMessageInfo func (m *Device) Reset() { *m = Device{} } func (*Device) ProtoMessage() {} func (*Device) Descriptor() ([]byte, []int) { - return fileDescriptor_66649ee9bbcd89d2, []int{3} + return fileDescriptor_66649ee9bbcd89d2, []int{4} } func (m *Device) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -163,7 +192,7 @@ var xxx_messageInfo_Device proto.InternalMessageInfo func (m *DeviceAllocationConfiguration) Reset() { *m = DeviceAllocationConfiguration{} } func (*DeviceAllocationConfiguration) ProtoMessage() {} func (*DeviceAllocationConfiguration) Descriptor() ([]byte, []int) { - return fileDescriptor_66649ee9bbcd89d2, []int{4} + return fileDescriptor_66649ee9bbcd89d2, []int{5} } func (m *DeviceAllocationConfiguration) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -191,7 +220,7 @@ var xxx_messageInfo_DeviceAllocationConfiguration proto.InternalMessageInfo func (m *DeviceAllocationResult) Reset() { *m = DeviceAllocationResult{} } func (*DeviceAllocationResult) ProtoMessage() {} func (*DeviceAllocationResult) Descriptor() ([]byte, []int) { - return fileDescriptor_66649ee9bbcd89d2, []int{5} + return fileDescriptor_66649ee9bbcd89d2, []int{6} } func (m *DeviceAllocationResult) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -219,7 +248,7 @@ var 
xxx_messageInfo_DeviceAllocationResult proto.InternalMessageInfo func (m *DeviceAttribute) Reset() { *m = DeviceAttribute{} } func (*DeviceAttribute) ProtoMessage() {} func (*DeviceAttribute) Descriptor() ([]byte, []int) { - return fileDescriptor_66649ee9bbcd89d2, []int{6} + return fileDescriptor_66649ee9bbcd89d2, []int{7} } func (m *DeviceAttribute) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -247,7 +276,7 @@ var xxx_messageInfo_DeviceAttribute proto.InternalMessageInfo func (m *DeviceClaim) Reset() { *m = DeviceClaim{} } func (*DeviceClaim) ProtoMessage() {} func (*DeviceClaim) Descriptor() ([]byte, []int) { - return fileDescriptor_66649ee9bbcd89d2, []int{7} + return fileDescriptor_66649ee9bbcd89d2, []int{8} } func (m *DeviceClaim) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -275,7 +304,7 @@ var xxx_messageInfo_DeviceClaim proto.InternalMessageInfo func (m *DeviceClaimConfiguration) Reset() { *m = DeviceClaimConfiguration{} } func (*DeviceClaimConfiguration) ProtoMessage() {} func (*DeviceClaimConfiguration) Descriptor() ([]byte, []int) { - return fileDescriptor_66649ee9bbcd89d2, []int{8} + return fileDescriptor_66649ee9bbcd89d2, []int{9} } func (m *DeviceClaimConfiguration) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -303,7 +332,7 @@ var xxx_messageInfo_DeviceClaimConfiguration proto.InternalMessageInfo func (m *DeviceClass) Reset() { *m = DeviceClass{} } func (*DeviceClass) ProtoMessage() {} func (*DeviceClass) Descriptor() ([]byte, []int) { - return fileDescriptor_66649ee9bbcd89d2, []int{9} + return fileDescriptor_66649ee9bbcd89d2, []int{10} } func (m *DeviceClass) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -331,7 +360,7 @@ var xxx_messageInfo_DeviceClass proto.InternalMessageInfo func (m *DeviceClassConfiguration) Reset() { *m = DeviceClassConfiguration{} } func (*DeviceClassConfiguration) ProtoMessage() {} func (*DeviceClassConfiguration) Descriptor() ([]byte, []int) { - return fileDescriptor_66649ee9bbcd89d2, []int{10} + return fileDescriptor_66649ee9bbcd89d2, []int{11} } func (m *DeviceClassConfiguration) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -359,7 +388,7 @@ var xxx_messageInfo_DeviceClassConfiguration proto.InternalMessageInfo func (m *DeviceClassList) Reset() { *m = DeviceClassList{} } func (*DeviceClassList) ProtoMessage() {} func (*DeviceClassList) Descriptor() ([]byte, []int) { - return fileDescriptor_66649ee9bbcd89d2, []int{11} + return fileDescriptor_66649ee9bbcd89d2, []int{12} } func (m *DeviceClassList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -387,7 +416,7 @@ var xxx_messageInfo_DeviceClassList proto.InternalMessageInfo func (m *DeviceClassSpec) Reset() { *m = DeviceClassSpec{} } func (*DeviceClassSpec) ProtoMessage() {} func (*DeviceClassSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_66649ee9bbcd89d2, []int{12} + return fileDescriptor_66649ee9bbcd89d2, []int{13} } func (m *DeviceClassSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -415,7 +444,7 @@ var xxx_messageInfo_DeviceClassSpec proto.InternalMessageInfo func (m *DeviceConfiguration) Reset() { *m = DeviceConfiguration{} } func (*DeviceConfiguration) ProtoMessage() {} func (*DeviceConfiguration) Descriptor() ([]byte, []int) { - return fileDescriptor_66649ee9bbcd89d2, []int{13} + return fileDescriptor_66649ee9bbcd89d2, []int{14} } func (m *DeviceConfiguration) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -443,7 +472,7 @@ var xxx_messageInfo_DeviceConfiguration proto.InternalMessageInfo func 
(m *DeviceConstraint) Reset() { *m = DeviceConstraint{} } func (*DeviceConstraint) ProtoMessage() {} func (*DeviceConstraint) Descriptor() ([]byte, []int) { - return fileDescriptor_66649ee9bbcd89d2, []int{14} + return fileDescriptor_66649ee9bbcd89d2, []int{15} } func (m *DeviceConstraint) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -471,7 +500,7 @@ var xxx_messageInfo_DeviceConstraint proto.InternalMessageInfo func (m *DeviceRequest) Reset() { *m = DeviceRequest{} } func (*DeviceRequest) ProtoMessage() {} func (*DeviceRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_66649ee9bbcd89d2, []int{15} + return fileDescriptor_66649ee9bbcd89d2, []int{16} } func (m *DeviceRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -499,7 +528,7 @@ var xxx_messageInfo_DeviceRequest proto.InternalMessageInfo func (m *DeviceRequestAllocationResult) Reset() { *m = DeviceRequestAllocationResult{} } func (*DeviceRequestAllocationResult) ProtoMessage() {} func (*DeviceRequestAllocationResult) Descriptor() ([]byte, []int) { - return fileDescriptor_66649ee9bbcd89d2, []int{16} + return fileDescriptor_66649ee9bbcd89d2, []int{17} } func (m *DeviceRequestAllocationResult) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -527,7 +556,7 @@ var xxx_messageInfo_DeviceRequestAllocationResult proto.InternalMessageInfo func (m *DeviceSelector) Reset() { *m = DeviceSelector{} } func (*DeviceSelector) ProtoMessage() {} func (*DeviceSelector) Descriptor() ([]byte, []int) { - return fileDescriptor_66649ee9bbcd89d2, []int{17} + return fileDescriptor_66649ee9bbcd89d2, []int{18} } func (m *DeviceSelector) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -552,43 +581,15 @@ func (m *DeviceSelector) XXX_DiscardUnknown() { var xxx_messageInfo_DeviceSelector proto.InternalMessageInfo -func (m *OpaqueDeviceConfiguration) Reset() { *m = OpaqueDeviceConfiguration{} } -func (*OpaqueDeviceConfiguration) ProtoMessage() {} -func (*OpaqueDeviceConfiguration) Descriptor() ([]byte, []int) { - return fileDescriptor_66649ee9bbcd89d2, []int{18} -} -func (m *OpaqueDeviceConfiguration) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *OpaqueDeviceConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *OpaqueDeviceConfiguration) XXX_Merge(src proto.Message) { - xxx_messageInfo_OpaqueDeviceConfiguration.Merge(m, src) -} -func (m *OpaqueDeviceConfiguration) XXX_Size() int { - return m.Size() -} -func (m *OpaqueDeviceConfiguration) XXX_DiscardUnknown() { - xxx_messageInfo_OpaqueDeviceConfiguration.DiscardUnknown(m) -} - -var xxx_messageInfo_OpaqueDeviceConfiguration proto.InternalMessageInfo - -func (m *PodSchedulingContext) Reset() { *m = PodSchedulingContext{} } -func (*PodSchedulingContext) ProtoMessage() {} -func (*PodSchedulingContext) Descriptor() ([]byte, []int) { +func (m *NetworkDeviceData) Reset() { *m = NetworkDeviceData{} } +func (*NetworkDeviceData) ProtoMessage() {} +func (*NetworkDeviceData) Descriptor() ([]byte, []int) { return fileDescriptor_66649ee9bbcd89d2, []int{19} } -func (m *PodSchedulingContext) XXX_Unmarshal(b []byte) error { +func (m *NetworkDeviceData) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *PodSchedulingContext) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *NetworkDeviceData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := 
m.MarshalToSizedBuffer(b) if err != nil { @@ -596,83 +597,27 @@ func (m *PodSchedulingContext) XXX_Marshal(b []byte, deterministic bool) ([]byte } return b[:n], nil } -func (m *PodSchedulingContext) XXX_Merge(src proto.Message) { - xxx_messageInfo_PodSchedulingContext.Merge(m, src) +func (m *NetworkDeviceData) XXX_Merge(src proto.Message) { + xxx_messageInfo_NetworkDeviceData.Merge(m, src) } -func (m *PodSchedulingContext) XXX_Size() int { +func (m *NetworkDeviceData) XXX_Size() int { return m.Size() } -func (m *PodSchedulingContext) XXX_DiscardUnknown() { - xxx_messageInfo_PodSchedulingContext.DiscardUnknown(m) +func (m *NetworkDeviceData) XXX_DiscardUnknown() { + xxx_messageInfo_NetworkDeviceData.DiscardUnknown(m) } -var xxx_messageInfo_PodSchedulingContext proto.InternalMessageInfo +var xxx_messageInfo_NetworkDeviceData proto.InternalMessageInfo -func (m *PodSchedulingContextList) Reset() { *m = PodSchedulingContextList{} } -func (*PodSchedulingContextList) ProtoMessage() {} -func (*PodSchedulingContextList) Descriptor() ([]byte, []int) { +func (m *OpaqueDeviceConfiguration) Reset() { *m = OpaqueDeviceConfiguration{} } +func (*OpaqueDeviceConfiguration) ProtoMessage() {} +func (*OpaqueDeviceConfiguration) Descriptor() ([]byte, []int) { return fileDescriptor_66649ee9bbcd89d2, []int{20} } -func (m *PodSchedulingContextList) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PodSchedulingContextList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *PodSchedulingContextList) XXX_Merge(src proto.Message) { - xxx_messageInfo_PodSchedulingContextList.Merge(m, src) -} -func (m *PodSchedulingContextList) XXX_Size() int { - return m.Size() -} -func (m *PodSchedulingContextList) XXX_DiscardUnknown() { - xxx_messageInfo_PodSchedulingContextList.DiscardUnknown(m) -} - -var xxx_messageInfo_PodSchedulingContextList proto.InternalMessageInfo - -func (m *PodSchedulingContextSpec) Reset() { *m = PodSchedulingContextSpec{} } -func (*PodSchedulingContextSpec) ProtoMessage() {} -func (*PodSchedulingContextSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_66649ee9bbcd89d2, []int{21} -} -func (m *PodSchedulingContextSpec) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PodSchedulingContextSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *PodSchedulingContextSpec) XXX_Merge(src proto.Message) { - xxx_messageInfo_PodSchedulingContextSpec.Merge(m, src) -} -func (m *PodSchedulingContextSpec) XXX_Size() int { - return m.Size() -} -func (m *PodSchedulingContextSpec) XXX_DiscardUnknown() { - xxx_messageInfo_PodSchedulingContextSpec.DiscardUnknown(m) -} - -var xxx_messageInfo_PodSchedulingContextSpec proto.InternalMessageInfo - -func (m *PodSchedulingContextStatus) Reset() { *m = PodSchedulingContextStatus{} } -func (*PodSchedulingContextStatus) ProtoMessage() {} -func (*PodSchedulingContextStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_66649ee9bbcd89d2, []int{22} -} -func (m *PodSchedulingContextStatus) XXX_Unmarshal(b []byte) error { +func (m *OpaqueDeviceConfiguration) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *PodSchedulingContextStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *OpaqueDeviceConfiguration) XXX_Marshal(b 
[]byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { @@ -680,22 +625,22 @@ func (m *PodSchedulingContextStatus) XXX_Marshal(b []byte, deterministic bool) ( } return b[:n], nil } -func (m *PodSchedulingContextStatus) XXX_Merge(src proto.Message) { - xxx_messageInfo_PodSchedulingContextStatus.Merge(m, src) +func (m *OpaqueDeviceConfiguration) XXX_Merge(src proto.Message) { + xxx_messageInfo_OpaqueDeviceConfiguration.Merge(m, src) } -func (m *PodSchedulingContextStatus) XXX_Size() int { +func (m *OpaqueDeviceConfiguration) XXX_Size() int { return m.Size() } -func (m *PodSchedulingContextStatus) XXX_DiscardUnknown() { - xxx_messageInfo_PodSchedulingContextStatus.DiscardUnknown(m) +func (m *OpaqueDeviceConfiguration) XXX_DiscardUnknown() { + xxx_messageInfo_OpaqueDeviceConfiguration.DiscardUnknown(m) } -var xxx_messageInfo_PodSchedulingContextStatus proto.InternalMessageInfo +var xxx_messageInfo_OpaqueDeviceConfiguration proto.InternalMessageInfo func (m *ResourceClaim) Reset() { *m = ResourceClaim{} } func (*ResourceClaim) ProtoMessage() {} func (*ResourceClaim) Descriptor() ([]byte, []int) { - return fileDescriptor_66649ee9bbcd89d2, []int{23} + return fileDescriptor_66649ee9bbcd89d2, []int{21} } func (m *ResourceClaim) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -723,7 +668,7 @@ var xxx_messageInfo_ResourceClaim proto.InternalMessageInfo func (m *ResourceClaimConsumerReference) Reset() { *m = ResourceClaimConsumerReference{} } func (*ResourceClaimConsumerReference) ProtoMessage() {} func (*ResourceClaimConsumerReference) Descriptor() ([]byte, []int) { - return fileDescriptor_66649ee9bbcd89d2, []int{24} + return fileDescriptor_66649ee9bbcd89d2, []int{22} } func (m *ResourceClaimConsumerReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -751,7 +696,7 @@ var xxx_messageInfo_ResourceClaimConsumerReference proto.InternalMessageInfo func (m *ResourceClaimList) Reset() { *m = ResourceClaimList{} } func (*ResourceClaimList) ProtoMessage() {} func (*ResourceClaimList) Descriptor() ([]byte, []int) { - return fileDescriptor_66649ee9bbcd89d2, []int{25} + return fileDescriptor_66649ee9bbcd89d2, []int{23} } func (m *ResourceClaimList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -776,38 +721,10 @@ func (m *ResourceClaimList) XXX_DiscardUnknown() { var xxx_messageInfo_ResourceClaimList proto.InternalMessageInfo -func (m *ResourceClaimSchedulingStatus) Reset() { *m = ResourceClaimSchedulingStatus{} } -func (*ResourceClaimSchedulingStatus) ProtoMessage() {} -func (*ResourceClaimSchedulingStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_66649ee9bbcd89d2, []int{26} -} -func (m *ResourceClaimSchedulingStatus) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ResourceClaimSchedulingStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ResourceClaimSchedulingStatus) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResourceClaimSchedulingStatus.Merge(m, src) -} -func (m *ResourceClaimSchedulingStatus) XXX_Size() int { - return m.Size() -} -func (m *ResourceClaimSchedulingStatus) XXX_DiscardUnknown() { - xxx_messageInfo_ResourceClaimSchedulingStatus.DiscardUnknown(m) -} - -var xxx_messageInfo_ResourceClaimSchedulingStatus proto.InternalMessageInfo - func (m *ResourceClaimSpec) Reset() { *m = ResourceClaimSpec{} } func (*ResourceClaimSpec) 
ProtoMessage() {} func (*ResourceClaimSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_66649ee9bbcd89d2, []int{27} + return fileDescriptor_66649ee9bbcd89d2, []int{24} } func (m *ResourceClaimSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -835,7 +752,7 @@ var xxx_messageInfo_ResourceClaimSpec proto.InternalMessageInfo func (m *ResourceClaimStatus) Reset() { *m = ResourceClaimStatus{} } func (*ResourceClaimStatus) ProtoMessage() {} func (*ResourceClaimStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_66649ee9bbcd89d2, []int{28} + return fileDescriptor_66649ee9bbcd89d2, []int{25} } func (m *ResourceClaimStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -863,7 +780,7 @@ var xxx_messageInfo_ResourceClaimStatus proto.InternalMessageInfo func (m *ResourceClaimTemplate) Reset() { *m = ResourceClaimTemplate{} } func (*ResourceClaimTemplate) ProtoMessage() {} func (*ResourceClaimTemplate) Descriptor() ([]byte, []int) { - return fileDescriptor_66649ee9bbcd89d2, []int{29} + return fileDescriptor_66649ee9bbcd89d2, []int{26} } func (m *ResourceClaimTemplate) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -891,7 +808,7 @@ var xxx_messageInfo_ResourceClaimTemplate proto.InternalMessageInfo func (m *ResourceClaimTemplateList) Reset() { *m = ResourceClaimTemplateList{} } func (*ResourceClaimTemplateList) ProtoMessage() {} func (*ResourceClaimTemplateList) Descriptor() ([]byte, []int) { - return fileDescriptor_66649ee9bbcd89d2, []int{30} + return fileDescriptor_66649ee9bbcd89d2, []int{27} } func (m *ResourceClaimTemplateList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -919,7 +836,7 @@ var xxx_messageInfo_ResourceClaimTemplateList proto.InternalMessageInfo func (m *ResourceClaimTemplateSpec) Reset() { *m = ResourceClaimTemplateSpec{} } func (*ResourceClaimTemplateSpec) ProtoMessage() {} func (*ResourceClaimTemplateSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_66649ee9bbcd89d2, []int{31} + return fileDescriptor_66649ee9bbcd89d2, []int{28} } func (m *ResourceClaimTemplateSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -947,7 +864,7 @@ var xxx_messageInfo_ResourceClaimTemplateSpec proto.InternalMessageInfo func (m *ResourcePool) Reset() { *m = ResourcePool{} } func (*ResourcePool) ProtoMessage() {} func (*ResourcePool) Descriptor() ([]byte, []int) { - return fileDescriptor_66649ee9bbcd89d2, []int{32} + return fileDescriptor_66649ee9bbcd89d2, []int{29} } func (m *ResourcePool) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -975,7 +892,7 @@ var xxx_messageInfo_ResourcePool proto.InternalMessageInfo func (m *ResourceSlice) Reset() { *m = ResourceSlice{} } func (*ResourceSlice) ProtoMessage() {} func (*ResourceSlice) Descriptor() ([]byte, []int) { - return fileDescriptor_66649ee9bbcd89d2, []int{33} + return fileDescriptor_66649ee9bbcd89d2, []int{30} } func (m *ResourceSlice) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1003,7 +920,7 @@ var xxx_messageInfo_ResourceSlice proto.InternalMessageInfo func (m *ResourceSliceList) Reset() { *m = ResourceSliceList{} } func (*ResourceSliceList) ProtoMessage() {} func (*ResourceSliceList) Descriptor() ([]byte, []int) { - return fileDescriptor_66649ee9bbcd89d2, []int{34} + return fileDescriptor_66649ee9bbcd89d2, []int{31} } func (m *ResourceSliceList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1031,7 +948,7 @@ var xxx_messageInfo_ResourceSliceList proto.InternalMessageInfo func (m *ResourceSliceSpec) Reset() { *m = 
ResourceSliceSpec{} } func (*ResourceSliceSpec) ProtoMessage() {} func (*ResourceSliceSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_66649ee9bbcd89d2, []int{35} + return fileDescriptor_66649ee9bbcd89d2, []int{32} } func (m *ResourceSliceSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1057,6 +974,7 @@ func (m *ResourceSliceSpec) XXX_DiscardUnknown() { var xxx_messageInfo_ResourceSliceSpec proto.InternalMessageInfo func init() { + proto.RegisterType((*AllocatedDeviceStatus)(nil), "k8s.io.api.resource.v1alpha3.AllocatedDeviceStatus") proto.RegisterType((*AllocationResult)(nil), "k8s.io.api.resource.v1alpha3.AllocationResult") proto.RegisterType((*BasicDevice)(nil), "k8s.io.api.resource.v1alpha3.BasicDevice") proto.RegisterMapType((map[QualifiedName]DeviceAttribute)(nil), "k8s.io.api.resource.v1alpha3.BasicDevice.AttributesEntry") @@ -1077,15 +995,11 @@ func init() { proto.RegisterType((*DeviceRequest)(nil), "k8s.io.api.resource.v1alpha3.DeviceRequest") proto.RegisterType((*DeviceRequestAllocationResult)(nil), "k8s.io.api.resource.v1alpha3.DeviceRequestAllocationResult") proto.RegisterType((*DeviceSelector)(nil), "k8s.io.api.resource.v1alpha3.DeviceSelector") + proto.RegisterType((*NetworkDeviceData)(nil), "k8s.io.api.resource.v1alpha3.NetworkDeviceData") proto.RegisterType((*OpaqueDeviceConfiguration)(nil), "k8s.io.api.resource.v1alpha3.OpaqueDeviceConfiguration") - proto.RegisterType((*PodSchedulingContext)(nil), "k8s.io.api.resource.v1alpha3.PodSchedulingContext") - proto.RegisterType((*PodSchedulingContextList)(nil), "k8s.io.api.resource.v1alpha3.PodSchedulingContextList") - proto.RegisterType((*PodSchedulingContextSpec)(nil), "k8s.io.api.resource.v1alpha3.PodSchedulingContextSpec") - proto.RegisterType((*PodSchedulingContextStatus)(nil), "k8s.io.api.resource.v1alpha3.PodSchedulingContextStatus") proto.RegisterType((*ResourceClaim)(nil), "k8s.io.api.resource.v1alpha3.ResourceClaim") proto.RegisterType((*ResourceClaimConsumerReference)(nil), "k8s.io.api.resource.v1alpha3.ResourceClaimConsumerReference") proto.RegisterType((*ResourceClaimList)(nil), "k8s.io.api.resource.v1alpha3.ResourceClaimList") - proto.RegisterType((*ResourceClaimSchedulingStatus)(nil), "k8s.io.api.resource.v1alpha3.ResourceClaimSchedulingStatus") proto.RegisterType((*ResourceClaimSpec)(nil), "k8s.io.api.resource.v1alpha3.ResourceClaimSpec") proto.RegisterType((*ResourceClaimStatus)(nil), "k8s.io.api.resource.v1alpha3.ResourceClaimStatus") proto.RegisterType((*ResourceClaimTemplate)(nil), "k8s.io.api.resource.v1alpha3.ResourceClaimTemplate") @@ -1102,138 +1016,208 @@ func init() { } var fileDescriptor_66649ee9bbcd89d2 = []byte{ - // 2085 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x1a, 0xcd, 0x6f, 0x1c, 0x57, - 0x3d, 0xb3, 0xeb, 0xcf, 0x9f, 0xbf, 0x9f, 0x93, 0xe0, 0x9a, 0x66, 0x37, 0x99, 0x22, 0x70, 0xda, - 0x74, 0xb6, 0x71, 0x4b, 0x1b, 0x5a, 0x90, 0xf0, 0xd8, 0x6e, 0xe4, 0x28, 0x1f, 0xce, 0xdb, 0x26, - 0x34, 0x50, 0x4a, 0x9f, 0x67, 0x9f, 0xed, 0xc1, 0xb3, 0x33, 0xd3, 0x99, 0x37, 0x26, 0x16, 0x12, - 0x8a, 0xb8, 0x70, 0x8b, 0x7a, 0xe5, 0x80, 0xb8, 0x21, 0x21, 0x0e, 0x70, 0xe0, 0x88, 0x54, 0x24, - 0x90, 0xc8, 0x31, 0x08, 0x0e, 0x3d, 0x2d, 0xcd, 0x22, 0xfe, 0x89, 0x5c, 0x40, 0xef, 0xcd, 0x9b, - 0x4f, 0xcf, 0x6c, 0x66, 0xa3, 0xca, 0x2a, 0x37, 0xcf, 0xef, 0xfb, 0xfd, 0xbe, 0xdf, 0x5b, 0xc3, - 0xa5, 0x83, 0x2b, 0xbe, 0x66, 0x3a, 0x2d, 0xe2, 0x9a, 0x2d, 0x8f, 0xfa, 0x4e, 0xe0, 0x19, 0xb4, - 0x75, 0x78, 0x99, 0x58, 0xee, 0x3e, 0x79, 
0xbd, 0xb5, 0x47, 0x6d, 0xea, 0x11, 0x46, 0x3b, 0x9a, - 0xeb, 0x39, 0xcc, 0x41, 0x2f, 0x86, 0xd4, 0x1a, 0x71, 0x4d, 0x2d, 0xa2, 0xd6, 0x22, 0xea, 0xe5, - 0x57, 0xf7, 0x4c, 0xb6, 0x1f, 0xec, 0x68, 0x86, 0xd3, 0x6d, 0xed, 0x39, 0x7b, 0x4e, 0x4b, 0x30, - 0xed, 0x04, 0xbb, 0xe2, 0x4b, 0x7c, 0x88, 0xbf, 0x42, 0x61, 0xcb, 0x6a, 0x4a, 0xb5, 0xe1, 0x78, - 0x5c, 0x6d, 0x5e, 0xe1, 0xf2, 0x1b, 0x09, 0x4d, 0x97, 0x18, 0xfb, 0xa6, 0x4d, 0xbd, 0xa3, 0x96, - 0x7b, 0xb0, 0x97, 0xb5, 0x77, 0x18, 0x2e, 0xbf, 0xd5, 0xa5, 0x8c, 0x14, 0xe9, 0x6a, 0x95, 0x71, - 0x79, 0x81, 0xcd, 0xcc, 0xee, 0x71, 0x35, 0x6f, 0x3e, 0x8b, 0xc1, 0x37, 0xf6, 0x69, 0x97, 0xe4, - 0xf9, 0xd4, 0xff, 0x2a, 0x30, 0xbf, 0x66, 0x59, 0x8e, 0x41, 0x98, 0xe9, 0xd8, 0x98, 0xfa, 0x81, - 0xc5, 0xd0, 0x8f, 0x60, 0xbc, 0x43, 0x0f, 0x4d, 0x83, 0xfa, 0x4b, 0xca, 0x79, 0x65, 0x65, 0x6a, - 0xf5, 0x0d, 0x6d, 0x90, 0xb3, 0xb5, 0x0d, 0x41, 0x9c, 0x17, 0xa3, 0xcf, 0x3d, 0xea, 0x35, 0x4f, - 0xf5, 0x7b, 0xcd, 0xf1, 0x10, 0xef, 0xe3, 0x48, 0x2a, 0xba, 0x0b, 0xd3, 0xb6, 0xd3, 0xa1, 0x6d, - 0x6a, 0x51, 0x83, 0x39, 0xde, 0x52, 0x5d, 0x68, 0x39, 0x9f, 0xd6, 0xc2, 0xa3, 0xa0, 0x1d, 0x5e, - 0xd6, 0x6e, 0xa6, 0xe8, 0xf4, 0xf9, 0x7e, 0xaf, 0x39, 0x9d, 0x86, 0xe0, 0x8c, 0x1c, 0xb4, 0x0a, - 0x60, 0x38, 0x36, 0xf3, 0x1c, 0xcb, 0xa2, 0xde, 0xd2, 0xc8, 0x79, 0x65, 0x65, 0x52, 0x47, 0xd2, - 0x0a, 0x58, 0x8f, 0x31, 0x38, 0x45, 0xa5, 0x7e, 0x5e, 0x87, 0x29, 0x9d, 0xf8, 0xa6, 0x11, 0x5a, - 0x89, 0x7e, 0x06, 0x40, 0x18, 0xf3, 0xcc, 0x9d, 0x80, 0x89, 0xf3, 0xd7, 0x57, 0xa6, 0x56, 0xbf, - 0x35, 0xf8, 0xfc, 0x29, 0x76, 0x6d, 0x2d, 0xe6, 0xdd, 0xb4, 0x99, 0x77, 0xa4, 0xbf, 0x14, 0xa9, - 0x4f, 0x10, 0x3f, 0xff, 0x57, 0x73, 0xe6, 0x76, 0x40, 0x2c, 0x73, 0xd7, 0xa4, 0x9d, 0x9b, 0xa4, - 0x4b, 0x71, 0x4a, 0x23, 0x3a, 0x84, 0x09, 0x83, 0xb8, 0xc4, 0x30, 0xd9, 0xd1, 0x52, 0x4d, 0x68, - 0x7f, 0xab, 0xba, 0xf6, 0x75, 0xc9, 0x19, 0xea, 0xbe, 0x20, 0x75, 0x4f, 0x44, 0xe0, 0xe3, 0x9a, - 0x63, 0x5d, 0xcb, 0x16, 0xcc, 0xe5, 0x6c, 0x47, 0xf3, 0x50, 0x3f, 0xa0, 0x47, 0x22, 0x07, 0x26, - 0x31, 0xff, 0x13, 0xad, 0xc3, 0xe8, 0x21, 0xb1, 0x02, 0xba, 0x54, 0x13, 0x11, 0x7b, 0xb5, 0x52, - 0x5e, 0x44, 0x52, 0x71, 0xc8, 0xfb, 0x76, 0xed, 0x8a, 0xb2, 0x7c, 0x00, 0x33, 0x19, 0x5b, 0x0b, - 0x74, 0x6d, 0x64, 0x75, 0x69, 0x29, 0x5d, 0x71, 0x8a, 0x6b, 0xee, 0xc1, 0x5e, 0x56, 0xf9, 0xed, - 0x80, 0xd8, 0xcc, 0x64, 0x47, 0x29, 0x65, 0xea, 0x55, 0x58, 0x58, 0xdf, 0xbc, 0x1e, 0x5a, 0x93, - 0xce, 0x15, 0x7a, 0xdf, 0xf5, 0xa8, 0xef, 0x9b, 0x8e, 0x1d, 0xea, 0x4d, 0x72, 0x65, 0x33, 0xc6, - 0xe0, 0x14, 0x95, 0x7a, 0x08, 0x63, 0x32, 0x4b, 0xce, 0xc3, 0x88, 0x4d, 0xba, 0x54, 0xf2, 0x4d, - 0x4b, 0xbe, 0x11, 0xe1, 0x53, 0x81, 0x41, 0xd7, 0x60, 0x74, 0x87, 0x47, 0x46, 0x9a, 0x7f, 0xb1, - 0x72, 0x10, 0xf5, 0xc9, 0x7e, 0xaf, 0x39, 0x2a, 0x00, 0x38, 0x14, 0xa1, 0x3e, 0xac, 0xc1, 0xb9, - 0x7c, 0x91, 0xad, 0x3b, 0xf6, 0xae, 0xb9, 0x17, 0x78, 0xe2, 0x03, 0x7d, 0x17, 0xc6, 0x42, 0x91, - 0xd2, 0xa2, 0x15, 0x69, 0xd1, 0x58, 0x5b, 0x40, 0x9f, 0xf6, 0x9a, 0x67, 0xf3, 0xac, 0x21, 0x06, - 0x4b, 0x3e, 0xb4, 0x02, 0x13, 0x1e, 0xfd, 0x38, 0xa0, 0x3e, 0xf3, 0x45, 0xde, 0x4d, 0xea, 0xd3, - 0x3c, 0x75, 0xb0, 0x84, 0xe1, 0x18, 0x8b, 0x1e, 0x28, 0xb0, 0x18, 0x56, 0x72, 0xc6, 0x06, 0x59, - 0xc5, 0x97, 0xab, 0xe4, 0x44, 0x86, 0x51, 0xff, 0xaa, 0x34, 0x76, 0xb1, 0x00, 0x89, 0x8b, 0x54, - 0xa9, 0xff, 0x51, 0xe0, 0x6c, 0x71, 0xd7, 0x41, 0xbb, 0x30, 0xee, 0x89, 0xbf, 0xa2, 0xe2, 0x7d, - 0xa7, 0x8a, 0x41, 0xf2, 0x98, 0xe5, 0x3d, 0x2c, 0xfc, 0xf6, 0x71, 0x24, 0x1c, 0x19, 0x30, 0x66, - 0x08, 0x9b, 0x64, 0x95, 0xbe, 0x33, 0x5c, 0x8f, 0xcc, 0x7a, 0x60, 
0x36, 0x0a, 0x57, 0x08, 0xc6, - 0x52, 0xb4, 0xfa, 0x5b, 0x05, 0xe6, 0x72, 0x55, 0x84, 0x1a, 0x50, 0x37, 0x6d, 0x26, 0xd2, 0xaa, - 0x1e, 0xc6, 0x68, 0xcb, 0x66, 0x77, 0x79, 0xb2, 0x63, 0x8e, 0x40, 0x17, 0x60, 0x64, 0xc7, 0x71, - 0x2c, 0x11, 0x8e, 0x09, 0x7d, 0xa6, 0xdf, 0x6b, 0x4e, 0xea, 0x8e, 0x63, 0x85, 0x14, 0x02, 0x85, - 0xbe, 0x01, 0x63, 0x3e, 0xf3, 0x4c, 0x7b, 0x4f, 0xf6, 0xc8, 0xb9, 0x7e, 0xaf, 0x39, 0xd5, 0x16, - 0x90, 0x90, 0x4c, 0xa2, 0xd1, 0xcb, 0x30, 0x7e, 0x48, 0x3d, 0x51, 0x21, 0xa3, 0x82, 0x52, 0x74, - 0xe0, 0xbb, 0x21, 0x28, 0x24, 0x8d, 0x08, 0xd4, 0xdf, 0xd7, 0x60, 0x4a, 0x06, 0xd0, 0x22, 0x66, - 0x17, 0xdd, 0x4b, 0x25, 0x54, 0x18, 0x89, 0x57, 0x86, 0x88, 0x84, 0x3e, 0x1f, 0x35, 0xaf, 0x82, - 0x0c, 0xa4, 0x30, 0x65, 0x38, 0xb6, 0xcf, 0x3c, 0x62, 0xda, 0x32, 0x5d, 0xb3, 0x0d, 0x62, 0x50, - 0xe2, 0x49, 0x36, 0x7d, 0x51, 0x2a, 0x98, 0x4a, 0x60, 0x3e, 0x4e, 0xcb, 0x45, 0x1f, 0xc6, 0x21, - 0xae, 0x0b, 0x0d, 0x6f, 0x56, 0xd2, 0xc0, 0x0f, 0x5f, 0x2d, 0xba, 0x7f, 0x53, 0x60, 0xa9, 0x8c, - 0x29, 0x53, 0x8f, 0xca, 0x73, 0xd5, 0x63, 0xed, 0xe4, 0xea, 0xf1, 0xcf, 0x4a, 0x2a, 0xf6, 0xbe, - 0x8f, 0x3e, 0x82, 0x09, 0xbe, 0xda, 0x74, 0x08, 0x23, 0x72, 0x85, 0x78, 0x6d, 0x50, 0xfb, 0xf6, - 0x35, 0x4e, 0xcd, 0xc7, 0xfd, 0xad, 0x9d, 0x1f, 0x53, 0x83, 0xdd, 0xa0, 0x8c, 0x24, 0xcd, 0x38, - 0x81, 0xe1, 0x58, 0x2a, 0xba, 0x05, 0x23, 0xbe, 0x4b, 0x8d, 0x61, 0x06, 0x91, 0x30, 0xad, 0xed, - 0x52, 0x23, 0xe9, 0xd7, 0xfc, 0x0b, 0x0b, 0x41, 0xea, 0xaf, 0xd2, 0xc1, 0xf0, 0xfd, 0x6c, 0x30, - 0xca, 0x5c, 0xac, 0x9c, 0x9c, 0x8b, 0x3f, 0x8d, 0x5b, 0x81, 0xb0, 0xef, 0xba, 0xe9, 0x33, 0xf4, - 0xc1, 0x31, 0x37, 0x6b, 0xd5, 0xdc, 0xcc, 0xb9, 0x85, 0x93, 0xe3, 0x2a, 0x8b, 0x20, 0x29, 0x17, - 0xdf, 0x84, 0x51, 0x93, 0xd1, 0x6e, 0x54, 0x5f, 0x17, 0x2b, 0xfb, 0x58, 0x9f, 0x91, 0x52, 0x47, - 0xb7, 0x38, 0x3f, 0x0e, 0xc5, 0xa8, 0xbf, 0xab, 0x65, 0x4e, 0xc0, 0x7d, 0x8f, 0x7e, 0x08, 0x93, - 0xbe, 0x9c, 0xc8, 0x51, 0x97, 0xb8, 0x54, 0x45, 0x4f, 0xbc, 0x12, 0x2e, 0x48, 0x55, 0x93, 0x11, - 0xc4, 0xc7, 0x89, 0xc4, 0x54, 0x05, 0xd7, 0x86, 0xaa, 0xe0, 0x5c, 0xfc, 0xcb, 0x2a, 0x18, 0xdd, - 0x83, 0x19, 0x3f, 0x30, 0x19, 0xd9, 0xb1, 0x28, 0x5f, 0x4b, 0xfd, 0xca, 0x9b, 0xec, 0x42, 0xbf, - 0xd7, 0x9c, 0x69, 0xa7, 0x59, 0x71, 0x56, 0x92, 0xea, 0x41, 0x51, 0x6e, 0xa0, 0x1f, 0xc0, 0x98, - 0xe3, 0x92, 0x8f, 0x03, 0x2a, 0x03, 0xfe, 0x8c, 0xe5, 0xf0, 0x96, 0xa0, 0x2d, 0xca, 0x40, 0xe0, - 0xc7, 0x09, 0xd1, 0x58, 0x8a, 0x54, 0x1f, 0x2a, 0x30, 0x9f, 0xef, 0x93, 0x43, 0x34, 0xa2, 0x6d, - 0x98, 0xed, 0x12, 0x66, 0xec, 0xc7, 0xb3, 0x4a, 0x54, 0xe7, 0xa4, 0xbe, 0xd2, 0xef, 0x35, 0x67, - 0x6f, 0x64, 0x30, 0x4f, 0x7b, 0x4d, 0xf4, 0x6e, 0x60, 0x59, 0x47, 0xd9, 0x75, 0x34, 0xc7, 0xaf, - 0xfe, 0xa2, 0x0e, 0x33, 0x99, 0xb1, 0x50, 0x61, 0xf1, 0x5a, 0x83, 0xb9, 0x4e, 0x12, 0x47, 0x8e, - 0x90, 0x66, 0x7c, 0x45, 0x12, 0xa7, 0x93, 0x50, 0xf0, 0xe5, 0xe9, 0xb3, 0x59, 0x59, 0xff, 0xc2, - 0xb3, 0xf2, 0x2e, 0xcc, 0x92, 0x78, 0x11, 0xb8, 0xe1, 0x74, 0xa8, 0x1c, 0xc3, 0x9a, 0xe4, 0x9a, - 0x5d, 0xcb, 0x60, 0x9f, 0xf6, 0x9a, 0xa7, 0xf3, 0xeb, 0x03, 0x87, 0xe3, 0x9c, 0x14, 0xf4, 0x12, - 0x8c, 0x1a, 0x4e, 0x60, 0x33, 0x31, 0xab, 0xeb, 0x49, 0x15, 0xae, 0x73, 0x20, 0x0e, 0x71, 0xe8, - 0x9b, 0x30, 0x45, 0x3a, 0x5d, 0xd3, 0x5e, 0x33, 0x0c, 0xea, 0xfb, 0x4b, 0x63, 0x62, 0x4b, 0x88, - 0x67, 0xe1, 0x5a, 0x82, 0xc2, 0x69, 0x3a, 0xf5, 0x4f, 0x4a, 0xb4, 0x82, 0x96, 0xac, 0x4a, 0xe8, - 0x22, 0x5f, 0xbc, 0x04, 0x4a, 0x06, 0x27, 0xb5, 0x3b, 0x09, 0x30, 0x8e, 0xf0, 0xe8, 0xeb, 0x30, - 0xd6, 0xf1, 0xcc, 0x43, 0xea, 0xc9, 0xc8, 0xc4, 0xe5, 0xb5, 0x21, 0xa0, 0x58, 0x62, 0x79, 
0xb0, - 0xdd, 0x68, 0x95, 0x49, 0x05, 0x7b, 0xdb, 0x71, 0x2c, 0x2c, 0x30, 0x42, 0x92, 0xb0, 0x4a, 0xba, - 0x30, 0x91, 0x14, 0xda, 0x2a, 0xb1, 0xea, 0x07, 0x30, 0x9b, 0xdb, 0xff, 0xaf, 0x41, 0xdd, 0xa0, - 0x96, 0xac, 0xa2, 0xd6, 0xe0, 0xe8, 0x1e, 0xbb, 0x3d, 0xe8, 0xe3, 0xfd, 0x5e, 0xb3, 0xbe, 0xbe, - 0x79, 0x1d, 0x73, 0x21, 0xea, 0x6f, 0x14, 0x78, 0xa1, 0xb4, 0xd2, 0x52, 0xa7, 0x55, 0x06, 0x9e, - 0x96, 0x00, 0xb8, 0xc4, 0x23, 0x5d, 0xca, 0xa8, 0xe7, 0x17, 0x0c, 0xb6, 0x6c, 0x3f, 0x97, 0x17, - 0x7b, 0x0d, 0x93, 0x9f, 0x6c, 0xde, 0x67, 0xd4, 0xe6, 0x3b, 0x58, 0x32, 0x33, 0xb7, 0x63, 0x41, - 0x38, 0x25, 0x54, 0xfd, 0x63, 0x0d, 0x4e, 0x6f, 0x3b, 0x9d, 0xb6, 0xb1, 0x4f, 0x3b, 0x81, 0x65, - 0xda, 0x7b, 0xfc, 0x52, 0x4c, 0xef, 0xb3, 0x13, 0x18, 0xd8, 0xef, 0x67, 0x06, 0xf6, 0x33, 0x1a, - 0x71, 0x91, 0x8d, 0x65, 0x93, 0x1b, 0x7d, 0xc4, 0xb7, 0x59, 0xc2, 0x82, 0xa8, 0xfb, 0x5e, 0x79, - 0x0e, 0xd9, 0x82, 0x3f, 0x89, 0x4c, 0xf8, 0x8d, 0xa5, 0x5c, 0xf5, 0xef, 0x0a, 0x2c, 0x15, 0xb1, - 0x9d, 0xc0, 0x10, 0xfe, 0x5e, 0x76, 0x08, 0xaf, 0x0e, 0x7f, 0xb6, 0x92, 0x69, 0xfc, 0x49, 0xc9, - 0x99, 0xc4, 0x58, 0xbe, 0x02, 0xd3, 0x61, 0xbb, 0xa2, 0x1d, 0x3e, 0x8d, 0x64, 0xe2, 0x9e, 0x96, - 0x82, 0xa6, 0xdb, 0x29, 0x1c, 0xce, 0x50, 0xa2, 0xb7, 0x61, 0xd6, 0x75, 0x18, 0xb5, 0x99, 0x49, - 0xac, 0x70, 0x24, 0x86, 0x97, 0x49, 0xc4, 0xfb, 0xda, 0x76, 0x06, 0x83, 0x73, 0x94, 0xea, 0x2f, - 0x15, 0x58, 0x2e, 0x8f, 0x0e, 0xfa, 0x29, 0xcc, 0x46, 0x27, 0x16, 0xfb, 0x72, 0xc5, 0x0b, 0x1e, - 0x4e, 0xf3, 0x24, 0xb2, 0x65, 0xc8, 0xcf, 0x46, 0x3d, 0x37, 0x43, 0xe6, 0xe3, 0x9c, 0x2a, 0xf5, - 0xd7, 0x35, 0x98, 0xc9, 0x90, 0x9c, 0x40, 0xc9, 0xdc, 0xce, 0x94, 0x4c, 0x6b, 0x98, 0x63, 0x96, - 0xd5, 0xca, 0xbd, 0x5c, 0xad, 0x5c, 0x1e, 0x46, 0xe8, 0xe0, 0x22, 0xe9, 0x2b, 0xd0, 0xc8, 0xd0, - 0xf3, 0x1d, 0x22, 0xe8, 0x52, 0x0f, 0xd3, 0x5d, 0xea, 0x51, 0xdb, 0xa0, 0xe8, 0x12, 0x4c, 0x10, - 0xd7, 0xbc, 0xea, 0x39, 0x81, 0x2b, 0x53, 0x2a, 0x4e, 0xfd, 0xb5, 0xed, 0x2d, 0x01, 0xc7, 0x31, - 0x05, 0xa7, 0x8e, 0x2c, 0x92, 0x13, 0x20, 0x75, 0x27, 0x0c, 0xe1, 0x38, 0xa6, 0x88, 0x17, 0x83, - 0x91, 0xd2, 0xc5, 0x40, 0x87, 0x7a, 0x60, 0x76, 0xe4, 0x45, 0xf6, 0x35, 0x49, 0x50, 0xbf, 0xb3, - 0xb5, 0xf1, 0xb4, 0xd7, 0xbc, 0x50, 0xf6, 0x7e, 0xca, 0x8e, 0x5c, 0xea, 0x6b, 0x77, 0xb6, 0x36, - 0x30, 0x67, 0x56, 0xff, 0xa2, 0xc0, 0x42, 0xe6, 0x90, 0x27, 0xd0, 0x02, 0xb6, 0xb3, 0x2d, 0xe0, - 0x95, 0x21, 0x42, 0x56, 0x52, 0xfb, 0x0f, 0x14, 0x38, 0x37, 0xb0, 0x2c, 0x2a, 0xac, 0x59, 0xdf, - 0x81, 0xb9, 0xc0, 0xce, 0x2e, 0xbf, 0x61, 0xa5, 0x2f, 0xf2, 0x15, 0xeb, 0x4e, 0x16, 0x85, 0xf3, - 0xb4, 0xfc, 0xba, 0xb5, 0x70, 0x2c, 0x65, 0xd1, 0x7b, 0xf9, 0x97, 0xe7, 0x8b, 0x95, 0xaf, 0xdc, - 0x03, 0x9e, 0x9b, 0xb3, 0xcf, 0xc2, 0xb5, 0x4a, 0xcf, 0xc2, 0x9f, 0xd6, 0x60, 0xb1, 0x20, 0xfb, - 0xd1, 0x87, 0x00, 0xc9, 0xd6, 0x55, 0x10, 0xec, 0x02, 0x23, 0x8f, 0x3d, 0x2a, 0xcd, 0x8a, 0xf7, - 0xe0, 0x04, 0x9a, 0x92, 0x88, 0x7c, 0x98, 0xf2, 0xa8, 0x4f, 0xbd, 0x43, 0xda, 0x79, 0xd7, 0xf1, - 0x64, 0xc8, 0xbf, 0x3d, 0x44, 0xc8, 0x8f, 0x55, 0x5d, 0xb2, 0xdc, 0xe1, 0x44, 0x30, 0x4e, 0x6b, - 0x41, 0x6d, 0x38, 0xd3, 0xa1, 0x24, 0x65, 0xa6, 0x58, 0xd3, 0x68, 0x47, 0xbe, 0x21, 0x9d, 0x93, - 0x02, 0xce, 0x6c, 0x14, 0x11, 0xe1, 0x62, 0x5e, 0xf5, 0x9f, 0x0a, 0x9c, 0xc9, 0x58, 0xf6, 0x1e, - 0xed, 0xba, 0x16, 0x61, 0xf4, 0x04, 0x3a, 0xe7, 0xbd, 0x4c, 0xe7, 0x7c, 0x6b, 0x08, 0xf7, 0x45, - 0x46, 0x96, 0xbe, 0x13, 0xfc, 0x43, 0x81, 0x17, 0x0a, 0x39, 0x4e, 0xa0, 0x13, 0xbc, 0x9f, 0xed, - 0x04, 0xaf, 0x3f, 0xc7, 0xb9, 0x4a, 0x3a, 0xc2, 0xe3, 0xb2, 0x53, 0xb5, 0xc3, 0x0d, 0xeb, 0xff, - 0x6f, 0xd4, 0xa9, 
0x7f, 0x50, 0x60, 0x3a, 0xa2, 0xe4, 0x37, 0x86, 0x0a, 0x3d, 0x6d, 0x15, 0x40, - 0xfe, 0x40, 0x16, 0xbd, 0x9f, 0xd5, 0x13, 0xbb, 0xaf, 0xc6, 0x18, 0x9c, 0xa2, 0x42, 0xd7, 0x00, - 0x45, 0x16, 0xb6, 0x2d, 0xb1, 0xfb, 0xf3, 0x1b, 0x58, 0x5d, 0xf0, 0x2e, 0x4b, 0x5e, 0x84, 0x8f, - 0x51, 0xe0, 0x02, 0x2e, 0xf5, 0xaf, 0x4a, 0xb2, 0x64, 0x08, 0xf0, 0x97, 0xd5, 0xf3, 0xc2, 0xb8, - 0x52, 0xcf, 0xa7, 0x87, 0xa4, 0xa0, 0xfc, 0xd2, 0x0e, 0x49, 0x61, 0x5d, 0x49, 0x49, 0x3c, 0xac, - 0xe7, 0x4e, 0x21, 0x4a, 0xa1, 0xea, 0x65, 0xee, 0xba, 0xbc, 0xba, 0x86, 0x6e, 0x7d, 0xb9, 0x9a, - 0x39, 0x3c, 0x4d, 0x0b, 0xaf, 0xb9, 0x97, 0x60, 0xc2, 0x76, 0x3a, 0x54, 0x3c, 0x66, 0xe4, 0x56, - 0xa1, 0x9b, 0x12, 0x8e, 0x63, 0x8a, 0x63, 0x3f, 0xaf, 0x8e, 0x7c, 0x41, 0x3f, 0xaf, 0xf2, 0xf5, - 0xcd, 0x92, 0x5b, 0xfd, 0xa8, 0x98, 0x0c, 0xc9, 0xfa, 0x26, 0xe1, 0x38, 0xa6, 0x40, 0xb7, 0x92, - 0x59, 0x3e, 0x26, 0x62, 0xf2, 0xb5, 0x2a, 0xb3, 0xbc, 0x7c, 0x8c, 0xeb, 0xfa, 0xa3, 0x27, 0x8d, - 0x53, 0x8f, 0x9f, 0x34, 0x4e, 0x7d, 0xf6, 0xa4, 0x71, 0xea, 0x41, 0xbf, 0xa1, 0x3c, 0xea, 0x37, - 0x94, 0xc7, 0xfd, 0x86, 0xf2, 0x59, 0xbf, 0xa1, 0x7c, 0xde, 0x6f, 0x28, 0x9f, 0xfc, 0xbb, 0x71, - 0xea, 0xfb, 0x2f, 0x0e, 0xfa, 0x2f, 0x82, 0xff, 0x05, 0x00, 0x00, 0xff, 0xff, 0x23, 0x3d, 0xa6, - 0x20, 0x64, 0x20, 0x00, 0x00, + // 2030 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x19, 0xcd, 0x6f, 0x1c, 0x57, + 0xdd, 0xb3, 0xe3, 0xcf, 0xdf, 0xfa, 0x2b, 0x2f, 0xa4, 0x38, 0xa6, 0xec, 0x3a, 0x53, 0x04, 0x4e, + 0x9b, 0xee, 0x36, 0x4e, 0xd5, 0x16, 0xc2, 0x01, 0x8f, 0xed, 0x06, 0x47, 0x89, 0xe3, 0x3c, 0xb7, + 0x11, 0x81, 0x12, 0x78, 0x9e, 0x7d, 0xb6, 0x07, 0xcf, 0xce, 0x4c, 0xe7, 0xbd, 0x71, 0xea, 0x0b, + 0xaa, 0xe0, 0x1e, 0xf1, 0x0f, 0x20, 0x0e, 0x48, 0x48, 0x5c, 0x80, 0xff, 0x00, 0x24, 0x90, 0x88, + 0xe0, 0x12, 0x09, 0x0e, 0x3d, 0x2d, 0xcd, 0x22, 0xce, 0xdc, 0x73, 0x42, 0xef, 0xcd, 0x9b, 0xcf, + 0xdd, 0x71, 0xc6, 0x55, 0xb1, 0xd2, 0xdb, 0xce, 0xef, 0xfb, 0xfd, 0xbe, 0xdf, 0x5b, 0xb8, 0x72, + 0xf8, 0x0e, 0x6b, 0xd9, 0x5e, 0x9b, 0xf8, 0x76, 0x3b, 0xa0, 0xcc, 0x0b, 0x03, 0x8b, 0xb6, 0x8f, + 0xae, 0x12, 0xc7, 0x3f, 0x20, 0xd7, 0xda, 0xfb, 0xd4, 0xa5, 0x01, 0xe1, 0xb4, 0xd3, 0xf2, 0x03, + 0x8f, 0x7b, 0xe8, 0xe5, 0x88, 0xba, 0x45, 0x7c, 0xbb, 0x15, 0x53, 0xb7, 0x62, 0xea, 0xc5, 0xd7, + 0xf7, 0x6d, 0x7e, 0x10, 0xee, 0xb6, 0x2c, 0xaf, 0xdb, 0xde, 0xf7, 0xf6, 0xbd, 0xb6, 0x64, 0xda, + 0x0d, 0xf7, 0xe4, 0x97, 0xfc, 0x90, 0xbf, 0x22, 0x61, 0x8b, 0x46, 0x46, 0xb5, 0xe5, 0x05, 0x42, + 0x6d, 0x51, 0xe1, 0xe2, 0x9b, 0x29, 0x4d, 0x97, 0x58, 0x07, 0xb6, 0x4b, 0x83, 0xe3, 0xb6, 0x7f, + 0xb8, 0x9f, 0xb7, 0xf7, 0x34, 0x5c, 0xac, 0xdd, 0xa5, 0x9c, 0x0c, 0xd3, 0xd5, 0x2e, 0xe3, 0x0a, + 0x42, 0x97, 0xdb, 0xdd, 0x41, 0x35, 0x6f, 0x3d, 0x8f, 0x81, 0x59, 0x07, 0xb4, 0x4b, 0x8a, 0x7c, + 0xc6, 0xaf, 0x75, 0xb8, 0xb0, 0xea, 0x38, 0x9e, 0x25, 0x60, 0xeb, 0xf4, 0xc8, 0xb6, 0xe8, 0x0e, + 0x27, 0x3c, 0x64, 0xe8, 0xeb, 0x30, 0xde, 0x09, 0xec, 0x23, 0x1a, 0x2c, 0x68, 0x4b, 0xda, 0xf2, + 0x94, 0x39, 0xfb, 0xb8, 0xd7, 0x1c, 0xe9, 0xf7, 0x9a, 0xe3, 0xeb, 0x12, 0x8a, 0x15, 0x16, 0x2d, + 0xc1, 0xa8, 0xef, 0x79, 0xce, 0x42, 0x4d, 0x52, 0x4d, 0x2b, 0xaa, 0xd1, 0x6d, 0xcf, 0x73, 0xb0, + 0xc4, 0x48, 0x49, 0x52, 0xf2, 0x82, 0x5e, 0x90, 0x24, 0xa1, 0x58, 0x61, 0x91, 0x05, 0x60, 0x79, + 0x6e, 0xc7, 0xe6, 0xb6, 0xe7, 0xb2, 0x85, 0xd1, 0x25, 0x7d, 0xb9, 0xbe, 0xd2, 0x6e, 0xa5, 0x61, + 0x4e, 0x0e, 0xd6, 0xf2, 0x0f, 0xf7, 0x05, 0x80, 0xb5, 0x84, 0xff, 0x5a, 0x47, 0x57, 0x5b, 0x6b, + 0x31, 0x9f, 0x89, 0x94, 0x70, 0x48, 0x40, 0x0c, 0x67, 0xc4, 
0xa2, 0x3b, 0x30, 0xda, 0x21, 0x9c, + 0x2c, 0x8c, 0x2d, 0x69, 0xcb, 0xf5, 0x95, 0xd7, 0x4b, 0xc5, 0x2b, 0xbf, 0xb5, 0x30, 0x79, 0xb8, + 0xf1, 0x11, 0xa7, 0x2e, 0x13, 0xc2, 0x93, 0xd3, 0xad, 0x13, 0x4e, 0xb0, 0x14, 0x84, 0x76, 0xa1, + 0xee, 0x52, 0xfe, 0xd0, 0x0b, 0x0e, 0x05, 0x70, 0x61, 0x5c, 0xca, 0xcd, 0x9a, 0x3d, 0x98, 0x9d, + 0xad, 0x2d, 0xc5, 0x20, 0xcf, 0x2d, 0xd8, 0xcc, 0xb9, 0x7e, 0xaf, 0x59, 0xdf, 0x4a, 0xe5, 0xe0, + 0xac, 0x50, 0xe3, 0xef, 0x1a, 0xcc, 0xab, 0x28, 0xd9, 0x9e, 0x8b, 0x29, 0x0b, 0x1d, 0x8e, 0x7e, + 0x04, 0x13, 0x91, 0xe3, 0x98, 0x8c, 0x50, 0x7d, 0xe5, 0xcd, 0x93, 0x95, 0x46, 0xda, 0x8a, 0x62, + 0xcc, 0x39, 0x75, 0xa6, 0x89, 0x08, 0xcf, 0x70, 0x2c, 0x15, 0xdd, 0x83, 0x69, 0xd7, 0xeb, 0xd0, + 0x1d, 0xea, 0x50, 0x8b, 0x7b, 0x81, 0x8c, 0x5e, 0x7d, 0x65, 0x29, 0xab, 0x45, 0xd4, 0x8a, 0xf0, + 0xff, 0x56, 0x86, 0xce, 0x9c, 0xef, 0xf7, 0x9a, 0xd3, 0x59, 0x08, 0xce, 0xc9, 0x31, 0x3e, 0xd5, + 0xa1, 0x6e, 0x12, 0x66, 0x5b, 0x91, 0x46, 0xf4, 0x53, 0x00, 0xc2, 0x79, 0x60, 0xef, 0x86, 0x5c, + 0x9e, 0x45, 0xc4, 0xfd, 0x9b, 0x27, 0x9f, 0x25, 0xc3, 0xde, 0x5a, 0x4d, 0x78, 0x37, 0x5c, 0x1e, + 0x1c, 0x9b, 0xaf, 0xc4, 0x19, 0x90, 0x22, 0x7e, 0xf6, 0xaf, 0xe6, 0xcc, 0xdd, 0x90, 0x38, 0xf6, + 0x9e, 0x4d, 0x3b, 0x5b, 0xa4, 0x4b, 0x71, 0x46, 0x23, 0x3a, 0x82, 0x49, 0x8b, 0xf8, 0xc4, 0xb2, + 0xf9, 0xf1, 0x42, 0x4d, 0x6a, 0x7f, 0xbb, 0xba, 0xf6, 0x35, 0xc5, 0x19, 0xe9, 0xbe, 0xa4, 0x74, + 0x4f, 0xc6, 0xe0, 0x41, 0xcd, 0x89, 0xae, 0x45, 0x07, 0xe6, 0x0a, 0xb6, 0xa3, 0x79, 0xd0, 0x0f, + 0xe9, 0x71, 0x54, 0x71, 0x58, 0xfc, 0x44, 0x6b, 0x30, 0x76, 0x44, 0x9c, 0x90, 0xca, 0xfa, 0xca, + 0x27, 0x6c, 0x79, 0x8c, 0x63, 0xa9, 0x38, 0xe2, 0xfd, 0x56, 0xed, 0x1d, 0x6d, 0xf1, 0x10, 0x66, + 0x72, 0xb6, 0x0e, 0xd1, 0xb5, 0x9e, 0xd7, 0xd5, 0x3a, 0xa9, 0xf6, 0x52, 0xe5, 0x77, 0x43, 0xe2, + 0x72, 0x9b, 0x1f, 0x67, 0x94, 0x19, 0x37, 0xe0, 0xdc, 0xda, 0xc6, 0x2d, 0xd5, 0x4f, 0x54, 0xdc, + 0xd1, 0x0a, 0x00, 0xfd, 0xc8, 0x0f, 0x28, 0x13, 0xb5, 0xa4, 0xba, 0x4a, 0x52, 0xae, 0x1b, 0x09, + 0x06, 0x67, 0xa8, 0x8c, 0x23, 0x50, 0x5d, 0x42, 0xf4, 0x19, 0x97, 0x74, 0xa9, 0xe2, 0x4b, 0x2a, + 0x51, 0xfa, 0x54, 0x62, 0xd0, 0x4d, 0x18, 0xdb, 0x15, 0x91, 0x51, 0xe6, 0x5f, 0xae, 0x1c, 0x44, + 0x73, 0xaa, 0xdf, 0x6b, 0x8e, 0x49, 0x00, 0x8e, 0x44, 0x18, 0x8f, 0x6a, 0xf0, 0xd5, 0x62, 0xc1, + 0xac, 0x79, 0xee, 0x9e, 0xbd, 0x1f, 0x06, 0xf2, 0x03, 0x7d, 0x07, 0xc6, 0x23, 0x91, 0xca, 0xa2, + 0xe5, 0xb8, 0xab, 0xed, 0x48, 0xe8, 0xb3, 0x5e, 0xf3, 0xa5, 0x22, 0x6b, 0x84, 0xc1, 0x8a, 0x0f, + 0x2d, 0xc3, 0x64, 0x40, 0x3f, 0x0c, 0x29, 0xe3, 0x4c, 0xe6, 0xdd, 0x94, 0x39, 0x2d, 0x52, 0x07, + 0x2b, 0x18, 0x4e, 0xb0, 0xe8, 0x63, 0x0d, 0xce, 0x47, 0x55, 0x99, 0xb3, 0x41, 0x55, 0xe4, 0xd5, + 0x2a, 0x39, 0x91, 0x63, 0x34, 0xbf, 0xa2, 0x8c, 0x3d, 0x3f, 0x04, 0x89, 0x87, 0xa9, 0x32, 0xfe, + 0xa3, 0xc1, 0x4b, 0xc3, 0x3b, 0x08, 0xda, 0x83, 0x89, 0x40, 0xfe, 0x8a, 0x8b, 0xf7, 0x7a, 0x15, + 0x83, 0xd4, 0x31, 0xcb, 0xfb, 0x51, 0xf4, 0xcd, 0x70, 0x2c, 0x1c, 0x59, 0x30, 0x6e, 0x49, 0x9b, + 0x54, 0x95, 0x5e, 0x3f, 0x5d, 0xbf, 0xcb, 0x7b, 0x20, 0x19, 0x42, 0x11, 0x18, 0x2b, 0xd1, 0xc6, + 0x6f, 0x35, 0x98, 0x2b, 0x54, 0x11, 0x6a, 0x80, 0x6e, 0xbb, 0x5c, 0xa6, 0x95, 0x1e, 0xc5, 0x68, + 0xd3, 0xe5, 0xf7, 0x44, 0xb2, 0x63, 0x81, 0x40, 0x97, 0x60, 0x74, 0x57, 0x8c, 0x40, 0x11, 0x8e, + 0x49, 0x73, 0xa6, 0xdf, 0x6b, 0x4e, 0x99, 0x9e, 0xe7, 0x44, 0x14, 0x12, 0x85, 0xbe, 0x01, 0xe3, + 0x8c, 0x07, 0xb6, 0xbb, 0xbf, 0x30, 0x2a, 0xb3, 0x45, 0xf6, 0xfb, 0x1d, 0x09, 0x89, 0xc8, 0x14, + 0x1a, 0xbd, 0x0a, 0x13, 0x47, 0x34, 0x90, 0x15, 0x32, 0x26, 0x29, 0x65, 0x37, 0xbd, 
0x17, 0x81, + 0x22, 0xd2, 0x98, 0xc0, 0xf8, 0x7d, 0x0d, 0xea, 0x2a, 0x80, 0x0e, 0xb1, 0xbb, 0xe8, 0x7e, 0x26, + 0xa1, 0xa2, 0x48, 0xbc, 0x76, 0x8a, 0x48, 0x98, 0xf3, 0x71, 0xf3, 0x1a, 0x92, 0x81, 0x14, 0xea, + 0x96, 0xe7, 0x32, 0x1e, 0x10, 0xdb, 0x55, 0xe9, 0x9a, 0x6f, 0x10, 0x27, 0x25, 0x9e, 0x62, 0x33, + 0xcf, 0x2b, 0x05, 0xf5, 0x14, 0xc6, 0x70, 0x56, 0x2e, 0x7a, 0x90, 0x84, 0x58, 0x97, 0x1a, 0xde, + 0xaa, 0xa4, 0x41, 0x1c, 0xbe, 0x5a, 0x74, 0xff, 0xaa, 0xc1, 0x42, 0x19, 0x53, 0xae, 0x1e, 0xb5, + 0xcf, 0x54, 0x8f, 0xb5, 0xb3, 0xab, 0xc7, 0x3f, 0x69, 0x99, 0xd8, 0x33, 0x86, 0x7e, 0x0c, 0x93, + 0x62, 0x19, 0x92, 0xbb, 0x4d, 0xb4, 0x0e, 0xbc, 0x51, 0x6d, 0x75, 0xba, 0xb3, 0xfb, 0x13, 0x6a, + 0xf1, 0xdb, 0x94, 0x93, 0xb4, 0x19, 0xa7, 0x30, 0x9c, 0x48, 0x15, 0x9b, 0x13, 0xf3, 0xa9, 0x75, + 0x9a, 0x41, 0x24, 0x4d, 0xdb, 0xf1, 0xa9, 0x95, 0xf6, 0x6b, 0xf1, 0x85, 0xa5, 0x20, 0xe3, 0x97, + 0xd9, 0x60, 0x30, 0x96, 0x0f, 0x46, 0x99, 0x8b, 0xb5, 0xb3, 0x73, 0xf1, 0x1f, 0x93, 0x56, 0x20, + 0xed, 0xbb, 0x65, 0x33, 0x8e, 0x3e, 0x18, 0x70, 0x73, 0xab, 0x9a, 0x9b, 0x05, 0xb7, 0x74, 0x72, + 0x52, 0x65, 0x31, 0x24, 0xe3, 0xe2, 0x2d, 0x18, 0xb3, 0x39, 0xed, 0xc6, 0xf5, 0x75, 0xb9, 0xb2, + 0x8f, 0xcd, 0x19, 0x25, 0x75, 0x6c, 0x53, 0xf0, 0xe3, 0x48, 0x8c, 0xf1, 0x24, 0x7f, 0x02, 0xe1, + 0x7b, 0xf4, 0x43, 0x98, 0x62, 0x6a, 0x22, 0xc7, 0x5d, 0xe2, 0x4a, 0x15, 0x3d, 0xc9, 0x7a, 0x77, + 0x4e, 0xa9, 0x9a, 0x8a, 0x21, 0x0c, 0xa7, 0x12, 0x33, 0x15, 0x5c, 0x3b, 0x55, 0x05, 0x17, 0xe2, + 0x5f, 0x5a, 0xc1, 0x01, 0x0c, 0x0b, 0x20, 0xfa, 0x01, 0x8c, 0x7b, 0x3e, 0xf9, 0x30, 0xa4, 0x2a, + 0x2a, 0xcf, 0xd9, 0xe0, 0xee, 0x48, 0xda, 0x61, 0x69, 0x02, 0x42, 0x67, 0x84, 0xc6, 0x4a, 0xa4, + 0xf1, 0x48, 0x83, 0xf9, 0x62, 0x33, 0x3b, 0x45, 0xb7, 0xd8, 0x86, 0xd9, 0x2e, 0xe1, 0xd6, 0x41, + 0x32, 0x50, 0xd4, 0x5d, 0x69, 0xb9, 0xdf, 0x6b, 0xce, 0xde, 0xce, 0x61, 0x9e, 0xf5, 0x9a, 0xe8, + 0xdd, 0xd0, 0x71, 0x8e, 0xf3, 0x3b, 0x63, 0x81, 0xdf, 0xf8, 0xb9, 0x0e, 0x33, 0xb9, 0xde, 0x5d, + 0x61, 0x3b, 0x5a, 0x85, 0xb9, 0x4e, 0xea, 0x6c, 0x81, 0x50, 0x66, 0x7c, 0x59, 0x11, 0x67, 0x33, + 0x45, 0xf2, 0x15, 0xe9, 0xf3, 0xa9, 0xa3, 0x7f, 0xee, 0xa9, 0x73, 0x0f, 0x66, 0x49, 0x32, 0xad, + 0x6f, 0x7b, 0x1d, 0xaa, 0x66, 0x65, 0x4b, 0x71, 0xcd, 0xae, 0xe6, 0xb0, 0xcf, 0x7a, 0xcd, 0x2f, + 0x15, 0x67, 0xbc, 0x80, 0xe3, 0x82, 0x14, 0xf4, 0x0a, 0x8c, 0x59, 0x5e, 0xe8, 0x72, 0x39, 0x50, + 0xf5, 0xb4, 0x54, 0xd6, 0x04, 0x10, 0x47, 0x38, 0x74, 0x15, 0xea, 0xa4, 0xd3, 0xb5, 0xdd, 0x55, + 0xcb, 0xa2, 0x8c, 0xc9, 0x6b, 0xdc, 0x64, 0x34, 0xa5, 0x57, 0x53, 0x30, 0xce, 0xd2, 0x18, 0xff, + 0xd5, 0xe2, 0x1d, 0xb1, 0x64, 0x97, 0x41, 0x97, 0xc5, 0x66, 0x24, 0x51, 0x2a, 0x30, 0x99, 0xe5, + 0x46, 0x82, 0x71, 0x8c, 0xcf, 0x5c, 0xb7, 0x6b, 0x95, 0xae, 0xdb, 0x7a, 0x85, 0xeb, 0xf6, 0xe8, + 0x89, 0xd7, 0xed, 0xc2, 0x89, 0xc7, 0x2a, 0x9c, 0xf8, 0x03, 0x98, 0x2d, 0xec, 0xf4, 0x37, 0x41, + 0xb7, 0xa8, 0xa3, 0x8a, 0xee, 0x39, 0xb7, 0xde, 0x81, 0x1b, 0x81, 0x39, 0xd1, 0xef, 0x35, 0xf5, + 0xb5, 0x8d, 0x5b, 0x58, 0x08, 0x31, 0x7e, 0xa7, 0xc1, 0xb9, 0x81, 0x9b, 0x31, 0xba, 0x0e, 0x33, + 0xb6, 0xcb, 0x69, 0xb0, 0x47, 0x2c, 0xba, 0x95, 0xa6, 0xf8, 0x05, 0x75, 0xaa, 0x99, 0xcd, 0x2c, + 0x12, 0xe7, 0x69, 0xd1, 0x45, 0xd0, 0x6d, 0x3f, 0xde, 0xae, 0xa5, 0xb6, 0xcd, 0x6d, 0x86, 0x05, + 0x4c, 0xd4, 0xc3, 0x01, 0x09, 0x3a, 0x0f, 0x49, 0x40, 0x57, 0x3b, 0x1d, 0x71, 0xdf, 0x50, 0x3e, + 0x4d, 0xea, 0xe1, 0xbb, 0x79, 0x34, 0x2e, 0xd2, 0x1b, 0xbf, 0xd1, 0xe0, 0x62, 0x69, 0x27, 0xa9, + 0xfc, 0x80, 0x42, 0x00, 0x7c, 0x12, 0x90, 0x2e, 0xe5, 0x34, 0x60, 0x43, 0xa6, 0x6b, 0x85, 0x77, + 0x89, 0x64, 
0x70, 0x6f, 0x27, 0x82, 0x70, 0x46, 0xa8, 0xf1, 0xab, 0x1a, 0xcc, 0x60, 0x15, 0x8f, + 0x68, 0x55, 0xfc, 0xff, 0xaf, 0x0b, 0x77, 0x73, 0xeb, 0xc2, 0x73, 0x52, 0x23, 0x67, 0x5c, 0xd9, + 0xc2, 0x80, 0xee, 0x8b, 0x25, 0x9a, 0xf0, 0x90, 0x55, 0xbb, 0xf8, 0xe4, 0x85, 0x4a, 0xc6, 0x34, + 0x08, 0xd1, 0x37, 0x56, 0x02, 0x8d, 0xbe, 0x06, 0x8d, 0x1c, 0xbd, 0xe8, 0xf4, 0x61, 0x97, 0x06, + 0x98, 0xee, 0xd1, 0x80, 0xba, 0x16, 0x45, 0x57, 0x60, 0x92, 0xf8, 0xf6, 0x8d, 0xc0, 0x0b, 0x7d, + 0x15, 0xd1, 0x64, 0x94, 0xaf, 0x6e, 0x6f, 0x4a, 0x38, 0x4e, 0x28, 0x04, 0x75, 0x6c, 0x91, 0xca, + 0xab, 0xcc, 0x7a, 0x1d, 0xc1, 0x71, 0x42, 0x91, 0xb4, 0xef, 0xd1, 0xd2, 0xf6, 0x6d, 0x82, 0x1e, + 0xda, 0x1d, 0x75, 0x27, 0x78, 0x43, 0x11, 0xe8, 0xef, 0x6f, 0xae, 0x3f, 0xeb, 0x35, 0x2f, 0x95, + 0x3d, 0xfe, 0xf1, 0x63, 0x9f, 0xb2, 0xd6, 0xfb, 0x9b, 0xeb, 0x58, 0x30, 0x1b, 0x7f, 0xd6, 0xe0, + 0x5c, 0xee, 0x90, 0x67, 0xb0, 0xd2, 0x6c, 0xe7, 0x57, 0x9a, 0xd7, 0x4e, 0x11, 0xb2, 0x92, 0xa5, + 0xc6, 0x2e, 0x1c, 0x42, 0x6e, 0x35, 0xef, 0x15, 0x1f, 0xc3, 0x2e, 0x57, 0xbe, 0x39, 0x94, 0xbf, + 0x80, 0x19, 0x7f, 0xab, 0xc1, 0xf9, 0x21, 0x59, 0x84, 0x1e, 0x00, 0xa4, 0x33, 0x66, 0x88, 0xd3, + 0x86, 0x28, 0x1c, 0xb8, 0xe7, 0xce, 0xca, 0x27, 0xaa, 0x14, 0x9a, 0x91, 0x88, 0x18, 0xd4, 0x03, + 0xca, 0x68, 0x70, 0x44, 0x3b, 0xef, 0x7a, 0x81, 0x72, 0xdd, 0xb7, 0x4f, 0xe1, 0xba, 0x81, 0xec, + 0x4d, 0xef, 0x5e, 0x38, 0x15, 0x8c, 0xb3, 0x5a, 0xd0, 0x83, 0xd4, 0x85, 0xd1, 0xdb, 0xeb, 0xb5, + 0x4a, 0x27, 0xca, 0x3f, 0x1b, 0x9f, 0xe0, 0xcc, 0x7f, 0x6a, 0x70, 0x21, 0x67, 0xe4, 0x7b, 0xb4, + 0xeb, 0x3b, 0x84, 0xd3, 0x33, 0x68, 0x46, 0xf7, 0x73, 0xcd, 0xe8, 0xed, 0x53, 0x78, 0x32, 0x36, + 0xb2, 0xf4, 0x16, 0xf3, 0x0f, 0x0d, 0x2e, 0x0e, 0xe5, 0x38, 0x83, 0xe2, 0xfa, 0x5e, 0xbe, 0xb8, + 0xae, 0x7d, 0x86, 0x73, 0x95, 0xdf, 0x1c, 0x2e, 0x96, 0xfa, 0xe1, 0x0b, 0x39, 0x3d, 0x8c, 0x3f, + 0x68, 0x30, 0x1d, 0x53, 0x8a, 0x75, 0xa9, 0xc2, 0xce, 0xbc, 0x02, 0xa0, 0xfe, 0x30, 0x89, 0x6f, + 0xf7, 0x7a, 0x6a, 0xf7, 0x8d, 0x04, 0x83, 0x33, 0x54, 0xe8, 0x26, 0xa0, 0xd8, 0xc2, 0x1d, 0x47, + 0x2e, 0x05, 0x62, 0xf5, 0xd4, 0x25, 0xef, 0xa2, 0xe2, 0x45, 0x78, 0x80, 0x02, 0x0f, 0xe1, 0x32, + 0xfe, 0xa2, 0xa5, 0x73, 0x5b, 0x82, 0x5f, 0x54, 0xcf, 0x4b, 0xe3, 0x4a, 0x3d, 0x9f, 0x9d, 0x3b, + 0x92, 0xf2, 0x85, 0x9d, 0x3b, 0xd2, 0xba, 0x92, 0x92, 0x78, 0xa4, 0x17, 0x4e, 0x21, 0x4b, 0xa1, + 0xea, 0x96, 0x77, 0x2b, 0xf3, 0x37, 0x59, 0x7d, 0xe5, 0xd5, 0x6a, 0xe6, 0x88, 0x34, 0x1d, 0xba, + 0xe3, 0x5f, 0x81, 0x49, 0xd7, 0xeb, 0x44, 0xfb, 0x70, 0x61, 0xbb, 0xd8, 0x52, 0x70, 0x9c, 0x50, + 0x0c, 0xfc, 0x91, 0x33, 0xfa, 0xf9, 0xfc, 0x91, 0x23, 0x37, 0x22, 0xc7, 0x11, 0x04, 0xf1, 0xf5, + 0x21, 0xdd, 0x88, 0x14, 0x1c, 0x27, 0x14, 0xe8, 0x4e, 0x3a, 0x5f, 0xc6, 0x65, 0x4c, 0xbe, 0x56, + 0x65, 0x44, 0x97, 0x0f, 0x14, 0xd3, 0x7c, 0xfc, 0xb4, 0x31, 0xf2, 0xe4, 0x69, 0x63, 0xe4, 0x93, + 0xa7, 0x8d, 0x91, 0x8f, 0xfb, 0x0d, 0xed, 0x71, 0xbf, 0xa1, 0x3d, 0xe9, 0x37, 0xb4, 0x4f, 0xfa, + 0x0d, 0xed, 0xd3, 0x7e, 0x43, 0xfb, 0xc5, 0xbf, 0x1b, 0x23, 0xdf, 0x7f, 0xf9, 0xa4, 0x7f, 0x95, + 0xff, 0x17, 0x00, 0x00, 0xff, 0xff, 0xbd, 0x60, 0x85, 0x64, 0x74, 0x1e, 0x00, 0x00, +} + +func (m *AllocatedDeviceStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AllocatedDeviceStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AllocatedDeviceStatus) MarshalToSizedBuffer(dAtA []byte) 
(int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.NetworkData != nil { + { + size, err := m.NetworkData.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + { + size, err := m.Data.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + if len(m.Conditions) > 0 { + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + i -= len(m.Device) + copy(dAtA[i:], m.Device) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Device))) + i-- + dAtA[i] = 0x1a + i -= len(m.Pool) + copy(dAtA[i:], m.Pool) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Pool))) + i-- + dAtA[i] = 0x12 + i -= len(m.Driver) + copy(dAtA[i:], m.Driver) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Driver))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *AllocationResult) Marshal() (dAtA []byte, err error) { @@ -1256,11 +1240,6 @@ func (m *AllocationResult) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l - i -= len(m.Controller) - copy(dAtA[i:], m.Controller) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Controller))) - i-- - dAtA[i] = 0x22 if m.NodeSelector != nil { { size, err := m.NodeSelector.MarshalToSizedBuffer(dAtA[:i]) @@ -1835,18 +1814,6 @@ func (m *DeviceClassSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l - if m.SuitableNodes != nil { - { - size, err := m.SuitableNodes.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } if len(m.Config) > 0 { for iNdEx := len(m.Config) - 1; iNdEx >= 0; iNdEx-- { { @@ -1972,14 +1939,16 @@ func (m *DeviceRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l - i-- - if m.AdminAccess { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + if m.AdminAccess != nil { + i-- + if *m.AdminAccess { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x30 } - i-- - dAtA[i] = 0x30 i = encodeVarintGenerated(dAtA, i, uint64(m.Count)) i-- dAtA[i] = 0x28 @@ -2035,6 +2004,16 @@ func (m *DeviceRequestAllocationResult) MarshalToSizedBuffer(dAtA []byte) (int, _ = i var l int _ = l + if m.AdminAccess != nil { + i-- + if *m.AdminAccess { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x28 + } i -= len(m.Device) copy(dAtA[i:], m.Device) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Device))) @@ -2093,7 +2072,7 @@ func (m *DeviceSelector) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *OpaqueDeviceConfiguration) Marshal() (dAtA []byte, err error) { +func (m *NetworkDeviceData) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -2103,88 +2082,39 @@ func (m *OpaqueDeviceConfiguration) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *OpaqueDeviceConfiguration) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *OpaqueDeviceConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := 
m.Parameters.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - i -= len(m.Driver) - copy(dAtA[i:], m.Driver) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Driver))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *PodSchedulingContext) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PodSchedulingContext) MarshalTo(dAtA []byte) (int, error) { +func (m *NetworkDeviceData) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *PodSchedulingContext) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *NetworkDeviceData) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - { - size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } + i -= len(m.HardwareAddress) + copy(dAtA[i:], m.HardwareAddress) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.HardwareAddress))) i-- dAtA[i] = 0x1a - { - size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err + if len(m.IPs) > 0 { + for iNdEx := len(m.IPs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.IPs[iNdEx]) + copy(dAtA[i:], m.IPs[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.IPs[iNdEx]))) + i-- + dAtA[i] = 0x12 } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i -= len(m.InterfaceName) + copy(dAtA[i:], m.InterfaceName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.InterfaceName))) i-- dAtA[i] = 0xa return len(dAtA) - i, nil } -func (m *PodSchedulingContextList) Marshal() (dAtA []byte, err error) { +func (m *OpaqueDeviceConfiguration) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -2194,32 +2124,18 @@ func (m *PodSchedulingContextList) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *PodSchedulingContextList) MarshalTo(dAtA []byte) (int, error) { +func (m *OpaqueDeviceConfiguration) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *PodSchedulingContextList) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *OpaqueDeviceConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if len(m.Items) > 0 { - for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } { - size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.Parameters.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -2227,84 +2143,15 @@ func (m *PodSchedulingContextList) MarshalToSizedBuffer(dAtA []byte) (int, error i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *PodSchedulingContextSpec) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) 
- n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PodSchedulingContextSpec) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PodSchedulingContextSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.PotentialNodes) > 0 { - for iNdEx := len(m.PotentialNodes) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.PotentialNodes[iNdEx]) - copy(dAtA[i:], m.PotentialNodes[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.PotentialNodes[iNdEx]))) - i-- - dAtA[i] = 0x12 - } - } - i -= len(m.SelectedNode) - copy(dAtA[i:], m.SelectedNode) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.SelectedNode))) + dAtA[i] = 0x12 + i -= len(m.Driver) + copy(dAtA[i:], m.Driver) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Driver))) i-- dAtA[i] = 0xa return len(dAtA) - i, nil } -func (m *PodSchedulingContextStatus) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PodSchedulingContextStatus) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PodSchedulingContextStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.ResourceClaims) > 0 { - for iNdEx := len(m.ResourceClaims) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.ResourceClaims[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - func (m *ResourceClaim) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -2448,43 +2295,6 @@ func (m *ResourceClaimList) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *ResourceClaimSchedulingStatus) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ResourceClaimSchedulingStatus) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ResourceClaimSchedulingStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.UnsuitableNodes) > 0 { - for iNdEx := len(m.UnsuitableNodes) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.UnsuitableNodes[iNdEx]) - copy(dAtA[i:], m.UnsuitableNodes[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.UnsuitableNodes[iNdEx]))) - i-- - dAtA[i] = 0x12 - } - } - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - func (m *ResourceClaimSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -2505,11 +2315,6 @@ func (m *ResourceClaimSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l - i -= len(m.Controller) - copy(dAtA[i:], m.Controller) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Controller))) - i-- - dAtA[i] = 0x12 { size, err := m.Devices.MarshalToSizedBuffer(dAtA[:i]) if err != nil { @@ -2543,14 +2348,20 @@ func (m *ResourceClaimStatus) MarshalToSizedBuffer(dAtA []byte) 
(int, error) { _ = i var l int _ = l - i-- - if m.DeallocationRequested { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + if len(m.Devices) > 0 { + for iNdEx := len(m.Devices) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Devices[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } } - i-- - dAtA[i] = 0x18 if len(m.ReservedFor) > 0 { for iNdEx := len(m.ReservedFor) - 1; iNdEx >= 0; iNdEx-- { { @@ -2925,6 +2736,33 @@ func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { dAtA[offset] = uint8(v) return base } +func (m *AllocatedDeviceStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Driver) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Pool) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Device) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = m.Data.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.NetworkData != nil { + l = m.NetworkData.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + func (m *AllocationResult) Size() (n int) { if m == nil { return 0 @@ -2937,8 +2775,6 @@ func (m *AllocationResult) Size() (n int) { l = m.NodeSelector.Size() n += 1 + l + sovGenerated(uint64(l)) } - l = len(m.Controller) - n += 1 + l + sovGenerated(uint64(l)) return n } @@ -3161,10 +2997,6 @@ func (m *DeviceClassSpec) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) } } - if m.SuitableNodes != nil { - l = m.SuitableNodes.Size() - n += 1 + l + sovGenerated(uint64(l)) - } return n } @@ -3219,7 +3051,9 @@ func (m *DeviceRequest) Size() (n int) { l = len(m.AllocationMode) n += 1 + l + sovGenerated(uint64(l)) n += 1 + sovGenerated(uint64(m.Count)) - n += 2 + if m.AdminAccess != nil { + n += 2 + } return n } @@ -3237,6 +3071,9 @@ func (m *DeviceRequestAllocationResult) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) l = len(m.Device) n += 1 + l + sovGenerated(uint64(l)) + if m.AdminAccess != nil { + n += 2 + } return n } @@ -3253,6 +3090,25 @@ func (m *DeviceSelector) Size() (n int) { return n } +func (m *NetworkDeviceData) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.InterfaceName) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.IPs) > 0 { + for _, s := range m.IPs { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.HardwareAddress) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + func (m *OpaqueDeviceConfiguration) Size() (n int) { if m == nil { return 0 @@ -3266,7 +3122,7 @@ func (m *OpaqueDeviceConfiguration) Size() (n int) { return n } -func (m *PodSchedulingContext) Size() (n int) { +func (m *ResourceClaim) Size() (n int) { if m == nil { return 0 } @@ -3281,7 +3137,24 @@ func (m *PodSchedulingContext) Size() (n int) { return n } -func (m *PodSchedulingContextList) Size() (n int) { +func (m *ResourceClaimConsumerReference) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.APIGroup) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Resource) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.UID) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ResourceClaimList) Size() (n int) { if m == nil { return 0 } @@ -3298,113 +3171,13 @@ func (m *PodSchedulingContextList) Size() (n int) { return n } -func (m 
*PodSchedulingContextSpec) Size() (n int) { +func (m *ResourceClaimSpec) Size() (n int) { if m == nil { return 0 } var l int _ = l - l = len(m.SelectedNode) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.PotentialNodes) > 0 { - for _, s := range m.PotentialNodes { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *PodSchedulingContextStatus) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.ResourceClaims) > 0 { - for _, e := range m.ResourceClaims { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *ResourceClaim) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *ResourceClaimConsumerReference) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.APIGroup) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Resource) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.UID) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *ResourceClaimList) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *ResourceClaimSchedulingStatus) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.UnsuitableNodes) > 0 { - for _, s := range m.UnsuitableNodes { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *ResourceClaimSpec) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.Devices.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Controller) + l = m.Devices.Size() n += 1 + l + sovGenerated(uint64(l)) return n } @@ -3425,7 +3198,12 @@ func (m *ResourceClaimStatus) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) } } - n += 2 + if len(m.Devices) > 0 { + for _, e := range m.Devices { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } return n } @@ -3547,14 +3325,33 @@ func sovGenerated(x uint64) (n int) { func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } +func (this *AllocatedDeviceStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForConditions := "[]Condition{" + for _, f := range this.Conditions { + repeatedStringForConditions += fmt.Sprintf("%v", f) + "," + } + repeatedStringForConditions += "}" + s := strings.Join([]string{`&AllocatedDeviceStatus{`, + `Driver:` + fmt.Sprintf("%v", this.Driver) + `,`, + `Pool:` + fmt.Sprintf("%v", this.Pool) + `,`, + `Device:` + fmt.Sprintf("%v", this.Device) + `,`, + `Conditions:` + repeatedStringForConditions + `,`, + `Data:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Data), "RawExtension", "runtime.RawExtension", 1), `&`, ``, 1) + `,`, + `NetworkData:` + strings.Replace(this.NetworkData.String(), "NetworkDeviceData", "NetworkDeviceData", 1) + `,`, + `}`, + }, "") + return s +} func (this *AllocationResult) String() string { if this == nil { return "nil" } s := strings.Join([]string{`&AllocationResult{`, `Devices:` + strings.Replace(strings.Replace(this.Devices.String(), 
"DeviceAllocationResult", "DeviceAllocationResult", 1), `&`, ``, 1) + `,`, - `NodeSelector:` + strings.Replace(fmt.Sprintf("%v", this.NodeSelector), "NodeSelector", "v1.NodeSelector", 1) + `,`, - `Controller:` + fmt.Sprintf("%v", this.Controller) + `,`, + `NodeSelector:` + strings.Replace(fmt.Sprintf("%v", this.NodeSelector), "NodeSelector", "v11.NodeSelector", 1) + `,`, `}`, }, "") return s @@ -3700,7 +3497,7 @@ func (this *DeviceClass) String() string { return "nil" } s := strings.Join([]string{`&DeviceClass{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v11.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "DeviceClassSpec", "DeviceClassSpec", 1), `&`, ``, 1) + `,`, `}`, }, "") @@ -3726,7 +3523,7 @@ func (this *DeviceClassList) String() string { } repeatedStringForItems += "}" s := strings.Join([]string{`&DeviceClassList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v11.ListMeta", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, `Items:` + repeatedStringForItems + `,`, `}`, }, "") @@ -3749,7 +3546,6 @@ func (this *DeviceClassSpec) String() string { s := strings.Join([]string{`&DeviceClassSpec{`, `Selectors:` + repeatedStringForSelectors + `,`, `Config:` + repeatedStringForConfig + `,`, - `SuitableNodes:` + strings.Replace(fmt.Sprintf("%v", this.SuitableNodes), "NodeSelector", "v1.NodeSelector", 1) + `,`, `}`, }, "") return s @@ -3790,7 +3586,7 @@ func (this *DeviceRequest) String() string { `Selectors:` + repeatedStringForSelectors + `,`, `AllocationMode:` + fmt.Sprintf("%v", this.AllocationMode) + `,`, `Count:` + fmt.Sprintf("%v", this.Count) + `,`, - `AdminAccess:` + fmt.Sprintf("%v", this.AdminAccess) + `,`, + `AdminAccess:` + valueToStringGenerated(this.AdminAccess) + `,`, `}`, }, "") return s @@ -3804,6 +3600,7 @@ func (this *DeviceRequestAllocationResult) String() string { `Driver:` + fmt.Sprintf("%v", this.Driver) + `,`, `Pool:` + fmt.Sprintf("%v", this.Pool) + `,`, `Device:` + fmt.Sprintf("%v", this.Device) + `,`, + `AdminAccess:` + valueToStringGenerated(this.AdminAccess) + `,`, `}`, }, "") return s @@ -3818,67 +3615,25 @@ func (this *DeviceSelector) String() string { }, "") return s } -func (this *OpaqueDeviceConfiguration) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&OpaqueDeviceConfiguration{`, - `Driver:` + fmt.Sprintf("%v", this.Driver) + `,`, - `Parameters:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Parameters), "RawExtension", "runtime.RawExtension", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *PodSchedulingContext) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&PodSchedulingContext{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v11.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PodSchedulingContextSpec", "PodSchedulingContextSpec", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "PodSchedulingContextStatus", "PodSchedulingContextStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this 
*PodSchedulingContextList) String() string { - if this == nil { - return "nil" - } - repeatedStringForItems := "[]PodSchedulingContext{" - for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "PodSchedulingContext", "PodSchedulingContext", 1), `&`, ``, 1) + "," - } - repeatedStringForItems += "}" - s := strings.Join([]string{`&PodSchedulingContextList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v11.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, - `}`, - }, "") - return s -} -func (this *PodSchedulingContextSpec) String() string { +func (this *NetworkDeviceData) String() string { if this == nil { return "nil" } - s := strings.Join([]string{`&PodSchedulingContextSpec{`, - `SelectedNode:` + fmt.Sprintf("%v", this.SelectedNode) + `,`, - `PotentialNodes:` + fmt.Sprintf("%v", this.PotentialNodes) + `,`, + s := strings.Join([]string{`&NetworkDeviceData{`, + `InterfaceName:` + fmt.Sprintf("%v", this.InterfaceName) + `,`, + `IPs:` + fmt.Sprintf("%v", this.IPs) + `,`, + `HardwareAddress:` + fmt.Sprintf("%v", this.HardwareAddress) + `,`, `}`, }, "") return s } -func (this *PodSchedulingContextStatus) String() string { +func (this *OpaqueDeviceConfiguration) String() string { if this == nil { return "nil" } - repeatedStringForResourceClaims := "[]ResourceClaimSchedulingStatus{" - for _, f := range this.ResourceClaims { - repeatedStringForResourceClaims += strings.Replace(strings.Replace(f.String(), "ResourceClaimSchedulingStatus", "ResourceClaimSchedulingStatus", 1), `&`, ``, 1) + "," - } - repeatedStringForResourceClaims += "}" - s := strings.Join([]string{`&PodSchedulingContextStatus{`, - `ResourceClaims:` + repeatedStringForResourceClaims + `,`, + s := strings.Join([]string{`&OpaqueDeviceConfiguration{`, + `Driver:` + fmt.Sprintf("%v", this.Driver) + `,`, + `Parameters:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Parameters), "RawExtension", "runtime.RawExtension", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -3888,7 +3643,7 @@ func (this *ResourceClaim) String() string { return "nil" } s := strings.Join([]string{`&ResourceClaim{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v11.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ResourceClaimSpec", "ResourceClaimSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ResourceClaimStatus", "ResourceClaimStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -3918,30 +3673,18 @@ func (this *ResourceClaimList) String() string { } repeatedStringForItems += "}" s := strings.Join([]string{`&ResourceClaimList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v11.ListMeta", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s } -func (this *ResourceClaimSchedulingStatus) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ResourceClaimSchedulingStatus{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `UnsuitableNodes:` + fmt.Sprintf("%v", this.UnsuitableNodes) + `,`, - `}`, - }, "") - return s -} 
func (this *ResourceClaimSpec) String() string { if this == nil { return "nil" } s := strings.Join([]string{`&ResourceClaimSpec{`, `Devices:` + strings.Replace(strings.Replace(this.Devices.String(), "DeviceClaim", "DeviceClaim", 1), `&`, ``, 1) + `,`, - `Controller:` + fmt.Sprintf("%v", this.Controller) + `,`, `}`, }, "") return s @@ -3955,10 +3698,15 @@ func (this *ResourceClaimStatus) String() string { repeatedStringForReservedFor += strings.Replace(strings.Replace(f.String(), "ResourceClaimConsumerReference", "ResourceClaimConsumerReference", 1), `&`, ``, 1) + "," } repeatedStringForReservedFor += "}" + repeatedStringForDevices := "[]AllocatedDeviceStatus{" + for _, f := range this.Devices { + repeatedStringForDevices += strings.Replace(strings.Replace(f.String(), "AllocatedDeviceStatus", "AllocatedDeviceStatus", 1), `&`, ``, 1) + "," + } + repeatedStringForDevices += "}" s := strings.Join([]string{`&ResourceClaimStatus{`, `Allocation:` + strings.Replace(this.Allocation.String(), "AllocationResult", "AllocationResult", 1) + `,`, `ReservedFor:` + repeatedStringForReservedFor + `,`, - `DeallocationRequested:` + fmt.Sprintf("%v", this.DeallocationRequested) + `,`, + `Devices:` + repeatedStringForDevices + `,`, `}`, }, "") return s @@ -3968,7 +3716,7 @@ func (this *ResourceClaimTemplate) String() string { return "nil" } s := strings.Join([]string{`&ResourceClaimTemplate{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v11.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ResourceClaimTemplateSpec", "ResourceClaimTemplateSpec", 1), `&`, ``, 1) + `,`, `}`, }, "") @@ -3984,7 +3732,7 @@ func (this *ResourceClaimTemplateList) String() string { } repeatedStringForItems += "}" s := strings.Join([]string{`&ResourceClaimTemplateList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v11.ListMeta", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, `Items:` + repeatedStringForItems + `,`, `}`, }, "") @@ -3995,7 +3743,7 @@ func (this *ResourceClaimTemplateSpec) String() string { return "nil" } s := strings.Join([]string{`&ResourceClaimTemplateSpec{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v11.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ResourceClaimSpec", "ResourceClaimSpec", 1), `&`, ``, 1) + `,`, `}`, }, "") @@ -4018,7 +3766,7 @@ func (this *ResourceSlice) String() string { return "nil" } s := strings.Join([]string{`&ResourceSlice{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v11.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ResourceSliceSpec", "ResourceSliceSpec", 1), `&`, ``, 1) + `,`, `}`, }, "") @@ -4034,7 +3782,7 @@ func (this *ResourceSliceList) String() string { } repeatedStringForItems += "}" s := 
strings.Join([]string{`&ResourceSliceList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v11.ListMeta", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, `Items:` + repeatedStringForItems + `,`, `}`, }, "") @@ -4053,7 +3801,7 @@ func (this *ResourceSliceSpec) String() string { `Driver:` + fmt.Sprintf("%v", this.Driver) + `,`, `Pool:` + strings.Replace(strings.Replace(this.Pool.String(), "ResourcePool", "ResourcePool", 1), `&`, ``, 1) + `,`, `NodeName:` + fmt.Sprintf("%v", this.NodeName) + `,`, - `NodeSelector:` + strings.Replace(fmt.Sprintf("%v", this.NodeSelector), "NodeSelector", "v1.NodeSelector", 1) + `,`, + `NodeSelector:` + strings.Replace(fmt.Sprintf("%v", this.NodeSelector), "NodeSelector", "v11.NodeSelector", 1) + `,`, `AllNodes:` + fmt.Sprintf("%v", this.AllNodes) + `,`, `Devices:` + repeatedStringForDevices + `,`, `}`, @@ -4068,7 +3816,7 @@ func valueToStringGenerated(v interface{}) string { pv := reflect.Indirect(rv).Interface() return fmt.Sprintf("*%v", pv) } -func (m *AllocationResult) Unmarshal(dAtA []byte) error { +func (m *AllocatedDeviceStatus) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -4091,17 +3839,17 @@ func (m *AllocationResult) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: AllocationResult: wiretype end group for non-group") + return fmt.Errorf("proto: AllocatedDeviceStatus: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: AllocationResult: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: AllocatedDeviceStatus: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Devices", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Driver", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -4111,30 +3859,29 @@ func (m *AllocationResult) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Devices.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Driver = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 3: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NodeSelector", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Pool", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -4144,31 +3891,27 @@ func (m *AllocationResult) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return 
ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if m.NodeSelector == nil { - m.NodeSelector = &v1.NodeSelector{} - } - if err := m.NodeSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Pool = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 4: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Controller", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Device", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -4196,61 +3939,11 @@ func (m *AllocationResult) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Controller = string(dAtA[iNdEx:postIndex]) + m.Device = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *BasicDevice) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: BasicDevice: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: BasicDevice: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -4277,109 +3970,14 @@ func (m *BasicDevice) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Attributes == nil { - m.Attributes = make(map[QualifiedName]DeviceAttribute) + m.Conditions = append(m.Conditions, v1.Condition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - var mapkey QualifiedName - mapvalue := &DeviceAttribute{} - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = QualifiedName(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if 
fieldNum == 2 { - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if postmsgIndex < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue = &DeviceAttribute{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.Attributes[QualifiedName(mapkey)] = *mapvalue iNdEx = postIndex - case 2: + case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Capacity", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -4406,161 +4004,15 @@ func (m *BasicDevice) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Capacity == nil { - m.Capacity = make(map[QualifiedName]resource.Quantity) - } - var mapkey QualifiedName - mapvalue := &resource.Quantity{} - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = QualifiedName(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if postmsgIndex < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue = &resource.Quantity{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.Capacity[QualifiedName(mapkey)] = *mapvalue - 
iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { + if err := m.Data.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CELDeviceSelector) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CELDeviceSelector: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CELDeviceSelector: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + iNdEx = postIndex + case 6: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Expression", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field NetworkData", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -4570,23 +4022,27 @@ func (m *CELDeviceSelector) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Expression = string(dAtA[iNdEx:postIndex]) + if m.NetworkData == nil { + m.NetworkData = &NetworkDeviceData{} + } + if err := m.NetworkData.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -4609,7 +4065,7 @@ func (m *CELDeviceSelector) Unmarshal(dAtA []byte) error { } return nil } -func (m *Device) Unmarshal(dAtA []byte) error { +func (m *AllocationResult) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -4632,17 +4088,17 @@ func (m *Device) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Device: wiretype end group for non-group") + return fmt.Errorf("proto: AllocationResult: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Device: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: AllocationResult: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Devices", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -4652,27 +4108,28 @@ func (m *Device) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return 
ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(dAtA[iNdEx:postIndex]) + if err := m.Devices.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 2: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Basic", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field NodeSelector", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -4699,10 +4156,10 @@ func (m *Device) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Basic == nil { - m.Basic = &BasicDevice{} + if m.NodeSelector == nil { + m.NodeSelector = &v11.NodeSelector{} } - if err := m.Basic.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.NodeSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -4727,7 +4184,7 @@ func (m *Device) Unmarshal(dAtA []byte) error { } return nil } -func (m *DeviceAllocationConfiguration) Unmarshal(dAtA []byte) error { +func (m *BasicDevice) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -4750,17 +4207,17 @@ func (m *DeviceAllocationConfiguration) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: DeviceAllocationConfiguration: wiretype end group for non-group") + return fmt.Errorf("proto: BasicDevice: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: DeviceAllocationConfiguration: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: BasicDevice: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Source", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -4770,59 +4227,124 @@ func (m *DeviceAllocationConfiguration) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Source = AllocationConfigSource(dAtA[iNdEx:postIndex]) + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Attributes == nil { + m.Attributes = make(map[QualifiedName]DeviceAttribute) + } + var mapkey QualifiedName + mapvalue := &DeviceAttribute{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + 
} + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = QualifiedName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &DeviceAttribute{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Attributes[QualifiedName(mapkey)] = *mapvalue iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Requests", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Requests = append(m.Requests, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DeviceConfiguration", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Capacity", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -4849,9 +4371,105 @@ func (m *DeviceAllocationConfiguration) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.DeviceConfiguration.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + if m.Capacity == nil { + m.Capacity = make(map[QualifiedName]resource.Quantity) + } + var mapkey QualifiedName + mapvalue := &resource.Quantity{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + 
if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = QualifiedName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } } + m.Capacity[QualifiedName(mapkey)] = *mapvalue iNdEx = postIndex default: iNdEx = preIndex @@ -4874,7 +4492,7 @@ func (m *DeviceAllocationConfiguration) Unmarshal(dAtA []byte) error { } return nil } -func (m *DeviceAllocationResult) Unmarshal(dAtA []byte) error { +func (m *CELDeviceSelector) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -4897,51 +4515,17 @@ func (m *DeviceAllocationResult) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: DeviceAllocationResult: wiretype end group for non-group") + return fmt.Errorf("proto: CELDeviceSelector: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: DeviceAllocationResult: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: CELDeviceSelector: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Results", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Results = append(m.Results, DeviceRequestAllocationResult{}) - if err := m.Results[len(m.Results)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Config", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Expression", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -4951,25 +4535,23 @@ func (m *DeviceAllocationResult) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := 
int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Config = append(m.Config, DeviceAllocationConfiguration{}) - if err := m.Config[len(m.Config)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Expression = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -4992,7 +4574,7 @@ func (m *DeviceAllocationResult) Unmarshal(dAtA []byte) error { } return nil } -func (m *DeviceAttribute) Unmarshal(dAtA []byte) error { +func (m *Device) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -5012,59 +4594,18 @@ func (m *DeviceAttribute) Unmarshal(dAtA []byte) error { break } } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DeviceAttribute: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DeviceAttribute: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field IntValue", wireType) - } - var v int64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.IntValue = &v - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field BoolValue", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - b := bool(v != 0) - m.BoolValue = &b - case 4: + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Device: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Device: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field StringValue", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -5092,14 +4633,13 @@ func (m *DeviceAttribute) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - s := string(dAtA[iNdEx:postIndex]) - m.StringValue = &s + m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 5: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field VersionValue", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Basic", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -5109,24 +4649,27 @@ func (m *DeviceAttribute) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - s := 
string(dAtA[iNdEx:postIndex]) - m.VersionValue = &s + if m.Basic == nil { + m.Basic = &BasicDevice{} + } + if err := m.Basic.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -5149,7 +4692,7 @@ func (m *DeviceAttribute) Unmarshal(dAtA []byte) error { } return nil } -func (m *DeviceClaim) Unmarshal(dAtA []byte) error { +func (m *DeviceAllocationConfiguration) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -5172,17 +4715,17 @@ func (m *DeviceClaim) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: DeviceClaim: wiretype end group for non-group") + return fmt.Errorf("proto: DeviceAllocationConfiguration: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: DeviceClaim: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DeviceAllocationConfiguration: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Requests", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Source", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -5192,31 +4735,29 @@ func (m *DeviceClaim) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Requests = append(m.Requests, DeviceRequest{}) - if err := m.Requests[len(m.Requests)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Source = AllocationConfigSource(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Constraints", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Requests", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -5226,29 +4767,27 @@ func (m *DeviceClaim) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Constraints = append(m.Constraints, DeviceConstraint{}) - if err := m.Constraints[len(m.Constraints)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Requests = append(m.Requests, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Config", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field DeviceConfiguration", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -5275,8 +4814,7 @@ func (m *DeviceClaim) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Config = append(m.Config, DeviceClaimConfiguration{}) 
- if err := m.Config[len(m.Config)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.DeviceConfiguration.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -5301,7 +4839,7 @@ func (m *DeviceClaim) Unmarshal(dAtA []byte) error { } return nil } -func (m *DeviceClaimConfiguration) Unmarshal(dAtA []byte) error { +func (m *DeviceAllocationResult) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -5324,17 +4862,17 @@ func (m *DeviceClaimConfiguration) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: DeviceClaimConfiguration: wiretype end group for non-group") + return fmt.Errorf("proto: DeviceAllocationResult: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: DeviceClaimConfiguration: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DeviceAllocationResult: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Requests", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Results", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -5344,27 +4882,29 @@ func (m *DeviceClaimConfiguration) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Requests = append(m.Requests, string(dAtA[iNdEx:postIndex])) + m.Results = append(m.Results, DeviceRequestAllocationResult{}) + if err := m.Results[len(m.Results)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DeviceConfiguration", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Config", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -5391,7 +4931,8 @@ func (m *DeviceClaimConfiguration) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.DeviceConfiguration.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Config = append(m.Config, DeviceAllocationConfiguration{}) + if err := m.Config[len(m.Config)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -5416,7 +4957,7 @@ func (m *DeviceClaimConfiguration) Unmarshal(dAtA []byte) error { } return nil } -func (m *DeviceClass) Unmarshal(dAtA []byte) error { +func (m *DeviceAttribute) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -5439,17 +4980,58 @@ func (m *DeviceClass) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: DeviceClass: wiretype end group for non-group") + return fmt.Errorf("proto: DeviceAttribute: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: DeviceClass: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DeviceAttribute: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { - case 1: + case 2: + 
if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IntValue", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.IntValue = &v + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field BoolValue", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.BoolValue = &b + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field StringValue", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -5459,30 +5041,30 @@ func (m *DeviceClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + s := string(dAtA[iNdEx:postIndex]) + m.StringValue = &s iNdEx = postIndex - case 2: + case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field VersionValue", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -5492,24 +5074,24 @@ func (m *DeviceClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + s := string(dAtA[iNdEx:postIndex]) + m.VersionValue = &s iNdEx = postIndex default: iNdEx = preIndex @@ -5532,7 +5114,7 @@ func (m *DeviceClass) Unmarshal(dAtA []byte) error { } return nil } -func (m *DeviceClassConfiguration) Unmarshal(dAtA []byte) error { +func (m *DeviceClaim) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -5542,28 +5124,96 @@ func (m *DeviceClassConfiguration) Unmarshal(dAtA []byte) error { if shift >= 64 { return ErrIntOverflowGenerated } - if iNdEx >= l { + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeviceClaim: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeviceClaim: illegal tag %d (wire type %d)", fieldNum, wire) + } 
+ switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Requests", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Requests = append(m.Requests, DeviceRequest{}) + if err := m.Requests[len(m.Requests)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Constraints", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + m.Constraints = append(m.Constraints, DeviceConstraint{}) + if err := m.Constraints[len(m.Constraints)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DeviceClassConfiguration: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DeviceClassConfiguration: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + iNdEx = postIndex + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DeviceConfiguration", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Config", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -5590,7 +5240,8 @@ func (m *DeviceClassConfiguration) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.DeviceConfiguration.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Config = append(m.Config, DeviceClaimConfiguration{}) + if err := m.Config[len(m.Config)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -5615,7 +5266,7 @@ func (m *DeviceClassConfiguration) Unmarshal(dAtA []byte) error { } return nil } -func (m *DeviceClassList) Unmarshal(dAtA []byte) error { +func (m *DeviceClaimConfiguration) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -5638,17 +5289,17 @@ func (m *DeviceClassList) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: DeviceClassList: wiretype end group for non-group") + return fmt.Errorf("proto: DeviceClaimConfiguration: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: DeviceClassList: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DeviceClaimConfiguration: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + return 
fmt.Errorf("proto: wrong wireType = %d for field Requests", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -5658,28 +5309,27 @@ func (m *DeviceClassList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Requests = append(m.Requests, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field DeviceConfiguration", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -5706,8 +5356,7 @@ func (m *DeviceClassList) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, DeviceClass{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.DeviceConfiguration.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -5732,7 +5381,7 @@ func (m *DeviceClassList) Unmarshal(dAtA []byte) error { } return nil } -func (m *DeviceClassSpec) Unmarshal(dAtA []byte) error { +func (m *DeviceClass) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -5755,15 +5404,15 @@ func (m *DeviceClassSpec) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: DeviceClassSpec: wiretype end group for non-group") + return fmt.Errorf("proto: DeviceClass: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: DeviceClassSpec: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DeviceClass: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Selectors", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -5790,48 +5439,13 @@ func (m *DeviceClassSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Selectors = append(m.Selectors, DeviceSelector{}) - if err := m.Selectors[len(m.Selectors)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Config", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Config = append(m.Config, DeviceClassConfiguration{}) - if err := 
m.Config[len(m.Config)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SuitableNodes", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -5858,10 +5472,7 @@ func (m *DeviceClassSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.SuitableNodes == nil { - m.SuitableNodes = &v1.NodeSelector{} - } - if err := m.SuitableNodes.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -5886,7 +5497,7 @@ func (m *DeviceClassSpec) Unmarshal(dAtA []byte) error { } return nil } -func (m *DeviceConfiguration) Unmarshal(dAtA []byte) error { +func (m *DeviceClassConfiguration) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -5909,15 +5520,15 @@ func (m *DeviceConfiguration) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: DeviceConfiguration: wiretype end group for non-group") + return fmt.Errorf("proto: DeviceClassConfiguration: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: DeviceConfiguration: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DeviceClassConfiguration: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Opaque", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field DeviceConfiguration", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -5944,10 +5555,7 @@ func (m *DeviceConfiguration) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Opaque == nil { - m.Opaque = &OpaqueDeviceConfiguration{} - } - if err := m.Opaque.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.DeviceConfiguration.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -5972,7 +5580,7 @@ func (m *DeviceConfiguration) Unmarshal(dAtA []byte) error { } return nil } -func (m *DeviceConstraint) Unmarshal(dAtA []byte) error { +func (m *DeviceClassList) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -5995,17 +5603,17 @@ func (m *DeviceConstraint) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: DeviceConstraint: wiretype end group for non-group") + return fmt.Errorf("proto: DeviceClassList: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: DeviceConstraint: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DeviceClassList: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Requests", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -6015,29 +5623,30 @@ func (m *DeviceConstraint) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if 
intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Requests = append(m.Requests, string(dAtA[iNdEx:postIndex])) + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MatchAttribute", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -6047,24 +5656,25 @@ func (m *DeviceConstraint) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - s := FullyQualifiedName(dAtA[iNdEx:postIndex]) - m.MatchAttribute = &s + m.Items = append(m.Items, DeviceClass{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -6085,102 +5695,38 @@ func (m *DeviceConstraint) Unmarshal(dAtA []byte) error { if iNdEx > l { return io.ErrUnexpectedEOF } - return nil -} -func (m *DeviceRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DeviceRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DeviceRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DeviceClassName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return 
ErrInvalidLengthGenerated + return nil +} +func (m *DeviceClassSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated } - if postIndex > l { + if iNdEx >= l { return io.ErrUnexpectedEOF } - m.DeviceClassName = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeviceClassSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeviceClassSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Selectors", wireType) } @@ -6214,11 +5760,11 @@ func (m *DeviceRequest) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex - case 4: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AllocationMode", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Config", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -6228,63 +5774,26 @@ func (m *DeviceRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.AllocationMode = DeviceAllocationMode(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Count", wireType) - } - m.Count = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Count |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field AdminAccess", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } + m.Config = append(m.Config, DeviceClassConfiguration{}) + if err := m.Config[len(m.Config)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - m.AdminAccess = bool(v != 0) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -6306,7 +5815,7 @@ func (m *DeviceRequest) Unmarshal(dAtA []byte) error { } return nil } -func (m *DeviceRequestAllocationResult) Unmarshal(dAtA []byte) error { +func (m *DeviceConfiguration) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -6329,17 +5838,17 @@ func (m *DeviceRequestAllocationResult) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: DeviceRequestAllocationResult: wiretype end group for non-group") + return fmt.Errorf("proto: DeviceConfiguration: wiretype end group for 
non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: DeviceRequestAllocationResult: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DeviceConfiguration: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Request", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Opaque", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -6349,59 +5858,81 @@ func (m *DeviceRequestAllocationResult) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Request = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Driver", wireType) + if m.Opaque == nil { + m.Opaque = &OpaqueDeviceConfiguration{} } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } + if err := m.Opaque.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err } - postIndex := iNdEx + intStringLen - if postIndex < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } - if postIndex > l { + if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.Driver = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeviceConstraint) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeviceConstraint: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeviceConstraint: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Pool", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Requests", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -6429,11 +5960,11 @@ func (m *DeviceRequestAllocationResult) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Pool = string(dAtA[iNdEx:postIndex]) + m.Requests = append(m.Requests, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex - case 4: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field 
Device", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field MatchAttribute", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -6461,7 +5992,8 @@ func (m *DeviceRequestAllocationResult) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Device = string(dAtA[iNdEx:postIndex]) + s := FullyQualifiedName(dAtA[iNdEx:postIndex]) + m.MatchAttribute = &s iNdEx = postIndex default: iNdEx = preIndex @@ -6484,7 +6016,7 @@ func (m *DeviceRequestAllocationResult) Unmarshal(dAtA []byte) error { } return nil } -func (m *DeviceSelector) Unmarshal(dAtA []byte) error { +func (m *DeviceRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -6507,15 +6039,79 @@ func (m *DeviceSelector) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: DeviceSelector: wiretype end group for non-group") + return fmt.Errorf("proto: DeviceRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: DeviceSelector: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DeviceRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CEL", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DeviceClassName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DeviceClassName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Selectors", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -6542,66 +6138,14 @@ func (m *DeviceSelector) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.CEL == nil { - m.CEL = &CELDeviceSelector{} - } - if err := m.CEL.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Selectors = append(m.Selectors, DeviceSelector{}) + if err := m.Selectors[len(m.Selectors)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return 
io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *OpaqueDeviceConfiguration) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: OpaqueDeviceConfiguration: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: OpaqueDeviceConfiguration: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Driver", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field AllocationMode", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -6629,13 +6173,13 @@ func (m *OpaqueDeviceConfiguration) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Driver = string(dAtA[iNdEx:postIndex]) + m.AllocationMode = DeviceAllocationMode(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Parameters", wireType) + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Count", wireType) } - var msglen int + m.Count = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -6645,25 +6189,32 @@ func (m *OpaqueDeviceConfiguration) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + m.Count |= int64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AdminAccess", wireType) } - if err := m.Parameters.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } } - iNdEx = postIndex + b := bool(v != 0) + m.AdminAccess = &b default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -6685,7 +6236,7 @@ func (m *OpaqueDeviceConfiguration) Unmarshal(dAtA []byte) error { } return nil } -func (m *PodSchedulingContext) Unmarshal(dAtA []byte) error { +func (m *DeviceRequestAllocationResult) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -6708,17 +6259,17 @@ func (m *PodSchedulingContext) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: PodSchedulingContext: wiretype end group for non-group") + return fmt.Errorf("proto: DeviceRequestAllocationResult: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: PodSchedulingContext: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DeviceRequestAllocationResult: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { 
case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Request", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -6728,30 +6279,29 @@ func (m *PodSchedulingContext) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Request = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Driver", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -6761,30 +6311,29 @@ func (m *PodSchedulingContext) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Driver = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Pool", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -6794,25 +6343,77 @@ func (m *PodSchedulingContext) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + m.Pool = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Device", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Device = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 5: + if wireType != 0 { + return 
fmt.Errorf("proto: wrong wireType = %d for field AdminAccess", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.AdminAccess = &b default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -6834,7 +6435,7 @@ func (m *PodSchedulingContext) Unmarshal(dAtA []byte) error { } return nil } -func (m *PodSchedulingContextList) Unmarshal(dAtA []byte) error { +func (m *DeviceSelector) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -6847,58 +6448,25 @@ func (m *PodSchedulingContextList) Unmarshal(dAtA []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PodSchedulingContextList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PodSchedulingContextList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break } - iNdEx = postIndex - case 2: + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeviceSelector: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeviceSelector: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field CEL", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -6925,8 +6493,10 @@ func (m *PodSchedulingContextList) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, PodSchedulingContext{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.CEL == nil { + m.CEL = &CELDeviceSelector{} + } + if err := m.CEL.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -6951,7 +6521,7 @@ func (m *PodSchedulingContextList) Unmarshal(dAtA []byte) error { } return nil } -func (m *PodSchedulingContextSpec) Unmarshal(dAtA []byte) error { +func (m *NetworkDeviceData) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -6974,15 +6544,15 @@ func (m *PodSchedulingContextSpec) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: PodSchedulingContextSpec: wiretype end group for non-group") + return fmt.Errorf("proto: 
NetworkDeviceData: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: PodSchedulingContextSpec: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: NetworkDeviceData: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SelectedNode", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field InterfaceName", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -7010,11 +6580,43 @@ func (m *PodSchedulingContextSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.SelectedNode = string(dAtA[iNdEx:postIndex]) + m.InterfaceName = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PotentialNodes", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field IPs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.IPs = append(m.IPs, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field HardwareAddress", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -7042,7 +6644,7 @@ func (m *PodSchedulingContextSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.PotentialNodes = append(m.PotentialNodes, string(dAtA[iNdEx:postIndex])) + m.HardwareAddress = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -7065,7 +6667,7 @@ func (m *PodSchedulingContextSpec) Unmarshal(dAtA []byte) error { } return nil } -func (m *PodSchedulingContextStatus) Unmarshal(dAtA []byte) error { +func (m *OpaqueDeviceConfiguration) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -7088,15 +6690,47 @@ func (m *PodSchedulingContextStatus) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: PodSchedulingContextStatus: wiretype end group for non-group") + return fmt.Errorf("proto: OpaqueDeviceConfiguration: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: PodSchedulingContextStatus: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: OpaqueDeviceConfiguration: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ResourceClaims", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Driver", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex 
< 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Driver = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Parameters", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -7123,8 +6757,7 @@ func (m *PodSchedulingContextStatus) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.ResourceClaims = append(m.ResourceClaims, ResourceClaimSchedulingStatus{}) - if err := m.ResourceClaims[len(m.ResourceClaims)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Parameters.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -7593,120 +7226,6 @@ func (m *ResourceClaimList) Unmarshal(dAtA []byte) error { } return nil } -func (m *ResourceClaimSchedulingStatus) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ResourceClaimSchedulingStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ResourceClaimSchedulingStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field UnsuitableNodes", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.UnsuitableNodes = append(m.UnsuitableNodes, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} func (m *ResourceClaimSpec) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -7769,38 +7288,6 @@ func (m *ResourceClaimSpec) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex - case 2: - 
if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Controller", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Controller = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -7921,11 +7408,11 @@ func (m *ResourceClaimStatus) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DeallocationRequested", wireType) + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Devices", wireType) } - var v int + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -7935,12 +7422,26 @@ func (m *ResourceClaimStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - m.DeallocationRequested = bool(v != 0) + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Devices = append(m.Devices, AllocatedDeviceStatus{}) + if err := m.Devices[len(m.Devices)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -8820,7 +8321,7 @@ func (m *ResourceSliceSpec) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.NodeSelector == nil { - m.NodeSelector = &v1.NodeSelector{} + m.NodeSelector = &v11.NodeSelector{} } if err := m.NodeSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err diff --git a/vendor/k8s.io/api/resource/v1alpha3/generated.proto b/vendor/k8s.io/api/resource/v1alpha3/generated.proto index b4428ad45..e802a0143 100644 --- a/vendor/k8s.io/api/resource/v1alpha3/generated.proto +++ b/vendor/k8s.io/api/resource/v1alpha3/generated.proto @@ -30,6 +30,56 @@ import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; // Package-wide variables from generator "generated". option go_package = "k8s.io/api/resource/v1alpha3"; +// AllocatedDeviceStatus contains the status of an allocated device, if the +// driver chooses to report it. This may include driver-specific information. +message AllocatedDeviceStatus { + // Driver specifies the name of the DRA driver whose kubelet + // plugin should be invoked to process the allocation once the claim is + // needed on a node. + // + // Must be a DNS subdomain and should end with a DNS domain owned by the + // vendor of the driver. + // + // +required + optional string driver = 1; + + // This name together with the driver name and the device name field + // identify which device was allocated (`//`). + // + // Must not be longer than 253 characters and may contain one or more + // DNS sub-domains separated by slashes. 
+ // + // +required + optional string pool = 2; + + // Device references one device instance via its name in the driver's + // resource pool. It must be a DNS label. + // + // +required + optional string device = 3; + + // Conditions contains the latest observation of the device's state. + // If the device has been configured according to the class and claim + // config references, the `Ready` condition should be True. + // + // +optional + // +listType=map + // +listMapKey=type + repeated .k8s.io.apimachinery.pkg.apis.meta.v1.Condition conditions = 4; + + // Data contains arbitrary driver-specific data. + // + // The length of the raw data must be smaller or equal to 10 Ki. + // + // +optional + optional .k8s.io.apimachinery.pkg.runtime.RawExtension data = 5; + + // NetworkData contains network-related information specific to the device. + // + // +optional + optional NetworkDeviceData networkData = 6; +} + // AllocationResult contains attributes of an allocated resource. message AllocationResult { // Devices is the result of allocating devices. @@ -42,22 +92,6 @@ message AllocationResult { // // +optional optional .k8s.io.api.core.v1.NodeSelector nodeSelector = 3; - - // Controller is the name of the DRA driver which handled the - // allocation. That driver is also responsible for deallocating the - // claim. It is empty when the claim can be deallocated without - // involving a driver. - // - // A driver may allocate devices provided by other drivers, so this - // driver name here can be different from the driver names listed for - // the results. - // - // This is an alpha field and requires enabling the DRAControlPlaneController - // feature gate. - // - // +optional - // +featureGate=DRAControlPlaneController - optional string controller = 4; } // BasicDevice defines one device instance. @@ -128,6 +162,10 @@ message CELDeviceSelector { // // cel.bind(dra, device.attributes["dra.example.com"], dra.someBool && dra.anotherBool) // + // The length of the expression must be smaller or equal to 10 Ki. The + // cost of evaluating it is also limited based on the estimated number + // of logical steps. + // // +required optional string expression = 1; } @@ -309,22 +347,6 @@ message DeviceClassSpec { // +optional // +listType=atomic repeated DeviceClassConfiguration config = 2; - - // Only nodes matching the selector will be considered by the scheduler - // when trying to find a Node that fits a Pod when that Pod uses - // a claim that has not been allocated yet *and* that claim - // gets allocated through a control plane controller. It is ignored - // when the claim does not use a control plane controller - // for allocation. - // - // Setting this field is optional. If unset, all Nodes are candidates. - // - // This is an alpha field and requires enabling the DRAControlPlaneController - // feature gate. - // - // +optional - // +featureGate=DRAControlPlaneController - optional .k8s.io.api.core.v1.NodeSelector suitableNodes = 3; } // DeviceConfiguration must have exactly one field set. It gets embedded @@ -443,8 +465,12 @@ message DeviceRequest { // all ordinary claims to the device with respect to access modes and // any resource allocations. // + // This is an alpha field and requires enabling the DRAAdminAccess + // feature gate. Admin access is disabled if this field is unset or + // set to false, otherwise it is enabled. 
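As a rough illustration of the pointer-valued adminAccess field introduced in this hunk, a minimal Go sketch follows; the request name is an assumed value, and the field only takes effect when the DRAAdminAccess feature gate is enabled.

package main

import (
	"fmt"

	resourcev1alpha3 "k8s.io/api/resource/v1alpha3"
	"k8s.io/utils/ptr"
)

func main() {
	// A request for one device with administrative access. AdminAccess is
	// now *bool: nil or false means ordinary access; true is honored only
	// when the DRAAdminAccess feature gate is enabled.
	req := resourcev1alpha3.DeviceRequest{
		Name:           "gpu-monitor", // assumed request name within the claim
		AllocationMode: "ExactCount",
		Count:          1,
		AdminAccess:    ptr.To(true),
	}
	fmt.Printf("request %q admin access: %v\n", req.Name, *req.AdminAccess)
}

Switching from bool with +default=false to *bool with a feature gate lets the API distinguish "unset" from an explicit false, which is why the default marker is dropped in this change.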
+ // // +optional - // +default=false + // +featureGate=DRAAdminAccess optional bool adminAccess = 6; } @@ -481,6 +507,18 @@ message DeviceRequestAllocationResult { // // +required optional string device = 4; + + // AdminAccess indicates that this device was allocated for + // administrative access. See the corresponding request field + // for a definition of mode. + // + // This is an alpha field and requires enabling the DRAAdminAccess + // feature gate. Admin access is disabled if this field is unset or + // set to false, otherwise it is enabled. + // + // +optional + // +featureGate=DRAAdminAccess + optional bool adminAccess = 5; } // DeviceSelector must have exactly one field set. @@ -492,6 +530,37 @@ message DeviceSelector { optional CELDeviceSelector cel = 1; } +// NetworkDeviceData provides network-related details for the allocated device. +// This information may be filled by drivers or other components to configure +// or identify the device within a network context. +message NetworkDeviceData { + // InterfaceName specifies the name of the network interface associated with + // the allocated device. This might be the name of a physical or virtual + // network interface being configured in the pod. + // + // Must not be longer than 256 characters. + // + // +optional + optional string interfaceName = 1; + + // IPs lists the network addresses assigned to the device's network interface. + // This can include both IPv4 and IPv6 addresses. + // The IPs are in the CIDR notation, which includes both the address and the + // associated subnet mask. + // e.g.: "192.0.2.5/24" for IPv4 and "2001:db8::5/64" for IPv6. + // + // +optional + // +listType=atomic + repeated string ips = 2; + + // HardwareAddress represents the hardware address (e.g. MAC Address) of the device's network interface. + // + // Must not be longer than 128 characters. + // + // +optional + optional string hardwareAddress = 3; +} + // OpaqueDeviceConfiguration contains configuration parameters for a driver // in a format defined by the driver vendor. message OpaqueDeviceConfiguration { @@ -512,73 +581,12 @@ message OpaqueDeviceConfiguration { // includes self-identification and a version ("kind" + "apiVersion" for // Kubernetes types), with conversion between different versions. // + // The length of the raw data must be smaller or equal to 10 Ki. + // // +required optional .k8s.io.apimachinery.pkg.runtime.RawExtension parameters = 2; } -// PodSchedulingContext objects hold information that is needed to schedule -// a Pod with ResourceClaims that use "WaitForFirstConsumer" allocation -// mode. -// -// This is an alpha type and requires enabling the DRAControlPlaneController -// feature gate. -message PodSchedulingContext { - // Standard object metadata - // +optional - optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // Spec describes where resources for the Pod are needed. - optional PodSchedulingContextSpec spec = 2; - - // Status describes where resources for the Pod can be allocated. - // - // +optional - optional PodSchedulingContextStatus status = 3; -} - -// PodSchedulingContextList is a collection of Pod scheduling objects. -message PodSchedulingContextList { - // Standard list metadata - // +optional - optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - - // Items is the list of PodSchedulingContext objects. - repeated PodSchedulingContext items = 2; -} - -// PodSchedulingContextSpec describes where resources for the Pod are needed. 
-message PodSchedulingContextSpec { - // SelectedNode is the node for which allocation of ResourceClaims that - // are referenced by the Pod and that use "WaitForFirstConsumer" - // allocation is to be attempted. - // - // +optional - optional string selectedNode = 1; - - // PotentialNodes lists nodes where the Pod might be able to run. - // - // The size of this field is limited to 128. This is large enough for - // many clusters. Larger clusters may need more attempts to find a node - // that suits all pending resources. This may get increased in the - // future, but not reduced. - // - // +optional - // +listType=atomic - repeated string potentialNodes = 2; -} - -// PodSchedulingContextStatus describes where resources for the Pod can be allocated. -message PodSchedulingContextStatus { - // ResourceClaims describes resource availability for each - // pod.spec.resourceClaim entry where the corresponding ResourceClaim - // uses "WaitForFirstConsumer" allocation mode. - // - // +listType=map - // +listMapKey=name - // +optional - repeated ResourceClaimSchedulingStatus resourceClaims = 1; -} - // ResourceClaim describes a request for access to resources in the cluster, // for use by workloads. For example, if a workload needs an accelerator device // with specific properties, this is how that request is expressed. The status @@ -634,46 +642,12 @@ message ResourceClaimList { repeated ResourceClaim items = 2; } -// ResourceClaimSchedulingStatus contains information about one particular -// ResourceClaim with "WaitForFirstConsumer" allocation mode. -message ResourceClaimSchedulingStatus { - // Name matches the pod.spec.resourceClaims[*].Name field. - // - // +required - optional string name = 1; - - // UnsuitableNodes lists nodes that the ResourceClaim cannot be - // allocated for. - // - // The size of this field is limited to 128, the same as for - // PodSchedulingSpec.PotentialNodes. This may get increased in the - // future, but not reduced. - // - // +optional - // +listType=atomic - repeated string unsuitableNodes = 2; -} - // ResourceClaimSpec defines what is being requested in a ResourceClaim and how to configure it. message ResourceClaimSpec { // Devices defines how to request devices. // // +optional optional DeviceClaim devices = 1; - - // Controller is the name of the DRA driver that is meant - // to handle allocation of this claim. If empty, allocation is handled - // by the scheduler while scheduling a pod. - // - // Must be a DNS subdomain and should end with a DNS domain owned by the - // vendor of the driver. - // - // This is an alpha field and requires enabling the DRAControlPlaneController - // feature gate. - // - // +optional - // +featureGate=DRAControlPlaneController - optional string controller = 2; } // ResourceClaimStatus tracks whether the resource has been allocated and what @@ -701,7 +675,7 @@ message ResourceClaimStatus { // which issued it knows that it must put the pod back into the queue, // waiting for the ResourceClaim to become usable again. // - // There can be at most 32 such reservations. This may get increased in + // There can be at most 256 such reservations. This may get increased in // the future, but not reduced. // // +optional @@ -711,19 +685,17 @@ message ResourceClaimStatus { // +patchMergeKey=uid repeated ResourceClaimConsumerReference reservedFor = 2; - // Indicates that a claim is to be deallocated. While this is set, - // no new consumers may be added to ReservedFor. 
- // - // This is only used if the claim needs to be deallocated by a DRA driver. - // That driver then must deallocate this claim and reset the field - // together with clearing the Allocation field. - // - // This is an alpha field and requires enabling the DRAControlPlaneController - // feature gate. + // Devices contains the status of each device allocated for this + // claim, as reported by the driver. This can include driver-specific + // information. Entries are owned by their respective drivers. // // +optional - // +featureGate=DRAControlPlaneController - optional bool deallocationRequested = 3; + // +listType=map + // +listMapKey=driver + // +listMapKey=device + // +listMapKey=pool + // +featureGate=DRAResourceClaimDeviceStatus + repeated AllocatedDeviceStatus devices = 4; } // ResourceClaimTemplate is used to produce ResourceClaim objects. @@ -755,7 +727,7 @@ message ResourceClaimTemplateList { // ResourceClaimTemplateSpec contains the metadata and fields for a ResourceClaim. message ResourceClaimTemplateSpec { - // ObjectMeta may contain labels and annotations that will be copied into the PVC + // ObjectMeta may contain labels and annotations that will be copied into the ResourceClaim // when creating it. No other fields are allowed and will be rejected during // validation. // +optional diff --git a/vendor/k8s.io/api/resource/v1alpha3/register.go b/vendor/k8s.io/api/resource/v1alpha3/register.go index 74044e8cf..8573758e3 100644 --- a/vendor/k8s.io/api/resource/v1alpha3/register.go +++ b/vendor/k8s.io/api/resource/v1alpha3/register.go @@ -50,8 +50,6 @@ func addKnownTypes(scheme *runtime.Scheme) error { &ResourceClaimList{}, &ResourceClaimTemplate{}, &ResourceClaimTemplateList{}, - &PodSchedulingContext{}, - &PodSchedulingContextList{}, &ResourceSlice{}, &ResourceSliceList{}, ) diff --git a/vendor/k8s.io/api/resource/v1alpha3/types.go b/vendor/k8s.io/api/resource/v1alpha3/types.go index 4efd2491d..49d7c86de 100644 --- a/vendor/k8s.io/api/resource/v1alpha3/types.go +++ b/vendor/k8s.io/api/resource/v1alpha3/types.go @@ -37,6 +37,7 @@ const ( // +genclient:nonNamespaced // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:prerelease-lifecycle-gen:introduced=1.31 +// +k8s:prerelease-lifecycle-gen:replacement=resource.k8s.io,v1beta1,ResourceSlice // ResourceSlice represents one or more resources in a pool of similar resources, // managed by a common driver. A pool may span more than one ResourceSlice, and exactly how many @@ -144,6 +145,10 @@ type ResourceSliceSpec struct { Devices []Device `json:"devices" protobuf:"bytes,6,name=devices"` } +// DriverNameMaxLength is the maximum valid length of a driver name in the +// ResourceSliceSpec and other places. It's the same as for CSI driver names. +const DriverNameMaxLength = 63 + // ResourcePool describes the pool that ResourceSlices belong to. type ResourcePool struct { // Name is used to identify the pool. For node-local devices, this @@ -220,7 +225,7 @@ type BasicDevice struct { Capacity map[QualifiedName]resource.Quantity `json:"capacity,omitempty" protobuf:"bytes,2,rep,name=capacity"` } -// Limit for the sum of the number of entries in both ResourceSlices. +// Limit for the sum of the number of entries in both attributes and capacity. const ResourceSliceMaxAttributesAndCapacitiesPerDevice = 32 // QualifiedName is the name of a device attribute or capacity. @@ -244,6 +249,9 @@ type QualifiedName string // FullyQualifiedName is a QualifiedName where the domain is set. 
type FullyQualifiedName string +// DeviceMaxDomainLength is the maximum length of the domain prefix in a fully-qualified name. +const DeviceMaxDomainLength = 63 + // DeviceMaxIDLength is the maximum length of the identifier in a device attribute or capacity name (`/`). const DeviceMaxIDLength = 32 @@ -284,6 +292,7 @@ const DeviceAttributeMaxValueLength = 64 // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:prerelease-lifecycle-gen:introduced=1.31 +// +k8s:prerelease-lifecycle-gen:replacement=resource.k8s.io,v1beta1,ResourceSliceList // ResourceSliceList is a collection of ResourceSlices. type ResourceSliceList struct { @@ -298,7 +307,8 @@ type ResourceSliceList struct { // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +k8s:prerelease-lifecycle-gen:introduced=1.26 +// +k8s:prerelease-lifecycle-gen:introduced=1.31 +// +k8s:prerelease-lifecycle-gen:replacement=resource.k8s.io,v1beta1,ResourceClaim // ResourceClaim describes a request for access to resources in the cluster, // for use by workloads. For example, if a workload needs an accelerator device @@ -330,19 +340,10 @@ type ResourceClaimSpec struct { // +optional Devices DeviceClaim `json:"devices" protobuf:"bytes,1,name=devices"` - // Controller is the name of the DRA driver that is meant - // to handle allocation of this claim. If empty, allocation is handled - // by the scheduler while scheduling a pod. - // - // Must be a DNS subdomain and should end with a DNS domain owned by the - // vendor of the driver. - // - // This is an alpha field and requires enabling the DRAControlPlaneController - // feature gate. - // - // +optional - // +featureGate=DRAControlPlaneController - Controller string `json:"controller,omitempty" protobuf:"bytes,2,opt,name=controller"` + // Controller is tombstoned since Kubernetes 1.32 where + // it got removed. May be reused once decoding v1alpha3 is no longer + // supported. + // Controller string `json:"controller,omitempty" protobuf:"bytes,2,opt,name=controller"` } // DeviceClaim defines how to request devices with a ResourceClaim. @@ -368,6 +369,12 @@ type DeviceClaim struct { // +optional // +listType=atomic Config []DeviceClaimConfiguration `json:"config,omitempty" protobuf:"bytes,3,opt,name=config"` + + // Potential future extension, ignored by older schedulers. This is + // fine because scoring allows users to define a preference, without + // making it a hard requirement. + // + // Score *SomeScoringStruct } const ( @@ -451,9 +458,13 @@ type DeviceRequest struct { // all ordinary claims to the device with respect to access modes and // any resource allocations. // + // This is an alpha field and requires enabling the DRAAdminAccess + // feature gate. Admin access is disabled if this field is unset or + // set to false, otherwise it is enabled. + // // +optional - // +default=false - AdminAccess bool `json:"adminAccess,omitempty" protobuf:"bytes,6,opt,name=adminAccess"` + // +featureGate=DRAAdminAccess + AdminAccess *bool `json:"adminAccess,omitempty" protobuf:"bytes,6,opt,name=adminAccess"` } const ( @@ -526,10 +537,42 @@ type CELDeviceSelector struct { // // cel.bind(dra, device.attributes["dra.example.com"], dra.someBool && dra.anotherBool) // + // The length of the expression must be smaller or equal to 10 Ki. The + // cost of evaluating it is also limited based on the estimated number + // of logical steps. 
+ // // +required Expression string `json:"expression" protobuf:"bytes,1,name=expression"` } +// CELSelectorExpressionMaxCost specifies the cost limit for a single CEL selector +// evaluation. +// +// There is no overall budget for selecting a device, so the actual time +// required for that is proportional to the number of CEL selectors and how +// often they need to be evaluated, which can vary depending on several factors +// (number of devices, cluster utilization, additional constraints). +// +// Validation against this limit and [CELSelectorExpressionMaxLength] happens +// only when setting an expression for the first time or when changing it. If +// the limits are changed in a future Kubernetes release, existing users are +// guaranteed that existing expressions will continue to be valid. +// +// However, the kube-scheduler also applies this cost limit at runtime, so it +// could happen that a valid expression fails at runtime after an up- or +// downgrade. This can also happen without version skew when the cost estimate +// underestimated the actual cost. That this might happen is the reason why +// kube-scheduler enforces the runtime limit instead of relying on validation. +// +// According to +// https://github.com/kubernetes/kubernetes/blob/4aeaf1e99e82da8334c0d6dddd848a194cd44b4f/staging/src/k8s.io/apiserver/pkg/apis/cel/config.go#L20-L22, +// this gives roughly 0.1 second for each expression evaluation. +// However, this depends on how fast the machine is. +const CELSelectorExpressionMaxCost = 1000000 + +// CELSelectorExpressionMaxLength is the maximum length of a CEL selector expression string. +const CELSelectorExpressionMaxLength = 10 * 1024 + // DeviceConstraint must have exactly one field set besides Requests. type DeviceConstraint struct { // Requests is a list of the one or more requests in this claim which @@ -558,6 +601,16 @@ type DeviceConstraint struct { // +optional // +oneOf=ConstraintType MatchAttribute *FullyQualifiedName `json:"matchAttribute,omitempty" protobuf:"bytes,2,opt,name=matchAttribute"` + + // Potential future extension, not part of the current design: + // A CEL expression which compares different devices and returns + // true if they match. + // + // Because it would be part of a one-of, old schedulers will not + // accidentally ignore this additional, for them unknown match + // criteria. + // + // MatchExpression string } // DeviceClaimConfiguration is used for configuration parameters in DeviceClaim. @@ -603,10 +656,16 @@ type OpaqueDeviceConfiguration struct { // includes self-identification and a version ("kind" + "apiVersion" for // Kubernetes types), with conversion between different versions. // + // The length of the raw data must be smaller or equal to 10 Ki. + // // +required Parameters runtime.RawExtension `json:"parameters" protobuf:"bytes,2,name=parameters"` } +// OpaqueParametersMaxLength is the maximum length of the raw data in an +// [OpaqueDeviceConfiguration.Parameters] field. +const OpaqueParametersMaxLength = 10 * 1024 + // ResourceClaimStatus tracks whether the resource has been allocated and what // the result of that was. type ResourceClaimStatus struct { @@ -632,7 +691,7 @@ type ResourceClaimStatus struct { // which issued it knows that it must put the pod back into the queue, // waiting for the ResourceClaim to become usable again. // - // There can be at most 32 such reservations. This may get increased in + // There can be at most 256 such reservations. This may get increased in // the future, but not reduced. 
// // +optional @@ -642,24 +701,27 @@ type ResourceClaimStatus struct { // +patchMergeKey=uid ReservedFor []ResourceClaimConsumerReference `json:"reservedFor,omitempty" protobuf:"bytes,2,opt,name=reservedFor" patchStrategy:"merge" patchMergeKey:"uid"` - // Indicates that a claim is to be deallocated. While this is set, - // no new consumers may be added to ReservedFor. - // - // This is only used if the claim needs to be deallocated by a DRA driver. - // That driver then must deallocate this claim and reset the field - // together with clearing the Allocation field. - // - // This is an alpha field and requires enabling the DRAControlPlaneController - // feature gate. + // DeallocationRequested is tombstoned since Kubernetes 1.32 where + // it got removed. May be reused once decoding v1alpha3 is no longer + // supported. + // DeallocationRequested bool `json:"deallocationRequested,omitempty" protobuf:"bytes,3,opt,name=deallocationRequested"` + + // Devices contains the status of each device allocated for this + // claim, as reported by the driver. This can include driver-specific + // information. Entries are owned by their respective drivers. // // +optional - // +featureGate=DRAControlPlaneController - DeallocationRequested bool `json:"deallocationRequested,omitempty" protobuf:"bytes,3,opt,name=deallocationRequested"` + // +listType=map + // +listMapKey=driver + // +listMapKey=device + // +listMapKey=pool + // +featureGate=DRAResourceClaimDeviceStatus + Devices []AllocatedDeviceStatus `json:"devices,omitempty" protobuf:"bytes,4,opt,name=devices"` } -// ReservedForMaxSize is the maximum number of entries in +// ResourceClaimReservedForMaxSize is the maximum number of entries in // claim.status.reservedFor. -const ResourceClaimReservedForMaxSize = 32 +const ResourceClaimReservedForMaxSize = 256 // ResourceClaimConsumerReference contains enough information to let you // locate the consumer of a ResourceClaim. The user must be a resource in the same @@ -694,21 +756,10 @@ type AllocationResult struct { // +optional NodeSelector *v1.NodeSelector `json:"nodeSelector,omitempty" protobuf:"bytes,3,opt,name=nodeSelector"` - // Controller is the name of the DRA driver which handled the - // allocation. That driver is also responsible for deallocating the - // claim. It is empty when the claim can be deallocated without - // involving a driver. - // - // A driver may allocate devices provided by other drivers, so this - // driver name here can be different from the driver names listed for - // the results. - // - // This is an alpha field and requires enabling the DRAControlPlaneController - // feature gate. - // - // +optional - // +featureGate=DRAControlPlaneController - Controller string `json:"controller,omitempty" protobuf:"bytes,4,opt,name=controller"` + // Controller is tombstoned since Kubernetes 1.32 where + // it got removed. May be reused once decoding v1alpha3 is no longer + // supported. + // Controller string `json:"controller,omitempty" protobuf:"bytes,4,opt,name=controller"` } // DeviceAllocationResult is the result of allocating devices. @@ -769,6 +820,18 @@ type DeviceRequestAllocationResult struct { // // +required Device string `json:"device" protobuf:"bytes,4,name=device"` + + // AdminAccess indicates that this device was allocated for + // administrative access. See the corresponding request field + // for a definition of mode. + // + // This is an alpha field and requires enabling the DRAAdminAccess + // feature gate. 
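A minimal sketch of the new ResourceClaimStatus.Devices list described above, with made-up driver, pool, device, and network values; in practice a DRA driver or another component reports this, and it is served only when the DRAResourceClaimDeviceStatus feature gate is enabled.

package main

import (
	"fmt"

	resourcev1alpha3 "k8s.io/api/resource/v1alpha3"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// One entry per allocated device, keyed by driver, pool, and device.
	status := resourcev1alpha3.ResourceClaimStatus{
		Devices: []resourcev1alpha3.AllocatedDeviceStatus{{
			Driver: "net.example.com", // assumed driver name
			Pool:   "pool-a",          // assumed pool name
			Device: "nic-0",           // assumed device name
			Conditions: []metav1.Condition{{
				Type:               "Ready",
				Status:             metav1.ConditionTrue,
				Reason:             "Configured",
				LastTransitionTime: metav1.Now(),
			}},
			NetworkData: &resourcev1alpha3.NetworkDeviceData{
				InterfaceName:   "eth1", // assumed interface name
				IPs:             []string{"192.0.2.5/24", "2001:db8::5/64"},
				HardwareAddress: "00:11:22:33:44:55",
			},
		}},
	}
	fmt.Printf("reported status for %d device(s)\n", len(status.Devices))
}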
Admin access is disabled if this field is unset or + // set to false, otherwise it is enabled. + // + // +optional + // +featureGate=DRAAdminAccess + AdminAccess *bool `json:"adminAccess" protobuf:"bytes,5,name=adminAccess"` } // DeviceAllocationConfiguration gets embedded in an AllocationResult. @@ -799,7 +862,8 @@ const ( ) // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +k8s:prerelease-lifecycle-gen:introduced=1.26 +// +k8s:prerelease-lifecycle-gen:introduced=1.31 +// +k8s:prerelease-lifecycle-gen:replacement=resource.k8s.io,v1beta1,ResourceClaimList // ResourceClaimList is a collection of claims. type ResourceClaimList struct { @@ -812,111 +876,11 @@ type ResourceClaimList struct { Items []ResourceClaim `json:"items" protobuf:"bytes,2,rep,name=items"` } -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +k8s:prerelease-lifecycle-gen:introduced=1.26 - -// PodSchedulingContext objects hold information that is needed to schedule -// a Pod with ResourceClaims that use "WaitForFirstConsumer" allocation -// mode. -// -// This is an alpha type and requires enabling the DRAControlPlaneController -// feature gate. -type PodSchedulingContext struct { - metav1.TypeMeta `json:",inline"` - // Standard object metadata - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Spec describes where resources for the Pod are needed. - Spec PodSchedulingContextSpec `json:"spec" protobuf:"bytes,2,name=spec"` - - // Status describes where resources for the Pod can be allocated. - // - // +optional - Status PodSchedulingContextStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` -} - -// PodSchedulingContextSpec describes where resources for the Pod are needed. -type PodSchedulingContextSpec struct { - // SelectedNode is the node for which allocation of ResourceClaims that - // are referenced by the Pod and that use "WaitForFirstConsumer" - // allocation is to be attempted. - // - // +optional - SelectedNode string `json:"selectedNode,omitempty" protobuf:"bytes,1,opt,name=selectedNode"` - - // PotentialNodes lists nodes where the Pod might be able to run. - // - // The size of this field is limited to 128. This is large enough for - // many clusters. Larger clusters may need more attempts to find a node - // that suits all pending resources. This may get increased in the - // future, but not reduced. - // - // +optional - // +listType=atomic - PotentialNodes []string `json:"potentialNodes,omitempty" protobuf:"bytes,2,opt,name=potentialNodes"` -} - -// PodSchedulingContextStatus describes where resources for the Pod can be allocated. -type PodSchedulingContextStatus struct { - // ResourceClaims describes resource availability for each - // pod.spec.resourceClaim entry where the corresponding ResourceClaim - // uses "WaitForFirstConsumer" allocation mode. - // - // +listType=map - // +listMapKey=name - // +optional - ResourceClaims []ResourceClaimSchedulingStatus `json:"resourceClaims,omitempty" protobuf:"bytes,1,opt,name=resourceClaims"` - - // If there ever is a need to support other kinds of resources - // than ResourceClaim, then new fields could get added here - // for those other resources. -} - -// ResourceClaimSchedulingStatus contains information about one particular -// ResourceClaim with "WaitForFirstConsumer" allocation mode. -type ResourceClaimSchedulingStatus struct { - // Name matches the pod.spec.resourceClaims[*].Name field. 
- // - // +required - Name string `json:"name" protobuf:"bytes,1,name=name"` - - // UnsuitableNodes lists nodes that the ResourceClaim cannot be - // allocated for. - // - // The size of this field is limited to 128, the same as for - // PodSchedulingSpec.PotentialNodes. This may get increased in the - // future, but not reduced. - // - // +optional - // +listType=atomic - UnsuitableNodes []string `json:"unsuitableNodes,omitempty" protobuf:"bytes,2,opt,name=unsuitableNodes"` -} - -// PodSchedulingNodeListMaxSize defines the maximum number of entries in the -// node lists that are stored in PodSchedulingContext objects. This limit is part -// of the API. -const PodSchedulingNodeListMaxSize = 128 - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +k8s:prerelease-lifecycle-gen:introduced=1.26 - -// PodSchedulingContextList is a collection of Pod scheduling objects. -type PodSchedulingContextList struct { - metav1.TypeMeta `json:",inline"` - // Standard list metadata - // +optional - metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Items is the list of PodSchedulingContext objects. - Items []PodSchedulingContext `json:"items" protobuf:"bytes,2,rep,name=items"` -} - // +genclient // +genclient:nonNamespaced // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:prerelease-lifecycle-gen:introduced=1.31 +// +k8s:prerelease-lifecycle-gen:replacement=resource.k8s.io,v1beta1,DeviceClass // DeviceClass is a vendor- or admin-provided resource that contains // device configuration and selectors. It can be referenced in @@ -961,21 +925,10 @@ type DeviceClassSpec struct { // +listType=atomic Config []DeviceClassConfiguration `json:"config,omitempty" protobuf:"bytes,2,opt,name=config"` - // Only nodes matching the selector will be considered by the scheduler - // when trying to find a Node that fits a Pod when that Pod uses - // a claim that has not been allocated yet *and* that claim - // gets allocated through a control plane controller. It is ignored - // when the claim does not use a control plane controller - // for allocation. - // - // Setting this field is optional. If unset, all Nodes are candidates. - // - // This is an alpha field and requires enabling the DRAControlPlaneController - // feature gate. - // - // +optional - // +featureGate=DRAControlPlaneController - SuitableNodes *v1.NodeSelector `json:"suitableNodes,omitempty" protobuf:"bytes,3,opt,name=suitableNodes"` + // SuitableNodes is tombstoned since Kubernetes 1.32 where + // it got removed. May be reused once decoding v1alpha3 is no longer + // supported. + // SuitableNodes *v1.NodeSelector `json:"suitableNodes,omitempty" protobuf:"bytes,3,opt,name=suitableNodes"` } // DeviceClassConfiguration is used in DeviceClass. @@ -984,7 +937,8 @@ type DeviceClassConfiguration struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +k8s:prerelease-lifecycle-gen:introduced=1.26 +// +k8s:prerelease-lifecycle-gen:introduced=1.31 +// +k8s:prerelease-lifecycle-gen:replacement=resource.k8s.io,v1beta1,DeviceClassList // DeviceClassList is a collection of classes. 
type DeviceClassList struct { @@ -999,7 +953,8 @@ type DeviceClassList struct { // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +k8s:prerelease-lifecycle-gen:introduced=1.26 +// +k8s:prerelease-lifecycle-gen:introduced=1.31 +// +k8s:prerelease-lifecycle-gen:replacement=resource.k8s.io,v1beta1,ResourceClaimTemplate // ResourceClaimTemplate is used to produce ResourceClaim objects. // @@ -1021,7 +976,7 @@ type ResourceClaimTemplate struct { // ResourceClaimTemplateSpec contains the metadata and fields for a ResourceClaim. type ResourceClaimTemplateSpec struct { - // ObjectMeta may contain labels and annotations that will be copied into the PVC + // ObjectMeta may contain labels and annotations that will be copied into the ResourceClaim // when creating it. No other fields are allowed and will be rejected during // validation. // +optional @@ -1034,7 +989,8 @@ type ResourceClaimTemplateSpec struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +k8s:prerelease-lifecycle-gen:introduced=1.26 +// +k8s:prerelease-lifecycle-gen:introduced=1.31 +// +k8s:prerelease-lifecycle-gen:replacement=resource.k8s.io,v1beta1,ResourceClaimTemplateList // ResourceClaimTemplateList is a collection of claim templates. type ResourceClaimTemplateList struct { @@ -1046,3 +1002,84 @@ type ResourceClaimTemplateList struct { // Items is the list of resource claim templates. Items []ResourceClaimTemplate `json:"items" protobuf:"bytes,2,rep,name=items"` } + +// AllocatedDeviceStatus contains the status of an allocated device, if the +// driver chooses to report it. This may include driver-specific information. +type AllocatedDeviceStatus struct { + // Driver specifies the name of the DRA driver whose kubelet + // plugin should be invoked to process the allocation once the claim is + // needed on a node. + // + // Must be a DNS subdomain and should end with a DNS domain owned by the + // vendor of the driver. + // + // +required + Driver string `json:"driver" protobuf:"bytes,1,rep,name=driver"` + + // This name together with the driver name and the device name field + // identify which device was allocated (`//`). + // + // Must not be longer than 253 characters and may contain one or more + // DNS sub-domains separated by slashes. + // + // +required + Pool string `json:"pool" protobuf:"bytes,2,rep,name=pool"` + + // Device references one device instance via its name in the driver's + // resource pool. It must be a DNS label. + // + // +required + Device string `json:"device" protobuf:"bytes,3,rep,name=device"` + + // Conditions contains the latest observation of the device's state. + // If the device has been configured according to the class and claim + // config references, the `Ready` condition should be True. + // + // +optional + // +listType=map + // +listMapKey=type + Conditions []metav1.Condition `json:"conditions" protobuf:"bytes,4,opt,name=conditions"` + + // Data contains arbitrary driver-specific data. + // + // The length of the raw data must be smaller or equal to 10 Ki. + // + // +optional + Data runtime.RawExtension `json:"data,omitempty" protobuf:"bytes,5,opt,name=data"` + + // NetworkData contains network-related information specific to the device. + // + // +optional + NetworkData *NetworkDeviceData `json:"networkData,omitempty" protobuf:"bytes,6,opt,name=networkData"` +} + +// NetworkDeviceData provides network-related details for the allocated device. 
+// This information may be filled by drivers or other components to configure +// or identify the device within a network context. +type NetworkDeviceData struct { + // InterfaceName specifies the name of the network interface associated with + // the allocated device. This might be the name of a physical or virtual + // network interface being configured in the pod. + // + // Must not be longer than 256 characters. + // + // +optional + InterfaceName string `json:"interfaceName,omitempty" protobuf:"bytes,1,opt,name=interfaceName"` + + // IPs lists the network addresses assigned to the device's network interface. + // This can include both IPv4 and IPv6 addresses. + // The IPs are in the CIDR notation, which includes both the address and the + // associated subnet mask. + // e.g.: "192.0.2.5/24" for IPv4 and "2001:db8::5/64" for IPv6. + // + // +optional + // +listType=atomic + IPs []string `json:"ips,omitempty" protobuf:"bytes,2,opt,name=ips"` + + // HardwareAddress represents the hardware address (e.g. MAC Address) of the device's network interface. + // + // Must not be longer than 128 characters. + // + // +optional + HardwareAddress string `json:"hardwareAddress,omitempty" protobuf:"bytes,3,opt,name=hardwareAddress"` +} diff --git a/vendor/k8s.io/api/resource/v1alpha3/types_swagger_doc_generated.go b/vendor/k8s.io/api/resource/v1alpha3/types_swagger_doc_generated.go index 1a44a971d..b41609d11 100644 --- a/vendor/k8s.io/api/resource/v1alpha3/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/resource/v1alpha3/types_swagger_doc_generated.go @@ -27,11 +27,24 @@ package v1alpha3 // Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_AllocatedDeviceStatus = map[string]string{ + "": "AllocatedDeviceStatus contains the status of an allocated device, if the driver chooses to report it. This may include driver-specific information.", + "driver": "Driver specifies the name of the DRA driver whose kubelet plugin should be invoked to process the allocation once the claim is needed on a node.\n\nMust be a DNS subdomain and should end with a DNS domain owned by the vendor of the driver.", + "pool": "This name together with the driver name and the device name field identify which device was allocated (`//`).\n\nMust not be longer than 253 characters and may contain one or more DNS sub-domains separated by slashes.", + "device": "Device references one device instance via its name in the driver's resource pool. It must be a DNS label.", + "conditions": "Conditions contains the latest observation of the device's state. If the device has been configured according to the class and claim config references, the `Ready` condition should be True.", + "data": "Data contains arbitrary driver-specific data.\n\nThe length of the raw data must be smaller or equal to 10 Ki.", + "networkData": "NetworkData contains network-related information specific to the device.", +} + +func (AllocatedDeviceStatus) SwaggerDoc() map[string]string { + return map_AllocatedDeviceStatus +} + var map_AllocationResult = map[string]string{ "": "AllocationResult contains attributes of an allocated resource.", "devices": "Devices is the result of allocating devices.", "nodeSelector": "NodeSelector defines where the allocated resources are available. If unset, they are available everywhere.", - "controller": "Controller is the name of the DRA driver which handled the allocation. That driver is also responsible for deallocating the claim. 
It is empty when the claim can be deallocated without involving a driver.\n\nA driver may allocate devices provided by other drivers, so this driver name here can be different from the driver names listed for the results.\n\nThis is an alpha field and requires enabling the DRAControlPlaneController feature gate.", } func (AllocationResult) SwaggerDoc() map[string]string { @@ -50,7 +63,7 @@ func (BasicDevice) SwaggerDoc() map[string]string { var map_CELDeviceSelector = map[string]string{ "": "CELDeviceSelector contains a CEL expression for selecting a device.", - "expression": "Expression is a CEL expression which evaluates a single device. It must evaluate to true when the device under consideration satisfies the desired criteria, and false when it does not. Any other result is an error and causes allocation of devices to abort.\n\nThe expression's input is an object named \"device\", which carries the following properties:\n - driver (string): the name of the driver which defines this device.\n - attributes (map[string]object): the device's attributes, grouped by prefix\n (e.g. device.attributes[\"dra.example.com\"] evaluates to an object with all\n of the attributes which were prefixed by \"dra.example.com\".\n - capacity (map[string]object): the device's capacities, grouped by prefix.\n\nExample: Consider a device with driver=\"dra.example.com\", which exposes two attributes named \"model\" and \"ext.example.com/family\" and which exposes one capacity named \"modules\". This input to this expression would have the following fields:\n\n device.driver\n device.attributes[\"dra.example.com\"].model\n device.attributes[\"ext.example.com\"].family\n device.capacity[\"dra.example.com\"].modules\n\nThe device.driver field can be used to check for a specific driver, either as a high-level precondition (i.e. you only want to consider devices from this driver) or as part of a multi-clause expression that is meant to consider devices from different drivers.\n\nThe value type of each attribute is defined by the device definition, and users who write these expressions must consult the documentation for their specific drivers. The value type of each capacity is Quantity.\n\nIf an unknown prefix is used as a lookup in either device.attributes or device.capacity, an empty map will be returned. Any reference to an unknown field will cause an evaluation error and allocation to abort.\n\nA robust expression should check for the existence of attributes before referencing them.\n\nFor ease of use, the cel.bind() function is enabled, and can be used to simplify expressions that access multiple attributes with the same domain. For example:\n\n cel.bind(dra, device.attributes[\"dra.example.com\"], dra.someBool && dra.anotherBool)", + "expression": "Expression is a CEL expression which evaluates a single device. It must evaluate to true when the device under consideration satisfies the desired criteria, and false when it does not. Any other result is an error and causes allocation of devices to abort.\n\nThe expression's input is an object named \"device\", which carries the following properties:\n - driver (string): the name of the driver which defines this device.\n - attributes (map[string]object): the device's attributes, grouped by prefix\n (e.g. 
device.attributes[\"dra.example.com\"] evaluates to an object with all\n of the attributes which were prefixed by \"dra.example.com\".\n - capacity (map[string]object): the device's capacities, grouped by prefix.\n\nExample: Consider a device with driver=\"dra.example.com\", which exposes two attributes named \"model\" and \"ext.example.com/family\" and which exposes one capacity named \"modules\". This input to this expression would have the following fields:\n\n device.driver\n device.attributes[\"dra.example.com\"].model\n device.attributes[\"ext.example.com\"].family\n device.capacity[\"dra.example.com\"].modules\n\nThe device.driver field can be used to check for a specific driver, either as a high-level precondition (i.e. you only want to consider devices from this driver) or as part of a multi-clause expression that is meant to consider devices from different drivers.\n\nThe value type of each attribute is defined by the device definition, and users who write these expressions must consult the documentation for their specific drivers. The value type of each capacity is Quantity.\n\nIf an unknown prefix is used as a lookup in either device.attributes or device.capacity, an empty map will be returned. Any reference to an unknown field will cause an evaluation error and allocation to abort.\n\nA robust expression should check for the existence of attributes before referencing them.\n\nFor ease of use, the cel.bind() function is enabled, and can be used to simplify expressions that access multiple attributes with the same domain. For example:\n\n cel.bind(dra, device.attributes[\"dra.example.com\"], dra.someBool && dra.anotherBool)\n\nThe length of the expression must be smaller or equal to 10 Ki. The cost of evaluating it is also limited based on the estimated number of logical steps.", } func (CELDeviceSelector) SwaggerDoc() map[string]string { @@ -148,10 +161,9 @@ func (DeviceClassList) SwaggerDoc() map[string]string { } var map_DeviceClassSpec = map[string]string{ - "": "DeviceClassSpec is used in a [DeviceClass] to define what can be allocated and how to configure it.", - "selectors": "Each selector must be satisfied by a device which is claimed via this class.", - "config": "Config defines configuration parameters that apply to each device that is claimed via this class. Some classses may potentially be satisfied by multiple drivers, so each instance of a vendor configuration applies to exactly one driver.\n\nThey are passed to the driver, but are not considered while allocating the claim.", - "suitableNodes": "Only nodes matching the selector will be considered by the scheduler when trying to find a Node that fits a Pod when that Pod uses a claim that has not been allocated yet *and* that claim gets allocated through a control plane controller. It is ignored when the claim does not use a control plane controller for allocation.\n\nSetting this field is optional. If unset, all Nodes are candidates.\n\nThis is an alpha field and requires enabling the DRAControlPlaneController feature gate.", + "": "DeviceClassSpec is used in a [DeviceClass] to define what can be allocated and how to configure it.", + "selectors": "Each selector must be satisfied by a device which is claimed via this class.", + "config": "Config defines configuration parameters that apply to each device that is claimed via this class. 
Some classses may potentially be satisfied by multiple drivers, so each instance of a vendor configuration applies to exactly one driver.\n\nThey are passed to the driver, but are not considered while allocating the claim.", } func (DeviceClassSpec) SwaggerDoc() map[string]string { @@ -184,7 +196,7 @@ var map_DeviceRequest = map[string]string{ "selectors": "Selectors define criteria which must be satisfied by a specific device in order for that device to be considered for this request. All selectors must be satisfied for a device to be considered.", "allocationMode": "AllocationMode and its related fields define how devices are allocated to satisfy this request. Supported values are:\n\n- ExactCount: This request is for a specific number of devices.\n This is the default. The exact number is provided in the\n count field.\n\n- All: This request is for all of the matching devices in a pool.\n Allocation will fail if some devices are already allocated,\n unless adminAccess is requested.\n\nIf AlloctionMode is not specified, the default mode is ExactCount. If the mode is ExactCount and count is not specified, the default count is one. Any other requests must specify this field.\n\nMore modes may get added in the future. Clients must refuse to handle requests with unknown modes.", "count": "Count is used only when the count mode is \"ExactCount\". Must be greater than zero. If AllocationMode is ExactCount and this field is not specified, the default is one.", - "adminAccess": "AdminAccess indicates that this is a claim for administrative access to the device(s). Claims with AdminAccess are expected to be used for monitoring or other management services for a device. They ignore all ordinary claims to the device with respect to access modes and any resource allocations.", + "adminAccess": "AdminAccess indicates that this is a claim for administrative access to the device(s). Claims with AdminAccess are expected to be used for monitoring or other management services for a device. They ignore all ordinary claims to the device with respect to access modes and any resource allocations.\n\nThis is an alpha field and requires enabling the DRAAdminAccess feature gate. Admin access is disabled if this field is unset or set to false, otherwise it is enabled.", } func (DeviceRequest) SwaggerDoc() map[string]string { @@ -192,11 +204,12 @@ func (DeviceRequest) SwaggerDoc() map[string]string { } var map_DeviceRequestAllocationResult = map[string]string{ - "": "DeviceRequestAllocationResult contains the allocation result for one request.", - "request": "Request is the name of the request in the claim which caused this device to be allocated. Multiple devices may have been allocated per request.", - "driver": "Driver specifies the name of the DRA driver whose kubelet plugin should be invoked to process the allocation once the claim is needed on a node.\n\nMust be a DNS subdomain and should end with a DNS domain owned by the vendor of the driver.", - "pool": "This name together with the driver name and the device name field identify which device was allocated (`//`).\n\nMust not be longer than 253 characters and may contain one or more DNS sub-domains separated by slashes.", - "device": "Device references one device instance via its name in the driver's resource pool. It must be a DNS label.", + "": "DeviceRequestAllocationResult contains the allocation result for one request.", + "request": "Request is the name of the request in the claim which caused this device to be allocated. 
Multiple devices may have been allocated per request.", + "driver": "Driver specifies the name of the DRA driver whose kubelet plugin should be invoked to process the allocation once the claim is needed on a node.\n\nMust be a DNS subdomain and should end with a DNS domain owned by the vendor of the driver.", + "pool": "This name together with the driver name and the device name field identify which device was allocated (`//`).\n\nMust not be longer than 253 characters and may contain one or more DNS sub-domains separated by slashes.", + "device": "Device references one device instance via its name in the driver's resource pool. It must be a DNS label.", + "adminAccess": "AdminAccess indicates that this device was allocated for administrative access. See the corresponding request field for a definition of mode.\n\nThis is an alpha field and requires enabling the DRAAdminAccess feature gate. Admin access is disabled if this field is unset or set to false, otherwise it is enabled.", } func (DeviceRequestAllocationResult) SwaggerDoc() map[string]string { @@ -212,56 +225,27 @@ func (DeviceSelector) SwaggerDoc() map[string]string { return map_DeviceSelector } +var map_NetworkDeviceData = map[string]string{ + "": "NetworkDeviceData provides network-related details for the allocated device. This information may be filled by drivers or other components to configure or identify the device within a network context.", + "interfaceName": "InterfaceName specifies the name of the network interface associated with the allocated device. This might be the name of a physical or virtual network interface being configured in the pod.\n\nMust not be longer than 256 characters.", + "ips": "IPs lists the network addresses assigned to the device's network interface. This can include both IPv4 and IPv6 addresses. The IPs are in the CIDR notation, which includes both the address and the associated subnet mask. e.g.: \"192.0.2.5/24\" for IPv4 and \"2001:db8::5/64\" for IPv6.", + "hardwareAddress": "HardwareAddress represents the hardware address (e.g. MAC Address) of the device's network interface.\n\nMust not be longer than 128 characters.", +} + +func (NetworkDeviceData) SwaggerDoc() map[string]string { + return map_NetworkDeviceData +} + var map_OpaqueDeviceConfiguration = map[string]string{ "": "OpaqueDeviceConfiguration contains configuration parameters for a driver in a format defined by the driver vendor.", "driver": "Driver is used to determine which kubelet plugin needs to be passed these configuration parameters.\n\nAn admission policy provided by the driver developer could use this to decide whether it needs to validate them.\n\nMust be a DNS subdomain and should end with a DNS domain owned by the vendor of the driver.", - "parameters": "Parameters can contain arbitrary data. It is the responsibility of the driver developer to handle validation and versioning. Typically this includes self-identification and a version (\"kind\" + \"apiVersion\" for Kubernetes types), with conversion between different versions.", + "parameters": "Parameters can contain arbitrary data. It is the responsibility of the driver developer to handle validation and versioning. 
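// Illustrative sketch, not part of the vendored diff: a DeviceRequest that a
// monitoring agent might use together with the new adminAccess field. The Go
// field names and the DeviceAllocationModeAll constant are assumptions based
// on the JSON names and allocation modes documented above; the DRAAdminAccess
// feature gate must be enabled for adminAccess to take effect.
package example

import resourceapi "k8s.io/api/resource/v1alpha3"

func adminRequest() resourceapi.DeviceRequest {
	adminAccess := true
	return resourceapi.DeviceRequest{
		Name:            "gpu-monitoring",
		DeviceClassName: "gpu.example.com",
		// "All" would normally fail if some devices are already allocated,
		// but the documentation above relaxes that when adminAccess is set.
		AllocationMode: resourceapi.DeviceAllocationModeAll,
		AdminAccess:    &adminAccess,
	}
}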
Typically this includes self-identification and a version (\"kind\" + \"apiVersion\" for Kubernetes types), with conversion between different versions.\n\nThe length of the raw data must be smaller or equal to 10 Ki.", } func (OpaqueDeviceConfiguration) SwaggerDoc() map[string]string { return map_OpaqueDeviceConfiguration } -var map_PodSchedulingContext = map[string]string{ - "": "PodSchedulingContext objects hold information that is needed to schedule a Pod with ResourceClaims that use \"WaitForFirstConsumer\" allocation mode.\n\nThis is an alpha type and requires enabling the DRAControlPlaneController feature gate.", - "metadata": "Standard object metadata", - "spec": "Spec describes where resources for the Pod are needed.", - "status": "Status describes where resources for the Pod can be allocated.", -} - -func (PodSchedulingContext) SwaggerDoc() map[string]string { - return map_PodSchedulingContext -} - -var map_PodSchedulingContextList = map[string]string{ - "": "PodSchedulingContextList is a collection of Pod scheduling objects.", - "metadata": "Standard list metadata", - "items": "Items is the list of PodSchedulingContext objects.", -} - -func (PodSchedulingContextList) SwaggerDoc() map[string]string { - return map_PodSchedulingContextList -} - -var map_PodSchedulingContextSpec = map[string]string{ - "": "PodSchedulingContextSpec describes where resources for the Pod are needed.", - "selectedNode": "SelectedNode is the node for which allocation of ResourceClaims that are referenced by the Pod and that use \"WaitForFirstConsumer\" allocation is to be attempted.", - "potentialNodes": "PotentialNodes lists nodes where the Pod might be able to run.\n\nThe size of this field is limited to 128. This is large enough for many clusters. Larger clusters may need more attempts to find a node that suits all pending resources. This may get increased in the future, but not reduced.", -} - -func (PodSchedulingContextSpec) SwaggerDoc() map[string]string { - return map_PodSchedulingContextSpec -} - -var map_PodSchedulingContextStatus = map[string]string{ - "": "PodSchedulingContextStatus describes where resources for the Pod can be allocated.", - "resourceClaims": "ResourceClaims describes resource availability for each pod.spec.resourceClaim entry where the corresponding ResourceClaim uses \"WaitForFirstConsumer\" allocation mode.", -} - -func (PodSchedulingContextStatus) SwaggerDoc() map[string]string { - return map_PodSchedulingContextStatus -} - var map_ResourceClaim = map[string]string{ "": "ResourceClaim describes a request for access to resources in the cluster, for use by workloads. For example, if a workload needs an accelerator device with specific properties, this is how that request is expressed. 
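// Illustrative sketch, not part of the vendored diff: how a driver might fill
// the new AllocatedDeviceStatus.NetworkData field described above. Field names
// are assumed to mirror the JSON names in the NetworkDeviceData documentation.
package example

import resourceapi "k8s.io/api/resource/v1alpha3"

func reportNetworkStatus() resourceapi.AllocatedDeviceStatus {
	return resourceapi.AllocatedDeviceStatus{
		Driver: "net.example.com",
		Pool:   "pool-a",
		Device: "nic-0",
		NetworkData: &resourceapi.NetworkDeviceData{
			InterfaceName: "eth1",
			// CIDR notation, as required by the ips field documentation.
			IPs:             []string{"192.0.2.5/24", "2001:db8::5/64"},
			HardwareAddress: "00:1a:2b:3c:4d:5e",
		},
	}
}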
The status stanza tracks whether this claim has been satisfied and what specific resources have been allocated.\n\nThis is an alpha type and requires enabling the DynamicResourceAllocation feature gate.", "metadata": "Standard object metadata", @@ -295,20 +279,9 @@ func (ResourceClaimList) SwaggerDoc() map[string]string { return map_ResourceClaimList } -var map_ResourceClaimSchedulingStatus = map[string]string{ - "": "ResourceClaimSchedulingStatus contains information about one particular ResourceClaim with \"WaitForFirstConsumer\" allocation mode.", - "name": "Name matches the pod.spec.resourceClaims[*].Name field.", - "unsuitableNodes": "UnsuitableNodes lists nodes that the ResourceClaim cannot be allocated for.\n\nThe size of this field is limited to 128, the same as for PodSchedulingSpec.PotentialNodes. This may get increased in the future, but not reduced.", -} - -func (ResourceClaimSchedulingStatus) SwaggerDoc() map[string]string { - return map_ResourceClaimSchedulingStatus -} - var map_ResourceClaimSpec = map[string]string{ - "": "ResourceClaimSpec defines what is being requested in a ResourceClaim and how to configure it.", - "devices": "Devices defines how to request devices.", - "controller": "Controller is the name of the DRA driver that is meant to handle allocation of this claim. If empty, allocation is handled by the scheduler while scheduling a pod.\n\nMust be a DNS subdomain and should end with a DNS domain owned by the vendor of the driver.\n\nThis is an alpha field and requires enabling the DRAControlPlaneController feature gate.", + "": "ResourceClaimSpec defines what is being requested in a ResourceClaim and how to configure it.", + "devices": "Devices defines how to request devices.", } func (ResourceClaimSpec) SwaggerDoc() map[string]string { @@ -316,10 +289,10 @@ func (ResourceClaimSpec) SwaggerDoc() map[string]string { } var map_ResourceClaimStatus = map[string]string{ - "": "ResourceClaimStatus tracks whether the resource has been allocated and what the result of that was.", - "allocation": "Allocation is set once the claim has been allocated successfully.", - "reservedFor": "ReservedFor indicates which entities are currently allowed to use the claim. A Pod which references a ResourceClaim which is not reserved for that Pod will not be started. A claim that is in use or might be in use because it has been reserved must not get deallocated.\n\nIn a cluster with multiple scheduler instances, two pods might get scheduled concurrently by different schedulers. When they reference the same ResourceClaim which already has reached its maximum number of consumers, only one pod can be scheduled.\n\nBoth schedulers try to add their pod to the claim.status.reservedFor field, but only the update that reaches the API server first gets stored. The other one fails with an error and the scheduler which issued it knows that it must put the pod back into the queue, waiting for the ResourceClaim to become usable again.\n\nThere can be at most 32 such reservations. This may get increased in the future, but not reduced.", - "deallocationRequested": "Indicates that a claim is to be deallocated. While this is set, no new consumers may be added to ReservedFor.\n\nThis is only used if the claim needs to be deallocated by a DRA driver. 
That driver then must deallocate this claim and reset the field together with clearing the Allocation field.\n\nThis is an alpha field and requires enabling the DRAControlPlaneController feature gate.", + "": "ResourceClaimStatus tracks whether the resource has been allocated and what the result of that was.", + "allocation": "Allocation is set once the claim has been allocated successfully.", + "reservedFor": "ReservedFor indicates which entities are currently allowed to use the claim. A Pod which references a ResourceClaim which is not reserved for that Pod will not be started. A claim that is in use or might be in use because it has been reserved must not get deallocated.\n\nIn a cluster with multiple scheduler instances, two pods might get scheduled concurrently by different schedulers. When they reference the same ResourceClaim which already has reached its maximum number of consumers, only one pod can be scheduled.\n\nBoth schedulers try to add their pod to the claim.status.reservedFor field, but only the update that reaches the API server first gets stored. The other one fails with an error and the scheduler which issued it knows that it must put the pod back into the queue, waiting for the ResourceClaim to become usable again.\n\nThere can be at most 256 such reservations. This may get increased in the future, but not reduced.", + "devices": "Devices contains the status of each device allocated for this claim, as reported by the driver. This can include driver-specific information. Entries are owned by their respective drivers.", } func (ResourceClaimStatus) SwaggerDoc() map[string]string { @@ -348,7 +321,7 @@ func (ResourceClaimTemplateList) SwaggerDoc() map[string]string { var map_ResourceClaimTemplateSpec = map[string]string{ "": "ResourceClaimTemplateSpec contains the metadata and fields for a ResourceClaim.", - "metadata": "ObjectMeta may contain labels and annotations that will be copied into the PVC when creating it. No other fields are allowed and will be rejected during validation.", + "metadata": "ObjectMeta may contain labels and annotations that will be copied into the ResourceClaim when creating it. No other fields are allowed and will be rejected during validation.", "spec": "Spec for the ResourceClaim. The entire content is copied unchanged into the ResourceClaim that gets created from this template. The same fields as in a ResourceClaim are also valid here.", } diff --git a/vendor/k8s.io/api/resource/v1alpha3/zz_generated.deepcopy.go b/vendor/k8s.io/api/resource/v1alpha3/zz_generated.deepcopy.go index 58171df1f..07ba47b59 100644 --- a/vendor/k8s.io/api/resource/v1alpha3/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/resource/v1alpha3/zz_generated.deepcopy.go @@ -22,18 +22,48 @@ limitations under the License. package v1alpha3 import ( - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" resource "k8s.io/apimachinery/pkg/api/resource" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" ) +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
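// Illustrative sketch, not part of the vendored diff: the optimistic-update
// pattern that the reservedFor documentation above describes. A scheduler-like
// component appends its pod as a consumer and lets the API server reject the
// slower of two concurrent writers; retry.RetryOnConflict re-reads and retries.
// The typed client accessor (ResourceV1alpha3) and UpdateStatus call come from
// k8s.io/client-go, but treat the exact method names as assumptions.
package example

import (
	"context"

	resourceapi "k8s.io/api/resource/v1alpha3"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/util/retry"
)

func reserveForPod(ctx context.Context, cs kubernetes.Interface, ns, claim, pod string, uid types.UID) error {
	return retry.RetryOnConflict(retry.DefaultRetry, func() error {
		rc, err := cs.ResourceV1alpha3().ResourceClaims(ns).Get(ctx, claim, metav1.GetOptions{})
		if err != nil {
			return err
		}
		// At most 256 reservations are allowed, per the documentation above.
		rc.Status.ReservedFor = append(rc.Status.ReservedFor, resourceapi.ResourceClaimConsumerReference{
			Resource: "pods",
			Name:     pod,
			UID:      uid,
		})
		// The update that reaches the API server first wins; the loser gets a
		// conflict error and retries with the refreshed object.
		_, err = cs.ResourceV1alpha3().ResourceClaims(ns).UpdateStatus(ctx, rc, metav1.UpdateOptions{})
		return err
	})
}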
+func (in *AllocatedDeviceStatus) DeepCopyInto(out *AllocatedDeviceStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + in.Data.DeepCopyInto(&out.Data) + if in.NetworkData != nil { + in, out := &in.NetworkData, &out.NetworkData + *out = new(NetworkDeviceData) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllocatedDeviceStatus. +func (in *AllocatedDeviceStatus) DeepCopy() *AllocatedDeviceStatus { + if in == nil { + return nil + } + out := new(AllocatedDeviceStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *AllocationResult) DeepCopyInto(out *AllocationResult) { *out = *in in.Devices.DeepCopyInto(&out.Devices) if in.NodeSelector != nil { in, out := &in.NodeSelector, &out.NodeSelector - *out = new(v1.NodeSelector) + *out = new(corev1.NodeSelector) (*in).DeepCopyInto(*out) } return @@ -144,7 +174,9 @@ func (in *DeviceAllocationResult) DeepCopyInto(out *DeviceAllocationResult) { if in.Results != nil { in, out := &in.Results, &out.Results *out = make([]DeviceRequestAllocationResult, len(*in)) - copy(*out, *in) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } } if in.Config != nil { in, out := &in.Config, &out.Config @@ -355,11 +387,6 @@ func (in *DeviceClassSpec) DeepCopyInto(out *DeviceClassSpec) { (*in)[i].DeepCopyInto(&(*out)[i]) } } - if in.SuitableNodes != nil { - in, out := &in.SuitableNodes, &out.SuitableNodes - *out = new(v1.NodeSelector) - (*in).DeepCopyInto(*out) - } return } @@ -430,6 +457,11 @@ func (in *DeviceRequest) DeepCopyInto(out *DeviceRequest) { (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.AdminAccess != nil { + in, out := &in.AdminAccess, &out.AdminAccess + *out = new(bool) + **out = **in + } return } @@ -446,6 +478,11 @@ func (in *DeviceRequest) DeepCopy() *DeviceRequest { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *DeviceRequestAllocationResult) DeepCopyInto(out *DeviceRequestAllocationResult) { *out = *in + if in.AdminAccess != nil { + in, out := &in.AdminAccess, &out.AdminAccess + *out = new(bool) + **out = **in + } return } @@ -481,123 +518,39 @@ func (in *DeviceSelector) DeepCopy() *DeviceSelector { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *OpaqueDeviceConfiguration) DeepCopyInto(out *OpaqueDeviceConfiguration) { - *out = *in - in.Parameters.DeepCopyInto(&out.Parameters) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpaqueDeviceConfiguration. -func (in *OpaqueDeviceConfiguration) DeepCopy() *OpaqueDeviceConfiguration { - if in == nil { - return nil - } - out := new(OpaqueDeviceConfiguration) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *PodSchedulingContext) DeepCopyInto(out *PodSchedulingContext) { +func (in *NetworkDeviceData) DeepCopyInto(out *NetworkDeviceData) { *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSchedulingContext. -func (in *PodSchedulingContext) DeepCopy() *PodSchedulingContext { - if in == nil { - return nil - } - out := new(PodSchedulingContext) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *PodSchedulingContext) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PodSchedulingContextList) DeepCopyInto(out *PodSchedulingContextList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]PodSchedulingContext, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSchedulingContextList. -func (in *PodSchedulingContextList) DeepCopy() *PodSchedulingContextList { - if in == nil { - return nil - } - out := new(PodSchedulingContextList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *PodSchedulingContextList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PodSchedulingContextSpec) DeepCopyInto(out *PodSchedulingContextSpec) { - *out = *in - if in.PotentialNodes != nil { - in, out := &in.PotentialNodes, &out.PotentialNodes + if in.IPs != nil { + in, out := &in.IPs, &out.IPs *out = make([]string, len(*in)) copy(*out, *in) } return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSchedulingContextSpec. -func (in *PodSchedulingContextSpec) DeepCopy() *PodSchedulingContextSpec { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkDeviceData. +func (in *NetworkDeviceData) DeepCopy() *NetworkDeviceData { if in == nil { return nil } - out := new(PodSchedulingContextSpec) + out := new(NetworkDeviceData) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PodSchedulingContextStatus) DeepCopyInto(out *PodSchedulingContextStatus) { +func (in *OpaqueDeviceConfiguration) DeepCopyInto(out *OpaqueDeviceConfiguration) { *out = *in - if in.ResourceClaims != nil { - in, out := &in.ResourceClaims, &out.ResourceClaims - *out = make([]ResourceClaimSchedulingStatus, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } + in.Parameters.DeepCopyInto(&out.Parameters) return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSchedulingContextStatus. 
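// Illustrative sketch, not part of the vendored diff: the generated DeepCopy
// helpers above are what callers use to avoid mutating shared objects, for
// example items served from an informer cache.
package example

import (
	resourceapi "k8s.io/api/resource/v1alpha3"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func withExtraCondition(shared *resourceapi.AllocatedDeviceStatus, cond metav1.Condition) *resourceapi.AllocatedDeviceStatus {
	// DeepCopy clones Conditions, Data and NetworkData, so the append below
	// cannot alias slices still referenced by the shared original.
	out := shared.DeepCopy()
	out.Conditions = append(out.Conditions, cond)
	return out
}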
-func (in *PodSchedulingContextStatus) DeepCopy() *PodSchedulingContextStatus { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpaqueDeviceConfiguration. +func (in *OpaqueDeviceConfiguration) DeepCopy() *OpaqueDeviceConfiguration { if in == nil { return nil } - out := new(PodSchedulingContextStatus) + out := new(OpaqueDeviceConfiguration) in.DeepCopyInto(out) return out } @@ -679,27 +632,6 @@ func (in *ResourceClaimList) DeepCopyObject() runtime.Object { return nil } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ResourceClaimSchedulingStatus) DeepCopyInto(out *ResourceClaimSchedulingStatus) { - *out = *in - if in.UnsuitableNodes != nil { - in, out := &in.UnsuitableNodes, &out.UnsuitableNodes - *out = make([]string, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceClaimSchedulingStatus. -func (in *ResourceClaimSchedulingStatus) DeepCopy() *ResourceClaimSchedulingStatus { - if in == nil { - return nil - } - out := new(ResourceClaimSchedulingStatus) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ResourceClaimSpec) DeepCopyInto(out *ResourceClaimSpec) { *out = *in @@ -730,6 +662,13 @@ func (in *ResourceClaimStatus) DeepCopyInto(out *ResourceClaimStatus) { *out = make([]ResourceClaimConsumerReference, len(*in)) copy(*out, *in) } + if in.Devices != nil { + in, out := &in.Devices, &out.Devices + *out = make([]AllocatedDeviceStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } return } @@ -903,7 +842,7 @@ func (in *ResourceSliceSpec) DeepCopyInto(out *ResourceSliceSpec) { out.Pool = in.Pool if in.NodeSelector != nil { in, out := &in.NodeSelector, &out.NodeSelector - *out = new(v1.NodeSelector) + *out = new(corev1.NodeSelector) (*in).DeepCopyInto(*out) } if in.Devices != nil { diff --git a/vendor/k8s.io/api/resource/v1alpha3/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/resource/v1alpha3/zz_generated.prerelease-lifecycle.go new file mode 100644 index 000000000..9f57ab670 --- /dev/null +++ b/vendor/k8s.io/api/resource/v1alpha3/zz_generated.prerelease-lifecycle.go @@ -0,0 +1,218 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by prerelease-lifecycle-gen. DO NOT EDIT. + +package v1alpha3 + +import ( + schema "k8s.io/apimachinery/pkg/runtime/schema" +) + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. 
+func (in *DeviceClass) APILifecycleIntroduced() (major, minor int) { + return 1, 31 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *DeviceClass) APILifecycleDeprecated() (major, minor int) { + return 1, 34 +} + +// APILifecycleReplacement is an autogenerated function, returning the group, version, and kind that should be used instead of this deprecated type. +// It is controlled by "k8s:prerelease-lifecycle-gen:replacement=,," tags in types.go. +func (in *DeviceClass) APILifecycleReplacement() schema.GroupVersionKind { + return schema.GroupVersionKind{Group: "resource.k8s.io", Version: "v1beta1", Kind: "DeviceClass"} +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *DeviceClass) APILifecycleRemoved() (major, minor int) { + return 1, 37 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *DeviceClassList) APILifecycleIntroduced() (major, minor int) { + return 1, 31 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *DeviceClassList) APILifecycleDeprecated() (major, minor int) { + return 1, 34 +} + +// APILifecycleReplacement is an autogenerated function, returning the group, version, and kind that should be used instead of this deprecated type. +// It is controlled by "k8s:prerelease-lifecycle-gen:replacement=,," tags in types.go. +func (in *DeviceClassList) APILifecycleReplacement() schema.GroupVersionKind { + return schema.GroupVersionKind{Group: "resource.k8s.io", Version: "v1beta1", Kind: "DeviceClassList"} +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *DeviceClassList) APILifecycleRemoved() (major, minor int) { + return 1, 37 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *ResourceClaim) APILifecycleIntroduced() (major, minor int) { + return 1, 31 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. 
+// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *ResourceClaim) APILifecycleDeprecated() (major, minor int) { + return 1, 34 +} + +// APILifecycleReplacement is an autogenerated function, returning the group, version, and kind that should be used instead of this deprecated type. +// It is controlled by "k8s:prerelease-lifecycle-gen:replacement=,," tags in types.go. +func (in *ResourceClaim) APILifecycleReplacement() schema.GroupVersionKind { + return schema.GroupVersionKind{Group: "resource.k8s.io", Version: "v1beta1", Kind: "ResourceClaim"} +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *ResourceClaim) APILifecycleRemoved() (major, minor int) { + return 1, 37 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *ResourceClaimList) APILifecycleIntroduced() (major, minor int) { + return 1, 31 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *ResourceClaimList) APILifecycleDeprecated() (major, minor int) { + return 1, 34 +} + +// APILifecycleReplacement is an autogenerated function, returning the group, version, and kind that should be used instead of this deprecated type. +// It is controlled by "k8s:prerelease-lifecycle-gen:replacement=,," tags in types.go. +func (in *ResourceClaimList) APILifecycleReplacement() schema.GroupVersionKind { + return schema.GroupVersionKind{Group: "resource.k8s.io", Version: "v1beta1", Kind: "ResourceClaimList"} +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *ResourceClaimList) APILifecycleRemoved() (major, minor int) { + return 1, 37 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *ResourceClaimTemplate) APILifecycleIntroduced() (major, minor int) { + return 1, 31 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. 
+func (in *ResourceClaimTemplate) APILifecycleDeprecated() (major, minor int) { + return 1, 34 +} + +// APILifecycleReplacement is an autogenerated function, returning the group, version, and kind that should be used instead of this deprecated type. +// It is controlled by "k8s:prerelease-lifecycle-gen:replacement=,," tags in types.go. +func (in *ResourceClaimTemplate) APILifecycleReplacement() schema.GroupVersionKind { + return schema.GroupVersionKind{Group: "resource.k8s.io", Version: "v1beta1", Kind: "ResourceClaimTemplate"} +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *ResourceClaimTemplate) APILifecycleRemoved() (major, minor int) { + return 1, 37 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *ResourceClaimTemplateList) APILifecycleIntroduced() (major, minor int) { + return 1, 31 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *ResourceClaimTemplateList) APILifecycleDeprecated() (major, minor int) { + return 1, 34 +} + +// APILifecycleReplacement is an autogenerated function, returning the group, version, and kind that should be used instead of this deprecated type. +// It is controlled by "k8s:prerelease-lifecycle-gen:replacement=,," tags in types.go. +func (in *ResourceClaimTemplateList) APILifecycleReplacement() schema.GroupVersionKind { + return schema.GroupVersionKind{Group: "resource.k8s.io", Version: "v1beta1", Kind: "ResourceClaimTemplateList"} +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *ResourceClaimTemplateList) APILifecycleRemoved() (major, minor int) { + return 1, 37 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *ResourceSlice) APILifecycleIntroduced() (major, minor int) { + return 1, 31 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *ResourceSlice) APILifecycleDeprecated() (major, minor int) { + return 1, 34 +} + +// APILifecycleReplacement is an autogenerated function, returning the group, version, and kind that should be used instead of this deprecated type. 
+// It is controlled by "k8s:prerelease-lifecycle-gen:replacement=,," tags in types.go. +func (in *ResourceSlice) APILifecycleReplacement() schema.GroupVersionKind { + return schema.GroupVersionKind{Group: "resource.k8s.io", Version: "v1beta1", Kind: "ResourceSlice"} +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *ResourceSlice) APILifecycleRemoved() (major, minor int) { + return 1, 37 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *ResourceSliceList) APILifecycleIntroduced() (major, minor int) { + return 1, 31 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *ResourceSliceList) APILifecycleDeprecated() (major, minor int) { + return 1, 34 +} + +// APILifecycleReplacement is an autogenerated function, returning the group, version, and kind that should be used instead of this deprecated type. +// It is controlled by "k8s:prerelease-lifecycle-gen:replacement=,," tags in types.go. +func (in *ResourceSliceList) APILifecycleReplacement() schema.GroupVersionKind { + return schema.GroupVersionKind{Group: "resource.k8s.io", Version: "v1beta1", Kind: "ResourceSliceList"} +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *ResourceSliceList) APILifecycleRemoved() (major, minor int) { + return 1, 37 +} diff --git a/vendor/k8s.io/api/resource/v1beta1/doc.go b/vendor/k8s.io/api/resource/v1beta1/doc.go new file mode 100644 index 000000000..88c35c6ca --- /dev/null +++ b/vendor/k8s.io/api/resource/v1beta1/doc.go @@ -0,0 +1,24 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:openapi-gen=true +// +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package +// +k8s:prerelease-lifecycle-gen=true +// +groupName=resource.k8s.io + +// Package v1beta1 is the v1beta1 version of the resource API. 
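// Illustrative sketch, not part of the vendored diff: how tooling can use the
// generated APILifecycle* methods above to warn before a served version goes
// away. The version numbers come straight from the generated methods.
package example

import (
	"fmt"

	resourceapi "k8s.io/api/resource/v1alpha3"
)

func warnIfDeprecated(serverMajor, serverMinor int) {
	var rc resourceapi.ResourceClaim
	depMajor, depMinor := rc.APILifecycleDeprecated() // 1, 34 per the generated code
	if serverMajor > depMajor || (serverMajor == depMajor && serverMinor >= depMinor) {
		gvk := rc.APILifecycleReplacement()
		remMajor, remMinor := rc.APILifecycleRemoved()
		fmt.Printf("resource.k8s.io/v1alpha3 ResourceClaim is deprecated; use %s (no longer served as of %d.%d)\n",
			gvk.String(), remMajor, remMinor)
	}
}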
+package v1beta1 // import "k8s.io/api/resource/v1beta1" diff --git a/vendor/k8s.io/api/resource/v1beta1/generated.pb.go b/vendor/k8s.io/api/resource/v1beta1/generated.pb.go new file mode 100644 index 000000000..df4e68f30 --- /dev/null +++ b/vendor/k8s.io/api/resource/v1beta1/generated.pb.go @@ -0,0 +1,8655 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: k8s.io/api/resource/v1beta1/generated.proto + +package v1beta1 + +import ( + fmt "fmt" + + io "io" + + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + v11 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" + + k8s_io_apimachinery_pkg_types "k8s.io/apimachinery/pkg/types" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func (m *AllocatedDeviceStatus) Reset() { *m = AllocatedDeviceStatus{} } +func (*AllocatedDeviceStatus) ProtoMessage() {} +func (*AllocatedDeviceStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_ba331e3ec6484c27, []int{0} +} +func (m *AllocatedDeviceStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AllocatedDeviceStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *AllocatedDeviceStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_AllocatedDeviceStatus.Merge(m, src) +} +func (m *AllocatedDeviceStatus) XXX_Size() int { + return m.Size() +} +func (m *AllocatedDeviceStatus) XXX_DiscardUnknown() { + xxx_messageInfo_AllocatedDeviceStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_AllocatedDeviceStatus proto.InternalMessageInfo + +func (m *AllocationResult) Reset() { *m = AllocationResult{} } +func (*AllocationResult) ProtoMessage() {} +func (*AllocationResult) Descriptor() ([]byte, []int) { + return fileDescriptor_ba331e3ec6484c27, []int{1} +} +func (m *AllocationResult) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AllocationResult) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *AllocationResult) XXX_Merge(src proto.Message) { + xxx_messageInfo_AllocationResult.Merge(m, src) +} +func (m *AllocationResult) XXX_Size() int { + return m.Size() +} +func (m *AllocationResult) XXX_DiscardUnknown() { + 
xxx_messageInfo_AllocationResult.DiscardUnknown(m) +} + +var xxx_messageInfo_AllocationResult proto.InternalMessageInfo + +func (m *BasicDevice) Reset() { *m = BasicDevice{} } +func (*BasicDevice) ProtoMessage() {} +func (*BasicDevice) Descriptor() ([]byte, []int) { + return fileDescriptor_ba331e3ec6484c27, []int{2} +} +func (m *BasicDevice) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BasicDevice) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *BasicDevice) XXX_Merge(src proto.Message) { + xxx_messageInfo_BasicDevice.Merge(m, src) +} +func (m *BasicDevice) XXX_Size() int { + return m.Size() +} +func (m *BasicDevice) XXX_DiscardUnknown() { + xxx_messageInfo_BasicDevice.DiscardUnknown(m) +} + +var xxx_messageInfo_BasicDevice proto.InternalMessageInfo + +func (m *CELDeviceSelector) Reset() { *m = CELDeviceSelector{} } +func (*CELDeviceSelector) ProtoMessage() {} +func (*CELDeviceSelector) Descriptor() ([]byte, []int) { + return fileDescriptor_ba331e3ec6484c27, []int{3} +} +func (m *CELDeviceSelector) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CELDeviceSelector) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CELDeviceSelector) XXX_Merge(src proto.Message) { + xxx_messageInfo_CELDeviceSelector.Merge(m, src) +} +func (m *CELDeviceSelector) XXX_Size() int { + return m.Size() +} +func (m *CELDeviceSelector) XXX_DiscardUnknown() { + xxx_messageInfo_CELDeviceSelector.DiscardUnknown(m) +} + +var xxx_messageInfo_CELDeviceSelector proto.InternalMessageInfo + +func (m *Device) Reset() { *m = Device{} } +func (*Device) ProtoMessage() {} +func (*Device) Descriptor() ([]byte, []int) { + return fileDescriptor_ba331e3ec6484c27, []int{4} +} +func (m *Device) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Device) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Device) XXX_Merge(src proto.Message) { + xxx_messageInfo_Device.Merge(m, src) +} +func (m *Device) XXX_Size() int { + return m.Size() +} +func (m *Device) XXX_DiscardUnknown() { + xxx_messageInfo_Device.DiscardUnknown(m) +} + +var xxx_messageInfo_Device proto.InternalMessageInfo + +func (m *DeviceAllocationConfiguration) Reset() { *m = DeviceAllocationConfiguration{} } +func (*DeviceAllocationConfiguration) ProtoMessage() {} +func (*DeviceAllocationConfiguration) Descriptor() ([]byte, []int) { + return fileDescriptor_ba331e3ec6484c27, []int{5} +} +func (m *DeviceAllocationConfiguration) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeviceAllocationConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeviceAllocationConfiguration) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeviceAllocationConfiguration.Merge(m, src) +} +func (m *DeviceAllocationConfiguration) XXX_Size() int { + return m.Size() +} +func (m *DeviceAllocationConfiguration) XXX_DiscardUnknown() { + xxx_messageInfo_DeviceAllocationConfiguration.DiscardUnknown(m) +} + +var xxx_messageInfo_DeviceAllocationConfiguration 
proto.InternalMessageInfo + +func (m *DeviceAllocationResult) Reset() { *m = DeviceAllocationResult{} } +func (*DeviceAllocationResult) ProtoMessage() {} +func (*DeviceAllocationResult) Descriptor() ([]byte, []int) { + return fileDescriptor_ba331e3ec6484c27, []int{6} +} +func (m *DeviceAllocationResult) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeviceAllocationResult) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeviceAllocationResult) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeviceAllocationResult.Merge(m, src) +} +func (m *DeviceAllocationResult) XXX_Size() int { + return m.Size() +} +func (m *DeviceAllocationResult) XXX_DiscardUnknown() { + xxx_messageInfo_DeviceAllocationResult.DiscardUnknown(m) +} + +var xxx_messageInfo_DeviceAllocationResult proto.InternalMessageInfo + +func (m *DeviceAttribute) Reset() { *m = DeviceAttribute{} } +func (*DeviceAttribute) ProtoMessage() {} +func (*DeviceAttribute) Descriptor() ([]byte, []int) { + return fileDescriptor_ba331e3ec6484c27, []int{7} +} +func (m *DeviceAttribute) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeviceAttribute) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeviceAttribute) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeviceAttribute.Merge(m, src) +} +func (m *DeviceAttribute) XXX_Size() int { + return m.Size() +} +func (m *DeviceAttribute) XXX_DiscardUnknown() { + xxx_messageInfo_DeviceAttribute.DiscardUnknown(m) +} + +var xxx_messageInfo_DeviceAttribute proto.InternalMessageInfo + +func (m *DeviceCapacity) Reset() { *m = DeviceCapacity{} } +func (*DeviceCapacity) ProtoMessage() {} +func (*DeviceCapacity) Descriptor() ([]byte, []int) { + return fileDescriptor_ba331e3ec6484c27, []int{8} +} +func (m *DeviceCapacity) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeviceCapacity) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeviceCapacity) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeviceCapacity.Merge(m, src) +} +func (m *DeviceCapacity) XXX_Size() int { + return m.Size() +} +func (m *DeviceCapacity) XXX_DiscardUnknown() { + xxx_messageInfo_DeviceCapacity.DiscardUnknown(m) +} + +var xxx_messageInfo_DeviceCapacity proto.InternalMessageInfo + +func (m *DeviceClaim) Reset() { *m = DeviceClaim{} } +func (*DeviceClaim) ProtoMessage() {} +func (*DeviceClaim) Descriptor() ([]byte, []int) { + return fileDescriptor_ba331e3ec6484c27, []int{9} +} +func (m *DeviceClaim) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeviceClaim) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeviceClaim) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeviceClaim.Merge(m, src) +} +func (m *DeviceClaim) XXX_Size() int { + return m.Size() +} +func (m *DeviceClaim) XXX_DiscardUnknown() { + xxx_messageInfo_DeviceClaim.DiscardUnknown(m) +} + +var xxx_messageInfo_DeviceClaim proto.InternalMessageInfo + +func (m *DeviceClaimConfiguration) Reset() { *m = DeviceClaimConfiguration{} } +func 
(*DeviceClaimConfiguration) ProtoMessage() {} +func (*DeviceClaimConfiguration) Descriptor() ([]byte, []int) { + return fileDescriptor_ba331e3ec6484c27, []int{10} +} +func (m *DeviceClaimConfiguration) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeviceClaimConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeviceClaimConfiguration) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeviceClaimConfiguration.Merge(m, src) +} +func (m *DeviceClaimConfiguration) XXX_Size() int { + return m.Size() +} +func (m *DeviceClaimConfiguration) XXX_DiscardUnknown() { + xxx_messageInfo_DeviceClaimConfiguration.DiscardUnknown(m) +} + +var xxx_messageInfo_DeviceClaimConfiguration proto.InternalMessageInfo + +func (m *DeviceClass) Reset() { *m = DeviceClass{} } +func (*DeviceClass) ProtoMessage() {} +func (*DeviceClass) Descriptor() ([]byte, []int) { + return fileDescriptor_ba331e3ec6484c27, []int{11} +} +func (m *DeviceClass) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeviceClass) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeviceClass) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeviceClass.Merge(m, src) +} +func (m *DeviceClass) XXX_Size() int { + return m.Size() +} +func (m *DeviceClass) XXX_DiscardUnknown() { + xxx_messageInfo_DeviceClass.DiscardUnknown(m) +} + +var xxx_messageInfo_DeviceClass proto.InternalMessageInfo + +func (m *DeviceClassConfiguration) Reset() { *m = DeviceClassConfiguration{} } +func (*DeviceClassConfiguration) ProtoMessage() {} +func (*DeviceClassConfiguration) Descriptor() ([]byte, []int) { + return fileDescriptor_ba331e3ec6484c27, []int{12} +} +func (m *DeviceClassConfiguration) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeviceClassConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeviceClassConfiguration) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeviceClassConfiguration.Merge(m, src) +} +func (m *DeviceClassConfiguration) XXX_Size() int { + return m.Size() +} +func (m *DeviceClassConfiguration) XXX_DiscardUnknown() { + xxx_messageInfo_DeviceClassConfiguration.DiscardUnknown(m) +} + +var xxx_messageInfo_DeviceClassConfiguration proto.InternalMessageInfo + +func (m *DeviceClassList) Reset() { *m = DeviceClassList{} } +func (*DeviceClassList) ProtoMessage() {} +func (*DeviceClassList) Descriptor() ([]byte, []int) { + return fileDescriptor_ba331e3ec6484c27, []int{13} +} +func (m *DeviceClassList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeviceClassList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeviceClassList) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeviceClassList.Merge(m, src) +} +func (m *DeviceClassList) XXX_Size() int { + return m.Size() +} +func (m *DeviceClassList) XXX_DiscardUnknown() { + xxx_messageInfo_DeviceClassList.DiscardUnknown(m) +} + +var xxx_messageInfo_DeviceClassList proto.InternalMessageInfo + +func (m *DeviceClassSpec) Reset() { *m = DeviceClassSpec{} 
} +func (*DeviceClassSpec) ProtoMessage() {} +func (*DeviceClassSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_ba331e3ec6484c27, []int{14} +} +func (m *DeviceClassSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeviceClassSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeviceClassSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeviceClassSpec.Merge(m, src) +} +func (m *DeviceClassSpec) XXX_Size() int { + return m.Size() +} +func (m *DeviceClassSpec) XXX_DiscardUnknown() { + xxx_messageInfo_DeviceClassSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_DeviceClassSpec proto.InternalMessageInfo + +func (m *DeviceConfiguration) Reset() { *m = DeviceConfiguration{} } +func (*DeviceConfiguration) ProtoMessage() {} +func (*DeviceConfiguration) Descriptor() ([]byte, []int) { + return fileDescriptor_ba331e3ec6484c27, []int{15} +} +func (m *DeviceConfiguration) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeviceConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeviceConfiguration) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeviceConfiguration.Merge(m, src) +} +func (m *DeviceConfiguration) XXX_Size() int { + return m.Size() +} +func (m *DeviceConfiguration) XXX_DiscardUnknown() { + xxx_messageInfo_DeviceConfiguration.DiscardUnknown(m) +} + +var xxx_messageInfo_DeviceConfiguration proto.InternalMessageInfo + +func (m *DeviceConstraint) Reset() { *m = DeviceConstraint{} } +func (*DeviceConstraint) ProtoMessage() {} +func (*DeviceConstraint) Descriptor() ([]byte, []int) { + return fileDescriptor_ba331e3ec6484c27, []int{16} +} +func (m *DeviceConstraint) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeviceConstraint) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeviceConstraint) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeviceConstraint.Merge(m, src) +} +func (m *DeviceConstraint) XXX_Size() int { + return m.Size() +} +func (m *DeviceConstraint) XXX_DiscardUnknown() { + xxx_messageInfo_DeviceConstraint.DiscardUnknown(m) +} + +var xxx_messageInfo_DeviceConstraint proto.InternalMessageInfo + +func (m *DeviceRequest) Reset() { *m = DeviceRequest{} } +func (*DeviceRequest) ProtoMessage() {} +func (*DeviceRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_ba331e3ec6484c27, []int{17} +} +func (m *DeviceRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeviceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeviceRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeviceRequest.Merge(m, src) +} +func (m *DeviceRequest) XXX_Size() int { + return m.Size() +} +func (m *DeviceRequest) XXX_DiscardUnknown() { + xxx_messageInfo_DeviceRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_DeviceRequest proto.InternalMessageInfo + +func (m *DeviceRequestAllocationResult) Reset() { *m = DeviceRequestAllocationResult{} } +func (*DeviceRequestAllocationResult) ProtoMessage() {} +func 
(*DeviceRequestAllocationResult) Descriptor() ([]byte, []int) { + return fileDescriptor_ba331e3ec6484c27, []int{18} +} +func (m *DeviceRequestAllocationResult) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeviceRequestAllocationResult) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeviceRequestAllocationResult) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeviceRequestAllocationResult.Merge(m, src) +} +func (m *DeviceRequestAllocationResult) XXX_Size() int { + return m.Size() +} +func (m *DeviceRequestAllocationResult) XXX_DiscardUnknown() { + xxx_messageInfo_DeviceRequestAllocationResult.DiscardUnknown(m) +} + +var xxx_messageInfo_DeviceRequestAllocationResult proto.InternalMessageInfo + +func (m *DeviceSelector) Reset() { *m = DeviceSelector{} } +func (*DeviceSelector) ProtoMessage() {} +func (*DeviceSelector) Descriptor() ([]byte, []int) { + return fileDescriptor_ba331e3ec6484c27, []int{19} +} +func (m *DeviceSelector) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeviceSelector) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeviceSelector) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeviceSelector.Merge(m, src) +} +func (m *DeviceSelector) XXX_Size() int { + return m.Size() +} +func (m *DeviceSelector) XXX_DiscardUnknown() { + xxx_messageInfo_DeviceSelector.DiscardUnknown(m) +} + +var xxx_messageInfo_DeviceSelector proto.InternalMessageInfo + +func (m *NetworkDeviceData) Reset() { *m = NetworkDeviceData{} } +func (*NetworkDeviceData) ProtoMessage() {} +func (*NetworkDeviceData) Descriptor() ([]byte, []int) { + return fileDescriptor_ba331e3ec6484c27, []int{20} +} +func (m *NetworkDeviceData) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NetworkDeviceData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NetworkDeviceData) XXX_Merge(src proto.Message) { + xxx_messageInfo_NetworkDeviceData.Merge(m, src) +} +func (m *NetworkDeviceData) XXX_Size() int { + return m.Size() +} +func (m *NetworkDeviceData) XXX_DiscardUnknown() { + xxx_messageInfo_NetworkDeviceData.DiscardUnknown(m) +} + +var xxx_messageInfo_NetworkDeviceData proto.InternalMessageInfo + +func (m *OpaqueDeviceConfiguration) Reset() { *m = OpaqueDeviceConfiguration{} } +func (*OpaqueDeviceConfiguration) ProtoMessage() {} +func (*OpaqueDeviceConfiguration) Descriptor() ([]byte, []int) { + return fileDescriptor_ba331e3ec6484c27, []int{21} +} +func (m *OpaqueDeviceConfiguration) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *OpaqueDeviceConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *OpaqueDeviceConfiguration) XXX_Merge(src proto.Message) { + xxx_messageInfo_OpaqueDeviceConfiguration.Merge(m, src) +} +func (m *OpaqueDeviceConfiguration) XXX_Size() int { + return m.Size() +} +func (m *OpaqueDeviceConfiguration) XXX_DiscardUnknown() { + xxx_messageInfo_OpaqueDeviceConfiguration.DiscardUnknown(m) +} + +var xxx_messageInfo_OpaqueDeviceConfiguration 
proto.InternalMessageInfo + +func (m *ResourceClaim) Reset() { *m = ResourceClaim{} } +func (*ResourceClaim) ProtoMessage() {} +func (*ResourceClaim) Descriptor() ([]byte, []int) { + return fileDescriptor_ba331e3ec6484c27, []int{22} +} +func (m *ResourceClaim) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceClaim) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceClaim) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceClaim.Merge(m, src) +} +func (m *ResourceClaim) XXX_Size() int { + return m.Size() +} +func (m *ResourceClaim) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceClaim.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceClaim proto.InternalMessageInfo + +func (m *ResourceClaimConsumerReference) Reset() { *m = ResourceClaimConsumerReference{} } +func (*ResourceClaimConsumerReference) ProtoMessage() {} +func (*ResourceClaimConsumerReference) Descriptor() ([]byte, []int) { + return fileDescriptor_ba331e3ec6484c27, []int{23} +} +func (m *ResourceClaimConsumerReference) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceClaimConsumerReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceClaimConsumerReference) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceClaimConsumerReference.Merge(m, src) +} +func (m *ResourceClaimConsumerReference) XXX_Size() int { + return m.Size() +} +func (m *ResourceClaimConsumerReference) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceClaimConsumerReference.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceClaimConsumerReference proto.InternalMessageInfo + +func (m *ResourceClaimList) Reset() { *m = ResourceClaimList{} } +func (*ResourceClaimList) ProtoMessage() {} +func (*ResourceClaimList) Descriptor() ([]byte, []int) { + return fileDescriptor_ba331e3ec6484c27, []int{24} +} +func (m *ResourceClaimList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceClaimList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceClaimList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceClaimList.Merge(m, src) +} +func (m *ResourceClaimList) XXX_Size() int { + return m.Size() +} +func (m *ResourceClaimList) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceClaimList.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceClaimList proto.InternalMessageInfo + +func (m *ResourceClaimSpec) Reset() { *m = ResourceClaimSpec{} } +func (*ResourceClaimSpec) ProtoMessage() {} +func (*ResourceClaimSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_ba331e3ec6484c27, []int{25} +} +func (m *ResourceClaimSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceClaimSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceClaimSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceClaimSpec.Merge(m, src) +} +func (m *ResourceClaimSpec) XXX_Size() int { + return m.Size() +} +func (m *ResourceClaimSpec) XXX_DiscardUnknown() { + 
xxx_messageInfo_ResourceClaimSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceClaimSpec proto.InternalMessageInfo + +func (m *ResourceClaimStatus) Reset() { *m = ResourceClaimStatus{} } +func (*ResourceClaimStatus) ProtoMessage() {} +func (*ResourceClaimStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_ba331e3ec6484c27, []int{26} +} +func (m *ResourceClaimStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceClaimStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceClaimStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceClaimStatus.Merge(m, src) +} +func (m *ResourceClaimStatus) XXX_Size() int { + return m.Size() +} +func (m *ResourceClaimStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceClaimStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceClaimStatus proto.InternalMessageInfo + +func (m *ResourceClaimTemplate) Reset() { *m = ResourceClaimTemplate{} } +func (*ResourceClaimTemplate) ProtoMessage() {} +func (*ResourceClaimTemplate) Descriptor() ([]byte, []int) { + return fileDescriptor_ba331e3ec6484c27, []int{27} +} +func (m *ResourceClaimTemplate) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceClaimTemplate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceClaimTemplate) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceClaimTemplate.Merge(m, src) +} +func (m *ResourceClaimTemplate) XXX_Size() int { + return m.Size() +} +func (m *ResourceClaimTemplate) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceClaimTemplate.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceClaimTemplate proto.InternalMessageInfo + +func (m *ResourceClaimTemplateList) Reset() { *m = ResourceClaimTemplateList{} } +func (*ResourceClaimTemplateList) ProtoMessage() {} +func (*ResourceClaimTemplateList) Descriptor() ([]byte, []int) { + return fileDescriptor_ba331e3ec6484c27, []int{28} +} +func (m *ResourceClaimTemplateList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceClaimTemplateList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceClaimTemplateList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceClaimTemplateList.Merge(m, src) +} +func (m *ResourceClaimTemplateList) XXX_Size() int { + return m.Size() +} +func (m *ResourceClaimTemplateList) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceClaimTemplateList.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceClaimTemplateList proto.InternalMessageInfo + +func (m *ResourceClaimTemplateSpec) Reset() { *m = ResourceClaimTemplateSpec{} } +func (*ResourceClaimTemplateSpec) ProtoMessage() {} +func (*ResourceClaimTemplateSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_ba331e3ec6484c27, []int{29} +} +func (m *ResourceClaimTemplateSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceClaimTemplateSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceClaimTemplateSpec) XXX_Merge(src 
proto.Message) { + xxx_messageInfo_ResourceClaimTemplateSpec.Merge(m, src) +} +func (m *ResourceClaimTemplateSpec) XXX_Size() int { + return m.Size() +} +func (m *ResourceClaimTemplateSpec) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceClaimTemplateSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceClaimTemplateSpec proto.InternalMessageInfo + +func (m *ResourcePool) Reset() { *m = ResourcePool{} } +func (*ResourcePool) ProtoMessage() {} +func (*ResourcePool) Descriptor() ([]byte, []int) { + return fileDescriptor_ba331e3ec6484c27, []int{30} +} +func (m *ResourcePool) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourcePool) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourcePool) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourcePool.Merge(m, src) +} +func (m *ResourcePool) XXX_Size() int { + return m.Size() +} +func (m *ResourcePool) XXX_DiscardUnknown() { + xxx_messageInfo_ResourcePool.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourcePool proto.InternalMessageInfo + +func (m *ResourceSlice) Reset() { *m = ResourceSlice{} } +func (*ResourceSlice) ProtoMessage() {} +func (*ResourceSlice) Descriptor() ([]byte, []int) { + return fileDescriptor_ba331e3ec6484c27, []int{31} +} +func (m *ResourceSlice) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceSlice) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceSlice) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceSlice.Merge(m, src) +} +func (m *ResourceSlice) XXX_Size() int { + return m.Size() +} +func (m *ResourceSlice) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceSlice.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceSlice proto.InternalMessageInfo + +func (m *ResourceSliceList) Reset() { *m = ResourceSliceList{} } +func (*ResourceSliceList) ProtoMessage() {} +func (*ResourceSliceList) Descriptor() ([]byte, []int) { + return fileDescriptor_ba331e3ec6484c27, []int{32} +} +func (m *ResourceSliceList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceSliceList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceSliceList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceSliceList.Merge(m, src) +} +func (m *ResourceSliceList) XXX_Size() int { + return m.Size() +} +func (m *ResourceSliceList) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceSliceList.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceSliceList proto.InternalMessageInfo + +func (m *ResourceSliceSpec) Reset() { *m = ResourceSliceSpec{} } +func (*ResourceSliceSpec) ProtoMessage() {} +func (*ResourceSliceSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_ba331e3ec6484c27, []int{33} +} +func (m *ResourceSliceSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceSliceSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceSliceSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceSliceSpec.Merge(m, src) +} +func (m *ResourceSliceSpec) XXX_Size() int 
{ + return m.Size() +} +func (m *ResourceSliceSpec) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceSliceSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceSliceSpec proto.InternalMessageInfo + +func init() { + proto.RegisterType((*AllocatedDeviceStatus)(nil), "k8s.io.api.resource.v1beta1.AllocatedDeviceStatus") + proto.RegisterType((*AllocationResult)(nil), "k8s.io.api.resource.v1beta1.AllocationResult") + proto.RegisterType((*BasicDevice)(nil), "k8s.io.api.resource.v1beta1.BasicDevice") + proto.RegisterMapType((map[QualifiedName]DeviceAttribute)(nil), "k8s.io.api.resource.v1beta1.BasicDevice.AttributesEntry") + proto.RegisterMapType((map[QualifiedName]DeviceCapacity)(nil), "k8s.io.api.resource.v1beta1.BasicDevice.CapacityEntry") + proto.RegisterType((*CELDeviceSelector)(nil), "k8s.io.api.resource.v1beta1.CELDeviceSelector") + proto.RegisterType((*Device)(nil), "k8s.io.api.resource.v1beta1.Device") + proto.RegisterType((*DeviceAllocationConfiguration)(nil), "k8s.io.api.resource.v1beta1.DeviceAllocationConfiguration") + proto.RegisterType((*DeviceAllocationResult)(nil), "k8s.io.api.resource.v1beta1.DeviceAllocationResult") + proto.RegisterType((*DeviceAttribute)(nil), "k8s.io.api.resource.v1beta1.DeviceAttribute") + proto.RegisterType((*DeviceCapacity)(nil), "k8s.io.api.resource.v1beta1.DeviceCapacity") + proto.RegisterType((*DeviceClaim)(nil), "k8s.io.api.resource.v1beta1.DeviceClaim") + proto.RegisterType((*DeviceClaimConfiguration)(nil), "k8s.io.api.resource.v1beta1.DeviceClaimConfiguration") + proto.RegisterType((*DeviceClass)(nil), "k8s.io.api.resource.v1beta1.DeviceClass") + proto.RegisterType((*DeviceClassConfiguration)(nil), "k8s.io.api.resource.v1beta1.DeviceClassConfiguration") + proto.RegisterType((*DeviceClassList)(nil), "k8s.io.api.resource.v1beta1.DeviceClassList") + proto.RegisterType((*DeviceClassSpec)(nil), "k8s.io.api.resource.v1beta1.DeviceClassSpec") + proto.RegisterType((*DeviceConfiguration)(nil), "k8s.io.api.resource.v1beta1.DeviceConfiguration") + proto.RegisterType((*DeviceConstraint)(nil), "k8s.io.api.resource.v1beta1.DeviceConstraint") + proto.RegisterType((*DeviceRequest)(nil), "k8s.io.api.resource.v1beta1.DeviceRequest") + proto.RegisterType((*DeviceRequestAllocationResult)(nil), "k8s.io.api.resource.v1beta1.DeviceRequestAllocationResult") + proto.RegisterType((*DeviceSelector)(nil), "k8s.io.api.resource.v1beta1.DeviceSelector") + proto.RegisterType((*NetworkDeviceData)(nil), "k8s.io.api.resource.v1beta1.NetworkDeviceData") + proto.RegisterType((*OpaqueDeviceConfiguration)(nil), "k8s.io.api.resource.v1beta1.OpaqueDeviceConfiguration") + proto.RegisterType((*ResourceClaim)(nil), "k8s.io.api.resource.v1beta1.ResourceClaim") + proto.RegisterType((*ResourceClaimConsumerReference)(nil), "k8s.io.api.resource.v1beta1.ResourceClaimConsumerReference") + proto.RegisterType((*ResourceClaimList)(nil), "k8s.io.api.resource.v1beta1.ResourceClaimList") + proto.RegisterType((*ResourceClaimSpec)(nil), "k8s.io.api.resource.v1beta1.ResourceClaimSpec") + proto.RegisterType((*ResourceClaimStatus)(nil), "k8s.io.api.resource.v1beta1.ResourceClaimStatus") + proto.RegisterType((*ResourceClaimTemplate)(nil), "k8s.io.api.resource.v1beta1.ResourceClaimTemplate") + proto.RegisterType((*ResourceClaimTemplateList)(nil), "k8s.io.api.resource.v1beta1.ResourceClaimTemplateList") + proto.RegisterType((*ResourceClaimTemplateSpec)(nil), "k8s.io.api.resource.v1beta1.ResourceClaimTemplateSpec") + proto.RegisterType((*ResourcePool)(nil), "k8s.io.api.resource.v1beta1.ResourcePool") + 
proto.RegisterType((*ResourceSlice)(nil), "k8s.io.api.resource.v1beta1.ResourceSlice") + proto.RegisterType((*ResourceSliceList)(nil), "k8s.io.api.resource.v1beta1.ResourceSliceList") + proto.RegisterType((*ResourceSliceSpec)(nil), "k8s.io.api.resource.v1beta1.ResourceSliceSpec") +} + +func init() { + proto.RegisterFile("k8s.io/api/resource/v1beta1/generated.proto", fileDescriptor_ba331e3ec6484c27) +} + +var fileDescriptor_ba331e3ec6484c27 = []byte{ + // 2051 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x19, 0x4b, 0x8f, 0x1b, 0x49, + 0x79, 0xda, 0xed, 0x79, 0x7d, 0x9e, 0x57, 0x2a, 0x64, 0x71, 0x26, 0xc2, 0x9e, 0x74, 0x24, 0xf0, + 0x66, 0xb3, 0xed, 0x8d, 0x81, 0x28, 0xca, 0x5e, 0x70, 0xcf, 0xcc, 0x06, 0x43, 0x32, 0x99, 0xad, + 0x61, 0x43, 0xb4, 0x6c, 0x10, 0x35, 0xed, 0x9a, 0x99, 0x66, 0xec, 0x6e, 0xa7, 0xbb, 0x7a, 0xb2, + 0x73, 0x40, 0xa0, 0x3d, 0xaf, 0x10, 0x77, 0xc4, 0x85, 0x03, 0x12, 0x12, 0x42, 0xfc, 0x02, 0x90, + 0x40, 0x88, 0x88, 0x03, 0xac, 0xe0, 0xb2, 0xe2, 0x60, 0x88, 0xf7, 0x07, 0x70, 0xcf, 0x09, 0x55, + 0x75, 0xf5, 0xd3, 0x6e, 0xd3, 0x83, 0x96, 0x51, 0xf6, 0xe6, 0xfe, 0xde, 0xf5, 0xbd, 0xab, 0x0c, + 0xaf, 0x1d, 0xdf, 0xf6, 0x74, 0xcb, 0x69, 0x92, 0x81, 0xd5, 0x74, 0xa9, 0xe7, 0xf8, 0xae, 0x49, + 0x9b, 0x27, 0x37, 0xf7, 0x29, 0x23, 0x37, 0x9b, 0x87, 0xd4, 0xa6, 0x2e, 0x61, 0xb4, 0xab, 0x0f, + 0x5c, 0x87, 0x39, 0xe8, 0x4a, 0x40, 0xac, 0x93, 0x81, 0xa5, 0x87, 0xc4, 0xba, 0x24, 0x5e, 0x7f, + 0xfd, 0xd0, 0x62, 0x47, 0xfe, 0xbe, 0x6e, 0x3a, 0xfd, 0xe6, 0xa1, 0x73, 0xe8, 0x34, 0x05, 0xcf, + 0xbe, 0x7f, 0x20, 0xbe, 0xc4, 0x87, 0xf8, 0x15, 0xc8, 0x5a, 0xd7, 0x12, 0x8a, 0x4d, 0xc7, 0xe5, + 0x4a, 0xb3, 0xfa, 0xd6, 0xbf, 0x12, 0xd3, 0xf4, 0x89, 0x79, 0x64, 0xd9, 0xd4, 0x3d, 0x6d, 0x0e, + 0x8e, 0x0f, 0xd3, 0xd6, 0x9e, 0x85, 0xcb, 0x6b, 0xf6, 0x29, 0x23, 0x93, 0x74, 0x35, 0xf3, 0xb8, + 0x5c, 0xdf, 0x66, 0x56, 0x7f, 0x5c, 0xcd, 0xad, 0xff, 0xc6, 0xe0, 0x99, 0x47, 0xb4, 0x4f, 0xb2, + 0x7c, 0xda, 0xcf, 0x55, 0xb8, 0xd4, 0xee, 0xf5, 0x1c, 0x93, 0xc3, 0xb6, 0xe8, 0x89, 0x65, 0xd2, + 0x3d, 0x46, 0x98, 0xef, 0xa1, 0x2f, 0xc2, 0x5c, 0xd7, 0xb5, 0x4e, 0xa8, 0x5b, 0x55, 0x36, 0x94, + 0xc6, 0xa2, 0xb1, 0xf2, 0x6c, 0x58, 0x9f, 0x19, 0x0d, 0xeb, 0x73, 0x5b, 0x02, 0x8a, 0x25, 0x16, + 0x6d, 0x40, 0x79, 0xe0, 0x38, 0xbd, 0x6a, 0x49, 0x50, 0x2d, 0x49, 0xaa, 0xf2, 0xae, 0xe3, 0xf4, + 0xb0, 0xc0, 0x08, 0x49, 0x42, 0x72, 0x55, 0xcd, 0x48, 0x12, 0x50, 0x2c, 0xb1, 0xc8, 0x04, 0x30, + 0x1d, 0xbb, 0x6b, 0x31, 0xcb, 0xb1, 0xbd, 0x6a, 0x79, 0x43, 0x6d, 0x54, 0x5a, 0x4d, 0x3d, 0x8e, + 0x72, 0x74, 0x30, 0x7d, 0x70, 0x7c, 0xc8, 0x01, 0x9e, 0xce, 0xfd, 0xa7, 0x9f, 0xdc, 0xd4, 0x37, + 0x43, 0x3e, 0x03, 0x49, 0xe1, 0x10, 0x81, 0x3c, 0x9c, 0x10, 0x8b, 0x1e, 0x40, 0xb9, 0x4b, 0x18, + 0xa9, 0xce, 0x6e, 0x28, 0x8d, 0x4a, 0xeb, 0xf5, 0x5c, 0xf1, 0xd2, 0x6f, 0x3a, 0x26, 0x4f, 0xb7, + 0xdf, 0x67, 0xd4, 0xf6, 0xb8, 0xf0, 0xe8, 0x74, 0x5b, 0x84, 0x11, 0x2c, 0x04, 0x21, 0x02, 0x15, + 0x9b, 0xb2, 0xa7, 0x8e, 0x7b, 0xcc, 0x81, 0xd5, 0x39, 0x21, 0x57, 0xd7, 0xa7, 0x24, 0xa7, 0xbe, + 0x23, 0xe9, 0xc5, 0xb1, 0x39, 0x97, 0xb1, 0x3a, 0x1a, 0xd6, 0x2b, 0x3b, 0xb1, 0x18, 0x9c, 0x94, + 0xa9, 0xfd, 0x59, 0x81, 0x35, 0x19, 0x24, 0xcb, 0xb1, 0x31, 0xf5, 0xfc, 0x1e, 0x43, 0xdf, 0x85, + 0xf9, 0xc0, 0x6f, 0x9e, 0x08, 0x50, 0xa5, 0xf5, 0xe5, 0xa9, 0x3a, 0x03, 0x65, 0x59, 0x29, 0xc6, + 0xaa, 0x3c, 0xd1, 0x7c, 0x80, 0xf7, 0x70, 0x28, 0x14, 0x3d, 0x84, 0x25, 0xdb, 0xe9, 0xd2, 0x3d, + 0xda, 0xa3, 0x26, 0x73, 0x5c, 0x11, 0xbb, 0x4a, 0x6b, 0x23, 0xa9, 0x84, 0x57, 0x0a, 0xf7, 0xfe, + 0x4e, 0x82, 
0xce, 0x58, 0x1b, 0x0d, 0xeb, 0x4b, 0x49, 0x08, 0x4e, 0xc9, 0xd1, 0xfe, 0xa1, 0x42, + 0xc5, 0x20, 0x9e, 0x65, 0x06, 0x1a, 0xd1, 0x0f, 0x00, 0x08, 0x63, 0xae, 0xb5, 0xef, 0x33, 0x71, + 0x14, 0x1e, 0xf5, 0xdb, 0x53, 0x8f, 0x92, 0xe0, 0xd6, 0xdb, 0x11, 0xeb, 0xb6, 0xcd, 0xdc, 0x53, + 0xe3, 0x5a, 0x18, 0xfe, 0x18, 0xf1, 0xc1, 0x3f, 0xeb, 0xcb, 0x6f, 0xfb, 0xa4, 0x67, 0x1d, 0x58, + 0xb4, 0xbb, 0x43, 0xfa, 0x14, 0x27, 0x14, 0x22, 0x1f, 0x16, 0x4c, 0x32, 0x20, 0xa6, 0xc5, 0x4e, + 0xab, 0x25, 0xa1, 0xfc, 0x56, 0x61, 0xe5, 0x9b, 0x92, 0x31, 0x50, 0x7d, 0x55, 0xaa, 0x5e, 0x08, + 0xc1, 0xe3, 0x8a, 0x23, 0x55, 0xeb, 0xc7, 0xb0, 0x9a, 0x31, 0x1d, 0xad, 0x81, 0x7a, 0x4c, 0x4f, + 0x83, 0x6a, 0xc3, 0xfc, 0x27, 0x32, 0x60, 0xf6, 0x84, 0xf4, 0x7c, 0x2a, 0x6a, 0xab, 0xd2, 0xba, + 0x51, 0x24, 0xc0, 0xa1, 0x50, 0x1c, 0xb0, 0xde, 0x29, 0xdd, 0x56, 0xd6, 0x8f, 0x60, 0x39, 0x65, + 0xea, 0x04, 0x55, 0xed, 0xb4, 0xaa, 0xd7, 0x0a, 0xa8, 0x0a, 0x45, 0x26, 0x34, 0x69, 0x77, 0xe1, + 0xc2, 0xe6, 0xf6, 0x3d, 0xd9, 0x47, 0x64, 0xc4, 0x51, 0x0b, 0x80, 0xbe, 0x3f, 0x70, 0xa9, 0xc7, + 0x6b, 0x48, 0x76, 0x93, 0xa8, 0x4c, 0xb7, 0x23, 0x0c, 0x4e, 0x50, 0x69, 0x3e, 0xc8, 0xee, 0xc0, + 0xfb, 0x8b, 0x4d, 0xfa, 0x54, 0xf2, 0x45, 0x15, 0x28, 0xfc, 0x29, 0x30, 0xa8, 0x03, 0xb3, 0xfb, + 0x3c, 0x2a, 0xd2, 0xf6, 0x46, 0xd1, 0xf8, 0x19, 0x8b, 0xa3, 0x61, 0x7d, 0x56, 0x00, 0x70, 0x20, + 0x41, 0xfb, 0xb0, 0x04, 0x5f, 0xc8, 0x56, 0xca, 0xa6, 0x63, 0x1f, 0x58, 0x87, 0xbe, 0x2b, 0x3e, + 0xd0, 0xd7, 0x60, 0x2e, 0x90, 0x28, 0x0d, 0x6a, 0x84, 0xcd, 0x6c, 0x4f, 0x40, 0x5f, 0x0c, 0xeb, + 0xaf, 0x64, 0x59, 0x03, 0x0c, 0x96, 0x7c, 0xa8, 0x01, 0x0b, 0x2e, 0x7d, 0xe2, 0x53, 0x8f, 0x79, + 0x22, 0xe3, 0x16, 0x8d, 0x25, 0x9e, 0x35, 0x58, 0xc2, 0x70, 0x84, 0x45, 0x3f, 0x84, 0x8b, 0x41, + 0x35, 0xa6, 0x4c, 0x90, 0x95, 0xf8, 0x46, 0x91, 0x10, 0x25, 0xf9, 0x8c, 0x2b, 0xd2, 0xd4, 0x8b, + 0x13, 0x90, 0x78, 0x92, 0x26, 0xed, 0x13, 0x05, 0x5e, 0x99, 0xdc, 0x38, 0x10, 0x85, 0x79, 0x57, + 0xfc, 0x0a, 0x6b, 0xf6, 0x4e, 0x01, 0x7b, 0xe4, 0x19, 0xf3, 0xbb, 0x50, 0xf0, 0xed, 0xe1, 0x50, + 0x36, 0xda, 0x87, 0x39, 0x53, 0x98, 0x24, 0x8b, 0xf3, 0xce, 0x99, 0x9a, 0x5c, 0xfa, 0xfc, 0xd1, + 0xdc, 0x09, 0xc0, 0x58, 0x4a, 0xd6, 0x7e, 0xa9, 0xc0, 0x6a, 0xa6, 0x7a, 0x50, 0x0d, 0x54, 0xcb, + 0x66, 0x22, 0xa3, 0xd4, 0x20, 0x3e, 0x1d, 0x9b, 0x3d, 0xe4, 0x79, 0x8e, 0x39, 0x02, 0x5d, 0x85, + 0xf2, 0x3e, 0x9f, 0x7a, 0x3c, 0x16, 0x0b, 0xc6, 0xf2, 0x68, 0x58, 0x5f, 0x34, 0x1c, 0xa7, 0x17, + 0x50, 0x08, 0x14, 0xfa, 0x12, 0xcc, 0x79, 0xcc, 0xb5, 0xec, 0xc3, 0x6a, 0x59, 0x64, 0x8a, 0xe8, + 0xf1, 0x7b, 0x02, 0x12, 0x90, 0x49, 0x34, 0xba, 0x0e, 0xf3, 0x27, 0xd4, 0x15, 0xc5, 0x31, 0x2b, + 0x28, 0x45, 0x0b, 0x7d, 0x18, 0x80, 0x02, 0xd2, 0x90, 0x40, 0xa3, 0xb0, 0x92, 0xae, 0x3e, 0xb4, + 0x17, 0x56, 0xae, 0x32, 0x36, 0x79, 0xc6, 0x06, 0x66, 0xec, 0xb1, 0xb7, 0x7d, 0x62, 0x33, 0x8b, + 0x9d, 0x1a, 0xcb, 0xd2, 0x29, 0xb3, 0x81, 0xa2, 0x40, 0x96, 0xf6, 0xab, 0x12, 0x54, 0xa4, 0x9e, + 0x1e, 0xb1, 0xfa, 0xe8, 0x51, 0x22, 0x67, 0x83, 0x70, 0x5f, 0x2f, 0x1e, 0x6e, 0x63, 0x2d, 0xec, + 0x8c, 0x13, 0x72, 0xbc, 0x0b, 0x15, 0xd3, 0xb1, 0x3d, 0xe6, 0x12, 0xcb, 0x96, 0x05, 0x91, 0x1e, + 0xcb, 0x53, 0x72, 0x5b, 0x72, 0x19, 0x17, 0xa5, 0xfc, 0x4a, 0x0c, 0xf3, 0x70, 0x52, 0x2c, 0x7a, + 0x1c, 0xa5, 0x91, 0x2a, 0x14, 0x7c, 0xb5, 0x88, 0x02, 0x7e, 0xf2, 0x62, 0x19, 0xf4, 0x47, 0x05, + 0xaa, 0x79, 0x4c, 0xa9, 0x7a, 0x57, 0xfe, 0x97, 0x7a, 0x2f, 0x9d, 0x5b, 0xbd, 0xff, 0x4e, 0x49, + 0x84, 0xdd, 0xf3, 0xd0, 0xf7, 0x60, 0x81, 0x6f, 0x58, 0x62, 0x61, 0x52, 0xc6, 0xac, 0x98, 0xb2, + 0x8f, 0x3d, 0xd8, 0xff, 0x3e, 0x35, 
0xd9, 0x7d, 0xca, 0x48, 0xdc, 0xe9, 0x63, 0x18, 0x8e, 0xa4, + 0xa2, 0x1d, 0x28, 0x7b, 0x03, 0x6a, 0x9e, 0x61, 0xc2, 0x09, 0xcb, 0xf6, 0x06, 0xd4, 0x8c, 0x67, + 0x01, 0xff, 0xc2, 0x42, 0x8e, 0xf6, 0xd3, 0x64, 0x24, 0x3c, 0x2f, 0x1d, 0x89, 0x1c, 0xff, 0x2a, + 0xe7, 0xe6, 0xdf, 0xdf, 0x46, 0x9d, 0x46, 0x58, 0x77, 0xcf, 0xf2, 0x18, 0x7a, 0x6f, 0xcc, 0xc7, + 0x7a, 0x31, 0x1f, 0x73, 0x6e, 0xe1, 0xe1, 0xa8, 0xbc, 0x42, 0x48, 0xc2, 0xbf, 0xf7, 0x61, 0xd6, + 0x62, 0xb4, 0x1f, 0x16, 0x56, 0xa3, 0xa8, 0x83, 0xe3, 0xbe, 0xd0, 0xe1, 0xec, 0x38, 0x90, 0xa2, + 0xfd, 0x25, 0x7d, 0x00, 0xee, 0x78, 0xf4, 0x1e, 0x2c, 0x7a, 0x72, 0xd4, 0x87, 0xcd, 0xa1, 0xc8, + 0xfa, 0x10, 0x2d, 0x8c, 0x17, 0xa4, 0xa6, 0xc5, 0x10, 0xe2, 0xe1, 0x58, 0x60, 0xa2, 0x72, 0x4b, + 0x67, 0xa9, 0xdc, 0x4c, 0xe8, 0x73, 0x2b, 0xf7, 0x09, 0x4c, 0x8a, 0x1e, 0x7a, 0x17, 0xe6, 0x9c, + 0x01, 0x79, 0x12, 0x75, 0xd5, 0xe9, 0x3b, 0xe1, 0x03, 0x41, 0x3a, 0x29, 0x45, 0x80, 0xab, 0x0c, + 0xd0, 0x58, 0x4a, 0xd4, 0x7e, 0xac, 0xc0, 0x5a, 0xb6, 0x85, 0x9d, 0xa1, 0x49, 0xec, 0xc2, 0x4a, + 0x9f, 0x30, 0xf3, 0x28, 0x9a, 0x55, 0xf2, 0xe6, 0xd5, 0x18, 0x0d, 0xeb, 0x2b, 0xf7, 0x53, 0x98, + 0x17, 0xc3, 0x3a, 0x7a, 0xcb, 0xef, 0xf5, 0x4e, 0xd3, 0x5b, 0x68, 0x86, 0x5f, 0xfb, 0x40, 0x85, + 0xe5, 0x54, 0xc3, 0x2e, 0xb0, 0x73, 0xb5, 0x61, 0xb5, 0x1b, 0xfb, 0x9a, 0x23, 0xa4, 0x19, 0x9f, + 0x97, 0xc4, 0xc9, 0x34, 0x11, 0x7c, 0x59, 0xfa, 0x74, 0xde, 0xa8, 0x9f, 0x76, 0xde, 0x3c, 0x84, + 0x15, 0x12, 0xed, 0x01, 0xf7, 0x9d, 0x2e, 0x95, 0x53, 0x58, 0x97, 0x5c, 0x2b, 0xed, 0x14, 0xf6, + 0xc5, 0xb0, 0xfe, 0xb9, 0xec, 0xf6, 0xc0, 0xe1, 0x38, 0x23, 0x05, 0x5d, 0x83, 0x59, 0xd3, 0xf1, + 0x6d, 0x26, 0x46, 0xb5, 0x1a, 0x97, 0xc9, 0x26, 0x07, 0xe2, 0x00, 0x87, 0x6e, 0x42, 0x85, 0x74, + 0xfb, 0x96, 0xdd, 0x36, 0x4d, 0xea, 0x79, 0xe2, 0x4e, 0xb8, 0x10, 0xcc, 0xff, 0x76, 0x0c, 0xc6, + 0x49, 0x1a, 0xed, 0xdf, 0x4a, 0xb8, 0x79, 0xe6, 0x2c, 0x49, 0xe8, 0x55, 0xbe, 0x71, 0x09, 0x94, + 0x8c, 0x4b, 0x62, 0x6b, 0x12, 0x60, 0x1c, 0xe2, 0x13, 0x77, 0xf7, 0x52, 0xa1, 0xbb, 0xbb, 0x5a, + 0xe0, 0xee, 0x5e, 0x9e, 0x7a, 0x77, 0xcf, 0x9c, 0x78, 0xb6, 0xc0, 0x89, 0xbf, 0x13, 0xae, 0x32, + 0xd1, 0x45, 0xa1, 0x03, 0xaa, 0x49, 0x7b, 0x13, 0xba, 0xe0, 0x78, 0x2e, 0x8c, 0xdd, 0x32, 0x8c, + 0xf9, 0xd1, 0xb0, 0xae, 0x6e, 0x6e, 0xdf, 0xc3, 0x5c, 0x86, 0xf6, 0x6b, 0x05, 0x2e, 0x8c, 0x5d, + 0xb3, 0xd1, 0x9b, 0xb0, 0x6c, 0xd9, 0x8c, 0xba, 0x07, 0xc4, 0xa4, 0x3b, 0x71, 0x82, 0x5f, 0x92, + 0x87, 0x5a, 0xee, 0x24, 0x91, 0x38, 0x4d, 0x8b, 0x2e, 0x83, 0x6a, 0x0d, 0xc2, 0x95, 0x5d, 0x68, + 0xeb, 0xec, 0x7a, 0x98, 0xc3, 0x78, 0x35, 0x1c, 0x11, 0xb7, 0xfb, 0x94, 0xb8, 0xb4, 0xdd, 0xed, + 0xf2, 0x3b, 0x8c, 0x74, 0x69, 0x54, 0x0d, 0x5f, 0x4f, 0xa3, 0x71, 0x96, 0x5e, 0xfb, 0x85, 0x02, + 0x97, 0x73, 0xfb, 0x48, 0xe1, 0xc7, 0x18, 0x02, 0x30, 0x20, 0x2e, 0xe9, 0x53, 0x46, 0x5d, 0x4f, + 0x0e, 0xd5, 0x33, 0xbe, 0x71, 0x44, 0xf3, 0x7a, 0x37, 0x12, 0x84, 0x13, 0x42, 0xb5, 0x9f, 0x95, + 0x60, 0x19, 0xcb, 0x70, 0x04, 0xcb, 0xe1, 0xff, 0x7f, 0x4b, 0xd8, 0x4d, 0x6d, 0x09, 0xd3, 0x33, + 0x23, 0x65, 0x5b, 0xde, 0x9e, 0x80, 0x1e, 0xf1, 0xe5, 0x9c, 0x30, 0xdf, 0x2b, 0x74, 0x9b, 0x4a, + 0xcb, 0x14, 0x7c, 0x71, 0x08, 0x82, 0x6f, 0x2c, 0xe5, 0x69, 0x23, 0x05, 0x6a, 0x29, 0x7a, 0xde, + 0xe5, 0xfd, 0x3e, 0x75, 0x31, 0x3d, 0xa0, 0x2e, 0xb5, 0x4d, 0x8a, 0x6e, 0xc0, 0x02, 0x19, 0x58, + 0x77, 0x5d, 0xc7, 0x1f, 0xc8, 0x78, 0x46, 0x23, 0xbc, 0xbd, 0xdb, 0x11, 0x70, 0x1c, 0x51, 0x70, + 0xea, 0xd0, 0x20, 0x99, 0x55, 0x89, 0x7d, 0x3a, 0x80, 0xe3, 0x88, 0x22, 0x6a, 0xdd, 0xe5, 0xdc, + 0xd6, 0x6d, 0x80, 0xea, 0x5b, 0x5d, 0x79, 0xd5, 0x78, 0x43, 
0x12, 0xa8, 0xef, 0x74, 0xb6, 0x5e, + 0x0c, 0xeb, 0x57, 0xf3, 0x9e, 0x11, 0xd9, 0xe9, 0x80, 0x7a, 0xfa, 0x3b, 0x9d, 0x2d, 0xcc, 0x99, + 0xb5, 0xdf, 0x2b, 0x70, 0x21, 0x75, 0xc8, 0x73, 0x58, 0x65, 0x1e, 0xa4, 0x57, 0x99, 0xeb, 0xc5, + 0x23, 0x96, 0xb3, 0xcc, 0x1c, 0x65, 0xce, 0x20, 0xb6, 0x99, 0xbd, 0xec, 0xb3, 0x5a, 0xa3, 0xe8, + 0x55, 0x21, 0xff, 0x2d, 0x4d, 0xfb, 0x53, 0x09, 0x2e, 0x4e, 0xc8, 0x21, 0xf4, 0x18, 0x20, 0x1e, + 0x2f, 0x52, 0xdf, 0xf4, 0xbb, 0xcf, 0xd8, 0xd5, 0x79, 0x45, 0x3c, 0x76, 0xc5, 0xd0, 0x84, 0x40, + 0xe4, 0x42, 0xc5, 0xa5, 0x1e, 0x75, 0x4f, 0x68, 0xf7, 0x2d, 0xc7, 0x95, 0x7e, 0x7b, 0xb3, 0xb8, + 0xdf, 0xc6, 0x32, 0x37, 0xbe, 0x69, 0xe1, 0x58, 0x2e, 0x4e, 0x2a, 0x41, 0x8f, 0x63, 0xff, 0x05, + 0x2f, 0xb8, 0xad, 0x22, 0xe7, 0x49, 0xbf, 0x3d, 0x4f, 0xf1, 0xe4, 0xdf, 0x15, 0xb8, 0x94, 0xb2, + 0xf1, 0x5b, 0xb4, 0x3f, 0xe8, 0x11, 0x46, 0xcf, 0xa1, 0x0b, 0x3d, 0x4a, 0x75, 0xa1, 0x5b, 0xc5, + 0xfd, 0x18, 0xda, 0x98, 0x7b, 0x6b, 0xf9, 0x9b, 0x02, 0x97, 0x27, 0x72, 0x9c, 0x43, 0x59, 0x7d, + 0x3b, 0x5d, 0x56, 0xad, 0xb3, 0x1f, 0x2b, 0xa7, 0xbc, 0xfe, 0x9a, 0x77, 0x28, 0x51, 0x67, 0x9f, + 0xc1, 0xa1, 0xa1, 0xfd, 0x46, 0x81, 0xa5, 0x90, 0x92, 0xef, 0x48, 0x05, 0xf6, 0xe4, 0x16, 0x80, + 0xfc, 0xcb, 0x25, 0xbc, 0xc9, 0xab, 0xb1, 0xd9, 0x77, 0x23, 0x0c, 0x4e, 0x50, 0xa1, 0x6f, 0x00, + 0x0a, 0x0d, 0xdc, 0xeb, 0x89, 0x55, 0x80, 0xef, 0x9b, 0xaa, 0xe0, 0x5d, 0x97, 0xbc, 0x08, 0x8f, + 0x51, 0xe0, 0x09, 0x5c, 0xda, 0x1f, 0x94, 0x78, 0x5a, 0x0b, 0xf0, 0x4b, 0xea, 0x78, 0x61, 0x5b, + 0xae, 0xe3, 0x93, 0xe3, 0x46, 0x50, 0xbe, 0xac, 0xe3, 0x46, 0x18, 0x97, 0x53, 0x0f, 0x1f, 0xaa, + 0x99, 0x43, 0x88, 0x3a, 0x28, 0xba, 0xd9, 0x7d, 0x33, 0xf1, 0x37, 0x5b, 0xa5, 0xf5, 0x6a, 0x21, + 0x6b, 0x78, 0x8e, 0x4e, 0xdc, 0xea, 0x6f, 0xc0, 0x82, 0xed, 0x74, 0x83, 0x15, 0x38, 0xb3, 0x52, + 0xec, 0x48, 0x38, 0x8e, 0x28, 0xc6, 0xfe, 0x09, 0x2a, 0x7f, 0x3a, 0xff, 0x04, 0x89, 0x35, 0xa8, + 0xd7, 0xe3, 0x04, 0xe1, 0x85, 0x21, 0x5e, 0x83, 0x24, 0x1c, 0x47, 0x14, 0x68, 0x27, 0x1e, 0x2c, + 0x73, 0x22, 0x22, 0xd7, 0x0a, 0x0c, 0xe6, 0xfc, 0x49, 0x62, 0xb4, 0x9f, 0x3d, 0xaf, 0xcd, 0x7c, + 0xf4, 0xbc, 0x36, 0xf3, 0xf1, 0xf3, 0xda, 0xcc, 0x8f, 0x46, 0x35, 0xe5, 0xd9, 0xa8, 0xa6, 0x7c, + 0x34, 0xaa, 0x29, 0x1f, 0x8f, 0x6a, 0xca, 0xbf, 0x46, 0x35, 0xe5, 0x27, 0x9f, 0xd4, 0x66, 0xde, + 0xbd, 0x32, 0xe5, 0x1f, 0xe9, 0xff, 0x04, 0x00, 0x00, 0xff, 0xff, 0x26, 0xe2, 0x5c, 0xf8, 0xaf, + 0x1e, 0x00, 0x00, +} + +func (m *AllocatedDeviceStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AllocatedDeviceStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AllocatedDeviceStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.NetworkData != nil { + { + size, err := m.NetworkData.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + { + size, err := m.Data.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + if len(m.Conditions) > 0 { + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = 
encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + i -= len(m.Device) + copy(dAtA[i:], m.Device) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Device))) + i-- + dAtA[i] = 0x1a + i -= len(m.Pool) + copy(dAtA[i:], m.Pool) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Pool))) + i-- + dAtA[i] = 0x12 + i -= len(m.Driver) + copy(dAtA[i:], m.Driver) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Driver))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *AllocationResult) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AllocationResult) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AllocationResult) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.NodeSelector != nil { + { + size, err := m.NodeSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + { + size, err := m.Devices.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *BasicDevice) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BasicDevice) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BasicDevice) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Capacity) > 0 { + keysForCapacity := make([]string, 0, len(m.Capacity)) + for k := range m.Capacity { + keysForCapacity = append(keysForCapacity, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForCapacity) + for iNdEx := len(keysForCapacity) - 1; iNdEx >= 0; iNdEx-- { + v := m.Capacity[QualifiedName(keysForCapacity[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForCapacity[iNdEx]) + copy(dAtA[i:], keysForCapacity[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForCapacity[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Attributes) > 0 { + keysForAttributes := make([]string, 0, len(m.Attributes)) + for k := range m.Attributes { + keysForAttributes = append(keysForAttributes, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAttributes) + for iNdEx := len(keysForAttributes) - 1; iNdEx >= 0; iNdEx-- { + v := m.Attributes[QualifiedName(keysForAttributes[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForAttributes[iNdEx]) + copy(dAtA[i:], keysForAttributes[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForAttributes[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa + } + } + return 
len(dAtA) - i, nil +} + +func (m *CELDeviceSelector) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CELDeviceSelector) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CELDeviceSelector) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Expression) + copy(dAtA[i:], m.Expression) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Expression))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *Device) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Device) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Device) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Basic != nil { + { + size, err := m.Basic.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *DeviceAllocationConfiguration) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeviceAllocationConfiguration) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeviceAllocationConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.DeviceConfiguration.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + if len(m.Requests) > 0 { + for iNdEx := len(m.Requests) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Requests[iNdEx]) + copy(dAtA[i:], m.Requests[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Requests[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + i -= len(m.Source) + copy(dAtA[i:], m.Source) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Source))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *DeviceAllocationResult) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeviceAllocationResult) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeviceAllocationResult) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Config) > 0 { + for iNdEx := len(m.Config) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Config[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.Results) > 0 { + for iNdEx := len(m.Results) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := 
m.Results[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *DeviceAttribute) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeviceAttribute) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeviceAttribute) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.VersionValue != nil { + i -= len(*m.VersionValue) + copy(dAtA[i:], *m.VersionValue) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.VersionValue))) + i-- + dAtA[i] = 0x2a + } + if m.StringValue != nil { + i -= len(*m.StringValue) + copy(dAtA[i:], *m.StringValue) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.StringValue))) + i-- + dAtA[i] = 0x22 + } + if m.BoolValue != nil { + i-- + if *m.BoolValue { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + if m.IntValue != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.IntValue)) + i-- + dAtA[i] = 0x10 + } + return len(dAtA) - i, nil +} + +func (m *DeviceCapacity) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeviceCapacity) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeviceCapacity) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Value.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *DeviceClaim) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeviceClaim) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeviceClaim) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Config) > 0 { + for iNdEx := len(m.Config) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Config[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if len(m.Constraints) > 0 { + for iNdEx := len(m.Constraints) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Constraints[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.Requests) > 0 { + for iNdEx := len(m.Requests) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Requests[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *DeviceClaimConfiguration) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := 
m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeviceClaimConfiguration) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeviceClaimConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.DeviceConfiguration.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + if len(m.Requests) > 0 { + for iNdEx := len(m.Requests) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Requests[iNdEx]) + copy(dAtA[i:], m.Requests[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Requests[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *DeviceClass) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeviceClass) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeviceClass) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *DeviceClassConfiguration) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeviceClassConfiguration) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeviceClassConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.DeviceConfiguration.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *DeviceClassList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeviceClassList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeviceClassList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *DeviceClassSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = 
make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeviceClassSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeviceClassSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Config) > 0 { + for iNdEx := len(m.Config) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Config[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.Selectors) > 0 { + for iNdEx := len(m.Selectors) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Selectors[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *DeviceConfiguration) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeviceConfiguration) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeviceConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Opaque != nil { + { + size, err := m.Opaque.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *DeviceConstraint) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeviceConstraint) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeviceConstraint) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.MatchAttribute != nil { + i -= len(*m.MatchAttribute) + copy(dAtA[i:], *m.MatchAttribute) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.MatchAttribute))) + i-- + dAtA[i] = 0x12 + } + if len(m.Requests) > 0 { + for iNdEx := len(m.Requests) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Requests[iNdEx]) + copy(dAtA[i:], m.Requests[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Requests[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *DeviceRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeviceRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeviceRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.AdminAccess != nil { + i-- + if *m.AdminAccess { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x30 + } + i = encodeVarintGenerated(dAtA, i, uint64(m.Count)) + i-- + dAtA[i] = 0x28 + i -= len(m.AllocationMode) + copy(dAtA[i:], m.AllocationMode) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.AllocationMode))) + i-- + dAtA[i] = 0x22 + if len(m.Selectors) 
> 0 { + for iNdEx := len(m.Selectors) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Selectors[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + i -= len(m.DeviceClassName) + copy(dAtA[i:], m.DeviceClassName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DeviceClassName))) + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *DeviceRequestAllocationResult) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeviceRequestAllocationResult) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeviceRequestAllocationResult) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.AdminAccess != nil { + i-- + if *m.AdminAccess { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x28 + } + i -= len(m.Device) + copy(dAtA[i:], m.Device) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Device))) + i-- + dAtA[i] = 0x22 + i -= len(m.Pool) + copy(dAtA[i:], m.Pool) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Pool))) + i-- + dAtA[i] = 0x1a + i -= len(m.Driver) + copy(dAtA[i:], m.Driver) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Driver))) + i-- + dAtA[i] = 0x12 + i -= len(m.Request) + copy(dAtA[i:], m.Request) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Request))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *DeviceSelector) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeviceSelector) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeviceSelector) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.CEL != nil { + { + size, err := m.CEL.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *NetworkDeviceData) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NetworkDeviceData) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NetworkDeviceData) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.HardwareAddress) + copy(dAtA[i:], m.HardwareAddress) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.HardwareAddress))) + i-- + dAtA[i] = 0x1a + if len(m.IPs) > 0 { + for iNdEx := len(m.IPs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.IPs[iNdEx]) + copy(dAtA[i:], m.IPs[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.IPs[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + i -= len(m.InterfaceName) + copy(dAtA[i:], m.InterfaceName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.InterfaceName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, 
nil +} + +func (m *OpaqueDeviceConfiguration) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *OpaqueDeviceConfiguration) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *OpaqueDeviceConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Parameters.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(m.Driver) + copy(dAtA[i:], m.Driver) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Driver))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ResourceClaim) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceClaim) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceClaim) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ResourceClaimConsumerReference) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceClaimConsumerReference) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceClaimConsumerReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.UID) + copy(dAtA[i:], m.UID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) + i-- + dAtA[i] = 0x2a + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x22 + i -= len(m.Resource) + copy(dAtA[i:], m.Resource) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resource))) + i-- + dAtA[i] = 0x1a + i -= len(m.APIGroup) + copy(dAtA[i:], m.APIGroup) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIGroup))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ResourceClaimList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceClaimList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceClaimList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := 
len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ResourceClaimSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceClaimSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceClaimSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Devices.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ResourceClaimStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceClaimStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceClaimStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Devices) > 0 { + for iNdEx := len(m.Devices) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Devices[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + if len(m.ReservedFor) > 0 { + for iNdEx := len(m.ReservedFor) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ReservedFor[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if m.Allocation != nil { + { + size, err := m.Allocation.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ResourceClaimTemplate) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceClaimTemplate) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceClaimTemplate) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ResourceClaimTemplateList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := 
m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceClaimTemplateList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceClaimTemplateList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ResourceClaimTemplateSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceClaimTemplateSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceClaimTemplateSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ResourcePool) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourcePool) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourcePool) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i = encodeVarintGenerated(dAtA, i, uint64(m.ResourceSliceCount)) + i-- + dAtA[i] = 0x18 + i = encodeVarintGenerated(dAtA, i, uint64(m.Generation)) + i-- + dAtA[i] = 0x10 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ResourceSlice) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceSlice) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceSlice) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ResourceSliceList) Marshal() (dAtA []byte, err error) { + 
size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceSliceList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceSliceList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ResourceSliceSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceSliceSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceSliceSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Devices) > 0 { + for iNdEx := len(m.Devices) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Devices[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + } + i-- + if m.AllNodes { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x28 + if m.NodeSelector != nil { + { + size, err := m.NodeSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + i -= len(m.NodeName) + copy(dAtA[i:], m.NodeName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.NodeName))) + i-- + dAtA[i] = 0x1a + { + size, err := m.Pool.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(m.Driver) + copy(dAtA[i:], m.Driver) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Driver))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *AllocatedDeviceStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Driver) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Pool) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Device) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = m.Data.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.NetworkData != nil { + l = m.NetworkData.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *AllocationResult) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Devices.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.NodeSelector != nil { + l = m.NodeSelector.Size() + n += 1 + l + 
sovGenerated(uint64(l)) + } + return n +} + +func (m *BasicDevice) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Attributes) > 0 { + for k, v := range m.Attributes { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if len(m.Capacity) > 0 { + for k, v := range m.Capacity { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n +} + +func (m *CELDeviceSelector) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Expression) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *Device) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + if m.Basic != nil { + l = m.Basic.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *DeviceAllocationConfiguration) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Source) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Requests) > 0 { + for _, s := range m.Requests { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = m.DeviceConfiguration.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *DeviceAllocationResult) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Results) > 0 { + for _, e := range m.Results { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Config) > 0 { + for _, e := range m.Config { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *DeviceAttribute) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.IntValue != nil { + n += 1 + sovGenerated(uint64(*m.IntValue)) + } + if m.BoolValue != nil { + n += 2 + } + if m.StringValue != nil { + l = len(*m.StringValue) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.VersionValue != nil { + l = len(*m.VersionValue) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *DeviceCapacity) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Value.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *DeviceClaim) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Requests) > 0 { + for _, e := range m.Requests { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Constraints) > 0 { + for _, e := range m.Constraints { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Config) > 0 { + for _, e := range m.Config { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *DeviceClaimConfiguration) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Requests) > 0 { + for _, s := range m.Requests { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = m.DeviceConfiguration.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *DeviceClass) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *DeviceClassConfiguration) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = 
m.DeviceConfiguration.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *DeviceClassList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *DeviceClassSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Selectors) > 0 { + for _, e := range m.Selectors { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Config) > 0 { + for _, e := range m.Config { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *DeviceConfiguration) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Opaque != nil { + l = m.Opaque.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *DeviceConstraint) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Requests) > 0 { + for _, s := range m.Requests { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.MatchAttribute != nil { + l = len(*m.MatchAttribute) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *DeviceRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.DeviceClassName) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Selectors) > 0 { + for _, e := range m.Selectors { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.AllocationMode) + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.Count)) + if m.AdminAccess != nil { + n += 2 + } + return n +} + +func (m *DeviceRequestAllocationResult) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Request) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Driver) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Pool) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Device) + n += 1 + l + sovGenerated(uint64(l)) + if m.AdminAccess != nil { + n += 2 + } + return n +} + +func (m *DeviceSelector) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.CEL != nil { + l = m.CEL.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *NetworkDeviceData) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.InterfaceName) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.IPs) > 0 { + for _, s := range m.IPs { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.HardwareAddress) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *OpaqueDeviceConfiguration) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Driver) + n += 1 + l + sovGenerated(uint64(l)) + l = m.Parameters.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ResourceClaim) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ResourceClaimConsumerReference) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.APIGroup) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Resource) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l 
= len(m.UID) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ResourceClaimList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ResourceClaimSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Devices.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ResourceClaimStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Allocation != nil { + l = m.Allocation.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.ReservedFor) > 0 { + for _, e := range m.ReservedFor { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Devices) > 0 { + for _, e := range m.Devices { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ResourceClaimTemplate) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ResourceClaimTemplateList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ResourceClaimTemplateSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ResourcePool) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.Generation)) + n += 1 + sovGenerated(uint64(m.ResourceSliceCount)) + return n +} + +func (m *ResourceSlice) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ResourceSliceList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ResourceSliceSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Driver) + n += 1 + l + sovGenerated(uint64(l)) + l = m.Pool.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.NodeName) + n += 1 + l + sovGenerated(uint64(l)) + if m.NodeSelector != nil { + l = m.NodeSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + n += 2 + if len(m.Devices) > 0 { + for _, e := range m.Devices { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *AllocatedDeviceStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForConditions := "[]Condition{" + for _, f := range this.Conditions { + repeatedStringForConditions += fmt.Sprintf("%v", f) + "," + } + repeatedStringForConditions 
+= "}" + s := strings.Join([]string{`&AllocatedDeviceStatus{`, + `Driver:` + fmt.Sprintf("%v", this.Driver) + `,`, + `Pool:` + fmt.Sprintf("%v", this.Pool) + `,`, + `Device:` + fmt.Sprintf("%v", this.Device) + `,`, + `Conditions:` + repeatedStringForConditions + `,`, + `Data:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Data), "RawExtension", "runtime.RawExtension", 1), `&`, ``, 1) + `,`, + `NetworkData:` + strings.Replace(this.NetworkData.String(), "NetworkDeviceData", "NetworkDeviceData", 1) + `,`, + `}`, + }, "") + return s +} +func (this *AllocationResult) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AllocationResult{`, + `Devices:` + strings.Replace(strings.Replace(this.Devices.String(), "DeviceAllocationResult", "DeviceAllocationResult", 1), `&`, ``, 1) + `,`, + `NodeSelector:` + strings.Replace(fmt.Sprintf("%v", this.NodeSelector), "NodeSelector", "v11.NodeSelector", 1) + `,`, + `}`, + }, "") + return s +} +func (this *BasicDevice) String() string { + if this == nil { + return "nil" + } + keysForAttributes := make([]string, 0, len(this.Attributes)) + for k := range this.Attributes { + keysForAttributes = append(keysForAttributes, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAttributes) + mapStringForAttributes := "map[QualifiedName]DeviceAttribute{" + for _, k := range keysForAttributes { + mapStringForAttributes += fmt.Sprintf("%v: %v,", k, this.Attributes[QualifiedName(k)]) + } + mapStringForAttributes += "}" + keysForCapacity := make([]string, 0, len(this.Capacity)) + for k := range this.Capacity { + keysForCapacity = append(keysForCapacity, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForCapacity) + mapStringForCapacity := "map[QualifiedName]DeviceCapacity{" + for _, k := range keysForCapacity { + mapStringForCapacity += fmt.Sprintf("%v: %v,", k, this.Capacity[QualifiedName(k)]) + } + mapStringForCapacity += "}" + s := strings.Join([]string{`&BasicDevice{`, + `Attributes:` + mapStringForAttributes + `,`, + `Capacity:` + mapStringForCapacity + `,`, + `}`, + }, "") + return s +} +func (this *CELDeviceSelector) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CELDeviceSelector{`, + `Expression:` + fmt.Sprintf("%v", this.Expression) + `,`, + `}`, + }, "") + return s +} +func (this *Device) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Device{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Basic:` + strings.Replace(this.Basic.String(), "BasicDevice", "BasicDevice", 1) + `,`, + `}`, + }, "") + return s +} +func (this *DeviceAllocationConfiguration) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeviceAllocationConfiguration{`, + `Source:` + fmt.Sprintf("%v", this.Source) + `,`, + `Requests:` + fmt.Sprintf("%v", this.Requests) + `,`, + `DeviceConfiguration:` + strings.Replace(strings.Replace(this.DeviceConfiguration.String(), "DeviceConfiguration", "DeviceConfiguration", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *DeviceAllocationResult) String() string { + if this == nil { + return "nil" + } + repeatedStringForResults := "[]DeviceRequestAllocationResult{" + for _, f := range this.Results { + repeatedStringForResults += strings.Replace(strings.Replace(f.String(), "DeviceRequestAllocationResult", "DeviceRequestAllocationResult", 1), `&`, ``, 1) + "," + } + repeatedStringForResults += "}" + repeatedStringForConfig := "[]DeviceAllocationConfiguration{" 
+ for _, f := range this.Config { + repeatedStringForConfig += strings.Replace(strings.Replace(f.String(), "DeviceAllocationConfiguration", "DeviceAllocationConfiguration", 1), `&`, ``, 1) + "," + } + repeatedStringForConfig += "}" + s := strings.Join([]string{`&DeviceAllocationResult{`, + `Results:` + repeatedStringForResults + `,`, + `Config:` + repeatedStringForConfig + `,`, + `}`, + }, "") + return s +} +func (this *DeviceAttribute) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeviceAttribute{`, + `IntValue:` + valueToStringGenerated(this.IntValue) + `,`, + `BoolValue:` + valueToStringGenerated(this.BoolValue) + `,`, + `StringValue:` + valueToStringGenerated(this.StringValue) + `,`, + `VersionValue:` + valueToStringGenerated(this.VersionValue) + `,`, + `}`, + }, "") + return s +} +func (this *DeviceCapacity) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeviceCapacity{`, + `Value:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Value), "Quantity", "resource.Quantity", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *DeviceClaim) String() string { + if this == nil { + return "nil" + } + repeatedStringForRequests := "[]DeviceRequest{" + for _, f := range this.Requests { + repeatedStringForRequests += strings.Replace(strings.Replace(f.String(), "DeviceRequest", "DeviceRequest", 1), `&`, ``, 1) + "," + } + repeatedStringForRequests += "}" + repeatedStringForConstraints := "[]DeviceConstraint{" + for _, f := range this.Constraints { + repeatedStringForConstraints += strings.Replace(strings.Replace(f.String(), "DeviceConstraint", "DeviceConstraint", 1), `&`, ``, 1) + "," + } + repeatedStringForConstraints += "}" + repeatedStringForConfig := "[]DeviceClaimConfiguration{" + for _, f := range this.Config { + repeatedStringForConfig += strings.Replace(strings.Replace(f.String(), "DeviceClaimConfiguration", "DeviceClaimConfiguration", 1), `&`, ``, 1) + "," + } + repeatedStringForConfig += "}" + s := strings.Join([]string{`&DeviceClaim{`, + `Requests:` + repeatedStringForRequests + `,`, + `Constraints:` + repeatedStringForConstraints + `,`, + `Config:` + repeatedStringForConfig + `,`, + `}`, + }, "") + return s +} +func (this *DeviceClaimConfiguration) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeviceClaimConfiguration{`, + `Requests:` + fmt.Sprintf("%v", this.Requests) + `,`, + `DeviceConfiguration:` + strings.Replace(strings.Replace(this.DeviceConfiguration.String(), "DeviceConfiguration", "DeviceConfiguration", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *DeviceClass) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeviceClass{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "DeviceClassSpec", "DeviceClassSpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *DeviceClassConfiguration) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeviceClassConfiguration{`, + `DeviceConfiguration:` + strings.Replace(strings.Replace(this.DeviceConfiguration.String(), "DeviceConfiguration", "DeviceConfiguration", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *DeviceClassList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := 
"[]DeviceClass{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "DeviceClass", "DeviceClass", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&DeviceClassList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *DeviceClassSpec) String() string { + if this == nil { + return "nil" + } + repeatedStringForSelectors := "[]DeviceSelector{" + for _, f := range this.Selectors { + repeatedStringForSelectors += strings.Replace(strings.Replace(f.String(), "DeviceSelector", "DeviceSelector", 1), `&`, ``, 1) + "," + } + repeatedStringForSelectors += "}" + repeatedStringForConfig := "[]DeviceClassConfiguration{" + for _, f := range this.Config { + repeatedStringForConfig += strings.Replace(strings.Replace(f.String(), "DeviceClassConfiguration", "DeviceClassConfiguration", 1), `&`, ``, 1) + "," + } + repeatedStringForConfig += "}" + s := strings.Join([]string{`&DeviceClassSpec{`, + `Selectors:` + repeatedStringForSelectors + `,`, + `Config:` + repeatedStringForConfig + `,`, + `}`, + }, "") + return s +} +func (this *DeviceConfiguration) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeviceConfiguration{`, + `Opaque:` + strings.Replace(this.Opaque.String(), "OpaqueDeviceConfiguration", "OpaqueDeviceConfiguration", 1) + `,`, + `}`, + }, "") + return s +} +func (this *DeviceConstraint) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeviceConstraint{`, + `Requests:` + fmt.Sprintf("%v", this.Requests) + `,`, + `MatchAttribute:` + valueToStringGenerated(this.MatchAttribute) + `,`, + `}`, + }, "") + return s +} +func (this *DeviceRequest) String() string { + if this == nil { + return "nil" + } + repeatedStringForSelectors := "[]DeviceSelector{" + for _, f := range this.Selectors { + repeatedStringForSelectors += strings.Replace(strings.Replace(f.String(), "DeviceSelector", "DeviceSelector", 1), `&`, ``, 1) + "," + } + repeatedStringForSelectors += "}" + s := strings.Join([]string{`&DeviceRequest{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `DeviceClassName:` + fmt.Sprintf("%v", this.DeviceClassName) + `,`, + `Selectors:` + repeatedStringForSelectors + `,`, + `AllocationMode:` + fmt.Sprintf("%v", this.AllocationMode) + `,`, + `Count:` + fmt.Sprintf("%v", this.Count) + `,`, + `AdminAccess:` + valueToStringGenerated(this.AdminAccess) + `,`, + `}`, + }, "") + return s +} +func (this *DeviceRequestAllocationResult) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeviceRequestAllocationResult{`, + `Request:` + fmt.Sprintf("%v", this.Request) + `,`, + `Driver:` + fmt.Sprintf("%v", this.Driver) + `,`, + `Pool:` + fmt.Sprintf("%v", this.Pool) + `,`, + `Device:` + fmt.Sprintf("%v", this.Device) + `,`, + `AdminAccess:` + valueToStringGenerated(this.AdminAccess) + `,`, + `}`, + }, "") + return s +} +func (this *DeviceSelector) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeviceSelector{`, + `CEL:` + strings.Replace(this.CEL.String(), "CELDeviceSelector", "CELDeviceSelector", 1) + `,`, + `}`, + }, "") + return s +} +func (this *NetworkDeviceData) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NetworkDeviceData{`, + `InterfaceName:` + fmt.Sprintf("%v", 
this.InterfaceName) + `,`, + `IPs:` + fmt.Sprintf("%v", this.IPs) + `,`, + `HardwareAddress:` + fmt.Sprintf("%v", this.HardwareAddress) + `,`, + `}`, + }, "") + return s +} +func (this *OpaqueDeviceConfiguration) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&OpaqueDeviceConfiguration{`, + `Driver:` + fmt.Sprintf("%v", this.Driver) + `,`, + `Parameters:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Parameters), "RawExtension", "runtime.RawExtension", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ResourceClaim) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResourceClaim{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ResourceClaimSpec", "ResourceClaimSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ResourceClaimStatus", "ResourceClaimStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ResourceClaimConsumerReference) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResourceClaimConsumerReference{`, + `APIGroup:` + fmt.Sprintf("%v", this.APIGroup) + `,`, + `Resource:` + fmt.Sprintf("%v", this.Resource) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `UID:` + fmt.Sprintf("%v", this.UID) + `,`, + `}`, + }, "") + return s +} +func (this *ResourceClaimList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]ResourceClaim{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ResourceClaim", "ResourceClaim", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&ResourceClaimList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *ResourceClaimSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResourceClaimSpec{`, + `Devices:` + strings.Replace(strings.Replace(this.Devices.String(), "DeviceClaim", "DeviceClaim", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ResourceClaimStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForReservedFor := "[]ResourceClaimConsumerReference{" + for _, f := range this.ReservedFor { + repeatedStringForReservedFor += strings.Replace(strings.Replace(f.String(), "ResourceClaimConsumerReference", "ResourceClaimConsumerReference", 1), `&`, ``, 1) + "," + } + repeatedStringForReservedFor += "}" + repeatedStringForDevices := "[]AllocatedDeviceStatus{" + for _, f := range this.Devices { + repeatedStringForDevices += strings.Replace(strings.Replace(f.String(), "AllocatedDeviceStatus", "AllocatedDeviceStatus", 1), `&`, ``, 1) + "," + } + repeatedStringForDevices += "}" + s := strings.Join([]string{`&ResourceClaimStatus{`, + `Allocation:` + strings.Replace(this.Allocation.String(), "AllocationResult", "AllocationResult", 1) + `,`, + `ReservedFor:` + repeatedStringForReservedFor + `,`, + `Devices:` + repeatedStringForDevices + `,`, + `}`, + }, "") + return s +} +func (this *ResourceClaimTemplate) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResourceClaimTemplate{`, + 
`ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ResourceClaimTemplateSpec", "ResourceClaimTemplateSpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ResourceClaimTemplateList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]ResourceClaimTemplate{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ResourceClaimTemplate", "ResourceClaimTemplate", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&ResourceClaimTemplateList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *ResourceClaimTemplateSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResourceClaimTemplateSpec{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ResourceClaimSpec", "ResourceClaimSpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ResourcePool) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResourcePool{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Generation:` + fmt.Sprintf("%v", this.Generation) + `,`, + `ResourceSliceCount:` + fmt.Sprintf("%v", this.ResourceSliceCount) + `,`, + `}`, + }, "") + return s +} +func (this *ResourceSlice) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResourceSlice{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ResourceSliceSpec", "ResourceSliceSpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ResourceSliceList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]ResourceSlice{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ResourceSlice", "ResourceSlice", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&ResourceSliceList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *ResourceSliceSpec) String() string { + if this == nil { + return "nil" + } + repeatedStringForDevices := "[]Device{" + for _, f := range this.Devices { + repeatedStringForDevices += strings.Replace(strings.Replace(f.String(), "Device", "Device", 1), `&`, ``, 1) + "," + } + repeatedStringForDevices += "}" + s := strings.Join([]string{`&ResourceSliceSpec{`, + `Driver:` + fmt.Sprintf("%v", this.Driver) + `,`, + `Pool:` + strings.Replace(strings.Replace(this.Pool.String(), "ResourcePool", "ResourcePool", 1), `&`, ``, 1) + `,`, + `NodeName:` + fmt.Sprintf("%v", this.NodeName) + `,`, + `NodeSelector:` + strings.Replace(fmt.Sprintf("%v", this.NodeSelector), "NodeSelector", "v11.NodeSelector", 1) + `,`, + `AllNodes:` + fmt.Sprintf("%v", this.AllNodes) + `,`, + `Devices:` + 
repeatedStringForDevices + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *AllocatedDeviceStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AllocatedDeviceStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AllocatedDeviceStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Driver", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Driver = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pool", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Pool = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Device", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Device = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if 
postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, v1.Condition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Data.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NetworkData", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NetworkData == nil { + m.NetworkData = &NetworkDeviceData{} + } + if err := m.NetworkData.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AllocationResult) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AllocationResult: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AllocationResult: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Devices", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Devices.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if 
shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NodeSelector == nil { + m.NodeSelector = &v11.NodeSelector{} + } + if err := m.NodeSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BasicDevice) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BasicDevice: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BasicDevice: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Attributes == nil { + m.Attributes = make(map[QualifiedName]DeviceAttribute) + } + var mapkey QualifiedName + mapvalue := &DeviceAttribute{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = QualifiedName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &DeviceAttribute{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Attributes[QualifiedName(mapkey)] = *mapvalue + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Capacity", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Capacity == nil { + m.Capacity = make(map[QualifiedName]DeviceCapacity) + } + var mapkey QualifiedName + mapvalue := &DeviceCapacity{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = QualifiedName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &DeviceCapacity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + 
m.Capacity[QualifiedName(mapkey)] = *mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CELDeviceSelector) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CELDeviceSelector: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CELDeviceSelector: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Expression", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Expression = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Device) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Device: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Device: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return 
fmt.Errorf("proto: wrong wireType = %d for field Basic", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Basic == nil { + m.Basic = &BasicDevice{} + } + if err := m.Basic.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeviceAllocationConfiguration) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeviceAllocationConfiguration: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeviceAllocationConfiguration: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Source", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Source = AllocationConfigSource(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Requests", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Requests = append(m.Requests, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DeviceConfiguration", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + 
break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.DeviceConfiguration.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeviceAllocationResult) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeviceAllocationResult: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeviceAllocationResult: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Results", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Results = append(m.Results, DeviceRequestAllocationResult{}) + if err := m.Results[len(m.Results)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Config", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Config = append(m.Config, DeviceAllocationConfiguration{}) + if err := m.Config[len(m.Config)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeviceAttribute) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeviceAttribute: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeviceAttribute: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IntValue", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.IntValue = &v + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field BoolValue", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.BoolValue = &b + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StringValue", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.StringValue = &s + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VersionValue", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.VersionValue = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeviceCapacity) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeviceCapacity: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: 
DeviceCapacity: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Value.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeviceClaim) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeviceClaim: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeviceClaim: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Requests", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Requests = append(m.Requests, DeviceRequest{}) + if err := m.Requests[len(m.Requests)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Constraints", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Constraints = append(m.Constraints, DeviceConstraint{}) + if err := m.Constraints[len(m.Constraints)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Config", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Config = append(m.Config, DeviceClaimConfiguration{}) + if err := m.Config[len(m.Config)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeviceClaimConfiguration) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeviceClaimConfiguration: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeviceClaimConfiguration: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Requests", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Requests = append(m.Requests, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DeviceConfiguration", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.DeviceConfiguration.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeviceClass) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + 
} + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeviceClass: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeviceClass: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeviceClassConfiguration) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeviceClassConfiguration: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeviceClassConfiguration: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DeviceConfiguration", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.DeviceConfiguration.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = 
postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeviceClassList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeviceClassList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeviceClassList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, DeviceClass{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeviceClassSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeviceClassSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeviceClassSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: 
wrong wireType = %d for field Selectors", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Selectors = append(m.Selectors, DeviceSelector{}) + if err := m.Selectors[len(m.Selectors)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Config", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Config = append(m.Config, DeviceClassConfiguration{}) + if err := m.Config[len(m.Config)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeviceConfiguration) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeviceConfiguration: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeviceConfiguration: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Opaque", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Opaque == nil { + m.Opaque = &OpaqueDeviceConfiguration{} + } + if err := m.Opaque.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return 
io.ErrUnexpectedEOF + } + return nil +} +func (m *DeviceConstraint) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeviceConstraint: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeviceConstraint: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Requests", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Requests = append(m.Requests, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MatchAttribute", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := FullyQualifiedName(dAtA[iNdEx:postIndex]) + m.MatchAttribute = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeviceRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeviceRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeviceRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if 
b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DeviceClassName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DeviceClassName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Selectors", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Selectors = append(m.Selectors, DeviceSelector{}) + if err := m.Selectors[len(m.Selectors)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AllocationMode", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AllocationMode = DeviceAllocationMode(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Count", wireType) + } + m.Count = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Count |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AdminAccess", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.AdminAccess = &b + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += 
skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeviceRequestAllocationResult) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeviceRequestAllocationResult: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeviceRequestAllocationResult: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Request", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Request = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Driver", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Driver = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pool", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Pool = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Device", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Device = string(dAtA[iNdEx:postIndex]) + 
iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AdminAccess", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.AdminAccess = &b + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeviceSelector) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeviceSelector: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeviceSelector: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CEL", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.CEL == nil { + m.CEL = &CELDeviceSelector{} + } + if err := m.CEL.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NetworkDeviceData) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NetworkDeviceData: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NetworkDeviceData: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field InterfaceName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= 
uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.InterfaceName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IPs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.IPs = append(m.IPs, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field HardwareAddress", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.HardwareAddress = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *OpaqueDeviceConfiguration) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: OpaqueDeviceConfiguration: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: OpaqueDeviceConfiguration: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Driver", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Driver = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if 
wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Parameters", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Parameters.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceClaim) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceClaim: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceClaim: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return 
ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceClaimConsumerReference) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceClaimConsumerReference: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceClaimConsumerReference: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field APIGroup", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.APIGroup = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Resource = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UID = k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceClaimList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceClaimList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceClaimList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, ResourceClaim{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceClaimSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + 
if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceClaimSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceClaimSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Devices", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Devices.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceClaimStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceClaimStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceClaimStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Allocation", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Allocation == nil { + m.Allocation = &AllocationResult{} + } + if err := m.Allocation.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ReservedFor", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ReservedFor = append(m.ReservedFor, 
ResourceClaimConsumerReference{}) + if err := m.ReservedFor[len(m.ReservedFor)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Devices", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Devices = append(m.Devices, AllocatedDeviceStatus{}) + if err := m.Devices[len(m.Devices)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceClaimTemplate) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceClaimTemplate: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceClaimTemplate: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return 
io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceClaimTemplateList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceClaimTemplateList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceClaimTemplateList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, ResourceClaimTemplate{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceClaimTemplateSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceClaimTemplateSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceClaimTemplateSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + 
if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourcePool) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourcePool: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourcePool: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Generation", wireType) + } + m.Generation = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Generation |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceSliceCount", wireType) + } + m.ResourceSliceCount = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + 
m.ResourceSliceCount |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceSlice) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceSlice: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceSlice: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceSliceList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceSliceList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceSliceList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return 
fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, ResourceSlice{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceSliceSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceSliceSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceSliceSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Driver", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Driver = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pool", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + 
msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Pool.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NodeName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NodeSelector == nil { + m.NodeSelector = &v11.NodeSelector{} + } + if err := m.NodeSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AllNodes", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.AllNodes = bool(v != 0) + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Devices", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Devices = append(m.Devices, Device{}) + if err := m.Devices[len(m.Devices)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := 
int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenerated + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/k8s.io/api/resource/v1beta1/generated.proto b/vendor/k8s.io/api/resource/v1beta1/generated.proto new file mode 100644 index 000000000..4ea13e033 --- /dev/null +++ b/vendor/k8s.io/api/resource/v1beta1/generated.proto @@ -0,0 +1,892 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = "proto2"; + +package k8s.io.api.resource.v1beta1; + +import "k8s.io/api/core/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/api/resource/generated.proto"; +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "k8s.io/api/resource/v1beta1"; + +// AllocatedDeviceStatus contains the status of an allocated device, if the +// driver chooses to report it. This may include driver-specific information. +message AllocatedDeviceStatus { + // Driver specifies the name of the DRA driver whose kubelet + // plugin should be invoked to process the allocation once the claim is + // needed on a node. + // + // Must be a DNS subdomain and should end with a DNS domain owned by the + // vendor of the driver. + // + // +required + optional string driver = 1; + + // This name together with the driver name and the device name field + // identify which device was allocated (`//`). + // + // Must not be longer than 253 characters and may contain one or more + // DNS sub-domains separated by slashes. + // + // +required + optional string pool = 2; + + // Device references one device instance via its name in the driver's + // resource pool. It must be a DNS label. 
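For readers tracing the generated unmarshalers above: every tag, length, and integer field is decoded with the same inlined protobuf varint loop. The following is a standalone sketch of that loop for illustration only; it is not part of the vendored code and the names are invented.

    package protowire // hypothetical package name, for illustration only

    import (
        "fmt"
        "io"
    )

    // readUvarint mirrors the loop the generated Unmarshal methods repeat inline:
    // accumulate 7 payload bits per byte, least-significant group first, until a
    // byte with the high bit cleared terminates the value.
    func readUvarint(data []byte, index int) (value uint64, next int, err error) {
        for shift := uint(0); ; shift += 7 {
            if shift >= 64 {
                return 0, 0, fmt.Errorf("proto: integer overflow")
            }
            if index >= len(data) {
                return 0, 0, io.ErrUnexpectedEOF
            }
            b := data[index]
            index++
            value |= uint64(b&0x7F) << shift
            if b < 0x80 { // high bit clear: last byte of the varint
                return value, index, nil
            }
        }
    }

The field number and wire type of each tag then come from value >> 3 and value & 0x7, exactly as in the generated code above.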
+ // + // +required + optional string device = 3; + + // Conditions contains the latest observation of the device's state. + // If the device has been configured according to the class and claim + // config references, the `Ready` condition should be True. + // + // +optional + // +listType=map + // +listMapKey=type + repeated .k8s.io.apimachinery.pkg.apis.meta.v1.Condition conditions = 4; + + // Data contains arbitrary driver-specific data. + // + // The length of the raw data must be smaller or equal to 10 Ki. + // + // +optional + optional .k8s.io.apimachinery.pkg.runtime.RawExtension data = 5; + + // NetworkData contains network-related information specific to the device. + // + // +optional + optional NetworkDeviceData networkData = 6; +} + +// AllocationResult contains attributes of an allocated resource. +message AllocationResult { + // Devices is the result of allocating devices. + // + // +optional + optional DeviceAllocationResult devices = 1; + + // NodeSelector defines where the allocated resources are available. If + // unset, they are available everywhere. + // + // +optional + optional .k8s.io.api.core.v1.NodeSelector nodeSelector = 3; +} + +// BasicDevice defines one device instance. +message BasicDevice { + // Attributes defines the set of attributes for this device. + // The name of each attribute must be unique in that set. + // + // The maximum number of attributes and capacities combined is 32. + // + // +optional + map attributes = 1; + + // Capacity defines the set of capacities for this device. + // The name of each capacity must be unique in that set. + // + // The maximum number of attributes and capacities combined is 32. + // + // +optional + map capacity = 2; +} + +// CELDeviceSelector contains a CEL expression for selecting a device. +message CELDeviceSelector { + // Expression is a CEL expression which evaluates a single device. It + // must evaluate to true when the device under consideration satisfies + // the desired criteria, and false when it does not. Any other result + // is an error and causes allocation of devices to abort. + // + // The expression's input is an object named "device", which carries + // the following properties: + // - driver (string): the name of the driver which defines this device. + // - attributes (map[string]object): the device's attributes, grouped by prefix + // (e.g. device.attributes["dra.example.com"] evaluates to an object with all + // of the attributes which were prefixed by "dra.example.com". + // - capacity (map[string]object): the device's capacities, grouped by prefix. + // + // Example: Consider a device with driver="dra.example.com", which exposes + // two attributes named "model" and "ext.example.com/family" and which + // exposes one capacity named "modules". This input to this expression + // would have the following fields: + // + // device.driver + // device.attributes["dra.example.com"].model + // device.attributes["ext.example.com"].family + // device.capacity["dra.example.com"].modules + // + // The device.driver field can be used to check for a specific driver, + // either as a high-level precondition (i.e. you only want to consider + // devices from this driver) or as part of a multi-clause expression + // that is meant to consider devices from different drivers. + // + // The value type of each attribute is defined by the device + // definition, and users who write these expressions must consult the + // documentation for their specific drivers. The value type of each + // capacity is Quantity. 
+ // + // If an unknown prefix is used as a lookup in either device.attributes + // or device.capacity, an empty map will be returned. Any reference to + // an unknown field will cause an evaluation error and allocation to + // abort. + // + // A robust expression should check for the existence of attributes + // before referencing them. + // + // For ease of use, the cel.bind() function is enabled, and can be used + // to simplify expressions that access multiple attributes with the + // same domain. For example: + // + // cel.bind(dra, device.attributes["dra.example.com"], dra.someBool && dra.anotherBool) + // + // The length of the expression must be smaller or equal to 10 Ki. The + // cost of evaluating it is also limited based on the estimated number + // of logical steps. + // + // +required + optional string expression = 1; +} + +// Device represents one individual hardware instance that can be selected based +// on its attributes. Besides the name, exactly one field must be set. +message Device { + // Name is unique identifier among all devices managed by + // the driver in the pool. It must be a DNS label. + // + // +required + optional string name = 1; + + // Basic defines one device instance. + // + // +optional + // +oneOf=deviceType + optional BasicDevice basic = 2; +} + +// DeviceAllocationConfiguration gets embedded in an AllocationResult. +message DeviceAllocationConfiguration { + // Source records whether the configuration comes from a class and thus + // is not something that a normal user would have been able to set + // or from a claim. + // + // +required + optional string source = 1; + + // Requests lists the names of requests where the configuration applies. + // If empty, its applies to all requests. + // + // +optional + // +listType=atomic + repeated string requests = 2; + + optional DeviceConfiguration deviceConfiguration = 3; +} + +// DeviceAllocationResult is the result of allocating devices. +message DeviceAllocationResult { + // Results lists all allocated devices. + // + // +optional + // +listType=atomic + repeated DeviceRequestAllocationResult results = 1; + + // This field is a combination of all the claim and class configuration parameters. + // Drivers can distinguish between those based on a flag. + // + // This includes configuration parameters for drivers which have no allocated + // devices in the result because it is up to the drivers which configuration + // parameters they support. They can silently ignore unknown configuration + // parameters. + // + // +optional + // +listType=atomic + repeated DeviceAllocationConfiguration config = 2; +} + +// DeviceAttribute must have exactly one field set. +message DeviceAttribute { + // IntValue is a number. + // + // +optional + // +oneOf=ValueType + optional int64 int = 2; + + // BoolValue is a true/false value. + // + // +optional + // +oneOf=ValueType + optional bool bool = 3; + + // StringValue is a string. Must not be longer than 64 characters. + // + // +optional + // +oneOf=ValueType + optional string string = 4; + + // VersionValue is a semantic version according to semver.org spec 2.0.0. + // Must not be longer than 64 characters. + // + // +optional + // +oneOf=ValueType + optional string version = 5; +} + +// DeviceCapacity describes a quantity associated with a device. +message DeviceCapacity { + // Value defines how much of a certain device capacity is available. 
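To make the CELDeviceSelector documentation above concrete, here is a small Go sketch of a selector built from the vendored v1beta1 types. The driver name "dra.example.com" and the "model" attribute mirror the hypothetical examples in the field comments and are not a real driver.

    import resourcev1beta1 "k8s.io/api/resource/v1beta1"

    // selector matches only devices published by the hypothetical driver
    // "dra.example.com" whose "model" attribute equals "ultra10".
    var selector = resourcev1beta1.DeviceSelector{
        CEL: &resourcev1beta1.CELDeviceSelector{
            Expression: `device.driver == "dra.example.com" && device.attributes["dra.example.com"].model == "ultra10"`,
        },
    }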
+ // + // +required + optional .k8s.io.apimachinery.pkg.api.resource.Quantity value = 1; +} + +// DeviceClaim defines how to request devices with a ResourceClaim. +message DeviceClaim { + // Requests represent individual requests for distinct devices which + // must all be satisfied. If empty, nothing needs to be allocated. + // + // +optional + // +listType=atomic + repeated DeviceRequest requests = 1; + + // These constraints must be satisfied by the set of devices that get + // allocated for the claim. + // + // +optional + // +listType=atomic + repeated DeviceConstraint constraints = 2; + + // This field holds configuration for multiple potential drivers which + // could satisfy requests in this claim. It is ignored while allocating + // the claim. + // + // +optional + // +listType=atomic + repeated DeviceClaimConfiguration config = 3; +} + +// DeviceClaimConfiguration is used for configuration parameters in DeviceClaim. +message DeviceClaimConfiguration { + // Requests lists the names of requests where the configuration applies. + // If empty, it applies to all requests. + // + // +optional + // +listType=atomic + repeated string requests = 1; + + optional DeviceConfiguration deviceConfiguration = 2; +} + +// DeviceClass is a vendor- or admin-provided resource that contains +// device configuration and selectors. It can be referenced in +// the device requests of a claim to apply these presets. +// Cluster scoped. +// +// This is an alpha type and requires enabling the DynamicResourceAllocation +// feature gate. +message DeviceClass { + // Standard object metadata + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Spec defines what can be allocated and how to configure it. + // + // This is mutable. Consumers have to be prepared for classes changing + // at any time, either because they get updated or replaced. Claim + // allocations are done once based on whatever was set in classes at + // the time of allocation. + // + // Changing the spec automatically increments the metadata.generation number. + optional DeviceClassSpec spec = 2; +} + +// DeviceClassConfiguration is used in DeviceClass. +message DeviceClassConfiguration { + optional DeviceConfiguration deviceConfiguration = 1; +} + +// DeviceClassList is a collection of classes. +message DeviceClassList { + // Standard list metadata + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is the list of resource classes. + repeated DeviceClass items = 2; +} + +// DeviceClassSpec is used in a [DeviceClass] to define what can be allocated +// and how to configure it. +message DeviceClassSpec { + // Each selector must be satisfied by a device which is claimed via this class. + // + // +optional + // +listType=atomic + repeated DeviceSelector selectors = 1; + + // Config defines configuration parameters that apply to each device that is claimed via this class. + // Some classses may potentially be satisfied by multiple drivers, so each instance of a vendor + // configuration applies to exactly one driver. + // + // They are passed to the driver, but are not considered while allocating the claim. + // + // +optional + // +listType=atomic + repeated DeviceClassConfiguration config = 2; +} + +// DeviceConfiguration must have exactly one field set. It gets embedded +// inline in some other structs which have other fields, so field names must +// not conflict with those. 
+message DeviceConfiguration { + // Opaque provides driver-specific configuration parameters. + // + // +optional + // +oneOf=ConfigurationType + optional OpaqueDeviceConfiguration opaque = 1; +} + +// DeviceConstraint must have exactly one field set besides Requests. +message DeviceConstraint { + // Requests is a list of the one or more requests in this claim which + // must co-satisfy this constraint. If a request is fulfilled by + // multiple devices, then all of the devices must satisfy the + // constraint. If this is not specified, this constraint applies to all + // requests in this claim. + // + // +optional + // +listType=atomic + repeated string requests = 1; + + // MatchAttribute requires that all devices in question have this + // attribute and that its type and value are the same across those + // devices. + // + // For example, if you specified "dra.example.com/numa" (a hypothetical example!), + // then only devices in the same NUMA node will be chosen. A device which + // does not have that attribute will not be chosen. All devices should + // use a value of the same type for this attribute because that is part of + // its specification, but if one device doesn't, then it also will not be + // chosen. + // + // Must include the domain qualifier. + // + // +optional + // +oneOf=ConstraintType + optional string matchAttribute = 2; +} + +// DeviceRequest is a request for devices required for a claim. +// This is typically a request for a single resource like a device, but can +// also ask for several identical devices. +// +// A DeviceClassName is currently required. Clients must check that it is +// indeed set. It's absence indicates that something changed in a way that +// is not supported by the client yet, in which case it must refuse to +// handle the request. +message DeviceRequest { + // Name can be used to reference this request in a pod.spec.containers[].resources.claims + // entry and in a constraint of the claim. + // + // Must be a DNS label. + // + // +required + optional string name = 1; + + // DeviceClassName references a specific DeviceClass, which can define + // additional configuration and selectors to be inherited by this + // request. + // + // A class is required. Which classes are available depends on the cluster. + // + // Administrators may use this to restrict which devices may get + // requested by only installing classes with selectors for permitted + // devices. If users are free to request anything without restrictions, + // then administrators can create an empty DeviceClass for users + // to reference. + // + // +required + optional string deviceClassName = 2; + + // Selectors define criteria which must be satisfied by a specific + // device in order for that device to be considered for this + // request. All selectors must be satisfied for a device to be + // considered. + // + // +optional + // +listType=atomic + repeated DeviceSelector selectors = 3; + + // AllocationMode and its related fields define how devices are allocated + // to satisfy this request. Supported values are: + // + // - ExactCount: This request is for a specific number of devices. + // This is the default. The exact number is provided in the + // count field. + // + // - All: This request is for all of the matching devices in a pool. + // Allocation will fail if some devices are already allocated, + // unless adminAccess is requested. + // + // If AlloctionMode is not specified, the default mode is ExactCount. 
If + // the mode is ExactCount and count is not specified, the default count is + // one. Any other requests must specify this field. + // + // More modes may get added in the future. Clients must refuse to handle + // requests with unknown modes. + // + // +optional + optional string allocationMode = 4; + + // Count is used only when the count mode is "ExactCount". Must be greater than zero. + // If AllocationMode is ExactCount and this field is not specified, the default is one. + // + // +optional + // +oneOf=AllocationMode + optional int64 count = 5; + + // AdminAccess indicates that this is a claim for administrative access + // to the device(s). Claims with AdminAccess are expected to be used for + // monitoring or other management services for a device. They ignore + // all ordinary claims to the device with respect to access modes and + // any resource allocations. + // + // This is an alpha field and requires enabling the DRAAdminAccess + // feature gate. Admin access is disabled if this field is unset or + // set to false, otherwise it is enabled. + // + // +optional + // +featureGate=DRAAdminAccess + optional bool adminAccess = 6; +} + +// DeviceRequestAllocationResult contains the allocation result for one request. +message DeviceRequestAllocationResult { + // Request is the name of the request in the claim which caused this + // device to be allocated. Multiple devices may have been allocated + // per request. + // + // +required + optional string request = 1; + + // Driver specifies the name of the DRA driver whose kubelet + // plugin should be invoked to process the allocation once the claim is + // needed on a node. + // + // Must be a DNS subdomain and should end with a DNS domain owned by the + // vendor of the driver. + // + // +required + optional string driver = 2; + + // This name together with the driver name and the device name field + // identify which device was allocated (`//`). + // + // Must not be longer than 253 characters and may contain one or more + // DNS sub-domains separated by slashes. + // + // +required + optional string pool = 3; + + // Device references one device instance via its name in the driver's + // resource pool. It must be a DNS label. + // + // +required + optional string device = 4; + + // AdminAccess indicates that this device was allocated for + // administrative access. See the corresponding request field + // for a definition of mode. + // + // This is an alpha field and requires enabling the DRAAdminAccess + // feature gate. Admin access is disabled if this field is unset or + // set to false, otherwise it is enabled. + // + // +optional + // +featureGate=DRAAdminAccess + optional bool adminAccess = 5; +} + +// DeviceSelector must have exactly one field set. +message DeviceSelector { + // CEL contains a CEL expression for selecting a device. + // + // +optional + // +oneOf=SelectorType + optional CELDeviceSelector cel = 1; +} + +// NetworkDeviceData provides network-related details for the allocated device. +// This information may be filled by drivers or other components to configure +// or identify the device within a network context. +message NetworkDeviceData { + // InterfaceName specifies the name of the network interface associated with + // the allocated device. This might be the name of a physical or virtual + // network interface being configured in the pod. + // + // Must not be longer than 256 characters. 
+ // + // +optional + optional string interfaceName = 1; + + // IPs lists the network addresses assigned to the device's network interface. + // This can include both IPv4 and IPv6 addresses. + // The IPs are in the CIDR notation, which includes both the address and the + // associated subnet mask. + // e.g.: "192.0.2.5/24" for IPv4 and "2001:db8::5/64" for IPv6. + // + // +optional + // +listType=atomic + repeated string ips = 2; + + // HardwareAddress represents the hardware address (e.g. MAC Address) of the device's network interface. + // + // Must not be longer than 128 characters. + // + // +optional + optional string hardwareAddress = 3; +} + +// OpaqueDeviceConfiguration contains configuration parameters for a driver +// in a format defined by the driver vendor. +message OpaqueDeviceConfiguration { + // Driver is used to determine which kubelet plugin needs + // to be passed these configuration parameters. + // + // An admission policy provided by the driver developer could use this + // to decide whether it needs to validate them. + // + // Must be a DNS subdomain and should end with a DNS domain owned by the + // vendor of the driver. + // + // +required + optional string driver = 1; + + // Parameters can contain arbitrary data. It is the responsibility of + // the driver developer to handle validation and versioning. Typically this + // includes self-identification and a version ("kind" + "apiVersion" for + // Kubernetes types), with conversion between different versions. + // + // The length of the raw data must be smaller or equal to 10 Ki. + // + // +required + optional .k8s.io.apimachinery.pkg.runtime.RawExtension parameters = 2; +} + +// ResourceClaim describes a request for access to resources in the cluster, +// for use by workloads. For example, if a workload needs an accelerator device +// with specific properties, this is how that request is expressed. The status +// stanza tracks whether this claim has been satisfied and what specific +// resources have been allocated. +// +// This is an alpha type and requires enabling the DynamicResourceAllocation +// feature gate. +message ResourceClaim { + // Standard object metadata + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Spec describes what is being requested and how to configure it. + // The spec is immutable. + optional ResourceClaimSpec spec = 2; + + // Status describes whether the claim is ready to use and what has been allocated. + // +optional + optional ResourceClaimStatus status = 3; +} + +// ResourceClaimConsumerReference contains enough information to let you +// locate the consumer of a ResourceClaim. The user must be a resource in the same +// namespace as the ResourceClaim. +message ResourceClaimConsumerReference { + // APIGroup is the group for the resource being referenced. It is + // empty for the core API. This matches the group in the APIVersion + // that is used when creating the resources. + // +optional + optional string apiGroup = 1; + + // Resource is the type of resource being referenced, for example "pods". + // +required + optional string resource = 3; + + // Name is the name of resource being referenced. + // +required + optional string name = 4; + + // UID identifies exactly one incarnation of the resource. + // +required + optional string uid = 5; +} + +// ResourceClaimList is a collection of claims. 
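As an illustration of OpaqueDeviceConfiguration and the self-identifying RawExtension payload described above, the sketch below attaches driver-specific parameters to a single request in a claim. The driver name and the JSON payload are made up, and it is assumed that the Go struct embeds DeviceConfiguration inline as the proto definitions suggest.

    import (
        "k8s.io/apimachinery/pkg/runtime"

        resourcev1beta1 "k8s.io/api/resource/v1beta1"
    )

    // claimConfig would be appended to a DeviceClaim's Config list.
    var claimConfig = resourcev1beta1.DeviceClaimConfiguration{
        // An empty Requests list would apply the configuration to all requests in the claim.
        Requests: []string{"gpu"},
        DeviceConfiguration: resourcev1beta1.DeviceConfiguration{
            Opaque: &resourcev1beta1.OpaqueDeviceConfiguration{
                Driver: "gpu.dra.example.com", // hypothetical driver name
                Parameters: runtime.RawExtension{
                    // Versioned, self-identifying payload as recommended above; contents are invented.
                    Raw: []byte(`{"apiVersion":"gpu.dra.example.com/v1alpha1","kind":"GpuConfig","sharing":"timeSlicing"}`),
                },
            },
        },
    }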
+message ResourceClaimList { + // Standard list metadata + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is the list of resource claims. + repeated ResourceClaim items = 2; +} + +// ResourceClaimSpec defines what is being requested in a ResourceClaim and how to configure it. +message ResourceClaimSpec { + // Devices defines how to request devices. + // + // +optional + optional DeviceClaim devices = 1; +} + +// ResourceClaimStatus tracks whether the resource has been allocated and what +// the result of that was. +message ResourceClaimStatus { + // Allocation is set once the claim has been allocated successfully. + // + // +optional + optional AllocationResult allocation = 1; + + // ReservedFor indicates which entities are currently allowed to use + // the claim. A Pod which references a ResourceClaim which is not + // reserved for that Pod will not be started. A claim that is in + // use or might be in use because it has been reserved must not get + // deallocated. + // + // In a cluster with multiple scheduler instances, two pods might get + // scheduled concurrently by different schedulers. When they reference + // the same ResourceClaim which already has reached its maximum number + // of consumers, only one pod can be scheduled. + // + // Both schedulers try to add their pod to the claim.status.reservedFor + // field, but only the update that reaches the API server first gets + // stored. The other one fails with an error and the scheduler + // which issued it knows that it must put the pod back into the queue, + // waiting for the ResourceClaim to become usable again. + // + // There can be at most 256 such reservations. This may get increased in + // the future, but not reduced. + // + // +optional + // +listType=map + // +listMapKey=uid + // +patchStrategy=merge + // +patchMergeKey=uid + repeated ResourceClaimConsumerReference reservedFor = 2; + + // Devices contains the status of each device allocated for this + // claim, as reported by the driver. This can include driver-specific + // information. Entries are owned by their respective drivers. + // + // +optional + // +listType=map + // +listMapKey=driver + // +listMapKey=device + // +listMapKey=pool + // +featureGate=DRAResourceClaimDeviceStatus + repeated AllocatedDeviceStatus devices = 4; +} + +// ResourceClaimTemplate is used to produce ResourceClaim objects. +// +// This is an alpha type and requires enabling the DynamicResourceAllocation +// feature gate. +message ResourceClaimTemplate { + // Standard object metadata + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Describes the ResourceClaim that is to be generated. + // + // This field is immutable. A ResourceClaim will get created by the + // control plane for a Pod when needed and then not get updated + // anymore. + optional ResourceClaimTemplateSpec spec = 2; +} + +// ResourceClaimTemplateList is a collection of claim templates. +message ResourceClaimTemplateList { + // Standard list metadata + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is the list of resource claim templates. + repeated ResourceClaimTemplate items = 2; +} + +// ResourceClaimTemplateSpec contains the metadata and fields for a ResourceClaim. +message ResourceClaimTemplateSpec { + // ObjectMeta may contain labels and annotations that will be copied into the ResourceClaim + // when creating it. 
No other fields are allowed and will be rejected during + // validation. + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Spec for the ResourceClaim. The entire content is copied unchanged + // into the ResourceClaim that gets created from this template. The + // same fields as in a ResourceClaim are also valid here. + optional ResourceClaimSpec spec = 2; +} + +// ResourcePool describes the pool that ResourceSlices belong to. +message ResourcePool { + // Name is used to identify the pool. For node-local devices, this + // is often the node name, but this is not required. + // + // It must not be longer than 253 characters and must consist of one or more DNS sub-domains + // separated by slashes. This field is immutable. + // + // +required + optional string name = 1; + + // Generation tracks the change in a pool over time. Whenever a driver + // changes something about one or more of the resources in a pool, it + // must change the generation in all ResourceSlices which are part of + // that pool. Consumers of ResourceSlices should only consider + // resources from the pool with the highest generation number. The + // generation may be reset by drivers, which should be fine for + // consumers, assuming that all ResourceSlices in a pool are updated to + // match or deleted. + // + // Combined with ResourceSliceCount, this mechanism enables consumers to + // detect pools which are comprised of multiple ResourceSlices and are + // in an incomplete state. + // + // +required + optional int64 generation = 2; + + // ResourceSliceCount is the total number of ResourceSlices in the pool at this + // generation number. Must be greater than zero. + // + // Consumers can use this to check whether they have seen all ResourceSlices + // belonging to the same pool. + // + // +required + optional int64 resourceSliceCount = 3; +} + +// ResourceSlice represents one or more resources in a pool of similar resources, +// managed by a common driver. A pool may span more than one ResourceSlice, and exactly how many +// ResourceSlices comprise a pool is determined by the driver. +// +// At the moment, the only supported resources are devices with attributes and capacities. +// Each device in a given pool, regardless of how many ResourceSlices, must have a unique name. +// The ResourceSlice in which a device gets published may change over time. The unique identifier +// for a device is the tuple , , . +// +// Whenever a driver needs to update a pool, it increments the pool.Spec.Pool.Generation number +// and updates all ResourceSlices with that new number and new resource definitions. A consumer +// must only use ResourceSlices with the highest generation number and ignore all others. +// +// When allocating all resources in a pool matching certain criteria or when +// looking for the best solution among several different alternatives, a +// consumer should check the number of ResourceSlices in a pool (included in +// each ResourceSlice) to determine whether its view of a pool is complete and +// if not, should wait until the driver has completed updating the pool. +// +// For resources that are not local to a node, the node name is not set. Instead, +// the driver may use a node selector to specify where the devices are available. +// +// This is an alpha type and requires enabling the DynamicResourceAllocation +// feature gate. 
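The ResourcePool comments above describe the consumer-side contract: use only the highest generation of a pool and treat the pool as incomplete until ResourceSliceCount slices of that generation have been seen. A minimal sketch of that check, written against the vendored Go types and not taken from upstream, could look like this:

    import resourcev1beta1 "k8s.io/api/resource/v1beta1"

    // currentPoolSlices keeps only the ResourceSlices belonging to the highest
    // generation of the named pool and reports whether that view is complete.
    func currentPoolSlices(slices []resourcev1beta1.ResourceSlice, pool string) (current []resourcev1beta1.ResourceSlice, complete bool) {
        maxGen := int64(-1)
        for _, s := range slices {
            if s.Spec.Pool.Name != pool {
                continue
            }
            switch {
            case s.Spec.Pool.Generation > maxGen:
                maxGen = s.Spec.Pool.Generation
                current = []resourcev1beta1.ResourceSlice{s}
            case s.Spec.Pool.Generation == maxGen:
                current = append(current, s)
            }
        }
        if len(current) == 0 {
            return nil, false
        }
        // Every slice in one generation carries the same ResourceSliceCount.
        return current, int64(len(current)) == current[0].Spec.Pool.ResourceSliceCount
    }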
+message ResourceSlice { + // Standard object metadata + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Contains the information published by the driver. + // + // Changing the spec automatically increments the metadata.generation number. + optional ResourceSliceSpec spec = 2; +} + +// ResourceSliceList is a collection of ResourceSlices. +message ResourceSliceList { + // Standard list metadata + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is the list of resource ResourceSlices. + repeated ResourceSlice items = 2; +} + +// ResourceSliceSpec contains the information published by the driver in one ResourceSlice. +message ResourceSliceSpec { + // Driver identifies the DRA driver providing the capacity information. + // A field selector can be used to list only ResourceSlice + // objects with a certain driver name. + // + // Must be a DNS subdomain and should end with a DNS domain owned by the + // vendor of the driver. This field is immutable. + // + // +required + optional string driver = 1; + + // Pool describes the pool that this ResourceSlice belongs to. + // + // +required + optional ResourcePool pool = 2; + + // NodeName identifies the node which provides the resources in this pool. + // A field selector can be used to list only ResourceSlice + // objects belonging to a certain node. + // + // This field can be used to limit access from nodes to ResourceSlices with + // the same node name. It also indicates to autoscalers that adding + // new nodes of the same type as some old node might also make new + // resources available. + // + // Exactly one of NodeName, NodeSelector and AllNodes must be set. + // This field is immutable. + // + // +optional + // +oneOf=NodeSelection + optional string nodeName = 3; + + // NodeSelector defines which nodes have access to the resources in the pool, + // when that pool is not limited to a single node. + // + // Must use exactly one term. + // + // Exactly one of NodeName, NodeSelector and AllNodes must be set. + // + // +optional + // +oneOf=NodeSelection + optional .k8s.io.api.core.v1.NodeSelector nodeSelector = 4; + + // AllNodes indicates that all nodes have access to the resources in the pool. + // + // Exactly one of NodeName, NodeSelector and AllNodes must be set. + // + // +optional + // +oneOf=NodeSelection + optional bool allNodes = 5; + + // Devices lists some or all of the devices in this pool. + // + // Must not have more than 128 entries. + // + // +optional + // +listType=atomic + repeated Device devices = 6; +} + diff --git a/vendor/k8s.io/api/resource/v1beta1/register.go b/vendor/k8s.io/api/resource/v1beta1/register.go new file mode 100644 index 000000000..ce0a1d930 --- /dev/null +++ b/vendor/k8s.io/api/resource/v1beta1/register.go @@ -0,0 +1,60 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1beta1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = "resource.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + // We only register manually written functions here. The registration of the + // generated functions takes place in the generated files. The separation + // makes the code compile even when the generated files are missing. + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + AddToScheme = SchemeBuilder.AddToScheme +) + +// Adds the list of known types to the given scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &DeviceClass{}, + &DeviceClassList{}, + &ResourceClaim{}, + &ResourceClaimList{}, + &ResourceClaimTemplate{}, + &ResourceClaimTemplateList{}, + &ResourceSlice{}, + &ResourceSliceList{}, + ) + + // Add the watch version that applies + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/api/resource/v1beta1/types.go b/vendor/k8s.io/api/resource/v1beta1/types.go new file mode 100644 index 000000000..fbdc35ca8 --- /dev/null +++ b/vendor/k8s.io/api/resource/v1beta1/types.go @@ -0,0 +1,1088 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/validation" +) + +const ( + // Finalizer is the finalizer that gets set for claims + // which were allocated through a builtin controller. + // Reserved for use by Kubernetes, DRA driver controllers must + // use their own finalizer. + Finalizer = "resource.kubernetes.io/delete-protection" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.32 + +// ResourceSlice represents one or more resources in a pool of similar resources, +// managed by a common driver. A pool may span more than one ResourceSlice, and exactly how many +// ResourceSlices comprise a pool is determined by the driver. +// +// At the moment, the only supported resources are devices with attributes and capacities. +// Each device in a given pool, regardless of how many ResourceSlices, must have a unique name. +// The ResourceSlice in which a device gets published may change over time. The unique identifier +// for a device is the tuple , , . 
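register.go above only wires the eight types into a SchemeBuilder; a client still has to add them to a runtime.Scheme before use. A small, self-contained sketch (package and variable names are illustrative):

    package main

    import (
        "fmt"

        resourcev1beta1 "k8s.io/api/resource/v1beta1"
        "k8s.io/apimachinery/pkg/runtime"
        utilruntime "k8s.io/apimachinery/pkg/util/runtime"
    )

    func main() {
        scheme := runtime.NewScheme()
        // AddToScheme registers DeviceClass, ResourceClaim, ResourceClaimTemplate,
        // ResourceSlice and their list types under resource.k8s.io/v1beta1.
        utilruntime.Must(resourcev1beta1.AddToScheme(scheme))

        gvk := resourcev1beta1.SchemeGroupVersion.WithKind("ResourceClaim")
        fmt.Println(scheme.Recognizes(gvk)) // expected to print "true"
    }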
+// +// Whenever a driver needs to update a pool, it increments the pool.Spec.Pool.Generation number +// and updates all ResourceSlices with that new number and new resource definitions. A consumer +// must only use ResourceSlices with the highest generation number and ignore all others. +// +// When allocating all resources in a pool matching certain criteria or when +// looking for the best solution among several different alternatives, a +// consumer should check the number of ResourceSlices in a pool (included in +// each ResourceSlice) to determine whether its view of a pool is complete and +// if not, should wait until the driver has completed updating the pool. +// +// For resources that are not local to a node, the node name is not set. Instead, +// the driver may use a node selector to specify where the devices are available. +// +// This is an alpha type and requires enabling the DynamicResourceAllocation +// feature gate. +type ResourceSlice struct { + metav1.TypeMeta `json:",inline"` + // Standard object metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Contains the information published by the driver. + // + // Changing the spec automatically increments the metadata.generation number. + Spec ResourceSliceSpec `json:"spec" protobuf:"bytes,2,name=spec"` +} + +const ( + // ResourceSliceSelectorNodeName can be used in a [metav1.ListOptions] + // field selector to filter based on [ResourceSliceSpec.NodeName]. + ResourceSliceSelectorNodeName = "spec.nodeName" + // ResourceSliceSelectorDriver can be used in a [metav1.ListOptions] + // field selector to filter based on [ResourceSliceSpec.Driver]. + ResourceSliceSelectorDriver = "spec.driver" +) + +// ResourceSliceSpec contains the information published by the driver in one ResourceSlice. +type ResourceSliceSpec struct { + // Driver identifies the DRA driver providing the capacity information. + // A field selector can be used to list only ResourceSlice + // objects with a certain driver name. + // + // Must be a DNS subdomain and should end with a DNS domain owned by the + // vendor of the driver. This field is immutable. + // + // +required + Driver string `json:"driver" protobuf:"bytes,1,name=driver"` + + // Pool describes the pool that this ResourceSlice belongs to. + // + // +required + Pool ResourcePool `json:"pool" protobuf:"bytes,2,name=pool"` + + // NodeName identifies the node which provides the resources in this pool. + // A field selector can be used to list only ResourceSlice + // objects belonging to a certain node. + // + // This field can be used to limit access from nodes to ResourceSlices with + // the same node name. It also indicates to autoscalers that adding + // new nodes of the same type as some old node might also make new + // resources available. + // + // Exactly one of NodeName, NodeSelector and AllNodes must be set. + // This field is immutable. + // + // +optional + // +oneOf=NodeSelection + NodeName string `json:"nodeName,omitempty" protobuf:"bytes,3,opt,name=nodeName"` + + // NodeSelector defines which nodes have access to the resources in the pool, + // when that pool is not limited to a single node. + // + // Must use exactly one term. + // + // Exactly one of NodeName, NodeSelector and AllNodes must be set. 
+ // + // +optional + // +oneOf=NodeSelection + NodeSelector *v1.NodeSelector `json:"nodeSelector,omitempty" protobuf:"bytes,4,opt,name=nodeSelector"` + + // AllNodes indicates that all nodes have access to the resources in the pool. + // + // Exactly one of NodeName, NodeSelector and AllNodes must be set. + // + // +optional + // +oneOf=NodeSelection + AllNodes bool `json:"allNodes,omitempty" protobuf:"bytes,5,opt,name=allNodes"` + + // Devices lists some or all of the devices in this pool. + // + // Must not have more than 128 entries. + // + // +optional + // +listType=atomic + Devices []Device `json:"devices" protobuf:"bytes,6,name=devices"` +} + +// DriverNameMaxLength is the maximum valid length of a driver name in the +// ResourceSliceSpec and other places. It's the same as for CSI driver names. +const DriverNameMaxLength = 63 + +// ResourcePool describes the pool that ResourceSlices belong to. +type ResourcePool struct { + // Name is used to identify the pool. For node-local devices, this + // is often the node name, but this is not required. + // + // It must not be longer than 253 characters and must consist of one or more DNS sub-domains + // separated by slashes. This field is immutable. + // + // +required + Name string `json:"name" protobuf:"bytes,1,name=name"` + + // Generation tracks the change in a pool over time. Whenever a driver + // changes something about one or more of the resources in a pool, it + // must change the generation in all ResourceSlices which are part of + // that pool. Consumers of ResourceSlices should only consider + // resources from the pool with the highest generation number. The + // generation may be reset by drivers, which should be fine for + // consumers, assuming that all ResourceSlices in a pool are updated to + // match or deleted. + // + // Combined with ResourceSliceCount, this mechanism enables consumers to + // detect pools which are comprised of multiple ResourceSlices and are + // in an incomplete state. + // + // +required + Generation int64 `json:"generation" protobuf:"bytes,2,name=generation"` + + // ResourceSliceCount is the total number of ResourceSlices in the pool at this + // generation number. Must be greater than zero. + // + // Consumers can use this to check whether they have seen all ResourceSlices + // belonging to the same pool. + // + // +required + ResourceSliceCount int64 `json:"resourceSliceCount" protobuf:"bytes,3,name=resourceSliceCount"` +} + +const ResourceSliceMaxSharedCapacity = 128 +const ResourceSliceMaxDevices = 128 +const PoolNameMaxLength = validation.DNS1123SubdomainMaxLength // Same as for a single node name. + +// Device represents one individual hardware instance that can be selected based +// on its attributes. Besides the name, exactly one field must be set. +type Device struct { + // Name is unique identifier among all devices managed by + // the driver in the pool. It must be a DNS label. + // + // +required + Name string `json:"name" protobuf:"bytes,1,name=name"` + + // Basic defines one device instance. + // + // +optional + // +oneOf=deviceType + Basic *BasicDevice `json:"basic,omitempty" protobuf:"bytes,2,opt,name=basic"` +} + +// BasicDevice defines one device instance. +type BasicDevice struct { + // Attributes defines the set of attributes for this device. + // The name of each attribute must be unique in that set. + // + // The maximum number of attributes and capacities combined is 32. 
+ // + // +optional + Attributes map[QualifiedName]DeviceAttribute `json:"attributes,omitempty" protobuf:"bytes,1,rep,name=attributes"` + + // Capacity defines the set of capacities for this device. + // The name of each capacity must be unique in that set. + // + // The maximum number of attributes and capacities combined is 32. + // + // +optional + Capacity map[QualifiedName]DeviceCapacity `json:"capacity,omitempty" protobuf:"bytes,2,rep,name=capacity"` +} + +// DeviceCapacity describes a quantity associated with a device. +type DeviceCapacity struct { + // Value defines how much of a certain device capacity is available. + // + // +required + Value resource.Quantity `json:"value" protobuf:"bytes,1,rep,name=value"` + + // potential future addition: fields which define how to "consume" + // capacity (= share a single device between different consumers). +} + +// Limit for the sum of the number of entries in both attributes and capacity. +const ResourceSliceMaxAttributesAndCapacitiesPerDevice = 32 + +// QualifiedName is the name of a device attribute or capacity. +// +// Attributes and capacities are defined either by the owner of the specific +// driver (usually the vendor) or by some 3rd party (e.g. the Kubernetes +// project). Because they are sometimes compared across devices, a given name +// is expected to mean the same thing and have the same type on all devices. +// +// Names must be either a C identifier (e.g. "theName") or a DNS subdomain +// followed by a slash ("/") followed by a C identifier +// (e.g. "dra.example.com/theName"). Names which do not include the +// domain prefix are assumed to be part of the driver's domain. Attributes +// or capacities defined by 3rd parties must include the domain prefix. +// +// The maximum length for the DNS subdomain is 63 characters (same as +// for driver names) and the maximum length of the C identifier +// is 32. +type QualifiedName string + +// FullyQualifiedName is a QualifiedName where the domain is set. +type FullyQualifiedName string + +// DeviceMaxDomainLength is the maximum length of the domain prefix in a fully-qualified name. +const DeviceMaxDomainLength = 63 + +// DeviceMaxIDLength is the maximum length of the identifier in a device attribute or capacity name (`/`). +const DeviceMaxIDLength = 32 + +// DeviceAttribute must have exactly one field set. +type DeviceAttribute struct { + // The Go field names below have a Value suffix to avoid a conflict between the + // field "String" and the corresponding method. That method is required. + // The Kubernetes API is defined without that suffix to keep it more natural. + + // IntValue is a number. + // + // +optional + // +oneOf=ValueType + IntValue *int64 `json:"int,omitempty" protobuf:"varint,2,opt,name=int"` + + // BoolValue is a true/false value. + // + // +optional + // +oneOf=ValueType + BoolValue *bool `json:"bool,omitempty" protobuf:"varint,3,opt,name=bool"` + + // StringValue is a string. Must not be longer than 64 characters. + // + // +optional + // +oneOf=ValueType + StringValue *string `json:"string,omitempty" protobuf:"bytes,4,opt,name=string"` + + // VersionValue is a semantic version according to semver.org spec 2.0.0. + // Must not be longer than 64 characters. + // + // +optional + // +oneOf=ValueType + VersionValue *string `json:"version,omitempty" protobuf:"bytes,5,opt,name=version"` +} + +// DeviceAttributeMaxValueLength is the maximum length of a string or version attribute value. 
+const DeviceAttributeMaxValueLength = 64 + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.32 + +// ResourceSliceList is a collection of ResourceSlices. +type ResourceSliceList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is the list of resource ResourceSlices. + Items []ResourceSlice `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.32 + +// ResourceClaim describes a request for access to resources in the cluster, +// for use by workloads. For example, if a workload needs an accelerator device +// with specific properties, this is how that request is expressed. The status +// stanza tracks whether this claim has been satisfied and what specific +// resources have been allocated. +// +// This is an alpha type and requires enabling the DynamicResourceAllocation +// feature gate. +type ResourceClaim struct { + metav1.TypeMeta `json:",inline"` + // Standard object metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Spec describes what is being requested and how to configure it. + // The spec is immutable. + Spec ResourceClaimSpec `json:"spec" protobuf:"bytes,2,name=spec"` + + // Status describes whether the claim is ready to use and what has been allocated. + // +optional + Status ResourceClaimStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// ResourceClaimSpec defines what is being requested in a ResourceClaim and how to configure it. +type ResourceClaimSpec struct { + // Devices defines how to request devices. + // + // +optional + Devices DeviceClaim `json:"devices" protobuf:"bytes,1,name=devices"` + + // Controller is tombstoned since Kubernetes 1.32 where + // it got removed. May be reused once decoding v1alpha3 is no longer + // supported. + // Controller string `json:"controller,omitempty" protobuf:"bytes,2,opt,name=controller"` +} + +// DeviceClaim defines how to request devices with a ResourceClaim. +type DeviceClaim struct { + // Requests represent individual requests for distinct devices which + // must all be satisfied. If empty, nothing needs to be allocated. + // + // +optional + // +listType=atomic + Requests []DeviceRequest `json:"requests" protobuf:"bytes,1,name=requests"` + + // These constraints must be satisfied by the set of devices that get + // allocated for the claim. + // + // +optional + // +listType=atomic + Constraints []DeviceConstraint `json:"constraints,omitempty" protobuf:"bytes,2,opt,name=constraints"` + + // This field holds configuration for multiple potential drivers which + // could satisfy requests in this claim. It is ignored while allocating + // the claim. + // + // +optional + // +listType=atomic + Config []DeviceClaimConfiguration `json:"config,omitempty" protobuf:"bytes,3,opt,name=config"` + + // Potential future extension, ignored by older schedulers. This is + // fine because scoring allows users to define a preference, without + // making it a hard requirement. + // + // Score *SomeScoringStruct +} + +const ( + DeviceRequestsMaxSize = AllocationResultsMaxSize + DeviceConstraintsMaxSize = 32 + DeviceConfigMaxSize = 32 +) + +// DeviceRequest is a request for devices required for a claim. 
+// This is typically a request for a single resource like a device, but can +// also ask for several identical devices. +// +// A DeviceClassName is currently required. Clients must check that it is +// indeed set. It's absence indicates that something changed in a way that +// is not supported by the client yet, in which case it must refuse to +// handle the request. +type DeviceRequest struct { + // Name can be used to reference this request in a pod.spec.containers[].resources.claims + // entry and in a constraint of the claim. + // + // Must be a DNS label. + // + // +required + Name string `json:"name" protobuf:"bytes,1,name=name"` + + // DeviceClassName references a specific DeviceClass, which can define + // additional configuration and selectors to be inherited by this + // request. + // + // A class is required. Which classes are available depends on the cluster. + // + // Administrators may use this to restrict which devices may get + // requested by only installing classes with selectors for permitted + // devices. If users are free to request anything without restrictions, + // then administrators can create an empty DeviceClass for users + // to reference. + // + // +required + DeviceClassName string `json:"deviceClassName" protobuf:"bytes,2,name=deviceClassName"` + + // Selectors define criteria which must be satisfied by a specific + // device in order for that device to be considered for this + // request. All selectors must be satisfied for a device to be + // considered. + // + // +optional + // +listType=atomic + Selectors []DeviceSelector `json:"selectors,omitempty" protobuf:"bytes,3,name=selectors"` + + // AllocationMode and its related fields define how devices are allocated + // to satisfy this request. Supported values are: + // + // - ExactCount: This request is for a specific number of devices. + // This is the default. The exact number is provided in the + // count field. + // + // - All: This request is for all of the matching devices in a pool. + // Allocation will fail if some devices are already allocated, + // unless adminAccess is requested. + // + // If AlloctionMode is not specified, the default mode is ExactCount. If + // the mode is ExactCount and count is not specified, the default count is + // one. Any other requests must specify this field. + // + // More modes may get added in the future. Clients must refuse to handle + // requests with unknown modes. + // + // +optional + AllocationMode DeviceAllocationMode `json:"allocationMode,omitempty" protobuf:"bytes,4,opt,name=allocationMode"` + + // Count is used only when the count mode is "ExactCount". Must be greater than zero. + // If AllocationMode is ExactCount and this field is not specified, the default is one. + // + // +optional + // +oneOf=AllocationMode + Count int64 `json:"count,omitempty" protobuf:"bytes,5,opt,name=count"` + + // AdminAccess indicates that this is a claim for administrative access + // to the device(s). Claims with AdminAccess are expected to be used for + // monitoring or other management services for a device. They ignore + // all ordinary claims to the device with respect to access modes and + // any resource allocations. + // + // This is an alpha field and requires enabling the DRAAdminAccess + // feature gate. Admin access is disabled if this field is unset or + // set to false, otherwise it is enabled. 
+ // + // +optional + // +featureGate=DRAAdminAccess + AdminAccess *bool `json:"adminAccess,omitempty" protobuf:"bytes,6,opt,name=adminAccess"` +} + +const ( + DeviceSelectorsMaxSize = 32 +) + +type DeviceAllocationMode string + +// Valid [DeviceRequest.CountMode] values. +const ( + DeviceAllocationModeExactCount = DeviceAllocationMode("ExactCount") + DeviceAllocationModeAll = DeviceAllocationMode("All") +) + +// DeviceSelector must have exactly one field set. +type DeviceSelector struct { + // CEL contains a CEL expression for selecting a device. + // + // +optional + // +oneOf=SelectorType + CEL *CELDeviceSelector `json:"cel,omitempty" protobuf:"bytes,1,opt,name=cel"` +} + +// CELDeviceSelector contains a CEL expression for selecting a device. +type CELDeviceSelector struct { + // Expression is a CEL expression which evaluates a single device. It + // must evaluate to true when the device under consideration satisfies + // the desired criteria, and false when it does not. Any other result + // is an error and causes allocation of devices to abort. + // + // The expression's input is an object named "device", which carries + // the following properties: + // - driver (string): the name of the driver which defines this device. + // - attributes (map[string]object): the device's attributes, grouped by prefix + // (e.g. device.attributes["dra.example.com"] evaluates to an object with all + // of the attributes which were prefixed by "dra.example.com". + // - capacity (map[string]object): the device's capacities, grouped by prefix. + // + // Example: Consider a device with driver="dra.example.com", which exposes + // two attributes named "model" and "ext.example.com/family" and which + // exposes one capacity named "modules". This input to this expression + // would have the following fields: + // + // device.driver + // device.attributes["dra.example.com"].model + // device.attributes["ext.example.com"].family + // device.capacity["dra.example.com"].modules + // + // The device.driver field can be used to check for a specific driver, + // either as a high-level precondition (i.e. you only want to consider + // devices from this driver) or as part of a multi-clause expression + // that is meant to consider devices from different drivers. + // + // The value type of each attribute is defined by the device + // definition, and users who write these expressions must consult the + // documentation for their specific drivers. The value type of each + // capacity is Quantity. + // + // If an unknown prefix is used as a lookup in either device.attributes + // or device.capacity, an empty map will be returned. Any reference to + // an unknown field will cause an evaluation error and allocation to + // abort. + // + // A robust expression should check for the existence of attributes + // before referencing them. + // + // For ease of use, the cel.bind() function is enabled, and can be used + // to simplify expressions that access multiple attributes with the + // same domain. For example: + // + // cel.bind(dra, device.attributes["dra.example.com"], dra.someBool && dra.anotherBool) + // + // The length of the expression must be smaller or equal to 10 Ki. The + // cost of evaluating it is also limited based on the estimated number + // of logical steps. + // + // +required + Expression string `json:"expression" protobuf:"bytes,1,name=expression"` +} + +// CELSelectorExpressionMaxCost specifies the cost limit for a single CEL selector +// evaluation. 
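A small example selector built on the CEL contract documented above. The driver domain and attribute names are hypothetical, and, as noted above, referencing an attribute that a matching device does not publish would abort allocation with an error.

package drautil

import resourcev1beta1 "k8s.io/api/resource/v1beta1"

// exampleSelector matches devices of one (hypothetical) driver whose
// "model" attribute has a specific value, using cel.bind as shown in the
// documentation above.
var exampleSelector = resourcev1beta1.DeviceSelector{
	CEL: &resourcev1beta1.CELDeviceSelector{
		Expression: `device.driver == "dra.example.com" &&
cel.bind(dra, device.attributes["dra.example.com"], dra.model == "example-100")`,
	},
}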
+// +// There is no overall budget for selecting a device, so the actual time +// required for that is proportional to the number of CEL selectors and how +// often they need to be evaluated, which can vary depending on several factors +// (number of devices, cluster utilization, additional constraints). +// +// Validation against this limit and [CELSelectorExpressionMaxLength] happens +// only when setting an expression for the first time or when changing it. If +// the limits are changed in a future Kubernetes release, existing users are +// guaranteed that existing expressions will continue to be valid. +// +// However, the kube-scheduler also applies this cost limit at runtime, so it +// could happen that a valid expression fails at runtime after an up- or +// downgrade. This can also happen without version skew when the cost estimate +// underestimated the actual cost. That this might happen is the reason why +// kube-scheduler enforces the runtime limit instead of relying on validation. +// +// According to +// https://github.com/kubernetes/kubernetes/blob/4aeaf1e99e82da8334c0d6dddd848a194cd44b4f/staging/src/k8s.io/apiserver/pkg/apis/cel/config.go#L20-L22, +// this gives roughly 0.1 second for each expression evaluation. +// However, this depends on how fast the machine is. +const CELSelectorExpressionMaxCost = 1000000 + +// CELSelectorExpressionMaxLength is the maximum length of a CEL selector expression string. +const CELSelectorExpressionMaxLength = 10 * 1024 + +// DeviceConstraint must have exactly one field set besides Requests. +type DeviceConstraint struct { + // Requests is a list of the one or more requests in this claim which + // must co-satisfy this constraint. If a request is fulfilled by + // multiple devices, then all of the devices must satisfy the + // constraint. If this is not specified, this constraint applies to all + // requests in this claim. + // + // +optional + // +listType=atomic + Requests []string `json:"requests,omitempty" protobuf:"bytes,1,opt,name=requests"` + + // MatchAttribute requires that all devices in question have this + // attribute and that its type and value are the same across those + // devices. + // + // For example, if you specified "dra.example.com/numa" (a hypothetical example!), + // then only devices in the same NUMA node will be chosen. A device which + // does not have that attribute will not be chosen. All devices should + // use a value of the same type for this attribute because that is part of + // its specification, but if one device doesn't, then it also will not be + // chosen. + // + // Must include the domain qualifier. + // + // +optional + // +oneOf=ConstraintType + MatchAttribute *FullyQualifiedName `json:"matchAttribute,omitempty" protobuf:"bytes,2,opt,name=matchAttribute"` + + // Potential future extension, not part of the current design: + // A CEL expression which compares different devices and returns + // true if they match. + // + // Because it would be part of a one-of, old schedulers will not + // accidentally ignore this additional, for them unknown match + // criteria. + // + // MatchExpression string +} + +// DeviceClaimConfiguration is used for configuration parameters in DeviceClaim. +type DeviceClaimConfiguration struct { + // Requests lists the names of requests where the configuration applies. + // If empty, it applies to all requests. 
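A sketch tying the request and constraint pieces above together: a ResourceClaimSpec with one ExactCount request against a hypothetical DeviceClass and a MatchAttribute constraint modeled on the NUMA example in the DeviceConstraint documentation.

package drautil

import (
	resourcev1beta1 "k8s.io/api/resource/v1beta1"
	"k8s.io/utils/ptr"
)

// exampleClaimSpec asks for two devices from the "gpu.example.com" class
// (a placeholder name) and requires them to share the same value for the
// hypothetical "dra.example.com/numa" attribute.
var exampleClaimSpec = resourcev1beta1.ResourceClaimSpec{
	Devices: resourcev1beta1.DeviceClaim{
		Requests: []resourcev1beta1.DeviceRequest{{
			Name:            "gpus",
			DeviceClassName: "gpu.example.com",
			AllocationMode:  resourcev1beta1.DeviceAllocationModeExactCount,
			Count:           2,
		}},
		Constraints: []resourcev1beta1.DeviceConstraint{{
			Requests:       []string{"gpus"},
			MatchAttribute: ptr.To(resourcev1beta1.FullyQualifiedName("dra.example.com/numa")),
		}},
	},
}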
+ // + // +optional + // +listType=atomic + Requests []string `json:"requests,omitempty" protobuf:"bytes,1,opt,name=requests"` + + DeviceConfiguration `json:",inline" protobuf:"bytes,2,name=deviceConfiguration"` +} + +// DeviceConfiguration must have exactly one field set. It gets embedded +// inline in some other structs which have other fields, so field names must +// not conflict with those. +type DeviceConfiguration struct { + // Opaque provides driver-specific configuration parameters. + // + // +optional + // +oneOf=ConfigurationType + Opaque *OpaqueDeviceConfiguration `json:"opaque,omitempty" protobuf:"bytes,1,opt,name=opaque"` +} + +// OpaqueDeviceConfiguration contains configuration parameters for a driver +// in a format defined by the driver vendor. +type OpaqueDeviceConfiguration struct { + // Driver is used to determine which kubelet plugin needs + // to be passed these configuration parameters. + // + // An admission policy provided by the driver developer could use this + // to decide whether it needs to validate them. + // + // Must be a DNS subdomain and should end with a DNS domain owned by the + // vendor of the driver. + // + // +required + Driver string `json:"driver" protobuf:"bytes,1,name=driver"` + + // Parameters can contain arbitrary data. It is the responsibility of + // the driver developer to handle validation and versioning. Typically this + // includes self-identification and a version ("kind" + "apiVersion" for + // Kubernetes types), with conversion between different versions. + // + // The length of the raw data must be smaller or equal to 10 Ki. + // + // +required + Parameters runtime.RawExtension `json:"parameters" protobuf:"bytes,2,name=parameters"` +} + +// OpaqueParametersMaxLength is the maximum length of the raw data in an +// [OpaqueDeviceConfiguration.Parameters] field. +const OpaqueParametersMaxLength = 10 * 1024 + +// ResourceClaimStatus tracks whether the resource has been allocated and what +// the result of that was. +type ResourceClaimStatus struct { + // Allocation is set once the claim has been allocated successfully. + // + // +optional + Allocation *AllocationResult `json:"allocation,omitempty" protobuf:"bytes,1,opt,name=allocation"` + + // ReservedFor indicates which entities are currently allowed to use + // the claim. A Pod which references a ResourceClaim which is not + // reserved for that Pod will not be started. A claim that is in + // use or might be in use because it has been reserved must not get + // deallocated. + // + // In a cluster with multiple scheduler instances, two pods might get + // scheduled concurrently by different schedulers. When they reference + // the same ResourceClaim which already has reached its maximum number + // of consumers, only one pod can be scheduled. + // + // Both schedulers try to add their pod to the claim.status.reservedFor + // field, but only the update that reaches the API server first gets + // stored. The other one fails with an error and the scheduler + // which issued it knows that it must put the pod back into the queue, + // waiting for the ResourceClaim to become usable again. + // + // There can be at most 256 such reservations. This may get increased in + // the future, but not reduced. 
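To illustrate the opaque configuration path described above, a hedged example of a DeviceClaimConfiguration whose parameters self-identify with kind and apiVersion; the driver name and the parameter schema are made up.

package drautil

import (
	resourcev1beta1 "k8s.io/api/resource/v1beta1"
	"k8s.io/apimachinery/pkg/runtime"
)

// exampleOpaqueConfig passes vendor-specific settings to a hypothetical
// driver; an empty Requests list would apply it to all requests.
var exampleOpaqueConfig = resourcev1beta1.DeviceClaimConfiguration{
	Requests: []string{"gpus"},
	DeviceConfiguration: resourcev1beta1.DeviceConfiguration{
		Opaque: &resourcev1beta1.OpaqueDeviceConfiguration{
			Driver: "dra.example.com",
			Parameters: runtime.RawExtension{
				Raw: []byte(`{"apiVersion":"gpu.example.com/v1","kind":"GPUConfig","sharing":"timeSlice"}`),
			},
		},
	},
}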
+ // + // +optional + // +listType=map + // +listMapKey=uid + // +patchStrategy=merge + // +patchMergeKey=uid + ReservedFor []ResourceClaimConsumerReference `json:"reservedFor,omitempty" protobuf:"bytes,2,opt,name=reservedFor" patchStrategy:"merge" patchMergeKey:"uid"` + + // DeallocationRequested is tombstoned since Kubernetes 1.32 where + // it got removed. May be reused once decoding v1alpha3 is no longer + // supported. + // DeallocationRequested bool `json:"deallocationRequested,omitempty" protobuf:"bytes,3,opt,name=deallocationRequested"` + + // Devices contains the status of each device allocated for this + // claim, as reported by the driver. This can include driver-specific + // information. Entries are owned by their respective drivers. + // + // +optional + // +listType=map + // +listMapKey=driver + // +listMapKey=device + // +listMapKey=pool + // +featureGate=DRAResourceClaimDeviceStatus + Devices []AllocatedDeviceStatus `json:"devices,omitempty" protobuf:"bytes,4,opt,name=devices"` +} + +// ResourceClaimReservedForMaxSize is the maximum number of entries in +// claim.status.reservedFor. +const ResourceClaimReservedForMaxSize = 256 + +// ResourceClaimConsumerReference contains enough information to let you +// locate the consumer of a ResourceClaim. The user must be a resource in the same +// namespace as the ResourceClaim. +type ResourceClaimConsumerReference struct { + // APIGroup is the group for the resource being referenced. It is + // empty for the core API. This matches the group in the APIVersion + // that is used when creating the resources. + // +optional + APIGroup string `json:"apiGroup,omitempty" protobuf:"bytes,1,opt,name=apiGroup"` + // Resource is the type of resource being referenced, for example "pods". + // +required + Resource string `json:"resource" protobuf:"bytes,3,name=resource"` + // Name is the name of resource being referenced. + // +required + Name string `json:"name" protobuf:"bytes,4,name=name"` + // UID identifies exactly one incarnation of the resource. + // +required + UID types.UID `json:"uid" protobuf:"bytes,5,name=uid"` +} + +// AllocationResult contains attributes of an allocated resource. +type AllocationResult struct { + // Devices is the result of allocating devices. + // + // +optional + Devices DeviceAllocationResult `json:"devices,omitempty" protobuf:"bytes,1,opt,name=devices"` + + // NodeSelector defines where the allocated resources are available. If + // unset, they are available everywhere. + // + // +optional + NodeSelector *v1.NodeSelector `json:"nodeSelector,omitempty" protobuf:"bytes,3,opt,name=nodeSelector"` + + // Controller is tombstoned since Kubernetes 1.32 where + // it got removed. May be reused once decoding v1alpha3 is no longer + // supported. + // Controller string `json:"controller,omitempty" protobuf:"bytes,4,opt,name=controller"` +} + +// DeviceAllocationResult is the result of allocating devices. +type DeviceAllocationResult struct { + // Results lists all allocated devices. + // + // +optional + // +listType=atomic + Results []DeviceRequestAllocationResult `json:"results,omitempty" protobuf:"bytes,1,opt,name=results"` + + // This field is a combination of all the claim and class configuration parameters. + // Drivers can distinguish between those based on a flag. + // + // This includes configuration parameters for drivers which have no allocated + // devices in the result because it is up to the drivers which configuration + // parameters they support. 
They can silently ignore unknown configuration + // parameters. + // + // +optional + // +listType=atomic + Config []DeviceAllocationConfiguration `json:"config,omitempty" protobuf:"bytes,2,opt,name=config"` +} + +// AllocationResultsMaxSize represents the maximum number of +// entries in allocation.devices.results. +const AllocationResultsMaxSize = 32 + +// DeviceRequestAllocationResult contains the allocation result for one request. +type DeviceRequestAllocationResult struct { + // Request is the name of the request in the claim which caused this + // device to be allocated. Multiple devices may have been allocated + // per request. + // + // +required + Request string `json:"request" protobuf:"bytes,1,name=request"` + + // Driver specifies the name of the DRA driver whose kubelet + // plugin should be invoked to process the allocation once the claim is + // needed on a node. + // + // Must be a DNS subdomain and should end with a DNS domain owned by the + // vendor of the driver. + // + // +required + Driver string `json:"driver" protobuf:"bytes,2,name=driver"` + + // This name together with the driver name and the device name field + // identify which device was allocated (`//`). + // + // Must not be longer than 253 characters and may contain one or more + // DNS sub-domains separated by slashes. + // + // +required + Pool string `json:"pool" protobuf:"bytes,3,name=pool"` + + // Device references one device instance via its name in the driver's + // resource pool. It must be a DNS label. + // + // +required + Device string `json:"device" protobuf:"bytes,4,name=device"` + + // AdminAccess indicates that this device was allocated for + // administrative access. See the corresponding request field + // for a definition of mode. + // + // This is an alpha field and requires enabling the DRAAdminAccess + // feature gate. Admin access is disabled if this field is unset or + // set to false, otherwise it is enabled. + // + // +optional + // +featureGate=DRAAdminAccess + AdminAccess *bool `json:"adminAccess" protobuf:"bytes,5,name=adminAccess"` +} + +// DeviceAllocationConfiguration gets embedded in an AllocationResult. +type DeviceAllocationConfiguration struct { + // Source records whether the configuration comes from a class and thus + // is not something that a normal user would have been able to set + // or from a claim. + // + // +required + Source AllocationConfigSource `json:"source" protobuf:"bytes,1,name=source"` + + // Requests lists the names of requests where the configuration applies. + // If empty, its applies to all requests. + // + // +optional + // +listType=atomic + Requests []string `json:"requests,omitempty" protobuf:"bytes,2,opt,name=requests"` + + DeviceConfiguration `json:",inline" protobuf:"bytes,3,name=deviceConfiguration"` +} + +type AllocationConfigSource string + +// Valid [DeviceAllocationConfiguration.Source] values. +const ( + AllocationConfigSourceClass = "FromClass" + AllocationConfigSourceClaim = "FromClaim" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.32 + +// ResourceClaimList is a collection of claims. +type ResourceClaimList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is the list of resource claims. 
+ Items []ResourceClaim `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.32 + +// DeviceClass is a vendor- or admin-provided resource that contains +// device configuration and selectors. It can be referenced in +// the device requests of a claim to apply these presets. +// Cluster scoped. +// +// This is an alpha type and requires enabling the DynamicResourceAllocation +// feature gate. +type DeviceClass struct { + metav1.TypeMeta `json:",inline"` + // Standard object metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Spec defines what can be allocated and how to configure it. + // + // This is mutable. Consumers have to be prepared for classes changing + // at any time, either because they get updated or replaced. Claim + // allocations are done once based on whatever was set in classes at + // the time of allocation. + // + // Changing the spec automatically increments the metadata.generation number. + Spec DeviceClassSpec `json:"spec" protobuf:"bytes,2,name=spec"` +} + +// DeviceClassSpec is used in a [DeviceClass] to define what can be allocated +// and how to configure it. +type DeviceClassSpec struct { + // Each selector must be satisfied by a device which is claimed via this class. + // + // +optional + // +listType=atomic + Selectors []DeviceSelector `json:"selectors,omitempty" protobuf:"bytes,1,opt,name=selectors"` + + // Config defines configuration parameters that apply to each device that is claimed via this class. + // Some classses may potentially be satisfied by multiple drivers, so each instance of a vendor + // configuration applies to exactly one driver. + // + // They are passed to the driver, but are not considered while allocating the claim. + // + // +optional + // +listType=atomic + Config []DeviceClassConfiguration `json:"config,omitempty" protobuf:"bytes,2,opt,name=config"` + + // SuitableNodes is tombstoned since Kubernetes 1.32 where + // it got removed. May be reused once decoding v1alpha3 is no longer + // supported. + // SuitableNodes *v1.NodeSelector `json:"suitableNodes,omitempty" protobuf:"bytes,3,opt,name=suitableNodes"` +} + +// DeviceClassConfiguration is used in DeviceClass. +type DeviceClassConfiguration struct { + DeviceConfiguration `json:",inline" protobuf:"bytes,1,opt,name=deviceConfiguration"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.32 + +// DeviceClassList is a collection of classes. +type DeviceClassList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is the list of resource classes. + Items []DeviceClass `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.32 + +// ResourceClaimTemplate is used to produce ResourceClaim objects. +// +// This is an alpha type and requires enabling the DynamicResourceAllocation +// feature gate. 
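A brief sketch of the administrative pattern mentioned in the DeviceRequest and DeviceClass documentation above: a cluster-scoped class whose selector restricts users to devices of a single (hypothetical) driver.

package drautil

import (
	resourcev1beta1 "k8s.io/api/resource/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// exampleClass can be referenced from DeviceRequest.DeviceClassName; the
// class and driver names are placeholders.
var exampleClass = resourcev1beta1.DeviceClass{
	ObjectMeta: metav1.ObjectMeta{Name: "gpu.example.com"},
	Spec: resourcev1beta1.DeviceClassSpec{
		Selectors: []resourcev1beta1.DeviceSelector{{
			CEL: &resourcev1beta1.CELDeviceSelector{
				Expression: `device.driver == "dra.example.com"`,
			},
		}},
	},
}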
+type ResourceClaimTemplate struct { + metav1.TypeMeta `json:",inline"` + // Standard object metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Describes the ResourceClaim that is to be generated. + // + // This field is immutable. A ResourceClaim will get created by the + // control plane for a Pod when needed and then not get updated + // anymore. + Spec ResourceClaimTemplateSpec `json:"spec" protobuf:"bytes,2,name=spec"` +} + +// ResourceClaimTemplateSpec contains the metadata and fields for a ResourceClaim. +type ResourceClaimTemplateSpec struct { + // ObjectMeta may contain labels and annotations that will be copied into the ResourceClaim + // when creating it. No other fields are allowed and will be rejected during + // validation. + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Spec for the ResourceClaim. The entire content is copied unchanged + // into the ResourceClaim that gets created from this template. The + // same fields as in a ResourceClaim are also valid here. + Spec ResourceClaimSpec `json:"spec" protobuf:"bytes,2,name=spec"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.32 + +// ResourceClaimTemplateList is a collection of claim templates. +type ResourceClaimTemplateList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is the list of resource claim templates. + Items []ResourceClaimTemplate `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// AllocatedDeviceStatus contains the status of an allocated device, if the +// driver chooses to report it. This may include driver-specific information. +type AllocatedDeviceStatus struct { + // Driver specifies the name of the DRA driver whose kubelet + // plugin should be invoked to process the allocation once the claim is + // needed on a node. + // + // Must be a DNS subdomain and should end with a DNS domain owned by the + // vendor of the driver. + // + // +required + Driver string `json:"driver" protobuf:"bytes,1,rep,name=driver"` + + // This name together with the driver name and the device name field + // identify which device was allocated (`//`). + // + // Must not be longer than 253 characters and may contain one or more + // DNS sub-domains separated by slashes. + // + // +required + Pool string `json:"pool" protobuf:"bytes,2,rep,name=pool"` + + // Device references one device instance via its name in the driver's + // resource pool. It must be a DNS label. + // + // +required + Device string `json:"device" protobuf:"bytes,3,rep,name=device"` + + // Conditions contains the latest observation of the device's state. + // If the device has been configured according to the class and claim + // config references, the `Ready` condition should be True. + // + // +optional + // +listType=map + // +listMapKey=type + Conditions []metav1.Condition `json:"conditions" protobuf:"bytes,4,opt,name=conditions"` + + // Data contains arbitrary driver-specific data. + // + // The length of the raw data must be smaller or equal to 10 Ki. + // + // +optional + Data runtime.RawExtension `json:"data,omitempty" protobuf:"bytes,5,opt,name=data"` + + // NetworkData contains network-related information specific to the device. 
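A hedged sketch of the status a driver might report for a configured network device via AllocatedDeviceStatus and the NetworkData field documented just below; all names and addresses are placeholders.

package drautil

import (
	resourcev1beta1 "k8s.io/api/resource/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// exampleDeviceStatus reports a Ready condition and network details for one
// allocated device, keyed by driver, pool and device name.
var exampleDeviceStatus = resourcev1beta1.AllocatedDeviceStatus{
	Driver: "dra.example.com",
	Pool:   "worker-1",
	Device: "nic-0",
	Conditions: []metav1.Condition{{
		Type:               "Ready",
		Status:             metav1.ConditionTrue,
		Reason:             "Configured",
		Message:            "device configured in the pod network namespace",
		LastTransitionTime: metav1.Now(),
	}},
	NetworkData: &resourcev1beta1.NetworkDeviceData{
		InterfaceName:   "eth1",
		IPs:             []string{"192.0.2.5/24", "2001:db8::5/64"},
		HardwareAddress: "00:00:5e:00:53:01",
	},
}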
+ // + // +optional + NetworkData *NetworkDeviceData `json:"networkData,omitempty" protobuf:"bytes,6,opt,name=networkData"` +} + +// NetworkDeviceData provides network-related details for the allocated device. +// This information may be filled by drivers or other components to configure +// or identify the device within a network context. +type NetworkDeviceData struct { + // InterfaceName specifies the name of the network interface associated with + // the allocated device. This might be the name of a physical or virtual + // network interface being configured in the pod. + // + // Must not be longer than 256 characters. + // + // +optional + InterfaceName string `json:"interfaceName,omitempty" protobuf:"bytes,1,opt,name=interfaceName"` + + // IPs lists the network addresses assigned to the device's network interface. + // This can include both IPv4 and IPv6 addresses. + // The IPs are in the CIDR notation, which includes both the address and the + // associated subnet mask. + // e.g.: "192.0.2.5/24" for IPv4 and "2001:db8::5/64" for IPv6. + // + // +optional + // +listType=atomic + IPs []string `json:"ips,omitempty" protobuf:"bytes,2,opt,name=ips"` + + // HardwareAddress represents the hardware address (e.g. MAC Address) of the device's network interface. + // + // Must not be longer than 128 characters. + // + // +optional + HardwareAddress string `json:"hardwareAddress,omitempty" protobuf:"bytes,3,opt,name=hardwareAddress"` +} diff --git a/vendor/k8s.io/api/resource/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/resource/v1beta1/types_swagger_doc_generated.go new file mode 100644 index 000000000..4ecc35d08 --- /dev/null +++ b/vendor/k8s.io/api/resource/v1beta1/types_swagger_doc_generated.go @@ -0,0 +1,386 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-codegen.sh + +// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_AllocatedDeviceStatus = map[string]string{ + "": "AllocatedDeviceStatus contains the status of an allocated device, if the driver chooses to report it. 
This may include driver-specific information.", + "driver": "Driver specifies the name of the DRA driver whose kubelet plugin should be invoked to process the allocation once the claim is needed on a node.\n\nMust be a DNS subdomain and should end with a DNS domain owned by the vendor of the driver.", + "pool": "This name together with the driver name and the device name field identify which device was allocated (`//`).\n\nMust not be longer than 253 characters and may contain one or more DNS sub-domains separated by slashes.", + "device": "Device references one device instance via its name in the driver's resource pool. It must be a DNS label.", + "conditions": "Conditions contains the latest observation of the device's state. If the device has been configured according to the class and claim config references, the `Ready` condition should be True.", + "data": "Data contains arbitrary driver-specific data.\n\nThe length of the raw data must be smaller or equal to 10 Ki.", + "networkData": "NetworkData contains network-related information specific to the device.", +} + +func (AllocatedDeviceStatus) SwaggerDoc() map[string]string { + return map_AllocatedDeviceStatus +} + +var map_AllocationResult = map[string]string{ + "": "AllocationResult contains attributes of an allocated resource.", + "devices": "Devices is the result of allocating devices.", + "nodeSelector": "NodeSelector defines where the allocated resources are available. If unset, they are available everywhere.", +} + +func (AllocationResult) SwaggerDoc() map[string]string { + return map_AllocationResult +} + +var map_BasicDevice = map[string]string{ + "": "BasicDevice defines one device instance.", + "attributes": "Attributes defines the set of attributes for this device. The name of each attribute must be unique in that set.\n\nThe maximum number of attributes and capacities combined is 32.", + "capacity": "Capacity defines the set of capacities for this device. The name of each capacity must be unique in that set.\n\nThe maximum number of attributes and capacities combined is 32.", +} + +func (BasicDevice) SwaggerDoc() map[string]string { + return map_BasicDevice +} + +var map_CELDeviceSelector = map[string]string{ + "": "CELDeviceSelector contains a CEL expression for selecting a device.", + "expression": "Expression is a CEL expression which evaluates a single device. It must evaluate to true when the device under consideration satisfies the desired criteria, and false when it does not. Any other result is an error and causes allocation of devices to abort.\n\nThe expression's input is an object named \"device\", which carries the following properties:\n - driver (string): the name of the driver which defines this device.\n - attributes (map[string]object): the device's attributes, grouped by prefix\n (e.g. device.attributes[\"dra.example.com\"] evaluates to an object with all\n of the attributes which were prefixed by \"dra.example.com\".\n - capacity (map[string]object): the device's capacities, grouped by prefix.\n\nExample: Consider a device with driver=\"dra.example.com\", which exposes two attributes named \"model\" and \"ext.example.com/family\" and which exposes one capacity named \"modules\". 
This input to this expression would have the following fields:\n\n device.driver\n device.attributes[\"dra.example.com\"].model\n device.attributes[\"ext.example.com\"].family\n device.capacity[\"dra.example.com\"].modules\n\nThe device.driver field can be used to check for a specific driver, either as a high-level precondition (i.e. you only want to consider devices from this driver) or as part of a multi-clause expression that is meant to consider devices from different drivers.\n\nThe value type of each attribute is defined by the device definition, and users who write these expressions must consult the documentation for their specific drivers. The value type of each capacity is Quantity.\n\nIf an unknown prefix is used as a lookup in either device.attributes or device.capacity, an empty map will be returned. Any reference to an unknown field will cause an evaluation error and allocation to abort.\n\nA robust expression should check for the existence of attributes before referencing them.\n\nFor ease of use, the cel.bind() function is enabled, and can be used to simplify expressions that access multiple attributes with the same domain. For example:\n\n cel.bind(dra, device.attributes[\"dra.example.com\"], dra.someBool && dra.anotherBool)\n\nThe length of the expression must be smaller or equal to 10 Ki. The cost of evaluating it is also limited based on the estimated number of logical steps.", +} + +func (CELDeviceSelector) SwaggerDoc() map[string]string { + return map_CELDeviceSelector +} + +var map_Device = map[string]string{ + "": "Device represents one individual hardware instance that can be selected based on its attributes. Besides the name, exactly one field must be set.", + "name": "Name is unique identifier among all devices managed by the driver in the pool. It must be a DNS label.", + "basic": "Basic defines one device instance.", +} + +func (Device) SwaggerDoc() map[string]string { + return map_Device +} + +var map_DeviceAllocationConfiguration = map[string]string{ + "": "DeviceAllocationConfiguration gets embedded in an AllocationResult.", + "source": "Source records whether the configuration comes from a class and thus is not something that a normal user would have been able to set or from a claim.", + "requests": "Requests lists the names of requests where the configuration applies. If empty, its applies to all requests.", +} + +func (DeviceAllocationConfiguration) SwaggerDoc() map[string]string { + return map_DeviceAllocationConfiguration +} + +var map_DeviceAllocationResult = map[string]string{ + "": "DeviceAllocationResult is the result of allocating devices.", + "results": "Results lists all allocated devices.", + "config": "This field is a combination of all the claim and class configuration parameters. Drivers can distinguish between those based on a flag.\n\nThis includes configuration parameters for drivers which have no allocated devices in the result because it is up to the drivers which configuration parameters they support. They can silently ignore unknown configuration parameters.", +} + +func (DeviceAllocationResult) SwaggerDoc() map[string]string { + return map_DeviceAllocationResult +} + +var map_DeviceAttribute = map[string]string{ + "": "DeviceAttribute must have exactly one field set.", + "int": "IntValue is a number.", + "bool": "BoolValue is a true/false value.", + "string": "StringValue is a string. Must not be longer than 64 characters.", + "version": "VersionValue is a semantic version according to semver.org spec 2.0.0. 
Must not be longer than 64 characters.", +} + +func (DeviceAttribute) SwaggerDoc() map[string]string { + return map_DeviceAttribute +} + +var map_DeviceCapacity = map[string]string{ + "": "DeviceCapacity describes a quantity associated with a device.", + "value": "Value defines how much of a certain device capacity is available.", +} + +func (DeviceCapacity) SwaggerDoc() map[string]string { + return map_DeviceCapacity +} + +var map_DeviceClaim = map[string]string{ + "": "DeviceClaim defines how to request devices with a ResourceClaim.", + "requests": "Requests represent individual requests for distinct devices which must all be satisfied. If empty, nothing needs to be allocated.", + "constraints": "These constraints must be satisfied by the set of devices that get allocated for the claim.", + "config": "This field holds configuration for multiple potential drivers which could satisfy requests in this claim. It is ignored while allocating the claim.", +} + +func (DeviceClaim) SwaggerDoc() map[string]string { + return map_DeviceClaim +} + +var map_DeviceClaimConfiguration = map[string]string{ + "": "DeviceClaimConfiguration is used for configuration parameters in DeviceClaim.", + "requests": "Requests lists the names of requests where the configuration applies. If empty, it applies to all requests.", +} + +func (DeviceClaimConfiguration) SwaggerDoc() map[string]string { + return map_DeviceClaimConfiguration +} + +var map_DeviceClass = map[string]string{ + "": "DeviceClass is a vendor- or admin-provided resource that contains device configuration and selectors. It can be referenced in the device requests of a claim to apply these presets. Cluster scoped.\n\nThis is an alpha type and requires enabling the DynamicResourceAllocation feature gate.", + "metadata": "Standard object metadata", + "spec": "Spec defines what can be allocated and how to configure it.\n\nThis is mutable. Consumers have to be prepared for classes changing at any time, either because they get updated or replaced. Claim allocations are done once based on whatever was set in classes at the time of allocation.\n\nChanging the spec automatically increments the metadata.generation number.", +} + +func (DeviceClass) SwaggerDoc() map[string]string { + return map_DeviceClass +} + +var map_DeviceClassConfiguration = map[string]string{ + "": "DeviceClassConfiguration is used in DeviceClass.", +} + +func (DeviceClassConfiguration) SwaggerDoc() map[string]string { + return map_DeviceClassConfiguration +} + +var map_DeviceClassList = map[string]string{ + "": "DeviceClassList is a collection of classes.", + "metadata": "Standard list metadata", + "items": "Items is the list of resource classes.", +} + +func (DeviceClassList) SwaggerDoc() map[string]string { + return map_DeviceClassList +} + +var map_DeviceClassSpec = map[string]string{ + "": "DeviceClassSpec is used in a [DeviceClass] to define what can be allocated and how to configure it.", + "selectors": "Each selector must be satisfied by a device which is claimed via this class.", + "config": "Config defines configuration parameters that apply to each device that is claimed via this class. 
Some classses may potentially be satisfied by multiple drivers, so each instance of a vendor configuration applies to exactly one driver.\n\nThey are passed to the driver, but are not considered while allocating the claim.", +} + +func (DeviceClassSpec) SwaggerDoc() map[string]string { + return map_DeviceClassSpec +} + +var map_DeviceConfiguration = map[string]string{ + "": "DeviceConfiguration must have exactly one field set. It gets embedded inline in some other structs which have other fields, so field names must not conflict with those.", + "opaque": "Opaque provides driver-specific configuration parameters.", +} + +func (DeviceConfiguration) SwaggerDoc() map[string]string { + return map_DeviceConfiguration +} + +var map_DeviceConstraint = map[string]string{ + "": "DeviceConstraint must have exactly one field set besides Requests.", + "requests": "Requests is a list of the one or more requests in this claim which must co-satisfy this constraint. If a request is fulfilled by multiple devices, then all of the devices must satisfy the constraint. If this is not specified, this constraint applies to all requests in this claim.", + "matchAttribute": "MatchAttribute requires that all devices in question have this attribute and that its type and value are the same across those devices.\n\nFor example, if you specified \"dra.example.com/numa\" (a hypothetical example!), then only devices in the same NUMA node will be chosen. A device which does not have that attribute will not be chosen. All devices should use a value of the same type for this attribute because that is part of its specification, but if one device doesn't, then it also will not be chosen.\n\nMust include the domain qualifier.", +} + +func (DeviceConstraint) SwaggerDoc() map[string]string { + return map_DeviceConstraint +} + +var map_DeviceRequest = map[string]string{ + "": "DeviceRequest is a request for devices required for a claim. This is typically a request for a single resource like a device, but can also ask for several identical devices.\n\nA DeviceClassName is currently required. Clients must check that it is indeed set. It's absence indicates that something changed in a way that is not supported by the client yet, in which case it must refuse to handle the request.", + "name": "Name can be used to reference this request in a pod.spec.containers[].resources.claims entry and in a constraint of the claim.\n\nMust be a DNS label.", + "deviceClassName": "DeviceClassName references a specific DeviceClass, which can define additional configuration and selectors to be inherited by this request.\n\nA class is required. Which classes are available depends on the cluster.\n\nAdministrators may use this to restrict which devices may get requested by only installing classes with selectors for permitted devices. If users are free to request anything without restrictions, then administrators can create an empty DeviceClass for users to reference.", + "selectors": "Selectors define criteria which must be satisfied by a specific device in order for that device to be considered for this request. All selectors must be satisfied for a device to be considered.", + "allocationMode": "AllocationMode and its related fields define how devices are allocated to satisfy this request. Supported values are:\n\n- ExactCount: This request is for a specific number of devices.\n This is the default. 
The exact number is provided in the\n count field.\n\n- All: This request is for all of the matching devices in a pool.\n Allocation will fail if some devices are already allocated,\n unless adminAccess is requested.\n\nIf AlloctionMode is not specified, the default mode is ExactCount. If the mode is ExactCount and count is not specified, the default count is one. Any other requests must specify this field.\n\nMore modes may get added in the future. Clients must refuse to handle requests with unknown modes.", + "count": "Count is used only when the count mode is \"ExactCount\". Must be greater than zero. If AllocationMode is ExactCount and this field is not specified, the default is one.", + "adminAccess": "AdminAccess indicates that this is a claim for administrative access to the device(s). Claims with AdminAccess are expected to be used for monitoring or other management services for a device. They ignore all ordinary claims to the device with respect to access modes and any resource allocations.\n\nThis is an alpha field and requires enabling the DRAAdminAccess feature gate. Admin access is disabled if this field is unset or set to false, otherwise it is enabled.", +} + +func (DeviceRequest) SwaggerDoc() map[string]string { + return map_DeviceRequest +} + +var map_DeviceRequestAllocationResult = map[string]string{ + "": "DeviceRequestAllocationResult contains the allocation result for one request.", + "request": "Request is the name of the request in the claim which caused this device to be allocated. Multiple devices may have been allocated per request.", + "driver": "Driver specifies the name of the DRA driver whose kubelet plugin should be invoked to process the allocation once the claim is needed on a node.\n\nMust be a DNS subdomain and should end with a DNS domain owned by the vendor of the driver.", + "pool": "This name together with the driver name and the device name field identify which device was allocated (`//`).\n\nMust not be longer than 253 characters and may contain one or more DNS sub-domains separated by slashes.", + "device": "Device references one device instance via its name in the driver's resource pool. It must be a DNS label.", + "adminAccess": "AdminAccess indicates that this device was allocated for administrative access. See the corresponding request field for a definition of mode.\n\nThis is an alpha field and requires enabling the DRAAdminAccess feature gate. Admin access is disabled if this field is unset or set to false, otherwise it is enabled.", +} + +func (DeviceRequestAllocationResult) SwaggerDoc() map[string]string { + return map_DeviceRequestAllocationResult +} + +var map_DeviceSelector = map[string]string{ + "": "DeviceSelector must have exactly one field set.", + "cel": "CEL contains a CEL expression for selecting a device.", +} + +func (DeviceSelector) SwaggerDoc() map[string]string { + return map_DeviceSelector +} + +var map_NetworkDeviceData = map[string]string{ + "": "NetworkDeviceData provides network-related details for the allocated device. This information may be filled by drivers or other components to configure or identify the device within a network context.", + "interfaceName": "InterfaceName specifies the name of the network interface associated with the allocated device. This might be the name of a physical or virtual network interface being configured in the pod.\n\nMust not be longer than 256 characters.", + "ips": "IPs lists the network addresses assigned to the device's network interface. 
This can include both IPv4 and IPv6 addresses. The IPs are in the CIDR notation, which includes both the address and the associated subnet mask. e.g.: \"192.0.2.5/24\" for IPv4 and \"2001:db8::5/64\" for IPv6.", + "hardwareAddress": "HardwareAddress represents the hardware address (e.g. MAC Address) of the device's network interface.\n\nMust not be longer than 128 characters.", +} + +func (NetworkDeviceData) SwaggerDoc() map[string]string { + return map_NetworkDeviceData +} + +var map_OpaqueDeviceConfiguration = map[string]string{ + "": "OpaqueDeviceConfiguration contains configuration parameters for a driver in a format defined by the driver vendor.", + "driver": "Driver is used to determine which kubelet plugin needs to be passed these configuration parameters.\n\nAn admission policy provided by the driver developer could use this to decide whether it needs to validate them.\n\nMust be a DNS subdomain and should end with a DNS domain owned by the vendor of the driver.", + "parameters": "Parameters can contain arbitrary data. It is the responsibility of the driver developer to handle validation and versioning. Typically this includes self-identification and a version (\"kind\" + \"apiVersion\" for Kubernetes types), with conversion between different versions.\n\nThe length of the raw data must be smaller or equal to 10 Ki.", +} + +func (OpaqueDeviceConfiguration) SwaggerDoc() map[string]string { + return map_OpaqueDeviceConfiguration +} + +var map_ResourceClaim = map[string]string{ + "": "ResourceClaim describes a request for access to resources in the cluster, for use by workloads. For example, if a workload needs an accelerator device with specific properties, this is how that request is expressed. The status stanza tracks whether this claim has been satisfied and what specific resources have been allocated.\n\nThis is an alpha type and requires enabling the DynamicResourceAllocation feature gate.", + "metadata": "Standard object metadata", + "spec": "Spec describes what is being requested and how to configure it. The spec is immutable.", + "status": "Status describes whether the claim is ready to use and what has been allocated.", +} + +func (ResourceClaim) SwaggerDoc() map[string]string { + return map_ResourceClaim +} + +var map_ResourceClaimConsumerReference = map[string]string{ + "": "ResourceClaimConsumerReference contains enough information to let you locate the consumer of a ResourceClaim. The user must be a resource in the same namespace as the ResourceClaim.", + "apiGroup": "APIGroup is the group for the resource being referenced. It is empty for the core API. 
This matches the group in the APIVersion that is used when creating the resources.", + "resource": "Resource is the type of resource being referenced, for example \"pods\".", + "name": "Name is the name of resource being referenced.", + "uid": "UID identifies exactly one incarnation of the resource.", +} + +func (ResourceClaimConsumerReference) SwaggerDoc() map[string]string { + return map_ResourceClaimConsumerReference +} + +var map_ResourceClaimList = map[string]string{ + "": "ResourceClaimList is a collection of claims.", + "metadata": "Standard list metadata", + "items": "Items is the list of resource claims.", +} + +func (ResourceClaimList) SwaggerDoc() map[string]string { + return map_ResourceClaimList +} + +var map_ResourceClaimSpec = map[string]string{ + "": "ResourceClaimSpec defines what is being requested in a ResourceClaim and how to configure it.", + "devices": "Devices defines how to request devices.", +} + +func (ResourceClaimSpec) SwaggerDoc() map[string]string { + return map_ResourceClaimSpec +} + +var map_ResourceClaimStatus = map[string]string{ + "": "ResourceClaimStatus tracks whether the resource has been allocated and what the result of that was.", + "allocation": "Allocation is set once the claim has been allocated successfully.", + "reservedFor": "ReservedFor indicates which entities are currently allowed to use the claim. A Pod which references a ResourceClaim which is not reserved for that Pod will not be started. A claim that is in use or might be in use because it has been reserved must not get deallocated.\n\nIn a cluster with multiple scheduler instances, two pods might get scheduled concurrently by different schedulers. When they reference the same ResourceClaim which already has reached its maximum number of consumers, only one pod can be scheduled.\n\nBoth schedulers try to add their pod to the claim.status.reservedFor field, but only the update that reaches the API server first gets stored. The other one fails with an error and the scheduler which issued it knows that it must put the pod back into the queue, waiting for the ResourceClaim to become usable again.\n\nThere can be at most 256 such reservations. This may get increased in the future, but not reduced.", + "devices": "Devices contains the status of each device allocated for this claim, as reported by the driver. This can include driver-specific information. Entries are owned by their respective drivers.", +} + +func (ResourceClaimStatus) SwaggerDoc() map[string]string { + return map_ResourceClaimStatus +} + +var map_ResourceClaimTemplate = map[string]string{ + "": "ResourceClaimTemplate is used to produce ResourceClaim objects.\n\nThis is an alpha type and requires enabling the DynamicResourceAllocation feature gate.", + "metadata": "Standard object metadata", + "spec": "Describes the ResourceClaim that is to be generated.\n\nThis field is immutable. 
A ResourceClaim will get created by the control plane for a Pod when needed and then not get updated anymore.", +} + +func (ResourceClaimTemplate) SwaggerDoc() map[string]string { + return map_ResourceClaimTemplate +} + +var map_ResourceClaimTemplateList = map[string]string{ + "": "ResourceClaimTemplateList is a collection of claim templates.", + "metadata": "Standard list metadata", + "items": "Items is the list of resource claim templates.", +} + +func (ResourceClaimTemplateList) SwaggerDoc() map[string]string { + return map_ResourceClaimTemplateList +} + +var map_ResourceClaimTemplateSpec = map[string]string{ + "": "ResourceClaimTemplateSpec contains the metadata and fields for a ResourceClaim.", + "metadata": "ObjectMeta may contain labels and annotations that will be copied into the ResourceClaim when creating it. No other fields are allowed and will be rejected during validation.", + "spec": "Spec for the ResourceClaim. The entire content is copied unchanged into the ResourceClaim that gets created from this template. The same fields as in a ResourceClaim are also valid here.", +} + +func (ResourceClaimTemplateSpec) SwaggerDoc() map[string]string { + return map_ResourceClaimTemplateSpec +} + +var map_ResourcePool = map[string]string{ + "": "ResourcePool describes the pool that ResourceSlices belong to.", + "name": "Name is used to identify the pool. For node-local devices, this is often the node name, but this is not required.\n\nIt must not be longer than 253 characters and must consist of one or more DNS sub-domains separated by slashes. This field is immutable.", + "generation": "Generation tracks the change in a pool over time. Whenever a driver changes something about one or more of the resources in a pool, it must change the generation in all ResourceSlices which are part of that pool. Consumers of ResourceSlices should only consider resources from the pool with the highest generation number. The generation may be reset by drivers, which should be fine for consumers, assuming that all ResourceSlices in a pool are updated to match or deleted.\n\nCombined with ResourceSliceCount, this mechanism enables consumers to detect pools which are comprised of multiple ResourceSlices and are in an incomplete state.", + "resourceSliceCount": "ResourceSliceCount is the total number of ResourceSlices in the pool at this generation number. Must be greater than zero.\n\nConsumers can use this to check whether they have seen all ResourceSlices belonging to the same pool.", +} + +func (ResourcePool) SwaggerDoc() map[string]string { + return map_ResourcePool +} + +var map_ResourceSlice = map[string]string{ + "": "ResourceSlice represents one or more resources in a pool of similar resources, managed by a common driver. A pool may span more than one ResourceSlice, and exactly how many ResourceSlices comprise a pool is determined by the driver.\n\nAt the moment, the only supported resources are devices with attributes and capacities. Each device in a given pool, regardless of how many ResourceSlices, must have a unique name. The ResourceSlice in which a device gets published may change over time. The unique identifier for a device is the tuple <driver name>, <pool name>, <device name>.\n\nWhenever a driver needs to update a pool, it increments the pool.Spec.Pool.Generation number and updates all ResourceSlices with that new number and new resource definitions. 
A consumer must only use ResourceSlices with the highest generation number and ignore all others.\n\nWhen allocating all resources in a pool matching certain criteria or when looking for the best solution among several different alternatives, a consumer should check the number of ResourceSlices in a pool (included in each ResourceSlice) to determine whether its view of a pool is complete and if not, should wait until the driver has completed updating the pool.\n\nFor resources that are not local to a node, the node name is not set. Instead, the driver may use a node selector to specify where the devices are available.\n\nThis is an alpha type and requires enabling the DynamicResourceAllocation feature gate.", + "metadata": "Standard object metadata", + "spec": "Contains the information published by the driver.\n\nChanging the spec automatically increments the metadata.generation number.", +} + +func (ResourceSlice) SwaggerDoc() map[string]string { + return map_ResourceSlice +} + +var map_ResourceSliceList = map[string]string{ + "": "ResourceSliceList is a collection of ResourceSlices.", + "metadata": "Standard list metadata", + "items": "Items is the list of resource ResourceSlices.", +} + +func (ResourceSliceList) SwaggerDoc() map[string]string { + return map_ResourceSliceList +} + +var map_ResourceSliceSpec = map[string]string{ + "": "ResourceSliceSpec contains the information published by the driver in one ResourceSlice.", + "driver": "Driver identifies the DRA driver providing the capacity information. A field selector can be used to list only ResourceSlice objects with a certain driver name.\n\nMust be a DNS subdomain and should end with a DNS domain owned by the vendor of the driver. This field is immutable.", + "pool": "Pool describes the pool that this ResourceSlice belongs to.", + "nodeName": "NodeName identifies the node which provides the resources in this pool. A field selector can be used to list only ResourceSlice objects belonging to a certain node.\n\nThis field can be used to limit access from nodes to ResourceSlices with the same node name. It also indicates to autoscalers that adding new nodes of the same type as some old node might also make new resources available.\n\nExactly one of NodeName, NodeSelector and AllNodes must be set. This field is immutable.", + "nodeSelector": "NodeSelector defines which nodes have access to the resources in the pool, when that pool is not limited to a single node.\n\nMust use exactly one term.\n\nExactly one of NodeName, NodeSelector and AllNodes must be set.", + "allNodes": "AllNodes indicates that all nodes have access to the resources in the pool.\n\nExactly one of NodeName, NodeSelector and AllNodes must be set.", + "devices": "Devices lists some or all of the devices in this pool.\n\nMust not have more than 128 entries.", +} + +func (ResourceSliceSpec) SwaggerDoc() map[string]string { + return map_ResourceSliceSpec +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/resource/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/resource/v1beta1/zz_generated.deepcopy.go new file mode 100644 index 000000000..3be61333f --- /dev/null +++ b/vendor/k8s.io/api/resource/v1beta1/zz_generated.deepcopy.go @@ -0,0 +1,882 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1beta1 + +import ( + corev1 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AllocatedDeviceStatus) DeepCopyInto(out *AllocatedDeviceStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + in.Data.DeepCopyInto(&out.Data) + if in.NetworkData != nil { + in, out := &in.NetworkData, &out.NetworkData + *out = new(NetworkDeviceData) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllocatedDeviceStatus. +func (in *AllocatedDeviceStatus) DeepCopy() *AllocatedDeviceStatus { + if in == nil { + return nil + } + out := new(AllocatedDeviceStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AllocationResult) DeepCopyInto(out *AllocationResult) { + *out = *in + in.Devices.DeepCopyInto(&out.Devices) + if in.NodeSelector != nil { + in, out := &in.NodeSelector, &out.NodeSelector + *out = new(corev1.NodeSelector) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllocationResult. +func (in *AllocationResult) DeepCopy() *AllocationResult { + if in == nil { + return nil + } + out := new(AllocationResult) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BasicDevice) DeepCopyInto(out *BasicDevice) { + *out = *in + if in.Attributes != nil { + in, out := &in.Attributes, &out.Attributes + *out = make(map[QualifiedName]DeviceAttribute, len(*in)) + for key, val := range *in { + (*out)[key] = *val.DeepCopy() + } + } + if in.Capacity != nil { + in, out := &in.Capacity, &out.Capacity + *out = make(map[QualifiedName]DeviceCapacity, len(*in)) + for key, val := range *in { + (*out)[key] = *val.DeepCopy() + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BasicDevice. +func (in *BasicDevice) DeepCopy() *BasicDevice { + if in == nil { + return nil + } + out := new(BasicDevice) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CELDeviceSelector) DeepCopyInto(out *CELDeviceSelector) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CELDeviceSelector. 
+func (in *CELDeviceSelector) DeepCopy() *CELDeviceSelector { + if in == nil { + return nil + } + out := new(CELDeviceSelector) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Device) DeepCopyInto(out *Device) { + *out = *in + if in.Basic != nil { + in, out := &in.Basic, &out.Basic + *out = new(BasicDevice) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Device. +func (in *Device) DeepCopy() *Device { + if in == nil { + return nil + } + out := new(Device) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeviceAllocationConfiguration) DeepCopyInto(out *DeviceAllocationConfiguration) { + *out = *in + if in.Requests != nil { + in, out := &in.Requests, &out.Requests + *out = make([]string, len(*in)) + copy(*out, *in) + } + in.DeviceConfiguration.DeepCopyInto(&out.DeviceConfiguration) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceAllocationConfiguration. +func (in *DeviceAllocationConfiguration) DeepCopy() *DeviceAllocationConfiguration { + if in == nil { + return nil + } + out := new(DeviceAllocationConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeviceAllocationResult) DeepCopyInto(out *DeviceAllocationResult) { + *out = *in + if in.Results != nil { + in, out := &in.Results, &out.Results + *out = make([]DeviceRequestAllocationResult, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Config != nil { + in, out := &in.Config, &out.Config + *out = make([]DeviceAllocationConfiguration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceAllocationResult. +func (in *DeviceAllocationResult) DeepCopy() *DeviceAllocationResult { + if in == nil { + return nil + } + out := new(DeviceAllocationResult) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeviceAttribute) DeepCopyInto(out *DeviceAttribute) { + *out = *in + if in.IntValue != nil { + in, out := &in.IntValue, &out.IntValue + *out = new(int64) + **out = **in + } + if in.BoolValue != nil { + in, out := &in.BoolValue, &out.BoolValue + *out = new(bool) + **out = **in + } + if in.StringValue != nil { + in, out := &in.StringValue, &out.StringValue + *out = new(string) + **out = **in + } + if in.VersionValue != nil { + in, out := &in.VersionValue, &out.VersionValue + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceAttribute. +func (in *DeviceAttribute) DeepCopy() *DeviceAttribute { + if in == nil { + return nil + } + out := new(DeviceAttribute) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DeviceCapacity) DeepCopyInto(out *DeviceCapacity) { + *out = *in + out.Value = in.Value.DeepCopy() + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceCapacity. +func (in *DeviceCapacity) DeepCopy() *DeviceCapacity { + if in == nil { + return nil + } + out := new(DeviceCapacity) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeviceClaim) DeepCopyInto(out *DeviceClaim) { + *out = *in + if in.Requests != nil { + in, out := &in.Requests, &out.Requests + *out = make([]DeviceRequest, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Constraints != nil { + in, out := &in.Constraints, &out.Constraints + *out = make([]DeviceConstraint, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Config != nil { + in, out := &in.Config, &out.Config + *out = make([]DeviceClaimConfiguration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceClaim. +func (in *DeviceClaim) DeepCopy() *DeviceClaim { + if in == nil { + return nil + } + out := new(DeviceClaim) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeviceClaimConfiguration) DeepCopyInto(out *DeviceClaimConfiguration) { + *out = *in + if in.Requests != nil { + in, out := &in.Requests, &out.Requests + *out = make([]string, len(*in)) + copy(*out, *in) + } + in.DeviceConfiguration.DeepCopyInto(&out.DeviceConfiguration) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceClaimConfiguration. +func (in *DeviceClaimConfiguration) DeepCopy() *DeviceClaimConfiguration { + if in == nil { + return nil + } + out := new(DeviceClaimConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeviceClass) DeepCopyInto(out *DeviceClass) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceClass. +func (in *DeviceClass) DeepCopy() *DeviceClass { + if in == nil { + return nil + } + out := new(DeviceClass) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DeviceClass) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeviceClassConfiguration) DeepCopyInto(out *DeviceClassConfiguration) { + *out = *in + in.DeviceConfiguration.DeepCopyInto(&out.DeviceConfiguration) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceClassConfiguration. 
+func (in *DeviceClassConfiguration) DeepCopy() *DeviceClassConfiguration { + if in == nil { + return nil + } + out := new(DeviceClassConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeviceClassList) DeepCopyInto(out *DeviceClassList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]DeviceClass, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceClassList. +func (in *DeviceClassList) DeepCopy() *DeviceClassList { + if in == nil { + return nil + } + out := new(DeviceClassList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DeviceClassList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeviceClassSpec) DeepCopyInto(out *DeviceClassSpec) { + *out = *in + if in.Selectors != nil { + in, out := &in.Selectors, &out.Selectors + *out = make([]DeviceSelector, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Config != nil { + in, out := &in.Config, &out.Config + *out = make([]DeviceClassConfiguration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceClassSpec. +func (in *DeviceClassSpec) DeepCopy() *DeviceClassSpec { + if in == nil { + return nil + } + out := new(DeviceClassSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeviceConfiguration) DeepCopyInto(out *DeviceConfiguration) { + *out = *in + if in.Opaque != nil { + in, out := &in.Opaque, &out.Opaque + *out = new(OpaqueDeviceConfiguration) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceConfiguration. +func (in *DeviceConfiguration) DeepCopy() *DeviceConfiguration { + if in == nil { + return nil + } + out := new(DeviceConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeviceConstraint) DeepCopyInto(out *DeviceConstraint) { + *out = *in + if in.Requests != nil { + in, out := &in.Requests, &out.Requests + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.MatchAttribute != nil { + in, out := &in.MatchAttribute, &out.MatchAttribute + *out = new(FullyQualifiedName) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceConstraint. +func (in *DeviceConstraint) DeepCopy() *DeviceConstraint { + if in == nil { + return nil + } + out := new(DeviceConstraint) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DeviceRequest) DeepCopyInto(out *DeviceRequest) { + *out = *in + if in.Selectors != nil { + in, out := &in.Selectors, &out.Selectors + *out = make([]DeviceSelector, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.AdminAccess != nil { + in, out := &in.AdminAccess, &out.AdminAccess + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceRequest. +func (in *DeviceRequest) DeepCopy() *DeviceRequest { + if in == nil { + return nil + } + out := new(DeviceRequest) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeviceRequestAllocationResult) DeepCopyInto(out *DeviceRequestAllocationResult) { + *out = *in + if in.AdminAccess != nil { + in, out := &in.AdminAccess, &out.AdminAccess + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceRequestAllocationResult. +func (in *DeviceRequestAllocationResult) DeepCopy() *DeviceRequestAllocationResult { + if in == nil { + return nil + } + out := new(DeviceRequestAllocationResult) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeviceSelector) DeepCopyInto(out *DeviceSelector) { + *out = *in + if in.CEL != nil { + in, out := &in.CEL, &out.CEL + *out = new(CELDeviceSelector) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceSelector. +func (in *DeviceSelector) DeepCopy() *DeviceSelector { + if in == nil { + return nil + } + out := new(DeviceSelector) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkDeviceData) DeepCopyInto(out *NetworkDeviceData) { + *out = *in + if in.IPs != nil { + in, out := &in.IPs, &out.IPs + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkDeviceData. +func (in *NetworkDeviceData) DeepCopy() *NetworkDeviceData { + if in == nil { + return nil + } + out := new(NetworkDeviceData) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OpaqueDeviceConfiguration) DeepCopyInto(out *OpaqueDeviceConfiguration) { + *out = *in + in.Parameters.DeepCopyInto(&out.Parameters) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpaqueDeviceConfiguration. +func (in *OpaqueDeviceConfiguration) DeepCopy() *OpaqueDeviceConfiguration { + if in == nil { + return nil + } + out := new(OpaqueDeviceConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceClaim) DeepCopyInto(out *ResourceClaim) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceClaim. 
+func (in *ResourceClaim) DeepCopy() *ResourceClaim { + if in == nil { + return nil + } + out := new(ResourceClaim) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ResourceClaim) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceClaimConsumerReference) DeepCopyInto(out *ResourceClaimConsumerReference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceClaimConsumerReference. +func (in *ResourceClaimConsumerReference) DeepCopy() *ResourceClaimConsumerReference { + if in == nil { + return nil + } + out := new(ResourceClaimConsumerReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceClaimList) DeepCopyInto(out *ResourceClaimList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ResourceClaim, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceClaimList. +func (in *ResourceClaimList) DeepCopy() *ResourceClaimList { + if in == nil { + return nil + } + out := new(ResourceClaimList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ResourceClaimList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceClaimSpec) DeepCopyInto(out *ResourceClaimSpec) { + *out = *in + in.Devices.DeepCopyInto(&out.Devices) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceClaimSpec. +func (in *ResourceClaimSpec) DeepCopy() *ResourceClaimSpec { + if in == nil { + return nil + } + out := new(ResourceClaimSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceClaimStatus) DeepCopyInto(out *ResourceClaimStatus) { + *out = *in + if in.Allocation != nil { + in, out := &in.Allocation, &out.Allocation + *out = new(AllocationResult) + (*in).DeepCopyInto(*out) + } + if in.ReservedFor != nil { + in, out := &in.ReservedFor, &out.ReservedFor + *out = make([]ResourceClaimConsumerReference, len(*in)) + copy(*out, *in) + } + if in.Devices != nil { + in, out := &in.Devices, &out.Devices + *out = make([]AllocatedDeviceStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceClaimStatus. +func (in *ResourceClaimStatus) DeepCopy() *ResourceClaimStatus { + if in == nil { + return nil + } + out := new(ResourceClaimStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ResourceClaimTemplate) DeepCopyInto(out *ResourceClaimTemplate) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceClaimTemplate. +func (in *ResourceClaimTemplate) DeepCopy() *ResourceClaimTemplate { + if in == nil { + return nil + } + out := new(ResourceClaimTemplate) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ResourceClaimTemplate) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceClaimTemplateList) DeepCopyInto(out *ResourceClaimTemplateList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ResourceClaimTemplate, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceClaimTemplateList. +func (in *ResourceClaimTemplateList) DeepCopy() *ResourceClaimTemplateList { + if in == nil { + return nil + } + out := new(ResourceClaimTemplateList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ResourceClaimTemplateList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceClaimTemplateSpec) DeepCopyInto(out *ResourceClaimTemplateSpec) { + *out = *in + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceClaimTemplateSpec. +func (in *ResourceClaimTemplateSpec) DeepCopy() *ResourceClaimTemplateSpec { + if in == nil { + return nil + } + out := new(ResourceClaimTemplateSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourcePool) DeepCopyInto(out *ResourcePool) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourcePool. +func (in *ResourcePool) DeepCopy() *ResourcePool { + if in == nil { + return nil + } + out := new(ResourcePool) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceSlice) DeepCopyInto(out *ResourceSlice) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceSlice. +func (in *ResourceSlice) DeepCopy() *ResourceSlice { + if in == nil { + return nil + } + out := new(ResourceSlice) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
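These generated DeepCopy helpers are what controllers rely on to avoid mutating objects held in shared informer caches. A hedged sketch of typical usage follows; markAllocated is a hypothetical helper, not part of this repository.

package main

import (
	resourcev1beta1 "k8s.io/api/resource/v1beta1"
)

// markAllocated works on a private copy of a claim taken from a shared
// informer cache: DeepCopy (generated above) leaves the cached object
// untouched, and DeepCopyObject lets the copy travel through generic
// runtime.Object plumbing.
func markAllocated(cached *resourcev1beta1.ResourceClaim) *resourcev1beta1.ResourceClaim {
	claim := cached.DeepCopy()
	claim.Status.Allocation = &resourcev1beta1.AllocationResult{}
	return claim
}
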
+func (in *ResourceSlice) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceSliceList) DeepCopyInto(out *ResourceSliceList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ResourceSlice, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceSliceList. +func (in *ResourceSliceList) DeepCopy() *ResourceSliceList { + if in == nil { + return nil + } + out := new(ResourceSliceList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ResourceSliceList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceSliceSpec) DeepCopyInto(out *ResourceSliceSpec) { + *out = *in + out.Pool = in.Pool + if in.NodeSelector != nil { + in, out := &in.NodeSelector, &out.NodeSelector + *out = new(corev1.NodeSelector) + (*in).DeepCopyInto(*out) + } + if in.Devices != nil { + in, out := &in.Devices, &out.Devices + *out = make([]Device, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceSliceSpec. +func (in *ResourceSliceSpec) DeepCopy() *ResourceSliceSpec { + if in == nil { + return nil + } + out := new(ResourceSliceSpec) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/api/resource/v1beta1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/resource/v1beta1/zz_generated.prerelease-lifecycle.go new file mode 100644 index 000000000..b79111b81 --- /dev/null +++ b/vendor/k8s.io/api/resource/v1beta1/zz_generated.prerelease-lifecycle.go @@ -0,0 +1,166 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by prerelease-lifecycle-gen. DO NOT EDIT. + +package v1beta1 + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *DeviceClass) APILifecycleIntroduced() (major, minor int) { + return 1, 32 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. 
+// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *DeviceClass) APILifecycleDeprecated() (major, minor int) { + return 1, 35 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *DeviceClass) APILifecycleRemoved() (major, minor int) { + return 1, 38 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *DeviceClassList) APILifecycleIntroduced() (major, minor int) { + return 1, 32 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *DeviceClassList) APILifecycleDeprecated() (major, minor int) { + return 1, 35 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *DeviceClassList) APILifecycleRemoved() (major, minor int) { + return 1, 38 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *ResourceClaim) APILifecycleIntroduced() (major, minor int) { + return 1, 32 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *ResourceClaim) APILifecycleDeprecated() (major, minor int) { + return 1, 35 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *ResourceClaim) APILifecycleRemoved() (major, minor int) { + return 1, 38 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *ResourceClaimList) APILifecycleIntroduced() (major, minor int) { + return 1, 32 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. 
+// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *ResourceClaimList) APILifecycleDeprecated() (major, minor int) { + return 1, 35 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *ResourceClaimList) APILifecycleRemoved() (major, minor int) { + return 1, 38 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *ResourceClaimTemplate) APILifecycleIntroduced() (major, minor int) { + return 1, 32 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *ResourceClaimTemplate) APILifecycleDeprecated() (major, minor int) { + return 1, 35 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *ResourceClaimTemplate) APILifecycleRemoved() (major, minor int) { + return 1, 38 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *ResourceClaimTemplateList) APILifecycleIntroduced() (major, minor int) { + return 1, 32 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *ResourceClaimTemplateList) APILifecycleDeprecated() (major, minor int) { + return 1, 35 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *ResourceClaimTemplateList) APILifecycleRemoved() (major, minor int) { + return 1, 38 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *ResourceSlice) APILifecycleIntroduced() (major, minor int) { + return 1, 32 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. 
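The lifecycle methods generated in this file let callers reason about when the v1beta1 resource API will be deprecated or removed. A hedged sketch of one possible use; warnIfDeprecated and its version arguments are illustrative, not an existing API.

package main

import (
	"fmt"

	resourcev1beta1 "k8s.io/api/resource/v1beta1"
)

// warnIfDeprecated compares the generated lifecycle data for ResourceClaim
// against a caller-supplied cluster version.
func warnIfDeprecated(serverMajor, serverMinor int) {
	var rc resourcev1beta1.ResourceClaim
	depMajor, depMinor := rc.APILifecycleDeprecated()
	if serverMajor > depMajor || (serverMajor == depMajor && serverMinor >= depMinor) {
		fmt.Printf("resource.k8s.io/v1beta1 ResourceClaim is deprecated as of %d.%d\n", depMajor, depMinor)
	}
}
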
+// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *ResourceSlice) APILifecycleDeprecated() (major, minor int) { + return 1, 35 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *ResourceSlice) APILifecycleRemoved() (major, minor int) { + return 1, 38 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *ResourceSliceList) APILifecycleIntroduced() (major, minor int) { + return 1, 32 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *ResourceSliceList) APILifecycleDeprecated() (major, minor int) { + return 1, 35 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *ResourceSliceList) APILifecycleRemoved() (major, minor int) { + return 1, 38 +} diff --git a/vendor/k8s.io/api/storage/v1/generated.proto b/vendor/k8s.io/api/storage/v1/generated.proto index ec2beac46..dfc309bb4 100644 --- a/vendor/k8s.io/api/storage/v1/generated.proto +++ b/vendor/k8s.io/api/storage/v1/generated.proto @@ -491,8 +491,8 @@ message VolumeAttachmentList { } // VolumeAttachmentSource represents a volume that should be attached. -// Right now only PersistenVolumes can be attached via external attacher, -// in future we may allow also inline volumes in pods. +// Right now only PersistentVolumes can be attached via external attacher, +// in the future we may allow also inline volumes in pods. // Exactly one member can be set. message VolumeAttachmentSource { // persistentVolumeName represents the name of the persistent volume to attach. diff --git a/vendor/k8s.io/api/storage/v1/types.go b/vendor/k8s.io/api/storage/v1/types.go index de2bbc2e0..3936dc83b 100644 --- a/vendor/k8s.io/api/storage/v1/types.go +++ b/vendor/k8s.io/api/storage/v1/types.go @@ -169,8 +169,8 @@ type VolumeAttachmentSpec struct { } // VolumeAttachmentSource represents a volume that should be attached. -// Right now only PersistenVolumes can be attached via external attacher, -// in future we may allow also inline volumes in pods. +// Right now only PersistentVolumes can be attached via external attacher, +// in the future we may allow also inline volumes in pods. // Exactly one member can be set. type VolumeAttachmentSource struct { // persistentVolumeName represents the name of the persistent volume to attach. @@ -433,7 +433,7 @@ const ( // ReadWriteOnceWithFSTypeFSGroupPolicy indicates that each volume will be examined // to determine if the volume ownership and permissions // should be modified. 
If a fstype is defined and the volume's access mode - // contains ReadWriteOnce, then the defined fsGroup will be applied. + // contains ReadWriteOnce or ReadWriteOncePod, then the defined fsGroup will be applied. // This mode should be defined if it's expected that the // fsGroup may need to be modified depending on the pod's SecurityPolicy. // This is the default behavior if no other FSGroupPolicy is defined. diff --git a/vendor/k8s.io/api/storage/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/storage/v1/types_swagger_doc_generated.go index 89b1cbb20..eee18bd18 100644 --- a/vendor/k8s.io/api/storage/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/storage/v1/types_swagger_doc_generated.go @@ -185,7 +185,7 @@ func (VolumeAttachmentList) SwaggerDoc() map[string]string { } var map_VolumeAttachmentSource = map[string]string{ - "": "VolumeAttachmentSource represents a volume that should be attached. Right now only PersistenVolumes can be attached via external attacher, in future we may allow also inline volumes in pods. Exactly one member can be set.", + "": "VolumeAttachmentSource represents a volume that should be attached. Right now only PersistentVolumes can be attached via external attacher, in the future we may allow also inline volumes in pods. Exactly one member can be set.", "persistentVolumeName": "persistentVolumeName represents the name of the persistent volume to attach.", } diff --git a/vendor/k8s.io/api/storage/v1alpha1/generated.proto b/vendor/k8s.io/api/storage/v1alpha1/generated.proto index 380adbf66..79acbebd8 100644 --- a/vendor/k8s.io/api/storage/v1alpha1/generated.proto +++ b/vendor/k8s.io/api/storage/v1alpha1/generated.proto @@ -155,8 +155,8 @@ message VolumeAttachmentList { } // VolumeAttachmentSource represents a volume that should be attached. -// Right now only PersistenVolumes can be attached via external attacher, -// in future we may allow also inline volumes in pods. +// Right now only PersistentVolumes can be attached via external attacher, +// in the future we may allow also inline volumes in pods. // Exactly one member can be set. message VolumeAttachmentSource { // persistentVolumeName represents the name of the persistent volume to attach. diff --git a/vendor/k8s.io/api/storage/v1alpha1/types.go b/vendor/k8s.io/api/storage/v1alpha1/types.go index 1fbf65f81..7ef7353eb 100644 --- a/vendor/k8s.io/api/storage/v1alpha1/types.go +++ b/vendor/k8s.io/api/storage/v1alpha1/types.go @@ -84,8 +84,8 @@ type VolumeAttachmentSpec struct { } // VolumeAttachmentSource represents a volume that should be attached. -// Right now only PersistenVolumes can be attached via external attacher, -// in future we may allow also inline volumes in pods. +// Right now only PersistentVolumes can be attached via external attacher, +// in the future we may allow also inline volumes in pods. // Exactly one member can be set. type VolumeAttachmentSource struct { // persistentVolumeName represents the name of the persistent volume to attach. diff --git a/vendor/k8s.io/api/storage/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/storage/v1alpha1/types_swagger_doc_generated.go index ac87dbdca..e44f37b2d 100644 --- a/vendor/k8s.io/api/storage/v1alpha1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/storage/v1alpha1/types_swagger_doc_generated.go @@ -72,7 +72,7 @@ func (VolumeAttachmentList) SwaggerDoc() map[string]string { } var map_VolumeAttachmentSource = map[string]string{ - "": "VolumeAttachmentSource represents a volume that should be attached. 
Right now only PersistenVolumes can be attached via external attacher, in future we may allow also inline volumes in pods. Exactly one member can be set.", + "": "VolumeAttachmentSource represents a volume that should be attached. Right now only PersistentVolumes can be attached via external attacher, in the future we may allow also inline volumes in pods. Exactly one member can be set.", "persistentVolumeName": "persistentVolumeName represents the name of the persistent volume to attach.", } diff --git a/vendor/k8s.io/api/storage/v1beta1/generated.proto b/vendor/k8s.io/api/storage/v1beta1/generated.proto index dfef3f6cc..64dcc8262 100644 --- a/vendor/k8s.io/api/storage/v1beta1/generated.proto +++ b/vendor/k8s.io/api/storage/v1beta1/generated.proto @@ -493,8 +493,8 @@ message VolumeAttachmentList { } // VolumeAttachmentSource represents a volume that should be attached. -// Right now only PersistenVolumes can be attached via external attacher, -// in future we may allow also inline volumes in pods. +// Right now only PersistentVolumes can be attached via external attacher, +// in the future we may allow also inline volumes in pods. // Exactly one member can be set. message VolumeAttachmentSource { // persistentVolumeName represents the name of the persistent volume to attach. diff --git a/vendor/k8s.io/api/storage/v1beta1/types.go b/vendor/k8s.io/api/storage/v1beta1/types.go index ce294e3db..d9b6b7685 100644 --- a/vendor/k8s.io/api/storage/v1beta1/types.go +++ b/vendor/k8s.io/api/storage/v1beta1/types.go @@ -176,8 +176,8 @@ type VolumeAttachmentSpec struct { } // VolumeAttachmentSource represents a volume that should be attached. -// Right now only PersistenVolumes can be attached via external attacher, -// in future we may allow also inline volumes in pods. +// Right now only PersistentVolumes can be attached via external attacher, +// in the future we may allow also inline volumes in pods. // Exactly one member can be set. type VolumeAttachmentSource struct { // persistentVolumeName represents the name of the persistent volume to attach. diff --git a/vendor/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go index 8c1a66350..58da44fc8 100644 --- a/vendor/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go @@ -185,7 +185,7 @@ func (VolumeAttachmentList) SwaggerDoc() map[string]string { } var map_VolumeAttachmentSource = map[string]string{ - "": "VolumeAttachmentSource represents a volume that should be attached. Right now only PersistenVolumes can be attached via external attacher, in future we may allow also inline volumes in pods. Exactly one member can be set.", + "": "VolumeAttachmentSource represents a volume that should be attached. Right now only PersistentVolumes can be attached via external attacher, in the future we may allow also inline volumes in pods. 
Exactly one member can be set.", "persistentVolumeName": "persistentVolumeName represents the name of the persistent volume to attach.", } diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/marshal.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/marshal.go index 321bec385..6ade24a82 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/marshal.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/marshal.go @@ -20,12 +20,42 @@ import ( "bytes" "errors" + cbor "k8s.io/apimachinery/pkg/runtime/serializer/cbor/direct" "k8s.io/apimachinery/pkg/util/json" ) var jsTrue = []byte("true") var jsFalse = []byte("false") +// The CBOR parsing related constants and functions below are not exported so they can be +// easily removed at a future date when the CBOR library provides equivalent functionality. + +type cborMajorType int + +const ( + // https://www.rfc-editor.org/rfc/rfc8949.html#section-3.1 + cborUnsignedInteger cborMajorType = 0 + cborNegativeInteger cborMajorType = 1 + cborByteString cborMajorType = 2 + cborTextString cborMajorType = 3 + cborArray cborMajorType = 4 + cborMap cborMajorType = 5 + cborTag cborMajorType = 6 + cborOther cborMajorType = 7 +) + +const ( + // from https://www.rfc-editor.org/rfc/rfc8949.html#name-jump-table-for-initial-byte. + // additionally, see https://www.rfc-editor.org/rfc/rfc8949.html#section-3.3-5. + cborFalseValue = 0xf4 + cborTrueValue = 0xf5 + cborNullValue = 0xf6 +) + +func cborType(b byte) cborMajorType { + return cborMajorType(b >> 5) +} + func (s JSONSchemaPropsOrBool) MarshalJSON() ([]byte, error) { if s.Schema != nil { return json.Marshal(s.Schema) @@ -59,6 +89,39 @@ func (s *JSONSchemaPropsOrBool) UnmarshalJSON(data []byte) error { return nil } +func (s JSONSchemaPropsOrBool) MarshalCBOR() ([]byte, error) { + if s.Schema != nil { + return cbor.Marshal(s.Schema) + } + return cbor.Marshal(s.Allows) +} + +func (s *JSONSchemaPropsOrBool) UnmarshalCBOR(data []byte) error { + switch { + case len(data) == 0: + // ideally we would avoid modifying *s here, but we are matching the behavior of UnmarshalJSON + *s = JSONSchemaPropsOrBool{} + return nil + case cborType(data[0]) == cborMap: + var p JSONSchemaProps + if err := cbor.Unmarshal(data, &p); err != nil { + return err + } + *s = JSONSchemaPropsOrBool{Allows: true, Schema: &p} + return nil + case data[0] == cborTrueValue: + *s = JSONSchemaPropsOrBool{Allows: true} + return nil + case data[0] == cborFalseValue: + *s = JSONSchemaPropsOrBool{Allows: false} + return nil + default: + // ideally, this case would not also capture a null input value, + // but we are matching the behavior of the UnmarshalJSON + return errors.New("boolean or JSON schema expected") + } +} + func (s JSONSchemaPropsOrStringArray) MarshalJSON() ([]byte, error) { if len(s.Property) > 0 { return json.Marshal(s.Property) @@ -91,6 +154,40 @@ func (s *JSONSchemaPropsOrStringArray) UnmarshalJSON(data []byte) error { return nil } +func (s JSONSchemaPropsOrStringArray) MarshalCBOR() ([]byte, error) { + if len(s.Property) > 0 { + return cbor.Marshal(s.Property) + } + if s.Schema != nil { + return cbor.Marshal(s.Schema) + } + return cbor.Marshal(nil) +} + +func (s *JSONSchemaPropsOrStringArray) UnmarshalCBOR(data []byte) error { + if len(data) > 0 && cborType(data[0]) == cborArray { + var a []string + if err := cbor.Unmarshal(data, &a); err != nil { + return err + } + *s = JSONSchemaPropsOrStringArray{Property: a} + return nil + } + if len(data) > 0 && 
cborType(data[0]) == cborMap { + var p JSONSchemaProps + if err := cbor.Unmarshal(data, &p); err != nil { + return err + } + *s = JSONSchemaPropsOrStringArray{Schema: &p} + return nil + } + // At this point we either have: empty data, a null value, or an + // unexpected type. In order to match the behavior of the existing + // UnmarshalJSON, no error is returned and *s is overwritten here. + *s = JSONSchemaPropsOrStringArray{} + return nil +} + func (s JSONSchemaPropsOrArray) MarshalJSON() ([]byte, error) { if len(s.JSONSchemas) > 0 { return json.Marshal(s.JSONSchemas) @@ -120,6 +217,37 @@ func (s *JSONSchemaPropsOrArray) UnmarshalJSON(data []byte) error { return nil } +func (s JSONSchemaPropsOrArray) MarshalCBOR() ([]byte, error) { + if len(s.JSONSchemas) > 0 { + return cbor.Marshal(s.JSONSchemas) + } + return cbor.Marshal(s.Schema) +} + +func (s *JSONSchemaPropsOrArray) UnmarshalCBOR(data []byte) error { + if len(data) > 0 && cborType(data[0]) == cborMap { + var p JSONSchemaProps + if err := cbor.Unmarshal(data, &p); err != nil { + return err + } + *s = JSONSchemaPropsOrArray{Schema: &p} + return nil + } + if len(data) > 0 && cborType(data[0]) == cborArray { + var a []JSONSchemaProps + if err := cbor.Unmarshal(data, &a); err != nil { + return err + } + *s = JSONSchemaPropsOrArray{JSONSchemas: a} + return nil + } + // At this point we either have: empty data, a null value, or an + // unexpected type. In order to match the behavior of the existing + // UnmarshalJSON, no error is returned and *s is overwritten here. + *s = JSONSchemaPropsOrArray{} + return nil +} + func (s JSON) MarshalJSON() ([]byte, error) { if len(s.Raw) > 0 { return s.Raw, nil @@ -134,3 +262,34 @@ func (s *JSON) UnmarshalJSON(data []byte) error { } return nil } + +func (s JSON) MarshalCBOR() ([]byte, error) { + // Note that non-semantic whitespace is lost during the transcoding performed here. + // We do not forsee this to be a problem given the current known uses of this type. + // Other limitations that arise when roundtripping JSON via dynamic clients also apply + // here, for example: insignificant whitespace handling, number handling, and map key ordering. + if len(s.Raw) == 0 { + return []byte{cborNullValue}, nil + } + var u any + if err := json.Unmarshal(s.Raw, &u); err != nil { + return nil, err + } + return cbor.Marshal(u) +} + +func (s *JSON) UnmarshalCBOR(data []byte) error { + if len(data) == 0 || data[0] == cborNullValue { + return nil + } + var u any + if err := cbor.Unmarshal(data, &u); err != nil { + return err + } + raw, err := json.Marshal(u) + if err != nil { + return err + } + s.Raw = raw + return nil +} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/marshal.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/marshal.go index 43b903878..5e6e82532 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/marshal.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/marshal.go @@ -20,12 +20,40 @@ import ( "bytes" "errors" + cbor "k8s.io/apimachinery/pkg/runtime/serializer/cbor/direct" "k8s.io/apimachinery/pkg/util/json" ) var jsTrue = []byte("true") var jsFalse = []byte("false") +// The CBOR parsing related constants and functions below are not exported so they can be +// easily removed at a future date when the CBOR library provides equivalent functionality. 
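The unexported helpers introduced in this hunk classify a CBOR item by its initial byte: the major type is the top three bits, and false/true/null are the fixed simple values 0xf4/0xf5/0xf6 (RFC 8949). A self-contained sketch mirroring that logic outside the vendored file; describeCBORHead is an illustrative name.

package main

import "fmt"

// describeCBORHead mirrors the unexported helpers above: false/true/null
// are fixed simple values, everything else is classified by the major
// type carried in the top three bits of the initial byte.
func describeCBORHead(b byte) string {
	switch b {
	case 0xf4:
		return "false"
	case 0xf5:
		return "true"
	case 0xf6:
		return "null"
	}
	switch b >> 5 {
	case 4:
		return "array"
	case 5:
		return "map"
	default:
		return fmt.Sprintf("other major type %d", b>>5)
	}
}
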
+ +type cborMajorType int + +const ( + // https://www.rfc-editor.org/rfc/rfc8949.html#section-3.1 + cborUnsignedInteger cborMajorType = 0 + cborNegativeInteger cborMajorType = 1 + cborByteString cborMajorType = 2 + cborTextString cborMajorType = 3 + cborArray cborMajorType = 4 + cborMap cborMajorType = 5 + cborTag cborMajorType = 6 + cborOther cborMajorType = 7 +) + +const ( + cborFalseValue = 0xf4 + cborTrueValue = 0xf5 + cborNullValue = 0xf6 +) + +func cborType(b byte) cborMajorType { + return cborMajorType(b >> 5) +} + func (s JSONSchemaPropsOrBool) MarshalJSON() ([]byte, error) { if s.Schema != nil { return json.Marshal(s.Schema) @@ -59,6 +87,39 @@ func (s *JSONSchemaPropsOrBool) UnmarshalJSON(data []byte) error { return nil } +func (s JSONSchemaPropsOrBool) MarshalCBOR() ([]byte, error) { + if s.Schema != nil { + return cbor.Marshal(s.Schema) + } + return cbor.Marshal(s.Allows) +} + +func (s *JSONSchemaPropsOrBool) UnmarshalCBOR(data []byte) error { + switch { + case len(data) == 0: + // ideally we would avoid modifying *s here, but we are matching the behavior of UnmarshalJSON + *s = JSONSchemaPropsOrBool{} + return nil + case cborType(data[0]) == cborMap: + var p JSONSchemaProps + if err := cbor.Unmarshal(data, &p); err != nil { + return err + } + *s = JSONSchemaPropsOrBool{Allows: true, Schema: &p} + return nil + case data[0] == cborTrueValue: + *s = JSONSchemaPropsOrBool{Allows: true} + return nil + case data[0] == cborFalseValue: + *s = JSONSchemaPropsOrBool{Allows: false} + return nil + default: + // ideally, this case would not also capture a null input value, + // but we are matching the behavior of the UnmarshalJSON + return errors.New("boolean or JSON schema expected") + } +} + func (s JSONSchemaPropsOrStringArray) MarshalJSON() ([]byte, error) { if len(s.Property) > 0 { return json.Marshal(s.Property) @@ -91,6 +152,40 @@ func (s *JSONSchemaPropsOrStringArray) UnmarshalJSON(data []byte) error { return nil } +func (s JSONSchemaPropsOrStringArray) MarshalCBOR() ([]byte, error) { + if len(s.Property) > 0 { + return cbor.Marshal(s.Property) + } + if s.Schema != nil { + return cbor.Marshal(s.Schema) + } + return cbor.Marshal(nil) +} + +func (s *JSONSchemaPropsOrStringArray) UnmarshalCBOR(data []byte) error { + if len(data) > 0 && cborType(data[0]) == cborArray { + var a []string + if err := cbor.Unmarshal(data, &a); err != nil { + return err + } + *s = JSONSchemaPropsOrStringArray{Property: a} + return nil + } + if len(data) > 0 && cborType(data[0]) == cborMap { + var p JSONSchemaProps + if err := cbor.Unmarshal(data, &p); err != nil { + return err + } + *s = JSONSchemaPropsOrStringArray{Schema: &p} + return nil + } + // At this point we either have: empty data, a null value, or an + // unexpected type. In order to match the behavior of the existing + // UnmarshalJSON, no error is returned and *s is overwritten here. 
+ *s = JSONSchemaPropsOrStringArray{} + return nil +} + func (s JSONSchemaPropsOrArray) MarshalJSON() ([]byte, error) { if len(s.JSONSchemas) > 0 { return json.Marshal(s.JSONSchemas) @@ -120,6 +215,37 @@ func (s *JSONSchemaPropsOrArray) UnmarshalJSON(data []byte) error { return nil } +func (s JSONSchemaPropsOrArray) MarshalCBOR() ([]byte, error) { + if len(s.JSONSchemas) > 0 { + return cbor.Marshal(s.JSONSchemas) + } + return cbor.Marshal(s.Schema) +} + +func (s *JSONSchemaPropsOrArray) UnmarshalCBOR(data []byte) error { + if len(data) > 0 && cborType(data[0]) == cborMap { + var p JSONSchemaProps + if err := cbor.Unmarshal(data, &p); err != nil { + return err + } + *s = JSONSchemaPropsOrArray{Schema: &p} + return nil + } + if len(data) > 0 && cborType(data[0]) == cborArray { + var a []JSONSchemaProps + if err := cbor.Unmarshal(data, &a); err != nil { + return err + } + *s = JSONSchemaPropsOrArray{JSONSchemas: a} + return nil + } + // At this point we either have: empty data, a null value, or an + // unexpected type. In order to match the behavior of the existing + // UnmarshalJSON, no error is returned and *s is overwritten here. + *s = JSONSchemaPropsOrArray{} + return nil +} + func (s JSON) MarshalJSON() ([]byte, error) { if len(s.Raw) > 0 { return s.Raw, nil @@ -134,3 +260,34 @@ func (s *JSON) UnmarshalJSON(data []byte) error { } return nil } + +func (s JSON) MarshalCBOR() ([]byte, error) { + // Note that non-semantic whitespace is lost during the transcoding performed here. + // We do not forsee this to be a problem given the current known uses of this type. + // Other limitations that arise when roundtripping JSON via dynamic clients also apply + // here, for example: insignificant whitespace handling, number handling, and map key ordering. + if len(s.Raw) == 0 { + return []byte{cborNullValue}, nil + } + var u any + if err := json.Unmarshal(s.Raw, &u); err != nil { + return nil, err + } + return cbor.Marshal(u) +} + +func (s *JSON) UnmarshalCBOR(data []byte) error { + if len(data) == 0 || data[0] == cborNullValue { + return nil + } + var u any + if err := cbor.Unmarshal(data, &u); err != nil { + return err + } + raw, err := json.Marshal(u) + if err != nil { + return err + } + s.Raw = raw + return nil +} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/customresourceconversion.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/customresourceconversion.go index 973e614e3..25e43cc00 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/customresourceconversion.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/customresourceconversion.go @@ -19,14 +19,14 @@ limitations under the License. package v1 import ( - v1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" ) // CustomResourceConversionApplyConfiguration represents a declarative configuration of the CustomResourceConversion type for use // with apply. 
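The JSON type in both marshal.go hunks stores raw JSON bytes, so its new MarshalCBOR/UnmarshalCBOR methods transcode through a generic any value rather than translating byte-for-byte; that is why insignificant whitespace, number representation, and map key order can change across a round trip. Below is a rough sketch of that round trip using the same helper packages the hunks import; the example document is made up, and the snippet is illustrative rather than part of the vendored code.

package main

import (
	"fmt"

	cbor "k8s.io/apimachinery/pkg/runtime/serializer/cbor/direct"
	"k8s.io/apimachinery/pkg/util/json"
)

func main() {
	// Raw JSON as held in JSON.Raw; formatting and key order are not preserved.
	raw := []byte(`{"replicas": 3, "paused": false}`)

	var u any
	if err := json.Unmarshal(raw, &u); err != nil { // JSON -> generic value
		panic(err)
	}
	enc, err := cbor.Marshal(u) // generic value -> CBOR, as MarshalCBOR does
	if err != nil {
		panic(err)
	}

	var back any
	if err := cbor.Unmarshal(enc, &back); err != nil { // CBOR -> generic value
		panic(err)
	}
	round, err := json.Marshal(back) // generic value -> JSON, as UnmarshalCBOR does
	if err != nil {
		panic(err)
	}
	fmt.Printf("%d CBOR bytes, round-tripped JSON: %s\n", len(enc), round)
}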
type CustomResourceConversionApplyConfiguration struct { - Strategy *v1.ConversionStrategyType `json:"strategy,omitempty"` - Webhook *WebhookConversionApplyConfiguration `json:"webhook,omitempty"` + Strategy *apiextensionsv1.ConversionStrategyType `json:"strategy,omitempty"` + Webhook *WebhookConversionApplyConfiguration `json:"webhook,omitempty"` } // CustomResourceConversionApplyConfiguration constructs a declarative configuration of the CustomResourceConversion type for use with @@ -38,7 +38,7 @@ func CustomResourceConversion() *CustomResourceConversionApplyConfiguration { // WithStrategy sets the Strategy field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Strategy field is set to the value of the last call. -func (b *CustomResourceConversionApplyConfiguration) WithStrategy(value v1.ConversionStrategyType) *CustomResourceConversionApplyConfiguration { +func (b *CustomResourceConversionApplyConfiguration) WithStrategy(value apiextensionsv1.ConversionStrategyType) *CustomResourceConversionApplyConfiguration { b.Strategy = &value return b } diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/customresourcedefinition.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/customresourcedefinition.go index 12417b2e6..110620d65 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/customresourcedefinition.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/customresourcedefinition.go @@ -19,18 +19,18 @@ limitations under the License. package v1 import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) // CustomResourceDefinitionApplyConfiguration represents a declarative configuration of the CustomResourceDefinition type for use // with apply. type CustomResourceDefinitionApplyConfiguration struct { - v1.TypeMetaApplyConfiguration `json:",inline"` - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Spec *CustomResourceDefinitionSpecApplyConfiguration `json:"spec,omitempty"` - Status *CustomResourceDefinitionStatusApplyConfiguration `json:"status,omitempty"` + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *CustomResourceDefinitionSpecApplyConfiguration `json:"spec,omitempty"` + Status *CustomResourceDefinitionStatusApplyConfiguration `json:"status,omitempty"` } // CustomResourceDefinition constructs a declarative configuration of the CustomResourceDefinition type for use with @@ -47,7 +47,7 @@ func CustomResourceDefinition(name string) *CustomResourceDefinitionApplyConfigu // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. 
func (b *CustomResourceDefinitionApplyConfiguration) WithKind(value string) *CustomResourceDefinitionApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -55,7 +55,7 @@ func (b *CustomResourceDefinitionApplyConfiguration) WithKind(value string) *Cus // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *CustomResourceDefinitionApplyConfiguration) WithAPIVersion(value string) *CustomResourceDefinitionApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -64,7 +64,7 @@ func (b *CustomResourceDefinitionApplyConfiguration) WithAPIVersion(value string // If called multiple times, the Name field is set to the value of the last call. func (b *CustomResourceDefinitionApplyConfiguration) WithName(value string) *CustomResourceDefinitionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -73,7 +73,7 @@ func (b *CustomResourceDefinitionApplyConfiguration) WithName(value string) *Cus // If called multiple times, the GenerateName field is set to the value of the last call. func (b *CustomResourceDefinitionApplyConfiguration) WithGenerateName(value string) *CustomResourceDefinitionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -82,7 +82,7 @@ func (b *CustomResourceDefinitionApplyConfiguration) WithGenerateName(value stri // If called multiple times, the Namespace field is set to the value of the last call. func (b *CustomResourceDefinitionApplyConfiguration) WithNamespace(value string) *CustomResourceDefinitionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -91,7 +91,7 @@ func (b *CustomResourceDefinitionApplyConfiguration) WithNamespace(value string) // If called multiple times, the UID field is set to the value of the last call. func (b *CustomResourceDefinitionApplyConfiguration) WithUID(value types.UID) *CustomResourceDefinitionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -100,7 +100,7 @@ func (b *CustomResourceDefinitionApplyConfiguration) WithUID(value types.UID) *C // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *CustomResourceDefinitionApplyConfiguration) WithResourceVersion(value string) *CustomResourceDefinitionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -109,25 +109,25 @@ func (b *CustomResourceDefinitionApplyConfiguration) WithResourceVersion(value s // If called multiple times, the Generation field is set to the value of the last call. 
func (b *CustomResourceDefinitionApplyConfiguration) WithGeneration(value int64) *CustomResourceDefinitionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CreationTimestamp field is set to the value of the last call. -func (b *CustomResourceDefinitionApplyConfiguration) WithCreationTimestamp(value metav1.Time) *CustomResourceDefinitionApplyConfiguration { +func (b *CustomResourceDefinitionApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *CustomResourceDefinitionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. -func (b *CustomResourceDefinitionApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *CustomResourceDefinitionApplyConfiguration { +func (b *CustomResourceDefinitionApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *CustomResourceDefinitionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -136,7 +136,7 @@ func (b *CustomResourceDefinitionApplyConfiguration) WithDeletionTimestamp(value // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *CustomResourceDefinitionApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *CustomResourceDefinitionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -146,11 +146,11 @@ func (b *CustomResourceDefinitionApplyConfiguration) WithDeletionGracePeriodSeco // overwriting an existing map entries in Labels field with the same key. func (b *CustomResourceDefinitionApplyConfiguration) WithLabels(entries map[string]string) *CustomResourceDefinitionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -161,11 +161,11 @@ func (b *CustomResourceDefinitionApplyConfiguration) WithLabels(entries map[stri // overwriting an existing map entries in Annotations field with the same key. 
func (b *CustomResourceDefinitionApplyConfiguration) WithAnnotations(entries map[string]string) *CustomResourceDefinitionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -173,13 +173,13 @@ func (b *CustomResourceDefinitionApplyConfiguration) WithAnnotations(entries map // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the OwnerReferences field. -func (b *CustomResourceDefinitionApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *CustomResourceDefinitionApplyConfiguration { +func (b *CustomResourceDefinitionApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *CustomResourceDefinitionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -190,14 +190,14 @@ func (b *CustomResourceDefinitionApplyConfiguration) WithOwnerReferences(values func (b *CustomResourceDefinitionApplyConfiguration) WithFinalizers(values ...string) *CustomResourceDefinitionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } func (b *CustomResourceDefinitionApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} } } @@ -220,5 +220,5 @@ func (b *CustomResourceDefinitionApplyConfiguration) WithStatus(value *CustomRes // GetName retrieves the value of the Name field in the declarative configuration. func (b *CustomResourceDefinitionApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/customresourcedefinitioncondition.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/customresourcedefinitioncondition.go index fb070cb6a..228120520 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/customresourcedefinitioncondition.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/customresourcedefinitioncondition.go @@ -19,18 +19,18 @@ limitations under the License. 
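Most of the remaining applyconfiguration hunks in this section replace promoted-field references such as b.Kind or b.Labels with the fully qualified b.TypeMetaApplyConfiguration.Kind and b.ObjectMetaApplyConfiguration.Labels. Because those fields come from embedded structs, both spellings address the same storage, so the regenerated code is behaviorally identical but unambiguous. A minimal sketch of that equivalence follows; typeMeta and crdApplyConfig are simplified stand-ins invented for illustration, not types from the diff.

package main

import "fmt"

// typeMeta stands in for TypeMetaApplyConfiguration.
type typeMeta struct{ Kind *string }

// crdApplyConfig embeds typeMeta, so its Kind field is promoted.
type crdApplyConfig struct {
	typeMeta
}

func main() {
	kind := "CustomResourceDefinition"
	b := &crdApplyConfig{}

	b.Kind = &kind          // promoted-field form (old generated code)
	b.typeMeta.Kind = &kind // explicitly qualified form (regenerated code)

	// Both assignments target the same field.
	fmt.Println(*b.Kind == *b.typeMeta.Kind) // true
}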
package v1 import ( - v1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) // CustomResourceDefinitionConditionApplyConfiguration represents a declarative configuration of the CustomResourceDefinitionCondition type for use // with apply. type CustomResourceDefinitionConditionApplyConfiguration struct { - Type *v1.CustomResourceDefinitionConditionType `json:"type,omitempty"` - Status *v1.ConditionStatus `json:"status,omitempty"` - LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"` - Reason *string `json:"reason,omitempty"` - Message *string `json:"message,omitempty"` + Type *apiextensionsv1.CustomResourceDefinitionConditionType `json:"type,omitempty"` + Status *apiextensionsv1.ConditionStatus `json:"status,omitempty"` + LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"` + Reason *string `json:"reason,omitempty"` + Message *string `json:"message,omitempty"` } // CustomResourceDefinitionConditionApplyConfiguration constructs a declarative configuration of the CustomResourceDefinitionCondition type for use with @@ -42,7 +42,7 @@ func CustomResourceDefinitionCondition() *CustomResourceDefinitionConditionApply // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. -func (b *CustomResourceDefinitionConditionApplyConfiguration) WithType(value v1.CustomResourceDefinitionConditionType) *CustomResourceDefinitionConditionApplyConfiguration { +func (b *CustomResourceDefinitionConditionApplyConfiguration) WithType(value apiextensionsv1.CustomResourceDefinitionConditionType) *CustomResourceDefinitionConditionApplyConfiguration { b.Type = &value return b } @@ -50,7 +50,7 @@ func (b *CustomResourceDefinitionConditionApplyConfiguration) WithType(value v1. // WithStatus sets the Status field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Status field is set to the value of the last call. -func (b *CustomResourceDefinitionConditionApplyConfiguration) WithStatus(value v1.ConditionStatus) *CustomResourceDefinitionConditionApplyConfiguration { +func (b *CustomResourceDefinitionConditionApplyConfiguration) WithStatus(value apiextensionsv1.ConditionStatus) *CustomResourceDefinitionConditionApplyConfiguration { b.Status = &value return b } diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/customresourcesubresources.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/customresourcesubresources.go index e66e710c4..f8d5be3c7 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/customresourcesubresources.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/customresourcesubresources.go @@ -19,13 +19,13 @@ limitations under the License. package v1 import ( - v1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" ) // CustomResourceSubresourcesApplyConfiguration represents a declarative configuration of the CustomResourceSubresources type for use // with apply. 
type CustomResourceSubresourcesApplyConfiguration struct { - Status *v1.CustomResourceSubresourceStatus `json:"status,omitempty"` + Status *apiextensionsv1.CustomResourceSubresourceStatus `json:"status,omitempty"` Scale *CustomResourceSubresourceScaleApplyConfiguration `json:"scale,omitempty"` } @@ -38,7 +38,7 @@ func CustomResourceSubresources() *CustomResourceSubresourcesApplyConfiguration // WithStatus sets the Status field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Status field is set to the value of the last call. -func (b *CustomResourceSubresourcesApplyConfiguration) WithStatus(value v1.CustomResourceSubresourceStatus) *CustomResourceSubresourcesApplyConfiguration { +func (b *CustomResourceSubresourcesApplyConfiguration) WithStatus(value apiextensionsv1.CustomResourceSubresourceStatus) *CustomResourceSubresourcesApplyConfiguration { b.Status = &value return b } diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/jsonschemaprops.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/jsonschemaprops.go index 1acbe6130..d6595ce1d 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/jsonschemaprops.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/jsonschemaprops.go @@ -19,20 +19,20 @@ limitations under the License. package v1 import ( - v1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" ) // JSONSchemaPropsApplyConfiguration represents a declarative configuration of the JSONSchemaProps type for use // with apply. 
type JSONSchemaPropsApplyConfiguration struct { ID *string `json:"id,omitempty"` - Schema *v1.JSONSchemaURL `json:"$schema,omitempty"` + Schema *apiextensionsv1.JSONSchemaURL `json:"$schema,omitempty"` Ref *string `json:"$ref,omitempty"` Description *string `json:"description,omitempty"` Type *string `json:"type,omitempty"` Format *string `json:"format,omitempty"` Title *string `json:"title,omitempty"` - Default *v1.JSON `json:"default,omitempty"` + Default *apiextensionsv1.JSON `json:"default,omitempty"` Maximum *float64 `json:"maximum,omitempty"` ExclusiveMaximum *bool `json:"exclusiveMaximum,omitempty"` Minimum *float64 `json:"minimum,omitempty"` @@ -44,23 +44,23 @@ type JSONSchemaPropsApplyConfiguration struct { MinItems *int64 `json:"minItems,omitempty"` UniqueItems *bool `json:"uniqueItems,omitempty"` MultipleOf *float64 `json:"multipleOf,omitempty"` - Enum []v1.JSON `json:"enum,omitempty"` + Enum []apiextensionsv1.JSON `json:"enum,omitempty"` MaxProperties *int64 `json:"maxProperties,omitempty"` MinProperties *int64 `json:"minProperties,omitempty"` Required []string `json:"required,omitempty"` - Items *v1.JSONSchemaPropsOrArray `json:"items,omitempty"` + Items *apiextensionsv1.JSONSchemaPropsOrArray `json:"items,omitempty"` AllOf []JSONSchemaPropsApplyConfiguration `json:"allOf,omitempty"` OneOf []JSONSchemaPropsApplyConfiguration `json:"oneOf,omitempty"` AnyOf []JSONSchemaPropsApplyConfiguration `json:"anyOf,omitempty"` Not *JSONSchemaPropsApplyConfiguration `json:"not,omitempty"` Properties map[string]JSONSchemaPropsApplyConfiguration `json:"properties,omitempty"` - AdditionalProperties *v1.JSONSchemaPropsOrBool `json:"additionalProperties,omitempty"` + AdditionalProperties *apiextensionsv1.JSONSchemaPropsOrBool `json:"additionalProperties,omitempty"` PatternProperties map[string]JSONSchemaPropsApplyConfiguration `json:"patternProperties,omitempty"` - Dependencies *v1.JSONSchemaDependencies `json:"dependencies,omitempty"` - AdditionalItems *v1.JSONSchemaPropsOrBool `json:"additionalItems,omitempty"` - Definitions *v1.JSONSchemaDefinitions `json:"definitions,omitempty"` + Dependencies *apiextensionsv1.JSONSchemaDependencies `json:"dependencies,omitempty"` + AdditionalItems *apiextensionsv1.JSONSchemaPropsOrBool `json:"additionalItems,omitempty"` + Definitions *apiextensionsv1.JSONSchemaDefinitions `json:"definitions,omitempty"` ExternalDocs *ExternalDocumentationApplyConfiguration `json:"externalDocs,omitempty"` - Example *v1.JSON `json:"example,omitempty"` + Example *apiextensionsv1.JSON `json:"example,omitempty"` Nullable *bool `json:"nullable,omitempty"` XPreserveUnknownFields *bool `json:"x-kubernetes-preserve-unknown-fields,omitempty"` XEmbeddedResource *bool `json:"x-kubernetes-embedded-resource,omitempty"` @@ -68,7 +68,7 @@ type JSONSchemaPropsApplyConfiguration struct { XListMapKeys []string `json:"x-kubernetes-list-map-keys,omitempty"` XListType *string `json:"x-kubernetes-list-type,omitempty"` XMapType *string `json:"x-kubernetes-map-type,omitempty"` - XValidations *v1.ValidationRules `json:"x-kubernetes-validations,omitempty"` + XValidations *apiextensionsv1.ValidationRules `json:"x-kubernetes-validations,omitempty"` } // JSONSchemaPropsApplyConfiguration constructs a declarative configuration of the JSONSchemaProps type for use with @@ -88,7 +88,7 @@ func (b *JSONSchemaPropsApplyConfiguration) WithID(value string) *JSONSchemaProp // WithSchema sets the Schema field in the declarative configuration to the given value // and returns the receiver, so that objects can be 
built by chaining "With" function invocations. // If called multiple times, the Schema field is set to the value of the last call. -func (b *JSONSchemaPropsApplyConfiguration) WithSchema(value v1.JSONSchemaURL) *JSONSchemaPropsApplyConfiguration { +func (b *JSONSchemaPropsApplyConfiguration) WithSchema(value apiextensionsv1.JSONSchemaURL) *JSONSchemaPropsApplyConfiguration { b.Schema = &value return b } @@ -136,7 +136,7 @@ func (b *JSONSchemaPropsApplyConfiguration) WithTitle(value string) *JSONSchemaP // WithDefault sets the Default field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Default field is set to the value of the last call. -func (b *JSONSchemaPropsApplyConfiguration) WithDefault(value v1.JSON) *JSONSchemaPropsApplyConfiguration { +func (b *JSONSchemaPropsApplyConfiguration) WithDefault(value apiextensionsv1.JSON) *JSONSchemaPropsApplyConfiguration { b.Default = &value return b } @@ -232,7 +232,7 @@ func (b *JSONSchemaPropsApplyConfiguration) WithMultipleOf(value float64) *JSONS // WithEnum adds the given value to the Enum field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the Enum field. -func (b *JSONSchemaPropsApplyConfiguration) WithEnum(values ...v1.JSON) *JSONSchemaPropsApplyConfiguration { +func (b *JSONSchemaPropsApplyConfiguration) WithEnum(values ...apiextensionsv1.JSON) *JSONSchemaPropsApplyConfiguration { for i := range values { b.Enum = append(b.Enum, values[i]) } @@ -268,7 +268,7 @@ func (b *JSONSchemaPropsApplyConfiguration) WithRequired(values ...string) *JSON // WithItems sets the Items field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Items field is set to the value of the last call. -func (b *JSONSchemaPropsApplyConfiguration) WithItems(value v1.JSONSchemaPropsOrArray) *JSONSchemaPropsApplyConfiguration { +func (b *JSONSchemaPropsApplyConfiguration) WithItems(value apiextensionsv1.JSONSchemaPropsOrArray) *JSONSchemaPropsApplyConfiguration { b.Items = &value return b } @@ -337,7 +337,7 @@ func (b *JSONSchemaPropsApplyConfiguration) WithProperties(entries map[string]JS // WithAdditionalProperties sets the AdditionalProperties field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the AdditionalProperties field is set to the value of the last call. -func (b *JSONSchemaPropsApplyConfiguration) WithAdditionalProperties(value v1.JSONSchemaPropsOrBool) *JSONSchemaPropsApplyConfiguration { +func (b *JSONSchemaPropsApplyConfiguration) WithAdditionalProperties(value apiextensionsv1.JSONSchemaPropsOrBool) *JSONSchemaPropsApplyConfiguration { b.AdditionalProperties = &value return b } @@ -359,7 +359,7 @@ func (b *JSONSchemaPropsApplyConfiguration) WithPatternProperties(entries map[st // WithDependencies sets the Dependencies field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Dependencies field is set to the value of the last call. 
-func (b *JSONSchemaPropsApplyConfiguration) WithDependencies(value v1.JSONSchemaDependencies) *JSONSchemaPropsApplyConfiguration { +func (b *JSONSchemaPropsApplyConfiguration) WithDependencies(value apiextensionsv1.JSONSchemaDependencies) *JSONSchemaPropsApplyConfiguration { b.Dependencies = &value return b } @@ -367,7 +367,7 @@ func (b *JSONSchemaPropsApplyConfiguration) WithDependencies(value v1.JSONSchema // WithAdditionalItems sets the AdditionalItems field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the AdditionalItems field is set to the value of the last call. -func (b *JSONSchemaPropsApplyConfiguration) WithAdditionalItems(value v1.JSONSchemaPropsOrBool) *JSONSchemaPropsApplyConfiguration { +func (b *JSONSchemaPropsApplyConfiguration) WithAdditionalItems(value apiextensionsv1.JSONSchemaPropsOrBool) *JSONSchemaPropsApplyConfiguration { b.AdditionalItems = &value return b } @@ -375,7 +375,7 @@ func (b *JSONSchemaPropsApplyConfiguration) WithAdditionalItems(value v1.JSONSch // WithDefinitions sets the Definitions field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Definitions field is set to the value of the last call. -func (b *JSONSchemaPropsApplyConfiguration) WithDefinitions(value v1.JSONSchemaDefinitions) *JSONSchemaPropsApplyConfiguration { +func (b *JSONSchemaPropsApplyConfiguration) WithDefinitions(value apiextensionsv1.JSONSchemaDefinitions) *JSONSchemaPropsApplyConfiguration { b.Definitions = &value return b } @@ -391,7 +391,7 @@ func (b *JSONSchemaPropsApplyConfiguration) WithExternalDocs(value *ExternalDocu // WithExample sets the Example field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Example field is set to the value of the last call. -func (b *JSONSchemaPropsApplyConfiguration) WithExample(value v1.JSON) *JSONSchemaPropsApplyConfiguration { +func (b *JSONSchemaPropsApplyConfiguration) WithExample(value apiextensionsv1.JSON) *JSONSchemaPropsApplyConfiguration { b.Example = &value return b } @@ -457,7 +457,7 @@ func (b *JSONSchemaPropsApplyConfiguration) WithXMapType(value string) *JSONSche // WithXValidations sets the XValidations field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the XValidations field is set to the value of the last call. 
-func (b *JSONSchemaPropsApplyConfiguration) WithXValidations(value v1.ValidationRules) *JSONSchemaPropsApplyConfiguration { +func (b *JSONSchemaPropsApplyConfiguration) WithXValidations(value apiextensionsv1.ValidationRules) *JSONSchemaPropsApplyConfiguration { b.XValidations = &value return b } diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/validationrule.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/validationrule.go index e5fc80c7e..5ee414928 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/validationrule.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/validationrule.go @@ -19,18 +19,18 @@ limitations under the License. package v1 import ( - v1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" ) // ValidationRuleApplyConfiguration represents a declarative configuration of the ValidationRule type for use // with apply. type ValidationRuleApplyConfiguration struct { - Rule *string `json:"rule,omitempty"` - Message *string `json:"message,omitempty"` - MessageExpression *string `json:"messageExpression,omitempty"` - Reason *v1.FieldValueErrorReason `json:"reason,omitempty"` - FieldPath *string `json:"fieldPath,omitempty"` - OptionalOldSelf *bool `json:"optionalOldSelf,omitempty"` + Rule *string `json:"rule,omitempty"` + Message *string `json:"message,omitempty"` + MessageExpression *string `json:"messageExpression,omitempty"` + Reason *apiextensionsv1.FieldValueErrorReason `json:"reason,omitempty"` + FieldPath *string `json:"fieldPath,omitempty"` + OptionalOldSelf *bool `json:"optionalOldSelf,omitempty"` } // ValidationRuleApplyConfiguration constructs a declarative configuration of the ValidationRule type for use with @@ -66,7 +66,7 @@ func (b *ValidationRuleApplyConfiguration) WithMessageExpression(value string) * // WithReason sets the Reason field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Reason field is set to the value of the last call. -func (b *ValidationRuleApplyConfiguration) WithReason(value v1.FieldValueErrorReason) *ValidationRuleApplyConfiguration { +func (b *ValidationRuleApplyConfiguration) WithReason(value apiextensionsv1.FieldValueErrorReason) *ValidationRuleApplyConfiguration { b.Reason = &value return b } diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/customresourceconversion.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/customresourceconversion.go index b0171f16a..f652c96d5 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/customresourceconversion.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/customresourceconversion.go @@ -19,15 +19,15 @@ limitations under the License. package v1beta1 import ( - v1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" + apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" ) // CustomResourceConversionApplyConfiguration represents a declarative configuration of the CustomResourceConversion type for use // with apply. 
type CustomResourceConversionApplyConfiguration struct { - Strategy *v1beta1.ConversionStrategyType `json:"strategy,omitempty"` - WebhookClientConfig *WebhookClientConfigApplyConfiguration `json:"webhookClientConfig,omitempty"` - ConversionReviewVersions []string `json:"conversionReviewVersions,omitempty"` + Strategy *apiextensionsv1beta1.ConversionStrategyType `json:"strategy,omitempty"` + WebhookClientConfig *WebhookClientConfigApplyConfiguration `json:"webhookClientConfig,omitempty"` + ConversionReviewVersions []string `json:"conversionReviewVersions,omitempty"` } // CustomResourceConversionApplyConfiguration constructs a declarative configuration of the CustomResourceConversion type for use with @@ -39,7 +39,7 @@ func CustomResourceConversion() *CustomResourceConversionApplyConfiguration { // WithStrategy sets the Strategy field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Strategy field is set to the value of the last call. -func (b *CustomResourceConversionApplyConfiguration) WithStrategy(value v1beta1.ConversionStrategyType) *CustomResourceConversionApplyConfiguration { +func (b *CustomResourceConversionApplyConfiguration) WithStrategy(value apiextensionsv1beta1.ConversionStrategyType) *CustomResourceConversionApplyConfiguration { b.Strategy = &value return b } diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/customresourcedefinition.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/customresourcedefinition.go index bbda37932..d56cff21f 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/customresourcedefinition.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/customresourcedefinition.go @@ -47,7 +47,7 @@ func CustomResourceDefinition(name string) *CustomResourceDefinitionApplyConfigu // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *CustomResourceDefinitionApplyConfiguration) WithKind(value string) *CustomResourceDefinitionApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -55,7 +55,7 @@ func (b *CustomResourceDefinitionApplyConfiguration) WithKind(value string) *Cus // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *CustomResourceDefinitionApplyConfiguration) WithAPIVersion(value string) *CustomResourceDefinitionApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -64,7 +64,7 @@ func (b *CustomResourceDefinitionApplyConfiguration) WithAPIVersion(value string // If called multiple times, the Name field is set to the value of the last call. func (b *CustomResourceDefinitionApplyConfiguration) WithName(value string) *CustomResourceDefinitionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -73,7 +73,7 @@ func (b *CustomResourceDefinitionApplyConfiguration) WithName(value string) *Cus // If called multiple times, the GenerateName field is set to the value of the last call. 
func (b *CustomResourceDefinitionApplyConfiguration) WithGenerateName(value string) *CustomResourceDefinitionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -82,7 +82,7 @@ func (b *CustomResourceDefinitionApplyConfiguration) WithGenerateName(value stri // If called multiple times, the Namespace field is set to the value of the last call. func (b *CustomResourceDefinitionApplyConfiguration) WithNamespace(value string) *CustomResourceDefinitionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -91,7 +91,7 @@ func (b *CustomResourceDefinitionApplyConfiguration) WithNamespace(value string) // If called multiple times, the UID field is set to the value of the last call. func (b *CustomResourceDefinitionApplyConfiguration) WithUID(value types.UID) *CustomResourceDefinitionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -100,7 +100,7 @@ func (b *CustomResourceDefinitionApplyConfiguration) WithUID(value types.UID) *C // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *CustomResourceDefinitionApplyConfiguration) WithResourceVersion(value string) *CustomResourceDefinitionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -109,7 +109,7 @@ func (b *CustomResourceDefinitionApplyConfiguration) WithResourceVersion(value s // If called multiple times, the Generation field is set to the value of the last call. func (b *CustomResourceDefinitionApplyConfiguration) WithGeneration(value int64) *CustomResourceDefinitionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } @@ -118,7 +118,7 @@ func (b *CustomResourceDefinitionApplyConfiguration) WithGeneration(value int64) // If called multiple times, the CreationTimestamp field is set to the value of the last call. func (b *CustomResourceDefinitionApplyConfiguration) WithCreationTimestamp(value metav1.Time) *CustomResourceDefinitionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } @@ -127,7 +127,7 @@ func (b *CustomResourceDefinitionApplyConfiguration) WithCreationTimestamp(value // If called multiple times, the DeletionTimestamp field is set to the value of the last call. func (b *CustomResourceDefinitionApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *CustomResourceDefinitionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -136,7 +136,7 @@ func (b *CustomResourceDefinitionApplyConfiguration) WithDeletionTimestamp(value // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. 
func (b *CustomResourceDefinitionApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *CustomResourceDefinitionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -146,11 +146,11 @@ func (b *CustomResourceDefinitionApplyConfiguration) WithDeletionGracePeriodSeco // overwriting an existing map entries in Labels field with the same key. func (b *CustomResourceDefinitionApplyConfiguration) WithLabels(entries map[string]string) *CustomResourceDefinitionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -161,11 +161,11 @@ func (b *CustomResourceDefinitionApplyConfiguration) WithLabels(entries map[stri // overwriting an existing map entries in Annotations field with the same key. func (b *CustomResourceDefinitionApplyConfiguration) WithAnnotations(entries map[string]string) *CustomResourceDefinitionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -179,7 +179,7 @@ func (b *CustomResourceDefinitionApplyConfiguration) WithOwnerReferences(values if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -190,7 +190,7 @@ func (b *CustomResourceDefinitionApplyConfiguration) WithOwnerReferences(values func (b *CustomResourceDefinitionApplyConfiguration) WithFinalizers(values ...string) *CustomResourceDefinitionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } @@ -220,5 +220,5 @@ func (b *CustomResourceDefinitionApplyConfiguration) WithStatus(value *CustomRes // GetName retrieves the value of the Name field in the declarative configuration. 
func (b *CustomResourceDefinitionApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/customresourcedefinitioncondition.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/customresourcedefinitioncondition.go index 7f2f132ac..4d19e1b5b 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/customresourcedefinitioncondition.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/customresourcedefinitioncondition.go @@ -19,18 +19,18 @@ limitations under the License. package v1beta1 import ( - v1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" + apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) // CustomResourceDefinitionConditionApplyConfiguration represents a declarative configuration of the CustomResourceDefinitionCondition type for use // with apply. type CustomResourceDefinitionConditionApplyConfiguration struct { - Type *v1beta1.CustomResourceDefinitionConditionType `json:"type,omitempty"` - Status *v1beta1.ConditionStatus `json:"status,omitempty"` - LastTransitionTime *v1.Time `json:"lastTransitionTime,omitempty"` - Reason *string `json:"reason,omitempty"` - Message *string `json:"message,omitempty"` + Type *apiextensionsv1beta1.CustomResourceDefinitionConditionType `json:"type,omitempty"` + Status *apiextensionsv1beta1.ConditionStatus `json:"status,omitempty"` + LastTransitionTime *v1.Time `json:"lastTransitionTime,omitempty"` + Reason *string `json:"reason,omitempty"` + Message *string `json:"message,omitempty"` } // CustomResourceDefinitionConditionApplyConfiguration constructs a declarative configuration of the CustomResourceDefinitionCondition type for use with @@ -42,7 +42,7 @@ func CustomResourceDefinitionCondition() *CustomResourceDefinitionConditionApply // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. -func (b *CustomResourceDefinitionConditionApplyConfiguration) WithType(value v1beta1.CustomResourceDefinitionConditionType) *CustomResourceDefinitionConditionApplyConfiguration { +func (b *CustomResourceDefinitionConditionApplyConfiguration) WithType(value apiextensionsv1beta1.CustomResourceDefinitionConditionType) *CustomResourceDefinitionConditionApplyConfiguration { b.Type = &value return b } @@ -50,7 +50,7 @@ func (b *CustomResourceDefinitionConditionApplyConfiguration) WithType(value v1b // WithStatus sets the Status field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Status field is set to the value of the last call. 
-func (b *CustomResourceDefinitionConditionApplyConfiguration) WithStatus(value v1beta1.ConditionStatus) *CustomResourceDefinitionConditionApplyConfiguration { +func (b *CustomResourceDefinitionConditionApplyConfiguration) WithStatus(value apiextensionsv1beta1.ConditionStatus) *CustomResourceDefinitionConditionApplyConfiguration { b.Status = &value return b } diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/customresourcesubresources.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/customresourcesubresources.go index 3847b8789..3ee82a037 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/customresourcesubresources.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/customresourcesubresources.go @@ -19,14 +19,14 @@ limitations under the License. package v1beta1 import ( - v1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" + apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" ) // CustomResourceSubresourcesApplyConfiguration represents a declarative configuration of the CustomResourceSubresources type for use // with apply. type CustomResourceSubresourcesApplyConfiguration struct { - Status *v1beta1.CustomResourceSubresourceStatus `json:"status,omitempty"` - Scale *CustomResourceSubresourceScaleApplyConfiguration `json:"scale,omitempty"` + Status *apiextensionsv1beta1.CustomResourceSubresourceStatus `json:"status,omitempty"` + Scale *CustomResourceSubresourceScaleApplyConfiguration `json:"scale,omitempty"` } // CustomResourceSubresourcesApplyConfiguration constructs a declarative configuration of the CustomResourceSubresources type for use with @@ -38,7 +38,7 @@ func CustomResourceSubresources() *CustomResourceSubresourcesApplyConfiguration // WithStatus sets the Status field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Status field is set to the value of the last call. -func (b *CustomResourceSubresourcesApplyConfiguration) WithStatus(value v1beta1.CustomResourceSubresourceStatus) *CustomResourceSubresourcesApplyConfiguration { +func (b *CustomResourceSubresourcesApplyConfiguration) WithStatus(value apiextensionsv1beta1.CustomResourceSubresourceStatus) *CustomResourceSubresourcesApplyConfiguration { b.Status = &value return b } diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/jsonschemaprops.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/jsonschemaprops.go index 9c588e2a1..b90b9281c 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/jsonschemaprops.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/jsonschemaprops.go @@ -19,20 +19,20 @@ limitations under the License. package v1beta1 import ( - v1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" + apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" ) // JSONSchemaPropsApplyConfiguration represents a declarative configuration of the JSONSchemaProps type for use // with apply. 
type JSONSchemaPropsApplyConfiguration struct { ID *string `json:"id,omitempty"` - Schema *v1beta1.JSONSchemaURL `json:"$schema,omitempty"` + Schema *apiextensionsv1beta1.JSONSchemaURL `json:"$schema,omitempty"` Ref *string `json:"$ref,omitempty"` Description *string `json:"description,omitempty"` Type *string `json:"type,omitempty"` Format *string `json:"format,omitempty"` Title *string `json:"title,omitempty"` - Default *v1beta1.JSON `json:"default,omitempty"` + Default *apiextensionsv1beta1.JSON `json:"default,omitempty"` Maximum *float64 `json:"maximum,omitempty"` ExclusiveMaximum *bool `json:"exclusiveMaximum,omitempty"` Minimum *float64 `json:"minimum,omitempty"` @@ -44,23 +44,23 @@ type JSONSchemaPropsApplyConfiguration struct { MinItems *int64 `json:"minItems,omitempty"` UniqueItems *bool `json:"uniqueItems,omitempty"` MultipleOf *float64 `json:"multipleOf,omitempty"` - Enum []v1beta1.JSON `json:"enum,omitempty"` + Enum []apiextensionsv1beta1.JSON `json:"enum,omitempty"` MaxProperties *int64 `json:"maxProperties,omitempty"` MinProperties *int64 `json:"minProperties,omitempty"` Required []string `json:"required,omitempty"` - Items *v1beta1.JSONSchemaPropsOrArray `json:"items,omitempty"` + Items *apiextensionsv1beta1.JSONSchemaPropsOrArray `json:"items,omitempty"` AllOf []JSONSchemaPropsApplyConfiguration `json:"allOf,omitempty"` OneOf []JSONSchemaPropsApplyConfiguration `json:"oneOf,omitempty"` AnyOf []JSONSchemaPropsApplyConfiguration `json:"anyOf,omitempty"` Not *JSONSchemaPropsApplyConfiguration `json:"not,omitempty"` Properties map[string]JSONSchemaPropsApplyConfiguration `json:"properties,omitempty"` - AdditionalProperties *v1beta1.JSONSchemaPropsOrBool `json:"additionalProperties,omitempty"` + AdditionalProperties *apiextensionsv1beta1.JSONSchemaPropsOrBool `json:"additionalProperties,omitempty"` PatternProperties map[string]JSONSchemaPropsApplyConfiguration `json:"patternProperties,omitempty"` - Dependencies *v1beta1.JSONSchemaDependencies `json:"dependencies,omitempty"` - AdditionalItems *v1beta1.JSONSchemaPropsOrBool `json:"additionalItems,omitempty"` - Definitions *v1beta1.JSONSchemaDefinitions `json:"definitions,omitempty"` + Dependencies *apiextensionsv1beta1.JSONSchemaDependencies `json:"dependencies,omitempty"` + AdditionalItems *apiextensionsv1beta1.JSONSchemaPropsOrBool `json:"additionalItems,omitempty"` + Definitions *apiextensionsv1beta1.JSONSchemaDefinitions `json:"definitions,omitempty"` ExternalDocs *ExternalDocumentationApplyConfiguration `json:"externalDocs,omitempty"` - Example *v1beta1.JSON `json:"example,omitempty"` + Example *apiextensionsv1beta1.JSON `json:"example,omitempty"` Nullable *bool `json:"nullable,omitempty"` XPreserveUnknownFields *bool `json:"x-kubernetes-preserve-unknown-fields,omitempty"` XEmbeddedResource *bool `json:"x-kubernetes-embedded-resource,omitempty"` @@ -68,7 +68,7 @@ type JSONSchemaPropsApplyConfiguration struct { XListMapKeys []string `json:"x-kubernetes-list-map-keys,omitempty"` XListType *string `json:"x-kubernetes-list-type,omitempty"` XMapType *string `json:"x-kubernetes-map-type,omitempty"` - XValidations *v1beta1.ValidationRules `json:"x-kubernetes-validations,omitempty"` + XValidations *apiextensionsv1beta1.ValidationRules `json:"x-kubernetes-validations,omitempty"` } // JSONSchemaPropsApplyConfiguration constructs a declarative configuration of the JSONSchemaProps type for use with @@ -88,7 +88,7 @@ func (b *JSONSchemaPropsApplyConfiguration) WithID(value string) *JSONSchemaProp // WithSchema sets the Schema field in the 
declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Schema field is set to the value of the last call. -func (b *JSONSchemaPropsApplyConfiguration) WithSchema(value v1beta1.JSONSchemaURL) *JSONSchemaPropsApplyConfiguration { +func (b *JSONSchemaPropsApplyConfiguration) WithSchema(value apiextensionsv1beta1.JSONSchemaURL) *JSONSchemaPropsApplyConfiguration { b.Schema = &value return b } @@ -136,7 +136,7 @@ func (b *JSONSchemaPropsApplyConfiguration) WithTitle(value string) *JSONSchemaP // WithDefault sets the Default field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Default field is set to the value of the last call. -func (b *JSONSchemaPropsApplyConfiguration) WithDefault(value v1beta1.JSON) *JSONSchemaPropsApplyConfiguration { +func (b *JSONSchemaPropsApplyConfiguration) WithDefault(value apiextensionsv1beta1.JSON) *JSONSchemaPropsApplyConfiguration { b.Default = &value return b } @@ -232,7 +232,7 @@ func (b *JSONSchemaPropsApplyConfiguration) WithMultipleOf(value float64) *JSONS // WithEnum adds the given value to the Enum field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the Enum field. -func (b *JSONSchemaPropsApplyConfiguration) WithEnum(values ...v1beta1.JSON) *JSONSchemaPropsApplyConfiguration { +func (b *JSONSchemaPropsApplyConfiguration) WithEnum(values ...apiextensionsv1beta1.JSON) *JSONSchemaPropsApplyConfiguration { for i := range values { b.Enum = append(b.Enum, values[i]) } @@ -268,7 +268,7 @@ func (b *JSONSchemaPropsApplyConfiguration) WithRequired(values ...string) *JSON // WithItems sets the Items field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Items field is set to the value of the last call. -func (b *JSONSchemaPropsApplyConfiguration) WithItems(value v1beta1.JSONSchemaPropsOrArray) *JSONSchemaPropsApplyConfiguration { +func (b *JSONSchemaPropsApplyConfiguration) WithItems(value apiextensionsv1beta1.JSONSchemaPropsOrArray) *JSONSchemaPropsApplyConfiguration { b.Items = &value return b } @@ -337,7 +337,7 @@ func (b *JSONSchemaPropsApplyConfiguration) WithProperties(entries map[string]JS // WithAdditionalProperties sets the AdditionalProperties field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the AdditionalProperties field is set to the value of the last call. 
-func (b *JSONSchemaPropsApplyConfiguration) WithAdditionalProperties(value v1beta1.JSONSchemaPropsOrBool) *JSONSchemaPropsApplyConfiguration { +func (b *JSONSchemaPropsApplyConfiguration) WithAdditionalProperties(value apiextensionsv1beta1.JSONSchemaPropsOrBool) *JSONSchemaPropsApplyConfiguration { b.AdditionalProperties = &value return b } @@ -359,7 +359,7 @@ func (b *JSONSchemaPropsApplyConfiguration) WithPatternProperties(entries map[st // WithDependencies sets the Dependencies field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Dependencies field is set to the value of the last call. -func (b *JSONSchemaPropsApplyConfiguration) WithDependencies(value v1beta1.JSONSchemaDependencies) *JSONSchemaPropsApplyConfiguration { +func (b *JSONSchemaPropsApplyConfiguration) WithDependencies(value apiextensionsv1beta1.JSONSchemaDependencies) *JSONSchemaPropsApplyConfiguration { b.Dependencies = &value return b } @@ -367,7 +367,7 @@ func (b *JSONSchemaPropsApplyConfiguration) WithDependencies(value v1beta1.JSONS // WithAdditionalItems sets the AdditionalItems field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the AdditionalItems field is set to the value of the last call. -func (b *JSONSchemaPropsApplyConfiguration) WithAdditionalItems(value v1beta1.JSONSchemaPropsOrBool) *JSONSchemaPropsApplyConfiguration { +func (b *JSONSchemaPropsApplyConfiguration) WithAdditionalItems(value apiextensionsv1beta1.JSONSchemaPropsOrBool) *JSONSchemaPropsApplyConfiguration { b.AdditionalItems = &value return b } @@ -375,7 +375,7 @@ func (b *JSONSchemaPropsApplyConfiguration) WithAdditionalItems(value v1beta1.JS // WithDefinitions sets the Definitions field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Definitions field is set to the value of the last call. -func (b *JSONSchemaPropsApplyConfiguration) WithDefinitions(value v1beta1.JSONSchemaDefinitions) *JSONSchemaPropsApplyConfiguration { +func (b *JSONSchemaPropsApplyConfiguration) WithDefinitions(value apiextensionsv1beta1.JSONSchemaDefinitions) *JSONSchemaPropsApplyConfiguration { b.Definitions = &value return b } @@ -391,7 +391,7 @@ func (b *JSONSchemaPropsApplyConfiguration) WithExternalDocs(value *ExternalDocu // WithExample sets the Example field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Example field is set to the value of the last call. -func (b *JSONSchemaPropsApplyConfiguration) WithExample(value v1beta1.JSON) *JSONSchemaPropsApplyConfiguration { +func (b *JSONSchemaPropsApplyConfiguration) WithExample(value apiextensionsv1beta1.JSON) *JSONSchemaPropsApplyConfiguration { b.Example = &value return b } @@ -457,7 +457,7 @@ func (b *JSONSchemaPropsApplyConfiguration) WithXMapType(value string) *JSONSche // WithXValidations sets the XValidations field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the XValidations field is set to the value of the last call. 
-func (b *JSONSchemaPropsApplyConfiguration) WithXValidations(value v1beta1.ValidationRules) *JSONSchemaPropsApplyConfiguration { +func (b *JSONSchemaPropsApplyConfiguration) WithXValidations(value apiextensionsv1beta1.ValidationRules) *JSONSchemaPropsApplyConfiguration { b.XValidations = &value return b } diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/validationrule.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/validationrule.go index e245ded1a..c9b3da89b 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/validationrule.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/validationrule.go @@ -19,18 +19,18 @@ limitations under the License. package v1beta1 import ( - v1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" + apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" ) // ValidationRuleApplyConfiguration represents a declarative configuration of the ValidationRule type for use // with apply. type ValidationRuleApplyConfiguration struct { - Rule *string `json:"rule,omitempty"` - Message *string `json:"message,omitempty"` - MessageExpression *string `json:"messageExpression,omitempty"` - Reason *v1beta1.FieldValueErrorReason `json:"reason,omitempty"` - FieldPath *string `json:"fieldPath,omitempty"` - OptionalOldSelf *bool `json:"optionalOldSelf,omitempty"` + Rule *string `json:"rule,omitempty"` + Message *string `json:"message,omitempty"` + MessageExpression *string `json:"messageExpression,omitempty"` + Reason *apiextensionsv1beta1.FieldValueErrorReason `json:"reason,omitempty"` + FieldPath *string `json:"fieldPath,omitempty"` + OptionalOldSelf *bool `json:"optionalOldSelf,omitempty"` } // ValidationRuleApplyConfiguration constructs a declarative configuration of the ValidationRule type for use with @@ -66,7 +66,7 @@ func (b *ValidationRuleApplyConfiguration) WithMessageExpression(value string) * // WithReason sets the Reason field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Reason field is set to the value of the last call. -func (b *ValidationRuleApplyConfiguration) WithReason(value v1beta1.FieldValueErrorReason) *ValidationRuleApplyConfiguration { +func (b *ValidationRuleApplyConfiguration) WithReason(value apiextensionsv1beta1.FieldValueErrorReason) *ValidationRuleApplyConfiguration { b.Reason = &value return b } diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/clientset.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/clientset.go index 449285e17..93dd79d63 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/clientset.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/clientset.go @@ -19,8 +19,8 @@ limitations under the License. 
package clientset import ( - "fmt" - "net/http" + fmt "fmt" + http "net/http" apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1" apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1" diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1/apiextensions_client.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1/apiextensions_client.go index 0bdc44c40..cd766a2dc 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1/apiextensions_client.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1/apiextensions_client.go @@ -19,10 +19,10 @@ limitations under the License. package v1 import ( - "net/http" + http "net/http" - v1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" - "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + scheme "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme" rest "k8s.io/client-go/rest" ) @@ -85,10 +85,10 @@ func New(c rest.Interface) *ApiextensionsV1Client { } func setConfigDefaults(config *rest.Config) error { - gv := v1.SchemeGroupVersion + gv := apiextensionsv1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1/customresourcedefinition.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1/customresourcedefinition.go index 03d3dea82..1197071d0 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1/customresourcedefinition.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1/customresourcedefinition.go @@ -19,10 +19,10 @@ limitations under the License. package v1 import ( - "context" + context "context" - v1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" - apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + applyconfigurationapiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1" scheme "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -38,36 +38,40 @@ type CustomResourceDefinitionsGetter interface { // CustomResourceDefinitionInterface has methods to work with CustomResourceDefinition resources. 
type CustomResourceDefinitionInterface interface { - Create(ctx context.Context, customResourceDefinition *v1.CustomResourceDefinition, opts metav1.CreateOptions) (*v1.CustomResourceDefinition, error) - Update(ctx context.Context, customResourceDefinition *v1.CustomResourceDefinition, opts metav1.UpdateOptions) (*v1.CustomResourceDefinition, error) + Create(ctx context.Context, customResourceDefinition *apiextensionsv1.CustomResourceDefinition, opts metav1.CreateOptions) (*apiextensionsv1.CustomResourceDefinition, error) + Update(ctx context.Context, customResourceDefinition *apiextensionsv1.CustomResourceDefinition, opts metav1.UpdateOptions) (*apiextensionsv1.CustomResourceDefinition, error) // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - UpdateStatus(ctx context.Context, customResourceDefinition *v1.CustomResourceDefinition, opts metav1.UpdateOptions) (*v1.CustomResourceDefinition, error) + UpdateStatus(ctx context.Context, customResourceDefinition *apiextensionsv1.CustomResourceDefinition, opts metav1.UpdateOptions) (*apiextensionsv1.CustomResourceDefinition, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error - Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.CustomResourceDefinition, error) - List(ctx context.Context, opts metav1.ListOptions) (*v1.CustomResourceDefinitionList, error) + Get(ctx context.Context, name string, opts metav1.GetOptions) (*apiextensionsv1.CustomResourceDefinition, error) + List(ctx context.Context, opts metav1.ListOptions) (*apiextensionsv1.CustomResourceDefinitionList, error) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CustomResourceDefinition, err error) - Apply(ctx context.Context, customResourceDefinition *apiextensionsv1.CustomResourceDefinitionApplyConfiguration, opts metav1.ApplyOptions) (result *v1.CustomResourceDefinition, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *apiextensionsv1.CustomResourceDefinition, err error) + Apply(ctx context.Context, customResourceDefinition *applyconfigurationapiextensionsv1.CustomResourceDefinitionApplyConfiguration, opts metav1.ApplyOptions) (result *apiextensionsv1.CustomResourceDefinition, err error) // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
- ApplyStatus(ctx context.Context, customResourceDefinition *apiextensionsv1.CustomResourceDefinitionApplyConfiguration, opts metav1.ApplyOptions) (result *v1.CustomResourceDefinition, err error) + ApplyStatus(ctx context.Context, customResourceDefinition *applyconfigurationapiextensionsv1.CustomResourceDefinitionApplyConfiguration, opts metav1.ApplyOptions) (result *apiextensionsv1.CustomResourceDefinition, err error) CustomResourceDefinitionExpansion } // customResourceDefinitions implements CustomResourceDefinitionInterface type customResourceDefinitions struct { - *gentype.ClientWithListAndApply[*v1.CustomResourceDefinition, *v1.CustomResourceDefinitionList, *apiextensionsv1.CustomResourceDefinitionApplyConfiguration] + *gentype.ClientWithListAndApply[*apiextensionsv1.CustomResourceDefinition, *apiextensionsv1.CustomResourceDefinitionList, *applyconfigurationapiextensionsv1.CustomResourceDefinitionApplyConfiguration] } // newCustomResourceDefinitions returns a CustomResourceDefinitions func newCustomResourceDefinitions(c *ApiextensionsV1Client) *customResourceDefinitions { return &customResourceDefinitions{ - gentype.NewClientWithListAndApply[*v1.CustomResourceDefinition, *v1.CustomResourceDefinitionList, *apiextensionsv1.CustomResourceDefinitionApplyConfiguration]( + gentype.NewClientWithListAndApply[*apiextensionsv1.CustomResourceDefinition, *apiextensionsv1.CustomResourceDefinitionList, *applyconfigurationapiextensionsv1.CustomResourceDefinitionApplyConfiguration]( "customresourcedefinitions", c.RESTClient(), scheme.ParameterCodec, "", - func() *v1.CustomResourceDefinition { return &v1.CustomResourceDefinition{} }, - func() *v1.CustomResourceDefinitionList { return &v1.CustomResourceDefinitionList{} }), + func() *apiextensionsv1.CustomResourceDefinition { return &apiextensionsv1.CustomResourceDefinition{} }, + func() *apiextensionsv1.CustomResourceDefinitionList { + return &apiextensionsv1.CustomResourceDefinitionList{} + }, + gentype.PrefersProtobuf[*apiextensionsv1.CustomResourceDefinition](), + ), } } diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1/apiextensions_client.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1/apiextensions_client.go index 657ce2ca8..e45f25d58 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1/apiextensions_client.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1/apiextensions_client.go @@ -19,10 +19,10 @@ limitations under the License. 
package v1beta1 import ( - "net/http" + http "net/http" - v1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" - "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme" + apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" + scheme "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme" rest "k8s.io/client-go/rest" ) @@ -85,10 +85,10 @@ func New(c rest.Interface) *ApiextensionsV1beta1Client { } func setConfigDefaults(config *rest.Config) error { - gv := v1beta1.SchemeGroupVersion + gv := apiextensionsv1beta1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1/customresourcedefinition.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1/customresourcedefinition.go index 47f965597..e7ea4e971 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1/customresourcedefinition.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1/customresourcedefinition.go @@ -19,10 +19,10 @@ limitations under the License. package v1beta1 import ( - "context" + context "context" - v1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" - apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1" + apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" + applyconfigurationapiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1" scheme "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -38,36 +38,42 @@ type CustomResourceDefinitionsGetter interface { // CustomResourceDefinitionInterface has methods to work with CustomResourceDefinition resources. type CustomResourceDefinitionInterface interface { - Create(ctx context.Context, customResourceDefinition *v1beta1.CustomResourceDefinition, opts v1.CreateOptions) (*v1beta1.CustomResourceDefinition, error) - Update(ctx context.Context, customResourceDefinition *v1beta1.CustomResourceDefinition, opts v1.UpdateOptions) (*v1beta1.CustomResourceDefinition, error) + Create(ctx context.Context, customResourceDefinition *apiextensionsv1beta1.CustomResourceDefinition, opts v1.CreateOptions) (*apiextensionsv1beta1.CustomResourceDefinition, error) + Update(ctx context.Context, customResourceDefinition *apiextensionsv1beta1.CustomResourceDefinition, opts v1.UpdateOptions) (*apiextensionsv1beta1.CustomResourceDefinition, error) // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
-	UpdateStatus(ctx context.Context, customResourceDefinition *v1beta1.CustomResourceDefinition, opts v1.UpdateOptions) (*v1beta1.CustomResourceDefinition, error)
+	UpdateStatus(ctx context.Context, customResourceDefinition *apiextensionsv1beta1.CustomResourceDefinition, opts v1.UpdateOptions) (*apiextensionsv1beta1.CustomResourceDefinition, error)
 	Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
 	DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
-	Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.CustomResourceDefinition, error)
-	List(ctx context.Context, opts v1.ListOptions) (*v1beta1.CustomResourceDefinitionList, error)
+	Get(ctx context.Context, name string, opts v1.GetOptions) (*apiextensionsv1beta1.CustomResourceDefinition, error)
+	List(ctx context.Context, opts v1.ListOptions) (*apiextensionsv1beta1.CustomResourceDefinitionList, error)
 	Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
-	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.CustomResourceDefinition, err error)
-	Apply(ctx context.Context, customResourceDefinition *apiextensionsv1beta1.CustomResourceDefinitionApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.CustomResourceDefinition, err error)
+	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *apiextensionsv1beta1.CustomResourceDefinition, err error)
+	Apply(ctx context.Context, customResourceDefinition *applyconfigurationapiextensionsv1beta1.CustomResourceDefinitionApplyConfiguration, opts v1.ApplyOptions) (result *apiextensionsv1beta1.CustomResourceDefinition, err error)
 	// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
-	ApplyStatus(ctx context.Context, customResourceDefinition *apiextensionsv1beta1.CustomResourceDefinitionApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.CustomResourceDefinition, err error)
+	ApplyStatus(ctx context.Context, customResourceDefinition *applyconfigurationapiextensionsv1beta1.CustomResourceDefinitionApplyConfiguration, opts v1.ApplyOptions) (result *apiextensionsv1beta1.CustomResourceDefinition, err error)
 	CustomResourceDefinitionExpansion
 }
 
 // customResourceDefinitions implements CustomResourceDefinitionInterface
 type customResourceDefinitions struct {
-	*gentype.ClientWithListAndApply[*v1beta1.CustomResourceDefinition, *v1beta1.CustomResourceDefinitionList, *apiextensionsv1beta1.CustomResourceDefinitionApplyConfiguration]
+	*gentype.ClientWithListAndApply[*apiextensionsv1beta1.CustomResourceDefinition, *apiextensionsv1beta1.CustomResourceDefinitionList, *applyconfigurationapiextensionsv1beta1.CustomResourceDefinitionApplyConfiguration]
 }
 
 // newCustomResourceDefinitions returns a CustomResourceDefinitions
 func newCustomResourceDefinitions(c *ApiextensionsV1beta1Client) *customResourceDefinitions {
 	return &customResourceDefinitions{
-		gentype.NewClientWithListAndApply[*v1beta1.CustomResourceDefinition, *v1beta1.CustomResourceDefinitionList, *apiextensionsv1beta1.CustomResourceDefinitionApplyConfiguration](
+		gentype.NewClientWithListAndApply[*apiextensionsv1beta1.CustomResourceDefinition, *apiextensionsv1beta1.CustomResourceDefinitionList, *applyconfigurationapiextensionsv1beta1.CustomResourceDefinitionApplyConfiguration](
 			"customresourcedefinitions",
 			c.RESTClient(),
 			scheme.ParameterCodec,
 			"",
-			func() *v1beta1.CustomResourceDefinition { return &v1beta1.CustomResourceDefinition{} },
-			func() *v1beta1.CustomResourceDefinitionList { return &v1beta1.CustomResourceDefinitionList{} }),
+			func() *apiextensionsv1beta1.CustomResourceDefinition {
+				return &apiextensionsv1beta1.CustomResourceDefinition{}
+			},
+			func() *apiextensionsv1beta1.CustomResourceDefinitionList {
+				return &apiextensionsv1beta1.CustomResourceDefinitionList{}
+			},
+			gentype.PrefersProtobuf[*apiextensionsv1beta1.CustomResourceDefinition](),
+		),
 	}
 }
diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/features/kube_features.go b/vendor/k8s.io/apiextensions-apiserver/pkg/features/kube_features.go
index b41d13e53..c562263f3 100644
--- a/vendor/k8s.io/apiextensions-apiserver/pkg/features/kube_features.go
+++ b/vendor/k8s.io/apiextensions-apiserver/pkg/features/kube_features.go
@@ -17,6 +17,8 @@ limitations under the License.
 package features
 
 import (
+	"k8s.io/apimachinery/pkg/util/runtime"
+	"k8s.io/apimachinery/pkg/util/version"
 	utilfeature "k8s.io/apiserver/pkg/util/feature"
 	"k8s.io/component-base/featuregate"
 )
@@ -25,18 +27,15 @@ const (
 	// Every feature gate should add method here following this template:
 	//
 	// // owner: @username
-	// // alpha: v1.4
 	// MyFeature() bool
 
 	// owner: @alexzielenski
-	// alpha: v1.28
 	//
 	// Ignores errors raised on unchanged fields of Custom Resources
 	// across UPDATE/PATCH requests.
 	CRDValidationRatcheting featuregate.Feature = "CRDValidationRatcheting"
 
 	// owner: @jpbetz
-	// alpha: v1.30
 	//
 	// CustomResourceDefinitions may include SelectableFields to declare which fields
 	// may be used as field selectors.
@@ -44,13 +43,23 @@ const (
 )
 
 func init() {
-	utilfeature.DefaultMutableFeatureGate.Add(defaultKubernetesFeatureGates)
+	runtime.Must(utilfeature.DefaultMutableFeatureGate.AddVersioned(defaultVersionedKubernetesFeatureGates))
 }
 
-// defaultKubernetesFeatureGates consists of all known Kubernetes-specific feature keys.
-// To add a new feature, define a key for it above and add it here. The features will be
+// defaultVersionedKubernetesFeatureGates consists of all known Kubernetes-specific feature keys with VersionedSpecs.
+// To add a new feature, define a key for it above and add it below. The features will be
 // available throughout Kubernetes binaries.
-var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureSpec{
-	CRDValidationRatcheting: {Default: true, PreRelease: featuregate.Beta},
-	CustomResourceFieldSelectors: {Default: true, PreRelease: featuregate.Beta},
+// To support n-3 compatibility version, features may only be removed 3 releases after graduation.
+//
+// Entries are alphabetized.
+var defaultVersionedKubernetesFeatureGates = map[featuregate.Feature]featuregate.VersionedSpecs{
+	CRDValidationRatcheting: {
+		{Version: version.MustParse("1.28"), Default: false, PreRelease: featuregate.Alpha},
+		{Version: version.MustParse("1.30"), Default: true, PreRelease: featuregate.Beta},
+	},
+	CustomResourceFieldSelectors: {
+		{Version: version.MustParse("1.30"), Default: false, PreRelease: featuregate.Alpha},
+		{Version: version.MustParse("1.31"), Default: true, PreRelease: featuregate.Beta},
+		{Version: version.MustParse("1.32"), Default: true, LockToDefault: true, PreRelease: featuregate.GA},
+	},
 }
diff --git a/vendor/k8s.io/apimachinery/pkg/api/errors/errors.go b/vendor/k8s.io/apimachinery/pkg/api/errors/errors.go
index 57e0e71f6..6a3ab8f24 100644
--- a/vendor/k8s.io/apimachinery/pkg/api/errors/errors.go
+++ b/vendor/k8s.io/apimachinery/pkg/api/errors/errors.go
@@ -54,6 +54,7 @@ var knownReasons = map[metav1.StatusReason]struct{}{
 	metav1.StatusReasonGone: {},
 	metav1.StatusReasonInvalid: {},
 	metav1.StatusReasonServerTimeout: {},
+	metav1.StatusReasonStoreReadError: {},
 	metav1.StatusReasonTimeout: {},
 	metav1.StatusReasonTooManyRequests: {},
 	metav1.StatusReasonBadRequest: {},
@@ -775,6 +776,12 @@ func IsUnexpectedObjectError(err error) bool {
 	return err != nil && (ok || errors.As(err, &uoe))
 }
 
+// IsStoreReadError determines if err is due to either failure to transform the
+// data from the storage, or failure to decode the object appropriately.
+func IsStoreReadError(err error) bool {
+	return ReasonForError(err) == metav1.StatusReasonStoreReadError
+}
+
 // SuggestsClientDelay returns true if this error suggests a client delay as well as the
 // suggested seconds to wait, or false if the error does not imply a wait. It does not
 // address whether the error *should* be retried, since some errors (like a 3xx) may
diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/OWNERS b/vendor/k8s.io/apimachinery/pkg/api/meta/OWNERS
index 1e1330fff..3bd8bf535 100644
--- a/vendor/k8s.io/apimachinery/pkg/api/meta/OWNERS
+++ b/vendor/k8s.io/apimachinery/pkg/api/meta/OWNERS
@@ -10,5 +10,6 @@ reviewers:
   - mikedanese
   - liggitt
   - janetkuo
-  - ncdc
   - dims
+emeritus_reviewers:
+  - ncdc
diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go b/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go
index 50af8334f..d0aada9dd 100644
--- a/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go
+++ b/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go
@@ -20,7 +20,7 @@ import (
 	"bytes"
 	"errors"
 	"fmt"
-	"math"
+	math "math"
 	"math/big"
 	"strconv"
 	"strings"
@@ -460,9 +460,10 @@ func (q *Quantity) CanonicalizeBytes(out []byte) (result, suffix []byte) {
 	}
 }
 
-// AsApproximateFloat64 returns a float64 representation of the quantity which may
-// lose precision. If the value of the quantity is outside the range of a float64
-// +Inf/-Inf will be returned.
+// AsApproximateFloat64 returns a float64 representation of the quantity which
+// may lose precision. If precision matter more than performance, see
+// AsFloat64Slow. If the value of the quantity is outside the range of a
+// float64 +Inf/-Inf will be returned.
 func (q *Quantity) AsApproximateFloat64() float64 {
 	var base float64
 	var exponent int
@@ -480,6 +481,36 @@ func (q *Quantity) AsApproximateFloat64() float64 {
 	return base * math.Pow10(exponent)
 }
 
+// AsFloat64Slow returns a float64 representation of the quantity. This is
+// more precise than AsApproximateFloat64 but significantly slower. If the
+// value of the quantity is outside the range of a float64 +Inf/-Inf will be
+// returned.
+func (q *Quantity) AsFloat64Slow() float64 {
+	infDec := q.AsDec()
+
+	var absScale int64
+	if infDec.Scale() < 0 {
+		absScale = int64(-infDec.Scale())
+	} else {
+		absScale = int64(infDec.Scale())
+	}
+	pow10AbsScale := big.NewInt(10)
+	pow10AbsScale = pow10AbsScale.Exp(pow10AbsScale, big.NewInt(absScale), nil)
+
+	var resultBigFloat *big.Float
+	if infDec.Scale() < 0 {
+		resultBigInt := new(big.Int).Mul(infDec.UnscaledBig(), pow10AbsScale)
+		resultBigFloat = new(big.Float).SetInt(resultBigInt)
+	} else {
+		pow10AbsScaleFloat := new(big.Float).SetInt(pow10AbsScale)
+		resultBigFloat = new(big.Float).SetInt(infDec.UnscaledBig())
+		resultBigFloat = resultBigFloat.Quo(resultBigFloat, pow10AbsScaleFloat)
+	}
+
+	result, _ := resultBigFloat.Float64()
+	return result
+}
+
 // AsInt64 returns a representation of the current value as an int64 if a fast conversion
 // is possible. If false is returned, callers must use the inf.Dec form of this quantity.
func (q *Quantity) AsInt64() (int64, bool) { diff --git a/vendor/k8s.io/apimachinery/pkg/api/validation/objectmeta.go b/vendor/k8s.io/apimachinery/pkg/api/validation/objectmeta.go index 593d7ba8c..54a2883a3 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/validation/objectmeta.go +++ b/vendor/k8s.io/apimachinery/pkg/api/validation/objectmeta.go @@ -50,7 +50,7 @@ func ValidateAnnotations(annotations map[string]string, fldPath *field.Path) fie } } if err := ValidateAnnotationsSize(annotations); err != nil { - allErrs = append(allErrs, field.TooLong(fldPath, "", TotalAnnotationSizeLimitB)) + allErrs = append(allErrs, field.TooLong(fldPath, "" /*unused*/, TotalAnnotationSizeLimitB)) } return allErrs } diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme/register.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme/register.go index 472a9aeb2..585d7f44b 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme/register.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme/register.go @@ -24,16 +24,16 @@ import ( ) // Scheme is the registry for any type that adheres to the meta API spec. -var scheme = runtime.NewScheme() +var Scheme = runtime.NewScheme() // Codecs provides access to encoding and decoding for the scheme. -var Codecs = serializer.NewCodecFactory(scheme) +var Codecs = serializer.NewCodecFactory(Scheme) // ParameterCodec handles versioning of objects that are converted to query parameters. -var ParameterCodec = runtime.NewParameterCodec(scheme) +var ParameterCodec = runtime.NewParameterCodec(Scheme) // Unlike other API groups, meta internal knows about all meta external versions, but keeps // the logic for conversion private. func init() { - utilruntime.Must(internalversion.AddToScheme(scheme)) + utilruntime.Must(internalversion.AddToScheme(Scheme)) } diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/OWNERS b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/OWNERS index e7e5c152d..ec414a84b 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/OWNERS +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/OWNERS @@ -11,6 +11,7 @@ reviewers: - luxas - janetkuo - justinsb - - ncdc - soltysh - dims +emeritus_reviewers: + - ncdc diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go index 229ea2c2c..9ee6c0591 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go @@ -1355,187 +1355,190 @@ func init() { } var fileDescriptor_a8431b6e0aeeb761 = []byte{ - // 2873 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x1a, 0x5d, 0x6f, 0x23, 0x57, - 0x35, 0x63, 0xc7, 0x89, 0x7d, 0x6c, 0xe7, 0xe3, 0x6e, 0x16, 0xbc, 0x41, 0xc4, 0xe9, 0xb4, 0xaa, - 0xb6, 0xd0, 0x3a, 0xdd, 0xa5, 0x54, 0xdb, 0x2d, 0x2d, 0xc4, 0xf1, 0x66, 0x9b, 0x76, 0xd3, 0x44, - 0x37, 0xbb, 0x0b, 0x94, 0x0a, 0x75, 0xe2, 0xb9, 0x71, 0x86, 0x8c, 0x67, 0xdc, 0x7b, 0xc7, 0x49, - 0x0d, 0x0f, 0xf4, 0x01, 0x04, 0x48, 0xa8, 0x2a, 0x6f, 0x3c, 0xa1, 0x56, 0xf0, 0x03, 0x10, 0x4f, - 0xbc, 0x83, 0x44, 0x1f, 0x8b, 0x78, 0xa9, 0x04, 0xb2, 0xba, 0xe1, 0x81, 0x47, 0xc4, 0x6b, 0x84, - 0x04, 0xba, 0x1f, 0x33, 0x73, 0xc7, 0x1f, 0x9b, 0xf1, 0xee, 0x52, 0xf1, 0xe6, 0x39, 0xdf, 0xf7, - 0xde, 0x73, 0xce, 0x3d, 0xe7, 0x5c, 0xc3, 0x73, 0x47, 0xd7, 0x58, 0xcd, 0xf1, 0xd7, 0xac, 0x8e, - 0xd3, 0xb6, 0x9a, 0x87, 0x8e, 0x47, 0x68, 0x6f, 0xad, 0x73, 0xd4, 0xe2, 0x00, 0xb6, 
0xd6, 0x26, - 0x81, 0xb5, 0x76, 0x7c, 0x65, 0xad, 0x45, 0x3c, 0x42, 0xad, 0x80, 0xd8, 0xb5, 0x0e, 0xf5, 0x03, - 0x1f, 0x3d, 0x21, 0xb9, 0x6a, 0x3a, 0x57, 0xad, 0x73, 0xd4, 0xe2, 0x00, 0x56, 0xe3, 0x5c, 0xb5, - 0xe3, 0x2b, 0xcb, 0xcf, 0xb4, 0x9c, 0xe0, 0xb0, 0xbb, 0x5f, 0x6b, 0xfa, 0xed, 0xb5, 0x96, 0xdf, - 0xf2, 0xd7, 0x04, 0xf3, 0x7e, 0xf7, 0x40, 0x7c, 0x89, 0x0f, 0xf1, 0x4b, 0x0a, 0x5d, 0x5e, 0x1b, - 0x67, 0x0a, 0xed, 0x7a, 0x81, 0xd3, 0x26, 0x83, 0x56, 0x2c, 0x3f, 0x7f, 0x1e, 0x03, 0x6b, 0x1e, - 0x92, 0xb6, 0x35, 0xc8, 0x67, 0xfe, 0x29, 0x0b, 0xf9, 0xf5, 0xdd, 0xad, 0x9b, 0xd4, 0xef, 0x76, - 0xd0, 0x2a, 0x4c, 0x7b, 0x56, 0x9b, 0x54, 0x8c, 0x55, 0xe3, 0x72, 0xa1, 0x5e, 0xfa, 0xa8, 0x5f, - 0x9d, 0x3a, 0xed, 0x57, 0xa7, 0x5f, 0xb7, 0xda, 0x04, 0x0b, 0x0c, 0x72, 0x21, 0x7f, 0x4c, 0x28, - 0x73, 0x7c, 0x8f, 0x55, 0x32, 0xab, 0xd9, 0xcb, 0xc5, 0xab, 0x2f, 0xd7, 0xd2, 0xac, 0xbf, 0x26, - 0x14, 0xdc, 0x95, 0xac, 0x9b, 0x3e, 0x6d, 0x38, 0xac, 0xe9, 0x1f, 0x13, 0xda, 0xab, 0x2f, 0x28, - 0x2d, 0x79, 0x85, 0x64, 0x38, 0xd2, 0x80, 0x7e, 0x64, 0xc0, 0x42, 0x87, 0x92, 0x03, 0x42, 0x29, - 0xb1, 0x15, 0xbe, 0x92, 0x5d, 0x35, 0x1e, 0x81, 0xda, 0x8a, 0x52, 0xbb, 0xb0, 0x3b, 0x20, 0x1f, - 0x0f, 0x69, 0x44, 0xbf, 0x36, 0x60, 0x99, 0x11, 0x7a, 0x4c, 0xe8, 0xba, 0x6d, 0x53, 0xc2, 0x58, - 0xbd, 0xb7, 0xe1, 0x3a, 0xc4, 0x0b, 0x36, 0xb6, 0x1a, 0x98, 0x55, 0xa6, 0xc5, 0x3e, 0x7c, 0x3d, - 0x9d, 0x41, 0x7b, 0xe3, 0xe4, 0xd4, 0x4d, 0x65, 0xd1, 0xf2, 0x58, 0x12, 0x86, 0xef, 0x63, 0x86, - 0x79, 0x00, 0xa5, 0xf0, 0x20, 0x6f, 0x39, 0x2c, 0x40, 0x77, 0x61, 0xa6, 0xc5, 0x3f, 0x58, 0xc5, - 0x10, 0x06, 0xd6, 0xd2, 0x19, 0x18, 0xca, 0xa8, 0xcf, 0x29, 0x7b, 0x66, 0xc4, 0x27, 0xc3, 0x4a, - 0x9a, 0xf9, 0xb3, 0x69, 0x28, 0xae, 0xef, 0x6e, 0x61, 0xc2, 0xfc, 0x2e, 0x6d, 0x92, 0x14, 0x4e, - 0x73, 0x0d, 0x4a, 0xcc, 0xf1, 0x5a, 0x5d, 0xd7, 0xa2, 0x1c, 0x5a, 0x99, 0x11, 0x94, 0x4b, 0x8a, - 0xb2, 0xb4, 0xa7, 0xe1, 0x70, 0x82, 0x12, 0x5d, 0x05, 0xe0, 0x12, 0x58, 0xc7, 0x6a, 0x12, 0xbb, - 0x92, 0x59, 0x35, 0x2e, 0xe7, 0xeb, 0x48, 0xf1, 0xc1, 0xeb, 0x11, 0x06, 0x6b, 0x54, 0xe8, 0x71, - 0xc8, 0x09, 0x4b, 0x2b, 0x79, 0xa1, 0xa6, 0xac, 0xc8, 0x73, 0x62, 0x19, 0x58, 0xe2, 0xd0, 0x53, - 0x30, 0xab, 0xbc, 0xac, 0x52, 0x10, 0x64, 0xf3, 0x8a, 0x6c, 0x36, 0x74, 0x83, 0x10, 0xcf, 0xd7, - 0x77, 0xe4, 0x78, 0xb6, 0xf0, 0x3b, 0x6d, 0x7d, 0xaf, 0x39, 0x9e, 0x8d, 0x05, 0x06, 0xdd, 0x82, - 0xdc, 0x31, 0xa1, 0xfb, 0xdc, 0x13, 0xb8, 0x6b, 0x7e, 0x39, 0xdd, 0x46, 0xdf, 0xe5, 0x2c, 0xf5, - 0x02, 0x37, 0x4d, 0xfc, 0xc4, 0x52, 0x08, 0xaa, 0x01, 0xb0, 0x43, 0x9f, 0x06, 0x62, 0x79, 0x95, - 0xdc, 0x6a, 0xf6, 0x72, 0xa1, 0x3e, 0xc7, 0xd7, 0xbb, 0x17, 0x41, 0xb1, 0x46, 0xc1, 0xe9, 0x9b, - 0x56, 0x40, 0x5a, 0x3e, 0x75, 0x08, 0xab, 0xcc, 0xc6, 0xf4, 0x1b, 0x11, 0x14, 0x6b, 0x14, 0xe8, - 0x55, 0x40, 0x2c, 0xf0, 0xa9, 0xd5, 0x22, 0x6a, 0xa9, 0xaf, 0x58, 0xec, 0xb0, 0x02, 0x62, 0x75, - 0xcb, 0x6a, 0x75, 0x68, 0x6f, 0x88, 0x02, 0x8f, 0xe0, 0x32, 0x7f, 0x67, 0xc0, 0xbc, 0xe6, 0x0b, - 0xc2, 0xef, 0xae, 0x41, 0xa9, 0xa5, 0x45, 0x9d, 0xf2, 0x8b, 0xe8, 0xb4, 0xf5, 0x88, 0xc4, 0x09, - 0x4a, 0x44, 0xa0, 0x40, 0x95, 0xa4, 0x30, 0xbb, 0x5c, 0x49, 0xed, 0xb4, 0xa1, 0x0d, 0xb1, 0x26, - 0x0d, 0xc8, 0x70, 0x2c, 0xd9, 0xfc, 0x87, 0x21, 0x1c, 0x38, 0xcc, 0x37, 0xe8, 0xb2, 0x96, 0xd3, - 0x0c, 0xb1, 0x7d, 0xa5, 0x31, 0xf9, 0xe8, 0x9c, 0x44, 0x90, 0xf9, 0xbf, 0x48, 0x04, 0xd7, 0xf3, - 0xbf, 0xfc, 0xa0, 0x3a, 0xf5, 0xee, 0xdf, 0x56, 0xa7, 0xcc, 0x5f, 0x18, 0x50, 0x5a, 0xef, 0x74, - 0xdc, 0xde, 0x4e, 0x27, 0x10, 0x0b, 0x30, 0x61, 0xc6, 0xa6, 0x3d, 0xdc, 0xf5, 0xd4, 0x42, 0x81, - 0xc7, 0x77, 
0x43, 0x40, 0xb0, 0xc2, 0xf0, 0xf8, 0x39, 0xf0, 0x69, 0x93, 0xa8, 0x70, 0x8b, 0xe2, - 0x67, 0x93, 0x03, 0xb1, 0xc4, 0xf1, 0x43, 0x3e, 0x70, 0x88, 0x6b, 0x6f, 0x5b, 0x9e, 0xd5, 0x22, - 0x54, 0x05, 0x47, 0xb4, 0xf5, 0x9b, 0x1a, 0x0e, 0x27, 0x28, 0xcd, 0xff, 0x64, 0xa0, 0xb0, 0xe1, - 0x7b, 0xb6, 0x13, 0xa8, 0xe0, 0x0a, 0x7a, 0x9d, 0xa1, 0xe4, 0x71, 0xbb, 0xd7, 0x21, 0x58, 0x60, - 0xd0, 0x0b, 0x30, 0xc3, 0x02, 0x2b, 0xe8, 0x32, 0x61, 0x4f, 0xa1, 0xfe, 0x58, 0x98, 0x96, 0xf6, - 0x04, 0xf4, 0xac, 0x5f, 0x9d, 0x8f, 0xc4, 0x49, 0x10, 0x56, 0x0c, 0xdc, 0xd3, 0xfd, 0x7d, 0xb1, - 0x51, 0xf6, 0x4d, 0x79, 0xed, 0x85, 0xf7, 0x47, 0x36, 0xf6, 0xf4, 0x9d, 0x21, 0x0a, 0x3c, 0x82, - 0x0b, 0x1d, 0x03, 0x72, 0x2d, 0x16, 0xdc, 0xa6, 0x96, 0xc7, 0x84, 0xae, 0xdb, 0x4e, 0x9b, 0xa8, - 0x80, 0xff, 0x52, 0xba, 0x13, 0xe7, 0x1c, 0xb1, 0xde, 0x5b, 0x43, 0xd2, 0xf0, 0x08, 0x0d, 0xe8, - 0x49, 0x98, 0xa1, 0xc4, 0x62, 0xbe, 0x57, 0xc9, 0x89, 0xe5, 0x47, 0x59, 0x19, 0x0b, 0x28, 0x56, - 0x58, 0x9e, 0xd0, 0xda, 0x84, 0x31, 0xab, 0x15, 0xa6, 0xd7, 0x28, 0xa1, 0x6d, 0x4b, 0x30, 0x0e, - 0xf1, 0xe6, 0x6f, 0x0d, 0x28, 0x6f, 0x50, 0x62, 0x05, 0x64, 0x12, 0xb7, 0x78, 0xe0, 0x13, 0x47, - 0xeb, 0x30, 0x2f, 0xbe, 0xef, 0x5a, 0xae, 0x63, 0xcb, 0x33, 0x98, 0x16, 0xcc, 0x9f, 0x57, 0xcc, - 0xf3, 0x9b, 0x49, 0x34, 0x1e, 0xa4, 0x37, 0x7f, 0x92, 0x85, 0x72, 0x83, 0xb8, 0x24, 0x36, 0x79, - 0x13, 0x50, 0x8b, 0x5a, 0x4d, 0xb2, 0x4b, 0xa8, 0xe3, 0xdb, 0x7b, 0xa4, 0xe9, 0x7b, 0x36, 0x13, - 0x6e, 0x94, 0xad, 0x7f, 0x8e, 0xef, 0xef, 0xcd, 0x21, 0x2c, 0x1e, 0xc1, 0x81, 0x5c, 0x28, 0x77, - 0xa8, 0xf8, 0x2d, 0xf6, 0x5c, 0x7a, 0x59, 0xf1, 0xea, 0x57, 0xd2, 0x1d, 0xe9, 0xae, 0xce, 0x5a, - 0x5f, 0x3c, 0xed, 0x57, 0xcb, 0x09, 0x10, 0x4e, 0x0a, 0x47, 0xdf, 0x80, 0x05, 0x9f, 0x76, 0x0e, - 0x2d, 0xaf, 0x41, 0x3a, 0xc4, 0xb3, 0x89, 0x17, 0x30, 0xb1, 0x91, 0xf9, 0xfa, 0x12, 0xaf, 0x45, - 0x76, 0x06, 0x70, 0x78, 0x88, 0x1a, 0xbd, 0x01, 0x8b, 0x1d, 0xea, 0x77, 0xac, 0x96, 0xd8, 0x98, - 0x5d, 0xdf, 0x75, 0x9a, 0x3d, 0xb5, 0x9d, 0x4f, 0x9f, 0xf6, 0xab, 0x8b, 0xbb, 0x83, 0xc8, 0xb3, - 0x7e, 0xf5, 0x82, 0xd8, 0x3a, 0x0e, 0x89, 0x91, 0x78, 0x58, 0x8c, 0xe6, 0x06, 0xb9, 0x71, 0x6e, - 0x60, 0x6e, 0x41, 0xbe, 0xd1, 0x55, 0x31, 0xf1, 0x12, 0xe4, 0x6d, 0xf5, 0x5b, 0xed, 0x7c, 0x18, - 0x9c, 0x11, 0xcd, 0x59, 0xbf, 0x5a, 0xe6, 0xe5, 0x67, 0x2d, 0x04, 0xe0, 0x88, 0xc5, 0xfc, 0x8d, - 0x01, 0x15, 0x71, 0xf2, 0x7b, 0xc4, 0x25, 0xcd, 0xc0, 0xa7, 0x98, 0xbc, 0xdd, 0x75, 0x28, 0x69, - 0x13, 0x2f, 0x40, 0x5f, 0x84, 0xec, 0x11, 0xe9, 0xa9, 0xbc, 0x50, 0x54, 0x62, 0xb3, 0xaf, 0x91, - 0x1e, 0xe6, 0x70, 0x74, 0x03, 0xf2, 0x7e, 0x87, 0xc7, 0xa6, 0x4f, 0x55, 0x5e, 0x78, 0x2a, 0x54, - 0xbd, 0xa3, 0xe0, 0x67, 0xfd, 0xea, 0xc5, 0x84, 0xf8, 0x10, 0x81, 0x23, 0x56, 0xbe, 0xe2, 0x63, - 0xcb, 0xed, 0x12, 0x7e, 0x0a, 0xd1, 0x8a, 0xef, 0x0a, 0x08, 0x56, 0x18, 0xf3, 0x49, 0xc8, 0x0b, - 0x31, 0xec, 0xee, 0x15, 0xb4, 0x00, 0x59, 0x6c, 0x9d, 0x08, 0xab, 0x4a, 0x98, 0xff, 0xd4, 0x92, - 0xed, 0x0e, 0xc0, 0x4d, 0x12, 0x84, 0xfe, 0xb9, 0x0e, 0xf3, 0xe1, 0x8d, 0x93, 0xbc, 0x08, 0x23, - 0xa7, 0xc7, 0x49, 0x34, 0x1e, 0xa4, 0x37, 0xdf, 0x84, 0x82, 0xb8, 0x2c, 0x79, 0xa5, 0x11, 0x57, - 0x35, 0xc6, 0x7d, 0xaa, 0x9a, 0xb0, 0x54, 0xc9, 0x8c, 0x2b, 0x55, 0x34, 0x73, 0x5d, 0x28, 0x4b, - 0xde, 0xb0, 0x8e, 0x4b, 0xa5, 0xe1, 0x69, 0xc8, 0x87, 0x66, 0x2a, 0x2d, 0x51, 0xfd, 0x1e, 0x0a, - 0xc2, 0x11, 0x85, 0xa6, 0xed, 0x10, 0x12, 0x17, 0x7f, 0x3a, 0x65, 0x5a, 0x91, 0x96, 0xb9, 0x7f, - 0x91, 0xa6, 0x69, 0xfa, 0x21, 0x54, 0xc6, 0x15, 0xfd, 0x0f, 0x51, 0x9a, 0xa4, 0x37, 0xc5, 0x7c, - 0xcf, 0x80, 0x05, 0x5d, 0x52, 0xfa, 
0xe3, 0x4b, 0xaf, 0xe4, 0xfc, 0xa2, 0x54, 0xdb, 0x91, 0x5f, - 0x19, 0xb0, 0x94, 0x58, 0xda, 0x44, 0x27, 0x3e, 0x81, 0x51, 0xba, 0x73, 0x64, 0x27, 0x70, 0x8e, - 0xbf, 0x64, 0xa0, 0x7c, 0xcb, 0xda, 0x27, 0x6e, 0x18, 0xa9, 0xe8, 0x07, 0x50, 0x6c, 0x5b, 0x41, - 0xf3, 0x50, 0x40, 0xc3, 0x06, 0xa6, 0x91, 0x2e, 0x27, 0x27, 0x24, 0xd5, 0xb6, 0x63, 0x31, 0x37, - 0xbc, 0x80, 0xf6, 0xea, 0x17, 0x94, 0x49, 0x45, 0x0d, 0x83, 0x75, 0x6d, 0xa2, 0xeb, 0x14, 0xdf, - 0x37, 0xde, 0xe9, 0xf0, 0xea, 0x6a, 0xf2, 0x66, 0x37, 0x61, 0x82, 0x96, 0xd5, 0xe2, 0xae, 0x73, - 0x7b, 0x40, 0x3e, 0x1e, 0xd2, 0xb8, 0xfc, 0x32, 0x2c, 0x0c, 0x1a, 0xcf, 0xf3, 0x4f, 0x94, 0x15, - 0x65, 0x22, 0x5c, 0x82, 0x9c, 0xc8, 0x53, 0xf2, 0x70, 0xb0, 0xfc, 0xb8, 0x9e, 0xb9, 0x66, 0x88, - 0xf4, 0x3a, 0xce, 0x90, 0x47, 0x94, 0x5e, 0x13, 0xe2, 0x1f, 0x30, 0xbd, 0xfe, 0xde, 0x80, 0x69, - 0xd1, 0x37, 0xbc, 0x09, 0x79, 0xbe, 0x7f, 0xb6, 0x15, 0x58, 0xc2, 0xae, 0xd4, 0x1d, 0x2b, 0xe7, - 0xde, 0x26, 0x81, 0x15, 0x7b, 0x5b, 0x08, 0xc1, 0x91, 0x44, 0x84, 0x21, 0xe7, 0x04, 0xa4, 0x1d, - 0x1e, 0xe4, 0x33, 0x63, 0x45, 0xab, 0x79, 0x49, 0x0d, 0x5b, 0x27, 0x37, 0xde, 0x09, 0x88, 0xc7, - 0x0f, 0x23, 0x0e, 0x8d, 0x2d, 0x2e, 0x03, 0x4b, 0x51, 0xe6, 0xbf, 0x0c, 0x88, 0x54, 0x71, 0xe7, - 0x67, 0xc4, 0x3d, 0xb8, 0xe5, 0x78, 0x47, 0x6a, 0x5b, 0x23, 0x73, 0xf6, 0x14, 0x1c, 0x47, 0x14, - 0xa3, 0xae, 0x87, 0xcc, 0x64, 0xd7, 0x03, 0x57, 0xd8, 0xf4, 0xbd, 0xc0, 0xf1, 0xba, 0x43, 0xd1, - 0xb6, 0xa1, 0xe0, 0x38, 0xa2, 0xe0, 0xf5, 0x12, 0x25, 0x6d, 0xcb, 0xf1, 0x1c, 0xaf, 0xc5, 0x17, - 0xb1, 0xe1, 0x77, 0xbd, 0x40, 0x14, 0x0e, 0xaa, 0x5e, 0xc2, 0x43, 0x58, 0x3c, 0x82, 0xc3, 0xfc, - 0xf7, 0x34, 0x14, 0xf9, 0x9a, 0xc3, 0x7b, 0xee, 0x45, 0x28, 0xbb, 0xba, 0x17, 0xa8, 0xb5, 0x5f, - 0x54, 0xa6, 0x24, 0xe3, 0x1a, 0x27, 0x69, 0x39, 0xf3, 0x81, 0x7e, 0x43, 0xab, 0x3d, 0x88, 0x98, - 0x93, 0xd5, 0x41, 0x92, 0x96, 0x67, 0xaf, 0x13, 0x1e, 0x1f, 0xaa, 0x80, 0x8a, 0x8e, 0xe8, 0x9b, - 0x1c, 0x88, 0x25, 0x0e, 0x6d, 0xc3, 0x05, 0xcb, 0x75, 0xfd, 0x13, 0x01, 0xac, 0xfb, 0xfe, 0x51, - 0xdb, 0xa2, 0x47, 0x4c, 0xf4, 0xfc, 0xf9, 0xfa, 0x17, 0x14, 0xcb, 0x85, 0xf5, 0x61, 0x12, 0x3c, - 0x8a, 0x6f, 0xd4, 0xb1, 0x4d, 0x4f, 0x78, 0x6c, 0x87, 0xb0, 0x34, 0x00, 0x12, 0x51, 0xae, 0x1a, - 0xf0, 0xe7, 0x94, 0x9c, 0x25, 0x3c, 0x82, 0xe6, 0x6c, 0x0c, 0x1c, 0x8f, 0x94, 0x88, 0xae, 0xc3, - 0x1c, 0xf7, 0x64, 0xbf, 0x1b, 0x84, 0xe5, 0x71, 0x4e, 0x1c, 0x37, 0x3a, 0xed, 0x57, 0xe7, 0x6e, - 0x27, 0x30, 0x78, 0x80, 0x92, 0x6f, 0xae, 0xeb, 0xb4, 0x9d, 0xa0, 0x32, 0x2b, 0x58, 0xa2, 0xcd, - 0xbd, 0xc5, 0x81, 0x58, 0xe2, 0x12, 0x1e, 0x98, 0x3f, 0xd7, 0x03, 0x37, 0x60, 0x91, 0x11, 0xcf, - 0xde, 0xf2, 0x9c, 0xc0, 0xb1, 0xdc, 0x1b, 0xc7, 0xa2, 0xf8, 0x2d, 0x8a, 0x83, 0xb8, 0xc8, 0x2b, - 0xd7, 0xbd, 0x41, 0x24, 0x1e, 0xa6, 0x37, 0xff, 0x9c, 0x05, 0x24, 0xfb, 0x0a, 0x5b, 0x16, 0x65, - 0x32, 0x2f, 0xf2, 0xee, 0x47, 0xf5, 0x25, 0xc6, 0x40, 0xf7, 0xa3, 0x5a, 0x92, 0x10, 0x8f, 0xb6, - 0xa1, 0x20, 0xf3, 0x53, 0x1c, 0x73, 0x6b, 0x8a, 0xb8, 0xb0, 0x13, 0x22, 0xce, 0xfa, 0xd5, 0xe5, - 0x84, 0x9a, 0x08, 0x23, 0x3a, 0xd3, 0x58, 0x02, 0xba, 0x0a, 0x60, 0x75, 0x1c, 0x7d, 0x36, 0x59, - 0x88, 0x27, 0x54, 0xf1, 0x94, 0x01, 0x6b, 0x54, 0xe8, 0x15, 0x98, 0x0e, 0x1e, 0xac, 0x7b, 0xcc, - 0x8b, 0xe6, 0x98, 0xf7, 0x8a, 0x42, 0x02, 0xd7, 0x2e, 0x82, 0x82, 0x71, 0xb3, 0x54, 0xe3, 0x17, - 0x69, 0xdf, 0x8c, 0x30, 0x58, 0xa3, 0x42, 0xdf, 0x82, 0xfc, 0x81, 0xaa, 0x67, 0xc5, 0xe9, 0xa6, - 0xce, 0xb3, 0x61, 0x15, 0x2c, 0xc7, 0x23, 0xe1, 0x17, 0x8e, 0xa4, 0xa1, 0xaf, 0x42, 0x91, 0x75, - 0xf7, 0xa3, 0x12, 0x40, 0xba, 0x44, 0x74, 0xdf, 0xee, 0xc5, 
0x28, 0xac, 0xd3, 0x99, 0x6f, 0x43, - 0x61, 0xdb, 0x69, 0x52, 0x5f, 0xf4, 0xbb, 0x4f, 0xc1, 0x2c, 0x4b, 0x34, 0x73, 0xd1, 0x49, 0x86, - 0xae, 0x1a, 0xe2, 0xb9, 0x8f, 0x7a, 0x96, 0xe7, 0xcb, 0x96, 0x2d, 0x17, 0xfb, 0xe8, 0xeb, 0x1c, - 0x88, 0x25, 0xee, 0xfa, 0x12, 0xaf, 0x32, 0x7e, 0xfa, 0x61, 0x75, 0xea, 0xfd, 0x0f, 0xab, 0x53, - 0x1f, 0x7c, 0xa8, 0x2a, 0x8e, 0x3f, 0x00, 0xc0, 0xce, 0xfe, 0xf7, 0x48, 0x53, 0xe6, 0xee, 0x54, - 0x23, 0xcc, 0x70, 0x72, 0x2e, 0x46, 0x98, 0x99, 0x81, 0xca, 0x51, 0xc3, 0xe1, 0x04, 0x25, 0x5a, - 0x83, 0x42, 0x34, 0x9c, 0x54, 0xfe, 0xb1, 0x18, 0xfa, 0x5b, 0x34, 0xc1, 0xc4, 0x31, 0x4d, 0xe2, - 0x22, 0x99, 0x3e, 0xf7, 0x22, 0xa9, 0x43, 0xb6, 0xeb, 0xd8, 0x6a, 0x38, 0xf0, 0x6c, 0x78, 0x91, - 0xdf, 0xd9, 0x6a, 0x9c, 0xf5, 0xab, 0x8f, 0x8d, 0x7b, 0x13, 0x08, 0x7a, 0x1d, 0xc2, 0x6a, 0x77, - 0xb6, 0x1a, 0x98, 0x33, 0x8f, 0xca, 0x6a, 0x33, 0x13, 0x66, 0xb5, 0xab, 0x00, 0xad, 0x78, 0xc4, - 0x22, 0x93, 0x46, 0xe4, 0x88, 0xda, 0x68, 0x45, 0xa3, 0x42, 0x0c, 0x16, 0x9b, 0x94, 0x58, 0xe1, - 0xa8, 0x83, 0x05, 0x56, 0x5b, 0x0e, 0x6d, 0x27, 0x8b, 0x89, 0x4b, 0x4a, 0xcd, 0xe2, 0xc6, 0xa0, - 0x30, 0x3c, 0x2c, 0x1f, 0xf9, 0xb0, 0x68, 0xab, 0x6e, 0x38, 0x56, 0x5a, 0x98, 0x58, 0xa9, 0xc8, - 0x58, 0x8d, 0x41, 0x41, 0x78, 0x58, 0x36, 0xfa, 0x2e, 0x2c, 0x87, 0xc0, 0xe1, 0x91, 0x84, 0xc8, - 0xfa, 0xd9, 0xfa, 0xca, 0x69, 0xbf, 0xba, 0xdc, 0x18, 0x4b, 0x85, 0xef, 0x23, 0x01, 0xd9, 0x30, - 0xe3, 0xca, 0x2a, 0xb9, 0x28, 0x2a, 0x9b, 0xaf, 0xa5, 0x5b, 0x45, 0xec, 0xfd, 0x35, 0xbd, 0x3a, - 0x8e, 0xc6, 0x4b, 0xaa, 0x30, 0x56, 0xb2, 0xd1, 0x3b, 0x50, 0xb4, 0x3c, 0xcf, 0x0f, 0x2c, 0x39, - 0x24, 0x29, 0x09, 0x55, 0xeb, 0x13, 0xab, 0x5a, 0x8f, 0x65, 0x0c, 0x54, 0xe3, 0x1a, 0x06, 0xeb, - 0xaa, 0xd0, 0x09, 0xcc, 0xfb, 0x27, 0x1e, 0xa1, 0x98, 0x1c, 0x10, 0x4a, 0xbc, 0x26, 0x61, 0x95, - 0xb2, 0xd0, 0xfe, 0x5c, 0x4a, 0xed, 0x09, 0xe6, 0xd8, 0xa5, 0x93, 0x70, 0x86, 0x07, 0xb5, 0xa0, - 0x1a, 0xcf, 0xad, 0x9e, 0xe5, 0x3a, 0xdf, 0x27, 0x94, 0x55, 0xe6, 0xe2, 0xb9, 0xfa, 0x66, 0x04, - 0xc5, 0x1a, 0x05, 0xea, 0x42, 0xb9, 0xad, 0x5f, 0x19, 0x95, 0x45, 0x61, 0xe6, 0xb5, 0x74, 0x66, - 0x0e, 0x5f, 0x6a, 0x71, 0x19, 0x94, 0xc0, 0xe1, 0xa4, 0x96, 0xe5, 0x17, 0xa0, 0xf8, 0x80, 0x1d, - 0x02, 0xef, 0x30, 0x06, 0x0f, 0x64, 0xa2, 0x0e, 0xe3, 0x8f, 0x19, 0x98, 0x4b, 0x6e, 0xe3, 0xc0, - 0x75, 0x98, 0x4b, 0x75, 0x1d, 0x86, 0xbd, 0xac, 0x31, 0xf6, 0x81, 0x25, 0xcc, 0xcf, 0xd9, 0xb1, - 0xf9, 0x59, 0xa5, 0xc1, 0xe9, 0x87, 0x49, 0x83, 0x35, 0x00, 0x5e, 0xac, 0x50, 0xdf, 0x75, 0x09, - 0x15, 0x19, 0x30, 0xaf, 0x1e, 0x52, 0x22, 0x28, 0xd6, 0x28, 0x78, 0x49, 0xbd, 0xef, 0xfa, 0xcd, - 0x23, 0xb1, 0x05, 0x61, 0xf4, 0x8a, 0xdc, 0x97, 0x97, 0x25, 0x75, 0x7d, 0x08, 0x8b, 0x47, 0x70, - 0x98, 0x3d, 0xb8, 0xb8, 0x6b, 0x51, 0x5e, 0xe4, 0xc4, 0x91, 0x22, 0x7a, 0x96, 0xb7, 0x86, 0x3a, - 0xa2, 0x67, 0x27, 0x8d, 0xb8, 0x78, 0xf3, 0x63, 0x58, 0xdc, 0x15, 0x99, 0x7f, 0x35, 0xe0, 0xd2, - 0x48, 0xdd, 0x9f, 0x41, 0x47, 0xf6, 0x56, 0xb2, 0x23, 0x7b, 0x31, 0xe5, 0xc4, 0x75, 0x94, 0xb5, - 0x63, 0xfa, 0xb3, 0x59, 0xc8, 0xed, 0xf2, 0x4a, 0xd8, 0xfc, 0xd8, 0x80, 0x92, 0xf8, 0x35, 0xc9, - 0xc0, 0xbb, 0x9a, 0x7c, 0x07, 0x29, 0x3c, 0xba, 0x37, 0x90, 0x47, 0x31, 0x11, 0x7f, 0xcf, 0x80, - 0xe4, 0xa8, 0x19, 0xbd, 0x2c, 0x43, 0xc0, 0x88, 0x66, 0xc1, 0x13, 0xba, 0xff, 0x4b, 0xe3, 0x5a, - 0xd2, 0x0b, 0xa9, 0xa6, 0x95, 0x4f, 0x43, 0x01, 0xfb, 0x7e, 0xb0, 0x6b, 0x05, 0x87, 0x8c, 0xef, - 0x5d, 0x87, 0xff, 0x50, 0xdb, 0x2b, 0xf6, 0x4e, 0x60, 0xb0, 0x84, 0x9b, 0x3f, 0x37, 0xe0, 0xd2, - 0xd8, 0xe7, 0x2d, 0x9e, 0x45, 0x9a, 0xd1, 0x97, 0x5a, 0x51, 0xe4, 0xc8, 0x31, 0x1d, 
0xd6, 0xa8, - 0x78, 0x2f, 0x99, 0x78, 0x13, 0x1b, 0xec, 0x25, 0x13, 0xda, 0x70, 0x92, 0xd6, 0xfc, 0x67, 0x06, - 0xd4, 0x7b, 0xd2, 0xff, 0xd8, 0xe9, 0x9f, 0x1c, 0x78, 0xcd, 0x9a, 0x4b, 0xbe, 0x66, 0x45, 0x4f, - 0x57, 0xda, 0x73, 0x4e, 0xf6, 0xfe, 0xcf, 0x39, 0xe8, 0xf9, 0xe8, 0x85, 0x48, 0xfa, 0xd0, 0x4a, - 0xf2, 0x85, 0xe8, 0xac, 0x5f, 0x2d, 0x29, 0xe1, 0xc9, 0x17, 0xa3, 0x37, 0x60, 0xd6, 0x26, 0x81, - 0xe5, 0xb8, 0xb2, 0x2f, 0x4c, 0xfd, 0xe6, 0x21, 0x85, 0x35, 0x24, 0x6b, 0xbd, 0xc8, 0x6d, 0x52, - 0x1f, 0x38, 0x14, 0xc8, 0x13, 0x76, 0xd3, 0xb7, 0x65, 0x47, 0x92, 0x8b, 0x13, 0xf6, 0x86, 0x6f, - 0x13, 0x2c, 0x30, 0xe6, 0xfb, 0x06, 0x14, 0xa5, 0xa4, 0x0d, 0xab, 0xcb, 0x08, 0xba, 0x12, 0xad, - 0x42, 0x1e, 0xf7, 0x25, 0xfd, 0x29, 0xf0, 0xac, 0x5f, 0x2d, 0x08, 0x32, 0xd1, 0xcc, 0x8c, 0x78, - 0xf2, 0xca, 0x9c, 0xb3, 0x47, 0x8f, 0x43, 0x4e, 0x04, 0x90, 0xda, 0xcc, 0xf8, 0x4d, 0x93, 0x03, - 0xb1, 0xc4, 0x99, 0x9f, 0x66, 0xa0, 0x9c, 0x58, 0x5c, 0x8a, 0xbe, 0x20, 0x1a, 0xa1, 0x66, 0x52, - 0x8c, 0xe5, 0xc7, 0xff, 0x83, 0x40, 0x5d, 0x5f, 0x33, 0x0f, 0x73, 0x7d, 0x7d, 0x1b, 0x66, 0x9a, - 0x7c, 0x8f, 0xc2, 0x3f, 0xa4, 0x5c, 0x99, 0xe4, 0x38, 0xc5, 0xee, 0xc6, 0xde, 0x28, 0x3e, 0x19, - 0x56, 0x02, 0xd1, 0x4d, 0x58, 0xa4, 0x24, 0xa0, 0xbd, 0xf5, 0x83, 0x80, 0x50, 0x7d, 0x98, 0x90, - 0x8b, 0xab, 0x6f, 0x3c, 0x48, 0x80, 0x87, 0x79, 0xcc, 0x7d, 0x28, 0xdd, 0xb6, 0xf6, 0xdd, 0xe8, - 0x15, 0x0f, 0x43, 0xd9, 0xf1, 0x9a, 0x6e, 0xd7, 0x26, 0x32, 0xa1, 0x87, 0xd9, 0x2b, 0x0c, 0xda, - 0x2d, 0x1d, 0x79, 0xd6, 0xaf, 0x5e, 0x48, 0x00, 0xe4, 0xb3, 0x15, 0x4e, 0x8a, 0x30, 0x5d, 0x98, - 0xfe, 0x0c, 0x3b, 0xc9, 0xef, 0x40, 0x21, 0xae, 0xf5, 0x1f, 0xb1, 0x4a, 0xf3, 0x2d, 0xc8, 0x73, - 0x8f, 0x0f, 0x7b, 0xd4, 0x73, 0xaa, 0xa4, 0x64, 0xed, 0x95, 0x49, 0x53, 0x7b, 0x89, 0xb7, 0xe0, - 0x3b, 0x1d, 0xfb, 0x21, 0xdf, 0x82, 0x33, 0x0f, 0x73, 0xf3, 0x65, 0x27, 0xbc, 0xf9, 0xae, 0x82, - 0xfc, 0xbf, 0x0c, 0xbf, 0x64, 0x64, 0x01, 0xa1, 0x5d, 0x32, 0xfa, 0xfd, 0xaf, 0xbd, 0x30, 0xfc, - 0xd8, 0x00, 0x10, 0xa3, 0x3c, 0x31, 0x46, 0x4a, 0xf1, 0xaf, 0x83, 0x3b, 0x30, 0xe3, 0x4b, 0x8f, - 0x94, 0xef, 0xc1, 0x13, 0xce, 0x8b, 0xa3, 0x40, 0x92, 0x3e, 0x89, 0x95, 0xb0, 0xfa, 0xab, 0x1f, - 0xdd, 0x5b, 0x99, 0xfa, 0xf8, 0xde, 0xca, 0xd4, 0x27, 0xf7, 0x56, 0xa6, 0xde, 0x3d, 0x5d, 0x31, - 0x3e, 0x3a, 0x5d, 0x31, 0x3e, 0x3e, 0x5d, 0x31, 0x3e, 0x39, 0x5d, 0x31, 0x3e, 0x3d, 0x5d, 0x31, - 0xde, 0xff, 0xfb, 0xca, 0xd4, 0x1b, 0x4f, 0xa4, 0xf9, 0x1f, 0xe2, 0x7f, 0x03, 0x00, 0x00, 0xff, - 0xff, 0xd3, 0xee, 0xe4, 0x1c, 0xae, 0x28, 0x00, 0x00, + // 2928 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x3a, 0x4d, 0x6c, 0x24, 0x47, + 0xd5, 0xee, 0xf9, 0xb1, 0x67, 0xde, 0x78, 0xfc, 0x53, 0xeb, 0xfd, 0xbe, 0x59, 0x23, 0x3c, 0x4e, + 0x27, 0x8a, 0x36, 0x90, 0x8c, 0x77, 0x97, 0x25, 0xda, 0x6c, 0x48, 0xc0, 0xe3, 0x59, 0x6f, 0x9c, + 0xac, 0x63, 0xab, 0xbc, 0xbb, 0x81, 0x10, 0xa1, 0x94, 0xa7, 0xcb, 0xe3, 0xc6, 0x3d, 0xdd, 0x93, + 0xaa, 0x1e, 0x6f, 0x06, 0x0e, 0xe4, 0x00, 0x12, 0x48, 0x28, 0x0a, 0x37, 0x4e, 0x28, 0x11, 0x9c, + 0x38, 0x21, 0x4e, 0xdc, 0x41, 0x22, 0xc7, 0x20, 0x2e, 0x91, 0x40, 0xa3, 0xac, 0x39, 0x70, 0x44, + 0x5c, 0x2d, 0x24, 0x50, 0xfd, 0xf4, 0xdf, 0xfc, 0xac, 0x7b, 0x76, 0x97, 0x88, 0xdb, 0xf4, 0xfb, + 0xaf, 0xaa, 0xf7, 0x5e, 0xbd, 0xf7, 0x6a, 0xe0, 0xea, 0xd1, 0x35, 0x5e, 0xb3, 0xbd, 0x35, 0xd2, + 0xb1, 0xdb, 0xa4, 0x79, 0x68, 0xbb, 0x94, 0xf5, 0xd6, 0x3a, 0x47, 0x2d, 0x01, 0xe0, 0x6b, 0x6d, + 0xea, 0x93, 0xb5, 0xe3, 0xcb, 0x6b, 0x2d, 0xea, 0x52, 0x46, 0x7c, 0x6a, 0xd5, 0x3a, 0xcc, 0xf3, + 0x3d, 
0xf4, 0x94, 0xe2, 0xaa, 0xc5, 0xb9, 0x6a, 0x9d, 0xa3, 0x96, 0x00, 0xf0, 0x9a, 0xe0, 0xaa, + 0x1d, 0x5f, 0x5e, 0x7e, 0xae, 0x65, 0xfb, 0x87, 0xdd, 0xfd, 0x5a, 0xd3, 0x6b, 0xaf, 0xb5, 0xbc, + 0x96, 0xb7, 0x26, 0x99, 0xf7, 0xbb, 0x07, 0xf2, 0x4b, 0x7e, 0xc8, 0x5f, 0x4a, 0xe8, 0xf2, 0xda, + 0x38, 0x53, 0x58, 0xd7, 0xf5, 0xed, 0x36, 0x1d, 0xb4, 0x62, 0xf9, 0xf9, 0xb3, 0x18, 0x78, 0xf3, + 0x90, 0xb6, 0xc9, 0x20, 0x9f, 0xf9, 0xc7, 0x2c, 0x14, 0xd6, 0x77, 0xb7, 0x6e, 0x32, 0xaf, 0xdb, + 0x41, 0xab, 0x90, 0x73, 0x49, 0x9b, 0x56, 0x8c, 0x55, 0xe3, 0x62, 0xb1, 0x3e, 0xfb, 0x71, 0xbf, + 0x3a, 0x75, 0xd2, 0xaf, 0xe6, 0x5e, 0x27, 0x6d, 0x8a, 0x25, 0x06, 0x39, 0x50, 0x38, 0xa6, 0x8c, + 0xdb, 0x9e, 0xcb, 0x2b, 0x99, 0xd5, 0xec, 0xc5, 0xd2, 0x95, 0x97, 0x6b, 0x69, 0xd6, 0x5f, 0x93, + 0x0a, 0xee, 0x2a, 0xd6, 0x4d, 0x8f, 0x35, 0x6c, 0xde, 0xf4, 0x8e, 0x29, 0xeb, 0xd5, 0x17, 0xb4, + 0x96, 0x82, 0x46, 0x72, 0x1c, 0x6a, 0x40, 0x3f, 0x34, 0x60, 0xa1, 0xc3, 0xe8, 0x01, 0x65, 0x8c, + 0x5a, 0x1a, 0x5f, 0xc9, 0xae, 0x1a, 0x8f, 0x41, 0x6d, 0x45, 0xab, 0x5d, 0xd8, 0x1d, 0x90, 0x8f, + 0x87, 0x34, 0xa2, 0x5f, 0x1a, 0xb0, 0xcc, 0x29, 0x3b, 0xa6, 0x6c, 0xdd, 0xb2, 0x18, 0xe5, 0xbc, + 0xde, 0xdb, 0x70, 0x6c, 0xea, 0xfa, 0x1b, 0x5b, 0x0d, 0xcc, 0x2b, 0x39, 0xb9, 0x0f, 0x5f, 0x4f, + 0x67, 0xd0, 0xde, 0x38, 0x39, 0x75, 0x53, 0x5b, 0xb4, 0x3c, 0x96, 0x84, 0xe3, 0x07, 0x98, 0x61, + 0x1e, 0xc0, 0x6c, 0x70, 0x90, 0xb7, 0x6c, 0xee, 0xa3, 0xbb, 0x30, 0xdd, 0x12, 0x1f, 0xbc, 0x62, + 0x48, 0x03, 0x6b, 0xe9, 0x0c, 0x0c, 0x64, 0xd4, 0xe7, 0xb4, 0x3d, 0xd3, 0xf2, 0x93, 0x63, 0x2d, + 0xcd, 0xfc, 0x49, 0x0e, 0x4a, 0xeb, 0xbb, 0x5b, 0x98, 0x72, 0xaf, 0xcb, 0x9a, 0x34, 0x85, 0xd3, + 0x5c, 0x83, 0x59, 0x6e, 0xbb, 0xad, 0xae, 0x43, 0x98, 0x80, 0x56, 0xa6, 0x25, 0xe5, 0x92, 0xa6, + 0x9c, 0xdd, 0x8b, 0xe1, 0x70, 0x82, 0x12, 0x5d, 0x01, 0x10, 0x12, 0x78, 0x87, 0x34, 0xa9, 0x55, + 0xc9, 0xac, 0x1a, 0x17, 0x0b, 0x75, 0xa4, 0xf9, 0xe0, 0xf5, 0x10, 0x83, 0x63, 0x54, 0xe8, 0x49, + 0xc8, 0x4b, 0x4b, 0x2b, 0x05, 0xa9, 0xa6, 0xac, 0xc9, 0xf3, 0x72, 0x19, 0x58, 0xe1, 0xd0, 0x33, + 0x30, 0xa3, 0xbd, 0xac, 0x52, 0x94, 0x64, 0xf3, 0x9a, 0x6c, 0x26, 0x70, 0x83, 0x00, 0x2f, 0xd6, + 0x77, 0x64, 0xbb, 0x96, 0xf4, 0xbb, 0xd8, 0xfa, 0x5e, 0xb3, 0x5d, 0x0b, 0x4b, 0x0c, 0xba, 0x05, + 0xf9, 0x63, 0xca, 0xf6, 0x85, 0x27, 0x08, 0xd7, 0xfc, 0x72, 0xba, 0x8d, 0xbe, 0x2b, 0x58, 0xea, + 0x45, 0x61, 0x9a, 0xfc, 0x89, 0x95, 0x10, 0x54, 0x03, 0xe0, 0x87, 0x1e, 0xf3, 0xe5, 0xf2, 0x2a, + 0xf9, 0xd5, 0xec, 0xc5, 0x62, 0x7d, 0x4e, 0xac, 0x77, 0x2f, 0x84, 0xe2, 0x18, 0x85, 0xa0, 0x6f, + 0x12, 0x9f, 0xb6, 0x3c, 0x66, 0x53, 0x5e, 0x99, 0x89, 0xe8, 0x37, 0x42, 0x28, 0x8e, 0x51, 0xa0, + 0x57, 0x01, 0x71, 0xdf, 0x63, 0xa4, 0x45, 0xf5, 0x52, 0x5f, 0x21, 0xfc, 0xb0, 0x02, 0x72, 0x75, + 0xcb, 0x7a, 0x75, 0x68, 0x6f, 0x88, 0x02, 0x8f, 0xe0, 0x32, 0x7f, 0x6b, 0xc0, 0x7c, 0xcc, 0x17, + 0xa4, 0xdf, 0x5d, 0x83, 0xd9, 0x56, 0x2c, 0xea, 0xb4, 0x5f, 0x84, 0xa7, 0x1d, 0x8f, 0x48, 0x9c, + 0xa0, 0x44, 0x14, 0x8a, 0x4c, 0x4b, 0x0a, 0xb2, 0xcb, 0xe5, 0xd4, 0x4e, 0x1b, 0xd8, 0x10, 0x69, + 0x8a, 0x01, 0x39, 0x8e, 0x24, 0x9b, 0x7f, 0x37, 0xa4, 0x03, 0x07, 0xf9, 0x06, 0x5d, 0x8c, 0xe5, + 0x34, 0x43, 0x6e, 0xdf, 0xec, 0x98, 0x7c, 0x74, 0x46, 0x22, 0xc8, 0xfc, 0x4f, 0x24, 0x82, 0xeb, + 0x85, 0x9f, 0x7f, 0x58, 0x9d, 0x7a, 0xef, 0xaf, 0xab, 0x53, 0xe6, 0xcf, 0x0c, 0x98, 0x5d, 0xef, + 0x74, 0x9c, 0xde, 0x4e, 0xc7, 0x97, 0x0b, 0x30, 0x61, 0xda, 0x62, 0x3d, 0xdc, 0x75, 0xf5, 0x42, + 0x41, 0xc4, 0x77, 0x43, 0x42, 0xb0, 0xc6, 0x88, 0xf8, 0x39, 0xf0, 0x58, 0x93, 0xea, 0x70, 0x0b, + 0xe3, 0x67, 0x53, 0x00, 0xb1, 
0xc2, 0x89, 0x43, 0x3e, 0xb0, 0xa9, 0x63, 0x6d, 0x13, 0x97, 0xb4, + 0x28, 0xd3, 0xc1, 0x11, 0x6e, 0xfd, 0x66, 0x0c, 0x87, 0x13, 0x94, 0xe6, 0xbf, 0x33, 0x50, 0xdc, + 0xf0, 0x5c, 0xcb, 0xf6, 0x75, 0x70, 0xf9, 0xbd, 0xce, 0x50, 0xf2, 0xb8, 0xdd, 0xeb, 0x50, 0x2c, + 0x31, 0xe8, 0x05, 0x98, 0xe6, 0x3e, 0xf1, 0xbb, 0x5c, 0xda, 0x53, 0xac, 0x3f, 0x11, 0xa4, 0xa5, + 0x3d, 0x09, 0x3d, 0xed, 0x57, 0xe7, 0x43, 0x71, 0x0a, 0x84, 0x35, 0x83, 0xf0, 0x74, 0x6f, 0x5f, + 0x6e, 0x94, 0x75, 0x53, 0x5d, 0x7b, 0xc1, 0xfd, 0x91, 0x8d, 0x3c, 0x7d, 0x67, 0x88, 0x02, 0x8f, + 0xe0, 0x42, 0xc7, 0x80, 0x1c, 0xc2, 0xfd, 0xdb, 0x8c, 0xb8, 0x5c, 0xea, 0xba, 0x6d, 0xb7, 0xa9, + 0x0e, 0xf8, 0x2f, 0xa5, 0x3b, 0x71, 0xc1, 0x11, 0xe9, 0xbd, 0x35, 0x24, 0x0d, 0x8f, 0xd0, 0x80, + 0x9e, 0x86, 0x69, 0x46, 0x09, 0xf7, 0xdc, 0x4a, 0x5e, 0x2e, 0x3f, 0xcc, 0xca, 0x58, 0x42, 0xb1, + 0xc6, 0x8a, 0x84, 0xd6, 0xa6, 0x9c, 0x93, 0x56, 0x90, 0x5e, 0xc3, 0x84, 0xb6, 0xad, 0xc0, 0x38, + 0xc0, 0x9b, 0xbf, 0x31, 0xa0, 0xbc, 0xc1, 0x28, 0xf1, 0xe9, 0x24, 0x6e, 0xf1, 0xd0, 0x27, 0x8e, + 0xd6, 0x61, 0x5e, 0x7e, 0xdf, 0x25, 0x8e, 0x6d, 0xa9, 0x33, 0xc8, 0x49, 0xe6, 0xff, 0xd7, 0xcc, + 0xf3, 0x9b, 0x49, 0x34, 0x1e, 0xa4, 0x37, 0x7f, 0x9d, 0x83, 0x72, 0x83, 0x3a, 0x34, 0x32, 0x79, + 0x13, 0x50, 0x8b, 0x91, 0x26, 0xdd, 0xa5, 0xcc, 0xf6, 0xac, 0x3d, 0xda, 0xf4, 0x5c, 0x8b, 0x4b, + 0x37, 0xca, 0xd6, 0xff, 0x4f, 0xec, 0xef, 0xcd, 0x21, 0x2c, 0x1e, 0xc1, 0x81, 0x1c, 0x28, 0x77, + 0x98, 0xfc, 0x2d, 0xf7, 0x5c, 0x79, 0x59, 0xe9, 0xca, 0x57, 0xd2, 0x1d, 0xe9, 0x6e, 0x9c, 0xb5, + 0xbe, 0x78, 0xd2, 0xaf, 0x96, 0x13, 0x20, 0x9c, 0x14, 0x8e, 0xbe, 0x01, 0x0b, 0x1e, 0xeb, 0x1c, + 0x12, 0xb7, 0x41, 0x3b, 0xd4, 0xb5, 0xa8, 0xeb, 0x73, 0xb9, 0x91, 0x85, 0xfa, 0x92, 0xa8, 0x45, + 0x76, 0x06, 0x70, 0x78, 0x88, 0x1a, 0xbd, 0x09, 0x8b, 0x1d, 0xe6, 0x75, 0x48, 0x4b, 0x6e, 0xcc, + 0xae, 0xe7, 0xd8, 0xcd, 0x9e, 0xde, 0xce, 0x67, 0x4f, 0xfa, 0xd5, 0xc5, 0xdd, 0x41, 0xe4, 0x69, + 0xbf, 0x7a, 0x4e, 0x6e, 0x9d, 0x80, 0x44, 0x48, 0x3c, 0x2c, 0x26, 0xe6, 0x06, 0xf9, 0xb1, 0x6e, + 0xf0, 0xa1, 0x01, 0x97, 0xec, 0x96, 0xeb, 0x31, 0x2a, 0xae, 0x08, 0x8a, 0x29, 0xb1, 0x6e, 0x30, + 0xe6, 0xb1, 0x37, 0x6c, 0xff, 0x70, 0xc3, 0xe9, 0x72, 0x9f, 0xb2, 0x3a, 0xa3, 0xe4, 0xc8, 0x76, + 0x5b, 0xbb, 0x9e, 0x4f, 0x5d, 0xdf, 0x26, 0x8e, 0xf4, 0xc8, 0x42, 0xfd, 0xea, 0x49, 0xbf, 0x7a, + 0x69, 0x6b, 0x42, 0x5e, 0x3c, 0xb1, 0x36, 0x73, 0x0b, 0x0a, 0x8d, 0xae, 0x0e, 0xdb, 0x97, 0xa0, + 0x60, 0xe9, 0xdf, 0xda, 0x39, 0x82, 0xfc, 0x11, 0xd2, 0x9c, 0xf6, 0xab, 0x65, 0x51, 0x21, 0xd7, + 0x02, 0x00, 0x0e, 0x59, 0xcc, 0x5f, 0x19, 0x50, 0x91, 0xce, 0xb9, 0x47, 0x1d, 0xda, 0xf4, 0x3d, + 0x86, 0xe9, 0x3b, 0x5d, 0x9b, 0xd1, 0x36, 0x75, 0x7d, 0xf4, 0x45, 0xc8, 0x1e, 0xd1, 0x9e, 0x4e, + 0x5d, 0x25, 0x2d, 0x36, 0xfb, 0x1a, 0xed, 0x61, 0x01, 0x47, 0x37, 0xa0, 0xe0, 0x75, 0x44, 0xfa, + 0xf0, 0x98, 0x4e, 0x5d, 0xcf, 0x04, 0xaa, 0x77, 0x34, 0xfc, 0xb4, 0x5f, 0x3d, 0x9f, 0x10, 0x1f, + 0x20, 0x70, 0xc8, 0x2a, 0x0e, 0xe5, 0x98, 0x38, 0x5d, 0x2a, 0x1c, 0x25, 0x3c, 0x94, 0xbb, 0x12, + 0x82, 0x35, 0xc6, 0x7c, 0x1a, 0x0a, 0x52, 0x0c, 0xbf, 0x7b, 0x19, 0x2d, 0x40, 0x16, 0x93, 0x7b, + 0xd2, 0xaa, 0x59, 0x2c, 0x7e, 0xc6, 0xee, 0x83, 0x1d, 0x80, 0x9b, 0xd4, 0x0f, 0x42, 0x68, 0x1d, + 0xe6, 0x83, 0x4b, 0x31, 0x79, 0x57, 0x87, 0x71, 0x89, 0x93, 0x68, 0x3c, 0x48, 0x6f, 0xbe, 0x05, + 0x45, 0x79, 0x9f, 0x8b, 0x62, 0x28, 0x2a, 0xbc, 0x8c, 0x07, 0x14, 0x5e, 0x41, 0x35, 0x95, 0x19, + 0x57, 0x4d, 0xc5, 0xcc, 0x75, 0xa0, 0xac, 0x78, 0x83, 0x52, 0x33, 0x95, 0x86, 0x67, 0xa1, 0x10, + 0x98, 0xa9, 0xb5, 0x84, 0x2d, 0x46, 0x20, 0x08, 0x87, 
0x14, 0x31, 0x6d, 0x87, 0x90, 0xa8, 0x4d, + 0xd2, 0x29, 0x8b, 0xd5, 0x91, 0x99, 0x07, 0xd7, 0x91, 0x31, 0x4d, 0x3f, 0x80, 0xca, 0xb8, 0xbe, + 0xe4, 0x11, 0xaa, 0xa7, 0xf4, 0xa6, 0x98, 0xef, 0x1b, 0xb0, 0x10, 0x97, 0x94, 0xfe, 0xf8, 0xd2, + 0x2b, 0x39, 0xbb, 0x6e, 0x8e, 0xed, 0xc8, 0x2f, 0x0c, 0x58, 0x4a, 0x2c, 0x6d, 0xa2, 0x13, 0x9f, + 0xc0, 0xa8, 0xb8, 0x73, 0x64, 0x27, 0x70, 0x8e, 0x3f, 0x67, 0xa0, 0x7c, 0x8b, 0xec, 0x53, 0x27, + 0x88, 0x54, 0xf4, 0x7d, 0x28, 0xb5, 0x89, 0xdf, 0x3c, 0x94, 0xd0, 0xa0, 0xc7, 0x6a, 0xa4, 0xbb, + 0x36, 0x12, 0x92, 0x6a, 0xdb, 0x91, 0x98, 0x1b, 0xae, 0xcf, 0x7a, 0xf5, 0x73, 0xda, 0xa4, 0x52, + 0x0c, 0x83, 0xe3, 0xda, 0x64, 0x63, 0x2c, 0xbf, 0x6f, 0xbc, 0xdb, 0x11, 0x05, 0xe0, 0xe4, 0xfd, + 0x78, 0xc2, 0x84, 0x58, 0x56, 0x8b, 0x1a, 0xe3, 0xed, 0x01, 0xf9, 0x78, 0x48, 0xe3, 0xf2, 0xcb, + 0xb0, 0x30, 0x68, 0xbc, 0xc8, 0x3f, 0x61, 0x56, 0x54, 0x89, 0x70, 0x09, 0xf2, 0x32, 0x4f, 0xa9, + 0xc3, 0xc1, 0xea, 0xe3, 0x7a, 0xe6, 0x9a, 0x21, 0xd3, 0xeb, 0x38, 0x43, 0x1e, 0x53, 0x7a, 0x4d, + 0x88, 0x7f, 0xc8, 0xf4, 0xfa, 0x3b, 0x03, 0x72, 0xb2, 0xb5, 0x79, 0x0b, 0x0a, 0x62, 0xff, 0x2c, + 0xe2, 0x13, 0x69, 0x57, 0xea, 0xa6, 0x5a, 0x70, 0x6f, 0x53, 0x9f, 0x44, 0xde, 0x16, 0x40, 0x70, + 0x28, 0x11, 0x61, 0xc8, 0xdb, 0x3e, 0x6d, 0x07, 0x07, 0xf9, 0xdc, 0x58, 0xd1, 0x7a, 0xa4, 0x53, + 0xc3, 0xe4, 0xde, 0x8d, 0x77, 0x7d, 0xea, 0x8a, 0xc3, 0x88, 0x42, 0x63, 0x4b, 0xc8, 0xc0, 0x4a, + 0x94, 0xf9, 0x4f, 0x03, 0x42, 0x55, 0xc2, 0xf9, 0x39, 0x75, 0x0e, 0x6e, 0xd9, 0xee, 0x91, 0xde, + 0xd6, 0xd0, 0x9c, 0x3d, 0x0d, 0xc7, 0x21, 0xc5, 0xa8, 0xeb, 0x21, 0x33, 0xd9, 0xf5, 0x20, 0x14, + 0x36, 0x3d, 0xd7, 0xb7, 0xdd, 0xee, 0x50, 0xb4, 0x6d, 0x68, 0x38, 0x0e, 0x29, 0x44, 0x49, 0xc7, + 0x68, 0x9b, 0xd8, 0xae, 0xed, 0xb6, 0xc4, 0x22, 0x36, 0xbc, 0xae, 0xeb, 0xcb, 0xda, 0x46, 0x97, + 0x74, 0x78, 0x08, 0x8b, 0x47, 0x70, 0x98, 0xff, 0xca, 0x41, 0x49, 0xac, 0x39, 0xb8, 0xe7, 0x5e, + 0x84, 0xb2, 0x13, 0xf7, 0x02, 0xbd, 0xf6, 0xf3, 0xda, 0x94, 0x64, 0x5c, 0xe3, 0x24, 0xad, 0x60, + 0x3e, 0x88, 0xdf, 0xd0, 0x7a, 0x0f, 0x42, 0xe6, 0x64, 0x75, 0x90, 0xa4, 0x15, 0xd9, 0xeb, 0x9e, + 0x88, 0x0f, 0x5d, 0xe3, 0x85, 0x47, 0xf4, 0x86, 0x00, 0x62, 0x85, 0x43, 0xdb, 0x70, 0x8e, 0x38, + 0x8e, 0x77, 0x4f, 0x02, 0xeb, 0x9e, 0x77, 0xd4, 0x26, 0xec, 0x88, 0xcb, 0xb1, 0x44, 0xa1, 0xfe, + 0x05, 0xcd, 0x72, 0x6e, 0x7d, 0x98, 0x04, 0x8f, 0xe2, 0x1b, 0x75, 0x6c, 0xb9, 0x09, 0x8f, 0xed, + 0x10, 0x96, 0x06, 0x40, 0x32, 0xca, 0xf5, 0x8c, 0xe0, 0xaa, 0x96, 0xb3, 0x84, 0x47, 0xd0, 0x9c, + 0x8e, 0x81, 0xe3, 0x91, 0x12, 0xd1, 0x75, 0x98, 0x13, 0x9e, 0xec, 0x75, 0xfd, 0xa0, 0x82, 0xcf, + 0xcb, 0xe3, 0x46, 0x27, 0xfd, 0xea, 0xdc, 0xed, 0x04, 0x06, 0x0f, 0x50, 0x8a, 0xcd, 0x75, 0xec, + 0xb6, 0xed, 0x57, 0x66, 0x24, 0x4b, 0xb8, 0xb9, 0xb7, 0x04, 0x10, 0x2b, 0x5c, 0xc2, 0x03, 0x0b, + 0x67, 0x7a, 0xe0, 0x06, 0x2c, 0x72, 0xea, 0x5a, 0x5b, 0xae, 0x2d, 0x0a, 0xc9, 0x1b, 0xc7, 0xb2, + 0x3e, 0x2f, 0xc9, 0x83, 0x38, 0x2f, 0x8a, 0xeb, 0xbd, 0x41, 0x24, 0x1e, 0xa6, 0x37, 0xff, 0x94, + 0x05, 0xa4, 0x5a, 0x1f, 0x4b, 0x15, 0x65, 0x2a, 0x2f, 0x8a, 0x06, 0x4d, 0xb7, 0x4e, 0xc6, 0x40, + 0x83, 0xa6, 0xbb, 0xa6, 0x00, 0x8f, 0xb6, 0xa1, 0xa8, 0xf2, 0x53, 0x14, 0x73, 0x6b, 0x9a, 0xb8, + 0xb8, 0x13, 0x20, 0x4e, 0xfb, 0xd5, 0xe5, 0x84, 0x9a, 0x10, 0x23, 0x9b, 0xe7, 0x48, 0x02, 0xba, + 0x02, 0x40, 0x3a, 0x76, 0x7c, 0x7c, 0x5a, 0x8c, 0x86, 0x68, 0xd1, 0x20, 0x04, 0xc7, 0xa8, 0xd0, + 0x2b, 0x90, 0xf3, 0x1f, 0xae, 0xc1, 0x2d, 0xc8, 0xfe, 0x5d, 0xb4, 0xb3, 0x52, 0x82, 0xd0, 0x2e, + 0x83, 0x82, 0x0b, 0xb3, 0x74, 0x6f, 0x1a, 0x6a, 0xdf, 0x0c, 0x31, 0x38, 0x46, 
0x85, 0xbe, 0x09, + 0x85, 0x03, 0x5d, 0xcf, 0xca, 0xd3, 0x4d, 0x9d, 0x67, 0x83, 0x2a, 0x58, 0x4d, 0x70, 0x82, 0x2f, + 0x1c, 0x4a, 0x43, 0x5f, 0x85, 0x12, 0xef, 0xee, 0x87, 0x25, 0x80, 0x72, 0x89, 0xf0, 0xbe, 0xdd, + 0x8b, 0x50, 0x38, 0x4e, 0x67, 0xbe, 0x03, 0xc5, 0x6d, 0xbb, 0xc9, 0x3c, 0xd9, 0x92, 0x3f, 0x03, + 0x33, 0x3c, 0xd1, 0x6f, 0x86, 0x27, 0x19, 0xb8, 0x6a, 0x80, 0x17, 0x3e, 0xea, 0x12, 0xd7, 0x53, + 0x5d, 0x65, 0x3e, 0xf2, 0xd1, 0xd7, 0x05, 0x10, 0x2b, 0xdc, 0xf5, 0x25, 0x51, 0x65, 0xfc, 0xf8, + 0xa3, 0xea, 0xd4, 0x07, 0x1f, 0x55, 0xa7, 0x3e, 0xfc, 0x48, 0x57, 0x1c, 0xbf, 0x07, 0x80, 0x9d, + 0xfd, 0xef, 0xd2, 0xa6, 0xca, 0xdd, 0xa9, 0xa6, 0xac, 0xc1, 0x70, 0x5f, 0x4e, 0x59, 0x33, 0x03, + 0x95, 0x63, 0x0c, 0x87, 0x13, 0x94, 0x68, 0x0d, 0x8a, 0xe1, 0xfc, 0x54, 0xfb, 0xc7, 0x62, 0xe0, + 0x6f, 0xe1, 0x90, 0x15, 0x47, 0x34, 0x89, 0x8b, 0x24, 0x77, 0xe6, 0x45, 0x52, 0x87, 0x6c, 0xd7, + 0xb6, 0xf4, 0xfc, 0xe2, 0x52, 0x70, 0x91, 0xdf, 0xd9, 0x6a, 0x9c, 0xf6, 0xab, 0x4f, 0x8c, 0x7b, + 0xb6, 0xf0, 0x7b, 0x1d, 0xca, 0x6b, 0x77, 0xb6, 0x1a, 0x58, 0x30, 0x8f, 0xca, 0x6a, 0xd3, 0x13, + 0x66, 0xb5, 0x2b, 0x00, 0xad, 0x68, 0x0a, 0xa4, 0x92, 0x46, 0xe8, 0x88, 0xb1, 0xe9, 0x4f, 0x8c, + 0x0a, 0x71, 0x58, 0x6c, 0x32, 0x4a, 0x82, 0x69, 0x0c, 0xf7, 0x49, 0x5b, 0xcd, 0x95, 0x27, 0x8b, + 0x89, 0x0b, 0x5a, 0xcd, 0xe2, 0xc6, 0xa0, 0x30, 0x3c, 0x2c, 0x1f, 0x79, 0xb0, 0x68, 0xe9, 0x86, + 0x3d, 0x52, 0x5a, 0x9c, 0x58, 0xa9, 0xcc, 0x58, 0x8d, 0x41, 0x41, 0x78, 0x58, 0x36, 0xfa, 0x0e, + 0x2c, 0x07, 0xc0, 0xe1, 0xa9, 0x89, 0xcc, 0xfa, 0xd9, 0xfa, 0xca, 0x49, 0xbf, 0xba, 0xdc, 0x18, + 0x4b, 0x85, 0x1f, 0x20, 0x01, 0x59, 0x30, 0xed, 0xa8, 0x2a, 0xb9, 0x24, 0x2b, 0x9b, 0xaf, 0xa5, + 0x5b, 0x45, 0xe4, 0xfd, 0xb5, 0x78, 0x75, 0x1c, 0x4e, 0xc0, 0x74, 0x61, 0xac, 0x65, 0xa3, 0x77, + 0xa1, 0x44, 0x5c, 0xd7, 0xf3, 0x89, 0x9a, 0xe3, 0xcc, 0x4a, 0x55, 0xeb, 0x13, 0xab, 0x5a, 0x8f, + 0x64, 0x0c, 0x54, 0xe3, 0x31, 0x0c, 0x8e, 0xab, 0x42, 0xf7, 0x60, 0xde, 0xbb, 0xe7, 0x52, 0x86, + 0xe9, 0x01, 0x65, 0xd4, 0x6d, 0x52, 0x5e, 0x29, 0x4b, 0xed, 0x57, 0x53, 0x6a, 0x4f, 0x30, 0x47, + 0x2e, 0x9d, 0x84, 0x73, 0x3c, 0xa8, 0x05, 0xd5, 0x44, 0x6e, 0x75, 0x89, 0x63, 0x7f, 0x8f, 0x32, + 0x5e, 0x99, 0x8b, 0x46, 0xff, 0x9b, 0x21, 0x14, 0xc7, 0x28, 0x50, 0x17, 0xca, 0xed, 0xf8, 0x95, + 0x51, 0x59, 0x94, 0x66, 0x5e, 0x4b, 0x67, 0xe6, 0xf0, 0xa5, 0x16, 0x95, 0x41, 0x09, 0x1c, 0x4e, + 0x6a, 0x59, 0x7e, 0x01, 0x4a, 0x0f, 0xd9, 0x21, 0x88, 0x0e, 0x63, 0xf0, 0x40, 0x26, 0xea, 0x30, + 0xfe, 0x90, 0x81, 0xb9, 0xe4, 0x36, 0x0e, 0x5c, 0x87, 0xf9, 0x54, 0xd7, 0x61, 0xd0, 0xcb, 0x1a, + 0x63, 0xdf, 0x80, 0x82, 0xfc, 0x9c, 0x1d, 0x9b, 0x9f, 0x75, 0x1a, 0xcc, 0x3d, 0x4a, 0x1a, 0xac, + 0x01, 0x88, 0x62, 0x85, 0x79, 0x8e, 0x43, 0x99, 0x1e, 0xab, 0xa9, 0xb7, 0x9e, 0x10, 0x8a, 0x63, + 0x14, 0xa2, 0xa4, 0xde, 0x77, 0xbc, 0xe6, 0x91, 0xdc, 0x82, 0x20, 0x7a, 0x65, 0xee, 0x2b, 0xa8, + 0x92, 0xba, 0x3e, 0x84, 0xc5, 0x23, 0x38, 0xcc, 0x1e, 0x9c, 0xdf, 0x25, 0x4c, 0x14, 0x39, 0x51, + 0xa4, 0xc8, 0x9e, 0xe5, 0xed, 0xa1, 0x8e, 0xe8, 0xd2, 0xa4, 0x11, 0x17, 0x6d, 0x7e, 0x04, 0x8b, + 0xba, 0x22, 0xf3, 0x2f, 0x06, 0x5c, 0x18, 0xa9, 0xfb, 0x73, 0xe8, 0xc8, 0xde, 0x4e, 0x76, 0x64, + 0x2f, 0xa6, 0x1c, 0x0a, 0x8f, 0xb2, 0x76, 0x4c, 0x7f, 0x36, 0x03, 0xf9, 0x5d, 0x51, 0x09, 0x9b, + 0x9f, 0x18, 0x30, 0x2b, 0x7f, 0x4d, 0x32, 0x93, 0xaf, 0x26, 0x9f, 0x6a, 0x8a, 0x8f, 0xef, 0x99, + 0xe6, 0x71, 0x0c, 0xed, 0xdf, 0x37, 0x20, 0x39, 0x0d, 0x47, 0x2f, 0xab, 0x10, 0x30, 0xc2, 0x71, + 0xf5, 0x84, 0xee, 0xff, 0xd2, 0xb8, 0x96, 0xf4, 0x5c, 0xaa, 0x69, 0xe5, 0xb3, 0x50, 0xc4, 0x9e, + 0xe7, 
0xef, 0x12, 0xff, 0x90, 0x8b, 0xbd, 0xeb, 0x88, 0x1f, 0x7a, 0x7b, 0xe5, 0xde, 0x49, 0x0c, + 0x56, 0x70, 0xf3, 0xa7, 0x06, 0x5c, 0x18, 0xfb, 0x02, 0x27, 0xb2, 0x48, 0x33, 0xfc, 0xd2, 0x2b, + 0x0a, 0x1d, 0x39, 0xa2, 0xc3, 0x31, 0x2a, 0xd1, 0x4b, 0x26, 0x9e, 0xed, 0x06, 0x7b, 0xc9, 0x84, + 0x36, 0x9c, 0xa4, 0x35, 0xff, 0x91, 0x01, 0xfd, 0xe4, 0xf5, 0x5f, 0x76, 0xfa, 0xa7, 0x07, 0x1e, + 0xdc, 0xe6, 0x92, 0x0f, 0x6e, 0xe1, 0xeb, 0x5a, 0xec, 0xc5, 0x29, 0xfb, 0xe0, 0x17, 0x27, 0xf4, + 0x7c, 0xf8, 0x88, 0xa5, 0x7c, 0x68, 0x25, 0xf9, 0x88, 0x75, 0xda, 0xaf, 0xce, 0x6a, 0xe1, 0xc9, + 0x47, 0xad, 0x37, 0x61, 0xc6, 0xa2, 0x3e, 0xb1, 0x1d, 0xd5, 0x17, 0xa6, 0x7e, 0x96, 0x51, 0xc2, + 0x1a, 0x8a, 0xb5, 0x5e, 0x12, 0x36, 0xe9, 0x0f, 0x1c, 0x08, 0x14, 0x09, 0xbb, 0xe9, 0x59, 0xaa, + 0x23, 0xc9, 0x47, 0x09, 0x7b, 0xc3, 0xb3, 0x28, 0x96, 0x18, 0xf3, 0x03, 0x03, 0x4a, 0x4a, 0xd2, + 0x06, 0xe9, 0x72, 0x8a, 0x2e, 0x87, 0xab, 0x50, 0xc7, 0x7d, 0x21, 0xfe, 0x5a, 0x79, 0xda, 0xaf, + 0x16, 0x25, 0x99, 0x6c, 0x66, 0x46, 0xbc, 0xca, 0x65, 0xce, 0xd8, 0xa3, 0x27, 0x21, 0x2f, 0x03, + 0x48, 0x6f, 0x66, 0xf4, 0xec, 0x2a, 0x80, 0x58, 0xe1, 0xcc, 0xcf, 0x32, 0x50, 0x4e, 0x2c, 0x2e, + 0x45, 0x5f, 0x10, 0x8e, 0x50, 0x33, 0x29, 0xc6, 0xf2, 0xe3, 0xff, 0xe4, 0xa0, 0xaf, 0xaf, 0xe9, + 0x47, 0xb9, 0xbe, 0xbe, 0x05, 0xd3, 0x4d, 0xb1, 0x47, 0xc1, 0x7f, 0x66, 0x2e, 0x4f, 0x72, 0x9c, + 0x72, 0x77, 0x23, 0x6f, 0x94, 0x9f, 0x1c, 0x6b, 0x81, 0xe8, 0x26, 0x2c, 0x32, 0xea, 0xb3, 0xde, + 0xfa, 0x81, 0x4f, 0x59, 0x7c, 0x98, 0x90, 0x8f, 0xaa, 0x6f, 0x3c, 0x48, 0x80, 0x87, 0x79, 0xcc, + 0x7d, 0x98, 0xbd, 0x4d, 0xf6, 0x9d, 0xf0, 0xa1, 0x11, 0x43, 0xd9, 0x76, 0x9b, 0x4e, 0xd7, 0xa2, + 0x2a, 0xa1, 0x07, 0xd9, 0x2b, 0x08, 0xda, 0xad, 0x38, 0xf2, 0xb4, 0x5f, 0x3d, 0x97, 0x00, 0xa8, + 0x97, 0x35, 0x9c, 0x14, 0x61, 0x3a, 0x90, 0xfb, 0x1c, 0x3b, 0xc9, 0x6f, 0x43, 0x31, 0xaa, 0xf5, + 0x1f, 0xb3, 0x4a, 0xf3, 0x6d, 0x28, 0x08, 0x8f, 0x0f, 0x7a, 0xd4, 0x33, 0xaa, 0xa4, 0x64, 0xed, + 0x95, 0x49, 0x53, 0x7b, 0xc9, 0xe7, 0xea, 0x3b, 0x1d, 0xeb, 0x11, 0x9f, 0xab, 0x33, 0x8f, 0x72, + 0xf3, 0x65, 0x27, 0xbc, 0xf9, 0xae, 0x80, 0xfa, 0x4b, 0x8f, 0xb8, 0x64, 0x54, 0x01, 0x11, 0xbb, + 0x64, 0xe2, 0xf7, 0x7f, 0xec, 0x85, 0xe1, 0x47, 0x06, 0x80, 0x1c, 0xe5, 0xc9, 0x31, 0x52, 0x8a, + 0x3f, 0x46, 0xdc, 0x81, 0x69, 0x4f, 0x79, 0xa4, 0x7a, 0xb2, 0x9e, 0x70, 0x5e, 0x1c, 0x06, 0x92, + 0xf2, 0x49, 0xac, 0x85, 0xd5, 0x5f, 0xfd, 0xf8, 0xfe, 0xca, 0xd4, 0x27, 0xf7, 0x57, 0xa6, 0x3e, + 0xbd, 0xbf, 0x32, 0xf5, 0xde, 0xc9, 0x8a, 0xf1, 0xf1, 0xc9, 0x8a, 0xf1, 0xc9, 0xc9, 0x8a, 0xf1, + 0xe9, 0xc9, 0x8a, 0xf1, 0xd9, 0xc9, 0x8a, 0xf1, 0xc1, 0xdf, 0x56, 0xa6, 0xde, 0x7c, 0x2a, 0xcd, + 0x5f, 0x25, 0xff, 0x13, 0x00, 0x00, 0xff, 0xff, 0xf8, 0xda, 0x63, 0x4c, 0x51, 0x29, 0x00, 0x00, } func (m *APIGroup) Marshal() (dAtA []byte, err error) { @@ -1983,6 +1986,16 @@ func (m *DeleteOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.IgnoreStoreReadErrorWithClusterBreakingPotential != nil { + i-- + if *m.IgnoreStoreReadErrorWithClusterBreakingPotential { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x30 + } if len(m.DryRun) > 0 { for iNdEx := len(m.DryRun) - 1; iNdEx >= 0; iNdEx-- { i -= len(m.DryRun[iNdEx]) @@ -3773,6 +3786,9 @@ func (m *DeleteOptions) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) } } + if m.IgnoreStoreReadErrorWithClusterBreakingPotential != nil { + n += 2 + } return n } @@ -4506,6 +4522,7 @@ func (this *DeleteOptions) String() string { `OrphanDependents:` + valueToStringGenerated(this.OrphanDependents) + `,`, 
`PropagationPolicy:` + valueToStringGenerated(this.PropagationPolicy) + `,`, `DryRun:` + fmt.Sprintf("%v", this.DryRun) + `,`, + `IgnoreStoreReadErrorWithClusterBreakingPotential:` + valueToStringGenerated(this.IgnoreStoreReadErrorWithClusterBreakingPotential) + `,`, `}`, }, "") return s @@ -6456,6 +6473,27 @@ func (m *DeleteOptions) Unmarshal(dAtA []byte) error { } m.DryRun = append(m.DryRun, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IgnoreStoreReadErrorWithClusterBreakingPotential", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.IgnoreStoreReadErrorWithClusterBreakingPotential = &b default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto index 18dd0b067..865d3e7ca 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto @@ -315,6 +315,21 @@ message DeleteOptions { // +optional // +listType=atomic repeated string dryRun = 5; + + // if set to true, it will trigger an unsafe deletion of the resource in + // case the normal deletion flow fails with a corrupt object error. + // A resource is considered corrupt if it can not be retrieved from + // the underlying storage successfully because of a) its data can + // not be transformed e.g. decryption failure, or b) it fails + // to decode into an object. + // NOTE: unsafe deletion ignores finalizer constraints, skips + // precondition checks, and removes the object from the storage. + // WARNING: This may potentially break the cluster if the workload + // associated with the resource being unsafe-deleted relies on normal + // deletion flow. Use only if you REALLY know what you are doing. + // The default value is false, and the user must opt in to enable it + // +optional + optional bool ignoreStoreReadErrorWithClusterBreakingPotential = 6; } // Duration is a wrapper around time.Duration which supports correct diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go index 473adb9ef..4cf3f4795 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go @@ -439,6 +439,20 @@ const ( // // The annotation is added to a "Bookmark" event. InitialEventsAnnotationKey = "k8s.io/initial-events-end" + + // InitialEventsListBlueprintAnnotationKey is the name of the key + // where an empty, versioned list is encoded in the requested format + // (e.g., protobuf, JSON, CBOR), then base64-encoded and stored as a string. + // + // This encoding matches the request encoding format, which may be + // protobuf, JSON, CBOR, or others, depending on what the client requested. + // This ensures that the reconstructed list can be processed through the + // same decoder chain that would handle a standard LIST call response. + // + // The annotation is added to a "Bookmark" event and is used by clients + // to guarantee the format consistency when reconstructing + // the list during WatchList processing. 
+ InitialEventsListBlueprintAnnotationKey = "kubernetes.io/initial-events-list-blueprint" ) // resourceVersionMatch specifies how the resourceVersion parameter is applied. resourceVersionMatch @@ -546,6 +560,21 @@ type DeleteOptions struct { // +optional // +listType=atomic DryRun []string `json:"dryRun,omitempty" protobuf:"bytes,5,rep,name=dryRun"` + + // if set to true, it will trigger an unsafe deletion of the resource in + // case the normal deletion flow fails with a corrupt object error. + // A resource is considered corrupt if it can not be retrieved from + // the underlying storage successfully because of a) its data can + // not be transformed e.g. decryption failure, or b) it fails + // to decode into an object. + // NOTE: unsafe deletion ignores finalizer constraints, skips + // precondition checks, and removes the object from the storage. + // WARNING: This may potentially break the cluster if the workload + // associated with the resource being unsafe-deleted relies on normal + // deletion flow. Use only if you REALLY know what you are doing. + // The default value is false, and the user must opt in to enable it + // +optional + IgnoreStoreReadErrorWithClusterBreakingPotential *bool `json:"ignoreStoreReadErrorWithClusterBreakingPotential,omitempty" protobuf:"varint,6,opt,name=ignoreStoreReadErrorWithClusterBreakingPotential"` } const ( @@ -902,6 +931,22 @@ const ( // Status code 500 StatusReasonServerTimeout StatusReason = "ServerTimeout" + // StatusReasonStoreReadError means that the server encountered an error while + // retrieving resources from the backend object store. + // This may be due to backend database error, or because processing of the read + // resource failed. + // Details: + // "kind" string - the kind attribute of the resource being acted on. + // "name" string - the prefix where the reading error(s) occurred + // "causes" []StatusCause + // - (optional): + // - "type" CauseType - CauseTypeUnexpectedServerResponse + // - "message" string - the error message from the store backend + // - "field" string - the full path with the key of the resource that failed reading + // + // Status code 500 + StatusReasonStoreReadError StatusReason = "StorageReadError" + // StatusReasonTimeout means that the request could not be completed within the given time. // Clients can get this response only when they specified a timeout param in the request, // or if the server cannot complete the operation within a reasonable amount of time. diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go index 1fa37215c..405496d3d 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go @@ -129,6 +129,7 @@ var map_DeleteOptions = map[string]string{ "orphanDependents": "Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.", "propagationPolicy": "Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. 
Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.", "dryRun": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed", + "ignoreStoreReadErrorWithClusterBreakingPotential": "if set to true, it will trigger an unsafe deletion of the resource in case the normal deletion flow fails with a corrupt object error. A resource is considered corrupt if it can not be retrieved from the underlying storage successfully because of a) its data can not be transformed e.g. decryption failure, or b) it fails to decode into an object. NOTE: unsafe deletion ignores finalizer constraints, skips precondition checks, and removes the object from the storage. WARNING: This may potentially break the cluster if the workload associated with the resource being unsafe-deleted relies on normal deletion flow. Use only if you REALLY know what you are doing. The default value is false, and the user must opt in to enable it", } func (DeleteOptions) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go index 0f58d66c0..71f7b163a 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go @@ -20,6 +20,7 @@ import ( gojson "encoding/json" "fmt" "io" + "math/big" "strings" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -125,6 +126,29 @@ func NestedInt64(obj map[string]interface{}, fields ...string) (int64, bool, err return i, true, nil } +// NestedNumberAsFloat64 returns the float64 value of a nested field. If the field's value is a +// float64, it is returned. If the field's value is an int64 that can be losslessly converted to +// float64, it will be converted and returned. Returns false if value is not found and an error if +// not a float64 or an int64 that can be accurately represented as a float64. +func NestedNumberAsFloat64(obj map[string]interface{}, fields ...string) (float64, bool, error) { + val, found, err := NestedFieldNoCopy(obj, fields...) + if !found || err != nil { + return 0, found, err + } + switch x := val.(type) { + case int64: + f, accuracy := big.NewInt(x).Float64() + if accuracy != big.Exact { + return 0, false, fmt.Errorf("%v accessor error: int64 value %v cannot be losslessly converted to float64", jsonPath(fields), x) + } + return f, true, nil + case float64: + return x, true, nil + default: + return 0, false, fmt.Errorf("%v accessor error: %v is of the type %T, expected float64 or int64", jsonPath(fields), val, val) + } +} + // NestedStringSlice returns a copy of []string value of a nested field. // Returns false if value is not found and an error if not a []interface{} or contains non-string items in the slice. 
func NestedStringSlice(obj map[string]interface{}, fields ...string) ([]string, bool, error) { diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured.go index 40d289f37..5e36a91ee 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured.go @@ -450,10 +450,14 @@ func (u *Unstructured) SetFinalizers(finalizers []string) { } func (u *Unstructured) GetManagedFields() []metav1.ManagedFieldsEntry { - items, found, err := NestedSlice(u.Object, "metadata", "managedFields") + v, found, err := NestedFieldNoCopy(u.Object, "metadata", "managedFields") if !found || err != nil { return nil } + items, ok := v.([]interface{}) + if !ok { + return nil + } managedFields := []metav1.ManagedFieldsEntry{} for _, item := range items { m, ok := item.(map[string]interface{}) diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/validation/validation.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/validation/validation.go index 3eba5ba54..b1eb1bbfc 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/validation/validation.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/validation/validation.go @@ -26,6 +26,8 @@ import ( "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/validation" "k8s.io/apimachinery/pkg/util/validation/field" + + "k8s.io/utils/ptr" ) // LabelSelectorValidationOptions is a struct that can be passed to ValidateLabelSelector to record the validate options @@ -165,6 +167,7 @@ func ValidateDeleteOptions(options *metav1.DeleteOptions) field.ErrorList { allErrs = append(allErrs, field.NotSupported(field.NewPath("propagationPolicy"), options.PropagationPolicy, []string{string(metav1.DeletePropagationForeground), string(metav1.DeletePropagationBackground), string(metav1.DeletePropagationOrphan), "nil"})) } allErrs = append(allErrs, ValidateDryRun(field.NewPath("dryRun"), options.DryRun)...) + allErrs = append(allErrs, ValidateIgnoreStoreReadError(field.NewPath("ignoreStoreReadErrorWithClusterBreakingPotential"), options)...) return allErrs } @@ -186,15 +189,16 @@ func ValidateUpdateOptions(options *metav1.UpdateOptions) field.ErrorList { func ValidatePatchOptions(options *metav1.PatchOptions, patchType types.PatchType) field.ErrorList { allErrs := field.ErrorList{} - if patchType != types.ApplyPatchType { - if options.Force != nil { - allErrs = append(allErrs, field.Forbidden(field.NewPath("force"), "may not be specified for non-apply patch")) - } - } else { + switch patchType { + case types.ApplyYAMLPatchType, types.ApplyCBORPatchType: if options.FieldManager == "" { // This field is defaulted to "kubectl" by kubectl, but HAS TO be explicitly set by controllers. allErrs = append(allErrs, field.Required(field.NewPath("fieldManager"), "is required for apply patch")) } + default: + if options.Force != nil { + allErrs = append(allErrs, field.Forbidden(field.NewPath("force"), "may not be specified for non-apply patch")) + } } allErrs = append(allErrs, ValidateFieldManager(options.FieldManager, field.NewPath("fieldManager"))...) allErrs = append(allErrs, ValidateDryRun(field.NewPath("dryRun"), options.DryRun)...) @@ -212,7 +216,7 @@ func ValidateFieldManager(fieldManager string, fldPath *field.Path) field.ErrorL // considered as not set and is defaulted by the rest of the process // (unless apply is used, in which case it is required). 
if len(fieldManager) > FieldManagerMaxLength { - allErrs = append(allErrs, field.TooLong(fldPath, fieldManager, FieldManagerMaxLength)) + allErrs = append(allErrs, field.TooLong(fldPath, "" /*unused*/, FieldManagerMaxLength)) } // Verify that all characters are printable. for i, r := range fieldManager { @@ -277,7 +281,7 @@ func ValidateManagedFields(fieldsList []metav1.ManagedFieldsEntry, fldPath *fiel allErrs = append(allErrs, ValidateFieldManager(fields.Manager, fldPath.Child("manager"))...) if len(fields.Subresource) > MaxSubresourceNameLength { - allErrs = append(allErrs, field.TooLong(fldPath.Child("subresource"), fields.Subresource, MaxSubresourceNameLength)) + allErrs = append(allErrs, field.TooLong(fldPath.Child("subresource"), "" /*unused*/, MaxSubresourceNameLength)) } } return allErrs @@ -334,12 +338,12 @@ func ValidateCondition(condition metav1.Condition, fldPath *field.Path) field.Er allErrs = append(allErrs, field.Invalid(fldPath.Child("reason"), condition.Reason, currErr)) } if len(condition.Reason) > maxReasonLen { - allErrs = append(allErrs, field.TooLong(fldPath.Child("reason"), condition.Reason, maxReasonLen)) + allErrs = append(allErrs, field.TooLong(fldPath.Child("reason"), "" /*unused*/, maxReasonLen)) } } if len(condition.Message) > maxMessageLen { - allErrs = append(allErrs, field.TooLong(fldPath.Child("message"), condition.Message, maxMessageLen)) + allErrs = append(allErrs, field.TooLong(fldPath.Child("message"), "" /*unused*/, maxMessageLen)) } return allErrs @@ -357,3 +361,31 @@ func isValidConditionReason(value string) []string { } return nil } + +// ValidateIgnoreStoreReadError validates that delete options are valid when +// ignoreStoreReadErrorWithClusterBreakingPotential is enabled +func ValidateIgnoreStoreReadError(fldPath *field.Path, options *metav1.DeleteOptions) field.ErrorList { + allErrs := field.ErrorList{} + if enabled := ptr.Deref[bool](options.IgnoreStoreReadErrorWithClusterBreakingPotential, false); !enabled { + return allErrs + } + + if len(options.DryRun) > 0 { + allErrs = append(allErrs, field.Invalid(fldPath, true, "cannot be set together with .dryRun")) + } + if options.PropagationPolicy != nil { + allErrs = append(allErrs, field.Invalid(fldPath, true, "cannot be set together with .propagationPolicy")) + } + //nolint:staticcheck // Keep validation for deprecated OrphanDependents option until it's being removed + if options.OrphanDependents != nil { + allErrs = append(allErrs, field.Invalid(fldPath, true, "cannot be set together with .orphanDependents")) + } + if options.GracePeriodSeconds != nil { + allErrs = append(allErrs, field.Invalid(fldPath, true, "cannot be set together with .gracePeriodSeconds")) + } + if options.Preconditions != nil { + allErrs = append(allErrs, field.Invalid(fldPath, true, "cannot be set together with .preconditions")) + } + + return allErrs +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.conversion.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.conversion.go index afe01ed5a..82e272240 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.conversion.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.conversion.go @@ -339,6 +339,13 @@ func autoConvert_url_Values_To_v1_DeleteOptions(in *url.Values, out *DeleteOptio } else { out.DryRun = nil } + if values, ok := map[string][]string(*in)["ignoreStoreReadErrorWithClusterBreakingPotential"]; ok && len(values) > 0 { + if err := runtime.Convert_Slice_string_To_Pointer_bool(&values, 
&out.IgnoreStoreReadErrorWithClusterBreakingPotential, s); err != nil { + return err + } + } else { + out.IgnoreStoreReadErrorWithClusterBreakingPotential = nil + } return nil } diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go index 90cc54a7e..6b0d0dfee 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go @@ -290,6 +290,11 @@ func (in *DeleteOptions) DeepCopyInto(out *DeleteOptions) { *out = make([]string, len(*in)) copy(*out, *in) } + if in.IgnoreStoreReadErrorWithClusterBreakingPotential != nil { + in, out := &in.IgnoreStoreReadErrorWithClusterBreakingPotential, &out.IgnoreStoreReadErrorWithClusterBreakingPotential + *out = new(bool) + **out = **in + } return } diff --git a/vendor/k8s.io/apimachinery/pkg/labels/selector.go b/vendor/k8s.io/apimachinery/pkg/labels/selector.go index 9e22a0056..fafa81a3d 100644 --- a/vendor/k8s.io/apimachinery/pkg/labels/selector.go +++ b/vendor/k8s.io/apimachinery/pkg/labels/selector.go @@ -18,6 +18,7 @@ package labels import ( "fmt" + "slices" "sort" "strconv" "strings" @@ -27,7 +28,6 @@ import ( "k8s.io/apimachinery/pkg/util/validation" "k8s.io/apimachinery/pkg/util/validation/field" "k8s.io/klog/v2" - stringslices "k8s.io/utils/strings/slices" ) var ( @@ -313,7 +313,7 @@ func (r Requirement) Equal(x Requirement) bool { if r.operator != x.operator { return false } - return stringslices.Equal(r.strValues, x.strValues) + return slices.Equal(r.strValues, x.strValues) } // Empty returns true if the internalSelector doesn't restrict selection space diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/helper.go b/vendor/k8s.io/apimachinery/pkg/runtime/helper.go index cc0a77bba..395dfdbd0 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/helper.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/helper.go @@ -284,3 +284,21 @@ func (e *encoderWithAllocator) Encode(obj Object, w io.Writer) error { func (e *encoderWithAllocator) Identifier() Identifier { return e.encoder.Identifier() } + +type nondeterministicEncoderToEncoderAdapter struct { + NondeterministicEncoder +} + +func (e nondeterministicEncoderToEncoderAdapter) Encode(obj Object, w io.Writer) error { + return e.EncodeNondeterministic(obj, w) +} + +// UseNondeterministicEncoding returns an Encoder that encodes objects using the provided Encoder's +// EncodeNondeterministic method if it implements NondeterministicEncoder, otherwise it returns the +// provided Encoder as-is. +func UseNondeterministicEncoding(encoder Encoder) Encoder { + if nondeterministic, ok := encoder.(NondeterministicEncoder); ok { + return nondeterministicEncoderToEncoderAdapter{nondeterministic} + } + return encoder +} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/interfaces.go b/vendor/k8s.io/apimachinery/pkg/runtime/interfaces.go index e89ea8939..2703300cd 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/interfaces.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/interfaces.go @@ -69,6 +69,19 @@ type Encoder interface { Identifier() Identifier } +// NondeterministicEncoder is implemented by Encoders that can serialize objects more efficiently in +// cases where the output does not need to be deterministic. +type NondeterministicEncoder interface { + Encoder + + // EncodeNondeterministic writes an object to the stream. 
Unlike the Encode method of + // Encoder, EncodeNondeterministic does not guarantee that any two invocations will write + // the same sequence of bytes to the io.Writer. Any differences will not be significant to a + // generic decoder. For example, map entries and struct fields might be encoded in any + // order. + EncodeNondeterministic(Object, io.Writer) error +} + // MemoryAllocator is responsible for allocating memory. // By encapsulating memory allocation into its own interface, we can reuse the memory // across many operations in places we know it can significantly improve the performance. diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/cbor.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/cbor.go new file mode 100644 index 000000000..4d069a903 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/cbor.go @@ -0,0 +1,389 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cbor + +import ( + "bytes" + "encoding/hex" + "errors" + "fmt" + "io" + "strings" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes" + "k8s.io/apimachinery/pkg/runtime/serializer/recognizer" + util "k8s.io/apimachinery/pkg/util/runtime" + + "github.com/fxamacker/cbor/v2" +) + +type metaFactory interface { + // Interpret should return the version and kind of the wire-format of the object. + Interpret(data []byte) (*schema.GroupVersionKind, error) +} + +type defaultMetaFactory struct{} + +func (mf *defaultMetaFactory) Interpret(data []byte) (*schema.GroupVersionKind, error) { + var tm metav1.TypeMeta + // The input is expected to include additional map keys besides apiVersion and kind, so use + // lax mode for decoding into TypeMeta. + if err := modes.DecodeLax.Unmarshal(data, &tm); err != nil { + return nil, fmt.Errorf("unable to determine group/version/kind: %w", err) + } + actual := tm.GetObjectKind().GroupVersionKind() + return &actual, nil +} + +type Serializer interface { + runtime.Serializer + runtime.NondeterministicEncoder + recognizer.RecognizingDecoder + + // NewSerializer returns a value of this interface type rather than exporting the serializer + // type and returning one of those because the zero value of serializer isn't ready to + // use. Users aren't intended to implement cbor.Serializer themselves, and this unexported + // interface method is here to prevent that (https://go.dev/blog/module-compatibility). + private() +} + +var _ Serializer = &serializer{} + +type options struct { + strict bool + transcode bool +} + +type Option func(*options) + +// Strict configures a serializer to return a strict decoding error when it encounters map keys that +// do not correspond to a field in the target object of a decode operation. This option is disabled +// by default. 
+func Strict(s bool) Option { + return func(opts *options) { + opts.strict = s + } +} + +// Transcode configures a serializer to transcode the "raw" bytes of a decoded runtime.RawExtension +// or metav1.FieldsV1 object to JSON. This is enabled by default to support existing programs that +// depend on the assumption that objects of either type contain valid JSON. +func Transcode(s bool) Option { + return func(opts *options) { + opts.transcode = s + } +} + +type serializer struct { + metaFactory metaFactory + creater runtime.ObjectCreater + typer runtime.ObjectTyper + options options +} + +func (serializer) private() {} + +// NewSerializer creates and returns a serializer configured with the provided options. The default +// options are equivalent to explicitly passing Strict(false) and Transcode(true). +func NewSerializer(creater runtime.ObjectCreater, typer runtime.ObjectTyper, options ...Option) Serializer { + return newSerializer(&defaultMetaFactory{}, creater, typer, options...) +} + +func newSerializer(metaFactory metaFactory, creater runtime.ObjectCreater, typer runtime.ObjectTyper, options ...Option) *serializer { + s := &serializer{ + metaFactory: metaFactory, + creater: creater, + typer: typer, + } + s.options.transcode = true + for _, o := range options { + o(&s.options) + } + return s +} + +func (s *serializer) Identifier() runtime.Identifier { + return "cbor" +} + +// Encode writes a CBOR representation of the given object. +// +// Because the CBOR data item written by a call to Encode is always enclosed in the "self-described +// CBOR" tag, its encoded form always has the prefix 0xd9d9f7. This prefix is suitable for use as a +// "magic number" for distinguishing encoded CBOR from other protocols. +// +// The default serialization behavior for any given object replicates the behavior of the JSON +// serializer as far as it is necessary to allow the CBOR serializer to be used as a drop-in +// replacement for the JSON serializer, with limited exceptions. For example, the distinction +// between integers and floating-point numbers is preserved in CBOR due to its distinct +// representations for each type. +// +// Objects implementing runtime.Unstructured will have their unstructured content encoded rather +// than following the default behavior for their dynamic type. +func (s *serializer) Encode(obj runtime.Object, w io.Writer) error { + return s.encode(modes.Encode, obj, w) +} + +func (s *serializer) EncodeNondeterministic(obj runtime.Object, w io.Writer) error { + return s.encode(modes.EncodeNondeterministic, obj, w) +} + +func (s *serializer) encode(mode modes.EncMode, obj runtime.Object, w io.Writer) error { + var v interface{} = obj + if u, ok := obj.(runtime.Unstructured); ok { + v = u.UnstructuredContent() + } + + if err := modes.RejectCustomMarshalers(v); err != nil { + return err + } + + if _, err := w.Write(selfDescribedCBOR); err != nil { + return err + } + + return mode.MarshalTo(v, w) +} + +// gvkWithDefaults returns group kind and version defaulting from provided default +func gvkWithDefaults(actual, defaultGVK schema.GroupVersionKind) schema.GroupVersionKind { + if len(actual.Kind) == 0 { + actual.Kind = defaultGVK.Kind + } + if len(actual.Version) == 0 && len(actual.Group) == 0 { + actual.Group = defaultGVK.Group + actual.Version = defaultGVK.Version + } + if len(actual.Version) == 0 && actual.Group == defaultGVK.Group { + actual.Version = defaultGVK.Version + } + return actual +} + +// diagnose returns the diagnostic encoding of a well-formed CBOR data item. 
+func diagnose(data []byte) string { + diag, err := modes.Diagnostic.Diagnose(data) + if err != nil { + // Since the input must already be well-formed CBOR, converting it to diagnostic + // notation should not fail. + util.HandleError(err) + + return hex.EncodeToString(data) + } + return diag +} + +// unmarshal unmarshals CBOR data from the provided byte slice into a Go object. If the decoder is +// configured to report strict errors, the first error return value may be a non-nil strict decoding +// error. If the last error return value is non-nil, then the unmarshal failed entirely and the +// state of the destination object should not be relied on. +func (s *serializer) unmarshal(data []byte, into interface{}) (strict, lax error) { + if u, ok := into.(runtime.Unstructured); ok { + var content map[string]interface{} + defer func() { + switch u := u.(type) { + case *unstructured.UnstructuredList: + // UnstructuredList's implementation of SetUnstructuredContent + // produces different objects than those produced by a decode using + // UnstructuredJSONScheme: + // + // 1. SetUnstructuredContent retains the "items" key in the list's + // Object field. It is omitted from Object when decoding with + // UnstructuredJSONScheme. + // 2. SetUnstructuredContent does not populate "apiVersion" and + // "kind" on each entry of its Items + // field. UnstructuredJSONScheme does, inferring the singular + // Kind from the list Kind. + // 3. SetUnstructuredContent ignores entries of "items" that are + // not JSON objects or are objects without + // "kind". UnstructuredJSONScheme returns an error in either + // case. + // + // UnstructuredJSONScheme's behavior is replicated here. + var items []interface{} + if uncast, present := content["items"]; present { + var cast bool + items, cast = uncast.([]interface{}) + if !cast { + strict, lax = nil, fmt.Errorf("items field of UnstructuredList must be encoded as an array or null if present") + return + } + } + apiVersion, _ := content["apiVersion"].(string) + kind, _ := content["kind"].(string) + kind = strings.TrimSuffix(kind, "List") + var unstructureds []unstructured.Unstructured + if len(items) > 0 { + unstructureds = make([]unstructured.Unstructured, len(items)) + } + for i := range items { + object, cast := items[i].(map[string]interface{}) + if !cast { + strict, lax = nil, fmt.Errorf("elements of the items field of UnstructuredList must be encoded as a map") + return + } + + // As in UnstructuredJSONScheme, only set the heuristic + // singular GVK when both "apiVersion" and "kind" are either + // missing, non-string, or empty. + object["apiVersion"], _ = object["apiVersion"].(string) + object["kind"], _ = object["kind"].(string) + if object["apiVersion"] == "" && object["kind"] == "" { + object["apiVersion"] = apiVersion + object["kind"] = kind + } + + if object["kind"] == "" { + strict, lax = nil, runtime.NewMissingKindErr(diagnose(data)) + return + } + if object["apiVersion"] == "" { + strict, lax = nil, runtime.NewMissingVersionErr(diagnose(data)) + return + } + + unstructureds[i].Object = object + } + delete(content, "items") + u.Object = content + u.Items = unstructureds + default: + u.SetUnstructuredContent(content) + } + }() + into = &content + } else if err := modes.RejectCustomMarshalers(into); err != nil { + return nil, err + } + + if !s.options.strict { + return nil, modes.DecodeLax.Unmarshal(data, into) + } + + err := modes.Decode.Unmarshal(data, into) + // TODO: UnknownFieldError is ambiguous. 
It only provides the index of the first problematic + // map entry encountered and does not indicate which map the index refers to. + var unknownField *cbor.UnknownFieldError + if errors.As(err, &unknownField) { + // Unlike JSON, there are no strict errors in CBOR for duplicate map keys. CBOR maps + // with duplicate keys are considered invalid according to the spec and are rejected + // entirely. + return runtime.NewStrictDecodingError([]error{unknownField}), modes.DecodeLax.Unmarshal(data, into) + } + return nil, err +} + +func (s *serializer) Decode(data []byte, gvk *schema.GroupVersionKind, into runtime.Object) (runtime.Object, *schema.GroupVersionKind, error) { + // A preliminary pass over the input to obtain the actual GVK is redundant on a successful + // decode into Unstructured. + if _, ok := into.(runtime.Unstructured); ok { + if _, unmarshalErr := s.unmarshal(data, into); unmarshalErr != nil { + actual, interpretErr := s.metaFactory.Interpret(data) + if interpretErr != nil { + return nil, nil, interpretErr + } + + if gvk != nil { + *actual = gvkWithDefaults(*actual, *gvk) + } + + return nil, actual, unmarshalErr + } + + actual := into.GetObjectKind().GroupVersionKind() + if len(actual.Kind) == 0 { + return nil, &actual, runtime.NewMissingKindErr(diagnose(data)) + } + if len(actual.Version) == 0 { + return nil, &actual, runtime.NewMissingVersionErr(diagnose(data)) + } + + return into, &actual, nil + } + + actual, err := s.metaFactory.Interpret(data) + if err != nil { + return nil, nil, err + } + + if gvk != nil { + *actual = gvkWithDefaults(*actual, *gvk) + } + + if into != nil { + types, _, err := s.typer.ObjectKinds(into) + if err != nil { + return nil, actual, err + } + *actual = gvkWithDefaults(*actual, types[0]) + } + + if len(actual.Kind) == 0 { + return nil, actual, runtime.NewMissingKindErr(diagnose(data)) + } + if len(actual.Version) == 0 { + return nil, actual, runtime.NewMissingVersionErr(diagnose(data)) + } + + obj, err := runtime.UseOrCreateObject(s.typer, s.creater, *actual, into) + if err != nil { + return nil, actual, err + } + + strict, err := s.unmarshal(data, obj) + if err != nil { + return nil, actual, err + } + + if s.options.transcode { + if err := transcodeRawTypes(obj); err != nil { + return nil, actual, err + } + } + + return obj, actual, strict +} + +// selfDescribedCBOR is the CBOR encoding of the head of tag number 55799. This tag, specified in +// RFC 8949 Section 3.4.6 "Self-Described CBOR", encloses all output from the encoder, has no +// special semantics, and is used as a magic number to recognize CBOR-encoded data items. +// +// See https://www.rfc-editor.org/rfc/rfc8949.html#name-self-described-cbor. +var selfDescribedCBOR = []byte{0xd9, 0xd9, 0xf7} + +func (s *serializer) RecognizesData(data []byte) (ok, unknown bool, err error) { + return bytes.HasPrefix(data, selfDescribedCBOR), false, nil +} + +// NewSerializerInfo returns a default SerializerInfo for CBOR using the given creater and typer. 
+func NewSerializerInfo(creater runtime.ObjectCreater, typer runtime.ObjectTyper) runtime.SerializerInfo { + return runtime.SerializerInfo{ + MediaType: "application/cbor", + MediaTypeType: "application", + MediaTypeSubType: "cbor", + Serializer: NewSerializer(creater, typer), + StrictSerializer: NewSerializer(creater, typer, Strict(true)), + StreamSerializer: &runtime.StreamSerializerInfo{ + Framer: NewFramer(), + Serializer: NewSerializer(creater, typer, Transcode(false)), + }, + } +} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/direct/direct.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/direct/direct.go index cd78b1df2..a71a487f9 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/direct/direct.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/direct/direct.go @@ -23,14 +23,39 @@ import ( "k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes" ) +// Marshal serializes a value to CBOR. If there is more than one way to encode the value, it will +// make the same choice as the CBOR implementation of runtime.Serializer. +// +// Note: Support for CBOR is at an alpha stage. If the value (or, for composite types, any of its +// nested values) implement any of the interfaces encoding.TextMarshaler, encoding.TextUnmarshaler, +// encoding/json.Marshaler, or encoding/json.Unmarshaler, a non-nil error will be returned unless +// the value also implements the corresponding CBOR interfaces. This limitation will ultimately be +// removed in favor of automatic transcoding to CBOR. func Marshal(src interface{}) ([]byte, error) { + if err := modes.RejectCustomMarshalers(src); err != nil { + return nil, err + } return modes.Encode.Marshal(src) } +// Unmarshal deserializes from CBOR into an addressable value. If there is more than one way to +// unmarshal a value, it will make the same choice as the CBOR implementation of runtime.Serializer. +// +// Note: Support for CBOR is at an alpha stage. If the value (or, for composite types, any of its +// nested values) implement any of the interfaces encoding.TextMarshaler, encoding.TextUnmarshaler, +// encoding/json.Marshaler, or encoding/json.Unmarshaler, a non-nil error will be returned unless +// the value also implements the corresponding CBOR interfaces. This limitation will ultimately be +// removed in favor of automatic transcoding to CBOR. func Unmarshal(src []byte, dst interface{}) error { + if err := modes.RejectCustomMarshalers(dst); err != nil { + return err + } return modes.Decode.Unmarshal(src, dst) } +// Diagnose accepts well-formed CBOR bytes and returns a string representing the same data item in +// human-readable diagnostic notation (RFC 8949 Section 8). The diagnostic notation is not meant to +// be parsed. func Diagnose(src []byte) (string, error) { return modes.Diagnostic.Diagnose(src) } diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/framer.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/framer.go new file mode 100644 index 000000000..28a733c67 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/framer.go @@ -0,0 +1,90 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cbor + +import ( + "io" + + "k8s.io/apimachinery/pkg/runtime" + + "github.com/fxamacker/cbor/v2" +) + +// NewFramer returns a runtime.Framer based on RFC 8742 CBOR Sequences. Each frame contains exactly +// one encoded CBOR data item. +func NewFramer() runtime.Framer { + return framer{} +} + +var _ runtime.Framer = framer{} + +type framer struct{} + +func (framer) NewFrameReader(rc io.ReadCloser) io.ReadCloser { + return &frameReader{ + decoder: cbor.NewDecoder(rc), + closer: rc, + } +} + +func (framer) NewFrameWriter(w io.Writer) io.Writer { + // Each data item in a CBOR sequence is self-delimiting (like JSON objects). + return w +} + +type frameReader struct { + decoder *cbor.Decoder + closer io.Closer + + overflow []byte +} + +func (fr *frameReader) Read(dst []byte) (int, error) { + if len(fr.overflow) > 0 { + // We read a frame that was too large for the destination slice in a previous call + // to Read and have bytes left over. + n := copy(dst, fr.overflow) + if n < len(fr.overflow) { + fr.overflow = fr.overflow[n:] + return n, io.ErrShortBuffer + } + fr.overflow = nil + return n, nil + } + + // The Reader contract allows implementations to use all of dst[0:len(dst)] as scratch + // space, even if n < len(dst), but it does not allow implementations to use + // dst[len(dst):cap(dst)]. Slicing it up-front allows us to append to it without worrying + // about overwriting dst[len(dst):cap(dst)]. + m := cbor.RawMessage(dst[0:0:len(dst)]) + if err := fr.decoder.Decode(&m); err != nil { + return 0, err + } + + if len(m) > len(dst) { + // The frame was too big, m has a newly-allocated underlying array to accommodate + // it. + fr.overflow = m[len(dst):] + return copy(dst, m), io.ErrShortBuffer + } + + return len(m), nil +} + +func (fr *frameReader) Close() error { + return fr.closer.Close() +} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/encode.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/encode.go index c66931384..5fae14151 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/encode.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/encode.go @@ -105,7 +105,7 @@ var Encode = EncMode{ var EncodeNondeterministic = EncMode{ delegate: func() cbor.UserBufferEncMode { opts := Encode.options() - opts.Sort = cbor.SortNone // TODO: Use cbor.SortFastShuffle after bump to v2.7.0. + opts.Sort = cbor.SortFastShuffle em, err := opts.UserBufferEncMode() if err != nil { panic(err) diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/raw.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/raw.go new file mode 100644 index 000000000..09d1340f9 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/raw.go @@ -0,0 +1,236 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cbor + +import ( + "fmt" + "reflect" + "sync" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +var sharedTranscoders transcoders + +var rawTypeTranscodeFuncs = map[reflect.Type]func(reflect.Value) error{ + reflect.TypeFor[runtime.RawExtension](): func(rv reflect.Value) error { + if !rv.CanAddr() { + return nil + } + re := rv.Addr().Interface().(*runtime.RawExtension) + if re.Raw == nil { + // When Raw is nil it encodes to null. Don't change nil Raw values during + // transcoding, they would have unmarshalled from JSON as nil too. + return nil + } + j, err := re.MarshalJSON() + if err != nil { + return fmt.Errorf("failed to transcode RawExtension to JSON: %w", err) + } + re.Raw = j + return nil + }, + reflect.TypeFor[metav1.FieldsV1](): func(rv reflect.Value) error { + if !rv.CanAddr() { + return nil + } + fields := rv.Addr().Interface().(*metav1.FieldsV1) + if fields.Raw == nil { + // When Raw is nil it encodes to null. Don't change nil Raw values during + // transcoding, they would have unmarshalled from JSON as nil too. + return nil + } + j, err := fields.MarshalJSON() + if err != nil { + return fmt.Errorf("failed to transcode FieldsV1 to JSON: %w", err) + } + fields.Raw = j + return nil + }, +} + +func transcodeRawTypes(v interface{}) error { + if v == nil { + return nil + } + + rv := reflect.ValueOf(v) + return sharedTranscoders.getTranscoder(rv.Type()).fn(rv) +} + +type transcoder struct { + fn func(rv reflect.Value) error +} + +var noop = transcoder{ + fn: func(reflect.Value) error { + return nil + }, +} + +type transcoders struct { + lock sync.RWMutex + m map[reflect.Type]**transcoder +} + +func (ts *transcoders) getTranscoder(rt reflect.Type) transcoder { + ts.lock.RLock() + tpp, ok := ts.m[rt] + ts.lock.RUnlock() + if ok { + return **tpp + } + + ts.lock.Lock() + defer ts.lock.Unlock() + tp := ts.getTranscoderLocked(rt) + return *tp +} + +func (ts *transcoders) getTranscoderLocked(rt reflect.Type) *transcoder { + if tpp, ok := ts.m[rt]; ok { + // A transcoder for this type was cached while waiting to acquire the lock. + return *tpp + } + + // Cache the transcoder now, before populating fn, so that circular references between types + // don't overflow the call stack. + t := new(transcoder) + if ts.m == nil { + ts.m = make(map[reflect.Type]**transcoder) + } + ts.m[rt] = &t + + for rawType, fn := range rawTypeTranscodeFuncs { + if rt == rawType { + t = &transcoder{fn: fn} + return t + } + } + + switch rt.Kind() { + case reflect.Array: + te := ts.getTranscoderLocked(rt.Elem()) + rtlen := rt.Len() + if rtlen == 0 || te == &noop { + t = &noop + break + } + t.fn = func(rv reflect.Value) error { + for i := 0; i < rtlen; i++ { + if err := te.fn(rv.Index(i)); err != nil { + return err + } + } + return nil + } + case reflect.Interface: + // Any interface value might have a dynamic type involving RawExtension. It needs to + // be checked. + t.fn = func(rv reflect.Value) error { + if rv.IsNil() { + return nil + } + rv = rv.Elem() + // The interface element's type is dynamic so its transcoder can't be + // determined statically. 
+ return ts.getTranscoder(rv.Type()).fn(rv) + } + case reflect.Map: + rtk := rt.Key() + tk := ts.getTranscoderLocked(rtk) + rte := rt.Elem() + te := ts.getTranscoderLocked(rte) + if tk == &noop && te == &noop { + t = &noop + break + } + t.fn = func(rv reflect.Value) error { + iter := rv.MapRange() + rvk := reflect.New(rtk).Elem() + rve := reflect.New(rte).Elem() + for iter.Next() { + rvk.SetIterKey(iter) + if err := tk.fn(rvk); err != nil { + return err + } + rve.SetIterValue(iter) + if err := te.fn(rve); err != nil { + return err + } + } + return nil + } + case reflect.Pointer: + te := ts.getTranscoderLocked(rt.Elem()) + if te == &noop { + t = &noop + break + } + t.fn = func(rv reflect.Value) error { + if rv.IsNil() { + return nil + } + return te.fn(rv.Elem()) + } + case reflect.Slice: + te := ts.getTranscoderLocked(rt.Elem()) + if te == &noop { + t = &noop + break + } + t.fn = func(rv reflect.Value) error { + for i := 0; i < rv.Len(); i++ { + if err := te.fn(rv.Index(i)); err != nil { + return err + } + } + return nil + } + case reflect.Struct: + type fieldTranscoder struct { + Index int + Transcoder *transcoder + } + var fieldTranscoders []fieldTranscoder + for i := 0; i < rt.NumField(); i++ { + f := rt.Field(i) + tf := ts.getTranscoderLocked(f.Type) + if tf == &noop { + continue + } + fieldTranscoders = append(fieldTranscoders, fieldTranscoder{Index: i, Transcoder: tf}) + } + if len(fieldTranscoders) == 0 { + t = &noop + break + } + t.fn = func(rv reflect.Value) error { + for _, ft := range fieldTranscoders { + if err := ft.Transcoder.fn(rv.Field(ft.Index)); err != nil { + return err + } + } + return nil + } + default: + t = &noop + } + + return t +} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/codec_factory.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/codec_factory.go index ff9820842..77bb30745 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/codec_factory.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/codec_factory.go @@ -17,9 +17,6 @@ limitations under the License. 
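The codec_factory.go hunk that follows replaces the old serializerType plumbing with runtime.SerializerInfo values and adds a WithSerializer option; combined with cbor.NewSerializerInfo from the new serializer/cbor package above, that option is what lets a scheme opt into CBOR. As a rough, illustrative sketch (not part of the vendored patch; only the WithSerializer and NewSerializerInfo names come from the diff, the surrounding wiring is an assumption about typical usage):

package example

import (
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/serializer"
	cborserializer "k8s.io/apimachinery/pkg/runtime/serializer/cbor"
)

// newCodecsWithCBOR builds a CodecFactory whose supported media types include
// application/cbor in addition to the default JSON, YAML, and protobuf serializers.
// WithSerializer appends the SerializerInfo produced by cbor.NewSerializerInfo,
// which uses the scheme as both ObjectCreater and ObjectTyper.
func newCodecsWithCBOR(scheme *runtime.Scheme) serializer.CodecFactory {
	return serializer.NewCodecFactory(scheme, serializer.WithSerializer(cborserializer.NewSerializerInfo))
}

With such a factory, a caller could negotiate application/cbor the same way it negotiates the built-in formats today; the hunk below is the piece that makes the extra SerializerInfo entries flow through newSerializersForScheme.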
package serializer import ( - "mime" - "strings" - "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/runtime/serializer/json" @@ -28,41 +25,26 @@ import ( "k8s.io/apimachinery/pkg/runtime/serializer/versioning" ) -// serializerExtensions are for serializers that are conditionally compiled in -var serializerExtensions = []func(*runtime.Scheme) (serializerType, bool){} - -type serializerType struct { - AcceptContentTypes []string - ContentType string - FileExtensions []string - // EncodesAsText should be true if this content type can be represented safely in UTF-8 - EncodesAsText bool - - Serializer runtime.Serializer - PrettySerializer runtime.Serializer - StrictSerializer runtime.Serializer - - AcceptStreamContentTypes []string - StreamContentType string - - Framer runtime.Framer - StreamSerializer runtime.Serializer -} - -func newSerializersForScheme(scheme *runtime.Scheme, mf json.MetaFactory, options CodecFactoryOptions) []serializerType { +func newSerializersForScheme(scheme *runtime.Scheme, mf json.MetaFactory, options CodecFactoryOptions) []runtime.SerializerInfo { jsonSerializer := json.NewSerializerWithOptions( mf, scheme, scheme, json.SerializerOptions{Yaml: false, Pretty: false, Strict: options.Strict}, ) - jsonSerializerType := serializerType{ - AcceptContentTypes: []string{runtime.ContentTypeJSON}, - ContentType: runtime.ContentTypeJSON, - FileExtensions: []string{"json"}, - EncodesAsText: true, - Serializer: jsonSerializer, - - Framer: json.Framer, - StreamSerializer: jsonSerializer, + jsonSerializerType := runtime.SerializerInfo{ + MediaType: runtime.ContentTypeJSON, + MediaTypeType: "application", + MediaTypeSubType: "json", + EncodesAsText: true, + Serializer: jsonSerializer, + StrictSerializer: json.NewSerializerWithOptions( + mf, scheme, scheme, + json.SerializerOptions{Yaml: false, Pretty: false, Strict: true}, + ), + StreamSerializer: &runtime.StreamSerializerInfo{ + EncodesAsText: true, + Serializer: jsonSerializer, + Framer: json.Framer, + }, } if options.Pretty { jsonSerializerType.PrettySerializer = json.NewSerializerWithOptions( @@ -71,12 +53,6 @@ func newSerializersForScheme(scheme *runtime.Scheme, mf json.MetaFactory, option ) } - strictJSONSerializer := json.NewSerializerWithOptions( - mf, scheme, scheme, - json.SerializerOptions{Yaml: false, Pretty: false, Strict: true}, - ) - jsonSerializerType.StrictSerializer = strictJSONSerializer - yamlSerializer := json.NewSerializerWithOptions( mf, scheme, scheme, json.SerializerOptions{Yaml: true, Pretty: false, Strict: options.Strict}, @@ -88,35 +64,35 @@ func newSerializersForScheme(scheme *runtime.Scheme, mf json.MetaFactory, option protoSerializer := protobuf.NewSerializer(scheme, scheme) protoRawSerializer := protobuf.NewRawSerializer(scheme, scheme) - serializers := []serializerType{ + serializers := []runtime.SerializerInfo{ jsonSerializerType, { - AcceptContentTypes: []string{runtime.ContentTypeYAML}, - ContentType: runtime.ContentTypeYAML, - FileExtensions: []string{"yaml"}, - EncodesAsText: true, - Serializer: yamlSerializer, - StrictSerializer: strictYAMLSerializer, + MediaType: runtime.ContentTypeYAML, + MediaTypeType: "application", + MediaTypeSubType: "yaml", + EncodesAsText: true, + Serializer: yamlSerializer, + StrictSerializer: strictYAMLSerializer, }, { - AcceptContentTypes: []string{runtime.ContentTypeProtobuf}, - ContentType: runtime.ContentTypeProtobuf, - FileExtensions: []string{"pb"}, - Serializer: protoSerializer, + MediaType: 
runtime.ContentTypeProtobuf, + MediaTypeType: "application", + MediaTypeSubType: "vnd.kubernetes.protobuf", + Serializer: protoSerializer, // note, strict decoding is unsupported for protobuf, // fall back to regular serializing StrictSerializer: protoSerializer, - - Framer: protobuf.LengthDelimitedFramer, - StreamSerializer: protoRawSerializer, + StreamSerializer: &runtime.StreamSerializerInfo{ + Serializer: protoRawSerializer, + Framer: protobuf.LengthDelimitedFramer, + }, }, } - for _, fn := range serializerExtensions { - if serializer, ok := fn(scheme); ok { - serializers = append(serializers, serializer) - } + for _, f := range options.serializers { + serializers = append(serializers, f(scheme, scheme)) } + return serializers } @@ -136,6 +112,8 @@ type CodecFactoryOptions struct { Strict bool // Pretty includes a pretty serializer along with the non-pretty one Pretty bool + + serializers []func(runtime.ObjectCreater, runtime.ObjectTyper) runtime.SerializerInfo } // CodecFactoryOptionsMutator takes a pointer to an options struct and then modifies it. @@ -162,6 +140,13 @@ func DisableStrict(options *CodecFactoryOptions) { options.Strict = false } +// WithSerializer configures a serializer to be supported in addition to the default serializers. +func WithSerializer(f func(runtime.ObjectCreater, runtime.ObjectTyper) runtime.SerializerInfo) CodecFactoryOptionsMutator { + return func(options *CodecFactoryOptions) { + options.serializers = append(options.serializers, f) + } +} + // NewCodecFactory provides methods for retrieving serializers for the supported wire formats // and conversion wrappers to define preferred internal and external versions. In the future, // as the internal version is used less, callers may instead use a defaulting serializer and @@ -184,7 +169,7 @@ func NewCodecFactory(scheme *runtime.Scheme, mutators ...CodecFactoryOptionsMuta } // newCodecFactory is a helper for testing that allows a different metafactory to be specified. 
-func newCodecFactory(scheme *runtime.Scheme, serializers []serializerType) CodecFactory { +func newCodecFactory(scheme *runtime.Scheme, serializers []runtime.SerializerInfo) CodecFactory { decoders := make([]runtime.Decoder, 0, len(serializers)) var accepts []runtime.SerializerInfo alreadyAccepted := make(map[string]struct{}) @@ -192,38 +177,20 @@ func newCodecFactory(scheme *runtime.Scheme, serializers []serializerType) Codec var legacySerializer runtime.Serializer for _, d := range serializers { decoders = append(decoders, d.Serializer) - for _, mediaType := range d.AcceptContentTypes { - if _, ok := alreadyAccepted[mediaType]; ok { - continue - } - alreadyAccepted[mediaType] = struct{}{} - info := runtime.SerializerInfo{ - MediaType: d.ContentType, - EncodesAsText: d.EncodesAsText, - Serializer: d.Serializer, - PrettySerializer: d.PrettySerializer, - StrictSerializer: d.StrictSerializer, - } - - mediaType, _, err := mime.ParseMediaType(info.MediaType) - if err != nil { - panic(err) - } - parts := strings.SplitN(mediaType, "/", 2) - info.MediaTypeType = parts[0] - info.MediaTypeSubType = parts[1] - - if d.StreamSerializer != nil { - info.StreamSerializer = &runtime.StreamSerializerInfo{ - Serializer: d.StreamSerializer, - EncodesAsText: d.EncodesAsText, - Framer: d.Framer, - } - } - accepts = append(accepts, info) - if mediaType == runtime.ContentTypeJSON { - legacySerializer = d.Serializer - } + if _, ok := alreadyAccepted[d.MediaType]; ok { + continue + } + alreadyAccepted[d.MediaType] = struct{}{} + + acceptedSerializerShallowCopy := d + if d.StreamSerializer != nil { + cloned := *d.StreamSerializer + acceptedSerializerShallowCopy.StreamSerializer = &cloned + } + accepts = append(accepts, acceptedSerializerShallowCopy) + + if d.MediaType == runtime.ContentTypeJSON { + legacySerializer = d.Serializer } } if legacySerializer == nil { diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/types.go b/vendor/k8s.io/apimachinery/pkg/runtime/types.go index 1680c149f..ca7b7cc2d 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/types.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/types.go @@ -43,10 +43,11 @@ type TypeMeta struct { } const ( - ContentTypeJSON string = "application/json" - ContentTypeYAML string = "application/yaml" - ContentTypeProtobuf string = "application/vnd.kubernetes.protobuf" - ContentTypeCBOR string = "application/cbor" + ContentTypeJSON string = "application/json" + ContentTypeYAML string = "application/yaml" + ContentTypeProtobuf string = "application/vnd.kubernetes.protobuf" + ContentTypeCBOR string = "application/cbor" // RFC 8949 + ContentTypeCBORSequence string = "application/cbor-seq" // RFC 8742 ) // RawExtension is used to hold extensions in external versions. 
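The codec_factory.go rework above folds the old serializerType bookkeeping directly into runtime.SerializerInfo and adds a WithSerializer option for plugging in additional wire formats. As a rough sketch only (the media type below and the reuse of the YAML serializer are assumptions for illustration, not part of this patch), a caller building against the bumped apimachinery could register an extra format like so:

package example

import (
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/serializer"
	"k8s.io/apimachinery/pkg/runtime/serializer/json"
)

// newCodecsWithExtraFormat returns a CodecFactory that advertises one extra,
// illustrative media type alongside the built-in JSON/YAML/protobuf formats.
func newCodecsWithExtraFormat(scheme *runtime.Scheme) serializer.CodecFactory {
	return serializer.NewCodecFactory(scheme, serializer.WithSerializer(
		func(creater runtime.ObjectCreater, typer runtime.ObjectTyper) runtime.SerializerInfo {
			// Reuse the YAML serializer purely as a stand-in encoder for this example.
			s := json.NewSerializerWithOptions(json.DefaultMetaFactory, creater, typer,
				json.SerializerOptions{Yaml: true, Pretty: false, Strict: false})
			return runtime.SerializerInfo{
				MediaType:        "application/x-example+yaml", // hypothetical media type
				MediaTypeType:    "application",
				MediaTypeSubType: "x-example+yaml",
				EncodesAsText:    true,
				Serializer:       s,
				StrictSerializer: s,
			}
		},
	))
}

Because newCodecFactory now keys accepted formats directly by SerializerInfo.MediaType, the callback is expected to fill in MediaTypeType and MediaTypeSubType itself; the factory no longer parses them out of the media type string.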
diff --git a/vendor/k8s.io/apimachinery/pkg/types/patch.go b/vendor/k8s.io/apimachinery/pkg/types/patch.go index fe8ecaaff..d338cf213 100644 --- a/vendor/k8s.io/apimachinery/pkg/types/patch.go +++ b/vendor/k8s.io/apimachinery/pkg/types/patch.go @@ -25,5 +25,7 @@ const ( JSONPatchType PatchType = "application/json-patch+json" MergePatchType PatchType = "application/merge-patch+json" StrategicMergePatchType PatchType = "application/strategic-merge-patch+json" - ApplyPatchType PatchType = "application/apply-patch+yaml" + ApplyPatchType PatchType = ApplyYAMLPatchType + ApplyYAMLPatchType PatchType = "application/apply-patch+yaml" + ApplyCBORPatchType PatchType = "application/apply-patch+cbor" ) diff --git a/vendor/k8s.io/apimachinery/pkg/util/managedfields/fieldmanager.go b/vendor/k8s.io/apimachinery/pkg/util/managedfields/fieldmanager.go index 978ffb3c3..de540c82f 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/managedfields/fieldmanager.go +++ b/vendor/k8s.io/apimachinery/pkg/util/managedfields/fieldmanager.go @@ -19,11 +19,12 @@ package managedfields import ( "fmt" + "sigs.k8s.io/structured-merge-diff/v4/fieldpath" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/util/managedfields/internal" - "sigs.k8s.io/structured-merge-diff/v4/fieldpath" ) // FieldManager updates the managed fields and merges applied @@ -32,7 +33,7 @@ type FieldManager = internal.FieldManager // NewDefaultFieldManager creates a new FieldManager that merges apply requests // and update managed fields for other types of requests. -func NewDefaultFieldManager(typeConverter TypeConverter, objectConverter runtime.ObjectConvertor, objectDefaulter runtime.ObjectDefaulter, objectCreater runtime.ObjectCreater, kind schema.GroupVersionKind, hub schema.GroupVersion, subresource string, resetFields map[fieldpath.APIVersion]*fieldpath.Set) (*FieldManager, error) { +func NewDefaultFieldManager(typeConverter TypeConverter, objectConverter runtime.ObjectConvertor, objectDefaulter runtime.ObjectDefaulter, objectCreater runtime.ObjectCreater, kind schema.GroupVersionKind, hub schema.GroupVersion, subresource string, resetFields map[fieldpath.APIVersion]fieldpath.Filter) (*FieldManager, error) { f, err := internal.NewStructuredMergeManager(typeConverter, objectConverter, objectDefaulter, kind.GroupVersion(), hub, resetFields) if err != nil { return nil, fmt.Errorf("failed to create field manager: %v", err) @@ -43,7 +44,7 @@ func NewDefaultFieldManager(typeConverter TypeConverter, objectConverter runtime // NewDefaultCRDFieldManager creates a new FieldManager specifically for // CRDs. This allows for the possibility of fields which are not defined // in models, as well as having no models defined at all. 
-func NewDefaultCRDFieldManager(typeConverter TypeConverter, objectConverter runtime.ObjectConvertor, objectDefaulter runtime.ObjectDefaulter, objectCreater runtime.ObjectCreater, kind schema.GroupVersionKind, hub schema.GroupVersion, subresource string, resetFields map[fieldpath.APIVersion]*fieldpath.Set) (_ *FieldManager, err error) { +func NewDefaultCRDFieldManager(typeConverter TypeConverter, objectConverter runtime.ObjectConvertor, objectDefaulter runtime.ObjectDefaulter, objectCreater runtime.ObjectCreater, kind schema.GroupVersionKind, hub schema.GroupVersion, subresource string, resetFields map[fieldpath.APIVersion]fieldpath.Filter) (_ *FieldManager, err error) { f, err := internal.NewCRDStructuredMergeManager(typeConverter, objectConverter, objectDefaulter, kind.GroupVersion(), hub, resetFields) if err != nil { return nil, fmt.Errorf("failed to create field manager: %v", err) diff --git a/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/structuredmerge.go b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/structuredmerge.go index 786ad991c..3fe36edc9 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/structuredmerge.go +++ b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/structuredmerge.go @@ -19,13 +19,14 @@ package internal import ( "fmt" + "sigs.k8s.io/structured-merge-diff/v4/fieldpath" + "sigs.k8s.io/structured-merge-diff/v4/merge" + "sigs.k8s.io/structured-merge-diff/v4/typed" + "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" - "sigs.k8s.io/structured-merge-diff/v4/fieldpath" - "sigs.k8s.io/structured-merge-diff/v4/merge" - "sigs.k8s.io/structured-merge-diff/v4/typed" ) type structuredMergeManager struct { @@ -41,7 +42,7 @@ var _ Manager = &structuredMergeManager{} // NewStructuredMergeManager creates a new Manager that merges apply requests // and update managed fields for other types of requests. -func NewStructuredMergeManager(typeConverter TypeConverter, objectConverter runtime.ObjectConvertor, objectDefaulter runtime.ObjectDefaulter, gv schema.GroupVersion, hub schema.GroupVersion, resetFields map[fieldpath.APIVersion]*fieldpath.Set) (Manager, error) { +func NewStructuredMergeManager(typeConverter TypeConverter, objectConverter runtime.ObjectConvertor, objectDefaulter runtime.ObjectDefaulter, gv schema.GroupVersion, hub schema.GroupVersion, resetFields map[fieldpath.APIVersion]fieldpath.Filter) (Manager, error) { if typeConverter == nil { return nil, fmt.Errorf("typeconverter must not be nil") } @@ -52,8 +53,8 @@ func NewStructuredMergeManager(typeConverter TypeConverter, objectConverter runt groupVersion: gv, hubVersion: hub, updater: merge.Updater{ - Converter: newVersionConverter(typeConverter, objectConverter, hub), // This is the converter provided to SMD from k8s - IgnoredFields: resetFields, + Converter: newVersionConverter(typeConverter, objectConverter, hub), // This is the converter provided to SMD from k8s + IgnoreFilter: resetFields, }, }, nil } @@ -61,7 +62,7 @@ func NewStructuredMergeManager(typeConverter TypeConverter, objectConverter runt // NewCRDStructuredMergeManager creates a new Manager specifically for // CRDs. This allows for the possibility of fields which are not defined // in models, as well as having no models defined at all. 
-func NewCRDStructuredMergeManager(typeConverter TypeConverter, objectConverter runtime.ObjectConvertor, objectDefaulter runtime.ObjectDefaulter, gv schema.GroupVersion, hub schema.GroupVersion, resetFields map[fieldpath.APIVersion]*fieldpath.Set) (_ Manager, err error) { +func NewCRDStructuredMergeManager(typeConverter TypeConverter, objectConverter runtime.ObjectConvertor, objectDefaulter runtime.ObjectDefaulter, gv schema.GroupVersion, hub schema.GroupVersion, resetFields map[fieldpath.APIVersion]fieldpath.Filter) (_ Manager, err error) { return &structuredMergeManager{ typeConverter: typeConverter, objectConverter: objectConverter, @@ -69,8 +70,8 @@ func NewCRDStructuredMergeManager(typeConverter TypeConverter, objectConverter r groupVersion: gv, hubVersion: hub, updater: merge.Updater{ - Converter: newCRDVersionConverter(typeConverter, objectConverter, hub), - IgnoredFields: resetFields, + Converter: newCRDVersionConverter(typeConverter, objectConverter, hub), + IgnoreFilter: resetFields, }, }, nil } diff --git a/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go b/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go index 4fe0c5eb2..df374949d 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go +++ b/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go @@ -45,7 +45,7 @@ var PanicHandlers = []func(context.Context, interface{}){logPanic} // // E.g., you can provide one or more additional handlers for something like shutting down go routines gracefully. // -// TODO(pohly): logcheck:context // HandleCrashWithContext should be used instead of HandleCrash in code which supports contextual logging. +// Contextual logging: HandleCrashWithContext should be used instead of HandleCrash in code which supports contextual logging. func HandleCrash(additionalHandlers ...func(interface{})) { if r := recover(); r != nil { additionalHandlersWithContext := make([]func(context.Context, interface{}), len(additionalHandlers)) @@ -146,7 +146,7 @@ type ErrorHandler func(ctx context.Context, err error, msg string, keysAndValues // is preferable to logging the error - the default behavior is to log but the // errors may be sent to a remote server for analysis. // -// TODO(pohly): logcheck:context // HandleErrorWithContext should be used instead of HandleError in code which supports contextual logging. +// Contextual logging: HandleErrorWithContext should be used instead of HandleError in code which supports contextual logging. func HandleError(err error) { // this is sometimes called with a nil error. We probably shouldn't fail and should do nothing instead if err == nil { diff --git a/vendor/k8s.io/apimachinery/pkg/util/validation/field/errors.go b/vendor/k8s.io/apimachinery/pkg/util/validation/field/errors.go index bc387d011..f1634bc0d 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/validation/field/errors.go +++ b/vendor/k8s.io/apimachinery/pkg/util/validation/field/errors.go @@ -220,26 +220,24 @@ func Forbidden(field *Path, detail string) *Error { return &Error{ErrorTypeForbidden, field.String(), "", detail} } -// TooLong returns a *Error indicating "too long". This is used to -// report that the given value is too long. This is similar to -// Invalid, but the returned error will not include the too-long -// value. +// TooLong returns a *Error indicating "too long". This is used to report that +// the given value is too long. This is similar to Invalid, but the returned +// error will not include the too-long value. If maxLength is negative, it will +// be included in the message. 
The value argument is not used. func TooLong(field *Path, value interface{}, maxLength int) *Error { - return &Error{ErrorTypeTooLong, field.String(), value, fmt.Sprintf("must have at most %d bytes", maxLength)} -} - -// TooLongMaxLength returns a *Error indicating "too long". This is used to -// report that the given value is too long. This is similar to -// Invalid, but the returned error will not include the too-long -// value. If maxLength is negative, no max length will be included in the message. -func TooLongMaxLength(field *Path, value interface{}, maxLength int) *Error { var msg string if maxLength >= 0 { - msg = fmt.Sprintf("may not be longer than %d", maxLength) + msg = fmt.Sprintf("may not be more than %d bytes", maxLength) } else { msg = "value is too long" } - return &Error{ErrorTypeTooLong, field.String(), value, msg} + return &Error{ErrorTypeTooLong, field.String(), "", msg} +} + +// TooLongMaxLength returns a *Error indicating "too long". +// Deprecated: Use TooLong instead. +func TooLongMaxLength(field *Path, value interface{}, maxLength int) *Error { + return TooLong(field, "", maxLength) } // TooMany returns a *Error indicating "too many". This is used to diff --git a/vendor/k8s.io/apimachinery/pkg/util/validation/validation.go b/vendor/k8s.io/apimachinery/pkg/util/validation/validation.go index b32644902..9bc393cf5 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/validation/validation.go +++ b/vendor/k8s.io/apimachinery/pkg/util/validation/validation.go @@ -175,6 +175,8 @@ func IsValidLabelValue(value string) []string { } const dns1123LabelFmt string = "[a-z0-9]([-a-z0-9]*[a-z0-9])?" +const dns1123LabelFmtWithUnderscore string = "_?[a-z0-9]([-_a-z0-9]*[a-z0-9])?" + const dns1123LabelErrMsg string = "a lowercase RFC 1123 label must consist of lower case alphanumeric characters or '-', and must start and end with an alphanumeric character" // DNS1123LabelMaxLength is a label's max length in DNS (RFC 1123) @@ -204,10 +206,14 @@ func IsDNS1123Label(value string) []string { const dns1123SubdomainFmt string = dns1123LabelFmt + "(\\." + dns1123LabelFmt + ")*" const dns1123SubdomainErrorMsg string = "a lowercase RFC 1123 subdomain must consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character" +const dns1123SubdomainFmtWithUnderscore string = dns1123LabelFmtWithUnderscore + "(\\." + dns1123LabelFmtWithUnderscore + ")*" +const dns1123SubdomainErrorMsgFG string = "a lowercase RFC 1123 subdomain must consist of lower case alphanumeric characters, '_', '-' or '.', and must start and end with an alphanumeric character" + // DNS1123SubdomainMaxLength is a subdomain's max length in DNS (RFC 1123) const DNS1123SubdomainMaxLength int = 253 var dns1123SubdomainRegexp = regexp.MustCompile("^" + dns1123SubdomainFmt + "$") +var dns1123SubdomainRegexpWithUnderscore = regexp.MustCompile("^" + dns1123SubdomainFmtWithUnderscore + "$") // IsDNS1123Subdomain tests for a string that conforms to the definition of a // subdomain in DNS (RFC 1123). 
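For the field/errors.go change above: TooLong now omits the too-long value from the returned error and reports only the byte limit, falling back to a generic message when maxLength is negative, while TooLongMaxLength remains as a deprecated wrapper. A minimal, hypothetical usage sketch against the bumped apimachinery:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/validation/field"
)

func main() {
	path := field.NewPath("metadata", "name")

	// The offending value is no longer echoed back; only the limit appears.
	fmt.Println(field.TooLong(path, "some-very-long-name", 63))

	// A negative limit produces the generic "value is too long" message.
	fmt.Println(field.TooLong(path, nil, -1))

	// The deprecated helper still compiles; it simply delegates to TooLong.
	fmt.Println(field.TooLongMaxLength(path, "ignored", 10))
}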
@@ -222,6 +228,19 @@ func IsDNS1123Subdomain(value string) []string { return errs } +// IsDNS1123SubdomainWithUnderscore tests for a string that conforms to the definition of a +// subdomain in DNS (RFC 1123), but allows the use of an underscore in the string +func IsDNS1123SubdomainWithUnderscore(value string) []string { + var errs []string + if len(value) > DNS1123SubdomainMaxLength { + errs = append(errs, MaxLenError(DNS1123SubdomainMaxLength)) + } + if !dns1123SubdomainRegexpWithUnderscore.MatchString(value) { + errs = append(errs, RegexError(dns1123SubdomainErrorMsgFG, dns1123SubdomainFmt, "example.com")) + } + return errs +} + const dns1035LabelFmt string = "[a-z]([-a-z0-9]*[a-z0-9])?" const dns1035LabelErrMsg string = "a DNS-1035 label must consist of lower case alphanumeric characters or '-', start with an alphabetic character, and end with an alphanumeric character" diff --git a/vendor/k8s.io/apiserver/pkg/admission/config.go b/vendor/k8s.io/apiserver/pkg/admission/config.go index c0b75a983..4bb5f27bf 100644 --- a/vendor/k8s.io/apiserver/pkg/admission/config.go +++ b/vendor/k8s.io/apiserver/pkg/admission/config.go @@ -63,7 +63,7 @@ func ReadAdmissionConfiguration(pluginNames []string, configFilePath string, con if err != nil { return nil, fmt.Errorf("unable to read admission control configuration from %q [%v]", configFilePath, err) } - codecs := serializer.NewCodecFactory(configScheme) + codecs := serializer.NewCodecFactory(configScheme, serializer.EnableStrict) decoder := codecs.UniversalDecoder() decodedObj, err := runtime.Decode(decoder, data) // we were able to decode the file successfully diff --git a/vendor/k8s.io/apiserver/pkg/admission/metrics/metrics.go b/vendor/k8s.io/apiserver/pkg/admission/metrics/metrics.go index 6c1761149..b8bcb6a69 100644 --- a/vendor/k8s.io/apiserver/pkg/admission/metrics/metrics.go +++ b/vendor/k8s.io/apiserver/pkg/admission/metrics/metrics.go @@ -207,7 +207,7 @@ func newAdmissionMetrics() *AdmissionMetrics { Namespace: namespace, Subsystem: subsystem, Name: "webhook_fail_open_count", - Help: "Admission webhook fail open count, identified by name and broken out for each admission type (validating or mutating).", + Help: "Admission webhook fail open count, identified by name and broken out for each admission type (validating or admit).", StabilityLevel: metrics.ALPHA, }, []string{"name", "type"}) @@ -217,7 +217,7 @@ func newAdmissionMetrics() *AdmissionMetrics { Namespace: namespace, Subsystem: subsystem, Name: "webhook_request_total", - Help: "Admission webhook request total, identified by name and broken out for each admission type (validating or mutating) and operation. Additional labels specify whether the request was rejected or not and an HTTP status code. Codes greater than 600 are truncated to 600, to keep the metrics cardinality bounded.", + Help: "Admission webhook request total, identified by name and broken out for each admission type (validating or admit) and operation. Additional labels specify whether the request was rejected or not and an HTTP status code. 
Codes greater than 600 are truncated to 600, to keep the metrics cardinality bounded.", StabilityLevel: metrics.ALPHA, }, []string{"name", "type", "operation", "code", "rejected"}) diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/validating/caching_authorizer.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/authorizer/caching_authorizer.go similarity index 93% rename from vendor/k8s.io/apiserver/pkg/admission/plugin/policy/validating/caching_authorizer.go rename to vendor/k8s.io/apiserver/pkg/admission/plugin/authorizer/caching_authorizer.go index ac13dbeee..5dffd9723 100644 --- a/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/validating/caching_authorizer.go +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/authorizer/caching_authorizer.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package validating +package authorizer import ( "context" @@ -39,7 +39,10 @@ type cachingAuthorizer struct { decisions map[string]authzResult } -func newCachingAuthorizer(in authorizer.Authorizer) authorizer.Authorizer { +// NewCachingAuthorizer returns an authorizer that caches decisions for the duration +// of the authorizers use. Intended to be used for short-lived operations such as +// the handling of a request in the admission chain, and then discarded. +func NewCachingAuthorizer(in authorizer.Authorizer) authorizer.Authorizer { return &cachingAuthorizer{ authorizer: in, decisions: make(map[string]authzResult), diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/cel/activation.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/cel/activation.go new file mode 100644 index 000000000..9771ae9ae --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/cel/activation.go @@ -0,0 +1,190 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cel + +import ( + "context" + "fmt" + "github.com/google/cel-go/interpreter" + "math" + "time" + + admissionv1 "k8s.io/api/admission/v1" + v1 "k8s.io/api/core/v1" + "k8s.io/apiserver/pkg/admission" + "k8s.io/apiserver/pkg/cel" + "k8s.io/apiserver/pkg/cel/library" +) + +// newActivation creates an activation for CEL admission plugins from the given request, admission chain and +// variable binding information. 
+func newActivation(compositionCtx CompositionContext, versionedAttr *admission.VersionedAttributes, request *admissionv1.AdmissionRequest, inputs OptionalVariableBindings, namespace *v1.Namespace) (*evaluationActivation, error) { + oldObjectVal, err := objectToResolveVal(versionedAttr.VersionedOldObject) + if err != nil { + return nil, fmt.Errorf("failed to prepare oldObject variable for evaluation: %w", err) + } + objectVal, err := objectToResolveVal(versionedAttr.VersionedObject) + if err != nil { + return nil, fmt.Errorf("failed to prepare object variable for evaluation: %w", err) + } + var paramsVal, authorizerVal, requestResourceAuthorizerVal any + if inputs.VersionedParams != nil { + paramsVal, err = objectToResolveVal(inputs.VersionedParams) + if err != nil { + return nil, fmt.Errorf("failed to prepare params variable for evaluation: %w", err) + } + } + + if inputs.Authorizer != nil { + authorizerVal = library.NewAuthorizerVal(versionedAttr.GetUserInfo(), inputs.Authorizer) + requestResourceAuthorizerVal = library.NewResourceAuthorizerVal(versionedAttr.GetUserInfo(), inputs.Authorizer, versionedAttr) + } + + requestVal, err := convertObjectToUnstructured(request) + if err != nil { + return nil, fmt.Errorf("failed to prepare request variable for evaluation: %w", err) + } + namespaceVal, err := objectToResolveVal(namespace) + if err != nil { + return nil, fmt.Errorf("failed to prepare namespace variable for evaluation: %w", err) + } + va := &evaluationActivation{ + object: objectVal, + oldObject: oldObjectVal, + params: paramsVal, + request: requestVal.Object, + namespace: namespaceVal, + authorizer: authorizerVal, + requestResourceAuthorizer: requestResourceAuthorizerVal, + } + + // composition is an optional feature that only applies for ValidatingAdmissionPolicy and MutatingAdmissionPolicy. + if compositionCtx != nil { + va.variables = compositionCtx.Variables(va) + } + return va, nil +} + +type evaluationActivation struct { + object, oldObject, params, request, namespace, authorizer, requestResourceAuthorizer, variables interface{} +} + +// ResolveName returns a value from the activation by qualified name, or false if the name +// could not be found. +func (a *evaluationActivation) ResolveName(name string) (interface{}, bool) { + switch name { + case ObjectVarName: + return a.object, true + case OldObjectVarName: + return a.oldObject, true + case ParamsVarName: + return a.params, true // params may be null + case RequestVarName: + return a.request, true + case NamespaceVarName: + return a.namespace, true + case AuthorizerVarName: + return a.authorizer, a.authorizer != nil + case RequestResourceAuthorizerVarName: + return a.requestResourceAuthorizer, a.requestResourceAuthorizer != nil + case VariableVarName: // variables always present + return a.variables, true + default: + return nil, false + } +} + +// Parent returns the parent of the current activation, may be nil. +// If non-nil, the parent will be searched during resolve calls. +func (a *evaluationActivation) Parent() interpreter.Activation { + return nil +} + +// Evaluate runs a compiled CEL admission plugin expression using the provided activation and CEL +// runtime cost budget. 
+func (a *evaluationActivation) Evaluate(ctx context.Context, compositionCtx CompositionContext, compilationResult CompilationResult, remainingBudget int64) (EvaluationResult, int64, error) { + var evaluation = EvaluationResult{} + if compilationResult.ExpressionAccessor == nil { // in case of placeholder + return evaluation, remainingBudget, nil + } + + evaluation.ExpressionAccessor = compilationResult.ExpressionAccessor + if compilationResult.Error != nil { + evaluation.Error = &cel.Error{ + Type: cel.ErrorTypeInvalid, + Detail: fmt.Sprintf("compilation error: %v", compilationResult.Error), + Cause: compilationResult.Error, + } + return evaluation, remainingBudget, nil + } + if compilationResult.Program == nil { + evaluation.Error = &cel.Error{ + Type: cel.ErrorTypeInternal, + Detail: "unexpected internal error compiling expression", + } + return evaluation, remainingBudget, nil + } + t1 := time.Now() + evalResult, evalDetails, err := compilationResult.Program.ContextEval(ctx, a) + // budget may be spent due to lazy evaluation of composited variables + if compositionCtx != nil { + compositionCost := compositionCtx.GetAndResetCost() + if compositionCost > remainingBudget { + return evaluation, -1, &cel.Error{ + Type: cel.ErrorTypeInvalid, + Detail: "validation failed due to running out of cost budget, no further validation rules will be run", + Cause: cel.ErrOutOfBudget, + } + } + remainingBudget -= compositionCost + } + elapsed := time.Since(t1) + evaluation.Elapsed = elapsed + if evalDetails == nil { + return evaluation, -1, &cel.Error{ + Type: cel.ErrorTypeInternal, + Detail: fmt.Sprintf("runtime cost could not be calculated for expression: %v, no further expression will be run", compilationResult.ExpressionAccessor.GetExpression()), + } + } else { + rtCost := evalDetails.ActualCost() + if rtCost == nil { + return evaluation, -1, &cel.Error{ + Type: cel.ErrorTypeInvalid, + Detail: fmt.Sprintf("runtime cost could not be calculated for expression: %v, no further expression will be run", compilationResult.ExpressionAccessor.GetExpression()), + Cause: cel.ErrOutOfBudget, + } + } else { + if *rtCost > math.MaxInt64 || int64(*rtCost) > remainingBudget { + return evaluation, -1, &cel.Error{ + Type: cel.ErrorTypeInvalid, + Detail: "validation failed due to running out of cost budget, no further validation rules will be run", + Cause: cel.ErrOutOfBudget, + } + } + remainingBudget -= int64(*rtCost) + } + } + if err != nil { + evaluation.Error = &cel.Error{ + Type: cel.ErrorTypeInvalid, + Detail: fmt.Sprintf("expression '%v' resulted in error: %v", compilationResult.ExpressionAccessor.GetExpression(), err), + } + } else { + evaluation.EvalResult = evalResult + } + return evaluation, remainingBudget, nil +} diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/cel/compile.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/cel/compile.go index 06035f6b9..f0fff1304 100644 --- a/vendor/k8s.io/apiserver/pkg/admission/plugin/cel/compile.go +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/cel/compile.go @@ -24,8 +24,10 @@ import ( "k8s.io/apimachinery/pkg/util/version" celconfig "k8s.io/apiserver/pkg/apis/cel" apiservercel "k8s.io/apiserver/pkg/cel" + "k8s.io/apiserver/pkg/cel/common" "k8s.io/apiserver/pkg/cel/environment" "k8s.io/apiserver/pkg/cel/library" + "k8s.io/apiserver/pkg/cel/mutation" ) const ( @@ -186,7 +188,7 @@ func (c compiler) CompileCELExpression(expressionAccessor ExpressionAccessor, op found := false returnTypes := expressionAccessor.ReturnTypes() for _, returnType := range 
returnTypes { - if ast.OutputType() == returnType || cel.AnyType == returnType { + if ast.OutputType().IsExactType(returnType) || cel.AnyType.IsExactType(returnType) { found = true break } @@ -194,9 +196,9 @@ func (c compiler) CompileCELExpression(expressionAccessor ExpressionAccessor, op if !found { var reason string if len(returnTypes) == 1 { - reason = fmt.Sprintf("must evaluate to %v", returnTypes[0].String()) + reason = fmt.Sprintf("must evaluate to %v but got %v", returnTypes[0].String(), ast.OutputType().String()) } else { - reason = fmt.Sprintf("must evaluate to one of %v", returnTypes) + reason = fmt.Sprintf("must evaluate to one of %v but got %v", returnTypes, ast.OutputType().String()) } return resultError(reason, apiservercel.ErrorTypeInvalid, nil) @@ -226,46 +228,78 @@ func mustBuildEnvs(baseEnv *environment.EnvSet) variableDeclEnvs { envs := make(variableDeclEnvs, 8) // since the number of variable combinations is small, pre-build a environment for each for _, hasParams := range []bool{false, true} { for _, hasAuthorizer := range []bool{false, true} { + var err error for _, strictCost := range []bool{false, true} { - var envOpts []cel.EnvOption - if hasParams { - envOpts = append(envOpts, cel.Variable(ParamsVarName, cel.DynType)) - } - if hasAuthorizer { - envOpts = append(envOpts, - cel.Variable(AuthorizerVarName, library.AuthorizerType), - cel.Variable(RequestResourceAuthorizerVarName, library.ResourceCheckType)) - } - envOpts = append(envOpts, - cel.Variable(ObjectVarName, cel.DynType), - cel.Variable(OldObjectVarName, cel.DynType), - cel.Variable(NamespaceVarName, namespaceType.CelType()), - cel.Variable(RequestVarName, requestType.CelType())) - - extended, err := baseEnv.Extend( - environment.VersionedOptions{ - // Feature epoch was actually 1.26, but we artificially set it to 1.0 because these - // options should always be present. 
- IntroducedVersion: version.MajorMinor(1, 0), - EnvOptions: envOpts, - DeclTypes: []*apiservercel.DeclType{ - namespaceType, - requestType, - }, - }, - ) + decl := OptionalVariableDeclarations{HasParams: hasParams, HasAuthorizer: hasAuthorizer, StrictCost: strictCost} + envs[decl], err = createEnvForOpts(baseEnv, namespaceType, requestType, decl) if err != nil { - panic(fmt.Sprintf("environment misconfigured: %v", err)) + panic(err) } - if strictCost { - extended, err = extended.Extend(environment.StrictCostOpt) - if err != nil { - panic(fmt.Sprintf("environment misconfigured: %v", err)) - } - } - envs[OptionalVariableDeclarations{HasParams: hasParams, HasAuthorizer: hasAuthorizer, StrictCost: strictCost}] = extended + } + // We only need this ObjectTypes where strict cost is true + decl := OptionalVariableDeclarations{HasParams: hasParams, HasAuthorizer: hasAuthorizer, StrictCost: true, HasPatchTypes: true} + envs[decl], err = createEnvForOpts(baseEnv, namespaceType, requestType, decl) + if err != nil { + panic(err) } } } return envs } + +func createEnvForOpts(baseEnv *environment.EnvSet, namespaceType *apiservercel.DeclType, requestType *apiservercel.DeclType, opts OptionalVariableDeclarations) (*environment.EnvSet, error) { + var envOpts []cel.EnvOption + envOpts = append(envOpts, + cel.Variable(ObjectVarName, cel.DynType), + cel.Variable(OldObjectVarName, cel.DynType), + cel.Variable(NamespaceVarName, namespaceType.CelType()), + cel.Variable(RequestVarName, requestType.CelType())) + if opts.HasParams { + envOpts = append(envOpts, cel.Variable(ParamsVarName, cel.DynType)) + } + if opts.HasAuthorizer { + envOpts = append(envOpts, + cel.Variable(AuthorizerVarName, library.AuthorizerType), + cel.Variable(RequestResourceAuthorizerVarName, library.ResourceCheckType)) + } + + extended, err := baseEnv.Extend( + environment.VersionedOptions{ + // Feature epoch was actually 1.26, but we artificially set it to 1.0 because these + // options should always be present. + IntroducedVersion: version.MajorMinor(1, 0), + EnvOptions: envOpts, + DeclTypes: []*apiservercel.DeclType{ + namespaceType, + requestType, + }, + }, + ) + if err != nil { + return nil, fmt.Errorf("environment misconfigured: %w", err) + } + if opts.StrictCost { + extended, err = extended.Extend(environment.StrictCostOpt) + if err != nil { + return nil, fmt.Errorf("environment misconfigured: %w", err) + } + } + + if opts.HasPatchTypes { + extended, err = extended.Extend(hasPatchTypes) + if err != nil { + return nil, fmt.Errorf("environment misconfigured: %w", err) + } + } + return extended, nil +} + +var hasPatchTypes = environment.VersionedOptions{ + // Feature epoch was actually 1.32, but we artificially set it to 1.0 because these + // options should always be present. + IntroducedVersion: version.MajorMinor(1, 0), + EnvOptions: []cel.EnvOption{ + common.ResolverEnvOption(&mutation.DynamicTypeResolver{}), + environment.UnversionedLib(library.JSONPatch), // for jsonPatch.escape() function + }, +} diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/cel/composition.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/cel/composition.go index 9c449ecda..bf8715a14 100644 --- a/vendor/k8s.io/apiserver/pkg/admission/plugin/cel/composition.go +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/cel/composition.go @@ -36,15 +36,27 @@ import ( const VariablesTypeName = "kubernetes.variables" +// CompositedCompiler compiles expressions with variable composition. 
type CompositedCompiler struct { Compiler - FilterCompiler + ConditionCompiler + MutatingCompiler CompositionEnv *CompositionEnv } -type CompositedFilter struct { - Filter +// CompositedConditionEvaluator provides evaluation of a condition expression with variable composition. +// The expressions must return a boolean. +type CompositedConditionEvaluator struct { + ConditionEvaluator + + compositionEnv *CompositionEnv +} + +// CompositedEvaluator provides evaluation of a single expression with variable composition. +// The types that may returned by the expression is determined at compilation time. +type CompositedEvaluator struct { + MutatingEvaluator compositionEnv *CompositionEnv } @@ -64,11 +76,13 @@ func NewCompositedCompilerFromTemplate(context *CompositionEnv) *CompositedCompi CompiledVariables: map[string]CompilationResult{}, } compiler := NewCompiler(context.EnvSet) - filterCompiler := NewFilterCompiler(context.EnvSet) + conditionCompiler := &conditionCompiler{compiler} + mutation := &mutatingCompiler{compiler} return &CompositedCompiler{ - Compiler: compiler, - FilterCompiler: filterCompiler, - CompositionEnv: context, + Compiler: compiler, + ConditionCompiler: conditionCompiler, + MutatingCompiler: mutation, + CompositionEnv: context, } } @@ -85,11 +99,20 @@ func (c *CompositedCompiler) CompileAndStoreVariable(variable NamedExpressionAcc return result } -func (c *CompositedCompiler) Compile(expressions []ExpressionAccessor, optionalDecls OptionalVariableDeclarations, envType environment.Type) Filter { - filter := c.FilterCompiler.Compile(expressions, optionalDecls, envType) - return &CompositedFilter{ - Filter: filter, - compositionEnv: c.CompositionEnv, +func (c *CompositedCompiler) CompileCondition(expressions []ExpressionAccessor, optionalDecls OptionalVariableDeclarations, envType environment.Type) ConditionEvaluator { + condition := c.ConditionCompiler.CompileCondition(expressions, optionalDecls, envType) + return &CompositedConditionEvaluator{ + ConditionEvaluator: condition, + compositionEnv: c.CompositionEnv, + } +} + +// CompileEvaluator compiles an mutatingEvaluator for the given expression, options and environment. 
+func (c *CompositedCompiler) CompileMutatingEvaluator(expression ExpressionAccessor, optionalDecls OptionalVariableDeclarations, envType environment.Type) MutatingEvaluator { + mutation := c.MutatingCompiler.CompileMutatingEvaluator(expression, optionalDecls, envType) + return &CompositedEvaluator{ + MutatingEvaluator: mutation, + compositionEnv: c.CompositionEnv, } } @@ -160,9 +183,9 @@ func (c *compositionContext) Variables(activation any) ref.Val { return lazyMap } -func (f *CompositedFilter) ForInput(ctx context.Context, versionedAttr *admission.VersionedAttributes, request *v1.AdmissionRequest, optionalVars OptionalVariableBindings, namespace *corev1.Namespace, runtimeCELCostBudget int64) ([]EvaluationResult, int64, error) { +func (f *CompositedConditionEvaluator) ForInput(ctx context.Context, versionedAttr *admission.VersionedAttributes, request *v1.AdmissionRequest, optionalVars OptionalVariableBindings, namespace *corev1.Namespace, runtimeCELCostBudget int64) ([]EvaluationResult, int64, error) { ctx = f.compositionEnv.CreateContext(ctx) - return f.Filter.ForInput(ctx, versionedAttr, request, optionalVars, namespace, runtimeCELCostBudget) + return f.ConditionEvaluator.ForInput(ctx, versionedAttr, request, optionalVars, namespace, runtimeCELCostBudget) } func (c *compositionContext) reportCost(cost int64) { diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/cel/condition.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/cel/condition.go new file mode 100644 index 000000000..f28401f3e --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/cel/condition.go @@ -0,0 +1,216 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cel + +import ( + "context" + "reflect" + + admissionv1 "k8s.io/api/admission/v1" + authenticationv1 "k8s.io/api/authentication/v1" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apiserver/pkg/admission" + "k8s.io/apiserver/pkg/cel/environment" +) + +// conditionCompiler implement the interface ConditionCompiler. 
+type conditionCompiler struct { + compiler Compiler +} + +func NewConditionCompiler(env *environment.EnvSet) ConditionCompiler { + return &conditionCompiler{compiler: NewCompiler(env)} +} + +// CompileCondition compiles the cel expressions defined in the ExpressionAccessors into a ConditionEvaluator +func (c *conditionCompiler) CompileCondition(expressionAccessors []ExpressionAccessor, options OptionalVariableDeclarations, mode environment.Type) ConditionEvaluator { + compilationResults := make([]CompilationResult, len(expressionAccessors)) + for i, expressionAccessor := range expressionAccessors { + if expressionAccessor == nil { + continue + } + compilationResults[i] = c.compiler.CompileCELExpression(expressionAccessor, options, mode) + } + return NewCondition(compilationResults) +} + +// condition implements the ConditionEvaluator interface +type condition struct { + compilationResults []CompilationResult +} + +func NewCondition(compilationResults []CompilationResult) ConditionEvaluator { + return &condition{ + compilationResults, + } +} + +func convertObjectToUnstructured(obj interface{}) (*unstructured.Unstructured, error) { + if obj == nil || reflect.ValueOf(obj).IsNil() { + return &unstructured.Unstructured{Object: nil}, nil + } + ret, err := runtime.DefaultUnstructuredConverter.ToUnstructured(obj) + if err != nil { + return nil, err + } + return &unstructured.Unstructured{Object: ret}, nil +} + +func objectToResolveVal(r runtime.Object) (interface{}, error) { + if r == nil || reflect.ValueOf(r).IsNil() { + return nil, nil + } + v, err := convertObjectToUnstructured(r) + if err != nil { + return nil, err + } + return v.Object, nil +} + +// ForInput evaluates the compiled CEL expressions converting them into CELEvaluations +// errors per evaluation are returned on the Evaluation object +// runtimeCELCostBudget was added for testing purpose only. Callers should always use const RuntimeCELCostBudget from k8s.io/apiserver/pkg/apis/cel/config.go as input. +func (c *condition) ForInput(ctx context.Context, versionedAttr *admission.VersionedAttributes, request *admissionv1.AdmissionRequest, inputs OptionalVariableBindings, namespace *v1.Namespace, runtimeCELCostBudget int64) ([]EvaluationResult, int64, error) { + // TODO: replace unstructured with ref.Val for CEL variables when native type support is available + evaluations := make([]EvaluationResult, len(c.compilationResults)) + var err error + + // if this activation supports composition, we will need the compositionCtx. It may be nil. 
+ compositionCtx, _ := ctx.(CompositionContext) + + activation, err := newActivation(compositionCtx, versionedAttr, request, inputs, namespace) + if err != nil { + return nil, -1, err + } + + remainingBudget := runtimeCELCostBudget + for i, compilationResult := range c.compilationResults { + evaluations[i], remainingBudget, err = activation.Evaluate(ctx, compositionCtx, compilationResult, remainingBudget) + if err != nil { + return nil, -1, err + } + } + + return evaluations, remainingBudget, nil +} + +// TODO: to reuse https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/request/admissionreview.go#L154 +func CreateAdmissionRequest(attr admission.Attributes, equivalentGVR metav1.GroupVersionResource, equivalentKind metav1.GroupVersionKind) *admissionv1.AdmissionRequest { + // Attempting to use same logic as webhook for constructing resource + // GVK, GVR, subresource + // Use the GVK, GVR that the matcher decided was equivalent to that of the request + // https://github.com/kubernetes/kubernetes/blob/90c362b3430bcbbf8f245fadbcd521dab39f1d7c/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/generic/webhook.go#L182-L210 + gvk := equivalentKind + gvr := equivalentGVR + subresource := attr.GetSubresource() + + requestGVK := attr.GetKind() + requestGVR := attr.GetResource() + requestSubResource := attr.GetSubresource() + + aUserInfo := attr.GetUserInfo() + var userInfo authenticationv1.UserInfo + if aUserInfo != nil { + userInfo = authenticationv1.UserInfo{ + Extra: make(map[string]authenticationv1.ExtraValue), + Groups: aUserInfo.GetGroups(), + UID: aUserInfo.GetUID(), + Username: aUserInfo.GetName(), + } + // Convert the extra information in the user object + for key, val := range aUserInfo.GetExtra() { + userInfo.Extra[key] = authenticationv1.ExtraValue(val) + } + } + + dryRun := attr.IsDryRun() + + return &admissionv1.AdmissionRequest{ + Kind: metav1.GroupVersionKind{ + Group: gvk.Group, + Kind: gvk.Kind, + Version: gvk.Version, + }, + Resource: metav1.GroupVersionResource{ + Group: gvr.Group, + Resource: gvr.Resource, + Version: gvr.Version, + }, + SubResource: subresource, + RequestKind: &metav1.GroupVersionKind{ + Group: requestGVK.Group, + Kind: requestGVK.Kind, + Version: requestGVK.Version, + }, + RequestResource: &metav1.GroupVersionResource{ + Group: requestGVR.Group, + Resource: requestGVR.Resource, + Version: requestGVR.Version, + }, + RequestSubResource: requestSubResource, + Name: attr.GetName(), + Namespace: attr.GetNamespace(), + Operation: admissionv1.Operation(attr.GetOperation()), + UserInfo: userInfo, + // Leave Object and OldObject unset since we don't provide access to them via request + DryRun: &dryRun, + Options: runtime.RawExtension{ + Object: attr.GetOperationOptions(), + }, + } +} + +// CreateNamespaceObject creates a Namespace object that is suitable for the CEL evaluation. 
+// If the namespace is nil, CreateNamespaceObject returns nil +func CreateNamespaceObject(namespace *v1.Namespace) *v1.Namespace { + if namespace == nil { + return nil + } + + return &v1.Namespace{ + Status: namespace.Status, + Spec: namespace.Spec, + ObjectMeta: metav1.ObjectMeta{ + Name: namespace.Name, + GenerateName: namespace.GenerateName, + Namespace: namespace.Namespace, + UID: namespace.UID, + ResourceVersion: namespace.ResourceVersion, + Generation: namespace.Generation, + CreationTimestamp: namespace.CreationTimestamp, + DeletionTimestamp: namespace.DeletionTimestamp, + DeletionGracePeriodSeconds: namespace.DeletionGracePeriodSeconds, + Labels: namespace.Labels, + Annotations: namespace.Annotations, + Finalizers: namespace.Finalizers, + }, + } +} + +// CompilationErrors returns a list of all the errors from the compilation of the mutatingEvaluator +func (c *condition) CompilationErrors() []error { + compilationErrors := []error{} + for _, result := range c.compilationResults { + if result.Error != nil { + compilationErrors = append(compilationErrors, result.Error) + } + } + return compilationErrors +} diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/cel/filter.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/cel/filter.go deleted file mode 100644 index 216a474d2..000000000 --- a/vendor/k8s.io/apiserver/pkg/admission/plugin/cel/filter.go +++ /dev/null @@ -1,361 +0,0 @@ -/* -Copyright 2022 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package cel - -import ( - "context" - "fmt" - "math" - "reflect" - "time" - - "github.com/google/cel-go/interpreter" - - admissionv1 "k8s.io/api/admission/v1" - authenticationv1 "k8s.io/api/authentication/v1" - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apiserver/pkg/admission" - "k8s.io/apiserver/pkg/cel" - "k8s.io/apiserver/pkg/cel/environment" - "k8s.io/apiserver/pkg/cel/library" -) - -// filterCompiler implement the interface FilterCompiler. -type filterCompiler struct { - compiler Compiler -} - -func NewFilterCompiler(env *environment.EnvSet) FilterCompiler { - return &filterCompiler{compiler: NewCompiler(env)} -} - -type evaluationActivation struct { - object, oldObject, params, request, namespace, authorizer, requestResourceAuthorizer, variables interface{} -} - -// ResolveName returns a value from the activation by qualified name, or false if the name -// could not be found. 
-func (a *evaluationActivation) ResolveName(name string) (interface{}, bool) { - switch name { - case ObjectVarName: - return a.object, true - case OldObjectVarName: - return a.oldObject, true - case ParamsVarName: - return a.params, true // params may be null - case RequestVarName: - return a.request, true - case NamespaceVarName: - return a.namespace, true - case AuthorizerVarName: - return a.authorizer, a.authorizer != nil - case RequestResourceAuthorizerVarName: - return a.requestResourceAuthorizer, a.requestResourceAuthorizer != nil - case VariableVarName: // variables always present - return a.variables, true - default: - return nil, false - } -} - -// Parent returns the parent of the current activation, may be nil. -// If non-nil, the parent will be searched during resolve calls. -func (a *evaluationActivation) Parent() interpreter.Activation { - return nil -} - -// Compile compiles the cel expressions defined in the ExpressionAccessors into a Filter -func (c *filterCompiler) Compile(expressionAccessors []ExpressionAccessor, options OptionalVariableDeclarations, mode environment.Type) Filter { - compilationResults := make([]CompilationResult, len(expressionAccessors)) - for i, expressionAccessor := range expressionAccessors { - if expressionAccessor == nil { - continue - } - compilationResults[i] = c.compiler.CompileCELExpression(expressionAccessor, options, mode) - } - return NewFilter(compilationResults) -} - -// filter implements the Filter interface -type filter struct { - compilationResults []CompilationResult -} - -func NewFilter(compilationResults []CompilationResult) Filter { - return &filter{ - compilationResults, - } -} - -func convertObjectToUnstructured(obj interface{}) (*unstructured.Unstructured, error) { - if obj == nil || reflect.ValueOf(obj).IsNil() { - return &unstructured.Unstructured{Object: nil}, nil - } - ret, err := runtime.DefaultUnstructuredConverter.ToUnstructured(obj) - if err != nil { - return nil, err - } - return &unstructured.Unstructured{Object: ret}, nil -} - -func objectToResolveVal(r runtime.Object) (interface{}, error) { - if r == nil || reflect.ValueOf(r).IsNil() { - return nil, nil - } - v, err := convertObjectToUnstructured(r) - if err != nil { - return nil, err - } - return v.Object, nil -} - -// ForInput evaluates the compiled CEL expressions converting them into CELEvaluations -// errors per evaluation are returned on the Evaluation object -// runtimeCELCostBudget was added for testing purpose only. Callers should always use const RuntimeCELCostBudget from k8s.io/apiserver/pkg/apis/cel/config.go as input. 
-func (f *filter) ForInput(ctx context.Context, versionedAttr *admission.VersionedAttributes, request *admissionv1.AdmissionRequest, inputs OptionalVariableBindings, namespace *v1.Namespace, runtimeCELCostBudget int64) ([]EvaluationResult, int64, error) { - // TODO: replace unstructured with ref.Val for CEL variables when native type support is available - evaluations := make([]EvaluationResult, len(f.compilationResults)) - var err error - - oldObjectVal, err := objectToResolveVal(versionedAttr.VersionedOldObject) - if err != nil { - return nil, -1, err - } - objectVal, err := objectToResolveVal(versionedAttr.VersionedObject) - if err != nil { - return nil, -1, err - } - var paramsVal, authorizerVal, requestResourceAuthorizerVal any - if inputs.VersionedParams != nil { - paramsVal, err = objectToResolveVal(inputs.VersionedParams) - if err != nil { - return nil, -1, err - } - } - - if inputs.Authorizer != nil { - authorizerVal = library.NewAuthorizerVal(versionedAttr.GetUserInfo(), inputs.Authorizer) - requestResourceAuthorizerVal = library.NewResourceAuthorizerVal(versionedAttr.GetUserInfo(), inputs.Authorizer, versionedAttr) - } - - requestVal, err := convertObjectToUnstructured(request) - if err != nil { - return nil, -1, err - } - namespaceVal, err := objectToResolveVal(namespace) - if err != nil { - return nil, -1, err - } - va := &evaluationActivation{ - object: objectVal, - oldObject: oldObjectVal, - params: paramsVal, - request: requestVal.Object, - namespace: namespaceVal, - authorizer: authorizerVal, - requestResourceAuthorizer: requestResourceAuthorizerVal, - } - - // composition is an optional feature that only applies for ValidatingAdmissionPolicy. - // check if the context allows composition - var compositionCtx CompositionContext - var ok bool - if compositionCtx, ok = ctx.(CompositionContext); ok { - va.variables = compositionCtx.Variables(va) - } - - remainingBudget := runtimeCELCostBudget - for i, compilationResult := range f.compilationResults { - var evaluation = &evaluations[i] - if compilationResult.ExpressionAccessor == nil { // in case of placeholder - continue - } - evaluation.ExpressionAccessor = compilationResult.ExpressionAccessor - if compilationResult.Error != nil { - evaluation.Error = &cel.Error{ - Type: cel.ErrorTypeInvalid, - Detail: fmt.Sprintf("compilation error: %v", compilationResult.Error), - Cause: compilationResult.Error, - } - continue - } - if compilationResult.Program == nil { - evaluation.Error = &cel.Error{ - Type: cel.ErrorTypeInternal, - Detail: fmt.Sprintf("unexpected internal error compiling expression"), - } - continue - } - t1 := time.Now() - evalResult, evalDetails, err := compilationResult.Program.ContextEval(ctx, va) - // budget may be spent due to lazy evaluation of composited variables - if compositionCtx != nil { - compositionCost := compositionCtx.GetAndResetCost() - if compositionCost > remainingBudget { - return nil, -1, &cel.Error{ - Type: cel.ErrorTypeInvalid, - Detail: fmt.Sprintf("validation failed due to running out of cost budget, no further validation rules will be run"), - Cause: cel.ErrOutOfBudget, - } - } - remainingBudget -= compositionCost - } - elapsed := time.Since(t1) - evaluation.Elapsed = elapsed - if evalDetails == nil { - return nil, -1, &cel.Error{ - Type: cel.ErrorTypeInternal, - Detail: fmt.Sprintf("runtime cost could not be calculated for expression: %v, no further expression will be run", compilationResult.ExpressionAccessor.GetExpression()), - } - } else { - rtCost := evalDetails.ActualCost() - if rtCost 
== nil { - return nil, -1, &cel.Error{ - Type: cel.ErrorTypeInvalid, - Detail: fmt.Sprintf("runtime cost could not be calculated for expression: %v, no further expression will be run", compilationResult.ExpressionAccessor.GetExpression()), - Cause: cel.ErrOutOfBudget, - } - } else { - if *rtCost > math.MaxInt64 || int64(*rtCost) > remainingBudget { - return nil, -1, &cel.Error{ - Type: cel.ErrorTypeInvalid, - Detail: fmt.Sprintf("validation failed due to running out of cost budget, no further validation rules will be run"), - Cause: cel.ErrOutOfBudget, - } - } - remainingBudget -= int64(*rtCost) - } - } - if err != nil { - evaluation.Error = &cel.Error{ - Type: cel.ErrorTypeInvalid, - Detail: fmt.Sprintf("expression '%v' resulted in error: %v", compilationResult.ExpressionAccessor.GetExpression(), err), - } - } else { - evaluation.EvalResult = evalResult - } - } - - return evaluations, remainingBudget, nil -} - -// TODO: to reuse https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/request/admissionreview.go#L154 -func CreateAdmissionRequest(attr admission.Attributes, equivalentGVR metav1.GroupVersionResource, equivalentKind metav1.GroupVersionKind) *admissionv1.AdmissionRequest { - // Attempting to use same logic as webhook for constructing resource - // GVK, GVR, subresource - // Use the GVK, GVR that the matcher decided was equivalent to that of the request - // https://github.com/kubernetes/kubernetes/blob/90c362b3430bcbbf8f245fadbcd521dab39f1d7c/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/generic/webhook.go#L182-L210 - gvk := equivalentKind - gvr := equivalentGVR - subresource := attr.GetSubresource() - - requestGVK := attr.GetKind() - requestGVR := attr.GetResource() - requestSubResource := attr.GetSubresource() - - aUserInfo := attr.GetUserInfo() - var userInfo authenticationv1.UserInfo - if aUserInfo != nil { - userInfo = authenticationv1.UserInfo{ - Extra: make(map[string]authenticationv1.ExtraValue), - Groups: aUserInfo.GetGroups(), - UID: aUserInfo.GetUID(), - Username: aUserInfo.GetName(), - } - // Convert the extra information in the user object - for key, val := range aUserInfo.GetExtra() { - userInfo.Extra[key] = authenticationv1.ExtraValue(val) - } - } - - dryRun := attr.IsDryRun() - - return &admissionv1.AdmissionRequest{ - Kind: metav1.GroupVersionKind{ - Group: gvk.Group, - Kind: gvk.Kind, - Version: gvk.Version, - }, - Resource: metav1.GroupVersionResource{ - Group: gvr.Group, - Resource: gvr.Resource, - Version: gvr.Version, - }, - SubResource: subresource, - RequestKind: &metav1.GroupVersionKind{ - Group: requestGVK.Group, - Kind: requestGVK.Kind, - Version: requestGVK.Version, - }, - RequestResource: &metav1.GroupVersionResource{ - Group: requestGVR.Group, - Resource: requestGVR.Resource, - Version: requestGVR.Version, - }, - RequestSubResource: requestSubResource, - Name: attr.GetName(), - Namespace: attr.GetNamespace(), - Operation: admissionv1.Operation(attr.GetOperation()), - UserInfo: userInfo, - // Leave Object and OldObject unset since we don't provide access to them via request - DryRun: &dryRun, - Options: runtime.RawExtension{ - Object: attr.GetOperationOptions(), - }, - } -} - -// CreateNamespaceObject creates a Namespace object that is suitable for the CEL evaluation. 
-// If the namespace is nil, CreateNamespaceObject returns nil -func CreateNamespaceObject(namespace *v1.Namespace) *v1.Namespace { - if namespace == nil { - return nil - } - - return &v1.Namespace{ - Status: namespace.Status, - Spec: namespace.Spec, - ObjectMeta: metav1.ObjectMeta{ - Name: namespace.Name, - GenerateName: namespace.GenerateName, - Namespace: namespace.Namespace, - UID: namespace.UID, - ResourceVersion: namespace.ResourceVersion, - Generation: namespace.Generation, - CreationTimestamp: namespace.CreationTimestamp, - DeletionTimestamp: namespace.DeletionTimestamp, - DeletionGracePeriodSeconds: namespace.DeletionGracePeriodSeconds, - Labels: namespace.Labels, - Annotations: namespace.Annotations, - Finalizers: namespace.Finalizers, - }, - } -} - -// CompilationErrors returns a list of all the errors from the compilation of the evaluator -func (e *filter) CompilationErrors() []error { - compilationErrors := []error{} - for _, result := range e.compilationResults { - if result.Error != nil { - compilationErrors = append(compilationErrors, result.Error) - } - } - return compilationErrors -} diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/cel/interface.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/cel/interface.go index ae61dc826..a9e35a226 100644 --- a/vendor/k8s.io/apiserver/pkg/admission/plugin/cel/interface.go +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/cel/interface.go @@ -63,12 +63,15 @@ type OptionalVariableDeclarations struct { HasAuthorizer bool // StrictCost specifies if the CEL cost limitation is strict for extended libraries as well as native libraries. StrictCost bool + // HasPatchTypes specifies if JSONPatch, Object, Object.metadata and similar types are available in CEL. These can be used + // to initialize the typed objects in CEL required to create patches. + HasPatchTypes bool } -// FilterCompiler contains a function to assist with converting types and values to/from CEL-typed values. -type FilterCompiler interface { - // Compile is used for the cel expression compilation - Compile(expressions []ExpressionAccessor, optionalDecls OptionalVariableDeclarations, envType environment.Type) Filter +// ConditionCompiler contains a function to assist with converting types and values to/from CEL-typed values. +type ConditionCompiler interface { + // CompileCondition is used for the cel expression compilation + CompileCondition(expressions []ExpressionAccessor, optionalDecls OptionalVariableDeclarations, envType environment.Type) ConditionEvaluator } // OptionalVariableBindings provides expression bindings for optional CEL variables. @@ -82,16 +85,38 @@ type OptionalVariableBindings struct { Authorizer authorizer.Authorizer } -// Filter contains a function to evaluate compiled CEL-typed values +// ConditionEvaluator contains the result of compiling a CEL expression +// that evaluates to a condition. This is used both for validation and pre-conditions. // It expects the inbound object to already have been converted to the version expected // by the underlying CEL code (which is indicated by the match criteria of a policy definition). // versionedParams may be nil. -type Filter interface { +type ConditionEvaluator interface { // ForInput converts compiled CEL-typed values into evaluated CEL-typed value. // runtimeCELCostBudget was added for testing purpose only. Callers should always use const RuntimeCELCostBudget from k8s.io/apiserver/pkg/apis/cel/config.go as input. - // If cost budget is calculated, the filter should return the remaining budget. 
+ // If cost budget is calculated, the condition should return the remaining budget. ForInput(ctx context.Context, versionedAttr *admission.VersionedAttributes, request *v1.AdmissionRequest, optionalVars OptionalVariableBindings, namespace *corev1.Namespace, runtimeCELCostBudget int64) ([]EvaluationResult, int64, error) - // CompilationErrors returns a list of errors from the compilation of the evaluator + // CompilationErrors returns a list of errors from the compilation of the mutatingEvaluator + CompilationErrors() []error +} + +// MutatingCompiler contains a function to assist with converting types and values to/from CEL-typed values. +type MutatingCompiler interface { + // CompileMutatingEvaluator is used for the cel expression compilation + CompileMutatingEvaluator(expression ExpressionAccessor, optionalDecls OptionalVariableDeclarations, envType environment.Type) MutatingEvaluator +} + +// MutatingEvaluator contains the result of compiling a CEL expression +// that evaluates to a mutation. +// It expects the inbound object to already have been converted to the version expected +// by the underlying CEL code (which is indicated by the match criteria of a policy definition). +// versionedParams may be nil. +type MutatingEvaluator interface { + // ForInput converts compiled CEL-typed values into a CEL-typed value representing a mutation. + // runtimeCELCostBudget was added for testing purpose only. Callers should always use const RuntimeCELCostBudget from k8s.io/apiserver/pkg/apis/cel/config.go as input. + // If cost budget is calculated, the condition should return the remaining budget. + ForInput(ctx context.Context, versionedAttr *admission.VersionedAttributes, request *v1.AdmissionRequest, optionalVars OptionalVariableBindings, namespace *corev1.Namespace, runtimeCELCostBudget int64) (EvaluationResult, int64, error) + + // CompilationErrors returns a list of errors from the compilation of the mutatingEvaluator CompilationErrors() []error } diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/cel/mutation.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/cel/mutation.go new file mode 100644 index 000000000..8c609b944 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/cel/mutation.go @@ -0,0 +1,73 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cel + +import ( + "context" + + admissionv1 "k8s.io/api/admission/v1" + v1 "k8s.io/api/core/v1" + "k8s.io/apiserver/pkg/admission" + "k8s.io/apiserver/pkg/cel/environment" +) + +// mutatingCompiler provides a MutatingCompiler implementation. +type mutatingCompiler struct { + compiler Compiler +} + +// CompileMutatingEvaluator compiles a CEL expression for admission plugins and returns an MutatingEvaluator for executing the +// compiled CEL expression. 
+func (p *mutatingCompiler) CompileMutatingEvaluator(expressionAccessor ExpressionAccessor, options OptionalVariableDeclarations, mode environment.Type) MutatingEvaluator { + compilationResult := p.compiler.CompileCELExpression(expressionAccessor, options, mode) + return NewMutatingEvaluator(compilationResult) +} + +type mutatingEvaluator struct { + compilationResult CompilationResult +} + +func NewMutatingEvaluator(compilationResult CompilationResult) MutatingEvaluator { + return &mutatingEvaluator{compilationResult} +} + +// ForInput evaluates the compiled CEL expression and returns an evaluation result +// errors per evaluation are returned in the evaluation result +// runtimeCELCostBudget was added for testing purpose only. Callers should always use const RuntimeCELCostBudget from k8s.io/apiserver/pkg/apis/cel/config.go as input. +func (p *mutatingEvaluator) ForInput(ctx context.Context, versionedAttr *admission.VersionedAttributes, request *admissionv1.AdmissionRequest, inputs OptionalVariableBindings, namespace *v1.Namespace, runtimeCELCostBudget int64) (EvaluationResult, int64, error) { + // if this activation supports composition, we will need the compositionCtx. It may be nil. + compositionCtx, _ := ctx.(CompositionContext) + + activation, err := newActivation(compositionCtx, versionedAttr, request, inputs, namespace) + if err != nil { + return EvaluationResult{}, -1, err + } + evaluation, remainingBudget, err := activation.Evaluate(ctx, compositionCtx, p.compilationResult, runtimeCELCostBudget) + if err != nil { + return evaluation, -1, err + } + return evaluation, remainingBudget, nil + +} + +// CompilationErrors returns a list of all the errors from the compilation of the mutatingEvaluator +func (p *mutatingEvaluator) CompilationErrors() (compilationErrors []error) { + if p.compilationResult.Error != nil { + return []error{p.compilationResult.Error} + } + return nil +} diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/generic/accessor.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/generic/accessor.go index 85b18612f..515634f00 100644 --- a/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/generic/accessor.go +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/generic/accessor.go @@ -26,6 +26,7 @@ type PolicyAccessor interface { GetNamespace() string GetParamKind() *v1.ParamKind GetMatchConstraints() *v1.MatchResources + GetFailurePolicy() *v1.FailurePolicyType } type BindingAccessor interface { diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/generic/interfaces.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/generic/interfaces.go index d4dbfb0aa..29e6eee67 100644 --- a/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/generic/interfaces.go +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/generic/interfaces.go @@ -49,6 +49,9 @@ type Source[H Hook] interface { // Dispatcher dispatches evaluates an admission request against the currently // active hooks returned by the source. type Dispatcher[H Hook] interface { + // Start the dispatcher. This method should be called only once at startup. + Start(ctx context.Context) error + // Dispatch a request to the policies. 
Dispatcher may choose not to // call a hook, either because the rules of the hook does not match, or // the namespaceSelector or the objectSelector of the hook does not diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/generic/plugin.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/generic/plugin.go index ed1c621bc..03aebdd58 100644 --- a/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/generic/plugin.go +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/generic/plugin.go @@ -36,8 +36,9 @@ import ( ) // H is the Hook type generated by the source and consumed by the dispatcher. +// !TODO: Just pass in a Plugin[H] with accessors to all this information type sourceFactory[H any] func(informers.SharedInformerFactory, kubernetes.Interface, dynamic.Interface, meta.RESTMapper) Source[H] -type dispatcherFactory[H any] func(authorizer.Authorizer, *matching.Matcher) Dispatcher[H] +type dispatcherFactory[H any] func(authorizer.Authorizer, *matching.Matcher, kubernetes.Interface) Dispatcher[H] // admissionResources is the list of resources related to CEL-based admission // features. @@ -170,7 +171,7 @@ func (c *Plugin[H]) ValidateInitialization() error { } c.source = c.sourceFactory(c.informerFactory, c.client, c.dynamicClient, c.restMapper) - c.dispatcher = c.dispatcherFactory(c.authorizer, c.matcher) + c.dispatcher = c.dispatcherFactory(c.authorizer, c.matcher, c.client) pluginContext, pluginContextCancel := context.WithCancel(context.Background()) go func() { @@ -181,10 +182,15 @@ func (c *Plugin[H]) ValidateInitialization() error { go func() { err := c.source.Run(pluginContext) if err != nil && !errors.Is(err, context.Canceled) { - utilruntime.HandleError(fmt.Errorf("policy source context unexpectedly closed: %v", err)) + utilruntime.HandleError(fmt.Errorf("policy source context unexpectedly closed: %w", err)) } }() + err := c.dispatcher.Start(pluginContext) + if err != nil && !errors.Is(err, context.Canceled) { + utilruntime.HandleError(fmt.Errorf("policy dispatcher context unexpectedly closed: %w", err)) + } + c.SetReadyFunc(func() bool { return namespaceInformer.Informer().HasSynced() && c.source.HasSynced() }) diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/generic/policy_dispatcher.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/generic/policy_dispatcher.go index 62ed7bc6c..62214a309 100644 --- a/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/generic/policy_dispatcher.go +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/generic/policy_dispatcher.go @@ -36,7 +36,7 @@ import ( "k8s.io/client-go/tools/cache" ) -// A policy invocation is a single policy-binding-param tuple from a Policy Hook +// PolicyInvocation is a single policy-binding-param tuple from a Policy Hook // in the context of a specific request. The params have already been resolved // and any error in configuration or setting up the invocation is stored in // the Error field. 
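One small point worth calling out from the plugin.go hunk above: the error wrapping switches from %v to %w, which keeps the wrapped cause inspectable by errors.Is and errors.As. A tiny self-contained illustration (not part of the vendored change):

```go
package main

import (
	"context"
	"errors"
	"fmt"
)

func main() {
	wrappedV := fmt.Errorf("policy source context unexpectedly closed: %v", context.Canceled)
	wrappedW := fmt.Errorf("policy source context unexpectedly closed: %w", context.Canceled)

	// %v flattens the cause into a string; the error chain is lost.
	fmt.Println(errors.Is(wrappedV, context.Canceled)) // false

	// %w records the cause, so callers can still match on it.
	fmt.Println(errors.Is(wrappedW, context.Canceled)) // true
}
```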
@@ -62,10 +62,6 @@ type PolicyInvocation[P runtime.Object, B runtime.Object, E Evaluator] struct { // Params fetched by the binding to use to evaluate the policy Param runtime.Object - - // Error is set if there was an error with the policy or binding or its - // params, etc - Error error } // dispatcherDelegate is called during a request with a pre-filtered list @@ -76,7 +72,7 @@ type PolicyInvocation[P runtime.Object, B runtime.Object, E Evaluator] struct { // // The delegate provides the "validation" or "mutation" aspect of dispatcher functionality // (in contrast to generic.PolicyDispatcher which only selects active policies and params) -type dispatcherDelegate[P, B runtime.Object, E Evaluator] func(ctx context.Context, a admission.Attributes, o admission.ObjectInterfaces, versionedAttributes webhookgeneric.VersionedAttributeAccessor, invocations []PolicyInvocation[P, B, E]) error +type dispatcherDelegate[P, B runtime.Object, E Evaluator] func(ctx context.Context, a admission.Attributes, o admission.ObjectInterfaces, versionedAttributes webhookgeneric.VersionedAttributeAccessor, invocations []PolicyInvocation[P, B, E]) ([]PolicyError, *apierrors.StatusError) type policyDispatcher[P runtime.Object, B runtime.Object, E Evaluator] struct { newPolicyAccessor func(P) PolicyAccessor @@ -104,7 +100,10 @@ func NewPolicyDispatcher[P runtime.Object, B runtime.Object, E Evaluator]( // request. It then resolves all params and creates an Invocation for each // matching policy-binding-param tuple. The delegate is then called with the // list of tuples. -// +func (d *policyDispatcher[P, B, E]) Start(ctx context.Context) error { + return nil +} + // Note: MatchConditions expressions are not evaluated here. The dispatcher delegate // is expected to ignore the result of any policies whose match conditions dont pass. // This may be possible to refactor so matchconditions are checked here instead. @@ -117,29 +116,33 @@ func (d *policyDispatcher[P, B, E]) Dispatch(ctx context.Context, a admission.At objectInterfaces: o, } + var policyErrors []PolicyError + addConfigError := func(err error, definition PolicyAccessor, binding BindingAccessor) { + var message error + if binding == nil { + message = fmt.Errorf("failed to configure policy: %w", err) + } else { + message = fmt.Errorf("failed to configure binding: %w", err) + } + + policyErrors = append(policyErrors, PolicyError{ + Policy: definition, + Binding: binding, + Message: message, + }) + } + for _, hook := range hooks { policyAccessor := d.newPolicyAccessor(hook.Policy) matches, matchGVR, matchGVK, err := d.matcher.DefinitionMatches(a, o, policyAccessor) if err != nil { // There was an error evaluating if this policy matches anything. 
- utilruntime.HandleError(err) - relevantHooks = append(relevantHooks, PolicyInvocation[P, B, E]{ - Policy: hook.Policy, - Error: err, - }) + addConfigError(err, policyAccessor, nil) continue } else if !matches { continue } else if hook.ConfigurationError != nil { - // The policy matches but there is a configuration error with the - // policy itself - relevantHooks = append(relevantHooks, PolicyInvocation[P, B, E]{ - Policy: hook.Policy, - Error: hook.ConfigurationError, - Resource: matchGVR, - Kind: matchGVK, - }) - utilruntime.HandleError(hook.ConfigurationError) + addConfigError(hook.ConfigurationError, policyAccessor, nil) continue } @@ -148,19 +151,22 @@ func (d *policyDispatcher[P, B, E]) Dispatch(ctx context.Context, a admission.At matches, err = d.matcher.BindingMatches(a, o, bindingAccessor) if err != nil { // There was an error evaluating if this binding matches anything. - utilruntime.HandleError(err) - relevantHooks = append(relevantHooks, PolicyInvocation[P, B, E]{ - Policy: hook.Policy, - Binding: binding, - Error: err, - Resource: matchGVR, - Kind: matchGVK, - }) + addConfigError(err, policyAccessor, bindingAccessor) continue } else if !matches { continue } + // here the binding matches. + // VersionedAttr result will be cached and reused later during parallel + // hook calls. + if _, err = versionedAttrAccessor.VersionedAttribute(matchGVK); err != nil { + // VersionedAttr result will be cached and reused later during parallel + // hook calls. + addConfigError(err, policyAccessor, nil) + continue + } + // Collect params for this binding params, err := CollectParams( policyAccessor.GetParamKind(), @@ -171,14 +177,7 @@ func (d *policyDispatcher[P, B, E]) Dispatch(ctx context.Context, a admission.At ) if err != nil { // There was an error collecting params for this binding. - utilruntime.HandleError(err) - relevantHooks = append(relevantHooks, PolicyInvocation[P, B, E]{ - Policy: hook.Policy, - Binding: binding, - Error: err, - Resource: matchGVR, - Kind: matchGVK, - }) + addConfigError(err, policyAccessor, bindingAccessor) continue } @@ -194,23 +193,72 @@ func (d *policyDispatcher[P, B, E]) Dispatch(ctx context.Context, a admission.At Evaluator: hook.Evaluator, }) } + } + } - // VersionedAttr result will be cached and reused later during parallel - // hook calls - _, err = versionedAttrAccessor.VersionedAttribute(matchGVK) - if err != nil { - return apierrors.NewInternalError(err) - } + if len(relevantHooks) > 0 { + extraPolicyErrors, statusError := d.delegate(ctx, a, o, versionedAttrAccessor, relevantHooks) + if statusError != nil { + return statusError } + policyErrors = append(policyErrors, extraPolicyErrors...) + } + var filteredErrors []PolicyError + for _, e := range policyErrors { + // we always default the FailurePolicy if it is unset and validate it in API level + var policy v1.FailurePolicyType + if fp := e.Policy.GetFailurePolicy(); fp == nil { + policy = v1.Fail + } else { + policy = *fp + } + + switch policy { + case v1.Ignore: + // TODO: add metrics for ignored error here + continue + case v1.Fail: + filteredErrors = append(filteredErrors, e) + default: + filteredErrors = append(filteredErrors, e) + } } - if len(relevantHooks) == 0 { - // no matching hooks - return nil + if len(filteredErrors) > 0 { + + forbiddenErr := admission.NewForbidden(a, fmt.Errorf("admission request denied by policy")) + + // The forbiddenErr is always a StatusError. + var err *apierrors.StatusError + if !errors.As(forbiddenErr, &err) { + // Should never happen. 
+ return apierrors.NewInternalError(fmt.Errorf("failed to create status error")) + } + err.ErrStatus.Message = "" + + for _, policyError := range filteredErrors { + message := policyError.Error() + + // If this is the first denied decision, use its message and reason + // for the status error message. + if err.ErrStatus.Message == "" { + err.ErrStatus.Message = message + if policyError.Reason != "" { + err.ErrStatus.Reason = policyError.Reason + } + } + + // Add the denied decision's message to the status error's details + err.ErrStatus.Details.Causes = append( + err.ErrStatus.Details.Causes, + metav1.StatusCause{Message: message}) + } + + return err } - return d.delegate(ctx, a, o, versionedAttrAccessor, relevantHooks) + return nil } // Returns params to use to evaluate a policy-binding with given param @@ -352,3 +400,18 @@ func (v *versionedAttributeAccessor) VersionedAttribute(gvk schema.GroupVersionK v.versionedAttrs[gvk] = versionedAttr return versionedAttr, nil } + +type PolicyError struct { + Policy PolicyAccessor + Binding BindingAccessor + Message error + Reason metav1.StatusReason +} + +func (c PolicyError) Error() string { + if c.Binding != nil { + return fmt.Sprintf("policy '%s' with binding '%s' denied request: %s", c.Policy.GetName(), c.Binding.GetName(), c.Message.Error()) + } + + return fmt.Sprintf("policy %q denied request: %s", c.Policy.GetName(), c.Message.Error()) +} diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/generic/policy_source.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/generic/policy_source.go index 9b2e2146a..ca6cdc884 100644 --- a/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/generic/policy_source.go +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/generic/policy_source.go @@ -41,6 +41,13 @@ import ( "k8s.io/klog/v2" ) +// Interval for refreshing policies. +// TODO: Consider reducing this to a shorter duration or replacing this entirely +// with checks that detect when a policy change took effect. +const policyRefreshIntervalDefault = 1 * time.Second + +var policyRefreshInterval = policyRefreshIntervalDefault + type policySource[P runtime.Object, B runtime.Object, E Evaluator] struct { ctx context.Context policyInformer generic.Informer[P] @@ -122,6 +129,15 @@ func NewPolicySource[P runtime.Object, B runtime.Object, E Evaluator]( return res } +// SetPolicyRefreshIntervalForTests allows the refresh interval to be overridden during tests. +// This should only be called from tests. 
+func SetPolicyRefreshIntervalForTests(interval time.Duration) func() { + policyRefreshInterval = interval + return func() { + policyRefreshInterval = policyRefreshIntervalDefault + } +} + func (s *policySource[P, B, E]) Run(ctx context.Context) error { if s.ctx != nil { return fmt.Errorf("policy source already running") @@ -178,7 +194,7 @@ func (s *policySource[P, B, E]) Run(ctx context.Context) error { // and needs to be recompiled go func() { // Loop every 1 second until context is cancelled, refreshing policies - wait.Until(s.refreshPolicies, 1*time.Second, ctx.Done()) + wait.Until(s.refreshPolicies, policyRefreshInterval, ctx.Done()) }() <-ctx.Done() diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/generic/policy_test_context.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/generic/policy_test_context.go index 69b19fb2a..964f2d904 100644 --- a/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/generic/policy_test_context.go +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/generic/policy_test_context.go @@ -45,7 +45,6 @@ import ( "k8s.io/apiserver/pkg/admission" "k8s.io/apiserver/pkg/admission/initializer" "k8s.io/apiserver/pkg/authorization/authorizer" - "k8s.io/apiserver/pkg/features" ) // PolicyTestContext is everything you need to unit test a policy plugin @@ -196,18 +195,6 @@ func NewPolicyTestContext[P, B runtime.Object, E Evaluator]( plugin.SetEnabled(true) featureGate := featuregate.NewFeatureGate() - err = featureGate.Add(map[featuregate.Feature]featuregate.FeatureSpec{ - //!TODO: move this to validating specific tests - features.ValidatingAdmissionPolicy: { - Default: true, PreRelease: featuregate.Beta}}) - if err != nil { - return nil, nil, err - } - err = featureGate.SetFromMap(map[string]bool{string(features.ValidatingAdmissionPolicy): true}) - if err != nil { - return nil, nil, err - } - testContext, testCancel := context.WithCancel(context.Background()) genericInitializer := initializer.New( nativeClient, diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/mutating/accessor.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/mutating/accessor.go new file mode 100644 index 000000000..e5ef242fa --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/mutating/accessor.go @@ -0,0 +1,144 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package mutating + +import ( + v1 "k8s.io/api/admissionregistration/v1" + "k8s.io/api/admissionregistration/v1alpha1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apiserver/pkg/admission/plugin/policy/generic" +) + +func NewMutatingAdmissionPolicyAccessor(obj *Policy) generic.PolicyAccessor { + return &mutatingAdmissionPolicyAccessor{ + Policy: obj, + } +} + +func NewMutatingAdmissionPolicyBindingAccessor(obj *PolicyBinding) generic.BindingAccessor { + return &mutatingAdmissionPolicyBindingAccessor{ + PolicyBinding: obj, + } +} + +type mutatingAdmissionPolicyAccessor struct { + *Policy +} + +func (v *mutatingAdmissionPolicyAccessor) GetNamespace() string { + return v.Namespace +} + +func (v *mutatingAdmissionPolicyAccessor) GetName() string { + return v.Name +} + +func (v *mutatingAdmissionPolicyAccessor) GetParamKind() *v1.ParamKind { + pk := v.Spec.ParamKind + if pk == nil { + return nil + } + return &v1.ParamKind{ + APIVersion: pk.APIVersion, + Kind: pk.Kind, + } +} + +func (v *mutatingAdmissionPolicyAccessor) GetMatchConstraints() *v1.MatchResources { + return convertV1alpha1ResourceRulesToV1(v.Spec.MatchConstraints) +} + +func (v *mutatingAdmissionPolicyAccessor) GetFailurePolicy() *v1.FailurePolicyType { + return toV1FailurePolicy(v.Spec.FailurePolicy) +} + +func toV1FailurePolicy(failurePolicy *v1alpha1.FailurePolicyType) *v1.FailurePolicyType { + if failurePolicy == nil { + return nil + } + fp := v1.FailurePolicyType(*failurePolicy) + return &fp +} + +type mutatingAdmissionPolicyBindingAccessor struct { + *PolicyBinding +} + +func (v *mutatingAdmissionPolicyBindingAccessor) GetNamespace() string { + return v.Namespace +} + +func (v *mutatingAdmissionPolicyBindingAccessor) GetName() string { + return v.Name +} + +func (v *mutatingAdmissionPolicyBindingAccessor) GetPolicyName() types.NamespacedName { + return types.NamespacedName{ + Namespace: "", + Name: v.Spec.PolicyName, + } +} + +func (v *mutatingAdmissionPolicyBindingAccessor) GetMatchResources() *v1.MatchResources { + return convertV1alpha1ResourceRulesToV1(v.Spec.MatchResources) +} + +func (v *mutatingAdmissionPolicyBindingAccessor) GetParamRef() *v1.ParamRef { + if v.Spec.ParamRef == nil { + return nil + } + + var nfa *v1.ParameterNotFoundActionType + if v.Spec.ParamRef.ParameterNotFoundAction != nil { + nfa = new(v1.ParameterNotFoundActionType) + *nfa = v1.ParameterNotFoundActionType(*v.Spec.ParamRef.ParameterNotFoundAction) + } + + return &v1.ParamRef{ + Name: v.Spec.ParamRef.Name, + Namespace: v.Spec.ParamRef.Namespace, + Selector: v.Spec.ParamRef.Selector, + ParameterNotFoundAction: nfa, + } +} + +func convertV1alpha1ResourceRulesToV1(mc *v1alpha1.MatchResources) *v1.MatchResources { + if mc == nil { + return nil + } + + var res v1.MatchResources + res.NamespaceSelector = mc.NamespaceSelector + res.ObjectSelector = mc.ObjectSelector + for _, ex := range mc.ExcludeResourceRules { + res.ExcludeResourceRules = append(res.ExcludeResourceRules, v1.NamedRuleWithOperations{ + ResourceNames: ex.ResourceNames, + RuleWithOperations: ex.RuleWithOperations, + }) + } + for _, ex := range mc.ResourceRules { + res.ResourceRules = append(res.ResourceRules, v1.NamedRuleWithOperations{ + ResourceNames: ex.ResourceNames, + RuleWithOperations: ex.RuleWithOperations, + }) + } + if mc.MatchPolicy != nil { + mp := v1.MatchPolicyType(*mc.MatchPolicy) + res.MatchPolicy = &mp + } + return &res +} diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/mutating/compilation.go 
b/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/mutating/compilation.go new file mode 100644 index 000000000..710b8ef1e --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/mutating/compilation.go @@ -0,0 +1,81 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package mutating + +import ( + "fmt" + + "k8s.io/api/admissionregistration/v1alpha1" + plugincel "k8s.io/apiserver/pkg/admission/plugin/cel" + "k8s.io/apiserver/pkg/admission/plugin/policy/mutating/patch" + "k8s.io/apiserver/pkg/admission/plugin/webhook/matchconditions" + apiservercel "k8s.io/apiserver/pkg/cel" + "k8s.io/apiserver/pkg/cel/environment" +) + +// compilePolicy compiles the policy into a PolicyEvaluator +// any error is stored and delayed until invocation. +// +// Each individual mutation is compiled into MutationEvaluationFunc and +// returned is a PolicyEvaluator in the same order as the mutations appeared in the policy. +func compilePolicy(policy *Policy) PolicyEvaluator { + opts := plugincel.OptionalVariableDeclarations{HasParams: policy.Spec.ParamKind != nil, StrictCost: true, HasAuthorizer: true} + compiler, err := plugincel.NewCompositedCompiler(environment.MustBaseEnvSet(environment.DefaultCompatibilityVersion(), true)) + if err != nil { + return PolicyEvaluator{Error: &apiservercel.Error{ + Type: apiservercel.ErrorTypeInternal, + Detail: fmt.Sprintf("failed to initialize CEL compiler: %v", err), + }} + } + + // Compile and store variables + compiler.CompileAndStoreVariables(convertv1alpha1Variables(policy.Spec.Variables), opts, environment.StoredExpressions) + + // Compile matchers + var matcher matchconditions.Matcher = nil + matchConditions := policy.Spec.MatchConditions + if len(matchConditions) > 0 { + matchExpressionAccessors := make([]plugincel.ExpressionAccessor, len(matchConditions)) + for i := range matchConditions { + matchExpressionAccessors[i] = (*matchconditions.MatchCondition)(&matchConditions[i]) + } + matcher = matchconditions.NewMatcher(compiler.CompileCondition(matchExpressionAccessors, opts, environment.StoredExpressions), toV1FailurePolicy(policy.Spec.FailurePolicy), "policy", "validate", policy.Name) + } + + // Compiler patchers + var patchers []patch.Patcher + patchOptions := opts + patchOptions.HasPatchTypes = true + for _, m := range policy.Spec.Mutations { + switch m.PatchType { + case v1alpha1.PatchTypeJSONPatch: + if m.JSONPatch != nil { + accessor := &patch.JSONPatchCondition{Expression: m.JSONPatch.Expression} + compileResult := compiler.CompileMutatingEvaluator(accessor, patchOptions, environment.StoredExpressions) + patchers = append(patchers, patch.NewJSONPatcher(compileResult)) + } + case v1alpha1.PatchTypeApplyConfiguration: + if m.ApplyConfiguration != nil { + accessor := &patch.ApplyConfigurationCondition{Expression: m.ApplyConfiguration.Expression} + compileResult := compiler.CompileMutatingEvaluator(accessor, patchOptions, environment.StoredExpressions) + patchers = append(patchers, 
patch.NewApplyConfigurationPatcher(compileResult)) + } + } + } + + return PolicyEvaluator{Matcher: matcher, Mutators: patchers, CompositionEnv: compiler.CompositionEnv} +} diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/mutating/dispatcher.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/mutating/dispatcher.go new file mode 100644 index 000000000..918a07d0f --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/mutating/dispatcher.go @@ -0,0 +1,295 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package mutating + +import ( + "context" + "errors" + "fmt" + + "k8s.io/api/admissionregistration/v1alpha1" + v1 "k8s.io/api/core/v1" + apiequality "k8s.io/apimachinery/pkg/api/equality" + k8serrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apiserver/pkg/admission" + admissionauthorizer "k8s.io/apiserver/pkg/admission/plugin/authorizer" + "k8s.io/apiserver/pkg/admission/plugin/cel" + "k8s.io/apiserver/pkg/admission/plugin/policy/generic" + "k8s.io/apiserver/pkg/admission/plugin/policy/matching" + "k8s.io/apiserver/pkg/admission/plugin/policy/mutating/patch" + webhookgeneric "k8s.io/apiserver/pkg/admission/plugin/webhook/generic" + celconfig "k8s.io/apiserver/pkg/apis/cel" + "k8s.io/apiserver/pkg/authorization/authorizer" +) + +func NewDispatcher(a authorizer.Authorizer, m *matching.Matcher, tcm patch.TypeConverterManager) generic.Dispatcher[PolicyHook] { + res := &dispatcher{ + matcher: m, + authz: a, + typeConverterManager: tcm, + } + res.Dispatcher = generic.NewPolicyDispatcher[*Policy, *PolicyBinding, PolicyEvaluator]( + NewMutatingAdmissionPolicyAccessor, + NewMutatingAdmissionPolicyBindingAccessor, + m, + res.dispatchInvocations, + ) + return res +} + +type dispatcher struct { + matcher *matching.Matcher + authz authorizer.Authorizer + typeConverterManager patch.TypeConverterManager + generic.Dispatcher[PolicyHook] +} + +func (d *dispatcher) Start(ctx context.Context) error { + go d.typeConverterManager.Run(ctx) + return d.Dispatcher.Start(ctx) +} + +func (d *dispatcher) dispatchInvocations( + ctx context.Context, + a admission.Attributes, + o admission.ObjectInterfaces, + versionedAttributes webhookgeneric.VersionedAttributeAccessor, + invocations []generic.PolicyInvocation[*Policy, *PolicyBinding, PolicyEvaluator], +) ([]generic.PolicyError, *k8serrors.StatusError) { + var lastVersionedAttr *admission.VersionedAttributes + + reinvokeCtx := a.GetReinvocationContext() + var policyReinvokeCtx *policyReinvokeContext + if v := reinvokeCtx.Value(PluginName); v != nil { + policyReinvokeCtx = v.(*policyReinvokeContext) + } else { + policyReinvokeCtx = &policyReinvokeContext{} + reinvokeCtx.SetValue(PluginName, policyReinvokeCtx) + } + + if reinvokeCtx.IsReinvoke() && 
policyReinvokeCtx.IsOutputChangedSinceLastPolicyInvocation(a.GetObject()) { + // If the object has changed, we know the in-tree plugin re-invocations have mutated the object, + // and we need to reinvoke all eligible policies. + policyReinvokeCtx.RequireReinvokingPreviouslyInvokedPlugins() + } + defer func() { + policyReinvokeCtx.SetLastPolicyInvocationOutput(a.GetObject()) + }() + + var policyErrors []generic.PolicyError + addConfigError := func(err error, invocation generic.PolicyInvocation[*Policy, *PolicyBinding, PolicyEvaluator], reason metav1.StatusReason) { + policyErrors = append(policyErrors, generic.PolicyError{ + Message: err, + Policy: NewMutatingAdmissionPolicyAccessor(invocation.Policy), + Binding: NewMutatingAdmissionPolicyBindingAccessor(invocation.Binding), + Reason: reason, + }) + } + + // There is at least one invocation to invoke. Make sure we have a namespace + // object if the incoming object is not cluster scoped to pass into the evaluator. + var namespace *v1.Namespace + var err error + namespaceName := a.GetNamespace() + + // Special case, the namespace object has the namespace of itself (maybe a bug). + // unset it if the incoming object is a namespace + if gvk := a.GetKind(); gvk.Kind == "Namespace" && gvk.Version == "v1" && gvk.Group == "" { + namespaceName = "" + } + + // if it is cluster scoped, namespaceName will be empty + // Otherwise, get the Namespace resource. + if namespaceName != "" { + namespace, err = d.matcher.GetNamespace(namespaceName) + if err != nil { + return nil, k8serrors.NewNotFound(schema.GroupResource{Group: "", Resource: "namespaces"}, namespaceName) + } + } + + authz := admissionauthorizer.NewCachingAuthorizer(d.authz) + + // Should loop through invocations, handling possible error and invoking + // evaluator to apply patch, also should handle re-invocations + for _, invocation := range invocations { + if invocation.Evaluator.CompositionEnv != nil { + ctx = invocation.Evaluator.CompositionEnv.CreateContext(ctx) + } + if len(invocation.Evaluator.Mutators) != len(invocation.Policy.Spec.Mutations) { + // This would be a bug. The compiler should always return exactly as + // many evaluators as there are mutations + return nil, k8serrors.NewInternalError(fmt.Errorf("expected %v compiled evaluators for policy %v, got %v", + len(invocation.Policy.Spec.Mutations), invocation.Policy.Name, len(invocation.Evaluator.Mutators))) + } + + versionedAttr, err := versionedAttributes.VersionedAttribute(invocation.Kind) + if err != nil { + // This should never happen, we pre-warm versoined attribute + // accessors before starting the dispatcher + return nil, k8serrors.NewInternalError(err) + } + + if invocation.Evaluator.Matcher != nil { + matchResults := invocation.Evaluator.Matcher.Match(ctx, versionedAttr, invocation.Param, authz) + if matchResults.Error != nil { + addConfigError(matchResults.Error, invocation, metav1.StatusReasonInvalid) + continue + } + + // if preconditions are not met, then skip mutations + if !matchResults.Matches { + continue + } + } + + invocationKey, invocationKeyErr := keyFor(invocation) + if invocationKeyErr != nil { + // This should never happen. It occurs if there is a programming + // error causing the Param not to be a valid object. 
+ return nil, k8serrors.NewInternalError(invocationKeyErr) + } + if reinvokeCtx.IsReinvoke() && !policyReinvokeCtx.ShouldReinvoke(invocationKey) { + continue + } + + objectBeforeMutations := versionedAttr.VersionedObject + // Mutations for a single invocation of a MutatingAdmissionPolicy are evaluated + // in order. + for mutationIndex := range invocation.Policy.Spec.Mutations { + lastVersionedAttr = versionedAttr + if versionedAttr.VersionedObject == nil { // Do not call patchers if there is no object to patch. + continue + } + + patcher := invocation.Evaluator.Mutators[mutationIndex] + optionalVariables := cel.OptionalVariableBindings{VersionedParams: invocation.Param, Authorizer: authz} + err = d.dispatchOne(ctx, patcher, o, versionedAttr, namespace, invocation.Resource, optionalVariables) + if err != nil { + var statusError *k8serrors.StatusError + if errors.As(err, &statusError) { + return nil, statusError + } + + addConfigError(err, invocation, metav1.StatusReasonInvalid) + continue + } + } + if !apiequality.Semantic.DeepEqual(objectBeforeMutations, versionedAttr.VersionedObject) { + // The mutation has changed the object. Prepare to reinvoke all previous mutations that are eligible for re-invocation. + policyReinvokeCtx.RequireReinvokingPreviouslyInvokedPlugins() + reinvokeCtx.SetShouldReinvoke() + } + if invocation.Policy.Spec.ReinvocationPolicy == v1alpha1.IfNeededReinvocationPolicy { + policyReinvokeCtx.AddReinvocablePolicyToPreviouslyInvoked(invocationKey) + } + } + + if lastVersionedAttr != nil && lastVersionedAttr.VersionedObject != nil && lastVersionedAttr.Dirty { + policyReinvokeCtx.RequireReinvokingPreviouslyInvokedPlugins() + reinvokeCtx.SetShouldReinvoke() + if err := o.GetObjectConvertor().Convert(lastVersionedAttr.VersionedObject, lastVersionedAttr.Attributes.GetObject(), nil); err != nil { + return nil, k8serrors.NewInternalError(fmt.Errorf("failed to convert object: %w", err)) + } + } + + return policyErrors, nil +} + +func (d *dispatcher) dispatchOne( + ctx context.Context, + patcher patch.Patcher, + o admission.ObjectInterfaces, + versionedAttributes *admission.VersionedAttributes, + namespace *v1.Namespace, + resource schema.GroupVersionResource, + optionalVariables cel.OptionalVariableBindings, +) (err error) { + if patcher == nil { + // internal error. this should not happen + return k8serrors.NewInternalError(fmt.Errorf("policy evaluator is nil")) + } + + // Find type converter for the invoked Group-Version. + typeConverter := d.typeConverterManager.GetTypeConverter(versionedAttributes.VersionedKind) + if typeConverter == nil { + // This can happen if the request is for a resource whose schema + // has not been registered with the type converter manager. + return k8serrors.NewServiceUnavailable(fmt.Sprintf("Resource kind %s not found. There can be a delay between when CustomResourceDefinitions are created and when they are available.", versionedAttributes.VersionedKind)) + } + + patchRequest := patch.Request{ + MatchedResource: resource, + VersionedAttributes: versionedAttributes, + ObjectInterfaces: o, + OptionalVariables: optionalVariables, + Namespace: namespace, + TypeConverter: typeConverter, + } + newVersionedObject, err := patcher.Patch(ctx, patchRequest, celconfig.RuntimeCELCostBudget) + if err != nil { + return err + } + + switch versionedAttributes.VersionedObject.(type) { + case *unstructured.Unstructured: + // No conversion needed before defaulting for the patch object if the admitted object is unstructured. 
+ default: + // Before defaulting, if the admitted object is a typed object, convert unstructured patch result back to a typed object. + newVersionedObject, err = o.GetObjectConvertor().ConvertToVersion(newVersionedObject, versionedAttributes.GetKind().GroupVersion()) + if err != nil { + return err + } + } + o.GetObjectDefaulter().Default(newVersionedObject) + + versionedAttributes.Dirty = true + versionedAttributes.VersionedObject = newVersionedObject + return nil +} + +func keyFor(invocation generic.PolicyInvocation[*Policy, *PolicyBinding, PolicyEvaluator]) (key, error) { + var paramUID types.NamespacedName + if invocation.Param != nil { + paramAccessor, err := meta.Accessor(invocation.Param) + if err != nil { + // This should never happen, as the param should have been validated + // before being passed to the plugin. + return key{}, err + } + paramUID = types.NamespacedName{ + Name: paramAccessor.GetName(), + Namespace: paramAccessor.GetNamespace(), + } + } + + return key{ + PolicyUID: types.NamespacedName{ + Name: invocation.Policy.GetName(), + Namespace: invocation.Policy.GetNamespace(), + }, + BindingUID: types.NamespacedName{ + Name: invocation.Binding.GetName(), + Namespace: invocation.Binding.GetNamespace(), + }, + ParamUID: paramUID, + }, nil +} diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/mutating/patch/interface.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/mutating/patch/interface.go new file mode 100644 index 000000000..d717adc29 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/mutating/patch/interface.go @@ -0,0 +1,45 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package patch + +import ( + "context" + + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/managedfields" + "k8s.io/apiserver/pkg/admission" + "k8s.io/apiserver/pkg/admission/plugin/cel" +) + +// Patcher provides a patch function to perform a mutation to an object in the admission chain. +type Patcher interface { + // Patch returns a copy of the object in the request, modified to change specified by the patch. + // The original object in the request MUST NOT be modified in-place. + Patch(ctx context.Context, request Request, runtimeCELCostBudget int64) (runtime.Object, error) +} + +// Request defines the arguments required by a patcher. 
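For orientation before the json_patch.go additions below: the jsonPatcher serializes the versioned object, applies the CEL-produced operations as an RFC 6902 JSON Patch via gopkg.in/evanphx/json-patch.v4, and decodes the result back into a versioned object. A minimal standalone sketch of that application step follows; it is illustrative only, and the document and patch literals are made up.

```go
package main

import (
	"fmt"

	jsonpatch "gopkg.in/evanphx/json-patch.v4"
)

func main() {
	// A serialized object, standing in for the encoded VersionedObject.
	doc := []byte(`{"metadata":{"name":"demo","labels":{"a":"1"}}}`)

	// A JSON Patch such as one assembled from evaluated JSONPatch CEL values.
	patch, err := jsonpatch.DecodePatch([]byte(`[
		{"op": "add", "path": "/metadata/labels/b", "value": "2"}
	]`))
	if err != nil {
		panic(err)
	}

	patched, err := patch.Apply(doc)
	if err != nil {
		panic(err)
	}
	// metadata.labels now contains both "a" and "b".
	fmt.Println(string(patched))
}
```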
+type Request struct { + MatchedResource schema.GroupVersionResource + VersionedAttributes *admission.VersionedAttributes + ObjectInterfaces admission.ObjectInterfaces + OptionalVariables cel.OptionalVariableBindings + Namespace *v1.Namespace + TypeConverter managedfields.TypeConverter +} diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/mutating/patch/json_patch.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/mutating/patch/json_patch.go new file mode 100644 index 000000000..26f73dd34 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/mutating/patch/json_patch.go @@ -0,0 +1,192 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package patch + +import ( + "context" + gojson "encoding/json" + "errors" + "fmt" + celgo "github.com/google/cel-go/cel" + "reflect" + "strconv" + + "github.com/google/cel-go/common/types" + "github.com/google/cel-go/common/types/traits" + "google.golang.org/protobuf/types/known/structpb" + jsonpatch "gopkg.in/evanphx/json-patch.v4" + + admissionv1 "k8s.io/api/admission/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/serializer/json" + plugincel "k8s.io/apiserver/pkg/admission/plugin/cel" + "k8s.io/apiserver/pkg/cel/mutation" + "k8s.io/apiserver/pkg/cel/mutation/dynamic" + pointer "k8s.io/utils/ptr" +) + +// JSONPatchCondition contains the inputs needed to compile and evaluate a cel expression +// that returns a JSON patch value. +type JSONPatchCondition struct { + Expression string +} + +var _ plugincel.ExpressionAccessor = &JSONPatchCondition{} + +func (v *JSONPatchCondition) GetExpression() string { + return v.Expression +} + +func (v *JSONPatchCondition) ReturnTypes() []*celgo.Type { + return []*celgo.Type{celgo.ListType(jsonPatchType)} +} + +var jsonPatchType = types.NewObjectType("JSONPatch") + +// NewJSONPatcher creates a patcher that performs a JSON Patch mutation. +func NewJSONPatcher(patchEvaluator plugincel.MutatingEvaluator) Patcher { + return &jsonPatcher{patchEvaluator} +} + +type jsonPatcher struct { + PatchEvaluator plugincel.MutatingEvaluator +} + +func (e *jsonPatcher) Patch(ctx context.Context, r Request, runtimeCELCostBudget int64) (runtime.Object, error) { + admissionRequest := plugincel.CreateAdmissionRequest( + r.VersionedAttributes.Attributes, + metav1.GroupVersionResource(r.MatchedResource), + metav1.GroupVersionKind(r.VersionedAttributes.VersionedKind)) + + compileErrors := e.PatchEvaluator.CompilationErrors() + if len(compileErrors) > 0 { + return nil, errors.Join(compileErrors...) 
+ } + patchObj, _, err := e.evaluatePatchExpression(ctx, e.PatchEvaluator, runtimeCELCostBudget, r, admissionRequest) + if err != nil { + return nil, err + } + o := r.ObjectInterfaces + jsonSerializer := json.NewSerializerWithOptions(json.DefaultMetaFactory, o.GetObjectCreater(), o.GetObjectTyper(), json.SerializerOptions{Pretty: false, Strict: true}) + objJS, err := runtime.Encode(jsonSerializer, r.VersionedAttributes.VersionedObject) + if err != nil { + return nil, fmt.Errorf("failed to create JSON patch: %w", err) + } + patchedJS, err := patchObj.Apply(objJS) + if err != nil { + if errors.Is(err, jsonpatch.ErrTestFailed) { + // If a json patch fails a test operation, the patch must not be applied + return r.VersionedAttributes.VersionedObject, nil + } + return nil, fmt.Errorf("JSON Patch: %w", err) + } + + var newVersionedObject runtime.Object + if _, ok := r.VersionedAttributes.VersionedObject.(*unstructured.Unstructured); ok { + newVersionedObject = &unstructured.Unstructured{} + } else { + newVersionedObject, err = o.GetObjectCreater().New(r.VersionedAttributes.VersionedKind) + if err != nil { + return nil, apierrors.NewInternalError(err) + } + } + + if newVersionedObject, _, err = jsonSerializer.Decode(patchedJS, nil, newVersionedObject); err != nil { + return nil, apierrors.NewInternalError(err) + } + + return newVersionedObject, nil +} + +func (e *jsonPatcher) evaluatePatchExpression(ctx context.Context, patchEvaluator plugincel.MutatingEvaluator, remainingBudget int64, r Request, admissionRequest *admissionv1.AdmissionRequest) (jsonpatch.Patch, int64, error) { + var err error + var eval plugincel.EvaluationResult + eval, remainingBudget, err = patchEvaluator.ForInput(ctx, r.VersionedAttributes, admissionRequest, r.OptionalVariables, r.Namespace, remainingBudget) + if err != nil { + return nil, -1, err + } + if eval.Error != nil { + return nil, -1, eval.Error + } + refVal := eval.EvalResult + + // the return type can be any valid CEL value. + // Scalars, maps and lists are used to set the value when the path points to a field of that type. + // ObjectVal is used when the path points to a struct. A map like "{"field1": 1, "fieldX": bool}" is not + // possible in Kubernetes CEL because maps and lists may not have mixed types. + + iter, ok := refVal.(traits.Lister) + if !ok { + // Should never happen since compiler checks return type. + return nil, -1, fmt.Errorf("type mismatch: JSONPatchType.expression should evaluate to array") + } + result := jsonpatch.Patch{} + for it := iter.Iterator(); it.HasNext() == types.True; { + v := it.Next() + patchObj, err := v.ConvertToNative(reflect.TypeOf(&mutation.JSONPatchVal{})) + if err != nil { + // Should never happen since return type is checked by compiler. + return nil, -1, fmt.Errorf("type mismatch: JSONPatchType.expression should evaluate to array of JSONPatch: %w", err) + } + op, ok := patchObj.(*mutation.JSONPatchVal) + if !ok { + // Should never happen since return type is checked by compiler. 
+ return nil, -1, fmt.Errorf("type mismatch: JSONPatchType.expression should evaluate to array of JSONPatch, got element of %T", patchObj) + } + + // Construct a JSON Patch from the evaluated CEL expression + resultOp := jsonpatch.Operation{} + resultOp["op"] = pointer.To(gojson.RawMessage(strconv.Quote(op.Op))) + resultOp["path"] = pointer.To(gojson.RawMessage(strconv.Quote(op.Path))) + if len(op.From) > 0 { + resultOp["from"] = pointer.To(gojson.RawMessage(strconv.Quote(op.From))) + } + if op.Val != nil { + if objVal, ok := op.Val.(*dynamic.ObjectVal); ok { + // TODO: Object initializers are insufficiently type checked. + // In the interim, we use this sanity check to detect type mismatches + // between field names and Object initializers. For example, + // "Object.spec{ selector: Object.spec.wrong{}}" is detected as a mismatch. + // Before beta, attaching full type information both to Object initializers and + // the "object" and "oldObject" variables is needed. This will allow CEL to + // perform comprehensive runtime type checking. + err := objVal.CheckTypeNamesMatchFieldPathNames() + if err != nil { + return nil, -1, fmt.Errorf("type mismatch: %w", err) + } + } + // CEL data literals representing arbitrary JSON values can be serialized to JSON for use in + // JSON Patch if first converted to pb.Value. + v, err := op.Val.ConvertToNative(reflect.TypeOf(&structpb.Value{})) + if err != nil { + return nil, -1, fmt.Errorf("JSONPath valueExpression evaluated to a type that could not marshal to JSON: %w", err) + } + b, err := gojson.Marshal(v) + if err != nil { + return nil, -1, fmt.Errorf("JSONPath valueExpression evaluated to a type that could not marshal to JSON: %w", err) + } + resultOp["value"] = pointer.To[gojson.RawMessage](b) + } + + result = append(result, resultOp) + } + + return result, remainingBudget, nil +} diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/mutating/patch/smd.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/mutating/patch/smd.go new file mode 100644 index 000000000..cb078b777 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/mutating/patch/smd.go @@ -0,0 +1,217 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package patch + +import ( + "context" + "errors" + "fmt" + celgo "github.com/google/cel-go/cel" + celtypes "github.com/google/cel-go/common/types" + "strings" + + "sigs.k8s.io/structured-merge-diff/v4/fieldpath" + "sigs.k8s.io/structured-merge-diff/v4/schema" + "sigs.k8s.io/structured-merge-diff/v4/typed" + "sigs.k8s.io/structured-merge-diff/v4/value" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/managedfields" + plugincel "k8s.io/apiserver/pkg/admission/plugin/cel" + "k8s.io/apiserver/pkg/cel/mutation/dynamic" +) + +// ApplyConfigurationCondition contains the inputs needed to compile and evaluate a cel expression +// that returns an apply configuration +type ApplyConfigurationCondition struct { + Expression string +} + +var _ plugincel.ExpressionAccessor = &ApplyConfigurationCondition{} + +func (v *ApplyConfigurationCondition) GetExpression() string { + return v.Expression +} + +func (v *ApplyConfigurationCondition) ReturnTypes() []*celgo.Type { + return []*celgo.Type{applyConfigObjectType} +} + +var applyConfigObjectType = celtypes.NewObjectType("Object") + +// NewApplyConfigurationPatcher creates a patcher that performs an applyConfiguration mutation. +func NewApplyConfigurationPatcher(expressionEvaluator plugincel.MutatingEvaluator) Patcher { + return &applyConfigPatcher{expressionEvaluator: expressionEvaluator} +} + +type applyConfigPatcher struct { + expressionEvaluator plugincel.MutatingEvaluator +} + +func (e *applyConfigPatcher) Patch(ctx context.Context, r Request, runtimeCELCostBudget int64) (runtime.Object, error) { + admissionRequest := plugincel.CreateAdmissionRequest( + r.VersionedAttributes.Attributes, + metav1.GroupVersionResource(r.MatchedResource), + metav1.GroupVersionKind(r.VersionedAttributes.VersionedKind)) + + compileErrors := e.expressionEvaluator.CompilationErrors() + if len(compileErrors) > 0 { + return nil, errors.Join(compileErrors...) + } + eval, _, err := e.expressionEvaluator.ForInput(ctx, r.VersionedAttributes, admissionRequest, r.OptionalVariables, r.Namespace, runtimeCELCostBudget) + if err != nil { + return nil, err + } + if eval.Error != nil { + return nil, eval.Error + } + v := eval.EvalResult + + // The compiler ensures that the return type is an ObjectVal with type name of "Object". + objVal, ok := v.(*dynamic.ObjectVal) + if !ok { + // Should not happen since the compiler type checks the return type. + return nil, fmt.Errorf("unsupported return type from ApplyConfiguration expression: %v", v.Type()) + } + // TODO: Object initializers are insufficiently type checked. + // In the interim, we use this sanity check to detect type mismatches + // between field names and Object initializers. For example, + // "Object.spec{ selector: Object.spec.wrong{}}" is detected as a mismatch. + // Before beta, attaching full type information both to Object initializers and + // the "object" and "oldObject" variables is needed. This will allow CEL to + // perform comprehensive runtime type checking. 
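Aside (editorial note, not part of the vendored diff): the apply-configuration patcher above compiles a CEL expression that must evaluate to an `Object` initializer, which is then merged into the request object. A rough sketch of how such an expression is carried before compilation, using the exported `ApplyConfigurationCondition` type from this file; the concrete expression is an assumed example of the Object-initializer form, not taken from this diff:

```go
package example

import "k8s.io/apiserver/pkg/admission/plugin/policy/mutating/patch"

// setReplicas is an illustrative ApplyConfiguration mutation: the expression
// evaluates to an Object initializer describing only the fields to set, and the
// result is merged into the incoming object via ApplyStructuredMergeDiff.
var setReplicas = &patch.ApplyConfigurationCondition{
	Expression: `Object{ spec: Object.spec{ replicas: 3 } }`,
}
```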
+ err = objVal.CheckTypeNamesMatchFieldPathNames() + if err != nil { + return nil, fmt.Errorf("type mismatch: %w", err) + } + + value, ok := objVal.Value().(map[string]any) + if !ok { + return nil, fmt.Errorf("invalid return type: %T", v) + } + + patchObject := unstructured.Unstructured{Object: value} + patchObject.SetGroupVersionKind(r.VersionedAttributes.VersionedObject.GetObjectKind().GroupVersionKind()) + patched, err := ApplyStructuredMergeDiff(r.TypeConverter, r.VersionedAttributes.VersionedObject, &patchObject) + if err != nil { + return nil, fmt.Errorf("error applying patch: %w", err) + } + return patched, nil +} + +// ApplyStructuredMergeDiff applies a structured merge diff to an object and returns a copy of the object +// with the patch applied. +func ApplyStructuredMergeDiff( + typeConverter managedfields.TypeConverter, + originalObject runtime.Object, + patch *unstructured.Unstructured, +) (runtime.Object, error) { + if patch.GroupVersionKind() != originalObject.GetObjectKind().GroupVersionKind() { + return nil, fmt.Errorf("patch (%v) and original object (%v) are not of the same gvk", patch.GroupVersionKind().String(), originalObject.GetObjectKind().GroupVersionKind().String()) + } else if typeConverter == nil { + return nil, fmt.Errorf("type converter must not be nil") + } + + patchObjTyped, err := typeConverter.ObjectToTyped(patch) + if err != nil { + return nil, fmt.Errorf("failed to convert patch object to typed object: %w", err) + } + + err = validatePatch(patchObjTyped) + if err != nil { + return nil, fmt.Errorf("invalid ApplyConfiguration: %w", err) + } + + liveObjTyped, err := typeConverter.ObjectToTyped(originalObject) + if err != nil { + return nil, fmt.Errorf("failed to convert original object to typed object: %w", err) + } + + newObjTyped, err := liveObjTyped.Merge(patchObjTyped) + if err != nil { + return nil, fmt.Errorf("failed to merge patch: %w", err) + } + + // Our mutating admission policy sets the fields but does not track ownership. + // Newly introduced fields in the patch won't be tracked by a field manager + // (so if the original object is updated again but the mutating policy is + // not active, the fields will be dropped). + // + // This necessarily means that changes to an object by a mutating policy + // are only preserved if the policy was active at the time of the change. + // (If the policy is not active, the changes may be dropped.) + + newObj, err := typeConverter.TypedToObject(newObjTyped) + if err != nil { + return nil, fmt.Errorf("failed to convert typed object to object: %w", err) + } + + return newObj, nil +} + +// validatePatch searches an apply configuration for any arrays, maps or structs elements that are atomic and returns +// an error if any are found. +// This prevents accidental removal of fields that can occur when the user intends to modify some +// fields in an atomic type, not realizing that all fields not explicitly set in the new value +// of the atomic will be removed. +func validatePatch(v *typed.TypedValue) error { + atomics := findAtomics(nil, v.Schema(), v.TypeRef(), v.AsValue()) + if len(atomics) > 0 { + return fmt.Errorf("may not mutate atomic arrays, maps or structs: %v", strings.Join(atomics, ", ")) + } + return nil +} + +// findAtomics returns field paths for any atomic arrays, maps or structs found when traversing the given value. 
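Aside (editorial note, not part of the vendored diff): `ApplyStructuredMergeDiff` is exported, so its merge-without-ownership behavior can be sketched in isolation. The snippet below is a rough illustration under assumptions: it uses the deduced type converter from apimachinery to avoid wiring up an OpenAPI schema, whereas the real dispatcher obtains a converter from a TypeConverterManager backed by the static converter or /openapi/v3; the ConfigMap contents are invented:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/util/managedfields"

	"k8s.io/apiserver/pkg/admission/plugin/policy/mutating/patch"
)

func main() {
	original := &unstructured.Unstructured{Object: map[string]interface{}{
		"apiVersion": "v1",
		"kind":       "ConfigMap",
		"metadata":   map[string]interface{}{"name": "demo", "labels": map[string]interface{}{"env": "prod"}},
		"data":       map[string]interface{}{"key": "value"},
	}}
	// The patch carries only the fields the mutation wants to set; everything
	// else on the original object is preserved by the structured merge.
	patchObj := &unstructured.Unstructured{Object: map[string]interface{}{
		"apiVersion": "v1",
		"kind":       "ConfigMap",
		"metadata":   map[string]interface{}{"labels": map[string]interface{}{"team": "platform"}},
	}}

	merged, err := patch.ApplyStructuredMergeDiff(managedfields.NewDeducedTypeConverter(), original, patchObj)
	if err != nil {
		panic(err)
	}
	// Both labels (env and team) survive the merge.
	fmt.Println(merged.(*unstructured.Unstructured).Object["metadata"])
}
```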
+func findAtomics(path []fieldpath.PathElement, s *schema.Schema, tr schema.TypeRef, v value.Value) (atomics []string) { + if a, ok := s.Resolve(tr); ok { // Validation pass happens before this and checks that all schemas can be resolved + if v.IsMap() && a.Map != nil { + if a.Map.ElementRelationship == schema.Atomic { + atomics = append(atomics, pathString(path)) + } + v.AsMap().Iterate(func(key string, val value.Value) bool { + pe := fieldpath.PathElement{FieldName: &key} + if sf, ok := a.Map.FindField(key); ok { + tr = sf.Type + atomics = append(atomics, findAtomics(append(path, pe), s, tr, val)...) + } + return true + }) + } + if v.IsList() && a.List != nil { + if a.List.ElementRelationship == schema.Atomic { + atomics = append(atomics, pathString(path)) + } + list := v.AsList() + for i := 0; i < list.Length(); i++ { + pe := fieldpath.PathElement{Index: &i} + atomics = append(atomics, findAtomics(append(path, pe), s, a.List.ElementType, list.At(i))...) + } + } + } + return atomics +} + +func pathString(path []fieldpath.PathElement) string { + sb := strings.Builder{} + for _, p := range path { + sb.WriteString(p.String()) + } + return sb.String() +} diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/mutating/patch/typeconverter.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/mutating/patch/typeconverter.go new file mode 100644 index 000000000..96ca7f037 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/mutating/patch/typeconverter.go @@ -0,0 +1,187 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package patch + +import ( + "context" + "encoding/json" + "fmt" + "strings" + "sync" + "time" + + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/managedfields" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/openapi" + "k8s.io/kube-openapi/pkg/spec3" +) + +type TypeConverterManager interface { + // GetTypeConverter returns a type converter for the given GVK + GetTypeConverter(gvk schema.GroupVersionKind) managedfields.TypeConverter + Run(ctx context.Context) +} + +func NewTypeConverterManager( + staticTypeConverter managedfields.TypeConverter, + openapiClient openapi.Client, +) TypeConverterManager { + return &typeConverterManager{ + staticTypeConverter: staticTypeConverter, + openapiClient: openapiClient, + typeConverterMap: make(map[schema.GroupVersion]typeConverterCacheEntry), + lastFetchedPaths: make(map[schema.GroupVersion]openapi.GroupVersion), + } +} + +type typeConverterCacheEntry struct { + typeConverter managedfields.TypeConverter + entry openapi.GroupVersion +} + +// typeConverterManager helps us make sure we have an up to date schema and +// type converter for our openapi models. 
It should be configured to use a +// static type converter for natively typed schemas, and fetches the schema +// for CRDs/other over the network on demand (trying to reduce network calls where necessary) +type typeConverterManager struct { + // schemaCache is used to cache the schema for a given GVK + staticTypeConverter managedfields.TypeConverter + + // discoveryClient is used to fetch the schema for a given GVK + openapiClient openapi.Client + + lock sync.RWMutex + + typeConverterMap map[schema.GroupVersion]typeConverterCacheEntry + lastFetchedPaths map[schema.GroupVersion]openapi.GroupVersion +} + +func (t *typeConverterManager) Run(ctx context.Context) { + // Loop every 5s refreshing the OpenAPI schema list to know which + // schemas have been invalidated. This should use e-tags under the hood + _ = wait.PollUntilContextCancel(ctx, 5*time.Second, true, func(_ context.Context) (done bool, err error) { + paths, err := t.openapiClient.Paths() + if err != nil { + utilruntime.HandleError(fmt.Errorf("failed to fetch openapi paths: %w", err)) + return false, nil + } + + // The /openapi/v3 endpoint contains a list of paths whose ServerRelativeURL + // value changes every time the schema is updated. So we poll /openapi/v3 + // to get the "version number" for each schema, and invalidate our cache + // if the version number has changed since we pulled it. + parsedPaths := make(map[schema.GroupVersion]openapi.GroupVersion, len(paths)) + for path, entry := range paths { + if !strings.HasPrefix(path, "apis/") && !strings.HasPrefix(path, "api/") { + continue + } + path = strings.TrimPrefix(path, "apis/") + path = strings.TrimPrefix(path, "api/") + + gv, err := schema.ParseGroupVersion(path) + if err != nil { + utilruntime.HandleError(fmt.Errorf("failed to parse group version %q: %w", path, err)) + return false, nil + } + + parsedPaths[gv] = entry + } + + t.lock.Lock() + defer t.lock.Unlock() + t.lastFetchedPaths = parsedPaths + return false, nil + }) +} + +func (t *typeConverterManager) GetTypeConverter(gvk schema.GroupVersionKind) managedfields.TypeConverter { + // Check to see if the static type converter handles this GVK + if t.staticTypeConverter != nil { + //!TODO: Add ability to check existence to type converter + // working around for now but seeing if getting a typed version of an + // empty object returns error + stub := &unstructured.Unstructured{} + stub.SetGroupVersionKind(gvk) + + if _, err := t.staticTypeConverter.ObjectToTyped(stub); err == nil { + return t.staticTypeConverter + } + } + + gv := gvk.GroupVersion() + + existing, entry, err := func() (managedfields.TypeConverter, openapi.GroupVersion, error) { + t.lock.RLock() + defer t.lock.RUnlock() + + // If schema is not supported by static type converter, ask discovery + // for the schema + entry, ok := t.lastFetchedPaths[gv] + if !ok { + // If we can't get the schema, we can't do anything + return nil, nil, fmt.Errorf("no schema for %v", gvk) + } + + // If the entry schema has not changed, use the same type converter + if existing, ok := t.typeConverterMap[gv]; ok && existing.entry.ServerRelativeURL() == entry.ServerRelativeURL() { + // If we have a type converter for this GVK, return it + return existing.typeConverter, existing.entry, nil + } + + return nil, entry, nil + }() + if err != nil { + utilruntime.HandleError(err) + return nil + } else if existing != nil { + return existing + } + + schBytes, err := entry.Schema(runtime.ContentTypeJSON) + if err != nil { + utilruntime.HandleError(fmt.Errorf("failed to get schema for %v: 
%w", gvk, err)) + return nil + } + + var sch spec3.OpenAPI + if err := json.Unmarshal(schBytes, &sch); err != nil { + utilruntime.HandleError(fmt.Errorf("failed to unmarshal schema for %v: %w", gvk, err)) + return nil + } + + // The schema has changed, or there is no entry for it, generate + // a new type converter for this GV + tc, err := managedfields.NewTypeConverter(sch.Components.Schemas, false) + if err != nil { + utilruntime.HandleError(fmt.Errorf("failed to create type converter for %v: %w", gvk, err)) + return nil + } + + t.lock.Lock() + defer t.lock.Unlock() + + t.typeConverterMap[gv] = typeConverterCacheEntry{ + typeConverter: tc, + entry: entry, + } + + return tc +} diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/mutating/plugin.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/mutating/plugin.go new file mode 100644 index 000000000..527bc6a53 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/mutating/plugin.go @@ -0,0 +1,151 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package mutating + +import ( + "context" + celgo "github.com/google/cel-go/cel" + "io" + + "k8s.io/api/admissionregistration/v1alpha1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/managedfields" + "k8s.io/apiserver/pkg/admission" + "k8s.io/apiserver/pkg/admission/plugin/cel" + "k8s.io/apiserver/pkg/admission/plugin/policy/generic" + "k8s.io/apiserver/pkg/admission/plugin/policy/matching" + "k8s.io/apiserver/pkg/admission/plugin/policy/mutating/patch" + "k8s.io/apiserver/pkg/admission/plugin/webhook/matchconditions" + "k8s.io/apiserver/pkg/authorization/authorizer" + "k8s.io/apiserver/pkg/features" + "k8s.io/client-go/dynamic" + "k8s.io/client-go/informers" + "k8s.io/client-go/kubernetes" + "k8s.io/component-base/featuregate" +) + +const ( + // PluginName indicates the name of admission plug-in + PluginName = "MutatingAdmissionPolicy" +) + +// Register registers a plugin +func Register(plugins *admission.Plugins) { + plugins.Register(PluginName, func(configFile io.Reader) (admission.Interface, error) { + return NewPlugin(configFile), nil + }) +} + +type Policy = v1alpha1.MutatingAdmissionPolicy +type PolicyBinding = v1alpha1.MutatingAdmissionPolicyBinding +type PolicyMutation = v1alpha1.Mutation +type PolicyHook = generic.PolicyHook[*Policy, *PolicyBinding, PolicyEvaluator] + +type Mutator struct { +} +type MutationEvaluationFunc func( + ctx context.Context, + matchedResource schema.GroupVersionResource, + versionedAttr *admission.VersionedAttributes, + o admission.ObjectInterfaces, + versionedParams runtime.Object, + namespace *corev1.Namespace, + typeConverter managedfields.TypeConverter, + runtimeCELCostBudget int64, + authorizer authorizer.Authorizer, +) (runtime.Object, error) + +type PolicyEvaluator struct { + Matcher matchconditions.Matcher + Mutators []patch.Patcher + CompositionEnv 
*cel.CompositionEnv + Error error +} + +// Plugin is an implementation of admission.Interface. +type Plugin struct { + *generic.Plugin[PolicyHook] +} + +var _ admission.Interface = &Plugin{} +var _ admission.MutationInterface = &Plugin{} + +// NewPlugin returns a generic admission webhook plugin. +func NewPlugin(_ io.Reader) *Plugin { + // There is no request body to mutate for DELETE, so this plugin never handles that operation. + handler := admission.NewHandler(admission.Create, admission.Update, admission.Connect) + res := &Plugin{} + res.Plugin = generic.NewPlugin( + handler, + func(f informers.SharedInformerFactory, client kubernetes.Interface, dynamicClient dynamic.Interface, restMapper meta.RESTMapper) generic.Source[PolicyHook] { + return generic.NewPolicySource( + f.Admissionregistration().V1alpha1().MutatingAdmissionPolicies().Informer(), + f.Admissionregistration().V1alpha1().MutatingAdmissionPolicyBindings().Informer(), + NewMutatingAdmissionPolicyAccessor, + NewMutatingAdmissionPolicyBindingAccessor, + compilePolicy, + //!TODO: Create a way to share param informers between + // mutating/validating plugins + f, + dynamicClient, + restMapper, + ) + }, + func(a authorizer.Authorizer, m *matching.Matcher, client kubernetes.Interface) generic.Dispatcher[PolicyHook] { + return NewDispatcher(a, m, patch.NewTypeConverterManager(nil, client.Discovery().OpenAPIV3())) + }, + ) + return res +} + +// Admit makes an admission decision based on the request attributes. +func (a *Plugin) Admit(ctx context.Context, attr admission.Attributes, o admission.ObjectInterfaces) error { + return a.Plugin.Dispatch(ctx, attr, o) +} + +func (a *Plugin) InspectFeatureGates(featureGates featuregate.FeatureGate) { + a.Plugin.SetEnabled(featureGates.Enabled(features.MutatingAdmissionPolicy)) +} + +// Variable is a named expression for composition. +type Variable struct { + Name string + Expression string +} + +func (v *Variable) GetExpression() string { + return v.Expression +} + +func (v *Variable) ReturnTypes() []*celgo.Type { + return []*celgo.Type{celgo.AnyType, celgo.DynType} +} + +func (v *Variable) GetName() string { + return v.Name +} + +func convertv1alpha1Variables(variables []v1alpha1.Variable) []cel.NamedExpressionAccessor { + namedExpressions := make([]cel.NamedExpressionAccessor, len(variables)) + for i, variable := range variables { + namedExpressions[i] = &Variable{Name: variable.Name, Expression: variable.Expression} + } + return namedExpressions +} diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/mutating/reinvocationcontext.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/mutating/reinvocationcontext.go new file mode 100644 index 000000000..764ce3927 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/mutating/reinvocationcontext.go @@ -0,0 +1,76 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package mutating + +import ( + apiequality "k8s.io/apimachinery/pkg/api/equality" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/sets" +) + +type key struct { + PolicyUID types.NamespacedName + BindingUID types.NamespacedName + ParamUID types.NamespacedName + MutationIndex int +} + +type policyReinvokeContext struct { + // lastPolicyOutput holds the result of the last Policy admission plugin call + lastPolicyOutput runtime.Object + // previouslyInvokedReinvocablePolicies holds the set of policies that have been invoked and + // should be reinvoked if a later mutation occurs + previouslyInvokedReinvocablePolicies sets.Set[key] + // reinvokePolicies holds the set of Policies that should be reinvoked + reinvokePolicies sets.Set[key] +} + +func (rc *policyReinvokeContext) ShouldReinvoke(policy key) bool { + return rc.reinvokePolicies.Has(policy) +} + +func (rc *policyReinvokeContext) IsOutputChangedSinceLastPolicyInvocation(object runtime.Object) bool { + return !apiequality.Semantic.DeepEqual(rc.lastPolicyOutput, object) +} + +func (rc *policyReinvokeContext) SetLastPolicyInvocationOutput(object runtime.Object) { + if object == nil { + rc.lastPolicyOutput = nil + return + } + rc.lastPolicyOutput = object.DeepCopyObject() +} + +func (rc *policyReinvokeContext) AddReinvocablePolicyToPreviouslyInvoked(policy key) { + if rc.previouslyInvokedReinvocablePolicies == nil { + rc.previouslyInvokedReinvocablePolicies = sets.New[key]() + } + rc.previouslyInvokedReinvocablePolicies.Insert(policy) +} + +func (rc *policyReinvokeContext) RequireReinvokingPreviouslyInvokedPlugins() { + if len(rc.previouslyInvokedReinvocablePolicies) > 0 { + if rc.reinvokePolicies == nil { + rc.reinvokePolicies = sets.New[key]() + } + for s := range rc.previouslyInvokedReinvocablePolicies { + rc.reinvokePolicies.Insert(s) + } + rc.previouslyInvokedReinvocablePolicies = sets.New[key]() + } +} diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/validating/accessor.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/validating/accessor.go index 97cef0914..628e3a653 100644 --- a/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/validating/accessor.go +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/validating/accessor.go @@ -54,6 +54,10 @@ func (v *validatingAdmissionPolicyAccessor) GetMatchConstraints() *v1.MatchResou return v.Spec.MatchConstraints } +func (v *validatingAdmissionPolicyAccessor) GetFailurePolicy() *v1.FailurePolicyType { + return v.Spec.FailurePolicy +} + type validatingAdmissionPolicyBindingAccessor struct { *v1.ValidatingAdmissionPolicyBinding } diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/validating/dispatcher.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/validating/dispatcher.go index f06011425..8f3e22f64 100644 --- a/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/validating/dispatcher.go +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/validating/dispatcher.go @@ -30,6 +30,7 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" utiljson "k8s.io/apimachinery/pkg/util/json" "k8s.io/apiserver/pkg/admission" + admissionauthorizer "k8s.io/apiserver/pkg/admission/plugin/authorizer" "k8s.io/apiserver/pkg/admission/plugin/policy/generic" celmetrics "k8s.io/apiserver/pkg/admission/plugin/policy/validating/metrics" celconfig "k8s.io/apiserver/pkg/apis/cel" @@ -63,6 +64,10 @@ type policyDecisionWithMetadata struct { Binding *admissionregistrationv1.ValidatingAdmissionPolicyBinding } +func
(c *dispatcher) Start(ctx context.Context) error { + return nil +} + // Dispatch implements generic.Dispatcher. func (c *dispatcher) Dispatch(ctx context.Context, a admission.Attributes, o admission.ObjectInterfaces, hooks []PolicyHook) error { @@ -109,7 +114,7 @@ func (c *dispatcher) Dispatch(ctx context.Context, a admission.Attributes, o adm } } - authz := newCachingAuthorizer(c.authz) + authz := admissionauthorizer.NewCachingAuthorizer(c.authz) for _, hook := range hooks { // versionedAttributes will be set to non-nil inside of the loop, but diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/validating/metrics/errors.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/validating/metrics/errors.go index 432725261..bb5710391 100644 --- a/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/validating/metrics/errors.go +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/validating/metrics/errors.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package cel +package metrics import ( "errors" diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/validating/metrics/metrics.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/validating/metrics/metrics.go index 92c8ee2b8..c4c98e139 100644 --- a/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/validating/metrics/metrics.go +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/validating/metrics/metrics.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package cel +package metrics import ( "context" diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/validating/plugin.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/validating/plugin.go index 06f4a8c71..85db23cd8 100644 --- a/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/validating/plugin.go +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/validating/plugin.go @@ -36,7 +36,6 @@ import ( "k8s.io/client-go/dynamic" "k8s.io/client-go/informers" "k8s.io/client-go/kubernetes" - "k8s.io/component-base/featuregate" ) const ( @@ -93,13 +92,12 @@ type Plugin struct { var _ admission.Interface = &Plugin{} var _ admission.ValidationInterface = &Plugin{} -var _ initializer.WantsFeatures = &Plugin{} var _ initializer.WantsExcludedAdmissionResources = &Plugin{} func NewPlugin(_ io.Reader) *Plugin { handler := admission.NewHandler(admission.Connect, admission.Create, admission.Delete, admission.Update) - return &Plugin{ + p := &Plugin{ Plugin: generic.NewPlugin( handler, func(f informers.SharedInformerFactory, client kubernetes.Interface, dynamicClient dynamic.Interface, restMapper meta.RESTMapper) generic.Source[PolicyHook] { @@ -114,11 +112,13 @@ func NewPlugin(_ io.Reader) *Plugin { restMapper, ) }, - func(a authorizer.Authorizer, m *matching.Matcher) generic.Dispatcher[PolicyHook] { + func(a authorizer.Authorizer, m *matching.Matcher, client kubernetes.Interface) generic.Dispatcher[PolicyHook] { return NewDispatcher(a, generic.NewPolicyMatcher(m)) }, ), } + p.SetEnabled(true) + return p } // Validate makes an admission decision based on the request attributes. 
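Aside (editorial note, not part of the vendored diff): the dispatcher change above replaces a locally constructed caching authorizer with the shared `admissionauthorizer.NewCachingAuthorizer`, so repeated identical authorization checks within one admission dispatch hit the delegate only once. The toy wrapper below is a sketch of that idea only, under assumptions; it is not the vendored implementation, and the cache key is deliberately simplified:

```go
package example

import (
	"context"
	"fmt"

	"k8s.io/apiserver/pkg/authorization/authorizer"
)

type cachedDecision struct {
	decision authorizer.Decision
	reason   string
	err      error
}

// memoizingAuthorizer memoizes decisions for the lifetime of one dispatch.
type memoizingAuthorizer struct {
	delegate authorizer.Authorizer
	cache    map[string]cachedDecision
}

func newMemoizingAuthorizer(delegate authorizer.Authorizer) authorizer.Authorizer {
	return &memoizingAuthorizer{delegate: delegate, cache: map[string]cachedDecision{}}
}

func (m *memoizingAuthorizer) Authorize(ctx context.Context, a authorizer.Attributes) (authorizer.Decision, string, error) {
	user := "anonymous"
	if u := a.GetUser(); u != nil {
		user = u.GetName()
	}
	// Simplified cache key; a real wrapper keys on the full attribute set.
	key := fmt.Sprintf("%s/%s/%s/%s/%s", user, a.GetVerb(), a.GetAPIGroup(), a.GetResource(), a.GetName())
	if hit, ok := m.cache[key]; ok {
		return hit.decision, hit.reason, hit.err
	}
	d, reason, err := m.delegate.Authorize(ctx, a)
	m.cache[key] = cachedDecision{decision: d, reason: reason, err: err}
	return d, reason, err
}
```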
@@ -126,10 +126,6 @@ func (a *Plugin) Validate(ctx context.Context, attr admission.Attributes, o admi return a.Plugin.Dispatch(ctx, attr, o) } -func (a *Plugin) InspectFeatureGates(featureGates featuregate.FeatureGate) { - a.Plugin.SetEnabled(featureGates.Enabled(features.ValidatingAdmissionPolicy)) -} - func compilePolicy(policy *Policy) Validator { hasParam := false if policy.Spec.ParamKind != nil { @@ -155,13 +151,13 @@ func compilePolicy(policy *Policy) Validator { for i := range matchConditions { matchExpressionAccessors[i] = (*matchconditions.MatchCondition)(&matchConditions[i]) } - matcher = matchconditions.NewMatcher(filterCompiler.Compile(matchExpressionAccessors, optionalVars, environment.StoredExpressions), failurePolicy, "policy", "validate", policy.Name) + matcher = matchconditions.NewMatcher(filterCompiler.CompileCondition(matchExpressionAccessors, optionalVars, environment.StoredExpressions), failurePolicy, "policy", "validate", policy.Name) } res := NewValidator( - filterCompiler.Compile(convertv1Validations(policy.Spec.Validations), optionalVars, environment.StoredExpressions), + filterCompiler.CompileCondition(convertv1Validations(policy.Spec.Validations), optionalVars, environment.StoredExpressions), matcher, - filterCompiler.Compile(convertv1AuditAnnotations(policy.Spec.AuditAnnotations), optionalVars, environment.StoredExpressions), - filterCompiler.Compile(convertv1MessageExpressions(policy.Spec.Validations), expressionOptionalVars, environment.StoredExpressions), + filterCompiler.CompileCondition(convertv1AuditAnnotations(policy.Spec.AuditAnnotations), optionalVars, environment.StoredExpressions), + filterCompiler.CompileCondition(convertv1MessageExpressions(policy.Spec.Validations), expressionOptionalVars, environment.StoredExpressions), failurePolicy, ) diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/validating/validator.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/validating/validator.go index c429ae22f..4057e515e 100644 --- a/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/validating/validator.go +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/validating/validator.go @@ -41,13 +41,13 @@ import ( // validator implements the Validator interface type validator struct { celMatcher matchconditions.Matcher - validationFilter cel.Filter - auditAnnotationFilter cel.Filter - messageFilter cel.Filter + validationFilter cel.ConditionEvaluator + auditAnnotationFilter cel.ConditionEvaluator + messageFilter cel.ConditionEvaluator failPolicy *v1.FailurePolicyType } -func NewValidator(validationFilter cel.Filter, celMatcher matchconditions.Matcher, auditAnnotationFilter, messageFilter cel.Filter, failPolicy *v1.FailurePolicyType) Validator { +func NewValidator(validationFilter cel.ConditionEvaluator, celMatcher matchconditions.Matcher, auditAnnotationFilter, messageFilter cel.ConditionEvaluator, failPolicy *v1.FailurePolicyType) Validator { return &validator{ celMatcher: celMatcher, validationFilter: validationFilter, @@ -122,6 +122,7 @@ func (v *validator) Validate(ctx context.Context, matchedResource schema.GroupVe messageResults, _, err := v.messageFilter.ForInput(ctx, versionedAttr, admissionRequest, expressionOptionalVars, ns, remainingBudget) for i, evalResult := range evalResults { var decision = &decisions[i] + decision.Elapsed = evalResult.Elapsed // TODO: move this to generics validation, ok := evalResult.ExpressionAccessor.(*ValidationCondition) if !ok { @@ -146,6 +147,7 @@ func (v *validator) Validate(ctx context.Context, 
matchedResource schema.GroupVe decision.Message = fmt.Sprintf("failed messageExpression: %s", err) } else if evalResult.EvalResult != celtypes.True { decision.Action = ActionDeny + decision.Evaluation = EvalDeny if validation.Reason == nil { decision.Reason = metav1.StatusReasonInvalid } else { @@ -210,6 +212,7 @@ func (v *validator) Validate(ctx context.Context, matchedResource schema.GroupVe continue } var auditAnnotationResult = &auditAnnotationResults[i] + auditAnnotationResult.Elapsed = evalResult.Elapsed // TODO: move this to generics validation, ok := evalResult.ExpressionAccessor.(*AuditAnnotationCondition) if !ok { diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/accessors.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/accessors.go index f23580cc0..7ae29d5bb 100644 --- a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/accessors.go +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/accessors.go @@ -50,7 +50,7 @@ type WebhookAccessor interface { GetRESTClient(clientManager *webhookutil.ClientManager) (*rest.RESTClient, error) // GetCompiledMatcher gets the compiled matcher object - GetCompiledMatcher(compiler cel.FilterCompiler) matchconditions.Matcher + GetCompiledMatcher(compiler cel.ConditionCompiler) matchconditions.Matcher // GetName gets the webhook Name field. Note that the name is scoped to the webhook // configuration and does not provide a globally unique identity, if a unique identity is @@ -132,7 +132,7 @@ func (m *mutatingWebhookAccessor) GetType() string { return "admit" } -func (m *mutatingWebhookAccessor) GetCompiledMatcher(compiler cel.FilterCompiler) matchconditions.Matcher { +func (m *mutatingWebhookAccessor) GetCompiledMatcher(compiler cel.ConditionCompiler) matchconditions.Matcher { m.compileMatcher.Do(func() { expressions := make([]cel.ExpressionAccessor, len(m.MutatingWebhook.MatchConditions)) for i, matchCondition := range m.MutatingWebhook.MatchConditions { @@ -145,7 +145,7 @@ func (m *mutatingWebhookAccessor) GetCompiledMatcher(compiler cel.FilterCompiler if utilfeature.DefaultFeatureGate.Enabled(features.StrictCostEnforcementForWebhooks) { strictCost = true } - m.compiledMatcher = matchconditions.NewMatcher(compiler.Compile( + m.compiledMatcher = matchconditions.NewMatcher(compiler.CompileCondition( expressions, cel.OptionalVariableDeclarations{ HasParams: false, @@ -265,7 +265,7 @@ func (v *validatingWebhookAccessor) GetRESTClient(clientManager *webhookutil.Cli return v.client, v.clientErr } -func (v *validatingWebhookAccessor) GetCompiledMatcher(compiler cel.FilterCompiler) matchconditions.Matcher { +func (v *validatingWebhookAccessor) GetCompiledMatcher(compiler cel.ConditionCompiler) matchconditions.Matcher { v.compileMatcher.Do(func() { expressions := make([]cel.ExpressionAccessor, len(v.ValidatingWebhook.MatchConditions)) for i, matchCondition := range v.ValidatingWebhook.MatchConditions { @@ -278,7 +278,7 @@ func (v *validatingWebhookAccessor) GetCompiledMatcher(compiler cel.FilterCompil if utilfeature.DefaultFeatureGate.Enabled(features.StrictCostEnforcementForWebhooks) { strictCost = true } - v.compiledMatcher = matchconditions.NewMatcher(compiler.Compile( + v.compiledMatcher = matchconditions.NewMatcher(compiler.CompileCondition( expressions, cel.OptionalVariableDeclarations{ HasParams: false, diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/generic/webhook.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/generic/webhook.go index f067b3f72..8db7d3ced 100644 --- 
a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/generic/webhook.go +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/generic/webhook.go @@ -21,6 +21,9 @@ import ( "fmt" "io" + "k8s.io/apiserver/pkg/cel/environment" + "k8s.io/apiserver/pkg/features" + utilfeature "k8s.io/apiserver/pkg/util/feature" "k8s.io/klog/v2" admissionv1 "k8s.io/api/admission/v1" @@ -38,9 +41,6 @@ import ( "k8s.io/apiserver/pkg/admission/plugin/webhook/predicates/object" "k8s.io/apiserver/pkg/admission/plugin/webhook/predicates/rules" "k8s.io/apiserver/pkg/authorization/authorizer" - "k8s.io/apiserver/pkg/cel/environment" - "k8s.io/apiserver/pkg/features" - utilfeature "k8s.io/apiserver/pkg/util/feature" webhookutil "k8s.io/apiserver/pkg/util/webhook" "k8s.io/client-go/informers" clientset "k8s.io/client-go/kubernetes" @@ -57,7 +57,7 @@ type Webhook struct { namespaceMatcher *namespace.Matcher objectMatcher *object.Matcher dispatcher Dispatcher - filterCompiler cel.FilterCompiler + filterCompiler cel.ConditionCompiler authorizer authorizer.Authorizer } @@ -102,7 +102,7 @@ func NewWebhook(handler *admission.Handler, configFile io.Reader, sourceFactory namespaceMatcher: &namespace.Matcher{}, objectMatcher: &object.Matcher{}, dispatcher: dispatcherFactory(&cm), - filterCompiler: cel.NewFilterCompiler(environment.MustBaseEnvSet(environment.DefaultCompatibilityVersion(), utilfeature.DefaultFeatureGate.Enabled(features.StrictCostEnforcementForWebhooks))), + filterCompiler: cel.NewConditionCompiler(environment.MustBaseEnvSet(environment.DefaultCompatibilityVersion(), utilfeature.DefaultFeatureGate.Enabled(features.StrictCostEnforcementForWebhooks))), }, nil } diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/matchconditions/matcher.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/matchconditions/matcher.go index 21dd28f6c..b9b4c9169 100644 --- a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/matchconditions/matcher.go +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/matchconditions/matcher.go @@ -54,14 +54,14 @@ var _ Matcher = &matcher{} // matcher evaluates compiled cel expressions and determines if they match the given request or not type matcher struct { - filter celplugin.Filter + filter celplugin.ConditionEvaluator failPolicy v1.FailurePolicyType matcherType string matcherKind string objectName string } -func NewMatcher(filter celplugin.Filter, failPolicy *v1.FailurePolicyType, matcherKind, matcherType, objectName string) Matcher { +func NewMatcher(filter celplugin.ConditionEvaluator, failPolicy *v1.FailurePolicyType, matcherKind, matcherType, objectName string) Matcher { var f v1.FailurePolicyType if failPolicy == nil { f = v1.Fail diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/mutating/dispatcher.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/mutating/dispatcher.go index 5bf80237f..77fac3c11 100644 --- a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/mutating/dispatcher.go +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/mutating/dispatcher.go @@ -190,7 +190,7 @@ func (a *mutatingDispatcher) Dispatch(ctx context.Context, attr admission.Attrib admissionmetrics.Metrics.ObserveWebhook(ctx, hook.Name, time.Since(t), rejected, versionedAttr.Attributes, "admit", 200) } if changed { - // Patch had changed the object. Prepare to reinvoke all previous webhooks that are eligible for re-invocation. + // Patch had changed the object. Prepare to reinvoke all previous mutations that are eligible for re-invocation. 
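Aside (editorial note, not part of the vendored diff): the reworded comment above describes the same reinvocation contract that the new `policyReinvokeContext` earlier in this diff implements for mutating policies. A rough sketch, written as if it lived inside `package mutating`, of how a dispatcher is expected to drive that bookkeeping; the helper name `reinvokeIfChanged` is an assumption for illustration, not vendored code:

```go
package mutating

import "k8s.io/apimachinery/pkg/runtime"

// reinvokeIfChanged records a policy invocation and, when the object has changed
// since this context last saw it, marks previously invoked reinvocable policies
// for one more pass.
func reinvokeIfChanged(rc *policyReinvokeContext, policyKey key, obj runtime.Object) {
	if rc.IsOutputChangedSinceLastPolicyInvocation(obj) {
		// Something mutated the object in the meantime, so every previously
		// invoked reinvocable policy becomes eligible again.
		rc.RequireReinvokingPreviouslyInvokedPlugins()
	}
	rc.AddReinvocablePolicyToPreviouslyInvoked(policyKey)
	rc.SetLastPolicyInvocationOutput(obj)
}
```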
webhookReinvokeCtx.RequireReinvokingPreviouslyInvokedPlugins() reinvokeCtx.SetShouldReinvoke() } @@ -348,7 +348,7 @@ func (a *mutatingDispatcher) callAttrMutatingHook(ctx context.Context, h *admiss } var patchedJS []byte - jsonSerializer := json.NewSerializer(json.DefaultMetaFactory, o.GetObjectCreater(), o.GetObjectTyper(), false) + jsonSerializer := json.NewSerializerWithOptions(json.DefaultMetaFactory, o.GetObjectCreater(), o.GetObjectTyper(), json.SerializerOptions{}) switch result.PatchType { // VerifyAdmissionResponse normalizes to v1 patch types, regardless of the AdmissionReview version used case admissionv1.PatchTypeJSONPatch: diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/predicates/rules/rules.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/predicates/rules/rules.go index b926f65dc..10bef0a8f 100644 --- a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/predicates/rules/rules.go +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/predicates/rules/rules.go @@ -121,7 +121,7 @@ func (r *Matcher) resource() bool { func IsExemptAdmissionConfigurationResource(attr admission.Attributes) bool { gvk := attr.GetKind() if gvk.Group == "admissionregistration.k8s.io" { - if gvk.Kind == "ValidatingWebhookConfiguration" || gvk.Kind == "MutatingWebhookConfiguration" || gvk.Kind == "ValidatingAdmissionPolicy" || gvk.Kind == "ValidatingAdmissionPolicyBinding" { + if gvk.Kind == "ValidatingWebhookConfiguration" || gvk.Kind == "MutatingWebhookConfiguration" || gvk.Kind == "ValidatingAdmissionPolicy" || gvk.Kind == "ValidatingAdmissionPolicyBinding" || gvk.Kind == "MutatingAdmissionPolicy" || gvk.Kind == "MutatingAdmissionPolicyBinding" { return true } } diff --git a/vendor/k8s.io/apiserver/pkg/apis/apiserver/types.go b/vendor/k8s.io/apiserver/pkg/apis/apiserver/types.go index af70fe244..a610ebc1a 100644 --- a/vendor/k8s.io/apiserver/pkg/apis/apiserver/types.go +++ b/vendor/k8s.io/apiserver/pkg/apis/apiserver/types.go @@ -401,6 +401,13 @@ type WebhookMatchCondition struct { // If version specified by subjectAccessReviewVersion in the request variable is v1beta1, // the contents would be converted to the v1 version before evaluating the CEL expression. // + // - 'resourceAttributes' describes information for a resource access request and is unset for non-resource requests. e.g. has(request.resourceAttributes) && request.resourceAttributes.namespace == 'default' + // - 'nonResourceAttributes' describes information for a non-resource access request and is unset for resource requests. e.g. has(request.nonResourceAttributes) && request.nonResourceAttributes.path == '/healthz'. + // - 'user' is the user to test for. e.g. request.user == 'alice' + // - 'groups' is the groups to test for. e.g. ('group1' in request.groups) + // - 'extra' corresponds to the user.Info.GetExtra() method from the authenticator. + // - 'uid' is the information about the requesting user. e.g. 
request.uid == '1' + // // Documentation on CEL: https://kubernetes.io/docs/reference/using-api/cel/ Expression string } diff --git a/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1/defaults.go b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1/defaults.go index b71b53c65..46fb841a5 100644 --- a/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1/defaults.go +++ b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1/defaults.go @@ -48,3 +48,12 @@ func SetDefaults_KMSConfiguration(obj *KMSConfiguration) { obj.CacheSize = &defaultCacheSize } } + +func SetDefaults_WebhookConfiguration(obj *WebhookConfiguration) { + if obj.AuthorizedTTL.Duration == 0 { + obj.AuthorizedTTL.Duration = 5 * time.Minute + } + if obj.UnauthorizedTTL.Duration == 0 { + obj.UnauthorizedTTL.Duration = 30 * time.Second + } +} diff --git a/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1/register.go b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1/register.go index 0de8db497..7b1b51b62 100644 --- a/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1/register.go +++ b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1/register.go @@ -47,6 +47,7 @@ func init() { func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, &AdmissionConfiguration{}, + &AuthorizationConfiguration{}, &EncryptionConfiguration{}, ) // also register into the v1 group as EncryptionConfig (due to a docs bug) diff --git a/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1/types.go b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1/types.go index e139dceb9..18328c558 100644 --- a/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1/types.go +++ b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1/types.go @@ -48,3 +48,129 @@ type AdmissionPluginConfiguration struct { // +optional Configuration *runtime.Unknown `json:"configuration"` } + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +type AuthorizationConfiguration struct { + metav1.TypeMeta + + // Authorizers is an ordered list of authorizers to + // authorize requests against. + // This is similar to the --authorization-modes kube-apiserver flag + // Must be at least one. + Authorizers []AuthorizerConfiguration `json:"authorizers"` +} + +const ( + TypeWebhook AuthorizerType = "Webhook" + FailurePolicyNoOpinion string = "NoOpinion" + FailurePolicyDeny string = "Deny" + AuthorizationWebhookConnectionInfoTypeKubeConfigFile string = "KubeConfigFile" + AuthorizationWebhookConnectionInfoTypeInCluster string = "InClusterConfig" +) + +type AuthorizerType string + +type AuthorizerConfiguration struct { + // Type refers to the type of the authorizer + // "Webhook" is supported in the generic API server + // Other API servers may support additional authorizer + // types like Node, RBAC, ABAC, etc. + Type string `json:"type"` + + // Name used to describe the webhook + // This is explicitly used in monitoring machinery for metrics + // Note: Names must be DNS1123 labels like `myauthorizername` or + // subdomains like `myauthorizer.example.domain` + // Required, with no default + Name string `json:"name"` + + // Webhook defines the configuration for a Webhook authorizer + // Must be defined when Type=Webhook + // Must not be defined when Type!=Webhook + Webhook *WebhookConfiguration `json:"webhook,omitempty"` +} + +type WebhookConfiguration struct { + // The duration to cache 'authorized' responses from the webhook + // authorizer. 
+ // Same as setting `--authorization-webhook-cache-authorized-ttl` flag + // Default: 5m0s + AuthorizedTTL metav1.Duration `json:"authorizedTTL"` + // The duration to cache 'unauthorized' responses from the webhook + // authorizer. + // Same as setting `--authorization-webhook-cache-unauthorized-ttl` flag + // Default: 30s + UnauthorizedTTL metav1.Duration `json:"unauthorizedTTL"` + // Timeout for the webhook request + // Maximum allowed value is 30s. + // Required, no default value. + Timeout metav1.Duration `json:"timeout"` + // The API version of the authorization.k8s.io SubjectAccessReview to + // send to and expect from the webhook. + // Same as setting `--authorization-webhook-version` flag + // Valid values: v1beta1, v1 + // Required, no default value + SubjectAccessReviewVersion string `json:"subjectAccessReviewVersion"` + // MatchConditionSubjectAccessReviewVersion specifies the SubjectAccessReview + // version the CEL expressions are evaluated against + // Valid values: v1 + // Required, no default value + MatchConditionSubjectAccessReviewVersion string `json:"matchConditionSubjectAccessReviewVersion"` + // Controls the authorization decision when a webhook request fails to + // complete or returns a malformed response or errors evaluating + // matchConditions. + // Valid values: + // - NoOpinion: continue to subsequent authorizers to see if one of + // them allows the request + // - Deny: reject the request without consulting subsequent authorizers + // Required, with no default. + FailurePolicy string `json:"failurePolicy"` + + // ConnectionInfo defines how we talk to the webhook + ConnectionInfo WebhookConnectionInfo `json:"connectionInfo"` + + // matchConditions is a list of conditions that must be met for a request to be sent to this + // webhook. An empty list of matchConditions matches all requests. + // There are a maximum of 64 match conditions allowed. + // + // The exact matching logic is (in order): + // 1. If at least one matchCondition evaluates to FALSE, then the webhook is skipped. + // 2. If ALL matchConditions evaluate to TRUE, then the webhook is called. + // 3. If at least one matchCondition evaluates to an error (but none are FALSE): + // - If failurePolicy=Deny, then the webhook rejects the request + // - If failurePolicy=NoOpinion, then the error is ignored and the webhook is skipped + MatchConditions []WebhookMatchCondition `json:"matchConditions"` +} + +type WebhookConnectionInfo struct { + // Controls how the webhook should communicate with the server. + // Valid values: + // - KubeConfigFile: use the file specified in kubeConfigFile to locate the + // server. + // - InClusterConfig: use the in-cluster configuration to call the + // SubjectAccessReview API hosted by kube-apiserver. This mode is not + // allowed for kube-apiserver. + Type string `json:"type"` + + // Path to KubeConfigFile for connection info + // Required, if connectionInfo.Type is KubeConfig + KubeConfigFile *string `json:"kubeConfigFile"` +} + +type WebhookMatchCondition struct { + // expression represents the expression which will be evaluated by CEL. Must evaluate to bool. + // CEL expressions have access to the contents of the SubjectAccessReview in v1 version. + // If version specified by subjectAccessReviewVersion in the request variable is v1beta1, + // the contents would be converted to the v1 version before evaluating the CEL expression. + // + // - 'resourceAttributes' describes information for a resource access request and is unset for non-resource requests. e.g. 
has(request.resourceAttributes) && request.resourceAttributes.namespace == 'default' + // - 'nonResourceAttributes' describes information for a non-resource access request and is unset for resource requests. e.g. has(request.nonResourceAttributes) && request.nonResourceAttributes.path == '/healthz'. + // - 'user' is the user to test for. e.g. request.user == 'alice' + // - 'groups' is the groups to test for. e.g. ('group1' in request.groups) + // - 'extra' corresponds to the user.Info.GetExtra() method from the authenticator. + // - 'uid' is the information about the requesting user. e.g. request.uid == '1' + // + // Documentation on CEL: https://kubernetes.io/docs/reference/using-api/cel/ + Expression string `json:"expression"` +} diff --git a/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1/zz_generated.conversion.go b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1/zz_generated.conversion.go index c0f218742..63083025a 100644 --- a/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1/zz_generated.conversion.go +++ b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1/zz_generated.conversion.go @@ -67,6 +67,26 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } + if err := s.AddGeneratedConversionFunc((*AuthorizationConfiguration)(nil), (*apiserver.AuthorizationConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_AuthorizationConfiguration_To_apiserver_AuthorizationConfiguration(a.(*AuthorizationConfiguration), b.(*apiserver.AuthorizationConfiguration), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiserver.AuthorizationConfiguration)(nil), (*AuthorizationConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiserver_AuthorizationConfiguration_To_v1_AuthorizationConfiguration(a.(*apiserver.AuthorizationConfiguration), b.(*AuthorizationConfiguration), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*AuthorizerConfiguration)(nil), (*apiserver.AuthorizerConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_AuthorizerConfiguration_To_apiserver_AuthorizerConfiguration(a.(*AuthorizerConfiguration), b.(*apiserver.AuthorizerConfiguration), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiserver.AuthorizerConfiguration)(nil), (*AuthorizerConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiserver_AuthorizerConfiguration_To_v1_AuthorizerConfiguration(a.(*apiserver.AuthorizerConfiguration), b.(*AuthorizerConfiguration), scope) + }); err != nil { + return err + } if err := s.AddGeneratedConversionFunc((*EncryptionConfiguration)(nil), (*apiserver.EncryptionConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1_EncryptionConfiguration_To_apiserver_EncryptionConfiguration(a.(*EncryptionConfiguration), b.(*apiserver.EncryptionConfiguration), scope) }); err != nil { @@ -137,6 +157,36 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } + if err := s.AddGeneratedConversionFunc((*WebhookConfiguration)(nil), (*apiserver.WebhookConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_WebhookConfiguration_To_apiserver_WebhookConfiguration(a.(*WebhookConfiguration), b.(*apiserver.WebhookConfiguration), scope) + }); err != nil { + return err + } + if err := 
s.AddGeneratedConversionFunc((*apiserver.WebhookConfiguration)(nil), (*WebhookConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiserver_WebhookConfiguration_To_v1_WebhookConfiguration(a.(*apiserver.WebhookConfiguration), b.(*WebhookConfiguration), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*WebhookConnectionInfo)(nil), (*apiserver.WebhookConnectionInfo)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_WebhookConnectionInfo_To_apiserver_WebhookConnectionInfo(a.(*WebhookConnectionInfo), b.(*apiserver.WebhookConnectionInfo), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiserver.WebhookConnectionInfo)(nil), (*WebhookConnectionInfo)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiserver_WebhookConnectionInfo_To_v1_WebhookConnectionInfo(a.(*apiserver.WebhookConnectionInfo), b.(*WebhookConnectionInfo), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*WebhookMatchCondition)(nil), (*apiserver.WebhookMatchCondition)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_WebhookMatchCondition_To_apiserver_WebhookMatchCondition(a.(*WebhookMatchCondition), b.(*apiserver.WebhookMatchCondition), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiserver.WebhookMatchCondition)(nil), (*WebhookMatchCondition)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiserver_WebhookMatchCondition_To_v1_WebhookMatchCondition(a.(*apiserver.WebhookMatchCondition), b.(*WebhookMatchCondition), scope) + }); err != nil { + return err + } return nil } @@ -204,6 +254,50 @@ func Convert_apiserver_AdmissionPluginConfiguration_To_v1_AdmissionPluginConfigu return autoConvert_apiserver_AdmissionPluginConfiguration_To_v1_AdmissionPluginConfiguration(in, out, s) } +func autoConvert_v1_AuthorizationConfiguration_To_apiserver_AuthorizationConfiguration(in *AuthorizationConfiguration, out *apiserver.AuthorizationConfiguration, s conversion.Scope) error { + out.Authorizers = *(*[]apiserver.AuthorizerConfiguration)(unsafe.Pointer(&in.Authorizers)) + return nil +} + +// Convert_v1_AuthorizationConfiguration_To_apiserver_AuthorizationConfiguration is an autogenerated conversion function. +func Convert_v1_AuthorizationConfiguration_To_apiserver_AuthorizationConfiguration(in *AuthorizationConfiguration, out *apiserver.AuthorizationConfiguration, s conversion.Scope) error { + return autoConvert_v1_AuthorizationConfiguration_To_apiserver_AuthorizationConfiguration(in, out, s) +} + +func autoConvert_apiserver_AuthorizationConfiguration_To_v1_AuthorizationConfiguration(in *apiserver.AuthorizationConfiguration, out *AuthorizationConfiguration, s conversion.Scope) error { + out.Authorizers = *(*[]AuthorizerConfiguration)(unsafe.Pointer(&in.Authorizers)) + return nil +} + +// Convert_apiserver_AuthorizationConfiguration_To_v1_AuthorizationConfiguration is an autogenerated conversion function. 
+func Convert_apiserver_AuthorizationConfiguration_To_v1_AuthorizationConfiguration(in *apiserver.AuthorizationConfiguration, out *AuthorizationConfiguration, s conversion.Scope) error { + return autoConvert_apiserver_AuthorizationConfiguration_To_v1_AuthorizationConfiguration(in, out, s) +} + +func autoConvert_v1_AuthorizerConfiguration_To_apiserver_AuthorizerConfiguration(in *AuthorizerConfiguration, out *apiserver.AuthorizerConfiguration, s conversion.Scope) error { + out.Type = apiserver.AuthorizerType(in.Type) + out.Name = in.Name + out.Webhook = (*apiserver.WebhookConfiguration)(unsafe.Pointer(in.Webhook)) + return nil +} + +// Convert_v1_AuthorizerConfiguration_To_apiserver_AuthorizerConfiguration is an autogenerated conversion function. +func Convert_v1_AuthorizerConfiguration_To_apiserver_AuthorizerConfiguration(in *AuthorizerConfiguration, out *apiserver.AuthorizerConfiguration, s conversion.Scope) error { + return autoConvert_v1_AuthorizerConfiguration_To_apiserver_AuthorizerConfiguration(in, out, s) +} + +func autoConvert_apiserver_AuthorizerConfiguration_To_v1_AuthorizerConfiguration(in *apiserver.AuthorizerConfiguration, out *AuthorizerConfiguration, s conversion.Scope) error { + out.Type = string(in.Type) + out.Name = in.Name + out.Webhook = (*WebhookConfiguration)(unsafe.Pointer(in.Webhook)) + return nil +} + +// Convert_apiserver_AuthorizerConfiguration_To_v1_AuthorizerConfiguration is an autogenerated conversion function. +func Convert_apiserver_AuthorizerConfiguration_To_v1_AuthorizerConfiguration(in *apiserver.AuthorizerConfiguration, out *AuthorizerConfiguration, s conversion.Scope) error { + return autoConvert_apiserver_AuthorizerConfiguration_To_v1_AuthorizerConfiguration(in, out, s) +} + func autoConvert_v1_EncryptionConfiguration_To_apiserver_EncryptionConfiguration(in *EncryptionConfiguration, out *apiserver.EncryptionConfiguration, s conversion.Scope) error { out.Resources = *(*[]apiserver.ResourceConfiguration)(unsafe.Pointer(&in.Resources)) return nil @@ -361,3 +455,83 @@ func autoConvert_apiserver_SecretboxConfiguration_To_v1_SecretboxConfiguration(i func Convert_apiserver_SecretboxConfiguration_To_v1_SecretboxConfiguration(in *apiserver.SecretboxConfiguration, out *SecretboxConfiguration, s conversion.Scope) error { return autoConvert_apiserver_SecretboxConfiguration_To_v1_SecretboxConfiguration(in, out, s) } + +func autoConvert_v1_WebhookConfiguration_To_apiserver_WebhookConfiguration(in *WebhookConfiguration, out *apiserver.WebhookConfiguration, s conversion.Scope) error { + out.AuthorizedTTL = in.AuthorizedTTL + out.UnauthorizedTTL = in.UnauthorizedTTL + out.Timeout = in.Timeout + out.SubjectAccessReviewVersion = in.SubjectAccessReviewVersion + out.MatchConditionSubjectAccessReviewVersion = in.MatchConditionSubjectAccessReviewVersion + out.FailurePolicy = in.FailurePolicy + if err := Convert_v1_WebhookConnectionInfo_To_apiserver_WebhookConnectionInfo(&in.ConnectionInfo, &out.ConnectionInfo, s); err != nil { + return err + } + out.MatchConditions = *(*[]apiserver.WebhookMatchCondition)(unsafe.Pointer(&in.MatchConditions)) + return nil +} + +// Convert_v1_WebhookConfiguration_To_apiserver_WebhookConfiguration is an autogenerated conversion function. 
+func Convert_v1_WebhookConfiguration_To_apiserver_WebhookConfiguration(in *WebhookConfiguration, out *apiserver.WebhookConfiguration, s conversion.Scope) error { + return autoConvert_v1_WebhookConfiguration_To_apiserver_WebhookConfiguration(in, out, s) +} + +func autoConvert_apiserver_WebhookConfiguration_To_v1_WebhookConfiguration(in *apiserver.WebhookConfiguration, out *WebhookConfiguration, s conversion.Scope) error { + out.AuthorizedTTL = in.AuthorizedTTL + out.UnauthorizedTTL = in.UnauthorizedTTL + out.Timeout = in.Timeout + out.SubjectAccessReviewVersion = in.SubjectAccessReviewVersion + out.MatchConditionSubjectAccessReviewVersion = in.MatchConditionSubjectAccessReviewVersion + out.FailurePolicy = in.FailurePolicy + if err := Convert_apiserver_WebhookConnectionInfo_To_v1_WebhookConnectionInfo(&in.ConnectionInfo, &out.ConnectionInfo, s); err != nil { + return err + } + out.MatchConditions = *(*[]WebhookMatchCondition)(unsafe.Pointer(&in.MatchConditions)) + return nil +} + +// Convert_apiserver_WebhookConfiguration_To_v1_WebhookConfiguration is an autogenerated conversion function. +func Convert_apiserver_WebhookConfiguration_To_v1_WebhookConfiguration(in *apiserver.WebhookConfiguration, out *WebhookConfiguration, s conversion.Scope) error { + return autoConvert_apiserver_WebhookConfiguration_To_v1_WebhookConfiguration(in, out, s) +} + +func autoConvert_v1_WebhookConnectionInfo_To_apiserver_WebhookConnectionInfo(in *WebhookConnectionInfo, out *apiserver.WebhookConnectionInfo, s conversion.Scope) error { + out.Type = in.Type + out.KubeConfigFile = (*string)(unsafe.Pointer(in.KubeConfigFile)) + return nil +} + +// Convert_v1_WebhookConnectionInfo_To_apiserver_WebhookConnectionInfo is an autogenerated conversion function. +func Convert_v1_WebhookConnectionInfo_To_apiserver_WebhookConnectionInfo(in *WebhookConnectionInfo, out *apiserver.WebhookConnectionInfo, s conversion.Scope) error { + return autoConvert_v1_WebhookConnectionInfo_To_apiserver_WebhookConnectionInfo(in, out, s) +} + +func autoConvert_apiserver_WebhookConnectionInfo_To_v1_WebhookConnectionInfo(in *apiserver.WebhookConnectionInfo, out *WebhookConnectionInfo, s conversion.Scope) error { + out.Type = in.Type + out.KubeConfigFile = (*string)(unsafe.Pointer(in.KubeConfigFile)) + return nil +} + +// Convert_apiserver_WebhookConnectionInfo_To_v1_WebhookConnectionInfo is an autogenerated conversion function. +func Convert_apiserver_WebhookConnectionInfo_To_v1_WebhookConnectionInfo(in *apiserver.WebhookConnectionInfo, out *WebhookConnectionInfo, s conversion.Scope) error { + return autoConvert_apiserver_WebhookConnectionInfo_To_v1_WebhookConnectionInfo(in, out, s) +} + +func autoConvert_v1_WebhookMatchCondition_To_apiserver_WebhookMatchCondition(in *WebhookMatchCondition, out *apiserver.WebhookMatchCondition, s conversion.Scope) error { + out.Expression = in.Expression + return nil +} + +// Convert_v1_WebhookMatchCondition_To_apiserver_WebhookMatchCondition is an autogenerated conversion function. 
+func Convert_v1_WebhookMatchCondition_To_apiserver_WebhookMatchCondition(in *WebhookMatchCondition, out *apiserver.WebhookMatchCondition, s conversion.Scope) error { + return autoConvert_v1_WebhookMatchCondition_To_apiserver_WebhookMatchCondition(in, out, s) +} + +func autoConvert_apiserver_WebhookMatchCondition_To_v1_WebhookMatchCondition(in *apiserver.WebhookMatchCondition, out *WebhookMatchCondition, s conversion.Scope) error { + out.Expression = in.Expression + return nil +} + +// Convert_apiserver_WebhookMatchCondition_To_v1_WebhookMatchCondition is an autogenerated conversion function. +func Convert_apiserver_WebhookMatchCondition_To_v1_WebhookMatchCondition(in *apiserver.WebhookMatchCondition, out *WebhookMatchCondition, s conversion.Scope) error { + return autoConvert_apiserver_WebhookMatchCondition_To_v1_WebhookMatchCondition(in, out, s) +} diff --git a/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1/zz_generated.deepcopy.go b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1/zz_generated.deepcopy.go index cbdcaa5a0..6afdbd3a2 100644 --- a/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1/zz_generated.deepcopy.go @@ -100,6 +100,59 @@ func (in *AdmissionPluginConfiguration) DeepCopy() *AdmissionPluginConfiguration return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuthorizationConfiguration) DeepCopyInto(out *AuthorizationConfiguration) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.Authorizers != nil { + in, out := &in.Authorizers, &out.Authorizers + *out = make([]AuthorizerConfiguration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthorizationConfiguration. +func (in *AuthorizationConfiguration) DeepCopy() *AuthorizationConfiguration { + if in == nil { + return nil + } + out := new(AuthorizationConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AuthorizationConfiguration) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuthorizerConfiguration) DeepCopyInto(out *AuthorizerConfiguration) { + *out = *in + if in.Webhook != nil { + in, out := &in.Webhook, &out.Webhook + *out = new(WebhookConfiguration) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthorizerConfiguration. +func (in *AuthorizerConfiguration) DeepCopy() *AuthorizerConfiguration { + if in == nil { + return nil + } + out := new(AuthorizerConfiguration) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *EncryptionConfiguration) DeepCopyInto(out *EncryptionConfiguration) { *out = *in @@ -279,3 +332,65 @@ func (in *SecretboxConfiguration) DeepCopy() *SecretboxConfiguration { in.DeepCopyInto(out) return out } + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WebhookConfiguration) DeepCopyInto(out *WebhookConfiguration) { + *out = *in + out.AuthorizedTTL = in.AuthorizedTTL + out.UnauthorizedTTL = in.UnauthorizedTTL + out.Timeout = in.Timeout + in.ConnectionInfo.DeepCopyInto(&out.ConnectionInfo) + if in.MatchConditions != nil { + in, out := &in.MatchConditions, &out.MatchConditions + *out = make([]WebhookMatchCondition, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebhookConfiguration. +func (in *WebhookConfiguration) DeepCopy() *WebhookConfiguration { + if in == nil { + return nil + } + out := new(WebhookConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WebhookConnectionInfo) DeepCopyInto(out *WebhookConnectionInfo) { + *out = *in + if in.KubeConfigFile != nil { + in, out := &in.KubeConfigFile, &out.KubeConfigFile + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebhookConnectionInfo. +func (in *WebhookConnectionInfo) DeepCopy() *WebhookConnectionInfo { + if in == nil { + return nil + } + out := new(WebhookConnectionInfo) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WebhookMatchCondition) DeepCopyInto(out *WebhookMatchCondition) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebhookMatchCondition. +func (in *WebhookMatchCondition) DeepCopy() *WebhookMatchCondition { + if in == nil { + return nil + } + out := new(WebhookMatchCondition) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1/zz_generated.defaults.go b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1/zz_generated.defaults.go index 82fec0111..4c8189b13 100644 --- a/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1/zz_generated.defaults.go +++ b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1/zz_generated.defaults.go @@ -29,10 +29,20 @@ import ( // Public to allow building arbitrary schemes. // All generated defaulters are covering - they call all nested defaulters. 
func RegisterDefaults(scheme *runtime.Scheme) error { + scheme.AddTypeDefaultingFunc(&AuthorizationConfiguration{}, func(obj interface{}) { SetObjectDefaults_AuthorizationConfiguration(obj.(*AuthorizationConfiguration)) }) scheme.AddTypeDefaultingFunc(&EncryptionConfiguration{}, func(obj interface{}) { SetObjectDefaults_EncryptionConfiguration(obj.(*EncryptionConfiguration)) }) return nil } +func SetObjectDefaults_AuthorizationConfiguration(in *AuthorizationConfiguration) { + for i := range in.Authorizers { + a := &in.Authorizers[i] + if a.Webhook != nil { + SetDefaults_WebhookConfiguration(a.Webhook) + } + } +} + func SetObjectDefaults_EncryptionConfiguration(in *EncryptionConfiguration) { for i := range in.Resources { a := &in.Resources[i] diff --git a/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/types.go b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/types.go index 214ef4e4f..dee2c115a 100644 --- a/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/types.go +++ b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/types.go @@ -615,6 +615,13 @@ type WebhookMatchCondition struct { // If version specified by subjectAccessReviewVersion in the request variable is v1beta1, // the contents would be converted to the v1 version before evaluating the CEL expression. // + // - 'resourceAttributes' describes information for a resource access request and is unset for non-resource requests. e.g. has(request.resourceAttributes) && request.resourceAttributes.namespace == 'default' + // - 'nonResourceAttributes' describes information for a non-resource access request and is unset for resource requests. e.g. has(request.nonResourceAttributes) && request.nonResourceAttributes.path == '/healthz'. + // - 'user' is the user to test for. e.g. request.user == 'alice' + // - 'groups' is the groups to test for. e.g. ('group1' in request.groups) + // - 'extra' corresponds to the user.Info.GetExtra() method from the authenticator. + // - 'uid' is the information about the requesting user. e.g. request.uid == '1' + // // Documentation on CEL: https://kubernetes.io/docs/reference/using-api/cel/ Expression string `json:"expression"` } diff --git a/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1beta1/types.go b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1beta1/types.go index 570f3c468..a0e13593b 100644 --- a/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1beta1/types.go +++ b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1beta1/types.go @@ -586,6 +586,13 @@ type WebhookMatchCondition struct { // If version specified by subjectAccessReviewVersion in the request variable is v1beta1, // the contents would be converted to the v1 version before evaluating the CEL expression. // + // - 'resourceAttributes' describes information for a resource access request and is unset for non-resource requests. e.g. has(request.resourceAttributes) && request.resourceAttributes.namespace == 'default' + // - 'nonResourceAttributes' describes information for a non-resource access request and is unset for resource requests. e.g. has(request.nonResourceAttributes) && request.nonResourceAttributes.path == '/healthz'. + // - 'user' is the user to test for. e.g. request.user == 'alice' + // - 'groups' is the groups to test for. e.g. ('group1' in request.groups) + // - 'extra' corresponds to the user.Info.GetExtra() method from the authenticator. + // - 'uid' is the information about the requesting user. e.g. 
request.uid == '1' + // // Documentation on CEL: https://kubernetes.io/docs/reference/using-api/cel/ Expression string `json:"expression"` } diff --git a/vendor/k8s.io/apiserver/pkg/apis/apiserver/validation/validation.go b/vendor/k8s.io/apiserver/pkg/apis/apiserver/validation/validation.go index 5e80e2dc6..ac5f0f34f 100644 --- a/vendor/k8s.io/apiserver/pkg/apis/apiserver/validation/validation.go +++ b/vendor/k8s.io/apiserver/pkg/apis/apiserver/validation/validation.go @@ -38,14 +38,13 @@ import ( authenticationcel "k8s.io/apiserver/pkg/authentication/cel" authorizationcel "k8s.io/apiserver/pkg/authorization/cel" "k8s.io/apiserver/pkg/cel" - "k8s.io/apiserver/pkg/cel/environment" "k8s.io/apiserver/pkg/features" utilfeature "k8s.io/apiserver/pkg/util/feature" "k8s.io/client-go/util/cert" ) // ValidateAuthenticationConfiguration validates a given AuthenticationConfiguration. -func ValidateAuthenticationConfiguration(c *api.AuthenticationConfiguration, disallowedIssuers []string) field.ErrorList { +func ValidateAuthenticationConfiguration(compiler authenticationcel.Compiler, c *api.AuthenticationConfiguration, disallowedIssuers []string) field.ErrorList { root := field.NewPath("jwt") var allErrs field.ErrorList @@ -62,7 +61,7 @@ func ValidateAuthenticationConfiguration(c *api.AuthenticationConfiguration, dis seenDiscoveryURLs := sets.New[string]() for i, a := range c.JWT { fldPath := root.Index(i) - _, errs := validateJWTAuthenticator(a, fldPath, sets.New(disallowedIssuers...), utilfeature.DefaultFeatureGate.Enabled(features.StructuredAuthenticationConfiguration)) + _, errs := validateJWTAuthenticator(compiler, a, fldPath, sets.New(disallowedIssuers...), utilfeature.DefaultFeatureGate.Enabled(features.StructuredAuthenticationConfiguration)) allErrs = append(allErrs, errs...) if seenIssuers.Has(a.Issuer.URL) { @@ -93,18 +92,16 @@ func ValidateAuthenticationConfiguration(c *api.AuthenticationConfiguration, dis // CompileAndValidateJWTAuthenticator validates a given JWTAuthenticator and returns a CELMapper with the compiled // CEL expressions for claim mappings and validation rules. // This is exported for use in oidc package. -func CompileAndValidateJWTAuthenticator(authenticator api.JWTAuthenticator, disallowedIssuers []string) (authenticationcel.CELMapper, field.ErrorList) { - return validateJWTAuthenticator(authenticator, nil, sets.New(disallowedIssuers...), utilfeature.DefaultFeatureGate.Enabled(features.StructuredAuthenticationConfiguration)) +func CompileAndValidateJWTAuthenticator(compiler authenticationcel.Compiler, authenticator api.JWTAuthenticator, disallowedIssuers []string) (authenticationcel.CELMapper, field.ErrorList) { + return validateJWTAuthenticator(compiler, authenticator, nil, sets.New(disallowedIssuers...), utilfeature.DefaultFeatureGate.Enabled(features.StructuredAuthenticationConfiguration)) } -func validateJWTAuthenticator(authenticator api.JWTAuthenticator, fldPath *field.Path, disallowedIssuers sets.Set[string], structuredAuthnFeatureEnabled bool) (authenticationcel.CELMapper, field.ErrorList) { +func validateJWTAuthenticator(compiler authenticationcel.Compiler, authenticator api.JWTAuthenticator, fldPath *field.Path, disallowedIssuers sets.Set[string], structuredAuthnFeatureEnabled bool) (authenticationcel.CELMapper, field.ErrorList) { var allErrs field.ErrorList - // strictCost is set to true which enables the strict cost for CEL validation. 
- compiler := authenticationcel.NewCompiler(environment.MustBaseEnvSet(environment.DefaultCompatibilityVersion(), true)) state := &validationState{} - allErrs = append(allErrs, validateIssuer(authenticator.Issuer, disallowedIssuers, fldPath.Child("issuer"))...) + allErrs = append(allErrs, validateIssuer(authenticator.Issuer, disallowedIssuers, fldPath.Child("issuer"), structuredAuthnFeatureEnabled)...) allErrs = append(allErrs, validateClaimValidationRules(compiler, state, authenticator.ClaimValidationRules, fldPath.Child("claimValidationRules"), structuredAuthnFeatureEnabled)...) allErrs = append(allErrs, validateClaimMappings(compiler, state, authenticator.ClaimMappings, fldPath.Child("claimMappings"), structuredAuthnFeatureEnabled)...) allErrs = append(allErrs, validateUserValidationRules(compiler, state, authenticator.UserValidationRules, fldPath.Child("userValidationRules"), structuredAuthnFeatureEnabled)...) @@ -118,12 +115,12 @@ type validationState struct { usesEmailVerifiedClaim bool } -func validateIssuer(issuer api.Issuer, disallowedIssuers sets.Set[string], fldPath *field.Path) field.ErrorList { +func validateIssuer(issuer api.Issuer, disallowedIssuers sets.Set[string], fldPath *field.Path, structuredAuthnFeatureEnabled bool) field.ErrorList { var allErrs field.ErrorList allErrs = append(allErrs, validateIssuerURL(issuer.URL, disallowedIssuers, fldPath.Child("url"))...) - allErrs = append(allErrs, validateIssuerDiscoveryURL(issuer.URL, issuer.DiscoveryURL, fldPath.Child("discoveryURL"))...) - allErrs = append(allErrs, validateAudiences(issuer.Audiences, issuer.AudienceMatchPolicy, fldPath.Child("audiences"), fldPath.Child("audienceMatchPolicy"))...) + allErrs = append(allErrs, validateIssuerDiscoveryURL(issuer.URL, issuer.DiscoveryURL, fldPath.Child("discoveryURL"), structuredAuthnFeatureEnabled)...) + allErrs = append(allErrs, validateAudiences(issuer.Audiences, issuer.AudienceMatchPolicy, fldPath.Child("audiences"), fldPath.Child("audienceMatchPolicy"), structuredAuthnFeatureEnabled)...) allErrs = append(allErrs, validateCertificateAuthority(issuer.CertificateAuthority, fldPath.Child("certificateAuthority"))...) 
return allErrs @@ -137,13 +134,17 @@ func validateIssuerURL(issuerURL string, disallowedIssuers sets.Set[string], fld return validateURL(issuerURL, disallowedIssuers, fldPath) } -func validateIssuerDiscoveryURL(issuerURL, issuerDiscoveryURL string, fldPath *field.Path) field.ErrorList { +func validateIssuerDiscoveryURL(issuerURL, issuerDiscoveryURL string, fldPath *field.Path, structuredAuthnFeatureEnabled bool) field.ErrorList { var allErrs field.ErrorList if len(issuerDiscoveryURL) == 0 { return nil } + if !structuredAuthnFeatureEnabled { + allErrs = append(allErrs, field.Invalid(fldPath, issuerDiscoveryURL, "discoveryURL is not supported when StructuredAuthenticationConfiguration feature gate is disabled")) + } + if len(issuerURL) > 0 && strings.TrimRight(issuerURL, "/") == strings.TrimRight(issuerDiscoveryURL, "/") { allErrs = append(allErrs, field.Invalid(fldPath, issuerDiscoveryURL, "discoveryURL must be different from URL")) } @@ -181,7 +182,7 @@ func validateURL(issuerURL string, disallowedIssuers sets.Set[string], fldPath * return allErrs } -func validateAudiences(audiences []string, audienceMatchPolicy api.AudienceMatchPolicyType, fldPath, audienceMatchPolicyFldPath *field.Path) field.ErrorList { +func validateAudiences(audiences []string, audienceMatchPolicy api.AudienceMatchPolicyType, fldPath, audienceMatchPolicyFldPath *field.Path, structuredAuthnFeatureEnabled bool) field.ErrorList { var allErrs field.ErrorList if len(audiences) == 0 { @@ -189,6 +190,10 @@ func validateAudiences(audiences []string, audienceMatchPolicy api.AudienceMatch return allErrs } + if len(audiences) > 1 && !structuredAuthnFeatureEnabled { + allErrs = append(allErrs, field.Invalid(fldPath, audiences, "multiple audiences are not supported when StructuredAuthenticationConfiguration feature gate is disabled")) + } + seenAudiences := sets.NewString() for i, audience := range audiences { fldPath := fldPath.Index(i) @@ -347,6 +352,11 @@ func validateClaimMappings(compiler authenticationcel.Compiler, state *validatio if mapping.Key != strings.ToLower(mapping.Key) { allErrs = append(allErrs, field.Invalid(fldPath.Child("key"), mapping.Key, "key must be lowercase")) } + + if isKubernetesDomainPrefix(mapping.Key) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("key"), mapping.Key, "k8s.io, kubernetes.io and their subdomains are reserved for Kubernetes use")) + } + if seenExtraKeys.Has(mapping.Key) { allErrs = append(allErrs, field.Duplicate(fldPath.Child("key"), mapping.Key)) continue @@ -386,6 +396,24 @@ func validateClaimMappings(compiler authenticationcel.Compiler, state *validatio return allErrs } +func isKubernetesDomainPrefix(key string) bool { + domainPrefix := getDomainPrefix(key) + if domainPrefix == "kubernetes.io" || strings.HasSuffix(domainPrefix, ".kubernetes.io") { + return true + } + if domainPrefix == "k8s.io" || strings.HasSuffix(domainPrefix, ".k8s.io") { + return true + } + return false +} + +func getDomainPrefix(key string) string { + if parts := strings.SplitN(key, "/", 2); len(parts) == 2 { + return parts[0] + } + return "" +} + func usesEmailClaim(ast *celgo.Ast) bool { return hasSelectExp(ast.Expr(), "claims", "email") } @@ -585,7 +613,7 @@ func compileUserCELExpression(compiler authenticationcel.Compiler, expression au } // ValidateAuthorizationConfiguration validates a given AuthorizationConfiguration. 
-func ValidateAuthorizationConfiguration(fldPath *field.Path, c *api.AuthorizationConfiguration, knownTypes sets.String, repeatableTypes sets.String) field.ErrorList { +func ValidateAuthorizationConfiguration(compiler authorizationcel.Compiler, fldPath *field.Path, c *api.AuthorizationConfiguration, knownTypes sets.Set[string], repeatableTypes sets.Set[string]) field.ErrorList { allErrs := field.ErrorList{} if len(c.Authorizers) == 0 { @@ -602,7 +630,7 @@ func ValidateAuthorizationConfiguration(fldPath *field.Path, c *api.Authorizatio continue } if !knownTypes.Has(aType) { - allErrs = append(allErrs, field.NotSupported(fldPath.Child("type"), aType, knownTypes.List())) + allErrs = append(allErrs, field.NotSupported(fldPath.Child("type"), aType, sets.List(knownTypes))) continue } if seenAuthorizerTypes.Has(aType) && !repeatableTypes.Has(aType) { @@ -626,7 +654,7 @@ func ValidateAuthorizationConfiguration(fldPath *field.Path, c *api.Authorizatio allErrs = append(allErrs, field.Required(fldPath.Child("webhook"), "required when type=Webhook")) continue } - allErrs = append(allErrs, ValidateWebhookConfiguration(fldPath, a.Webhook)...) + allErrs = append(allErrs, ValidateWebhookConfiguration(compiler, fldPath, a.Webhook)...) default: if a.Webhook != nil { allErrs = append(allErrs, field.Invalid(fldPath.Child("webhook"), "non-null", "may only be specified when type=Webhook")) @@ -637,7 +665,7 @@ func ValidateAuthorizationConfiguration(fldPath *field.Path, c *api.Authorizatio return allErrs } -func ValidateWebhookConfiguration(fldPath *field.Path, c *api.WebhookConfiguration) field.ErrorList { +func ValidateWebhookConfiguration(compiler authorizationcel.Compiler, fldPath *field.Path, c *api.WebhookConfiguration) field.ErrorList { allErrs := field.ErrorList{} if c.Timeout.Duration == 0 { @@ -709,7 +737,7 @@ func ValidateWebhookConfiguration(fldPath *field.Path, c *api.WebhookConfigurati allErrs = append(allErrs, field.NotSupported(fldPath.Child("connectionInfo", "type"), c.ConnectionInfo, []string{api.AuthorizationWebhookConnectionInfoTypeInCluster, api.AuthorizationWebhookConnectionInfoTypeKubeConfigFile})) } - _, errs := compileMatchConditions(c.MatchConditions, fldPath, utilfeature.DefaultFeatureGate.Enabled(features.StructuredAuthorizationConfiguration)) + _, errs := compileMatchConditions(compiler, c.MatchConditions, fldPath, utilfeature.DefaultFeatureGate.Enabled(features.StructuredAuthorizationConfiguration)) allErrs = append(allErrs, errs...) return allErrs @@ -717,11 +745,11 @@ func ValidateWebhookConfiguration(fldPath *field.Path, c *api.WebhookConfigurati // ValidateAndCompileMatchConditions validates a given webhook's matchConditions. // This is exported for use in authz package. 
-func ValidateAndCompileMatchConditions(matchConditions []api.WebhookMatchCondition) (*authorizationcel.CELMatcher, field.ErrorList) { - return compileMatchConditions(matchConditions, nil, utilfeature.DefaultFeatureGate.Enabled(features.StructuredAuthorizationConfiguration)) +func ValidateAndCompileMatchConditions(compiler authorizationcel.Compiler, matchConditions []api.WebhookMatchCondition) (*authorizationcel.CELMatcher, field.ErrorList) { + return compileMatchConditions(compiler, matchConditions, nil, utilfeature.DefaultFeatureGate.Enabled(features.StructuredAuthorizationConfiguration)) } -func compileMatchConditions(matchConditions []api.WebhookMatchCondition, fldPath *field.Path, structuredAuthzFeatureEnabled bool) (*authorizationcel.CELMatcher, field.ErrorList) { +func compileMatchConditions(compiler authorizationcel.Compiler, matchConditions []api.WebhookMatchCondition, fldPath *field.Path, structuredAuthzFeatureEnabled bool) (*authorizationcel.CELMatcher, field.ErrorList) { var allErrs field.ErrorList // should fail when match conditions are used without feature enabled if len(matchConditions) > 0 && !structuredAuthzFeatureEnabled { @@ -732,8 +760,6 @@ func compileMatchConditions(matchConditions []api.WebhookMatchCondition, fldPath return nil, allErrs } - // strictCost is set to true which enables the strict cost for CEL validation. - compiler := authorizationcel.NewCompiler(environment.MustBaseEnvSet(environment.DefaultCompatibilityVersion(), true)) seenExpressions := sets.NewString() var compilationResults []authorizationcel.CompilationResult var usesFieldSelector, usesLabelSelector bool diff --git a/vendor/k8s.io/apiserver/pkg/authentication/authenticatorfactory/delegating.go b/vendor/k8s.io/apiserver/pkg/authentication/authenticatorfactory/delegating.go index 76ef44732..b74b8b0d4 100644 --- a/vendor/k8s.io/apiserver/pkg/authentication/authenticatorfactory/delegating.go +++ b/vendor/k8s.io/apiserver/pkg/authentication/authenticatorfactory/delegating.go @@ -77,6 +77,7 @@ func (c DelegatingAuthenticatorConfig) New() (authenticator.Request, *spec.Secur c.RequestHeaderConfig.CAContentProvider.VerifyOptions, c.RequestHeaderConfig.AllowedClientNames, c.RequestHeaderConfig.UsernameHeaders, + c.RequestHeaderConfig.UIDHeaders, c.RequestHeaderConfig.GroupHeaders, c.RequestHeaderConfig.ExtraHeaderPrefixes, ) diff --git a/vendor/k8s.io/apiserver/pkg/authentication/authenticatorfactory/requestheader.go b/vendor/k8s.io/apiserver/pkg/authentication/authenticatorfactory/requestheader.go index 766bde517..f217b94ef 100644 --- a/vendor/k8s.io/apiserver/pkg/authentication/authenticatorfactory/requestheader.go +++ b/vendor/k8s.io/apiserver/pkg/authentication/authenticatorfactory/requestheader.go @@ -24,6 +24,8 @@ import ( type RequestHeaderConfig struct { // UsernameHeaders are the headers to check (in order, case-insensitively) for an identity. The first header with a value wins. UsernameHeaders headerrequest.StringSliceProvider + // UsernameHeaders are the headers to check (in order, case-insensitively) for an identity UID. The first header with a value wins. + UIDHeaders headerrequest.StringSliceProvider // GroupHeaders are the headers to check (case-insensitively) for a group names. All values will be used. 
GroupHeaders headerrequest.StringSliceProvider // ExtraHeaderPrefixes are the head prefixes to check (case-insentively) for filling in diff --git a/vendor/k8s.io/apiserver/pkg/authentication/cel/compile.go b/vendor/k8s.io/apiserver/pkg/authentication/cel/compile.go index 5550955af..8c74e7ad4 100644 --- a/vendor/k8s.io/apiserver/pkg/authentication/cel/compile.go +++ b/vendor/k8s.io/apiserver/pkg/authentication/cel/compile.go @@ -39,6 +39,12 @@ type compiler struct { varEnvs map[string]*environment.EnvSet } +// NewDefaultCompiler returns a new Compiler following the default compatibility version. +// Note: the compiler construction depends on feature gates and the compatibility version to be initialized. +func NewDefaultCompiler() Compiler { + return NewCompiler(environment.MustBaseEnvSet(environment.DefaultCompatibilityVersion(), true)) +} + // NewCompiler returns a new Compiler. func NewCompiler(env *environment.EnvSet) Compiler { return &compiler{ diff --git a/vendor/k8s.io/apiserver/pkg/authentication/request/headerrequest/requestheader.go b/vendor/k8s.io/apiserver/pkg/authentication/request/headerrequest/requestheader.go index 182616393..57bf9ca30 100644 --- a/vendor/k8s.io/apiserver/pkg/authentication/request/headerrequest/requestheader.go +++ b/vendor/k8s.io/apiserver/pkg/authentication/request/headerrequest/requestheader.go @@ -53,6 +53,9 @@ type requestHeaderAuthRequestHandler struct { // nameHeaders are the headers to check (in order, case-insensitively) for an identity. The first header with a value wins. nameHeaders StringSliceProvider + // nameHeaders are the headers to check (in order, case-insensitively) for an identity UID. The first header with a value wins. + uidHeaders StringSliceProvider + // groupHeaders are the headers to check (case-insensitively) for group membership. All values of all headers will be added. groupHeaders StringSliceProvider @@ -61,11 +64,15 @@ type requestHeaderAuthRequestHandler struct { extraHeaderPrefixes StringSliceProvider } -func New(nameHeaders, groupHeaders, extraHeaderPrefixes []string) (authenticator.Request, error) { +func New(nameHeaders, uidHeaders, groupHeaders, extraHeaderPrefixes []string) (authenticator.Request, error) { trimmedNameHeaders, err := trimHeaders(nameHeaders...) if err != nil { return nil, err } + trimmedUIDHeaders, err := trimHeaders(uidHeaders...) + if err != nil { + return nil, err + } trimmedGroupHeaders, err := trimHeaders(groupHeaders...) 
if err != nil { return nil, err @@ -77,14 +84,16 @@ func New(nameHeaders, groupHeaders, extraHeaderPrefixes []string) (authenticator return NewDynamic( StaticStringSlice(trimmedNameHeaders), + StaticStringSlice(trimmedUIDHeaders), StaticStringSlice(trimmedGroupHeaders), StaticStringSlice(trimmedExtraHeaderPrefixes), ), nil } -func NewDynamic(nameHeaders, groupHeaders, extraHeaderPrefixes StringSliceProvider) authenticator.Request { +func NewDynamic(nameHeaders, uidHeaders, groupHeaders, extraHeaderPrefixes StringSliceProvider) authenticator.Request { return &requestHeaderAuthRequestHandler{ nameHeaders: nameHeaders, + uidHeaders: uidHeaders, groupHeaders: groupHeaders, extraHeaderPrefixes: extraHeaderPrefixes, } @@ -103,8 +112,8 @@ func trimHeaders(headerNames ...string) ([]string, error) { return ret, nil } -func NewDynamicVerifyOptionsSecure(verifyOptionFn x509request.VerifyOptionFunc, proxyClientNames, nameHeaders, groupHeaders, extraHeaderPrefixes StringSliceProvider) authenticator.Request { - headerAuthenticator := NewDynamic(nameHeaders, groupHeaders, extraHeaderPrefixes) +func NewDynamicVerifyOptionsSecure(verifyOptionFn x509request.VerifyOptionFunc, proxyClientNames, nameHeaders, uidHeaders, groupHeaders, extraHeaderPrefixes StringSliceProvider) authenticator.Request { + headerAuthenticator := NewDynamic(nameHeaders, uidHeaders, groupHeaders, extraHeaderPrefixes) return x509request.NewDynamicCAVerifier(verifyOptionFn, headerAuthenticator, proxyClientNames) } @@ -114,25 +123,30 @@ func (a *requestHeaderAuthRequestHandler) AuthenticateRequest(req *http.Request) if len(name) == 0 { return nil, false, nil } + uid := headerValue(req.Header, a.uidHeaders.Value()) groups := allHeaderValues(req.Header, a.groupHeaders.Value()) extra := newExtra(req.Header, a.extraHeaderPrefixes.Value()) // clear headers used for authentication - ClearAuthenticationHeaders(req.Header, a.nameHeaders, a.groupHeaders, a.extraHeaderPrefixes) + ClearAuthenticationHeaders(req.Header, a.nameHeaders, a.uidHeaders, a.groupHeaders, a.extraHeaderPrefixes) return &authenticator.Response{ User: &user.DefaultInfo{ Name: name, + UID: uid, Groups: groups, Extra: extra, }, }, true, nil } -func ClearAuthenticationHeaders(h http.Header, nameHeaders, groupHeaders, extraHeaderPrefixes StringSliceProvider) { +func ClearAuthenticationHeaders(h http.Header, nameHeaders, uidHeaders, groupHeaders, extraHeaderPrefixes StringSliceProvider) { for _, headerName := range nameHeaders.Value() { h.Del(headerName) } + for _, headerName := range uidHeaders.Value() { + h.Del(headerName) + } for _, headerName := range groupHeaders.Value() { h.Del(headerName) } diff --git a/vendor/k8s.io/apiserver/pkg/authentication/request/headerrequest/requestheader_controller.go b/vendor/k8s.io/apiserver/pkg/authentication/request/headerrequest/requestheader_controller.go index dc844ee73..38d6cbe71 100644 --- a/vendor/k8s.io/apiserver/pkg/authentication/request/headerrequest/requestheader_controller.go +++ b/vendor/k8s.io/apiserver/pkg/authentication/request/headerrequest/requestheader_controller.go @@ -45,6 +45,7 @@ const ( // RequestHeaderAuthRequestProvider a provider that knows how to dynamically fill parts of RequestHeaderConfig struct type RequestHeaderAuthRequestProvider interface { UsernameHeaders() []string + UIDHeaders() []string GroupHeaders() []string ExtraHeaderPrefixes() []string AllowedClientNames() []string @@ -54,6 +55,7 @@ var _ RequestHeaderAuthRequestProvider = &RequestHeaderAuthRequestController{} type requestHeaderBundle struct { 
UsernameHeaders []string + UIDHeaders []string GroupHeaders []string ExtraHeaderPrefixes []string AllowedClientNames []string @@ -80,6 +82,7 @@ type RequestHeaderAuthRequestController struct { exportedRequestHeaderBundle atomic.Value usernameHeadersKey string + uidHeadersKey string groupHeadersKey string extraHeaderPrefixesKey string allowedClientNamesKey string @@ -90,7 +93,7 @@ func NewRequestHeaderAuthRequestController( cmName string, cmNamespace string, client kubernetes.Interface, - usernameHeadersKey, groupHeadersKey, extraHeaderPrefixesKey, allowedClientNamesKey string) *RequestHeaderAuthRequestController { + usernameHeadersKey, uidHeadersKey, groupHeadersKey, extraHeaderPrefixesKey, allowedClientNamesKey string) *RequestHeaderAuthRequestController { c := &RequestHeaderAuthRequestController{ name: "RequestHeaderAuthRequestController", @@ -100,6 +103,7 @@ func NewRequestHeaderAuthRequestController( configmapNamespace: cmNamespace, usernameHeadersKey: usernameHeadersKey, + uidHeadersKey: uidHeadersKey, groupHeadersKey: groupHeadersKey, extraHeaderPrefixesKey: extraHeaderPrefixesKey, allowedClientNamesKey: allowedClientNamesKey, @@ -152,6 +156,10 @@ func (c *RequestHeaderAuthRequestController) UsernameHeaders() []string { return c.loadRequestHeaderFor(c.usernameHeadersKey) } +func (c *RequestHeaderAuthRequestController) UIDHeaders() []string { + return c.loadRequestHeaderFor(c.uidHeadersKey) +} + func (c *RequestHeaderAuthRequestController) GroupHeaders() []string { return c.loadRequestHeaderFor(c.groupHeadersKey) } @@ -278,6 +286,11 @@ func (c *RequestHeaderAuthRequestController) getRequestHeaderBundleFromConfigMap return nil, err } + uidHeaderCurrentValue, err := deserializeStrings(cm.Data[c.uidHeadersKey]) + if err != nil { + return nil, err + } + groupHeadersCurrentValue, err := deserializeStrings(cm.Data[c.groupHeadersKey]) if err != nil { return nil, err @@ -296,6 +309,7 @@ func (c *RequestHeaderAuthRequestController) getRequestHeaderBundleFromConfigMap return &requestHeaderBundle{ UsernameHeaders: usernameHeaderCurrentValue, + UIDHeaders: uidHeaderCurrentValue, GroupHeaders: groupHeadersCurrentValue, ExtraHeaderPrefixes: extraHeaderPrefixesCurrentValue, AllowedClientNames: allowedClientNamesCurrentValue, @@ -312,6 +326,8 @@ func (c *RequestHeaderAuthRequestController) loadRequestHeaderFor(key string) [] switch key { case c.usernameHeadersKey: return headerBundle.UsernameHeaders + case c.uidHeadersKey: + return headerBundle.UIDHeaders case c.groupHeadersKey: return headerBundle.GroupHeaders case c.extraHeaderPrefixesKey: diff --git a/vendor/k8s.io/apiserver/pkg/authentication/request/x509/x509.go b/vendor/k8s.io/apiserver/pkg/authentication/request/x509/x509.go index d67c53547..bdc1b1790 100644 --- a/vendor/k8s.io/apiserver/pkg/authentication/request/x509/x509.go +++ b/vendor/k8s.io/apiserver/pkg/authentication/request/x509/x509.go @@ -17,6 +17,7 @@ limitations under the License. 
package x509 import ( + "crypto/sha256" "crypto/x509" "crypto/x509/pkix" "encoding/hex" @@ -276,10 +277,17 @@ var CommonNameUserConversion = UserConversionFunc(func(chain []*x509.Certificate if len(chain[0].Subject.CommonName) == 0 { return nil, false, nil } + + fp := sha256.Sum256(chain[0].Raw) + id := "X509SHA256=" + hex.EncodeToString(fp[:]) + return &authenticator.Response{ User: &user.DefaultInfo{ Name: chain[0].Subject.CommonName, Groups: chain[0].Subject.Organization, + Extra: map[string][]string{ + user.CredentialIDKey: {id}, + }, }, }, true, nil }) diff --git a/vendor/k8s.io/apiserver/pkg/authentication/serviceaccount/util.go b/vendor/k8s.io/apiserver/pkg/authentication/serviceaccount/util.go index 3e29d4e71..dd11efbde 100644 --- a/vendor/k8s.io/apiserver/pkg/authentication/serviceaccount/util.go +++ b/vendor/k8s.io/apiserver/pkg/authentication/serviceaccount/util.go @@ -17,18 +17,12 @@ limitations under the License. package serviceaccount import ( - "context" "fmt" "strings" v1 "k8s.io/api/core/v1" - apierrors "k8s.io/apimachinery/pkg/api/errors" apimachineryvalidation "k8s.io/apimachinery/pkg/api/validation" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apiserver/pkg/authentication/user" - v1core "k8s.io/client-go/kubernetes/typed/core/v1" - - "k8s.io/klog/v2" ) const ( @@ -36,9 +30,6 @@ const ( ServiceAccountUsernameSeparator = ":" ServiceAccountGroupPrefix = "system:serviceaccounts:" AllServiceAccountsGroup = "system:serviceaccounts" - // CredentialIDKey is the key used in a user's "extra" to specify the unique - // identifier for this identity document). - CredentialIDKey = "authentication.kubernetes.io/credential-id" // IssuedCredentialIDAuditAnnotationKey is the annotation key used in the audit event that is persisted to the // '/token' endpoint for service accounts. // This annotation indicates the generated credential identifier for the service account token being issued. @@ -156,7 +147,7 @@ func (sa *ServiceAccountInfo) UserInfo() user.Info { if info.Extra == nil { info.Extra = make(map[string][]string) } - info.Extra[CredentialIDKey] = []string{sa.CredentialID} + info.Extra[user.CredentialIDKey] = []string{sa.CredentialID} } if sa.NodeName != "" { if info.Extra == nil { @@ -172,15 +163,6 @@ func (sa *ServiceAccountInfo) UserInfo() user.Info { return info } -// CredentialIDForJTI converts a given JTI string into a credential identifier for use in a -// users 'extra' info. -func CredentialIDForJTI(jti string) string { - if len(jti) == 0 { - return "" - } - return "JTI=" + jti -} - // IsServiceAccountToken returns true if the secret is a valid api token for the service account func IsServiceAccountToken(secret *v1.Secret, sa *v1.ServiceAccount) bool { if secret.Type != v1.SecretTypeServiceAccountToken { @@ -200,29 +182,3 @@ func IsServiceAccountToken(secret *v1.Secret, sa *v1.ServiceAccount) bool { return true } - -func GetOrCreateServiceAccount(coreClient v1core.CoreV1Interface, namespace, name string) (*v1.ServiceAccount, error) { - sa, err := coreClient.ServiceAccounts(namespace).Get(context.TODO(), name, metav1.GetOptions{}) - if err == nil { - return sa, nil - } - if !apierrors.IsNotFound(err) { - return nil, err - } - - // Create the namespace if we can't verify it exists. - // Tolerate errors, since we don't know whether this component has namespace creation permissions. 
- if _, err := coreClient.Namespaces().Get(context.TODO(), namespace, metav1.GetOptions{}); apierrors.IsNotFound(err) { - if _, err = coreClient.Namespaces().Create(context.TODO(), &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}}, metav1.CreateOptions{}); err != nil && !apierrors.IsAlreadyExists(err) { - klog.Warningf("create non-exist namespace %s failed:%v", namespace, err) - } - } - - // Create the service account - sa, err = coreClient.ServiceAccounts(namespace).Create(context.TODO(), &v1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Namespace: namespace, Name: name}}, metav1.CreateOptions{}) - if apierrors.IsAlreadyExists(err) { - // If we're racing to init and someone else already created it, re-fetch - return coreClient.ServiceAccounts(namespace).Get(context.TODO(), name, metav1.GetOptions{}) - } - return sa, err -} diff --git a/vendor/k8s.io/apiserver/pkg/authentication/user/user.go b/vendor/k8s.io/apiserver/pkg/authentication/user/user.go index 4d6ec0980..1af6f2b27 100644 --- a/vendor/k8s.io/apiserver/pkg/authentication/user/user.go +++ b/vendor/k8s.io/apiserver/pkg/authentication/user/user.go @@ -66,8 +66,8 @@ func (i *DefaultInfo) GetExtra() map[string][]string { return i.Extra } -// well-known user and group names const ( + // well-known user and group names SystemPrivilegedGroup = "system:masters" NodesGroup = "system:nodes" MonitoringGroup = "system:monitoring" @@ -81,4 +81,8 @@ const ( KubeProxy = "system:kube-proxy" KubeControllerManager = "system:kube-controller-manager" KubeScheduler = "system:kube-scheduler" + + // CredentialIDKey is the key used in a user's "extra" to specify the unique + // identifier for this identity document). + CredentialIDKey = "authentication.kubernetes.io/credential-id" ) diff --git a/vendor/k8s.io/apiserver/pkg/authorization/authorizer/interfaces.go b/vendor/k8s.io/apiserver/pkg/authorization/authorizer/interfaces.go index d39deb17e..2f5f65e22 100644 --- a/vendor/k8s.io/apiserver/pkg/authorization/authorizer/interfaces.go +++ b/vendor/k8s.io/apiserver/pkg/authorization/authorizer/interfaces.go @@ -92,7 +92,7 @@ func (f AuthorizerFunc) Authorize(ctx context.Context, a Attributes) (Decision, // RuleResolver provides a mechanism for resolving the list of rules that apply to a given user within a namespace. type RuleResolver interface { // RulesFor get the list of cluster wide rules, the list of rules in the specific namespace, incomplete status and errors. 
- RulesFor(user user.Info, namespace string) ([]ResourceRuleInfo, []NonResourceRuleInfo, bool, error) + RulesFor(ctx context.Context, user user.Info, namespace string) ([]ResourceRuleInfo, []NonResourceRuleInfo, bool, error) } // RequestAttributesGetter provides a function that extracts Attributes from an http.Request diff --git a/vendor/k8s.io/apiserver/pkg/authorization/authorizerfactory/builtin.go b/vendor/k8s.io/apiserver/pkg/authorization/authorizerfactory/builtin.go index 6fe3fa96e..b3b1f09a6 100644 --- a/vendor/k8s.io/apiserver/pkg/authorization/authorizerfactory/builtin.go +++ b/vendor/k8s.io/apiserver/pkg/authorization/authorizerfactory/builtin.go @@ -33,7 +33,7 @@ func (alwaysAllowAuthorizer) Authorize(ctx context.Context, a authorizer.Attribu return authorizer.DecisionAllow, "", nil } -func (alwaysAllowAuthorizer) RulesFor(user user.Info, namespace string) ([]authorizer.ResourceRuleInfo, []authorizer.NonResourceRuleInfo, bool, error) { +func (alwaysAllowAuthorizer) RulesFor(ctx context.Context, user user.Info, namespace string) ([]authorizer.ResourceRuleInfo, []authorizer.NonResourceRuleInfo, bool, error) { return []authorizer.ResourceRuleInfo{ &authorizer.DefaultResourceRuleInfo{ Verbs: []string{"*"}, @@ -61,7 +61,7 @@ func (alwaysDenyAuthorizer) Authorize(ctx context.Context, a authorizer.Attribut return authorizer.DecisionNoOpinion, "Everything is forbidden.", nil } -func (alwaysDenyAuthorizer) RulesFor(user user.Info, namespace string) ([]authorizer.ResourceRuleInfo, []authorizer.NonResourceRuleInfo, bool, error) { +func (alwaysDenyAuthorizer) RulesFor(ctx context.Context, user user.Info, namespace string) ([]authorizer.ResourceRuleInfo, []authorizer.NonResourceRuleInfo, bool, error) { return []authorizer.ResourceRuleInfo{}, []authorizer.NonResourceRuleInfo{}, false, nil } diff --git a/vendor/k8s.io/apiserver/pkg/authorization/authorizerfactory/delegating.go b/vendor/k8s.io/apiserver/pkg/authorization/authorizerfactory/delegating.go index 070189145..483aedd42 100644 --- a/vendor/k8s.io/apiserver/pkg/authorization/authorizerfactory/delegating.go +++ b/vendor/k8s.io/apiserver/pkg/authorization/authorizerfactory/delegating.go @@ -22,6 +22,7 @@ import ( "k8s.io/apimachinery/pkg/util/wait" "k8s.io/apiserver/pkg/authorization/authorizer" + authorizationcel "k8s.io/apiserver/pkg/authorization/cel" "k8s.io/apiserver/plugin/pkg/authorizer/webhook" authorizationclient "k8s.io/client-go/kubernetes/typed/authorization/v1" ) @@ -31,6 +32,9 @@ import ( type DelegatingAuthorizerConfig struct { SubjectAccessReviewClient authorizationclient.AuthorizationV1Interface + // Compiler is the CEL compiler to use for evaluating policies. If nil, a default compiler will be used. 
+ Compiler authorizationcel.Compiler + // AllowCacheTTL is the length of time that a successful authorization response will be cached AllowCacheTTL time.Duration @@ -48,6 +52,10 @@ func (c DelegatingAuthorizerConfig) New() (authorizer.Authorizer, error) { if c.WebhookRetryBackoff == nil { return nil, errors.New("retry backoff parameters for delegating authorization webhook has not been specified") } + compiler := c.Compiler + if compiler == nil { + compiler = authorizationcel.NewDefaultCompiler() + } return webhook.NewFromInterface( c.SubjectAccessReviewClient, @@ -56,5 +64,6 @@ func (c DelegatingAuthorizerConfig) New() (authorizer.Authorizer, error) { *c.WebhookRetryBackoff, authorizer.DecisionNoOpinion, NewDelegatingAuthorizerMetrics(), + compiler, ) } diff --git a/vendor/k8s.io/apiserver/pkg/authorization/cel/compile.go b/vendor/k8s.io/apiserver/pkg/authorization/cel/compile.go index 829ff91d3..765864846 100644 --- a/vendor/k8s.io/apiserver/pkg/authorization/cel/compile.go +++ b/vendor/k8s.io/apiserver/pkg/authorization/cel/compile.go @@ -65,6 +65,12 @@ type compiler struct { envSet *environment.EnvSet } +// NewDefaultCompiler returns a new Compiler following the default compatibility version. +// Note: the compiler construction depends on feature gates and the compatibility version to be initialized. +func NewDefaultCompiler() Compiler { + return NewCompiler(environment.MustBaseEnvSet(environment.DefaultCompatibilityVersion(), true)) +} + // NewCompiler returns a new Compiler. func NewCompiler(env *environment.EnvSet) Compiler { return &compiler{ diff --git a/vendor/k8s.io/apiserver/pkg/authorization/union/union.go b/vendor/k8s.io/apiserver/pkg/authorization/union/union.go index 460d9a4ab..0e5007cfa 100644 --- a/vendor/k8s.io/apiserver/pkg/authorization/union/union.go +++ b/vendor/k8s.io/apiserver/pkg/authorization/union/union.go @@ -77,7 +77,7 @@ func NewRuleResolvers(authorizationHandlers ...authorizer.RuleResolver) authoriz } // RulesFor against a chain of authorizer.RuleResolver objects and returns nil if successful and returns error if unsuccessful -func (authzHandler unionAuthzRulesHandler) RulesFor(user user.Info, namespace string) ([]authorizer.ResourceRuleInfo, []authorizer.NonResourceRuleInfo, bool, error) { +func (authzHandler unionAuthzRulesHandler) RulesFor(ctx context.Context, user user.Info, namespace string) ([]authorizer.ResourceRuleInfo, []authorizer.NonResourceRuleInfo, bool, error) { var ( errList []error resourceRulesList []authorizer.ResourceRuleInfo @@ -86,7 +86,7 @@ func (authzHandler unionAuthzRulesHandler) RulesFor(user user.Info, namespace st incompleteStatus := false for _, currAuthzHandler := range authzHandler { - resourceRules, nonResourceRules, incomplete, err := currAuthzHandler.RulesFor(user, namespace) + resourceRules, nonResourceRules, incomplete, err := currAuthzHandler.RulesFor(ctx, user, namespace) if incomplete { incompleteStatus = true diff --git a/vendor/k8s.io/apiserver/pkg/cel/common/typeprovider.go b/vendor/k8s.io/apiserver/pkg/cel/common/typeprovider.go new file mode 100644 index 000000000..685a585c7 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/cel/common/typeprovider.go @@ -0,0 +1,127 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package common + +import ( + "github.com/google/cel-go/cel" + "github.com/google/cel-go/common/types" + "github.com/google/cel-go/common/types/ref" +) + +// TypeResolver resolves a type by a given name. +type TypeResolver interface { + // Resolve resolves the type by its name. + // This function returns false if the name does not refer to a known object type. + Resolve(name string) (ResolvedType, bool) +} + +// ResolvedType refers an object type that can be looked up for its fields. +type ResolvedType interface { + ref.Type + + Type() *types.Type + + // Field finds the field by the field name, or false if the field is not known. + // This function directly return a FieldType that is known to CEL to be more customizable. + Field(name string) (*types.FieldType, bool) + + // FieldNames returns the field names associated with the type, if the type + // is found. + FieldNames() ([]string, bool) + + // Val creates an instance for the ResolvedType, given its fields and their values. + Val(fields map[string]ref.Val) ref.Val +} + +// ResolverTypeProvider delegates type resolution first to the TypeResolver and then +// to the underlying types.Provider for types not resolved by the TypeResolver. +type ResolverTypeProvider struct { + typeResolver TypeResolver + underlyingTypeProvider types.Provider +} + +var _ types.Provider = (*ResolverTypeProvider)(nil) + +// FindStructType returns the Type give a qualified type name, by looking it up with +// the DynamicTypeResolver and translating it to CEL Type. +// If the type is not known to the DynamicTypeResolver, the lookup falls back to the underlying +// ResolverTypeProvider instead. +func (p *ResolverTypeProvider) FindStructType(structType string) (*types.Type, bool) { + t, ok := p.typeResolver.Resolve(structType) + if ok { + return types.NewTypeTypeWithParam(t.Type()), true + } + return p.underlyingTypeProvider.FindStructType(structType) +} + +// FindStructFieldNames returns the field names associated with the type, if the type +// is found. +func (p *ResolverTypeProvider) FindStructFieldNames(structType string) ([]string, bool) { + t, ok := p.typeResolver.Resolve(structType) + if ok { + return t.FieldNames() + } + return p.underlyingTypeProvider.FindStructFieldNames(structType) +} + +// FindStructFieldType returns the field type for a checked type value. +// Returns false if the field could not be found. +func (p *ResolverTypeProvider) FindStructFieldType(structType, fieldName string) (*types.FieldType, bool) { + t, ok := p.typeResolver.Resolve(structType) + if ok { + return t.Field(fieldName) + } + return p.underlyingTypeProvider.FindStructFieldType(structType, fieldName) +} + +// NewValue creates a new type value from a qualified name and map of fields. 
+func (p *ResolverTypeProvider) NewValue(structType string, fields map[string]ref.Val) ref.Val { + t, ok := p.typeResolver.Resolve(structType) + if ok { + return t.Val(fields) + } + return p.underlyingTypeProvider.NewValue(structType, fields) +} + +func (p *ResolverTypeProvider) EnumValue(enumName string) ref.Val { + return p.underlyingTypeProvider.EnumValue(enumName) +} + +func (p *ResolverTypeProvider) FindIdent(identName string) (ref.Val, bool) { + return p.underlyingTypeProvider.FindIdent(identName) +} + +// ResolverEnvOption creates the ResolverTypeProvider with a given DynamicTypeResolver, +// and also returns the CEL ResolverEnvOption to apply it to the env. +func ResolverEnvOption(resolver TypeResolver) cel.EnvOption { + _, envOpt := NewResolverTypeProviderAndEnvOption(resolver) + return envOpt +} + +// NewResolverTypeProviderAndEnvOption creates the ResolverTypeProvider with a given DynamicTypeResolver, +// and also returns the CEL ResolverEnvOption to apply it to the env. +func NewResolverTypeProviderAndEnvOption(resolver TypeResolver) (*ResolverTypeProvider, cel.EnvOption) { + tp := &ResolverTypeProvider{typeResolver: resolver} + var envOption cel.EnvOption = func(e *cel.Env) (*cel.Env, error) { + // wrap the existing type provider (acquired from the env) + // and set new type provider for the env. + tp.underlyingTypeProvider = e.CELTypeProvider() + typeProviderOption := cel.CustomTypeProvider(tp) + return typeProviderOption(e) + } + return tp, envOption +} diff --git a/vendor/k8s.io/apiserver/pkg/cel/environment/base.go b/vendor/k8s.io/apiserver/pkg/cel/environment/base.go index 563d34e13..b210377ae 100644 --- a/vendor/k8s.io/apiserver/pkg/cel/environment/base.go +++ b/vendor/k8s.io/apiserver/pkg/cel/environment/base.go @@ -33,7 +33,8 @@ import ( "k8s.io/apiserver/pkg/cel/library" genericfeatures "k8s.io/apiserver/pkg/features" utilfeature "k8s.io/apiserver/pkg/util/feature" - utilversion "k8s.io/apiserver/pkg/util/version" + "k8s.io/component-base/featuregate" + utilversion "k8s.io/component-base/version" ) // DefaultCompatibilityVersion returns a default compatibility version for use with EnvSet @@ -49,7 +50,7 @@ import ( // A default version number equal to the current Kubernetes major.minor version // indicates fast forward CEL features that can be used when rollback is no longer needed. func DefaultCompatibilityVersion() *version.Version { - effectiveVer := utilversion.DefaultComponentGlobalsRegistry.EffectiveVersionFor(utilversion.DefaultKubeComponent) + effectiveVer := featuregate.DefaultComponentGlobalsRegistry.EffectiveVersionFor(featuregate.DefaultKubeComponent) if effectiveVer == nil { effectiveVer = utilversion.DefaultKubeEffectiveVersion() } @@ -71,9 +72,9 @@ var baseOptsWithoutStrictCost = []VersionedOptions{ cel.EagerlyValidateDeclarations(true), cel.DefaultUTCTimeZone(true), - library.URLs(), - library.Regex(), - library.Lists(), + UnversionedLib(library.URLs), + UnversionedLib(library.Regex), + UnversionedLib(library.Lists), // cel-go v0.17.7 change the cost of has() from 0 to 1, but also provided the CostEstimatorOptions option to preserve the old behavior, so we enabled it at the same time we bumped our cel version to v0.17.7. // Since it is a regression fix, we apply it uniformly to all code use v0.17.7. 
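As a point of reference for the base-environment changes in this hunk and the NewDefaultCompiler helpers introduced earlier in this diff, the construction they all reduce to is sketched below. This is an illustrative snippet only; the package name, function name, and variable names are assumptions and are not part of the vendored patch.

package example // illustrative only, not vendored code

import (
	authenticationcel "k8s.io/apiserver/pkg/authentication/cel"
	authorizationcel "k8s.io/apiserver/pkg/authorization/cel"
	"k8s.io/apiserver/pkg/cel/environment"
)

// newCompilers builds one versioned CEL base environment (strict cost enabled) at the
// default compatibility version and derives both compilers from it; per the diff above,
// this is the same construction that authenticationcel.NewDefaultCompiler and
// authorizationcel.NewDefaultCompiler wrap, and the resulting compilers are what the
// validation functions now take as their first parameter.
func newCompilers() (authenticationcel.Compiler, authorizationcel.Compiler) {
	envSet := environment.MustBaseEnvSet(environment.DefaultCompatibilityVersion(), true)
	return authenticationcel.NewCompiler(envSet), authorizationcel.NewCompiler(envSet)
}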
@@ -91,7 +92,7 @@ var baseOptsWithoutStrictCost = []VersionedOptions{ { IntroducedVersion: version.MajorMinor(1, 27), EnvOptions: []cel.EnvOption{ - library.Authz(), + UnversionedLib(library.Authz), }, }, { @@ -99,7 +100,7 @@ var baseOptsWithoutStrictCost = []VersionedOptions{ EnvOptions: []cel.EnvOption{ cel.CrossTypeNumericComparisons(true), cel.OptionalTypes(), - library.Quantity(), + UnversionedLib(library.Quantity), }, }, // add the new validator in 1.29 @@ -138,15 +139,15 @@ var baseOptsWithoutStrictCost = []VersionedOptions{ { IntroducedVersion: version.MajorMinor(1, 30), EnvOptions: []cel.EnvOption{ - library.IP(), - library.CIDR(), + UnversionedLib(library.IP), + UnversionedLib(library.CIDR), }, }, // Format Library { IntroducedVersion: version.MajorMinor(1, 31), EnvOptions: []cel.EnvOption{ - library.Format(), + UnversionedLib(library.Format), }, }, // Authz selectors @@ -165,7 +166,14 @@ var baseOptsWithoutStrictCost = []VersionedOptions{ return enabled }, EnvOptions: []cel.EnvOption{ - library.AuthzSelectors(), + UnversionedLib(library.AuthzSelectors), + }, + }, + // Two variable comprehensions + { + IntroducedVersion: version.MajorMinor(1, 32), + EnvOptions: []cel.EnvOption{ + UnversionedLib(ext.TwoVarComprehensions), }, }, } @@ -191,6 +199,19 @@ var StrictCostOpt = VersionedOptions{ }, } +// cacheBaseEnvs controls whether calls to MustBaseEnvSet are cached. +// Defaults to true, may be disabled by calling DisableBaseEnvSetCachingForTests. +var cacheBaseEnvs = true + +// DisableBaseEnvSetCachingForTests clears and disables base env caching. +// This is only intended for unit tests exercising MustBaseEnvSet directly with different enablement options. +// It does not clear other initialization paths that may cache results of calling MustBaseEnvSet. +func DisableBaseEnvSetCachingForTests() { + cacheBaseEnvs = false + baseEnvs.Clear() + baseEnvsWithOption.Clear() +} + // MustBaseEnvSet returns the common CEL base environments for Kubernetes for Version, or panics // if the version is nil, or does not have major and minor components. // @@ -216,7 +237,9 @@ func MustBaseEnvSet(ver *version.Version, strictCost bool) *EnvSet { } entry, _, _ = baseEnvsSingleflight.Do(key, func() (interface{}, error) { entry := mustNewEnvSet(ver, baseOpts) - baseEnvs.Store(key, entry) + if cacheBaseEnvs { + baseEnvs.Store(key, entry) + } return entry, nil }) } else { @@ -225,7 +248,9 @@ func MustBaseEnvSet(ver *version.Version, strictCost bool) *EnvSet { } entry, _, _ = baseEnvsWithOptionSingleflight.Do(key, func() (interface{}, error) { entry := mustNewEnvSet(ver, baseOptsWithoutStrictCost) - baseEnvsWithOption.Store(key, entry) + if cacheBaseEnvs { + baseEnvsWithOption.Store(key, entry) + } return entry, nil }) } @@ -239,3 +264,20 @@ var ( baseEnvsSingleflight = &singleflight.Group{} baseEnvsWithOptionSingleflight = &singleflight.Group{} ) + +// UnversionedLib wraps library initialization calls like ext.Sets() or library.IP() +// to force compilation errors if the call evolves to include a varadic variable option. +// +// This provides automatic detection of a problem that is hard to catch in review-- +// If a CEL library used in Kubernetes is unversioned and then become versioned, and we +// fail to set a desired version, the libraries defaults to the latest version, changing +// CEL environment without controlled rollout, bypassing the entire purpose of the base +// environment. 
+// +// If usages of this function fail to compile: add version=1 argument to all call sites +// that fail compilation while removing the UnversionedLib wrapper. Next, review +// the changes in the library present in higher versions and, if needed, use VersionedOptions to +// the base environment to roll out to a newer version safely. +func UnversionedLib(initializer func() cel.EnvOption) cel.EnvOption { + return initializer() +} diff --git a/vendor/k8s.io/apiserver/pkg/cel/format.go b/vendor/k8s.io/apiserver/pkg/cel/format.go index 1bcfddfe7..31216806f 100644 --- a/vendor/k8s.io/apiserver/pkg/cel/format.go +++ b/vendor/k8s.io/apiserver/pkg/cel/format.go @@ -41,11 +41,11 @@ type Format struct { MaxRegexSize int } -func (d *Format) ConvertToNative(typeDesc reflect.Type) (interface{}, error) { +func (d Format) ConvertToNative(typeDesc reflect.Type) (interface{}, error) { return nil, fmt.Errorf("type conversion error from 'Format' to '%v'", typeDesc) } -func (d *Format) ConvertToType(typeVal ref.Type) ref.Val { +func (d Format) ConvertToType(typeVal ref.Type) ref.Val { switch typeVal { case FormatType: return d @@ -56,18 +56,18 @@ func (d *Format) ConvertToType(typeVal ref.Type) ref.Val { } } -func (d *Format) Equal(other ref.Val) ref.Val { - otherDur, ok := other.(*Format) +func (d Format) Equal(other ref.Val) ref.Val { + otherDur, ok := other.(Format) if !ok { return types.MaybeNoSuchOverloadErr(other) } return types.Bool(d.Name == otherDur.Name) } -func (d *Format) Type() ref.Type { +func (d Format) Type() ref.Type { return FormatType } -func (d *Format) Value() interface{} { +func (d Format) Value() interface{} { return d } diff --git a/vendor/k8s.io/apiserver/pkg/cel/library/authz.go b/vendor/k8s.io/apiserver/pkg/cel/library/authz.go index 1fd489fc9..77332cff8 100644 --- a/vendor/k8s.io/apiserver/pkg/cel/library/authz.go +++ b/vendor/k8s.io/apiserver/pkg/cel/library/authz.go @@ -232,7 +232,20 @@ var authzLib = &authz{} type authz struct{} func (*authz) LibraryName() string { - return "k8s.authz" + return "kubernetes.authz" +} + +func (*authz) Types() []*cel.Type { + return []*cel.Type{ + AuthorizerType, + PathCheckType, + GroupCheckType, + ResourceCheckType, + DecisionType} +} + +func (*authz) declarations() map[string][]cel.FunctionOpt { + return authzLibraryDecls } var authzLibraryDecls = map[string][]cel.FunctionOpt{ @@ -324,7 +337,15 @@ var authzSelectorsLib = &authzSelectors{} type authzSelectors struct{} func (*authzSelectors) LibraryName() string { - return "k8s.authzSelectors" + return "kubernetes.authzSelectors" +} + +func (*authzSelectors) Types() []*cel.Type { + return []*cel.Type{ResourceCheckType} +} + +func (*authzSelectors) declarations() map[string][]cel.FunctionOpt { + return authzSelectorsLibraryDecls } var authzSelectorsLibraryDecls = map[string][]cel.FunctionOpt{ diff --git a/vendor/k8s.io/apiserver/pkg/cel/library/cidr.go b/vendor/k8s.io/apiserver/pkg/cel/library/cidr.go index c4259daed..8ab444cac 100644 --- a/vendor/k8s.io/apiserver/pkg/cel/library/cidr.go +++ b/vendor/k8s.io/apiserver/pkg/cel/library/cidr.go @@ -109,7 +109,15 @@ var cidrsLib = &cidrs{} type cidrs struct{} func (*cidrs) LibraryName() string { - return "net.cidr" + return "kubernetes.net.cidr" +} + +func (*cidrs) declarations() map[string][]cel.FunctionOpt { + return cidrLibraryDecls +} + +func (*cidrs) Types() []*cel.Type { + return []*cel.Type{apiservercel.CIDRType, apiservercel.IPType} } var cidrLibraryDecls = map[string][]cel.FunctionOpt{ @@ -223,8 +231,7 @@ func cidrContainsCIDR(arg ref.Val, 
other ref.Val) ref.Val { return types.MaybeNoSuchOverloadErr(other) } - equalMasked := cidr.Prefix.Masked() == netip.PrefixFrom(containsCIDR.Prefix.Addr(), cidr.Prefix.Bits()) - return types.Bool(equalMasked && cidr.Prefix.Bits() <= containsCIDR.Prefix.Bits()) + return types.Bool(cidr.Overlaps(containsCIDR.Prefix) && cidr.Prefix.Bits() <= containsCIDR.Prefix.Bits()) } func prefixLength(arg ref.Val) ref.Val { diff --git a/vendor/k8s.io/apiserver/pkg/cel/library/cost.go b/vendor/k8s.io/apiserver/pkg/cel/library/cost.go index b71686309..a9e5db811 100644 --- a/vendor/k8s.io/apiserver/pkg/cel/library/cost.go +++ b/vendor/k8s.io/apiserver/pkg/cel/library/cost.go @@ -18,14 +18,13 @@ package library import ( "fmt" - "math" - "github.com/google/cel-go/checker" "github.com/google/cel-go/common" "github.com/google/cel-go/common/ast" "github.com/google/cel-go/common/types" "github.com/google/cel-go/common/types/ref" "github.com/google/cel-go/common/types/traits" + "math" "k8s.io/apiserver/pkg/cel" ) @@ -36,16 +35,25 @@ var panicOnUnknown = false // builtInFunctions is a list of functions used in cost tests that are not handled by CostEstimator. var knownUnhandledFunctions = map[string]bool{ - "uint": true, - "duration": true, - "bytes": true, - "timestamp": true, - "value": true, - "_==_": true, - "_&&_": true, - "_>_": true, - "!_": true, - "strings.quote": true, + "@not_strictly_false": true, + "uint": true, + "duration": true, + "bytes": true, + "cel.@mapInsert": true, + "timestamp": true, + "strings.quote": true, + "value": true, + "_==_": true, + "_&&_": true, + "_||_": true, + "_>_": true, + "_>=_": true, + "_<_": true, + "_<=_": true, + "!_": true, + "_?_:_": true, + "_+_": true, + "_-_": true, } // CostEstimator implements CEL's interpretable.ActualCostEstimator and checker.CostEstimator. @@ -98,7 +106,7 @@ func (l *CostEstimator) CallCost(function, overloadId string, args []ref.Val, re cost += traversalCost(args[0]) // these O(n) operations all cost roughly the cost of a single traversal } return &cost - case "url", "lowerAscii", "upperAscii", "substring", "trim": + case "url", "lowerAscii", "upperAscii", "substring", "trim", "jsonpatch.escapeKey": if len(args) >= 1 { cost := uint64(math.Ceil(float64(actualSize(args[0])) * common.StringTraversalCostFactor)) return &cost @@ -201,7 +209,7 @@ func (l *CostEstimator) CallCost(function, overloadId string, args []ref.Val, re } case "validate": if len(args) >= 2 { - format, isFormat := args[0].Value().(*cel.Format) + format, isFormat := args[0].Value().(cel.Format) if isFormat { strSize := actualSize(args[1]) @@ -235,6 +243,26 @@ func (l *CostEstimator) CallCost(function, overloadId string, args []ref.Val, re // url accessors cost := uint64(1) return &cost + case "_==_": + if len(args) == 2 { + unitCost := uint64(1) + lhs := args[0] + switch lhs.(type) { + case *cel.Quantity, cel.Quantity, + *cel.IP, cel.IP, + *cel.CIDR, cel.CIDR, + *cel.Format, cel.Format, // Formats have a small max size. Format takes pointer receiver. 
+ *cel.URL, cel.URL, // TODO: Computing the actual cost is expensive, and changing this would be a breaking change + *cel.Semver, cel.Semver, + *authorizerVal, authorizerVal, *pathCheckVal, pathCheckVal, *groupCheckVal, groupCheckVal, + *resourceCheckVal, resourceCheckVal, *decisionVal, decisionVal: + return &unitCost + default: + if panicOnUnknown && lhs.Type() != nil && isRegisteredType(lhs.Type().TypeName()) { + panic(fmt.Errorf("CallCost: unhandled equality for Kubernetes type %T", lhs)) + } + } + } } if panicOnUnknown && !knownUnhandledFunctions[function] { panic(fmt.Errorf("CallCost: unhandled function %q or args %v", function, args)) @@ -275,10 +303,10 @@ func (l *CostEstimator) EstimateCallCost(function, overloadId string, target *ch return &checker.CallEstimate{CostEstimate: l.sizeEstimate(*target).MultiplyByCostFactor(common.StringTraversalCostFactor)} } } - case "url": + case "url", "jsonpatch.escapeKey": if len(args) == 1 { sz := l.sizeEstimate(args[0]) - return &checker.CallEstimate{CostEstimate: sz.MultiplyByCostFactor(common.StringTraversalCostFactor)} + return &checker.CallEstimate{CostEstimate: sz.MultiplyByCostFactor(common.StringTraversalCostFactor), ResultSize: &sz} } case "lowerAscii", "upperAscii", "substring", "trim": if target != nil { @@ -475,6 +503,40 @@ func (l *CostEstimator) EstimateCallCost(function, overloadId string, target *ch case "getScheme", "getHostname", "getHost", "getPort", "getEscapedPath", "getQuery": // url accessors return &checker.CallEstimate{CostEstimate: checker.CostEstimate{Min: 1, Max: 1}} + case "_==_": + if len(args) == 2 { + lhs := args[0] + rhs := args[1] + if lhs.Type().Equal(rhs.Type()) == types.True { + t := lhs.Type() + if t.Kind() == types.OpaqueKind { + switch t.TypeName() { + case cel.IPType.TypeName(), cel.CIDRType.TypeName(): + return &checker.CallEstimate{CostEstimate: checker.CostEstimate{Min: 1, Max: 1}} + } + } + if t.Kind() == types.StructKind { + switch t { + case cel.QuantityType, AuthorizerType, PathCheckType, // O(1) cost equality checks + GroupCheckType, ResourceCheckType, DecisionType, cel.SemverType: + return &checker.CallEstimate{CostEstimate: checker.CostEstimate{Min: 1, Max: 1}} + case cel.FormatType: + return &checker.CallEstimate{CostEstimate: checker.CostEstimate{Min: 1, Max: cel.MaxFormatSize}.MultiplyByCostFactor(common.StringTraversalCostFactor)} + case cel.URLType: + size := checker.SizeEstimate{Min: 1, Max: 1} + rhSize := rhs.ComputedSize() + lhSize := rhs.ComputedSize() + if rhSize != nil && lhSize != nil { + size = rhSize.Union(*lhSize) + } + return &checker.CallEstimate{CostEstimate: checker.CostEstimate{Min: 1, Max: size.Max}.MultiplyByCostFactor(common.StringTraversalCostFactor)} + } + } + if panicOnUnknown && isRegisteredType(t.TypeName()) { + panic(fmt.Errorf("EstimateCallCost: unhandled equality for Kubernetes type %v", t)) + } + } + } } if panicOnUnknown && !knownUnhandledFunctions[function] { panic(fmt.Errorf("EstimateCallCost: unhandled function %q, target %v, args %v", function, target, args)) diff --git a/vendor/k8s.io/apiserver/pkg/cel/library/format.go b/vendor/k8s.io/apiserver/pkg/cel/library/format.go index c051f33c0..82ecffb41 100644 --- a/vendor/k8s.io/apiserver/pkg/cel/library/format.go +++ b/vendor/k8s.io/apiserver/pkg/cel/library/format.go @@ -25,6 +25,7 @@ import ( "github.com/google/cel-go/common/decls" "github.com/google/cel-go/common/types" "github.com/google/cel-go/common/types/ref" + apimachineryvalidation "k8s.io/apimachinery/pkg/api/validation" 
"k8s.io/apimachinery/pkg/util/validation" apiservercel "k8s.io/apiserver/pkg/cel" @@ -90,7 +91,15 @@ var formatLib = &format{} type format struct{} func (*format) LibraryName() string { - return "format" + return "kubernetes.format" +} + +func (*format) Types() []*cel.Type { + return []*cel.Type{apiservercel.FormatType} +} + +func (*format) declarations() map[string][]cel.FunctionOpt { + return formatLibraryDecls } func ZeroArgumentFunctionBinding(binding func() ref.Val) decls.OverloadOpt { @@ -124,7 +133,7 @@ func (*format) ProgramOptions() []cel.ProgramOption { return []cel.ProgramOption{} } -var ConstantFormats map[string]*apiservercel.Format = map[string]*apiservercel.Format{ +var ConstantFormats = map[string]apiservercel.Format{ "dns1123Label": { Name: "DNS1123Label", ValidateFunc: func(s string) []string { return apimachineryvalidation.NameIsDNSLabel(s, false) }, @@ -252,7 +261,7 @@ var formatLibraryDecls = map[string][]cel.FunctionOpt{ } func formatValidate(arg1, arg2 ref.Val) ref.Val { - f, ok := arg1.Value().(*apiservercel.Format) + f, ok := arg1.Value().(apiservercel.Format) if !ok { return types.MaybeNoSuchOverloadErr(arg1) } diff --git a/vendor/k8s.io/apiserver/pkg/cel/library/ip.go b/vendor/k8s.io/apiserver/pkg/cel/library/ip.go index cdfeb1daf..8edc4463a 100644 --- a/vendor/k8s.io/apiserver/pkg/cel/library/ip.go +++ b/vendor/k8s.io/apiserver/pkg/cel/library/ip.go @@ -132,7 +132,15 @@ var ipLib = &ip{} type ip struct{} func (*ip) LibraryName() string { - return "net.ip" + return "kubernetes.net.ip" +} + +func (*ip) declarations() map[string][]cel.FunctionOpt { + return ipLibraryDecls +} + +func (*ip) Types() []*cel.Type { + return []*cel.Type{apiservercel.IPType} } var ipLibraryDecls = map[string][]cel.FunctionOpt{ diff --git a/vendor/k8s.io/apiserver/pkg/cel/library/jsonpatch.go b/vendor/k8s.io/apiserver/pkg/cel/library/jsonpatch.go new file mode 100644 index 000000000..bdcb6d852 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/cel/library/jsonpatch.go @@ -0,0 +1,89 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package library + +import ( + "github.com/google/cel-go/cel" + "github.com/google/cel-go/common/types" + "github.com/google/cel-go/common/types/ref" + "strings" +) + +// JSONPatch provides a CEL function library extension of JSONPatch functions. +// +// jsonpatch.escapeKey +// +// Escapes a string for use as a JSONPatch path key. 
+// +// jsonpatch.escapeKey(<string>) <string> +// +// Examples: +// +// "/metadata/labels/" + jsonpatch.escapeKey('k8s.io/my~label') // returns "/metadata/labels/k8s.io~1my~0label" +func JSONPatch() cel.EnvOption { + return cel.Lib(jsonPatchLib) +} + +var jsonPatchLib = &jsonPatch{} + +type jsonPatch struct{} + +func (*jsonPatch) LibraryName() string { + return "kubernetes.jsonpatch" +} + +func (*jsonPatch) declarations() map[string][]cel.FunctionOpt { + return jsonPatchLibraryDecls +} + +func (*jsonPatch) Types() []*cel.Type { + return []*cel.Type{} +} + +var jsonPatchLibraryDecls = map[string][]cel.FunctionOpt{ + "jsonpatch.escapeKey": { + cel.Overload("string_jsonpatch_escapeKey_string", []*cel.Type{cel.StringType}, cel.StringType, + cel.UnaryBinding(escape)), + }, +} + +func (*jsonPatch) CompileOptions() []cel.EnvOption { + var options []cel.EnvOption + for name, overloads := range jsonPatchLibraryDecls { + options = append(options, cel.Function(name, overloads...)) + } + return options +} + +func (*jsonPatch) ProgramOptions() []cel.ProgramOption { + return []cel.ProgramOption{} +} + +var jsonPatchReplacer = strings.NewReplacer("/", "~1", "~", "~0") + +func escapeKey(k string) string { + return jsonPatchReplacer.Replace(k) +} + +func escape(arg ref.Val) ref.Val { + s, ok := arg.Value().(string) + if !ok { + return types.MaybeNoSuchOverloadErr(arg) + } + escaped := escapeKey(s) + return types.String(escaped) +} diff --git a/vendor/k8s.io/apiserver/pkg/cel/library/libraries.go b/vendor/k8s.io/apiserver/pkg/cel/library/libraries.go new file mode 100644 index 000000000..dc436973e --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/cel/library/libraries.go @@ -0,0 +1,61 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package library + +import ( + "github.com/google/cel-go/cel" +) + +// Library represents a CEL library used by kubernetes. +type Library interface { + // SingletonLibrary provides the library name and ensures the library can be safely registered into environments. + cel.SingletonLibrary + + // Types provides all custom types introduced by the library. + Types() []*cel.Type + + // declarations returns all function declarations provided by the library. + declarations() map[string][]cel.FunctionOpt +} + +// KnownLibraries returns all libraries used in Kubernetes.
+func KnownLibraries() []Library { + return []Library{ + authzLib, + authzSelectorsLib, + listsLib, + regexLib, + urlsLib, + quantityLib, + ipLib, + cidrsLib, + formatLib, + semverLib, + jsonPatchLib, + } +} + +func isRegisteredType(typeName string) bool { + for _, lib := range KnownLibraries() { + for _, rt := range lib.Types() { + if rt.TypeName() == typeName { + return true + } + } + } + return false +} diff --git a/vendor/k8s.io/apiserver/pkg/cel/library/lists.go b/vendor/k8s.io/apiserver/pkg/cel/library/lists.go index 327ec93d6..1f61b1181 100644 --- a/vendor/k8s.io/apiserver/pkg/cel/library/lists.go +++ b/vendor/k8s.io/apiserver/pkg/cel/library/lists.go @@ -96,7 +96,15 @@ var listsLib = &lists{} type lists struct{} func (*lists) LibraryName() string { - return "k8s.lists" + return "kubernetes.lists" +} + +func (*lists) Types() []*cel.Type { + return []*cel.Type{} +} + +func (*lists) declarations() map[string][]cel.FunctionOpt { + return listsLibraryDecls } var paramA = cel.TypeParamType("A") diff --git a/vendor/k8s.io/apiserver/pkg/cel/library/quantity.go b/vendor/k8s.io/apiserver/pkg/cel/library/quantity.go index b4ac91c8a..236b366b4 100644 --- a/vendor/k8s.io/apiserver/pkg/cel/library/quantity.go +++ b/vendor/k8s.io/apiserver/pkg/cel/library/quantity.go @@ -143,7 +143,15 @@ var quantityLib = &quantity{} type quantity struct{} func (*quantity) LibraryName() string { - return "k8s.quantity" + return "kubernetes.quantity" +} + +func (*quantity) Types() []*cel.Type { + return []*cel.Type{apiservercel.QuantityType} +} + +func (*quantity) declarations() map[string][]cel.FunctionOpt { + return quantityLibraryDecls } var quantityLibraryDecls = map[string][]cel.FunctionOpt{ diff --git a/vendor/k8s.io/apiserver/pkg/cel/library/regex.go b/vendor/k8s.io/apiserver/pkg/cel/library/regex.go index 147a40f9b..2cf8b0037 100644 --- a/vendor/k8s.io/apiserver/pkg/cel/library/regex.go +++ b/vendor/k8s.io/apiserver/pkg/cel/library/regex.go @@ -52,7 +52,15 @@ var regexLib = ®ex{} type regex struct{} func (*regex) LibraryName() string { - return "k8s.regex" + return "kubernetes.regex" +} + +func (*regex) Types() []*cel.Type { + return []*cel.Type{} +} + +func (*regex) declarations() map[string][]cel.FunctionOpt { + return regexLibraryDecls } var regexLibraryDecls = map[string][]cel.FunctionOpt{ diff --git a/vendor/k8s.io/apiserver/pkg/cel/library/semverlib.go b/vendor/k8s.io/apiserver/pkg/cel/library/semverlib.go new file mode 100644 index 000000000..d8c79ae02 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/cel/library/semverlib.go @@ -0,0 +1,247 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package library + +import ( + "github.com/blang/semver/v4" + "github.com/google/cel-go/cel" + "github.com/google/cel-go/common/types" + "github.com/google/cel-go/common/types/ref" + + apiservercel "k8s.io/apiserver/pkg/cel" +) + +// Semver provides a CEL function library extension for [semver.Version]. 
+// +// semver +// +// Converts a string to a semantic version or results in an error if the string is not a valid semantic version. Refer +// to semver.org documentation for information on accepted patterns. +// +// semver(<string>) <Semver> +// +// Examples: +// +// semver('1.0.0') // returns a Semver +// semver('0.1.0-alpha.1') // returns a Semver +// semver('200K') // error +// semver('Three') // error +// semver('Mi') // error +// +// isSemver +// +// Returns true if a string is a valid Semver. isSemver returns true if and +// only if semver does not result in error. +// +// isSemver(<string>) <bool> +// +// Examples: +// +// isSemver('1.0.0') // returns true +// isSemver('v1.0') // returns true (tolerant parsing) +// isSemver('hello') // returns false +// +// Conversion to Scalars: +// +// - major/minor/patch: return the major, minor, or patch version number as int64, respectively. +// +// <Semver>.major() <int> +// +// Examples: +// +// semver("1.2.3").major() // returns 1 +// +// Comparisons +// +// - isGreaterThan: Returns true if and only if the receiver is greater than the operand +// +// - isLessThan: Returns true if and only if the receiver is less than the operand +// +// - compareTo: Compares receiver to operand and returns 0 if they are equal, 1 if the receiver is greater, or -1 if the receiver is less than the operand +// +// +// <Semver>.isLessThan(<Semver>) <bool> +// <Semver>.isGreaterThan(<Semver>) <bool> +// <Semver>.compareTo(<Semver>) <int> +// +// Examples: +// +// semver("1.2.3").compareTo(semver("1.2.3")) // returns 0 +// semver("1.2.3").compareTo(semver("2.0.0")) // returns -1 +// semver("1.2.3").compareTo(semver("0.1.2")) // returns 1 + +func SemverLib() cel.EnvOption { + return cel.Lib(semverLib) +} + +var semverLib = &semverLibType{} + +type semverLibType struct{} + +func (*semverLibType) LibraryName() string { + return "kubernetes.Semver" +} + +func (*semverLibType) Types() []*cel.Type { + return []*cel.Type{apiservercel.SemverType} +} + +func (*semverLibType) declarations() map[string][]cel.FunctionOpt { + return map[string][]cel.FunctionOpt{ + "semver": { + cel.Overload("string_to_semver", []*cel.Type{cel.StringType}, apiservercel.SemverType, cel.UnaryBinding((stringToSemver))), + }, + "isSemver": { + cel.Overload("is_semver_string", []*cel.Type{cel.StringType}, cel.BoolType, cel.UnaryBinding(isSemver)), + }, + "isGreaterThan": { + cel.MemberOverload("semver_is_greater_than", []*cel.Type{apiservercel.SemverType, apiservercel.SemverType}, cel.BoolType, cel.BinaryBinding(semverIsGreaterThan)), + }, + "isLessThan": { + cel.MemberOverload("semver_is_less_than", []*cel.Type{apiservercel.SemverType, apiservercel.SemverType}, cel.BoolType, cel.BinaryBinding(semverIsLessThan)), + }, + "compareTo": { + cel.MemberOverload("semver_compare_to", []*cel.Type{apiservercel.SemverType, apiservercel.SemverType}, cel.IntType, cel.BinaryBinding(semverCompareTo)), + }, + "major": { + cel.MemberOverload("semver_major", []*cel.Type{apiservercel.SemverType}, cel.IntType, cel.UnaryBinding(semverMajor)), + }, + "minor": { + cel.MemberOverload("semver_minor", []*cel.Type{apiservercel.SemverType}, cel.IntType, cel.UnaryBinding(semverMinor)), + }, + "patch": { + cel.MemberOverload("semver_patch", []*cel.Type{apiservercel.SemverType}, cel.IntType, cel.UnaryBinding(semverPatch)), + }, + } +} + +func (s *semverLibType) CompileOptions() []cel.EnvOption { + // Defined in this function to avoid an initialization order problem.
+ semverLibraryDecls := s.declarations() + options := make([]cel.EnvOption, 0, len(semverLibraryDecls)) + for name, overloads := range semverLibraryDecls { + options = append(options, cel.Function(name, overloads...)) + } + return options +} + +func (*semverLibType) ProgramOptions() []cel.ProgramOption { + return []cel.ProgramOption{} +} + +func isSemver(arg ref.Val) ref.Val { + str, ok := arg.Value().(string) + if !ok { + return types.MaybeNoSuchOverloadErr(arg) + } + + // Using semver/v4 here is okay because this function isn't + // used to validate the Kubernetes API. In the CEL base library + // we would have to use the regular expression from + // pkg/apis/resource/structured/namedresources/validation/validation.go. + _, err := semver.Parse(str) + if err != nil { + return types.Bool(false) + } + + return types.Bool(true) +} + +func stringToSemver(arg ref.Val) ref.Val { + str, ok := arg.Value().(string) + if !ok { + return types.MaybeNoSuchOverloadErr(arg) + } + + // Using semver/v4 here is okay because this function isn't + // used to validate the Kubernetes API. In the CEL base library + // we would have to use the regular expression from + // pkg/apis/resource/structured/namedresources/validation/validation.go + // first before parsing. + v, err := semver.Parse(str) + if err != nil { + return types.WrapErr(err) + } + + return apiservercel.Semver{Version: v} +} + +func semverMajor(arg ref.Val) ref.Val { + v, ok := arg.Value().(semver.Version) + if !ok { + return types.MaybeNoSuchOverloadErr(arg) + } + return types.Int(v.Major) +} + +func semverMinor(arg ref.Val) ref.Val { + v, ok := arg.Value().(semver.Version) + if !ok { + return types.MaybeNoSuchOverloadErr(arg) + } + return types.Int(v.Minor) +} + +func semverPatch(arg ref.Val) ref.Val { + v, ok := arg.Value().(semver.Version) + if !ok { + return types.MaybeNoSuchOverloadErr(arg) + } + return types.Int(v.Patch) +} + +func semverIsGreaterThan(arg ref.Val, other ref.Val) ref.Val { + v, ok := arg.Value().(semver.Version) + if !ok { + return types.MaybeNoSuchOverloadErr(arg) + } + + v2, ok := other.Value().(semver.Version) + if !ok { + return types.MaybeNoSuchOverloadErr(arg) + } + + return types.Bool(v.Compare(v2) == 1) +} + +func semverIsLessThan(arg ref.Val, other ref.Val) ref.Val { + v, ok := arg.Value().(semver.Version) + if !ok { + return types.MaybeNoSuchOverloadErr(arg) + } + + v2, ok := other.Value().(semver.Version) + if !ok { + return types.MaybeNoSuchOverloadErr(arg) + } + + return types.Bool(v.Compare(v2) == -1) +} + +func semverCompareTo(arg ref.Val, other ref.Val) ref.Val { + v, ok := arg.Value().(semver.Version) + if !ok { + return types.MaybeNoSuchOverloadErr(arg) + } + + v2, ok := other.Value().(semver.Version) + if !ok { + return types.MaybeNoSuchOverloadErr(arg) + } + + return types.Int(v.Compare(v2)) +} diff --git a/vendor/k8s.io/apiserver/pkg/cel/library/test.go b/vendor/k8s.io/apiserver/pkg/cel/library/test.go index dcbc058a1..282d93962 100644 --- a/vendor/k8s.io/apiserver/pkg/cel/library/test.go +++ b/vendor/k8s.io/apiserver/pkg/cel/library/test.go @@ -38,7 +38,7 @@ type testLib struct { } func (*testLib) LibraryName() string { - return "k8s.test" + return "kubernetes.test" } type TestOption func(*testLib) *testLib diff --git a/vendor/k8s.io/apiserver/pkg/cel/library/urls.go b/vendor/k8s.io/apiserver/pkg/cel/library/urls.go index 8f4ba85af..4b7ffb95a 100644 --- a/vendor/k8s.io/apiserver/pkg/cel/library/urls.go +++ b/vendor/k8s.io/apiserver/pkg/cel/library/urls.go @@ -113,7 +113,15 @@ var urlsLib = &urls{} type 
urls struct{} func (*urls) LibraryName() string { - return "k8s.urls" + return "kubernetes.urls" +} + +func (*urls) Types() []*cel.Type { + return []*cel.Type{apiservercel.URLType} +} + +func (*urls) declarations() map[string][]cel.FunctionOpt { + return urlLibraryDecls } var urlLibraryDecls = map[string][]cel.FunctionOpt{ diff --git a/vendor/k8s.io/apiserver/pkg/cel/limits.go b/vendor/k8s.io/apiserver/pkg/cel/limits.go index 66ab4e44c..14b3ec2d2 100644 --- a/vendor/k8s.io/apiserver/pkg/cel/limits.go +++ b/vendor/k8s.io/apiserver/pkg/cel/limits.go @@ -48,5 +48,7 @@ const ( // MinNumberSize is the length of literal 0 MinNumberSize = 1 + // MaxFormatSize is the maximum size we allow for format strings + MaxFormatSize = 64 MaxNameFormatRegexSize = 128 ) diff --git a/vendor/k8s.io/apiserver/pkg/cel/mutation/dynamic/objects.go b/vendor/k8s.io/apiserver/pkg/cel/mutation/dynamic/objects.go new file mode 100644 index 000000000..8dd38281b --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/cel/mutation/dynamic/objects.go @@ -0,0 +1,249 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package dynamic + +import ( + "errors" + "fmt" + "reflect" + "strings" + + "github.com/google/cel-go/common/types" + "github.com/google/cel-go/common/types/ref" + "github.com/google/cel-go/common/types/traits" + "google.golang.org/protobuf/types/known/structpb" +) + +// ObjectType is the implementation of the Object type for use when compiling +// CEL expressions without schema information about the object. +// This is to provide CEL expressions with access to Object{} types constructors. +type ObjectType struct { + objectType *types.Type +} + +func (o *ObjectType) HasTrait(trait int) bool { + return o.objectType.HasTrait(trait) +} + +// TypeName returns the name of this ObjectType. +func (o *ObjectType) TypeName() string { + return o.objectType.TypeName() +} + +// Val returns an instance given the fields. +func (o *ObjectType) Val(fields map[string]ref.Val) ref.Val { + return NewObjectVal(o.objectType, fields) +} + +func (o *ObjectType) Type() *types.Type { + return o.objectType +} + +// Field looks up the field by name. +// This is the unstructured version that allows any name as the field name. +// The returned field is of DynType type. +func (o *ObjectType) Field(name string) (*types.FieldType, bool) { + return &types.FieldType{ + // for unstructured, we do not check for its type, + // use DynType for all fields. + Type: types.DynType, + IsSet: func(target any) bool { + if m, ok := target.(map[string]any); ok { + _, isSet := m[name] + return isSet + } + return false + }, + GetFrom: func(target any) (any, error) { + if m, ok := target.(map[string]any); ok { + return m[name], nil + } + return nil, fmt.Errorf("cannot get field %q", name) + }, + }, true +} + +func (o *ObjectType) FieldNames() ([]string, bool) { + return nil, true // Field names are not known for dynamic types. All field names are allowed. +} + +// NewObjectType creates a ObjectType by the given field name. 
+func NewObjectType(name string) *ObjectType { + return &ObjectType{ + objectType: types.NewObjectType(name), + } +} + +// ObjectVal is the CEL Val for an object that is constructed via the Object{} in +// CEL expressions without schema information about the object. +type ObjectVal struct { + objectType *types.Type + fields map[string]ref.Val +} + +// NewObjectVal creates an ObjectVal by its ResolvedType and its fields. +func NewObjectVal(objectType *types.Type, fields map[string]ref.Val) *ObjectVal { + return &ObjectVal{ + objectType: objectType, + fields: fields, + } +} + +var _ ref.Val = (*ObjectVal)(nil) +var _ traits.Zeroer = (*ObjectVal)(nil) + +// ConvertToNative converts the object to map[string]any. +// All nested lists are converted into []any native type. +// +// It returns an error if the target type is not map[string]any, +// or any recursive conversion fails. +func (v *ObjectVal) ConvertToNative(typeDesc reflect.Type) (any, error) { + result := make(map[string]any, len(v.fields)) + for k, v := range v.fields { + converted, err := convertField(v) + if err != nil { + return nil, fmt.Errorf("fail to convert field %q: %w", k, err) + } + result[k] = converted + } + if typeDesc == reflect.TypeOf(result) { + return result, nil + } + // CEL's builtin data literal values all support conversion to structpb.Value, which + // can then be serialized to JSON. This is convenient for CEL expressions that return + // an arbitrary JSON value, such as our MutatingAdmissionPolicy JSON Patch valueExpression + // field, so we support the conversion here, for Object data literals, as well. + if typeDesc == reflect.TypeOf(&structpb.Value{}) { + return structpb.NewStruct(result) + } + return nil, fmt.Errorf("unable to convert to %v", typeDesc) +} + +// ConvertToType supports type conversions between CEL value types supported by the expression language. +func (v *ObjectVal) ConvertToType(typeValue ref.Type) ref.Val { + if v.objectType.TypeName() == typeValue.TypeName() { + return v + } + if typeValue == types.TypeType { + return types.NewTypeTypeWithParam(v.objectType) + } + return types.NewErr("unsupported conversion into %v", typeValue) +} + +// Equal returns true if the `other` value has the same type and content as the implementing struct. +func (v *ObjectVal) Equal(other ref.Val) ref.Val { + if rhs, ok := other.(*ObjectVal); ok { + if v.objectType.Equal(rhs.objectType) != types.True { + return types.False + } + return types.Bool(reflect.DeepEqual(v.fields, rhs.fields)) + } + return types.False +} + +// Type returns the TypeValue of the value. +func (v *ObjectVal) Type() ref.Type { + return types.NewObjectType(v.objectType.TypeName()) +} + +// Value returns its value as a map[string]any. +func (v *ObjectVal) Value() any { + var result any + var object map[string]any + result, err := v.ConvertToNative(reflect.TypeOf(object)) + if err != nil { + return types.WrapErr(err) + } + return result +} + +// CheckTypeNamesMatchFieldPathNames transitively checks the CEL object type names of this ObjectVal. Returns all +// found type name mismatch errors. +// Children ObjectVal types under <field> or this ObjectVal +// must have type names of the form "<ObjectVal.TypeName>.<field>", children of that type must have type names of the +// form "<ObjectVal.TypeName>.<field>.<field>" and so on. +// Intermediate maps and lists are unnamed and ignored. +func (v *ObjectVal) CheckTypeNamesMatchFieldPathNames() error { + return errors.Join(typeCheck(v, []string{v.Type().TypeName()})...)
+ +} + +func typeCheck(v ref.Val, typeNamePath []string) []error { + var errs []error + if ov, ok := v.(*ObjectVal); ok { + tn := ov.objectType.TypeName() + if strings.Join(typeNamePath, ".") != tn { + errs = append(errs, fmt.Errorf("unexpected type name %q, expected %q, which matches field name path from root Object type", tn, strings.Join(typeNamePath, "."))) + } + for k, f := range ov.fields { + errs = append(errs, typeCheck(f, append(typeNamePath, k))...) + } + } + value := v.Value() + if listOfVal, ok := value.([]ref.Val); ok { + for _, v := range listOfVal { + errs = append(errs, typeCheck(v, typeNamePath)...) + } + } + + if mapOfVal, ok := value.(map[ref.Val]ref.Val); ok { + for _, v := range mapOfVal { + errs = append(errs, typeCheck(v, typeNamePath)...) + } + } + return errs +} + +// IsZeroValue indicates whether the object is the zero value for the type. +// For the ObjectVal, it is zero value if and only if the fields map is empty. +func (v *ObjectVal) IsZeroValue() bool { + return len(v.fields) == 0 +} + +// convertField converts a referred ref.Val to its expected type. +// For objects, the expected type is map[string]any +// For lists, the expected type is []any +// For maps, the expected type is map[string]any +// For anything else, it is converted via value.Value() +// +// It will return an error if the request type is a map but the key +// is not a string. +func convertField(value ref.Val) (any, error) { + // special handling for lists, where the elements are converted with Value() instead of ConvertToNative + // to allow them to become native value of any type. + if listOfVal, ok := value.Value().([]ref.Val); ok { + var result []any + for _, v := range listOfVal { + result = append(result, v.Value()) + } + return result, nil + } + // unstructured maps, as seen in annotations + // map keys must be strings + if mapOfVal, ok := value.Value().(map[ref.Val]ref.Val); ok { + result := make(map[string]any, len(mapOfVal)) + for k, v := range mapOfVal { + stringKey, ok := k.Value().(string) + if !ok { + return nil, fmt.Errorf("map key %q is of type %T, not string", k, k) + } + result[stringKey] = v.Value() + } + return result, nil + } + return value.Value(), nil +} diff --git a/vendor/k8s.io/apiserver/pkg/cel/mutation/jsonpatch.go b/vendor/k8s.io/apiserver/pkg/cel/mutation/jsonpatch.go new file mode 100644 index 000000000..1f6d63fa2 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/cel/mutation/jsonpatch.go @@ -0,0 +1,185 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package mutation + +import ( + "fmt" + "github.com/google/cel-go/cel" + "github.com/google/cel-go/common/types" + "github.com/google/cel-go/common/types/ref" + "reflect" +) + +var jsonPatchType = types.NewObjectType(JSONPatchTypeName) + +var ( + jsonPatchOp = "op" + jsonPatchPath = "path" + jsonPatchFrom = "from" + jsonPatchValue = "value" +) + +// JSONPatchType and JSONPatchVal are defined entirely from scratch here because JSONPatchVal +// has a dynamic 'value' field which can not be defined with an OpenAPI schema, +// preventing us from using DeclType and UnstructuredToVal. + +// JSONPatchType provides a CEL type for "JSONPatch" operations. +type JSONPatchType struct{} + +func (r *JSONPatchType) HasTrait(trait int) bool { + return jsonPatchType.HasTrait(trait) +} + +// TypeName returns the name of this ObjectType. +func (r *JSONPatchType) TypeName() string { + return jsonPatchType.TypeName() +} + +// Val returns an instance given the fields. +func (r *JSONPatchType) Val(fields map[string]ref.Val) ref.Val { + result := &JSONPatchVal{} + for name, value := range fields { + switch name { + case jsonPatchOp: + if s, ok := value.Value().(string); ok { + result.Op = s + } else { + return types.NewErr("unexpected type %T for JSONPatchType 'op' field", value.Value()) + } + case jsonPatchPath: + if s, ok := value.Value().(string); ok { + result.Path = s + } else { + return types.NewErr("unexpected type %T for JSONPatchType 'path' field", value.Value()) + } + case jsonPatchFrom: + if s, ok := value.Value().(string); ok { + result.From = s + } else { + return types.NewErr("unexpected type %T for JSONPatchType 'from' field", value.Value()) + } + case jsonPatchValue: + result.Val = value + default: + return types.NewErr("unexpected JSONPatchType field: %s", name) + } + } + return result +} + +func (r *JSONPatchType) Type() *types.Type { + return jsonPatchType +} + +func (r *JSONPatchType) Field(name string) (*types.FieldType, bool) { + var fieldType *types.Type + switch name { + case jsonPatchOp, jsonPatchFrom, jsonPatchPath: + fieldType = cel.StringType + case jsonPatchValue: + fieldType = types.DynType + } + return &types.FieldType{ + Type: fieldType, + }, true +} + +func (r *JSONPatchType) FieldNames() ([]string, bool) { + return []string{jsonPatchOp, jsonPatchFrom, jsonPatchPath, jsonPatchValue}, true +} + +// JSONPatchVal is the ref.Val for a JSONPatch. 
+type JSONPatchVal struct { + Op, From, Path string + Val ref.Val +} + +func (p *JSONPatchVal) ConvertToNative(typeDesc reflect.Type) (any, error) { + if typeDesc == reflect.TypeOf(&JSONPatchVal{}) { + return p, nil + } + return nil, fmt.Errorf("cannot convert to native type: %v", typeDesc) +} + +func (p *JSONPatchVal) ConvertToType(typeValue ref.Type) ref.Val { + if typeValue == jsonPatchType { + return p + } else if typeValue == types.TypeType { + return types.NewTypeTypeWithParam(jsonPatchType) + } + return types.NewErr("unsupported type: %s", typeValue.TypeName()) +} + +func (p *JSONPatchVal) Equal(other ref.Val) ref.Val { + if o, ok := other.(*JSONPatchVal); ok && p != nil && o != nil { + if p.Op != o.Op || p.From != o.From || p.Path != o.Path { + return types.False + } + if (p.Val == nil) != (o.Val == nil) { + return types.False + } + if p.Val == nil { + return types.True + } + return p.Val.Equal(o.Val) + } + return types.False +} + +func (p *JSONPatchVal) Get(index ref.Val) ref.Val { + if name, ok := index.Value().(string); ok { + switch name { + case jsonPatchOp: + return types.String(p.Op) + case jsonPatchPath: + return types.String(p.Path) + case jsonPatchFrom: + return types.String(p.From) + case jsonPatchValue: + return p.Val + default: + + } + } + return types.NewErr("unsupported indexer: %s", index) +} + +func (p *JSONPatchVal) IsSet(field ref.Val) ref.Val { + if name, ok := field.Value().(string); ok { + switch name { + case jsonPatchOp: + return types.Bool(len(p.Op) > 0) + case jsonPatchPath: + return types.Bool(len(p.Path) > 0) + case jsonPatchFrom: + return types.Bool(len(p.From) > 0) + case jsonPatchValue: + return types.Bool(p.Val != nil) + } + } + return types.NewErr("unsupported field: %s", field) +} + +func (p *JSONPatchVal) Type() ref.Type { + return jsonPatchType +} + +func (p *JSONPatchVal) Value() any { + return p +} + +var _ ref.Val = &JSONPatchVal{} diff --git a/vendor/k8s.io/apiserver/pkg/cel/mutation/typeresolver.go b/vendor/k8s.io/apiserver/pkg/cel/mutation/typeresolver.go new file mode 100644 index 000000000..aceed5ae5 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/cel/mutation/typeresolver.go @@ -0,0 +1,47 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package mutation + +import ( + "strings" + + "k8s.io/apiserver/pkg/cel/common" + "k8s.io/apiserver/pkg/cel/mutation/dynamic" +) + +// ObjectTypeName is the name of Object types that are used to declare the types of +// Kubernetes objects in CEL dynamically using the naming scheme "Object....". +// For example "Object.spec.containers" is the type of the spec.containers field of the object in scope. +const ObjectTypeName = "Object" + +// JSONPatchTypeName is the name of the JSONPatch type. This type is typically used to create JSON patches +// in CEL expressions. +const JSONPatchTypeName = "JSONPatch" + +// DynamicTypeResolver resolves the Object and JSONPatch types when compiling +// CEL expressions without schema information about the object. 
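// An illustrative aside (assumptions for orientation, not taken from this patch): once this
// resolver is installed in a CEL environment, schema-less expressions such as
//
//	Object{spec: Object.spec{replicas: 3}}
//	JSONPatch{op: "replace", path: "/spec/replicas", value: 3}
//
// type-check and evaluate, with field types resolved dynamically rather than from an
// OpenAPI schema.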
+type DynamicTypeResolver struct{} + +func (r *DynamicTypeResolver) Resolve(name string) (common.ResolvedType, bool) { + if name == JSONPatchTypeName { + return &JSONPatchType{}, true + } + if name == ObjectTypeName || strings.HasPrefix(name, ObjectTypeName+".") { + return dynamic.NewObjectType(name), true + } + return nil, false +} diff --git a/vendor/k8s.io/apiserver/pkg/cel/semver.go b/vendor/k8s.io/apiserver/pkg/cel/semver.go new file mode 100644 index 000000000..c53b9c306 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/cel/semver.go @@ -0,0 +1,73 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cel + +import ( + "fmt" + "reflect" + + "github.com/blang/semver/v4" + "github.com/google/cel-go/cel" + "github.com/google/cel-go/common/types" + "github.com/google/cel-go/common/types/ref" +) + +var ( + SemverType = cel.ObjectType("kubernetes.Semver") +) + +// Semver provides a CEL representation of a [semver.Version]. +type Semver struct { + semver.Version +} + +func (v Semver) ConvertToNative(typeDesc reflect.Type) (interface{}, error) { + if reflect.TypeOf(v.Version).AssignableTo(typeDesc) { + return v.Version, nil + } + if reflect.TypeOf("").AssignableTo(typeDesc) { + return v.Version.String(), nil + } + return nil, fmt.Errorf("type conversion error from 'Semver' to '%v'", typeDesc) +} + +func (v Semver) ConvertToType(typeVal ref.Type) ref.Val { + switch typeVal { + case SemverType: + return v + case types.TypeType: + return SemverType + default: + return types.NewErr("type conversion error from '%s' to '%s'", SemverType, typeVal) + } +} + +func (v Semver) Equal(other ref.Val) ref.Val { + otherDur, ok := other.(Semver) + if !ok { + return types.MaybeNoSuchOverloadErr(other) + } + return types.Bool(v.Version.EQ(otherDur.Version)) +} + +func (v Semver) Type() ref.Type { + return SemverType +} + +func (v Semver) Value() interface{} { + return v.Version +} diff --git a/vendor/k8s.io/apiserver/pkg/cel/types.go b/vendor/k8s.io/apiserver/pkg/cel/types.go index 83c90c891..84bfd7e65 100644 --- a/vendor/k8s.io/apiserver/pkg/cel/types.go +++ b/vendor/k8s.io/apiserver/pkg/cel/types.go @@ -429,7 +429,7 @@ func (rt *DeclTypeProvider) FindStructType(typeName string) (*types.Type, bool) declType, found := rt.findDeclType(typeName) if found { expT := declType.CelType() - return expT, found + return types.NewTypeTypeWithParam(expT), found } return rt.typeProvider.FindStructType(typeName) } diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/discovery/root.go b/vendor/k8s.io/apiserver/pkg/endpoints/discovery/root.go index 24f0a3452..8cc52e05f 100644 --- a/vendor/k8s.io/apiserver/pkg/endpoints/discovery/root.go +++ b/vendor/k8s.io/apiserver/pkg/endpoints/discovery/root.go @@ -17,6 +17,7 @@ limitations under the License. package discovery import ( + "context" "net/http" "sync" @@ -33,12 +34,21 @@ import ( // GroupManager is an interface that allows dynamic mutation of the existing webservice to handle // API groups being added or removed.
type GroupManager interface { + GroupLister + AddGroup(apiGroup metav1.APIGroup) RemoveGroup(groupName string) ServeHTTP(resp http.ResponseWriter, req *http.Request) WebService() *restful.WebService } +// GroupLister knows how to list APIGroups for discovery. +type GroupLister interface { + // Groups returns APIGroups for discovery, filling in ServerAddressByClientCIDRs + // based on data in req. + Groups(ctx context.Context, req *http.Request) ([]metav1.APIGroup, error) +} + // rootAPIsHandler creates a webservice serving api group discovery. // The list of APIGroups may change while the server is running because additional resources // are registered or removed. It is not safe to cache the values. @@ -94,24 +104,40 @@ func (s *rootAPIsHandler) RemoveGroup(groupName string) { } } -func (s *rootAPIsHandler) ServeHTTP(resp http.ResponseWriter, req *http.Request) { +func (s *rootAPIsHandler) Groups(ctx context.Context, req *http.Request) ([]metav1.APIGroup, error) { s.lock.RLock() defer s.lock.RUnlock() + return s.groupsLocked(ctx, req), nil +} + +// groupsLocked returns the APIGroupList discovery information for this handler. +// The caller must hold the lock before invoking this method to avoid data races. +func (s *rootAPIsHandler) groupsLocked(ctx context.Context, req *http.Request) []metav1.APIGroup { + clientIP := utilnet.GetClientIP(req) + serverCIDR := s.addresses.ServerAddressByClientCIDRs(clientIP) + orderedGroups := []metav1.APIGroup{} for _, groupName := range s.apiGroupNames { orderedGroups = append(orderedGroups, s.apiGroups[groupName]) } - clientIP := utilnet.GetClientIP(req) - serverCIDR := s.addresses.ServerAddressByClientCIDRs(clientIP) groups := make([]metav1.APIGroup, len(orderedGroups)) for i := range orderedGroups { groups[i] = orderedGroups[i] groups[i].ServerAddressByClientCIDRs = serverCIDR } - responsewriters.WriteObjectNegotiated(s.serializer, negotiation.DefaultEndpointRestrictions, schema.GroupVersion{}, resp, req, http.StatusOK, &metav1.APIGroupList{Groups: groups}, false) + return groups +} + +func (s *rootAPIsHandler) ServeHTTP(resp http.ResponseWriter, req *http.Request) { + s.lock.RLock() + defer s.lock.RUnlock() + + groupList := metav1.APIGroupList{Groups: s.groupsLocked(req.Context(), req)} + + responsewriters.WriteObjectNegotiated(s.serializer, negotiation.DefaultEndpointRestrictions, schema.GroupVersion{}, resp, req, http.StatusOK, &groupList, false) } func (s *rootAPIsHandler) restfulHandle(req *restful.Request, resp *restful.Response) { diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/filters/authentication.go b/vendor/k8s.io/apiserver/pkg/endpoints/filters/authentication.go index 64b3569d0..980e11f6e 100644 --- a/vendor/k8s.io/apiserver/pkg/endpoints/filters/authentication.go +++ b/vendor/k8s.io/apiserver/pkg/endpoints/filters/authentication.go @@ -54,6 +54,7 @@ func withAuthentication(handler http.Handler, auth authenticator.Request, failed } standardRequestHeaderConfig := &authenticatorfactory.RequestHeaderConfig{ UsernameHeaders: headerrequest.StaticStringSlice{"X-Remote-User"}, + UIDHeaders: headerrequest.StaticStringSlice{"X-Remote-Uid"}, GroupHeaders: headerrequest.StaticStringSlice{"X-Remote-Group"}, ExtraHeaderPrefixes: headerrequest.StaticStringSlice{"X-Remote-Extra-"}, } @@ -90,6 +91,7 @@ func withAuthentication(handler http.Handler, auth authenticator.Request, failed headerrequest.ClearAuthenticationHeaders( req.Header, standardRequestHeaderConfig.UsernameHeaders, + standardRequestHeaderConfig.UIDHeaders, 
standardRequestHeaderConfig.GroupHeaders, standardRequestHeaderConfig.ExtraHeaderPrefixes, ) @@ -99,6 +101,7 @@ func withAuthentication(handler http.Handler, auth authenticator.Request, failed headerrequest.ClearAuthenticationHeaders( req.Header, requestHeaderConfig.UsernameHeaders, + requestHeaderConfig.UIDHeaders, requestHeaderConfig.GroupHeaders, requestHeaderConfig.ExtraHeaderPrefixes, ) diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/groupversion.go b/vendor/k8s.io/apiserver/pkg/endpoints/groupversion.go index a60485d20..612b1a014 100644 --- a/vendor/k8s.io/apiserver/pkg/endpoints/groupversion.go +++ b/vendor/k8s.io/apiserver/pkg/endpoints/groupversion.go @@ -28,7 +28,6 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" utilerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/apimachinery/pkg/util/managedfields" - "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apiserver/pkg/admission" "k8s.io/apiserver/pkg/authorization/authorizer" "k8s.io/apiserver/pkg/endpoints/discovery" @@ -71,9 +70,6 @@ type APIGroupVersion struct { // version (for when the inevitable meta/v2 group emerges). MetaGroupVersion *schema.GroupVersion - // RootScopedKinds are the root scoped kinds for the primary GroupVersion - RootScopedKinds sets.String - // Serializer is used to determine how to convert responses from API methods into bytes to send over // the wire. Serializer runtime.NegotiatedSerializer diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/create.go b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/create.go index 120d3f665..55f310daa 100644 --- a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/create.go +++ b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/create.go @@ -55,6 +55,7 @@ func createHandler(r rest.NamedCreater, scope *RequestScope, admit admission.Int ctx := req.Context() // For performance tracking purposes. ctx, span := tracing.Start(ctx, "Create", traceFields(req)...) + req = req.WithContext(ctx) defer span.End(500 * time.Millisecond) namespace, name, err := scope.Namer.Name(req) diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/delete.go b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/delete.go index 663fd8d52..0abba257e 100644 --- a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/delete.go +++ b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/delete.go @@ -30,19 +30,27 @@ import ( metainternalversionvalidation "k8s.io/apimachinery/pkg/apis/meta/internalversion/validation" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/validation" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/validation/field" "k8s.io/apiserver/pkg/admission" "k8s.io/apiserver/pkg/audit" + "k8s.io/apiserver/pkg/authorization/authorizer" "k8s.io/apiserver/pkg/endpoints/handlers/finisher" requestmetrics "k8s.io/apiserver/pkg/endpoints/handlers/metrics" "k8s.io/apiserver/pkg/endpoints/handlers/negotiation" "k8s.io/apiserver/pkg/endpoints/request" "k8s.io/apiserver/pkg/features" "k8s.io/apiserver/pkg/registry/rest" + "k8s.io/apiserver/pkg/util/apihelpers" "k8s.io/apiserver/pkg/util/dryrun" utilfeature "k8s.io/apiserver/pkg/util/feature" "k8s.io/component-base/tracing" + + "k8s.io/klog/v2" + "k8s.io/utils/ptr" ) // DeleteResource returns a function that will handle a resource deletion @@ -52,6 +60,7 @@ func DeleteResource(r rest.GracefulDeleter, allowsOptions bool, scope *RequestSc ctx := req.Context() // For performance tracking purposes. 
ctx, span := tracing.Start(ctx, "Delete", traceFields(req)...) + req = req.WithContext(ctx) defer span.End(500 * time.Millisecond) namespace, name, err := scope.Namer.Name(req) @@ -84,7 +93,7 @@ func DeleteResource(r rest.GracefulDeleter, allowsOptions bool, scope *RequestSc } span.AddEvent("limitedReadBody succeeded", attribute.Int("len", len(body))) if len(body) > 0 { - s, err := negotiation.NegotiateInputSerializer(req, false, metainternalversionscheme.Codecs) + s, err := negotiation.NegotiateInputSerializer(req, false, apihelpers.GetMetaInternalVersionCodecs()) if err != nil { scope.err(err, w, req) return @@ -92,7 +101,7 @@ func DeleteResource(r rest.GracefulDeleter, allowsOptions bool, scope *RequestSc // For backwards compatibility, we need to allow existing clients to submit per group DeleteOptions // It is also allowed to pass a body with meta.k8s.io/v1.DeleteOptions defaultGVK := scope.MetaGroupVersion.WithKind("DeleteOptions") - obj, gvk, err := metainternalversionscheme.Codecs.DecoderToVersion(s.Serializer, defaultGVK.GroupVersion()).Decode(body, &defaultGVK, options) + obj, gvk, err := apihelpers.GetMetaInternalVersionCodecs().DecoderToVersion(s.Serializer, defaultGVK.GroupVersion()).Decode(body, &defaultGVK, options) if err != nil { scope.err(err, w, req) return @@ -104,7 +113,7 @@ func DeleteResource(r rest.GracefulDeleter, allowsOptions bool, scope *RequestSc span.AddEvent("Decoded delete options") objGV := gvk.GroupVersion() - audit.LogRequestObject(req.Context(), obj, objGV, scope.Resource, scope.Subresource, metainternalversionscheme.Codecs) + audit.LogRequestObject(req.Context(), obj, objGV, scope.Resource, scope.Subresource, apihelpers.GetMetaInternalVersionCodecs()) span.AddEvent("Recorded the audit event") } else { if err := metainternalversionscheme.ParameterCodec.DecodeParameters(req.URL.Query(), scope.MetaGroupVersion, options); err != nil { @@ -114,6 +123,9 @@ func DeleteResource(r rest.GracefulDeleter, allowsOptions bool, scope *RequestSc } } } + if !utilfeature.DefaultFeatureGate.Enabled(features.AllowUnsafeMalformedObjectDeletion) && options != nil { + options.IgnoreStoreReadErrorWithClusterBreakingPotential = nil + } if errs := validation.ValidateDeleteOptions(options); len(errs) > 0 { err := errors.NewInvalid(schema.GroupKind{Group: metav1.GroupName, Kind: "DeleteOptions"}, "", errs) scope.err(err, w, req) @@ -121,10 +133,36 @@ func DeleteResource(r rest.GracefulDeleter, allowsOptions bool, scope *RequestSc } options.TypeMeta.SetGroupVersionKind(metav1.SchemeGroupVersion.WithKind("DeleteOptions")) - span.AddEvent("About to delete object from database") - wasDeleted := true userInfo, _ := request.UserFrom(ctx) staticAdmissionAttrs := admission.NewAttributesRecord(nil, nil, scope.Kind, namespace, name, scope.Resource, scope.Subresource, admission.Delete, options, dryrun.IsDryRun(options.DryRun), userInfo) + + if utilfeature.DefaultFeatureGate.Enabled(features.AllowUnsafeMalformedObjectDeletion) { + if options != nil && ptr.Deref(options.IgnoreStoreReadErrorWithClusterBreakingPotential, false) { + // let's make sure that the audit will reflect that this delete request + // was tried with ignoreStoreReadErrorWithClusterBreakingPotential enabled + audit.AddAuditAnnotation(ctx, "apiserver.k8s.io/unsafe-delete-ignore-read-error", "") + + p, ok := r.(rest.CorruptObjectDeleterProvider) + if !ok || p.GetCorruptObjDeleter() == nil { + // this is a developer error + scope.err(errors.NewInternalError(fmt.Errorf("no unsafe deleter provided, can not honor 
ignoreStoreReadErrorWithClusterBreakingPotential")), w, req) + return + } + if scope.Authorizer == nil { + scope.err(errors.NewInternalError(fmt.Errorf("no authorizer provided, unable to authorize unsafe delete")), w, req) + return + } + if err := authorizeUnsafeDelete(ctx, staticAdmissionAttrs, scope.Authorizer); err != nil { + scope.err(err, w, req) + return + } + + r = p.GetCorruptObjDeleter() + } + } + + span.AddEvent("About to delete object from database") + wasDeleted := true result, err := finisher.FinishRequest(ctx, func() (runtime.Object, error) { obj, deleted, err := r.Delete(ctx, name, rest.AdmissionToValidateObjectDeleteFunc(admit, staticAdmissionAttrs, scope), options) wasDeleted = deleted @@ -172,6 +210,7 @@ func DeleteCollection(r rest.CollectionDeleter, checkBody bool, scope *RequestSc return func(w http.ResponseWriter, req *http.Request) { ctx := req.Context() ctx, span := tracing.Start(ctx, "Delete", traceFields(req)...) + req = req.WithContext(ctx) defer span.End(500 * time.Millisecond) namespace, err := scope.Namer.Namespace(req) @@ -229,7 +268,7 @@ func DeleteCollection(r rest.CollectionDeleter, checkBody bool, scope *RequestSc } span.AddEvent("limitedReadBody succeeded", attribute.Int("len", len(body))) if len(body) > 0 { - s, err := negotiation.NegotiateInputSerializer(req, false, metainternalversionscheme.Codecs) + s, err := negotiation.NegotiateInputSerializer(req, false, apihelpers.GetMetaInternalVersionCodecs()) if err != nil { scope.err(err, w, req) return @@ -237,7 +276,7 @@ func DeleteCollection(r rest.CollectionDeleter, checkBody bool, scope *RequestSc // For backwards compatibility, we need to allow existing clients to submit per group DeleteOptions // It is also allowed to pass a body with meta.k8s.io/v1.DeleteOptions defaultGVK := scope.MetaGroupVersion.WithKind("DeleteOptions") - obj, gvk, err := metainternalversionscheme.Codecs.DecoderToVersion(s.Serializer, defaultGVK.GroupVersion()).Decode(body, &defaultGVK, options) + obj, gvk, err := apihelpers.GetMetaInternalVersionCodecs().DecoderToVersion(s.Serializer, defaultGVK.GroupVersion()).Decode(body, &defaultGVK, options) if err != nil { scope.err(err, w, req) return @@ -248,7 +287,7 @@ func DeleteCollection(r rest.CollectionDeleter, checkBody bool, scope *RequestSc } objGV := gvk.GroupVersion() - audit.LogRequestObject(req.Context(), obj, objGV, scope.Resource, scope.Subresource, metainternalversionscheme.Codecs) + audit.LogRequestObject(req.Context(), obj, objGV, scope.Resource, scope.Subresource, apihelpers.GetMetaInternalVersionCodecs()) } else { if err := metainternalversionscheme.ParameterCodec.DecodeParameters(req.URL.Query(), scope.MetaGroupVersion, options); err != nil { err = errors.NewBadRequest(err.Error()) @@ -257,11 +296,26 @@ func DeleteCollection(r rest.CollectionDeleter, checkBody bool, scope *RequestSc } } } + if !utilfeature.DefaultFeatureGate.Enabled(features.AllowUnsafeMalformedObjectDeletion) && options != nil { + options.IgnoreStoreReadErrorWithClusterBreakingPotential = nil + } if errs := validation.ValidateDeleteOptions(options); len(errs) > 0 { err := errors.NewInvalid(schema.GroupKind{Group: metav1.GroupName, Kind: "DeleteOptions"}, "", errs) scope.err(err, w, req) return } + + if utilfeature.DefaultFeatureGate.Enabled(features.AllowUnsafeMalformedObjectDeletion) { + if options != nil && ptr.Deref(options.IgnoreStoreReadErrorWithClusterBreakingPotential, false) { + fieldErrList := field.ErrorList{ + field.Invalid(field.NewPath("ignoreStoreReadErrorWithClusterBreakingPotential"), 
true, "is not allowed with DELETECOLLECTION, try again after removing the option"), + } + err := errors.NewInvalid(schema.GroupKind{Group: metav1.GroupName, Kind: "DeleteOptions"}, "", fieldErrList) + scope.err(err, w, req) + return + } + } + options.TypeMeta.SetGroupVersionKind(metav1.SchemeGroupVersion.WithKind("DeleteOptions")) admit = admission.WithAudit(admit) @@ -292,3 +346,77 @@ func DeleteCollection(r rest.CollectionDeleter, checkBody bool, scope *RequestSc transformResponseObject(ctx, scope, req, w, http.StatusOK, outputMediaType, result) } } + +// authorizeUnsafeDelete ensures that the user has permission to do +// 'unsafe-delete-ignore-read-errors' on the resource being deleted when +// ignoreStoreReadErrorWithClusterBreakingPotential is enabled +func authorizeUnsafeDelete(ctx context.Context, attr admission.Attributes, authz authorizer.Authorizer) (err error) { + if attr.GetOperation() != admission.Delete || attr.GetOperationOptions() == nil { + return nil + } + options, ok := attr.GetOperationOptions().(*metav1.DeleteOptions) + if !ok { + return errors.NewInternalError(fmt.Errorf("expected an option of type: %T, but got: %T", &metav1.DeleteOptions{}, attr.GetOperationOptions())) + } + if !ptr.Deref(options.IgnoreStoreReadErrorWithClusterBreakingPotential, false) { + return nil + } + + requestInfo, found := request.RequestInfoFrom(ctx) + if !found { + return admission.NewForbidden(attr, fmt.Errorf("no RequestInfo found in the context")) + } + if !requestInfo.IsResourceRequest || len(attr.GetSubresource()) > 0 { + return admission.NewForbidden(attr, fmt.Errorf("ignoreStoreReadErrorWithClusterBreakingPotential delete option is not allowed on a subresource or non-resource request")) + } + + // if we are here, IgnoreStoreReadErrorWithClusterBreakingPotential + // is set to true in the delete options, the user must have permission + // to do 'unsafe-delete-ignore-read-errors' on the given resource. 
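The authorization step described above only lets the request proceed when the requester holds the dedicated verb unsafe-delete-ignore-read-errors on the target resource. A minimal sketch of a ClusterRole that would grant that verb; the role name, API group, and resource here are placeholders, not part of this change.

package main

import (
	"fmt"

	rbacv1 "k8s.io/api/rbac/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// unsafeDeleteClusterRole sketches an RBAC rule carrying the verb checked above.
// The name, API group, and resource are illustrative placeholders.
func unsafeDeleteClusterRole() *rbacv1.ClusterRole {
	return &rbacv1.ClusterRole{
		ObjectMeta: metav1.ObjectMeta{Name: "unsafe-delete-example"},
		Rules: []rbacv1.PolicyRule{{
			APIGroups: []string{""},           // placeholder: group of the corrupt object
			Resources: []string{"configmaps"}, // placeholder: resource being deleted
			Verbs:     []string{"unsafe-delete-ignore-read-errors"},
		}},
	}
}

func main() {
	fmt.Println(unsafeDeleteClusterRole().Rules[0].Verbs)
}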
+ record := authorizer.AttributesRecord{ + User: attr.GetUserInfo(), + Verb: "unsafe-delete-ignore-read-errors", + Namespace: attr.GetNamespace(), + Name: attr.GetName(), + APIGroup: attr.GetResource().Group, + APIVersion: attr.GetResource().Version, + Resource: attr.GetResource().Resource, + ResourceRequest: true, + } + // TODO: can't use ResourceAttributesFrom from k8s.io/kubernetes/pkg/registry/authorization/util + // due to prevent staging --> k8s.io/kubernetes dep issue + if utilfeature.DefaultFeatureGate.Enabled(features.AuthorizeWithSelectors) { + if len(requestInfo.FieldSelector) > 0 { + fieldSelector, err := fields.ParseSelector(requestInfo.FieldSelector) + if err != nil { + record.FieldSelectorRequirements, record.FieldSelectorParsingErr = nil, err + } else { + if requirements := fieldSelector.Requirements(); len(requirements) > 0 { + record.FieldSelectorRequirements, record.FieldSelectorParsingErr = fieldSelector.Requirements(), nil + } + } + } + if len(requestInfo.LabelSelector) > 0 { + labelSelector, err := labels.Parse(requestInfo.LabelSelector) + if err != nil { + record.LabelSelectorRequirements, record.LabelSelectorParsingErr = nil, err + } else { + if requirements, _ /*selectable*/ := labelSelector.Requirements(); len(requirements) > 0 { + record.LabelSelectorRequirements, record.LabelSelectorParsingErr = requirements, nil + } + } + } + } + + decision, reason, err := authz.Authorize(ctx, record) + if err != nil { + err = fmt.Errorf("error while checking permission for %q, %w", record.Verb, err) + klog.FromContext(ctx).V(1).Error(err, "failed to authorize") + return admission.NewForbidden(attr, err) + } + if decision == authorizer.DecisionAllow { + return nil + } + + return admission.NewForbidden(attr, fmt.Errorf("not permitted to do %q, reason: %s", record.Verb, reason)) +} diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/get.go b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/get.go index 3cc4a728a..94a44c802 100644 --- a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/get.go +++ b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/get.go @@ -45,6 +45,7 @@ import ( utilfeature "k8s.io/apiserver/pkg/util/feature" "k8s.io/component-base/tracing" "k8s.io/klog/v2" + "k8s.io/utils/ptr" ) // getterFunc performs a get request with the given context and object name. The request @@ -57,6 +58,7 @@ func getResourceHandler(scope *RequestScope, getter getterFunc) http.HandlerFunc return func(w http.ResponseWriter, req *http.Request) { ctx := req.Context() ctx, span := tracing.Start(ctx, "Get", traceFields(req)...) + req = req.WithContext(ctx) defer span.End(500 * time.Millisecond) namespace, name, err := scope.Namer.Name(req) @@ -171,6 +173,7 @@ func ListResource(r rest.Lister, rw rest.Watcher, scope *RequestScope, forceWatc ctx := req.Context() // For performance tracking purposes. ctx, span := tracing.Start(ctx, "List", traceFields(req)...) 
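The selector handling above feeds parsed field and label selector requirements into the authorization attributes when AuthorizeWithSelectors is enabled, so an authorizer can scope its decision to them. A small standalone sketch of that parsing step, using only the apimachinery selector packages; the selector strings are examples.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/apimachinery/pkg/labels"
)

func main() {
	// field selectors parse into fields.Requirements
	fieldSel, err := fields.ParseSelector("metadata.name=corrupt-object")
	if err != nil {
		panic(err)
	}
	// label selectors parse into labels.Requirements (second result reports selectability)
	labelSel, err := labels.Parse("app=demo")
	if err != nil {
		panic(err)
	}
	labelReqs, _ := labelSel.Requirements()
	fmt.Println(len(fieldSel.Requirements()), len(labelReqs)) // 1 1
}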
+ req = req.WithContext(ctx) namespace, err := scope.Namer.Namespace(req) if err != nil { @@ -185,15 +188,8 @@ func ListResource(r rest.Lister, rw rest.Watcher, scope *RequestScope, forceWatc if err != nil { hasName = false } - ctx = request.WithNamespace(ctx, namespace) - outputMediaType, _, err := negotiation.NegotiateOutputMediaType(req, scope.Serializer, scope) - if err != nil { - scope.err(err, w, req) - return - } - opts := metainternalversion.ListOptions{} if err := metainternalversionscheme.ParameterCodec.DecodeParameters(req.URL.Query(), scope.MetaGroupVersion, &opts); err != nil { err = errors.NewBadRequest(err.Error()) @@ -208,6 +204,17 @@ func ListResource(r rest.Lister, rw rest.Watcher, scope *RequestScope, forceWatc return } + var restrictions negotiation.EndpointRestrictions + restrictions = scope + if isListWatchRequest(opts) { + restrictions = &watchListEndpointRestrictions{scope} + } + outputMediaType, _, err := negotiation.NegotiateOutputMediaType(req, scope.Serializer, restrictions) + if err != nil { + scope.err(err, w, req) + return + } + // transform fields // TODO: DecodeParametersInto should do this. if opts.FieldSelector != nil { @@ -258,6 +265,16 @@ func ListResource(r rest.Lister, rw rest.Watcher, scope *RequestScope, forceWatc if timeout == 0 && minRequestTimeout > 0 { timeout = time.Duration(float64(minRequestTimeout) * (rand.Float64() + 1.0)) } + + var emptyVersionedList runtime.Object + if isListWatchRequest(opts) { + emptyVersionedList, err = scope.Convertor.ConvertToVersion(r.NewList(), scope.Kind.GroupVersion()) + if err != nil { + scope.err(errors.NewInternalError(err), w, req) + return + } + } + klog.V(3).InfoS("Starting watch", "path", req.URL.Path, "resourceVersion", opts.ResourceVersion, "labels", opts.LabelSelector, "fields", opts.FieldSelector, "timeout", timeout) ctx, cancel := context.WithTimeout(ctx, timeout) defer func() { cancel() }() @@ -266,7 +283,7 @@ func ListResource(r rest.Lister, rw rest.Watcher, scope *RequestScope, forceWatc scope.err(err, w, req) return } - handler, err := serveWatchHandler(watcher, scope, outputMediaType, req, w, timeout, metrics.CleanListScope(ctx, &opts)) + handler, err := serveWatchHandler(watcher, scope, outputMediaType, req, w, timeout, metrics.CleanListScope(ctx, &opts), emptyVersionedList) if err != nil { scope.err(err, w, req) return @@ -307,3 +324,18 @@ func ListResource(r rest.Lister, rw rest.Watcher, scope *RequestScope, forceWatc transformResponseObject(ctx, scope, req, w, http.StatusOK, outputMediaType, result) } } + +type watchListEndpointRestrictions struct { + negotiation.EndpointRestrictions +} + +func (e *watchListEndpointRestrictions) AllowsMediaTypeTransform(mimeType, mimeSubType string, target *schema.GroupVersionKind) bool { + if target != nil && target.Kind == "Table" { + return false + } + return e.EndpointRestrictions.AllowsMediaTypeTransform(mimeType, mimeSubType, target) +} + +func isListWatchRequest(opts metainternalversion.ListOptions) bool { + return utilfeature.DefaultFeatureGate.Enabled(features.WatchList) && ptr.Deref(opts.SendInitialEvents, false) && opts.AllowWatchBookmarks +} diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/negotiation/negotiate.go b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/negotiation/negotiate.go index 9dbad1ea6..7667e6639 100644 --- a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/negotiation/negotiate.go +++ b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/negotiation/negotiate.go @@ -26,6 +26,8 @@ import ( "k8s.io/apimachinery/pkg/runtime" 
"k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apiserver/pkg/features" + utilfeature "k8s.io/apiserver/pkg/util/feature" ) // MediaTypesForSerializer returns a list of media and stream media types for the server. @@ -33,6 +35,10 @@ func MediaTypesForSerializer(ns runtime.NegotiatedSerializer) (mediaTypes, strea for _, info := range ns.SupportedMediaTypes() { mediaTypes = append(mediaTypes, info.MediaType) if info.StreamSerializer != nil { + if utilfeature.DefaultFeatureGate.Enabled(features.CBORServingAndStorage) && info.MediaType == runtime.ContentTypeCBOR { + streamMediaTypes = append(streamMediaTypes, runtime.ContentTypeCBORSequence) + continue + } // stream=watch is the existing mime-type parameter for watch streamMediaTypes = append(streamMediaTypes, info.MediaType+";stream=watch") } diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/patch.go b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/patch.go index 14278aed7..acfff1961 100644 --- a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/patch.go +++ b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/patch.go @@ -35,9 +35,11 @@ import ( "k8s.io/apimachinery/pkg/apis/meta/v1/validation" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" + cbor "k8s.io/apimachinery/pkg/runtime/serializer/cbor/direct" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/managedfields" "k8s.io/apimachinery/pkg/util/mergepatch" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/strategicpatch" "k8s.io/apimachinery/pkg/util/validation/field" @@ -50,8 +52,10 @@ import ( requestmetrics "k8s.io/apiserver/pkg/endpoints/handlers/metrics" "k8s.io/apiserver/pkg/endpoints/handlers/negotiation" "k8s.io/apiserver/pkg/endpoints/request" + "k8s.io/apiserver/pkg/features" "k8s.io/apiserver/pkg/registry/rest" "k8s.io/apiserver/pkg/util/dryrun" + utilfeature "k8s.io/apiserver/pkg/util/feature" "k8s.io/component-base/tracing" ) @@ -66,6 +70,7 @@ func PatchResource(r rest.Patcher, scope *RequestScope, admit admission.Interfac ctx := req.Context() // For performance tracking purposes. ctx, span := tracing.Start(ctx, "Patch", traceFields(req)...) + req = req.WithContext(ctx) defer span.End(500 * time.Millisecond) // Do this first, otherwise name extraction can fail for unrecognized content types @@ -128,10 +133,25 @@ func PatchResource(r rest.Patcher, scope *RequestScope, admit admission.Interfac audit.LogRequestPatch(req.Context(), patchBytes) span.AddEvent("Recorded the audit event") - baseContentType := runtime.ContentTypeJSON - if patchType == types.ApplyPatchType { + var baseContentType string + switch patchType { + case types.ApplyYAMLPatchType: baseContentType = runtime.ContentTypeYAML + case types.ApplyCBORPatchType: + if !utilfeature.DefaultFeatureGate.Enabled(features.CBORServingAndStorage) { + // This request should have already been rejected by the + // Content-Type allowlist check. Return 500 because assumptions are + // already broken and the feature is not GA. 
+ utilruntime.HandleErrorWithContext(req.Context(), nil, "The patch content-type allowlist check should have made this unreachable.") + scope.err(errors.NewInternalError(errors.NewInternalError(fmt.Errorf("unexpected patch type: %v", patchType))), w, req) + return + } + + baseContentType = runtime.ContentTypeCBOR + default: + baseContentType = runtime.ContentTypeJSON } + s, ok := runtime.SerializerInfoForMediaType(scope.Serializer.SupportedMediaTypes(), baseContentType) if !ok { scope.err(fmt.Errorf("no serializer defined for %v", baseContentType), w, req) @@ -451,6 +471,20 @@ func (p *smpPatcher) createNewObject(_ context.Context) (runtime.Object, error) return nil, errors.NewNotFound(p.resource.GroupResource(), p.name) } +func newApplyPatcher(p *patcher, fieldManager *managedfields.FieldManager, unmarshalFn, unmarshalStrictFn func([]byte, interface{}) error) *applyPatcher { + return &applyPatcher{ + fieldManager: fieldManager, + patch: p.patchBytes, + options: p.options, + creater: p.creater, + kind: p.kind, + userAgent: p.userAgent, + validationDirective: p.validationDirective, + unmarshalFn: unmarshalFn, + unmarshalStrictFn: unmarshalStrictFn, + } +} + type applyPatcher struct { patch []byte options *metav1.PatchOptions @@ -459,6 +493,8 @@ type applyPatcher struct { fieldManager *managedfields.FieldManager userAgent string validationDirective string + unmarshalFn func(data []byte, v interface{}) error + unmarshalStrictFn func(data []byte, v interface{}) error } func (p *applyPatcher) applyPatchToCurrentObject(requestContext context.Context, obj runtime.Object) (runtime.Object, error) { @@ -471,7 +507,7 @@ func (p *applyPatcher) applyPatchToCurrentObject(requestContext context.Context, } patchObj := &unstructured.Unstructured{Object: map[string]interface{}{}} - if err := yaml.Unmarshal(p.patch, &patchObj.Object); err != nil { + if err := p.unmarshalFn(p.patch, &patchObj.Object); err != nil { return nil, errors.NewBadRequest(fmt.Sprintf("error decoding YAML: %v", err)) } @@ -483,7 +519,7 @@ func (p *applyPatcher) applyPatchToCurrentObject(requestContext context.Context, // TODO: spawn something to track deciding whether a fieldValidation=Strict // fatal error should return before an error from the apply operation if p.validationDirective == metav1.FieldValidationStrict || p.validationDirective == metav1.FieldValidationWarn { - if err := yaml.UnmarshalStrict(p.patch, &map[string]interface{}{}); err != nil { + if err := p.unmarshalStrictFn(p.patch, &map[string]interface{}{}); err != nil { if p.validationDirective == metav1.FieldValidationStrict { return nil, errors.NewBadRequest(fmt.Sprintf("error strict decoding YAML: %v", err)) } @@ -633,16 +669,21 @@ func (p *patcher) patchResource(ctx context.Context, scope *RequestScope) (runti fieldManager: scope.FieldManager, } // this case is unreachable if ServerSideApply is not enabled because we will have already rejected the content type - case types.ApplyPatchType: - p.mechanism = &applyPatcher{ - fieldManager: scope.FieldManager, - patch: p.patchBytes, - options: p.options, - creater: p.creater, - kind: p.kind, - userAgent: p.userAgent, - validationDirective: p.validationDirective, - } + case types.ApplyYAMLPatchType: + p.mechanism = newApplyPatcher(p, scope.FieldManager, yaml.Unmarshal, yaml.UnmarshalStrict) + p.forceAllowCreate = true + case types.ApplyCBORPatchType: + if !utilfeature.DefaultFeatureGate.Enabled(features.CBORServingAndStorage) { + utilruntime.HandleErrorWithContext(context.TODO(), nil, "CBOR apply requests should be rejected 
before reaching this point unless the feature gate is enabled.") + return nil, false, fmt.Errorf("%v: unimplemented patch type", p.patchType) + } + + // The strict and non-strict funcs are the same here because any CBOR map with + // duplicate keys is invalid and always rejected outright regardless of strictness + // mode, and unknown field errors can't occur in practice because the type of the + // destination value for unmarshaling an apply configuration is always + // "unstructured". + p.mechanism = newApplyPatcher(p, scope.FieldManager, cbor.Unmarshal, cbor.Unmarshal) p.forceAllowCreate = true default: return nil, false, fmt.Errorf("%v: unimplemented patch type", p.patchType) @@ -669,7 +710,7 @@ func (p *patcher) patchResource(ctx context.Context, scope *RequestScope) (runti result, err := requestFunc() // If the object wasn't committed to storage because it's serialized size was too large, // it is safe to remove managedFields (which can be large) and try again. - if isTooLargeError(err) && p.patchType != types.ApplyPatchType { + if isTooLargeError(err) && p.patchType != types.ApplyYAMLPatchType && p.patchType != types.ApplyCBORPatchType { if _, accessorErr := meta.Accessor(p.restPatcher.New()); accessorErr == nil { p.updatedObjectInfo = rest.DefaultUpdatedObjectInfo(nil, p.applyPatch, diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/response.go b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/response.go index 348b1092d..3f7ad6121 100644 --- a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/response.go +++ b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/response.go @@ -18,6 +18,7 @@ package handlers import ( "context" + "encoding/base64" "encoding/json" "fmt" "io" @@ -38,8 +39,9 @@ import ( "k8s.io/apiserver/pkg/endpoints/handlers/responsewriters" "k8s.io/apiserver/pkg/endpoints/metrics" endpointsrequest "k8s.io/apiserver/pkg/endpoints/request" - - klog "k8s.io/klog/v2" + "k8s.io/apiserver/pkg/storage" + "k8s.io/apiserver/pkg/util/apihelpers" + "k8s.io/klog/v2" ) // watchEmbeddedEncoder performs encoding of the embedded object. @@ -147,6 +149,8 @@ type watchEncoder struct { encoder runtime.Encoder framer io.Writer + watchListTransformerFn watchListTransformerFunction + buffer runtime.Splice eventBuffer runtime.Splice @@ -154,15 +158,16 @@ type watchEncoder struct { identifiers map[watch.EventType]runtime.Identifier } -func newWatchEncoder(ctx context.Context, kind schema.GroupVersionKind, embeddedEncoder runtime.Encoder, encoder runtime.Encoder, framer io.Writer) *watchEncoder { +func newWatchEncoder(ctx context.Context, kind schema.GroupVersionKind, embeddedEncoder runtime.Encoder, encoder runtime.Encoder, framer io.Writer, watchListTransformerFn watchListTransformerFunction) *watchEncoder { return &watchEncoder{ - ctx: ctx, - kind: kind, - embeddedEncoder: embeddedEncoder, - encoder: encoder, - framer: framer, - buffer: runtime.NewSpliceBuffer(), - eventBuffer: runtime.NewSpliceBuffer(), + ctx: ctx, + kind: kind, + embeddedEncoder: embeddedEncoder, + encoder: encoder, + framer: framer, + watchListTransformerFn: watchListTransformerFn, + buffer: runtime.NewSpliceBuffer(), + eventBuffer: runtime.NewSpliceBuffer(), } } @@ -174,6 +179,12 @@ func (e *watchEncoder) Encode(event watch.Event) error { encodeFunc := func(obj runtime.Object, w io.Writer) error { return e.doEncode(obj, event, w) } + if event.Type == watch.Bookmark { + // Bookmark objects are small, and we don't yet support serialization for them. 
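newApplyPatcher above parameterizes a single shared apply flow with the decode functions for the wire format, so the YAML and CBOR branches differ only in which unmarshalers they inject (and for CBOR the strict and lenient functions are the same, per the comment above). A self-contained sketch of that injection pattern, with encoding/json standing in for the YAML/CBOR decoders.

package main

import (
	"encoding/json"
	"fmt"
)

// applySketch holds injected decoders the way applyPatcher does above.
type applySketch struct {
	unmarshalFn       func(data []byte, v interface{}) error
	unmarshalStrictFn func(data []byte, v interface{}) error
}

func main() {
	// json.Unmarshal is a stand-in; the real code injects yaml.Unmarshal/UnmarshalStrict
	// or cbor.Unmarshal depending on the apply patch content type.
	p := applySketch{unmarshalFn: json.Unmarshal, unmarshalStrictFn: json.Unmarshal}
	var patch map[string]interface{}
	if err := p.unmarshalFn([]byte(`{"spec":{"replicas":3}}`), &patch); err != nil {
		panic(err)
	}
	fmt.Println(patch["spec"]) // map[replicas:3]
}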
+ // Additionally, we need to additionally transform them to support watch-list feature + event = e.watchListTransformerFn(event) + return encodeFunc(event.Object, e.framer) + } if co, ok := event.Object.(runtime.CacheableObject); ok { return co.CacheEncode(e.identifier(event.Type), encodeFunc, e.framer) } @@ -270,7 +281,7 @@ func doTransformObject(ctx context.Context, obj runtime.Object, opts interface{} return asTable(ctx, obj, options, scope, target.GroupVersion()) default: - accepted, _ := negotiation.MediaTypesForSerializer(metainternalversionscheme.Codecs) + accepted, _ := negotiation.MediaTypesForSerializer(apihelpers.GetMetaInternalVersionCodecs()) err := negotiation.NewNotAcceptableError(accepted) return nil, err } @@ -304,7 +315,7 @@ func targetEncodingForTransform(scope *RequestScope, mediaType negotiation.Media case target == nil: case (target.Kind == "PartialObjectMetadata" || target.Kind == "PartialObjectMetadataList" || target.Kind == "Table") && (target.GroupVersion() == metav1beta1.SchemeGroupVersion || target.GroupVersion() == metav1.SchemeGroupVersion): - return *target, metainternalversionscheme.Codecs, true + return *target, apihelpers.GetMetaInternalVersionCodecs(), true } return scope.Kind, scope.Serializer, false } @@ -479,3 +490,94 @@ func asPartialObjectMetadataList(result runtime.Object, groupVersion schema.Grou return nil, newNotAcceptableError(fmt.Sprintf("no PartialObjectMetadataList exists in group version %s", groupVersion)) } } + +// watchListTransformerFunction an optional function +// applied to watchlist bookmark events that transforms +// the embedded object before sending it to a client. +type watchListTransformerFunction func(watch.Event) watch.Event + +// watchListTransformer performs transformation of +// a special watchList bookmark event. +// +// The bookmark is annotated with InitialEventsListBlueprintAnnotationKey +// and contains an empty, versioned list that we must encode in the requested format +// (e.g., protobuf, JSON, CBOR) and then store as a base64-encoded string. +type watchListTransformer struct { + initialEventsListBlueprint runtime.Object + targetGVK *schema.GroupVersionKind + negotiatedEncoder runtime.Encoder + buffer runtime.Splice +} + +// createWatchListTransformerIfRequested returns a transformer function for watchlist bookmark event. 
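The watchListTransformer introduced here rewrites the initial-events-end bookmark: an empty, versioned list is encoded in the negotiated format, base64-encoded, and stored in a well-known annotation so a watch-list client can reconstruct the list envelope without a separate LIST call. A simplified sketch of that encoding step; json.Marshal stands in for the negotiated encoder, and the annotation key is an assumed mirror of metav1.InitialEventsListBlueprintAnnotationKey.

package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
)

// assumed to mirror metav1.InitialEventsListBlueprintAnnotationKey
const initialEventsListBlueprintAnnotation = "kubernetes.io/initial-events-list-blueprint"

// annotateBookmark encodes the empty versioned list and stores it on the bookmark's
// annotations, roughly what encodeInitialEventsListBlueprint does with the negotiated
// encoder and a splice buffer.
func annotateBookmark(annotations map[string]string, emptyVersionedList interface{}) error {
	encoded, err := json.Marshal(emptyVersionedList)
	if err != nil {
		return err
	}
	annotations[initialEventsListBlueprintAnnotation] = base64.StdEncoding.EncodeToString(encoded)
	return nil
}

func main() {
	ann := map[string]string{}
	blueprint := map[string]interface{}{"apiVersion": "v1", "kind": "ConfigMapList", "items": []interface{}{}}
	if err := annotateBookmark(ann, blueprint); err != nil {
		panic(err)
	}
	fmt.Println(ann[initialEventsListBlueprintAnnotation])
}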
+func newWatchListTransformer(initialEventsListBlueprint runtime.Object, targetGVK *schema.GroupVersionKind, negotiatedEncoder runtime.Encoder) *watchListTransformer { + return &watchListTransformer{ + initialEventsListBlueprint: initialEventsListBlueprint, + targetGVK: targetGVK, + negotiatedEncoder: negotiatedEncoder, + buffer: runtime.NewSpliceBuffer(), + } +} + +func (e *watchListTransformer) transform(event watch.Event) watch.Event { + if e.initialEventsListBlueprint == nil { + return event + } + hasAnnotation, err := storage.HasInitialEventsEndBookmarkAnnotation(event.Object) + if err != nil { + return newWatchEventErrorFor(err) + } + if !hasAnnotation { + return event + } + + if err = e.encodeInitialEventsListBlueprint(event.Object); err != nil { + return newWatchEventErrorFor(err) + } + + return event +} + +func (e *watchListTransformer) encodeInitialEventsListBlueprint(object runtime.Object) error { + initialEventsListBlueprint, err := e.transformInitialEventsListBlueprint() + if err != nil { + return err + } + + defer e.buffer.Reset() + if err = e.negotiatedEncoder.Encode(initialEventsListBlueprint, e.buffer); err != nil { + return err + } + encodedInitialEventsListBlueprint := e.buffer.Bytes() + + // the storage layer creates a deep copy of the obj before modifying it. + // since the object has the annotation, we can modify it directly. + objectMeta, err := meta.Accessor(object) + if err != nil { + return err + } + annotations := objectMeta.GetAnnotations() + annotations[metav1.InitialEventsListBlueprintAnnotationKey] = base64.StdEncoding.EncodeToString(encodedInitialEventsListBlueprint) + objectMeta.SetAnnotations(annotations) + + return nil +} + +func (e *watchListTransformer) transformInitialEventsListBlueprint() (runtime.Object, error) { + if e.targetGVK != nil && e.targetGVK.Kind == "PartialObjectMetadata" { + return asPartialObjectMetadataList(e.initialEventsListBlueprint, e.targetGVK.GroupVersion()) + } + return e.initialEventsListBlueprint, nil +} + +func newWatchEventErrorFor(err error) watch.Event { + return watch.Event{ + Type: watch.Error, + Object: &metav1.Status{ + Status: metav1.StatusFailure, + Message: err.Error(), + Reason: metav1.StatusReasonInternalError, + Code: http.StatusInternalServerError, + }, + } +} diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/responsewriters/errors.go b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/responsewriters/errors.go index d13bee4d2..07316e802 100644 --- a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/responsewriters/errors.go +++ b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/responsewriters/errors.go @@ -34,18 +34,24 @@ var sanitizer = strings.NewReplacer(`&`, "&", `<`, "<", `>`, ">") // Forbidden renders a simple forbidden error func Forbidden(ctx context.Context, attributes authorizer.Attributes, w http.ResponseWriter, req *http.Request, reason string, s runtime.NegotiatedSerializer) { - msg := sanitizer.Replace(forbiddenMessage(attributes)) w.Header().Set("X-Content-Type-Options", "nosniff") + gv := schema.GroupVersion{Group: attributes.GetAPIGroup(), Version: attributes.GetAPIVersion()} + ErrorNegotiated(ForbiddenStatusError(attributes, reason), s, gv, w, req) +} + +func ForbiddenStatusError(attributes authorizer.Attributes, reason string) *apierrors.StatusError { + msg := sanitizer.Replace(forbiddenMessage(attributes)) - var errMsg string + var errMsg error if len(reason) == 0 { - errMsg = fmt.Sprintf("%s", msg) + errMsg = fmt.Errorf("%s", msg) } else { - errMsg = fmt.Sprintf("%s: %s", msg, reason) + 
errMsg = fmt.Errorf("%s: %s", msg, reason) } - gv := schema.GroupVersion{Group: attributes.GetAPIGroup(), Version: attributes.GetAPIVersion()} + gr := schema.GroupResource{Group: attributes.GetAPIGroup(), Resource: attributes.GetResource()} - ErrorNegotiated(apierrors.NewForbidden(gr, attributes.GetName(), fmt.Errorf(errMsg)), s, gv, w, req) + + return apierrors.NewForbidden(gr, attributes.GetName(), errMsg) } func forbiddenMessage(attributes authorizer.Attributes) string { diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/responsewriters/writers.go b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/responsewriters/writers.go index acd8f0357..3ca5cba8c 100644 --- a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/responsewriters/writers.go +++ b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/responsewriters/writers.go @@ -98,6 +98,7 @@ func SerializeObject(mediaType string, encoder runtime.Encoder, hw http.Response attribute.String("protocol", req.Proto), attribute.String("mediaType", mediaType), attribute.String("encoder", string(encoder.Identifier()))) + req = req.WithContext(ctx) defer span.End(5 * time.Second) w := &deferredResponseWriter{ @@ -284,7 +285,12 @@ func WriteObjectNegotiated(s runtime.NegotiatedSerializer, restrictions negotiat audit.LogResponseObject(req.Context(), object, gv, s) - encoder := s.EncoderForVersion(serializer.Serializer, gv) + var encoder runtime.Encoder + if utilfeature.DefaultFeatureGate.Enabled(features.CBORServingAndStorage) { + encoder = s.EncoderForVersion(runtime.UseNondeterministicEncoding(serializer.Serializer), gv) + } else { + encoder = s.EncoderForVersion(serializer.Serializer, gv) + } request.TrackSerializeResponseObjectLatency(req.Context(), func() { if listGVKInContentType { SerializeObject(generateMediaTypeWithGVK(serializer.MediaType, mediaType.Convert), encoder, w, req, statusCode, object) diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/update.go b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/update.go index 4b76ef97e..dcc57480e 100644 --- a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/update.go +++ b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/update.go @@ -39,6 +39,7 @@ import ( "k8s.io/apiserver/pkg/endpoints/handlers/finisher" requestmetrics "k8s.io/apiserver/pkg/endpoints/handlers/metrics" "k8s.io/apiserver/pkg/endpoints/handlers/negotiation" + "k8s.io/apiserver/pkg/endpoints/handlers/responsewriters" "k8s.io/apiserver/pkg/endpoints/request" "k8s.io/apiserver/pkg/registry/rest" "k8s.io/apiserver/pkg/util/dryrun" @@ -52,6 +53,7 @@ func UpdateResource(r rest.Updater, scope *RequestScope, admit admission.Interfa ctx := req.Context() // For performance tracking purposes. ctx, span := tracing.Start(ctx, "Update", traceFields(req)...) 
+ req = req.WithContext(ctx) defer span.End(500 * time.Millisecond) namespace, name, err := scope.Namer.Name(req) @@ -275,13 +277,7 @@ func withAuthorization(validate rest.ValidateObjectFunc, a authorizer.Authorizer } // The user is not authorized to perform this action, so we need to build the error response - gr := schema.GroupResource{ - Group: attributes.GetAPIGroup(), - Resource: attributes.GetResource(), - } - name := attributes.GetName() - err := fmt.Errorf("%v", authorizerReason) - return errors.NewForbidden(gr, name, err) + return responsewriters.ForbiddenStatusError(attributes, authorizerReason) } } diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/watch.go b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/watch.go index 6a9257d10..c239d1f7a 100644 --- a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/watch.go +++ b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/watch.go @@ -64,7 +64,7 @@ func (w *realTimeoutFactory) TimeoutCh() (<-chan time.Time, func() bool) { // serveWatchHandler returns a handle to serve a watch response. // TODO: the functionality in this method and in WatchServer.Serve is not cleanly decoupled. -func serveWatchHandler(watcher watch.Interface, scope *RequestScope, mediaTypeOptions negotiation.MediaTypeOptions, req *http.Request, w http.ResponseWriter, timeout time.Duration, metricsScope string) (http.Handler, error) { +func serveWatchHandler(watcher watch.Interface, scope *RequestScope, mediaTypeOptions negotiation.MediaTypeOptions, req *http.Request, w http.ResponseWriter, timeout time.Duration, metricsScope string, initialEventsListBlueprint runtime.Object) (http.Handler, error) { options, err := optionsForTransform(mediaTypeOptions, req) if err != nil { return nil, err @@ -76,40 +76,62 @@ func serveWatchHandler(watcher watch.Interface, scope *RequestScope, mediaTypeOp return nil, err } framer := serializer.StreamSerializer.Framer - streamSerializer := serializer.StreamSerializer.Serializer - encoder := scope.Serializer.EncoderForVersion(streamSerializer, scope.Kind.GroupVersion()) + var encoder runtime.Encoder + if utilfeature.DefaultFeatureGate.Enabled(features.CBORServingAndStorage) { + encoder = scope.Serializer.EncoderForVersion(runtime.UseNondeterministicEncoding(serializer.StreamSerializer.Serializer), scope.Kind.GroupVersion()) + } else { + encoder = scope.Serializer.EncoderForVersion(serializer.StreamSerializer.Serializer, scope.Kind.GroupVersion()) + } useTextFraming := serializer.EncodesAsText if framer == nil { return nil, fmt.Errorf("no framer defined for %q available for embedded encoding", serializer.MediaType) } // TODO: next step, get back mediaTypeOptions from negotiate and return the exact value here mediaType := serializer.MediaType - if mediaType != runtime.ContentTypeJSON { + switch mediaType { + case runtime.ContentTypeJSON: + // as-is + case runtime.ContentTypeCBOR: + // If a client indicated it accepts application/cbor (exactly one data item) on a + // watch request, set the conformant application/cbor-seq media type the watch + // response. RFC 9110 allows an origin server to deviate from the indicated + // preference rather than send a 406 (Not Acceptable) response (see + // https://www.rfc-editor.org/rfc/rfc9110.html#section-12.1-5). 
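A watch response carries a stream of items, which is why the CBOR case here sets the response media type to the CBOR Sequence type (RFC 8742) rather than appending ";stream=watch". A tiny sketch of that mapping with local literals; the literal values are assumed to match runtime.ContentTypeJSON, runtime.ContentTypeCBOR, and runtime.ContentTypeCBORSequence.

package main

import "fmt"

// watchMediaType shows the response media type chosen for a watch stream.
func watchMediaType(negotiated string) string {
	switch negotiated {
	case "application/json": // assumed value of runtime.ContentTypeJSON
		return "application/json" // served as-is
	case "application/cbor": // assumed value of runtime.ContentTypeCBOR
		return "application/cbor-seq" // CBOR Sequences, RFC 8742
	default:
		return negotiated + ";stream=watch"
	}
}

func main() {
	fmt.Println(watchMediaType("application/cbor"))                    // application/cbor-seq
	fmt.Println(watchMediaType("application/vnd.kubernetes.protobuf")) // ...;stream=watch
}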
+ mediaType = runtime.ContentTypeCBORSequence + default: mediaType += ";stream=watch" } ctx := req.Context() // locate the appropriate embedded encoder based on the transform - var embeddedEncoder runtime.Encoder + var negotiatedEncoder runtime.Encoder contentKind, contentSerializer, transform := targetEncodingForTransform(scope, mediaTypeOptions, req) if transform { info, ok := runtime.SerializerInfoForMediaType(contentSerializer.SupportedMediaTypes(), serializer.MediaType) if !ok { return nil, fmt.Errorf("no encoder for %q exists in the requested target %#v", serializer.MediaType, contentSerializer) } - embeddedEncoder = contentSerializer.EncoderForVersion(info.Serializer, contentKind.GroupVersion()) + if utilfeature.DefaultFeatureGate.Enabled(features.CBORServingAndStorage) { + negotiatedEncoder = contentSerializer.EncoderForVersion(runtime.UseNondeterministicEncoding(info.Serializer), contentKind.GroupVersion()) + } else { + negotiatedEncoder = contentSerializer.EncoderForVersion(info.Serializer, contentKind.GroupVersion()) + } } else { - embeddedEncoder = scope.Serializer.EncoderForVersion(serializer.Serializer, contentKind.GroupVersion()) + if utilfeature.DefaultFeatureGate.Enabled(features.CBORServingAndStorage) { + negotiatedEncoder = scope.Serializer.EncoderForVersion(runtime.UseNondeterministicEncoding(serializer.Serializer), contentKind.GroupVersion()) + } else { + negotiatedEncoder = scope.Serializer.EncoderForVersion(serializer.Serializer, contentKind.GroupVersion()) + } } var memoryAllocator runtime.MemoryAllocator - if encoderWithAllocator, supportsAllocator := embeddedEncoder.(runtime.EncoderWithAllocator); supportsAllocator { + if encoderWithAllocator, supportsAllocator := negotiatedEncoder.(runtime.EncoderWithAllocator); supportsAllocator { // don't put the allocator inside the embeddedEncodeFn as that would allocate memory on every call. // instead, we allocate the buffer for the entire watch session and release it when we close the connection. 
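The allocator handling here is a pooling pattern: one buffer is borrowed per watch connection and returned when the connection closes, so the per-event encode path never allocates. A standard-library sketch of the same idea using sync.Pool and bytes.Buffer; the names are illustrative and not the runtime.Allocator API.

package main

import (
	"bytes"
	"fmt"
	"sync"
)

var bufferPool = sync.Pool{New: func() interface{} { return new(bytes.Buffer) }}

// serveWatchSession borrows a buffer for the lifetime of one connection and reuses it
// for every event, mirroring the per-session allocator reuse described above.
func serveWatchSession(events []string) {
	buf := bufferPool.Get().(*bytes.Buffer)
	defer func() {
		buf.Reset()
		bufferPool.Put(buf) // released when the connection closes
	}()

	for _, e := range events {
		buf.Reset()
		buf.WriteString(e) // stands in for encoding one watch event
		fmt.Println(buf.Len())
	}
}

func main() { serveWatchSession([]string{"ADDED", "MODIFIED", "BOOKMARK"}) }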
memoryAllocator = runtime.AllocatorPool.Get().(*runtime.Allocator) - embeddedEncoder = runtime.NewEncoderWithAllocator(encoderWithAllocator, memoryAllocator) + negotiatedEncoder = runtime.NewEncoderWithAllocator(encoderWithAllocator, memoryAllocator) } var tableOptions *metav1.TableOptions if options != nil { @@ -119,7 +141,7 @@ func serveWatchHandler(watcher watch.Interface, scope *RequestScope, mediaTypeOp return nil, fmt.Errorf("unexpected options type: %T", options) } } - embeddedEncoder = newWatchEmbeddedEncoder(ctx, embeddedEncoder, mediaTypeOptions.Convert, tableOptions, scope) + embeddedEncoder := newWatchEmbeddedEncoder(ctx, negotiatedEncoder, mediaTypeOptions.Convert, tableOptions, scope) if encoderWithAllocator, supportsAllocator := encoder.(runtime.EncoderWithAllocator); supportsAllocator { if memoryAllocator == nil { @@ -145,6 +167,8 @@ func serveWatchHandler(watcher watch.Interface, scope *RequestScope, mediaTypeOp Encoder: encoder, EmbeddedEncoder: embeddedEncoder, + watchListTransformerFn: newWatchListTransformer(initialEventsListBlueprint, mediaTypeOptions.Convert, negotiatedEncoder).transform, + MemoryAllocator: memoryAllocator, TimeoutFactory: &realTimeoutFactory{timeout}, ServerShuttingDownCh: serverShuttingDownCh, @@ -174,6 +198,10 @@ type WatchServer struct { Encoder runtime.Encoder // used to encode the nested object in the watch stream EmbeddedEncoder runtime.Encoder + // watchListTransformerFn a function applied + // to watchlist bookmark events that transforms + // the embedded object before sending it to a client. + watchListTransformerFn watchListTransformerFunction MemoryAllocator runtime.MemoryAllocator TimeoutFactory TimeoutFactory @@ -219,7 +247,7 @@ func (s *WatchServer) HandleHTTP(w http.ResponseWriter, req *http.Request) { flusher.Flush() kind := s.Scope.Kind - watchEncoder := newWatchEncoder(req.Context(), kind, s.EmbeddedEncoder, s.Encoder, framer) + watchEncoder := newWatchEncoder(req.Context(), kind, s.EmbeddedEncoder, s.Encoder, framer, s.watchListTransformerFn) ch := s.Watching.ResultChan() done := req.Context().Done() @@ -288,7 +316,7 @@ func (s *WatchServer) HandleWS(ws *websocket.Conn) { framer := newWebsocketFramer(ws, s.UseTextFraming) kind := s.Scope.Kind - watchEncoder := newWatchEncoder(context.TODO(), kind, s.EmbeddedEncoder, s.Encoder, framer) + watchEncoder := newWatchEncoder(context.TODO(), kind, s.EmbeddedEncoder, s.Encoder, framer, s.watchListTransformerFn) ch := s.Watching.ResultChan() for { diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/installer.go b/vendor/k8s.io/apiserver/pkg/endpoints/installer.go index 0a0fdde0d..f9dec9031 100644 --- a/vendor/k8s.io/apiserver/pkg/endpoints/installer.go +++ b/vendor/k8s.io/apiserver/pkg/endpoints/installer.go @@ -685,9 +685,27 @@ func (a *APIInstaller) registerResourceHandlers(path string, storage rest.Storag reqScope.MetaGroupVersion = *a.group.MetaGroupVersion } - var resetFields map[fieldpath.APIVersion]*fieldpath.Set - if resetFieldsStrategy, isResetFieldsStrategy := storage.(rest.ResetFieldsStrategy); isResetFieldsStrategy { - resetFields = resetFieldsStrategy.GetResetFields() + // Strategies may ignore changes to some fields by resetting the field values. + // + // For instance, spec resource strategies should reset the status, and status subresource + // strategies should reset the spec. + // + // Strategies that reset fields must report to the field manager which fields are + // reset by implementing either the ResetFieldsStrategy or the ResetFieldsFilterStrategy + // interface. 
+ // + // For subresources that provide write access to only specific nested fields + // fieldpath.NewPatternFilter can help create a filter to reset all other fields. + var resetFieldsFilter map[fieldpath.APIVersion]fieldpath.Filter + resetFieldsStrategy, isResetFieldsStrategy := storage.(rest.ResetFieldsStrategy) + if isResetFieldsStrategy { + resetFieldsFilter = fieldpath.NewExcludeFilterSetMap(resetFieldsStrategy.GetResetFields()) + } + if resetFieldsStrategy, isResetFieldsFilterStrategy := storage.(rest.ResetFieldsFilterStrategy); isResetFieldsFilterStrategy { + if isResetFieldsStrategy { + return nil, nil, fmt.Errorf("may not implement both ResetFieldsStrategy and ResetFieldsFilterStrategy") + } + resetFieldsFilter = resetFieldsStrategy.GetResetFieldsFilter() } reqScope.FieldManager, err = managedfields.NewDefaultFieldManager( @@ -698,7 +716,7 @@ func (a *APIInstaller) registerResourceHandlers(path string, storage rest.Storag fqKindToRegister, reqScope.HubGroupVersion, subresource, - resetFields, + resetFieldsFilter, ) if err != nil { return nil, nil, fmt.Errorf("failed to create field manager: %v", err) @@ -875,7 +893,10 @@ func (a *APIInstaller) registerResourceHandlers(path string, storage rest.Storag string(types.JSONPatchType), string(types.MergePatchType), string(types.StrategicMergePatchType), - string(types.ApplyPatchType), + string(types.ApplyYAMLPatchType), + } + if utilfeature.DefaultFeatureGate.Enabled(features.CBORServingAndStorage) { + supportedTypes = append(supportedTypes, string(types.ApplyCBORPatchType)) } handler := metrics.InstrumentRouteFunc(action.Verb, group, version, resource, subresource, requestScope, metrics.APIServerComponent, deprecated, removedRelease, restfulPatchResource(patcher, reqScope, admit, supportedTypes)) handler = utilwarning.AddWarningsHandler(handler, warnings) @@ -1195,6 +1216,8 @@ func typeToJSON(typeName string) string { return "string" case "v1.IncludeObjectPolicy", "*v1.IncludeObjectPolicy": return "string" + case "*string": + return "string" // TODO: Fix these when go-restful supports a way to specify an array query param: // https://github.com/emicklei/go-restful/issues/225 diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/metrics/metrics.go b/vendor/k8s.io/apiserver/pkg/endpoints/metrics/metrics.go index 48fc951ad..0c7a3f6b9 100644 --- a/vendor/k8s.io/apiserver/pkg/endpoints/metrics/metrics.go +++ b/vendor/k8s.io/apiserver/pkg/endpoints/metrics/metrics.go @@ -416,6 +416,33 @@ func Reset() { } } +// ResetLabelAllowLists resets the label allow lists for all metrics. +// NOTE: This is only used for testing. +func ResetLabelAllowLists() { + for _, metric := range metrics { + if counterVec, ok := metric.(*compbasemetrics.CounterVec); ok { + counterVec.ResetLabelAllowLists() + continue + } + if gaugeVec, ok := metric.(*compbasemetrics.GaugeVec); ok { + gaugeVec.ResetLabelAllowLists() + continue + } + if histogramVec, ok := metric.(*compbasemetrics.HistogramVec); ok { + histogramVec.ResetLabelAllowLists() + continue + } + if summaryVec, ok := metric.(*compbasemetrics.SummaryVec); ok { + summaryVec.ResetLabelAllowLists() + continue + } + if timingHistogramVec, ok := metric.(*compbasemetrics.TimingHistogramVec); ok { + timingHistogramVec.ResetLabelAllowLists() + continue + } + } +} + // UpdateInflightRequestMetrics reports concurrency metrics classified by // mutating vs Readonly. 
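The reset-fields handling above lets a strategy tell the field manager which fields it silently resets (for example, a status strategy resetting spec), so ownership of those fields is not attributed to the requester. A hedged sketch of the older set-based ResetFieldsStrategy shape for a hypothetical v1 status strategy; the filter-based variant wraps the same data via fieldpath.NewExcludeFilterSetMap, as shown above.

package main

import (
	"fmt"

	"sigs.k8s.io/structured-merge-diff/v4/fieldpath"
)

// getResetFields mimics the shape of rest.ResetFieldsStrategy.GetResetFields for a
// hypothetical status subresource strategy that resets the spec on writes.
func getResetFields() map[fieldpath.APIVersion]*fieldpath.Set {
	return map[fieldpath.APIVersion]*fieldpath.Set{
		"v1": fieldpath.NewSet(
			fieldpath.MakePathOrDie("spec"),
		),
	}
}

func main() {
	fmt.Println(len(getResetFields()), "API version(s) report reset fields")
}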
func UpdateInflightRequestMetrics(phase string, nonmutating, mutating int) { diff --git a/vendor/k8s.io/apiserver/pkg/features/kube_features.go b/vendor/k8s.io/apiserver/pkg/features/kube_features.go index 27d9761b8..c23343346 100644 --- a/vendor/k8s.io/apiserver/pkg/features/kube_features.go +++ b/vendor/k8s.io/apiserver/pkg/features/kube_features.go @@ -18,6 +18,7 @@ package features import ( "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/version" utilfeature "k8s.io/apiserver/pkg/util/feature" "k8s.io/component-base/featuregate" ) @@ -26,7 +27,6 @@ const ( // Every feature gate should add method here following this template: // // // owner: @username - // // alpha: v1.4 // MyFeature featuregate.Feature = "MyFeature" // // Feature gates should be listed in alphabetical, case-sensitive @@ -35,8 +35,6 @@ const ( // across the file. // owner: @ivelichkovich, @tallclair - // alpha: v1.27 - // beta: v1.28 // stable: v1.30 // kep: https://kep.k8s.io/3716 // @@ -44,8 +42,6 @@ const ( AdmissionWebhookMatchConditions featuregate.Feature = "AdmissionWebhookMatchConditions" // owner: @jefftree @alexzielenski - // alpha: v1.26 - // beta: v1.27 // stable: v1.30 // // Enables an single HTTP endpoint /discovery/ which supports native HTTP @@ -54,14 +50,20 @@ const ( // owner: @vinayakankugoyal // kep: https://kep.k8s.io/4633 - // alpha: v1.31 // // Allows us to enable anonymous auth for only certain apiserver endpoints. AnonymousAuthConfigurableEndpoints featuregate.Feature = "AnonymousAuthConfigurableEndpoints" + // owner: @stlaz @tkashem @dgrisonnet + // kep: https://kep.k8s.io/3926 + // + // Enables the cluster admin to identify resources that fail to + // decrypt or fail to be decoded into an object, and introduces + // a new delete option to allow deletion of such corrupt + // resources using the Kubernetes API only. + AllowUnsafeMalformedObjectDeletion featuregate.Feature = "AllowUnsafeMalformedObjectDeletion" + // owner: @smarterclayton - // alpha: v1.8 - // beta: v1.9 // stable: 1.29 // // Allow API clients to retrieve resource lists in chunks rather than @@ -69,63 +71,53 @@ const ( APIListChunking featuregate.Feature = "APIListChunking" // owner: @ilackams - // alpha: v1.7 - // beta: v1.16 // // Enables compression of REST responses (GET and LIST only) APIResponseCompression featuregate.Feature = "APIResponseCompression" // owner: @roycaihw - // alpha: v1.20 // // Assigns each kube-apiserver an ID in a cluster. APIServerIdentity featuregate.Feature = "APIServerIdentity" // owner: @dashpole - // alpha: v1.22 - // beta: v1.27 // // Add support for distributed tracing in the API Server APIServerTracing featuregate.Feature = "APIServerTracing" // owner: @linxiulei - // beta: v1.30 // // Enables serving watch requests in separate goroutines. APIServingWithRoutine featuregate.Feature = "APIServingWithRoutine" // owner: @deads2k // kep: https://kep.k8s.io/4601 - // alpha: v1.31 // // Allows authorization to use field and label selectors. AuthorizeWithSelectors featuregate.Feature = "AuthorizeWithSelectors" + // owner: @benluddy + // kep: https://kep.k8s.io/4222 + // + // Enables CBOR as a supported encoding for requests and responses, and as the + // preferred storage encoding for custom resources. + CBORServingAndStorage featuregate.Feature = "CBORServingAndStorage" + + // owner: @serathius + // + // Replaces watch cache hashmap implementation with a btree based one, bringing performance improvements. 
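The gates declared above (AllowUnsafeMalformedObjectDeletion, CBORServingAndStorage, BtreeWatchCache) are plain feature-gate keys; in a running apiserver they are flipped through --feature-gates rather than code. A small sketch of how such a key behaves on a standalone gate instance, mostly useful in tests; the spec values here are illustrative.

package main

import (
	"fmt"

	"k8s.io/component-base/featuregate"
)

const demoGate featuregate.Feature = "AllowUnsafeMalformedObjectDeletion"

func main() {
	gate := featuregate.NewFeatureGate()
	if err := gate.Add(map[featuregate.Feature]featuregate.FeatureSpec{
		demoGate: {Default: false, PreRelease: featuregate.Alpha}, // illustrative spec
	}); err != nil {
		panic(err)
	}
	fmt.Println(gate.Enabled(demoGate)) // false: alpha gates default to off

	// equivalent to passing --feature-gates=AllowUnsafeMalformedObjectDeletion=true
	if err := gate.Set(string(demoGate) + "=true"); err != nil {
		panic(err)
	}
	fmt.Println(gate.Enabled(demoGate)) // true
}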
+ BtreeWatchCache featuregate.Feature = "BtreeWatchCache" + // owner: @serathius - // beta: v1.31 // Enables concurrent watch object decoding to avoid starving watch cache when conversion webhook is installed. ConcurrentWatchObjectDecode featuregate.Feature = "ConcurrentWatchObjectDecode" - // owner: @cici37 @jpbetz - // kep: http://kep.k8s.io/3488 - // alpha: v1.26 - // beta: v1.28 - // stable: v1.30 - // - // Note: the feature gate can be removed in 1.32 - // Enables expression validation in Admission Control - ValidatingAdmissionPolicy featuregate.Feature = "ValidatingAdmissionPolicy" - // owner: @jefftree // kep: https://kep.k8s.io/4355 - // alpha: v1.31 // // Enables coordinated leader election in the API server CoordinatedLeaderElection featuregate.Feature = "CoordinatedLeaderElection" - // alpha: v1.20 - // beta: v1.21 - // GA: v1.24 // // Allows for updating watchcache resource version with progress notify events. EfficientWatchResumption featuregate.Feature = "EfficientWatchResumption" @@ -137,80 +129,46 @@ const ( // Enables KMS v1 API for encryption at rest. KMSv1 featuregate.Feature = "KMSv1" - // owner: @aramase - // kep: https://kep.k8s.io/3299 - // alpha: v1.25 - // beta: v1.27 - // stable: v1.29 - // - // Enables KMS v2 API for encryption at rest. - KMSv2 featuregate.Feature = "KMSv2" - - // owner: @enj - // kep: https://kep.k8s.io/3299 - // beta: v1.28 - // stable: v1.29 - // - // Enables the use of derived encryption keys with KMS v2. - KMSv2KDF featuregate.Feature = "KMSv2KDF" - - // owner: @alexzielenski, @cici37, @jiahuif + // owner: @alexzielenski, @cici37, @jiahuif, @jpbetz // kep: https://kep.k8s.io/3962 - // alpha: v1.30 // // Enables the MutatingAdmissionPolicy in Admission Chain MutatingAdmissionPolicy featuregate.Feature = "MutatingAdmissionPolicy" // owner: @jiahuif // kep: https://kep.k8s.io/2887 - // alpha: v1.23 - // beta: v1.24 // // Enables populating "enum" field of OpenAPI schemas // in the spec returned from kube-apiserver. OpenAPIEnums featuregate.Feature = "OpenAPIEnums" // owner: @caesarxuchao - // alpha: v1.15 - // beta: v1.16 // stable: 1.29 // // Allow apiservers to show a count of remaining items in the response // to a chunking list request. RemainingItemCount featuregate.Feature = "RemainingItemCount" + // owner: @stlaz + // + // Enable kube-apiserver to accept UIDs via request header authentication. + // This will also make the kube-apiserver's API aggregator add UIDs via standard + // headers when forwarding requests to the servers serving the aggregated API. + RemoteRequestHeaderUID featuregate.Feature = "RemoteRequestHeaderUID" + // owner: @wojtek-t - // beta: v1.31 // // Enables resilient watchcache initialization to avoid controlplane // overload. ResilientWatchCacheInitialization featuregate.Feature = "ResilientWatchCacheInitialization" // owner: @serathius - // beta: v1.30 // // Allow watch cache to create a watch on a dedicated RPC. // This prevents watch cache from being starved by other watches. SeparateCacheWatchRPC featuregate.Feature = "SeparateCacheWatchRPC" - // owner: @apelisse, @lavalamp - // alpha: v1.14 - // beta: v1.16 - // stable: v1.22 - // - // Server-side apply. Merging happens on the server. - ServerSideApply featuregate.Feature = "ServerSideApply" - - // owner: @kevindelgado - // kep: https://kep.k8s.io/2885 - // alpha: v1.23 - // beta: v1.24 - // - // Enables server-side field validation. 
- ServerSideFieldValidation featuregate.Feature = "ServerSideFieldValidation" - // owner: @enj - // beta: v1.29 // // Enables http2 DOS mitigations for unauthenticated clients. // @@ -228,13 +186,11 @@ const ( UnauthenticatedHTTP2DOSMitigation featuregate.Feature = "UnauthenticatedHTTP2DOSMitigation" // owner: @jpbetz - // alpha: v1.30 // Resource create requests using generateName are retried automatically by the apiserver // if the generated name conflicts with an existing resource name, up to a maximum number of 7 retries. RetryGenerateName featuregate.Feature = "RetryGenerateName" // owner: @cici37 - // alpha: v1.30 // // StrictCostEnforcementForVAP is used to apply strict CEL cost validation for ValidatingAdmissionPolicy. // It will be set to off by default for certain time of period to prevent the impact on the existing users. @@ -243,7 +199,6 @@ const ( StrictCostEnforcementForVAP featuregate.Feature = "StrictCostEnforcementForVAP" // owner: @cici37 - // alpha: v1.30 // // StrictCostEnforcementForWebhooks is used to apply strict CEL cost validation for matchConditions in Webhooks. // It will be set to off by default for certain time of period to prevent the impact on the existing users. @@ -252,14 +207,11 @@ const ( StrictCostEnforcementForWebhooks featuregate.Feature = "StrictCostEnforcementForWebhooks" // owner: @caesarxuchao @roycaihw - // alpha: v1.20 // // Enable the storage version API. StorageVersionAPI featuregate.Feature = "StorageVersionAPI" // owner: @caesarxuchao - // alpha: v1.14 - // beta: v1.15 // // Allow apiservers to expose the storage version hash in the discovery // document. @@ -267,69 +219,41 @@ const ( // owner: @aramase, @enj, @nabokihms // kep: https://kep.k8s.io/3331 - // alpha: v1.29 - // beta: v1.30 // // Enables Structured Authentication Configuration StructuredAuthenticationConfiguration featuregate.Feature = "StructuredAuthenticationConfiguration" // owner: @palnabarun // kep: https://kep.k8s.io/3221 - // alpha: v1.29 - // beta: v1.30 // // Enables Structured Authorization Configuration StructuredAuthorizationConfiguration featuregate.Feature = "StructuredAuthorizationConfiguration" // owner: @wojtek-t - // alpha: v1.15 - // beta: v1.16 - // GA: v1.17 // // Enables support for watch bookmark events. WatchBookmark featuregate.Feature = "WatchBookmark" // owner: @wojtek-t - // beta: v1.31 // // Enables post-start-hook for storage readiness WatchCacheInitializationPostStartHook featuregate.Feature = "WatchCacheInitializationPostStartHook" // owner: @serathius - // beta: 1.30 // Enables watches without resourceVersion to be served from storage. // Used to prevent https://github.com/kubernetes/kubernetes/issues/123072 until etcd fixes the issue. 
WatchFromStorageWithoutResourceVersion featuregate.Feature = "WatchFromStorageWithoutResourceVersion" - // owner: @vinaykul - // kep: http://kep.k8s.io/1287 - // alpha: v1.27 - // - // Enables In-Place Pod Vertical Scaling - InPlacePodVerticalScaling featuregate.Feature = "InPlacePodVerticalScaling" - // owner: @p0lyn0mial - // alpha: v1.27 // // Allow the API server to stream individual items instead of chunking WatchList featuregate.Feature = "WatchList" // owner: @serathius // kep: http://kep.k8s.io/2340 - // alpha: v1.28 - // beta: v1.31 // // Allow the API server to serve consistent lists from cache ConsistentListFromCache featuregate.Feature = "ConsistentListFromCache" - - // owner: @tkashem - // beta: v1.29 - // GA: v1.30 - // - // Allow Priority & Fairness in the API server to use a zero value for - // the 'nominalConcurrencyShares' field of the 'limited' section of a - // priority level. - ZeroLimitedNominalConcurrencyShares featuregate.Feature = "ZeroLimitedNominalConcurrencyShares" ) func init() { @@ -340,89 +264,181 @@ func init() { // defaultVersionedKubernetesFeatureGates consists of all known Kubernetes-specific feature keys with VersionedSpecs. // To add a new feature, define a key for it above and add it here. The features will be // available throughout Kubernetes binaries. +// +// Entries are alphabetized and separated from each other with blank lines to avoid sweeping gofmt changes +// when adding or removing one entry. var defaultVersionedKubernetesFeatureGates = map[featuregate.Feature]featuregate.VersionedSpecs{ - // Example: - // EmulationVersion: { - // {Version: version.MustParse("1.30"), Default: false, PreRelease: featuregate.Alpha}, - // }, + AdmissionWebhookMatchConditions: { + {Version: version.MustParse("1.27"), Default: false, PreRelease: featuregate.Alpha}, + {Version: version.MustParse("1.28"), Default: true, PreRelease: featuregate.Beta}, + {Version: version.MustParse("1.30"), Default: true, PreRelease: featuregate.GA, LockToDefault: true}, + }, + + AggregatedDiscoveryEndpoint: { + {Version: version.MustParse("1.26"), Default: false, PreRelease: featuregate.Alpha}, + {Version: version.MustParse("1.27"), Default: true, PreRelease: featuregate.Beta}, + {Version: version.MustParse("1.30"), Default: true, PreRelease: featuregate.GA, LockToDefault: true}, + }, + + AllowUnsafeMalformedObjectDeletion: { + {Version: version.MustParse("1.32"), Default: false, PreRelease: featuregate.Alpha}, + }, + + AnonymousAuthConfigurableEndpoints: { + {Version: version.MustParse("1.31"), Default: false, PreRelease: featuregate.Alpha}, + {Version: version.MustParse("1.32"), Default: true, PreRelease: featuregate.Beta}, + }, + + APIListChunking: { + {Version: version.MustParse("1.8"), Default: false, PreRelease: featuregate.Alpha}, + {Version: version.MustParse("1.9"), Default: true, PreRelease: featuregate.Beta}, + {Version: version.MustParse("1.29"), Default: true, PreRelease: featuregate.GA, LockToDefault: true}, + }, + + APIResponseCompression: { + {Version: version.MustParse("1.8"), Default: false, PreRelease: featuregate.Alpha}, + {Version: version.MustParse("1.16"), Default: true, PreRelease: featuregate.Beta}, + }, + + APIServerIdentity: { + {Version: version.MustParse("1.20"), Default: false, PreRelease: featuregate.Alpha}, + {Version: version.MustParse("1.26"), Default: true, PreRelease: featuregate.Beta}, + }, + + APIServerTracing: { + {Version: version.MustParse("1.22"), Default: false, PreRelease: featuregate.Alpha}, + {Version: version.MustParse("1.27"), Default: 
true, PreRelease: featuregate.Beta}, + }, + + APIServingWithRoutine: { + {Version: version.MustParse("1.30"), Default: false, PreRelease: featuregate.Alpha}, + }, + + BtreeWatchCache: { + {Version: version.MustParse("1.32"), Default: true, PreRelease: featuregate.Beta}, + }, + + AuthorizeWithSelectors: { + {Version: version.MustParse("1.31"), Default: false, PreRelease: featuregate.Alpha}, + {Version: version.MustParse("1.32"), Default: true, PreRelease: featuregate.Beta}, + }, + + CBORServingAndStorage: { + {Version: version.MustParse("1.32"), Default: false, PreRelease: featuregate.Alpha}, + }, + + ConcurrentWatchObjectDecode: { + {Version: version.MustParse("1.31"), Default: false, PreRelease: featuregate.Beta}, + }, + + ConsistentListFromCache: { + {Version: version.MustParse("1.28"), Default: false, PreRelease: featuregate.Alpha}, + {Version: version.MustParse("1.31"), Default: true, PreRelease: featuregate.Beta}, + }, + + CoordinatedLeaderElection: { + {Version: version.MustParse("1.31"), Default: false, PreRelease: featuregate.Alpha}, + }, + + EfficientWatchResumption: { + {Version: version.MustParse("1.20"), Default: false, PreRelease: featuregate.Alpha}, + {Version: version.MustParse("1.21"), Default: true, PreRelease: featuregate.Beta}, + {Version: version.MustParse("1.24"), Default: true, PreRelease: featuregate.GA, LockToDefault: true}, + }, + + KMSv1: { + {Version: version.MustParse("1.28"), Default: true, PreRelease: featuregate.Deprecated}, + {Version: version.MustParse("1.29"), Default: false, PreRelease: featuregate.Deprecated}, + }, + + MutatingAdmissionPolicy: { + {Version: version.MustParse("1.32"), Default: false, PreRelease: featuregate.Alpha}, + }, + + OpenAPIEnums: { + {Version: version.MustParse("1.23"), Default: false, PreRelease: featuregate.Alpha}, + {Version: version.MustParse("1.24"), Default: true, PreRelease: featuregate.Beta}, + }, + + RemainingItemCount: { + {Version: version.MustParse("1.15"), Default: false, PreRelease: featuregate.Alpha}, + {Version: version.MustParse("1.16"), Default: true, PreRelease: featuregate.Beta}, + {Version: version.MustParse("1.29"), Default: true, PreRelease: featuregate.GA, LockToDefault: true}, + }, + + RemoteRequestHeaderUID: { + {Version: version.MustParse("1.32"), Default: false, PreRelease: featuregate.Alpha}, + }, + + ResilientWatchCacheInitialization: { + {Version: version.MustParse("1.31"), Default: true, PreRelease: featuregate.Beta}, + }, + + RetryGenerateName: { + {Version: version.MustParse("1.30"), Default: false, PreRelease: featuregate.Alpha}, + {Version: version.MustParse("1.31"), Default: true, PreRelease: featuregate.Beta}, + {Version: version.MustParse("1.32"), Default: true, LockToDefault: true, PreRelease: featuregate.GA}, + }, + + SeparateCacheWatchRPC: { + {Version: version.MustParse("1.28"), Default: true, PreRelease: featuregate.Beta}, + }, + + StorageVersionAPI: { + {Version: version.MustParse("1.20"), Default: false, PreRelease: featuregate.Alpha}, + }, + + StorageVersionHash: { + {Version: version.MustParse("1.14"), Default: false, PreRelease: featuregate.Alpha}, + {Version: version.MustParse("1.15"), Default: true, PreRelease: featuregate.Beta}, + }, + + StrictCostEnforcementForVAP: { + {Version: version.MustParse("1.30"), Default: false, PreRelease: featuregate.Beta}, + {Version: version.MustParse("1.32"), Default: true, PreRelease: featuregate.GA, LockToDefault: true}, + }, + + StrictCostEnforcementForWebhooks: { + {Version: version.MustParse("1.30"), Default: false, PreRelease: 
featuregate.Beta}, + {Version: version.MustParse("1.32"), Default: true, PreRelease: featuregate.GA, LockToDefault: true}, + }, + + StructuredAuthenticationConfiguration: { + {Version: version.MustParse("1.29"), Default: false, PreRelease: featuregate.Alpha}, + {Version: version.MustParse("1.30"), Default: true, PreRelease: featuregate.Beta}, + }, + + StructuredAuthorizationConfiguration: { + {Version: version.MustParse("1.29"), Default: false, PreRelease: featuregate.Alpha}, + {Version: version.MustParse("1.30"), Default: true, PreRelease: featuregate.Beta}, + {Version: version.MustParse("1.32"), Default: true, PreRelease: featuregate.GA, LockToDefault: true}, + }, + + UnauthenticatedHTTP2DOSMitigation: { + {Version: version.MustParse("1.25"), Default: false, PreRelease: featuregate.Beta}, + {Version: version.MustParse("1.29"), Default: true, PreRelease: featuregate.Beta}, + }, + + WatchBookmark: { + {Version: version.MustParse("1.15"), Default: false, PreRelease: featuregate.Alpha}, + {Version: version.MustParse("1.16"), Default: true, PreRelease: featuregate.Beta}, + {Version: version.MustParse("1.17"), Default: true, PreRelease: featuregate.GA, LockToDefault: true}, + }, + + WatchCacheInitializationPostStartHook: { + {Version: version.MustParse("1.31"), Default: false, PreRelease: featuregate.Beta}, + }, + + WatchFromStorageWithoutResourceVersion: { + {Version: version.MustParse("1.27"), Default: false, PreRelease: featuregate.Beta}, + }, + + WatchList: { + {Version: version.MustParse("1.27"), Default: false, PreRelease: featuregate.Alpha}, + {Version: version.MustParse("1.32"), Default: true, PreRelease: featuregate.Beta}, + }, } -// defaultKubernetesFeatureGates consists of all known Kubernetes-specific feature keys. -// To add a new feature, define a key for it above and add it here. The features will be -// available throughout Kubernetes binaries. 
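Entries like the ones above carry a gate's whole lifecycle as ordered VersionedSpecs; the spec in effect is the newest one whose Version does not exceed the binary's emulation version. A simplified sketch of that resolution under that assumption, not the component-base implementation.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/version"
	"k8s.io/component-base/featuregate"
)

// effectiveSpec picks the latest spec whose Version is <= the emulated version,
// assuming specs are kept in ascending Version order as in the map above.
func effectiveSpec(specs featuregate.VersionedSpecs, emulated *version.Version) (featuregate.FeatureSpec, bool) {
	var out featuregate.FeatureSpec
	found := false
	for _, s := range specs {
		if !emulated.LessThan(s.Version) {
			out, found = s, true
		}
	}
	return out, found
}

func main() {
	specs := featuregate.VersionedSpecs{
		{Version: version.MustParse("1.27"), Default: false, PreRelease: featuregate.Alpha},
		{Version: version.MustParse("1.32"), Default: true, PreRelease: featuregate.Beta},
	}
	spec, _ := effectiveSpec(specs, version.MustParse("1.31"))
	fmt.Println(spec.Default, spec.PreRelease) // false Alpha at emulation version 1.31
}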
-var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureSpec{ - - AnonymousAuthConfigurableEndpoints: {Default: false, PreRelease: featuregate.Alpha}, - - AggregatedDiscoveryEndpoint: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.33 - - AdmissionWebhookMatchConditions: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.33 - - APIListChunking: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.32 - - APIResponseCompression: {Default: true, PreRelease: featuregate.Beta}, - - APIServerIdentity: {Default: true, PreRelease: featuregate.Beta}, - - APIServerTracing: {Default: true, PreRelease: featuregate.Beta}, - - APIServingWithRoutine: {Default: false, PreRelease: featuregate.Alpha}, - - AuthorizeWithSelectors: {Default: false, PreRelease: featuregate.Alpha}, - - ConcurrentWatchObjectDecode: {Default: false, PreRelease: featuregate.Beta}, - - ValidatingAdmissionPolicy: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.32 - - CoordinatedLeaderElection: {Default: false, PreRelease: featuregate.Alpha}, - - EfficientWatchResumption: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, - - KMSv1: {Default: false, PreRelease: featuregate.Deprecated}, - - KMSv2: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.31 - - KMSv2KDF: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.31 - - OpenAPIEnums: {Default: true, PreRelease: featuregate.Beta}, - - RemainingItemCount: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.32 - - ResilientWatchCacheInitialization: {Default: true, PreRelease: featuregate.Beta}, - - RetryGenerateName: {Default: true, PreRelease: featuregate.Beta}, - - SeparateCacheWatchRPC: {Default: true, PreRelease: featuregate.Beta}, - - ServerSideApply: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.29 - - ServerSideFieldValidation: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.29 - - StorageVersionAPI: {Default: false, PreRelease: featuregate.Alpha}, - - StorageVersionHash: {Default: true, PreRelease: featuregate.Beta}, - - StrictCostEnforcementForVAP: {Default: false, PreRelease: featuregate.Beta}, - - StrictCostEnforcementForWebhooks: {Default: false, PreRelease: featuregate.Beta}, - - StructuredAuthenticationConfiguration: {Default: true, PreRelease: featuregate.Beta}, - - StructuredAuthorizationConfiguration: {Default: true, PreRelease: featuregate.Beta}, - - UnauthenticatedHTTP2DOSMitigation: {Default: true, PreRelease: featuregate.Beta}, - - WatchBookmark: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, - - WatchCacheInitializationPostStartHook: {Default: false, PreRelease: featuregate.Beta}, - - WatchFromStorageWithoutResourceVersion: {Default: false, PreRelease: featuregate.Beta}, - - InPlacePodVerticalScaling: {Default: false, PreRelease: featuregate.Alpha}, - - WatchList: {Default: false, PreRelease: featuregate.Alpha}, - - ConsistentListFromCache: {Default: true, PreRelease: featuregate.Beta}, - - ZeroLimitedNominalConcurrencyShares: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.32 -} +// defaultKubernetesFeatureGates consists of legacy unversioned Kubernetes-specific feature keys. +// Please do not add to this struct and use defaultVersionedKubernetesFeatureGates instead. 
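For orientation, a minimal sketch (not part of the vendored diff) of how these gates are still consumed: callers keep using the apiserver's default feature gate, and with the versioned specs above the effective default is resolved against the component's emulation version. WatchList is just one key picked from the table.

package main

import (
    "fmt"

    genericfeatures "k8s.io/apiserver/pkg/features"
    utilfeature "k8s.io/apiserver/pkg/util/feature"
)

func main() {
    // With versioned specs, Enabled() reflects the default for the emulated
    // version (for example, WatchList defaults to true only from 1.32 on),
    // unless the gate was overridden explicitly on the command line.
    if utilfeature.DefaultFeatureGate.Enabled(genericfeatures.WatchList) {
        fmt.Println("watch-list requests will be served")
    }
}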
+var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureSpec{} diff --git a/vendor/k8s.io/apiserver/pkg/registry/generic/OWNERS b/vendor/k8s.io/apiserver/pkg/registry/generic/OWNERS index c0e4923f6..bfd15f781 100644 --- a/vendor/k8s.io/apiserver/pkg/registry/generic/OWNERS +++ b/vendor/k8s.io/apiserver/pkg/registry/generic/OWNERS @@ -13,6 +13,7 @@ reviewers: - saad-ali - janetkuo - pwittrock - - ncdc - dims - enj +emeritus_reviewers: + - ncdc diff --git a/vendor/k8s.io/apiserver/pkg/registry/generic/registry/corrupt_obj_deleter.go b/vendor/k8s.io/apiserver/pkg/registry/generic/registry/corrupt_obj_deleter.go new file mode 100644 index 000000000..4907da47f --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/registry/generic/registry/corrupt_obj_deleter.go @@ -0,0 +1,122 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package registry + +import ( + "context" + "errors" + "strings" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/validation/field" + genericapirequest "k8s.io/apiserver/pkg/endpoints/request" + "k8s.io/apiserver/pkg/registry/rest" + "k8s.io/apiserver/pkg/storage" + storeerr "k8s.io/apiserver/pkg/storage/errors" + + "k8s.io/klog/v2" + "k8s.io/utils/ptr" +) + +// the corrupt object deleter has the same interface as rest.GracefulDeleter +var _ rest.GracefulDeleter = &corruptObjectDeleter{} + +// NewCorruptObjectDeleter returns a deleter that can perform unsafe deletion +// of corrupt objects, it makes an attempt to perform a normal deletion flow +// first, and if the normal deletion flow fails with a corrupt object error +// then it performs the unsafe delete of the object. +// +// NOTE: it skips precondition checks, finalizer constraints, and any +// post deletion hook defined in 'AfterDelete' of the registry. +// +// WARNING: This may break the cluster if the resource being deleted has dependencies. +func NewCorruptObjectDeleter(store *Store) rest.GracefulDeleter { + return &corruptObjectDeleter{store: store} +} + +// corruptObjectDeleter implements unsafe object deletion flow +type corruptObjectDeleter struct { + store *Store +} + +// Delete performs an unsafe deletion of the given resource from the storage. +// +// NOTE: This function should NEVER be used for any normal deletion +// flow, it is exclusively used when the user enables +// 'IgnoreStoreReadErrorWithClusterBreakingPotential' in the delete options. 
+func (d *corruptObjectDeleter) Delete(ctx context.Context, name string, deleteValidation rest.ValidateObjectFunc, opts *metav1.DeleteOptions) (runtime.Object, bool, error) { + if opts == nil || !ptr.Deref[bool](opts.IgnoreStoreReadErrorWithClusterBreakingPotential, false) { + // this is a developer error, we should never be here, since the unsafe + // deleter is wired in the rest layer only when the option is enabled + return nil, false, apierrors.NewInternalError(errors.New("initialization error, expected normal deletion flow to be used")) + } + + key, err := d.store.KeyFunc(ctx, name) + if err != nil { + return nil, false, err + } + obj := d.store.NewFunc() + qualifiedResource := d.store.qualifiedResourceFromContext(ctx) + // use the storage implementation directly, bypass the dryRun layer + storageBackend := d.store.Storage.Storage + // we leave ResourceVersion as empty in the GetOptions so the + // object is retrieved from the underlying storage directly + err = storageBackend.Get(ctx, key, storage.GetOptions{}, obj) + if err == nil || !storage.IsCorruptObject(err) { + // TODO: The Invalid error should have a field for Resource. + // After that field is added, we should fill the Resource and + // leave the Kind field empty. See the discussion in #18526. + qualifiedKind := schema.GroupKind{Group: qualifiedResource.Group, Kind: qualifiedResource.Resource} + fieldErrList := field.ErrorList{ + field.Invalid(field.NewPath("ignoreStoreReadErrorWithClusterBreakingPotential"), true, "is exclusively used to delete corrupt object(s), try again by removing this option"), + } + return nil, false, apierrors.NewInvalid(qualifiedKind, name, fieldErrList) + } + + // try normal deletion anyway, it is expected to fail + obj, deleted, err := d.store.Delete(ctx, name, deleteValidation, opts) + if err == nil { + return obj, deleted, err + } + // TODO: unfortunately we can't do storage.IsCorruptObject(err), + // conversion to API error drops the inner error chain + if !strings.Contains(err.Error(), "corrupt object") { + return obj, deleted, err + } + + // TODO: at this instant, some actor may have a) managed to recreate this + // object by doing a delete+create, or b) the underlying error has resolved + // since the last time we checked, and the object is readable now. 
+ klog.FromContext(ctx).V(1).Info("Going to perform unsafe object deletion", "object", klog.KRef(genericapirequest.NamespaceValue(ctx), name)) + out := d.store.NewFunc() + storageOpts := storage.DeleteOptions{IgnoreStoreReadError: true} + // dropping preconditions, and keeping the admission + if err := storageBackend.Delete(ctx, key, out, nil, storage.ValidateObjectFunc(deleteValidation), nil, storageOpts); err != nil { + if storage.IsNotFound(err) { + // the DELETE succeeded, but we don't have the object since it's + // not retrievable from the storage, so we send a nil object + return nil, false, nil + } + return nil, false, storeerr.InterpretDeleteError(err, qualifiedResource, name) + } + // the DELETE succeeded, but we don't have the object since it's + // not retrievable from the storage, so we send a nil object + return nil, true, nil +} diff --git a/vendor/k8s.io/apiserver/pkg/registry/generic/registry/dryrun.go b/vendor/k8s.io/apiserver/pkg/registry/generic/registry/dryrun.go index c8db56b2b..ecf545274 100644 --- a/vendor/k8s.io/apiserver/pkg/registry/generic/registry/dryrun.go +++ b/vendor/k8s.io/apiserver/pkg/registry/generic/registry/dryrun.go @@ -46,7 +46,7 @@ func (s *DryRunnableStorage) Create(ctx context.Context, key string, obj, out ru return s.Storage.Create(ctx, key, obj, out, ttl) } -func (s *DryRunnableStorage) Delete(ctx context.Context, key string, out runtime.Object, preconditions *storage.Preconditions, deleteValidation storage.ValidateObjectFunc, dryRun bool, cachedExistingObject runtime.Object) error { +func (s *DryRunnableStorage) Delete(ctx context.Context, key string, out runtime.Object, preconditions *storage.Preconditions, deleteValidation storage.ValidateObjectFunc, dryRun bool, cachedExistingObject runtime.Object, opts storage.DeleteOptions) error { if dryRun { if err := s.Storage.Get(ctx, key, storage.GetOptions{}, out); err != nil { return err @@ -56,7 +56,7 @@ func (s *DryRunnableStorage) Delete(ctx context.Context, key string, out runtime } return deleteValidation(ctx, out) } - return s.Storage.Delete(ctx, key, out, preconditions, deleteValidation, cachedExistingObject) + return s.Storage.Delete(ctx, key, out, preconditions, deleteValidation, cachedExistingObject, opts) } func (s *DryRunnableStorage) Watch(ctx context.Context, key string, opts storage.ListOptions) (watch.Interface, error) { diff --git a/vendor/k8s.io/apiserver/pkg/registry/generic/registry/store.go b/vendor/k8s.io/apiserver/pkg/registry/generic/registry/store.go index 198c09f6b..ba64fc367 100644 --- a/vendor/k8s.io/apiserver/pkg/registry/generic/registry/store.go +++ b/vendor/k8s.io/apiserver/pkg/registry/generic/registry/store.go @@ -234,6 +234,18 @@ type Store struct { // If set, DestroyFunc has to be implemented in thread-safe way and // be prepared for being called more than once. DestroyFunc func() + + // corruptObjDeleter implements unsafe deletion flow to enable deletion + // of corrupt object(s), it makes an attempt to perform a normal + // deletion flow first, and if the normal deletion flow fails with a + // corrupt object error then it proceeds with the unsafe deletion + // of the object from the storage. + // NOTE: it skips precondition checks, finalizer constraints, and any + // after delete hook defined in 'AfterDelete' of the registry. + // WARNING: This may break the cluster if the resource has + // dependencies. Use when the cluster is broken, and there is no + // other viable option to repair the cluster.
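For orientation, a hedged sketch (not part of the vendored diff) of how a client opts into this unsafe path once the server runs with the AllowUnsafeMalformedObjectDeletion feature gate enabled; the clientset, namespace, and object name are placeholders.

package main

import (
    "context"

    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/client-go/kubernetes"
    "k8s.io/utils/ptr"
)

// deleteCorruptConfigMap asks the apiserver to remove an object that can no
// longer be read back from storage (for example, undecryptable after a KMS
// key loss). Normal deletion is attempted first; only a corrupt-object error
// falls through to the unsafe delete, as NewCorruptObjectDeleter documents.
func deleteCorruptConfigMap(ctx context.Context, client kubernetes.Interface, ns, name string) error {
    return client.CoreV1().ConfigMaps(ns).Delete(ctx, name, metav1.DeleteOptions{
        // Setting this field routes the request to the corrupt object deleter;
        // precondition checks and finalizers are skipped, per the WARNING above.
        IgnoreStoreReadErrorWithClusterBreakingPotential: ptr.To(true),
    })
}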
+ corruptObjDeleter rest.GracefulDeleter } // Note: the rest.StandardStorage interface aggregates the common REST verbs @@ -244,6 +256,8 @@ var _ GenericStore = &Store{} var _ rest.SingularNameProvider = &Store{} +var _ rest.CorruptObjectDeleterProvider = &Store{} + const ( OptimisticLockErrorMsg = "the object has been modified; please apply your changes to the latest version and try again" resourceCountPollPeriodJitter = 1.2 @@ -344,6 +358,11 @@ func (e *Store) GetDeleteStrategy() rest.RESTDeleteStrategy { return e.DeleteStrategy } +// GetCorruptObjDeleter returns the unsafe corrupt object deleter +func (e *Store) GetCorruptObjDeleter() rest.GracefulDeleter { + return e.corruptObjDeleter +} + // List returns a list of items matching labels and field according to the // store's PredicateFunc. func (e *Store) List(ctx context.Context, options *metainternalversion.ListOptions) (runtime.Object, error) { @@ -572,7 +591,7 @@ func (e *Store) deleteWithoutFinalizers(ctx context.Context, name, key string, o out := e.NewFunc() klog.V(6).InfoS("Going to delete object from registry, triggered by update", "object", klog.KRef(genericapirequest.NamespaceValue(ctx), name)) // Using the rest.ValidateAllObjectFunc because the request is an UPDATE request and has already passed the admission for the UPDATE verb. - if err := e.Storage.Delete(ctx, key, out, preconditions, rest.ValidateAllObjectFunc, dryrun.IsDryRun(options.DryRun), nil); err != nil { + if err := e.Storage.Delete(ctx, key, out, preconditions, rest.ValidateAllObjectFunc, dryrun.IsDryRun(options.DryRun), nil, storage.DeleteOptions{}); err != nil { // Deletion is racy, i.e., there could be multiple update // requests to remove all finalizers from the object, so we // ignore the NotFound error. @@ -1182,7 +1201,7 @@ func (e *Store) Delete(ctx context.Context, name string, deleteValidation rest.V // delete immediately, or no graceful deletion supported klog.V(6).InfoS("Going to delete object from registry", "object", klog.KRef(genericapirequest.NamespaceValue(ctx), name)) out = e.NewFunc() - if err := e.Storage.Delete(ctx, key, out, &preconditions, storage.ValidateObjectFunc(deleteValidation), dryrun.IsDryRun(options.DryRun), nil); err != nil { + if err := e.Storage.Delete(ctx, key, out, &preconditions, storage.ValidateObjectFunc(deleteValidation), dryrun.IsDryRun(options.DryRun), nil, storage.DeleteOptions{}); err != nil { // Please refer to the place where we set ignoreNotFound for the reason // why we ignore the NotFound error . 
if storage.IsNotFound(err) && ignoreNotFound && lastExisting != nil { @@ -1631,6 +1650,10 @@ func (e *Store) CompleteWithOptions(options *generic.StoreOptions) error { e.ReadinessCheckFunc = e.Storage.Storage.ReadinessCheck } + if utilfeature.DefaultFeatureGate.Enabled(features.AllowUnsafeMalformedObjectDeletion) { + e.corruptObjDeleter = NewCorruptObjectDeleter(e) + } + return nil } diff --git a/vendor/k8s.io/apiserver/pkg/registry/rest/OWNERS b/vendor/k8s.io/apiserver/pkg/registry/rest/OWNERS index 4dd70bd88..e7207ecee 100644 --- a/vendor/k8s.io/apiserver/pkg/registry/rest/OWNERS +++ b/vendor/k8s.io/apiserver/pkg/registry/rest/OWNERS @@ -10,7 +10,8 @@ reviewers: - mikedanese - liggitt - justinsb - - ncdc - dims - ingvagabund - enj +emeritus_reviewers: + - ncdc diff --git a/vendor/k8s.io/apiserver/pkg/registry/rest/rest.go b/vendor/k8s.io/apiserver/pkg/registry/rest/rest.go index 03cea7bb7..f364635e8 100644 --- a/vendor/k8s.io/apiserver/pkg/registry/rest/rest.go +++ b/vendor/k8s.io/apiserver/pkg/registry/rest/rest.go @@ -22,12 +22,13 @@ import ( "net/http" "net/url" + "sigs.k8s.io/structured-merge-diff/v4/fieldpath" + metainternalversion "k8s.io/apimachinery/pkg/apis/meta/internalversion" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/watch" - "sigs.k8s.io/structured-merge-diff/v4/fieldpath" ) //TODO: @@ -387,6 +388,12 @@ type ResetFieldsStrategy interface { GetResetFields() map[fieldpath.APIVersion]*fieldpath.Set } +// ResetFieldsFilterStrategy is an optional interface that a storage object can +// implement if it wishes to provide a fields filter reset by its strategies. +type ResetFieldsFilterStrategy interface { + GetResetFieldsFilter() map[fieldpath.APIVersion]fieldpath.Filter +} + // CreateUpdateResetFieldsStrategy is a union of RESTCreateUpdateStrategy // and ResetFieldsStrategy. type CreateUpdateResetFieldsStrategy interface { @@ -400,3 +407,10 @@ type UpdateResetFieldsStrategy interface { RESTUpdateStrategy ResetFieldsStrategy } + +// CorruptObjectDeleterProvider is an interface the storage implements +// to support unsafe deletion of corrupt object(s). It returns a +// GracefulDeleter that is used to perform unsafe deletion of corrupt object(s). +type CorruptObjectDeleterProvider interface { + GetCorruptObjDeleter() GracefulDeleter +} diff --git a/vendor/k8s.io/apiserver/pkg/server/config.go b/vendor/k8s.io/apiserver/pkg/server/config.go index 6da894919..ee037aefe 100644 --- a/vendor/k8s.io/apiserver/pkg/server/config.go +++ b/vendor/k8s.io/apiserver/pkg/server/config.go @@ -71,7 +71,6 @@ import ( utilfeature "k8s.io/apiserver/pkg/util/feature" utilflowcontrol "k8s.io/apiserver/pkg/util/flowcontrol" flowcontrolrequest "k8s.io/apiserver/pkg/util/flowcontrol/request" - utilversion "k8s.io/apiserver/pkg/util/version" "k8s.io/client-go/informers" restclient "k8s.io/client-go/rest" "k8s.io/component-base/featuregate" @@ -79,6 +78,8 @@ import ( "k8s.io/component-base/metrics/features" "k8s.io/component-base/metrics/prometheus/slis" "k8s.io/component-base/tracing" + utilversion "k8s.io/component-base/version" + "k8s.io/component-base/zpages/flagz" "k8s.io/klog/v2" openapicommon "k8s.io/kube-openapi/pkg/common" "k8s.io/kube-openapi/pkg/spec3" @@ -189,6 +190,7 @@ type Config struct { LivezChecks []healthz.HealthChecker // The default set of readyz-only checks. There might be more added via AddReadyzChecks dynamically. 
ReadyzChecks []healthz.HealthChecker + Flagz flagz.Reader // LegacyAPIGroupPrefixes is used to set up URL parsing for authorization and for validating requests // to InstallLegacyAPIGroup. New API servers don't generally have legacy groups at all. LegacyAPIGroupPrefixes sets.String @@ -742,7 +744,7 @@ func (c *RecommendedConfig) Complete() CompletedConfig { return c.Config.Complete(c.SharedInformerFactory) } -var allowedMediaTypes = []string{ +var defaultAllowedMediaTypes = []string{ runtime.ContentTypeJSON, runtime.ContentTypeYAML, runtime.ContentTypeProtobuf, @@ -755,6 +757,10 @@ func (c completedConfig) New(name string, delegationTarget DelegationTarget) (*G if c.Serializer == nil { return nil, fmt.Errorf("Genericapiserver.New() called with config.Serializer == nil") } + allowedMediaTypes := defaultAllowedMediaTypes + if utilfeature.DefaultFeatureGate.Enabled(genericfeatures.CBORServingAndStorage) { + allowedMediaTypes = append(allowedMediaTypes, runtime.ContentTypeCBOR) + } for _, info := range c.Serializer.SupportedMediaTypes() { var ok bool for _, mt := range allowedMediaTypes { @@ -886,8 +892,8 @@ func (c completedConfig) New(name string, delegationTarget DelegationTarget) (*G genericApiServerHookName := "generic-apiserver-start-informers" if c.SharedInformerFactory != nil { if !s.isPostStartHookRegistered(genericApiServerHookName) { - err := s.AddPostStartHook(genericApiServerHookName, func(context PostStartHookContext) error { - c.SharedInformerFactory.Start(context.StopCh) + err := s.AddPostStartHook(genericApiServerHookName, func(hookContext PostStartHookContext) error { + c.SharedInformerFactory.Start(hookContext.Done()) return nil }) if err != nil { @@ -904,8 +910,8 @@ func (c completedConfig) New(name string, delegationTarget DelegationTarget) (*G const priorityAndFairnessConfigConsumerHookName = "priority-and-fairness-config-consumer" if s.isPostStartHookRegistered(priorityAndFairnessConfigConsumerHookName) { } else if c.FlowControl != nil { - err := s.AddPostStartHook(priorityAndFairnessConfigConsumerHookName, func(context PostStartHookContext) error { - go c.FlowControl.Run(context.StopCh) + err := s.AddPostStartHook(priorityAndFairnessConfigConsumerHookName, func(hookContext PostStartHookContext) error { + go c.FlowControl.Run(hookContext.Done()) return nil }) if err != nil { @@ -920,8 +926,8 @@ func (c completedConfig) New(name string, delegationTarget DelegationTarget) (*G if c.FlowControl != nil { const priorityAndFairnessFilterHookName = "priority-and-fairness-filter" if !s.isPostStartHookRegistered(priorityAndFairnessFilterHookName) { - err := s.AddPostStartHook(priorityAndFairnessFilterHookName, func(context PostStartHookContext) error { - genericfilters.StartPriorityAndFairnessWatermarkMaintenance(context.StopCh) + err := s.AddPostStartHook(priorityAndFairnessFilterHookName, func(hookContext PostStartHookContext) error { + genericfilters.StartPriorityAndFairnessWatermarkMaintenance(hookContext.Done()) return nil }) if err != nil { @@ -931,8 +937,8 @@ func (c completedConfig) New(name string, delegationTarget DelegationTarget) (*G } else { const maxInFlightFilterHookName = "max-in-flight-filter" if !s.isPostStartHookRegistered(maxInFlightFilterHookName) { - err := s.AddPostStartHook(maxInFlightFilterHookName, func(context PostStartHookContext) error { - genericfilters.StartMaxInFlightWatermarkMaintenance(context.StopCh) + err := s.AddPostStartHook(maxInFlightFilterHookName, func(hookContext PostStartHookContext) error { + 
genericfilters.StartMaxInFlightWatermarkMaintenance(hookContext.Done()) return nil }) if err != nil { @@ -945,8 +951,8 @@ func (c completedConfig) New(name string, delegationTarget DelegationTarget) (*G if c.StorageObjectCountTracker != nil { const storageObjectCountTrackerHookName = "storage-object-count-tracker-hook" if !s.isPostStartHookRegistered(storageObjectCountTrackerHookName) { - if err := s.AddPostStartHook(storageObjectCountTrackerHookName, func(context PostStartHookContext) error { - go c.StorageObjectCountTracker.RunUntil(context.StopCh) + if err := s.AddPostStartHook(storageObjectCountTrackerHookName, func(hookContext PostStartHookContext) error { + go c.StorageObjectCountTracker.RunUntil(hookContext.Done()) return nil }); err != nil { return nil, err @@ -975,7 +981,7 @@ func (c completedConfig) New(name string, delegationTarget DelegationTarget) (*G s.listedPathProvider = routes.ListedPathProviders{s.listedPathProvider, delegationTarget} - installAPI(s, c.Config) + installAPI(name, s, c.Config) // use the UnprotectedHandler from the delegation target to ensure that we don't attempt to double authenticator, authorize, // or some other part of the filter chain in delegation cases. @@ -1072,7 +1078,7 @@ func DefaultBuildHandlerChain(apiHandler http.Handler, c *Config) http.Handler { return handler } -func installAPI(s *GenericAPIServer, c *Config) { +func installAPI(name string, s *GenericAPIServer, c *Config) { if c.EnableIndex { routes.Index{}.Install(s.listedPathProvider, s.Handler.NonGoRestfulMux) } diff --git a/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/dynamic_cafile_content.go b/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/dynamic_cafile_content.go index 1f32adf9e..0fcf82bd0 100644 --- a/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/dynamic_cafile_content.go +++ b/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/dynamic_cafile_content.go @@ -20,6 +20,7 @@ import ( "bytes" "context" "crypto/x509" + "errors" "fmt" "os" "sync/atomic" @@ -210,7 +211,7 @@ func (c *DynamicFileCAContent) handleWatchEvent(e fsnotify.Event, w *fsnotify.Wa if !e.Has(fsnotify.Remove) && !e.Has(fsnotify.Rename) { return nil } - if err := w.Remove(c.filename); err != nil { + if err := w.Remove(c.filename); err != nil && !errors.Is(err, fsnotify.ErrNonExistentWatch) { klog.InfoS("Failed to remove file watch, it may have been deleted", "file", c.filename, "err", err) } if err := w.Add(c.filename); err != nil { diff --git a/vendor/k8s.io/apiserver/pkg/server/egressselector/config.go b/vendor/k8s.io/apiserver/pkg/server/egressselector/config.go index ce9a3691a..0513b2822 100644 --- a/vendor/k8s.io/apiserver/pkg/server/egressselector/config.go +++ b/vendor/k8s.io/apiserver/pkg/server/egressselector/config.go @@ -22,13 +22,12 @@ import ( "strings" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/validation/field" "k8s.io/apiserver/pkg/apis/apiserver" "k8s.io/apiserver/pkg/apis/apiserver/install" - "k8s.io/apiserver/pkg/apis/apiserver/v1beta1" "k8s.io/utils/path" - "sigs.k8s.io/yaml" ) var cfgScheme = runtime.NewScheme() @@ -55,19 +54,13 @@ func ReadEgressSelectorConfiguration(configFilePath string) (*apiserver.EgressSe if err != nil { return nil, fmt.Errorf("unable to read egress selector configuration from %q [%v]", configFilePath, err) } - var decodedConfig v1beta1.EgressSelectorConfiguration - err = yaml.Unmarshal(data, &decodedConfig) + config, gvk, err := 
serializer.NewCodecFactory(cfgScheme, serializer.EnableStrict).UniversalDecoder().Decode(data, nil, nil) if err != nil { - // we got an error where the decode wasn't related to a missing type return nil, err } - if decodedConfig.Kind != "EgressSelectorConfiguration" { - return nil, fmt.Errorf("invalid service configuration object %q", decodedConfig.Kind) - } - internalConfig := &apiserver.EgressSelectorConfiguration{} - if err := cfgScheme.Convert(&decodedConfig, internalConfig, nil); err != nil { - // we got an error where the decode wasn't related to a missing type - return nil, err + internalConfig, ok := config.(*apiserver.EgressSelectorConfiguration) + if !ok { + return nil, fmt.Errorf("unexpected config type: %v", gvk) } return internalConfig, nil } diff --git a/vendor/k8s.io/apiserver/pkg/server/filters/priority-and-fairness.go b/vendor/k8s.io/apiserver/pkg/server/filters/priority-and-fairness.go index 1e91b9a31..decd9d6ca 100644 --- a/vendor/k8s.io/apiserver/pkg/server/filters/priority-and-fairness.go +++ b/vendor/k8s.io/apiserver/pkg/server/filters/priority-and-fairness.go @@ -266,17 +266,23 @@ func (h *priorityAndFairnessHandler) Handle(w http.ResponseWriter, r *http.Reque select { case <-shouldStartWatchCh: - watchCtx := utilflowcontrol.WithInitializationSignal(ctx, watchInitializationSignal) - watchReq = r.WithContext(watchCtx) - h.handler.ServeHTTP(w, watchReq) - // Protect from the situation when request will not reach storage layer - // and the initialization signal will not be send. - // It has to happen before waiting on the resultCh below. - watchInitializationSignal.Signal() - // TODO: Consider finishing the request as soon as Handle call panics. - if err := <-resultCh; err != nil { - panic(err) - } + func() { + // TODO: if both goroutines panic, propagate the stack traces from both + // goroutines so they are logged properly: + defer func() { + // Protect from the situation when request will not reach storage layer + // and the initialization signal will not be send. + // It has to happen before waiting on the resultCh below. + watchInitializationSignal.Signal() + // TODO: Consider finishing the request as soon as Handle call panics. 
+ if err := <-resultCh; err != nil { + panic(err) + } + }() + watchCtx := utilflowcontrol.WithInitializationSignal(ctx, watchInitializationSignal) + watchReq = r.WithContext(watchCtx) + h.handler.ServeHTTP(w, watchReq) + }() case err := <-resultCh: if err != nil { panic(err) diff --git a/vendor/k8s.io/apiserver/pkg/server/genericapiserver.go b/vendor/k8s.io/apiserver/pkg/server/genericapiserver.go index 52fb449ac..e810a4608 100644 --- a/vendor/k8s.io/apiserver/pkg/server/genericapiserver.go +++ b/vendor/k8s.io/apiserver/pkg/server/genericapiserver.go @@ -35,6 +35,7 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/apimachinery/pkg/runtime/serializer/cbor" "k8s.io/apimachinery/pkg/util/managedfields" utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/sets" @@ -51,9 +52,10 @@ import ( "k8s.io/apiserver/pkg/server/healthz" "k8s.io/apiserver/pkg/server/routes" "k8s.io/apiserver/pkg/storageversion" - utilversion "k8s.io/apiserver/pkg/util/version" + utilfeature "k8s.io/apiserver/pkg/util/feature" restclient "k8s.io/client-go/rest" "k8s.io/component-base/featuregate" + utilversion "k8s.io/component-base/version" "k8s.io/klog/v2" openapibuilder3 "k8s.io/kube-openapi/pkg/builder3" openapicommon "k8s.io/kube-openapi/pkg/common" @@ -989,6 +991,9 @@ func (s *GenericAPIServer) newAPIGroupVersion(apiGroupInfo *APIGroupInfo, groupV // NewDefaultAPIGroupInfo returns an APIGroupInfo stubbed with "normal" values // exposed for easier composition from other packages func NewDefaultAPIGroupInfo(group string, scheme *runtime.Scheme, parameterCodec runtime.ParameterCodec, codecs serializer.CodecFactory) APIGroupInfo { + if utilfeature.DefaultFeatureGate.Enabled(features.CBORServingAndStorage) { + codecs = serializer.NewCodecFactory(scheme, serializer.WithSerializer(cbor.NewSerializerInfo)) + } return APIGroupInfo{ PrioritizedVersions: scheme.PrioritizedVersionsForGroup(group), VersionedResourcesStorageMap: map[string]map[string]rest.Storage{}, diff --git a/vendor/k8s.io/apiserver/pkg/server/handler.go b/vendor/k8s.io/apiserver/pkg/server/handler.go index 847a624e3..b829ade74 100644 --- a/vendor/k8s.io/apiserver/pkg/server/handler.go +++ b/vendor/k8s.io/apiserver/pkg/server/handler.go @@ -77,7 +77,6 @@ func NewAPIServerHandler(name string, s runtime.NegotiatedSerializer, handlerCha } gorestfulContainer := restful.NewContainer() - gorestfulContainer.ServeMux = http.NewServeMux() gorestfulContainer.Router(restful.CurlyRouter{}) // e.g. for proxy/{kind}/{name}/{*} gorestfulContainer.RecoverHandler(func(panicReason interface{}, httpWriter http.ResponseWriter) { logStackOnRecover(s, panicReason, httpWriter) diff --git a/vendor/k8s.io/apiserver/pkg/server/healthz/healthz.go b/vendor/k8s.io/apiserver/pkg/server/healthz/healthz.go index 76f5745b3..730929331 100644 --- a/vendor/k8s.io/apiserver/pkg/server/healthz/healthz.go +++ b/vendor/k8s.io/apiserver/pkg/server/healthz/healthz.go @@ -35,6 +35,8 @@ import ( "k8s.io/klog/v2" ) +const DefaultHealthzPath = "/healthz" + // HealthChecker is a named healthz checker. type HealthChecker interface { Name() string @@ -154,7 +156,7 @@ func NamedCheck(name string, check func(r *http.Request) error) HealthChecker { // exactly one call to InstallHandler. Calling InstallHandler more // than once for the same mux will result in a panic. func InstallHandler(mux mux, checks ...HealthChecker) { - InstallPathHandler(mux, "/healthz", checks...) 
+ InstallPathHandler(mux, DefaultHealthzPath, checks...) } // InstallReadyzHandler registers handlers for health checking on the path diff --git a/vendor/k8s.io/apiserver/pkg/server/hooks.go b/vendor/k8s.io/apiserver/pkg/server/hooks.go index 1561d7a84..150b40b47 100644 --- a/vendor/k8s.io/apiserver/pkg/server/hooks.go +++ b/vendor/k8s.io/apiserver/pkg/server/hooks.go @@ -49,11 +49,6 @@ type PreShutdownHookFunc func() error type PostStartHookContext struct { // LoopbackClientConfig is a config for a privileged loopback connection to the API server LoopbackClientConfig *restclient.Config - // StopCh is the channel that will be closed when the server stops. - // - // Deprecated: use the PostStartHookContext itself instead, it contains a context that - // gets cancelled when the server stops. StopCh keeps getting provided for existing code. - StopCh <-chan struct{} // Context gets cancelled when the server stops. context.Context } @@ -165,7 +160,6 @@ func (s *GenericAPIServer) RunPostStartHooks(ctx context.Context) { context := PostStartHookContext{ LoopbackClientConfig: s.LoopbackClientConfig, - StopCh: ctx.Done(), Context: ctx, } diff --git a/vendor/k8s.io/apiserver/pkg/server/options/admission.go b/vendor/k8s.io/apiserver/pkg/server/options/admission.go index 61085e94e..6b4669e45 100644 --- a/vendor/k8s.io/apiserver/pkg/server/options/admission.go +++ b/vendor/k8s.io/apiserver/pkg/server/options/admission.go @@ -31,6 +31,7 @@ import ( "k8s.io/apiserver/pkg/admission/initializer" admissionmetrics "k8s.io/apiserver/pkg/admission/metrics" "k8s.io/apiserver/pkg/admission/plugin/namespace/lifecycle" + mutatingadmissionpolicy "k8s.io/apiserver/pkg/admission/plugin/policy/mutating" validatingadmissionpolicy "k8s.io/apiserver/pkg/admission/plugin/policy/validating" mutatingwebhook "k8s.io/apiserver/pkg/admission/plugin/webhook/mutating" validatingwebhook "k8s.io/apiserver/pkg/admission/plugin/webhook/validating" @@ -90,7 +91,7 @@ func NewAdmissionOptions() *AdmissionOptions { // admission plugins. The apiserver always runs the validating ones // after all the mutating ones, so their relative order in this list // doesn't matter. - RecommendedPluginOrder: []string{lifecycle.PluginName, mutatingwebhook.PluginName, validatingadmissionpolicy.PluginName, validatingwebhook.PluginName}, + RecommendedPluginOrder: []string{lifecycle.PluginName, mutatingadmissionpolicy.PluginName, mutatingwebhook.PluginName, validatingadmissionpolicy.PluginName, validatingwebhook.PluginName}, DefaultOffPlugins: sets.Set[string]{}, } server.RegisterAllAdmissionPlugins(options.Plugins) @@ -157,9 +158,9 @@ func (a *AdmissionOptions) ApplyTo( initializersChain := admission.PluginInitializers{genericInitializer} initializersChain = append(initializersChain, pluginInitializers...) 
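A minimal sketch (not taken from the vendored diff) of a post-start hook written against the new PostStartHookContext, which embeds context.Context instead of carrying a StopCh; the hook name and periodic work are hypothetical.

package main

import (
    "time"

    "k8s.io/apimachinery/pkg/util/wait"
    genericapiserver "k8s.io/apiserver/pkg/server"
)

func addExampleHook(s *genericapiserver.GenericAPIServer) error {
    return s.AddPostStartHook("example-periodic-task", func(hookContext genericapiserver.PostStartHookContext) error {
        // hookContext.Done() replaces the removed hookContext.StopCh.
        go wait.Until(func() { /* periodic work */ }, 30*time.Second, hookContext.Done())
        return nil
    })
}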
- admissionPostStartHook := func(context server.PostStartHookContext) error { + admissionPostStartHook := func(hookContext server.PostStartHookContext) error { discoveryRESTMapper.Reset() - go utilwait.Until(discoveryRESTMapper.Reset, 30*time.Second, context.StopCh) + go utilwait.Until(discoveryRESTMapper.Reset, 30*time.Second, hookContext.Done()) return nil } diff --git a/vendor/k8s.io/apiserver/pkg/server/options/authentication.go b/vendor/k8s.io/apiserver/pkg/server/options/authentication.go index c40e4cf43..f88f73e72 100644 --- a/vendor/k8s.io/apiserver/pkg/server/options/authentication.go +++ b/vendor/k8s.io/apiserver/pkg/server/options/authentication.go @@ -29,8 +29,10 @@ import ( "k8s.io/apiserver/pkg/apis/apiserver" "k8s.io/apiserver/pkg/authentication/authenticatorfactory" "k8s.io/apiserver/pkg/authentication/request/headerrequest" + "k8s.io/apiserver/pkg/features" "k8s.io/apiserver/pkg/server" "k8s.io/apiserver/pkg/server/dynamiccertificates" + utilfeature "k8s.io/apiserver/pkg/util/feature" "k8s.io/client-go/kubernetes" "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" @@ -56,6 +58,7 @@ type RequestHeaderAuthenticationOptions struct { ClientCAFile string UsernameHeaders []string + UIDHeaders []string GroupHeaders []string ExtraHeaderPrefixes []string AllowedNames []string @@ -87,6 +90,20 @@ func (s *RequestHeaderAuthenticationOptions) Validate() []error { klog.Warningf("--requestheader-extra-headers-prefix is set without specifying the standard X-Remote-Extra- header prefix - API aggregation will not work") } + if !utilfeature.DefaultFeatureGate.Enabled(features.RemoteRequestHeaderUID) { + if len(s.UIDHeaders) > 0 { + allErrors = append(allErrors, fmt.Errorf("--requestheader-uid-headers requires the %q feature to be enabled", features.RemoteRequestHeaderUID)) + } + } else { + if err := checkForWhiteSpaceOnly("requestheader-uid-headers", s.UIDHeaders...); err != nil { + allErrors = append(allErrors, err) + } + if len(s.UIDHeaders) > 0 && !caseInsensitiveHas(s.UIDHeaders, "X-Remote-Uid") { + // this was added later and so we are able to error out + allErrors = append(allErrors, fmt.Errorf("--requestheader-uid-headers is set without specifying the standard X-Remote-Uid header - API aggregation will not work")) + } + } + return allErrors } @@ -117,6 +134,9 @@ func (s *RequestHeaderAuthenticationOptions) AddFlags(fs *pflag.FlagSet) { fs.StringSliceVar(&s.UsernameHeaders, "requestheader-username-headers", s.UsernameHeaders, ""+ "List of request headers to inspect for usernames. X-Remote-User is common.") + fs.StringSliceVar(&s.UIDHeaders, "requestheader-uid-headers", s.UIDHeaders, ""+ + "List of request headers to inspect for UIDs. X-Remote-Uid is suggested. Requires the RemoteRequestHeaderUID feature to be enabled.") + fs.StringSliceVar(&s.GroupHeaders, "requestheader-group-headers", s.GroupHeaders, ""+ "List of request headers to inspect for groups. 
X-Remote-Group is suggested.") @@ -148,6 +168,7 @@ func (s *RequestHeaderAuthenticationOptions) ToAuthenticationRequestHeaderConfig return &authenticatorfactory.RequestHeaderConfig{ UsernameHeaders: headerrequest.StaticStringSlice(s.UsernameHeaders), + UIDHeaders: headerrequest.StaticStringSlice(s.UIDHeaders), GroupHeaders: headerrequest.StaticStringSlice(s.GroupHeaders), ExtraHeaderPrefixes: headerrequest.StaticStringSlice(s.ExtraHeaderPrefixes), CAContentProvider: caBundleProvider, @@ -233,7 +254,13 @@ func NewDelegatingAuthenticationOptions() *DelegatingAuthenticationOptions { CacheTTL: 10 * time.Second, ClientCert: ClientCertAuthenticationOptions{}, RequestHeader: RequestHeaderAuthenticationOptions{ - UsernameHeaders: []string{"x-remote-user"}, + UsernameHeaders: []string{"x-remote-user"}, + // we specifically don't default UID headers as these were introduced + // later (kube 1.32) and we don't want 3rd parties to be trusting the default headers + // before we can safely say that all KAS instances know they should + // remove them from an incoming request in its WithAuthentication handler. + // The defaulting will be enabled in a future (1.33+) version. + UIDHeaders: nil, GroupHeaders: []string{"x-remote-group"}, ExtraHeaderPrefixes: []string{"x-remote-extra-"}, }, @@ -423,6 +450,7 @@ func (s *DelegatingAuthenticationOptions) createRequestHeaderConfig(client kuber return &authenticatorfactory.RequestHeaderConfig{ CAContentProvider: dynamicRequestHeaderProvider, UsernameHeaders: headerrequest.StringSliceProvider(headerrequest.StringSliceProviderFunc(dynamicRequestHeaderProvider.UsernameHeaders)), + UIDHeaders: headerrequest.StringSliceProvider(headerrequest.StringSliceProviderFunc(dynamicRequestHeaderProvider.UIDHeaders)), GroupHeaders: headerrequest.StringSliceProvider(headerrequest.StringSliceProviderFunc(dynamicRequestHeaderProvider.GroupHeaders)), ExtraHeaderPrefixes: headerrequest.StringSliceProvider(headerrequest.StringSliceProviderFunc(dynamicRequestHeaderProvider.ExtraHeaderPrefixes)), AllowedClientNames: headerrequest.StringSliceProvider(headerrequest.StringSliceProviderFunc(dynamicRequestHeaderProvider.AllowedClientNames)), diff --git a/vendor/k8s.io/apiserver/pkg/server/options/authentication_dynamic_request_header.go b/vendor/k8s.io/apiserver/pkg/server/options/authentication_dynamic_request_header.go index 0dac34021..4ef3d9b36 100644 --- a/vendor/k8s.io/apiserver/pkg/server/options/authentication_dynamic_request_header.go +++ b/vendor/k8s.io/apiserver/pkg/server/options/authentication_dynamic_request_header.go @@ -55,6 +55,7 @@ func newDynamicRequestHeaderController(client kubernetes.Interface) (*DynamicReq authenticationConfigMapNamespace, client, "requestheader-username-headers", + "requestheader-uid-headers", "requestheader-group-headers", "requestheader-extra-headers-prefix", "requestheader-allowed-names", diff --git a/vendor/k8s.io/apiserver/pkg/server/options/encryptionconfig/config.go b/vendor/k8s.io/apiserver/pkg/server/options/encryptionconfig/config.go index 40879f78e..6056975cd 100644 --- a/vendor/k8s.io/apiserver/pkg/server/options/encryptionconfig/config.go +++ b/vendor/k8s.io/apiserver/pkg/server/options/encryptionconfig/config.go @@ -40,6 +40,7 @@ import ( "k8s.io/apiserver/pkg/apis/apiserver" apiserverv1 "k8s.io/apiserver/pkg/apis/apiserver/v1" "k8s.io/apiserver/pkg/apis/apiserver/validation" + "k8s.io/apiserver/pkg/endpoints/request" "k8s.io/apiserver/pkg/features" "k8s.io/apiserver/pkg/server/healthz" 
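A hedged sketch (not part of the vendored diff) of how a delegating server could opt into the new UID header once the RemoteRequestHeaderUID gate is enabled end to end; per the comment above, nothing is defaulted in 1.32, and the rest of the options wiring is assumed to exist elsewhere.

package main

import (
    "k8s.io/apiserver/pkg/server/options"
)

func newAuthenticationOptions() *options.DelegatingAuthenticationOptions {
    o := options.NewDelegatingAuthenticationOptions()
    // UID headers are intentionally left empty by default; opting in requires
    // including the standard X-Remote-Uid header, as Validate() now checks.
    o.RequestHeader.UIDHeaders = []string{"X-Remote-Uid"}
    return o
}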
"k8s.io/apiserver/pkg/server/options/encryptionconfig/metrics" @@ -106,24 +107,35 @@ const ( var codecs serializer.CodecFactory -// this atomic bool allows us to swap enablement of the KMSv2KDF feature in tests +// this map allows us to swap enablement of the KMSv2KDF feature in tests // as the feature gate is now locked to true starting with v1.29 // Note: it cannot be set by an end user -var kdfDisabled atomic.Bool +// KDF enablement is tracked per KMS provider to allow tests to run in parallel. +var kdfEnabledPerKMS sync.Map // map[string]bool, KMS name -> KDF enabled // this function should only be called in tests to swap enablement of the KMSv2KDF feature -func SetKDFForTests(b bool) func() { - kdfDisabled.Store(!b) - return func() { - kdfDisabled.Store(false) +// Caller must guarantee that all KMS providers have distinct names across all tests. +func SetKDFForTests(kmsName string, b bool) func() { + if len(kmsName) == 0 { // guarantee that GetKDF("") returns the default value + panic("empty KMS name used in test") } + if _, loaded := kdfEnabledPerKMS.LoadOrStore(kmsName, b); loaded { + panic("duplicate KMS name used in test") + } + return func() { kdfEnabledPerKMS.Delete(kmsName) } } // this function should be used to determine enablement of the KMSv2KDF feature // instead of getting it from DefaultFeatureGate as the feature gate is now locked // to true starting with v1.29 -func GetKDF() bool { - return !kdfDisabled.Load() +// to allow integration tests to run in parallel, this "feature flag" can be set +// per KMS provider as long as all providers use distinct names. +func GetKDF(kmsName string) bool { + kdfEnabled, ok := kdfEnabledPerKMS.Load(kmsName) + if !ok { + return true // explicit config is missing, but KDF is enabled by default + } + return kdfEnabled.(bool) // this will panic if a non-bool ever gets stored, which should never happen } func init() { @@ -389,7 +401,7 @@ func (h *kmsv2PluginProbe) rotateDEKOnKeyIDChange(ctx context.Context, statusKey // this gate can only change during tests, but the check is cheap enough to always make // this allows us to easily exercise both modes without restarting the API server // TODO integration test that this dynamically takes effect - useSeed := GetKDF() + useSeed := GetKDF(h.name) stateUseSeed := state.EncryptedObject.EncryptedDEKSourceType == kmstypes.EncryptedDEKSourceType_HKDF_SHA256_XNONCE_AES_GCM_SEED // state is valid and status keyID is unchanged from when we generated this DEK/seed so there is no need to rotate it @@ -760,10 +772,6 @@ func kmsPrefixTransformer(ctx context.Context, config *apiserver.KMSConfiguratio }, nil case kmsAPIVersionV2: - if !utilfeature.DefaultFeatureGate.Enabled(features.KMSv2) { - return storagevalue.PrefixTransformer{}, nil, nil, fmt.Errorf("could not configure KMSv2 plugin %q, KMSv2 feature is not enabled", kmsName) - } - envelopeService, err := EnvelopeKMSv2ServiceFactory(ctx, config.Endpoint, config.Name, config.Timeout.Duration) if err != nil { return storagevalue.PrefixTransformer{}, nil, nil, fmt.Errorf("could not configure KMSv2-Plugin's probe %q, error: %w", kmsName, err) @@ -1013,7 +1021,9 @@ var anyGroupAnyResource = schema.GroupResource{ Resource: "*", } -func transformerFromOverrides(transformerOverrides map[schema.GroupResource]storagevalue.Transformer, resource schema.GroupResource) storagevalue.Transformer { +func transformerFromOverrides(transformerOverrides map[schema.GroupResource]storagevalue.Transformer, resource schema.GroupResource) (out storagevalue.Transformer) { + defer 
func() { out = newRequestInfoTransformer(resource, out) }() + if transformer := transformerOverrides[resource]; transformer != nil { return transformer } @@ -1039,3 +1049,41 @@ func grYAMLString(gr schema.GroupResource) string { return gr.String() } + +var _ storagevalue.Transformer = &requestInfoTransformer{} + +type requestInfoTransformer struct { + baseValueCtx context.Context + delegate storagevalue.Transformer +} + +func newRequestInfoTransformer(resource schema.GroupResource, delegate storagevalue.Transformer) *requestInfoTransformer { + return &requestInfoTransformer{ + baseValueCtx: request.WithRequestInfo(context.Background(), &request.RequestInfo{IsResourceRequest: true, APIGroup: resource.Group, Resource: resource.Resource}), + delegate: delegate, + } +} + +func (l *requestInfoTransformer) TransformFromStorage(ctx context.Context, data []byte, dataCtx storagevalue.Context) ([]byte, bool, error) { + return l.delegate.TransformFromStorage(l.withBaseValueCtx(ctx), data, dataCtx) +} + +func (l *requestInfoTransformer) TransformToStorage(ctx context.Context, data []byte, dataCtx storagevalue.Context) ([]byte, error) { + return l.delegate.TransformToStorage(l.withBaseValueCtx(ctx), data, dataCtx) +} + +func (l *requestInfoTransformer) withBaseValueCtx(ctx context.Context) context.Context { + return &joinValueContext{Context: ctx, baseValueCtx: l.baseValueCtx} +} + +type joinValueContext struct { + context.Context + baseValueCtx context.Context +} + +func (j *joinValueContext) Value(key any) any { + if val := j.Context.Value(key); val != nil { + return val + } + return j.baseValueCtx.Value(key) +} diff --git a/vendor/k8s.io/apiserver/pkg/server/options/recommended.go b/vendor/k8s.io/apiserver/pkg/server/options/recommended.go index 779c4e3da..2ead600f8 100644 --- a/vendor/k8s.io/apiserver/pkg/server/options/recommended.go +++ b/vendor/k8s.io/apiserver/pkg/server/options/recommended.go @@ -120,8 +120,8 @@ func (o *RecommendedOptions) ApplyTo(config *server.RecommendedConfig) error { if err := o.CoreAPI.ApplyTo(config); err != nil { return err } - var kubeClient *kubernetes.Clientset - var dynamicClient *dynamic.DynamicClient + var kubeClient kubernetes.Interface + var dynamicClient dynamic.Interface if config.ClientConfig != nil { var err error kubeClient, err = kubernetes.NewForConfig(config.ClientConfig) diff --git a/vendor/k8s.io/apiserver/pkg/server/options/server_run_options.go b/vendor/k8s.io/apiserver/pkg/server/options/server_run_options.go index 9246366dd..a4d31ef92 100644 --- a/vendor/k8s.io/apiserver/pkg/server/options/server_run_options.go +++ b/vendor/k8s.io/apiserver/pkg/server/options/server_run_options.go @@ -28,7 +28,8 @@ import ( utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apiserver/pkg/server" utilfeature "k8s.io/apiserver/pkg/util/feature" - utilversion "k8s.io/apiserver/pkg/util/version" + "k8s.io/component-base/featuregate" + utilversion "k8s.io/component-base/version" "github.com/spf13/pflag" ) @@ -94,22 +95,22 @@ type ServerRunOptions struct { ShutdownWatchTerminationGracePeriod time.Duration // ComponentGlobalsRegistry is the registry where the effective versions and feature gates for all components are stored. - ComponentGlobalsRegistry utilversion.ComponentGlobalsRegistry + ComponentGlobalsRegistry featuregate.ComponentGlobalsRegistry // ComponentName is name under which the server's global variabled are registered in the ComponentGlobalsRegistry. 
ComponentName string } func NewServerRunOptions() *ServerRunOptions { - if utilversion.DefaultComponentGlobalsRegistry.EffectiveVersionFor(utilversion.DefaultKubeComponent) == nil { + if featuregate.DefaultComponentGlobalsRegistry.EffectiveVersionFor(featuregate.DefaultKubeComponent) == nil { featureGate := utilfeature.DefaultMutableFeatureGate effectiveVersion := utilversion.DefaultKubeEffectiveVersion() - utilruntime.Must(utilversion.DefaultComponentGlobalsRegistry.Register(utilversion.DefaultKubeComponent, effectiveVersion, featureGate)) + utilruntime.Must(featuregate.DefaultComponentGlobalsRegistry.Register(featuregate.DefaultKubeComponent, effectiveVersion, featureGate)) } - return NewServerRunOptionsForComponent(utilversion.DefaultKubeComponent, utilversion.DefaultComponentGlobalsRegistry) + return NewServerRunOptionsForComponent(featuregate.DefaultKubeComponent, featuregate.DefaultComponentGlobalsRegistry) } -func NewServerRunOptionsForComponent(componentName string, componentGlobalsRegistry utilversion.ComponentGlobalsRegistry) *ServerRunOptions { +func NewServerRunOptionsForComponent(componentName string, componentGlobalsRegistry featuregate.ComponentGlobalsRegistry) *ServerRunOptions { defaults := server.NewConfig(serializer.CodecFactory{}) return &ServerRunOptions{ MaxRequestsInFlight: defaults.MaxRequestsInFlight, diff --git a/vendor/k8s.io/apiserver/pkg/server/options/tracing.go b/vendor/k8s.io/apiserver/pkg/server/options/tracing.go index 7be62fad0..9084755aa 100644 --- a/vendor/k8s.io/apiserver/pkg/server/options/tracing.go +++ b/vendor/k8s.io/apiserver/pkg/server/options/tracing.go @@ -27,7 +27,7 @@ import ( "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc" "go.opentelemetry.io/otel/metric/noop" "go.opentelemetry.io/otel/sdk/resource" - "go.opentelemetry.io/otel/semconv/v1.12.0" + semconv "go.opentelemetry.io/otel/semconv/v1.12.0" "google.golang.org/grpc" "k8s.io/apimachinery/pkg/runtime" @@ -47,7 +47,7 @@ const apiserverService = "apiserver" var ( cfgScheme = runtime.NewScheme() - codecs = serializer.NewCodecFactory(cfgScheme) + codecs = serializer.NewCodecFactory(cfgScheme, serializer.EnableStrict) ) func init() { diff --git a/vendor/k8s.io/apiserver/pkg/server/plugins.go b/vendor/k8s.io/apiserver/pkg/server/plugins.go index 239044641..37a49af6b 100644 --- a/vendor/k8s.io/apiserver/pkg/server/plugins.go +++ b/vendor/k8s.io/apiserver/pkg/server/plugins.go @@ -20,6 +20,7 @@ package server import ( "k8s.io/apiserver/pkg/admission" "k8s.io/apiserver/pkg/admission/plugin/namespace/lifecycle" + mutatingadmissionpolicy "k8s.io/apiserver/pkg/admission/plugin/policy/mutating" validatingadmissionpolicy "k8s.io/apiserver/pkg/admission/plugin/policy/validating" mutatingwebhook "k8s.io/apiserver/pkg/admission/plugin/webhook/mutating" validatingwebhook "k8s.io/apiserver/pkg/admission/plugin/webhook/validating" @@ -31,4 +32,5 @@ func RegisterAllAdmissionPlugins(plugins *admission.Plugins) { validatingwebhook.Register(plugins) mutatingwebhook.Register(plugins) validatingadmissionpolicy.Register(plugins) + mutatingadmissionpolicy.Register(plugins) } diff --git a/vendor/k8s.io/apiserver/pkg/server/storage/resource_encoding_config.go b/vendor/k8s.io/apiserver/pkg/server/storage/resource_encoding_config.go index f53fdb832..ce1bec676 100644 --- a/vendor/k8s.io/apiserver/pkg/server/storage/resource_encoding_config.go +++ b/vendor/k8s.io/apiserver/pkg/server/storage/resource_encoding_config.go @@ -22,7 +22,7 @@ import ( "k8s.io/apimachinery/pkg/runtime" 
"k8s.io/apimachinery/pkg/runtime/schema" apimachineryversion "k8s.io/apimachinery/pkg/util/version" - "k8s.io/apiserver/pkg/util/version" + version "k8s.io/component-base/version" ) type ResourceEncodingConfig interface { @@ -117,6 +117,10 @@ type introducedInterface interface { APILifecycleIntroduced() (major, minor int) } +type replacementInterface interface { + APILifecycleReplacement() schema.GroupVersionKind +} + func emulatedStorageVersion(binaryVersionOfResource schema.GroupVersion, example runtime.Object, effectiveVersion version.EffectiveVersion, scheme *runtime.Scheme) (schema.GroupVersion, error) { if example == nil || effectiveVersion == nil { return binaryVersionOfResource, nil @@ -170,6 +174,14 @@ func emulatedStorageVersion(binaryVersionOfResource schema.GroupVersion, example // If it was introduced after current compatibility version, don't use it // skip the introduced check for test when currentVersion is 0.0 to test all apis if introduced, hasIntroduced := exampleOfGVK.(introducedInterface); hasIntroduced && (compatibilityVersion.Major() > 0 || compatibilityVersion.Minor() > 0) { + + // Skip versions that have a replacement. + // This can be used to override this storage version selection by + // marking a storage version has having a replacement and preventing a + // that storage version from being selected. + if _, hasReplacement := exampleOfGVK.(replacementInterface); hasReplacement { + continue + } // API resource lifecycles should be relative to k8s api version majorIntroduced, minorIntroduced := introduced.APILifecycleIntroduced() introducedVer := apimachineryversion.MajorMinor(uint(majorIntroduced), uint(minorIntroduced)) diff --git a/vendor/k8s.io/apiserver/pkg/storage/OWNERS b/vendor/k8s.io/apiserver/pkg/storage/OWNERS index 044ecb9f6..b68cef1fe 100644 --- a/vendor/k8s.io/apiserver/pkg/storage/OWNERS +++ b/vendor/k8s.io/apiserver/pkg/storage/OWNERS @@ -10,7 +10,6 @@ reviewers: - caesarxuchao - mikedanese - liggitt - - ncdc - ingvagabund - enj - stevekuznetsov @@ -19,3 +18,5 @@ emeritus_approvers: - xiang90 - timothysc - lavalamp +emeritus_reviewers: + - ncdc diff --git a/vendor/k8s.io/apiserver/pkg/storage/cacher/cache_watcher.go b/vendor/k8s.io/apiserver/pkg/storage/cacher/cache_watcher.go index 595fd5036..c06aefe3d 100644 --- a/vendor/k8s.io/apiserver/pkg/storage/cacher/cache_watcher.go +++ b/vendor/k8s.io/apiserver/pkg/storage/cacher/cache_watcher.go @@ -454,6 +454,13 @@ func (c *cacheWatcher) processInterval(ctx context.Context, cacheInterval *watch const initProcessThreshold = 500 * time.Millisecond startTime := time.Now() + // cacheInterval may be created from a version being more fresh than requested + // (e.g. for NotOlderThan semantic). In such a case, we need to prevent watch event + // with lower resourceVersion from being delivered to ensure watch contract. 
+ if cacheInterval.resourceVersion > resourceVersion { + resourceVersion = cacheInterval.resourceVersion + } + initEventCount := 0 for { event, err := cacheInterval.Next() @@ -503,6 +510,10 @@ func (c *cacheWatcher) processInterval(ctx context.Context, cacheInterval *watch klog.V(2).Infof("processing %d initEvents of %s (%s) took %v", initEventCount, c.groupResource, c.identifier, processingTime) } + // send bookmark after sending all events in cacheInterval for watchlist request + if cacheInterval.initialEventsEndBookmark != nil { + c.sendWatchCacheEvent(cacheInterval.initialEventsEndBookmark) + } c.process(ctx, resourceVersion) } diff --git a/vendor/k8s.io/apiserver/pkg/storage/cacher/cacher.go b/vendor/k8s.io/apiserver/pkg/storage/cacher/cacher.go index 48791bd7b..a5b5506dc 100644 --- a/vendor/k8s.io/apiserver/pkg/storage/cacher/cacher.go +++ b/vendor/k8s.io/apiserver/pkg/storage/cacher/cacher.go @@ -492,7 +492,7 @@ func (c *Cacher) Create(ctx context.Context, key string, obj, out runtime.Object // Delete implements storage.Interface. func (c *Cacher) Delete( ctx context.Context, key string, out runtime.Object, preconditions *storage.Preconditions, - validateDeletion storage.ValidateObjectFunc, _ runtime.Object) error { + validateDeletion storage.ValidateObjectFunc, _ runtime.Object, opts storage.DeleteOptions) error { // Ignore the suggestion and try to pass down the current version of the object // read from cache. if elem, exists, err := c.watchCache.GetByKey(key); err != nil { @@ -501,10 +501,10 @@ func (c *Cacher) Delete( // DeepCopy the object since we modify resource version when serializing the // current object. currObj := elem.(*storeElement).Object.DeepCopyObject() - return c.storage.Delete(ctx, key, out, preconditions, validateDeletion, currObj) + return c.storage.Delete(ctx, key, out, preconditions, validateDeletion, currObj, opts) } // If we couldn't get the object, fallback to no-suggestion. - return c.storage.Delete(ctx, key, out, preconditions, validateDeletion, nil) + return c.storage.Delete(ctx, key, out, preconditions, validateDeletion, nil, opts) } type namespacedName struct { @@ -653,6 +653,8 @@ func (c *Cacher) Watch(ctx context.Context, key string, opts storage.ListOptions return newErrWatcher(err), nil } + c.setInitialEventsEndBookmarkIfRequested(cacheInterval, opts, c.watchCache.resourceVersion) + addedWatcher := false func() { c.Lock() @@ -693,9 +695,15 @@ func (c *Cacher) Watch(ctx context.Context, key string, opts storage.ListOptions // Get implements storage.Interface. func (c *Cacher) Get(ctx context.Context, key string, opts storage.GetOptions, objPtr runtime.Object) error { + ctx, span := tracing.Start(ctx, "cacher.Get", + attribute.String("audit-id", audit.GetAuditIDTruncated(ctx)), + attribute.String("key", key), + attribute.String("resource-version", opts.ResourceVersion)) + defer span.End(500 * time.Millisecond) if opts.ResourceVersion == "" { // If resourceVersion is not specified, serve it from underlying // storage (for backward compatibility). 
+ span.AddEvent("About to Get from underlying storage") return c.storage.Get(ctx, key, opts, objPtr) } @@ -703,6 +711,7 @@ func (c *Cacher) Get(ctx context.Context, key string, opts storage.GetOptions, o if !c.ready.check() { // If Cache is not initialized, delegate Get requests to storage // as described in https://kep.k8s.io/4568 + span.AddEvent("About to Get from underlying storage - cache not initialized") return c.storage.Get(ctx, key, opts, objPtr) } } @@ -722,6 +731,7 @@ func (c *Cacher) Get(ctx context.Context, key string, opts storage.GetOptions, o if getRV == 0 && !c.ready.check() { // If Cacher is not yet initialized and we don't require any specific // minimal resource version, simply forward the request to storage. + span.AddEvent("About to Get from underlying storage - cache not initialized and no resourceVersion set") return c.storage.Get(ctx, key, opts, objPtr) } if err := c.ready.wait(ctx); err != nil { @@ -734,6 +744,7 @@ func (c *Cacher) Get(ctx context.Context, key string, opts storage.GetOptions, o return err } + span.AddEvent("About to fetch object from cache") obj, exists, readResourceVersion, err := c.watchCache.WaitUntilFreshAndGet(ctx, getRV, key) if err != nil { return err @@ -856,7 +867,7 @@ func (c *Cacher) GetList(ctx context.Context, key string, opts storage.ListOptio } } - ctx, span := tracing.Start(ctx, "cacher list", + ctx, span := tracing.Start(ctx, "cacher.GetList", attribute.String("audit-id", audit.GetAuditIDTruncated(ctx)), attribute.Stringer("type", c.groupResource)) defer span.End(500 * time.Millisecond) @@ -1119,6 +1130,9 @@ func (c *Cacher) dispatchEvent(event *watchCacheEvent) { // Since add() can block, we explicitly add when cacher is unlocked. // Dispatching event in nonblocking way first, which make faster watchers // not be blocked by slower ones. 
+ // + // Note: if we ever decide to cache the serialization of bookmark events, + // we will also need to modify the watchEncoder encoder if event.Type == watch.Bookmark { for _, watcher := range c.watchersBuffer { watcher.nonblockingAdd(event) @@ -1439,6 +1453,26 @@ func (c *Cacher) Wait(ctx context.Context) error { return c.ready.wait(ctx) } +// setInitialEventsEndBookmarkIfRequested sets initialEventsEndBookmark field in watchCacheInterval for watchlist request +func (c *Cacher) setInitialEventsEndBookmarkIfRequested(cacheInterval *watchCacheInterval, opts storage.ListOptions, currentResourceVersion uint64) { + if opts.SendInitialEvents != nil && *opts.SendInitialEvents && opts.Predicate.AllowWatchBookmarks { + // We don't need to set the InitialEventsAnnotation for this bookmark event, + // because this will be automatically set during event conversion in cacheWatcher.convertToWatchEvent method + initialEventsEndBookmark := &watchCacheEvent{ + Type: watch.Bookmark, + Object: c.newFunc(), + ResourceVersion: currentResourceVersion, + } + + if err := c.versioner.UpdateObject(initialEventsEndBookmark.Object, initialEventsEndBookmark.ResourceVersion); err != nil { + klog.Errorf("failure to set resourceVersion to %d on initialEventsEndBookmark event %+v for watchlist request and wait for bookmark trigger to send", initialEventsEndBookmark.ResourceVersion, initialEventsEndBookmark.Object) + initialEventsEndBookmark = nil + } + + cacheInterval.initialEventsEndBookmark = initialEventsEndBookmark + } +} + // errWatcher implements watch.Interface to return a single error type errWatcher struct { result chan watch.Event diff --git a/vendor/k8s.io/apiserver/pkg/storage/cacher/store.go b/vendor/k8s.io/apiserver/pkg/storage/cacher/store.go new file mode 100644 index 000000000..8edad10a2 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/storage/cacher/store.go @@ -0,0 +1,141 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cacher + +import ( + "fmt" + + "github.com/google/btree" + + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apiserver/pkg/features" + utilfeature "k8s.io/apiserver/pkg/util/feature" + "k8s.io/client-go/tools/cache" +) + +const ( + // btreeDegree defines the degree of btree storage. + // Decided based on the benchmark results (below). + // Selected the lowest degree from three options with best runtime (16,32,128). 
+ // │ 2 │ 4 │ 8 │ 16 │ 32 │ 64 │ 128 │ + // │ sec/op │ sec/op vs base │ sec/op vs base │ sec/op vs base │ sec/op vs base │ sec/op vs base │ sec/op vs base │ + // StoreCreateList/RV=NotOlderThan-24 473.0µ ± 11% 430.1µ ± 9% -9.08% (p=0.005 n=10) 427.9µ ± 6% -9.54% (p=0.002 n=10) 403.9µ ± 8% -14.62% (p=0.000 n=10) 401.0µ ± 4% -15.22% (p=0.000 n=10) 408.0µ ± 4% -13.75% (p=0.000 n=10) 385.9µ ± 4% -18.42% (p=0.000 n=10) + // StoreCreateList/RV=ExactMatch-24 604.7µ ± 4% 596.7µ ± 8% ~ (p=0.529 n=10) 604.6µ ± 4% ~ (p=0.971 n=10) 601.1µ ± 4% ~ (p=0.853 n=10) 611.0µ ± 6% ~ (p=0.105 n=10) 598.2µ ± 5% ~ (p=0.579 n=10) 608.2µ ± 3% ~ (p=0.796 n=10) + // StoreList/List=All/Paginate=False/RV=Empty-24 729.1µ ± 5% 692.9µ ± 3% -4.96% (p=0.002 n=10) 693.7µ ± 3% -4.86% (p=0.000 n=10) 688.3µ ± 1% -5.59% (p=0.000 n=10) 690.4µ ± 5% -5.31% (p=0.002 n=10) 689.7µ ± 2% -5.40% (p=0.000 n=10) 687.8µ ± 3% -5.67% (p=0.000 n=10) + // StoreList/List=All/Paginate=True/RV=Empty-24 19.51m ± 2% 19.84m ± 2% ~ (p=0.105 n=10) 19.89m ± 3% ~ (p=0.190 n=10) 19.64m ± 4% ~ (p=0.853 n=10) 19.34m ± 4% ~ (p=0.481 n=10) 20.22m ± 4% +3.66% (p=0.007 n=10) 19.58m ± 4% ~ (p=0.912 n=10) + // StoreList/List=Namespace/Paginate=False/RV=Empty-24 1.672m ± 4% 1.635m ± 2% ~ (p=0.247 n=10) 1.673m ± 5% ~ (p=0.631 n=10) 1.657m ± 2% ~ (p=0.971 n=10) 1.656m ± 4% ~ (p=0.739 n=10) 1.678m ± 2% ~ (p=0.631 n=10) 1.718m ± 8% ~ (p=0.105 n=10) + // geomean 1.467m 1.420m -3.24% 1.430m -2.58% 1.403m -4.38% 1.402m -4.46% 1.417m -3.44% 1.403m -4.41% + // + // │ 2 │ 4 │ 8 │ 16 │ 32 │ 64 │ 128 │ + // │ B/op │ B/op vs base │ B/op vs base │ B/op vs base │ B/op vs base │ B/op vs base │ B/op vs base │ + // StoreCreateList/RV=NotOlderThan-24 98.58Ki ± 11% 101.33Ki ± 13% ~ (p=0.280 n=10) 99.80Ki ± 26% ~ (p=0.353 n=10) 109.63Ki ± 9% ~ (p=0.075 n=10) 112.56Ki ± 6% +14.18% (p=0.007 n=10) 114.41Ki ± 10% +16.05% (p=0.003 n=10) 115.06Ki ± 12% +16.72% (p=0.011 n=10) + // StoreCreateList/RV=ExactMatch-24 117.1Ki ± 0% 117.5Ki ± 0% ~ (p=0.218 n=10) 116.9Ki ± 0% ~ (p=0.052 n=10) 117.3Ki ± 0% ~ (p=0.353 n=10) 116.9Ki ± 0% ~ (p=0.075 n=10) 117.0Ki ± 0% ~ (p=0.436 n=10) 117.0Ki ± 0% ~ (p=0.280 n=10) + // StoreList/List=All/Paginate=False/RV=Empty-24 6.023Mi ± 0% 6.024Mi ± 0% +0.01% (p=0.037 n=10) 6.024Mi ± 0% ~ (p=0.493 n=10) 6.024Mi ± 0% +0.01% (p=0.035 n=10) 6.024Mi ± 0% ~ (p=0.247 n=10) 6.024Mi ± 0% ~ (p=0.247 n=10) 6.024Mi ± 0% ~ (p=0.315 n=10) + // StoreList/List=All/Paginate=True/RV=Empty-24 64.22Mi ± 0% 64.21Mi ± 0% ~ (p=0.075 n=10) 64.23Mi ± 0% ~ (p=0.280 n=10) 64.21Mi ± 0% -0.02% (p=0.002 n=10) 64.22Mi ± 0% ~ (p=0.579 n=10) 64.22Mi ± 0% ~ (p=0.971 n=10) 64.22Mi ± 0% ~ (p=1.000 n=10) + // StoreList/List=Namespace/Paginate=False/RV=Empty-24 8.177Mi ± 0% 8.178Mi ± 0% ~ (p=0.579 n=10) 8.177Mi ± 0% ~ (p=0.971 n=10) 8.179Mi ± 0% ~ (p=0.579 n=10) 8.178Mi ± 0% ~ (p=0.739 n=10) 8.179Mi ± 0% ~ (p=0.315 n=10) 8.176Mi ± 0% ~ (p=0.247 n=10) + // geomean 2.034Mi 2.047Mi +0.61% 2.039Mi +0.22% 2.079Mi +2.19% 2.088Mi +2.66% 2.095Mi +3.01% 2.098Mi +3.12% + // + // │ 2 │ 4 │ 8 │ 16 │ 32 │ 64 │ 128 │ + // │ allocs/op │ allocs/op vs base │ allocs/op vs base │ allocs/op vs base │ allocs/op vs base │ allocs/op vs base │ allocs/op vs base │ + // StoreCreateList/RV=NotOlderThan-24 560.0 ± 0% 558.0 ± 0% -0.36% (p=0.000 n=10) 557.0 ± 0% -0.54% (p=0.000 n=10) 558.0 ± 0% -0.36% (p=0.000 n=10) 557.0 ± 0% -0.54% (p=0.000 n=10) 557.0 ± 0% -0.54% (p=0.000 n=10) 557.0 ± 0% -0.54% (p=0.000 n=10) + // StoreCreateList/RV=ExactMatch-24 871.0 ± 0% 870.0 ± 0% -0.11% (p=0.038 n=10) 870.0 ± 0% -0.11% (p=0.004 n=10) 
870.0 ± 0% -0.11% (p=0.005 n=10) 869.0 ± 0% -0.23% (p=0.000 n=10) 870.0 ± 0% -0.11% (p=0.001 n=10) 870.0 ± 0% -0.11% (p=0.000 n=10) + // StoreList/List=All/Paginate=False/RV=Empty-24 351.0 ± 3% 358.0 ± 1% +1.99% (p=0.034 n=10) 352.5 ± 3% ~ (p=0.589 n=10) 358.5 ± 1% +2.14% (p=0.022 n=10) 356.5 ± 3% ~ (p=0.208 n=10) 355.0 ± 3% ~ (p=0.224 n=10) 355.0 ± 3% ~ (p=0.183 n=10) + // StoreList/List=All/Paginate=True/RV=Empty-24 494.4k ± 0% 494.4k ± 0% ~ (p=0.424 n=10) 494.6k ± 0% +0.06% (p=0.000 n=10) 492.7k ± 0% -0.34% (p=0.000 n=10) 494.5k ± 0% +0.02% (p=0.009 n=10) 493.0k ± 0% -0.28% (p=0.000 n=10) 494.4k ± 0% ~ (p=0.424 n=10) + // StoreList/List=Namespace/Paginate=False/RV=Empty-24 32.43k ± 0% 32.44k ± 0% ~ (p=0.579 n=10) 32.43k ± 0% ~ (p=0.971 n=10) 32.45k ± 0% ~ (p=0.517 n=10) 32.44k ± 0% ~ (p=0.670 n=10) 32.46k ± 0% ~ (p=0.256 n=10) 32.41k ± 0% ~ (p=0.247 n=10) + // geomean 4.872k 4.887k +0.31% 4.870k -0.03% 4.885k +0.28% 4.880k +0.17% 4.875k +0.06% 4.876k +0.08% + btreeDegree = 16 +) + +type storeIndexer interface { + Add(obj interface{}) error + Update(obj interface{}) error + Delete(obj interface{}) error + List() []interface{} + ListKeys() []string + Get(obj interface{}) (item interface{}, exists bool, err error) + GetByKey(key string) (item interface{}, exists bool, err error) + Replace([]interface{}, string) error + ByIndex(indexName, indexedValue string) ([]interface{}, error) +} + +type orderedLister interface { + ListPrefix(prefix, continueKey string, limit int) (items []interface{}, hasMore bool) +} + +func newStoreIndexer(indexers *cache.Indexers) storeIndexer { + if utilfeature.DefaultFeatureGate.Enabled(features.BtreeWatchCache) { + return newThreadedBtreeStoreIndexer(storeElementIndexers(indexers), btreeDegree) + } + return cache.NewIndexer(storeElementKey, storeElementIndexers(indexers)) +} + +// Computing a key of an object is generally non-trivial (it performs +// e.g. validation underneath). Similarly computing object fields and +// labels. To avoid computing them multiple times (to serve the event +// in different List/Watch requests), in the underlying store we are +// keeping structs (key, object, labels, fields). 
+type storeElement struct { + Key string + Object runtime.Object + Labels labels.Set + Fields fields.Set +} + +func (t *storeElement) Less(than btree.Item) bool { + return t.Key < than.(*storeElement).Key +} + +var _ btree.Item = (*storeElement)(nil) + +func storeElementKey(obj interface{}) (string, error) { + elem, ok := obj.(*storeElement) + if !ok { + return "", fmt.Errorf("not a storeElement: %v", obj) + } + return elem.Key, nil +} + +func storeElementObject(obj interface{}) (runtime.Object, error) { + elem, ok := obj.(*storeElement) + if !ok { + return nil, fmt.Errorf("not a storeElement: %v", obj) + } + return elem.Object, nil +} + +func storeElementIndexFunc(objIndexFunc cache.IndexFunc) cache.IndexFunc { + return func(obj interface{}) (strings []string, e error) { + seo, err := storeElementObject(obj) + if err != nil { + return nil, err + } + return objIndexFunc(seo) + } +} + +func storeElementIndexers(indexers *cache.Indexers) cache.Indexers { + if indexers == nil { + return cache.Indexers{} + } + ret := cache.Indexers{} + for indexName, indexFunc := range *indexers { + ret[indexName] = storeElementIndexFunc(indexFunc) + } + return ret +} diff --git a/vendor/k8s.io/apiserver/pkg/storage/cacher/store_btree.go b/vendor/k8s.io/apiserver/pkg/storage/cacher/store_btree.go new file mode 100644 index 000000000..b4af96920 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/storage/cacher/store_btree.go @@ -0,0 +1,393 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cacher + +import ( + "fmt" + "math" + "strings" + "sync" + + "github.com/google/btree" + "k8s.io/client-go/tools/cache" +) + +// newThreadedBtreeStoreIndexer returns a storage for cacher by adding locking over the two 2 data structures: +// * btree based storage for efficient LIST operation on prefix +// * map based indexer for retrieving values by index. +// This separation is used to allow independent snapshotting those two storages in the future. +// Intention is to utilize btree for its cheap snapshots that don't require locking if don't mutate data. 
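The newStoreIndexer shown above switches between the classic cache.Indexer and the btree-backed store on the BtreeWatchCache feature gate. As a sketch only, a hypothetical in-package test (not part of the vendored code) could pin the gate with the usual feature-gate test helper and assert that the ordered variant is selected:

package cacher

import (
	"testing"

	"k8s.io/apiserver/pkg/features"
	utilfeature "k8s.io/apiserver/pkg/util/feature"
	featuregatetesting "k8s.io/component-base/featuregate/testing"
)

// Pin BtreeWatchCache for the duration of the test, then check that the
// store returned by newStoreIndexer supports ordered prefix listing.
func TestBtreeStoreSelected(t *testing.T) {
	featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.BtreeWatchCache, true)

	store := newStoreIndexer(nil)
	if _, ok := store.(orderedLister); !ok {
		t.Fatalf("expected the btree-backed store to implement orderedLister")
	}
}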
+func newThreadedBtreeStoreIndexer(indexers cache.Indexers, degree int) *threadedStoreIndexer { + return &threadedStoreIndexer{ + store: newBtreeStore(degree), + indexer: newIndexer(indexers), + } +} + +type threadedStoreIndexer struct { + lock sync.RWMutex + store btreeStore + indexer indexer +} + +func (si *threadedStoreIndexer) Add(obj interface{}) error { + return si.addOrUpdate(obj) +} + +func (si *threadedStoreIndexer) Update(obj interface{}) error { + return si.addOrUpdate(obj) +} + +func (si *threadedStoreIndexer) addOrUpdate(obj interface{}) error { + if obj == nil { + return fmt.Errorf("obj cannot be nil") + } + newElem, ok := obj.(*storeElement) + if !ok { + return fmt.Errorf("obj not a storeElement: %#v", obj) + } + si.lock.Lock() + defer si.lock.Unlock() + oldElem := si.store.addOrUpdateElem(newElem) + return si.indexer.updateElem(newElem.Key, oldElem, newElem) +} + +func (si *threadedStoreIndexer) Delete(obj interface{}) error { + storeElem, ok := obj.(*storeElement) + if !ok { + return fmt.Errorf("obj not a storeElement: %#v", obj) + } + si.lock.Lock() + defer si.lock.Unlock() + oldObj := si.store.deleteElem(storeElem) + if oldObj == nil { + return nil + } + return si.indexer.updateElem(storeElem.Key, oldObj.(*storeElement), nil) +} + +func (si *threadedStoreIndexer) List() []interface{} { + si.lock.RLock() + defer si.lock.RUnlock() + return si.store.List() +} + +func (si *threadedStoreIndexer) ListPrefix(prefix, continueKey string, limit int) ([]interface{}, bool) { + si.lock.RLock() + defer si.lock.RUnlock() + return si.store.ListPrefix(prefix, continueKey, limit) +} + +func (si *threadedStoreIndexer) ListKeys() []string { + si.lock.RLock() + defer si.lock.RUnlock() + return si.store.ListKeys() +} + +func (si *threadedStoreIndexer) Get(obj interface{}) (item interface{}, exists bool, err error) { + si.lock.RLock() + defer si.lock.RUnlock() + return si.store.Get(obj) +} + +func (si *threadedStoreIndexer) GetByKey(key string) (item interface{}, exists bool, err error) { + si.lock.RLock() + defer si.lock.RUnlock() + return si.store.GetByKey(key) +} + +func (si *threadedStoreIndexer) Replace(objs []interface{}, resourceVersion string) error { + si.lock.Lock() + defer si.lock.Unlock() + err := si.store.Replace(objs, resourceVersion) + if err != nil { + return err + } + return si.indexer.Replace(objs, resourceVersion) +} + +func (si *threadedStoreIndexer) ByIndex(indexName, indexValue string) ([]interface{}, error) { + si.lock.RLock() + defer si.lock.RUnlock() + return si.indexer.ByIndex(indexName, indexValue) +} + +func newBtreeStore(degree int) btreeStore { + return btreeStore{ + tree: btree.New(degree), + } +} + +type btreeStore struct { + tree *btree.BTree +} + +func (s *btreeStore) Add(obj interface{}) error { + if obj == nil { + return fmt.Errorf("obj cannot be nil") + } + storeElem, ok := obj.(*storeElement) + if !ok { + return fmt.Errorf("obj not a storeElement: %#v", obj) + } + s.addOrUpdateElem(storeElem) + return nil +} + +func (s *btreeStore) Update(obj interface{}) error { + if obj == nil { + return fmt.Errorf("obj cannot be nil") + } + storeElem, ok := obj.(*storeElement) + if !ok { + return fmt.Errorf("obj not a storeElement: %#v", obj) + } + s.addOrUpdateElem(storeElem) + return nil +} + +func (s *btreeStore) Delete(obj interface{}) error { + if obj == nil { + return fmt.Errorf("obj cannot be nil") + } + storeElem, ok := obj.(*storeElement) + if !ok { + return fmt.Errorf("obj not a storeElement: %#v", obj) + } + s.deleteElem(storeElem) + return nil +} + +func (s 
*btreeStore) deleteElem(storeElem *storeElement) interface{} { + return s.tree.Delete(storeElem) +} + +func (s *btreeStore) List() []interface{} { + items := make([]interface{}, 0, s.tree.Len()) + s.tree.Ascend(func(i btree.Item) bool { + items = append(items, i.(interface{})) + return true + }) + return items +} + +func (s *btreeStore) ListKeys() []string { + items := make([]string, 0, s.tree.Len()) + s.tree.Ascend(func(i btree.Item) bool { + items = append(items, i.(*storeElement).Key) + return true + }) + return items +} + +func (s *btreeStore) Get(obj interface{}) (item interface{}, exists bool, err error) { + storeElem, ok := obj.(*storeElement) + if !ok { + return nil, false, fmt.Errorf("obj is not a storeElement") + } + item = s.tree.Get(storeElem) + if item == nil { + return nil, false, nil + } + return item, true, nil +} + +func (s *btreeStore) GetByKey(key string) (item interface{}, exists bool, err error) { + return s.getByKey(key) +} + +func (s *btreeStore) Replace(objs []interface{}, _ string) error { + s.tree.Clear(false) + for _, obj := range objs { + storeElem, ok := obj.(*storeElement) + if !ok { + return fmt.Errorf("obj not a storeElement: %#v", obj) + } + s.addOrUpdateElem(storeElem) + } + return nil +} + +// addOrUpdateLocked assumes a lock is held and is used for Add +// and Update operations. +func (s *btreeStore) addOrUpdateElem(storeElem *storeElement) *storeElement { + oldObj := s.tree.ReplaceOrInsert(storeElem) + if oldObj == nil { + return nil + } + return oldObj.(*storeElement) +} + +func (s *btreeStore) getByKey(key string) (item interface{}, exists bool, err error) { + keyElement := &storeElement{Key: key} + item = s.tree.Get(keyElement) + return item, item != nil, nil +} + +func (s *btreeStore) ListPrefix(prefix, continueKey string, limit int) ([]interface{}, bool) { + if limit < 0 { + return nil, false + } + if continueKey == "" { + continueKey = prefix + } + var result []interface{} + var hasMore bool + if limit == 0 { + limit = math.MaxInt + } + s.tree.AscendGreaterOrEqual(&storeElement{Key: continueKey}, func(i btree.Item) bool { + elementKey := i.(*storeElement).Key + if !strings.HasPrefix(elementKey, prefix) { + return false + } + // TODO: Might be worth to lookup one more item to provide more accurate HasMore. + if len(result) >= limit { + hasMore = true + return false + } + result = append(result, i.(interface{})) + return true + }) + return result, hasMore +} + +func (s *btreeStore) Count(prefix, continueKey string) (count int) { + if continueKey == "" { + continueKey = prefix + } + s.tree.AscendGreaterOrEqual(&storeElement{Key: continueKey}, func(i btree.Item) bool { + elementKey := i.(*storeElement).Key + if !strings.HasPrefix(elementKey, prefix) { + return false + } + count++ + return true + }) + return count +} + +// newIndexer returns a indexer similar to storeIndex from client-go/tools/cache. +// TODO: Unify the indexer code with client-go/cache package. +// Major differences is type of values stored and their mutability: +// * Indexer in client-go stores object keys, that are not mutable. +// * Indexer in cacher stores whole objects, which is mutable. +// Indexer in client-go uses keys as it is used in conjunction with map[key]value +// allowing for fast value retrieval, while btree used in cacher would provide additional overhead. +// Difference in mutability of stored values is used for optimizing some operations in client-go Indexer. 
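To make the ListPrefix contract above concrete, here is a small self-contained sketch that mirrors how the btree is walked from a continue key and how hasMore is reported. It is illustrative only, not the vendored implementation: the kv item type, the listPrefix helper, and the simplified limit handling are assumptions; only the github.com/google/btree API and the contract (continueKey defaults to the prefix, limit 0 means no limit) come from the code above.

package main

import (
	"fmt"
	"strings"

	"github.com/google/btree"
)

type kv struct{ key string }

func (a kv) Less(b btree.Item) bool { return a.key < b.(kv).key }

// listPrefix mirrors btreeStore.ListPrefix: continueKey defaults to the prefix,
// limit 0 means "no limit", and hasMore is set when the limit cuts the scan short.
func listPrefix(t *btree.BTree, prefix, continueKey string, limit int) (keys []string, hasMore bool) {
	if continueKey == "" {
		continueKey = prefix
	}
	t.AscendGreaterOrEqual(kv{key: continueKey}, func(i btree.Item) bool {
		k := i.(kv).key
		if !strings.HasPrefix(k, prefix) {
			return false // left the prefix range, stop scanning
		}
		if limit > 0 && len(keys) >= limit {
			hasMore = true
			return false
		}
		keys = append(keys, k)
		return true
	})
	return keys, hasMore
}

func main() {
	t := btree.New(16)
	for _, k := range []string{"/pods/ns1/a", "/pods/ns1/b", "/pods/ns2/c", "/secrets/ns1/x"} {
		t.ReplaceOrInsert(kv{key: k})
	}
	page1, more := listPrefix(t, "/pods/", "", 2)
	fmt.Println(page1, more) // [/pods/ns1/a /pods/ns1/b] true
	page2, more := listPrefix(t, "/pods/", "/pods/ns1/b\x00", 0)
	fmt.Println(page2, more) // [/pods/ns2/c] false
}

Resuming from the last returned key plus a separator is one way a continue token could be derived; the vendored code receives continueKey from the layer above rather than computing it here.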
+func newIndexer(indexers cache.Indexers) indexer { + return indexer{ + indices: map[string]map[string]map[string]*storeElement{}, + indexers: indexers, + } +} + +type indexer struct { + indices map[string]map[string]map[string]*storeElement + indexers cache.Indexers +} + +func (i *indexer) ByIndex(indexName, indexValue string) ([]interface{}, error) { + indexFunc := i.indexers[indexName] + if indexFunc == nil { + return nil, fmt.Errorf("index with name %s does not exist", indexName) + } + index := i.indices[indexName] + set := index[indexValue] + list := make([]interface{}, 0, len(set)) + for _, obj := range set { + list = append(list, obj) + } + return list, nil +} + +func (i *indexer) Replace(objs []interface{}, resourceVersion string) error { + i.indices = map[string]map[string]map[string]*storeElement{} + for _, obj := range objs { + storeElem, ok := obj.(*storeElement) + if !ok { + return fmt.Errorf("obj not a storeElement: %#v", obj) + } + err := i.updateElem(storeElem.Key, nil, storeElem) + if err != nil { + return err + } + } + return nil +} + +func (i *indexer) updateElem(key string, oldObj, newObj *storeElement) (err error) { + var oldIndexValues, indexValues []string + for name, indexFunc := range i.indexers { + if oldObj != nil { + oldIndexValues, err = indexFunc(oldObj) + } else { + oldIndexValues = oldIndexValues[:0] + } + if err != nil { + return fmt.Errorf("unable to calculate an index entry for key %q on index %q: %w", key, name, err) + } + if newObj != nil { + indexValues, err = indexFunc(newObj) + } else { + indexValues = indexValues[:0] + } + if err != nil { + return fmt.Errorf("unable to calculate an index entry for key %q on index %q: %w", key, name, err) + } + index := i.indices[name] + if index == nil { + index = map[string]map[string]*storeElement{} + i.indices[name] = index + } + if len(indexValues) == 1 && len(oldIndexValues) == 1 && indexValues[0] == oldIndexValues[0] { + // We optimize for the most common case where indexFunc returns a single value which has not been changed + i.add(key, indexValues[0], newObj, index) + continue + } + for _, value := range oldIndexValues { + i.delete(key, value, index) + } + for _, value := range indexValues { + i.add(key, value, newObj, index) + } + } + return nil +} + +func (i *indexer) add(key, value string, obj *storeElement, index map[string]map[string]*storeElement) { + set := index[value] + if set == nil { + set = map[string]*storeElement{} + index[value] = set + } + set[key] = obj +} + +func (i *indexer) delete(key, value string, index map[string]map[string]*storeElement) { + set := index[value] + if set == nil { + return + } + delete(set, key) + // If we don's delete the set when zero, indices with high cardinality + // short lived resources can cause memory to increase over time from + // unused empty sets. See `kubernetes/kubernetes/issues/84959`. + if len(set) == 0 { + delete(index, value) + } +} diff --git a/vendor/k8s.io/apiserver/pkg/storage/cacher/watch_cache.go b/vendor/k8s.io/apiserver/pkg/storage/cacher/watch_cache.go index 34a48ac63..541988b31 100644 --- a/vendor/k8s.io/apiserver/pkg/storage/cacher/watch_cache.go +++ b/vendor/k8s.io/apiserver/pkg/storage/cacher/watch_cache.go @@ -83,55 +83,6 @@ type watchCacheEvent struct { RecordTime time.Time } -// Computing a key of an object is generally non-trivial (it performs -// e.g. validation underneath). Similarly computing object fields and -// labels. 
To avoid computing them multiple times (to serve the event -// in different List/Watch requests), in the underlying store we are -// keeping structs (key, object, labels, fields). -type storeElement struct { - Key string - Object runtime.Object - Labels labels.Set - Fields fields.Set -} - -func storeElementKey(obj interface{}) (string, error) { - elem, ok := obj.(*storeElement) - if !ok { - return "", fmt.Errorf("not a storeElement: %v", obj) - } - return elem.Key, nil -} - -func storeElementObject(obj interface{}) (runtime.Object, error) { - elem, ok := obj.(*storeElement) - if !ok { - return nil, fmt.Errorf("not a storeElement: %v", obj) - } - return elem.Object, nil -} - -func storeElementIndexFunc(objIndexFunc cache.IndexFunc) cache.IndexFunc { - return func(obj interface{}) (strings []string, e error) { - seo, err := storeElementObject(obj) - if err != nil { - return nil, err - } - return objIndexFunc(seo) - } -} - -func storeElementIndexers(indexers *cache.Indexers) cache.Indexers { - if indexers == nil { - return cache.Indexers{} - } - ret := cache.Indexers{} - for indexName, indexFunc := range *indexers { - ret[indexName] = storeElementIndexFunc(indexFunc) - } - return ret -} - // watchCache implements a Store interface. // However, it depends on the elements implementing runtime.Object interface. // @@ -173,7 +124,7 @@ type watchCache struct { // history" i.e. from the moment just after the newest cached watched event. // It is necessary to effectively allow clients to start watching at now. // NOTE: We assume that is thread-safe. - store cache.Indexer + store storeIndexer // ResourceVersion up to which the watchCache is propagated. resourceVersion uint64 @@ -223,7 +174,7 @@ func newWatchCache( upperBoundCapacity: defaultUpperBoundCapacity, startIndex: 0, endIndex: 0, - store: cache.NewIndexer(storeElementKey, storeElementIndexers(indexers)), + store: newStoreIndexer(indexers), resourceVersion: 0, listResourceVersion: 0, eventHandler: eventHandler, @@ -501,29 +452,7 @@ func (s sortableStoreElements) Swap(i, j int) { // WaitUntilFreshAndList returns list of pointers to `storeElement` objects along // with their ResourceVersion and the name of the index, if any, that was used. 
-func (w *watchCache) WaitUntilFreshAndList(ctx context.Context, resourceVersion uint64, key string, matchValues []storage.MatchValue) ([]interface{}, uint64, string, error) { - items, rv, index, err := w.waitUntilFreshAndListItems(ctx, resourceVersion, key, matchValues) - if err != nil { - return nil, 0, "", err - } - - var result []interface{} - for _, item := range items { - elem, ok := item.(*storeElement) - if !ok { - return nil, 0, "", fmt.Errorf("non *storeElement returned from storage: %v", item) - } - if !hasPathPrefix(elem.Key, key) { - continue - } - result = append(result, item) - } - - sort.Sort(sortableStoreElements(result)) - return result, rv, index, nil -} - -func (w *watchCache) waitUntilFreshAndListItems(ctx context.Context, resourceVersion uint64, key string, matchValues []storage.MatchValue) (result []interface{}, rv uint64, index string, err error) { +func (w *watchCache) WaitUntilFreshAndList(ctx context.Context, resourceVersion uint64, key string, matchValues []storage.MatchValue) (result []interface{}, rv uint64, index string, err error) { requestWatchProgressSupported := etcdfeature.DefaultFeatureSupportChecker.Supports(storage.RequestWatchProgress) if utilfeature.DefaultFeatureGate.Enabled(features.ConsistentListFromCache) && requestWatchProgressSupported && w.notFresh(resourceVersion) { w.waitingUntilFresh.Add() @@ -537,21 +466,46 @@ func (w *watchCache) waitUntilFreshAndListItems(ctx context.Context, resourceVer if err != nil { return result, rv, index, err } - - result, rv, index, err = func() ([]interface{}, uint64, string, error) { + var prefixFilteredAndOrdered bool + result, rv, index, prefixFilteredAndOrdered, err = func() ([]interface{}, uint64, string, bool, error) { // This isn't the place where we do "final filtering" - only some "prefiltering" is happening here. So the only // requirement here is to NOT miss anything that should be returned. We can return as many non-matching items as we // want - they will be filtered out later. The fact that we return less things is only further performance improvement. // TODO: if multiple indexes match, return the one with the fewest items, so as to do as much filtering as possible. for _, matchValue := range matchValues { if result, err := w.store.ByIndex(matchValue.IndexName, matchValue.Value); err == nil { - return result, w.resourceVersion, matchValue.IndexName, nil + return result, w.resourceVersion, matchValue.IndexName, false, nil } } - return w.store.List(), w.resourceVersion, "", nil + if store, ok := w.store.(orderedLister); ok { + result, _ := store.ListPrefix(key, "", 0) + return result, w.resourceVersion, "", true, nil + } + return w.store.List(), w.resourceVersion, "", false, nil }() + if !prefixFilteredAndOrdered { + result, err = filterPrefixAndOrder(key, result) + if err != nil { + return nil, 0, "", err + } + } + return result, w.resourceVersion, index, nil +} - return result, rv, index, err +func filterPrefixAndOrder(prefix string, items []interface{}) ([]interface{}, error) { + var result []interface{} + for _, item := range items { + elem, ok := item.(*storeElement) + if !ok { + return nil, fmt.Errorf("non *storeElement returned from storage: %v", item) + } + if !hasPathPrefix(elem.Key, prefix) { + continue + } + result = append(result, item) + } + sort.Sort(sortableStoreElements(result)) + return result, nil } func (w *watchCache) notFresh(resourceVersion uint64) bool { @@ -739,6 +693,7 @@ func (w *watchCache) isIndexValidLocked(index int) bool { // be called under the watchCache lock. 
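The rewritten WaitUntilFreshAndList above takes the ordered path only when the store implements orderedLister, and otherwise falls back to the filterPrefixAndOrder pass. A reduced, self-contained sketch of that selection pattern follows; the lister, mapStore, and listForPrefix names are made up for illustration and the element type is trimmed to string, so this is not the vendored code:

package main

import (
	"fmt"
	"sort"
	"strings"
)

// trimmed-down stand-ins for the vendored store interfaces
type lister interface{ List() []string }

type orderedLister interface {
	ListPrefix(prefix, continueKey string, limit int) ([]string, bool)
}

func listForPrefix(store lister, prefix string) []string {
	// fast path: an ordered store already returns keys prefix-filtered and sorted
	if ol, ok := store.(orderedLister); ok {
		items, _ := ol.ListPrefix(prefix, "", 0)
		return items
	}
	// fallback: filter and order the unordered listing, as filterPrefixAndOrder does
	var out []string
	for _, k := range store.List() {
		if strings.HasPrefix(k, prefix) {
			out = append(out, k)
		}
	}
	sort.Strings(out)
	return out
}

type mapStore map[string]struct{}

func (m mapStore) List() []string {
	keys := make([]string, 0, len(m))
	for k := range m {
		keys = append(keys, k)
	}
	return keys
}

func main() {
	s := mapStore{"/pods/b": {}, "/pods/a": {}, "/secrets/x": {}}
	fmt.Println(listForPrefix(s, "/pods/")) // [/pods/a /pods/b]
}

The point of the fast path is that the btree-backed store already returns keys prefix-filtered and in order, so the extra filter-and-sort pass can be skipped entirely.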
func (w *watchCache) getAllEventsSinceLocked(resourceVersion uint64, key string, opts storage.ListOptions) (*watchCacheInterval, error) { _, matchesSingle := opts.Predicate.MatchesSingle() + matchesSingle = matchesSingle && !opts.Recursive if opts.SendInitialEvents != nil && *opts.SendInitialEvents { return w.getIntervalFromStoreLocked(key, matchesSingle) } @@ -788,7 +743,7 @@ func (w *watchCache) getAllEventsSinceLocked(resourceVersion uint64, key string, indexerFunc := func(i int) *watchCacheEvent { return w.cache[i%w.capacity] } - ci := newCacheInterval(w.startIndex+first, w.endIndex, indexerFunc, w.indexValidator, w.RWMutex.RLocker()) + ci := newCacheInterval(w.startIndex+first, w.endIndex, indexerFunc, w.indexValidator, resourceVersion, w.RWMutex.RLocker()) return ci, nil } diff --git a/vendor/k8s.io/apiserver/pkg/storage/cacher/watch_cache_interval.go b/vendor/k8s.io/apiserver/pkg/storage/cacher/watch_cache_interval.go index fa7d38946..4920022cd 100644 --- a/vendor/k8s.io/apiserver/pkg/storage/cacher/watch_cache_interval.go +++ b/vendor/k8s.io/apiserver/pkg/storage/cacher/watch_cache_interval.go @@ -25,7 +25,6 @@ import ( "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/watch" - "k8s.io/client-go/tools/cache" ) // watchCacheInterval serves as an abstraction over a source @@ -92,26 +91,34 @@ type watchCacheInterval struct { // lock on each invocation of Next(). buffer *watchCacheIntervalBuffer + // resourceVersion is the resourceVersion from which + // the interval was constructed. + resourceVersion uint64 + // lock effectively protects access to the underlying source // of events through - indexer and indexValidator. // // Given that indexer and indexValidator only read state, if // possible, Locker obtained through RLocker() is provided. lock sync.Locker + + // initialEventsEndBookmark will be sent after sending all events in cacheInterval + initialEventsEndBookmark *watchCacheEvent } type attrFunc func(runtime.Object) (labels.Set, fields.Set, error) type indexerFunc func(int) *watchCacheEvent type indexValidator func(int) bool -func newCacheInterval(startIndex, endIndex int, indexer indexerFunc, indexValidator indexValidator, locker sync.Locker) *watchCacheInterval { +func newCacheInterval(startIndex, endIndex int, indexer indexerFunc, indexValidator indexValidator, resourceVersion uint64, locker sync.Locker) *watchCacheInterval { return &watchCacheInterval{ - startIndex: startIndex, - endIndex: endIndex, - indexer: indexer, - indexValidator: indexValidator, - buffer: &watchCacheIntervalBuffer{buffer: make([]*watchCacheEvent, bufferSize)}, - lock: locker, + startIndex: startIndex, + endIndex: endIndex, + indexer: indexer, + indexValidator: indexValidator, + buffer: &watchCacheIntervalBuffer{buffer: make([]*watchCacheEvent, bufferSize)}, + resourceVersion: resourceVersion, + lock: locker, } } @@ -133,7 +140,7 @@ func (s sortableWatchCacheEvents) Swap(i, j int) { // returned by Next() need to be events from a List() done on the underlying store of // the watch cache. // The items returned in the interval will be sorted by Key. 
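The initialEventsEndBookmark carried on the interval above is only populated for watch-list requests, that is when SendInitialEvents is set and the predicate allows watch bookmarks. A sketch of the storage.ListOptions such a request would carry; the field names follow the vendored storage package, while the watchListOptions helper itself is purely illustrative:

package example

import "k8s.io/apiserver/pkg/storage"

// watchListOptions builds options under which the cacher attaches an
// initial-events-end bookmark to the interval: SendInitialEvents is set
// and the predicate allows watch bookmarks.
func watchListOptions(rv string, pred storage.SelectionPredicate) storage.ListOptions {
	sendInitialEvents := true
	pred.AllowWatchBookmarks = true
	return storage.ListOptions{
		ResourceVersion:   rv,
		Recursive:         true,
		Predicate:         pred,
		SendInitialEvents: &sendInitialEvents,
	}
}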
-func newCacheIntervalFromStore(resourceVersion uint64, store cache.Indexer, getAttrsFunc attrFunc, key string, matchesSingle bool) (*watchCacheInterval, error) { +func newCacheIntervalFromStore(resourceVersion uint64, store storeIndexer, getAttrsFunc attrFunc, key string, matchesSingle bool) (*watchCacheInterval, error) { buffer := &watchCacheIntervalBuffer{} var allItems []interface{} @@ -173,8 +180,9 @@ func newCacheIntervalFromStore(resourceVersion uint64, store cache.Indexer, getA ci := &watchCacheInterval{ startIndex: 0, // Simulate that we already have all the events we're looking for. - endIndex: 0, - buffer: buffer, + endIndex: 0, + buffer: buffer, + resourceVersion: resourceVersion, } return ci, nil diff --git a/vendor/k8s.io/apiserver/pkg/storage/errors.go b/vendor/k8s.io/apiserver/pkg/storage/errors.go index cc2c1c974..5bae365a8 100644 --- a/vendor/k8s.io/apiserver/pkg/storage/errors.go +++ b/vendor/k8s.io/apiserver/pkg/storage/errors.go @@ -37,6 +37,7 @@ const ( ErrCodeInvalidObj ErrCodeUnreachable ErrCodeTimeout + ErrCodeCorruptObj ) var errCodeToMessage = map[int]string{ @@ -46,6 +47,7 @@ var errCodeToMessage = map[int]string{ ErrCodeInvalidObj: "invalid object", ErrCodeUnreachable: "server unreachable", ErrCodeTimeout: "request timeout", + ErrCodeCorruptObj: "corrupt object", } func NewKeyNotFoundError(key string, rv int64) *StorageError { @@ -82,30 +84,45 @@ func NewUnreachableError(key string, rv int64) *StorageError { func NewTimeoutError(key, msg string) *StorageError { return &StorageError{ - Code: ErrCodeTimeout, - Key: key, - AdditionalErrorMsg: msg, + Code: ErrCodeTimeout, + Key: key, + err: errors.New(msg), } } func NewInvalidObjError(key, msg string) *StorageError { return &StorageError{ - Code: ErrCodeInvalidObj, - Key: key, - AdditionalErrorMsg: msg, + Code: ErrCodeInvalidObj, + Key: key, + err: errors.New(msg), + } +} + +// NewCorruptObjError returns a new StorageError, it represents a corrupt object: +// a) object data retrieved from the storage failed to transform with the given err. +// b) the given object failed to decode with the given err +func NewCorruptObjError(key string, err error) *StorageError { + return &StorageError{ + Code: ErrCodeCorruptObj, + Key: key, + err: err, } } type StorageError struct { - Code int - Key string - ResourceVersion int64 - AdditionalErrorMsg string + Code int + Key string + ResourceVersion int64 + + // inner error + err error } +func (e *StorageError) Unwrap() error { return e.err } + func (e *StorageError) Error() string { - return fmt.Sprintf("StorageError: %s, Code: %d, Key: %s, ResourceVersion: %d, AdditionalErrorMsg: %s", - errCodeToMessage[e.Code], e.Code, e.Key, e.ResourceVersion, e.AdditionalErrorMsg) + return fmt.Sprintf("StorageError: %s, Code: %d, Key: %s, ResourceVersion: %d, AdditionalErrorMsg: %v", + errCodeToMessage[e.Code], e.Code, e.Key, e.ResourceVersion, e.err) } // IsNotFound returns true if and only if err is "key" not found error. 
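Because StorageError now retains the inner error and implements Unwrap, callers can walk the error tree with the standard errors helpers (including the IsCorruptObject check added in the next hunk). A short sketch using the constructors above; the key and the wrapped decode failure are made up for illustration:

package main

import (
	"errors"
	"fmt"
	"io"

	"k8s.io/apiserver/pkg/storage"
)

func main() {
	// a corrupt-object error wrapping the original decode failure
	err := storage.NewCorruptObjError("/registry/pods/ns1/p1",
		fmt.Errorf("decoding object failed: %w", io.ErrUnexpectedEOF))

	fmt.Println(storage.IsCorruptObject(err))        // true
	fmt.Println(errors.Is(err, io.ErrUnexpectedEOF)) // true, via (*StorageError).Unwrap

	var storageErr *storage.StorageError
	if errors.As(err, &storageErr) {
		fmt.Println(storageErr.Key) // /registry/pods/ns1/p1
	}
}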
@@ -138,6 +155,21 @@ func IsInvalidObj(err error) bool { return isErrCode(err, ErrCodeInvalidObj) } +// IsCorruptObject returns true if and only if: +// a) the given object data retrieved from the storage is not transformable, or +// b) the given object failed to decode properly +func IsCorruptObject(err error) bool { + if err == nil { + return false + } + var storageErr *StorageError + if !errors.As(err, &storageErr) { + return false + } + + return storageErr.Code == ErrCodeCorruptObj +} + func isErrCode(err error, code int) bool { if err == nil { return false @@ -172,24 +204,26 @@ func NewInvalidError(errors field.ErrorList) InvalidError { // not from the underlying storage backend (e.g., etcd). type InternalError struct { Reason string + + // retain the inner error to maintain the error tree, so as to enable us + // to do proper error checking, but we also need to be backward compatible. + err error } func (e InternalError) Error() string { return e.Reason } +func (e InternalError) Unwrap() error { return e.err } + // IsInternalError returns true if and only if err is an InternalError. func IsInternalError(err error) bool { _, ok := err.(InternalError) return ok } -func NewInternalError(reason string) InternalError { - return InternalError{reason} -} - -func NewInternalErrorf(format string, a ...interface{}) InternalError { - return InternalError{fmt.Sprintf(format, a...)} +func NewInternalError(err error) InternalError { + return InternalError{Reason: err.Error(), err: err} } var tooLargeResourceVersionCauseMsg = "Too large resource version" diff --git a/vendor/k8s.io/apiserver/pkg/storage/errors/doc.go b/vendor/k8s.io/apiserver/pkg/storage/errors/doc.go index e251b6168..bbc65d389 100644 --- a/vendor/k8s.io/apiserver/pkg/storage/errors/doc.go +++ b/vendor/k8s.io/apiserver/pkg/storage/errors/doc.go @@ -15,4 +15,4 @@ limitations under the License. */ // Package storage provides conversion of storage errors to API errors. -package storage // import "k8s.io/apiserver/pkg/storage/errors" +package errors // import "k8s.io/apiserver/pkg/storage/errors" diff --git a/vendor/k8s.io/apiserver/pkg/storage/errors/storage.go b/vendor/k8s.io/apiserver/pkg/storage/errors/storage.go index 60a6d5cd8..758cfbec5 100644 --- a/vendor/k8s.io/apiserver/pkg/storage/errors/storage.go +++ b/vendor/k8s.io/apiserver/pkg/storage/errors/storage.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package storage +package errors import ( "k8s.io/apimachinery/pkg/api/errors" @@ -32,6 +32,8 @@ func InterpretListError(err error, qualifiedResource schema.GroupResource) error return errors.NewServerTimeout(qualifiedResource, "list", 2) // TODO: make configurable or handled at a higher level case storage.IsInternalError(err): return errors.NewInternalError(err) + case storage.IsCorruptObject(err): + return errors.NewInternalError(err) default: return err } @@ -47,6 +49,8 @@ func InterpretGetError(err error, qualifiedResource schema.GroupResource, name s return errors.NewServerTimeout(qualifiedResource, "get", 2) // TODO: make configurable or handled at a higher level case storage.IsInternalError(err): return errors.NewInternalError(err) + case storage.IsCorruptObject(err): + return errors.NewInternalError(err) default: return err } @@ -62,6 +66,8 @@ func InterpretCreateError(err error, qualifiedResource schema.GroupResource, nam return errors.NewServerTimeout(qualifiedResource, "create", 2) // TODO: make configurable or handled at a higher level case storage.IsInternalError(err): return errors.NewInternalError(err) + case storage.IsCorruptObject(err): + return errors.NewInternalError(err) default: return err } @@ -79,6 +85,8 @@ func InterpretUpdateError(err error, qualifiedResource schema.GroupResource, nam return errors.NewNotFound(qualifiedResource, name) case storage.IsInternalError(err): return errors.NewInternalError(err) + case storage.IsCorruptObject(err): + return errors.NewInternalError(err) default: return err } @@ -96,6 +104,8 @@ func InterpretDeleteError(err error, qualifiedResource schema.GroupResource, nam return errors.NewConflict(qualifiedResource, name, err) case storage.IsInternalError(err): return errors.NewInternalError(err) + case storage.IsCorruptObject(err): + return errors.NewInternalError(err) default: return err } @@ -110,6 +120,8 @@ func InterpretWatchError(err error, resource schema.GroupResource, name string) return errors.NewInvalid(schema.GroupKind{Group: resource.Group, Kind: resource.Resource}, name, invalidError.Errs) case storage.IsInternalError(err): return errors.NewInternalError(err) + case storage.IsCorruptObject(err): + return errors.NewInternalError(err) default: return err } diff --git a/vendor/k8s.io/apiserver/pkg/storage/etcd3/corrupt_obj_deleter.go b/vendor/k8s.io/apiserver/pkg/storage/etcd3/corrupt_obj_deleter.go new file mode 100644 index 000000000..f94c56eaa --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/storage/etcd3/corrupt_obj_deleter.go @@ -0,0 +1,270 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package etcd3 + +import ( + "context" + "errors" + "fmt" + "net/http" + "strings" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + utilerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/apiserver/pkg/storage" + "k8s.io/apiserver/pkg/storage/value" + "k8s.io/klog/v2" +) + +// NewStoreWithUnsafeCorruptObjectDeletion wraps the given store implementation +// and adds support for unsafe deletion of corrupt objects +func NewStoreWithUnsafeCorruptObjectDeletion(delegate storage.Interface, gr schema.GroupResource) storage.Interface { + return &corruptObjectDeleter{ + Interface: delegate, + groupResource: gr, + } +} + +// WithCorruptObjErrorHandlingDecoder decorates the given decoder, it determines +// if the error returned by the given decoder represents a corrupt object (the +// object is undecodable), and then it wraps the error appropriately so the +// unsafe deleter can determine if the object is a candidate for unsafe deletion +func WithCorruptObjErrorHandlingDecoder(decoder Decoder) Decoder { + return &corruptObjErrorInterpretingDecoder{Decoder: decoder} +} + +// WithCorruptObjErrorHandlingTransformer decorates the given decoder, it +// determines if the error returned by the given transformer represents a +// corrupt object (the data from the storage is untransformable), and then it +// wraps the error appropriately so the unsafe deleter can determine +// if the object is a candidate for unsafe deletion +func WithCorruptObjErrorHandlingTransformer(transformer value.Transformer) value.Transformer { + return &corruptObjErrorInterpretingTransformer{Transformer: transformer} +} + +// corruptObjErrAggregatorFactory returns an error aggregator that aggregates +// corrupt object error(s) that the list operation encounters while +// retrieving objects from the storage. 
+// maxCount: it is the maximum number of error that will be aggregated +func corruptObjErrAggregatorFactory(maxCount int) func() ListErrorAggregator { + if maxCount <= 0 { + return defaultListErrorAggregatorFactory + } + return func() ListErrorAggregator { + return &corruptObjErrAggregator{maxCount: maxCount} + } +} + +var errTooMany = errors.New("too many errors, the list is truncated") + +// aggregate corrupt object errors from the LIST operation +type corruptObjErrAggregator struct { + errs []error + abortErr error + maxCount int +} + +func (a *corruptObjErrAggregator) Aggregate(key string, err error) bool { + if len(a.errs) >= a.maxCount { + // add a sentinel error to indicate there are more + a.errs = append(a.errs, errTooMany) + return true + } + var corruptObjErr *corruptObjectError + if errors.As(err, &corruptObjErr) { + a.errs = append(a.errs, storage.NewCorruptObjError(key, corruptObjErr)) + return false + } + + // not a corrupt object error, the list operation should abort + a.abortErr = err + return true +} + +func (a *corruptObjErrAggregator) Err() error { + switch { + case len(a.errs) == 0 && a.abortErr != nil: + return a.abortErr + case len(a.errs) > 0: + err := utilerrors.NewAggregate(a.errs) + return &aggregatedStorageError{errs: err, resourcePrefix: "list"} + default: + return nil + } +} + +// corruptObjectDeleter facilitates unsafe deletion of corrupt objects for etcd +type corruptObjectDeleter struct { + storage.Interface + groupResource schema.GroupResource +} + +func (s *corruptObjectDeleter) Get(ctx context.Context, key string, opts storage.GetOptions, out runtime.Object) error { + if err := s.Interface.Get(ctx, key, opts, out); err != nil { + var corruptObjErr *corruptObjectError + if !errors.As(err, &corruptObjErr) { + // this error does not represent a corrupt object + return err + } + // the unsafe deleter at the registry layer will check whether + // the given err represents a corrupt object in order to + // initiate the unsafe deletion flow. + return storage.NewCorruptObjError(key, corruptObjErr) + } + return nil +} + +func (s *corruptObjectDeleter) GetList(ctx context.Context, key string, opts storage.ListOptions, listObj runtime.Object) error { + err := s.Interface.GetList(ctx, key, opts, listObj) + if err == nil { + return nil + } + + var aggregatedErr *aggregatedStorageError + if errors.As(err, &aggregatedErr) { + // we have aggregated a list of corrupt objects + klog.V(5).ErrorS(aggregatedErr, "corrupt objects") + return aggregatedErr.NewAPIStatusError(s.groupResource) + } + return err +} + +// corruptObjErrorInterpretingDecoder wraps the error returned by the decorated decoder +type corruptObjErrorInterpretingDecoder struct { + Decoder +} + +func (d *corruptObjErrorInterpretingDecoder) Decode(value []byte, objPtr runtime.Object, rev int64) error { + // TODO: right now any error is deemed as undecodable, in + // the future, we can apply some filter, if need be. + if err := d.Decoder.Decode(value, objPtr, rev); err != nil { + return &corruptObjectError{err: err, errType: undecodable, revision: rev} + } + return nil +} + +// decodeListItem decodes bytes value in array into object. +func (d *corruptObjErrorInterpretingDecoder) DecodeListItem(ctx context.Context, data []byte, rev uint64, newItemFunc func() runtime.Object) (runtime.Object, error) { + // TODO: right now any error is deemed as undecodable, in + // the future, we can apply some filter, if need be. 
+ obj, err := d.Decoder.DecodeListItem(ctx, data, rev, newItemFunc) + if err != nil { + err = &corruptObjectError{err: err, errType: undecodable, revision: int64(rev)} + } + return obj, err +} + +// corruptObjErrorInterpretingTransformer wraps the error returned by the transformer +type corruptObjErrorInterpretingTransformer struct { + value.Transformer +} + +func (t *corruptObjErrorInterpretingTransformer) TransformFromStorage(ctx context.Context, data []byte, dataCtx value.Context) ([]byte, bool, error) { + // TODO: right now any error is deemed as undecodable, in the future, we + // can apply some filter, if need be. For example, any network error + out, stale, err := t.Transformer.TransformFromStorage(ctx, data, dataCtx) + if err != nil { + err = &corruptObjectError{err: err, errType: untransformable} + } + return out, stale, err +} + +// corruptObjectError is used internally, only by the corrupt object +// deleter, this error represents a corrup object: +// a) the data from the storage failed to transform, or +// b) the data failed to decode into an object +// NOTE: this error does not have any information to identify the object +// that is corrupt, for example the storage key associated with the object +type corruptObjectError struct { + err error + errType int + revision int64 +} + +const ( + untransformable int = iota + 1 + undecodable +) + +var typeToMessage = map[int]string{ + untransformable: "data from the storage is not transformable", + undecodable: "object not decodable", +} + +func (e *corruptObjectError) Unwrap() error { return e.err } +func (e *corruptObjectError) Error() string { + return fmt.Sprintf("%s revision=%d: %v", typeToMessage[e.errType], e.revision, e.err) +} + +// aggregatedStorageError holds an aggregated list of storage.StorageError +type aggregatedStorageError struct { + resourcePrefix string + errs utilerrors.Aggregate +} + +func (e *aggregatedStorageError) Error() string { + errs := e.errs.Errors() + var b strings.Builder + fmt.Fprintf(&b, "unable to transform or decode %d objects: {\n", len(errs)) + for _, err := range errs { + fmt.Fprintf(&b, "\t%s\n", err.Error()) + } + b.WriteString("}") + return b.String() +} + +// NewAPIStatusError creates a new APIStatus object from the +// aggregated list of StorageError +func (e *aggregatedStorageError) NewAPIStatusError(qualifiedResource schema.GroupResource) *apierrors.StatusError { + var causes []metav1.StatusCause + for _, err := range e.errs.Errors() { + var storageErr *storage.StorageError + if errors.As(err, &storageErr) { + causes = append(causes, metav1.StatusCause{ + Type: metav1.CauseTypeUnexpectedServerResponse, + Field: storageErr.Key, + // TODO: do we need to expose the internal error message here? 
+ Message: err.Error(), + }) + continue + } + if errors.Is(err, errTooMany) { + causes = append(causes, metav1.StatusCause{ + Type: metav1.CauseTypeTooMany, + Message: errTooMany.Error(), + }) + } + } + + return &apierrors.StatusError{ + ErrStatus: metav1.Status{ + Status: metav1.StatusFailure, + Code: http.StatusInternalServerError, + Reason: metav1.StatusReasonStoreReadError, + Details: &metav1.StatusDetails{ + Group: qualifiedResource.Group, + Kind: qualifiedResource.Resource, + Name: e.resourcePrefix, + Causes: causes, + }, + Message: fmt.Sprintf("failed to read one or more %s from the storage", qualifiedResource.String()), + }, + } +} diff --git a/vendor/k8s.io/apiserver/pkg/storage/etcd3/decoder.go b/vendor/k8s.io/apiserver/pkg/storage/etcd3/decoder.go new file mode 100644 index 000000000..f70101529 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/storage/etcd3/decoder.go @@ -0,0 +1,94 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package etcd3 + +import ( + "context" + "fmt" + "time" + + "k8s.io/apimachinery/pkg/conversion" + "k8s.io/apimachinery/pkg/runtime" + endpointsrequest "k8s.io/apiserver/pkg/endpoints/request" + "k8s.io/apiserver/pkg/storage" + + "k8s.io/klog/v2" +) + +// NewDefaultDecoder returns the default decoder for etcd3 store +func NewDefaultDecoder(codec runtime.Codec, versioner storage.Versioner) Decoder { + return &defaultDecoder{ + codec: codec, + versioner: versioner, + } +} + +// Decoder is used by the etcd storage implementation to decode +// transformed data from the storage into an object +type Decoder interface { + // Decode decodes value of bytes into object. It will also + // set the object resource version to rev. + // On success, objPtr would be set to the object. + Decode(value []byte, objPtr runtime.Object, rev int64) error + + // DecodeListItem decodes bytes value in array into object. + DecodeListItem(ctx context.Context, data []byte, rev uint64, newItemFunc func() runtime.Object) (runtime.Object, error) +} + +var _ Decoder = &defaultDecoder{} + +type defaultDecoder struct { + codec runtime.Codec + versioner storage.Versioner +} + +// decode decodes value of bytes into object. It will also set the object resource version to rev. +// On success, objPtr would be set to the object. +func (d *defaultDecoder) Decode(value []byte, objPtr runtime.Object, rev int64) error { + if _, err := conversion.EnforcePtr(objPtr); err != nil { + // nolint:errorlint // this code was moved from store.go as is + return fmt.Errorf("unable to convert output object to pointer: %v", err) + } + _, _, err := d.codec.Decode(value, nil, objPtr) + if err != nil { + return err + } + // being unable to set the version does not prevent the object from being extracted + if err := d.versioner.UpdateObject(objPtr, uint64(rev)); err != nil { + klog.Errorf("failed to update object version: %v", err) + } + return nil +} + +// decodeListItem decodes bytes value in array into object. 
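The Decoder interface introduced above is what WithCorruptObjErrorHandlingDecoder decorates, and any other cross-cutting concern can be layered the same way. As a sketch, a hypothetical logging decorator (loggingDecoder and WithLogging are not part of the vendored code):

package example

import (
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apiserver/pkg/storage/etcd3"
	"k8s.io/klog/v2"
)

// loggingDecoder embeds another Decoder and only adds behaviour on the error
// path; DecodeListItem is promoted from the embedded Decoder unchanged.
type loggingDecoder struct {
	etcd3.Decoder
}

func (d *loggingDecoder) Decode(value []byte, objPtr runtime.Object, rev int64) error {
	err := d.Decoder.Decode(value, objPtr, rev)
	if err != nil {
		klog.ErrorS(err, "decoding stored object failed", "revision", rev)
	}
	return err
}

// WithLogging wraps a decoder, e.g. WithLogging(etcd3.NewDefaultDecoder(codec, versioner)).
func WithLogging(d etcd3.Decoder) etcd3.Decoder {
	return &loggingDecoder{Decoder: d}
}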
+func (d *defaultDecoder) DecodeListItem(ctx context.Context, data []byte, rev uint64, newItemFunc func() runtime.Object) (runtime.Object, error) { + startedAt := time.Now() + defer func() { + endpointsrequest.TrackDecodeLatency(ctx, time.Since(startedAt)) + }() + + obj, _, err := d.codec.Decode(data, nil, newItemFunc()) + if err != nil { + return nil, err + } + + if err := d.versioner.UpdateObject(obj, rev); err != nil { + klog.Errorf("failed to update object version: %v", err) + } + + return obj, nil +} diff --git a/vendor/k8s.io/apiserver/pkg/storage/etcd3/errors.go b/vendor/k8s.io/apiserver/pkg/storage/etcd3/errors.go index d71c9917d..7dd91d949 100644 --- a/vendor/k8s.io/apiserver/pkg/storage/etcd3/errors.go +++ b/vendor/k8s.io/apiserver/pkg/storage/etcd3/errors.go @@ -17,7 +17,11 @@ limitations under the License. package etcd3 import ( + goerrors "errors" + "net/http" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apiserver/pkg/storage" etcdrpc "go.etcd.io/etcd/api/v3/v3rpc/rpctypes" @@ -29,6 +33,19 @@ func interpretWatchError(err error) error { case err == etcdrpc.ErrCompacted: return errors.NewResourceExpired("The resourceVersion for the provided watch is too old.") } + + var corruptobjDeletedErr *corruptObjectDeletedError + if goerrors.As(err, &corruptobjDeletedErr) { + return &errors.StatusError{ + ErrStatus: metav1.Status{ + Status: metav1.StatusFailure, + Code: http.StatusInternalServerError, + Reason: metav1.StatusReasonStoreReadError, + Message: corruptobjDeletedErr.Error(), + }, + } + } + return err } diff --git a/vendor/k8s.io/apiserver/pkg/storage/etcd3/store.go b/vendor/k8s.io/apiserver/pkg/storage/etcd3/store.go index 3eb967188..f38b160aa 100644 --- a/vendor/k8s.io/apiserver/pkg/storage/etcd3/store.go +++ b/vendor/k8s.io/apiserver/pkg/storage/etcd3/store.go @@ -19,14 +19,15 @@ package etcd3 import ( "bytes" "context" - "errors" "fmt" "path" "reflect" "strings" "time" + "go.etcd.io/etcd/api/v3/mvccpb" clientv3 "go.etcd.io/etcd/client/v3" + "go.etcd.io/etcd/client/v3/kubernetes" "go.opentelemetry.io/otel/attribute" apierrors "k8s.io/apimachinery/pkg/api/errors" @@ -38,7 +39,6 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/watch" "k8s.io/apiserver/pkg/audit" - endpointsrequest "k8s.io/apiserver/pkg/endpoints/request" "k8s.io/apiserver/pkg/features" "k8s.io/apiserver/pkg/storage" "k8s.io/apiserver/pkg/storage/etcd3/metrics" @@ -73,7 +73,7 @@ func (d authenticatedDataString) AuthenticatedData() []byte { var _ value.Context = authenticatedDataString("") type store struct { - client *clientv3.Client + client *kubernetes.Client codec runtime.Codec versioner storage.Versioner transformer value.Transformer @@ -82,6 +82,8 @@ type store struct { groupResourceString string watcher *watcher leaseManager *leaseManager + decoder Decoder + listErrAggrFactory func() ListErrorAggregator } func (s *store) RequestWatchProgress(ctx context.Context) error { @@ -98,13 +100,52 @@ type objState struct { stale bool } +// ListErrorAggregator aggregates the error(s) that the LIST operation +// encounters while retrieving object(s) from the storage +type ListErrorAggregator interface { + // Aggregate aggregates the given error from list operation + // key: it identifies the given object in the storage. + // err: it represents the error the list operation encountered while + // retrieving the given object from the storage. 
+ // done: true if the aggregation is done and the list operation should + // abort, otherwise the list operation will continue + Aggregate(key string, err error) bool + + // Err returns the aggregated error + Err() error +} + +// defaultListErrorAggregatorFactory returns the default list error +// aggregator that maintains backward compatibility, which is abort +// the list operation as soon as it encounters the first error +func defaultListErrorAggregatorFactory() ListErrorAggregator { return &abortOnFirstError{} } + +// LIST aborts on the first error it encounters (backward compatible) +type abortOnFirstError struct { + err error +} + +func (a *abortOnFirstError) Aggregate(key string, err error) bool { + a.err = err + return true +} +func (a *abortOnFirstError) Err() error { return a.err } + // New returns an etcd3 implementation of storage.Interface. -func New(c *clientv3.Client, codec runtime.Codec, newFunc, newListFunc func() runtime.Object, prefix, resourcePrefix string, groupResource schema.GroupResource, transformer value.Transformer, leaseManagerConfig LeaseManagerConfig) storage.Interface { - return newStore(c, codec, newFunc, newListFunc, prefix, resourcePrefix, groupResource, transformer, leaseManagerConfig) +func New(c *kubernetes.Client, codec runtime.Codec, newFunc, newListFunc func() runtime.Object, prefix, resourcePrefix string, groupResource schema.GroupResource, transformer value.Transformer, leaseManagerConfig LeaseManagerConfig, decoder Decoder, versioner storage.Versioner) storage.Interface { + if utilfeature.DefaultFeatureGate.Enabled(features.AllowUnsafeMalformedObjectDeletion) { + transformer = WithCorruptObjErrorHandlingTransformer(transformer) + decoder = WithCorruptObjErrorHandlingDecoder(decoder) + } + var store storage.Interface + store = newStore(c, codec, newFunc, newListFunc, prefix, resourcePrefix, groupResource, transformer, leaseManagerConfig, decoder, versioner) + if utilfeature.DefaultFeatureGate.Enabled(features.AllowUnsafeMalformedObjectDeletion) { + store = NewStoreWithUnsafeCorruptObjectDeletion(store, groupResource) + } + return store } -func newStore(c *clientv3.Client, codec runtime.Codec, newFunc, newListFunc func() runtime.Object, prefix, resourcePrefix string, groupResource schema.GroupResource, transformer value.Transformer, leaseManagerConfig LeaseManagerConfig) *store { - versioner := storage.APIObjectVersioner{} +func newStore(c *kubernetes.Client, codec runtime.Codec, newFunc, newListFunc func() runtime.Object, prefix, resourcePrefix string, groupResource schema.GroupResource, transformer value.Transformer, leaseManagerConfig LeaseManagerConfig, decoder Decoder, versioner storage.Versioner) *store { // for compatibility with etcd2 impl. // no-op for default prefix of '/registry'. 
// keeps compatibility with etcd2 impl for custom prefixes that don't start with '/' @@ -114,8 +155,13 @@ func newStore(c *clientv3.Client, codec runtime.Codec, newFunc, newListFunc func pathPrefix += "/" } + listErrAggrFactory := defaultListErrorAggregatorFactory + if utilfeature.DefaultFeatureGate.Enabled(features.AllowUnsafeMalformedObjectDeletion) { + listErrAggrFactory = corruptObjErrAggregatorFactory(100) + } + w := &watcher{ - client: c, + client: c.Client, codec: codec, newFunc: newFunc, groupResource: groupResource, @@ -136,7 +182,9 @@ func newStore(c *clientv3.Client, codec runtime.Codec, newFunc, newListFunc func groupResource: groupResource, groupResourceString: groupResource.String(), watcher: w, - leaseManager: newDefaultLeaseManager(c, leaseManagerConfig), + leaseManager: newDefaultLeaseManager(c.Client, leaseManagerConfig), + decoder: decoder, + listErrAggrFactory: listErrAggrFactory, } w.getCurrentStorageRV = func(ctx context.Context) (uint64, error) { @@ -160,29 +208,28 @@ func (s *store) Get(ctx context.Context, key string, opts storage.GetOptions, ou return err } startTime := time.Now() - getResp, err := s.client.KV.Get(ctx, preparedKey) + getResp, err := s.client.Kubernetes.Get(ctx, preparedKey, kubernetes.GetOptions{}) metrics.RecordEtcdRequest("get", s.groupResourceString, err, startTime) if err != nil { return err } - if err = s.validateMinimumResourceVersion(opts.ResourceVersion, uint64(getResp.Header.Revision)); err != nil { + if err = s.validateMinimumResourceVersion(opts.ResourceVersion, uint64(getResp.Revision)); err != nil { return err } - if len(getResp.Kvs) == 0 { + if getResp.KV == nil { if opts.IgnoreNotFound { return runtime.SetZeroValue(out) } return storage.NewKeyNotFoundError(preparedKey, 0) } - kv := getResp.Kvs[0] - data, _, err := s.transformer.TransformFromStorage(ctx, kv.Value, authenticatedDataString(preparedKey)) + data, _, err := s.transformer.TransformFromStorage(ctx, getResp.KV.Value, authenticatedDataString(preparedKey)) if err != nil { - return storage.NewInternalError(err.Error()) + return storage.NewInternalError(err) } - err = decode(s.codec, s.versioner, data, out, kv.ModRevision) + err = s.decoder.Decode(data, out, getResp.KV.ModRevision) if err != nil { recordDecodeError(s.groupResourceString, preparedKey) return err @@ -217,24 +264,23 @@ func (s *store) Create(ctx context.Context, key string, obj, out runtime.Object, } span.AddEvent("Encode succeeded", attribute.Int("len", len(data))) - opts, err := s.ttlOpts(ctx, int64(ttl)) - if err != nil { - return err + var lease clientv3.LeaseID + if ttl != 0 { + lease, err = s.leaseManager.GetLease(ctx, int64(ttl)) + if err != nil { + return err + } } newData, err := s.transformer.TransformToStorage(ctx, data, authenticatedDataString(preparedKey)) if err != nil { span.AddEvent("TransformToStorage failed", attribute.String("err", err.Error())) - return storage.NewInternalError(err.Error()) + return storage.NewInternalError(err) } span.AddEvent("TransformToStorage succeeded") startTime := time.Now() - txnResp, err := s.client.KV.Txn(ctx).If( - notFound(preparedKey), - ).Then( - clientv3.OpPut(preparedKey, string(newData), opts...), - ).Commit() + txnResp, err := s.client.Kubernetes.OptimisticPut(ctx, preparedKey, newData, 0, kubernetes.PutOptions{LeaseID: lease}) metrics.RecordEtcdRequest("create", s.groupResourceString, err, startTime) if err != nil { span.AddEvent("Txn call failed", attribute.String("err", err.Error())) @@ -247,8 +293,7 @@ func (s *store) Create(ctx context.Context, key 
string, obj, out runtime.Object, } if out != nil { - putResp := txnResp.Responses[0].GetResponsePut() - err = decode(s.codec, s.versioner, data, out, putResp.Header.Revision) + err = s.decoder.Decode(data, out, txnResp.Revision) if err != nil { span.AddEvent("decode failed", attribute.Int("len", len(data)), attribute.String("err", err.Error())) recordDecodeError(s.groupResourceString, preparedKey) @@ -262,7 +307,7 @@ func (s *store) Create(ctx context.Context, key string, obj, out runtime.Object, // Delete implements storage.Interface.Delete. func (s *store) Delete( ctx context.Context, key string, out runtime.Object, preconditions *storage.Preconditions, - validateDeletion storage.ValidateObjectFunc, cachedExistingObject runtime.Object) error { + validateDeletion storage.ValidateObjectFunc, cachedExistingObject runtime.Object, opts storage.DeleteOptions) error { preparedKey, err := s.prepareKey(key) if err != nil { return err @@ -271,13 +316,18 @@ func (s *store) Delete( if err != nil { return fmt.Errorf("unable to convert output object to pointer: %v", err) } - return s.conditionalDelete(ctx, preparedKey, out, v, preconditions, validateDeletion, cachedExistingObject) + + skipTransformDecode := false + if utilfeature.DefaultFeatureGate.Enabled(features.AllowUnsafeMalformedObjectDeletion) { + skipTransformDecode = opts.IgnoreStoreReadError + } + return s.conditionalDelete(ctx, preparedKey, out, v, preconditions, validateDeletion, cachedExistingObject, skipTransformDecode) } func (s *store) conditionalDelete( ctx context.Context, key string, out runtime.Object, v reflect.Value, preconditions *storage.Preconditions, - validateDeletion storage.ValidateObjectFunc, cachedExistingObject runtime.Object) error { - getCurrentState := s.getCurrentState(ctx, key, v, false) + validateDeletion storage.ValidateObjectFunc, cachedExistingObject runtime.Object, skipTransformDecode bool) error { + getCurrentState := s.getCurrentState(ctx, key, v, false, skipTransformDecode) var origState *objState var err error @@ -347,21 +397,16 @@ func (s *store) conditionalDelete( } startTime := time.Now() - txnResp, err := s.client.KV.Txn(ctx).If( - clientv3.Compare(clientv3.ModRevision(key), "=", origState.rev), - ).Then( - clientv3.OpDelete(key), - ).Else( - clientv3.OpGet(key), - ).Commit() + txnResp, err := s.client.Kubernetes.OptimisticDelete(ctx, key, origState.rev, kubernetes.DeleteOptions{ + GetOnFailure: true, + }) metrics.RecordEtcdRequest("delete", s.groupResourceString, err, startTime) if err != nil { return err } if !txnResp.Succeeded { - getResp := (*clientv3.GetResponse)(txnResp.Responses[0].GetResponseRange()) klog.V(4).Infof("deletion of %s failed because of a conflict, going to retry", key) - origState, err = s.getState(ctx, getResp, key, v, false) + origState, err = s.getState(ctx, txnResp.KV, key, v, false, skipTransformDecode) if err != nil { return err } @@ -369,17 +414,12 @@ func (s *store) conditionalDelete( continue } - if len(txnResp.Responses) == 0 || txnResp.Responses[0].GetResponseDeleteRange() == nil { - return errors.New(fmt.Sprintf("invalid DeleteRange response: %v", txnResp.Responses)) - } - deleteResp := txnResp.Responses[0].GetResponseDeleteRange() - if deleteResp.Header == nil { - return errors.New("invalid DeleteRange response - nil header") - } - err = decode(s.codec, s.versioner, origState.data, out, deleteResp.Header.Revision) - if err != nil { - recordDecodeError(s.groupResourceString, key) - return err + if !skipTransformDecode { + err = s.decoder.Decode(origState.data, out, 
txnResp.Revision) + if err != nil { + recordDecodeError(s.groupResourceString, key) + return err + } } return nil } @@ -405,7 +445,8 @@ func (s *store) GuaranteedUpdate( return fmt.Errorf("unable to convert output object to pointer: %v", err) } - getCurrentState := s.getCurrentState(ctx, preparedKey, v, ignoreNotFound) + skipTransformDecode := false + getCurrentState := s.getCurrentState(ctx, preparedKey, v, ignoreNotFound, skipTransformDecode) var origState *objState var origStateIsCurrent bool @@ -491,7 +532,7 @@ func (s *store) GuaranteedUpdate( } // recheck that the data from etcd is not stale before short-circuiting a write if !origState.stale { - err = decode(s.codec, s.versioner, origState.data, destination, origState.rev) + err = s.decoder.Decode(origState.data, destination, origState.rev) if err != nil { recordDecodeError(s.groupResourceString, preparedKey) return err @@ -503,24 +544,25 @@ func (s *store) GuaranteedUpdate( newData, err := s.transformer.TransformToStorage(ctx, data, transformContext) if err != nil { span.AddEvent("TransformToStorage failed", attribute.String("err", err.Error())) - return storage.NewInternalError(err.Error()) + return storage.NewInternalError(err) } span.AddEvent("TransformToStorage succeeded") - opts, err := s.ttlOpts(ctx, int64(ttl)) - if err != nil { - return err + var lease clientv3.LeaseID + if ttl != 0 { + lease, err = s.leaseManager.GetLease(ctx, int64(ttl)) + if err != nil { + return err + } } span.AddEvent("Transaction prepared") startTime := time.Now() - txnResp, err := s.client.KV.Txn(ctx).If( - clientv3.Compare(clientv3.ModRevision(preparedKey), "=", origState.rev), - ).Then( - clientv3.OpPut(preparedKey, string(newData), opts...), - ).Else( - clientv3.OpGet(preparedKey), - ).Commit() + + txnResp, err := s.client.Kubernetes.OptimisticPut(ctx, preparedKey, newData, origState.rev, kubernetes.PutOptions{ + GetOnFailure: true, + LeaseID: lease, + }) metrics.RecordEtcdRequest("update", s.groupResourceString, err, startTime) if err != nil { span.AddEvent("Txn call failed", attribute.String("err", err.Error())) @@ -529,9 +571,8 @@ func (s *store) GuaranteedUpdate( span.AddEvent("Txn call completed") span.AddEvent("Transaction committed") if !txnResp.Succeeded { - getResp := (*clientv3.GetResponse)(txnResp.Responses[0].GetResponseRange()) klog.V(4).Infof("GuaranteedUpdate of %s failed because of a conflict, going to retry", preparedKey) - origState, err = s.getState(ctx, getResp, preparedKey, v, ignoreNotFound) + origState, err = s.getState(ctx, txnResp.KV, preparedKey, v, ignoreNotFound, skipTransformDecode) if err != nil { return err } @@ -539,9 +580,8 @@ func (s *store) GuaranteedUpdate( origStateIsCurrent = true continue } - putResp := txnResp.Responses[0].GetResponsePut() - err = decode(s.codec, s.versioner, data, destination, putResp.Header.Revision) + err = s.decoder.Decode(data, destination, txnResp.Revision) if err != nil { span.AddEvent("decode failed", attribute.Int("len", len(data)), attribute.String("err", err.Error())) recordDecodeError(s.groupResourceString, preparedKey) @@ -583,12 +623,12 @@ func (s *store) Count(key string) (int64, error) { } startTime := time.Now() - getResp, err := s.client.KV.Get(context.Background(), preparedKey, clientv3.WithRange(clientv3.GetPrefixRangeEnd(preparedKey)), clientv3.WithCountOnly()) + count, err := s.client.Kubernetes.Count(context.Background(), preparedKey, kubernetes.CountOptions{}) metrics.RecordEtcdRequest("listWithCount", preparedKey, err, startTime) if err != nil { return 0, err } - 
return getResp.Count, nil + return count, nil } // ReadinessCheck implements storage.Interface. @@ -639,7 +679,7 @@ func (s *store) resolveGetListRev(continueKey string, continueRV int64, opts sto // GetList implements storage.Interface. func (s *store) GetList(ctx context.Context, key string, opts storage.ListOptions, listObj runtime.Object) error { - preparedKey, err := s.prepareKey(key) + keyPrefix, err := s.prepareKey(key) if err != nil { return err } @@ -664,27 +704,13 @@ func (s *store) GetList(ctx context.Context, key string, opts storage.ListOption // get children "directories". e.g. if we have key "/a", "/a/b", "/ab", getting keys // with prefix "/a" will return all three, while with prefix "/a/" will return only // "/a/b" which is the correct answer. - if opts.Recursive && !strings.HasSuffix(preparedKey, "/") { - preparedKey += "/" + if opts.Recursive && !strings.HasSuffix(keyPrefix, "/") { + keyPrefix += "/" } - keyPrefix := preparedKey // set the appropriate clientv3 options to filter the returned data set - var limitOption *clientv3.OpOption limit := opts.Predicate.Limit - var paging bool - options := make([]clientv3.OpOption, 0, 4) - if opts.Predicate.Limit > 0 { - paging = true - options = append(options, clientv3.WithLimit(limit)) - limitOption = &options[len(options)-1] - } - - if opts.Recursive { - rangeEnd := clientv3.GetPrefixRangeEnd(keyPrefix) - options = append(options, clientv3.WithRange(rangeEnd)) - } - + paging := opts.Predicate.Limit > 0 newItemFunc := getNewItemFunc(listObj, v) var continueRV, withRev int64 @@ -694,20 +720,15 @@ func (s *store) GetList(ctx context.Context, key string, opts storage.ListOption if err != nil { return apierrors.NewBadRequest(fmt.Sprintf("invalid continue token: %v", err)) } - preparedKey = continueKey } if withRev, err = s.resolveGetListRev(continueKey, continueRV, opts); err != nil { return err } - if withRev != 0 { - options = append(options, clientv3.WithRev(withRev)) - } - // loop until we have filled the requested limit from etcd or there are no more results var lastKey []byte var hasMore bool - var getResp *clientv3.GetResponse + var getResp kubernetes.ListResponse var numFetched int var numEvald int // Because these metrics are for understanding the costs of handling LIST requests, @@ -722,26 +743,30 @@ func (s *store) GetList(ctx context.Context, key string, opts storage.ListOption metricsOp = "list" } + aggregator := s.listErrAggrFactory() for { startTime := time.Now() - getResp, err = s.client.KV.Get(ctx, preparedKey, options...) + getResp, err = s.getList(ctx, keyPrefix, opts.Recursive, kubernetes.ListOptions{ + Revision: withRev, + Limit: limit, + Continue: continueKey, + }) metrics.RecordEtcdRequest(metricsOp, s.groupResourceString, err, startTime) if err != nil { return interpretListError(err, len(opts.Predicate.Continue) > 0, continueKey, keyPrefix) } numFetched += len(getResp.Kvs) - if err = s.validateMinimumResourceVersion(opts.ResourceVersion, uint64(getResp.Header.Revision)); err != nil { + if err = s.validateMinimumResourceVersion(opts.ResourceVersion, uint64(getResp.Revision)); err != nil { return err } - hasMore = getResp.More + hasMore = int64(len(getResp.Kvs)) < getResp.Count - if len(getResp.Kvs) == 0 && getResp.More { + if len(getResp.Kvs) == 0 && hasMore { return fmt.Errorf("no results were found, but etcd indicated there were more values remaining") } // indicate to the client which resource version was returned, and use the same resource version for subsequent requests. 
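// Illustrative sketch (editorial aside, not part of the vendored patch): the rewritten GetList
// pages through the typed etcd client by pinning the snapshot revision from the first response
// and advancing the continue key past the last returned key. The standalone loop below mirrors
// that pattern in isolation; the prefix and page size are arbitrary illustration values.
package example

import (
	"context"

	"go.etcd.io/etcd/api/v3/mvccpb"
	"go.etcd.io/etcd/client/v3/kubernetes"
)

// listAllPages returns every key-value under prefix as a consistent snapshot.
func listAllPages(ctx context.Context, client *kubernetes.Client, prefix string) ([]*mvccpb.KeyValue, error) {
	var (
		all         []*mvccpb.KeyValue
		rev         int64  // 0 on the first call, then pinned to that response's revision
		continueKey string // start of the next page
	)
	for {
		resp, err := client.Kubernetes.List(ctx, prefix, kubernetes.ListOptions{
			Revision: rev,
			Limit:    500,
			Continue: continueKey,
		})
		if err != nil {
			return nil, err
		}
		if rev == 0 {
			rev = resp.Revision
		}
		all = append(all, resp.Kvs...)
		// Count covers the remaining range, so a page that returns everything left means we are done.
		if len(resp.Kvs) == 0 || int64(len(resp.Kvs)) >= resp.Count {
			return all, nil
		}
		// "\x00" is the smallest suffix, so lastKey+"\x00" is the next possible key.
		continueKey = string(resp.Kvs[len(resp.Kvs)-1].Key) + "\x00"
	}
}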
if withRev == 0 { - withRev = getResp.Header.Revision - options = append(options, clientv3.WithRev(withRev)) + withRev = getResp.Revision } // avoid small allocations for the result slice, since this can be called in many @@ -762,7 +787,10 @@ func (s *store) GetList(ctx context.Context, key string, opts storage.ListOption data, _, err := s.transformer.TransformFromStorage(ctx, kv.Value, authenticatedDataString(kv.Key)) if err != nil { - return storage.NewInternalErrorf("unable to transform key %q: %v", kv.Key, err) + if done := aggregator.Aggregate(string(kv.Key), storage.NewInternalError(fmt.Errorf("unable to transform key %q: %w", kv.Key, err))); done { + return aggregator.Err() + } + continue } // Check if the request has already timed out before decode object @@ -773,10 +801,13 @@ func (s *store) GetList(ctx context.Context, key string, opts storage.ListOption default: } - obj, err := decodeListItem(ctx, data, uint64(kv.ModRevision), s.codec, s.versioner, newItemFunc) + obj, err := s.decoder.DecodeListItem(ctx, data, uint64(kv.ModRevision), newItemFunc) if err != nil { recordDecodeError(s.groupResourceString, string(kv.Key)) - return err + if done := aggregator.Aggregate(string(kv.Key), err); done { + return aggregator.Err() + } + continue } // being unable to set the version does not prevent the object from being extracted @@ -789,6 +820,7 @@ func (s *store) GetList(ctx context.Context, key string, opts storage.ListOption // free kv early. Long lists can take O(seconds) to decode. getResp.Kvs[i] = nil } + continueKey = string(lastKey) + "\x00" // no more results remain or we didn't request paging if !hasMore || !paging { @@ -806,9 +838,11 @@ func (s *store) GetList(ctx context.Context, key string, opts storage.ListOption if limit > maxLimit { limit = maxLimit } - *limitOption = clientv3.WithLimit(limit) } - preparedKey = string(lastKey) + "\x00" + } + + if err := aggregator.Err(); err != nil { + return err } if v.IsNil() { @@ -823,6 +857,26 @@ func (s *store) GetList(ctx context.Context, key string, opts storage.ListOption return s.versioner.UpdateList(listObj, uint64(withRev), continueValue, remainingItemCount) } +func (s *store) getList(ctx context.Context, keyPrefix string, recursive bool, options kubernetes.ListOptions) (kubernetes.ListResponse, error) { + if recursive { + return s.client.Kubernetes.List(ctx, keyPrefix, options) + } + getResp, err := s.client.Kubernetes.Get(ctx, keyPrefix, kubernetes.GetOptions{ + Revision: options.Revision, + }) + var resp kubernetes.ListResponse + if getResp.KV != nil { + resp.Kvs = []*mvccpb.KeyValue{getResp.KV} + resp.Count = 1 + resp.Revision = getResp.Revision + } else { + resp.Kvs = []*mvccpb.KeyValue{} + resp.Count = 0 + resp.Revision = getResp.Revision + } + return resp, err +} + // growSlice takes a slice value and grows its capacity up // to the maximum of the passed sizes or maxCapacity, whichever // is smaller. 
Above maxCapacity decisions about allocation are left @@ -878,19 +932,25 @@ func (s *store) watchContext(ctx context.Context) context.Context { return clientv3.WithRequireLeader(ctx) } -func (s *store) getCurrentState(ctx context.Context, key string, v reflect.Value, ignoreNotFound bool) func() (*objState, error) { +func (s *store) getCurrentState(ctx context.Context, key string, v reflect.Value, ignoreNotFound bool, skipTransformDecode bool) func() (*objState, error) { return func() (*objState, error) { startTime := time.Now() - getResp, err := s.client.KV.Get(ctx, key) + getResp, err := s.client.Kubernetes.Get(ctx, key, kubernetes.GetOptions{}) metrics.RecordEtcdRequest("get", s.groupResourceString, err, startTime) if err != nil { return nil, err } - return s.getState(ctx, getResp, key, v, ignoreNotFound) + return s.getState(ctx, getResp.KV, key, v, ignoreNotFound, skipTransformDecode) } } -func (s *store) getState(ctx context.Context, getResp *clientv3.GetResponse, key string, v reflect.Value, ignoreNotFound bool) (*objState, error) { +// getState constructs a new objState from the given response from the storage. +// skipTransformDecode: if true, the function will neither transform the data +// from the storage nor decode it into an object; otherwise, data from the +// storage will be transformed and decoded. +// NOTE: when skipTransformDecode is true, the 'data', and the 'obj' fields +// of the objState will be nil, and 'stale' will be set to true. +func (s *store) getState(ctx context.Context, kv *mvccpb.KeyValue, key string, v reflect.Value, ignoreNotFound bool, skipTransformDecode bool) (*objState, error) { state := &objState{ meta: &storage.ResponseMeta{}, } @@ -901,7 +961,7 @@ func (s *store) getState(ctx context.Context, getResp *clientv3.GetResponse, key state.obj = reflect.New(v.Type()).Interface().(runtime.Object) } - if len(getResp.Kvs) == 0 { + if kv == nil { if !ignoreNotFound { return nil, storage.NewKeyNotFoundError(key, 0) } @@ -909,15 +969,25 @@ func (s *store) getState(ctx context.Context, getResp *clientv3.GetResponse, key return nil, err } } else { - data, stale, err := s.transformer.TransformFromStorage(ctx, getResp.Kvs[0].Value, authenticatedDataString(key)) + state.rev = kv.ModRevision + state.meta.ResourceVersion = uint64(state.rev) + + if skipTransformDecode { + // be explicit that we don't have the object + state.obj = nil + state.stale = true // this seems a more sane value here + return state, nil + } + + data, stale, err := s.transformer.TransformFromStorage(ctx, kv.Value, authenticatedDataString(key)) if err != nil { - return nil, storage.NewInternalError(err.Error()) + return nil, storage.NewInternalError(err) } - state.rev = getResp.Kvs[0].ModRevision - state.meta.ResourceVersion = uint64(state.rev) + state.data = data state.stale = stale - if err := decode(s.codec, s.versioner, state.data, state.obj, state.rev); err != nil { + + if err := s.decoder.Decode(state.data, state.obj, state.rev); err != nil { recordDecodeError(s.groupResourceString, key) return nil, err } @@ -969,19 +1039,6 @@ func (s *store) updateState(st *objState, userUpdate storage.UpdateFunc) (runtim return ret, ttl, nil } -// ttlOpts returns client options based on given ttl. 
-// ttl: if ttl is non-zero, it will attach the key to a lease with ttl of roughly the same length -func (s *store) ttlOpts(ctx context.Context, ttl int64) ([]clientv3.OpOption, error) { - if ttl == 0 { - return nil, nil - } - id, err := s.leaseManager.GetLease(ctx, ttl) - if err != nil { - return nil, err - } - return []clientv3.OpOption{clientv3.WithLease(id)}, nil -} - // validateMinimumResourceVersion returns a 'too large resource' version error when the provided minimumResourceVersion is // greater than the most recent actualRevision available from storage. func (s *store) validateMinimumResourceVersion(minimumResourceVersion string, actualRevision uint64) error { @@ -1024,52 +1081,12 @@ func (s *store) prepareKey(key string) (string, error) { return s.pathPrefix + key[startIndex:], nil } -// decode decodes value of bytes into object. It will also set the object resource version to rev. -// On success, objPtr would be set to the object. -func decode(codec runtime.Codec, versioner storage.Versioner, value []byte, objPtr runtime.Object, rev int64) error { - if _, err := conversion.EnforcePtr(objPtr); err != nil { - return fmt.Errorf("unable to convert output object to pointer: %v", err) - } - _, _, err := codec.Decode(value, nil, objPtr) - if err != nil { - return err - } - // being unable to set the version does not prevent the object from being extracted - if err := versioner.UpdateObject(objPtr, uint64(rev)); err != nil { - klog.Errorf("failed to update object version: %v", err) - } - return nil -} - -// decodeListItem decodes bytes value in array into object. -func decodeListItem(ctx context.Context, data []byte, rev uint64, codec runtime.Codec, versioner storage.Versioner, newItemFunc func() runtime.Object) (runtime.Object, error) { - startedAt := time.Now() - defer func() { - endpointsrequest.TrackDecodeLatency(ctx, time.Since(startedAt)) - }() - - obj, _, err := codec.Decode(data, nil, newItemFunc()) - if err != nil { - return nil, err - } - - if err := versioner.UpdateObject(obj, rev); err != nil { - klog.Errorf("failed to update object version: %v", err) - } - - return obj, nil -} - // recordDecodeError record decode error split by object type. func recordDecodeError(resource string, key string) { metrics.RecordDecodeError(resource) klog.V(4).Infof("Decoding %s \"%s\" failed", resource, key) } -func notFound(key string) clientv3.Cmp { - return clientv3.Compare(clientv3.ModRevision(key), "=", 0) -} - // getTypeName returns type name of an object for reporting purposes. 
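// Illustrative sketch (editorial aside, not part of the vendored patch): the decode and
// decodeListItem helpers removed above now sit behind the etcd3.Decoder interface that newStore
// receives, so callers can layer behavior the same way the patch layers
// WithCorruptObjErrorHandlingDecoder. The wrapper below is a toy example that only counts
// successful decodes; the type name is hypothetical.
package example

import (
	"context"

	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apiserver/pkg/storage/etcd3"
	"k8s.io/klog/v2"
)

// countingDecoder delegates to another Decoder and tallies successful decodes.
type countingDecoder struct {
	delegate etcd3.Decoder
	decoded  int
}

func (d *countingDecoder) Decode(value []byte, objPtr runtime.Object, rev int64) error {
	if err := d.delegate.Decode(value, objPtr, rev); err != nil {
		return err
	}
	d.decoded++
	klog.V(6).InfoS("decoded storage value", "rev", rev, "total", d.decoded)
	return nil
}

func (d *countingDecoder) DecodeListItem(ctx context.Context, data []byte, rev uint64, newItemFunc func() runtime.Object) (runtime.Object, error) {
	obj, err := d.delegate.DecodeListItem(ctx, data, rev, newItemFunc)
	if err == nil {
		d.decoded++
	}
	return obj, err
}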
func getTypeName(obj interface{}) string { return reflect.TypeOf(obj).String() diff --git a/vendor/k8s.io/apiserver/pkg/storage/etcd3/watcher.go b/vendor/k8s.io/apiserver/pkg/storage/etcd3/watcher.go index 536f2e1c0..e2141395b 100644 --- a/vendor/k8s.io/apiserver/pkg/storage/etcd3/watcher.go +++ b/vendor/k8s.io/apiserver/pkg/storage/etcd3/watcher.go @@ -438,7 +438,12 @@ func (wc *watchChan) serialProcessEvents(wg *sync.WaitGroup) { for { select { case e := <-wc.incomingEventChan: - res := wc.transform(e) + res, err := wc.transform(e) + if err != nil { + wc.sendError(err) + return + } + if res == nil { continue } @@ -461,10 +466,8 @@ func (wc *watchChan) serialProcessEvents(wg *sync.WaitGroup) { func (wc *watchChan) concurrentProcessEvents(wg *sync.WaitGroup) { p := concurrentOrderedEventProcessing{ - input: wc.incomingEventChan, - processFunc: wc.transform, - output: wc.resultChan, - processingQueue: make(chan chan *watch.Event, processEventConcurrency-1), + wc: wc, + processingQueue: make(chan chan *processingResult, processEventConcurrency-1), objectType: wc.watcher.objectType, groupResource: wc.watcher.groupResource, @@ -481,12 +484,15 @@ func (wc *watchChan) concurrentProcessEvents(wg *sync.WaitGroup) { }() } +type processingResult struct { + event *watch.Event + err error +} + type concurrentOrderedEventProcessing struct { - input chan *event - processFunc func(*event) *watch.Event - output chan watch.Event + wc *watchChan - processingQueue chan chan *watch.Event + processingQueue chan chan *processingResult // Metadata for logging objectType string groupResource schema.GroupResource @@ -498,28 +504,29 @@ func (p *concurrentOrderedEventProcessing) scheduleEventProcessing(ctx context.C select { case <-ctx.Done(): return - case e = <-p.input: + case e = <-p.wc.incomingEventChan: } - processingResponse := make(chan *watch.Event, 1) + processingResponse := make(chan *processingResult, 1) select { case <-ctx.Done(): return case p.processingQueue <- processingResponse: } wg.Add(1) - go func(e *event, response chan<- *watch.Event) { + go func(e *event, response chan<- *processingResult) { defer wg.Done() + responseEvent, err := p.wc.transform(e) select { case <-ctx.Done(): - case response <- p.processFunc(e): + case response <- &processingResult{event: responseEvent, err: err}: } }(e, processingResponse) } } func (p *concurrentOrderedEventProcessing) collectEventProcessing(ctx context.Context) { - var processingResponse chan *watch.Event - var e *watch.Event + var processingResponse chan *processingResult + var r *processingResult for { select { case <-ctx.Done(): @@ -529,21 +536,25 @@ func (p *concurrentOrderedEventProcessing) collectEventProcessing(ctx context.Co select { case <-ctx.Done(): return - case e = <-processingResponse: + case r = <-processingResponse: + } + if r.err != nil { + p.wc.sendError(r.err) + return } - if e == nil { + if r.event == nil { continue } - if len(p.output) == cap(p.output) { - klog.V(3).InfoS("Fast watcher, slow processing. Probably caused by slow dispatching events to watchers", "outgoingEvents", outgoingBufSize, "objectType", p.objectType, "groupResource", p.groupResource) + if len(p.wc.resultChan) == cap(p.wc.resultChan) { + klog.V(3).InfoS("Fast watcher, slow processing. Probably caused by slow dispatching events to watchers", "outgoingEvents", outgoingBufSize, "objectType", p.wc.watcher.objectType, "groupResource", p.wc.watcher.groupResource) } // If user couldn't receive results fast enough, we also block incoming events from watcher. 
// Because storing events in local will cause more memory usage. // The worst case would be closing the fast watcher. select { - case <-ctx.Done(): + case p.wc.resultChan <- *r.event: + case <-p.wc.ctx.Done(): return - case p.output <- *e: } } } @@ -561,12 +572,11 @@ func (wc *watchChan) acceptAll() bool { } // transform transforms an event into a result for user if not filtered. -func (wc *watchChan) transform(e *event) (res *watch.Event) { +func (wc *watchChan) transform(e *event) (res *watch.Event, err error) { curObj, oldObj, err := wc.prepareObjs(e) if err != nil { klog.Errorf("failed to prepare current and previous objects: %v", err) - wc.sendError(err) - return nil + return nil, err } switch { @@ -574,12 +584,11 @@ func (wc *watchChan) transform(e *event) (res *watch.Event) { object := wc.watcher.newFunc() if err := wc.watcher.versioner.UpdateObject(object, uint64(e.rev)); err != nil { klog.Errorf("failed to propagate object version: %v", err) - return nil + return nil, fmt.Errorf("failed to propagate object resource version: %w", err) } if e.isInitialEventsEndBookmark { if err := storage.AnnotateInitialEventsEndBookmark(object); err != nil { - wc.sendError(fmt.Errorf("error while accessing object's metadata gr: %v, type: %v, obj: %#v, err: %v", wc.watcher.groupResource, wc.watcher.objectType, object, err)) - return nil + return nil, fmt.Errorf("error while accessing object's metadata gr: %v, type: %v, obj: %#v, err: %w", wc.watcher.groupResource, wc.watcher.objectType, object, err) } } res = &watch.Event{ @@ -588,7 +597,7 @@ func (wc *watchChan) transform(e *event) (res *watch.Event) { } case e.isDeleted: if !wc.filter(oldObj) { - return nil + return nil, nil } res = &watch.Event{ Type: watch.Deleted, @@ -596,7 +605,7 @@ func (wc *watchChan) transform(e *event) (res *watch.Event) { } case e.isCreated: if !wc.filter(curObj) { - return nil + return nil, nil } res = &watch.Event{ Type: watch.Added, @@ -608,7 +617,7 @@ func (wc *watchChan) transform(e *event) (res *watch.Event) { Type: watch.Modified, Object: curObj, } - return res + return res, nil } curObjPasses := wc.filter(curObj) oldObjPasses := wc.filter(oldObj) @@ -630,7 +639,7 @@ func (wc *watchChan) transform(e *event) (res *watch.Event) { } } } - return res + return res, nil } func transformErrorToEvent(err error) *watch.Event { @@ -686,18 +695,40 @@ func (wc *watchChan) prepareObjs(e *event) (curObj runtime.Object, oldObj runtim if len(e.prevValue) > 0 && (e.isDeleted || !wc.acceptAll()) { data, _, err := wc.watcher.transformer.TransformFromStorage(wc.ctx, e.prevValue, authenticatedDataString(e.key)) if err != nil { - return nil, nil, err + return nil, nil, wc.watcher.transformIfCorruptObjectError(e, err) } // Note that this sends the *old* object with the etcd revision for the time at // which it gets deleted. 
oldObj, err = decodeObj(wc.watcher.codec, wc.watcher.versioner, data, e.rev) if err != nil { - return nil, nil, err + return nil, nil, wc.watcher.transformIfCorruptObjectError(e, err) } } return curObj, oldObj, nil } +type corruptObjectDeletedError struct { + err error +} + +func (e *corruptObjectDeletedError) Error() string { + return fmt.Sprintf("saw a DELETED event, but object data is corrupt - %v", e.err) +} +func (e *corruptObjectDeletedError) Unwrap() error { return e.err } + +func (w *watcher) transformIfCorruptObjectError(e *event, err error) error { + var corruptObjErr *corruptObjectError + if !e.isDeleted || !errors.As(err, &corruptObjErr) { + return err + } + + // if we are here it means we received a DELETED event but the object + // associated with it is corrupt because we failed to transform or + // decode the data associated with the object. + // wrap the original error so we can send a proper watch Error event. + return &corruptObjectDeletedError{err: corruptObjErr} +} + func decodeObj(codec runtime.Codec, versioner storage.Versioner, data []byte, rev int64) (_ runtime.Object, err error) { obj, err := runtime.Decode(codec, []byte(data)) if err != nil { diff --git a/vendor/k8s.io/apiserver/pkg/storage/interfaces.go b/vendor/k8s.io/apiserver/pkg/storage/interfaces.go index cff804b28..3932f0cae 100644 --- a/vendor/k8s.io/apiserver/pkg/storage/interfaces.go +++ b/vendor/k8s.io/apiserver/pkg/storage/interfaces.go @@ -108,6 +108,8 @@ type UpdateFunc func(input runtime.Object, res ResponseMeta) (output runtime.Obj // ValidateObjectFunc is a function to act on a given object. An error may be returned // if the hook cannot be completed. The function may NOT transform the provided // object. +// NOTE: the object in obj may be nil if it cannot be read from the +// storage, due to transformation or decode error. type ValidateObjectFunc func(ctx context.Context, obj runtime.Object) error // ValidateAllObjectFunc is a "admit everything" instance of ValidateObjectFunc. @@ -137,11 +139,11 @@ func (p *Preconditions) Check(key string, obj runtime.Object) error { } objMeta, err := meta.Accessor(obj) if err != nil { - return NewInternalErrorf( - "can't enforce preconditions %v on un-introspectable object %v, got error: %v", - *p, - obj, - err) + return NewInternalError( + fmt.Errorf("can't enforce preconditions %v on un-introspectable object %v, got error: %w", + *p, + obj, + err)) } if p.UID != nil && *p.UID != objMeta.GetUID() { err := fmt.Sprintf( @@ -178,7 +180,7 @@ type Interface interface { // However, the implementations have to retry in case suggestion is stale. Delete( ctx context.Context, key string, out runtime.Object, preconditions *Preconditions, - validateDeletion ValidateObjectFunc, cachedExistingObject runtime.Object) error + validateDeletion ValidateObjectFunc, cachedExistingObject runtime.Object, opts DeleteOptions) error // Watch begins watching the specified key. Events are decoded into API objects, // and any items selected by 'p' are sent down to returned watch.Interface. @@ -312,3 +314,14 @@ type ListOptions struct { // continues streaming events. SendInitialEvents *bool } + +// DeleteOptions provides the options that may be provided for storage delete operations. +type DeleteOptions struct { + // IgnoreStoreReadError, if enabled, will ignore store read error + // such as transformation or decode failure and go ahead with the + // deletion of the object. 
+ // NOTE: for normal deletion flow it should always be false, it may be + // enabled by the caller only to facilitate unsafe deletion of corrupt + // object which otherwise can not be deleted using the normal flow + IgnoreStoreReadError bool +} diff --git a/vendor/k8s.io/apiserver/pkg/storage/storagebackend/factory/etcd3.go b/vendor/k8s.io/apiserver/pkg/storage/storagebackend/factory/etcd3.go index 4f3aad9f9..49aeaec2b 100644 --- a/vendor/k8s.io/apiserver/pkg/storage/storagebackend/factory/etcd3.go +++ b/vendor/k8s.io/apiserver/pkg/storage/storagebackend/factory/etcd3.go @@ -33,6 +33,7 @@ import ( "go.etcd.io/etcd/client/pkg/v3/logutil" "go.etcd.io/etcd/client/pkg/v3/transport" clientv3 "go.etcd.io/etcd/client/v3" + "go.etcd.io/etcd/client/v3/kubernetes" "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" "go.uber.org/zap" "go.uber.org/zap/zapcore" @@ -228,7 +229,7 @@ func newETCD3ProberMonitor(c storagebackend.Config) (*etcd3ProberMonitor, error) return nil, err } return &etcd3ProberMonitor{ - client: client, + client: client.Client, prefix: c.Prefix, endpoints: c.Transport.ServerList, }, nil @@ -282,7 +283,7 @@ func (t *etcd3ProberMonitor) Monitor(ctx context.Context) (metrics.StorageMetric }, nil } -var newETCD3Client = func(c storagebackend.TransportConfig) (*clientv3.Client, error) { +var newETCD3Client = func(c storagebackend.TransportConfig) (*kubernetes.Client, error) { tlsInfo := transport.TLSInfo{ CertFile: c.CertFile, KeyFile: c.KeyFile, @@ -352,7 +353,7 @@ var newETCD3Client = func(c storagebackend.TransportConfig) (*clientv3.Client, e Logger: etcd3ClientLogger, } - return clientv3.New(cfg) + return kubernetes.New(cfg) } type runningCompactor struct { @@ -378,12 +379,17 @@ func startCompactorOnce(c storagebackend.TransportConfig, interval time.Duration compactorsMu.Lock() defer compactorsMu.Unlock() + if interval == 0 { + // short circuit, if the compaction request from apiserver is disabled + return func() {}, nil + } key := fmt.Sprintf("%v", c) // gives: {[server1 server2] keyFile certFile caFile} if compactor, foundBefore := compactors[key]; !foundBefore || compactor.interval > interval { - compactorClient, err := newETCD3Client(c) + client, err := newETCD3Client(c) if err != nil { return nil, err } + compactorClient := client.Client if foundBefore { // replace compactor @@ -435,7 +441,7 @@ func newETCD3Storage(c storagebackend.ConfigForResource, newFunc, newListFunc fu // decorate the KV instance so we can track etcd latency per request. 
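// Illustrative sketch (editorial aside, not part of the vendored patch): storage.Interface.Delete
// now takes a DeleteOptions value, and IgnoreStoreReadError is only meant for the unsafe
// corrupt-object path guarded by AllowUnsafeMalformedObjectDeletion. The helper below shows the
// shape of such a call; the function itself is hypothetical and skips the REST-layer plumbing.
package example

import (
	"context"
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apiserver/pkg/features"
	"k8s.io/apiserver/pkg/storage"
	utilfeature "k8s.io/apiserver/pkg/util/feature"
)

// forceDeleteCorruptKey removes a key whose stored value can no longer be transformed or decoded.
func forceDeleteCorruptKey(ctx context.Context, s storage.Interface, key string, out runtime.Object) error {
	if !utilfeature.DefaultFeatureGate.Enabled(features.AllowUnsafeMalformedObjectDeletion) {
		return fmt.Errorf("unsafe deletion of %q requires the AllowUnsafeMalformedObjectDeletion feature gate", key)
	}
	return s.Delete(ctx, key, out,
		&storage.Preconditions{},      // no UID/ResourceVersion preconditions; the object cannot be read anyway
		storage.ValidateAllObjectFunc, // obj may be nil on this path, so admit everything
		nil,                           // no cached existing object
		storage.DeleteOptions{IgnoreStoreReadError: true},
	)
}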
client.KV = etcd3.NewETCDLatencyTracker(client.KV) - stopDBSizeMonitor, err := startDBSizeMonitorPerEndpoint(client, c.DBMetricPollInterval) + stopDBSizeMonitor, err := startDBSizeMonitorPerEndpoint(client.Client, c.DBMetricPollInterval) if err != nil { return nil, nil, err } @@ -455,7 +461,11 @@ func newETCD3Storage(c storagebackend.ConfigForResource, newFunc, newListFunc fu if transformer == nil { transformer = identity.NewEncryptCheckTransformer() } - return etcd3.New(client, c.Codec, newFunc, newListFunc, c.Prefix, resourcePrefix, c.GroupResource, transformer, c.LeaseManagerConfig), destroyFunc, nil + + versioner := storage.APIObjectVersioner{} + decoder := etcd3.NewDefaultDecoder(c.Codec, versioner) + store := etcd3.New(client, c.Codec, newFunc, newListFunc, c.Prefix, resourcePrefix, c.GroupResource, transformer, c.LeaseManagerConfig, decoder, versioner) + return store, destroyFunc, nil } // startDBSizeMonitorPerEndpoint starts a loop to monitor etcd database size and update the diff --git a/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/kmsv2/grpc_service.go b/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/kmsv2/grpc_service.go index 67f7bc79e..09a2a76df 100644 --- a/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/kmsv2/grpc_service.go +++ b/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/kmsv2/grpc_service.go @@ -58,6 +58,7 @@ func NewGRPCService(ctx context.Context, endpoint, providerName string, callTime s := &gRPCService{callTimeout: callTimeout} s.connection, err = grpc.Dial( addr, + grpc.WithAuthority("localhost"), grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithDefaultCallOptions(grpc.WaitForReady(true)), grpc.WithContextDialer( diff --git a/vendor/k8s.io/apiserver/pkg/storage/value/metrics.go b/vendor/k8s.io/apiserver/pkg/storage/value/metrics.go index 35ec01369..d2b59a29d 100644 --- a/vendor/k8s.io/apiserver/pkg/storage/value/metrics.go +++ b/vendor/k8s.io/apiserver/pkg/storage/value/metrics.go @@ -61,10 +61,10 @@ var ( Namespace: namespace, Subsystem: subsystem, Name: "transformation_operations_total", - Help: "Total number of transformations. Successful transformation will have a status 'OK' and a varied status string when the transformation fails. This status and transformation_type fields may be used for alerting on encryption/decryption failure using transformation_type from_storage for decryption and to_storage for encryption", + Help: "Total number of transformations. Successful transformation will have a status 'OK' and a varied status string when the transformation fails. The status, resource, and transformation_type fields can be used for alerting purposes. For example, you can monitor for encryption/decryption failures using the transformation_type (e.g., from_storage for decryption and to_storage for encryption). Additionally, these fields can be used to ensure that the correct transformers are applied to each resource.", StabilityLevel: metrics.ALPHA, }, - []string{"transformation_type", "transformer_prefix", "status"}, + []string{"resource", "transformation_type", "transformer_prefix", "status"}, ) envelopeTransformationCacheMissTotal = metrics.NewCounter( @@ -113,8 +113,8 @@ func RegisterMetrics() { // RecordTransformation records latencies and count of TransformFromStorage and TransformToStorage operations. // Note that transformation_failures_total metric is deprecated, use transformation_operations_total instead. 
-func RecordTransformation(transformationType, transformerPrefix string, elapsed time.Duration, err error) { - transformerOperationsTotal.WithLabelValues(transformationType, transformerPrefix, getErrorCode(err)).Inc() +func RecordTransformation(resource, transformationType, transformerPrefix string, elapsed time.Duration, err error) { + transformerOperationsTotal.WithLabelValues(resource, transformationType, transformerPrefix, getErrorCode(err)).Inc() if err == nil { transformerLatencies.WithLabelValues(transformationType, transformerPrefix).Observe(elapsed.Seconds()) diff --git a/vendor/k8s.io/apiserver/pkg/storage/value/transformer.go b/vendor/k8s.io/apiserver/pkg/storage/value/transformer.go index c5e97ac2d..eab90b3fa 100644 --- a/vendor/k8s.io/apiserver/pkg/storage/value/transformer.go +++ b/vendor/k8s.io/apiserver/pkg/storage/value/transformer.go @@ -105,6 +105,7 @@ func NewPrefixTransformers(err error, transformers ...PrefixTransformer) Transfo func (t *prefixTransformers) TransformFromStorage(ctx context.Context, data []byte, dataCtx Context) ([]byte, bool, error) { start := time.Now() var errs []error + resource := getResourceFromContext(ctx) for i, transformer := range t.transformers { if bytes.HasPrefix(data, transformer.Prefix) { result, stale, err := transformer.Transformer.TransformFromStorage(ctx, data[len(transformer.Prefix):], dataCtx) @@ -116,9 +117,9 @@ func (t *prefixTransformers) TransformFromStorage(ctx context.Context, data []by continue } if len(transformer.Prefix) == 0 { - RecordTransformation("from_storage", "identity", time.Since(start), err) + RecordTransformation(resource, "from_storage", "identity", time.Since(start), err) } else { - RecordTransformation("from_storage", string(transformer.Prefix), time.Since(start), err) + RecordTransformation(resource, "from_storage", string(transformer.Prefix), time.Since(start), err) } // It is valid to have overlapping prefixes when the same encryption provider @@ -163,7 +164,7 @@ func (t *prefixTransformers) TransformFromStorage(ctx context.Context, data []by logTransformErr(ctx, err, "failed to decrypt data") return nil, false, err } - RecordTransformation("from_storage", "unknown", time.Since(start), t.err) + RecordTransformation(resource, "from_storage", "unknown", time.Since(start), t.err) return nil, false, t.err } @@ -171,8 +172,9 @@ func (t *prefixTransformers) TransformFromStorage(ctx context.Context, data []by func (t *prefixTransformers) TransformToStorage(ctx context.Context, data []byte, dataCtx Context) ([]byte, error) { start := time.Now() transformer := t.transformers[0] + resource := getResourceFromContext(ctx) result, err := transformer.Transformer.TransformToStorage(ctx, data, dataCtx) - RecordTransformation("to_storage", string(transformer.Prefix), time.Since(start), err) + RecordTransformation(resource, "to_storage", string(transformer.Prefix), time.Since(start), err) if err != nil { logTransformErr(ctx, err, "failed to encrypt data") return nil, err @@ -209,5 +211,11 @@ func getRequestInfoFromContext(ctx context.Context) *genericapirequest.RequestIn if reqInfo, found := genericapirequest.RequestInfoFrom(ctx); found { return reqInfo } + klog.V(4).InfoSDepth(1, "no request info on context") return &genericapirequest.RequestInfo{} } + +func getResourceFromContext(ctx context.Context) string { + reqInfo := getRequestInfoFromContext(ctx) + return schema.GroupResource{Group: reqInfo.APIGroup, Resource: reqInfo.Resource}.String() +} diff --git a/vendor/k8s.io/apiserver/pkg/util/apihelpers/helpers.go 
b/vendor/k8s.io/apiserver/pkg/util/apihelpers/helpers.go index 9a6b2a28e..e5365fc22 100644 --- a/vendor/k8s.io/apiserver/pkg/util/apihelpers/helpers.go +++ b/vendor/k8s.io/apiserver/pkg/util/apihelpers/helpers.go @@ -20,6 +20,12 @@ import ( "sort" flowcontrol "k8s.io/api/flowcontrol/v1" + metainternalversionscheme "k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/apimachinery/pkg/runtime/serializer/cbor" + "k8s.io/apiserver/pkg/features" + utilfeature "k8s.io/apiserver/pkg/util/feature" ) // SetFlowSchemaCondition sets conditions. @@ -98,3 +104,15 @@ func (s FlowSchemaSequence) Less(i, j int) bool { func (s FlowSchemaSequence) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +var metaInternalVersionCodecsWithCBOR = serializer.NewCodecFactory(metainternalversionscheme.Scheme, serializer.WithSerializer(cbor.NewSerializerInfo)) + +// GetMetaInternalVersionCodecs returns a negotiated serializer that recognizes the types from +// k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme.Scheme. It will or will include a CBOR +// serializer if CBOR is enabled. +func GetMetaInternalVersionCodecs() runtime.NegotiatedSerializer { + if utilfeature.DefaultFeatureGate.Enabled(features.CBORServingAndStorage) { + return metaInternalVersionCodecsWithCBOR + } + return metainternalversionscheme.Codecs +} diff --git a/vendor/k8s.io/apiserver/pkg/util/flowcontrol/request/width.go b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/request/width.go index 71837edba..43bc29ab8 100644 --- a/vendor/k8s.io/apiserver/pkg/util/flowcontrol/request/width.go +++ b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/request/width.go @@ -83,7 +83,7 @@ func NewWorkEstimator(objectCountFn objectCountGetterFunc, watchCountFn watchCou // WorkEstimatorFunc returns the estimated work of a given request. // This function will be used by the Priority & Fairness filter to -// estimate the work of of incoming requests. +// estimate the work of incoming requests. type WorkEstimatorFunc func(request *http.Request, flowSchemaName, priorityLevelName string) WorkEstimate func (e WorkEstimatorFunc) EstimateWork(r *http.Request, flowSchemaName, priorityLevelName string) WorkEstimate { diff --git a/vendor/k8s.io/apiserver/pkg/util/version/version.go b/vendor/k8s.io/apiserver/pkg/util/version/version.go deleted file mode 100644 index 694d27a99..000000000 --- a/vendor/k8s.io/apiserver/pkg/util/version/version.go +++ /dev/null @@ -1,181 +0,0 @@ -/* -Copyright 2024 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package version - -import ( - "fmt" - "sync/atomic" - - "k8s.io/apimachinery/pkg/util/version" - baseversion "k8s.io/component-base/version" -) - -type EffectiveVersion interface { - BinaryVersion() *version.Version - EmulationVersion() *version.Version - MinCompatibilityVersion() *version.Version - EqualTo(other EffectiveVersion) bool - String() string - Validate() []error -} - -type MutableEffectiveVersion interface { - EffectiveVersion - Set(binaryVersion, emulationVersion, minCompatibilityVersion *version.Version) - SetEmulationVersion(emulationVersion *version.Version) - SetMinCompatibilityVersion(minCompatibilityVersion *version.Version) -} - -type effectiveVersion struct { - // When true, BinaryVersion() returns the current binary version - useDefaultBuildBinaryVersion atomic.Bool - // Holds the last binary version stored in Set() - binaryVersion atomic.Pointer[version.Version] - // If the emulationVersion is set by the users, it could only contain major and minor versions. - // In tests, emulationVersion could be the same as the binary version, or set directly, - // which can have "alpha" as pre-release to continue serving expired apis while we clean up the test. - emulationVersion atomic.Pointer[version.Version] - // minCompatibilityVersion could only contain major and minor versions. - minCompatibilityVersion atomic.Pointer[version.Version] -} - -func (m *effectiveVersion) BinaryVersion() *version.Version { - if m.useDefaultBuildBinaryVersion.Load() { - return defaultBuildBinaryVersion() - } - return m.binaryVersion.Load() -} - -func (m *effectiveVersion) EmulationVersion() *version.Version { - ver := m.emulationVersion.Load() - if ver != nil { - // Emulation version can have "alpha" as pre-release to continue serving expired apis while we clean up the test. - // The pre-release should not be accessible to the users. - return ver.WithPreRelease(m.BinaryVersion().PreRelease()) - } - return ver -} - -func (m *effectiveVersion) MinCompatibilityVersion() *version.Version { - return m.minCompatibilityVersion.Load() -} - -func (m *effectiveVersion) EqualTo(other EffectiveVersion) bool { - return m.BinaryVersion().EqualTo(other.BinaryVersion()) && m.EmulationVersion().EqualTo(other.EmulationVersion()) && m.MinCompatibilityVersion().EqualTo(other.MinCompatibilityVersion()) -} - -func (m *effectiveVersion) String() string { - if m == nil { - return "" - } - return fmt.Sprintf("{BinaryVersion: %s, EmulationVersion: %s, MinCompatibilityVersion: %s}", - m.BinaryVersion().String(), m.EmulationVersion().String(), m.MinCompatibilityVersion().String()) -} - -func majorMinor(ver *version.Version) *version.Version { - if ver == nil { - return ver - } - return version.MajorMinor(ver.Major(), ver.Minor()) -} - -func (m *effectiveVersion) Set(binaryVersion, emulationVersion, minCompatibilityVersion *version.Version) { - m.binaryVersion.Store(binaryVersion) - m.useDefaultBuildBinaryVersion.Store(false) - m.emulationVersion.Store(majorMinor(emulationVersion)) - m.minCompatibilityVersion.Store(majorMinor(minCompatibilityVersion)) -} - -func (m *effectiveVersion) SetEmulationVersion(emulationVersion *version.Version) { - m.emulationVersion.Store(majorMinor(emulationVersion)) -} - -func (m *effectiveVersion) SetMinCompatibilityVersion(minCompatibilityVersion *version.Version) { - m.minCompatibilityVersion.Store(majorMinor(minCompatibilityVersion)) -} - -func (m *effectiveVersion) Validate() []error { - var errs []error - // Validate only checks the major and minor versions. 
- binaryVersion := m.BinaryVersion().WithPatch(0) - emulationVersion := m.emulationVersion.Load() - minCompatibilityVersion := m.minCompatibilityVersion.Load() - - // emulationVersion can only be 1.{binaryMinor-1}...1.{binaryMinor}. - maxEmuVer := binaryVersion - minEmuVer := binaryVersion.SubtractMinor(1) - if emulationVersion.GreaterThan(maxEmuVer) || emulationVersion.LessThan(minEmuVer) { - errs = append(errs, fmt.Errorf("emulation version %s is not between [%s, %s]", emulationVersion.String(), minEmuVer.String(), maxEmuVer.String())) - } - // minCompatibilityVersion can only be 1.{binaryMinor-1} for alpha. - maxCompVer := binaryVersion.SubtractMinor(1) - minCompVer := binaryVersion.SubtractMinor(1) - if minCompatibilityVersion.GreaterThan(maxCompVer) || minCompatibilityVersion.LessThan(minCompVer) { - errs = append(errs, fmt.Errorf("minCompatibilityVersion version %s is not between [%s, %s]", minCompatibilityVersion.String(), minCompVer.String(), maxCompVer.String())) - } - return errs -} - -func newEffectiveVersion(binaryVersion *version.Version, useDefaultBuildBinaryVersion bool) MutableEffectiveVersion { - effective := &effectiveVersion{} - compatVersion := binaryVersion.SubtractMinor(1) - effective.Set(binaryVersion, binaryVersion, compatVersion) - effective.useDefaultBuildBinaryVersion.Store(useDefaultBuildBinaryVersion) - return effective -} - -func NewEffectiveVersion(binaryVer string) MutableEffectiveVersion { - if binaryVer == "" { - return &effectiveVersion{} - } - binaryVersion := version.MustParse(binaryVer) - return newEffectiveVersion(binaryVersion, false) -} - -func defaultBuildBinaryVersion() *version.Version { - verInfo := baseversion.Get() - return version.MustParse(verInfo.String()).WithInfo(verInfo) -} - -// DefaultBuildEffectiveVersion returns the MutableEffectiveVersion based on the -// current build information. -func DefaultBuildEffectiveVersion() MutableEffectiveVersion { - binaryVersion := defaultBuildBinaryVersion() - if binaryVersion.Major() == 0 && binaryVersion.Minor() == 0 { - return DefaultKubeEffectiveVersion() - } - return newEffectiveVersion(binaryVersion, true) -} - -// DefaultKubeEffectiveVersion returns the MutableEffectiveVersion based on the -// latest K8s release. -func DefaultKubeEffectiveVersion() MutableEffectiveVersion { - binaryVersion := version.MustParse(baseversion.DefaultKubeBinaryVersion).WithInfo(baseversion.Get()) - return newEffectiveVersion(binaryVersion, false) -} - -// ValidateKubeEffectiveVersion validates the EmulationVersion is equal to the binary version at 1.31 for kube components. -// TODO: remove in 1.32 -// emulationVersion is introduced in 1.31, so it is only allowed to be equal to the binary version at 1.31. 
-func ValidateKubeEffectiveVersion(effectiveVersion EffectiveVersion) error { - binaryVersion := version.MajorMinor(effectiveVersion.BinaryVersion().Major(), effectiveVersion.BinaryVersion().Minor()) - if binaryVersion.EqualTo(version.MajorMinor(1, 31)) && !effectiveVersion.EmulationVersion().EqualTo(binaryVersion) { - return fmt.Errorf("emulation version needs to be equal to binary version(%s) in compatibility-version alpha, got %s", - binaryVersion.String(), effectiveVersion.EmulationVersion().String()) - } - return nil -} diff --git a/vendor/k8s.io/apiserver/plugin/pkg/authorizer/webhook/webhook.go b/vendor/k8s.io/apiserver/plugin/pkg/authorizer/webhook/webhook.go index ebc4949d9..3df8e580e 100644 --- a/vendor/k8s.io/apiserver/plugin/pkg/authorizer/webhook/webhook.go +++ b/vendor/k8s.io/apiserver/plugin/pkg/authorizer/webhook/webhook.go @@ -82,8 +82,8 @@ type WebhookAuthorizer struct { } // NewFromInterface creates a WebhookAuthorizer using the given subjectAccessReview client -func NewFromInterface(subjectAccessReview authorizationv1client.AuthorizationV1Interface, authorizedTTL, unauthorizedTTL time.Duration, retryBackoff wait.Backoff, decisionOnError authorizer.Decision, metrics metrics.AuthorizerMetrics) (*WebhookAuthorizer, error) { - return newWithBackoff(&subjectAccessReviewV1Client{subjectAccessReview.RESTClient()}, authorizedTTL, unauthorizedTTL, retryBackoff, decisionOnError, nil, metrics, "") +func NewFromInterface(subjectAccessReview authorizationv1client.AuthorizationV1Interface, authorizedTTL, unauthorizedTTL time.Duration, retryBackoff wait.Backoff, decisionOnError authorizer.Decision, metrics metrics.AuthorizerMetrics, compiler authorizationcel.Compiler) (*WebhookAuthorizer, error) { + return newWithBackoff(&subjectAccessReviewV1Client{subjectAccessReview.RESTClient()}, authorizedTTL, unauthorizedTTL, retryBackoff, decisionOnError, nil, metrics, compiler, "") } // New creates a new WebhookAuthorizer from the provided kubeconfig file. @@ -105,18 +105,18 @@ func NewFromInterface(subjectAccessReview authorizationv1client.AuthorizationV1I // // For additional HTTP configuration, refer to the kubeconfig documentation // https://kubernetes.io/docs/user-guide/kubeconfig-file/. -func New(config *rest.Config, version string, authorizedTTL, unauthorizedTTL time.Duration, retryBackoff wait.Backoff, decisionOnError authorizer.Decision, matchConditions []apiserver.WebhookMatchCondition, name string, metrics metrics.AuthorizerMetrics) (*WebhookAuthorizer, error) { +func New(config *rest.Config, version string, authorizedTTL, unauthorizedTTL time.Duration, retryBackoff wait.Backoff, decisionOnError authorizer.Decision, matchConditions []apiserver.WebhookMatchCondition, name string, metrics metrics.AuthorizerMetrics, compiler authorizationcel.Compiler) (*WebhookAuthorizer, error) { subjectAccessReview, err := subjectAccessReviewInterfaceFromConfig(config, version, retryBackoff) if err != nil { return nil, err } - return newWithBackoff(subjectAccessReview, authorizedTTL, unauthorizedTTL, retryBackoff, decisionOnError, matchConditions, metrics, name) + return newWithBackoff(subjectAccessReview, authorizedTTL, unauthorizedTTL, retryBackoff, decisionOnError, matchConditions, metrics, compiler, name) } // newWithBackoff allows tests to skip the sleep. 
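// Illustrative sketch (editorial aside, not part of the vendored patch): webhook.New and
// NewFromInterface above now require an explicit CEL compiler for matchConditions, so every call
// site gains one argument. The wiring below is hypothetical, and the import paths for the
// apiserver, cel, and metrics packages are assumptions based on how webhook.go aliases them.
package example

import (
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/apiserver/pkg/apis/apiserver"
	"k8s.io/apiserver/pkg/authorization/authorizer"
	authorizationcel "k8s.io/apiserver/pkg/authorization/cel"
	"k8s.io/apiserver/plugin/pkg/authorizer/webhook"
	webhookmetrics "k8s.io/apiserver/plugin/pkg/authorizer/webhook/metrics"
	"k8s.io/client-go/rest"
)

// buildWebhookAuthorizer forwards the compiler that the authorizer now uses to compile matchConditions.
func buildWebhookAuthorizer(
	cfg *rest.Config,
	retryBackoff wait.Backoff,
	m webhookmetrics.AuthorizerMetrics,
	compiler authorizationcel.Compiler,
	matchConditions []apiserver.WebhookMatchCondition,
) (*webhook.WebhookAuthorizer, error) {
	return webhook.New(cfg, "v1", // SubjectAccessReview API version
		5*time.Minute,  // authorizedTTL (illustrative)
		30*time.Second, // unauthorizedTTL (illustrative)
		retryBackoff,
		authorizer.DecisionNoOpinion,
		matchConditions,
		"example-webhook", // authorizer name (illustrative)
		m,
		compiler, // new argument in this bump
	)
}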
-func newWithBackoff(subjectAccessReview subjectAccessReviewer, authorizedTTL, unauthorizedTTL time.Duration, retryBackoff wait.Backoff, decisionOnError authorizer.Decision, matchConditions []apiserver.WebhookMatchCondition, am metrics.AuthorizerMetrics, name string) (*WebhookAuthorizer, error) { +func newWithBackoff(subjectAccessReview subjectAccessReviewer, authorizedTTL, unauthorizedTTL time.Duration, retryBackoff wait.Backoff, decisionOnError authorizer.Decision, matchConditions []apiserver.WebhookMatchCondition, am metrics.AuthorizerMetrics, compiler authorizationcel.Compiler, name string) (*WebhookAuthorizer, error) { // compile all expressions once in validation and save the results to be used for eval later - cm, fieldErr := apiservervalidation.ValidateAndCompileMatchConditions(matchConditions) + cm, fieldErr := apiservervalidation.ValidateAndCompileMatchConditions(compiler, matchConditions) if err := fieldErr.ToAggregate(); err != nil { return nil, err } @@ -402,7 +402,7 @@ func labelSelectorToAuthorizationAPI(attr authorizer.Attributes) ([]metav1.Label } // TODO: need to finish the method to get the rules when using webhook mode -func (w *WebhookAuthorizer) RulesFor(user user.Info, namespace string) ([]authorizer.ResourceRuleInfo, []authorizer.NonResourceRuleInfo, bool, error) { +func (w *WebhookAuthorizer) RulesFor(ctx context.Context, user user.Info, namespace string) ([]authorizer.ResourceRuleInfo, []authorizer.NonResourceRuleInfo, bool, error) { var ( resourceRules []authorizer.ResourceRuleInfo nonResourceRules []authorizer.NonResourceRuleInfo diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/matchresources.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/matchresources.go index e8e371d7d..e840fe9eb 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/matchresources.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/matchresources.go @@ -19,18 +19,18 @@ limitations under the License. package v1 import ( - apiadmissionregistrationv1 "k8s.io/api/admissionregistration/v1" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + admissionregistrationv1 "k8s.io/api/admissionregistration/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) // MatchResourcesApplyConfiguration represents a declarative configuration of the MatchResources type for use // with apply. 
type MatchResourcesApplyConfiguration struct { - NamespaceSelector *v1.LabelSelectorApplyConfiguration `json:"namespaceSelector,omitempty"` - ObjectSelector *v1.LabelSelectorApplyConfiguration `json:"objectSelector,omitempty"` + NamespaceSelector *metav1.LabelSelectorApplyConfiguration `json:"namespaceSelector,omitempty"` + ObjectSelector *metav1.LabelSelectorApplyConfiguration `json:"objectSelector,omitempty"` ResourceRules []NamedRuleWithOperationsApplyConfiguration `json:"resourceRules,omitempty"` ExcludeResourceRules []NamedRuleWithOperationsApplyConfiguration `json:"excludeResourceRules,omitempty"` - MatchPolicy *apiadmissionregistrationv1.MatchPolicyType `json:"matchPolicy,omitempty"` + MatchPolicy *admissionregistrationv1.MatchPolicyType `json:"matchPolicy,omitempty"` } // MatchResourcesApplyConfiguration constructs a declarative configuration of the MatchResources type for use with @@ -42,7 +42,7 @@ func MatchResources() *MatchResourcesApplyConfiguration { // WithNamespaceSelector sets the NamespaceSelector field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the NamespaceSelector field is set to the value of the last call. -func (b *MatchResourcesApplyConfiguration) WithNamespaceSelector(value *v1.LabelSelectorApplyConfiguration) *MatchResourcesApplyConfiguration { +func (b *MatchResourcesApplyConfiguration) WithNamespaceSelector(value *metav1.LabelSelectorApplyConfiguration) *MatchResourcesApplyConfiguration { b.NamespaceSelector = value return b } @@ -50,7 +50,7 @@ func (b *MatchResourcesApplyConfiguration) WithNamespaceSelector(value *v1.Label // WithObjectSelector sets the ObjectSelector field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the ObjectSelector field is set to the value of the last call. -func (b *MatchResourcesApplyConfiguration) WithObjectSelector(value *v1.LabelSelectorApplyConfiguration) *MatchResourcesApplyConfiguration { +func (b *MatchResourcesApplyConfiguration) WithObjectSelector(value *metav1.LabelSelectorApplyConfiguration) *MatchResourcesApplyConfiguration { b.ObjectSelector = value return b } @@ -84,7 +84,7 @@ func (b *MatchResourcesApplyConfiguration) WithExcludeResourceRules(values ...*N // WithMatchPolicy sets the MatchPolicy field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the MatchPolicy field is set to the value of the last call. 
-func (b *MatchResourcesApplyConfiguration) WithMatchPolicy(value apiadmissionregistrationv1.MatchPolicyType) *MatchResourcesApplyConfiguration { +func (b *MatchResourcesApplyConfiguration) WithMatchPolicy(value admissionregistrationv1.MatchPolicyType) *MatchResourcesApplyConfiguration { b.MatchPolicy = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/mutatingwebhookconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/mutatingwebhookconfiguration.go index 58b71d6d5..4267f5fbf 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/mutatingwebhookconfiguration.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/mutatingwebhookconfiguration.go @@ -19,20 +19,20 @@ limitations under the License. package v1 import ( - apiadmissionregistrationv1 "k8s.io/api/admissionregistration/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + admissionregistrationv1 "k8s.io/api/admissionregistration/v1" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" internal "k8s.io/client-go/applyconfigurations/internal" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) // MutatingWebhookConfigurationApplyConfiguration represents a declarative configuration of the MutatingWebhookConfiguration type for use // with apply. type MutatingWebhookConfigurationApplyConfiguration struct { - v1.TypeMetaApplyConfiguration `json:",inline"` - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Webhooks []MutatingWebhookApplyConfiguration `json:"webhooks,omitempty"` + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Webhooks []MutatingWebhookApplyConfiguration `json:"webhooks,omitempty"` } // MutatingWebhookConfiguration constructs a declarative configuration of the MutatingWebhookConfiguration type for use with @@ -56,18 +56,18 @@ func MutatingWebhookConfiguration(name string) *MutatingWebhookConfigurationAppl // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! -func ExtractMutatingWebhookConfiguration(mutatingWebhookConfiguration *apiadmissionregistrationv1.MutatingWebhookConfiguration, fieldManager string) (*MutatingWebhookConfigurationApplyConfiguration, error) { +func ExtractMutatingWebhookConfiguration(mutatingWebhookConfiguration *admissionregistrationv1.MutatingWebhookConfiguration, fieldManager string) (*MutatingWebhookConfigurationApplyConfiguration, error) { return extractMutatingWebhookConfiguration(mutatingWebhookConfiguration, fieldManager, "") } // ExtractMutatingWebhookConfigurationStatus is the same as ExtractMutatingWebhookConfiguration except // that it extracts the status subresource applied configuration. // Experimental! 
-func ExtractMutatingWebhookConfigurationStatus(mutatingWebhookConfiguration *apiadmissionregistrationv1.MutatingWebhookConfiguration, fieldManager string) (*MutatingWebhookConfigurationApplyConfiguration, error) { +func ExtractMutatingWebhookConfigurationStatus(mutatingWebhookConfiguration *admissionregistrationv1.MutatingWebhookConfiguration, fieldManager string) (*MutatingWebhookConfigurationApplyConfiguration, error) { return extractMutatingWebhookConfiguration(mutatingWebhookConfiguration, fieldManager, "status") } -func extractMutatingWebhookConfiguration(mutatingWebhookConfiguration *apiadmissionregistrationv1.MutatingWebhookConfiguration, fieldManager string, subresource string) (*MutatingWebhookConfigurationApplyConfiguration, error) { +func extractMutatingWebhookConfiguration(mutatingWebhookConfiguration *admissionregistrationv1.MutatingWebhookConfiguration, fieldManager string, subresource string) (*MutatingWebhookConfigurationApplyConfiguration, error) { b := &MutatingWebhookConfigurationApplyConfiguration{} err := managedfields.ExtractInto(mutatingWebhookConfiguration, internal.Parser().Type("io.k8s.api.admissionregistration.v1.MutatingWebhookConfiguration"), fieldManager, b, subresource) if err != nil { @@ -84,7 +84,7 @@ func extractMutatingWebhookConfiguration(mutatingWebhookConfiguration *apiadmiss // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *MutatingWebhookConfigurationApplyConfiguration) WithKind(value string) *MutatingWebhookConfigurationApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -92,7 +92,7 @@ func (b *MutatingWebhookConfigurationApplyConfiguration) WithKind(value string) // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *MutatingWebhookConfigurationApplyConfiguration) WithAPIVersion(value string) *MutatingWebhookConfigurationApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -101,7 +101,7 @@ func (b *MutatingWebhookConfigurationApplyConfiguration) WithAPIVersion(value st // If called multiple times, the Name field is set to the value of the last call. func (b *MutatingWebhookConfigurationApplyConfiguration) WithName(value string) *MutatingWebhookConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -110,7 +110,7 @@ func (b *MutatingWebhookConfigurationApplyConfiguration) WithName(value string) // If called multiple times, the GenerateName field is set to the value of the last call. func (b *MutatingWebhookConfigurationApplyConfiguration) WithGenerateName(value string) *MutatingWebhookConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -119,7 +119,7 @@ func (b *MutatingWebhookConfigurationApplyConfiguration) WithGenerateName(value // If called multiple times, the Namespace field is set to the value of the last call. 
func (b *MutatingWebhookConfigurationApplyConfiguration) WithNamespace(value string) *MutatingWebhookConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -128,7 +128,7 @@ func (b *MutatingWebhookConfigurationApplyConfiguration) WithNamespace(value str // If called multiple times, the UID field is set to the value of the last call. func (b *MutatingWebhookConfigurationApplyConfiguration) WithUID(value types.UID) *MutatingWebhookConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -137,7 +137,7 @@ func (b *MutatingWebhookConfigurationApplyConfiguration) WithUID(value types.UID // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *MutatingWebhookConfigurationApplyConfiguration) WithResourceVersion(value string) *MutatingWebhookConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -146,25 +146,25 @@ func (b *MutatingWebhookConfigurationApplyConfiguration) WithResourceVersion(val // If called multiple times, the Generation field is set to the value of the last call. func (b *MutatingWebhookConfigurationApplyConfiguration) WithGeneration(value int64) *MutatingWebhookConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CreationTimestamp field is set to the value of the last call. -func (b *MutatingWebhookConfigurationApplyConfiguration) WithCreationTimestamp(value metav1.Time) *MutatingWebhookConfigurationApplyConfiguration { +func (b *MutatingWebhookConfigurationApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *MutatingWebhookConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. -func (b *MutatingWebhookConfigurationApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *MutatingWebhookConfigurationApplyConfiguration { +func (b *MutatingWebhookConfigurationApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *MutatingWebhookConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -173,7 +173,7 @@ func (b *MutatingWebhookConfigurationApplyConfiguration) WithDeletionTimestamp(v // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. 
func (b *MutatingWebhookConfigurationApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *MutatingWebhookConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -183,11 +183,11 @@ func (b *MutatingWebhookConfigurationApplyConfiguration) WithDeletionGracePeriod // overwriting an existing map entries in Labels field with the same key. func (b *MutatingWebhookConfigurationApplyConfiguration) WithLabels(entries map[string]string) *MutatingWebhookConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -198,11 +198,11 @@ func (b *MutatingWebhookConfigurationApplyConfiguration) WithLabels(entries map[ // overwriting an existing map entries in Annotations field with the same key. func (b *MutatingWebhookConfigurationApplyConfiguration) WithAnnotations(entries map[string]string) *MutatingWebhookConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -210,13 +210,13 @@ func (b *MutatingWebhookConfigurationApplyConfiguration) WithAnnotations(entries // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the OwnerReferences field. 
-func (b *MutatingWebhookConfigurationApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *MutatingWebhookConfigurationApplyConfiguration { +func (b *MutatingWebhookConfigurationApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *MutatingWebhookConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -227,14 +227,14 @@ func (b *MutatingWebhookConfigurationApplyConfiguration) WithOwnerReferences(val func (b *MutatingWebhookConfigurationApplyConfiguration) WithFinalizers(values ...string) *MutatingWebhookConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } func (b *MutatingWebhookConfigurationApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} } } @@ -254,5 +254,5 @@ func (b *MutatingWebhookConfigurationApplyConfiguration) WithWebhooks(values ... // GetName retrieves the value of the Name field in the declarative configuration. func (b *MutatingWebhookConfigurationApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/namedrulewithoperations.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/namedrulewithoperations.go index eda3bf635..dd31981ad 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/namedrulewithoperations.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/namedrulewithoperations.go @@ -50,7 +50,7 @@ func (b *NamedRuleWithOperationsApplyConfiguration) WithResourceNames(values ... // If called multiple times, values provided by each call will be appended to the Operations field. func (b *NamedRuleWithOperationsApplyConfiguration) WithOperations(values ...admissionregistrationv1.OperationType) *NamedRuleWithOperationsApplyConfiguration { for i := range values { - b.Operations = append(b.Operations, values[i]) + b.RuleWithOperationsApplyConfiguration.Operations = append(b.RuleWithOperationsApplyConfiguration.Operations, values[i]) } return b } @@ -60,7 +60,7 @@ func (b *NamedRuleWithOperationsApplyConfiguration) WithOperations(values ...adm // If called multiple times, values provided by each call will be appended to the APIGroups field. func (b *NamedRuleWithOperationsApplyConfiguration) WithAPIGroups(values ...string) *NamedRuleWithOperationsApplyConfiguration { for i := range values { - b.APIGroups = append(b.APIGroups, values[i]) + b.RuleApplyConfiguration.APIGroups = append(b.RuleApplyConfiguration.APIGroups, values[i]) } return b } @@ -70,7 +70,7 @@ func (b *NamedRuleWithOperationsApplyConfiguration) WithAPIGroups(values ...stri // If called multiple times, values provided by each call will be appended to the APIVersions field. 
func (b *NamedRuleWithOperationsApplyConfiguration) WithAPIVersions(values ...string) *NamedRuleWithOperationsApplyConfiguration { for i := range values { - b.APIVersions = append(b.APIVersions, values[i]) + b.RuleApplyConfiguration.APIVersions = append(b.RuleApplyConfiguration.APIVersions, values[i]) } return b } @@ -80,7 +80,7 @@ func (b *NamedRuleWithOperationsApplyConfiguration) WithAPIVersions(values ...st // If called multiple times, values provided by each call will be appended to the Resources field. func (b *NamedRuleWithOperationsApplyConfiguration) WithResources(values ...string) *NamedRuleWithOperationsApplyConfiguration { for i := range values { - b.Resources = append(b.Resources, values[i]) + b.RuleApplyConfiguration.Resources = append(b.RuleApplyConfiguration.Resources, values[i]) } return b } @@ -89,6 +89,6 @@ func (b *NamedRuleWithOperationsApplyConfiguration) WithResources(values ...stri // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Scope field is set to the value of the last call. func (b *NamedRuleWithOperationsApplyConfiguration) WithScope(value admissionregistrationv1.ScopeType) *NamedRuleWithOperationsApplyConfiguration { - b.Scope = &value + b.RuleApplyConfiguration.Scope = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/paramref.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/paramref.go index 73cda9b04..140233f6b 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/paramref.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/paramref.go @@ -20,7 +20,7 @@ package v1 import ( admissionregistrationv1 "k8s.io/api/admissionregistration/v1" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) // ParamRefApplyConfiguration represents a declarative configuration of the ParamRef type for use @@ -28,7 +28,7 @@ import ( type ParamRefApplyConfiguration struct { Name *string `json:"name,omitempty"` Namespace *string `json:"namespace,omitempty"` - Selector *v1.LabelSelectorApplyConfiguration `json:"selector,omitempty"` + Selector *metav1.LabelSelectorApplyConfiguration `json:"selector,omitempty"` ParameterNotFoundAction *admissionregistrationv1.ParameterNotFoundActionType `json:"parameterNotFoundAction,omitempty"` } @@ -57,7 +57,7 @@ func (b *ParamRefApplyConfiguration) WithNamespace(value string) *ParamRefApplyC // WithSelector sets the Selector field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Selector field is set to the value of the last call. -func (b *ParamRefApplyConfiguration) WithSelector(value *v1.LabelSelectorApplyConfiguration) *ParamRefApplyConfiguration { +func (b *ParamRefApplyConfiguration) WithSelector(value *metav1.LabelSelectorApplyConfiguration) *ParamRefApplyConfiguration { b.Selector = value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/rule.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/rule.go index 36a93643c..a8c68136b 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/rule.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/rule.go @@ -19,16 +19,16 @@ limitations under the License. 
package v1 import ( - v1 "k8s.io/api/admissionregistration/v1" + admissionregistrationv1 "k8s.io/api/admissionregistration/v1" ) // RuleApplyConfiguration represents a declarative configuration of the Rule type for use // with apply. type RuleApplyConfiguration struct { - APIGroups []string `json:"apiGroups,omitempty"` - APIVersions []string `json:"apiVersions,omitempty"` - Resources []string `json:"resources,omitempty"` - Scope *v1.ScopeType `json:"scope,omitempty"` + APIGroups []string `json:"apiGroups,omitempty"` + APIVersions []string `json:"apiVersions,omitempty"` + Resources []string `json:"resources,omitempty"` + Scope *admissionregistrationv1.ScopeType `json:"scope,omitempty"` } // RuleApplyConfiguration constructs a declarative configuration of the Rule type for use with @@ -70,7 +70,7 @@ func (b *RuleApplyConfiguration) WithResources(values ...string) *RuleApplyConfi // WithScope sets the Scope field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Scope field is set to the value of the last call. -func (b *RuleApplyConfiguration) WithScope(value v1.ScopeType) *RuleApplyConfiguration { +func (b *RuleApplyConfiguration) WithScope(value admissionregistrationv1.ScopeType) *RuleApplyConfiguration { b.Scope = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/rulewithoperations.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/rulewithoperations.go index 92bddd502..55a985f99 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/rulewithoperations.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/rulewithoperations.go @@ -19,13 +19,13 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/admissionregistration/v1" + admissionregistrationv1 "k8s.io/api/admissionregistration/v1" ) // RuleWithOperationsApplyConfiguration represents a declarative configuration of the RuleWithOperations type for use // with apply. type RuleWithOperationsApplyConfiguration struct { - Operations []v1.OperationType `json:"operations,omitempty"` + Operations []admissionregistrationv1.OperationType `json:"operations,omitempty"` RuleApplyConfiguration `json:",inline"` } @@ -38,7 +38,7 @@ func RuleWithOperations() *RuleWithOperationsApplyConfiguration { // WithOperations adds the given value to the Operations field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the Operations field. -func (b *RuleWithOperationsApplyConfiguration) WithOperations(values ...v1.OperationType) *RuleWithOperationsApplyConfiguration { +func (b *RuleWithOperationsApplyConfiguration) WithOperations(values ...admissionregistrationv1.OperationType) *RuleWithOperationsApplyConfiguration { for i := range values { b.Operations = append(b.Operations, values[i]) } @@ -50,7 +50,7 @@ func (b *RuleWithOperationsApplyConfiguration) WithOperations(values ...v1.Opera // If called multiple times, values provided by each call will be appended to the APIGroups field. 
func (b *RuleWithOperationsApplyConfiguration) WithAPIGroups(values ...string) *RuleWithOperationsApplyConfiguration { for i := range values { - b.APIGroups = append(b.APIGroups, values[i]) + b.RuleApplyConfiguration.APIGroups = append(b.RuleApplyConfiguration.APIGroups, values[i]) } return b } @@ -60,7 +60,7 @@ func (b *RuleWithOperationsApplyConfiguration) WithAPIGroups(values ...string) * // If called multiple times, values provided by each call will be appended to the APIVersions field. func (b *RuleWithOperationsApplyConfiguration) WithAPIVersions(values ...string) *RuleWithOperationsApplyConfiguration { for i := range values { - b.APIVersions = append(b.APIVersions, values[i]) + b.RuleApplyConfiguration.APIVersions = append(b.RuleApplyConfiguration.APIVersions, values[i]) } return b } @@ -70,7 +70,7 @@ func (b *RuleWithOperationsApplyConfiguration) WithAPIVersions(values ...string) // If called multiple times, values provided by each call will be appended to the Resources field. func (b *RuleWithOperationsApplyConfiguration) WithResources(values ...string) *RuleWithOperationsApplyConfiguration { for i := range values { - b.Resources = append(b.Resources, values[i]) + b.RuleApplyConfiguration.Resources = append(b.RuleApplyConfiguration.Resources, values[i]) } return b } @@ -78,7 +78,7 @@ func (b *RuleWithOperationsApplyConfiguration) WithResources(values ...string) * // WithScope sets the Scope field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Scope field is set to the value of the last call. -func (b *RuleWithOperationsApplyConfiguration) WithScope(value v1.ScopeType) *RuleWithOperationsApplyConfiguration { - b.Scope = &value +func (b *RuleWithOperationsApplyConfiguration) WithScope(value admissionregistrationv1.ScopeType) *RuleWithOperationsApplyConfiguration { + b.RuleApplyConfiguration.Scope = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingadmissionpolicy.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingadmissionpolicy.go index 841209cae..730de0369 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingadmissionpolicy.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingadmissionpolicy.go @@ -19,21 +19,21 @@ limitations under the License. package v1 import ( - apiadmissionregistrationv1 "k8s.io/api/admissionregistration/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + admissionregistrationv1 "k8s.io/api/admissionregistration/v1" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" internal "k8s.io/client-go/applyconfigurations/internal" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) // ValidatingAdmissionPolicyApplyConfiguration represents a declarative configuration of the ValidatingAdmissionPolicy type for use // with apply. 
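The rule and rule-with-operations builder changes above (and the similar apply-configuration churn that follows) only qualify access to embedded fields and rename import aliases; the chained With* API that callers use is unchanged. A small usage sketch under that assumption follows, built only from constructors and setters that appear in this package (ValidatingWebhookConfiguration, ValidatingWebhook, RuleWithOperations); the names example-webhook and validate.example.io are placeholders.

package example

import (
	admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
	admissionregistrationv1ac "k8s.io/client-go/applyconfigurations/admissionregistration/v1"
)

// newExampleWebhookConfig builds a ValidatingWebhookConfiguration apply
// configuration with the same chained With* calls the generated code exposes.
func newExampleWebhookConfig() *admissionregistrationv1ac.ValidatingWebhookConfigurationApplyConfiguration {
	return admissionregistrationv1ac.ValidatingWebhookConfiguration("example-webhook").
		WithLabels(map[string]string{"app": "example"}).
		WithWebhooks(admissionregistrationv1ac.ValidatingWebhook().
			WithName("validate.example.io").
			WithRules(admissionregistrationv1ac.RuleWithOperations().
				WithOperations(admissionregistrationv1.Create, admissionregistrationv1.Update).
				WithAPIGroups("").
				WithAPIVersions("v1").
				WithResources("pods").
				WithScope(admissionregistrationv1.NamespacedScope)))
}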
type ValidatingAdmissionPolicyApplyConfiguration struct { - v1.TypeMetaApplyConfiguration `json:",inline"` - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Spec *ValidatingAdmissionPolicySpecApplyConfiguration `json:"spec,omitempty"` - Status *ValidatingAdmissionPolicyStatusApplyConfiguration `json:"status,omitempty"` + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *ValidatingAdmissionPolicySpecApplyConfiguration `json:"spec,omitempty"` + Status *ValidatingAdmissionPolicyStatusApplyConfiguration `json:"status,omitempty"` } // ValidatingAdmissionPolicy constructs a declarative configuration of the ValidatingAdmissionPolicy type for use with @@ -57,18 +57,18 @@ func ValidatingAdmissionPolicy(name string) *ValidatingAdmissionPolicyApplyConfi // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! -func ExtractValidatingAdmissionPolicy(validatingAdmissionPolicy *apiadmissionregistrationv1.ValidatingAdmissionPolicy, fieldManager string) (*ValidatingAdmissionPolicyApplyConfiguration, error) { +func ExtractValidatingAdmissionPolicy(validatingAdmissionPolicy *admissionregistrationv1.ValidatingAdmissionPolicy, fieldManager string) (*ValidatingAdmissionPolicyApplyConfiguration, error) { return extractValidatingAdmissionPolicy(validatingAdmissionPolicy, fieldManager, "") } // ExtractValidatingAdmissionPolicyStatus is the same as ExtractValidatingAdmissionPolicy except // that it extracts the status subresource applied configuration. // Experimental! -func ExtractValidatingAdmissionPolicyStatus(validatingAdmissionPolicy *apiadmissionregistrationv1.ValidatingAdmissionPolicy, fieldManager string) (*ValidatingAdmissionPolicyApplyConfiguration, error) { +func ExtractValidatingAdmissionPolicyStatus(validatingAdmissionPolicy *admissionregistrationv1.ValidatingAdmissionPolicy, fieldManager string) (*ValidatingAdmissionPolicyApplyConfiguration, error) { return extractValidatingAdmissionPolicy(validatingAdmissionPolicy, fieldManager, "status") } -func extractValidatingAdmissionPolicy(validatingAdmissionPolicy *apiadmissionregistrationv1.ValidatingAdmissionPolicy, fieldManager string, subresource string) (*ValidatingAdmissionPolicyApplyConfiguration, error) { +func extractValidatingAdmissionPolicy(validatingAdmissionPolicy *admissionregistrationv1.ValidatingAdmissionPolicy, fieldManager string, subresource string) (*ValidatingAdmissionPolicyApplyConfiguration, error) { b := &ValidatingAdmissionPolicyApplyConfiguration{} err := managedfields.ExtractInto(validatingAdmissionPolicy, internal.Parser().Type("io.k8s.api.admissionregistration.v1.ValidatingAdmissionPolicy"), fieldManager, b, subresource) if err != nil { @@ -85,7 +85,7 @@ func extractValidatingAdmissionPolicy(validatingAdmissionPolicy *apiadmissionreg // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. 
func (b *ValidatingAdmissionPolicyApplyConfiguration) WithKind(value string) *ValidatingAdmissionPolicyApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -93,7 +93,7 @@ func (b *ValidatingAdmissionPolicyApplyConfiguration) WithKind(value string) *Va // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *ValidatingAdmissionPolicyApplyConfiguration) WithAPIVersion(value string) *ValidatingAdmissionPolicyApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -102,7 +102,7 @@ func (b *ValidatingAdmissionPolicyApplyConfiguration) WithAPIVersion(value strin // If called multiple times, the Name field is set to the value of the last call. func (b *ValidatingAdmissionPolicyApplyConfiguration) WithName(value string) *ValidatingAdmissionPolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -111,7 +111,7 @@ func (b *ValidatingAdmissionPolicyApplyConfiguration) WithName(value string) *Va // If called multiple times, the GenerateName field is set to the value of the last call. func (b *ValidatingAdmissionPolicyApplyConfiguration) WithGenerateName(value string) *ValidatingAdmissionPolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -120,7 +120,7 @@ func (b *ValidatingAdmissionPolicyApplyConfiguration) WithGenerateName(value str // If called multiple times, the Namespace field is set to the value of the last call. func (b *ValidatingAdmissionPolicyApplyConfiguration) WithNamespace(value string) *ValidatingAdmissionPolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -129,7 +129,7 @@ func (b *ValidatingAdmissionPolicyApplyConfiguration) WithNamespace(value string // If called multiple times, the UID field is set to the value of the last call. func (b *ValidatingAdmissionPolicyApplyConfiguration) WithUID(value types.UID) *ValidatingAdmissionPolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -138,7 +138,7 @@ func (b *ValidatingAdmissionPolicyApplyConfiguration) WithUID(value types.UID) * // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *ValidatingAdmissionPolicyApplyConfiguration) WithResourceVersion(value string) *ValidatingAdmissionPolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -147,25 +147,25 @@ func (b *ValidatingAdmissionPolicyApplyConfiguration) WithResourceVersion(value // If called multiple times, the Generation field is set to the value of the last call. 
func (b *ValidatingAdmissionPolicyApplyConfiguration) WithGeneration(value int64) *ValidatingAdmissionPolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CreationTimestamp field is set to the value of the last call. -func (b *ValidatingAdmissionPolicyApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ValidatingAdmissionPolicyApplyConfiguration { +func (b *ValidatingAdmissionPolicyApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *ValidatingAdmissionPolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. -func (b *ValidatingAdmissionPolicyApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ValidatingAdmissionPolicyApplyConfiguration { +func (b *ValidatingAdmissionPolicyApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *ValidatingAdmissionPolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -174,7 +174,7 @@ func (b *ValidatingAdmissionPolicyApplyConfiguration) WithDeletionTimestamp(valu // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *ValidatingAdmissionPolicyApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ValidatingAdmissionPolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -184,11 +184,11 @@ func (b *ValidatingAdmissionPolicyApplyConfiguration) WithDeletionGracePeriodSec // overwriting an existing map entries in Labels field with the same key. func (b *ValidatingAdmissionPolicyApplyConfiguration) WithLabels(entries map[string]string) *ValidatingAdmissionPolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -199,11 +199,11 @@ func (b *ValidatingAdmissionPolicyApplyConfiguration) WithLabels(entries map[str // overwriting an existing map entries in Annotations field with the same key. 
func (b *ValidatingAdmissionPolicyApplyConfiguration) WithAnnotations(entries map[string]string) *ValidatingAdmissionPolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -211,13 +211,13 @@ func (b *ValidatingAdmissionPolicyApplyConfiguration) WithAnnotations(entries ma // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the OwnerReferences field. -func (b *ValidatingAdmissionPolicyApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ValidatingAdmissionPolicyApplyConfiguration { +func (b *ValidatingAdmissionPolicyApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *ValidatingAdmissionPolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -228,14 +228,14 @@ func (b *ValidatingAdmissionPolicyApplyConfiguration) WithOwnerReferences(values func (b *ValidatingAdmissionPolicyApplyConfiguration) WithFinalizers(values ...string) *ValidatingAdmissionPolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } func (b *ValidatingAdmissionPolicyApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} } } @@ -258,5 +258,5 @@ func (b *ValidatingAdmissionPolicyApplyConfiguration) WithStatus(value *Validati // GetName retrieves the value of the Name field in the declarative configuration. func (b *ValidatingAdmissionPolicyApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingadmissionpolicybinding.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingadmissionpolicybinding.go index 1acad056f..2921a711f 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingadmissionpolicybinding.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingadmissionpolicybinding.go @@ -19,20 +19,20 @@ limitations under the License. 
package v1 import ( - apiadmissionregistrationv1 "k8s.io/api/admissionregistration/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + admissionregistrationv1 "k8s.io/api/admissionregistration/v1" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" internal "k8s.io/client-go/applyconfigurations/internal" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) // ValidatingAdmissionPolicyBindingApplyConfiguration represents a declarative configuration of the ValidatingAdmissionPolicyBinding type for use // with apply. type ValidatingAdmissionPolicyBindingApplyConfiguration struct { - v1.TypeMetaApplyConfiguration `json:",inline"` - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Spec *ValidatingAdmissionPolicyBindingSpecApplyConfiguration `json:"spec,omitempty"` + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *ValidatingAdmissionPolicyBindingSpecApplyConfiguration `json:"spec,omitempty"` } // ValidatingAdmissionPolicyBinding constructs a declarative configuration of the ValidatingAdmissionPolicyBinding type for use with @@ -56,18 +56,18 @@ func ValidatingAdmissionPolicyBinding(name string) *ValidatingAdmissionPolicyBin // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! -func ExtractValidatingAdmissionPolicyBinding(validatingAdmissionPolicyBinding *apiadmissionregistrationv1.ValidatingAdmissionPolicyBinding, fieldManager string) (*ValidatingAdmissionPolicyBindingApplyConfiguration, error) { +func ExtractValidatingAdmissionPolicyBinding(validatingAdmissionPolicyBinding *admissionregistrationv1.ValidatingAdmissionPolicyBinding, fieldManager string) (*ValidatingAdmissionPolicyBindingApplyConfiguration, error) { return extractValidatingAdmissionPolicyBinding(validatingAdmissionPolicyBinding, fieldManager, "") } // ExtractValidatingAdmissionPolicyBindingStatus is the same as ExtractValidatingAdmissionPolicyBinding except // that it extracts the status subresource applied configuration. // Experimental! 
-func ExtractValidatingAdmissionPolicyBindingStatus(validatingAdmissionPolicyBinding *apiadmissionregistrationv1.ValidatingAdmissionPolicyBinding, fieldManager string) (*ValidatingAdmissionPolicyBindingApplyConfiguration, error) { +func ExtractValidatingAdmissionPolicyBindingStatus(validatingAdmissionPolicyBinding *admissionregistrationv1.ValidatingAdmissionPolicyBinding, fieldManager string) (*ValidatingAdmissionPolicyBindingApplyConfiguration, error) { return extractValidatingAdmissionPolicyBinding(validatingAdmissionPolicyBinding, fieldManager, "status") } -func extractValidatingAdmissionPolicyBinding(validatingAdmissionPolicyBinding *apiadmissionregistrationv1.ValidatingAdmissionPolicyBinding, fieldManager string, subresource string) (*ValidatingAdmissionPolicyBindingApplyConfiguration, error) { +func extractValidatingAdmissionPolicyBinding(validatingAdmissionPolicyBinding *admissionregistrationv1.ValidatingAdmissionPolicyBinding, fieldManager string, subresource string) (*ValidatingAdmissionPolicyBindingApplyConfiguration, error) { b := &ValidatingAdmissionPolicyBindingApplyConfiguration{} err := managedfields.ExtractInto(validatingAdmissionPolicyBinding, internal.Parser().Type("io.k8s.api.admissionregistration.v1.ValidatingAdmissionPolicyBinding"), fieldManager, b, subresource) if err != nil { @@ -84,7 +84,7 @@ func extractValidatingAdmissionPolicyBinding(validatingAdmissionPolicyBinding *a // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithKind(value string) *ValidatingAdmissionPolicyBindingApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -92,7 +92,7 @@ func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithKind(value stri // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithAPIVersion(value string) *ValidatingAdmissionPolicyBindingApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -101,7 +101,7 @@ func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithAPIVersion(valu // If called multiple times, the Name field is set to the value of the last call. func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithName(value string) *ValidatingAdmissionPolicyBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -110,7 +110,7 @@ func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithName(value stri // If called multiple times, the GenerateName field is set to the value of the last call. func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithGenerateName(value string) *ValidatingAdmissionPolicyBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -119,7 +119,7 @@ func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithGenerateName(va // If called multiple times, the Namespace field is set to the value of the last call. 
func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithNamespace(value string) *ValidatingAdmissionPolicyBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -128,7 +128,7 @@ func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithNamespace(value // If called multiple times, the UID field is set to the value of the last call. func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithUID(value types.UID) *ValidatingAdmissionPolicyBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -137,7 +137,7 @@ func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithUID(value types // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithResourceVersion(value string) *ValidatingAdmissionPolicyBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -146,25 +146,25 @@ func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithResourceVersion // If called multiple times, the Generation field is set to the value of the last call. func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithGeneration(value int64) *ValidatingAdmissionPolicyBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CreationTimestamp field is set to the value of the last call. -func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ValidatingAdmissionPolicyBindingApplyConfiguration { +func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *ValidatingAdmissionPolicyBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. -func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ValidatingAdmissionPolicyBindingApplyConfiguration { +func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *ValidatingAdmissionPolicyBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -173,7 +173,7 @@ func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithDeletionTimesta // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. 
func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ValidatingAdmissionPolicyBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -183,11 +183,11 @@ func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithDeletionGracePe // overwriting an existing map entries in Labels field with the same key. func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithLabels(entries map[string]string) *ValidatingAdmissionPolicyBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -198,11 +198,11 @@ func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithLabels(entries // overwriting an existing map entries in Annotations field with the same key. func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithAnnotations(entries map[string]string) *ValidatingAdmissionPolicyBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -210,13 +210,13 @@ func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithAnnotations(ent // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the OwnerReferences field. 
-func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ValidatingAdmissionPolicyBindingApplyConfiguration { +func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *ValidatingAdmissionPolicyBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -227,14 +227,14 @@ func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithOwnerReferences func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithFinalizers(values ...string) *ValidatingAdmissionPolicyBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} } } @@ -249,5 +249,5 @@ func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithSpec(value *Val // GetName retrieves the value of the Name field in the declarative configuration. func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingwebhookconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingwebhookconfiguration.go index 0d1a6c81a..a7bebb59f 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingwebhookconfiguration.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingwebhookconfiguration.go @@ -19,20 +19,20 @@ limitations under the License. package v1 import ( - apiadmissionregistrationv1 "k8s.io/api/admissionregistration/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + admissionregistrationv1 "k8s.io/api/admissionregistration/v1" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" internal "k8s.io/client-go/applyconfigurations/internal" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) // ValidatingWebhookConfigurationApplyConfiguration represents a declarative configuration of the ValidatingWebhookConfiguration type for use // with apply. 
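For completeness, a hedged sketch of handing one of these apply configurations to the typed clientset's server-side Apply. The clientset accessor names come from client-go's admissionregistration/v1 typed client rather than from this diff, and the binding name example-binding and field manager example-field-manager are arbitrary placeholders.

package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	admissionregistrationv1ac "k8s.io/client-go/applyconfigurations/admissionregistration/v1"
	"k8s.io/client-go/kubernetes"
)

// applyPolicyBinding server-side applies a ValidatingAdmissionPolicyBinding
// built with the generated apply-configuration types; the field manager owns
// the fields set on the builder.
func applyPolicyBinding(ctx context.Context, cs kubernetes.Interface) error {
	binding := admissionregistrationv1ac.ValidatingAdmissionPolicyBinding("example-binding").
		WithLabels(map[string]string{"app": "example"})

	_, err := cs.AdmissionregistrationV1().ValidatingAdmissionPolicyBindings().
		Apply(ctx, binding, metav1.ApplyOptions{FieldManager: "example-field-manager"})
	return err
}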
type ValidatingWebhookConfigurationApplyConfiguration struct { - v1.TypeMetaApplyConfiguration `json:",inline"` - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Webhooks []ValidatingWebhookApplyConfiguration `json:"webhooks,omitempty"` + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Webhooks []ValidatingWebhookApplyConfiguration `json:"webhooks,omitempty"` } // ValidatingWebhookConfiguration constructs a declarative configuration of the ValidatingWebhookConfiguration type for use with @@ -56,18 +56,18 @@ func ValidatingWebhookConfiguration(name string) *ValidatingWebhookConfiguration // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! -func ExtractValidatingWebhookConfiguration(validatingWebhookConfiguration *apiadmissionregistrationv1.ValidatingWebhookConfiguration, fieldManager string) (*ValidatingWebhookConfigurationApplyConfiguration, error) { +func ExtractValidatingWebhookConfiguration(validatingWebhookConfiguration *admissionregistrationv1.ValidatingWebhookConfiguration, fieldManager string) (*ValidatingWebhookConfigurationApplyConfiguration, error) { return extractValidatingWebhookConfiguration(validatingWebhookConfiguration, fieldManager, "") } // ExtractValidatingWebhookConfigurationStatus is the same as ExtractValidatingWebhookConfiguration except // that it extracts the status subresource applied configuration. // Experimental! -func ExtractValidatingWebhookConfigurationStatus(validatingWebhookConfiguration *apiadmissionregistrationv1.ValidatingWebhookConfiguration, fieldManager string) (*ValidatingWebhookConfigurationApplyConfiguration, error) { +func ExtractValidatingWebhookConfigurationStatus(validatingWebhookConfiguration *admissionregistrationv1.ValidatingWebhookConfiguration, fieldManager string) (*ValidatingWebhookConfigurationApplyConfiguration, error) { return extractValidatingWebhookConfiguration(validatingWebhookConfiguration, fieldManager, "status") } -func extractValidatingWebhookConfiguration(validatingWebhookConfiguration *apiadmissionregistrationv1.ValidatingWebhookConfiguration, fieldManager string, subresource string) (*ValidatingWebhookConfigurationApplyConfiguration, error) { +func extractValidatingWebhookConfiguration(validatingWebhookConfiguration *admissionregistrationv1.ValidatingWebhookConfiguration, fieldManager string, subresource string) (*ValidatingWebhookConfigurationApplyConfiguration, error) { b := &ValidatingWebhookConfigurationApplyConfiguration{} err := managedfields.ExtractInto(validatingWebhookConfiguration, internal.Parser().Type("io.k8s.api.admissionregistration.v1.ValidatingWebhookConfiguration"), fieldManager, b, subresource) if err != nil { @@ -84,7 +84,7 @@ func extractValidatingWebhookConfiguration(validatingWebhookConfiguration *apiad // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. 
func (b *ValidatingWebhookConfigurationApplyConfiguration) WithKind(value string) *ValidatingWebhookConfigurationApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -92,7 +92,7 @@ func (b *ValidatingWebhookConfigurationApplyConfiguration) WithKind(value string // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *ValidatingWebhookConfigurationApplyConfiguration) WithAPIVersion(value string) *ValidatingWebhookConfigurationApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -101,7 +101,7 @@ func (b *ValidatingWebhookConfigurationApplyConfiguration) WithAPIVersion(value // If called multiple times, the Name field is set to the value of the last call. func (b *ValidatingWebhookConfigurationApplyConfiguration) WithName(value string) *ValidatingWebhookConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -110,7 +110,7 @@ func (b *ValidatingWebhookConfigurationApplyConfiguration) WithName(value string // If called multiple times, the GenerateName field is set to the value of the last call. func (b *ValidatingWebhookConfigurationApplyConfiguration) WithGenerateName(value string) *ValidatingWebhookConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -119,7 +119,7 @@ func (b *ValidatingWebhookConfigurationApplyConfiguration) WithGenerateName(valu // If called multiple times, the Namespace field is set to the value of the last call. func (b *ValidatingWebhookConfigurationApplyConfiguration) WithNamespace(value string) *ValidatingWebhookConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -128,7 +128,7 @@ func (b *ValidatingWebhookConfigurationApplyConfiguration) WithNamespace(value s // If called multiple times, the UID field is set to the value of the last call. func (b *ValidatingWebhookConfigurationApplyConfiguration) WithUID(value types.UID) *ValidatingWebhookConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -137,7 +137,7 @@ func (b *ValidatingWebhookConfigurationApplyConfiguration) WithUID(value types.U // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *ValidatingWebhookConfigurationApplyConfiguration) WithResourceVersion(value string) *ValidatingWebhookConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -146,25 +146,25 @@ func (b *ValidatingWebhookConfigurationApplyConfiguration) WithResourceVersion(v // If called multiple times, the Generation field is set to the value of the last call. 
func (b *ValidatingWebhookConfigurationApplyConfiguration) WithGeneration(value int64) *ValidatingWebhookConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CreationTimestamp field is set to the value of the last call. -func (b *ValidatingWebhookConfigurationApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ValidatingWebhookConfigurationApplyConfiguration { +func (b *ValidatingWebhookConfigurationApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *ValidatingWebhookConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. -func (b *ValidatingWebhookConfigurationApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ValidatingWebhookConfigurationApplyConfiguration { +func (b *ValidatingWebhookConfigurationApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *ValidatingWebhookConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -173,7 +173,7 @@ func (b *ValidatingWebhookConfigurationApplyConfiguration) WithDeletionTimestamp // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *ValidatingWebhookConfigurationApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ValidatingWebhookConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -183,11 +183,11 @@ func (b *ValidatingWebhookConfigurationApplyConfiguration) WithDeletionGracePeri // overwriting an existing map entries in Labels field with the same key. func (b *ValidatingWebhookConfigurationApplyConfiguration) WithLabels(entries map[string]string) *ValidatingWebhookConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -198,11 +198,11 @@ func (b *ValidatingWebhookConfigurationApplyConfiguration) WithLabels(entries ma // overwriting an existing map entries in Annotations field with the same key. 
func (b *ValidatingWebhookConfigurationApplyConfiguration) WithAnnotations(entries map[string]string) *ValidatingWebhookConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -210,13 +210,13 @@ func (b *ValidatingWebhookConfigurationApplyConfiguration) WithAnnotations(entri // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the OwnerReferences field. -func (b *ValidatingWebhookConfigurationApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ValidatingWebhookConfigurationApplyConfiguration { +func (b *ValidatingWebhookConfigurationApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *ValidatingWebhookConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -227,14 +227,14 @@ func (b *ValidatingWebhookConfigurationApplyConfiguration) WithOwnerReferences(v func (b *ValidatingWebhookConfigurationApplyConfiguration) WithFinalizers(values ...string) *ValidatingWebhookConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } func (b *ValidatingWebhookConfigurationApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} } } @@ -254,5 +254,5 @@ func (b *ValidatingWebhookConfigurationApplyConfiguration) WithWebhooks(values . // GetName retrieves the value of the Name field in the declarative configuration. func (b *ValidatingWebhookConfigurationApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validation.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validation.go index 2a828b6b4..9966a7a28 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validation.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validation.go @@ -19,16 +19,16 @@ limitations under the License. package v1 import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) // ValidationApplyConfiguration represents a declarative configuration of the Validation type for use // with apply. 
type ValidationApplyConfiguration struct { - Expression *string `json:"expression,omitempty"` - Message *string `json:"message,omitempty"` - Reason *v1.StatusReason `json:"reason,omitempty"` - MessageExpression *string `json:"messageExpression,omitempty"` + Expression *string `json:"expression,omitempty"` + Message *string `json:"message,omitempty"` + Reason *metav1.StatusReason `json:"reason,omitempty"` + MessageExpression *string `json:"messageExpression,omitempty"` } // ValidationApplyConfiguration constructs a declarative configuration of the Validation type for use with @@ -56,7 +56,7 @@ func (b *ValidationApplyConfiguration) WithMessage(value string) *ValidationAppl // WithReason sets the Reason field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Reason field is set to the value of the last call. -func (b *ValidationApplyConfiguration) WithReason(value v1.StatusReason) *ValidationApplyConfiguration { +func (b *ValidationApplyConfiguration) WithReason(value metav1.StatusReason) *ValidationApplyConfiguration { b.Reason = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/applyconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/applyconfiguration.go new file mode 100644 index 000000000..b08ac7224 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/applyconfiguration.go @@ -0,0 +1,39 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +// ApplyConfigurationApplyConfiguration represents a declarative configuration of the ApplyConfiguration type for use +// with apply. +type ApplyConfigurationApplyConfiguration struct { + Expression *string `json:"expression,omitempty"` +} + +// ApplyConfigurationApplyConfiguration constructs a declarative configuration of the ApplyConfiguration type for use with +// apply. +func ApplyConfiguration() *ApplyConfigurationApplyConfiguration { + return &ApplyConfigurationApplyConfiguration{} +} + +// WithExpression sets the Expression field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Expression field is set to the value of the last call. 
+func (b *ApplyConfigurationApplyConfiguration) WithExpression(value string) *ApplyConfigurationApplyConfiguration { + b.Expression = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/jsonpatch.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/jsonpatch.go new file mode 100644 index 000000000..418d86a2b --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/jsonpatch.go @@ -0,0 +1,39 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +// JSONPatchApplyConfiguration represents a declarative configuration of the JSONPatch type for use +// with apply. +type JSONPatchApplyConfiguration struct { + Expression *string `json:"expression,omitempty"` +} + +// JSONPatchApplyConfiguration constructs a declarative configuration of the JSONPatch type for use with +// apply. +func JSONPatch() *JSONPatchApplyConfiguration { + return &JSONPatchApplyConfiguration{} +} + +// WithExpression sets the Expression field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Expression field is set to the value of the last call. +func (b *JSONPatchApplyConfiguration) WithExpression(value string) *JSONPatchApplyConfiguration { + b.Expression = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/mutatingadmissionpolicy.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/mutatingadmissionpolicy.go new file mode 100644 index 000000000..d66071c18 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/mutatingadmissionpolicy.go @@ -0,0 +1,253 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. 
+ +package v1alpha1 + +import ( + admissionregistrationv1alpha1 "k8s.io/api/admissionregistration/v1alpha1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + managedfields "k8s.io/apimachinery/pkg/util/managedfields" + internal "k8s.io/client-go/applyconfigurations/internal" + v1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// MutatingAdmissionPolicyApplyConfiguration represents a declarative configuration of the MutatingAdmissionPolicy type for use +// with apply. +type MutatingAdmissionPolicyApplyConfiguration struct { + v1.TypeMetaApplyConfiguration `json:",inline"` + *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *MutatingAdmissionPolicySpecApplyConfiguration `json:"spec,omitempty"` +} + +// MutatingAdmissionPolicy constructs a declarative configuration of the MutatingAdmissionPolicy type for use with +// apply. +func MutatingAdmissionPolicy(name string) *MutatingAdmissionPolicyApplyConfiguration { + b := &MutatingAdmissionPolicyApplyConfiguration{} + b.WithName(name) + b.WithKind("MutatingAdmissionPolicy") + b.WithAPIVersion("admissionregistration.k8s.io/v1alpha1") + return b +} + +// ExtractMutatingAdmissionPolicy extracts the applied configuration owned by fieldManager from +// mutatingAdmissionPolicy. If no managedFields are found in mutatingAdmissionPolicy for fieldManager, a +// MutatingAdmissionPolicyApplyConfiguration is returned with only the Name, Namespace (if applicable), +// APIVersion and Kind populated. It is possible that no managed fields were found for because other +// field managers have taken ownership of all the fields previously owned by fieldManager, or because +// the fieldManager never owned fields any fields. +// mutatingAdmissionPolicy must be a unmodified MutatingAdmissionPolicy API object that was retrieved from the Kubernetes API. +// ExtractMutatingAdmissionPolicy provides a way to perform a extract/modify-in-place/apply workflow. +// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously +// applied if another fieldManager has updated or force applied any of the previously applied fields. +// Experimental! +func ExtractMutatingAdmissionPolicy(mutatingAdmissionPolicy *admissionregistrationv1alpha1.MutatingAdmissionPolicy, fieldManager string) (*MutatingAdmissionPolicyApplyConfiguration, error) { + return extractMutatingAdmissionPolicy(mutatingAdmissionPolicy, fieldManager, "") +} + +// ExtractMutatingAdmissionPolicyStatus is the same as ExtractMutatingAdmissionPolicy except +// that it extracts the status subresource applied configuration. +// Experimental! 
+func ExtractMutatingAdmissionPolicyStatus(mutatingAdmissionPolicy *admissionregistrationv1alpha1.MutatingAdmissionPolicy, fieldManager string) (*MutatingAdmissionPolicyApplyConfiguration, error) { + return extractMutatingAdmissionPolicy(mutatingAdmissionPolicy, fieldManager, "status") +} + +func extractMutatingAdmissionPolicy(mutatingAdmissionPolicy *admissionregistrationv1alpha1.MutatingAdmissionPolicy, fieldManager string, subresource string) (*MutatingAdmissionPolicyApplyConfiguration, error) { + b := &MutatingAdmissionPolicyApplyConfiguration{} + err := managedfields.ExtractInto(mutatingAdmissionPolicy, internal.Parser().Type("io.k8s.api.admissionregistration.v1alpha1.MutatingAdmissionPolicy"), fieldManager, b, subresource) + if err != nil { + return nil, err + } + b.WithName(mutatingAdmissionPolicy.Name) + + b.WithKind("MutatingAdmissionPolicy") + b.WithAPIVersion("admissionregistration.k8s.io/v1alpha1") + return b, nil +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. +func (b *MutatingAdmissionPolicyApplyConfiguration) WithKind(value string) *MutatingAdmissionPolicyApplyConfiguration { + b.TypeMetaApplyConfiguration.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *MutatingAdmissionPolicyApplyConfiguration) WithAPIVersion(value string) *MutatingAdmissionPolicyApplyConfiguration { + b.TypeMetaApplyConfiguration.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *MutatingAdmissionPolicyApplyConfiguration) WithName(value string) *MutatingAdmissionPolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *MutatingAdmissionPolicyApplyConfiguration) WithGenerateName(value string) *MutatingAdmissionPolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. 
+func (b *MutatingAdmissionPolicyApplyConfiguration) WithNamespace(value string) *MutatingAdmissionPolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. +func (b *MutatingAdmissionPolicyApplyConfiguration) WithUID(value types.UID) *MutatingAdmissionPolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *MutatingAdmissionPolicyApplyConfiguration) WithResourceVersion(value string) *MutatingAdmissionPolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *MutatingAdmissionPolicyApplyConfiguration) WithGeneration(value int64) *MutatingAdmissionPolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. +func (b *MutatingAdmissionPolicyApplyConfiguration) WithCreationTimestamp(value metav1.Time) *MutatingAdmissionPolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. +func (b *MutatingAdmissionPolicyApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *MutatingAdmissionPolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. 
+func (b *MutatingAdmissionPolicyApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *MutatingAdmissionPolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting an existing map entries in Labels field with the same key. +func (b *MutatingAdmissionPolicyApplyConfiguration) WithLabels(entries map[string]string) *MutatingAdmissionPolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.ObjectMetaApplyConfiguration.Labels[k] = v + } + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting an existing map entries in Annotations field with the same key. +func (b *MutatingAdmissionPolicyApplyConfiguration) WithAnnotations(entries map[string]string) *MutatingAdmissionPolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.ObjectMetaApplyConfiguration.Annotations[k] = v + } + return b +} + +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the OwnerReferences field. +func (b *MutatingAdmissionPolicyApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *MutatingAdmissionPolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOwnerReferences") + } + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) + } + return b +} + +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Finalizers field. 
+func (b *MutatingAdmissionPolicyApplyConfiguration) WithFinalizers(values ...string) *MutatingAdmissionPolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) + } + return b +} + +func (b *MutatingAdmissionPolicyApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + } +} + +// WithSpec sets the Spec field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Spec field is set to the value of the last call. +func (b *MutatingAdmissionPolicyApplyConfiguration) WithSpec(value *MutatingAdmissionPolicySpecApplyConfiguration) *MutatingAdmissionPolicyApplyConfiguration { + b.Spec = value + return b +} + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *MutatingAdmissionPolicyApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/mutatingadmissionpolicybinding.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/mutatingadmissionpolicybinding.go new file mode 100644 index 000000000..7cccd291b --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/mutatingadmissionpolicybinding.go @@ -0,0 +1,253 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + admissionregistrationv1alpha1 "k8s.io/api/admissionregistration/v1alpha1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + managedfields "k8s.io/apimachinery/pkg/util/managedfields" + internal "k8s.io/client-go/applyconfigurations/internal" + v1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// MutatingAdmissionPolicyBindingApplyConfiguration represents a declarative configuration of the MutatingAdmissionPolicyBinding type for use +// with apply. +type MutatingAdmissionPolicyBindingApplyConfiguration struct { + v1.TypeMetaApplyConfiguration `json:",inline"` + *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *MutatingAdmissionPolicyBindingSpecApplyConfiguration `json:"spec,omitempty"` +} + +// MutatingAdmissionPolicyBinding constructs a declarative configuration of the MutatingAdmissionPolicyBinding type for use with +// apply. 
+func MutatingAdmissionPolicyBinding(name string) *MutatingAdmissionPolicyBindingApplyConfiguration { + b := &MutatingAdmissionPolicyBindingApplyConfiguration{} + b.WithName(name) + b.WithKind("MutatingAdmissionPolicyBinding") + b.WithAPIVersion("admissionregistration.k8s.io/v1alpha1") + return b +} + +// ExtractMutatingAdmissionPolicyBinding extracts the applied configuration owned by fieldManager from +// mutatingAdmissionPolicyBinding. If no managedFields are found in mutatingAdmissionPolicyBinding for fieldManager, a +// MutatingAdmissionPolicyBindingApplyConfiguration is returned with only the Name, Namespace (if applicable), +// APIVersion and Kind populated. It is possible that no managed fields were found for because other +// field managers have taken ownership of all the fields previously owned by fieldManager, or because +// the fieldManager never owned fields any fields. +// mutatingAdmissionPolicyBinding must be a unmodified MutatingAdmissionPolicyBinding API object that was retrieved from the Kubernetes API. +// ExtractMutatingAdmissionPolicyBinding provides a way to perform a extract/modify-in-place/apply workflow. +// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously +// applied if another fieldManager has updated or force applied any of the previously applied fields. +// Experimental! +func ExtractMutatingAdmissionPolicyBinding(mutatingAdmissionPolicyBinding *admissionregistrationv1alpha1.MutatingAdmissionPolicyBinding, fieldManager string) (*MutatingAdmissionPolicyBindingApplyConfiguration, error) { + return extractMutatingAdmissionPolicyBinding(mutatingAdmissionPolicyBinding, fieldManager, "") +} + +// ExtractMutatingAdmissionPolicyBindingStatus is the same as ExtractMutatingAdmissionPolicyBinding except +// that it extracts the status subresource applied configuration. +// Experimental! +func ExtractMutatingAdmissionPolicyBindingStatus(mutatingAdmissionPolicyBinding *admissionregistrationv1alpha1.MutatingAdmissionPolicyBinding, fieldManager string) (*MutatingAdmissionPolicyBindingApplyConfiguration, error) { + return extractMutatingAdmissionPolicyBinding(mutatingAdmissionPolicyBinding, fieldManager, "status") +} + +func extractMutatingAdmissionPolicyBinding(mutatingAdmissionPolicyBinding *admissionregistrationv1alpha1.MutatingAdmissionPolicyBinding, fieldManager string, subresource string) (*MutatingAdmissionPolicyBindingApplyConfiguration, error) { + b := &MutatingAdmissionPolicyBindingApplyConfiguration{} + err := managedfields.ExtractInto(mutatingAdmissionPolicyBinding, internal.Parser().Type("io.k8s.api.admissionregistration.v1alpha1.MutatingAdmissionPolicyBinding"), fieldManager, b, subresource) + if err != nil { + return nil, err + } + b.WithName(mutatingAdmissionPolicyBinding.Name) + + b.WithKind("MutatingAdmissionPolicyBinding") + b.WithAPIVersion("admissionregistration.k8s.io/v1alpha1") + return b, nil +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. 
+func (b *MutatingAdmissionPolicyBindingApplyConfiguration) WithKind(value string) *MutatingAdmissionPolicyBindingApplyConfiguration { + b.TypeMetaApplyConfiguration.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *MutatingAdmissionPolicyBindingApplyConfiguration) WithAPIVersion(value string) *MutatingAdmissionPolicyBindingApplyConfiguration { + b.TypeMetaApplyConfiguration.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *MutatingAdmissionPolicyBindingApplyConfiguration) WithName(value string) *MutatingAdmissionPolicyBindingApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *MutatingAdmissionPolicyBindingApplyConfiguration) WithGenerateName(value string) *MutatingAdmissionPolicyBindingApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *MutatingAdmissionPolicyBindingApplyConfiguration) WithNamespace(value string) *MutatingAdmissionPolicyBindingApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. +func (b *MutatingAdmissionPolicyBindingApplyConfiguration) WithUID(value types.UID) *MutatingAdmissionPolicyBindingApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. 
+func (b *MutatingAdmissionPolicyBindingApplyConfiguration) WithResourceVersion(value string) *MutatingAdmissionPolicyBindingApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *MutatingAdmissionPolicyBindingApplyConfiguration) WithGeneration(value int64) *MutatingAdmissionPolicyBindingApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. +func (b *MutatingAdmissionPolicyBindingApplyConfiguration) WithCreationTimestamp(value metav1.Time) *MutatingAdmissionPolicyBindingApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. +func (b *MutatingAdmissionPolicyBindingApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *MutatingAdmissionPolicyBindingApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. +func (b *MutatingAdmissionPolicyBindingApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *MutatingAdmissionPolicyBindingApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting an existing map entries in Labels field with the same key. 
+func (b *MutatingAdmissionPolicyBindingApplyConfiguration) WithLabels(entries map[string]string) *MutatingAdmissionPolicyBindingApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.ObjectMetaApplyConfiguration.Labels[k] = v + } + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting an existing map entries in Annotations field with the same key. +func (b *MutatingAdmissionPolicyBindingApplyConfiguration) WithAnnotations(entries map[string]string) *MutatingAdmissionPolicyBindingApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.ObjectMetaApplyConfiguration.Annotations[k] = v + } + return b +} + +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the OwnerReferences field. +func (b *MutatingAdmissionPolicyBindingApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *MutatingAdmissionPolicyBindingApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOwnerReferences") + } + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) + } + return b +} + +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Finalizers field. +func (b *MutatingAdmissionPolicyBindingApplyConfiguration) WithFinalizers(values ...string) *MutatingAdmissionPolicyBindingApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) + } + return b +} + +func (b *MutatingAdmissionPolicyBindingApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + } +} + +// WithSpec sets the Spec field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Spec field is set to the value of the last call. +func (b *MutatingAdmissionPolicyBindingApplyConfiguration) WithSpec(value *MutatingAdmissionPolicyBindingSpecApplyConfiguration) *MutatingAdmissionPolicyBindingApplyConfiguration { + b.Spec = value + return b +} + +// GetName retrieves the value of the Name field in the declarative configuration. 
+func (b *MutatingAdmissionPolicyBindingApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/mutatingadmissionpolicybindingspec.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/mutatingadmissionpolicybindingspec.go new file mode 100644 index 000000000..04729f42b --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/mutatingadmissionpolicybindingspec.go @@ -0,0 +1,57 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +// MutatingAdmissionPolicyBindingSpecApplyConfiguration represents a declarative configuration of the MutatingAdmissionPolicyBindingSpec type for use +// with apply. +type MutatingAdmissionPolicyBindingSpecApplyConfiguration struct { + PolicyName *string `json:"policyName,omitempty"` + ParamRef *ParamRefApplyConfiguration `json:"paramRef,omitempty"` + MatchResources *MatchResourcesApplyConfiguration `json:"matchResources,omitempty"` +} + +// MutatingAdmissionPolicyBindingSpecApplyConfiguration constructs a declarative configuration of the MutatingAdmissionPolicyBindingSpec type for use with +// apply. +func MutatingAdmissionPolicyBindingSpec() *MutatingAdmissionPolicyBindingSpecApplyConfiguration { + return &MutatingAdmissionPolicyBindingSpecApplyConfiguration{} +} + +// WithPolicyName sets the PolicyName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the PolicyName field is set to the value of the last call. +func (b *MutatingAdmissionPolicyBindingSpecApplyConfiguration) WithPolicyName(value string) *MutatingAdmissionPolicyBindingSpecApplyConfiguration { + b.PolicyName = &value + return b +} + +// WithParamRef sets the ParamRef field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ParamRef field is set to the value of the last call. +func (b *MutatingAdmissionPolicyBindingSpecApplyConfiguration) WithParamRef(value *ParamRefApplyConfiguration) *MutatingAdmissionPolicyBindingSpecApplyConfiguration { + b.ParamRef = value + return b +} + +// WithMatchResources sets the MatchResources field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the MatchResources field is set to the value of the last call. 
+func (b *MutatingAdmissionPolicyBindingSpecApplyConfiguration) WithMatchResources(value *MatchResourcesApplyConfiguration) *MutatingAdmissionPolicyBindingSpecApplyConfiguration { + b.MatchResources = value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/mutatingadmissionpolicyspec.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/mutatingadmissionpolicyspec.go new file mode 100644 index 000000000..334056a37 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/mutatingadmissionpolicyspec.go @@ -0,0 +1,113 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1 "k8s.io/api/admissionregistration/v1" + admissionregistrationv1alpha1 "k8s.io/api/admissionregistration/v1alpha1" +) + +// MutatingAdmissionPolicySpecApplyConfiguration represents a declarative configuration of the MutatingAdmissionPolicySpec type for use +// with apply. +type MutatingAdmissionPolicySpecApplyConfiguration struct { + ParamKind *ParamKindApplyConfiguration `json:"paramKind,omitempty"` + MatchConstraints *MatchResourcesApplyConfiguration `json:"matchConstraints,omitempty"` + Variables []VariableApplyConfiguration `json:"variables,omitempty"` + Mutations []MutationApplyConfiguration `json:"mutations,omitempty"` + FailurePolicy *admissionregistrationv1alpha1.FailurePolicyType `json:"failurePolicy,omitempty"` + MatchConditions []MatchConditionApplyConfiguration `json:"matchConditions,omitempty"` + ReinvocationPolicy *v1.ReinvocationPolicyType `json:"reinvocationPolicy,omitempty"` +} + +// MutatingAdmissionPolicySpecApplyConfiguration constructs a declarative configuration of the MutatingAdmissionPolicySpec type for use with +// apply. +func MutatingAdmissionPolicySpec() *MutatingAdmissionPolicySpecApplyConfiguration { + return &MutatingAdmissionPolicySpecApplyConfiguration{} +} + +// WithParamKind sets the ParamKind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ParamKind field is set to the value of the last call. +func (b *MutatingAdmissionPolicySpecApplyConfiguration) WithParamKind(value *ParamKindApplyConfiguration) *MutatingAdmissionPolicySpecApplyConfiguration { + b.ParamKind = value + return b +} + +// WithMatchConstraints sets the MatchConstraints field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the MatchConstraints field is set to the value of the last call. 
+func (b *MutatingAdmissionPolicySpecApplyConfiguration) WithMatchConstraints(value *MatchResourcesApplyConfiguration) *MutatingAdmissionPolicySpecApplyConfiguration { + b.MatchConstraints = value + return b +} + +// WithVariables adds the given value to the Variables field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Variables field. +func (b *MutatingAdmissionPolicySpecApplyConfiguration) WithVariables(values ...*VariableApplyConfiguration) *MutatingAdmissionPolicySpecApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithVariables") + } + b.Variables = append(b.Variables, *values[i]) + } + return b +} + +// WithMutations adds the given value to the Mutations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Mutations field. +func (b *MutatingAdmissionPolicySpecApplyConfiguration) WithMutations(values ...*MutationApplyConfiguration) *MutatingAdmissionPolicySpecApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithMutations") + } + b.Mutations = append(b.Mutations, *values[i]) + } + return b +} + +// WithFailurePolicy sets the FailurePolicy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the FailurePolicy field is set to the value of the last call. +func (b *MutatingAdmissionPolicySpecApplyConfiguration) WithFailurePolicy(value admissionregistrationv1alpha1.FailurePolicyType) *MutatingAdmissionPolicySpecApplyConfiguration { + b.FailurePolicy = &value + return b +} + +// WithMatchConditions adds the given value to the MatchConditions field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the MatchConditions field. +func (b *MutatingAdmissionPolicySpecApplyConfiguration) WithMatchConditions(values ...*MatchConditionApplyConfiguration) *MutatingAdmissionPolicySpecApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithMatchConditions") + } + b.MatchConditions = append(b.MatchConditions, *values[i]) + } + return b +} + +// WithReinvocationPolicy sets the ReinvocationPolicy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ReinvocationPolicy field is set to the value of the last call. 
+func (b *MutatingAdmissionPolicySpecApplyConfiguration) WithReinvocationPolicy(value v1.ReinvocationPolicyType) *MutatingAdmissionPolicySpecApplyConfiguration { + b.ReinvocationPolicy = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/mutation.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/mutation.go new file mode 100644 index 000000000..4ed9d93fd --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/mutation.go @@ -0,0 +1,61 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + admissionregistrationv1alpha1 "k8s.io/api/admissionregistration/v1alpha1" +) + +// MutationApplyConfiguration represents a declarative configuration of the Mutation type for use +// with apply. +type MutationApplyConfiguration struct { + PatchType *admissionregistrationv1alpha1.PatchType `json:"patchType,omitempty"` + ApplyConfiguration *ApplyConfigurationApplyConfiguration `json:"applyConfiguration,omitempty"` + JSONPatch *JSONPatchApplyConfiguration `json:"jsonPatch,omitempty"` +} + +// MutationApplyConfiguration constructs a declarative configuration of the Mutation type for use with +// apply. +func Mutation() *MutationApplyConfiguration { + return &MutationApplyConfiguration{} +} + +// WithPatchType sets the PatchType field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the PatchType field is set to the value of the last call. +func (b *MutationApplyConfiguration) WithPatchType(value admissionregistrationv1alpha1.PatchType) *MutationApplyConfiguration { + b.PatchType = &value + return b +} + +// WithApplyConfiguration sets the ApplyConfiguration field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ApplyConfiguration field is set to the value of the last call. +func (b *MutationApplyConfiguration) WithApplyConfiguration(value *ApplyConfigurationApplyConfiguration) *MutationApplyConfiguration { + b.ApplyConfiguration = value + return b +} + +// WithJSONPatch sets the JSONPatch field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the JSONPatch field is set to the value of the last call. 
+func (b *MutationApplyConfiguration) WithJSONPatch(value *JSONPatchApplyConfiguration) *MutationApplyConfiguration { + b.JSONPatch = value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/namedrulewithoperations.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/namedrulewithoperations.go index 5e6744fd7..f630224ac 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/namedrulewithoperations.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/namedrulewithoperations.go @@ -51,7 +51,7 @@ func (b *NamedRuleWithOperationsApplyConfiguration) WithResourceNames(values ... // If called multiple times, values provided by each call will be appended to the Operations field. func (b *NamedRuleWithOperationsApplyConfiguration) WithOperations(values ...admissionregistrationv1.OperationType) *NamedRuleWithOperationsApplyConfiguration { for i := range values { - b.Operations = append(b.Operations, values[i]) + b.RuleWithOperationsApplyConfiguration.Operations = append(b.RuleWithOperationsApplyConfiguration.Operations, values[i]) } return b } @@ -61,7 +61,7 @@ func (b *NamedRuleWithOperationsApplyConfiguration) WithOperations(values ...adm // If called multiple times, values provided by each call will be appended to the APIGroups field. func (b *NamedRuleWithOperationsApplyConfiguration) WithAPIGroups(values ...string) *NamedRuleWithOperationsApplyConfiguration { for i := range values { - b.APIGroups = append(b.APIGroups, values[i]) + b.RuleApplyConfiguration.APIGroups = append(b.RuleApplyConfiguration.APIGroups, values[i]) } return b } @@ -71,7 +71,7 @@ func (b *NamedRuleWithOperationsApplyConfiguration) WithAPIGroups(values ...stri // If called multiple times, values provided by each call will be appended to the APIVersions field. func (b *NamedRuleWithOperationsApplyConfiguration) WithAPIVersions(values ...string) *NamedRuleWithOperationsApplyConfiguration { for i := range values { - b.APIVersions = append(b.APIVersions, values[i]) + b.RuleApplyConfiguration.APIVersions = append(b.RuleApplyConfiguration.APIVersions, values[i]) } return b } @@ -81,7 +81,7 @@ func (b *NamedRuleWithOperationsApplyConfiguration) WithAPIVersions(values ...st // If called multiple times, values provided by each call will be appended to the Resources field. func (b *NamedRuleWithOperationsApplyConfiguration) WithResources(values ...string) *NamedRuleWithOperationsApplyConfiguration { for i := range values { - b.Resources = append(b.Resources, values[i]) + b.RuleApplyConfiguration.Resources = append(b.RuleApplyConfiguration.Resources, values[i]) } return b } @@ -90,6 +90,6 @@ func (b *NamedRuleWithOperationsApplyConfiguration) WithResources(values ...stri // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Scope field is set to the value of the last call. 
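// Example (sketch, not authoritative): how the new v1alpha1 Mutation and
// MutatingAdmissionPolicySpec builders added in this vendor bump might be chained.
// The MutatingAdmissionPolicySpec() and ApplyConfiguration() constructors, and the
// PatchTypeApplyConfiguration constant, are assumed to follow the usual
// applyconfiguration-gen / k8s.io/api patterns; verify names against the vendored sources.
package example

import (
	admissionregistrationv1alpha1 "k8s.io/api/admissionregistration/v1alpha1"
	applyv1alpha1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1"
)

func exampleMutatingAdmissionPolicySpec() *applyv1alpha1.MutatingAdmissionPolicySpecApplyConfiguration {
	// Build a single mutation using the ApplyConfiguration patch type; the CEL
	// expression below is purely illustrative.
	mutation := applyv1alpha1.Mutation().
		WithPatchType(admissionregistrationv1alpha1.PatchTypeApplyConfiguration).
		WithApplyConfiguration(applyv1alpha1.ApplyConfiguration().
			WithExpression(`Object{spec: Object.spec{replicas: 3}}`))

	// Assemble the spec: WithMutations appends on each call, while
	// WithFailurePolicy overwrites the previous value if called again.
	return applyv1alpha1.MutatingAdmissionPolicySpec().
		WithMutations(mutation).
		WithFailurePolicy(admissionregistrationv1alpha1.Fail)
}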
func (b *NamedRuleWithOperationsApplyConfiguration) WithScope(value admissionregistrationv1.ScopeType) *NamedRuleWithOperationsApplyConfiguration { - b.Scope = &value + b.RuleApplyConfiguration.Scope = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/paramref.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/paramref.go index c4fff1d47..669fadbd4 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/paramref.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/paramref.go @@ -19,17 +19,17 @@ limitations under the License. package v1alpha1 import ( - v1alpha1 "k8s.io/api/admissionregistration/v1alpha1" + admissionregistrationv1alpha1 "k8s.io/api/admissionregistration/v1alpha1" v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) // ParamRefApplyConfiguration represents a declarative configuration of the ParamRef type for use // with apply. type ParamRefApplyConfiguration struct { - Name *string `json:"name,omitempty"` - Namespace *string `json:"namespace,omitempty"` - Selector *v1.LabelSelectorApplyConfiguration `json:"selector,omitempty"` - ParameterNotFoundAction *v1alpha1.ParameterNotFoundActionType `json:"parameterNotFoundAction,omitempty"` + Name *string `json:"name,omitempty"` + Namespace *string `json:"namespace,omitempty"` + Selector *v1.LabelSelectorApplyConfiguration `json:"selector,omitempty"` + ParameterNotFoundAction *admissionregistrationv1alpha1.ParameterNotFoundActionType `json:"parameterNotFoundAction,omitempty"` } // ParamRefApplyConfiguration constructs a declarative configuration of the ParamRef type for use with @@ -65,7 +65,7 @@ func (b *ParamRefApplyConfiguration) WithSelector(value *v1.LabelSelectorApplyCo // WithParameterNotFoundAction sets the ParameterNotFoundAction field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the ParameterNotFoundAction field is set to the value of the last call. -func (b *ParamRefApplyConfiguration) WithParameterNotFoundAction(value v1alpha1.ParameterNotFoundActionType) *ParamRefApplyConfiguration { +func (b *ParamRefApplyConfiguration) WithParameterNotFoundAction(value admissionregistrationv1alpha1.ParameterNotFoundActionType) *ParamRefApplyConfiguration { b.ParameterNotFoundAction = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicy.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicy.go index fe60eb5f2..7fd1c0651 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicy.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicy.go @@ -85,7 +85,7 @@ func extractValidatingAdmissionPolicy(validatingAdmissionPolicy *admissionregist // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. 
func (b *ValidatingAdmissionPolicyApplyConfiguration) WithKind(value string) *ValidatingAdmissionPolicyApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -93,7 +93,7 @@ func (b *ValidatingAdmissionPolicyApplyConfiguration) WithKind(value string) *Va // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *ValidatingAdmissionPolicyApplyConfiguration) WithAPIVersion(value string) *ValidatingAdmissionPolicyApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -102,7 +102,7 @@ func (b *ValidatingAdmissionPolicyApplyConfiguration) WithAPIVersion(value strin // If called multiple times, the Name field is set to the value of the last call. func (b *ValidatingAdmissionPolicyApplyConfiguration) WithName(value string) *ValidatingAdmissionPolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -111,7 +111,7 @@ func (b *ValidatingAdmissionPolicyApplyConfiguration) WithName(value string) *Va // If called multiple times, the GenerateName field is set to the value of the last call. func (b *ValidatingAdmissionPolicyApplyConfiguration) WithGenerateName(value string) *ValidatingAdmissionPolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -120,7 +120,7 @@ func (b *ValidatingAdmissionPolicyApplyConfiguration) WithGenerateName(value str // If called multiple times, the Namespace field is set to the value of the last call. func (b *ValidatingAdmissionPolicyApplyConfiguration) WithNamespace(value string) *ValidatingAdmissionPolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -129,7 +129,7 @@ func (b *ValidatingAdmissionPolicyApplyConfiguration) WithNamespace(value string // If called multiple times, the UID field is set to the value of the last call. func (b *ValidatingAdmissionPolicyApplyConfiguration) WithUID(value types.UID) *ValidatingAdmissionPolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -138,7 +138,7 @@ func (b *ValidatingAdmissionPolicyApplyConfiguration) WithUID(value types.UID) * // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *ValidatingAdmissionPolicyApplyConfiguration) WithResourceVersion(value string) *ValidatingAdmissionPolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -147,7 +147,7 @@ func (b *ValidatingAdmissionPolicyApplyConfiguration) WithResourceVersion(value // If called multiple times, the Generation field is set to the value of the last call. 
func (b *ValidatingAdmissionPolicyApplyConfiguration) WithGeneration(value int64) *ValidatingAdmissionPolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } @@ -156,7 +156,7 @@ func (b *ValidatingAdmissionPolicyApplyConfiguration) WithGeneration(value int64 // If called multiple times, the CreationTimestamp field is set to the value of the last call. func (b *ValidatingAdmissionPolicyApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ValidatingAdmissionPolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } @@ -165,7 +165,7 @@ func (b *ValidatingAdmissionPolicyApplyConfiguration) WithCreationTimestamp(valu // If called multiple times, the DeletionTimestamp field is set to the value of the last call. func (b *ValidatingAdmissionPolicyApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ValidatingAdmissionPolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -174,7 +174,7 @@ func (b *ValidatingAdmissionPolicyApplyConfiguration) WithDeletionTimestamp(valu // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *ValidatingAdmissionPolicyApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ValidatingAdmissionPolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -184,11 +184,11 @@ func (b *ValidatingAdmissionPolicyApplyConfiguration) WithDeletionGracePeriodSec // overwriting an existing map entries in Labels field with the same key. func (b *ValidatingAdmissionPolicyApplyConfiguration) WithLabels(entries map[string]string) *ValidatingAdmissionPolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -199,11 +199,11 @@ func (b *ValidatingAdmissionPolicyApplyConfiguration) WithLabels(entries map[str // overwriting an existing map entries in Annotations field with the same key. 
func (b *ValidatingAdmissionPolicyApplyConfiguration) WithAnnotations(entries map[string]string) *ValidatingAdmissionPolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -217,7 +217,7 @@ func (b *ValidatingAdmissionPolicyApplyConfiguration) WithOwnerReferences(values if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -228,7 +228,7 @@ func (b *ValidatingAdmissionPolicyApplyConfiguration) WithOwnerReferences(values func (b *ValidatingAdmissionPolicyApplyConfiguration) WithFinalizers(values ...string) *ValidatingAdmissionPolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } @@ -258,5 +258,5 @@ func (b *ValidatingAdmissionPolicyApplyConfiguration) WithStatus(value *Validati // GetName retrieves the value of the Name field in the declarative configuration. func (b *ValidatingAdmissionPolicyApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicybinding.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicybinding.go index 0c11ee594..ca8ac7dd0 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicybinding.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicybinding.go @@ -84,7 +84,7 @@ func extractValidatingAdmissionPolicyBinding(validatingAdmissionPolicyBinding *a // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithKind(value string) *ValidatingAdmissionPolicyBindingApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -92,7 +92,7 @@ func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithKind(value stri // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithAPIVersion(value string) *ValidatingAdmissionPolicyBindingApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -101,7 +101,7 @@ func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithAPIVersion(valu // If called multiple times, the Name field is set to the value of the last call. 
func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithName(value string) *ValidatingAdmissionPolicyBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -110,7 +110,7 @@ func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithName(value stri // If called multiple times, the GenerateName field is set to the value of the last call. func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithGenerateName(value string) *ValidatingAdmissionPolicyBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -119,7 +119,7 @@ func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithGenerateName(va // If called multiple times, the Namespace field is set to the value of the last call. func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithNamespace(value string) *ValidatingAdmissionPolicyBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -128,7 +128,7 @@ func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithNamespace(value // If called multiple times, the UID field is set to the value of the last call. func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithUID(value types.UID) *ValidatingAdmissionPolicyBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -137,7 +137,7 @@ func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithUID(value types // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithResourceVersion(value string) *ValidatingAdmissionPolicyBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -146,7 +146,7 @@ func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithResourceVersion // If called multiple times, the Generation field is set to the value of the last call. func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithGeneration(value int64) *ValidatingAdmissionPolicyBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } @@ -155,7 +155,7 @@ func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithGeneration(valu // If called multiple times, the CreationTimestamp field is set to the value of the last call. func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ValidatingAdmissionPolicyBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } @@ -164,7 +164,7 @@ func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithCreationTimesta // If called multiple times, the DeletionTimestamp field is set to the value of the last call. 
func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ValidatingAdmissionPolicyBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -173,7 +173,7 @@ func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithDeletionTimesta // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ValidatingAdmissionPolicyBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -183,11 +183,11 @@ func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithDeletionGracePe // overwriting an existing map entries in Labels field with the same key. func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithLabels(entries map[string]string) *ValidatingAdmissionPolicyBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -198,11 +198,11 @@ func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithLabels(entries // overwriting an existing map entries in Annotations field with the same key. func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithAnnotations(entries map[string]string) *ValidatingAdmissionPolicyBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -216,7 +216,7 @@ func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithOwnerReferences if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -227,7 +227,7 @@ func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithOwnerReferences func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithFinalizers(values ...string) *ValidatingAdmissionPolicyBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } @@ -249,5 +249,5 @@ func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithSpec(value *Val // GetName retrieves the value of the Name field in the declarative configuration. 
func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/mutatingwebhookconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/mutatingwebhookconfiguration.go index 51bb82389..15c54c125 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/mutatingwebhookconfiguration.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/mutatingwebhookconfiguration.go @@ -84,7 +84,7 @@ func extractMutatingWebhookConfiguration(mutatingWebhookConfiguration *admission // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *MutatingWebhookConfigurationApplyConfiguration) WithKind(value string) *MutatingWebhookConfigurationApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -92,7 +92,7 @@ func (b *MutatingWebhookConfigurationApplyConfiguration) WithKind(value string) // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *MutatingWebhookConfigurationApplyConfiguration) WithAPIVersion(value string) *MutatingWebhookConfigurationApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -101,7 +101,7 @@ func (b *MutatingWebhookConfigurationApplyConfiguration) WithAPIVersion(value st // If called multiple times, the Name field is set to the value of the last call. func (b *MutatingWebhookConfigurationApplyConfiguration) WithName(value string) *MutatingWebhookConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -110,7 +110,7 @@ func (b *MutatingWebhookConfigurationApplyConfiguration) WithName(value string) // If called multiple times, the GenerateName field is set to the value of the last call. func (b *MutatingWebhookConfigurationApplyConfiguration) WithGenerateName(value string) *MutatingWebhookConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -119,7 +119,7 @@ func (b *MutatingWebhookConfigurationApplyConfiguration) WithGenerateName(value // If called multiple times, the Namespace field is set to the value of the last call. func (b *MutatingWebhookConfigurationApplyConfiguration) WithNamespace(value string) *MutatingWebhookConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -128,7 +128,7 @@ func (b *MutatingWebhookConfigurationApplyConfiguration) WithNamespace(value str // If called multiple times, the UID field is set to the value of the last call. 
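// Example (sketch): the regenerated setters above now spell out the embedded
// TypeMetaApplyConfiguration/ObjectMetaApplyConfiguration fields explicitly, but the
// public builder surface is unchanged, so existing callers keep chaining as before.
// The ValidatingAdmissionPolicyBinding(name) constructor is assumed to follow the
// standard generated pattern for cluster-scoped objects.
package example

import (
	"fmt"

	applyv1alpha1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1"
)

func exampleBindingMetadata() {
	binding := applyv1alpha1.ValidatingAdmissionPolicyBinding("demo-binding").
		WithLabels(map[string]string{"app": "demo"}).
		WithFinalizers("example.com/finalizer")

	// GetName still reads through the embedded ObjectMetaApplyConfiguration,
	// exactly as the rewritten method bodies show.
	fmt.Println(*binding.GetName()) // prints "demo-binding"
}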
func (b *MutatingWebhookConfigurationApplyConfiguration) WithUID(value types.UID) *MutatingWebhookConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -137,7 +137,7 @@ func (b *MutatingWebhookConfigurationApplyConfiguration) WithUID(value types.UID // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *MutatingWebhookConfigurationApplyConfiguration) WithResourceVersion(value string) *MutatingWebhookConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -146,7 +146,7 @@ func (b *MutatingWebhookConfigurationApplyConfiguration) WithResourceVersion(val // If called multiple times, the Generation field is set to the value of the last call. func (b *MutatingWebhookConfigurationApplyConfiguration) WithGeneration(value int64) *MutatingWebhookConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } @@ -155,7 +155,7 @@ func (b *MutatingWebhookConfigurationApplyConfiguration) WithGeneration(value in // If called multiple times, the CreationTimestamp field is set to the value of the last call. func (b *MutatingWebhookConfigurationApplyConfiguration) WithCreationTimestamp(value metav1.Time) *MutatingWebhookConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } @@ -164,7 +164,7 @@ func (b *MutatingWebhookConfigurationApplyConfiguration) WithCreationTimestamp(v // If called multiple times, the DeletionTimestamp field is set to the value of the last call. func (b *MutatingWebhookConfigurationApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *MutatingWebhookConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -173,7 +173,7 @@ func (b *MutatingWebhookConfigurationApplyConfiguration) WithDeletionTimestamp(v // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *MutatingWebhookConfigurationApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *MutatingWebhookConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -183,11 +183,11 @@ func (b *MutatingWebhookConfigurationApplyConfiguration) WithDeletionGracePeriod // overwriting an existing map entries in Labels field with the same key. 
func (b *MutatingWebhookConfigurationApplyConfiguration) WithLabels(entries map[string]string) *MutatingWebhookConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -198,11 +198,11 @@ func (b *MutatingWebhookConfigurationApplyConfiguration) WithLabels(entries map[ // overwriting an existing map entries in Annotations field with the same key. func (b *MutatingWebhookConfigurationApplyConfiguration) WithAnnotations(entries map[string]string) *MutatingWebhookConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -216,7 +216,7 @@ func (b *MutatingWebhookConfigurationApplyConfiguration) WithOwnerReferences(val if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -227,7 +227,7 @@ func (b *MutatingWebhookConfigurationApplyConfiguration) WithOwnerReferences(val func (b *MutatingWebhookConfigurationApplyConfiguration) WithFinalizers(values ...string) *MutatingWebhookConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } @@ -254,5 +254,5 @@ func (b *MutatingWebhookConfigurationApplyConfiguration) WithWebhooks(values ... // GetName retrieves the value of the Name field in the declarative configuration. func (b *MutatingWebhookConfigurationApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/namedrulewithoperations.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/namedrulewithoperations.go index 5de70c7ad..62c617d2f 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/namedrulewithoperations.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/namedrulewithoperations.go @@ -51,7 +51,7 @@ func (b *NamedRuleWithOperationsApplyConfiguration) WithResourceNames(values ... // If called multiple times, values provided by each call will be appended to the Operations field. 
func (b *NamedRuleWithOperationsApplyConfiguration) WithOperations(values ...admissionregistrationv1.OperationType) *NamedRuleWithOperationsApplyConfiguration { for i := range values { - b.Operations = append(b.Operations, values[i]) + b.RuleWithOperationsApplyConfiguration.Operations = append(b.RuleWithOperationsApplyConfiguration.Operations, values[i]) } return b } @@ -61,7 +61,7 @@ func (b *NamedRuleWithOperationsApplyConfiguration) WithOperations(values ...adm // If called multiple times, values provided by each call will be appended to the APIGroups field. func (b *NamedRuleWithOperationsApplyConfiguration) WithAPIGroups(values ...string) *NamedRuleWithOperationsApplyConfiguration { for i := range values { - b.APIGroups = append(b.APIGroups, values[i]) + b.RuleApplyConfiguration.APIGroups = append(b.RuleApplyConfiguration.APIGroups, values[i]) } return b } @@ -71,7 +71,7 @@ func (b *NamedRuleWithOperationsApplyConfiguration) WithAPIGroups(values ...stri // If called multiple times, values provided by each call will be appended to the APIVersions field. func (b *NamedRuleWithOperationsApplyConfiguration) WithAPIVersions(values ...string) *NamedRuleWithOperationsApplyConfiguration { for i := range values { - b.APIVersions = append(b.APIVersions, values[i]) + b.RuleApplyConfiguration.APIVersions = append(b.RuleApplyConfiguration.APIVersions, values[i]) } return b } @@ -81,7 +81,7 @@ func (b *NamedRuleWithOperationsApplyConfiguration) WithAPIVersions(values ...st // If called multiple times, values provided by each call will be appended to the Resources field. func (b *NamedRuleWithOperationsApplyConfiguration) WithResources(values ...string) *NamedRuleWithOperationsApplyConfiguration { for i := range values { - b.Resources = append(b.Resources, values[i]) + b.RuleApplyConfiguration.Resources = append(b.RuleApplyConfiguration.Resources, values[i]) } return b } @@ -90,6 +90,6 @@ func (b *NamedRuleWithOperationsApplyConfiguration) WithResources(values ...stri // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Scope field is set to the value of the last call. func (b *NamedRuleWithOperationsApplyConfiguration) WithScope(value admissionregistrationv1.ScopeType) *NamedRuleWithOperationsApplyConfiguration { - b.Scope = &value + b.RuleApplyConfiguration.Scope = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/paramref.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/paramref.go index 0a94ae067..5143b0cb9 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/paramref.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/paramref.go @@ -19,17 +19,17 @@ limitations under the License. package v1beta1 import ( - v1beta1 "k8s.io/api/admissionregistration/v1beta1" + admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1" v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) // ParamRefApplyConfiguration represents a declarative configuration of the ParamRef type for use // with apply. 
type ParamRefApplyConfiguration struct { - Name *string `json:"name,omitempty"` - Namespace *string `json:"namespace,omitempty"` - Selector *v1.LabelSelectorApplyConfiguration `json:"selector,omitempty"` - ParameterNotFoundAction *v1beta1.ParameterNotFoundActionType `json:"parameterNotFoundAction,omitempty"` + Name *string `json:"name,omitempty"` + Namespace *string `json:"namespace,omitempty"` + Selector *v1.LabelSelectorApplyConfiguration `json:"selector,omitempty"` + ParameterNotFoundAction *admissionregistrationv1beta1.ParameterNotFoundActionType `json:"parameterNotFoundAction,omitempty"` } // ParamRefApplyConfiguration constructs a declarative configuration of the ParamRef type for use with @@ -65,7 +65,7 @@ func (b *ParamRefApplyConfiguration) WithSelector(value *v1.LabelSelectorApplyCo // WithParameterNotFoundAction sets the ParameterNotFoundAction field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the ParameterNotFoundAction field is set to the value of the last call. -func (b *ParamRefApplyConfiguration) WithParameterNotFoundAction(value v1beta1.ParameterNotFoundActionType) *ParamRefApplyConfiguration { +func (b *ParamRefApplyConfiguration) WithParameterNotFoundAction(value admissionregistrationv1beta1.ParameterNotFoundActionType) *ParamRefApplyConfiguration { b.ParameterNotFoundAction = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicy.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicy.go index c29ee56cb..35a8adbf7 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicy.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicy.go @@ -85,7 +85,7 @@ func extractValidatingAdmissionPolicy(validatingAdmissionPolicy *admissionregist // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *ValidatingAdmissionPolicyApplyConfiguration) WithKind(value string) *ValidatingAdmissionPolicyApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -93,7 +93,7 @@ func (b *ValidatingAdmissionPolicyApplyConfiguration) WithKind(value string) *Va // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *ValidatingAdmissionPolicyApplyConfiguration) WithAPIVersion(value string) *ValidatingAdmissionPolicyApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -102,7 +102,7 @@ func (b *ValidatingAdmissionPolicyApplyConfiguration) WithAPIVersion(value strin // If called multiple times, the Name field is set to the value of the last call. func (b *ValidatingAdmissionPolicyApplyConfiguration) WithName(value string) *ValidatingAdmissionPolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -111,7 +111,7 @@ func (b *ValidatingAdmissionPolicyApplyConfiguration) WithName(value string) *Va // If called multiple times, the GenerateName field is set to the value of the last call. 
func (b *ValidatingAdmissionPolicyApplyConfiguration) WithGenerateName(value string) *ValidatingAdmissionPolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -120,7 +120,7 @@ func (b *ValidatingAdmissionPolicyApplyConfiguration) WithGenerateName(value str // If called multiple times, the Namespace field is set to the value of the last call. func (b *ValidatingAdmissionPolicyApplyConfiguration) WithNamespace(value string) *ValidatingAdmissionPolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -129,7 +129,7 @@ func (b *ValidatingAdmissionPolicyApplyConfiguration) WithNamespace(value string // If called multiple times, the UID field is set to the value of the last call. func (b *ValidatingAdmissionPolicyApplyConfiguration) WithUID(value types.UID) *ValidatingAdmissionPolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -138,7 +138,7 @@ func (b *ValidatingAdmissionPolicyApplyConfiguration) WithUID(value types.UID) * // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *ValidatingAdmissionPolicyApplyConfiguration) WithResourceVersion(value string) *ValidatingAdmissionPolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -147,7 +147,7 @@ func (b *ValidatingAdmissionPolicyApplyConfiguration) WithResourceVersion(value // If called multiple times, the Generation field is set to the value of the last call. func (b *ValidatingAdmissionPolicyApplyConfiguration) WithGeneration(value int64) *ValidatingAdmissionPolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } @@ -156,7 +156,7 @@ func (b *ValidatingAdmissionPolicyApplyConfiguration) WithGeneration(value int64 // If called multiple times, the CreationTimestamp field is set to the value of the last call. func (b *ValidatingAdmissionPolicyApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ValidatingAdmissionPolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } @@ -165,7 +165,7 @@ func (b *ValidatingAdmissionPolicyApplyConfiguration) WithCreationTimestamp(valu // If called multiple times, the DeletionTimestamp field is set to the value of the last call. func (b *ValidatingAdmissionPolicyApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ValidatingAdmissionPolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -174,7 +174,7 @@ func (b *ValidatingAdmissionPolicyApplyConfiguration) WithDeletionTimestamp(valu // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. 
func (b *ValidatingAdmissionPolicyApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ValidatingAdmissionPolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -184,11 +184,11 @@ func (b *ValidatingAdmissionPolicyApplyConfiguration) WithDeletionGracePeriodSec // overwriting an existing map entries in Labels field with the same key. func (b *ValidatingAdmissionPolicyApplyConfiguration) WithLabels(entries map[string]string) *ValidatingAdmissionPolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -199,11 +199,11 @@ func (b *ValidatingAdmissionPolicyApplyConfiguration) WithLabels(entries map[str // overwriting an existing map entries in Annotations field with the same key. func (b *ValidatingAdmissionPolicyApplyConfiguration) WithAnnotations(entries map[string]string) *ValidatingAdmissionPolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -217,7 +217,7 @@ func (b *ValidatingAdmissionPolicyApplyConfiguration) WithOwnerReferences(values if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -228,7 +228,7 @@ func (b *ValidatingAdmissionPolicyApplyConfiguration) WithOwnerReferences(values func (b *ValidatingAdmissionPolicyApplyConfiguration) WithFinalizers(values ...string) *ValidatingAdmissionPolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } @@ -258,5 +258,5 @@ func (b *ValidatingAdmissionPolicyApplyConfiguration) WithStatus(value *Validati // GetName retrieves the value of the Name field in the declarative configuration. 
func (b *ValidatingAdmissionPolicyApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicybinding.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicybinding.go index 4347c4810..191d045ef 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicybinding.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicybinding.go @@ -84,7 +84,7 @@ func extractValidatingAdmissionPolicyBinding(validatingAdmissionPolicyBinding *a // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithKind(value string) *ValidatingAdmissionPolicyBindingApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -92,7 +92,7 @@ func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithKind(value stri // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithAPIVersion(value string) *ValidatingAdmissionPolicyBindingApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -101,7 +101,7 @@ func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithAPIVersion(valu // If called multiple times, the Name field is set to the value of the last call. func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithName(value string) *ValidatingAdmissionPolicyBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -110,7 +110,7 @@ func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithName(value stri // If called multiple times, the GenerateName field is set to the value of the last call. func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithGenerateName(value string) *ValidatingAdmissionPolicyBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -119,7 +119,7 @@ func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithGenerateName(va // If called multiple times, the Namespace field is set to the value of the last call. func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithNamespace(value string) *ValidatingAdmissionPolicyBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -128,7 +128,7 @@ func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithNamespace(value // If called multiple times, the UID field is set to the value of the last call. 
func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithUID(value types.UID) *ValidatingAdmissionPolicyBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -137,7 +137,7 @@ func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithUID(value types // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithResourceVersion(value string) *ValidatingAdmissionPolicyBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -146,7 +146,7 @@ func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithResourceVersion // If called multiple times, the Generation field is set to the value of the last call. func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithGeneration(value int64) *ValidatingAdmissionPolicyBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } @@ -155,7 +155,7 @@ func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithGeneration(valu // If called multiple times, the CreationTimestamp field is set to the value of the last call. func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ValidatingAdmissionPolicyBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } @@ -164,7 +164,7 @@ func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithCreationTimesta // If called multiple times, the DeletionTimestamp field is set to the value of the last call. func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ValidatingAdmissionPolicyBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -173,7 +173,7 @@ func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithDeletionTimesta // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ValidatingAdmissionPolicyBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -183,11 +183,11 @@ func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithDeletionGracePe // overwriting an existing map entries in Labels field with the same key. 
func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithLabels(entries map[string]string) *ValidatingAdmissionPolicyBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -198,11 +198,11 @@ func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithLabels(entries // overwriting an existing map entries in Annotations field with the same key. func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithAnnotations(entries map[string]string) *ValidatingAdmissionPolicyBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -216,7 +216,7 @@ func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithOwnerReferences if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -227,7 +227,7 @@ func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithOwnerReferences func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithFinalizers(values ...string) *ValidatingAdmissionPolicyBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } @@ -249,5 +249,5 @@ func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithSpec(value *Val // GetName retrieves the value of the Name field in the declarative configuration. func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingwebhookconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingwebhookconfiguration.go index c3535c180..e775e55a3 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingwebhookconfiguration.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingwebhookconfiguration.go @@ -84,7 +84,7 @@ func extractValidatingWebhookConfiguration(validatingWebhookConfiguration *admis // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. 
func (b *ValidatingWebhookConfigurationApplyConfiguration) WithKind(value string) *ValidatingWebhookConfigurationApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -92,7 +92,7 @@ func (b *ValidatingWebhookConfigurationApplyConfiguration) WithKind(value string // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *ValidatingWebhookConfigurationApplyConfiguration) WithAPIVersion(value string) *ValidatingWebhookConfigurationApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -101,7 +101,7 @@ func (b *ValidatingWebhookConfigurationApplyConfiguration) WithAPIVersion(value // If called multiple times, the Name field is set to the value of the last call. func (b *ValidatingWebhookConfigurationApplyConfiguration) WithName(value string) *ValidatingWebhookConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -110,7 +110,7 @@ func (b *ValidatingWebhookConfigurationApplyConfiguration) WithName(value string // If called multiple times, the GenerateName field is set to the value of the last call. func (b *ValidatingWebhookConfigurationApplyConfiguration) WithGenerateName(value string) *ValidatingWebhookConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -119,7 +119,7 @@ func (b *ValidatingWebhookConfigurationApplyConfiguration) WithGenerateName(valu // If called multiple times, the Namespace field is set to the value of the last call. func (b *ValidatingWebhookConfigurationApplyConfiguration) WithNamespace(value string) *ValidatingWebhookConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -128,7 +128,7 @@ func (b *ValidatingWebhookConfigurationApplyConfiguration) WithNamespace(value s // If called multiple times, the UID field is set to the value of the last call. func (b *ValidatingWebhookConfigurationApplyConfiguration) WithUID(value types.UID) *ValidatingWebhookConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -137,7 +137,7 @@ func (b *ValidatingWebhookConfigurationApplyConfiguration) WithUID(value types.U // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *ValidatingWebhookConfigurationApplyConfiguration) WithResourceVersion(value string) *ValidatingWebhookConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -146,7 +146,7 @@ func (b *ValidatingWebhookConfigurationApplyConfiguration) WithResourceVersion(v // If called multiple times, the Generation field is set to the value of the last call. 
func (b *ValidatingWebhookConfigurationApplyConfiguration) WithGeneration(value int64) *ValidatingWebhookConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } @@ -155,7 +155,7 @@ func (b *ValidatingWebhookConfigurationApplyConfiguration) WithGeneration(value // If called multiple times, the CreationTimestamp field is set to the value of the last call. func (b *ValidatingWebhookConfigurationApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ValidatingWebhookConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } @@ -164,7 +164,7 @@ func (b *ValidatingWebhookConfigurationApplyConfiguration) WithCreationTimestamp // If called multiple times, the DeletionTimestamp field is set to the value of the last call. func (b *ValidatingWebhookConfigurationApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ValidatingWebhookConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -173,7 +173,7 @@ func (b *ValidatingWebhookConfigurationApplyConfiguration) WithDeletionTimestamp // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *ValidatingWebhookConfigurationApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ValidatingWebhookConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -183,11 +183,11 @@ func (b *ValidatingWebhookConfigurationApplyConfiguration) WithDeletionGracePeri // overwriting an existing map entries in Labels field with the same key. func (b *ValidatingWebhookConfigurationApplyConfiguration) WithLabels(entries map[string]string) *ValidatingWebhookConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -198,11 +198,11 @@ func (b *ValidatingWebhookConfigurationApplyConfiguration) WithLabels(entries ma // overwriting an existing map entries in Annotations field with the same key. 
func (b *ValidatingWebhookConfigurationApplyConfiguration) WithAnnotations(entries map[string]string) *ValidatingWebhookConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -216,7 +216,7 @@ func (b *ValidatingWebhookConfigurationApplyConfiguration) WithOwnerReferences(v if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -227,7 +227,7 @@ func (b *ValidatingWebhookConfigurationApplyConfiguration) WithOwnerReferences(v func (b *ValidatingWebhookConfigurationApplyConfiguration) WithFinalizers(values ...string) *ValidatingWebhookConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } @@ -254,5 +254,5 @@ func (b *ValidatingWebhookConfigurationApplyConfiguration) WithWebhooks(values . // GetName retrieves the value of the Name field in the declarative configuration. func (b *ValidatingWebhookConfigurationApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/k8s.io/client-go/applyconfigurations/apiserverinternal/v1alpha1/storageversion.go b/vendor/k8s.io/client-go/applyconfigurations/apiserverinternal/v1alpha1/storageversion.go index d734328b0..0061d8afb 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apiserverinternal/v1alpha1/storageversion.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apiserverinternal/v1alpha1/storageversion.go @@ -19,7 +19,7 @@ limitations under the License. package v1alpha1 import ( - v1alpha1 "k8s.io/api/apiserverinternal/v1alpha1" + apiserverinternalv1alpha1 "k8s.io/api/apiserverinternal/v1alpha1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" @@ -32,8 +32,8 @@ import ( type StorageVersionApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Spec *v1alpha1.StorageVersionSpec `json:"spec,omitempty"` - Status *StorageVersionStatusApplyConfiguration `json:"status,omitempty"` + Spec *apiserverinternalv1alpha1.StorageVersionSpec `json:"spec,omitempty"` + Status *StorageVersionStatusApplyConfiguration `json:"status,omitempty"` } // StorageVersion constructs a declarative configuration of the StorageVersion type for use with @@ -57,18 +57,18 @@ func StorageVersion(name string) *StorageVersionApplyConfiguration { // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! 
-func ExtractStorageVersion(storageVersion *v1alpha1.StorageVersion, fieldManager string) (*StorageVersionApplyConfiguration, error) { +func ExtractStorageVersion(storageVersion *apiserverinternalv1alpha1.StorageVersion, fieldManager string) (*StorageVersionApplyConfiguration, error) { return extractStorageVersion(storageVersion, fieldManager, "") } // ExtractStorageVersionStatus is the same as ExtractStorageVersion except // that it extracts the status subresource applied configuration. // Experimental! -func ExtractStorageVersionStatus(storageVersion *v1alpha1.StorageVersion, fieldManager string) (*StorageVersionApplyConfiguration, error) { +func ExtractStorageVersionStatus(storageVersion *apiserverinternalv1alpha1.StorageVersion, fieldManager string) (*StorageVersionApplyConfiguration, error) { return extractStorageVersion(storageVersion, fieldManager, "status") } -func extractStorageVersion(storageVersion *v1alpha1.StorageVersion, fieldManager string, subresource string) (*StorageVersionApplyConfiguration, error) { +func extractStorageVersion(storageVersion *apiserverinternalv1alpha1.StorageVersion, fieldManager string, subresource string) (*StorageVersionApplyConfiguration, error) { b := &StorageVersionApplyConfiguration{} err := managedfields.ExtractInto(storageVersion, internal.Parser().Type("io.k8s.api.apiserverinternal.v1alpha1.StorageVersion"), fieldManager, b, subresource) if err != nil { @@ -85,7 +85,7 @@ func extractStorageVersion(storageVersion *v1alpha1.StorageVersion, fieldManager // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *StorageVersionApplyConfiguration) WithKind(value string) *StorageVersionApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -93,7 +93,7 @@ func (b *StorageVersionApplyConfiguration) WithKind(value string) *StorageVersio // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *StorageVersionApplyConfiguration) WithAPIVersion(value string) *StorageVersionApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -102,7 +102,7 @@ func (b *StorageVersionApplyConfiguration) WithAPIVersion(value string) *Storage // If called multiple times, the Name field is set to the value of the last call. func (b *StorageVersionApplyConfiguration) WithName(value string) *StorageVersionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -111,7 +111,7 @@ func (b *StorageVersionApplyConfiguration) WithName(value string) *StorageVersio // If called multiple times, the GenerateName field is set to the value of the last call. func (b *StorageVersionApplyConfiguration) WithGenerateName(value string) *StorageVersionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -120,7 +120,7 @@ func (b *StorageVersionApplyConfiguration) WithGenerateName(value string) *Stora // If called multiple times, the Namespace field is set to the value of the last call. 
func (b *StorageVersionApplyConfiguration) WithNamespace(value string) *StorageVersionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -129,7 +129,7 @@ func (b *StorageVersionApplyConfiguration) WithNamespace(value string) *StorageV // If called multiple times, the UID field is set to the value of the last call. func (b *StorageVersionApplyConfiguration) WithUID(value types.UID) *StorageVersionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -138,7 +138,7 @@ func (b *StorageVersionApplyConfiguration) WithUID(value types.UID) *StorageVers // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *StorageVersionApplyConfiguration) WithResourceVersion(value string) *StorageVersionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -147,7 +147,7 @@ func (b *StorageVersionApplyConfiguration) WithResourceVersion(value string) *St // If called multiple times, the Generation field is set to the value of the last call. func (b *StorageVersionApplyConfiguration) WithGeneration(value int64) *StorageVersionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } @@ -156,7 +156,7 @@ func (b *StorageVersionApplyConfiguration) WithGeneration(value int64) *StorageV // If called multiple times, the CreationTimestamp field is set to the value of the last call. func (b *StorageVersionApplyConfiguration) WithCreationTimestamp(value metav1.Time) *StorageVersionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } @@ -165,7 +165,7 @@ func (b *StorageVersionApplyConfiguration) WithCreationTimestamp(value metav1.Ti // If called multiple times, the DeletionTimestamp field is set to the value of the last call. func (b *StorageVersionApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *StorageVersionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -174,7 +174,7 @@ func (b *StorageVersionApplyConfiguration) WithDeletionTimestamp(value metav1.Ti // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *StorageVersionApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *StorageVersionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -184,11 +184,11 @@ func (b *StorageVersionApplyConfiguration) WithDeletionGracePeriodSeconds(value // overwriting an existing map entries in Labels field with the same key. 
func (b *StorageVersionApplyConfiguration) WithLabels(entries map[string]string) *StorageVersionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -199,11 +199,11 @@ func (b *StorageVersionApplyConfiguration) WithLabels(entries map[string]string) // overwriting an existing map entries in Annotations field with the same key. func (b *StorageVersionApplyConfiguration) WithAnnotations(entries map[string]string) *StorageVersionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -217,7 +217,7 @@ func (b *StorageVersionApplyConfiguration) WithOwnerReferences(values ...*v1.Own if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -228,7 +228,7 @@ func (b *StorageVersionApplyConfiguration) WithOwnerReferences(values ...*v1.Own func (b *StorageVersionApplyConfiguration) WithFinalizers(values ...string) *StorageVersionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } @@ -242,7 +242,7 @@ func (b *StorageVersionApplyConfiguration) ensureObjectMetaApplyConfigurationExi // WithSpec sets the Spec field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Spec field is set to the value of the last call. -func (b *StorageVersionApplyConfiguration) WithSpec(value v1alpha1.StorageVersionSpec) *StorageVersionApplyConfiguration { +func (b *StorageVersionApplyConfiguration) WithSpec(value apiserverinternalv1alpha1.StorageVersionSpec) *StorageVersionApplyConfiguration { b.Spec = &value return b } @@ -258,5 +258,5 @@ func (b *StorageVersionApplyConfiguration) WithStatus(value *StorageVersionStatu // GetName retrieves the value of the Name field in the declarative configuration. 
func (b *StorageVersionApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/k8s.io/client-go/applyconfigurations/apiserverinternal/v1alpha1/storageversioncondition.go b/vendor/k8s.io/client-go/applyconfigurations/apiserverinternal/v1alpha1/storageversioncondition.go index 68d894d0c..1ed71cf8e 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apiserverinternal/v1alpha1/storageversioncondition.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apiserverinternal/v1alpha1/storageversioncondition.go @@ -19,19 +19,19 @@ limitations under the License. package v1alpha1 import ( - v1alpha1 "k8s.io/api/apiserverinternal/v1alpha1" + apiserverinternalv1alpha1 "k8s.io/api/apiserverinternal/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) // StorageVersionConditionApplyConfiguration represents a declarative configuration of the StorageVersionCondition type for use // with apply. type StorageVersionConditionApplyConfiguration struct { - Type *v1alpha1.StorageVersionConditionType `json:"type,omitempty"` - Status *v1alpha1.ConditionStatus `json:"status,omitempty"` - ObservedGeneration *int64 `json:"observedGeneration,omitempty"` - LastTransitionTime *v1.Time `json:"lastTransitionTime,omitempty"` - Reason *string `json:"reason,omitempty"` - Message *string `json:"message,omitempty"` + Type *apiserverinternalv1alpha1.StorageVersionConditionType `json:"type,omitempty"` + Status *apiserverinternalv1alpha1.ConditionStatus `json:"status,omitempty"` + ObservedGeneration *int64 `json:"observedGeneration,omitempty"` + LastTransitionTime *v1.Time `json:"lastTransitionTime,omitempty"` + Reason *string `json:"reason,omitempty"` + Message *string `json:"message,omitempty"` } // StorageVersionConditionApplyConfiguration constructs a declarative configuration of the StorageVersionCondition type for use with @@ -43,7 +43,7 @@ func StorageVersionCondition() *StorageVersionConditionApplyConfiguration { // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. -func (b *StorageVersionConditionApplyConfiguration) WithType(value v1alpha1.StorageVersionConditionType) *StorageVersionConditionApplyConfiguration { +func (b *StorageVersionConditionApplyConfiguration) WithType(value apiserverinternalv1alpha1.StorageVersionConditionType) *StorageVersionConditionApplyConfiguration { b.Type = &value return b } @@ -51,7 +51,7 @@ func (b *StorageVersionConditionApplyConfiguration) WithType(value v1alpha1.Stor // WithStatus sets the Status field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Status field is set to the value of the last call. 
-func (b *StorageVersionConditionApplyConfiguration) WithStatus(value v1alpha1.ConditionStatus) *StorageVersionConditionApplyConfiguration { +func (b *StorageVersionConditionApplyConfiguration) WithStatus(value apiserverinternalv1alpha1.ConditionStatus) *StorageVersionConditionApplyConfiguration { b.Status = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/controllerrevision.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/controllerrevision.go index 25b645059..bfdad4a73 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/controllerrevision.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/controllerrevision.go @@ -20,21 +20,21 @@ package v1 import ( appsv1 "k8s.io/api/apps/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" internal "k8s.io/client-go/applyconfigurations/internal" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) // ControllerRevisionApplyConfiguration represents a declarative configuration of the ControllerRevision type for use // with apply. type ControllerRevisionApplyConfiguration struct { - v1.TypeMetaApplyConfiguration `json:",inline"` - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Data *runtime.RawExtension `json:"data,omitempty"` - Revision *int64 `json:"revision,omitempty"` + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Data *runtime.RawExtension `json:"data,omitempty"` + Revision *int64 `json:"revision,omitempty"` } // ControllerRevision constructs a declarative configuration of the ControllerRevision type for use with @@ -88,7 +88,7 @@ func extractControllerRevision(controllerRevision *appsv1.ControllerRevision, fi // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *ControllerRevisionApplyConfiguration) WithKind(value string) *ControllerRevisionApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -96,7 +96,7 @@ func (b *ControllerRevisionApplyConfiguration) WithKind(value string) *Controlle // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *ControllerRevisionApplyConfiguration) WithAPIVersion(value string) *ControllerRevisionApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -105,7 +105,7 @@ func (b *ControllerRevisionApplyConfiguration) WithAPIVersion(value string) *Con // If called multiple times, the Name field is set to the value of the last call. func (b *ControllerRevisionApplyConfiguration) WithName(value string) *ControllerRevisionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -114,7 +114,7 @@ func (b *ControllerRevisionApplyConfiguration) WithName(value string) *Controlle // If called multiple times, the GenerateName field is set to the value of the last call. 
func (b *ControllerRevisionApplyConfiguration) WithGenerateName(value string) *ControllerRevisionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -123,7 +123,7 @@ func (b *ControllerRevisionApplyConfiguration) WithGenerateName(value string) *C // If called multiple times, the Namespace field is set to the value of the last call. func (b *ControllerRevisionApplyConfiguration) WithNamespace(value string) *ControllerRevisionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -132,7 +132,7 @@ func (b *ControllerRevisionApplyConfiguration) WithNamespace(value string) *Cont // If called multiple times, the UID field is set to the value of the last call. func (b *ControllerRevisionApplyConfiguration) WithUID(value types.UID) *ControllerRevisionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -141,7 +141,7 @@ func (b *ControllerRevisionApplyConfiguration) WithUID(value types.UID) *Control // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *ControllerRevisionApplyConfiguration) WithResourceVersion(value string) *ControllerRevisionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -150,25 +150,25 @@ func (b *ControllerRevisionApplyConfiguration) WithResourceVersion(value string) // If called multiple times, the Generation field is set to the value of the last call. func (b *ControllerRevisionApplyConfiguration) WithGeneration(value int64) *ControllerRevisionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CreationTimestamp field is set to the value of the last call. -func (b *ControllerRevisionApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ControllerRevisionApplyConfiguration { +func (b *ControllerRevisionApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *ControllerRevisionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. 
-func (b *ControllerRevisionApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ControllerRevisionApplyConfiguration { +func (b *ControllerRevisionApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *ControllerRevisionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -177,7 +177,7 @@ func (b *ControllerRevisionApplyConfiguration) WithDeletionTimestamp(value metav // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *ControllerRevisionApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ControllerRevisionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -187,11 +187,11 @@ func (b *ControllerRevisionApplyConfiguration) WithDeletionGracePeriodSeconds(va // overwriting an existing map entries in Labels field with the same key. func (b *ControllerRevisionApplyConfiguration) WithLabels(entries map[string]string) *ControllerRevisionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -202,11 +202,11 @@ func (b *ControllerRevisionApplyConfiguration) WithLabels(entries map[string]str // overwriting an existing map entries in Annotations field with the same key. func (b *ControllerRevisionApplyConfiguration) WithAnnotations(entries map[string]string) *ControllerRevisionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -214,13 +214,13 @@ func (b *ControllerRevisionApplyConfiguration) WithAnnotations(entries map[strin // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the OwnerReferences field. 
-func (b *ControllerRevisionApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ControllerRevisionApplyConfiguration { +func (b *ControllerRevisionApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *ControllerRevisionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -231,14 +231,14 @@ func (b *ControllerRevisionApplyConfiguration) WithOwnerReferences(values ...*v1 func (b *ControllerRevisionApplyConfiguration) WithFinalizers(values ...string) *ControllerRevisionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } func (b *ControllerRevisionApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} } } @@ -261,5 +261,5 @@ func (b *ControllerRevisionApplyConfiguration) WithRevision(value int64) *Contro // GetName retrieves the value of the Name field in the declarative configuration. func (b *ControllerRevisionApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/daemonset.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/daemonset.go index a15785651..47883d043 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/daemonset.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/daemonset.go @@ -19,21 +19,21 @@ limitations under the License. package v1 import ( - apiappsv1 "k8s.io/api/apps/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + appsv1 "k8s.io/api/apps/v1" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" internal "k8s.io/client-go/applyconfigurations/internal" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) // DaemonSetApplyConfiguration represents a declarative configuration of the DaemonSet type for use // with apply. 
type DaemonSetApplyConfiguration struct { - v1.TypeMetaApplyConfiguration `json:",inline"` - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Spec *DaemonSetSpecApplyConfiguration `json:"spec,omitempty"` - Status *DaemonSetStatusApplyConfiguration `json:"status,omitempty"` + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *DaemonSetSpecApplyConfiguration `json:"spec,omitempty"` + Status *DaemonSetStatusApplyConfiguration `json:"status,omitempty"` } // DaemonSet constructs a declarative configuration of the DaemonSet type for use with @@ -58,18 +58,18 @@ func DaemonSet(name, namespace string) *DaemonSetApplyConfiguration { // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! -func ExtractDaemonSet(daemonSet *apiappsv1.DaemonSet, fieldManager string) (*DaemonSetApplyConfiguration, error) { +func ExtractDaemonSet(daemonSet *appsv1.DaemonSet, fieldManager string) (*DaemonSetApplyConfiguration, error) { return extractDaemonSet(daemonSet, fieldManager, "") } // ExtractDaemonSetStatus is the same as ExtractDaemonSet except // that it extracts the status subresource applied configuration. // Experimental! -func ExtractDaemonSetStatus(daemonSet *apiappsv1.DaemonSet, fieldManager string) (*DaemonSetApplyConfiguration, error) { +func ExtractDaemonSetStatus(daemonSet *appsv1.DaemonSet, fieldManager string) (*DaemonSetApplyConfiguration, error) { return extractDaemonSet(daemonSet, fieldManager, "status") } -func extractDaemonSet(daemonSet *apiappsv1.DaemonSet, fieldManager string, subresource string) (*DaemonSetApplyConfiguration, error) { +func extractDaemonSet(daemonSet *appsv1.DaemonSet, fieldManager string, subresource string) (*DaemonSetApplyConfiguration, error) { b := &DaemonSetApplyConfiguration{} err := managedfields.ExtractInto(daemonSet, internal.Parser().Type("io.k8s.api.apps.v1.DaemonSet"), fieldManager, b, subresource) if err != nil { @@ -87,7 +87,7 @@ func extractDaemonSet(daemonSet *apiappsv1.DaemonSet, fieldManager string, subre // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *DaemonSetApplyConfiguration) WithKind(value string) *DaemonSetApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -95,7 +95,7 @@ func (b *DaemonSetApplyConfiguration) WithKind(value string) *DaemonSetApplyConf // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *DaemonSetApplyConfiguration) WithAPIVersion(value string) *DaemonSetApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -104,7 +104,7 @@ func (b *DaemonSetApplyConfiguration) WithAPIVersion(value string) *DaemonSetApp // If called multiple times, the Name field is set to the value of the last call. 
func (b *DaemonSetApplyConfiguration) WithName(value string) *DaemonSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -113,7 +113,7 @@ func (b *DaemonSetApplyConfiguration) WithName(value string) *DaemonSetApplyConf // If called multiple times, the GenerateName field is set to the value of the last call. func (b *DaemonSetApplyConfiguration) WithGenerateName(value string) *DaemonSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -122,7 +122,7 @@ func (b *DaemonSetApplyConfiguration) WithGenerateName(value string) *DaemonSetA // If called multiple times, the Namespace field is set to the value of the last call. func (b *DaemonSetApplyConfiguration) WithNamespace(value string) *DaemonSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -131,7 +131,7 @@ func (b *DaemonSetApplyConfiguration) WithNamespace(value string) *DaemonSetAppl // If called multiple times, the UID field is set to the value of the last call. func (b *DaemonSetApplyConfiguration) WithUID(value types.UID) *DaemonSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -140,7 +140,7 @@ func (b *DaemonSetApplyConfiguration) WithUID(value types.UID) *DaemonSetApplyCo // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *DaemonSetApplyConfiguration) WithResourceVersion(value string) *DaemonSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -149,25 +149,25 @@ func (b *DaemonSetApplyConfiguration) WithResourceVersion(value string) *DaemonS // If called multiple times, the Generation field is set to the value of the last call. func (b *DaemonSetApplyConfiguration) WithGeneration(value int64) *DaemonSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CreationTimestamp field is set to the value of the last call. -func (b *DaemonSetApplyConfiguration) WithCreationTimestamp(value metav1.Time) *DaemonSetApplyConfiguration { +func (b *DaemonSetApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *DaemonSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. 
-func (b *DaemonSetApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *DaemonSetApplyConfiguration { +func (b *DaemonSetApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *DaemonSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -176,7 +176,7 @@ func (b *DaemonSetApplyConfiguration) WithDeletionTimestamp(value metav1.Time) * // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *DaemonSetApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *DaemonSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -186,11 +186,11 @@ func (b *DaemonSetApplyConfiguration) WithDeletionGracePeriodSeconds(value int64 // overwriting an existing map entries in Labels field with the same key. func (b *DaemonSetApplyConfiguration) WithLabels(entries map[string]string) *DaemonSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -201,11 +201,11 @@ func (b *DaemonSetApplyConfiguration) WithLabels(entries map[string]string) *Dae // overwriting an existing map entries in Annotations field with the same key. func (b *DaemonSetApplyConfiguration) WithAnnotations(entries map[string]string) *DaemonSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -213,13 +213,13 @@ func (b *DaemonSetApplyConfiguration) WithAnnotations(entries map[string]string) // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the OwnerReferences field. 
-func (b *DaemonSetApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *DaemonSetApplyConfiguration { +func (b *DaemonSetApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *DaemonSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -230,14 +230,14 @@ func (b *DaemonSetApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerRef func (b *DaemonSetApplyConfiguration) WithFinalizers(values ...string) *DaemonSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } func (b *DaemonSetApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} } } @@ -260,5 +260,5 @@ func (b *DaemonSetApplyConfiguration) WithStatus(value *DaemonSetStatusApplyConf // GetName retrieves the value of the Name field in the declarative configuration. func (b *DaemonSetApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/daemonsetcondition.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/daemonsetcondition.go index de91745b8..8c56e4994 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/daemonsetcondition.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/daemonsetcondition.go @@ -19,7 +19,7 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/apps/v1" + appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -27,11 +27,11 @@ import ( // DaemonSetConditionApplyConfiguration represents a declarative configuration of the DaemonSetCondition type for use // with apply. type DaemonSetConditionApplyConfiguration struct { - Type *v1.DaemonSetConditionType `json:"type,omitempty"` - Status *corev1.ConditionStatus `json:"status,omitempty"` - LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"` - Reason *string `json:"reason,omitempty"` - Message *string `json:"message,omitempty"` + Type *appsv1.DaemonSetConditionType `json:"type,omitempty"` + Status *corev1.ConditionStatus `json:"status,omitempty"` + LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"` + Reason *string `json:"reason,omitempty"` + Message *string `json:"message,omitempty"` } // DaemonSetConditionApplyConfiguration constructs a declarative configuration of the DaemonSetCondition type for use with @@ -43,7 +43,7 @@ func DaemonSetCondition() *DaemonSetConditionApplyConfiguration { // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. 
-func (b *DaemonSetConditionApplyConfiguration) WithType(value v1.DaemonSetConditionType) *DaemonSetConditionApplyConfiguration { +func (b *DaemonSetConditionApplyConfiguration) WithType(value appsv1.DaemonSetConditionType) *DaemonSetConditionApplyConfiguration { b.Type = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/daemonsetspec.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/daemonsetspec.go index 99dc5abae..d2382b80e 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/daemonsetspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/daemonsetspec.go @@ -20,13 +20,13 @@ package v1 import ( corev1 "k8s.io/client-go/applyconfigurations/core/v1" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) // DaemonSetSpecApplyConfiguration represents a declarative configuration of the DaemonSetSpec type for use // with apply. type DaemonSetSpecApplyConfiguration struct { - Selector *v1.LabelSelectorApplyConfiguration `json:"selector,omitempty"` + Selector *metav1.LabelSelectorApplyConfiguration `json:"selector,omitempty"` Template *corev1.PodTemplateSpecApplyConfiguration `json:"template,omitempty"` UpdateStrategy *DaemonSetUpdateStrategyApplyConfiguration `json:"updateStrategy,omitempty"` MinReadySeconds *int32 `json:"minReadySeconds,omitempty"` @@ -42,7 +42,7 @@ func DaemonSetSpec() *DaemonSetSpecApplyConfiguration { // WithSelector sets the Selector field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Selector field is set to the value of the last call. -func (b *DaemonSetSpecApplyConfiguration) WithSelector(value *v1.LabelSelectorApplyConfiguration) *DaemonSetSpecApplyConfiguration { +func (b *DaemonSetSpecApplyConfiguration) WithSelector(value *metav1.LabelSelectorApplyConfiguration) *DaemonSetSpecApplyConfiguration { b.Selector = value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/daemonsetupdatestrategy.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/daemonsetupdatestrategy.go index 15af4e66b..993e1bd57 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/daemonsetupdatestrategy.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/daemonsetupdatestrategy.go @@ -19,13 +19,13 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/apps/v1" + appsv1 "k8s.io/api/apps/v1" ) // DaemonSetUpdateStrategyApplyConfiguration represents a declarative configuration of the DaemonSetUpdateStrategy type for use // with apply. type DaemonSetUpdateStrategyApplyConfiguration struct { - Type *v1.DaemonSetUpdateStrategyType `json:"type,omitempty"` + Type *appsv1.DaemonSetUpdateStrategyType `json:"type,omitempty"` RollingUpdate *RollingUpdateDaemonSetApplyConfiguration `json:"rollingUpdate,omitempty"` } @@ -38,7 +38,7 @@ func DaemonSetUpdateStrategy() *DaemonSetUpdateStrategyApplyConfiguration { // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. 
-func (b *DaemonSetUpdateStrategyApplyConfiguration) WithType(value v1.DaemonSetUpdateStrategyType) *DaemonSetUpdateStrategyApplyConfiguration { +func (b *DaemonSetUpdateStrategyApplyConfiguration) WithType(value appsv1.DaemonSetUpdateStrategyType) *DaemonSetUpdateStrategyApplyConfiguration { b.Type = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/deployment.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/deployment.go index 52b7a21b7..485357c00 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/deployment.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/deployment.go @@ -19,21 +19,21 @@ limitations under the License. package v1 import ( - apiappsv1 "k8s.io/api/apps/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + appsv1 "k8s.io/api/apps/v1" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" internal "k8s.io/client-go/applyconfigurations/internal" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) // DeploymentApplyConfiguration represents a declarative configuration of the Deployment type for use // with apply. type DeploymentApplyConfiguration struct { - v1.TypeMetaApplyConfiguration `json:",inline"` - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Spec *DeploymentSpecApplyConfiguration `json:"spec,omitempty"` - Status *DeploymentStatusApplyConfiguration `json:"status,omitempty"` + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *DeploymentSpecApplyConfiguration `json:"spec,omitempty"` + Status *DeploymentStatusApplyConfiguration `json:"status,omitempty"` } // Deployment constructs a declarative configuration of the Deployment type for use with @@ -58,18 +58,18 @@ func Deployment(name, namespace string) *DeploymentApplyConfiguration { // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! -func ExtractDeployment(deployment *apiappsv1.Deployment, fieldManager string) (*DeploymentApplyConfiguration, error) { +func ExtractDeployment(deployment *appsv1.Deployment, fieldManager string) (*DeploymentApplyConfiguration, error) { return extractDeployment(deployment, fieldManager, "") } // ExtractDeploymentStatus is the same as ExtractDeployment except // that it extracts the status subresource applied configuration. // Experimental! 
-func ExtractDeploymentStatus(deployment *apiappsv1.Deployment, fieldManager string) (*DeploymentApplyConfiguration, error) { +func ExtractDeploymentStatus(deployment *appsv1.Deployment, fieldManager string) (*DeploymentApplyConfiguration, error) { return extractDeployment(deployment, fieldManager, "status") } -func extractDeployment(deployment *apiappsv1.Deployment, fieldManager string, subresource string) (*DeploymentApplyConfiguration, error) { +func extractDeployment(deployment *appsv1.Deployment, fieldManager string, subresource string) (*DeploymentApplyConfiguration, error) { b := &DeploymentApplyConfiguration{} err := managedfields.ExtractInto(deployment, internal.Parser().Type("io.k8s.api.apps.v1.Deployment"), fieldManager, b, subresource) if err != nil { @@ -87,7 +87,7 @@ func extractDeployment(deployment *apiappsv1.Deployment, fieldManager string, su // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *DeploymentApplyConfiguration) WithKind(value string) *DeploymentApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -95,7 +95,7 @@ func (b *DeploymentApplyConfiguration) WithKind(value string) *DeploymentApplyCo // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *DeploymentApplyConfiguration) WithAPIVersion(value string) *DeploymentApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -104,7 +104,7 @@ func (b *DeploymentApplyConfiguration) WithAPIVersion(value string) *DeploymentA // If called multiple times, the Name field is set to the value of the last call. func (b *DeploymentApplyConfiguration) WithName(value string) *DeploymentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -113,7 +113,7 @@ func (b *DeploymentApplyConfiguration) WithName(value string) *DeploymentApplyCo // If called multiple times, the GenerateName field is set to the value of the last call. func (b *DeploymentApplyConfiguration) WithGenerateName(value string) *DeploymentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -122,7 +122,7 @@ func (b *DeploymentApplyConfiguration) WithGenerateName(value string) *Deploymen // If called multiple times, the Namespace field is set to the value of the last call. func (b *DeploymentApplyConfiguration) WithNamespace(value string) *DeploymentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -131,7 +131,7 @@ func (b *DeploymentApplyConfiguration) WithNamespace(value string) *DeploymentAp // If called multiple times, the UID field is set to the value of the last call. func (b *DeploymentApplyConfiguration) WithUID(value types.UID) *DeploymentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -140,7 +140,7 @@ func (b *DeploymentApplyConfiguration) WithUID(value types.UID) *DeploymentApply // If called multiple times, the ResourceVersion field is set to the value of the last call. 
func (b *DeploymentApplyConfiguration) WithResourceVersion(value string) *DeploymentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -149,25 +149,25 @@ func (b *DeploymentApplyConfiguration) WithResourceVersion(value string) *Deploy // If called multiple times, the Generation field is set to the value of the last call. func (b *DeploymentApplyConfiguration) WithGeneration(value int64) *DeploymentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CreationTimestamp field is set to the value of the last call. -func (b *DeploymentApplyConfiguration) WithCreationTimestamp(value metav1.Time) *DeploymentApplyConfiguration { +func (b *DeploymentApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *DeploymentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. -func (b *DeploymentApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *DeploymentApplyConfiguration { +func (b *DeploymentApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *DeploymentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -176,7 +176,7 @@ func (b *DeploymentApplyConfiguration) WithDeletionTimestamp(value metav1.Time) // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *DeploymentApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *DeploymentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -186,11 +186,11 @@ func (b *DeploymentApplyConfiguration) WithDeletionGracePeriodSeconds(value int6 // overwriting an existing map entries in Labels field with the same key. func (b *DeploymentApplyConfiguration) WithLabels(entries map[string]string) *DeploymentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -201,11 +201,11 @@ func (b *DeploymentApplyConfiguration) WithLabels(entries map[string]string) *De // overwriting an existing map entries in Annotations field with the same key. 
func (b *DeploymentApplyConfiguration) WithAnnotations(entries map[string]string) *DeploymentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -213,13 +213,13 @@ func (b *DeploymentApplyConfiguration) WithAnnotations(entries map[string]string // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the OwnerReferences field. -func (b *DeploymentApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *DeploymentApplyConfiguration { +func (b *DeploymentApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *DeploymentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -230,14 +230,14 @@ func (b *DeploymentApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerRe func (b *DeploymentApplyConfiguration) WithFinalizers(values ...string) *DeploymentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } func (b *DeploymentApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} } } @@ -260,5 +260,5 @@ func (b *DeploymentApplyConfiguration) WithStatus(value *DeploymentStatusApplyCo // GetName retrieves the value of the Name field in the declarative configuration. func (b *DeploymentApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/deploymentcondition.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/deploymentcondition.go index 84df752bc..3a6693637 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/deploymentcondition.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/deploymentcondition.go @@ -19,7 +19,7 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/apps/v1" + appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -27,12 +27,12 @@ import ( // DeploymentConditionApplyConfiguration represents a declarative configuration of the DeploymentCondition type for use // with apply. 
type DeploymentConditionApplyConfiguration struct { - Type *v1.DeploymentConditionType `json:"type,omitempty"` - Status *corev1.ConditionStatus `json:"status,omitempty"` - LastUpdateTime *metav1.Time `json:"lastUpdateTime,omitempty"` - LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"` - Reason *string `json:"reason,omitempty"` - Message *string `json:"message,omitempty"` + Type *appsv1.DeploymentConditionType `json:"type,omitempty"` + Status *corev1.ConditionStatus `json:"status,omitempty"` + LastUpdateTime *metav1.Time `json:"lastUpdateTime,omitempty"` + LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"` + Reason *string `json:"reason,omitempty"` + Message *string `json:"message,omitempty"` } // DeploymentConditionApplyConfiguration constructs a declarative configuration of the DeploymentCondition type for use with @@ -44,7 +44,7 @@ func DeploymentCondition() *DeploymentConditionApplyConfiguration { // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. -func (b *DeploymentConditionApplyConfiguration) WithType(value v1.DeploymentConditionType) *DeploymentConditionApplyConfiguration { +func (b *DeploymentConditionApplyConfiguration) WithType(value appsv1.DeploymentConditionType) *DeploymentConditionApplyConfiguration { b.Type = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/deploymentspec.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/deploymentspec.go index 063f1c276..5f34b0582 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/deploymentspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/deploymentspec.go @@ -20,14 +20,14 @@ package v1 import ( corev1 "k8s.io/client-go/applyconfigurations/core/v1" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) // DeploymentSpecApplyConfiguration represents a declarative configuration of the DeploymentSpec type for use // with apply. type DeploymentSpecApplyConfiguration struct { Replicas *int32 `json:"replicas,omitempty"` - Selector *v1.LabelSelectorApplyConfiguration `json:"selector,omitempty"` + Selector *metav1.LabelSelectorApplyConfiguration `json:"selector,omitempty"` Template *corev1.PodTemplateSpecApplyConfiguration `json:"template,omitempty"` Strategy *DeploymentStrategyApplyConfiguration `json:"strategy,omitempty"` MinReadySeconds *int32 `json:"minReadySeconds,omitempty"` @@ -53,7 +53,7 @@ func (b *DeploymentSpecApplyConfiguration) WithReplicas(value int32) *Deployment // WithSelector sets the Selector field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Selector field is set to the value of the last call. 
-func (b *DeploymentSpecApplyConfiguration) WithSelector(value *v1.LabelSelectorApplyConfiguration) *DeploymentSpecApplyConfiguration { +func (b *DeploymentSpecApplyConfiguration) WithSelector(value *metav1.LabelSelectorApplyConfiguration) *DeploymentSpecApplyConfiguration { b.Selector = value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/deploymentstrategy.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/deploymentstrategy.go index dc4b97c55..7bf8a1595 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/deploymentstrategy.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/deploymentstrategy.go @@ -19,13 +19,13 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/apps/v1" + appsv1 "k8s.io/api/apps/v1" ) // DeploymentStrategyApplyConfiguration represents a declarative configuration of the DeploymentStrategy type for use // with apply. type DeploymentStrategyApplyConfiguration struct { - Type *v1.DeploymentStrategyType `json:"type,omitempty"` + Type *appsv1.DeploymentStrategyType `json:"type,omitempty"` RollingUpdate *RollingUpdateDeploymentApplyConfiguration `json:"rollingUpdate,omitempty"` } @@ -38,7 +38,7 @@ func DeploymentStrategy() *DeploymentStrategyApplyConfiguration { // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. -func (b *DeploymentStrategyApplyConfiguration) WithType(value v1.DeploymentStrategyType) *DeploymentStrategyApplyConfiguration { +func (b *DeploymentStrategyApplyConfiguration) WithType(value appsv1.DeploymentStrategyType) *DeploymentStrategyApplyConfiguration { b.Type = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/replicaset.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/replicaset.go index 35ca4e4df..6e9c0e14f 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/replicaset.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/replicaset.go @@ -19,21 +19,21 @@ limitations under the License. package v1 import ( - apiappsv1 "k8s.io/api/apps/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + appsv1 "k8s.io/api/apps/v1" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" internal "k8s.io/client-go/applyconfigurations/internal" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) // ReplicaSetApplyConfiguration represents a declarative configuration of the ReplicaSet type for use // with apply. 
type ReplicaSetApplyConfiguration struct { - v1.TypeMetaApplyConfiguration `json:",inline"` - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Spec *ReplicaSetSpecApplyConfiguration `json:"spec,omitempty"` - Status *ReplicaSetStatusApplyConfiguration `json:"status,omitempty"` + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *ReplicaSetSpecApplyConfiguration `json:"spec,omitempty"` + Status *ReplicaSetStatusApplyConfiguration `json:"status,omitempty"` } // ReplicaSet constructs a declarative configuration of the ReplicaSet type for use with @@ -58,18 +58,18 @@ func ReplicaSet(name, namespace string) *ReplicaSetApplyConfiguration { // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! -func ExtractReplicaSet(replicaSet *apiappsv1.ReplicaSet, fieldManager string) (*ReplicaSetApplyConfiguration, error) { +func ExtractReplicaSet(replicaSet *appsv1.ReplicaSet, fieldManager string) (*ReplicaSetApplyConfiguration, error) { return extractReplicaSet(replicaSet, fieldManager, "") } // ExtractReplicaSetStatus is the same as ExtractReplicaSet except // that it extracts the status subresource applied configuration. // Experimental! -func ExtractReplicaSetStatus(replicaSet *apiappsv1.ReplicaSet, fieldManager string) (*ReplicaSetApplyConfiguration, error) { +func ExtractReplicaSetStatus(replicaSet *appsv1.ReplicaSet, fieldManager string) (*ReplicaSetApplyConfiguration, error) { return extractReplicaSet(replicaSet, fieldManager, "status") } -func extractReplicaSet(replicaSet *apiappsv1.ReplicaSet, fieldManager string, subresource string) (*ReplicaSetApplyConfiguration, error) { +func extractReplicaSet(replicaSet *appsv1.ReplicaSet, fieldManager string, subresource string) (*ReplicaSetApplyConfiguration, error) { b := &ReplicaSetApplyConfiguration{} err := managedfields.ExtractInto(replicaSet, internal.Parser().Type("io.k8s.api.apps.v1.ReplicaSet"), fieldManager, b, subresource) if err != nil { @@ -87,7 +87,7 @@ func extractReplicaSet(replicaSet *apiappsv1.ReplicaSet, fieldManager string, su // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *ReplicaSetApplyConfiguration) WithKind(value string) *ReplicaSetApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -95,7 +95,7 @@ func (b *ReplicaSetApplyConfiguration) WithKind(value string) *ReplicaSetApplyCo // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *ReplicaSetApplyConfiguration) WithAPIVersion(value string) *ReplicaSetApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -104,7 +104,7 @@ func (b *ReplicaSetApplyConfiguration) WithAPIVersion(value string) *ReplicaSetA // If called multiple times, the Name field is set to the value of the last call. 
func (b *ReplicaSetApplyConfiguration) WithName(value string) *ReplicaSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -113,7 +113,7 @@ func (b *ReplicaSetApplyConfiguration) WithName(value string) *ReplicaSetApplyCo // If called multiple times, the GenerateName field is set to the value of the last call. func (b *ReplicaSetApplyConfiguration) WithGenerateName(value string) *ReplicaSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -122,7 +122,7 @@ func (b *ReplicaSetApplyConfiguration) WithGenerateName(value string) *ReplicaSe // If called multiple times, the Namespace field is set to the value of the last call. func (b *ReplicaSetApplyConfiguration) WithNamespace(value string) *ReplicaSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -131,7 +131,7 @@ func (b *ReplicaSetApplyConfiguration) WithNamespace(value string) *ReplicaSetAp // If called multiple times, the UID field is set to the value of the last call. func (b *ReplicaSetApplyConfiguration) WithUID(value types.UID) *ReplicaSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -140,7 +140,7 @@ func (b *ReplicaSetApplyConfiguration) WithUID(value types.UID) *ReplicaSetApply // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *ReplicaSetApplyConfiguration) WithResourceVersion(value string) *ReplicaSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -149,25 +149,25 @@ func (b *ReplicaSetApplyConfiguration) WithResourceVersion(value string) *Replic // If called multiple times, the Generation field is set to the value of the last call. func (b *ReplicaSetApplyConfiguration) WithGeneration(value int64) *ReplicaSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CreationTimestamp field is set to the value of the last call. -func (b *ReplicaSetApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ReplicaSetApplyConfiguration { +func (b *ReplicaSetApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *ReplicaSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. 
-func (b *ReplicaSetApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ReplicaSetApplyConfiguration { +func (b *ReplicaSetApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *ReplicaSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -176,7 +176,7 @@ func (b *ReplicaSetApplyConfiguration) WithDeletionTimestamp(value metav1.Time) // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *ReplicaSetApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ReplicaSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -186,11 +186,11 @@ func (b *ReplicaSetApplyConfiguration) WithDeletionGracePeriodSeconds(value int6 // overwriting an existing map entries in Labels field with the same key. func (b *ReplicaSetApplyConfiguration) WithLabels(entries map[string]string) *ReplicaSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -201,11 +201,11 @@ func (b *ReplicaSetApplyConfiguration) WithLabels(entries map[string]string) *Re // overwriting an existing map entries in Annotations field with the same key. func (b *ReplicaSetApplyConfiguration) WithAnnotations(entries map[string]string) *ReplicaSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -213,13 +213,13 @@ func (b *ReplicaSetApplyConfiguration) WithAnnotations(entries map[string]string // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the OwnerReferences field. 
-func (b *ReplicaSetApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ReplicaSetApplyConfiguration { +func (b *ReplicaSetApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *ReplicaSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -230,14 +230,14 @@ func (b *ReplicaSetApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerRe func (b *ReplicaSetApplyConfiguration) WithFinalizers(values ...string) *ReplicaSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } func (b *ReplicaSetApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} } } @@ -260,5 +260,5 @@ func (b *ReplicaSetApplyConfiguration) WithStatus(value *ReplicaSetStatusApplyCo // GetName retrieves the value of the Name field in the declarative configuration. func (b *ReplicaSetApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/replicasetcondition.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/replicasetcondition.go index 32da80842..0325ce058 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/replicasetcondition.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/replicasetcondition.go @@ -19,7 +19,7 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/apps/v1" + appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -27,11 +27,11 @@ import ( // ReplicaSetConditionApplyConfiguration represents a declarative configuration of the ReplicaSetCondition type for use // with apply. type ReplicaSetConditionApplyConfiguration struct { - Type *v1.ReplicaSetConditionType `json:"type,omitempty"` - Status *corev1.ConditionStatus `json:"status,omitempty"` - LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"` - Reason *string `json:"reason,omitempty"` - Message *string `json:"message,omitempty"` + Type *appsv1.ReplicaSetConditionType `json:"type,omitempty"` + Status *corev1.ConditionStatus `json:"status,omitempty"` + LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"` + Reason *string `json:"reason,omitempty"` + Message *string `json:"message,omitempty"` } // ReplicaSetConditionApplyConfiguration constructs a declarative configuration of the ReplicaSetCondition type for use with @@ -43,7 +43,7 @@ func ReplicaSetCondition() *ReplicaSetConditionApplyConfiguration { // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. 
-func (b *ReplicaSetConditionApplyConfiguration) WithType(value v1.ReplicaSetConditionType) *ReplicaSetConditionApplyConfiguration { +func (b *ReplicaSetConditionApplyConfiguration) WithType(value appsv1.ReplicaSetConditionType) *ReplicaSetConditionApplyConfiguration { b.Type = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/replicasetspec.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/replicasetspec.go index 039058486..714ddcfe3 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/replicasetspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/replicasetspec.go @@ -20,7 +20,7 @@ package v1 import ( corev1 "k8s.io/client-go/applyconfigurations/core/v1" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) // ReplicaSetSpecApplyConfiguration represents a declarative configuration of the ReplicaSetSpec type for use @@ -28,7 +28,7 @@ import ( type ReplicaSetSpecApplyConfiguration struct { Replicas *int32 `json:"replicas,omitempty"` MinReadySeconds *int32 `json:"minReadySeconds,omitempty"` - Selector *v1.LabelSelectorApplyConfiguration `json:"selector,omitempty"` + Selector *metav1.LabelSelectorApplyConfiguration `json:"selector,omitempty"` Template *corev1.PodTemplateSpecApplyConfiguration `json:"template,omitempty"` } @@ -57,7 +57,7 @@ func (b *ReplicaSetSpecApplyConfiguration) WithMinReadySeconds(value int32) *Rep // WithSelector sets the Selector field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Selector field is set to the value of the last call. -func (b *ReplicaSetSpecApplyConfiguration) WithSelector(value *v1.LabelSelectorApplyConfiguration) *ReplicaSetSpecApplyConfiguration { +func (b *ReplicaSetSpecApplyConfiguration) WithSelector(value *metav1.LabelSelectorApplyConfiguration) *ReplicaSetSpecApplyConfiguration { b.Selector = value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulset.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulset.go index 6f2b340da..cb5306935 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulset.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulset.go @@ -19,21 +19,21 @@ limitations under the License. package v1 import ( - apiappsv1 "k8s.io/api/apps/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + appsv1 "k8s.io/api/apps/v1" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" internal "k8s.io/client-go/applyconfigurations/internal" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) // StatefulSetApplyConfiguration represents a declarative configuration of the StatefulSet type for use // with apply. 
type StatefulSetApplyConfiguration struct { - v1.TypeMetaApplyConfiguration `json:",inline"` - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Spec *StatefulSetSpecApplyConfiguration `json:"spec,omitempty"` - Status *StatefulSetStatusApplyConfiguration `json:"status,omitempty"` + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *StatefulSetSpecApplyConfiguration `json:"spec,omitempty"` + Status *StatefulSetStatusApplyConfiguration `json:"status,omitempty"` } // StatefulSet constructs a declarative configuration of the StatefulSet type for use with @@ -58,18 +58,18 @@ func StatefulSet(name, namespace string) *StatefulSetApplyConfiguration { // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! -func ExtractStatefulSet(statefulSet *apiappsv1.StatefulSet, fieldManager string) (*StatefulSetApplyConfiguration, error) { +func ExtractStatefulSet(statefulSet *appsv1.StatefulSet, fieldManager string) (*StatefulSetApplyConfiguration, error) { return extractStatefulSet(statefulSet, fieldManager, "") } // ExtractStatefulSetStatus is the same as ExtractStatefulSet except // that it extracts the status subresource applied configuration. // Experimental! -func ExtractStatefulSetStatus(statefulSet *apiappsv1.StatefulSet, fieldManager string) (*StatefulSetApplyConfiguration, error) { +func ExtractStatefulSetStatus(statefulSet *appsv1.StatefulSet, fieldManager string) (*StatefulSetApplyConfiguration, error) { return extractStatefulSet(statefulSet, fieldManager, "status") } -func extractStatefulSet(statefulSet *apiappsv1.StatefulSet, fieldManager string, subresource string) (*StatefulSetApplyConfiguration, error) { +func extractStatefulSet(statefulSet *appsv1.StatefulSet, fieldManager string, subresource string) (*StatefulSetApplyConfiguration, error) { b := &StatefulSetApplyConfiguration{} err := managedfields.ExtractInto(statefulSet, internal.Parser().Type("io.k8s.api.apps.v1.StatefulSet"), fieldManager, b, subresource) if err != nil { @@ -87,7 +87,7 @@ func extractStatefulSet(statefulSet *apiappsv1.StatefulSet, fieldManager string, // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *StatefulSetApplyConfiguration) WithKind(value string) *StatefulSetApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -95,7 +95,7 @@ func (b *StatefulSetApplyConfiguration) WithKind(value string) *StatefulSetApply // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *StatefulSetApplyConfiguration) WithAPIVersion(value string) *StatefulSetApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -104,7 +104,7 @@ func (b *StatefulSetApplyConfiguration) WithAPIVersion(value string) *StatefulSe // If called multiple times, the Name field is set to the value of the last call. 
func (b *StatefulSetApplyConfiguration) WithName(value string) *StatefulSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -113,7 +113,7 @@ func (b *StatefulSetApplyConfiguration) WithName(value string) *StatefulSetApply // If called multiple times, the GenerateName field is set to the value of the last call. func (b *StatefulSetApplyConfiguration) WithGenerateName(value string) *StatefulSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -122,7 +122,7 @@ func (b *StatefulSetApplyConfiguration) WithGenerateName(value string) *Stateful // If called multiple times, the Namespace field is set to the value of the last call. func (b *StatefulSetApplyConfiguration) WithNamespace(value string) *StatefulSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -131,7 +131,7 @@ func (b *StatefulSetApplyConfiguration) WithNamespace(value string) *StatefulSet // If called multiple times, the UID field is set to the value of the last call. func (b *StatefulSetApplyConfiguration) WithUID(value types.UID) *StatefulSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -140,7 +140,7 @@ func (b *StatefulSetApplyConfiguration) WithUID(value types.UID) *StatefulSetApp // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *StatefulSetApplyConfiguration) WithResourceVersion(value string) *StatefulSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -149,25 +149,25 @@ func (b *StatefulSetApplyConfiguration) WithResourceVersion(value string) *State // If called multiple times, the Generation field is set to the value of the last call. func (b *StatefulSetApplyConfiguration) WithGeneration(value int64) *StatefulSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CreationTimestamp field is set to the value of the last call. -func (b *StatefulSetApplyConfiguration) WithCreationTimestamp(value metav1.Time) *StatefulSetApplyConfiguration { +func (b *StatefulSetApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *StatefulSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. 
-func (b *StatefulSetApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *StatefulSetApplyConfiguration { +func (b *StatefulSetApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *StatefulSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -176,7 +176,7 @@ func (b *StatefulSetApplyConfiguration) WithDeletionTimestamp(value metav1.Time) // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *StatefulSetApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *StatefulSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -186,11 +186,11 @@ func (b *StatefulSetApplyConfiguration) WithDeletionGracePeriodSeconds(value int // overwriting an existing map entries in Labels field with the same key. func (b *StatefulSetApplyConfiguration) WithLabels(entries map[string]string) *StatefulSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -201,11 +201,11 @@ func (b *StatefulSetApplyConfiguration) WithLabels(entries map[string]string) *S // overwriting an existing map entries in Annotations field with the same key. func (b *StatefulSetApplyConfiguration) WithAnnotations(entries map[string]string) *StatefulSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -213,13 +213,13 @@ func (b *StatefulSetApplyConfiguration) WithAnnotations(entries map[string]strin // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the OwnerReferences field. 
-func (b *StatefulSetApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *StatefulSetApplyConfiguration { +func (b *StatefulSetApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *StatefulSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -230,14 +230,14 @@ func (b *StatefulSetApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerR func (b *StatefulSetApplyConfiguration) WithFinalizers(values ...string) *StatefulSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } func (b *StatefulSetApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} } } @@ -260,5 +260,5 @@ func (b *StatefulSetApplyConfiguration) WithStatus(value *StatefulSetStatusApply // GetName retrieves the value of the Name field in the declarative configuration. func (b *StatefulSetApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetcondition.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetcondition.go index c62a5e854..45b2ad81f 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetcondition.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetcondition.go @@ -19,7 +19,7 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/apps/v1" + appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -27,11 +27,11 @@ import ( // StatefulSetConditionApplyConfiguration represents a declarative configuration of the StatefulSetCondition type for use // with apply. type StatefulSetConditionApplyConfiguration struct { - Type *v1.StatefulSetConditionType `json:"type,omitempty"` - Status *corev1.ConditionStatus `json:"status,omitempty"` - LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"` - Reason *string `json:"reason,omitempty"` - Message *string `json:"message,omitempty"` + Type *appsv1.StatefulSetConditionType `json:"type,omitempty"` + Status *corev1.ConditionStatus `json:"status,omitempty"` + LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"` + Reason *string `json:"reason,omitempty"` + Message *string `json:"message,omitempty"` } // StatefulSetConditionApplyConfiguration constructs a declarative configuration of the StatefulSetCondition type for use with @@ -43,7 +43,7 @@ func StatefulSetCondition() *StatefulSetConditionApplyConfiguration { // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. 
-func (b *StatefulSetConditionApplyConfiguration) WithType(value v1.StatefulSetConditionType) *StatefulSetConditionApplyConfiguration { +func (b *StatefulSetConditionApplyConfiguration) WithType(value appsv1.StatefulSetConditionType) *StatefulSetConditionApplyConfiguration { b.Type = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetpersistentvolumeclaimretentionpolicy.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetpersistentvolumeclaimretentionpolicy.go index cd65fd436..dff3e2a76 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetpersistentvolumeclaimretentionpolicy.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetpersistentvolumeclaimretentionpolicy.go @@ -19,14 +19,14 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/apps/v1" + appsv1 "k8s.io/api/apps/v1" ) // StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration represents a declarative configuration of the StatefulSetPersistentVolumeClaimRetentionPolicy type for use // with apply. type StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration struct { - WhenDeleted *v1.PersistentVolumeClaimRetentionPolicyType `json:"whenDeleted,omitempty"` - WhenScaled *v1.PersistentVolumeClaimRetentionPolicyType `json:"whenScaled,omitempty"` + WhenDeleted *appsv1.PersistentVolumeClaimRetentionPolicyType `json:"whenDeleted,omitempty"` + WhenScaled *appsv1.PersistentVolumeClaimRetentionPolicyType `json:"whenScaled,omitempty"` } // StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration constructs a declarative configuration of the StatefulSetPersistentVolumeClaimRetentionPolicy type for use with @@ -38,7 +38,7 @@ func StatefulSetPersistentVolumeClaimRetentionPolicy() *StatefulSetPersistentVol // WithWhenDeleted sets the WhenDeleted field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the WhenDeleted field is set to the value of the last call. -func (b *StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration) WithWhenDeleted(value v1.PersistentVolumeClaimRetentionPolicyType) *StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration { +func (b *StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration) WithWhenDeleted(value appsv1.PersistentVolumeClaimRetentionPolicyType) *StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration { b.WhenDeleted = &value return b } @@ -46,7 +46,7 @@ func (b *StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration) With // WithWhenScaled sets the WhenScaled field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the WhenScaled field is set to the value of the last call. 
-func (b *StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration) WithWhenScaled(value v1.PersistentVolumeClaimRetentionPolicyType) *StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration { +func (b *StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration) WithWhenScaled(value appsv1.PersistentVolumeClaimRetentionPolicyType) *StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration { b.WhenScaled = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetspec.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetspec.go index 1848a963c..c48b64fe3 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetspec.go @@ -21,14 +21,14 @@ package v1 import ( appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/client-go/applyconfigurations/core/v1" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) // StatefulSetSpecApplyConfiguration represents a declarative configuration of the StatefulSetSpec type for use // with apply. type StatefulSetSpecApplyConfiguration struct { Replicas *int32 `json:"replicas,omitempty"` - Selector *v1.LabelSelectorApplyConfiguration `json:"selector,omitempty"` + Selector *metav1.LabelSelectorApplyConfiguration `json:"selector,omitempty"` Template *corev1.PodTemplateSpecApplyConfiguration `json:"template,omitempty"` VolumeClaimTemplates []corev1.PersistentVolumeClaimApplyConfiguration `json:"volumeClaimTemplates,omitempty"` ServiceName *string `json:"serviceName,omitempty"` @@ -57,7 +57,7 @@ func (b *StatefulSetSpecApplyConfiguration) WithReplicas(value int32) *StatefulS // WithSelector sets the Selector field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Selector field is set to the value of the last call. -func (b *StatefulSetSpecApplyConfiguration) WithSelector(value *v1.LabelSelectorApplyConfiguration) *StatefulSetSpecApplyConfiguration { +func (b *StatefulSetSpecApplyConfiguration) WithSelector(value *metav1.LabelSelectorApplyConfiguration) *StatefulSetSpecApplyConfiguration { b.Selector = value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetupdatestrategy.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetupdatestrategy.go index b59e10735..ae135d34d 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetupdatestrategy.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetupdatestrategy.go @@ -19,13 +19,13 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/apps/v1" + appsv1 "k8s.io/api/apps/v1" ) // StatefulSetUpdateStrategyApplyConfiguration represents a declarative configuration of the StatefulSetUpdateStrategy type for use // with apply. 
type StatefulSetUpdateStrategyApplyConfiguration struct { - Type *v1.StatefulSetUpdateStrategyType `json:"type,omitempty"` + Type *appsv1.StatefulSetUpdateStrategyType `json:"type,omitempty"` RollingUpdate *RollingUpdateStatefulSetStrategyApplyConfiguration `json:"rollingUpdate,omitempty"` } @@ -38,7 +38,7 @@ func StatefulSetUpdateStrategy() *StatefulSetUpdateStrategyApplyConfiguration { // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. -func (b *StatefulSetUpdateStrategyApplyConfiguration) WithType(value v1.StatefulSetUpdateStrategyType) *StatefulSetUpdateStrategyApplyConfiguration { +func (b *StatefulSetUpdateStrategyApplyConfiguration) WithType(value appsv1.StatefulSetUpdateStrategyType) *StatefulSetUpdateStrategyApplyConfiguration { b.Type = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/controllerrevision.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/controllerrevision.go index 606de58a1..910dd7bec 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/controllerrevision.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/controllerrevision.go @@ -19,7 +19,7 @@ limitations under the License. package v1beta1 import ( - v1beta1 "k8s.io/api/apps/v1beta1" + appsv1beta1 "k8s.io/api/apps/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" types "k8s.io/apimachinery/pkg/types" @@ -59,18 +59,18 @@ func ControllerRevision(name, namespace string) *ControllerRevisionApplyConfigur // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! -func ExtractControllerRevision(controllerRevision *v1beta1.ControllerRevision, fieldManager string) (*ControllerRevisionApplyConfiguration, error) { +func ExtractControllerRevision(controllerRevision *appsv1beta1.ControllerRevision, fieldManager string) (*ControllerRevisionApplyConfiguration, error) { return extractControllerRevision(controllerRevision, fieldManager, "") } // ExtractControllerRevisionStatus is the same as ExtractControllerRevision except // that it extracts the status subresource applied configuration. // Experimental! 
-func ExtractControllerRevisionStatus(controllerRevision *v1beta1.ControllerRevision, fieldManager string) (*ControllerRevisionApplyConfiguration, error) { +func ExtractControllerRevisionStatus(controllerRevision *appsv1beta1.ControllerRevision, fieldManager string) (*ControllerRevisionApplyConfiguration, error) { return extractControllerRevision(controllerRevision, fieldManager, "status") } -func extractControllerRevision(controllerRevision *v1beta1.ControllerRevision, fieldManager string, subresource string) (*ControllerRevisionApplyConfiguration, error) { +func extractControllerRevision(controllerRevision *appsv1beta1.ControllerRevision, fieldManager string, subresource string) (*ControllerRevisionApplyConfiguration, error) { b := &ControllerRevisionApplyConfiguration{} err := managedfields.ExtractInto(controllerRevision, internal.Parser().Type("io.k8s.api.apps.v1beta1.ControllerRevision"), fieldManager, b, subresource) if err != nil { @@ -88,7 +88,7 @@ func extractControllerRevision(controllerRevision *v1beta1.ControllerRevision, f // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *ControllerRevisionApplyConfiguration) WithKind(value string) *ControllerRevisionApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -96,7 +96,7 @@ func (b *ControllerRevisionApplyConfiguration) WithKind(value string) *Controlle // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *ControllerRevisionApplyConfiguration) WithAPIVersion(value string) *ControllerRevisionApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -105,7 +105,7 @@ func (b *ControllerRevisionApplyConfiguration) WithAPIVersion(value string) *Con // If called multiple times, the Name field is set to the value of the last call. func (b *ControllerRevisionApplyConfiguration) WithName(value string) *ControllerRevisionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -114,7 +114,7 @@ func (b *ControllerRevisionApplyConfiguration) WithName(value string) *Controlle // If called multiple times, the GenerateName field is set to the value of the last call. func (b *ControllerRevisionApplyConfiguration) WithGenerateName(value string) *ControllerRevisionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -123,7 +123,7 @@ func (b *ControllerRevisionApplyConfiguration) WithGenerateName(value string) *C // If called multiple times, the Namespace field is set to the value of the last call. func (b *ControllerRevisionApplyConfiguration) WithNamespace(value string) *ControllerRevisionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -132,7 +132,7 @@ func (b *ControllerRevisionApplyConfiguration) WithNamespace(value string) *Cont // If called multiple times, the UID field is set to the value of the last call. 
func (b *ControllerRevisionApplyConfiguration) WithUID(value types.UID) *ControllerRevisionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -141,7 +141,7 @@ func (b *ControllerRevisionApplyConfiguration) WithUID(value types.UID) *Control // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *ControllerRevisionApplyConfiguration) WithResourceVersion(value string) *ControllerRevisionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -150,7 +150,7 @@ func (b *ControllerRevisionApplyConfiguration) WithResourceVersion(value string) // If called multiple times, the Generation field is set to the value of the last call. func (b *ControllerRevisionApplyConfiguration) WithGeneration(value int64) *ControllerRevisionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } @@ -159,7 +159,7 @@ func (b *ControllerRevisionApplyConfiguration) WithGeneration(value int64) *Cont // If called multiple times, the CreationTimestamp field is set to the value of the last call. func (b *ControllerRevisionApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ControllerRevisionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } @@ -168,7 +168,7 @@ func (b *ControllerRevisionApplyConfiguration) WithCreationTimestamp(value metav // If called multiple times, the DeletionTimestamp field is set to the value of the last call. func (b *ControllerRevisionApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ControllerRevisionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -177,7 +177,7 @@ func (b *ControllerRevisionApplyConfiguration) WithDeletionTimestamp(value metav // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *ControllerRevisionApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ControllerRevisionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -187,11 +187,11 @@ func (b *ControllerRevisionApplyConfiguration) WithDeletionGracePeriodSeconds(va // overwriting an existing map entries in Labels field with the same key. func (b *ControllerRevisionApplyConfiguration) WithLabels(entries map[string]string) *ControllerRevisionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -202,11 +202,11 @@ func (b *ControllerRevisionApplyConfiguration) WithLabels(entries map[string]str // overwriting an existing map entries in Annotations field with the same key. 
func (b *ControllerRevisionApplyConfiguration) WithAnnotations(entries map[string]string) *ControllerRevisionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -220,7 +220,7 @@ func (b *ControllerRevisionApplyConfiguration) WithOwnerReferences(values ...*v1 if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -231,7 +231,7 @@ func (b *ControllerRevisionApplyConfiguration) WithOwnerReferences(values ...*v1 func (b *ControllerRevisionApplyConfiguration) WithFinalizers(values ...string) *ControllerRevisionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } @@ -261,5 +261,5 @@ func (b *ControllerRevisionApplyConfiguration) WithRevision(value int64) *Contro // GetName retrieves the value of the Name field in the declarative configuration. func (b *ControllerRevisionApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/deployment.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/deployment.go index 145aaed70..057ea5b6f 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/deployment.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/deployment.go @@ -87,7 +87,7 @@ func extractDeployment(deployment *appsv1beta1.Deployment, fieldManager string, // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *DeploymentApplyConfiguration) WithKind(value string) *DeploymentApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -95,7 +95,7 @@ func (b *DeploymentApplyConfiguration) WithKind(value string) *DeploymentApplyCo // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *DeploymentApplyConfiguration) WithAPIVersion(value string) *DeploymentApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -104,7 +104,7 @@ func (b *DeploymentApplyConfiguration) WithAPIVersion(value string) *DeploymentA // If called multiple times, the Name field is set to the value of the last call. 
func (b *DeploymentApplyConfiguration) WithName(value string) *DeploymentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -113,7 +113,7 @@ func (b *DeploymentApplyConfiguration) WithName(value string) *DeploymentApplyCo // If called multiple times, the GenerateName field is set to the value of the last call. func (b *DeploymentApplyConfiguration) WithGenerateName(value string) *DeploymentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -122,7 +122,7 @@ func (b *DeploymentApplyConfiguration) WithGenerateName(value string) *Deploymen // If called multiple times, the Namespace field is set to the value of the last call. func (b *DeploymentApplyConfiguration) WithNamespace(value string) *DeploymentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -131,7 +131,7 @@ func (b *DeploymentApplyConfiguration) WithNamespace(value string) *DeploymentAp // If called multiple times, the UID field is set to the value of the last call. func (b *DeploymentApplyConfiguration) WithUID(value types.UID) *DeploymentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -140,7 +140,7 @@ func (b *DeploymentApplyConfiguration) WithUID(value types.UID) *DeploymentApply // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *DeploymentApplyConfiguration) WithResourceVersion(value string) *DeploymentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -149,7 +149,7 @@ func (b *DeploymentApplyConfiguration) WithResourceVersion(value string) *Deploy // If called multiple times, the Generation field is set to the value of the last call. func (b *DeploymentApplyConfiguration) WithGeneration(value int64) *DeploymentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } @@ -158,7 +158,7 @@ func (b *DeploymentApplyConfiguration) WithGeneration(value int64) *DeploymentAp // If called multiple times, the CreationTimestamp field is set to the value of the last call. func (b *DeploymentApplyConfiguration) WithCreationTimestamp(value metav1.Time) *DeploymentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } @@ -167,7 +167,7 @@ func (b *DeploymentApplyConfiguration) WithCreationTimestamp(value metav1.Time) // If called multiple times, the DeletionTimestamp field is set to the value of the last call. func (b *DeploymentApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *DeploymentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -176,7 +176,7 @@ func (b *DeploymentApplyConfiguration) WithDeletionTimestamp(value metav1.Time) // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. 
func (b *DeploymentApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *DeploymentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -186,11 +186,11 @@ func (b *DeploymentApplyConfiguration) WithDeletionGracePeriodSeconds(value int6 // overwriting an existing map entries in Labels field with the same key. func (b *DeploymentApplyConfiguration) WithLabels(entries map[string]string) *DeploymentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -201,11 +201,11 @@ func (b *DeploymentApplyConfiguration) WithLabels(entries map[string]string) *De // overwriting an existing map entries in Annotations field with the same key. func (b *DeploymentApplyConfiguration) WithAnnotations(entries map[string]string) *DeploymentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -219,7 +219,7 @@ func (b *DeploymentApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerRe if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -230,7 +230,7 @@ func (b *DeploymentApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerRe func (b *DeploymentApplyConfiguration) WithFinalizers(values ...string) *DeploymentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } @@ -260,5 +260,5 @@ func (b *DeploymentApplyConfiguration) WithStatus(value *DeploymentStatusApplyCo // GetName retrieves the value of the Name field in the declarative configuration. func (b *DeploymentApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/deploymentcondition.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/deploymentcondition.go index 504dddd94..b0a45b1a6 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/deploymentcondition.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/deploymentcondition.go @@ -19,7 +19,7 @@ limitations under the License. 
package v1beta1 import ( - v1beta1 "k8s.io/api/apps/v1beta1" + appsv1beta1 "k8s.io/api/apps/v1beta1" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -27,12 +27,12 @@ import ( // DeploymentConditionApplyConfiguration represents a declarative configuration of the DeploymentCondition type for use // with apply. type DeploymentConditionApplyConfiguration struct { - Type *v1beta1.DeploymentConditionType `json:"type,omitempty"` - Status *v1.ConditionStatus `json:"status,omitempty"` - LastUpdateTime *metav1.Time `json:"lastUpdateTime,omitempty"` - LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"` - Reason *string `json:"reason,omitempty"` - Message *string `json:"message,omitempty"` + Type *appsv1beta1.DeploymentConditionType `json:"type,omitempty"` + Status *v1.ConditionStatus `json:"status,omitempty"` + LastUpdateTime *metav1.Time `json:"lastUpdateTime,omitempty"` + LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"` + Reason *string `json:"reason,omitempty"` + Message *string `json:"message,omitempty"` } // DeploymentConditionApplyConfiguration constructs a declarative configuration of the DeploymentCondition type for use with @@ -44,7 +44,7 @@ func DeploymentCondition() *DeploymentConditionApplyConfiguration { // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. -func (b *DeploymentConditionApplyConfiguration) WithType(value v1beta1.DeploymentConditionType) *DeploymentConditionApplyConfiguration { +func (b *DeploymentConditionApplyConfiguration) WithType(value appsv1beta1.DeploymentConditionType) *DeploymentConditionApplyConfiguration { b.Type = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/deploymentstrategy.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/deploymentstrategy.go index 2c322b4ac..03e66555a 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/deploymentstrategy.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/deploymentstrategy.go @@ -19,13 +19,13 @@ limitations under the License. package v1beta1 import ( - v1beta1 "k8s.io/api/apps/v1beta1" + appsv1beta1 "k8s.io/api/apps/v1beta1" ) // DeploymentStrategyApplyConfiguration represents a declarative configuration of the DeploymentStrategy type for use // with apply. type DeploymentStrategyApplyConfiguration struct { - Type *v1beta1.DeploymentStrategyType `json:"type,omitempty"` + Type *appsv1beta1.DeploymentStrategyType `json:"type,omitempty"` RollingUpdate *RollingUpdateDeploymentApplyConfiguration `json:"rollingUpdate,omitempty"` } @@ -38,7 +38,7 @@ func DeploymentStrategy() *DeploymentStrategyApplyConfiguration { // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. 
-func (b *DeploymentStrategyApplyConfiguration) WithType(value v1beta1.DeploymentStrategyType) *DeploymentStrategyApplyConfiguration { +func (b *DeploymentStrategyApplyConfiguration) WithType(value appsv1beta1.DeploymentStrategyType) *DeploymentStrategyApplyConfiguration { b.Type = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulset.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulset.go index 270593886..ba8aa3a4c 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulset.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulset.go @@ -87,7 +87,7 @@ func extractStatefulSet(statefulSet *appsv1beta1.StatefulSet, fieldManager strin // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *StatefulSetApplyConfiguration) WithKind(value string) *StatefulSetApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -95,7 +95,7 @@ func (b *StatefulSetApplyConfiguration) WithKind(value string) *StatefulSetApply // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *StatefulSetApplyConfiguration) WithAPIVersion(value string) *StatefulSetApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -104,7 +104,7 @@ func (b *StatefulSetApplyConfiguration) WithAPIVersion(value string) *StatefulSe // If called multiple times, the Name field is set to the value of the last call. func (b *StatefulSetApplyConfiguration) WithName(value string) *StatefulSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -113,7 +113,7 @@ func (b *StatefulSetApplyConfiguration) WithName(value string) *StatefulSetApply // If called multiple times, the GenerateName field is set to the value of the last call. func (b *StatefulSetApplyConfiguration) WithGenerateName(value string) *StatefulSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -122,7 +122,7 @@ func (b *StatefulSetApplyConfiguration) WithGenerateName(value string) *Stateful // If called multiple times, the Namespace field is set to the value of the last call. func (b *StatefulSetApplyConfiguration) WithNamespace(value string) *StatefulSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -131,7 +131,7 @@ func (b *StatefulSetApplyConfiguration) WithNamespace(value string) *StatefulSet // If called multiple times, the UID field is set to the value of the last call. func (b *StatefulSetApplyConfiguration) WithUID(value types.UID) *StatefulSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -140,7 +140,7 @@ func (b *StatefulSetApplyConfiguration) WithUID(value types.UID) *StatefulSetApp // If called multiple times, the ResourceVersion field is set to the value of the last call. 
func (b *StatefulSetApplyConfiguration) WithResourceVersion(value string) *StatefulSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -149,7 +149,7 @@ func (b *StatefulSetApplyConfiguration) WithResourceVersion(value string) *State // If called multiple times, the Generation field is set to the value of the last call. func (b *StatefulSetApplyConfiguration) WithGeneration(value int64) *StatefulSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } @@ -158,7 +158,7 @@ func (b *StatefulSetApplyConfiguration) WithGeneration(value int64) *StatefulSet // If called multiple times, the CreationTimestamp field is set to the value of the last call. func (b *StatefulSetApplyConfiguration) WithCreationTimestamp(value metav1.Time) *StatefulSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } @@ -167,7 +167,7 @@ func (b *StatefulSetApplyConfiguration) WithCreationTimestamp(value metav1.Time) // If called multiple times, the DeletionTimestamp field is set to the value of the last call. func (b *StatefulSetApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *StatefulSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -176,7 +176,7 @@ func (b *StatefulSetApplyConfiguration) WithDeletionTimestamp(value metav1.Time) // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *StatefulSetApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *StatefulSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -186,11 +186,11 @@ func (b *StatefulSetApplyConfiguration) WithDeletionGracePeriodSeconds(value int // overwriting an existing map entries in Labels field with the same key. func (b *StatefulSetApplyConfiguration) WithLabels(entries map[string]string) *StatefulSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -201,11 +201,11 @@ func (b *StatefulSetApplyConfiguration) WithLabels(entries map[string]string) *S // overwriting an existing map entries in Annotations field with the same key. 
func (b *StatefulSetApplyConfiguration) WithAnnotations(entries map[string]string) *StatefulSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -219,7 +219,7 @@ func (b *StatefulSetApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerR if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -230,7 +230,7 @@ func (b *StatefulSetApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerR func (b *StatefulSetApplyConfiguration) WithFinalizers(values ...string) *StatefulSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } @@ -260,5 +260,5 @@ func (b *StatefulSetApplyConfiguration) WithStatus(value *StatefulSetStatusApply // GetName retrieves the value of the Name field in the declarative configuration. func (b *StatefulSetApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetcondition.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetcondition.go index 8a17391cd..5a13584bc 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetcondition.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetcondition.go @@ -19,7 +19,7 @@ limitations under the License. package v1beta1 import ( - v1beta1 "k8s.io/api/apps/v1beta1" + appsv1beta1 "k8s.io/api/apps/v1beta1" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -27,11 +27,11 @@ import ( // StatefulSetConditionApplyConfiguration represents a declarative configuration of the StatefulSetCondition type for use // with apply. type StatefulSetConditionApplyConfiguration struct { - Type *v1beta1.StatefulSetConditionType `json:"type,omitempty"` - Status *v1.ConditionStatus `json:"status,omitempty"` - LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"` - Reason *string `json:"reason,omitempty"` - Message *string `json:"message,omitempty"` + Type *appsv1beta1.StatefulSetConditionType `json:"type,omitempty"` + Status *v1.ConditionStatus `json:"status,omitempty"` + LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"` + Reason *string `json:"reason,omitempty"` + Message *string `json:"message,omitempty"` } // StatefulSetConditionApplyConfiguration constructs a declarative configuration of the StatefulSetCondition type for use with @@ -43,7 +43,7 @@ func StatefulSetCondition() *StatefulSetConditionApplyConfiguration { // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. 
// If called multiple times, the Type field is set to the value of the last call. -func (b *StatefulSetConditionApplyConfiguration) WithType(value v1beta1.StatefulSetConditionType) *StatefulSetConditionApplyConfiguration { +func (b *StatefulSetConditionApplyConfiguration) WithType(value appsv1beta1.StatefulSetConditionType) *StatefulSetConditionApplyConfiguration { b.Type = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetpersistentvolumeclaimretentionpolicy.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetpersistentvolumeclaimretentionpolicy.go index 69a8ee0f0..f9b6fbd88 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetpersistentvolumeclaimretentionpolicy.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetpersistentvolumeclaimretentionpolicy.go @@ -19,14 +19,14 @@ limitations under the License. package v1beta1 import ( - v1beta1 "k8s.io/api/apps/v1beta1" + appsv1beta1 "k8s.io/api/apps/v1beta1" ) // StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration represents a declarative configuration of the StatefulSetPersistentVolumeClaimRetentionPolicy type for use // with apply. type StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration struct { - WhenDeleted *v1beta1.PersistentVolumeClaimRetentionPolicyType `json:"whenDeleted,omitempty"` - WhenScaled *v1beta1.PersistentVolumeClaimRetentionPolicyType `json:"whenScaled,omitempty"` + WhenDeleted *appsv1beta1.PersistentVolumeClaimRetentionPolicyType `json:"whenDeleted,omitempty"` + WhenScaled *appsv1beta1.PersistentVolumeClaimRetentionPolicyType `json:"whenScaled,omitempty"` } // StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration constructs a declarative configuration of the StatefulSetPersistentVolumeClaimRetentionPolicy type for use with @@ -38,7 +38,7 @@ func StatefulSetPersistentVolumeClaimRetentionPolicy() *StatefulSetPersistentVol // WithWhenDeleted sets the WhenDeleted field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the WhenDeleted field is set to the value of the last call. -func (b *StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration) WithWhenDeleted(value v1beta1.PersistentVolumeClaimRetentionPolicyType) *StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration { +func (b *StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration) WithWhenDeleted(value appsv1beta1.PersistentVolumeClaimRetentionPolicyType) *StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration { b.WhenDeleted = &value return b } @@ -46,7 +46,7 @@ func (b *StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration) With // WithWhenScaled sets the WhenScaled field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the WhenScaled field is set to the value of the last call. 
-func (b *StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration) WithWhenScaled(value v1beta1.PersistentVolumeClaimRetentionPolicyType) *StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration { +func (b *StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration) WithWhenScaled(value appsv1beta1.PersistentVolumeClaimRetentionPolicyType) *StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration { b.WhenScaled = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetspec.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetspec.go index ac325d717..137c7243b 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetspec.go @@ -19,7 +19,7 @@ limitations under the License. package v1beta1 import ( - v1beta1 "k8s.io/api/apps/v1beta1" + appsv1beta1 "k8s.io/api/apps/v1beta1" corev1 "k8s.io/client-go/applyconfigurations/core/v1" v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) @@ -32,7 +32,7 @@ type StatefulSetSpecApplyConfiguration struct { Template *corev1.PodTemplateSpecApplyConfiguration `json:"template,omitempty"` VolumeClaimTemplates []corev1.PersistentVolumeClaimApplyConfiguration `json:"volumeClaimTemplates,omitempty"` ServiceName *string `json:"serviceName,omitempty"` - PodManagementPolicy *v1beta1.PodManagementPolicyType `json:"podManagementPolicy,omitempty"` + PodManagementPolicy *appsv1beta1.PodManagementPolicyType `json:"podManagementPolicy,omitempty"` UpdateStrategy *StatefulSetUpdateStrategyApplyConfiguration `json:"updateStrategy,omitempty"` RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty"` MinReadySeconds *int32 `json:"minReadySeconds,omitempty"` @@ -94,7 +94,7 @@ func (b *StatefulSetSpecApplyConfiguration) WithServiceName(value string) *State // WithPodManagementPolicy sets the PodManagementPolicy field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the PodManagementPolicy field is set to the value of the last call. -func (b *StatefulSetSpecApplyConfiguration) WithPodManagementPolicy(value v1beta1.PodManagementPolicyType) *StatefulSetSpecApplyConfiguration { +func (b *StatefulSetSpecApplyConfiguration) WithPodManagementPolicy(value appsv1beta1.PodManagementPolicyType) *StatefulSetSpecApplyConfiguration { b.PodManagementPolicy = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetupdatestrategy.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetupdatestrategy.go index 7714ebbb7..24154f7af 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetupdatestrategy.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetupdatestrategy.go @@ -19,13 +19,13 @@ limitations under the License. package v1beta1 import ( - v1beta1 "k8s.io/api/apps/v1beta1" + appsv1beta1 "k8s.io/api/apps/v1beta1" ) // StatefulSetUpdateStrategyApplyConfiguration represents a declarative configuration of the StatefulSetUpdateStrategy type for use // with apply. 
type StatefulSetUpdateStrategyApplyConfiguration struct { - Type *v1beta1.StatefulSetUpdateStrategyType `json:"type,omitempty"` + Type *appsv1beta1.StatefulSetUpdateStrategyType `json:"type,omitempty"` RollingUpdate *RollingUpdateStatefulSetStrategyApplyConfiguration `json:"rollingUpdate,omitempty"` } @@ -38,7 +38,7 @@ func StatefulSetUpdateStrategy() *StatefulSetUpdateStrategyApplyConfiguration { // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. -func (b *StatefulSetUpdateStrategyApplyConfiguration) WithType(value v1beta1.StatefulSetUpdateStrategyType) *StatefulSetUpdateStrategyApplyConfiguration { +func (b *StatefulSetUpdateStrategyApplyConfiguration) WithType(value appsv1beta1.StatefulSetUpdateStrategyType) *StatefulSetUpdateStrategyApplyConfiguration { b.Type = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/controllerrevision.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/controllerrevision.go index 5f75a4551..6facd5384 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/controllerrevision.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/controllerrevision.go @@ -19,7 +19,7 @@ limitations under the License. package v1beta2 import ( - v1beta2 "k8s.io/api/apps/v1beta2" + appsv1beta2 "k8s.io/api/apps/v1beta2" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" types "k8s.io/apimachinery/pkg/types" @@ -59,18 +59,18 @@ func ControllerRevision(name, namespace string) *ControllerRevisionApplyConfigur // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! -func ExtractControllerRevision(controllerRevision *v1beta2.ControllerRevision, fieldManager string) (*ControllerRevisionApplyConfiguration, error) { +func ExtractControllerRevision(controllerRevision *appsv1beta2.ControllerRevision, fieldManager string) (*ControllerRevisionApplyConfiguration, error) { return extractControllerRevision(controllerRevision, fieldManager, "") } // ExtractControllerRevisionStatus is the same as ExtractControllerRevision except // that it extracts the status subresource applied configuration. // Experimental! 
-func ExtractControllerRevisionStatus(controllerRevision *v1beta2.ControllerRevision, fieldManager string) (*ControllerRevisionApplyConfiguration, error) { +func ExtractControllerRevisionStatus(controllerRevision *appsv1beta2.ControllerRevision, fieldManager string) (*ControllerRevisionApplyConfiguration, error) { return extractControllerRevision(controllerRevision, fieldManager, "status") } -func extractControllerRevision(controllerRevision *v1beta2.ControllerRevision, fieldManager string, subresource string) (*ControllerRevisionApplyConfiguration, error) { +func extractControllerRevision(controllerRevision *appsv1beta2.ControllerRevision, fieldManager string, subresource string) (*ControllerRevisionApplyConfiguration, error) { b := &ControllerRevisionApplyConfiguration{} err := managedfields.ExtractInto(controllerRevision, internal.Parser().Type("io.k8s.api.apps.v1beta2.ControllerRevision"), fieldManager, b, subresource) if err != nil { @@ -88,7 +88,7 @@ func extractControllerRevision(controllerRevision *v1beta2.ControllerRevision, f // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *ControllerRevisionApplyConfiguration) WithKind(value string) *ControllerRevisionApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -96,7 +96,7 @@ func (b *ControllerRevisionApplyConfiguration) WithKind(value string) *Controlle // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *ControllerRevisionApplyConfiguration) WithAPIVersion(value string) *ControllerRevisionApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -105,7 +105,7 @@ func (b *ControllerRevisionApplyConfiguration) WithAPIVersion(value string) *Con // If called multiple times, the Name field is set to the value of the last call. func (b *ControllerRevisionApplyConfiguration) WithName(value string) *ControllerRevisionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -114,7 +114,7 @@ func (b *ControllerRevisionApplyConfiguration) WithName(value string) *Controlle // If called multiple times, the GenerateName field is set to the value of the last call. func (b *ControllerRevisionApplyConfiguration) WithGenerateName(value string) *ControllerRevisionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -123,7 +123,7 @@ func (b *ControllerRevisionApplyConfiguration) WithGenerateName(value string) *C // If called multiple times, the Namespace field is set to the value of the last call. func (b *ControllerRevisionApplyConfiguration) WithNamespace(value string) *ControllerRevisionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -132,7 +132,7 @@ func (b *ControllerRevisionApplyConfiguration) WithNamespace(value string) *Cont // If called multiple times, the UID field is set to the value of the last call. 
func (b *ControllerRevisionApplyConfiguration) WithUID(value types.UID) *ControllerRevisionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -141,7 +141,7 @@ func (b *ControllerRevisionApplyConfiguration) WithUID(value types.UID) *Control // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *ControllerRevisionApplyConfiguration) WithResourceVersion(value string) *ControllerRevisionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -150,7 +150,7 @@ func (b *ControllerRevisionApplyConfiguration) WithResourceVersion(value string) // If called multiple times, the Generation field is set to the value of the last call. func (b *ControllerRevisionApplyConfiguration) WithGeneration(value int64) *ControllerRevisionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } @@ -159,7 +159,7 @@ func (b *ControllerRevisionApplyConfiguration) WithGeneration(value int64) *Cont // If called multiple times, the CreationTimestamp field is set to the value of the last call. func (b *ControllerRevisionApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ControllerRevisionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } @@ -168,7 +168,7 @@ func (b *ControllerRevisionApplyConfiguration) WithCreationTimestamp(value metav // If called multiple times, the DeletionTimestamp field is set to the value of the last call. func (b *ControllerRevisionApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ControllerRevisionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -177,7 +177,7 @@ func (b *ControllerRevisionApplyConfiguration) WithDeletionTimestamp(value metav // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *ControllerRevisionApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ControllerRevisionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -187,11 +187,11 @@ func (b *ControllerRevisionApplyConfiguration) WithDeletionGracePeriodSeconds(va // overwriting an existing map entries in Labels field with the same key. func (b *ControllerRevisionApplyConfiguration) WithLabels(entries map[string]string) *ControllerRevisionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -202,11 +202,11 @@ func (b *ControllerRevisionApplyConfiguration) WithLabels(entries map[string]str // overwriting an existing map entries in Annotations field with the same key. 
func (b *ControllerRevisionApplyConfiguration) WithAnnotations(entries map[string]string) *ControllerRevisionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -220,7 +220,7 @@ func (b *ControllerRevisionApplyConfiguration) WithOwnerReferences(values ...*v1 if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -231,7 +231,7 @@ func (b *ControllerRevisionApplyConfiguration) WithOwnerReferences(values ...*v1 func (b *ControllerRevisionApplyConfiguration) WithFinalizers(values ...string) *ControllerRevisionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } @@ -261,5 +261,5 @@ func (b *ControllerRevisionApplyConfiguration) WithRevision(value int64) *Contro // GetName retrieves the value of the Name field in the declarative configuration. func (b *ControllerRevisionApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/daemonset.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/daemonset.go index 9ffda6182..89a2ebd4b 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/daemonset.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/daemonset.go @@ -87,7 +87,7 @@ func extractDaemonSet(daemonSet *appsv1beta2.DaemonSet, fieldManager string, sub // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *DaemonSetApplyConfiguration) WithKind(value string) *DaemonSetApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -95,7 +95,7 @@ func (b *DaemonSetApplyConfiguration) WithKind(value string) *DaemonSetApplyConf // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *DaemonSetApplyConfiguration) WithAPIVersion(value string) *DaemonSetApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -104,7 +104,7 @@ func (b *DaemonSetApplyConfiguration) WithAPIVersion(value string) *DaemonSetApp // If called multiple times, the Name field is set to the value of the last call. 
func (b *DaemonSetApplyConfiguration) WithName(value string) *DaemonSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -113,7 +113,7 @@ func (b *DaemonSetApplyConfiguration) WithName(value string) *DaemonSetApplyConf // If called multiple times, the GenerateName field is set to the value of the last call. func (b *DaemonSetApplyConfiguration) WithGenerateName(value string) *DaemonSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -122,7 +122,7 @@ func (b *DaemonSetApplyConfiguration) WithGenerateName(value string) *DaemonSetA // If called multiple times, the Namespace field is set to the value of the last call. func (b *DaemonSetApplyConfiguration) WithNamespace(value string) *DaemonSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -131,7 +131,7 @@ func (b *DaemonSetApplyConfiguration) WithNamespace(value string) *DaemonSetAppl // If called multiple times, the UID field is set to the value of the last call. func (b *DaemonSetApplyConfiguration) WithUID(value types.UID) *DaemonSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -140,7 +140,7 @@ func (b *DaemonSetApplyConfiguration) WithUID(value types.UID) *DaemonSetApplyCo // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *DaemonSetApplyConfiguration) WithResourceVersion(value string) *DaemonSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -149,7 +149,7 @@ func (b *DaemonSetApplyConfiguration) WithResourceVersion(value string) *DaemonS // If called multiple times, the Generation field is set to the value of the last call. func (b *DaemonSetApplyConfiguration) WithGeneration(value int64) *DaemonSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } @@ -158,7 +158,7 @@ func (b *DaemonSetApplyConfiguration) WithGeneration(value int64) *DaemonSetAppl // If called multiple times, the CreationTimestamp field is set to the value of the last call. func (b *DaemonSetApplyConfiguration) WithCreationTimestamp(value metav1.Time) *DaemonSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } @@ -167,7 +167,7 @@ func (b *DaemonSetApplyConfiguration) WithCreationTimestamp(value metav1.Time) * // If called multiple times, the DeletionTimestamp field is set to the value of the last call. func (b *DaemonSetApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *DaemonSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -176,7 +176,7 @@ func (b *DaemonSetApplyConfiguration) WithDeletionTimestamp(value metav1.Time) * // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. 
func (b *DaemonSetApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *DaemonSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -186,11 +186,11 @@ func (b *DaemonSetApplyConfiguration) WithDeletionGracePeriodSeconds(value int64 // overwriting an existing map entries in Labels field with the same key. func (b *DaemonSetApplyConfiguration) WithLabels(entries map[string]string) *DaemonSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -201,11 +201,11 @@ func (b *DaemonSetApplyConfiguration) WithLabels(entries map[string]string) *Dae // overwriting an existing map entries in Annotations field with the same key. func (b *DaemonSetApplyConfiguration) WithAnnotations(entries map[string]string) *DaemonSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -219,7 +219,7 @@ func (b *DaemonSetApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerRef if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -230,7 +230,7 @@ func (b *DaemonSetApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerRef func (b *DaemonSetApplyConfiguration) WithFinalizers(values ...string) *DaemonSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } @@ -260,5 +260,5 @@ func (b *DaemonSetApplyConfiguration) WithStatus(value *DaemonSetStatusApplyConf // GetName retrieves the value of the Name field in the declarative configuration. func (b *DaemonSetApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/daemonsetcondition.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/daemonsetcondition.go index 8315050f0..0aa47cf0a 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/daemonsetcondition.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/daemonsetcondition.go @@ -19,7 +19,7 @@ limitations under the License. 
package v1beta2 import ( - v1beta2 "k8s.io/api/apps/v1beta2" + appsv1beta2 "k8s.io/api/apps/v1beta2" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -27,11 +27,11 @@ import ( // DaemonSetConditionApplyConfiguration represents a declarative configuration of the DaemonSetCondition type for use // with apply. type DaemonSetConditionApplyConfiguration struct { - Type *v1beta2.DaemonSetConditionType `json:"type,omitempty"` - Status *v1.ConditionStatus `json:"status,omitempty"` - LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"` - Reason *string `json:"reason,omitempty"` - Message *string `json:"message,omitempty"` + Type *appsv1beta2.DaemonSetConditionType `json:"type,omitempty"` + Status *v1.ConditionStatus `json:"status,omitempty"` + LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"` + Reason *string `json:"reason,omitempty"` + Message *string `json:"message,omitempty"` } // DaemonSetConditionApplyConfiguration constructs a declarative configuration of the DaemonSetCondition type for use with @@ -43,7 +43,7 @@ func DaemonSetCondition() *DaemonSetConditionApplyConfiguration { // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. -func (b *DaemonSetConditionApplyConfiguration) WithType(value v1beta2.DaemonSetConditionType) *DaemonSetConditionApplyConfiguration { +func (b *DaemonSetConditionApplyConfiguration) WithType(value appsv1beta2.DaemonSetConditionType) *DaemonSetConditionApplyConfiguration { b.Type = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/daemonsetupdatestrategy.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/daemonsetupdatestrategy.go index 7d66f1da4..2cee58cf3 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/daemonsetupdatestrategy.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/daemonsetupdatestrategy.go @@ -19,13 +19,13 @@ limitations under the License. package v1beta2 import ( - v1beta2 "k8s.io/api/apps/v1beta2" + appsv1beta2 "k8s.io/api/apps/v1beta2" ) // DaemonSetUpdateStrategyApplyConfiguration represents a declarative configuration of the DaemonSetUpdateStrategy type for use // with apply. type DaemonSetUpdateStrategyApplyConfiguration struct { - Type *v1beta2.DaemonSetUpdateStrategyType `json:"type,omitempty"` + Type *appsv1beta2.DaemonSetUpdateStrategyType `json:"type,omitempty"` RollingUpdate *RollingUpdateDaemonSetApplyConfiguration `json:"rollingUpdate,omitempty"` } @@ -38,7 +38,7 @@ func DaemonSetUpdateStrategy() *DaemonSetUpdateStrategyApplyConfiguration { // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. 
-func (b *DaemonSetUpdateStrategyApplyConfiguration) WithType(value v1beta2.DaemonSetUpdateStrategyType) *DaemonSetUpdateStrategyApplyConfiguration { +func (b *DaemonSetUpdateStrategyApplyConfiguration) WithType(value appsv1beta2.DaemonSetUpdateStrategyType) *DaemonSetUpdateStrategyApplyConfiguration { b.Type = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/deployment.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/deployment.go index 485da788a..8948cc606 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/deployment.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/deployment.go @@ -87,7 +87,7 @@ func extractDeployment(deployment *appsv1beta2.Deployment, fieldManager string, // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *DeploymentApplyConfiguration) WithKind(value string) *DeploymentApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -95,7 +95,7 @@ func (b *DeploymentApplyConfiguration) WithKind(value string) *DeploymentApplyCo // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *DeploymentApplyConfiguration) WithAPIVersion(value string) *DeploymentApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -104,7 +104,7 @@ func (b *DeploymentApplyConfiguration) WithAPIVersion(value string) *DeploymentA // If called multiple times, the Name field is set to the value of the last call. func (b *DeploymentApplyConfiguration) WithName(value string) *DeploymentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -113,7 +113,7 @@ func (b *DeploymentApplyConfiguration) WithName(value string) *DeploymentApplyCo // If called multiple times, the GenerateName field is set to the value of the last call. func (b *DeploymentApplyConfiguration) WithGenerateName(value string) *DeploymentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -122,7 +122,7 @@ func (b *DeploymentApplyConfiguration) WithGenerateName(value string) *Deploymen // If called multiple times, the Namespace field is set to the value of the last call. func (b *DeploymentApplyConfiguration) WithNamespace(value string) *DeploymentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -131,7 +131,7 @@ func (b *DeploymentApplyConfiguration) WithNamespace(value string) *DeploymentAp // If called multiple times, the UID field is set to the value of the last call. func (b *DeploymentApplyConfiguration) WithUID(value types.UID) *DeploymentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -140,7 +140,7 @@ func (b *DeploymentApplyConfiguration) WithUID(value types.UID) *DeploymentApply // If called multiple times, the ResourceVersion field is set to the value of the last call. 
func (b *DeploymentApplyConfiguration) WithResourceVersion(value string) *DeploymentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -149,7 +149,7 @@ func (b *DeploymentApplyConfiguration) WithResourceVersion(value string) *Deploy // If called multiple times, the Generation field is set to the value of the last call. func (b *DeploymentApplyConfiguration) WithGeneration(value int64) *DeploymentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } @@ -158,7 +158,7 @@ func (b *DeploymentApplyConfiguration) WithGeneration(value int64) *DeploymentAp // If called multiple times, the CreationTimestamp field is set to the value of the last call. func (b *DeploymentApplyConfiguration) WithCreationTimestamp(value metav1.Time) *DeploymentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } @@ -167,7 +167,7 @@ func (b *DeploymentApplyConfiguration) WithCreationTimestamp(value metav1.Time) // If called multiple times, the DeletionTimestamp field is set to the value of the last call. func (b *DeploymentApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *DeploymentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -176,7 +176,7 @@ func (b *DeploymentApplyConfiguration) WithDeletionTimestamp(value metav1.Time) // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *DeploymentApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *DeploymentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -186,11 +186,11 @@ func (b *DeploymentApplyConfiguration) WithDeletionGracePeriodSeconds(value int6 // overwriting an existing map entries in Labels field with the same key. func (b *DeploymentApplyConfiguration) WithLabels(entries map[string]string) *DeploymentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -201,11 +201,11 @@ func (b *DeploymentApplyConfiguration) WithLabels(entries map[string]string) *De // overwriting an existing map entries in Annotations field with the same key. 
func (b *DeploymentApplyConfiguration) WithAnnotations(entries map[string]string) *DeploymentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -219,7 +219,7 @@ func (b *DeploymentApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerRe if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -230,7 +230,7 @@ func (b *DeploymentApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerRe func (b *DeploymentApplyConfiguration) WithFinalizers(values ...string) *DeploymentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } @@ -260,5 +260,5 @@ func (b *DeploymentApplyConfiguration) WithStatus(value *DeploymentStatusApplyCo // GetName retrieves the value of the Name field in the declarative configuration. func (b *DeploymentApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/deploymentcondition.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/deploymentcondition.go index 192427874..f404dd9df 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/deploymentcondition.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/deploymentcondition.go @@ -19,7 +19,7 @@ limitations under the License. package v1beta2 import ( - v1beta2 "k8s.io/api/apps/v1beta2" + appsv1beta2 "k8s.io/api/apps/v1beta2" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -27,12 +27,12 @@ import ( // DeploymentConditionApplyConfiguration represents a declarative configuration of the DeploymentCondition type for use // with apply. 
type DeploymentConditionApplyConfiguration struct { - Type *v1beta2.DeploymentConditionType `json:"type,omitempty"` - Status *v1.ConditionStatus `json:"status,omitempty"` - LastUpdateTime *metav1.Time `json:"lastUpdateTime,omitempty"` - LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"` - Reason *string `json:"reason,omitempty"` - Message *string `json:"message,omitempty"` + Type *appsv1beta2.DeploymentConditionType `json:"type,omitempty"` + Status *v1.ConditionStatus `json:"status,omitempty"` + LastUpdateTime *metav1.Time `json:"lastUpdateTime,omitempty"` + LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"` + Reason *string `json:"reason,omitempty"` + Message *string `json:"message,omitempty"` } // DeploymentConditionApplyConfiguration constructs a declarative configuration of the DeploymentCondition type for use with @@ -44,7 +44,7 @@ func DeploymentCondition() *DeploymentConditionApplyConfiguration { // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. -func (b *DeploymentConditionApplyConfiguration) WithType(value v1beta2.DeploymentConditionType) *DeploymentConditionApplyConfiguration { +func (b *DeploymentConditionApplyConfiguration) WithType(value appsv1beta2.DeploymentConditionType) *DeploymentConditionApplyConfiguration { b.Type = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/deploymentstrategy.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/deploymentstrategy.go index c769436ee..6347a3a39 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/deploymentstrategy.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/deploymentstrategy.go @@ -19,13 +19,13 @@ limitations under the License. package v1beta2 import ( - v1beta2 "k8s.io/api/apps/v1beta2" + appsv1beta2 "k8s.io/api/apps/v1beta2" ) // DeploymentStrategyApplyConfiguration represents a declarative configuration of the DeploymentStrategy type for use // with apply. type DeploymentStrategyApplyConfiguration struct { - Type *v1beta2.DeploymentStrategyType `json:"type,omitempty"` + Type *appsv1beta2.DeploymentStrategyType `json:"type,omitempty"` RollingUpdate *RollingUpdateDeploymentApplyConfiguration `json:"rollingUpdate,omitempty"` } @@ -38,7 +38,7 @@ func DeploymentStrategy() *DeploymentStrategyApplyConfiguration { // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. 
-func (b *DeploymentStrategyApplyConfiguration) WithType(value v1beta2.DeploymentStrategyType) *DeploymentStrategyApplyConfiguration { +func (b *DeploymentStrategyApplyConfiguration) WithType(value appsv1beta2.DeploymentStrategyType) *DeploymentStrategyApplyConfiguration { b.Type = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/replicaset.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/replicaset.go index d8608aa51..679416b21 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/replicaset.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/replicaset.go @@ -87,7 +87,7 @@ func extractReplicaSet(replicaSet *appsv1beta2.ReplicaSet, fieldManager string, // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *ReplicaSetApplyConfiguration) WithKind(value string) *ReplicaSetApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -95,7 +95,7 @@ func (b *ReplicaSetApplyConfiguration) WithKind(value string) *ReplicaSetApplyCo // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *ReplicaSetApplyConfiguration) WithAPIVersion(value string) *ReplicaSetApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -104,7 +104,7 @@ func (b *ReplicaSetApplyConfiguration) WithAPIVersion(value string) *ReplicaSetA // If called multiple times, the Name field is set to the value of the last call. func (b *ReplicaSetApplyConfiguration) WithName(value string) *ReplicaSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -113,7 +113,7 @@ func (b *ReplicaSetApplyConfiguration) WithName(value string) *ReplicaSetApplyCo // If called multiple times, the GenerateName field is set to the value of the last call. func (b *ReplicaSetApplyConfiguration) WithGenerateName(value string) *ReplicaSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -122,7 +122,7 @@ func (b *ReplicaSetApplyConfiguration) WithGenerateName(value string) *ReplicaSe // If called multiple times, the Namespace field is set to the value of the last call. func (b *ReplicaSetApplyConfiguration) WithNamespace(value string) *ReplicaSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -131,7 +131,7 @@ func (b *ReplicaSetApplyConfiguration) WithNamespace(value string) *ReplicaSetAp // If called multiple times, the UID field is set to the value of the last call. func (b *ReplicaSetApplyConfiguration) WithUID(value types.UID) *ReplicaSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -140,7 +140,7 @@ func (b *ReplicaSetApplyConfiguration) WithUID(value types.UID) *ReplicaSetApply // If called multiple times, the ResourceVersion field is set to the value of the last call. 
func (b *ReplicaSetApplyConfiguration) WithResourceVersion(value string) *ReplicaSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -149,7 +149,7 @@ func (b *ReplicaSetApplyConfiguration) WithResourceVersion(value string) *Replic // If called multiple times, the Generation field is set to the value of the last call. func (b *ReplicaSetApplyConfiguration) WithGeneration(value int64) *ReplicaSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } @@ -158,7 +158,7 @@ func (b *ReplicaSetApplyConfiguration) WithGeneration(value int64) *ReplicaSetAp // If called multiple times, the CreationTimestamp field is set to the value of the last call. func (b *ReplicaSetApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ReplicaSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } @@ -167,7 +167,7 @@ func (b *ReplicaSetApplyConfiguration) WithCreationTimestamp(value metav1.Time) // If called multiple times, the DeletionTimestamp field is set to the value of the last call. func (b *ReplicaSetApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ReplicaSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -176,7 +176,7 @@ func (b *ReplicaSetApplyConfiguration) WithDeletionTimestamp(value metav1.Time) // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *ReplicaSetApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ReplicaSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -186,11 +186,11 @@ func (b *ReplicaSetApplyConfiguration) WithDeletionGracePeriodSeconds(value int6 // overwriting an existing map entries in Labels field with the same key. func (b *ReplicaSetApplyConfiguration) WithLabels(entries map[string]string) *ReplicaSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -201,11 +201,11 @@ func (b *ReplicaSetApplyConfiguration) WithLabels(entries map[string]string) *Re // overwriting an existing map entries in Annotations field with the same key. 
func (b *ReplicaSetApplyConfiguration) WithAnnotations(entries map[string]string) *ReplicaSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -219,7 +219,7 @@ func (b *ReplicaSetApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerRe if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -230,7 +230,7 @@ func (b *ReplicaSetApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerRe func (b *ReplicaSetApplyConfiguration) WithFinalizers(values ...string) *ReplicaSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } @@ -260,5 +260,5 @@ func (b *ReplicaSetApplyConfiguration) WithStatus(value *ReplicaSetStatusApplyCo // GetName retrieves the value of the Name field in the declarative configuration. func (b *ReplicaSetApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/replicasetcondition.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/replicasetcondition.go index beec546f7..3d8cd3632 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/replicasetcondition.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/replicasetcondition.go @@ -19,7 +19,7 @@ limitations under the License. package v1beta2 import ( - v1beta2 "k8s.io/api/apps/v1beta2" + appsv1beta2 "k8s.io/api/apps/v1beta2" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -27,11 +27,11 @@ import ( // ReplicaSetConditionApplyConfiguration represents a declarative configuration of the ReplicaSetCondition type for use // with apply. type ReplicaSetConditionApplyConfiguration struct { - Type *v1beta2.ReplicaSetConditionType `json:"type,omitempty"` - Status *v1.ConditionStatus `json:"status,omitempty"` - LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"` - Reason *string `json:"reason,omitempty"` - Message *string `json:"message,omitempty"` + Type *appsv1beta2.ReplicaSetConditionType `json:"type,omitempty"` + Status *v1.ConditionStatus `json:"status,omitempty"` + LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"` + Reason *string `json:"reason,omitempty"` + Message *string `json:"message,omitempty"` } // ReplicaSetConditionApplyConfiguration constructs a declarative configuration of the ReplicaSetCondition type for use with @@ -43,7 +43,7 @@ func ReplicaSetCondition() *ReplicaSetConditionApplyConfiguration { // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. 
// If called multiple times, the Type field is set to the value of the last call. -func (b *ReplicaSetConditionApplyConfiguration) WithType(value v1beta2.ReplicaSetConditionType) *ReplicaSetConditionApplyConfiguration { +func (b *ReplicaSetConditionApplyConfiguration) WithType(value appsv1beta2.ReplicaSetConditionType) *ReplicaSetConditionApplyConfiguration { b.Type = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/scale.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/scale.go index 126ab2d8b..27067b6aa 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/scale.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/scale.go @@ -19,7 +19,7 @@ limitations under the License. package v1beta2 import ( - v1beta2 "k8s.io/api/apps/v1beta2" + appsv1beta2 "k8s.io/api/apps/v1beta2" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" v1 "k8s.io/client-go/applyconfigurations/meta/v1" @@ -30,8 +30,8 @@ import ( type ScaleApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Spec *v1beta2.ScaleSpec `json:"spec,omitempty"` - Status *v1beta2.ScaleStatus `json:"status,omitempty"` + Spec *appsv1beta2.ScaleSpec `json:"spec,omitempty"` + Status *appsv1beta2.ScaleStatus `json:"status,omitempty"` } // ScaleApplyConfiguration constructs a declarative configuration of the Scale type for use with @@ -47,7 +47,7 @@ func Scale() *ScaleApplyConfiguration { // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *ScaleApplyConfiguration) WithKind(value string) *ScaleApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -55,7 +55,7 @@ func (b *ScaleApplyConfiguration) WithKind(value string) *ScaleApplyConfiguratio // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *ScaleApplyConfiguration) WithAPIVersion(value string) *ScaleApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -64,7 +64,7 @@ func (b *ScaleApplyConfiguration) WithAPIVersion(value string) *ScaleApplyConfig // If called multiple times, the Name field is set to the value of the last call. func (b *ScaleApplyConfiguration) WithName(value string) *ScaleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -73,7 +73,7 @@ func (b *ScaleApplyConfiguration) WithName(value string) *ScaleApplyConfiguratio // If called multiple times, the GenerateName field is set to the value of the last call. func (b *ScaleApplyConfiguration) WithGenerateName(value string) *ScaleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -82,7 +82,7 @@ func (b *ScaleApplyConfiguration) WithGenerateName(value string) *ScaleApplyConf // If called multiple times, the Namespace field is set to the value of the last call. 
func (b *ScaleApplyConfiguration) WithNamespace(value string) *ScaleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -91,7 +91,7 @@ func (b *ScaleApplyConfiguration) WithNamespace(value string) *ScaleApplyConfigu // If called multiple times, the UID field is set to the value of the last call. func (b *ScaleApplyConfiguration) WithUID(value types.UID) *ScaleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -100,7 +100,7 @@ func (b *ScaleApplyConfiguration) WithUID(value types.UID) *ScaleApplyConfigurat // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *ScaleApplyConfiguration) WithResourceVersion(value string) *ScaleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -109,7 +109,7 @@ func (b *ScaleApplyConfiguration) WithResourceVersion(value string) *ScaleApplyC // If called multiple times, the Generation field is set to the value of the last call. func (b *ScaleApplyConfiguration) WithGeneration(value int64) *ScaleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } @@ -118,7 +118,7 @@ func (b *ScaleApplyConfiguration) WithGeneration(value int64) *ScaleApplyConfigu // If called multiple times, the CreationTimestamp field is set to the value of the last call. func (b *ScaleApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ScaleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } @@ -127,7 +127,7 @@ func (b *ScaleApplyConfiguration) WithCreationTimestamp(value metav1.Time) *Scal // If called multiple times, the DeletionTimestamp field is set to the value of the last call. func (b *ScaleApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ScaleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -136,7 +136,7 @@ func (b *ScaleApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *Scal // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *ScaleApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ScaleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -146,11 +146,11 @@ func (b *ScaleApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *S // overwriting an existing map entries in Labels field with the same key. 
func (b *ScaleApplyConfiguration) WithLabels(entries map[string]string) *ScaleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -161,11 +161,11 @@ func (b *ScaleApplyConfiguration) WithLabels(entries map[string]string) *ScaleAp // overwriting an existing map entries in Annotations field with the same key. func (b *ScaleApplyConfiguration) WithAnnotations(entries map[string]string) *ScaleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -179,7 +179,7 @@ func (b *ScaleApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferen if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -190,7 +190,7 @@ func (b *ScaleApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferen func (b *ScaleApplyConfiguration) WithFinalizers(values ...string) *ScaleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } @@ -204,7 +204,7 @@ func (b *ScaleApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { // WithSpec sets the Spec field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Spec field is set to the value of the last call. -func (b *ScaleApplyConfiguration) WithSpec(value v1beta2.ScaleSpec) *ScaleApplyConfiguration { +func (b *ScaleApplyConfiguration) WithSpec(value appsv1beta2.ScaleSpec) *ScaleApplyConfiguration { b.Spec = &value return b } @@ -212,7 +212,7 @@ func (b *ScaleApplyConfiguration) WithSpec(value v1beta2.ScaleSpec) *ScaleApplyC // WithStatus sets the Status field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Status field is set to the value of the last call. -func (b *ScaleApplyConfiguration) WithStatus(value v1beta2.ScaleStatus) *ScaleApplyConfiguration { +func (b *ScaleApplyConfiguration) WithStatus(value appsv1beta2.ScaleStatus) *ScaleApplyConfiguration { b.Status = &value return b } @@ -220,5 +220,5 @@ func (b *ScaleApplyConfiguration) WithStatus(value v1beta2.ScaleStatus) *ScaleAp // GetName retrieves the value of the Name field in the declarative configuration. 
func (b *ScaleApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulset.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulset.go index 3d2b5d191..933072421 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulset.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulset.go @@ -87,7 +87,7 @@ func extractStatefulSet(statefulSet *appsv1beta2.StatefulSet, fieldManager strin // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *StatefulSetApplyConfiguration) WithKind(value string) *StatefulSetApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -95,7 +95,7 @@ func (b *StatefulSetApplyConfiguration) WithKind(value string) *StatefulSetApply // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *StatefulSetApplyConfiguration) WithAPIVersion(value string) *StatefulSetApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -104,7 +104,7 @@ func (b *StatefulSetApplyConfiguration) WithAPIVersion(value string) *StatefulSe // If called multiple times, the Name field is set to the value of the last call. func (b *StatefulSetApplyConfiguration) WithName(value string) *StatefulSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -113,7 +113,7 @@ func (b *StatefulSetApplyConfiguration) WithName(value string) *StatefulSetApply // If called multiple times, the GenerateName field is set to the value of the last call. func (b *StatefulSetApplyConfiguration) WithGenerateName(value string) *StatefulSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -122,7 +122,7 @@ func (b *StatefulSetApplyConfiguration) WithGenerateName(value string) *Stateful // If called multiple times, the Namespace field is set to the value of the last call. func (b *StatefulSetApplyConfiguration) WithNamespace(value string) *StatefulSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -131,7 +131,7 @@ func (b *StatefulSetApplyConfiguration) WithNamespace(value string) *StatefulSet // If called multiple times, the UID field is set to the value of the last call. func (b *StatefulSetApplyConfiguration) WithUID(value types.UID) *StatefulSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -140,7 +140,7 @@ func (b *StatefulSetApplyConfiguration) WithUID(value types.UID) *StatefulSetApp // If called multiple times, the ResourceVersion field is set to the value of the last call. 
func (b *StatefulSetApplyConfiguration) WithResourceVersion(value string) *StatefulSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -149,7 +149,7 @@ func (b *StatefulSetApplyConfiguration) WithResourceVersion(value string) *State // If called multiple times, the Generation field is set to the value of the last call. func (b *StatefulSetApplyConfiguration) WithGeneration(value int64) *StatefulSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } @@ -158,7 +158,7 @@ func (b *StatefulSetApplyConfiguration) WithGeneration(value int64) *StatefulSet // If called multiple times, the CreationTimestamp field is set to the value of the last call. func (b *StatefulSetApplyConfiguration) WithCreationTimestamp(value metav1.Time) *StatefulSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } @@ -167,7 +167,7 @@ func (b *StatefulSetApplyConfiguration) WithCreationTimestamp(value metav1.Time) // If called multiple times, the DeletionTimestamp field is set to the value of the last call. func (b *StatefulSetApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *StatefulSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -176,7 +176,7 @@ func (b *StatefulSetApplyConfiguration) WithDeletionTimestamp(value metav1.Time) // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *StatefulSetApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *StatefulSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -186,11 +186,11 @@ func (b *StatefulSetApplyConfiguration) WithDeletionGracePeriodSeconds(value int // overwriting an existing map entries in Labels field with the same key. func (b *StatefulSetApplyConfiguration) WithLabels(entries map[string]string) *StatefulSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -201,11 +201,11 @@ func (b *StatefulSetApplyConfiguration) WithLabels(entries map[string]string) *S // overwriting an existing map entries in Annotations field with the same key. 
func (b *StatefulSetApplyConfiguration) WithAnnotations(entries map[string]string) *StatefulSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -219,7 +219,7 @@ func (b *StatefulSetApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerR if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -230,7 +230,7 @@ func (b *StatefulSetApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerR func (b *StatefulSetApplyConfiguration) WithFinalizers(values ...string) *StatefulSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } @@ -260,5 +260,5 @@ func (b *StatefulSetApplyConfiguration) WithStatus(value *StatefulSetStatusApply // GetName retrieves the value of the Name field in the declarative configuration. func (b *StatefulSetApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetcondition.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetcondition.go index aa45db686..50bef2003 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetcondition.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetcondition.go @@ -19,7 +19,7 @@ limitations under the License. package v1beta2 import ( - v1beta2 "k8s.io/api/apps/v1beta2" + appsv1beta2 "k8s.io/api/apps/v1beta2" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -27,11 +27,11 @@ import ( // StatefulSetConditionApplyConfiguration represents a declarative configuration of the StatefulSetCondition type for use // with apply. type StatefulSetConditionApplyConfiguration struct { - Type *v1beta2.StatefulSetConditionType `json:"type,omitempty"` - Status *v1.ConditionStatus `json:"status,omitempty"` - LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"` - Reason *string `json:"reason,omitempty"` - Message *string `json:"message,omitempty"` + Type *appsv1beta2.StatefulSetConditionType `json:"type,omitempty"` + Status *v1.ConditionStatus `json:"status,omitempty"` + LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"` + Reason *string `json:"reason,omitempty"` + Message *string `json:"message,omitempty"` } // StatefulSetConditionApplyConfiguration constructs a declarative configuration of the StatefulSetCondition type for use with @@ -43,7 +43,7 @@ func StatefulSetCondition() *StatefulSetConditionApplyConfiguration { // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. 
// If called multiple times, the Type field is set to the value of the last call. -func (b *StatefulSetConditionApplyConfiguration) WithType(value v1beta2.StatefulSetConditionType) *StatefulSetConditionApplyConfiguration { +func (b *StatefulSetConditionApplyConfiguration) WithType(value appsv1beta2.StatefulSetConditionType) *StatefulSetConditionApplyConfiguration { b.Type = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetpersistentvolumeclaimretentionpolicy.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetpersistentvolumeclaimretentionpolicy.go index 318e5f464..d4d139ae3 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetpersistentvolumeclaimretentionpolicy.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetpersistentvolumeclaimretentionpolicy.go @@ -19,14 +19,14 @@ limitations under the License. package v1beta2 import ( - v1beta2 "k8s.io/api/apps/v1beta2" + appsv1beta2 "k8s.io/api/apps/v1beta2" ) // StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration represents a declarative configuration of the StatefulSetPersistentVolumeClaimRetentionPolicy type for use // with apply. type StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration struct { - WhenDeleted *v1beta2.PersistentVolumeClaimRetentionPolicyType `json:"whenDeleted,omitempty"` - WhenScaled *v1beta2.PersistentVolumeClaimRetentionPolicyType `json:"whenScaled,omitempty"` + WhenDeleted *appsv1beta2.PersistentVolumeClaimRetentionPolicyType `json:"whenDeleted,omitempty"` + WhenScaled *appsv1beta2.PersistentVolumeClaimRetentionPolicyType `json:"whenScaled,omitempty"` } // StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration constructs a declarative configuration of the StatefulSetPersistentVolumeClaimRetentionPolicy type for use with @@ -38,7 +38,7 @@ func StatefulSetPersistentVolumeClaimRetentionPolicy() *StatefulSetPersistentVol // WithWhenDeleted sets the WhenDeleted field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the WhenDeleted field is set to the value of the last call. -func (b *StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration) WithWhenDeleted(value v1beta2.PersistentVolumeClaimRetentionPolicyType) *StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration { +func (b *StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration) WithWhenDeleted(value appsv1beta2.PersistentVolumeClaimRetentionPolicyType) *StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration { b.WhenDeleted = &value return b } @@ -46,7 +46,7 @@ func (b *StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration) With // WithWhenScaled sets the WhenScaled field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the WhenScaled field is set to the value of the last call. 
-func (b *StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration) WithWhenScaled(value v1beta2.PersistentVolumeClaimRetentionPolicyType) *StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration { +func (b *StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration) WithWhenScaled(value appsv1beta2.PersistentVolumeClaimRetentionPolicyType) *StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration { b.WhenScaled = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetspec.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetspec.go index bebf80c89..952ca0a81 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetspec.go @@ -19,7 +19,7 @@ limitations under the License. package v1beta2 import ( - v1beta2 "k8s.io/api/apps/v1beta2" + appsv1beta2 "k8s.io/api/apps/v1beta2" corev1 "k8s.io/client-go/applyconfigurations/core/v1" v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) @@ -32,7 +32,7 @@ type StatefulSetSpecApplyConfiguration struct { Template *corev1.PodTemplateSpecApplyConfiguration `json:"template,omitempty"` VolumeClaimTemplates []corev1.PersistentVolumeClaimApplyConfiguration `json:"volumeClaimTemplates,omitempty"` ServiceName *string `json:"serviceName,omitempty"` - PodManagementPolicy *v1beta2.PodManagementPolicyType `json:"podManagementPolicy,omitempty"` + PodManagementPolicy *appsv1beta2.PodManagementPolicyType `json:"podManagementPolicy,omitempty"` UpdateStrategy *StatefulSetUpdateStrategyApplyConfiguration `json:"updateStrategy,omitempty"` RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty"` MinReadySeconds *int32 `json:"minReadySeconds,omitempty"` @@ -94,7 +94,7 @@ func (b *StatefulSetSpecApplyConfiguration) WithServiceName(value string) *State // WithPodManagementPolicy sets the PodManagementPolicy field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the PodManagementPolicy field is set to the value of the last call. -func (b *StatefulSetSpecApplyConfiguration) WithPodManagementPolicy(value v1beta2.PodManagementPolicyType) *StatefulSetSpecApplyConfiguration { +func (b *StatefulSetSpecApplyConfiguration) WithPodManagementPolicy(value appsv1beta2.PodManagementPolicyType) *StatefulSetSpecApplyConfiguration { b.PodManagementPolicy = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetupdatestrategy.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetupdatestrategy.go index 81d4ba1df..f93db4f79 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetupdatestrategy.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetupdatestrategy.go @@ -19,13 +19,13 @@ limitations under the License. package v1beta2 import ( - v1beta2 "k8s.io/api/apps/v1beta2" + appsv1beta2 "k8s.io/api/apps/v1beta2" ) // StatefulSetUpdateStrategyApplyConfiguration represents a declarative configuration of the StatefulSetUpdateStrategy type for use // with apply. 
type StatefulSetUpdateStrategyApplyConfiguration struct { - Type *v1beta2.StatefulSetUpdateStrategyType `json:"type,omitempty"` + Type *appsv1beta2.StatefulSetUpdateStrategyType `json:"type,omitempty"` RollingUpdate *RollingUpdateStatefulSetStrategyApplyConfiguration `json:"rollingUpdate,omitempty"` } @@ -38,7 +38,7 @@ func StatefulSetUpdateStrategy() *StatefulSetUpdateStrategyApplyConfiguration { // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. -func (b *StatefulSetUpdateStrategyApplyConfiguration) WithType(value v1beta2.StatefulSetUpdateStrategyType) *StatefulSetUpdateStrategyApplyConfiguration { +func (b *StatefulSetUpdateStrategyApplyConfiguration) WithType(value appsv1beta2.StatefulSetUpdateStrategyType) *StatefulSetUpdateStrategyApplyConfiguration { b.Type = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/horizontalpodautoscaler.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/horizontalpodautoscaler.go index 8150635ee..8c9f08a73 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/horizontalpodautoscaler.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/horizontalpodautoscaler.go @@ -19,21 +19,21 @@ limitations under the License. package v1 import ( - apiautoscalingv1 "k8s.io/api/autoscaling/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + autoscalingv1 "k8s.io/api/autoscaling/v1" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" internal "k8s.io/client-go/applyconfigurations/internal" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) // HorizontalPodAutoscalerApplyConfiguration represents a declarative configuration of the HorizontalPodAutoscaler type for use // with apply. type HorizontalPodAutoscalerApplyConfiguration struct { - v1.TypeMetaApplyConfiguration `json:",inline"` - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Spec *HorizontalPodAutoscalerSpecApplyConfiguration `json:"spec,omitempty"` - Status *HorizontalPodAutoscalerStatusApplyConfiguration `json:"status,omitempty"` + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *HorizontalPodAutoscalerSpecApplyConfiguration `json:"spec,omitempty"` + Status *HorizontalPodAutoscalerStatusApplyConfiguration `json:"status,omitempty"` } // HorizontalPodAutoscaler constructs a declarative configuration of the HorizontalPodAutoscaler type for use with @@ -58,18 +58,18 @@ func HorizontalPodAutoscaler(name, namespace string) *HorizontalPodAutoscalerApp // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! 
-func ExtractHorizontalPodAutoscaler(horizontalPodAutoscaler *apiautoscalingv1.HorizontalPodAutoscaler, fieldManager string) (*HorizontalPodAutoscalerApplyConfiguration, error) { +func ExtractHorizontalPodAutoscaler(horizontalPodAutoscaler *autoscalingv1.HorizontalPodAutoscaler, fieldManager string) (*HorizontalPodAutoscalerApplyConfiguration, error) { return extractHorizontalPodAutoscaler(horizontalPodAutoscaler, fieldManager, "") } // ExtractHorizontalPodAutoscalerStatus is the same as ExtractHorizontalPodAutoscaler except // that it extracts the status subresource applied configuration. // Experimental! -func ExtractHorizontalPodAutoscalerStatus(horizontalPodAutoscaler *apiautoscalingv1.HorizontalPodAutoscaler, fieldManager string) (*HorizontalPodAutoscalerApplyConfiguration, error) { +func ExtractHorizontalPodAutoscalerStatus(horizontalPodAutoscaler *autoscalingv1.HorizontalPodAutoscaler, fieldManager string) (*HorizontalPodAutoscalerApplyConfiguration, error) { return extractHorizontalPodAutoscaler(horizontalPodAutoscaler, fieldManager, "status") } -func extractHorizontalPodAutoscaler(horizontalPodAutoscaler *apiautoscalingv1.HorizontalPodAutoscaler, fieldManager string, subresource string) (*HorizontalPodAutoscalerApplyConfiguration, error) { +func extractHorizontalPodAutoscaler(horizontalPodAutoscaler *autoscalingv1.HorizontalPodAutoscaler, fieldManager string, subresource string) (*HorizontalPodAutoscalerApplyConfiguration, error) { b := &HorizontalPodAutoscalerApplyConfiguration{} err := managedfields.ExtractInto(horizontalPodAutoscaler, internal.Parser().Type("io.k8s.api.autoscaling.v1.HorizontalPodAutoscaler"), fieldManager, b, subresource) if err != nil { @@ -87,7 +87,7 @@ func extractHorizontalPodAutoscaler(horizontalPodAutoscaler *apiautoscalingv1.Ho // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *HorizontalPodAutoscalerApplyConfiguration) WithKind(value string) *HorizontalPodAutoscalerApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -95,7 +95,7 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithKind(value string) *Hori // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *HorizontalPodAutoscalerApplyConfiguration) WithAPIVersion(value string) *HorizontalPodAutoscalerApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -104,7 +104,7 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithAPIVersion(value string) // If called multiple times, the Name field is set to the value of the last call. func (b *HorizontalPodAutoscalerApplyConfiguration) WithName(value string) *HorizontalPodAutoscalerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -113,7 +113,7 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithName(value string) *Hori // If called multiple times, the GenerateName field is set to the value of the last call. 
func (b *HorizontalPodAutoscalerApplyConfiguration) WithGenerateName(value string) *HorizontalPodAutoscalerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -122,7 +122,7 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithGenerateName(value strin // If called multiple times, the Namespace field is set to the value of the last call. func (b *HorizontalPodAutoscalerApplyConfiguration) WithNamespace(value string) *HorizontalPodAutoscalerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -131,7 +131,7 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithNamespace(value string) // If called multiple times, the UID field is set to the value of the last call. func (b *HorizontalPodAutoscalerApplyConfiguration) WithUID(value types.UID) *HorizontalPodAutoscalerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -140,7 +140,7 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithUID(value types.UID) *Ho // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *HorizontalPodAutoscalerApplyConfiguration) WithResourceVersion(value string) *HorizontalPodAutoscalerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -149,25 +149,25 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithResourceVersion(value st // If called multiple times, the Generation field is set to the value of the last call. func (b *HorizontalPodAutoscalerApplyConfiguration) WithGeneration(value int64) *HorizontalPodAutoscalerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CreationTimestamp field is set to the value of the last call. -func (b *HorizontalPodAutoscalerApplyConfiguration) WithCreationTimestamp(value metav1.Time) *HorizontalPodAutoscalerApplyConfiguration { +func (b *HorizontalPodAutoscalerApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *HorizontalPodAutoscalerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. 
-func (b *HorizontalPodAutoscalerApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *HorizontalPodAutoscalerApplyConfiguration { +func (b *HorizontalPodAutoscalerApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *HorizontalPodAutoscalerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -176,7 +176,7 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithDeletionTimestamp(value // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *HorizontalPodAutoscalerApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *HorizontalPodAutoscalerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -186,11 +186,11 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithDeletionGracePeriodSecon // overwriting an existing map entries in Labels field with the same key. func (b *HorizontalPodAutoscalerApplyConfiguration) WithLabels(entries map[string]string) *HorizontalPodAutoscalerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -201,11 +201,11 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithLabels(entries map[strin // overwriting an existing map entries in Annotations field with the same key. func (b *HorizontalPodAutoscalerApplyConfiguration) WithAnnotations(entries map[string]string) *HorizontalPodAutoscalerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -213,13 +213,13 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithAnnotations(entries map[ // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the OwnerReferences field. 
-func (b *HorizontalPodAutoscalerApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *HorizontalPodAutoscalerApplyConfiguration { +func (b *HorizontalPodAutoscalerApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *HorizontalPodAutoscalerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -230,14 +230,14 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithOwnerReferences(values . func (b *HorizontalPodAutoscalerApplyConfiguration) WithFinalizers(values ...string) *HorizontalPodAutoscalerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } func (b *HorizontalPodAutoscalerApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} } } @@ -260,5 +260,5 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithStatus(value *Horizontal // GetName retrieves the value of the Name field in the declarative configuration. func (b *HorizontalPodAutoscalerApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/horizontalpodautoscalerstatus.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/horizontalpodautoscalerstatus.go index fcb231c3b..8575214e1 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/horizontalpodautoscalerstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/horizontalpodautoscalerstatus.go @@ -19,17 +19,17 @@ limitations under the License. package v1 import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) // HorizontalPodAutoscalerStatusApplyConfiguration represents a declarative configuration of the HorizontalPodAutoscalerStatus type for use // with apply. 
type HorizontalPodAutoscalerStatusApplyConfiguration struct { - ObservedGeneration *int64 `json:"observedGeneration,omitempty"` - LastScaleTime *v1.Time `json:"lastScaleTime,omitempty"` - CurrentReplicas *int32 `json:"currentReplicas,omitempty"` - DesiredReplicas *int32 `json:"desiredReplicas,omitempty"` - CurrentCPUUtilizationPercentage *int32 `json:"currentCPUUtilizationPercentage,omitempty"` + ObservedGeneration *int64 `json:"observedGeneration,omitempty"` + LastScaleTime *metav1.Time `json:"lastScaleTime,omitempty"` + CurrentReplicas *int32 `json:"currentReplicas,omitempty"` + DesiredReplicas *int32 `json:"desiredReplicas,omitempty"` + CurrentCPUUtilizationPercentage *int32 `json:"currentCPUUtilizationPercentage,omitempty"` } // HorizontalPodAutoscalerStatusApplyConfiguration constructs a declarative configuration of the HorizontalPodAutoscalerStatus type for use with @@ -49,7 +49,7 @@ func (b *HorizontalPodAutoscalerStatusApplyConfiguration) WithObservedGeneration // WithLastScaleTime sets the LastScaleTime field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the LastScaleTime field is set to the value of the last call. -func (b *HorizontalPodAutoscalerStatusApplyConfiguration) WithLastScaleTime(value v1.Time) *HorizontalPodAutoscalerStatusApplyConfiguration { +func (b *HorizontalPodAutoscalerStatusApplyConfiguration) WithLastScaleTime(value metav1.Time) *HorizontalPodAutoscalerStatusApplyConfiguration { b.LastScaleTime = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/scale.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/scale.go index 40f3db8c5..13ae8e142 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/scale.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/scale.go @@ -19,18 +19,18 @@ limitations under the License. package v1 import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) // ScaleApplyConfiguration represents a declarative configuration of the Scale type for use // with apply. type ScaleApplyConfiguration struct { - v1.TypeMetaApplyConfiguration `json:",inline"` - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Spec *ScaleSpecApplyConfiguration `json:"spec,omitempty"` - Status *ScaleStatusApplyConfiguration `json:"status,omitempty"` + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *ScaleSpecApplyConfiguration `json:"spec,omitempty"` + Status *ScaleStatusApplyConfiguration `json:"status,omitempty"` } // ScaleApplyConfiguration constructs a declarative configuration of the Scale type for use with @@ -46,7 +46,7 @@ func Scale() *ScaleApplyConfiguration { // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. 
func (b *ScaleApplyConfiguration) WithKind(value string) *ScaleApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -54,7 +54,7 @@ func (b *ScaleApplyConfiguration) WithKind(value string) *ScaleApplyConfiguratio // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *ScaleApplyConfiguration) WithAPIVersion(value string) *ScaleApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -63,7 +63,7 @@ func (b *ScaleApplyConfiguration) WithAPIVersion(value string) *ScaleApplyConfig // If called multiple times, the Name field is set to the value of the last call. func (b *ScaleApplyConfiguration) WithName(value string) *ScaleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -72,7 +72,7 @@ func (b *ScaleApplyConfiguration) WithName(value string) *ScaleApplyConfiguratio // If called multiple times, the GenerateName field is set to the value of the last call. func (b *ScaleApplyConfiguration) WithGenerateName(value string) *ScaleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -81,7 +81,7 @@ func (b *ScaleApplyConfiguration) WithGenerateName(value string) *ScaleApplyConf // If called multiple times, the Namespace field is set to the value of the last call. func (b *ScaleApplyConfiguration) WithNamespace(value string) *ScaleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -90,7 +90,7 @@ func (b *ScaleApplyConfiguration) WithNamespace(value string) *ScaleApplyConfigu // If called multiple times, the UID field is set to the value of the last call. func (b *ScaleApplyConfiguration) WithUID(value types.UID) *ScaleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -99,7 +99,7 @@ func (b *ScaleApplyConfiguration) WithUID(value types.UID) *ScaleApplyConfigurat // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *ScaleApplyConfiguration) WithResourceVersion(value string) *ScaleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -108,25 +108,25 @@ func (b *ScaleApplyConfiguration) WithResourceVersion(value string) *ScaleApplyC // If called multiple times, the Generation field is set to the value of the last call. func (b *ScaleApplyConfiguration) WithGeneration(value int64) *ScaleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CreationTimestamp field is set to the value of the last call. 
-func (b *ScaleApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ScaleApplyConfiguration { +func (b *ScaleApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *ScaleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. -func (b *ScaleApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ScaleApplyConfiguration { +func (b *ScaleApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *ScaleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -135,7 +135,7 @@ func (b *ScaleApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *Scal // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *ScaleApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ScaleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -145,11 +145,11 @@ func (b *ScaleApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *S // overwriting an existing map entries in Labels field with the same key. func (b *ScaleApplyConfiguration) WithLabels(entries map[string]string) *ScaleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -160,11 +160,11 @@ func (b *ScaleApplyConfiguration) WithLabels(entries map[string]string) *ScaleAp // overwriting an existing map entries in Annotations field with the same key. func (b *ScaleApplyConfiguration) WithAnnotations(entries map[string]string) *ScaleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -172,13 +172,13 @@ func (b *ScaleApplyConfiguration) WithAnnotations(entries map[string]string) *Sc // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the OwnerReferences field. 
-func (b *ScaleApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ScaleApplyConfiguration { +func (b *ScaleApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *ScaleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -189,14 +189,14 @@ func (b *ScaleApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferen func (b *ScaleApplyConfiguration) WithFinalizers(values ...string) *ScaleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } func (b *ScaleApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} } } @@ -219,5 +219,5 @@ func (b *ScaleApplyConfiguration) WithStatus(value *ScaleStatusApplyConfiguratio // GetName retrieves the value of the Name field in the declarative configuration. func (b *ScaleApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscaler.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscaler.go index e26b530c1..99a5cd4bd 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscaler.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscaler.go @@ -87,7 +87,7 @@ func extractHorizontalPodAutoscaler(horizontalPodAutoscaler *autoscalingv2.Horiz // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *HorizontalPodAutoscalerApplyConfiguration) WithKind(value string) *HorizontalPodAutoscalerApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -95,7 +95,7 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithKind(value string) *Hori // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *HorizontalPodAutoscalerApplyConfiguration) WithAPIVersion(value string) *HorizontalPodAutoscalerApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -104,7 +104,7 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithAPIVersion(value string) // If called multiple times, the Name field is set to the value of the last call. 
func (b *HorizontalPodAutoscalerApplyConfiguration) WithName(value string) *HorizontalPodAutoscalerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -113,7 +113,7 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithName(value string) *Hori // If called multiple times, the GenerateName field is set to the value of the last call. func (b *HorizontalPodAutoscalerApplyConfiguration) WithGenerateName(value string) *HorizontalPodAutoscalerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -122,7 +122,7 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithGenerateName(value strin // If called multiple times, the Namespace field is set to the value of the last call. func (b *HorizontalPodAutoscalerApplyConfiguration) WithNamespace(value string) *HorizontalPodAutoscalerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -131,7 +131,7 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithNamespace(value string) // If called multiple times, the UID field is set to the value of the last call. func (b *HorizontalPodAutoscalerApplyConfiguration) WithUID(value types.UID) *HorizontalPodAutoscalerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -140,7 +140,7 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithUID(value types.UID) *Ho // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *HorizontalPodAutoscalerApplyConfiguration) WithResourceVersion(value string) *HorizontalPodAutoscalerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -149,7 +149,7 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithResourceVersion(value st // If called multiple times, the Generation field is set to the value of the last call. func (b *HorizontalPodAutoscalerApplyConfiguration) WithGeneration(value int64) *HorizontalPodAutoscalerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } @@ -158,7 +158,7 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithGeneration(value int64) // If called multiple times, the CreationTimestamp field is set to the value of the last call. func (b *HorizontalPodAutoscalerApplyConfiguration) WithCreationTimestamp(value metav1.Time) *HorizontalPodAutoscalerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } @@ -167,7 +167,7 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithCreationTimestamp(value // If called multiple times, the DeletionTimestamp field is set to the value of the last call. 
func (b *HorizontalPodAutoscalerApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *HorizontalPodAutoscalerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -176,7 +176,7 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithDeletionTimestamp(value // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *HorizontalPodAutoscalerApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *HorizontalPodAutoscalerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -186,11 +186,11 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithDeletionGracePeriodSecon // overwriting an existing map entries in Labels field with the same key. func (b *HorizontalPodAutoscalerApplyConfiguration) WithLabels(entries map[string]string) *HorizontalPodAutoscalerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -201,11 +201,11 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithLabels(entries map[strin // overwriting an existing map entries in Annotations field with the same key. func (b *HorizontalPodAutoscalerApplyConfiguration) WithAnnotations(entries map[string]string) *HorizontalPodAutoscalerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -219,7 +219,7 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithOwnerReferences(values . if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -230,7 +230,7 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithOwnerReferences(values . func (b *HorizontalPodAutoscalerApplyConfiguration) WithFinalizers(values ...string) *HorizontalPodAutoscalerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } @@ -260,5 +260,5 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithStatus(value *Horizontal // GetName retrieves the value of the Name field in the declarative configuration. 
func (b *HorizontalPodAutoscalerApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscalercondition.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscalercondition.go index 844c6dc86..25ea39039 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscalercondition.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscalercondition.go @@ -19,7 +19,7 @@ limitations under the License. package v2 import ( - v2 "k8s.io/api/autoscaling/v2" + autoscalingv2 "k8s.io/api/autoscaling/v2" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -27,11 +27,11 @@ import ( // HorizontalPodAutoscalerConditionApplyConfiguration represents a declarative configuration of the HorizontalPodAutoscalerCondition type for use // with apply. type HorizontalPodAutoscalerConditionApplyConfiguration struct { - Type *v2.HorizontalPodAutoscalerConditionType `json:"type,omitempty"` - Status *v1.ConditionStatus `json:"status,omitempty"` - LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"` - Reason *string `json:"reason,omitempty"` - Message *string `json:"message,omitempty"` + Type *autoscalingv2.HorizontalPodAutoscalerConditionType `json:"type,omitempty"` + Status *v1.ConditionStatus `json:"status,omitempty"` + LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"` + Reason *string `json:"reason,omitempty"` + Message *string `json:"message,omitempty"` } // HorizontalPodAutoscalerConditionApplyConfiguration constructs a declarative configuration of the HorizontalPodAutoscalerCondition type for use with @@ -43,7 +43,7 @@ func HorizontalPodAutoscalerCondition() *HorizontalPodAutoscalerConditionApplyCo // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. -func (b *HorizontalPodAutoscalerConditionApplyConfiguration) WithType(value v2.HorizontalPodAutoscalerConditionType) *HorizontalPodAutoscalerConditionApplyConfiguration { +func (b *HorizontalPodAutoscalerConditionApplyConfiguration) WithType(value autoscalingv2.HorizontalPodAutoscalerConditionType) *HorizontalPodAutoscalerConditionApplyConfiguration { b.Type = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/hpascalingpolicy.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/hpascalingpolicy.go index b8b735747..f89185c57 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/hpascalingpolicy.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/hpascalingpolicy.go @@ -19,15 +19,15 @@ limitations under the License. package v2 import ( - v2 "k8s.io/api/autoscaling/v2" + autoscalingv2 "k8s.io/api/autoscaling/v2" ) // HPAScalingPolicyApplyConfiguration represents a declarative configuration of the HPAScalingPolicy type for use // with apply. 
type HPAScalingPolicyApplyConfiguration struct { - Type *v2.HPAScalingPolicyType `json:"type,omitempty"` - Value *int32 `json:"value,omitempty"` - PeriodSeconds *int32 `json:"periodSeconds,omitempty"` + Type *autoscalingv2.HPAScalingPolicyType `json:"type,omitempty"` + Value *int32 `json:"value,omitempty"` + PeriodSeconds *int32 `json:"periodSeconds,omitempty"` } // HPAScalingPolicyApplyConfiguration constructs a declarative configuration of the HPAScalingPolicy type for use with @@ -39,7 +39,7 @@ func HPAScalingPolicy() *HPAScalingPolicyApplyConfiguration { // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. -func (b *HPAScalingPolicyApplyConfiguration) WithType(value v2.HPAScalingPolicyType) *HPAScalingPolicyApplyConfiguration { +func (b *HPAScalingPolicyApplyConfiguration) WithType(value autoscalingv2.HPAScalingPolicyType) *HPAScalingPolicyApplyConfiguration { b.Type = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/hpascalingrules.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/hpascalingrules.go index c7020f77b..6a6a2655f 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/hpascalingrules.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/hpascalingrules.go @@ -19,14 +19,14 @@ limitations under the License. package v2 import ( - v2 "k8s.io/api/autoscaling/v2" + autoscalingv2 "k8s.io/api/autoscaling/v2" ) // HPAScalingRulesApplyConfiguration represents a declarative configuration of the HPAScalingRules type for use // with apply. type HPAScalingRulesApplyConfiguration struct { StabilizationWindowSeconds *int32 `json:"stabilizationWindowSeconds,omitempty"` - SelectPolicy *v2.ScalingPolicySelect `json:"selectPolicy,omitempty"` + SelectPolicy *autoscalingv2.ScalingPolicySelect `json:"selectPolicy,omitempty"` Policies []HPAScalingPolicyApplyConfiguration `json:"policies,omitempty"` } @@ -47,7 +47,7 @@ func (b *HPAScalingRulesApplyConfiguration) WithStabilizationWindowSeconds(value // WithSelectPolicy sets the SelectPolicy field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the SelectPolicy field is set to the value of the last call. -func (b *HPAScalingRulesApplyConfiguration) WithSelectPolicy(value v2.ScalingPolicySelect) *HPAScalingRulesApplyConfiguration { +func (b *HPAScalingRulesApplyConfiguration) WithSelectPolicy(value autoscalingv2.ScalingPolicySelect) *HPAScalingRulesApplyConfiguration { b.SelectPolicy = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metricspec.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metricspec.go index 89e6b5c68..282b84a44 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metricspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metricspec.go @@ -19,13 +19,13 @@ limitations under the License. package v2 import ( - v2 "k8s.io/api/autoscaling/v2" + autoscalingv2 "k8s.io/api/autoscaling/v2" ) // MetricSpecApplyConfiguration represents a declarative configuration of the MetricSpec type for use // with apply. 
type MetricSpecApplyConfiguration struct { - Type *v2.MetricSourceType `json:"type,omitempty"` + Type *autoscalingv2.MetricSourceType `json:"type,omitempty"` Object *ObjectMetricSourceApplyConfiguration `json:"object,omitempty"` Pods *PodsMetricSourceApplyConfiguration `json:"pods,omitempty"` Resource *ResourceMetricSourceApplyConfiguration `json:"resource,omitempty"` @@ -42,7 +42,7 @@ func MetricSpec() *MetricSpecApplyConfiguration { // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. -func (b *MetricSpecApplyConfiguration) WithType(value v2.MetricSourceType) *MetricSpecApplyConfiguration { +func (b *MetricSpecApplyConfiguration) WithType(value autoscalingv2.MetricSourceType) *MetricSpecApplyConfiguration { b.Type = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metricstatus.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metricstatus.go index 86ae3348b..f1204824e 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metricstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metricstatus.go @@ -19,13 +19,13 @@ limitations under the License. package v2 import ( - v2 "k8s.io/api/autoscaling/v2" + autoscalingv2 "k8s.io/api/autoscaling/v2" ) // MetricStatusApplyConfiguration represents a declarative configuration of the MetricStatus type for use // with apply. type MetricStatusApplyConfiguration struct { - Type *v2.MetricSourceType `json:"type,omitempty"` + Type *autoscalingv2.MetricSourceType `json:"type,omitempty"` Object *ObjectMetricStatusApplyConfiguration `json:"object,omitempty"` Pods *PodsMetricStatusApplyConfiguration `json:"pods,omitempty"` Resource *ResourceMetricStatusApplyConfiguration `json:"resource,omitempty"` @@ -42,7 +42,7 @@ func MetricStatus() *MetricStatusApplyConfiguration { // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. -func (b *MetricStatusApplyConfiguration) WithType(value v2.MetricSourceType) *MetricStatusApplyConfiguration { +func (b *MetricStatusApplyConfiguration) WithType(value autoscalingv2.MetricSourceType) *MetricStatusApplyConfiguration { b.Type = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metrictarget.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metrictarget.go index bf68a1c34..13d2e9365 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metrictarget.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metrictarget.go @@ -19,17 +19,17 @@ limitations under the License. package v2 import ( - v2 "k8s.io/api/autoscaling/v2" + autoscalingv2 "k8s.io/api/autoscaling/v2" resource "k8s.io/apimachinery/pkg/api/resource" ) // MetricTargetApplyConfiguration represents a declarative configuration of the MetricTarget type for use // with apply. 
type MetricTargetApplyConfiguration struct { - Type *v2.MetricTargetType `json:"type,omitempty"` - Value *resource.Quantity `json:"value,omitempty"` - AverageValue *resource.Quantity `json:"averageValue,omitempty"` - AverageUtilization *int32 `json:"averageUtilization,omitempty"` + Type *autoscalingv2.MetricTargetType `json:"type,omitempty"` + Value *resource.Quantity `json:"value,omitempty"` + AverageValue *resource.Quantity `json:"averageValue,omitempty"` + AverageUtilization *int32 `json:"averageUtilization,omitempty"` } // MetricTargetApplyConfiguration constructs a declarative configuration of the MetricTarget type for use with @@ -41,7 +41,7 @@ func MetricTarget() *MetricTargetApplyConfiguration { // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. -func (b *MetricTargetApplyConfiguration) WithType(value v2.MetricTargetType) *MetricTargetApplyConfiguration { +func (b *MetricTargetApplyConfiguration) WithType(value autoscalingv2.MetricTargetType) *MetricTargetApplyConfiguration { b.Type = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/horizontalpodautoscaler.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/horizontalpodautoscaler.go index 93e37eaff..51ae84901 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/horizontalpodautoscaler.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/horizontalpodautoscaler.go @@ -87,7 +87,7 @@ func extractHorizontalPodAutoscaler(horizontalPodAutoscaler *autoscalingv2beta1. // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *HorizontalPodAutoscalerApplyConfiguration) WithKind(value string) *HorizontalPodAutoscalerApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -95,7 +95,7 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithKind(value string) *Hori // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *HorizontalPodAutoscalerApplyConfiguration) WithAPIVersion(value string) *HorizontalPodAutoscalerApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -104,7 +104,7 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithAPIVersion(value string) // If called multiple times, the Name field is set to the value of the last call. func (b *HorizontalPodAutoscalerApplyConfiguration) WithName(value string) *HorizontalPodAutoscalerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -113,7 +113,7 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithName(value string) *Hori // If called multiple times, the GenerateName field is set to the value of the last call. 
func (b *HorizontalPodAutoscalerApplyConfiguration) WithGenerateName(value string) *HorizontalPodAutoscalerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -122,7 +122,7 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithGenerateName(value strin // If called multiple times, the Namespace field is set to the value of the last call. func (b *HorizontalPodAutoscalerApplyConfiguration) WithNamespace(value string) *HorizontalPodAutoscalerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -131,7 +131,7 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithNamespace(value string) // If called multiple times, the UID field is set to the value of the last call. func (b *HorizontalPodAutoscalerApplyConfiguration) WithUID(value types.UID) *HorizontalPodAutoscalerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -140,7 +140,7 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithUID(value types.UID) *Ho // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *HorizontalPodAutoscalerApplyConfiguration) WithResourceVersion(value string) *HorizontalPodAutoscalerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -149,7 +149,7 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithResourceVersion(value st // If called multiple times, the Generation field is set to the value of the last call. func (b *HorizontalPodAutoscalerApplyConfiguration) WithGeneration(value int64) *HorizontalPodAutoscalerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } @@ -158,7 +158,7 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithGeneration(value int64) // If called multiple times, the CreationTimestamp field is set to the value of the last call. func (b *HorizontalPodAutoscalerApplyConfiguration) WithCreationTimestamp(value metav1.Time) *HorizontalPodAutoscalerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } @@ -167,7 +167,7 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithCreationTimestamp(value // If called multiple times, the DeletionTimestamp field is set to the value of the last call. func (b *HorizontalPodAutoscalerApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *HorizontalPodAutoscalerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -176,7 +176,7 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithDeletionTimestamp(value // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. 
func (b *HorizontalPodAutoscalerApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *HorizontalPodAutoscalerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -186,11 +186,11 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithDeletionGracePeriodSecon // overwriting an existing map entries in Labels field with the same key. func (b *HorizontalPodAutoscalerApplyConfiguration) WithLabels(entries map[string]string) *HorizontalPodAutoscalerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -201,11 +201,11 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithLabels(entries map[strin // overwriting an existing map entries in Annotations field with the same key. func (b *HorizontalPodAutoscalerApplyConfiguration) WithAnnotations(entries map[string]string) *HorizontalPodAutoscalerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -219,7 +219,7 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithOwnerReferences(values . if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -230,7 +230,7 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithOwnerReferences(values . func (b *HorizontalPodAutoscalerApplyConfiguration) WithFinalizers(values ...string) *HorizontalPodAutoscalerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } @@ -260,5 +260,5 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithStatus(value *Horizontal // GetName retrieves the value of the Name field in the declarative configuration. func (b *HorizontalPodAutoscalerApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/horizontalpodautoscalercondition.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/horizontalpodautoscalercondition.go index 8bb82298d..445cd55ae 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/horizontalpodautoscalercondition.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/horizontalpodautoscalercondition.go @@ -19,7 +19,7 @@ limitations under the License. 
package v2beta1 import ( - v2beta1 "k8s.io/api/autoscaling/v2beta1" + autoscalingv2beta1 "k8s.io/api/autoscaling/v2beta1" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -27,11 +27,11 @@ import ( // HorizontalPodAutoscalerConditionApplyConfiguration represents a declarative configuration of the HorizontalPodAutoscalerCondition type for use // with apply. type HorizontalPodAutoscalerConditionApplyConfiguration struct { - Type *v2beta1.HorizontalPodAutoscalerConditionType `json:"type,omitempty"` - Status *v1.ConditionStatus `json:"status,omitempty"` - LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"` - Reason *string `json:"reason,omitempty"` - Message *string `json:"message,omitempty"` + Type *autoscalingv2beta1.HorizontalPodAutoscalerConditionType `json:"type,omitempty"` + Status *v1.ConditionStatus `json:"status,omitempty"` + LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"` + Reason *string `json:"reason,omitempty"` + Message *string `json:"message,omitempty"` } // HorizontalPodAutoscalerConditionApplyConfiguration constructs a declarative configuration of the HorizontalPodAutoscalerCondition type for use with @@ -43,7 +43,7 @@ func HorizontalPodAutoscalerCondition() *HorizontalPodAutoscalerConditionApplyCo // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. -func (b *HorizontalPodAutoscalerConditionApplyConfiguration) WithType(value v2beta1.HorizontalPodAutoscalerConditionType) *HorizontalPodAutoscalerConditionApplyConfiguration { +func (b *HorizontalPodAutoscalerConditionApplyConfiguration) WithType(value autoscalingv2beta1.HorizontalPodAutoscalerConditionType) *HorizontalPodAutoscalerConditionApplyConfiguration { b.Type = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/metricspec.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/metricspec.go index 961e2c5b4..3a5faa3b2 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/metricspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/metricspec.go @@ -19,13 +19,13 @@ limitations under the License. package v2beta1 import ( - v2beta1 "k8s.io/api/autoscaling/v2beta1" + autoscalingv2beta1 "k8s.io/api/autoscaling/v2beta1" ) // MetricSpecApplyConfiguration represents a declarative configuration of the MetricSpec type for use // with apply. type MetricSpecApplyConfiguration struct { - Type *v2beta1.MetricSourceType `json:"type,omitempty"` + Type *autoscalingv2beta1.MetricSourceType `json:"type,omitempty"` Object *ObjectMetricSourceApplyConfiguration `json:"object,omitempty"` Pods *PodsMetricSourceApplyConfiguration `json:"pods,omitempty"` Resource *ResourceMetricSourceApplyConfiguration `json:"resource,omitempty"` @@ -42,7 +42,7 @@ func MetricSpec() *MetricSpecApplyConfiguration { // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. 
-func (b *MetricSpecApplyConfiguration) WithType(value v2beta1.MetricSourceType) *MetricSpecApplyConfiguration { +func (b *MetricSpecApplyConfiguration) WithType(value autoscalingv2beta1.MetricSourceType) *MetricSpecApplyConfiguration { b.Type = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/metricstatus.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/metricstatus.go index 587b5a1f8..f281e182d 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/metricstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/metricstatus.go @@ -19,13 +19,13 @@ limitations under the License. package v2beta1 import ( - v2beta1 "k8s.io/api/autoscaling/v2beta1" + autoscalingv2beta1 "k8s.io/api/autoscaling/v2beta1" ) // MetricStatusApplyConfiguration represents a declarative configuration of the MetricStatus type for use // with apply. type MetricStatusApplyConfiguration struct { - Type *v2beta1.MetricSourceType `json:"type,omitempty"` + Type *autoscalingv2beta1.MetricSourceType `json:"type,omitempty"` Object *ObjectMetricStatusApplyConfiguration `json:"object,omitempty"` Pods *PodsMetricStatusApplyConfiguration `json:"pods,omitempty"` Resource *ResourceMetricStatusApplyConfiguration `json:"resource,omitempty"` @@ -42,7 +42,7 @@ func MetricStatus() *MetricStatusApplyConfiguration { // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. -func (b *MetricStatusApplyConfiguration) WithType(value v2beta1.MetricSourceType) *MetricStatusApplyConfiguration { +func (b *MetricStatusApplyConfiguration) WithType(value autoscalingv2beta1.MetricSourceType) *MetricStatusApplyConfiguration { b.Type = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/horizontalpodautoscaler.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/horizontalpodautoscaler.go index ce666f0f3..19794ff42 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/horizontalpodautoscaler.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/horizontalpodautoscaler.go @@ -87,7 +87,7 @@ func extractHorizontalPodAutoscaler(horizontalPodAutoscaler *autoscalingv2beta2. // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *HorizontalPodAutoscalerApplyConfiguration) WithKind(value string) *HorizontalPodAutoscalerApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -95,7 +95,7 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithKind(value string) *Hori // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *HorizontalPodAutoscalerApplyConfiguration) WithAPIVersion(value string) *HorizontalPodAutoscalerApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -104,7 +104,7 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithAPIVersion(value string) // If called multiple times, the Name field is set to the value of the last call. 
func (b *HorizontalPodAutoscalerApplyConfiguration) WithName(value string) *HorizontalPodAutoscalerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -113,7 +113,7 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithName(value string) *Hori // If called multiple times, the GenerateName field is set to the value of the last call. func (b *HorizontalPodAutoscalerApplyConfiguration) WithGenerateName(value string) *HorizontalPodAutoscalerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -122,7 +122,7 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithGenerateName(value strin // If called multiple times, the Namespace field is set to the value of the last call. func (b *HorizontalPodAutoscalerApplyConfiguration) WithNamespace(value string) *HorizontalPodAutoscalerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -131,7 +131,7 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithNamespace(value string) // If called multiple times, the UID field is set to the value of the last call. func (b *HorizontalPodAutoscalerApplyConfiguration) WithUID(value types.UID) *HorizontalPodAutoscalerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -140,7 +140,7 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithUID(value types.UID) *Ho // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *HorizontalPodAutoscalerApplyConfiguration) WithResourceVersion(value string) *HorizontalPodAutoscalerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -149,7 +149,7 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithResourceVersion(value st // If called multiple times, the Generation field is set to the value of the last call. func (b *HorizontalPodAutoscalerApplyConfiguration) WithGeneration(value int64) *HorizontalPodAutoscalerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } @@ -158,7 +158,7 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithGeneration(value int64) // If called multiple times, the CreationTimestamp field is set to the value of the last call. func (b *HorizontalPodAutoscalerApplyConfiguration) WithCreationTimestamp(value metav1.Time) *HorizontalPodAutoscalerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } @@ -167,7 +167,7 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithCreationTimestamp(value // If called multiple times, the DeletionTimestamp field is set to the value of the last call. 
func (b *HorizontalPodAutoscalerApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *HorizontalPodAutoscalerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -176,7 +176,7 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithDeletionTimestamp(value // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *HorizontalPodAutoscalerApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *HorizontalPodAutoscalerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -186,11 +186,11 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithDeletionGracePeriodSecon // overwriting an existing map entries in Labels field with the same key. func (b *HorizontalPodAutoscalerApplyConfiguration) WithLabels(entries map[string]string) *HorizontalPodAutoscalerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -201,11 +201,11 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithLabels(entries map[strin // overwriting an existing map entries in Annotations field with the same key. func (b *HorizontalPodAutoscalerApplyConfiguration) WithAnnotations(entries map[string]string) *HorizontalPodAutoscalerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -219,7 +219,7 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithOwnerReferences(values . if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -230,7 +230,7 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithOwnerReferences(values . func (b *HorizontalPodAutoscalerApplyConfiguration) WithFinalizers(values ...string) *HorizontalPodAutoscalerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } @@ -260,5 +260,5 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithStatus(value *Horizontal // GetName retrieves the value of the Name field in the declarative configuration. 
func (b *HorizontalPodAutoscalerApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/horizontalpodautoscalercondition.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/horizontalpodautoscalercondition.go index a73e7ebaa..f88869124 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/horizontalpodautoscalercondition.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/horizontalpodautoscalercondition.go @@ -19,7 +19,7 @@ limitations under the License. package v2beta2 import ( - v2beta2 "k8s.io/api/autoscaling/v2beta2" + autoscalingv2beta2 "k8s.io/api/autoscaling/v2beta2" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -27,11 +27,11 @@ import ( // HorizontalPodAutoscalerConditionApplyConfiguration represents a declarative configuration of the HorizontalPodAutoscalerCondition type for use // with apply. type HorizontalPodAutoscalerConditionApplyConfiguration struct { - Type *v2beta2.HorizontalPodAutoscalerConditionType `json:"type,omitempty"` - Status *v1.ConditionStatus `json:"status,omitempty"` - LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"` - Reason *string `json:"reason,omitempty"` - Message *string `json:"message,omitempty"` + Type *autoscalingv2beta2.HorizontalPodAutoscalerConditionType `json:"type,omitempty"` + Status *v1.ConditionStatus `json:"status,omitempty"` + LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"` + Reason *string `json:"reason,omitempty"` + Message *string `json:"message,omitempty"` } // HorizontalPodAutoscalerConditionApplyConfiguration constructs a declarative configuration of the HorizontalPodAutoscalerCondition type for use with @@ -43,7 +43,7 @@ func HorizontalPodAutoscalerCondition() *HorizontalPodAutoscalerConditionApplyCo // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. -func (b *HorizontalPodAutoscalerConditionApplyConfiguration) WithType(value v2beta2.HorizontalPodAutoscalerConditionType) *HorizontalPodAutoscalerConditionApplyConfiguration { +func (b *HorizontalPodAutoscalerConditionApplyConfiguration) WithType(value autoscalingv2beta2.HorizontalPodAutoscalerConditionType) *HorizontalPodAutoscalerConditionApplyConfiguration { b.Type = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/hpascalingpolicy.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/hpascalingpolicy.go index b799f99e0..2bbbbddec 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/hpascalingpolicy.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/hpascalingpolicy.go @@ -19,15 +19,15 @@ limitations under the License. package v2beta2 import ( - v2beta2 "k8s.io/api/autoscaling/v2beta2" + autoscalingv2beta2 "k8s.io/api/autoscaling/v2beta2" ) // HPAScalingPolicyApplyConfiguration represents a declarative configuration of the HPAScalingPolicy type for use // with apply. 
type HPAScalingPolicyApplyConfiguration struct { - Type *v2beta2.HPAScalingPolicyType `json:"type,omitempty"` - Value *int32 `json:"value,omitempty"` - PeriodSeconds *int32 `json:"periodSeconds,omitempty"` + Type *autoscalingv2beta2.HPAScalingPolicyType `json:"type,omitempty"` + Value *int32 `json:"value,omitempty"` + PeriodSeconds *int32 `json:"periodSeconds,omitempty"` } // HPAScalingPolicyApplyConfiguration constructs a declarative configuration of the HPAScalingPolicy type for use with @@ -39,7 +39,7 @@ func HPAScalingPolicy() *HPAScalingPolicyApplyConfiguration { // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. -func (b *HPAScalingPolicyApplyConfiguration) WithType(value v2beta2.HPAScalingPolicyType) *HPAScalingPolicyApplyConfiguration { +func (b *HPAScalingPolicyApplyConfiguration) WithType(value autoscalingv2beta2.HPAScalingPolicyType) *HPAScalingPolicyApplyConfiguration { b.Type = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/hpascalingrules.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/hpascalingrules.go index f7e8d9ae3..92aa449aa 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/hpascalingrules.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/hpascalingrules.go @@ -19,15 +19,15 @@ limitations under the License. package v2beta2 import ( - v2beta2 "k8s.io/api/autoscaling/v2beta2" + autoscalingv2beta2 "k8s.io/api/autoscaling/v2beta2" ) // HPAScalingRulesApplyConfiguration represents a declarative configuration of the HPAScalingRules type for use // with apply. type HPAScalingRulesApplyConfiguration struct { - StabilizationWindowSeconds *int32 `json:"stabilizationWindowSeconds,omitempty"` - SelectPolicy *v2beta2.ScalingPolicySelect `json:"selectPolicy,omitempty"` - Policies []HPAScalingPolicyApplyConfiguration `json:"policies,omitempty"` + StabilizationWindowSeconds *int32 `json:"stabilizationWindowSeconds,omitempty"` + SelectPolicy *autoscalingv2beta2.ScalingPolicySelect `json:"selectPolicy,omitempty"` + Policies []HPAScalingPolicyApplyConfiguration `json:"policies,omitempty"` } // HPAScalingRulesApplyConfiguration constructs a declarative configuration of the HPAScalingRules type for use with @@ -47,7 +47,7 @@ func (b *HPAScalingRulesApplyConfiguration) WithStabilizationWindowSeconds(value // WithSelectPolicy sets the SelectPolicy field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the SelectPolicy field is set to the value of the last call. 
-func (b *HPAScalingRulesApplyConfiguration) WithSelectPolicy(value v2beta2.ScalingPolicySelect) *HPAScalingRulesApplyConfiguration { +func (b *HPAScalingRulesApplyConfiguration) WithSelectPolicy(value autoscalingv2beta2.ScalingPolicySelect) *HPAScalingRulesApplyConfiguration { b.SelectPolicy = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/metricspec.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/metricspec.go index 3ec710861..3da1617cf 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/metricspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/metricspec.go @@ -19,13 +19,13 @@ limitations under the License. package v2beta2 import ( - v2beta2 "k8s.io/api/autoscaling/v2beta2" + autoscalingv2beta2 "k8s.io/api/autoscaling/v2beta2" ) // MetricSpecApplyConfiguration represents a declarative configuration of the MetricSpec type for use // with apply. type MetricSpecApplyConfiguration struct { - Type *v2beta2.MetricSourceType `json:"type,omitempty"` + Type *autoscalingv2beta2.MetricSourceType `json:"type,omitempty"` Object *ObjectMetricSourceApplyConfiguration `json:"object,omitempty"` Pods *PodsMetricSourceApplyConfiguration `json:"pods,omitempty"` Resource *ResourceMetricSourceApplyConfiguration `json:"resource,omitempty"` @@ -42,7 +42,7 @@ func MetricSpec() *MetricSpecApplyConfiguration { // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. -func (b *MetricSpecApplyConfiguration) WithType(value v2beta2.MetricSourceType) *MetricSpecApplyConfiguration { +func (b *MetricSpecApplyConfiguration) WithType(value autoscalingv2beta2.MetricSourceType) *MetricSpecApplyConfiguration { b.Type = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/metricstatus.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/metricstatus.go index 40d32795b..b528bd760 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/metricstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/metricstatus.go @@ -19,13 +19,13 @@ limitations under the License. package v2beta2 import ( - v2beta2 "k8s.io/api/autoscaling/v2beta2" + autoscalingv2beta2 "k8s.io/api/autoscaling/v2beta2" ) // MetricStatusApplyConfiguration represents a declarative configuration of the MetricStatus type for use // with apply. type MetricStatusApplyConfiguration struct { - Type *v2beta2.MetricSourceType `json:"type,omitempty"` + Type *autoscalingv2beta2.MetricSourceType `json:"type,omitempty"` Object *ObjectMetricStatusApplyConfiguration `json:"object,omitempty"` Pods *PodsMetricStatusApplyConfiguration `json:"pods,omitempty"` Resource *ResourceMetricStatusApplyConfiguration `json:"resource,omitempty"` @@ -42,7 +42,7 @@ func MetricStatus() *MetricStatusApplyConfiguration { // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. 
-func (b *MetricStatusApplyConfiguration) WithType(value v2beta2.MetricSourceType) *MetricStatusApplyConfiguration { +func (b *MetricStatusApplyConfiguration) WithType(value autoscalingv2beta2.MetricSourceType) *MetricStatusApplyConfiguration { b.Type = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/metrictarget.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/metrictarget.go index aeec3102e..286856d82 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/metrictarget.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/metrictarget.go @@ -19,17 +19,17 @@ limitations under the License. package v2beta2 import ( - v2beta2 "k8s.io/api/autoscaling/v2beta2" + autoscalingv2beta2 "k8s.io/api/autoscaling/v2beta2" resource "k8s.io/apimachinery/pkg/api/resource" ) // MetricTargetApplyConfiguration represents a declarative configuration of the MetricTarget type for use // with apply. type MetricTargetApplyConfiguration struct { - Type *v2beta2.MetricTargetType `json:"type,omitempty"` - Value *resource.Quantity `json:"value,omitempty"` - AverageValue *resource.Quantity `json:"averageValue,omitempty"` - AverageUtilization *int32 `json:"averageUtilization,omitempty"` + Type *autoscalingv2beta2.MetricTargetType `json:"type,omitempty"` + Value *resource.Quantity `json:"value,omitempty"` + AverageValue *resource.Quantity `json:"averageValue,omitempty"` + AverageUtilization *int32 `json:"averageUtilization,omitempty"` } // MetricTargetApplyConfiguration constructs a declarative configuration of the MetricTarget type for use with @@ -41,7 +41,7 @@ func MetricTarget() *MetricTargetApplyConfiguration { // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. -func (b *MetricTargetApplyConfiguration) WithType(value v2beta2.MetricTargetType) *MetricTargetApplyConfiguration { +func (b *MetricTargetApplyConfiguration) WithType(value autoscalingv2beta2.MetricTargetType) *MetricTargetApplyConfiguration { b.Type = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/cronjob.go b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/cronjob.go index 8b26816e5..f96cba1c5 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/cronjob.go +++ b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/cronjob.go @@ -19,21 +19,21 @@ limitations under the License. package v1 import ( - apibatchv1 "k8s.io/api/batch/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + batchv1 "k8s.io/api/batch/v1" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" internal "k8s.io/client-go/applyconfigurations/internal" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) // CronJobApplyConfiguration represents a declarative configuration of the CronJob type for use // with apply. 
type CronJobApplyConfiguration struct { - v1.TypeMetaApplyConfiguration `json:",inline"` - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Spec *CronJobSpecApplyConfiguration `json:"spec,omitempty"` - Status *CronJobStatusApplyConfiguration `json:"status,omitempty"` + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *CronJobSpecApplyConfiguration `json:"spec,omitempty"` + Status *CronJobStatusApplyConfiguration `json:"status,omitempty"` } // CronJob constructs a declarative configuration of the CronJob type for use with @@ -58,18 +58,18 @@ func CronJob(name, namespace string) *CronJobApplyConfiguration { // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! -func ExtractCronJob(cronJob *apibatchv1.CronJob, fieldManager string) (*CronJobApplyConfiguration, error) { +func ExtractCronJob(cronJob *batchv1.CronJob, fieldManager string) (*CronJobApplyConfiguration, error) { return extractCronJob(cronJob, fieldManager, "") } // ExtractCronJobStatus is the same as ExtractCronJob except // that it extracts the status subresource applied configuration. // Experimental! -func ExtractCronJobStatus(cronJob *apibatchv1.CronJob, fieldManager string) (*CronJobApplyConfiguration, error) { +func ExtractCronJobStatus(cronJob *batchv1.CronJob, fieldManager string) (*CronJobApplyConfiguration, error) { return extractCronJob(cronJob, fieldManager, "status") } -func extractCronJob(cronJob *apibatchv1.CronJob, fieldManager string, subresource string) (*CronJobApplyConfiguration, error) { +func extractCronJob(cronJob *batchv1.CronJob, fieldManager string, subresource string) (*CronJobApplyConfiguration, error) { b := &CronJobApplyConfiguration{} err := managedfields.ExtractInto(cronJob, internal.Parser().Type("io.k8s.api.batch.v1.CronJob"), fieldManager, b, subresource) if err != nil { @@ -87,7 +87,7 @@ func extractCronJob(cronJob *apibatchv1.CronJob, fieldManager string, subresourc // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *CronJobApplyConfiguration) WithKind(value string) *CronJobApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -95,7 +95,7 @@ func (b *CronJobApplyConfiguration) WithKind(value string) *CronJobApplyConfigur // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *CronJobApplyConfiguration) WithAPIVersion(value string) *CronJobApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -104,7 +104,7 @@ func (b *CronJobApplyConfiguration) WithAPIVersion(value string) *CronJobApplyCo // If called multiple times, the Name field is set to the value of the last call. func (b *CronJobApplyConfiguration) WithName(value string) *CronJobApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -113,7 +113,7 @@ func (b *CronJobApplyConfiguration) WithName(value string) *CronJobApplyConfigur // If called multiple times, the GenerateName field is set to the value of the last call. 
func (b *CronJobApplyConfiguration) WithGenerateName(value string) *CronJobApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -122,7 +122,7 @@ func (b *CronJobApplyConfiguration) WithGenerateName(value string) *CronJobApply // If called multiple times, the Namespace field is set to the value of the last call. func (b *CronJobApplyConfiguration) WithNamespace(value string) *CronJobApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -131,7 +131,7 @@ func (b *CronJobApplyConfiguration) WithNamespace(value string) *CronJobApplyCon // If called multiple times, the UID field is set to the value of the last call. func (b *CronJobApplyConfiguration) WithUID(value types.UID) *CronJobApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -140,7 +140,7 @@ func (b *CronJobApplyConfiguration) WithUID(value types.UID) *CronJobApplyConfig // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *CronJobApplyConfiguration) WithResourceVersion(value string) *CronJobApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -149,25 +149,25 @@ func (b *CronJobApplyConfiguration) WithResourceVersion(value string) *CronJobAp // If called multiple times, the Generation field is set to the value of the last call. func (b *CronJobApplyConfiguration) WithGeneration(value int64) *CronJobApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CreationTimestamp field is set to the value of the last call. -func (b *CronJobApplyConfiguration) WithCreationTimestamp(value metav1.Time) *CronJobApplyConfiguration { +func (b *CronJobApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *CronJobApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. -func (b *CronJobApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *CronJobApplyConfiguration { +func (b *CronJobApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *CronJobApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -176,7 +176,7 @@ func (b *CronJobApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *Cr // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. 
func (b *CronJobApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *CronJobApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -186,11 +186,11 @@ func (b *CronJobApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) // overwriting an existing map entries in Labels field with the same key. func (b *CronJobApplyConfiguration) WithLabels(entries map[string]string) *CronJobApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -201,11 +201,11 @@ func (b *CronJobApplyConfiguration) WithLabels(entries map[string]string) *CronJ // overwriting an existing map entries in Annotations field with the same key. func (b *CronJobApplyConfiguration) WithAnnotations(entries map[string]string) *CronJobApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -213,13 +213,13 @@ func (b *CronJobApplyConfiguration) WithAnnotations(entries map[string]string) * // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the OwnerReferences field. 
-func (b *CronJobApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *CronJobApplyConfiguration { +func (b *CronJobApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *CronJobApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -230,14 +230,14 @@ func (b *CronJobApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerRefer func (b *CronJobApplyConfiguration) WithFinalizers(values ...string) *CronJobApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } func (b *CronJobApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} } } @@ -260,5 +260,5 @@ func (b *CronJobApplyConfiguration) WithStatus(value *CronJobStatusApplyConfigur // GetName retrieves the value of the Name field in the declarative configuration. func (b *CronJobApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/cronjobspec.go b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/cronjobspec.go index 62f9b5298..f53d140d3 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/cronjobspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/cronjobspec.go @@ -19,7 +19,7 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/batch/v1" + batchv1 "k8s.io/api/batch/v1" ) // CronJobSpecApplyConfiguration represents a declarative configuration of the CronJobSpec type for use @@ -28,7 +28,7 @@ type CronJobSpecApplyConfiguration struct { Schedule *string `json:"schedule,omitempty"` TimeZone *string `json:"timeZone,omitempty"` StartingDeadlineSeconds *int64 `json:"startingDeadlineSeconds,omitempty"` - ConcurrencyPolicy *v1.ConcurrencyPolicy `json:"concurrencyPolicy,omitempty"` + ConcurrencyPolicy *batchv1.ConcurrencyPolicy `json:"concurrencyPolicy,omitempty"` Suspend *bool `json:"suspend,omitempty"` JobTemplate *JobTemplateSpecApplyConfiguration `json:"jobTemplate,omitempty"` SuccessfulJobsHistoryLimit *int32 `json:"successfulJobsHistoryLimit,omitempty"` @@ -68,7 +68,7 @@ func (b *CronJobSpecApplyConfiguration) WithStartingDeadlineSeconds(value int64) // WithConcurrencyPolicy sets the ConcurrencyPolicy field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the ConcurrencyPolicy field is set to the value of the last call. 
-func (b *CronJobSpecApplyConfiguration) WithConcurrencyPolicy(value v1.ConcurrencyPolicy) *CronJobSpecApplyConfiguration { +func (b *CronJobSpecApplyConfiguration) WithConcurrencyPolicy(value batchv1.ConcurrencyPolicy) *CronJobSpecApplyConfiguration { b.ConcurrencyPolicy = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/cronjobstatus.go b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/cronjobstatus.go index 095dfe017..d29d9e892 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/cronjobstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/cronjobstatus.go @@ -20,15 +20,15 @@ package v1 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - v1 "k8s.io/client-go/applyconfigurations/core/v1" + corev1 "k8s.io/client-go/applyconfigurations/core/v1" ) // CronJobStatusApplyConfiguration represents a declarative configuration of the CronJobStatus type for use // with apply. type CronJobStatusApplyConfiguration struct { - Active []v1.ObjectReferenceApplyConfiguration `json:"active,omitempty"` - LastScheduleTime *metav1.Time `json:"lastScheduleTime,omitempty"` - LastSuccessfulTime *metav1.Time `json:"lastSuccessfulTime,omitempty"` + Active []corev1.ObjectReferenceApplyConfiguration `json:"active,omitempty"` + LastScheduleTime *metav1.Time `json:"lastScheduleTime,omitempty"` + LastSuccessfulTime *metav1.Time `json:"lastSuccessfulTime,omitempty"` } // CronJobStatusApplyConfiguration constructs a declarative configuration of the CronJobStatus type for use with @@ -40,7 +40,7 @@ func CronJobStatus() *CronJobStatusApplyConfiguration { // WithActive adds the given value to the Active field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the Active field. -func (b *CronJobStatusApplyConfiguration) WithActive(values ...*v1.ObjectReferenceApplyConfiguration) *CronJobStatusApplyConfiguration { +func (b *CronJobStatusApplyConfiguration) WithActive(values ...*corev1.ObjectReferenceApplyConfiguration) *CronJobStatusApplyConfiguration { for i := range values { if values[i] == nil { panic("nil value passed to WithActive") diff --git a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/job.go b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/job.go index 1333e9184..e508f1441 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/job.go +++ b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/job.go @@ -19,21 +19,21 @@ limitations under the License. package v1 import ( - apibatchv1 "k8s.io/api/batch/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + batchv1 "k8s.io/api/batch/v1" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" internal "k8s.io/client-go/applyconfigurations/internal" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) // JobApplyConfiguration represents a declarative configuration of the Job type for use // with apply. 
type JobApplyConfiguration struct { - v1.TypeMetaApplyConfiguration `json:",inline"` - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Spec *JobSpecApplyConfiguration `json:"spec,omitempty"` - Status *JobStatusApplyConfiguration `json:"status,omitempty"` + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *JobSpecApplyConfiguration `json:"spec,omitempty"` + Status *JobStatusApplyConfiguration `json:"status,omitempty"` } // Job constructs a declarative configuration of the Job type for use with @@ -58,18 +58,18 @@ func Job(name, namespace string) *JobApplyConfiguration { // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! -func ExtractJob(job *apibatchv1.Job, fieldManager string) (*JobApplyConfiguration, error) { +func ExtractJob(job *batchv1.Job, fieldManager string) (*JobApplyConfiguration, error) { return extractJob(job, fieldManager, "") } // ExtractJobStatus is the same as ExtractJob except // that it extracts the status subresource applied configuration. // Experimental! -func ExtractJobStatus(job *apibatchv1.Job, fieldManager string) (*JobApplyConfiguration, error) { +func ExtractJobStatus(job *batchv1.Job, fieldManager string) (*JobApplyConfiguration, error) { return extractJob(job, fieldManager, "status") } -func extractJob(job *apibatchv1.Job, fieldManager string, subresource string) (*JobApplyConfiguration, error) { +func extractJob(job *batchv1.Job, fieldManager string, subresource string) (*JobApplyConfiguration, error) { b := &JobApplyConfiguration{} err := managedfields.ExtractInto(job, internal.Parser().Type("io.k8s.api.batch.v1.Job"), fieldManager, b, subresource) if err != nil { @@ -87,7 +87,7 @@ func extractJob(job *apibatchv1.Job, fieldManager string, subresource string) (* // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *JobApplyConfiguration) WithKind(value string) *JobApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -95,7 +95,7 @@ func (b *JobApplyConfiguration) WithKind(value string) *JobApplyConfiguration { // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *JobApplyConfiguration) WithAPIVersion(value string) *JobApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -104,7 +104,7 @@ func (b *JobApplyConfiguration) WithAPIVersion(value string) *JobApplyConfigurat // If called multiple times, the Name field is set to the value of the last call. func (b *JobApplyConfiguration) WithName(value string) *JobApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -113,7 +113,7 @@ func (b *JobApplyConfiguration) WithName(value string) *JobApplyConfiguration { // If called multiple times, the GenerateName field is set to the value of the last call. 
func (b *JobApplyConfiguration) WithGenerateName(value string) *JobApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -122,7 +122,7 @@ func (b *JobApplyConfiguration) WithGenerateName(value string) *JobApplyConfigur // If called multiple times, the Namespace field is set to the value of the last call. func (b *JobApplyConfiguration) WithNamespace(value string) *JobApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -131,7 +131,7 @@ func (b *JobApplyConfiguration) WithNamespace(value string) *JobApplyConfigurati // If called multiple times, the UID field is set to the value of the last call. func (b *JobApplyConfiguration) WithUID(value types.UID) *JobApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -140,7 +140,7 @@ func (b *JobApplyConfiguration) WithUID(value types.UID) *JobApplyConfiguration // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *JobApplyConfiguration) WithResourceVersion(value string) *JobApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -149,25 +149,25 @@ func (b *JobApplyConfiguration) WithResourceVersion(value string) *JobApplyConfi // If called multiple times, the Generation field is set to the value of the last call. func (b *JobApplyConfiguration) WithGeneration(value int64) *JobApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CreationTimestamp field is set to the value of the last call. -func (b *JobApplyConfiguration) WithCreationTimestamp(value metav1.Time) *JobApplyConfiguration { +func (b *JobApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *JobApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. -func (b *JobApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *JobApplyConfiguration { +func (b *JobApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *JobApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -176,7 +176,7 @@ func (b *JobApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *JobApp // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. 
func (b *JobApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *JobApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -186,11 +186,11 @@ func (b *JobApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *Job // overwriting an existing map entries in Labels field with the same key. func (b *JobApplyConfiguration) WithLabels(entries map[string]string) *JobApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -201,11 +201,11 @@ func (b *JobApplyConfiguration) WithLabels(entries map[string]string) *JobApplyC // overwriting an existing map entries in Annotations field with the same key. func (b *JobApplyConfiguration) WithAnnotations(entries map[string]string) *JobApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -213,13 +213,13 @@ func (b *JobApplyConfiguration) WithAnnotations(entries map[string]string) *JobA // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the OwnerReferences field. -func (b *JobApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *JobApplyConfiguration { +func (b *JobApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *JobApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -230,14 +230,14 @@ func (b *JobApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReference func (b *JobApplyConfiguration) WithFinalizers(values ...string) *JobApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } func (b *JobApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} } } @@ -260,5 +260,5 @@ func (b *JobApplyConfiguration) WithStatus(value *JobStatusApplyConfiguration) * // GetName retrieves the value of the Name field in the declarative configuration. 
func (b *JobApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/jobcondition.go b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/jobcondition.go index 4f15bc604..fb3c65aba 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/jobcondition.go +++ b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/jobcondition.go @@ -19,7 +19,7 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/batch/v1" + batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -27,12 +27,12 @@ import ( // JobConditionApplyConfiguration represents a declarative configuration of the JobCondition type for use // with apply. type JobConditionApplyConfiguration struct { - Type *v1.JobConditionType `json:"type,omitempty"` - Status *corev1.ConditionStatus `json:"status,omitempty"` - LastProbeTime *metav1.Time `json:"lastProbeTime,omitempty"` - LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"` - Reason *string `json:"reason,omitempty"` - Message *string `json:"message,omitempty"` + Type *batchv1.JobConditionType `json:"type,omitempty"` + Status *corev1.ConditionStatus `json:"status,omitempty"` + LastProbeTime *metav1.Time `json:"lastProbeTime,omitempty"` + LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"` + Reason *string `json:"reason,omitempty"` + Message *string `json:"message,omitempty"` } // JobConditionApplyConfiguration constructs a declarative configuration of the JobCondition type for use with @@ -44,7 +44,7 @@ func JobCondition() *JobConditionApplyConfiguration { // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. -func (b *JobConditionApplyConfiguration) WithType(value v1.JobConditionType) *JobConditionApplyConfiguration { +func (b *JobConditionApplyConfiguration) WithType(value batchv1.JobConditionType) *JobConditionApplyConfiguration { b.Type = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/jobtemplatespec.go b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/jobtemplatespec.go index 901c4228e..b9666b03d 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/jobtemplatespec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/jobtemplatespec.go @@ -19,16 +19,16 @@ limitations under the License. package v1 import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) // JobTemplateSpecApplyConfiguration represents a declarative configuration of the JobTemplateSpec type for use // with apply. 
type JobTemplateSpecApplyConfiguration struct { - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Spec *JobSpecApplyConfiguration `json:"spec,omitempty"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *JobSpecApplyConfiguration `json:"spec,omitempty"` } // JobTemplateSpecApplyConfiguration constructs a declarative configuration of the JobTemplateSpec type for use with @@ -42,7 +42,7 @@ func JobTemplateSpec() *JobTemplateSpecApplyConfiguration { // If called multiple times, the Name field is set to the value of the last call. func (b *JobTemplateSpecApplyConfiguration) WithName(value string) *JobTemplateSpecApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -51,7 +51,7 @@ func (b *JobTemplateSpecApplyConfiguration) WithName(value string) *JobTemplateS // If called multiple times, the GenerateName field is set to the value of the last call. func (b *JobTemplateSpecApplyConfiguration) WithGenerateName(value string) *JobTemplateSpecApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -60,7 +60,7 @@ func (b *JobTemplateSpecApplyConfiguration) WithGenerateName(value string) *JobT // If called multiple times, the Namespace field is set to the value of the last call. func (b *JobTemplateSpecApplyConfiguration) WithNamespace(value string) *JobTemplateSpecApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -69,7 +69,7 @@ func (b *JobTemplateSpecApplyConfiguration) WithNamespace(value string) *JobTemp // If called multiple times, the UID field is set to the value of the last call. func (b *JobTemplateSpecApplyConfiguration) WithUID(value types.UID) *JobTemplateSpecApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -78,7 +78,7 @@ func (b *JobTemplateSpecApplyConfiguration) WithUID(value types.UID) *JobTemplat // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *JobTemplateSpecApplyConfiguration) WithResourceVersion(value string) *JobTemplateSpecApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -87,25 +87,25 @@ func (b *JobTemplateSpecApplyConfiguration) WithResourceVersion(value string) *J // If called multiple times, the Generation field is set to the value of the last call. func (b *JobTemplateSpecApplyConfiguration) WithGeneration(value int64) *JobTemplateSpecApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CreationTimestamp field is set to the value of the last call. 
-func (b *JobTemplateSpecApplyConfiguration) WithCreationTimestamp(value metav1.Time) *JobTemplateSpecApplyConfiguration { +func (b *JobTemplateSpecApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *JobTemplateSpecApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. -func (b *JobTemplateSpecApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *JobTemplateSpecApplyConfiguration { +func (b *JobTemplateSpecApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *JobTemplateSpecApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -114,7 +114,7 @@ func (b *JobTemplateSpecApplyConfiguration) WithDeletionTimestamp(value metav1.T // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *JobTemplateSpecApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *JobTemplateSpecApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -124,11 +124,11 @@ func (b *JobTemplateSpecApplyConfiguration) WithDeletionGracePeriodSeconds(value // overwriting an existing map entries in Labels field with the same key. func (b *JobTemplateSpecApplyConfiguration) WithLabels(entries map[string]string) *JobTemplateSpecApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -139,11 +139,11 @@ func (b *JobTemplateSpecApplyConfiguration) WithLabels(entries map[string]string // overwriting an existing map entries in Annotations field with the same key. func (b *JobTemplateSpecApplyConfiguration) WithAnnotations(entries map[string]string) *JobTemplateSpecApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -151,13 +151,13 @@ func (b *JobTemplateSpecApplyConfiguration) WithAnnotations(entries map[string]s // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the OwnerReferences field. 
-func (b *JobTemplateSpecApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *JobTemplateSpecApplyConfiguration { +func (b *JobTemplateSpecApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *JobTemplateSpecApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -168,14 +168,14 @@ func (b *JobTemplateSpecApplyConfiguration) WithOwnerReferences(values ...*v1.Ow func (b *JobTemplateSpecApplyConfiguration) WithFinalizers(values ...string) *JobTemplateSpecApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } func (b *JobTemplateSpecApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} } } @@ -190,5 +190,5 @@ func (b *JobTemplateSpecApplyConfiguration) WithSpec(value *JobSpecApplyConfigur // GetName retrieves the value of the Name field in the declarative configuration. func (b *JobTemplateSpecApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/podfailurepolicyonexitcodesrequirement.go b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/podfailurepolicyonexitcodesrequirement.go index cd32296ca..aa4dfc4c1 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/podfailurepolicyonexitcodesrequirement.go +++ b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/podfailurepolicyonexitcodesrequirement.go @@ -19,15 +19,15 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/batch/v1" + batchv1 "k8s.io/api/batch/v1" ) // PodFailurePolicyOnExitCodesRequirementApplyConfiguration represents a declarative configuration of the PodFailurePolicyOnExitCodesRequirement type for use // with apply. type PodFailurePolicyOnExitCodesRequirementApplyConfiguration struct { - ContainerName *string `json:"containerName,omitempty"` - Operator *v1.PodFailurePolicyOnExitCodesOperator `json:"operator,omitempty"` - Values []int32 `json:"values,omitempty"` + ContainerName *string `json:"containerName,omitempty"` + Operator *batchv1.PodFailurePolicyOnExitCodesOperator `json:"operator,omitempty"` + Values []int32 `json:"values,omitempty"` } // PodFailurePolicyOnExitCodesRequirementApplyConfiguration constructs a declarative configuration of the PodFailurePolicyOnExitCodesRequirement type for use with @@ -47,7 +47,7 @@ func (b *PodFailurePolicyOnExitCodesRequirementApplyConfiguration) WithContainer // WithOperator sets the Operator field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Operator field is set to the value of the last call. 
-func (b *PodFailurePolicyOnExitCodesRequirementApplyConfiguration) WithOperator(value v1.PodFailurePolicyOnExitCodesOperator) *PodFailurePolicyOnExitCodesRequirementApplyConfiguration { +func (b *PodFailurePolicyOnExitCodesRequirementApplyConfiguration) WithOperator(value batchv1.PodFailurePolicyOnExitCodesOperator) *PodFailurePolicyOnExitCodesRequirementApplyConfiguration { b.Operator = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/podfailurepolicyonpodconditionspattern.go b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/podfailurepolicyonpodconditionspattern.go index 07af4fb0e..6459a6e59 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/podfailurepolicyonpodconditionspattern.go +++ b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/podfailurepolicyonpodconditionspattern.go @@ -19,14 +19,14 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" ) // PodFailurePolicyOnPodConditionsPatternApplyConfiguration represents a declarative configuration of the PodFailurePolicyOnPodConditionsPattern type for use // with apply. type PodFailurePolicyOnPodConditionsPatternApplyConfiguration struct { - Type *v1.PodConditionType `json:"type,omitempty"` - Status *v1.ConditionStatus `json:"status,omitempty"` + Type *corev1.PodConditionType `json:"type,omitempty"` + Status *corev1.ConditionStatus `json:"status,omitempty"` } // PodFailurePolicyOnPodConditionsPatternApplyConfiguration constructs a declarative configuration of the PodFailurePolicyOnPodConditionsPattern type for use with @@ -38,7 +38,7 @@ func PodFailurePolicyOnPodConditionsPattern() *PodFailurePolicyOnPodConditionsPa // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. -func (b *PodFailurePolicyOnPodConditionsPatternApplyConfiguration) WithType(value v1.PodConditionType) *PodFailurePolicyOnPodConditionsPatternApplyConfiguration { +func (b *PodFailurePolicyOnPodConditionsPatternApplyConfiguration) WithType(value corev1.PodConditionType) *PodFailurePolicyOnPodConditionsPatternApplyConfiguration { b.Type = &value return b } @@ -46,7 +46,7 @@ func (b *PodFailurePolicyOnPodConditionsPatternApplyConfiguration) WithType(valu // WithStatus sets the Status field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Status field is set to the value of the last call. -func (b *PodFailurePolicyOnPodConditionsPatternApplyConfiguration) WithStatus(value v1.ConditionStatus) *PodFailurePolicyOnPodConditionsPatternApplyConfiguration { +func (b *PodFailurePolicyOnPodConditionsPatternApplyConfiguration) WithStatus(value corev1.ConditionStatus) *PodFailurePolicyOnPodConditionsPatternApplyConfiguration { b.Status = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/podfailurepolicyrule.go b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/podfailurepolicyrule.go index b004921d3..847ec7c95 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/podfailurepolicyrule.go +++ b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/podfailurepolicyrule.go @@ -19,13 +19,13 @@ limitations under the License. 
package v1 import ( - v1 "k8s.io/api/batch/v1" + batchv1 "k8s.io/api/batch/v1" ) // PodFailurePolicyRuleApplyConfiguration represents a declarative configuration of the PodFailurePolicyRule type for use // with apply. type PodFailurePolicyRuleApplyConfiguration struct { - Action *v1.PodFailurePolicyAction `json:"action,omitempty"` + Action *batchv1.PodFailurePolicyAction `json:"action,omitempty"` OnExitCodes *PodFailurePolicyOnExitCodesRequirementApplyConfiguration `json:"onExitCodes,omitempty"` OnPodConditions []PodFailurePolicyOnPodConditionsPatternApplyConfiguration `json:"onPodConditions,omitempty"` } @@ -39,7 +39,7 @@ func PodFailurePolicyRule() *PodFailurePolicyRuleApplyConfiguration { // WithAction sets the Action field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Action field is set to the value of the last call. -func (b *PodFailurePolicyRuleApplyConfiguration) WithAction(value v1.PodFailurePolicyAction) *PodFailurePolicyRuleApplyConfiguration { +func (b *PodFailurePolicyRuleApplyConfiguration) WithAction(value batchv1.PodFailurePolicyAction) *PodFailurePolicyRuleApplyConfiguration { b.Action = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/batch/v1beta1/cronjob.go b/vendor/k8s.io/client-go/applyconfigurations/batch/v1beta1/cronjob.go index 765ed5e65..133ed36fa 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/batch/v1beta1/cronjob.go +++ b/vendor/k8s.io/client-go/applyconfigurations/batch/v1beta1/cronjob.go @@ -87,7 +87,7 @@ func extractCronJob(cronJob *batchv1beta1.CronJob, fieldManager string, subresou // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *CronJobApplyConfiguration) WithKind(value string) *CronJobApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -95,7 +95,7 @@ func (b *CronJobApplyConfiguration) WithKind(value string) *CronJobApplyConfigur // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *CronJobApplyConfiguration) WithAPIVersion(value string) *CronJobApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -104,7 +104,7 @@ func (b *CronJobApplyConfiguration) WithAPIVersion(value string) *CronJobApplyCo // If called multiple times, the Name field is set to the value of the last call. func (b *CronJobApplyConfiguration) WithName(value string) *CronJobApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -113,7 +113,7 @@ func (b *CronJobApplyConfiguration) WithName(value string) *CronJobApplyConfigur // If called multiple times, the GenerateName field is set to the value of the last call. func (b *CronJobApplyConfiguration) WithGenerateName(value string) *CronJobApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -122,7 +122,7 @@ func (b *CronJobApplyConfiguration) WithGenerateName(value string) *CronJobApply // If called multiple times, the Namespace field is set to the value of the last call. 
func (b *CronJobApplyConfiguration) WithNamespace(value string) *CronJobApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -131,7 +131,7 @@ func (b *CronJobApplyConfiguration) WithNamespace(value string) *CronJobApplyCon // If called multiple times, the UID field is set to the value of the last call. func (b *CronJobApplyConfiguration) WithUID(value types.UID) *CronJobApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -140,7 +140,7 @@ func (b *CronJobApplyConfiguration) WithUID(value types.UID) *CronJobApplyConfig // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *CronJobApplyConfiguration) WithResourceVersion(value string) *CronJobApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -149,7 +149,7 @@ func (b *CronJobApplyConfiguration) WithResourceVersion(value string) *CronJobAp // If called multiple times, the Generation field is set to the value of the last call. func (b *CronJobApplyConfiguration) WithGeneration(value int64) *CronJobApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } @@ -158,7 +158,7 @@ func (b *CronJobApplyConfiguration) WithGeneration(value int64) *CronJobApplyCon // If called multiple times, the CreationTimestamp field is set to the value of the last call. func (b *CronJobApplyConfiguration) WithCreationTimestamp(value metav1.Time) *CronJobApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } @@ -167,7 +167,7 @@ func (b *CronJobApplyConfiguration) WithCreationTimestamp(value metav1.Time) *Cr // If called multiple times, the DeletionTimestamp field is set to the value of the last call. func (b *CronJobApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *CronJobApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -176,7 +176,7 @@ func (b *CronJobApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *Cr // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *CronJobApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *CronJobApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -186,11 +186,11 @@ func (b *CronJobApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) // overwriting an existing map entries in Labels field with the same key. 
func (b *CronJobApplyConfiguration) WithLabels(entries map[string]string) *CronJobApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -201,11 +201,11 @@ func (b *CronJobApplyConfiguration) WithLabels(entries map[string]string) *CronJ // overwriting an existing map entries in Annotations field with the same key. func (b *CronJobApplyConfiguration) WithAnnotations(entries map[string]string) *CronJobApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -219,7 +219,7 @@ func (b *CronJobApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerRefer if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -230,7 +230,7 @@ func (b *CronJobApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerRefer func (b *CronJobApplyConfiguration) WithFinalizers(values ...string) *CronJobApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } @@ -260,5 +260,5 @@ func (b *CronJobApplyConfiguration) WithStatus(value *CronJobStatusApplyConfigur // GetName retrieves the value of the Name field in the declarative configuration. func (b *CronJobApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/k8s.io/client-go/applyconfigurations/batch/v1beta1/cronjobspec.go b/vendor/k8s.io/client-go/applyconfigurations/batch/v1beta1/cronjobspec.go index 21043690d..30604ac7e 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/batch/v1beta1/cronjobspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/batch/v1beta1/cronjobspec.go @@ -19,7 +19,7 @@ limitations under the License. 
package v1beta1 import ( - v1beta1 "k8s.io/api/batch/v1beta1" + batchv1beta1 "k8s.io/api/batch/v1beta1" ) // CronJobSpecApplyConfiguration represents a declarative configuration of the CronJobSpec type for use @@ -28,7 +28,7 @@ type CronJobSpecApplyConfiguration struct { Schedule *string `json:"schedule,omitempty"` TimeZone *string `json:"timeZone,omitempty"` StartingDeadlineSeconds *int64 `json:"startingDeadlineSeconds,omitempty"` - ConcurrencyPolicy *v1beta1.ConcurrencyPolicy `json:"concurrencyPolicy,omitempty"` + ConcurrencyPolicy *batchv1beta1.ConcurrencyPolicy `json:"concurrencyPolicy,omitempty"` Suspend *bool `json:"suspend,omitempty"` JobTemplate *JobTemplateSpecApplyConfiguration `json:"jobTemplate,omitempty"` SuccessfulJobsHistoryLimit *int32 `json:"successfulJobsHistoryLimit,omitempty"` @@ -68,7 +68,7 @@ func (b *CronJobSpecApplyConfiguration) WithStartingDeadlineSeconds(value int64) // WithConcurrencyPolicy sets the ConcurrencyPolicy field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the ConcurrencyPolicy field is set to the value of the last call. -func (b *CronJobSpecApplyConfiguration) WithConcurrencyPolicy(value v1beta1.ConcurrencyPolicy) *CronJobSpecApplyConfiguration { +func (b *CronJobSpecApplyConfiguration) WithConcurrencyPolicy(value batchv1beta1.ConcurrencyPolicy) *CronJobSpecApplyConfiguration { b.ConcurrencyPolicy = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/batch/v1beta1/jobtemplatespec.go b/vendor/k8s.io/client-go/applyconfigurations/batch/v1beta1/jobtemplatespec.go index 5fd2485c6..4106b8e55 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/batch/v1beta1/jobtemplatespec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/batch/v1beta1/jobtemplatespec.go @@ -43,7 +43,7 @@ func JobTemplateSpec() *JobTemplateSpecApplyConfiguration { // If called multiple times, the Name field is set to the value of the last call. func (b *JobTemplateSpecApplyConfiguration) WithName(value string) *JobTemplateSpecApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -52,7 +52,7 @@ func (b *JobTemplateSpecApplyConfiguration) WithName(value string) *JobTemplateS // If called multiple times, the GenerateName field is set to the value of the last call. func (b *JobTemplateSpecApplyConfiguration) WithGenerateName(value string) *JobTemplateSpecApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -61,7 +61,7 @@ func (b *JobTemplateSpecApplyConfiguration) WithGenerateName(value string) *JobT // If called multiple times, the Namespace field is set to the value of the last call. func (b *JobTemplateSpecApplyConfiguration) WithNamespace(value string) *JobTemplateSpecApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -70,7 +70,7 @@ func (b *JobTemplateSpecApplyConfiguration) WithNamespace(value string) *JobTemp // If called multiple times, the UID field is set to the value of the last call. 
func (b *JobTemplateSpecApplyConfiguration) WithUID(value types.UID) *JobTemplateSpecApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -79,7 +79,7 @@ func (b *JobTemplateSpecApplyConfiguration) WithUID(value types.UID) *JobTemplat // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *JobTemplateSpecApplyConfiguration) WithResourceVersion(value string) *JobTemplateSpecApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -88,7 +88,7 @@ func (b *JobTemplateSpecApplyConfiguration) WithResourceVersion(value string) *J // If called multiple times, the Generation field is set to the value of the last call. func (b *JobTemplateSpecApplyConfiguration) WithGeneration(value int64) *JobTemplateSpecApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } @@ -97,7 +97,7 @@ func (b *JobTemplateSpecApplyConfiguration) WithGeneration(value int64) *JobTemp // If called multiple times, the CreationTimestamp field is set to the value of the last call. func (b *JobTemplateSpecApplyConfiguration) WithCreationTimestamp(value metav1.Time) *JobTemplateSpecApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } @@ -106,7 +106,7 @@ func (b *JobTemplateSpecApplyConfiguration) WithCreationTimestamp(value metav1.T // If called multiple times, the DeletionTimestamp field is set to the value of the last call. func (b *JobTemplateSpecApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *JobTemplateSpecApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -115,7 +115,7 @@ func (b *JobTemplateSpecApplyConfiguration) WithDeletionTimestamp(value metav1.T // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *JobTemplateSpecApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *JobTemplateSpecApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -125,11 +125,11 @@ func (b *JobTemplateSpecApplyConfiguration) WithDeletionGracePeriodSeconds(value // overwriting an existing map entries in Labels field with the same key. func (b *JobTemplateSpecApplyConfiguration) WithLabels(entries map[string]string) *JobTemplateSpecApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -140,11 +140,11 @@ func (b *JobTemplateSpecApplyConfiguration) WithLabels(entries map[string]string // overwriting an existing map entries in Annotations field with the same key. 
func (b *JobTemplateSpecApplyConfiguration) WithAnnotations(entries map[string]string) *JobTemplateSpecApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -158,7 +158,7 @@ func (b *JobTemplateSpecApplyConfiguration) WithOwnerReferences(values ...*v1.Ow if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -169,7 +169,7 @@ func (b *JobTemplateSpecApplyConfiguration) WithOwnerReferences(values ...*v1.Ow func (b *JobTemplateSpecApplyConfiguration) WithFinalizers(values ...string) *JobTemplateSpecApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } @@ -191,5 +191,5 @@ func (b *JobTemplateSpecApplyConfiguration) WithSpec(value *batchv1.JobSpecApply // GetName retrieves the value of the Name field in the declarative configuration. func (b *JobTemplateSpecApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/k8s.io/client-go/applyconfigurations/certificates/v1/certificatesigningrequest.go b/vendor/k8s.io/client-go/applyconfigurations/certificates/v1/certificatesigningrequest.go index e30bb6242..998e5723c 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/certificates/v1/certificatesigningrequest.go +++ b/vendor/k8s.io/client-go/applyconfigurations/certificates/v1/certificatesigningrequest.go @@ -19,21 +19,21 @@ limitations under the License. package v1 import ( - apicertificatesv1 "k8s.io/api/certificates/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + certificatesv1 "k8s.io/api/certificates/v1" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" internal "k8s.io/client-go/applyconfigurations/internal" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) // CertificateSigningRequestApplyConfiguration represents a declarative configuration of the CertificateSigningRequest type for use // with apply. 
type CertificateSigningRequestApplyConfiguration struct { - v1.TypeMetaApplyConfiguration `json:",inline"` - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Spec *CertificateSigningRequestSpecApplyConfiguration `json:"spec,omitempty"` - Status *CertificateSigningRequestStatusApplyConfiguration `json:"status,omitempty"` + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *CertificateSigningRequestSpecApplyConfiguration `json:"spec,omitempty"` + Status *CertificateSigningRequestStatusApplyConfiguration `json:"status,omitempty"` } // CertificateSigningRequest constructs a declarative configuration of the CertificateSigningRequest type for use with @@ -57,18 +57,18 @@ func CertificateSigningRequest(name string) *CertificateSigningRequestApplyConfi // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! -func ExtractCertificateSigningRequest(certificateSigningRequest *apicertificatesv1.CertificateSigningRequest, fieldManager string) (*CertificateSigningRequestApplyConfiguration, error) { +func ExtractCertificateSigningRequest(certificateSigningRequest *certificatesv1.CertificateSigningRequest, fieldManager string) (*CertificateSigningRequestApplyConfiguration, error) { return extractCertificateSigningRequest(certificateSigningRequest, fieldManager, "") } // ExtractCertificateSigningRequestStatus is the same as ExtractCertificateSigningRequest except // that it extracts the status subresource applied configuration. // Experimental! -func ExtractCertificateSigningRequestStatus(certificateSigningRequest *apicertificatesv1.CertificateSigningRequest, fieldManager string) (*CertificateSigningRequestApplyConfiguration, error) { +func ExtractCertificateSigningRequestStatus(certificateSigningRequest *certificatesv1.CertificateSigningRequest, fieldManager string) (*CertificateSigningRequestApplyConfiguration, error) { return extractCertificateSigningRequest(certificateSigningRequest, fieldManager, "status") } -func extractCertificateSigningRequest(certificateSigningRequest *apicertificatesv1.CertificateSigningRequest, fieldManager string, subresource string) (*CertificateSigningRequestApplyConfiguration, error) { +func extractCertificateSigningRequest(certificateSigningRequest *certificatesv1.CertificateSigningRequest, fieldManager string, subresource string) (*CertificateSigningRequestApplyConfiguration, error) { b := &CertificateSigningRequestApplyConfiguration{} err := managedfields.ExtractInto(certificateSigningRequest, internal.Parser().Type("io.k8s.api.certificates.v1.CertificateSigningRequest"), fieldManager, b, subresource) if err != nil { @@ -85,7 +85,7 @@ func extractCertificateSigningRequest(certificateSigningRequest *apicertificates // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *CertificateSigningRequestApplyConfiguration) WithKind(value string) *CertificateSigningRequestApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -93,7 +93,7 @@ func (b *CertificateSigningRequestApplyConfiguration) WithKind(value string) *Ce // and returns the receiver, so that objects can be built by chaining "With" function invocations. 
// If called multiple times, the APIVersion field is set to the value of the last call. func (b *CertificateSigningRequestApplyConfiguration) WithAPIVersion(value string) *CertificateSigningRequestApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -102,7 +102,7 @@ func (b *CertificateSigningRequestApplyConfiguration) WithAPIVersion(value strin // If called multiple times, the Name field is set to the value of the last call. func (b *CertificateSigningRequestApplyConfiguration) WithName(value string) *CertificateSigningRequestApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -111,7 +111,7 @@ func (b *CertificateSigningRequestApplyConfiguration) WithName(value string) *Ce // If called multiple times, the GenerateName field is set to the value of the last call. func (b *CertificateSigningRequestApplyConfiguration) WithGenerateName(value string) *CertificateSigningRequestApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -120,7 +120,7 @@ func (b *CertificateSigningRequestApplyConfiguration) WithGenerateName(value str // If called multiple times, the Namespace field is set to the value of the last call. func (b *CertificateSigningRequestApplyConfiguration) WithNamespace(value string) *CertificateSigningRequestApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -129,7 +129,7 @@ func (b *CertificateSigningRequestApplyConfiguration) WithNamespace(value string // If called multiple times, the UID field is set to the value of the last call. func (b *CertificateSigningRequestApplyConfiguration) WithUID(value types.UID) *CertificateSigningRequestApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -138,7 +138,7 @@ func (b *CertificateSigningRequestApplyConfiguration) WithUID(value types.UID) * // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *CertificateSigningRequestApplyConfiguration) WithResourceVersion(value string) *CertificateSigningRequestApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -147,25 +147,25 @@ func (b *CertificateSigningRequestApplyConfiguration) WithResourceVersion(value // If called multiple times, the Generation field is set to the value of the last call. func (b *CertificateSigningRequestApplyConfiguration) WithGeneration(value int64) *CertificateSigningRequestApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CreationTimestamp field is set to the value of the last call. 
-func (b *CertificateSigningRequestApplyConfiguration) WithCreationTimestamp(value metav1.Time) *CertificateSigningRequestApplyConfiguration { +func (b *CertificateSigningRequestApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *CertificateSigningRequestApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. -func (b *CertificateSigningRequestApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *CertificateSigningRequestApplyConfiguration { +func (b *CertificateSigningRequestApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *CertificateSigningRequestApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -174,7 +174,7 @@ func (b *CertificateSigningRequestApplyConfiguration) WithDeletionTimestamp(valu // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *CertificateSigningRequestApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *CertificateSigningRequestApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -184,11 +184,11 @@ func (b *CertificateSigningRequestApplyConfiguration) WithDeletionGracePeriodSec // overwriting an existing map entries in Labels field with the same key. func (b *CertificateSigningRequestApplyConfiguration) WithLabels(entries map[string]string) *CertificateSigningRequestApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -199,11 +199,11 @@ func (b *CertificateSigningRequestApplyConfiguration) WithLabels(entries map[str // overwriting an existing map entries in Annotations field with the same key. func (b *CertificateSigningRequestApplyConfiguration) WithAnnotations(entries map[string]string) *CertificateSigningRequestApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -211,13 +211,13 @@ func (b *CertificateSigningRequestApplyConfiguration) WithAnnotations(entries ma // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. 
// If called multiple times, values provided by each call will be appended to the OwnerReferences field. -func (b *CertificateSigningRequestApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *CertificateSigningRequestApplyConfiguration { +func (b *CertificateSigningRequestApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *CertificateSigningRequestApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -228,14 +228,14 @@ func (b *CertificateSigningRequestApplyConfiguration) WithOwnerReferences(values func (b *CertificateSigningRequestApplyConfiguration) WithFinalizers(values ...string) *CertificateSigningRequestApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } func (b *CertificateSigningRequestApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} } } @@ -258,5 +258,5 @@ func (b *CertificateSigningRequestApplyConfiguration) WithStatus(value *Certific // GetName retrieves the value of the Name field in the declarative configuration. func (b *CertificateSigningRequestApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/k8s.io/client-go/applyconfigurations/certificates/v1/certificatesigningrequestcondition.go b/vendor/k8s.io/client-go/applyconfigurations/certificates/v1/certificatesigningrequestcondition.go index 7a4bfce01..a6dedcb59 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/certificates/v1/certificatesigningrequestcondition.go +++ b/vendor/k8s.io/client-go/applyconfigurations/certificates/v1/certificatesigningrequestcondition.go @@ -19,7 +19,7 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/certificates/v1" + certificatesv1 "k8s.io/api/certificates/v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -27,12 +27,12 @@ import ( // CertificateSigningRequestConditionApplyConfiguration represents a declarative configuration of the CertificateSigningRequestCondition type for use // with apply. 
type CertificateSigningRequestConditionApplyConfiguration struct { - Type *v1.RequestConditionType `json:"type,omitempty"` - Status *corev1.ConditionStatus `json:"status,omitempty"` - Reason *string `json:"reason,omitempty"` - Message *string `json:"message,omitempty"` - LastUpdateTime *metav1.Time `json:"lastUpdateTime,omitempty"` - LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"` + Type *certificatesv1.RequestConditionType `json:"type,omitempty"` + Status *corev1.ConditionStatus `json:"status,omitempty"` + Reason *string `json:"reason,omitempty"` + Message *string `json:"message,omitempty"` + LastUpdateTime *metav1.Time `json:"lastUpdateTime,omitempty"` + LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"` } // CertificateSigningRequestConditionApplyConfiguration constructs a declarative configuration of the CertificateSigningRequestCondition type for use with @@ -44,7 +44,7 @@ func CertificateSigningRequestCondition() *CertificateSigningRequestConditionApp // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. -func (b *CertificateSigningRequestConditionApplyConfiguration) WithType(value v1.RequestConditionType) *CertificateSigningRequestConditionApplyConfiguration { +func (b *CertificateSigningRequestConditionApplyConfiguration) WithType(value certificatesv1.RequestConditionType) *CertificateSigningRequestConditionApplyConfiguration { b.Type = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/certificates/v1/certificatesigningrequestspec.go b/vendor/k8s.io/client-go/applyconfigurations/certificates/v1/certificatesigningrequestspec.go index 9c4a85693..82da53c9e 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/certificates/v1/certificatesigningrequestspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/certificates/v1/certificatesigningrequestspec.go @@ -19,20 +19,20 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/certificates/v1" + certificatesv1 "k8s.io/api/certificates/v1" ) // CertificateSigningRequestSpecApplyConfiguration represents a declarative configuration of the CertificateSigningRequestSpec type for use // with apply. 
type CertificateSigningRequestSpecApplyConfiguration struct { - Request []byte `json:"request,omitempty"` - SignerName *string `json:"signerName,omitempty"` - ExpirationSeconds *int32 `json:"expirationSeconds,omitempty"` - Usages []v1.KeyUsage `json:"usages,omitempty"` - Username *string `json:"username,omitempty"` - UID *string `json:"uid,omitempty"` - Groups []string `json:"groups,omitempty"` - Extra map[string]v1.ExtraValue `json:"extra,omitempty"` + Request []byte `json:"request,omitempty"` + SignerName *string `json:"signerName,omitempty"` + ExpirationSeconds *int32 `json:"expirationSeconds,omitempty"` + Usages []certificatesv1.KeyUsage `json:"usages,omitempty"` + Username *string `json:"username,omitempty"` + UID *string `json:"uid,omitempty"` + Groups []string `json:"groups,omitempty"` + Extra map[string]certificatesv1.ExtraValue `json:"extra,omitempty"` } // CertificateSigningRequestSpecApplyConfiguration constructs a declarative configuration of the CertificateSigningRequestSpec type for use with @@ -70,7 +70,7 @@ func (b *CertificateSigningRequestSpecApplyConfiguration) WithExpirationSeconds( // WithUsages adds the given value to the Usages field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the Usages field. -func (b *CertificateSigningRequestSpecApplyConfiguration) WithUsages(values ...v1.KeyUsage) *CertificateSigningRequestSpecApplyConfiguration { +func (b *CertificateSigningRequestSpecApplyConfiguration) WithUsages(values ...certificatesv1.KeyUsage) *CertificateSigningRequestSpecApplyConfiguration { for i := range values { b.Usages = append(b.Usages, values[i]) } @@ -107,9 +107,9 @@ func (b *CertificateSigningRequestSpecApplyConfiguration) WithGroups(values ...s // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, the entries provided by each call will be put on the Extra field, // overwriting an existing map entries in Extra field with the same key. -func (b *CertificateSigningRequestSpecApplyConfiguration) WithExtra(entries map[string]v1.ExtraValue) *CertificateSigningRequestSpecApplyConfiguration { +func (b *CertificateSigningRequestSpecApplyConfiguration) WithExtra(entries map[string]certificatesv1.ExtraValue) *CertificateSigningRequestSpecApplyConfiguration { if b.Extra == nil && len(entries) > 0 { - b.Extra = make(map[string]v1.ExtraValue, len(entries)) + b.Extra = make(map[string]certificatesv1.ExtraValue, len(entries)) } for k, v := range entries { b.Extra[k] = v diff --git a/vendor/k8s.io/client-go/applyconfigurations/certificates/v1alpha1/clustertrustbundle.go b/vendor/k8s.io/client-go/applyconfigurations/certificates/v1alpha1/clustertrustbundle.go index 9cd10bc56..6ae6b269d 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/certificates/v1alpha1/clustertrustbundle.go +++ b/vendor/k8s.io/client-go/applyconfigurations/certificates/v1alpha1/clustertrustbundle.go @@ -84,7 +84,7 @@ func extractClusterTrustBundle(clusterTrustBundle *certificatesv1alpha1.ClusterT // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. 
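The regenerated certificates/v1 builders above change only how the generated code addresses its embedded TypeMeta/ObjectMeta apply configurations; the exported CertificateSigningRequest()/Extract*/With* surface that callers compile against is unchanged. A minimal server-side-apply sketch of how these builders are typically consumed (the clientset, field manager, signer, and PEM request below are illustrative assumptions, not taken from this repository):

// Sketch only: assumes an existing kubernetes.Interface and a PEM-encoded CSR.
package example

import (
	"context"

	certificatesv1 "k8s.io/api/certificates/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	csrac "k8s.io/client-go/applyconfigurations/certificates/v1"
	"k8s.io/client-go/kubernetes"
)

func applyCSR(ctx context.Context, cs kubernetes.Interface, csrPEM []byte) error {
	// Only the internal field selectors moved onto the embedded apply configurations;
	// chained With* calls look exactly as they did before the regeneration.
	cfg := csrac.CertificateSigningRequest("example-csr").
		WithLabels(map[string]string{"app": "example"}).
		WithSpec(csrac.CertificateSigningRequestSpec().
			WithRequest(csrPEM...).
			WithSignerName("kubernetes.io/kube-apiserver-client").
			WithUsages(certificatesv1.UsageClientAuth))

	_, err := cs.CertificatesV1().CertificateSigningRequests().
		Apply(ctx, cfg, metav1.ApplyOptions{FieldManager: "example-manager", Force: true})
	return err
}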
func (b *ClusterTrustBundleApplyConfiguration) WithKind(value string) *ClusterTrustBundleApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -92,7 +92,7 @@ func (b *ClusterTrustBundleApplyConfiguration) WithKind(value string) *ClusterTr // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *ClusterTrustBundleApplyConfiguration) WithAPIVersion(value string) *ClusterTrustBundleApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -101,7 +101,7 @@ func (b *ClusterTrustBundleApplyConfiguration) WithAPIVersion(value string) *Clu // If called multiple times, the Name field is set to the value of the last call. func (b *ClusterTrustBundleApplyConfiguration) WithName(value string) *ClusterTrustBundleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -110,7 +110,7 @@ func (b *ClusterTrustBundleApplyConfiguration) WithName(value string) *ClusterTr // If called multiple times, the GenerateName field is set to the value of the last call. func (b *ClusterTrustBundleApplyConfiguration) WithGenerateName(value string) *ClusterTrustBundleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -119,7 +119,7 @@ func (b *ClusterTrustBundleApplyConfiguration) WithGenerateName(value string) *C // If called multiple times, the Namespace field is set to the value of the last call. func (b *ClusterTrustBundleApplyConfiguration) WithNamespace(value string) *ClusterTrustBundleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -128,7 +128,7 @@ func (b *ClusterTrustBundleApplyConfiguration) WithNamespace(value string) *Clus // If called multiple times, the UID field is set to the value of the last call. func (b *ClusterTrustBundleApplyConfiguration) WithUID(value types.UID) *ClusterTrustBundleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -137,7 +137,7 @@ func (b *ClusterTrustBundleApplyConfiguration) WithUID(value types.UID) *Cluster // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *ClusterTrustBundleApplyConfiguration) WithResourceVersion(value string) *ClusterTrustBundleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -146,7 +146,7 @@ func (b *ClusterTrustBundleApplyConfiguration) WithResourceVersion(value string) // If called multiple times, the Generation field is set to the value of the last call. func (b *ClusterTrustBundleApplyConfiguration) WithGeneration(value int64) *ClusterTrustBundleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } @@ -155,7 +155,7 @@ func (b *ClusterTrustBundleApplyConfiguration) WithGeneration(value int64) *Clus // If called multiple times, the CreationTimestamp field is set to the value of the last call. 
func (b *ClusterTrustBundleApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ClusterTrustBundleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } @@ -164,7 +164,7 @@ func (b *ClusterTrustBundleApplyConfiguration) WithCreationTimestamp(value metav // If called multiple times, the DeletionTimestamp field is set to the value of the last call. func (b *ClusterTrustBundleApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ClusterTrustBundleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -173,7 +173,7 @@ func (b *ClusterTrustBundleApplyConfiguration) WithDeletionTimestamp(value metav // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *ClusterTrustBundleApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ClusterTrustBundleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -183,11 +183,11 @@ func (b *ClusterTrustBundleApplyConfiguration) WithDeletionGracePeriodSeconds(va // overwriting an existing map entries in Labels field with the same key. func (b *ClusterTrustBundleApplyConfiguration) WithLabels(entries map[string]string) *ClusterTrustBundleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -198,11 +198,11 @@ func (b *ClusterTrustBundleApplyConfiguration) WithLabels(entries map[string]str // overwriting an existing map entries in Annotations field with the same key. 
func (b *ClusterTrustBundleApplyConfiguration) WithAnnotations(entries map[string]string) *ClusterTrustBundleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -216,7 +216,7 @@ func (b *ClusterTrustBundleApplyConfiguration) WithOwnerReferences(values ...*v1 if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -227,7 +227,7 @@ func (b *ClusterTrustBundleApplyConfiguration) WithOwnerReferences(values ...*v1 func (b *ClusterTrustBundleApplyConfiguration) WithFinalizers(values ...string) *ClusterTrustBundleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } @@ -249,5 +249,5 @@ func (b *ClusterTrustBundleApplyConfiguration) WithSpec(value *ClusterTrustBundl // GetName retrieves the value of the Name field in the declarative configuration. func (b *ClusterTrustBundleApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/k8s.io/client-go/applyconfigurations/certificates/v1beta1/certificatesigningrequest.go b/vendor/k8s.io/client-go/applyconfigurations/certificates/v1beta1/certificatesigningrequest.go index d6e08824a..a1f57f268 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/certificates/v1beta1/certificatesigningrequest.go +++ b/vendor/k8s.io/client-go/applyconfigurations/certificates/v1beta1/certificatesigningrequest.go @@ -85,7 +85,7 @@ func extractCertificateSigningRequest(certificateSigningRequest *certificatesv1b // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *CertificateSigningRequestApplyConfiguration) WithKind(value string) *CertificateSigningRequestApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -93,7 +93,7 @@ func (b *CertificateSigningRequestApplyConfiguration) WithKind(value string) *Ce // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *CertificateSigningRequestApplyConfiguration) WithAPIVersion(value string) *CertificateSigningRequestApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -102,7 +102,7 @@ func (b *CertificateSigningRequestApplyConfiguration) WithAPIVersion(value strin // If called multiple times, the Name field is set to the value of the last call. 
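The certificates/v1alpha1 ClusterTrustBundle builder gets the same mechanical treatment. A sketch of how such an apply configuration would be used, assuming the alpha ClusterTrustBundle API is enabled on the cluster and with placeholder signer and bundle values:

// Sketch only: ClusterTrustBundle is an alpha API; names and PEM content are placeholders.
package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	ctbac "k8s.io/client-go/applyconfigurations/certificates/v1alpha1"
	"k8s.io/client-go/kubernetes"
)

func applyTrustBundle(ctx context.Context, cs kubernetes.Interface, caPEM string) error {
	cfg := ctbac.ClusterTrustBundle("example.com:signer:ca").
		WithSpec(ctbac.ClusterTrustBundleSpec().
			WithSignerName("example.com/signer").
			WithTrustBundle(caPEM))

	_, err := cs.CertificatesV1alpha1().ClusterTrustBundles().
		Apply(ctx, cfg, metav1.ApplyOptions{FieldManager: "example-manager"})
	return err
}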
func (b *CertificateSigningRequestApplyConfiguration) WithName(value string) *CertificateSigningRequestApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -111,7 +111,7 @@ func (b *CertificateSigningRequestApplyConfiguration) WithName(value string) *Ce // If called multiple times, the GenerateName field is set to the value of the last call. func (b *CertificateSigningRequestApplyConfiguration) WithGenerateName(value string) *CertificateSigningRequestApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -120,7 +120,7 @@ func (b *CertificateSigningRequestApplyConfiguration) WithGenerateName(value str // If called multiple times, the Namespace field is set to the value of the last call. func (b *CertificateSigningRequestApplyConfiguration) WithNamespace(value string) *CertificateSigningRequestApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -129,7 +129,7 @@ func (b *CertificateSigningRequestApplyConfiguration) WithNamespace(value string // If called multiple times, the UID field is set to the value of the last call. func (b *CertificateSigningRequestApplyConfiguration) WithUID(value types.UID) *CertificateSigningRequestApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -138,7 +138,7 @@ func (b *CertificateSigningRequestApplyConfiguration) WithUID(value types.UID) * // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *CertificateSigningRequestApplyConfiguration) WithResourceVersion(value string) *CertificateSigningRequestApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -147,7 +147,7 @@ func (b *CertificateSigningRequestApplyConfiguration) WithResourceVersion(value // If called multiple times, the Generation field is set to the value of the last call. func (b *CertificateSigningRequestApplyConfiguration) WithGeneration(value int64) *CertificateSigningRequestApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } @@ -156,7 +156,7 @@ func (b *CertificateSigningRequestApplyConfiguration) WithGeneration(value int64 // If called multiple times, the CreationTimestamp field is set to the value of the last call. func (b *CertificateSigningRequestApplyConfiguration) WithCreationTimestamp(value metav1.Time) *CertificateSigningRequestApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } @@ -165,7 +165,7 @@ func (b *CertificateSigningRequestApplyConfiguration) WithCreationTimestamp(valu // If called multiple times, the DeletionTimestamp field is set to the value of the last call. 
func (b *CertificateSigningRequestApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *CertificateSigningRequestApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -174,7 +174,7 @@ func (b *CertificateSigningRequestApplyConfiguration) WithDeletionTimestamp(valu // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *CertificateSigningRequestApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *CertificateSigningRequestApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -184,11 +184,11 @@ func (b *CertificateSigningRequestApplyConfiguration) WithDeletionGracePeriodSec // overwriting an existing map entries in Labels field with the same key. func (b *CertificateSigningRequestApplyConfiguration) WithLabels(entries map[string]string) *CertificateSigningRequestApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -199,11 +199,11 @@ func (b *CertificateSigningRequestApplyConfiguration) WithLabels(entries map[str // overwriting an existing map entries in Annotations field with the same key. func (b *CertificateSigningRequestApplyConfiguration) WithAnnotations(entries map[string]string) *CertificateSigningRequestApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -217,7 +217,7 @@ func (b *CertificateSigningRequestApplyConfiguration) WithOwnerReferences(values if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -228,7 +228,7 @@ func (b *CertificateSigningRequestApplyConfiguration) WithOwnerReferences(values func (b *CertificateSigningRequestApplyConfiguration) WithFinalizers(values ...string) *CertificateSigningRequestApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } @@ -258,5 +258,5 @@ func (b *CertificateSigningRequestApplyConfiguration) WithStatus(value *Certific // GetName retrieves the value of the Name field in the declarative configuration. 
func (b *CertificateSigningRequestApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/k8s.io/client-go/applyconfigurations/certificates/v1beta1/certificatesigningrequestcondition.go b/vendor/k8s.io/client-go/applyconfigurations/certificates/v1beta1/certificatesigningrequestcondition.go index 6e3692d1c..a845ec404 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/certificates/v1beta1/certificatesigningrequestcondition.go +++ b/vendor/k8s.io/client-go/applyconfigurations/certificates/v1beta1/certificatesigningrequestcondition.go @@ -19,7 +19,7 @@ limitations under the License. package v1beta1 import ( - v1beta1 "k8s.io/api/certificates/v1beta1" + certificatesv1beta1 "k8s.io/api/certificates/v1beta1" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -27,12 +27,12 @@ import ( // CertificateSigningRequestConditionApplyConfiguration represents a declarative configuration of the CertificateSigningRequestCondition type for use // with apply. type CertificateSigningRequestConditionApplyConfiguration struct { - Type *v1beta1.RequestConditionType `json:"type,omitempty"` - Status *v1.ConditionStatus `json:"status,omitempty"` - Reason *string `json:"reason,omitempty"` - Message *string `json:"message,omitempty"` - LastUpdateTime *metav1.Time `json:"lastUpdateTime,omitempty"` - LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"` + Type *certificatesv1beta1.RequestConditionType `json:"type,omitempty"` + Status *v1.ConditionStatus `json:"status,omitempty"` + Reason *string `json:"reason,omitempty"` + Message *string `json:"message,omitempty"` + LastUpdateTime *metav1.Time `json:"lastUpdateTime,omitempty"` + LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"` } // CertificateSigningRequestConditionApplyConfiguration constructs a declarative configuration of the CertificateSigningRequestCondition type for use with @@ -44,7 +44,7 @@ func CertificateSigningRequestCondition() *CertificateSigningRequestConditionApp // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. -func (b *CertificateSigningRequestConditionApplyConfiguration) WithType(value v1beta1.RequestConditionType) *CertificateSigningRequestConditionApplyConfiguration { +func (b *CertificateSigningRequestConditionApplyConfiguration) WithType(value certificatesv1beta1.RequestConditionType) *CertificateSigningRequestConditionApplyConfiguration { b.Type = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/certificates/v1beta1/certificatesigningrequestspec.go b/vendor/k8s.io/client-go/applyconfigurations/certificates/v1beta1/certificatesigningrequestspec.go index 9284eca3a..ee4016c76 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/certificates/v1beta1/certificatesigningrequestspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/certificates/v1beta1/certificatesigningrequestspec.go @@ -19,20 +19,20 @@ limitations under the License. package v1beta1 import ( - v1beta1 "k8s.io/api/certificates/v1beta1" + certificatesv1beta1 "k8s.io/api/certificates/v1beta1" ) // CertificateSigningRequestSpecApplyConfiguration represents a declarative configuration of the CertificateSigningRequestSpec type for use // with apply. 
type CertificateSigningRequestSpecApplyConfiguration struct { - Request []byte `json:"request,omitempty"` - SignerName *string `json:"signerName,omitempty"` - ExpirationSeconds *int32 `json:"expirationSeconds,omitempty"` - Usages []v1beta1.KeyUsage `json:"usages,omitempty"` - Username *string `json:"username,omitempty"` - UID *string `json:"uid,omitempty"` - Groups []string `json:"groups,omitempty"` - Extra map[string]v1beta1.ExtraValue `json:"extra,omitempty"` + Request []byte `json:"request,omitempty"` + SignerName *string `json:"signerName,omitempty"` + ExpirationSeconds *int32 `json:"expirationSeconds,omitempty"` + Usages []certificatesv1beta1.KeyUsage `json:"usages,omitempty"` + Username *string `json:"username,omitempty"` + UID *string `json:"uid,omitempty"` + Groups []string `json:"groups,omitempty"` + Extra map[string]certificatesv1beta1.ExtraValue `json:"extra,omitempty"` } // CertificateSigningRequestSpecApplyConfiguration constructs a declarative configuration of the CertificateSigningRequestSpec type for use with @@ -70,7 +70,7 @@ func (b *CertificateSigningRequestSpecApplyConfiguration) WithExpirationSeconds( // WithUsages adds the given value to the Usages field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the Usages field. -func (b *CertificateSigningRequestSpecApplyConfiguration) WithUsages(values ...v1beta1.KeyUsage) *CertificateSigningRequestSpecApplyConfiguration { +func (b *CertificateSigningRequestSpecApplyConfiguration) WithUsages(values ...certificatesv1beta1.KeyUsage) *CertificateSigningRequestSpecApplyConfiguration { for i := range values { b.Usages = append(b.Usages, values[i]) } @@ -107,9 +107,9 @@ func (b *CertificateSigningRequestSpecApplyConfiguration) WithGroups(values ...s // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, the entries provided by each call will be put on the Extra field, // overwriting an existing map entries in Extra field with the same key. -func (b *CertificateSigningRequestSpecApplyConfiguration) WithExtra(entries map[string]v1beta1.ExtraValue) *CertificateSigningRequestSpecApplyConfiguration { +func (b *CertificateSigningRequestSpecApplyConfiguration) WithExtra(entries map[string]certificatesv1beta1.ExtraValue) *CertificateSigningRequestSpecApplyConfiguration { if b.Extra == nil && len(entries) > 0 { - b.Extra = make(map[string]v1beta1.ExtraValue, len(entries)) + b.Extra = make(map[string]certificatesv1beta1.ExtraValue, len(entries)) } for k, v := range entries { b.Extra[k] = v diff --git a/vendor/k8s.io/client-go/applyconfigurations/coordination/v1/lease.go b/vendor/k8s.io/client-go/applyconfigurations/coordination/v1/lease.go index ffd84583f..2a69e773c 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/coordination/v1/lease.go +++ b/vendor/k8s.io/client-go/applyconfigurations/coordination/v1/lease.go @@ -19,20 +19,20 @@ limitations under the License. 
package v1 import ( - apicoordinationv1 "k8s.io/api/coordination/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + coordinationv1 "k8s.io/api/coordination/v1" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" internal "k8s.io/client-go/applyconfigurations/internal" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) // LeaseApplyConfiguration represents a declarative configuration of the Lease type for use // with apply. type LeaseApplyConfiguration struct { - v1.TypeMetaApplyConfiguration `json:",inline"` - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Spec *LeaseSpecApplyConfiguration `json:"spec,omitempty"` + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *LeaseSpecApplyConfiguration `json:"spec,omitempty"` } // Lease constructs a declarative configuration of the Lease type for use with @@ -57,18 +57,18 @@ func Lease(name, namespace string) *LeaseApplyConfiguration { // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! -func ExtractLease(lease *apicoordinationv1.Lease, fieldManager string) (*LeaseApplyConfiguration, error) { +func ExtractLease(lease *coordinationv1.Lease, fieldManager string) (*LeaseApplyConfiguration, error) { return extractLease(lease, fieldManager, "") } // ExtractLeaseStatus is the same as ExtractLease except // that it extracts the status subresource applied configuration. // Experimental! -func ExtractLeaseStatus(lease *apicoordinationv1.Lease, fieldManager string) (*LeaseApplyConfiguration, error) { +func ExtractLeaseStatus(lease *coordinationv1.Lease, fieldManager string) (*LeaseApplyConfiguration, error) { return extractLease(lease, fieldManager, "status") } -func extractLease(lease *apicoordinationv1.Lease, fieldManager string, subresource string) (*LeaseApplyConfiguration, error) { +func extractLease(lease *coordinationv1.Lease, fieldManager string, subresource string) (*LeaseApplyConfiguration, error) { b := &LeaseApplyConfiguration{} err := managedfields.ExtractInto(lease, internal.Parser().Type("io.k8s.api.coordination.v1.Lease"), fieldManager, b, subresource) if err != nil { @@ -86,7 +86,7 @@ func extractLease(lease *apicoordinationv1.Lease, fieldManager string, subresour // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *LeaseApplyConfiguration) WithKind(value string) *LeaseApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -94,7 +94,7 @@ func (b *LeaseApplyConfiguration) WithKind(value string) *LeaseApplyConfiguratio // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. 
func (b *LeaseApplyConfiguration) WithAPIVersion(value string) *LeaseApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -103,7 +103,7 @@ func (b *LeaseApplyConfiguration) WithAPIVersion(value string) *LeaseApplyConfig // If called multiple times, the Name field is set to the value of the last call. func (b *LeaseApplyConfiguration) WithName(value string) *LeaseApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -112,7 +112,7 @@ func (b *LeaseApplyConfiguration) WithName(value string) *LeaseApplyConfiguratio // If called multiple times, the GenerateName field is set to the value of the last call. func (b *LeaseApplyConfiguration) WithGenerateName(value string) *LeaseApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -121,7 +121,7 @@ func (b *LeaseApplyConfiguration) WithGenerateName(value string) *LeaseApplyConf // If called multiple times, the Namespace field is set to the value of the last call. func (b *LeaseApplyConfiguration) WithNamespace(value string) *LeaseApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -130,7 +130,7 @@ func (b *LeaseApplyConfiguration) WithNamespace(value string) *LeaseApplyConfigu // If called multiple times, the UID field is set to the value of the last call. func (b *LeaseApplyConfiguration) WithUID(value types.UID) *LeaseApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -139,7 +139,7 @@ func (b *LeaseApplyConfiguration) WithUID(value types.UID) *LeaseApplyConfigurat // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *LeaseApplyConfiguration) WithResourceVersion(value string) *LeaseApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -148,25 +148,25 @@ func (b *LeaseApplyConfiguration) WithResourceVersion(value string) *LeaseApplyC // If called multiple times, the Generation field is set to the value of the last call. func (b *LeaseApplyConfiguration) WithGeneration(value int64) *LeaseApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CreationTimestamp field is set to the value of the last call. -func (b *LeaseApplyConfiguration) WithCreationTimestamp(value metav1.Time) *LeaseApplyConfiguration { +func (b *LeaseApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *LeaseApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. 
// If called multiple times, the DeletionTimestamp field is set to the value of the last call. -func (b *LeaseApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *LeaseApplyConfiguration { +func (b *LeaseApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *LeaseApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -175,7 +175,7 @@ func (b *LeaseApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *Leas // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *LeaseApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *LeaseApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -185,11 +185,11 @@ func (b *LeaseApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *L // overwriting an existing map entries in Labels field with the same key. func (b *LeaseApplyConfiguration) WithLabels(entries map[string]string) *LeaseApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -200,11 +200,11 @@ func (b *LeaseApplyConfiguration) WithLabels(entries map[string]string) *LeaseAp // overwriting an existing map entries in Annotations field with the same key. func (b *LeaseApplyConfiguration) WithAnnotations(entries map[string]string) *LeaseApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -212,13 +212,13 @@ func (b *LeaseApplyConfiguration) WithAnnotations(entries map[string]string) *Le // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the OwnerReferences field. 
-func (b *LeaseApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *LeaseApplyConfiguration { +func (b *LeaseApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *LeaseApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -229,14 +229,14 @@ func (b *LeaseApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferen func (b *LeaseApplyConfiguration) WithFinalizers(values ...string) *LeaseApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } func (b *LeaseApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} } } @@ -251,5 +251,5 @@ func (b *LeaseApplyConfiguration) WithSpec(value *LeaseSpecApplyConfiguration) * // GetName retrieves the value of the Name field in the declarative configuration. func (b *LeaseApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/k8s.io/client-go/applyconfigurations/coordination/v1/leasespec.go b/vendor/k8s.io/client-go/applyconfigurations/coordination/v1/leasespec.go index 01d0df138..d0099872c 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/coordination/v1/leasespec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/coordination/v1/leasespec.go @@ -20,7 +20,7 @@ package v1 import ( coordinationv1 "k8s.io/api/coordination/v1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) // LeaseSpecApplyConfiguration represents a declarative configuration of the LeaseSpec type for use @@ -28,8 +28,8 @@ import ( type LeaseSpecApplyConfiguration struct { HolderIdentity *string `json:"holderIdentity,omitempty"` LeaseDurationSeconds *int32 `json:"leaseDurationSeconds,omitempty"` - AcquireTime *v1.MicroTime `json:"acquireTime,omitempty"` - RenewTime *v1.MicroTime `json:"renewTime,omitempty"` + AcquireTime *metav1.MicroTime `json:"acquireTime,omitempty"` + RenewTime *metav1.MicroTime `json:"renewTime,omitempty"` LeaseTransitions *int32 `json:"leaseTransitions,omitempty"` Strategy *coordinationv1.CoordinatedLeaseStrategy `json:"strategy,omitempty"` PreferredHolder *string `json:"preferredHolder,omitempty"` @@ -60,7 +60,7 @@ func (b *LeaseSpecApplyConfiguration) WithLeaseDurationSeconds(value int32) *Lea // WithAcquireTime sets the AcquireTime field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the AcquireTime field is set to the value of the last call. 
-func (b *LeaseSpecApplyConfiguration) WithAcquireTime(value v1.MicroTime) *LeaseSpecApplyConfiguration { +func (b *LeaseSpecApplyConfiguration) WithAcquireTime(value metav1.MicroTime) *LeaseSpecApplyConfiguration { b.AcquireTime = &value return b } @@ -68,7 +68,7 @@ func (b *LeaseSpecApplyConfiguration) WithAcquireTime(value v1.MicroTime) *Lease // WithRenewTime sets the RenewTime field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the RenewTime field is set to the value of the last call. -func (b *LeaseSpecApplyConfiguration) WithRenewTime(value v1.MicroTime) *LeaseSpecApplyConfiguration { +func (b *LeaseSpecApplyConfiguration) WithRenewTime(value metav1.MicroTime) *LeaseSpecApplyConfiguration { b.RenewTime = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/coordination/v1alpha1/leasecandidate.go b/vendor/k8s.io/client-go/applyconfigurations/coordination/v1alpha2/leasecandidate.go similarity index 87% rename from vendor/k8s.io/client-go/applyconfigurations/coordination/v1alpha1/leasecandidate.go rename to vendor/k8s.io/client-go/applyconfigurations/coordination/v1alpha2/leasecandidate.go index ef7684779..b2cc2338e 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/coordination/v1alpha1/leasecandidate.go +++ b/vendor/k8s.io/client-go/applyconfigurations/coordination/v1alpha2/leasecandidate.go @@ -16,10 +16,10 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. -package v1alpha1 +package v1alpha2 import ( - coordinationv1alpha1 "k8s.io/api/coordination/v1alpha1" + coordinationv1alpha2 "k8s.io/api/coordination/v1alpha2" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" @@ -42,7 +42,7 @@ func LeaseCandidate(name, namespace string) *LeaseCandidateApplyConfiguration { b.WithName(name) b.WithNamespace(namespace) b.WithKind("LeaseCandidate") - b.WithAPIVersion("coordination.k8s.io/v1alpha1") + b.WithAPIVersion("coordination.k8s.io/v1alpha2") return b } @@ -57,20 +57,20 @@ func LeaseCandidate(name, namespace string) *LeaseCandidateApplyConfiguration { // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! -func ExtractLeaseCandidate(leaseCandidate *coordinationv1alpha1.LeaseCandidate, fieldManager string) (*LeaseCandidateApplyConfiguration, error) { +func ExtractLeaseCandidate(leaseCandidate *coordinationv1alpha2.LeaseCandidate, fieldManager string) (*LeaseCandidateApplyConfiguration, error) { return extractLeaseCandidate(leaseCandidate, fieldManager, "") } // ExtractLeaseCandidateStatus is the same as ExtractLeaseCandidate except // that it extracts the status subresource applied configuration. // Experimental! 
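In coordination/v1 only the import aliases and embedded-field selectors change; LeaseSpec's AcquireTime and RenewTime remain metav1.MicroTime values. A sketch of a lease renewal through these builders (the clientset, lease name, namespace, and duration are illustrative):

// Sketch only: renews a Lease via server-side apply; identifiers are placeholders.
package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	coordinationv1ac "k8s.io/client-go/applyconfigurations/coordination/v1"
	"k8s.io/client-go/kubernetes"
)

func renewLease(ctx context.Context, cs kubernetes.Interface, holder string) error {
	lease := coordinationv1ac.Lease("example-lock", "example-ns").
		WithSpec(coordinationv1ac.LeaseSpec().
			WithHolderIdentity(holder).
			WithLeaseDurationSeconds(60).
			WithRenewTime(metav1.NowMicro()))

	_, err := cs.CoordinationV1().Leases("example-ns").
		Apply(ctx, lease, metav1.ApplyOptions{FieldManager: holder, Force: true})
	return err
}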
-func ExtractLeaseCandidateStatus(leaseCandidate *coordinationv1alpha1.LeaseCandidate, fieldManager string) (*LeaseCandidateApplyConfiguration, error) { +func ExtractLeaseCandidateStatus(leaseCandidate *coordinationv1alpha2.LeaseCandidate, fieldManager string) (*LeaseCandidateApplyConfiguration, error) { return extractLeaseCandidate(leaseCandidate, fieldManager, "status") } -func extractLeaseCandidate(leaseCandidate *coordinationv1alpha1.LeaseCandidate, fieldManager string, subresource string) (*LeaseCandidateApplyConfiguration, error) { +func extractLeaseCandidate(leaseCandidate *coordinationv1alpha2.LeaseCandidate, fieldManager string, subresource string) (*LeaseCandidateApplyConfiguration, error) { b := &LeaseCandidateApplyConfiguration{} - err := managedfields.ExtractInto(leaseCandidate, internal.Parser().Type("io.k8s.api.coordination.v1alpha1.LeaseCandidate"), fieldManager, b, subresource) + err := managedfields.ExtractInto(leaseCandidate, internal.Parser().Type("io.k8s.api.coordination.v1alpha2.LeaseCandidate"), fieldManager, b, subresource) if err != nil { return nil, err } @@ -78,7 +78,7 @@ func extractLeaseCandidate(leaseCandidate *coordinationv1alpha1.LeaseCandidate, b.WithNamespace(leaseCandidate.Namespace) b.WithKind("LeaseCandidate") - b.WithAPIVersion("coordination.k8s.io/v1alpha1") + b.WithAPIVersion("coordination.k8s.io/v1alpha2") return b, nil } @@ -86,7 +86,7 @@ func extractLeaseCandidate(leaseCandidate *coordinationv1alpha1.LeaseCandidate, // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *LeaseCandidateApplyConfiguration) WithKind(value string) *LeaseCandidateApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -94,7 +94,7 @@ func (b *LeaseCandidateApplyConfiguration) WithKind(value string) *LeaseCandidat // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *LeaseCandidateApplyConfiguration) WithAPIVersion(value string) *LeaseCandidateApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -103,7 +103,7 @@ func (b *LeaseCandidateApplyConfiguration) WithAPIVersion(value string) *LeaseCa // If called multiple times, the Name field is set to the value of the last call. func (b *LeaseCandidateApplyConfiguration) WithName(value string) *LeaseCandidateApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -112,7 +112,7 @@ func (b *LeaseCandidateApplyConfiguration) WithName(value string) *LeaseCandidat // If called multiple times, the GenerateName field is set to the value of the last call. func (b *LeaseCandidateApplyConfiguration) WithGenerateName(value string) *LeaseCandidateApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -121,7 +121,7 @@ func (b *LeaseCandidateApplyConfiguration) WithGenerateName(value string) *Lease // If called multiple times, the Namespace field is set to the value of the last call. 
func (b *LeaseCandidateApplyConfiguration) WithNamespace(value string) *LeaseCandidateApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -130,7 +130,7 @@ func (b *LeaseCandidateApplyConfiguration) WithNamespace(value string) *LeaseCan // If called multiple times, the UID field is set to the value of the last call. func (b *LeaseCandidateApplyConfiguration) WithUID(value types.UID) *LeaseCandidateApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -139,7 +139,7 @@ func (b *LeaseCandidateApplyConfiguration) WithUID(value types.UID) *LeaseCandid // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *LeaseCandidateApplyConfiguration) WithResourceVersion(value string) *LeaseCandidateApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -148,7 +148,7 @@ func (b *LeaseCandidateApplyConfiguration) WithResourceVersion(value string) *Le // If called multiple times, the Generation field is set to the value of the last call. func (b *LeaseCandidateApplyConfiguration) WithGeneration(value int64) *LeaseCandidateApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } @@ -157,7 +157,7 @@ func (b *LeaseCandidateApplyConfiguration) WithGeneration(value int64) *LeaseCan // If called multiple times, the CreationTimestamp field is set to the value of the last call. func (b *LeaseCandidateApplyConfiguration) WithCreationTimestamp(value metav1.Time) *LeaseCandidateApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } @@ -166,7 +166,7 @@ func (b *LeaseCandidateApplyConfiguration) WithCreationTimestamp(value metav1.Ti // If called multiple times, the DeletionTimestamp field is set to the value of the last call. func (b *LeaseCandidateApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *LeaseCandidateApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -175,7 +175,7 @@ func (b *LeaseCandidateApplyConfiguration) WithDeletionTimestamp(value metav1.Ti // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *LeaseCandidateApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *LeaseCandidateApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -185,11 +185,11 @@ func (b *LeaseCandidateApplyConfiguration) WithDeletionGracePeriodSeconds(value // overwriting an existing map entries in Labels field with the same key. 
func (b *LeaseCandidateApplyConfiguration) WithLabels(entries map[string]string) *LeaseCandidateApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -200,11 +200,11 @@ func (b *LeaseCandidateApplyConfiguration) WithLabels(entries map[string]string) // overwriting an existing map entries in Annotations field with the same key. func (b *LeaseCandidateApplyConfiguration) WithAnnotations(entries map[string]string) *LeaseCandidateApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -218,7 +218,7 @@ func (b *LeaseCandidateApplyConfiguration) WithOwnerReferences(values ...*v1.Own if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -229,7 +229,7 @@ func (b *LeaseCandidateApplyConfiguration) WithOwnerReferences(values ...*v1.Own func (b *LeaseCandidateApplyConfiguration) WithFinalizers(values ...string) *LeaseCandidateApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } @@ -251,5 +251,5 @@ func (b *LeaseCandidateApplyConfiguration) WithSpec(value *LeaseCandidateSpecApp // GetName retrieves the value of the Name field in the declarative configuration. func (b *LeaseCandidateApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/k8s.io/client-go/applyconfigurations/coordination/v1alpha1/leasecandidatespec.go b/vendor/k8s.io/client-go/applyconfigurations/coordination/v1alpha2/leasecandidatespec.go similarity index 75% rename from vendor/k8s.io/client-go/applyconfigurations/coordination/v1alpha1/leasecandidatespec.go rename to vendor/k8s.io/client-go/applyconfigurations/coordination/v1alpha2/leasecandidatespec.go index 61d3dca10..f52aaab24 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/coordination/v1alpha1/leasecandidatespec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/coordination/v1alpha2/leasecandidatespec.go @@ -16,7 +16,7 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. -package v1alpha1 +package v1alpha2 import ( coordinationv1 "k8s.io/api/coordination/v1" @@ -26,12 +26,12 @@ import ( // LeaseCandidateSpecApplyConfiguration represents a declarative configuration of the LeaseCandidateSpec type for use // with apply. 
type LeaseCandidateSpecApplyConfiguration struct { - LeaseName *string `json:"leaseName,omitempty"` - PingTime *v1.MicroTime `json:"pingTime,omitempty"` - RenewTime *v1.MicroTime `json:"renewTime,omitempty"` - BinaryVersion *string `json:"binaryVersion,omitempty"` - EmulationVersion *string `json:"emulationVersion,omitempty"` - PreferredStrategies []coordinationv1.CoordinatedLeaseStrategy `json:"preferredStrategies,omitempty"` + LeaseName *string `json:"leaseName,omitempty"` + PingTime *v1.MicroTime `json:"pingTime,omitempty"` + RenewTime *v1.MicroTime `json:"renewTime,omitempty"` + BinaryVersion *string `json:"binaryVersion,omitempty"` + EmulationVersion *string `json:"emulationVersion,omitempty"` + Strategy *coordinationv1.CoordinatedLeaseStrategy `json:"strategy,omitempty"` } // LeaseCandidateSpecApplyConfiguration constructs a declarative configuration of the LeaseCandidateSpec type for use with @@ -80,12 +80,10 @@ func (b *LeaseCandidateSpecApplyConfiguration) WithEmulationVersion(value string return b } -// WithPreferredStrategies adds the given value to the PreferredStrategies field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the PreferredStrategies field. -func (b *LeaseCandidateSpecApplyConfiguration) WithPreferredStrategies(values ...coordinationv1.CoordinatedLeaseStrategy) *LeaseCandidateSpecApplyConfiguration { - for i := range values { - b.PreferredStrategies = append(b.PreferredStrategies, values[i]) - } +// WithStrategy sets the Strategy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Strategy field is set to the value of the last call. +func (b *LeaseCandidateSpecApplyConfiguration) WithStrategy(value coordinationv1.CoordinatedLeaseStrategy) *LeaseCandidateSpecApplyConfiguration { + b.Strategy = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/coordination/v1beta1/lease.go b/vendor/k8s.io/client-go/applyconfigurations/coordination/v1beta1/lease.go index 9aa0703e8..b321fe6b4 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/coordination/v1beta1/lease.go +++ b/vendor/k8s.io/client-go/applyconfigurations/coordination/v1beta1/lease.go @@ -86,7 +86,7 @@ func extractLease(lease *coordinationv1beta1.Lease, fieldManager string, subreso // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *LeaseApplyConfiguration) WithKind(value string) *LeaseApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -94,7 +94,7 @@ func (b *LeaseApplyConfiguration) WithKind(value string) *LeaseApplyConfiguratio // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *LeaseApplyConfiguration) WithAPIVersion(value string) *LeaseApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -103,7 +103,7 @@ func (b *LeaseApplyConfiguration) WithAPIVersion(value string) *LeaseApplyConfig // If called multiple times, the Name field is set to the value of the last call. 
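// Note on the coordination v1alpha2 apply configuration above: the repeated
// PreferredStrategies list is replaced by a single Strategy field, so callers switch
// from appending values to setting one. A rough migration sketch under that reading
// (the lease name and version strings are illustrative, not taken from this repo):
package example

import (
	coordinationv1 "k8s.io/api/coordination/v1"
	coordinationv1alpha2 "k8s.io/client-go/applyconfigurations/coordination/v1alpha2"
)

// buildCandidateSpec uses the v1alpha2 builder; the old v1alpha1 call would have been
// WithPreferredStrategies(coordinationv1.OldestEmulationVersion) instead of WithStrategy.
func buildCandidateSpec() *coordinationv1alpha2.LeaseCandidateSpecApplyConfiguration {
	return coordinationv1alpha2.LeaseCandidateSpec().
		WithLeaseName("example-lease").
		WithBinaryVersion("1.32.5").
		WithEmulationVersion("1.32.5").
		WithStrategy(coordinationv1.OldestEmulationVersion)
}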
func (b *LeaseApplyConfiguration) WithName(value string) *LeaseApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -112,7 +112,7 @@ func (b *LeaseApplyConfiguration) WithName(value string) *LeaseApplyConfiguratio // If called multiple times, the GenerateName field is set to the value of the last call. func (b *LeaseApplyConfiguration) WithGenerateName(value string) *LeaseApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -121,7 +121,7 @@ func (b *LeaseApplyConfiguration) WithGenerateName(value string) *LeaseApplyConf // If called multiple times, the Namespace field is set to the value of the last call. func (b *LeaseApplyConfiguration) WithNamespace(value string) *LeaseApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -130,7 +130,7 @@ func (b *LeaseApplyConfiguration) WithNamespace(value string) *LeaseApplyConfigu // If called multiple times, the UID field is set to the value of the last call. func (b *LeaseApplyConfiguration) WithUID(value types.UID) *LeaseApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -139,7 +139,7 @@ func (b *LeaseApplyConfiguration) WithUID(value types.UID) *LeaseApplyConfigurat // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *LeaseApplyConfiguration) WithResourceVersion(value string) *LeaseApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -148,7 +148,7 @@ func (b *LeaseApplyConfiguration) WithResourceVersion(value string) *LeaseApplyC // If called multiple times, the Generation field is set to the value of the last call. func (b *LeaseApplyConfiguration) WithGeneration(value int64) *LeaseApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } @@ -157,7 +157,7 @@ func (b *LeaseApplyConfiguration) WithGeneration(value int64) *LeaseApplyConfigu // If called multiple times, the CreationTimestamp field is set to the value of the last call. func (b *LeaseApplyConfiguration) WithCreationTimestamp(value metav1.Time) *LeaseApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } @@ -166,7 +166,7 @@ func (b *LeaseApplyConfiguration) WithCreationTimestamp(value metav1.Time) *Leas // If called multiple times, the DeletionTimestamp field is set to the value of the last call. func (b *LeaseApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *LeaseApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -175,7 +175,7 @@ func (b *LeaseApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *Leas // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. 
func (b *LeaseApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *LeaseApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -185,11 +185,11 @@ func (b *LeaseApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *L // overwriting an existing map entries in Labels field with the same key. func (b *LeaseApplyConfiguration) WithLabels(entries map[string]string) *LeaseApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -200,11 +200,11 @@ func (b *LeaseApplyConfiguration) WithLabels(entries map[string]string) *LeaseAp // overwriting an existing map entries in Annotations field with the same key. func (b *LeaseApplyConfiguration) WithAnnotations(entries map[string]string) *LeaseApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -218,7 +218,7 @@ func (b *LeaseApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferen if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -229,7 +229,7 @@ func (b *LeaseApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferen func (b *LeaseApplyConfiguration) WithFinalizers(values ...string) *LeaseApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } @@ -251,5 +251,5 @@ func (b *LeaseApplyConfiguration) WithSpec(value *LeaseSpecApplyConfiguration) * // GetName retrieves the value of the Name field in the declarative configuration. func (b *LeaseApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/apparmorprofile.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/apparmorprofile.go index 1d698fd61..3f7de21b3 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/apparmorprofile.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/apparmorprofile.go @@ -19,14 +19,14 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" ) // AppArmorProfileApplyConfiguration represents a declarative configuration of the AppArmorProfile type for use // with apply. 
type AppArmorProfileApplyConfiguration struct { - Type *v1.AppArmorProfileType `json:"type,omitempty"` - LocalhostProfile *string `json:"localhostProfile,omitempty"` + Type *corev1.AppArmorProfileType `json:"type,omitempty"` + LocalhostProfile *string `json:"localhostProfile,omitempty"` } // AppArmorProfileApplyConfiguration constructs a declarative configuration of the AppArmorProfile type for use with @@ -38,7 +38,7 @@ func AppArmorProfile() *AppArmorProfileApplyConfiguration { // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. -func (b *AppArmorProfileApplyConfiguration) WithType(value v1.AppArmorProfileType) *AppArmorProfileApplyConfiguration { +func (b *AppArmorProfileApplyConfiguration) WithType(value corev1.AppArmorProfileType) *AppArmorProfileApplyConfiguration { b.Type = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/attachedvolume.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/attachedvolume.go index e4c2fff3f..2c76161a1 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/attachedvolume.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/attachedvolume.go @@ -19,14 +19,14 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" ) // AttachedVolumeApplyConfiguration represents a declarative configuration of the AttachedVolume type for use // with apply. type AttachedVolumeApplyConfiguration struct { - Name *v1.UniqueVolumeName `json:"name,omitempty"` - DevicePath *string `json:"devicePath,omitempty"` + Name *corev1.UniqueVolumeName `json:"name,omitempty"` + DevicePath *string `json:"devicePath,omitempty"` } // AttachedVolumeApplyConfiguration constructs a declarative configuration of the AttachedVolume type for use with @@ -38,7 +38,7 @@ func AttachedVolume() *AttachedVolumeApplyConfiguration { // WithName sets the Name field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Name field is set to the value of the last call. -func (b *AttachedVolumeApplyConfiguration) WithName(value v1.UniqueVolumeName) *AttachedVolumeApplyConfiguration { +func (b *AttachedVolumeApplyConfiguration) WithName(value corev1.UniqueVolumeName) *AttachedVolumeApplyConfiguration { b.Name = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/azurediskvolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/azurediskvolumesource.go index 40ad5ac78..d4d20dfa9 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/azurediskvolumesource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/azurediskvolumesource.go @@ -19,18 +19,18 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" ) // AzureDiskVolumeSourceApplyConfiguration represents a declarative configuration of the AzureDiskVolumeSource type for use // with apply. 
type AzureDiskVolumeSourceApplyConfiguration struct { - DiskName *string `json:"diskName,omitempty"` - DataDiskURI *string `json:"diskURI,omitempty"` - CachingMode *v1.AzureDataDiskCachingMode `json:"cachingMode,omitempty"` - FSType *string `json:"fsType,omitempty"` - ReadOnly *bool `json:"readOnly,omitempty"` - Kind *v1.AzureDataDiskKind `json:"kind,omitempty"` + DiskName *string `json:"diskName,omitempty"` + DataDiskURI *string `json:"diskURI,omitempty"` + CachingMode *corev1.AzureDataDiskCachingMode `json:"cachingMode,omitempty"` + FSType *string `json:"fsType,omitempty"` + ReadOnly *bool `json:"readOnly,omitempty"` + Kind *corev1.AzureDataDiskKind `json:"kind,omitempty"` } // AzureDiskVolumeSourceApplyConfiguration constructs a declarative configuration of the AzureDiskVolumeSource type for use with @@ -58,7 +58,7 @@ func (b *AzureDiskVolumeSourceApplyConfiguration) WithDataDiskURI(value string) // WithCachingMode sets the CachingMode field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CachingMode field is set to the value of the last call. -func (b *AzureDiskVolumeSourceApplyConfiguration) WithCachingMode(value v1.AzureDataDiskCachingMode) *AzureDiskVolumeSourceApplyConfiguration { +func (b *AzureDiskVolumeSourceApplyConfiguration) WithCachingMode(value corev1.AzureDataDiskCachingMode) *AzureDiskVolumeSourceApplyConfiguration { b.CachingMode = &value return b } @@ -82,7 +82,7 @@ func (b *AzureDiskVolumeSourceApplyConfiguration) WithReadOnly(value bool) *Azur // WithKind sets the Kind field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. -func (b *AzureDiskVolumeSourceApplyConfiguration) WithKind(value v1.AzureDataDiskKind) *AzureDiskVolumeSourceApplyConfiguration { +func (b *AzureDiskVolumeSourceApplyConfiguration) WithKind(value corev1.AzureDataDiskKind) *AzureDiskVolumeSourceApplyConfiguration { b.Kind = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/capabilities.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/capabilities.go index 1c463aef5..e5c52b3c1 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/capabilities.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/capabilities.go @@ -19,14 +19,14 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" ) // CapabilitiesApplyConfiguration represents a declarative configuration of the Capabilities type for use // with apply. type CapabilitiesApplyConfiguration struct { - Add []v1.Capability `json:"add,omitempty"` - Drop []v1.Capability `json:"drop,omitempty"` + Add []corev1.Capability `json:"add,omitempty"` + Drop []corev1.Capability `json:"drop,omitempty"` } // CapabilitiesApplyConfiguration constructs a declarative configuration of the Capabilities type for use with @@ -38,7 +38,7 @@ func Capabilities() *CapabilitiesApplyConfiguration { // WithAdd adds the given value to the Add field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the Add field. 
-func (b *CapabilitiesApplyConfiguration) WithAdd(values ...v1.Capability) *CapabilitiesApplyConfiguration { +func (b *CapabilitiesApplyConfiguration) WithAdd(values ...corev1.Capability) *CapabilitiesApplyConfiguration { for i := range values { b.Add = append(b.Add, values[i]) } @@ -48,7 +48,7 @@ func (b *CapabilitiesApplyConfiguration) WithAdd(values ...v1.Capability) *Capab // WithDrop adds the given value to the Drop field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the Drop field. -func (b *CapabilitiesApplyConfiguration) WithDrop(values ...v1.Capability) *CapabilitiesApplyConfiguration { +func (b *CapabilitiesApplyConfiguration) WithDrop(values ...corev1.Capability) *CapabilitiesApplyConfiguration { for i := range values { b.Drop = append(b.Drop, values[i]) } diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/clustertrustbundleprojection.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/clustertrustbundleprojection.go index bcfbac63e..ab1c578c8 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/clustertrustbundleprojection.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/clustertrustbundleprojection.go @@ -19,17 +19,17 @@ limitations under the License. package v1 import ( - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) // ClusterTrustBundleProjectionApplyConfiguration represents a declarative configuration of the ClusterTrustBundleProjection type for use // with apply. type ClusterTrustBundleProjectionApplyConfiguration struct { - Name *string `json:"name,omitempty"` - SignerName *string `json:"signerName,omitempty"` - LabelSelector *v1.LabelSelectorApplyConfiguration `json:"labelSelector,omitempty"` - Optional *bool `json:"optional,omitempty"` - Path *string `json:"path,omitempty"` + Name *string `json:"name,omitempty"` + SignerName *string `json:"signerName,omitempty"` + LabelSelector *metav1.LabelSelectorApplyConfiguration `json:"labelSelector,omitempty"` + Optional *bool `json:"optional,omitempty"` + Path *string `json:"path,omitempty"` } // ClusterTrustBundleProjectionApplyConfiguration constructs a declarative configuration of the ClusterTrustBundleProjection type for use with @@ -57,7 +57,7 @@ func (b *ClusterTrustBundleProjectionApplyConfiguration) WithSignerName(value st // WithLabelSelector sets the LabelSelector field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the LabelSelector field is set to the value of the last call. 
-func (b *ClusterTrustBundleProjectionApplyConfiguration) WithLabelSelector(value *v1.LabelSelectorApplyConfiguration) *ClusterTrustBundleProjectionApplyConfiguration { +func (b *ClusterTrustBundleProjectionApplyConfiguration) WithLabelSelector(value *metav1.LabelSelectorApplyConfiguration) *ClusterTrustBundleProjectionApplyConfiguration { b.LabelSelector = value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/componentcondition.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/componentcondition.go index 0044c7c0b..60be6fe80 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/componentcondition.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/componentcondition.go @@ -19,16 +19,16 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" ) // ComponentConditionApplyConfiguration represents a declarative configuration of the ComponentCondition type for use // with apply. type ComponentConditionApplyConfiguration struct { - Type *v1.ComponentConditionType `json:"type,omitempty"` - Status *v1.ConditionStatus `json:"status,omitempty"` - Message *string `json:"message,omitempty"` - Error *string `json:"error,omitempty"` + Type *corev1.ComponentConditionType `json:"type,omitempty"` + Status *corev1.ConditionStatus `json:"status,omitempty"` + Message *string `json:"message,omitempty"` + Error *string `json:"error,omitempty"` } // ComponentConditionApplyConfiguration constructs a declarative configuration of the ComponentCondition type for use with @@ -40,7 +40,7 @@ func ComponentCondition() *ComponentConditionApplyConfiguration { // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. -func (b *ComponentConditionApplyConfiguration) WithType(value v1.ComponentConditionType) *ComponentConditionApplyConfiguration { +func (b *ComponentConditionApplyConfiguration) WithType(value corev1.ComponentConditionType) *ComponentConditionApplyConfiguration { b.Type = &value return b } @@ -48,7 +48,7 @@ func (b *ComponentConditionApplyConfiguration) WithType(value v1.ComponentCondit // WithStatus sets the Status field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Status field is set to the value of the last call. -func (b *ComponentConditionApplyConfiguration) WithStatus(value v1.ConditionStatus) *ComponentConditionApplyConfiguration { +func (b *ComponentConditionApplyConfiguration) WithStatus(value corev1.ConditionStatus) *ComponentConditionApplyConfiguration { b.Status = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/componentstatus.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/componentstatus.go index 195bde721..340a55e2d 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/componentstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/componentstatus.go @@ -19,20 +19,20 @@ limitations under the License. 
package v1 import ( - apicorev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + corev1 "k8s.io/api/core/v1" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" internal "k8s.io/client-go/applyconfigurations/internal" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) // ComponentStatusApplyConfiguration represents a declarative configuration of the ComponentStatus type for use // with apply. type ComponentStatusApplyConfiguration struct { - v1.TypeMetaApplyConfiguration `json:",inline"` - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Conditions []ComponentConditionApplyConfiguration `json:"conditions,omitempty"` + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Conditions []ComponentConditionApplyConfiguration `json:"conditions,omitempty"` } // ComponentStatus constructs a declarative configuration of the ComponentStatus type for use with @@ -56,18 +56,18 @@ func ComponentStatus(name string) *ComponentStatusApplyConfiguration { // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! -func ExtractComponentStatus(componentStatus *apicorev1.ComponentStatus, fieldManager string) (*ComponentStatusApplyConfiguration, error) { +func ExtractComponentStatus(componentStatus *corev1.ComponentStatus, fieldManager string) (*ComponentStatusApplyConfiguration, error) { return extractComponentStatus(componentStatus, fieldManager, "") } // ExtractComponentStatusStatus is the same as ExtractComponentStatus except // that it extracts the status subresource applied configuration. // Experimental! -func ExtractComponentStatusStatus(componentStatus *apicorev1.ComponentStatus, fieldManager string) (*ComponentStatusApplyConfiguration, error) { +func ExtractComponentStatusStatus(componentStatus *corev1.ComponentStatus, fieldManager string) (*ComponentStatusApplyConfiguration, error) { return extractComponentStatus(componentStatus, fieldManager, "status") } -func extractComponentStatus(componentStatus *apicorev1.ComponentStatus, fieldManager string, subresource string) (*ComponentStatusApplyConfiguration, error) { +func extractComponentStatus(componentStatus *corev1.ComponentStatus, fieldManager string, subresource string) (*ComponentStatusApplyConfiguration, error) { b := &ComponentStatusApplyConfiguration{} err := managedfields.ExtractInto(componentStatus, internal.Parser().Type("io.k8s.api.core.v1.ComponentStatus"), fieldManager, b, subresource) if err != nil { @@ -84,7 +84,7 @@ func extractComponentStatus(componentStatus *apicorev1.ComponentStatus, fieldMan // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *ComponentStatusApplyConfiguration) WithKind(value string) *ComponentStatusApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -92,7 +92,7 @@ func (b *ComponentStatusApplyConfiguration) WithKind(value string) *ComponentSta // and returns the receiver, so that objects can be built by chaining "With" function invocations. 
// If called multiple times, the APIVersion field is set to the value of the last call. func (b *ComponentStatusApplyConfiguration) WithAPIVersion(value string) *ComponentStatusApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -101,7 +101,7 @@ func (b *ComponentStatusApplyConfiguration) WithAPIVersion(value string) *Compon // If called multiple times, the Name field is set to the value of the last call. func (b *ComponentStatusApplyConfiguration) WithName(value string) *ComponentStatusApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -110,7 +110,7 @@ func (b *ComponentStatusApplyConfiguration) WithName(value string) *ComponentSta // If called multiple times, the GenerateName field is set to the value of the last call. func (b *ComponentStatusApplyConfiguration) WithGenerateName(value string) *ComponentStatusApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -119,7 +119,7 @@ func (b *ComponentStatusApplyConfiguration) WithGenerateName(value string) *Comp // If called multiple times, the Namespace field is set to the value of the last call. func (b *ComponentStatusApplyConfiguration) WithNamespace(value string) *ComponentStatusApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -128,7 +128,7 @@ func (b *ComponentStatusApplyConfiguration) WithNamespace(value string) *Compone // If called multiple times, the UID field is set to the value of the last call. func (b *ComponentStatusApplyConfiguration) WithUID(value types.UID) *ComponentStatusApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -137,7 +137,7 @@ func (b *ComponentStatusApplyConfiguration) WithUID(value types.UID) *ComponentS // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *ComponentStatusApplyConfiguration) WithResourceVersion(value string) *ComponentStatusApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -146,25 +146,25 @@ func (b *ComponentStatusApplyConfiguration) WithResourceVersion(value string) *C // If called multiple times, the Generation field is set to the value of the last call. func (b *ComponentStatusApplyConfiguration) WithGeneration(value int64) *ComponentStatusApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CreationTimestamp field is set to the value of the last call. 
-func (b *ComponentStatusApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ComponentStatusApplyConfiguration { +func (b *ComponentStatusApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *ComponentStatusApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. -func (b *ComponentStatusApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ComponentStatusApplyConfiguration { +func (b *ComponentStatusApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *ComponentStatusApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -173,7 +173,7 @@ func (b *ComponentStatusApplyConfiguration) WithDeletionTimestamp(value metav1.T // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *ComponentStatusApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ComponentStatusApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -183,11 +183,11 @@ func (b *ComponentStatusApplyConfiguration) WithDeletionGracePeriodSeconds(value // overwriting an existing map entries in Labels field with the same key. func (b *ComponentStatusApplyConfiguration) WithLabels(entries map[string]string) *ComponentStatusApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -198,11 +198,11 @@ func (b *ComponentStatusApplyConfiguration) WithLabels(entries map[string]string // overwriting an existing map entries in Annotations field with the same key. func (b *ComponentStatusApplyConfiguration) WithAnnotations(entries map[string]string) *ComponentStatusApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -210,13 +210,13 @@ func (b *ComponentStatusApplyConfiguration) WithAnnotations(entries map[string]s // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the OwnerReferences field. 
-func (b *ComponentStatusApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ComponentStatusApplyConfiguration { +func (b *ComponentStatusApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *ComponentStatusApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -227,14 +227,14 @@ func (b *ComponentStatusApplyConfiguration) WithOwnerReferences(values ...*v1.Ow func (b *ComponentStatusApplyConfiguration) WithFinalizers(values ...string) *ComponentStatusApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } func (b *ComponentStatusApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} } } @@ -254,5 +254,5 @@ func (b *ComponentStatusApplyConfiguration) WithConditions(values ...*ComponentC // GetName retrieves the value of the Name field in the declarative configuration. func (b *ComponentStatusApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmap.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmap.go index 576b7a3d6..2ff2c4d20 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmap.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmap.go @@ -20,21 +20,21 @@ package v1 import ( corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" internal "k8s.io/client-go/applyconfigurations/internal" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) // ConfigMapApplyConfiguration represents a declarative configuration of the ConfigMap type for use // with apply. type ConfigMapApplyConfiguration struct { - v1.TypeMetaApplyConfiguration `json:",inline"` - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Immutable *bool `json:"immutable,omitempty"` - Data map[string]string `json:"data,omitempty"` - BinaryData map[string][]byte `json:"binaryData,omitempty"` + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Immutable *bool `json:"immutable,omitempty"` + Data map[string]string `json:"data,omitempty"` + BinaryData map[string][]byte `json:"binaryData,omitempty"` } // ConfigMap constructs a declarative configuration of the ConfigMap type for use with @@ -88,7 +88,7 @@ func extractConfigMap(configMap *corev1.ConfigMap, fieldManager string, subresou // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. 
func (b *ConfigMapApplyConfiguration) WithKind(value string) *ConfigMapApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -96,7 +96,7 @@ func (b *ConfigMapApplyConfiguration) WithKind(value string) *ConfigMapApplyConf // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *ConfigMapApplyConfiguration) WithAPIVersion(value string) *ConfigMapApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -105,7 +105,7 @@ func (b *ConfigMapApplyConfiguration) WithAPIVersion(value string) *ConfigMapApp // If called multiple times, the Name field is set to the value of the last call. func (b *ConfigMapApplyConfiguration) WithName(value string) *ConfigMapApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -114,7 +114,7 @@ func (b *ConfigMapApplyConfiguration) WithName(value string) *ConfigMapApplyConf // If called multiple times, the GenerateName field is set to the value of the last call. func (b *ConfigMapApplyConfiguration) WithGenerateName(value string) *ConfigMapApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -123,7 +123,7 @@ func (b *ConfigMapApplyConfiguration) WithGenerateName(value string) *ConfigMapA // If called multiple times, the Namespace field is set to the value of the last call. func (b *ConfigMapApplyConfiguration) WithNamespace(value string) *ConfigMapApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -132,7 +132,7 @@ func (b *ConfigMapApplyConfiguration) WithNamespace(value string) *ConfigMapAppl // If called multiple times, the UID field is set to the value of the last call. func (b *ConfigMapApplyConfiguration) WithUID(value types.UID) *ConfigMapApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -141,7 +141,7 @@ func (b *ConfigMapApplyConfiguration) WithUID(value types.UID) *ConfigMapApplyCo // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *ConfigMapApplyConfiguration) WithResourceVersion(value string) *ConfigMapApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -150,25 +150,25 @@ func (b *ConfigMapApplyConfiguration) WithResourceVersion(value string) *ConfigM // If called multiple times, the Generation field is set to the value of the last call. func (b *ConfigMapApplyConfiguration) WithGeneration(value int64) *ConfigMapApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CreationTimestamp field is set to the value of the last call. 
-func (b *ConfigMapApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ConfigMapApplyConfiguration { +func (b *ConfigMapApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *ConfigMapApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. -func (b *ConfigMapApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ConfigMapApplyConfiguration { +func (b *ConfigMapApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *ConfigMapApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -177,7 +177,7 @@ func (b *ConfigMapApplyConfiguration) WithDeletionTimestamp(value metav1.Time) * // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *ConfigMapApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ConfigMapApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -187,11 +187,11 @@ func (b *ConfigMapApplyConfiguration) WithDeletionGracePeriodSeconds(value int64 // overwriting an existing map entries in Labels field with the same key. func (b *ConfigMapApplyConfiguration) WithLabels(entries map[string]string) *ConfigMapApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -202,11 +202,11 @@ func (b *ConfigMapApplyConfiguration) WithLabels(entries map[string]string) *Con // overwriting an existing map entries in Annotations field with the same key. func (b *ConfigMapApplyConfiguration) WithAnnotations(entries map[string]string) *ConfigMapApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -214,13 +214,13 @@ func (b *ConfigMapApplyConfiguration) WithAnnotations(entries map[string]string) // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the OwnerReferences field. 
-func (b *ConfigMapApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ConfigMapApplyConfiguration { +func (b *ConfigMapApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *ConfigMapApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -231,14 +231,14 @@ func (b *ConfigMapApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerRef func (b *ConfigMapApplyConfiguration) WithFinalizers(values ...string) *ConfigMapApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } func (b *ConfigMapApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} } } @@ -281,5 +281,5 @@ func (b *ConfigMapApplyConfiguration) WithBinaryData(entries map[string][]byte) // GetName retrieves the value of the Name field in the declarative configuration. func (b *ConfigMapApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmapenvsource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmapenvsource.go index b1fccd700..4c0d2cbdd 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmapenvsource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmapenvsource.go @@ -35,7 +35,7 @@ func ConfigMapEnvSource() *ConfigMapEnvSourceApplyConfiguration { // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Name field is set to the value of the last call. func (b *ConfigMapEnvSourceApplyConfiguration) WithName(value string) *ConfigMapEnvSourceApplyConfiguration { - b.Name = &value + b.LocalObjectReferenceApplyConfiguration.Name = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmapkeyselector.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmapkeyselector.go index 26c2a75b5..97c0e7210 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmapkeyselector.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmapkeyselector.go @@ -36,7 +36,7 @@ func ConfigMapKeySelector() *ConfigMapKeySelectorApplyConfiguration { // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Name field is set to the value of the last call. 
func (b *ConfigMapKeySelectorApplyConfiguration) WithName(value string) *ConfigMapKeySelectorApplyConfiguration { - b.Name = &value + b.LocalObjectReferenceApplyConfiguration.Name = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmapprojection.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmapprojection.go index 308b28f57..d8c5e21d3 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmapprojection.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmapprojection.go @@ -36,7 +36,7 @@ func ConfigMapProjection() *ConfigMapProjectionApplyConfiguration { // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Name field is set to the value of the last call. func (b *ConfigMapProjectionApplyConfiguration) WithName(value string) *ConfigMapProjectionApplyConfiguration { - b.Name = &value + b.LocalObjectReferenceApplyConfiguration.Name = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmapvolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmapvolumesource.go index 8e0e8dc0f..b5f410397 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmapvolumesource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmapvolumesource.go @@ -37,7 +37,7 @@ func ConfigMapVolumeSource() *ConfigMapVolumeSourceApplyConfiguration { // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Name field is set to the value of the last call. func (b *ConfigMapVolumeSourceApplyConfiguration) WithName(value string) *ConfigMapVolumeSourceApplyConfiguration { - b.Name = &value + b.LocalObjectReferenceApplyConfiguration.Name = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerport.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerport.go index 7acc0638f..2ad47b3a9 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerport.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerport.go @@ -19,17 +19,17 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" ) // ContainerPortApplyConfiguration represents a declarative configuration of the ContainerPort type for use // with apply. type ContainerPortApplyConfiguration struct { - Name *string `json:"name,omitempty"` - HostPort *int32 `json:"hostPort,omitempty"` - ContainerPort *int32 `json:"containerPort,omitempty"` - Protocol *v1.Protocol `json:"protocol,omitempty"` - HostIP *string `json:"hostIP,omitempty"` + Name *string `json:"name,omitempty"` + HostPort *int32 `json:"hostPort,omitempty"` + ContainerPort *int32 `json:"containerPort,omitempty"` + Protocol *corev1.Protocol `json:"protocol,omitempty"` + HostIP *string `json:"hostIP,omitempty"` } // ContainerPortApplyConfiguration constructs a declarative configuration of the ContainerPort type for use with @@ -65,7 +65,7 @@ func (b *ContainerPortApplyConfiguration) WithContainerPort(value int32) *Contai // WithProtocol sets the Protocol field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Protocol field is set to the value of the last call. 
-func (b *ContainerPortApplyConfiguration) WithProtocol(value v1.Protocol) *ContainerPortApplyConfiguration { +func (b *ContainerPortApplyConfiguration) WithProtocol(value corev1.Protocol) *ContainerPortApplyConfiguration { b.Protocol = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerresizepolicy.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerresizepolicy.go index ea60e3d98..d45dbceaf 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerresizepolicy.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerresizepolicy.go @@ -19,14 +19,14 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" ) // ContainerResizePolicyApplyConfiguration represents a declarative configuration of the ContainerResizePolicy type for use // with apply. type ContainerResizePolicyApplyConfiguration struct { - ResourceName *v1.ResourceName `json:"resourceName,omitempty"` - RestartPolicy *v1.ResourceResizeRestartPolicy `json:"restartPolicy,omitempty"` + ResourceName *corev1.ResourceName `json:"resourceName,omitempty"` + RestartPolicy *corev1.ResourceResizeRestartPolicy `json:"restartPolicy,omitempty"` } // ContainerResizePolicyApplyConfiguration constructs a declarative configuration of the ContainerResizePolicy type for use with @@ -38,7 +38,7 @@ func ContainerResizePolicy() *ContainerResizePolicyApplyConfiguration { // WithResourceName sets the ResourceName field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the ResourceName field is set to the value of the last call. -func (b *ContainerResizePolicyApplyConfiguration) WithResourceName(value v1.ResourceName) *ContainerResizePolicyApplyConfiguration { +func (b *ContainerResizePolicyApplyConfiguration) WithResourceName(value corev1.ResourceName) *ContainerResizePolicyApplyConfiguration { b.ResourceName = &value return b } @@ -46,7 +46,7 @@ func (b *ContainerResizePolicyApplyConfiguration) WithResourceName(value v1.Reso // WithRestartPolicy sets the RestartPolicy field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the RestartPolicy field is set to the value of the last call. -func (b *ContainerResizePolicyApplyConfiguration) WithRestartPolicy(value v1.ResourceResizeRestartPolicy) *ContainerResizePolicyApplyConfiguration { +func (b *ContainerResizePolicyApplyConfiguration) WithRestartPolicy(value corev1.ResourceResizeRestartPolicy) *ContainerResizePolicyApplyConfiguration { b.RestartPolicy = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerstaterunning.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerstaterunning.go index 6eec9f7f2..0ed59c177 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerstaterunning.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerstaterunning.go @@ -19,13 +19,13 @@ limitations under the License. package v1 import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) // ContainerStateRunningApplyConfiguration represents a declarative configuration of the ContainerStateRunning type for use // with apply. 
type ContainerStateRunningApplyConfiguration struct { - StartedAt *v1.Time `json:"startedAt,omitempty"` + StartedAt *metav1.Time `json:"startedAt,omitempty"` } // ContainerStateRunningApplyConfiguration constructs a declarative configuration of the ContainerStateRunning type for use with @@ -37,7 +37,7 @@ func ContainerStateRunning() *ContainerStateRunningApplyConfiguration { // WithStartedAt sets the StartedAt field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the StartedAt field is set to the value of the last call. -func (b *ContainerStateRunningApplyConfiguration) WithStartedAt(value v1.Time) *ContainerStateRunningApplyConfiguration { +func (b *ContainerStateRunningApplyConfiguration) WithStartedAt(value metav1.Time) *ContainerStateRunningApplyConfiguration { b.StartedAt = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerstateterminated.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerstateterminated.go index b067aa211..cfadd93c9 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerstateterminated.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerstateterminated.go @@ -19,19 +19,19 @@ limitations under the License. package v1 import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) // ContainerStateTerminatedApplyConfiguration represents a declarative configuration of the ContainerStateTerminated type for use // with apply. type ContainerStateTerminatedApplyConfiguration struct { - ExitCode *int32 `json:"exitCode,omitempty"` - Signal *int32 `json:"signal,omitempty"` - Reason *string `json:"reason,omitempty"` - Message *string `json:"message,omitempty"` - StartedAt *v1.Time `json:"startedAt,omitempty"` - FinishedAt *v1.Time `json:"finishedAt,omitempty"` - ContainerID *string `json:"containerID,omitempty"` + ExitCode *int32 `json:"exitCode,omitempty"` + Signal *int32 `json:"signal,omitempty"` + Reason *string `json:"reason,omitempty"` + Message *string `json:"message,omitempty"` + StartedAt *metav1.Time `json:"startedAt,omitempty"` + FinishedAt *metav1.Time `json:"finishedAt,omitempty"` + ContainerID *string `json:"containerID,omitempty"` } // ContainerStateTerminatedApplyConfiguration constructs a declarative configuration of the ContainerStateTerminated type for use with @@ -75,7 +75,7 @@ func (b *ContainerStateTerminatedApplyConfiguration) WithMessage(value string) * // WithStartedAt sets the StartedAt field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the StartedAt field is set to the value of the last call. -func (b *ContainerStateTerminatedApplyConfiguration) WithStartedAt(value v1.Time) *ContainerStateTerminatedApplyConfiguration { +func (b *ContainerStateTerminatedApplyConfiguration) WithStartedAt(value metav1.Time) *ContainerStateTerminatedApplyConfiguration { b.StartedAt = &value return b } @@ -83,7 +83,7 @@ func (b *ContainerStateTerminatedApplyConfiguration) WithStartedAt(value v1.Time // WithFinishedAt sets the FinishedAt field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the FinishedAt field is set to the value of the last call. 
-func (b *ContainerStateTerminatedApplyConfiguration) WithFinishedAt(value v1.Time) *ContainerStateTerminatedApplyConfiguration { +func (b *ContainerStateTerminatedApplyConfiguration) WithFinishedAt(value metav1.Time) *ContainerStateTerminatedApplyConfiguration { b.FinishedAt = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/emptydirvolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/emptydirvolumesource.go index a619fdb07..63e9f56ab 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/emptydirvolumesource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/emptydirvolumesource.go @@ -19,15 +19,15 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" resource "k8s.io/apimachinery/pkg/api/resource" ) // EmptyDirVolumeSourceApplyConfiguration represents a declarative configuration of the EmptyDirVolumeSource type for use // with apply. type EmptyDirVolumeSourceApplyConfiguration struct { - Medium *v1.StorageMedium `json:"medium,omitempty"` - SizeLimit *resource.Quantity `json:"sizeLimit,omitempty"` + Medium *corev1.StorageMedium `json:"medium,omitempty"` + SizeLimit *resource.Quantity `json:"sizeLimit,omitempty"` } // EmptyDirVolumeSourceApplyConfiguration constructs a declarative configuration of the EmptyDirVolumeSource type for use with @@ -39,7 +39,7 @@ func EmptyDirVolumeSource() *EmptyDirVolumeSourceApplyConfiguration { // WithMedium sets the Medium field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Medium field is set to the value of the last call. -func (b *EmptyDirVolumeSourceApplyConfiguration) WithMedium(value v1.StorageMedium) *EmptyDirVolumeSourceApplyConfiguration { +func (b *EmptyDirVolumeSourceApplyConfiguration) WithMedium(value corev1.StorageMedium) *EmptyDirVolumeSourceApplyConfiguration { b.Medium = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/endpointport.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/endpointport.go index d0d96230c..05ee64ddc 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/endpointport.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/endpointport.go @@ -19,16 +19,16 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" ) // EndpointPortApplyConfiguration represents a declarative configuration of the EndpointPort type for use // with apply. type EndpointPortApplyConfiguration struct { - Name *string `json:"name,omitempty"` - Port *int32 `json:"port,omitempty"` - Protocol *v1.Protocol `json:"protocol,omitempty"` - AppProtocol *string `json:"appProtocol,omitempty"` + Name *string `json:"name,omitempty"` + Port *int32 `json:"port,omitempty"` + Protocol *corev1.Protocol `json:"protocol,omitempty"` + AppProtocol *string `json:"appProtocol,omitempty"` } // EndpointPortApplyConfiguration constructs a declarative configuration of the EndpointPort type for use with @@ -56,7 +56,7 @@ func (b *EndpointPortApplyConfiguration) WithPort(value int32) *EndpointPortAppl // WithProtocol sets the Protocol field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Protocol field is set to the value of the last call. 
-func (b *EndpointPortApplyConfiguration) WithProtocol(value v1.Protocol) *EndpointPortApplyConfiguration { +func (b *EndpointPortApplyConfiguration) WithProtocol(value corev1.Protocol) *EndpointPortApplyConfiguration { b.Protocol = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/endpoints.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/endpoints.go index 98dc69aaa..d2f910196 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/endpoints.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/endpoints.go @@ -19,20 +19,20 @@ limitations under the License. package v1 import ( - apicorev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + corev1 "k8s.io/api/core/v1" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" internal "k8s.io/client-go/applyconfigurations/internal" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) // EndpointsApplyConfiguration represents a declarative configuration of the Endpoints type for use // with apply. type EndpointsApplyConfiguration struct { - v1.TypeMetaApplyConfiguration `json:",inline"` - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Subsets []EndpointSubsetApplyConfiguration `json:"subsets,omitempty"` + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Subsets []EndpointSubsetApplyConfiguration `json:"subsets,omitempty"` } // Endpoints constructs a declarative configuration of the Endpoints type for use with @@ -57,18 +57,18 @@ func Endpoints(name, namespace string) *EndpointsApplyConfiguration { // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! -func ExtractEndpoints(endpoints *apicorev1.Endpoints, fieldManager string) (*EndpointsApplyConfiguration, error) { +func ExtractEndpoints(endpoints *corev1.Endpoints, fieldManager string) (*EndpointsApplyConfiguration, error) { return extractEndpoints(endpoints, fieldManager, "") } // ExtractEndpointsStatus is the same as ExtractEndpoints except // that it extracts the status subresource applied configuration. // Experimental! -func ExtractEndpointsStatus(endpoints *apicorev1.Endpoints, fieldManager string) (*EndpointsApplyConfiguration, error) { +func ExtractEndpointsStatus(endpoints *corev1.Endpoints, fieldManager string) (*EndpointsApplyConfiguration, error) { return extractEndpoints(endpoints, fieldManager, "status") } -func extractEndpoints(endpoints *apicorev1.Endpoints, fieldManager string, subresource string) (*EndpointsApplyConfiguration, error) { +func extractEndpoints(endpoints *corev1.Endpoints, fieldManager string, subresource string) (*EndpointsApplyConfiguration, error) { b := &EndpointsApplyConfiguration{} err := managedfields.ExtractInto(endpoints, internal.Parser().Type("io.k8s.api.core.v1.Endpoints"), fieldManager, b, subresource) if err != nil { @@ -86,7 +86,7 @@ func extractEndpoints(endpoints *apicorev1.Endpoints, fieldManager string, subre // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. 
func (b *EndpointsApplyConfiguration) WithKind(value string) *EndpointsApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -94,7 +94,7 @@ func (b *EndpointsApplyConfiguration) WithKind(value string) *EndpointsApplyConf // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *EndpointsApplyConfiguration) WithAPIVersion(value string) *EndpointsApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -103,7 +103,7 @@ func (b *EndpointsApplyConfiguration) WithAPIVersion(value string) *EndpointsApp // If called multiple times, the Name field is set to the value of the last call. func (b *EndpointsApplyConfiguration) WithName(value string) *EndpointsApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -112,7 +112,7 @@ func (b *EndpointsApplyConfiguration) WithName(value string) *EndpointsApplyConf // If called multiple times, the GenerateName field is set to the value of the last call. func (b *EndpointsApplyConfiguration) WithGenerateName(value string) *EndpointsApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -121,7 +121,7 @@ func (b *EndpointsApplyConfiguration) WithGenerateName(value string) *EndpointsA // If called multiple times, the Namespace field is set to the value of the last call. func (b *EndpointsApplyConfiguration) WithNamespace(value string) *EndpointsApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -130,7 +130,7 @@ func (b *EndpointsApplyConfiguration) WithNamespace(value string) *EndpointsAppl // If called multiple times, the UID field is set to the value of the last call. func (b *EndpointsApplyConfiguration) WithUID(value types.UID) *EndpointsApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -139,7 +139,7 @@ func (b *EndpointsApplyConfiguration) WithUID(value types.UID) *EndpointsApplyCo // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *EndpointsApplyConfiguration) WithResourceVersion(value string) *EndpointsApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -148,25 +148,25 @@ func (b *EndpointsApplyConfiguration) WithResourceVersion(value string) *Endpoin // If called multiple times, the Generation field is set to the value of the last call. func (b *EndpointsApplyConfiguration) WithGeneration(value int64) *EndpointsApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CreationTimestamp field is set to the value of the last call. 
-func (b *EndpointsApplyConfiguration) WithCreationTimestamp(value metav1.Time) *EndpointsApplyConfiguration { +func (b *EndpointsApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *EndpointsApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. -func (b *EndpointsApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *EndpointsApplyConfiguration { +func (b *EndpointsApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *EndpointsApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -175,7 +175,7 @@ func (b *EndpointsApplyConfiguration) WithDeletionTimestamp(value metav1.Time) * // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *EndpointsApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *EndpointsApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -185,11 +185,11 @@ func (b *EndpointsApplyConfiguration) WithDeletionGracePeriodSeconds(value int64 // overwriting an existing map entries in Labels field with the same key. func (b *EndpointsApplyConfiguration) WithLabels(entries map[string]string) *EndpointsApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -200,11 +200,11 @@ func (b *EndpointsApplyConfiguration) WithLabels(entries map[string]string) *End // overwriting an existing map entries in Annotations field with the same key. func (b *EndpointsApplyConfiguration) WithAnnotations(entries map[string]string) *EndpointsApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -212,13 +212,13 @@ func (b *EndpointsApplyConfiguration) WithAnnotations(entries map[string]string) // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the OwnerReferences field. 
-func (b *EndpointsApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *EndpointsApplyConfiguration { +func (b *EndpointsApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *EndpointsApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -229,14 +229,14 @@ func (b *EndpointsApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerRef func (b *EndpointsApplyConfiguration) WithFinalizers(values ...string) *EndpointsApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } func (b *EndpointsApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} } } @@ -256,5 +256,5 @@ func (b *EndpointsApplyConfiguration) WithSubsets(values ...*EndpointSubsetApply // GetName retrieves the value of the Name field in the declarative configuration. func (b *EndpointsApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/ephemeralcontainer.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/ephemeralcontainer.go index a15ac6ec3..4b74439fc 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/ephemeralcontainer.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/ephemeralcontainer.go @@ -39,7 +39,7 @@ func EphemeralContainer() *EphemeralContainerApplyConfiguration { // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Name field is set to the value of the last call. func (b *EphemeralContainerApplyConfiguration) WithName(value string) *EphemeralContainerApplyConfiguration { - b.Name = &value + b.EphemeralContainerCommonApplyConfiguration.Name = &value return b } @@ -47,7 +47,7 @@ func (b *EphemeralContainerApplyConfiguration) WithName(value string) *Ephemeral // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Image field is set to the value of the last call. func (b *EphemeralContainerApplyConfiguration) WithImage(value string) *EphemeralContainerApplyConfiguration { - b.Image = &value + b.EphemeralContainerCommonApplyConfiguration.Image = &value return b } @@ -56,7 +56,7 @@ func (b *EphemeralContainerApplyConfiguration) WithImage(value string) *Ephemera // If called multiple times, values provided by each call will be appended to the Command field. 
func (b *EphemeralContainerApplyConfiguration) WithCommand(values ...string) *EphemeralContainerApplyConfiguration { for i := range values { - b.Command = append(b.Command, values[i]) + b.EphemeralContainerCommonApplyConfiguration.Command = append(b.EphemeralContainerCommonApplyConfiguration.Command, values[i]) } return b } @@ -66,7 +66,7 @@ func (b *EphemeralContainerApplyConfiguration) WithCommand(values ...string) *Ep // If called multiple times, values provided by each call will be appended to the Args field. func (b *EphemeralContainerApplyConfiguration) WithArgs(values ...string) *EphemeralContainerApplyConfiguration { for i := range values { - b.Args = append(b.Args, values[i]) + b.EphemeralContainerCommonApplyConfiguration.Args = append(b.EphemeralContainerCommonApplyConfiguration.Args, values[i]) } return b } @@ -75,7 +75,7 @@ func (b *EphemeralContainerApplyConfiguration) WithArgs(values ...string) *Ephem // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the WorkingDir field is set to the value of the last call. func (b *EphemeralContainerApplyConfiguration) WithWorkingDir(value string) *EphemeralContainerApplyConfiguration { - b.WorkingDir = &value + b.EphemeralContainerCommonApplyConfiguration.WorkingDir = &value return b } @@ -87,7 +87,7 @@ func (b *EphemeralContainerApplyConfiguration) WithPorts(values ...*ContainerPor if values[i] == nil { panic("nil value passed to WithPorts") } - b.Ports = append(b.Ports, *values[i]) + b.EphemeralContainerCommonApplyConfiguration.Ports = append(b.EphemeralContainerCommonApplyConfiguration.Ports, *values[i]) } return b } @@ -100,7 +100,7 @@ func (b *EphemeralContainerApplyConfiguration) WithEnvFrom(values ...*EnvFromSou if values[i] == nil { panic("nil value passed to WithEnvFrom") } - b.EnvFrom = append(b.EnvFrom, *values[i]) + b.EphemeralContainerCommonApplyConfiguration.EnvFrom = append(b.EphemeralContainerCommonApplyConfiguration.EnvFrom, *values[i]) } return b } @@ -113,7 +113,7 @@ func (b *EphemeralContainerApplyConfiguration) WithEnv(values ...*EnvVarApplyCon if values[i] == nil { panic("nil value passed to WithEnv") } - b.Env = append(b.Env, *values[i]) + b.EphemeralContainerCommonApplyConfiguration.Env = append(b.EphemeralContainerCommonApplyConfiguration.Env, *values[i]) } return b } @@ -122,7 +122,7 @@ func (b *EphemeralContainerApplyConfiguration) WithEnv(values ...*EnvVarApplyCon // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Resources field is set to the value of the last call. func (b *EphemeralContainerApplyConfiguration) WithResources(value *ResourceRequirementsApplyConfiguration) *EphemeralContainerApplyConfiguration { - b.Resources = value + b.EphemeralContainerCommonApplyConfiguration.Resources = value return b } @@ -134,7 +134,7 @@ func (b *EphemeralContainerApplyConfiguration) WithResizePolicy(values ...*Conta if values[i] == nil { panic("nil value passed to WithResizePolicy") } - b.ResizePolicy = append(b.ResizePolicy, *values[i]) + b.EphemeralContainerCommonApplyConfiguration.ResizePolicy = append(b.EphemeralContainerCommonApplyConfiguration.ResizePolicy, *values[i]) } return b } @@ -143,7 +143,7 @@ func (b *EphemeralContainerApplyConfiguration) WithResizePolicy(values ...*Conta // and returns the receiver, so that objects can be built by chaining "With" function invocations. 
// If called multiple times, the RestartPolicy field is set to the value of the last call. func (b *EphemeralContainerApplyConfiguration) WithRestartPolicy(value corev1.ContainerRestartPolicy) *EphemeralContainerApplyConfiguration { - b.RestartPolicy = &value + b.EphemeralContainerCommonApplyConfiguration.RestartPolicy = &value return b } @@ -155,7 +155,7 @@ func (b *EphemeralContainerApplyConfiguration) WithVolumeMounts(values ...*Volum if values[i] == nil { panic("nil value passed to WithVolumeMounts") } - b.VolumeMounts = append(b.VolumeMounts, *values[i]) + b.EphemeralContainerCommonApplyConfiguration.VolumeMounts = append(b.EphemeralContainerCommonApplyConfiguration.VolumeMounts, *values[i]) } return b } @@ -168,7 +168,7 @@ func (b *EphemeralContainerApplyConfiguration) WithVolumeDevices(values ...*Volu if values[i] == nil { panic("nil value passed to WithVolumeDevices") } - b.VolumeDevices = append(b.VolumeDevices, *values[i]) + b.EphemeralContainerCommonApplyConfiguration.VolumeDevices = append(b.EphemeralContainerCommonApplyConfiguration.VolumeDevices, *values[i]) } return b } @@ -177,7 +177,7 @@ func (b *EphemeralContainerApplyConfiguration) WithVolumeDevices(values ...*Volu // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the LivenessProbe field is set to the value of the last call. func (b *EphemeralContainerApplyConfiguration) WithLivenessProbe(value *ProbeApplyConfiguration) *EphemeralContainerApplyConfiguration { - b.LivenessProbe = value + b.EphemeralContainerCommonApplyConfiguration.LivenessProbe = value return b } @@ -185,7 +185,7 @@ func (b *EphemeralContainerApplyConfiguration) WithLivenessProbe(value *ProbeApp // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the ReadinessProbe field is set to the value of the last call. func (b *EphemeralContainerApplyConfiguration) WithReadinessProbe(value *ProbeApplyConfiguration) *EphemeralContainerApplyConfiguration { - b.ReadinessProbe = value + b.EphemeralContainerCommonApplyConfiguration.ReadinessProbe = value return b } @@ -193,7 +193,7 @@ func (b *EphemeralContainerApplyConfiguration) WithReadinessProbe(value *ProbeAp // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the StartupProbe field is set to the value of the last call. func (b *EphemeralContainerApplyConfiguration) WithStartupProbe(value *ProbeApplyConfiguration) *EphemeralContainerApplyConfiguration { - b.StartupProbe = value + b.EphemeralContainerCommonApplyConfiguration.StartupProbe = value return b } @@ -201,7 +201,7 @@ func (b *EphemeralContainerApplyConfiguration) WithStartupProbe(value *ProbeAppl // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Lifecycle field is set to the value of the last call. func (b *EphemeralContainerApplyConfiguration) WithLifecycle(value *LifecycleApplyConfiguration) *EphemeralContainerApplyConfiguration { - b.Lifecycle = value + b.EphemeralContainerCommonApplyConfiguration.Lifecycle = value return b } @@ -209,7 +209,7 @@ func (b *EphemeralContainerApplyConfiguration) WithLifecycle(value *LifecycleApp // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the TerminationMessagePath field is set to the value of the last call. 
func (b *EphemeralContainerApplyConfiguration) WithTerminationMessagePath(value string) *EphemeralContainerApplyConfiguration { - b.TerminationMessagePath = &value + b.EphemeralContainerCommonApplyConfiguration.TerminationMessagePath = &value return b } @@ -217,7 +217,7 @@ func (b *EphemeralContainerApplyConfiguration) WithTerminationMessagePath(value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the TerminationMessagePolicy field is set to the value of the last call. func (b *EphemeralContainerApplyConfiguration) WithTerminationMessagePolicy(value corev1.TerminationMessagePolicy) *EphemeralContainerApplyConfiguration { - b.TerminationMessagePolicy = &value + b.EphemeralContainerCommonApplyConfiguration.TerminationMessagePolicy = &value return b } @@ -225,7 +225,7 @@ func (b *EphemeralContainerApplyConfiguration) WithTerminationMessagePolicy(valu // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the ImagePullPolicy field is set to the value of the last call. func (b *EphemeralContainerApplyConfiguration) WithImagePullPolicy(value corev1.PullPolicy) *EphemeralContainerApplyConfiguration { - b.ImagePullPolicy = &value + b.EphemeralContainerCommonApplyConfiguration.ImagePullPolicy = &value return b } @@ -233,7 +233,7 @@ func (b *EphemeralContainerApplyConfiguration) WithImagePullPolicy(value corev1. // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the SecurityContext field is set to the value of the last call. func (b *EphemeralContainerApplyConfiguration) WithSecurityContext(value *SecurityContextApplyConfiguration) *EphemeralContainerApplyConfiguration { - b.SecurityContext = value + b.EphemeralContainerCommonApplyConfiguration.SecurityContext = value return b } @@ -241,7 +241,7 @@ func (b *EphemeralContainerApplyConfiguration) WithSecurityContext(value *Securi // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Stdin field is set to the value of the last call. func (b *EphemeralContainerApplyConfiguration) WithStdin(value bool) *EphemeralContainerApplyConfiguration { - b.Stdin = &value + b.EphemeralContainerCommonApplyConfiguration.Stdin = &value return b } @@ -249,7 +249,7 @@ func (b *EphemeralContainerApplyConfiguration) WithStdin(value bool) *EphemeralC // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the StdinOnce field is set to the value of the last call. func (b *EphemeralContainerApplyConfiguration) WithStdinOnce(value bool) *EphemeralContainerApplyConfiguration { - b.StdinOnce = &value + b.EphemeralContainerCommonApplyConfiguration.StdinOnce = &value return b } @@ -257,7 +257,7 @@ func (b *EphemeralContainerApplyConfiguration) WithStdinOnce(value bool) *Epheme // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the TTY field is set to the value of the last call. 
func (b *EphemeralContainerApplyConfiguration) WithTTY(value bool) *EphemeralContainerApplyConfiguration { - b.TTY = &value + b.EphemeralContainerCommonApplyConfiguration.TTY = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/event.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/event.go index 65d6577ab..9496ea773 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/event.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/event.go @@ -19,33 +19,33 @@ limitations under the License. package v1 import ( - apicorev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + corev1 "k8s.io/api/core/v1" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" internal "k8s.io/client-go/applyconfigurations/internal" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) // EventApplyConfiguration represents a declarative configuration of the Event type for use // with apply. type EventApplyConfiguration struct { - v1.TypeMetaApplyConfiguration `json:",inline"` - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - InvolvedObject *ObjectReferenceApplyConfiguration `json:"involvedObject,omitempty"` - Reason *string `json:"reason,omitempty"` - Message *string `json:"message,omitempty"` - Source *EventSourceApplyConfiguration `json:"source,omitempty"` - FirstTimestamp *metav1.Time `json:"firstTimestamp,omitempty"` - LastTimestamp *metav1.Time `json:"lastTimestamp,omitempty"` - Count *int32 `json:"count,omitempty"` - Type *string `json:"type,omitempty"` - EventTime *metav1.MicroTime `json:"eventTime,omitempty"` - Series *EventSeriesApplyConfiguration `json:"series,omitempty"` - Action *string `json:"action,omitempty"` - Related *ObjectReferenceApplyConfiguration `json:"related,omitempty"` - ReportingController *string `json:"reportingComponent,omitempty"` - ReportingInstance *string `json:"reportingInstance,omitempty"` + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + InvolvedObject *ObjectReferenceApplyConfiguration `json:"involvedObject,omitempty"` + Reason *string `json:"reason,omitempty"` + Message *string `json:"message,omitempty"` + Source *EventSourceApplyConfiguration `json:"source,omitempty"` + FirstTimestamp *apismetav1.Time `json:"firstTimestamp,omitempty"` + LastTimestamp *apismetav1.Time `json:"lastTimestamp,omitempty"` + Count *int32 `json:"count,omitempty"` + Type *string `json:"type,omitempty"` + EventTime *apismetav1.MicroTime `json:"eventTime,omitempty"` + Series *EventSeriesApplyConfiguration `json:"series,omitempty"` + Action *string `json:"action,omitempty"` + Related *ObjectReferenceApplyConfiguration `json:"related,omitempty"` + ReportingController *string `json:"reportingComponent,omitempty"` + ReportingInstance *string `json:"reportingInstance,omitempty"` } // Event constructs a declarative configuration of the Event type for use with @@ -70,18 +70,18 @@ func Event(name, namespace string) *EventApplyConfiguration { // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! 
-func ExtractEvent(event *apicorev1.Event, fieldManager string) (*EventApplyConfiguration, error) { +func ExtractEvent(event *corev1.Event, fieldManager string) (*EventApplyConfiguration, error) { return extractEvent(event, fieldManager, "") } // ExtractEventStatus is the same as ExtractEvent except // that it extracts the status subresource applied configuration. // Experimental! -func ExtractEventStatus(event *apicorev1.Event, fieldManager string) (*EventApplyConfiguration, error) { +func ExtractEventStatus(event *corev1.Event, fieldManager string) (*EventApplyConfiguration, error) { return extractEvent(event, fieldManager, "status") } -func extractEvent(event *apicorev1.Event, fieldManager string, subresource string) (*EventApplyConfiguration, error) { +func extractEvent(event *corev1.Event, fieldManager string, subresource string) (*EventApplyConfiguration, error) { b := &EventApplyConfiguration{} err := managedfields.ExtractInto(event, internal.Parser().Type("io.k8s.api.core.v1.Event"), fieldManager, b, subresource) if err != nil { @@ -99,7 +99,7 @@ func extractEvent(event *apicorev1.Event, fieldManager string, subresource strin // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *EventApplyConfiguration) WithKind(value string) *EventApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -107,7 +107,7 @@ func (b *EventApplyConfiguration) WithKind(value string) *EventApplyConfiguratio // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *EventApplyConfiguration) WithAPIVersion(value string) *EventApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -116,7 +116,7 @@ func (b *EventApplyConfiguration) WithAPIVersion(value string) *EventApplyConfig // If called multiple times, the Name field is set to the value of the last call. func (b *EventApplyConfiguration) WithName(value string) *EventApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -125,7 +125,7 @@ func (b *EventApplyConfiguration) WithName(value string) *EventApplyConfiguratio // If called multiple times, the GenerateName field is set to the value of the last call. func (b *EventApplyConfiguration) WithGenerateName(value string) *EventApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -134,7 +134,7 @@ func (b *EventApplyConfiguration) WithGenerateName(value string) *EventApplyConf // If called multiple times, the Namespace field is set to the value of the last call. func (b *EventApplyConfiguration) WithNamespace(value string) *EventApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -143,7 +143,7 @@ func (b *EventApplyConfiguration) WithNamespace(value string) *EventApplyConfigu // If called multiple times, the UID field is set to the value of the last call. 
func (b *EventApplyConfiguration) WithUID(value types.UID) *EventApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -152,7 +152,7 @@ func (b *EventApplyConfiguration) WithUID(value types.UID) *EventApplyConfigurat // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *EventApplyConfiguration) WithResourceVersion(value string) *EventApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -161,25 +161,25 @@ func (b *EventApplyConfiguration) WithResourceVersion(value string) *EventApplyC // If called multiple times, the Generation field is set to the value of the last call. func (b *EventApplyConfiguration) WithGeneration(value int64) *EventApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CreationTimestamp field is set to the value of the last call. -func (b *EventApplyConfiguration) WithCreationTimestamp(value metav1.Time) *EventApplyConfiguration { +func (b *EventApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *EventApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. -func (b *EventApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *EventApplyConfiguration { +func (b *EventApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *EventApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -188,7 +188,7 @@ func (b *EventApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *Even // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *EventApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *EventApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -198,11 +198,11 @@ func (b *EventApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *E // overwriting an existing map entries in Labels field with the same key. 
func (b *EventApplyConfiguration) WithLabels(entries map[string]string) *EventApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -213,11 +213,11 @@ func (b *EventApplyConfiguration) WithLabels(entries map[string]string) *EventAp // overwriting an existing map entries in Annotations field with the same key. func (b *EventApplyConfiguration) WithAnnotations(entries map[string]string) *EventApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -225,13 +225,13 @@ func (b *EventApplyConfiguration) WithAnnotations(entries map[string]string) *Ev // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the OwnerReferences field. -func (b *EventApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *EventApplyConfiguration { +func (b *EventApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *EventApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -242,14 +242,14 @@ func (b *EventApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferen func (b *EventApplyConfiguration) WithFinalizers(values ...string) *EventApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } func (b *EventApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} } } @@ -288,7 +288,7 @@ func (b *EventApplyConfiguration) WithSource(value *EventSourceApplyConfiguratio // WithFirstTimestamp sets the FirstTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the FirstTimestamp field is set to the value of the last call. 
-func (b *EventApplyConfiguration) WithFirstTimestamp(value metav1.Time) *EventApplyConfiguration { +func (b *EventApplyConfiguration) WithFirstTimestamp(value apismetav1.Time) *EventApplyConfiguration { b.FirstTimestamp = &value return b } @@ -296,7 +296,7 @@ func (b *EventApplyConfiguration) WithFirstTimestamp(value metav1.Time) *EventAp // WithLastTimestamp sets the LastTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the LastTimestamp field is set to the value of the last call. -func (b *EventApplyConfiguration) WithLastTimestamp(value metav1.Time) *EventApplyConfiguration { +func (b *EventApplyConfiguration) WithLastTimestamp(value apismetav1.Time) *EventApplyConfiguration { b.LastTimestamp = &value return b } @@ -320,7 +320,7 @@ func (b *EventApplyConfiguration) WithType(value string) *EventApplyConfiguratio // WithEventTime sets the EventTime field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the EventTime field is set to the value of the last call. -func (b *EventApplyConfiguration) WithEventTime(value metav1.MicroTime) *EventApplyConfiguration { +func (b *EventApplyConfiguration) WithEventTime(value apismetav1.MicroTime) *EventApplyConfiguration { b.EventTime = &value return b } @@ -368,5 +368,5 @@ func (b *EventApplyConfiguration) WithReportingInstance(value string) *EventAppl // GetName retrieves the value of the Name field in the declarative configuration. func (b *EventApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/eventseries.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/eventseries.go index 18069c0d1..c90954bcc 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/eventseries.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/eventseries.go @@ -19,14 +19,14 @@ limitations under the License. package v1 import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) // EventSeriesApplyConfiguration represents a declarative configuration of the EventSeries type for use // with apply. type EventSeriesApplyConfiguration struct { - Count *int32 `json:"count,omitempty"` - LastObservedTime *v1.MicroTime `json:"lastObservedTime,omitempty"` + Count *int32 `json:"count,omitempty"` + LastObservedTime *metav1.MicroTime `json:"lastObservedTime,omitempty"` } // EventSeriesApplyConfiguration constructs a declarative configuration of the EventSeries type for use with @@ -46,7 +46,7 @@ func (b *EventSeriesApplyConfiguration) WithCount(value int32) *EventSeriesApply // WithLastObservedTime sets the LastObservedTime field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the LastObservedTime field is set to the value of the last call. 
-func (b *EventSeriesApplyConfiguration) WithLastObservedTime(value v1.MicroTime) *EventSeriesApplyConfiguration { +func (b *EventSeriesApplyConfiguration) WithLastObservedTime(value metav1.MicroTime) *EventSeriesApplyConfiguration { b.LastObservedTime = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/hostpathvolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/hostpathvolumesource.go index 10dfedfde..6a41d67cd 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/hostpathvolumesource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/hostpathvolumesource.go @@ -19,14 +19,14 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" ) // HostPathVolumeSourceApplyConfiguration represents a declarative configuration of the HostPathVolumeSource type for use // with apply. type HostPathVolumeSourceApplyConfiguration struct { - Path *string `json:"path,omitempty"` - Type *v1.HostPathType `json:"type,omitempty"` + Path *string `json:"path,omitempty"` + Type *corev1.HostPathType `json:"type,omitempty"` } // HostPathVolumeSourceApplyConfiguration constructs a declarative configuration of the HostPathVolumeSource type for use with @@ -46,7 +46,7 @@ func (b *HostPathVolumeSourceApplyConfiguration) WithPath(value string) *HostPat // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. -func (b *HostPathVolumeSourceApplyConfiguration) WithType(value v1.HostPathType) *HostPathVolumeSourceApplyConfiguration { +func (b *HostPathVolumeSourceApplyConfiguration) WithType(value corev1.HostPathType) *HostPathVolumeSourceApplyConfiguration { b.Type = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/httpgetaction.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/httpgetaction.go index 5ecbc27fe..ca61c5ae2 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/httpgetaction.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/httpgetaction.go @@ -19,7 +19,7 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" intstr "k8s.io/apimachinery/pkg/util/intstr" ) @@ -29,7 +29,7 @@ type HTTPGetActionApplyConfiguration struct { Path *string `json:"path,omitempty"` Port *intstr.IntOrString `json:"port,omitempty"` Host *string `json:"host,omitempty"` - Scheme *v1.URIScheme `json:"scheme,omitempty"` + Scheme *corev1.URIScheme `json:"scheme,omitempty"` HTTPHeaders []HTTPHeaderApplyConfiguration `json:"httpHeaders,omitempty"` } @@ -66,7 +66,7 @@ func (b *HTTPGetActionApplyConfiguration) WithHost(value string) *HTTPGetActionA // WithScheme sets the Scheme field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Scheme field is set to the value of the last call. 
-func (b *HTTPGetActionApplyConfiguration) WithScheme(value v1.URIScheme) *HTTPGetActionApplyConfiguration { +func (b *HTTPGetActionApplyConfiguration) WithScheme(value corev1.URIScheme) *HTTPGetActionApplyConfiguration { b.Scheme = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/imagevolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/imagevolumesource.go index 340f15040..9a146e685 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/imagevolumesource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/imagevolumesource.go @@ -19,14 +19,14 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" ) // ImageVolumeSourceApplyConfiguration represents a declarative configuration of the ImageVolumeSource type for use // with apply. type ImageVolumeSourceApplyConfiguration struct { - Reference *string `json:"reference,omitempty"` - PullPolicy *v1.PullPolicy `json:"pullPolicy,omitempty"` + Reference *string `json:"reference,omitempty"` + PullPolicy *corev1.PullPolicy `json:"pullPolicy,omitempty"` } // ImageVolumeSourceApplyConfiguration constructs a declarative configuration of the ImageVolumeSource type for use with @@ -46,7 +46,7 @@ func (b *ImageVolumeSourceApplyConfiguration) WithReference(value string) *Image // WithPullPolicy sets the PullPolicy field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the PullPolicy field is set to the value of the last call. -func (b *ImageVolumeSourceApplyConfiguration) WithPullPolicy(value v1.PullPolicy) *ImageVolumeSourceApplyConfiguration { +func (b *ImageVolumeSourceApplyConfiguration) WithPullPolicy(value corev1.PullPolicy) *ImageVolumeSourceApplyConfiguration { b.PullPolicy = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/limitrange.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/limitrange.go index 7770200a0..517cc4cd3 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/limitrange.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/limitrange.go @@ -19,20 +19,20 @@ limitations under the License. package v1 import ( - apicorev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + corev1 "k8s.io/api/core/v1" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" internal "k8s.io/client-go/applyconfigurations/internal" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) // LimitRangeApplyConfiguration represents a declarative configuration of the LimitRange type for use // with apply. 
type LimitRangeApplyConfiguration struct { - v1.TypeMetaApplyConfiguration `json:",inline"` - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Spec *LimitRangeSpecApplyConfiguration `json:"spec,omitempty"` + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *LimitRangeSpecApplyConfiguration `json:"spec,omitempty"` } // LimitRange constructs a declarative configuration of the LimitRange type for use with @@ -57,18 +57,18 @@ func LimitRange(name, namespace string) *LimitRangeApplyConfiguration { // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! -func ExtractLimitRange(limitRange *apicorev1.LimitRange, fieldManager string) (*LimitRangeApplyConfiguration, error) { +func ExtractLimitRange(limitRange *corev1.LimitRange, fieldManager string) (*LimitRangeApplyConfiguration, error) { return extractLimitRange(limitRange, fieldManager, "") } // ExtractLimitRangeStatus is the same as ExtractLimitRange except // that it extracts the status subresource applied configuration. // Experimental! -func ExtractLimitRangeStatus(limitRange *apicorev1.LimitRange, fieldManager string) (*LimitRangeApplyConfiguration, error) { +func ExtractLimitRangeStatus(limitRange *corev1.LimitRange, fieldManager string) (*LimitRangeApplyConfiguration, error) { return extractLimitRange(limitRange, fieldManager, "status") } -func extractLimitRange(limitRange *apicorev1.LimitRange, fieldManager string, subresource string) (*LimitRangeApplyConfiguration, error) { +func extractLimitRange(limitRange *corev1.LimitRange, fieldManager string, subresource string) (*LimitRangeApplyConfiguration, error) { b := &LimitRangeApplyConfiguration{} err := managedfields.ExtractInto(limitRange, internal.Parser().Type("io.k8s.api.core.v1.LimitRange"), fieldManager, b, subresource) if err != nil { @@ -86,7 +86,7 @@ func extractLimitRange(limitRange *apicorev1.LimitRange, fieldManager string, su // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *LimitRangeApplyConfiguration) WithKind(value string) *LimitRangeApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -94,7 +94,7 @@ func (b *LimitRangeApplyConfiguration) WithKind(value string) *LimitRangeApplyCo // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *LimitRangeApplyConfiguration) WithAPIVersion(value string) *LimitRangeApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -103,7 +103,7 @@ func (b *LimitRangeApplyConfiguration) WithAPIVersion(value string) *LimitRangeA // If called multiple times, the Name field is set to the value of the last call. func (b *LimitRangeApplyConfiguration) WithName(value string) *LimitRangeApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -112,7 +112,7 @@ func (b *LimitRangeApplyConfiguration) WithName(value string) *LimitRangeApplyCo // If called multiple times, the GenerateName field is set to the value of the last call. 
func (b *LimitRangeApplyConfiguration) WithGenerateName(value string) *LimitRangeApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -121,7 +121,7 @@ func (b *LimitRangeApplyConfiguration) WithGenerateName(value string) *LimitRang // If called multiple times, the Namespace field is set to the value of the last call. func (b *LimitRangeApplyConfiguration) WithNamespace(value string) *LimitRangeApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -130,7 +130,7 @@ func (b *LimitRangeApplyConfiguration) WithNamespace(value string) *LimitRangeAp // If called multiple times, the UID field is set to the value of the last call. func (b *LimitRangeApplyConfiguration) WithUID(value types.UID) *LimitRangeApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -139,7 +139,7 @@ func (b *LimitRangeApplyConfiguration) WithUID(value types.UID) *LimitRangeApply // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *LimitRangeApplyConfiguration) WithResourceVersion(value string) *LimitRangeApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -148,25 +148,25 @@ func (b *LimitRangeApplyConfiguration) WithResourceVersion(value string) *LimitR // If called multiple times, the Generation field is set to the value of the last call. func (b *LimitRangeApplyConfiguration) WithGeneration(value int64) *LimitRangeApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CreationTimestamp field is set to the value of the last call. -func (b *LimitRangeApplyConfiguration) WithCreationTimestamp(value metav1.Time) *LimitRangeApplyConfiguration { +func (b *LimitRangeApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *LimitRangeApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. -func (b *LimitRangeApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *LimitRangeApplyConfiguration { +func (b *LimitRangeApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *LimitRangeApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -175,7 +175,7 @@ func (b *LimitRangeApplyConfiguration) WithDeletionTimestamp(value metav1.Time) // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. 
func (b *LimitRangeApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *LimitRangeApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -185,11 +185,11 @@ func (b *LimitRangeApplyConfiguration) WithDeletionGracePeriodSeconds(value int6 // overwriting an existing map entries in Labels field with the same key. func (b *LimitRangeApplyConfiguration) WithLabels(entries map[string]string) *LimitRangeApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -200,11 +200,11 @@ func (b *LimitRangeApplyConfiguration) WithLabels(entries map[string]string) *Li // overwriting an existing map entries in Annotations field with the same key. func (b *LimitRangeApplyConfiguration) WithAnnotations(entries map[string]string) *LimitRangeApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -212,13 +212,13 @@ func (b *LimitRangeApplyConfiguration) WithAnnotations(entries map[string]string // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the OwnerReferences field. 
-func (b *LimitRangeApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *LimitRangeApplyConfiguration { +func (b *LimitRangeApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *LimitRangeApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -229,14 +229,14 @@ func (b *LimitRangeApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerRe func (b *LimitRangeApplyConfiguration) WithFinalizers(values ...string) *LimitRangeApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } func (b *LimitRangeApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} } } @@ -251,5 +251,5 @@ func (b *LimitRangeApplyConfiguration) WithSpec(value *LimitRangeSpecApplyConfig // GetName retrieves the value of the Name field in the declarative configuration. func (b *LimitRangeApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/limitrangeitem.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/limitrangeitem.go index 61d8344e8..5ad8ac0e6 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/limitrangeitem.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/limitrangeitem.go @@ -19,18 +19,18 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" ) // LimitRangeItemApplyConfiguration represents a declarative configuration of the LimitRangeItem type for use // with apply. type LimitRangeItemApplyConfiguration struct { - Type *v1.LimitType `json:"type,omitempty"` - Max *v1.ResourceList `json:"max,omitempty"` - Min *v1.ResourceList `json:"min,omitempty"` - Default *v1.ResourceList `json:"default,omitempty"` - DefaultRequest *v1.ResourceList `json:"defaultRequest,omitempty"` - MaxLimitRequestRatio *v1.ResourceList `json:"maxLimitRequestRatio,omitempty"` + Type *corev1.LimitType `json:"type,omitempty"` + Max *corev1.ResourceList `json:"max,omitempty"` + Min *corev1.ResourceList `json:"min,omitempty"` + Default *corev1.ResourceList `json:"default,omitempty"` + DefaultRequest *corev1.ResourceList `json:"defaultRequest,omitempty"` + MaxLimitRequestRatio *corev1.ResourceList `json:"maxLimitRequestRatio,omitempty"` } // LimitRangeItemApplyConfiguration constructs a declarative configuration of the LimitRangeItem type for use with @@ -42,7 +42,7 @@ func LimitRangeItem() *LimitRangeItemApplyConfiguration { // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. 
-func (b *LimitRangeItemApplyConfiguration) WithType(value v1.LimitType) *LimitRangeItemApplyConfiguration { +func (b *LimitRangeItemApplyConfiguration) WithType(value corev1.LimitType) *LimitRangeItemApplyConfiguration { b.Type = &value return b } @@ -50,7 +50,7 @@ func (b *LimitRangeItemApplyConfiguration) WithType(value v1.LimitType) *LimitRa // WithMax sets the Max field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Max field is set to the value of the last call. -func (b *LimitRangeItemApplyConfiguration) WithMax(value v1.ResourceList) *LimitRangeItemApplyConfiguration { +func (b *LimitRangeItemApplyConfiguration) WithMax(value corev1.ResourceList) *LimitRangeItemApplyConfiguration { b.Max = &value return b } @@ -58,7 +58,7 @@ func (b *LimitRangeItemApplyConfiguration) WithMax(value v1.ResourceList) *Limit // WithMin sets the Min field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Min field is set to the value of the last call. -func (b *LimitRangeItemApplyConfiguration) WithMin(value v1.ResourceList) *LimitRangeItemApplyConfiguration { +func (b *LimitRangeItemApplyConfiguration) WithMin(value corev1.ResourceList) *LimitRangeItemApplyConfiguration { b.Min = &value return b } @@ -66,7 +66,7 @@ func (b *LimitRangeItemApplyConfiguration) WithMin(value v1.ResourceList) *Limit // WithDefault sets the Default field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Default field is set to the value of the last call. -func (b *LimitRangeItemApplyConfiguration) WithDefault(value v1.ResourceList) *LimitRangeItemApplyConfiguration { +func (b *LimitRangeItemApplyConfiguration) WithDefault(value corev1.ResourceList) *LimitRangeItemApplyConfiguration { b.Default = &value return b } @@ -74,7 +74,7 @@ func (b *LimitRangeItemApplyConfiguration) WithDefault(value v1.ResourceList) *L // WithDefaultRequest sets the DefaultRequest field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DefaultRequest field is set to the value of the last call. -func (b *LimitRangeItemApplyConfiguration) WithDefaultRequest(value v1.ResourceList) *LimitRangeItemApplyConfiguration { +func (b *LimitRangeItemApplyConfiguration) WithDefaultRequest(value corev1.ResourceList) *LimitRangeItemApplyConfiguration { b.DefaultRequest = &value return b } @@ -82,7 +82,7 @@ func (b *LimitRangeItemApplyConfiguration) WithDefaultRequest(value v1.ResourceL // WithMaxLimitRequestRatio sets the MaxLimitRequestRatio field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the MaxLimitRequestRatio field is set to the value of the last call. 
-func (b *LimitRangeItemApplyConfiguration) WithMaxLimitRequestRatio(value v1.ResourceList) *LimitRangeItemApplyConfiguration { +func (b *LimitRangeItemApplyConfiguration) WithMaxLimitRequestRatio(value corev1.ResourceList) *LimitRangeItemApplyConfiguration { b.MaxLimitRequestRatio = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/loadbalanceringress.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/loadbalanceringress.go index 1a7d99815..ae5c410a2 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/loadbalanceringress.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/loadbalanceringress.go @@ -19,7 +19,7 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" ) // LoadBalancerIngressApplyConfiguration represents a declarative configuration of the LoadBalancerIngress type for use @@ -27,7 +27,7 @@ import ( type LoadBalancerIngressApplyConfiguration struct { IP *string `json:"ip,omitempty"` Hostname *string `json:"hostname,omitempty"` - IPMode *v1.LoadBalancerIPMode `json:"ipMode,omitempty"` + IPMode *corev1.LoadBalancerIPMode `json:"ipMode,omitempty"` Ports []PortStatusApplyConfiguration `json:"ports,omitempty"` } @@ -56,7 +56,7 @@ func (b *LoadBalancerIngressApplyConfiguration) WithHostname(value string) *Load // WithIPMode sets the IPMode field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the IPMode field is set to the value of the last call. -func (b *LoadBalancerIngressApplyConfiguration) WithIPMode(value v1.LoadBalancerIPMode) *LoadBalancerIngressApplyConfiguration { +func (b *LoadBalancerIngressApplyConfiguration) WithIPMode(value corev1.LoadBalancerIPMode) *LoadBalancerIngressApplyConfiguration { b.IPMode = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/modifyvolumestatus.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/modifyvolumestatus.go index 704c32165..9a1a6af2a 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/modifyvolumestatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/modifyvolumestatus.go @@ -19,14 +19,14 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" ) // ModifyVolumeStatusApplyConfiguration represents a declarative configuration of the ModifyVolumeStatus type for use // with apply. type ModifyVolumeStatusApplyConfiguration struct { - TargetVolumeAttributesClassName *string `json:"targetVolumeAttributesClassName,omitempty"` - Status *v1.PersistentVolumeClaimModifyVolumeStatus `json:"status,omitempty"` + TargetVolumeAttributesClassName *string `json:"targetVolumeAttributesClassName,omitempty"` + Status *corev1.PersistentVolumeClaimModifyVolumeStatus `json:"status,omitempty"` } // ModifyVolumeStatusApplyConfiguration constructs a declarative configuration of the ModifyVolumeStatus type for use with @@ -46,7 +46,7 @@ func (b *ModifyVolumeStatusApplyConfiguration) WithTargetVolumeAttributesClassNa // WithStatus sets the Status field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Status field is set to the value of the last call. 
-func (b *ModifyVolumeStatusApplyConfiguration) WithStatus(value v1.PersistentVolumeClaimModifyVolumeStatus) *ModifyVolumeStatusApplyConfiguration { +func (b *ModifyVolumeStatusApplyConfiguration) WithStatus(value corev1.PersistentVolumeClaimModifyVolumeStatus) *ModifyVolumeStatusApplyConfiguration { b.Status = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/namespace.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/namespace.go index 0b77af183..0aba283ce 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/namespace.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/namespace.go @@ -19,21 +19,21 @@ limitations under the License. package v1 import ( - apicorev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + corev1 "k8s.io/api/core/v1" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" internal "k8s.io/client-go/applyconfigurations/internal" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) // NamespaceApplyConfiguration represents a declarative configuration of the Namespace type for use // with apply. type NamespaceApplyConfiguration struct { - v1.TypeMetaApplyConfiguration `json:",inline"` - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Spec *NamespaceSpecApplyConfiguration `json:"spec,omitempty"` - Status *NamespaceStatusApplyConfiguration `json:"status,omitempty"` + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *NamespaceSpecApplyConfiguration `json:"spec,omitempty"` + Status *NamespaceStatusApplyConfiguration `json:"status,omitempty"` } // Namespace constructs a declarative configuration of the Namespace type for use with @@ -57,18 +57,18 @@ func Namespace(name string) *NamespaceApplyConfiguration { // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! -func ExtractNamespace(namespace *apicorev1.Namespace, fieldManager string) (*NamespaceApplyConfiguration, error) { +func ExtractNamespace(namespace *corev1.Namespace, fieldManager string) (*NamespaceApplyConfiguration, error) { return extractNamespace(namespace, fieldManager, "") } // ExtractNamespaceStatus is the same as ExtractNamespace except // that it extracts the status subresource applied configuration. // Experimental! 
-func ExtractNamespaceStatus(namespace *apicorev1.Namespace, fieldManager string) (*NamespaceApplyConfiguration, error) { +func ExtractNamespaceStatus(namespace *corev1.Namespace, fieldManager string) (*NamespaceApplyConfiguration, error) { return extractNamespace(namespace, fieldManager, "status") } -func extractNamespace(namespace *apicorev1.Namespace, fieldManager string, subresource string) (*NamespaceApplyConfiguration, error) { +func extractNamespace(namespace *corev1.Namespace, fieldManager string, subresource string) (*NamespaceApplyConfiguration, error) { b := &NamespaceApplyConfiguration{} err := managedfields.ExtractInto(namespace, internal.Parser().Type("io.k8s.api.core.v1.Namespace"), fieldManager, b, subresource) if err != nil { @@ -85,7 +85,7 @@ func extractNamespace(namespace *apicorev1.Namespace, fieldManager string, subre // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *NamespaceApplyConfiguration) WithKind(value string) *NamespaceApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -93,7 +93,7 @@ func (b *NamespaceApplyConfiguration) WithKind(value string) *NamespaceApplyConf // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *NamespaceApplyConfiguration) WithAPIVersion(value string) *NamespaceApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -102,7 +102,7 @@ func (b *NamespaceApplyConfiguration) WithAPIVersion(value string) *NamespaceApp // If called multiple times, the Name field is set to the value of the last call. func (b *NamespaceApplyConfiguration) WithName(value string) *NamespaceApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -111,7 +111,7 @@ func (b *NamespaceApplyConfiguration) WithName(value string) *NamespaceApplyConf // If called multiple times, the GenerateName field is set to the value of the last call. func (b *NamespaceApplyConfiguration) WithGenerateName(value string) *NamespaceApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -120,7 +120,7 @@ func (b *NamespaceApplyConfiguration) WithGenerateName(value string) *NamespaceA // If called multiple times, the Namespace field is set to the value of the last call. func (b *NamespaceApplyConfiguration) WithNamespace(value string) *NamespaceApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -129,7 +129,7 @@ func (b *NamespaceApplyConfiguration) WithNamespace(value string) *NamespaceAppl // If called multiple times, the UID field is set to the value of the last call. func (b *NamespaceApplyConfiguration) WithUID(value types.UID) *NamespaceApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -138,7 +138,7 @@ func (b *NamespaceApplyConfiguration) WithUID(value types.UID) *NamespaceApplyCo // If called multiple times, the ResourceVersion field is set to the value of the last call. 
func (b *NamespaceApplyConfiguration) WithResourceVersion(value string) *NamespaceApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -147,25 +147,25 @@ func (b *NamespaceApplyConfiguration) WithResourceVersion(value string) *Namespa // If called multiple times, the Generation field is set to the value of the last call. func (b *NamespaceApplyConfiguration) WithGeneration(value int64) *NamespaceApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CreationTimestamp field is set to the value of the last call. -func (b *NamespaceApplyConfiguration) WithCreationTimestamp(value metav1.Time) *NamespaceApplyConfiguration { +func (b *NamespaceApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *NamespaceApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. -func (b *NamespaceApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *NamespaceApplyConfiguration { +func (b *NamespaceApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *NamespaceApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -174,7 +174,7 @@ func (b *NamespaceApplyConfiguration) WithDeletionTimestamp(value metav1.Time) * // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *NamespaceApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *NamespaceApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -184,11 +184,11 @@ func (b *NamespaceApplyConfiguration) WithDeletionGracePeriodSeconds(value int64 // overwriting an existing map entries in Labels field with the same key. func (b *NamespaceApplyConfiguration) WithLabels(entries map[string]string) *NamespaceApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -199,11 +199,11 @@ func (b *NamespaceApplyConfiguration) WithLabels(entries map[string]string) *Nam // overwriting an existing map entries in Annotations field with the same key. 
func (b *NamespaceApplyConfiguration) WithAnnotations(entries map[string]string) *NamespaceApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -211,13 +211,13 @@ func (b *NamespaceApplyConfiguration) WithAnnotations(entries map[string]string) // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the OwnerReferences field. -func (b *NamespaceApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *NamespaceApplyConfiguration { +func (b *NamespaceApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *NamespaceApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -228,14 +228,14 @@ func (b *NamespaceApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerRef func (b *NamespaceApplyConfiguration) WithFinalizers(values ...string) *NamespaceApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } func (b *NamespaceApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} } } @@ -258,5 +258,5 @@ func (b *NamespaceApplyConfiguration) WithStatus(value *NamespaceStatusApplyConf // GetName retrieves the value of the Name field in the declarative configuration. func (b *NamespaceApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/namespacecondition.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/namespacecondition.go index 9784c3e6f..82b4cc1ca 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/namespacecondition.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/namespacecondition.go @@ -19,18 +19,18 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) // NamespaceConditionApplyConfiguration represents a declarative configuration of the NamespaceCondition type for use // with apply. 
type NamespaceConditionApplyConfiguration struct { - Type *v1.NamespaceConditionType `json:"type,omitempty"` - Status *v1.ConditionStatus `json:"status,omitempty"` - LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"` - Reason *string `json:"reason,omitempty"` - Message *string `json:"message,omitempty"` + Type *corev1.NamespaceConditionType `json:"type,omitempty"` + Status *corev1.ConditionStatus `json:"status,omitempty"` + LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"` + Reason *string `json:"reason,omitempty"` + Message *string `json:"message,omitempty"` } // NamespaceConditionApplyConfiguration constructs a declarative configuration of the NamespaceCondition type for use with @@ -42,7 +42,7 @@ func NamespaceCondition() *NamespaceConditionApplyConfiguration { // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. -func (b *NamespaceConditionApplyConfiguration) WithType(value v1.NamespaceConditionType) *NamespaceConditionApplyConfiguration { +func (b *NamespaceConditionApplyConfiguration) WithType(value corev1.NamespaceConditionType) *NamespaceConditionApplyConfiguration { b.Type = &value return b } @@ -50,7 +50,7 @@ func (b *NamespaceConditionApplyConfiguration) WithType(value v1.NamespaceCondit // WithStatus sets the Status field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Status field is set to the value of the last call. -func (b *NamespaceConditionApplyConfiguration) WithStatus(value v1.ConditionStatus) *NamespaceConditionApplyConfiguration { +func (b *NamespaceConditionApplyConfiguration) WithStatus(value corev1.ConditionStatus) *NamespaceConditionApplyConfiguration { b.Status = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/namespacespec.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/namespacespec.go index 6d7b7f1f9..1f8fcaf9a 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/namespacespec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/namespacespec.go @@ -19,13 +19,13 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" ) // NamespaceSpecApplyConfiguration represents a declarative configuration of the NamespaceSpec type for use // with apply. type NamespaceSpecApplyConfiguration struct { - Finalizers []v1.FinalizerName `json:"finalizers,omitempty"` + Finalizers []corev1.FinalizerName `json:"finalizers,omitempty"` } // NamespaceSpecApplyConfiguration constructs a declarative configuration of the NamespaceSpec type for use with @@ -37,7 +37,7 @@ func NamespaceSpec() *NamespaceSpecApplyConfiguration { // WithFinalizers adds the given value to the Finalizers field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the Finalizers field. 
-func (b *NamespaceSpecApplyConfiguration) WithFinalizers(values ...v1.FinalizerName) *NamespaceSpecApplyConfiguration { +func (b *NamespaceSpecApplyConfiguration) WithFinalizers(values ...corev1.FinalizerName) *NamespaceSpecApplyConfiguration { for i := range values { b.Finalizers = append(b.Finalizers, values[i]) } diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/namespacestatus.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/namespacestatus.go index 314908109..1484be684 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/namespacestatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/namespacestatus.go @@ -19,13 +19,13 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" ) // NamespaceStatusApplyConfiguration represents a declarative configuration of the NamespaceStatus type for use // with apply. type NamespaceStatusApplyConfiguration struct { - Phase *v1.NamespacePhase `json:"phase,omitempty"` + Phase *corev1.NamespacePhase `json:"phase,omitempty"` Conditions []NamespaceConditionApplyConfiguration `json:"conditions,omitempty"` } @@ -38,7 +38,7 @@ func NamespaceStatus() *NamespaceStatusApplyConfiguration { // WithPhase sets the Phase field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Phase field is set to the value of the last call. -func (b *NamespaceStatusApplyConfiguration) WithPhase(value v1.NamespacePhase) *NamespaceStatusApplyConfiguration { +func (b *NamespaceStatusApplyConfiguration) WithPhase(value corev1.NamespacePhase) *NamespaceStatusApplyConfiguration { b.Phase = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/node.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/node.go index ef1339259..d365047b7 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/node.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/node.go @@ -19,21 +19,21 @@ limitations under the License. package v1 import ( - apicorev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + corev1 "k8s.io/api/core/v1" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" internal "k8s.io/client-go/applyconfigurations/internal" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) // NodeApplyConfiguration represents a declarative configuration of the Node type for use // with apply. type NodeApplyConfiguration struct { - v1.TypeMetaApplyConfiguration `json:",inline"` - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Spec *NodeSpecApplyConfiguration `json:"spec,omitempty"` - Status *NodeStatusApplyConfiguration `json:"status,omitempty"` + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *NodeSpecApplyConfiguration `json:"spec,omitempty"` + Status *NodeStatusApplyConfiguration `json:"status,omitempty"` } // Node constructs a declarative configuration of the Node type for use with @@ -57,18 +57,18 @@ func Node(name string) *NodeApplyConfiguration { // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. 
// Experimental! -func ExtractNode(node *apicorev1.Node, fieldManager string) (*NodeApplyConfiguration, error) { +func ExtractNode(node *corev1.Node, fieldManager string) (*NodeApplyConfiguration, error) { return extractNode(node, fieldManager, "") } // ExtractNodeStatus is the same as ExtractNode except // that it extracts the status subresource applied configuration. // Experimental! -func ExtractNodeStatus(node *apicorev1.Node, fieldManager string) (*NodeApplyConfiguration, error) { +func ExtractNodeStatus(node *corev1.Node, fieldManager string) (*NodeApplyConfiguration, error) { return extractNode(node, fieldManager, "status") } -func extractNode(node *apicorev1.Node, fieldManager string, subresource string) (*NodeApplyConfiguration, error) { +func extractNode(node *corev1.Node, fieldManager string, subresource string) (*NodeApplyConfiguration, error) { b := &NodeApplyConfiguration{} err := managedfields.ExtractInto(node, internal.Parser().Type("io.k8s.api.core.v1.Node"), fieldManager, b, subresource) if err != nil { @@ -85,7 +85,7 @@ func extractNode(node *apicorev1.Node, fieldManager string, subresource string) // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *NodeApplyConfiguration) WithKind(value string) *NodeApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -93,7 +93,7 @@ func (b *NodeApplyConfiguration) WithKind(value string) *NodeApplyConfiguration // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *NodeApplyConfiguration) WithAPIVersion(value string) *NodeApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -102,7 +102,7 @@ func (b *NodeApplyConfiguration) WithAPIVersion(value string) *NodeApplyConfigur // If called multiple times, the Name field is set to the value of the last call. func (b *NodeApplyConfiguration) WithName(value string) *NodeApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -111,7 +111,7 @@ func (b *NodeApplyConfiguration) WithName(value string) *NodeApplyConfiguration // If called multiple times, the GenerateName field is set to the value of the last call. func (b *NodeApplyConfiguration) WithGenerateName(value string) *NodeApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -120,7 +120,7 @@ func (b *NodeApplyConfiguration) WithGenerateName(value string) *NodeApplyConfig // If called multiple times, the Namespace field is set to the value of the last call. func (b *NodeApplyConfiguration) WithNamespace(value string) *NodeApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -129,7 +129,7 @@ func (b *NodeApplyConfiguration) WithNamespace(value string) *NodeApplyConfigura // If called multiple times, the UID field is set to the value of the last call. 
func (b *NodeApplyConfiguration) WithUID(value types.UID) *NodeApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -138,7 +138,7 @@ func (b *NodeApplyConfiguration) WithUID(value types.UID) *NodeApplyConfiguratio // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *NodeApplyConfiguration) WithResourceVersion(value string) *NodeApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -147,25 +147,25 @@ func (b *NodeApplyConfiguration) WithResourceVersion(value string) *NodeApplyCon // If called multiple times, the Generation field is set to the value of the last call. func (b *NodeApplyConfiguration) WithGeneration(value int64) *NodeApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CreationTimestamp field is set to the value of the last call. -func (b *NodeApplyConfiguration) WithCreationTimestamp(value metav1.Time) *NodeApplyConfiguration { +func (b *NodeApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *NodeApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. -func (b *NodeApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *NodeApplyConfiguration { +func (b *NodeApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *NodeApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -174,7 +174,7 @@ func (b *NodeApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *NodeA // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *NodeApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *NodeApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -184,11 +184,11 @@ func (b *NodeApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *No // overwriting an existing map entries in Labels field with the same key. 
func (b *NodeApplyConfiguration) WithLabels(entries map[string]string) *NodeApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -199,11 +199,11 @@ func (b *NodeApplyConfiguration) WithLabels(entries map[string]string) *NodeAppl // overwriting an existing map entries in Annotations field with the same key. func (b *NodeApplyConfiguration) WithAnnotations(entries map[string]string) *NodeApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -211,13 +211,13 @@ func (b *NodeApplyConfiguration) WithAnnotations(entries map[string]string) *Nod // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the OwnerReferences field. -func (b *NodeApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *NodeApplyConfiguration { +func (b *NodeApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *NodeApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -228,14 +228,14 @@ func (b *NodeApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenc func (b *NodeApplyConfiguration) WithFinalizers(values ...string) *NodeApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } func (b *NodeApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} } } @@ -258,5 +258,5 @@ func (b *NodeApplyConfiguration) WithStatus(value *NodeStatusApplyConfiguration) // GetName retrieves the value of the Name field in the declarative configuration. 
func (b *NodeApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeaddress.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeaddress.go index a9cb036c5..779fe0e2f 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeaddress.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeaddress.go @@ -19,14 +19,14 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" ) // NodeAddressApplyConfiguration represents a declarative configuration of the NodeAddress type for use // with apply. type NodeAddressApplyConfiguration struct { - Type *v1.NodeAddressType `json:"type,omitempty"` - Address *string `json:"address,omitempty"` + Type *corev1.NodeAddressType `json:"type,omitempty"` + Address *string `json:"address,omitempty"` } // NodeAddressApplyConfiguration constructs a declarative configuration of the NodeAddress type for use with @@ -38,7 +38,7 @@ func NodeAddress() *NodeAddressApplyConfiguration { // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. -func (b *NodeAddressApplyConfiguration) WithType(value v1.NodeAddressType) *NodeAddressApplyConfiguration { +func (b *NodeAddressApplyConfiguration) WithType(value corev1.NodeAddressType) *NodeAddressApplyConfiguration { b.Type = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodecondition.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodecondition.go index a1b8ed0f3..e3a2d3bb0 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodecondition.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodecondition.go @@ -19,19 +19,19 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) // NodeConditionApplyConfiguration represents a declarative configuration of the NodeCondition type for use // with apply. type NodeConditionApplyConfiguration struct { - Type *v1.NodeConditionType `json:"type,omitempty"` - Status *v1.ConditionStatus `json:"status,omitempty"` - LastHeartbeatTime *metav1.Time `json:"lastHeartbeatTime,omitempty"` - LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"` - Reason *string `json:"reason,omitempty"` - Message *string `json:"message,omitempty"` + Type *corev1.NodeConditionType `json:"type,omitempty"` + Status *corev1.ConditionStatus `json:"status,omitempty"` + LastHeartbeatTime *metav1.Time `json:"lastHeartbeatTime,omitempty"` + LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"` + Reason *string `json:"reason,omitempty"` + Message *string `json:"message,omitempty"` } // NodeConditionApplyConfiguration constructs a declarative configuration of the NodeCondition type for use with @@ -43,7 +43,7 @@ func NodeCondition() *NodeConditionApplyConfiguration { // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. 
-func (b *NodeConditionApplyConfiguration) WithType(value v1.NodeConditionType) *NodeConditionApplyConfiguration { +func (b *NodeConditionApplyConfiguration) WithType(value corev1.NodeConditionType) *NodeConditionApplyConfiguration { b.Type = &value return b } @@ -51,7 +51,7 @@ func (b *NodeConditionApplyConfiguration) WithType(value v1.NodeConditionType) * // WithStatus sets the Status field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Status field is set to the value of the last call. -func (b *NodeConditionApplyConfiguration) WithStatus(value v1.ConditionStatus) *NodeConditionApplyConfiguration { +func (b *NodeConditionApplyConfiguration) WithStatus(value corev1.ConditionStatus) *NodeConditionApplyConfiguration { b.Status = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeselectorrequirement.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeselectorrequirement.go index 7c383e06c..4dcbc9a2e 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeselectorrequirement.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeselectorrequirement.go @@ -19,15 +19,15 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" ) // NodeSelectorRequirementApplyConfiguration represents a declarative configuration of the NodeSelectorRequirement type for use // with apply. type NodeSelectorRequirementApplyConfiguration struct { - Key *string `json:"key,omitempty"` - Operator *v1.NodeSelectorOperator `json:"operator,omitempty"` - Values []string `json:"values,omitempty"` + Key *string `json:"key,omitempty"` + Operator *corev1.NodeSelectorOperator `json:"operator,omitempty"` + Values []string `json:"values,omitempty"` } // NodeSelectorRequirementApplyConfiguration constructs a declarative configuration of the NodeSelectorRequirement type for use with @@ -47,7 +47,7 @@ func (b *NodeSelectorRequirementApplyConfiguration) WithKey(value string) *NodeS // WithOperator sets the Operator field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Operator field is set to the value of the last call. -func (b *NodeSelectorRequirementApplyConfiguration) WithOperator(value v1.NodeSelectorOperator) *NodeSelectorRequirementApplyConfiguration { +func (b *NodeSelectorRequirementApplyConfiguration) WithOperator(value corev1.NodeSelectorOperator) *NodeSelectorRequirementApplyConfiguration { b.Operator = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodestatus.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodestatus.go index 8411c57ac..3859ccd50 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodestatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodestatus.go @@ -19,21 +19,21 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" ) // NodeStatusApplyConfiguration represents a declarative configuration of the NodeStatus type for use // with apply. 
type NodeStatusApplyConfiguration struct { - Capacity *v1.ResourceList `json:"capacity,omitempty"` - Allocatable *v1.ResourceList `json:"allocatable,omitempty"` - Phase *v1.NodePhase `json:"phase,omitempty"` + Capacity *corev1.ResourceList `json:"capacity,omitempty"` + Allocatable *corev1.ResourceList `json:"allocatable,omitempty"` + Phase *corev1.NodePhase `json:"phase,omitempty"` Conditions []NodeConditionApplyConfiguration `json:"conditions,omitempty"` Addresses []NodeAddressApplyConfiguration `json:"addresses,omitempty"` DaemonEndpoints *NodeDaemonEndpointsApplyConfiguration `json:"daemonEndpoints,omitempty"` NodeInfo *NodeSystemInfoApplyConfiguration `json:"nodeInfo,omitempty"` Images []ContainerImageApplyConfiguration `json:"images,omitempty"` - VolumesInUse []v1.UniqueVolumeName `json:"volumesInUse,omitempty"` + VolumesInUse []corev1.UniqueVolumeName `json:"volumesInUse,omitempty"` VolumesAttached []AttachedVolumeApplyConfiguration `json:"volumesAttached,omitempty"` Config *NodeConfigStatusApplyConfiguration `json:"config,omitempty"` RuntimeHandlers []NodeRuntimeHandlerApplyConfiguration `json:"runtimeHandlers,omitempty"` @@ -49,7 +49,7 @@ func NodeStatus() *NodeStatusApplyConfiguration { // WithCapacity sets the Capacity field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Capacity field is set to the value of the last call. -func (b *NodeStatusApplyConfiguration) WithCapacity(value v1.ResourceList) *NodeStatusApplyConfiguration { +func (b *NodeStatusApplyConfiguration) WithCapacity(value corev1.ResourceList) *NodeStatusApplyConfiguration { b.Capacity = &value return b } @@ -57,7 +57,7 @@ func (b *NodeStatusApplyConfiguration) WithCapacity(value v1.ResourceList) *Node // WithAllocatable sets the Allocatable field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Allocatable field is set to the value of the last call. -func (b *NodeStatusApplyConfiguration) WithAllocatable(value v1.ResourceList) *NodeStatusApplyConfiguration { +func (b *NodeStatusApplyConfiguration) WithAllocatable(value corev1.ResourceList) *NodeStatusApplyConfiguration { b.Allocatable = &value return b } @@ -65,7 +65,7 @@ func (b *NodeStatusApplyConfiguration) WithAllocatable(value v1.ResourceList) *N // WithPhase sets the Phase field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Phase field is set to the value of the last call. -func (b *NodeStatusApplyConfiguration) WithPhase(value v1.NodePhase) *NodeStatusApplyConfiguration { +func (b *NodeStatusApplyConfiguration) WithPhase(value corev1.NodePhase) *NodeStatusApplyConfiguration { b.Phase = &value return b } @@ -128,7 +128,7 @@ func (b *NodeStatusApplyConfiguration) WithImages(values ...*ContainerImageApply // WithVolumesInUse adds the given value to the VolumesInUse field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the VolumesInUse field. 
-func (b *NodeStatusApplyConfiguration) WithVolumesInUse(values ...v1.UniqueVolumeName) *NodeStatusApplyConfiguration { +func (b *NodeStatusApplyConfiguration) WithVolumesInUse(values ...corev1.UniqueVolumeName) *NodeStatusApplyConfiguration { for i := range values { b.VolumesInUse = append(b.VolumesInUse, values[i]) } diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolume.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolume.go index 020f87411..6840c1c88 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolume.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolume.go @@ -19,21 +19,21 @@ limitations under the License. package v1 import ( - apicorev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + corev1 "k8s.io/api/core/v1" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" internal "k8s.io/client-go/applyconfigurations/internal" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) // PersistentVolumeApplyConfiguration represents a declarative configuration of the PersistentVolume type for use // with apply. type PersistentVolumeApplyConfiguration struct { - v1.TypeMetaApplyConfiguration `json:",inline"` - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Spec *PersistentVolumeSpecApplyConfiguration `json:"spec,omitempty"` - Status *PersistentVolumeStatusApplyConfiguration `json:"status,omitempty"` + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *PersistentVolumeSpecApplyConfiguration `json:"spec,omitempty"` + Status *PersistentVolumeStatusApplyConfiguration `json:"status,omitempty"` } // PersistentVolume constructs a declarative configuration of the PersistentVolume type for use with @@ -57,18 +57,18 @@ func PersistentVolume(name string) *PersistentVolumeApplyConfiguration { // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! -func ExtractPersistentVolume(persistentVolume *apicorev1.PersistentVolume, fieldManager string) (*PersistentVolumeApplyConfiguration, error) { +func ExtractPersistentVolume(persistentVolume *corev1.PersistentVolume, fieldManager string) (*PersistentVolumeApplyConfiguration, error) { return extractPersistentVolume(persistentVolume, fieldManager, "") } // ExtractPersistentVolumeStatus is the same as ExtractPersistentVolume except // that it extracts the status subresource applied configuration. // Experimental! 
-func ExtractPersistentVolumeStatus(persistentVolume *apicorev1.PersistentVolume, fieldManager string) (*PersistentVolumeApplyConfiguration, error) { +func ExtractPersistentVolumeStatus(persistentVolume *corev1.PersistentVolume, fieldManager string) (*PersistentVolumeApplyConfiguration, error) { return extractPersistentVolume(persistentVolume, fieldManager, "status") } -func extractPersistentVolume(persistentVolume *apicorev1.PersistentVolume, fieldManager string, subresource string) (*PersistentVolumeApplyConfiguration, error) { +func extractPersistentVolume(persistentVolume *corev1.PersistentVolume, fieldManager string, subresource string) (*PersistentVolumeApplyConfiguration, error) { b := &PersistentVolumeApplyConfiguration{} err := managedfields.ExtractInto(persistentVolume, internal.Parser().Type("io.k8s.api.core.v1.PersistentVolume"), fieldManager, b, subresource) if err != nil { @@ -85,7 +85,7 @@ func extractPersistentVolume(persistentVolume *apicorev1.PersistentVolume, field // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *PersistentVolumeApplyConfiguration) WithKind(value string) *PersistentVolumeApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -93,7 +93,7 @@ func (b *PersistentVolumeApplyConfiguration) WithKind(value string) *PersistentV // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *PersistentVolumeApplyConfiguration) WithAPIVersion(value string) *PersistentVolumeApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -102,7 +102,7 @@ func (b *PersistentVolumeApplyConfiguration) WithAPIVersion(value string) *Persi // If called multiple times, the Name field is set to the value of the last call. func (b *PersistentVolumeApplyConfiguration) WithName(value string) *PersistentVolumeApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -111,7 +111,7 @@ func (b *PersistentVolumeApplyConfiguration) WithName(value string) *PersistentV // If called multiple times, the GenerateName field is set to the value of the last call. func (b *PersistentVolumeApplyConfiguration) WithGenerateName(value string) *PersistentVolumeApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -120,7 +120,7 @@ func (b *PersistentVolumeApplyConfiguration) WithGenerateName(value string) *Per // If called multiple times, the Namespace field is set to the value of the last call. func (b *PersistentVolumeApplyConfiguration) WithNamespace(value string) *PersistentVolumeApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -129,7 +129,7 @@ func (b *PersistentVolumeApplyConfiguration) WithNamespace(value string) *Persis // If called multiple times, the UID field is set to the value of the last call. 
func (b *PersistentVolumeApplyConfiguration) WithUID(value types.UID) *PersistentVolumeApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -138,7 +138,7 @@ func (b *PersistentVolumeApplyConfiguration) WithUID(value types.UID) *Persisten // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *PersistentVolumeApplyConfiguration) WithResourceVersion(value string) *PersistentVolumeApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -147,25 +147,25 @@ func (b *PersistentVolumeApplyConfiguration) WithResourceVersion(value string) * // If called multiple times, the Generation field is set to the value of the last call. func (b *PersistentVolumeApplyConfiguration) WithGeneration(value int64) *PersistentVolumeApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CreationTimestamp field is set to the value of the last call. -func (b *PersistentVolumeApplyConfiguration) WithCreationTimestamp(value metav1.Time) *PersistentVolumeApplyConfiguration { +func (b *PersistentVolumeApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *PersistentVolumeApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. -func (b *PersistentVolumeApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *PersistentVolumeApplyConfiguration { +func (b *PersistentVolumeApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *PersistentVolumeApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -174,7 +174,7 @@ func (b *PersistentVolumeApplyConfiguration) WithDeletionTimestamp(value metav1. // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *PersistentVolumeApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *PersistentVolumeApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -184,11 +184,11 @@ func (b *PersistentVolumeApplyConfiguration) WithDeletionGracePeriodSeconds(valu // overwriting an existing map entries in Labels field with the same key. 
func (b *PersistentVolumeApplyConfiguration) WithLabels(entries map[string]string) *PersistentVolumeApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -199,11 +199,11 @@ func (b *PersistentVolumeApplyConfiguration) WithLabels(entries map[string]strin // overwriting an existing map entries in Annotations field with the same key. func (b *PersistentVolumeApplyConfiguration) WithAnnotations(entries map[string]string) *PersistentVolumeApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -211,13 +211,13 @@ func (b *PersistentVolumeApplyConfiguration) WithAnnotations(entries map[string] // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the OwnerReferences field. -func (b *PersistentVolumeApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *PersistentVolumeApplyConfiguration { +func (b *PersistentVolumeApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *PersistentVolumeApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -228,14 +228,14 @@ func (b *PersistentVolumeApplyConfiguration) WithOwnerReferences(values ...*v1.O func (b *PersistentVolumeApplyConfiguration) WithFinalizers(values ...string) *PersistentVolumeApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } func (b *PersistentVolumeApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} } } @@ -258,5 +258,5 @@ func (b *PersistentVolumeApplyConfiguration) WithStatus(value *PersistentVolumeS // GetName retrieves the value of the Name field in the declarative configuration. 
func (b *PersistentVolumeApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaim.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaim.go index 81cf79144..93b8b69d4 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaim.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaim.go @@ -19,21 +19,21 @@ limitations under the License. package v1 import ( - apicorev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + corev1 "k8s.io/api/core/v1" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" internal "k8s.io/client-go/applyconfigurations/internal" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) // PersistentVolumeClaimApplyConfiguration represents a declarative configuration of the PersistentVolumeClaim type for use // with apply. type PersistentVolumeClaimApplyConfiguration struct { - v1.TypeMetaApplyConfiguration `json:",inline"` - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Spec *PersistentVolumeClaimSpecApplyConfiguration `json:"spec,omitempty"` - Status *PersistentVolumeClaimStatusApplyConfiguration `json:"status,omitempty"` + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *PersistentVolumeClaimSpecApplyConfiguration `json:"spec,omitempty"` + Status *PersistentVolumeClaimStatusApplyConfiguration `json:"status,omitempty"` } // PersistentVolumeClaim constructs a declarative configuration of the PersistentVolumeClaim type for use with @@ -58,18 +58,18 @@ func PersistentVolumeClaim(name, namespace string) *PersistentVolumeClaimApplyCo // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! -func ExtractPersistentVolumeClaim(persistentVolumeClaim *apicorev1.PersistentVolumeClaim, fieldManager string) (*PersistentVolumeClaimApplyConfiguration, error) { +func ExtractPersistentVolumeClaim(persistentVolumeClaim *corev1.PersistentVolumeClaim, fieldManager string) (*PersistentVolumeClaimApplyConfiguration, error) { return extractPersistentVolumeClaim(persistentVolumeClaim, fieldManager, "") } // ExtractPersistentVolumeClaimStatus is the same as ExtractPersistentVolumeClaim except // that it extracts the status subresource applied configuration. // Experimental! 
-func ExtractPersistentVolumeClaimStatus(persistentVolumeClaim *apicorev1.PersistentVolumeClaim, fieldManager string) (*PersistentVolumeClaimApplyConfiguration, error) { +func ExtractPersistentVolumeClaimStatus(persistentVolumeClaim *corev1.PersistentVolumeClaim, fieldManager string) (*PersistentVolumeClaimApplyConfiguration, error) { return extractPersistentVolumeClaim(persistentVolumeClaim, fieldManager, "status") } -func extractPersistentVolumeClaim(persistentVolumeClaim *apicorev1.PersistentVolumeClaim, fieldManager string, subresource string) (*PersistentVolumeClaimApplyConfiguration, error) { +func extractPersistentVolumeClaim(persistentVolumeClaim *corev1.PersistentVolumeClaim, fieldManager string, subresource string) (*PersistentVolumeClaimApplyConfiguration, error) { b := &PersistentVolumeClaimApplyConfiguration{} err := managedfields.ExtractInto(persistentVolumeClaim, internal.Parser().Type("io.k8s.api.core.v1.PersistentVolumeClaim"), fieldManager, b, subresource) if err != nil { @@ -87,7 +87,7 @@ func extractPersistentVolumeClaim(persistentVolumeClaim *apicorev1.PersistentVol // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *PersistentVolumeClaimApplyConfiguration) WithKind(value string) *PersistentVolumeClaimApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -95,7 +95,7 @@ func (b *PersistentVolumeClaimApplyConfiguration) WithKind(value string) *Persis // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *PersistentVolumeClaimApplyConfiguration) WithAPIVersion(value string) *PersistentVolumeClaimApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -104,7 +104,7 @@ func (b *PersistentVolumeClaimApplyConfiguration) WithAPIVersion(value string) * // If called multiple times, the Name field is set to the value of the last call. func (b *PersistentVolumeClaimApplyConfiguration) WithName(value string) *PersistentVolumeClaimApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -113,7 +113,7 @@ func (b *PersistentVolumeClaimApplyConfiguration) WithName(value string) *Persis // If called multiple times, the GenerateName field is set to the value of the last call. func (b *PersistentVolumeClaimApplyConfiguration) WithGenerateName(value string) *PersistentVolumeClaimApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -122,7 +122,7 @@ func (b *PersistentVolumeClaimApplyConfiguration) WithGenerateName(value string) // If called multiple times, the Namespace field is set to the value of the last call. func (b *PersistentVolumeClaimApplyConfiguration) WithNamespace(value string) *PersistentVolumeClaimApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -131,7 +131,7 @@ func (b *PersistentVolumeClaimApplyConfiguration) WithNamespace(value string) *P // If called multiple times, the UID field is set to the value of the last call. 
func (b *PersistentVolumeClaimApplyConfiguration) WithUID(value types.UID) *PersistentVolumeClaimApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -140,7 +140,7 @@ func (b *PersistentVolumeClaimApplyConfiguration) WithUID(value types.UID) *Pers // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *PersistentVolumeClaimApplyConfiguration) WithResourceVersion(value string) *PersistentVolumeClaimApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -149,25 +149,25 @@ func (b *PersistentVolumeClaimApplyConfiguration) WithResourceVersion(value stri // If called multiple times, the Generation field is set to the value of the last call. func (b *PersistentVolumeClaimApplyConfiguration) WithGeneration(value int64) *PersistentVolumeClaimApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CreationTimestamp field is set to the value of the last call. -func (b *PersistentVolumeClaimApplyConfiguration) WithCreationTimestamp(value metav1.Time) *PersistentVolumeClaimApplyConfiguration { +func (b *PersistentVolumeClaimApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *PersistentVolumeClaimApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. -func (b *PersistentVolumeClaimApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *PersistentVolumeClaimApplyConfiguration { +func (b *PersistentVolumeClaimApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *PersistentVolumeClaimApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -176,7 +176,7 @@ func (b *PersistentVolumeClaimApplyConfiguration) WithDeletionTimestamp(value me // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *PersistentVolumeClaimApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *PersistentVolumeClaimApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -186,11 +186,11 @@ func (b *PersistentVolumeClaimApplyConfiguration) WithDeletionGracePeriodSeconds // overwriting an existing map entries in Labels field with the same key. 
func (b *PersistentVolumeClaimApplyConfiguration) WithLabels(entries map[string]string) *PersistentVolumeClaimApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -201,11 +201,11 @@ func (b *PersistentVolumeClaimApplyConfiguration) WithLabels(entries map[string] // overwriting an existing map entries in Annotations field with the same key. func (b *PersistentVolumeClaimApplyConfiguration) WithAnnotations(entries map[string]string) *PersistentVolumeClaimApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -213,13 +213,13 @@ func (b *PersistentVolumeClaimApplyConfiguration) WithAnnotations(entries map[st // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the OwnerReferences field. -func (b *PersistentVolumeClaimApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *PersistentVolumeClaimApplyConfiguration { +func (b *PersistentVolumeClaimApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *PersistentVolumeClaimApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -230,14 +230,14 @@ func (b *PersistentVolumeClaimApplyConfiguration) WithOwnerReferences(values ... func (b *PersistentVolumeClaimApplyConfiguration) WithFinalizers(values ...string) *PersistentVolumeClaimApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } func (b *PersistentVolumeClaimApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} } } @@ -260,5 +260,5 @@ func (b *PersistentVolumeClaimApplyConfiguration) WithStatus(value *PersistentVo // GetName retrieves the value of the Name field in the declarative configuration. 
func (b *PersistentVolumeClaimApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimcondition.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimcondition.go index 80038c067..40025d533 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimcondition.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimcondition.go @@ -19,19 +19,19 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) // PersistentVolumeClaimConditionApplyConfiguration represents a declarative configuration of the PersistentVolumeClaimCondition type for use // with apply. type PersistentVolumeClaimConditionApplyConfiguration struct { - Type *v1.PersistentVolumeClaimConditionType `json:"type,omitempty"` - Status *v1.ConditionStatus `json:"status,omitempty"` - LastProbeTime *metav1.Time `json:"lastProbeTime,omitempty"` - LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"` - Reason *string `json:"reason,omitempty"` - Message *string `json:"message,omitempty"` + Type *corev1.PersistentVolumeClaimConditionType `json:"type,omitempty"` + Status *corev1.ConditionStatus `json:"status,omitempty"` + LastProbeTime *metav1.Time `json:"lastProbeTime,omitempty"` + LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"` + Reason *string `json:"reason,omitempty"` + Message *string `json:"message,omitempty"` } // PersistentVolumeClaimConditionApplyConfiguration constructs a declarative configuration of the PersistentVolumeClaimCondition type for use with @@ -43,7 +43,7 @@ func PersistentVolumeClaimCondition() *PersistentVolumeClaimConditionApplyConfig // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. -func (b *PersistentVolumeClaimConditionApplyConfiguration) WithType(value v1.PersistentVolumeClaimConditionType) *PersistentVolumeClaimConditionApplyConfiguration { +func (b *PersistentVolumeClaimConditionApplyConfiguration) WithType(value corev1.PersistentVolumeClaimConditionType) *PersistentVolumeClaimConditionApplyConfiguration { b.Type = &value return b } @@ -51,7 +51,7 @@ func (b *PersistentVolumeClaimConditionApplyConfiguration) WithType(value v1.Per // WithStatus sets the Status field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Status field is set to the value of the last call. 
-func (b *PersistentVolumeClaimConditionApplyConfiguration) WithStatus(value v1.ConditionStatus) *PersistentVolumeClaimConditionApplyConfiguration { +func (b *PersistentVolumeClaimConditionApplyConfiguration) WithStatus(value corev1.ConditionStatus) *PersistentVolumeClaimConditionApplyConfiguration { b.Status = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimspec.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimspec.go index 5ce671cd9..2c2be16b3 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimspec.go @@ -19,19 +19,19 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) // PersistentVolumeClaimSpecApplyConfiguration represents a declarative configuration of the PersistentVolumeClaimSpec type for use // with apply. type PersistentVolumeClaimSpecApplyConfiguration struct { - AccessModes []v1.PersistentVolumeAccessMode `json:"accessModes,omitempty"` + AccessModes []corev1.PersistentVolumeAccessMode `json:"accessModes,omitempty"` Selector *metav1.LabelSelectorApplyConfiguration `json:"selector,omitempty"` Resources *VolumeResourceRequirementsApplyConfiguration `json:"resources,omitempty"` VolumeName *string `json:"volumeName,omitempty"` StorageClassName *string `json:"storageClassName,omitempty"` - VolumeMode *v1.PersistentVolumeMode `json:"volumeMode,omitempty"` + VolumeMode *corev1.PersistentVolumeMode `json:"volumeMode,omitempty"` DataSource *TypedLocalObjectReferenceApplyConfiguration `json:"dataSource,omitempty"` DataSourceRef *TypedObjectReferenceApplyConfiguration `json:"dataSourceRef,omitempty"` VolumeAttributesClassName *string `json:"volumeAttributesClassName,omitempty"` @@ -46,7 +46,7 @@ func PersistentVolumeClaimSpec() *PersistentVolumeClaimSpecApplyConfiguration { // WithAccessModes adds the given value to the AccessModes field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the AccessModes field. -func (b *PersistentVolumeClaimSpecApplyConfiguration) WithAccessModes(values ...v1.PersistentVolumeAccessMode) *PersistentVolumeClaimSpecApplyConfiguration { +func (b *PersistentVolumeClaimSpecApplyConfiguration) WithAccessModes(values ...corev1.PersistentVolumeAccessMode) *PersistentVolumeClaimSpecApplyConfiguration { for i := range values { b.AccessModes = append(b.AccessModes, values[i]) } @@ -88,7 +88,7 @@ func (b *PersistentVolumeClaimSpecApplyConfiguration) WithStorageClassName(value // WithVolumeMode sets the VolumeMode field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the VolumeMode field is set to the value of the last call. 
-func (b *PersistentVolumeClaimSpecApplyConfiguration) WithVolumeMode(value v1.PersistentVolumeMode) *PersistentVolumeClaimSpecApplyConfiguration { +func (b *PersistentVolumeClaimSpecApplyConfiguration) WithVolumeMode(value corev1.PersistentVolumeMode) *PersistentVolumeClaimSpecApplyConfiguration { b.VolumeMode = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimstatus.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimstatus.go index 3eebf95ad..6cea23a2c 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimstatus.go @@ -19,18 +19,18 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" ) // PersistentVolumeClaimStatusApplyConfiguration represents a declarative configuration of the PersistentVolumeClaimStatus type for use // with apply. type PersistentVolumeClaimStatusApplyConfiguration struct { - Phase *v1.PersistentVolumeClaimPhase `json:"phase,omitempty"` - AccessModes []v1.PersistentVolumeAccessMode `json:"accessModes,omitempty"` - Capacity *v1.ResourceList `json:"capacity,omitempty"` + Phase *corev1.PersistentVolumeClaimPhase `json:"phase,omitempty"` + AccessModes []corev1.PersistentVolumeAccessMode `json:"accessModes,omitempty"` + Capacity *corev1.ResourceList `json:"capacity,omitempty"` Conditions []PersistentVolumeClaimConditionApplyConfiguration `json:"conditions,omitempty"` - AllocatedResources *v1.ResourceList `json:"allocatedResources,omitempty"` - AllocatedResourceStatuses map[v1.ResourceName]v1.ClaimResourceStatus `json:"allocatedResourceStatuses,omitempty"` + AllocatedResources *corev1.ResourceList `json:"allocatedResources,omitempty"` + AllocatedResourceStatuses map[corev1.ResourceName]corev1.ClaimResourceStatus `json:"allocatedResourceStatuses,omitempty"` CurrentVolumeAttributesClassName *string `json:"currentVolumeAttributesClassName,omitempty"` ModifyVolumeStatus *ModifyVolumeStatusApplyConfiguration `json:"modifyVolumeStatus,omitempty"` } @@ -44,7 +44,7 @@ func PersistentVolumeClaimStatus() *PersistentVolumeClaimStatusApplyConfiguratio // WithPhase sets the Phase field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Phase field is set to the value of the last call. -func (b *PersistentVolumeClaimStatusApplyConfiguration) WithPhase(value v1.PersistentVolumeClaimPhase) *PersistentVolumeClaimStatusApplyConfiguration { +func (b *PersistentVolumeClaimStatusApplyConfiguration) WithPhase(value corev1.PersistentVolumeClaimPhase) *PersistentVolumeClaimStatusApplyConfiguration { b.Phase = &value return b } @@ -52,7 +52,7 @@ func (b *PersistentVolumeClaimStatusApplyConfiguration) WithPhase(value v1.Persi // WithAccessModes adds the given value to the AccessModes field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the AccessModes field. 
-func (b *PersistentVolumeClaimStatusApplyConfiguration) WithAccessModes(values ...v1.PersistentVolumeAccessMode) *PersistentVolumeClaimStatusApplyConfiguration { +func (b *PersistentVolumeClaimStatusApplyConfiguration) WithAccessModes(values ...corev1.PersistentVolumeAccessMode) *PersistentVolumeClaimStatusApplyConfiguration { for i := range values { b.AccessModes = append(b.AccessModes, values[i]) } @@ -62,7 +62,7 @@ func (b *PersistentVolumeClaimStatusApplyConfiguration) WithAccessModes(values . // WithCapacity sets the Capacity field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Capacity field is set to the value of the last call. -func (b *PersistentVolumeClaimStatusApplyConfiguration) WithCapacity(value v1.ResourceList) *PersistentVolumeClaimStatusApplyConfiguration { +func (b *PersistentVolumeClaimStatusApplyConfiguration) WithCapacity(value corev1.ResourceList) *PersistentVolumeClaimStatusApplyConfiguration { b.Capacity = &value return b } @@ -83,7 +83,7 @@ func (b *PersistentVolumeClaimStatusApplyConfiguration) WithConditions(values .. // WithAllocatedResources sets the AllocatedResources field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the AllocatedResources field is set to the value of the last call. -func (b *PersistentVolumeClaimStatusApplyConfiguration) WithAllocatedResources(value v1.ResourceList) *PersistentVolumeClaimStatusApplyConfiguration { +func (b *PersistentVolumeClaimStatusApplyConfiguration) WithAllocatedResources(value corev1.ResourceList) *PersistentVolumeClaimStatusApplyConfiguration { b.AllocatedResources = &value return b } @@ -92,9 +92,9 @@ func (b *PersistentVolumeClaimStatusApplyConfiguration) WithAllocatedResources(v // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, the entries provided by each call will be put on the AllocatedResourceStatuses field, // overwriting an existing map entries in AllocatedResourceStatuses field with the same key. -func (b *PersistentVolumeClaimStatusApplyConfiguration) WithAllocatedResourceStatuses(entries map[v1.ResourceName]v1.ClaimResourceStatus) *PersistentVolumeClaimStatusApplyConfiguration { +func (b *PersistentVolumeClaimStatusApplyConfiguration) WithAllocatedResourceStatuses(entries map[corev1.ResourceName]corev1.ClaimResourceStatus) *PersistentVolumeClaimStatusApplyConfiguration { if b.AllocatedResourceStatuses == nil && len(entries) > 0 { - b.AllocatedResourceStatuses = make(map[v1.ResourceName]v1.ClaimResourceStatus, len(entries)) + b.AllocatedResourceStatuses = make(map[corev1.ResourceName]corev1.ClaimResourceStatus, len(entries)) } for k, v := range entries { b.AllocatedResourceStatuses[k] = v diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimtemplate.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimtemplate.go index ed4970291..4db3cbf12 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimtemplate.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimtemplate.go @@ -19,16 +19,16 @@ limitations under the License. 
package v1 import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) // PersistentVolumeClaimTemplateApplyConfiguration represents a declarative configuration of the PersistentVolumeClaimTemplate type for use // with apply. type PersistentVolumeClaimTemplateApplyConfiguration struct { - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Spec *PersistentVolumeClaimSpecApplyConfiguration `json:"spec,omitempty"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *PersistentVolumeClaimSpecApplyConfiguration `json:"spec,omitempty"` } // PersistentVolumeClaimTemplateApplyConfiguration constructs a declarative configuration of the PersistentVolumeClaimTemplate type for use with @@ -42,7 +42,7 @@ func PersistentVolumeClaimTemplate() *PersistentVolumeClaimTemplateApplyConfigur // If called multiple times, the Name field is set to the value of the last call. func (b *PersistentVolumeClaimTemplateApplyConfiguration) WithName(value string) *PersistentVolumeClaimTemplateApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -51,7 +51,7 @@ func (b *PersistentVolumeClaimTemplateApplyConfiguration) WithName(value string) // If called multiple times, the GenerateName field is set to the value of the last call. func (b *PersistentVolumeClaimTemplateApplyConfiguration) WithGenerateName(value string) *PersistentVolumeClaimTemplateApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -60,7 +60,7 @@ func (b *PersistentVolumeClaimTemplateApplyConfiguration) WithGenerateName(value // If called multiple times, the Namespace field is set to the value of the last call. func (b *PersistentVolumeClaimTemplateApplyConfiguration) WithNamespace(value string) *PersistentVolumeClaimTemplateApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -69,7 +69,7 @@ func (b *PersistentVolumeClaimTemplateApplyConfiguration) WithNamespace(value st // If called multiple times, the UID field is set to the value of the last call. func (b *PersistentVolumeClaimTemplateApplyConfiguration) WithUID(value types.UID) *PersistentVolumeClaimTemplateApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -78,7 +78,7 @@ func (b *PersistentVolumeClaimTemplateApplyConfiguration) WithUID(value types.UI // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *PersistentVolumeClaimTemplateApplyConfiguration) WithResourceVersion(value string) *PersistentVolumeClaimTemplateApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -87,25 +87,25 @@ func (b *PersistentVolumeClaimTemplateApplyConfiguration) WithResourceVersion(va // If called multiple times, the Generation field is set to the value of the last call. 
func (b *PersistentVolumeClaimTemplateApplyConfiguration) WithGeneration(value int64) *PersistentVolumeClaimTemplateApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CreationTimestamp field is set to the value of the last call. -func (b *PersistentVolumeClaimTemplateApplyConfiguration) WithCreationTimestamp(value metav1.Time) *PersistentVolumeClaimTemplateApplyConfiguration { +func (b *PersistentVolumeClaimTemplateApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *PersistentVolumeClaimTemplateApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. -func (b *PersistentVolumeClaimTemplateApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *PersistentVolumeClaimTemplateApplyConfiguration { +func (b *PersistentVolumeClaimTemplateApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *PersistentVolumeClaimTemplateApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -114,7 +114,7 @@ func (b *PersistentVolumeClaimTemplateApplyConfiguration) WithDeletionTimestamp( // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *PersistentVolumeClaimTemplateApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *PersistentVolumeClaimTemplateApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -124,11 +124,11 @@ func (b *PersistentVolumeClaimTemplateApplyConfiguration) WithDeletionGracePerio // overwriting an existing map entries in Labels field with the same key. func (b *PersistentVolumeClaimTemplateApplyConfiguration) WithLabels(entries map[string]string) *PersistentVolumeClaimTemplateApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -139,11 +139,11 @@ func (b *PersistentVolumeClaimTemplateApplyConfiguration) WithLabels(entries map // overwriting an existing map entries in Annotations field with the same key. 
func (b *PersistentVolumeClaimTemplateApplyConfiguration) WithAnnotations(entries map[string]string) *PersistentVolumeClaimTemplateApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -151,13 +151,13 @@ func (b *PersistentVolumeClaimTemplateApplyConfiguration) WithAnnotations(entrie // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the OwnerReferences field. -func (b *PersistentVolumeClaimTemplateApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *PersistentVolumeClaimTemplateApplyConfiguration { +func (b *PersistentVolumeClaimTemplateApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *PersistentVolumeClaimTemplateApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -168,14 +168,14 @@ func (b *PersistentVolumeClaimTemplateApplyConfiguration) WithOwnerReferences(va func (b *PersistentVolumeClaimTemplateApplyConfiguration) WithFinalizers(values ...string) *PersistentVolumeClaimTemplateApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } func (b *PersistentVolumeClaimTemplateApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} } } @@ -190,5 +190,5 @@ func (b *PersistentVolumeClaimTemplateApplyConfiguration) WithSpec(value *Persis // GetName retrieves the value of the Name field in the declarative configuration. func (b *PersistentVolumeClaimTemplateApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumespec.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumespec.go index 074fa55d1..792e3b944 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumespec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumespec.go @@ -19,20 +19,20 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" ) // PersistentVolumeSpecApplyConfiguration represents a declarative configuration of the PersistentVolumeSpec type for use // with apply. 
type PersistentVolumeSpecApplyConfiguration struct { - Capacity *v1.ResourceList `json:"capacity,omitempty"` + Capacity *corev1.ResourceList `json:"capacity,omitempty"` PersistentVolumeSourceApplyConfiguration `json:",inline"` - AccessModes []v1.PersistentVolumeAccessMode `json:"accessModes,omitempty"` + AccessModes []corev1.PersistentVolumeAccessMode `json:"accessModes,omitempty"` ClaimRef *ObjectReferenceApplyConfiguration `json:"claimRef,omitempty"` - PersistentVolumeReclaimPolicy *v1.PersistentVolumeReclaimPolicy `json:"persistentVolumeReclaimPolicy,omitempty"` + PersistentVolumeReclaimPolicy *corev1.PersistentVolumeReclaimPolicy `json:"persistentVolumeReclaimPolicy,omitempty"` StorageClassName *string `json:"storageClassName,omitempty"` MountOptions []string `json:"mountOptions,omitempty"` - VolumeMode *v1.PersistentVolumeMode `json:"volumeMode,omitempty"` + VolumeMode *corev1.PersistentVolumeMode `json:"volumeMode,omitempty"` NodeAffinity *VolumeNodeAffinityApplyConfiguration `json:"nodeAffinity,omitempty"` VolumeAttributesClassName *string `json:"volumeAttributesClassName,omitempty"` } @@ -46,7 +46,7 @@ func PersistentVolumeSpec() *PersistentVolumeSpecApplyConfiguration { // WithCapacity sets the Capacity field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Capacity field is set to the value of the last call. -func (b *PersistentVolumeSpecApplyConfiguration) WithCapacity(value v1.ResourceList) *PersistentVolumeSpecApplyConfiguration { +func (b *PersistentVolumeSpecApplyConfiguration) WithCapacity(value corev1.ResourceList) *PersistentVolumeSpecApplyConfiguration { b.Capacity = &value return b } @@ -55,7 +55,7 @@ func (b *PersistentVolumeSpecApplyConfiguration) WithCapacity(value v1.ResourceL // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the GCEPersistentDisk field is set to the value of the last call. func (b *PersistentVolumeSpecApplyConfiguration) WithGCEPersistentDisk(value *GCEPersistentDiskVolumeSourceApplyConfiguration) *PersistentVolumeSpecApplyConfiguration { - b.GCEPersistentDisk = value + b.PersistentVolumeSourceApplyConfiguration.GCEPersistentDisk = value return b } @@ -63,7 +63,7 @@ func (b *PersistentVolumeSpecApplyConfiguration) WithGCEPersistentDisk(value *GC // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the AWSElasticBlockStore field is set to the value of the last call. func (b *PersistentVolumeSpecApplyConfiguration) WithAWSElasticBlockStore(value *AWSElasticBlockStoreVolumeSourceApplyConfiguration) *PersistentVolumeSpecApplyConfiguration { - b.AWSElasticBlockStore = value + b.PersistentVolumeSourceApplyConfiguration.AWSElasticBlockStore = value return b } @@ -71,7 +71,7 @@ func (b *PersistentVolumeSpecApplyConfiguration) WithAWSElasticBlockStore(value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the HostPath field is set to the value of the last call. 
func (b *PersistentVolumeSpecApplyConfiguration) WithHostPath(value *HostPathVolumeSourceApplyConfiguration) *PersistentVolumeSpecApplyConfiguration { - b.HostPath = value + b.PersistentVolumeSourceApplyConfiguration.HostPath = value return b } @@ -79,7 +79,7 @@ func (b *PersistentVolumeSpecApplyConfiguration) WithHostPath(value *HostPathVol // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Glusterfs field is set to the value of the last call. func (b *PersistentVolumeSpecApplyConfiguration) WithGlusterfs(value *GlusterfsPersistentVolumeSourceApplyConfiguration) *PersistentVolumeSpecApplyConfiguration { - b.Glusterfs = value + b.PersistentVolumeSourceApplyConfiguration.Glusterfs = value return b } @@ -87,7 +87,7 @@ func (b *PersistentVolumeSpecApplyConfiguration) WithGlusterfs(value *GlusterfsP // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the NFS field is set to the value of the last call. func (b *PersistentVolumeSpecApplyConfiguration) WithNFS(value *NFSVolumeSourceApplyConfiguration) *PersistentVolumeSpecApplyConfiguration { - b.NFS = value + b.PersistentVolumeSourceApplyConfiguration.NFS = value return b } @@ -95,7 +95,7 @@ func (b *PersistentVolumeSpecApplyConfiguration) WithNFS(value *NFSVolumeSourceA // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the RBD field is set to the value of the last call. func (b *PersistentVolumeSpecApplyConfiguration) WithRBD(value *RBDPersistentVolumeSourceApplyConfiguration) *PersistentVolumeSpecApplyConfiguration { - b.RBD = value + b.PersistentVolumeSourceApplyConfiguration.RBD = value return b } @@ -103,7 +103,7 @@ func (b *PersistentVolumeSpecApplyConfiguration) WithRBD(value *RBDPersistentVol // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the ISCSI field is set to the value of the last call. func (b *PersistentVolumeSpecApplyConfiguration) WithISCSI(value *ISCSIPersistentVolumeSourceApplyConfiguration) *PersistentVolumeSpecApplyConfiguration { - b.ISCSI = value + b.PersistentVolumeSourceApplyConfiguration.ISCSI = value return b } @@ -111,7 +111,7 @@ func (b *PersistentVolumeSpecApplyConfiguration) WithISCSI(value *ISCSIPersisten // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Cinder field is set to the value of the last call. func (b *PersistentVolumeSpecApplyConfiguration) WithCinder(value *CinderPersistentVolumeSourceApplyConfiguration) *PersistentVolumeSpecApplyConfiguration { - b.Cinder = value + b.PersistentVolumeSourceApplyConfiguration.Cinder = value return b } @@ -119,7 +119,7 @@ func (b *PersistentVolumeSpecApplyConfiguration) WithCinder(value *CinderPersist // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CephFS field is set to the value of the last call. 
func (b *PersistentVolumeSpecApplyConfiguration) WithCephFS(value *CephFSPersistentVolumeSourceApplyConfiguration) *PersistentVolumeSpecApplyConfiguration { - b.CephFS = value + b.PersistentVolumeSourceApplyConfiguration.CephFS = value return b } @@ -127,7 +127,7 @@ func (b *PersistentVolumeSpecApplyConfiguration) WithCephFS(value *CephFSPersist // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the FC field is set to the value of the last call. func (b *PersistentVolumeSpecApplyConfiguration) WithFC(value *FCVolumeSourceApplyConfiguration) *PersistentVolumeSpecApplyConfiguration { - b.FC = value + b.PersistentVolumeSourceApplyConfiguration.FC = value return b } @@ -135,7 +135,7 @@ func (b *PersistentVolumeSpecApplyConfiguration) WithFC(value *FCVolumeSourceApp // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Flocker field is set to the value of the last call. func (b *PersistentVolumeSpecApplyConfiguration) WithFlocker(value *FlockerVolumeSourceApplyConfiguration) *PersistentVolumeSpecApplyConfiguration { - b.Flocker = value + b.PersistentVolumeSourceApplyConfiguration.Flocker = value return b } @@ -143,7 +143,7 @@ func (b *PersistentVolumeSpecApplyConfiguration) WithFlocker(value *FlockerVolum // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the FlexVolume field is set to the value of the last call. func (b *PersistentVolumeSpecApplyConfiguration) WithFlexVolume(value *FlexPersistentVolumeSourceApplyConfiguration) *PersistentVolumeSpecApplyConfiguration { - b.FlexVolume = value + b.PersistentVolumeSourceApplyConfiguration.FlexVolume = value return b } @@ -151,7 +151,7 @@ func (b *PersistentVolumeSpecApplyConfiguration) WithFlexVolume(value *FlexPersi // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the AzureFile field is set to the value of the last call. func (b *PersistentVolumeSpecApplyConfiguration) WithAzureFile(value *AzureFilePersistentVolumeSourceApplyConfiguration) *PersistentVolumeSpecApplyConfiguration { - b.AzureFile = value + b.PersistentVolumeSourceApplyConfiguration.AzureFile = value return b } @@ -159,7 +159,7 @@ func (b *PersistentVolumeSpecApplyConfiguration) WithAzureFile(value *AzureFileP // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the VsphereVolume field is set to the value of the last call. func (b *PersistentVolumeSpecApplyConfiguration) WithVsphereVolume(value *VsphereVirtualDiskVolumeSourceApplyConfiguration) *PersistentVolumeSpecApplyConfiguration { - b.VsphereVolume = value + b.PersistentVolumeSourceApplyConfiguration.VsphereVolume = value return b } @@ -167,7 +167,7 @@ func (b *PersistentVolumeSpecApplyConfiguration) WithVsphereVolume(value *Vspher // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Quobyte field is set to the value of the last call. 
func (b *PersistentVolumeSpecApplyConfiguration) WithQuobyte(value *QuobyteVolumeSourceApplyConfiguration) *PersistentVolumeSpecApplyConfiguration { - b.Quobyte = value + b.PersistentVolumeSourceApplyConfiguration.Quobyte = value return b } @@ -175,7 +175,7 @@ func (b *PersistentVolumeSpecApplyConfiguration) WithQuobyte(value *QuobyteVolum // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the AzureDisk field is set to the value of the last call. func (b *PersistentVolumeSpecApplyConfiguration) WithAzureDisk(value *AzureDiskVolumeSourceApplyConfiguration) *PersistentVolumeSpecApplyConfiguration { - b.AzureDisk = value + b.PersistentVolumeSourceApplyConfiguration.AzureDisk = value return b } @@ -183,7 +183,7 @@ func (b *PersistentVolumeSpecApplyConfiguration) WithAzureDisk(value *AzureDiskV // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the PhotonPersistentDisk field is set to the value of the last call. func (b *PersistentVolumeSpecApplyConfiguration) WithPhotonPersistentDisk(value *PhotonPersistentDiskVolumeSourceApplyConfiguration) *PersistentVolumeSpecApplyConfiguration { - b.PhotonPersistentDisk = value + b.PersistentVolumeSourceApplyConfiguration.PhotonPersistentDisk = value return b } @@ -191,7 +191,7 @@ func (b *PersistentVolumeSpecApplyConfiguration) WithPhotonPersistentDisk(value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the PortworxVolume field is set to the value of the last call. func (b *PersistentVolumeSpecApplyConfiguration) WithPortworxVolume(value *PortworxVolumeSourceApplyConfiguration) *PersistentVolumeSpecApplyConfiguration { - b.PortworxVolume = value + b.PersistentVolumeSourceApplyConfiguration.PortworxVolume = value return b } @@ -199,7 +199,7 @@ func (b *PersistentVolumeSpecApplyConfiguration) WithPortworxVolume(value *Portw // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the ScaleIO field is set to the value of the last call. func (b *PersistentVolumeSpecApplyConfiguration) WithScaleIO(value *ScaleIOPersistentVolumeSourceApplyConfiguration) *PersistentVolumeSpecApplyConfiguration { - b.ScaleIO = value + b.PersistentVolumeSourceApplyConfiguration.ScaleIO = value return b } @@ -207,7 +207,7 @@ func (b *PersistentVolumeSpecApplyConfiguration) WithScaleIO(value *ScaleIOPersi // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Local field is set to the value of the last call. func (b *PersistentVolumeSpecApplyConfiguration) WithLocal(value *LocalVolumeSourceApplyConfiguration) *PersistentVolumeSpecApplyConfiguration { - b.Local = value + b.PersistentVolumeSourceApplyConfiguration.Local = value return b } @@ -215,7 +215,7 @@ func (b *PersistentVolumeSpecApplyConfiguration) WithLocal(value *LocalVolumeSou // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the StorageOS field is set to the value of the last call. 
func (b *PersistentVolumeSpecApplyConfiguration) WithStorageOS(value *StorageOSPersistentVolumeSourceApplyConfiguration) *PersistentVolumeSpecApplyConfiguration { - b.StorageOS = value + b.PersistentVolumeSourceApplyConfiguration.StorageOS = value return b } @@ -223,14 +223,14 @@ func (b *PersistentVolumeSpecApplyConfiguration) WithStorageOS(value *StorageOSP // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CSI field is set to the value of the last call. func (b *PersistentVolumeSpecApplyConfiguration) WithCSI(value *CSIPersistentVolumeSourceApplyConfiguration) *PersistentVolumeSpecApplyConfiguration { - b.CSI = value + b.PersistentVolumeSourceApplyConfiguration.CSI = value return b } // WithAccessModes adds the given value to the AccessModes field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the AccessModes field. -func (b *PersistentVolumeSpecApplyConfiguration) WithAccessModes(values ...v1.PersistentVolumeAccessMode) *PersistentVolumeSpecApplyConfiguration { +func (b *PersistentVolumeSpecApplyConfiguration) WithAccessModes(values ...corev1.PersistentVolumeAccessMode) *PersistentVolumeSpecApplyConfiguration { for i := range values { b.AccessModes = append(b.AccessModes, values[i]) } @@ -248,7 +248,7 @@ func (b *PersistentVolumeSpecApplyConfiguration) WithClaimRef(value *ObjectRefer // WithPersistentVolumeReclaimPolicy sets the PersistentVolumeReclaimPolicy field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the PersistentVolumeReclaimPolicy field is set to the value of the last call. -func (b *PersistentVolumeSpecApplyConfiguration) WithPersistentVolumeReclaimPolicy(value v1.PersistentVolumeReclaimPolicy) *PersistentVolumeSpecApplyConfiguration { +func (b *PersistentVolumeSpecApplyConfiguration) WithPersistentVolumeReclaimPolicy(value corev1.PersistentVolumeReclaimPolicy) *PersistentVolumeSpecApplyConfiguration { b.PersistentVolumeReclaimPolicy = &value return b } @@ -274,7 +274,7 @@ func (b *PersistentVolumeSpecApplyConfiguration) WithMountOptions(values ...stri // WithVolumeMode sets the VolumeMode field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the VolumeMode field is set to the value of the last call. -func (b *PersistentVolumeSpecApplyConfiguration) WithVolumeMode(value v1.PersistentVolumeMode) *PersistentVolumeSpecApplyConfiguration { +func (b *PersistentVolumeSpecApplyConfiguration) WithVolumeMode(value corev1.PersistentVolumeMode) *PersistentVolumeSpecApplyConfiguration { b.VolumeMode = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumestatus.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumestatus.go index 95ba90f48..0bb077ae0 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumestatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumestatus.go @@ -19,17 +19,17 @@ limitations under the License. 
package v1 import ( - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) // PersistentVolumeStatusApplyConfiguration represents a declarative configuration of the PersistentVolumeStatus type for use // with apply. type PersistentVolumeStatusApplyConfiguration struct { - Phase *v1.PersistentVolumePhase `json:"phase,omitempty"` - Message *string `json:"message,omitempty"` - Reason *string `json:"reason,omitempty"` - LastPhaseTransitionTime *metav1.Time `json:"lastPhaseTransitionTime,omitempty"` + Phase *corev1.PersistentVolumePhase `json:"phase,omitempty"` + Message *string `json:"message,omitempty"` + Reason *string `json:"reason,omitempty"` + LastPhaseTransitionTime *metav1.Time `json:"lastPhaseTransitionTime,omitempty"` } // PersistentVolumeStatusApplyConfiguration constructs a declarative configuration of the PersistentVolumeStatus type for use with @@ -41,7 +41,7 @@ func PersistentVolumeStatus() *PersistentVolumeStatusApplyConfiguration { // WithPhase sets the Phase field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Phase field is set to the value of the last call. -func (b *PersistentVolumeStatusApplyConfiguration) WithPhase(value v1.PersistentVolumePhase) *PersistentVolumeStatusApplyConfiguration { +func (b *PersistentVolumeStatusApplyConfiguration) WithPhase(value corev1.PersistentVolumePhase) *PersistentVolumeStatusApplyConfiguration { b.Phase = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/pod.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/pod.go index 507d57d6f..29526709f 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/pod.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/pod.go @@ -19,21 +19,21 @@ limitations under the License. package v1 import ( - apicorev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + corev1 "k8s.io/api/core/v1" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" internal "k8s.io/client-go/applyconfigurations/internal" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) // PodApplyConfiguration represents a declarative configuration of the Pod type for use // with apply. type PodApplyConfiguration struct { - v1.TypeMetaApplyConfiguration `json:",inline"` - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Spec *PodSpecApplyConfiguration `json:"spec,omitempty"` - Status *PodStatusApplyConfiguration `json:"status,omitempty"` + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *PodSpecApplyConfiguration `json:"spec,omitempty"` + Status *PodStatusApplyConfiguration `json:"status,omitempty"` } // Pod constructs a declarative configuration of the Pod type for use with @@ -58,18 +58,18 @@ func Pod(name, namespace string) *PodApplyConfiguration { // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! 
-func ExtractPod(pod *apicorev1.Pod, fieldManager string) (*PodApplyConfiguration, error) { +func ExtractPod(pod *corev1.Pod, fieldManager string) (*PodApplyConfiguration, error) { return extractPod(pod, fieldManager, "") } // ExtractPodStatus is the same as ExtractPod except // that it extracts the status subresource applied configuration. // Experimental! -func ExtractPodStatus(pod *apicorev1.Pod, fieldManager string) (*PodApplyConfiguration, error) { +func ExtractPodStatus(pod *corev1.Pod, fieldManager string) (*PodApplyConfiguration, error) { return extractPod(pod, fieldManager, "status") } -func extractPod(pod *apicorev1.Pod, fieldManager string, subresource string) (*PodApplyConfiguration, error) { +func extractPod(pod *corev1.Pod, fieldManager string, subresource string) (*PodApplyConfiguration, error) { b := &PodApplyConfiguration{} err := managedfields.ExtractInto(pod, internal.Parser().Type("io.k8s.api.core.v1.Pod"), fieldManager, b, subresource) if err != nil { @@ -87,7 +87,7 @@ func extractPod(pod *apicorev1.Pod, fieldManager string, subresource string) (*P // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *PodApplyConfiguration) WithKind(value string) *PodApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -95,7 +95,7 @@ func (b *PodApplyConfiguration) WithKind(value string) *PodApplyConfiguration { // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *PodApplyConfiguration) WithAPIVersion(value string) *PodApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -104,7 +104,7 @@ func (b *PodApplyConfiguration) WithAPIVersion(value string) *PodApplyConfigurat // If called multiple times, the Name field is set to the value of the last call. func (b *PodApplyConfiguration) WithName(value string) *PodApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -113,7 +113,7 @@ func (b *PodApplyConfiguration) WithName(value string) *PodApplyConfiguration { // If called multiple times, the GenerateName field is set to the value of the last call. func (b *PodApplyConfiguration) WithGenerateName(value string) *PodApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -122,7 +122,7 @@ func (b *PodApplyConfiguration) WithGenerateName(value string) *PodApplyConfigur // If called multiple times, the Namespace field is set to the value of the last call. func (b *PodApplyConfiguration) WithNamespace(value string) *PodApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -131,7 +131,7 @@ func (b *PodApplyConfiguration) WithNamespace(value string) *PodApplyConfigurati // If called multiple times, the UID field is set to the value of the last call. 
func (b *PodApplyConfiguration) WithUID(value types.UID) *PodApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -140,7 +140,7 @@ func (b *PodApplyConfiguration) WithUID(value types.UID) *PodApplyConfiguration // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *PodApplyConfiguration) WithResourceVersion(value string) *PodApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -149,25 +149,25 @@ func (b *PodApplyConfiguration) WithResourceVersion(value string) *PodApplyConfi // If called multiple times, the Generation field is set to the value of the last call. func (b *PodApplyConfiguration) WithGeneration(value int64) *PodApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CreationTimestamp field is set to the value of the last call. -func (b *PodApplyConfiguration) WithCreationTimestamp(value metav1.Time) *PodApplyConfiguration { +func (b *PodApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *PodApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. -func (b *PodApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *PodApplyConfiguration { +func (b *PodApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *PodApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -176,7 +176,7 @@ func (b *PodApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *PodApp // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *PodApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *PodApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -186,11 +186,11 @@ func (b *PodApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *Pod // overwriting an existing map entries in Labels field with the same key. 
func (b *PodApplyConfiguration) WithLabels(entries map[string]string) *PodApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -201,11 +201,11 @@ func (b *PodApplyConfiguration) WithLabels(entries map[string]string) *PodApplyC // overwriting an existing map entries in Annotations field with the same key. func (b *PodApplyConfiguration) WithAnnotations(entries map[string]string) *PodApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -213,13 +213,13 @@ func (b *PodApplyConfiguration) WithAnnotations(entries map[string]string) *PodA // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the OwnerReferences field. -func (b *PodApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *PodApplyConfiguration { +func (b *PodApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *PodApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -230,14 +230,14 @@ func (b *PodApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReference func (b *PodApplyConfiguration) WithFinalizers(values ...string) *PodApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } func (b *PodApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} } } @@ -260,5 +260,5 @@ func (b *PodApplyConfiguration) WithStatus(value *PodStatusApplyConfiguration) * // GetName retrieves the value of the Name field in the declarative configuration. 
func (b *PodApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podaffinityterm.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podaffinityterm.go index 3afce026d..1cc1ca0d0 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podaffinityterm.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podaffinityterm.go @@ -19,18 +19,18 @@ limitations under the License. package v1 import ( - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) // PodAffinityTermApplyConfiguration represents a declarative configuration of the PodAffinityTerm type for use // with apply. type PodAffinityTermApplyConfiguration struct { - LabelSelector *v1.LabelSelectorApplyConfiguration `json:"labelSelector,omitempty"` - Namespaces []string `json:"namespaces,omitempty"` - TopologyKey *string `json:"topologyKey,omitempty"` - NamespaceSelector *v1.LabelSelectorApplyConfiguration `json:"namespaceSelector,omitempty"` - MatchLabelKeys []string `json:"matchLabelKeys,omitempty"` - MismatchLabelKeys []string `json:"mismatchLabelKeys,omitempty"` + LabelSelector *metav1.LabelSelectorApplyConfiguration `json:"labelSelector,omitempty"` + Namespaces []string `json:"namespaces,omitempty"` + TopologyKey *string `json:"topologyKey,omitempty"` + NamespaceSelector *metav1.LabelSelectorApplyConfiguration `json:"namespaceSelector,omitempty"` + MatchLabelKeys []string `json:"matchLabelKeys,omitempty"` + MismatchLabelKeys []string `json:"mismatchLabelKeys,omitempty"` } // PodAffinityTermApplyConfiguration constructs a declarative configuration of the PodAffinityTerm type for use with @@ -42,7 +42,7 @@ func PodAffinityTerm() *PodAffinityTermApplyConfiguration { // WithLabelSelector sets the LabelSelector field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the LabelSelector field is set to the value of the last call. -func (b *PodAffinityTermApplyConfiguration) WithLabelSelector(value *v1.LabelSelectorApplyConfiguration) *PodAffinityTermApplyConfiguration { +func (b *PodAffinityTermApplyConfiguration) WithLabelSelector(value *metav1.LabelSelectorApplyConfiguration) *PodAffinityTermApplyConfiguration { b.LabelSelector = value return b } @@ -68,7 +68,7 @@ func (b *PodAffinityTermApplyConfiguration) WithTopologyKey(value string) *PodAf // WithNamespaceSelector sets the NamespaceSelector field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the NamespaceSelector field is set to the value of the last call. 
-func (b *PodAffinityTermApplyConfiguration) WithNamespaceSelector(value *v1.LabelSelectorApplyConfiguration) *PodAffinityTermApplyConfiguration { +func (b *PodAffinityTermApplyConfiguration) WithNamespaceSelector(value *metav1.LabelSelectorApplyConfiguration) *PodAffinityTermApplyConfiguration { b.NamespaceSelector = value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podcondition.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podcondition.go index 98968d26d..67cd1bd09 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podcondition.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podcondition.go @@ -19,19 +19,19 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) // PodConditionApplyConfiguration represents a declarative configuration of the PodCondition type for use // with apply. type PodConditionApplyConfiguration struct { - Type *v1.PodConditionType `json:"type,omitempty"` - Status *v1.ConditionStatus `json:"status,omitempty"` - LastProbeTime *metav1.Time `json:"lastProbeTime,omitempty"` - LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"` - Reason *string `json:"reason,omitempty"` - Message *string `json:"message,omitempty"` + Type *corev1.PodConditionType `json:"type,omitempty"` + Status *corev1.ConditionStatus `json:"status,omitempty"` + LastProbeTime *metav1.Time `json:"lastProbeTime,omitempty"` + LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"` + Reason *string `json:"reason,omitempty"` + Message *string `json:"message,omitempty"` } // PodConditionApplyConfiguration constructs a declarative configuration of the PodCondition type for use with @@ -43,7 +43,7 @@ func PodCondition() *PodConditionApplyConfiguration { // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. -func (b *PodConditionApplyConfiguration) WithType(value v1.PodConditionType) *PodConditionApplyConfiguration { +func (b *PodConditionApplyConfiguration) WithType(value corev1.PodConditionType) *PodConditionApplyConfiguration { b.Type = &value return b } @@ -51,7 +51,7 @@ func (b *PodConditionApplyConfiguration) WithType(value v1.PodConditionType) *Po // WithStatus sets the Status field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Status field is set to the value of the last call. -func (b *PodConditionApplyConfiguration) WithStatus(value v1.ConditionStatus) *PodConditionApplyConfiguration { +func (b *PodConditionApplyConfiguration) WithStatus(value corev1.ConditionStatus) *PodConditionApplyConfiguration { b.Status = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podos.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podos.go index 7f156f817..22a745601 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podos.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podos.go @@ -19,13 +19,13 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" ) // PodOSApplyConfiguration represents a declarative configuration of the PodOS type for use // with apply. 
type PodOSApplyConfiguration struct { - Name *v1.OSName `json:"name,omitempty"` + Name *corev1.OSName `json:"name,omitempty"` } // PodOSApplyConfiguration constructs a declarative configuration of the PodOS type for use with @@ -37,7 +37,7 @@ func PodOS() *PodOSApplyConfiguration { // WithName sets the Name field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Name field is set to the value of the last call. -func (b *PodOSApplyConfiguration) WithName(value v1.OSName) *PodOSApplyConfiguration { +func (b *PodOSApplyConfiguration) WithName(value corev1.OSName) *PodOSApplyConfiguration { b.Name = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podreadinessgate.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podreadinessgate.go index 09746df1b..4298b1ca6 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podreadinessgate.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podreadinessgate.go @@ -19,13 +19,13 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" ) // PodReadinessGateApplyConfiguration represents a declarative configuration of the PodReadinessGate type for use // with apply. type PodReadinessGateApplyConfiguration struct { - ConditionType *v1.PodConditionType `json:"conditionType,omitempty"` + ConditionType *corev1.PodConditionType `json:"conditionType,omitempty"` } // PodReadinessGateApplyConfiguration constructs a declarative configuration of the PodReadinessGate type for use with @@ -37,7 +37,7 @@ func PodReadinessGate() *PodReadinessGateApplyConfiguration { // WithConditionType sets the ConditionType field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the ConditionType field is set to the value of the last call. 
-func (b *PodReadinessGateApplyConfiguration) WithConditionType(value v1.PodConditionType) *PodReadinessGateApplyConfiguration { +func (b *PodReadinessGateApplyConfiguration) WithConditionType(value corev1.PodConditionType) *PodReadinessGateApplyConfiguration { b.ConditionType = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podsecuritycontext.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podsecuritycontext.go index 55085e630..f0a3e662c 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podsecuritycontext.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podsecuritycontext.go @@ -37,6 +37,7 @@ type PodSecurityContextApplyConfiguration struct { FSGroupChangePolicy *corev1.PodFSGroupChangePolicy `json:"fsGroupChangePolicy,omitempty"` SeccompProfile *SeccompProfileApplyConfiguration `json:"seccompProfile,omitempty"` AppArmorProfile *AppArmorProfileApplyConfiguration `json:"appArmorProfile,omitempty"` + SELinuxChangePolicy *corev1.PodSELinuxChangePolicy `json:"seLinuxChangePolicy,omitempty"` } // PodSecurityContextApplyConfiguration constructs a declarative configuration of the PodSecurityContext type for use with @@ -147,3 +148,11 @@ func (b *PodSecurityContextApplyConfiguration) WithAppArmorProfile(value *AppArm b.AppArmorProfile = value return b } + +// WithSELinuxChangePolicy sets the SELinuxChangePolicy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the SELinuxChangePolicy field is set to the value of the last call. +func (b *PodSecurityContextApplyConfiguration) WithSELinuxChangePolicy(value corev1.PodSELinuxChangePolicy) *PodSecurityContextApplyConfiguration { + b.SELinuxChangePolicy = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podspec.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podspec.go index 8134e044f..96f6eb94b 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podspec.go @@ -64,6 +64,7 @@ type PodSpecApplyConfiguration struct { HostUsers *bool `json:"hostUsers,omitempty"` SchedulingGates []PodSchedulingGateApplyConfiguration `json:"schedulingGates,omitempty"` ResourceClaims []PodResourceClaimApplyConfiguration `json:"resourceClaims,omitempty"` + Resources *ResourceRequirementsApplyConfiguration `json:"resources,omitempty"` } // PodSpecApplyConfiguration constructs a declarative configuration of the PodSpec type for use with @@ -444,3 +445,11 @@ func (b *PodSpecApplyConfiguration) WithResourceClaims(values ...*PodResourceCla } return b } + +// WithResources sets the Resources field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Resources field is set to the value of the last call. 
+func (b *PodSpecApplyConfiguration) WithResources(value *ResourceRequirementsApplyConfiguration) *PodSpecApplyConfiguration { + b.Resources = value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podstatus.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podstatus.go index 0b68996cd..b79e1210a 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podstatus.go @@ -19,14 +19,14 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) // PodStatusApplyConfiguration represents a declarative configuration of the PodStatus type for use // with apply. type PodStatusApplyConfiguration struct { - Phase *v1.PodPhase `json:"phase,omitempty"` + Phase *corev1.PodPhase `json:"phase,omitempty"` Conditions []PodConditionApplyConfiguration `json:"conditions,omitempty"` Message *string `json:"message,omitempty"` Reason *string `json:"reason,omitempty"` @@ -38,9 +38,9 @@ type PodStatusApplyConfiguration struct { StartTime *metav1.Time `json:"startTime,omitempty"` InitContainerStatuses []ContainerStatusApplyConfiguration `json:"initContainerStatuses,omitempty"` ContainerStatuses []ContainerStatusApplyConfiguration `json:"containerStatuses,omitempty"` - QOSClass *v1.PodQOSClass `json:"qosClass,omitempty"` + QOSClass *corev1.PodQOSClass `json:"qosClass,omitempty"` EphemeralContainerStatuses []ContainerStatusApplyConfiguration `json:"ephemeralContainerStatuses,omitempty"` - Resize *v1.PodResizeStatus `json:"resize,omitempty"` + Resize *corev1.PodResizeStatus `json:"resize,omitempty"` ResourceClaimStatuses []PodResourceClaimStatusApplyConfiguration `json:"resourceClaimStatuses,omitempty"` } @@ -53,7 +53,7 @@ func PodStatus() *PodStatusApplyConfiguration { // WithPhase sets the Phase field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Phase field is set to the value of the last call. -func (b *PodStatusApplyConfiguration) WithPhase(value v1.PodPhase) *PodStatusApplyConfiguration { +func (b *PodStatusApplyConfiguration) WithPhase(value corev1.PodPhase) *PodStatusApplyConfiguration { b.Phase = &value return b } @@ -174,7 +174,7 @@ func (b *PodStatusApplyConfiguration) WithContainerStatuses(values ...*Container // WithQOSClass sets the QOSClass field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the QOSClass field is set to the value of the last call. -func (b *PodStatusApplyConfiguration) WithQOSClass(value v1.PodQOSClass) *PodStatusApplyConfiguration { +func (b *PodStatusApplyConfiguration) WithQOSClass(value corev1.PodQOSClass) *PodStatusApplyConfiguration { b.QOSClass = &value return b } @@ -195,7 +195,7 @@ func (b *PodStatusApplyConfiguration) WithEphemeralContainerStatuses(values ...* // WithResize sets the Resize field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Resize field is set to the value of the last call. 
-func (b *PodStatusApplyConfiguration) WithResize(value v1.PodResizeStatus) *PodStatusApplyConfiguration { +func (b *PodStatusApplyConfiguration) WithResize(value corev1.PodResizeStatus) *PodStatusApplyConfiguration { b.Resize = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podtemplate.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podtemplate.go index b4c8a658a..7886ea2d9 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podtemplate.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podtemplate.go @@ -19,20 +19,20 @@ limitations under the License. package v1 import ( - apicorev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + corev1 "k8s.io/api/core/v1" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" internal "k8s.io/client-go/applyconfigurations/internal" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) // PodTemplateApplyConfiguration represents a declarative configuration of the PodTemplate type for use // with apply. type PodTemplateApplyConfiguration struct { - v1.TypeMetaApplyConfiguration `json:",inline"` - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Template *PodTemplateSpecApplyConfiguration `json:"template,omitempty"` + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Template *PodTemplateSpecApplyConfiguration `json:"template,omitempty"` } // PodTemplate constructs a declarative configuration of the PodTemplate type for use with @@ -57,18 +57,18 @@ func PodTemplate(name, namespace string) *PodTemplateApplyConfiguration { // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! -func ExtractPodTemplate(podTemplate *apicorev1.PodTemplate, fieldManager string) (*PodTemplateApplyConfiguration, error) { +func ExtractPodTemplate(podTemplate *corev1.PodTemplate, fieldManager string) (*PodTemplateApplyConfiguration, error) { return extractPodTemplate(podTemplate, fieldManager, "") } // ExtractPodTemplateStatus is the same as ExtractPodTemplate except // that it extracts the status subresource applied configuration. // Experimental! -func ExtractPodTemplateStatus(podTemplate *apicorev1.PodTemplate, fieldManager string) (*PodTemplateApplyConfiguration, error) { +func ExtractPodTemplateStatus(podTemplate *corev1.PodTemplate, fieldManager string) (*PodTemplateApplyConfiguration, error) { return extractPodTemplate(podTemplate, fieldManager, "status") } -func extractPodTemplate(podTemplate *apicorev1.PodTemplate, fieldManager string, subresource string) (*PodTemplateApplyConfiguration, error) { +func extractPodTemplate(podTemplate *corev1.PodTemplate, fieldManager string, subresource string) (*PodTemplateApplyConfiguration, error) { b := &PodTemplateApplyConfiguration{} err := managedfields.ExtractInto(podTemplate, internal.Parser().Type("io.k8s.api.core.v1.PodTemplate"), fieldManager, b, subresource) if err != nil { @@ -86,7 +86,7 @@ func extractPodTemplate(podTemplate *apicorev1.PodTemplate, fieldManager string, // and returns the receiver, so that objects can be built by chaining "With" function invocations. 
// If called multiple times, the Kind field is set to the value of the last call. func (b *PodTemplateApplyConfiguration) WithKind(value string) *PodTemplateApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -94,7 +94,7 @@ func (b *PodTemplateApplyConfiguration) WithKind(value string) *PodTemplateApply // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *PodTemplateApplyConfiguration) WithAPIVersion(value string) *PodTemplateApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -103,7 +103,7 @@ func (b *PodTemplateApplyConfiguration) WithAPIVersion(value string) *PodTemplat // If called multiple times, the Name field is set to the value of the last call. func (b *PodTemplateApplyConfiguration) WithName(value string) *PodTemplateApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -112,7 +112,7 @@ func (b *PodTemplateApplyConfiguration) WithName(value string) *PodTemplateApply // If called multiple times, the GenerateName field is set to the value of the last call. func (b *PodTemplateApplyConfiguration) WithGenerateName(value string) *PodTemplateApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -121,7 +121,7 @@ func (b *PodTemplateApplyConfiguration) WithGenerateName(value string) *PodTempl // If called multiple times, the Namespace field is set to the value of the last call. func (b *PodTemplateApplyConfiguration) WithNamespace(value string) *PodTemplateApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -130,7 +130,7 @@ func (b *PodTemplateApplyConfiguration) WithNamespace(value string) *PodTemplate // If called multiple times, the UID field is set to the value of the last call. func (b *PodTemplateApplyConfiguration) WithUID(value types.UID) *PodTemplateApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -139,7 +139,7 @@ func (b *PodTemplateApplyConfiguration) WithUID(value types.UID) *PodTemplateApp // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *PodTemplateApplyConfiguration) WithResourceVersion(value string) *PodTemplateApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -148,25 +148,25 @@ func (b *PodTemplateApplyConfiguration) WithResourceVersion(value string) *PodTe // If called multiple times, the Generation field is set to the value of the last call. func (b *PodTemplateApplyConfiguration) WithGeneration(value int64) *PodTemplateApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CreationTimestamp field is set to the value of the last call. 
-func (b *PodTemplateApplyConfiguration) WithCreationTimestamp(value metav1.Time) *PodTemplateApplyConfiguration { +func (b *PodTemplateApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *PodTemplateApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. -func (b *PodTemplateApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *PodTemplateApplyConfiguration { +func (b *PodTemplateApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *PodTemplateApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -175,7 +175,7 @@ func (b *PodTemplateApplyConfiguration) WithDeletionTimestamp(value metav1.Time) // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *PodTemplateApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *PodTemplateApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -185,11 +185,11 @@ func (b *PodTemplateApplyConfiguration) WithDeletionGracePeriodSeconds(value int // overwriting an existing map entries in Labels field with the same key. func (b *PodTemplateApplyConfiguration) WithLabels(entries map[string]string) *PodTemplateApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -200,11 +200,11 @@ func (b *PodTemplateApplyConfiguration) WithLabels(entries map[string]string) *P // overwriting an existing map entries in Annotations field with the same key. func (b *PodTemplateApplyConfiguration) WithAnnotations(entries map[string]string) *PodTemplateApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -212,13 +212,13 @@ func (b *PodTemplateApplyConfiguration) WithAnnotations(entries map[string]strin // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the OwnerReferences field. 
-func (b *PodTemplateApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *PodTemplateApplyConfiguration { +func (b *PodTemplateApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *PodTemplateApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -229,14 +229,14 @@ func (b *PodTemplateApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerR func (b *PodTemplateApplyConfiguration) WithFinalizers(values ...string) *PodTemplateApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } func (b *PodTemplateApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} } } @@ -251,5 +251,5 @@ func (b *PodTemplateApplyConfiguration) WithTemplate(value *PodTemplateSpecApply // GetName retrieves the value of the Name field in the declarative configuration. func (b *PodTemplateApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podtemplatespec.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podtemplatespec.go index 6146c01c7..2e0904a24 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podtemplatespec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podtemplatespec.go @@ -19,16 +19,16 @@ limitations under the License. package v1 import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) // PodTemplateSpecApplyConfiguration represents a declarative configuration of the PodTemplateSpec type for use // with apply. type PodTemplateSpecApplyConfiguration struct { - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Spec *PodSpecApplyConfiguration `json:"spec,omitempty"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *PodSpecApplyConfiguration `json:"spec,omitempty"` } // PodTemplateSpecApplyConfiguration constructs a declarative configuration of the PodTemplateSpec type for use with @@ -42,7 +42,7 @@ func PodTemplateSpec() *PodTemplateSpecApplyConfiguration { // If called multiple times, the Name field is set to the value of the last call. func (b *PodTemplateSpecApplyConfiguration) WithName(value string) *PodTemplateSpecApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -51,7 +51,7 @@ func (b *PodTemplateSpecApplyConfiguration) WithName(value string) *PodTemplateS // If called multiple times, the GenerateName field is set to the value of the last call. 
func (b *PodTemplateSpecApplyConfiguration) WithGenerateName(value string) *PodTemplateSpecApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -60,7 +60,7 @@ func (b *PodTemplateSpecApplyConfiguration) WithGenerateName(value string) *PodT // If called multiple times, the Namespace field is set to the value of the last call. func (b *PodTemplateSpecApplyConfiguration) WithNamespace(value string) *PodTemplateSpecApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -69,7 +69,7 @@ func (b *PodTemplateSpecApplyConfiguration) WithNamespace(value string) *PodTemp // If called multiple times, the UID field is set to the value of the last call. func (b *PodTemplateSpecApplyConfiguration) WithUID(value types.UID) *PodTemplateSpecApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -78,7 +78,7 @@ func (b *PodTemplateSpecApplyConfiguration) WithUID(value types.UID) *PodTemplat // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *PodTemplateSpecApplyConfiguration) WithResourceVersion(value string) *PodTemplateSpecApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -87,25 +87,25 @@ func (b *PodTemplateSpecApplyConfiguration) WithResourceVersion(value string) *P // If called multiple times, the Generation field is set to the value of the last call. func (b *PodTemplateSpecApplyConfiguration) WithGeneration(value int64) *PodTemplateSpecApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CreationTimestamp field is set to the value of the last call. -func (b *PodTemplateSpecApplyConfiguration) WithCreationTimestamp(value metav1.Time) *PodTemplateSpecApplyConfiguration { +func (b *PodTemplateSpecApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *PodTemplateSpecApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. 
-func (b *PodTemplateSpecApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *PodTemplateSpecApplyConfiguration { +func (b *PodTemplateSpecApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *PodTemplateSpecApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -114,7 +114,7 @@ func (b *PodTemplateSpecApplyConfiguration) WithDeletionTimestamp(value metav1.T // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *PodTemplateSpecApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *PodTemplateSpecApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -124,11 +124,11 @@ func (b *PodTemplateSpecApplyConfiguration) WithDeletionGracePeriodSeconds(value // overwriting an existing map entries in Labels field with the same key. func (b *PodTemplateSpecApplyConfiguration) WithLabels(entries map[string]string) *PodTemplateSpecApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -139,11 +139,11 @@ func (b *PodTemplateSpecApplyConfiguration) WithLabels(entries map[string]string // overwriting an existing map entries in Annotations field with the same key. func (b *PodTemplateSpecApplyConfiguration) WithAnnotations(entries map[string]string) *PodTemplateSpecApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -151,13 +151,13 @@ func (b *PodTemplateSpecApplyConfiguration) WithAnnotations(entries map[string]s // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the OwnerReferences field. 
-func (b *PodTemplateSpecApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *PodTemplateSpecApplyConfiguration { +func (b *PodTemplateSpecApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *PodTemplateSpecApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -168,14 +168,14 @@ func (b *PodTemplateSpecApplyConfiguration) WithOwnerReferences(values ...*v1.Ow func (b *PodTemplateSpecApplyConfiguration) WithFinalizers(values ...string) *PodTemplateSpecApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } func (b *PodTemplateSpecApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} } } @@ -190,5 +190,5 @@ func (b *PodTemplateSpecApplyConfiguration) WithSpec(value *PodSpecApplyConfigur // GetName retrieves the value of the Name field in the declarative configuration. func (b *PodTemplateSpecApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/portstatus.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/portstatus.go index 5e738cabd..eff8fc2ac 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/portstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/portstatus.go @@ -19,15 +19,15 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" ) // PortStatusApplyConfiguration represents a declarative configuration of the PortStatus type for use // with apply. type PortStatusApplyConfiguration struct { - Port *int32 `json:"port,omitempty"` - Protocol *v1.Protocol `json:"protocol,omitempty"` - Error *string `json:"error,omitempty"` + Port *int32 `json:"port,omitempty"` + Protocol *corev1.Protocol `json:"protocol,omitempty"` + Error *string `json:"error,omitempty"` } // PortStatusApplyConfiguration constructs a declarative configuration of the PortStatus type for use with @@ -47,7 +47,7 @@ func (b *PortStatusApplyConfiguration) WithPort(value int32) *PortStatusApplyCon // WithProtocol sets the Protocol field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Protocol field is set to the value of the last call. 
-func (b *PortStatusApplyConfiguration) WithProtocol(value v1.Protocol) *PortStatusApplyConfiguration { +func (b *PortStatusApplyConfiguration) WithProtocol(value corev1.Protocol) *PortStatusApplyConfiguration { b.Protocol = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/probe.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/probe.go index 3be1c9650..d6c654689 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/probe.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/probe.go @@ -40,7 +40,7 @@ func Probe() *ProbeApplyConfiguration { // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Exec field is set to the value of the last call. func (b *ProbeApplyConfiguration) WithExec(value *ExecActionApplyConfiguration) *ProbeApplyConfiguration { - b.Exec = value + b.ProbeHandlerApplyConfiguration.Exec = value return b } @@ -48,7 +48,7 @@ func (b *ProbeApplyConfiguration) WithExec(value *ExecActionApplyConfiguration) // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the HTTPGet field is set to the value of the last call. func (b *ProbeApplyConfiguration) WithHTTPGet(value *HTTPGetActionApplyConfiguration) *ProbeApplyConfiguration { - b.HTTPGet = value + b.ProbeHandlerApplyConfiguration.HTTPGet = value return b } @@ -56,7 +56,7 @@ func (b *ProbeApplyConfiguration) WithHTTPGet(value *HTTPGetActionApplyConfigura // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the TCPSocket field is set to the value of the last call. func (b *ProbeApplyConfiguration) WithTCPSocket(value *TCPSocketActionApplyConfiguration) *ProbeApplyConfiguration { - b.TCPSocket = value + b.ProbeHandlerApplyConfiguration.TCPSocket = value return b } @@ -64,7 +64,7 @@ func (b *ProbeApplyConfiguration) WithTCPSocket(value *TCPSocketActionApplyConfi // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the GRPC field is set to the value of the last call. func (b *ProbeApplyConfiguration) WithGRPC(value *GRPCActionApplyConfiguration) *ProbeApplyConfiguration { - b.GRPC = value + b.ProbeHandlerApplyConfiguration.GRPC = value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/replicationcontroller.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/replicationcontroller.go index b28f422dc..4ef551914 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/replicationcontroller.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/replicationcontroller.go @@ -19,21 +19,21 @@ limitations under the License. package v1 import ( - apicorev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + corev1 "k8s.io/api/core/v1" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" internal "k8s.io/client-go/applyconfigurations/internal" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) // ReplicationControllerApplyConfiguration represents a declarative configuration of the ReplicationController type for use // with apply. 
type ReplicationControllerApplyConfiguration struct { - v1.TypeMetaApplyConfiguration `json:",inline"` - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Spec *ReplicationControllerSpecApplyConfiguration `json:"spec,omitempty"` - Status *ReplicationControllerStatusApplyConfiguration `json:"status,omitempty"` + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *ReplicationControllerSpecApplyConfiguration `json:"spec,omitempty"` + Status *ReplicationControllerStatusApplyConfiguration `json:"status,omitempty"` } // ReplicationController constructs a declarative configuration of the ReplicationController type for use with @@ -58,18 +58,18 @@ func ReplicationController(name, namespace string) *ReplicationControllerApplyCo // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! -func ExtractReplicationController(replicationController *apicorev1.ReplicationController, fieldManager string) (*ReplicationControllerApplyConfiguration, error) { +func ExtractReplicationController(replicationController *corev1.ReplicationController, fieldManager string) (*ReplicationControllerApplyConfiguration, error) { return extractReplicationController(replicationController, fieldManager, "") } // ExtractReplicationControllerStatus is the same as ExtractReplicationController except // that it extracts the status subresource applied configuration. // Experimental! -func ExtractReplicationControllerStatus(replicationController *apicorev1.ReplicationController, fieldManager string) (*ReplicationControllerApplyConfiguration, error) { +func ExtractReplicationControllerStatus(replicationController *corev1.ReplicationController, fieldManager string) (*ReplicationControllerApplyConfiguration, error) { return extractReplicationController(replicationController, fieldManager, "status") } -func extractReplicationController(replicationController *apicorev1.ReplicationController, fieldManager string, subresource string) (*ReplicationControllerApplyConfiguration, error) { +func extractReplicationController(replicationController *corev1.ReplicationController, fieldManager string, subresource string) (*ReplicationControllerApplyConfiguration, error) { b := &ReplicationControllerApplyConfiguration{} err := managedfields.ExtractInto(replicationController, internal.Parser().Type("io.k8s.api.core.v1.ReplicationController"), fieldManager, b, subresource) if err != nil { @@ -87,7 +87,7 @@ func extractReplicationController(replicationController *apicorev1.ReplicationCo // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *ReplicationControllerApplyConfiguration) WithKind(value string) *ReplicationControllerApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -95,7 +95,7 @@ func (b *ReplicationControllerApplyConfiguration) WithKind(value string) *Replic // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. 
func (b *ReplicationControllerApplyConfiguration) WithAPIVersion(value string) *ReplicationControllerApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -104,7 +104,7 @@ func (b *ReplicationControllerApplyConfiguration) WithAPIVersion(value string) * // If called multiple times, the Name field is set to the value of the last call. func (b *ReplicationControllerApplyConfiguration) WithName(value string) *ReplicationControllerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -113,7 +113,7 @@ func (b *ReplicationControllerApplyConfiguration) WithName(value string) *Replic // If called multiple times, the GenerateName field is set to the value of the last call. func (b *ReplicationControllerApplyConfiguration) WithGenerateName(value string) *ReplicationControllerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -122,7 +122,7 @@ func (b *ReplicationControllerApplyConfiguration) WithGenerateName(value string) // If called multiple times, the Namespace field is set to the value of the last call. func (b *ReplicationControllerApplyConfiguration) WithNamespace(value string) *ReplicationControllerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -131,7 +131,7 @@ func (b *ReplicationControllerApplyConfiguration) WithNamespace(value string) *R // If called multiple times, the UID field is set to the value of the last call. func (b *ReplicationControllerApplyConfiguration) WithUID(value types.UID) *ReplicationControllerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -140,7 +140,7 @@ func (b *ReplicationControllerApplyConfiguration) WithUID(value types.UID) *Repl // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *ReplicationControllerApplyConfiguration) WithResourceVersion(value string) *ReplicationControllerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -149,25 +149,25 @@ func (b *ReplicationControllerApplyConfiguration) WithResourceVersion(value stri // If called multiple times, the Generation field is set to the value of the last call. func (b *ReplicationControllerApplyConfiguration) WithGeneration(value int64) *ReplicationControllerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CreationTimestamp field is set to the value of the last call. 
-func (b *ReplicationControllerApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ReplicationControllerApplyConfiguration { +func (b *ReplicationControllerApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *ReplicationControllerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. -func (b *ReplicationControllerApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ReplicationControllerApplyConfiguration { +func (b *ReplicationControllerApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *ReplicationControllerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -176,7 +176,7 @@ func (b *ReplicationControllerApplyConfiguration) WithDeletionTimestamp(value me // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *ReplicationControllerApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ReplicationControllerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -186,11 +186,11 @@ func (b *ReplicationControllerApplyConfiguration) WithDeletionGracePeriodSeconds // overwriting an existing map entries in Labels field with the same key. func (b *ReplicationControllerApplyConfiguration) WithLabels(entries map[string]string) *ReplicationControllerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -201,11 +201,11 @@ func (b *ReplicationControllerApplyConfiguration) WithLabels(entries map[string] // overwriting an existing map entries in Annotations field with the same key. func (b *ReplicationControllerApplyConfiguration) WithAnnotations(entries map[string]string) *ReplicationControllerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -213,13 +213,13 @@ func (b *ReplicationControllerApplyConfiguration) WithAnnotations(entries map[st // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the OwnerReferences field. 
-func (b *ReplicationControllerApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ReplicationControllerApplyConfiguration { +func (b *ReplicationControllerApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *ReplicationControllerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -230,14 +230,14 @@ func (b *ReplicationControllerApplyConfiguration) WithOwnerReferences(values ... func (b *ReplicationControllerApplyConfiguration) WithFinalizers(values ...string) *ReplicationControllerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } func (b *ReplicationControllerApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} } } @@ -260,5 +260,5 @@ func (b *ReplicationControllerApplyConfiguration) WithStatus(value *ReplicationC // GetName retrieves the value of the Name field in the declarative configuration. func (b *ReplicationControllerApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/replicationcontrollercondition.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/replicationcontrollercondition.go index 0d74c1db9..dfcecc053 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/replicationcontrollercondition.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/replicationcontrollercondition.go @@ -19,18 +19,18 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) // ReplicationControllerConditionApplyConfiguration represents a declarative configuration of the ReplicationControllerCondition type for use // with apply. 
type ReplicationControllerConditionApplyConfiguration struct { - Type *v1.ReplicationControllerConditionType `json:"type,omitempty"` - Status *v1.ConditionStatus `json:"status,omitempty"` - LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"` - Reason *string `json:"reason,omitempty"` - Message *string `json:"message,omitempty"` + Type *corev1.ReplicationControllerConditionType `json:"type,omitempty"` + Status *corev1.ConditionStatus `json:"status,omitempty"` + LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"` + Reason *string `json:"reason,omitempty"` + Message *string `json:"message,omitempty"` } // ReplicationControllerConditionApplyConfiguration constructs a declarative configuration of the ReplicationControllerCondition type for use with @@ -42,7 +42,7 @@ func ReplicationControllerCondition() *ReplicationControllerConditionApplyConfig // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. -func (b *ReplicationControllerConditionApplyConfiguration) WithType(value v1.ReplicationControllerConditionType) *ReplicationControllerConditionApplyConfiguration { +func (b *ReplicationControllerConditionApplyConfiguration) WithType(value corev1.ReplicationControllerConditionType) *ReplicationControllerConditionApplyConfiguration { b.Type = &value return b } @@ -50,7 +50,7 @@ func (b *ReplicationControllerConditionApplyConfiguration) WithType(value v1.Rep // WithStatus sets the Status field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Status field is set to the value of the last call. -func (b *ReplicationControllerConditionApplyConfiguration) WithStatus(value v1.ConditionStatus) *ReplicationControllerConditionApplyConfiguration { +func (b *ReplicationControllerConditionApplyConfiguration) WithStatus(value corev1.ConditionStatus) *ReplicationControllerConditionApplyConfiguration { b.Status = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcehealth.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcehealth.go index 5169cb4bc..0338780b3 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcehealth.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcehealth.go @@ -19,14 +19,14 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" ) // ResourceHealthApplyConfiguration represents a declarative configuration of the ResourceHealth type for use // with apply. type ResourceHealthApplyConfiguration struct { - ResourceID *v1.ResourceID `json:"resourceID,omitempty"` - Health *v1.ResourceHealthStatus `json:"health,omitempty"` + ResourceID *corev1.ResourceID `json:"resourceID,omitempty"` + Health *corev1.ResourceHealthStatus `json:"health,omitempty"` } // ResourceHealthApplyConfiguration constructs a declarative configuration of the ResourceHealth type for use with @@ -38,7 +38,7 @@ func ResourceHealth() *ResourceHealthApplyConfiguration { // WithResourceID sets the ResourceID field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. 
// If called multiple times, the ResourceID field is set to the value of the last call. -func (b *ResourceHealthApplyConfiguration) WithResourceID(value v1.ResourceID) *ResourceHealthApplyConfiguration { +func (b *ResourceHealthApplyConfiguration) WithResourceID(value corev1.ResourceID) *ResourceHealthApplyConfiguration { b.ResourceID = &value return b } @@ -46,7 +46,7 @@ func (b *ResourceHealthApplyConfiguration) WithResourceID(value v1.ResourceID) * // WithHealth sets the Health field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Health field is set to the value of the last call. -func (b *ResourceHealthApplyConfiguration) WithHealth(value v1.ResourceHealthStatus) *ResourceHealthApplyConfiguration { +func (b *ResourceHealthApplyConfiguration) WithHealth(value corev1.ResourceHealthStatus) *ResourceHealthApplyConfiguration { b.Health = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcequota.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcequota.go index 2b78ba703..cd67f104c 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcequota.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcequota.go @@ -19,21 +19,21 @@ limitations under the License. package v1 import ( - apicorev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + corev1 "k8s.io/api/core/v1" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" internal "k8s.io/client-go/applyconfigurations/internal" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) // ResourceQuotaApplyConfiguration represents a declarative configuration of the ResourceQuota type for use // with apply. type ResourceQuotaApplyConfiguration struct { - v1.TypeMetaApplyConfiguration `json:",inline"` - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Spec *ResourceQuotaSpecApplyConfiguration `json:"spec,omitempty"` - Status *ResourceQuotaStatusApplyConfiguration `json:"status,omitempty"` + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *ResourceQuotaSpecApplyConfiguration `json:"spec,omitempty"` + Status *ResourceQuotaStatusApplyConfiguration `json:"status,omitempty"` } // ResourceQuota constructs a declarative configuration of the ResourceQuota type for use with @@ -58,18 +58,18 @@ func ResourceQuota(name, namespace string) *ResourceQuotaApplyConfiguration { // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! -func ExtractResourceQuota(resourceQuota *apicorev1.ResourceQuota, fieldManager string) (*ResourceQuotaApplyConfiguration, error) { +func ExtractResourceQuota(resourceQuota *corev1.ResourceQuota, fieldManager string) (*ResourceQuotaApplyConfiguration, error) { return extractResourceQuota(resourceQuota, fieldManager, "") } // ExtractResourceQuotaStatus is the same as ExtractResourceQuota except // that it extracts the status subresource applied configuration. // Experimental! 
-func ExtractResourceQuotaStatus(resourceQuota *apicorev1.ResourceQuota, fieldManager string) (*ResourceQuotaApplyConfiguration, error) { +func ExtractResourceQuotaStatus(resourceQuota *corev1.ResourceQuota, fieldManager string) (*ResourceQuotaApplyConfiguration, error) { return extractResourceQuota(resourceQuota, fieldManager, "status") } -func extractResourceQuota(resourceQuota *apicorev1.ResourceQuota, fieldManager string, subresource string) (*ResourceQuotaApplyConfiguration, error) { +func extractResourceQuota(resourceQuota *corev1.ResourceQuota, fieldManager string, subresource string) (*ResourceQuotaApplyConfiguration, error) { b := &ResourceQuotaApplyConfiguration{} err := managedfields.ExtractInto(resourceQuota, internal.Parser().Type("io.k8s.api.core.v1.ResourceQuota"), fieldManager, b, subresource) if err != nil { @@ -87,7 +87,7 @@ func extractResourceQuota(resourceQuota *apicorev1.ResourceQuota, fieldManager s // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *ResourceQuotaApplyConfiguration) WithKind(value string) *ResourceQuotaApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -95,7 +95,7 @@ func (b *ResourceQuotaApplyConfiguration) WithKind(value string) *ResourceQuotaA // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *ResourceQuotaApplyConfiguration) WithAPIVersion(value string) *ResourceQuotaApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -104,7 +104,7 @@ func (b *ResourceQuotaApplyConfiguration) WithAPIVersion(value string) *Resource // If called multiple times, the Name field is set to the value of the last call. func (b *ResourceQuotaApplyConfiguration) WithName(value string) *ResourceQuotaApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -113,7 +113,7 @@ func (b *ResourceQuotaApplyConfiguration) WithName(value string) *ResourceQuotaA // If called multiple times, the GenerateName field is set to the value of the last call. func (b *ResourceQuotaApplyConfiguration) WithGenerateName(value string) *ResourceQuotaApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -122,7 +122,7 @@ func (b *ResourceQuotaApplyConfiguration) WithGenerateName(value string) *Resour // If called multiple times, the Namespace field is set to the value of the last call. func (b *ResourceQuotaApplyConfiguration) WithNamespace(value string) *ResourceQuotaApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -131,7 +131,7 @@ func (b *ResourceQuotaApplyConfiguration) WithNamespace(value string) *ResourceQ // If called multiple times, the UID field is set to the value of the last call. 
func (b *ResourceQuotaApplyConfiguration) WithUID(value types.UID) *ResourceQuotaApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -140,7 +140,7 @@ func (b *ResourceQuotaApplyConfiguration) WithUID(value types.UID) *ResourceQuot // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *ResourceQuotaApplyConfiguration) WithResourceVersion(value string) *ResourceQuotaApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -149,25 +149,25 @@ func (b *ResourceQuotaApplyConfiguration) WithResourceVersion(value string) *Res // If called multiple times, the Generation field is set to the value of the last call. func (b *ResourceQuotaApplyConfiguration) WithGeneration(value int64) *ResourceQuotaApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CreationTimestamp field is set to the value of the last call. -func (b *ResourceQuotaApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ResourceQuotaApplyConfiguration { +func (b *ResourceQuotaApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *ResourceQuotaApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. -func (b *ResourceQuotaApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ResourceQuotaApplyConfiguration { +func (b *ResourceQuotaApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *ResourceQuotaApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -176,7 +176,7 @@ func (b *ResourceQuotaApplyConfiguration) WithDeletionTimestamp(value metav1.Tim // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *ResourceQuotaApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ResourceQuotaApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -186,11 +186,11 @@ func (b *ResourceQuotaApplyConfiguration) WithDeletionGracePeriodSeconds(value i // overwriting an existing map entries in Labels field with the same key. 
func (b *ResourceQuotaApplyConfiguration) WithLabels(entries map[string]string) *ResourceQuotaApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -201,11 +201,11 @@ func (b *ResourceQuotaApplyConfiguration) WithLabels(entries map[string]string) // overwriting an existing map entries in Annotations field with the same key. func (b *ResourceQuotaApplyConfiguration) WithAnnotations(entries map[string]string) *ResourceQuotaApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -213,13 +213,13 @@ func (b *ResourceQuotaApplyConfiguration) WithAnnotations(entries map[string]str // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the OwnerReferences field. -func (b *ResourceQuotaApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ResourceQuotaApplyConfiguration { +func (b *ResourceQuotaApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *ResourceQuotaApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -230,14 +230,14 @@ func (b *ResourceQuotaApplyConfiguration) WithOwnerReferences(values ...*v1.Owne func (b *ResourceQuotaApplyConfiguration) WithFinalizers(values ...string) *ResourceQuotaApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } func (b *ResourceQuotaApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} } } @@ -260,5 +260,5 @@ func (b *ResourceQuotaApplyConfiguration) WithStatus(value *ResourceQuotaStatusA // GetName retrieves the value of the Name field in the declarative configuration. 
func (b *ResourceQuotaApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcequotaspec.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcequotaspec.go index 0012ace25..36d342fcd 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcequotaspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcequotaspec.go @@ -19,14 +19,14 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" ) // ResourceQuotaSpecApplyConfiguration represents a declarative configuration of the ResourceQuotaSpec type for use // with apply. type ResourceQuotaSpecApplyConfiguration struct { - Hard *v1.ResourceList `json:"hard,omitempty"` - Scopes []v1.ResourceQuotaScope `json:"scopes,omitempty"` + Hard *corev1.ResourceList `json:"hard,omitempty"` + Scopes []corev1.ResourceQuotaScope `json:"scopes,omitempty"` ScopeSelector *ScopeSelectorApplyConfiguration `json:"scopeSelector,omitempty"` } @@ -39,7 +39,7 @@ func ResourceQuotaSpec() *ResourceQuotaSpecApplyConfiguration { // WithHard sets the Hard field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Hard field is set to the value of the last call. -func (b *ResourceQuotaSpecApplyConfiguration) WithHard(value v1.ResourceList) *ResourceQuotaSpecApplyConfiguration { +func (b *ResourceQuotaSpecApplyConfiguration) WithHard(value corev1.ResourceList) *ResourceQuotaSpecApplyConfiguration { b.Hard = &value return b } @@ -47,7 +47,7 @@ func (b *ResourceQuotaSpecApplyConfiguration) WithHard(value v1.ResourceList) *R // WithScopes adds the given value to the Scopes field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the Scopes field. -func (b *ResourceQuotaSpecApplyConfiguration) WithScopes(values ...v1.ResourceQuotaScope) *ResourceQuotaSpecApplyConfiguration { +func (b *ResourceQuotaSpecApplyConfiguration) WithScopes(values ...corev1.ResourceQuotaScope) *ResourceQuotaSpecApplyConfiguration { for i := range values { b.Scopes = append(b.Scopes, values[i]) } diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcequotastatus.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcequotastatus.go index 364b96eec..6338a1308 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcequotastatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcequotastatus.go @@ -19,14 +19,14 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" ) // ResourceQuotaStatusApplyConfiguration represents a declarative configuration of the ResourceQuotaStatus type for use // with apply. 
type ResourceQuotaStatusApplyConfiguration struct { - Hard *v1.ResourceList `json:"hard,omitempty"` - Used *v1.ResourceList `json:"used,omitempty"` + Hard *corev1.ResourceList `json:"hard,omitempty"` + Used *corev1.ResourceList `json:"used,omitempty"` } // ResourceQuotaStatusApplyConfiguration constructs a declarative configuration of the ResourceQuotaStatus type for use with @@ -38,7 +38,7 @@ func ResourceQuotaStatus() *ResourceQuotaStatusApplyConfiguration { // WithHard sets the Hard field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Hard field is set to the value of the last call. -func (b *ResourceQuotaStatusApplyConfiguration) WithHard(value v1.ResourceList) *ResourceQuotaStatusApplyConfiguration { +func (b *ResourceQuotaStatusApplyConfiguration) WithHard(value corev1.ResourceList) *ResourceQuotaStatusApplyConfiguration { b.Hard = &value return b } @@ -46,7 +46,7 @@ func (b *ResourceQuotaStatusApplyConfiguration) WithHard(value v1.ResourceList) // WithUsed sets the Used field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Used field is set to the value of the last call. -func (b *ResourceQuotaStatusApplyConfiguration) WithUsed(value v1.ResourceList) *ResourceQuotaStatusApplyConfiguration { +func (b *ResourceQuotaStatusApplyConfiguration) WithUsed(value corev1.ResourceList) *ResourceQuotaStatusApplyConfiguration { b.Used = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcerequirements.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcerequirements.go index 51197862c..ea77647a9 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcerequirements.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcerequirements.go @@ -19,14 +19,14 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" ) // ResourceRequirementsApplyConfiguration represents a declarative configuration of the ResourceRequirements type for use // with apply. type ResourceRequirementsApplyConfiguration struct { - Limits *v1.ResourceList `json:"limits,omitempty"` - Requests *v1.ResourceList `json:"requests,omitempty"` + Limits *corev1.ResourceList `json:"limits,omitempty"` + Requests *corev1.ResourceList `json:"requests,omitempty"` Claims []ResourceClaimApplyConfiguration `json:"claims,omitempty"` } @@ -39,7 +39,7 @@ func ResourceRequirements() *ResourceRequirementsApplyConfiguration { // WithLimits sets the Limits field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Limits field is set to the value of the last call. -func (b *ResourceRequirementsApplyConfiguration) WithLimits(value v1.ResourceList) *ResourceRequirementsApplyConfiguration { +func (b *ResourceRequirementsApplyConfiguration) WithLimits(value corev1.ResourceList) *ResourceRequirementsApplyConfiguration { b.Limits = &value return b } @@ -47,7 +47,7 @@ func (b *ResourceRequirementsApplyConfiguration) WithLimits(value v1.ResourceLis // WithRequests sets the Requests field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. 
// If called multiple times, the Requests field is set to the value of the last call. -func (b *ResourceRequirementsApplyConfiguration) WithRequests(value v1.ResourceList) *ResourceRequirementsApplyConfiguration { +func (b *ResourceRequirementsApplyConfiguration) WithRequests(value corev1.ResourceList) *ResourceRequirementsApplyConfiguration { b.Requests = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcestatus.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcestatus.go index 1e63c87f8..e99586659 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcestatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcestatus.go @@ -19,13 +19,13 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" ) // ResourceStatusApplyConfiguration represents a declarative configuration of the ResourceStatus type for use // with apply. type ResourceStatusApplyConfiguration struct { - Name *v1.ResourceName `json:"name,omitempty"` + Name *corev1.ResourceName `json:"name,omitempty"` Resources []ResourceHealthApplyConfiguration `json:"resources,omitempty"` } @@ -38,7 +38,7 @@ func ResourceStatus() *ResourceStatusApplyConfiguration { // WithName sets the Name field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Name field is set to the value of the last call. -func (b *ResourceStatusApplyConfiguration) WithName(value v1.ResourceName) *ResourceStatusApplyConfiguration { +func (b *ResourceStatusApplyConfiguration) WithName(value corev1.ResourceName) *ResourceStatusApplyConfiguration { b.Name = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/scopedresourceselectorrequirement.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/scopedresourceselectorrequirement.go index c6ec87827..c2481f490 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/scopedresourceselectorrequirement.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/scopedresourceselectorrequirement.go @@ -19,15 +19,15 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" ) // ScopedResourceSelectorRequirementApplyConfiguration represents a declarative configuration of the ScopedResourceSelectorRequirement type for use // with apply. type ScopedResourceSelectorRequirementApplyConfiguration struct { - ScopeName *v1.ResourceQuotaScope `json:"scopeName,omitempty"` - Operator *v1.ScopeSelectorOperator `json:"operator,omitempty"` - Values []string `json:"values,omitempty"` + ScopeName *corev1.ResourceQuotaScope `json:"scopeName,omitempty"` + Operator *corev1.ScopeSelectorOperator `json:"operator,omitempty"` + Values []string `json:"values,omitempty"` } // ScopedResourceSelectorRequirementApplyConfiguration constructs a declarative configuration of the ScopedResourceSelectorRequirement type for use with @@ -39,7 +39,7 @@ func ScopedResourceSelectorRequirement() *ScopedResourceSelectorRequirementApply // WithScopeName sets the ScopeName field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the ScopeName field is set to the value of the last call. 
-func (b *ScopedResourceSelectorRequirementApplyConfiguration) WithScopeName(value v1.ResourceQuotaScope) *ScopedResourceSelectorRequirementApplyConfiguration { +func (b *ScopedResourceSelectorRequirementApplyConfiguration) WithScopeName(value corev1.ResourceQuotaScope) *ScopedResourceSelectorRequirementApplyConfiguration { b.ScopeName = &value return b } @@ -47,7 +47,7 @@ func (b *ScopedResourceSelectorRequirementApplyConfiguration) WithScopeName(valu // WithOperator sets the Operator field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Operator field is set to the value of the last call. -func (b *ScopedResourceSelectorRequirementApplyConfiguration) WithOperator(value v1.ScopeSelectorOperator) *ScopedResourceSelectorRequirementApplyConfiguration { +func (b *ScopedResourceSelectorRequirementApplyConfiguration) WithOperator(value corev1.ScopeSelectorOperator) *ScopedResourceSelectorRequirementApplyConfiguration { b.Operator = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/seccompprofile.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/seccompprofile.go index eb3077a05..754bfd1b3 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/seccompprofile.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/seccompprofile.go @@ -19,14 +19,14 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" ) // SeccompProfileApplyConfiguration represents a declarative configuration of the SeccompProfile type for use // with apply. type SeccompProfileApplyConfiguration struct { - Type *v1.SeccompProfileType `json:"type,omitempty"` - LocalhostProfile *string `json:"localhostProfile,omitempty"` + Type *corev1.SeccompProfileType `json:"type,omitempty"` + LocalhostProfile *string `json:"localhostProfile,omitempty"` } // SeccompProfileApplyConfiguration constructs a declarative configuration of the SeccompProfile type for use with @@ -38,7 +38,7 @@ func SeccompProfile() *SeccompProfileApplyConfiguration { // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. -func (b *SeccompProfileApplyConfiguration) WithType(value v1.SeccompProfileType) *SeccompProfileApplyConfiguration { +func (b *SeccompProfileApplyConfiguration) WithType(value corev1.SeccompProfileType) *SeccompProfileApplyConfiguration { b.Type = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/secret.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/secret.go index 1d850b00b..9c8532d20 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/secret.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/secret.go @@ -20,22 +20,22 @@ package v1 import ( corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" internal "k8s.io/client-go/applyconfigurations/internal" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) // SecretApplyConfiguration represents a declarative configuration of the Secret type for use // with apply. 
type SecretApplyConfiguration struct { - v1.TypeMetaApplyConfiguration `json:",inline"` - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Immutable *bool `json:"immutable,omitempty"` - Data map[string][]byte `json:"data,omitempty"` - StringData map[string]string `json:"stringData,omitempty"` - Type *corev1.SecretType `json:"type,omitempty"` + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Immutable *bool `json:"immutable,omitempty"` + Data map[string][]byte `json:"data,omitempty"` + StringData map[string]string `json:"stringData,omitempty"` + Type *corev1.SecretType `json:"type,omitempty"` } // Secret constructs a declarative configuration of the Secret type for use with @@ -89,7 +89,7 @@ func extractSecret(secret *corev1.Secret, fieldManager string, subresource strin // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *SecretApplyConfiguration) WithKind(value string) *SecretApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -97,7 +97,7 @@ func (b *SecretApplyConfiguration) WithKind(value string) *SecretApplyConfigurat // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *SecretApplyConfiguration) WithAPIVersion(value string) *SecretApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -106,7 +106,7 @@ func (b *SecretApplyConfiguration) WithAPIVersion(value string) *SecretApplyConf // If called multiple times, the Name field is set to the value of the last call. func (b *SecretApplyConfiguration) WithName(value string) *SecretApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -115,7 +115,7 @@ func (b *SecretApplyConfiguration) WithName(value string) *SecretApplyConfigurat // If called multiple times, the GenerateName field is set to the value of the last call. func (b *SecretApplyConfiguration) WithGenerateName(value string) *SecretApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -124,7 +124,7 @@ func (b *SecretApplyConfiguration) WithGenerateName(value string) *SecretApplyCo // If called multiple times, the Namespace field is set to the value of the last call. func (b *SecretApplyConfiguration) WithNamespace(value string) *SecretApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -133,7 +133,7 @@ func (b *SecretApplyConfiguration) WithNamespace(value string) *SecretApplyConfi // If called multiple times, the UID field is set to the value of the last call. func (b *SecretApplyConfiguration) WithUID(value types.UID) *SecretApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -142,7 +142,7 @@ func (b *SecretApplyConfiguration) WithUID(value types.UID) *SecretApplyConfigur // If called multiple times, the ResourceVersion field is set to the value of the last call. 
func (b *SecretApplyConfiguration) WithResourceVersion(value string) *SecretApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -151,25 +151,25 @@ func (b *SecretApplyConfiguration) WithResourceVersion(value string) *SecretAppl // If called multiple times, the Generation field is set to the value of the last call. func (b *SecretApplyConfiguration) WithGeneration(value int64) *SecretApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CreationTimestamp field is set to the value of the last call. -func (b *SecretApplyConfiguration) WithCreationTimestamp(value metav1.Time) *SecretApplyConfiguration { +func (b *SecretApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *SecretApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. -func (b *SecretApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *SecretApplyConfiguration { +func (b *SecretApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *SecretApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -178,7 +178,7 @@ func (b *SecretApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *Sec // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *SecretApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *SecretApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -188,11 +188,11 @@ func (b *SecretApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) * // overwriting an existing map entries in Labels field with the same key. func (b *SecretApplyConfiguration) WithLabels(entries map[string]string) *SecretApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -203,11 +203,11 @@ func (b *SecretApplyConfiguration) WithLabels(entries map[string]string) *Secret // overwriting an existing map entries in Annotations field with the same key. 
func (b *SecretApplyConfiguration) WithAnnotations(entries map[string]string) *SecretApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -215,13 +215,13 @@ func (b *SecretApplyConfiguration) WithAnnotations(entries map[string]string) *S // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the OwnerReferences field. -func (b *SecretApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *SecretApplyConfiguration { +func (b *SecretApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *SecretApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -232,14 +232,14 @@ func (b *SecretApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerRefere func (b *SecretApplyConfiguration) WithFinalizers(values ...string) *SecretApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } func (b *SecretApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} } } @@ -290,5 +290,5 @@ func (b *SecretApplyConfiguration) WithType(value corev1.SecretType) *SecretAppl // GetName retrieves the value of the Name field in the declarative configuration. func (b *SecretApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/secretenvsource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/secretenvsource.go index ba99b7f5f..d3cc9f6a6 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/secretenvsource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/secretenvsource.go @@ -35,7 +35,7 @@ func SecretEnvSource() *SecretEnvSourceApplyConfiguration { // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Name field is set to the value of the last call. 
func (b *SecretEnvSourceApplyConfiguration) WithName(value string) *SecretEnvSourceApplyConfiguration { - b.Name = &value + b.LocalObjectReferenceApplyConfiguration.Name = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/secretkeyselector.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/secretkeyselector.go index 2d490b810..f1cd8b2d3 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/secretkeyselector.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/secretkeyselector.go @@ -36,7 +36,7 @@ func SecretKeySelector() *SecretKeySelectorApplyConfiguration { // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Name field is set to the value of the last call. func (b *SecretKeySelectorApplyConfiguration) WithName(value string) *SecretKeySelectorApplyConfiguration { - b.Name = &value + b.LocalObjectReferenceApplyConfiguration.Name = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/secretprojection.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/secretprojection.go index 65ce3c66d..99fa36ecc 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/secretprojection.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/secretprojection.go @@ -36,7 +36,7 @@ func SecretProjection() *SecretProjectionApplyConfiguration { // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Name field is set to the value of the last call. func (b *SecretProjectionApplyConfiguration) WithName(value string) *SecretProjectionApplyConfiguration { - b.Name = &value + b.LocalObjectReferenceApplyConfiguration.Name = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/service.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/service.go index 2dac0589d..85f6b25a9 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/service.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/service.go @@ -19,21 +19,21 @@ limitations under the License. package v1 import ( - apicorev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + corev1 "k8s.io/api/core/v1" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" internal "k8s.io/client-go/applyconfigurations/internal" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) // ServiceApplyConfiguration represents a declarative configuration of the Service type for use // with apply. 
type ServiceApplyConfiguration struct { - v1.TypeMetaApplyConfiguration `json:",inline"` - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Spec *ServiceSpecApplyConfiguration `json:"spec,omitempty"` - Status *ServiceStatusApplyConfiguration `json:"status,omitempty"` + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *ServiceSpecApplyConfiguration `json:"spec,omitempty"` + Status *ServiceStatusApplyConfiguration `json:"status,omitempty"` } // Service constructs a declarative configuration of the Service type for use with @@ -58,18 +58,18 @@ func Service(name, namespace string) *ServiceApplyConfiguration { // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! -func ExtractService(service *apicorev1.Service, fieldManager string) (*ServiceApplyConfiguration, error) { +func ExtractService(service *corev1.Service, fieldManager string) (*ServiceApplyConfiguration, error) { return extractService(service, fieldManager, "") } // ExtractServiceStatus is the same as ExtractService except // that it extracts the status subresource applied configuration. // Experimental! -func ExtractServiceStatus(service *apicorev1.Service, fieldManager string) (*ServiceApplyConfiguration, error) { +func ExtractServiceStatus(service *corev1.Service, fieldManager string) (*ServiceApplyConfiguration, error) { return extractService(service, fieldManager, "status") } -func extractService(service *apicorev1.Service, fieldManager string, subresource string) (*ServiceApplyConfiguration, error) { +func extractService(service *corev1.Service, fieldManager string, subresource string) (*ServiceApplyConfiguration, error) { b := &ServiceApplyConfiguration{} err := managedfields.ExtractInto(service, internal.Parser().Type("io.k8s.api.core.v1.Service"), fieldManager, b, subresource) if err != nil { @@ -87,7 +87,7 @@ func extractService(service *apicorev1.Service, fieldManager string, subresource // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *ServiceApplyConfiguration) WithKind(value string) *ServiceApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -95,7 +95,7 @@ func (b *ServiceApplyConfiguration) WithKind(value string) *ServiceApplyConfigur // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *ServiceApplyConfiguration) WithAPIVersion(value string) *ServiceApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -104,7 +104,7 @@ func (b *ServiceApplyConfiguration) WithAPIVersion(value string) *ServiceApplyCo // If called multiple times, the Name field is set to the value of the last call. func (b *ServiceApplyConfiguration) WithName(value string) *ServiceApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -113,7 +113,7 @@ func (b *ServiceApplyConfiguration) WithName(value string) *ServiceApplyConfigur // If called multiple times, the GenerateName field is set to the value of the last call. 
func (b *ServiceApplyConfiguration) WithGenerateName(value string) *ServiceApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -122,7 +122,7 @@ func (b *ServiceApplyConfiguration) WithGenerateName(value string) *ServiceApply // If called multiple times, the Namespace field is set to the value of the last call. func (b *ServiceApplyConfiguration) WithNamespace(value string) *ServiceApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -131,7 +131,7 @@ func (b *ServiceApplyConfiguration) WithNamespace(value string) *ServiceApplyCon // If called multiple times, the UID field is set to the value of the last call. func (b *ServiceApplyConfiguration) WithUID(value types.UID) *ServiceApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -140,7 +140,7 @@ func (b *ServiceApplyConfiguration) WithUID(value types.UID) *ServiceApplyConfig // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *ServiceApplyConfiguration) WithResourceVersion(value string) *ServiceApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -149,25 +149,25 @@ func (b *ServiceApplyConfiguration) WithResourceVersion(value string) *ServiceAp // If called multiple times, the Generation field is set to the value of the last call. func (b *ServiceApplyConfiguration) WithGeneration(value int64) *ServiceApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CreationTimestamp field is set to the value of the last call. -func (b *ServiceApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ServiceApplyConfiguration { +func (b *ServiceApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *ServiceApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. -func (b *ServiceApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ServiceApplyConfiguration { +func (b *ServiceApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *ServiceApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -176,7 +176,7 @@ func (b *ServiceApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *Se // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. 
func (b *ServiceApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ServiceApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -186,11 +186,11 @@ func (b *ServiceApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) // overwriting an existing map entries in Labels field with the same key. func (b *ServiceApplyConfiguration) WithLabels(entries map[string]string) *ServiceApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -201,11 +201,11 @@ func (b *ServiceApplyConfiguration) WithLabels(entries map[string]string) *Servi // overwriting an existing map entries in Annotations field with the same key. func (b *ServiceApplyConfiguration) WithAnnotations(entries map[string]string) *ServiceApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -213,13 +213,13 @@ func (b *ServiceApplyConfiguration) WithAnnotations(entries map[string]string) * // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the OwnerReferences field. 
-func (b *ServiceApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ServiceApplyConfiguration { +func (b *ServiceApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *ServiceApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -230,14 +230,14 @@ func (b *ServiceApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerRefer func (b *ServiceApplyConfiguration) WithFinalizers(values ...string) *ServiceApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } func (b *ServiceApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} } } @@ -260,5 +260,5 @@ func (b *ServiceApplyConfiguration) WithStatus(value *ServiceStatusApplyConfigur // GetName retrieves the value of the Name field in the declarative configuration. func (b *ServiceApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/serviceaccount.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/serviceaccount.go index 26d33deb9..0d80ded9e 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/serviceaccount.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/serviceaccount.go @@ -19,22 +19,22 @@ limitations under the License. package v1 import ( - apicorev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + corev1 "k8s.io/api/core/v1" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" internal "k8s.io/client-go/applyconfigurations/internal" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) // ServiceAccountApplyConfiguration represents a declarative configuration of the ServiceAccount type for use // with apply. 
type ServiceAccountApplyConfiguration struct { - v1.TypeMetaApplyConfiguration `json:",inline"` - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Secrets []ObjectReferenceApplyConfiguration `json:"secrets,omitempty"` - ImagePullSecrets []LocalObjectReferenceApplyConfiguration `json:"imagePullSecrets,omitempty"` - AutomountServiceAccountToken *bool `json:"automountServiceAccountToken,omitempty"` + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Secrets []ObjectReferenceApplyConfiguration `json:"secrets,omitempty"` + ImagePullSecrets []LocalObjectReferenceApplyConfiguration `json:"imagePullSecrets,omitempty"` + AutomountServiceAccountToken *bool `json:"automountServiceAccountToken,omitempty"` } // ServiceAccount constructs a declarative configuration of the ServiceAccount type for use with @@ -59,18 +59,18 @@ func ServiceAccount(name, namespace string) *ServiceAccountApplyConfiguration { // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! -func ExtractServiceAccount(serviceAccount *apicorev1.ServiceAccount, fieldManager string) (*ServiceAccountApplyConfiguration, error) { +func ExtractServiceAccount(serviceAccount *corev1.ServiceAccount, fieldManager string) (*ServiceAccountApplyConfiguration, error) { return extractServiceAccount(serviceAccount, fieldManager, "") } // ExtractServiceAccountStatus is the same as ExtractServiceAccount except // that it extracts the status subresource applied configuration. // Experimental! -func ExtractServiceAccountStatus(serviceAccount *apicorev1.ServiceAccount, fieldManager string) (*ServiceAccountApplyConfiguration, error) { +func ExtractServiceAccountStatus(serviceAccount *corev1.ServiceAccount, fieldManager string) (*ServiceAccountApplyConfiguration, error) { return extractServiceAccount(serviceAccount, fieldManager, "status") } -func extractServiceAccount(serviceAccount *apicorev1.ServiceAccount, fieldManager string, subresource string) (*ServiceAccountApplyConfiguration, error) { +func extractServiceAccount(serviceAccount *corev1.ServiceAccount, fieldManager string, subresource string) (*ServiceAccountApplyConfiguration, error) { b := &ServiceAccountApplyConfiguration{} err := managedfields.ExtractInto(serviceAccount, internal.Parser().Type("io.k8s.api.core.v1.ServiceAccount"), fieldManager, b, subresource) if err != nil { @@ -88,7 +88,7 @@ func extractServiceAccount(serviceAccount *apicorev1.ServiceAccount, fieldManage // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *ServiceAccountApplyConfiguration) WithKind(value string) *ServiceAccountApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -96,7 +96,7 @@ func (b *ServiceAccountApplyConfiguration) WithKind(value string) *ServiceAccoun // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. 
func (b *ServiceAccountApplyConfiguration) WithAPIVersion(value string) *ServiceAccountApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -105,7 +105,7 @@ func (b *ServiceAccountApplyConfiguration) WithAPIVersion(value string) *Service // If called multiple times, the Name field is set to the value of the last call. func (b *ServiceAccountApplyConfiguration) WithName(value string) *ServiceAccountApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -114,7 +114,7 @@ func (b *ServiceAccountApplyConfiguration) WithName(value string) *ServiceAccoun // If called multiple times, the GenerateName field is set to the value of the last call. func (b *ServiceAccountApplyConfiguration) WithGenerateName(value string) *ServiceAccountApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -123,7 +123,7 @@ func (b *ServiceAccountApplyConfiguration) WithGenerateName(value string) *Servi // If called multiple times, the Namespace field is set to the value of the last call. func (b *ServiceAccountApplyConfiguration) WithNamespace(value string) *ServiceAccountApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -132,7 +132,7 @@ func (b *ServiceAccountApplyConfiguration) WithNamespace(value string) *ServiceA // If called multiple times, the UID field is set to the value of the last call. func (b *ServiceAccountApplyConfiguration) WithUID(value types.UID) *ServiceAccountApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -141,7 +141,7 @@ func (b *ServiceAccountApplyConfiguration) WithUID(value types.UID) *ServiceAcco // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *ServiceAccountApplyConfiguration) WithResourceVersion(value string) *ServiceAccountApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -150,25 +150,25 @@ func (b *ServiceAccountApplyConfiguration) WithResourceVersion(value string) *Se // If called multiple times, the Generation field is set to the value of the last call. func (b *ServiceAccountApplyConfiguration) WithGeneration(value int64) *ServiceAccountApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CreationTimestamp field is set to the value of the last call. 
-func (b *ServiceAccountApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ServiceAccountApplyConfiguration { +func (b *ServiceAccountApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *ServiceAccountApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. -func (b *ServiceAccountApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ServiceAccountApplyConfiguration { +func (b *ServiceAccountApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *ServiceAccountApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -177,7 +177,7 @@ func (b *ServiceAccountApplyConfiguration) WithDeletionTimestamp(value metav1.Ti // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *ServiceAccountApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ServiceAccountApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -187,11 +187,11 @@ func (b *ServiceAccountApplyConfiguration) WithDeletionGracePeriodSeconds(value // overwriting an existing map entries in Labels field with the same key. func (b *ServiceAccountApplyConfiguration) WithLabels(entries map[string]string) *ServiceAccountApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -202,11 +202,11 @@ func (b *ServiceAccountApplyConfiguration) WithLabels(entries map[string]string) // overwriting an existing map entries in Annotations field with the same key. func (b *ServiceAccountApplyConfiguration) WithAnnotations(entries map[string]string) *ServiceAccountApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -214,13 +214,13 @@ func (b *ServiceAccountApplyConfiguration) WithAnnotations(entries map[string]st // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the OwnerReferences field. 
-func (b *ServiceAccountApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ServiceAccountApplyConfiguration { +func (b *ServiceAccountApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *ServiceAccountApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -231,14 +231,14 @@ func (b *ServiceAccountApplyConfiguration) WithOwnerReferences(values ...*v1.Own func (b *ServiceAccountApplyConfiguration) WithFinalizers(values ...string) *ServiceAccountApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } func (b *ServiceAccountApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} } } @@ -279,5 +279,5 @@ func (b *ServiceAccountApplyConfiguration) WithAutomountServiceAccountToken(valu // GetName retrieves the value of the Name field in the declarative configuration. func (b *ServiceAccountApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/serviceport.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/serviceport.go index e889f2134..4d5774d8d 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/serviceport.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/serviceport.go @@ -19,7 +19,7 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" intstr "k8s.io/apimachinery/pkg/util/intstr" ) @@ -27,7 +27,7 @@ import ( // with apply. type ServicePortApplyConfiguration struct { Name *string `json:"name,omitempty"` - Protocol *v1.Protocol `json:"protocol,omitempty"` + Protocol *corev1.Protocol `json:"protocol,omitempty"` AppProtocol *string `json:"appProtocol,omitempty"` Port *int32 `json:"port,omitempty"` TargetPort *intstr.IntOrString `json:"targetPort,omitempty"` @@ -51,7 +51,7 @@ func (b *ServicePortApplyConfiguration) WithName(value string) *ServicePortApply // WithProtocol sets the Protocol field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Protocol field is set to the value of the last call. 
-func (b *ServicePortApplyConfiguration) WithProtocol(value v1.Protocol) *ServicePortApplyConfiguration { +func (b *ServicePortApplyConfiguration) WithProtocol(value corev1.Protocol) *ServicePortApplyConfiguration { b.Protocol = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/taint.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/taint.go index a34fb0552..4b9e43051 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/taint.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/taint.go @@ -19,17 +19,17 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) // TaintApplyConfiguration represents a declarative configuration of the Taint type for use // with apply. type TaintApplyConfiguration struct { - Key *string `json:"key,omitempty"` - Value *string `json:"value,omitempty"` - Effect *v1.TaintEffect `json:"effect,omitempty"` - TimeAdded *metav1.Time `json:"timeAdded,omitempty"` + Key *string `json:"key,omitempty"` + Value *string `json:"value,omitempty"` + Effect *corev1.TaintEffect `json:"effect,omitempty"` + TimeAdded *metav1.Time `json:"timeAdded,omitempty"` } // TaintApplyConfiguration constructs a declarative configuration of the Taint type for use with @@ -57,7 +57,7 @@ func (b *TaintApplyConfiguration) WithValue(value string) *TaintApplyConfigurati // WithEffect sets the Effect field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Effect field is set to the value of the last call. -func (b *TaintApplyConfiguration) WithEffect(value v1.TaintEffect) *TaintApplyConfiguration { +func (b *TaintApplyConfiguration) WithEffect(value corev1.TaintEffect) *TaintApplyConfiguration { b.Effect = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/toleration.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/toleration.go index 1bcc85b65..a0a0aac00 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/toleration.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/toleration.go @@ -19,17 +19,17 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" ) // TolerationApplyConfiguration represents a declarative configuration of the Toleration type for use // with apply. type TolerationApplyConfiguration struct { - Key *string `json:"key,omitempty"` - Operator *v1.TolerationOperator `json:"operator,omitempty"` - Value *string `json:"value,omitempty"` - Effect *v1.TaintEffect `json:"effect,omitempty"` - TolerationSeconds *int64 `json:"tolerationSeconds,omitempty"` + Key *string `json:"key,omitempty"` + Operator *corev1.TolerationOperator `json:"operator,omitempty"` + Value *string `json:"value,omitempty"` + Effect *corev1.TaintEffect `json:"effect,omitempty"` + TolerationSeconds *int64 `json:"tolerationSeconds,omitempty"` } // TolerationApplyConfiguration constructs a declarative configuration of the Toleration type for use with @@ -49,7 +49,7 @@ func (b *TolerationApplyConfiguration) WithKey(value string) *TolerationApplyCon // WithOperator sets the Operator field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Operator field is set to the value of the last call. 
-func (b *TolerationApplyConfiguration) WithOperator(value v1.TolerationOperator) *TolerationApplyConfiguration { +func (b *TolerationApplyConfiguration) WithOperator(value corev1.TolerationOperator) *TolerationApplyConfiguration { b.Operator = &value return b } @@ -65,7 +65,7 @@ func (b *TolerationApplyConfiguration) WithValue(value string) *TolerationApplyC // WithEffect sets the Effect field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Effect field is set to the value of the last call. -func (b *TolerationApplyConfiguration) WithEffect(value v1.TaintEffect) *TolerationApplyConfiguration { +func (b *TolerationApplyConfiguration) WithEffect(value corev1.TaintEffect) *TolerationApplyConfiguration { b.Effect = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/topologyspreadconstraint.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/topologyspreadconstraint.go index b21d23351..ab814e8e0 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/topologyspreadconstraint.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/topologyspreadconstraint.go @@ -19,7 +19,7 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) @@ -28,11 +28,11 @@ import ( type TopologySpreadConstraintApplyConfiguration struct { MaxSkew *int32 `json:"maxSkew,omitempty"` TopologyKey *string `json:"topologyKey,omitempty"` - WhenUnsatisfiable *v1.UnsatisfiableConstraintAction `json:"whenUnsatisfiable,omitempty"` + WhenUnsatisfiable *corev1.UnsatisfiableConstraintAction `json:"whenUnsatisfiable,omitempty"` LabelSelector *metav1.LabelSelectorApplyConfiguration `json:"labelSelector,omitempty"` MinDomains *int32 `json:"minDomains,omitempty"` - NodeAffinityPolicy *v1.NodeInclusionPolicy `json:"nodeAffinityPolicy,omitempty"` - NodeTaintsPolicy *v1.NodeInclusionPolicy `json:"nodeTaintsPolicy,omitempty"` + NodeAffinityPolicy *corev1.NodeInclusionPolicy `json:"nodeAffinityPolicy,omitempty"` + NodeTaintsPolicy *corev1.NodeInclusionPolicy `json:"nodeTaintsPolicy,omitempty"` MatchLabelKeys []string `json:"matchLabelKeys,omitempty"` } @@ -61,7 +61,7 @@ func (b *TopologySpreadConstraintApplyConfiguration) WithTopologyKey(value strin // WithWhenUnsatisfiable sets the WhenUnsatisfiable field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the WhenUnsatisfiable field is set to the value of the last call. -func (b *TopologySpreadConstraintApplyConfiguration) WithWhenUnsatisfiable(value v1.UnsatisfiableConstraintAction) *TopologySpreadConstraintApplyConfiguration { +func (b *TopologySpreadConstraintApplyConfiguration) WithWhenUnsatisfiable(value corev1.UnsatisfiableConstraintAction) *TopologySpreadConstraintApplyConfiguration { b.WhenUnsatisfiable = &value return b } @@ -85,7 +85,7 @@ func (b *TopologySpreadConstraintApplyConfiguration) WithMinDomains(value int32) // WithNodeAffinityPolicy sets the NodeAffinityPolicy field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the NodeAffinityPolicy field is set to the value of the last call. 
-func (b *TopologySpreadConstraintApplyConfiguration) WithNodeAffinityPolicy(value v1.NodeInclusionPolicy) *TopologySpreadConstraintApplyConfiguration { +func (b *TopologySpreadConstraintApplyConfiguration) WithNodeAffinityPolicy(value corev1.NodeInclusionPolicy) *TopologySpreadConstraintApplyConfiguration { b.NodeAffinityPolicy = &value return b } @@ -93,7 +93,7 @@ func (b *TopologySpreadConstraintApplyConfiguration) WithNodeAffinityPolicy(valu // WithNodeTaintsPolicy sets the NodeTaintsPolicy field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the NodeTaintsPolicy field is set to the value of the last call. -func (b *TopologySpreadConstraintApplyConfiguration) WithNodeTaintsPolicy(value v1.NodeInclusionPolicy) *TopologySpreadConstraintApplyConfiguration { +func (b *TopologySpreadConstraintApplyConfiguration) WithNodeTaintsPolicy(value corev1.NodeInclusionPolicy) *TopologySpreadConstraintApplyConfiguration { b.NodeTaintsPolicy = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/volume.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/volume.go index 9a48f8349..e47cd031d 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/volume.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/volume.go @@ -43,7 +43,7 @@ func (b *VolumeApplyConfiguration) WithName(value string) *VolumeApplyConfigurat // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the HostPath field is set to the value of the last call. func (b *VolumeApplyConfiguration) WithHostPath(value *HostPathVolumeSourceApplyConfiguration) *VolumeApplyConfiguration { - b.HostPath = value + b.VolumeSourceApplyConfiguration.HostPath = value return b } @@ -51,7 +51,7 @@ func (b *VolumeApplyConfiguration) WithHostPath(value *HostPathVolumeSourceApply // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the EmptyDir field is set to the value of the last call. func (b *VolumeApplyConfiguration) WithEmptyDir(value *EmptyDirVolumeSourceApplyConfiguration) *VolumeApplyConfiguration { - b.EmptyDir = value + b.VolumeSourceApplyConfiguration.EmptyDir = value return b } @@ -59,7 +59,7 @@ func (b *VolumeApplyConfiguration) WithEmptyDir(value *EmptyDirVolumeSourceApply // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the GCEPersistentDisk field is set to the value of the last call. func (b *VolumeApplyConfiguration) WithGCEPersistentDisk(value *GCEPersistentDiskVolumeSourceApplyConfiguration) *VolumeApplyConfiguration { - b.GCEPersistentDisk = value + b.VolumeSourceApplyConfiguration.GCEPersistentDisk = value return b } @@ -67,7 +67,7 @@ func (b *VolumeApplyConfiguration) WithGCEPersistentDisk(value *GCEPersistentDis // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the AWSElasticBlockStore field is set to the value of the last call. 
func (b *VolumeApplyConfiguration) WithAWSElasticBlockStore(value *AWSElasticBlockStoreVolumeSourceApplyConfiguration) *VolumeApplyConfiguration { - b.AWSElasticBlockStore = value + b.VolumeSourceApplyConfiguration.AWSElasticBlockStore = value return b } @@ -75,7 +75,7 @@ func (b *VolumeApplyConfiguration) WithAWSElasticBlockStore(value *AWSElasticBlo // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the GitRepo field is set to the value of the last call. func (b *VolumeApplyConfiguration) WithGitRepo(value *GitRepoVolumeSourceApplyConfiguration) *VolumeApplyConfiguration { - b.GitRepo = value + b.VolumeSourceApplyConfiguration.GitRepo = value return b } @@ -83,7 +83,7 @@ func (b *VolumeApplyConfiguration) WithGitRepo(value *GitRepoVolumeSourceApplyCo // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Secret field is set to the value of the last call. func (b *VolumeApplyConfiguration) WithSecret(value *SecretVolumeSourceApplyConfiguration) *VolumeApplyConfiguration { - b.Secret = value + b.VolumeSourceApplyConfiguration.Secret = value return b } @@ -91,7 +91,7 @@ func (b *VolumeApplyConfiguration) WithSecret(value *SecretVolumeSourceApplyConf // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the NFS field is set to the value of the last call. func (b *VolumeApplyConfiguration) WithNFS(value *NFSVolumeSourceApplyConfiguration) *VolumeApplyConfiguration { - b.NFS = value + b.VolumeSourceApplyConfiguration.NFS = value return b } @@ -99,7 +99,7 @@ func (b *VolumeApplyConfiguration) WithNFS(value *NFSVolumeSourceApplyConfigurat // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the ISCSI field is set to the value of the last call. func (b *VolumeApplyConfiguration) WithISCSI(value *ISCSIVolumeSourceApplyConfiguration) *VolumeApplyConfiguration { - b.ISCSI = value + b.VolumeSourceApplyConfiguration.ISCSI = value return b } @@ -107,7 +107,7 @@ func (b *VolumeApplyConfiguration) WithISCSI(value *ISCSIVolumeSourceApplyConfig // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Glusterfs field is set to the value of the last call. func (b *VolumeApplyConfiguration) WithGlusterfs(value *GlusterfsVolumeSourceApplyConfiguration) *VolumeApplyConfiguration { - b.Glusterfs = value + b.VolumeSourceApplyConfiguration.Glusterfs = value return b } @@ -115,7 +115,7 @@ func (b *VolumeApplyConfiguration) WithGlusterfs(value *GlusterfsVolumeSourceApp // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the PersistentVolumeClaim field is set to the value of the last call. func (b *VolumeApplyConfiguration) WithPersistentVolumeClaim(value *PersistentVolumeClaimVolumeSourceApplyConfiguration) *VolumeApplyConfiguration { - b.PersistentVolumeClaim = value + b.VolumeSourceApplyConfiguration.PersistentVolumeClaim = value return b } @@ -123,7 +123,7 @@ func (b *VolumeApplyConfiguration) WithPersistentVolumeClaim(value *PersistentVo // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the RBD field is set to the value of the last call. 
func (b *VolumeApplyConfiguration) WithRBD(value *RBDVolumeSourceApplyConfiguration) *VolumeApplyConfiguration { - b.RBD = value + b.VolumeSourceApplyConfiguration.RBD = value return b } @@ -131,7 +131,7 @@ func (b *VolumeApplyConfiguration) WithRBD(value *RBDVolumeSourceApplyConfigurat // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the FlexVolume field is set to the value of the last call. func (b *VolumeApplyConfiguration) WithFlexVolume(value *FlexVolumeSourceApplyConfiguration) *VolumeApplyConfiguration { - b.FlexVolume = value + b.VolumeSourceApplyConfiguration.FlexVolume = value return b } @@ -139,7 +139,7 @@ func (b *VolumeApplyConfiguration) WithFlexVolume(value *FlexVolumeSourceApplyCo // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Cinder field is set to the value of the last call. func (b *VolumeApplyConfiguration) WithCinder(value *CinderVolumeSourceApplyConfiguration) *VolumeApplyConfiguration { - b.Cinder = value + b.VolumeSourceApplyConfiguration.Cinder = value return b } @@ -147,7 +147,7 @@ func (b *VolumeApplyConfiguration) WithCinder(value *CinderVolumeSourceApplyConf // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CephFS field is set to the value of the last call. func (b *VolumeApplyConfiguration) WithCephFS(value *CephFSVolumeSourceApplyConfiguration) *VolumeApplyConfiguration { - b.CephFS = value + b.VolumeSourceApplyConfiguration.CephFS = value return b } @@ -155,7 +155,7 @@ func (b *VolumeApplyConfiguration) WithCephFS(value *CephFSVolumeSourceApplyConf // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Flocker field is set to the value of the last call. func (b *VolumeApplyConfiguration) WithFlocker(value *FlockerVolumeSourceApplyConfiguration) *VolumeApplyConfiguration { - b.Flocker = value + b.VolumeSourceApplyConfiguration.Flocker = value return b } @@ -163,7 +163,7 @@ func (b *VolumeApplyConfiguration) WithFlocker(value *FlockerVolumeSourceApplyCo // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DownwardAPI field is set to the value of the last call. func (b *VolumeApplyConfiguration) WithDownwardAPI(value *DownwardAPIVolumeSourceApplyConfiguration) *VolumeApplyConfiguration { - b.DownwardAPI = value + b.VolumeSourceApplyConfiguration.DownwardAPI = value return b } @@ -171,7 +171,7 @@ func (b *VolumeApplyConfiguration) WithDownwardAPI(value *DownwardAPIVolumeSourc // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the FC field is set to the value of the last call. func (b *VolumeApplyConfiguration) WithFC(value *FCVolumeSourceApplyConfiguration) *VolumeApplyConfiguration { - b.FC = value + b.VolumeSourceApplyConfiguration.FC = value return b } @@ -179,7 +179,7 @@ func (b *VolumeApplyConfiguration) WithFC(value *FCVolumeSourceApplyConfiguratio // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the AzureFile field is set to the value of the last call. 
func (b *VolumeApplyConfiguration) WithAzureFile(value *AzureFileVolumeSourceApplyConfiguration) *VolumeApplyConfiguration { - b.AzureFile = value + b.VolumeSourceApplyConfiguration.AzureFile = value return b } @@ -187,7 +187,7 @@ func (b *VolumeApplyConfiguration) WithAzureFile(value *AzureFileVolumeSourceApp // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the ConfigMap field is set to the value of the last call. func (b *VolumeApplyConfiguration) WithConfigMap(value *ConfigMapVolumeSourceApplyConfiguration) *VolumeApplyConfiguration { - b.ConfigMap = value + b.VolumeSourceApplyConfiguration.ConfigMap = value return b } @@ -195,7 +195,7 @@ func (b *VolumeApplyConfiguration) WithConfigMap(value *ConfigMapVolumeSourceApp // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the VsphereVolume field is set to the value of the last call. func (b *VolumeApplyConfiguration) WithVsphereVolume(value *VsphereVirtualDiskVolumeSourceApplyConfiguration) *VolumeApplyConfiguration { - b.VsphereVolume = value + b.VolumeSourceApplyConfiguration.VsphereVolume = value return b } @@ -203,7 +203,7 @@ func (b *VolumeApplyConfiguration) WithVsphereVolume(value *VsphereVirtualDiskVo // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Quobyte field is set to the value of the last call. func (b *VolumeApplyConfiguration) WithQuobyte(value *QuobyteVolumeSourceApplyConfiguration) *VolumeApplyConfiguration { - b.Quobyte = value + b.VolumeSourceApplyConfiguration.Quobyte = value return b } @@ -211,7 +211,7 @@ func (b *VolumeApplyConfiguration) WithQuobyte(value *QuobyteVolumeSourceApplyCo // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the AzureDisk field is set to the value of the last call. func (b *VolumeApplyConfiguration) WithAzureDisk(value *AzureDiskVolumeSourceApplyConfiguration) *VolumeApplyConfiguration { - b.AzureDisk = value + b.VolumeSourceApplyConfiguration.AzureDisk = value return b } @@ -219,7 +219,7 @@ func (b *VolumeApplyConfiguration) WithAzureDisk(value *AzureDiskVolumeSourceApp // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the PhotonPersistentDisk field is set to the value of the last call. func (b *VolumeApplyConfiguration) WithPhotonPersistentDisk(value *PhotonPersistentDiskVolumeSourceApplyConfiguration) *VolumeApplyConfiguration { - b.PhotonPersistentDisk = value + b.VolumeSourceApplyConfiguration.PhotonPersistentDisk = value return b } @@ -227,7 +227,7 @@ func (b *VolumeApplyConfiguration) WithPhotonPersistentDisk(value *PhotonPersist // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Projected field is set to the value of the last call. func (b *VolumeApplyConfiguration) WithProjected(value *ProjectedVolumeSourceApplyConfiguration) *VolumeApplyConfiguration { - b.Projected = value + b.VolumeSourceApplyConfiguration.Projected = value return b } @@ -235,7 +235,7 @@ func (b *VolumeApplyConfiguration) WithProjected(value *ProjectedVolumeSourceApp // and returns the receiver, so that objects can be built by chaining "With" function invocations. 
// If called multiple times, the PortworxVolume field is set to the value of the last call. func (b *VolumeApplyConfiguration) WithPortworxVolume(value *PortworxVolumeSourceApplyConfiguration) *VolumeApplyConfiguration { - b.PortworxVolume = value + b.VolumeSourceApplyConfiguration.PortworxVolume = value return b } @@ -243,7 +243,7 @@ func (b *VolumeApplyConfiguration) WithPortworxVolume(value *PortworxVolumeSourc // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the ScaleIO field is set to the value of the last call. func (b *VolumeApplyConfiguration) WithScaleIO(value *ScaleIOVolumeSourceApplyConfiguration) *VolumeApplyConfiguration { - b.ScaleIO = value + b.VolumeSourceApplyConfiguration.ScaleIO = value return b } @@ -251,7 +251,7 @@ func (b *VolumeApplyConfiguration) WithScaleIO(value *ScaleIOVolumeSourceApplyCo // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the StorageOS field is set to the value of the last call. func (b *VolumeApplyConfiguration) WithStorageOS(value *StorageOSVolumeSourceApplyConfiguration) *VolumeApplyConfiguration { - b.StorageOS = value + b.VolumeSourceApplyConfiguration.StorageOS = value return b } @@ -259,7 +259,7 @@ func (b *VolumeApplyConfiguration) WithStorageOS(value *StorageOSVolumeSourceApp // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CSI field is set to the value of the last call. func (b *VolumeApplyConfiguration) WithCSI(value *CSIVolumeSourceApplyConfiguration) *VolumeApplyConfiguration { - b.CSI = value + b.VolumeSourceApplyConfiguration.CSI = value return b } @@ -267,7 +267,7 @@ func (b *VolumeApplyConfiguration) WithCSI(value *CSIVolumeSourceApplyConfigurat // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Ephemeral field is set to the value of the last call. func (b *VolumeApplyConfiguration) WithEphemeral(value *EphemeralVolumeSourceApplyConfiguration) *VolumeApplyConfiguration { - b.Ephemeral = value + b.VolumeSourceApplyConfiguration.Ephemeral = value return b } @@ -275,6 +275,6 @@ func (b *VolumeApplyConfiguration) WithEphemeral(value *EphemeralVolumeSourceApp // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Image field is set to the value of the last call. func (b *VolumeApplyConfiguration) WithImage(value *ImageVolumeSourceApplyConfiguration) *VolumeApplyConfiguration { - b.Image = value + b.VolumeSourceApplyConfiguration.Image = value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumemount.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumemount.go index 49f22cc4e..ccd426a0c 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumemount.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumemount.go @@ -19,19 +19,19 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" ) // VolumeMountApplyConfiguration represents a declarative configuration of the VolumeMount type for use // with apply. 
type VolumeMountApplyConfiguration struct { - Name *string `json:"name,omitempty"` - ReadOnly *bool `json:"readOnly,omitempty"` - RecursiveReadOnly *v1.RecursiveReadOnlyMode `json:"recursiveReadOnly,omitempty"` - MountPath *string `json:"mountPath,omitempty"` - SubPath *string `json:"subPath,omitempty"` - MountPropagation *v1.MountPropagationMode `json:"mountPropagation,omitempty"` - SubPathExpr *string `json:"subPathExpr,omitempty"` + Name *string `json:"name,omitempty"` + ReadOnly *bool `json:"readOnly,omitempty"` + RecursiveReadOnly *corev1.RecursiveReadOnlyMode `json:"recursiveReadOnly,omitempty"` + MountPath *string `json:"mountPath,omitempty"` + SubPath *string `json:"subPath,omitempty"` + MountPropagation *corev1.MountPropagationMode `json:"mountPropagation,omitempty"` + SubPathExpr *string `json:"subPathExpr,omitempty"` } // VolumeMountApplyConfiguration constructs a declarative configuration of the VolumeMount type for use with @@ -59,7 +59,7 @@ func (b *VolumeMountApplyConfiguration) WithReadOnly(value bool) *VolumeMountApp // WithRecursiveReadOnly sets the RecursiveReadOnly field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the RecursiveReadOnly field is set to the value of the last call. -func (b *VolumeMountApplyConfiguration) WithRecursiveReadOnly(value v1.RecursiveReadOnlyMode) *VolumeMountApplyConfiguration { +func (b *VolumeMountApplyConfiguration) WithRecursiveReadOnly(value corev1.RecursiveReadOnlyMode) *VolumeMountApplyConfiguration { b.RecursiveReadOnly = &value return b } @@ -83,7 +83,7 @@ func (b *VolumeMountApplyConfiguration) WithSubPath(value string) *VolumeMountAp // WithMountPropagation sets the MountPropagation field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the MountPropagation field is set to the value of the last call. -func (b *VolumeMountApplyConfiguration) WithMountPropagation(value v1.MountPropagationMode) *VolumeMountApplyConfiguration { +func (b *VolumeMountApplyConfiguration) WithMountPropagation(value corev1.MountPropagationMode) *VolumeMountApplyConfiguration { b.MountPropagation = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumemountstatus.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumemountstatus.go index a0a9b5401..f55c40723 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumemountstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumemountstatus.go @@ -19,16 +19,16 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" ) // VolumeMountStatusApplyConfiguration represents a declarative configuration of the VolumeMountStatus type for use // with apply. 
type VolumeMountStatusApplyConfiguration struct { - Name *string `json:"name,omitempty"` - MountPath *string `json:"mountPath,omitempty"` - ReadOnly *bool `json:"readOnly,omitempty"` - RecursiveReadOnly *v1.RecursiveReadOnlyMode `json:"recursiveReadOnly,omitempty"` + Name *string `json:"name,omitempty"` + MountPath *string `json:"mountPath,omitempty"` + ReadOnly *bool `json:"readOnly,omitempty"` + RecursiveReadOnly *corev1.RecursiveReadOnlyMode `json:"recursiveReadOnly,omitempty"` } // VolumeMountStatusApplyConfiguration constructs a declarative configuration of the VolumeMountStatus type for use with @@ -64,7 +64,7 @@ func (b *VolumeMountStatusApplyConfiguration) WithReadOnly(value bool) *VolumeMo // WithRecursiveReadOnly sets the RecursiveReadOnly field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the RecursiveReadOnly field is set to the value of the last call. -func (b *VolumeMountStatusApplyConfiguration) WithRecursiveReadOnly(value v1.RecursiveReadOnlyMode) *VolumeMountStatusApplyConfiguration { +func (b *VolumeMountStatusApplyConfiguration) WithRecursiveReadOnly(value corev1.RecursiveReadOnlyMode) *VolumeMountStatusApplyConfiguration { b.RecursiveReadOnly = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumeresourcerequirements.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumeresourcerequirements.go index ae849f774..5c83ae6d4 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumeresourcerequirements.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumeresourcerequirements.go @@ -19,14 +19,14 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" ) // VolumeResourceRequirementsApplyConfiguration represents a declarative configuration of the VolumeResourceRequirements type for use // with apply. type VolumeResourceRequirementsApplyConfiguration struct { - Limits *v1.ResourceList `json:"limits,omitempty"` - Requests *v1.ResourceList `json:"requests,omitempty"` + Limits *corev1.ResourceList `json:"limits,omitempty"` + Requests *corev1.ResourceList `json:"requests,omitempty"` } // VolumeResourceRequirementsApplyConfiguration constructs a declarative configuration of the VolumeResourceRequirements type for use with @@ -38,7 +38,7 @@ func VolumeResourceRequirements() *VolumeResourceRequirementsApplyConfiguration // WithLimits sets the Limits field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Limits field is set to the value of the last call. -func (b *VolumeResourceRequirementsApplyConfiguration) WithLimits(value v1.ResourceList) *VolumeResourceRequirementsApplyConfiguration { +func (b *VolumeResourceRequirementsApplyConfiguration) WithLimits(value corev1.ResourceList) *VolumeResourceRequirementsApplyConfiguration { b.Limits = &value return b } @@ -46,7 +46,7 @@ func (b *VolumeResourceRequirementsApplyConfiguration) WithLimits(value v1.Resou // WithRequests sets the Requests field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Requests field is set to the value of the last call. 
-func (b *VolumeResourceRequirementsApplyConfiguration) WithRequests(value v1.ResourceList) *VolumeResourceRequirementsApplyConfiguration { +func (b *VolumeResourceRequirementsApplyConfiguration) WithRequests(value corev1.ResourceList) *VolumeResourceRequirementsApplyConfiguration { b.Requests = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/endpointport.go b/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/endpointport.go index 12908deb6..b55c868cb 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/endpointport.go +++ b/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/endpointport.go @@ -19,16 +19,16 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" ) // EndpointPortApplyConfiguration represents a declarative configuration of the EndpointPort type for use // with apply. type EndpointPortApplyConfiguration struct { - Name *string `json:"name,omitempty"` - Protocol *v1.Protocol `json:"protocol,omitempty"` - Port *int32 `json:"port,omitempty"` - AppProtocol *string `json:"appProtocol,omitempty"` + Name *string `json:"name,omitempty"` + Protocol *corev1.Protocol `json:"protocol,omitempty"` + Port *int32 `json:"port,omitempty"` + AppProtocol *string `json:"appProtocol,omitempty"` } // EndpointPortApplyConfiguration constructs a declarative configuration of the EndpointPort type for use with @@ -48,7 +48,7 @@ func (b *EndpointPortApplyConfiguration) WithName(value string) *EndpointPortApp // WithProtocol sets the Protocol field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Protocol field is set to the value of the last call. -func (b *EndpointPortApplyConfiguration) WithProtocol(value v1.Protocol) *EndpointPortApplyConfiguration { +func (b *EndpointPortApplyConfiguration) WithProtocol(value corev1.Protocol) *EndpointPortApplyConfiguration { b.Protocol = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/endpointslice.go b/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/endpointslice.go index 97002d2bb..a27c0ab1a 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/endpointslice.go +++ b/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/endpointslice.go @@ -20,21 +20,21 @@ package v1 import ( discoveryv1 "k8s.io/api/discovery/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" internal "k8s.io/client-go/applyconfigurations/internal" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) // EndpointSliceApplyConfiguration represents a declarative configuration of the EndpointSlice type for use // with apply. 
type EndpointSliceApplyConfiguration struct { - v1.TypeMetaApplyConfiguration `json:",inline"` - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - AddressType *discoveryv1.AddressType `json:"addressType,omitempty"` - Endpoints []EndpointApplyConfiguration `json:"endpoints,omitempty"` - Ports []EndpointPortApplyConfiguration `json:"ports,omitempty"` + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + AddressType *discoveryv1.AddressType `json:"addressType,omitempty"` + Endpoints []EndpointApplyConfiguration `json:"endpoints,omitempty"` + Ports []EndpointPortApplyConfiguration `json:"ports,omitempty"` } // EndpointSlice constructs a declarative configuration of the EndpointSlice type for use with @@ -88,7 +88,7 @@ func extractEndpointSlice(endpointSlice *discoveryv1.EndpointSlice, fieldManager // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *EndpointSliceApplyConfiguration) WithKind(value string) *EndpointSliceApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -96,7 +96,7 @@ func (b *EndpointSliceApplyConfiguration) WithKind(value string) *EndpointSliceA // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *EndpointSliceApplyConfiguration) WithAPIVersion(value string) *EndpointSliceApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -105,7 +105,7 @@ func (b *EndpointSliceApplyConfiguration) WithAPIVersion(value string) *Endpoint // If called multiple times, the Name field is set to the value of the last call. func (b *EndpointSliceApplyConfiguration) WithName(value string) *EndpointSliceApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -114,7 +114,7 @@ func (b *EndpointSliceApplyConfiguration) WithName(value string) *EndpointSliceA // If called multiple times, the GenerateName field is set to the value of the last call. func (b *EndpointSliceApplyConfiguration) WithGenerateName(value string) *EndpointSliceApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -123,7 +123,7 @@ func (b *EndpointSliceApplyConfiguration) WithGenerateName(value string) *Endpoi // If called multiple times, the Namespace field is set to the value of the last call. func (b *EndpointSliceApplyConfiguration) WithNamespace(value string) *EndpointSliceApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -132,7 +132,7 @@ func (b *EndpointSliceApplyConfiguration) WithNamespace(value string) *EndpointS // If called multiple times, the UID field is set to the value of the last call. 
func (b *EndpointSliceApplyConfiguration) WithUID(value types.UID) *EndpointSliceApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -141,7 +141,7 @@ func (b *EndpointSliceApplyConfiguration) WithUID(value types.UID) *EndpointSlic // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *EndpointSliceApplyConfiguration) WithResourceVersion(value string) *EndpointSliceApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -150,25 +150,25 @@ func (b *EndpointSliceApplyConfiguration) WithResourceVersion(value string) *End // If called multiple times, the Generation field is set to the value of the last call. func (b *EndpointSliceApplyConfiguration) WithGeneration(value int64) *EndpointSliceApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CreationTimestamp field is set to the value of the last call. -func (b *EndpointSliceApplyConfiguration) WithCreationTimestamp(value metav1.Time) *EndpointSliceApplyConfiguration { +func (b *EndpointSliceApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *EndpointSliceApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. -func (b *EndpointSliceApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *EndpointSliceApplyConfiguration { +func (b *EndpointSliceApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *EndpointSliceApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -177,7 +177,7 @@ func (b *EndpointSliceApplyConfiguration) WithDeletionTimestamp(value metav1.Tim // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *EndpointSliceApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *EndpointSliceApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -187,11 +187,11 @@ func (b *EndpointSliceApplyConfiguration) WithDeletionGracePeriodSeconds(value i // overwriting an existing map entries in Labels field with the same key. 
func (b *EndpointSliceApplyConfiguration) WithLabels(entries map[string]string) *EndpointSliceApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -202,11 +202,11 @@ func (b *EndpointSliceApplyConfiguration) WithLabels(entries map[string]string) // overwriting an existing map entries in Annotations field with the same key. func (b *EndpointSliceApplyConfiguration) WithAnnotations(entries map[string]string) *EndpointSliceApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -214,13 +214,13 @@ func (b *EndpointSliceApplyConfiguration) WithAnnotations(entries map[string]str // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the OwnerReferences field. -func (b *EndpointSliceApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *EndpointSliceApplyConfiguration { +func (b *EndpointSliceApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *EndpointSliceApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -231,14 +231,14 @@ func (b *EndpointSliceApplyConfiguration) WithOwnerReferences(values ...*v1.Owne func (b *EndpointSliceApplyConfiguration) WithFinalizers(values ...string) *EndpointSliceApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } func (b *EndpointSliceApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} } } @@ -279,5 +279,5 @@ func (b *EndpointSliceApplyConfiguration) WithPorts(values ...*EndpointPortApply // GetName retrieves the value of the Name field in the declarative configuration. 
func (b *EndpointSliceApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/k8s.io/client-go/applyconfigurations/discovery/v1beta1/endpointslice.go b/vendor/k8s.io/client-go/applyconfigurations/discovery/v1beta1/endpointslice.go index 888319bc0..46133ea32 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/discovery/v1beta1/endpointslice.go +++ b/vendor/k8s.io/client-go/applyconfigurations/discovery/v1beta1/endpointslice.go @@ -19,7 +19,7 @@ limitations under the License. package v1beta1 import ( - v1beta1 "k8s.io/api/discovery/v1beta1" + discoveryv1beta1 "k8s.io/api/discovery/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" @@ -32,7 +32,7 @@ import ( type EndpointSliceApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - AddressType *v1beta1.AddressType `json:"addressType,omitempty"` + AddressType *discoveryv1beta1.AddressType `json:"addressType,omitempty"` Endpoints []EndpointApplyConfiguration `json:"endpoints,omitempty"` Ports []EndpointPortApplyConfiguration `json:"ports,omitempty"` } @@ -59,18 +59,18 @@ func EndpointSlice(name, namespace string) *EndpointSliceApplyConfiguration { // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! -func ExtractEndpointSlice(endpointSlice *v1beta1.EndpointSlice, fieldManager string) (*EndpointSliceApplyConfiguration, error) { +func ExtractEndpointSlice(endpointSlice *discoveryv1beta1.EndpointSlice, fieldManager string) (*EndpointSliceApplyConfiguration, error) { return extractEndpointSlice(endpointSlice, fieldManager, "") } // ExtractEndpointSliceStatus is the same as ExtractEndpointSlice except // that it extracts the status subresource applied configuration. // Experimental! -func ExtractEndpointSliceStatus(endpointSlice *v1beta1.EndpointSlice, fieldManager string) (*EndpointSliceApplyConfiguration, error) { +func ExtractEndpointSliceStatus(endpointSlice *discoveryv1beta1.EndpointSlice, fieldManager string) (*EndpointSliceApplyConfiguration, error) { return extractEndpointSlice(endpointSlice, fieldManager, "status") } -func extractEndpointSlice(endpointSlice *v1beta1.EndpointSlice, fieldManager string, subresource string) (*EndpointSliceApplyConfiguration, error) { +func extractEndpointSlice(endpointSlice *discoveryv1beta1.EndpointSlice, fieldManager string, subresource string) (*EndpointSliceApplyConfiguration, error) { b := &EndpointSliceApplyConfiguration{} err := managedfields.ExtractInto(endpointSlice, internal.Parser().Type("io.k8s.api.discovery.v1beta1.EndpointSlice"), fieldManager, b, subresource) if err != nil { @@ -88,7 +88,7 @@ func extractEndpointSlice(endpointSlice *v1beta1.EndpointSlice, fieldManager str // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. 
func (b *EndpointSliceApplyConfiguration) WithKind(value string) *EndpointSliceApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -96,7 +96,7 @@ func (b *EndpointSliceApplyConfiguration) WithKind(value string) *EndpointSliceA // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *EndpointSliceApplyConfiguration) WithAPIVersion(value string) *EndpointSliceApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -105,7 +105,7 @@ func (b *EndpointSliceApplyConfiguration) WithAPIVersion(value string) *Endpoint // If called multiple times, the Name field is set to the value of the last call. func (b *EndpointSliceApplyConfiguration) WithName(value string) *EndpointSliceApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -114,7 +114,7 @@ func (b *EndpointSliceApplyConfiguration) WithName(value string) *EndpointSliceA // If called multiple times, the GenerateName field is set to the value of the last call. func (b *EndpointSliceApplyConfiguration) WithGenerateName(value string) *EndpointSliceApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -123,7 +123,7 @@ func (b *EndpointSliceApplyConfiguration) WithGenerateName(value string) *Endpoi // If called multiple times, the Namespace field is set to the value of the last call. func (b *EndpointSliceApplyConfiguration) WithNamespace(value string) *EndpointSliceApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -132,7 +132,7 @@ func (b *EndpointSliceApplyConfiguration) WithNamespace(value string) *EndpointS // If called multiple times, the UID field is set to the value of the last call. func (b *EndpointSliceApplyConfiguration) WithUID(value types.UID) *EndpointSliceApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -141,7 +141,7 @@ func (b *EndpointSliceApplyConfiguration) WithUID(value types.UID) *EndpointSlic // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *EndpointSliceApplyConfiguration) WithResourceVersion(value string) *EndpointSliceApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -150,7 +150,7 @@ func (b *EndpointSliceApplyConfiguration) WithResourceVersion(value string) *End // If called multiple times, the Generation field is set to the value of the last call. func (b *EndpointSliceApplyConfiguration) WithGeneration(value int64) *EndpointSliceApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } @@ -159,7 +159,7 @@ func (b *EndpointSliceApplyConfiguration) WithGeneration(value int64) *EndpointS // If called multiple times, the CreationTimestamp field is set to the value of the last call. 
func (b *EndpointSliceApplyConfiguration) WithCreationTimestamp(value metav1.Time) *EndpointSliceApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } @@ -168,7 +168,7 @@ func (b *EndpointSliceApplyConfiguration) WithCreationTimestamp(value metav1.Tim // If called multiple times, the DeletionTimestamp field is set to the value of the last call. func (b *EndpointSliceApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *EndpointSliceApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -177,7 +177,7 @@ func (b *EndpointSliceApplyConfiguration) WithDeletionTimestamp(value metav1.Tim // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *EndpointSliceApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *EndpointSliceApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -187,11 +187,11 @@ func (b *EndpointSliceApplyConfiguration) WithDeletionGracePeriodSeconds(value i // overwriting an existing map entries in Labels field with the same key. func (b *EndpointSliceApplyConfiguration) WithLabels(entries map[string]string) *EndpointSliceApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -202,11 +202,11 @@ func (b *EndpointSliceApplyConfiguration) WithLabels(entries map[string]string) // overwriting an existing map entries in Annotations field with the same key. 
func (b *EndpointSliceApplyConfiguration) WithAnnotations(entries map[string]string) *EndpointSliceApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -220,7 +220,7 @@ func (b *EndpointSliceApplyConfiguration) WithOwnerReferences(values ...*v1.Owne if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -231,7 +231,7 @@ func (b *EndpointSliceApplyConfiguration) WithOwnerReferences(values ...*v1.Owne func (b *EndpointSliceApplyConfiguration) WithFinalizers(values ...string) *EndpointSliceApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } @@ -245,7 +245,7 @@ func (b *EndpointSliceApplyConfiguration) ensureObjectMetaApplyConfigurationExis // WithAddressType sets the AddressType field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the AddressType field is set to the value of the last call. -func (b *EndpointSliceApplyConfiguration) WithAddressType(value v1beta1.AddressType) *EndpointSliceApplyConfiguration { +func (b *EndpointSliceApplyConfiguration) WithAddressType(value discoveryv1beta1.AddressType) *EndpointSliceApplyConfiguration { b.AddressType = &value return b } @@ -279,5 +279,5 @@ func (b *EndpointSliceApplyConfiguration) WithPorts(values ...*EndpointPortApply // GetName retrieves the value of the Name field in the declarative configuration. func (b *EndpointSliceApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/k8s.io/client-go/applyconfigurations/events/v1/event.go b/vendor/k8s.io/client-go/applyconfigurations/events/v1/event.go index a6e98d1c8..64896c3d8 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/events/v1/event.go +++ b/vendor/k8s.io/client-go/applyconfigurations/events/v1/event.go @@ -19,34 +19,34 @@ limitations under the License. package v1 import ( - apieventsv1 "k8s.io/api/events/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + eventsv1 "k8s.io/api/events/v1" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" corev1 "k8s.io/client-go/applyconfigurations/core/v1" internal "k8s.io/client-go/applyconfigurations/internal" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) // EventApplyConfiguration represents a declarative configuration of the Event type for use // with apply. 
type EventApplyConfiguration struct { - v1.TypeMetaApplyConfiguration `json:",inline"` - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - EventTime *metav1.MicroTime `json:"eventTime,omitempty"` - Series *EventSeriesApplyConfiguration `json:"series,omitempty"` - ReportingController *string `json:"reportingController,omitempty"` - ReportingInstance *string `json:"reportingInstance,omitempty"` - Action *string `json:"action,omitempty"` - Reason *string `json:"reason,omitempty"` - Regarding *corev1.ObjectReferenceApplyConfiguration `json:"regarding,omitempty"` - Related *corev1.ObjectReferenceApplyConfiguration `json:"related,omitempty"` - Note *string `json:"note,omitempty"` - Type *string `json:"type,omitempty"` - DeprecatedSource *corev1.EventSourceApplyConfiguration `json:"deprecatedSource,omitempty"` - DeprecatedFirstTimestamp *metav1.Time `json:"deprecatedFirstTimestamp,omitempty"` - DeprecatedLastTimestamp *metav1.Time `json:"deprecatedLastTimestamp,omitempty"` - DeprecatedCount *int32 `json:"deprecatedCount,omitempty"` + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + EventTime *apismetav1.MicroTime `json:"eventTime,omitempty"` + Series *EventSeriesApplyConfiguration `json:"series,omitempty"` + ReportingController *string `json:"reportingController,omitempty"` + ReportingInstance *string `json:"reportingInstance,omitempty"` + Action *string `json:"action,omitempty"` + Reason *string `json:"reason,omitempty"` + Regarding *corev1.ObjectReferenceApplyConfiguration `json:"regarding,omitempty"` + Related *corev1.ObjectReferenceApplyConfiguration `json:"related,omitempty"` + Note *string `json:"note,omitempty"` + Type *string `json:"type,omitempty"` + DeprecatedSource *corev1.EventSourceApplyConfiguration `json:"deprecatedSource,omitempty"` + DeprecatedFirstTimestamp *apismetav1.Time `json:"deprecatedFirstTimestamp,omitempty"` + DeprecatedLastTimestamp *apismetav1.Time `json:"deprecatedLastTimestamp,omitempty"` + DeprecatedCount *int32 `json:"deprecatedCount,omitempty"` } // Event constructs a declarative configuration of the Event type for use with @@ -71,18 +71,18 @@ func Event(name, namespace string) *EventApplyConfiguration { // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! -func ExtractEvent(event *apieventsv1.Event, fieldManager string) (*EventApplyConfiguration, error) { +func ExtractEvent(event *eventsv1.Event, fieldManager string) (*EventApplyConfiguration, error) { return extractEvent(event, fieldManager, "") } // ExtractEventStatus is the same as ExtractEvent except // that it extracts the status subresource applied configuration. // Experimental! 
-func ExtractEventStatus(event *apieventsv1.Event, fieldManager string) (*EventApplyConfiguration, error) { +func ExtractEventStatus(event *eventsv1.Event, fieldManager string) (*EventApplyConfiguration, error) { return extractEvent(event, fieldManager, "status") } -func extractEvent(event *apieventsv1.Event, fieldManager string, subresource string) (*EventApplyConfiguration, error) { +func extractEvent(event *eventsv1.Event, fieldManager string, subresource string) (*EventApplyConfiguration, error) { b := &EventApplyConfiguration{} err := managedfields.ExtractInto(event, internal.Parser().Type("io.k8s.api.events.v1.Event"), fieldManager, b, subresource) if err != nil { @@ -100,7 +100,7 @@ func extractEvent(event *apieventsv1.Event, fieldManager string, subresource str // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *EventApplyConfiguration) WithKind(value string) *EventApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -108,7 +108,7 @@ func (b *EventApplyConfiguration) WithKind(value string) *EventApplyConfiguratio // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *EventApplyConfiguration) WithAPIVersion(value string) *EventApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -117,7 +117,7 @@ func (b *EventApplyConfiguration) WithAPIVersion(value string) *EventApplyConfig // If called multiple times, the Name field is set to the value of the last call. func (b *EventApplyConfiguration) WithName(value string) *EventApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -126,7 +126,7 @@ func (b *EventApplyConfiguration) WithName(value string) *EventApplyConfiguratio // If called multiple times, the GenerateName field is set to the value of the last call. func (b *EventApplyConfiguration) WithGenerateName(value string) *EventApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -135,7 +135,7 @@ func (b *EventApplyConfiguration) WithGenerateName(value string) *EventApplyConf // If called multiple times, the Namespace field is set to the value of the last call. func (b *EventApplyConfiguration) WithNamespace(value string) *EventApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -144,7 +144,7 @@ func (b *EventApplyConfiguration) WithNamespace(value string) *EventApplyConfigu // If called multiple times, the UID field is set to the value of the last call. func (b *EventApplyConfiguration) WithUID(value types.UID) *EventApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -153,7 +153,7 @@ func (b *EventApplyConfiguration) WithUID(value types.UID) *EventApplyConfigurat // If called multiple times, the ResourceVersion field is set to the value of the last call. 
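The Extract* helpers in the hunk above support server-side apply round-tripping: they return only the fields that a given field manager currently owns on a live object. A hedged usage sketch, with "example-manager" as an illustrative field-manager name:

package example

import (
	eventsv1 "k8s.io/api/events/v1"
	eventsv1ac "k8s.io/client-go/applyconfigurations/events/v1"
)

// ownedEventFields returns the apply configuration containing only the fields
// that "example-manager" (an illustrative name) previously applied to live.
func ownedEventFields(live *eventsv1.Event) (*eventsv1ac.EventApplyConfiguration, error) {
	return eventsv1ac.ExtractEvent(live, "example-manager")
}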
func (b *EventApplyConfiguration) WithResourceVersion(value string) *EventApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -162,25 +162,25 @@ func (b *EventApplyConfiguration) WithResourceVersion(value string) *EventApplyC // If called multiple times, the Generation field is set to the value of the last call. func (b *EventApplyConfiguration) WithGeneration(value int64) *EventApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CreationTimestamp field is set to the value of the last call. -func (b *EventApplyConfiguration) WithCreationTimestamp(value metav1.Time) *EventApplyConfiguration { +func (b *EventApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *EventApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. -func (b *EventApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *EventApplyConfiguration { +func (b *EventApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *EventApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -189,7 +189,7 @@ func (b *EventApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *Even // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *EventApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *EventApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -199,11 +199,11 @@ func (b *EventApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *E // overwriting an existing map entries in Labels field with the same key. func (b *EventApplyConfiguration) WithLabels(entries map[string]string) *EventApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -214,11 +214,11 @@ func (b *EventApplyConfiguration) WithLabels(entries map[string]string) *EventAp // overwriting an existing map entries in Annotations field with the same key. 
func (b *EventApplyConfiguration) WithAnnotations(entries map[string]string) *EventApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -226,13 +226,13 @@ func (b *EventApplyConfiguration) WithAnnotations(entries map[string]string) *Ev // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the OwnerReferences field. -func (b *EventApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *EventApplyConfiguration { +func (b *EventApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *EventApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -243,21 +243,21 @@ func (b *EventApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferen func (b *EventApplyConfiguration) WithFinalizers(values ...string) *EventApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } func (b *EventApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} } } // WithEventTime sets the EventTime field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the EventTime field is set to the value of the last call. -func (b *EventApplyConfiguration) WithEventTime(value metav1.MicroTime) *EventApplyConfiguration { +func (b *EventApplyConfiguration) WithEventTime(value apismetav1.MicroTime) *EventApplyConfiguration { b.EventTime = &value return b } @@ -345,7 +345,7 @@ func (b *EventApplyConfiguration) WithDeprecatedSource(value *corev1.EventSource // WithDeprecatedFirstTimestamp sets the DeprecatedFirstTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeprecatedFirstTimestamp field is set to the value of the last call. 
-func (b *EventApplyConfiguration) WithDeprecatedFirstTimestamp(value metav1.Time) *EventApplyConfiguration { +func (b *EventApplyConfiguration) WithDeprecatedFirstTimestamp(value apismetav1.Time) *EventApplyConfiguration { b.DeprecatedFirstTimestamp = &value return b } @@ -353,7 +353,7 @@ func (b *EventApplyConfiguration) WithDeprecatedFirstTimestamp(value metav1.Time // WithDeprecatedLastTimestamp sets the DeprecatedLastTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeprecatedLastTimestamp field is set to the value of the last call. -func (b *EventApplyConfiguration) WithDeprecatedLastTimestamp(value metav1.Time) *EventApplyConfiguration { +func (b *EventApplyConfiguration) WithDeprecatedLastTimestamp(value apismetav1.Time) *EventApplyConfiguration { b.DeprecatedLastTimestamp = &value return b } @@ -369,5 +369,5 @@ func (b *EventApplyConfiguration) WithDeprecatedCount(value int32) *EventApplyCo // GetName retrieves the value of the Name field in the declarative configuration. func (b *EventApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/k8s.io/client-go/applyconfigurations/events/v1/eventseries.go b/vendor/k8s.io/client-go/applyconfigurations/events/v1/eventseries.go index 18069c0d1..c90954bcc 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/events/v1/eventseries.go +++ b/vendor/k8s.io/client-go/applyconfigurations/events/v1/eventseries.go @@ -19,14 +19,14 @@ limitations under the License. package v1 import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) // EventSeriesApplyConfiguration represents a declarative configuration of the EventSeries type for use // with apply. type EventSeriesApplyConfiguration struct { - Count *int32 `json:"count,omitempty"` - LastObservedTime *v1.MicroTime `json:"lastObservedTime,omitempty"` + Count *int32 `json:"count,omitempty"` + LastObservedTime *metav1.MicroTime `json:"lastObservedTime,omitempty"` } // EventSeriesApplyConfiguration constructs a declarative configuration of the EventSeries type for use with @@ -46,7 +46,7 @@ func (b *EventSeriesApplyConfiguration) WithCount(value int32) *EventSeriesApply // WithLastObservedTime sets the LastObservedTime field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the LastObservedTime field is set to the value of the last call. -func (b *EventSeriesApplyConfiguration) WithLastObservedTime(value v1.MicroTime) *EventSeriesApplyConfiguration { +func (b *EventSeriesApplyConfiguration) WithLastObservedTime(value metav1.MicroTime) *EventSeriesApplyConfiguration { b.LastObservedTime = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/events/v1beta1/event.go b/vendor/k8s.io/client-go/applyconfigurations/events/v1beta1/event.go index 890d95748..dc302e395 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/events/v1beta1/event.go +++ b/vendor/k8s.io/client-go/applyconfigurations/events/v1beta1/event.go @@ -100,7 +100,7 @@ func extractEvent(event *eventsv1beta1.Event, fieldManager string, subresource s // and returns the receiver, so that objects can be built by chaining "With" function invocations. 
// If called multiple times, the Kind field is set to the value of the last call. func (b *EventApplyConfiguration) WithKind(value string) *EventApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -108,7 +108,7 @@ func (b *EventApplyConfiguration) WithKind(value string) *EventApplyConfiguratio // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *EventApplyConfiguration) WithAPIVersion(value string) *EventApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -117,7 +117,7 @@ func (b *EventApplyConfiguration) WithAPIVersion(value string) *EventApplyConfig // If called multiple times, the Name field is set to the value of the last call. func (b *EventApplyConfiguration) WithName(value string) *EventApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -126,7 +126,7 @@ func (b *EventApplyConfiguration) WithName(value string) *EventApplyConfiguratio // If called multiple times, the GenerateName field is set to the value of the last call. func (b *EventApplyConfiguration) WithGenerateName(value string) *EventApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -135,7 +135,7 @@ func (b *EventApplyConfiguration) WithGenerateName(value string) *EventApplyConf // If called multiple times, the Namespace field is set to the value of the last call. func (b *EventApplyConfiguration) WithNamespace(value string) *EventApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -144,7 +144,7 @@ func (b *EventApplyConfiguration) WithNamespace(value string) *EventApplyConfigu // If called multiple times, the UID field is set to the value of the last call. func (b *EventApplyConfiguration) WithUID(value types.UID) *EventApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -153,7 +153,7 @@ func (b *EventApplyConfiguration) WithUID(value types.UID) *EventApplyConfigurat // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *EventApplyConfiguration) WithResourceVersion(value string) *EventApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -162,7 +162,7 @@ func (b *EventApplyConfiguration) WithResourceVersion(value string) *EventApplyC // If called multiple times, the Generation field is set to the value of the last call. func (b *EventApplyConfiguration) WithGeneration(value int64) *EventApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } @@ -171,7 +171,7 @@ func (b *EventApplyConfiguration) WithGeneration(value int64) *EventApplyConfigu // If called multiple times, the CreationTimestamp field is set to the value of the last call. 
func (b *EventApplyConfiguration) WithCreationTimestamp(value metav1.Time) *EventApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } @@ -180,7 +180,7 @@ func (b *EventApplyConfiguration) WithCreationTimestamp(value metav1.Time) *Even // If called multiple times, the DeletionTimestamp field is set to the value of the last call. func (b *EventApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *EventApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -189,7 +189,7 @@ func (b *EventApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *Even // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *EventApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *EventApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -199,11 +199,11 @@ func (b *EventApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *E // overwriting an existing map entries in Labels field with the same key. func (b *EventApplyConfiguration) WithLabels(entries map[string]string) *EventApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -214,11 +214,11 @@ func (b *EventApplyConfiguration) WithLabels(entries map[string]string) *EventAp // overwriting an existing map entries in Annotations field with the same key. func (b *EventApplyConfiguration) WithAnnotations(entries map[string]string) *EventApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -232,7 +232,7 @@ func (b *EventApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferen if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -243,7 +243,7 @@ func (b *EventApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferen func (b *EventApplyConfiguration) WithFinalizers(values ...string) *EventApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } @@ -369,5 +369,5 @@ func (b *EventApplyConfiguration) WithDeprecatedCount(value int32) *EventApplyCo // GetName retrieves the value of the Name field in the declarative configuration. 
func (b *EventApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/daemonset.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/daemonset.go index ff778529c..a75e38bfb 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/daemonset.go +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/daemonset.go @@ -87,7 +87,7 @@ func extractDaemonSet(daemonSet *extensionsv1beta1.DaemonSet, fieldManager strin // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *DaemonSetApplyConfiguration) WithKind(value string) *DaemonSetApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -95,7 +95,7 @@ func (b *DaemonSetApplyConfiguration) WithKind(value string) *DaemonSetApplyConf // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *DaemonSetApplyConfiguration) WithAPIVersion(value string) *DaemonSetApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -104,7 +104,7 @@ func (b *DaemonSetApplyConfiguration) WithAPIVersion(value string) *DaemonSetApp // If called multiple times, the Name field is set to the value of the last call. func (b *DaemonSetApplyConfiguration) WithName(value string) *DaemonSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -113,7 +113,7 @@ func (b *DaemonSetApplyConfiguration) WithName(value string) *DaemonSetApplyConf // If called multiple times, the GenerateName field is set to the value of the last call. func (b *DaemonSetApplyConfiguration) WithGenerateName(value string) *DaemonSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -122,7 +122,7 @@ func (b *DaemonSetApplyConfiguration) WithGenerateName(value string) *DaemonSetA // If called multiple times, the Namespace field is set to the value of the last call. func (b *DaemonSetApplyConfiguration) WithNamespace(value string) *DaemonSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -131,7 +131,7 @@ func (b *DaemonSetApplyConfiguration) WithNamespace(value string) *DaemonSetAppl // If called multiple times, the UID field is set to the value of the last call. func (b *DaemonSetApplyConfiguration) WithUID(value types.UID) *DaemonSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -140,7 +140,7 @@ func (b *DaemonSetApplyConfiguration) WithUID(value types.UID) *DaemonSetApplyCo // If called multiple times, the ResourceVersion field is set to the value of the last call. 
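For callers, the regenerated builders are driven exactly as before: chain With* calls and hand the result to the typed clientset's Apply method. A hedged sketch under assumed wiring (the clientset construction and the "demo-manager" field manager are illustrative, not taken from this repository):

package example

import (
	"context"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	eventsv1ac "k8s.io/client-go/applyconfigurations/events/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

func applyDemoEvent(ctx context.Context, cfg *rest.Config) error {
	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		return err
	}
	// Chained With* calls populate the declarative configuration; unset fields
	// are simply omitted from the apply patch.
	ev := eventsv1ac.Event("demo-event", "default").
		WithType("Normal").
		WithReason("Demo").
		WithAction("Apply").
		WithReportingController("example.io/demo-controller").
		WithReportingInstance("demo-0").
		WithNote("created via server-side apply").
		WithEventTime(metav1.NewMicroTime(time.Now()))
	_, err = cs.EventsV1().Events("default").Apply(ctx, ev, metav1.ApplyOptions{
		FieldManager: "demo-manager", // owner of the applied fields
		Force:        true,
	})
	return err
}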
func (b *DaemonSetApplyConfiguration) WithResourceVersion(value string) *DaemonSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -149,7 +149,7 @@ func (b *DaemonSetApplyConfiguration) WithResourceVersion(value string) *DaemonS // If called multiple times, the Generation field is set to the value of the last call. func (b *DaemonSetApplyConfiguration) WithGeneration(value int64) *DaemonSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } @@ -158,7 +158,7 @@ func (b *DaemonSetApplyConfiguration) WithGeneration(value int64) *DaemonSetAppl // If called multiple times, the CreationTimestamp field is set to the value of the last call. func (b *DaemonSetApplyConfiguration) WithCreationTimestamp(value metav1.Time) *DaemonSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } @@ -167,7 +167,7 @@ func (b *DaemonSetApplyConfiguration) WithCreationTimestamp(value metav1.Time) * // If called multiple times, the DeletionTimestamp field is set to the value of the last call. func (b *DaemonSetApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *DaemonSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -176,7 +176,7 @@ func (b *DaemonSetApplyConfiguration) WithDeletionTimestamp(value metav1.Time) * // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *DaemonSetApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *DaemonSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -186,11 +186,11 @@ func (b *DaemonSetApplyConfiguration) WithDeletionGracePeriodSeconds(value int64 // overwriting an existing map entries in Labels field with the same key. func (b *DaemonSetApplyConfiguration) WithLabels(entries map[string]string) *DaemonSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -201,11 +201,11 @@ func (b *DaemonSetApplyConfiguration) WithLabels(entries map[string]string) *Dae // overwriting an existing map entries in Annotations field with the same key. 
func (b *DaemonSetApplyConfiguration) WithAnnotations(entries map[string]string) *DaemonSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -219,7 +219,7 @@ func (b *DaemonSetApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerRef if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -230,7 +230,7 @@ func (b *DaemonSetApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerRef func (b *DaemonSetApplyConfiguration) WithFinalizers(values ...string) *DaemonSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } @@ -260,5 +260,5 @@ func (b *DaemonSetApplyConfiguration) WithStatus(value *DaemonSetStatusApplyConf // GetName retrieves the value of the Name field in the declarative configuration. func (b *DaemonSetApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/daemonsetcondition.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/daemonsetcondition.go index 9b8057e69..0312a3099 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/daemonsetcondition.go +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/daemonsetcondition.go @@ -20,18 +20,18 @@ package v1beta1 import ( v1 "k8s.io/api/core/v1" - v1beta1 "k8s.io/api/extensions/v1beta1" + extensionsv1beta1 "k8s.io/api/extensions/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) // DaemonSetConditionApplyConfiguration represents a declarative configuration of the DaemonSetCondition type for use // with apply. type DaemonSetConditionApplyConfiguration struct { - Type *v1beta1.DaemonSetConditionType `json:"type,omitempty"` - Status *v1.ConditionStatus `json:"status,omitempty"` - LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"` - Reason *string `json:"reason,omitempty"` - Message *string `json:"message,omitempty"` + Type *extensionsv1beta1.DaemonSetConditionType `json:"type,omitempty"` + Status *v1.ConditionStatus `json:"status,omitempty"` + LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"` + Reason *string `json:"reason,omitempty"` + Message *string `json:"message,omitempty"` } // DaemonSetConditionApplyConfiguration constructs a declarative configuration of the DaemonSetCondition type for use with @@ -43,7 +43,7 @@ func DaemonSetCondition() *DaemonSetConditionApplyConfiguration { // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. 
-func (b *DaemonSetConditionApplyConfiguration) WithType(value v1beta1.DaemonSetConditionType) *DaemonSetConditionApplyConfiguration { +func (b *DaemonSetConditionApplyConfiguration) WithType(value extensionsv1beta1.DaemonSetConditionType) *DaemonSetConditionApplyConfiguration { b.Type = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/daemonsetupdatestrategy.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/daemonsetupdatestrategy.go index e597b15a6..d3403605f 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/daemonsetupdatestrategy.go +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/daemonsetupdatestrategy.go @@ -19,14 +19,14 @@ limitations under the License. package v1beta1 import ( - v1beta1 "k8s.io/api/extensions/v1beta1" + extensionsv1beta1 "k8s.io/api/extensions/v1beta1" ) // DaemonSetUpdateStrategyApplyConfiguration represents a declarative configuration of the DaemonSetUpdateStrategy type for use // with apply. type DaemonSetUpdateStrategyApplyConfiguration struct { - Type *v1beta1.DaemonSetUpdateStrategyType `json:"type,omitempty"` - RollingUpdate *RollingUpdateDaemonSetApplyConfiguration `json:"rollingUpdate,omitempty"` + Type *extensionsv1beta1.DaemonSetUpdateStrategyType `json:"type,omitempty"` + RollingUpdate *RollingUpdateDaemonSetApplyConfiguration `json:"rollingUpdate,omitempty"` } // DaemonSetUpdateStrategyApplyConfiguration constructs a declarative configuration of the DaemonSetUpdateStrategy type for use with @@ -38,7 +38,7 @@ func DaemonSetUpdateStrategy() *DaemonSetUpdateStrategyApplyConfiguration { // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. -func (b *DaemonSetUpdateStrategyApplyConfiguration) WithType(value v1beta1.DaemonSetUpdateStrategyType) *DaemonSetUpdateStrategyApplyConfiguration { +func (b *DaemonSetUpdateStrategyApplyConfiguration) WithType(value extensionsv1beta1.DaemonSetUpdateStrategyType) *DaemonSetUpdateStrategyApplyConfiguration { b.Type = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/deployment.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/deployment.go index 6badc64d8..94fac18c6 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/deployment.go +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/deployment.go @@ -87,7 +87,7 @@ func extractDeployment(deployment *extensionsv1beta1.Deployment, fieldManager st // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *DeploymentApplyConfiguration) WithKind(value string) *DeploymentApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -95,7 +95,7 @@ func (b *DeploymentApplyConfiguration) WithKind(value string) *DeploymentApplyCo // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. 
func (b *DeploymentApplyConfiguration) WithAPIVersion(value string) *DeploymentApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -104,7 +104,7 @@ func (b *DeploymentApplyConfiguration) WithAPIVersion(value string) *DeploymentA // If called multiple times, the Name field is set to the value of the last call. func (b *DeploymentApplyConfiguration) WithName(value string) *DeploymentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -113,7 +113,7 @@ func (b *DeploymentApplyConfiguration) WithName(value string) *DeploymentApplyCo // If called multiple times, the GenerateName field is set to the value of the last call. func (b *DeploymentApplyConfiguration) WithGenerateName(value string) *DeploymentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -122,7 +122,7 @@ func (b *DeploymentApplyConfiguration) WithGenerateName(value string) *Deploymen // If called multiple times, the Namespace field is set to the value of the last call. func (b *DeploymentApplyConfiguration) WithNamespace(value string) *DeploymentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -131,7 +131,7 @@ func (b *DeploymentApplyConfiguration) WithNamespace(value string) *DeploymentAp // If called multiple times, the UID field is set to the value of the last call. func (b *DeploymentApplyConfiguration) WithUID(value types.UID) *DeploymentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -140,7 +140,7 @@ func (b *DeploymentApplyConfiguration) WithUID(value types.UID) *DeploymentApply // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *DeploymentApplyConfiguration) WithResourceVersion(value string) *DeploymentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -149,7 +149,7 @@ func (b *DeploymentApplyConfiguration) WithResourceVersion(value string) *Deploy // If called multiple times, the Generation field is set to the value of the last call. func (b *DeploymentApplyConfiguration) WithGeneration(value int64) *DeploymentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } @@ -158,7 +158,7 @@ func (b *DeploymentApplyConfiguration) WithGeneration(value int64) *DeploymentAp // If called multiple times, the CreationTimestamp field is set to the value of the last call. func (b *DeploymentApplyConfiguration) WithCreationTimestamp(value metav1.Time) *DeploymentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } @@ -167,7 +167,7 @@ func (b *DeploymentApplyConfiguration) WithCreationTimestamp(value metav1.Time) // If called multiple times, the DeletionTimestamp field is set to the value of the last call. 
func (b *DeploymentApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *DeploymentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -176,7 +176,7 @@ func (b *DeploymentApplyConfiguration) WithDeletionTimestamp(value metav1.Time) // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *DeploymentApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *DeploymentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -186,11 +186,11 @@ func (b *DeploymentApplyConfiguration) WithDeletionGracePeriodSeconds(value int6 // overwriting an existing map entries in Labels field with the same key. func (b *DeploymentApplyConfiguration) WithLabels(entries map[string]string) *DeploymentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -201,11 +201,11 @@ func (b *DeploymentApplyConfiguration) WithLabels(entries map[string]string) *De // overwriting an existing map entries in Annotations field with the same key. func (b *DeploymentApplyConfiguration) WithAnnotations(entries map[string]string) *DeploymentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -219,7 +219,7 @@ func (b *DeploymentApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerRe if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -230,7 +230,7 @@ func (b *DeploymentApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerRe func (b *DeploymentApplyConfiguration) WithFinalizers(values ...string) *DeploymentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } @@ -260,5 +260,5 @@ func (b *DeploymentApplyConfiguration) WithStatus(value *DeploymentStatusApplyCo // GetName retrieves the value of the Name field in the declarative configuration. 
func (b *DeploymentApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/deploymentcondition.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/deploymentcondition.go index 79e109a77..2b64508d9 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/deploymentcondition.go +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/deploymentcondition.go @@ -20,19 +20,19 @@ package v1beta1 import ( v1 "k8s.io/api/core/v1" - v1beta1 "k8s.io/api/extensions/v1beta1" + extensionsv1beta1 "k8s.io/api/extensions/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) // DeploymentConditionApplyConfiguration represents a declarative configuration of the DeploymentCondition type for use // with apply. type DeploymentConditionApplyConfiguration struct { - Type *v1beta1.DeploymentConditionType `json:"type,omitempty"` - Status *v1.ConditionStatus `json:"status,omitempty"` - LastUpdateTime *metav1.Time `json:"lastUpdateTime,omitempty"` - LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"` - Reason *string `json:"reason,omitempty"` - Message *string `json:"message,omitempty"` + Type *extensionsv1beta1.DeploymentConditionType `json:"type,omitempty"` + Status *v1.ConditionStatus `json:"status,omitempty"` + LastUpdateTime *metav1.Time `json:"lastUpdateTime,omitempty"` + LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"` + Reason *string `json:"reason,omitempty"` + Message *string `json:"message,omitempty"` } // DeploymentConditionApplyConfiguration constructs a declarative configuration of the DeploymentCondition type for use with @@ -44,7 +44,7 @@ func DeploymentCondition() *DeploymentConditionApplyConfiguration { // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. -func (b *DeploymentConditionApplyConfiguration) WithType(value v1beta1.DeploymentConditionType) *DeploymentConditionApplyConfiguration { +func (b *DeploymentConditionApplyConfiguration) WithType(value extensionsv1beta1.DeploymentConditionType) *DeploymentConditionApplyConfiguration { b.Type = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/deploymentstrategy.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/deploymentstrategy.go index 2d88406eb..b142b0deb 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/deploymentstrategy.go +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/deploymentstrategy.go @@ -19,13 +19,13 @@ limitations under the License. package v1beta1 import ( - v1beta1 "k8s.io/api/extensions/v1beta1" + extensionsv1beta1 "k8s.io/api/extensions/v1beta1" ) // DeploymentStrategyApplyConfiguration represents a declarative configuration of the DeploymentStrategy type for use // with apply. 
type DeploymentStrategyApplyConfiguration struct { - Type *v1beta1.DeploymentStrategyType `json:"type,omitempty"` + Type *extensionsv1beta1.DeploymentStrategyType `json:"type,omitempty"` RollingUpdate *RollingUpdateDeploymentApplyConfiguration `json:"rollingUpdate,omitempty"` } @@ -38,7 +38,7 @@ func DeploymentStrategy() *DeploymentStrategyApplyConfiguration { // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. -func (b *DeploymentStrategyApplyConfiguration) WithType(value v1beta1.DeploymentStrategyType) *DeploymentStrategyApplyConfiguration { +func (b *DeploymentStrategyApplyConfiguration) WithType(value extensionsv1beta1.DeploymentStrategyType) *DeploymentStrategyApplyConfiguration { b.Type = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/httpingresspath.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/httpingresspath.go index 3826e0ddd..32e0c8b1d 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/httpingresspath.go +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/httpingresspath.go @@ -19,14 +19,14 @@ limitations under the License. package v1beta1 import ( - v1beta1 "k8s.io/api/extensions/v1beta1" + extensionsv1beta1 "k8s.io/api/extensions/v1beta1" ) // HTTPIngressPathApplyConfiguration represents a declarative configuration of the HTTPIngressPath type for use // with apply. type HTTPIngressPathApplyConfiguration struct { Path *string `json:"path,omitempty"` - PathType *v1beta1.PathType `json:"pathType,omitempty"` + PathType *extensionsv1beta1.PathType `json:"pathType,omitempty"` Backend *IngressBackendApplyConfiguration `json:"backend,omitempty"` } @@ -47,7 +47,7 @@ func (b *HTTPIngressPathApplyConfiguration) WithPath(value string) *HTTPIngressP // WithPathType sets the PathType field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the PathType field is set to the value of the last call. -func (b *HTTPIngressPathApplyConfiguration) WithPathType(value v1beta1.PathType) *HTTPIngressPathApplyConfiguration { +func (b *HTTPIngressPathApplyConfiguration) WithPathType(value extensionsv1beta1.PathType) *HTTPIngressPathApplyConfiguration { b.PathType = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingress.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingress.go index 6738bf07b..8cc05cc62 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingress.go +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingress.go @@ -87,7 +87,7 @@ func extractIngress(ingress *extensionsv1beta1.Ingress, fieldManager string, sub // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *IngressApplyConfiguration) WithKind(value string) *IngressApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -95,7 +95,7 @@ func (b *IngressApplyConfiguration) WithKind(value string) *IngressApplyConfigur // and returns the receiver, so that objects can be built by chaining "With" function invocations. 
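The alias renames seen throughout these files (metav1 vs. apismetav1, v1beta1 vs. extensionsv1beta1) exist because two unrelated packages share a short version suffix: the apimachinery meta/v1 types and the client-go applyconfigurations meta/v1 builders. A small hedged sketch of importing both side by side (the helper name is illustrative):

package example

import (
	apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1"       // concrete meta types: Time, MicroTime, ...
	metav1ac "k8s.io/client-go/applyconfigurations/meta/v1" // generated builders: OwnerReference, ObjectMeta, ...
)

// demoOwnerRef shows the two "meta/v1" packages coexisting under distinct aliases.
func demoOwnerRef(name string) (*metav1ac.OwnerReferenceApplyConfiguration, apismetav1.Time) {
	ref := metav1ac.OwnerReference().
		WithAPIVersion("apps/v1").
		WithKind("Deployment").
		WithName(name).
		WithController(true)
	return ref, apismetav1.Now() // apismetav1 supplies the concrete timestamp value
}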
// If called multiple times, the APIVersion field is set to the value of the last call. func (b *IngressApplyConfiguration) WithAPIVersion(value string) *IngressApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -104,7 +104,7 @@ func (b *IngressApplyConfiguration) WithAPIVersion(value string) *IngressApplyCo // If called multiple times, the Name field is set to the value of the last call. func (b *IngressApplyConfiguration) WithName(value string) *IngressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -113,7 +113,7 @@ func (b *IngressApplyConfiguration) WithName(value string) *IngressApplyConfigur // If called multiple times, the GenerateName field is set to the value of the last call. func (b *IngressApplyConfiguration) WithGenerateName(value string) *IngressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -122,7 +122,7 @@ func (b *IngressApplyConfiguration) WithGenerateName(value string) *IngressApply // If called multiple times, the Namespace field is set to the value of the last call. func (b *IngressApplyConfiguration) WithNamespace(value string) *IngressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -131,7 +131,7 @@ func (b *IngressApplyConfiguration) WithNamespace(value string) *IngressApplyCon // If called multiple times, the UID field is set to the value of the last call. func (b *IngressApplyConfiguration) WithUID(value types.UID) *IngressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -140,7 +140,7 @@ func (b *IngressApplyConfiguration) WithUID(value types.UID) *IngressApplyConfig // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *IngressApplyConfiguration) WithResourceVersion(value string) *IngressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -149,7 +149,7 @@ func (b *IngressApplyConfiguration) WithResourceVersion(value string) *IngressAp // If called multiple times, the Generation field is set to the value of the last call. func (b *IngressApplyConfiguration) WithGeneration(value int64) *IngressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } @@ -158,7 +158,7 @@ func (b *IngressApplyConfiguration) WithGeneration(value int64) *IngressApplyCon // If called multiple times, the CreationTimestamp field is set to the value of the last call. func (b *IngressApplyConfiguration) WithCreationTimestamp(value metav1.Time) *IngressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } @@ -167,7 +167,7 @@ func (b *IngressApplyConfiguration) WithCreationTimestamp(value metav1.Time) *In // If called multiple times, the DeletionTimestamp field is set to the value of the last call. 
func (b *IngressApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *IngressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -176,7 +176,7 @@ func (b *IngressApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *In // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *IngressApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *IngressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -186,11 +186,11 @@ func (b *IngressApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) // overwriting an existing map entries in Labels field with the same key. func (b *IngressApplyConfiguration) WithLabels(entries map[string]string) *IngressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -201,11 +201,11 @@ func (b *IngressApplyConfiguration) WithLabels(entries map[string]string) *Ingre // overwriting an existing map entries in Annotations field with the same key. func (b *IngressApplyConfiguration) WithAnnotations(entries map[string]string) *IngressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -219,7 +219,7 @@ func (b *IngressApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerRefer if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -230,7 +230,7 @@ func (b *IngressApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerRefer func (b *IngressApplyConfiguration) WithFinalizers(values ...string) *IngressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } @@ -260,5 +260,5 @@ func (b *IngressApplyConfiguration) WithStatus(value *IngressStatusApplyConfigur // GetName retrieves the value of the Name field in the declarative configuration. 
func (b *IngressApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressrule.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressrule.go index dc676f7b6..809fada92 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressrule.go +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressrule.go @@ -43,6 +43,6 @@ func (b *IngressRuleApplyConfiguration) WithHost(value string) *IngressRuleApply // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the HTTP field is set to the value of the last call. func (b *IngressRuleApplyConfiguration) WithHTTP(value *HTTPIngressRuleValueApplyConfiguration) *IngressRuleApplyConfiguration { - b.HTTP = value + b.IngressRuleValueApplyConfiguration.HTTP = value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicy.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicy.go index fb1f95a6d..5ce0eb31f 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicy.go +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicy.go @@ -86,7 +86,7 @@ func extractNetworkPolicy(networkPolicy *extensionsv1beta1.NetworkPolicy, fieldM // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *NetworkPolicyApplyConfiguration) WithKind(value string) *NetworkPolicyApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -94,7 +94,7 @@ func (b *NetworkPolicyApplyConfiguration) WithKind(value string) *NetworkPolicyA // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *NetworkPolicyApplyConfiguration) WithAPIVersion(value string) *NetworkPolicyApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -103,7 +103,7 @@ func (b *NetworkPolicyApplyConfiguration) WithAPIVersion(value string) *NetworkP // If called multiple times, the Name field is set to the value of the last call. func (b *NetworkPolicyApplyConfiguration) WithName(value string) *NetworkPolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -112,7 +112,7 @@ func (b *NetworkPolicyApplyConfiguration) WithName(value string) *NetworkPolicyA // If called multiple times, the GenerateName field is set to the value of the last call. func (b *NetworkPolicyApplyConfiguration) WithGenerateName(value string) *NetworkPolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -121,7 +121,7 @@ func (b *NetworkPolicyApplyConfiguration) WithGenerateName(value string) *Networ // If called multiple times, the Namespace field is set to the value of the last call. 
func (b *NetworkPolicyApplyConfiguration) WithNamespace(value string) *NetworkPolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -130,7 +130,7 @@ func (b *NetworkPolicyApplyConfiguration) WithNamespace(value string) *NetworkPo // If called multiple times, the UID field is set to the value of the last call. func (b *NetworkPolicyApplyConfiguration) WithUID(value types.UID) *NetworkPolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -139,7 +139,7 @@ func (b *NetworkPolicyApplyConfiguration) WithUID(value types.UID) *NetworkPolic // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *NetworkPolicyApplyConfiguration) WithResourceVersion(value string) *NetworkPolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -148,7 +148,7 @@ func (b *NetworkPolicyApplyConfiguration) WithResourceVersion(value string) *Net // If called multiple times, the Generation field is set to the value of the last call. func (b *NetworkPolicyApplyConfiguration) WithGeneration(value int64) *NetworkPolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } @@ -157,7 +157,7 @@ func (b *NetworkPolicyApplyConfiguration) WithGeneration(value int64) *NetworkPo // If called multiple times, the CreationTimestamp field is set to the value of the last call. func (b *NetworkPolicyApplyConfiguration) WithCreationTimestamp(value metav1.Time) *NetworkPolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } @@ -166,7 +166,7 @@ func (b *NetworkPolicyApplyConfiguration) WithCreationTimestamp(value metav1.Tim // If called multiple times, the DeletionTimestamp field is set to the value of the last call. func (b *NetworkPolicyApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *NetworkPolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -175,7 +175,7 @@ func (b *NetworkPolicyApplyConfiguration) WithDeletionTimestamp(value metav1.Tim // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *NetworkPolicyApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *NetworkPolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -185,11 +185,11 @@ func (b *NetworkPolicyApplyConfiguration) WithDeletionGracePeriodSeconds(value i // overwriting an existing map entries in Labels field with the same key. 
func (b *NetworkPolicyApplyConfiguration) WithLabels(entries map[string]string) *NetworkPolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -200,11 +200,11 @@ func (b *NetworkPolicyApplyConfiguration) WithLabels(entries map[string]string) // overwriting an existing map entries in Annotations field with the same key. func (b *NetworkPolicyApplyConfiguration) WithAnnotations(entries map[string]string) *NetworkPolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -218,7 +218,7 @@ func (b *NetworkPolicyApplyConfiguration) WithOwnerReferences(values ...*v1.Owne if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -229,7 +229,7 @@ func (b *NetworkPolicyApplyConfiguration) WithOwnerReferences(values ...*v1.Owne func (b *NetworkPolicyApplyConfiguration) WithFinalizers(values ...string) *NetworkPolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } @@ -251,5 +251,5 @@ func (b *NetworkPolicyApplyConfiguration) WithSpec(value *NetworkPolicySpecApply // GetName retrieves the value of the Name field in the declarative configuration. func (b *NetworkPolicyApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/replicaset.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/replicaset.go index 24c6b6ad1..97a972f53 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/replicaset.go +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/replicaset.go @@ -87,7 +87,7 @@ func extractReplicaSet(replicaSet *extensionsv1beta1.ReplicaSet, fieldManager st // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *ReplicaSetApplyConfiguration) WithKind(value string) *ReplicaSetApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -95,7 +95,7 @@ func (b *ReplicaSetApplyConfiguration) WithKind(value string) *ReplicaSetApplyCo // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. 
func (b *ReplicaSetApplyConfiguration) WithAPIVersion(value string) *ReplicaSetApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -104,7 +104,7 @@ func (b *ReplicaSetApplyConfiguration) WithAPIVersion(value string) *ReplicaSetA // If called multiple times, the Name field is set to the value of the last call. func (b *ReplicaSetApplyConfiguration) WithName(value string) *ReplicaSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -113,7 +113,7 @@ func (b *ReplicaSetApplyConfiguration) WithName(value string) *ReplicaSetApplyCo // If called multiple times, the GenerateName field is set to the value of the last call. func (b *ReplicaSetApplyConfiguration) WithGenerateName(value string) *ReplicaSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -122,7 +122,7 @@ func (b *ReplicaSetApplyConfiguration) WithGenerateName(value string) *ReplicaSe // If called multiple times, the Namespace field is set to the value of the last call. func (b *ReplicaSetApplyConfiguration) WithNamespace(value string) *ReplicaSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -131,7 +131,7 @@ func (b *ReplicaSetApplyConfiguration) WithNamespace(value string) *ReplicaSetAp // If called multiple times, the UID field is set to the value of the last call. func (b *ReplicaSetApplyConfiguration) WithUID(value types.UID) *ReplicaSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -140,7 +140,7 @@ func (b *ReplicaSetApplyConfiguration) WithUID(value types.UID) *ReplicaSetApply // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *ReplicaSetApplyConfiguration) WithResourceVersion(value string) *ReplicaSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -149,7 +149,7 @@ func (b *ReplicaSetApplyConfiguration) WithResourceVersion(value string) *Replic // If called multiple times, the Generation field is set to the value of the last call. func (b *ReplicaSetApplyConfiguration) WithGeneration(value int64) *ReplicaSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } @@ -158,7 +158,7 @@ func (b *ReplicaSetApplyConfiguration) WithGeneration(value int64) *ReplicaSetAp // If called multiple times, the CreationTimestamp field is set to the value of the last call. func (b *ReplicaSetApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ReplicaSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } @@ -167,7 +167,7 @@ func (b *ReplicaSetApplyConfiguration) WithCreationTimestamp(value metav1.Time) // If called multiple times, the DeletionTimestamp field is set to the value of the last call. 
func (b *ReplicaSetApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ReplicaSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -176,7 +176,7 @@ func (b *ReplicaSetApplyConfiguration) WithDeletionTimestamp(value metav1.Time) // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *ReplicaSetApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ReplicaSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -186,11 +186,11 @@ func (b *ReplicaSetApplyConfiguration) WithDeletionGracePeriodSeconds(value int6 // overwriting an existing map entries in Labels field with the same key. func (b *ReplicaSetApplyConfiguration) WithLabels(entries map[string]string) *ReplicaSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -201,11 +201,11 @@ func (b *ReplicaSetApplyConfiguration) WithLabels(entries map[string]string) *Re // overwriting an existing map entries in Annotations field with the same key. func (b *ReplicaSetApplyConfiguration) WithAnnotations(entries map[string]string) *ReplicaSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -219,7 +219,7 @@ func (b *ReplicaSetApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerRe if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -230,7 +230,7 @@ func (b *ReplicaSetApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerRe func (b *ReplicaSetApplyConfiguration) WithFinalizers(values ...string) *ReplicaSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } @@ -260,5 +260,5 @@ func (b *ReplicaSetApplyConfiguration) WithStatus(value *ReplicaSetStatusApplyCo // GetName retrieves the value of the Name field in the declarative configuration. 
func (b *ReplicaSetApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/replicasetcondition.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/replicasetcondition.go index 21a25ae81..540079fe5 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/replicasetcondition.go +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/replicasetcondition.go @@ -20,18 +20,18 @@ package v1beta1 import ( v1 "k8s.io/api/core/v1" - v1beta1 "k8s.io/api/extensions/v1beta1" + extensionsv1beta1 "k8s.io/api/extensions/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) // ReplicaSetConditionApplyConfiguration represents a declarative configuration of the ReplicaSetCondition type for use // with apply. type ReplicaSetConditionApplyConfiguration struct { - Type *v1beta1.ReplicaSetConditionType `json:"type,omitempty"` - Status *v1.ConditionStatus `json:"status,omitempty"` - LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"` - Reason *string `json:"reason,omitempty"` - Message *string `json:"message,omitempty"` + Type *extensionsv1beta1.ReplicaSetConditionType `json:"type,omitempty"` + Status *v1.ConditionStatus `json:"status,omitempty"` + LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"` + Reason *string `json:"reason,omitempty"` + Message *string `json:"message,omitempty"` } // ReplicaSetConditionApplyConfiguration constructs a declarative configuration of the ReplicaSetCondition type for use with @@ -43,7 +43,7 @@ func ReplicaSetCondition() *ReplicaSetConditionApplyConfiguration { // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. -func (b *ReplicaSetConditionApplyConfiguration) WithType(value v1beta1.ReplicaSetConditionType) *ReplicaSetConditionApplyConfiguration { +func (b *ReplicaSetConditionApplyConfiguration) WithType(value extensionsv1beta1.ReplicaSetConditionType) *ReplicaSetConditionApplyConfiguration { b.Type = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/scale.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/scale.go index 101aa055b..53e73439e 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/scale.go +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/scale.go @@ -19,7 +19,7 @@ limitations under the License. 
package v1beta1 import ( - v1beta1 "k8s.io/api/extensions/v1beta1" + extensionsv1beta1 "k8s.io/api/extensions/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" v1 "k8s.io/client-go/applyconfigurations/meta/v1" @@ -30,8 +30,8 @@ import ( type ScaleApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Spec *v1beta1.ScaleSpec `json:"spec,omitempty"` - Status *v1beta1.ScaleStatus `json:"status,omitempty"` + Spec *extensionsv1beta1.ScaleSpec `json:"spec,omitempty"` + Status *extensionsv1beta1.ScaleStatus `json:"status,omitempty"` } // ScaleApplyConfiguration constructs a declarative configuration of the Scale type for use with @@ -47,7 +47,7 @@ func Scale() *ScaleApplyConfiguration { // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *ScaleApplyConfiguration) WithKind(value string) *ScaleApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -55,7 +55,7 @@ func (b *ScaleApplyConfiguration) WithKind(value string) *ScaleApplyConfiguratio // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *ScaleApplyConfiguration) WithAPIVersion(value string) *ScaleApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -64,7 +64,7 @@ func (b *ScaleApplyConfiguration) WithAPIVersion(value string) *ScaleApplyConfig // If called multiple times, the Name field is set to the value of the last call. func (b *ScaleApplyConfiguration) WithName(value string) *ScaleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -73,7 +73,7 @@ func (b *ScaleApplyConfiguration) WithName(value string) *ScaleApplyConfiguratio // If called multiple times, the GenerateName field is set to the value of the last call. func (b *ScaleApplyConfiguration) WithGenerateName(value string) *ScaleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -82,7 +82,7 @@ func (b *ScaleApplyConfiguration) WithGenerateName(value string) *ScaleApplyConf // If called multiple times, the Namespace field is set to the value of the last call. func (b *ScaleApplyConfiguration) WithNamespace(value string) *ScaleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -91,7 +91,7 @@ func (b *ScaleApplyConfiguration) WithNamespace(value string) *ScaleApplyConfigu // If called multiple times, the UID field is set to the value of the last call. func (b *ScaleApplyConfiguration) WithUID(value types.UID) *ScaleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -100,7 +100,7 @@ func (b *ScaleApplyConfiguration) WithUID(value types.UID) *ScaleApplyConfigurat // If called multiple times, the ResourceVersion field is set to the value of the last call. 
func (b *ScaleApplyConfiguration) WithResourceVersion(value string) *ScaleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -109,7 +109,7 @@ func (b *ScaleApplyConfiguration) WithResourceVersion(value string) *ScaleApplyC // If called multiple times, the Generation field is set to the value of the last call. func (b *ScaleApplyConfiguration) WithGeneration(value int64) *ScaleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } @@ -118,7 +118,7 @@ func (b *ScaleApplyConfiguration) WithGeneration(value int64) *ScaleApplyConfigu // If called multiple times, the CreationTimestamp field is set to the value of the last call. func (b *ScaleApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ScaleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } @@ -127,7 +127,7 @@ func (b *ScaleApplyConfiguration) WithCreationTimestamp(value metav1.Time) *Scal // If called multiple times, the DeletionTimestamp field is set to the value of the last call. func (b *ScaleApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ScaleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -136,7 +136,7 @@ func (b *ScaleApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *Scal // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *ScaleApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ScaleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -146,11 +146,11 @@ func (b *ScaleApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *S // overwriting an existing map entries in Labels field with the same key. func (b *ScaleApplyConfiguration) WithLabels(entries map[string]string) *ScaleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -161,11 +161,11 @@ func (b *ScaleApplyConfiguration) WithLabels(entries map[string]string) *ScaleAp // overwriting an existing map entries in Annotations field with the same key. 
func (b *ScaleApplyConfiguration) WithAnnotations(entries map[string]string) *ScaleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -179,7 +179,7 @@ func (b *ScaleApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferen if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -190,7 +190,7 @@ func (b *ScaleApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferen func (b *ScaleApplyConfiguration) WithFinalizers(values ...string) *ScaleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } @@ -204,7 +204,7 @@ func (b *ScaleApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { // WithSpec sets the Spec field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Spec field is set to the value of the last call. -func (b *ScaleApplyConfiguration) WithSpec(value v1beta1.ScaleSpec) *ScaleApplyConfiguration { +func (b *ScaleApplyConfiguration) WithSpec(value extensionsv1beta1.ScaleSpec) *ScaleApplyConfiguration { b.Spec = &value return b } @@ -212,7 +212,7 @@ func (b *ScaleApplyConfiguration) WithSpec(value v1beta1.ScaleSpec) *ScaleApplyC // WithStatus sets the Status field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Status field is set to the value of the last call. -func (b *ScaleApplyConfiguration) WithStatus(value v1beta1.ScaleStatus) *ScaleApplyConfiguration { +func (b *ScaleApplyConfiguration) WithStatus(value extensionsv1beta1.ScaleStatus) *ScaleApplyConfiguration { b.Status = &value return b } @@ -220,5 +220,5 @@ func (b *ScaleApplyConfiguration) WithStatus(value v1beta1.ScaleStatus) *ScaleAp // GetName retrieves the value of the Name field in the declarative configuration. func (b *ScaleApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowdistinguishermethod.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowdistinguishermethod.go index 0f3b61af9..f8923ae7b 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowdistinguishermethod.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowdistinguishermethod.go @@ -19,13 +19,13 @@ limitations under the License. 
package v1 import ( - v1 "k8s.io/api/flowcontrol/v1" + flowcontrolv1 "k8s.io/api/flowcontrol/v1" ) // FlowDistinguisherMethodApplyConfiguration represents a declarative configuration of the FlowDistinguisherMethod type for use // with apply. type FlowDistinguisherMethodApplyConfiguration struct { - Type *v1.FlowDistinguisherMethodType `json:"type,omitempty"` + Type *flowcontrolv1.FlowDistinguisherMethodType `json:"type,omitempty"` } // FlowDistinguisherMethodApplyConfiguration constructs a declarative configuration of the FlowDistinguisherMethod type for use with @@ -37,7 +37,7 @@ func FlowDistinguisherMethod() *FlowDistinguisherMethodApplyConfiguration { // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. -func (b *FlowDistinguisherMethodApplyConfiguration) WithType(value v1.FlowDistinguisherMethodType) *FlowDistinguisherMethodApplyConfiguration { +func (b *FlowDistinguisherMethodApplyConfiguration) WithType(value flowcontrolv1.FlowDistinguisherMethodType) *FlowDistinguisherMethodApplyConfiguration { b.Type = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowschema.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowschema.go index 9e3978af5..3219319ae 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowschema.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowschema.go @@ -19,21 +19,21 @@ limitations under the License. package v1 import ( - apiflowcontrolv1 "k8s.io/api/flowcontrol/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + flowcontrolv1 "k8s.io/api/flowcontrol/v1" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" internal "k8s.io/client-go/applyconfigurations/internal" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) // FlowSchemaApplyConfiguration represents a declarative configuration of the FlowSchema type for use // with apply. type FlowSchemaApplyConfiguration struct { - v1.TypeMetaApplyConfiguration `json:",inline"` - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Spec *FlowSchemaSpecApplyConfiguration `json:"spec,omitempty"` - Status *FlowSchemaStatusApplyConfiguration `json:"status,omitempty"` + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *FlowSchemaSpecApplyConfiguration `json:"spec,omitempty"` + Status *FlowSchemaStatusApplyConfiguration `json:"status,omitempty"` } // FlowSchema constructs a declarative configuration of the FlowSchema type for use with @@ -57,18 +57,18 @@ func FlowSchema(name string) *FlowSchemaApplyConfiguration { // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! 
-func ExtractFlowSchema(flowSchema *apiflowcontrolv1.FlowSchema, fieldManager string) (*FlowSchemaApplyConfiguration, error) { +func ExtractFlowSchema(flowSchema *flowcontrolv1.FlowSchema, fieldManager string) (*FlowSchemaApplyConfiguration, error) { return extractFlowSchema(flowSchema, fieldManager, "") } // ExtractFlowSchemaStatus is the same as ExtractFlowSchema except // that it extracts the status subresource applied configuration. // Experimental! -func ExtractFlowSchemaStatus(flowSchema *apiflowcontrolv1.FlowSchema, fieldManager string) (*FlowSchemaApplyConfiguration, error) { +func ExtractFlowSchemaStatus(flowSchema *flowcontrolv1.FlowSchema, fieldManager string) (*FlowSchemaApplyConfiguration, error) { return extractFlowSchema(flowSchema, fieldManager, "status") } -func extractFlowSchema(flowSchema *apiflowcontrolv1.FlowSchema, fieldManager string, subresource string) (*FlowSchemaApplyConfiguration, error) { +func extractFlowSchema(flowSchema *flowcontrolv1.FlowSchema, fieldManager string, subresource string) (*FlowSchemaApplyConfiguration, error) { b := &FlowSchemaApplyConfiguration{} err := managedfields.ExtractInto(flowSchema, internal.Parser().Type("io.k8s.api.flowcontrol.v1.FlowSchema"), fieldManager, b, subresource) if err != nil { @@ -85,7 +85,7 @@ func extractFlowSchema(flowSchema *apiflowcontrolv1.FlowSchema, fieldManager str // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *FlowSchemaApplyConfiguration) WithKind(value string) *FlowSchemaApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -93,7 +93,7 @@ func (b *FlowSchemaApplyConfiguration) WithKind(value string) *FlowSchemaApplyCo // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *FlowSchemaApplyConfiguration) WithAPIVersion(value string) *FlowSchemaApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -102,7 +102,7 @@ func (b *FlowSchemaApplyConfiguration) WithAPIVersion(value string) *FlowSchemaA // If called multiple times, the Name field is set to the value of the last call. func (b *FlowSchemaApplyConfiguration) WithName(value string) *FlowSchemaApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -111,7 +111,7 @@ func (b *FlowSchemaApplyConfiguration) WithName(value string) *FlowSchemaApplyCo // If called multiple times, the GenerateName field is set to the value of the last call. func (b *FlowSchemaApplyConfiguration) WithGenerateName(value string) *FlowSchemaApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -120,7 +120,7 @@ func (b *FlowSchemaApplyConfiguration) WithGenerateName(value string) *FlowSchem // If called multiple times, the Namespace field is set to the value of the last call. 
func (b *FlowSchemaApplyConfiguration) WithNamespace(value string) *FlowSchemaApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -129,7 +129,7 @@ func (b *FlowSchemaApplyConfiguration) WithNamespace(value string) *FlowSchemaAp // If called multiple times, the UID field is set to the value of the last call. func (b *FlowSchemaApplyConfiguration) WithUID(value types.UID) *FlowSchemaApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -138,7 +138,7 @@ func (b *FlowSchemaApplyConfiguration) WithUID(value types.UID) *FlowSchemaApply // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *FlowSchemaApplyConfiguration) WithResourceVersion(value string) *FlowSchemaApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -147,25 +147,25 @@ func (b *FlowSchemaApplyConfiguration) WithResourceVersion(value string) *FlowSc // If called multiple times, the Generation field is set to the value of the last call. func (b *FlowSchemaApplyConfiguration) WithGeneration(value int64) *FlowSchemaApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CreationTimestamp field is set to the value of the last call. -func (b *FlowSchemaApplyConfiguration) WithCreationTimestamp(value metav1.Time) *FlowSchemaApplyConfiguration { +func (b *FlowSchemaApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *FlowSchemaApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. -func (b *FlowSchemaApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *FlowSchemaApplyConfiguration { +func (b *FlowSchemaApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *FlowSchemaApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -174,7 +174,7 @@ func (b *FlowSchemaApplyConfiguration) WithDeletionTimestamp(value metav1.Time) // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *FlowSchemaApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *FlowSchemaApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -184,11 +184,11 @@ func (b *FlowSchemaApplyConfiguration) WithDeletionGracePeriodSeconds(value int6 // overwriting an existing map entries in Labels field with the same key. 
func (b *FlowSchemaApplyConfiguration) WithLabels(entries map[string]string) *FlowSchemaApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -199,11 +199,11 @@ func (b *FlowSchemaApplyConfiguration) WithLabels(entries map[string]string) *Fl // overwriting an existing map entries in Annotations field with the same key. func (b *FlowSchemaApplyConfiguration) WithAnnotations(entries map[string]string) *FlowSchemaApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -211,13 +211,13 @@ func (b *FlowSchemaApplyConfiguration) WithAnnotations(entries map[string]string // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the OwnerReferences field. -func (b *FlowSchemaApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *FlowSchemaApplyConfiguration { +func (b *FlowSchemaApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *FlowSchemaApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -228,14 +228,14 @@ func (b *FlowSchemaApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerRe func (b *FlowSchemaApplyConfiguration) WithFinalizers(values ...string) *FlowSchemaApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } func (b *FlowSchemaApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} } } @@ -258,5 +258,5 @@ func (b *FlowSchemaApplyConfiguration) WithStatus(value *FlowSchemaStatusApplyCo // GetName retrieves the value of the Name field in the declarative configuration. 
func (b *FlowSchemaApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowschemacondition.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowschemacondition.go index 5f26a66d2..d1c3dfbc6 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowschemacondition.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowschemacondition.go @@ -19,18 +19,18 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/flowcontrol/v1" + flowcontrolv1 "k8s.io/api/flowcontrol/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) // FlowSchemaConditionApplyConfiguration represents a declarative configuration of the FlowSchemaCondition type for use // with apply. type FlowSchemaConditionApplyConfiguration struct { - Type *v1.FlowSchemaConditionType `json:"type,omitempty"` - Status *v1.ConditionStatus `json:"status,omitempty"` - LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"` - Reason *string `json:"reason,omitempty"` - Message *string `json:"message,omitempty"` + Type *flowcontrolv1.FlowSchemaConditionType `json:"type,omitempty"` + Status *flowcontrolv1.ConditionStatus `json:"status,omitempty"` + LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"` + Reason *string `json:"reason,omitempty"` + Message *string `json:"message,omitempty"` } // FlowSchemaConditionApplyConfiguration constructs a declarative configuration of the FlowSchemaCondition type for use with @@ -42,7 +42,7 @@ func FlowSchemaCondition() *FlowSchemaConditionApplyConfiguration { // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. -func (b *FlowSchemaConditionApplyConfiguration) WithType(value v1.FlowSchemaConditionType) *FlowSchemaConditionApplyConfiguration { +func (b *FlowSchemaConditionApplyConfiguration) WithType(value flowcontrolv1.FlowSchemaConditionType) *FlowSchemaConditionApplyConfiguration { b.Type = &value return b } @@ -50,7 +50,7 @@ func (b *FlowSchemaConditionApplyConfiguration) WithType(value v1.FlowSchemaCond // WithStatus sets the Status field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Status field is set to the value of the last call. -func (b *FlowSchemaConditionApplyConfiguration) WithStatus(value v1.ConditionStatus) *FlowSchemaConditionApplyConfiguration { +func (b *FlowSchemaConditionApplyConfiguration) WithStatus(value flowcontrolv1.ConditionStatus) *FlowSchemaConditionApplyConfiguration { b.Status = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/limitresponse.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/limitresponse.go index 454ed8beb..dc2e919d7 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/limitresponse.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/limitresponse.go @@ -19,13 +19,13 @@ limitations under the License. 
package v1 import ( - v1 "k8s.io/api/flowcontrol/v1" + flowcontrolv1 "k8s.io/api/flowcontrol/v1" ) // LimitResponseApplyConfiguration represents a declarative configuration of the LimitResponse type for use // with apply. type LimitResponseApplyConfiguration struct { - Type *v1.LimitResponseType `json:"type,omitempty"` + Type *flowcontrolv1.LimitResponseType `json:"type,omitempty"` Queuing *QueuingConfigurationApplyConfiguration `json:"queuing,omitempty"` } @@ -38,7 +38,7 @@ func LimitResponse() *LimitResponseApplyConfiguration { // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. -func (b *LimitResponseApplyConfiguration) WithType(value v1.LimitResponseType) *LimitResponseApplyConfiguration { +func (b *LimitResponseApplyConfiguration) WithType(value flowcontrolv1.LimitResponseType) *LimitResponseApplyConfiguration { b.Type = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfiguration.go index bcce2679c..50d5e5132 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfiguration.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfiguration.go @@ -19,21 +19,21 @@ limitations under the License. package v1 import ( - apiflowcontrolv1 "k8s.io/api/flowcontrol/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + flowcontrolv1 "k8s.io/api/flowcontrol/v1" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" internal "k8s.io/client-go/applyconfigurations/internal" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) // PriorityLevelConfigurationApplyConfiguration represents a declarative configuration of the PriorityLevelConfiguration type for use // with apply. type PriorityLevelConfigurationApplyConfiguration struct { - v1.TypeMetaApplyConfiguration `json:",inline"` - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Spec *PriorityLevelConfigurationSpecApplyConfiguration `json:"spec,omitempty"` - Status *PriorityLevelConfigurationStatusApplyConfiguration `json:"status,omitempty"` + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *PriorityLevelConfigurationSpecApplyConfiguration `json:"spec,omitempty"` + Status *PriorityLevelConfigurationStatusApplyConfiguration `json:"status,omitempty"` } // PriorityLevelConfiguration constructs a declarative configuration of the PriorityLevelConfiguration type for use with @@ -57,18 +57,18 @@ func PriorityLevelConfiguration(name string) *PriorityLevelConfigurationApplyCon // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! 
-func ExtractPriorityLevelConfiguration(priorityLevelConfiguration *apiflowcontrolv1.PriorityLevelConfiguration, fieldManager string) (*PriorityLevelConfigurationApplyConfiguration, error) { +func ExtractPriorityLevelConfiguration(priorityLevelConfiguration *flowcontrolv1.PriorityLevelConfiguration, fieldManager string) (*PriorityLevelConfigurationApplyConfiguration, error) { return extractPriorityLevelConfiguration(priorityLevelConfiguration, fieldManager, "") } // ExtractPriorityLevelConfigurationStatus is the same as ExtractPriorityLevelConfiguration except // that it extracts the status subresource applied configuration. // Experimental! -func ExtractPriorityLevelConfigurationStatus(priorityLevelConfiguration *apiflowcontrolv1.PriorityLevelConfiguration, fieldManager string) (*PriorityLevelConfigurationApplyConfiguration, error) { +func ExtractPriorityLevelConfigurationStatus(priorityLevelConfiguration *flowcontrolv1.PriorityLevelConfiguration, fieldManager string) (*PriorityLevelConfigurationApplyConfiguration, error) { return extractPriorityLevelConfiguration(priorityLevelConfiguration, fieldManager, "status") } -func extractPriorityLevelConfiguration(priorityLevelConfiguration *apiflowcontrolv1.PriorityLevelConfiguration, fieldManager string, subresource string) (*PriorityLevelConfigurationApplyConfiguration, error) { +func extractPriorityLevelConfiguration(priorityLevelConfiguration *flowcontrolv1.PriorityLevelConfiguration, fieldManager string, subresource string) (*PriorityLevelConfigurationApplyConfiguration, error) { b := &PriorityLevelConfigurationApplyConfiguration{} err := managedfields.ExtractInto(priorityLevelConfiguration, internal.Parser().Type("io.k8s.api.flowcontrol.v1.PriorityLevelConfiguration"), fieldManager, b, subresource) if err != nil { @@ -85,7 +85,7 @@ func extractPriorityLevelConfiguration(priorityLevelConfiguration *apiflowcontro // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *PriorityLevelConfigurationApplyConfiguration) WithKind(value string) *PriorityLevelConfigurationApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -93,7 +93,7 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithKind(value string) *P // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *PriorityLevelConfigurationApplyConfiguration) WithAPIVersion(value string) *PriorityLevelConfigurationApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -102,7 +102,7 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithAPIVersion(value stri // If called multiple times, the Name field is set to the value of the last call. func (b *PriorityLevelConfigurationApplyConfiguration) WithName(value string) *PriorityLevelConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -111,7 +111,7 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithName(value string) *P // If called multiple times, the GenerateName field is set to the value of the last call. 
func (b *PriorityLevelConfigurationApplyConfiguration) WithGenerateName(value string) *PriorityLevelConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -120,7 +120,7 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithGenerateName(value st // If called multiple times, the Namespace field is set to the value of the last call. func (b *PriorityLevelConfigurationApplyConfiguration) WithNamespace(value string) *PriorityLevelConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -129,7 +129,7 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithNamespace(value strin // If called multiple times, the UID field is set to the value of the last call. func (b *PriorityLevelConfigurationApplyConfiguration) WithUID(value types.UID) *PriorityLevelConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -138,7 +138,7 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithUID(value types.UID) // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *PriorityLevelConfigurationApplyConfiguration) WithResourceVersion(value string) *PriorityLevelConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -147,25 +147,25 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithResourceVersion(value // If called multiple times, the Generation field is set to the value of the last call. func (b *PriorityLevelConfigurationApplyConfiguration) WithGeneration(value int64) *PriorityLevelConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CreationTimestamp field is set to the value of the last call. -func (b *PriorityLevelConfigurationApplyConfiguration) WithCreationTimestamp(value metav1.Time) *PriorityLevelConfigurationApplyConfiguration { +func (b *PriorityLevelConfigurationApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *PriorityLevelConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. 
-func (b *PriorityLevelConfigurationApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *PriorityLevelConfigurationApplyConfiguration { +func (b *PriorityLevelConfigurationApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *PriorityLevelConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -174,7 +174,7 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithDeletionTimestamp(val // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *PriorityLevelConfigurationApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *PriorityLevelConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -184,11 +184,11 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithDeletionGracePeriodSe // overwriting an existing map entries in Labels field with the same key. func (b *PriorityLevelConfigurationApplyConfiguration) WithLabels(entries map[string]string) *PriorityLevelConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -199,11 +199,11 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithLabels(entries map[st // overwriting an existing map entries in Annotations field with the same key. func (b *PriorityLevelConfigurationApplyConfiguration) WithAnnotations(entries map[string]string) *PriorityLevelConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -211,13 +211,13 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithAnnotations(entries m // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the OwnerReferences field. 
-func (b *PriorityLevelConfigurationApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *PriorityLevelConfigurationApplyConfiguration { +func (b *PriorityLevelConfigurationApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *PriorityLevelConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -228,14 +228,14 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithOwnerReferences(value func (b *PriorityLevelConfigurationApplyConfiguration) WithFinalizers(values ...string) *PriorityLevelConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } func (b *PriorityLevelConfigurationApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} } } @@ -258,5 +258,5 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithStatus(value *Priorit // GetName retrieves the value of the Name field in the declarative configuration. func (b *PriorityLevelConfigurationApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfigurationcondition.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfigurationcondition.go index 42ccbfbf9..a7810adfb 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfigurationcondition.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfigurationcondition.go @@ -19,18 +19,18 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/flowcontrol/v1" + flowcontrolv1 "k8s.io/api/flowcontrol/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) // PriorityLevelConfigurationConditionApplyConfiguration represents a declarative configuration of the PriorityLevelConfigurationCondition type for use // with apply. 
type PriorityLevelConfigurationConditionApplyConfiguration struct { - Type *v1.PriorityLevelConfigurationConditionType `json:"type,omitempty"` - Status *v1.ConditionStatus `json:"status,omitempty"` - LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"` - Reason *string `json:"reason,omitempty"` - Message *string `json:"message,omitempty"` + Type *flowcontrolv1.PriorityLevelConfigurationConditionType `json:"type,omitempty"` + Status *flowcontrolv1.ConditionStatus `json:"status,omitempty"` + LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"` + Reason *string `json:"reason,omitempty"` + Message *string `json:"message,omitempty"` } // PriorityLevelConfigurationConditionApplyConfiguration constructs a declarative configuration of the PriorityLevelConfigurationCondition type for use with @@ -42,7 +42,7 @@ func PriorityLevelConfigurationCondition() *PriorityLevelConfigurationConditionA // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. -func (b *PriorityLevelConfigurationConditionApplyConfiguration) WithType(value v1.PriorityLevelConfigurationConditionType) *PriorityLevelConfigurationConditionApplyConfiguration { +func (b *PriorityLevelConfigurationConditionApplyConfiguration) WithType(value flowcontrolv1.PriorityLevelConfigurationConditionType) *PriorityLevelConfigurationConditionApplyConfiguration { b.Type = &value return b } @@ -50,7 +50,7 @@ func (b *PriorityLevelConfigurationConditionApplyConfiguration) WithType(value v // WithStatus sets the Status field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Status field is set to the value of the last call. -func (b *PriorityLevelConfigurationConditionApplyConfiguration) WithStatus(value v1.ConditionStatus) *PriorityLevelConfigurationConditionApplyConfiguration { +func (b *PriorityLevelConfigurationConditionApplyConfiguration) WithStatus(value flowcontrolv1.ConditionStatus) *PriorityLevelConfigurationConditionApplyConfiguration { b.Status = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfigurationspec.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfigurationspec.go index 2262dedca..45e4cdcd8 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfigurationspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfigurationspec.go @@ -19,13 +19,13 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/flowcontrol/v1" + flowcontrolv1 "k8s.io/api/flowcontrol/v1" ) // PriorityLevelConfigurationSpecApplyConfiguration represents a declarative configuration of the PriorityLevelConfigurationSpec type for use // with apply. 
type PriorityLevelConfigurationSpecApplyConfiguration struct { - Type *v1.PriorityLevelEnablement `json:"type,omitempty"` + Type *flowcontrolv1.PriorityLevelEnablement `json:"type,omitempty"` Limited *LimitedPriorityLevelConfigurationApplyConfiguration `json:"limited,omitempty"` Exempt *ExemptPriorityLevelConfigurationApplyConfiguration `json:"exempt,omitempty"` } @@ -39,7 +39,7 @@ func PriorityLevelConfigurationSpec() *PriorityLevelConfigurationSpecApplyConfig // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. -func (b *PriorityLevelConfigurationSpecApplyConfiguration) WithType(value v1.PriorityLevelEnablement) *PriorityLevelConfigurationSpecApplyConfiguration { +func (b *PriorityLevelConfigurationSpecApplyConfiguration) WithType(value flowcontrolv1.PriorityLevelEnablement) *PriorityLevelConfigurationSpecApplyConfiguration { b.Type = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/subject.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/subject.go index 1ec77ae89..e2f6f3849 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/subject.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/subject.go @@ -19,13 +19,13 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/flowcontrol/v1" + flowcontrolv1 "k8s.io/api/flowcontrol/v1" ) // SubjectApplyConfiguration represents a declarative configuration of the Subject type for use // with apply. type SubjectApplyConfiguration struct { - Kind *v1.SubjectKind `json:"kind,omitempty"` + Kind *flowcontrolv1.SubjectKind `json:"kind,omitempty"` User *UserSubjectApplyConfiguration `json:"user,omitempty"` Group *GroupSubjectApplyConfiguration `json:"group,omitempty"` ServiceAccount *ServiceAccountSubjectApplyConfiguration `json:"serviceAccount,omitempty"` @@ -40,7 +40,7 @@ func Subject() *SubjectApplyConfiguration { // WithKind sets the Kind field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. -func (b *SubjectApplyConfiguration) WithKind(value v1.SubjectKind) *SubjectApplyConfiguration { +func (b *SubjectApplyConfiguration) WithKind(value flowcontrolv1.SubjectKind) *SubjectApplyConfiguration { b.Kind = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/flowdistinguishermethod.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/flowdistinguishermethod.go index 29a8999b8..11aa62bba 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/flowdistinguishermethod.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/flowdistinguishermethod.go @@ -19,13 +19,13 @@ limitations under the License. package v1beta1 import ( - v1beta1 "k8s.io/api/flowcontrol/v1beta1" + flowcontrolv1beta1 "k8s.io/api/flowcontrol/v1beta1" ) // FlowDistinguisherMethodApplyConfiguration represents a declarative configuration of the FlowDistinguisherMethod type for use // with apply. 
type FlowDistinguisherMethodApplyConfiguration struct { - Type *v1beta1.FlowDistinguisherMethodType `json:"type,omitempty"` + Type *flowcontrolv1beta1.FlowDistinguisherMethodType `json:"type,omitempty"` } // FlowDistinguisherMethodApplyConfiguration constructs a declarative configuration of the FlowDistinguisherMethod type for use with @@ -37,7 +37,7 @@ func FlowDistinguisherMethod() *FlowDistinguisherMethodApplyConfiguration { // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. -func (b *FlowDistinguisherMethodApplyConfiguration) WithType(value v1beta1.FlowDistinguisherMethodType) *FlowDistinguisherMethodApplyConfiguration { +func (b *FlowDistinguisherMethodApplyConfiguration) WithType(value flowcontrolv1beta1.FlowDistinguisherMethodType) *FlowDistinguisherMethodApplyConfiguration { b.Type = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/flowschema.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/flowschema.go index 09bd25890..f5d69b8a5 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/flowschema.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/flowschema.go @@ -85,7 +85,7 @@ func extractFlowSchema(flowSchema *flowcontrolv1beta1.FlowSchema, fieldManager s // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *FlowSchemaApplyConfiguration) WithKind(value string) *FlowSchemaApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -93,7 +93,7 @@ func (b *FlowSchemaApplyConfiguration) WithKind(value string) *FlowSchemaApplyCo // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *FlowSchemaApplyConfiguration) WithAPIVersion(value string) *FlowSchemaApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -102,7 +102,7 @@ func (b *FlowSchemaApplyConfiguration) WithAPIVersion(value string) *FlowSchemaA // If called multiple times, the Name field is set to the value of the last call. func (b *FlowSchemaApplyConfiguration) WithName(value string) *FlowSchemaApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -111,7 +111,7 @@ func (b *FlowSchemaApplyConfiguration) WithName(value string) *FlowSchemaApplyCo // If called multiple times, the GenerateName field is set to the value of the last call. func (b *FlowSchemaApplyConfiguration) WithGenerateName(value string) *FlowSchemaApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -120,7 +120,7 @@ func (b *FlowSchemaApplyConfiguration) WithGenerateName(value string) *FlowSchem // If called multiple times, the Namespace field is set to the value of the last call. 
func (b *FlowSchemaApplyConfiguration) WithNamespace(value string) *FlowSchemaApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -129,7 +129,7 @@ func (b *FlowSchemaApplyConfiguration) WithNamespace(value string) *FlowSchemaAp // If called multiple times, the UID field is set to the value of the last call. func (b *FlowSchemaApplyConfiguration) WithUID(value types.UID) *FlowSchemaApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -138,7 +138,7 @@ func (b *FlowSchemaApplyConfiguration) WithUID(value types.UID) *FlowSchemaApply // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *FlowSchemaApplyConfiguration) WithResourceVersion(value string) *FlowSchemaApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -147,7 +147,7 @@ func (b *FlowSchemaApplyConfiguration) WithResourceVersion(value string) *FlowSc // If called multiple times, the Generation field is set to the value of the last call. func (b *FlowSchemaApplyConfiguration) WithGeneration(value int64) *FlowSchemaApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } @@ -156,7 +156,7 @@ func (b *FlowSchemaApplyConfiguration) WithGeneration(value int64) *FlowSchemaAp // If called multiple times, the CreationTimestamp field is set to the value of the last call. func (b *FlowSchemaApplyConfiguration) WithCreationTimestamp(value metav1.Time) *FlowSchemaApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } @@ -165,7 +165,7 @@ func (b *FlowSchemaApplyConfiguration) WithCreationTimestamp(value metav1.Time) // If called multiple times, the DeletionTimestamp field is set to the value of the last call. func (b *FlowSchemaApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *FlowSchemaApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -174,7 +174,7 @@ func (b *FlowSchemaApplyConfiguration) WithDeletionTimestamp(value metav1.Time) // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *FlowSchemaApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *FlowSchemaApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -184,11 +184,11 @@ func (b *FlowSchemaApplyConfiguration) WithDeletionGracePeriodSeconds(value int6 // overwriting an existing map entries in Labels field with the same key. 
func (b *FlowSchemaApplyConfiguration) WithLabels(entries map[string]string) *FlowSchemaApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -199,11 +199,11 @@ func (b *FlowSchemaApplyConfiguration) WithLabels(entries map[string]string) *Fl // overwriting an existing map entries in Annotations field with the same key. func (b *FlowSchemaApplyConfiguration) WithAnnotations(entries map[string]string) *FlowSchemaApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -217,7 +217,7 @@ func (b *FlowSchemaApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerRe if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -228,7 +228,7 @@ func (b *FlowSchemaApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerRe func (b *FlowSchemaApplyConfiguration) WithFinalizers(values ...string) *FlowSchemaApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } @@ -258,5 +258,5 @@ func (b *FlowSchemaApplyConfiguration) WithStatus(value *FlowSchemaStatusApplyCo // GetName retrieves the value of the Name field in the declarative configuration. func (b *FlowSchemaApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/flowschemacondition.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/flowschemacondition.go index d1c3dbec6..e7dcb4366 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/flowschemacondition.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/flowschemacondition.go @@ -19,18 +19,18 @@ limitations under the License. package v1beta1 import ( - v1beta1 "k8s.io/api/flowcontrol/v1beta1" + flowcontrolv1beta1 "k8s.io/api/flowcontrol/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) // FlowSchemaConditionApplyConfiguration represents a declarative configuration of the FlowSchemaCondition type for use // with apply. 
type FlowSchemaConditionApplyConfiguration struct { - Type *v1beta1.FlowSchemaConditionType `json:"type,omitempty"` - Status *v1beta1.ConditionStatus `json:"status,omitempty"` - LastTransitionTime *v1.Time `json:"lastTransitionTime,omitempty"` - Reason *string `json:"reason,omitempty"` - Message *string `json:"message,omitempty"` + Type *flowcontrolv1beta1.FlowSchemaConditionType `json:"type,omitempty"` + Status *flowcontrolv1beta1.ConditionStatus `json:"status,omitempty"` + LastTransitionTime *v1.Time `json:"lastTransitionTime,omitempty"` + Reason *string `json:"reason,omitempty"` + Message *string `json:"message,omitempty"` } // FlowSchemaConditionApplyConfiguration constructs a declarative configuration of the FlowSchemaCondition type for use with @@ -42,7 +42,7 @@ func FlowSchemaCondition() *FlowSchemaConditionApplyConfiguration { // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. -func (b *FlowSchemaConditionApplyConfiguration) WithType(value v1beta1.FlowSchemaConditionType) *FlowSchemaConditionApplyConfiguration { +func (b *FlowSchemaConditionApplyConfiguration) WithType(value flowcontrolv1beta1.FlowSchemaConditionType) *FlowSchemaConditionApplyConfiguration { b.Type = &value return b } @@ -50,7 +50,7 @@ func (b *FlowSchemaConditionApplyConfiguration) WithType(value v1beta1.FlowSchem // WithStatus sets the Status field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Status field is set to the value of the last call. -func (b *FlowSchemaConditionApplyConfiguration) WithStatus(value v1beta1.ConditionStatus) *FlowSchemaConditionApplyConfiguration { +func (b *FlowSchemaConditionApplyConfiguration) WithStatus(value flowcontrolv1beta1.ConditionStatus) *FlowSchemaConditionApplyConfiguration { b.Status = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/limitresponse.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/limitresponse.go index 66f327601..20e1b17bd 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/limitresponse.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/limitresponse.go @@ -19,13 +19,13 @@ limitations under the License. package v1beta1 import ( - v1beta1 "k8s.io/api/flowcontrol/v1beta1" + flowcontrolv1beta1 "k8s.io/api/flowcontrol/v1beta1" ) // LimitResponseApplyConfiguration represents a declarative configuration of the LimitResponse type for use // with apply. type LimitResponseApplyConfiguration struct { - Type *v1beta1.LimitResponseType `json:"type,omitempty"` + Type *flowcontrolv1beta1.LimitResponseType `json:"type,omitempty"` Queuing *QueuingConfigurationApplyConfiguration `json:"queuing,omitempty"` } @@ -38,7 +38,7 @@ func LimitResponse() *LimitResponseApplyConfiguration { // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. 
-func (b *LimitResponseApplyConfiguration) WithType(value v1beta1.LimitResponseType) *LimitResponseApplyConfiguration { +func (b *LimitResponseApplyConfiguration) WithType(value flowcontrolv1beta1.LimitResponseType) *LimitResponseApplyConfiguration { b.Type = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/prioritylevelconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/prioritylevelconfiguration.go index c4243f874..54030159e 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/prioritylevelconfiguration.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/prioritylevelconfiguration.go @@ -85,7 +85,7 @@ func extractPriorityLevelConfiguration(priorityLevelConfiguration *flowcontrolv1 // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *PriorityLevelConfigurationApplyConfiguration) WithKind(value string) *PriorityLevelConfigurationApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -93,7 +93,7 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithKind(value string) *P // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *PriorityLevelConfigurationApplyConfiguration) WithAPIVersion(value string) *PriorityLevelConfigurationApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -102,7 +102,7 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithAPIVersion(value stri // If called multiple times, the Name field is set to the value of the last call. func (b *PriorityLevelConfigurationApplyConfiguration) WithName(value string) *PriorityLevelConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -111,7 +111,7 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithName(value string) *P // If called multiple times, the GenerateName field is set to the value of the last call. func (b *PriorityLevelConfigurationApplyConfiguration) WithGenerateName(value string) *PriorityLevelConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -120,7 +120,7 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithGenerateName(value st // If called multiple times, the Namespace field is set to the value of the last call. func (b *PriorityLevelConfigurationApplyConfiguration) WithNamespace(value string) *PriorityLevelConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -129,7 +129,7 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithNamespace(value strin // If called multiple times, the UID field is set to the value of the last call. 
func (b *PriorityLevelConfigurationApplyConfiguration) WithUID(value types.UID) *PriorityLevelConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -138,7 +138,7 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithUID(value types.UID) // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *PriorityLevelConfigurationApplyConfiguration) WithResourceVersion(value string) *PriorityLevelConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -147,7 +147,7 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithResourceVersion(value // If called multiple times, the Generation field is set to the value of the last call. func (b *PriorityLevelConfigurationApplyConfiguration) WithGeneration(value int64) *PriorityLevelConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } @@ -156,7 +156,7 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithGeneration(value int6 // If called multiple times, the CreationTimestamp field is set to the value of the last call. func (b *PriorityLevelConfigurationApplyConfiguration) WithCreationTimestamp(value metav1.Time) *PriorityLevelConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } @@ -165,7 +165,7 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithCreationTimestamp(val // If called multiple times, the DeletionTimestamp field is set to the value of the last call. func (b *PriorityLevelConfigurationApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *PriorityLevelConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -174,7 +174,7 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithDeletionTimestamp(val // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *PriorityLevelConfigurationApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *PriorityLevelConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -184,11 +184,11 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithDeletionGracePeriodSe // overwriting an existing map entries in Labels field with the same key. 
func (b *PriorityLevelConfigurationApplyConfiguration) WithLabels(entries map[string]string) *PriorityLevelConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -199,11 +199,11 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithLabels(entries map[st // overwriting an existing map entries in Annotations field with the same key. func (b *PriorityLevelConfigurationApplyConfiguration) WithAnnotations(entries map[string]string) *PriorityLevelConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -217,7 +217,7 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithOwnerReferences(value if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -228,7 +228,7 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithOwnerReferences(value func (b *PriorityLevelConfigurationApplyConfiguration) WithFinalizers(values ...string) *PriorityLevelConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } @@ -258,5 +258,5 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithStatus(value *Priorit // GetName retrieves the value of the Name field in the declarative configuration. func (b *PriorityLevelConfigurationApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/prioritylevelconfigurationcondition.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/prioritylevelconfigurationcondition.go index 1ad4a554b..74eda9170 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/prioritylevelconfigurationcondition.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/prioritylevelconfigurationcondition.go @@ -19,18 +19,18 @@ limitations under the License. package v1beta1 import ( - v1beta1 "k8s.io/api/flowcontrol/v1beta1" + flowcontrolv1beta1 "k8s.io/api/flowcontrol/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) // PriorityLevelConfigurationConditionApplyConfiguration represents a declarative configuration of the PriorityLevelConfigurationCondition type for use // with apply. 
type PriorityLevelConfigurationConditionApplyConfiguration struct { - Type *v1beta1.PriorityLevelConfigurationConditionType `json:"type,omitempty"` - Status *v1beta1.ConditionStatus `json:"status,omitempty"` - LastTransitionTime *v1.Time `json:"lastTransitionTime,omitempty"` - Reason *string `json:"reason,omitempty"` - Message *string `json:"message,omitempty"` + Type *flowcontrolv1beta1.PriorityLevelConfigurationConditionType `json:"type,omitempty"` + Status *flowcontrolv1beta1.ConditionStatus `json:"status,omitempty"` + LastTransitionTime *v1.Time `json:"lastTransitionTime,omitempty"` + Reason *string `json:"reason,omitempty"` + Message *string `json:"message,omitempty"` } // PriorityLevelConfigurationConditionApplyConfiguration constructs a declarative configuration of the PriorityLevelConfigurationCondition type for use with @@ -42,7 +42,7 @@ func PriorityLevelConfigurationCondition() *PriorityLevelConfigurationConditionA // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. -func (b *PriorityLevelConfigurationConditionApplyConfiguration) WithType(value v1beta1.PriorityLevelConfigurationConditionType) *PriorityLevelConfigurationConditionApplyConfiguration { +func (b *PriorityLevelConfigurationConditionApplyConfiguration) WithType(value flowcontrolv1beta1.PriorityLevelConfigurationConditionType) *PriorityLevelConfigurationConditionApplyConfiguration { b.Type = &value return b } @@ -50,7 +50,7 @@ func (b *PriorityLevelConfigurationConditionApplyConfiguration) WithType(value v // WithStatus sets the Status field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Status field is set to the value of the last call. -func (b *PriorityLevelConfigurationConditionApplyConfiguration) WithStatus(value v1beta1.ConditionStatus) *PriorityLevelConfigurationConditionApplyConfiguration { +func (b *PriorityLevelConfigurationConditionApplyConfiguration) WithStatus(value flowcontrolv1beta1.ConditionStatus) *PriorityLevelConfigurationConditionApplyConfiguration { b.Status = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/prioritylevelconfigurationspec.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/prioritylevelconfigurationspec.go index b013845f4..775f476dd 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/prioritylevelconfigurationspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/prioritylevelconfigurationspec.go @@ -19,13 +19,13 @@ limitations under the License. package v1beta1 import ( - v1beta1 "k8s.io/api/flowcontrol/v1beta1" + flowcontrolv1beta1 "k8s.io/api/flowcontrol/v1beta1" ) // PriorityLevelConfigurationSpecApplyConfiguration represents a declarative configuration of the PriorityLevelConfigurationSpec type for use // with apply. 
type PriorityLevelConfigurationSpecApplyConfiguration struct { - Type *v1beta1.PriorityLevelEnablement `json:"type,omitempty"` + Type *flowcontrolv1beta1.PriorityLevelEnablement `json:"type,omitempty"` Limited *LimitedPriorityLevelConfigurationApplyConfiguration `json:"limited,omitempty"` Exempt *ExemptPriorityLevelConfigurationApplyConfiguration `json:"exempt,omitempty"` } @@ -39,7 +39,7 @@ func PriorityLevelConfigurationSpec() *PriorityLevelConfigurationSpecApplyConfig // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. -func (b *PriorityLevelConfigurationSpecApplyConfiguration) WithType(value v1beta1.PriorityLevelEnablement) *PriorityLevelConfigurationSpecApplyConfiguration { +func (b *PriorityLevelConfigurationSpecApplyConfiguration) WithType(value flowcontrolv1beta1.PriorityLevelEnablement) *PriorityLevelConfigurationSpecApplyConfiguration { b.Type = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/subject.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/subject.go index b5c231f6d..000508065 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/subject.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/subject.go @@ -19,13 +19,13 @@ limitations under the License. package v1beta1 import ( - v1beta1 "k8s.io/api/flowcontrol/v1beta1" + flowcontrolv1beta1 "k8s.io/api/flowcontrol/v1beta1" ) // SubjectApplyConfiguration represents a declarative configuration of the Subject type for use // with apply. type SubjectApplyConfiguration struct { - Kind *v1beta1.SubjectKind `json:"kind,omitempty"` + Kind *flowcontrolv1beta1.SubjectKind `json:"kind,omitempty"` User *UserSubjectApplyConfiguration `json:"user,omitempty"` Group *GroupSubjectApplyConfiguration `json:"group,omitempty"` ServiceAccount *ServiceAccountSubjectApplyConfiguration `json:"serviceAccount,omitempty"` @@ -40,7 +40,7 @@ func Subject() *SubjectApplyConfiguration { // WithKind sets the Kind field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. -func (b *SubjectApplyConfiguration) WithKind(value v1beta1.SubjectKind) *SubjectApplyConfiguration { +func (b *SubjectApplyConfiguration) WithKind(value flowcontrolv1beta1.SubjectKind) *SubjectApplyConfiguration { b.Kind = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowdistinguishermethod.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowdistinguishermethod.go index e3c4b97a7..3922c4729 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowdistinguishermethod.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowdistinguishermethod.go @@ -19,13 +19,13 @@ limitations under the License. package v1beta2 import ( - v1beta2 "k8s.io/api/flowcontrol/v1beta2" + flowcontrolv1beta2 "k8s.io/api/flowcontrol/v1beta2" ) // FlowDistinguisherMethodApplyConfiguration represents a declarative configuration of the FlowDistinguisherMethod type for use // with apply. 
type FlowDistinguisherMethodApplyConfiguration struct { - Type *v1beta2.FlowDistinguisherMethodType `json:"type,omitempty"` + Type *flowcontrolv1beta2.FlowDistinguisherMethodType `json:"type,omitempty"` } // FlowDistinguisherMethodApplyConfiguration constructs a declarative configuration of the FlowDistinguisherMethod type for use with @@ -37,7 +37,7 @@ func FlowDistinguisherMethod() *FlowDistinguisherMethodApplyConfiguration { // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. -func (b *FlowDistinguisherMethodApplyConfiguration) WithType(value v1beta2.FlowDistinguisherMethodType) *FlowDistinguisherMethodApplyConfiguration { +func (b *FlowDistinguisherMethodApplyConfiguration) WithType(value flowcontrolv1beta2.FlowDistinguisherMethodType) *FlowDistinguisherMethodApplyConfiguration { b.Type = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowschema.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowschema.go index ffc3af950..fcab6df87 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowschema.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowschema.go @@ -85,7 +85,7 @@ func extractFlowSchema(flowSchema *flowcontrolv1beta2.FlowSchema, fieldManager s // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *FlowSchemaApplyConfiguration) WithKind(value string) *FlowSchemaApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -93,7 +93,7 @@ func (b *FlowSchemaApplyConfiguration) WithKind(value string) *FlowSchemaApplyCo // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *FlowSchemaApplyConfiguration) WithAPIVersion(value string) *FlowSchemaApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -102,7 +102,7 @@ func (b *FlowSchemaApplyConfiguration) WithAPIVersion(value string) *FlowSchemaA // If called multiple times, the Name field is set to the value of the last call. func (b *FlowSchemaApplyConfiguration) WithName(value string) *FlowSchemaApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -111,7 +111,7 @@ func (b *FlowSchemaApplyConfiguration) WithName(value string) *FlowSchemaApplyCo // If called multiple times, the GenerateName field is set to the value of the last call. func (b *FlowSchemaApplyConfiguration) WithGenerateName(value string) *FlowSchemaApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -120,7 +120,7 @@ func (b *FlowSchemaApplyConfiguration) WithGenerateName(value string) *FlowSchem // If called multiple times, the Namespace field is set to the value of the last call. 
func (b *FlowSchemaApplyConfiguration) WithNamespace(value string) *FlowSchemaApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -129,7 +129,7 @@ func (b *FlowSchemaApplyConfiguration) WithNamespace(value string) *FlowSchemaAp // If called multiple times, the UID field is set to the value of the last call. func (b *FlowSchemaApplyConfiguration) WithUID(value types.UID) *FlowSchemaApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -138,7 +138,7 @@ func (b *FlowSchemaApplyConfiguration) WithUID(value types.UID) *FlowSchemaApply // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *FlowSchemaApplyConfiguration) WithResourceVersion(value string) *FlowSchemaApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -147,7 +147,7 @@ func (b *FlowSchemaApplyConfiguration) WithResourceVersion(value string) *FlowSc // If called multiple times, the Generation field is set to the value of the last call. func (b *FlowSchemaApplyConfiguration) WithGeneration(value int64) *FlowSchemaApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } @@ -156,7 +156,7 @@ func (b *FlowSchemaApplyConfiguration) WithGeneration(value int64) *FlowSchemaAp // If called multiple times, the CreationTimestamp field is set to the value of the last call. func (b *FlowSchemaApplyConfiguration) WithCreationTimestamp(value metav1.Time) *FlowSchemaApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } @@ -165,7 +165,7 @@ func (b *FlowSchemaApplyConfiguration) WithCreationTimestamp(value metav1.Time) // If called multiple times, the DeletionTimestamp field is set to the value of the last call. func (b *FlowSchemaApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *FlowSchemaApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -174,7 +174,7 @@ func (b *FlowSchemaApplyConfiguration) WithDeletionTimestamp(value metav1.Time) // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *FlowSchemaApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *FlowSchemaApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -184,11 +184,11 @@ func (b *FlowSchemaApplyConfiguration) WithDeletionGracePeriodSeconds(value int6 // overwriting an existing map entries in Labels field with the same key. 
func (b *FlowSchemaApplyConfiguration) WithLabels(entries map[string]string) *FlowSchemaApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -199,11 +199,11 @@ func (b *FlowSchemaApplyConfiguration) WithLabels(entries map[string]string) *Fl // overwriting an existing map entries in Annotations field with the same key. func (b *FlowSchemaApplyConfiguration) WithAnnotations(entries map[string]string) *FlowSchemaApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -217,7 +217,7 @@ func (b *FlowSchemaApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerRe if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -228,7 +228,7 @@ func (b *FlowSchemaApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerRe func (b *FlowSchemaApplyConfiguration) WithFinalizers(values ...string) *FlowSchemaApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } @@ -258,5 +258,5 @@ func (b *FlowSchemaApplyConfiguration) WithStatus(value *FlowSchemaStatusApplyCo // GetName retrieves the value of the Name field in the declarative configuration. func (b *FlowSchemaApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowschemacondition.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowschemacondition.go index 44571d263..f47130eeb 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowschemacondition.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowschemacondition.go @@ -19,18 +19,18 @@ limitations under the License. package v1beta2 import ( - v1beta2 "k8s.io/api/flowcontrol/v1beta2" + flowcontrolv1beta2 "k8s.io/api/flowcontrol/v1beta2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) // FlowSchemaConditionApplyConfiguration represents a declarative configuration of the FlowSchemaCondition type for use // with apply. 
type FlowSchemaConditionApplyConfiguration struct { - Type *v1beta2.FlowSchemaConditionType `json:"type,omitempty"` - Status *v1beta2.ConditionStatus `json:"status,omitempty"` - LastTransitionTime *v1.Time `json:"lastTransitionTime,omitempty"` - Reason *string `json:"reason,omitempty"` - Message *string `json:"message,omitempty"` + Type *flowcontrolv1beta2.FlowSchemaConditionType `json:"type,omitempty"` + Status *flowcontrolv1beta2.ConditionStatus `json:"status,omitempty"` + LastTransitionTime *v1.Time `json:"lastTransitionTime,omitempty"` + Reason *string `json:"reason,omitempty"` + Message *string `json:"message,omitempty"` } // FlowSchemaConditionApplyConfiguration constructs a declarative configuration of the FlowSchemaCondition type for use with @@ -42,7 +42,7 @@ func FlowSchemaCondition() *FlowSchemaConditionApplyConfiguration { // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. -func (b *FlowSchemaConditionApplyConfiguration) WithType(value v1beta2.FlowSchemaConditionType) *FlowSchemaConditionApplyConfiguration { +func (b *FlowSchemaConditionApplyConfiguration) WithType(value flowcontrolv1beta2.FlowSchemaConditionType) *FlowSchemaConditionApplyConfiguration { b.Type = &value return b } @@ -50,7 +50,7 @@ func (b *FlowSchemaConditionApplyConfiguration) WithType(value v1beta2.FlowSchem // WithStatus sets the Status field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Status field is set to the value of the last call. -func (b *FlowSchemaConditionApplyConfiguration) WithStatus(value v1beta2.ConditionStatus) *FlowSchemaConditionApplyConfiguration { +func (b *FlowSchemaConditionApplyConfiguration) WithStatus(value flowcontrolv1beta2.ConditionStatus) *FlowSchemaConditionApplyConfiguration { b.Status = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/limitresponse.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/limitresponse.go index 38a513d30..58cd78006 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/limitresponse.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/limitresponse.go @@ -19,13 +19,13 @@ limitations under the License. package v1beta2 import ( - v1beta2 "k8s.io/api/flowcontrol/v1beta2" + flowcontrolv1beta2 "k8s.io/api/flowcontrol/v1beta2" ) // LimitResponseApplyConfiguration represents a declarative configuration of the LimitResponse type for use // with apply. type LimitResponseApplyConfiguration struct { - Type *v1beta2.LimitResponseType `json:"type,omitempty"` + Type *flowcontrolv1beta2.LimitResponseType `json:"type,omitempty"` Queuing *QueuingConfigurationApplyConfiguration `json:"queuing,omitempty"` } @@ -38,7 +38,7 @@ func LimitResponse() *LimitResponseApplyConfiguration { // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. 
-func (b *LimitResponseApplyConfiguration) WithType(value v1beta2.LimitResponseType) *LimitResponseApplyConfiguration { +func (b *LimitResponseApplyConfiguration) WithType(value flowcontrolv1beta2.LimitResponseType) *LimitResponseApplyConfiguration { b.Type = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfiguration.go index 7d52ca2c2..116bcfd31 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfiguration.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfiguration.go @@ -85,7 +85,7 @@ func extractPriorityLevelConfiguration(priorityLevelConfiguration *flowcontrolv1 // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *PriorityLevelConfigurationApplyConfiguration) WithKind(value string) *PriorityLevelConfigurationApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -93,7 +93,7 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithKind(value string) *P // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *PriorityLevelConfigurationApplyConfiguration) WithAPIVersion(value string) *PriorityLevelConfigurationApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -102,7 +102,7 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithAPIVersion(value stri // If called multiple times, the Name field is set to the value of the last call. func (b *PriorityLevelConfigurationApplyConfiguration) WithName(value string) *PriorityLevelConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -111,7 +111,7 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithName(value string) *P // If called multiple times, the GenerateName field is set to the value of the last call. func (b *PriorityLevelConfigurationApplyConfiguration) WithGenerateName(value string) *PriorityLevelConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -120,7 +120,7 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithGenerateName(value st // If called multiple times, the Namespace field is set to the value of the last call. func (b *PriorityLevelConfigurationApplyConfiguration) WithNamespace(value string) *PriorityLevelConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -129,7 +129,7 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithNamespace(value strin // If called multiple times, the UID field is set to the value of the last call. 
func (b *PriorityLevelConfigurationApplyConfiguration) WithUID(value types.UID) *PriorityLevelConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -138,7 +138,7 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithUID(value types.UID) // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *PriorityLevelConfigurationApplyConfiguration) WithResourceVersion(value string) *PriorityLevelConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -147,7 +147,7 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithResourceVersion(value // If called multiple times, the Generation field is set to the value of the last call. func (b *PriorityLevelConfigurationApplyConfiguration) WithGeneration(value int64) *PriorityLevelConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } @@ -156,7 +156,7 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithGeneration(value int6 // If called multiple times, the CreationTimestamp field is set to the value of the last call. func (b *PriorityLevelConfigurationApplyConfiguration) WithCreationTimestamp(value metav1.Time) *PriorityLevelConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } @@ -165,7 +165,7 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithCreationTimestamp(val // If called multiple times, the DeletionTimestamp field is set to the value of the last call. func (b *PriorityLevelConfigurationApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *PriorityLevelConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -174,7 +174,7 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithDeletionTimestamp(val // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *PriorityLevelConfigurationApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *PriorityLevelConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -184,11 +184,11 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithDeletionGracePeriodSe // overwriting an existing map entries in Labels field with the same key. 
func (b *PriorityLevelConfigurationApplyConfiguration) WithLabels(entries map[string]string) *PriorityLevelConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -199,11 +199,11 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithLabels(entries map[st // overwriting an existing map entries in Annotations field with the same key. func (b *PriorityLevelConfigurationApplyConfiguration) WithAnnotations(entries map[string]string) *PriorityLevelConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -217,7 +217,7 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithOwnerReferences(value if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -228,7 +228,7 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithOwnerReferences(value func (b *PriorityLevelConfigurationApplyConfiguration) WithFinalizers(values ...string) *PriorityLevelConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } @@ -258,5 +258,5 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithStatus(value *Priorit // GetName retrieves the value of the Name field in the declarative configuration. func (b *PriorityLevelConfigurationApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfigurationcondition.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfigurationcondition.go index ddb17e984..caf517be3 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfigurationcondition.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfigurationcondition.go @@ -19,18 +19,18 @@ limitations under the License. package v1beta2 import ( - v1beta2 "k8s.io/api/flowcontrol/v1beta2" + flowcontrolv1beta2 "k8s.io/api/flowcontrol/v1beta2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) // PriorityLevelConfigurationConditionApplyConfiguration represents a declarative configuration of the PriorityLevelConfigurationCondition type for use // with apply. 
type PriorityLevelConfigurationConditionApplyConfiguration struct { - Type *v1beta2.PriorityLevelConfigurationConditionType `json:"type,omitempty"` - Status *v1beta2.ConditionStatus `json:"status,omitempty"` - LastTransitionTime *v1.Time `json:"lastTransitionTime,omitempty"` - Reason *string `json:"reason,omitempty"` - Message *string `json:"message,omitempty"` + Type *flowcontrolv1beta2.PriorityLevelConfigurationConditionType `json:"type,omitempty"` + Status *flowcontrolv1beta2.ConditionStatus `json:"status,omitempty"` + LastTransitionTime *v1.Time `json:"lastTransitionTime,omitempty"` + Reason *string `json:"reason,omitempty"` + Message *string `json:"message,omitempty"` } // PriorityLevelConfigurationConditionApplyConfiguration constructs a declarative configuration of the PriorityLevelConfigurationCondition type for use with @@ -42,7 +42,7 @@ func PriorityLevelConfigurationCondition() *PriorityLevelConfigurationConditionA // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. -func (b *PriorityLevelConfigurationConditionApplyConfiguration) WithType(value v1beta2.PriorityLevelConfigurationConditionType) *PriorityLevelConfigurationConditionApplyConfiguration { +func (b *PriorityLevelConfigurationConditionApplyConfiguration) WithType(value flowcontrolv1beta2.PriorityLevelConfigurationConditionType) *PriorityLevelConfigurationConditionApplyConfiguration { b.Type = &value return b } @@ -50,7 +50,7 @@ func (b *PriorityLevelConfigurationConditionApplyConfiguration) WithType(value v // WithStatus sets the Status field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Status field is set to the value of the last call. -func (b *PriorityLevelConfigurationConditionApplyConfiguration) WithStatus(value v1beta2.ConditionStatus) *PriorityLevelConfigurationConditionApplyConfiguration { +func (b *PriorityLevelConfigurationConditionApplyConfiguration) WithStatus(value flowcontrolv1beta2.ConditionStatus) *PriorityLevelConfigurationConditionApplyConfiguration { b.Status = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfigurationspec.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfigurationspec.go index c083ad0ba..c680ea1ef 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfigurationspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfigurationspec.go @@ -19,13 +19,13 @@ limitations under the License. package v1beta2 import ( - v1beta2 "k8s.io/api/flowcontrol/v1beta2" + flowcontrolv1beta2 "k8s.io/api/flowcontrol/v1beta2" ) // PriorityLevelConfigurationSpecApplyConfiguration represents a declarative configuration of the PriorityLevelConfigurationSpec type for use // with apply. 
type PriorityLevelConfigurationSpecApplyConfiguration struct { - Type *v1beta2.PriorityLevelEnablement `json:"type,omitempty"` + Type *flowcontrolv1beta2.PriorityLevelEnablement `json:"type,omitempty"` Limited *LimitedPriorityLevelConfigurationApplyConfiguration `json:"limited,omitempty"` Exempt *ExemptPriorityLevelConfigurationApplyConfiguration `json:"exempt,omitempty"` } @@ -39,7 +39,7 @@ func PriorityLevelConfigurationSpec() *PriorityLevelConfigurationSpecApplyConfig // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. -func (b *PriorityLevelConfigurationSpecApplyConfiguration) WithType(value v1beta2.PriorityLevelEnablement) *PriorityLevelConfigurationSpecApplyConfiguration { +func (b *PriorityLevelConfigurationSpecApplyConfiguration) WithType(value flowcontrolv1beta2.PriorityLevelEnablement) *PriorityLevelConfigurationSpecApplyConfiguration { b.Type = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/subject.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/subject.go index 2cfaab43d..2b569a628 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/subject.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/subject.go @@ -19,13 +19,13 @@ limitations under the License. package v1beta2 import ( - v1beta2 "k8s.io/api/flowcontrol/v1beta2" + flowcontrolv1beta2 "k8s.io/api/flowcontrol/v1beta2" ) // SubjectApplyConfiguration represents a declarative configuration of the Subject type for use // with apply. type SubjectApplyConfiguration struct { - Kind *v1beta2.SubjectKind `json:"kind,omitempty"` + Kind *flowcontrolv1beta2.SubjectKind `json:"kind,omitempty"` User *UserSubjectApplyConfiguration `json:"user,omitempty"` Group *GroupSubjectApplyConfiguration `json:"group,omitempty"` ServiceAccount *ServiceAccountSubjectApplyConfiguration `json:"serviceAccount,omitempty"` @@ -40,7 +40,7 @@ func Subject() *SubjectApplyConfiguration { // WithKind sets the Kind field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. -func (b *SubjectApplyConfiguration) WithKind(value v1beta2.SubjectKind) *SubjectApplyConfiguration { +func (b *SubjectApplyConfiguration) WithKind(value flowcontrolv1beta2.SubjectKind) *SubjectApplyConfiguration { b.Kind = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/flowdistinguishermethod.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/flowdistinguishermethod.go index 49d84bd86..cc32fa100 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/flowdistinguishermethod.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/flowdistinguishermethod.go @@ -19,13 +19,13 @@ limitations under the License. package v1beta3 import ( - v1beta3 "k8s.io/api/flowcontrol/v1beta3" + flowcontrolv1beta3 "k8s.io/api/flowcontrol/v1beta3" ) // FlowDistinguisherMethodApplyConfiguration represents a declarative configuration of the FlowDistinguisherMethod type for use // with apply. 
type FlowDistinguisherMethodApplyConfiguration struct { - Type *v1beta3.FlowDistinguisherMethodType `json:"type,omitempty"` + Type *flowcontrolv1beta3.FlowDistinguisherMethodType `json:"type,omitempty"` } // FlowDistinguisherMethodApplyConfiguration constructs a declarative configuration of the FlowDistinguisherMethod type for use with @@ -37,7 +37,7 @@ func FlowDistinguisherMethod() *FlowDistinguisherMethodApplyConfiguration { // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. -func (b *FlowDistinguisherMethodApplyConfiguration) WithType(value v1beta3.FlowDistinguisherMethodType) *FlowDistinguisherMethodApplyConfiguration { +func (b *FlowDistinguisherMethodApplyConfiguration) WithType(value flowcontrolv1beta3.FlowDistinguisherMethodType) *FlowDistinguisherMethodApplyConfiguration { b.Type = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/flowschema.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/flowschema.go index 1f69c43b2..5f6416c7c 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/flowschema.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/flowschema.go @@ -85,7 +85,7 @@ func extractFlowSchema(flowSchema *flowcontrolv1beta3.FlowSchema, fieldManager s // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *FlowSchemaApplyConfiguration) WithKind(value string) *FlowSchemaApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -93,7 +93,7 @@ func (b *FlowSchemaApplyConfiguration) WithKind(value string) *FlowSchemaApplyCo // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *FlowSchemaApplyConfiguration) WithAPIVersion(value string) *FlowSchemaApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -102,7 +102,7 @@ func (b *FlowSchemaApplyConfiguration) WithAPIVersion(value string) *FlowSchemaA // If called multiple times, the Name field is set to the value of the last call. func (b *FlowSchemaApplyConfiguration) WithName(value string) *FlowSchemaApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -111,7 +111,7 @@ func (b *FlowSchemaApplyConfiguration) WithName(value string) *FlowSchemaApplyCo // If called multiple times, the GenerateName field is set to the value of the last call. func (b *FlowSchemaApplyConfiguration) WithGenerateName(value string) *FlowSchemaApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -120,7 +120,7 @@ func (b *FlowSchemaApplyConfiguration) WithGenerateName(value string) *FlowSchem // If called multiple times, the Namespace field is set to the value of the last call. 
func (b *FlowSchemaApplyConfiguration) WithNamespace(value string) *FlowSchemaApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -129,7 +129,7 @@ func (b *FlowSchemaApplyConfiguration) WithNamespace(value string) *FlowSchemaAp // If called multiple times, the UID field is set to the value of the last call. func (b *FlowSchemaApplyConfiguration) WithUID(value types.UID) *FlowSchemaApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -138,7 +138,7 @@ func (b *FlowSchemaApplyConfiguration) WithUID(value types.UID) *FlowSchemaApply // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *FlowSchemaApplyConfiguration) WithResourceVersion(value string) *FlowSchemaApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -147,7 +147,7 @@ func (b *FlowSchemaApplyConfiguration) WithResourceVersion(value string) *FlowSc // If called multiple times, the Generation field is set to the value of the last call. func (b *FlowSchemaApplyConfiguration) WithGeneration(value int64) *FlowSchemaApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } @@ -156,7 +156,7 @@ func (b *FlowSchemaApplyConfiguration) WithGeneration(value int64) *FlowSchemaAp // If called multiple times, the CreationTimestamp field is set to the value of the last call. func (b *FlowSchemaApplyConfiguration) WithCreationTimestamp(value metav1.Time) *FlowSchemaApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } @@ -165,7 +165,7 @@ func (b *FlowSchemaApplyConfiguration) WithCreationTimestamp(value metav1.Time) // If called multiple times, the DeletionTimestamp field is set to the value of the last call. func (b *FlowSchemaApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *FlowSchemaApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -174,7 +174,7 @@ func (b *FlowSchemaApplyConfiguration) WithDeletionTimestamp(value metav1.Time) // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *FlowSchemaApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *FlowSchemaApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -184,11 +184,11 @@ func (b *FlowSchemaApplyConfiguration) WithDeletionGracePeriodSeconds(value int6 // overwriting an existing map entries in Labels field with the same key. 
func (b *FlowSchemaApplyConfiguration) WithLabels(entries map[string]string) *FlowSchemaApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -199,11 +199,11 @@ func (b *FlowSchemaApplyConfiguration) WithLabels(entries map[string]string) *Fl // overwriting an existing map entries in Annotations field with the same key. func (b *FlowSchemaApplyConfiguration) WithAnnotations(entries map[string]string) *FlowSchemaApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -217,7 +217,7 @@ func (b *FlowSchemaApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerRe if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -228,7 +228,7 @@ func (b *FlowSchemaApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerRe func (b *FlowSchemaApplyConfiguration) WithFinalizers(values ...string) *FlowSchemaApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } @@ -258,5 +258,5 @@ func (b *FlowSchemaApplyConfiguration) WithStatus(value *FlowSchemaStatusApplyCo // GetName retrieves the value of the Name field in the declarative configuration. func (b *FlowSchemaApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/flowschemacondition.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/flowschemacondition.go index 41d623aeb..d5ba21f71 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/flowschemacondition.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/flowschemacondition.go @@ -19,18 +19,18 @@ limitations under the License. package v1beta3 import ( - v1beta3 "k8s.io/api/flowcontrol/v1beta3" + flowcontrolv1beta3 "k8s.io/api/flowcontrol/v1beta3" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) // FlowSchemaConditionApplyConfiguration represents a declarative configuration of the FlowSchemaCondition type for use // with apply. 
type FlowSchemaConditionApplyConfiguration struct { - Type *v1beta3.FlowSchemaConditionType `json:"type,omitempty"` - Status *v1beta3.ConditionStatus `json:"status,omitempty"` - LastTransitionTime *v1.Time `json:"lastTransitionTime,omitempty"` - Reason *string `json:"reason,omitempty"` - Message *string `json:"message,omitempty"` + Type *flowcontrolv1beta3.FlowSchemaConditionType `json:"type,omitempty"` + Status *flowcontrolv1beta3.ConditionStatus `json:"status,omitempty"` + LastTransitionTime *v1.Time `json:"lastTransitionTime,omitempty"` + Reason *string `json:"reason,omitempty"` + Message *string `json:"message,omitempty"` } // FlowSchemaConditionApplyConfiguration constructs a declarative configuration of the FlowSchemaCondition type for use with @@ -42,7 +42,7 @@ func FlowSchemaCondition() *FlowSchemaConditionApplyConfiguration { // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. -func (b *FlowSchemaConditionApplyConfiguration) WithType(value v1beta3.FlowSchemaConditionType) *FlowSchemaConditionApplyConfiguration { +func (b *FlowSchemaConditionApplyConfiguration) WithType(value flowcontrolv1beta3.FlowSchemaConditionType) *FlowSchemaConditionApplyConfiguration { b.Type = &value return b } @@ -50,7 +50,7 @@ func (b *FlowSchemaConditionApplyConfiguration) WithType(value v1beta3.FlowSchem // WithStatus sets the Status field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Status field is set to the value of the last call. -func (b *FlowSchemaConditionApplyConfiguration) WithStatus(value v1beta3.ConditionStatus) *FlowSchemaConditionApplyConfiguration { +func (b *FlowSchemaConditionApplyConfiguration) WithStatus(value flowcontrolv1beta3.ConditionStatus) *FlowSchemaConditionApplyConfiguration { b.Status = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/limitresponse.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/limitresponse.go index 8deaabdeb..2c289c777 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/limitresponse.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/limitresponse.go @@ -19,13 +19,13 @@ limitations under the License. package v1beta3 import ( - v1beta3 "k8s.io/api/flowcontrol/v1beta3" + flowcontrolv1beta3 "k8s.io/api/flowcontrol/v1beta3" ) // LimitResponseApplyConfiguration represents a declarative configuration of the LimitResponse type for use // with apply. type LimitResponseApplyConfiguration struct { - Type *v1beta3.LimitResponseType `json:"type,omitempty"` + Type *flowcontrolv1beta3.LimitResponseType `json:"type,omitempty"` Queuing *QueuingConfigurationApplyConfiguration `json:"queuing,omitempty"` } @@ -38,7 +38,7 @@ func LimitResponse() *LimitResponseApplyConfiguration { // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. 
-func (b *LimitResponseApplyConfiguration) WithType(value v1beta3.LimitResponseType) *LimitResponseApplyConfiguration { +func (b *LimitResponseApplyConfiguration) WithType(value flowcontrolv1beta3.LimitResponseType) *LimitResponseApplyConfiguration { b.Type = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfiguration.go index e7d1a3a5f..bb036c466 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfiguration.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfiguration.go @@ -85,7 +85,7 @@ func extractPriorityLevelConfiguration(priorityLevelConfiguration *flowcontrolv1 // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *PriorityLevelConfigurationApplyConfiguration) WithKind(value string) *PriorityLevelConfigurationApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -93,7 +93,7 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithKind(value string) *P // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *PriorityLevelConfigurationApplyConfiguration) WithAPIVersion(value string) *PriorityLevelConfigurationApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -102,7 +102,7 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithAPIVersion(value stri // If called multiple times, the Name field is set to the value of the last call. func (b *PriorityLevelConfigurationApplyConfiguration) WithName(value string) *PriorityLevelConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -111,7 +111,7 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithName(value string) *P // If called multiple times, the GenerateName field is set to the value of the last call. func (b *PriorityLevelConfigurationApplyConfiguration) WithGenerateName(value string) *PriorityLevelConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -120,7 +120,7 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithGenerateName(value st // If called multiple times, the Namespace field is set to the value of the last call. func (b *PriorityLevelConfigurationApplyConfiguration) WithNamespace(value string) *PriorityLevelConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -129,7 +129,7 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithNamespace(value strin // If called multiple times, the UID field is set to the value of the last call. 
func (b *PriorityLevelConfigurationApplyConfiguration) WithUID(value types.UID) *PriorityLevelConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -138,7 +138,7 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithUID(value types.UID) // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *PriorityLevelConfigurationApplyConfiguration) WithResourceVersion(value string) *PriorityLevelConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -147,7 +147,7 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithResourceVersion(value // If called multiple times, the Generation field is set to the value of the last call. func (b *PriorityLevelConfigurationApplyConfiguration) WithGeneration(value int64) *PriorityLevelConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } @@ -156,7 +156,7 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithGeneration(value int6 // If called multiple times, the CreationTimestamp field is set to the value of the last call. func (b *PriorityLevelConfigurationApplyConfiguration) WithCreationTimestamp(value metav1.Time) *PriorityLevelConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } @@ -165,7 +165,7 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithCreationTimestamp(val // If called multiple times, the DeletionTimestamp field is set to the value of the last call. func (b *PriorityLevelConfigurationApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *PriorityLevelConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -174,7 +174,7 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithDeletionTimestamp(val // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *PriorityLevelConfigurationApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *PriorityLevelConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -184,11 +184,11 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithDeletionGracePeriodSe // overwriting an existing map entries in Labels field with the same key. 
func (b *PriorityLevelConfigurationApplyConfiguration) WithLabels(entries map[string]string) *PriorityLevelConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -199,11 +199,11 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithLabels(entries map[st // overwriting an existing map entries in Annotations field with the same key. func (b *PriorityLevelConfigurationApplyConfiguration) WithAnnotations(entries map[string]string) *PriorityLevelConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -217,7 +217,7 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithOwnerReferences(value if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -228,7 +228,7 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithOwnerReferences(value func (b *PriorityLevelConfigurationApplyConfiguration) WithFinalizers(values ...string) *PriorityLevelConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } @@ -258,5 +258,5 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithStatus(value *Priorit // GetName retrieves the value of the Name field in the declarative configuration. func (b *PriorityLevelConfigurationApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfigurationcondition.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfigurationcondition.go index 8e9687bb9..01695f144 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfigurationcondition.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfigurationcondition.go @@ -19,18 +19,18 @@ limitations under the License. package v1beta3 import ( - v1beta3 "k8s.io/api/flowcontrol/v1beta3" + flowcontrolv1beta3 "k8s.io/api/flowcontrol/v1beta3" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) // PriorityLevelConfigurationConditionApplyConfiguration represents a declarative configuration of the PriorityLevelConfigurationCondition type for use // with apply. 
type PriorityLevelConfigurationConditionApplyConfiguration struct { - Type *v1beta3.PriorityLevelConfigurationConditionType `json:"type,omitempty"` - Status *v1beta3.ConditionStatus `json:"status,omitempty"` - LastTransitionTime *v1.Time `json:"lastTransitionTime,omitempty"` - Reason *string `json:"reason,omitempty"` - Message *string `json:"message,omitempty"` + Type *flowcontrolv1beta3.PriorityLevelConfigurationConditionType `json:"type,omitempty"` + Status *flowcontrolv1beta3.ConditionStatus `json:"status,omitempty"` + LastTransitionTime *v1.Time `json:"lastTransitionTime,omitempty"` + Reason *string `json:"reason,omitempty"` + Message *string `json:"message,omitempty"` } // PriorityLevelConfigurationConditionApplyConfiguration constructs a declarative configuration of the PriorityLevelConfigurationCondition type for use with @@ -42,7 +42,7 @@ func PriorityLevelConfigurationCondition() *PriorityLevelConfigurationConditionA // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. -func (b *PriorityLevelConfigurationConditionApplyConfiguration) WithType(value v1beta3.PriorityLevelConfigurationConditionType) *PriorityLevelConfigurationConditionApplyConfiguration { +func (b *PriorityLevelConfigurationConditionApplyConfiguration) WithType(value flowcontrolv1beta3.PriorityLevelConfigurationConditionType) *PriorityLevelConfigurationConditionApplyConfiguration { b.Type = &value return b } @@ -50,7 +50,7 @@ func (b *PriorityLevelConfigurationConditionApplyConfiguration) WithType(value v // WithStatus sets the Status field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Status field is set to the value of the last call. -func (b *PriorityLevelConfigurationConditionApplyConfiguration) WithStatus(value v1beta3.ConditionStatus) *PriorityLevelConfigurationConditionApplyConfiguration { +func (b *PriorityLevelConfigurationConditionApplyConfiguration) WithStatus(value flowcontrolv1beta3.ConditionStatus) *PriorityLevelConfigurationConditionApplyConfiguration { b.Status = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfigurationspec.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfigurationspec.go index 9fa1112ce..c95085478 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfigurationspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfigurationspec.go @@ -19,13 +19,13 @@ limitations under the License. package v1beta3 import ( - v1beta3 "k8s.io/api/flowcontrol/v1beta3" + flowcontrolv1beta3 "k8s.io/api/flowcontrol/v1beta3" ) // PriorityLevelConfigurationSpecApplyConfiguration represents a declarative configuration of the PriorityLevelConfigurationSpec type for use // with apply. 
type PriorityLevelConfigurationSpecApplyConfiguration struct { - Type *v1beta3.PriorityLevelEnablement `json:"type,omitempty"` + Type *flowcontrolv1beta3.PriorityLevelEnablement `json:"type,omitempty"` Limited *LimitedPriorityLevelConfigurationApplyConfiguration `json:"limited,omitempty"` Exempt *ExemptPriorityLevelConfigurationApplyConfiguration `json:"exempt,omitempty"` } @@ -39,7 +39,7 @@ func PriorityLevelConfigurationSpec() *PriorityLevelConfigurationSpecApplyConfig // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. -func (b *PriorityLevelConfigurationSpecApplyConfiguration) WithType(value v1beta3.PriorityLevelEnablement) *PriorityLevelConfigurationSpecApplyConfiguration { +func (b *PriorityLevelConfigurationSpecApplyConfiguration) WithType(value flowcontrolv1beta3.PriorityLevelEnablement) *PriorityLevelConfigurationSpecApplyConfiguration { b.Type = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/subject.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/subject.go index c412b2a7a..46499f541 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/subject.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/subject.go @@ -19,13 +19,13 @@ limitations under the License. package v1beta3 import ( - v1beta3 "k8s.io/api/flowcontrol/v1beta3" + flowcontrolv1beta3 "k8s.io/api/flowcontrol/v1beta3" ) // SubjectApplyConfiguration represents a declarative configuration of the Subject type for use // with apply. type SubjectApplyConfiguration struct { - Kind *v1beta3.SubjectKind `json:"kind,omitempty"` + Kind *flowcontrolv1beta3.SubjectKind `json:"kind,omitempty"` User *UserSubjectApplyConfiguration `json:"user,omitempty"` Group *GroupSubjectApplyConfiguration `json:"group,omitempty"` ServiceAccount *ServiceAccountSubjectApplyConfiguration `json:"serviceAccount,omitempty"` @@ -40,7 +40,7 @@ func Subject() *SubjectApplyConfiguration { // WithKind sets the Kind field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. -func (b *SubjectApplyConfiguration) WithKind(value v1beta3.SubjectKind) *SubjectApplyConfiguration { +func (b *SubjectApplyConfiguration) WithKind(value flowcontrolv1beta3.SubjectKind) *SubjectApplyConfiguration { b.Kind = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/imagepolicy/v1alpha1/imagereview.go b/vendor/k8s.io/client-go/applyconfigurations/imagepolicy/v1alpha1/imagereview.go index 91944002d..8a4980675 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/imagepolicy/v1alpha1/imagereview.go +++ b/vendor/k8s.io/client-go/applyconfigurations/imagepolicy/v1alpha1/imagereview.go @@ -85,7 +85,7 @@ func extractImageReview(imageReview *imagepolicyv1alpha1.ImageReview, fieldManag // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. 
func (b *ImageReviewApplyConfiguration) WithKind(value string) *ImageReviewApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -93,7 +93,7 @@ func (b *ImageReviewApplyConfiguration) WithKind(value string) *ImageReviewApply // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *ImageReviewApplyConfiguration) WithAPIVersion(value string) *ImageReviewApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -102,7 +102,7 @@ func (b *ImageReviewApplyConfiguration) WithAPIVersion(value string) *ImageRevie // If called multiple times, the Name field is set to the value of the last call. func (b *ImageReviewApplyConfiguration) WithName(value string) *ImageReviewApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -111,7 +111,7 @@ func (b *ImageReviewApplyConfiguration) WithName(value string) *ImageReviewApply // If called multiple times, the GenerateName field is set to the value of the last call. func (b *ImageReviewApplyConfiguration) WithGenerateName(value string) *ImageReviewApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -120,7 +120,7 @@ func (b *ImageReviewApplyConfiguration) WithGenerateName(value string) *ImageRev // If called multiple times, the Namespace field is set to the value of the last call. func (b *ImageReviewApplyConfiguration) WithNamespace(value string) *ImageReviewApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -129,7 +129,7 @@ func (b *ImageReviewApplyConfiguration) WithNamespace(value string) *ImageReview // If called multiple times, the UID field is set to the value of the last call. func (b *ImageReviewApplyConfiguration) WithUID(value types.UID) *ImageReviewApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -138,7 +138,7 @@ func (b *ImageReviewApplyConfiguration) WithUID(value types.UID) *ImageReviewApp // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *ImageReviewApplyConfiguration) WithResourceVersion(value string) *ImageReviewApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -147,7 +147,7 @@ func (b *ImageReviewApplyConfiguration) WithResourceVersion(value string) *Image // If called multiple times, the Generation field is set to the value of the last call. func (b *ImageReviewApplyConfiguration) WithGeneration(value int64) *ImageReviewApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } @@ -156,7 +156,7 @@ func (b *ImageReviewApplyConfiguration) WithGeneration(value int64) *ImageReview // If called multiple times, the CreationTimestamp field is set to the value of the last call. 
func (b *ImageReviewApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ImageReviewApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } @@ -165,7 +165,7 @@ func (b *ImageReviewApplyConfiguration) WithCreationTimestamp(value metav1.Time) // If called multiple times, the DeletionTimestamp field is set to the value of the last call. func (b *ImageReviewApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ImageReviewApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -174,7 +174,7 @@ func (b *ImageReviewApplyConfiguration) WithDeletionTimestamp(value metav1.Time) // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *ImageReviewApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ImageReviewApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -184,11 +184,11 @@ func (b *ImageReviewApplyConfiguration) WithDeletionGracePeriodSeconds(value int // overwriting an existing map entries in Labels field with the same key. func (b *ImageReviewApplyConfiguration) WithLabels(entries map[string]string) *ImageReviewApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -199,11 +199,11 @@ func (b *ImageReviewApplyConfiguration) WithLabels(entries map[string]string) *I // overwriting an existing map entries in Annotations field with the same key. 
func (b *ImageReviewApplyConfiguration) WithAnnotations(entries map[string]string) *ImageReviewApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -217,7 +217,7 @@ func (b *ImageReviewApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerR if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -228,7 +228,7 @@ func (b *ImageReviewApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerR func (b *ImageReviewApplyConfiguration) WithFinalizers(values ...string) *ImageReviewApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } @@ -258,5 +258,5 @@ func (b *ImageReviewApplyConfiguration) WithStatus(value *ImageReviewStatusApply // GetName retrieves the value of the Name field in the declarative configuration. func (b *ImageReviewApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/k8s.io/client-go/applyconfigurations/internal/internal.go b/vendor/k8s.io/client-go/applyconfigurations/internal/internal.go index 43c9ae05a..cd9fcd98b 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/internal/internal.go +++ b/vendor/k8s.io/client-go/applyconfigurations/internal/internal.go @@ -19,8 +19,8 @@ limitations under the License. 
package internal import ( - "fmt" - "sync" + fmt "fmt" + sync "sync" typed "sigs.k8s.io/structured-merge-diff/v4/typed" ) @@ -512,6 +512,12 @@ var schemaYAML = typed.YAMLObject(`types: - name: url type: scalar: string +- name: io.k8s.api.admissionregistration.v1alpha1.ApplyConfiguration + map: + fields: + - name: expression + type: + scalar: string - name: io.k8s.api.admissionregistration.v1alpha1.AuditAnnotation map: fields: @@ -534,6 +540,12 @@ var schemaYAML = typed.YAMLObject(`types: type: scalar: string default: "" +- name: io.k8s.api.admissionregistration.v1alpha1.JSONPatch + map: + fields: + - name: expression + type: + scalar: string - name: io.k8s.api.admissionregistration.v1alpha1.MatchCondition map: fields: @@ -570,6 +582,100 @@ var schemaYAML = typed.YAMLObject(`types: namedType: io.k8s.api.admissionregistration.v1alpha1.NamedRuleWithOperations elementRelationship: atomic elementRelationship: atomic +- name: io.k8s.api.admissionregistration.v1alpha1.MutatingAdmissionPolicy + map: + fields: + - name: apiVersion + type: + scalar: string + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: spec + type: + namedType: io.k8s.api.admissionregistration.v1alpha1.MutatingAdmissionPolicySpec + default: {} +- name: io.k8s.api.admissionregistration.v1alpha1.MutatingAdmissionPolicyBinding + map: + fields: + - name: apiVersion + type: + scalar: string + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: spec + type: + namedType: io.k8s.api.admissionregistration.v1alpha1.MutatingAdmissionPolicyBindingSpec + default: {} +- name: io.k8s.api.admissionregistration.v1alpha1.MutatingAdmissionPolicyBindingSpec + map: + fields: + - name: matchResources + type: + namedType: io.k8s.api.admissionregistration.v1alpha1.MatchResources + - name: paramRef + type: + namedType: io.k8s.api.admissionregistration.v1alpha1.ParamRef + - name: policyName + type: + scalar: string +- name: io.k8s.api.admissionregistration.v1alpha1.MutatingAdmissionPolicySpec + map: + fields: + - name: failurePolicy + type: + scalar: string + - name: matchConditions + type: + list: + elementType: + namedType: io.k8s.api.admissionregistration.v1alpha1.MatchCondition + elementRelationship: associative + keys: + - name + - name: matchConstraints + type: + namedType: io.k8s.api.admissionregistration.v1alpha1.MatchResources + - name: mutations + type: + list: + elementType: + namedType: io.k8s.api.admissionregistration.v1alpha1.Mutation + elementRelationship: atomic + - name: paramKind + type: + namedType: io.k8s.api.admissionregistration.v1alpha1.ParamKind + - name: reinvocationPolicy + type: + scalar: string + - name: variables + type: + list: + elementType: + namedType: io.k8s.api.admissionregistration.v1alpha1.Variable + elementRelationship: atomic +- name: io.k8s.api.admissionregistration.v1alpha1.Mutation + map: + fields: + - name: applyConfiguration + type: + namedType: io.k8s.api.admissionregistration.v1alpha1.ApplyConfiguration + - name: jsonPatch + type: + namedType: io.k8s.api.admissionregistration.v1alpha1.JSONPatch + - name: patchType + type: + scalar: string + default: "" - name: io.k8s.api.admissionregistration.v1alpha1.NamedRuleWithOperations map: fields: @@ -4365,7 +4471,7 @@ var schemaYAML = typed.YAMLObject(`types: - name: strategy type: scalar: string -- name: io.k8s.api.coordination.v1alpha1.LeaseCandidate +- name: 
io.k8s.api.coordination.v1alpha2.LeaseCandidate map: fields: - name: apiVersion @@ -4380,14 +4486,15 @@ var schemaYAML = typed.YAMLObject(`types: default: {} - name: spec type: - namedType: io.k8s.api.coordination.v1alpha1.LeaseCandidateSpec + namedType: io.k8s.api.coordination.v1alpha2.LeaseCandidateSpec default: {} -- name: io.k8s.api.coordination.v1alpha1.LeaseCandidateSpec +- name: io.k8s.api.coordination.v1alpha2.LeaseCandidateSpec map: fields: - name: binaryVersion type: scalar: string + default: "" - name: emulationVersion type: scalar: string @@ -4398,15 +4505,12 @@ var schemaYAML = typed.YAMLObject(`types: - name: pingTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.MicroTime - - name: preferredStrategies - type: - list: - elementType: - scalar: string - elementRelationship: atomic - name: renewTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.MicroTime + - name: strategy + type: + scalar: string - name: io.k8s.api.coordination.v1beta1.Lease map: fields: @@ -6920,6 +7024,9 @@ var schemaYAML = typed.YAMLObject(`types: - name: runAsUser type: scalar: numeric + - name: seLinuxChangePolicy + type: + scalar: string - name: seLinuxOptions type: namedType: io.k8s.api.core.v1.SELinuxOptions @@ -7060,6 +7167,9 @@ var schemaYAML = typed.YAMLObject(`types: elementRelationship: associative keys: - name + - name: resources + type: + namedType: io.k8s.api.core.v1.ResourceRequirements - name: restartPolicy type: scalar: string @@ -12244,12 +12354,38 @@ var schemaYAML = typed.YAMLObject(`types: - name: namespace type: scalar: string -- name: io.k8s.api.resource.v1alpha3.AllocationResult +- name: io.k8s.api.resource.v1alpha3.AllocatedDeviceStatus map: fields: - - name: controller + - name: conditions + type: + list: + elementType: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Condition + elementRelationship: associative + keys: + - type + - name: data + type: + namedType: __untyped_atomic_ + - name: device + type: + scalar: string + default: "" + - name: driver + type: + scalar: string + default: "" + - name: networkData + type: + namedType: io.k8s.api.resource.v1alpha3.NetworkDeviceData + - name: pool type: scalar: string + default: "" +- name: io.k8s.api.resource.v1alpha3.AllocationResult + map: + fields: - name: devices type: namedType: io.k8s.api.resource.v1alpha3.DeviceAllocationResult @@ -12404,9 +12540,6 @@ var schemaYAML = typed.YAMLObject(`types: elementType: namedType: io.k8s.api.resource.v1alpha3.DeviceSelector elementRelationship: atomic - - name: suitableNodes - type: - namedType: io.k8s.api.core.v1.NodeSelector - name: io.k8s.api.resource.v1alpha3.DeviceConstraint map: fields: @@ -12425,7 +12558,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: adminAccess type: scalar: boolean - default: false - name: allocationMode type: scalar: string @@ -12449,6 +12581,9 @@ var schemaYAML = typed.YAMLObject(`types: - name: io.k8s.api.resource.v1alpha3.DeviceRequestAllocationResult map: fields: + - name: adminAccess + type: + scalar: boolean - name: device type: scalar: string @@ -12471,60 +12606,31 @@ var schemaYAML = typed.YAMLObject(`types: - name: cel type: namedType: io.k8s.api.resource.v1alpha3.CELDeviceSelector -- name: io.k8s.api.resource.v1alpha3.OpaqueDeviceConfiguration - map: - fields: - - name: driver - type: - scalar: string - default: "" - - name: parameters - type: - namedType: __untyped_atomic_ -- name: io.k8s.api.resource.v1alpha3.PodSchedulingContext +- name: io.k8s.api.resource.v1alpha3.NetworkDeviceData map: fields: - - name: apiVersion + - name: 
hardwareAddress type: scalar: string - - name: kind + - name: interfaceName type: scalar: string - - name: metadata - type: - namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta - default: {} - - name: spec - type: - namedType: io.k8s.api.resource.v1alpha3.PodSchedulingContextSpec - default: {} - - name: status - type: - namedType: io.k8s.api.resource.v1alpha3.PodSchedulingContextStatus - default: {} -- name: io.k8s.api.resource.v1alpha3.PodSchedulingContextSpec - map: - fields: - - name: potentialNodes + - name: ips type: list: elementType: scalar: string elementRelationship: atomic - - name: selectedNode - type: - scalar: string -- name: io.k8s.api.resource.v1alpha3.PodSchedulingContextStatus +- name: io.k8s.api.resource.v1alpha3.OpaqueDeviceConfiguration map: fields: - - name: resourceClaims + - name: driver type: - list: - elementType: - namedType: io.k8s.api.resource.v1alpha3.ResourceClaimSchedulingStatus - elementRelationship: associative - keys: - - name + scalar: string + default: "" + - name: parameters + type: + namedType: __untyped_atomic_ - name: io.k8s.api.resource.v1alpha3.ResourceClaim map: fields: @@ -12564,25 +12670,9 @@ var schemaYAML = typed.YAMLObject(`types: type: scalar: string default: "" -- name: io.k8s.api.resource.v1alpha3.ResourceClaimSchedulingStatus - map: - fields: - - name: name - type: - scalar: string - default: "" - - name: unsuitableNodes - type: - list: - elementType: - scalar: string - elementRelationship: atomic - name: io.k8s.api.resource.v1alpha3.ResourceClaimSpec map: fields: - - name: controller - type: - scalar: string - name: devices type: namedType: io.k8s.api.resource.v1alpha3.DeviceClaim @@ -12593,9 +12683,16 @@ var schemaYAML = typed.YAMLObject(`types: - name: allocation type: namedType: io.k8s.api.resource.v1alpha3.AllocationResult - - name: deallocationRequested + - name: devices type: - scalar: boolean + list: + elementType: + namedType: io.k8s.api.resource.v1alpha3.AllocatedDeviceStatus + elementRelationship: associative + keys: + - driver + - device + - pool - name: reservedFor type: list: @@ -12690,49 +12787,488 @@ var schemaYAML = typed.YAMLObject(`types: type: namedType: io.k8s.api.resource.v1alpha3.ResourcePool default: {} -- name: io.k8s.api.scheduling.v1.PriorityClass +- name: io.k8s.api.resource.v1beta1.AllocatedDeviceStatus map: fields: - - name: apiVersion + - name: conditions + type: + list: + elementType: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Condition + elementRelationship: associative + keys: + - type + - name: data + type: + namedType: __untyped_atomic_ + - name: device type: scalar: string - - name: description + default: "" + - name: driver type: scalar: string - - name: globalDefault + default: "" + - name: networkData type: - scalar: boolean - - name: kind + namedType: io.k8s.api.resource.v1beta1.NetworkDeviceData + - name: pool type: scalar: string - - name: metadata + default: "" +- name: io.k8s.api.resource.v1beta1.AllocationResult + map: + fields: + - name: devices type: - namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + namedType: io.k8s.api.resource.v1beta1.DeviceAllocationResult default: {} - - name: preemptionPolicy - type: - scalar: string - - name: value + - name: nodeSelector type: - scalar: numeric - default: 0 -- name: io.k8s.api.scheduling.v1alpha1.PriorityClass + namedType: io.k8s.api.core.v1.NodeSelector +- name: io.k8s.api.resource.v1beta1.BasicDevice map: fields: - - name: apiVersion + - name: attributes type: - scalar: string - - name: description + map: + elementType: + 
namedType: io.k8s.api.resource.v1beta1.DeviceAttribute + - name: capacity + type: + map: + elementType: + namedType: io.k8s.api.resource.v1beta1.DeviceCapacity +- name: io.k8s.api.resource.v1beta1.CELDeviceSelector + map: + fields: + - name: expression type: scalar: string - - name: globalDefault + default: "" +- name: io.k8s.api.resource.v1beta1.Device + map: + fields: + - name: basic type: - scalar: boolean - - name: kind + namedType: io.k8s.api.resource.v1beta1.BasicDevice + - name: name type: scalar: string - - name: metadata - type: + default: "" +- name: io.k8s.api.resource.v1beta1.DeviceAllocationConfiguration + map: + fields: + - name: opaque + type: + namedType: io.k8s.api.resource.v1beta1.OpaqueDeviceConfiguration + - name: requests + type: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: source + type: + scalar: string + default: "" +- name: io.k8s.api.resource.v1beta1.DeviceAllocationResult + map: + fields: + - name: config + type: + list: + elementType: + namedType: io.k8s.api.resource.v1beta1.DeviceAllocationConfiguration + elementRelationship: atomic + - name: results + type: + list: + elementType: + namedType: io.k8s.api.resource.v1beta1.DeviceRequestAllocationResult + elementRelationship: atomic +- name: io.k8s.api.resource.v1beta1.DeviceAttribute + map: + fields: + - name: bool + type: + scalar: boolean + - name: int + type: + scalar: numeric + - name: string + type: + scalar: string + - name: version + type: + scalar: string +- name: io.k8s.api.resource.v1beta1.DeviceCapacity + map: + fields: + - name: value + type: + namedType: io.k8s.apimachinery.pkg.api.resource.Quantity +- name: io.k8s.api.resource.v1beta1.DeviceClaim + map: + fields: + - name: config + type: + list: + elementType: + namedType: io.k8s.api.resource.v1beta1.DeviceClaimConfiguration + elementRelationship: atomic + - name: constraints + type: + list: + elementType: + namedType: io.k8s.api.resource.v1beta1.DeviceConstraint + elementRelationship: atomic + - name: requests + type: + list: + elementType: + namedType: io.k8s.api.resource.v1beta1.DeviceRequest + elementRelationship: atomic +- name: io.k8s.api.resource.v1beta1.DeviceClaimConfiguration + map: + fields: + - name: opaque + type: + namedType: io.k8s.api.resource.v1beta1.OpaqueDeviceConfiguration + - name: requests + type: + list: + elementType: + scalar: string + elementRelationship: atomic +- name: io.k8s.api.resource.v1beta1.DeviceClass + map: + fields: + - name: apiVersion + type: + scalar: string + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: spec + type: + namedType: io.k8s.api.resource.v1beta1.DeviceClassSpec + default: {} +- name: io.k8s.api.resource.v1beta1.DeviceClassConfiguration + map: + fields: + - name: opaque + type: + namedType: io.k8s.api.resource.v1beta1.OpaqueDeviceConfiguration +- name: io.k8s.api.resource.v1beta1.DeviceClassSpec + map: + fields: + - name: config + type: + list: + elementType: + namedType: io.k8s.api.resource.v1beta1.DeviceClassConfiguration + elementRelationship: atomic + - name: selectors + type: + list: + elementType: + namedType: io.k8s.api.resource.v1beta1.DeviceSelector + elementRelationship: atomic +- name: io.k8s.api.resource.v1beta1.DeviceConstraint + map: + fields: + - name: matchAttribute + type: + scalar: string + - name: requests + type: + list: + elementType: + scalar: string + elementRelationship: atomic +- name: io.k8s.api.resource.v1beta1.DeviceRequest + map: + 
fields: + - name: adminAccess + type: + scalar: boolean + - name: allocationMode + type: + scalar: string + - name: count + type: + scalar: numeric + - name: deviceClassName + type: + scalar: string + default: "" + - name: name + type: + scalar: string + default: "" + - name: selectors + type: + list: + elementType: + namedType: io.k8s.api.resource.v1beta1.DeviceSelector + elementRelationship: atomic +- name: io.k8s.api.resource.v1beta1.DeviceRequestAllocationResult + map: + fields: + - name: adminAccess + type: + scalar: boolean + - name: device + type: + scalar: string + default: "" + - name: driver + type: + scalar: string + default: "" + - name: pool + type: + scalar: string + default: "" + - name: request + type: + scalar: string + default: "" +- name: io.k8s.api.resource.v1beta1.DeviceSelector + map: + fields: + - name: cel + type: + namedType: io.k8s.api.resource.v1beta1.CELDeviceSelector +- name: io.k8s.api.resource.v1beta1.NetworkDeviceData + map: + fields: + - name: hardwareAddress + type: + scalar: string + - name: interfaceName + type: + scalar: string + - name: ips + type: + list: + elementType: + scalar: string + elementRelationship: atomic +- name: io.k8s.api.resource.v1beta1.OpaqueDeviceConfiguration + map: + fields: + - name: driver + type: + scalar: string + default: "" + - name: parameters + type: + namedType: __untyped_atomic_ +- name: io.k8s.api.resource.v1beta1.ResourceClaim + map: + fields: + - name: apiVersion + type: + scalar: string + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: spec + type: + namedType: io.k8s.api.resource.v1beta1.ResourceClaimSpec + default: {} + - name: status + type: + namedType: io.k8s.api.resource.v1beta1.ResourceClaimStatus + default: {} +- name: io.k8s.api.resource.v1beta1.ResourceClaimConsumerReference + map: + fields: + - name: apiGroup + type: + scalar: string + - name: name + type: + scalar: string + default: "" + - name: resource + type: + scalar: string + default: "" + - name: uid + type: + scalar: string + default: "" +- name: io.k8s.api.resource.v1beta1.ResourceClaimSpec + map: + fields: + - name: devices + type: + namedType: io.k8s.api.resource.v1beta1.DeviceClaim + default: {} +- name: io.k8s.api.resource.v1beta1.ResourceClaimStatus + map: + fields: + - name: allocation + type: + namedType: io.k8s.api.resource.v1beta1.AllocationResult + - name: devices + type: + list: + elementType: + namedType: io.k8s.api.resource.v1beta1.AllocatedDeviceStatus + elementRelationship: associative + keys: + - driver + - device + - pool + - name: reservedFor + type: + list: + elementType: + namedType: io.k8s.api.resource.v1beta1.ResourceClaimConsumerReference + elementRelationship: associative + keys: + - uid +- name: io.k8s.api.resource.v1beta1.ResourceClaimTemplate + map: + fields: + - name: apiVersion + type: + scalar: string + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: spec + type: + namedType: io.k8s.api.resource.v1beta1.ResourceClaimTemplateSpec + default: {} +- name: io.k8s.api.resource.v1beta1.ResourceClaimTemplateSpec + map: + fields: + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: spec + type: + namedType: io.k8s.api.resource.v1beta1.ResourceClaimSpec + default: {} +- name: io.k8s.api.resource.v1beta1.ResourcePool + map: + fields: + - name: generation + type: + scalar: 
numeric + default: 0 + - name: name + type: + scalar: string + default: "" + - name: resourceSliceCount + type: + scalar: numeric + default: 0 +- name: io.k8s.api.resource.v1beta1.ResourceSlice + map: + fields: + - name: apiVersion + type: + scalar: string + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: spec + type: + namedType: io.k8s.api.resource.v1beta1.ResourceSliceSpec + default: {} +- name: io.k8s.api.resource.v1beta1.ResourceSliceSpec + map: + fields: + - name: allNodes + type: + scalar: boolean + - name: devices + type: + list: + elementType: + namedType: io.k8s.api.resource.v1beta1.Device + elementRelationship: atomic + - name: driver + type: + scalar: string + default: "" + - name: nodeName + type: + scalar: string + - name: nodeSelector + type: + namedType: io.k8s.api.core.v1.NodeSelector + - name: pool + type: + namedType: io.k8s.api.resource.v1beta1.ResourcePool + default: {} +- name: io.k8s.api.scheduling.v1.PriorityClass + map: + fields: + - name: apiVersion + type: + scalar: string + - name: description + type: + scalar: string + - name: globalDefault + type: + scalar: boolean + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: preemptionPolicy + type: + scalar: string + - name: value + type: + scalar: numeric + default: 0 +- name: io.k8s.api.scheduling.v1alpha1.PriorityClass + map: + fields: + - name: apiVersion + type: + scalar: string + - name: description + type: + scalar: string + - name: globalDefault + type: + scalar: boolean + - name: kind + type: + scalar: string + - name: metadata + type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta default: {} - name: preemptionPolicy @@ -13539,6 +14075,9 @@ var schemaYAML = typed.YAMLObject(`types: - name: gracePeriodSeconds type: scalar: numeric + - name: ignoreStoreReadErrorWithClusterBreakingPotential + type: + scalar: boolean - name: kind type: scalar: string diff --git a/vendor/k8s.io/client-go/applyconfigurations/meta/v1/condition.go b/vendor/k8s.io/client-go/applyconfigurations/meta/v1/condition.go index 466aaebb6..69063df65 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/meta/v1/condition.go +++ b/vendor/k8s.io/client-go/applyconfigurations/meta/v1/condition.go @@ -19,18 +19,18 @@ limitations under the License. package v1 import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) // ConditionApplyConfiguration represents a declarative configuration of the Condition type for use // with apply. 
type ConditionApplyConfiguration struct { - Type *string `json:"type,omitempty"` - Status *v1.ConditionStatus `json:"status,omitempty"` - ObservedGeneration *int64 `json:"observedGeneration,omitempty"` - LastTransitionTime *v1.Time `json:"lastTransitionTime,omitempty"` - Reason *string `json:"reason,omitempty"` - Message *string `json:"message,omitempty"` + Type *string `json:"type,omitempty"` + Status *metav1.ConditionStatus `json:"status,omitempty"` + ObservedGeneration *int64 `json:"observedGeneration,omitempty"` + LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"` + Reason *string `json:"reason,omitempty"` + Message *string `json:"message,omitempty"` } // ConditionApplyConfiguration constructs a declarative configuration of the Condition type for use with @@ -50,7 +50,7 @@ func (b *ConditionApplyConfiguration) WithType(value string) *ConditionApplyConf // WithStatus sets the Status field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Status field is set to the value of the last call. -func (b *ConditionApplyConfiguration) WithStatus(value v1.ConditionStatus) *ConditionApplyConfiguration { +func (b *ConditionApplyConfiguration) WithStatus(value metav1.ConditionStatus) *ConditionApplyConfiguration { b.Status = &value return b } @@ -66,7 +66,7 @@ func (b *ConditionApplyConfiguration) WithObservedGeneration(value int64) *Condi // WithLastTransitionTime sets the LastTransitionTime field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the LastTransitionTime field is set to the value of the last call. -func (b *ConditionApplyConfiguration) WithLastTransitionTime(value v1.Time) *ConditionApplyConfiguration { +func (b *ConditionApplyConfiguration) WithLastTransitionTime(value metav1.Time) *ConditionApplyConfiguration { b.LastTransitionTime = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/meta/v1/deleteoptions.go b/vendor/k8s.io/client-go/applyconfigurations/meta/v1/deleteoptions.go index 313bb9784..ab398ef56 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/meta/v1/deleteoptions.go +++ b/vendor/k8s.io/client-go/applyconfigurations/meta/v1/deleteoptions.go @@ -25,12 +25,13 @@ import ( // DeleteOptionsApplyConfiguration represents a declarative configuration of the DeleteOptions type for use // with apply. 
type DeleteOptionsApplyConfiguration struct { - TypeMetaApplyConfiguration `json:",inline"` - GracePeriodSeconds *int64 `json:"gracePeriodSeconds,omitempty"` - Preconditions *PreconditionsApplyConfiguration `json:"preconditions,omitempty"` - OrphanDependents *bool `json:"orphanDependents,omitempty"` - PropagationPolicy *metav1.DeletionPropagation `json:"propagationPolicy,omitempty"` - DryRun []string `json:"dryRun,omitempty"` + TypeMetaApplyConfiguration `json:",inline"` + GracePeriodSeconds *int64 `json:"gracePeriodSeconds,omitempty"` + Preconditions *PreconditionsApplyConfiguration `json:"preconditions,omitempty"` + OrphanDependents *bool `json:"orphanDependents,omitempty"` + PropagationPolicy *metav1.DeletionPropagation `json:"propagationPolicy,omitempty"` + DryRun []string `json:"dryRun,omitempty"` + IgnoreStoreReadErrorWithClusterBreakingPotential *bool `json:"ignoreStoreReadErrorWithClusterBreakingPotential,omitempty"` } // DeleteOptionsApplyConfiguration constructs a declarative configuration of the DeleteOptions type for use with @@ -46,7 +47,7 @@ func DeleteOptions() *DeleteOptionsApplyConfiguration { // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *DeleteOptionsApplyConfiguration) WithKind(value string) *DeleteOptionsApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -54,7 +55,7 @@ func (b *DeleteOptionsApplyConfiguration) WithKind(value string) *DeleteOptionsA // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *DeleteOptionsApplyConfiguration) WithAPIVersion(value string) *DeleteOptionsApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -99,3 +100,11 @@ func (b *DeleteOptionsApplyConfiguration) WithDryRun(values ...string) *DeleteOp } return b } + +// WithIgnoreStoreReadErrorWithClusterBreakingPotential sets the IgnoreStoreReadErrorWithClusterBreakingPotential field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the IgnoreStoreReadErrorWithClusterBreakingPotential field is set to the value of the last call. +func (b *DeleteOptionsApplyConfiguration) WithIgnoreStoreReadErrorWithClusterBreakingPotential(value bool) *DeleteOptionsApplyConfiguration { + b.IgnoreStoreReadErrorWithClusterBreakingPotential = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/meta/v1/labelselectorrequirement.go b/vendor/k8s.io/client-go/applyconfigurations/meta/v1/labelselectorrequirement.go index bd9db9659..c8b015c98 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/meta/v1/labelselectorrequirement.go +++ b/vendor/k8s.io/client-go/applyconfigurations/meta/v1/labelselectorrequirement.go @@ -19,15 +19,15 @@ limitations under the License. package v1 import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) // LabelSelectorRequirementApplyConfiguration represents a declarative configuration of the LabelSelectorRequirement type for use // with apply. 
type LabelSelectorRequirementApplyConfiguration struct { - Key *string `json:"key,omitempty"` - Operator *v1.LabelSelectorOperator `json:"operator,omitempty"` - Values []string `json:"values,omitempty"` + Key *string `json:"key,omitempty"` + Operator *metav1.LabelSelectorOperator `json:"operator,omitempty"` + Values []string `json:"values,omitempty"` } // LabelSelectorRequirementApplyConfiguration constructs a declarative configuration of the LabelSelectorRequirement type for use with @@ -47,7 +47,7 @@ func (b *LabelSelectorRequirementApplyConfiguration) WithKey(value string) *Labe // WithOperator sets the Operator field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Operator field is set to the value of the last call. -func (b *LabelSelectorRequirementApplyConfiguration) WithOperator(value v1.LabelSelectorOperator) *LabelSelectorRequirementApplyConfiguration { +func (b *LabelSelectorRequirementApplyConfiguration) WithOperator(value metav1.LabelSelectorOperator) *LabelSelectorRequirementApplyConfiguration { b.Operator = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/meta/v1/managedfieldsentry.go b/vendor/k8s.io/client-go/applyconfigurations/meta/v1/managedfieldsentry.go index 6913df822..7175537c3 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/meta/v1/managedfieldsentry.go +++ b/vendor/k8s.io/client-go/applyconfigurations/meta/v1/managedfieldsentry.go @@ -19,19 +19,19 @@ limitations under the License. package v1 import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) // ManagedFieldsEntryApplyConfiguration represents a declarative configuration of the ManagedFieldsEntry type for use // with apply. type ManagedFieldsEntryApplyConfiguration struct { - Manager *string `json:"manager,omitempty"` - Operation *v1.ManagedFieldsOperationType `json:"operation,omitempty"` - APIVersion *string `json:"apiVersion,omitempty"` - Time *v1.Time `json:"time,omitempty"` - FieldsType *string `json:"fieldsType,omitempty"` - FieldsV1 *v1.FieldsV1 `json:"fieldsV1,omitempty"` - Subresource *string `json:"subresource,omitempty"` + Manager *string `json:"manager,omitempty"` + Operation *metav1.ManagedFieldsOperationType `json:"operation,omitempty"` + APIVersion *string `json:"apiVersion,omitempty"` + Time *metav1.Time `json:"time,omitempty"` + FieldsType *string `json:"fieldsType,omitempty"` + FieldsV1 *metav1.FieldsV1 `json:"fieldsV1,omitempty"` + Subresource *string `json:"subresource,omitempty"` } // ManagedFieldsEntryApplyConfiguration constructs a declarative configuration of the ManagedFieldsEntry type for use with @@ -51,7 +51,7 @@ func (b *ManagedFieldsEntryApplyConfiguration) WithManager(value string) *Manage // WithOperation sets the Operation field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Operation field is set to the value of the last call. 
-func (b *ManagedFieldsEntryApplyConfiguration) WithOperation(value v1.ManagedFieldsOperationType) *ManagedFieldsEntryApplyConfiguration { +func (b *ManagedFieldsEntryApplyConfiguration) WithOperation(value metav1.ManagedFieldsOperationType) *ManagedFieldsEntryApplyConfiguration { b.Operation = &value return b } @@ -67,7 +67,7 @@ func (b *ManagedFieldsEntryApplyConfiguration) WithAPIVersion(value string) *Man // WithTime sets the Time field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Time field is set to the value of the last call. -func (b *ManagedFieldsEntryApplyConfiguration) WithTime(value v1.Time) *ManagedFieldsEntryApplyConfiguration { +func (b *ManagedFieldsEntryApplyConfiguration) WithTime(value metav1.Time) *ManagedFieldsEntryApplyConfiguration { b.Time = &value return b } @@ -83,7 +83,7 @@ func (b *ManagedFieldsEntryApplyConfiguration) WithFieldsType(value string) *Man // WithFieldsV1 sets the FieldsV1 field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the FieldsV1 field is set to the value of the last call. -func (b *ManagedFieldsEntryApplyConfiguration) WithFieldsV1(value v1.FieldsV1) *ManagedFieldsEntryApplyConfiguration { +func (b *ManagedFieldsEntryApplyConfiguration) WithFieldsV1(value metav1.FieldsV1) *ManagedFieldsEntryApplyConfiguration { b.FieldsV1 = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/meta/v1/objectmeta.go b/vendor/k8s.io/client-go/applyconfigurations/meta/v1/objectmeta.go index a9419975e..9b98d2209 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/meta/v1/objectmeta.go +++ b/vendor/k8s.io/client-go/applyconfigurations/meta/v1/objectmeta.go @@ -19,7 +19,7 @@ limitations under the License. package v1 import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" ) @@ -32,8 +32,8 @@ type ObjectMetaApplyConfiguration struct { UID *types.UID `json:"uid,omitempty"` ResourceVersion *string `json:"resourceVersion,omitempty"` Generation *int64 `json:"generation,omitempty"` - CreationTimestamp *v1.Time `json:"creationTimestamp,omitempty"` - DeletionTimestamp *v1.Time `json:"deletionTimestamp,omitempty"` + CreationTimestamp *metav1.Time `json:"creationTimestamp,omitempty"` + DeletionTimestamp *metav1.Time `json:"deletionTimestamp,omitempty"` DeletionGracePeriodSeconds *int64 `json:"deletionGracePeriodSeconds,omitempty"` Labels map[string]string `json:"labels,omitempty"` Annotations map[string]string `json:"annotations,omitempty"` @@ -98,7 +98,7 @@ func (b *ObjectMetaApplyConfiguration) WithGeneration(value int64) *ObjectMetaAp // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CreationTimestamp field is set to the value of the last call. 
-func (b *ObjectMetaApplyConfiguration) WithCreationTimestamp(value v1.Time) *ObjectMetaApplyConfiguration { +func (b *ObjectMetaApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ObjectMetaApplyConfiguration { b.CreationTimestamp = &value return b } @@ -106,7 +106,7 @@ func (b *ObjectMetaApplyConfiguration) WithCreationTimestamp(value v1.Time) *Obj // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. -func (b *ObjectMetaApplyConfiguration) WithDeletionTimestamp(value v1.Time) *ObjectMetaApplyConfiguration { +func (b *ObjectMetaApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ObjectMetaApplyConfiguration { b.DeletionTimestamp = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/httpingresspath.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/httpingresspath.go index e39670f29..96f9b1f56 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/httpingresspath.go +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/httpingresspath.go @@ -19,14 +19,14 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/networking/v1" + networkingv1 "k8s.io/api/networking/v1" ) // HTTPIngressPathApplyConfiguration represents a declarative configuration of the HTTPIngressPath type for use // with apply. type HTTPIngressPathApplyConfiguration struct { Path *string `json:"path,omitempty"` - PathType *v1.PathType `json:"pathType,omitempty"` + PathType *networkingv1.PathType `json:"pathType,omitempty"` Backend *IngressBackendApplyConfiguration `json:"backend,omitempty"` } @@ -47,7 +47,7 @@ func (b *HTTPIngressPathApplyConfiguration) WithPath(value string) *HTTPIngressP // WithPathType sets the PathType field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the PathType field is set to the value of the last call. -func (b *HTTPIngressPathApplyConfiguration) WithPathType(value v1.PathType) *HTTPIngressPathApplyConfiguration { +func (b *HTTPIngressPathApplyConfiguration) WithPathType(value networkingv1.PathType) *HTTPIngressPathApplyConfiguration { b.PathType = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingress.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingress.go index 607c26e94..9e275f24f 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingress.go +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingress.go @@ -19,21 +19,21 @@ limitations under the License. package v1 import ( - apinetworkingv1 "k8s.io/api/networking/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + networkingv1 "k8s.io/api/networking/v1" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" internal "k8s.io/client-go/applyconfigurations/internal" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) // IngressApplyConfiguration represents a declarative configuration of the Ingress type for use // with apply. 
type IngressApplyConfiguration struct { - v1.TypeMetaApplyConfiguration `json:",inline"` - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Spec *IngressSpecApplyConfiguration `json:"spec,omitempty"` - Status *IngressStatusApplyConfiguration `json:"status,omitempty"` + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *IngressSpecApplyConfiguration `json:"spec,omitempty"` + Status *IngressStatusApplyConfiguration `json:"status,omitempty"` } // Ingress constructs a declarative configuration of the Ingress type for use with @@ -58,18 +58,18 @@ func Ingress(name, namespace string) *IngressApplyConfiguration { // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! -func ExtractIngress(ingress *apinetworkingv1.Ingress, fieldManager string) (*IngressApplyConfiguration, error) { +func ExtractIngress(ingress *networkingv1.Ingress, fieldManager string) (*IngressApplyConfiguration, error) { return extractIngress(ingress, fieldManager, "") } // ExtractIngressStatus is the same as ExtractIngress except // that it extracts the status subresource applied configuration. // Experimental! -func ExtractIngressStatus(ingress *apinetworkingv1.Ingress, fieldManager string) (*IngressApplyConfiguration, error) { +func ExtractIngressStatus(ingress *networkingv1.Ingress, fieldManager string) (*IngressApplyConfiguration, error) { return extractIngress(ingress, fieldManager, "status") } -func extractIngress(ingress *apinetworkingv1.Ingress, fieldManager string, subresource string) (*IngressApplyConfiguration, error) { +func extractIngress(ingress *networkingv1.Ingress, fieldManager string, subresource string) (*IngressApplyConfiguration, error) { b := &IngressApplyConfiguration{} err := managedfields.ExtractInto(ingress, internal.Parser().Type("io.k8s.api.networking.v1.Ingress"), fieldManager, b, subresource) if err != nil { @@ -87,7 +87,7 @@ func extractIngress(ingress *apinetworkingv1.Ingress, fieldManager string, subre // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *IngressApplyConfiguration) WithKind(value string) *IngressApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -95,7 +95,7 @@ func (b *IngressApplyConfiguration) WithKind(value string) *IngressApplyConfigur // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *IngressApplyConfiguration) WithAPIVersion(value string) *IngressApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -104,7 +104,7 @@ func (b *IngressApplyConfiguration) WithAPIVersion(value string) *IngressApplyCo // If called multiple times, the Name field is set to the value of the last call. 
func (b *IngressApplyConfiguration) WithName(value string) *IngressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -113,7 +113,7 @@ func (b *IngressApplyConfiguration) WithName(value string) *IngressApplyConfigur // If called multiple times, the GenerateName field is set to the value of the last call. func (b *IngressApplyConfiguration) WithGenerateName(value string) *IngressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -122,7 +122,7 @@ func (b *IngressApplyConfiguration) WithGenerateName(value string) *IngressApply // If called multiple times, the Namespace field is set to the value of the last call. func (b *IngressApplyConfiguration) WithNamespace(value string) *IngressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -131,7 +131,7 @@ func (b *IngressApplyConfiguration) WithNamespace(value string) *IngressApplyCon // If called multiple times, the UID field is set to the value of the last call. func (b *IngressApplyConfiguration) WithUID(value types.UID) *IngressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -140,7 +140,7 @@ func (b *IngressApplyConfiguration) WithUID(value types.UID) *IngressApplyConfig // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *IngressApplyConfiguration) WithResourceVersion(value string) *IngressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -149,25 +149,25 @@ func (b *IngressApplyConfiguration) WithResourceVersion(value string) *IngressAp // If called multiple times, the Generation field is set to the value of the last call. func (b *IngressApplyConfiguration) WithGeneration(value int64) *IngressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CreationTimestamp field is set to the value of the last call. -func (b *IngressApplyConfiguration) WithCreationTimestamp(value metav1.Time) *IngressApplyConfiguration { +func (b *IngressApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *IngressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. 
-func (b *IngressApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *IngressApplyConfiguration { +func (b *IngressApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *IngressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -176,7 +176,7 @@ func (b *IngressApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *In // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *IngressApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *IngressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -186,11 +186,11 @@ func (b *IngressApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) // overwriting an existing map entries in Labels field with the same key. func (b *IngressApplyConfiguration) WithLabels(entries map[string]string) *IngressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -201,11 +201,11 @@ func (b *IngressApplyConfiguration) WithLabels(entries map[string]string) *Ingre // overwriting an existing map entries in Annotations field with the same key. func (b *IngressApplyConfiguration) WithAnnotations(entries map[string]string) *IngressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -213,13 +213,13 @@ func (b *IngressApplyConfiguration) WithAnnotations(entries map[string]string) * // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the OwnerReferences field. 
-func (b *IngressApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *IngressApplyConfiguration { +func (b *IngressApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *IngressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -230,14 +230,14 @@ func (b *IngressApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerRefer func (b *IngressApplyConfiguration) WithFinalizers(values ...string) *IngressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } func (b *IngressApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} } } @@ -260,5 +260,5 @@ func (b *IngressApplyConfiguration) WithStatus(value *IngressStatusApplyConfigur // GetName retrieves the value of the Name field in the declarative configuration. func (b *IngressApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressclass.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressclass.go index 14acc7dbd..f723b5d70 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressclass.go +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressclass.go @@ -19,20 +19,20 @@ limitations under the License. package v1 import ( - apinetworkingv1 "k8s.io/api/networking/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + networkingv1 "k8s.io/api/networking/v1" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" internal "k8s.io/client-go/applyconfigurations/internal" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) // IngressClassApplyConfiguration represents a declarative configuration of the IngressClass type for use // with apply. type IngressClassApplyConfiguration struct { - v1.TypeMetaApplyConfiguration `json:",inline"` - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Spec *IngressClassSpecApplyConfiguration `json:"spec,omitempty"` + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *IngressClassSpecApplyConfiguration `json:"spec,omitempty"` } // IngressClass constructs a declarative configuration of the IngressClass type for use with @@ -56,18 +56,18 @@ func IngressClass(name string) *IngressClassApplyConfiguration { // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! 
-func ExtractIngressClass(ingressClass *apinetworkingv1.IngressClass, fieldManager string) (*IngressClassApplyConfiguration, error) { +func ExtractIngressClass(ingressClass *networkingv1.IngressClass, fieldManager string) (*IngressClassApplyConfiguration, error) { return extractIngressClass(ingressClass, fieldManager, "") } // ExtractIngressClassStatus is the same as ExtractIngressClass except // that it extracts the status subresource applied configuration. // Experimental! -func ExtractIngressClassStatus(ingressClass *apinetworkingv1.IngressClass, fieldManager string) (*IngressClassApplyConfiguration, error) { +func ExtractIngressClassStatus(ingressClass *networkingv1.IngressClass, fieldManager string) (*IngressClassApplyConfiguration, error) { return extractIngressClass(ingressClass, fieldManager, "status") } -func extractIngressClass(ingressClass *apinetworkingv1.IngressClass, fieldManager string, subresource string) (*IngressClassApplyConfiguration, error) { +func extractIngressClass(ingressClass *networkingv1.IngressClass, fieldManager string, subresource string) (*IngressClassApplyConfiguration, error) { b := &IngressClassApplyConfiguration{} err := managedfields.ExtractInto(ingressClass, internal.Parser().Type("io.k8s.api.networking.v1.IngressClass"), fieldManager, b, subresource) if err != nil { @@ -84,7 +84,7 @@ func extractIngressClass(ingressClass *apinetworkingv1.IngressClass, fieldManage // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *IngressClassApplyConfiguration) WithKind(value string) *IngressClassApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -92,7 +92,7 @@ func (b *IngressClassApplyConfiguration) WithKind(value string) *IngressClassApp // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *IngressClassApplyConfiguration) WithAPIVersion(value string) *IngressClassApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -101,7 +101,7 @@ func (b *IngressClassApplyConfiguration) WithAPIVersion(value string) *IngressCl // If called multiple times, the Name field is set to the value of the last call. func (b *IngressClassApplyConfiguration) WithName(value string) *IngressClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -110,7 +110,7 @@ func (b *IngressClassApplyConfiguration) WithName(value string) *IngressClassApp // If called multiple times, the GenerateName field is set to the value of the last call. func (b *IngressClassApplyConfiguration) WithGenerateName(value string) *IngressClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -119,7 +119,7 @@ func (b *IngressClassApplyConfiguration) WithGenerateName(value string) *Ingress // If called multiple times, the Namespace field is set to the value of the last call. 
func (b *IngressClassApplyConfiguration) WithNamespace(value string) *IngressClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -128,7 +128,7 @@ func (b *IngressClassApplyConfiguration) WithNamespace(value string) *IngressCla // If called multiple times, the UID field is set to the value of the last call. func (b *IngressClassApplyConfiguration) WithUID(value types.UID) *IngressClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -137,7 +137,7 @@ func (b *IngressClassApplyConfiguration) WithUID(value types.UID) *IngressClassA // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *IngressClassApplyConfiguration) WithResourceVersion(value string) *IngressClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -146,25 +146,25 @@ func (b *IngressClassApplyConfiguration) WithResourceVersion(value string) *Ingr // If called multiple times, the Generation field is set to the value of the last call. func (b *IngressClassApplyConfiguration) WithGeneration(value int64) *IngressClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CreationTimestamp field is set to the value of the last call. -func (b *IngressClassApplyConfiguration) WithCreationTimestamp(value metav1.Time) *IngressClassApplyConfiguration { +func (b *IngressClassApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *IngressClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. -func (b *IngressClassApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *IngressClassApplyConfiguration { +func (b *IngressClassApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *IngressClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -173,7 +173,7 @@ func (b *IngressClassApplyConfiguration) WithDeletionTimestamp(value metav1.Time // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *IngressClassApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *IngressClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -183,11 +183,11 @@ func (b *IngressClassApplyConfiguration) WithDeletionGracePeriodSeconds(value in // overwriting an existing map entries in Labels field with the same key. 
func (b *IngressClassApplyConfiguration) WithLabels(entries map[string]string) *IngressClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -198,11 +198,11 @@ func (b *IngressClassApplyConfiguration) WithLabels(entries map[string]string) * // overwriting an existing map entries in Annotations field with the same key. func (b *IngressClassApplyConfiguration) WithAnnotations(entries map[string]string) *IngressClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -210,13 +210,13 @@ func (b *IngressClassApplyConfiguration) WithAnnotations(entries map[string]stri // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the OwnerReferences field. -func (b *IngressClassApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *IngressClassApplyConfiguration { +func (b *IngressClassApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *IngressClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -227,14 +227,14 @@ func (b *IngressClassApplyConfiguration) WithOwnerReferences(values ...*v1.Owner func (b *IngressClassApplyConfiguration) WithFinalizers(values ...string) *IngressClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } func (b *IngressClassApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} } } @@ -249,5 +249,5 @@ func (b *IngressClassApplyConfiguration) WithSpec(value *IngressClassSpecApplyCo // GetName retrieves the value of the Name field in the declarative configuration. 
func (b *IngressClassApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressportstatus.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressportstatus.go index b6411199f..84ba243ab 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressportstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressportstatus.go @@ -19,15 +19,15 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" ) // IngressPortStatusApplyConfiguration represents a declarative configuration of the IngressPortStatus type for use // with apply. type IngressPortStatusApplyConfiguration struct { - Port *int32 `json:"port,omitempty"` - Protocol *v1.Protocol `json:"protocol,omitempty"` - Error *string `json:"error,omitempty"` + Port *int32 `json:"port,omitempty"` + Protocol *corev1.Protocol `json:"protocol,omitempty"` + Error *string `json:"error,omitempty"` } // IngressPortStatusApplyConfiguration constructs a declarative configuration of the IngressPortStatus type for use with @@ -47,7 +47,7 @@ func (b *IngressPortStatusApplyConfiguration) WithPort(value int32) *IngressPort // WithProtocol sets the Protocol field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Protocol field is set to the value of the last call. -func (b *IngressPortStatusApplyConfiguration) WithProtocol(value v1.Protocol) *IngressPortStatusApplyConfiguration { +func (b *IngressPortStatusApplyConfiguration) WithProtocol(value corev1.Protocol) *IngressPortStatusApplyConfiguration { b.Protocol = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressrule.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressrule.go index 4ef871f07..20a1816bf 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressrule.go +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressrule.go @@ -43,6 +43,6 @@ func (b *IngressRuleApplyConfiguration) WithHost(value string) *IngressRuleApply // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the HTTP field is set to the value of the last call. func (b *IngressRuleApplyConfiguration) WithHTTP(value *HTTPIngressRuleValueApplyConfiguration) *IngressRuleApplyConfiguration { - b.HTTP = value + b.IngressRuleValueApplyConfiguration.HTTP = value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicy.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicy.go index 3f8c8a535..e8da1be06 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicy.go +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicy.go @@ -19,20 +19,20 @@ limitations under the License. 
package v1 import ( - apinetworkingv1 "k8s.io/api/networking/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + networkingv1 "k8s.io/api/networking/v1" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" internal "k8s.io/client-go/applyconfigurations/internal" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) // NetworkPolicyApplyConfiguration represents a declarative configuration of the NetworkPolicy type for use // with apply. type NetworkPolicyApplyConfiguration struct { - v1.TypeMetaApplyConfiguration `json:",inline"` - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Spec *NetworkPolicySpecApplyConfiguration `json:"spec,omitempty"` + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *NetworkPolicySpecApplyConfiguration `json:"spec,omitempty"` } // NetworkPolicy constructs a declarative configuration of the NetworkPolicy type for use with @@ -57,18 +57,18 @@ func NetworkPolicy(name, namespace string) *NetworkPolicyApplyConfiguration { // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! -func ExtractNetworkPolicy(networkPolicy *apinetworkingv1.NetworkPolicy, fieldManager string) (*NetworkPolicyApplyConfiguration, error) { +func ExtractNetworkPolicy(networkPolicy *networkingv1.NetworkPolicy, fieldManager string) (*NetworkPolicyApplyConfiguration, error) { return extractNetworkPolicy(networkPolicy, fieldManager, "") } // ExtractNetworkPolicyStatus is the same as ExtractNetworkPolicy except // that it extracts the status subresource applied configuration. // Experimental! -func ExtractNetworkPolicyStatus(networkPolicy *apinetworkingv1.NetworkPolicy, fieldManager string) (*NetworkPolicyApplyConfiguration, error) { +func ExtractNetworkPolicyStatus(networkPolicy *networkingv1.NetworkPolicy, fieldManager string) (*NetworkPolicyApplyConfiguration, error) { return extractNetworkPolicy(networkPolicy, fieldManager, "status") } -func extractNetworkPolicy(networkPolicy *apinetworkingv1.NetworkPolicy, fieldManager string, subresource string) (*NetworkPolicyApplyConfiguration, error) { +func extractNetworkPolicy(networkPolicy *networkingv1.NetworkPolicy, fieldManager string, subresource string) (*NetworkPolicyApplyConfiguration, error) { b := &NetworkPolicyApplyConfiguration{} err := managedfields.ExtractInto(networkPolicy, internal.Parser().Type("io.k8s.api.networking.v1.NetworkPolicy"), fieldManager, b, subresource) if err != nil { @@ -86,7 +86,7 @@ func extractNetworkPolicy(networkPolicy *apinetworkingv1.NetworkPolicy, fieldMan // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *NetworkPolicyApplyConfiguration) WithKind(value string) *NetworkPolicyApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -94,7 +94,7 @@ func (b *NetworkPolicyApplyConfiguration) WithKind(value string) *NetworkPolicyA // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. 
func (b *NetworkPolicyApplyConfiguration) WithAPIVersion(value string) *NetworkPolicyApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -103,7 +103,7 @@ func (b *NetworkPolicyApplyConfiguration) WithAPIVersion(value string) *NetworkP // If called multiple times, the Name field is set to the value of the last call. func (b *NetworkPolicyApplyConfiguration) WithName(value string) *NetworkPolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -112,7 +112,7 @@ func (b *NetworkPolicyApplyConfiguration) WithName(value string) *NetworkPolicyA // If called multiple times, the GenerateName field is set to the value of the last call. func (b *NetworkPolicyApplyConfiguration) WithGenerateName(value string) *NetworkPolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -121,7 +121,7 @@ func (b *NetworkPolicyApplyConfiguration) WithGenerateName(value string) *Networ // If called multiple times, the Namespace field is set to the value of the last call. func (b *NetworkPolicyApplyConfiguration) WithNamespace(value string) *NetworkPolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -130,7 +130,7 @@ func (b *NetworkPolicyApplyConfiguration) WithNamespace(value string) *NetworkPo // If called multiple times, the UID field is set to the value of the last call. func (b *NetworkPolicyApplyConfiguration) WithUID(value types.UID) *NetworkPolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -139,7 +139,7 @@ func (b *NetworkPolicyApplyConfiguration) WithUID(value types.UID) *NetworkPolic // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *NetworkPolicyApplyConfiguration) WithResourceVersion(value string) *NetworkPolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -148,25 +148,25 @@ func (b *NetworkPolicyApplyConfiguration) WithResourceVersion(value string) *Net // If called multiple times, the Generation field is set to the value of the last call. func (b *NetworkPolicyApplyConfiguration) WithGeneration(value int64) *NetworkPolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CreationTimestamp field is set to the value of the last call. 
-func (b *NetworkPolicyApplyConfiguration) WithCreationTimestamp(value metav1.Time) *NetworkPolicyApplyConfiguration { +func (b *NetworkPolicyApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *NetworkPolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. -func (b *NetworkPolicyApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *NetworkPolicyApplyConfiguration { +func (b *NetworkPolicyApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *NetworkPolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -175,7 +175,7 @@ func (b *NetworkPolicyApplyConfiguration) WithDeletionTimestamp(value metav1.Tim // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *NetworkPolicyApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *NetworkPolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -185,11 +185,11 @@ func (b *NetworkPolicyApplyConfiguration) WithDeletionGracePeriodSeconds(value i // overwriting an existing map entries in Labels field with the same key. func (b *NetworkPolicyApplyConfiguration) WithLabels(entries map[string]string) *NetworkPolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -200,11 +200,11 @@ func (b *NetworkPolicyApplyConfiguration) WithLabels(entries map[string]string) // overwriting an existing map entries in Annotations field with the same key. func (b *NetworkPolicyApplyConfiguration) WithAnnotations(entries map[string]string) *NetworkPolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -212,13 +212,13 @@ func (b *NetworkPolicyApplyConfiguration) WithAnnotations(entries map[string]str // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the OwnerReferences field. 
-func (b *NetworkPolicyApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *NetworkPolicyApplyConfiguration { +func (b *NetworkPolicyApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *NetworkPolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -229,14 +229,14 @@ func (b *NetworkPolicyApplyConfiguration) WithOwnerReferences(values ...*v1.Owne func (b *NetworkPolicyApplyConfiguration) WithFinalizers(values ...string) *NetworkPolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } func (b *NetworkPolicyApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} } } @@ -251,5 +251,5 @@ func (b *NetworkPolicyApplyConfiguration) WithSpec(value *NetworkPolicySpecApply // GetName retrieves the value of the Name field in the declarative configuration. func (b *NetworkPolicyApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicypeer.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicypeer.go index 046de3e23..716ceeeef 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicypeer.go +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicypeer.go @@ -19,15 +19,15 @@ limitations under the License. package v1 import ( - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) // NetworkPolicyPeerApplyConfiguration represents a declarative configuration of the NetworkPolicyPeer type for use // with apply. type NetworkPolicyPeerApplyConfiguration struct { - PodSelector *v1.LabelSelectorApplyConfiguration `json:"podSelector,omitempty"` - NamespaceSelector *v1.LabelSelectorApplyConfiguration `json:"namespaceSelector,omitempty"` - IPBlock *IPBlockApplyConfiguration `json:"ipBlock,omitempty"` + PodSelector *metav1.LabelSelectorApplyConfiguration `json:"podSelector,omitempty"` + NamespaceSelector *metav1.LabelSelectorApplyConfiguration `json:"namespaceSelector,omitempty"` + IPBlock *IPBlockApplyConfiguration `json:"ipBlock,omitempty"` } // NetworkPolicyPeerApplyConfiguration constructs a declarative configuration of the NetworkPolicyPeer type for use with @@ -39,7 +39,7 @@ func NetworkPolicyPeer() *NetworkPolicyPeerApplyConfiguration { // WithPodSelector sets the PodSelector field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the PodSelector field is set to the value of the last call. 
-func (b *NetworkPolicyPeerApplyConfiguration) WithPodSelector(value *v1.LabelSelectorApplyConfiguration) *NetworkPolicyPeerApplyConfiguration { +func (b *NetworkPolicyPeerApplyConfiguration) WithPodSelector(value *metav1.LabelSelectorApplyConfiguration) *NetworkPolicyPeerApplyConfiguration { b.PodSelector = value return b } @@ -47,7 +47,7 @@ func (b *NetworkPolicyPeerApplyConfiguration) WithPodSelector(value *v1.LabelSel // WithNamespaceSelector sets the NamespaceSelector field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the NamespaceSelector field is set to the value of the last call. -func (b *NetworkPolicyPeerApplyConfiguration) WithNamespaceSelector(value *v1.LabelSelectorApplyConfiguration) *NetworkPolicyPeerApplyConfiguration { +func (b *NetworkPolicyPeerApplyConfiguration) WithNamespaceSelector(value *metav1.LabelSelectorApplyConfiguration) *NetworkPolicyPeerApplyConfiguration { b.NamespaceSelector = value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicyport.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicyport.go index 581ef1c34..2ded0aecf 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicyport.go +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicyport.go @@ -19,14 +19,14 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" intstr "k8s.io/apimachinery/pkg/util/intstr" ) // NetworkPolicyPortApplyConfiguration represents a declarative configuration of the NetworkPolicyPort type for use // with apply. type NetworkPolicyPortApplyConfiguration struct { - Protocol *v1.Protocol `json:"protocol,omitempty"` + Protocol *corev1.Protocol `json:"protocol,omitempty"` Port *intstr.IntOrString `json:"port,omitempty"` EndPort *int32 `json:"endPort,omitempty"` } @@ -40,7 +40,7 @@ func NetworkPolicyPort() *NetworkPolicyPortApplyConfiguration { // WithProtocol sets the Protocol field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Protocol field is set to the value of the last call. -func (b *NetworkPolicyPortApplyConfiguration) WithProtocol(value v1.Protocol) *NetworkPolicyPortApplyConfiguration { +func (b *NetworkPolicyPortApplyConfiguration) WithProtocol(value corev1.Protocol) *NetworkPolicyPortApplyConfiguration { b.Protocol = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicyspec.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicyspec.go index da5ed5d35..48369b921 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicyspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicyspec.go @@ -19,17 +19,17 @@ limitations under the License. package v1 import ( - apinetworkingv1 "k8s.io/api/networking/v1" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + networkingv1 "k8s.io/api/networking/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) // NetworkPolicySpecApplyConfiguration represents a declarative configuration of the NetworkPolicySpec type for use // with apply. 
type NetworkPolicySpecApplyConfiguration struct { - PodSelector *v1.LabelSelectorApplyConfiguration `json:"podSelector,omitempty"` + PodSelector *metav1.LabelSelectorApplyConfiguration `json:"podSelector,omitempty"` Ingress []NetworkPolicyIngressRuleApplyConfiguration `json:"ingress,omitempty"` Egress []NetworkPolicyEgressRuleApplyConfiguration `json:"egress,omitempty"` - PolicyTypes []apinetworkingv1.PolicyType `json:"policyTypes,omitempty"` + PolicyTypes []networkingv1.PolicyType `json:"policyTypes,omitempty"` } // NetworkPolicySpecApplyConfiguration constructs a declarative configuration of the NetworkPolicySpec type for use with @@ -41,7 +41,7 @@ func NetworkPolicySpec() *NetworkPolicySpecApplyConfiguration { // WithPodSelector sets the PodSelector field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the PodSelector field is set to the value of the last call. -func (b *NetworkPolicySpecApplyConfiguration) WithPodSelector(value *v1.LabelSelectorApplyConfiguration) *NetworkPolicySpecApplyConfiguration { +func (b *NetworkPolicySpecApplyConfiguration) WithPodSelector(value *metav1.LabelSelectorApplyConfiguration) *NetworkPolicySpecApplyConfiguration { b.PodSelector = value return b } @@ -75,7 +75,7 @@ func (b *NetworkPolicySpecApplyConfiguration) WithEgress(values ...*NetworkPolic // WithPolicyTypes adds the given value to the PolicyTypes field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the PolicyTypes field. -func (b *NetworkPolicySpecApplyConfiguration) WithPolicyTypes(values ...apinetworkingv1.PolicyType) *NetworkPolicySpecApplyConfiguration { +func (b *NetworkPolicySpecApplyConfiguration) WithPolicyTypes(values ...networkingv1.PolicyType) *NetworkPolicySpecApplyConfiguration { for i := range values { b.PolicyTypes = append(b.PolicyTypes, values[i]) } diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/ipaddress.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/ipaddress.go index 999c23fa1..cc7880992 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/ipaddress.go +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/ipaddress.go @@ -84,7 +84,7 @@ func extractIPAddress(iPAddress *networkingv1alpha1.IPAddress, fieldManager stri // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *IPAddressApplyConfiguration) WithKind(value string) *IPAddressApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -92,7 +92,7 @@ func (b *IPAddressApplyConfiguration) WithKind(value string) *IPAddressApplyConf // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *IPAddressApplyConfiguration) WithAPIVersion(value string) *IPAddressApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -101,7 +101,7 @@ func (b *IPAddressApplyConfiguration) WithAPIVersion(value string) *IPAddressApp // If called multiple times, the Name field is set to the value of the last call. 
func (b *IPAddressApplyConfiguration) WithName(value string) *IPAddressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -110,7 +110,7 @@ func (b *IPAddressApplyConfiguration) WithName(value string) *IPAddressApplyConf // If called multiple times, the GenerateName field is set to the value of the last call. func (b *IPAddressApplyConfiguration) WithGenerateName(value string) *IPAddressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -119,7 +119,7 @@ func (b *IPAddressApplyConfiguration) WithGenerateName(value string) *IPAddressA // If called multiple times, the Namespace field is set to the value of the last call. func (b *IPAddressApplyConfiguration) WithNamespace(value string) *IPAddressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -128,7 +128,7 @@ func (b *IPAddressApplyConfiguration) WithNamespace(value string) *IPAddressAppl // If called multiple times, the UID field is set to the value of the last call. func (b *IPAddressApplyConfiguration) WithUID(value types.UID) *IPAddressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -137,7 +137,7 @@ func (b *IPAddressApplyConfiguration) WithUID(value types.UID) *IPAddressApplyCo // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *IPAddressApplyConfiguration) WithResourceVersion(value string) *IPAddressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -146,7 +146,7 @@ func (b *IPAddressApplyConfiguration) WithResourceVersion(value string) *IPAddre // If called multiple times, the Generation field is set to the value of the last call. func (b *IPAddressApplyConfiguration) WithGeneration(value int64) *IPAddressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } @@ -155,7 +155,7 @@ func (b *IPAddressApplyConfiguration) WithGeneration(value int64) *IPAddressAppl // If called multiple times, the CreationTimestamp field is set to the value of the last call. func (b *IPAddressApplyConfiguration) WithCreationTimestamp(value metav1.Time) *IPAddressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } @@ -164,7 +164,7 @@ func (b *IPAddressApplyConfiguration) WithCreationTimestamp(value metav1.Time) * // If called multiple times, the DeletionTimestamp field is set to the value of the last call. func (b *IPAddressApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *IPAddressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -173,7 +173,7 @@ func (b *IPAddressApplyConfiguration) WithDeletionTimestamp(value metav1.Time) * // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. 
func (b *IPAddressApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *IPAddressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -183,11 +183,11 @@ func (b *IPAddressApplyConfiguration) WithDeletionGracePeriodSeconds(value int64 // overwriting an existing map entries in Labels field with the same key. func (b *IPAddressApplyConfiguration) WithLabels(entries map[string]string) *IPAddressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -198,11 +198,11 @@ func (b *IPAddressApplyConfiguration) WithLabels(entries map[string]string) *IPA // overwriting an existing map entries in Annotations field with the same key. func (b *IPAddressApplyConfiguration) WithAnnotations(entries map[string]string) *IPAddressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -216,7 +216,7 @@ func (b *IPAddressApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerRef if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -227,7 +227,7 @@ func (b *IPAddressApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerRef func (b *IPAddressApplyConfiguration) WithFinalizers(values ...string) *IPAddressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } @@ -249,5 +249,5 @@ func (b *IPAddressApplyConfiguration) WithSpec(value *IPAddressSpecApplyConfigur // GetName retrieves the value of the Name field in the declarative configuration. func (b *IPAddressApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/servicecidr.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/servicecidr.go index 984e049f2..27d3f271b 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/servicecidr.go +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/servicecidr.go @@ -85,7 +85,7 @@ func extractServiceCIDR(serviceCIDR *networkingv1alpha1.ServiceCIDR, fieldManage // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. 
func (b *ServiceCIDRApplyConfiguration) WithKind(value string) *ServiceCIDRApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -93,7 +93,7 @@ func (b *ServiceCIDRApplyConfiguration) WithKind(value string) *ServiceCIDRApply // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *ServiceCIDRApplyConfiguration) WithAPIVersion(value string) *ServiceCIDRApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -102,7 +102,7 @@ func (b *ServiceCIDRApplyConfiguration) WithAPIVersion(value string) *ServiceCID // If called multiple times, the Name field is set to the value of the last call. func (b *ServiceCIDRApplyConfiguration) WithName(value string) *ServiceCIDRApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -111,7 +111,7 @@ func (b *ServiceCIDRApplyConfiguration) WithName(value string) *ServiceCIDRApply // If called multiple times, the GenerateName field is set to the value of the last call. func (b *ServiceCIDRApplyConfiguration) WithGenerateName(value string) *ServiceCIDRApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -120,7 +120,7 @@ func (b *ServiceCIDRApplyConfiguration) WithGenerateName(value string) *ServiceC // If called multiple times, the Namespace field is set to the value of the last call. func (b *ServiceCIDRApplyConfiguration) WithNamespace(value string) *ServiceCIDRApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -129,7 +129,7 @@ func (b *ServiceCIDRApplyConfiguration) WithNamespace(value string) *ServiceCIDR // If called multiple times, the UID field is set to the value of the last call. func (b *ServiceCIDRApplyConfiguration) WithUID(value types.UID) *ServiceCIDRApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -138,7 +138,7 @@ func (b *ServiceCIDRApplyConfiguration) WithUID(value types.UID) *ServiceCIDRApp // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *ServiceCIDRApplyConfiguration) WithResourceVersion(value string) *ServiceCIDRApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -147,7 +147,7 @@ func (b *ServiceCIDRApplyConfiguration) WithResourceVersion(value string) *Servi // If called multiple times, the Generation field is set to the value of the last call. func (b *ServiceCIDRApplyConfiguration) WithGeneration(value int64) *ServiceCIDRApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } @@ -156,7 +156,7 @@ func (b *ServiceCIDRApplyConfiguration) WithGeneration(value int64) *ServiceCIDR // If called multiple times, the CreationTimestamp field is set to the value of the last call. 
func (b *ServiceCIDRApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ServiceCIDRApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } @@ -165,7 +165,7 @@ func (b *ServiceCIDRApplyConfiguration) WithCreationTimestamp(value metav1.Time) // If called multiple times, the DeletionTimestamp field is set to the value of the last call. func (b *ServiceCIDRApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ServiceCIDRApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -174,7 +174,7 @@ func (b *ServiceCIDRApplyConfiguration) WithDeletionTimestamp(value metav1.Time) // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *ServiceCIDRApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ServiceCIDRApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -184,11 +184,11 @@ func (b *ServiceCIDRApplyConfiguration) WithDeletionGracePeriodSeconds(value int // overwriting an existing map entries in Labels field with the same key. func (b *ServiceCIDRApplyConfiguration) WithLabels(entries map[string]string) *ServiceCIDRApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -199,11 +199,11 @@ func (b *ServiceCIDRApplyConfiguration) WithLabels(entries map[string]string) *S // overwriting an existing map entries in Annotations field with the same key. 
func (b *ServiceCIDRApplyConfiguration) WithAnnotations(entries map[string]string) *ServiceCIDRApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -217,7 +217,7 @@ func (b *ServiceCIDRApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerR if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -228,7 +228,7 @@ func (b *ServiceCIDRApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerR func (b *ServiceCIDRApplyConfiguration) WithFinalizers(values ...string) *ServiceCIDRApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } @@ -258,5 +258,5 @@ func (b *ServiceCIDRApplyConfiguration) WithStatus(value *ServiceCIDRStatusApply // GetName retrieves the value of the Name field in the declarative configuration. func (b *ServiceCIDRApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/httpingresspath.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/httpingresspath.go index 61b458f7e..c7301c6a3 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/httpingresspath.go +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/httpingresspath.go @@ -19,14 +19,14 @@ limitations under the License. package v1beta1 import ( - v1beta1 "k8s.io/api/networking/v1beta1" + networkingv1beta1 "k8s.io/api/networking/v1beta1" ) // HTTPIngressPathApplyConfiguration represents a declarative configuration of the HTTPIngressPath type for use // with apply. type HTTPIngressPathApplyConfiguration struct { Path *string `json:"path,omitempty"` - PathType *v1beta1.PathType `json:"pathType,omitempty"` + PathType *networkingv1beta1.PathType `json:"pathType,omitempty"` Backend *IngressBackendApplyConfiguration `json:"backend,omitempty"` } @@ -47,7 +47,7 @@ func (b *HTTPIngressPathApplyConfiguration) WithPath(value string) *HTTPIngressP // WithPathType sets the PathType field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the PathType field is set to the value of the last call. 
-func (b *HTTPIngressPathApplyConfiguration) WithPathType(value v1beta1.PathType) *HTTPIngressPathApplyConfiguration { +func (b *HTTPIngressPathApplyConfiguration) WithPathType(value networkingv1beta1.PathType) *HTTPIngressPathApplyConfiguration { b.PathType = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingress.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingress.go index 0df53ea65..5d26cd75c 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingress.go +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingress.go @@ -87,7 +87,7 @@ func extractIngress(ingress *networkingv1beta1.Ingress, fieldManager string, sub // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *IngressApplyConfiguration) WithKind(value string) *IngressApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -95,7 +95,7 @@ func (b *IngressApplyConfiguration) WithKind(value string) *IngressApplyConfigur // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *IngressApplyConfiguration) WithAPIVersion(value string) *IngressApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -104,7 +104,7 @@ func (b *IngressApplyConfiguration) WithAPIVersion(value string) *IngressApplyCo // If called multiple times, the Name field is set to the value of the last call. func (b *IngressApplyConfiguration) WithName(value string) *IngressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -113,7 +113,7 @@ func (b *IngressApplyConfiguration) WithName(value string) *IngressApplyConfigur // If called multiple times, the GenerateName field is set to the value of the last call. func (b *IngressApplyConfiguration) WithGenerateName(value string) *IngressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -122,7 +122,7 @@ func (b *IngressApplyConfiguration) WithGenerateName(value string) *IngressApply // If called multiple times, the Namespace field is set to the value of the last call. func (b *IngressApplyConfiguration) WithNamespace(value string) *IngressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -131,7 +131,7 @@ func (b *IngressApplyConfiguration) WithNamespace(value string) *IngressApplyCon // If called multiple times, the UID field is set to the value of the last call. func (b *IngressApplyConfiguration) WithUID(value types.UID) *IngressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -140,7 +140,7 @@ func (b *IngressApplyConfiguration) WithUID(value types.UID) *IngressApplyConfig // If called multiple times, the ResourceVersion field is set to the value of the last call. 
func (b *IngressApplyConfiguration) WithResourceVersion(value string) *IngressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -149,7 +149,7 @@ func (b *IngressApplyConfiguration) WithResourceVersion(value string) *IngressAp // If called multiple times, the Generation field is set to the value of the last call. func (b *IngressApplyConfiguration) WithGeneration(value int64) *IngressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } @@ -158,7 +158,7 @@ func (b *IngressApplyConfiguration) WithGeneration(value int64) *IngressApplyCon // If called multiple times, the CreationTimestamp field is set to the value of the last call. func (b *IngressApplyConfiguration) WithCreationTimestamp(value metav1.Time) *IngressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } @@ -167,7 +167,7 @@ func (b *IngressApplyConfiguration) WithCreationTimestamp(value metav1.Time) *In // If called multiple times, the DeletionTimestamp field is set to the value of the last call. func (b *IngressApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *IngressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -176,7 +176,7 @@ func (b *IngressApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *In // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *IngressApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *IngressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -186,11 +186,11 @@ func (b *IngressApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) // overwriting an existing map entries in Labels field with the same key. func (b *IngressApplyConfiguration) WithLabels(entries map[string]string) *IngressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -201,11 +201,11 @@ func (b *IngressApplyConfiguration) WithLabels(entries map[string]string) *Ingre // overwriting an existing map entries in Annotations field with the same key. 
func (b *IngressApplyConfiguration) WithAnnotations(entries map[string]string) *IngressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -219,7 +219,7 @@ func (b *IngressApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerRefer if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -230,7 +230,7 @@ func (b *IngressApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerRefer func (b *IngressApplyConfiguration) WithFinalizers(values ...string) *IngressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } @@ -260,5 +260,5 @@ func (b *IngressApplyConfiguration) WithStatus(value *IngressStatusApplyConfigur // GetName retrieves the value of the Name field in the declarative configuration. func (b *IngressApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressclass.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressclass.go index b0e877b57..272e0339f 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressclass.go +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressclass.go @@ -84,7 +84,7 @@ func extractIngressClass(ingressClass *networkingv1beta1.IngressClass, fieldMana // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *IngressClassApplyConfiguration) WithKind(value string) *IngressClassApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -92,7 +92,7 @@ func (b *IngressClassApplyConfiguration) WithKind(value string) *IngressClassApp // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *IngressClassApplyConfiguration) WithAPIVersion(value string) *IngressClassApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -101,7 +101,7 @@ func (b *IngressClassApplyConfiguration) WithAPIVersion(value string) *IngressCl // If called multiple times, the Name field is set to the value of the last call. 
func (b *IngressClassApplyConfiguration) WithName(value string) *IngressClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -110,7 +110,7 @@ func (b *IngressClassApplyConfiguration) WithName(value string) *IngressClassApp // If called multiple times, the GenerateName field is set to the value of the last call. func (b *IngressClassApplyConfiguration) WithGenerateName(value string) *IngressClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -119,7 +119,7 @@ func (b *IngressClassApplyConfiguration) WithGenerateName(value string) *Ingress // If called multiple times, the Namespace field is set to the value of the last call. func (b *IngressClassApplyConfiguration) WithNamespace(value string) *IngressClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -128,7 +128,7 @@ func (b *IngressClassApplyConfiguration) WithNamespace(value string) *IngressCla // If called multiple times, the UID field is set to the value of the last call. func (b *IngressClassApplyConfiguration) WithUID(value types.UID) *IngressClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -137,7 +137,7 @@ func (b *IngressClassApplyConfiguration) WithUID(value types.UID) *IngressClassA // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *IngressClassApplyConfiguration) WithResourceVersion(value string) *IngressClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -146,7 +146,7 @@ func (b *IngressClassApplyConfiguration) WithResourceVersion(value string) *Ingr // If called multiple times, the Generation field is set to the value of the last call. func (b *IngressClassApplyConfiguration) WithGeneration(value int64) *IngressClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } @@ -155,7 +155,7 @@ func (b *IngressClassApplyConfiguration) WithGeneration(value int64) *IngressCla // If called multiple times, the CreationTimestamp field is set to the value of the last call. func (b *IngressClassApplyConfiguration) WithCreationTimestamp(value metav1.Time) *IngressClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } @@ -164,7 +164,7 @@ func (b *IngressClassApplyConfiguration) WithCreationTimestamp(value metav1.Time // If called multiple times, the DeletionTimestamp field is set to the value of the last call. func (b *IngressClassApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *IngressClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -173,7 +173,7 @@ func (b *IngressClassApplyConfiguration) WithDeletionTimestamp(value metav1.Time // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. 
func (b *IngressClassApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *IngressClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -183,11 +183,11 @@ func (b *IngressClassApplyConfiguration) WithDeletionGracePeriodSeconds(value in // overwriting an existing map entries in Labels field with the same key. func (b *IngressClassApplyConfiguration) WithLabels(entries map[string]string) *IngressClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -198,11 +198,11 @@ func (b *IngressClassApplyConfiguration) WithLabels(entries map[string]string) * // overwriting an existing map entries in Annotations field with the same key. func (b *IngressClassApplyConfiguration) WithAnnotations(entries map[string]string) *IngressClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -216,7 +216,7 @@ func (b *IngressClassApplyConfiguration) WithOwnerReferences(values ...*v1.Owner if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -227,7 +227,7 @@ func (b *IngressClassApplyConfiguration) WithOwnerReferences(values ...*v1.Owner func (b *IngressClassApplyConfiguration) WithFinalizers(values ...string) *IngressClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } @@ -249,5 +249,5 @@ func (b *IngressClassApplyConfiguration) WithSpec(value *IngressClassSpecApplyCo // GetName retrieves the value of the Name field in the declarative configuration. func (b *IngressClassApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressrule.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressrule.go index dc676f7b6..809fada92 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressrule.go +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressrule.go @@ -43,6 +43,6 @@ func (b *IngressRuleApplyConfiguration) WithHost(value string) *IngressRuleApply // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the HTTP field is set to the value of the last call. 
func (b *IngressRuleApplyConfiguration) WithHTTP(value *HTTPIngressRuleValueApplyConfiguration) *IngressRuleApplyConfiguration { - b.HTTP = value + b.IngressRuleValueApplyConfiguration.HTTP = value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ipaddress.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ipaddress.go index 3047d79b9..f58b54da5 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ipaddress.go +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ipaddress.go @@ -84,7 +84,7 @@ func extractIPAddress(iPAddress *networkingv1beta1.IPAddress, fieldManager strin // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *IPAddressApplyConfiguration) WithKind(value string) *IPAddressApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -92,7 +92,7 @@ func (b *IPAddressApplyConfiguration) WithKind(value string) *IPAddressApplyConf // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *IPAddressApplyConfiguration) WithAPIVersion(value string) *IPAddressApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -101,7 +101,7 @@ func (b *IPAddressApplyConfiguration) WithAPIVersion(value string) *IPAddressApp // If called multiple times, the Name field is set to the value of the last call. func (b *IPAddressApplyConfiguration) WithName(value string) *IPAddressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -110,7 +110,7 @@ func (b *IPAddressApplyConfiguration) WithName(value string) *IPAddressApplyConf // If called multiple times, the GenerateName field is set to the value of the last call. func (b *IPAddressApplyConfiguration) WithGenerateName(value string) *IPAddressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -119,7 +119,7 @@ func (b *IPAddressApplyConfiguration) WithGenerateName(value string) *IPAddressA // If called multiple times, the Namespace field is set to the value of the last call. func (b *IPAddressApplyConfiguration) WithNamespace(value string) *IPAddressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -128,7 +128,7 @@ func (b *IPAddressApplyConfiguration) WithNamespace(value string) *IPAddressAppl // If called multiple times, the UID field is set to the value of the last call. func (b *IPAddressApplyConfiguration) WithUID(value types.UID) *IPAddressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -137,7 +137,7 @@ func (b *IPAddressApplyConfiguration) WithUID(value types.UID) *IPAddressApplyCo // If called multiple times, the ResourceVersion field is set to the value of the last call. 
func (b *IPAddressApplyConfiguration) WithResourceVersion(value string) *IPAddressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -146,7 +146,7 @@ func (b *IPAddressApplyConfiguration) WithResourceVersion(value string) *IPAddre // If called multiple times, the Generation field is set to the value of the last call. func (b *IPAddressApplyConfiguration) WithGeneration(value int64) *IPAddressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } @@ -155,7 +155,7 @@ func (b *IPAddressApplyConfiguration) WithGeneration(value int64) *IPAddressAppl // If called multiple times, the CreationTimestamp field is set to the value of the last call. func (b *IPAddressApplyConfiguration) WithCreationTimestamp(value metav1.Time) *IPAddressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } @@ -164,7 +164,7 @@ func (b *IPAddressApplyConfiguration) WithCreationTimestamp(value metav1.Time) * // If called multiple times, the DeletionTimestamp field is set to the value of the last call. func (b *IPAddressApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *IPAddressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -173,7 +173,7 @@ func (b *IPAddressApplyConfiguration) WithDeletionTimestamp(value metav1.Time) * // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *IPAddressApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *IPAddressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -183,11 +183,11 @@ func (b *IPAddressApplyConfiguration) WithDeletionGracePeriodSeconds(value int64 // overwriting an existing map entries in Labels field with the same key. func (b *IPAddressApplyConfiguration) WithLabels(entries map[string]string) *IPAddressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -198,11 +198,11 @@ func (b *IPAddressApplyConfiguration) WithLabels(entries map[string]string) *IPA // overwriting an existing map entries in Annotations field with the same key. 
func (b *IPAddressApplyConfiguration) WithAnnotations(entries map[string]string) *IPAddressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -216,7 +216,7 @@ func (b *IPAddressApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerRef if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -227,7 +227,7 @@ func (b *IPAddressApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerRef func (b *IPAddressApplyConfiguration) WithFinalizers(values ...string) *IPAddressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } @@ -249,5 +249,5 @@ func (b *IPAddressApplyConfiguration) WithSpec(value *IPAddressSpecApplyConfigur // GetName retrieves the value of the Name field in the declarative configuration. func (b *IPAddressApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/servicecidr.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/servicecidr.go index 4ef8e9eca..6a53db5c0 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/servicecidr.go +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/servicecidr.go @@ -85,7 +85,7 @@ func extractServiceCIDR(serviceCIDR *networkingv1beta1.ServiceCIDR, fieldManager // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *ServiceCIDRApplyConfiguration) WithKind(value string) *ServiceCIDRApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -93,7 +93,7 @@ func (b *ServiceCIDRApplyConfiguration) WithKind(value string) *ServiceCIDRApply // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *ServiceCIDRApplyConfiguration) WithAPIVersion(value string) *ServiceCIDRApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -102,7 +102,7 @@ func (b *ServiceCIDRApplyConfiguration) WithAPIVersion(value string) *ServiceCID // If called multiple times, the Name field is set to the value of the last call. 
func (b *ServiceCIDRApplyConfiguration) WithName(value string) *ServiceCIDRApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -111,7 +111,7 @@ func (b *ServiceCIDRApplyConfiguration) WithName(value string) *ServiceCIDRApply // If called multiple times, the GenerateName field is set to the value of the last call. func (b *ServiceCIDRApplyConfiguration) WithGenerateName(value string) *ServiceCIDRApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -120,7 +120,7 @@ func (b *ServiceCIDRApplyConfiguration) WithGenerateName(value string) *ServiceC // If called multiple times, the Namespace field is set to the value of the last call. func (b *ServiceCIDRApplyConfiguration) WithNamespace(value string) *ServiceCIDRApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -129,7 +129,7 @@ func (b *ServiceCIDRApplyConfiguration) WithNamespace(value string) *ServiceCIDR // If called multiple times, the UID field is set to the value of the last call. func (b *ServiceCIDRApplyConfiguration) WithUID(value types.UID) *ServiceCIDRApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -138,7 +138,7 @@ func (b *ServiceCIDRApplyConfiguration) WithUID(value types.UID) *ServiceCIDRApp // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *ServiceCIDRApplyConfiguration) WithResourceVersion(value string) *ServiceCIDRApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -147,7 +147,7 @@ func (b *ServiceCIDRApplyConfiguration) WithResourceVersion(value string) *Servi // If called multiple times, the Generation field is set to the value of the last call. func (b *ServiceCIDRApplyConfiguration) WithGeneration(value int64) *ServiceCIDRApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } @@ -156,7 +156,7 @@ func (b *ServiceCIDRApplyConfiguration) WithGeneration(value int64) *ServiceCIDR // If called multiple times, the CreationTimestamp field is set to the value of the last call. func (b *ServiceCIDRApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ServiceCIDRApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } @@ -165,7 +165,7 @@ func (b *ServiceCIDRApplyConfiguration) WithCreationTimestamp(value metav1.Time) // If called multiple times, the DeletionTimestamp field is set to the value of the last call. func (b *ServiceCIDRApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ServiceCIDRApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -174,7 +174,7 @@ func (b *ServiceCIDRApplyConfiguration) WithDeletionTimestamp(value metav1.Time) // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. 
func (b *ServiceCIDRApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ServiceCIDRApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -184,11 +184,11 @@ func (b *ServiceCIDRApplyConfiguration) WithDeletionGracePeriodSeconds(value int // overwriting an existing map entries in Labels field with the same key. func (b *ServiceCIDRApplyConfiguration) WithLabels(entries map[string]string) *ServiceCIDRApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -199,11 +199,11 @@ func (b *ServiceCIDRApplyConfiguration) WithLabels(entries map[string]string) *S // overwriting an existing map entries in Annotations field with the same key. func (b *ServiceCIDRApplyConfiguration) WithAnnotations(entries map[string]string) *ServiceCIDRApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -217,7 +217,7 @@ func (b *ServiceCIDRApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerR if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -228,7 +228,7 @@ func (b *ServiceCIDRApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerR func (b *ServiceCIDRApplyConfiguration) WithFinalizers(values ...string) *ServiceCIDRApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } @@ -258,5 +258,5 @@ func (b *ServiceCIDRApplyConfiguration) WithStatus(value *ServiceCIDRStatusApply // GetName retrieves the value of the Name field in the declarative configuration. func (b *ServiceCIDRApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/k8s.io/client-go/applyconfigurations/node/v1/overhead.go b/vendor/k8s.io/client-go/applyconfigurations/node/v1/overhead.go index 6694538fc..30ce9fb42 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/node/v1/overhead.go +++ b/vendor/k8s.io/client-go/applyconfigurations/node/v1/overhead.go @@ -19,13 +19,13 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" ) // OverheadApplyConfiguration represents a declarative configuration of the Overhead type for use // with apply. 
type OverheadApplyConfiguration struct { - PodFixed *v1.ResourceList `json:"podFixed,omitempty"` + PodFixed *corev1.ResourceList `json:"podFixed,omitempty"` } // OverheadApplyConfiguration constructs a declarative configuration of the Overhead type for use with @@ -37,7 +37,7 @@ func Overhead() *OverheadApplyConfiguration { // WithPodFixed sets the PodFixed field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the PodFixed field is set to the value of the last call. -func (b *OverheadApplyConfiguration) WithPodFixed(value v1.ResourceList) *OverheadApplyConfiguration { +func (b *OverheadApplyConfiguration) WithPodFixed(value corev1.ResourceList) *OverheadApplyConfiguration { b.PodFixed = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/node/v1/runtimeclass.go b/vendor/k8s.io/client-go/applyconfigurations/node/v1/runtimeclass.go index 6ce01a319..067dc1703 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/node/v1/runtimeclass.go +++ b/vendor/k8s.io/client-go/applyconfigurations/node/v1/runtimeclass.go @@ -19,22 +19,22 @@ limitations under the License. package v1 import ( - apinodev1 "k8s.io/api/node/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + nodev1 "k8s.io/api/node/v1" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" internal "k8s.io/client-go/applyconfigurations/internal" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) // RuntimeClassApplyConfiguration represents a declarative configuration of the RuntimeClass type for use // with apply. type RuntimeClassApplyConfiguration struct { - v1.TypeMetaApplyConfiguration `json:",inline"` - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Handler *string `json:"handler,omitempty"` - Overhead *OverheadApplyConfiguration `json:"overhead,omitempty"` - Scheduling *SchedulingApplyConfiguration `json:"scheduling,omitempty"` + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Handler *string `json:"handler,omitempty"` + Overhead *OverheadApplyConfiguration `json:"overhead,omitempty"` + Scheduling *SchedulingApplyConfiguration `json:"scheduling,omitempty"` } // RuntimeClass constructs a declarative configuration of the RuntimeClass type for use with @@ -58,18 +58,18 @@ func RuntimeClass(name string) *RuntimeClassApplyConfiguration { // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! -func ExtractRuntimeClass(runtimeClass *apinodev1.RuntimeClass, fieldManager string) (*RuntimeClassApplyConfiguration, error) { +func ExtractRuntimeClass(runtimeClass *nodev1.RuntimeClass, fieldManager string) (*RuntimeClassApplyConfiguration, error) { return extractRuntimeClass(runtimeClass, fieldManager, "") } // ExtractRuntimeClassStatus is the same as ExtractRuntimeClass except // that it extracts the status subresource applied configuration. // Experimental! 
-func ExtractRuntimeClassStatus(runtimeClass *apinodev1.RuntimeClass, fieldManager string) (*RuntimeClassApplyConfiguration, error) { +func ExtractRuntimeClassStatus(runtimeClass *nodev1.RuntimeClass, fieldManager string) (*RuntimeClassApplyConfiguration, error) { return extractRuntimeClass(runtimeClass, fieldManager, "status") } -func extractRuntimeClass(runtimeClass *apinodev1.RuntimeClass, fieldManager string, subresource string) (*RuntimeClassApplyConfiguration, error) { +func extractRuntimeClass(runtimeClass *nodev1.RuntimeClass, fieldManager string, subresource string) (*RuntimeClassApplyConfiguration, error) { b := &RuntimeClassApplyConfiguration{} err := managedfields.ExtractInto(runtimeClass, internal.Parser().Type("io.k8s.api.node.v1.RuntimeClass"), fieldManager, b, subresource) if err != nil { @@ -86,7 +86,7 @@ func extractRuntimeClass(runtimeClass *apinodev1.RuntimeClass, fieldManager stri // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *RuntimeClassApplyConfiguration) WithKind(value string) *RuntimeClassApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -94,7 +94,7 @@ func (b *RuntimeClassApplyConfiguration) WithKind(value string) *RuntimeClassApp // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *RuntimeClassApplyConfiguration) WithAPIVersion(value string) *RuntimeClassApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -103,7 +103,7 @@ func (b *RuntimeClassApplyConfiguration) WithAPIVersion(value string) *RuntimeCl // If called multiple times, the Name field is set to the value of the last call. func (b *RuntimeClassApplyConfiguration) WithName(value string) *RuntimeClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -112,7 +112,7 @@ func (b *RuntimeClassApplyConfiguration) WithName(value string) *RuntimeClassApp // If called multiple times, the GenerateName field is set to the value of the last call. func (b *RuntimeClassApplyConfiguration) WithGenerateName(value string) *RuntimeClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -121,7 +121,7 @@ func (b *RuntimeClassApplyConfiguration) WithGenerateName(value string) *Runtime // If called multiple times, the Namespace field is set to the value of the last call. func (b *RuntimeClassApplyConfiguration) WithNamespace(value string) *RuntimeClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -130,7 +130,7 @@ func (b *RuntimeClassApplyConfiguration) WithNamespace(value string) *RuntimeCla // If called multiple times, the UID field is set to the value of the last call. 
func (b *RuntimeClassApplyConfiguration) WithUID(value types.UID) *RuntimeClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -139,7 +139,7 @@ func (b *RuntimeClassApplyConfiguration) WithUID(value types.UID) *RuntimeClassA // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *RuntimeClassApplyConfiguration) WithResourceVersion(value string) *RuntimeClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -148,25 +148,25 @@ func (b *RuntimeClassApplyConfiguration) WithResourceVersion(value string) *Runt // If called multiple times, the Generation field is set to the value of the last call. func (b *RuntimeClassApplyConfiguration) WithGeneration(value int64) *RuntimeClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CreationTimestamp field is set to the value of the last call. -func (b *RuntimeClassApplyConfiguration) WithCreationTimestamp(value metav1.Time) *RuntimeClassApplyConfiguration { +func (b *RuntimeClassApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *RuntimeClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. -func (b *RuntimeClassApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *RuntimeClassApplyConfiguration { +func (b *RuntimeClassApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *RuntimeClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -175,7 +175,7 @@ func (b *RuntimeClassApplyConfiguration) WithDeletionTimestamp(value metav1.Time // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *RuntimeClassApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *RuntimeClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -185,11 +185,11 @@ func (b *RuntimeClassApplyConfiguration) WithDeletionGracePeriodSeconds(value in // overwriting an existing map entries in Labels field with the same key. 
func (b *RuntimeClassApplyConfiguration) WithLabels(entries map[string]string) *RuntimeClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -200,11 +200,11 @@ func (b *RuntimeClassApplyConfiguration) WithLabels(entries map[string]string) * // overwriting an existing map entries in Annotations field with the same key. func (b *RuntimeClassApplyConfiguration) WithAnnotations(entries map[string]string) *RuntimeClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -212,13 +212,13 @@ func (b *RuntimeClassApplyConfiguration) WithAnnotations(entries map[string]stri // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the OwnerReferences field. -func (b *RuntimeClassApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *RuntimeClassApplyConfiguration { +func (b *RuntimeClassApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *RuntimeClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -229,14 +229,14 @@ func (b *RuntimeClassApplyConfiguration) WithOwnerReferences(values ...*v1.Owner func (b *RuntimeClassApplyConfiguration) WithFinalizers(values ...string) *RuntimeClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } func (b *RuntimeClassApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} } } @@ -267,5 +267,5 @@ func (b *RuntimeClassApplyConfiguration) WithScheduling(value *SchedulingApplyCo // GetName retrieves the value of the Name field in the declarative configuration. 
func (b *RuntimeClassApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/k8s.io/client-go/applyconfigurations/node/v1/scheduling.go b/vendor/k8s.io/client-go/applyconfigurations/node/v1/scheduling.go index 2d084e0f5..b45400fbc 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/node/v1/scheduling.go +++ b/vendor/k8s.io/client-go/applyconfigurations/node/v1/scheduling.go @@ -19,14 +19,14 @@ limitations under the License. package v1 import ( - v1 "k8s.io/client-go/applyconfigurations/core/v1" + corev1 "k8s.io/client-go/applyconfigurations/core/v1" ) // SchedulingApplyConfiguration represents a declarative configuration of the Scheduling type for use // with apply. type SchedulingApplyConfiguration struct { - NodeSelector map[string]string `json:"nodeSelector,omitempty"` - Tolerations []v1.TolerationApplyConfiguration `json:"tolerations,omitempty"` + NodeSelector map[string]string `json:"nodeSelector,omitempty"` + Tolerations []corev1.TolerationApplyConfiguration `json:"tolerations,omitempty"` } // SchedulingApplyConfiguration constructs a declarative configuration of the Scheduling type for use with @@ -52,7 +52,7 @@ func (b *SchedulingApplyConfiguration) WithNodeSelector(entries map[string]strin // WithTolerations adds the given value to the Tolerations field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the Tolerations field. -func (b *SchedulingApplyConfiguration) WithTolerations(values ...*v1.TolerationApplyConfiguration) *SchedulingApplyConfiguration { +func (b *SchedulingApplyConfiguration) WithTolerations(values ...*corev1.TolerationApplyConfiguration) *SchedulingApplyConfiguration { for i := range values { if values[i] == nil { panic("nil value passed to WithTolerations") diff --git a/vendor/k8s.io/client-go/applyconfigurations/node/v1alpha1/runtimeclass.go b/vendor/k8s.io/client-go/applyconfigurations/node/v1alpha1/runtimeclass.go index 9f139ee1b..5ddca3b6e 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/node/v1alpha1/runtimeclass.go +++ b/vendor/k8s.io/client-go/applyconfigurations/node/v1alpha1/runtimeclass.go @@ -84,7 +84,7 @@ func extractRuntimeClass(runtimeClass *nodev1alpha1.RuntimeClass, fieldManager s // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *RuntimeClassApplyConfiguration) WithKind(value string) *RuntimeClassApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -92,7 +92,7 @@ func (b *RuntimeClassApplyConfiguration) WithKind(value string) *RuntimeClassApp // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *RuntimeClassApplyConfiguration) WithAPIVersion(value string) *RuntimeClassApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -101,7 +101,7 @@ func (b *RuntimeClassApplyConfiguration) WithAPIVersion(value string) *RuntimeCl // If called multiple times, the Name field is set to the value of the last call. 
func (b *RuntimeClassApplyConfiguration) WithName(value string) *RuntimeClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -110,7 +110,7 @@ func (b *RuntimeClassApplyConfiguration) WithName(value string) *RuntimeClassApp // If called multiple times, the GenerateName field is set to the value of the last call. func (b *RuntimeClassApplyConfiguration) WithGenerateName(value string) *RuntimeClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -119,7 +119,7 @@ func (b *RuntimeClassApplyConfiguration) WithGenerateName(value string) *Runtime // If called multiple times, the Namespace field is set to the value of the last call. func (b *RuntimeClassApplyConfiguration) WithNamespace(value string) *RuntimeClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -128,7 +128,7 @@ func (b *RuntimeClassApplyConfiguration) WithNamespace(value string) *RuntimeCla // If called multiple times, the UID field is set to the value of the last call. func (b *RuntimeClassApplyConfiguration) WithUID(value types.UID) *RuntimeClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -137,7 +137,7 @@ func (b *RuntimeClassApplyConfiguration) WithUID(value types.UID) *RuntimeClassA // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *RuntimeClassApplyConfiguration) WithResourceVersion(value string) *RuntimeClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -146,7 +146,7 @@ func (b *RuntimeClassApplyConfiguration) WithResourceVersion(value string) *Runt // If called multiple times, the Generation field is set to the value of the last call. func (b *RuntimeClassApplyConfiguration) WithGeneration(value int64) *RuntimeClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } @@ -155,7 +155,7 @@ func (b *RuntimeClassApplyConfiguration) WithGeneration(value int64) *RuntimeCla // If called multiple times, the CreationTimestamp field is set to the value of the last call. func (b *RuntimeClassApplyConfiguration) WithCreationTimestamp(value metav1.Time) *RuntimeClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } @@ -164,7 +164,7 @@ func (b *RuntimeClassApplyConfiguration) WithCreationTimestamp(value metav1.Time // If called multiple times, the DeletionTimestamp field is set to the value of the last call. func (b *RuntimeClassApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *RuntimeClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -173,7 +173,7 @@ func (b *RuntimeClassApplyConfiguration) WithDeletionTimestamp(value metav1.Time // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. 
func (b *RuntimeClassApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *RuntimeClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -183,11 +183,11 @@ func (b *RuntimeClassApplyConfiguration) WithDeletionGracePeriodSeconds(value in // overwriting an existing map entries in Labels field with the same key. func (b *RuntimeClassApplyConfiguration) WithLabels(entries map[string]string) *RuntimeClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -198,11 +198,11 @@ func (b *RuntimeClassApplyConfiguration) WithLabels(entries map[string]string) * // overwriting an existing map entries in Annotations field with the same key. func (b *RuntimeClassApplyConfiguration) WithAnnotations(entries map[string]string) *RuntimeClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -216,7 +216,7 @@ func (b *RuntimeClassApplyConfiguration) WithOwnerReferences(values ...*v1.Owner if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -227,7 +227,7 @@ func (b *RuntimeClassApplyConfiguration) WithOwnerReferences(values ...*v1.Owner func (b *RuntimeClassApplyConfiguration) WithFinalizers(values ...string) *RuntimeClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } @@ -249,5 +249,5 @@ func (b *RuntimeClassApplyConfiguration) WithSpec(value *RuntimeClassSpecApplyCo // GetName retrieves the value of the Name field in the declarative configuration. func (b *RuntimeClassApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/k8s.io/client-go/applyconfigurations/node/v1beta1/runtimeclass.go b/vendor/k8s.io/client-go/applyconfigurations/node/v1beta1/runtimeclass.go index fa6c9f45b..b17de6763 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/node/v1beta1/runtimeclass.go +++ b/vendor/k8s.io/client-go/applyconfigurations/node/v1beta1/runtimeclass.go @@ -86,7 +86,7 @@ func extractRuntimeClass(runtimeClass *nodev1beta1.RuntimeClass, fieldManager st // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. 
func (b *RuntimeClassApplyConfiguration) WithKind(value string) *RuntimeClassApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -94,7 +94,7 @@ func (b *RuntimeClassApplyConfiguration) WithKind(value string) *RuntimeClassApp // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *RuntimeClassApplyConfiguration) WithAPIVersion(value string) *RuntimeClassApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -103,7 +103,7 @@ func (b *RuntimeClassApplyConfiguration) WithAPIVersion(value string) *RuntimeCl // If called multiple times, the Name field is set to the value of the last call. func (b *RuntimeClassApplyConfiguration) WithName(value string) *RuntimeClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -112,7 +112,7 @@ func (b *RuntimeClassApplyConfiguration) WithName(value string) *RuntimeClassApp // If called multiple times, the GenerateName field is set to the value of the last call. func (b *RuntimeClassApplyConfiguration) WithGenerateName(value string) *RuntimeClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -121,7 +121,7 @@ func (b *RuntimeClassApplyConfiguration) WithGenerateName(value string) *Runtime // If called multiple times, the Namespace field is set to the value of the last call. func (b *RuntimeClassApplyConfiguration) WithNamespace(value string) *RuntimeClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -130,7 +130,7 @@ func (b *RuntimeClassApplyConfiguration) WithNamespace(value string) *RuntimeCla // If called multiple times, the UID field is set to the value of the last call. func (b *RuntimeClassApplyConfiguration) WithUID(value types.UID) *RuntimeClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -139,7 +139,7 @@ func (b *RuntimeClassApplyConfiguration) WithUID(value types.UID) *RuntimeClassA // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *RuntimeClassApplyConfiguration) WithResourceVersion(value string) *RuntimeClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -148,7 +148,7 @@ func (b *RuntimeClassApplyConfiguration) WithResourceVersion(value string) *Runt // If called multiple times, the Generation field is set to the value of the last call. func (b *RuntimeClassApplyConfiguration) WithGeneration(value int64) *RuntimeClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } @@ -157,7 +157,7 @@ func (b *RuntimeClassApplyConfiguration) WithGeneration(value int64) *RuntimeCla // If called multiple times, the CreationTimestamp field is set to the value of the last call. 
func (b *RuntimeClassApplyConfiguration) WithCreationTimestamp(value metav1.Time) *RuntimeClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } @@ -166,7 +166,7 @@ func (b *RuntimeClassApplyConfiguration) WithCreationTimestamp(value metav1.Time // If called multiple times, the DeletionTimestamp field is set to the value of the last call. func (b *RuntimeClassApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *RuntimeClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -175,7 +175,7 @@ func (b *RuntimeClassApplyConfiguration) WithDeletionTimestamp(value metav1.Time // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *RuntimeClassApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *RuntimeClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -185,11 +185,11 @@ func (b *RuntimeClassApplyConfiguration) WithDeletionGracePeriodSeconds(value in // overwriting an existing map entries in Labels field with the same key. func (b *RuntimeClassApplyConfiguration) WithLabels(entries map[string]string) *RuntimeClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -200,11 +200,11 @@ func (b *RuntimeClassApplyConfiguration) WithLabels(entries map[string]string) * // overwriting an existing map entries in Annotations field with the same key. 
func (b *RuntimeClassApplyConfiguration) WithAnnotations(entries map[string]string) *RuntimeClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -218,7 +218,7 @@ func (b *RuntimeClassApplyConfiguration) WithOwnerReferences(values ...*v1.Owner if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -229,7 +229,7 @@ func (b *RuntimeClassApplyConfiguration) WithOwnerReferences(values ...*v1.Owner func (b *RuntimeClassApplyConfiguration) WithFinalizers(values ...string) *RuntimeClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } @@ -267,5 +267,5 @@ func (b *RuntimeClassApplyConfiguration) WithScheduling(value *SchedulingApplyCo // GetName retrieves the value of the Name field in the declarative configuration. func (b *RuntimeClassApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/k8s.io/client-go/applyconfigurations/policy/v1/eviction.go b/vendor/k8s.io/client-go/applyconfigurations/policy/v1/eviction.go index 3a051619f..079c6f3bc 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/policy/v1/eviction.go +++ b/vendor/k8s.io/client-go/applyconfigurations/policy/v1/eviction.go @@ -20,19 +20,19 @@ package v1 import ( policyv1 "k8s.io/api/policy/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" internal "k8s.io/client-go/applyconfigurations/internal" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) // EvictionApplyConfiguration represents a declarative configuration of the Eviction type for use // with apply. type EvictionApplyConfiguration struct { - v1.TypeMetaApplyConfiguration `json:",inline"` - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - DeleteOptions *v1.DeleteOptionsApplyConfiguration `json:"deleteOptions,omitempty"` + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + DeleteOptions *metav1.DeleteOptionsApplyConfiguration `json:"deleteOptions,omitempty"` } // Eviction constructs a declarative configuration of the Eviction type for use with @@ -86,7 +86,7 @@ func extractEviction(eviction *policyv1.Eviction, fieldManager string, subresour // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. 
func (b *EvictionApplyConfiguration) WithKind(value string) *EvictionApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -94,7 +94,7 @@ func (b *EvictionApplyConfiguration) WithKind(value string) *EvictionApplyConfig // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *EvictionApplyConfiguration) WithAPIVersion(value string) *EvictionApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -103,7 +103,7 @@ func (b *EvictionApplyConfiguration) WithAPIVersion(value string) *EvictionApply // If called multiple times, the Name field is set to the value of the last call. func (b *EvictionApplyConfiguration) WithName(value string) *EvictionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -112,7 +112,7 @@ func (b *EvictionApplyConfiguration) WithName(value string) *EvictionApplyConfig // If called multiple times, the GenerateName field is set to the value of the last call. func (b *EvictionApplyConfiguration) WithGenerateName(value string) *EvictionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -121,7 +121,7 @@ func (b *EvictionApplyConfiguration) WithGenerateName(value string) *EvictionApp // If called multiple times, the Namespace field is set to the value of the last call. func (b *EvictionApplyConfiguration) WithNamespace(value string) *EvictionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -130,7 +130,7 @@ func (b *EvictionApplyConfiguration) WithNamespace(value string) *EvictionApplyC // If called multiple times, the UID field is set to the value of the last call. func (b *EvictionApplyConfiguration) WithUID(value types.UID) *EvictionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -139,7 +139,7 @@ func (b *EvictionApplyConfiguration) WithUID(value types.UID) *EvictionApplyConf // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *EvictionApplyConfiguration) WithResourceVersion(value string) *EvictionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -148,25 +148,25 @@ func (b *EvictionApplyConfiguration) WithResourceVersion(value string) *Eviction // If called multiple times, the Generation field is set to the value of the last call. func (b *EvictionApplyConfiguration) WithGeneration(value int64) *EvictionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CreationTimestamp field is set to the value of the last call. 
-func (b *EvictionApplyConfiguration) WithCreationTimestamp(value metav1.Time) *EvictionApplyConfiguration { +func (b *EvictionApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *EvictionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. -func (b *EvictionApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *EvictionApplyConfiguration { +func (b *EvictionApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *EvictionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -175,7 +175,7 @@ func (b *EvictionApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *E // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *EvictionApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *EvictionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -185,11 +185,11 @@ func (b *EvictionApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) // overwriting an existing map entries in Labels field with the same key. func (b *EvictionApplyConfiguration) WithLabels(entries map[string]string) *EvictionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -200,11 +200,11 @@ func (b *EvictionApplyConfiguration) WithLabels(entries map[string]string) *Evic // overwriting an existing map entries in Annotations field with the same key. func (b *EvictionApplyConfiguration) WithAnnotations(entries map[string]string) *EvictionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -212,13 +212,13 @@ func (b *EvictionApplyConfiguration) WithAnnotations(entries map[string]string) // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the OwnerReferences field. 
-func (b *EvictionApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *EvictionApplyConfiguration { +func (b *EvictionApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *EvictionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -229,21 +229,21 @@ func (b *EvictionApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerRefe func (b *EvictionApplyConfiguration) WithFinalizers(values ...string) *EvictionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } func (b *EvictionApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} } } // WithDeleteOptions sets the DeleteOptions field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeleteOptions field is set to the value of the last call. -func (b *EvictionApplyConfiguration) WithDeleteOptions(value *v1.DeleteOptionsApplyConfiguration) *EvictionApplyConfiguration { +func (b *EvictionApplyConfiguration) WithDeleteOptions(value *metav1.DeleteOptionsApplyConfiguration) *EvictionApplyConfiguration { b.DeleteOptions = value return b } @@ -251,5 +251,5 @@ func (b *EvictionApplyConfiguration) WithDeleteOptions(value *v1.DeleteOptionsAp // GetName retrieves the value of the Name field in the declarative configuration. func (b *EvictionApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/k8s.io/client-go/applyconfigurations/policy/v1/poddisruptionbudget.go b/vendor/k8s.io/client-go/applyconfigurations/policy/v1/poddisruptionbudget.go index a765a7b62..82ec5a082 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/policy/v1/poddisruptionbudget.go +++ b/vendor/k8s.io/client-go/applyconfigurations/policy/v1/poddisruptionbudget.go @@ -19,21 +19,21 @@ limitations under the License. package v1 import ( - apipolicyv1 "k8s.io/api/policy/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + policyv1 "k8s.io/api/policy/v1" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" internal "k8s.io/client-go/applyconfigurations/internal" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) // PodDisruptionBudgetApplyConfiguration represents a declarative configuration of the PodDisruptionBudget type for use // with apply. 
type PodDisruptionBudgetApplyConfiguration struct { - v1.TypeMetaApplyConfiguration `json:",inline"` - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Spec *PodDisruptionBudgetSpecApplyConfiguration `json:"spec,omitempty"` - Status *PodDisruptionBudgetStatusApplyConfiguration `json:"status,omitempty"` + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *PodDisruptionBudgetSpecApplyConfiguration `json:"spec,omitempty"` + Status *PodDisruptionBudgetStatusApplyConfiguration `json:"status,omitempty"` } // PodDisruptionBudget constructs a declarative configuration of the PodDisruptionBudget type for use with @@ -58,18 +58,18 @@ func PodDisruptionBudget(name, namespace string) *PodDisruptionBudgetApplyConfig // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! -func ExtractPodDisruptionBudget(podDisruptionBudget *apipolicyv1.PodDisruptionBudget, fieldManager string) (*PodDisruptionBudgetApplyConfiguration, error) { +func ExtractPodDisruptionBudget(podDisruptionBudget *policyv1.PodDisruptionBudget, fieldManager string) (*PodDisruptionBudgetApplyConfiguration, error) { return extractPodDisruptionBudget(podDisruptionBudget, fieldManager, "") } // ExtractPodDisruptionBudgetStatus is the same as ExtractPodDisruptionBudget except // that it extracts the status subresource applied configuration. // Experimental! -func ExtractPodDisruptionBudgetStatus(podDisruptionBudget *apipolicyv1.PodDisruptionBudget, fieldManager string) (*PodDisruptionBudgetApplyConfiguration, error) { +func ExtractPodDisruptionBudgetStatus(podDisruptionBudget *policyv1.PodDisruptionBudget, fieldManager string) (*PodDisruptionBudgetApplyConfiguration, error) { return extractPodDisruptionBudget(podDisruptionBudget, fieldManager, "status") } -func extractPodDisruptionBudget(podDisruptionBudget *apipolicyv1.PodDisruptionBudget, fieldManager string, subresource string) (*PodDisruptionBudgetApplyConfiguration, error) { +func extractPodDisruptionBudget(podDisruptionBudget *policyv1.PodDisruptionBudget, fieldManager string, subresource string) (*PodDisruptionBudgetApplyConfiguration, error) { b := &PodDisruptionBudgetApplyConfiguration{} err := managedfields.ExtractInto(podDisruptionBudget, internal.Parser().Type("io.k8s.api.policy.v1.PodDisruptionBudget"), fieldManager, b, subresource) if err != nil { @@ -87,7 +87,7 @@ func extractPodDisruptionBudget(podDisruptionBudget *apipolicyv1.PodDisruptionBu // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *PodDisruptionBudgetApplyConfiguration) WithKind(value string) *PodDisruptionBudgetApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -95,7 +95,7 @@ func (b *PodDisruptionBudgetApplyConfiguration) WithKind(value string) *PodDisru // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. 
func (b *PodDisruptionBudgetApplyConfiguration) WithAPIVersion(value string) *PodDisruptionBudgetApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -104,7 +104,7 @@ func (b *PodDisruptionBudgetApplyConfiguration) WithAPIVersion(value string) *Po // If called multiple times, the Name field is set to the value of the last call. func (b *PodDisruptionBudgetApplyConfiguration) WithName(value string) *PodDisruptionBudgetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -113,7 +113,7 @@ func (b *PodDisruptionBudgetApplyConfiguration) WithName(value string) *PodDisru // If called multiple times, the GenerateName field is set to the value of the last call. func (b *PodDisruptionBudgetApplyConfiguration) WithGenerateName(value string) *PodDisruptionBudgetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -122,7 +122,7 @@ func (b *PodDisruptionBudgetApplyConfiguration) WithGenerateName(value string) * // If called multiple times, the Namespace field is set to the value of the last call. func (b *PodDisruptionBudgetApplyConfiguration) WithNamespace(value string) *PodDisruptionBudgetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -131,7 +131,7 @@ func (b *PodDisruptionBudgetApplyConfiguration) WithNamespace(value string) *Pod // If called multiple times, the UID field is set to the value of the last call. func (b *PodDisruptionBudgetApplyConfiguration) WithUID(value types.UID) *PodDisruptionBudgetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -140,7 +140,7 @@ func (b *PodDisruptionBudgetApplyConfiguration) WithUID(value types.UID) *PodDis // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *PodDisruptionBudgetApplyConfiguration) WithResourceVersion(value string) *PodDisruptionBudgetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -149,25 +149,25 @@ func (b *PodDisruptionBudgetApplyConfiguration) WithResourceVersion(value string // If called multiple times, the Generation field is set to the value of the last call. func (b *PodDisruptionBudgetApplyConfiguration) WithGeneration(value int64) *PodDisruptionBudgetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CreationTimestamp field is set to the value of the last call. 
-func (b *PodDisruptionBudgetApplyConfiguration) WithCreationTimestamp(value metav1.Time) *PodDisruptionBudgetApplyConfiguration { +func (b *PodDisruptionBudgetApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *PodDisruptionBudgetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. -func (b *PodDisruptionBudgetApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *PodDisruptionBudgetApplyConfiguration { +func (b *PodDisruptionBudgetApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *PodDisruptionBudgetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -176,7 +176,7 @@ func (b *PodDisruptionBudgetApplyConfiguration) WithDeletionTimestamp(value meta // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *PodDisruptionBudgetApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *PodDisruptionBudgetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -186,11 +186,11 @@ func (b *PodDisruptionBudgetApplyConfiguration) WithDeletionGracePeriodSeconds(v // overwriting an existing map entries in Labels field with the same key. func (b *PodDisruptionBudgetApplyConfiguration) WithLabels(entries map[string]string) *PodDisruptionBudgetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -201,11 +201,11 @@ func (b *PodDisruptionBudgetApplyConfiguration) WithLabels(entries map[string]st // overwriting an existing map entries in Annotations field with the same key. func (b *PodDisruptionBudgetApplyConfiguration) WithAnnotations(entries map[string]string) *PodDisruptionBudgetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -213,13 +213,13 @@ func (b *PodDisruptionBudgetApplyConfiguration) WithAnnotations(entries map[stri // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the OwnerReferences field. 
-func (b *PodDisruptionBudgetApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *PodDisruptionBudgetApplyConfiguration { +func (b *PodDisruptionBudgetApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *PodDisruptionBudgetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -230,14 +230,14 @@ func (b *PodDisruptionBudgetApplyConfiguration) WithOwnerReferences(values ...*v func (b *PodDisruptionBudgetApplyConfiguration) WithFinalizers(values ...string) *PodDisruptionBudgetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } func (b *PodDisruptionBudgetApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} } } @@ -260,5 +260,5 @@ func (b *PodDisruptionBudgetApplyConfiguration) WithStatus(value *PodDisruptionB // GetName retrieves the value of the Name field in the declarative configuration. func (b *PodDisruptionBudgetApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/k8s.io/client-go/applyconfigurations/policy/v1/poddisruptionbudgetspec.go b/vendor/k8s.io/client-go/applyconfigurations/policy/v1/poddisruptionbudgetspec.go index 291714545..3c66739bd 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/policy/v1/poddisruptionbudgetspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/policy/v1/poddisruptionbudgetspec.go @@ -21,14 +21,14 @@ package v1 import ( policyv1 "k8s.io/api/policy/v1" intstr "k8s.io/apimachinery/pkg/util/intstr" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) // PodDisruptionBudgetSpecApplyConfiguration represents a declarative configuration of the PodDisruptionBudgetSpec type for use // with apply. type PodDisruptionBudgetSpecApplyConfiguration struct { MinAvailable *intstr.IntOrString `json:"minAvailable,omitempty"` - Selector *v1.LabelSelectorApplyConfiguration `json:"selector,omitempty"` + Selector *metav1.LabelSelectorApplyConfiguration `json:"selector,omitempty"` MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty"` UnhealthyPodEvictionPolicy *policyv1.UnhealthyPodEvictionPolicyType `json:"unhealthyPodEvictionPolicy,omitempty"` } @@ -50,7 +50,7 @@ func (b *PodDisruptionBudgetSpecApplyConfiguration) WithMinAvailable(value intst // WithSelector sets the Selector field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Selector field is set to the value of the last call. 
-func (b *PodDisruptionBudgetSpecApplyConfiguration) WithSelector(value *v1.LabelSelectorApplyConfiguration) *PodDisruptionBudgetSpecApplyConfiguration { +func (b *PodDisruptionBudgetSpecApplyConfiguration) WithSelector(value *metav1.LabelSelectorApplyConfiguration) *PodDisruptionBudgetSpecApplyConfiguration { b.Selector = value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/policy/v1/poddisruptionbudgetstatus.go b/vendor/k8s.io/client-go/applyconfigurations/policy/v1/poddisruptionbudgetstatus.go index d0f9baf41..d3c44d90a 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/policy/v1/poddisruptionbudgetstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/policy/v1/poddisruptionbudgetstatus.go @@ -19,20 +19,20 @@ limitations under the License. package v1 import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - metav1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + applyconfigurationsmetav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) // PodDisruptionBudgetStatusApplyConfiguration represents a declarative configuration of the PodDisruptionBudgetStatus type for use // with apply. type PodDisruptionBudgetStatusApplyConfiguration struct { - ObservedGeneration *int64 `json:"observedGeneration,omitempty"` - DisruptedPods map[string]v1.Time `json:"disruptedPods,omitempty"` - DisruptionsAllowed *int32 `json:"disruptionsAllowed,omitempty"` - CurrentHealthy *int32 `json:"currentHealthy,omitempty"` - DesiredHealthy *int32 `json:"desiredHealthy,omitempty"` - ExpectedPods *int32 `json:"expectedPods,omitempty"` - Conditions []metav1.ConditionApplyConfiguration `json:"conditions,omitempty"` + ObservedGeneration *int64 `json:"observedGeneration,omitempty"` + DisruptedPods map[string]metav1.Time `json:"disruptedPods,omitempty"` + DisruptionsAllowed *int32 `json:"disruptionsAllowed,omitempty"` + CurrentHealthy *int32 `json:"currentHealthy,omitempty"` + DesiredHealthy *int32 `json:"desiredHealthy,omitempty"` + ExpectedPods *int32 `json:"expectedPods,omitempty"` + Conditions []applyconfigurationsmetav1.ConditionApplyConfiguration `json:"conditions,omitempty"` } // PodDisruptionBudgetStatusApplyConfiguration constructs a declarative configuration of the PodDisruptionBudgetStatus type for use with @@ -53,9 +53,9 @@ func (b *PodDisruptionBudgetStatusApplyConfiguration) WithObservedGeneration(val // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, the entries provided by each call will be put on the DisruptedPods field, // overwriting an existing map entries in DisruptedPods field with the same key. -func (b *PodDisruptionBudgetStatusApplyConfiguration) WithDisruptedPods(entries map[string]v1.Time) *PodDisruptionBudgetStatusApplyConfiguration { +func (b *PodDisruptionBudgetStatusApplyConfiguration) WithDisruptedPods(entries map[string]metav1.Time) *PodDisruptionBudgetStatusApplyConfiguration { if b.DisruptedPods == nil && len(entries) > 0 { - b.DisruptedPods = make(map[string]v1.Time, len(entries)) + b.DisruptedPods = make(map[string]metav1.Time, len(entries)) } for k, v := range entries { b.DisruptedPods[k] = v @@ -98,7 +98,7 @@ func (b *PodDisruptionBudgetStatusApplyConfiguration) WithExpectedPods(value int // WithConditions adds the given value to the Conditions field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. 
// If called multiple times, values provided by each call will be appended to the Conditions field. -func (b *PodDisruptionBudgetStatusApplyConfiguration) WithConditions(values ...*metav1.ConditionApplyConfiguration) *PodDisruptionBudgetStatusApplyConfiguration { +func (b *PodDisruptionBudgetStatusApplyConfiguration) WithConditions(values ...*applyconfigurationsmetav1.ConditionApplyConfiguration) *PodDisruptionBudgetStatusApplyConfiguration { for i := range values { if values[i] == nil { panic("nil value passed to WithConditions") diff --git a/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/eviction.go b/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/eviction.go index d4121af20..0b5945935 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/eviction.go +++ b/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/eviction.go @@ -19,7 +19,7 @@ limitations under the License. package v1beta1 import ( - v1beta1 "k8s.io/api/policy/v1beta1" + policyv1beta1 "k8s.io/api/policy/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" @@ -57,18 +57,18 @@ func Eviction(name, namespace string) *EvictionApplyConfiguration { // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! -func ExtractEviction(eviction *v1beta1.Eviction, fieldManager string) (*EvictionApplyConfiguration, error) { +func ExtractEviction(eviction *policyv1beta1.Eviction, fieldManager string) (*EvictionApplyConfiguration, error) { return extractEviction(eviction, fieldManager, "") } // ExtractEvictionStatus is the same as ExtractEviction except // that it extracts the status subresource applied configuration. // Experimental! -func ExtractEvictionStatus(eviction *v1beta1.Eviction, fieldManager string) (*EvictionApplyConfiguration, error) { +func ExtractEvictionStatus(eviction *policyv1beta1.Eviction, fieldManager string) (*EvictionApplyConfiguration, error) { return extractEviction(eviction, fieldManager, "status") } -func extractEviction(eviction *v1beta1.Eviction, fieldManager string, subresource string) (*EvictionApplyConfiguration, error) { +func extractEviction(eviction *policyv1beta1.Eviction, fieldManager string, subresource string) (*EvictionApplyConfiguration, error) { b := &EvictionApplyConfiguration{} err := managedfields.ExtractInto(eviction, internal.Parser().Type("io.k8s.api.policy.v1beta1.Eviction"), fieldManager, b, subresource) if err != nil { @@ -86,7 +86,7 @@ func extractEviction(eviction *v1beta1.Eviction, fieldManager string, subresourc // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *EvictionApplyConfiguration) WithKind(value string) *EvictionApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -94,7 +94,7 @@ func (b *EvictionApplyConfiguration) WithKind(value string) *EvictionApplyConfig // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. 
func (b *EvictionApplyConfiguration) WithAPIVersion(value string) *EvictionApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -103,7 +103,7 @@ func (b *EvictionApplyConfiguration) WithAPIVersion(value string) *EvictionApply // If called multiple times, the Name field is set to the value of the last call. func (b *EvictionApplyConfiguration) WithName(value string) *EvictionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -112,7 +112,7 @@ func (b *EvictionApplyConfiguration) WithName(value string) *EvictionApplyConfig // If called multiple times, the GenerateName field is set to the value of the last call. func (b *EvictionApplyConfiguration) WithGenerateName(value string) *EvictionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -121,7 +121,7 @@ func (b *EvictionApplyConfiguration) WithGenerateName(value string) *EvictionApp // If called multiple times, the Namespace field is set to the value of the last call. func (b *EvictionApplyConfiguration) WithNamespace(value string) *EvictionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -130,7 +130,7 @@ func (b *EvictionApplyConfiguration) WithNamespace(value string) *EvictionApplyC // If called multiple times, the UID field is set to the value of the last call. func (b *EvictionApplyConfiguration) WithUID(value types.UID) *EvictionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -139,7 +139,7 @@ func (b *EvictionApplyConfiguration) WithUID(value types.UID) *EvictionApplyConf // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *EvictionApplyConfiguration) WithResourceVersion(value string) *EvictionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -148,7 +148,7 @@ func (b *EvictionApplyConfiguration) WithResourceVersion(value string) *Eviction // If called multiple times, the Generation field is set to the value of the last call. func (b *EvictionApplyConfiguration) WithGeneration(value int64) *EvictionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } @@ -157,7 +157,7 @@ func (b *EvictionApplyConfiguration) WithGeneration(value int64) *EvictionApplyC // If called multiple times, the CreationTimestamp field is set to the value of the last call. func (b *EvictionApplyConfiguration) WithCreationTimestamp(value metav1.Time) *EvictionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } @@ -166,7 +166,7 @@ func (b *EvictionApplyConfiguration) WithCreationTimestamp(value metav1.Time) *E // If called multiple times, the DeletionTimestamp field is set to the value of the last call. 
func (b *EvictionApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *EvictionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -175,7 +175,7 @@ func (b *EvictionApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *E // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *EvictionApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *EvictionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -185,11 +185,11 @@ func (b *EvictionApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) // overwriting an existing map entries in Labels field with the same key. func (b *EvictionApplyConfiguration) WithLabels(entries map[string]string) *EvictionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -200,11 +200,11 @@ func (b *EvictionApplyConfiguration) WithLabels(entries map[string]string) *Evic // overwriting an existing map entries in Annotations field with the same key. func (b *EvictionApplyConfiguration) WithAnnotations(entries map[string]string) *EvictionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -218,7 +218,7 @@ func (b *EvictionApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerRefe if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -229,7 +229,7 @@ func (b *EvictionApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerRefe func (b *EvictionApplyConfiguration) WithFinalizers(values ...string) *EvictionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } @@ -251,5 +251,5 @@ func (b *EvictionApplyConfiguration) WithDeleteOptions(value *v1.DeleteOptionsAp // GetName retrieves the value of the Name field in the declarative configuration. 
func (b *EvictionApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/poddisruptionbudget.go b/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/poddisruptionbudget.go index 813b57bae..7743da76a 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/poddisruptionbudget.go +++ b/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/poddisruptionbudget.go @@ -87,7 +87,7 @@ func extractPodDisruptionBudget(podDisruptionBudget *policyv1beta1.PodDisruption // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *PodDisruptionBudgetApplyConfiguration) WithKind(value string) *PodDisruptionBudgetApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -95,7 +95,7 @@ func (b *PodDisruptionBudgetApplyConfiguration) WithKind(value string) *PodDisru // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *PodDisruptionBudgetApplyConfiguration) WithAPIVersion(value string) *PodDisruptionBudgetApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -104,7 +104,7 @@ func (b *PodDisruptionBudgetApplyConfiguration) WithAPIVersion(value string) *Po // If called multiple times, the Name field is set to the value of the last call. func (b *PodDisruptionBudgetApplyConfiguration) WithName(value string) *PodDisruptionBudgetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -113,7 +113,7 @@ func (b *PodDisruptionBudgetApplyConfiguration) WithName(value string) *PodDisru // If called multiple times, the GenerateName field is set to the value of the last call. func (b *PodDisruptionBudgetApplyConfiguration) WithGenerateName(value string) *PodDisruptionBudgetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -122,7 +122,7 @@ func (b *PodDisruptionBudgetApplyConfiguration) WithGenerateName(value string) * // If called multiple times, the Namespace field is set to the value of the last call. func (b *PodDisruptionBudgetApplyConfiguration) WithNamespace(value string) *PodDisruptionBudgetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -131,7 +131,7 @@ func (b *PodDisruptionBudgetApplyConfiguration) WithNamespace(value string) *Pod // If called multiple times, the UID field is set to the value of the last call. func (b *PodDisruptionBudgetApplyConfiguration) WithUID(value types.UID) *PodDisruptionBudgetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -140,7 +140,7 @@ func (b *PodDisruptionBudgetApplyConfiguration) WithUID(value types.UID) *PodDis // If called multiple times, the ResourceVersion field is set to the value of the last call. 
func (b *PodDisruptionBudgetApplyConfiguration) WithResourceVersion(value string) *PodDisruptionBudgetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -149,7 +149,7 @@ func (b *PodDisruptionBudgetApplyConfiguration) WithResourceVersion(value string // If called multiple times, the Generation field is set to the value of the last call. func (b *PodDisruptionBudgetApplyConfiguration) WithGeneration(value int64) *PodDisruptionBudgetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } @@ -158,7 +158,7 @@ func (b *PodDisruptionBudgetApplyConfiguration) WithGeneration(value int64) *Pod // If called multiple times, the CreationTimestamp field is set to the value of the last call. func (b *PodDisruptionBudgetApplyConfiguration) WithCreationTimestamp(value metav1.Time) *PodDisruptionBudgetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } @@ -167,7 +167,7 @@ func (b *PodDisruptionBudgetApplyConfiguration) WithCreationTimestamp(value meta // If called multiple times, the DeletionTimestamp field is set to the value of the last call. func (b *PodDisruptionBudgetApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *PodDisruptionBudgetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -176,7 +176,7 @@ func (b *PodDisruptionBudgetApplyConfiguration) WithDeletionTimestamp(value meta // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *PodDisruptionBudgetApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *PodDisruptionBudgetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -186,11 +186,11 @@ func (b *PodDisruptionBudgetApplyConfiguration) WithDeletionGracePeriodSeconds(v // overwriting an existing map entries in Labels field with the same key. func (b *PodDisruptionBudgetApplyConfiguration) WithLabels(entries map[string]string) *PodDisruptionBudgetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -201,11 +201,11 @@ func (b *PodDisruptionBudgetApplyConfiguration) WithLabels(entries map[string]st // overwriting an existing map entries in Annotations field with the same key. 
func (b *PodDisruptionBudgetApplyConfiguration) WithAnnotations(entries map[string]string) *PodDisruptionBudgetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -219,7 +219,7 @@ func (b *PodDisruptionBudgetApplyConfiguration) WithOwnerReferences(values ...*v if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -230,7 +230,7 @@ func (b *PodDisruptionBudgetApplyConfiguration) WithOwnerReferences(values ...*v func (b *PodDisruptionBudgetApplyConfiguration) WithFinalizers(values ...string) *PodDisruptionBudgetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } @@ -260,5 +260,5 @@ func (b *PodDisruptionBudgetApplyConfiguration) WithStatus(value *PodDisruptionB // GetName retrieves the value of the Name field in the declarative configuration. func (b *PodDisruptionBudgetApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/poddisruptionbudgetspec.go b/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/poddisruptionbudgetspec.go index 405f1148b..d8fecf7a3 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/poddisruptionbudgetspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/poddisruptionbudgetspec.go @@ -19,7 +19,7 @@ limitations under the License. package v1beta1 import ( - v1beta1 "k8s.io/api/policy/v1beta1" + policyv1beta1 "k8s.io/api/policy/v1beta1" intstr "k8s.io/apimachinery/pkg/util/intstr" v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) @@ -27,10 +27,10 @@ import ( // PodDisruptionBudgetSpecApplyConfiguration represents a declarative configuration of the PodDisruptionBudgetSpec type for use // with apply. 
type PodDisruptionBudgetSpecApplyConfiguration struct { - MinAvailable *intstr.IntOrString `json:"minAvailable,omitempty"` - Selector *v1.LabelSelectorApplyConfiguration `json:"selector,omitempty"` - MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty"` - UnhealthyPodEvictionPolicy *v1beta1.UnhealthyPodEvictionPolicyType `json:"unhealthyPodEvictionPolicy,omitempty"` + MinAvailable *intstr.IntOrString `json:"minAvailable,omitempty"` + Selector *v1.LabelSelectorApplyConfiguration `json:"selector,omitempty"` + MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty"` + UnhealthyPodEvictionPolicy *policyv1beta1.UnhealthyPodEvictionPolicyType `json:"unhealthyPodEvictionPolicy,omitempty"` } // PodDisruptionBudgetSpecApplyConfiguration constructs a declarative configuration of the PodDisruptionBudgetSpec type for use with @@ -66,7 +66,7 @@ func (b *PodDisruptionBudgetSpecApplyConfiguration) WithMaxUnavailable(value int // WithUnhealthyPodEvictionPolicy sets the UnhealthyPodEvictionPolicy field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the UnhealthyPodEvictionPolicy field is set to the value of the last call. -func (b *PodDisruptionBudgetSpecApplyConfiguration) WithUnhealthyPodEvictionPolicy(value v1beta1.UnhealthyPodEvictionPolicyType) *PodDisruptionBudgetSpecApplyConfiguration { +func (b *PodDisruptionBudgetSpecApplyConfiguration) WithUnhealthyPodEvictionPolicy(value policyv1beta1.UnhealthyPodEvictionPolicyType) *PodDisruptionBudgetSpecApplyConfiguration { b.UnhealthyPodEvictionPolicy = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/aggregationrule.go b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/aggregationrule.go index 5ae4dc37f..b7049a8ef 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/aggregationrule.go +++ b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/aggregationrule.go @@ -19,13 +19,13 @@ limitations under the License. package v1 import ( - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) // AggregationRuleApplyConfiguration represents a declarative configuration of the AggregationRule type for use // with apply. type AggregationRuleApplyConfiguration struct { - ClusterRoleSelectors []v1.LabelSelectorApplyConfiguration `json:"clusterRoleSelectors,omitempty"` + ClusterRoleSelectors []metav1.LabelSelectorApplyConfiguration `json:"clusterRoleSelectors,omitempty"` } // AggregationRuleApplyConfiguration constructs a declarative configuration of the AggregationRule type for use with @@ -37,7 +37,7 @@ func AggregationRule() *AggregationRuleApplyConfiguration { // WithClusterRoleSelectors adds the given value to the ClusterRoleSelectors field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the ClusterRoleSelectors field. 
-func (b *AggregationRuleApplyConfiguration) WithClusterRoleSelectors(values ...*v1.LabelSelectorApplyConfiguration) *AggregationRuleApplyConfiguration { +func (b *AggregationRuleApplyConfiguration) WithClusterRoleSelectors(values ...*metav1.LabelSelectorApplyConfiguration) *AggregationRuleApplyConfiguration { for i := range values { if values[i] == nil { panic("nil value passed to WithClusterRoleSelectors") diff --git a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/clusterrole.go b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/clusterrole.go index c5b0075ec..9b46fdbe9 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/clusterrole.go +++ b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/clusterrole.go @@ -19,21 +19,21 @@ limitations under the License. package v1 import ( - apirbacv1 "k8s.io/api/rbac/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + rbacv1 "k8s.io/api/rbac/v1" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" internal "k8s.io/client-go/applyconfigurations/internal" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) // ClusterRoleApplyConfiguration represents a declarative configuration of the ClusterRole type for use // with apply. type ClusterRoleApplyConfiguration struct { - v1.TypeMetaApplyConfiguration `json:",inline"` - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Rules []PolicyRuleApplyConfiguration `json:"rules,omitempty"` - AggregationRule *AggregationRuleApplyConfiguration `json:"aggregationRule,omitempty"` + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Rules []PolicyRuleApplyConfiguration `json:"rules,omitempty"` + AggregationRule *AggregationRuleApplyConfiguration `json:"aggregationRule,omitempty"` } // ClusterRole constructs a declarative configuration of the ClusterRole type for use with @@ -57,18 +57,18 @@ func ClusterRole(name string) *ClusterRoleApplyConfiguration { // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! -func ExtractClusterRole(clusterRole *apirbacv1.ClusterRole, fieldManager string) (*ClusterRoleApplyConfiguration, error) { +func ExtractClusterRole(clusterRole *rbacv1.ClusterRole, fieldManager string) (*ClusterRoleApplyConfiguration, error) { return extractClusterRole(clusterRole, fieldManager, "") } // ExtractClusterRoleStatus is the same as ExtractClusterRole except // that it extracts the status subresource applied configuration. // Experimental! 
-func ExtractClusterRoleStatus(clusterRole *apirbacv1.ClusterRole, fieldManager string) (*ClusterRoleApplyConfiguration, error) { +func ExtractClusterRoleStatus(clusterRole *rbacv1.ClusterRole, fieldManager string) (*ClusterRoleApplyConfiguration, error) { return extractClusterRole(clusterRole, fieldManager, "status") } -func extractClusterRole(clusterRole *apirbacv1.ClusterRole, fieldManager string, subresource string) (*ClusterRoleApplyConfiguration, error) { +func extractClusterRole(clusterRole *rbacv1.ClusterRole, fieldManager string, subresource string) (*ClusterRoleApplyConfiguration, error) { b := &ClusterRoleApplyConfiguration{} err := managedfields.ExtractInto(clusterRole, internal.Parser().Type("io.k8s.api.rbac.v1.ClusterRole"), fieldManager, b, subresource) if err != nil { @@ -85,7 +85,7 @@ func extractClusterRole(clusterRole *apirbacv1.ClusterRole, fieldManager string, // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *ClusterRoleApplyConfiguration) WithKind(value string) *ClusterRoleApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -93,7 +93,7 @@ func (b *ClusterRoleApplyConfiguration) WithKind(value string) *ClusterRoleApply // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *ClusterRoleApplyConfiguration) WithAPIVersion(value string) *ClusterRoleApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -102,7 +102,7 @@ func (b *ClusterRoleApplyConfiguration) WithAPIVersion(value string) *ClusterRol // If called multiple times, the Name field is set to the value of the last call. func (b *ClusterRoleApplyConfiguration) WithName(value string) *ClusterRoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -111,7 +111,7 @@ func (b *ClusterRoleApplyConfiguration) WithName(value string) *ClusterRoleApply // If called multiple times, the GenerateName field is set to the value of the last call. func (b *ClusterRoleApplyConfiguration) WithGenerateName(value string) *ClusterRoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -120,7 +120,7 @@ func (b *ClusterRoleApplyConfiguration) WithGenerateName(value string) *ClusterR // If called multiple times, the Namespace field is set to the value of the last call. func (b *ClusterRoleApplyConfiguration) WithNamespace(value string) *ClusterRoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -129,7 +129,7 @@ func (b *ClusterRoleApplyConfiguration) WithNamespace(value string) *ClusterRole // If called multiple times, the UID field is set to the value of the last call. 
func (b *ClusterRoleApplyConfiguration) WithUID(value types.UID) *ClusterRoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -138,7 +138,7 @@ func (b *ClusterRoleApplyConfiguration) WithUID(value types.UID) *ClusterRoleApp // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *ClusterRoleApplyConfiguration) WithResourceVersion(value string) *ClusterRoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -147,25 +147,25 @@ func (b *ClusterRoleApplyConfiguration) WithResourceVersion(value string) *Clust // If called multiple times, the Generation field is set to the value of the last call. func (b *ClusterRoleApplyConfiguration) WithGeneration(value int64) *ClusterRoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CreationTimestamp field is set to the value of the last call. -func (b *ClusterRoleApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ClusterRoleApplyConfiguration { +func (b *ClusterRoleApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *ClusterRoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. -func (b *ClusterRoleApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ClusterRoleApplyConfiguration { +func (b *ClusterRoleApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *ClusterRoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -174,7 +174,7 @@ func (b *ClusterRoleApplyConfiguration) WithDeletionTimestamp(value metav1.Time) // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *ClusterRoleApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ClusterRoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -184,11 +184,11 @@ func (b *ClusterRoleApplyConfiguration) WithDeletionGracePeriodSeconds(value int // overwriting an existing map entries in Labels field with the same key. 
func (b *ClusterRoleApplyConfiguration) WithLabels(entries map[string]string) *ClusterRoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -199,11 +199,11 @@ func (b *ClusterRoleApplyConfiguration) WithLabels(entries map[string]string) *C // overwriting an existing map entries in Annotations field with the same key. func (b *ClusterRoleApplyConfiguration) WithAnnotations(entries map[string]string) *ClusterRoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -211,13 +211,13 @@ func (b *ClusterRoleApplyConfiguration) WithAnnotations(entries map[string]strin // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the OwnerReferences field. -func (b *ClusterRoleApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ClusterRoleApplyConfiguration { +func (b *ClusterRoleApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *ClusterRoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -228,14 +228,14 @@ func (b *ClusterRoleApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerR func (b *ClusterRoleApplyConfiguration) WithFinalizers(values ...string) *ClusterRoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } func (b *ClusterRoleApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} } } @@ -263,5 +263,5 @@ func (b *ClusterRoleApplyConfiguration) WithAggregationRule(value *AggregationRu // GetName retrieves the value of the Name field in the declarative configuration. 
func (b *ClusterRoleApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/clusterrolebinding.go b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/clusterrolebinding.go index 91a9d5df3..7775bff0f 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/clusterrolebinding.go +++ b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/clusterrolebinding.go @@ -19,21 +19,21 @@ limitations under the License. package v1 import ( - apirbacv1 "k8s.io/api/rbac/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + rbacv1 "k8s.io/api/rbac/v1" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" internal "k8s.io/client-go/applyconfigurations/internal" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) // ClusterRoleBindingApplyConfiguration represents a declarative configuration of the ClusterRoleBinding type for use // with apply. type ClusterRoleBindingApplyConfiguration struct { - v1.TypeMetaApplyConfiguration `json:",inline"` - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Subjects []SubjectApplyConfiguration `json:"subjects,omitempty"` - RoleRef *RoleRefApplyConfiguration `json:"roleRef,omitempty"` + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Subjects []SubjectApplyConfiguration `json:"subjects,omitempty"` + RoleRef *RoleRefApplyConfiguration `json:"roleRef,omitempty"` } // ClusterRoleBinding constructs a declarative configuration of the ClusterRoleBinding type for use with @@ -57,18 +57,18 @@ func ClusterRoleBinding(name string) *ClusterRoleBindingApplyConfiguration { // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! -func ExtractClusterRoleBinding(clusterRoleBinding *apirbacv1.ClusterRoleBinding, fieldManager string) (*ClusterRoleBindingApplyConfiguration, error) { +func ExtractClusterRoleBinding(clusterRoleBinding *rbacv1.ClusterRoleBinding, fieldManager string) (*ClusterRoleBindingApplyConfiguration, error) { return extractClusterRoleBinding(clusterRoleBinding, fieldManager, "") } // ExtractClusterRoleBindingStatus is the same as ExtractClusterRoleBinding except // that it extracts the status subresource applied configuration. // Experimental! 
-func ExtractClusterRoleBindingStatus(clusterRoleBinding *apirbacv1.ClusterRoleBinding, fieldManager string) (*ClusterRoleBindingApplyConfiguration, error) { +func ExtractClusterRoleBindingStatus(clusterRoleBinding *rbacv1.ClusterRoleBinding, fieldManager string) (*ClusterRoleBindingApplyConfiguration, error) { return extractClusterRoleBinding(clusterRoleBinding, fieldManager, "status") } -func extractClusterRoleBinding(clusterRoleBinding *apirbacv1.ClusterRoleBinding, fieldManager string, subresource string) (*ClusterRoleBindingApplyConfiguration, error) { +func extractClusterRoleBinding(clusterRoleBinding *rbacv1.ClusterRoleBinding, fieldManager string, subresource string) (*ClusterRoleBindingApplyConfiguration, error) { b := &ClusterRoleBindingApplyConfiguration{} err := managedfields.ExtractInto(clusterRoleBinding, internal.Parser().Type("io.k8s.api.rbac.v1.ClusterRoleBinding"), fieldManager, b, subresource) if err != nil { @@ -85,7 +85,7 @@ func extractClusterRoleBinding(clusterRoleBinding *apirbacv1.ClusterRoleBinding, // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *ClusterRoleBindingApplyConfiguration) WithKind(value string) *ClusterRoleBindingApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -93,7 +93,7 @@ func (b *ClusterRoleBindingApplyConfiguration) WithKind(value string) *ClusterRo // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *ClusterRoleBindingApplyConfiguration) WithAPIVersion(value string) *ClusterRoleBindingApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -102,7 +102,7 @@ func (b *ClusterRoleBindingApplyConfiguration) WithAPIVersion(value string) *Clu // If called multiple times, the Name field is set to the value of the last call. func (b *ClusterRoleBindingApplyConfiguration) WithName(value string) *ClusterRoleBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -111,7 +111,7 @@ func (b *ClusterRoleBindingApplyConfiguration) WithName(value string) *ClusterRo // If called multiple times, the GenerateName field is set to the value of the last call. func (b *ClusterRoleBindingApplyConfiguration) WithGenerateName(value string) *ClusterRoleBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -120,7 +120,7 @@ func (b *ClusterRoleBindingApplyConfiguration) WithGenerateName(value string) *C // If called multiple times, the Namespace field is set to the value of the last call. func (b *ClusterRoleBindingApplyConfiguration) WithNamespace(value string) *ClusterRoleBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -129,7 +129,7 @@ func (b *ClusterRoleBindingApplyConfiguration) WithNamespace(value string) *Clus // If called multiple times, the UID field is set to the value of the last call. 
func (b *ClusterRoleBindingApplyConfiguration) WithUID(value types.UID) *ClusterRoleBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -138,7 +138,7 @@ func (b *ClusterRoleBindingApplyConfiguration) WithUID(value types.UID) *Cluster // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *ClusterRoleBindingApplyConfiguration) WithResourceVersion(value string) *ClusterRoleBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -147,25 +147,25 @@ func (b *ClusterRoleBindingApplyConfiguration) WithResourceVersion(value string) // If called multiple times, the Generation field is set to the value of the last call. func (b *ClusterRoleBindingApplyConfiguration) WithGeneration(value int64) *ClusterRoleBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CreationTimestamp field is set to the value of the last call. -func (b *ClusterRoleBindingApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ClusterRoleBindingApplyConfiguration { +func (b *ClusterRoleBindingApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *ClusterRoleBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. -func (b *ClusterRoleBindingApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ClusterRoleBindingApplyConfiguration { +func (b *ClusterRoleBindingApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *ClusterRoleBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -174,7 +174,7 @@ func (b *ClusterRoleBindingApplyConfiguration) WithDeletionTimestamp(value metav // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *ClusterRoleBindingApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ClusterRoleBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -184,11 +184,11 @@ func (b *ClusterRoleBindingApplyConfiguration) WithDeletionGracePeriodSeconds(va // overwriting an existing map entries in Labels field with the same key. 
func (b *ClusterRoleBindingApplyConfiguration) WithLabels(entries map[string]string) *ClusterRoleBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -199,11 +199,11 @@ func (b *ClusterRoleBindingApplyConfiguration) WithLabels(entries map[string]str // overwriting an existing map entries in Annotations field with the same key. func (b *ClusterRoleBindingApplyConfiguration) WithAnnotations(entries map[string]string) *ClusterRoleBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -211,13 +211,13 @@ func (b *ClusterRoleBindingApplyConfiguration) WithAnnotations(entries map[strin // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the OwnerReferences field. -func (b *ClusterRoleBindingApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ClusterRoleBindingApplyConfiguration { +func (b *ClusterRoleBindingApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *ClusterRoleBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -228,14 +228,14 @@ func (b *ClusterRoleBindingApplyConfiguration) WithOwnerReferences(values ...*v1 func (b *ClusterRoleBindingApplyConfiguration) WithFinalizers(values ...string) *ClusterRoleBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } func (b *ClusterRoleBindingApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} } } @@ -263,5 +263,5 @@ func (b *ClusterRoleBindingApplyConfiguration) WithRoleRef(value *RoleRefApplyCo // GetName retrieves the value of the Name field in the declarative configuration. 
func (b *ClusterRoleBindingApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/role.go b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/role.go index b51f90426..b592753f6 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/role.go +++ b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/role.go @@ -19,20 +19,20 @@ limitations under the License. package v1 import ( - apirbacv1 "k8s.io/api/rbac/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + rbacv1 "k8s.io/api/rbac/v1" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" internal "k8s.io/client-go/applyconfigurations/internal" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) // RoleApplyConfiguration represents a declarative configuration of the Role type for use // with apply. type RoleApplyConfiguration struct { - v1.TypeMetaApplyConfiguration `json:",inline"` - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Rules []PolicyRuleApplyConfiguration `json:"rules,omitempty"` + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Rules []PolicyRuleApplyConfiguration `json:"rules,omitempty"` } // Role constructs a declarative configuration of the Role type for use with @@ -57,18 +57,18 @@ func Role(name, namespace string) *RoleApplyConfiguration { // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! -func ExtractRole(role *apirbacv1.Role, fieldManager string) (*RoleApplyConfiguration, error) { +func ExtractRole(role *rbacv1.Role, fieldManager string) (*RoleApplyConfiguration, error) { return extractRole(role, fieldManager, "") } // ExtractRoleStatus is the same as ExtractRole except // that it extracts the status subresource applied configuration. // Experimental! -func ExtractRoleStatus(role *apirbacv1.Role, fieldManager string) (*RoleApplyConfiguration, error) { +func ExtractRoleStatus(role *rbacv1.Role, fieldManager string) (*RoleApplyConfiguration, error) { return extractRole(role, fieldManager, "status") } -func extractRole(role *apirbacv1.Role, fieldManager string, subresource string) (*RoleApplyConfiguration, error) { +func extractRole(role *rbacv1.Role, fieldManager string, subresource string) (*RoleApplyConfiguration, error) { b := &RoleApplyConfiguration{} err := managedfields.ExtractInto(role, internal.Parser().Type("io.k8s.api.rbac.v1.Role"), fieldManager, b, subresource) if err != nil { @@ -86,7 +86,7 @@ func extractRole(role *apirbacv1.Role, fieldManager string, subresource string) // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *RoleApplyConfiguration) WithKind(value string) *RoleApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -94,7 +94,7 @@ func (b *RoleApplyConfiguration) WithKind(value string) *RoleApplyConfiguration // and returns the receiver, so that objects can be built by chaining "With" function invocations. 
// If called multiple times, the APIVersion field is set to the value of the last call. func (b *RoleApplyConfiguration) WithAPIVersion(value string) *RoleApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -103,7 +103,7 @@ func (b *RoleApplyConfiguration) WithAPIVersion(value string) *RoleApplyConfigur // If called multiple times, the Name field is set to the value of the last call. func (b *RoleApplyConfiguration) WithName(value string) *RoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -112,7 +112,7 @@ func (b *RoleApplyConfiguration) WithName(value string) *RoleApplyConfiguration // If called multiple times, the GenerateName field is set to the value of the last call. func (b *RoleApplyConfiguration) WithGenerateName(value string) *RoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -121,7 +121,7 @@ func (b *RoleApplyConfiguration) WithGenerateName(value string) *RoleApplyConfig // If called multiple times, the Namespace field is set to the value of the last call. func (b *RoleApplyConfiguration) WithNamespace(value string) *RoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -130,7 +130,7 @@ func (b *RoleApplyConfiguration) WithNamespace(value string) *RoleApplyConfigura // If called multiple times, the UID field is set to the value of the last call. func (b *RoleApplyConfiguration) WithUID(value types.UID) *RoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -139,7 +139,7 @@ func (b *RoleApplyConfiguration) WithUID(value types.UID) *RoleApplyConfiguratio // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *RoleApplyConfiguration) WithResourceVersion(value string) *RoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -148,25 +148,25 @@ func (b *RoleApplyConfiguration) WithResourceVersion(value string) *RoleApplyCon // If called multiple times, the Generation field is set to the value of the last call. func (b *RoleApplyConfiguration) WithGeneration(value int64) *RoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CreationTimestamp field is set to the value of the last call. 
-func (b *RoleApplyConfiguration) WithCreationTimestamp(value metav1.Time) *RoleApplyConfiguration { +func (b *RoleApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *RoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. -func (b *RoleApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *RoleApplyConfiguration { +func (b *RoleApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *RoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -175,7 +175,7 @@ func (b *RoleApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *RoleA // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *RoleApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *RoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -185,11 +185,11 @@ func (b *RoleApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *Ro // overwriting an existing map entries in Labels field with the same key. func (b *RoleApplyConfiguration) WithLabels(entries map[string]string) *RoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -200,11 +200,11 @@ func (b *RoleApplyConfiguration) WithLabels(entries map[string]string) *RoleAppl // overwriting an existing map entries in Annotations field with the same key. func (b *RoleApplyConfiguration) WithAnnotations(entries map[string]string) *RoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -212,13 +212,13 @@ func (b *RoleApplyConfiguration) WithAnnotations(entries map[string]string) *Rol // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the OwnerReferences field. 
-func (b *RoleApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *RoleApplyConfiguration { +func (b *RoleApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *RoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -229,14 +229,14 @@ func (b *RoleApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenc func (b *RoleApplyConfiguration) WithFinalizers(values ...string) *RoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } func (b *RoleApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} } } @@ -256,5 +256,5 @@ func (b *RoleApplyConfiguration) WithRules(values ...*PolicyRuleApplyConfigurati // GetName retrieves the value of the Name field in the declarative configuration. func (b *RoleApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/rolebinding.go b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/rolebinding.go index e59c8e6d3..32f12e87c 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/rolebinding.go +++ b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/rolebinding.go @@ -19,21 +19,21 @@ limitations under the License. package v1 import ( - apirbacv1 "k8s.io/api/rbac/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + rbacv1 "k8s.io/api/rbac/v1" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" internal "k8s.io/client-go/applyconfigurations/internal" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) // RoleBindingApplyConfiguration represents a declarative configuration of the RoleBinding type for use // with apply. type RoleBindingApplyConfiguration struct { - v1.TypeMetaApplyConfiguration `json:",inline"` - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Subjects []SubjectApplyConfiguration `json:"subjects,omitempty"` - RoleRef *RoleRefApplyConfiguration `json:"roleRef,omitempty"` + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Subjects []SubjectApplyConfiguration `json:"subjects,omitempty"` + RoleRef *RoleRefApplyConfiguration `json:"roleRef,omitempty"` } // RoleBinding constructs a declarative configuration of the RoleBinding type for use with @@ -58,18 +58,18 @@ func RoleBinding(name, namespace string) *RoleBindingApplyConfiguration { // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! 
-func ExtractRoleBinding(roleBinding *apirbacv1.RoleBinding, fieldManager string) (*RoleBindingApplyConfiguration, error) { +func ExtractRoleBinding(roleBinding *rbacv1.RoleBinding, fieldManager string) (*RoleBindingApplyConfiguration, error) { return extractRoleBinding(roleBinding, fieldManager, "") } // ExtractRoleBindingStatus is the same as ExtractRoleBinding except // that it extracts the status subresource applied configuration. // Experimental! -func ExtractRoleBindingStatus(roleBinding *apirbacv1.RoleBinding, fieldManager string) (*RoleBindingApplyConfiguration, error) { +func ExtractRoleBindingStatus(roleBinding *rbacv1.RoleBinding, fieldManager string) (*RoleBindingApplyConfiguration, error) { return extractRoleBinding(roleBinding, fieldManager, "status") } -func extractRoleBinding(roleBinding *apirbacv1.RoleBinding, fieldManager string, subresource string) (*RoleBindingApplyConfiguration, error) { +func extractRoleBinding(roleBinding *rbacv1.RoleBinding, fieldManager string, subresource string) (*RoleBindingApplyConfiguration, error) { b := &RoleBindingApplyConfiguration{} err := managedfields.ExtractInto(roleBinding, internal.Parser().Type("io.k8s.api.rbac.v1.RoleBinding"), fieldManager, b, subresource) if err != nil { @@ -87,7 +87,7 @@ func extractRoleBinding(roleBinding *apirbacv1.RoleBinding, fieldManager string, // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *RoleBindingApplyConfiguration) WithKind(value string) *RoleBindingApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -95,7 +95,7 @@ func (b *RoleBindingApplyConfiguration) WithKind(value string) *RoleBindingApply // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *RoleBindingApplyConfiguration) WithAPIVersion(value string) *RoleBindingApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -104,7 +104,7 @@ func (b *RoleBindingApplyConfiguration) WithAPIVersion(value string) *RoleBindin // If called multiple times, the Name field is set to the value of the last call. func (b *RoleBindingApplyConfiguration) WithName(value string) *RoleBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -113,7 +113,7 @@ func (b *RoleBindingApplyConfiguration) WithName(value string) *RoleBindingApply // If called multiple times, the GenerateName field is set to the value of the last call. func (b *RoleBindingApplyConfiguration) WithGenerateName(value string) *RoleBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -122,7 +122,7 @@ func (b *RoleBindingApplyConfiguration) WithGenerateName(value string) *RoleBind // If called multiple times, the Namespace field is set to the value of the last call. 
func (b *RoleBindingApplyConfiguration) WithNamespace(value string) *RoleBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -131,7 +131,7 @@ func (b *RoleBindingApplyConfiguration) WithNamespace(value string) *RoleBinding // If called multiple times, the UID field is set to the value of the last call. func (b *RoleBindingApplyConfiguration) WithUID(value types.UID) *RoleBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -140,7 +140,7 @@ func (b *RoleBindingApplyConfiguration) WithUID(value types.UID) *RoleBindingApp // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *RoleBindingApplyConfiguration) WithResourceVersion(value string) *RoleBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -149,25 +149,25 @@ func (b *RoleBindingApplyConfiguration) WithResourceVersion(value string) *RoleB // If called multiple times, the Generation field is set to the value of the last call. func (b *RoleBindingApplyConfiguration) WithGeneration(value int64) *RoleBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CreationTimestamp field is set to the value of the last call. -func (b *RoleBindingApplyConfiguration) WithCreationTimestamp(value metav1.Time) *RoleBindingApplyConfiguration { +func (b *RoleBindingApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *RoleBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. -func (b *RoleBindingApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *RoleBindingApplyConfiguration { +func (b *RoleBindingApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *RoleBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -176,7 +176,7 @@ func (b *RoleBindingApplyConfiguration) WithDeletionTimestamp(value metav1.Time) // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *RoleBindingApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *RoleBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -186,11 +186,11 @@ func (b *RoleBindingApplyConfiguration) WithDeletionGracePeriodSeconds(value int // overwriting an existing map entries in Labels field with the same key. 
func (b *RoleBindingApplyConfiguration) WithLabels(entries map[string]string) *RoleBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -201,11 +201,11 @@ func (b *RoleBindingApplyConfiguration) WithLabels(entries map[string]string) *R // overwriting an existing map entries in Annotations field with the same key. func (b *RoleBindingApplyConfiguration) WithAnnotations(entries map[string]string) *RoleBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -213,13 +213,13 @@ func (b *RoleBindingApplyConfiguration) WithAnnotations(entries map[string]strin // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the OwnerReferences field. -func (b *RoleBindingApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *RoleBindingApplyConfiguration { +func (b *RoleBindingApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *RoleBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -230,14 +230,14 @@ func (b *RoleBindingApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerR func (b *RoleBindingApplyConfiguration) WithFinalizers(values ...string) *RoleBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } func (b *RoleBindingApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} } } @@ -265,5 +265,5 @@ func (b *RoleBindingApplyConfiguration) WithRoleRef(value *RoleRefApplyConfigura // GetName retrieves the value of the Name field in the declarative configuration. 
func (b *RoleBindingApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/clusterrole.go b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/clusterrole.go index dc0e34e53..ecc75d340 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/clusterrole.go +++ b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/clusterrole.go @@ -85,7 +85,7 @@ func extractClusterRole(clusterRole *rbacv1alpha1.ClusterRole, fieldManager stri // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *ClusterRoleApplyConfiguration) WithKind(value string) *ClusterRoleApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -93,7 +93,7 @@ func (b *ClusterRoleApplyConfiguration) WithKind(value string) *ClusterRoleApply // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *ClusterRoleApplyConfiguration) WithAPIVersion(value string) *ClusterRoleApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -102,7 +102,7 @@ func (b *ClusterRoleApplyConfiguration) WithAPIVersion(value string) *ClusterRol // If called multiple times, the Name field is set to the value of the last call. func (b *ClusterRoleApplyConfiguration) WithName(value string) *ClusterRoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -111,7 +111,7 @@ func (b *ClusterRoleApplyConfiguration) WithName(value string) *ClusterRoleApply // If called multiple times, the GenerateName field is set to the value of the last call. func (b *ClusterRoleApplyConfiguration) WithGenerateName(value string) *ClusterRoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -120,7 +120,7 @@ func (b *ClusterRoleApplyConfiguration) WithGenerateName(value string) *ClusterR // If called multiple times, the Namespace field is set to the value of the last call. func (b *ClusterRoleApplyConfiguration) WithNamespace(value string) *ClusterRoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -129,7 +129,7 @@ func (b *ClusterRoleApplyConfiguration) WithNamespace(value string) *ClusterRole // If called multiple times, the UID field is set to the value of the last call. func (b *ClusterRoleApplyConfiguration) WithUID(value types.UID) *ClusterRoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -138,7 +138,7 @@ func (b *ClusterRoleApplyConfiguration) WithUID(value types.UID) *ClusterRoleApp // If called multiple times, the ResourceVersion field is set to the value of the last call. 
func (b *ClusterRoleApplyConfiguration) WithResourceVersion(value string) *ClusterRoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -147,7 +147,7 @@ func (b *ClusterRoleApplyConfiguration) WithResourceVersion(value string) *Clust // If called multiple times, the Generation field is set to the value of the last call. func (b *ClusterRoleApplyConfiguration) WithGeneration(value int64) *ClusterRoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } @@ -156,7 +156,7 @@ func (b *ClusterRoleApplyConfiguration) WithGeneration(value int64) *ClusterRole // If called multiple times, the CreationTimestamp field is set to the value of the last call. func (b *ClusterRoleApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ClusterRoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } @@ -165,7 +165,7 @@ func (b *ClusterRoleApplyConfiguration) WithCreationTimestamp(value metav1.Time) // If called multiple times, the DeletionTimestamp field is set to the value of the last call. func (b *ClusterRoleApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ClusterRoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -174,7 +174,7 @@ func (b *ClusterRoleApplyConfiguration) WithDeletionTimestamp(value metav1.Time) // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *ClusterRoleApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ClusterRoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -184,11 +184,11 @@ func (b *ClusterRoleApplyConfiguration) WithDeletionGracePeriodSeconds(value int // overwriting an existing map entries in Labels field with the same key. func (b *ClusterRoleApplyConfiguration) WithLabels(entries map[string]string) *ClusterRoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -199,11 +199,11 @@ func (b *ClusterRoleApplyConfiguration) WithLabels(entries map[string]string) *C // overwriting an existing map entries in Annotations field with the same key. 
func (b *ClusterRoleApplyConfiguration) WithAnnotations(entries map[string]string) *ClusterRoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -217,7 +217,7 @@ func (b *ClusterRoleApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerR if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -228,7 +228,7 @@ func (b *ClusterRoleApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerR func (b *ClusterRoleApplyConfiguration) WithFinalizers(values ...string) *ClusterRoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } @@ -263,5 +263,5 @@ func (b *ClusterRoleApplyConfiguration) WithAggregationRule(value *AggregationRu // GetName retrieves the value of the Name field in the declarative configuration. func (b *ClusterRoleApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/clusterrolebinding.go b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/clusterrolebinding.go index d3c12ec50..3b8c43a39 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/clusterrolebinding.go +++ b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/clusterrolebinding.go @@ -85,7 +85,7 @@ func extractClusterRoleBinding(clusterRoleBinding *rbacv1alpha1.ClusterRoleBindi // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *ClusterRoleBindingApplyConfiguration) WithKind(value string) *ClusterRoleBindingApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -93,7 +93,7 @@ func (b *ClusterRoleBindingApplyConfiguration) WithKind(value string) *ClusterRo // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *ClusterRoleBindingApplyConfiguration) WithAPIVersion(value string) *ClusterRoleBindingApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -102,7 +102,7 @@ func (b *ClusterRoleBindingApplyConfiguration) WithAPIVersion(value string) *Clu // If called multiple times, the Name field is set to the value of the last call. 
func (b *ClusterRoleBindingApplyConfiguration) WithName(value string) *ClusterRoleBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -111,7 +111,7 @@ func (b *ClusterRoleBindingApplyConfiguration) WithName(value string) *ClusterRo // If called multiple times, the GenerateName field is set to the value of the last call. func (b *ClusterRoleBindingApplyConfiguration) WithGenerateName(value string) *ClusterRoleBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -120,7 +120,7 @@ func (b *ClusterRoleBindingApplyConfiguration) WithGenerateName(value string) *C // If called multiple times, the Namespace field is set to the value of the last call. func (b *ClusterRoleBindingApplyConfiguration) WithNamespace(value string) *ClusterRoleBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -129,7 +129,7 @@ func (b *ClusterRoleBindingApplyConfiguration) WithNamespace(value string) *Clus // If called multiple times, the UID field is set to the value of the last call. func (b *ClusterRoleBindingApplyConfiguration) WithUID(value types.UID) *ClusterRoleBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -138,7 +138,7 @@ func (b *ClusterRoleBindingApplyConfiguration) WithUID(value types.UID) *Cluster // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *ClusterRoleBindingApplyConfiguration) WithResourceVersion(value string) *ClusterRoleBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -147,7 +147,7 @@ func (b *ClusterRoleBindingApplyConfiguration) WithResourceVersion(value string) // If called multiple times, the Generation field is set to the value of the last call. func (b *ClusterRoleBindingApplyConfiguration) WithGeneration(value int64) *ClusterRoleBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } @@ -156,7 +156,7 @@ func (b *ClusterRoleBindingApplyConfiguration) WithGeneration(value int64) *Clus // If called multiple times, the CreationTimestamp field is set to the value of the last call. func (b *ClusterRoleBindingApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ClusterRoleBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } @@ -165,7 +165,7 @@ func (b *ClusterRoleBindingApplyConfiguration) WithCreationTimestamp(value metav // If called multiple times, the DeletionTimestamp field is set to the value of the last call. 
func (b *ClusterRoleBindingApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ClusterRoleBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -174,7 +174,7 @@ func (b *ClusterRoleBindingApplyConfiguration) WithDeletionTimestamp(value metav // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *ClusterRoleBindingApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ClusterRoleBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -184,11 +184,11 @@ func (b *ClusterRoleBindingApplyConfiguration) WithDeletionGracePeriodSeconds(va // overwriting an existing map entries in Labels field with the same key. func (b *ClusterRoleBindingApplyConfiguration) WithLabels(entries map[string]string) *ClusterRoleBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -199,11 +199,11 @@ func (b *ClusterRoleBindingApplyConfiguration) WithLabels(entries map[string]str // overwriting an existing map entries in Annotations field with the same key. func (b *ClusterRoleBindingApplyConfiguration) WithAnnotations(entries map[string]string) *ClusterRoleBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -217,7 +217,7 @@ func (b *ClusterRoleBindingApplyConfiguration) WithOwnerReferences(values ...*v1 if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -228,7 +228,7 @@ func (b *ClusterRoleBindingApplyConfiguration) WithOwnerReferences(values ...*v1 func (b *ClusterRoleBindingApplyConfiguration) WithFinalizers(values ...string) *ClusterRoleBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } @@ -263,5 +263,5 @@ func (b *ClusterRoleBindingApplyConfiguration) WithRoleRef(value *RoleRefApplyCo // GetName retrieves the value of the Name field in the declarative configuration. 
func (b *ClusterRoleBindingApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/role.go b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/role.go index db0a4f716..3fbd98543 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/role.go +++ b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/role.go @@ -86,7 +86,7 @@ func extractRole(role *rbacv1alpha1.Role, fieldManager string, subresource strin // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *RoleApplyConfiguration) WithKind(value string) *RoleApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -94,7 +94,7 @@ func (b *RoleApplyConfiguration) WithKind(value string) *RoleApplyConfiguration // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *RoleApplyConfiguration) WithAPIVersion(value string) *RoleApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -103,7 +103,7 @@ func (b *RoleApplyConfiguration) WithAPIVersion(value string) *RoleApplyConfigur // If called multiple times, the Name field is set to the value of the last call. func (b *RoleApplyConfiguration) WithName(value string) *RoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -112,7 +112,7 @@ func (b *RoleApplyConfiguration) WithName(value string) *RoleApplyConfiguration // If called multiple times, the GenerateName field is set to the value of the last call. func (b *RoleApplyConfiguration) WithGenerateName(value string) *RoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -121,7 +121,7 @@ func (b *RoleApplyConfiguration) WithGenerateName(value string) *RoleApplyConfig // If called multiple times, the Namespace field is set to the value of the last call. func (b *RoleApplyConfiguration) WithNamespace(value string) *RoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -130,7 +130,7 @@ func (b *RoleApplyConfiguration) WithNamespace(value string) *RoleApplyConfigura // If called multiple times, the UID field is set to the value of the last call. func (b *RoleApplyConfiguration) WithUID(value types.UID) *RoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -139,7 +139,7 @@ func (b *RoleApplyConfiguration) WithUID(value types.UID) *RoleApplyConfiguratio // If called multiple times, the ResourceVersion field is set to the value of the last call. 
func (b *RoleApplyConfiguration) WithResourceVersion(value string) *RoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -148,7 +148,7 @@ func (b *RoleApplyConfiguration) WithResourceVersion(value string) *RoleApplyCon // If called multiple times, the Generation field is set to the value of the last call. func (b *RoleApplyConfiguration) WithGeneration(value int64) *RoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } @@ -157,7 +157,7 @@ func (b *RoleApplyConfiguration) WithGeneration(value int64) *RoleApplyConfigura // If called multiple times, the CreationTimestamp field is set to the value of the last call. func (b *RoleApplyConfiguration) WithCreationTimestamp(value metav1.Time) *RoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } @@ -166,7 +166,7 @@ func (b *RoleApplyConfiguration) WithCreationTimestamp(value metav1.Time) *RoleA // If called multiple times, the DeletionTimestamp field is set to the value of the last call. func (b *RoleApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *RoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -175,7 +175,7 @@ func (b *RoleApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *RoleA // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *RoleApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *RoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -185,11 +185,11 @@ func (b *RoleApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *Ro // overwriting an existing map entries in Labels field with the same key. func (b *RoleApplyConfiguration) WithLabels(entries map[string]string) *RoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -200,11 +200,11 @@ func (b *RoleApplyConfiguration) WithLabels(entries map[string]string) *RoleAppl // overwriting an existing map entries in Annotations field with the same key. 
func (b *RoleApplyConfiguration) WithAnnotations(entries map[string]string) *RoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -218,7 +218,7 @@ func (b *RoleApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenc if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -229,7 +229,7 @@ func (b *RoleApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenc func (b *RoleApplyConfiguration) WithFinalizers(values ...string) *RoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } @@ -256,5 +256,5 @@ func (b *RoleApplyConfiguration) WithRules(values ...*PolicyRuleApplyConfigurati // GetName retrieves the value of the Name field in the declarative configuration. func (b *RoleApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/rolebinding.go b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/rolebinding.go index 8efcddd69..37c0d37cf 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/rolebinding.go +++ b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/rolebinding.go @@ -87,7 +87,7 @@ func extractRoleBinding(roleBinding *rbacv1alpha1.RoleBinding, fieldManager stri // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *RoleBindingApplyConfiguration) WithKind(value string) *RoleBindingApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -95,7 +95,7 @@ func (b *RoleBindingApplyConfiguration) WithKind(value string) *RoleBindingApply // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *RoleBindingApplyConfiguration) WithAPIVersion(value string) *RoleBindingApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -104,7 +104,7 @@ func (b *RoleBindingApplyConfiguration) WithAPIVersion(value string) *RoleBindin // If called multiple times, the Name field is set to the value of the last call. func (b *RoleBindingApplyConfiguration) WithName(value string) *RoleBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -113,7 +113,7 @@ func (b *RoleBindingApplyConfiguration) WithName(value string) *RoleBindingApply // If called multiple times, the GenerateName field is set to the value of the last call. 
func (b *RoleBindingApplyConfiguration) WithGenerateName(value string) *RoleBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -122,7 +122,7 @@ func (b *RoleBindingApplyConfiguration) WithGenerateName(value string) *RoleBind // If called multiple times, the Namespace field is set to the value of the last call. func (b *RoleBindingApplyConfiguration) WithNamespace(value string) *RoleBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -131,7 +131,7 @@ func (b *RoleBindingApplyConfiguration) WithNamespace(value string) *RoleBinding // If called multiple times, the UID field is set to the value of the last call. func (b *RoleBindingApplyConfiguration) WithUID(value types.UID) *RoleBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -140,7 +140,7 @@ func (b *RoleBindingApplyConfiguration) WithUID(value types.UID) *RoleBindingApp // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *RoleBindingApplyConfiguration) WithResourceVersion(value string) *RoleBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -149,7 +149,7 @@ func (b *RoleBindingApplyConfiguration) WithResourceVersion(value string) *RoleB // If called multiple times, the Generation field is set to the value of the last call. func (b *RoleBindingApplyConfiguration) WithGeneration(value int64) *RoleBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } @@ -158,7 +158,7 @@ func (b *RoleBindingApplyConfiguration) WithGeneration(value int64) *RoleBinding // If called multiple times, the CreationTimestamp field is set to the value of the last call. func (b *RoleBindingApplyConfiguration) WithCreationTimestamp(value metav1.Time) *RoleBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } @@ -167,7 +167,7 @@ func (b *RoleBindingApplyConfiguration) WithCreationTimestamp(value metav1.Time) // If called multiple times, the DeletionTimestamp field is set to the value of the last call. func (b *RoleBindingApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *RoleBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -176,7 +176,7 @@ func (b *RoleBindingApplyConfiguration) WithDeletionTimestamp(value metav1.Time) // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *RoleBindingApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *RoleBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -186,11 +186,11 @@ func (b *RoleBindingApplyConfiguration) WithDeletionGracePeriodSeconds(value int // overwriting an existing map entries in Labels field with the same key. 
func (b *RoleBindingApplyConfiguration) WithLabels(entries map[string]string) *RoleBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -201,11 +201,11 @@ func (b *RoleBindingApplyConfiguration) WithLabels(entries map[string]string) *R // overwriting an existing map entries in Annotations field with the same key. func (b *RoleBindingApplyConfiguration) WithAnnotations(entries map[string]string) *RoleBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -219,7 +219,7 @@ func (b *RoleBindingApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerR if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -230,7 +230,7 @@ func (b *RoleBindingApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerR func (b *RoleBindingApplyConfiguration) WithFinalizers(values ...string) *RoleBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } @@ -265,5 +265,5 @@ func (b *RoleBindingApplyConfiguration) WithRoleRef(value *RoleRefApplyConfigura // GetName retrieves the value of the Name field in the declarative configuration. func (b *RoleBindingApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/clusterrole.go b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/clusterrole.go index 5e9c23854..124e47ef7 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/clusterrole.go +++ b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/clusterrole.go @@ -85,7 +85,7 @@ func extractClusterRole(clusterRole *rbacv1beta1.ClusterRole, fieldManager strin // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *ClusterRoleApplyConfiguration) WithKind(value string) *ClusterRoleApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -93,7 +93,7 @@ func (b *ClusterRoleApplyConfiguration) WithKind(value string) *ClusterRoleApply // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. 
func (b *ClusterRoleApplyConfiguration) WithAPIVersion(value string) *ClusterRoleApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -102,7 +102,7 @@ func (b *ClusterRoleApplyConfiguration) WithAPIVersion(value string) *ClusterRol // If called multiple times, the Name field is set to the value of the last call. func (b *ClusterRoleApplyConfiguration) WithName(value string) *ClusterRoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -111,7 +111,7 @@ func (b *ClusterRoleApplyConfiguration) WithName(value string) *ClusterRoleApply // If called multiple times, the GenerateName field is set to the value of the last call. func (b *ClusterRoleApplyConfiguration) WithGenerateName(value string) *ClusterRoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -120,7 +120,7 @@ func (b *ClusterRoleApplyConfiguration) WithGenerateName(value string) *ClusterR // If called multiple times, the Namespace field is set to the value of the last call. func (b *ClusterRoleApplyConfiguration) WithNamespace(value string) *ClusterRoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -129,7 +129,7 @@ func (b *ClusterRoleApplyConfiguration) WithNamespace(value string) *ClusterRole // If called multiple times, the UID field is set to the value of the last call. func (b *ClusterRoleApplyConfiguration) WithUID(value types.UID) *ClusterRoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -138,7 +138,7 @@ func (b *ClusterRoleApplyConfiguration) WithUID(value types.UID) *ClusterRoleApp // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *ClusterRoleApplyConfiguration) WithResourceVersion(value string) *ClusterRoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -147,7 +147,7 @@ func (b *ClusterRoleApplyConfiguration) WithResourceVersion(value string) *Clust // If called multiple times, the Generation field is set to the value of the last call. func (b *ClusterRoleApplyConfiguration) WithGeneration(value int64) *ClusterRoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } @@ -156,7 +156,7 @@ func (b *ClusterRoleApplyConfiguration) WithGeneration(value int64) *ClusterRole // If called multiple times, the CreationTimestamp field is set to the value of the last call. func (b *ClusterRoleApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ClusterRoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } @@ -165,7 +165,7 @@ func (b *ClusterRoleApplyConfiguration) WithCreationTimestamp(value metav1.Time) // If called multiple times, the DeletionTimestamp field is set to the value of the last call. 
func (b *ClusterRoleApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ClusterRoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -174,7 +174,7 @@ func (b *ClusterRoleApplyConfiguration) WithDeletionTimestamp(value metav1.Time) // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *ClusterRoleApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ClusterRoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -184,11 +184,11 @@ func (b *ClusterRoleApplyConfiguration) WithDeletionGracePeriodSeconds(value int // overwriting an existing map entries in Labels field with the same key. func (b *ClusterRoleApplyConfiguration) WithLabels(entries map[string]string) *ClusterRoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -199,11 +199,11 @@ func (b *ClusterRoleApplyConfiguration) WithLabels(entries map[string]string) *C // overwriting an existing map entries in Annotations field with the same key. func (b *ClusterRoleApplyConfiguration) WithAnnotations(entries map[string]string) *ClusterRoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -217,7 +217,7 @@ func (b *ClusterRoleApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerR if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -228,7 +228,7 @@ func (b *ClusterRoleApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerR func (b *ClusterRoleApplyConfiguration) WithFinalizers(values ...string) *ClusterRoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } @@ -263,5 +263,5 @@ func (b *ClusterRoleApplyConfiguration) WithAggregationRule(value *AggregationRu // GetName retrieves the value of the Name field in the declarative configuration. 
func (b *ClusterRoleApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/clusterrolebinding.go b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/clusterrolebinding.go index 2f088b93e..140e7e176 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/clusterrolebinding.go +++ b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/clusterrolebinding.go @@ -85,7 +85,7 @@ func extractClusterRoleBinding(clusterRoleBinding *rbacv1beta1.ClusterRoleBindin // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *ClusterRoleBindingApplyConfiguration) WithKind(value string) *ClusterRoleBindingApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -93,7 +93,7 @@ func (b *ClusterRoleBindingApplyConfiguration) WithKind(value string) *ClusterRo // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *ClusterRoleBindingApplyConfiguration) WithAPIVersion(value string) *ClusterRoleBindingApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -102,7 +102,7 @@ func (b *ClusterRoleBindingApplyConfiguration) WithAPIVersion(value string) *Clu // If called multiple times, the Name field is set to the value of the last call. func (b *ClusterRoleBindingApplyConfiguration) WithName(value string) *ClusterRoleBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -111,7 +111,7 @@ func (b *ClusterRoleBindingApplyConfiguration) WithName(value string) *ClusterRo // If called multiple times, the GenerateName field is set to the value of the last call. func (b *ClusterRoleBindingApplyConfiguration) WithGenerateName(value string) *ClusterRoleBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -120,7 +120,7 @@ func (b *ClusterRoleBindingApplyConfiguration) WithGenerateName(value string) *C // If called multiple times, the Namespace field is set to the value of the last call. func (b *ClusterRoleBindingApplyConfiguration) WithNamespace(value string) *ClusterRoleBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -129,7 +129,7 @@ func (b *ClusterRoleBindingApplyConfiguration) WithNamespace(value string) *Clus // If called multiple times, the UID field is set to the value of the last call. func (b *ClusterRoleBindingApplyConfiguration) WithUID(value types.UID) *ClusterRoleBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -138,7 +138,7 @@ func (b *ClusterRoleBindingApplyConfiguration) WithUID(value types.UID) *Cluster // If called multiple times, the ResourceVersion field is set to the value of the last call. 
func (b *ClusterRoleBindingApplyConfiguration) WithResourceVersion(value string) *ClusterRoleBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -147,7 +147,7 @@ func (b *ClusterRoleBindingApplyConfiguration) WithResourceVersion(value string) // If called multiple times, the Generation field is set to the value of the last call. func (b *ClusterRoleBindingApplyConfiguration) WithGeneration(value int64) *ClusterRoleBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } @@ -156,7 +156,7 @@ func (b *ClusterRoleBindingApplyConfiguration) WithGeneration(value int64) *Clus // If called multiple times, the CreationTimestamp field is set to the value of the last call. func (b *ClusterRoleBindingApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ClusterRoleBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } @@ -165,7 +165,7 @@ func (b *ClusterRoleBindingApplyConfiguration) WithCreationTimestamp(value metav // If called multiple times, the DeletionTimestamp field is set to the value of the last call. func (b *ClusterRoleBindingApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ClusterRoleBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -174,7 +174,7 @@ func (b *ClusterRoleBindingApplyConfiguration) WithDeletionTimestamp(value metav // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *ClusterRoleBindingApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ClusterRoleBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -184,11 +184,11 @@ func (b *ClusterRoleBindingApplyConfiguration) WithDeletionGracePeriodSeconds(va // overwriting an existing map entries in Labels field with the same key. func (b *ClusterRoleBindingApplyConfiguration) WithLabels(entries map[string]string) *ClusterRoleBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -199,11 +199,11 @@ func (b *ClusterRoleBindingApplyConfiguration) WithLabels(entries map[string]str // overwriting an existing map entries in Annotations field with the same key. 
func (b *ClusterRoleBindingApplyConfiguration) WithAnnotations(entries map[string]string) *ClusterRoleBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -217,7 +217,7 @@ func (b *ClusterRoleBindingApplyConfiguration) WithOwnerReferences(values ...*v1 if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -228,7 +228,7 @@ func (b *ClusterRoleBindingApplyConfiguration) WithOwnerReferences(values ...*v1 func (b *ClusterRoleBindingApplyConfiguration) WithFinalizers(values ...string) *ClusterRoleBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } @@ -263,5 +263,5 @@ func (b *ClusterRoleBindingApplyConfiguration) WithRoleRef(value *RoleRefApplyCo // GetName retrieves the value of the Name field in the declarative configuration. func (b *ClusterRoleBindingApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/role.go b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/role.go index 4b1b6112b..82240514f 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/role.go +++ b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/role.go @@ -86,7 +86,7 @@ func extractRole(role *rbacv1beta1.Role, fieldManager string, subresource string // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *RoleApplyConfiguration) WithKind(value string) *RoleApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -94,7 +94,7 @@ func (b *RoleApplyConfiguration) WithKind(value string) *RoleApplyConfiguration // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *RoleApplyConfiguration) WithAPIVersion(value string) *RoleApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -103,7 +103,7 @@ func (b *RoleApplyConfiguration) WithAPIVersion(value string) *RoleApplyConfigur // If called multiple times, the Name field is set to the value of the last call. func (b *RoleApplyConfiguration) WithName(value string) *RoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -112,7 +112,7 @@ func (b *RoleApplyConfiguration) WithName(value string) *RoleApplyConfiguration // If called multiple times, the GenerateName field is set to the value of the last call. 
func (b *RoleApplyConfiguration) WithGenerateName(value string) *RoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -121,7 +121,7 @@ func (b *RoleApplyConfiguration) WithGenerateName(value string) *RoleApplyConfig // If called multiple times, the Namespace field is set to the value of the last call. func (b *RoleApplyConfiguration) WithNamespace(value string) *RoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -130,7 +130,7 @@ func (b *RoleApplyConfiguration) WithNamespace(value string) *RoleApplyConfigura // If called multiple times, the UID field is set to the value of the last call. func (b *RoleApplyConfiguration) WithUID(value types.UID) *RoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -139,7 +139,7 @@ func (b *RoleApplyConfiguration) WithUID(value types.UID) *RoleApplyConfiguratio // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *RoleApplyConfiguration) WithResourceVersion(value string) *RoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -148,7 +148,7 @@ func (b *RoleApplyConfiguration) WithResourceVersion(value string) *RoleApplyCon // If called multiple times, the Generation field is set to the value of the last call. func (b *RoleApplyConfiguration) WithGeneration(value int64) *RoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } @@ -157,7 +157,7 @@ func (b *RoleApplyConfiguration) WithGeneration(value int64) *RoleApplyConfigura // If called multiple times, the CreationTimestamp field is set to the value of the last call. func (b *RoleApplyConfiguration) WithCreationTimestamp(value metav1.Time) *RoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } @@ -166,7 +166,7 @@ func (b *RoleApplyConfiguration) WithCreationTimestamp(value metav1.Time) *RoleA // If called multiple times, the DeletionTimestamp field is set to the value of the last call. func (b *RoleApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *RoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -175,7 +175,7 @@ func (b *RoleApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *RoleA // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *RoleApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *RoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -185,11 +185,11 @@ func (b *RoleApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *Ro // overwriting an existing map entries in Labels field with the same key. 
func (b *RoleApplyConfiguration) WithLabels(entries map[string]string) *RoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -200,11 +200,11 @@ func (b *RoleApplyConfiguration) WithLabels(entries map[string]string) *RoleAppl // overwriting an existing map entries in Annotations field with the same key. func (b *RoleApplyConfiguration) WithAnnotations(entries map[string]string) *RoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -218,7 +218,7 @@ func (b *RoleApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenc if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -229,7 +229,7 @@ func (b *RoleApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenc func (b *RoleApplyConfiguration) WithFinalizers(values ...string) *RoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } @@ -256,5 +256,5 @@ func (b *RoleApplyConfiguration) WithRules(values ...*PolicyRuleApplyConfigurati // GetName retrieves the value of the Name field in the declarative configuration. func (b *RoleApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/rolebinding.go b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/rolebinding.go index 246928553..1c66b976e 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/rolebinding.go +++ b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/rolebinding.go @@ -87,7 +87,7 @@ func extractRoleBinding(roleBinding *rbacv1beta1.RoleBinding, fieldManager strin // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *RoleBindingApplyConfiguration) WithKind(value string) *RoleBindingApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -95,7 +95,7 @@ func (b *RoleBindingApplyConfiguration) WithKind(value string) *RoleBindingApply // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. 
func (b *RoleBindingApplyConfiguration) WithAPIVersion(value string) *RoleBindingApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -104,7 +104,7 @@ func (b *RoleBindingApplyConfiguration) WithAPIVersion(value string) *RoleBindin // If called multiple times, the Name field is set to the value of the last call. func (b *RoleBindingApplyConfiguration) WithName(value string) *RoleBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -113,7 +113,7 @@ func (b *RoleBindingApplyConfiguration) WithName(value string) *RoleBindingApply // If called multiple times, the GenerateName field is set to the value of the last call. func (b *RoleBindingApplyConfiguration) WithGenerateName(value string) *RoleBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -122,7 +122,7 @@ func (b *RoleBindingApplyConfiguration) WithGenerateName(value string) *RoleBind // If called multiple times, the Namespace field is set to the value of the last call. func (b *RoleBindingApplyConfiguration) WithNamespace(value string) *RoleBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -131,7 +131,7 @@ func (b *RoleBindingApplyConfiguration) WithNamespace(value string) *RoleBinding // If called multiple times, the UID field is set to the value of the last call. func (b *RoleBindingApplyConfiguration) WithUID(value types.UID) *RoleBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -140,7 +140,7 @@ func (b *RoleBindingApplyConfiguration) WithUID(value types.UID) *RoleBindingApp // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *RoleBindingApplyConfiguration) WithResourceVersion(value string) *RoleBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -149,7 +149,7 @@ func (b *RoleBindingApplyConfiguration) WithResourceVersion(value string) *RoleB // If called multiple times, the Generation field is set to the value of the last call. func (b *RoleBindingApplyConfiguration) WithGeneration(value int64) *RoleBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } @@ -158,7 +158,7 @@ func (b *RoleBindingApplyConfiguration) WithGeneration(value int64) *RoleBinding // If called multiple times, the CreationTimestamp field is set to the value of the last call. func (b *RoleBindingApplyConfiguration) WithCreationTimestamp(value metav1.Time) *RoleBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } @@ -167,7 +167,7 @@ func (b *RoleBindingApplyConfiguration) WithCreationTimestamp(value metav1.Time) // If called multiple times, the DeletionTimestamp field is set to the value of the last call. 
func (b *RoleBindingApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *RoleBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -176,7 +176,7 @@ func (b *RoleBindingApplyConfiguration) WithDeletionTimestamp(value metav1.Time) // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *RoleBindingApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *RoleBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -186,11 +186,11 @@ func (b *RoleBindingApplyConfiguration) WithDeletionGracePeriodSeconds(value int // overwriting an existing map entries in Labels field with the same key. func (b *RoleBindingApplyConfiguration) WithLabels(entries map[string]string) *RoleBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -201,11 +201,11 @@ func (b *RoleBindingApplyConfiguration) WithLabels(entries map[string]string) *R // overwriting an existing map entries in Annotations field with the same key. func (b *RoleBindingApplyConfiguration) WithAnnotations(entries map[string]string) *RoleBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -219,7 +219,7 @@ func (b *RoleBindingApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerR if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -230,7 +230,7 @@ func (b *RoleBindingApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerR func (b *RoleBindingApplyConfiguration) WithFinalizers(values ...string) *RoleBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } @@ -265,5 +265,5 @@ func (b *RoleBindingApplyConfiguration) WithRoleRef(value *RoleRefApplyConfigura // GetName retrieves the value of the Name field in the declarative configuration. 
func (b *RoleBindingApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/allocateddevicestatus.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/allocateddevicestatus.go new file mode 100644 index 000000000..da58d4348 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/allocateddevicestatus.go @@ -0,0 +1,94 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha3 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" + v1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// AllocatedDeviceStatusApplyConfiguration represents a declarative configuration of the AllocatedDeviceStatus type for use +// with apply. +type AllocatedDeviceStatusApplyConfiguration struct { + Driver *string `json:"driver,omitempty"` + Pool *string `json:"pool,omitempty"` + Device *string `json:"device,omitempty"` + Conditions []v1.ConditionApplyConfiguration `json:"conditions,omitempty"` + Data *runtime.RawExtension `json:"data,omitempty"` + NetworkData *NetworkDeviceDataApplyConfiguration `json:"networkData,omitempty"` +} + +// AllocatedDeviceStatusApplyConfiguration constructs a declarative configuration of the AllocatedDeviceStatus type for use with +// apply. +func AllocatedDeviceStatus() *AllocatedDeviceStatusApplyConfiguration { + return &AllocatedDeviceStatusApplyConfiguration{} +} + +// WithDriver sets the Driver field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Driver field is set to the value of the last call. +func (b *AllocatedDeviceStatusApplyConfiguration) WithDriver(value string) *AllocatedDeviceStatusApplyConfiguration { + b.Driver = &value + return b +} + +// WithPool sets the Pool field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Pool field is set to the value of the last call. +func (b *AllocatedDeviceStatusApplyConfiguration) WithPool(value string) *AllocatedDeviceStatusApplyConfiguration { + b.Pool = &value + return b +} + +// WithDevice sets the Device field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Device field is set to the value of the last call. 
+func (b *AllocatedDeviceStatusApplyConfiguration) WithDevice(value string) *AllocatedDeviceStatusApplyConfiguration { + b.Device = &value + return b +} + +// WithConditions adds the given value to the Conditions field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Conditions field. +func (b *AllocatedDeviceStatusApplyConfiguration) WithConditions(values ...*v1.ConditionApplyConfiguration) *AllocatedDeviceStatusApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithConditions") + } + b.Conditions = append(b.Conditions, *values[i]) + } + return b +} + +// WithData sets the Data field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Data field is set to the value of the last call. +func (b *AllocatedDeviceStatusApplyConfiguration) WithData(value runtime.RawExtension) *AllocatedDeviceStatusApplyConfiguration { + b.Data = &value + return b +} + +// WithNetworkData sets the NetworkData field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the NetworkData field is set to the value of the last call. +func (b *AllocatedDeviceStatusApplyConfiguration) WithNetworkData(value *NetworkDeviceDataApplyConfiguration) *AllocatedDeviceStatusApplyConfiguration { + b.NetworkData = value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/allocationresult.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/allocationresult.go index 3090b2f9d..7c7427ee9 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/allocationresult.go +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/allocationresult.go @@ -27,7 +27,6 @@ import ( type AllocationResultApplyConfiguration struct { Devices *DeviceAllocationResultApplyConfiguration `json:"devices,omitempty"` NodeSelector *v1.NodeSelectorApplyConfiguration `json:"nodeSelector,omitempty"` - Controller *string `json:"controller,omitempty"` } // AllocationResultApplyConfiguration constructs a declarative configuration of the AllocationResult type for use with @@ -51,11 +50,3 @@ func (b *AllocationResultApplyConfiguration) WithNodeSelector(value *v1.NodeSele b.NodeSelector = value return b } - -// WithController sets the Controller field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Controller field is set to the value of the last call. -func (b *AllocationResultApplyConfiguration) WithController(value string) *AllocationResultApplyConfiguration { - b.Controller = &value - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/basicdevice.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/basicdevice.go index e6b774508..b58e43294 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/basicdevice.go +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/basicdevice.go @@ -19,15 +19,15 @@ limitations under the License. 
package v1alpha3 import ( - v1alpha3 "k8s.io/api/resource/v1alpha3" + resourcev1alpha3 "k8s.io/api/resource/v1alpha3" resource "k8s.io/apimachinery/pkg/api/resource" ) // BasicDeviceApplyConfiguration represents a declarative configuration of the BasicDevice type for use // with apply. type BasicDeviceApplyConfiguration struct { - Attributes map[v1alpha3.QualifiedName]DeviceAttributeApplyConfiguration `json:"attributes,omitempty"` - Capacity map[v1alpha3.QualifiedName]resource.Quantity `json:"capacity,omitempty"` + Attributes map[resourcev1alpha3.QualifiedName]DeviceAttributeApplyConfiguration `json:"attributes,omitempty"` + Capacity map[resourcev1alpha3.QualifiedName]resource.Quantity `json:"capacity,omitempty"` } // BasicDeviceApplyConfiguration constructs a declarative configuration of the BasicDevice type for use with @@ -40,9 +40,9 @@ func BasicDevice() *BasicDeviceApplyConfiguration { // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, the entries provided by each call will be put on the Attributes field, // overwriting an existing map entries in Attributes field with the same key. -func (b *BasicDeviceApplyConfiguration) WithAttributes(entries map[v1alpha3.QualifiedName]DeviceAttributeApplyConfiguration) *BasicDeviceApplyConfiguration { +func (b *BasicDeviceApplyConfiguration) WithAttributes(entries map[resourcev1alpha3.QualifiedName]DeviceAttributeApplyConfiguration) *BasicDeviceApplyConfiguration { if b.Attributes == nil && len(entries) > 0 { - b.Attributes = make(map[v1alpha3.QualifiedName]DeviceAttributeApplyConfiguration, len(entries)) + b.Attributes = make(map[resourcev1alpha3.QualifiedName]DeviceAttributeApplyConfiguration, len(entries)) } for k, v := range entries { b.Attributes[k] = v @@ -54,9 +54,9 @@ func (b *BasicDeviceApplyConfiguration) WithAttributes(entries map[v1alpha3.Qual // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, the entries provided by each call will be put on the Capacity field, // overwriting an existing map entries in Capacity field with the same key. -func (b *BasicDeviceApplyConfiguration) WithCapacity(entries map[v1alpha3.QualifiedName]resource.Quantity) *BasicDeviceApplyConfiguration { +func (b *BasicDeviceApplyConfiguration) WithCapacity(entries map[resourcev1alpha3.QualifiedName]resource.Quantity) *BasicDeviceApplyConfiguration { if b.Capacity == nil && len(entries) > 0 { - b.Capacity = make(map[v1alpha3.QualifiedName]resource.Quantity, len(entries)) + b.Capacity = make(map[resourcev1alpha3.QualifiedName]resource.Quantity, len(entries)) } for k, v := range entries { b.Capacity[k] = v diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceallocationconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceallocationconfiguration.go index 342e724ef..25907e40d 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceallocationconfiguration.go +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceallocationconfiguration.go @@ -19,14 +19,14 @@ limitations under the License. package v1alpha3 import ( - v1alpha3 "k8s.io/api/resource/v1alpha3" + resourcev1alpha3 "k8s.io/api/resource/v1alpha3" ) // DeviceAllocationConfigurationApplyConfiguration represents a declarative configuration of the DeviceAllocationConfiguration type for use // with apply. 
type DeviceAllocationConfigurationApplyConfiguration struct { - Source *v1alpha3.AllocationConfigSource `json:"source,omitempty"` - Requests []string `json:"requests,omitempty"` + Source *resourcev1alpha3.AllocationConfigSource `json:"source,omitempty"` + Requests []string `json:"requests,omitempty"` DeviceConfigurationApplyConfiguration `json:",inline"` } @@ -39,7 +39,7 @@ func DeviceAllocationConfiguration() *DeviceAllocationConfigurationApplyConfigur // WithSource sets the Source field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Source field is set to the value of the last call. -func (b *DeviceAllocationConfigurationApplyConfiguration) WithSource(value v1alpha3.AllocationConfigSource) *DeviceAllocationConfigurationApplyConfiguration { +func (b *DeviceAllocationConfigurationApplyConfiguration) WithSource(value resourcev1alpha3.AllocationConfigSource) *DeviceAllocationConfigurationApplyConfiguration { b.Source = &value return b } @@ -58,6 +58,6 @@ func (b *DeviceAllocationConfigurationApplyConfiguration) WithRequests(values .. // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Opaque field is set to the value of the last call. func (b *DeviceAllocationConfigurationApplyConfiguration) WithOpaque(value *OpaqueDeviceConfigurationApplyConfiguration) *DeviceAllocationConfigurationApplyConfiguration { - b.Opaque = value + b.DeviceConfigurationApplyConfiguration.Opaque = value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceclaimconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceclaimconfiguration.go index 4cabe9859..045798856 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceclaimconfiguration.go +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceclaimconfiguration.go @@ -45,6 +45,6 @@ func (b *DeviceClaimConfigurationApplyConfiguration) WithRequests(values ...stri // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Opaque field is set to the value of the last call. func (b *DeviceClaimConfigurationApplyConfiguration) WithOpaque(value *OpaqueDeviceConfigurationApplyConfiguration) *DeviceClaimConfigurationApplyConfiguration { - b.Opaque = value + b.DeviceConfigurationApplyConfiguration.Opaque = value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceclass.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceclass.go index abaadbb36..ae3e396e0 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceclass.go +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceclass.go @@ -84,7 +84,7 @@ func extractDeviceClass(deviceClass *resourcev1alpha3.DeviceClass, fieldManager // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. 
func (b *DeviceClassApplyConfiguration) WithKind(value string) *DeviceClassApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -92,7 +92,7 @@ func (b *DeviceClassApplyConfiguration) WithKind(value string) *DeviceClassApply // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *DeviceClassApplyConfiguration) WithAPIVersion(value string) *DeviceClassApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -101,7 +101,7 @@ func (b *DeviceClassApplyConfiguration) WithAPIVersion(value string) *DeviceClas // If called multiple times, the Name field is set to the value of the last call. func (b *DeviceClassApplyConfiguration) WithName(value string) *DeviceClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -110,7 +110,7 @@ func (b *DeviceClassApplyConfiguration) WithName(value string) *DeviceClassApply // If called multiple times, the GenerateName field is set to the value of the last call. func (b *DeviceClassApplyConfiguration) WithGenerateName(value string) *DeviceClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -119,7 +119,7 @@ func (b *DeviceClassApplyConfiguration) WithGenerateName(value string) *DeviceCl // If called multiple times, the Namespace field is set to the value of the last call. func (b *DeviceClassApplyConfiguration) WithNamespace(value string) *DeviceClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -128,7 +128,7 @@ func (b *DeviceClassApplyConfiguration) WithNamespace(value string) *DeviceClass // If called multiple times, the UID field is set to the value of the last call. func (b *DeviceClassApplyConfiguration) WithUID(value types.UID) *DeviceClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -137,7 +137,7 @@ func (b *DeviceClassApplyConfiguration) WithUID(value types.UID) *DeviceClassApp // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *DeviceClassApplyConfiguration) WithResourceVersion(value string) *DeviceClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -146,7 +146,7 @@ func (b *DeviceClassApplyConfiguration) WithResourceVersion(value string) *Devic // If called multiple times, the Generation field is set to the value of the last call. func (b *DeviceClassApplyConfiguration) WithGeneration(value int64) *DeviceClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } @@ -155,7 +155,7 @@ func (b *DeviceClassApplyConfiguration) WithGeneration(value int64) *DeviceClass // If called multiple times, the CreationTimestamp field is set to the value of the last call. 
func (b *DeviceClassApplyConfiguration) WithCreationTimestamp(value metav1.Time) *DeviceClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } @@ -164,7 +164,7 @@ func (b *DeviceClassApplyConfiguration) WithCreationTimestamp(value metav1.Time) // If called multiple times, the DeletionTimestamp field is set to the value of the last call. func (b *DeviceClassApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *DeviceClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -173,7 +173,7 @@ func (b *DeviceClassApplyConfiguration) WithDeletionTimestamp(value metav1.Time) // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *DeviceClassApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *DeviceClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -183,11 +183,11 @@ func (b *DeviceClassApplyConfiguration) WithDeletionGracePeriodSeconds(value int // overwriting an existing map entries in Labels field with the same key. func (b *DeviceClassApplyConfiguration) WithLabels(entries map[string]string) *DeviceClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -198,11 +198,11 @@ func (b *DeviceClassApplyConfiguration) WithLabels(entries map[string]string) *D // overwriting an existing map entries in Annotations field with the same key. 
func (b *DeviceClassApplyConfiguration) WithAnnotations(entries map[string]string) *DeviceClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -216,7 +216,7 @@ func (b *DeviceClassApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerR if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -227,7 +227,7 @@ func (b *DeviceClassApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerR func (b *DeviceClassApplyConfiguration) WithFinalizers(values ...string) *DeviceClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } @@ -249,5 +249,5 @@ func (b *DeviceClassApplyConfiguration) WithSpec(value *DeviceClassSpecApplyConf // GetName retrieves the value of the Name field in the declarative configuration. func (b *DeviceClassApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceclassconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceclassconfiguration.go index cb3758a3e..6daa4a97e 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceclassconfiguration.go +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceclassconfiguration.go @@ -34,6 +34,6 @@ func DeviceClassConfiguration() *DeviceClassConfigurationApplyConfiguration { // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Opaque field is set to the value of the last call. func (b *DeviceClassConfigurationApplyConfiguration) WithOpaque(value *OpaqueDeviceConfigurationApplyConfiguration) *DeviceClassConfigurationApplyConfiguration { - b.Opaque = value + b.DeviceConfigurationApplyConfiguration.Opaque = value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceclassspec.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceclassspec.go index d40a43de6..37db6a1cc 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceclassspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceclassspec.go @@ -18,16 +18,11 @@ limitations under the License. package v1alpha3 -import ( - v1 "k8s.io/client-go/applyconfigurations/core/v1" -) - // DeviceClassSpecApplyConfiguration represents a declarative configuration of the DeviceClassSpec type for use // with apply. 
type DeviceClassSpecApplyConfiguration struct { - Selectors []DeviceSelectorApplyConfiguration `json:"selectors,omitempty"` - Config []DeviceClassConfigurationApplyConfiguration `json:"config,omitempty"` - SuitableNodes *v1.NodeSelectorApplyConfiguration `json:"suitableNodes,omitempty"` + Selectors []DeviceSelectorApplyConfiguration `json:"selectors,omitempty"` + Config []DeviceClassConfigurationApplyConfiguration `json:"config,omitempty"` } // DeviceClassSpecApplyConfiguration constructs a declarative configuration of the DeviceClassSpec type for use with @@ -61,11 +56,3 @@ func (b *DeviceClassSpecApplyConfiguration) WithConfig(values ...*DeviceClassCon } return b } - -// WithSuitableNodes sets the SuitableNodes field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the SuitableNodes field is set to the value of the last call. -func (b *DeviceClassSpecApplyConfiguration) WithSuitableNodes(value *v1.NodeSelectorApplyConfiguration) *DeviceClassSpecApplyConfiguration { - b.SuitableNodes = value - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceconstraint.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceconstraint.go index 479acd57c..712f431f4 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceconstraint.go +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceconstraint.go @@ -19,14 +19,14 @@ limitations under the License. package v1alpha3 import ( - v1alpha3 "k8s.io/api/resource/v1alpha3" + resourcev1alpha3 "k8s.io/api/resource/v1alpha3" ) // DeviceConstraintApplyConfiguration represents a declarative configuration of the DeviceConstraint type for use // with apply. type DeviceConstraintApplyConfiguration struct { - Requests []string `json:"requests,omitempty"` - MatchAttribute *v1alpha3.FullyQualifiedName `json:"matchAttribute,omitempty"` + Requests []string `json:"requests,omitempty"` + MatchAttribute *resourcev1alpha3.FullyQualifiedName `json:"matchAttribute,omitempty"` } // DeviceConstraintApplyConfiguration constructs a declarative configuration of the DeviceConstraint type for use with @@ -48,7 +48,7 @@ func (b *DeviceConstraintApplyConfiguration) WithRequests(values ...string) *Dev // WithMatchAttribute sets the MatchAttribute field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the MatchAttribute field is set to the value of the last call. 
-func (b *DeviceConstraintApplyConfiguration) WithMatchAttribute(value v1alpha3.FullyQualifiedName) *DeviceConstraintApplyConfiguration { +func (b *DeviceConstraintApplyConfiguration) WithMatchAttribute(value resourcev1alpha3.FullyQualifiedName) *DeviceConstraintApplyConfiguration { b.MatchAttribute = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/devicerequestallocationresult.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/devicerequestallocationresult.go index 712b9bf9b..4c3cffcf4 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/devicerequestallocationresult.go +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/devicerequestallocationresult.go @@ -21,10 +21,11 @@ package v1alpha3 // DeviceRequestAllocationResultApplyConfiguration represents a declarative configuration of the DeviceRequestAllocationResult type for use // with apply. type DeviceRequestAllocationResultApplyConfiguration struct { - Request *string `json:"request,omitempty"` - Driver *string `json:"driver,omitempty"` - Pool *string `json:"pool,omitempty"` - Device *string `json:"device,omitempty"` + Request *string `json:"request,omitempty"` + Driver *string `json:"driver,omitempty"` + Pool *string `json:"pool,omitempty"` + Device *string `json:"device,omitempty"` + AdminAccess *bool `json:"adminAccess,omitempty"` } // DeviceRequestAllocationResultApplyConfiguration constructs a declarative configuration of the DeviceRequestAllocationResult type for use with @@ -64,3 +65,11 @@ func (b *DeviceRequestAllocationResultApplyConfiguration) WithDevice(value strin b.Device = &value return b } + +// WithAdminAccess sets the AdminAccess field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the AdminAccess field is set to the value of the last call. +func (b *DeviceRequestAllocationResultApplyConfiguration) WithAdminAccess(value bool) *DeviceRequestAllocationResultApplyConfiguration { + b.AdminAccess = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/networkdevicedata.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/networkdevicedata.go new file mode 100644 index 000000000..9ea773ed4 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/networkdevicedata.go @@ -0,0 +1,59 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha3 + +// NetworkDeviceDataApplyConfiguration represents a declarative configuration of the NetworkDeviceData type for use +// with apply. 
+type NetworkDeviceDataApplyConfiguration struct { + InterfaceName *string `json:"interfaceName,omitempty"` + IPs []string `json:"ips,omitempty"` + HardwareAddress *string `json:"hardwareAddress,omitempty"` +} + +// NetworkDeviceDataApplyConfiguration constructs a declarative configuration of the NetworkDeviceData type for use with +// apply. +func NetworkDeviceData() *NetworkDeviceDataApplyConfiguration { + return &NetworkDeviceDataApplyConfiguration{} +} + +// WithInterfaceName sets the InterfaceName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the InterfaceName field is set to the value of the last call. +func (b *NetworkDeviceDataApplyConfiguration) WithInterfaceName(value string) *NetworkDeviceDataApplyConfiguration { + b.InterfaceName = &value + return b +} + +// WithIPs adds the given value to the IPs field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the IPs field. +func (b *NetworkDeviceDataApplyConfiguration) WithIPs(values ...string) *NetworkDeviceDataApplyConfiguration { + for i := range values { + b.IPs = append(b.IPs, values[i]) + } + return b +} + +// WithHardwareAddress sets the HardwareAddress field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the HardwareAddress field is set to the value of the last call. +func (b *NetworkDeviceDataApplyConfiguration) WithHardwareAddress(value string) *NetworkDeviceDataApplyConfiguration { + b.HardwareAddress = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/podschedulingcontextspec.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/podschedulingcontextspec.go deleted file mode 100644 index fd25df7a5..000000000 --- a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/podschedulingcontextspec.go +++ /dev/null @@ -1,50 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha3 - -// PodSchedulingContextSpecApplyConfiguration represents a declarative configuration of the PodSchedulingContextSpec type for use -// with apply. -type PodSchedulingContextSpecApplyConfiguration struct { - SelectedNode *string `json:"selectedNode,omitempty"` - PotentialNodes []string `json:"potentialNodes,omitempty"` -} - -// PodSchedulingContextSpecApplyConfiguration constructs a declarative configuration of the PodSchedulingContextSpec type for use with -// apply. 
-func PodSchedulingContextSpec() *PodSchedulingContextSpecApplyConfiguration { - return &PodSchedulingContextSpecApplyConfiguration{} -} - -// WithSelectedNode sets the SelectedNode field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the SelectedNode field is set to the value of the last call. -func (b *PodSchedulingContextSpecApplyConfiguration) WithSelectedNode(value string) *PodSchedulingContextSpecApplyConfiguration { - b.SelectedNode = &value - return b -} - -// WithPotentialNodes adds the given value to the PotentialNodes field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the PotentialNodes field. -func (b *PodSchedulingContextSpecApplyConfiguration) WithPotentialNodes(values ...string) *PodSchedulingContextSpecApplyConfiguration { - for i := range values { - b.PotentialNodes = append(b.PotentialNodes, values[i]) - } - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/podschedulingcontextstatus.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/podschedulingcontextstatus.go deleted file mode 100644 index a06e370cc..000000000 --- a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/podschedulingcontextstatus.go +++ /dev/null @@ -1,44 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha3 - -// PodSchedulingContextStatusApplyConfiguration represents a declarative configuration of the PodSchedulingContextStatus type for use -// with apply. -type PodSchedulingContextStatusApplyConfiguration struct { - ResourceClaims []ResourceClaimSchedulingStatusApplyConfiguration `json:"resourceClaims,omitempty"` -} - -// PodSchedulingContextStatusApplyConfiguration constructs a declarative configuration of the PodSchedulingContextStatus type for use with -// apply. -func PodSchedulingContextStatus() *PodSchedulingContextStatusApplyConfiguration { - return &PodSchedulingContextStatusApplyConfiguration{} -} - -// WithResourceClaims adds the given value to the ResourceClaims field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the ResourceClaims field. 
-func (b *PodSchedulingContextStatusApplyConfiguration) WithResourceClaims(values ...*ResourceClaimSchedulingStatusApplyConfiguration) *PodSchedulingContextStatusApplyConfiguration { - for i := range values { - if values[i] == nil { - panic("nil value passed to WithResourceClaims") - } - b.ResourceClaims = append(b.ResourceClaims, *values[i]) - } - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceclaim.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceclaim.go index 616159558..96cf63f1f 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceclaim.go +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceclaim.go @@ -87,7 +87,7 @@ func extractResourceClaim(resourceClaim *resourcev1alpha3.ResourceClaim, fieldMa // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *ResourceClaimApplyConfiguration) WithKind(value string) *ResourceClaimApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -95,7 +95,7 @@ func (b *ResourceClaimApplyConfiguration) WithKind(value string) *ResourceClaimA // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *ResourceClaimApplyConfiguration) WithAPIVersion(value string) *ResourceClaimApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -104,7 +104,7 @@ func (b *ResourceClaimApplyConfiguration) WithAPIVersion(value string) *Resource // If called multiple times, the Name field is set to the value of the last call. func (b *ResourceClaimApplyConfiguration) WithName(value string) *ResourceClaimApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -113,7 +113,7 @@ func (b *ResourceClaimApplyConfiguration) WithName(value string) *ResourceClaimA // If called multiple times, the GenerateName field is set to the value of the last call. func (b *ResourceClaimApplyConfiguration) WithGenerateName(value string) *ResourceClaimApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -122,7 +122,7 @@ func (b *ResourceClaimApplyConfiguration) WithGenerateName(value string) *Resour // If called multiple times, the Namespace field is set to the value of the last call. func (b *ResourceClaimApplyConfiguration) WithNamespace(value string) *ResourceClaimApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -131,7 +131,7 @@ func (b *ResourceClaimApplyConfiguration) WithNamespace(value string) *ResourceC // If called multiple times, the UID field is set to the value of the last call. func (b *ResourceClaimApplyConfiguration) WithUID(value types.UID) *ResourceClaimApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -140,7 +140,7 @@ func (b *ResourceClaimApplyConfiguration) WithUID(value types.UID) *ResourceClai // If called multiple times, the ResourceVersion field is set to the value of the last call. 
func (b *ResourceClaimApplyConfiguration) WithResourceVersion(value string) *ResourceClaimApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -149,7 +149,7 @@ func (b *ResourceClaimApplyConfiguration) WithResourceVersion(value string) *Res // If called multiple times, the Generation field is set to the value of the last call. func (b *ResourceClaimApplyConfiguration) WithGeneration(value int64) *ResourceClaimApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } @@ -158,7 +158,7 @@ func (b *ResourceClaimApplyConfiguration) WithGeneration(value int64) *ResourceC // If called multiple times, the CreationTimestamp field is set to the value of the last call. func (b *ResourceClaimApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ResourceClaimApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } @@ -167,7 +167,7 @@ func (b *ResourceClaimApplyConfiguration) WithCreationTimestamp(value metav1.Tim // If called multiple times, the DeletionTimestamp field is set to the value of the last call. func (b *ResourceClaimApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ResourceClaimApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -176,7 +176,7 @@ func (b *ResourceClaimApplyConfiguration) WithDeletionTimestamp(value metav1.Tim // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *ResourceClaimApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ResourceClaimApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -186,11 +186,11 @@ func (b *ResourceClaimApplyConfiguration) WithDeletionGracePeriodSeconds(value i // overwriting an existing map entries in Labels field with the same key. func (b *ResourceClaimApplyConfiguration) WithLabels(entries map[string]string) *ResourceClaimApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -201,11 +201,11 @@ func (b *ResourceClaimApplyConfiguration) WithLabels(entries map[string]string) // overwriting an existing map entries in Annotations field with the same key. 
func (b *ResourceClaimApplyConfiguration) WithAnnotations(entries map[string]string) *ResourceClaimApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -219,7 +219,7 @@ func (b *ResourceClaimApplyConfiguration) WithOwnerReferences(values ...*v1.Owne if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -230,7 +230,7 @@ func (b *ResourceClaimApplyConfiguration) WithOwnerReferences(values ...*v1.Owne func (b *ResourceClaimApplyConfiguration) WithFinalizers(values ...string) *ResourceClaimApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } @@ -260,5 +260,5 @@ func (b *ResourceClaimApplyConfiguration) WithStatus(value *ResourceClaimStatusA // GetName retrieves the value of the Name field in the declarative configuration. func (b *ResourceClaimApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceclaimschedulingstatus.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceclaimschedulingstatus.go deleted file mode 100644 index caab89acd..000000000 --- a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceclaimschedulingstatus.go +++ /dev/null @@ -1,50 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha3 - -// ResourceClaimSchedulingStatusApplyConfiguration represents a declarative configuration of the ResourceClaimSchedulingStatus type for use -// with apply. -type ResourceClaimSchedulingStatusApplyConfiguration struct { - Name *string `json:"name,omitempty"` - UnsuitableNodes []string `json:"unsuitableNodes,omitempty"` -} - -// ResourceClaimSchedulingStatusApplyConfiguration constructs a declarative configuration of the ResourceClaimSchedulingStatus type for use with -// apply. 
-func ResourceClaimSchedulingStatus() *ResourceClaimSchedulingStatusApplyConfiguration { - return &ResourceClaimSchedulingStatusApplyConfiguration{} -} - -// WithName sets the Name field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Name field is set to the value of the last call. -func (b *ResourceClaimSchedulingStatusApplyConfiguration) WithName(value string) *ResourceClaimSchedulingStatusApplyConfiguration { - b.Name = &value - return b -} - -// WithUnsuitableNodes adds the given value to the UnsuitableNodes field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the UnsuitableNodes field. -func (b *ResourceClaimSchedulingStatusApplyConfiguration) WithUnsuitableNodes(values ...string) *ResourceClaimSchedulingStatusApplyConfiguration { - for i := range values { - b.UnsuitableNodes = append(b.UnsuitableNodes, values[i]) - } - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceclaimspec.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceclaimspec.go index 7c5b65681..dfe8bdb14 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceclaimspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceclaimspec.go @@ -21,8 +21,7 @@ package v1alpha3 // ResourceClaimSpecApplyConfiguration represents a declarative configuration of the ResourceClaimSpec type for use // with apply. type ResourceClaimSpecApplyConfiguration struct { - Devices *DeviceClaimApplyConfiguration `json:"devices,omitempty"` - Controller *string `json:"controller,omitempty"` + Devices *DeviceClaimApplyConfiguration `json:"devices,omitempty"` } // ResourceClaimSpecApplyConfiguration constructs a declarative configuration of the ResourceClaimSpec type for use with @@ -38,11 +37,3 @@ func (b *ResourceClaimSpecApplyConfiguration) WithDevices(value *DeviceClaimAppl b.Devices = value return b } - -// WithController sets the Controller field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Controller field is set to the value of the last call. -func (b *ResourceClaimSpecApplyConfiguration) WithController(value string) *ResourceClaimSpecApplyConfiguration { - b.Controller = &value - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceclaimstatus.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceclaimstatus.go index a52af3ec3..f0c32133a 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceclaimstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceclaimstatus.go @@ -21,9 +21,9 @@ package v1alpha3 // ResourceClaimStatusApplyConfiguration represents a declarative configuration of the ResourceClaimStatus type for use // with apply. 
type ResourceClaimStatusApplyConfiguration struct { - Allocation *AllocationResultApplyConfiguration `json:"allocation,omitempty"` - ReservedFor []ResourceClaimConsumerReferenceApplyConfiguration `json:"reservedFor,omitempty"` - DeallocationRequested *bool `json:"deallocationRequested,omitempty"` + Allocation *AllocationResultApplyConfiguration `json:"allocation,omitempty"` + ReservedFor []ResourceClaimConsumerReferenceApplyConfiguration `json:"reservedFor,omitempty"` + Devices []AllocatedDeviceStatusApplyConfiguration `json:"devices,omitempty"` } // ResourceClaimStatusApplyConfiguration constructs a declarative configuration of the ResourceClaimStatus type for use with @@ -53,10 +53,15 @@ func (b *ResourceClaimStatusApplyConfiguration) WithReservedFor(values ...*Resou return b } -// WithDeallocationRequested sets the DeallocationRequested field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the DeallocationRequested field is set to the value of the last call. -func (b *ResourceClaimStatusApplyConfiguration) WithDeallocationRequested(value bool) *ResourceClaimStatusApplyConfiguration { - b.DeallocationRequested = &value +// WithDevices adds the given value to the Devices field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Devices field. +func (b *ResourceClaimStatusApplyConfiguration) WithDevices(values ...*AllocatedDeviceStatusApplyConfiguration) *ResourceClaimStatusApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithDevices") + } + b.Devices = append(b.Devices, *values[i]) + } return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceclaimtemplate.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceclaimtemplate.go index 6f371d0c0..1eb55eee4 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceclaimtemplate.go +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceclaimtemplate.go @@ -86,7 +86,7 @@ func extractResourceClaimTemplate(resourceClaimTemplate *resourcev1alpha3.Resour // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *ResourceClaimTemplateApplyConfiguration) WithKind(value string) *ResourceClaimTemplateApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -94,7 +94,7 @@ func (b *ResourceClaimTemplateApplyConfiguration) WithKind(value string) *Resour // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *ResourceClaimTemplateApplyConfiguration) WithAPIVersion(value string) *ResourceClaimTemplateApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -103,7 +103,7 @@ func (b *ResourceClaimTemplateApplyConfiguration) WithAPIVersion(value string) * // If called multiple times, the Name field is set to the value of the last call. 
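// Illustrative sketch, not part of the vendored diff: the regenerated setters above keep the
// builder contract (every With* call returns its receiver), so an apply configuration is still
// assembled by chaining; the user-visible change in the status hunk is that device-level results
// are now reported through WithDevices while WithDeallocationRequested is gone. The
// ResourceClaim(name, namespace), ResourceClaimStatus(), and AllocatedDeviceStatus() constructors
// used below are the standard generated ones from the same v1alpha3 package and are assumed here,
// since only their With* methods appear in this hunk; all names are placeholders.
package main

import (
	"fmt"

	resourcev1alpha3ac "k8s.io/client-go/applyconfigurations/resource/v1alpha3"
)

func main() {
	claim := resourcev1alpha3ac.ResourceClaim("example-claim", "example-ns").
		WithLabels(map[string]string{"app": "example"}).
		WithStatus(resourcev1alpha3ac.ResourceClaimStatus().
			WithDevices(resourcev1alpha3ac.AllocatedDeviceStatus().
				WithDriver("dra.example.com").
				WithPool("pool-0").
				WithDevice("device-0")))

	fmt.Println(*claim.GetName()) // prints "example-claim"
}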
func (b *ResourceClaimTemplateApplyConfiguration) WithName(value string) *ResourceClaimTemplateApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -112,7 +112,7 @@ func (b *ResourceClaimTemplateApplyConfiguration) WithName(value string) *Resour // If called multiple times, the GenerateName field is set to the value of the last call. func (b *ResourceClaimTemplateApplyConfiguration) WithGenerateName(value string) *ResourceClaimTemplateApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -121,7 +121,7 @@ func (b *ResourceClaimTemplateApplyConfiguration) WithGenerateName(value string) // If called multiple times, the Namespace field is set to the value of the last call. func (b *ResourceClaimTemplateApplyConfiguration) WithNamespace(value string) *ResourceClaimTemplateApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -130,7 +130,7 @@ func (b *ResourceClaimTemplateApplyConfiguration) WithNamespace(value string) *R // If called multiple times, the UID field is set to the value of the last call. func (b *ResourceClaimTemplateApplyConfiguration) WithUID(value types.UID) *ResourceClaimTemplateApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -139,7 +139,7 @@ func (b *ResourceClaimTemplateApplyConfiguration) WithUID(value types.UID) *Reso // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *ResourceClaimTemplateApplyConfiguration) WithResourceVersion(value string) *ResourceClaimTemplateApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -148,7 +148,7 @@ func (b *ResourceClaimTemplateApplyConfiguration) WithResourceVersion(value stri // If called multiple times, the Generation field is set to the value of the last call. func (b *ResourceClaimTemplateApplyConfiguration) WithGeneration(value int64) *ResourceClaimTemplateApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } @@ -157,7 +157,7 @@ func (b *ResourceClaimTemplateApplyConfiguration) WithGeneration(value int64) *R // If called multiple times, the CreationTimestamp field is set to the value of the last call. func (b *ResourceClaimTemplateApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ResourceClaimTemplateApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } @@ -166,7 +166,7 @@ func (b *ResourceClaimTemplateApplyConfiguration) WithCreationTimestamp(value me // If called multiple times, the DeletionTimestamp field is set to the value of the last call. 
func (b *ResourceClaimTemplateApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ResourceClaimTemplateApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -175,7 +175,7 @@ func (b *ResourceClaimTemplateApplyConfiguration) WithDeletionTimestamp(value me // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *ResourceClaimTemplateApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ResourceClaimTemplateApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -185,11 +185,11 @@ func (b *ResourceClaimTemplateApplyConfiguration) WithDeletionGracePeriodSeconds // overwriting an existing map entries in Labels field with the same key. func (b *ResourceClaimTemplateApplyConfiguration) WithLabels(entries map[string]string) *ResourceClaimTemplateApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -200,11 +200,11 @@ func (b *ResourceClaimTemplateApplyConfiguration) WithLabels(entries map[string] // overwriting an existing map entries in Annotations field with the same key. func (b *ResourceClaimTemplateApplyConfiguration) WithAnnotations(entries map[string]string) *ResourceClaimTemplateApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -218,7 +218,7 @@ func (b *ResourceClaimTemplateApplyConfiguration) WithOwnerReferences(values ... if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -229,7 +229,7 @@ func (b *ResourceClaimTemplateApplyConfiguration) WithOwnerReferences(values ... func (b *ResourceClaimTemplateApplyConfiguration) WithFinalizers(values ...string) *ResourceClaimTemplateApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } @@ -251,5 +251,5 @@ func (b *ResourceClaimTemplateApplyConfiguration) WithSpec(value *ResourceClaimT // GetName retrieves the value of the Name field in the declarative configuration. 
func (b *ResourceClaimTemplateApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceclaimtemplatespec.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceclaimtemplatespec.go index 5b03ab755..578f6bce1 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceclaimtemplatespec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceclaimtemplatespec.go @@ -42,7 +42,7 @@ func ResourceClaimTemplateSpec() *ResourceClaimTemplateSpecApplyConfiguration { // If called multiple times, the Name field is set to the value of the last call. func (b *ResourceClaimTemplateSpecApplyConfiguration) WithName(value string) *ResourceClaimTemplateSpecApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -51,7 +51,7 @@ func (b *ResourceClaimTemplateSpecApplyConfiguration) WithName(value string) *Re // If called multiple times, the GenerateName field is set to the value of the last call. func (b *ResourceClaimTemplateSpecApplyConfiguration) WithGenerateName(value string) *ResourceClaimTemplateSpecApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -60,7 +60,7 @@ func (b *ResourceClaimTemplateSpecApplyConfiguration) WithGenerateName(value str // If called multiple times, the Namespace field is set to the value of the last call. func (b *ResourceClaimTemplateSpecApplyConfiguration) WithNamespace(value string) *ResourceClaimTemplateSpecApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -69,7 +69,7 @@ func (b *ResourceClaimTemplateSpecApplyConfiguration) WithNamespace(value string // If called multiple times, the UID field is set to the value of the last call. func (b *ResourceClaimTemplateSpecApplyConfiguration) WithUID(value types.UID) *ResourceClaimTemplateSpecApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -78,7 +78,7 @@ func (b *ResourceClaimTemplateSpecApplyConfiguration) WithUID(value types.UID) * // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *ResourceClaimTemplateSpecApplyConfiguration) WithResourceVersion(value string) *ResourceClaimTemplateSpecApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -87,7 +87,7 @@ func (b *ResourceClaimTemplateSpecApplyConfiguration) WithResourceVersion(value // If called multiple times, the Generation field is set to the value of the last call. func (b *ResourceClaimTemplateSpecApplyConfiguration) WithGeneration(value int64) *ResourceClaimTemplateSpecApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } @@ -96,7 +96,7 @@ func (b *ResourceClaimTemplateSpecApplyConfiguration) WithGeneration(value int64 // If called multiple times, the CreationTimestamp field is set to the value of the last call. 
func (b *ResourceClaimTemplateSpecApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ResourceClaimTemplateSpecApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } @@ -105,7 +105,7 @@ func (b *ResourceClaimTemplateSpecApplyConfiguration) WithCreationTimestamp(valu // If called multiple times, the DeletionTimestamp field is set to the value of the last call. func (b *ResourceClaimTemplateSpecApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ResourceClaimTemplateSpecApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -114,7 +114,7 @@ func (b *ResourceClaimTemplateSpecApplyConfiguration) WithDeletionTimestamp(valu // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *ResourceClaimTemplateSpecApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ResourceClaimTemplateSpecApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -124,11 +124,11 @@ func (b *ResourceClaimTemplateSpecApplyConfiguration) WithDeletionGracePeriodSec // overwriting an existing map entries in Labels field with the same key. func (b *ResourceClaimTemplateSpecApplyConfiguration) WithLabels(entries map[string]string) *ResourceClaimTemplateSpecApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -139,11 +139,11 @@ func (b *ResourceClaimTemplateSpecApplyConfiguration) WithLabels(entries map[str // overwriting an existing map entries in Annotations field with the same key. 
func (b *ResourceClaimTemplateSpecApplyConfiguration) WithAnnotations(entries map[string]string) *ResourceClaimTemplateSpecApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -157,7 +157,7 @@ func (b *ResourceClaimTemplateSpecApplyConfiguration) WithOwnerReferences(values if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -168,7 +168,7 @@ func (b *ResourceClaimTemplateSpecApplyConfiguration) WithOwnerReferences(values func (b *ResourceClaimTemplateSpecApplyConfiguration) WithFinalizers(values ...string) *ResourceClaimTemplateSpecApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } @@ -190,5 +190,5 @@ func (b *ResourceClaimTemplateSpecApplyConfiguration) WithSpec(value *ResourceCl // GetName retrieves the value of the Name field in the declarative configuration. func (b *ResourceClaimTemplateSpecApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceslice.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceslice.go index aaad68612..615cf3e06 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceslice.go +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceslice.go @@ -84,7 +84,7 @@ func extractResourceSlice(resourceSlice *resourcev1alpha3.ResourceSlice, fieldMa // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *ResourceSliceApplyConfiguration) WithKind(value string) *ResourceSliceApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -92,7 +92,7 @@ func (b *ResourceSliceApplyConfiguration) WithKind(value string) *ResourceSliceA // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *ResourceSliceApplyConfiguration) WithAPIVersion(value string) *ResourceSliceApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -101,7 +101,7 @@ func (b *ResourceSliceApplyConfiguration) WithAPIVersion(value string) *Resource // If called multiple times, the Name field is set to the value of the last call. 
func (b *ResourceSliceApplyConfiguration) WithName(value string) *ResourceSliceApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -110,7 +110,7 @@ func (b *ResourceSliceApplyConfiguration) WithName(value string) *ResourceSliceA // If called multiple times, the GenerateName field is set to the value of the last call. func (b *ResourceSliceApplyConfiguration) WithGenerateName(value string) *ResourceSliceApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -119,7 +119,7 @@ func (b *ResourceSliceApplyConfiguration) WithGenerateName(value string) *Resour // If called multiple times, the Namespace field is set to the value of the last call. func (b *ResourceSliceApplyConfiguration) WithNamespace(value string) *ResourceSliceApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -128,7 +128,7 @@ func (b *ResourceSliceApplyConfiguration) WithNamespace(value string) *ResourceS // If called multiple times, the UID field is set to the value of the last call. func (b *ResourceSliceApplyConfiguration) WithUID(value types.UID) *ResourceSliceApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -137,7 +137,7 @@ func (b *ResourceSliceApplyConfiguration) WithUID(value types.UID) *ResourceSlic // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *ResourceSliceApplyConfiguration) WithResourceVersion(value string) *ResourceSliceApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -146,7 +146,7 @@ func (b *ResourceSliceApplyConfiguration) WithResourceVersion(value string) *Res // If called multiple times, the Generation field is set to the value of the last call. func (b *ResourceSliceApplyConfiguration) WithGeneration(value int64) *ResourceSliceApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } @@ -155,7 +155,7 @@ func (b *ResourceSliceApplyConfiguration) WithGeneration(value int64) *ResourceS // If called multiple times, the CreationTimestamp field is set to the value of the last call. func (b *ResourceSliceApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ResourceSliceApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } @@ -164,7 +164,7 @@ func (b *ResourceSliceApplyConfiguration) WithCreationTimestamp(value metav1.Tim // If called multiple times, the DeletionTimestamp field is set to the value of the last call. func (b *ResourceSliceApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ResourceSliceApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -173,7 +173,7 @@ func (b *ResourceSliceApplyConfiguration) WithDeletionTimestamp(value metav1.Tim // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. 
func (b *ResourceSliceApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ResourceSliceApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -183,11 +183,11 @@ func (b *ResourceSliceApplyConfiguration) WithDeletionGracePeriodSeconds(value i // overwriting an existing map entries in Labels field with the same key. func (b *ResourceSliceApplyConfiguration) WithLabels(entries map[string]string) *ResourceSliceApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -198,11 +198,11 @@ func (b *ResourceSliceApplyConfiguration) WithLabels(entries map[string]string) // overwriting an existing map entries in Annotations field with the same key. func (b *ResourceSliceApplyConfiguration) WithAnnotations(entries map[string]string) *ResourceSliceApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -216,7 +216,7 @@ func (b *ResourceSliceApplyConfiguration) WithOwnerReferences(values ...*v1.Owne if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -227,7 +227,7 @@ func (b *ResourceSliceApplyConfiguration) WithOwnerReferences(values ...*v1.Owne func (b *ResourceSliceApplyConfiguration) WithFinalizers(values ...string) *ResourceSliceApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } @@ -249,5 +249,5 @@ func (b *ResourceSliceApplyConfiguration) WithSpec(value *ResourceSliceSpecApply // GetName retrieves the value of the Name field in the declarative configuration. func (b *ResourceSliceApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/allocateddevicestatus.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/allocateddevicestatus.go new file mode 100644 index 000000000..cd5189771 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/allocateddevicestatus.go @@ -0,0 +1,94 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" + v1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// AllocatedDeviceStatusApplyConfiguration represents a declarative configuration of the AllocatedDeviceStatus type for use +// with apply. +type AllocatedDeviceStatusApplyConfiguration struct { + Driver *string `json:"driver,omitempty"` + Pool *string `json:"pool,omitempty"` + Device *string `json:"device,omitempty"` + Conditions []v1.ConditionApplyConfiguration `json:"conditions,omitempty"` + Data *runtime.RawExtension `json:"data,omitempty"` + NetworkData *NetworkDeviceDataApplyConfiguration `json:"networkData,omitempty"` +} + +// AllocatedDeviceStatusApplyConfiguration constructs a declarative configuration of the AllocatedDeviceStatus type for use with +// apply. +func AllocatedDeviceStatus() *AllocatedDeviceStatusApplyConfiguration { + return &AllocatedDeviceStatusApplyConfiguration{} +} + +// WithDriver sets the Driver field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Driver field is set to the value of the last call. +func (b *AllocatedDeviceStatusApplyConfiguration) WithDriver(value string) *AllocatedDeviceStatusApplyConfiguration { + b.Driver = &value + return b +} + +// WithPool sets the Pool field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Pool field is set to the value of the last call. +func (b *AllocatedDeviceStatusApplyConfiguration) WithPool(value string) *AllocatedDeviceStatusApplyConfiguration { + b.Pool = &value + return b +} + +// WithDevice sets the Device field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Device field is set to the value of the last call. +func (b *AllocatedDeviceStatusApplyConfiguration) WithDevice(value string) *AllocatedDeviceStatusApplyConfiguration { + b.Device = &value + return b +} + +// WithConditions adds the given value to the Conditions field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Conditions field. +func (b *AllocatedDeviceStatusApplyConfiguration) WithConditions(values ...*v1.ConditionApplyConfiguration) *AllocatedDeviceStatusApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithConditions") + } + b.Conditions = append(b.Conditions, *values[i]) + } + return b +} + +// WithData sets the Data field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
+// If called multiple times, the Data field is set to the value of the last call. +func (b *AllocatedDeviceStatusApplyConfiguration) WithData(value runtime.RawExtension) *AllocatedDeviceStatusApplyConfiguration { + b.Data = &value + return b +} + +// WithNetworkData sets the NetworkData field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the NetworkData field is set to the value of the last call. +func (b *AllocatedDeviceStatusApplyConfiguration) WithNetworkData(value *NetworkDeviceDataApplyConfiguration) *AllocatedDeviceStatusApplyConfiguration { + b.NetworkData = value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/allocationresult.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/allocationresult.go new file mode 100644 index 000000000..549ef71af --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/allocationresult.go @@ -0,0 +1,52 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta1 + +import ( + v1 "k8s.io/client-go/applyconfigurations/core/v1" +) + +// AllocationResultApplyConfiguration represents a declarative configuration of the AllocationResult type for use +// with apply. +type AllocationResultApplyConfiguration struct { + Devices *DeviceAllocationResultApplyConfiguration `json:"devices,omitempty"` + NodeSelector *v1.NodeSelectorApplyConfiguration `json:"nodeSelector,omitempty"` +} + +// AllocationResultApplyConfiguration constructs a declarative configuration of the AllocationResult type for use with +// apply. +func AllocationResult() *AllocationResultApplyConfiguration { + return &AllocationResultApplyConfiguration{} +} + +// WithDevices sets the Devices field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Devices field is set to the value of the last call. +func (b *AllocationResultApplyConfiguration) WithDevices(value *DeviceAllocationResultApplyConfiguration) *AllocationResultApplyConfiguration { + b.Devices = value + return b +} + +// WithNodeSelector sets the NodeSelector field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the NodeSelector field is set to the value of the last call. 
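// Illustrative sketch, not part of the vendored diff: reporting per-device status with the new
// v1beta1 AllocatedDeviceStatus builder added above. The metav1ac.Condition() builder is the
// generated meta/v1 apply configuration from client-go; driver, pool, device, and condition
// values are placeholders.
package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	metav1ac "k8s.io/client-go/applyconfigurations/meta/v1"
	resourcev1beta1ac "k8s.io/client-go/applyconfigurations/resource/v1beta1"
)

func exampleDeviceStatus() *resourcev1beta1ac.AllocatedDeviceStatusApplyConfiguration {
	// Each With* call returns the receiver, so the status entry is built in one chain.
	return resourcev1beta1ac.AllocatedDeviceStatus().
		WithDriver("dra.example.com").
		WithPool("pool-0").
		WithDevice("gpu-0").
		WithConditions(metav1ac.Condition().
			WithType("Ready").
			WithStatus(metav1.ConditionTrue).
			WithReason("DeviceReady").
			WithLastTransitionTime(metav1.Now()))
}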
+func (b *AllocationResultApplyConfiguration) WithNodeSelector(value *v1.NodeSelectorApplyConfiguration) *AllocationResultApplyConfiguration { + b.NodeSelector = value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/basicdevice.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/basicdevice.go new file mode 100644 index 000000000..691a8f15a --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/basicdevice.go @@ -0,0 +1,64 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta1 + +import ( + resourcev1beta1 "k8s.io/api/resource/v1beta1" +) + +// BasicDeviceApplyConfiguration represents a declarative configuration of the BasicDevice type for use +// with apply. +type BasicDeviceApplyConfiguration struct { + Attributes map[resourcev1beta1.QualifiedName]DeviceAttributeApplyConfiguration `json:"attributes,omitempty"` + Capacity map[resourcev1beta1.QualifiedName]DeviceCapacityApplyConfiguration `json:"capacity,omitempty"` +} + +// BasicDeviceApplyConfiguration constructs a declarative configuration of the BasicDevice type for use with +// apply. +func BasicDevice() *BasicDeviceApplyConfiguration { + return &BasicDeviceApplyConfiguration{} +} + +// WithAttributes puts the entries into the Attributes field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Attributes field, +// overwriting an existing map entries in Attributes field with the same key. +func (b *BasicDeviceApplyConfiguration) WithAttributes(entries map[resourcev1beta1.QualifiedName]DeviceAttributeApplyConfiguration) *BasicDeviceApplyConfiguration { + if b.Attributes == nil && len(entries) > 0 { + b.Attributes = make(map[resourcev1beta1.QualifiedName]DeviceAttributeApplyConfiguration, len(entries)) + } + for k, v := range entries { + b.Attributes[k] = v + } + return b +} + +// WithCapacity puts the entries into the Capacity field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Capacity field, +// overwriting an existing map entries in Capacity field with the same key. 
+func (b *BasicDeviceApplyConfiguration) WithCapacity(entries map[resourcev1beta1.QualifiedName]DeviceCapacityApplyConfiguration) *BasicDeviceApplyConfiguration { + if b.Capacity == nil && len(entries) > 0 { + b.Capacity = make(map[resourcev1beta1.QualifiedName]DeviceCapacityApplyConfiguration, len(entries)) + } + for k, v := range entries { + b.Capacity[k] = v + } + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/celdeviceselector.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/celdeviceselector.go new file mode 100644 index 000000000..c4a28bbf8 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/celdeviceselector.go @@ -0,0 +1,39 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta1 + +// CELDeviceSelectorApplyConfiguration represents a declarative configuration of the CELDeviceSelector type for use +// with apply. +type CELDeviceSelectorApplyConfiguration struct { + Expression *string `json:"expression,omitempty"` +} + +// CELDeviceSelectorApplyConfiguration constructs a declarative configuration of the CELDeviceSelector type for use with +// apply. +func CELDeviceSelector() *CELDeviceSelectorApplyConfiguration { + return &CELDeviceSelectorApplyConfiguration{} +} + +// WithExpression sets the Expression field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Expression field is set to the value of the last call. +func (b *CELDeviceSelectorApplyConfiguration) WithExpression(value string) *CELDeviceSelectorApplyConfiguration { + b.Expression = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/device.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/device.go new file mode 100644 index 000000000..f635267e2 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/device.go @@ -0,0 +1,48 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta1 + +// DeviceApplyConfiguration represents a declarative configuration of the Device type for use +// with apply. 
+type DeviceApplyConfiguration struct { + Name *string `json:"name,omitempty"` + Basic *BasicDeviceApplyConfiguration `json:"basic,omitempty"` +} + +// DeviceApplyConfiguration constructs a declarative configuration of the Device type for use with +// apply. +func Device() *DeviceApplyConfiguration { + return &DeviceApplyConfiguration{} +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *DeviceApplyConfiguration) WithName(value string) *DeviceApplyConfiguration { + b.Name = &value + return b +} + +// WithBasic sets the Basic field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Basic field is set to the value of the last call. +func (b *DeviceApplyConfiguration) WithBasic(value *BasicDeviceApplyConfiguration) *DeviceApplyConfiguration { + b.Basic = value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/deviceallocationconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/deviceallocationconfiguration.go new file mode 100644 index 000000000..b5218ba4a --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/deviceallocationconfiguration.go @@ -0,0 +1,63 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta1 + +import ( + resourcev1beta1 "k8s.io/api/resource/v1beta1" +) + +// DeviceAllocationConfigurationApplyConfiguration represents a declarative configuration of the DeviceAllocationConfiguration type for use +// with apply. +type DeviceAllocationConfigurationApplyConfiguration struct { + Source *resourcev1beta1.AllocationConfigSource `json:"source,omitempty"` + Requests []string `json:"requests,omitempty"` + DeviceConfigurationApplyConfiguration `json:",inline"` +} + +// DeviceAllocationConfigurationApplyConfiguration constructs a declarative configuration of the DeviceAllocationConfiguration type for use with +// apply. +func DeviceAllocationConfiguration() *DeviceAllocationConfigurationApplyConfiguration { + return &DeviceAllocationConfigurationApplyConfiguration{} +} + +// WithSource sets the Source field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Source field is set to the value of the last call. 
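// Illustrative sketch, not part of the vendored diff: composing a v1beta1 Device entry from the
// Device and BasicDevice builders above. BasicDevice keys its attribute and capacity maps by
// resourcev1beta1.QualifiedName; the DeviceAttribute and DeviceCapacity builders come from the
// same generated package (DeviceCapacity appears later in this diff). The attribute names and
// values are placeholders.
package main

import (
	"fmt"

	resourcev1beta1 "k8s.io/api/resource/v1beta1"
	"k8s.io/apimachinery/pkg/api/resource"
	resourcev1beta1ac "k8s.io/client-go/applyconfigurations/resource/v1beta1"
)

func main() {
	device := resourcev1beta1ac.Device().
		WithName("gpu-0").
		WithBasic(resourcev1beta1ac.BasicDevice().
			WithAttributes(map[resourcev1beta1.QualifiedName]resourcev1beta1ac.DeviceAttributeApplyConfiguration{
				"dra.example.com/model": *resourcev1beta1ac.DeviceAttribute().WithStringValue("a100"),
			}).
			WithCapacity(map[resourcev1beta1.QualifiedName]resourcev1beta1ac.DeviceCapacityApplyConfiguration{
				"dra.example.com/memory": *resourcev1beta1ac.DeviceCapacity().WithValue(resource.MustParse("40Gi")),
			}))

	fmt.Println(*device.Name) // prints "gpu-0"
}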
+func (b *DeviceAllocationConfigurationApplyConfiguration) WithSource(value resourcev1beta1.AllocationConfigSource) *DeviceAllocationConfigurationApplyConfiguration { + b.Source = &value + return b +} + +// WithRequests adds the given value to the Requests field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Requests field. +func (b *DeviceAllocationConfigurationApplyConfiguration) WithRequests(values ...string) *DeviceAllocationConfigurationApplyConfiguration { + for i := range values { + b.Requests = append(b.Requests, values[i]) + } + return b +} + +// WithOpaque sets the Opaque field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Opaque field is set to the value of the last call. +func (b *DeviceAllocationConfigurationApplyConfiguration) WithOpaque(value *OpaqueDeviceConfigurationApplyConfiguration) *DeviceAllocationConfigurationApplyConfiguration { + b.DeviceConfigurationApplyConfiguration.Opaque = value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/deviceallocationresult.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/deviceallocationresult.go new file mode 100644 index 000000000..bf309cf23 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/deviceallocationresult.go @@ -0,0 +1,58 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta1 + +// DeviceAllocationResultApplyConfiguration represents a declarative configuration of the DeviceAllocationResult type for use +// with apply. +type DeviceAllocationResultApplyConfiguration struct { + Results []DeviceRequestAllocationResultApplyConfiguration `json:"results,omitempty"` + Config []DeviceAllocationConfigurationApplyConfiguration `json:"config,omitempty"` +} + +// DeviceAllocationResultApplyConfiguration constructs a declarative configuration of the DeviceAllocationResult type for use with +// apply. +func DeviceAllocationResult() *DeviceAllocationResultApplyConfiguration { + return &DeviceAllocationResultApplyConfiguration{} +} + +// WithResults adds the given value to the Results field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Results field. 
+func (b *DeviceAllocationResultApplyConfiguration) WithResults(values ...*DeviceRequestAllocationResultApplyConfiguration) *DeviceAllocationResultApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithResults") + } + b.Results = append(b.Results, *values[i]) + } + return b +} + +// WithConfig adds the given value to the Config field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Config field. +func (b *DeviceAllocationResultApplyConfiguration) WithConfig(values ...*DeviceAllocationConfigurationApplyConfiguration) *DeviceAllocationResultApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithConfig") + } + b.Config = append(b.Config, *values[i]) + } + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/deviceattribute.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/deviceattribute.go new file mode 100644 index 000000000..6e88ae38a --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/deviceattribute.go @@ -0,0 +1,66 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta1 + +// DeviceAttributeApplyConfiguration represents a declarative configuration of the DeviceAttribute type for use +// with apply. +type DeviceAttributeApplyConfiguration struct { + IntValue *int64 `json:"int,omitempty"` + BoolValue *bool `json:"bool,omitempty"` + StringValue *string `json:"string,omitempty"` + VersionValue *string `json:"version,omitempty"` +} + +// DeviceAttributeApplyConfiguration constructs a declarative configuration of the DeviceAttribute type for use with +// apply. +func DeviceAttribute() *DeviceAttributeApplyConfiguration { + return &DeviceAttributeApplyConfiguration{} +} + +// WithIntValue sets the IntValue field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the IntValue field is set to the value of the last call. +func (b *DeviceAttributeApplyConfiguration) WithIntValue(value int64) *DeviceAttributeApplyConfiguration { + b.IntValue = &value + return b +} + +// WithBoolValue sets the BoolValue field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the BoolValue field is set to the value of the last call. 
+func (b *DeviceAttributeApplyConfiguration) WithBoolValue(value bool) *DeviceAttributeApplyConfiguration { + b.BoolValue = &value + return b +} + +// WithStringValue sets the StringValue field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the StringValue field is set to the value of the last call. +func (b *DeviceAttributeApplyConfiguration) WithStringValue(value string) *DeviceAttributeApplyConfiguration { + b.StringValue = &value + return b +} + +// WithVersionValue sets the VersionValue field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the VersionValue field is set to the value of the last call. +func (b *DeviceAttributeApplyConfiguration) WithVersionValue(value string) *DeviceAttributeApplyConfiguration { + b.VersionValue = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/devicecapacity.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/devicecapacity.go new file mode 100644 index 000000000..dcb3504b8 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/devicecapacity.go @@ -0,0 +1,43 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta1 + +import ( + resource "k8s.io/apimachinery/pkg/api/resource" +) + +// DeviceCapacityApplyConfiguration represents a declarative configuration of the DeviceCapacity type for use +// with apply. +type DeviceCapacityApplyConfiguration struct { + Value *resource.Quantity `json:"value,omitempty"` +} + +// DeviceCapacityApplyConfiguration constructs a declarative configuration of the DeviceCapacity type for use with +// apply. +func DeviceCapacity() *DeviceCapacityApplyConfiguration { + return &DeviceCapacityApplyConfiguration{} +} + +// WithValue sets the Value field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Value field is set to the value of the last call. +func (b *DeviceCapacityApplyConfiguration) WithValue(value resource.Quantity) *DeviceCapacityApplyConfiguration { + b.Value = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/deviceclaim.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/deviceclaim.go new file mode 100644 index 000000000..95c1c2e6e --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/deviceclaim.go @@ -0,0 +1,72 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta1 + +// DeviceClaimApplyConfiguration represents a declarative configuration of the DeviceClaim type for use +// with apply. +type DeviceClaimApplyConfiguration struct { + Requests []DeviceRequestApplyConfiguration `json:"requests,omitempty"` + Constraints []DeviceConstraintApplyConfiguration `json:"constraints,omitempty"` + Config []DeviceClaimConfigurationApplyConfiguration `json:"config,omitempty"` +} + +// DeviceClaimApplyConfiguration constructs a declarative configuration of the DeviceClaim type for use with +// apply. +func DeviceClaim() *DeviceClaimApplyConfiguration { + return &DeviceClaimApplyConfiguration{} +} + +// WithRequests adds the given value to the Requests field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Requests field. +func (b *DeviceClaimApplyConfiguration) WithRequests(values ...*DeviceRequestApplyConfiguration) *DeviceClaimApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithRequests") + } + b.Requests = append(b.Requests, *values[i]) + } + return b +} + +// WithConstraints adds the given value to the Constraints field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Constraints field. +func (b *DeviceClaimApplyConfiguration) WithConstraints(values ...*DeviceConstraintApplyConfiguration) *DeviceClaimApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithConstraints") + } + b.Constraints = append(b.Constraints, *values[i]) + } + return b +} + +// WithConfig adds the given value to the Config field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Config field. +func (b *DeviceClaimApplyConfiguration) WithConfig(values ...*DeviceClaimConfigurationApplyConfiguration) *DeviceClaimApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithConfig") + } + b.Config = append(b.Config, *values[i]) + } + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/deviceclaimconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/deviceclaimconfiguration.go new file mode 100644 index 000000000..beac5e9d9 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/deviceclaimconfiguration.go @@ -0,0 +1,50 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta1 + +// DeviceClaimConfigurationApplyConfiguration represents a declarative configuration of the DeviceClaimConfiguration type for use +// with apply. +type DeviceClaimConfigurationApplyConfiguration struct { + Requests []string `json:"requests,omitempty"` + DeviceConfigurationApplyConfiguration `json:",inline"` +} + +// DeviceClaimConfigurationApplyConfiguration constructs a declarative configuration of the DeviceClaimConfiguration type for use with +// apply. +func DeviceClaimConfiguration() *DeviceClaimConfigurationApplyConfiguration { + return &DeviceClaimConfigurationApplyConfiguration{} +} + +// WithRequests adds the given value to the Requests field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Requests field. +func (b *DeviceClaimConfigurationApplyConfiguration) WithRequests(values ...string) *DeviceClaimConfigurationApplyConfiguration { + for i := range values { + b.Requests = append(b.Requests, values[i]) + } + return b +} + +// WithOpaque sets the Opaque field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Opaque field is set to the value of the last call. +func (b *DeviceClaimConfigurationApplyConfiguration) WithOpaque(value *OpaqueDeviceConfigurationApplyConfiguration) *DeviceClaimConfigurationApplyConfiguration { + b.DeviceConfigurationApplyConfiguration.Opaque = value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/deviceclass.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/deviceclass.go new file mode 100644 index 000000000..c71e22259 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/deviceclass.go @@ -0,0 +1,253 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta1 + +import ( + resourcev1beta1 "k8s.io/api/resource/v1beta1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + managedfields "k8s.io/apimachinery/pkg/util/managedfields" + internal "k8s.io/client-go/applyconfigurations/internal" + v1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// DeviceClassApplyConfiguration represents a declarative configuration of the DeviceClass type for use +// with apply. 
+type DeviceClassApplyConfiguration struct { + v1.TypeMetaApplyConfiguration `json:",inline"` + *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *DeviceClassSpecApplyConfiguration `json:"spec,omitempty"` +} + +// DeviceClass constructs a declarative configuration of the DeviceClass type for use with +// apply. +func DeviceClass(name string) *DeviceClassApplyConfiguration { + b := &DeviceClassApplyConfiguration{} + b.WithName(name) + b.WithKind("DeviceClass") + b.WithAPIVersion("resource.k8s.io/v1beta1") + return b +} + +// ExtractDeviceClass extracts the applied configuration owned by fieldManager from +// deviceClass. If no managedFields are found in deviceClass for fieldManager, a +// DeviceClassApplyConfiguration is returned with only the Name, Namespace (if applicable), +// APIVersion and Kind populated. It is possible that no managed fields were found for because other +// field managers have taken ownership of all the fields previously owned by fieldManager, or because +// the fieldManager never owned fields any fields. +// deviceClass must be a unmodified DeviceClass API object that was retrieved from the Kubernetes API. +// ExtractDeviceClass provides a way to perform a extract/modify-in-place/apply workflow. +// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously +// applied if another fieldManager has updated or force applied any of the previously applied fields. +// Experimental! +func ExtractDeviceClass(deviceClass *resourcev1beta1.DeviceClass, fieldManager string) (*DeviceClassApplyConfiguration, error) { + return extractDeviceClass(deviceClass, fieldManager, "") +} + +// ExtractDeviceClassStatus is the same as ExtractDeviceClass except +// that it extracts the status subresource applied configuration. +// Experimental! +func ExtractDeviceClassStatus(deviceClass *resourcev1beta1.DeviceClass, fieldManager string) (*DeviceClassApplyConfiguration, error) { + return extractDeviceClass(deviceClass, fieldManager, "status") +} + +func extractDeviceClass(deviceClass *resourcev1beta1.DeviceClass, fieldManager string, subresource string) (*DeviceClassApplyConfiguration, error) { + b := &DeviceClassApplyConfiguration{} + err := managedfields.ExtractInto(deviceClass, internal.Parser().Type("io.k8s.api.resource.v1beta1.DeviceClass"), fieldManager, b, subresource) + if err != nil { + return nil, err + } + b.WithName(deviceClass.Name) + + b.WithKind("DeviceClass") + b.WithAPIVersion("resource.k8s.io/v1beta1") + return b, nil +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. +func (b *DeviceClassApplyConfiguration) WithKind(value string) *DeviceClassApplyConfiguration { + b.TypeMetaApplyConfiguration.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. 
+func (b *DeviceClassApplyConfiguration) WithAPIVersion(value string) *DeviceClassApplyConfiguration { + b.TypeMetaApplyConfiguration.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *DeviceClassApplyConfiguration) WithName(value string) *DeviceClassApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *DeviceClassApplyConfiguration) WithGenerateName(value string) *DeviceClassApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *DeviceClassApplyConfiguration) WithNamespace(value string) *DeviceClassApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. +func (b *DeviceClassApplyConfiguration) WithUID(value types.UID) *DeviceClassApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *DeviceClassApplyConfiguration) WithResourceVersion(value string) *DeviceClassApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *DeviceClassApplyConfiguration) WithGeneration(value int64) *DeviceClassApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. 
+func (b *DeviceClassApplyConfiguration) WithCreationTimestamp(value metav1.Time) *DeviceClassApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. +func (b *DeviceClassApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *DeviceClassApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. +func (b *DeviceClassApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *DeviceClassApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting an existing map entries in Labels field with the same key. +func (b *DeviceClassApplyConfiguration) WithLabels(entries map[string]string) *DeviceClassApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.ObjectMetaApplyConfiguration.Labels[k] = v + } + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting an existing map entries in Annotations field with the same key. +func (b *DeviceClassApplyConfiguration) WithAnnotations(entries map[string]string) *DeviceClassApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.ObjectMetaApplyConfiguration.Annotations[k] = v + } + return b +} + +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the OwnerReferences field. 
+func (b *DeviceClassApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *DeviceClassApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOwnerReferences") + } + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) + } + return b +} + +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Finalizers field. +func (b *DeviceClassApplyConfiguration) WithFinalizers(values ...string) *DeviceClassApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) + } + return b +} + +func (b *DeviceClassApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + } +} + +// WithSpec sets the Spec field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Spec field is set to the value of the last call. +func (b *DeviceClassApplyConfiguration) WithSpec(value *DeviceClassSpecApplyConfiguration) *DeviceClassApplyConfiguration { + b.Spec = value + return b +} + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *DeviceClassApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/deviceclassconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/deviceclassconfiguration.go new file mode 100644 index 000000000..3ce90eab5 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/deviceclassconfiguration.go @@ -0,0 +1,39 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta1 + +// DeviceClassConfigurationApplyConfiguration represents a declarative configuration of the DeviceClassConfiguration type for use +// with apply. +type DeviceClassConfigurationApplyConfiguration struct { + DeviceConfigurationApplyConfiguration `json:",inline"` +} + +// DeviceClassConfigurationApplyConfiguration constructs a declarative configuration of the DeviceClassConfiguration type for use with +// apply. 
+func DeviceClassConfiguration() *DeviceClassConfigurationApplyConfiguration { + return &DeviceClassConfigurationApplyConfiguration{} +} + +// WithOpaque sets the Opaque field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Opaque field is set to the value of the last call. +func (b *DeviceClassConfigurationApplyConfiguration) WithOpaque(value *OpaqueDeviceConfigurationApplyConfiguration) *DeviceClassConfigurationApplyConfiguration { + b.DeviceConfigurationApplyConfiguration.Opaque = value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/deviceclassspec.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/deviceclassspec.go new file mode 100644 index 000000000..901b0800e --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/deviceclassspec.go @@ -0,0 +1,58 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta1 + +// DeviceClassSpecApplyConfiguration represents a declarative configuration of the DeviceClassSpec type for use +// with apply. +type DeviceClassSpecApplyConfiguration struct { + Selectors []DeviceSelectorApplyConfiguration `json:"selectors,omitempty"` + Config []DeviceClassConfigurationApplyConfiguration `json:"config,omitempty"` +} + +// DeviceClassSpecApplyConfiguration constructs a declarative configuration of the DeviceClassSpec type for use with +// apply. +func DeviceClassSpec() *DeviceClassSpecApplyConfiguration { + return &DeviceClassSpecApplyConfiguration{} +} + +// WithSelectors adds the given value to the Selectors field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Selectors field. +func (b *DeviceClassSpecApplyConfiguration) WithSelectors(values ...*DeviceSelectorApplyConfiguration) *DeviceClassSpecApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithSelectors") + } + b.Selectors = append(b.Selectors, *values[i]) + } + return b +} + +// WithConfig adds the given value to the Config field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Config field. 
+func (b *DeviceClassSpecApplyConfiguration) WithConfig(values ...*DeviceClassConfigurationApplyConfiguration) *DeviceClassSpecApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithConfig") + } + b.Config = append(b.Config, *values[i]) + } + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/deviceconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/deviceconfiguration.go new file mode 100644 index 000000000..b0f41f5a1 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/deviceconfiguration.go @@ -0,0 +1,39 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta1 + +// DeviceConfigurationApplyConfiguration represents a declarative configuration of the DeviceConfiguration type for use +// with apply. +type DeviceConfigurationApplyConfiguration struct { + Opaque *OpaqueDeviceConfigurationApplyConfiguration `json:"opaque,omitempty"` +} + +// DeviceConfigurationApplyConfiguration constructs a declarative configuration of the DeviceConfiguration type for use with +// apply. +func DeviceConfiguration() *DeviceConfigurationApplyConfiguration { + return &DeviceConfigurationApplyConfiguration{} +} + +// WithOpaque sets the Opaque field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Opaque field is set to the value of the last call. +func (b *DeviceConfigurationApplyConfiguration) WithOpaque(value *OpaqueDeviceConfigurationApplyConfiguration) *DeviceConfigurationApplyConfiguration { + b.Opaque = value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/deviceconstraint.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/deviceconstraint.go new file mode 100644 index 000000000..0c5fc2525 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/deviceconstraint.go @@ -0,0 +1,54 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta1 + +import ( + resourcev1beta1 "k8s.io/api/resource/v1beta1" +) + +// DeviceConstraintApplyConfiguration represents a declarative configuration of the DeviceConstraint type for use +// with apply. 
+type DeviceConstraintApplyConfiguration struct { + Requests []string `json:"requests,omitempty"` + MatchAttribute *resourcev1beta1.FullyQualifiedName `json:"matchAttribute,omitempty"` +} + +// DeviceConstraintApplyConfiguration constructs a declarative configuration of the DeviceConstraint type for use with +// apply. +func DeviceConstraint() *DeviceConstraintApplyConfiguration { + return &DeviceConstraintApplyConfiguration{} +} + +// WithRequests adds the given value to the Requests field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Requests field. +func (b *DeviceConstraintApplyConfiguration) WithRequests(values ...string) *DeviceConstraintApplyConfiguration { + for i := range values { + b.Requests = append(b.Requests, values[i]) + } + return b +} + +// WithMatchAttribute sets the MatchAttribute field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the MatchAttribute field is set to the value of the last call. +func (b *DeviceConstraintApplyConfiguration) WithMatchAttribute(value resourcev1beta1.FullyQualifiedName) *DeviceConstraintApplyConfiguration { + b.MatchAttribute = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/devicerequest.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/devicerequest.go new file mode 100644 index 000000000..ea454a275 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/devicerequest.go @@ -0,0 +1,93 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta1 + +import ( + resourcev1beta1 "k8s.io/api/resource/v1beta1" +) + +// DeviceRequestApplyConfiguration represents a declarative configuration of the DeviceRequest type for use +// with apply. +type DeviceRequestApplyConfiguration struct { + Name *string `json:"name,omitempty"` + DeviceClassName *string `json:"deviceClassName,omitempty"` + Selectors []DeviceSelectorApplyConfiguration `json:"selectors,omitempty"` + AllocationMode *resourcev1beta1.DeviceAllocationMode `json:"allocationMode,omitempty"` + Count *int64 `json:"count,omitempty"` + AdminAccess *bool `json:"adminAccess,omitempty"` +} + +// DeviceRequestApplyConfiguration constructs a declarative configuration of the DeviceRequest type for use with +// apply. +func DeviceRequest() *DeviceRequestApplyConfiguration { + return &DeviceRequestApplyConfiguration{} +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. 
+func (b *DeviceRequestApplyConfiguration) WithName(value string) *DeviceRequestApplyConfiguration { + b.Name = &value + return b +} + +// WithDeviceClassName sets the DeviceClassName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeviceClassName field is set to the value of the last call. +func (b *DeviceRequestApplyConfiguration) WithDeviceClassName(value string) *DeviceRequestApplyConfiguration { + b.DeviceClassName = &value + return b +} + +// WithSelectors adds the given value to the Selectors field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Selectors field. +func (b *DeviceRequestApplyConfiguration) WithSelectors(values ...*DeviceSelectorApplyConfiguration) *DeviceRequestApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithSelectors") + } + b.Selectors = append(b.Selectors, *values[i]) + } + return b +} + +// WithAllocationMode sets the AllocationMode field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the AllocationMode field is set to the value of the last call. +func (b *DeviceRequestApplyConfiguration) WithAllocationMode(value resourcev1beta1.DeviceAllocationMode) *DeviceRequestApplyConfiguration { + b.AllocationMode = &value + return b +} + +// WithCount sets the Count field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Count field is set to the value of the last call. +func (b *DeviceRequestApplyConfiguration) WithCount(value int64) *DeviceRequestApplyConfiguration { + b.Count = &value + return b +} + +// WithAdminAccess sets the AdminAccess field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the AdminAccess field is set to the value of the last call. +func (b *DeviceRequestApplyConfiguration) WithAdminAccess(value bool) *DeviceRequestApplyConfiguration { + b.AdminAccess = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/devicerequestallocationresult.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/devicerequestallocationresult.go new file mode 100644 index 000000000..c28eb26ab --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/devicerequestallocationresult.go @@ -0,0 +1,75 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. 
+ +package v1beta1 + +// DeviceRequestAllocationResultApplyConfiguration represents a declarative configuration of the DeviceRequestAllocationResult type for use +// with apply. +type DeviceRequestAllocationResultApplyConfiguration struct { + Request *string `json:"request,omitempty"` + Driver *string `json:"driver,omitempty"` + Pool *string `json:"pool,omitempty"` + Device *string `json:"device,omitempty"` + AdminAccess *bool `json:"adminAccess,omitempty"` +} + +// DeviceRequestAllocationResultApplyConfiguration constructs a declarative configuration of the DeviceRequestAllocationResult type for use with +// apply. +func DeviceRequestAllocationResult() *DeviceRequestAllocationResultApplyConfiguration { + return &DeviceRequestAllocationResultApplyConfiguration{} +} + +// WithRequest sets the Request field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Request field is set to the value of the last call. +func (b *DeviceRequestAllocationResultApplyConfiguration) WithRequest(value string) *DeviceRequestAllocationResultApplyConfiguration { + b.Request = &value + return b +} + +// WithDriver sets the Driver field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Driver field is set to the value of the last call. +func (b *DeviceRequestAllocationResultApplyConfiguration) WithDriver(value string) *DeviceRequestAllocationResultApplyConfiguration { + b.Driver = &value + return b +} + +// WithPool sets the Pool field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Pool field is set to the value of the last call. +func (b *DeviceRequestAllocationResultApplyConfiguration) WithPool(value string) *DeviceRequestAllocationResultApplyConfiguration { + b.Pool = &value + return b +} + +// WithDevice sets the Device field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Device field is set to the value of the last call. +func (b *DeviceRequestAllocationResultApplyConfiguration) WithDevice(value string) *DeviceRequestAllocationResultApplyConfiguration { + b.Device = &value + return b +} + +// WithAdminAccess sets the AdminAccess field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the AdminAccess field is set to the value of the last call. +func (b *DeviceRequestAllocationResultApplyConfiguration) WithAdminAccess(value bool) *DeviceRequestAllocationResultApplyConfiguration { + b.AdminAccess = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/deviceselector.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/deviceselector.go new file mode 100644 index 000000000..bf60bf434 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/deviceselector.go @@ -0,0 +1,39 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta1 + +// DeviceSelectorApplyConfiguration represents a declarative configuration of the DeviceSelector type for use +// with apply. +type DeviceSelectorApplyConfiguration struct { + CEL *CELDeviceSelectorApplyConfiguration `json:"cel,omitempty"` +} + +// DeviceSelectorApplyConfiguration constructs a declarative configuration of the DeviceSelector type for use with +// apply. +func DeviceSelector() *DeviceSelectorApplyConfiguration { + return &DeviceSelectorApplyConfiguration{} +} + +// WithCEL sets the CEL field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CEL field is set to the value of the last call. +func (b *DeviceSelectorApplyConfiguration) WithCEL(value *CELDeviceSelectorApplyConfiguration) *DeviceSelectorApplyConfiguration { + b.CEL = value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/networkdevicedata.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/networkdevicedata.go new file mode 100644 index 000000000..c9d488019 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/networkdevicedata.go @@ -0,0 +1,59 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta1 + +// NetworkDeviceDataApplyConfiguration represents a declarative configuration of the NetworkDeviceData type for use +// with apply. +type NetworkDeviceDataApplyConfiguration struct { + InterfaceName *string `json:"interfaceName,omitempty"` + IPs []string `json:"ips,omitempty"` + HardwareAddress *string `json:"hardwareAddress,omitempty"` +} + +// NetworkDeviceDataApplyConfiguration constructs a declarative configuration of the NetworkDeviceData type for use with +// apply. +func NetworkDeviceData() *NetworkDeviceDataApplyConfiguration { + return &NetworkDeviceDataApplyConfiguration{} +} + +// WithInterfaceName sets the InterfaceName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the InterfaceName field is set to the value of the last call. 
+func (b *NetworkDeviceDataApplyConfiguration) WithInterfaceName(value string) *NetworkDeviceDataApplyConfiguration { + b.InterfaceName = &value + return b +} + +// WithIPs adds the given value to the IPs field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the IPs field. +func (b *NetworkDeviceDataApplyConfiguration) WithIPs(values ...string) *NetworkDeviceDataApplyConfiguration { + for i := range values { + b.IPs = append(b.IPs, values[i]) + } + return b +} + +// WithHardwareAddress sets the HardwareAddress field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the HardwareAddress field is set to the value of the last call. +func (b *NetworkDeviceDataApplyConfiguration) WithHardwareAddress(value string) *NetworkDeviceDataApplyConfiguration { + b.HardwareAddress = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/opaquedeviceconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/opaquedeviceconfiguration.go new file mode 100644 index 000000000..0b52fa93a --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/opaquedeviceconfiguration.go @@ -0,0 +1,52 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// OpaqueDeviceConfigurationApplyConfiguration represents a declarative configuration of the OpaqueDeviceConfiguration type for use +// with apply. +type OpaqueDeviceConfigurationApplyConfiguration struct { + Driver *string `json:"driver,omitempty"` + Parameters *runtime.RawExtension `json:"parameters,omitempty"` +} + +// OpaqueDeviceConfigurationApplyConfiguration constructs a declarative configuration of the OpaqueDeviceConfiguration type for use with +// apply. +func OpaqueDeviceConfiguration() *OpaqueDeviceConfigurationApplyConfiguration { + return &OpaqueDeviceConfigurationApplyConfiguration{} +} + +// WithDriver sets the Driver field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Driver field is set to the value of the last call. +func (b *OpaqueDeviceConfigurationApplyConfiguration) WithDriver(value string) *OpaqueDeviceConfigurationApplyConfiguration { + b.Driver = &value + return b +} + +// WithParameters sets the Parameters field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Parameters field is set to the value of the last call. 
+func (b *OpaqueDeviceConfigurationApplyConfiguration) WithParameters(value runtime.RawExtension) *OpaqueDeviceConfigurationApplyConfiguration { + b.Parameters = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/podschedulingcontext.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/resourceclaim.go similarity index 59% rename from vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/podschedulingcontext.go rename to vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/resourceclaim.go index ee8e73ebe..ee16718fd 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/podschedulingcontext.go +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/resourceclaim.go @@ -16,10 +16,10 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. -package v1alpha3 +package v1beta1 import ( - resourcev1alpha3 "k8s.io/api/resource/v1alpha3" + resourcev1beta1 "k8s.io/api/resource/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" @@ -27,156 +27,156 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// PodSchedulingContextApplyConfiguration represents a declarative configuration of the PodSchedulingContext type for use +// ResourceClaimApplyConfiguration represents a declarative configuration of the ResourceClaim type for use // with apply. -type PodSchedulingContextApplyConfiguration struct { +type ResourceClaimApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Spec *PodSchedulingContextSpecApplyConfiguration `json:"spec,omitempty"` - Status *PodSchedulingContextStatusApplyConfiguration `json:"status,omitempty"` + Spec *ResourceClaimSpecApplyConfiguration `json:"spec,omitempty"` + Status *ResourceClaimStatusApplyConfiguration `json:"status,omitempty"` } -// PodSchedulingContext constructs a declarative configuration of the PodSchedulingContext type for use with +// ResourceClaim constructs a declarative configuration of the ResourceClaim type for use with // apply. -func PodSchedulingContext(name, namespace string) *PodSchedulingContextApplyConfiguration { - b := &PodSchedulingContextApplyConfiguration{} +func ResourceClaim(name, namespace string) *ResourceClaimApplyConfiguration { + b := &ResourceClaimApplyConfiguration{} b.WithName(name) b.WithNamespace(namespace) - b.WithKind("PodSchedulingContext") - b.WithAPIVersion("resource.k8s.io/v1alpha3") + b.WithKind("ResourceClaim") + b.WithAPIVersion("resource.k8s.io/v1beta1") return b } -// ExtractPodSchedulingContext extracts the applied configuration owned by fieldManager from -// podSchedulingContext. If no managedFields are found in podSchedulingContext for fieldManager, a -// PodSchedulingContextApplyConfiguration is returned with only the Name, Namespace (if applicable), +// ExtractResourceClaim extracts the applied configuration owned by fieldManager from +// resourceClaim. If no managedFields are found in resourceClaim for fieldManager, a +// ResourceClaimApplyConfiguration is returned with only the Name, Namespace (if applicable), // APIVersion and Kind populated. It is possible that no managed fields were found for because other // field managers have taken ownership of all the fields previously owned by fieldManager, or because // the fieldManager never owned fields any fields. 
-// podSchedulingContext must be a unmodified PodSchedulingContext API object that was retrieved from the Kubernetes API. -// ExtractPodSchedulingContext provides a way to perform a extract/modify-in-place/apply workflow. +// resourceClaim must be a unmodified ResourceClaim API object that was retrieved from the Kubernetes API. +// ExtractResourceClaim provides a way to perform a extract/modify-in-place/apply workflow. // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! -func ExtractPodSchedulingContext(podSchedulingContext *resourcev1alpha3.PodSchedulingContext, fieldManager string) (*PodSchedulingContextApplyConfiguration, error) { - return extractPodSchedulingContext(podSchedulingContext, fieldManager, "") +func ExtractResourceClaim(resourceClaim *resourcev1beta1.ResourceClaim, fieldManager string) (*ResourceClaimApplyConfiguration, error) { + return extractResourceClaim(resourceClaim, fieldManager, "") } -// ExtractPodSchedulingContextStatus is the same as ExtractPodSchedulingContext except +// ExtractResourceClaimStatus is the same as ExtractResourceClaim except // that it extracts the status subresource applied configuration. // Experimental! -func ExtractPodSchedulingContextStatus(podSchedulingContext *resourcev1alpha3.PodSchedulingContext, fieldManager string) (*PodSchedulingContextApplyConfiguration, error) { - return extractPodSchedulingContext(podSchedulingContext, fieldManager, "status") +func ExtractResourceClaimStatus(resourceClaim *resourcev1beta1.ResourceClaim, fieldManager string) (*ResourceClaimApplyConfiguration, error) { + return extractResourceClaim(resourceClaim, fieldManager, "status") } -func extractPodSchedulingContext(podSchedulingContext *resourcev1alpha3.PodSchedulingContext, fieldManager string, subresource string) (*PodSchedulingContextApplyConfiguration, error) { - b := &PodSchedulingContextApplyConfiguration{} - err := managedfields.ExtractInto(podSchedulingContext, internal.Parser().Type("io.k8s.api.resource.v1alpha3.PodSchedulingContext"), fieldManager, b, subresource) +func extractResourceClaim(resourceClaim *resourcev1beta1.ResourceClaim, fieldManager string, subresource string) (*ResourceClaimApplyConfiguration, error) { + b := &ResourceClaimApplyConfiguration{} + err := managedfields.ExtractInto(resourceClaim, internal.Parser().Type("io.k8s.api.resource.v1beta1.ResourceClaim"), fieldManager, b, subresource) if err != nil { return nil, err } - b.WithName(podSchedulingContext.Name) - b.WithNamespace(podSchedulingContext.Namespace) + b.WithName(resourceClaim.Name) + b.WithNamespace(resourceClaim.Namespace) - b.WithKind("PodSchedulingContext") - b.WithAPIVersion("resource.k8s.io/v1alpha3") + b.WithKind("ResourceClaim") + b.WithAPIVersion("resource.k8s.io/v1beta1") return b, nil } // WithKind sets the Kind field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. 
-func (b *PodSchedulingContextApplyConfiguration) WithKind(value string) *PodSchedulingContextApplyConfiguration { - b.Kind = &value +func (b *ResourceClaimApplyConfiguration) WithKind(value string) *ResourceClaimApplyConfiguration { + b.TypeMetaApplyConfiguration.Kind = &value return b } // WithAPIVersion sets the APIVersion field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. -func (b *PodSchedulingContextApplyConfiguration) WithAPIVersion(value string) *PodSchedulingContextApplyConfiguration { - b.APIVersion = &value +func (b *ResourceClaimApplyConfiguration) WithAPIVersion(value string) *ResourceClaimApplyConfiguration { + b.TypeMetaApplyConfiguration.APIVersion = &value return b } // WithName sets the Name field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Name field is set to the value of the last call. -func (b *PodSchedulingContextApplyConfiguration) WithName(value string) *PodSchedulingContextApplyConfiguration { +func (b *ResourceClaimApplyConfiguration) WithName(value string) *ResourceClaimApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } // WithGenerateName sets the GenerateName field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the GenerateName field is set to the value of the last call. -func (b *PodSchedulingContextApplyConfiguration) WithGenerateName(value string) *PodSchedulingContextApplyConfiguration { +func (b *ResourceClaimApplyConfiguration) WithGenerateName(value string) *ResourceClaimApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } // WithNamespace sets the Namespace field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Namespace field is set to the value of the last call. -func (b *PodSchedulingContextApplyConfiguration) WithNamespace(value string) *PodSchedulingContextApplyConfiguration { +func (b *ResourceClaimApplyConfiguration) WithNamespace(value string) *ResourceClaimApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } // WithUID sets the UID field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the UID field is set to the value of the last call. 
-func (b *PodSchedulingContextApplyConfiguration) WithUID(value types.UID) *PodSchedulingContextApplyConfiguration { +func (b *ResourceClaimApplyConfiguration) WithUID(value types.UID) *ResourceClaimApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } // WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the ResourceVersion field is set to the value of the last call. -func (b *PodSchedulingContextApplyConfiguration) WithResourceVersion(value string) *PodSchedulingContextApplyConfiguration { +func (b *ResourceClaimApplyConfiguration) WithResourceVersion(value string) *ResourceClaimApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } // WithGeneration sets the Generation field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Generation field is set to the value of the last call. -func (b *PodSchedulingContextApplyConfiguration) WithGeneration(value int64) *PodSchedulingContextApplyConfiguration { +func (b *ResourceClaimApplyConfiguration) WithGeneration(value int64) *ResourceClaimApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CreationTimestamp field is set to the value of the last call. -func (b *PodSchedulingContextApplyConfiguration) WithCreationTimestamp(value metav1.Time) *PodSchedulingContextApplyConfiguration { +func (b *ResourceClaimApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ResourceClaimApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. -func (b *PodSchedulingContextApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *PodSchedulingContextApplyConfiguration { +func (b *ResourceClaimApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ResourceClaimApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } // WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. 
-func (b *PodSchedulingContextApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *PodSchedulingContextApplyConfiguration { +func (b *ResourceClaimApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ResourceClaimApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -184,13 +184,13 @@ func (b *PodSchedulingContextApplyConfiguration) WithDeletionGracePeriodSeconds( // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, the entries provided by each call will be put on the Labels field, // overwriting an existing map entries in Labels field with the same key. -func (b *PodSchedulingContextApplyConfiguration) WithLabels(entries map[string]string) *PodSchedulingContextApplyConfiguration { +func (b *ResourceClaimApplyConfiguration) WithLabels(entries map[string]string) *ResourceClaimApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -199,13 +199,13 @@ func (b *PodSchedulingContextApplyConfiguration) WithLabels(entries map[string]s // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, the entries provided by each call will be put on the Annotations field, // overwriting an existing map entries in Annotations field with the same key. -func (b *PodSchedulingContextApplyConfiguration) WithAnnotations(entries map[string]string) *PodSchedulingContextApplyConfiguration { +func (b *ResourceClaimApplyConfiguration) WithAnnotations(entries map[string]string) *ResourceClaimApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -213,13 +213,13 @@ func (b *PodSchedulingContextApplyConfiguration) WithAnnotations(entries map[str // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the OwnerReferences field. 
-func (b *PodSchedulingContextApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *PodSchedulingContextApplyConfiguration { +func (b *ResourceClaimApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ResourceClaimApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -227,15 +227,15 @@ func (b *PodSchedulingContextApplyConfiguration) WithOwnerReferences(values ...* // WithFinalizers adds the given value to the Finalizers field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the Finalizers field. -func (b *PodSchedulingContextApplyConfiguration) WithFinalizers(values ...string) *PodSchedulingContextApplyConfiguration { +func (b *ResourceClaimApplyConfiguration) WithFinalizers(values ...string) *ResourceClaimApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } -func (b *PodSchedulingContextApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { +func (b *ResourceClaimApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} } @@ -244,7 +244,7 @@ func (b *PodSchedulingContextApplyConfiguration) ensureObjectMetaApplyConfigurat // WithSpec sets the Spec field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Spec field is set to the value of the last call. -func (b *PodSchedulingContextApplyConfiguration) WithSpec(value *PodSchedulingContextSpecApplyConfiguration) *PodSchedulingContextApplyConfiguration { +func (b *ResourceClaimApplyConfiguration) WithSpec(value *ResourceClaimSpecApplyConfiguration) *ResourceClaimApplyConfiguration { b.Spec = value return b } @@ -252,13 +252,13 @@ func (b *PodSchedulingContextApplyConfiguration) WithSpec(value *PodSchedulingCo // WithStatus sets the Status field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Status field is set to the value of the last call. -func (b *PodSchedulingContextApplyConfiguration) WithStatus(value *PodSchedulingContextStatusApplyConfiguration) *PodSchedulingContextApplyConfiguration { +func (b *ResourceClaimApplyConfiguration) WithStatus(value *ResourceClaimStatusApplyConfiguration) *ResourceClaimApplyConfiguration { b.Status = value return b } // GetName retrieves the value of the Name field in the declarative configuration. 
-func (b *PodSchedulingContextApplyConfiguration) GetName() *string { +func (b *ResourceClaimApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/resourceclaimconsumerreference.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/resourceclaimconsumerreference.go new file mode 100644 index 000000000..f6eefdda5 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/resourceclaimconsumerreference.go @@ -0,0 +1,70 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta1 + +import ( + types "k8s.io/apimachinery/pkg/types" +) + +// ResourceClaimConsumerReferenceApplyConfiguration represents a declarative configuration of the ResourceClaimConsumerReference type for use +// with apply. +type ResourceClaimConsumerReferenceApplyConfiguration struct { + APIGroup *string `json:"apiGroup,omitempty"` + Resource *string `json:"resource,omitempty"` + Name *string `json:"name,omitempty"` + UID *types.UID `json:"uid,omitempty"` +} + +// ResourceClaimConsumerReferenceApplyConfiguration constructs a declarative configuration of the ResourceClaimConsumerReference type for use with +// apply. +func ResourceClaimConsumerReference() *ResourceClaimConsumerReferenceApplyConfiguration { + return &ResourceClaimConsumerReferenceApplyConfiguration{} +} + +// WithAPIGroup sets the APIGroup field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIGroup field is set to the value of the last call. +func (b *ResourceClaimConsumerReferenceApplyConfiguration) WithAPIGroup(value string) *ResourceClaimConsumerReferenceApplyConfiguration { + b.APIGroup = &value + return b +} + +// WithResource sets the Resource field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Resource field is set to the value of the last call. +func (b *ResourceClaimConsumerReferenceApplyConfiguration) WithResource(value string) *ResourceClaimConsumerReferenceApplyConfiguration { + b.Resource = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. 
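Aside (editor's note, not part of the vendored files): the modified hunks above are mechanical — every promoted field access such as b.Kind or b.Name is rewritten to go through the embedded TypeMetaApplyConfiguration / ObjectMetaApplyConfiguration explicitly, with no behavioural change. A minimal, self-contained sketch (made-up types, not the client-go ones) of why both spellings refer to the same field:

package main

import "fmt"

// TypeMeta stands in for the embedded TypeMetaApplyConfiguration.
type TypeMeta struct {
	Kind *string
}

// Builder stands in for an apply configuration that embeds TypeMeta,
// so Builder promotes the Kind field.
type Builder struct {
	TypeMeta
}

func main() {
	b := &Builder{}
	kind := "ResourceClaim"
	b.TypeMeta.Kind = &kind // explicit qualification, as in the generated code above
	fmt.Println(*b.Kind)    // promoted access reads the very same field
}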
+func (b *ResourceClaimConsumerReferenceApplyConfiguration) WithName(value string) *ResourceClaimConsumerReferenceApplyConfiguration { + b.Name = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. +func (b *ResourceClaimConsumerReferenceApplyConfiguration) WithUID(value types.UID) *ResourceClaimConsumerReferenceApplyConfiguration { + b.UID = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/resourceclaimspec.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/resourceclaimspec.go new file mode 100644 index 000000000..c6b1b0b4b --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/resourceclaimspec.go @@ -0,0 +1,39 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta1 + +// ResourceClaimSpecApplyConfiguration represents a declarative configuration of the ResourceClaimSpec type for use +// with apply. +type ResourceClaimSpecApplyConfiguration struct { + Devices *DeviceClaimApplyConfiguration `json:"devices,omitempty"` +} + +// ResourceClaimSpecApplyConfiguration constructs a declarative configuration of the ResourceClaimSpec type for use with +// apply. +func ResourceClaimSpec() *ResourceClaimSpecApplyConfiguration { + return &ResourceClaimSpecApplyConfiguration{} +} + +// WithDevices sets the Devices field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Devices field is set to the value of the last call. +func (b *ResourceClaimSpecApplyConfiguration) WithDevices(value *DeviceClaimApplyConfiguration) *ResourceClaimSpecApplyConfiguration { + b.Devices = value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/resourceclaimstatus.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/resourceclaimstatus.go new file mode 100644 index 000000000..bb3db18be --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/resourceclaimstatus.go @@ -0,0 +1,67 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. 
+ +package v1beta1 + +// ResourceClaimStatusApplyConfiguration represents a declarative configuration of the ResourceClaimStatus type for use +// with apply. +type ResourceClaimStatusApplyConfiguration struct { + Allocation *AllocationResultApplyConfiguration `json:"allocation,omitempty"` + ReservedFor []ResourceClaimConsumerReferenceApplyConfiguration `json:"reservedFor,omitempty"` + Devices []AllocatedDeviceStatusApplyConfiguration `json:"devices,omitempty"` +} + +// ResourceClaimStatusApplyConfiguration constructs a declarative configuration of the ResourceClaimStatus type for use with +// apply. +func ResourceClaimStatus() *ResourceClaimStatusApplyConfiguration { + return &ResourceClaimStatusApplyConfiguration{} +} + +// WithAllocation sets the Allocation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Allocation field is set to the value of the last call. +func (b *ResourceClaimStatusApplyConfiguration) WithAllocation(value *AllocationResultApplyConfiguration) *ResourceClaimStatusApplyConfiguration { + b.Allocation = value + return b +} + +// WithReservedFor adds the given value to the ReservedFor field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the ReservedFor field. +func (b *ResourceClaimStatusApplyConfiguration) WithReservedFor(values ...*ResourceClaimConsumerReferenceApplyConfiguration) *ResourceClaimStatusApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithReservedFor") + } + b.ReservedFor = append(b.ReservedFor, *values[i]) + } + return b +} + +// WithDevices adds the given value to the Devices field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Devices field. +func (b *ResourceClaimStatusApplyConfiguration) WithDevices(values ...*AllocatedDeviceStatusApplyConfiguration) *ResourceClaimStatusApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithDevices") + } + b.Devices = append(b.Devices, *values[i]) + } + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/resourceclaimtemplate.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/resourceclaimtemplate.go new file mode 100644 index 000000000..490ecf5e7 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/resourceclaimtemplate.go @@ -0,0 +1,255 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. 
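Aside (illustrative sketch, not part of the vendored files): the ResourceClaimStatus builder just added chains like the rest of the package. Only constructors that appear in this diff are used; the consumer name and UID below are made up.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/types"
	applyresource "k8s.io/client-go/applyconfigurations/resource/v1beta1"
)

func main() {
	// Reserve a claim for a (hypothetical) pod consumer.
	status := applyresource.ResourceClaimStatus().
		WithReservedFor(applyresource.ResourceClaimConsumerReference().
			WithResource("pods").
			WithName("example-pod").                                     // assumed name
			WithUID(types.UID("00000000-0000-0000-0000-000000000000"))) // assumed UID

	fmt.Println(*status.ReservedFor[0].Name) // "example-pod"
}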
+ +package v1beta1 + +import ( + resourcev1beta1 "k8s.io/api/resource/v1beta1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + managedfields "k8s.io/apimachinery/pkg/util/managedfields" + internal "k8s.io/client-go/applyconfigurations/internal" + v1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// ResourceClaimTemplateApplyConfiguration represents a declarative configuration of the ResourceClaimTemplate type for use +// with apply. +type ResourceClaimTemplateApplyConfiguration struct { + v1.TypeMetaApplyConfiguration `json:",inline"` + *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *ResourceClaimTemplateSpecApplyConfiguration `json:"spec,omitempty"` +} + +// ResourceClaimTemplate constructs a declarative configuration of the ResourceClaimTemplate type for use with +// apply. +func ResourceClaimTemplate(name, namespace string) *ResourceClaimTemplateApplyConfiguration { + b := &ResourceClaimTemplateApplyConfiguration{} + b.WithName(name) + b.WithNamespace(namespace) + b.WithKind("ResourceClaimTemplate") + b.WithAPIVersion("resource.k8s.io/v1beta1") + return b +} + +// ExtractResourceClaimTemplate extracts the applied configuration owned by fieldManager from +// resourceClaimTemplate. If no managedFields are found in resourceClaimTemplate for fieldManager, a +// ResourceClaimTemplateApplyConfiguration is returned with only the Name, Namespace (if applicable), +// APIVersion and Kind populated. It is possible that no managed fields were found for because other +// field managers have taken ownership of all the fields previously owned by fieldManager, or because +// the fieldManager never owned fields any fields. +// resourceClaimTemplate must be a unmodified ResourceClaimTemplate API object that was retrieved from the Kubernetes API. +// ExtractResourceClaimTemplate provides a way to perform a extract/modify-in-place/apply workflow. +// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously +// applied if another fieldManager has updated or force applied any of the previously applied fields. +// Experimental! +func ExtractResourceClaimTemplate(resourceClaimTemplate *resourcev1beta1.ResourceClaimTemplate, fieldManager string) (*ResourceClaimTemplateApplyConfiguration, error) { + return extractResourceClaimTemplate(resourceClaimTemplate, fieldManager, "") +} + +// ExtractResourceClaimTemplateStatus is the same as ExtractResourceClaimTemplate except +// that it extracts the status subresource applied configuration. +// Experimental! 
+func ExtractResourceClaimTemplateStatus(resourceClaimTemplate *resourcev1beta1.ResourceClaimTemplate, fieldManager string) (*ResourceClaimTemplateApplyConfiguration, error) { + return extractResourceClaimTemplate(resourceClaimTemplate, fieldManager, "status") +} + +func extractResourceClaimTemplate(resourceClaimTemplate *resourcev1beta1.ResourceClaimTemplate, fieldManager string, subresource string) (*ResourceClaimTemplateApplyConfiguration, error) { + b := &ResourceClaimTemplateApplyConfiguration{} + err := managedfields.ExtractInto(resourceClaimTemplate, internal.Parser().Type("io.k8s.api.resource.v1beta1.ResourceClaimTemplate"), fieldManager, b, subresource) + if err != nil { + return nil, err + } + b.WithName(resourceClaimTemplate.Name) + b.WithNamespace(resourceClaimTemplate.Namespace) + + b.WithKind("ResourceClaimTemplate") + b.WithAPIVersion("resource.k8s.io/v1beta1") + return b, nil +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. +func (b *ResourceClaimTemplateApplyConfiguration) WithKind(value string) *ResourceClaimTemplateApplyConfiguration { + b.TypeMetaApplyConfiguration.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *ResourceClaimTemplateApplyConfiguration) WithAPIVersion(value string) *ResourceClaimTemplateApplyConfiguration { + b.TypeMetaApplyConfiguration.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *ResourceClaimTemplateApplyConfiguration) WithName(value string) *ResourceClaimTemplateApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *ResourceClaimTemplateApplyConfiguration) WithGenerateName(value string) *ResourceClaimTemplateApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. 
+func (b *ResourceClaimTemplateApplyConfiguration) WithNamespace(value string) *ResourceClaimTemplateApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. +func (b *ResourceClaimTemplateApplyConfiguration) WithUID(value types.UID) *ResourceClaimTemplateApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *ResourceClaimTemplateApplyConfiguration) WithResourceVersion(value string) *ResourceClaimTemplateApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *ResourceClaimTemplateApplyConfiguration) WithGeneration(value int64) *ResourceClaimTemplateApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. +func (b *ResourceClaimTemplateApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ResourceClaimTemplateApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. +func (b *ResourceClaimTemplateApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ResourceClaimTemplateApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. 
+func (b *ResourceClaimTemplateApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ResourceClaimTemplateApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting an existing map entries in Labels field with the same key. +func (b *ResourceClaimTemplateApplyConfiguration) WithLabels(entries map[string]string) *ResourceClaimTemplateApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.ObjectMetaApplyConfiguration.Labels[k] = v + } + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting an existing map entries in Annotations field with the same key. +func (b *ResourceClaimTemplateApplyConfiguration) WithAnnotations(entries map[string]string) *ResourceClaimTemplateApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.ObjectMetaApplyConfiguration.Annotations[k] = v + } + return b +} + +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the OwnerReferences field. +func (b *ResourceClaimTemplateApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ResourceClaimTemplateApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOwnerReferences") + } + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) + } + return b +} + +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Finalizers field. 
+func (b *ResourceClaimTemplateApplyConfiguration) WithFinalizers(values ...string) *ResourceClaimTemplateApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) + } + return b +} + +func (b *ResourceClaimTemplateApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + } +} + +// WithSpec sets the Spec field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Spec field is set to the value of the last call. +func (b *ResourceClaimTemplateApplyConfiguration) WithSpec(value *ResourceClaimTemplateSpecApplyConfiguration) *ResourceClaimTemplateApplyConfiguration { + b.Spec = value + return b +} + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *ResourceClaimTemplateApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/resourceclaimtemplatespec.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/resourceclaimtemplatespec.go new file mode 100644 index 000000000..9df32360f --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/resourceclaimtemplatespec.go @@ -0,0 +1,194 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + v1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// ResourceClaimTemplateSpecApplyConfiguration represents a declarative configuration of the ResourceClaimTemplateSpec type for use +// with apply. +type ResourceClaimTemplateSpecApplyConfiguration struct { + *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *ResourceClaimSpecApplyConfiguration `json:"spec,omitempty"` +} + +// ResourceClaimTemplateSpecApplyConfiguration constructs a declarative configuration of the ResourceClaimTemplateSpec type for use with +// apply. +func ResourceClaimTemplateSpec() *ResourceClaimTemplateSpecApplyConfiguration { + return &ResourceClaimTemplateSpecApplyConfiguration{} +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. 
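Aside (illustrative sketch, not part of the vendored files): ExtractResourceClaimTemplate above supports the extract / modify-in-place / apply flow for server-side apply. The field-manager name and label below are assumptions, and the final Apply call against a typed client is omitted so the sketch needs no cluster.

package main

import (
	"fmt"

	resourceapi "k8s.io/api/resource/v1beta1"
	applyresource "k8s.io/client-go/applyconfigurations/resource/v1beta1"
)

func main() {
	// In a real workflow this object would come from a Get call; a bare
	// object stands in here so the example is self-contained.
	live := &resourceapi.ResourceClaimTemplate{}
	live.Name = "example-template"
	live.Namespace = "default"

	// Extract only the fields previously owned by the "example-manager"
	// field manager (none in this toy object, so the result is mostly empty).
	ac, err := applyresource.ExtractResourceClaimTemplate(live, "example-manager")
	if err != nil {
		panic(err)
	}

	// Modify in place, then hand ac to a typed client's Apply call using the
	// same field manager (omitted here).
	ac.WithLabels(map[string]string{"app": "example"})

	fmt.Println(*ac.GetName(), *ac.ObjectMetaApplyConfiguration.Namespace)
}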
+func (b *ResourceClaimTemplateSpecApplyConfiguration) WithName(value string) *ResourceClaimTemplateSpecApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *ResourceClaimTemplateSpecApplyConfiguration) WithGenerateName(value string) *ResourceClaimTemplateSpecApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *ResourceClaimTemplateSpecApplyConfiguration) WithNamespace(value string) *ResourceClaimTemplateSpecApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. +func (b *ResourceClaimTemplateSpecApplyConfiguration) WithUID(value types.UID) *ResourceClaimTemplateSpecApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *ResourceClaimTemplateSpecApplyConfiguration) WithResourceVersion(value string) *ResourceClaimTemplateSpecApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *ResourceClaimTemplateSpecApplyConfiguration) WithGeneration(value int64) *ResourceClaimTemplateSpecApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. 
+func (b *ResourceClaimTemplateSpecApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ResourceClaimTemplateSpecApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. +func (b *ResourceClaimTemplateSpecApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ResourceClaimTemplateSpecApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. +func (b *ResourceClaimTemplateSpecApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ResourceClaimTemplateSpecApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting an existing map entries in Labels field with the same key. +func (b *ResourceClaimTemplateSpecApplyConfiguration) WithLabels(entries map[string]string) *ResourceClaimTemplateSpecApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.ObjectMetaApplyConfiguration.Labels[k] = v + } + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting an existing map entries in Annotations field with the same key. +func (b *ResourceClaimTemplateSpecApplyConfiguration) WithAnnotations(entries map[string]string) *ResourceClaimTemplateSpecApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.ObjectMetaApplyConfiguration.Annotations[k] = v + } + return b +} + +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the OwnerReferences field. 
+func (b *ResourceClaimTemplateSpecApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ResourceClaimTemplateSpecApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOwnerReferences") + } + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) + } + return b +} + +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Finalizers field. +func (b *ResourceClaimTemplateSpecApplyConfiguration) WithFinalizers(values ...string) *ResourceClaimTemplateSpecApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) + } + return b +} + +func (b *ResourceClaimTemplateSpecApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + } +} + +// WithSpec sets the Spec field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Spec field is set to the value of the last call. +func (b *ResourceClaimTemplateSpecApplyConfiguration) WithSpec(value *ResourceClaimSpecApplyConfiguration) *ResourceClaimTemplateSpecApplyConfiguration { + b.Spec = value + return b +} + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *ResourceClaimTemplateSpecApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/resourcepool.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/resourcepool.go new file mode 100644 index 000000000..33c155b52 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/resourcepool.go @@ -0,0 +1,57 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta1 + +// ResourcePoolApplyConfiguration represents a declarative configuration of the ResourcePool type for use +// with apply. +type ResourcePoolApplyConfiguration struct { + Name *string `json:"name,omitempty"` + Generation *int64 `json:"generation,omitempty"` + ResourceSliceCount *int64 `json:"resourceSliceCount,omitempty"` +} + +// ResourcePoolApplyConfiguration constructs a declarative configuration of the ResourcePool type for use with +// apply. 
+func ResourcePool() *ResourcePoolApplyConfiguration { + return &ResourcePoolApplyConfiguration{} +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *ResourcePoolApplyConfiguration) WithName(value string) *ResourcePoolApplyConfiguration { + b.Name = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *ResourcePoolApplyConfiguration) WithGeneration(value int64) *ResourcePoolApplyConfiguration { + b.Generation = &value + return b +} + +// WithResourceSliceCount sets the ResourceSliceCount field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceSliceCount field is set to the value of the last call. +func (b *ResourcePoolApplyConfiguration) WithResourceSliceCount(value int64) *ResourcePoolApplyConfiguration { + b.ResourceSliceCount = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/resourceslice.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/resourceslice.go new file mode 100644 index 000000000..d169ad101 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/resourceslice.go @@ -0,0 +1,253 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta1 + +import ( + resourcev1beta1 "k8s.io/api/resource/v1beta1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + managedfields "k8s.io/apimachinery/pkg/util/managedfields" + internal "k8s.io/client-go/applyconfigurations/internal" + v1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// ResourceSliceApplyConfiguration represents a declarative configuration of the ResourceSlice type for use +// with apply. +type ResourceSliceApplyConfiguration struct { + v1.TypeMetaApplyConfiguration `json:",inline"` + *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *ResourceSliceSpecApplyConfiguration `json:"spec,omitempty"` +} + +// ResourceSlice constructs a declarative configuration of the ResourceSlice type for use with +// apply. +func ResourceSlice(name string) *ResourceSliceApplyConfiguration { + b := &ResourceSliceApplyConfiguration{} + b.WithName(name) + b.WithKind("ResourceSlice") + b.WithAPIVersion("resource.k8s.io/v1beta1") + return b +} + +// ExtractResourceSlice extracts the applied configuration owned by fieldManager from +// resourceSlice. 
If no managedFields are found in resourceSlice for fieldManager, a +// ResourceSliceApplyConfiguration is returned with only the Name, Namespace (if applicable), +// APIVersion and Kind populated. It is possible that no managed fields were found for because other +// field managers have taken ownership of all the fields previously owned by fieldManager, or because +// the fieldManager never owned fields any fields. +// resourceSlice must be a unmodified ResourceSlice API object that was retrieved from the Kubernetes API. +// ExtractResourceSlice provides a way to perform a extract/modify-in-place/apply workflow. +// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously +// applied if another fieldManager has updated or force applied any of the previously applied fields. +// Experimental! +func ExtractResourceSlice(resourceSlice *resourcev1beta1.ResourceSlice, fieldManager string) (*ResourceSliceApplyConfiguration, error) { + return extractResourceSlice(resourceSlice, fieldManager, "") +} + +// ExtractResourceSliceStatus is the same as ExtractResourceSlice except +// that it extracts the status subresource applied configuration. +// Experimental! +func ExtractResourceSliceStatus(resourceSlice *resourcev1beta1.ResourceSlice, fieldManager string) (*ResourceSliceApplyConfiguration, error) { + return extractResourceSlice(resourceSlice, fieldManager, "status") +} + +func extractResourceSlice(resourceSlice *resourcev1beta1.ResourceSlice, fieldManager string, subresource string) (*ResourceSliceApplyConfiguration, error) { + b := &ResourceSliceApplyConfiguration{} + err := managedfields.ExtractInto(resourceSlice, internal.Parser().Type("io.k8s.api.resource.v1beta1.ResourceSlice"), fieldManager, b, subresource) + if err != nil { + return nil, err + } + b.WithName(resourceSlice.Name) + + b.WithKind("ResourceSlice") + b.WithAPIVersion("resource.k8s.io/v1beta1") + return b, nil +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. +func (b *ResourceSliceApplyConfiguration) WithKind(value string) *ResourceSliceApplyConfiguration { + b.TypeMetaApplyConfiguration.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *ResourceSliceApplyConfiguration) WithAPIVersion(value string) *ResourceSliceApplyConfiguration { + b.TypeMetaApplyConfiguration.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *ResourceSliceApplyConfiguration) WithName(value string) *ResourceSliceApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
+// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *ResourceSliceApplyConfiguration) WithGenerateName(value string) *ResourceSliceApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *ResourceSliceApplyConfiguration) WithNamespace(value string) *ResourceSliceApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. +func (b *ResourceSliceApplyConfiguration) WithUID(value types.UID) *ResourceSliceApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *ResourceSliceApplyConfiguration) WithResourceVersion(value string) *ResourceSliceApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *ResourceSliceApplyConfiguration) WithGeneration(value int64) *ResourceSliceApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. +func (b *ResourceSliceApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ResourceSliceApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. 
+func (b *ResourceSliceApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ResourceSliceApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. +func (b *ResourceSliceApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ResourceSliceApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting an existing map entries in Labels field with the same key. +func (b *ResourceSliceApplyConfiguration) WithLabels(entries map[string]string) *ResourceSliceApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.ObjectMetaApplyConfiguration.Labels[k] = v + } + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting an existing map entries in Annotations field with the same key. +func (b *ResourceSliceApplyConfiguration) WithAnnotations(entries map[string]string) *ResourceSliceApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.ObjectMetaApplyConfiguration.Annotations[k] = v + } + return b +} + +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the OwnerReferences field. +func (b *ResourceSliceApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ResourceSliceApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOwnerReferences") + } + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) + } + return b +} + +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Finalizers field. 
+func (b *ResourceSliceApplyConfiguration) WithFinalizers(values ...string) *ResourceSliceApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) + } + return b +} + +func (b *ResourceSliceApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + } +} + +// WithSpec sets the Spec field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Spec field is set to the value of the last call. +func (b *ResourceSliceApplyConfiguration) WithSpec(value *ResourceSliceSpecApplyConfiguration) *ResourceSliceApplyConfiguration { + b.Spec = value + return b +} + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *ResourceSliceApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/resourceslicespec.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/resourceslicespec.go new file mode 100644 index 000000000..75bbb53c8 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/resourceslicespec.go @@ -0,0 +1,93 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta1 + +import ( + v1 "k8s.io/client-go/applyconfigurations/core/v1" +) + +// ResourceSliceSpecApplyConfiguration represents a declarative configuration of the ResourceSliceSpec type for use +// with apply. +type ResourceSliceSpecApplyConfiguration struct { + Driver *string `json:"driver,omitempty"` + Pool *ResourcePoolApplyConfiguration `json:"pool,omitempty"` + NodeName *string `json:"nodeName,omitempty"` + NodeSelector *v1.NodeSelectorApplyConfiguration `json:"nodeSelector,omitempty"` + AllNodes *bool `json:"allNodes,omitempty"` + Devices []DeviceApplyConfiguration `json:"devices,omitempty"` +} + +// ResourceSliceSpecApplyConfiguration constructs a declarative configuration of the ResourceSliceSpec type for use with +// apply. +func ResourceSliceSpec() *ResourceSliceSpecApplyConfiguration { + return &ResourceSliceSpecApplyConfiguration{} +} + +// WithDriver sets the Driver field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Driver field is set to the value of the last call. 
+func (b *ResourceSliceSpecApplyConfiguration) WithDriver(value string) *ResourceSliceSpecApplyConfiguration { + b.Driver = &value + return b +} + +// WithPool sets the Pool field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Pool field is set to the value of the last call. +func (b *ResourceSliceSpecApplyConfiguration) WithPool(value *ResourcePoolApplyConfiguration) *ResourceSliceSpecApplyConfiguration { + b.Pool = value + return b +} + +// WithNodeName sets the NodeName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the NodeName field is set to the value of the last call. +func (b *ResourceSliceSpecApplyConfiguration) WithNodeName(value string) *ResourceSliceSpecApplyConfiguration { + b.NodeName = &value + return b +} + +// WithNodeSelector sets the NodeSelector field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the NodeSelector field is set to the value of the last call. +func (b *ResourceSliceSpecApplyConfiguration) WithNodeSelector(value *v1.NodeSelectorApplyConfiguration) *ResourceSliceSpecApplyConfiguration { + b.NodeSelector = value + return b +} + +// WithAllNodes sets the AllNodes field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the AllNodes field is set to the value of the last call. +func (b *ResourceSliceSpecApplyConfiguration) WithAllNodes(value bool) *ResourceSliceSpecApplyConfiguration { + b.AllNodes = &value + return b +} + +// WithDevices adds the given value to the Devices field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Devices field. +func (b *ResourceSliceSpecApplyConfiguration) WithDevices(values ...*DeviceApplyConfiguration) *ResourceSliceSpecApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithDevices") + } + b.Devices = append(b.Devices, *values[i]) + } + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/scheduling/v1/priorityclass.go b/vendor/k8s.io/client-go/applyconfigurations/scheduling/v1/priorityclass.go index f2f135abc..24f122cc0 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/scheduling/v1/priorityclass.go +++ b/vendor/k8s.io/client-go/applyconfigurations/scheduling/v1/priorityclass.go @@ -21,22 +21,22 @@ package v1 import ( corev1 "k8s.io/api/core/v1" schedulingv1 "k8s.io/api/scheduling/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" internal "k8s.io/client-go/applyconfigurations/internal" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) // PriorityClassApplyConfiguration represents a declarative configuration of the PriorityClass type for use // with apply. 
type PriorityClassApplyConfiguration struct { - v1.TypeMetaApplyConfiguration `json:",inline"` - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Value *int32 `json:"value,omitempty"` - GlobalDefault *bool `json:"globalDefault,omitempty"` - Description *string `json:"description,omitempty"` - PreemptionPolicy *corev1.PreemptionPolicy `json:"preemptionPolicy,omitempty"` + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Value *int32 `json:"value,omitempty"` + GlobalDefault *bool `json:"globalDefault,omitempty"` + Description *string `json:"description,omitempty"` + PreemptionPolicy *corev1.PreemptionPolicy `json:"preemptionPolicy,omitempty"` } // PriorityClass constructs a declarative configuration of the PriorityClass type for use with @@ -88,7 +88,7 @@ func extractPriorityClass(priorityClass *schedulingv1.PriorityClass, fieldManage // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *PriorityClassApplyConfiguration) WithKind(value string) *PriorityClassApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -96,7 +96,7 @@ func (b *PriorityClassApplyConfiguration) WithKind(value string) *PriorityClassA // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *PriorityClassApplyConfiguration) WithAPIVersion(value string) *PriorityClassApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -105,7 +105,7 @@ func (b *PriorityClassApplyConfiguration) WithAPIVersion(value string) *Priority // If called multiple times, the Name field is set to the value of the last call. func (b *PriorityClassApplyConfiguration) WithName(value string) *PriorityClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -114,7 +114,7 @@ func (b *PriorityClassApplyConfiguration) WithName(value string) *PriorityClassA // If called multiple times, the GenerateName field is set to the value of the last call. func (b *PriorityClassApplyConfiguration) WithGenerateName(value string) *PriorityClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -123,7 +123,7 @@ func (b *PriorityClassApplyConfiguration) WithGenerateName(value string) *Priori // If called multiple times, the Namespace field is set to the value of the last call. func (b *PriorityClassApplyConfiguration) WithNamespace(value string) *PriorityClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -132,7 +132,7 @@ func (b *PriorityClassApplyConfiguration) WithNamespace(value string) *PriorityC // If called multiple times, the UID field is set to the value of the last call. 
func (b *PriorityClassApplyConfiguration) WithUID(value types.UID) *PriorityClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -141,7 +141,7 @@ func (b *PriorityClassApplyConfiguration) WithUID(value types.UID) *PriorityClas // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *PriorityClassApplyConfiguration) WithResourceVersion(value string) *PriorityClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -150,25 +150,25 @@ func (b *PriorityClassApplyConfiguration) WithResourceVersion(value string) *Pri // If called multiple times, the Generation field is set to the value of the last call. func (b *PriorityClassApplyConfiguration) WithGeneration(value int64) *PriorityClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CreationTimestamp field is set to the value of the last call. -func (b *PriorityClassApplyConfiguration) WithCreationTimestamp(value metav1.Time) *PriorityClassApplyConfiguration { +func (b *PriorityClassApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *PriorityClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. -func (b *PriorityClassApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *PriorityClassApplyConfiguration { +func (b *PriorityClassApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *PriorityClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -177,7 +177,7 @@ func (b *PriorityClassApplyConfiguration) WithDeletionTimestamp(value metav1.Tim // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *PriorityClassApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *PriorityClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -187,11 +187,11 @@ func (b *PriorityClassApplyConfiguration) WithDeletionGracePeriodSeconds(value i // overwriting an existing map entries in Labels field with the same key. 
func (b *PriorityClassApplyConfiguration) WithLabels(entries map[string]string) *PriorityClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -202,11 +202,11 @@ func (b *PriorityClassApplyConfiguration) WithLabels(entries map[string]string) // overwriting an existing map entries in Annotations field with the same key. func (b *PriorityClassApplyConfiguration) WithAnnotations(entries map[string]string) *PriorityClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -214,13 +214,13 @@ func (b *PriorityClassApplyConfiguration) WithAnnotations(entries map[string]str // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the OwnerReferences field. -func (b *PriorityClassApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *PriorityClassApplyConfiguration { +func (b *PriorityClassApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *PriorityClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -231,14 +231,14 @@ func (b *PriorityClassApplyConfiguration) WithOwnerReferences(values ...*v1.Owne func (b *PriorityClassApplyConfiguration) WithFinalizers(values ...string) *PriorityClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } func (b *PriorityClassApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} } } @@ -277,5 +277,5 @@ func (b *PriorityClassApplyConfiguration) WithPreemptionPolicy(value corev1.Pree // GetName retrieves the value of the Name field in the declarative configuration. 
func (b *PriorityClassApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/k8s.io/client-go/applyconfigurations/scheduling/v1alpha1/priorityclass.go b/vendor/k8s.io/client-go/applyconfigurations/scheduling/v1alpha1/priorityclass.go index 098517675..37a50ef6a 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/scheduling/v1alpha1/priorityclass.go +++ b/vendor/k8s.io/client-go/applyconfigurations/scheduling/v1alpha1/priorityclass.go @@ -20,7 +20,7 @@ package v1alpha1 import ( corev1 "k8s.io/api/core/v1" - v1alpha1 "k8s.io/api/scheduling/v1alpha1" + schedulingv1alpha1 "k8s.io/api/scheduling/v1alpha1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" @@ -60,18 +60,18 @@ func PriorityClass(name string) *PriorityClassApplyConfiguration { // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! -func ExtractPriorityClass(priorityClass *v1alpha1.PriorityClass, fieldManager string) (*PriorityClassApplyConfiguration, error) { +func ExtractPriorityClass(priorityClass *schedulingv1alpha1.PriorityClass, fieldManager string) (*PriorityClassApplyConfiguration, error) { return extractPriorityClass(priorityClass, fieldManager, "") } // ExtractPriorityClassStatus is the same as ExtractPriorityClass except // that it extracts the status subresource applied configuration. // Experimental! -func ExtractPriorityClassStatus(priorityClass *v1alpha1.PriorityClass, fieldManager string) (*PriorityClassApplyConfiguration, error) { +func ExtractPriorityClassStatus(priorityClass *schedulingv1alpha1.PriorityClass, fieldManager string) (*PriorityClassApplyConfiguration, error) { return extractPriorityClass(priorityClass, fieldManager, "status") } -func extractPriorityClass(priorityClass *v1alpha1.PriorityClass, fieldManager string, subresource string) (*PriorityClassApplyConfiguration, error) { +func extractPriorityClass(priorityClass *schedulingv1alpha1.PriorityClass, fieldManager string, subresource string) (*PriorityClassApplyConfiguration, error) { b := &PriorityClassApplyConfiguration{} err := managedfields.ExtractInto(priorityClass, internal.Parser().Type("io.k8s.api.scheduling.v1alpha1.PriorityClass"), fieldManager, b, subresource) if err != nil { @@ -88,7 +88,7 @@ func extractPriorityClass(priorityClass *v1alpha1.PriorityClass, fieldManager st // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *PriorityClassApplyConfiguration) WithKind(value string) *PriorityClassApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -96,7 +96,7 @@ func (b *PriorityClassApplyConfiguration) WithKind(value string) *PriorityClassA // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. 
func (b *PriorityClassApplyConfiguration) WithAPIVersion(value string) *PriorityClassApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -105,7 +105,7 @@ func (b *PriorityClassApplyConfiguration) WithAPIVersion(value string) *Priority // If called multiple times, the Name field is set to the value of the last call. func (b *PriorityClassApplyConfiguration) WithName(value string) *PriorityClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -114,7 +114,7 @@ func (b *PriorityClassApplyConfiguration) WithName(value string) *PriorityClassA // If called multiple times, the GenerateName field is set to the value of the last call. func (b *PriorityClassApplyConfiguration) WithGenerateName(value string) *PriorityClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -123,7 +123,7 @@ func (b *PriorityClassApplyConfiguration) WithGenerateName(value string) *Priori // If called multiple times, the Namespace field is set to the value of the last call. func (b *PriorityClassApplyConfiguration) WithNamespace(value string) *PriorityClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -132,7 +132,7 @@ func (b *PriorityClassApplyConfiguration) WithNamespace(value string) *PriorityC // If called multiple times, the UID field is set to the value of the last call. func (b *PriorityClassApplyConfiguration) WithUID(value types.UID) *PriorityClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -141,7 +141,7 @@ func (b *PriorityClassApplyConfiguration) WithUID(value types.UID) *PriorityClas // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *PriorityClassApplyConfiguration) WithResourceVersion(value string) *PriorityClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -150,7 +150,7 @@ func (b *PriorityClassApplyConfiguration) WithResourceVersion(value string) *Pri // If called multiple times, the Generation field is set to the value of the last call. func (b *PriorityClassApplyConfiguration) WithGeneration(value int64) *PriorityClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } @@ -159,7 +159,7 @@ func (b *PriorityClassApplyConfiguration) WithGeneration(value int64) *PriorityC // If called multiple times, the CreationTimestamp field is set to the value of the last call. func (b *PriorityClassApplyConfiguration) WithCreationTimestamp(value metav1.Time) *PriorityClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } @@ -168,7 +168,7 @@ func (b *PriorityClassApplyConfiguration) WithCreationTimestamp(value metav1.Tim // If called multiple times, the DeletionTimestamp field is set to the value of the last call. 
func (b *PriorityClassApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *PriorityClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -177,7 +177,7 @@ func (b *PriorityClassApplyConfiguration) WithDeletionTimestamp(value metav1.Tim // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *PriorityClassApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *PriorityClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -187,11 +187,11 @@ func (b *PriorityClassApplyConfiguration) WithDeletionGracePeriodSeconds(value i // overwriting an existing map entries in Labels field with the same key. func (b *PriorityClassApplyConfiguration) WithLabels(entries map[string]string) *PriorityClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -202,11 +202,11 @@ func (b *PriorityClassApplyConfiguration) WithLabels(entries map[string]string) // overwriting an existing map entries in Annotations field with the same key. func (b *PriorityClassApplyConfiguration) WithAnnotations(entries map[string]string) *PriorityClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -220,7 +220,7 @@ func (b *PriorityClassApplyConfiguration) WithOwnerReferences(values ...*v1.Owne if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -231,7 +231,7 @@ func (b *PriorityClassApplyConfiguration) WithOwnerReferences(values ...*v1.Owne func (b *PriorityClassApplyConfiguration) WithFinalizers(values ...string) *PriorityClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } @@ -277,5 +277,5 @@ func (b *PriorityClassApplyConfiguration) WithPreemptionPolicy(value corev1.Pree // GetName retrieves the value of the Name field in the declarative configuration. 
func (b *PriorityClassApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/k8s.io/client-go/applyconfigurations/scheduling/v1beta1/priorityclass.go b/vendor/k8s.io/client-go/applyconfigurations/scheduling/v1beta1/priorityclass.go index 075862fe3..4b6d52039 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/scheduling/v1beta1/priorityclass.go +++ b/vendor/k8s.io/client-go/applyconfigurations/scheduling/v1beta1/priorityclass.go @@ -20,7 +20,7 @@ package v1beta1 import ( corev1 "k8s.io/api/core/v1" - v1beta1 "k8s.io/api/scheduling/v1beta1" + schedulingv1beta1 "k8s.io/api/scheduling/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" @@ -60,18 +60,18 @@ func PriorityClass(name string) *PriorityClassApplyConfiguration { // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! -func ExtractPriorityClass(priorityClass *v1beta1.PriorityClass, fieldManager string) (*PriorityClassApplyConfiguration, error) { +func ExtractPriorityClass(priorityClass *schedulingv1beta1.PriorityClass, fieldManager string) (*PriorityClassApplyConfiguration, error) { return extractPriorityClass(priorityClass, fieldManager, "") } // ExtractPriorityClassStatus is the same as ExtractPriorityClass except // that it extracts the status subresource applied configuration. // Experimental! -func ExtractPriorityClassStatus(priorityClass *v1beta1.PriorityClass, fieldManager string) (*PriorityClassApplyConfiguration, error) { +func ExtractPriorityClassStatus(priorityClass *schedulingv1beta1.PriorityClass, fieldManager string) (*PriorityClassApplyConfiguration, error) { return extractPriorityClass(priorityClass, fieldManager, "status") } -func extractPriorityClass(priorityClass *v1beta1.PriorityClass, fieldManager string, subresource string) (*PriorityClassApplyConfiguration, error) { +func extractPriorityClass(priorityClass *schedulingv1beta1.PriorityClass, fieldManager string, subresource string) (*PriorityClassApplyConfiguration, error) { b := &PriorityClassApplyConfiguration{} err := managedfields.ExtractInto(priorityClass, internal.Parser().Type("io.k8s.api.scheduling.v1beta1.PriorityClass"), fieldManager, b, subresource) if err != nil { @@ -88,7 +88,7 @@ func extractPriorityClass(priorityClass *v1beta1.PriorityClass, fieldManager str // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *PriorityClassApplyConfiguration) WithKind(value string) *PriorityClassApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -96,7 +96,7 @@ func (b *PriorityClassApplyConfiguration) WithKind(value string) *PriorityClassA // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. 
func (b *PriorityClassApplyConfiguration) WithAPIVersion(value string) *PriorityClassApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -105,7 +105,7 @@ func (b *PriorityClassApplyConfiguration) WithAPIVersion(value string) *Priority // If called multiple times, the Name field is set to the value of the last call. func (b *PriorityClassApplyConfiguration) WithName(value string) *PriorityClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -114,7 +114,7 @@ func (b *PriorityClassApplyConfiguration) WithName(value string) *PriorityClassA // If called multiple times, the GenerateName field is set to the value of the last call. func (b *PriorityClassApplyConfiguration) WithGenerateName(value string) *PriorityClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -123,7 +123,7 @@ func (b *PriorityClassApplyConfiguration) WithGenerateName(value string) *Priori // If called multiple times, the Namespace field is set to the value of the last call. func (b *PriorityClassApplyConfiguration) WithNamespace(value string) *PriorityClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -132,7 +132,7 @@ func (b *PriorityClassApplyConfiguration) WithNamespace(value string) *PriorityC // If called multiple times, the UID field is set to the value of the last call. func (b *PriorityClassApplyConfiguration) WithUID(value types.UID) *PriorityClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -141,7 +141,7 @@ func (b *PriorityClassApplyConfiguration) WithUID(value types.UID) *PriorityClas // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *PriorityClassApplyConfiguration) WithResourceVersion(value string) *PriorityClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -150,7 +150,7 @@ func (b *PriorityClassApplyConfiguration) WithResourceVersion(value string) *Pri // If called multiple times, the Generation field is set to the value of the last call. func (b *PriorityClassApplyConfiguration) WithGeneration(value int64) *PriorityClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } @@ -159,7 +159,7 @@ func (b *PriorityClassApplyConfiguration) WithGeneration(value int64) *PriorityC // If called multiple times, the CreationTimestamp field is set to the value of the last call. func (b *PriorityClassApplyConfiguration) WithCreationTimestamp(value metav1.Time) *PriorityClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } @@ -168,7 +168,7 @@ func (b *PriorityClassApplyConfiguration) WithCreationTimestamp(value metav1.Tim // If called multiple times, the DeletionTimestamp field is set to the value of the last call. 
func (b *PriorityClassApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *PriorityClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -177,7 +177,7 @@ func (b *PriorityClassApplyConfiguration) WithDeletionTimestamp(value metav1.Tim // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *PriorityClassApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *PriorityClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -187,11 +187,11 @@ func (b *PriorityClassApplyConfiguration) WithDeletionGracePeriodSeconds(value i // overwriting an existing map entries in Labels field with the same key. func (b *PriorityClassApplyConfiguration) WithLabels(entries map[string]string) *PriorityClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -202,11 +202,11 @@ func (b *PriorityClassApplyConfiguration) WithLabels(entries map[string]string) // overwriting an existing map entries in Annotations field with the same key. func (b *PriorityClassApplyConfiguration) WithAnnotations(entries map[string]string) *PriorityClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -220,7 +220,7 @@ func (b *PriorityClassApplyConfiguration) WithOwnerReferences(values ...*v1.Owne if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -231,7 +231,7 @@ func (b *PriorityClassApplyConfiguration) WithOwnerReferences(values ...*v1.Owne func (b *PriorityClassApplyConfiguration) WithFinalizers(values ...string) *PriorityClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } @@ -277,5 +277,5 @@ func (b *PriorityClassApplyConfiguration) WithPreemptionPolicy(value corev1.Pree // GetName retrieves the value of the Name field in the declarative configuration. 
func (b *PriorityClassApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csidriver.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csidriver.go index 39d835702..6941e4cdc 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csidriver.go +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csidriver.go @@ -19,20 +19,20 @@ limitations under the License. package v1 import ( - apistoragev1 "k8s.io/api/storage/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + storagev1 "k8s.io/api/storage/v1" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" internal "k8s.io/client-go/applyconfigurations/internal" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) // CSIDriverApplyConfiguration represents a declarative configuration of the CSIDriver type for use // with apply. type CSIDriverApplyConfiguration struct { - v1.TypeMetaApplyConfiguration `json:",inline"` - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Spec *CSIDriverSpecApplyConfiguration `json:"spec,omitempty"` + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *CSIDriverSpecApplyConfiguration `json:"spec,omitempty"` } // CSIDriver constructs a declarative configuration of the CSIDriver type for use with @@ -56,18 +56,18 @@ func CSIDriver(name string) *CSIDriverApplyConfiguration { // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! -func ExtractCSIDriver(cSIDriver *apistoragev1.CSIDriver, fieldManager string) (*CSIDriverApplyConfiguration, error) { +func ExtractCSIDriver(cSIDriver *storagev1.CSIDriver, fieldManager string) (*CSIDriverApplyConfiguration, error) { return extractCSIDriver(cSIDriver, fieldManager, "") } // ExtractCSIDriverStatus is the same as ExtractCSIDriver except // that it extracts the status subresource applied configuration. // Experimental! -func ExtractCSIDriverStatus(cSIDriver *apistoragev1.CSIDriver, fieldManager string) (*CSIDriverApplyConfiguration, error) { +func ExtractCSIDriverStatus(cSIDriver *storagev1.CSIDriver, fieldManager string) (*CSIDriverApplyConfiguration, error) { return extractCSIDriver(cSIDriver, fieldManager, "status") } -func extractCSIDriver(cSIDriver *apistoragev1.CSIDriver, fieldManager string, subresource string) (*CSIDriverApplyConfiguration, error) { +func extractCSIDriver(cSIDriver *storagev1.CSIDriver, fieldManager string, subresource string) (*CSIDriverApplyConfiguration, error) { b := &CSIDriverApplyConfiguration{} err := managedfields.ExtractInto(cSIDriver, internal.Parser().Type("io.k8s.api.storage.v1.CSIDriver"), fieldManager, b, subresource) if err != nil { @@ -84,7 +84,7 @@ func extractCSIDriver(cSIDriver *apistoragev1.CSIDriver, fieldManager string, su // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. 
func (b *CSIDriverApplyConfiguration) WithKind(value string) *CSIDriverApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -92,7 +92,7 @@ func (b *CSIDriverApplyConfiguration) WithKind(value string) *CSIDriverApplyConf // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *CSIDriverApplyConfiguration) WithAPIVersion(value string) *CSIDriverApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -101,7 +101,7 @@ func (b *CSIDriverApplyConfiguration) WithAPIVersion(value string) *CSIDriverApp // If called multiple times, the Name field is set to the value of the last call. func (b *CSIDriverApplyConfiguration) WithName(value string) *CSIDriverApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -110,7 +110,7 @@ func (b *CSIDriverApplyConfiguration) WithName(value string) *CSIDriverApplyConf // If called multiple times, the GenerateName field is set to the value of the last call. func (b *CSIDriverApplyConfiguration) WithGenerateName(value string) *CSIDriverApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -119,7 +119,7 @@ func (b *CSIDriverApplyConfiguration) WithGenerateName(value string) *CSIDriverA // If called multiple times, the Namespace field is set to the value of the last call. func (b *CSIDriverApplyConfiguration) WithNamespace(value string) *CSIDriverApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -128,7 +128,7 @@ func (b *CSIDriverApplyConfiguration) WithNamespace(value string) *CSIDriverAppl // If called multiple times, the UID field is set to the value of the last call. func (b *CSIDriverApplyConfiguration) WithUID(value types.UID) *CSIDriverApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -137,7 +137,7 @@ func (b *CSIDriverApplyConfiguration) WithUID(value types.UID) *CSIDriverApplyCo // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *CSIDriverApplyConfiguration) WithResourceVersion(value string) *CSIDriverApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -146,25 +146,25 @@ func (b *CSIDriverApplyConfiguration) WithResourceVersion(value string) *CSIDriv // If called multiple times, the Generation field is set to the value of the last call. func (b *CSIDriverApplyConfiguration) WithGeneration(value int64) *CSIDriverApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CreationTimestamp field is set to the value of the last call. 
-func (b *CSIDriverApplyConfiguration) WithCreationTimestamp(value metav1.Time) *CSIDriverApplyConfiguration { +func (b *CSIDriverApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *CSIDriverApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. -func (b *CSIDriverApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *CSIDriverApplyConfiguration { +func (b *CSIDriverApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *CSIDriverApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -173,7 +173,7 @@ func (b *CSIDriverApplyConfiguration) WithDeletionTimestamp(value metav1.Time) * // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *CSIDriverApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *CSIDriverApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -183,11 +183,11 @@ func (b *CSIDriverApplyConfiguration) WithDeletionGracePeriodSeconds(value int64 // overwriting an existing map entries in Labels field with the same key. func (b *CSIDriverApplyConfiguration) WithLabels(entries map[string]string) *CSIDriverApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -198,11 +198,11 @@ func (b *CSIDriverApplyConfiguration) WithLabels(entries map[string]string) *CSI // overwriting an existing map entries in Annotations field with the same key. func (b *CSIDriverApplyConfiguration) WithAnnotations(entries map[string]string) *CSIDriverApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -210,13 +210,13 @@ func (b *CSIDriverApplyConfiguration) WithAnnotations(entries map[string]string) // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the OwnerReferences field. 
-func (b *CSIDriverApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *CSIDriverApplyConfiguration { +func (b *CSIDriverApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *CSIDriverApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -227,14 +227,14 @@ func (b *CSIDriverApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerRef func (b *CSIDriverApplyConfiguration) WithFinalizers(values ...string) *CSIDriverApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } func (b *CSIDriverApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} } } @@ -249,5 +249,5 @@ func (b *CSIDriverApplyConfiguration) WithSpec(value *CSIDriverSpecApplyConfigur // GetName retrieves the value of the Name field in the declarative configuration. func (b *CSIDriverApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csidriverspec.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csidriverspec.go index b2dcb0fee..1b58c6db8 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csidriverspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csidriverspec.go @@ -19,7 +19,7 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/storage/v1" + storagev1 "k8s.io/api/storage/v1" ) // CSIDriverSpecApplyConfiguration represents a declarative configuration of the CSIDriverSpec type for use @@ -27,9 +27,9 @@ import ( type CSIDriverSpecApplyConfiguration struct { AttachRequired *bool `json:"attachRequired,omitempty"` PodInfoOnMount *bool `json:"podInfoOnMount,omitempty"` - VolumeLifecycleModes []v1.VolumeLifecycleMode `json:"volumeLifecycleModes,omitempty"` + VolumeLifecycleModes []storagev1.VolumeLifecycleMode `json:"volumeLifecycleModes,omitempty"` StorageCapacity *bool `json:"storageCapacity,omitempty"` - FSGroupPolicy *v1.FSGroupPolicy `json:"fsGroupPolicy,omitempty"` + FSGroupPolicy *storagev1.FSGroupPolicy `json:"fsGroupPolicy,omitempty"` TokenRequests []TokenRequestApplyConfiguration `json:"tokenRequests,omitempty"` RequiresRepublish *bool `json:"requiresRepublish,omitempty"` SELinuxMount *bool `json:"seLinuxMount,omitempty"` @@ -60,7 +60,7 @@ func (b *CSIDriverSpecApplyConfiguration) WithPodInfoOnMount(value bool) *CSIDri // WithVolumeLifecycleModes adds the given value to the VolumeLifecycleModes field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the VolumeLifecycleModes field. 
-func (b *CSIDriverSpecApplyConfiguration) WithVolumeLifecycleModes(values ...v1.VolumeLifecycleMode) *CSIDriverSpecApplyConfiguration { +func (b *CSIDriverSpecApplyConfiguration) WithVolumeLifecycleModes(values ...storagev1.VolumeLifecycleMode) *CSIDriverSpecApplyConfiguration { for i := range values { b.VolumeLifecycleModes = append(b.VolumeLifecycleModes, values[i]) } @@ -78,7 +78,7 @@ func (b *CSIDriverSpecApplyConfiguration) WithStorageCapacity(value bool) *CSIDr // WithFSGroupPolicy sets the FSGroupPolicy field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the FSGroupPolicy field is set to the value of the last call. -func (b *CSIDriverSpecApplyConfiguration) WithFSGroupPolicy(value v1.FSGroupPolicy) *CSIDriverSpecApplyConfiguration { +func (b *CSIDriverSpecApplyConfiguration) WithFSGroupPolicy(value storagev1.FSGroupPolicy) *CSIDriverSpecApplyConfiguration { b.FSGroupPolicy = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csinode.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csinode.go index 8a53e7984..f31620709 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csinode.go +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csinode.go @@ -19,20 +19,20 @@ limitations under the License. package v1 import ( - apistoragev1 "k8s.io/api/storage/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + storagev1 "k8s.io/api/storage/v1" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" internal "k8s.io/client-go/applyconfigurations/internal" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) // CSINodeApplyConfiguration represents a declarative configuration of the CSINode type for use // with apply. type CSINodeApplyConfiguration struct { - v1.TypeMetaApplyConfiguration `json:",inline"` - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Spec *CSINodeSpecApplyConfiguration `json:"spec,omitempty"` + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *CSINodeSpecApplyConfiguration `json:"spec,omitempty"` } // CSINode constructs a declarative configuration of the CSINode type for use with @@ -56,18 +56,18 @@ func CSINode(name string) *CSINodeApplyConfiguration { // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! -func ExtractCSINode(cSINode *apistoragev1.CSINode, fieldManager string) (*CSINodeApplyConfiguration, error) { +func ExtractCSINode(cSINode *storagev1.CSINode, fieldManager string) (*CSINodeApplyConfiguration, error) { return extractCSINode(cSINode, fieldManager, "") } // ExtractCSINodeStatus is the same as ExtractCSINode except // that it extracts the status subresource applied configuration. // Experimental! 
-func ExtractCSINodeStatus(cSINode *apistoragev1.CSINode, fieldManager string) (*CSINodeApplyConfiguration, error) { +func ExtractCSINodeStatus(cSINode *storagev1.CSINode, fieldManager string) (*CSINodeApplyConfiguration, error) { return extractCSINode(cSINode, fieldManager, "status") } -func extractCSINode(cSINode *apistoragev1.CSINode, fieldManager string, subresource string) (*CSINodeApplyConfiguration, error) { +func extractCSINode(cSINode *storagev1.CSINode, fieldManager string, subresource string) (*CSINodeApplyConfiguration, error) { b := &CSINodeApplyConfiguration{} err := managedfields.ExtractInto(cSINode, internal.Parser().Type("io.k8s.api.storage.v1.CSINode"), fieldManager, b, subresource) if err != nil { @@ -84,7 +84,7 @@ func extractCSINode(cSINode *apistoragev1.CSINode, fieldManager string, subresou // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *CSINodeApplyConfiguration) WithKind(value string) *CSINodeApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -92,7 +92,7 @@ func (b *CSINodeApplyConfiguration) WithKind(value string) *CSINodeApplyConfigur // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *CSINodeApplyConfiguration) WithAPIVersion(value string) *CSINodeApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -101,7 +101,7 @@ func (b *CSINodeApplyConfiguration) WithAPIVersion(value string) *CSINodeApplyCo // If called multiple times, the Name field is set to the value of the last call. func (b *CSINodeApplyConfiguration) WithName(value string) *CSINodeApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -110,7 +110,7 @@ func (b *CSINodeApplyConfiguration) WithName(value string) *CSINodeApplyConfigur // If called multiple times, the GenerateName field is set to the value of the last call. func (b *CSINodeApplyConfiguration) WithGenerateName(value string) *CSINodeApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -119,7 +119,7 @@ func (b *CSINodeApplyConfiguration) WithGenerateName(value string) *CSINodeApply // If called multiple times, the Namespace field is set to the value of the last call. func (b *CSINodeApplyConfiguration) WithNamespace(value string) *CSINodeApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -128,7 +128,7 @@ func (b *CSINodeApplyConfiguration) WithNamespace(value string) *CSINodeApplyCon // If called multiple times, the UID field is set to the value of the last call. func (b *CSINodeApplyConfiguration) WithUID(value types.UID) *CSINodeApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -137,7 +137,7 @@ func (b *CSINodeApplyConfiguration) WithUID(value types.UID) *CSINodeApplyConfig // If called multiple times, the ResourceVersion field is set to the value of the last call. 
func (b *CSINodeApplyConfiguration) WithResourceVersion(value string) *CSINodeApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -146,25 +146,25 @@ func (b *CSINodeApplyConfiguration) WithResourceVersion(value string) *CSINodeAp // If called multiple times, the Generation field is set to the value of the last call. func (b *CSINodeApplyConfiguration) WithGeneration(value int64) *CSINodeApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CreationTimestamp field is set to the value of the last call. -func (b *CSINodeApplyConfiguration) WithCreationTimestamp(value metav1.Time) *CSINodeApplyConfiguration { +func (b *CSINodeApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *CSINodeApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. -func (b *CSINodeApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *CSINodeApplyConfiguration { +func (b *CSINodeApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *CSINodeApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -173,7 +173,7 @@ func (b *CSINodeApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *CS // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *CSINodeApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *CSINodeApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -183,11 +183,11 @@ func (b *CSINodeApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) // overwriting an existing map entries in Labels field with the same key. func (b *CSINodeApplyConfiguration) WithLabels(entries map[string]string) *CSINodeApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -198,11 +198,11 @@ func (b *CSINodeApplyConfiguration) WithLabels(entries map[string]string) *CSINo // overwriting an existing map entries in Annotations field with the same key. 
func (b *CSINodeApplyConfiguration) WithAnnotations(entries map[string]string) *CSINodeApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -210,13 +210,13 @@ func (b *CSINodeApplyConfiguration) WithAnnotations(entries map[string]string) * // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the OwnerReferences field. -func (b *CSINodeApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *CSINodeApplyConfiguration { +func (b *CSINodeApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *CSINodeApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -227,14 +227,14 @@ func (b *CSINodeApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerRefer func (b *CSINodeApplyConfiguration) WithFinalizers(values ...string) *CSINodeApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } func (b *CSINodeApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} } } @@ -249,5 +249,5 @@ func (b *CSINodeApplyConfiguration) WithSpec(value *CSINodeSpecApplyConfiguratio // GetName retrieves the value of the Name field in the declarative configuration. 
func (b *CSINodeApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csistoragecapacity.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csistoragecapacity.go index 0e293248d..226fb1f70 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csistoragecapacity.go +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csistoragecapacity.go @@ -21,22 +21,22 @@ package v1 import ( storagev1 "k8s.io/api/storage/v1" resource "k8s.io/apimachinery/pkg/api/resource" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" internal "k8s.io/client-go/applyconfigurations/internal" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) // CSIStorageCapacityApplyConfiguration represents a declarative configuration of the CSIStorageCapacity type for use // with apply. type CSIStorageCapacityApplyConfiguration struct { - v1.TypeMetaApplyConfiguration `json:",inline"` - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - NodeTopology *v1.LabelSelectorApplyConfiguration `json:"nodeTopology,omitempty"` - StorageClassName *string `json:"storageClassName,omitempty"` - Capacity *resource.Quantity `json:"capacity,omitempty"` - MaximumVolumeSize *resource.Quantity `json:"maximumVolumeSize,omitempty"` + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + NodeTopology *metav1.LabelSelectorApplyConfiguration `json:"nodeTopology,omitempty"` + StorageClassName *string `json:"storageClassName,omitempty"` + Capacity *resource.Quantity `json:"capacity,omitempty"` + MaximumVolumeSize *resource.Quantity `json:"maximumVolumeSize,omitempty"` } // CSIStorageCapacity constructs a declarative configuration of the CSIStorageCapacity type for use with @@ -90,7 +90,7 @@ func extractCSIStorageCapacity(cSIStorageCapacity *storagev1.CSIStorageCapacity, // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *CSIStorageCapacityApplyConfiguration) WithKind(value string) *CSIStorageCapacityApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -98,7 +98,7 @@ func (b *CSIStorageCapacityApplyConfiguration) WithKind(value string) *CSIStorag // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *CSIStorageCapacityApplyConfiguration) WithAPIVersion(value string) *CSIStorageCapacityApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -107,7 +107,7 @@ func (b *CSIStorageCapacityApplyConfiguration) WithAPIVersion(value string) *CSI // If called multiple times, the Name field is set to the value of the last call. 
func (b *CSIStorageCapacityApplyConfiguration) WithName(value string) *CSIStorageCapacityApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -116,7 +116,7 @@ func (b *CSIStorageCapacityApplyConfiguration) WithName(value string) *CSIStorag // If called multiple times, the GenerateName field is set to the value of the last call. func (b *CSIStorageCapacityApplyConfiguration) WithGenerateName(value string) *CSIStorageCapacityApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -125,7 +125,7 @@ func (b *CSIStorageCapacityApplyConfiguration) WithGenerateName(value string) *C // If called multiple times, the Namespace field is set to the value of the last call. func (b *CSIStorageCapacityApplyConfiguration) WithNamespace(value string) *CSIStorageCapacityApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -134,7 +134,7 @@ func (b *CSIStorageCapacityApplyConfiguration) WithNamespace(value string) *CSIS // If called multiple times, the UID field is set to the value of the last call. func (b *CSIStorageCapacityApplyConfiguration) WithUID(value types.UID) *CSIStorageCapacityApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -143,7 +143,7 @@ func (b *CSIStorageCapacityApplyConfiguration) WithUID(value types.UID) *CSIStor // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *CSIStorageCapacityApplyConfiguration) WithResourceVersion(value string) *CSIStorageCapacityApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -152,25 +152,25 @@ func (b *CSIStorageCapacityApplyConfiguration) WithResourceVersion(value string) // If called multiple times, the Generation field is set to the value of the last call. func (b *CSIStorageCapacityApplyConfiguration) WithGeneration(value int64) *CSIStorageCapacityApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CreationTimestamp field is set to the value of the last call. -func (b *CSIStorageCapacityApplyConfiguration) WithCreationTimestamp(value metav1.Time) *CSIStorageCapacityApplyConfiguration { +func (b *CSIStorageCapacityApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *CSIStorageCapacityApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. 
-func (b *CSIStorageCapacityApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *CSIStorageCapacityApplyConfiguration { +func (b *CSIStorageCapacityApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *CSIStorageCapacityApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -179,7 +179,7 @@ func (b *CSIStorageCapacityApplyConfiguration) WithDeletionTimestamp(value metav // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *CSIStorageCapacityApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *CSIStorageCapacityApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -189,11 +189,11 @@ func (b *CSIStorageCapacityApplyConfiguration) WithDeletionGracePeriodSeconds(va // overwriting an existing map entries in Labels field with the same key. func (b *CSIStorageCapacityApplyConfiguration) WithLabels(entries map[string]string) *CSIStorageCapacityApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -204,11 +204,11 @@ func (b *CSIStorageCapacityApplyConfiguration) WithLabels(entries map[string]str // overwriting an existing map entries in Annotations field with the same key. func (b *CSIStorageCapacityApplyConfiguration) WithAnnotations(entries map[string]string) *CSIStorageCapacityApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -216,13 +216,13 @@ func (b *CSIStorageCapacityApplyConfiguration) WithAnnotations(entries map[strin // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the OwnerReferences field. 
-func (b *CSIStorageCapacityApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *CSIStorageCapacityApplyConfiguration { +func (b *CSIStorageCapacityApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *CSIStorageCapacityApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -233,21 +233,21 @@ func (b *CSIStorageCapacityApplyConfiguration) WithOwnerReferences(values ...*v1 func (b *CSIStorageCapacityApplyConfiguration) WithFinalizers(values ...string) *CSIStorageCapacityApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } func (b *CSIStorageCapacityApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} } } // WithNodeTopology sets the NodeTopology field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the NodeTopology field is set to the value of the last call. -func (b *CSIStorageCapacityApplyConfiguration) WithNodeTopology(value *v1.LabelSelectorApplyConfiguration) *CSIStorageCapacityApplyConfiguration { +func (b *CSIStorageCapacityApplyConfiguration) WithNodeTopology(value *metav1.LabelSelectorApplyConfiguration) *CSIStorageCapacityApplyConfiguration { b.NodeTopology = value return b } @@ -279,5 +279,5 @@ func (b *CSIStorageCapacityApplyConfiguration) WithMaximumVolumeSize(value resou // GetName retrieves the value of the Name field in the declarative configuration. func (b *CSIStorageCapacityApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1/storageclass.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1/storageclass.go index 26d70bc8b..cab39900e 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1/storageclass.go +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1/storageclass.go @@ -21,26 +21,26 @@ package v1 import ( corev1 "k8s.io/api/core/v1" storagev1 "k8s.io/api/storage/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" applyconfigurationscorev1 "k8s.io/client-go/applyconfigurations/core/v1" internal "k8s.io/client-go/applyconfigurations/internal" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) // StorageClassApplyConfiguration represents a declarative configuration of the StorageClass type for use // with apply. 
type StorageClassApplyConfiguration struct { - v1.TypeMetaApplyConfiguration `json:",inline"` - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Provisioner *string `json:"provisioner,omitempty"` - Parameters map[string]string `json:"parameters,omitempty"` - ReclaimPolicy *corev1.PersistentVolumeReclaimPolicy `json:"reclaimPolicy,omitempty"` - MountOptions []string `json:"mountOptions,omitempty"` - AllowVolumeExpansion *bool `json:"allowVolumeExpansion,omitempty"` - VolumeBindingMode *storagev1.VolumeBindingMode `json:"volumeBindingMode,omitempty"` - AllowedTopologies []applyconfigurationscorev1.TopologySelectorTermApplyConfiguration `json:"allowedTopologies,omitempty"` + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Provisioner *string `json:"provisioner,omitempty"` + Parameters map[string]string `json:"parameters,omitempty"` + ReclaimPolicy *corev1.PersistentVolumeReclaimPolicy `json:"reclaimPolicy,omitempty"` + MountOptions []string `json:"mountOptions,omitempty"` + AllowVolumeExpansion *bool `json:"allowVolumeExpansion,omitempty"` + VolumeBindingMode *storagev1.VolumeBindingMode `json:"volumeBindingMode,omitempty"` + AllowedTopologies []applyconfigurationscorev1.TopologySelectorTermApplyConfiguration `json:"allowedTopologies,omitempty"` } // StorageClass constructs a declarative configuration of the StorageClass type for use with @@ -92,7 +92,7 @@ func extractStorageClass(storageClass *storagev1.StorageClass, fieldManager stri // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *StorageClassApplyConfiguration) WithKind(value string) *StorageClassApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -100,7 +100,7 @@ func (b *StorageClassApplyConfiguration) WithKind(value string) *StorageClassApp // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *StorageClassApplyConfiguration) WithAPIVersion(value string) *StorageClassApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -109,7 +109,7 @@ func (b *StorageClassApplyConfiguration) WithAPIVersion(value string) *StorageCl // If called multiple times, the Name field is set to the value of the last call. func (b *StorageClassApplyConfiguration) WithName(value string) *StorageClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -118,7 +118,7 @@ func (b *StorageClassApplyConfiguration) WithName(value string) *StorageClassApp // If called multiple times, the GenerateName field is set to the value of the last call. func (b *StorageClassApplyConfiguration) WithGenerateName(value string) *StorageClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -127,7 +127,7 @@ func (b *StorageClassApplyConfiguration) WithGenerateName(value string) *Storage // If called multiple times, the Namespace field is set to the value of the last call. 
func (b *StorageClassApplyConfiguration) WithNamespace(value string) *StorageClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -136,7 +136,7 @@ func (b *StorageClassApplyConfiguration) WithNamespace(value string) *StorageCla // If called multiple times, the UID field is set to the value of the last call. func (b *StorageClassApplyConfiguration) WithUID(value types.UID) *StorageClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -145,7 +145,7 @@ func (b *StorageClassApplyConfiguration) WithUID(value types.UID) *StorageClassA // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *StorageClassApplyConfiguration) WithResourceVersion(value string) *StorageClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -154,25 +154,25 @@ func (b *StorageClassApplyConfiguration) WithResourceVersion(value string) *Stor // If called multiple times, the Generation field is set to the value of the last call. func (b *StorageClassApplyConfiguration) WithGeneration(value int64) *StorageClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CreationTimestamp field is set to the value of the last call. -func (b *StorageClassApplyConfiguration) WithCreationTimestamp(value metav1.Time) *StorageClassApplyConfiguration { +func (b *StorageClassApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *StorageClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. -func (b *StorageClassApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *StorageClassApplyConfiguration { +func (b *StorageClassApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *StorageClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -181,7 +181,7 @@ func (b *StorageClassApplyConfiguration) WithDeletionTimestamp(value metav1.Time // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *StorageClassApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *StorageClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -191,11 +191,11 @@ func (b *StorageClassApplyConfiguration) WithDeletionGracePeriodSeconds(value in // overwriting an existing map entries in Labels field with the same key. 
func (b *StorageClassApplyConfiguration) WithLabels(entries map[string]string) *StorageClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -206,11 +206,11 @@ func (b *StorageClassApplyConfiguration) WithLabels(entries map[string]string) * // overwriting an existing map entries in Annotations field with the same key. func (b *StorageClassApplyConfiguration) WithAnnotations(entries map[string]string) *StorageClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -218,13 +218,13 @@ func (b *StorageClassApplyConfiguration) WithAnnotations(entries map[string]stri // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the OwnerReferences field. -func (b *StorageClassApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *StorageClassApplyConfiguration { +func (b *StorageClassApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *StorageClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -235,14 +235,14 @@ func (b *StorageClassApplyConfiguration) WithOwnerReferences(values ...*v1.Owner func (b *StorageClassApplyConfiguration) WithFinalizers(values ...string) *StorageClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } func (b *StorageClassApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} } } @@ -318,5 +318,5 @@ func (b *StorageClassApplyConfiguration) WithAllowedTopologies(values ...*applyc // GetName retrieves the value of the Name field in the declarative configuration. 
func (b *StorageClassApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumeattachment.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumeattachment.go index 72c351208..b28b8c33f 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumeattachment.go +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumeattachment.go @@ -19,21 +19,21 @@ limitations under the License. package v1 import ( - apistoragev1 "k8s.io/api/storage/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + storagev1 "k8s.io/api/storage/v1" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" internal "k8s.io/client-go/applyconfigurations/internal" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) // VolumeAttachmentApplyConfiguration represents a declarative configuration of the VolumeAttachment type for use // with apply. type VolumeAttachmentApplyConfiguration struct { - v1.TypeMetaApplyConfiguration `json:",inline"` - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Spec *VolumeAttachmentSpecApplyConfiguration `json:"spec,omitempty"` - Status *VolumeAttachmentStatusApplyConfiguration `json:"status,omitempty"` + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *VolumeAttachmentSpecApplyConfiguration `json:"spec,omitempty"` + Status *VolumeAttachmentStatusApplyConfiguration `json:"status,omitempty"` } // VolumeAttachment constructs a declarative configuration of the VolumeAttachment type for use with @@ -57,18 +57,18 @@ func VolumeAttachment(name string) *VolumeAttachmentApplyConfiguration { // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! -func ExtractVolumeAttachment(volumeAttachment *apistoragev1.VolumeAttachment, fieldManager string) (*VolumeAttachmentApplyConfiguration, error) { +func ExtractVolumeAttachment(volumeAttachment *storagev1.VolumeAttachment, fieldManager string) (*VolumeAttachmentApplyConfiguration, error) { return extractVolumeAttachment(volumeAttachment, fieldManager, "") } // ExtractVolumeAttachmentStatus is the same as ExtractVolumeAttachment except // that it extracts the status subresource applied configuration. // Experimental! 
-func ExtractVolumeAttachmentStatus(volumeAttachment *apistoragev1.VolumeAttachment, fieldManager string) (*VolumeAttachmentApplyConfiguration, error) { +func ExtractVolumeAttachmentStatus(volumeAttachment *storagev1.VolumeAttachment, fieldManager string) (*VolumeAttachmentApplyConfiguration, error) { return extractVolumeAttachment(volumeAttachment, fieldManager, "status") } -func extractVolumeAttachment(volumeAttachment *apistoragev1.VolumeAttachment, fieldManager string, subresource string) (*VolumeAttachmentApplyConfiguration, error) { +func extractVolumeAttachment(volumeAttachment *storagev1.VolumeAttachment, fieldManager string, subresource string) (*VolumeAttachmentApplyConfiguration, error) { b := &VolumeAttachmentApplyConfiguration{} err := managedfields.ExtractInto(volumeAttachment, internal.Parser().Type("io.k8s.api.storage.v1.VolumeAttachment"), fieldManager, b, subresource) if err != nil { @@ -85,7 +85,7 @@ func extractVolumeAttachment(volumeAttachment *apistoragev1.VolumeAttachment, fi // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *VolumeAttachmentApplyConfiguration) WithKind(value string) *VolumeAttachmentApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -93,7 +93,7 @@ func (b *VolumeAttachmentApplyConfiguration) WithKind(value string) *VolumeAttac // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *VolumeAttachmentApplyConfiguration) WithAPIVersion(value string) *VolumeAttachmentApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -102,7 +102,7 @@ func (b *VolumeAttachmentApplyConfiguration) WithAPIVersion(value string) *Volum // If called multiple times, the Name field is set to the value of the last call. func (b *VolumeAttachmentApplyConfiguration) WithName(value string) *VolumeAttachmentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -111,7 +111,7 @@ func (b *VolumeAttachmentApplyConfiguration) WithName(value string) *VolumeAttac // If called multiple times, the GenerateName field is set to the value of the last call. func (b *VolumeAttachmentApplyConfiguration) WithGenerateName(value string) *VolumeAttachmentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -120,7 +120,7 @@ func (b *VolumeAttachmentApplyConfiguration) WithGenerateName(value string) *Vol // If called multiple times, the Namespace field is set to the value of the last call. func (b *VolumeAttachmentApplyConfiguration) WithNamespace(value string) *VolumeAttachmentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -129,7 +129,7 @@ func (b *VolumeAttachmentApplyConfiguration) WithNamespace(value string) *Volume // If called multiple times, the UID field is set to the value of the last call. 
func (b *VolumeAttachmentApplyConfiguration) WithUID(value types.UID) *VolumeAttachmentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -138,7 +138,7 @@ func (b *VolumeAttachmentApplyConfiguration) WithUID(value types.UID) *VolumeAtt // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *VolumeAttachmentApplyConfiguration) WithResourceVersion(value string) *VolumeAttachmentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -147,25 +147,25 @@ func (b *VolumeAttachmentApplyConfiguration) WithResourceVersion(value string) * // If called multiple times, the Generation field is set to the value of the last call. func (b *VolumeAttachmentApplyConfiguration) WithGeneration(value int64) *VolumeAttachmentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CreationTimestamp field is set to the value of the last call. -func (b *VolumeAttachmentApplyConfiguration) WithCreationTimestamp(value metav1.Time) *VolumeAttachmentApplyConfiguration { +func (b *VolumeAttachmentApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *VolumeAttachmentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. -func (b *VolumeAttachmentApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *VolumeAttachmentApplyConfiguration { +func (b *VolumeAttachmentApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *VolumeAttachmentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -174,7 +174,7 @@ func (b *VolumeAttachmentApplyConfiguration) WithDeletionTimestamp(value metav1. // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *VolumeAttachmentApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *VolumeAttachmentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -184,11 +184,11 @@ func (b *VolumeAttachmentApplyConfiguration) WithDeletionGracePeriodSeconds(valu // overwriting an existing map entries in Labels field with the same key. 
func (b *VolumeAttachmentApplyConfiguration) WithLabels(entries map[string]string) *VolumeAttachmentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -199,11 +199,11 @@ func (b *VolumeAttachmentApplyConfiguration) WithLabels(entries map[string]strin // overwriting an existing map entries in Annotations field with the same key. func (b *VolumeAttachmentApplyConfiguration) WithAnnotations(entries map[string]string) *VolumeAttachmentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -211,13 +211,13 @@ func (b *VolumeAttachmentApplyConfiguration) WithAnnotations(entries map[string] // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the OwnerReferences field. -func (b *VolumeAttachmentApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *VolumeAttachmentApplyConfiguration { +func (b *VolumeAttachmentApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *VolumeAttachmentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -228,14 +228,14 @@ func (b *VolumeAttachmentApplyConfiguration) WithOwnerReferences(values ...*v1.O func (b *VolumeAttachmentApplyConfiguration) WithFinalizers(values ...string) *VolumeAttachmentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } func (b *VolumeAttachmentApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} } } @@ -258,5 +258,5 @@ func (b *VolumeAttachmentApplyConfiguration) WithStatus(value *VolumeAttachmentS // GetName retrieves the value of the Name field in the declarative configuration. 
func (b *VolumeAttachmentApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumeattachmentsource.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumeattachmentsource.go index 477855398..1c865c001 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumeattachmentsource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumeattachmentsource.go @@ -19,14 +19,14 @@ limitations under the License. package v1 import ( - v1 "k8s.io/client-go/applyconfigurations/core/v1" + corev1 "k8s.io/client-go/applyconfigurations/core/v1" ) // VolumeAttachmentSourceApplyConfiguration represents a declarative configuration of the VolumeAttachmentSource type for use // with apply. type VolumeAttachmentSourceApplyConfiguration struct { - PersistentVolumeName *string `json:"persistentVolumeName,omitempty"` - InlineVolumeSpec *v1.PersistentVolumeSpecApplyConfiguration `json:"inlineVolumeSpec,omitempty"` + PersistentVolumeName *string `json:"persistentVolumeName,omitempty"` + InlineVolumeSpec *corev1.PersistentVolumeSpecApplyConfiguration `json:"inlineVolumeSpec,omitempty"` } // VolumeAttachmentSourceApplyConfiguration constructs a declarative configuration of the VolumeAttachmentSource type for use with @@ -46,7 +46,7 @@ func (b *VolumeAttachmentSourceApplyConfiguration) WithPersistentVolumeName(valu // WithInlineVolumeSpec sets the InlineVolumeSpec field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the InlineVolumeSpec field is set to the value of the last call. -func (b *VolumeAttachmentSourceApplyConfiguration) WithInlineVolumeSpec(value *v1.PersistentVolumeSpecApplyConfiguration) *VolumeAttachmentSourceApplyConfiguration { +func (b *VolumeAttachmentSourceApplyConfiguration) WithInlineVolumeSpec(value *corev1.PersistentVolumeSpecApplyConfiguration) *VolumeAttachmentSourceApplyConfiguration { b.InlineVolumeSpec = value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumeerror.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumeerror.go index 039e5f32b..c16c5c3af 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumeerror.go +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumeerror.go @@ -19,14 +19,14 @@ limitations under the License. package v1 import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) // VolumeErrorApplyConfiguration represents a declarative configuration of the VolumeError type for use // with apply. type VolumeErrorApplyConfiguration struct { - Time *v1.Time `json:"time,omitempty"` - Message *string `json:"message,omitempty"` + Time *metav1.Time `json:"time,omitempty"` + Message *string `json:"message,omitempty"` } // VolumeErrorApplyConfiguration constructs a declarative configuration of the VolumeError type for use with @@ -38,7 +38,7 @@ func VolumeError() *VolumeErrorApplyConfiguration { // WithTime sets the Time field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Time field is set to the value of the last call. 
-func (b *VolumeErrorApplyConfiguration) WithTime(value v1.Time) *VolumeErrorApplyConfiguration { +func (b *VolumeErrorApplyConfiguration) WithTime(value metav1.Time) *VolumeErrorApplyConfiguration { b.Time = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/csistoragecapacity.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/csistoragecapacity.go index aa949e28c..518f7a7f6 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/csistoragecapacity.go +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/csistoragecapacity.go @@ -19,7 +19,7 @@ limitations under the License. package v1alpha1 import ( - v1alpha1 "k8s.io/api/storage/v1alpha1" + storagev1alpha1 "k8s.io/api/storage/v1alpha1" resource "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -61,18 +61,18 @@ func CSIStorageCapacity(name, namespace string) *CSIStorageCapacityApplyConfigur // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! -func ExtractCSIStorageCapacity(cSIStorageCapacity *v1alpha1.CSIStorageCapacity, fieldManager string) (*CSIStorageCapacityApplyConfiguration, error) { +func ExtractCSIStorageCapacity(cSIStorageCapacity *storagev1alpha1.CSIStorageCapacity, fieldManager string) (*CSIStorageCapacityApplyConfiguration, error) { return extractCSIStorageCapacity(cSIStorageCapacity, fieldManager, "") } // ExtractCSIStorageCapacityStatus is the same as ExtractCSIStorageCapacity except // that it extracts the status subresource applied configuration. // Experimental! -func ExtractCSIStorageCapacityStatus(cSIStorageCapacity *v1alpha1.CSIStorageCapacity, fieldManager string) (*CSIStorageCapacityApplyConfiguration, error) { +func ExtractCSIStorageCapacityStatus(cSIStorageCapacity *storagev1alpha1.CSIStorageCapacity, fieldManager string) (*CSIStorageCapacityApplyConfiguration, error) { return extractCSIStorageCapacity(cSIStorageCapacity, fieldManager, "status") } -func extractCSIStorageCapacity(cSIStorageCapacity *v1alpha1.CSIStorageCapacity, fieldManager string, subresource string) (*CSIStorageCapacityApplyConfiguration, error) { +func extractCSIStorageCapacity(cSIStorageCapacity *storagev1alpha1.CSIStorageCapacity, fieldManager string, subresource string) (*CSIStorageCapacityApplyConfiguration, error) { b := &CSIStorageCapacityApplyConfiguration{} err := managedfields.ExtractInto(cSIStorageCapacity, internal.Parser().Type("io.k8s.api.storage.v1alpha1.CSIStorageCapacity"), fieldManager, b, subresource) if err != nil { @@ -90,7 +90,7 @@ func extractCSIStorageCapacity(cSIStorageCapacity *v1alpha1.CSIStorageCapacity, // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *CSIStorageCapacityApplyConfiguration) WithKind(value string) *CSIStorageCapacityApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -98,7 +98,7 @@ func (b *CSIStorageCapacityApplyConfiguration) WithKind(value string) *CSIStorag // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. 
func (b *CSIStorageCapacityApplyConfiguration) WithAPIVersion(value string) *CSIStorageCapacityApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -107,7 +107,7 @@ func (b *CSIStorageCapacityApplyConfiguration) WithAPIVersion(value string) *CSI // If called multiple times, the Name field is set to the value of the last call. func (b *CSIStorageCapacityApplyConfiguration) WithName(value string) *CSIStorageCapacityApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -116,7 +116,7 @@ func (b *CSIStorageCapacityApplyConfiguration) WithName(value string) *CSIStorag // If called multiple times, the GenerateName field is set to the value of the last call. func (b *CSIStorageCapacityApplyConfiguration) WithGenerateName(value string) *CSIStorageCapacityApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -125,7 +125,7 @@ func (b *CSIStorageCapacityApplyConfiguration) WithGenerateName(value string) *C // If called multiple times, the Namespace field is set to the value of the last call. func (b *CSIStorageCapacityApplyConfiguration) WithNamespace(value string) *CSIStorageCapacityApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -134,7 +134,7 @@ func (b *CSIStorageCapacityApplyConfiguration) WithNamespace(value string) *CSIS // If called multiple times, the UID field is set to the value of the last call. func (b *CSIStorageCapacityApplyConfiguration) WithUID(value types.UID) *CSIStorageCapacityApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -143,7 +143,7 @@ func (b *CSIStorageCapacityApplyConfiguration) WithUID(value types.UID) *CSIStor // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *CSIStorageCapacityApplyConfiguration) WithResourceVersion(value string) *CSIStorageCapacityApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -152,7 +152,7 @@ func (b *CSIStorageCapacityApplyConfiguration) WithResourceVersion(value string) // If called multiple times, the Generation field is set to the value of the last call. func (b *CSIStorageCapacityApplyConfiguration) WithGeneration(value int64) *CSIStorageCapacityApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } @@ -161,7 +161,7 @@ func (b *CSIStorageCapacityApplyConfiguration) WithGeneration(value int64) *CSIS // If called multiple times, the CreationTimestamp field is set to the value of the last call. func (b *CSIStorageCapacityApplyConfiguration) WithCreationTimestamp(value metav1.Time) *CSIStorageCapacityApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } @@ -170,7 +170,7 @@ func (b *CSIStorageCapacityApplyConfiguration) WithCreationTimestamp(value metav // If called multiple times, the DeletionTimestamp field is set to the value of the last call. 
func (b *CSIStorageCapacityApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *CSIStorageCapacityApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -179,7 +179,7 @@ func (b *CSIStorageCapacityApplyConfiguration) WithDeletionTimestamp(value metav // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *CSIStorageCapacityApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *CSIStorageCapacityApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -189,11 +189,11 @@ func (b *CSIStorageCapacityApplyConfiguration) WithDeletionGracePeriodSeconds(va // overwriting an existing map entries in Labels field with the same key. func (b *CSIStorageCapacityApplyConfiguration) WithLabels(entries map[string]string) *CSIStorageCapacityApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -204,11 +204,11 @@ func (b *CSIStorageCapacityApplyConfiguration) WithLabels(entries map[string]str // overwriting an existing map entries in Annotations field with the same key. func (b *CSIStorageCapacityApplyConfiguration) WithAnnotations(entries map[string]string) *CSIStorageCapacityApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -222,7 +222,7 @@ func (b *CSIStorageCapacityApplyConfiguration) WithOwnerReferences(values ...*v1 if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -233,7 +233,7 @@ func (b *CSIStorageCapacityApplyConfiguration) WithOwnerReferences(values ...*v1 func (b *CSIStorageCapacityApplyConfiguration) WithFinalizers(values ...string) *CSIStorageCapacityApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } @@ -279,5 +279,5 @@ func (b *CSIStorageCapacityApplyConfiguration) WithMaximumVolumeSize(value resou // GetName retrieves the value of the Name field in the declarative configuration. 
func (b *CSIStorageCapacityApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeattachment.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeattachment.go index 9648621ac..b66cf0094 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeattachment.go +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeattachment.go @@ -85,7 +85,7 @@ func extractVolumeAttachment(volumeAttachment *storagev1alpha1.VolumeAttachment, // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *VolumeAttachmentApplyConfiguration) WithKind(value string) *VolumeAttachmentApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -93,7 +93,7 @@ func (b *VolumeAttachmentApplyConfiguration) WithKind(value string) *VolumeAttac // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *VolumeAttachmentApplyConfiguration) WithAPIVersion(value string) *VolumeAttachmentApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -102,7 +102,7 @@ func (b *VolumeAttachmentApplyConfiguration) WithAPIVersion(value string) *Volum // If called multiple times, the Name field is set to the value of the last call. func (b *VolumeAttachmentApplyConfiguration) WithName(value string) *VolumeAttachmentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -111,7 +111,7 @@ func (b *VolumeAttachmentApplyConfiguration) WithName(value string) *VolumeAttac // If called multiple times, the GenerateName field is set to the value of the last call. func (b *VolumeAttachmentApplyConfiguration) WithGenerateName(value string) *VolumeAttachmentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -120,7 +120,7 @@ func (b *VolumeAttachmentApplyConfiguration) WithGenerateName(value string) *Vol // If called multiple times, the Namespace field is set to the value of the last call. func (b *VolumeAttachmentApplyConfiguration) WithNamespace(value string) *VolumeAttachmentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -129,7 +129,7 @@ func (b *VolumeAttachmentApplyConfiguration) WithNamespace(value string) *Volume // If called multiple times, the UID field is set to the value of the last call. func (b *VolumeAttachmentApplyConfiguration) WithUID(value types.UID) *VolumeAttachmentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -138,7 +138,7 @@ func (b *VolumeAttachmentApplyConfiguration) WithUID(value types.UID) *VolumeAtt // If called multiple times, the ResourceVersion field is set to the value of the last call. 
func (b *VolumeAttachmentApplyConfiguration) WithResourceVersion(value string) *VolumeAttachmentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -147,7 +147,7 @@ func (b *VolumeAttachmentApplyConfiguration) WithResourceVersion(value string) * // If called multiple times, the Generation field is set to the value of the last call. func (b *VolumeAttachmentApplyConfiguration) WithGeneration(value int64) *VolumeAttachmentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } @@ -156,7 +156,7 @@ func (b *VolumeAttachmentApplyConfiguration) WithGeneration(value int64) *Volume // If called multiple times, the CreationTimestamp field is set to the value of the last call. func (b *VolumeAttachmentApplyConfiguration) WithCreationTimestamp(value metav1.Time) *VolumeAttachmentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } @@ -165,7 +165,7 @@ func (b *VolumeAttachmentApplyConfiguration) WithCreationTimestamp(value metav1. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. func (b *VolumeAttachmentApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *VolumeAttachmentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -174,7 +174,7 @@ func (b *VolumeAttachmentApplyConfiguration) WithDeletionTimestamp(value metav1. // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *VolumeAttachmentApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *VolumeAttachmentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -184,11 +184,11 @@ func (b *VolumeAttachmentApplyConfiguration) WithDeletionGracePeriodSeconds(valu // overwriting an existing map entries in Labels field with the same key. func (b *VolumeAttachmentApplyConfiguration) WithLabels(entries map[string]string) *VolumeAttachmentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -199,11 +199,11 @@ func (b *VolumeAttachmentApplyConfiguration) WithLabels(entries map[string]strin // overwriting an existing map entries in Annotations field with the same key. 
func (b *VolumeAttachmentApplyConfiguration) WithAnnotations(entries map[string]string) *VolumeAttachmentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -217,7 +217,7 @@ func (b *VolumeAttachmentApplyConfiguration) WithOwnerReferences(values ...*v1.O if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -228,7 +228,7 @@ func (b *VolumeAttachmentApplyConfiguration) WithOwnerReferences(values ...*v1.O func (b *VolumeAttachmentApplyConfiguration) WithFinalizers(values ...string) *VolumeAttachmentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } @@ -258,5 +258,5 @@ func (b *VolumeAttachmentApplyConfiguration) WithStatus(value *VolumeAttachmentS // GetName retrieves the value of the Name field in the declarative configuration. func (b *VolumeAttachmentApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeattributesclass.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeattributesclass.go index f95bc5547..898726b62 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeattributesclass.go +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeattributesclass.go @@ -19,7 +19,7 @@ limitations under the License. package v1alpha1 import ( - v1alpha1 "k8s.io/api/storage/v1alpha1" + storagev1alpha1 "k8s.io/api/storage/v1alpha1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" @@ -57,18 +57,18 @@ func VolumeAttributesClass(name string) *VolumeAttributesClassApplyConfiguration // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! -func ExtractVolumeAttributesClass(volumeAttributesClass *v1alpha1.VolumeAttributesClass, fieldManager string) (*VolumeAttributesClassApplyConfiguration, error) { +func ExtractVolumeAttributesClass(volumeAttributesClass *storagev1alpha1.VolumeAttributesClass, fieldManager string) (*VolumeAttributesClassApplyConfiguration, error) { return extractVolumeAttributesClass(volumeAttributesClass, fieldManager, "") } // ExtractVolumeAttributesClassStatus is the same as ExtractVolumeAttributesClass except // that it extracts the status subresource applied configuration. // Experimental! 
-func ExtractVolumeAttributesClassStatus(volumeAttributesClass *v1alpha1.VolumeAttributesClass, fieldManager string) (*VolumeAttributesClassApplyConfiguration, error) { +func ExtractVolumeAttributesClassStatus(volumeAttributesClass *storagev1alpha1.VolumeAttributesClass, fieldManager string) (*VolumeAttributesClassApplyConfiguration, error) { return extractVolumeAttributesClass(volumeAttributesClass, fieldManager, "status") } -func extractVolumeAttributesClass(volumeAttributesClass *v1alpha1.VolumeAttributesClass, fieldManager string, subresource string) (*VolumeAttributesClassApplyConfiguration, error) { +func extractVolumeAttributesClass(volumeAttributesClass *storagev1alpha1.VolumeAttributesClass, fieldManager string, subresource string) (*VolumeAttributesClassApplyConfiguration, error) { b := &VolumeAttributesClassApplyConfiguration{} err := managedfields.ExtractInto(volumeAttributesClass, internal.Parser().Type("io.k8s.api.storage.v1alpha1.VolumeAttributesClass"), fieldManager, b, subresource) if err != nil { @@ -85,7 +85,7 @@ func extractVolumeAttributesClass(volumeAttributesClass *v1alpha1.VolumeAttribut // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *VolumeAttributesClassApplyConfiguration) WithKind(value string) *VolumeAttributesClassApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -93,7 +93,7 @@ func (b *VolumeAttributesClassApplyConfiguration) WithKind(value string) *Volume // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *VolumeAttributesClassApplyConfiguration) WithAPIVersion(value string) *VolumeAttributesClassApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -102,7 +102,7 @@ func (b *VolumeAttributesClassApplyConfiguration) WithAPIVersion(value string) * // If called multiple times, the Name field is set to the value of the last call. func (b *VolumeAttributesClassApplyConfiguration) WithName(value string) *VolumeAttributesClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -111,7 +111,7 @@ func (b *VolumeAttributesClassApplyConfiguration) WithName(value string) *Volume // If called multiple times, the GenerateName field is set to the value of the last call. func (b *VolumeAttributesClassApplyConfiguration) WithGenerateName(value string) *VolumeAttributesClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -120,7 +120,7 @@ func (b *VolumeAttributesClassApplyConfiguration) WithGenerateName(value string) // If called multiple times, the Namespace field is set to the value of the last call. func (b *VolumeAttributesClassApplyConfiguration) WithNamespace(value string) *VolumeAttributesClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -129,7 +129,7 @@ func (b *VolumeAttributesClassApplyConfiguration) WithNamespace(value string) *V // If called multiple times, the UID field is set to the value of the last call. 
func (b *VolumeAttributesClassApplyConfiguration) WithUID(value types.UID) *VolumeAttributesClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -138,7 +138,7 @@ func (b *VolumeAttributesClassApplyConfiguration) WithUID(value types.UID) *Volu // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *VolumeAttributesClassApplyConfiguration) WithResourceVersion(value string) *VolumeAttributesClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -147,7 +147,7 @@ func (b *VolumeAttributesClassApplyConfiguration) WithResourceVersion(value stri // If called multiple times, the Generation field is set to the value of the last call. func (b *VolumeAttributesClassApplyConfiguration) WithGeneration(value int64) *VolumeAttributesClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } @@ -156,7 +156,7 @@ func (b *VolumeAttributesClassApplyConfiguration) WithGeneration(value int64) *V // If called multiple times, the CreationTimestamp field is set to the value of the last call. func (b *VolumeAttributesClassApplyConfiguration) WithCreationTimestamp(value metav1.Time) *VolumeAttributesClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } @@ -165,7 +165,7 @@ func (b *VolumeAttributesClassApplyConfiguration) WithCreationTimestamp(value me // If called multiple times, the DeletionTimestamp field is set to the value of the last call. func (b *VolumeAttributesClassApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *VolumeAttributesClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -174,7 +174,7 @@ func (b *VolumeAttributesClassApplyConfiguration) WithDeletionTimestamp(value me // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *VolumeAttributesClassApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *VolumeAttributesClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -184,11 +184,11 @@ func (b *VolumeAttributesClassApplyConfiguration) WithDeletionGracePeriodSeconds // overwriting an existing map entries in Labels field with the same key. func (b *VolumeAttributesClassApplyConfiguration) WithLabels(entries map[string]string) *VolumeAttributesClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -199,11 +199,11 @@ func (b *VolumeAttributesClassApplyConfiguration) WithLabels(entries map[string] // overwriting an existing map entries in Annotations field with the same key. 
func (b *VolumeAttributesClassApplyConfiguration) WithAnnotations(entries map[string]string) *VolumeAttributesClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -217,7 +217,7 @@ func (b *VolumeAttributesClassApplyConfiguration) WithOwnerReferences(values ... if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -228,7 +228,7 @@ func (b *VolumeAttributesClassApplyConfiguration) WithOwnerReferences(values ... func (b *VolumeAttributesClassApplyConfiguration) WithFinalizers(values ...string) *VolumeAttributesClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } @@ -264,5 +264,5 @@ func (b *VolumeAttributesClassApplyConfiguration) WithParameters(entries map[str // GetName retrieves the value of the Name field in the declarative configuration. func (b *VolumeAttributesClassApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csidriver.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csidriver.go index b9a807bd8..0fe9421de 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csidriver.go +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csidriver.go @@ -84,7 +84,7 @@ func extractCSIDriver(cSIDriver *storagev1beta1.CSIDriver, fieldManager string, // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *CSIDriverApplyConfiguration) WithKind(value string) *CSIDriverApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -92,7 +92,7 @@ func (b *CSIDriverApplyConfiguration) WithKind(value string) *CSIDriverApplyConf // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *CSIDriverApplyConfiguration) WithAPIVersion(value string) *CSIDriverApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -101,7 +101,7 @@ func (b *CSIDriverApplyConfiguration) WithAPIVersion(value string) *CSIDriverApp // If called multiple times, the Name field is set to the value of the last call. 
func (b *CSIDriverApplyConfiguration) WithName(value string) *CSIDriverApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -110,7 +110,7 @@ func (b *CSIDriverApplyConfiguration) WithName(value string) *CSIDriverApplyConf // If called multiple times, the GenerateName field is set to the value of the last call. func (b *CSIDriverApplyConfiguration) WithGenerateName(value string) *CSIDriverApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -119,7 +119,7 @@ func (b *CSIDriverApplyConfiguration) WithGenerateName(value string) *CSIDriverA // If called multiple times, the Namespace field is set to the value of the last call. func (b *CSIDriverApplyConfiguration) WithNamespace(value string) *CSIDriverApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -128,7 +128,7 @@ func (b *CSIDriverApplyConfiguration) WithNamespace(value string) *CSIDriverAppl // If called multiple times, the UID field is set to the value of the last call. func (b *CSIDriverApplyConfiguration) WithUID(value types.UID) *CSIDriverApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -137,7 +137,7 @@ func (b *CSIDriverApplyConfiguration) WithUID(value types.UID) *CSIDriverApplyCo // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *CSIDriverApplyConfiguration) WithResourceVersion(value string) *CSIDriverApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -146,7 +146,7 @@ func (b *CSIDriverApplyConfiguration) WithResourceVersion(value string) *CSIDriv // If called multiple times, the Generation field is set to the value of the last call. func (b *CSIDriverApplyConfiguration) WithGeneration(value int64) *CSIDriverApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } @@ -155,7 +155,7 @@ func (b *CSIDriverApplyConfiguration) WithGeneration(value int64) *CSIDriverAppl // If called multiple times, the CreationTimestamp field is set to the value of the last call. func (b *CSIDriverApplyConfiguration) WithCreationTimestamp(value metav1.Time) *CSIDriverApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } @@ -164,7 +164,7 @@ func (b *CSIDriverApplyConfiguration) WithCreationTimestamp(value metav1.Time) * // If called multiple times, the DeletionTimestamp field is set to the value of the last call. func (b *CSIDriverApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *CSIDriverApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -173,7 +173,7 @@ func (b *CSIDriverApplyConfiguration) WithDeletionTimestamp(value metav1.Time) * // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. 
func (b *CSIDriverApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *CSIDriverApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -183,11 +183,11 @@ func (b *CSIDriverApplyConfiguration) WithDeletionGracePeriodSeconds(value int64 // overwriting an existing map entries in Labels field with the same key. func (b *CSIDriverApplyConfiguration) WithLabels(entries map[string]string) *CSIDriverApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -198,11 +198,11 @@ func (b *CSIDriverApplyConfiguration) WithLabels(entries map[string]string) *CSI // overwriting an existing map entries in Annotations field with the same key. func (b *CSIDriverApplyConfiguration) WithAnnotations(entries map[string]string) *CSIDriverApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -216,7 +216,7 @@ func (b *CSIDriverApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerRef if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -227,7 +227,7 @@ func (b *CSIDriverApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerRef func (b *CSIDriverApplyConfiguration) WithFinalizers(values ...string) *CSIDriverApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } @@ -249,5 +249,5 @@ func (b *CSIDriverApplyConfiguration) WithSpec(value *CSIDriverSpecApplyConfigur // GetName retrieves the value of the Name field in the declarative configuration. func (b *CSIDriverApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csidriverspec.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csidriverspec.go index 5f4e068f0..e62fe5888 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csidriverspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csidriverspec.go @@ -19,20 +19,20 @@ limitations under the License. package v1beta1 import ( - v1beta1 "k8s.io/api/storage/v1beta1" + storagev1beta1 "k8s.io/api/storage/v1beta1" ) // CSIDriverSpecApplyConfiguration represents a declarative configuration of the CSIDriverSpec type for use // with apply. 
type CSIDriverSpecApplyConfiguration struct { - AttachRequired *bool `json:"attachRequired,omitempty"` - PodInfoOnMount *bool `json:"podInfoOnMount,omitempty"` - VolumeLifecycleModes []v1beta1.VolumeLifecycleMode `json:"volumeLifecycleModes,omitempty"` - StorageCapacity *bool `json:"storageCapacity,omitempty"` - FSGroupPolicy *v1beta1.FSGroupPolicy `json:"fsGroupPolicy,omitempty"` - TokenRequests []TokenRequestApplyConfiguration `json:"tokenRequests,omitempty"` - RequiresRepublish *bool `json:"requiresRepublish,omitempty"` - SELinuxMount *bool `json:"seLinuxMount,omitempty"` + AttachRequired *bool `json:"attachRequired,omitempty"` + PodInfoOnMount *bool `json:"podInfoOnMount,omitempty"` + VolumeLifecycleModes []storagev1beta1.VolumeLifecycleMode `json:"volumeLifecycleModes,omitempty"` + StorageCapacity *bool `json:"storageCapacity,omitempty"` + FSGroupPolicy *storagev1beta1.FSGroupPolicy `json:"fsGroupPolicy,omitempty"` + TokenRequests []TokenRequestApplyConfiguration `json:"tokenRequests,omitempty"` + RequiresRepublish *bool `json:"requiresRepublish,omitempty"` + SELinuxMount *bool `json:"seLinuxMount,omitempty"` } // CSIDriverSpecApplyConfiguration constructs a declarative configuration of the CSIDriverSpec type for use with @@ -60,7 +60,7 @@ func (b *CSIDriverSpecApplyConfiguration) WithPodInfoOnMount(value bool) *CSIDri // WithVolumeLifecycleModes adds the given value to the VolumeLifecycleModes field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the VolumeLifecycleModes field. -func (b *CSIDriverSpecApplyConfiguration) WithVolumeLifecycleModes(values ...v1beta1.VolumeLifecycleMode) *CSIDriverSpecApplyConfiguration { +func (b *CSIDriverSpecApplyConfiguration) WithVolumeLifecycleModes(values ...storagev1beta1.VolumeLifecycleMode) *CSIDriverSpecApplyConfiguration { for i := range values { b.VolumeLifecycleModes = append(b.VolumeLifecycleModes, values[i]) } @@ -78,7 +78,7 @@ func (b *CSIDriverSpecApplyConfiguration) WithStorageCapacity(value bool) *CSIDr // WithFSGroupPolicy sets the FSGroupPolicy field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the FSGroupPolicy field is set to the value of the last call. -func (b *CSIDriverSpecApplyConfiguration) WithFSGroupPolicy(value v1beta1.FSGroupPolicy) *CSIDriverSpecApplyConfiguration { +func (b *CSIDriverSpecApplyConfiguration) WithFSGroupPolicy(value storagev1beta1.FSGroupPolicy) *CSIDriverSpecApplyConfiguration { b.FSGroupPolicy = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csinode.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csinode.go index af0f41cf0..4e7ad8997 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csinode.go +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csinode.go @@ -84,7 +84,7 @@ func extractCSINode(cSINode *storagev1beta1.CSINode, fieldManager string, subres // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. 
func (b *CSINodeApplyConfiguration) WithKind(value string) *CSINodeApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -92,7 +92,7 @@ func (b *CSINodeApplyConfiguration) WithKind(value string) *CSINodeApplyConfigur // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *CSINodeApplyConfiguration) WithAPIVersion(value string) *CSINodeApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -101,7 +101,7 @@ func (b *CSINodeApplyConfiguration) WithAPIVersion(value string) *CSINodeApplyCo // If called multiple times, the Name field is set to the value of the last call. func (b *CSINodeApplyConfiguration) WithName(value string) *CSINodeApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -110,7 +110,7 @@ func (b *CSINodeApplyConfiguration) WithName(value string) *CSINodeApplyConfigur // If called multiple times, the GenerateName field is set to the value of the last call. func (b *CSINodeApplyConfiguration) WithGenerateName(value string) *CSINodeApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -119,7 +119,7 @@ func (b *CSINodeApplyConfiguration) WithGenerateName(value string) *CSINodeApply // If called multiple times, the Namespace field is set to the value of the last call. func (b *CSINodeApplyConfiguration) WithNamespace(value string) *CSINodeApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -128,7 +128,7 @@ func (b *CSINodeApplyConfiguration) WithNamespace(value string) *CSINodeApplyCon // If called multiple times, the UID field is set to the value of the last call. func (b *CSINodeApplyConfiguration) WithUID(value types.UID) *CSINodeApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -137,7 +137,7 @@ func (b *CSINodeApplyConfiguration) WithUID(value types.UID) *CSINodeApplyConfig // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *CSINodeApplyConfiguration) WithResourceVersion(value string) *CSINodeApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -146,7 +146,7 @@ func (b *CSINodeApplyConfiguration) WithResourceVersion(value string) *CSINodeAp // If called multiple times, the Generation field is set to the value of the last call. func (b *CSINodeApplyConfiguration) WithGeneration(value int64) *CSINodeApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } @@ -155,7 +155,7 @@ func (b *CSINodeApplyConfiguration) WithGeneration(value int64) *CSINodeApplyCon // If called multiple times, the CreationTimestamp field is set to the value of the last call. 
func (b *CSINodeApplyConfiguration) WithCreationTimestamp(value metav1.Time) *CSINodeApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } @@ -164,7 +164,7 @@ func (b *CSINodeApplyConfiguration) WithCreationTimestamp(value metav1.Time) *CS // If called multiple times, the DeletionTimestamp field is set to the value of the last call. func (b *CSINodeApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *CSINodeApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -173,7 +173,7 @@ func (b *CSINodeApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *CS // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *CSINodeApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *CSINodeApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -183,11 +183,11 @@ func (b *CSINodeApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) // overwriting an existing map entries in Labels field with the same key. func (b *CSINodeApplyConfiguration) WithLabels(entries map[string]string) *CSINodeApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -198,11 +198,11 @@ func (b *CSINodeApplyConfiguration) WithLabels(entries map[string]string) *CSINo // overwriting an existing map entries in Annotations field with the same key. 
func (b *CSINodeApplyConfiguration) WithAnnotations(entries map[string]string) *CSINodeApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -216,7 +216,7 @@ func (b *CSINodeApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerRefer if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -227,7 +227,7 @@ func (b *CSINodeApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerRefer func (b *CSINodeApplyConfiguration) WithFinalizers(values ...string) *CSINodeApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } @@ -249,5 +249,5 @@ func (b *CSINodeApplyConfiguration) WithSpec(value *CSINodeSpecApplyConfiguratio // GetName retrieves the value of the Name field in the declarative configuration. func (b *CSINodeApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csistoragecapacity.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csistoragecapacity.go index 19350e5a6..c8acaf923 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csistoragecapacity.go +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csistoragecapacity.go @@ -19,7 +19,7 @@ limitations under the License. package v1beta1 import ( - v1beta1 "k8s.io/api/storage/v1beta1" + storagev1beta1 "k8s.io/api/storage/v1beta1" resource "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -61,18 +61,18 @@ func CSIStorageCapacity(name, namespace string) *CSIStorageCapacityApplyConfigur // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! -func ExtractCSIStorageCapacity(cSIStorageCapacity *v1beta1.CSIStorageCapacity, fieldManager string) (*CSIStorageCapacityApplyConfiguration, error) { +func ExtractCSIStorageCapacity(cSIStorageCapacity *storagev1beta1.CSIStorageCapacity, fieldManager string) (*CSIStorageCapacityApplyConfiguration, error) { return extractCSIStorageCapacity(cSIStorageCapacity, fieldManager, "") } // ExtractCSIStorageCapacityStatus is the same as ExtractCSIStorageCapacity except // that it extracts the status subresource applied configuration. // Experimental! 
-func ExtractCSIStorageCapacityStatus(cSIStorageCapacity *v1beta1.CSIStorageCapacity, fieldManager string) (*CSIStorageCapacityApplyConfiguration, error) { +func ExtractCSIStorageCapacityStatus(cSIStorageCapacity *storagev1beta1.CSIStorageCapacity, fieldManager string) (*CSIStorageCapacityApplyConfiguration, error) { return extractCSIStorageCapacity(cSIStorageCapacity, fieldManager, "status") } -func extractCSIStorageCapacity(cSIStorageCapacity *v1beta1.CSIStorageCapacity, fieldManager string, subresource string) (*CSIStorageCapacityApplyConfiguration, error) { +func extractCSIStorageCapacity(cSIStorageCapacity *storagev1beta1.CSIStorageCapacity, fieldManager string, subresource string) (*CSIStorageCapacityApplyConfiguration, error) { b := &CSIStorageCapacityApplyConfiguration{} err := managedfields.ExtractInto(cSIStorageCapacity, internal.Parser().Type("io.k8s.api.storage.v1beta1.CSIStorageCapacity"), fieldManager, b, subresource) if err != nil { @@ -90,7 +90,7 @@ func extractCSIStorageCapacity(cSIStorageCapacity *v1beta1.CSIStorageCapacity, f // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *CSIStorageCapacityApplyConfiguration) WithKind(value string) *CSIStorageCapacityApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -98,7 +98,7 @@ func (b *CSIStorageCapacityApplyConfiguration) WithKind(value string) *CSIStorag // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *CSIStorageCapacityApplyConfiguration) WithAPIVersion(value string) *CSIStorageCapacityApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -107,7 +107,7 @@ func (b *CSIStorageCapacityApplyConfiguration) WithAPIVersion(value string) *CSI // If called multiple times, the Name field is set to the value of the last call. func (b *CSIStorageCapacityApplyConfiguration) WithName(value string) *CSIStorageCapacityApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -116,7 +116,7 @@ func (b *CSIStorageCapacityApplyConfiguration) WithName(value string) *CSIStorag // If called multiple times, the GenerateName field is set to the value of the last call. func (b *CSIStorageCapacityApplyConfiguration) WithGenerateName(value string) *CSIStorageCapacityApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -125,7 +125,7 @@ func (b *CSIStorageCapacityApplyConfiguration) WithGenerateName(value string) *C // If called multiple times, the Namespace field is set to the value of the last call. func (b *CSIStorageCapacityApplyConfiguration) WithNamespace(value string) *CSIStorageCapacityApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -134,7 +134,7 @@ func (b *CSIStorageCapacityApplyConfiguration) WithNamespace(value string) *CSIS // If called multiple times, the UID field is set to the value of the last call. 
func (b *CSIStorageCapacityApplyConfiguration) WithUID(value types.UID) *CSIStorageCapacityApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -143,7 +143,7 @@ func (b *CSIStorageCapacityApplyConfiguration) WithUID(value types.UID) *CSIStor // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *CSIStorageCapacityApplyConfiguration) WithResourceVersion(value string) *CSIStorageCapacityApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -152,7 +152,7 @@ func (b *CSIStorageCapacityApplyConfiguration) WithResourceVersion(value string) // If called multiple times, the Generation field is set to the value of the last call. func (b *CSIStorageCapacityApplyConfiguration) WithGeneration(value int64) *CSIStorageCapacityApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } @@ -161,7 +161,7 @@ func (b *CSIStorageCapacityApplyConfiguration) WithGeneration(value int64) *CSIS // If called multiple times, the CreationTimestamp field is set to the value of the last call. func (b *CSIStorageCapacityApplyConfiguration) WithCreationTimestamp(value metav1.Time) *CSIStorageCapacityApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } @@ -170,7 +170,7 @@ func (b *CSIStorageCapacityApplyConfiguration) WithCreationTimestamp(value metav // If called multiple times, the DeletionTimestamp field is set to the value of the last call. func (b *CSIStorageCapacityApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *CSIStorageCapacityApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -179,7 +179,7 @@ func (b *CSIStorageCapacityApplyConfiguration) WithDeletionTimestamp(value metav // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *CSIStorageCapacityApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *CSIStorageCapacityApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -189,11 +189,11 @@ func (b *CSIStorageCapacityApplyConfiguration) WithDeletionGracePeriodSeconds(va // overwriting an existing map entries in Labels field with the same key. func (b *CSIStorageCapacityApplyConfiguration) WithLabels(entries map[string]string) *CSIStorageCapacityApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -204,11 +204,11 @@ func (b *CSIStorageCapacityApplyConfiguration) WithLabels(entries map[string]str // overwriting an existing map entries in Annotations field with the same key. 
func (b *CSIStorageCapacityApplyConfiguration) WithAnnotations(entries map[string]string) *CSIStorageCapacityApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -222,7 +222,7 @@ func (b *CSIStorageCapacityApplyConfiguration) WithOwnerReferences(values ...*v1 if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -233,7 +233,7 @@ func (b *CSIStorageCapacityApplyConfiguration) WithOwnerReferences(values ...*v1 func (b *CSIStorageCapacityApplyConfiguration) WithFinalizers(values ...string) *CSIStorageCapacityApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } @@ -279,5 +279,5 @@ func (b *CSIStorageCapacityApplyConfiguration) WithMaximumVolumeSize(value resou // GetName retrieves the value of the Name field in the declarative configuration. func (b *CSIStorageCapacityApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/storageclass.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/storageclass.go index fa504a44e..2d211754e 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/storageclass.go +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/storageclass.go @@ -20,7 +20,7 @@ package v1beta1 import ( corev1 "k8s.io/api/core/v1" - v1beta1 "k8s.io/api/storage/v1beta1" + storagev1beta1 "k8s.io/api/storage/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" @@ -39,7 +39,7 @@ type StorageClassApplyConfiguration struct { ReclaimPolicy *corev1.PersistentVolumeReclaimPolicy `json:"reclaimPolicy,omitempty"` MountOptions []string `json:"mountOptions,omitempty"` AllowVolumeExpansion *bool `json:"allowVolumeExpansion,omitempty"` - VolumeBindingMode *v1beta1.VolumeBindingMode `json:"volumeBindingMode,omitempty"` + VolumeBindingMode *storagev1beta1.VolumeBindingMode `json:"volumeBindingMode,omitempty"` AllowedTopologies []applyconfigurationscorev1.TopologySelectorTermApplyConfiguration `json:"allowedTopologies,omitempty"` } @@ -64,18 +64,18 @@ func StorageClass(name string) *StorageClassApplyConfiguration { // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! 
-func ExtractStorageClass(storageClass *v1beta1.StorageClass, fieldManager string) (*StorageClassApplyConfiguration, error) { +func ExtractStorageClass(storageClass *storagev1beta1.StorageClass, fieldManager string) (*StorageClassApplyConfiguration, error) { return extractStorageClass(storageClass, fieldManager, "") } // ExtractStorageClassStatus is the same as ExtractStorageClass except // that it extracts the status subresource applied configuration. // Experimental! -func ExtractStorageClassStatus(storageClass *v1beta1.StorageClass, fieldManager string) (*StorageClassApplyConfiguration, error) { +func ExtractStorageClassStatus(storageClass *storagev1beta1.StorageClass, fieldManager string) (*StorageClassApplyConfiguration, error) { return extractStorageClass(storageClass, fieldManager, "status") } -func extractStorageClass(storageClass *v1beta1.StorageClass, fieldManager string, subresource string) (*StorageClassApplyConfiguration, error) { +func extractStorageClass(storageClass *storagev1beta1.StorageClass, fieldManager string, subresource string) (*StorageClassApplyConfiguration, error) { b := &StorageClassApplyConfiguration{} err := managedfields.ExtractInto(storageClass, internal.Parser().Type("io.k8s.api.storage.v1beta1.StorageClass"), fieldManager, b, subresource) if err != nil { @@ -92,7 +92,7 @@ func extractStorageClass(storageClass *v1beta1.StorageClass, fieldManager string // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *StorageClassApplyConfiguration) WithKind(value string) *StorageClassApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -100,7 +100,7 @@ func (b *StorageClassApplyConfiguration) WithKind(value string) *StorageClassApp // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *StorageClassApplyConfiguration) WithAPIVersion(value string) *StorageClassApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -109,7 +109,7 @@ func (b *StorageClassApplyConfiguration) WithAPIVersion(value string) *StorageCl // If called multiple times, the Name field is set to the value of the last call. func (b *StorageClassApplyConfiguration) WithName(value string) *StorageClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -118,7 +118,7 @@ func (b *StorageClassApplyConfiguration) WithName(value string) *StorageClassApp // If called multiple times, the GenerateName field is set to the value of the last call. func (b *StorageClassApplyConfiguration) WithGenerateName(value string) *StorageClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -127,7 +127,7 @@ func (b *StorageClassApplyConfiguration) WithGenerateName(value string) *Storage // If called multiple times, the Namespace field is set to the value of the last call. 
func (b *StorageClassApplyConfiguration) WithNamespace(value string) *StorageClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -136,7 +136,7 @@ func (b *StorageClassApplyConfiguration) WithNamespace(value string) *StorageCla // If called multiple times, the UID field is set to the value of the last call. func (b *StorageClassApplyConfiguration) WithUID(value types.UID) *StorageClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -145,7 +145,7 @@ func (b *StorageClassApplyConfiguration) WithUID(value types.UID) *StorageClassA // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *StorageClassApplyConfiguration) WithResourceVersion(value string) *StorageClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -154,7 +154,7 @@ func (b *StorageClassApplyConfiguration) WithResourceVersion(value string) *Stor // If called multiple times, the Generation field is set to the value of the last call. func (b *StorageClassApplyConfiguration) WithGeneration(value int64) *StorageClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } @@ -163,7 +163,7 @@ func (b *StorageClassApplyConfiguration) WithGeneration(value int64) *StorageCla // If called multiple times, the CreationTimestamp field is set to the value of the last call. func (b *StorageClassApplyConfiguration) WithCreationTimestamp(value metav1.Time) *StorageClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } @@ -172,7 +172,7 @@ func (b *StorageClassApplyConfiguration) WithCreationTimestamp(value metav1.Time // If called multiple times, the DeletionTimestamp field is set to the value of the last call. func (b *StorageClassApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *StorageClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -181,7 +181,7 @@ func (b *StorageClassApplyConfiguration) WithDeletionTimestamp(value metav1.Time // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *StorageClassApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *StorageClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -191,11 +191,11 @@ func (b *StorageClassApplyConfiguration) WithDeletionGracePeriodSeconds(value in // overwriting an existing map entries in Labels field with the same key. 
func (b *StorageClassApplyConfiguration) WithLabels(entries map[string]string) *StorageClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -206,11 +206,11 @@ func (b *StorageClassApplyConfiguration) WithLabels(entries map[string]string) * // overwriting an existing map entries in Annotations field with the same key. func (b *StorageClassApplyConfiguration) WithAnnotations(entries map[string]string) *StorageClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -224,7 +224,7 @@ func (b *StorageClassApplyConfiguration) WithOwnerReferences(values ...*v1.Owner if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -235,7 +235,7 @@ func (b *StorageClassApplyConfiguration) WithOwnerReferences(values ...*v1.Owner func (b *StorageClassApplyConfiguration) WithFinalizers(values ...string) *StorageClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } @@ -297,7 +297,7 @@ func (b *StorageClassApplyConfiguration) WithAllowVolumeExpansion(value bool) *S // WithVolumeBindingMode sets the VolumeBindingMode field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the VolumeBindingMode field is set to the value of the last call. -func (b *StorageClassApplyConfiguration) WithVolumeBindingMode(value v1beta1.VolumeBindingMode) *StorageClassApplyConfiguration { +func (b *StorageClassApplyConfiguration) WithVolumeBindingMode(value storagev1beta1.VolumeBindingMode) *StorageClassApplyConfiguration { b.VolumeBindingMode = &value return b } @@ -318,5 +318,5 @@ func (b *StorageClassApplyConfiguration) WithAllowedTopologies(values ...*applyc // GetName retrieves the value of the Name field in the declarative configuration. 
func (b *StorageClassApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumeattachment.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumeattachment.go index b0711d731..3f7110bf4 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumeattachment.go +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumeattachment.go @@ -85,7 +85,7 @@ func extractVolumeAttachment(volumeAttachment *storagev1beta1.VolumeAttachment, // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *VolumeAttachmentApplyConfiguration) WithKind(value string) *VolumeAttachmentApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -93,7 +93,7 @@ func (b *VolumeAttachmentApplyConfiguration) WithKind(value string) *VolumeAttac // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *VolumeAttachmentApplyConfiguration) WithAPIVersion(value string) *VolumeAttachmentApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -102,7 +102,7 @@ func (b *VolumeAttachmentApplyConfiguration) WithAPIVersion(value string) *Volum // If called multiple times, the Name field is set to the value of the last call. func (b *VolumeAttachmentApplyConfiguration) WithName(value string) *VolumeAttachmentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -111,7 +111,7 @@ func (b *VolumeAttachmentApplyConfiguration) WithName(value string) *VolumeAttac // If called multiple times, the GenerateName field is set to the value of the last call. func (b *VolumeAttachmentApplyConfiguration) WithGenerateName(value string) *VolumeAttachmentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -120,7 +120,7 @@ func (b *VolumeAttachmentApplyConfiguration) WithGenerateName(value string) *Vol // If called multiple times, the Namespace field is set to the value of the last call. func (b *VolumeAttachmentApplyConfiguration) WithNamespace(value string) *VolumeAttachmentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -129,7 +129,7 @@ func (b *VolumeAttachmentApplyConfiguration) WithNamespace(value string) *Volume // If called multiple times, the UID field is set to the value of the last call. func (b *VolumeAttachmentApplyConfiguration) WithUID(value types.UID) *VolumeAttachmentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -138,7 +138,7 @@ func (b *VolumeAttachmentApplyConfiguration) WithUID(value types.UID) *VolumeAtt // If called multiple times, the ResourceVersion field is set to the value of the last call. 
func (b *VolumeAttachmentApplyConfiguration) WithResourceVersion(value string) *VolumeAttachmentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -147,7 +147,7 @@ func (b *VolumeAttachmentApplyConfiguration) WithResourceVersion(value string) * // If called multiple times, the Generation field is set to the value of the last call. func (b *VolumeAttachmentApplyConfiguration) WithGeneration(value int64) *VolumeAttachmentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } @@ -156,7 +156,7 @@ func (b *VolumeAttachmentApplyConfiguration) WithGeneration(value int64) *Volume // If called multiple times, the CreationTimestamp field is set to the value of the last call. func (b *VolumeAttachmentApplyConfiguration) WithCreationTimestamp(value metav1.Time) *VolumeAttachmentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } @@ -165,7 +165,7 @@ func (b *VolumeAttachmentApplyConfiguration) WithCreationTimestamp(value metav1. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. func (b *VolumeAttachmentApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *VolumeAttachmentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -174,7 +174,7 @@ func (b *VolumeAttachmentApplyConfiguration) WithDeletionTimestamp(value metav1. // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *VolumeAttachmentApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *VolumeAttachmentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -184,11 +184,11 @@ func (b *VolumeAttachmentApplyConfiguration) WithDeletionGracePeriodSeconds(valu // overwriting an existing map entries in Labels field with the same key. func (b *VolumeAttachmentApplyConfiguration) WithLabels(entries map[string]string) *VolumeAttachmentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -199,11 +199,11 @@ func (b *VolumeAttachmentApplyConfiguration) WithLabels(entries map[string]strin // overwriting an existing map entries in Annotations field with the same key. 
func (b *VolumeAttachmentApplyConfiguration) WithAnnotations(entries map[string]string) *VolumeAttachmentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -217,7 +217,7 @@ func (b *VolumeAttachmentApplyConfiguration) WithOwnerReferences(values ...*v1.O if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -228,7 +228,7 @@ func (b *VolumeAttachmentApplyConfiguration) WithOwnerReferences(values ...*v1.O func (b *VolumeAttachmentApplyConfiguration) WithFinalizers(values ...string) *VolumeAttachmentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } @@ -258,5 +258,5 @@ func (b *VolumeAttachmentApplyConfiguration) WithStatus(value *VolumeAttachmentS // GetName retrieves the value of the Name field in the declarative configuration. func (b *VolumeAttachmentApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumeattributesclass.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumeattributesclass.go index 7b221d277..ab1bda330 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumeattributesclass.go +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumeattributesclass.go @@ -19,7 +19,7 @@ limitations under the License. package v1beta1 import ( - v1beta1 "k8s.io/api/storage/v1beta1" + storagev1beta1 "k8s.io/api/storage/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" @@ -57,18 +57,18 @@ func VolumeAttributesClass(name string) *VolumeAttributesClassApplyConfiguration // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! -func ExtractVolumeAttributesClass(volumeAttributesClass *v1beta1.VolumeAttributesClass, fieldManager string) (*VolumeAttributesClassApplyConfiguration, error) { +func ExtractVolumeAttributesClass(volumeAttributesClass *storagev1beta1.VolumeAttributesClass, fieldManager string) (*VolumeAttributesClassApplyConfiguration, error) { return extractVolumeAttributesClass(volumeAttributesClass, fieldManager, "") } // ExtractVolumeAttributesClassStatus is the same as ExtractVolumeAttributesClass except // that it extracts the status subresource applied configuration. // Experimental! 
-func ExtractVolumeAttributesClassStatus(volumeAttributesClass *v1beta1.VolumeAttributesClass, fieldManager string) (*VolumeAttributesClassApplyConfiguration, error) { +func ExtractVolumeAttributesClassStatus(volumeAttributesClass *storagev1beta1.VolumeAttributesClass, fieldManager string) (*VolumeAttributesClassApplyConfiguration, error) { return extractVolumeAttributesClass(volumeAttributesClass, fieldManager, "status") } -func extractVolumeAttributesClass(volumeAttributesClass *v1beta1.VolumeAttributesClass, fieldManager string, subresource string) (*VolumeAttributesClassApplyConfiguration, error) { +func extractVolumeAttributesClass(volumeAttributesClass *storagev1beta1.VolumeAttributesClass, fieldManager string, subresource string) (*VolumeAttributesClassApplyConfiguration, error) { b := &VolumeAttributesClassApplyConfiguration{} err := managedfields.ExtractInto(volumeAttributesClass, internal.Parser().Type("io.k8s.api.storage.v1beta1.VolumeAttributesClass"), fieldManager, b, subresource) if err != nil { @@ -85,7 +85,7 @@ func extractVolumeAttributesClass(volumeAttributesClass *v1beta1.VolumeAttribute // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *VolumeAttributesClassApplyConfiguration) WithKind(value string) *VolumeAttributesClassApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -93,7 +93,7 @@ func (b *VolumeAttributesClassApplyConfiguration) WithKind(value string) *Volume // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *VolumeAttributesClassApplyConfiguration) WithAPIVersion(value string) *VolumeAttributesClassApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -102,7 +102,7 @@ func (b *VolumeAttributesClassApplyConfiguration) WithAPIVersion(value string) * // If called multiple times, the Name field is set to the value of the last call. func (b *VolumeAttributesClassApplyConfiguration) WithName(value string) *VolumeAttributesClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -111,7 +111,7 @@ func (b *VolumeAttributesClassApplyConfiguration) WithName(value string) *Volume // If called multiple times, the GenerateName field is set to the value of the last call. func (b *VolumeAttributesClassApplyConfiguration) WithGenerateName(value string) *VolumeAttributesClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -120,7 +120,7 @@ func (b *VolumeAttributesClassApplyConfiguration) WithGenerateName(value string) // If called multiple times, the Namespace field is set to the value of the last call. func (b *VolumeAttributesClassApplyConfiguration) WithNamespace(value string) *VolumeAttributesClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -129,7 +129,7 @@ func (b *VolumeAttributesClassApplyConfiguration) WithNamespace(value string) *V // If called multiple times, the UID field is set to the value of the last call. 
func (b *VolumeAttributesClassApplyConfiguration) WithUID(value types.UID) *VolumeAttributesClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -138,7 +138,7 @@ func (b *VolumeAttributesClassApplyConfiguration) WithUID(value types.UID) *Volu // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *VolumeAttributesClassApplyConfiguration) WithResourceVersion(value string) *VolumeAttributesClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -147,7 +147,7 @@ func (b *VolumeAttributesClassApplyConfiguration) WithResourceVersion(value stri // If called multiple times, the Generation field is set to the value of the last call. func (b *VolumeAttributesClassApplyConfiguration) WithGeneration(value int64) *VolumeAttributesClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } @@ -156,7 +156,7 @@ func (b *VolumeAttributesClassApplyConfiguration) WithGeneration(value int64) *V // If called multiple times, the CreationTimestamp field is set to the value of the last call. func (b *VolumeAttributesClassApplyConfiguration) WithCreationTimestamp(value metav1.Time) *VolumeAttributesClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } @@ -165,7 +165,7 @@ func (b *VolumeAttributesClassApplyConfiguration) WithCreationTimestamp(value me // If called multiple times, the DeletionTimestamp field is set to the value of the last call. func (b *VolumeAttributesClassApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *VolumeAttributesClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -174,7 +174,7 @@ func (b *VolumeAttributesClassApplyConfiguration) WithDeletionTimestamp(value me // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *VolumeAttributesClassApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *VolumeAttributesClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -184,11 +184,11 @@ func (b *VolumeAttributesClassApplyConfiguration) WithDeletionGracePeriodSeconds // overwriting an existing map entries in Labels field with the same key. func (b *VolumeAttributesClassApplyConfiguration) WithLabels(entries map[string]string) *VolumeAttributesClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -199,11 +199,11 @@ func (b *VolumeAttributesClassApplyConfiguration) WithLabels(entries map[string] // overwriting an existing map entries in Annotations field with the same key. 
func (b *VolumeAttributesClassApplyConfiguration) WithAnnotations(entries map[string]string) *VolumeAttributesClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -217,7 +217,7 @@ func (b *VolumeAttributesClassApplyConfiguration) WithOwnerReferences(values ... if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -228,7 +228,7 @@ func (b *VolumeAttributesClassApplyConfiguration) WithOwnerReferences(values ... func (b *VolumeAttributesClassApplyConfiguration) WithFinalizers(values ...string) *VolumeAttributesClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } @@ -264,5 +264,5 @@ func (b *VolumeAttributesClassApplyConfiguration) WithParameters(entries map[str // GetName retrieves the value of the Name field in the declarative configuration. func (b *VolumeAttributesClassApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/k8s.io/client-go/applyconfigurations/storagemigration/v1alpha1/migrationcondition.go b/vendor/k8s.io/client-go/applyconfigurations/storagemigration/v1alpha1/migrationcondition.go index dcdbc60c7..5ffd572ee 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/storagemigration/v1alpha1/migrationcondition.go +++ b/vendor/k8s.io/client-go/applyconfigurations/storagemigration/v1alpha1/migrationcondition.go @@ -20,18 +20,18 @@ package v1alpha1 import ( v1 "k8s.io/api/core/v1" - v1alpha1 "k8s.io/api/storagemigration/v1alpha1" + storagemigrationv1alpha1 "k8s.io/api/storagemigration/v1alpha1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) // MigrationConditionApplyConfiguration represents a declarative configuration of the MigrationCondition type for use // with apply. type MigrationConditionApplyConfiguration struct { - Type *v1alpha1.MigrationConditionType `json:"type,omitempty"` - Status *v1.ConditionStatus `json:"status,omitempty"` - LastUpdateTime *metav1.Time `json:"lastUpdateTime,omitempty"` - Reason *string `json:"reason,omitempty"` - Message *string `json:"message,omitempty"` + Type *storagemigrationv1alpha1.MigrationConditionType `json:"type,omitempty"` + Status *v1.ConditionStatus `json:"status,omitempty"` + LastUpdateTime *metav1.Time `json:"lastUpdateTime,omitempty"` + Reason *string `json:"reason,omitempty"` + Message *string `json:"message,omitempty"` } // MigrationConditionApplyConfiguration constructs a declarative configuration of the MigrationCondition type for use with @@ -43,7 +43,7 @@ func MigrationCondition() *MigrationConditionApplyConfiguration { // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. 
// If called multiple times, the Type field is set to the value of the last call. -func (b *MigrationConditionApplyConfiguration) WithType(value v1alpha1.MigrationConditionType) *MigrationConditionApplyConfiguration { +func (b *MigrationConditionApplyConfiguration) WithType(value storagemigrationv1alpha1.MigrationConditionType) *MigrationConditionApplyConfiguration { b.Type = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/storagemigration/v1alpha1/storageversionmigration.go b/vendor/k8s.io/client-go/applyconfigurations/storagemigration/v1alpha1/storageversionmigration.go index 7e6452a77..a6dbc13a5 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/storagemigration/v1alpha1/storageversionmigration.go +++ b/vendor/k8s.io/client-go/applyconfigurations/storagemigration/v1alpha1/storageversionmigration.go @@ -85,7 +85,7 @@ func extractStorageVersionMigration(storageVersionMigration *storagemigrationv1a // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *StorageVersionMigrationApplyConfiguration) WithKind(value string) *StorageVersionMigrationApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -93,7 +93,7 @@ func (b *StorageVersionMigrationApplyConfiguration) WithKind(value string) *Stor // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *StorageVersionMigrationApplyConfiguration) WithAPIVersion(value string) *StorageVersionMigrationApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -102,7 +102,7 @@ func (b *StorageVersionMigrationApplyConfiguration) WithAPIVersion(value string) // If called multiple times, the Name field is set to the value of the last call. func (b *StorageVersionMigrationApplyConfiguration) WithName(value string) *StorageVersionMigrationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -111,7 +111,7 @@ func (b *StorageVersionMigrationApplyConfiguration) WithName(value string) *Stor // If called multiple times, the GenerateName field is set to the value of the last call. func (b *StorageVersionMigrationApplyConfiguration) WithGenerateName(value string) *StorageVersionMigrationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -120,7 +120,7 @@ func (b *StorageVersionMigrationApplyConfiguration) WithGenerateName(value strin // If called multiple times, the Namespace field is set to the value of the last call. func (b *StorageVersionMigrationApplyConfiguration) WithNamespace(value string) *StorageVersionMigrationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -129,7 +129,7 @@ func (b *StorageVersionMigrationApplyConfiguration) WithNamespace(value string) // If called multiple times, the UID field is set to the value of the last call. 
func (b *StorageVersionMigrationApplyConfiguration) WithUID(value types.UID) *StorageVersionMigrationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -138,7 +138,7 @@ func (b *StorageVersionMigrationApplyConfiguration) WithUID(value types.UID) *St // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *StorageVersionMigrationApplyConfiguration) WithResourceVersion(value string) *StorageVersionMigrationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -147,7 +147,7 @@ func (b *StorageVersionMigrationApplyConfiguration) WithResourceVersion(value st // If called multiple times, the Generation field is set to the value of the last call. func (b *StorageVersionMigrationApplyConfiguration) WithGeneration(value int64) *StorageVersionMigrationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } @@ -156,7 +156,7 @@ func (b *StorageVersionMigrationApplyConfiguration) WithGeneration(value int64) // If called multiple times, the CreationTimestamp field is set to the value of the last call. func (b *StorageVersionMigrationApplyConfiguration) WithCreationTimestamp(value metav1.Time) *StorageVersionMigrationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } @@ -165,7 +165,7 @@ func (b *StorageVersionMigrationApplyConfiguration) WithCreationTimestamp(value // If called multiple times, the DeletionTimestamp field is set to the value of the last call. func (b *StorageVersionMigrationApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *StorageVersionMigrationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -174,7 +174,7 @@ func (b *StorageVersionMigrationApplyConfiguration) WithDeletionTimestamp(value // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *StorageVersionMigrationApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *StorageVersionMigrationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -184,11 +184,11 @@ func (b *StorageVersionMigrationApplyConfiguration) WithDeletionGracePeriodSecon // overwriting an existing map entries in Labels field with the same key. func (b *StorageVersionMigrationApplyConfiguration) WithLabels(entries map[string]string) *StorageVersionMigrationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -199,11 +199,11 @@ func (b *StorageVersionMigrationApplyConfiguration) WithLabels(entries map[strin // overwriting an existing map entries in Annotations field with the same key. 
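A minimal sketch of how these regenerated builders are consumed. The hunks above only spell out the embedded ObjectMetaApplyConfiguration/TypeMetaApplyConfiguration receivers explicitly; because of Go's embedded-field promotion, the chained With* API seen by callers is unchanged. Only methods visible in the hunks are used; the label, annotation, and parameter values are illustrative.

```go
package main

import (
	"fmt"

	storagev1beta1 "k8s.io/client-go/applyconfigurations/storage/v1beta1"
)

func main() {
	// The constructor seeds the name; each With* call still resolves through the
	// embedded ObjectMetaApplyConfiguration exactly as before.
	vac := storagev1beta1.VolumeAttributesClass("fast-class").
		WithLabels(map[string]string{"tier": "fast"}).
		WithAnnotations(map[string]string{"example.com/owner": "storage-team"}). // illustrative key
		WithParameters(map[string]string{"iops": "16000"})                       // illustrative parameter

	// GetName reads the promoted Name field from the embedded object meta.
	fmt.Println(*vac.GetName()) // "fast-class"
}
```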
func (b *StorageVersionMigrationApplyConfiguration) WithAnnotations(entries map[string]string) *StorageVersionMigrationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -217,7 +217,7 @@ func (b *StorageVersionMigrationApplyConfiguration) WithOwnerReferences(values . if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -228,7 +228,7 @@ func (b *StorageVersionMigrationApplyConfiguration) WithOwnerReferences(values . func (b *StorageVersionMigrationApplyConfiguration) WithFinalizers(values ...string) *StorageVersionMigrationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } @@ -258,5 +258,5 @@ func (b *StorageVersionMigrationApplyConfiguration) WithStatus(value *StorageVer // GetName retrieves the value of the Name field in the declarative configuration. func (b *StorageVersionMigrationApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/vendor/k8s.io/client-go/applyconfigurations/utils.go b/vendor/k8s.io/client-go/applyconfigurations/utils.go index 0955b8f44..afbabac94 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/utils.go +++ b/vendor/k8s.io/client-go/applyconfigurations/utils.go @@ -36,7 +36,7 @@ import ( certificatesv1alpha1 "k8s.io/api/certificates/v1alpha1" certificatesv1beta1 "k8s.io/api/certificates/v1beta1" coordinationv1 "k8s.io/api/coordination/v1" - coordinationv1alpha1 "k8s.io/api/coordination/v1alpha1" + v1alpha2 "k8s.io/api/coordination/v1alpha2" coordinationv1beta1 "k8s.io/api/coordination/v1beta1" corev1 "k8s.io/api/core/v1" discoveryv1 "k8s.io/api/discovery/v1" @@ -61,6 +61,7 @@ import ( rbacv1alpha1 "k8s.io/api/rbac/v1alpha1" rbacv1beta1 "k8s.io/api/rbac/v1beta1" v1alpha3 "k8s.io/api/resource/v1alpha3" + resourcev1beta1 "k8s.io/api/resource/v1beta1" schedulingv1 "k8s.io/api/scheduling/v1" schedulingv1alpha1 "k8s.io/api/scheduling/v1alpha1" schedulingv1beta1 "k8s.io/api/scheduling/v1beta1" @@ -88,7 +89,7 @@ import ( applyconfigurationscertificatesv1alpha1 "k8s.io/client-go/applyconfigurations/certificates/v1alpha1" applyconfigurationscertificatesv1beta1 "k8s.io/client-go/applyconfigurations/certificates/v1beta1" applyconfigurationscoordinationv1 "k8s.io/client-go/applyconfigurations/coordination/v1" - applyconfigurationscoordinationv1alpha1 "k8s.io/client-go/applyconfigurations/coordination/v1alpha1" + coordinationv1alpha2 "k8s.io/client-go/applyconfigurations/coordination/v1alpha2" applyconfigurationscoordinationv1beta1 "k8s.io/client-go/applyconfigurations/coordination/v1beta1" applyconfigurationscorev1 "k8s.io/client-go/applyconfigurations/core/v1" applyconfigurationsdiscoveryv1 "k8s.io/client-go/applyconfigurations/discovery/v1" @@ -115,6 +116,7 
@@ import ( applyconfigurationsrbacv1alpha1 "k8s.io/client-go/applyconfigurations/rbac/v1alpha1" applyconfigurationsrbacv1beta1 "k8s.io/client-go/applyconfigurations/rbac/v1beta1" resourcev1alpha3 "k8s.io/client-go/applyconfigurations/resource/v1alpha3" + applyconfigurationsresourcev1beta1 "k8s.io/client-go/applyconfigurations/resource/v1beta1" applyconfigurationsschedulingv1 "k8s.io/client-go/applyconfigurations/scheduling/v1" applyconfigurationsschedulingv1alpha1 "k8s.io/client-go/applyconfigurations/scheduling/v1alpha1" applyconfigurationsschedulingv1beta1 "k8s.io/client-go/applyconfigurations/scheduling/v1beta1" @@ -178,14 +180,28 @@ func ForKind(kind schema.GroupVersionKind) interface{} { return &admissionregistrationv1.WebhookClientConfigApplyConfiguration{} // Group=admissionregistration.k8s.io, Version=v1alpha1 + case v1alpha1.SchemeGroupVersion.WithKind("ApplyConfiguration"): + return &admissionregistrationv1alpha1.ApplyConfigurationApplyConfiguration{} case v1alpha1.SchemeGroupVersion.WithKind("AuditAnnotation"): return &admissionregistrationv1alpha1.AuditAnnotationApplyConfiguration{} case v1alpha1.SchemeGroupVersion.WithKind("ExpressionWarning"): return &admissionregistrationv1alpha1.ExpressionWarningApplyConfiguration{} + case v1alpha1.SchemeGroupVersion.WithKind("JSONPatch"): + return &admissionregistrationv1alpha1.JSONPatchApplyConfiguration{} case v1alpha1.SchemeGroupVersion.WithKind("MatchCondition"): return &admissionregistrationv1alpha1.MatchConditionApplyConfiguration{} case v1alpha1.SchemeGroupVersion.WithKind("MatchResources"): return &admissionregistrationv1alpha1.MatchResourcesApplyConfiguration{} + case v1alpha1.SchemeGroupVersion.WithKind("MutatingAdmissionPolicy"): + return &admissionregistrationv1alpha1.MutatingAdmissionPolicyApplyConfiguration{} + case v1alpha1.SchemeGroupVersion.WithKind("MutatingAdmissionPolicyBinding"): + return &admissionregistrationv1alpha1.MutatingAdmissionPolicyBindingApplyConfiguration{} + case v1alpha1.SchemeGroupVersion.WithKind("MutatingAdmissionPolicyBindingSpec"): + return &admissionregistrationv1alpha1.MutatingAdmissionPolicyBindingSpecApplyConfiguration{} + case v1alpha1.SchemeGroupVersion.WithKind("MutatingAdmissionPolicySpec"): + return &admissionregistrationv1alpha1.MutatingAdmissionPolicySpecApplyConfiguration{} + case v1alpha1.SchemeGroupVersion.WithKind("Mutation"): + return &admissionregistrationv1alpha1.MutationApplyConfiguration{} case v1alpha1.SchemeGroupVersion.WithKind("NamedRuleWithOperations"): return &admissionregistrationv1alpha1.NamedRuleWithOperationsApplyConfiguration{} case v1alpha1.SchemeGroupVersion.WithKind("ParamKind"): @@ -615,11 +631,11 @@ func ForKind(kind schema.GroupVersionKind) interface{} { case coordinationv1.SchemeGroupVersion.WithKind("LeaseSpec"): return &applyconfigurationscoordinationv1.LeaseSpecApplyConfiguration{} - // Group=coordination.k8s.io, Version=v1alpha1 - case coordinationv1alpha1.SchemeGroupVersion.WithKind("LeaseCandidate"): - return &applyconfigurationscoordinationv1alpha1.LeaseCandidateApplyConfiguration{} - case coordinationv1alpha1.SchemeGroupVersion.WithKind("LeaseCandidateSpec"): - return &applyconfigurationscoordinationv1alpha1.LeaseCandidateSpecApplyConfiguration{} + // Group=coordination.k8s.io, Version=v1alpha2 + case v1alpha2.SchemeGroupVersion.WithKind("LeaseCandidate"): + return &coordinationv1alpha2.LeaseCandidateApplyConfiguration{} + case v1alpha2.SchemeGroupVersion.WithKind("LeaseCandidateSpec"): + return 
&coordinationv1alpha2.LeaseCandidateSpecApplyConfiguration{} // Group=coordination.k8s.io, Version=v1beta1 case coordinationv1beta1.SchemeGroupVersion.WithKind("Lease"): @@ -1566,6 +1582,8 @@ func ForKind(kind schema.GroupVersionKind) interface{} { return &applyconfigurationsrbacv1beta1.SubjectApplyConfiguration{} // Group=resource.k8s.io, Version=v1alpha3 + case v1alpha3.SchemeGroupVersion.WithKind("AllocatedDeviceStatus"): + return &resourcev1alpha3.AllocatedDeviceStatusApplyConfiguration{} case v1alpha3.SchemeGroupVersion.WithKind("AllocationResult"): return &resourcev1alpha3.AllocationResultApplyConfiguration{} case v1alpha3.SchemeGroupVersion.WithKind("BasicDevice"): @@ -1600,20 +1618,14 @@ func ForKind(kind schema.GroupVersionKind) interface{} { return &resourcev1alpha3.DeviceRequestAllocationResultApplyConfiguration{} case v1alpha3.SchemeGroupVersion.WithKind("DeviceSelector"): return &resourcev1alpha3.DeviceSelectorApplyConfiguration{} + case v1alpha3.SchemeGroupVersion.WithKind("NetworkDeviceData"): + return &resourcev1alpha3.NetworkDeviceDataApplyConfiguration{} case v1alpha3.SchemeGroupVersion.WithKind("OpaqueDeviceConfiguration"): return &resourcev1alpha3.OpaqueDeviceConfigurationApplyConfiguration{} - case v1alpha3.SchemeGroupVersion.WithKind("PodSchedulingContext"): - return &resourcev1alpha3.PodSchedulingContextApplyConfiguration{} - case v1alpha3.SchemeGroupVersion.WithKind("PodSchedulingContextSpec"): - return &resourcev1alpha3.PodSchedulingContextSpecApplyConfiguration{} - case v1alpha3.SchemeGroupVersion.WithKind("PodSchedulingContextStatus"): - return &resourcev1alpha3.PodSchedulingContextStatusApplyConfiguration{} case v1alpha3.SchemeGroupVersion.WithKind("ResourceClaim"): return &resourcev1alpha3.ResourceClaimApplyConfiguration{} case v1alpha3.SchemeGroupVersion.WithKind("ResourceClaimConsumerReference"): return &resourcev1alpha3.ResourceClaimConsumerReferenceApplyConfiguration{} - case v1alpha3.SchemeGroupVersion.WithKind("ResourceClaimSchedulingStatus"): - return &resourcev1alpha3.ResourceClaimSchedulingStatusApplyConfiguration{} case v1alpha3.SchemeGroupVersion.WithKind("ResourceClaimSpec"): return &resourcev1alpha3.ResourceClaimSpecApplyConfiguration{} case v1alpha3.SchemeGroupVersion.WithKind("ResourceClaimStatus"): @@ -1629,6 +1641,68 @@ func ForKind(kind schema.GroupVersionKind) interface{} { case v1alpha3.SchemeGroupVersion.WithKind("ResourceSliceSpec"): return &resourcev1alpha3.ResourceSliceSpecApplyConfiguration{} + // Group=resource.k8s.io, Version=v1beta1 + case resourcev1beta1.SchemeGroupVersion.WithKind("AllocatedDeviceStatus"): + return &applyconfigurationsresourcev1beta1.AllocatedDeviceStatusApplyConfiguration{} + case resourcev1beta1.SchemeGroupVersion.WithKind("AllocationResult"): + return &applyconfigurationsresourcev1beta1.AllocationResultApplyConfiguration{} + case resourcev1beta1.SchemeGroupVersion.WithKind("BasicDevice"): + return &applyconfigurationsresourcev1beta1.BasicDeviceApplyConfiguration{} + case resourcev1beta1.SchemeGroupVersion.WithKind("CELDeviceSelector"): + return &applyconfigurationsresourcev1beta1.CELDeviceSelectorApplyConfiguration{} + case resourcev1beta1.SchemeGroupVersion.WithKind("Device"): + return &applyconfigurationsresourcev1beta1.DeviceApplyConfiguration{} + case resourcev1beta1.SchemeGroupVersion.WithKind("DeviceAllocationConfiguration"): + return &applyconfigurationsresourcev1beta1.DeviceAllocationConfigurationApplyConfiguration{} + case resourcev1beta1.SchemeGroupVersion.WithKind("DeviceAllocationResult"): + return 
&applyconfigurationsresourcev1beta1.DeviceAllocationResultApplyConfiguration{} + case resourcev1beta1.SchemeGroupVersion.WithKind("DeviceAttribute"): + return &applyconfigurationsresourcev1beta1.DeviceAttributeApplyConfiguration{} + case resourcev1beta1.SchemeGroupVersion.WithKind("DeviceCapacity"): + return &applyconfigurationsresourcev1beta1.DeviceCapacityApplyConfiguration{} + case resourcev1beta1.SchemeGroupVersion.WithKind("DeviceClaim"): + return &applyconfigurationsresourcev1beta1.DeviceClaimApplyConfiguration{} + case resourcev1beta1.SchemeGroupVersion.WithKind("DeviceClaimConfiguration"): + return &applyconfigurationsresourcev1beta1.DeviceClaimConfigurationApplyConfiguration{} + case resourcev1beta1.SchemeGroupVersion.WithKind("DeviceClass"): + return &applyconfigurationsresourcev1beta1.DeviceClassApplyConfiguration{} + case resourcev1beta1.SchemeGroupVersion.WithKind("DeviceClassConfiguration"): + return &applyconfigurationsresourcev1beta1.DeviceClassConfigurationApplyConfiguration{} + case resourcev1beta1.SchemeGroupVersion.WithKind("DeviceClassSpec"): + return &applyconfigurationsresourcev1beta1.DeviceClassSpecApplyConfiguration{} + case resourcev1beta1.SchemeGroupVersion.WithKind("DeviceConfiguration"): + return &applyconfigurationsresourcev1beta1.DeviceConfigurationApplyConfiguration{} + case resourcev1beta1.SchemeGroupVersion.WithKind("DeviceConstraint"): + return &applyconfigurationsresourcev1beta1.DeviceConstraintApplyConfiguration{} + case resourcev1beta1.SchemeGroupVersion.WithKind("DeviceRequest"): + return &applyconfigurationsresourcev1beta1.DeviceRequestApplyConfiguration{} + case resourcev1beta1.SchemeGroupVersion.WithKind("DeviceRequestAllocationResult"): + return &applyconfigurationsresourcev1beta1.DeviceRequestAllocationResultApplyConfiguration{} + case resourcev1beta1.SchemeGroupVersion.WithKind("DeviceSelector"): + return &applyconfigurationsresourcev1beta1.DeviceSelectorApplyConfiguration{} + case resourcev1beta1.SchemeGroupVersion.WithKind("NetworkDeviceData"): + return &applyconfigurationsresourcev1beta1.NetworkDeviceDataApplyConfiguration{} + case resourcev1beta1.SchemeGroupVersion.WithKind("OpaqueDeviceConfiguration"): + return &applyconfigurationsresourcev1beta1.OpaqueDeviceConfigurationApplyConfiguration{} + case resourcev1beta1.SchemeGroupVersion.WithKind("ResourceClaim"): + return &applyconfigurationsresourcev1beta1.ResourceClaimApplyConfiguration{} + case resourcev1beta1.SchemeGroupVersion.WithKind("ResourceClaimConsumerReference"): + return &applyconfigurationsresourcev1beta1.ResourceClaimConsumerReferenceApplyConfiguration{} + case resourcev1beta1.SchemeGroupVersion.WithKind("ResourceClaimSpec"): + return &applyconfigurationsresourcev1beta1.ResourceClaimSpecApplyConfiguration{} + case resourcev1beta1.SchemeGroupVersion.WithKind("ResourceClaimStatus"): + return &applyconfigurationsresourcev1beta1.ResourceClaimStatusApplyConfiguration{} + case resourcev1beta1.SchemeGroupVersion.WithKind("ResourceClaimTemplate"): + return &applyconfigurationsresourcev1beta1.ResourceClaimTemplateApplyConfiguration{} + case resourcev1beta1.SchemeGroupVersion.WithKind("ResourceClaimTemplateSpec"): + return &applyconfigurationsresourcev1beta1.ResourceClaimTemplateSpecApplyConfiguration{} + case resourcev1beta1.SchemeGroupVersion.WithKind("ResourcePool"): + return &applyconfigurationsresourcev1beta1.ResourcePoolApplyConfiguration{} + case resourcev1beta1.SchemeGroupVersion.WithKind("ResourceSlice"): + return 
&applyconfigurationsresourcev1beta1.ResourceSliceApplyConfiguration{} + case resourcev1beta1.SchemeGroupVersion.WithKind("ResourceSliceSpec"): + return &applyconfigurationsresourcev1beta1.ResourceSliceSpecApplyConfiguration{} + // Group=scheduling.k8s.io, Version=v1 case schedulingv1.SchemeGroupVersion.WithKind("PriorityClass"): return &applyconfigurationsschedulingv1.PriorityClassApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/dynamic/scheme.go b/vendor/k8s.io/client-go/dynamic/scheme.go index 3168c872c..28316f1dd 100644 --- a/vendor/k8s.io/client-go/dynamic/scheme.go +++ b/vendor/k8s.io/client-go/dynamic/scheme.go @@ -21,52 +21,66 @@ import ( "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/apimachinery/pkg/runtime/serializer/cbor" "k8s.io/apimachinery/pkg/runtime/serializer/json" + "k8s.io/client-go/features" ) -var watchScheme = runtime.NewScheme() var basicScheme = runtime.NewScheme() -var deleteScheme = runtime.NewScheme() var parameterScheme = runtime.NewScheme() -var deleteOptionsCodec = serializer.NewCodecFactory(deleteScheme) var dynamicParameterCodec = runtime.NewParameterCodec(parameterScheme) var versionV1 = schema.GroupVersion{Version: "v1"} func init() { - metav1.AddToGroupVersion(watchScheme, versionV1) metav1.AddToGroupVersion(basicScheme, versionV1) metav1.AddToGroupVersion(parameterScheme, versionV1) - metav1.AddToGroupVersion(deleteScheme, versionV1) } -// basicNegotiatedSerializer is used to handle discovery and error handling serialization -type basicNegotiatedSerializer struct{} - -func (s basicNegotiatedSerializer) SupportedMediaTypes() []runtime.SerializerInfo { - return []runtime.SerializerInfo{ +func newBasicNegotiatedSerializer() basicNegotiatedSerializer { + supportedMediaTypes := []runtime.SerializerInfo{ { MediaType: "application/json", MediaTypeType: "application", MediaTypeSubType: "json", EncodesAsText: true, - Serializer: json.NewSerializer(json.DefaultMetaFactory, unstructuredCreater{basicScheme}, unstructuredTyper{basicScheme}, false), - PrettySerializer: json.NewSerializer(json.DefaultMetaFactory, unstructuredCreater{basicScheme}, unstructuredTyper{basicScheme}, true), + Serializer: json.NewSerializerWithOptions(json.DefaultMetaFactory, unstructuredCreater{basicScheme}, unstructuredTyper{basicScheme}, json.SerializerOptions{}), + PrettySerializer: json.NewSerializerWithOptions(json.DefaultMetaFactory, unstructuredCreater{basicScheme}, unstructuredTyper{basicScheme}, json.SerializerOptions{Pretty: true}), StreamSerializer: &runtime.StreamSerializerInfo{ EncodesAsText: true, - Serializer: json.NewSerializer(json.DefaultMetaFactory, basicScheme, basicScheme, false), + Serializer: json.NewSerializerWithOptions(json.DefaultMetaFactory, basicScheme, basicScheme, json.SerializerOptions{}), Framer: json.Framer, }, }, } + if features.FeatureGates().Enabled(features.ClientsAllowCBOR) { + supportedMediaTypes = append(supportedMediaTypes, runtime.SerializerInfo{ + MediaType: "application/cbor", + MediaTypeType: "application", + MediaTypeSubType: "cbor", + Serializer: cbor.NewSerializer(unstructuredCreater{basicScheme}, unstructuredTyper{basicScheme}), + StreamSerializer: &runtime.StreamSerializerInfo{ + Serializer: cbor.NewSerializer(basicScheme, basicScheme, cbor.Transcode(false)), + Framer: cbor.NewFramer(), + }, + }) + } + return basicNegotiatedSerializer{supportedMediaTypes: supportedMediaTypes} +} + +type 
basicNegotiatedSerializer struct { + supportedMediaTypes []runtime.SerializerInfo +} + +func (s basicNegotiatedSerializer) SupportedMediaTypes() []runtime.SerializerInfo { + return s.supportedMediaTypes } func (s basicNegotiatedSerializer) EncoderForVersion(encoder runtime.Encoder, gv runtime.GroupVersioner) runtime.Encoder { return runtime.WithVersionEncoder{ Version: gv, Encoder: encoder, - ObjectTyper: unstructuredTyper{basicScheme}, + ObjectTyper: permissiveTyper{basicScheme}, } } @@ -106,3 +120,25 @@ func (t unstructuredTyper) ObjectKinds(obj runtime.Object) ([]schema.GroupVersio func (t unstructuredTyper) Recognizes(gvk schema.GroupVersionKind) bool { return true } + +// The dynamic client has historically accepted Unstructured objects with missing or empty +// apiVersion and/or kind as arguments to its write request methods. This typer will return the type +// of a runtime.Unstructured with no error, even if the type is missing or empty. +type permissiveTyper struct { + nested runtime.ObjectTyper +} + +func (t permissiveTyper) ObjectKinds(obj runtime.Object) ([]schema.GroupVersionKind, bool, error) { + kinds, unversioned, err := t.nested.ObjectKinds(obj) + if err == nil { + return kinds, unversioned, nil + } + if _, ok := obj.(runtime.Unstructured); ok { + return []schema.GroupVersionKind{obj.GetObjectKind().GroupVersionKind()}, false, nil + } + return nil, false, err +} + +func (t permissiveTyper) Recognizes(gvk schema.GroupVersionKind) bool { + return true +} diff --git a/vendor/k8s.io/client-go/dynamic/simple.go b/vendor/k8s.io/client-go/dynamic/simple.go index 326da7cbd..62b2999ca 100644 --- a/vendor/k8s.io/client-go/dynamic/simple.go +++ b/vendor/k8s.io/client-go/dynamic/simple.go @@ -25,11 +25,12 @@ import ( "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/features" "k8s.io/client-go/rest" + "k8s.io/client-go/util/apply" "k8s.io/client-go/util/consistencydetector" "k8s.io/client-go/util/watchlist" "k8s.io/klog/v2" @@ -45,9 +46,17 @@ var _ Interface = &DynamicClient{} // appropriate dynamic client defaults set. func ConfigFor(inConfig *rest.Config) *rest.Config { config := rest.CopyConfig(inConfig) - config.AcceptContentTypes = "application/json" + config.ContentType = "application/json" - config.NegotiatedSerializer = basicNegotiatedSerializer{} // this gets used for discovery and error handling types + config.AcceptContentTypes = "application/json" + if features.FeatureGates().Enabled(features.ClientsAllowCBOR) { + config.AcceptContentTypes = "application/json;q=0.9,application/cbor;q=1" + if features.FeatureGates().Enabled(features.ClientsPreferCBOR) { + config.ContentType = "application/cbor" + } + } + + config.NegotiatedSerializer = newBasicNegotiatedSerializer() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() } @@ -86,11 +95,10 @@ func NewForConfig(inConfig *rest.Config) (*DynamicClient, error) { // Note the http client provided takes precedence over the configured transport values. 
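The regenerated ForKind table shown above now resolves the resource.k8s.io/v1beta1 kinds (and coordination.k8s.io/v1alpha2 in place of v1alpha1). A small sketch of the lookup, under the assumption that callers use it the usual way; the kind picked here is just an example from the new entries.

```go
package main

import (
	"fmt"

	resourcev1beta1 "k8s.io/api/resource/v1beta1"
	"k8s.io/client-go/applyconfigurations"
)

func main() {
	// Resolve the empty apply configuration registered for a GroupVersionKind.
	gvk := resourcev1beta1.SchemeGroupVersion.WithKind("ResourceClaim")
	ac := applyconfigurations.ForKind(gvk)
	if ac == nil {
		fmt.Println("no apply configuration registered for", gvk)
		return
	}
	// Expected to be the v1beta1 ResourceClaimApplyConfiguration added above.
	fmt.Printf("%T\n", ac)
}
```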
func NewForConfigAndClient(inConfig *rest.Config, h *http.Client) (*DynamicClient, error) { config := ConfigFor(inConfig) - // for serializing the options - config.GroupVersion = &schema.GroupVersion{} + config.GroupVersion = nil config.APIPath = "/if-you-see-this-search-for-the-break" - restClient, err := rest.RESTClientForConfigAndClient(config, h) + restClient, err := rest.UnversionedRESTClientForConfigAndClient(config, h) if err != nil { return nil, err } @@ -114,10 +122,6 @@ func (c *dynamicResourceClient) Namespace(ns string) ResourceInterface { } func (c *dynamicResourceClient) Create(ctx context.Context, obj *unstructured.Unstructured, opts metav1.CreateOptions, subresources ...string) (*unstructured.Unstructured, error) { - outBytes, err := runtime.Encode(unstructured.UnstructuredJSONScheme, obj) - if err != nil { - return nil, err - } name := "" if len(subresources) > 0 { accessor, err := meta.Accessor(obj) @@ -133,26 +137,17 @@ func (c *dynamicResourceClient) Create(ctx context.Context, obj *unstructured.Un return nil, err } - result := c.client.client. + var out unstructured.Unstructured + if err := c.client.client. Post(). AbsPath(append(c.makeURLSegments(name), subresources...)...). - SetHeader("Content-Type", runtime.ContentTypeJSON). - Body(outBytes). + Body(obj). SpecificallyVersionedParams(&opts, dynamicParameterCodec, versionV1). - Do(ctx) - if err := result.Error(); err != nil { + Do(ctx).Into(&out); err != nil { return nil, err } - retBytes, err := result.Raw() - if err != nil { - return nil, err - } - uncastObj, err := runtime.Decode(unstructured.UnstructuredJSONScheme, retBytes) - if err != nil { - return nil, err - } - return uncastObj.(*unstructured.Unstructured), nil + return &out, nil } func (c *dynamicResourceClient) Update(ctx context.Context, obj *unstructured.Unstructured, opts metav1.UpdateOptions, subresources ...string) (*unstructured.Unstructured, error) { @@ -167,31 +162,18 @@ func (c *dynamicResourceClient) Update(ctx context.Context, obj *unstructured.Un if err := validateNamespaceWithOptionalName(c.namespace, name); err != nil { return nil, err } - outBytes, err := runtime.Encode(unstructured.UnstructuredJSONScheme, obj) - if err != nil { - return nil, err - } - result := c.client.client. + var out unstructured.Unstructured + if err := c.client.client. Put(). AbsPath(append(c.makeURLSegments(name), subresources...)...). - SetHeader("Content-Type", runtime.ContentTypeJSON). - Body(outBytes). + Body(obj). SpecificallyVersionedParams(&opts, dynamicParameterCodec, versionV1). - Do(ctx) - if err := result.Error(); err != nil { + Do(ctx).Into(&out); err != nil { return nil, err } - retBytes, err := result.Raw() - if err != nil { - return nil, err - } - uncastObj, err := runtime.Decode(unstructured.UnstructuredJSONScheme, retBytes) - if err != nil { - return nil, err - } - return uncastObj.(*unstructured.Unstructured), nil + return &out, nil } func (c *dynamicResourceClient) UpdateStatus(ctx context.Context, obj *unstructured.Unstructured, opts metav1.UpdateOptions) (*unstructured.Unstructured, error) { @@ -206,31 +188,18 @@ func (c *dynamicResourceClient) UpdateStatus(ctx context.Context, obj *unstructu if err := validateNamespaceWithOptionalName(c.namespace, name); err != nil { return nil, err } - outBytes, err := runtime.Encode(unstructured.UnstructuredJSONScheme, obj) - if err != nil { - return nil, err - } - result := c.client.client. + var out unstructured.Unstructured + if err := c.client.client. Put(). 
AbsPath(append(c.makeURLSegments(name), "status")...). - SetHeader("Content-Type", runtime.ContentTypeJSON). - Body(outBytes). + Body(obj). SpecificallyVersionedParams(&opts, dynamicParameterCodec, versionV1). - Do(ctx) - if err := result.Error(); err != nil { + Do(ctx).Into(&out); err != nil { return nil, err } - retBytes, err := result.Raw() - if err != nil { - return nil, err - } - uncastObj, err := runtime.Decode(unstructured.UnstructuredJSONScheme, retBytes) - if err != nil { - return nil, err - } - return uncastObj.(*unstructured.Unstructured), nil + return &out, nil } func (c *dynamicResourceClient) Delete(ctx context.Context, name string, opts metav1.DeleteOptions, subresources ...string) error { @@ -240,16 +209,11 @@ func (c *dynamicResourceClient) Delete(ctx context.Context, name string, opts me if err := validateNamespaceWithOptionalName(c.namespace, name); err != nil { return err } - deleteOptionsByte, err := runtime.Encode(deleteOptionsCodec.LegacyCodec(schema.GroupVersion{Version: "v1"}), &opts) - if err != nil { - return err - } result := c.client.client. Delete(). AbsPath(append(c.makeURLSegments(name), subresources...)...). - SetHeader("Content-Type", runtime.ContentTypeJSON). - Body(deleteOptionsByte). + Body(&opts). Do(ctx) return result.Error() } @@ -259,16 +223,10 @@ func (c *dynamicResourceClient) DeleteCollection(ctx context.Context, opts metav return err } - deleteOptionsByte, err := runtime.Encode(deleteOptionsCodec.LegacyCodec(schema.GroupVersion{Version: "v1"}), &opts) - if err != nil { - return err - } - result := c.client.client. Delete(). AbsPath(c.makeURLSegments("")...). - SetHeader("Content-Type", runtime.ContentTypeJSON). - Body(deleteOptionsByte). + Body(&opts). SpecificallyVersionedParams(&listOptions, dynamicParameterCodec, versionV1). Do(ctx) return result.Error() @@ -281,19 +239,15 @@ func (c *dynamicResourceClient) Get(ctx context.Context, name string, opts metav if err := validateNamespaceWithOptionalName(c.namespace, name); err != nil { return nil, err } - result := c.client.client.Get().AbsPath(append(c.makeURLSegments(name), subresources...)...).SpecificallyVersionedParams(&opts, dynamicParameterCodec, versionV1).Do(ctx) - if err := result.Error(); err != nil { - return nil, err - } - retBytes, err := result.Raw() - if err != nil { - return nil, err - } - uncastObj, err := runtime.Decode(unstructured.UnstructuredJSONScheme, retBytes) - if err != nil { + var out unstructured.Unstructured + if err := c.client.client. + Get(). + AbsPath(append(c.makeURLSegments(name), subresources...)...). + SpecificallyVersionedParams(&opts, dynamicParameterCodec, versionV1). 
+ Do(ctx).Into(&out); err != nil { return nil, err } - return uncastObj.(*unstructured.Unstructured), nil + return &out, nil } func (c *dynamicResourceClient) List(ctx context.Context, opts metav1.ListOptions) (*unstructured.UnstructuredList, error) { @@ -318,27 +272,15 @@ func (c *dynamicResourceClient) list(ctx context.Context, opts metav1.ListOption if err := validateNamespaceWithOptionalName(c.namespace); err != nil { return nil, err } - result := c.client.client.Get().AbsPath(c.makeURLSegments("")...).SpecificallyVersionedParams(&opts, dynamicParameterCodec, versionV1).Do(ctx) - if err := result.Error(); err != nil { - return nil, err - } - retBytes, err := result.Raw() - if err != nil { - return nil, err - } - uncastObj, err := runtime.Decode(unstructured.UnstructuredJSONScheme, retBytes) - if err != nil { - return nil, err - } - if list, ok := uncastObj.(*unstructured.UnstructuredList); ok { - return list, nil - } - - list, err := uncastObj.(*unstructured.Unstructured).ToList() - if err != nil { + var out unstructured.UnstructuredList + if err := c.client.client. + Get(). + AbsPath(c.makeURLSegments("")...). + SpecificallyVersionedParams(&opts, dynamicParameterCodec, versionV1). + Do(ctx).Into(&out); err != nil { return nil, err } - return list, nil + return &out, nil } // watchList establishes a watch stream with the server and returns an unstructured list. @@ -379,24 +321,16 @@ func (c *dynamicResourceClient) Patch(ctx context.Context, name string, pt types if err := validateNamespaceWithOptionalName(c.namespace, name); err != nil { return nil, err } - result := c.client.client. + var out unstructured.Unstructured + if err := c.client.client. Patch(pt). AbsPath(append(c.makeURLSegments(name), subresources...)...). Body(data). SpecificallyVersionedParams(&opts, dynamicParameterCodec, versionV1). - Do(ctx) - if err := result.Error(); err != nil { - return nil, err - } - retBytes, err := result.Raw() - if err != nil { + Do(ctx).Into(&out); err != nil { return nil, err } - uncastObj, err := runtime.Decode(unstructured.UnstructuredJSONScheme, retBytes) - if err != nil { - return nil, err - } - return uncastObj.(*unstructured.Unstructured), nil + return &out, nil } func (c *dynamicResourceClient) Apply(ctx context.Context, name string, obj *unstructured.Unstructured, opts metav1.ApplyOptions, subresources ...string) (*unstructured.Unstructured, error) { @@ -406,10 +340,6 @@ func (c *dynamicResourceClient) Apply(ctx context.Context, name string, obj *uns if err := validateNamespaceWithOptionalName(c.namespace, name); err != nil { return nil, err } - outBytes, err := runtime.Encode(unstructured.UnstructuredJSONScheme, obj) - if err != nil { - return nil, err - } accessor, err := meta.Accessor(obj) if err != nil { return nil, err @@ -421,25 +351,21 @@ func (c *dynamicResourceClient) Apply(ctx context.Context, name string, obj *uns } patchOpts := opts.ToPatchOptions() - result := c.client.client. - Patch(types.ApplyPatchType). - AbsPath(append(c.makeURLSegments(name), subresources...)...). - Body(outBytes). - SpecificallyVersionedParams(&patchOpts, dynamicParameterCodec, versionV1). - Do(ctx) - if err := result.Error(); err != nil { - return nil, err - } - retBytes, err := result.Raw() + request, err := apply.NewRequest(c.client.client, obj.Object) if err != nil { return nil, err } - uncastObj, err := runtime.Decode(unstructured.UnstructuredJSONScheme, retBytes) - if err != nil { + + var out unstructured.Unstructured + if err := request. 
+ AbsPath(append(c.makeURLSegments(name), subresources...)...). + SpecificallyVersionedParams(&patchOpts, dynamicParameterCodec, versionV1). + Do(ctx).Into(&out); err != nil { return nil, err } - return uncastObj.(*unstructured.Unstructured), nil + return &out, nil } + func (c *dynamicResourceClient) ApplyStatus(ctx context.Context, name string, obj *unstructured.Unstructured, opts metav1.ApplyOptions) (*unstructured.Unstructured, error) { return c.Apply(ctx, name, obj, opts, "status") } diff --git a/vendor/k8s.io/client-go/features/features.go b/vendor/k8s.io/client-go/features/features.go index afb67f509..5ccdcc55f 100644 --- a/vendor/k8s.io/client-go/features/features.go +++ b/vendor/k8s.io/client-go/features/features.go @@ -18,9 +18,9 @@ package features import ( "errors" + "sync/atomic" utilruntime "k8s.io/apimachinery/pkg/util/runtime" - "sync/atomic" ) // NOTE: types Feature, FeatureSpec, prerelease (and its values) diff --git a/vendor/k8s.io/client-go/features/known_features.go b/vendor/k8s.io/client-go/features/known_features.go index 0c972a46f..a74f6a833 100644 --- a/vendor/k8s.io/client-go/features/known_features.go +++ b/vendor/k8s.io/client-go/features/known_features.go @@ -28,6 +28,31 @@ const ( // of code conflicts because changes are more likely to be scattered // across the file. + // owner: @benluddy + // kep: https://kep.k8s.io/4222 + // alpha: 1.32 + // + // If disabled, clients configured to accept "application/cbor" will instead accept + // "application/json" with the same relative preference, and clients configured to write + // "application/cbor" or "application/apply-patch+cbor" will instead write + // "application/json" or "application/apply-patch+yaml", respectively. + ClientsAllowCBOR Feature = "ClientsAllowCBOR" + + // owner: @benluddy + // kep: https://kep.k8s.io/4222 + // alpha: 1.32 + // + // If enabled, and only if ClientsAllowCBOR is also enabled, the default request content + // type (if not explicitly configured) and the dynamic client's request content type both + // become "application/cbor" instead of "application/json". The default content type for + // apply patch requests becomes "application/apply-patch+cbor" instead of + // "application/apply-patch+yaml". + ClientsPreferCBOR Feature = "ClientsPreferCBOR" + + // owner: @nilekhc + // alpha: v1.30 + InformerResourceVersion Feature = "InformerResourceVersion" + // owner: @p0lyn0mial // beta: v1.30 // @@ -37,10 +62,6 @@ const ( // The feature is disabled in Beta by default because // it will only be turned on for selected control plane component(s). WatchListClient Feature = "WatchListClient" - - // owner: @nilekhc - // alpha: v1.30 - InformerResourceVersion Feature = "InformerResourceVersion" ) // defaultKubernetesFeatureGates consists of all known Kubernetes-specific feature keys. @@ -49,6 +70,8 @@ const ( // After registering with the binary, the features are, by default, controllable using environment variables. // For more details, please see envVarFeatureGates implementation. 
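The dynamic client rewrite above changes only the plumbing: request bodies now flow through Body(obj) and Do(ctx).Into(&out) using the client's negotiated serializer instead of hand-rolled JSON encode/decode, and content-type negotiation honors the new ClientsAllowCBOR/ClientsPreferCBOR gates. Both gates are alpha and off by default, so JSON stays the wire format unless opted in via the client-go feature gates (per the comment above, controllable through the envVarFeatureGates mechanism). Caller-facing behavior is unchanged; a typical use, with a made-up rest.Config, GVR, and object:

```go
package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/dynamic"
	"k8s.io/client-go/rest"
)

func main() {
	cfg := &rest.Config{Host: "https://127.0.0.1:6443"} // placeholder; normally built from kubeconfig or in-cluster config
	client, err := dynamic.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}

	gvr := schema.GroupVersionResource{Group: "cert-manager.io", Version: "v1", Resource: "certificates"}
	cert := &unstructured.Unstructured{Object: map[string]interface{}{
		"apiVersion": "cert-manager.io/v1",
		"kind":       "Certificate",
		"metadata":   map[string]interface{}{"name": "demo", "namespace": "default"},
	}}

	// Create (like Get/Update/Patch) now decodes straight into an unstructured
	// value via Do(ctx).Into(&out) rather than round-tripping raw bytes.
	created, err := client.Resource(gvr).Namespace("default").Create(context.TODO(), cert, metav1.CreateOptions{})
	fmt.Println(created, err)
}
```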
var defaultKubernetesFeatureGates = map[Feature]FeatureSpec{ - WatchListClient: {Default: false, PreRelease: Beta}, + ClientsAllowCBOR: {Default: false, PreRelease: Alpha}, + ClientsPreferCBOR: {Default: false, PreRelease: Alpha}, InformerResourceVersion: {Default: false, PreRelease: Alpha}, + WatchListClient: {Default: false, PreRelease: Beta}, } diff --git a/vendor/k8s.io/client-go/gentype/fake.go b/vendor/k8s.io/client-go/gentype/fake.go new file mode 100644 index 000000000..bcb9ca27f --- /dev/null +++ b/vendor/k8s.io/client-go/gentype/fake.go @@ -0,0 +1,304 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package gentype + +import ( + "context" + json "encoding/json" + "fmt" + + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeClient represents a fake client +type FakeClient[T objectWithMeta] struct { + *testing.Fake + ns string + resource schema.GroupVersionResource + kind schema.GroupVersionKind + newObject func() T +} + +// FakeClientWithList represents a fake client with support for lists. +type FakeClientWithList[T objectWithMeta, L runtime.Object] struct { + *FakeClient[T] + alsoFakeLister[T, L] +} + +// FakeClientWithApply represents a fake client with support for apply declarative configurations. +type FakeClientWithApply[T objectWithMeta, C namedObject] struct { + *FakeClient[T] + alsoFakeApplier[T, C] +} + +// FakeClientWithListAndApply represents a fake client with support for lists and apply declarative configurations. +type FakeClientWithListAndApply[T objectWithMeta, L runtime.Object, C namedObject] struct { + *FakeClient[T] + alsoFakeLister[T, L] + alsoFakeApplier[T, C] +} + +// Helper types for composition +type alsoFakeLister[T objectWithMeta, L runtime.Object] struct { + client *FakeClient[T] + newList func() L + copyListMeta func(L, L) + getItems func(L) []T + setItems func(L, []T) +} + +type alsoFakeApplier[T objectWithMeta, C namedObject] struct { + client *FakeClient[T] +} + +// NewFakeClient constructs a fake client, namespaced or not, with no support for lists or apply. +// Non-namespaced clients are constructed by passing an empty namespace (""). +func NewFakeClient[T objectWithMeta]( + fake *testing.Fake, namespace string, resource schema.GroupVersionResource, kind schema.GroupVersionKind, emptyObjectCreator func() T, +) *FakeClient[T] { + return &FakeClient[T]{fake, namespace, resource, kind, emptyObjectCreator} +} + +// NewFakeClientWithList constructs a namespaced client with support for lists. 
+func NewFakeClientWithList[T objectWithMeta, L runtime.Object]( + fake *testing.Fake, namespace string, resource schema.GroupVersionResource, kind schema.GroupVersionKind, emptyObjectCreator func() T, + emptyListCreator func() L, listMetaCopier func(L, L), itemGetter func(L) []T, itemSetter func(L, []T), +) *FakeClientWithList[T, L] { + fakeClient := NewFakeClient[T](fake, namespace, resource, kind, emptyObjectCreator) + return &FakeClientWithList[T, L]{ + fakeClient, + alsoFakeLister[T, L]{fakeClient, emptyListCreator, listMetaCopier, itemGetter, itemSetter}, + } +} + +// NewFakeClientWithApply constructs a namespaced client with support for apply declarative configurations. +func NewFakeClientWithApply[T objectWithMeta, C namedObject]( + fake *testing.Fake, namespace string, resource schema.GroupVersionResource, kind schema.GroupVersionKind, emptyObjectCreator func() T, +) *FakeClientWithApply[T, C] { + fakeClient := NewFakeClient[T](fake, namespace, resource, kind, emptyObjectCreator) + return &FakeClientWithApply[T, C]{ + fakeClient, + alsoFakeApplier[T, C]{fakeClient}, + } +} + +// NewFakeClientWithListAndApply constructs a client with support for lists and applying declarative configurations. +func NewFakeClientWithListAndApply[T objectWithMeta, L runtime.Object, C namedObject]( + fake *testing.Fake, namespace string, resource schema.GroupVersionResource, kind schema.GroupVersionKind, emptyObjectCreator func() T, + emptyListCreator func() L, listMetaCopier func(L, L), itemGetter func(L) []T, itemSetter func(L, []T), +) *FakeClientWithListAndApply[T, L, C] { + fakeClient := NewFakeClient[T](fake, namespace, resource, kind, emptyObjectCreator) + return &FakeClientWithListAndApply[T, L, C]{ + fakeClient, + alsoFakeLister[T, L]{fakeClient, emptyListCreator, listMetaCopier, itemGetter, itemSetter}, + alsoFakeApplier[T, C]{fakeClient}, + } +} + +// Get takes name of a resource, and returns the corresponding object, and an error if there is any. +func (c *FakeClient[T]) Get(ctx context.Context, name string, options metav1.GetOptions) (T, error) { + emptyResult := c.newObject() + + obj, err := c.Fake. + Invokes(testing.NewGetActionWithOptions(c.resource, c.ns, name, options), emptyResult) + if obj == nil { + return emptyResult, err + } + return obj.(T), err +} + +func ToPointerSlice[T any](src []T) []*T { + if src == nil { + return nil + } + result := make([]*T, len(src)) + for i := range src { + result[i] = &src[i] + } + return result +} + +func FromPointerSlice[T any](src []*T) []T { + if src == nil { + return nil + } + result := make([]T, len(src)) + for i := range src { + result[i] = *src[i] + } + return result +} + +// List takes label and field selectors, and returns the list of resources that match those selectors. +func (l *alsoFakeLister[T, L]) List(ctx context.Context, opts metav1.ListOptions) (result L, err error) { + emptyResult := l.newList() + obj, err := l.client.Fake. 
+ Invokes(testing.NewListActionWithOptions(l.client.resource, l.client.kind, l.client.ns, opts), emptyResult) + if obj == nil { + return emptyResult, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + // Everything matches + return obj.(L), nil + } + list := l.newList() + l.copyListMeta(list, obj.(L)) + var items []T + for _, item := range l.getItems(obj.(L)) { + itemMeta, err := meta.Accessor(item) + if err != nil { + // No ObjectMeta, nothing can match + continue + } + if label.Matches(labels.Set(itemMeta.GetLabels())) { + items = append(items, item) + } + } + l.setItems(list, items) + return list, err +} + +// Watch returns a watch.Interface that watches the requested resources. +func (c *FakeClient[T]) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchActionWithOptions(c.resource, c.ns, opts)) +} + +// Create takes the representation of a resource and creates it. Returns the server's representation of the resource, and an error, if there is any. +func (c *FakeClient[T]) Create(ctx context.Context, resource T, opts metav1.CreateOptions) (result T, err error) { + emptyResult := c.newObject() + obj, err := c.Fake. + Invokes(testing.NewCreateActionWithOptions(c.resource, c.ns, resource, opts), emptyResult) + if obj == nil { + return emptyResult, err + } + return obj.(T), err +} + +// Update takes the representation of a resource and updates it. Returns the server's representation of the resource, and an error, if there is any. +func (c *FakeClient[T]) Update(ctx context.Context, resource T, opts metav1.UpdateOptions) (result T, err error) { + emptyResult := c.newObject() + obj, err := c.Fake. + Invokes(testing.NewUpdateActionWithOptions(c.resource, c.ns, resource, opts), emptyResult) + if obj == nil { + return emptyResult, err + } + return obj.(T), err +} + +// UpdateStatus updates the resource's status and returns the updated resource. +func (c *FakeClient[T]) UpdateStatus(ctx context.Context, resource T, opts metav1.UpdateOptions) (result T, err error) { + emptyResult := c.newObject() + obj, err := c.Fake. + Invokes(testing.NewUpdateSubresourceActionWithOptions(c.resource, "status", c.ns, resource, opts), emptyResult) + + if obj == nil { + return emptyResult, err + } + return obj.(T), err +} + +// Delete deletes the resource matching the given name. Returns an error if one occurs. +func (c *FakeClient[T]) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteActionWithOptions(c.resource, c.ns, name, opts), c.newObject()) + return err +} + +// DeleteCollection deletes a collection of objects. +func (l *alsoFakeLister[T, L]) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { + _, err := l.client.Fake. + Invokes(testing.NewDeleteCollectionActionWithOptions(l.client.resource, l.client.ns, opts, listOpts), l.newList()) + return err +} + +// Patch applies the patch and returns the patched resource. +func (c *FakeClient[T]) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result T, err error) { + emptyResult := c.newObject() + obj, err := c.Fake. 
+ Invokes(testing.NewPatchSubresourceActionWithOptions(c.resource, c.ns, name, pt, data, opts, subresources...), emptyResult) + if obj == nil { + return emptyResult, err + } + return obj.(T), err +} + +// Apply takes the given apply declarative configuration, applies it and returns the applied resource. +func (a *alsoFakeApplier[T, C]) Apply(ctx context.Context, configuration C, opts metav1.ApplyOptions) (result T, err error) { + if configuration == *new(C) { + return *new(T), fmt.Errorf("configuration provided to Apply must not be nil") + } + data, err := json.Marshal(configuration) + if err != nil { + return *new(T), err + } + name := configuration.GetName() + if name == nil { + return *new(T), fmt.Errorf("configuration.Name must be provided to Apply") + } + emptyResult := a.client.newObject() + obj, err := a.client.Fake. + Invokes(testing.NewPatchSubresourceActionWithOptions(a.client.resource, a.client.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) + if obj == nil { + return emptyResult, err + } + return obj.(T), err +} + +// ApplyStatus applies the given apply declarative configuration to the resource's status and returns the updated resource. +func (a *alsoFakeApplier[T, C]) ApplyStatus(ctx context.Context, configuration C, opts metav1.ApplyOptions) (result T, err error) { + if configuration == *new(C) { + return *new(T), fmt.Errorf("configuration provided to Apply must not be nil") + } + data, err := json.Marshal(configuration) + if err != nil { + return *new(T), err + } + name := configuration.GetName() + if name == nil { + return *new(T), fmt.Errorf("configuration.Name must be provided to Apply") + } + emptyResult := a.client.newObject() + obj, err := a.client.Fake. + Invokes(testing.NewPatchSubresourceActionWithOptions(a.client.resource, a.client.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) + + if obj == nil { + return emptyResult, err + } + return obj.(T), err +} + +func (c *FakeClient[T]) Namespace() string { + return c.ns +} + +func (c *FakeClient[T]) Kind() schema.GroupVersionKind { + return c.kind +} + +func (c *FakeClient[T]) Resource() schema.GroupVersionResource { + return c.resource +} diff --git a/vendor/k8s.io/client-go/gentype/type.go b/vendor/k8s.io/client-go/gentype/type.go index b5be84318..e6ed6aae7 100644 --- a/vendor/k8s.io/client-go/gentype/type.go +++ b/vendor/k8s.io/client-go/gentype/type.go @@ -18,7 +18,6 @@ package gentype import ( "context" - json "encoding/json" "fmt" "time" @@ -27,6 +26,7 @@ import ( types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" rest "k8s.io/client-go/rest" + "k8s.io/client-go/util/apply" "k8s.io/client-go/util/consistencydetector" "k8s.io/client-go/util/watchlist" "k8s.io/klog/v2" @@ -51,6 +51,8 @@ type Client[T objectWithMeta] struct { namespace string // "" for non-namespaced clients newObject func() T parameterCodec runtime.ParameterCodec + + prefersProtobuf bool } // ClientWithList represents a client with support for lists. @@ -82,26 +84,37 @@ type alsoApplier[T objectWithMeta, C namedObject] struct { client *Client[T] } +type Option[T objectWithMeta] func(*Client[T]) + +func PrefersProtobuf[T objectWithMeta]() Option[T] { + return func(c *Client[T]) { c.prefersProtobuf = true } +} + // NewClient constructs a client, namespaced or not, with no support for lists or apply. // Non-namespaced clients are constructed by passing an empty namespace (""). 
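The Option and PrefersProtobuf hooks introduced just above thread through every constructor that follows. A minimal sketch of how a caller might opt a typed client into protobuf wire encoding, assuming a rest.Interface named restClient exists and using ConfigMaps purely for illustration:

    package typedclient // hypothetical package, not part of the vendored tree

    import (
        corev1 "k8s.io/api/core/v1"
        "k8s.io/client-go/gentype"
        "k8s.io/client-go/kubernetes/scheme"
        rest "k8s.io/client-go/rest"
    )

    // newConfigMapClient constructs a list-capable typed client that prefers protobuf.
    func newConfigMapClient(restClient rest.Interface, namespace string) *gentype.ClientWithList[*corev1.ConfigMap, *corev1.ConfigMapList] {
        return gentype.NewClientWithList[*corev1.ConfigMap, *corev1.ConfigMapList](
            "configmaps", restClient, scheme.ParameterCodec, namespace,
            func() *corev1.ConfigMap { return &corev1.ConfigMap{} },
            func() *corev1.ConfigMapList { return &corev1.ConfigMapList{} },
            gentype.PrefersProtobuf[*corev1.ConfigMap](), // new option added in this hunk
        )
    }

With the option set, each verb in the hunks that follow calls UseProtobufAsDefaultIfPreferred on the underlying rest request, so the preference is applied per request rather than per transport.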
func NewClient[T objectWithMeta]( resource string, client rest.Interface, parameterCodec runtime.ParameterCodec, namespace string, emptyObjectCreator func() T, + options ...Option[T], ) *Client[T] { - return &Client[T]{ + c := &Client[T]{ resource: resource, client: client, parameterCodec: parameterCodec, namespace: namespace, newObject: emptyObjectCreator, } + for _, option := range options { + option(c) + } + return c } // NewClientWithList constructs a namespaced client with support for lists. func NewClientWithList[T objectWithMeta, L runtime.Object]( resource string, client rest.Interface, parameterCodec runtime.ParameterCodec, namespace string, emptyObjectCreator func() T, - emptyListCreator func() L, + emptyListCreator func() L, options ...Option[T], ) *ClientWithList[T, L] { - typeClient := NewClient[T](resource, client, parameterCodec, namespace, emptyObjectCreator) + typeClient := NewClient[T](resource, client, parameterCodec, namespace, emptyObjectCreator, options...) return &ClientWithList[T, L]{ typeClient, alsoLister[T, L]{typeClient, emptyListCreator}, @@ -111,8 +124,9 @@ func NewClientWithList[T objectWithMeta, L runtime.Object]( // NewClientWithApply constructs a namespaced client with support for apply declarative configurations. func NewClientWithApply[T objectWithMeta, C namedObject]( resource string, client rest.Interface, parameterCodec runtime.ParameterCodec, namespace string, emptyObjectCreator func() T, + options ...Option[T], ) *ClientWithApply[T, C] { - typeClient := NewClient[T](resource, client, parameterCodec, namespace, emptyObjectCreator) + typeClient := NewClient[T](resource, client, parameterCodec, namespace, emptyObjectCreator, options...) return &ClientWithApply[T, C]{ typeClient, alsoApplier[T, C]{typeClient}, @@ -122,9 +136,9 @@ func NewClientWithApply[T objectWithMeta, C namedObject]( // NewClientWithListAndApply constructs a client with support for lists and applying declarative configurations. func NewClientWithListAndApply[T objectWithMeta, L runtime.Object, C namedObject]( resource string, client rest.Interface, parameterCodec runtime.ParameterCodec, namespace string, emptyObjectCreator func() T, - emptyListCreator func() L, + emptyListCreator func() L, options ...Option[T], ) *ClientWithListAndApply[T, L, C] { - typeClient := NewClient[T](resource, client, parameterCodec, namespace, emptyObjectCreator) + typeClient := NewClient[T](resource, client, parameterCodec, namespace, emptyObjectCreator, options...) return &ClientWithListAndApply[T, L, C]{ typeClient, alsoLister[T, L]{typeClient, emptyListCreator}, @@ -146,6 +160,7 @@ func (c *Client[T]) GetNamespace() string { func (c *Client[T]) Get(ctx context.Context, name string, options metav1.GetOptions) (T, error) { result := c.newObject() err := c.client.Get(). + UseProtobufAsDefaultIfPreferred(c.prefersProtobuf). NamespaceIfScoped(c.namespace, c.namespace != ""). Resource(c.resource). Name(name). @@ -181,6 +196,7 @@ func (l *alsoLister[T, L]) list(ctx context.Context, opts metav1.ListOptions) (L timeout = time.Duration(*opts.TimeoutSeconds) * time.Second } err := l.client.client.Get(). + UseProtobufAsDefaultIfPreferred(l.client.prefersProtobuf). NamespaceIfScoped(l.client.namespace, l.client.namespace != ""). Resource(l.client.resource). VersionedParams(&opts, l.client.parameterCodec). @@ -198,6 +214,7 @@ func (l *alsoLister[T, L]) watchList(ctx context.Context, opts metav1.ListOption } result = l.newList() err = l.client.client.Get(). + UseProtobufAsDefaultIfPreferred(l.client.prefersProtobuf). 
NamespaceIfScoped(l.client.namespace, l.client.namespace != ""). Resource(l.client.resource). VersionedParams(&opts, l.client.parameterCodec). @@ -215,6 +232,7 @@ func (c *Client[T]) Watch(ctx context.Context, opts metav1.ListOptions) (watch.I } opts.Watch = true return c.client.Get(). + UseProtobufAsDefaultIfPreferred(c.prefersProtobuf). NamespaceIfScoped(c.namespace, c.namespace != ""). Resource(c.resource). VersionedParams(&opts, c.parameterCodec). @@ -226,6 +244,7 @@ func (c *Client[T]) Watch(ctx context.Context, opts metav1.ListOptions) (watch.I func (c *Client[T]) Create(ctx context.Context, obj T, opts metav1.CreateOptions) (T, error) { result := c.newObject() err := c.client.Post(). + UseProtobufAsDefaultIfPreferred(c.prefersProtobuf). NamespaceIfScoped(c.namespace, c.namespace != ""). Resource(c.resource). VersionedParams(&opts, c.parameterCodec). @@ -239,6 +258,7 @@ func (c *Client[T]) Create(ctx context.Context, obj T, opts metav1.CreateOptions func (c *Client[T]) Update(ctx context.Context, obj T, opts metav1.UpdateOptions) (T, error) { result := c.newObject() err := c.client.Put(). + UseProtobufAsDefaultIfPreferred(c.prefersProtobuf). NamespaceIfScoped(c.namespace, c.namespace != ""). Resource(c.resource). Name(obj.GetName()). @@ -253,6 +273,7 @@ func (c *Client[T]) Update(ctx context.Context, obj T, opts metav1.UpdateOptions func (c *Client[T]) UpdateStatus(ctx context.Context, obj T, opts metav1.UpdateOptions) (T, error) { result := c.newObject() err := c.client.Put(). + UseProtobufAsDefaultIfPreferred(c.prefersProtobuf). NamespaceIfScoped(c.namespace, c.namespace != ""). Resource(c.resource). Name(obj.GetName()). @@ -267,6 +288,7 @@ func (c *Client[T]) UpdateStatus(ctx context.Context, obj T, opts metav1.UpdateO // Delete takes name of the resource and deletes it. Returns an error if one occurs. func (c *Client[T]) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { return c.client.Delete(). + UseProtobufAsDefaultIfPreferred(c.prefersProtobuf). NamespaceIfScoped(c.namespace, c.namespace != ""). Resource(c.resource). Name(name). @@ -282,6 +304,7 @@ func (l *alsoLister[T, L]) DeleteCollection(ctx context.Context, opts metav1.Del timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second } return l.client.client.Delete(). + UseProtobufAsDefaultIfPreferred(l.client.prefersProtobuf). NamespaceIfScoped(l.client.namespace, l.client.namespace != ""). Resource(l.client.resource). VersionedParams(&listOpts, l.client.parameterCodec). @@ -295,6 +318,7 @@ func (l *alsoLister[T, L]) DeleteCollection(ctx context.Context, opts metav1.Del func (c *Client[T]) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (T, error) { result := c.newObject() err := c.client.Patch(pt). + UseProtobufAsDefaultIfPreferred(c.prefersProtobuf). NamespaceIfScoped(c.namespace, c.namespace != ""). Resource(c.resource). Name(name). @@ -313,19 +337,21 @@ func (a *alsoApplier[T, C]) Apply(ctx context.Context, obj C, opts metav1.ApplyO return *new(T), fmt.Errorf("object provided to Apply must not be nil") } patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(obj) - if err != nil { - return *new(T), err - } if obj.GetName() == nil { return *new(T), fmt.Errorf("obj.Name must be provided to Apply") } - err = a.client.client.Patch(types.ApplyPatchType). + + request, err := apply.NewRequest(a.client.client, obj) + if err != nil { + return *new(T), err + } + + err = request. 
+ UseProtobufAsDefaultIfPreferred(a.client.prefersProtobuf). NamespaceIfScoped(a.client.namespace, a.client.namespace != ""). Resource(a.client.resource). Name(*obj.GetName()). VersionedParams(&patchOpts, a.client.parameterCodec). - Body(data). Do(ctx). Into(result) return result, err @@ -337,23 +363,24 @@ func (a *alsoApplier[T, C]) ApplyStatus(ctx context.Context, obj C, opts metav1. return *new(T), fmt.Errorf("object provided to Apply must not be nil") } patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(obj) - if err != nil { - return *new(T), err - } if obj.GetName() == nil { return *new(T), fmt.Errorf("obj.Name must be provided to Apply") } + request, err := apply.NewRequest(a.client.client, obj) + if err != nil { + return *new(T), err + } + result := a.client.newObject() - err = a.client.client.Patch(types.ApplyPatchType). + err = request. + UseProtobufAsDefaultIfPreferred(a.client.prefersProtobuf). NamespaceIfScoped(a.client.namespace, a.client.namespace != ""). Resource(a.client.resource). Name(*obj.GetName()). SubResource("status"). VersionedParams(&patchOpts, a.client.parameterCodec). - Body(data). Do(ctx). Into(result) return result, err diff --git a/vendor/k8s.io/client-go/informers/admissionregistration/v1/mutatingwebhookconfiguration.go b/vendor/k8s.io/client-go/informers/admissionregistration/v1/mutatingwebhookconfiguration.go index b768f6f7f..11c67480f 100644 --- a/vendor/k8s.io/client-go/informers/admissionregistration/v1/mutatingwebhookconfiguration.go +++ b/vendor/k8s.io/client-go/informers/admissionregistration/v1/mutatingwebhookconfiguration.go @@ -19,16 +19,16 @@ limitations under the License. package v1 import ( - "context" + context "context" time "time" - admissionregistrationv1 "k8s.io/api/admissionregistration/v1" + apiadmissionregistrationv1 "k8s.io/api/admissionregistration/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1 "k8s.io/client-go/listers/admissionregistration/v1" + admissionregistrationv1 "k8s.io/client-go/listers/admissionregistration/v1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // MutatingWebhookConfigurations. 
type MutatingWebhookConfigurationInformer interface { Informer() cache.SharedIndexInformer - Lister() v1.MutatingWebhookConfigurationLister + Lister() admissionregistrationv1.MutatingWebhookConfigurationLister } type mutatingWebhookConfigurationInformer struct { @@ -70,7 +70,7 @@ func NewFilteredMutatingWebhookConfigurationInformer(client kubernetes.Interface return client.AdmissionregistrationV1().MutatingWebhookConfigurations().Watch(context.TODO(), options) }, }, - &admissionregistrationv1.MutatingWebhookConfiguration{}, + &apiadmissionregistrationv1.MutatingWebhookConfiguration{}, resyncPeriod, indexers, ) @@ -81,9 +81,9 @@ func (f *mutatingWebhookConfigurationInformer) defaultInformer(client kubernetes } func (f *mutatingWebhookConfigurationInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&admissionregistrationv1.MutatingWebhookConfiguration{}, f.defaultInformer) + return f.factory.InformerFor(&apiadmissionregistrationv1.MutatingWebhookConfiguration{}, f.defaultInformer) } -func (f *mutatingWebhookConfigurationInformer) Lister() v1.MutatingWebhookConfigurationLister { - return v1.NewMutatingWebhookConfigurationLister(f.Informer().GetIndexer()) +func (f *mutatingWebhookConfigurationInformer) Lister() admissionregistrationv1.MutatingWebhookConfigurationLister { + return admissionregistrationv1.NewMutatingWebhookConfigurationLister(f.Informer().GetIndexer()) } diff --git a/vendor/k8s.io/client-go/informers/admissionregistration/v1/validatingadmissionpolicy.go b/vendor/k8s.io/client-go/informers/admissionregistration/v1/validatingadmissionpolicy.go index eaf9414e2..e6974238c 100644 --- a/vendor/k8s.io/client-go/informers/admissionregistration/v1/validatingadmissionpolicy.go +++ b/vendor/k8s.io/client-go/informers/admissionregistration/v1/validatingadmissionpolicy.go @@ -19,16 +19,16 @@ limitations under the License. package v1 import ( - "context" + context "context" time "time" - admissionregistrationv1 "k8s.io/api/admissionregistration/v1" + apiadmissionregistrationv1 "k8s.io/api/admissionregistration/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1 "k8s.io/client-go/listers/admissionregistration/v1" + admissionregistrationv1 "k8s.io/client-go/listers/admissionregistration/v1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // ValidatingAdmissionPolicies. 
type ValidatingAdmissionPolicyInformer interface { Informer() cache.SharedIndexInformer - Lister() v1.ValidatingAdmissionPolicyLister + Lister() admissionregistrationv1.ValidatingAdmissionPolicyLister } type validatingAdmissionPolicyInformer struct { @@ -70,7 +70,7 @@ func NewFilteredValidatingAdmissionPolicyInformer(client kubernetes.Interface, r return client.AdmissionregistrationV1().ValidatingAdmissionPolicies().Watch(context.TODO(), options) }, }, - &admissionregistrationv1.ValidatingAdmissionPolicy{}, + &apiadmissionregistrationv1.ValidatingAdmissionPolicy{}, resyncPeriod, indexers, ) @@ -81,9 +81,9 @@ func (f *validatingAdmissionPolicyInformer) defaultInformer(client kubernetes.In } func (f *validatingAdmissionPolicyInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&admissionregistrationv1.ValidatingAdmissionPolicy{}, f.defaultInformer) + return f.factory.InformerFor(&apiadmissionregistrationv1.ValidatingAdmissionPolicy{}, f.defaultInformer) } -func (f *validatingAdmissionPolicyInformer) Lister() v1.ValidatingAdmissionPolicyLister { - return v1.NewValidatingAdmissionPolicyLister(f.Informer().GetIndexer()) +func (f *validatingAdmissionPolicyInformer) Lister() admissionregistrationv1.ValidatingAdmissionPolicyLister { + return admissionregistrationv1.NewValidatingAdmissionPolicyLister(f.Informer().GetIndexer()) } diff --git a/vendor/k8s.io/client-go/informers/admissionregistration/v1/validatingadmissionpolicybinding.go b/vendor/k8s.io/client-go/informers/admissionregistration/v1/validatingadmissionpolicybinding.go index 8cd61bf28..34067ca38 100644 --- a/vendor/k8s.io/client-go/informers/admissionregistration/v1/validatingadmissionpolicybinding.go +++ b/vendor/k8s.io/client-go/informers/admissionregistration/v1/validatingadmissionpolicybinding.go @@ -19,16 +19,16 @@ limitations under the License. package v1 import ( - "context" + context "context" time "time" - admissionregistrationv1 "k8s.io/api/admissionregistration/v1" + apiadmissionregistrationv1 "k8s.io/api/admissionregistration/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1 "k8s.io/client-go/listers/admissionregistration/v1" + admissionregistrationv1 "k8s.io/client-go/listers/admissionregistration/v1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // ValidatingAdmissionPolicyBindings. 
type ValidatingAdmissionPolicyBindingInformer interface { Informer() cache.SharedIndexInformer - Lister() v1.ValidatingAdmissionPolicyBindingLister + Lister() admissionregistrationv1.ValidatingAdmissionPolicyBindingLister } type validatingAdmissionPolicyBindingInformer struct { @@ -70,7 +70,7 @@ func NewFilteredValidatingAdmissionPolicyBindingInformer(client kubernetes.Inter return client.AdmissionregistrationV1().ValidatingAdmissionPolicyBindings().Watch(context.TODO(), options) }, }, - &admissionregistrationv1.ValidatingAdmissionPolicyBinding{}, + &apiadmissionregistrationv1.ValidatingAdmissionPolicyBinding{}, resyncPeriod, indexers, ) @@ -81,9 +81,9 @@ func (f *validatingAdmissionPolicyBindingInformer) defaultInformer(client kubern } func (f *validatingAdmissionPolicyBindingInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&admissionregistrationv1.ValidatingAdmissionPolicyBinding{}, f.defaultInformer) + return f.factory.InformerFor(&apiadmissionregistrationv1.ValidatingAdmissionPolicyBinding{}, f.defaultInformer) } -func (f *validatingAdmissionPolicyBindingInformer) Lister() v1.ValidatingAdmissionPolicyBindingLister { - return v1.NewValidatingAdmissionPolicyBindingLister(f.Informer().GetIndexer()) +func (f *validatingAdmissionPolicyBindingInformer) Lister() admissionregistrationv1.ValidatingAdmissionPolicyBindingLister { + return admissionregistrationv1.NewValidatingAdmissionPolicyBindingLister(f.Informer().GetIndexer()) } diff --git a/vendor/k8s.io/client-go/informers/admissionregistration/v1/validatingwebhookconfiguration.go b/vendor/k8s.io/client-go/informers/admissionregistration/v1/validatingwebhookconfiguration.go index 8ddcdf2d9..42ca69c22 100644 --- a/vendor/k8s.io/client-go/informers/admissionregistration/v1/validatingwebhookconfiguration.go +++ b/vendor/k8s.io/client-go/informers/admissionregistration/v1/validatingwebhookconfiguration.go @@ -19,16 +19,16 @@ limitations under the License. package v1 import ( - "context" + context "context" time "time" - admissionregistrationv1 "k8s.io/api/admissionregistration/v1" + apiadmissionregistrationv1 "k8s.io/api/admissionregistration/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1 "k8s.io/client-go/listers/admissionregistration/v1" + admissionregistrationv1 "k8s.io/client-go/listers/admissionregistration/v1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // ValidatingWebhookConfigurations. 
type ValidatingWebhookConfigurationInformer interface { Informer() cache.SharedIndexInformer - Lister() v1.ValidatingWebhookConfigurationLister + Lister() admissionregistrationv1.ValidatingWebhookConfigurationLister } type validatingWebhookConfigurationInformer struct { @@ -70,7 +70,7 @@ func NewFilteredValidatingWebhookConfigurationInformer(client kubernetes.Interfa return client.AdmissionregistrationV1().ValidatingWebhookConfigurations().Watch(context.TODO(), options) }, }, - &admissionregistrationv1.ValidatingWebhookConfiguration{}, + &apiadmissionregistrationv1.ValidatingWebhookConfiguration{}, resyncPeriod, indexers, ) @@ -81,9 +81,9 @@ func (f *validatingWebhookConfigurationInformer) defaultInformer(client kubernet } func (f *validatingWebhookConfigurationInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&admissionregistrationv1.ValidatingWebhookConfiguration{}, f.defaultInformer) + return f.factory.InformerFor(&apiadmissionregistrationv1.ValidatingWebhookConfiguration{}, f.defaultInformer) } -func (f *validatingWebhookConfigurationInformer) Lister() v1.ValidatingWebhookConfigurationLister { - return v1.NewValidatingWebhookConfigurationLister(f.Informer().GetIndexer()) +func (f *validatingWebhookConfigurationInformer) Lister() admissionregistrationv1.ValidatingWebhookConfigurationLister { + return admissionregistrationv1.NewValidatingWebhookConfigurationLister(f.Informer().GetIndexer()) } diff --git a/vendor/k8s.io/client-go/informers/admissionregistration/v1alpha1/interface.go b/vendor/k8s.io/client-go/informers/admissionregistration/v1alpha1/interface.go index 738063ee7..68ae4e25c 100644 --- a/vendor/k8s.io/client-go/informers/admissionregistration/v1alpha1/interface.go +++ b/vendor/k8s.io/client-go/informers/admissionregistration/v1alpha1/interface.go @@ -24,6 +24,10 @@ import ( // Interface provides access to all the informers in this group version. type Interface interface { + // MutatingAdmissionPolicies returns a MutatingAdmissionPolicyInformer. + MutatingAdmissionPolicies() MutatingAdmissionPolicyInformer + // MutatingAdmissionPolicyBindings returns a MutatingAdmissionPolicyBindingInformer. + MutatingAdmissionPolicyBindings() MutatingAdmissionPolicyBindingInformer // ValidatingAdmissionPolicies returns a ValidatingAdmissionPolicyInformer. ValidatingAdmissionPolicies() ValidatingAdmissionPolicyInformer // ValidatingAdmissionPolicyBindings returns a ValidatingAdmissionPolicyBindingInformer. @@ -41,6 +45,16 @@ func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakList return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} } +// MutatingAdmissionPolicies returns a MutatingAdmissionPolicyInformer. +func (v *version) MutatingAdmissionPolicies() MutatingAdmissionPolicyInformer { + return &mutatingAdmissionPolicyInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} + +// MutatingAdmissionPolicyBindings returns a MutatingAdmissionPolicyBindingInformer. +func (v *version) MutatingAdmissionPolicyBindings() MutatingAdmissionPolicyBindingInformer { + return &mutatingAdmissionPolicyBindingInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} + // ValidatingAdmissionPolicies returns a ValidatingAdmissionPolicyInformer. 
func (v *version) ValidatingAdmissionPolicies() ValidatingAdmissionPolicyInformer { return &validatingAdmissionPolicyInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} diff --git a/vendor/k8s.io/client-go/informers/admissionregistration/v1alpha1/mutatingadmissionpolicy.go b/vendor/k8s.io/client-go/informers/admissionregistration/v1alpha1/mutatingadmissionpolicy.go new file mode 100644 index 000000000..5a23158bf --- /dev/null +++ b/vendor/k8s.io/client-go/informers/admissionregistration/v1alpha1/mutatingadmissionpolicy.go @@ -0,0 +1,89 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + context "context" + time "time" + + apiadmissionregistrationv1alpha1 "k8s.io/api/admissionregistration/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + admissionregistrationv1alpha1 "k8s.io/client-go/listers/admissionregistration/v1alpha1" + cache "k8s.io/client-go/tools/cache" +) + +// MutatingAdmissionPolicyInformer provides access to a shared informer and lister for +// MutatingAdmissionPolicies. +type MutatingAdmissionPolicyInformer interface { + Informer() cache.SharedIndexInformer + Lister() admissionregistrationv1alpha1.MutatingAdmissionPolicyLister +} + +type mutatingAdmissionPolicyInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewMutatingAdmissionPolicyInformer constructs a new informer for MutatingAdmissionPolicy type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewMutatingAdmissionPolicyInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredMutatingAdmissionPolicyInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredMutatingAdmissionPolicyInformer constructs a new informer for MutatingAdmissionPolicy type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
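The client-go update also adds these v1alpha1 MutatingAdmissionPolicy informers, constructed below. A minimal sketch of the factory route that the generated comment above recommends, assuming a kubernetes.Interface clientset and an illustrative resync period:

    package watchpolicies // hypothetical package, not part of the vendored tree

    import (
        "context"
        "time"

        "k8s.io/client-go/informers"
        "k8s.io/client-go/kubernetes"
    )

    // startMutatingAdmissionPolicyInformer registers the informer with the shared
    // factory, starts it, and waits for its cache to sync.
    func startMutatingAdmissionPolicyInformer(ctx context.Context, cs kubernetes.Interface) {
        factory := informers.NewSharedInformerFactory(cs, 10*time.Minute)
        policies := factory.Admissionregistration().V1alpha1().MutatingAdmissionPolicies()
        _ = policies.Lister() // asking for the lister also registers the shared informer
        factory.Start(ctx.Done())
        factory.WaitForCacheSync(ctx.Done())
    }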
+func NewFilteredMutatingAdmissionPolicyInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.AdmissionregistrationV1alpha1().MutatingAdmissionPolicies().List(context.TODO(), options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.AdmissionregistrationV1alpha1().MutatingAdmissionPolicies().Watch(context.TODO(), options) + }, + }, + &apiadmissionregistrationv1alpha1.MutatingAdmissionPolicy{}, + resyncPeriod, + indexers, + ) +} + +func (f *mutatingAdmissionPolicyInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredMutatingAdmissionPolicyInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *mutatingAdmissionPolicyInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&apiadmissionregistrationv1alpha1.MutatingAdmissionPolicy{}, f.defaultInformer) +} + +func (f *mutatingAdmissionPolicyInformer) Lister() admissionregistrationv1alpha1.MutatingAdmissionPolicyLister { + return admissionregistrationv1alpha1.NewMutatingAdmissionPolicyLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/admissionregistration/v1alpha1/mutatingadmissionpolicybinding.go b/vendor/k8s.io/client-go/informers/admissionregistration/v1alpha1/mutatingadmissionpolicybinding.go new file mode 100644 index 000000000..efa143fe5 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/admissionregistration/v1alpha1/mutatingadmissionpolicybinding.go @@ -0,0 +1,89 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + context "context" + time "time" + + apiadmissionregistrationv1alpha1 "k8s.io/api/admissionregistration/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + admissionregistrationv1alpha1 "k8s.io/client-go/listers/admissionregistration/v1alpha1" + cache "k8s.io/client-go/tools/cache" +) + +// MutatingAdmissionPolicyBindingInformer provides access to a shared informer and lister for +// MutatingAdmissionPolicyBindings. 
+type MutatingAdmissionPolicyBindingInformer interface { + Informer() cache.SharedIndexInformer + Lister() admissionregistrationv1alpha1.MutatingAdmissionPolicyBindingLister +} + +type mutatingAdmissionPolicyBindingInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewMutatingAdmissionPolicyBindingInformer constructs a new informer for MutatingAdmissionPolicyBinding type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewMutatingAdmissionPolicyBindingInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredMutatingAdmissionPolicyBindingInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredMutatingAdmissionPolicyBindingInformer constructs a new informer for MutatingAdmissionPolicyBinding type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredMutatingAdmissionPolicyBindingInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.AdmissionregistrationV1alpha1().MutatingAdmissionPolicyBindings().List(context.TODO(), options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.AdmissionregistrationV1alpha1().MutatingAdmissionPolicyBindings().Watch(context.TODO(), options) + }, + }, + &apiadmissionregistrationv1alpha1.MutatingAdmissionPolicyBinding{}, + resyncPeriod, + indexers, + ) +} + +func (f *mutatingAdmissionPolicyBindingInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredMutatingAdmissionPolicyBindingInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *mutatingAdmissionPolicyBindingInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&apiadmissionregistrationv1alpha1.MutatingAdmissionPolicyBinding{}, f.defaultInformer) +} + +func (f *mutatingAdmissionPolicyBindingInformer) Lister() admissionregistrationv1alpha1.MutatingAdmissionPolicyBindingLister { + return admissionregistrationv1alpha1.NewMutatingAdmissionPolicyBindingLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/admissionregistration/v1alpha1/validatingadmissionpolicy.go b/vendor/k8s.io/client-go/informers/admissionregistration/v1alpha1/validatingadmissionpolicy.go index 01b8a4ab8..aaae7b297 100644 --- a/vendor/k8s.io/client-go/informers/admissionregistration/v1alpha1/validatingadmissionpolicy.go +++ b/vendor/k8s.io/client-go/informers/admissionregistration/v1alpha1/validatingadmissionpolicy.go @@ -19,16 +19,16 @@ limitations under the License. 
package v1alpha1 import ( - "context" + context "context" time "time" - admissionregistrationv1alpha1 "k8s.io/api/admissionregistration/v1alpha1" + apiadmissionregistrationv1alpha1 "k8s.io/api/admissionregistration/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1alpha1 "k8s.io/client-go/listers/admissionregistration/v1alpha1" + admissionregistrationv1alpha1 "k8s.io/client-go/listers/admissionregistration/v1alpha1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // ValidatingAdmissionPolicies. type ValidatingAdmissionPolicyInformer interface { Informer() cache.SharedIndexInformer - Lister() v1alpha1.ValidatingAdmissionPolicyLister + Lister() admissionregistrationv1alpha1.ValidatingAdmissionPolicyLister } type validatingAdmissionPolicyInformer struct { @@ -70,7 +70,7 @@ func NewFilteredValidatingAdmissionPolicyInformer(client kubernetes.Interface, r return client.AdmissionregistrationV1alpha1().ValidatingAdmissionPolicies().Watch(context.TODO(), options) }, }, - &admissionregistrationv1alpha1.ValidatingAdmissionPolicy{}, + &apiadmissionregistrationv1alpha1.ValidatingAdmissionPolicy{}, resyncPeriod, indexers, ) @@ -81,9 +81,9 @@ func (f *validatingAdmissionPolicyInformer) defaultInformer(client kubernetes.In } func (f *validatingAdmissionPolicyInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&admissionregistrationv1alpha1.ValidatingAdmissionPolicy{}, f.defaultInformer) + return f.factory.InformerFor(&apiadmissionregistrationv1alpha1.ValidatingAdmissionPolicy{}, f.defaultInformer) } -func (f *validatingAdmissionPolicyInformer) Lister() v1alpha1.ValidatingAdmissionPolicyLister { - return v1alpha1.NewValidatingAdmissionPolicyLister(f.Informer().GetIndexer()) +func (f *validatingAdmissionPolicyInformer) Lister() admissionregistrationv1alpha1.ValidatingAdmissionPolicyLister { + return admissionregistrationv1alpha1.NewValidatingAdmissionPolicyLister(f.Informer().GetIndexer()) } diff --git a/vendor/k8s.io/client-go/informers/admissionregistration/v1alpha1/validatingadmissionpolicybinding.go b/vendor/k8s.io/client-go/informers/admissionregistration/v1alpha1/validatingadmissionpolicybinding.go index bd531512b..d62c59061 100644 --- a/vendor/k8s.io/client-go/informers/admissionregistration/v1alpha1/validatingadmissionpolicybinding.go +++ b/vendor/k8s.io/client-go/informers/admissionregistration/v1alpha1/validatingadmissionpolicybinding.go @@ -19,16 +19,16 @@ limitations under the License. package v1alpha1 import ( - "context" + context "context" time "time" - admissionregistrationv1alpha1 "k8s.io/api/admissionregistration/v1alpha1" + apiadmissionregistrationv1alpha1 "k8s.io/api/admissionregistration/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1alpha1 "k8s.io/client-go/listers/admissionregistration/v1alpha1" + admissionregistrationv1alpha1 "k8s.io/client-go/listers/admissionregistration/v1alpha1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // ValidatingAdmissionPolicyBindings. 
type ValidatingAdmissionPolicyBindingInformer interface { Informer() cache.SharedIndexInformer - Lister() v1alpha1.ValidatingAdmissionPolicyBindingLister + Lister() admissionregistrationv1alpha1.ValidatingAdmissionPolicyBindingLister } type validatingAdmissionPolicyBindingInformer struct { @@ -70,7 +70,7 @@ func NewFilteredValidatingAdmissionPolicyBindingInformer(client kubernetes.Inter return client.AdmissionregistrationV1alpha1().ValidatingAdmissionPolicyBindings().Watch(context.TODO(), options) }, }, - &admissionregistrationv1alpha1.ValidatingAdmissionPolicyBinding{}, + &apiadmissionregistrationv1alpha1.ValidatingAdmissionPolicyBinding{}, resyncPeriod, indexers, ) @@ -81,9 +81,9 @@ func (f *validatingAdmissionPolicyBindingInformer) defaultInformer(client kubern } func (f *validatingAdmissionPolicyBindingInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&admissionregistrationv1alpha1.ValidatingAdmissionPolicyBinding{}, f.defaultInformer) + return f.factory.InformerFor(&apiadmissionregistrationv1alpha1.ValidatingAdmissionPolicyBinding{}, f.defaultInformer) } -func (f *validatingAdmissionPolicyBindingInformer) Lister() v1alpha1.ValidatingAdmissionPolicyBindingLister { - return v1alpha1.NewValidatingAdmissionPolicyBindingLister(f.Informer().GetIndexer()) +func (f *validatingAdmissionPolicyBindingInformer) Lister() admissionregistrationv1alpha1.ValidatingAdmissionPolicyBindingLister { + return admissionregistrationv1alpha1.NewValidatingAdmissionPolicyBindingLister(f.Informer().GetIndexer()) } diff --git a/vendor/k8s.io/client-go/informers/admissionregistration/v1beta1/mutatingwebhookconfiguration.go b/vendor/k8s.io/client-go/informers/admissionregistration/v1beta1/mutatingwebhookconfiguration.go index 12c8ec1fb..c6ca36ea2 100644 --- a/vendor/k8s.io/client-go/informers/admissionregistration/v1beta1/mutatingwebhookconfiguration.go +++ b/vendor/k8s.io/client-go/informers/admissionregistration/v1beta1/mutatingwebhookconfiguration.go @@ -19,16 +19,16 @@ limitations under the License. package v1beta1 import ( - "context" + context "context" time "time" - admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1" + apiadmissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1beta1 "k8s.io/client-go/listers/admissionregistration/v1beta1" + admissionregistrationv1beta1 "k8s.io/client-go/listers/admissionregistration/v1beta1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // MutatingWebhookConfigurations. 
type MutatingWebhookConfigurationInformer interface { Informer() cache.SharedIndexInformer - Lister() v1beta1.MutatingWebhookConfigurationLister + Lister() admissionregistrationv1beta1.MutatingWebhookConfigurationLister } type mutatingWebhookConfigurationInformer struct { @@ -70,7 +70,7 @@ func NewFilteredMutatingWebhookConfigurationInformer(client kubernetes.Interface return client.AdmissionregistrationV1beta1().MutatingWebhookConfigurations().Watch(context.TODO(), options) }, }, - &admissionregistrationv1beta1.MutatingWebhookConfiguration{}, + &apiadmissionregistrationv1beta1.MutatingWebhookConfiguration{}, resyncPeriod, indexers, ) @@ -81,9 +81,9 @@ func (f *mutatingWebhookConfigurationInformer) defaultInformer(client kubernetes } func (f *mutatingWebhookConfigurationInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&admissionregistrationv1beta1.MutatingWebhookConfiguration{}, f.defaultInformer) + return f.factory.InformerFor(&apiadmissionregistrationv1beta1.MutatingWebhookConfiguration{}, f.defaultInformer) } -func (f *mutatingWebhookConfigurationInformer) Lister() v1beta1.MutatingWebhookConfigurationLister { - return v1beta1.NewMutatingWebhookConfigurationLister(f.Informer().GetIndexer()) +func (f *mutatingWebhookConfigurationInformer) Lister() admissionregistrationv1beta1.MutatingWebhookConfigurationLister { + return admissionregistrationv1beta1.NewMutatingWebhookConfigurationLister(f.Informer().GetIndexer()) } diff --git a/vendor/k8s.io/client-go/informers/admissionregistration/v1beta1/validatingadmissionpolicy.go b/vendor/k8s.io/client-go/informers/admissionregistration/v1beta1/validatingadmissionpolicy.go index d0e9cd64c..d5b4204f1 100644 --- a/vendor/k8s.io/client-go/informers/admissionregistration/v1beta1/validatingadmissionpolicy.go +++ b/vendor/k8s.io/client-go/informers/admissionregistration/v1beta1/validatingadmissionpolicy.go @@ -19,16 +19,16 @@ limitations under the License. package v1beta1 import ( - "context" + context "context" time "time" - admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1" + apiadmissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1beta1 "k8s.io/client-go/listers/admissionregistration/v1beta1" + admissionregistrationv1beta1 "k8s.io/client-go/listers/admissionregistration/v1beta1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // ValidatingAdmissionPolicies. 
type ValidatingAdmissionPolicyInformer interface { Informer() cache.SharedIndexInformer - Lister() v1beta1.ValidatingAdmissionPolicyLister + Lister() admissionregistrationv1beta1.ValidatingAdmissionPolicyLister } type validatingAdmissionPolicyInformer struct { @@ -70,7 +70,7 @@ func NewFilteredValidatingAdmissionPolicyInformer(client kubernetes.Interface, r return client.AdmissionregistrationV1beta1().ValidatingAdmissionPolicies().Watch(context.TODO(), options) }, }, - &admissionregistrationv1beta1.ValidatingAdmissionPolicy{}, + &apiadmissionregistrationv1beta1.ValidatingAdmissionPolicy{}, resyncPeriod, indexers, ) @@ -81,9 +81,9 @@ func (f *validatingAdmissionPolicyInformer) defaultInformer(client kubernetes.In } func (f *validatingAdmissionPolicyInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&admissionregistrationv1beta1.ValidatingAdmissionPolicy{}, f.defaultInformer) + return f.factory.InformerFor(&apiadmissionregistrationv1beta1.ValidatingAdmissionPolicy{}, f.defaultInformer) } -func (f *validatingAdmissionPolicyInformer) Lister() v1beta1.ValidatingAdmissionPolicyLister { - return v1beta1.NewValidatingAdmissionPolicyLister(f.Informer().GetIndexer()) +func (f *validatingAdmissionPolicyInformer) Lister() admissionregistrationv1beta1.ValidatingAdmissionPolicyLister { + return admissionregistrationv1beta1.NewValidatingAdmissionPolicyLister(f.Informer().GetIndexer()) } diff --git a/vendor/k8s.io/client-go/informers/admissionregistration/v1beta1/validatingadmissionpolicybinding.go b/vendor/k8s.io/client-go/informers/admissionregistration/v1beta1/validatingadmissionpolicybinding.go index 7641e9940..dbb5153ef 100644 --- a/vendor/k8s.io/client-go/informers/admissionregistration/v1beta1/validatingadmissionpolicybinding.go +++ b/vendor/k8s.io/client-go/informers/admissionregistration/v1beta1/validatingadmissionpolicybinding.go @@ -19,16 +19,16 @@ limitations under the License. package v1beta1 import ( - "context" + context "context" time "time" - admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1" + apiadmissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1beta1 "k8s.io/client-go/listers/admissionregistration/v1beta1" + admissionregistrationv1beta1 "k8s.io/client-go/listers/admissionregistration/v1beta1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // ValidatingAdmissionPolicyBindings. 
type ValidatingAdmissionPolicyBindingInformer interface { Informer() cache.SharedIndexInformer - Lister() v1beta1.ValidatingAdmissionPolicyBindingLister + Lister() admissionregistrationv1beta1.ValidatingAdmissionPolicyBindingLister } type validatingAdmissionPolicyBindingInformer struct { @@ -70,7 +70,7 @@ func NewFilteredValidatingAdmissionPolicyBindingInformer(client kubernetes.Inter return client.AdmissionregistrationV1beta1().ValidatingAdmissionPolicyBindings().Watch(context.TODO(), options) }, }, - &admissionregistrationv1beta1.ValidatingAdmissionPolicyBinding{}, + &apiadmissionregistrationv1beta1.ValidatingAdmissionPolicyBinding{}, resyncPeriod, indexers, ) @@ -81,9 +81,9 @@ func (f *validatingAdmissionPolicyBindingInformer) defaultInformer(client kubern } func (f *validatingAdmissionPolicyBindingInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&admissionregistrationv1beta1.ValidatingAdmissionPolicyBinding{}, f.defaultInformer) + return f.factory.InformerFor(&apiadmissionregistrationv1beta1.ValidatingAdmissionPolicyBinding{}, f.defaultInformer) } -func (f *validatingAdmissionPolicyBindingInformer) Lister() v1beta1.ValidatingAdmissionPolicyBindingLister { - return v1beta1.NewValidatingAdmissionPolicyBindingLister(f.Informer().GetIndexer()) +func (f *validatingAdmissionPolicyBindingInformer) Lister() admissionregistrationv1beta1.ValidatingAdmissionPolicyBindingLister { + return admissionregistrationv1beta1.NewValidatingAdmissionPolicyBindingLister(f.Informer().GetIndexer()) } diff --git a/vendor/k8s.io/client-go/informers/admissionregistration/v1beta1/validatingwebhookconfiguration.go b/vendor/k8s.io/client-go/informers/admissionregistration/v1beta1/validatingwebhookconfiguration.go index 05eb05097..602b361af 100644 --- a/vendor/k8s.io/client-go/informers/admissionregistration/v1beta1/validatingwebhookconfiguration.go +++ b/vendor/k8s.io/client-go/informers/admissionregistration/v1beta1/validatingwebhookconfiguration.go @@ -19,16 +19,16 @@ limitations under the License. package v1beta1 import ( - "context" + context "context" time "time" - admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1" + apiadmissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1beta1 "k8s.io/client-go/listers/admissionregistration/v1beta1" + admissionregistrationv1beta1 "k8s.io/client-go/listers/admissionregistration/v1beta1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // ValidatingWebhookConfigurations. 
type ValidatingWebhookConfigurationInformer interface { Informer() cache.SharedIndexInformer - Lister() v1beta1.ValidatingWebhookConfigurationLister + Lister() admissionregistrationv1beta1.ValidatingWebhookConfigurationLister } type validatingWebhookConfigurationInformer struct { @@ -70,7 +70,7 @@ func NewFilteredValidatingWebhookConfigurationInformer(client kubernetes.Interfa return client.AdmissionregistrationV1beta1().ValidatingWebhookConfigurations().Watch(context.TODO(), options) }, }, - &admissionregistrationv1beta1.ValidatingWebhookConfiguration{}, + &apiadmissionregistrationv1beta1.ValidatingWebhookConfiguration{}, resyncPeriod, indexers, ) @@ -81,9 +81,9 @@ func (f *validatingWebhookConfigurationInformer) defaultInformer(client kubernet } func (f *validatingWebhookConfigurationInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&admissionregistrationv1beta1.ValidatingWebhookConfiguration{}, f.defaultInformer) + return f.factory.InformerFor(&apiadmissionregistrationv1beta1.ValidatingWebhookConfiguration{}, f.defaultInformer) } -func (f *validatingWebhookConfigurationInformer) Lister() v1beta1.ValidatingWebhookConfigurationLister { - return v1beta1.NewValidatingWebhookConfigurationLister(f.Informer().GetIndexer()) +func (f *validatingWebhookConfigurationInformer) Lister() admissionregistrationv1beta1.ValidatingWebhookConfigurationLister { + return admissionregistrationv1beta1.NewValidatingWebhookConfigurationLister(f.Informer().GetIndexer()) } diff --git a/vendor/k8s.io/client-go/informers/apiserverinternal/v1alpha1/storageversion.go b/vendor/k8s.io/client-go/informers/apiserverinternal/v1alpha1/storageversion.go index 34175b522..a99dbd17d 100644 --- a/vendor/k8s.io/client-go/informers/apiserverinternal/v1alpha1/storageversion.go +++ b/vendor/k8s.io/client-go/informers/apiserverinternal/v1alpha1/storageversion.go @@ -19,16 +19,16 @@ limitations under the License. package v1alpha1 import ( - "context" + context "context" time "time" - apiserverinternalv1alpha1 "k8s.io/api/apiserverinternal/v1alpha1" + apiapiserverinternalv1alpha1 "k8s.io/api/apiserverinternal/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1alpha1 "k8s.io/client-go/listers/apiserverinternal/v1alpha1" + apiserverinternalv1alpha1 "k8s.io/client-go/listers/apiserverinternal/v1alpha1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // StorageVersions. 
type StorageVersionInformer interface { Informer() cache.SharedIndexInformer - Lister() v1alpha1.StorageVersionLister + Lister() apiserverinternalv1alpha1.StorageVersionLister } type storageVersionInformer struct { @@ -70,7 +70,7 @@ func NewFilteredStorageVersionInformer(client kubernetes.Interface, resyncPeriod return client.InternalV1alpha1().StorageVersions().Watch(context.TODO(), options) }, }, - &apiserverinternalv1alpha1.StorageVersion{}, + &apiapiserverinternalv1alpha1.StorageVersion{}, resyncPeriod, indexers, ) @@ -81,9 +81,9 @@ func (f *storageVersionInformer) defaultInformer(client kubernetes.Interface, re } func (f *storageVersionInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&apiserverinternalv1alpha1.StorageVersion{}, f.defaultInformer) + return f.factory.InformerFor(&apiapiserverinternalv1alpha1.StorageVersion{}, f.defaultInformer) } -func (f *storageVersionInformer) Lister() v1alpha1.StorageVersionLister { - return v1alpha1.NewStorageVersionLister(f.Informer().GetIndexer()) +func (f *storageVersionInformer) Lister() apiserverinternalv1alpha1.StorageVersionLister { + return apiserverinternalv1alpha1.NewStorageVersionLister(f.Informer().GetIndexer()) } diff --git a/vendor/k8s.io/client-go/informers/apps/v1/controllerrevision.go b/vendor/k8s.io/client-go/informers/apps/v1/controllerrevision.go index 31e2b74d0..334a1b8f8 100644 --- a/vendor/k8s.io/client-go/informers/apps/v1/controllerrevision.go +++ b/vendor/k8s.io/client-go/informers/apps/v1/controllerrevision.go @@ -19,16 +19,16 @@ limitations under the License. package v1 import ( - "context" + context "context" time "time" - appsv1 "k8s.io/api/apps/v1" + apiappsv1 "k8s.io/api/apps/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1 "k8s.io/client-go/listers/apps/v1" + appsv1 "k8s.io/client-go/listers/apps/v1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // ControllerRevisions. 
type ControllerRevisionInformer interface { Informer() cache.SharedIndexInformer - Lister() v1.ControllerRevisionLister + Lister() appsv1.ControllerRevisionLister } type controllerRevisionInformer struct { @@ -71,7 +71,7 @@ func NewFilteredControllerRevisionInformer(client kubernetes.Interface, namespac return client.AppsV1().ControllerRevisions(namespace).Watch(context.TODO(), options) }, }, - &appsv1.ControllerRevision{}, + &apiappsv1.ControllerRevision{}, resyncPeriod, indexers, ) @@ -82,9 +82,9 @@ func (f *controllerRevisionInformer) defaultInformer(client kubernetes.Interface } func (f *controllerRevisionInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&appsv1.ControllerRevision{}, f.defaultInformer) + return f.factory.InformerFor(&apiappsv1.ControllerRevision{}, f.defaultInformer) } -func (f *controllerRevisionInformer) Lister() v1.ControllerRevisionLister { - return v1.NewControllerRevisionLister(f.Informer().GetIndexer()) +func (f *controllerRevisionInformer) Lister() appsv1.ControllerRevisionLister { + return appsv1.NewControllerRevisionLister(f.Informer().GetIndexer()) } diff --git a/vendor/k8s.io/client-go/informers/apps/v1/daemonset.go b/vendor/k8s.io/client-go/informers/apps/v1/daemonset.go index da7fe9509..73adf8cbf 100644 --- a/vendor/k8s.io/client-go/informers/apps/v1/daemonset.go +++ b/vendor/k8s.io/client-go/informers/apps/v1/daemonset.go @@ -19,16 +19,16 @@ limitations under the License. package v1 import ( - "context" + context "context" time "time" - appsv1 "k8s.io/api/apps/v1" + apiappsv1 "k8s.io/api/apps/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1 "k8s.io/client-go/listers/apps/v1" + appsv1 "k8s.io/client-go/listers/apps/v1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // DaemonSets. type DaemonSetInformer interface { Informer() cache.SharedIndexInformer - Lister() v1.DaemonSetLister + Lister() appsv1.DaemonSetLister } type daemonSetInformer struct { @@ -71,7 +71,7 @@ func NewFilteredDaemonSetInformer(client kubernetes.Interface, namespace string, return client.AppsV1().DaemonSets(namespace).Watch(context.TODO(), options) }, }, - &appsv1.DaemonSet{}, + &apiappsv1.DaemonSet{}, resyncPeriod, indexers, ) @@ -82,9 +82,9 @@ func (f *daemonSetInformer) defaultInformer(client kubernetes.Interface, resyncP } func (f *daemonSetInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&appsv1.DaemonSet{}, f.defaultInformer) + return f.factory.InformerFor(&apiappsv1.DaemonSet{}, f.defaultInformer) } -func (f *daemonSetInformer) Lister() v1.DaemonSetLister { - return v1.NewDaemonSetLister(f.Informer().GetIndexer()) +func (f *daemonSetInformer) Lister() appsv1.DaemonSetLister { + return appsv1.NewDaemonSetLister(f.Informer().GetIndexer()) } diff --git a/vendor/k8s.io/client-go/informers/apps/v1/deployment.go b/vendor/k8s.io/client-go/informers/apps/v1/deployment.go index bd639bb3d..f9314844c 100644 --- a/vendor/k8s.io/client-go/informers/apps/v1/deployment.go +++ b/vendor/k8s.io/client-go/informers/apps/v1/deployment.go @@ -19,16 +19,16 @@ limitations under the License. 
package v1 import ( - "context" + context "context" time "time" - appsv1 "k8s.io/api/apps/v1" + apiappsv1 "k8s.io/api/apps/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1 "k8s.io/client-go/listers/apps/v1" + appsv1 "k8s.io/client-go/listers/apps/v1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // Deployments. type DeploymentInformer interface { Informer() cache.SharedIndexInformer - Lister() v1.DeploymentLister + Lister() appsv1.DeploymentLister } type deploymentInformer struct { @@ -71,7 +71,7 @@ func NewFilteredDeploymentInformer(client kubernetes.Interface, namespace string return client.AppsV1().Deployments(namespace).Watch(context.TODO(), options) }, }, - &appsv1.Deployment{}, + &apiappsv1.Deployment{}, resyncPeriod, indexers, ) @@ -82,9 +82,9 @@ func (f *deploymentInformer) defaultInformer(client kubernetes.Interface, resync } func (f *deploymentInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&appsv1.Deployment{}, f.defaultInformer) + return f.factory.InformerFor(&apiappsv1.Deployment{}, f.defaultInformer) } -func (f *deploymentInformer) Lister() v1.DeploymentLister { - return v1.NewDeploymentLister(f.Informer().GetIndexer()) +func (f *deploymentInformer) Lister() appsv1.DeploymentLister { + return appsv1.NewDeploymentLister(f.Informer().GetIndexer()) } diff --git a/vendor/k8s.io/client-go/informers/apps/v1/replicaset.go b/vendor/k8s.io/client-go/informers/apps/v1/replicaset.go index 6d81a471a..dfa8ae87a 100644 --- a/vendor/k8s.io/client-go/informers/apps/v1/replicaset.go +++ b/vendor/k8s.io/client-go/informers/apps/v1/replicaset.go @@ -19,16 +19,16 @@ limitations under the License. package v1 import ( - "context" + context "context" time "time" - appsv1 "k8s.io/api/apps/v1" + apiappsv1 "k8s.io/api/apps/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1 "k8s.io/client-go/listers/apps/v1" + appsv1 "k8s.io/client-go/listers/apps/v1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // ReplicaSets. 
type ReplicaSetInformer interface { Informer() cache.SharedIndexInformer - Lister() v1.ReplicaSetLister + Lister() appsv1.ReplicaSetLister } type replicaSetInformer struct { @@ -71,7 +71,7 @@ func NewFilteredReplicaSetInformer(client kubernetes.Interface, namespace string return client.AppsV1().ReplicaSets(namespace).Watch(context.TODO(), options) }, }, - &appsv1.ReplicaSet{}, + &apiappsv1.ReplicaSet{}, resyncPeriod, indexers, ) @@ -82,9 +82,9 @@ func (f *replicaSetInformer) defaultInformer(client kubernetes.Interface, resync } func (f *replicaSetInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&appsv1.ReplicaSet{}, f.defaultInformer) + return f.factory.InformerFor(&apiappsv1.ReplicaSet{}, f.defaultInformer) } -func (f *replicaSetInformer) Lister() v1.ReplicaSetLister { - return v1.NewReplicaSetLister(f.Informer().GetIndexer()) +func (f *replicaSetInformer) Lister() appsv1.ReplicaSetLister { + return appsv1.NewReplicaSetLister(f.Informer().GetIndexer()) } diff --git a/vendor/k8s.io/client-go/informers/apps/v1/statefulset.go b/vendor/k8s.io/client-go/informers/apps/v1/statefulset.go index c99bbb73e..84ca50123 100644 --- a/vendor/k8s.io/client-go/informers/apps/v1/statefulset.go +++ b/vendor/k8s.io/client-go/informers/apps/v1/statefulset.go @@ -19,16 +19,16 @@ limitations under the License. package v1 import ( - "context" + context "context" time "time" - appsv1 "k8s.io/api/apps/v1" + apiappsv1 "k8s.io/api/apps/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1 "k8s.io/client-go/listers/apps/v1" + appsv1 "k8s.io/client-go/listers/apps/v1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // StatefulSets. type StatefulSetInformer interface { Informer() cache.SharedIndexInformer - Lister() v1.StatefulSetLister + Lister() appsv1.StatefulSetLister } type statefulSetInformer struct { @@ -71,7 +71,7 @@ func NewFilteredStatefulSetInformer(client kubernetes.Interface, namespace strin return client.AppsV1().StatefulSets(namespace).Watch(context.TODO(), options) }, }, - &appsv1.StatefulSet{}, + &apiappsv1.StatefulSet{}, resyncPeriod, indexers, ) @@ -82,9 +82,9 @@ func (f *statefulSetInformer) defaultInformer(client kubernetes.Interface, resyn } func (f *statefulSetInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&appsv1.StatefulSet{}, f.defaultInformer) + return f.factory.InformerFor(&apiappsv1.StatefulSet{}, f.defaultInformer) } -func (f *statefulSetInformer) Lister() v1.StatefulSetLister { - return v1.NewStatefulSetLister(f.Informer().GetIndexer()) +func (f *statefulSetInformer) Lister() appsv1.StatefulSetLister { + return appsv1.NewStatefulSetLister(f.Informer().GetIndexer()) } diff --git a/vendor/k8s.io/client-go/informers/apps/v1beta1/controllerrevision.go b/vendor/k8s.io/client-go/informers/apps/v1beta1/controllerrevision.go index cb36bd7fd..c0a51dbe3 100644 --- a/vendor/k8s.io/client-go/informers/apps/v1beta1/controllerrevision.go +++ b/vendor/k8s.io/client-go/informers/apps/v1beta1/controllerrevision.go @@ -19,16 +19,16 @@ limitations under the License. 
package v1beta1 import ( - "context" + context "context" time "time" - appsv1beta1 "k8s.io/api/apps/v1beta1" + apiappsv1beta1 "k8s.io/api/apps/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1beta1 "k8s.io/client-go/listers/apps/v1beta1" + appsv1beta1 "k8s.io/client-go/listers/apps/v1beta1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // ControllerRevisions. type ControllerRevisionInformer interface { Informer() cache.SharedIndexInformer - Lister() v1beta1.ControllerRevisionLister + Lister() appsv1beta1.ControllerRevisionLister } type controllerRevisionInformer struct { @@ -71,7 +71,7 @@ func NewFilteredControllerRevisionInformer(client kubernetes.Interface, namespac return client.AppsV1beta1().ControllerRevisions(namespace).Watch(context.TODO(), options) }, }, - &appsv1beta1.ControllerRevision{}, + &apiappsv1beta1.ControllerRevision{}, resyncPeriod, indexers, ) @@ -82,9 +82,9 @@ func (f *controllerRevisionInformer) defaultInformer(client kubernetes.Interface } func (f *controllerRevisionInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&appsv1beta1.ControllerRevision{}, f.defaultInformer) + return f.factory.InformerFor(&apiappsv1beta1.ControllerRevision{}, f.defaultInformer) } -func (f *controllerRevisionInformer) Lister() v1beta1.ControllerRevisionLister { - return v1beta1.NewControllerRevisionLister(f.Informer().GetIndexer()) +func (f *controllerRevisionInformer) Lister() appsv1beta1.ControllerRevisionLister { + return appsv1beta1.NewControllerRevisionLister(f.Informer().GetIndexer()) } diff --git a/vendor/k8s.io/client-go/informers/apps/v1beta1/deployment.go b/vendor/k8s.io/client-go/informers/apps/v1beta1/deployment.go index e02a13c2f..027ae402d 100644 --- a/vendor/k8s.io/client-go/informers/apps/v1beta1/deployment.go +++ b/vendor/k8s.io/client-go/informers/apps/v1beta1/deployment.go @@ -19,16 +19,16 @@ limitations under the License. package v1beta1 import ( - "context" + context "context" time "time" - appsv1beta1 "k8s.io/api/apps/v1beta1" + apiappsv1beta1 "k8s.io/api/apps/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1beta1 "k8s.io/client-go/listers/apps/v1beta1" + appsv1beta1 "k8s.io/client-go/listers/apps/v1beta1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // Deployments. 
type DeploymentInformer interface { Informer() cache.SharedIndexInformer - Lister() v1beta1.DeploymentLister + Lister() appsv1beta1.DeploymentLister } type deploymentInformer struct { @@ -71,7 +71,7 @@ func NewFilteredDeploymentInformer(client kubernetes.Interface, namespace string return client.AppsV1beta1().Deployments(namespace).Watch(context.TODO(), options) }, }, - &appsv1beta1.Deployment{}, + &apiappsv1beta1.Deployment{}, resyncPeriod, indexers, ) @@ -82,9 +82,9 @@ func (f *deploymentInformer) defaultInformer(client kubernetes.Interface, resync } func (f *deploymentInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&appsv1beta1.Deployment{}, f.defaultInformer) + return f.factory.InformerFor(&apiappsv1beta1.Deployment{}, f.defaultInformer) } -func (f *deploymentInformer) Lister() v1beta1.DeploymentLister { - return v1beta1.NewDeploymentLister(f.Informer().GetIndexer()) +func (f *deploymentInformer) Lister() appsv1beta1.DeploymentLister { + return appsv1beta1.NewDeploymentLister(f.Informer().GetIndexer()) } diff --git a/vendor/k8s.io/client-go/informers/apps/v1beta1/statefulset.go b/vendor/k8s.io/client-go/informers/apps/v1beta1/statefulset.go index b845cc99c..bc357d7e7 100644 --- a/vendor/k8s.io/client-go/informers/apps/v1beta1/statefulset.go +++ b/vendor/k8s.io/client-go/informers/apps/v1beta1/statefulset.go @@ -19,16 +19,16 @@ limitations under the License. package v1beta1 import ( - "context" + context "context" time "time" - appsv1beta1 "k8s.io/api/apps/v1beta1" + apiappsv1beta1 "k8s.io/api/apps/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1beta1 "k8s.io/client-go/listers/apps/v1beta1" + appsv1beta1 "k8s.io/client-go/listers/apps/v1beta1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // StatefulSets. type StatefulSetInformer interface { Informer() cache.SharedIndexInformer - Lister() v1beta1.StatefulSetLister + Lister() appsv1beta1.StatefulSetLister } type statefulSetInformer struct { @@ -71,7 +71,7 @@ func NewFilteredStatefulSetInformer(client kubernetes.Interface, namespace strin return client.AppsV1beta1().StatefulSets(namespace).Watch(context.TODO(), options) }, }, - &appsv1beta1.StatefulSet{}, + &apiappsv1beta1.StatefulSet{}, resyncPeriod, indexers, ) @@ -82,9 +82,9 @@ func (f *statefulSetInformer) defaultInformer(client kubernetes.Interface, resyn } func (f *statefulSetInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&appsv1beta1.StatefulSet{}, f.defaultInformer) + return f.factory.InformerFor(&apiappsv1beta1.StatefulSet{}, f.defaultInformer) } -func (f *statefulSetInformer) Lister() v1beta1.StatefulSetLister { - return v1beta1.NewStatefulSetLister(f.Informer().GetIndexer()) +func (f *statefulSetInformer) Lister() appsv1beta1.StatefulSetLister { + return appsv1beta1.NewStatefulSetLister(f.Informer().GetIndexer()) } diff --git a/vendor/k8s.io/client-go/informers/apps/v1beta2/controllerrevision.go b/vendor/k8s.io/client-go/informers/apps/v1beta2/controllerrevision.go index 4d0e91320..62a560fda 100644 --- a/vendor/k8s.io/client-go/informers/apps/v1beta2/controllerrevision.go +++ b/vendor/k8s.io/client-go/informers/apps/v1beta2/controllerrevision.go @@ -19,16 +19,16 @@ limitations under the License. 
package v1beta2 import ( - "context" + context "context" time "time" - appsv1beta2 "k8s.io/api/apps/v1beta2" + apiappsv1beta2 "k8s.io/api/apps/v1beta2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1beta2 "k8s.io/client-go/listers/apps/v1beta2" + appsv1beta2 "k8s.io/client-go/listers/apps/v1beta2" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // ControllerRevisions. type ControllerRevisionInformer interface { Informer() cache.SharedIndexInformer - Lister() v1beta2.ControllerRevisionLister + Lister() appsv1beta2.ControllerRevisionLister } type controllerRevisionInformer struct { @@ -71,7 +71,7 @@ func NewFilteredControllerRevisionInformer(client kubernetes.Interface, namespac return client.AppsV1beta2().ControllerRevisions(namespace).Watch(context.TODO(), options) }, }, - &appsv1beta2.ControllerRevision{}, + &apiappsv1beta2.ControllerRevision{}, resyncPeriod, indexers, ) @@ -82,9 +82,9 @@ func (f *controllerRevisionInformer) defaultInformer(client kubernetes.Interface } func (f *controllerRevisionInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&appsv1beta2.ControllerRevision{}, f.defaultInformer) + return f.factory.InformerFor(&apiappsv1beta2.ControllerRevision{}, f.defaultInformer) } -func (f *controllerRevisionInformer) Lister() v1beta2.ControllerRevisionLister { - return v1beta2.NewControllerRevisionLister(f.Informer().GetIndexer()) +func (f *controllerRevisionInformer) Lister() appsv1beta2.ControllerRevisionLister { + return appsv1beta2.NewControllerRevisionLister(f.Informer().GetIndexer()) } diff --git a/vendor/k8s.io/client-go/informers/apps/v1beta2/daemonset.go b/vendor/k8s.io/client-go/informers/apps/v1beta2/daemonset.go index 280e2fe46..9d4c8ede9 100644 --- a/vendor/k8s.io/client-go/informers/apps/v1beta2/daemonset.go +++ b/vendor/k8s.io/client-go/informers/apps/v1beta2/daemonset.go @@ -19,16 +19,16 @@ limitations under the License. package v1beta2 import ( - "context" + context "context" time "time" - appsv1beta2 "k8s.io/api/apps/v1beta2" + apiappsv1beta2 "k8s.io/api/apps/v1beta2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1beta2 "k8s.io/client-go/listers/apps/v1beta2" + appsv1beta2 "k8s.io/client-go/listers/apps/v1beta2" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // DaemonSets. 
type DaemonSetInformer interface { Informer() cache.SharedIndexInformer - Lister() v1beta2.DaemonSetLister + Lister() appsv1beta2.DaemonSetLister } type daemonSetInformer struct { @@ -71,7 +71,7 @@ func NewFilteredDaemonSetInformer(client kubernetes.Interface, namespace string, return client.AppsV1beta2().DaemonSets(namespace).Watch(context.TODO(), options) }, }, - &appsv1beta2.DaemonSet{}, + &apiappsv1beta2.DaemonSet{}, resyncPeriod, indexers, ) @@ -82,9 +82,9 @@ func (f *daemonSetInformer) defaultInformer(client kubernetes.Interface, resyncP } func (f *daemonSetInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&appsv1beta2.DaemonSet{}, f.defaultInformer) + return f.factory.InformerFor(&apiappsv1beta2.DaemonSet{}, f.defaultInformer) } -func (f *daemonSetInformer) Lister() v1beta2.DaemonSetLister { - return v1beta2.NewDaemonSetLister(f.Informer().GetIndexer()) +func (f *daemonSetInformer) Lister() appsv1beta2.DaemonSetLister { + return appsv1beta2.NewDaemonSetLister(f.Informer().GetIndexer()) } diff --git a/vendor/k8s.io/client-go/informers/apps/v1beta2/deployment.go b/vendor/k8s.io/client-go/informers/apps/v1beta2/deployment.go index 67bdb7972..be85192cf 100644 --- a/vendor/k8s.io/client-go/informers/apps/v1beta2/deployment.go +++ b/vendor/k8s.io/client-go/informers/apps/v1beta2/deployment.go @@ -19,16 +19,16 @@ limitations under the License. package v1beta2 import ( - "context" + context "context" time "time" - appsv1beta2 "k8s.io/api/apps/v1beta2" + apiappsv1beta2 "k8s.io/api/apps/v1beta2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1beta2 "k8s.io/client-go/listers/apps/v1beta2" + appsv1beta2 "k8s.io/client-go/listers/apps/v1beta2" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // Deployments. type DeploymentInformer interface { Informer() cache.SharedIndexInformer - Lister() v1beta2.DeploymentLister + Lister() appsv1beta2.DeploymentLister } type deploymentInformer struct { @@ -71,7 +71,7 @@ func NewFilteredDeploymentInformer(client kubernetes.Interface, namespace string return client.AppsV1beta2().Deployments(namespace).Watch(context.TODO(), options) }, }, - &appsv1beta2.Deployment{}, + &apiappsv1beta2.Deployment{}, resyncPeriod, indexers, ) @@ -82,9 +82,9 @@ func (f *deploymentInformer) defaultInformer(client kubernetes.Interface, resync } func (f *deploymentInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&appsv1beta2.Deployment{}, f.defaultInformer) + return f.factory.InformerFor(&apiappsv1beta2.Deployment{}, f.defaultInformer) } -func (f *deploymentInformer) Lister() v1beta2.DeploymentLister { - return v1beta2.NewDeploymentLister(f.Informer().GetIndexer()) +func (f *deploymentInformer) Lister() appsv1beta2.DeploymentLister { + return appsv1beta2.NewDeploymentLister(f.Informer().GetIndexer()) } diff --git a/vendor/k8s.io/client-go/informers/apps/v1beta2/replicaset.go b/vendor/k8s.io/client-go/informers/apps/v1beta2/replicaset.go index 85d12bb65..e5d279708 100644 --- a/vendor/k8s.io/client-go/informers/apps/v1beta2/replicaset.go +++ b/vendor/k8s.io/client-go/informers/apps/v1beta2/replicaset.go @@ -19,16 +19,16 @@ limitations under the License. 
package v1beta2 import ( - "context" + context "context" time "time" - appsv1beta2 "k8s.io/api/apps/v1beta2" + apiappsv1beta2 "k8s.io/api/apps/v1beta2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1beta2 "k8s.io/client-go/listers/apps/v1beta2" + appsv1beta2 "k8s.io/client-go/listers/apps/v1beta2" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // ReplicaSets. type ReplicaSetInformer interface { Informer() cache.SharedIndexInformer - Lister() v1beta2.ReplicaSetLister + Lister() appsv1beta2.ReplicaSetLister } type replicaSetInformer struct { @@ -71,7 +71,7 @@ func NewFilteredReplicaSetInformer(client kubernetes.Interface, namespace string return client.AppsV1beta2().ReplicaSets(namespace).Watch(context.TODO(), options) }, }, - &appsv1beta2.ReplicaSet{}, + &apiappsv1beta2.ReplicaSet{}, resyncPeriod, indexers, ) @@ -82,9 +82,9 @@ func (f *replicaSetInformer) defaultInformer(client kubernetes.Interface, resync } func (f *replicaSetInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&appsv1beta2.ReplicaSet{}, f.defaultInformer) + return f.factory.InformerFor(&apiappsv1beta2.ReplicaSet{}, f.defaultInformer) } -func (f *replicaSetInformer) Lister() v1beta2.ReplicaSetLister { - return v1beta2.NewReplicaSetLister(f.Informer().GetIndexer()) +func (f *replicaSetInformer) Lister() appsv1beta2.ReplicaSetLister { + return appsv1beta2.NewReplicaSetLister(f.Informer().GetIndexer()) } diff --git a/vendor/k8s.io/client-go/informers/apps/v1beta2/statefulset.go b/vendor/k8s.io/client-go/informers/apps/v1beta2/statefulset.go index 2fab6f7b2..d147fc885 100644 --- a/vendor/k8s.io/client-go/informers/apps/v1beta2/statefulset.go +++ b/vendor/k8s.io/client-go/informers/apps/v1beta2/statefulset.go @@ -19,16 +19,16 @@ limitations under the License. package v1beta2 import ( - "context" + context "context" time "time" - appsv1beta2 "k8s.io/api/apps/v1beta2" + apiappsv1beta2 "k8s.io/api/apps/v1beta2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1beta2 "k8s.io/client-go/listers/apps/v1beta2" + appsv1beta2 "k8s.io/client-go/listers/apps/v1beta2" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // StatefulSets. 
type StatefulSetInformer interface { Informer() cache.SharedIndexInformer - Lister() v1beta2.StatefulSetLister + Lister() appsv1beta2.StatefulSetLister } type statefulSetInformer struct { @@ -71,7 +71,7 @@ func NewFilteredStatefulSetInformer(client kubernetes.Interface, namespace strin return client.AppsV1beta2().StatefulSets(namespace).Watch(context.TODO(), options) }, }, - &appsv1beta2.StatefulSet{}, + &apiappsv1beta2.StatefulSet{}, resyncPeriod, indexers, ) @@ -82,9 +82,9 @@ func (f *statefulSetInformer) defaultInformer(client kubernetes.Interface, resyn } func (f *statefulSetInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&appsv1beta2.StatefulSet{}, f.defaultInformer) + return f.factory.InformerFor(&apiappsv1beta2.StatefulSet{}, f.defaultInformer) } -func (f *statefulSetInformer) Lister() v1beta2.StatefulSetLister { - return v1beta2.NewStatefulSetLister(f.Informer().GetIndexer()) +func (f *statefulSetInformer) Lister() appsv1beta2.StatefulSetLister { + return appsv1beta2.NewStatefulSetLister(f.Informer().GetIndexer()) } diff --git a/vendor/k8s.io/client-go/informers/autoscaling/v1/horizontalpodautoscaler.go b/vendor/k8s.io/client-go/informers/autoscaling/v1/horizontalpodautoscaler.go index 44f041e90..fce275934 100644 --- a/vendor/k8s.io/client-go/informers/autoscaling/v1/horizontalpodautoscaler.go +++ b/vendor/k8s.io/client-go/informers/autoscaling/v1/horizontalpodautoscaler.go @@ -19,16 +19,16 @@ limitations under the License. package v1 import ( - "context" + context "context" time "time" - autoscalingv1 "k8s.io/api/autoscaling/v1" + apiautoscalingv1 "k8s.io/api/autoscaling/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1 "k8s.io/client-go/listers/autoscaling/v1" + autoscalingv1 "k8s.io/client-go/listers/autoscaling/v1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // HorizontalPodAutoscalers. 
type HorizontalPodAutoscalerInformer interface { Informer() cache.SharedIndexInformer - Lister() v1.HorizontalPodAutoscalerLister + Lister() autoscalingv1.HorizontalPodAutoscalerLister } type horizontalPodAutoscalerInformer struct { @@ -71,7 +71,7 @@ func NewFilteredHorizontalPodAutoscalerInformer(client kubernetes.Interface, nam return client.AutoscalingV1().HorizontalPodAutoscalers(namespace).Watch(context.TODO(), options) }, }, - &autoscalingv1.HorizontalPodAutoscaler{}, + &apiautoscalingv1.HorizontalPodAutoscaler{}, resyncPeriod, indexers, ) @@ -82,9 +82,9 @@ func (f *horizontalPodAutoscalerInformer) defaultInformer(client kubernetes.Inte } func (f *horizontalPodAutoscalerInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&autoscalingv1.HorizontalPodAutoscaler{}, f.defaultInformer) + return f.factory.InformerFor(&apiautoscalingv1.HorizontalPodAutoscaler{}, f.defaultInformer) } -func (f *horizontalPodAutoscalerInformer) Lister() v1.HorizontalPodAutoscalerLister { - return v1.NewHorizontalPodAutoscalerLister(f.Informer().GetIndexer()) +func (f *horizontalPodAutoscalerInformer) Lister() autoscalingv1.HorizontalPodAutoscalerLister { + return autoscalingv1.NewHorizontalPodAutoscalerLister(f.Informer().GetIndexer()) } diff --git a/vendor/k8s.io/client-go/informers/autoscaling/v2/horizontalpodautoscaler.go b/vendor/k8s.io/client-go/informers/autoscaling/v2/horizontalpodautoscaler.go index 5ddb3b015..92104f822 100644 --- a/vendor/k8s.io/client-go/informers/autoscaling/v2/horizontalpodautoscaler.go +++ b/vendor/k8s.io/client-go/informers/autoscaling/v2/horizontalpodautoscaler.go @@ -19,16 +19,16 @@ limitations under the License. package v2 import ( - "context" + context "context" time "time" - autoscalingv2 "k8s.io/api/autoscaling/v2" + apiautoscalingv2 "k8s.io/api/autoscaling/v2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v2 "k8s.io/client-go/listers/autoscaling/v2" + autoscalingv2 "k8s.io/client-go/listers/autoscaling/v2" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // HorizontalPodAutoscalers. 
type HorizontalPodAutoscalerInformer interface { Informer() cache.SharedIndexInformer - Lister() v2.HorizontalPodAutoscalerLister + Lister() autoscalingv2.HorizontalPodAutoscalerLister } type horizontalPodAutoscalerInformer struct { @@ -71,7 +71,7 @@ func NewFilteredHorizontalPodAutoscalerInformer(client kubernetes.Interface, nam return client.AutoscalingV2().HorizontalPodAutoscalers(namespace).Watch(context.TODO(), options) }, }, - &autoscalingv2.HorizontalPodAutoscaler{}, + &apiautoscalingv2.HorizontalPodAutoscaler{}, resyncPeriod, indexers, ) @@ -82,9 +82,9 @@ func (f *horizontalPodAutoscalerInformer) defaultInformer(client kubernetes.Inte } func (f *horizontalPodAutoscalerInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&autoscalingv2.HorizontalPodAutoscaler{}, f.defaultInformer) + return f.factory.InformerFor(&apiautoscalingv2.HorizontalPodAutoscaler{}, f.defaultInformer) } -func (f *horizontalPodAutoscalerInformer) Lister() v2.HorizontalPodAutoscalerLister { - return v2.NewHorizontalPodAutoscalerLister(f.Informer().GetIndexer()) +func (f *horizontalPodAutoscalerInformer) Lister() autoscalingv2.HorizontalPodAutoscalerLister { + return autoscalingv2.NewHorizontalPodAutoscalerLister(f.Informer().GetIndexer()) } diff --git a/vendor/k8s.io/client-go/informers/autoscaling/v2beta1/horizontalpodautoscaler.go b/vendor/k8s.io/client-go/informers/autoscaling/v2beta1/horizontalpodautoscaler.go index 6385a2a19..b77602718 100644 --- a/vendor/k8s.io/client-go/informers/autoscaling/v2beta1/horizontalpodautoscaler.go +++ b/vendor/k8s.io/client-go/informers/autoscaling/v2beta1/horizontalpodautoscaler.go @@ -19,16 +19,16 @@ limitations under the License. package v2beta1 import ( - "context" + context "context" time "time" - autoscalingv2beta1 "k8s.io/api/autoscaling/v2beta1" + apiautoscalingv2beta1 "k8s.io/api/autoscaling/v2beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v2beta1 "k8s.io/client-go/listers/autoscaling/v2beta1" + autoscalingv2beta1 "k8s.io/client-go/listers/autoscaling/v2beta1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // HorizontalPodAutoscalers. 
type HorizontalPodAutoscalerInformer interface { Informer() cache.SharedIndexInformer - Lister() v2beta1.HorizontalPodAutoscalerLister + Lister() autoscalingv2beta1.HorizontalPodAutoscalerLister } type horizontalPodAutoscalerInformer struct { @@ -71,7 +71,7 @@ func NewFilteredHorizontalPodAutoscalerInformer(client kubernetes.Interface, nam return client.AutoscalingV2beta1().HorizontalPodAutoscalers(namespace).Watch(context.TODO(), options) }, }, - &autoscalingv2beta1.HorizontalPodAutoscaler{}, + &apiautoscalingv2beta1.HorizontalPodAutoscaler{}, resyncPeriod, indexers, ) @@ -82,9 +82,9 @@ func (f *horizontalPodAutoscalerInformer) defaultInformer(client kubernetes.Inte } func (f *horizontalPodAutoscalerInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&autoscalingv2beta1.HorizontalPodAutoscaler{}, f.defaultInformer) + return f.factory.InformerFor(&apiautoscalingv2beta1.HorizontalPodAutoscaler{}, f.defaultInformer) } -func (f *horizontalPodAutoscalerInformer) Lister() v2beta1.HorizontalPodAutoscalerLister { - return v2beta1.NewHorizontalPodAutoscalerLister(f.Informer().GetIndexer()) +func (f *horizontalPodAutoscalerInformer) Lister() autoscalingv2beta1.HorizontalPodAutoscalerLister { + return autoscalingv2beta1.NewHorizontalPodAutoscalerLister(f.Informer().GetIndexer()) } diff --git a/vendor/k8s.io/client-go/informers/autoscaling/v2beta2/horizontalpodautoscaler.go b/vendor/k8s.io/client-go/informers/autoscaling/v2beta2/horizontalpodautoscaler.go index f1ac3f073..1848429b1 100644 --- a/vendor/k8s.io/client-go/informers/autoscaling/v2beta2/horizontalpodautoscaler.go +++ b/vendor/k8s.io/client-go/informers/autoscaling/v2beta2/horizontalpodautoscaler.go @@ -19,16 +19,16 @@ limitations under the License. package v2beta2 import ( - "context" + context "context" time "time" - autoscalingv2beta2 "k8s.io/api/autoscaling/v2beta2" + apiautoscalingv2beta2 "k8s.io/api/autoscaling/v2beta2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v2beta2 "k8s.io/client-go/listers/autoscaling/v2beta2" + autoscalingv2beta2 "k8s.io/client-go/listers/autoscaling/v2beta2" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // HorizontalPodAutoscalers. 
type HorizontalPodAutoscalerInformer interface { Informer() cache.SharedIndexInformer - Lister() v2beta2.HorizontalPodAutoscalerLister + Lister() autoscalingv2beta2.HorizontalPodAutoscalerLister } type horizontalPodAutoscalerInformer struct { @@ -71,7 +71,7 @@ func NewFilteredHorizontalPodAutoscalerInformer(client kubernetes.Interface, nam return client.AutoscalingV2beta2().HorizontalPodAutoscalers(namespace).Watch(context.TODO(), options) }, }, - &autoscalingv2beta2.HorizontalPodAutoscaler{}, + &apiautoscalingv2beta2.HorizontalPodAutoscaler{}, resyncPeriod, indexers, ) @@ -82,9 +82,9 @@ func (f *horizontalPodAutoscalerInformer) defaultInformer(client kubernetes.Inte } func (f *horizontalPodAutoscalerInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&autoscalingv2beta2.HorizontalPodAutoscaler{}, f.defaultInformer) + return f.factory.InformerFor(&apiautoscalingv2beta2.HorizontalPodAutoscaler{}, f.defaultInformer) } -func (f *horizontalPodAutoscalerInformer) Lister() v2beta2.HorizontalPodAutoscalerLister { - return v2beta2.NewHorizontalPodAutoscalerLister(f.Informer().GetIndexer()) +func (f *horizontalPodAutoscalerInformer) Lister() autoscalingv2beta2.HorizontalPodAutoscalerLister { + return autoscalingv2beta2.NewHorizontalPodAutoscalerLister(f.Informer().GetIndexer()) } diff --git a/vendor/k8s.io/client-go/informers/batch/v1/cronjob.go b/vendor/k8s.io/client-go/informers/batch/v1/cronjob.go index fdfb65513..2a188acdd 100644 --- a/vendor/k8s.io/client-go/informers/batch/v1/cronjob.go +++ b/vendor/k8s.io/client-go/informers/batch/v1/cronjob.go @@ -19,16 +19,16 @@ limitations under the License. package v1 import ( - "context" + context "context" time "time" - batchv1 "k8s.io/api/batch/v1" + apibatchv1 "k8s.io/api/batch/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1 "k8s.io/client-go/listers/batch/v1" + batchv1 "k8s.io/client-go/listers/batch/v1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // CronJobs. type CronJobInformer interface { Informer() cache.SharedIndexInformer - Lister() v1.CronJobLister + Lister() batchv1.CronJobLister } type cronJobInformer struct { @@ -71,7 +71,7 @@ func NewFilteredCronJobInformer(client kubernetes.Interface, namespace string, r return client.BatchV1().CronJobs(namespace).Watch(context.TODO(), options) }, }, - &batchv1.CronJob{}, + &apibatchv1.CronJob{}, resyncPeriod, indexers, ) @@ -82,9 +82,9 @@ func (f *cronJobInformer) defaultInformer(client kubernetes.Interface, resyncPer } func (f *cronJobInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&batchv1.CronJob{}, f.defaultInformer) + return f.factory.InformerFor(&apibatchv1.CronJob{}, f.defaultInformer) } -func (f *cronJobInformer) Lister() v1.CronJobLister { - return v1.NewCronJobLister(f.Informer().GetIndexer()) +func (f *cronJobInformer) Lister() batchv1.CronJobLister { + return batchv1.NewCronJobLister(f.Informer().GetIndexer()) } diff --git a/vendor/k8s.io/client-go/informers/batch/v1/job.go b/vendor/k8s.io/client-go/informers/batch/v1/job.go index 4992f5228..439ec7a6a 100644 --- a/vendor/k8s.io/client-go/informers/batch/v1/job.go +++ b/vendor/k8s.io/client-go/informers/batch/v1/job.go @@ -19,16 +19,16 @@ limitations under the License. 
package v1 import ( - "context" + context "context" time "time" - batchv1 "k8s.io/api/batch/v1" + apibatchv1 "k8s.io/api/batch/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1 "k8s.io/client-go/listers/batch/v1" + batchv1 "k8s.io/client-go/listers/batch/v1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // Jobs. type JobInformer interface { Informer() cache.SharedIndexInformer - Lister() v1.JobLister + Lister() batchv1.JobLister } type jobInformer struct { @@ -71,7 +71,7 @@ func NewFilteredJobInformer(client kubernetes.Interface, namespace string, resyn return client.BatchV1().Jobs(namespace).Watch(context.TODO(), options) }, }, - &batchv1.Job{}, + &apibatchv1.Job{}, resyncPeriod, indexers, ) @@ -82,9 +82,9 @@ func (f *jobInformer) defaultInformer(client kubernetes.Interface, resyncPeriod } func (f *jobInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&batchv1.Job{}, f.defaultInformer) + return f.factory.InformerFor(&apibatchv1.Job{}, f.defaultInformer) } -func (f *jobInformer) Lister() v1.JobLister { - return v1.NewJobLister(f.Informer().GetIndexer()) +func (f *jobInformer) Lister() batchv1.JobLister { + return batchv1.NewJobLister(f.Informer().GetIndexer()) } diff --git a/vendor/k8s.io/client-go/informers/batch/v1beta1/cronjob.go b/vendor/k8s.io/client-go/informers/batch/v1beta1/cronjob.go index 820c93eaa..1f061e16c 100644 --- a/vendor/k8s.io/client-go/informers/batch/v1beta1/cronjob.go +++ b/vendor/k8s.io/client-go/informers/batch/v1beta1/cronjob.go @@ -19,16 +19,16 @@ limitations under the License. package v1beta1 import ( - "context" + context "context" time "time" - batchv1beta1 "k8s.io/api/batch/v1beta1" + apibatchv1beta1 "k8s.io/api/batch/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1beta1 "k8s.io/client-go/listers/batch/v1beta1" + batchv1beta1 "k8s.io/client-go/listers/batch/v1beta1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // CronJobs. 
type CronJobInformer interface { Informer() cache.SharedIndexInformer - Lister() v1beta1.CronJobLister + Lister() batchv1beta1.CronJobLister } type cronJobInformer struct { @@ -71,7 +71,7 @@ func NewFilteredCronJobInformer(client kubernetes.Interface, namespace string, r return client.BatchV1beta1().CronJobs(namespace).Watch(context.TODO(), options) }, }, - &batchv1beta1.CronJob{}, + &apibatchv1beta1.CronJob{}, resyncPeriod, indexers, ) @@ -82,9 +82,9 @@ func (f *cronJobInformer) defaultInformer(client kubernetes.Interface, resyncPer } func (f *cronJobInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&batchv1beta1.CronJob{}, f.defaultInformer) + return f.factory.InformerFor(&apibatchv1beta1.CronJob{}, f.defaultInformer) } -func (f *cronJobInformer) Lister() v1beta1.CronJobLister { - return v1beta1.NewCronJobLister(f.Informer().GetIndexer()) +func (f *cronJobInformer) Lister() batchv1beta1.CronJobLister { + return batchv1beta1.NewCronJobLister(f.Informer().GetIndexer()) } diff --git a/vendor/k8s.io/client-go/informers/certificates/v1/certificatesigningrequest.go b/vendor/k8s.io/client-go/informers/certificates/v1/certificatesigningrequest.go index 73d33a914..0bd32ab95 100644 --- a/vendor/k8s.io/client-go/informers/certificates/v1/certificatesigningrequest.go +++ b/vendor/k8s.io/client-go/informers/certificates/v1/certificatesigningrequest.go @@ -19,16 +19,16 @@ limitations under the License. package v1 import ( - "context" + context "context" time "time" - certificatesv1 "k8s.io/api/certificates/v1" + apicertificatesv1 "k8s.io/api/certificates/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1 "k8s.io/client-go/listers/certificates/v1" + certificatesv1 "k8s.io/client-go/listers/certificates/v1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // CertificateSigningRequests. 
type CertificateSigningRequestInformer interface { Informer() cache.SharedIndexInformer - Lister() v1.CertificateSigningRequestLister + Lister() certificatesv1.CertificateSigningRequestLister } type certificateSigningRequestInformer struct { @@ -70,7 +70,7 @@ func NewFilteredCertificateSigningRequestInformer(client kubernetes.Interface, r return client.CertificatesV1().CertificateSigningRequests().Watch(context.TODO(), options) }, }, - &certificatesv1.CertificateSigningRequest{}, + &apicertificatesv1.CertificateSigningRequest{}, resyncPeriod, indexers, ) @@ -81,9 +81,9 @@ func (f *certificateSigningRequestInformer) defaultInformer(client kubernetes.In } func (f *certificateSigningRequestInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&certificatesv1.CertificateSigningRequest{}, f.defaultInformer) + return f.factory.InformerFor(&apicertificatesv1.CertificateSigningRequest{}, f.defaultInformer) } -func (f *certificateSigningRequestInformer) Lister() v1.CertificateSigningRequestLister { - return v1.NewCertificateSigningRequestLister(f.Informer().GetIndexer()) +func (f *certificateSigningRequestInformer) Lister() certificatesv1.CertificateSigningRequestLister { + return certificatesv1.NewCertificateSigningRequestLister(f.Informer().GetIndexer()) } diff --git a/vendor/k8s.io/client-go/informers/certificates/v1alpha1/clustertrustbundle.go b/vendor/k8s.io/client-go/informers/certificates/v1alpha1/clustertrustbundle.go index e8b341587..046688961 100644 --- a/vendor/k8s.io/client-go/informers/certificates/v1alpha1/clustertrustbundle.go +++ b/vendor/k8s.io/client-go/informers/certificates/v1alpha1/clustertrustbundle.go @@ -19,16 +19,16 @@ limitations under the License. package v1alpha1 import ( - "context" + context "context" time "time" - certificatesv1alpha1 "k8s.io/api/certificates/v1alpha1" + apicertificatesv1alpha1 "k8s.io/api/certificates/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1alpha1 "k8s.io/client-go/listers/certificates/v1alpha1" + certificatesv1alpha1 "k8s.io/client-go/listers/certificates/v1alpha1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // ClusterTrustBundles. 
type ClusterTrustBundleInformer interface { Informer() cache.SharedIndexInformer - Lister() v1alpha1.ClusterTrustBundleLister + Lister() certificatesv1alpha1.ClusterTrustBundleLister } type clusterTrustBundleInformer struct { @@ -70,7 +70,7 @@ func NewFilteredClusterTrustBundleInformer(client kubernetes.Interface, resyncPe return client.CertificatesV1alpha1().ClusterTrustBundles().Watch(context.TODO(), options) }, }, - &certificatesv1alpha1.ClusterTrustBundle{}, + &apicertificatesv1alpha1.ClusterTrustBundle{}, resyncPeriod, indexers, ) @@ -81,9 +81,9 @@ func (f *clusterTrustBundleInformer) defaultInformer(client kubernetes.Interface } func (f *clusterTrustBundleInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&certificatesv1alpha1.ClusterTrustBundle{}, f.defaultInformer) + return f.factory.InformerFor(&apicertificatesv1alpha1.ClusterTrustBundle{}, f.defaultInformer) } -func (f *clusterTrustBundleInformer) Lister() v1alpha1.ClusterTrustBundleLister { - return v1alpha1.NewClusterTrustBundleLister(f.Informer().GetIndexer()) +func (f *clusterTrustBundleInformer) Lister() certificatesv1alpha1.ClusterTrustBundleLister { + return certificatesv1alpha1.NewClusterTrustBundleLister(f.Informer().GetIndexer()) } diff --git a/vendor/k8s.io/client-go/informers/certificates/v1beta1/certificatesigningrequest.go b/vendor/k8s.io/client-go/informers/certificates/v1beta1/certificatesigningrequest.go index 4e167ab8b..b3aff1cc8 100644 --- a/vendor/k8s.io/client-go/informers/certificates/v1beta1/certificatesigningrequest.go +++ b/vendor/k8s.io/client-go/informers/certificates/v1beta1/certificatesigningrequest.go @@ -19,16 +19,16 @@ limitations under the License. package v1beta1 import ( - "context" + context "context" time "time" - certificatesv1beta1 "k8s.io/api/certificates/v1beta1" + apicertificatesv1beta1 "k8s.io/api/certificates/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1beta1 "k8s.io/client-go/listers/certificates/v1beta1" + certificatesv1beta1 "k8s.io/client-go/listers/certificates/v1beta1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // CertificateSigningRequests. 
type CertificateSigningRequestInformer interface { Informer() cache.SharedIndexInformer - Lister() v1beta1.CertificateSigningRequestLister + Lister() certificatesv1beta1.CertificateSigningRequestLister } type certificateSigningRequestInformer struct { @@ -70,7 +70,7 @@ func NewFilteredCertificateSigningRequestInformer(client kubernetes.Interface, r return client.CertificatesV1beta1().CertificateSigningRequests().Watch(context.TODO(), options) }, }, - &certificatesv1beta1.CertificateSigningRequest{}, + &apicertificatesv1beta1.CertificateSigningRequest{}, resyncPeriod, indexers, ) @@ -81,9 +81,9 @@ func (f *certificateSigningRequestInformer) defaultInformer(client kubernetes.In } func (f *certificateSigningRequestInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&certificatesv1beta1.CertificateSigningRequest{}, f.defaultInformer) + return f.factory.InformerFor(&apicertificatesv1beta1.CertificateSigningRequest{}, f.defaultInformer) } -func (f *certificateSigningRequestInformer) Lister() v1beta1.CertificateSigningRequestLister { - return v1beta1.NewCertificateSigningRequestLister(f.Informer().GetIndexer()) +func (f *certificateSigningRequestInformer) Lister() certificatesv1beta1.CertificateSigningRequestLister { + return certificatesv1beta1.NewCertificateSigningRequestLister(f.Informer().GetIndexer()) } diff --git a/vendor/k8s.io/client-go/informers/coordination/interface.go b/vendor/k8s.io/client-go/informers/coordination/interface.go index 026b4d947..d5bde12ac 100644 --- a/vendor/k8s.io/client-go/informers/coordination/interface.go +++ b/vendor/k8s.io/client-go/informers/coordination/interface.go @@ -20,7 +20,7 @@ package coordination import ( v1 "k8s.io/client-go/informers/coordination/v1" - v1alpha1 "k8s.io/client-go/informers/coordination/v1alpha1" + v1alpha2 "k8s.io/client-go/informers/coordination/v1alpha2" v1beta1 "k8s.io/client-go/informers/coordination/v1beta1" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" ) @@ -29,8 +29,8 @@ import ( type Interface interface { // V1 provides access to shared informers for resources in V1. V1() v1.Interface - // V1alpha1 provides access to shared informers for resources in V1alpha1. - V1alpha1() v1alpha1.Interface + // V1alpha2 provides access to shared informers for resources in V1alpha2. + V1alpha2() v1alpha2.Interface // V1beta1 provides access to shared informers for resources in V1beta1. V1beta1() v1beta1.Interface } @@ -51,9 +51,9 @@ func (g *group) V1() v1.Interface { return v1.New(g.factory, g.namespace, g.tweakListOptions) } -// V1alpha1 returns a new v1alpha1.Interface. -func (g *group) V1alpha1() v1alpha1.Interface { - return v1alpha1.New(g.factory, g.namespace, g.tweakListOptions) +// V1alpha2 returns a new v1alpha2.Interface. +func (g *group) V1alpha2() v1alpha2.Interface { + return v1alpha2.New(g.factory, g.namespace, g.tweakListOptions) } // V1beta1 returns a new v1beta1.Interface. diff --git a/vendor/k8s.io/client-go/informers/coordination/v1/lease.go b/vendor/k8s.io/client-go/informers/coordination/v1/lease.go index e538923a8..0627d7309 100644 --- a/vendor/k8s.io/client-go/informers/coordination/v1/lease.go +++ b/vendor/k8s.io/client-go/informers/coordination/v1/lease.go @@ -19,16 +19,16 @@ limitations under the License. 
package v1 import ( - "context" + context "context" time "time" - coordinationv1 "k8s.io/api/coordination/v1" + apicoordinationv1 "k8s.io/api/coordination/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1 "k8s.io/client-go/listers/coordination/v1" + coordinationv1 "k8s.io/client-go/listers/coordination/v1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // Leases. type LeaseInformer interface { Informer() cache.SharedIndexInformer - Lister() v1.LeaseLister + Lister() coordinationv1.LeaseLister } type leaseInformer struct { @@ -71,7 +71,7 @@ func NewFilteredLeaseInformer(client kubernetes.Interface, namespace string, res return client.CoordinationV1().Leases(namespace).Watch(context.TODO(), options) }, }, - &coordinationv1.Lease{}, + &apicoordinationv1.Lease{}, resyncPeriod, indexers, ) @@ -82,9 +82,9 @@ func (f *leaseInformer) defaultInformer(client kubernetes.Interface, resyncPerio } func (f *leaseInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&coordinationv1.Lease{}, f.defaultInformer) + return f.factory.InformerFor(&apicoordinationv1.Lease{}, f.defaultInformer) } -func (f *leaseInformer) Lister() v1.LeaseLister { - return v1.NewLeaseLister(f.Informer().GetIndexer()) +func (f *leaseInformer) Lister() coordinationv1.LeaseLister { + return coordinationv1.NewLeaseLister(f.Informer().GetIndexer()) } diff --git a/vendor/k8s.io/client-go/informers/coordination/v1alpha1/interface.go b/vendor/k8s.io/client-go/informers/coordination/v1alpha2/interface.go similarity index 98% rename from vendor/k8s.io/client-go/informers/coordination/v1alpha1/interface.go rename to vendor/k8s.io/client-go/informers/coordination/v1alpha2/interface.go index 4058af280..ba83768ad 100644 --- a/vendor/k8s.io/client-go/informers/coordination/v1alpha1/interface.go +++ b/vendor/k8s.io/client-go/informers/coordination/v1alpha2/interface.go @@ -16,7 +16,7 @@ limitations under the License. // Code generated by informer-gen. DO NOT EDIT. -package v1alpha1 +package v1alpha2 import ( internalinterfaces "k8s.io/client-go/informers/internalinterfaces" diff --git a/vendor/k8s.io/client-go/informers/coordination/v1alpha1/leasecandidate.go b/vendor/k8s.io/client-go/informers/coordination/v1alpha2/leasecandidate.go similarity index 82% rename from vendor/k8s.io/client-go/informers/coordination/v1alpha1/leasecandidate.go rename to vendor/k8s.io/client-go/informers/coordination/v1alpha2/leasecandidate.go index 21bc47a8e..f38adf652 100644 --- a/vendor/k8s.io/client-go/informers/coordination/v1alpha1/leasecandidate.go +++ b/vendor/k8s.io/client-go/informers/coordination/v1alpha2/leasecandidate.go @@ -16,19 +16,19 @@ limitations under the License. // Code generated by informer-gen. DO NOT EDIT. 
-package v1alpha1 +package v1alpha2 import ( - "context" + context "context" time "time" - coordinationv1alpha1 "k8s.io/api/coordination/v1alpha1" + apicoordinationv1alpha2 "k8s.io/api/coordination/v1alpha2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1alpha1 "k8s.io/client-go/listers/coordination/v1alpha1" + coordinationv1alpha2 "k8s.io/client-go/listers/coordination/v1alpha2" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // LeaseCandidates. type LeaseCandidateInformer interface { Informer() cache.SharedIndexInformer - Lister() v1alpha1.LeaseCandidateLister + Lister() coordinationv1alpha2.LeaseCandidateLister } type leaseCandidateInformer struct { @@ -62,16 +62,16 @@ func NewFilteredLeaseCandidateInformer(client kubernetes.Interface, namespace st if tweakListOptions != nil { tweakListOptions(&options) } - return client.CoordinationV1alpha1().LeaseCandidates(namespace).List(context.TODO(), options) + return client.CoordinationV1alpha2().LeaseCandidates(namespace).List(context.TODO(), options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.CoordinationV1alpha1().LeaseCandidates(namespace).Watch(context.TODO(), options) + return client.CoordinationV1alpha2().LeaseCandidates(namespace).Watch(context.TODO(), options) }, }, - &coordinationv1alpha1.LeaseCandidate{}, + &apicoordinationv1alpha2.LeaseCandidate{}, resyncPeriod, indexers, ) @@ -82,9 +82,9 @@ func (f *leaseCandidateInformer) defaultInformer(client kubernetes.Interface, re } func (f *leaseCandidateInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&coordinationv1alpha1.LeaseCandidate{}, f.defaultInformer) + return f.factory.InformerFor(&apicoordinationv1alpha2.LeaseCandidate{}, f.defaultInformer) } -func (f *leaseCandidateInformer) Lister() v1alpha1.LeaseCandidateLister { - return v1alpha1.NewLeaseCandidateLister(f.Informer().GetIndexer()) +func (f *leaseCandidateInformer) Lister() coordinationv1alpha2.LeaseCandidateLister { + return coordinationv1alpha2.NewLeaseCandidateLister(f.Informer().GetIndexer()) } diff --git a/vendor/k8s.io/client-go/informers/coordination/v1beta1/lease.go b/vendor/k8s.io/client-go/informers/coordination/v1beta1/lease.go index 5a6959c0b..563a25c30 100644 --- a/vendor/k8s.io/client-go/informers/coordination/v1beta1/lease.go +++ b/vendor/k8s.io/client-go/informers/coordination/v1beta1/lease.go @@ -19,16 +19,16 @@ limitations under the License. package v1beta1 import ( - "context" + context "context" time "time" - coordinationv1beta1 "k8s.io/api/coordination/v1beta1" + apicoordinationv1beta1 "k8s.io/api/coordination/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1beta1 "k8s.io/client-go/listers/coordination/v1beta1" + coordinationv1beta1 "k8s.io/client-go/listers/coordination/v1beta1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // Leases. 
type LeaseInformer interface { Informer() cache.SharedIndexInformer - Lister() v1beta1.LeaseLister + Lister() coordinationv1beta1.LeaseLister } type leaseInformer struct { @@ -71,7 +71,7 @@ func NewFilteredLeaseInformer(client kubernetes.Interface, namespace string, res return client.CoordinationV1beta1().Leases(namespace).Watch(context.TODO(), options) }, }, - &coordinationv1beta1.Lease{}, + &apicoordinationv1beta1.Lease{}, resyncPeriod, indexers, ) @@ -82,9 +82,9 @@ func (f *leaseInformer) defaultInformer(client kubernetes.Interface, resyncPerio } func (f *leaseInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&coordinationv1beta1.Lease{}, f.defaultInformer) + return f.factory.InformerFor(&apicoordinationv1beta1.Lease{}, f.defaultInformer) } -func (f *leaseInformer) Lister() v1beta1.LeaseLister { - return v1beta1.NewLeaseLister(f.Informer().GetIndexer()) +func (f *leaseInformer) Lister() coordinationv1beta1.LeaseLister { + return coordinationv1beta1.NewLeaseLister(f.Informer().GetIndexer()) } diff --git a/vendor/k8s.io/client-go/informers/core/v1/componentstatus.go b/vendor/k8s.io/client-go/informers/core/v1/componentstatus.go index ccdee535b..2a97c638f 100644 --- a/vendor/k8s.io/client-go/informers/core/v1/componentstatus.go +++ b/vendor/k8s.io/client-go/informers/core/v1/componentstatus.go @@ -19,16 +19,16 @@ limitations under the License. package v1 import ( - "context" + context "context" time "time" - corev1 "k8s.io/api/core/v1" + apicorev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1 "k8s.io/client-go/listers/core/v1" + corev1 "k8s.io/client-go/listers/core/v1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // ComponentStatuses. type ComponentStatusInformer interface { Informer() cache.SharedIndexInformer - Lister() v1.ComponentStatusLister + Lister() corev1.ComponentStatusLister } type componentStatusInformer struct { @@ -70,7 +70,7 @@ func NewFilteredComponentStatusInformer(client kubernetes.Interface, resyncPerio return client.CoreV1().ComponentStatuses().Watch(context.TODO(), options) }, }, - &corev1.ComponentStatus{}, + &apicorev1.ComponentStatus{}, resyncPeriod, indexers, ) @@ -81,9 +81,9 @@ func (f *componentStatusInformer) defaultInformer(client kubernetes.Interface, r } func (f *componentStatusInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&corev1.ComponentStatus{}, f.defaultInformer) + return f.factory.InformerFor(&apicorev1.ComponentStatus{}, f.defaultInformer) } -func (f *componentStatusInformer) Lister() v1.ComponentStatusLister { - return v1.NewComponentStatusLister(f.Informer().GetIndexer()) +func (f *componentStatusInformer) Lister() corev1.ComponentStatusLister { + return corev1.NewComponentStatusLister(f.Informer().GetIndexer()) } diff --git a/vendor/k8s.io/client-go/informers/core/v1/configmap.go b/vendor/k8s.io/client-go/informers/core/v1/configmap.go index 625358178..07f5fb1f7 100644 --- a/vendor/k8s.io/client-go/informers/core/v1/configmap.go +++ b/vendor/k8s.io/client-go/informers/core/v1/configmap.go @@ -19,16 +19,16 @@ limitations under the License. 
package v1 import ( - "context" + context "context" time "time" - corev1 "k8s.io/api/core/v1" + apicorev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1 "k8s.io/client-go/listers/core/v1" + corev1 "k8s.io/client-go/listers/core/v1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // ConfigMaps. type ConfigMapInformer interface { Informer() cache.SharedIndexInformer - Lister() v1.ConfigMapLister + Lister() corev1.ConfigMapLister } type configMapInformer struct { @@ -71,7 +71,7 @@ func NewFilteredConfigMapInformer(client kubernetes.Interface, namespace string, return client.CoreV1().ConfigMaps(namespace).Watch(context.TODO(), options) }, }, - &corev1.ConfigMap{}, + &apicorev1.ConfigMap{}, resyncPeriod, indexers, ) @@ -82,9 +82,9 @@ func (f *configMapInformer) defaultInformer(client kubernetes.Interface, resyncP } func (f *configMapInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&corev1.ConfigMap{}, f.defaultInformer) + return f.factory.InformerFor(&apicorev1.ConfigMap{}, f.defaultInformer) } -func (f *configMapInformer) Lister() v1.ConfigMapLister { - return v1.NewConfigMapLister(f.Informer().GetIndexer()) +func (f *configMapInformer) Lister() corev1.ConfigMapLister { + return corev1.NewConfigMapLister(f.Informer().GetIndexer()) } diff --git a/vendor/k8s.io/client-go/informers/core/v1/endpoints.go b/vendor/k8s.io/client-go/informers/core/v1/endpoints.go index cd0f25b7f..6171d5df6 100644 --- a/vendor/k8s.io/client-go/informers/core/v1/endpoints.go +++ b/vendor/k8s.io/client-go/informers/core/v1/endpoints.go @@ -19,16 +19,16 @@ limitations under the License. package v1 import ( - "context" + context "context" time "time" - corev1 "k8s.io/api/core/v1" + apicorev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1 "k8s.io/client-go/listers/core/v1" + corev1 "k8s.io/client-go/listers/core/v1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // Endpoints. 
type EndpointsInformer interface { Informer() cache.SharedIndexInformer - Lister() v1.EndpointsLister + Lister() corev1.EndpointsLister } type endpointsInformer struct { @@ -71,7 +71,7 @@ func NewFilteredEndpointsInformer(client kubernetes.Interface, namespace string, return client.CoreV1().Endpoints(namespace).Watch(context.TODO(), options) }, }, - &corev1.Endpoints{}, + &apicorev1.Endpoints{}, resyncPeriod, indexers, ) @@ -82,9 +82,9 @@ func (f *endpointsInformer) defaultInformer(client kubernetes.Interface, resyncP } func (f *endpointsInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&corev1.Endpoints{}, f.defaultInformer) + return f.factory.InformerFor(&apicorev1.Endpoints{}, f.defaultInformer) } -func (f *endpointsInformer) Lister() v1.EndpointsLister { - return v1.NewEndpointsLister(f.Informer().GetIndexer()) +func (f *endpointsInformer) Lister() corev1.EndpointsLister { + return corev1.NewEndpointsLister(f.Informer().GetIndexer()) } diff --git a/vendor/k8s.io/client-go/informers/core/v1/event.go b/vendor/k8s.io/client-go/informers/core/v1/event.go index 8825e9b7a..55500679d 100644 --- a/vendor/k8s.io/client-go/informers/core/v1/event.go +++ b/vendor/k8s.io/client-go/informers/core/v1/event.go @@ -19,16 +19,16 @@ limitations under the License. package v1 import ( - "context" + context "context" time "time" - corev1 "k8s.io/api/core/v1" + apicorev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1 "k8s.io/client-go/listers/core/v1" + corev1 "k8s.io/client-go/listers/core/v1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // Events. type EventInformer interface { Informer() cache.SharedIndexInformer - Lister() v1.EventLister + Lister() corev1.EventLister } type eventInformer struct { @@ -71,7 +71,7 @@ func NewFilteredEventInformer(client kubernetes.Interface, namespace string, res return client.CoreV1().Events(namespace).Watch(context.TODO(), options) }, }, - &corev1.Event{}, + &apicorev1.Event{}, resyncPeriod, indexers, ) @@ -82,9 +82,9 @@ func (f *eventInformer) defaultInformer(client kubernetes.Interface, resyncPerio } func (f *eventInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&corev1.Event{}, f.defaultInformer) + return f.factory.InformerFor(&apicorev1.Event{}, f.defaultInformer) } -func (f *eventInformer) Lister() v1.EventLister { - return v1.NewEventLister(f.Informer().GetIndexer()) +func (f *eventInformer) Lister() corev1.EventLister { + return corev1.NewEventLister(f.Informer().GetIndexer()) } diff --git a/vendor/k8s.io/client-go/informers/core/v1/limitrange.go b/vendor/k8s.io/client-go/informers/core/v1/limitrange.go index 4cbfda1f7..2c2dec79f 100644 --- a/vendor/k8s.io/client-go/informers/core/v1/limitrange.go +++ b/vendor/k8s.io/client-go/informers/core/v1/limitrange.go @@ -19,16 +19,16 @@ limitations under the License. 
package v1 import ( - "context" + context "context" time "time" - corev1 "k8s.io/api/core/v1" + apicorev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1 "k8s.io/client-go/listers/core/v1" + corev1 "k8s.io/client-go/listers/core/v1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // LimitRanges. type LimitRangeInformer interface { Informer() cache.SharedIndexInformer - Lister() v1.LimitRangeLister + Lister() corev1.LimitRangeLister } type limitRangeInformer struct { @@ -71,7 +71,7 @@ func NewFilteredLimitRangeInformer(client kubernetes.Interface, namespace string return client.CoreV1().LimitRanges(namespace).Watch(context.TODO(), options) }, }, - &corev1.LimitRange{}, + &apicorev1.LimitRange{}, resyncPeriod, indexers, ) @@ -82,9 +82,9 @@ func (f *limitRangeInformer) defaultInformer(client kubernetes.Interface, resync } func (f *limitRangeInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&corev1.LimitRange{}, f.defaultInformer) + return f.factory.InformerFor(&apicorev1.LimitRange{}, f.defaultInformer) } -func (f *limitRangeInformer) Lister() v1.LimitRangeLister { - return v1.NewLimitRangeLister(f.Informer().GetIndexer()) +func (f *limitRangeInformer) Lister() corev1.LimitRangeLister { + return corev1.NewLimitRangeLister(f.Informer().GetIndexer()) } diff --git a/vendor/k8s.io/client-go/informers/core/v1/namespace.go b/vendor/k8s.io/client-go/informers/core/v1/namespace.go index 506f930a7..09e0740ba 100644 --- a/vendor/k8s.io/client-go/informers/core/v1/namespace.go +++ b/vendor/k8s.io/client-go/informers/core/v1/namespace.go @@ -19,16 +19,16 @@ limitations under the License. package v1 import ( - "context" + context "context" time "time" - corev1 "k8s.io/api/core/v1" + apicorev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1 "k8s.io/client-go/listers/core/v1" + corev1 "k8s.io/client-go/listers/core/v1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // Namespaces. 
type NamespaceInformer interface { Informer() cache.SharedIndexInformer - Lister() v1.NamespaceLister + Lister() corev1.NamespaceLister } type namespaceInformer struct { @@ -70,7 +70,7 @@ func NewFilteredNamespaceInformer(client kubernetes.Interface, resyncPeriod time return client.CoreV1().Namespaces().Watch(context.TODO(), options) }, }, - &corev1.Namespace{}, + &apicorev1.Namespace{}, resyncPeriod, indexers, ) @@ -81,9 +81,9 @@ func (f *namespaceInformer) defaultInformer(client kubernetes.Interface, resyncP } func (f *namespaceInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&corev1.Namespace{}, f.defaultInformer) + return f.factory.InformerFor(&apicorev1.Namespace{}, f.defaultInformer) } -func (f *namespaceInformer) Lister() v1.NamespaceLister { - return v1.NewNamespaceLister(f.Informer().GetIndexer()) +func (f *namespaceInformer) Lister() corev1.NamespaceLister { + return corev1.NewNamespaceLister(f.Informer().GetIndexer()) } diff --git a/vendor/k8s.io/client-go/informers/core/v1/node.go b/vendor/k8s.io/client-go/informers/core/v1/node.go index 9939fc2cb..608aa9fb7 100644 --- a/vendor/k8s.io/client-go/informers/core/v1/node.go +++ b/vendor/k8s.io/client-go/informers/core/v1/node.go @@ -19,16 +19,16 @@ limitations under the License. package v1 import ( - "context" + context "context" time "time" - corev1 "k8s.io/api/core/v1" + apicorev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1 "k8s.io/client-go/listers/core/v1" + corev1 "k8s.io/client-go/listers/core/v1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // Nodes. type NodeInformer interface { Informer() cache.SharedIndexInformer - Lister() v1.NodeLister + Lister() corev1.NodeLister } type nodeInformer struct { @@ -70,7 +70,7 @@ func NewFilteredNodeInformer(client kubernetes.Interface, resyncPeriod time.Dura return client.CoreV1().Nodes().Watch(context.TODO(), options) }, }, - &corev1.Node{}, + &apicorev1.Node{}, resyncPeriod, indexers, ) @@ -81,9 +81,9 @@ func (f *nodeInformer) defaultInformer(client kubernetes.Interface, resyncPeriod } func (f *nodeInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&corev1.Node{}, f.defaultInformer) + return f.factory.InformerFor(&apicorev1.Node{}, f.defaultInformer) } -func (f *nodeInformer) Lister() v1.NodeLister { - return v1.NewNodeLister(f.Informer().GetIndexer()) +func (f *nodeInformer) Lister() corev1.NodeLister { + return corev1.NewNodeLister(f.Informer().GetIndexer()) } diff --git a/vendor/k8s.io/client-go/informers/core/v1/persistentvolume.go b/vendor/k8s.io/client-go/informers/core/v1/persistentvolume.go index c82445997..19c0929e5 100644 --- a/vendor/k8s.io/client-go/informers/core/v1/persistentvolume.go +++ b/vendor/k8s.io/client-go/informers/core/v1/persistentvolume.go @@ -19,16 +19,16 @@ limitations under the License. 
package v1 import ( - "context" + context "context" time "time" - corev1 "k8s.io/api/core/v1" + apicorev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1 "k8s.io/client-go/listers/core/v1" + corev1 "k8s.io/client-go/listers/core/v1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // PersistentVolumes. type PersistentVolumeInformer interface { Informer() cache.SharedIndexInformer - Lister() v1.PersistentVolumeLister + Lister() corev1.PersistentVolumeLister } type persistentVolumeInformer struct { @@ -70,7 +70,7 @@ func NewFilteredPersistentVolumeInformer(client kubernetes.Interface, resyncPeri return client.CoreV1().PersistentVolumes().Watch(context.TODO(), options) }, }, - &corev1.PersistentVolume{}, + &apicorev1.PersistentVolume{}, resyncPeriod, indexers, ) @@ -81,9 +81,9 @@ func (f *persistentVolumeInformer) defaultInformer(client kubernetes.Interface, } func (f *persistentVolumeInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&corev1.PersistentVolume{}, f.defaultInformer) + return f.factory.InformerFor(&apicorev1.PersistentVolume{}, f.defaultInformer) } -func (f *persistentVolumeInformer) Lister() v1.PersistentVolumeLister { - return v1.NewPersistentVolumeLister(f.Informer().GetIndexer()) +func (f *persistentVolumeInformer) Lister() corev1.PersistentVolumeLister { + return corev1.NewPersistentVolumeLister(f.Informer().GetIndexer()) } diff --git a/vendor/k8s.io/client-go/informers/core/v1/persistentvolumeclaim.go b/vendor/k8s.io/client-go/informers/core/v1/persistentvolumeclaim.go index 7a7df1cff..27c35fec1 100644 --- a/vendor/k8s.io/client-go/informers/core/v1/persistentvolumeclaim.go +++ b/vendor/k8s.io/client-go/informers/core/v1/persistentvolumeclaim.go @@ -19,16 +19,16 @@ limitations under the License. package v1 import ( - "context" + context "context" time "time" - corev1 "k8s.io/api/core/v1" + apicorev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1 "k8s.io/client-go/listers/core/v1" + corev1 "k8s.io/client-go/listers/core/v1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // PersistentVolumeClaims. 
type PersistentVolumeClaimInformer interface { Informer() cache.SharedIndexInformer - Lister() v1.PersistentVolumeClaimLister + Lister() corev1.PersistentVolumeClaimLister } type persistentVolumeClaimInformer struct { @@ -71,7 +71,7 @@ func NewFilteredPersistentVolumeClaimInformer(client kubernetes.Interface, names return client.CoreV1().PersistentVolumeClaims(namespace).Watch(context.TODO(), options) }, }, - &corev1.PersistentVolumeClaim{}, + &apicorev1.PersistentVolumeClaim{}, resyncPeriod, indexers, ) @@ -82,9 +82,9 @@ func (f *persistentVolumeClaimInformer) defaultInformer(client kubernetes.Interf } func (f *persistentVolumeClaimInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&corev1.PersistentVolumeClaim{}, f.defaultInformer) + return f.factory.InformerFor(&apicorev1.PersistentVolumeClaim{}, f.defaultInformer) } -func (f *persistentVolumeClaimInformer) Lister() v1.PersistentVolumeClaimLister { - return v1.NewPersistentVolumeClaimLister(f.Informer().GetIndexer()) +func (f *persistentVolumeClaimInformer) Lister() corev1.PersistentVolumeClaimLister { + return corev1.NewPersistentVolumeClaimLister(f.Informer().GetIndexer()) } diff --git a/vendor/k8s.io/client-go/informers/core/v1/pod.go b/vendor/k8s.io/client-go/informers/core/v1/pod.go index 5c713a9b6..c661704bd 100644 --- a/vendor/k8s.io/client-go/informers/core/v1/pod.go +++ b/vendor/k8s.io/client-go/informers/core/v1/pod.go @@ -19,16 +19,16 @@ limitations under the License. package v1 import ( - "context" + context "context" time "time" - corev1 "k8s.io/api/core/v1" + apicorev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1 "k8s.io/client-go/listers/core/v1" + corev1 "k8s.io/client-go/listers/core/v1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // Pods. type PodInformer interface { Informer() cache.SharedIndexInformer - Lister() v1.PodLister + Lister() corev1.PodLister } type podInformer struct { @@ -71,7 +71,7 @@ func NewFilteredPodInformer(client kubernetes.Interface, namespace string, resyn return client.CoreV1().Pods(namespace).Watch(context.TODO(), options) }, }, - &corev1.Pod{}, + &apicorev1.Pod{}, resyncPeriod, indexers, ) @@ -82,9 +82,9 @@ func (f *podInformer) defaultInformer(client kubernetes.Interface, resyncPeriod } func (f *podInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&corev1.Pod{}, f.defaultInformer) + return f.factory.InformerFor(&apicorev1.Pod{}, f.defaultInformer) } -func (f *podInformer) Lister() v1.PodLister { - return v1.NewPodLister(f.Informer().GetIndexer()) +func (f *podInformer) Lister() corev1.PodLister { + return corev1.NewPodLister(f.Informer().GetIndexer()) } diff --git a/vendor/k8s.io/client-go/informers/core/v1/podtemplate.go b/vendor/k8s.io/client-go/informers/core/v1/podtemplate.go index 2a16e910d..0d16c5b4e 100644 --- a/vendor/k8s.io/client-go/informers/core/v1/podtemplate.go +++ b/vendor/k8s.io/client-go/informers/core/v1/podtemplate.go @@ -19,16 +19,16 @@ limitations under the License. 
package v1 import ( - "context" + context "context" time "time" - corev1 "k8s.io/api/core/v1" + apicorev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1 "k8s.io/client-go/listers/core/v1" + corev1 "k8s.io/client-go/listers/core/v1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // PodTemplates. type PodTemplateInformer interface { Informer() cache.SharedIndexInformer - Lister() v1.PodTemplateLister + Lister() corev1.PodTemplateLister } type podTemplateInformer struct { @@ -71,7 +71,7 @@ func NewFilteredPodTemplateInformer(client kubernetes.Interface, namespace strin return client.CoreV1().PodTemplates(namespace).Watch(context.TODO(), options) }, }, - &corev1.PodTemplate{}, + &apicorev1.PodTemplate{}, resyncPeriod, indexers, ) @@ -82,9 +82,9 @@ func (f *podTemplateInformer) defaultInformer(client kubernetes.Interface, resyn } func (f *podTemplateInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&corev1.PodTemplate{}, f.defaultInformer) + return f.factory.InformerFor(&apicorev1.PodTemplate{}, f.defaultInformer) } -func (f *podTemplateInformer) Lister() v1.PodTemplateLister { - return v1.NewPodTemplateLister(f.Informer().GetIndexer()) +func (f *podTemplateInformer) Lister() corev1.PodTemplateLister { + return corev1.NewPodTemplateLister(f.Informer().GetIndexer()) } diff --git a/vendor/k8s.io/client-go/informers/core/v1/replicationcontroller.go b/vendor/k8s.io/client-go/informers/core/v1/replicationcontroller.go index 930beb4cd..5866ec151 100644 --- a/vendor/k8s.io/client-go/informers/core/v1/replicationcontroller.go +++ b/vendor/k8s.io/client-go/informers/core/v1/replicationcontroller.go @@ -19,16 +19,16 @@ limitations under the License. package v1 import ( - "context" + context "context" time "time" - corev1 "k8s.io/api/core/v1" + apicorev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1 "k8s.io/client-go/listers/core/v1" + corev1 "k8s.io/client-go/listers/core/v1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // ReplicationControllers. 
type ReplicationControllerInformer interface { Informer() cache.SharedIndexInformer - Lister() v1.ReplicationControllerLister + Lister() corev1.ReplicationControllerLister } type replicationControllerInformer struct { @@ -71,7 +71,7 @@ func NewFilteredReplicationControllerInformer(client kubernetes.Interface, names return client.CoreV1().ReplicationControllers(namespace).Watch(context.TODO(), options) }, }, - &corev1.ReplicationController{}, + &apicorev1.ReplicationController{}, resyncPeriod, indexers, ) @@ -82,9 +82,9 @@ func (f *replicationControllerInformer) defaultInformer(client kubernetes.Interf } func (f *replicationControllerInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&corev1.ReplicationController{}, f.defaultInformer) + return f.factory.InformerFor(&apicorev1.ReplicationController{}, f.defaultInformer) } -func (f *replicationControllerInformer) Lister() v1.ReplicationControllerLister { - return v1.NewReplicationControllerLister(f.Informer().GetIndexer()) +func (f *replicationControllerInformer) Lister() corev1.ReplicationControllerLister { + return corev1.NewReplicationControllerLister(f.Informer().GetIndexer()) } diff --git a/vendor/k8s.io/client-go/informers/core/v1/resourcequota.go b/vendor/k8s.io/client-go/informers/core/v1/resourcequota.go index 619262a61..999b49546 100644 --- a/vendor/k8s.io/client-go/informers/core/v1/resourcequota.go +++ b/vendor/k8s.io/client-go/informers/core/v1/resourcequota.go @@ -19,16 +19,16 @@ limitations under the License. package v1 import ( - "context" + context "context" time "time" - corev1 "k8s.io/api/core/v1" + apicorev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1 "k8s.io/client-go/listers/core/v1" + corev1 "k8s.io/client-go/listers/core/v1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // ResourceQuotas. type ResourceQuotaInformer interface { Informer() cache.SharedIndexInformer - Lister() v1.ResourceQuotaLister + Lister() corev1.ResourceQuotaLister } type resourceQuotaInformer struct { @@ -71,7 +71,7 @@ func NewFilteredResourceQuotaInformer(client kubernetes.Interface, namespace str return client.CoreV1().ResourceQuotas(namespace).Watch(context.TODO(), options) }, }, - &corev1.ResourceQuota{}, + &apicorev1.ResourceQuota{}, resyncPeriod, indexers, ) @@ -82,9 +82,9 @@ func (f *resourceQuotaInformer) defaultInformer(client kubernetes.Interface, res } func (f *resourceQuotaInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&corev1.ResourceQuota{}, f.defaultInformer) + return f.factory.InformerFor(&apicorev1.ResourceQuota{}, f.defaultInformer) } -func (f *resourceQuotaInformer) Lister() v1.ResourceQuotaLister { - return v1.NewResourceQuotaLister(f.Informer().GetIndexer()) +func (f *resourceQuotaInformer) Lister() corev1.ResourceQuotaLister { + return corev1.NewResourceQuotaLister(f.Informer().GetIndexer()) } diff --git a/vendor/k8s.io/client-go/informers/core/v1/secret.go b/vendor/k8s.io/client-go/informers/core/v1/secret.go index a6be07069..f3d371501 100644 --- a/vendor/k8s.io/client-go/informers/core/v1/secret.go +++ b/vendor/k8s.io/client-go/informers/core/v1/secret.go @@ -19,16 +19,16 @@ limitations under the License. 
package v1 import ( - "context" + context "context" time "time" - corev1 "k8s.io/api/core/v1" + apicorev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1 "k8s.io/client-go/listers/core/v1" + corev1 "k8s.io/client-go/listers/core/v1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // Secrets. type SecretInformer interface { Informer() cache.SharedIndexInformer - Lister() v1.SecretLister + Lister() corev1.SecretLister } type secretInformer struct { @@ -71,7 +71,7 @@ func NewFilteredSecretInformer(client kubernetes.Interface, namespace string, re return client.CoreV1().Secrets(namespace).Watch(context.TODO(), options) }, }, - &corev1.Secret{}, + &apicorev1.Secret{}, resyncPeriod, indexers, ) @@ -82,9 +82,9 @@ func (f *secretInformer) defaultInformer(client kubernetes.Interface, resyncPeri } func (f *secretInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&corev1.Secret{}, f.defaultInformer) + return f.factory.InformerFor(&apicorev1.Secret{}, f.defaultInformer) } -func (f *secretInformer) Lister() v1.SecretLister { - return v1.NewSecretLister(f.Informer().GetIndexer()) +func (f *secretInformer) Lister() corev1.SecretLister { + return corev1.NewSecretLister(f.Informer().GetIndexer()) } diff --git a/vendor/k8s.io/client-go/informers/core/v1/service.go b/vendor/k8s.io/client-go/informers/core/v1/service.go index 3d9ecc6e9..c4bc294a3 100644 --- a/vendor/k8s.io/client-go/informers/core/v1/service.go +++ b/vendor/k8s.io/client-go/informers/core/v1/service.go @@ -19,16 +19,16 @@ limitations under the License. package v1 import ( - "context" + context "context" time "time" - corev1 "k8s.io/api/core/v1" + apicorev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1 "k8s.io/client-go/listers/core/v1" + corev1 "k8s.io/client-go/listers/core/v1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // Services. 
type ServiceInformer interface { Informer() cache.SharedIndexInformer - Lister() v1.ServiceLister + Lister() corev1.ServiceLister } type serviceInformer struct { @@ -71,7 +71,7 @@ func NewFilteredServiceInformer(client kubernetes.Interface, namespace string, r return client.CoreV1().Services(namespace).Watch(context.TODO(), options) }, }, - &corev1.Service{}, + &apicorev1.Service{}, resyncPeriod, indexers, ) @@ -82,9 +82,9 @@ func (f *serviceInformer) defaultInformer(client kubernetes.Interface, resyncPer } func (f *serviceInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&corev1.Service{}, f.defaultInformer) + return f.factory.InformerFor(&apicorev1.Service{}, f.defaultInformer) } -func (f *serviceInformer) Lister() v1.ServiceLister { - return v1.NewServiceLister(f.Informer().GetIndexer()) +func (f *serviceInformer) Lister() corev1.ServiceLister { + return corev1.NewServiceLister(f.Informer().GetIndexer()) } diff --git a/vendor/k8s.io/client-go/informers/core/v1/serviceaccount.go b/vendor/k8s.io/client-go/informers/core/v1/serviceaccount.go index 44371c9fa..b04b44cb4 100644 --- a/vendor/k8s.io/client-go/informers/core/v1/serviceaccount.go +++ b/vendor/k8s.io/client-go/informers/core/v1/serviceaccount.go @@ -19,16 +19,16 @@ limitations under the License. package v1 import ( - "context" + context "context" time "time" - corev1 "k8s.io/api/core/v1" + apicorev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1 "k8s.io/client-go/listers/core/v1" + corev1 "k8s.io/client-go/listers/core/v1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // ServiceAccounts. type ServiceAccountInformer interface { Informer() cache.SharedIndexInformer - Lister() v1.ServiceAccountLister + Lister() corev1.ServiceAccountLister } type serviceAccountInformer struct { @@ -71,7 +71,7 @@ func NewFilteredServiceAccountInformer(client kubernetes.Interface, namespace st return client.CoreV1().ServiceAccounts(namespace).Watch(context.TODO(), options) }, }, - &corev1.ServiceAccount{}, + &apicorev1.ServiceAccount{}, resyncPeriod, indexers, ) @@ -82,9 +82,9 @@ func (f *serviceAccountInformer) defaultInformer(client kubernetes.Interface, re } func (f *serviceAccountInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&corev1.ServiceAccount{}, f.defaultInformer) + return f.factory.InformerFor(&apicorev1.ServiceAccount{}, f.defaultInformer) } -func (f *serviceAccountInformer) Lister() v1.ServiceAccountLister { - return v1.NewServiceAccountLister(f.Informer().GetIndexer()) +func (f *serviceAccountInformer) Lister() corev1.ServiceAccountLister { + return corev1.NewServiceAccountLister(f.Informer().GetIndexer()) } diff --git a/vendor/k8s.io/client-go/informers/discovery/v1/endpointslice.go b/vendor/k8s.io/client-go/informers/discovery/v1/endpointslice.go index 6c6c3372b..ec09b2d26 100644 --- a/vendor/k8s.io/client-go/informers/discovery/v1/endpointslice.go +++ b/vendor/k8s.io/client-go/informers/discovery/v1/endpointslice.go @@ -19,16 +19,16 @@ limitations under the License. 
package v1 import ( - "context" + context "context" time "time" - discoveryv1 "k8s.io/api/discovery/v1" + apidiscoveryv1 "k8s.io/api/discovery/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1 "k8s.io/client-go/listers/discovery/v1" + discoveryv1 "k8s.io/client-go/listers/discovery/v1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // EndpointSlices. type EndpointSliceInformer interface { Informer() cache.SharedIndexInformer - Lister() v1.EndpointSliceLister + Lister() discoveryv1.EndpointSliceLister } type endpointSliceInformer struct { @@ -71,7 +71,7 @@ func NewFilteredEndpointSliceInformer(client kubernetes.Interface, namespace str return client.DiscoveryV1().EndpointSlices(namespace).Watch(context.TODO(), options) }, }, - &discoveryv1.EndpointSlice{}, + &apidiscoveryv1.EndpointSlice{}, resyncPeriod, indexers, ) @@ -82,9 +82,9 @@ func (f *endpointSliceInformer) defaultInformer(client kubernetes.Interface, res } func (f *endpointSliceInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&discoveryv1.EndpointSlice{}, f.defaultInformer) + return f.factory.InformerFor(&apidiscoveryv1.EndpointSlice{}, f.defaultInformer) } -func (f *endpointSliceInformer) Lister() v1.EndpointSliceLister { - return v1.NewEndpointSliceLister(f.Informer().GetIndexer()) +func (f *endpointSliceInformer) Lister() discoveryv1.EndpointSliceLister { + return discoveryv1.NewEndpointSliceLister(f.Informer().GetIndexer()) } diff --git a/vendor/k8s.io/client-go/informers/discovery/v1beta1/endpointslice.go b/vendor/k8s.io/client-go/informers/discovery/v1beta1/endpointslice.go index 69ae38a91..3af1a3be9 100644 --- a/vendor/k8s.io/client-go/informers/discovery/v1beta1/endpointslice.go +++ b/vendor/k8s.io/client-go/informers/discovery/v1beta1/endpointslice.go @@ -19,16 +19,16 @@ limitations under the License. package v1beta1 import ( - "context" + context "context" time "time" - discoveryv1beta1 "k8s.io/api/discovery/v1beta1" + apidiscoveryv1beta1 "k8s.io/api/discovery/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1beta1 "k8s.io/client-go/listers/discovery/v1beta1" + discoveryv1beta1 "k8s.io/client-go/listers/discovery/v1beta1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // EndpointSlices. 
type EndpointSliceInformer interface { Informer() cache.SharedIndexInformer - Lister() v1beta1.EndpointSliceLister + Lister() discoveryv1beta1.EndpointSliceLister } type endpointSliceInformer struct { @@ -71,7 +71,7 @@ func NewFilteredEndpointSliceInformer(client kubernetes.Interface, namespace str return client.DiscoveryV1beta1().EndpointSlices(namespace).Watch(context.TODO(), options) }, }, - &discoveryv1beta1.EndpointSlice{}, + &apidiscoveryv1beta1.EndpointSlice{}, resyncPeriod, indexers, ) @@ -82,9 +82,9 @@ func (f *endpointSliceInformer) defaultInformer(client kubernetes.Interface, res } func (f *endpointSliceInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&discoveryv1beta1.EndpointSlice{}, f.defaultInformer) + return f.factory.InformerFor(&apidiscoveryv1beta1.EndpointSlice{}, f.defaultInformer) } -func (f *endpointSliceInformer) Lister() v1beta1.EndpointSliceLister { - return v1beta1.NewEndpointSliceLister(f.Informer().GetIndexer()) +func (f *endpointSliceInformer) Lister() discoveryv1beta1.EndpointSliceLister { + return discoveryv1beta1.NewEndpointSliceLister(f.Informer().GetIndexer()) } diff --git a/vendor/k8s.io/client-go/informers/events/v1/event.go b/vendor/k8s.io/client-go/informers/events/v1/event.go index f8d35ee15..518d79841 100644 --- a/vendor/k8s.io/client-go/informers/events/v1/event.go +++ b/vendor/k8s.io/client-go/informers/events/v1/event.go @@ -19,16 +19,16 @@ limitations under the License. package v1 import ( - "context" + context "context" time "time" - eventsv1 "k8s.io/api/events/v1" + apieventsv1 "k8s.io/api/events/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1 "k8s.io/client-go/listers/events/v1" + eventsv1 "k8s.io/client-go/listers/events/v1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // Events. type EventInformer interface { Informer() cache.SharedIndexInformer - Lister() v1.EventLister + Lister() eventsv1.EventLister } type eventInformer struct { @@ -71,7 +71,7 @@ func NewFilteredEventInformer(client kubernetes.Interface, namespace string, res return client.EventsV1().Events(namespace).Watch(context.TODO(), options) }, }, - &eventsv1.Event{}, + &apieventsv1.Event{}, resyncPeriod, indexers, ) @@ -82,9 +82,9 @@ func (f *eventInformer) defaultInformer(client kubernetes.Interface, resyncPerio } func (f *eventInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&eventsv1.Event{}, f.defaultInformer) + return f.factory.InformerFor(&apieventsv1.Event{}, f.defaultInformer) } -func (f *eventInformer) Lister() v1.EventLister { - return v1.NewEventLister(f.Informer().GetIndexer()) +func (f *eventInformer) Lister() eventsv1.EventLister { + return eventsv1.NewEventLister(f.Informer().GetIndexer()) } diff --git a/vendor/k8s.io/client-go/informers/events/v1beta1/event.go b/vendor/k8s.io/client-go/informers/events/v1beta1/event.go index 025f6a5cf..5324599bb 100644 --- a/vendor/k8s.io/client-go/informers/events/v1beta1/event.go +++ b/vendor/k8s.io/client-go/informers/events/v1beta1/event.go @@ -19,16 +19,16 @@ limitations under the License. 
package v1beta1 import ( - "context" + context "context" time "time" - eventsv1beta1 "k8s.io/api/events/v1beta1" + apieventsv1beta1 "k8s.io/api/events/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1beta1 "k8s.io/client-go/listers/events/v1beta1" + eventsv1beta1 "k8s.io/client-go/listers/events/v1beta1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // Events. type EventInformer interface { Informer() cache.SharedIndexInformer - Lister() v1beta1.EventLister + Lister() eventsv1beta1.EventLister } type eventInformer struct { @@ -71,7 +71,7 @@ func NewFilteredEventInformer(client kubernetes.Interface, namespace string, res return client.EventsV1beta1().Events(namespace).Watch(context.TODO(), options) }, }, - &eventsv1beta1.Event{}, + &apieventsv1beta1.Event{}, resyncPeriod, indexers, ) @@ -82,9 +82,9 @@ func (f *eventInformer) defaultInformer(client kubernetes.Interface, resyncPerio } func (f *eventInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&eventsv1beta1.Event{}, f.defaultInformer) + return f.factory.InformerFor(&apieventsv1beta1.Event{}, f.defaultInformer) } -func (f *eventInformer) Lister() v1beta1.EventLister { - return v1beta1.NewEventLister(f.Informer().GetIndexer()) +func (f *eventInformer) Lister() eventsv1beta1.EventLister { + return eventsv1beta1.NewEventLister(f.Informer().GetIndexer()) } diff --git a/vendor/k8s.io/client-go/informers/extensions/v1beta1/daemonset.go b/vendor/k8s.io/client-go/informers/extensions/v1beta1/daemonset.go index 050080a59..ea77575c9 100644 --- a/vendor/k8s.io/client-go/informers/extensions/v1beta1/daemonset.go +++ b/vendor/k8s.io/client-go/informers/extensions/v1beta1/daemonset.go @@ -19,16 +19,16 @@ limitations under the License. package v1beta1 import ( - "context" + context "context" time "time" - extensionsv1beta1 "k8s.io/api/extensions/v1beta1" + apiextensionsv1beta1 "k8s.io/api/extensions/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1beta1 "k8s.io/client-go/listers/extensions/v1beta1" + extensionsv1beta1 "k8s.io/client-go/listers/extensions/v1beta1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // DaemonSets. 
type DaemonSetInformer interface { Informer() cache.SharedIndexInformer - Lister() v1beta1.DaemonSetLister + Lister() extensionsv1beta1.DaemonSetLister } type daemonSetInformer struct { @@ -71,7 +71,7 @@ func NewFilteredDaemonSetInformer(client kubernetes.Interface, namespace string, return client.ExtensionsV1beta1().DaemonSets(namespace).Watch(context.TODO(), options) }, }, - &extensionsv1beta1.DaemonSet{}, + &apiextensionsv1beta1.DaemonSet{}, resyncPeriod, indexers, ) @@ -82,9 +82,9 @@ func (f *daemonSetInformer) defaultInformer(client kubernetes.Interface, resyncP } func (f *daemonSetInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&extensionsv1beta1.DaemonSet{}, f.defaultInformer) + return f.factory.InformerFor(&apiextensionsv1beta1.DaemonSet{}, f.defaultInformer) } -func (f *daemonSetInformer) Lister() v1beta1.DaemonSetLister { - return v1beta1.NewDaemonSetLister(f.Informer().GetIndexer()) +func (f *daemonSetInformer) Lister() extensionsv1beta1.DaemonSetLister { + return extensionsv1beta1.NewDaemonSetLister(f.Informer().GetIndexer()) } diff --git a/vendor/k8s.io/client-go/informers/extensions/v1beta1/deployment.go b/vendor/k8s.io/client-go/informers/extensions/v1beta1/deployment.go index 1b16c5cc9..1b2770ce0 100644 --- a/vendor/k8s.io/client-go/informers/extensions/v1beta1/deployment.go +++ b/vendor/k8s.io/client-go/informers/extensions/v1beta1/deployment.go @@ -19,16 +19,16 @@ limitations under the License. package v1beta1 import ( - "context" + context "context" time "time" - extensionsv1beta1 "k8s.io/api/extensions/v1beta1" + apiextensionsv1beta1 "k8s.io/api/extensions/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1beta1 "k8s.io/client-go/listers/extensions/v1beta1" + extensionsv1beta1 "k8s.io/client-go/listers/extensions/v1beta1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // Deployments. type DeploymentInformer interface { Informer() cache.SharedIndexInformer - Lister() v1beta1.DeploymentLister + Lister() extensionsv1beta1.DeploymentLister } type deploymentInformer struct { @@ -71,7 +71,7 @@ func NewFilteredDeploymentInformer(client kubernetes.Interface, namespace string return client.ExtensionsV1beta1().Deployments(namespace).Watch(context.TODO(), options) }, }, - &extensionsv1beta1.Deployment{}, + &apiextensionsv1beta1.Deployment{}, resyncPeriod, indexers, ) @@ -82,9 +82,9 @@ func (f *deploymentInformer) defaultInformer(client kubernetes.Interface, resync } func (f *deploymentInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&extensionsv1beta1.Deployment{}, f.defaultInformer) + return f.factory.InformerFor(&apiextensionsv1beta1.Deployment{}, f.defaultInformer) } -func (f *deploymentInformer) Lister() v1beta1.DeploymentLister { - return v1beta1.NewDeploymentLister(f.Informer().GetIndexer()) +func (f *deploymentInformer) Lister() extensionsv1beta1.DeploymentLister { + return extensionsv1beta1.NewDeploymentLister(f.Informer().GetIndexer()) } diff --git a/vendor/k8s.io/client-go/informers/extensions/v1beta1/ingress.go b/vendor/k8s.io/client-go/informers/extensions/v1beta1/ingress.go index f01a88761..63e734060 100644 --- a/vendor/k8s.io/client-go/informers/extensions/v1beta1/ingress.go +++ b/vendor/k8s.io/client-go/informers/extensions/v1beta1/ingress.go @@ -19,16 +19,16 @@ limitations under the License. 
package v1beta1 import ( - "context" + context "context" time "time" - extensionsv1beta1 "k8s.io/api/extensions/v1beta1" + apiextensionsv1beta1 "k8s.io/api/extensions/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1beta1 "k8s.io/client-go/listers/extensions/v1beta1" + extensionsv1beta1 "k8s.io/client-go/listers/extensions/v1beta1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // Ingresses. type IngressInformer interface { Informer() cache.SharedIndexInformer - Lister() v1beta1.IngressLister + Lister() extensionsv1beta1.IngressLister } type ingressInformer struct { @@ -71,7 +71,7 @@ func NewFilteredIngressInformer(client kubernetes.Interface, namespace string, r return client.ExtensionsV1beta1().Ingresses(namespace).Watch(context.TODO(), options) }, }, - &extensionsv1beta1.Ingress{}, + &apiextensionsv1beta1.Ingress{}, resyncPeriod, indexers, ) @@ -82,9 +82,9 @@ func (f *ingressInformer) defaultInformer(client kubernetes.Interface, resyncPer } func (f *ingressInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&extensionsv1beta1.Ingress{}, f.defaultInformer) + return f.factory.InformerFor(&apiextensionsv1beta1.Ingress{}, f.defaultInformer) } -func (f *ingressInformer) Lister() v1beta1.IngressLister { - return v1beta1.NewIngressLister(f.Informer().GetIndexer()) +func (f *ingressInformer) Lister() extensionsv1beta1.IngressLister { + return extensionsv1beta1.NewIngressLister(f.Informer().GetIndexer()) } diff --git a/vendor/k8s.io/client-go/informers/extensions/v1beta1/networkpolicy.go b/vendor/k8s.io/client-go/informers/extensions/v1beta1/networkpolicy.go index 4a924619f..024653af2 100644 --- a/vendor/k8s.io/client-go/informers/extensions/v1beta1/networkpolicy.go +++ b/vendor/k8s.io/client-go/informers/extensions/v1beta1/networkpolicy.go @@ -19,16 +19,16 @@ limitations under the License. package v1beta1 import ( - "context" + context "context" time "time" - extensionsv1beta1 "k8s.io/api/extensions/v1beta1" + apiextensionsv1beta1 "k8s.io/api/extensions/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1beta1 "k8s.io/client-go/listers/extensions/v1beta1" + extensionsv1beta1 "k8s.io/client-go/listers/extensions/v1beta1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // NetworkPolicies. 
type NetworkPolicyInformer interface { Informer() cache.SharedIndexInformer - Lister() v1beta1.NetworkPolicyLister + Lister() extensionsv1beta1.NetworkPolicyLister } type networkPolicyInformer struct { @@ -71,7 +71,7 @@ func NewFilteredNetworkPolicyInformer(client kubernetes.Interface, namespace str return client.ExtensionsV1beta1().NetworkPolicies(namespace).Watch(context.TODO(), options) }, }, - &extensionsv1beta1.NetworkPolicy{}, + &apiextensionsv1beta1.NetworkPolicy{}, resyncPeriod, indexers, ) @@ -82,9 +82,9 @@ func (f *networkPolicyInformer) defaultInformer(client kubernetes.Interface, res } func (f *networkPolicyInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&extensionsv1beta1.NetworkPolicy{}, f.defaultInformer) + return f.factory.InformerFor(&apiextensionsv1beta1.NetworkPolicy{}, f.defaultInformer) } -func (f *networkPolicyInformer) Lister() v1beta1.NetworkPolicyLister { - return v1beta1.NewNetworkPolicyLister(f.Informer().GetIndexer()) +func (f *networkPolicyInformer) Lister() extensionsv1beta1.NetworkPolicyLister { + return extensionsv1beta1.NewNetworkPolicyLister(f.Informer().GetIndexer()) } diff --git a/vendor/k8s.io/client-go/informers/extensions/v1beta1/replicaset.go b/vendor/k8s.io/client-go/informers/extensions/v1beta1/replicaset.go index f7e224bcf..392ccef86 100644 --- a/vendor/k8s.io/client-go/informers/extensions/v1beta1/replicaset.go +++ b/vendor/k8s.io/client-go/informers/extensions/v1beta1/replicaset.go @@ -19,16 +19,16 @@ limitations under the License. package v1beta1 import ( - "context" + context "context" time "time" - extensionsv1beta1 "k8s.io/api/extensions/v1beta1" + apiextensionsv1beta1 "k8s.io/api/extensions/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1beta1 "k8s.io/client-go/listers/extensions/v1beta1" + extensionsv1beta1 "k8s.io/client-go/listers/extensions/v1beta1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // ReplicaSets. 
type ReplicaSetInformer interface { Informer() cache.SharedIndexInformer - Lister() v1beta1.ReplicaSetLister + Lister() extensionsv1beta1.ReplicaSetLister } type replicaSetInformer struct { @@ -71,7 +71,7 @@ func NewFilteredReplicaSetInformer(client kubernetes.Interface, namespace string return client.ExtensionsV1beta1().ReplicaSets(namespace).Watch(context.TODO(), options) }, }, - &extensionsv1beta1.ReplicaSet{}, + &apiextensionsv1beta1.ReplicaSet{}, resyncPeriod, indexers, ) @@ -82,9 +82,9 @@ func (f *replicaSetInformer) defaultInformer(client kubernetes.Interface, resync } func (f *replicaSetInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&extensionsv1beta1.ReplicaSet{}, f.defaultInformer) + return f.factory.InformerFor(&apiextensionsv1beta1.ReplicaSet{}, f.defaultInformer) } -func (f *replicaSetInformer) Lister() v1beta1.ReplicaSetLister { - return v1beta1.NewReplicaSetLister(f.Informer().GetIndexer()) +func (f *replicaSetInformer) Lister() extensionsv1beta1.ReplicaSetLister { + return extensionsv1beta1.NewReplicaSetLister(f.Informer().GetIndexer()) } diff --git a/vendor/k8s.io/client-go/informers/flowcontrol/v1/flowschema.go b/vendor/k8s.io/client-go/informers/flowcontrol/v1/flowschema.go index 30c41b189..945bc351e 100644 --- a/vendor/k8s.io/client-go/informers/flowcontrol/v1/flowschema.go +++ b/vendor/k8s.io/client-go/informers/flowcontrol/v1/flowschema.go @@ -19,16 +19,16 @@ limitations under the License. package v1 import ( - "context" + context "context" time "time" - flowcontrolv1 "k8s.io/api/flowcontrol/v1" + apiflowcontrolv1 "k8s.io/api/flowcontrol/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1 "k8s.io/client-go/listers/flowcontrol/v1" + flowcontrolv1 "k8s.io/client-go/listers/flowcontrol/v1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // FlowSchemas. type FlowSchemaInformer interface { Informer() cache.SharedIndexInformer - Lister() v1.FlowSchemaLister + Lister() flowcontrolv1.FlowSchemaLister } type flowSchemaInformer struct { @@ -70,7 +70,7 @@ func NewFilteredFlowSchemaInformer(client kubernetes.Interface, resyncPeriod tim return client.FlowcontrolV1().FlowSchemas().Watch(context.TODO(), options) }, }, - &flowcontrolv1.FlowSchema{}, + &apiflowcontrolv1.FlowSchema{}, resyncPeriod, indexers, ) @@ -81,9 +81,9 @@ func (f *flowSchemaInformer) defaultInformer(client kubernetes.Interface, resync } func (f *flowSchemaInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&flowcontrolv1.FlowSchema{}, f.defaultInformer) + return f.factory.InformerFor(&apiflowcontrolv1.FlowSchema{}, f.defaultInformer) } -func (f *flowSchemaInformer) Lister() v1.FlowSchemaLister { - return v1.NewFlowSchemaLister(f.Informer().GetIndexer()) +func (f *flowSchemaInformer) Lister() flowcontrolv1.FlowSchemaLister { + return flowcontrolv1.NewFlowSchemaLister(f.Informer().GetIndexer()) } diff --git a/vendor/k8s.io/client-go/informers/flowcontrol/v1/prioritylevelconfiguration.go b/vendor/k8s.io/client-go/informers/flowcontrol/v1/prioritylevelconfiguration.go index 7092c2572..eec6388b2 100644 --- a/vendor/k8s.io/client-go/informers/flowcontrol/v1/prioritylevelconfiguration.go +++ b/vendor/k8s.io/client-go/informers/flowcontrol/v1/prioritylevelconfiguration.go @@ -19,16 +19,16 @@ limitations under the License. 
package v1 import ( - "context" + context "context" time "time" - flowcontrolv1 "k8s.io/api/flowcontrol/v1" + apiflowcontrolv1 "k8s.io/api/flowcontrol/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1 "k8s.io/client-go/listers/flowcontrol/v1" + flowcontrolv1 "k8s.io/client-go/listers/flowcontrol/v1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // PriorityLevelConfigurations. type PriorityLevelConfigurationInformer interface { Informer() cache.SharedIndexInformer - Lister() v1.PriorityLevelConfigurationLister + Lister() flowcontrolv1.PriorityLevelConfigurationLister } type priorityLevelConfigurationInformer struct { @@ -70,7 +70,7 @@ func NewFilteredPriorityLevelConfigurationInformer(client kubernetes.Interface, return client.FlowcontrolV1().PriorityLevelConfigurations().Watch(context.TODO(), options) }, }, - &flowcontrolv1.PriorityLevelConfiguration{}, + &apiflowcontrolv1.PriorityLevelConfiguration{}, resyncPeriod, indexers, ) @@ -81,9 +81,9 @@ func (f *priorityLevelConfigurationInformer) defaultInformer(client kubernetes.I } func (f *priorityLevelConfigurationInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&flowcontrolv1.PriorityLevelConfiguration{}, f.defaultInformer) + return f.factory.InformerFor(&apiflowcontrolv1.PriorityLevelConfiguration{}, f.defaultInformer) } -func (f *priorityLevelConfigurationInformer) Lister() v1.PriorityLevelConfigurationLister { - return v1.NewPriorityLevelConfigurationLister(f.Informer().GetIndexer()) +func (f *priorityLevelConfigurationInformer) Lister() flowcontrolv1.PriorityLevelConfigurationLister { + return flowcontrolv1.NewPriorityLevelConfigurationLister(f.Informer().GetIndexer()) } diff --git a/vendor/k8s.io/client-go/informers/flowcontrol/v1beta1/flowschema.go b/vendor/k8s.io/client-go/informers/flowcontrol/v1beta1/flowschema.go index 13f4ff093..30d099773 100644 --- a/vendor/k8s.io/client-go/informers/flowcontrol/v1beta1/flowschema.go +++ b/vendor/k8s.io/client-go/informers/flowcontrol/v1beta1/flowschema.go @@ -19,16 +19,16 @@ limitations under the License. package v1beta1 import ( - "context" + context "context" time "time" - flowcontrolv1beta1 "k8s.io/api/flowcontrol/v1beta1" + apiflowcontrolv1beta1 "k8s.io/api/flowcontrol/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1beta1 "k8s.io/client-go/listers/flowcontrol/v1beta1" + flowcontrolv1beta1 "k8s.io/client-go/listers/flowcontrol/v1beta1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // FlowSchemas. 
type FlowSchemaInformer interface { Informer() cache.SharedIndexInformer - Lister() v1beta1.FlowSchemaLister + Lister() flowcontrolv1beta1.FlowSchemaLister } type flowSchemaInformer struct { @@ -70,7 +70,7 @@ func NewFilteredFlowSchemaInformer(client kubernetes.Interface, resyncPeriod tim return client.FlowcontrolV1beta1().FlowSchemas().Watch(context.TODO(), options) }, }, - &flowcontrolv1beta1.FlowSchema{}, + &apiflowcontrolv1beta1.FlowSchema{}, resyncPeriod, indexers, ) @@ -81,9 +81,9 @@ func (f *flowSchemaInformer) defaultInformer(client kubernetes.Interface, resync } func (f *flowSchemaInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&flowcontrolv1beta1.FlowSchema{}, f.defaultInformer) + return f.factory.InformerFor(&apiflowcontrolv1beta1.FlowSchema{}, f.defaultInformer) } -func (f *flowSchemaInformer) Lister() v1beta1.FlowSchemaLister { - return v1beta1.NewFlowSchemaLister(f.Informer().GetIndexer()) +func (f *flowSchemaInformer) Lister() flowcontrolv1beta1.FlowSchemaLister { + return flowcontrolv1beta1.NewFlowSchemaLister(f.Informer().GetIndexer()) } diff --git a/vendor/k8s.io/client-go/informers/flowcontrol/v1beta1/prioritylevelconfiguration.go b/vendor/k8s.io/client-go/informers/flowcontrol/v1beta1/prioritylevelconfiguration.go index fa4835906..2a8a867c4 100644 --- a/vendor/k8s.io/client-go/informers/flowcontrol/v1beta1/prioritylevelconfiguration.go +++ b/vendor/k8s.io/client-go/informers/flowcontrol/v1beta1/prioritylevelconfiguration.go @@ -19,16 +19,16 @@ limitations under the License. package v1beta1 import ( - "context" + context "context" time "time" - flowcontrolv1beta1 "k8s.io/api/flowcontrol/v1beta1" + apiflowcontrolv1beta1 "k8s.io/api/flowcontrol/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1beta1 "k8s.io/client-go/listers/flowcontrol/v1beta1" + flowcontrolv1beta1 "k8s.io/client-go/listers/flowcontrol/v1beta1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // PriorityLevelConfigurations. 
type PriorityLevelConfigurationInformer interface { Informer() cache.SharedIndexInformer - Lister() v1beta1.PriorityLevelConfigurationLister + Lister() flowcontrolv1beta1.PriorityLevelConfigurationLister } type priorityLevelConfigurationInformer struct { @@ -70,7 +70,7 @@ func NewFilteredPriorityLevelConfigurationInformer(client kubernetes.Interface, return client.FlowcontrolV1beta1().PriorityLevelConfigurations().Watch(context.TODO(), options) }, }, - &flowcontrolv1beta1.PriorityLevelConfiguration{}, + &apiflowcontrolv1beta1.PriorityLevelConfiguration{}, resyncPeriod, indexers, ) @@ -81,9 +81,9 @@ func (f *priorityLevelConfigurationInformer) defaultInformer(client kubernetes.I } func (f *priorityLevelConfigurationInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&flowcontrolv1beta1.PriorityLevelConfiguration{}, f.defaultInformer) + return f.factory.InformerFor(&apiflowcontrolv1beta1.PriorityLevelConfiguration{}, f.defaultInformer) } -func (f *priorityLevelConfigurationInformer) Lister() v1beta1.PriorityLevelConfigurationLister { - return v1beta1.NewPriorityLevelConfigurationLister(f.Informer().GetIndexer()) +func (f *priorityLevelConfigurationInformer) Lister() flowcontrolv1beta1.PriorityLevelConfigurationLister { + return flowcontrolv1beta1.NewPriorityLevelConfigurationLister(f.Informer().GetIndexer()) } diff --git a/vendor/k8s.io/client-go/informers/flowcontrol/v1beta2/flowschema.go b/vendor/k8s.io/client-go/informers/flowcontrol/v1beta2/flowschema.go index 6f6abecea..edfed12c5 100644 --- a/vendor/k8s.io/client-go/informers/flowcontrol/v1beta2/flowschema.go +++ b/vendor/k8s.io/client-go/informers/flowcontrol/v1beta2/flowschema.go @@ -19,16 +19,16 @@ limitations under the License. package v1beta2 import ( - "context" + context "context" time "time" - flowcontrolv1beta2 "k8s.io/api/flowcontrol/v1beta2" + apiflowcontrolv1beta2 "k8s.io/api/flowcontrol/v1beta2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1beta2 "k8s.io/client-go/listers/flowcontrol/v1beta2" + flowcontrolv1beta2 "k8s.io/client-go/listers/flowcontrol/v1beta2" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // FlowSchemas. 
type FlowSchemaInformer interface { Informer() cache.SharedIndexInformer - Lister() v1beta2.FlowSchemaLister + Lister() flowcontrolv1beta2.FlowSchemaLister } type flowSchemaInformer struct { @@ -70,7 +70,7 @@ func NewFilteredFlowSchemaInformer(client kubernetes.Interface, resyncPeriod tim return client.FlowcontrolV1beta2().FlowSchemas().Watch(context.TODO(), options) }, }, - &flowcontrolv1beta2.FlowSchema{}, + &apiflowcontrolv1beta2.FlowSchema{}, resyncPeriod, indexers, ) @@ -81,9 +81,9 @@ func (f *flowSchemaInformer) defaultInformer(client kubernetes.Interface, resync } func (f *flowSchemaInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&flowcontrolv1beta2.FlowSchema{}, f.defaultInformer) + return f.factory.InformerFor(&apiflowcontrolv1beta2.FlowSchema{}, f.defaultInformer) } -func (f *flowSchemaInformer) Lister() v1beta2.FlowSchemaLister { - return v1beta2.NewFlowSchemaLister(f.Informer().GetIndexer()) +func (f *flowSchemaInformer) Lister() flowcontrolv1beta2.FlowSchemaLister { + return flowcontrolv1beta2.NewFlowSchemaLister(f.Informer().GetIndexer()) } diff --git a/vendor/k8s.io/client-go/informers/flowcontrol/v1beta2/prioritylevelconfiguration.go b/vendor/k8s.io/client-go/informers/flowcontrol/v1beta2/prioritylevelconfiguration.go index 306a90185..624e0373e 100644 --- a/vendor/k8s.io/client-go/informers/flowcontrol/v1beta2/prioritylevelconfiguration.go +++ b/vendor/k8s.io/client-go/informers/flowcontrol/v1beta2/prioritylevelconfiguration.go @@ -19,16 +19,16 @@ limitations under the License. package v1beta2 import ( - "context" + context "context" time "time" - flowcontrolv1beta2 "k8s.io/api/flowcontrol/v1beta2" + apiflowcontrolv1beta2 "k8s.io/api/flowcontrol/v1beta2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1beta2 "k8s.io/client-go/listers/flowcontrol/v1beta2" + flowcontrolv1beta2 "k8s.io/client-go/listers/flowcontrol/v1beta2" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // PriorityLevelConfigurations. 
type PriorityLevelConfigurationInformer interface { Informer() cache.SharedIndexInformer - Lister() v1beta2.PriorityLevelConfigurationLister + Lister() flowcontrolv1beta2.PriorityLevelConfigurationLister } type priorityLevelConfigurationInformer struct { @@ -70,7 +70,7 @@ func NewFilteredPriorityLevelConfigurationInformer(client kubernetes.Interface, return client.FlowcontrolV1beta2().PriorityLevelConfigurations().Watch(context.TODO(), options) }, }, - &flowcontrolv1beta2.PriorityLevelConfiguration{}, + &apiflowcontrolv1beta2.PriorityLevelConfiguration{}, resyncPeriod, indexers, ) @@ -81,9 +81,9 @@ func (f *priorityLevelConfigurationInformer) defaultInformer(client kubernetes.I } func (f *priorityLevelConfigurationInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&flowcontrolv1beta2.PriorityLevelConfiguration{}, f.defaultInformer) + return f.factory.InformerFor(&apiflowcontrolv1beta2.PriorityLevelConfiguration{}, f.defaultInformer) } -func (f *priorityLevelConfigurationInformer) Lister() v1beta2.PriorityLevelConfigurationLister { - return v1beta2.NewPriorityLevelConfigurationLister(f.Informer().GetIndexer()) +func (f *priorityLevelConfigurationInformer) Lister() flowcontrolv1beta2.PriorityLevelConfigurationLister { + return flowcontrolv1beta2.NewPriorityLevelConfigurationLister(f.Informer().GetIndexer()) } diff --git a/vendor/k8s.io/client-go/informers/flowcontrol/v1beta3/flowschema.go b/vendor/k8s.io/client-go/informers/flowcontrol/v1beta3/flowschema.go index 56d8c8b11..bd3f5e6ed 100644 --- a/vendor/k8s.io/client-go/informers/flowcontrol/v1beta3/flowschema.go +++ b/vendor/k8s.io/client-go/informers/flowcontrol/v1beta3/flowschema.go @@ -19,16 +19,16 @@ limitations under the License. package v1beta3 import ( - "context" + context "context" time "time" - flowcontrolv1beta3 "k8s.io/api/flowcontrol/v1beta3" + apiflowcontrolv1beta3 "k8s.io/api/flowcontrol/v1beta3" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1beta3 "k8s.io/client-go/listers/flowcontrol/v1beta3" + flowcontrolv1beta3 "k8s.io/client-go/listers/flowcontrol/v1beta3" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // FlowSchemas. 
type FlowSchemaInformer interface { Informer() cache.SharedIndexInformer - Lister() v1beta3.FlowSchemaLister + Lister() flowcontrolv1beta3.FlowSchemaLister } type flowSchemaInformer struct { @@ -70,7 +70,7 @@ func NewFilteredFlowSchemaInformer(client kubernetes.Interface, resyncPeriod tim return client.FlowcontrolV1beta3().FlowSchemas().Watch(context.TODO(), options) }, }, - &flowcontrolv1beta3.FlowSchema{}, + &apiflowcontrolv1beta3.FlowSchema{}, resyncPeriod, indexers, ) @@ -81,9 +81,9 @@ func (f *flowSchemaInformer) defaultInformer(client kubernetes.Interface, resync } func (f *flowSchemaInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&flowcontrolv1beta3.FlowSchema{}, f.defaultInformer) + return f.factory.InformerFor(&apiflowcontrolv1beta3.FlowSchema{}, f.defaultInformer) } -func (f *flowSchemaInformer) Lister() v1beta3.FlowSchemaLister { - return v1beta3.NewFlowSchemaLister(f.Informer().GetIndexer()) +func (f *flowSchemaInformer) Lister() flowcontrolv1beta3.FlowSchemaLister { + return flowcontrolv1beta3.NewFlowSchemaLister(f.Informer().GetIndexer()) } diff --git a/vendor/k8s.io/client-go/informers/flowcontrol/v1beta3/prioritylevelconfiguration.go b/vendor/k8s.io/client-go/informers/flowcontrol/v1beta3/prioritylevelconfiguration.go index 71f8d5b07..5695d5d4d 100644 --- a/vendor/k8s.io/client-go/informers/flowcontrol/v1beta3/prioritylevelconfiguration.go +++ b/vendor/k8s.io/client-go/informers/flowcontrol/v1beta3/prioritylevelconfiguration.go @@ -19,16 +19,16 @@ limitations under the License. package v1beta3 import ( - "context" + context "context" time "time" - flowcontrolv1beta3 "k8s.io/api/flowcontrol/v1beta3" + apiflowcontrolv1beta3 "k8s.io/api/flowcontrol/v1beta3" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1beta3 "k8s.io/client-go/listers/flowcontrol/v1beta3" + flowcontrolv1beta3 "k8s.io/client-go/listers/flowcontrol/v1beta3" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // PriorityLevelConfigurations. 
type PriorityLevelConfigurationInformer interface { Informer() cache.SharedIndexInformer - Lister() v1beta3.PriorityLevelConfigurationLister + Lister() flowcontrolv1beta3.PriorityLevelConfigurationLister } type priorityLevelConfigurationInformer struct { @@ -70,7 +70,7 @@ func NewFilteredPriorityLevelConfigurationInformer(client kubernetes.Interface, return client.FlowcontrolV1beta3().PriorityLevelConfigurations().Watch(context.TODO(), options) }, }, - &flowcontrolv1beta3.PriorityLevelConfiguration{}, + &apiflowcontrolv1beta3.PriorityLevelConfiguration{}, resyncPeriod, indexers, ) @@ -81,9 +81,9 @@ func (f *priorityLevelConfigurationInformer) defaultInformer(client kubernetes.I } func (f *priorityLevelConfigurationInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&flowcontrolv1beta3.PriorityLevelConfiguration{}, f.defaultInformer) + return f.factory.InformerFor(&apiflowcontrolv1beta3.PriorityLevelConfiguration{}, f.defaultInformer) } -func (f *priorityLevelConfigurationInformer) Lister() v1beta3.PriorityLevelConfigurationLister { - return v1beta3.NewPriorityLevelConfigurationLister(f.Informer().GetIndexer()) +func (f *priorityLevelConfigurationInformer) Lister() flowcontrolv1beta3.PriorityLevelConfigurationLister { + return flowcontrolv1beta3.NewPriorityLevelConfigurationLister(f.Informer().GetIndexer()) } diff --git a/vendor/k8s.io/client-go/informers/generic.go b/vendor/k8s.io/client-go/informers/generic.go index 39a9d3bf4..fd331686d 100644 --- a/vendor/k8s.io/client-go/informers/generic.go +++ b/vendor/k8s.io/client-go/informers/generic.go @@ -19,7 +19,7 @@ limitations under the License. package informers import ( - "fmt" + fmt "fmt" v1 "k8s.io/api/admissionregistration/v1" v1alpha1 "k8s.io/api/admissionregistration/v1alpha1" @@ -38,7 +38,7 @@ import ( certificatesv1alpha1 "k8s.io/api/certificates/v1alpha1" certificatesv1beta1 "k8s.io/api/certificates/v1beta1" coordinationv1 "k8s.io/api/coordination/v1" - coordinationv1alpha1 "k8s.io/api/coordination/v1alpha1" + v1alpha2 "k8s.io/api/coordination/v1alpha2" coordinationv1beta1 "k8s.io/api/coordination/v1beta1" corev1 "k8s.io/api/core/v1" discoveryv1 "k8s.io/api/discovery/v1" @@ -62,6 +62,7 @@ import ( rbacv1alpha1 "k8s.io/api/rbac/v1alpha1" rbacv1beta1 "k8s.io/api/rbac/v1beta1" v1alpha3 "k8s.io/api/resource/v1alpha3" + resourcev1beta1 "k8s.io/api/resource/v1beta1" schedulingv1 "k8s.io/api/scheduling/v1" schedulingv1alpha1 "k8s.io/api/scheduling/v1alpha1" schedulingv1beta1 "k8s.io/api/scheduling/v1beta1" @@ -110,6 +111,10 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource return &genericInformer{resource: resource.GroupResource(), informer: f.Admissionregistration().V1().ValidatingWebhookConfigurations().Informer()}, nil // Group=admissionregistration.k8s.io, Version=v1alpha1 + case v1alpha1.SchemeGroupVersion.WithResource("mutatingadmissionpolicies"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Admissionregistration().V1alpha1().MutatingAdmissionPolicies().Informer()}, nil + case v1alpha1.SchemeGroupVersion.WithResource("mutatingadmissionpolicybindings"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Admissionregistration().V1alpha1().MutatingAdmissionPolicyBindings().Informer()}, nil case v1alpha1.SchemeGroupVersion.WithResource("validatingadmissionpolicies"): return &genericInformer{resource: resource.GroupResource(), informer: f.Admissionregistration().V1alpha1().ValidatingAdmissionPolicies().Informer()}, nil case 
v1alpha1.SchemeGroupVersion.WithResource("validatingadmissionpolicybindings"): @@ -199,9 +204,9 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource case coordinationv1.SchemeGroupVersion.WithResource("leases"): return &genericInformer{resource: resource.GroupResource(), informer: f.Coordination().V1().Leases().Informer()}, nil - // Group=coordination.k8s.io, Version=v1alpha1 - case coordinationv1alpha1.SchemeGroupVersion.WithResource("leasecandidates"): - return &genericInformer{resource: resource.GroupResource(), informer: f.Coordination().V1alpha1().LeaseCandidates().Informer()}, nil + // Group=coordination.k8s.io, Version=v1alpha2 + case v1alpha2.SchemeGroupVersion.WithResource("leasecandidates"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Coordination().V1alpha2().LeaseCandidates().Informer()}, nil // Group=coordination.k8s.io, Version=v1beta1 case coordinationv1beta1.SchemeGroupVersion.WithResource("leases"): @@ -374,8 +379,6 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource // Group=resource.k8s.io, Version=v1alpha3 case v1alpha3.SchemeGroupVersion.WithResource("deviceclasses"): return &genericInformer{resource: resource.GroupResource(), informer: f.Resource().V1alpha3().DeviceClasses().Informer()}, nil - case v1alpha3.SchemeGroupVersion.WithResource("podschedulingcontexts"): - return &genericInformer{resource: resource.GroupResource(), informer: f.Resource().V1alpha3().PodSchedulingContexts().Informer()}, nil case v1alpha3.SchemeGroupVersion.WithResource("resourceclaims"): return &genericInformer{resource: resource.GroupResource(), informer: f.Resource().V1alpha3().ResourceClaims().Informer()}, nil case v1alpha3.SchemeGroupVersion.WithResource("resourceclaimtemplates"): @@ -383,6 +386,16 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource case v1alpha3.SchemeGroupVersion.WithResource("resourceslices"): return &genericInformer{resource: resource.GroupResource(), informer: f.Resource().V1alpha3().ResourceSlices().Informer()}, nil + // Group=resource.k8s.io, Version=v1beta1 + case resourcev1beta1.SchemeGroupVersion.WithResource("deviceclasses"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Resource().V1beta1().DeviceClasses().Informer()}, nil + case resourcev1beta1.SchemeGroupVersion.WithResource("resourceclaims"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Resource().V1beta1().ResourceClaims().Informer()}, nil + case resourcev1beta1.SchemeGroupVersion.WithResource("resourceclaimtemplates"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Resource().V1beta1().ResourceClaimTemplates().Informer()}, nil + case resourcev1beta1.SchemeGroupVersion.WithResource("resourceslices"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Resource().V1beta1().ResourceSlices().Informer()}, nil + // Group=scheduling.k8s.io, Version=v1 case schedulingv1.SchemeGroupVersion.WithResource("priorityclasses"): return &genericInformer{resource: resource.GroupResource(), informer: f.Scheduling().V1().PriorityClasses().Informer()}, nil diff --git a/vendor/k8s.io/client-go/informers/networking/v1/ingress.go b/vendor/k8s.io/client-go/informers/networking/v1/ingress.go index 06c317ad3..a0deccf16 100644 --- a/vendor/k8s.io/client-go/informers/networking/v1/ingress.go +++ b/vendor/k8s.io/client-go/informers/networking/v1/ingress.go @@ -19,16 +19,16 @@ limitations under the License. 
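
The generic.go hunk above teaches the shared informer factory's ForResource switch about the admissionregistration v1alpha1 MutatingAdmissionPolicy types, moves LeaseCandidates from coordination.k8s.io/v1alpha1 to v1alpha2, drops the removed PodSchedulingContexts case, and registers the new resource.k8s.io/v1beta1 resources. A minimal sketch of resolving one of those newly registered group-version-resources through that switch; the clientset construction and the chosen GVR are illustrative assumptions, not part of this diff:

```go
package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

func main() {
	// Assumes in-cluster config; any rest.Config would do for this sketch.
	cfg, err := rest.InClusterConfig()
	if err != nil {
		panic(err)
	}
	clientset := kubernetes.NewForConfigOrDie(cfg)

	factory := informers.NewSharedInformerFactory(clientset, 10*time.Minute)

	// One of the GVRs newly handled by ForResource in this vendor bump.
	gvr := schema.GroupVersionResource{
		Group:    "resource.k8s.io",
		Version:  "v1beta1",
		Resource: "deviceclasses",
	}

	gi, err := factory.ForResource(gvr)
	if err != nil {
		panic(err)
	}
	fmt.Printf("informer resolved for %s: %T\n", gvr, gi.Informer())
}
```

ForResource only wires up the matching informer; nothing is listed or watched until the factory is started.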
package v1 import ( - "context" + context "context" time "time" - networkingv1 "k8s.io/api/networking/v1" + apinetworkingv1 "k8s.io/api/networking/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1 "k8s.io/client-go/listers/networking/v1" + networkingv1 "k8s.io/client-go/listers/networking/v1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // Ingresses. type IngressInformer interface { Informer() cache.SharedIndexInformer - Lister() v1.IngressLister + Lister() networkingv1.IngressLister } type ingressInformer struct { @@ -71,7 +71,7 @@ func NewFilteredIngressInformer(client kubernetes.Interface, namespace string, r return client.NetworkingV1().Ingresses(namespace).Watch(context.TODO(), options) }, }, - &networkingv1.Ingress{}, + &apinetworkingv1.Ingress{}, resyncPeriod, indexers, ) @@ -82,9 +82,9 @@ func (f *ingressInformer) defaultInformer(client kubernetes.Interface, resyncPer } func (f *ingressInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&networkingv1.Ingress{}, f.defaultInformer) + return f.factory.InformerFor(&apinetworkingv1.Ingress{}, f.defaultInformer) } -func (f *ingressInformer) Lister() v1.IngressLister { - return v1.NewIngressLister(f.Informer().GetIndexer()) +func (f *ingressInformer) Lister() networkingv1.IngressLister { + return networkingv1.NewIngressLister(f.Informer().GetIndexer()) } diff --git a/vendor/k8s.io/client-go/informers/networking/v1/ingressclass.go b/vendor/k8s.io/client-go/informers/networking/v1/ingressclass.go index 15514745b..7eb174516 100644 --- a/vendor/k8s.io/client-go/informers/networking/v1/ingressclass.go +++ b/vendor/k8s.io/client-go/informers/networking/v1/ingressclass.go @@ -19,16 +19,16 @@ limitations under the License. package v1 import ( - "context" + context "context" time "time" - networkingv1 "k8s.io/api/networking/v1" + apinetworkingv1 "k8s.io/api/networking/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1 "k8s.io/client-go/listers/networking/v1" + networkingv1 "k8s.io/client-go/listers/networking/v1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // IngressClasses. 
type IngressClassInformer interface { Informer() cache.SharedIndexInformer - Lister() v1.IngressClassLister + Lister() networkingv1.IngressClassLister } type ingressClassInformer struct { @@ -70,7 +70,7 @@ func NewFilteredIngressClassInformer(client kubernetes.Interface, resyncPeriod t return client.NetworkingV1().IngressClasses().Watch(context.TODO(), options) }, }, - &networkingv1.IngressClass{}, + &apinetworkingv1.IngressClass{}, resyncPeriod, indexers, ) @@ -81,9 +81,9 @@ func (f *ingressClassInformer) defaultInformer(client kubernetes.Interface, resy } func (f *ingressClassInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&networkingv1.IngressClass{}, f.defaultInformer) + return f.factory.InformerFor(&apinetworkingv1.IngressClass{}, f.defaultInformer) } -func (f *ingressClassInformer) Lister() v1.IngressClassLister { - return v1.NewIngressClassLister(f.Informer().GetIndexer()) +func (f *ingressClassInformer) Lister() networkingv1.IngressClassLister { + return networkingv1.NewIngressClassLister(f.Informer().GetIndexer()) } diff --git a/vendor/k8s.io/client-go/informers/networking/v1/networkpolicy.go b/vendor/k8s.io/client-go/informers/networking/v1/networkpolicy.go index a75c9ac21..d4bac2911 100644 --- a/vendor/k8s.io/client-go/informers/networking/v1/networkpolicy.go +++ b/vendor/k8s.io/client-go/informers/networking/v1/networkpolicy.go @@ -19,16 +19,16 @@ limitations under the License. package v1 import ( - "context" + context "context" time "time" - networkingv1 "k8s.io/api/networking/v1" + apinetworkingv1 "k8s.io/api/networking/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1 "k8s.io/client-go/listers/networking/v1" + networkingv1 "k8s.io/client-go/listers/networking/v1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // NetworkPolicies. type NetworkPolicyInformer interface { Informer() cache.SharedIndexInformer - Lister() v1.NetworkPolicyLister + Lister() networkingv1.NetworkPolicyLister } type networkPolicyInformer struct { @@ -71,7 +71,7 @@ func NewFilteredNetworkPolicyInformer(client kubernetes.Interface, namespace str return client.NetworkingV1().NetworkPolicies(namespace).Watch(context.TODO(), options) }, }, - &networkingv1.NetworkPolicy{}, + &apinetworkingv1.NetworkPolicy{}, resyncPeriod, indexers, ) @@ -82,9 +82,9 @@ func (f *networkPolicyInformer) defaultInformer(client kubernetes.Interface, res } func (f *networkPolicyInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&networkingv1.NetworkPolicy{}, f.defaultInformer) + return f.factory.InformerFor(&apinetworkingv1.NetworkPolicy{}, f.defaultInformer) } -func (f *networkPolicyInformer) Lister() v1.NetworkPolicyLister { - return v1.NewNetworkPolicyLister(f.Informer().GetIndexer()) +func (f *networkPolicyInformer) Lister() networkingv1.NetworkPolicyLister { + return networkingv1.NewNetworkPolicyLister(f.Informer().GetIndexer()) } diff --git a/vendor/k8s.io/client-go/informers/networking/v1alpha1/ipaddress.go b/vendor/k8s.io/client-go/informers/networking/v1alpha1/ipaddress.go index a1083dbf0..f04c14535 100644 --- a/vendor/k8s.io/client-go/informers/networking/v1alpha1/ipaddress.go +++ b/vendor/k8s.io/client-go/informers/networking/v1alpha1/ipaddress.go @@ -19,16 +19,16 @@ limitations under the License. 
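
The networking/v1 informers regenerated above (IngressClass, NetworkPolicy) only swap import aliases: the lister package is now referred to as networkingv1 and the API package as apinetworkingv1, so the exported informer interfaces are unchanged for callers. A hedged sketch of the unchanged call pattern, with the clientset and namespace left as placeholders:

```go
package example

import (
	"time"

	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
)

// networkPoliciesIn lists NetworkPolicies from the shared informer cache; the
// NetworkPolicyInformer interface is unaffected by the alias rename above.
func networkPoliciesIn(clientset kubernetes.Interface, namespace string) (int, error) {
	factory := informers.NewSharedInformerFactoryWithOptions(
		clientset, 10*time.Minute, informers.WithNamespace(namespace))

	npInformer := factory.Networking().V1().NetworkPolicies()

	stop := make(chan struct{})
	defer close(stop)
	factory.Start(stop)
	factory.WaitForCacheSync(stop)

	// The concrete return type is networkingv1.NetworkPolicyLister; only the
	// alias used in the generated file changed, its methods are identical.
	policies, err := npInformer.Lister().NetworkPolicies(namespace).List(labels.Everything())
	if err != nil {
		return 0, err
	}
	return len(policies), nil
}
```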
package v1alpha1 import ( - "context" + context "context" time "time" - networkingv1alpha1 "k8s.io/api/networking/v1alpha1" + apinetworkingv1alpha1 "k8s.io/api/networking/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1alpha1 "k8s.io/client-go/listers/networking/v1alpha1" + networkingv1alpha1 "k8s.io/client-go/listers/networking/v1alpha1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // IPAddresses. type IPAddressInformer interface { Informer() cache.SharedIndexInformer - Lister() v1alpha1.IPAddressLister + Lister() networkingv1alpha1.IPAddressLister } type iPAddressInformer struct { @@ -70,7 +70,7 @@ func NewFilteredIPAddressInformer(client kubernetes.Interface, resyncPeriod time return client.NetworkingV1alpha1().IPAddresses().Watch(context.TODO(), options) }, }, - &networkingv1alpha1.IPAddress{}, + &apinetworkingv1alpha1.IPAddress{}, resyncPeriod, indexers, ) @@ -81,9 +81,9 @@ func (f *iPAddressInformer) defaultInformer(client kubernetes.Interface, resyncP } func (f *iPAddressInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&networkingv1alpha1.IPAddress{}, f.defaultInformer) + return f.factory.InformerFor(&apinetworkingv1alpha1.IPAddress{}, f.defaultInformer) } -func (f *iPAddressInformer) Lister() v1alpha1.IPAddressLister { - return v1alpha1.NewIPAddressLister(f.Informer().GetIndexer()) +func (f *iPAddressInformer) Lister() networkingv1alpha1.IPAddressLister { + return networkingv1alpha1.NewIPAddressLister(f.Informer().GetIndexer()) } diff --git a/vendor/k8s.io/client-go/informers/networking/v1alpha1/servicecidr.go b/vendor/k8s.io/client-go/informers/networking/v1alpha1/servicecidr.go index 57e602143..86af6d226 100644 --- a/vendor/k8s.io/client-go/informers/networking/v1alpha1/servicecidr.go +++ b/vendor/k8s.io/client-go/informers/networking/v1alpha1/servicecidr.go @@ -19,16 +19,16 @@ limitations under the License. package v1alpha1 import ( - "context" + context "context" time "time" - networkingv1alpha1 "k8s.io/api/networking/v1alpha1" + apinetworkingv1alpha1 "k8s.io/api/networking/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1alpha1 "k8s.io/client-go/listers/networking/v1alpha1" + networkingv1alpha1 "k8s.io/client-go/listers/networking/v1alpha1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // ServiceCIDRs. 
type ServiceCIDRInformer interface { Informer() cache.SharedIndexInformer - Lister() v1alpha1.ServiceCIDRLister + Lister() networkingv1alpha1.ServiceCIDRLister } type serviceCIDRInformer struct { @@ -70,7 +70,7 @@ func NewFilteredServiceCIDRInformer(client kubernetes.Interface, resyncPeriod ti return client.NetworkingV1alpha1().ServiceCIDRs().Watch(context.TODO(), options) }, }, - &networkingv1alpha1.ServiceCIDR{}, + &apinetworkingv1alpha1.ServiceCIDR{}, resyncPeriod, indexers, ) @@ -81,9 +81,9 @@ func (f *serviceCIDRInformer) defaultInformer(client kubernetes.Interface, resyn } func (f *serviceCIDRInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&networkingv1alpha1.ServiceCIDR{}, f.defaultInformer) + return f.factory.InformerFor(&apinetworkingv1alpha1.ServiceCIDR{}, f.defaultInformer) } -func (f *serviceCIDRInformer) Lister() v1alpha1.ServiceCIDRLister { - return v1alpha1.NewServiceCIDRLister(f.Informer().GetIndexer()) +func (f *serviceCIDRInformer) Lister() networkingv1alpha1.ServiceCIDRLister { + return networkingv1alpha1.NewServiceCIDRLister(f.Informer().GetIndexer()) } diff --git a/vendor/k8s.io/client-go/informers/networking/v1beta1/ingress.go b/vendor/k8s.io/client-go/informers/networking/v1beta1/ingress.go index 8800d6c9c..aa337d8e7 100644 --- a/vendor/k8s.io/client-go/informers/networking/v1beta1/ingress.go +++ b/vendor/k8s.io/client-go/informers/networking/v1beta1/ingress.go @@ -19,16 +19,16 @@ limitations under the License. package v1beta1 import ( - "context" + context "context" time "time" - networkingv1beta1 "k8s.io/api/networking/v1beta1" + apinetworkingv1beta1 "k8s.io/api/networking/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1beta1 "k8s.io/client-go/listers/networking/v1beta1" + networkingv1beta1 "k8s.io/client-go/listers/networking/v1beta1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // Ingresses. type IngressInformer interface { Informer() cache.SharedIndexInformer - Lister() v1beta1.IngressLister + Lister() networkingv1beta1.IngressLister } type ingressInformer struct { @@ -71,7 +71,7 @@ func NewFilteredIngressInformer(client kubernetes.Interface, namespace string, r return client.NetworkingV1beta1().Ingresses(namespace).Watch(context.TODO(), options) }, }, - &networkingv1beta1.Ingress{}, + &apinetworkingv1beta1.Ingress{}, resyncPeriod, indexers, ) @@ -82,9 +82,9 @@ func (f *ingressInformer) defaultInformer(client kubernetes.Interface, resyncPer } func (f *ingressInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&networkingv1beta1.Ingress{}, f.defaultInformer) + return f.factory.InformerFor(&apinetworkingv1beta1.Ingress{}, f.defaultInformer) } -func (f *ingressInformer) Lister() v1beta1.IngressLister { - return v1beta1.NewIngressLister(f.Informer().GetIndexer()) +func (f *ingressInformer) Lister() networkingv1beta1.IngressLister { + return networkingv1beta1.NewIngressLister(f.Informer().GetIndexer()) } diff --git a/vendor/k8s.io/client-go/informers/networking/v1beta1/ingressclass.go b/vendor/k8s.io/client-go/informers/networking/v1beta1/ingressclass.go index 17864299b..6ff9d5169 100644 --- a/vendor/k8s.io/client-go/informers/networking/v1beta1/ingressclass.go +++ b/vendor/k8s.io/client-go/informers/networking/v1beta1/ingressclass.go @@ -19,16 +19,16 @@ limitations under the License. 
package v1beta1 import ( - "context" + context "context" time "time" - networkingv1beta1 "k8s.io/api/networking/v1beta1" + apinetworkingv1beta1 "k8s.io/api/networking/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1beta1 "k8s.io/client-go/listers/networking/v1beta1" + networkingv1beta1 "k8s.io/client-go/listers/networking/v1beta1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // IngressClasses. type IngressClassInformer interface { Informer() cache.SharedIndexInformer - Lister() v1beta1.IngressClassLister + Lister() networkingv1beta1.IngressClassLister } type ingressClassInformer struct { @@ -70,7 +70,7 @@ func NewFilteredIngressClassInformer(client kubernetes.Interface, resyncPeriod t return client.NetworkingV1beta1().IngressClasses().Watch(context.TODO(), options) }, }, - &networkingv1beta1.IngressClass{}, + &apinetworkingv1beta1.IngressClass{}, resyncPeriod, indexers, ) @@ -81,9 +81,9 @@ func (f *ingressClassInformer) defaultInformer(client kubernetes.Interface, resy } func (f *ingressClassInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&networkingv1beta1.IngressClass{}, f.defaultInformer) + return f.factory.InformerFor(&apinetworkingv1beta1.IngressClass{}, f.defaultInformer) } -func (f *ingressClassInformer) Lister() v1beta1.IngressClassLister { - return v1beta1.NewIngressClassLister(f.Informer().GetIndexer()) +func (f *ingressClassInformer) Lister() networkingv1beta1.IngressClassLister { + return networkingv1beta1.NewIngressClassLister(f.Informer().GetIndexer()) } diff --git a/vendor/k8s.io/client-go/informers/networking/v1beta1/ipaddress.go b/vendor/k8s.io/client-go/informers/networking/v1beta1/ipaddress.go index 2a2dfa290..401ecd7cb 100644 --- a/vendor/k8s.io/client-go/informers/networking/v1beta1/ipaddress.go +++ b/vendor/k8s.io/client-go/informers/networking/v1beta1/ipaddress.go @@ -19,16 +19,16 @@ limitations under the License. package v1beta1 import ( - "context" + context "context" time "time" - networkingv1beta1 "k8s.io/api/networking/v1beta1" + apinetworkingv1beta1 "k8s.io/api/networking/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1beta1 "k8s.io/client-go/listers/networking/v1beta1" + networkingv1beta1 "k8s.io/client-go/listers/networking/v1beta1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // IPAddresses. 
type IPAddressInformer interface { Informer() cache.SharedIndexInformer - Lister() v1beta1.IPAddressLister + Lister() networkingv1beta1.IPAddressLister } type iPAddressInformer struct { @@ -70,7 +70,7 @@ func NewFilteredIPAddressInformer(client kubernetes.Interface, resyncPeriod time return client.NetworkingV1beta1().IPAddresses().Watch(context.TODO(), options) }, }, - &networkingv1beta1.IPAddress{}, + &apinetworkingv1beta1.IPAddress{}, resyncPeriod, indexers, ) @@ -81,9 +81,9 @@ func (f *iPAddressInformer) defaultInformer(client kubernetes.Interface, resyncP } func (f *iPAddressInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&networkingv1beta1.IPAddress{}, f.defaultInformer) + return f.factory.InformerFor(&apinetworkingv1beta1.IPAddress{}, f.defaultInformer) } -func (f *iPAddressInformer) Lister() v1beta1.IPAddressLister { - return v1beta1.NewIPAddressLister(f.Informer().GetIndexer()) +func (f *iPAddressInformer) Lister() networkingv1beta1.IPAddressLister { + return networkingv1beta1.NewIPAddressLister(f.Informer().GetIndexer()) } diff --git a/vendor/k8s.io/client-go/informers/networking/v1beta1/servicecidr.go b/vendor/k8s.io/client-go/informers/networking/v1beta1/servicecidr.go index d5a9ce014..ff40692f2 100644 --- a/vendor/k8s.io/client-go/informers/networking/v1beta1/servicecidr.go +++ b/vendor/k8s.io/client-go/informers/networking/v1beta1/servicecidr.go @@ -19,16 +19,16 @@ limitations under the License. package v1beta1 import ( - "context" + context "context" time "time" - networkingv1beta1 "k8s.io/api/networking/v1beta1" + apinetworkingv1beta1 "k8s.io/api/networking/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1beta1 "k8s.io/client-go/listers/networking/v1beta1" + networkingv1beta1 "k8s.io/client-go/listers/networking/v1beta1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // ServiceCIDRs. type ServiceCIDRInformer interface { Informer() cache.SharedIndexInformer - Lister() v1beta1.ServiceCIDRLister + Lister() networkingv1beta1.ServiceCIDRLister } type serviceCIDRInformer struct { @@ -70,7 +70,7 @@ func NewFilteredServiceCIDRInformer(client kubernetes.Interface, resyncPeriod ti return client.NetworkingV1beta1().ServiceCIDRs().Watch(context.TODO(), options) }, }, - &networkingv1beta1.ServiceCIDR{}, + &apinetworkingv1beta1.ServiceCIDR{}, resyncPeriod, indexers, ) @@ -81,9 +81,9 @@ func (f *serviceCIDRInformer) defaultInformer(client kubernetes.Interface, resyn } func (f *serviceCIDRInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&networkingv1beta1.ServiceCIDR{}, f.defaultInformer) + return f.factory.InformerFor(&apinetworkingv1beta1.ServiceCIDR{}, f.defaultInformer) } -func (f *serviceCIDRInformer) Lister() v1beta1.ServiceCIDRLister { - return v1beta1.NewServiceCIDRLister(f.Informer().GetIndexer()) +func (f *serviceCIDRInformer) Lister() networkingv1beta1.ServiceCIDRLister { + return networkingv1beta1.NewServiceCIDRLister(f.Informer().GetIndexer()) } diff --git a/vendor/k8s.io/client-go/informers/node/v1/runtimeclass.go b/vendor/k8s.io/client-go/informers/node/v1/runtimeclass.go index 293f4e2e2..7fef7e332 100644 --- a/vendor/k8s.io/client-go/informers/node/v1/runtimeclass.go +++ b/vendor/k8s.io/client-go/informers/node/v1/runtimeclass.go @@ -19,16 +19,16 @@ limitations under the License. 
package v1 import ( - "context" + context "context" time "time" - nodev1 "k8s.io/api/node/v1" + apinodev1 "k8s.io/api/node/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1 "k8s.io/client-go/listers/node/v1" + nodev1 "k8s.io/client-go/listers/node/v1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // RuntimeClasses. type RuntimeClassInformer interface { Informer() cache.SharedIndexInformer - Lister() v1.RuntimeClassLister + Lister() nodev1.RuntimeClassLister } type runtimeClassInformer struct { @@ -70,7 +70,7 @@ func NewFilteredRuntimeClassInformer(client kubernetes.Interface, resyncPeriod t return client.NodeV1().RuntimeClasses().Watch(context.TODO(), options) }, }, - &nodev1.RuntimeClass{}, + &apinodev1.RuntimeClass{}, resyncPeriod, indexers, ) @@ -81,9 +81,9 @@ func (f *runtimeClassInformer) defaultInformer(client kubernetes.Interface, resy } func (f *runtimeClassInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&nodev1.RuntimeClass{}, f.defaultInformer) + return f.factory.InformerFor(&apinodev1.RuntimeClass{}, f.defaultInformer) } -func (f *runtimeClassInformer) Lister() v1.RuntimeClassLister { - return v1.NewRuntimeClassLister(f.Informer().GetIndexer()) +func (f *runtimeClassInformer) Lister() nodev1.RuntimeClassLister { + return nodev1.NewRuntimeClassLister(f.Informer().GetIndexer()) } diff --git a/vendor/k8s.io/client-go/informers/node/v1alpha1/runtimeclass.go b/vendor/k8s.io/client-go/informers/node/v1alpha1/runtimeclass.go index d314a9573..aee61406f 100644 --- a/vendor/k8s.io/client-go/informers/node/v1alpha1/runtimeclass.go +++ b/vendor/k8s.io/client-go/informers/node/v1alpha1/runtimeclass.go @@ -19,16 +19,16 @@ limitations under the License. package v1alpha1 import ( - "context" + context "context" time "time" - nodev1alpha1 "k8s.io/api/node/v1alpha1" + apinodev1alpha1 "k8s.io/api/node/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1alpha1 "k8s.io/client-go/listers/node/v1alpha1" + nodev1alpha1 "k8s.io/client-go/listers/node/v1alpha1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // RuntimeClasses. 
type RuntimeClassInformer interface { Informer() cache.SharedIndexInformer - Lister() v1alpha1.RuntimeClassLister + Lister() nodev1alpha1.RuntimeClassLister } type runtimeClassInformer struct { @@ -70,7 +70,7 @@ func NewFilteredRuntimeClassInformer(client kubernetes.Interface, resyncPeriod t return client.NodeV1alpha1().RuntimeClasses().Watch(context.TODO(), options) }, }, - &nodev1alpha1.RuntimeClass{}, + &apinodev1alpha1.RuntimeClass{}, resyncPeriod, indexers, ) @@ -81,9 +81,9 @@ func (f *runtimeClassInformer) defaultInformer(client kubernetes.Interface, resy } func (f *runtimeClassInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&nodev1alpha1.RuntimeClass{}, f.defaultInformer) + return f.factory.InformerFor(&apinodev1alpha1.RuntimeClass{}, f.defaultInformer) } -func (f *runtimeClassInformer) Lister() v1alpha1.RuntimeClassLister { - return v1alpha1.NewRuntimeClassLister(f.Informer().GetIndexer()) +func (f *runtimeClassInformer) Lister() nodev1alpha1.RuntimeClassLister { + return nodev1alpha1.NewRuntimeClassLister(f.Informer().GetIndexer()) } diff --git a/vendor/k8s.io/client-go/informers/node/v1beta1/runtimeclass.go b/vendor/k8s.io/client-go/informers/node/v1beta1/runtimeclass.go index 07619b230..ab9b8e0ee 100644 --- a/vendor/k8s.io/client-go/informers/node/v1beta1/runtimeclass.go +++ b/vendor/k8s.io/client-go/informers/node/v1beta1/runtimeclass.go @@ -19,16 +19,16 @@ limitations under the License. package v1beta1 import ( - "context" + context "context" time "time" - nodev1beta1 "k8s.io/api/node/v1beta1" + apinodev1beta1 "k8s.io/api/node/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1beta1 "k8s.io/client-go/listers/node/v1beta1" + nodev1beta1 "k8s.io/client-go/listers/node/v1beta1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // RuntimeClasses. type RuntimeClassInformer interface { Informer() cache.SharedIndexInformer - Lister() v1beta1.RuntimeClassLister + Lister() nodev1beta1.RuntimeClassLister } type runtimeClassInformer struct { @@ -70,7 +70,7 @@ func NewFilteredRuntimeClassInformer(client kubernetes.Interface, resyncPeriod t return client.NodeV1beta1().RuntimeClasses().Watch(context.TODO(), options) }, }, - &nodev1beta1.RuntimeClass{}, + &apinodev1beta1.RuntimeClass{}, resyncPeriod, indexers, ) @@ -81,9 +81,9 @@ func (f *runtimeClassInformer) defaultInformer(client kubernetes.Interface, resy } func (f *runtimeClassInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&nodev1beta1.RuntimeClass{}, f.defaultInformer) + return f.factory.InformerFor(&apinodev1beta1.RuntimeClass{}, f.defaultInformer) } -func (f *runtimeClassInformer) Lister() v1beta1.RuntimeClassLister { - return v1beta1.NewRuntimeClassLister(f.Informer().GetIndexer()) +func (f *runtimeClassInformer) Lister() nodev1beta1.RuntimeClassLister { + return nodev1beta1.NewRuntimeClassLister(f.Informer().GetIndexer()) } diff --git a/vendor/k8s.io/client-go/informers/policy/v1/poddisruptionbudget.go b/vendor/k8s.io/client-go/informers/policy/v1/poddisruptionbudget.go index 436598512..baacb59da 100644 --- a/vendor/k8s.io/client-go/informers/policy/v1/poddisruptionbudget.go +++ b/vendor/k8s.io/client-go/informers/policy/v1/poddisruptionbudget.go @@ -19,16 +19,16 @@ limitations under the License. 
package v1 import ( - "context" + context "context" time "time" - policyv1 "k8s.io/api/policy/v1" + apipolicyv1 "k8s.io/api/policy/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1 "k8s.io/client-go/listers/policy/v1" + policyv1 "k8s.io/client-go/listers/policy/v1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // PodDisruptionBudgets. type PodDisruptionBudgetInformer interface { Informer() cache.SharedIndexInformer - Lister() v1.PodDisruptionBudgetLister + Lister() policyv1.PodDisruptionBudgetLister } type podDisruptionBudgetInformer struct { @@ -71,7 +71,7 @@ func NewFilteredPodDisruptionBudgetInformer(client kubernetes.Interface, namespa return client.PolicyV1().PodDisruptionBudgets(namespace).Watch(context.TODO(), options) }, }, - &policyv1.PodDisruptionBudget{}, + &apipolicyv1.PodDisruptionBudget{}, resyncPeriod, indexers, ) @@ -82,9 +82,9 @@ func (f *podDisruptionBudgetInformer) defaultInformer(client kubernetes.Interfac } func (f *podDisruptionBudgetInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&policyv1.PodDisruptionBudget{}, f.defaultInformer) + return f.factory.InformerFor(&apipolicyv1.PodDisruptionBudget{}, f.defaultInformer) } -func (f *podDisruptionBudgetInformer) Lister() v1.PodDisruptionBudgetLister { - return v1.NewPodDisruptionBudgetLister(f.Informer().GetIndexer()) +func (f *podDisruptionBudgetInformer) Lister() policyv1.PodDisruptionBudgetLister { + return policyv1.NewPodDisruptionBudgetLister(f.Informer().GetIndexer()) } diff --git a/vendor/k8s.io/client-go/informers/policy/v1beta1/poddisruptionbudget.go b/vendor/k8s.io/client-go/informers/policy/v1beta1/poddisruptionbudget.go index 4530343ec..081b2e08e 100644 --- a/vendor/k8s.io/client-go/informers/policy/v1beta1/poddisruptionbudget.go +++ b/vendor/k8s.io/client-go/informers/policy/v1beta1/poddisruptionbudget.go @@ -19,16 +19,16 @@ limitations under the License. package v1beta1 import ( - "context" + context "context" time "time" - policyv1beta1 "k8s.io/api/policy/v1beta1" + apipolicyv1beta1 "k8s.io/api/policy/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1beta1 "k8s.io/client-go/listers/policy/v1beta1" + policyv1beta1 "k8s.io/client-go/listers/policy/v1beta1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // PodDisruptionBudgets. 
type PodDisruptionBudgetInformer interface { Informer() cache.SharedIndexInformer - Lister() v1beta1.PodDisruptionBudgetLister + Lister() policyv1beta1.PodDisruptionBudgetLister } type podDisruptionBudgetInformer struct { @@ -71,7 +71,7 @@ func NewFilteredPodDisruptionBudgetInformer(client kubernetes.Interface, namespa return client.PolicyV1beta1().PodDisruptionBudgets(namespace).Watch(context.TODO(), options) }, }, - &policyv1beta1.PodDisruptionBudget{}, + &apipolicyv1beta1.PodDisruptionBudget{}, resyncPeriod, indexers, ) @@ -82,9 +82,9 @@ func (f *podDisruptionBudgetInformer) defaultInformer(client kubernetes.Interfac } func (f *podDisruptionBudgetInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&policyv1beta1.PodDisruptionBudget{}, f.defaultInformer) + return f.factory.InformerFor(&apipolicyv1beta1.PodDisruptionBudget{}, f.defaultInformer) } -func (f *podDisruptionBudgetInformer) Lister() v1beta1.PodDisruptionBudgetLister { - return v1beta1.NewPodDisruptionBudgetLister(f.Informer().GetIndexer()) +func (f *podDisruptionBudgetInformer) Lister() policyv1beta1.PodDisruptionBudgetLister { + return policyv1beta1.NewPodDisruptionBudgetLister(f.Informer().GetIndexer()) } diff --git a/vendor/k8s.io/client-go/informers/rbac/v1/clusterrole.go b/vendor/k8s.io/client-go/informers/rbac/v1/clusterrole.go index 0572be264..0606fb464 100644 --- a/vendor/k8s.io/client-go/informers/rbac/v1/clusterrole.go +++ b/vendor/k8s.io/client-go/informers/rbac/v1/clusterrole.go @@ -19,16 +19,16 @@ limitations under the License. package v1 import ( - "context" + context "context" time "time" - rbacv1 "k8s.io/api/rbac/v1" + apirbacv1 "k8s.io/api/rbac/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1 "k8s.io/client-go/listers/rbac/v1" + rbacv1 "k8s.io/client-go/listers/rbac/v1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // ClusterRoles. type ClusterRoleInformer interface { Informer() cache.SharedIndexInformer - Lister() v1.ClusterRoleLister + Lister() rbacv1.ClusterRoleLister } type clusterRoleInformer struct { @@ -70,7 +70,7 @@ func NewFilteredClusterRoleInformer(client kubernetes.Interface, resyncPeriod ti return client.RbacV1().ClusterRoles().Watch(context.TODO(), options) }, }, - &rbacv1.ClusterRole{}, + &apirbacv1.ClusterRole{}, resyncPeriod, indexers, ) @@ -81,9 +81,9 @@ func (f *clusterRoleInformer) defaultInformer(client kubernetes.Interface, resyn } func (f *clusterRoleInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&rbacv1.ClusterRole{}, f.defaultInformer) + return f.factory.InformerFor(&apirbacv1.ClusterRole{}, f.defaultInformer) } -func (f *clusterRoleInformer) Lister() v1.ClusterRoleLister { - return v1.NewClusterRoleLister(f.Informer().GetIndexer()) +func (f *clusterRoleInformer) Lister() rbacv1.ClusterRoleLister { + return rbacv1.NewClusterRoleLister(f.Informer().GetIndexer()) } diff --git a/vendor/k8s.io/client-go/informers/rbac/v1/clusterrolebinding.go b/vendor/k8s.io/client-go/informers/rbac/v1/clusterrolebinding.go index 51026c055..dca087c9d 100644 --- a/vendor/k8s.io/client-go/informers/rbac/v1/clusterrolebinding.go +++ b/vendor/k8s.io/client-go/informers/rbac/v1/clusterrolebinding.go @@ -19,16 +19,16 @@ limitations under the License. 
package v1 import ( - "context" + context "context" time "time" - rbacv1 "k8s.io/api/rbac/v1" + apirbacv1 "k8s.io/api/rbac/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1 "k8s.io/client-go/listers/rbac/v1" + rbacv1 "k8s.io/client-go/listers/rbac/v1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // ClusterRoleBindings. type ClusterRoleBindingInformer interface { Informer() cache.SharedIndexInformer - Lister() v1.ClusterRoleBindingLister + Lister() rbacv1.ClusterRoleBindingLister } type clusterRoleBindingInformer struct { @@ -70,7 +70,7 @@ func NewFilteredClusterRoleBindingInformer(client kubernetes.Interface, resyncPe return client.RbacV1().ClusterRoleBindings().Watch(context.TODO(), options) }, }, - &rbacv1.ClusterRoleBinding{}, + &apirbacv1.ClusterRoleBinding{}, resyncPeriod, indexers, ) @@ -81,9 +81,9 @@ func (f *clusterRoleBindingInformer) defaultInformer(client kubernetes.Interface } func (f *clusterRoleBindingInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&rbacv1.ClusterRoleBinding{}, f.defaultInformer) + return f.factory.InformerFor(&apirbacv1.ClusterRoleBinding{}, f.defaultInformer) } -func (f *clusterRoleBindingInformer) Lister() v1.ClusterRoleBindingLister { - return v1.NewClusterRoleBindingLister(f.Informer().GetIndexer()) +func (f *clusterRoleBindingInformer) Lister() rbacv1.ClusterRoleBindingLister { + return rbacv1.NewClusterRoleBindingLister(f.Informer().GetIndexer()) } diff --git a/vendor/k8s.io/client-go/informers/rbac/v1/role.go b/vendor/k8s.io/client-go/informers/rbac/v1/role.go index 986a5f29f..66f9c3f23 100644 --- a/vendor/k8s.io/client-go/informers/rbac/v1/role.go +++ b/vendor/k8s.io/client-go/informers/rbac/v1/role.go @@ -19,16 +19,16 @@ limitations under the License. package v1 import ( - "context" + context "context" time "time" - rbacv1 "k8s.io/api/rbac/v1" + apirbacv1 "k8s.io/api/rbac/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1 "k8s.io/client-go/listers/rbac/v1" + rbacv1 "k8s.io/client-go/listers/rbac/v1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // Roles. 
type RoleInformer interface { Informer() cache.SharedIndexInformer - Lister() v1.RoleLister + Lister() rbacv1.RoleLister } type roleInformer struct { @@ -71,7 +71,7 @@ func NewFilteredRoleInformer(client kubernetes.Interface, namespace string, resy return client.RbacV1().Roles(namespace).Watch(context.TODO(), options) }, }, - &rbacv1.Role{}, + &apirbacv1.Role{}, resyncPeriod, indexers, ) @@ -82,9 +82,9 @@ func (f *roleInformer) defaultInformer(client kubernetes.Interface, resyncPeriod } func (f *roleInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&rbacv1.Role{}, f.defaultInformer) + return f.factory.InformerFor(&apirbacv1.Role{}, f.defaultInformer) } -func (f *roleInformer) Lister() v1.RoleLister { - return v1.NewRoleLister(f.Informer().GetIndexer()) +func (f *roleInformer) Lister() rbacv1.RoleLister { + return rbacv1.NewRoleLister(f.Informer().GetIndexer()) } diff --git a/vendor/k8s.io/client-go/informers/rbac/v1/rolebinding.go b/vendor/k8s.io/client-go/informers/rbac/v1/rolebinding.go index 0264049fb..6d72601a4 100644 --- a/vendor/k8s.io/client-go/informers/rbac/v1/rolebinding.go +++ b/vendor/k8s.io/client-go/informers/rbac/v1/rolebinding.go @@ -19,16 +19,16 @@ limitations under the License. package v1 import ( - "context" + context "context" time "time" - rbacv1 "k8s.io/api/rbac/v1" + apirbacv1 "k8s.io/api/rbac/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1 "k8s.io/client-go/listers/rbac/v1" + rbacv1 "k8s.io/client-go/listers/rbac/v1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // RoleBindings. type RoleBindingInformer interface { Informer() cache.SharedIndexInformer - Lister() v1.RoleBindingLister + Lister() rbacv1.RoleBindingLister } type roleBindingInformer struct { @@ -71,7 +71,7 @@ func NewFilteredRoleBindingInformer(client kubernetes.Interface, namespace strin return client.RbacV1().RoleBindings(namespace).Watch(context.TODO(), options) }, }, - &rbacv1.RoleBinding{}, + &apirbacv1.RoleBinding{}, resyncPeriod, indexers, ) @@ -82,9 +82,9 @@ func (f *roleBindingInformer) defaultInformer(client kubernetes.Interface, resyn } func (f *roleBindingInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&rbacv1.RoleBinding{}, f.defaultInformer) + return f.factory.InformerFor(&apirbacv1.RoleBinding{}, f.defaultInformer) } -func (f *roleBindingInformer) Lister() v1.RoleBindingLister { - return v1.NewRoleBindingLister(f.Informer().GetIndexer()) +func (f *roleBindingInformer) Lister() rbacv1.RoleBindingLister { + return rbacv1.NewRoleBindingLister(f.Informer().GetIndexer()) } diff --git a/vendor/k8s.io/client-go/informers/rbac/v1alpha1/clusterrole.go b/vendor/k8s.io/client-go/informers/rbac/v1alpha1/clusterrole.go index 70d9885f0..52249f6b4 100644 --- a/vendor/k8s.io/client-go/informers/rbac/v1alpha1/clusterrole.go +++ b/vendor/k8s.io/client-go/informers/rbac/v1alpha1/clusterrole.go @@ -19,16 +19,16 @@ limitations under the License. 
package v1alpha1 import ( - "context" + context "context" time "time" - rbacv1alpha1 "k8s.io/api/rbac/v1alpha1" + apirbacv1alpha1 "k8s.io/api/rbac/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1alpha1 "k8s.io/client-go/listers/rbac/v1alpha1" + rbacv1alpha1 "k8s.io/client-go/listers/rbac/v1alpha1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // ClusterRoles. type ClusterRoleInformer interface { Informer() cache.SharedIndexInformer - Lister() v1alpha1.ClusterRoleLister + Lister() rbacv1alpha1.ClusterRoleLister } type clusterRoleInformer struct { @@ -70,7 +70,7 @@ func NewFilteredClusterRoleInformer(client kubernetes.Interface, resyncPeriod ti return client.RbacV1alpha1().ClusterRoles().Watch(context.TODO(), options) }, }, - &rbacv1alpha1.ClusterRole{}, + &apirbacv1alpha1.ClusterRole{}, resyncPeriod, indexers, ) @@ -81,9 +81,9 @@ func (f *clusterRoleInformer) defaultInformer(client kubernetes.Interface, resyn } func (f *clusterRoleInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&rbacv1alpha1.ClusterRole{}, f.defaultInformer) + return f.factory.InformerFor(&apirbacv1alpha1.ClusterRole{}, f.defaultInformer) } -func (f *clusterRoleInformer) Lister() v1alpha1.ClusterRoleLister { - return v1alpha1.NewClusterRoleLister(f.Informer().GetIndexer()) +func (f *clusterRoleInformer) Lister() rbacv1alpha1.ClusterRoleLister { + return rbacv1alpha1.NewClusterRoleLister(f.Informer().GetIndexer()) } diff --git a/vendor/k8s.io/client-go/informers/rbac/v1alpha1/clusterrolebinding.go b/vendor/k8s.io/client-go/informers/rbac/v1alpha1/clusterrolebinding.go index 8c18f6792..c8f7c4c10 100644 --- a/vendor/k8s.io/client-go/informers/rbac/v1alpha1/clusterrolebinding.go +++ b/vendor/k8s.io/client-go/informers/rbac/v1alpha1/clusterrolebinding.go @@ -19,16 +19,16 @@ limitations under the License. package v1alpha1 import ( - "context" + context "context" time "time" - rbacv1alpha1 "k8s.io/api/rbac/v1alpha1" + apirbacv1alpha1 "k8s.io/api/rbac/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1alpha1 "k8s.io/client-go/listers/rbac/v1alpha1" + rbacv1alpha1 "k8s.io/client-go/listers/rbac/v1alpha1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // ClusterRoleBindings. 
type ClusterRoleBindingInformer interface { Informer() cache.SharedIndexInformer - Lister() v1alpha1.ClusterRoleBindingLister + Lister() rbacv1alpha1.ClusterRoleBindingLister } type clusterRoleBindingInformer struct { @@ -70,7 +70,7 @@ func NewFilteredClusterRoleBindingInformer(client kubernetes.Interface, resyncPe return client.RbacV1alpha1().ClusterRoleBindings().Watch(context.TODO(), options) }, }, - &rbacv1alpha1.ClusterRoleBinding{}, + &apirbacv1alpha1.ClusterRoleBinding{}, resyncPeriod, indexers, ) @@ -81,9 +81,9 @@ func (f *clusterRoleBindingInformer) defaultInformer(client kubernetes.Interface } func (f *clusterRoleBindingInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&rbacv1alpha1.ClusterRoleBinding{}, f.defaultInformer) + return f.factory.InformerFor(&apirbacv1alpha1.ClusterRoleBinding{}, f.defaultInformer) } -func (f *clusterRoleBindingInformer) Lister() v1alpha1.ClusterRoleBindingLister { - return v1alpha1.NewClusterRoleBindingLister(f.Informer().GetIndexer()) +func (f *clusterRoleBindingInformer) Lister() rbacv1alpha1.ClusterRoleBindingLister { + return rbacv1alpha1.NewClusterRoleBindingLister(f.Informer().GetIndexer()) } diff --git a/vendor/k8s.io/client-go/informers/rbac/v1alpha1/role.go b/vendor/k8s.io/client-go/informers/rbac/v1alpha1/role.go index 7dc4551d9..dcdddc057 100644 --- a/vendor/k8s.io/client-go/informers/rbac/v1alpha1/role.go +++ b/vendor/k8s.io/client-go/informers/rbac/v1alpha1/role.go @@ -19,16 +19,16 @@ limitations under the License. package v1alpha1 import ( - "context" + context "context" time "time" - rbacv1alpha1 "k8s.io/api/rbac/v1alpha1" + apirbacv1alpha1 "k8s.io/api/rbac/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1alpha1 "k8s.io/client-go/listers/rbac/v1alpha1" + rbacv1alpha1 "k8s.io/client-go/listers/rbac/v1alpha1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // Roles. type RoleInformer interface { Informer() cache.SharedIndexInformer - Lister() v1alpha1.RoleLister + Lister() rbacv1alpha1.RoleLister } type roleInformer struct { @@ -71,7 +71,7 @@ func NewFilteredRoleInformer(client kubernetes.Interface, namespace string, resy return client.RbacV1alpha1().Roles(namespace).Watch(context.TODO(), options) }, }, - &rbacv1alpha1.Role{}, + &apirbacv1alpha1.Role{}, resyncPeriod, indexers, ) @@ -82,9 +82,9 @@ func (f *roleInformer) defaultInformer(client kubernetes.Interface, resyncPeriod } func (f *roleInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&rbacv1alpha1.Role{}, f.defaultInformer) + return f.factory.InformerFor(&apirbacv1alpha1.Role{}, f.defaultInformer) } -func (f *roleInformer) Lister() v1alpha1.RoleLister { - return v1alpha1.NewRoleLister(f.Informer().GetIndexer()) +func (f *roleInformer) Lister() rbacv1alpha1.RoleLister { + return rbacv1alpha1.NewRoleLister(f.Informer().GetIndexer()) } diff --git a/vendor/k8s.io/client-go/informers/rbac/v1alpha1/rolebinding.go b/vendor/k8s.io/client-go/informers/rbac/v1alpha1/rolebinding.go index d49ec8b36..9184a5baf 100644 --- a/vendor/k8s.io/client-go/informers/rbac/v1alpha1/rolebinding.go +++ b/vendor/k8s.io/client-go/informers/rbac/v1alpha1/rolebinding.go @@ -19,16 +19,16 @@ limitations under the License. 
package v1alpha1 import ( - "context" + context "context" time "time" - rbacv1alpha1 "k8s.io/api/rbac/v1alpha1" + apirbacv1alpha1 "k8s.io/api/rbac/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1alpha1 "k8s.io/client-go/listers/rbac/v1alpha1" + rbacv1alpha1 "k8s.io/client-go/listers/rbac/v1alpha1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // RoleBindings. type RoleBindingInformer interface { Informer() cache.SharedIndexInformer - Lister() v1alpha1.RoleBindingLister + Lister() rbacv1alpha1.RoleBindingLister } type roleBindingInformer struct { @@ -71,7 +71,7 @@ func NewFilteredRoleBindingInformer(client kubernetes.Interface, namespace strin return client.RbacV1alpha1().RoleBindings(namespace).Watch(context.TODO(), options) }, }, - &rbacv1alpha1.RoleBinding{}, + &apirbacv1alpha1.RoleBinding{}, resyncPeriod, indexers, ) @@ -82,9 +82,9 @@ func (f *roleBindingInformer) defaultInformer(client kubernetes.Interface, resyn } func (f *roleBindingInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&rbacv1alpha1.RoleBinding{}, f.defaultInformer) + return f.factory.InformerFor(&apirbacv1alpha1.RoleBinding{}, f.defaultInformer) } -func (f *roleBindingInformer) Lister() v1alpha1.RoleBindingLister { - return v1alpha1.NewRoleBindingLister(f.Informer().GetIndexer()) +func (f *roleBindingInformer) Lister() rbacv1alpha1.RoleBindingLister { + return rbacv1alpha1.NewRoleBindingLister(f.Informer().GetIndexer()) } diff --git a/vendor/k8s.io/client-go/informers/rbac/v1beta1/clusterrole.go b/vendor/k8s.io/client-go/informers/rbac/v1beta1/clusterrole.go index e50e1d393..d86dd771a 100644 --- a/vendor/k8s.io/client-go/informers/rbac/v1beta1/clusterrole.go +++ b/vendor/k8s.io/client-go/informers/rbac/v1beta1/clusterrole.go @@ -19,16 +19,16 @@ limitations under the License. package v1beta1 import ( - "context" + context "context" time "time" - rbacv1beta1 "k8s.io/api/rbac/v1beta1" + apirbacv1beta1 "k8s.io/api/rbac/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1beta1 "k8s.io/client-go/listers/rbac/v1beta1" + rbacv1beta1 "k8s.io/client-go/listers/rbac/v1beta1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // ClusterRoles. 
type ClusterRoleInformer interface { Informer() cache.SharedIndexInformer - Lister() v1beta1.ClusterRoleLister + Lister() rbacv1beta1.ClusterRoleLister } type clusterRoleInformer struct { @@ -70,7 +70,7 @@ func NewFilteredClusterRoleInformer(client kubernetes.Interface, resyncPeriod ti return client.RbacV1beta1().ClusterRoles().Watch(context.TODO(), options) }, }, - &rbacv1beta1.ClusterRole{}, + &apirbacv1beta1.ClusterRole{}, resyncPeriod, indexers, ) @@ -81,9 +81,9 @@ func (f *clusterRoleInformer) defaultInformer(client kubernetes.Interface, resyn } func (f *clusterRoleInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&rbacv1beta1.ClusterRole{}, f.defaultInformer) + return f.factory.InformerFor(&apirbacv1beta1.ClusterRole{}, f.defaultInformer) } -func (f *clusterRoleInformer) Lister() v1beta1.ClusterRoleLister { - return v1beta1.NewClusterRoleLister(f.Informer().GetIndexer()) +func (f *clusterRoleInformer) Lister() rbacv1beta1.ClusterRoleLister { + return rbacv1beta1.NewClusterRoleLister(f.Informer().GetIndexer()) } diff --git a/vendor/k8s.io/client-go/informers/rbac/v1beta1/clusterrolebinding.go b/vendor/k8s.io/client-go/informers/rbac/v1beta1/clusterrolebinding.go index a7ea4cd38..70c1cd984 100644 --- a/vendor/k8s.io/client-go/informers/rbac/v1beta1/clusterrolebinding.go +++ b/vendor/k8s.io/client-go/informers/rbac/v1beta1/clusterrolebinding.go @@ -19,16 +19,16 @@ limitations under the License. package v1beta1 import ( - "context" + context "context" time "time" - rbacv1beta1 "k8s.io/api/rbac/v1beta1" + apirbacv1beta1 "k8s.io/api/rbac/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1beta1 "k8s.io/client-go/listers/rbac/v1beta1" + rbacv1beta1 "k8s.io/client-go/listers/rbac/v1beta1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // ClusterRoleBindings. type ClusterRoleBindingInformer interface { Informer() cache.SharedIndexInformer - Lister() v1beta1.ClusterRoleBindingLister + Lister() rbacv1beta1.ClusterRoleBindingLister } type clusterRoleBindingInformer struct { @@ -70,7 +70,7 @@ func NewFilteredClusterRoleBindingInformer(client kubernetes.Interface, resyncPe return client.RbacV1beta1().ClusterRoleBindings().Watch(context.TODO(), options) }, }, - &rbacv1beta1.ClusterRoleBinding{}, + &apirbacv1beta1.ClusterRoleBinding{}, resyncPeriod, indexers, ) @@ -81,9 +81,9 @@ func (f *clusterRoleBindingInformer) defaultInformer(client kubernetes.Interface } func (f *clusterRoleBindingInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&rbacv1beta1.ClusterRoleBinding{}, f.defaultInformer) + return f.factory.InformerFor(&apirbacv1beta1.ClusterRoleBinding{}, f.defaultInformer) } -func (f *clusterRoleBindingInformer) Lister() v1beta1.ClusterRoleBindingLister { - return v1beta1.NewClusterRoleBindingLister(f.Informer().GetIndexer()) +func (f *clusterRoleBindingInformer) Lister() rbacv1beta1.ClusterRoleBindingLister { + return rbacv1beta1.NewClusterRoleBindingLister(f.Informer().GetIndexer()) } diff --git a/vendor/k8s.io/client-go/informers/rbac/v1beta1/role.go b/vendor/k8s.io/client-go/informers/rbac/v1beta1/role.go index e56961e81..2995e1e63 100644 --- a/vendor/k8s.io/client-go/informers/rbac/v1beta1/role.go +++ b/vendor/k8s.io/client-go/informers/rbac/v1beta1/role.go @@ -19,16 +19,16 @@ limitations under the License. 
package v1beta1 import ( - "context" + context "context" time "time" - rbacv1beta1 "k8s.io/api/rbac/v1beta1" + apirbacv1beta1 "k8s.io/api/rbac/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1beta1 "k8s.io/client-go/listers/rbac/v1beta1" + rbacv1beta1 "k8s.io/client-go/listers/rbac/v1beta1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // Roles. type RoleInformer interface { Informer() cache.SharedIndexInformer - Lister() v1beta1.RoleLister + Lister() rbacv1beta1.RoleLister } type roleInformer struct { @@ -71,7 +71,7 @@ func NewFilteredRoleInformer(client kubernetes.Interface, namespace string, resy return client.RbacV1beta1().Roles(namespace).Watch(context.TODO(), options) }, }, - &rbacv1beta1.Role{}, + &apirbacv1beta1.Role{}, resyncPeriod, indexers, ) @@ -82,9 +82,9 @@ func (f *roleInformer) defaultInformer(client kubernetes.Interface, resyncPeriod } func (f *roleInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&rbacv1beta1.Role{}, f.defaultInformer) + return f.factory.InformerFor(&apirbacv1beta1.Role{}, f.defaultInformer) } -func (f *roleInformer) Lister() v1beta1.RoleLister { - return v1beta1.NewRoleLister(f.Informer().GetIndexer()) +func (f *roleInformer) Lister() rbacv1beta1.RoleLister { + return rbacv1beta1.NewRoleLister(f.Informer().GetIndexer()) } diff --git a/vendor/k8s.io/client-go/informers/rbac/v1beta1/rolebinding.go b/vendor/k8s.io/client-go/informers/rbac/v1beta1/rolebinding.go index d893882db..11854f38d 100644 --- a/vendor/k8s.io/client-go/informers/rbac/v1beta1/rolebinding.go +++ b/vendor/k8s.io/client-go/informers/rbac/v1beta1/rolebinding.go @@ -19,16 +19,16 @@ limitations under the License. package v1beta1 import ( - "context" + context "context" time "time" - rbacv1beta1 "k8s.io/api/rbac/v1beta1" + apirbacv1beta1 "k8s.io/api/rbac/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1beta1 "k8s.io/client-go/listers/rbac/v1beta1" + rbacv1beta1 "k8s.io/client-go/listers/rbac/v1beta1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // RoleBindings. 
type RoleBindingInformer interface { Informer() cache.SharedIndexInformer - Lister() v1beta1.RoleBindingLister + Lister() rbacv1beta1.RoleBindingLister } type roleBindingInformer struct { @@ -71,7 +71,7 @@ func NewFilteredRoleBindingInformer(client kubernetes.Interface, namespace strin return client.RbacV1beta1().RoleBindings(namespace).Watch(context.TODO(), options) }, }, - &rbacv1beta1.RoleBinding{}, + &apirbacv1beta1.RoleBinding{}, resyncPeriod, indexers, ) @@ -82,9 +82,9 @@ func (f *roleBindingInformer) defaultInformer(client kubernetes.Interface, resyn } func (f *roleBindingInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&rbacv1beta1.RoleBinding{}, f.defaultInformer) + return f.factory.InformerFor(&apirbacv1beta1.RoleBinding{}, f.defaultInformer) } -func (f *roleBindingInformer) Lister() v1beta1.RoleBindingLister { - return v1beta1.NewRoleBindingLister(f.Informer().GetIndexer()) +func (f *roleBindingInformer) Lister() rbacv1beta1.RoleBindingLister { + return rbacv1beta1.NewRoleBindingLister(f.Informer().GetIndexer()) } diff --git a/vendor/k8s.io/client-go/informers/resource/interface.go b/vendor/k8s.io/client-go/informers/resource/interface.go index 170d29d80..0d75732af 100644 --- a/vendor/k8s.io/client-go/informers/resource/interface.go +++ b/vendor/k8s.io/client-go/informers/resource/interface.go @@ -21,12 +21,15 @@ package resource import ( internalinterfaces "k8s.io/client-go/informers/internalinterfaces" v1alpha3 "k8s.io/client-go/informers/resource/v1alpha3" + v1beta1 "k8s.io/client-go/informers/resource/v1beta1" ) // Interface provides access to each of this group's versions. type Interface interface { // V1alpha3 provides access to shared informers for resources in V1alpha3. V1alpha3() v1alpha3.Interface + // V1beta1 provides access to shared informers for resources in V1beta1. + V1beta1() v1beta1.Interface } type group struct { @@ -44,3 +47,8 @@ func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakList func (g *group) V1alpha3() v1alpha3.Interface { return v1alpha3.New(g.factory, g.namespace, g.tweakListOptions) } + +// V1beta1 returns a new v1beta1.Interface. +func (g *group) V1beta1() v1beta1.Interface { + return v1beta1.New(g.factory, g.namespace, g.tweakListOptions) +} diff --git a/vendor/k8s.io/client-go/informers/resource/v1alpha3/deviceclass.go b/vendor/k8s.io/client-go/informers/resource/v1alpha3/deviceclass.go index c0bcbd190..da322c8d0 100644 --- a/vendor/k8s.io/client-go/informers/resource/v1alpha3/deviceclass.go +++ b/vendor/k8s.io/client-go/informers/resource/v1alpha3/deviceclass.go @@ -19,16 +19,16 @@ limitations under the License. package v1alpha3 import ( - "context" + context "context" time "time" - resourcev1alpha3 "k8s.io/api/resource/v1alpha3" + apiresourcev1alpha3 "k8s.io/api/resource/v1alpha3" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1alpha3 "k8s.io/client-go/listers/resource/v1alpha3" + resourcev1alpha3 "k8s.io/client-go/listers/resource/v1alpha3" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // DeviceClasses. 
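Aside (not part of the diff): the import-alias churn in these generated RBAC informers, with listers now imported as rbacv1beta1 and API types as apirbacv1beta1, does not change the exported surface. Consumers keep going through the shared informer factory and the same lister types. A minimal consumer-side sketch, assuming a kubernetes.Interface named client is already constructed; the function and names are illustrative:

package example

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/cache"
)

// listRoleBindings resolves RoleBindings from the informer cache rather than
// the API server, using the same generated lister types touched above.
func listRoleBindings(client kubernetes.Interface, namespace string) error {
	factory := informers.NewSharedInformerFactory(client, 10*time.Minute)
	rbInformer := factory.Rbac().V1beta1().RoleBindings()

	stop := make(chan struct{})
	defer close(stop)
	factory.Start(stop)
	if !cache.WaitForCacheSync(stop, rbInformer.Informer().HasSynced) {
		return fmt.Errorf("rolebinding cache never synced")
	}

	bindings, err := rbInformer.Lister().RoleBindings(namespace).List(labels.Everything())
	if err != nil {
		return err
	}
	for _, rb := range bindings {
		fmt.Println(rb.Name)
	}
	return nil
}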
type DeviceClassInformer interface { Informer() cache.SharedIndexInformer - Lister() v1alpha3.DeviceClassLister + Lister() resourcev1alpha3.DeviceClassLister } type deviceClassInformer struct { @@ -70,7 +70,7 @@ func NewFilteredDeviceClassInformer(client kubernetes.Interface, resyncPeriod ti return client.ResourceV1alpha3().DeviceClasses().Watch(context.TODO(), options) }, }, - &resourcev1alpha3.DeviceClass{}, + &apiresourcev1alpha3.DeviceClass{}, resyncPeriod, indexers, ) @@ -81,9 +81,9 @@ func (f *deviceClassInformer) defaultInformer(client kubernetes.Interface, resyn } func (f *deviceClassInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&resourcev1alpha3.DeviceClass{}, f.defaultInformer) + return f.factory.InformerFor(&apiresourcev1alpha3.DeviceClass{}, f.defaultInformer) } -func (f *deviceClassInformer) Lister() v1alpha3.DeviceClassLister { - return v1alpha3.NewDeviceClassLister(f.Informer().GetIndexer()) +func (f *deviceClassInformer) Lister() resourcev1alpha3.DeviceClassLister { + return resourcev1alpha3.NewDeviceClassLister(f.Informer().GetIndexer()) } diff --git a/vendor/k8s.io/client-go/informers/resource/v1alpha3/interface.go b/vendor/k8s.io/client-go/informers/resource/v1alpha3/interface.go index 481a7de45..356c46179 100644 --- a/vendor/k8s.io/client-go/informers/resource/v1alpha3/interface.go +++ b/vendor/k8s.io/client-go/informers/resource/v1alpha3/interface.go @@ -26,8 +26,6 @@ import ( type Interface interface { // DeviceClasses returns a DeviceClassInformer. DeviceClasses() DeviceClassInformer - // PodSchedulingContexts returns a PodSchedulingContextInformer. - PodSchedulingContexts() PodSchedulingContextInformer // ResourceClaims returns a ResourceClaimInformer. ResourceClaims() ResourceClaimInformer // ResourceClaimTemplates returns a ResourceClaimTemplateInformer. @@ -52,11 +50,6 @@ func (v *version) DeviceClasses() DeviceClassInformer { return &deviceClassInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} } -// PodSchedulingContexts returns a PodSchedulingContextInformer. -func (v *version) PodSchedulingContexts() PodSchedulingContextInformer { - return &podSchedulingContextInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} -} - // ResourceClaims returns a ResourceClaimInformer. func (v *version) ResourceClaims() ResourceClaimInformer { return &resourceClaimInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} diff --git a/vendor/k8s.io/client-go/informers/resource/v1alpha3/resourceclaim.go b/vendor/k8s.io/client-go/informers/resource/v1alpha3/resourceclaim.go index fa644579b..822d145bc 100644 --- a/vendor/k8s.io/client-go/informers/resource/v1alpha3/resourceclaim.go +++ b/vendor/k8s.io/client-go/informers/resource/v1alpha3/resourceclaim.go @@ -19,16 +19,16 @@ limitations under the License. package v1alpha3 import ( - "context" + context "context" time "time" - resourcev1alpha3 "k8s.io/api/resource/v1alpha3" + apiresourcev1alpha3 "k8s.io/api/resource/v1alpha3" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1alpha3 "k8s.io/client-go/listers/resource/v1alpha3" + resourcev1alpha3 "k8s.io/client-go/listers/resource/v1alpha3" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // ResourceClaims. 
type ResourceClaimInformer interface { Informer() cache.SharedIndexInformer - Lister() v1alpha3.ResourceClaimLister + Lister() resourcev1alpha3.ResourceClaimLister } type resourceClaimInformer struct { @@ -71,7 +71,7 @@ func NewFilteredResourceClaimInformer(client kubernetes.Interface, namespace str return client.ResourceV1alpha3().ResourceClaims(namespace).Watch(context.TODO(), options) }, }, - &resourcev1alpha3.ResourceClaim{}, + &apiresourcev1alpha3.ResourceClaim{}, resyncPeriod, indexers, ) @@ -82,9 +82,9 @@ func (f *resourceClaimInformer) defaultInformer(client kubernetes.Interface, res } func (f *resourceClaimInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&resourcev1alpha3.ResourceClaim{}, f.defaultInformer) + return f.factory.InformerFor(&apiresourcev1alpha3.ResourceClaim{}, f.defaultInformer) } -func (f *resourceClaimInformer) Lister() v1alpha3.ResourceClaimLister { - return v1alpha3.NewResourceClaimLister(f.Informer().GetIndexer()) +func (f *resourceClaimInformer) Lister() resourcev1alpha3.ResourceClaimLister { + return resourcev1alpha3.NewResourceClaimLister(f.Informer().GetIndexer()) } diff --git a/vendor/k8s.io/client-go/informers/resource/v1alpha3/resourceclaimtemplate.go b/vendor/k8s.io/client-go/informers/resource/v1alpha3/resourceclaimtemplate.go index 294755661..94680730a 100644 --- a/vendor/k8s.io/client-go/informers/resource/v1alpha3/resourceclaimtemplate.go +++ b/vendor/k8s.io/client-go/informers/resource/v1alpha3/resourceclaimtemplate.go @@ -19,16 +19,16 @@ limitations under the License. package v1alpha3 import ( - "context" + context "context" time "time" - resourcev1alpha3 "k8s.io/api/resource/v1alpha3" + apiresourcev1alpha3 "k8s.io/api/resource/v1alpha3" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1alpha3 "k8s.io/client-go/listers/resource/v1alpha3" + resourcev1alpha3 "k8s.io/client-go/listers/resource/v1alpha3" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // ResourceClaimTemplates. 
type ResourceClaimTemplateInformer interface { Informer() cache.SharedIndexInformer - Lister() v1alpha3.ResourceClaimTemplateLister + Lister() resourcev1alpha3.ResourceClaimTemplateLister } type resourceClaimTemplateInformer struct { @@ -71,7 +71,7 @@ func NewFilteredResourceClaimTemplateInformer(client kubernetes.Interface, names return client.ResourceV1alpha3().ResourceClaimTemplates(namespace).Watch(context.TODO(), options) }, }, - &resourcev1alpha3.ResourceClaimTemplate{}, + &apiresourcev1alpha3.ResourceClaimTemplate{}, resyncPeriod, indexers, ) @@ -82,9 +82,9 @@ func (f *resourceClaimTemplateInformer) defaultInformer(client kubernetes.Interf } func (f *resourceClaimTemplateInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&resourcev1alpha3.ResourceClaimTemplate{}, f.defaultInformer) + return f.factory.InformerFor(&apiresourcev1alpha3.ResourceClaimTemplate{}, f.defaultInformer) } -func (f *resourceClaimTemplateInformer) Lister() v1alpha3.ResourceClaimTemplateLister { - return v1alpha3.NewResourceClaimTemplateLister(f.Informer().GetIndexer()) +func (f *resourceClaimTemplateInformer) Lister() resourcev1alpha3.ResourceClaimTemplateLister { + return resourcev1alpha3.NewResourceClaimTemplateLister(f.Informer().GetIndexer()) } diff --git a/vendor/k8s.io/client-go/informers/resource/v1alpha3/resourceslice.go b/vendor/k8s.io/client-go/informers/resource/v1alpha3/resourceslice.go index 108083530..15394575f 100644 --- a/vendor/k8s.io/client-go/informers/resource/v1alpha3/resourceslice.go +++ b/vendor/k8s.io/client-go/informers/resource/v1alpha3/resourceslice.go @@ -19,16 +19,16 @@ limitations under the License. package v1alpha3 import ( - "context" + context "context" time "time" - resourcev1alpha3 "k8s.io/api/resource/v1alpha3" + apiresourcev1alpha3 "k8s.io/api/resource/v1alpha3" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1alpha3 "k8s.io/client-go/listers/resource/v1alpha3" + resourcev1alpha3 "k8s.io/client-go/listers/resource/v1alpha3" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // ResourceSlices. 
type ResourceSliceInformer interface { Informer() cache.SharedIndexInformer - Lister() v1alpha3.ResourceSliceLister + Lister() resourcev1alpha3.ResourceSliceLister } type resourceSliceInformer struct { @@ -70,7 +70,7 @@ func NewFilteredResourceSliceInformer(client kubernetes.Interface, resyncPeriod return client.ResourceV1alpha3().ResourceSlices().Watch(context.TODO(), options) }, }, - &resourcev1alpha3.ResourceSlice{}, + &apiresourcev1alpha3.ResourceSlice{}, resyncPeriod, indexers, ) @@ -81,9 +81,9 @@ func (f *resourceSliceInformer) defaultInformer(client kubernetes.Interface, res } func (f *resourceSliceInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&resourcev1alpha3.ResourceSlice{}, f.defaultInformer) + return f.factory.InformerFor(&apiresourcev1alpha3.ResourceSlice{}, f.defaultInformer) } -func (f *resourceSliceInformer) Lister() v1alpha3.ResourceSliceLister { - return v1alpha3.NewResourceSliceLister(f.Informer().GetIndexer()) +func (f *resourceSliceInformer) Lister() resourcev1alpha3.ResourceSliceLister { + return resourcev1alpha3.NewResourceSliceLister(f.Informer().GetIndexer()) } diff --git a/vendor/k8s.io/client-go/informers/resource/v1beta1/deviceclass.go b/vendor/k8s.io/client-go/informers/resource/v1beta1/deviceclass.go new file mode 100644 index 000000000..9623788c4 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/resource/v1beta1/deviceclass.go @@ -0,0 +1,89 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1beta1 + +import ( + context "context" + time "time" + + apiresourcev1beta1 "k8s.io/api/resource/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + resourcev1beta1 "k8s.io/client-go/listers/resource/v1beta1" + cache "k8s.io/client-go/tools/cache" +) + +// DeviceClassInformer provides access to a shared informer and lister for +// DeviceClasses. +type DeviceClassInformer interface { + Informer() cache.SharedIndexInformer + Lister() resourcev1beta1.DeviceClassLister +} + +type deviceClassInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewDeviceClassInformer constructs a new informer for DeviceClass type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewDeviceClassInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredDeviceClassInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredDeviceClassInformer constructs a new informer for DeviceClass type. 
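Context note (not part of the diff): the resource/v1beta1 informer files added here are new in the v0.32 line, while the v1alpha3 PodSchedulingContexts informer removed above has no counterpart in them. A consumer-side sketch of the new DeviceClass informer wired through the shared factory; handler bodies and names are illustrative:

package example

import (
	"fmt"
	"time"

	resourcev1beta1 "k8s.io/api/resource/v1beta1"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/cache"
)

// watchDeviceClasses registers an event handler on the new
// resource.k8s.io/v1beta1 DeviceClass informer and waits for its cache to sync.
func watchDeviceClasses(client kubernetes.Interface, stop <-chan struct{}) error {
	factory := informers.NewSharedInformerFactory(client, 30*time.Minute)
	dcInformer := factory.Resource().V1beta1().DeviceClasses().Informer()

	_, err := dcInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc: func(obj interface{}) {
			if dc, ok := obj.(*resourcev1beta1.DeviceClass); ok {
				fmt.Printf("device class added: %s\n", dc.Name)
			}
		},
		DeleteFunc: func(obj interface{}) {
			fmt.Println("device class deleted")
		},
	})
	if err != nil {
		return err
	}

	factory.Start(stop)
	factory.WaitForCacheSync(stop)
	return nil
}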
+// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredDeviceClassInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ResourceV1beta1().DeviceClasses().List(context.TODO(), options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ResourceV1beta1().DeviceClasses().Watch(context.TODO(), options) + }, + }, + &apiresourcev1beta1.DeviceClass{}, + resyncPeriod, + indexers, + ) +} + +func (f *deviceClassInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredDeviceClassInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *deviceClassInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&apiresourcev1beta1.DeviceClass{}, f.defaultInformer) +} + +func (f *deviceClassInformer) Lister() resourcev1beta1.DeviceClassLister { + return resourcev1beta1.NewDeviceClassLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/resource/v1beta1/interface.go b/vendor/k8s.io/client-go/informers/resource/v1beta1/interface.go new file mode 100644 index 000000000..07330763b --- /dev/null +++ b/vendor/k8s.io/client-go/informers/resource/v1beta1/interface.go @@ -0,0 +1,66 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1beta1 + +import ( + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" +) + +// Interface provides access to all the informers in this group version. +type Interface interface { + // DeviceClasses returns a DeviceClassInformer. + DeviceClasses() DeviceClassInformer + // ResourceClaims returns a ResourceClaimInformer. + ResourceClaims() ResourceClaimInformer + // ResourceClaimTemplates returns a ResourceClaimTemplateInformer. + ResourceClaimTemplates() ResourceClaimTemplateInformer + // ResourceSlices returns a ResourceSliceInformer. + ResourceSlices() ResourceSliceInformer +} + +type version struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. 
+func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// DeviceClasses returns a DeviceClassInformer. +func (v *version) DeviceClasses() DeviceClassInformer { + return &deviceClassInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} + +// ResourceClaims returns a ResourceClaimInformer. +func (v *version) ResourceClaims() ResourceClaimInformer { + return &resourceClaimInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + +// ResourceClaimTemplates returns a ResourceClaimTemplateInformer. +func (v *version) ResourceClaimTemplates() ResourceClaimTemplateInformer { + return &resourceClaimTemplateInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + +// ResourceSlices returns a ResourceSliceInformer. +func (v *version) ResourceSlices() ResourceSliceInformer { + return &resourceSliceInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} diff --git a/vendor/k8s.io/client-go/informers/resource/v1beta1/resourceclaim.go b/vendor/k8s.io/client-go/informers/resource/v1beta1/resourceclaim.go new file mode 100644 index 000000000..107b7fda7 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/resource/v1beta1/resourceclaim.go @@ -0,0 +1,90 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1beta1 + +import ( + context "context" + time "time" + + apiresourcev1beta1 "k8s.io/api/resource/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + resourcev1beta1 "k8s.io/client-go/listers/resource/v1beta1" + cache "k8s.io/client-go/tools/cache" +) + +// ResourceClaimInformer provides access to a shared informer and lister for +// ResourceClaims. +type ResourceClaimInformer interface { + Informer() cache.SharedIndexInformer + Lister() resourcev1beta1.ResourceClaimLister +} + +type resourceClaimInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewResourceClaimInformer constructs a new informer for ResourceClaim type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewResourceClaimInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredResourceClaimInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredResourceClaimInformer constructs a new informer for ResourceClaim type. 
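Aside (not part of the diff): each generated constructor threads a tweakListOptions hook through to its List and Watch calls, as in the ResourceClaim informer here. At the factory level the same narrowing is done with options; a sketch, where the label selector is purely illustrative:

package example

import (
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
)

// filteredFactory scopes every informer it hands out to one namespace and a
// label selector, mirroring the tweakListOptions hook in the generated code.
func filteredFactory(client kubernetes.Interface, namespace string) informers.SharedInformerFactory {
	return informers.NewSharedInformerFactoryWithOptions(
		client,
		15*time.Minute,
		informers.WithNamespace(namespace),
		informers.WithTweakListOptions(func(opts *metav1.ListOptions) {
			opts.LabelSelector = "example.com/managed=true" // illustrative selector
		}),
	)
}

Callers then request informers from the returned factory, for example factory.Resource().V1beta1().ResourceClaims(), and Start it as usual.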
+// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredResourceClaimInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ResourceV1beta1().ResourceClaims(namespace).List(context.TODO(), options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ResourceV1beta1().ResourceClaims(namespace).Watch(context.TODO(), options) + }, + }, + &apiresourcev1beta1.ResourceClaim{}, + resyncPeriod, + indexers, + ) +} + +func (f *resourceClaimInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredResourceClaimInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *resourceClaimInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&apiresourcev1beta1.ResourceClaim{}, f.defaultInformer) +} + +func (f *resourceClaimInformer) Lister() resourcev1beta1.ResourceClaimLister { + return resourcev1beta1.NewResourceClaimLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/resource/v1alpha3/podschedulingcontext.go b/vendor/k8s.io/client-go/informers/resource/v1beta1/resourceclaimtemplate.go similarity index 50% rename from vendor/k8s.io/client-go/informers/resource/v1alpha3/podschedulingcontext.go rename to vendor/k8s.io/client-go/informers/resource/v1beta1/resourceclaimtemplate.go index 62fb3614f..9ae634ad0 100644 --- a/vendor/k8s.io/client-go/informers/resource/v1alpha3/podschedulingcontext.go +++ b/vendor/k8s.io/client-go/informers/resource/v1beta1/resourceclaimtemplate.go @@ -16,75 +16,75 @@ limitations under the License. // Code generated by informer-gen. DO NOT EDIT. -package v1alpha3 +package v1beta1 import ( - "context" + context "context" time "time" - resourcev1alpha3 "k8s.io/api/resource/v1alpha3" + apiresourcev1beta1 "k8s.io/api/resource/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1alpha3 "k8s.io/client-go/listers/resource/v1alpha3" + resourcev1beta1 "k8s.io/client-go/listers/resource/v1beta1" cache "k8s.io/client-go/tools/cache" ) -// PodSchedulingContextInformer provides access to a shared informer and lister for -// PodSchedulingContexts. -type PodSchedulingContextInformer interface { +// ResourceClaimTemplateInformer provides access to a shared informer and lister for +// ResourceClaimTemplates. 
+type ResourceClaimTemplateInformer interface { Informer() cache.SharedIndexInformer - Lister() v1alpha3.PodSchedulingContextLister + Lister() resourcev1beta1.ResourceClaimTemplateLister } -type podSchedulingContextInformer struct { +type resourceClaimTemplateInformer struct { factory internalinterfaces.SharedInformerFactory tweakListOptions internalinterfaces.TweakListOptionsFunc namespace string } -// NewPodSchedulingContextInformer constructs a new informer for PodSchedulingContext type. +// NewResourceClaimTemplateInformer constructs a new informer for ResourceClaimTemplate type. // Always prefer using an informer factory to get a shared informer instead of getting an independent // one. This reduces memory footprint and number of connections to the server. -func NewPodSchedulingContextInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { - return NewFilteredPodSchedulingContextInformer(client, namespace, resyncPeriod, indexers, nil) +func NewResourceClaimTemplateInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredResourceClaimTemplateInformer(client, namespace, resyncPeriod, indexers, nil) } -// NewFilteredPodSchedulingContextInformer constructs a new informer for PodSchedulingContext type. +// NewFilteredResourceClaimTemplateInformer constructs a new informer for ResourceClaimTemplate type. // Always prefer using an informer factory to get a shared informer instead of getting an independent // one. This reduces memory footprint and number of connections to the server. -func NewFilteredPodSchedulingContextInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { +func NewFilteredResourceClaimTemplateInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { return cache.NewSharedIndexInformer( &cache.ListWatch{ ListFunc: func(options v1.ListOptions) (runtime.Object, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.ResourceV1alpha3().PodSchedulingContexts(namespace).List(context.TODO(), options) + return client.ResourceV1beta1().ResourceClaimTemplates(namespace).List(context.TODO(), options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.ResourceV1alpha3().PodSchedulingContexts(namespace).Watch(context.TODO(), options) + return client.ResourceV1beta1().ResourceClaimTemplates(namespace).Watch(context.TODO(), options) }, }, - &resourcev1alpha3.PodSchedulingContext{}, + &apiresourcev1beta1.ResourceClaimTemplate{}, resyncPeriod, indexers, ) } -func (f *podSchedulingContextInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewFilteredPodSchedulingContextInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +func (f *resourceClaimTemplateInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredResourceClaimTemplateInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: 
cache.MetaNamespaceIndexFunc}, f.tweakListOptions) } -func (f *podSchedulingContextInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&resourcev1alpha3.PodSchedulingContext{}, f.defaultInformer) +func (f *resourceClaimTemplateInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&apiresourcev1beta1.ResourceClaimTemplate{}, f.defaultInformer) } -func (f *podSchedulingContextInformer) Lister() v1alpha3.PodSchedulingContextLister { - return v1alpha3.NewPodSchedulingContextLister(f.Informer().GetIndexer()) +func (f *resourceClaimTemplateInformer) Lister() resourcev1beta1.ResourceClaimTemplateLister { + return resourcev1beta1.NewResourceClaimTemplateLister(f.Informer().GetIndexer()) } diff --git a/vendor/k8s.io/client-go/informers/resource/v1beta1/resourceslice.go b/vendor/k8s.io/client-go/informers/resource/v1beta1/resourceslice.go new file mode 100644 index 000000000..8ab6cb4fc --- /dev/null +++ b/vendor/k8s.io/client-go/informers/resource/v1beta1/resourceslice.go @@ -0,0 +1,89 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1beta1 + +import ( + context "context" + time "time" + + apiresourcev1beta1 "k8s.io/api/resource/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + resourcev1beta1 "k8s.io/client-go/listers/resource/v1beta1" + cache "k8s.io/client-go/tools/cache" +) + +// ResourceSliceInformer provides access to a shared informer and lister for +// ResourceSlices. +type ResourceSliceInformer interface { + Informer() cache.SharedIndexInformer + Lister() resourcev1beta1.ResourceSliceLister +} + +type resourceSliceInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewResourceSliceInformer constructs a new informer for ResourceSlice type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewResourceSliceInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredResourceSliceInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredResourceSliceInformer constructs a new informer for ResourceSlice type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
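Aside (not part of the diff): as the generated comment says, the factory is the normal entry point, but the standalone NewFiltered*/New* constructors remain exported for cases that need custom indexers. A sketch using the new ResourceSlice constructor directly to index slices by driver name; the byDriver index name is made up, and the Spec.Driver field is assumed from the v1beta1 ResourceSlice type:

package example

import (
	"fmt"

	resourcev1beta1 "k8s.io/api/resource/v1beta1"
	resourceinformers "k8s.io/client-go/informers/resource/v1beta1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/cache"
)

// slicesByDriver runs a standalone ResourceSlice informer with an extra index
// keyed on the driver name, then answers lookups from that index.
func slicesByDriver(client kubernetes.Interface, stop <-chan struct{}, driver string) ([]interface{}, error) {
	const byDriver = "byDriver"
	informer := resourceinformers.NewResourceSliceInformer(client, 0, cache.Indexers{
		byDriver: func(obj interface{}) ([]string, error) {
			slice, ok := obj.(*resourcev1beta1.ResourceSlice)
			if !ok {
				return nil, nil
			}
			return []string{slice.Spec.Driver}, nil // assumes v1beta1 Spec.Driver
		},
	})

	go informer.Run(stop)
	if !cache.WaitForCacheSync(stop, informer.HasSynced) {
		return nil, fmt.Errorf("resource slice cache never synced")
	}
	return informer.GetIndexer().ByIndex(byDriver, driver)
}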
+func NewFilteredResourceSliceInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ResourceV1beta1().ResourceSlices().List(context.TODO(), options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ResourceV1beta1().ResourceSlices().Watch(context.TODO(), options) + }, + }, + &apiresourcev1beta1.ResourceSlice{}, + resyncPeriod, + indexers, + ) +} + +func (f *resourceSliceInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredResourceSliceInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *resourceSliceInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&apiresourcev1beta1.ResourceSlice{}, f.defaultInformer) +} + +func (f *resourceSliceInformer) Lister() resourcev1beta1.ResourceSliceLister { + return resourcev1beta1.NewResourceSliceLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/scheduling/v1/priorityclass.go b/vendor/k8s.io/client-go/informers/scheduling/v1/priorityclass.go index 730616b4a..20b9fc0dc 100644 --- a/vendor/k8s.io/client-go/informers/scheduling/v1/priorityclass.go +++ b/vendor/k8s.io/client-go/informers/scheduling/v1/priorityclass.go @@ -19,16 +19,16 @@ limitations under the License. package v1 import ( - "context" + context "context" time "time" - schedulingv1 "k8s.io/api/scheduling/v1" + apischedulingv1 "k8s.io/api/scheduling/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1 "k8s.io/client-go/listers/scheduling/v1" + schedulingv1 "k8s.io/client-go/listers/scheduling/v1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // PriorityClasses. 
type PriorityClassInformer interface { Informer() cache.SharedIndexInformer - Lister() v1.PriorityClassLister + Lister() schedulingv1.PriorityClassLister } type priorityClassInformer struct { @@ -70,7 +70,7 @@ func NewFilteredPriorityClassInformer(client kubernetes.Interface, resyncPeriod return client.SchedulingV1().PriorityClasses().Watch(context.TODO(), options) }, }, - &schedulingv1.PriorityClass{}, + &apischedulingv1.PriorityClass{}, resyncPeriod, indexers, ) @@ -81,9 +81,9 @@ func (f *priorityClassInformer) defaultInformer(client kubernetes.Interface, res } func (f *priorityClassInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&schedulingv1.PriorityClass{}, f.defaultInformer) + return f.factory.InformerFor(&apischedulingv1.PriorityClass{}, f.defaultInformer) } -func (f *priorityClassInformer) Lister() v1.PriorityClassLister { - return v1.NewPriorityClassLister(f.Informer().GetIndexer()) +func (f *priorityClassInformer) Lister() schedulingv1.PriorityClassLister { + return schedulingv1.NewPriorityClassLister(f.Informer().GetIndexer()) } diff --git a/vendor/k8s.io/client-go/informers/scheduling/v1alpha1/priorityclass.go b/vendor/k8s.io/client-go/informers/scheduling/v1alpha1/priorityclass.go index f82b66436..904bc6c4e 100644 --- a/vendor/k8s.io/client-go/informers/scheduling/v1alpha1/priorityclass.go +++ b/vendor/k8s.io/client-go/informers/scheduling/v1alpha1/priorityclass.go @@ -19,16 +19,16 @@ limitations under the License. package v1alpha1 import ( - "context" + context "context" time "time" - schedulingv1alpha1 "k8s.io/api/scheduling/v1alpha1" + apischedulingv1alpha1 "k8s.io/api/scheduling/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1alpha1 "k8s.io/client-go/listers/scheduling/v1alpha1" + schedulingv1alpha1 "k8s.io/client-go/listers/scheduling/v1alpha1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // PriorityClasses. 
type PriorityClassInformer interface { Informer() cache.SharedIndexInformer - Lister() v1alpha1.PriorityClassLister + Lister() schedulingv1alpha1.PriorityClassLister } type priorityClassInformer struct { @@ -70,7 +70,7 @@ func NewFilteredPriorityClassInformer(client kubernetes.Interface, resyncPeriod return client.SchedulingV1alpha1().PriorityClasses().Watch(context.TODO(), options) }, }, - &schedulingv1alpha1.PriorityClass{}, + &apischedulingv1alpha1.PriorityClass{}, resyncPeriod, indexers, ) @@ -81,9 +81,9 @@ func (f *priorityClassInformer) defaultInformer(client kubernetes.Interface, res } func (f *priorityClassInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&schedulingv1alpha1.PriorityClass{}, f.defaultInformer) + return f.factory.InformerFor(&apischedulingv1alpha1.PriorityClass{}, f.defaultInformer) } -func (f *priorityClassInformer) Lister() v1alpha1.PriorityClassLister { - return v1alpha1.NewPriorityClassLister(f.Informer().GetIndexer()) +func (f *priorityClassInformer) Lister() schedulingv1alpha1.PriorityClassLister { + return schedulingv1alpha1.NewPriorityClassLister(f.Informer().GetIndexer()) } diff --git a/vendor/k8s.io/client-go/informers/scheduling/v1beta1/priorityclass.go b/vendor/k8s.io/client-go/informers/scheduling/v1beta1/priorityclass.go index fc7848891..299d37673 100644 --- a/vendor/k8s.io/client-go/informers/scheduling/v1beta1/priorityclass.go +++ b/vendor/k8s.io/client-go/informers/scheduling/v1beta1/priorityclass.go @@ -19,16 +19,16 @@ limitations under the License. package v1beta1 import ( - "context" + context "context" time "time" - schedulingv1beta1 "k8s.io/api/scheduling/v1beta1" + apischedulingv1beta1 "k8s.io/api/scheduling/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1beta1 "k8s.io/client-go/listers/scheduling/v1beta1" + schedulingv1beta1 "k8s.io/client-go/listers/scheduling/v1beta1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // PriorityClasses. 
type PriorityClassInformer interface { Informer() cache.SharedIndexInformer - Lister() v1beta1.PriorityClassLister + Lister() schedulingv1beta1.PriorityClassLister } type priorityClassInformer struct { @@ -70,7 +70,7 @@ func NewFilteredPriorityClassInformer(client kubernetes.Interface, resyncPeriod return client.SchedulingV1beta1().PriorityClasses().Watch(context.TODO(), options) }, }, - &schedulingv1beta1.PriorityClass{}, + &apischedulingv1beta1.PriorityClass{}, resyncPeriod, indexers, ) @@ -81,9 +81,9 @@ func (f *priorityClassInformer) defaultInformer(client kubernetes.Interface, res } func (f *priorityClassInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&schedulingv1beta1.PriorityClass{}, f.defaultInformer) + return f.factory.InformerFor(&apischedulingv1beta1.PriorityClass{}, f.defaultInformer) } -func (f *priorityClassInformer) Lister() v1beta1.PriorityClassLister { - return v1beta1.NewPriorityClassLister(f.Informer().GetIndexer()) +func (f *priorityClassInformer) Lister() schedulingv1beta1.PriorityClassLister { + return schedulingv1beta1.NewPriorityClassLister(f.Informer().GetIndexer()) } diff --git a/vendor/k8s.io/client-go/informers/storage/v1/csidriver.go b/vendor/k8s.io/client-go/informers/storage/v1/csidriver.go index 6fd1e678d..79282873b 100644 --- a/vendor/k8s.io/client-go/informers/storage/v1/csidriver.go +++ b/vendor/k8s.io/client-go/informers/storage/v1/csidriver.go @@ -19,16 +19,16 @@ limitations under the License. package v1 import ( - "context" + context "context" time "time" - storagev1 "k8s.io/api/storage/v1" + apistoragev1 "k8s.io/api/storage/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1 "k8s.io/client-go/listers/storage/v1" + storagev1 "k8s.io/client-go/listers/storage/v1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // CSIDrivers. type CSIDriverInformer interface { Informer() cache.SharedIndexInformer - Lister() v1.CSIDriverLister + Lister() storagev1.CSIDriverLister } type cSIDriverInformer struct { @@ -70,7 +70,7 @@ func NewFilteredCSIDriverInformer(client kubernetes.Interface, resyncPeriod time return client.StorageV1().CSIDrivers().Watch(context.TODO(), options) }, }, - &storagev1.CSIDriver{}, + &apistoragev1.CSIDriver{}, resyncPeriod, indexers, ) @@ -81,9 +81,9 @@ func (f *cSIDriverInformer) defaultInformer(client kubernetes.Interface, resyncP } func (f *cSIDriverInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&storagev1.CSIDriver{}, f.defaultInformer) + return f.factory.InformerFor(&apistoragev1.CSIDriver{}, f.defaultInformer) } -func (f *cSIDriverInformer) Lister() v1.CSIDriverLister { - return v1.NewCSIDriverLister(f.Informer().GetIndexer()) +func (f *cSIDriverInformer) Lister() storagev1.CSIDriverLister { + return storagev1.NewCSIDriverLister(f.Informer().GetIndexer()) } diff --git a/vendor/k8s.io/client-go/informers/storage/v1/csinode.go b/vendor/k8s.io/client-go/informers/storage/v1/csinode.go index 96416967f..00345f897 100644 --- a/vendor/k8s.io/client-go/informers/storage/v1/csinode.go +++ b/vendor/k8s.io/client-go/informers/storage/v1/csinode.go @@ -19,16 +19,16 @@ limitations under the License. 
package v1 import ( - "context" + context "context" time "time" - storagev1 "k8s.io/api/storage/v1" + apistoragev1 "k8s.io/api/storage/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1 "k8s.io/client-go/listers/storage/v1" + storagev1 "k8s.io/client-go/listers/storage/v1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // CSINodes. type CSINodeInformer interface { Informer() cache.SharedIndexInformer - Lister() v1.CSINodeLister + Lister() storagev1.CSINodeLister } type cSINodeInformer struct { @@ -70,7 +70,7 @@ func NewFilteredCSINodeInformer(client kubernetes.Interface, resyncPeriod time.D return client.StorageV1().CSINodes().Watch(context.TODO(), options) }, }, - &storagev1.CSINode{}, + &apistoragev1.CSINode{}, resyncPeriod, indexers, ) @@ -81,9 +81,9 @@ func (f *cSINodeInformer) defaultInformer(client kubernetes.Interface, resyncPer } func (f *cSINodeInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&storagev1.CSINode{}, f.defaultInformer) + return f.factory.InformerFor(&apistoragev1.CSINode{}, f.defaultInformer) } -func (f *cSINodeInformer) Lister() v1.CSINodeLister { - return v1.NewCSINodeLister(f.Informer().GetIndexer()) +func (f *cSINodeInformer) Lister() storagev1.CSINodeLister { + return storagev1.NewCSINodeLister(f.Informer().GetIndexer()) } diff --git a/vendor/k8s.io/client-go/informers/storage/v1/csistoragecapacity.go b/vendor/k8s.io/client-go/informers/storage/v1/csistoragecapacity.go index 9b9095f3a..5a72272fc 100644 --- a/vendor/k8s.io/client-go/informers/storage/v1/csistoragecapacity.go +++ b/vendor/k8s.io/client-go/informers/storage/v1/csistoragecapacity.go @@ -19,16 +19,16 @@ limitations under the License. package v1 import ( - "context" + context "context" time "time" - storagev1 "k8s.io/api/storage/v1" + apistoragev1 "k8s.io/api/storage/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1 "k8s.io/client-go/listers/storage/v1" + storagev1 "k8s.io/client-go/listers/storage/v1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // CSIStorageCapacities. 
type CSIStorageCapacityInformer interface { Informer() cache.SharedIndexInformer - Lister() v1.CSIStorageCapacityLister + Lister() storagev1.CSIStorageCapacityLister } type cSIStorageCapacityInformer struct { @@ -71,7 +71,7 @@ func NewFilteredCSIStorageCapacityInformer(client kubernetes.Interface, namespac return client.StorageV1().CSIStorageCapacities(namespace).Watch(context.TODO(), options) }, }, - &storagev1.CSIStorageCapacity{}, + &apistoragev1.CSIStorageCapacity{}, resyncPeriod, indexers, ) @@ -82,9 +82,9 @@ func (f *cSIStorageCapacityInformer) defaultInformer(client kubernetes.Interface } func (f *cSIStorageCapacityInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&storagev1.CSIStorageCapacity{}, f.defaultInformer) + return f.factory.InformerFor(&apistoragev1.CSIStorageCapacity{}, f.defaultInformer) } -func (f *cSIStorageCapacityInformer) Lister() v1.CSIStorageCapacityLister { - return v1.NewCSIStorageCapacityLister(f.Informer().GetIndexer()) +func (f *cSIStorageCapacityInformer) Lister() storagev1.CSIStorageCapacityLister { + return storagev1.NewCSIStorageCapacityLister(f.Informer().GetIndexer()) } diff --git a/vendor/k8s.io/client-go/informers/storage/v1/storageclass.go b/vendor/k8s.io/client-go/informers/storage/v1/storageclass.go index 8cde79d9a..6eecc50f7 100644 --- a/vendor/k8s.io/client-go/informers/storage/v1/storageclass.go +++ b/vendor/k8s.io/client-go/informers/storage/v1/storageclass.go @@ -19,16 +19,16 @@ limitations under the License. package v1 import ( - "context" + context "context" time "time" - storagev1 "k8s.io/api/storage/v1" + apistoragev1 "k8s.io/api/storage/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1 "k8s.io/client-go/listers/storage/v1" + storagev1 "k8s.io/client-go/listers/storage/v1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // StorageClasses. type StorageClassInformer interface { Informer() cache.SharedIndexInformer - Lister() v1.StorageClassLister + Lister() storagev1.StorageClassLister } type storageClassInformer struct { @@ -70,7 +70,7 @@ func NewFilteredStorageClassInformer(client kubernetes.Interface, resyncPeriod t return client.StorageV1().StorageClasses().Watch(context.TODO(), options) }, }, - &storagev1.StorageClass{}, + &apistoragev1.StorageClass{}, resyncPeriod, indexers, ) @@ -81,9 +81,9 @@ func (f *storageClassInformer) defaultInformer(client kubernetes.Interface, resy } func (f *storageClassInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&storagev1.StorageClass{}, f.defaultInformer) + return f.factory.InformerFor(&apistoragev1.StorageClass{}, f.defaultInformer) } -func (f *storageClassInformer) Lister() v1.StorageClassLister { - return v1.NewStorageClassLister(f.Informer().GetIndexer()) +func (f *storageClassInformer) Lister() storagev1.StorageClassLister { + return storagev1.NewStorageClassLister(f.Informer().GetIndexer()) } diff --git a/vendor/k8s.io/client-go/informers/storage/v1/volumeattachment.go b/vendor/k8s.io/client-go/informers/storage/v1/volumeattachment.go index be605ff48..deca09cda 100644 --- a/vendor/k8s.io/client-go/informers/storage/v1/volumeattachment.go +++ b/vendor/k8s.io/client-go/informers/storage/v1/volumeattachment.go @@ -19,16 +19,16 @@ limitations under the License. 
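Aside (not part of the diff): the storage/v1 changes follow the same alias-only pattern, so lister lookups are unchanged for callers. A sketch of a cache-backed StorageClass read, assuming the factory was started and its StorageClass informer had synced before this call:

package example

import (
	storagev1 "k8s.io/api/storage/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/client-go/informers"
)

// storageClassByName serves the object from the informer cache; a NotFound
// error here means it is absent from the cache, not necessarily the cluster.
func storageClassByName(factory informers.SharedInformerFactory, name string) (*storagev1.StorageClass, error) {
	sc, err := factory.Storage().V1().StorageClasses().Lister().Get(name)
	if apierrors.IsNotFound(err) {
		return nil, nil
	}
	return sc, err
}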
package v1 import ( - "context" + context "context" time "time" - storagev1 "k8s.io/api/storage/v1" + apistoragev1 "k8s.io/api/storage/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1 "k8s.io/client-go/listers/storage/v1" + storagev1 "k8s.io/client-go/listers/storage/v1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // VolumeAttachments. type VolumeAttachmentInformer interface { Informer() cache.SharedIndexInformer - Lister() v1.VolumeAttachmentLister + Lister() storagev1.VolumeAttachmentLister } type volumeAttachmentInformer struct { @@ -70,7 +70,7 @@ func NewFilteredVolumeAttachmentInformer(client kubernetes.Interface, resyncPeri return client.StorageV1().VolumeAttachments().Watch(context.TODO(), options) }, }, - &storagev1.VolumeAttachment{}, + &apistoragev1.VolumeAttachment{}, resyncPeriod, indexers, ) @@ -81,9 +81,9 @@ func (f *volumeAttachmentInformer) defaultInformer(client kubernetes.Interface, } func (f *volumeAttachmentInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&storagev1.VolumeAttachment{}, f.defaultInformer) + return f.factory.InformerFor(&apistoragev1.VolumeAttachment{}, f.defaultInformer) } -func (f *volumeAttachmentInformer) Lister() v1.VolumeAttachmentLister { - return v1.NewVolumeAttachmentLister(f.Informer().GetIndexer()) +func (f *volumeAttachmentInformer) Lister() storagev1.VolumeAttachmentLister { + return storagev1.NewVolumeAttachmentLister(f.Informer().GetIndexer()) } diff --git a/vendor/k8s.io/client-go/informers/storage/v1alpha1/csistoragecapacity.go b/vendor/k8s.io/client-go/informers/storage/v1alpha1/csistoragecapacity.go index e59dfab2d..2253f700e 100644 --- a/vendor/k8s.io/client-go/informers/storage/v1alpha1/csistoragecapacity.go +++ b/vendor/k8s.io/client-go/informers/storage/v1alpha1/csistoragecapacity.go @@ -19,16 +19,16 @@ limitations under the License. package v1alpha1 import ( - "context" + context "context" time "time" - storagev1alpha1 "k8s.io/api/storage/v1alpha1" + apistoragev1alpha1 "k8s.io/api/storage/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1alpha1 "k8s.io/client-go/listers/storage/v1alpha1" + storagev1alpha1 "k8s.io/client-go/listers/storage/v1alpha1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // CSIStorageCapacities. 
type CSIStorageCapacityInformer interface { Informer() cache.SharedIndexInformer - Lister() v1alpha1.CSIStorageCapacityLister + Lister() storagev1alpha1.CSIStorageCapacityLister } type cSIStorageCapacityInformer struct { @@ -71,7 +71,7 @@ func NewFilteredCSIStorageCapacityInformer(client kubernetes.Interface, namespac return client.StorageV1alpha1().CSIStorageCapacities(namespace).Watch(context.TODO(), options) }, }, - &storagev1alpha1.CSIStorageCapacity{}, + &apistoragev1alpha1.CSIStorageCapacity{}, resyncPeriod, indexers, ) @@ -82,9 +82,9 @@ func (f *cSIStorageCapacityInformer) defaultInformer(client kubernetes.Interface } func (f *cSIStorageCapacityInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&storagev1alpha1.CSIStorageCapacity{}, f.defaultInformer) + return f.factory.InformerFor(&apistoragev1alpha1.CSIStorageCapacity{}, f.defaultInformer) } -func (f *cSIStorageCapacityInformer) Lister() v1alpha1.CSIStorageCapacityLister { - return v1alpha1.NewCSIStorageCapacityLister(f.Informer().GetIndexer()) +func (f *cSIStorageCapacityInformer) Lister() storagev1alpha1.CSIStorageCapacityLister { + return storagev1alpha1.NewCSIStorageCapacityLister(f.Informer().GetIndexer()) } diff --git a/vendor/k8s.io/client-go/informers/storage/v1alpha1/volumeattachment.go b/vendor/k8s.io/client-go/informers/storage/v1alpha1/volumeattachment.go index 445496dad..f31989953 100644 --- a/vendor/k8s.io/client-go/informers/storage/v1alpha1/volumeattachment.go +++ b/vendor/k8s.io/client-go/informers/storage/v1alpha1/volumeattachment.go @@ -19,16 +19,16 @@ limitations under the License. package v1alpha1 import ( - "context" + context "context" time "time" - storagev1alpha1 "k8s.io/api/storage/v1alpha1" + apistoragev1alpha1 "k8s.io/api/storage/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1alpha1 "k8s.io/client-go/listers/storage/v1alpha1" + storagev1alpha1 "k8s.io/client-go/listers/storage/v1alpha1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // VolumeAttachments. 
type VolumeAttachmentInformer interface { Informer() cache.SharedIndexInformer - Lister() v1alpha1.VolumeAttachmentLister + Lister() storagev1alpha1.VolumeAttachmentLister } type volumeAttachmentInformer struct { @@ -70,7 +70,7 @@ func NewFilteredVolumeAttachmentInformer(client kubernetes.Interface, resyncPeri return client.StorageV1alpha1().VolumeAttachments().Watch(context.TODO(), options) }, }, - &storagev1alpha1.VolumeAttachment{}, + &apistoragev1alpha1.VolumeAttachment{}, resyncPeriod, indexers, ) @@ -81,9 +81,9 @@ func (f *volumeAttachmentInformer) defaultInformer(client kubernetes.Interface, } func (f *volumeAttachmentInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&storagev1alpha1.VolumeAttachment{}, f.defaultInformer) + return f.factory.InformerFor(&apistoragev1alpha1.VolumeAttachment{}, f.defaultInformer) } -func (f *volumeAttachmentInformer) Lister() v1alpha1.VolumeAttachmentLister { - return v1alpha1.NewVolumeAttachmentLister(f.Informer().GetIndexer()) +func (f *volumeAttachmentInformer) Lister() storagev1alpha1.VolumeAttachmentLister { + return storagev1alpha1.NewVolumeAttachmentLister(f.Informer().GetIndexer()) } diff --git a/vendor/k8s.io/client-go/informers/storage/v1alpha1/volumeattributesclass.go b/vendor/k8s.io/client-go/informers/storage/v1alpha1/volumeattributesclass.go index 5e62e2f42..8a688312a 100644 --- a/vendor/k8s.io/client-go/informers/storage/v1alpha1/volumeattributesclass.go +++ b/vendor/k8s.io/client-go/informers/storage/v1alpha1/volumeattributesclass.go @@ -19,16 +19,16 @@ limitations under the License. package v1alpha1 import ( - "context" + context "context" time "time" - storagev1alpha1 "k8s.io/api/storage/v1alpha1" + apistoragev1alpha1 "k8s.io/api/storage/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1alpha1 "k8s.io/client-go/listers/storage/v1alpha1" + storagev1alpha1 "k8s.io/client-go/listers/storage/v1alpha1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // VolumeAttributesClasses. 
type VolumeAttributesClassInformer interface { Informer() cache.SharedIndexInformer - Lister() v1alpha1.VolumeAttributesClassLister + Lister() storagev1alpha1.VolumeAttributesClassLister } type volumeAttributesClassInformer struct { @@ -70,7 +70,7 @@ func NewFilteredVolumeAttributesClassInformer(client kubernetes.Interface, resyn return client.StorageV1alpha1().VolumeAttributesClasses().Watch(context.TODO(), options) }, }, - &storagev1alpha1.VolumeAttributesClass{}, + &apistoragev1alpha1.VolumeAttributesClass{}, resyncPeriod, indexers, ) @@ -81,9 +81,9 @@ func (f *volumeAttributesClassInformer) defaultInformer(client kubernetes.Interf } func (f *volumeAttributesClassInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&storagev1alpha1.VolumeAttributesClass{}, f.defaultInformer) + return f.factory.InformerFor(&apistoragev1alpha1.VolumeAttributesClass{}, f.defaultInformer) } -func (f *volumeAttributesClassInformer) Lister() v1alpha1.VolumeAttributesClassLister { - return v1alpha1.NewVolumeAttributesClassLister(f.Informer().GetIndexer()) +func (f *volumeAttributesClassInformer) Lister() storagev1alpha1.VolumeAttributesClassLister { + return storagev1alpha1.NewVolumeAttributesClassLister(f.Informer().GetIndexer()) } diff --git a/vendor/k8s.io/client-go/informers/storage/v1beta1/csidriver.go b/vendor/k8s.io/client-go/informers/storage/v1beta1/csidriver.go index f138a915b..f538deed5 100644 --- a/vendor/k8s.io/client-go/informers/storage/v1beta1/csidriver.go +++ b/vendor/k8s.io/client-go/informers/storage/v1beta1/csidriver.go @@ -19,16 +19,16 @@ limitations under the License. package v1beta1 import ( - "context" + context "context" time "time" - storagev1beta1 "k8s.io/api/storage/v1beta1" + apistoragev1beta1 "k8s.io/api/storage/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1beta1 "k8s.io/client-go/listers/storage/v1beta1" + storagev1beta1 "k8s.io/client-go/listers/storage/v1beta1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // CSIDrivers. 
type CSIDriverInformer interface { Informer() cache.SharedIndexInformer - Lister() v1beta1.CSIDriverLister + Lister() storagev1beta1.CSIDriverLister } type cSIDriverInformer struct { @@ -70,7 +70,7 @@ func NewFilteredCSIDriverInformer(client kubernetes.Interface, resyncPeriod time return client.StorageV1beta1().CSIDrivers().Watch(context.TODO(), options) }, }, - &storagev1beta1.CSIDriver{}, + &apistoragev1beta1.CSIDriver{}, resyncPeriod, indexers, ) @@ -81,9 +81,9 @@ func (f *cSIDriverInformer) defaultInformer(client kubernetes.Interface, resyncP } func (f *cSIDriverInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&storagev1beta1.CSIDriver{}, f.defaultInformer) + return f.factory.InformerFor(&apistoragev1beta1.CSIDriver{}, f.defaultInformer) } -func (f *cSIDriverInformer) Lister() v1beta1.CSIDriverLister { - return v1beta1.NewCSIDriverLister(f.Informer().GetIndexer()) +func (f *cSIDriverInformer) Lister() storagev1beta1.CSIDriverLister { + return storagev1beta1.NewCSIDriverLister(f.Informer().GetIndexer()) } diff --git a/vendor/k8s.io/client-go/informers/storage/v1beta1/csinode.go b/vendor/k8s.io/client-go/informers/storage/v1beta1/csinode.go index 6ba63172a..5d26cffdc 100644 --- a/vendor/k8s.io/client-go/informers/storage/v1beta1/csinode.go +++ b/vendor/k8s.io/client-go/informers/storage/v1beta1/csinode.go @@ -19,16 +19,16 @@ limitations under the License. package v1beta1 import ( - "context" + context "context" time "time" - storagev1beta1 "k8s.io/api/storage/v1beta1" + apistoragev1beta1 "k8s.io/api/storage/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1beta1 "k8s.io/client-go/listers/storage/v1beta1" + storagev1beta1 "k8s.io/client-go/listers/storage/v1beta1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // CSINodes. type CSINodeInformer interface { Informer() cache.SharedIndexInformer - Lister() v1beta1.CSINodeLister + Lister() storagev1beta1.CSINodeLister } type cSINodeInformer struct { @@ -70,7 +70,7 @@ func NewFilteredCSINodeInformer(client kubernetes.Interface, resyncPeriod time.D return client.StorageV1beta1().CSINodes().Watch(context.TODO(), options) }, }, - &storagev1beta1.CSINode{}, + &apistoragev1beta1.CSINode{}, resyncPeriod, indexers, ) @@ -81,9 +81,9 @@ func (f *cSINodeInformer) defaultInformer(client kubernetes.Interface, resyncPer } func (f *cSINodeInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&storagev1beta1.CSINode{}, f.defaultInformer) + return f.factory.InformerFor(&apistoragev1beta1.CSINode{}, f.defaultInformer) } -func (f *cSINodeInformer) Lister() v1beta1.CSINodeLister { - return v1beta1.NewCSINodeLister(f.Informer().GetIndexer()) +func (f *cSINodeInformer) Lister() storagev1beta1.CSINodeLister { + return storagev1beta1.NewCSINodeLister(f.Informer().GetIndexer()) } diff --git a/vendor/k8s.io/client-go/informers/storage/v1beta1/csistoragecapacity.go b/vendor/k8s.io/client-go/informers/storage/v1beta1/csistoragecapacity.go index 8f0cc4668..9ad42e9f8 100644 --- a/vendor/k8s.io/client-go/informers/storage/v1beta1/csistoragecapacity.go +++ b/vendor/k8s.io/client-go/informers/storage/v1beta1/csistoragecapacity.go @@ -19,16 +19,16 @@ limitations under the License. 
package v1beta1 import ( - "context" + context "context" time "time" - storagev1beta1 "k8s.io/api/storage/v1beta1" + apistoragev1beta1 "k8s.io/api/storage/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1beta1 "k8s.io/client-go/listers/storage/v1beta1" + storagev1beta1 "k8s.io/client-go/listers/storage/v1beta1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // CSIStorageCapacities. type CSIStorageCapacityInformer interface { Informer() cache.SharedIndexInformer - Lister() v1beta1.CSIStorageCapacityLister + Lister() storagev1beta1.CSIStorageCapacityLister } type cSIStorageCapacityInformer struct { @@ -71,7 +71,7 @@ func NewFilteredCSIStorageCapacityInformer(client kubernetes.Interface, namespac return client.StorageV1beta1().CSIStorageCapacities(namespace).Watch(context.TODO(), options) }, }, - &storagev1beta1.CSIStorageCapacity{}, + &apistoragev1beta1.CSIStorageCapacity{}, resyncPeriod, indexers, ) @@ -82,9 +82,9 @@ func (f *cSIStorageCapacityInformer) defaultInformer(client kubernetes.Interface } func (f *cSIStorageCapacityInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&storagev1beta1.CSIStorageCapacity{}, f.defaultInformer) + return f.factory.InformerFor(&apistoragev1beta1.CSIStorageCapacity{}, f.defaultInformer) } -func (f *cSIStorageCapacityInformer) Lister() v1beta1.CSIStorageCapacityLister { - return v1beta1.NewCSIStorageCapacityLister(f.Informer().GetIndexer()) +func (f *cSIStorageCapacityInformer) Lister() storagev1beta1.CSIStorageCapacityLister { + return storagev1beta1.NewCSIStorageCapacityLister(f.Informer().GetIndexer()) } diff --git a/vendor/k8s.io/client-go/informers/storage/v1beta1/storageclass.go b/vendor/k8s.io/client-go/informers/storage/v1beta1/storageclass.go index a6582bf3d..2d8649e9b 100644 --- a/vendor/k8s.io/client-go/informers/storage/v1beta1/storageclass.go +++ b/vendor/k8s.io/client-go/informers/storage/v1beta1/storageclass.go @@ -19,16 +19,16 @@ limitations under the License. package v1beta1 import ( - "context" + context "context" time "time" - storagev1beta1 "k8s.io/api/storage/v1beta1" + apistoragev1beta1 "k8s.io/api/storage/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1beta1 "k8s.io/client-go/listers/storage/v1beta1" + storagev1beta1 "k8s.io/client-go/listers/storage/v1beta1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // StorageClasses. 
type StorageClassInformer interface { Informer() cache.SharedIndexInformer - Lister() v1beta1.StorageClassLister + Lister() storagev1beta1.StorageClassLister } type storageClassInformer struct { @@ -70,7 +70,7 @@ func NewFilteredStorageClassInformer(client kubernetes.Interface, resyncPeriod t return client.StorageV1beta1().StorageClasses().Watch(context.TODO(), options) }, }, - &storagev1beta1.StorageClass{}, + &apistoragev1beta1.StorageClass{}, resyncPeriod, indexers, ) @@ -81,9 +81,9 @@ func (f *storageClassInformer) defaultInformer(client kubernetes.Interface, resy } func (f *storageClassInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&storagev1beta1.StorageClass{}, f.defaultInformer) + return f.factory.InformerFor(&apistoragev1beta1.StorageClass{}, f.defaultInformer) } -func (f *storageClassInformer) Lister() v1beta1.StorageClassLister { - return v1beta1.NewStorageClassLister(f.Informer().GetIndexer()) +func (f *storageClassInformer) Lister() storagev1beta1.StorageClassLister { + return storagev1beta1.NewStorageClassLister(f.Informer().GetIndexer()) } diff --git a/vendor/k8s.io/client-go/informers/storage/v1beta1/volumeattachment.go b/vendor/k8s.io/client-go/informers/storage/v1beta1/volumeattachment.go index e89424634..93d382693 100644 --- a/vendor/k8s.io/client-go/informers/storage/v1beta1/volumeattachment.go +++ b/vendor/k8s.io/client-go/informers/storage/v1beta1/volumeattachment.go @@ -19,16 +19,16 @@ limitations under the License. package v1beta1 import ( - "context" + context "context" time "time" - storagev1beta1 "k8s.io/api/storage/v1beta1" + apistoragev1beta1 "k8s.io/api/storage/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1beta1 "k8s.io/client-go/listers/storage/v1beta1" + storagev1beta1 "k8s.io/client-go/listers/storage/v1beta1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // VolumeAttachments. 
type VolumeAttachmentInformer interface { Informer() cache.SharedIndexInformer - Lister() v1beta1.VolumeAttachmentLister + Lister() storagev1beta1.VolumeAttachmentLister } type volumeAttachmentInformer struct { @@ -70,7 +70,7 @@ func NewFilteredVolumeAttachmentInformer(client kubernetes.Interface, resyncPeri return client.StorageV1beta1().VolumeAttachments().Watch(context.TODO(), options) }, }, - &storagev1beta1.VolumeAttachment{}, + &apistoragev1beta1.VolumeAttachment{}, resyncPeriod, indexers, ) @@ -81,9 +81,9 @@ func (f *volumeAttachmentInformer) defaultInformer(client kubernetes.Interface, } func (f *volumeAttachmentInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&storagev1beta1.VolumeAttachment{}, f.defaultInformer) + return f.factory.InformerFor(&apistoragev1beta1.VolumeAttachment{}, f.defaultInformer) } -func (f *volumeAttachmentInformer) Lister() v1beta1.VolumeAttachmentLister { - return v1beta1.NewVolumeAttachmentLister(f.Informer().GetIndexer()) +func (f *volumeAttachmentInformer) Lister() storagev1beta1.VolumeAttachmentLister { + return storagev1beta1.NewVolumeAttachmentLister(f.Informer().GetIndexer()) } diff --git a/vendor/k8s.io/client-go/informers/storage/v1beta1/volumeattributesclass.go b/vendor/k8s.io/client-go/informers/storage/v1beta1/volumeattributesclass.go index ede90ce43..dd9734bdc 100644 --- a/vendor/k8s.io/client-go/informers/storage/v1beta1/volumeattributesclass.go +++ b/vendor/k8s.io/client-go/informers/storage/v1beta1/volumeattributesclass.go @@ -19,16 +19,16 @@ limitations under the License. package v1beta1 import ( - "context" + context "context" time "time" - storagev1beta1 "k8s.io/api/storage/v1beta1" + apistoragev1beta1 "k8s.io/api/storage/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1beta1 "k8s.io/client-go/listers/storage/v1beta1" + storagev1beta1 "k8s.io/client-go/listers/storage/v1beta1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // VolumeAttributesClasses. 
type VolumeAttributesClassInformer interface { Informer() cache.SharedIndexInformer - Lister() v1beta1.VolumeAttributesClassLister + Lister() storagev1beta1.VolumeAttributesClassLister } type volumeAttributesClassInformer struct { @@ -70,7 +70,7 @@ func NewFilteredVolumeAttributesClassInformer(client kubernetes.Interface, resyn return client.StorageV1beta1().VolumeAttributesClasses().Watch(context.TODO(), options) }, }, - &storagev1beta1.VolumeAttributesClass{}, + &apistoragev1beta1.VolumeAttributesClass{}, resyncPeriod, indexers, ) @@ -81,9 +81,9 @@ func (f *volumeAttributesClassInformer) defaultInformer(client kubernetes.Interf } func (f *volumeAttributesClassInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&storagev1beta1.VolumeAttributesClass{}, f.defaultInformer) + return f.factory.InformerFor(&apistoragev1beta1.VolumeAttributesClass{}, f.defaultInformer) } -func (f *volumeAttributesClassInformer) Lister() v1beta1.VolumeAttributesClassLister { - return v1beta1.NewVolumeAttributesClassLister(f.Informer().GetIndexer()) +func (f *volumeAttributesClassInformer) Lister() storagev1beta1.VolumeAttributesClassLister { + return storagev1beta1.NewVolumeAttributesClassLister(f.Informer().GetIndexer()) } diff --git a/vendor/k8s.io/client-go/informers/storagemigration/v1alpha1/storageversionmigration.go b/vendor/k8s.io/client-go/informers/storagemigration/v1alpha1/storageversionmigration.go index 70e7c7279..49d6dd2e5 100644 --- a/vendor/k8s.io/client-go/informers/storagemigration/v1alpha1/storageversionmigration.go +++ b/vendor/k8s.io/client-go/informers/storagemigration/v1alpha1/storageversionmigration.go @@ -19,16 +19,16 @@ limitations under the License. package v1alpha1 import ( - "context" + context "context" time "time" - storagemigrationv1alpha1 "k8s.io/api/storagemigration/v1alpha1" + apistoragemigrationv1alpha1 "k8s.io/api/storagemigration/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1alpha1 "k8s.io/client-go/listers/storagemigration/v1alpha1" + storagemigrationv1alpha1 "k8s.io/client-go/listers/storagemigration/v1alpha1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // StorageVersionMigrations. 
type StorageVersionMigrationInformer interface { Informer() cache.SharedIndexInformer - Lister() v1alpha1.StorageVersionMigrationLister + Lister() storagemigrationv1alpha1.StorageVersionMigrationLister } type storageVersionMigrationInformer struct { @@ -70,7 +70,7 @@ func NewFilteredStorageVersionMigrationInformer(client kubernetes.Interface, res return client.StoragemigrationV1alpha1().StorageVersionMigrations().Watch(context.TODO(), options) }, }, - &storagemigrationv1alpha1.StorageVersionMigration{}, + &apistoragemigrationv1alpha1.StorageVersionMigration{}, resyncPeriod, indexers, ) @@ -81,9 +81,9 @@ func (f *storageVersionMigrationInformer) defaultInformer(client kubernetes.Inte } func (f *storageVersionMigrationInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&storagemigrationv1alpha1.StorageVersionMigration{}, f.defaultInformer) + return f.factory.InformerFor(&apistoragemigrationv1alpha1.StorageVersionMigration{}, f.defaultInformer) } -func (f *storageVersionMigrationInformer) Lister() v1alpha1.StorageVersionMigrationLister { - return v1alpha1.NewStorageVersionMigrationLister(f.Informer().GetIndexer()) +func (f *storageVersionMigrationInformer) Lister() storagemigrationv1alpha1.StorageVersionMigrationLister { + return storagemigrationv1alpha1.NewStorageVersionMigrationLister(f.Informer().GetIndexer()) } diff --git a/vendor/k8s.io/client-go/kubernetes/clientset.go b/vendor/k8s.io/client-go/kubernetes/clientset.go index 9cddb0bbe..a6dbc23a9 100644 --- a/vendor/k8s.io/client-go/kubernetes/clientset.go +++ b/vendor/k8s.io/client-go/kubernetes/clientset.go @@ -19,8 +19,8 @@ limitations under the License. package kubernetes import ( - "fmt" - "net/http" + fmt "fmt" + http "net/http" discovery "k8s.io/client-go/discovery" admissionregistrationv1 "k8s.io/client-go/kubernetes/typed/admissionregistration/v1" @@ -45,7 +45,7 @@ import ( certificatesv1alpha1 "k8s.io/client-go/kubernetes/typed/certificates/v1alpha1" certificatesv1beta1 "k8s.io/client-go/kubernetes/typed/certificates/v1beta1" coordinationv1 "k8s.io/client-go/kubernetes/typed/coordination/v1" - coordinationv1alpha1 "k8s.io/client-go/kubernetes/typed/coordination/v1alpha1" + coordinationv1alpha2 "k8s.io/client-go/kubernetes/typed/coordination/v1alpha2" coordinationv1beta1 "k8s.io/client-go/kubernetes/typed/coordination/v1beta1" corev1 "k8s.io/client-go/kubernetes/typed/core/v1" discoveryv1 "k8s.io/client-go/kubernetes/typed/discovery/v1" @@ -69,6 +69,7 @@ import ( rbacv1alpha1 "k8s.io/client-go/kubernetes/typed/rbac/v1alpha1" rbacv1beta1 "k8s.io/client-go/kubernetes/typed/rbac/v1beta1" resourcev1alpha3 "k8s.io/client-go/kubernetes/typed/resource/v1alpha3" + resourcev1beta1 "k8s.io/client-go/kubernetes/typed/resource/v1beta1" schedulingv1 "k8s.io/client-go/kubernetes/typed/scheduling/v1" schedulingv1alpha1 "k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1" schedulingv1beta1 "k8s.io/client-go/kubernetes/typed/scheduling/v1beta1" @@ -103,7 +104,7 @@ type Interface interface { CertificatesV1() certificatesv1.CertificatesV1Interface CertificatesV1beta1() certificatesv1beta1.CertificatesV1beta1Interface CertificatesV1alpha1() certificatesv1alpha1.CertificatesV1alpha1Interface - CoordinationV1alpha1() coordinationv1alpha1.CoordinationV1alpha1Interface + CoordinationV1alpha2() coordinationv1alpha2.CoordinationV1alpha2Interface CoordinationV1beta1() coordinationv1beta1.CoordinationV1beta1Interface CoordinationV1() coordinationv1.CoordinationV1Interface CoreV1() corev1.CoreV1Interface @@ -127,6 +128,7 @@ 
type Interface interface { RbacV1() rbacv1.RbacV1Interface RbacV1beta1() rbacv1beta1.RbacV1beta1Interface RbacV1alpha1() rbacv1alpha1.RbacV1alpha1Interface + ResourceV1beta1() resourcev1beta1.ResourceV1beta1Interface ResourceV1alpha3() resourcev1alpha3.ResourceV1alpha3Interface SchedulingV1alpha1() schedulingv1alpha1.SchedulingV1alpha1Interface SchedulingV1beta1() schedulingv1beta1.SchedulingV1beta1Interface @@ -161,7 +163,7 @@ type Clientset struct { certificatesV1 *certificatesv1.CertificatesV1Client certificatesV1beta1 *certificatesv1beta1.CertificatesV1beta1Client certificatesV1alpha1 *certificatesv1alpha1.CertificatesV1alpha1Client - coordinationV1alpha1 *coordinationv1alpha1.CoordinationV1alpha1Client + coordinationV1alpha2 *coordinationv1alpha2.CoordinationV1alpha2Client coordinationV1beta1 *coordinationv1beta1.CoordinationV1beta1Client coordinationV1 *coordinationv1.CoordinationV1Client coreV1 *corev1.CoreV1Client @@ -185,6 +187,7 @@ type Clientset struct { rbacV1 *rbacv1.RbacV1Client rbacV1beta1 *rbacv1beta1.RbacV1beta1Client rbacV1alpha1 *rbacv1alpha1.RbacV1alpha1Client + resourceV1beta1 *resourcev1beta1.ResourceV1beta1Client resourceV1alpha3 *resourcev1alpha3.ResourceV1alpha3Client schedulingV1alpha1 *schedulingv1alpha1.SchedulingV1alpha1Client schedulingV1beta1 *schedulingv1beta1.SchedulingV1beta1Client @@ -300,9 +303,9 @@ func (c *Clientset) CertificatesV1alpha1() certificatesv1alpha1.CertificatesV1al return c.certificatesV1alpha1 } -// CoordinationV1alpha1 retrieves the CoordinationV1alpha1Client -func (c *Clientset) CoordinationV1alpha1() coordinationv1alpha1.CoordinationV1alpha1Interface { - return c.coordinationV1alpha1 +// CoordinationV1alpha2 retrieves the CoordinationV1alpha2Client +func (c *Clientset) CoordinationV1alpha2() coordinationv1alpha2.CoordinationV1alpha2Interface { + return c.coordinationV1alpha2 } // CoordinationV1beta1 retrieves the CoordinationV1beta1Client @@ -420,6 +423,11 @@ func (c *Clientset) RbacV1alpha1() rbacv1alpha1.RbacV1alpha1Interface { return c.rbacV1alpha1 } +// ResourceV1beta1 retrieves the ResourceV1beta1Client +func (c *Clientset) ResourceV1beta1() resourcev1beta1.ResourceV1beta1Interface { + return c.resourceV1beta1 +} + // ResourceV1alpha3 retrieves the ResourceV1alpha3Client func (c *Clientset) ResourceV1alpha3() resourcev1alpha3.ResourceV1alpha3Interface { return c.resourceV1alpha3 @@ -588,7 +596,7 @@ func NewForConfigAndClient(c *rest.Config, httpClient *http.Client) (*Clientset, if err != nil { return nil, err } - cs.coordinationV1alpha1, err = coordinationv1alpha1.NewForConfigAndClient(&configShallowCopy, httpClient) + cs.coordinationV1alpha2, err = coordinationv1alpha2.NewForConfigAndClient(&configShallowCopy, httpClient) if err != nil { return nil, err } @@ -684,6 +692,10 @@ func NewForConfigAndClient(c *rest.Config, httpClient *http.Client) (*Clientset, if err != nil { return nil, err } + cs.resourceV1beta1, err = resourcev1beta1.NewForConfigAndClient(&configShallowCopy, httpClient) + if err != nil { + return nil, err + } cs.resourceV1alpha3, err = resourcev1alpha3.NewForConfigAndClient(&configShallowCopy, httpClient) if err != nil { return nil, err @@ -758,7 +770,7 @@ func New(c rest.Interface) *Clientset { cs.certificatesV1 = certificatesv1.New(c) cs.certificatesV1beta1 = certificatesv1beta1.New(c) cs.certificatesV1alpha1 = certificatesv1alpha1.New(c) - cs.coordinationV1alpha1 = coordinationv1alpha1.New(c) + cs.coordinationV1alpha2 = coordinationv1alpha2.New(c) cs.coordinationV1beta1 = coordinationv1beta1.New(c) 
cs.coordinationV1 = coordinationv1.New(c) cs.coreV1 = corev1.New(c) @@ -782,6 +794,7 @@ func New(c rest.Interface) *Clientset { cs.rbacV1 = rbacv1.New(c) cs.rbacV1beta1 = rbacv1beta1.New(c) cs.rbacV1alpha1 = rbacv1alpha1.New(c) + cs.resourceV1beta1 = resourcev1beta1.New(c) cs.resourceV1alpha3 = resourcev1alpha3.New(c) cs.schedulingV1alpha1 = schedulingv1alpha1.New(c) cs.schedulingV1beta1 = schedulingv1beta1.New(c) diff --git a/vendor/k8s.io/client-go/kubernetes/fake/clientset_generated.go b/vendor/k8s.io/client-go/kubernetes/fake/clientset_generated.go index 132f917ab..6b583818b 100644 --- a/vendor/k8s.io/client-go/kubernetes/fake/clientset_generated.go +++ b/vendor/k8s.io/client-go/kubernetes/fake/clientset_generated.go @@ -69,8 +69,8 @@ import ( fakecertificatesv1beta1 "k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake" coordinationv1 "k8s.io/client-go/kubernetes/typed/coordination/v1" fakecoordinationv1 "k8s.io/client-go/kubernetes/typed/coordination/v1/fake" - coordinationv1alpha1 "k8s.io/client-go/kubernetes/typed/coordination/v1alpha1" - fakecoordinationv1alpha1 "k8s.io/client-go/kubernetes/typed/coordination/v1alpha1/fake" + coordinationv1alpha2 "k8s.io/client-go/kubernetes/typed/coordination/v1alpha2" + fakecoordinationv1alpha2 "k8s.io/client-go/kubernetes/typed/coordination/v1alpha2/fake" coordinationv1beta1 "k8s.io/client-go/kubernetes/typed/coordination/v1beta1" fakecoordinationv1beta1 "k8s.io/client-go/kubernetes/typed/coordination/v1beta1/fake" corev1 "k8s.io/client-go/kubernetes/typed/core/v1" @@ -117,6 +117,8 @@ import ( fakerbacv1beta1 "k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake" resourcev1alpha3 "k8s.io/client-go/kubernetes/typed/resource/v1alpha3" fakeresourcev1alpha3 "k8s.io/client-go/kubernetes/typed/resource/v1alpha3/fake" + resourcev1beta1 "k8s.io/client-go/kubernetes/typed/resource/v1beta1" + fakeresourcev1beta1 "k8s.io/client-go/kubernetes/typed/resource/v1beta1/fake" schedulingv1 "k8s.io/client-go/kubernetes/typed/scheduling/v1" fakeschedulingv1 "k8s.io/client-go/kubernetes/typed/scheduling/v1/fake" schedulingv1alpha1 "k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1" @@ -325,9 +327,9 @@ func (c *Clientset) CertificatesV1alpha1() certificatesv1alpha1.CertificatesV1al return &fakecertificatesv1alpha1.FakeCertificatesV1alpha1{Fake: &c.Fake} } -// CoordinationV1alpha1 retrieves the CoordinationV1alpha1Client -func (c *Clientset) CoordinationV1alpha1() coordinationv1alpha1.CoordinationV1alpha1Interface { - return &fakecoordinationv1alpha1.FakeCoordinationV1alpha1{Fake: &c.Fake} +// CoordinationV1alpha2 retrieves the CoordinationV1alpha2Client +func (c *Clientset) CoordinationV1alpha2() coordinationv1alpha2.CoordinationV1alpha2Interface { + return &fakecoordinationv1alpha2.FakeCoordinationV1alpha2{Fake: &c.Fake} } // CoordinationV1beta1 retrieves the CoordinationV1beta1Client @@ -445,6 +447,11 @@ func (c *Clientset) RbacV1alpha1() rbacv1alpha1.RbacV1alpha1Interface { return &fakerbacv1alpha1.FakeRbacV1alpha1{Fake: &c.Fake} } +// ResourceV1beta1 retrieves the ResourceV1beta1Client +func (c *Clientset) ResourceV1beta1() resourcev1beta1.ResourceV1beta1Interface { + return &fakeresourcev1beta1.FakeResourceV1beta1{Fake: &c.Fake} +} + // ResourceV1alpha3 retrieves the ResourceV1alpha3Client func (c *Clientset) ResourceV1alpha3() resourcev1alpha3.ResourceV1alpha3Interface { return &fakeresourcev1alpha3.FakeResourceV1alpha3{Fake: &c.Fake} diff --git a/vendor/k8s.io/client-go/kubernetes/fake/register.go 
b/vendor/k8s.io/client-go/kubernetes/fake/register.go index 157abae5f..849b1ac90 100644 --- a/vendor/k8s.io/client-go/kubernetes/fake/register.go +++ b/vendor/k8s.io/client-go/kubernetes/fake/register.go @@ -41,7 +41,7 @@ import ( certificatesv1alpha1 "k8s.io/api/certificates/v1alpha1" certificatesv1beta1 "k8s.io/api/certificates/v1beta1" coordinationv1 "k8s.io/api/coordination/v1" - coordinationv1alpha1 "k8s.io/api/coordination/v1alpha1" + coordinationv1alpha2 "k8s.io/api/coordination/v1alpha2" coordinationv1beta1 "k8s.io/api/coordination/v1beta1" corev1 "k8s.io/api/core/v1" discoveryv1 "k8s.io/api/discovery/v1" @@ -65,6 +65,7 @@ import ( rbacv1alpha1 "k8s.io/api/rbac/v1alpha1" rbacv1beta1 "k8s.io/api/rbac/v1beta1" resourcev1alpha3 "k8s.io/api/resource/v1alpha3" + resourcev1beta1 "k8s.io/api/resource/v1beta1" schedulingv1 "k8s.io/api/scheduling/v1" schedulingv1alpha1 "k8s.io/api/scheduling/v1alpha1" schedulingv1beta1 "k8s.io/api/scheduling/v1beta1" @@ -104,7 +105,7 @@ var localSchemeBuilder = runtime.SchemeBuilder{ certificatesv1.AddToScheme, certificatesv1beta1.AddToScheme, certificatesv1alpha1.AddToScheme, - coordinationv1alpha1.AddToScheme, + coordinationv1alpha2.AddToScheme, coordinationv1beta1.AddToScheme, coordinationv1.AddToScheme, corev1.AddToScheme, @@ -128,6 +129,7 @@ var localSchemeBuilder = runtime.SchemeBuilder{ rbacv1.AddToScheme, rbacv1beta1.AddToScheme, rbacv1alpha1.AddToScheme, + resourcev1beta1.AddToScheme, resourcev1alpha3.AddToScheme, schedulingv1alpha1.AddToScheme, schedulingv1beta1.AddToScheme, diff --git a/vendor/k8s.io/client-go/kubernetes/scheme/register.go b/vendor/k8s.io/client-go/kubernetes/scheme/register.go index 5262b0f04..a9a5d8eb7 100644 --- a/vendor/k8s.io/client-go/kubernetes/scheme/register.go +++ b/vendor/k8s.io/client-go/kubernetes/scheme/register.go @@ -41,7 +41,7 @@ import ( certificatesv1alpha1 "k8s.io/api/certificates/v1alpha1" certificatesv1beta1 "k8s.io/api/certificates/v1beta1" coordinationv1 "k8s.io/api/coordination/v1" - coordinationv1alpha1 "k8s.io/api/coordination/v1alpha1" + coordinationv1alpha2 "k8s.io/api/coordination/v1alpha2" coordinationv1beta1 "k8s.io/api/coordination/v1beta1" corev1 "k8s.io/api/core/v1" discoveryv1 "k8s.io/api/discovery/v1" @@ -65,6 +65,7 @@ import ( rbacv1alpha1 "k8s.io/api/rbac/v1alpha1" rbacv1beta1 "k8s.io/api/rbac/v1beta1" resourcev1alpha3 "k8s.io/api/resource/v1alpha3" + resourcev1beta1 "k8s.io/api/resource/v1beta1" schedulingv1 "k8s.io/api/scheduling/v1" schedulingv1alpha1 "k8s.io/api/scheduling/v1alpha1" schedulingv1beta1 "k8s.io/api/scheduling/v1beta1" @@ -104,7 +105,7 @@ var localSchemeBuilder = runtime.SchemeBuilder{ certificatesv1.AddToScheme, certificatesv1beta1.AddToScheme, certificatesv1alpha1.AddToScheme, - coordinationv1alpha1.AddToScheme, + coordinationv1alpha2.AddToScheme, coordinationv1beta1.AddToScheme, coordinationv1.AddToScheme, corev1.AddToScheme, @@ -128,6 +129,7 @@ var localSchemeBuilder = runtime.SchemeBuilder{ rbacv1.AddToScheme, rbacv1beta1.AddToScheme, rbacv1alpha1.AddToScheme, + resourcev1beta1.AddToScheme, resourcev1alpha3.AddToScheme, schedulingv1alpha1.AddToScheme, schedulingv1beta1.AddToScheme, diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/admissionregistration_client.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/admissionregistration_client.go index a81b2b682..74d2967f6 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/admissionregistration_client.go +++ 
b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/admissionregistration_client.go @@ -19,10 +19,10 @@ limitations under the License. package v1 import ( - "net/http" + http "net/http" - v1 "k8s.io/api/admissionregistration/v1" - "k8s.io/client-go/kubernetes/scheme" + admissionregistrationv1 "k8s.io/api/admissionregistration/v1" + scheme "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -100,10 +100,10 @@ func New(c rest.Interface) *AdmissionregistrationV1Client { } func setConfigDefaults(config *rest.Config) error { - gv := v1.SchemeGroupVersion + gv := admissionregistrationv1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/fake/fake_admissionregistration_client.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/fake/fake_admissionregistration_client.go index b7487c2fb..1a948ad93 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/fake/fake_admissionregistration_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/fake/fake_admissionregistration_client.go @@ -29,19 +29,19 @@ type FakeAdmissionregistrationV1 struct { } func (c *FakeAdmissionregistrationV1) MutatingWebhookConfigurations() v1.MutatingWebhookConfigurationInterface { - return &FakeMutatingWebhookConfigurations{c} + return newFakeMutatingWebhookConfigurations(c) } func (c *FakeAdmissionregistrationV1) ValidatingAdmissionPolicies() v1.ValidatingAdmissionPolicyInterface { - return &FakeValidatingAdmissionPolicies{c} + return newFakeValidatingAdmissionPolicies(c) } func (c *FakeAdmissionregistrationV1) ValidatingAdmissionPolicyBindings() v1.ValidatingAdmissionPolicyBindingInterface { - return &FakeValidatingAdmissionPolicyBindings{c} + return newFakeValidatingAdmissionPolicyBindings(c) } func (c *FakeAdmissionregistrationV1) ValidatingWebhookConfigurations() v1.ValidatingWebhookConfigurationInterface { - return &FakeValidatingWebhookConfigurations{c} + return newFakeValidatingWebhookConfigurations(c) } // RESTClient returns a RESTClient that is used to communicate diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/fake/fake_mutatingwebhookconfiguration.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/fake/fake_mutatingwebhookconfiguration.go index 2d371e6fc..3dda32248 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/fake/fake_mutatingwebhookconfiguration.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/fake/fake_mutatingwebhookconfiguration.go @@ -19,133 +19,35 @@ limitations under the License. 
package fake import ( - "context" - json "encoding/json" - "fmt" - v1 "k8s.io/api/admissionregistration/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" admissionregistrationv1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1" - testing "k8s.io/client-go/testing" + gentype "k8s.io/client-go/gentype" + typedadmissionregistrationv1 "k8s.io/client-go/kubernetes/typed/admissionregistration/v1" ) -// FakeMutatingWebhookConfigurations implements MutatingWebhookConfigurationInterface -type FakeMutatingWebhookConfigurations struct { +// fakeMutatingWebhookConfigurations implements MutatingWebhookConfigurationInterface +type fakeMutatingWebhookConfigurations struct { + *gentype.FakeClientWithListAndApply[*v1.MutatingWebhookConfiguration, *v1.MutatingWebhookConfigurationList, *admissionregistrationv1.MutatingWebhookConfigurationApplyConfiguration] Fake *FakeAdmissionregistrationV1 } -var mutatingwebhookconfigurationsResource = v1.SchemeGroupVersion.WithResource("mutatingwebhookconfigurations") - -var mutatingwebhookconfigurationsKind = v1.SchemeGroupVersion.WithKind("MutatingWebhookConfiguration") - -// Get takes name of the mutatingWebhookConfiguration, and returns the corresponding mutatingWebhookConfiguration object, and an error if there is any. -func (c *FakeMutatingWebhookConfigurations) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.MutatingWebhookConfiguration, err error) { - emptyResult := &v1.MutatingWebhookConfiguration{} - obj, err := c.Fake. - Invokes(testing.NewRootGetActionWithOptions(mutatingwebhookconfigurationsResource, name, options), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1.MutatingWebhookConfiguration), err -} - -// List takes label and field selectors, and returns the list of MutatingWebhookConfigurations that match those selectors. -func (c *FakeMutatingWebhookConfigurations) List(ctx context.Context, opts metav1.ListOptions) (result *v1.MutatingWebhookConfigurationList, err error) { - emptyResult := &v1.MutatingWebhookConfigurationList{} - obj, err := c.Fake. - Invokes(testing.NewRootListActionWithOptions(mutatingwebhookconfigurationsResource, mutatingwebhookconfigurationsKind, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1.MutatingWebhookConfigurationList{ListMeta: obj.(*v1.MutatingWebhookConfigurationList).ListMeta} - for _, item := range obj.(*v1.MutatingWebhookConfigurationList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested mutatingWebhookConfigurations. -func (c *FakeMutatingWebhookConfigurations) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewRootWatchActionWithOptions(mutatingwebhookconfigurationsResource, opts)) -} - -// Create takes the representation of a mutatingWebhookConfiguration and creates it. Returns the server's representation of the mutatingWebhookConfiguration, and an error, if there is any. 
-func (c *FakeMutatingWebhookConfigurations) Create(ctx context.Context, mutatingWebhookConfiguration *v1.MutatingWebhookConfiguration, opts metav1.CreateOptions) (result *v1.MutatingWebhookConfiguration, err error) { - emptyResult := &v1.MutatingWebhookConfiguration{} - obj, err := c.Fake. - Invokes(testing.NewRootCreateActionWithOptions(mutatingwebhookconfigurationsResource, mutatingWebhookConfiguration, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1.MutatingWebhookConfiguration), err -} - -// Update takes the representation of a mutatingWebhookConfiguration and updates it. Returns the server's representation of the mutatingWebhookConfiguration, and an error, if there is any. -func (c *FakeMutatingWebhookConfigurations) Update(ctx context.Context, mutatingWebhookConfiguration *v1.MutatingWebhookConfiguration, opts metav1.UpdateOptions) (result *v1.MutatingWebhookConfiguration, err error) { - emptyResult := &v1.MutatingWebhookConfiguration{} - obj, err := c.Fake. - Invokes(testing.NewRootUpdateActionWithOptions(mutatingwebhookconfigurationsResource, mutatingWebhookConfiguration, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1.MutatingWebhookConfiguration), err -} - -// Delete takes name of the mutatingWebhookConfiguration and deletes it. Returns an error if one occurs. -func (c *FakeMutatingWebhookConfigurations) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewRootDeleteActionWithOptions(mutatingwebhookconfigurationsResource, name, opts), &v1.MutatingWebhookConfiguration{}) - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeMutatingWebhookConfigurations) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewRootDeleteCollectionActionWithOptions(mutatingwebhookconfigurationsResource, opts, listOpts) - - _, err := c.Fake.Invokes(action, &v1.MutatingWebhookConfigurationList{}) - return err -} - -// Patch applies the patch and returns the patched mutatingWebhookConfiguration. -func (c *FakeMutatingWebhookConfigurations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.MutatingWebhookConfiguration, err error) { - emptyResult := &v1.MutatingWebhookConfiguration{} - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceActionWithOptions(mutatingwebhookconfigurationsResource, name, pt, data, opts, subresources...), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1.MutatingWebhookConfiguration), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied mutatingWebhookConfiguration. 
-func (c *FakeMutatingWebhookConfigurations) Apply(ctx context.Context, mutatingWebhookConfiguration *admissionregistrationv1.MutatingWebhookConfigurationApplyConfiguration, opts metav1.ApplyOptions) (result *v1.MutatingWebhookConfiguration, err error) { - if mutatingWebhookConfiguration == nil { - return nil, fmt.Errorf("mutatingWebhookConfiguration provided to Apply must not be nil") - } - data, err := json.Marshal(mutatingWebhookConfiguration) - if err != nil { - return nil, err - } - name := mutatingWebhookConfiguration.Name - if name == nil { - return nil, fmt.Errorf("mutatingWebhookConfiguration.Name must be provided to Apply") - } - emptyResult := &v1.MutatingWebhookConfiguration{} - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceActionWithOptions(mutatingwebhookconfigurationsResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) - if obj == nil { - return emptyResult, err +func newFakeMutatingWebhookConfigurations(fake *FakeAdmissionregistrationV1) typedadmissionregistrationv1.MutatingWebhookConfigurationInterface { + return &fakeMutatingWebhookConfigurations{ + gentype.NewFakeClientWithListAndApply[*v1.MutatingWebhookConfiguration, *v1.MutatingWebhookConfigurationList, *admissionregistrationv1.MutatingWebhookConfigurationApplyConfiguration]( + fake.Fake, + "", + v1.SchemeGroupVersion.WithResource("mutatingwebhookconfigurations"), + v1.SchemeGroupVersion.WithKind("MutatingWebhookConfiguration"), + func() *v1.MutatingWebhookConfiguration { return &v1.MutatingWebhookConfiguration{} }, + func() *v1.MutatingWebhookConfigurationList { return &v1.MutatingWebhookConfigurationList{} }, + func(dst, src *v1.MutatingWebhookConfigurationList) { dst.ListMeta = src.ListMeta }, + func(list *v1.MutatingWebhookConfigurationList) []*v1.MutatingWebhookConfiguration { + return gentype.ToPointerSlice(list.Items) + }, + func(list *v1.MutatingWebhookConfigurationList, items []*v1.MutatingWebhookConfiguration) { + list.Items = gentype.FromPointerSlice(items) + }, + ), + fake, } - return obj.(*v1.MutatingWebhookConfiguration), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/fake/fake_validatingadmissionpolicy.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/fake/fake_validatingadmissionpolicy.go index d6c7bec89..4ad05e8fc 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/fake/fake_validatingadmissionpolicy.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/fake/fake_validatingadmissionpolicy.go @@ -19,168 +19,35 @@ limitations under the License. 
package fake import ( - "context" - json "encoding/json" - "fmt" - v1 "k8s.io/api/admissionregistration/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" admissionregistrationv1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1" - testing "k8s.io/client-go/testing" + gentype "k8s.io/client-go/gentype" + typedadmissionregistrationv1 "k8s.io/client-go/kubernetes/typed/admissionregistration/v1" ) -// FakeValidatingAdmissionPolicies implements ValidatingAdmissionPolicyInterface -type FakeValidatingAdmissionPolicies struct { +// fakeValidatingAdmissionPolicies implements ValidatingAdmissionPolicyInterface +type fakeValidatingAdmissionPolicies struct { + *gentype.FakeClientWithListAndApply[*v1.ValidatingAdmissionPolicy, *v1.ValidatingAdmissionPolicyList, *admissionregistrationv1.ValidatingAdmissionPolicyApplyConfiguration] Fake *FakeAdmissionregistrationV1 } -var validatingadmissionpoliciesResource = v1.SchemeGroupVersion.WithResource("validatingadmissionpolicies") - -var validatingadmissionpoliciesKind = v1.SchemeGroupVersion.WithKind("ValidatingAdmissionPolicy") - -// Get takes name of the validatingAdmissionPolicy, and returns the corresponding validatingAdmissionPolicy object, and an error if there is any. -func (c *FakeValidatingAdmissionPolicies) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ValidatingAdmissionPolicy, err error) { - emptyResult := &v1.ValidatingAdmissionPolicy{} - obj, err := c.Fake. - Invokes(testing.NewRootGetActionWithOptions(validatingadmissionpoliciesResource, name, options), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1.ValidatingAdmissionPolicy), err -} - -// List takes label and field selectors, and returns the list of ValidatingAdmissionPolicies that match those selectors. -func (c *FakeValidatingAdmissionPolicies) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ValidatingAdmissionPolicyList, err error) { - emptyResult := &v1.ValidatingAdmissionPolicyList{} - obj, err := c.Fake. - Invokes(testing.NewRootListActionWithOptions(validatingadmissionpoliciesResource, validatingadmissionpoliciesKind, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1.ValidatingAdmissionPolicyList{ListMeta: obj.(*v1.ValidatingAdmissionPolicyList).ListMeta} - for _, item := range obj.(*v1.ValidatingAdmissionPolicyList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested validatingAdmissionPolicies. -func (c *FakeValidatingAdmissionPolicies) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewRootWatchActionWithOptions(validatingadmissionpoliciesResource, opts)) -} - -// Create takes the representation of a validatingAdmissionPolicy and creates it. Returns the server's representation of the validatingAdmissionPolicy, and an error, if there is any. -func (c *FakeValidatingAdmissionPolicies) Create(ctx context.Context, validatingAdmissionPolicy *v1.ValidatingAdmissionPolicy, opts metav1.CreateOptions) (result *v1.ValidatingAdmissionPolicy, err error) { - emptyResult := &v1.ValidatingAdmissionPolicy{} - obj, err := c.Fake. 
- Invokes(testing.NewRootCreateActionWithOptions(validatingadmissionpoliciesResource, validatingAdmissionPolicy, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1.ValidatingAdmissionPolicy), err -} - -// Update takes the representation of a validatingAdmissionPolicy and updates it. Returns the server's representation of the validatingAdmissionPolicy, and an error, if there is any. -func (c *FakeValidatingAdmissionPolicies) Update(ctx context.Context, validatingAdmissionPolicy *v1.ValidatingAdmissionPolicy, opts metav1.UpdateOptions) (result *v1.ValidatingAdmissionPolicy, err error) { - emptyResult := &v1.ValidatingAdmissionPolicy{} - obj, err := c.Fake. - Invokes(testing.NewRootUpdateActionWithOptions(validatingadmissionpoliciesResource, validatingAdmissionPolicy, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1.ValidatingAdmissionPolicy), err -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeValidatingAdmissionPolicies) UpdateStatus(ctx context.Context, validatingAdmissionPolicy *v1.ValidatingAdmissionPolicy, opts metav1.UpdateOptions) (result *v1.ValidatingAdmissionPolicy, err error) { - emptyResult := &v1.ValidatingAdmissionPolicy{} - obj, err := c.Fake. - Invokes(testing.NewRootUpdateSubresourceActionWithOptions(validatingadmissionpoliciesResource, "status", validatingAdmissionPolicy, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1.ValidatingAdmissionPolicy), err -} - -// Delete takes name of the validatingAdmissionPolicy and deletes it. Returns an error if one occurs. -func (c *FakeValidatingAdmissionPolicies) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewRootDeleteActionWithOptions(validatingadmissionpoliciesResource, name, opts), &v1.ValidatingAdmissionPolicy{}) - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeValidatingAdmissionPolicies) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewRootDeleteCollectionActionWithOptions(validatingadmissionpoliciesResource, opts, listOpts) - - _, err := c.Fake.Invokes(action, &v1.ValidatingAdmissionPolicyList{}) - return err -} - -// Patch applies the patch and returns the patched validatingAdmissionPolicy. -func (c *FakeValidatingAdmissionPolicies) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ValidatingAdmissionPolicy, err error) { - emptyResult := &v1.ValidatingAdmissionPolicy{} - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceActionWithOptions(validatingadmissionpoliciesResource, name, pt, data, opts, subresources...), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1.ValidatingAdmissionPolicy), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied validatingAdmissionPolicy. 
-func (c *FakeValidatingAdmissionPolicies) Apply(ctx context.Context, validatingAdmissionPolicy *admissionregistrationv1.ValidatingAdmissionPolicyApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ValidatingAdmissionPolicy, err error) { - if validatingAdmissionPolicy == nil { - return nil, fmt.Errorf("validatingAdmissionPolicy provided to Apply must not be nil") - } - data, err := json.Marshal(validatingAdmissionPolicy) - if err != nil { - return nil, err - } - name := validatingAdmissionPolicy.Name - if name == nil { - return nil, fmt.Errorf("validatingAdmissionPolicy.Name must be provided to Apply") - } - emptyResult := &v1.ValidatingAdmissionPolicy{} - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceActionWithOptions(validatingadmissionpoliciesResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1.ValidatingAdmissionPolicy), err -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *FakeValidatingAdmissionPolicies) ApplyStatus(ctx context.Context, validatingAdmissionPolicy *admissionregistrationv1.ValidatingAdmissionPolicyApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ValidatingAdmissionPolicy, err error) { - if validatingAdmissionPolicy == nil { - return nil, fmt.Errorf("validatingAdmissionPolicy provided to Apply must not be nil") - } - data, err := json.Marshal(validatingAdmissionPolicy) - if err != nil { - return nil, err - } - name := validatingAdmissionPolicy.Name - if name == nil { - return nil, fmt.Errorf("validatingAdmissionPolicy.Name must be provided to Apply") - } - emptyResult := &v1.ValidatingAdmissionPolicy{} - obj, err := c.Fake. 
- Invokes(testing.NewRootPatchSubresourceActionWithOptions(validatingadmissionpoliciesResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) - if obj == nil { - return emptyResult, err +func newFakeValidatingAdmissionPolicies(fake *FakeAdmissionregistrationV1) typedadmissionregistrationv1.ValidatingAdmissionPolicyInterface { + return &fakeValidatingAdmissionPolicies{ + gentype.NewFakeClientWithListAndApply[*v1.ValidatingAdmissionPolicy, *v1.ValidatingAdmissionPolicyList, *admissionregistrationv1.ValidatingAdmissionPolicyApplyConfiguration]( + fake.Fake, + "", + v1.SchemeGroupVersion.WithResource("validatingadmissionpolicies"), + v1.SchemeGroupVersion.WithKind("ValidatingAdmissionPolicy"), + func() *v1.ValidatingAdmissionPolicy { return &v1.ValidatingAdmissionPolicy{} }, + func() *v1.ValidatingAdmissionPolicyList { return &v1.ValidatingAdmissionPolicyList{} }, + func(dst, src *v1.ValidatingAdmissionPolicyList) { dst.ListMeta = src.ListMeta }, + func(list *v1.ValidatingAdmissionPolicyList) []*v1.ValidatingAdmissionPolicy { + return gentype.ToPointerSlice(list.Items) + }, + func(list *v1.ValidatingAdmissionPolicyList, items []*v1.ValidatingAdmissionPolicy) { + list.Items = gentype.FromPointerSlice(items) + }, + ), + fake, } - return obj.(*v1.ValidatingAdmissionPolicy), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/fake/fake_validatingadmissionpolicybinding.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/fake/fake_validatingadmissionpolicybinding.go index 5b6719be0..f222663f4 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/fake/fake_validatingadmissionpolicybinding.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/fake/fake_validatingadmissionpolicybinding.go @@ -19,133 +19,35 @@ limitations under the License. package fake import ( - "context" - json "encoding/json" - "fmt" - v1 "k8s.io/api/admissionregistration/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" admissionregistrationv1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1" - testing "k8s.io/client-go/testing" + gentype "k8s.io/client-go/gentype" + typedadmissionregistrationv1 "k8s.io/client-go/kubernetes/typed/admissionregistration/v1" ) -// FakeValidatingAdmissionPolicyBindings implements ValidatingAdmissionPolicyBindingInterface -type FakeValidatingAdmissionPolicyBindings struct { +// fakeValidatingAdmissionPolicyBindings implements ValidatingAdmissionPolicyBindingInterface +type fakeValidatingAdmissionPolicyBindings struct { + *gentype.FakeClientWithListAndApply[*v1.ValidatingAdmissionPolicyBinding, *v1.ValidatingAdmissionPolicyBindingList, *admissionregistrationv1.ValidatingAdmissionPolicyBindingApplyConfiguration] Fake *FakeAdmissionregistrationV1 } -var validatingadmissionpolicybindingsResource = v1.SchemeGroupVersion.WithResource("validatingadmissionpolicybindings") - -var validatingadmissionpolicybindingsKind = v1.SchemeGroupVersion.WithKind("ValidatingAdmissionPolicyBinding") - -// Get takes name of the validatingAdmissionPolicyBinding, and returns the corresponding validatingAdmissionPolicyBinding object, and an error if there is any. 
-func (c *FakeValidatingAdmissionPolicyBindings) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ValidatingAdmissionPolicyBinding, err error) { - emptyResult := &v1.ValidatingAdmissionPolicyBinding{} - obj, err := c.Fake. - Invokes(testing.NewRootGetActionWithOptions(validatingadmissionpolicybindingsResource, name, options), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1.ValidatingAdmissionPolicyBinding), err -} - -// List takes label and field selectors, and returns the list of ValidatingAdmissionPolicyBindings that match those selectors. -func (c *FakeValidatingAdmissionPolicyBindings) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ValidatingAdmissionPolicyBindingList, err error) { - emptyResult := &v1.ValidatingAdmissionPolicyBindingList{} - obj, err := c.Fake. - Invokes(testing.NewRootListActionWithOptions(validatingadmissionpolicybindingsResource, validatingadmissionpolicybindingsKind, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1.ValidatingAdmissionPolicyBindingList{ListMeta: obj.(*v1.ValidatingAdmissionPolicyBindingList).ListMeta} - for _, item := range obj.(*v1.ValidatingAdmissionPolicyBindingList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested validatingAdmissionPolicyBindings. -func (c *FakeValidatingAdmissionPolicyBindings) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewRootWatchActionWithOptions(validatingadmissionpolicybindingsResource, opts)) -} - -// Create takes the representation of a validatingAdmissionPolicyBinding and creates it. Returns the server's representation of the validatingAdmissionPolicyBinding, and an error, if there is any. -func (c *FakeValidatingAdmissionPolicyBindings) Create(ctx context.Context, validatingAdmissionPolicyBinding *v1.ValidatingAdmissionPolicyBinding, opts metav1.CreateOptions) (result *v1.ValidatingAdmissionPolicyBinding, err error) { - emptyResult := &v1.ValidatingAdmissionPolicyBinding{} - obj, err := c.Fake. - Invokes(testing.NewRootCreateActionWithOptions(validatingadmissionpolicybindingsResource, validatingAdmissionPolicyBinding, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1.ValidatingAdmissionPolicyBinding), err -} - -// Update takes the representation of a validatingAdmissionPolicyBinding and updates it. Returns the server's representation of the validatingAdmissionPolicyBinding, and an error, if there is any. -func (c *FakeValidatingAdmissionPolicyBindings) Update(ctx context.Context, validatingAdmissionPolicyBinding *v1.ValidatingAdmissionPolicyBinding, opts metav1.UpdateOptions) (result *v1.ValidatingAdmissionPolicyBinding, err error) { - emptyResult := &v1.ValidatingAdmissionPolicyBinding{} - obj, err := c.Fake. - Invokes(testing.NewRootUpdateActionWithOptions(validatingadmissionpolicybindingsResource, validatingAdmissionPolicyBinding, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1.ValidatingAdmissionPolicyBinding), err -} - -// Delete takes name of the validatingAdmissionPolicyBinding and deletes it. Returns an error if one occurs. 
-func (c *FakeValidatingAdmissionPolicyBindings) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewRootDeleteActionWithOptions(validatingadmissionpolicybindingsResource, name, opts), &v1.ValidatingAdmissionPolicyBinding{}) - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeValidatingAdmissionPolicyBindings) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewRootDeleteCollectionActionWithOptions(validatingadmissionpolicybindingsResource, opts, listOpts) - - _, err := c.Fake.Invokes(action, &v1.ValidatingAdmissionPolicyBindingList{}) - return err -} - -// Patch applies the patch and returns the patched validatingAdmissionPolicyBinding. -func (c *FakeValidatingAdmissionPolicyBindings) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ValidatingAdmissionPolicyBinding, err error) { - emptyResult := &v1.ValidatingAdmissionPolicyBinding{} - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceActionWithOptions(validatingadmissionpolicybindingsResource, name, pt, data, opts, subresources...), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1.ValidatingAdmissionPolicyBinding), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied validatingAdmissionPolicyBinding. -func (c *FakeValidatingAdmissionPolicyBindings) Apply(ctx context.Context, validatingAdmissionPolicyBinding *admissionregistrationv1.ValidatingAdmissionPolicyBindingApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ValidatingAdmissionPolicyBinding, err error) { - if validatingAdmissionPolicyBinding == nil { - return nil, fmt.Errorf("validatingAdmissionPolicyBinding provided to Apply must not be nil") - } - data, err := json.Marshal(validatingAdmissionPolicyBinding) - if err != nil { - return nil, err - } - name := validatingAdmissionPolicyBinding.Name - if name == nil { - return nil, fmt.Errorf("validatingAdmissionPolicyBinding.Name must be provided to Apply") - } - emptyResult := &v1.ValidatingAdmissionPolicyBinding{} - obj, err := c.Fake. 
- Invokes(testing.NewRootPatchSubresourceActionWithOptions(validatingadmissionpolicybindingsResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) - if obj == nil { - return emptyResult, err +func newFakeValidatingAdmissionPolicyBindings(fake *FakeAdmissionregistrationV1) typedadmissionregistrationv1.ValidatingAdmissionPolicyBindingInterface { + return &fakeValidatingAdmissionPolicyBindings{ + gentype.NewFakeClientWithListAndApply[*v1.ValidatingAdmissionPolicyBinding, *v1.ValidatingAdmissionPolicyBindingList, *admissionregistrationv1.ValidatingAdmissionPolicyBindingApplyConfiguration]( + fake.Fake, + "", + v1.SchemeGroupVersion.WithResource("validatingadmissionpolicybindings"), + v1.SchemeGroupVersion.WithKind("ValidatingAdmissionPolicyBinding"), + func() *v1.ValidatingAdmissionPolicyBinding { return &v1.ValidatingAdmissionPolicyBinding{} }, + func() *v1.ValidatingAdmissionPolicyBindingList { return &v1.ValidatingAdmissionPolicyBindingList{} }, + func(dst, src *v1.ValidatingAdmissionPolicyBindingList) { dst.ListMeta = src.ListMeta }, + func(list *v1.ValidatingAdmissionPolicyBindingList) []*v1.ValidatingAdmissionPolicyBinding { + return gentype.ToPointerSlice(list.Items) + }, + func(list *v1.ValidatingAdmissionPolicyBindingList, items []*v1.ValidatingAdmissionPolicyBinding) { + list.Items = gentype.FromPointerSlice(items) + }, + ), + fake, } - return obj.(*v1.ValidatingAdmissionPolicyBinding), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/fake/fake_validatingwebhookconfiguration.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/fake/fake_validatingwebhookconfiguration.go index ff7fc4301..947db961a 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/fake/fake_validatingwebhookconfiguration.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/fake/fake_validatingwebhookconfiguration.go @@ -19,133 +19,35 @@ limitations under the License. package fake import ( - "context" - json "encoding/json" - "fmt" - v1 "k8s.io/api/admissionregistration/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" admissionregistrationv1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1" - testing "k8s.io/client-go/testing" + gentype "k8s.io/client-go/gentype" + typedadmissionregistrationv1 "k8s.io/client-go/kubernetes/typed/admissionregistration/v1" ) -// FakeValidatingWebhookConfigurations implements ValidatingWebhookConfigurationInterface -type FakeValidatingWebhookConfigurations struct { +// fakeValidatingWebhookConfigurations implements ValidatingWebhookConfigurationInterface +type fakeValidatingWebhookConfigurations struct { + *gentype.FakeClientWithListAndApply[*v1.ValidatingWebhookConfiguration, *v1.ValidatingWebhookConfigurationList, *admissionregistrationv1.ValidatingWebhookConfigurationApplyConfiguration] Fake *FakeAdmissionregistrationV1 } -var validatingwebhookconfigurationsResource = v1.SchemeGroupVersion.WithResource("validatingwebhookconfigurations") - -var validatingwebhookconfigurationsKind = v1.SchemeGroupVersion.WithKind("ValidatingWebhookConfiguration") - -// Get takes name of the validatingWebhookConfiguration, and returns the corresponding validatingWebhookConfiguration object, and an error if there is any. 
-func (c *FakeValidatingWebhookConfigurations) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ValidatingWebhookConfiguration, err error) { - emptyResult := &v1.ValidatingWebhookConfiguration{} - obj, err := c.Fake. - Invokes(testing.NewRootGetActionWithOptions(validatingwebhookconfigurationsResource, name, options), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1.ValidatingWebhookConfiguration), err -} - -// List takes label and field selectors, and returns the list of ValidatingWebhookConfigurations that match those selectors. -func (c *FakeValidatingWebhookConfigurations) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ValidatingWebhookConfigurationList, err error) { - emptyResult := &v1.ValidatingWebhookConfigurationList{} - obj, err := c.Fake. - Invokes(testing.NewRootListActionWithOptions(validatingwebhookconfigurationsResource, validatingwebhookconfigurationsKind, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1.ValidatingWebhookConfigurationList{ListMeta: obj.(*v1.ValidatingWebhookConfigurationList).ListMeta} - for _, item := range obj.(*v1.ValidatingWebhookConfigurationList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested validatingWebhookConfigurations. -func (c *FakeValidatingWebhookConfigurations) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewRootWatchActionWithOptions(validatingwebhookconfigurationsResource, opts)) -} - -// Create takes the representation of a validatingWebhookConfiguration and creates it. Returns the server's representation of the validatingWebhookConfiguration, and an error, if there is any. -func (c *FakeValidatingWebhookConfigurations) Create(ctx context.Context, validatingWebhookConfiguration *v1.ValidatingWebhookConfiguration, opts metav1.CreateOptions) (result *v1.ValidatingWebhookConfiguration, err error) { - emptyResult := &v1.ValidatingWebhookConfiguration{} - obj, err := c.Fake. - Invokes(testing.NewRootCreateActionWithOptions(validatingwebhookconfigurationsResource, validatingWebhookConfiguration, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1.ValidatingWebhookConfiguration), err -} - -// Update takes the representation of a validatingWebhookConfiguration and updates it. Returns the server's representation of the validatingWebhookConfiguration, and an error, if there is any. -func (c *FakeValidatingWebhookConfigurations) Update(ctx context.Context, validatingWebhookConfiguration *v1.ValidatingWebhookConfiguration, opts metav1.UpdateOptions) (result *v1.ValidatingWebhookConfiguration, err error) { - emptyResult := &v1.ValidatingWebhookConfiguration{} - obj, err := c.Fake. - Invokes(testing.NewRootUpdateActionWithOptions(validatingwebhookconfigurationsResource, validatingWebhookConfiguration, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1.ValidatingWebhookConfiguration), err -} - -// Delete takes name of the validatingWebhookConfiguration and deletes it. Returns an error if one occurs. -func (c *FakeValidatingWebhookConfigurations) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - _, err := c.Fake. 
- Invokes(testing.NewRootDeleteActionWithOptions(validatingwebhookconfigurationsResource, name, opts), &v1.ValidatingWebhookConfiguration{}) - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeValidatingWebhookConfigurations) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewRootDeleteCollectionActionWithOptions(validatingwebhookconfigurationsResource, opts, listOpts) - - _, err := c.Fake.Invokes(action, &v1.ValidatingWebhookConfigurationList{}) - return err -} - -// Patch applies the patch and returns the patched validatingWebhookConfiguration. -func (c *FakeValidatingWebhookConfigurations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ValidatingWebhookConfiguration, err error) { - emptyResult := &v1.ValidatingWebhookConfiguration{} - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceActionWithOptions(validatingwebhookconfigurationsResource, name, pt, data, opts, subresources...), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1.ValidatingWebhookConfiguration), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied validatingWebhookConfiguration. -func (c *FakeValidatingWebhookConfigurations) Apply(ctx context.Context, validatingWebhookConfiguration *admissionregistrationv1.ValidatingWebhookConfigurationApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ValidatingWebhookConfiguration, err error) { - if validatingWebhookConfiguration == nil { - return nil, fmt.Errorf("validatingWebhookConfiguration provided to Apply must not be nil") - } - data, err := json.Marshal(validatingWebhookConfiguration) - if err != nil { - return nil, err - } - name := validatingWebhookConfiguration.Name - if name == nil { - return nil, fmt.Errorf("validatingWebhookConfiguration.Name must be provided to Apply") - } - emptyResult := &v1.ValidatingWebhookConfiguration{} - obj, err := c.Fake. 
- Invokes(testing.NewRootPatchSubresourceActionWithOptions(validatingwebhookconfigurationsResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) - if obj == nil { - return emptyResult, err +func newFakeValidatingWebhookConfigurations(fake *FakeAdmissionregistrationV1) typedadmissionregistrationv1.ValidatingWebhookConfigurationInterface { + return &fakeValidatingWebhookConfigurations{ + gentype.NewFakeClientWithListAndApply[*v1.ValidatingWebhookConfiguration, *v1.ValidatingWebhookConfigurationList, *admissionregistrationv1.ValidatingWebhookConfigurationApplyConfiguration]( + fake.Fake, + "", + v1.SchemeGroupVersion.WithResource("validatingwebhookconfigurations"), + v1.SchemeGroupVersion.WithKind("ValidatingWebhookConfiguration"), + func() *v1.ValidatingWebhookConfiguration { return &v1.ValidatingWebhookConfiguration{} }, + func() *v1.ValidatingWebhookConfigurationList { return &v1.ValidatingWebhookConfigurationList{} }, + func(dst, src *v1.ValidatingWebhookConfigurationList) { dst.ListMeta = src.ListMeta }, + func(list *v1.ValidatingWebhookConfigurationList) []*v1.ValidatingWebhookConfiguration { + return gentype.ToPointerSlice(list.Items) + }, + func(list *v1.ValidatingWebhookConfigurationList, items []*v1.ValidatingWebhookConfiguration) { + list.Items = gentype.FromPointerSlice(items) + }, + ), + fake, } - return obj.(*v1.ValidatingWebhookConfiguration), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/mutatingwebhookconfiguration.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/mutatingwebhookconfiguration.go index e863766c6..d46a3c987 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/mutatingwebhookconfiguration.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/mutatingwebhookconfiguration.go @@ -19,13 +19,13 @@ limitations under the License. package v1 import ( - "context" + context "context" - v1 "k8s.io/api/admissionregistration/v1" + admissionregistrationv1 "k8s.io/api/admissionregistration/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - admissionregistrationv1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1" + applyconfigurationsadmissionregistrationv1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1" gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" ) @@ -38,32 +38,38 @@ type MutatingWebhookConfigurationsGetter interface { // MutatingWebhookConfigurationInterface has methods to work with MutatingWebhookConfiguration resources. 
type MutatingWebhookConfigurationInterface interface { - Create(ctx context.Context, mutatingWebhookConfiguration *v1.MutatingWebhookConfiguration, opts metav1.CreateOptions) (*v1.MutatingWebhookConfiguration, error) - Update(ctx context.Context, mutatingWebhookConfiguration *v1.MutatingWebhookConfiguration, opts metav1.UpdateOptions) (*v1.MutatingWebhookConfiguration, error) + Create(ctx context.Context, mutatingWebhookConfiguration *admissionregistrationv1.MutatingWebhookConfiguration, opts metav1.CreateOptions) (*admissionregistrationv1.MutatingWebhookConfiguration, error) + Update(ctx context.Context, mutatingWebhookConfiguration *admissionregistrationv1.MutatingWebhookConfiguration, opts metav1.UpdateOptions) (*admissionregistrationv1.MutatingWebhookConfiguration, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error - Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.MutatingWebhookConfiguration, error) - List(ctx context.Context, opts metav1.ListOptions) (*v1.MutatingWebhookConfigurationList, error) + Get(ctx context.Context, name string, opts metav1.GetOptions) (*admissionregistrationv1.MutatingWebhookConfiguration, error) + List(ctx context.Context, opts metav1.ListOptions) (*admissionregistrationv1.MutatingWebhookConfigurationList, error) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.MutatingWebhookConfiguration, err error) - Apply(ctx context.Context, mutatingWebhookConfiguration *admissionregistrationv1.MutatingWebhookConfigurationApplyConfiguration, opts metav1.ApplyOptions) (result *v1.MutatingWebhookConfiguration, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *admissionregistrationv1.MutatingWebhookConfiguration, err error) + Apply(ctx context.Context, mutatingWebhookConfiguration *applyconfigurationsadmissionregistrationv1.MutatingWebhookConfigurationApplyConfiguration, opts metav1.ApplyOptions) (result *admissionregistrationv1.MutatingWebhookConfiguration, err error) MutatingWebhookConfigurationExpansion } // mutatingWebhookConfigurations implements MutatingWebhookConfigurationInterface type mutatingWebhookConfigurations struct { - *gentype.ClientWithListAndApply[*v1.MutatingWebhookConfiguration, *v1.MutatingWebhookConfigurationList, *admissionregistrationv1.MutatingWebhookConfigurationApplyConfiguration] + *gentype.ClientWithListAndApply[*admissionregistrationv1.MutatingWebhookConfiguration, *admissionregistrationv1.MutatingWebhookConfigurationList, *applyconfigurationsadmissionregistrationv1.MutatingWebhookConfigurationApplyConfiguration] } // newMutatingWebhookConfigurations returns a MutatingWebhookConfigurations func newMutatingWebhookConfigurations(c *AdmissionregistrationV1Client) *mutatingWebhookConfigurations { return &mutatingWebhookConfigurations{ - gentype.NewClientWithListAndApply[*v1.MutatingWebhookConfiguration, *v1.MutatingWebhookConfigurationList, *admissionregistrationv1.MutatingWebhookConfigurationApplyConfiguration]( + gentype.NewClientWithListAndApply[*admissionregistrationv1.MutatingWebhookConfiguration, *admissionregistrationv1.MutatingWebhookConfigurationList, 
*applyconfigurationsadmissionregistrationv1.MutatingWebhookConfigurationApplyConfiguration]( "mutatingwebhookconfigurations", c.RESTClient(), scheme.ParameterCodec, "", - func() *v1.MutatingWebhookConfiguration { return &v1.MutatingWebhookConfiguration{} }, - func() *v1.MutatingWebhookConfigurationList { return &v1.MutatingWebhookConfigurationList{} }), + func() *admissionregistrationv1.MutatingWebhookConfiguration { + return &admissionregistrationv1.MutatingWebhookConfiguration{} + }, + func() *admissionregistrationv1.MutatingWebhookConfigurationList { + return &admissionregistrationv1.MutatingWebhookConfigurationList{} + }, + gentype.PrefersProtobuf[*admissionregistrationv1.MutatingWebhookConfiguration](), + ), } } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/validatingadmissionpolicy.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/validatingadmissionpolicy.go index 1b20e6960..2d56ab168 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/validatingadmissionpolicy.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/validatingadmissionpolicy.go @@ -19,13 +19,13 @@ limitations under the License. package v1 import ( - "context" + context "context" - v1 "k8s.io/api/admissionregistration/v1" + admissionregistrationv1 "k8s.io/api/admissionregistration/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - admissionregistrationv1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1" + applyconfigurationsadmissionregistrationv1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1" gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" ) @@ -38,36 +38,42 @@ type ValidatingAdmissionPoliciesGetter interface { // ValidatingAdmissionPolicyInterface has methods to work with ValidatingAdmissionPolicy resources. type ValidatingAdmissionPolicyInterface interface { - Create(ctx context.Context, validatingAdmissionPolicy *v1.ValidatingAdmissionPolicy, opts metav1.CreateOptions) (*v1.ValidatingAdmissionPolicy, error) - Update(ctx context.Context, validatingAdmissionPolicy *v1.ValidatingAdmissionPolicy, opts metav1.UpdateOptions) (*v1.ValidatingAdmissionPolicy, error) + Create(ctx context.Context, validatingAdmissionPolicy *admissionregistrationv1.ValidatingAdmissionPolicy, opts metav1.CreateOptions) (*admissionregistrationv1.ValidatingAdmissionPolicy, error) + Update(ctx context.Context, validatingAdmissionPolicy *admissionregistrationv1.ValidatingAdmissionPolicy, opts metav1.UpdateOptions) (*admissionregistrationv1.ValidatingAdmissionPolicy, error) // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
- UpdateStatus(ctx context.Context, validatingAdmissionPolicy *v1.ValidatingAdmissionPolicy, opts metav1.UpdateOptions) (*v1.ValidatingAdmissionPolicy, error) + UpdateStatus(ctx context.Context, validatingAdmissionPolicy *admissionregistrationv1.ValidatingAdmissionPolicy, opts metav1.UpdateOptions) (*admissionregistrationv1.ValidatingAdmissionPolicy, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error - Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.ValidatingAdmissionPolicy, error) - List(ctx context.Context, opts metav1.ListOptions) (*v1.ValidatingAdmissionPolicyList, error) + Get(ctx context.Context, name string, opts metav1.GetOptions) (*admissionregistrationv1.ValidatingAdmissionPolicy, error) + List(ctx context.Context, opts metav1.ListOptions) (*admissionregistrationv1.ValidatingAdmissionPolicyList, error) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ValidatingAdmissionPolicy, err error) - Apply(ctx context.Context, validatingAdmissionPolicy *admissionregistrationv1.ValidatingAdmissionPolicyApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ValidatingAdmissionPolicy, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *admissionregistrationv1.ValidatingAdmissionPolicy, err error) + Apply(ctx context.Context, validatingAdmissionPolicy *applyconfigurationsadmissionregistrationv1.ValidatingAdmissionPolicyApplyConfiguration, opts metav1.ApplyOptions) (result *admissionregistrationv1.ValidatingAdmissionPolicy, err error) // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
- ApplyStatus(ctx context.Context, validatingAdmissionPolicy *admissionregistrationv1.ValidatingAdmissionPolicyApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ValidatingAdmissionPolicy, err error) + ApplyStatus(ctx context.Context, validatingAdmissionPolicy *applyconfigurationsadmissionregistrationv1.ValidatingAdmissionPolicyApplyConfiguration, opts metav1.ApplyOptions) (result *admissionregistrationv1.ValidatingAdmissionPolicy, err error) ValidatingAdmissionPolicyExpansion } // validatingAdmissionPolicies implements ValidatingAdmissionPolicyInterface type validatingAdmissionPolicies struct { - *gentype.ClientWithListAndApply[*v1.ValidatingAdmissionPolicy, *v1.ValidatingAdmissionPolicyList, *admissionregistrationv1.ValidatingAdmissionPolicyApplyConfiguration] + *gentype.ClientWithListAndApply[*admissionregistrationv1.ValidatingAdmissionPolicy, *admissionregistrationv1.ValidatingAdmissionPolicyList, *applyconfigurationsadmissionregistrationv1.ValidatingAdmissionPolicyApplyConfiguration] } // newValidatingAdmissionPolicies returns a ValidatingAdmissionPolicies func newValidatingAdmissionPolicies(c *AdmissionregistrationV1Client) *validatingAdmissionPolicies { return &validatingAdmissionPolicies{ - gentype.NewClientWithListAndApply[*v1.ValidatingAdmissionPolicy, *v1.ValidatingAdmissionPolicyList, *admissionregistrationv1.ValidatingAdmissionPolicyApplyConfiguration]( + gentype.NewClientWithListAndApply[*admissionregistrationv1.ValidatingAdmissionPolicy, *admissionregistrationv1.ValidatingAdmissionPolicyList, *applyconfigurationsadmissionregistrationv1.ValidatingAdmissionPolicyApplyConfiguration]( "validatingadmissionpolicies", c.RESTClient(), scheme.ParameterCodec, "", - func() *v1.ValidatingAdmissionPolicy { return &v1.ValidatingAdmissionPolicy{} }, - func() *v1.ValidatingAdmissionPolicyList { return &v1.ValidatingAdmissionPolicyList{} }), + func() *admissionregistrationv1.ValidatingAdmissionPolicy { + return &admissionregistrationv1.ValidatingAdmissionPolicy{} + }, + func() *admissionregistrationv1.ValidatingAdmissionPolicyList { + return &admissionregistrationv1.ValidatingAdmissionPolicyList{} + }, + gentype.PrefersProtobuf[*admissionregistrationv1.ValidatingAdmissionPolicy](), + ), } } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/validatingadmissionpolicybinding.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/validatingadmissionpolicybinding.go index 44694b232..d3eaa0d2d 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/validatingadmissionpolicybinding.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/validatingadmissionpolicybinding.go @@ -19,13 +19,13 @@ limitations under the License. package v1 import ( - "context" + context "context" - v1 "k8s.io/api/admissionregistration/v1" + admissionregistrationv1 "k8s.io/api/admissionregistration/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - admissionregistrationv1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1" + applyconfigurationsadmissionregistrationv1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1" gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" ) @@ -38,32 +38,38 @@ type ValidatingAdmissionPolicyBindingsGetter interface { // ValidatingAdmissionPolicyBindingInterface has methods to work with ValidatingAdmissionPolicyBinding resources. 
type ValidatingAdmissionPolicyBindingInterface interface { - Create(ctx context.Context, validatingAdmissionPolicyBinding *v1.ValidatingAdmissionPolicyBinding, opts metav1.CreateOptions) (*v1.ValidatingAdmissionPolicyBinding, error) - Update(ctx context.Context, validatingAdmissionPolicyBinding *v1.ValidatingAdmissionPolicyBinding, opts metav1.UpdateOptions) (*v1.ValidatingAdmissionPolicyBinding, error) + Create(ctx context.Context, validatingAdmissionPolicyBinding *admissionregistrationv1.ValidatingAdmissionPolicyBinding, opts metav1.CreateOptions) (*admissionregistrationv1.ValidatingAdmissionPolicyBinding, error) + Update(ctx context.Context, validatingAdmissionPolicyBinding *admissionregistrationv1.ValidatingAdmissionPolicyBinding, opts metav1.UpdateOptions) (*admissionregistrationv1.ValidatingAdmissionPolicyBinding, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error - Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.ValidatingAdmissionPolicyBinding, error) - List(ctx context.Context, opts metav1.ListOptions) (*v1.ValidatingAdmissionPolicyBindingList, error) + Get(ctx context.Context, name string, opts metav1.GetOptions) (*admissionregistrationv1.ValidatingAdmissionPolicyBinding, error) + List(ctx context.Context, opts metav1.ListOptions) (*admissionregistrationv1.ValidatingAdmissionPolicyBindingList, error) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ValidatingAdmissionPolicyBinding, err error) - Apply(ctx context.Context, validatingAdmissionPolicyBinding *admissionregistrationv1.ValidatingAdmissionPolicyBindingApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ValidatingAdmissionPolicyBinding, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *admissionregistrationv1.ValidatingAdmissionPolicyBinding, err error) + Apply(ctx context.Context, validatingAdmissionPolicyBinding *applyconfigurationsadmissionregistrationv1.ValidatingAdmissionPolicyBindingApplyConfiguration, opts metav1.ApplyOptions) (result *admissionregistrationv1.ValidatingAdmissionPolicyBinding, err error) ValidatingAdmissionPolicyBindingExpansion } // validatingAdmissionPolicyBindings implements ValidatingAdmissionPolicyBindingInterface type validatingAdmissionPolicyBindings struct { - *gentype.ClientWithListAndApply[*v1.ValidatingAdmissionPolicyBinding, *v1.ValidatingAdmissionPolicyBindingList, *admissionregistrationv1.ValidatingAdmissionPolicyBindingApplyConfiguration] + *gentype.ClientWithListAndApply[*admissionregistrationv1.ValidatingAdmissionPolicyBinding, *admissionregistrationv1.ValidatingAdmissionPolicyBindingList, *applyconfigurationsadmissionregistrationv1.ValidatingAdmissionPolicyBindingApplyConfiguration] } // newValidatingAdmissionPolicyBindings returns a ValidatingAdmissionPolicyBindings func newValidatingAdmissionPolicyBindings(c *AdmissionregistrationV1Client) *validatingAdmissionPolicyBindings { return &validatingAdmissionPolicyBindings{ - gentype.NewClientWithListAndApply[*v1.ValidatingAdmissionPolicyBinding, *v1.ValidatingAdmissionPolicyBindingList, *admissionregistrationv1.ValidatingAdmissionPolicyBindingApplyConfiguration]( + 
gentype.NewClientWithListAndApply[*admissionregistrationv1.ValidatingAdmissionPolicyBinding, *admissionregistrationv1.ValidatingAdmissionPolicyBindingList, *applyconfigurationsadmissionregistrationv1.ValidatingAdmissionPolicyBindingApplyConfiguration]( "validatingadmissionpolicybindings", c.RESTClient(), scheme.ParameterCodec, "", - func() *v1.ValidatingAdmissionPolicyBinding { return &v1.ValidatingAdmissionPolicyBinding{} }, - func() *v1.ValidatingAdmissionPolicyBindingList { return &v1.ValidatingAdmissionPolicyBindingList{} }), + func() *admissionregistrationv1.ValidatingAdmissionPolicyBinding { + return &admissionregistrationv1.ValidatingAdmissionPolicyBinding{} + }, + func() *admissionregistrationv1.ValidatingAdmissionPolicyBindingList { + return &admissionregistrationv1.ValidatingAdmissionPolicyBindingList{} + }, + gentype.PrefersProtobuf[*admissionregistrationv1.ValidatingAdmissionPolicyBinding](), + ), } } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/validatingwebhookconfiguration.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/validatingwebhookconfiguration.go index 11b4ac059..f8f60f681 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/validatingwebhookconfiguration.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/validatingwebhookconfiguration.go @@ -19,13 +19,13 @@ limitations under the License. package v1 import ( - "context" + context "context" - v1 "k8s.io/api/admissionregistration/v1" + admissionregistrationv1 "k8s.io/api/admissionregistration/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - admissionregistrationv1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1" + applyconfigurationsadmissionregistrationv1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1" gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" ) @@ -38,32 +38,38 @@ type ValidatingWebhookConfigurationsGetter interface { // ValidatingWebhookConfigurationInterface has methods to work with ValidatingWebhookConfiguration resources. 
type ValidatingWebhookConfigurationInterface interface { - Create(ctx context.Context, validatingWebhookConfiguration *v1.ValidatingWebhookConfiguration, opts metav1.CreateOptions) (*v1.ValidatingWebhookConfiguration, error) - Update(ctx context.Context, validatingWebhookConfiguration *v1.ValidatingWebhookConfiguration, opts metav1.UpdateOptions) (*v1.ValidatingWebhookConfiguration, error) + Create(ctx context.Context, validatingWebhookConfiguration *admissionregistrationv1.ValidatingWebhookConfiguration, opts metav1.CreateOptions) (*admissionregistrationv1.ValidatingWebhookConfiguration, error) + Update(ctx context.Context, validatingWebhookConfiguration *admissionregistrationv1.ValidatingWebhookConfiguration, opts metav1.UpdateOptions) (*admissionregistrationv1.ValidatingWebhookConfiguration, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error - Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.ValidatingWebhookConfiguration, error) - List(ctx context.Context, opts metav1.ListOptions) (*v1.ValidatingWebhookConfigurationList, error) + Get(ctx context.Context, name string, opts metav1.GetOptions) (*admissionregistrationv1.ValidatingWebhookConfiguration, error) + List(ctx context.Context, opts metav1.ListOptions) (*admissionregistrationv1.ValidatingWebhookConfigurationList, error) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ValidatingWebhookConfiguration, err error) - Apply(ctx context.Context, validatingWebhookConfiguration *admissionregistrationv1.ValidatingWebhookConfigurationApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ValidatingWebhookConfiguration, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *admissionregistrationv1.ValidatingWebhookConfiguration, err error) + Apply(ctx context.Context, validatingWebhookConfiguration *applyconfigurationsadmissionregistrationv1.ValidatingWebhookConfigurationApplyConfiguration, opts metav1.ApplyOptions) (result *admissionregistrationv1.ValidatingWebhookConfiguration, err error) ValidatingWebhookConfigurationExpansion } // validatingWebhookConfigurations implements ValidatingWebhookConfigurationInterface type validatingWebhookConfigurations struct { - *gentype.ClientWithListAndApply[*v1.ValidatingWebhookConfiguration, *v1.ValidatingWebhookConfigurationList, *admissionregistrationv1.ValidatingWebhookConfigurationApplyConfiguration] + *gentype.ClientWithListAndApply[*admissionregistrationv1.ValidatingWebhookConfiguration, *admissionregistrationv1.ValidatingWebhookConfigurationList, *applyconfigurationsadmissionregistrationv1.ValidatingWebhookConfigurationApplyConfiguration] } // newValidatingWebhookConfigurations returns a ValidatingWebhookConfigurations func newValidatingWebhookConfigurations(c *AdmissionregistrationV1Client) *validatingWebhookConfigurations { return &validatingWebhookConfigurations{ - gentype.NewClientWithListAndApply[*v1.ValidatingWebhookConfiguration, *v1.ValidatingWebhookConfigurationList, *admissionregistrationv1.ValidatingWebhookConfigurationApplyConfiguration]( + gentype.NewClientWithListAndApply[*admissionregistrationv1.ValidatingWebhookConfiguration, 
*admissionregistrationv1.ValidatingWebhookConfigurationList, *applyconfigurationsadmissionregistrationv1.ValidatingWebhookConfigurationApplyConfiguration]( "validatingwebhookconfigurations", c.RESTClient(), scheme.ParameterCodec, "", - func() *v1.ValidatingWebhookConfiguration { return &v1.ValidatingWebhookConfiguration{} }, - func() *v1.ValidatingWebhookConfigurationList { return &v1.ValidatingWebhookConfigurationList{} }), + func() *admissionregistrationv1.ValidatingWebhookConfiguration { + return &admissionregistrationv1.ValidatingWebhookConfiguration{} + }, + func() *admissionregistrationv1.ValidatingWebhookConfigurationList { + return &admissionregistrationv1.ValidatingWebhookConfigurationList{} + }, + gentype.PrefersProtobuf[*admissionregistrationv1.ValidatingWebhookConfiguration](), + ), } } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/admissionregistration_client.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/admissionregistration_client.go index f6102d25a..f8a67c6d8 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/admissionregistration_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/admissionregistration_client.go @@ -19,15 +19,17 @@ limitations under the License. package v1alpha1 import ( - "net/http" + http "net/http" - v1alpha1 "k8s.io/api/admissionregistration/v1alpha1" - "k8s.io/client-go/kubernetes/scheme" + admissionregistrationv1alpha1 "k8s.io/api/admissionregistration/v1alpha1" + scheme "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) type AdmissionregistrationV1alpha1Interface interface { RESTClient() rest.Interface + MutatingAdmissionPoliciesGetter + MutatingAdmissionPolicyBindingsGetter ValidatingAdmissionPoliciesGetter ValidatingAdmissionPolicyBindingsGetter } @@ -37,6 +39,14 @@ type AdmissionregistrationV1alpha1Client struct { restClient rest.Interface } +func (c *AdmissionregistrationV1alpha1Client) MutatingAdmissionPolicies() MutatingAdmissionPolicyInterface { + return newMutatingAdmissionPolicies(c) +} + +func (c *AdmissionregistrationV1alpha1Client) MutatingAdmissionPolicyBindings() MutatingAdmissionPolicyBindingInterface { + return newMutatingAdmissionPolicyBindings(c) +} + func (c *AdmissionregistrationV1alpha1Client) ValidatingAdmissionPolicies() ValidatingAdmissionPolicyInterface { return newValidatingAdmissionPolicies(c) } @@ -90,10 +100,10 @@ func New(c rest.Interface) *AdmissionregistrationV1alpha1Client { } func setConfigDefaults(config *rest.Config) error { - gv := v1alpha1.SchemeGroupVersion + gv := admissionregistrationv1alpha1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/fake/fake_admissionregistration_client.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/fake/fake_admissionregistration_client.go index dc0e30ca4..3dbd9b402 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/fake/fake_admissionregistration_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/fake/fake_admissionregistration_client.go @@ -28,12 +28,20 @@ type 
FakeAdmissionregistrationV1alpha1 struct { *testing.Fake } +func (c *FakeAdmissionregistrationV1alpha1) MutatingAdmissionPolicies() v1alpha1.MutatingAdmissionPolicyInterface { + return newFakeMutatingAdmissionPolicies(c) +} + +func (c *FakeAdmissionregistrationV1alpha1) MutatingAdmissionPolicyBindings() v1alpha1.MutatingAdmissionPolicyBindingInterface { + return newFakeMutatingAdmissionPolicyBindings(c) +} + func (c *FakeAdmissionregistrationV1alpha1) ValidatingAdmissionPolicies() v1alpha1.ValidatingAdmissionPolicyInterface { - return &FakeValidatingAdmissionPolicies{c} + return newFakeValidatingAdmissionPolicies(c) } func (c *FakeAdmissionregistrationV1alpha1) ValidatingAdmissionPolicyBindings() v1alpha1.ValidatingAdmissionPolicyBindingInterface { - return &FakeValidatingAdmissionPolicyBindings{c} + return newFakeValidatingAdmissionPolicyBindings(c) } // RESTClient returns a RESTClient that is used to communicate diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/fake/fake_mutatingadmissionpolicy.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/fake/fake_mutatingadmissionpolicy.go new file mode 100644 index 000000000..9c07ce315 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/fake/fake_mutatingadmissionpolicy.go @@ -0,0 +1,53 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package fake + +import ( + v1alpha1 "k8s.io/api/admissionregistration/v1alpha1" + admissionregistrationv1alpha1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1" + gentype "k8s.io/client-go/gentype" + typedadmissionregistrationv1alpha1 "k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1" +) + +// fakeMutatingAdmissionPolicies implements MutatingAdmissionPolicyInterface +type fakeMutatingAdmissionPolicies struct { + *gentype.FakeClientWithListAndApply[*v1alpha1.MutatingAdmissionPolicy, *v1alpha1.MutatingAdmissionPolicyList, *admissionregistrationv1alpha1.MutatingAdmissionPolicyApplyConfiguration] + Fake *FakeAdmissionregistrationV1alpha1 +} + +func newFakeMutatingAdmissionPolicies(fake *FakeAdmissionregistrationV1alpha1) typedadmissionregistrationv1alpha1.MutatingAdmissionPolicyInterface { + return &fakeMutatingAdmissionPolicies{ + gentype.NewFakeClientWithListAndApply[*v1alpha1.MutatingAdmissionPolicy, *v1alpha1.MutatingAdmissionPolicyList, *admissionregistrationv1alpha1.MutatingAdmissionPolicyApplyConfiguration]( + fake.Fake, + "", + v1alpha1.SchemeGroupVersion.WithResource("mutatingadmissionpolicies"), + v1alpha1.SchemeGroupVersion.WithKind("MutatingAdmissionPolicy"), + func() *v1alpha1.MutatingAdmissionPolicy { return &v1alpha1.MutatingAdmissionPolicy{} }, + func() *v1alpha1.MutatingAdmissionPolicyList { return &v1alpha1.MutatingAdmissionPolicyList{} }, + func(dst, src *v1alpha1.MutatingAdmissionPolicyList) { dst.ListMeta = src.ListMeta }, + func(list *v1alpha1.MutatingAdmissionPolicyList) []*v1alpha1.MutatingAdmissionPolicy { + return gentype.ToPointerSlice(list.Items) + }, + func(list *v1alpha1.MutatingAdmissionPolicyList, items []*v1alpha1.MutatingAdmissionPolicy) { + list.Items = gentype.FromPointerSlice(items) + }, + ), + fake, + } +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/fake/fake_mutatingadmissionpolicybinding.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/fake/fake_mutatingadmissionpolicybinding.go new file mode 100644 index 000000000..a7fbb6eb4 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/fake/fake_mutatingadmissionpolicybinding.go @@ -0,0 +1,55 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package fake + +import ( + v1alpha1 "k8s.io/api/admissionregistration/v1alpha1" + admissionregistrationv1alpha1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1" + gentype "k8s.io/client-go/gentype" + typedadmissionregistrationv1alpha1 "k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1" +) + +// fakeMutatingAdmissionPolicyBindings implements MutatingAdmissionPolicyBindingInterface +type fakeMutatingAdmissionPolicyBindings struct { + *gentype.FakeClientWithListAndApply[*v1alpha1.MutatingAdmissionPolicyBinding, *v1alpha1.MutatingAdmissionPolicyBindingList, *admissionregistrationv1alpha1.MutatingAdmissionPolicyBindingApplyConfiguration] + Fake *FakeAdmissionregistrationV1alpha1 +} + +func newFakeMutatingAdmissionPolicyBindings(fake *FakeAdmissionregistrationV1alpha1) typedadmissionregistrationv1alpha1.MutatingAdmissionPolicyBindingInterface { + return &fakeMutatingAdmissionPolicyBindings{ + gentype.NewFakeClientWithListAndApply[*v1alpha1.MutatingAdmissionPolicyBinding, *v1alpha1.MutatingAdmissionPolicyBindingList, *admissionregistrationv1alpha1.MutatingAdmissionPolicyBindingApplyConfiguration]( + fake.Fake, + "", + v1alpha1.SchemeGroupVersion.WithResource("mutatingadmissionpolicybindings"), + v1alpha1.SchemeGroupVersion.WithKind("MutatingAdmissionPolicyBinding"), + func() *v1alpha1.MutatingAdmissionPolicyBinding { return &v1alpha1.MutatingAdmissionPolicyBinding{} }, + func() *v1alpha1.MutatingAdmissionPolicyBindingList { + return &v1alpha1.MutatingAdmissionPolicyBindingList{} + }, + func(dst, src *v1alpha1.MutatingAdmissionPolicyBindingList) { dst.ListMeta = src.ListMeta }, + func(list *v1alpha1.MutatingAdmissionPolicyBindingList) []*v1alpha1.MutatingAdmissionPolicyBinding { + return gentype.ToPointerSlice(list.Items) + }, + func(list *v1alpha1.MutatingAdmissionPolicyBindingList, items []*v1alpha1.MutatingAdmissionPolicyBinding) { + list.Items = gentype.FromPointerSlice(items) + }, + ), + fake, + } +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/fake/fake_validatingadmissionpolicy.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/fake/fake_validatingadmissionpolicy.go index ef4d843e0..aad223e15 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/fake/fake_validatingadmissionpolicy.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/fake/fake_validatingadmissionpolicy.go @@ -19,168 +19,35 @@ limitations under the License. 
package fake import ( - "context" - json "encoding/json" - "fmt" - v1alpha1 "k8s.io/api/admissionregistration/v1alpha1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" admissionregistrationv1alpha1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1" - testing "k8s.io/client-go/testing" + gentype "k8s.io/client-go/gentype" + typedadmissionregistrationv1alpha1 "k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1" ) -// FakeValidatingAdmissionPolicies implements ValidatingAdmissionPolicyInterface -type FakeValidatingAdmissionPolicies struct { +// fakeValidatingAdmissionPolicies implements ValidatingAdmissionPolicyInterface +type fakeValidatingAdmissionPolicies struct { + *gentype.FakeClientWithListAndApply[*v1alpha1.ValidatingAdmissionPolicy, *v1alpha1.ValidatingAdmissionPolicyList, *admissionregistrationv1alpha1.ValidatingAdmissionPolicyApplyConfiguration] Fake *FakeAdmissionregistrationV1alpha1 } -var validatingadmissionpoliciesResource = v1alpha1.SchemeGroupVersion.WithResource("validatingadmissionpolicies") - -var validatingadmissionpoliciesKind = v1alpha1.SchemeGroupVersion.WithKind("ValidatingAdmissionPolicy") - -// Get takes name of the validatingAdmissionPolicy, and returns the corresponding validatingAdmissionPolicy object, and an error if there is any. -func (c *FakeValidatingAdmissionPolicies) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ValidatingAdmissionPolicy, err error) { - emptyResult := &v1alpha1.ValidatingAdmissionPolicy{} - obj, err := c.Fake. - Invokes(testing.NewRootGetActionWithOptions(validatingadmissionpoliciesResource, name, options), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1alpha1.ValidatingAdmissionPolicy), err -} - -// List takes label and field selectors, and returns the list of ValidatingAdmissionPolicies that match those selectors. -func (c *FakeValidatingAdmissionPolicies) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ValidatingAdmissionPolicyList, err error) { - emptyResult := &v1alpha1.ValidatingAdmissionPolicyList{} - obj, err := c.Fake. - Invokes(testing.NewRootListActionWithOptions(validatingadmissionpoliciesResource, validatingadmissionpoliciesKind, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1alpha1.ValidatingAdmissionPolicyList{ListMeta: obj.(*v1alpha1.ValidatingAdmissionPolicyList).ListMeta} - for _, item := range obj.(*v1alpha1.ValidatingAdmissionPolicyList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested validatingAdmissionPolicies. -func (c *FakeValidatingAdmissionPolicies) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewRootWatchActionWithOptions(validatingadmissionpoliciesResource, opts)) -} - -// Create takes the representation of a validatingAdmissionPolicy and creates it. Returns the server's representation of the validatingAdmissionPolicy, and an error, if there is any. 
-func (c *FakeValidatingAdmissionPolicies) Create(ctx context.Context, validatingAdmissionPolicy *v1alpha1.ValidatingAdmissionPolicy, opts v1.CreateOptions) (result *v1alpha1.ValidatingAdmissionPolicy, err error) { - emptyResult := &v1alpha1.ValidatingAdmissionPolicy{} - obj, err := c.Fake. - Invokes(testing.NewRootCreateActionWithOptions(validatingadmissionpoliciesResource, validatingAdmissionPolicy, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1alpha1.ValidatingAdmissionPolicy), err -} - -// Update takes the representation of a validatingAdmissionPolicy and updates it. Returns the server's representation of the validatingAdmissionPolicy, and an error, if there is any. -func (c *FakeValidatingAdmissionPolicies) Update(ctx context.Context, validatingAdmissionPolicy *v1alpha1.ValidatingAdmissionPolicy, opts v1.UpdateOptions) (result *v1alpha1.ValidatingAdmissionPolicy, err error) { - emptyResult := &v1alpha1.ValidatingAdmissionPolicy{} - obj, err := c.Fake. - Invokes(testing.NewRootUpdateActionWithOptions(validatingadmissionpoliciesResource, validatingAdmissionPolicy, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1alpha1.ValidatingAdmissionPolicy), err -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeValidatingAdmissionPolicies) UpdateStatus(ctx context.Context, validatingAdmissionPolicy *v1alpha1.ValidatingAdmissionPolicy, opts v1.UpdateOptions) (result *v1alpha1.ValidatingAdmissionPolicy, err error) { - emptyResult := &v1alpha1.ValidatingAdmissionPolicy{} - obj, err := c.Fake. - Invokes(testing.NewRootUpdateSubresourceActionWithOptions(validatingadmissionpoliciesResource, "status", validatingAdmissionPolicy, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1alpha1.ValidatingAdmissionPolicy), err -} - -// Delete takes name of the validatingAdmissionPolicy and deletes it. Returns an error if one occurs. -func (c *FakeValidatingAdmissionPolicies) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewRootDeleteActionWithOptions(validatingadmissionpoliciesResource, name, opts), &v1alpha1.ValidatingAdmissionPolicy{}) - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeValidatingAdmissionPolicies) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewRootDeleteCollectionActionWithOptions(validatingadmissionpoliciesResource, opts, listOpts) - - _, err := c.Fake.Invokes(action, &v1alpha1.ValidatingAdmissionPolicyList{}) - return err -} - -// Patch applies the patch and returns the patched validatingAdmissionPolicy. -func (c *FakeValidatingAdmissionPolicies) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ValidatingAdmissionPolicy, err error) { - emptyResult := &v1alpha1.ValidatingAdmissionPolicy{} - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceActionWithOptions(validatingadmissionpoliciesResource, name, pt, data, opts, subresources...), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1alpha1.ValidatingAdmissionPolicy), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied validatingAdmissionPolicy. 
-func (c *FakeValidatingAdmissionPolicies) Apply(ctx context.Context, validatingAdmissionPolicy *admissionregistrationv1alpha1.ValidatingAdmissionPolicyApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ValidatingAdmissionPolicy, err error) { - if validatingAdmissionPolicy == nil { - return nil, fmt.Errorf("validatingAdmissionPolicy provided to Apply must not be nil") - } - data, err := json.Marshal(validatingAdmissionPolicy) - if err != nil { - return nil, err - } - name := validatingAdmissionPolicy.Name - if name == nil { - return nil, fmt.Errorf("validatingAdmissionPolicy.Name must be provided to Apply") - } - emptyResult := &v1alpha1.ValidatingAdmissionPolicy{} - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceActionWithOptions(validatingadmissionpoliciesResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1alpha1.ValidatingAdmissionPolicy), err -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *FakeValidatingAdmissionPolicies) ApplyStatus(ctx context.Context, validatingAdmissionPolicy *admissionregistrationv1alpha1.ValidatingAdmissionPolicyApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ValidatingAdmissionPolicy, err error) { - if validatingAdmissionPolicy == nil { - return nil, fmt.Errorf("validatingAdmissionPolicy provided to Apply must not be nil") - } - data, err := json.Marshal(validatingAdmissionPolicy) - if err != nil { - return nil, err - } - name := validatingAdmissionPolicy.Name - if name == nil { - return nil, fmt.Errorf("validatingAdmissionPolicy.Name must be provided to Apply") - } - emptyResult := &v1alpha1.ValidatingAdmissionPolicy{} - obj, err := c.Fake. 
- Invokes(testing.NewRootPatchSubresourceActionWithOptions(validatingadmissionpoliciesResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) - if obj == nil { - return emptyResult, err +func newFakeValidatingAdmissionPolicies(fake *FakeAdmissionregistrationV1alpha1) typedadmissionregistrationv1alpha1.ValidatingAdmissionPolicyInterface { + return &fakeValidatingAdmissionPolicies{ + gentype.NewFakeClientWithListAndApply[*v1alpha1.ValidatingAdmissionPolicy, *v1alpha1.ValidatingAdmissionPolicyList, *admissionregistrationv1alpha1.ValidatingAdmissionPolicyApplyConfiguration]( + fake.Fake, + "", + v1alpha1.SchemeGroupVersion.WithResource("validatingadmissionpolicies"), + v1alpha1.SchemeGroupVersion.WithKind("ValidatingAdmissionPolicy"), + func() *v1alpha1.ValidatingAdmissionPolicy { return &v1alpha1.ValidatingAdmissionPolicy{} }, + func() *v1alpha1.ValidatingAdmissionPolicyList { return &v1alpha1.ValidatingAdmissionPolicyList{} }, + func(dst, src *v1alpha1.ValidatingAdmissionPolicyList) { dst.ListMeta = src.ListMeta }, + func(list *v1alpha1.ValidatingAdmissionPolicyList) []*v1alpha1.ValidatingAdmissionPolicy { + return gentype.ToPointerSlice(list.Items) + }, + func(list *v1alpha1.ValidatingAdmissionPolicyList, items []*v1alpha1.ValidatingAdmissionPolicy) { + list.Items = gentype.FromPointerSlice(items) + }, + ), + fake, } - return obj.(*v1alpha1.ValidatingAdmissionPolicy), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/fake/fake_validatingadmissionpolicybinding.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/fake/fake_validatingadmissionpolicybinding.go index f7cc966fb..a22a3f16b 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/fake/fake_validatingadmissionpolicybinding.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/fake/fake_validatingadmissionpolicybinding.go @@ -19,133 +19,37 @@ limitations under the License. package fake import ( - "context" - json "encoding/json" - "fmt" - v1alpha1 "k8s.io/api/admissionregistration/v1alpha1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" admissionregistrationv1alpha1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1" - testing "k8s.io/client-go/testing" + gentype "k8s.io/client-go/gentype" + typedadmissionregistrationv1alpha1 "k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1" ) -// FakeValidatingAdmissionPolicyBindings implements ValidatingAdmissionPolicyBindingInterface -type FakeValidatingAdmissionPolicyBindings struct { +// fakeValidatingAdmissionPolicyBindings implements ValidatingAdmissionPolicyBindingInterface +type fakeValidatingAdmissionPolicyBindings struct { + *gentype.FakeClientWithListAndApply[*v1alpha1.ValidatingAdmissionPolicyBinding, *v1alpha1.ValidatingAdmissionPolicyBindingList, *admissionregistrationv1alpha1.ValidatingAdmissionPolicyBindingApplyConfiguration] Fake *FakeAdmissionregistrationV1alpha1 } -var validatingadmissionpolicybindingsResource = v1alpha1.SchemeGroupVersion.WithResource("validatingadmissionpolicybindings") - -var validatingadmissionpolicybindingsKind = v1alpha1.SchemeGroupVersion.WithKind("ValidatingAdmissionPolicyBinding") - -// Get takes name of the validatingAdmissionPolicyBinding, and returns the corresponding validatingAdmissionPolicyBinding object, and an error if there is any. 
-func (c *FakeValidatingAdmissionPolicyBindings) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ValidatingAdmissionPolicyBinding, err error) { - emptyResult := &v1alpha1.ValidatingAdmissionPolicyBinding{} - obj, err := c.Fake. - Invokes(testing.NewRootGetActionWithOptions(validatingadmissionpolicybindingsResource, name, options), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1alpha1.ValidatingAdmissionPolicyBinding), err -} - -// List takes label and field selectors, and returns the list of ValidatingAdmissionPolicyBindings that match those selectors. -func (c *FakeValidatingAdmissionPolicyBindings) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ValidatingAdmissionPolicyBindingList, err error) { - emptyResult := &v1alpha1.ValidatingAdmissionPolicyBindingList{} - obj, err := c.Fake. - Invokes(testing.NewRootListActionWithOptions(validatingadmissionpolicybindingsResource, validatingadmissionpolicybindingsKind, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1alpha1.ValidatingAdmissionPolicyBindingList{ListMeta: obj.(*v1alpha1.ValidatingAdmissionPolicyBindingList).ListMeta} - for _, item := range obj.(*v1alpha1.ValidatingAdmissionPolicyBindingList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested validatingAdmissionPolicyBindings. -func (c *FakeValidatingAdmissionPolicyBindings) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewRootWatchActionWithOptions(validatingadmissionpolicybindingsResource, opts)) -} - -// Create takes the representation of a validatingAdmissionPolicyBinding and creates it. Returns the server's representation of the validatingAdmissionPolicyBinding, and an error, if there is any. -func (c *FakeValidatingAdmissionPolicyBindings) Create(ctx context.Context, validatingAdmissionPolicyBinding *v1alpha1.ValidatingAdmissionPolicyBinding, opts v1.CreateOptions) (result *v1alpha1.ValidatingAdmissionPolicyBinding, err error) { - emptyResult := &v1alpha1.ValidatingAdmissionPolicyBinding{} - obj, err := c.Fake. - Invokes(testing.NewRootCreateActionWithOptions(validatingadmissionpolicybindingsResource, validatingAdmissionPolicyBinding, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1alpha1.ValidatingAdmissionPolicyBinding), err -} - -// Update takes the representation of a validatingAdmissionPolicyBinding and updates it. Returns the server's representation of the validatingAdmissionPolicyBinding, and an error, if there is any. -func (c *FakeValidatingAdmissionPolicyBindings) Update(ctx context.Context, validatingAdmissionPolicyBinding *v1alpha1.ValidatingAdmissionPolicyBinding, opts v1.UpdateOptions) (result *v1alpha1.ValidatingAdmissionPolicyBinding, err error) { - emptyResult := &v1alpha1.ValidatingAdmissionPolicyBinding{} - obj, err := c.Fake. - Invokes(testing.NewRootUpdateActionWithOptions(validatingadmissionpolicybindingsResource, validatingAdmissionPolicyBinding, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1alpha1.ValidatingAdmissionPolicyBinding), err -} - -// Delete takes name of the validatingAdmissionPolicyBinding and deletes it. Returns an error if one occurs. 
-func (c *FakeValidatingAdmissionPolicyBindings) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewRootDeleteActionWithOptions(validatingadmissionpolicybindingsResource, name, opts), &v1alpha1.ValidatingAdmissionPolicyBinding{}) - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeValidatingAdmissionPolicyBindings) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewRootDeleteCollectionActionWithOptions(validatingadmissionpolicybindingsResource, opts, listOpts) - - _, err := c.Fake.Invokes(action, &v1alpha1.ValidatingAdmissionPolicyBindingList{}) - return err -} - -// Patch applies the patch and returns the patched validatingAdmissionPolicyBinding. -func (c *FakeValidatingAdmissionPolicyBindings) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ValidatingAdmissionPolicyBinding, err error) { - emptyResult := &v1alpha1.ValidatingAdmissionPolicyBinding{} - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceActionWithOptions(validatingadmissionpolicybindingsResource, name, pt, data, opts, subresources...), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1alpha1.ValidatingAdmissionPolicyBinding), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied validatingAdmissionPolicyBinding. -func (c *FakeValidatingAdmissionPolicyBindings) Apply(ctx context.Context, validatingAdmissionPolicyBinding *admissionregistrationv1alpha1.ValidatingAdmissionPolicyBindingApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ValidatingAdmissionPolicyBinding, err error) { - if validatingAdmissionPolicyBinding == nil { - return nil, fmt.Errorf("validatingAdmissionPolicyBinding provided to Apply must not be nil") - } - data, err := json.Marshal(validatingAdmissionPolicyBinding) - if err != nil { - return nil, err - } - name := validatingAdmissionPolicyBinding.Name - if name == nil { - return nil, fmt.Errorf("validatingAdmissionPolicyBinding.Name must be provided to Apply") - } - emptyResult := &v1alpha1.ValidatingAdmissionPolicyBinding{} - obj, err := c.Fake. 
- Invokes(testing.NewRootPatchSubresourceActionWithOptions(validatingadmissionpolicybindingsResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) - if obj == nil { - return emptyResult, err +func newFakeValidatingAdmissionPolicyBindings(fake *FakeAdmissionregistrationV1alpha1) typedadmissionregistrationv1alpha1.ValidatingAdmissionPolicyBindingInterface { + return &fakeValidatingAdmissionPolicyBindings{ + gentype.NewFakeClientWithListAndApply[*v1alpha1.ValidatingAdmissionPolicyBinding, *v1alpha1.ValidatingAdmissionPolicyBindingList, *admissionregistrationv1alpha1.ValidatingAdmissionPolicyBindingApplyConfiguration]( + fake.Fake, + "", + v1alpha1.SchemeGroupVersion.WithResource("validatingadmissionpolicybindings"), + v1alpha1.SchemeGroupVersion.WithKind("ValidatingAdmissionPolicyBinding"), + func() *v1alpha1.ValidatingAdmissionPolicyBinding { return &v1alpha1.ValidatingAdmissionPolicyBinding{} }, + func() *v1alpha1.ValidatingAdmissionPolicyBindingList { + return &v1alpha1.ValidatingAdmissionPolicyBindingList{} + }, + func(dst, src *v1alpha1.ValidatingAdmissionPolicyBindingList) { dst.ListMeta = src.ListMeta }, + func(list *v1alpha1.ValidatingAdmissionPolicyBindingList) []*v1alpha1.ValidatingAdmissionPolicyBinding { + return gentype.ToPointerSlice(list.Items) + }, + func(list *v1alpha1.ValidatingAdmissionPolicyBindingList, items []*v1alpha1.ValidatingAdmissionPolicyBinding) { + list.Items = gentype.FromPointerSlice(items) + }, + ), + fake, } - return obj.(*v1alpha1.ValidatingAdmissionPolicyBinding), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/generated_expansion.go index 94562da59..676578c63 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/generated_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/generated_expansion.go @@ -18,6 +18,10 @@ limitations under the License. package v1alpha1 +type MutatingAdmissionPolicyExpansion interface{} + +type MutatingAdmissionPolicyBindingExpansion interface{} + type ValidatingAdmissionPolicyExpansion interface{} type ValidatingAdmissionPolicyBindingExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/mutatingadmissionpolicy.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/mutatingadmissionpolicy.go new file mode 100644 index 000000000..4a781a602 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/mutatingadmissionpolicy.go @@ -0,0 +1,75 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
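Reviewer's note, not part of the vendored patch: the new mutatingadmissionpolicy.go file being added here (see the interface that follows) gives the v1alpha1 client a typed, cluster-scoped MutatingAdmissionPolicies() accessor built on gentype.ClientWithListAndApply. A minimal sketch of using it through the fake clientset shipped with this client-go bump; the policy name is an illustrative assumption and the fake performs no validation.

package mutatingpolicy_example

import (
	"context"
	"fmt"

	admissionv1alpha1 "k8s.io/api/admissionregistration/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
)

// demoMutatingAdmissionPolicy creates a spec-less policy through the new typed client
// and reads it back from the fake tracker.
func demoMutatingAdmissionPolicy() error {
	cs := fake.NewClientset()

	if _, err := cs.AdmissionregistrationV1alpha1().MutatingAdmissionPolicies().
		Create(context.TODO(),
			&admissionv1alpha1.MutatingAdmissionPolicy{
				ObjectMeta: metav1.ObjectMeta{Name: "example-policy"},
			},
			metav1.CreateOptions{}); err != nil {
		return err
	}

	got, err := cs.AdmissionregistrationV1alpha1().MutatingAdmissionPolicies().
		Get(context.TODO(), "example-policy", metav1.GetOptions{})
	if err != nil {
		return err
	}
	fmt.Println("created policy:", got.Name)
	return nil
}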
+ +package v1alpha1 + +import ( + context "context" + + admissionregistrationv1alpha1 "k8s.io/api/admissionregistration/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + applyconfigurationsadmissionregistrationv1alpha1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1" + gentype "k8s.io/client-go/gentype" + scheme "k8s.io/client-go/kubernetes/scheme" +) + +// MutatingAdmissionPoliciesGetter has a method to return a MutatingAdmissionPolicyInterface. +// A group's client should implement this interface. +type MutatingAdmissionPoliciesGetter interface { + MutatingAdmissionPolicies() MutatingAdmissionPolicyInterface +} + +// MutatingAdmissionPolicyInterface has methods to work with MutatingAdmissionPolicy resources. +type MutatingAdmissionPolicyInterface interface { + Create(ctx context.Context, mutatingAdmissionPolicy *admissionregistrationv1alpha1.MutatingAdmissionPolicy, opts v1.CreateOptions) (*admissionregistrationv1alpha1.MutatingAdmissionPolicy, error) + Update(ctx context.Context, mutatingAdmissionPolicy *admissionregistrationv1alpha1.MutatingAdmissionPolicy, opts v1.UpdateOptions) (*admissionregistrationv1alpha1.MutatingAdmissionPolicy, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*admissionregistrationv1alpha1.MutatingAdmissionPolicy, error) + List(ctx context.Context, opts v1.ListOptions) (*admissionregistrationv1alpha1.MutatingAdmissionPolicyList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *admissionregistrationv1alpha1.MutatingAdmissionPolicy, err error) + Apply(ctx context.Context, mutatingAdmissionPolicy *applyconfigurationsadmissionregistrationv1alpha1.MutatingAdmissionPolicyApplyConfiguration, opts v1.ApplyOptions) (result *admissionregistrationv1alpha1.MutatingAdmissionPolicy, err error) + MutatingAdmissionPolicyExpansion +} + +// mutatingAdmissionPolicies implements MutatingAdmissionPolicyInterface +type mutatingAdmissionPolicies struct { + *gentype.ClientWithListAndApply[*admissionregistrationv1alpha1.MutatingAdmissionPolicy, *admissionregistrationv1alpha1.MutatingAdmissionPolicyList, *applyconfigurationsadmissionregistrationv1alpha1.MutatingAdmissionPolicyApplyConfiguration] +} + +// newMutatingAdmissionPolicies returns a MutatingAdmissionPolicies +func newMutatingAdmissionPolicies(c *AdmissionregistrationV1alpha1Client) *mutatingAdmissionPolicies { + return &mutatingAdmissionPolicies{ + gentype.NewClientWithListAndApply[*admissionregistrationv1alpha1.MutatingAdmissionPolicy, *admissionregistrationv1alpha1.MutatingAdmissionPolicyList, *applyconfigurationsadmissionregistrationv1alpha1.MutatingAdmissionPolicyApplyConfiguration]( + "mutatingadmissionpolicies", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *admissionregistrationv1alpha1.MutatingAdmissionPolicy { + return &admissionregistrationv1alpha1.MutatingAdmissionPolicy{} + }, + func() *admissionregistrationv1alpha1.MutatingAdmissionPolicyList { + return &admissionregistrationv1alpha1.MutatingAdmissionPolicyList{} + }, + gentype.PrefersProtobuf[*admissionregistrationv1alpha1.MutatingAdmissionPolicy](), + ), + } +} diff --git 
a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/mutatingadmissionpolicybinding.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/mutatingadmissionpolicybinding.go new file mode 100644 index 000000000..78057e200 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/mutatingadmissionpolicybinding.go @@ -0,0 +1,75 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + context "context" + + admissionregistrationv1alpha1 "k8s.io/api/admissionregistration/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + applyconfigurationsadmissionregistrationv1alpha1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1" + gentype "k8s.io/client-go/gentype" + scheme "k8s.io/client-go/kubernetes/scheme" +) + +// MutatingAdmissionPolicyBindingsGetter has a method to return a MutatingAdmissionPolicyBindingInterface. +// A group's client should implement this interface. +type MutatingAdmissionPolicyBindingsGetter interface { + MutatingAdmissionPolicyBindings() MutatingAdmissionPolicyBindingInterface +} + +// MutatingAdmissionPolicyBindingInterface has methods to work with MutatingAdmissionPolicyBinding resources. 
+type MutatingAdmissionPolicyBindingInterface interface { + Create(ctx context.Context, mutatingAdmissionPolicyBinding *admissionregistrationv1alpha1.MutatingAdmissionPolicyBinding, opts v1.CreateOptions) (*admissionregistrationv1alpha1.MutatingAdmissionPolicyBinding, error) + Update(ctx context.Context, mutatingAdmissionPolicyBinding *admissionregistrationv1alpha1.MutatingAdmissionPolicyBinding, opts v1.UpdateOptions) (*admissionregistrationv1alpha1.MutatingAdmissionPolicyBinding, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*admissionregistrationv1alpha1.MutatingAdmissionPolicyBinding, error) + List(ctx context.Context, opts v1.ListOptions) (*admissionregistrationv1alpha1.MutatingAdmissionPolicyBindingList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *admissionregistrationv1alpha1.MutatingAdmissionPolicyBinding, err error) + Apply(ctx context.Context, mutatingAdmissionPolicyBinding *applyconfigurationsadmissionregistrationv1alpha1.MutatingAdmissionPolicyBindingApplyConfiguration, opts v1.ApplyOptions) (result *admissionregistrationv1alpha1.MutatingAdmissionPolicyBinding, err error) + MutatingAdmissionPolicyBindingExpansion +} + +// mutatingAdmissionPolicyBindings implements MutatingAdmissionPolicyBindingInterface +type mutatingAdmissionPolicyBindings struct { + *gentype.ClientWithListAndApply[*admissionregistrationv1alpha1.MutatingAdmissionPolicyBinding, *admissionregistrationv1alpha1.MutatingAdmissionPolicyBindingList, *applyconfigurationsadmissionregistrationv1alpha1.MutatingAdmissionPolicyBindingApplyConfiguration] +} + +// newMutatingAdmissionPolicyBindings returns a MutatingAdmissionPolicyBindings +func newMutatingAdmissionPolicyBindings(c *AdmissionregistrationV1alpha1Client) *mutatingAdmissionPolicyBindings { + return &mutatingAdmissionPolicyBindings{ + gentype.NewClientWithListAndApply[*admissionregistrationv1alpha1.MutatingAdmissionPolicyBinding, *admissionregistrationv1alpha1.MutatingAdmissionPolicyBindingList, *applyconfigurationsadmissionregistrationv1alpha1.MutatingAdmissionPolicyBindingApplyConfiguration]( + "mutatingadmissionpolicybindings", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *admissionregistrationv1alpha1.MutatingAdmissionPolicyBinding { + return &admissionregistrationv1alpha1.MutatingAdmissionPolicyBinding{} + }, + func() *admissionregistrationv1alpha1.MutatingAdmissionPolicyBindingList { + return &admissionregistrationv1alpha1.MutatingAdmissionPolicyBindingList{} + }, + gentype.PrefersProtobuf[*admissionregistrationv1alpha1.MutatingAdmissionPolicyBinding](), + ), + } +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/validatingadmissionpolicy.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/validatingadmissionpolicy.go index c2b7c825c..ce2328b12 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/validatingadmissionpolicy.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/validatingadmissionpolicy.go @@ -19,13 +19,13 @@ limitations under the License. 
package v1alpha1 import ( - "context" + context "context" - v1alpha1 "k8s.io/api/admissionregistration/v1alpha1" + admissionregistrationv1alpha1 "k8s.io/api/admissionregistration/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - admissionregistrationv1alpha1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1" + applyconfigurationsadmissionregistrationv1alpha1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1" gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" ) @@ -38,36 +38,42 @@ type ValidatingAdmissionPoliciesGetter interface { // ValidatingAdmissionPolicyInterface has methods to work with ValidatingAdmissionPolicy resources. type ValidatingAdmissionPolicyInterface interface { - Create(ctx context.Context, validatingAdmissionPolicy *v1alpha1.ValidatingAdmissionPolicy, opts v1.CreateOptions) (*v1alpha1.ValidatingAdmissionPolicy, error) - Update(ctx context.Context, validatingAdmissionPolicy *v1alpha1.ValidatingAdmissionPolicy, opts v1.UpdateOptions) (*v1alpha1.ValidatingAdmissionPolicy, error) + Create(ctx context.Context, validatingAdmissionPolicy *admissionregistrationv1alpha1.ValidatingAdmissionPolicy, opts v1.CreateOptions) (*admissionregistrationv1alpha1.ValidatingAdmissionPolicy, error) + Update(ctx context.Context, validatingAdmissionPolicy *admissionregistrationv1alpha1.ValidatingAdmissionPolicy, opts v1.UpdateOptions) (*admissionregistrationv1alpha1.ValidatingAdmissionPolicy, error) // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - UpdateStatus(ctx context.Context, validatingAdmissionPolicy *v1alpha1.ValidatingAdmissionPolicy, opts v1.UpdateOptions) (*v1alpha1.ValidatingAdmissionPolicy, error) + UpdateStatus(ctx context.Context, validatingAdmissionPolicy *admissionregistrationv1alpha1.ValidatingAdmissionPolicy, opts v1.UpdateOptions) (*admissionregistrationv1alpha1.ValidatingAdmissionPolicy, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.ValidatingAdmissionPolicy, error) - List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.ValidatingAdmissionPolicyList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*admissionregistrationv1alpha1.ValidatingAdmissionPolicy, error) + List(ctx context.Context, opts v1.ListOptions) (*admissionregistrationv1alpha1.ValidatingAdmissionPolicyList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ValidatingAdmissionPolicy, err error) - Apply(ctx context.Context, validatingAdmissionPolicy *admissionregistrationv1alpha1.ValidatingAdmissionPolicyApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ValidatingAdmissionPolicy, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *admissionregistrationv1alpha1.ValidatingAdmissionPolicy, err error) + Apply(ctx context.Context, validatingAdmissionPolicy *applyconfigurationsadmissionregistrationv1alpha1.ValidatingAdmissionPolicyApplyConfiguration, opts v1.ApplyOptions) (result *admissionregistrationv1alpha1.ValidatingAdmissionPolicy, err error) // Add a 
+genclient:noStatus comment above the type to avoid generating ApplyStatus(). - ApplyStatus(ctx context.Context, validatingAdmissionPolicy *admissionregistrationv1alpha1.ValidatingAdmissionPolicyApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ValidatingAdmissionPolicy, err error) + ApplyStatus(ctx context.Context, validatingAdmissionPolicy *applyconfigurationsadmissionregistrationv1alpha1.ValidatingAdmissionPolicyApplyConfiguration, opts v1.ApplyOptions) (result *admissionregistrationv1alpha1.ValidatingAdmissionPolicy, err error) ValidatingAdmissionPolicyExpansion } // validatingAdmissionPolicies implements ValidatingAdmissionPolicyInterface type validatingAdmissionPolicies struct { - *gentype.ClientWithListAndApply[*v1alpha1.ValidatingAdmissionPolicy, *v1alpha1.ValidatingAdmissionPolicyList, *admissionregistrationv1alpha1.ValidatingAdmissionPolicyApplyConfiguration] + *gentype.ClientWithListAndApply[*admissionregistrationv1alpha1.ValidatingAdmissionPolicy, *admissionregistrationv1alpha1.ValidatingAdmissionPolicyList, *applyconfigurationsadmissionregistrationv1alpha1.ValidatingAdmissionPolicyApplyConfiguration] } // newValidatingAdmissionPolicies returns a ValidatingAdmissionPolicies func newValidatingAdmissionPolicies(c *AdmissionregistrationV1alpha1Client) *validatingAdmissionPolicies { return &validatingAdmissionPolicies{ - gentype.NewClientWithListAndApply[*v1alpha1.ValidatingAdmissionPolicy, *v1alpha1.ValidatingAdmissionPolicyList, *admissionregistrationv1alpha1.ValidatingAdmissionPolicyApplyConfiguration]( + gentype.NewClientWithListAndApply[*admissionregistrationv1alpha1.ValidatingAdmissionPolicy, *admissionregistrationv1alpha1.ValidatingAdmissionPolicyList, *applyconfigurationsadmissionregistrationv1alpha1.ValidatingAdmissionPolicyApplyConfiguration]( "validatingadmissionpolicies", c.RESTClient(), scheme.ParameterCodec, "", - func() *v1alpha1.ValidatingAdmissionPolicy { return &v1alpha1.ValidatingAdmissionPolicy{} }, - func() *v1alpha1.ValidatingAdmissionPolicyList { return &v1alpha1.ValidatingAdmissionPolicyList{} }), + func() *admissionregistrationv1alpha1.ValidatingAdmissionPolicy { + return &admissionregistrationv1alpha1.ValidatingAdmissionPolicy{} + }, + func() *admissionregistrationv1alpha1.ValidatingAdmissionPolicyList { + return &admissionregistrationv1alpha1.ValidatingAdmissionPolicyList{} + }, + gentype.PrefersProtobuf[*admissionregistrationv1alpha1.ValidatingAdmissionPolicy](), + ), } } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/validatingadmissionpolicybinding.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/validatingadmissionpolicybinding.go index d8d0796ea..6236ea90c 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/validatingadmissionpolicybinding.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/validatingadmissionpolicybinding.go @@ -19,13 +19,13 @@ limitations under the License. 
package v1alpha1 import ( - "context" + context "context" - v1alpha1 "k8s.io/api/admissionregistration/v1alpha1" + admissionregistrationv1alpha1 "k8s.io/api/admissionregistration/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - admissionregistrationv1alpha1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1" + applyconfigurationsadmissionregistrationv1alpha1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1" gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" ) @@ -38,34 +38,38 @@ type ValidatingAdmissionPolicyBindingsGetter interface { // ValidatingAdmissionPolicyBindingInterface has methods to work with ValidatingAdmissionPolicyBinding resources. type ValidatingAdmissionPolicyBindingInterface interface { - Create(ctx context.Context, validatingAdmissionPolicyBinding *v1alpha1.ValidatingAdmissionPolicyBinding, opts v1.CreateOptions) (*v1alpha1.ValidatingAdmissionPolicyBinding, error) - Update(ctx context.Context, validatingAdmissionPolicyBinding *v1alpha1.ValidatingAdmissionPolicyBinding, opts v1.UpdateOptions) (*v1alpha1.ValidatingAdmissionPolicyBinding, error) + Create(ctx context.Context, validatingAdmissionPolicyBinding *admissionregistrationv1alpha1.ValidatingAdmissionPolicyBinding, opts v1.CreateOptions) (*admissionregistrationv1alpha1.ValidatingAdmissionPolicyBinding, error) + Update(ctx context.Context, validatingAdmissionPolicyBinding *admissionregistrationv1alpha1.ValidatingAdmissionPolicyBinding, opts v1.UpdateOptions) (*admissionregistrationv1alpha1.ValidatingAdmissionPolicyBinding, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.ValidatingAdmissionPolicyBinding, error) - List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.ValidatingAdmissionPolicyBindingList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*admissionregistrationv1alpha1.ValidatingAdmissionPolicyBinding, error) + List(ctx context.Context, opts v1.ListOptions) (*admissionregistrationv1alpha1.ValidatingAdmissionPolicyBindingList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ValidatingAdmissionPolicyBinding, err error) - Apply(ctx context.Context, validatingAdmissionPolicyBinding *admissionregistrationv1alpha1.ValidatingAdmissionPolicyBindingApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ValidatingAdmissionPolicyBinding, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *admissionregistrationv1alpha1.ValidatingAdmissionPolicyBinding, err error) + Apply(ctx context.Context, validatingAdmissionPolicyBinding *applyconfigurationsadmissionregistrationv1alpha1.ValidatingAdmissionPolicyBindingApplyConfiguration, opts v1.ApplyOptions) (result *admissionregistrationv1alpha1.ValidatingAdmissionPolicyBinding, err error) ValidatingAdmissionPolicyBindingExpansion } // validatingAdmissionPolicyBindings implements ValidatingAdmissionPolicyBindingInterface type validatingAdmissionPolicyBindings struct { - *gentype.ClientWithListAndApply[*v1alpha1.ValidatingAdmissionPolicyBinding, 
*v1alpha1.ValidatingAdmissionPolicyBindingList, *admissionregistrationv1alpha1.ValidatingAdmissionPolicyBindingApplyConfiguration] + *gentype.ClientWithListAndApply[*admissionregistrationv1alpha1.ValidatingAdmissionPolicyBinding, *admissionregistrationv1alpha1.ValidatingAdmissionPolicyBindingList, *applyconfigurationsadmissionregistrationv1alpha1.ValidatingAdmissionPolicyBindingApplyConfiguration] } // newValidatingAdmissionPolicyBindings returns a ValidatingAdmissionPolicyBindings func newValidatingAdmissionPolicyBindings(c *AdmissionregistrationV1alpha1Client) *validatingAdmissionPolicyBindings { return &validatingAdmissionPolicyBindings{ - gentype.NewClientWithListAndApply[*v1alpha1.ValidatingAdmissionPolicyBinding, *v1alpha1.ValidatingAdmissionPolicyBindingList, *admissionregistrationv1alpha1.ValidatingAdmissionPolicyBindingApplyConfiguration]( + gentype.NewClientWithListAndApply[*admissionregistrationv1alpha1.ValidatingAdmissionPolicyBinding, *admissionregistrationv1alpha1.ValidatingAdmissionPolicyBindingList, *applyconfigurationsadmissionregistrationv1alpha1.ValidatingAdmissionPolicyBindingApplyConfiguration]( "validatingadmissionpolicybindings", c.RESTClient(), scheme.ParameterCodec, "", - func() *v1alpha1.ValidatingAdmissionPolicyBinding { return &v1alpha1.ValidatingAdmissionPolicyBinding{} }, - func() *v1alpha1.ValidatingAdmissionPolicyBindingList { - return &v1alpha1.ValidatingAdmissionPolicyBindingList{} - }), + func() *admissionregistrationv1alpha1.ValidatingAdmissionPolicyBinding { + return &admissionregistrationv1alpha1.ValidatingAdmissionPolicyBinding{} + }, + func() *admissionregistrationv1alpha1.ValidatingAdmissionPolicyBindingList { + return &admissionregistrationv1alpha1.ValidatingAdmissionPolicyBindingList{} + }, + gentype.PrefersProtobuf[*admissionregistrationv1alpha1.ValidatingAdmissionPolicyBinding](), + ), } } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/admissionregistration_client.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/admissionregistration_client.go index 5a0a17d9b..16c42b0ec 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/admissionregistration_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/admissionregistration_client.go @@ -19,10 +19,10 @@ limitations under the License. 
package v1beta1 import ( - "net/http" + http "net/http" - v1beta1 "k8s.io/api/admissionregistration/v1beta1" - "k8s.io/client-go/kubernetes/scheme" + admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1" + scheme "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -100,10 +100,10 @@ func New(c rest.Interface) *AdmissionregistrationV1beta1Client { } func setConfigDefaults(config *rest.Config) error { - gv := v1beta1.SchemeGroupVersion + gv := admissionregistrationv1beta1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_admissionregistration_client.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_admissionregistration_client.go index badfbf034..158303f85 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_admissionregistration_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_admissionregistration_client.go @@ -29,19 +29,19 @@ type FakeAdmissionregistrationV1beta1 struct { } func (c *FakeAdmissionregistrationV1beta1) MutatingWebhookConfigurations() v1beta1.MutatingWebhookConfigurationInterface { - return &FakeMutatingWebhookConfigurations{c} + return newFakeMutatingWebhookConfigurations(c) } func (c *FakeAdmissionregistrationV1beta1) ValidatingAdmissionPolicies() v1beta1.ValidatingAdmissionPolicyInterface { - return &FakeValidatingAdmissionPolicies{c} + return newFakeValidatingAdmissionPolicies(c) } func (c *FakeAdmissionregistrationV1beta1) ValidatingAdmissionPolicyBindings() v1beta1.ValidatingAdmissionPolicyBindingInterface { - return &FakeValidatingAdmissionPolicyBindings{c} + return newFakeValidatingAdmissionPolicyBindings(c) } func (c *FakeAdmissionregistrationV1beta1) ValidatingWebhookConfigurations() v1beta1.ValidatingWebhookConfigurationInterface { - return &FakeValidatingWebhookConfigurations{c} + return newFakeValidatingWebhookConfigurations(c) } // RESTClient returns a RESTClient that is used to communicate diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_mutatingwebhookconfiguration.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_mutatingwebhookconfiguration.go index 767154932..c55b2e4f6 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_mutatingwebhookconfiguration.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_mutatingwebhookconfiguration.go @@ -19,133 +19,35 @@ limitations under the License. 
package fake import ( - "context" - json "encoding/json" - "fmt" - v1beta1 "k8s.io/api/admissionregistration/v1beta1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" admissionregistrationv1beta1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1" - testing "k8s.io/client-go/testing" + gentype "k8s.io/client-go/gentype" + typedadmissionregistrationv1beta1 "k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1" ) -// FakeMutatingWebhookConfigurations implements MutatingWebhookConfigurationInterface -type FakeMutatingWebhookConfigurations struct { +// fakeMutatingWebhookConfigurations implements MutatingWebhookConfigurationInterface +type fakeMutatingWebhookConfigurations struct { + *gentype.FakeClientWithListAndApply[*v1beta1.MutatingWebhookConfiguration, *v1beta1.MutatingWebhookConfigurationList, *admissionregistrationv1beta1.MutatingWebhookConfigurationApplyConfiguration] Fake *FakeAdmissionregistrationV1beta1 } -var mutatingwebhookconfigurationsResource = v1beta1.SchemeGroupVersion.WithResource("mutatingwebhookconfigurations") - -var mutatingwebhookconfigurationsKind = v1beta1.SchemeGroupVersion.WithKind("MutatingWebhookConfiguration") - -// Get takes name of the mutatingWebhookConfiguration, and returns the corresponding mutatingWebhookConfiguration object, and an error if there is any. -func (c *FakeMutatingWebhookConfigurations) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.MutatingWebhookConfiguration, err error) { - emptyResult := &v1beta1.MutatingWebhookConfiguration{} - obj, err := c.Fake. - Invokes(testing.NewRootGetActionWithOptions(mutatingwebhookconfigurationsResource, name, options), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.MutatingWebhookConfiguration), err -} - -// List takes label and field selectors, and returns the list of MutatingWebhookConfigurations that match those selectors. -func (c *FakeMutatingWebhookConfigurations) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.MutatingWebhookConfigurationList, err error) { - emptyResult := &v1beta1.MutatingWebhookConfigurationList{} - obj, err := c.Fake. - Invokes(testing.NewRootListActionWithOptions(mutatingwebhookconfigurationsResource, mutatingwebhookconfigurationsKind, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1beta1.MutatingWebhookConfigurationList{ListMeta: obj.(*v1beta1.MutatingWebhookConfigurationList).ListMeta} - for _, item := range obj.(*v1beta1.MutatingWebhookConfigurationList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested mutatingWebhookConfigurations. -func (c *FakeMutatingWebhookConfigurations) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewRootWatchActionWithOptions(mutatingwebhookconfigurationsResource, opts)) -} - -// Create takes the representation of a mutatingWebhookConfiguration and creates it. Returns the server's representation of the mutatingWebhookConfiguration, and an error, if there is any. 
-func (c *FakeMutatingWebhookConfigurations) Create(ctx context.Context, mutatingWebhookConfiguration *v1beta1.MutatingWebhookConfiguration, opts v1.CreateOptions) (result *v1beta1.MutatingWebhookConfiguration, err error) { - emptyResult := &v1beta1.MutatingWebhookConfiguration{} - obj, err := c.Fake. - Invokes(testing.NewRootCreateActionWithOptions(mutatingwebhookconfigurationsResource, mutatingWebhookConfiguration, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.MutatingWebhookConfiguration), err -} - -// Update takes the representation of a mutatingWebhookConfiguration and updates it. Returns the server's representation of the mutatingWebhookConfiguration, and an error, if there is any. -func (c *FakeMutatingWebhookConfigurations) Update(ctx context.Context, mutatingWebhookConfiguration *v1beta1.MutatingWebhookConfiguration, opts v1.UpdateOptions) (result *v1beta1.MutatingWebhookConfiguration, err error) { - emptyResult := &v1beta1.MutatingWebhookConfiguration{} - obj, err := c.Fake. - Invokes(testing.NewRootUpdateActionWithOptions(mutatingwebhookconfigurationsResource, mutatingWebhookConfiguration, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.MutatingWebhookConfiguration), err -} - -// Delete takes name of the mutatingWebhookConfiguration and deletes it. Returns an error if one occurs. -func (c *FakeMutatingWebhookConfigurations) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewRootDeleteActionWithOptions(mutatingwebhookconfigurationsResource, name, opts), &v1beta1.MutatingWebhookConfiguration{}) - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeMutatingWebhookConfigurations) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewRootDeleteCollectionActionWithOptions(mutatingwebhookconfigurationsResource, opts, listOpts) - - _, err := c.Fake.Invokes(action, &v1beta1.MutatingWebhookConfigurationList{}) - return err -} - -// Patch applies the patch and returns the patched mutatingWebhookConfiguration. -func (c *FakeMutatingWebhookConfigurations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.MutatingWebhookConfiguration, err error) { - emptyResult := &v1beta1.MutatingWebhookConfiguration{} - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceActionWithOptions(mutatingwebhookconfigurationsResource, name, pt, data, opts, subresources...), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.MutatingWebhookConfiguration), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied mutatingWebhookConfiguration. 
-func (c *FakeMutatingWebhookConfigurations) Apply(ctx context.Context, mutatingWebhookConfiguration *admissionregistrationv1beta1.MutatingWebhookConfigurationApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.MutatingWebhookConfiguration, err error) { - if mutatingWebhookConfiguration == nil { - return nil, fmt.Errorf("mutatingWebhookConfiguration provided to Apply must not be nil") - } - data, err := json.Marshal(mutatingWebhookConfiguration) - if err != nil { - return nil, err - } - name := mutatingWebhookConfiguration.Name - if name == nil { - return nil, fmt.Errorf("mutatingWebhookConfiguration.Name must be provided to Apply") - } - emptyResult := &v1beta1.MutatingWebhookConfiguration{} - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceActionWithOptions(mutatingwebhookconfigurationsResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) - if obj == nil { - return emptyResult, err +func newFakeMutatingWebhookConfigurations(fake *FakeAdmissionregistrationV1beta1) typedadmissionregistrationv1beta1.MutatingWebhookConfigurationInterface { + return &fakeMutatingWebhookConfigurations{ + gentype.NewFakeClientWithListAndApply[*v1beta1.MutatingWebhookConfiguration, *v1beta1.MutatingWebhookConfigurationList, *admissionregistrationv1beta1.MutatingWebhookConfigurationApplyConfiguration]( + fake.Fake, + "", + v1beta1.SchemeGroupVersion.WithResource("mutatingwebhookconfigurations"), + v1beta1.SchemeGroupVersion.WithKind("MutatingWebhookConfiguration"), + func() *v1beta1.MutatingWebhookConfiguration { return &v1beta1.MutatingWebhookConfiguration{} }, + func() *v1beta1.MutatingWebhookConfigurationList { return &v1beta1.MutatingWebhookConfigurationList{} }, + func(dst, src *v1beta1.MutatingWebhookConfigurationList) { dst.ListMeta = src.ListMeta }, + func(list *v1beta1.MutatingWebhookConfigurationList) []*v1beta1.MutatingWebhookConfiguration { + return gentype.ToPointerSlice(list.Items) + }, + func(list *v1beta1.MutatingWebhookConfigurationList, items []*v1beta1.MutatingWebhookConfiguration) { + list.Items = gentype.FromPointerSlice(items) + }, + ), + fake, } - return obj.(*v1beta1.MutatingWebhookConfiguration), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_validatingadmissionpolicy.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_validatingadmissionpolicy.go index e30891c77..e98a5655d 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_validatingadmissionpolicy.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_validatingadmissionpolicy.go @@ -19,168 +19,35 @@ limitations under the License. 
package fake import ( - "context" - json "encoding/json" - "fmt" - v1beta1 "k8s.io/api/admissionregistration/v1beta1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" admissionregistrationv1beta1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1" - testing "k8s.io/client-go/testing" + gentype "k8s.io/client-go/gentype" + typedadmissionregistrationv1beta1 "k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1" ) -// FakeValidatingAdmissionPolicies implements ValidatingAdmissionPolicyInterface -type FakeValidatingAdmissionPolicies struct { +// fakeValidatingAdmissionPolicies implements ValidatingAdmissionPolicyInterface +type fakeValidatingAdmissionPolicies struct { + *gentype.FakeClientWithListAndApply[*v1beta1.ValidatingAdmissionPolicy, *v1beta1.ValidatingAdmissionPolicyList, *admissionregistrationv1beta1.ValidatingAdmissionPolicyApplyConfiguration] Fake *FakeAdmissionregistrationV1beta1 } -var validatingadmissionpoliciesResource = v1beta1.SchemeGroupVersion.WithResource("validatingadmissionpolicies") - -var validatingadmissionpoliciesKind = v1beta1.SchemeGroupVersion.WithKind("ValidatingAdmissionPolicy") - -// Get takes name of the validatingAdmissionPolicy, and returns the corresponding validatingAdmissionPolicy object, and an error if there is any. -func (c *FakeValidatingAdmissionPolicies) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.ValidatingAdmissionPolicy, err error) { - emptyResult := &v1beta1.ValidatingAdmissionPolicy{} - obj, err := c.Fake. - Invokes(testing.NewRootGetActionWithOptions(validatingadmissionpoliciesResource, name, options), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.ValidatingAdmissionPolicy), err -} - -// List takes label and field selectors, and returns the list of ValidatingAdmissionPolicies that match those selectors. -func (c *FakeValidatingAdmissionPolicies) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.ValidatingAdmissionPolicyList, err error) { - emptyResult := &v1beta1.ValidatingAdmissionPolicyList{} - obj, err := c.Fake. - Invokes(testing.NewRootListActionWithOptions(validatingadmissionpoliciesResource, validatingadmissionpoliciesKind, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1beta1.ValidatingAdmissionPolicyList{ListMeta: obj.(*v1beta1.ValidatingAdmissionPolicyList).ListMeta} - for _, item := range obj.(*v1beta1.ValidatingAdmissionPolicyList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested validatingAdmissionPolicies. -func (c *FakeValidatingAdmissionPolicies) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewRootWatchActionWithOptions(validatingadmissionpoliciesResource, opts)) -} - -// Create takes the representation of a validatingAdmissionPolicy and creates it. Returns the server's representation of the validatingAdmissionPolicy, and an error, if there is any. 
-func (c *FakeValidatingAdmissionPolicies) Create(ctx context.Context, validatingAdmissionPolicy *v1beta1.ValidatingAdmissionPolicy, opts v1.CreateOptions) (result *v1beta1.ValidatingAdmissionPolicy, err error) { - emptyResult := &v1beta1.ValidatingAdmissionPolicy{} - obj, err := c.Fake. - Invokes(testing.NewRootCreateActionWithOptions(validatingadmissionpoliciesResource, validatingAdmissionPolicy, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.ValidatingAdmissionPolicy), err -} - -// Update takes the representation of a validatingAdmissionPolicy and updates it. Returns the server's representation of the validatingAdmissionPolicy, and an error, if there is any. -func (c *FakeValidatingAdmissionPolicies) Update(ctx context.Context, validatingAdmissionPolicy *v1beta1.ValidatingAdmissionPolicy, opts v1.UpdateOptions) (result *v1beta1.ValidatingAdmissionPolicy, err error) { - emptyResult := &v1beta1.ValidatingAdmissionPolicy{} - obj, err := c.Fake. - Invokes(testing.NewRootUpdateActionWithOptions(validatingadmissionpoliciesResource, validatingAdmissionPolicy, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.ValidatingAdmissionPolicy), err -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeValidatingAdmissionPolicies) UpdateStatus(ctx context.Context, validatingAdmissionPolicy *v1beta1.ValidatingAdmissionPolicy, opts v1.UpdateOptions) (result *v1beta1.ValidatingAdmissionPolicy, err error) { - emptyResult := &v1beta1.ValidatingAdmissionPolicy{} - obj, err := c.Fake. - Invokes(testing.NewRootUpdateSubresourceActionWithOptions(validatingadmissionpoliciesResource, "status", validatingAdmissionPolicy, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.ValidatingAdmissionPolicy), err -} - -// Delete takes name of the validatingAdmissionPolicy and deletes it. Returns an error if one occurs. -func (c *FakeValidatingAdmissionPolicies) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewRootDeleteActionWithOptions(validatingadmissionpoliciesResource, name, opts), &v1beta1.ValidatingAdmissionPolicy{}) - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeValidatingAdmissionPolicies) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewRootDeleteCollectionActionWithOptions(validatingadmissionpoliciesResource, opts, listOpts) - - _, err := c.Fake.Invokes(action, &v1beta1.ValidatingAdmissionPolicyList{}) - return err -} - -// Patch applies the patch and returns the patched validatingAdmissionPolicy. -func (c *FakeValidatingAdmissionPolicies) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ValidatingAdmissionPolicy, err error) { - emptyResult := &v1beta1.ValidatingAdmissionPolicy{} - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceActionWithOptions(validatingadmissionpoliciesResource, name, pt, data, opts, subresources...), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.ValidatingAdmissionPolicy), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied validatingAdmissionPolicy. 
-func (c *FakeValidatingAdmissionPolicies) Apply(ctx context.Context, validatingAdmissionPolicy *admissionregistrationv1beta1.ValidatingAdmissionPolicyApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.ValidatingAdmissionPolicy, err error) { - if validatingAdmissionPolicy == nil { - return nil, fmt.Errorf("validatingAdmissionPolicy provided to Apply must not be nil") - } - data, err := json.Marshal(validatingAdmissionPolicy) - if err != nil { - return nil, err - } - name := validatingAdmissionPolicy.Name - if name == nil { - return nil, fmt.Errorf("validatingAdmissionPolicy.Name must be provided to Apply") - } - emptyResult := &v1beta1.ValidatingAdmissionPolicy{} - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceActionWithOptions(validatingadmissionpoliciesResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.ValidatingAdmissionPolicy), err -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *FakeValidatingAdmissionPolicies) ApplyStatus(ctx context.Context, validatingAdmissionPolicy *admissionregistrationv1beta1.ValidatingAdmissionPolicyApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.ValidatingAdmissionPolicy, err error) { - if validatingAdmissionPolicy == nil { - return nil, fmt.Errorf("validatingAdmissionPolicy provided to Apply must not be nil") - } - data, err := json.Marshal(validatingAdmissionPolicy) - if err != nil { - return nil, err - } - name := validatingAdmissionPolicy.Name - if name == nil { - return nil, fmt.Errorf("validatingAdmissionPolicy.Name must be provided to Apply") - } - emptyResult := &v1beta1.ValidatingAdmissionPolicy{} - obj, err := c.Fake. 
- Invokes(testing.NewRootPatchSubresourceActionWithOptions(validatingadmissionpoliciesResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) - if obj == nil { - return emptyResult, err +func newFakeValidatingAdmissionPolicies(fake *FakeAdmissionregistrationV1beta1) typedadmissionregistrationv1beta1.ValidatingAdmissionPolicyInterface { + return &fakeValidatingAdmissionPolicies{ + gentype.NewFakeClientWithListAndApply[*v1beta1.ValidatingAdmissionPolicy, *v1beta1.ValidatingAdmissionPolicyList, *admissionregistrationv1beta1.ValidatingAdmissionPolicyApplyConfiguration]( + fake.Fake, + "", + v1beta1.SchemeGroupVersion.WithResource("validatingadmissionpolicies"), + v1beta1.SchemeGroupVersion.WithKind("ValidatingAdmissionPolicy"), + func() *v1beta1.ValidatingAdmissionPolicy { return &v1beta1.ValidatingAdmissionPolicy{} }, + func() *v1beta1.ValidatingAdmissionPolicyList { return &v1beta1.ValidatingAdmissionPolicyList{} }, + func(dst, src *v1beta1.ValidatingAdmissionPolicyList) { dst.ListMeta = src.ListMeta }, + func(list *v1beta1.ValidatingAdmissionPolicyList) []*v1beta1.ValidatingAdmissionPolicy { + return gentype.ToPointerSlice(list.Items) + }, + func(list *v1beta1.ValidatingAdmissionPolicyList, items []*v1beta1.ValidatingAdmissionPolicy) { + list.Items = gentype.FromPointerSlice(items) + }, + ), + fake, } - return obj.(*v1beta1.ValidatingAdmissionPolicy), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_validatingadmissionpolicybinding.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_validatingadmissionpolicybinding.go index 207db3752..3f0f865e9 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_validatingadmissionpolicybinding.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_validatingadmissionpolicybinding.go @@ -19,133 +19,37 @@ limitations under the License. package fake import ( - "context" - json "encoding/json" - "fmt" - v1beta1 "k8s.io/api/admissionregistration/v1beta1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" admissionregistrationv1beta1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1" - testing "k8s.io/client-go/testing" + gentype "k8s.io/client-go/gentype" + typedadmissionregistrationv1beta1 "k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1" ) -// FakeValidatingAdmissionPolicyBindings implements ValidatingAdmissionPolicyBindingInterface -type FakeValidatingAdmissionPolicyBindings struct { +// fakeValidatingAdmissionPolicyBindings implements ValidatingAdmissionPolicyBindingInterface +type fakeValidatingAdmissionPolicyBindings struct { + *gentype.FakeClientWithListAndApply[*v1beta1.ValidatingAdmissionPolicyBinding, *v1beta1.ValidatingAdmissionPolicyBindingList, *admissionregistrationv1beta1.ValidatingAdmissionPolicyBindingApplyConfiguration] Fake *FakeAdmissionregistrationV1beta1 } -var validatingadmissionpolicybindingsResource = v1beta1.SchemeGroupVersion.WithResource("validatingadmissionpolicybindings") - -var validatingadmissionpolicybindingsKind = v1beta1.SchemeGroupVersion.WithKind("ValidatingAdmissionPolicyBinding") - -// Get takes name of the validatingAdmissionPolicyBinding, and returns the corresponding validatingAdmissionPolicyBinding object, and an error if there is any. 
-func (c *FakeValidatingAdmissionPolicyBindings) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.ValidatingAdmissionPolicyBinding, err error) { - emptyResult := &v1beta1.ValidatingAdmissionPolicyBinding{} - obj, err := c.Fake. - Invokes(testing.NewRootGetActionWithOptions(validatingadmissionpolicybindingsResource, name, options), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.ValidatingAdmissionPolicyBinding), err -} - -// List takes label and field selectors, and returns the list of ValidatingAdmissionPolicyBindings that match those selectors. -func (c *FakeValidatingAdmissionPolicyBindings) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.ValidatingAdmissionPolicyBindingList, err error) { - emptyResult := &v1beta1.ValidatingAdmissionPolicyBindingList{} - obj, err := c.Fake. - Invokes(testing.NewRootListActionWithOptions(validatingadmissionpolicybindingsResource, validatingadmissionpolicybindingsKind, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1beta1.ValidatingAdmissionPolicyBindingList{ListMeta: obj.(*v1beta1.ValidatingAdmissionPolicyBindingList).ListMeta} - for _, item := range obj.(*v1beta1.ValidatingAdmissionPolicyBindingList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested validatingAdmissionPolicyBindings. -func (c *FakeValidatingAdmissionPolicyBindings) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewRootWatchActionWithOptions(validatingadmissionpolicybindingsResource, opts)) -} - -// Create takes the representation of a validatingAdmissionPolicyBinding and creates it. Returns the server's representation of the validatingAdmissionPolicyBinding, and an error, if there is any. -func (c *FakeValidatingAdmissionPolicyBindings) Create(ctx context.Context, validatingAdmissionPolicyBinding *v1beta1.ValidatingAdmissionPolicyBinding, opts v1.CreateOptions) (result *v1beta1.ValidatingAdmissionPolicyBinding, err error) { - emptyResult := &v1beta1.ValidatingAdmissionPolicyBinding{} - obj, err := c.Fake. - Invokes(testing.NewRootCreateActionWithOptions(validatingadmissionpolicybindingsResource, validatingAdmissionPolicyBinding, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.ValidatingAdmissionPolicyBinding), err -} - -// Update takes the representation of a validatingAdmissionPolicyBinding and updates it. Returns the server's representation of the validatingAdmissionPolicyBinding, and an error, if there is any. -func (c *FakeValidatingAdmissionPolicyBindings) Update(ctx context.Context, validatingAdmissionPolicyBinding *v1beta1.ValidatingAdmissionPolicyBinding, opts v1.UpdateOptions) (result *v1beta1.ValidatingAdmissionPolicyBinding, err error) { - emptyResult := &v1beta1.ValidatingAdmissionPolicyBinding{} - obj, err := c.Fake. - Invokes(testing.NewRootUpdateActionWithOptions(validatingadmissionpolicybindingsResource, validatingAdmissionPolicyBinding, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.ValidatingAdmissionPolicyBinding), err -} - -// Delete takes name of the validatingAdmissionPolicyBinding and deletes it. Returns an error if one occurs. 
-func (c *FakeValidatingAdmissionPolicyBindings) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewRootDeleteActionWithOptions(validatingadmissionpolicybindingsResource, name, opts), &v1beta1.ValidatingAdmissionPolicyBinding{}) - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeValidatingAdmissionPolicyBindings) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewRootDeleteCollectionActionWithOptions(validatingadmissionpolicybindingsResource, opts, listOpts) - - _, err := c.Fake.Invokes(action, &v1beta1.ValidatingAdmissionPolicyBindingList{}) - return err -} - -// Patch applies the patch and returns the patched validatingAdmissionPolicyBinding. -func (c *FakeValidatingAdmissionPolicyBindings) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ValidatingAdmissionPolicyBinding, err error) { - emptyResult := &v1beta1.ValidatingAdmissionPolicyBinding{} - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceActionWithOptions(validatingadmissionpolicybindingsResource, name, pt, data, opts, subresources...), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.ValidatingAdmissionPolicyBinding), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied validatingAdmissionPolicyBinding. -func (c *FakeValidatingAdmissionPolicyBindings) Apply(ctx context.Context, validatingAdmissionPolicyBinding *admissionregistrationv1beta1.ValidatingAdmissionPolicyBindingApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.ValidatingAdmissionPolicyBinding, err error) { - if validatingAdmissionPolicyBinding == nil { - return nil, fmt.Errorf("validatingAdmissionPolicyBinding provided to Apply must not be nil") - } - data, err := json.Marshal(validatingAdmissionPolicyBinding) - if err != nil { - return nil, err - } - name := validatingAdmissionPolicyBinding.Name - if name == nil { - return nil, fmt.Errorf("validatingAdmissionPolicyBinding.Name must be provided to Apply") - } - emptyResult := &v1beta1.ValidatingAdmissionPolicyBinding{} - obj, err := c.Fake. 
- Invokes(testing.NewRootPatchSubresourceActionWithOptions(validatingadmissionpolicybindingsResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) - if obj == nil { - return emptyResult, err +func newFakeValidatingAdmissionPolicyBindings(fake *FakeAdmissionregistrationV1beta1) typedadmissionregistrationv1beta1.ValidatingAdmissionPolicyBindingInterface { + return &fakeValidatingAdmissionPolicyBindings{ + gentype.NewFakeClientWithListAndApply[*v1beta1.ValidatingAdmissionPolicyBinding, *v1beta1.ValidatingAdmissionPolicyBindingList, *admissionregistrationv1beta1.ValidatingAdmissionPolicyBindingApplyConfiguration]( + fake.Fake, + "", + v1beta1.SchemeGroupVersion.WithResource("validatingadmissionpolicybindings"), + v1beta1.SchemeGroupVersion.WithKind("ValidatingAdmissionPolicyBinding"), + func() *v1beta1.ValidatingAdmissionPolicyBinding { return &v1beta1.ValidatingAdmissionPolicyBinding{} }, + func() *v1beta1.ValidatingAdmissionPolicyBindingList { + return &v1beta1.ValidatingAdmissionPolicyBindingList{} + }, + func(dst, src *v1beta1.ValidatingAdmissionPolicyBindingList) { dst.ListMeta = src.ListMeta }, + func(list *v1beta1.ValidatingAdmissionPolicyBindingList) []*v1beta1.ValidatingAdmissionPolicyBinding { + return gentype.ToPointerSlice(list.Items) + }, + func(list *v1beta1.ValidatingAdmissionPolicyBindingList, items []*v1beta1.ValidatingAdmissionPolicyBinding) { + list.Items = gentype.FromPointerSlice(items) + }, + ), + fake, } - return obj.(*v1beta1.ValidatingAdmissionPolicyBinding), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_validatingwebhookconfiguration.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_validatingwebhookconfiguration.go index f78a31ee0..9a8301372 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_validatingwebhookconfiguration.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_validatingwebhookconfiguration.go @@ -19,133 +19,37 @@ limitations under the License. 
package fake import ( - "context" - json "encoding/json" - "fmt" - v1beta1 "k8s.io/api/admissionregistration/v1beta1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" admissionregistrationv1beta1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1" - testing "k8s.io/client-go/testing" + gentype "k8s.io/client-go/gentype" + typedadmissionregistrationv1beta1 "k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1" ) -// FakeValidatingWebhookConfigurations implements ValidatingWebhookConfigurationInterface -type FakeValidatingWebhookConfigurations struct { +// fakeValidatingWebhookConfigurations implements ValidatingWebhookConfigurationInterface +type fakeValidatingWebhookConfigurations struct { + *gentype.FakeClientWithListAndApply[*v1beta1.ValidatingWebhookConfiguration, *v1beta1.ValidatingWebhookConfigurationList, *admissionregistrationv1beta1.ValidatingWebhookConfigurationApplyConfiguration] Fake *FakeAdmissionregistrationV1beta1 } -var validatingwebhookconfigurationsResource = v1beta1.SchemeGroupVersion.WithResource("validatingwebhookconfigurations") - -var validatingwebhookconfigurationsKind = v1beta1.SchemeGroupVersion.WithKind("ValidatingWebhookConfiguration") - -// Get takes name of the validatingWebhookConfiguration, and returns the corresponding validatingWebhookConfiguration object, and an error if there is any. -func (c *FakeValidatingWebhookConfigurations) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.ValidatingWebhookConfiguration, err error) { - emptyResult := &v1beta1.ValidatingWebhookConfiguration{} - obj, err := c.Fake. - Invokes(testing.NewRootGetActionWithOptions(validatingwebhookconfigurationsResource, name, options), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.ValidatingWebhookConfiguration), err -} - -// List takes label and field selectors, and returns the list of ValidatingWebhookConfigurations that match those selectors. -func (c *FakeValidatingWebhookConfigurations) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.ValidatingWebhookConfigurationList, err error) { - emptyResult := &v1beta1.ValidatingWebhookConfigurationList{} - obj, err := c.Fake. - Invokes(testing.NewRootListActionWithOptions(validatingwebhookconfigurationsResource, validatingwebhookconfigurationsKind, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1beta1.ValidatingWebhookConfigurationList{ListMeta: obj.(*v1beta1.ValidatingWebhookConfigurationList).ListMeta} - for _, item := range obj.(*v1beta1.ValidatingWebhookConfigurationList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested validatingWebhookConfigurations. -func (c *FakeValidatingWebhookConfigurations) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewRootWatchActionWithOptions(validatingwebhookconfigurationsResource, opts)) -} - -// Create takes the representation of a validatingWebhookConfiguration and creates it. Returns the server's representation of the validatingWebhookConfiguration, and an error, if there is any. 
-func (c *FakeValidatingWebhookConfigurations) Create(ctx context.Context, validatingWebhookConfiguration *v1beta1.ValidatingWebhookConfiguration, opts v1.CreateOptions) (result *v1beta1.ValidatingWebhookConfiguration, err error) { - emptyResult := &v1beta1.ValidatingWebhookConfiguration{} - obj, err := c.Fake. - Invokes(testing.NewRootCreateActionWithOptions(validatingwebhookconfigurationsResource, validatingWebhookConfiguration, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.ValidatingWebhookConfiguration), err -} - -// Update takes the representation of a validatingWebhookConfiguration and updates it. Returns the server's representation of the validatingWebhookConfiguration, and an error, if there is any. -func (c *FakeValidatingWebhookConfigurations) Update(ctx context.Context, validatingWebhookConfiguration *v1beta1.ValidatingWebhookConfiguration, opts v1.UpdateOptions) (result *v1beta1.ValidatingWebhookConfiguration, err error) { - emptyResult := &v1beta1.ValidatingWebhookConfiguration{} - obj, err := c.Fake. - Invokes(testing.NewRootUpdateActionWithOptions(validatingwebhookconfigurationsResource, validatingWebhookConfiguration, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.ValidatingWebhookConfiguration), err -} - -// Delete takes name of the validatingWebhookConfiguration and deletes it. Returns an error if one occurs. -func (c *FakeValidatingWebhookConfigurations) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewRootDeleteActionWithOptions(validatingwebhookconfigurationsResource, name, opts), &v1beta1.ValidatingWebhookConfiguration{}) - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeValidatingWebhookConfigurations) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewRootDeleteCollectionActionWithOptions(validatingwebhookconfigurationsResource, opts, listOpts) - - _, err := c.Fake.Invokes(action, &v1beta1.ValidatingWebhookConfigurationList{}) - return err -} - -// Patch applies the patch and returns the patched validatingWebhookConfiguration. -func (c *FakeValidatingWebhookConfigurations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ValidatingWebhookConfiguration, err error) { - emptyResult := &v1beta1.ValidatingWebhookConfiguration{} - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceActionWithOptions(validatingwebhookconfigurationsResource, name, pt, data, opts, subresources...), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.ValidatingWebhookConfiguration), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied validatingWebhookConfiguration. 
-func (c *FakeValidatingWebhookConfigurations) Apply(ctx context.Context, validatingWebhookConfiguration *admissionregistrationv1beta1.ValidatingWebhookConfigurationApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.ValidatingWebhookConfiguration, err error) { - if validatingWebhookConfiguration == nil { - return nil, fmt.Errorf("validatingWebhookConfiguration provided to Apply must not be nil") - } - data, err := json.Marshal(validatingWebhookConfiguration) - if err != nil { - return nil, err - } - name := validatingWebhookConfiguration.Name - if name == nil { - return nil, fmt.Errorf("validatingWebhookConfiguration.Name must be provided to Apply") - } - emptyResult := &v1beta1.ValidatingWebhookConfiguration{} - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceActionWithOptions(validatingwebhookconfigurationsResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) - if obj == nil { - return emptyResult, err +func newFakeValidatingWebhookConfigurations(fake *FakeAdmissionregistrationV1beta1) typedadmissionregistrationv1beta1.ValidatingWebhookConfigurationInterface { + return &fakeValidatingWebhookConfigurations{ + gentype.NewFakeClientWithListAndApply[*v1beta1.ValidatingWebhookConfiguration, *v1beta1.ValidatingWebhookConfigurationList, *admissionregistrationv1beta1.ValidatingWebhookConfigurationApplyConfiguration]( + fake.Fake, + "", + v1beta1.SchemeGroupVersion.WithResource("validatingwebhookconfigurations"), + v1beta1.SchemeGroupVersion.WithKind("ValidatingWebhookConfiguration"), + func() *v1beta1.ValidatingWebhookConfiguration { return &v1beta1.ValidatingWebhookConfiguration{} }, + func() *v1beta1.ValidatingWebhookConfigurationList { + return &v1beta1.ValidatingWebhookConfigurationList{} + }, + func(dst, src *v1beta1.ValidatingWebhookConfigurationList) { dst.ListMeta = src.ListMeta }, + func(list *v1beta1.ValidatingWebhookConfigurationList) []*v1beta1.ValidatingWebhookConfiguration { + return gentype.ToPointerSlice(list.Items) + }, + func(list *v1beta1.ValidatingWebhookConfigurationList, items []*v1beta1.ValidatingWebhookConfiguration) { + list.Items = gentype.FromPointerSlice(items) + }, + ), + fake, } - return obj.(*v1beta1.ValidatingWebhookConfiguration), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/mutatingwebhookconfiguration.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/mutatingwebhookconfiguration.go index 7a5bc8b9b..17e3541cc 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/mutatingwebhookconfiguration.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/mutatingwebhookconfiguration.go @@ -19,13 +19,13 @@ limitations under the License. 
package v1beta1
import (
- "context"
+ context "context"
- v1beta1 "k8s.io/api/admissionregistration/v1beta1"
+ admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
- admissionregistrationv1beta1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1"
+ applyconfigurationsadmissionregistrationv1beta1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1"
gentype "k8s.io/client-go/gentype"
scheme "k8s.io/client-go/kubernetes/scheme"
)
@@ -38,32 +38,38 @@ type MutatingWebhookConfigurationsGetter interface {
// MutatingWebhookConfigurationInterface has methods to work with MutatingWebhookConfiguration resources.
type MutatingWebhookConfigurationInterface interface {
- Create(ctx context.Context, mutatingWebhookConfiguration *v1beta1.MutatingWebhookConfiguration, opts v1.CreateOptions) (*v1beta1.MutatingWebhookConfiguration, error)
- Update(ctx context.Context, mutatingWebhookConfiguration *v1beta1.MutatingWebhookConfiguration, opts v1.UpdateOptions) (*v1beta1.MutatingWebhookConfiguration, error)
+ Create(ctx context.Context, mutatingWebhookConfiguration *admissionregistrationv1beta1.MutatingWebhookConfiguration, opts v1.CreateOptions) (*admissionregistrationv1beta1.MutatingWebhookConfiguration, error)
+ Update(ctx context.Context, mutatingWebhookConfiguration *admissionregistrationv1beta1.MutatingWebhookConfiguration, opts v1.UpdateOptions) (*admissionregistrationv1beta1.MutatingWebhookConfiguration, error)
Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
- Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.MutatingWebhookConfiguration, error)
- List(ctx context.Context, opts v1.ListOptions) (*v1beta1.MutatingWebhookConfigurationList, error)
+ Get(ctx context.Context, name string, opts v1.GetOptions) (*admissionregistrationv1beta1.MutatingWebhookConfiguration, error)
+ List(ctx context.Context, opts v1.ListOptions) (*admissionregistrationv1beta1.MutatingWebhookConfigurationList, error)
Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
- Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.MutatingWebhookConfiguration, err error)
- Apply(ctx context.Context, mutatingWebhookConfiguration *admissionregistrationv1beta1.MutatingWebhookConfigurationApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.MutatingWebhookConfiguration, err error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *admissionregistrationv1beta1.MutatingWebhookConfiguration, err error)
+ Apply(ctx context.Context, mutatingWebhookConfiguration *applyconfigurationsadmissionregistrationv1beta1.MutatingWebhookConfigurationApplyConfiguration, opts v1.ApplyOptions) (result *admissionregistrationv1beta1.MutatingWebhookConfiguration, err error)
MutatingWebhookConfigurationExpansion
}
// mutatingWebhookConfigurations implements MutatingWebhookConfigurationInterface
type mutatingWebhookConfigurations struct {
- *gentype.ClientWithListAndApply[*v1beta1.MutatingWebhookConfiguration, *v1beta1.MutatingWebhookConfigurationList, *admissionregistrationv1beta1.MutatingWebhookConfigurationApplyConfiguration]
+ *gentype.ClientWithListAndApply[*admissionregistrationv1beta1.MutatingWebhookConfiguration, *admissionregistrationv1beta1.MutatingWebhookConfigurationList, *applyconfigurationsadmissionregistrationv1beta1.MutatingWebhookConfigurationApplyConfiguration]
}
// newMutatingWebhookConfigurations returns a MutatingWebhookConfigurations
func newMutatingWebhookConfigurations(c *AdmissionregistrationV1beta1Client) *mutatingWebhookConfigurations {
return &mutatingWebhookConfigurations{
- gentype.NewClientWithListAndApply[*v1beta1.MutatingWebhookConfiguration, *v1beta1.MutatingWebhookConfigurationList, *admissionregistrationv1beta1.MutatingWebhookConfigurationApplyConfiguration](
+ gentype.NewClientWithListAndApply[*admissionregistrationv1beta1.MutatingWebhookConfiguration, *admissionregistrationv1beta1.MutatingWebhookConfigurationList, *applyconfigurationsadmissionregistrationv1beta1.MutatingWebhookConfigurationApplyConfiguration](
"mutatingwebhookconfigurations",
c.RESTClient(),
scheme.ParameterCodec,
"",
- func() *v1beta1.MutatingWebhookConfiguration { return &v1beta1.MutatingWebhookConfiguration{} },
- func() *v1beta1.MutatingWebhookConfigurationList { return &v1beta1.MutatingWebhookConfigurationList{} }),
+ func() *admissionregistrationv1beta1.MutatingWebhookConfiguration {
+ return &admissionregistrationv1beta1.MutatingWebhookConfiguration{}
+ },
+ func() *admissionregistrationv1beta1.MutatingWebhookConfigurationList {
+ return &admissionregistrationv1beta1.MutatingWebhookConfigurationList{}
+ },
+ gentype.PrefersProtobuf[*admissionregistrationv1beta1.MutatingWebhookConfiguration](),
+ ),
}
}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/validatingadmissionpolicy.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/validatingadmissionpolicy.go
index 0023d8837..2c663ba1e 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/validatingadmissionpolicy.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/validatingadmissionpolicy.go
@@ -19,13 +19,13 @@ limitations under the License.
package v1beta1
import (
- "context"
+ context "context"
- v1beta1 "k8s.io/api/admissionregistration/v1beta1"
+ admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
- admissionregistrationv1beta1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1"
+ applyconfigurationsadmissionregistrationv1beta1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1"
gentype "k8s.io/client-go/gentype"
scheme "k8s.io/client-go/kubernetes/scheme"
)
@@ -38,36 +38,42 @@ type ValidatingAdmissionPoliciesGetter interface {
// ValidatingAdmissionPolicyInterface has methods to work with ValidatingAdmissionPolicy resources.
type ValidatingAdmissionPolicyInterface interface { - Create(ctx context.Context, validatingAdmissionPolicy *v1beta1.ValidatingAdmissionPolicy, opts v1.CreateOptions) (*v1beta1.ValidatingAdmissionPolicy, error) - Update(ctx context.Context, validatingAdmissionPolicy *v1beta1.ValidatingAdmissionPolicy, opts v1.UpdateOptions) (*v1beta1.ValidatingAdmissionPolicy, error) + Create(ctx context.Context, validatingAdmissionPolicy *admissionregistrationv1beta1.ValidatingAdmissionPolicy, opts v1.CreateOptions) (*admissionregistrationv1beta1.ValidatingAdmissionPolicy, error) + Update(ctx context.Context, validatingAdmissionPolicy *admissionregistrationv1beta1.ValidatingAdmissionPolicy, opts v1.UpdateOptions) (*admissionregistrationv1beta1.ValidatingAdmissionPolicy, error) // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - UpdateStatus(ctx context.Context, validatingAdmissionPolicy *v1beta1.ValidatingAdmissionPolicy, opts v1.UpdateOptions) (*v1beta1.ValidatingAdmissionPolicy, error) + UpdateStatus(ctx context.Context, validatingAdmissionPolicy *admissionregistrationv1beta1.ValidatingAdmissionPolicy, opts v1.UpdateOptions) (*admissionregistrationv1beta1.ValidatingAdmissionPolicy, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.ValidatingAdmissionPolicy, error) - List(ctx context.Context, opts v1.ListOptions) (*v1beta1.ValidatingAdmissionPolicyList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*admissionregistrationv1beta1.ValidatingAdmissionPolicy, error) + List(ctx context.Context, opts v1.ListOptions) (*admissionregistrationv1beta1.ValidatingAdmissionPolicyList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ValidatingAdmissionPolicy, err error) - Apply(ctx context.Context, validatingAdmissionPolicy *admissionregistrationv1beta1.ValidatingAdmissionPolicyApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.ValidatingAdmissionPolicy, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *admissionregistrationv1beta1.ValidatingAdmissionPolicy, err error) + Apply(ctx context.Context, validatingAdmissionPolicy *applyconfigurationsadmissionregistrationv1beta1.ValidatingAdmissionPolicyApplyConfiguration, opts v1.ApplyOptions) (result *admissionregistrationv1beta1.ValidatingAdmissionPolicy, err error) // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
- ApplyStatus(ctx context.Context, validatingAdmissionPolicy *admissionregistrationv1beta1.ValidatingAdmissionPolicyApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.ValidatingAdmissionPolicy, err error) + ApplyStatus(ctx context.Context, validatingAdmissionPolicy *applyconfigurationsadmissionregistrationv1beta1.ValidatingAdmissionPolicyApplyConfiguration, opts v1.ApplyOptions) (result *admissionregistrationv1beta1.ValidatingAdmissionPolicy, err error) ValidatingAdmissionPolicyExpansion } // validatingAdmissionPolicies implements ValidatingAdmissionPolicyInterface type validatingAdmissionPolicies struct { - *gentype.ClientWithListAndApply[*v1beta1.ValidatingAdmissionPolicy, *v1beta1.ValidatingAdmissionPolicyList, *admissionregistrationv1beta1.ValidatingAdmissionPolicyApplyConfiguration] + *gentype.ClientWithListAndApply[*admissionregistrationv1beta1.ValidatingAdmissionPolicy, *admissionregistrationv1beta1.ValidatingAdmissionPolicyList, *applyconfigurationsadmissionregistrationv1beta1.ValidatingAdmissionPolicyApplyConfiguration] } // newValidatingAdmissionPolicies returns a ValidatingAdmissionPolicies func newValidatingAdmissionPolicies(c *AdmissionregistrationV1beta1Client) *validatingAdmissionPolicies { return &validatingAdmissionPolicies{ - gentype.NewClientWithListAndApply[*v1beta1.ValidatingAdmissionPolicy, *v1beta1.ValidatingAdmissionPolicyList, *admissionregistrationv1beta1.ValidatingAdmissionPolicyApplyConfiguration]( + gentype.NewClientWithListAndApply[*admissionregistrationv1beta1.ValidatingAdmissionPolicy, *admissionregistrationv1beta1.ValidatingAdmissionPolicyList, *applyconfigurationsadmissionregistrationv1beta1.ValidatingAdmissionPolicyApplyConfiguration]( "validatingadmissionpolicies", c.RESTClient(), scheme.ParameterCodec, "", - func() *v1beta1.ValidatingAdmissionPolicy { return &v1beta1.ValidatingAdmissionPolicy{} }, - func() *v1beta1.ValidatingAdmissionPolicyList { return &v1beta1.ValidatingAdmissionPolicyList{} }), + func() *admissionregistrationv1beta1.ValidatingAdmissionPolicy { + return &admissionregistrationv1beta1.ValidatingAdmissionPolicy{} + }, + func() *admissionregistrationv1beta1.ValidatingAdmissionPolicyList { + return &admissionregistrationv1beta1.ValidatingAdmissionPolicyList{} + }, + gentype.PrefersProtobuf[*admissionregistrationv1beta1.ValidatingAdmissionPolicy](), + ), } } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/validatingadmissionpolicybinding.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/validatingadmissionpolicybinding.go index 8168d8cbc..196cc8f0a 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/validatingadmissionpolicybinding.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/validatingadmissionpolicybinding.go @@ -19,13 +19,13 @@ limitations under the License. 
package v1beta1
import (
- "context"
+ context "context"
- v1beta1 "k8s.io/api/admissionregistration/v1beta1"
+ admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
- admissionregistrationv1beta1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1"
+ applyconfigurationsadmissionregistrationv1beta1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1"
gentype "k8s.io/client-go/gentype"
scheme "k8s.io/client-go/kubernetes/scheme"
)
@@ -38,34 +38,38 @@ type ValidatingAdmissionPolicyBindingsGetter interface {
// ValidatingAdmissionPolicyBindingInterface has methods to work with ValidatingAdmissionPolicyBinding resources.
type ValidatingAdmissionPolicyBindingInterface interface {
- Create(ctx context.Context, validatingAdmissionPolicyBinding *v1beta1.ValidatingAdmissionPolicyBinding, opts v1.CreateOptions) (*v1beta1.ValidatingAdmissionPolicyBinding, error)
- Update(ctx context.Context, validatingAdmissionPolicyBinding *v1beta1.ValidatingAdmissionPolicyBinding, opts v1.UpdateOptions) (*v1beta1.ValidatingAdmissionPolicyBinding, error)
+ Create(ctx context.Context, validatingAdmissionPolicyBinding *admissionregistrationv1beta1.ValidatingAdmissionPolicyBinding, opts v1.CreateOptions) (*admissionregistrationv1beta1.ValidatingAdmissionPolicyBinding, error)
+ Update(ctx context.Context, validatingAdmissionPolicyBinding *admissionregistrationv1beta1.ValidatingAdmissionPolicyBinding, opts v1.UpdateOptions) (*admissionregistrationv1beta1.ValidatingAdmissionPolicyBinding, error)
Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
- Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.ValidatingAdmissionPolicyBinding, error)
- List(ctx context.Context, opts v1.ListOptions) (*v1beta1.ValidatingAdmissionPolicyBindingList, error)
+ Get(ctx context.Context, name string, opts v1.GetOptions) (*admissionregistrationv1beta1.ValidatingAdmissionPolicyBinding, error)
+ List(ctx context.Context, opts v1.ListOptions) (*admissionregistrationv1beta1.ValidatingAdmissionPolicyBindingList, error)
Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
- Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ValidatingAdmissionPolicyBinding, err error)
- Apply(ctx context.Context, validatingAdmissionPolicyBinding *admissionregistrationv1beta1.ValidatingAdmissionPolicyBindingApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.ValidatingAdmissionPolicyBinding, err error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *admissionregistrationv1beta1.ValidatingAdmissionPolicyBinding, err error)
+ Apply(ctx context.Context, validatingAdmissionPolicyBinding *applyconfigurationsadmissionregistrationv1beta1.ValidatingAdmissionPolicyBindingApplyConfiguration, opts v1.ApplyOptions) (result *admissionregistrationv1beta1.ValidatingAdmissionPolicyBinding, err error)
ValidatingAdmissionPolicyBindingExpansion
}
// validatingAdmissionPolicyBindings implements ValidatingAdmissionPolicyBindingInterface
type validatingAdmissionPolicyBindings struct {
- *gentype.ClientWithListAndApply[*v1beta1.ValidatingAdmissionPolicyBinding, *v1beta1.ValidatingAdmissionPolicyBindingList, *admissionregistrationv1beta1.ValidatingAdmissionPolicyBindingApplyConfiguration]
+ *gentype.ClientWithListAndApply[*admissionregistrationv1beta1.ValidatingAdmissionPolicyBinding, *admissionregistrationv1beta1.ValidatingAdmissionPolicyBindingList, *applyconfigurationsadmissionregistrationv1beta1.ValidatingAdmissionPolicyBindingApplyConfiguration]
}
// newValidatingAdmissionPolicyBindings returns a ValidatingAdmissionPolicyBindings
func newValidatingAdmissionPolicyBindings(c *AdmissionregistrationV1beta1Client) *validatingAdmissionPolicyBindings {
return &validatingAdmissionPolicyBindings{
- gentype.NewClientWithListAndApply[*v1beta1.ValidatingAdmissionPolicyBinding, *v1beta1.ValidatingAdmissionPolicyBindingList, *admissionregistrationv1beta1.ValidatingAdmissionPolicyBindingApplyConfiguration](
+ gentype.NewClientWithListAndApply[*admissionregistrationv1beta1.ValidatingAdmissionPolicyBinding, *admissionregistrationv1beta1.ValidatingAdmissionPolicyBindingList, *applyconfigurationsadmissionregistrationv1beta1.ValidatingAdmissionPolicyBindingApplyConfiguration](
"validatingadmissionpolicybindings",
c.RESTClient(),
scheme.ParameterCodec,
"",
- func() *v1beta1.ValidatingAdmissionPolicyBinding { return &v1beta1.ValidatingAdmissionPolicyBinding{} },
- func() *v1beta1.ValidatingAdmissionPolicyBindingList {
- return &v1beta1.ValidatingAdmissionPolicyBindingList{}
- }),
+ func() *admissionregistrationv1beta1.ValidatingAdmissionPolicyBinding {
+ return &admissionregistrationv1beta1.ValidatingAdmissionPolicyBinding{}
+ },
+ func() *admissionregistrationv1beta1.ValidatingAdmissionPolicyBindingList {
+ return &admissionregistrationv1beta1.ValidatingAdmissionPolicyBindingList{}
+ },
+ gentype.PrefersProtobuf[*admissionregistrationv1beta1.ValidatingAdmissionPolicyBinding](),
+ ),
}
}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/validatingwebhookconfiguration.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/validatingwebhookconfiguration.go
index 5abd96823..9f28346e8 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/validatingwebhookconfiguration.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/validatingwebhookconfiguration.go
@@ -19,13 +19,13 @@ limitations under the License.
package v1beta1
import (
- "context"
+ context "context"
- v1beta1 "k8s.io/api/admissionregistration/v1beta1"
+ admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
- admissionregistrationv1beta1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1"
+ applyconfigurationsadmissionregistrationv1beta1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1"
gentype "k8s.io/client-go/gentype"
scheme "k8s.io/client-go/kubernetes/scheme"
)
@@ -38,34 +38,38 @@ type ValidatingWebhookConfigurationsGetter interface {
// ValidatingWebhookConfigurationInterface has methods to work with ValidatingWebhookConfiguration resources.
type ValidatingWebhookConfigurationInterface interface {
- Create(ctx context.Context, validatingWebhookConfiguration *v1beta1.ValidatingWebhookConfiguration, opts v1.CreateOptions) (*v1beta1.ValidatingWebhookConfiguration, error)
- Update(ctx context.Context, validatingWebhookConfiguration *v1beta1.ValidatingWebhookConfiguration, opts v1.UpdateOptions) (*v1beta1.ValidatingWebhookConfiguration, error)
+ Create(ctx context.Context, validatingWebhookConfiguration *admissionregistrationv1beta1.ValidatingWebhookConfiguration, opts v1.CreateOptions) (*admissionregistrationv1beta1.ValidatingWebhookConfiguration, error)
+ Update(ctx context.Context, validatingWebhookConfiguration *admissionregistrationv1beta1.ValidatingWebhookConfiguration, opts v1.UpdateOptions) (*admissionregistrationv1beta1.ValidatingWebhookConfiguration, error)
Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
- Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.ValidatingWebhookConfiguration, error)
- List(ctx context.Context, opts v1.ListOptions) (*v1beta1.ValidatingWebhookConfigurationList, error)
+ Get(ctx context.Context, name string, opts v1.GetOptions) (*admissionregistrationv1beta1.ValidatingWebhookConfiguration, error)
+ List(ctx context.Context, opts v1.ListOptions) (*admissionregistrationv1beta1.ValidatingWebhookConfigurationList, error)
Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
- Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ValidatingWebhookConfiguration, err error)
- Apply(ctx context.Context, validatingWebhookConfiguration *admissionregistrationv1beta1.ValidatingWebhookConfigurationApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.ValidatingWebhookConfiguration, err error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *admissionregistrationv1beta1.ValidatingWebhookConfiguration, err error)
+ Apply(ctx context.Context, validatingWebhookConfiguration *applyconfigurationsadmissionregistrationv1beta1.ValidatingWebhookConfigurationApplyConfiguration, opts v1.ApplyOptions) (result *admissionregistrationv1beta1.ValidatingWebhookConfiguration, err error)
ValidatingWebhookConfigurationExpansion
}
// validatingWebhookConfigurations implements ValidatingWebhookConfigurationInterface
type validatingWebhookConfigurations struct {
- *gentype.ClientWithListAndApply[*v1beta1.ValidatingWebhookConfiguration, *v1beta1.ValidatingWebhookConfigurationList, *admissionregistrationv1beta1.ValidatingWebhookConfigurationApplyConfiguration]
+ *gentype.ClientWithListAndApply[*admissionregistrationv1beta1.ValidatingWebhookConfiguration, *admissionregistrationv1beta1.ValidatingWebhookConfigurationList, *applyconfigurationsadmissionregistrationv1beta1.ValidatingWebhookConfigurationApplyConfiguration]
}
// newValidatingWebhookConfigurations returns a ValidatingWebhookConfigurations
func newValidatingWebhookConfigurations(c *AdmissionregistrationV1beta1Client) *validatingWebhookConfigurations {
return &validatingWebhookConfigurations{
- gentype.NewClientWithListAndApply[*v1beta1.ValidatingWebhookConfiguration, *v1beta1.ValidatingWebhookConfigurationList, *admissionregistrationv1beta1.ValidatingWebhookConfigurationApplyConfiguration](
+ gentype.NewClientWithListAndApply[*admissionregistrationv1beta1.ValidatingWebhookConfiguration, *admissionregistrationv1beta1.ValidatingWebhookConfigurationList, *applyconfigurationsadmissionregistrationv1beta1.ValidatingWebhookConfigurationApplyConfiguration](
"validatingwebhookconfigurations",
c.RESTClient(),
scheme.ParameterCodec,
"",
- func() *v1beta1.ValidatingWebhookConfiguration { return &v1beta1.ValidatingWebhookConfiguration{} },
- func() *v1beta1.ValidatingWebhookConfigurationList {
- return &v1beta1.ValidatingWebhookConfigurationList{}
- }),
+ func() *admissionregistrationv1beta1.ValidatingWebhookConfiguration {
+ return &admissionregistrationv1beta1.ValidatingWebhookConfiguration{}
+ },
+ func() *admissionregistrationv1beta1.ValidatingWebhookConfigurationList {
+ return &admissionregistrationv1beta1.ValidatingWebhookConfigurationList{}
+ },
+ gentype.PrefersProtobuf[*admissionregistrationv1beta1.ValidatingWebhookConfiguration](),
+ ),
}
}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apiserverinternal/v1alpha1/apiserverinternal_client.go b/vendor/k8s.io/client-go/kubernetes/typed/apiserverinternal/v1alpha1/apiserverinternal_client.go
index 1794cb941..b76fadf91 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/apiserverinternal/v1alpha1/apiserverinternal_client.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/apiserverinternal/v1alpha1/apiserverinternal_client.go
@@ -19,10 +19,10 @@ limitations under the License.
package v1alpha1
import (
- "net/http"
+ http "net/http"
- v1alpha1 "k8s.io/api/apiserverinternal/v1alpha1"
- "k8s.io/client-go/kubernetes/scheme"
+ apiserverinternalv1alpha1 "k8s.io/api/apiserverinternal/v1alpha1"
+ scheme "k8s.io/client-go/kubernetes/scheme"
rest "k8s.io/client-go/rest"
)
@@ -85,10 +85,10 @@ func New(c rest.Interface) *InternalV1alpha1Client {
}
func setConfigDefaults(config *rest.Config) error {
- gv := v1alpha1.SchemeGroupVersion
+ gv := apiserverinternalv1alpha1.SchemeGroupVersion
config.GroupVersion = &gv
config.APIPath = "/apis"
- config.NegotiatedSerializer = scheme.Codecs.WithoutConversion()
+ config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion()
if config.UserAgent == "" {
config.UserAgent = rest.DefaultKubernetesUserAgent()
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apiserverinternal/v1alpha1/fake/fake_apiserverinternal_client.go b/vendor/k8s.io/client-go/kubernetes/typed/apiserverinternal/v1alpha1/fake/fake_apiserverinternal_client.go
index 0960a5e81..f4f4a78dd 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/apiserverinternal/v1alpha1/fake/fake_apiserverinternal_client.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/apiserverinternal/v1alpha1/fake/fake_apiserverinternal_client.go
@@ -29,7 +29,7 @@ type FakeInternalV1alpha1 struct {
}
func (c *FakeInternalV1alpha1) StorageVersions() v1alpha1.StorageVersionInterface {
- return &FakeStorageVersions{c}
+ return newFakeStorageVersions(c)
}
// RESTClient returns a RESTClient that is used to communicate
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apiserverinternal/v1alpha1/fake/fake_storageversion.go b/vendor/k8s.io/client-go/kubernetes/typed/apiserverinternal/v1alpha1/fake/fake_storageversion.go
index e9f0b78d4..785c067f8 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/apiserverinternal/v1alpha1/fake/fake_storageversion.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/apiserverinternal/v1alpha1/fake/fake_storageversion.go
@@ -19,168 +19,35 @@ limitations under the License.
package fake import ( - "context" - json "encoding/json" - "fmt" - v1alpha1 "k8s.io/api/apiserverinternal/v1alpha1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" apiserverinternalv1alpha1 "k8s.io/client-go/applyconfigurations/apiserverinternal/v1alpha1" - testing "k8s.io/client-go/testing" + gentype "k8s.io/client-go/gentype" + typedapiserverinternalv1alpha1 "k8s.io/client-go/kubernetes/typed/apiserverinternal/v1alpha1" ) -// FakeStorageVersions implements StorageVersionInterface -type FakeStorageVersions struct { +// fakeStorageVersions implements StorageVersionInterface +type fakeStorageVersions struct { + *gentype.FakeClientWithListAndApply[*v1alpha1.StorageVersion, *v1alpha1.StorageVersionList, *apiserverinternalv1alpha1.StorageVersionApplyConfiguration] Fake *FakeInternalV1alpha1 } -var storageversionsResource = v1alpha1.SchemeGroupVersion.WithResource("storageversions") - -var storageversionsKind = v1alpha1.SchemeGroupVersion.WithKind("StorageVersion") - -// Get takes name of the storageVersion, and returns the corresponding storageVersion object, and an error if there is any. -func (c *FakeStorageVersions) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.StorageVersion, err error) { - emptyResult := &v1alpha1.StorageVersion{} - obj, err := c.Fake. - Invokes(testing.NewRootGetActionWithOptions(storageversionsResource, name, options), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1alpha1.StorageVersion), err -} - -// List takes label and field selectors, and returns the list of StorageVersions that match those selectors. -func (c *FakeStorageVersions) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.StorageVersionList, err error) { - emptyResult := &v1alpha1.StorageVersionList{} - obj, err := c.Fake. - Invokes(testing.NewRootListActionWithOptions(storageversionsResource, storageversionsKind, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1alpha1.StorageVersionList{ListMeta: obj.(*v1alpha1.StorageVersionList).ListMeta} - for _, item := range obj.(*v1alpha1.StorageVersionList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested storageVersions. -func (c *FakeStorageVersions) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewRootWatchActionWithOptions(storageversionsResource, opts)) -} - -// Create takes the representation of a storageVersion and creates it. Returns the server's representation of the storageVersion, and an error, if there is any. -func (c *FakeStorageVersions) Create(ctx context.Context, storageVersion *v1alpha1.StorageVersion, opts v1.CreateOptions) (result *v1alpha1.StorageVersion, err error) { - emptyResult := &v1alpha1.StorageVersion{} - obj, err := c.Fake. - Invokes(testing.NewRootCreateActionWithOptions(storageversionsResource, storageVersion, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1alpha1.StorageVersion), err -} - -// Update takes the representation of a storageVersion and updates it. Returns the server's representation of the storageVersion, and an error, if there is any. 
-func (c *FakeStorageVersions) Update(ctx context.Context, storageVersion *v1alpha1.StorageVersion, opts v1.UpdateOptions) (result *v1alpha1.StorageVersion, err error) { - emptyResult := &v1alpha1.StorageVersion{} - obj, err := c.Fake. - Invokes(testing.NewRootUpdateActionWithOptions(storageversionsResource, storageVersion, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1alpha1.StorageVersion), err -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeStorageVersions) UpdateStatus(ctx context.Context, storageVersion *v1alpha1.StorageVersion, opts v1.UpdateOptions) (result *v1alpha1.StorageVersion, err error) { - emptyResult := &v1alpha1.StorageVersion{} - obj, err := c.Fake. - Invokes(testing.NewRootUpdateSubresourceActionWithOptions(storageversionsResource, "status", storageVersion, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1alpha1.StorageVersion), err -} - -// Delete takes name of the storageVersion and deletes it. Returns an error if one occurs. -func (c *FakeStorageVersions) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewRootDeleteActionWithOptions(storageversionsResource, name, opts), &v1alpha1.StorageVersion{}) - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeStorageVersions) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewRootDeleteCollectionActionWithOptions(storageversionsResource, opts, listOpts) - - _, err := c.Fake.Invokes(action, &v1alpha1.StorageVersionList{}) - return err -} - -// Patch applies the patch and returns the patched storageVersion. -func (c *FakeStorageVersions) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.StorageVersion, err error) { - emptyResult := &v1alpha1.StorageVersion{} - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceActionWithOptions(storageversionsResource, name, pt, data, opts, subresources...), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1alpha1.StorageVersion), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied storageVersion. -func (c *FakeStorageVersions) Apply(ctx context.Context, storageVersion *apiserverinternalv1alpha1.StorageVersionApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.StorageVersion, err error) { - if storageVersion == nil { - return nil, fmt.Errorf("storageVersion provided to Apply must not be nil") - } - data, err := json.Marshal(storageVersion) - if err != nil { - return nil, err - } - name := storageVersion.Name - if name == nil { - return nil, fmt.Errorf("storageVersion.Name must be provided to Apply") - } - emptyResult := &v1alpha1.StorageVersion{} - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceActionWithOptions(storageversionsResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1alpha1.StorageVersion), err -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
-func (c *FakeStorageVersions) ApplyStatus(ctx context.Context, storageVersion *apiserverinternalv1alpha1.StorageVersionApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.StorageVersion, err error) { - if storageVersion == nil { - return nil, fmt.Errorf("storageVersion provided to Apply must not be nil") - } - data, err := json.Marshal(storageVersion) - if err != nil { - return nil, err - } - name := storageVersion.Name - if name == nil { - return nil, fmt.Errorf("storageVersion.Name must be provided to Apply") - } - emptyResult := &v1alpha1.StorageVersion{} - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceActionWithOptions(storageversionsResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) - if obj == nil { - return emptyResult, err +func newFakeStorageVersions(fake *FakeInternalV1alpha1) typedapiserverinternalv1alpha1.StorageVersionInterface { + return &fakeStorageVersions{ + gentype.NewFakeClientWithListAndApply[*v1alpha1.StorageVersion, *v1alpha1.StorageVersionList, *apiserverinternalv1alpha1.StorageVersionApplyConfiguration]( + fake.Fake, + "", + v1alpha1.SchemeGroupVersion.WithResource("storageversions"), + v1alpha1.SchemeGroupVersion.WithKind("StorageVersion"), + func() *v1alpha1.StorageVersion { return &v1alpha1.StorageVersion{} }, + func() *v1alpha1.StorageVersionList { return &v1alpha1.StorageVersionList{} }, + func(dst, src *v1alpha1.StorageVersionList) { dst.ListMeta = src.ListMeta }, + func(list *v1alpha1.StorageVersionList) []*v1alpha1.StorageVersion { + return gentype.ToPointerSlice(list.Items) + }, + func(list *v1alpha1.StorageVersionList, items []*v1alpha1.StorageVersion) { + list.Items = gentype.FromPointerSlice(items) + }, + ), + fake, } - return obj.(*v1alpha1.StorageVersion), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apiserverinternal/v1alpha1/storageversion.go b/vendor/k8s.io/client-go/kubernetes/typed/apiserverinternal/v1alpha1/storageversion.go index 436593f7f..cea897b3d 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apiserverinternal/v1alpha1/storageversion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apiserverinternal/v1alpha1/storageversion.go @@ -19,13 +19,13 @@ limitations under the License. package v1alpha1 import ( - "context" + context "context" - v1alpha1 "k8s.io/api/apiserverinternal/v1alpha1" + apiserverinternalv1alpha1 "k8s.io/api/apiserverinternal/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - apiserverinternalv1alpha1 "k8s.io/client-go/applyconfigurations/apiserverinternal/v1alpha1" + applyconfigurationsapiserverinternalv1alpha1 "k8s.io/client-go/applyconfigurations/apiserverinternal/v1alpha1" gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" ) @@ -38,36 +38,40 @@ type StorageVersionsGetter interface { // StorageVersionInterface has methods to work with StorageVersion resources. 
type StorageVersionInterface interface { - Create(ctx context.Context, storageVersion *v1alpha1.StorageVersion, opts v1.CreateOptions) (*v1alpha1.StorageVersion, error) - Update(ctx context.Context, storageVersion *v1alpha1.StorageVersion, opts v1.UpdateOptions) (*v1alpha1.StorageVersion, error) + Create(ctx context.Context, storageVersion *apiserverinternalv1alpha1.StorageVersion, opts v1.CreateOptions) (*apiserverinternalv1alpha1.StorageVersion, error) + Update(ctx context.Context, storageVersion *apiserverinternalv1alpha1.StorageVersion, opts v1.UpdateOptions) (*apiserverinternalv1alpha1.StorageVersion, error) // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - UpdateStatus(ctx context.Context, storageVersion *v1alpha1.StorageVersion, opts v1.UpdateOptions) (*v1alpha1.StorageVersion, error) + UpdateStatus(ctx context.Context, storageVersion *apiserverinternalv1alpha1.StorageVersion, opts v1.UpdateOptions) (*apiserverinternalv1alpha1.StorageVersion, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.StorageVersion, error) - List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.StorageVersionList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*apiserverinternalv1alpha1.StorageVersion, error) + List(ctx context.Context, opts v1.ListOptions) (*apiserverinternalv1alpha1.StorageVersionList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.StorageVersion, err error) - Apply(ctx context.Context, storageVersion *apiserverinternalv1alpha1.StorageVersionApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.StorageVersion, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *apiserverinternalv1alpha1.StorageVersion, err error) + Apply(ctx context.Context, storageVersion *applyconfigurationsapiserverinternalv1alpha1.StorageVersionApplyConfiguration, opts v1.ApplyOptions) (result *apiserverinternalv1alpha1.StorageVersion, err error) // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
- ApplyStatus(ctx context.Context, storageVersion *apiserverinternalv1alpha1.StorageVersionApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.StorageVersion, err error) + ApplyStatus(ctx context.Context, storageVersion *applyconfigurationsapiserverinternalv1alpha1.StorageVersionApplyConfiguration, opts v1.ApplyOptions) (result *apiserverinternalv1alpha1.StorageVersion, err error) StorageVersionExpansion } // storageVersions implements StorageVersionInterface type storageVersions struct { - *gentype.ClientWithListAndApply[*v1alpha1.StorageVersion, *v1alpha1.StorageVersionList, *apiserverinternalv1alpha1.StorageVersionApplyConfiguration] + *gentype.ClientWithListAndApply[*apiserverinternalv1alpha1.StorageVersion, *apiserverinternalv1alpha1.StorageVersionList, *applyconfigurationsapiserverinternalv1alpha1.StorageVersionApplyConfiguration] } // newStorageVersions returns a StorageVersions func newStorageVersions(c *InternalV1alpha1Client) *storageVersions { return &storageVersions{ - gentype.NewClientWithListAndApply[*v1alpha1.StorageVersion, *v1alpha1.StorageVersionList, *apiserverinternalv1alpha1.StorageVersionApplyConfiguration]( + gentype.NewClientWithListAndApply[*apiserverinternalv1alpha1.StorageVersion, *apiserverinternalv1alpha1.StorageVersionList, *applyconfigurationsapiserverinternalv1alpha1.StorageVersionApplyConfiguration]( "storageversions", c.RESTClient(), scheme.ParameterCodec, "", - func() *v1alpha1.StorageVersion { return &v1alpha1.StorageVersion{} }, - func() *v1alpha1.StorageVersionList { return &v1alpha1.StorageVersionList{} }), + func() *apiserverinternalv1alpha1.StorageVersion { return &apiserverinternalv1alpha1.StorageVersion{} }, + func() *apiserverinternalv1alpha1.StorageVersionList { + return &apiserverinternalv1alpha1.StorageVersionList{} + }, + gentype.PrefersProtobuf[*apiserverinternalv1alpha1.StorageVersion](), + ), } } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/apps_client.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/apps_client.go index 397542eeb..cb0bf87ba 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/apps_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/apps_client.go @@ -19,10 +19,10 @@ limitations under the License. package v1 import ( - "net/http" + http "net/http" - v1 "k8s.io/api/apps/v1" - "k8s.io/client-go/kubernetes/scheme" + appsv1 "k8s.io/api/apps/v1" + scheme "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -105,10 +105,10 @@ func New(c rest.Interface) *AppsV1Client { } func setConfigDefaults(config *rest.Config) error { - gv := v1.SchemeGroupVersion + gv := appsv1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/controllerrevision.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/controllerrevision.go index 252f47ba2..8bf810810 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/controllerrevision.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/controllerrevision.go @@ -19,13 +19,13 @@ limitations under the License. 
package v1
import (
- "context"
+ context "context"
- v1 "k8s.io/api/apps/v1"
+ appsv1 "k8s.io/api/apps/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
- appsv1 "k8s.io/client-go/applyconfigurations/apps/v1"
+ applyconfigurationsappsv1 "k8s.io/client-go/applyconfigurations/apps/v1"
gentype "k8s.io/client-go/gentype"
scheme "k8s.io/client-go/kubernetes/scheme"
)
@@ -38,32 +38,34 @@ type ControllerRevisionsGetter interface {
// ControllerRevisionInterface has methods to work with ControllerRevision resources.
type ControllerRevisionInterface interface {
- Create(ctx context.Context, controllerRevision *v1.ControllerRevision, opts metav1.CreateOptions) (*v1.ControllerRevision, error)
- Update(ctx context.Context, controllerRevision *v1.ControllerRevision, opts metav1.UpdateOptions) (*v1.ControllerRevision, error)
+ Create(ctx context.Context, controllerRevision *appsv1.ControllerRevision, opts metav1.CreateOptions) (*appsv1.ControllerRevision, error)
+ Update(ctx context.Context, controllerRevision *appsv1.ControllerRevision, opts metav1.UpdateOptions) (*appsv1.ControllerRevision, error)
Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
- Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.ControllerRevision, error)
- List(ctx context.Context, opts metav1.ListOptions) (*v1.ControllerRevisionList, error)
+ Get(ctx context.Context, name string, opts metav1.GetOptions) (*appsv1.ControllerRevision, error)
+ List(ctx context.Context, opts metav1.ListOptions) (*appsv1.ControllerRevisionList, error)
Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
- Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ControllerRevision, err error)
- Apply(ctx context.Context, controllerRevision *appsv1.ControllerRevisionApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ControllerRevision, err error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *appsv1.ControllerRevision, err error)
+ Apply(ctx context.Context, controllerRevision *applyconfigurationsappsv1.ControllerRevisionApplyConfiguration, opts metav1.ApplyOptions) (result *appsv1.ControllerRevision, err error)
ControllerRevisionExpansion
}
// controllerRevisions implements ControllerRevisionInterface
type controllerRevisions struct {
- *gentype.ClientWithListAndApply[*v1.ControllerRevision, *v1.ControllerRevisionList, *appsv1.ControllerRevisionApplyConfiguration]
+ *gentype.ClientWithListAndApply[*appsv1.ControllerRevision, *appsv1.ControllerRevisionList, *applyconfigurationsappsv1.ControllerRevisionApplyConfiguration]
}
// newControllerRevisions returns a ControllerRevisions
func newControllerRevisions(c *AppsV1Client, namespace string) *controllerRevisions {
return &controllerRevisions{
- gentype.NewClientWithListAndApply[*v1.ControllerRevision, *v1.ControllerRevisionList, *appsv1.ControllerRevisionApplyConfiguration](
+ gentype.NewClientWithListAndApply[*appsv1.ControllerRevision, *appsv1.ControllerRevisionList, *applyconfigurationsappsv1.ControllerRevisionApplyConfiguration](
"controllerrevisions",
c.RESTClient(),
scheme.ParameterCodec,
namespace,
- func() *v1.ControllerRevision { return &v1.ControllerRevision{} },
- func() *v1.ControllerRevisionList { return &v1.ControllerRevisionList{} }),
+ func() *appsv1.ControllerRevision { return &appsv1.ControllerRevision{} },
+ func() *appsv1.ControllerRevisionList { return &appsv1.ControllerRevisionList{} },
+ gentype.PrefersProtobuf[*appsv1.ControllerRevision](),
+ ),
}
}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/daemonset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/daemonset.go
index 28917a796..6354da219 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/daemonset.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/daemonset.go
@@ -19,13 +19,13 @@ limitations under the License.
package v1
import (
- "context"
+ context "context"
- v1 "k8s.io/api/apps/v1"
+ appsv1 "k8s.io/api/apps/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
- appsv1 "k8s.io/client-go/applyconfigurations/apps/v1"
+ applyconfigurationsappsv1 "k8s.io/client-go/applyconfigurations/apps/v1"
gentype "k8s.io/client-go/gentype"
scheme "k8s.io/client-go/kubernetes/scheme"
)
@@ -38,36 +38,38 @@ type DaemonSetsGetter interface {
// DaemonSetInterface has methods to work with DaemonSet resources.
type DaemonSetInterface interface {
- Create(ctx context.Context, daemonSet *v1.DaemonSet, opts metav1.CreateOptions) (*v1.DaemonSet, error)
- Update(ctx context.Context, daemonSet *v1.DaemonSet, opts metav1.UpdateOptions) (*v1.DaemonSet, error)
+ Create(ctx context.Context, daemonSet *appsv1.DaemonSet, opts metav1.CreateOptions) (*appsv1.DaemonSet, error)
+ Update(ctx context.Context, daemonSet *appsv1.DaemonSet, opts metav1.UpdateOptions) (*appsv1.DaemonSet, error)
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
- UpdateStatus(ctx context.Context, daemonSet *v1.DaemonSet, opts metav1.UpdateOptions) (*v1.DaemonSet, error)
+ UpdateStatus(ctx context.Context, daemonSet *appsv1.DaemonSet, opts metav1.UpdateOptions) (*appsv1.DaemonSet, error)
Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
- Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.DaemonSet, error)
- List(ctx context.Context, opts metav1.ListOptions) (*v1.DaemonSetList, error)
+ Get(ctx context.Context, name string, opts metav1.GetOptions) (*appsv1.DaemonSet, error)
+ List(ctx context.Context, opts metav1.ListOptions) (*appsv1.DaemonSetList, error)
Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
- Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.DaemonSet, err error)
- Apply(ctx context.Context, daemonSet *appsv1.DaemonSetApplyConfiguration, opts metav1.ApplyOptions) (result *v1.DaemonSet, err error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *appsv1.DaemonSet, err error)
+ Apply(ctx context.Context, daemonSet *applyconfigurationsappsv1.DaemonSetApplyConfiguration, opts metav1.ApplyOptions) (result *appsv1.DaemonSet, err error)
// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
- ApplyStatus(ctx context.Context, daemonSet *appsv1.DaemonSetApplyConfiguration, opts metav1.ApplyOptions) (result *v1.DaemonSet, err error) + ApplyStatus(ctx context.Context, daemonSet *applyconfigurationsappsv1.DaemonSetApplyConfiguration, opts metav1.ApplyOptions) (result *appsv1.DaemonSet, err error) DaemonSetExpansion } // daemonSets implements DaemonSetInterface type daemonSets struct { - *gentype.ClientWithListAndApply[*v1.DaemonSet, *v1.DaemonSetList, *appsv1.DaemonSetApplyConfiguration] + *gentype.ClientWithListAndApply[*appsv1.DaemonSet, *appsv1.DaemonSetList, *applyconfigurationsappsv1.DaemonSetApplyConfiguration] } // newDaemonSets returns a DaemonSets func newDaemonSets(c *AppsV1Client, namespace string) *daemonSets { return &daemonSets{ - gentype.NewClientWithListAndApply[*v1.DaemonSet, *v1.DaemonSetList, *appsv1.DaemonSetApplyConfiguration]( + gentype.NewClientWithListAndApply[*appsv1.DaemonSet, *appsv1.DaemonSetList, *applyconfigurationsappsv1.DaemonSetApplyConfiguration]( "daemonsets", c.RESTClient(), scheme.ParameterCodec, namespace, - func() *v1.DaemonSet { return &v1.DaemonSet{} }, - func() *v1.DaemonSetList { return &v1.DaemonSetList{} }), + func() *appsv1.DaemonSet { return &appsv1.DaemonSet{} }, + func() *appsv1.DaemonSetList { return &appsv1.DaemonSetList{} }, + gentype.PrefersProtobuf[*appsv1.DaemonSet](), + ), } } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/deployment.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/deployment.go index 871d51cfe..cc06ccf3a 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/deployment.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/deployment.go @@ -19,19 +19,19 @@ limitations under the License. package v1 import ( - "context" - json "encoding/json" - "fmt" + context "context" + fmt "fmt" - v1 "k8s.io/api/apps/v1" + appsv1 "k8s.io/api/apps/v1" autoscalingv1 "k8s.io/api/autoscaling/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - appsv1 "k8s.io/client-go/applyconfigurations/apps/v1" + applyconfigurationsappsv1 "k8s.io/client-go/applyconfigurations/apps/v1" applyconfigurationsautoscalingv1 "k8s.io/client-go/applyconfigurations/autoscaling/v1" gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" + apply "k8s.io/client-go/util/apply" ) // DeploymentsGetter has a method to return a DeploymentInterface. @@ -42,19 +42,19 @@ type DeploymentsGetter interface { // DeploymentInterface has methods to work with Deployment resources. type DeploymentInterface interface { - Create(ctx context.Context, deployment *v1.Deployment, opts metav1.CreateOptions) (*v1.Deployment, error) - Update(ctx context.Context, deployment *v1.Deployment, opts metav1.UpdateOptions) (*v1.Deployment, error) + Create(ctx context.Context, deployment *appsv1.Deployment, opts metav1.CreateOptions) (*appsv1.Deployment, error) + Update(ctx context.Context, deployment *appsv1.Deployment, opts metav1.UpdateOptions) (*appsv1.Deployment, error) // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
- UpdateStatus(ctx context.Context, deployment *v1.Deployment, opts metav1.UpdateOptions) (*v1.Deployment, error) + UpdateStatus(ctx context.Context, deployment *appsv1.Deployment, opts metav1.UpdateOptions) (*appsv1.Deployment, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error - Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.Deployment, error) - List(ctx context.Context, opts metav1.ListOptions) (*v1.DeploymentList, error) + Get(ctx context.Context, name string, opts metav1.GetOptions) (*appsv1.Deployment, error) + List(ctx context.Context, opts metav1.ListOptions) (*appsv1.DeploymentList, error) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Deployment, err error) - Apply(ctx context.Context, deployment *appsv1.DeploymentApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Deployment, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *appsv1.Deployment, err error) + Apply(ctx context.Context, deployment *applyconfigurationsappsv1.DeploymentApplyConfiguration, opts metav1.ApplyOptions) (result *appsv1.Deployment, err error) // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). - ApplyStatus(ctx context.Context, deployment *appsv1.DeploymentApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Deployment, err error) + ApplyStatus(ctx context.Context, deployment *applyconfigurationsappsv1.DeploymentApplyConfiguration, opts metav1.ApplyOptions) (result *appsv1.Deployment, err error) GetScale(ctx context.Context, deploymentName string, options metav1.GetOptions) (*autoscalingv1.Scale, error) UpdateScale(ctx context.Context, deploymentName string, scale *autoscalingv1.Scale, opts metav1.UpdateOptions) (*autoscalingv1.Scale, error) ApplyScale(ctx context.Context, deploymentName string, scale *applyconfigurationsautoscalingv1.ScaleApplyConfiguration, opts metav1.ApplyOptions) (*autoscalingv1.Scale, error) @@ -64,19 +64,21 @@ type DeploymentInterface interface { // deployments implements DeploymentInterface type deployments struct { - *gentype.ClientWithListAndApply[*v1.Deployment, *v1.DeploymentList, *appsv1.DeploymentApplyConfiguration] + *gentype.ClientWithListAndApply[*appsv1.Deployment, *appsv1.DeploymentList, *applyconfigurationsappsv1.DeploymentApplyConfiguration] } // newDeployments returns a Deployments func newDeployments(c *AppsV1Client, namespace string) *deployments { return &deployments{ - gentype.NewClientWithListAndApply[*v1.Deployment, *v1.DeploymentList, *appsv1.DeploymentApplyConfiguration]( + gentype.NewClientWithListAndApply[*appsv1.Deployment, *appsv1.DeploymentList, *applyconfigurationsappsv1.DeploymentApplyConfiguration]( "deployments", c.RESTClient(), scheme.ParameterCodec, namespace, - func() *v1.Deployment { return &v1.Deployment{} }, - func() *v1.DeploymentList { return &v1.DeploymentList{} }), + func() *appsv1.Deployment { return &appsv1.Deployment{} }, + func() *appsv1.DeploymentList { return &appsv1.DeploymentList{} }, + gentype.PrefersProtobuf[*appsv1.Deployment](), + ), } } @@ -84,6 +86,7 @@ func newDeployments(c *AppsV1Client, namespace string) *deployments { func (c *deployments) GetScale(ctx context.Context, 
deploymentName string, options metav1.GetOptions) (result *autoscalingv1.Scale, err error) { result = &autoscalingv1.Scale{} err = c.GetClient().Get(). + UseProtobufAsDefault(). Namespace(c.GetNamespace()). Resource("deployments"). Name(deploymentName). @@ -98,6 +101,7 @@ func (c *deployments) GetScale(ctx context.Context, deploymentName string, optio func (c *deployments) UpdateScale(ctx context.Context, deploymentName string, scale *autoscalingv1.Scale, opts metav1.UpdateOptions) (result *autoscalingv1.Scale, err error) { result = &autoscalingv1.Scale{} err = c.GetClient().Put(). + UseProtobufAsDefault(). Namespace(c.GetNamespace()). Resource("deployments"). Name(deploymentName). @@ -116,19 +120,19 @@ func (c *deployments) ApplyScale(ctx context.Context, deploymentName string, sca return nil, fmt.Errorf("scale provided to ApplyScale must not be nil") } patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(scale) + request, err := apply.NewRequest(c.GetClient(), scale) if err != nil { return nil, err } result = &autoscalingv1.Scale{} - err = c.GetClient().Patch(types.ApplyPatchType). + err = request. + UseProtobufAsDefault(). Namespace(c.GetNamespace()). Resource("deployments"). Name(deploymentName). SubResource("scale"). VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). Do(ctx). Into(result) return diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_apps_client.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_apps_client.go index 458df0fa3..76949dbb5 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_apps_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_apps_client.go @@ -29,23 +29,23 @@ type FakeAppsV1 struct { } func (c *FakeAppsV1) ControllerRevisions(namespace string) v1.ControllerRevisionInterface { - return &FakeControllerRevisions{c, namespace} + return newFakeControllerRevisions(c, namespace) } func (c *FakeAppsV1) DaemonSets(namespace string) v1.DaemonSetInterface { - return &FakeDaemonSets{c, namespace} + return newFakeDaemonSets(c, namespace) } func (c *FakeAppsV1) Deployments(namespace string) v1.DeploymentInterface { - return &FakeDeployments{c, namespace} + return newFakeDeployments(c, namespace) } func (c *FakeAppsV1) ReplicaSets(namespace string) v1.ReplicaSetInterface { - return &FakeReplicaSets{c, namespace} + return newFakeReplicaSets(c, namespace) } func (c *FakeAppsV1) StatefulSets(namespace string) v1.StatefulSetInterface { - return &FakeStatefulSets{c, namespace} + return newFakeStatefulSets(c, namespace) } // RESTClient returns a RESTClient that is used to communicate diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_controllerrevision.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_controllerrevision.go index c609ef534..92a338a5c 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_controllerrevision.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_controllerrevision.go @@ -19,142 +19,35 @@ limitations under the License. 
package fake import ( - "context" - json "encoding/json" - "fmt" - v1 "k8s.io/api/apps/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" appsv1 "k8s.io/client-go/applyconfigurations/apps/v1" - testing "k8s.io/client-go/testing" + gentype "k8s.io/client-go/gentype" + typedappsv1 "k8s.io/client-go/kubernetes/typed/apps/v1" ) -// FakeControllerRevisions implements ControllerRevisionInterface -type FakeControllerRevisions struct { +// fakeControllerRevisions implements ControllerRevisionInterface +type fakeControllerRevisions struct { + *gentype.FakeClientWithListAndApply[*v1.ControllerRevision, *v1.ControllerRevisionList, *appsv1.ControllerRevisionApplyConfiguration] Fake *FakeAppsV1 - ns string -} - -var controllerrevisionsResource = v1.SchemeGroupVersion.WithResource("controllerrevisions") - -var controllerrevisionsKind = v1.SchemeGroupVersion.WithKind("ControllerRevision") - -// Get takes name of the controllerRevision, and returns the corresponding controllerRevision object, and an error if there is any. -func (c *FakeControllerRevisions) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ControllerRevision, err error) { - emptyResult := &v1.ControllerRevision{} - obj, err := c.Fake. - Invokes(testing.NewGetActionWithOptions(controllerrevisionsResource, c.ns, name, options), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1.ControllerRevision), err -} - -// List takes label and field selectors, and returns the list of ControllerRevisions that match those selectors. -func (c *FakeControllerRevisions) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ControllerRevisionList, err error) { - emptyResult := &v1.ControllerRevisionList{} - obj, err := c.Fake. - Invokes(testing.NewListActionWithOptions(controllerrevisionsResource, controllerrevisionsKind, c.ns, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1.ControllerRevisionList{ListMeta: obj.(*v1.ControllerRevisionList).ListMeta} - for _, item := range obj.(*v1.ControllerRevisionList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested controllerRevisions. -func (c *FakeControllerRevisions) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchActionWithOptions(controllerrevisionsResource, c.ns, opts)) - } -// Create takes the representation of a controllerRevision and creates it. Returns the server's representation of the controllerRevision, and an error, if there is any. -func (c *FakeControllerRevisions) Create(ctx context.Context, controllerRevision *v1.ControllerRevision, opts metav1.CreateOptions) (result *v1.ControllerRevision, err error) { - emptyResult := &v1.ControllerRevision{} - obj, err := c.Fake. - Invokes(testing.NewCreateActionWithOptions(controllerrevisionsResource, c.ns, controllerRevision, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1.ControllerRevision), err -} - -// Update takes the representation of a controllerRevision and updates it. Returns the server's representation of the controllerRevision, and an error, if there is any. 
-func (c *FakeControllerRevisions) Update(ctx context.Context, controllerRevision *v1.ControllerRevision, opts metav1.UpdateOptions) (result *v1.ControllerRevision, err error) { - emptyResult := &v1.ControllerRevision{} - obj, err := c.Fake. - Invokes(testing.NewUpdateActionWithOptions(controllerrevisionsResource, c.ns, controllerRevision, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1.ControllerRevision), err -} - -// Delete takes name of the controllerRevision and deletes it. Returns an error if one occurs. -func (c *FakeControllerRevisions) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(controllerrevisionsResource, c.ns, name, opts), &v1.ControllerRevision{}) - - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeControllerRevisions) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewDeleteCollectionActionWithOptions(controllerrevisionsResource, c.ns, opts, listOpts) - - _, err := c.Fake.Invokes(action, &v1.ControllerRevisionList{}) - return err -} - -// Patch applies the patch and returns the patched controllerRevision. -func (c *FakeControllerRevisions) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ControllerRevision, err error) { - emptyResult := &v1.ControllerRevision{} - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceActionWithOptions(controllerrevisionsResource, c.ns, name, pt, data, opts, subresources...), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1.ControllerRevision), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied controllerRevision. -func (c *FakeControllerRevisions) Apply(ctx context.Context, controllerRevision *appsv1.ControllerRevisionApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ControllerRevision, err error) { - if controllerRevision == nil { - return nil, fmt.Errorf("controllerRevision provided to Apply must not be nil") - } - data, err := json.Marshal(controllerRevision) - if err != nil { - return nil, err - } - name := controllerRevision.Name - if name == nil { - return nil, fmt.Errorf("controllerRevision.Name must be provided to Apply") - } - emptyResult := &v1.ControllerRevision{} - obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceActionWithOptions(controllerrevisionsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) - - if obj == nil { - return emptyResult, err +func newFakeControllerRevisions(fake *FakeAppsV1, namespace string) typedappsv1.ControllerRevisionInterface { + return &fakeControllerRevisions{ + gentype.NewFakeClientWithListAndApply[*v1.ControllerRevision, *v1.ControllerRevisionList, *appsv1.ControllerRevisionApplyConfiguration]( + fake.Fake, + namespace, + v1.SchemeGroupVersion.WithResource("controllerrevisions"), + v1.SchemeGroupVersion.WithKind("ControllerRevision"), + func() *v1.ControllerRevision { return &v1.ControllerRevision{} }, + func() *v1.ControllerRevisionList { return &v1.ControllerRevisionList{} }, + func(dst, src *v1.ControllerRevisionList) { dst.ListMeta = src.ListMeta }, + func(list *v1.ControllerRevisionList) []*v1.ControllerRevision { + return gentype.ToPointerSlice(list.Items) + }, + func(list *v1.ControllerRevisionList, items []*v1.ControllerRevision) { + list.Items = gentype.FromPointerSlice(items) + }, + ), + fake, } - return obj.(*v1.ControllerRevision), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_daemonset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_daemonset.go index bac3fc122..b1b47c401 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_daemonset.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_daemonset.go @@ -19,179 +19,31 @@ limitations under the License. package fake import ( - "context" - json "encoding/json" - "fmt" - v1 "k8s.io/api/apps/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" appsv1 "k8s.io/client-go/applyconfigurations/apps/v1" - testing "k8s.io/client-go/testing" + gentype "k8s.io/client-go/gentype" + typedappsv1 "k8s.io/client-go/kubernetes/typed/apps/v1" ) -// FakeDaemonSets implements DaemonSetInterface -type FakeDaemonSets struct { +// fakeDaemonSets implements DaemonSetInterface +type fakeDaemonSets struct { + *gentype.FakeClientWithListAndApply[*v1.DaemonSet, *v1.DaemonSetList, *appsv1.DaemonSetApplyConfiguration] Fake *FakeAppsV1 - ns string -} - -var daemonsetsResource = v1.SchemeGroupVersion.WithResource("daemonsets") - -var daemonsetsKind = v1.SchemeGroupVersion.WithKind("DaemonSet") - -// Get takes name of the daemonSet, and returns the corresponding daemonSet object, and an error if there is any. -func (c *FakeDaemonSets) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.DaemonSet, err error) { - emptyResult := &v1.DaemonSet{} - obj, err := c.Fake. - Invokes(testing.NewGetActionWithOptions(daemonsetsResource, c.ns, name, options), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1.DaemonSet), err -} - -// List takes label and field selectors, and returns the list of DaemonSets that match those selectors. -func (c *FakeDaemonSets) List(ctx context.Context, opts metav1.ListOptions) (result *v1.DaemonSetList, err error) { - emptyResult := &v1.DaemonSetList{} - obj, err := c.Fake. 
- Invokes(testing.NewListActionWithOptions(daemonsetsResource, daemonsetsKind, c.ns, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1.DaemonSetList{ListMeta: obj.(*v1.DaemonSetList).ListMeta} - for _, item := range obj.(*v1.DaemonSetList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested daemonSets. -func (c *FakeDaemonSets) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchActionWithOptions(daemonsetsResource, c.ns, opts)) - -} - -// Create takes the representation of a daemonSet and creates it. Returns the server's representation of the daemonSet, and an error, if there is any. -func (c *FakeDaemonSets) Create(ctx context.Context, daemonSet *v1.DaemonSet, opts metav1.CreateOptions) (result *v1.DaemonSet, err error) { - emptyResult := &v1.DaemonSet{} - obj, err := c.Fake. - Invokes(testing.NewCreateActionWithOptions(daemonsetsResource, c.ns, daemonSet, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1.DaemonSet), err -} - -// Update takes the representation of a daemonSet and updates it. Returns the server's representation of the daemonSet, and an error, if there is any. -func (c *FakeDaemonSets) Update(ctx context.Context, daemonSet *v1.DaemonSet, opts metav1.UpdateOptions) (result *v1.DaemonSet, err error) { - emptyResult := &v1.DaemonSet{} - obj, err := c.Fake. - Invokes(testing.NewUpdateActionWithOptions(daemonsetsResource, c.ns, daemonSet, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1.DaemonSet), err } -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeDaemonSets) UpdateStatus(ctx context.Context, daemonSet *v1.DaemonSet, opts metav1.UpdateOptions) (result *v1.DaemonSet, err error) { - emptyResult := &v1.DaemonSet{} - obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceActionWithOptions(daemonsetsResource, "status", c.ns, daemonSet, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1.DaemonSet), err -} - -// Delete takes name of the daemonSet and deletes it. Returns an error if one occurs. -func (c *FakeDaemonSets) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(daemonsetsResource, c.ns, name, opts), &v1.DaemonSet{}) - - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeDaemonSets) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewDeleteCollectionActionWithOptions(daemonsetsResource, c.ns, opts, listOpts) - - _, err := c.Fake.Invokes(action, &v1.DaemonSetList{}) - return err -} - -// Patch applies the patch and returns the patched daemonSet. -func (c *FakeDaemonSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.DaemonSet, err error) { - emptyResult := &v1.DaemonSet{} - obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceActionWithOptions(daemonsetsResource, c.ns, name, pt, data, opts, subresources...), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1.DaemonSet), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied daemonSet. -func (c *FakeDaemonSets) Apply(ctx context.Context, daemonSet *appsv1.DaemonSetApplyConfiguration, opts metav1.ApplyOptions) (result *v1.DaemonSet, err error) { - if daemonSet == nil { - return nil, fmt.Errorf("daemonSet provided to Apply must not be nil") - } - data, err := json.Marshal(daemonSet) - if err != nil { - return nil, err - } - name := daemonSet.Name - if name == nil { - return nil, fmt.Errorf("daemonSet.Name must be provided to Apply") - } - emptyResult := &v1.DaemonSet{} - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceActionWithOptions(daemonsetsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1.DaemonSet), err -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *FakeDaemonSets) ApplyStatus(ctx context.Context, daemonSet *appsv1.DaemonSetApplyConfiguration, opts metav1.ApplyOptions) (result *v1.DaemonSet, err error) { - if daemonSet == nil { - return nil, fmt.Errorf("daemonSet provided to Apply must not be nil") - } - data, err := json.Marshal(daemonSet) - if err != nil { - return nil, err - } - name := daemonSet.Name - if name == nil { - return nil, fmt.Errorf("daemonSet.Name must be provided to Apply") - } - emptyResult := &v1.DaemonSet{} - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceActionWithOptions(daemonsetsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) - - if obj == nil { - return emptyResult, err +func newFakeDaemonSets(fake *FakeAppsV1, namespace string) typedappsv1.DaemonSetInterface { + return &fakeDaemonSets{ + gentype.NewFakeClientWithListAndApply[*v1.DaemonSet, *v1.DaemonSetList, *appsv1.DaemonSetApplyConfiguration]( + fake.Fake, + namespace, + v1.SchemeGroupVersion.WithResource("daemonsets"), + v1.SchemeGroupVersion.WithKind("DaemonSet"), + func() *v1.DaemonSet { return &v1.DaemonSet{} }, + func() *v1.DaemonSetList { return &v1.DaemonSetList{} }, + func(dst, src *v1.DaemonSetList) { dst.ListMeta = src.ListMeta }, + func(list *v1.DaemonSetList) []*v1.DaemonSet { return gentype.ToPointerSlice(list.Items) }, + func(list *v1.DaemonSetList, items []*v1.DaemonSet) { list.Items = gentype.FromPointerSlice(items) }, + ), + fake, } - return obj.(*v1.DaemonSet), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_deployment.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_deployment.go index 8ed843288..7d7ae0ddb 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_deployment.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_deployment.go @@ -19,190 +19,49 @@ limitations under the License. 
package fake import ( - "context" + context "context" json "encoding/json" - "fmt" + fmt "fmt" v1 "k8s.io/api/apps/v1" autoscalingv1 "k8s.io/api/autoscaling/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" appsv1 "k8s.io/client-go/applyconfigurations/apps/v1" applyconfigurationsautoscalingv1 "k8s.io/client-go/applyconfigurations/autoscaling/v1" + gentype "k8s.io/client-go/gentype" + typedappsv1 "k8s.io/client-go/kubernetes/typed/apps/v1" testing "k8s.io/client-go/testing" ) -// FakeDeployments implements DeploymentInterface -type FakeDeployments struct { +// fakeDeployments implements DeploymentInterface +type fakeDeployments struct { + *gentype.FakeClientWithListAndApply[*v1.Deployment, *v1.DeploymentList, *appsv1.DeploymentApplyConfiguration] Fake *FakeAppsV1 - ns string } -var deploymentsResource = v1.SchemeGroupVersion.WithResource("deployments") - -var deploymentsKind = v1.SchemeGroupVersion.WithKind("Deployment") - -// Get takes name of the deployment, and returns the corresponding deployment object, and an error if there is any. -func (c *FakeDeployments) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Deployment, err error) { - emptyResult := &v1.Deployment{} - obj, err := c.Fake. - Invokes(testing.NewGetActionWithOptions(deploymentsResource, c.ns, name, options), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1.Deployment), err -} - -// List takes label and field selectors, and returns the list of Deployments that match those selectors. -func (c *FakeDeployments) List(ctx context.Context, opts metav1.ListOptions) (result *v1.DeploymentList, err error) { - emptyResult := &v1.DeploymentList{} - obj, err := c.Fake. - Invokes(testing.NewListActionWithOptions(deploymentsResource, deploymentsKind, c.ns, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1.DeploymentList{ListMeta: obj.(*v1.DeploymentList).ListMeta} - for _, item := range obj.(*v1.DeploymentList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested deployments. -func (c *FakeDeployments) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchActionWithOptions(deploymentsResource, c.ns, opts)) - -} - -// Create takes the representation of a deployment and creates it. Returns the server's representation of the deployment, and an error, if there is any. -func (c *FakeDeployments) Create(ctx context.Context, deployment *v1.Deployment, opts metav1.CreateOptions) (result *v1.Deployment, err error) { - emptyResult := &v1.Deployment{} - obj, err := c.Fake. - Invokes(testing.NewCreateActionWithOptions(deploymentsResource, c.ns, deployment, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1.Deployment), err -} - -// Update takes the representation of a deployment and updates it. Returns the server's representation of the deployment, and an error, if there is any. -func (c *FakeDeployments) Update(ctx context.Context, deployment *v1.Deployment, opts metav1.UpdateOptions) (result *v1.Deployment, err error) { - emptyResult := &v1.Deployment{} - obj, err := c.Fake. 
- Invokes(testing.NewUpdateActionWithOptions(deploymentsResource, c.ns, deployment, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1.Deployment), err -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeDeployments) UpdateStatus(ctx context.Context, deployment *v1.Deployment, opts metav1.UpdateOptions) (result *v1.Deployment, err error) { - emptyResult := &v1.Deployment{} - obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceActionWithOptions(deploymentsResource, "status", c.ns, deployment, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1.Deployment), err -} - -// Delete takes name of the deployment and deletes it. Returns an error if one occurs. -func (c *FakeDeployments) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(deploymentsResource, c.ns, name, opts), &v1.Deployment{}) - - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeDeployments) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewDeleteCollectionActionWithOptions(deploymentsResource, c.ns, opts, listOpts) - - _, err := c.Fake.Invokes(action, &v1.DeploymentList{}) - return err -} - -// Patch applies the patch and returns the patched deployment. -func (c *FakeDeployments) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Deployment, err error) { - emptyResult := &v1.Deployment{} - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceActionWithOptions(deploymentsResource, c.ns, name, pt, data, opts, subresources...), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1.Deployment), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied deployment. -func (c *FakeDeployments) Apply(ctx context.Context, deployment *appsv1.DeploymentApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Deployment, err error) { - if deployment == nil { - return nil, fmt.Errorf("deployment provided to Apply must not be nil") - } - data, err := json.Marshal(deployment) - if err != nil { - return nil, err - } - name := deployment.Name - if name == nil { - return nil, fmt.Errorf("deployment.Name must be provided to Apply") - } - emptyResult := &v1.Deployment{} - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceActionWithOptions(deploymentsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1.Deployment), err -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
-func (c *FakeDeployments) ApplyStatus(ctx context.Context, deployment *appsv1.DeploymentApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Deployment, err error) { - if deployment == nil { - return nil, fmt.Errorf("deployment provided to Apply must not be nil") - } - data, err := json.Marshal(deployment) - if err != nil { - return nil, err - } - name := deployment.Name - if name == nil { - return nil, fmt.Errorf("deployment.Name must be provided to Apply") - } - emptyResult := &v1.Deployment{} - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceActionWithOptions(deploymentsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) - - if obj == nil { - return emptyResult, err +func newFakeDeployments(fake *FakeAppsV1, namespace string) typedappsv1.DeploymentInterface { + return &fakeDeployments{ + gentype.NewFakeClientWithListAndApply[*v1.Deployment, *v1.DeploymentList, *appsv1.DeploymentApplyConfiguration]( + fake.Fake, + namespace, + v1.SchemeGroupVersion.WithResource("deployments"), + v1.SchemeGroupVersion.WithKind("Deployment"), + func() *v1.Deployment { return &v1.Deployment{} }, + func() *v1.DeploymentList { return &v1.DeploymentList{} }, + func(dst, src *v1.DeploymentList) { dst.ListMeta = src.ListMeta }, + func(list *v1.DeploymentList) []*v1.Deployment { return gentype.ToPointerSlice(list.Items) }, + func(list *v1.DeploymentList, items []*v1.Deployment) { list.Items = gentype.FromPointerSlice(items) }, + ), + fake, } - return obj.(*v1.Deployment), err } // GetScale takes name of the deployment, and returns the corresponding scale object, and an error if there is any. -func (c *FakeDeployments) GetScale(ctx context.Context, deploymentName string, options metav1.GetOptions) (result *autoscalingv1.Scale, err error) { +func (c *fakeDeployments) GetScale(ctx context.Context, deploymentName string, options metav1.GetOptions) (result *autoscalingv1.Scale, err error) { emptyResult := &autoscalingv1.Scale{} obj, err := c.Fake. - Invokes(testing.NewGetSubresourceActionWithOptions(deploymentsResource, c.ns, "scale", deploymentName, options), emptyResult) + Invokes(testing.NewGetSubresourceActionWithOptions(c.Resource(), c.Namespace(), "scale", deploymentName, options), emptyResult) if obj == nil { return emptyResult, err @@ -211,10 +70,10 @@ func (c *FakeDeployments) GetScale(ctx context.Context, deploymentName string, o } // UpdateScale takes the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any. -func (c *FakeDeployments) UpdateScale(ctx context.Context, deploymentName string, scale *autoscalingv1.Scale, opts metav1.UpdateOptions) (result *autoscalingv1.Scale, err error) { +func (c *fakeDeployments) UpdateScale(ctx context.Context, deploymentName string, scale *autoscalingv1.Scale, opts metav1.UpdateOptions) (result *autoscalingv1.Scale, err error) { emptyResult := &autoscalingv1.Scale{} obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceActionWithOptions(deploymentsResource, "scale", c.ns, scale, opts), &autoscalingv1.Scale{}) + Invokes(testing.NewUpdateSubresourceActionWithOptions(c.Resource(), "scale", c.Namespace(), scale, opts), &autoscalingv1.Scale{}) if obj == nil { return emptyResult, err @@ -224,7 +83,7 @@ func (c *FakeDeployments) UpdateScale(ctx context.Context, deploymentName string // ApplyScale takes top resource name and the apply declarative configuration for scale, // applies it and returns the applied scale, and an error, if there is any. 
-func (c *FakeDeployments) ApplyScale(ctx context.Context, deploymentName string, scale *applyconfigurationsautoscalingv1.ScaleApplyConfiguration, opts metav1.ApplyOptions) (result *autoscalingv1.Scale, err error) { +func (c *fakeDeployments) ApplyScale(ctx context.Context, deploymentName string, scale *applyconfigurationsautoscalingv1.ScaleApplyConfiguration, opts metav1.ApplyOptions) (result *autoscalingv1.Scale, err error) { if scale == nil { return nil, fmt.Errorf("scale provided to ApplyScale must not be nil") } @@ -234,7 +93,7 @@ func (c *FakeDeployments) ApplyScale(ctx context.Context, deploymentName string, } emptyResult := &autoscalingv1.Scale{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceActionWithOptions(deploymentsResource, c.ns, deploymentName, types.ApplyPatchType, data, opts.ToPatchOptions(), "scale"), emptyResult) + Invokes(testing.NewPatchSubresourceActionWithOptions(c.Resource(), c.Namespace(), deploymentName, types.ApplyPatchType, data, opts.ToPatchOptions(), "scale"), emptyResult) if obj == nil { return emptyResult, err diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_replicaset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_replicaset.go index 942a4e64a..691818ec7 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_replicaset.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_replicaset.go @@ -19,190 +19,49 @@ limitations under the License. package fake import ( - "context" + context "context" json "encoding/json" - "fmt" + fmt "fmt" v1 "k8s.io/api/apps/v1" autoscalingv1 "k8s.io/api/autoscaling/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" appsv1 "k8s.io/client-go/applyconfigurations/apps/v1" applyconfigurationsautoscalingv1 "k8s.io/client-go/applyconfigurations/autoscaling/v1" + gentype "k8s.io/client-go/gentype" + typedappsv1 "k8s.io/client-go/kubernetes/typed/apps/v1" testing "k8s.io/client-go/testing" ) -// FakeReplicaSets implements ReplicaSetInterface -type FakeReplicaSets struct { +// fakeReplicaSets implements ReplicaSetInterface +type fakeReplicaSets struct { + *gentype.FakeClientWithListAndApply[*v1.ReplicaSet, *v1.ReplicaSetList, *appsv1.ReplicaSetApplyConfiguration] Fake *FakeAppsV1 - ns string } -var replicasetsResource = v1.SchemeGroupVersion.WithResource("replicasets") - -var replicasetsKind = v1.SchemeGroupVersion.WithKind("ReplicaSet") - -// Get takes name of the replicaSet, and returns the corresponding replicaSet object, and an error if there is any. -func (c *FakeReplicaSets) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ReplicaSet, err error) { - emptyResult := &v1.ReplicaSet{} - obj, err := c.Fake. - Invokes(testing.NewGetActionWithOptions(replicasetsResource, c.ns, name, options), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1.ReplicaSet), err -} - -// List takes label and field selectors, and returns the list of ReplicaSets that match those selectors. -func (c *FakeReplicaSets) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ReplicaSetList, err error) { - emptyResult := &v1.ReplicaSetList{} - obj, err := c.Fake. 
- Invokes(testing.NewListActionWithOptions(replicasetsResource, replicasetsKind, c.ns, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1.ReplicaSetList{ListMeta: obj.(*v1.ReplicaSetList).ListMeta} - for _, item := range obj.(*v1.ReplicaSetList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested replicaSets. -func (c *FakeReplicaSets) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchActionWithOptions(replicasetsResource, c.ns, opts)) - -} - -// Create takes the representation of a replicaSet and creates it. Returns the server's representation of the replicaSet, and an error, if there is any. -func (c *FakeReplicaSets) Create(ctx context.Context, replicaSet *v1.ReplicaSet, opts metav1.CreateOptions) (result *v1.ReplicaSet, err error) { - emptyResult := &v1.ReplicaSet{} - obj, err := c.Fake. - Invokes(testing.NewCreateActionWithOptions(replicasetsResource, c.ns, replicaSet, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1.ReplicaSet), err -} - -// Update takes the representation of a replicaSet and updates it. Returns the server's representation of the replicaSet, and an error, if there is any. -func (c *FakeReplicaSets) Update(ctx context.Context, replicaSet *v1.ReplicaSet, opts metav1.UpdateOptions) (result *v1.ReplicaSet, err error) { - emptyResult := &v1.ReplicaSet{} - obj, err := c.Fake. - Invokes(testing.NewUpdateActionWithOptions(replicasetsResource, c.ns, replicaSet, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1.ReplicaSet), err -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeReplicaSets) UpdateStatus(ctx context.Context, replicaSet *v1.ReplicaSet, opts metav1.UpdateOptions) (result *v1.ReplicaSet, err error) { - emptyResult := &v1.ReplicaSet{} - obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceActionWithOptions(replicasetsResource, "status", c.ns, replicaSet, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1.ReplicaSet), err -} - -// Delete takes name of the replicaSet and deletes it. Returns an error if one occurs. -func (c *FakeReplicaSets) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(replicasetsResource, c.ns, name, opts), &v1.ReplicaSet{}) - - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeReplicaSets) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewDeleteCollectionActionWithOptions(replicasetsResource, c.ns, opts, listOpts) - - _, err := c.Fake.Invokes(action, &v1.ReplicaSetList{}) - return err -} - -// Patch applies the patch and returns the patched replicaSet. -func (c *FakeReplicaSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ReplicaSet, err error) { - emptyResult := &v1.ReplicaSet{} - obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceActionWithOptions(replicasetsResource, c.ns, name, pt, data, opts, subresources...), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1.ReplicaSet), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied replicaSet. -func (c *FakeReplicaSets) Apply(ctx context.Context, replicaSet *appsv1.ReplicaSetApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ReplicaSet, err error) { - if replicaSet == nil { - return nil, fmt.Errorf("replicaSet provided to Apply must not be nil") - } - data, err := json.Marshal(replicaSet) - if err != nil { - return nil, err - } - name := replicaSet.Name - if name == nil { - return nil, fmt.Errorf("replicaSet.Name must be provided to Apply") - } - emptyResult := &v1.ReplicaSet{} - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceActionWithOptions(replicasetsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1.ReplicaSet), err -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *FakeReplicaSets) ApplyStatus(ctx context.Context, replicaSet *appsv1.ReplicaSetApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ReplicaSet, err error) { - if replicaSet == nil { - return nil, fmt.Errorf("replicaSet provided to Apply must not be nil") - } - data, err := json.Marshal(replicaSet) - if err != nil { - return nil, err - } - name := replicaSet.Name - if name == nil { - return nil, fmt.Errorf("replicaSet.Name must be provided to Apply") - } - emptyResult := &v1.ReplicaSet{} - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceActionWithOptions(replicasetsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) - - if obj == nil { - return emptyResult, err +func newFakeReplicaSets(fake *FakeAppsV1, namespace string) typedappsv1.ReplicaSetInterface { + return &fakeReplicaSets{ + gentype.NewFakeClientWithListAndApply[*v1.ReplicaSet, *v1.ReplicaSetList, *appsv1.ReplicaSetApplyConfiguration]( + fake.Fake, + namespace, + v1.SchemeGroupVersion.WithResource("replicasets"), + v1.SchemeGroupVersion.WithKind("ReplicaSet"), + func() *v1.ReplicaSet { return &v1.ReplicaSet{} }, + func() *v1.ReplicaSetList { return &v1.ReplicaSetList{} }, + func(dst, src *v1.ReplicaSetList) { dst.ListMeta = src.ListMeta }, + func(list *v1.ReplicaSetList) []*v1.ReplicaSet { return gentype.ToPointerSlice(list.Items) }, + func(list *v1.ReplicaSetList, items []*v1.ReplicaSet) { list.Items = gentype.FromPointerSlice(items) }, + ), + fake, } - return obj.(*v1.ReplicaSet), err } // GetScale takes name of the replicaSet, and returns the corresponding scale object, and an error if there is any. -func (c *FakeReplicaSets) GetScale(ctx context.Context, replicaSetName string, options metav1.GetOptions) (result *autoscalingv1.Scale, err error) { +func (c *fakeReplicaSets) GetScale(ctx context.Context, replicaSetName string, options metav1.GetOptions) (result *autoscalingv1.Scale, err error) { emptyResult := &autoscalingv1.Scale{} obj, err := c.Fake. 
- Invokes(testing.NewGetSubresourceActionWithOptions(replicasetsResource, c.ns, "scale", replicaSetName, options), emptyResult) + Invokes(testing.NewGetSubresourceActionWithOptions(c.Resource(), c.Namespace(), "scale", replicaSetName, options), emptyResult) if obj == nil { return emptyResult, err @@ -211,10 +70,10 @@ func (c *FakeReplicaSets) GetScale(ctx context.Context, replicaSetName string, o } // UpdateScale takes the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any. -func (c *FakeReplicaSets) UpdateScale(ctx context.Context, replicaSetName string, scale *autoscalingv1.Scale, opts metav1.UpdateOptions) (result *autoscalingv1.Scale, err error) { +func (c *fakeReplicaSets) UpdateScale(ctx context.Context, replicaSetName string, scale *autoscalingv1.Scale, opts metav1.UpdateOptions) (result *autoscalingv1.Scale, err error) { emptyResult := &autoscalingv1.Scale{} obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceActionWithOptions(replicasetsResource, "scale", c.ns, scale, opts), &autoscalingv1.Scale{}) + Invokes(testing.NewUpdateSubresourceActionWithOptions(c.Resource(), "scale", c.Namespace(), scale, opts), &autoscalingv1.Scale{}) if obj == nil { return emptyResult, err @@ -224,7 +83,7 @@ func (c *FakeReplicaSets) UpdateScale(ctx context.Context, replicaSetName string // ApplyScale takes top resource name and the apply declarative configuration for scale, // applies it and returns the applied scale, and an error, if there is any. -func (c *FakeReplicaSets) ApplyScale(ctx context.Context, replicaSetName string, scale *applyconfigurationsautoscalingv1.ScaleApplyConfiguration, opts metav1.ApplyOptions) (result *autoscalingv1.Scale, err error) { +func (c *fakeReplicaSets) ApplyScale(ctx context.Context, replicaSetName string, scale *applyconfigurationsautoscalingv1.ScaleApplyConfiguration, opts metav1.ApplyOptions) (result *autoscalingv1.Scale, err error) { if scale == nil { return nil, fmt.Errorf("scale provided to ApplyScale must not be nil") } @@ -234,7 +93,7 @@ func (c *FakeReplicaSets) ApplyScale(ctx context.Context, replicaSetName string, } emptyResult := &autoscalingv1.Scale{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceActionWithOptions(replicasetsResource, c.ns, replicaSetName, types.ApplyPatchType, data, opts.ToPatchOptions(), "scale"), emptyResult) + Invokes(testing.NewPatchSubresourceActionWithOptions(c.Resource(), c.Namespace(), replicaSetName, types.ApplyPatchType, data, opts.ToPatchOptions(), "scale"), emptyResult) if obj == nil { return emptyResult, err diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_statefulset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_statefulset.go index ae4e811fb..de3a19da1 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_statefulset.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_statefulset.go @@ -19,190 +19,49 @@ limitations under the License. 
package fake import ( - "context" + context "context" json "encoding/json" - "fmt" + fmt "fmt" v1 "k8s.io/api/apps/v1" autoscalingv1 "k8s.io/api/autoscaling/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" appsv1 "k8s.io/client-go/applyconfigurations/apps/v1" applyconfigurationsautoscalingv1 "k8s.io/client-go/applyconfigurations/autoscaling/v1" + gentype "k8s.io/client-go/gentype" + typedappsv1 "k8s.io/client-go/kubernetes/typed/apps/v1" testing "k8s.io/client-go/testing" ) -// FakeStatefulSets implements StatefulSetInterface -type FakeStatefulSets struct { +// fakeStatefulSets implements StatefulSetInterface +type fakeStatefulSets struct { + *gentype.FakeClientWithListAndApply[*v1.StatefulSet, *v1.StatefulSetList, *appsv1.StatefulSetApplyConfiguration] Fake *FakeAppsV1 - ns string } -var statefulsetsResource = v1.SchemeGroupVersion.WithResource("statefulsets") - -var statefulsetsKind = v1.SchemeGroupVersion.WithKind("StatefulSet") - -// Get takes name of the statefulSet, and returns the corresponding statefulSet object, and an error if there is any. -func (c *FakeStatefulSets) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.StatefulSet, err error) { - emptyResult := &v1.StatefulSet{} - obj, err := c.Fake. - Invokes(testing.NewGetActionWithOptions(statefulsetsResource, c.ns, name, options), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1.StatefulSet), err -} - -// List takes label and field selectors, and returns the list of StatefulSets that match those selectors. -func (c *FakeStatefulSets) List(ctx context.Context, opts metav1.ListOptions) (result *v1.StatefulSetList, err error) { - emptyResult := &v1.StatefulSetList{} - obj, err := c.Fake. - Invokes(testing.NewListActionWithOptions(statefulsetsResource, statefulsetsKind, c.ns, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1.StatefulSetList{ListMeta: obj.(*v1.StatefulSetList).ListMeta} - for _, item := range obj.(*v1.StatefulSetList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested statefulSets. -func (c *FakeStatefulSets) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchActionWithOptions(statefulsetsResource, c.ns, opts)) - -} - -// Create takes the representation of a statefulSet and creates it. Returns the server's representation of the statefulSet, and an error, if there is any. -func (c *FakeStatefulSets) Create(ctx context.Context, statefulSet *v1.StatefulSet, opts metav1.CreateOptions) (result *v1.StatefulSet, err error) { - emptyResult := &v1.StatefulSet{} - obj, err := c.Fake. - Invokes(testing.NewCreateActionWithOptions(statefulsetsResource, c.ns, statefulSet, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1.StatefulSet), err -} - -// Update takes the representation of a statefulSet and updates it. Returns the server's representation of the statefulSet, and an error, if there is any. 
-func (c *FakeStatefulSets) Update(ctx context.Context, statefulSet *v1.StatefulSet, opts metav1.UpdateOptions) (result *v1.StatefulSet, err error) { - emptyResult := &v1.StatefulSet{} - obj, err := c.Fake. - Invokes(testing.NewUpdateActionWithOptions(statefulsetsResource, c.ns, statefulSet, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1.StatefulSet), err -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeStatefulSets) UpdateStatus(ctx context.Context, statefulSet *v1.StatefulSet, opts metav1.UpdateOptions) (result *v1.StatefulSet, err error) { - emptyResult := &v1.StatefulSet{} - obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceActionWithOptions(statefulsetsResource, "status", c.ns, statefulSet, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1.StatefulSet), err -} - -// Delete takes name of the statefulSet and deletes it. Returns an error if one occurs. -func (c *FakeStatefulSets) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(statefulsetsResource, c.ns, name, opts), &v1.StatefulSet{}) - - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeStatefulSets) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewDeleteCollectionActionWithOptions(statefulsetsResource, c.ns, opts, listOpts) - - _, err := c.Fake.Invokes(action, &v1.StatefulSetList{}) - return err -} - -// Patch applies the patch and returns the patched statefulSet. -func (c *FakeStatefulSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.StatefulSet, err error) { - emptyResult := &v1.StatefulSet{} - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceActionWithOptions(statefulsetsResource, c.ns, name, pt, data, opts, subresources...), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1.StatefulSet), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied statefulSet. -func (c *FakeStatefulSets) Apply(ctx context.Context, statefulSet *appsv1.StatefulSetApplyConfiguration, opts metav1.ApplyOptions) (result *v1.StatefulSet, err error) { - if statefulSet == nil { - return nil, fmt.Errorf("statefulSet provided to Apply must not be nil") - } - data, err := json.Marshal(statefulSet) - if err != nil { - return nil, err - } - name := statefulSet.Name - if name == nil { - return nil, fmt.Errorf("statefulSet.Name must be provided to Apply") - } - emptyResult := &v1.StatefulSet{} - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceActionWithOptions(statefulsetsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1.StatefulSet), err -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
-func (c *FakeStatefulSets) ApplyStatus(ctx context.Context, statefulSet *appsv1.StatefulSetApplyConfiguration, opts metav1.ApplyOptions) (result *v1.StatefulSet, err error) { - if statefulSet == nil { - return nil, fmt.Errorf("statefulSet provided to Apply must not be nil") - } - data, err := json.Marshal(statefulSet) - if err != nil { - return nil, err - } - name := statefulSet.Name - if name == nil { - return nil, fmt.Errorf("statefulSet.Name must be provided to Apply") - } - emptyResult := &v1.StatefulSet{} - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceActionWithOptions(statefulsetsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) - - if obj == nil { - return emptyResult, err +func newFakeStatefulSets(fake *FakeAppsV1, namespace string) typedappsv1.StatefulSetInterface { + return &fakeStatefulSets{ + gentype.NewFakeClientWithListAndApply[*v1.StatefulSet, *v1.StatefulSetList, *appsv1.StatefulSetApplyConfiguration]( + fake.Fake, + namespace, + v1.SchemeGroupVersion.WithResource("statefulsets"), + v1.SchemeGroupVersion.WithKind("StatefulSet"), + func() *v1.StatefulSet { return &v1.StatefulSet{} }, + func() *v1.StatefulSetList { return &v1.StatefulSetList{} }, + func(dst, src *v1.StatefulSetList) { dst.ListMeta = src.ListMeta }, + func(list *v1.StatefulSetList) []*v1.StatefulSet { return gentype.ToPointerSlice(list.Items) }, + func(list *v1.StatefulSetList, items []*v1.StatefulSet) { list.Items = gentype.FromPointerSlice(items) }, + ), + fake, } - return obj.(*v1.StatefulSet), err } // GetScale takes name of the statefulSet, and returns the corresponding scale object, and an error if there is any. -func (c *FakeStatefulSets) GetScale(ctx context.Context, statefulSetName string, options metav1.GetOptions) (result *autoscalingv1.Scale, err error) { +func (c *fakeStatefulSets) GetScale(ctx context.Context, statefulSetName string, options metav1.GetOptions) (result *autoscalingv1.Scale, err error) { emptyResult := &autoscalingv1.Scale{} obj, err := c.Fake. - Invokes(testing.NewGetSubresourceActionWithOptions(statefulsetsResource, c.ns, "scale", statefulSetName, options), emptyResult) + Invokes(testing.NewGetSubresourceActionWithOptions(c.Resource(), c.Namespace(), "scale", statefulSetName, options), emptyResult) if obj == nil { return emptyResult, err @@ -211,10 +70,10 @@ func (c *FakeStatefulSets) GetScale(ctx context.Context, statefulSetName string, } // UpdateScale takes the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any. -func (c *FakeStatefulSets) UpdateScale(ctx context.Context, statefulSetName string, scale *autoscalingv1.Scale, opts metav1.UpdateOptions) (result *autoscalingv1.Scale, err error) { +func (c *fakeStatefulSets) UpdateScale(ctx context.Context, statefulSetName string, scale *autoscalingv1.Scale, opts metav1.UpdateOptions) (result *autoscalingv1.Scale, err error) { emptyResult := &autoscalingv1.Scale{} obj, err := c.Fake. 
- Invokes(testing.NewUpdateSubresourceActionWithOptions(statefulsetsResource, "scale", c.ns, scale, opts), &autoscalingv1.Scale{}) + Invokes(testing.NewUpdateSubresourceActionWithOptions(c.Resource(), "scale", c.Namespace(), scale, opts), &autoscalingv1.Scale{}) if obj == nil { return emptyResult, err @@ -224,7 +83,7 @@ func (c *FakeStatefulSets) UpdateScale(ctx context.Context, statefulSetName stri // ApplyScale takes top resource name and the apply declarative configuration for scale, // applies it and returns the applied scale, and an error, if there is any. -func (c *FakeStatefulSets) ApplyScale(ctx context.Context, statefulSetName string, scale *applyconfigurationsautoscalingv1.ScaleApplyConfiguration, opts metav1.ApplyOptions) (result *autoscalingv1.Scale, err error) { +func (c *fakeStatefulSets) ApplyScale(ctx context.Context, statefulSetName string, scale *applyconfigurationsautoscalingv1.ScaleApplyConfiguration, opts metav1.ApplyOptions) (result *autoscalingv1.Scale, err error) { if scale == nil { return nil, fmt.Errorf("scale provided to ApplyScale must not be nil") } @@ -234,7 +93,7 @@ func (c *FakeStatefulSets) ApplyScale(ctx context.Context, statefulSetName strin } emptyResult := &autoscalingv1.Scale{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceActionWithOptions(statefulsetsResource, c.ns, statefulSetName, types.ApplyPatchType, data, opts.ToPatchOptions(), "scale"), emptyResult) + Invokes(testing.NewPatchSubresourceActionWithOptions(c.Resource(), c.Namespace(), statefulSetName, types.ApplyPatchType, data, opts.ToPatchOptions(), "scale"), emptyResult) if obj == nil { return emptyResult, err diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/replicaset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/replicaset.go index d6dec016b..db0fed952 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/replicaset.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/replicaset.go @@ -19,19 +19,19 @@ limitations under the License. package v1 import ( - "context" - json "encoding/json" - "fmt" + context "context" + fmt "fmt" - v1 "k8s.io/api/apps/v1" + appsv1 "k8s.io/api/apps/v1" autoscalingv1 "k8s.io/api/autoscaling/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - appsv1 "k8s.io/client-go/applyconfigurations/apps/v1" + applyconfigurationsappsv1 "k8s.io/client-go/applyconfigurations/apps/v1" applyconfigurationsautoscalingv1 "k8s.io/client-go/applyconfigurations/autoscaling/v1" gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" + apply "k8s.io/client-go/util/apply" ) // ReplicaSetsGetter has a method to return a ReplicaSetInterface. @@ -42,19 +42,19 @@ type ReplicaSetsGetter interface { // ReplicaSetInterface has methods to work with ReplicaSet resources. type ReplicaSetInterface interface { - Create(ctx context.Context, replicaSet *v1.ReplicaSet, opts metav1.CreateOptions) (*v1.ReplicaSet, error) - Update(ctx context.Context, replicaSet *v1.ReplicaSet, opts metav1.UpdateOptions) (*v1.ReplicaSet, error) + Create(ctx context.Context, replicaSet *appsv1.ReplicaSet, opts metav1.CreateOptions) (*appsv1.ReplicaSet, error) + Update(ctx context.Context, replicaSet *appsv1.ReplicaSet, opts metav1.UpdateOptions) (*appsv1.ReplicaSet, error) // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
- UpdateStatus(ctx context.Context, replicaSet *v1.ReplicaSet, opts metav1.UpdateOptions) (*v1.ReplicaSet, error) + UpdateStatus(ctx context.Context, replicaSet *appsv1.ReplicaSet, opts metav1.UpdateOptions) (*appsv1.ReplicaSet, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error - Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.ReplicaSet, error) - List(ctx context.Context, opts metav1.ListOptions) (*v1.ReplicaSetList, error) + Get(ctx context.Context, name string, opts metav1.GetOptions) (*appsv1.ReplicaSet, error) + List(ctx context.Context, opts metav1.ListOptions) (*appsv1.ReplicaSetList, error) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ReplicaSet, err error) - Apply(ctx context.Context, replicaSet *appsv1.ReplicaSetApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ReplicaSet, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *appsv1.ReplicaSet, err error) + Apply(ctx context.Context, replicaSet *applyconfigurationsappsv1.ReplicaSetApplyConfiguration, opts metav1.ApplyOptions) (result *appsv1.ReplicaSet, err error) // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). - ApplyStatus(ctx context.Context, replicaSet *appsv1.ReplicaSetApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ReplicaSet, err error) + ApplyStatus(ctx context.Context, replicaSet *applyconfigurationsappsv1.ReplicaSetApplyConfiguration, opts metav1.ApplyOptions) (result *appsv1.ReplicaSet, err error) GetScale(ctx context.Context, replicaSetName string, options metav1.GetOptions) (*autoscalingv1.Scale, error) UpdateScale(ctx context.Context, replicaSetName string, scale *autoscalingv1.Scale, opts metav1.UpdateOptions) (*autoscalingv1.Scale, error) ApplyScale(ctx context.Context, replicaSetName string, scale *applyconfigurationsautoscalingv1.ScaleApplyConfiguration, opts metav1.ApplyOptions) (*autoscalingv1.Scale, error) @@ -64,19 +64,21 @@ type ReplicaSetInterface interface { // replicaSets implements ReplicaSetInterface type replicaSets struct { - *gentype.ClientWithListAndApply[*v1.ReplicaSet, *v1.ReplicaSetList, *appsv1.ReplicaSetApplyConfiguration] + *gentype.ClientWithListAndApply[*appsv1.ReplicaSet, *appsv1.ReplicaSetList, *applyconfigurationsappsv1.ReplicaSetApplyConfiguration] } // newReplicaSets returns a ReplicaSets func newReplicaSets(c *AppsV1Client, namespace string) *replicaSets { return &replicaSets{ - gentype.NewClientWithListAndApply[*v1.ReplicaSet, *v1.ReplicaSetList, *appsv1.ReplicaSetApplyConfiguration]( + gentype.NewClientWithListAndApply[*appsv1.ReplicaSet, *appsv1.ReplicaSetList, *applyconfigurationsappsv1.ReplicaSetApplyConfiguration]( "replicasets", c.RESTClient(), scheme.ParameterCodec, namespace, - func() *v1.ReplicaSet { return &v1.ReplicaSet{} }, - func() *v1.ReplicaSetList { return &v1.ReplicaSetList{} }), + func() *appsv1.ReplicaSet { return &appsv1.ReplicaSet{} }, + func() *appsv1.ReplicaSetList { return &appsv1.ReplicaSetList{} }, + gentype.PrefersProtobuf[*appsv1.ReplicaSet](), + ), } } @@ -84,6 +86,7 @@ func newReplicaSets(c *AppsV1Client, namespace string) *replicaSets { func (c *replicaSets) GetScale(ctx context.Context, 
replicaSetName string, options metav1.GetOptions) (result *autoscalingv1.Scale, err error) { result = &autoscalingv1.Scale{} err = c.GetClient().Get(). + UseProtobufAsDefault(). Namespace(c.GetNamespace()). Resource("replicasets"). Name(replicaSetName). @@ -98,6 +101,7 @@ func (c *replicaSets) GetScale(ctx context.Context, replicaSetName string, optio func (c *replicaSets) UpdateScale(ctx context.Context, replicaSetName string, scale *autoscalingv1.Scale, opts metav1.UpdateOptions) (result *autoscalingv1.Scale, err error) { result = &autoscalingv1.Scale{} err = c.GetClient().Put(). + UseProtobufAsDefault(). Namespace(c.GetNamespace()). Resource("replicasets"). Name(replicaSetName). @@ -116,19 +120,19 @@ func (c *replicaSets) ApplyScale(ctx context.Context, replicaSetName string, sca return nil, fmt.Errorf("scale provided to ApplyScale must not be nil") } patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(scale) + request, err := apply.NewRequest(c.GetClient(), scale) if err != nil { return nil, err } result = &autoscalingv1.Scale{} - err = c.GetClient().Patch(types.ApplyPatchType). + err = request. + UseProtobufAsDefault(). Namespace(c.GetNamespace()). Resource("replicasets"). Name(replicaSetName). SubResource("scale"). VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). Do(ctx). Into(result) return diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/statefulset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/statefulset.go index b25ed0723..e52cc6159 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/statefulset.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/statefulset.go @@ -19,19 +19,19 @@ limitations under the License. package v1 import ( - "context" - json "encoding/json" - "fmt" + context "context" + fmt "fmt" - v1 "k8s.io/api/apps/v1" + appsv1 "k8s.io/api/apps/v1" autoscalingv1 "k8s.io/api/autoscaling/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - appsv1 "k8s.io/client-go/applyconfigurations/apps/v1" + applyconfigurationsappsv1 "k8s.io/client-go/applyconfigurations/apps/v1" applyconfigurationsautoscalingv1 "k8s.io/client-go/applyconfigurations/autoscaling/v1" gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" + apply "k8s.io/client-go/util/apply" ) // StatefulSetsGetter has a method to return a StatefulSetInterface. @@ -42,19 +42,19 @@ type StatefulSetsGetter interface { // StatefulSetInterface has methods to work with StatefulSet resources. type StatefulSetInterface interface { - Create(ctx context.Context, statefulSet *v1.StatefulSet, opts metav1.CreateOptions) (*v1.StatefulSet, error) - Update(ctx context.Context, statefulSet *v1.StatefulSet, opts metav1.UpdateOptions) (*v1.StatefulSet, error) + Create(ctx context.Context, statefulSet *appsv1.StatefulSet, opts metav1.CreateOptions) (*appsv1.StatefulSet, error) + Update(ctx context.Context, statefulSet *appsv1.StatefulSet, opts metav1.UpdateOptions) (*appsv1.StatefulSet, error) // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
- UpdateStatus(ctx context.Context, statefulSet *v1.StatefulSet, opts metav1.UpdateOptions) (*v1.StatefulSet, error) + UpdateStatus(ctx context.Context, statefulSet *appsv1.StatefulSet, opts metav1.UpdateOptions) (*appsv1.StatefulSet, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error - Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.StatefulSet, error) - List(ctx context.Context, opts metav1.ListOptions) (*v1.StatefulSetList, error) + Get(ctx context.Context, name string, opts metav1.GetOptions) (*appsv1.StatefulSet, error) + List(ctx context.Context, opts metav1.ListOptions) (*appsv1.StatefulSetList, error) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.StatefulSet, err error) - Apply(ctx context.Context, statefulSet *appsv1.StatefulSetApplyConfiguration, opts metav1.ApplyOptions) (result *v1.StatefulSet, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *appsv1.StatefulSet, err error) + Apply(ctx context.Context, statefulSet *applyconfigurationsappsv1.StatefulSetApplyConfiguration, opts metav1.ApplyOptions) (result *appsv1.StatefulSet, err error) // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). - ApplyStatus(ctx context.Context, statefulSet *appsv1.StatefulSetApplyConfiguration, opts metav1.ApplyOptions) (result *v1.StatefulSet, err error) + ApplyStatus(ctx context.Context, statefulSet *applyconfigurationsappsv1.StatefulSetApplyConfiguration, opts metav1.ApplyOptions) (result *appsv1.StatefulSet, err error) GetScale(ctx context.Context, statefulSetName string, options metav1.GetOptions) (*autoscalingv1.Scale, error) UpdateScale(ctx context.Context, statefulSetName string, scale *autoscalingv1.Scale, opts metav1.UpdateOptions) (*autoscalingv1.Scale, error) ApplyScale(ctx context.Context, statefulSetName string, scale *applyconfigurationsautoscalingv1.ScaleApplyConfiguration, opts metav1.ApplyOptions) (*autoscalingv1.Scale, error) @@ -64,19 +64,21 @@ type StatefulSetInterface interface { // statefulSets implements StatefulSetInterface type statefulSets struct { - *gentype.ClientWithListAndApply[*v1.StatefulSet, *v1.StatefulSetList, *appsv1.StatefulSetApplyConfiguration] + *gentype.ClientWithListAndApply[*appsv1.StatefulSet, *appsv1.StatefulSetList, *applyconfigurationsappsv1.StatefulSetApplyConfiguration] } // newStatefulSets returns a StatefulSets func newStatefulSets(c *AppsV1Client, namespace string) *statefulSets { return &statefulSets{ - gentype.NewClientWithListAndApply[*v1.StatefulSet, *v1.StatefulSetList, *appsv1.StatefulSetApplyConfiguration]( + gentype.NewClientWithListAndApply[*appsv1.StatefulSet, *appsv1.StatefulSetList, *applyconfigurationsappsv1.StatefulSetApplyConfiguration]( "statefulsets", c.RESTClient(), scheme.ParameterCodec, namespace, - func() *v1.StatefulSet { return &v1.StatefulSet{} }, - func() *v1.StatefulSetList { return &v1.StatefulSetList{} }), + func() *appsv1.StatefulSet { return &appsv1.StatefulSet{} }, + func() *appsv1.StatefulSetList { return &appsv1.StatefulSetList{} }, + gentype.PrefersProtobuf[*appsv1.StatefulSet](), + ), } } @@ -84,6 +86,7 @@ func newStatefulSets(c *AppsV1Client, namespace string) *statefulSets { 
func (c *statefulSets) GetScale(ctx context.Context, statefulSetName string, options metav1.GetOptions) (result *autoscalingv1.Scale, err error) { result = &autoscalingv1.Scale{} err = c.GetClient().Get(). + UseProtobufAsDefault(). Namespace(c.GetNamespace()). Resource("statefulsets"). Name(statefulSetName). @@ -98,6 +101,7 @@ func (c *statefulSets) GetScale(ctx context.Context, statefulSetName string, opt func (c *statefulSets) UpdateScale(ctx context.Context, statefulSetName string, scale *autoscalingv1.Scale, opts metav1.UpdateOptions) (result *autoscalingv1.Scale, err error) { result = &autoscalingv1.Scale{} err = c.GetClient().Put(). + UseProtobufAsDefault(). Namespace(c.GetNamespace()). Resource("statefulsets"). Name(statefulSetName). @@ -116,19 +120,19 @@ func (c *statefulSets) ApplyScale(ctx context.Context, statefulSetName string, s return nil, fmt.Errorf("scale provided to ApplyScale must not be nil") } patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(scale) + request, err := apply.NewRequest(c.GetClient(), scale) if err != nil { return nil, err } result = &autoscalingv1.Scale{} - err = c.GetClient().Patch(types.ApplyPatchType). + err = request. + UseProtobufAsDefault(). Namespace(c.GetNamespace()). Resource("statefulsets"). Name(statefulSetName). SubResource("scale"). VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). Do(ctx). Into(result) return diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/apps_client.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/apps_client.go index 6b7148c5a..72bde633b 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/apps_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/apps_client.go @@ -19,10 +19,10 @@ limitations under the License. package v1beta1 import ( - "net/http" + http "net/http" - v1beta1 "k8s.io/api/apps/v1beta1" - "k8s.io/client-go/kubernetes/scheme" + appsv1beta1 "k8s.io/api/apps/v1beta1" + scheme "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -95,10 +95,10 @@ func New(c rest.Interface) *AppsV1beta1Client { } func setConfigDefaults(config *rest.Config) error { - gv := v1beta1.SchemeGroupVersion + gv := appsv1beta1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/controllerrevision.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/controllerrevision.go index 185f7cc4e..1bd92695b 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/controllerrevision.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/controllerrevision.go @@ -19,13 +19,13 @@ limitations under the License. 
package v1beta1 import ( - "context" + context "context" - v1beta1 "k8s.io/api/apps/v1beta1" + appsv1beta1 "k8s.io/api/apps/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - appsv1beta1 "k8s.io/client-go/applyconfigurations/apps/v1beta1" + applyconfigurationsappsv1beta1 "k8s.io/client-go/applyconfigurations/apps/v1beta1" gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" ) @@ -38,32 +38,34 @@ type ControllerRevisionsGetter interface { // ControllerRevisionInterface has methods to work with ControllerRevision resources. type ControllerRevisionInterface interface { - Create(ctx context.Context, controllerRevision *v1beta1.ControllerRevision, opts v1.CreateOptions) (*v1beta1.ControllerRevision, error) - Update(ctx context.Context, controllerRevision *v1beta1.ControllerRevision, opts v1.UpdateOptions) (*v1beta1.ControllerRevision, error) + Create(ctx context.Context, controllerRevision *appsv1beta1.ControllerRevision, opts v1.CreateOptions) (*appsv1beta1.ControllerRevision, error) + Update(ctx context.Context, controllerRevision *appsv1beta1.ControllerRevision, opts v1.UpdateOptions) (*appsv1beta1.ControllerRevision, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.ControllerRevision, error) - List(ctx context.Context, opts v1.ListOptions) (*v1beta1.ControllerRevisionList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*appsv1beta1.ControllerRevision, error) + List(ctx context.Context, opts v1.ListOptions) (*appsv1beta1.ControllerRevisionList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ControllerRevision, err error) - Apply(ctx context.Context, controllerRevision *appsv1beta1.ControllerRevisionApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.ControllerRevision, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *appsv1beta1.ControllerRevision, err error) + Apply(ctx context.Context, controllerRevision *applyconfigurationsappsv1beta1.ControllerRevisionApplyConfiguration, opts v1.ApplyOptions) (result *appsv1beta1.ControllerRevision, err error) ControllerRevisionExpansion } // controllerRevisions implements ControllerRevisionInterface type controllerRevisions struct { - *gentype.ClientWithListAndApply[*v1beta1.ControllerRevision, *v1beta1.ControllerRevisionList, *appsv1beta1.ControllerRevisionApplyConfiguration] + *gentype.ClientWithListAndApply[*appsv1beta1.ControllerRevision, *appsv1beta1.ControllerRevisionList, *applyconfigurationsappsv1beta1.ControllerRevisionApplyConfiguration] } // newControllerRevisions returns a ControllerRevisions func newControllerRevisions(c *AppsV1beta1Client, namespace string) *controllerRevisions { return &controllerRevisions{ - gentype.NewClientWithListAndApply[*v1beta1.ControllerRevision, *v1beta1.ControllerRevisionList, *appsv1beta1.ControllerRevisionApplyConfiguration]( + gentype.NewClientWithListAndApply[*appsv1beta1.ControllerRevision, *appsv1beta1.ControllerRevisionList, *applyconfigurationsappsv1beta1.ControllerRevisionApplyConfiguration]( "controllerrevisions", 
c.RESTClient(), scheme.ParameterCodec, namespace, - func() *v1beta1.ControllerRevision { return &v1beta1.ControllerRevision{} }, - func() *v1beta1.ControllerRevisionList { return &v1beta1.ControllerRevisionList{} }), + func() *appsv1beta1.ControllerRevision { return &appsv1beta1.ControllerRevision{} }, + func() *appsv1beta1.ControllerRevisionList { return &appsv1beta1.ControllerRevisionList{} }, + gentype.PrefersProtobuf[*appsv1beta1.ControllerRevision](), + ), } } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/deployment.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/deployment.go index 06e4b7bf9..e01dd5a2f 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/deployment.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/deployment.go @@ -19,13 +19,13 @@ limitations under the License. package v1beta1 import ( - "context" + context "context" - v1beta1 "k8s.io/api/apps/v1beta1" + appsv1beta1 "k8s.io/api/apps/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - appsv1beta1 "k8s.io/client-go/applyconfigurations/apps/v1beta1" + applyconfigurationsappsv1beta1 "k8s.io/client-go/applyconfigurations/apps/v1beta1" gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" ) @@ -38,36 +38,38 @@ type DeploymentsGetter interface { // DeploymentInterface has methods to work with Deployment resources. type DeploymentInterface interface { - Create(ctx context.Context, deployment *v1beta1.Deployment, opts v1.CreateOptions) (*v1beta1.Deployment, error) - Update(ctx context.Context, deployment *v1beta1.Deployment, opts v1.UpdateOptions) (*v1beta1.Deployment, error) + Create(ctx context.Context, deployment *appsv1beta1.Deployment, opts v1.CreateOptions) (*appsv1beta1.Deployment, error) + Update(ctx context.Context, deployment *appsv1beta1.Deployment, opts v1.UpdateOptions) (*appsv1beta1.Deployment, error) // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
- UpdateStatus(ctx context.Context, deployment *v1beta1.Deployment, opts v1.UpdateOptions) (*v1beta1.Deployment, error) + UpdateStatus(ctx context.Context, deployment *appsv1beta1.Deployment, opts v1.UpdateOptions) (*appsv1beta1.Deployment, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.Deployment, error) - List(ctx context.Context, opts v1.ListOptions) (*v1beta1.DeploymentList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*appsv1beta1.Deployment, error) + List(ctx context.Context, opts v1.ListOptions) (*appsv1beta1.DeploymentList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Deployment, err error) - Apply(ctx context.Context, deployment *appsv1beta1.DeploymentApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.Deployment, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *appsv1beta1.Deployment, err error) + Apply(ctx context.Context, deployment *applyconfigurationsappsv1beta1.DeploymentApplyConfiguration, opts v1.ApplyOptions) (result *appsv1beta1.Deployment, err error) // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). - ApplyStatus(ctx context.Context, deployment *appsv1beta1.DeploymentApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.Deployment, err error) + ApplyStatus(ctx context.Context, deployment *applyconfigurationsappsv1beta1.DeploymentApplyConfiguration, opts v1.ApplyOptions) (result *appsv1beta1.Deployment, err error) DeploymentExpansion } // deployments implements DeploymentInterface type deployments struct { - *gentype.ClientWithListAndApply[*v1beta1.Deployment, *v1beta1.DeploymentList, *appsv1beta1.DeploymentApplyConfiguration] + *gentype.ClientWithListAndApply[*appsv1beta1.Deployment, *appsv1beta1.DeploymentList, *applyconfigurationsappsv1beta1.DeploymentApplyConfiguration] } // newDeployments returns a Deployments func newDeployments(c *AppsV1beta1Client, namespace string) *deployments { return &deployments{ - gentype.NewClientWithListAndApply[*v1beta1.Deployment, *v1beta1.DeploymentList, *appsv1beta1.DeploymentApplyConfiguration]( + gentype.NewClientWithListAndApply[*appsv1beta1.Deployment, *appsv1beta1.DeploymentList, *applyconfigurationsappsv1beta1.DeploymentApplyConfiguration]( "deployments", c.RESTClient(), scheme.ParameterCodec, namespace, - func() *v1beta1.Deployment { return &v1beta1.Deployment{} }, - func() *v1beta1.DeploymentList { return &v1beta1.DeploymentList{} }), + func() *appsv1beta1.Deployment { return &appsv1beta1.Deployment{} }, + func() *appsv1beta1.DeploymentList { return &appsv1beta1.DeploymentList{} }, + gentype.PrefersProtobuf[*appsv1beta1.Deployment](), + ), } } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_apps_client.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_apps_client.go index 8e65d78d2..ad3d0d6d3 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_apps_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_apps_client.go @@ -29,15 +29,15 @@ type FakeAppsV1beta1 struct { } func (c *FakeAppsV1beta1) 
ControllerRevisions(namespace string) v1beta1.ControllerRevisionInterface { - return &FakeControllerRevisions{c, namespace} + return newFakeControllerRevisions(c, namespace) } func (c *FakeAppsV1beta1) Deployments(namespace string) v1beta1.DeploymentInterface { - return &FakeDeployments{c, namespace} + return newFakeDeployments(c, namespace) } func (c *FakeAppsV1beta1) StatefulSets(namespace string) v1beta1.StatefulSetInterface { - return &FakeStatefulSets{c, namespace} + return newFakeStatefulSets(c, namespace) } // RESTClient returns a RESTClient that is used to communicate diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_controllerrevision.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_controllerrevision.go index 7ea2b2e11..fd075b32c 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_controllerrevision.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_controllerrevision.go @@ -19,142 +19,35 @@ limitations under the License. package fake import ( - "context" - json "encoding/json" - "fmt" - v1beta1 "k8s.io/api/apps/v1beta1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" appsv1beta1 "k8s.io/client-go/applyconfigurations/apps/v1beta1" - testing "k8s.io/client-go/testing" + gentype "k8s.io/client-go/gentype" + typedappsv1beta1 "k8s.io/client-go/kubernetes/typed/apps/v1beta1" ) -// FakeControllerRevisions implements ControllerRevisionInterface -type FakeControllerRevisions struct { +// fakeControllerRevisions implements ControllerRevisionInterface +type fakeControllerRevisions struct { + *gentype.FakeClientWithListAndApply[*v1beta1.ControllerRevision, *v1beta1.ControllerRevisionList, *appsv1beta1.ControllerRevisionApplyConfiguration] Fake *FakeAppsV1beta1 - ns string -} - -var controllerrevisionsResource = v1beta1.SchemeGroupVersion.WithResource("controllerrevisions") - -var controllerrevisionsKind = v1beta1.SchemeGroupVersion.WithKind("ControllerRevision") - -// Get takes name of the controllerRevision, and returns the corresponding controllerRevision object, and an error if there is any. -func (c *FakeControllerRevisions) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.ControllerRevision, err error) { - emptyResult := &v1beta1.ControllerRevision{} - obj, err := c.Fake. - Invokes(testing.NewGetActionWithOptions(controllerrevisionsResource, c.ns, name, options), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.ControllerRevision), err -} - -// List takes label and field selectors, and returns the list of ControllerRevisions that match those selectors. -func (c *FakeControllerRevisions) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.ControllerRevisionList, err error) { - emptyResult := &v1beta1.ControllerRevisionList{} - obj, err := c.Fake. 
- Invokes(testing.NewListActionWithOptions(controllerrevisionsResource, controllerrevisionsKind, c.ns, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1beta1.ControllerRevisionList{ListMeta: obj.(*v1beta1.ControllerRevisionList).ListMeta} - for _, item := range obj.(*v1beta1.ControllerRevisionList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested controllerRevisions. -func (c *FakeControllerRevisions) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchActionWithOptions(controllerrevisionsResource, c.ns, opts)) - } -// Create takes the representation of a controllerRevision and creates it. Returns the server's representation of the controllerRevision, and an error, if there is any. -func (c *FakeControllerRevisions) Create(ctx context.Context, controllerRevision *v1beta1.ControllerRevision, opts v1.CreateOptions) (result *v1beta1.ControllerRevision, err error) { - emptyResult := &v1beta1.ControllerRevision{} - obj, err := c.Fake. - Invokes(testing.NewCreateActionWithOptions(controllerrevisionsResource, c.ns, controllerRevision, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.ControllerRevision), err -} - -// Update takes the representation of a controllerRevision and updates it. Returns the server's representation of the controllerRevision, and an error, if there is any. -func (c *FakeControllerRevisions) Update(ctx context.Context, controllerRevision *v1beta1.ControllerRevision, opts v1.UpdateOptions) (result *v1beta1.ControllerRevision, err error) { - emptyResult := &v1beta1.ControllerRevision{} - obj, err := c.Fake. - Invokes(testing.NewUpdateActionWithOptions(controllerrevisionsResource, c.ns, controllerRevision, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.ControllerRevision), err -} - -// Delete takes name of the controllerRevision and deletes it. Returns an error if one occurs. -func (c *FakeControllerRevisions) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(controllerrevisionsResource, c.ns, name, opts), &v1beta1.ControllerRevision{}) - - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeControllerRevisions) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionActionWithOptions(controllerrevisionsResource, c.ns, opts, listOpts) - - _, err := c.Fake.Invokes(action, &v1beta1.ControllerRevisionList{}) - return err -} - -// Patch applies the patch and returns the patched controllerRevision. -func (c *FakeControllerRevisions) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ControllerRevision, err error) { - emptyResult := &v1beta1.ControllerRevision{} - obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceActionWithOptions(controllerrevisionsResource, c.ns, name, pt, data, opts, subresources...), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.ControllerRevision), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied controllerRevision. -func (c *FakeControllerRevisions) Apply(ctx context.Context, controllerRevision *appsv1beta1.ControllerRevisionApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.ControllerRevision, err error) { - if controllerRevision == nil { - return nil, fmt.Errorf("controllerRevision provided to Apply must not be nil") - } - data, err := json.Marshal(controllerRevision) - if err != nil { - return nil, err - } - name := controllerRevision.Name - if name == nil { - return nil, fmt.Errorf("controllerRevision.Name must be provided to Apply") - } - emptyResult := &v1beta1.ControllerRevision{} - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceActionWithOptions(controllerrevisionsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) - - if obj == nil { - return emptyResult, err +func newFakeControllerRevisions(fake *FakeAppsV1beta1, namespace string) typedappsv1beta1.ControllerRevisionInterface { + return &fakeControllerRevisions{ + gentype.NewFakeClientWithListAndApply[*v1beta1.ControllerRevision, *v1beta1.ControllerRevisionList, *appsv1beta1.ControllerRevisionApplyConfiguration]( + fake.Fake, + namespace, + v1beta1.SchemeGroupVersion.WithResource("controllerrevisions"), + v1beta1.SchemeGroupVersion.WithKind("ControllerRevision"), + func() *v1beta1.ControllerRevision { return &v1beta1.ControllerRevision{} }, + func() *v1beta1.ControllerRevisionList { return &v1beta1.ControllerRevisionList{} }, + func(dst, src *v1beta1.ControllerRevisionList) { dst.ListMeta = src.ListMeta }, + func(list *v1beta1.ControllerRevisionList) []*v1beta1.ControllerRevision { + return gentype.ToPointerSlice(list.Items) + }, + func(list *v1beta1.ControllerRevisionList, items []*v1beta1.ControllerRevision) { + list.Items = gentype.FromPointerSlice(items) + }, + ), + fake, } - return obj.(*v1beta1.ControllerRevision), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_deployment.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_deployment.go index 05c557ecb..edef6cb05 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_deployment.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_deployment.go @@ -19,179 +19,33 @@ limitations under the License. 
package fake import ( - "context" - json "encoding/json" - "fmt" - v1beta1 "k8s.io/api/apps/v1beta1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" appsv1beta1 "k8s.io/client-go/applyconfigurations/apps/v1beta1" - testing "k8s.io/client-go/testing" + gentype "k8s.io/client-go/gentype" + typedappsv1beta1 "k8s.io/client-go/kubernetes/typed/apps/v1beta1" ) -// FakeDeployments implements DeploymentInterface -type FakeDeployments struct { +// fakeDeployments implements DeploymentInterface +type fakeDeployments struct { + *gentype.FakeClientWithListAndApply[*v1beta1.Deployment, *v1beta1.DeploymentList, *appsv1beta1.DeploymentApplyConfiguration] Fake *FakeAppsV1beta1 - ns string -} - -var deploymentsResource = v1beta1.SchemeGroupVersion.WithResource("deployments") - -var deploymentsKind = v1beta1.SchemeGroupVersion.WithKind("Deployment") - -// Get takes name of the deployment, and returns the corresponding deployment object, and an error if there is any. -func (c *FakeDeployments) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.Deployment, err error) { - emptyResult := &v1beta1.Deployment{} - obj, err := c.Fake. - Invokes(testing.NewGetActionWithOptions(deploymentsResource, c.ns, name, options), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.Deployment), err -} - -// List takes label and field selectors, and returns the list of Deployments that match those selectors. -func (c *FakeDeployments) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.DeploymentList, err error) { - emptyResult := &v1beta1.DeploymentList{} - obj, err := c.Fake. - Invokes(testing.NewListActionWithOptions(deploymentsResource, deploymentsKind, c.ns, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1beta1.DeploymentList{ListMeta: obj.(*v1beta1.DeploymentList).ListMeta} - for _, item := range obj.(*v1beta1.DeploymentList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested deployments. -func (c *FakeDeployments) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchActionWithOptions(deploymentsResource, c.ns, opts)) - -} - -// Create takes the representation of a deployment and creates it. Returns the server's representation of the deployment, and an error, if there is any. -func (c *FakeDeployments) Create(ctx context.Context, deployment *v1beta1.Deployment, opts v1.CreateOptions) (result *v1beta1.Deployment, err error) { - emptyResult := &v1beta1.Deployment{} - obj, err := c.Fake. - Invokes(testing.NewCreateActionWithOptions(deploymentsResource, c.ns, deployment, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.Deployment), err -} - -// Update takes the representation of a deployment and updates it. Returns the server's representation of the deployment, and an error, if there is any. -func (c *FakeDeployments) Update(ctx context.Context, deployment *v1beta1.Deployment, opts v1.UpdateOptions) (result *v1beta1.Deployment, err error) { - emptyResult := &v1beta1.Deployment{} - obj, err := c.Fake. 
- Invokes(testing.NewUpdateActionWithOptions(deploymentsResource, c.ns, deployment, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.Deployment), err } -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeDeployments) UpdateStatus(ctx context.Context, deployment *v1beta1.Deployment, opts v1.UpdateOptions) (result *v1beta1.Deployment, err error) { - emptyResult := &v1beta1.Deployment{} - obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceActionWithOptions(deploymentsResource, "status", c.ns, deployment, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.Deployment), err -} - -// Delete takes name of the deployment and deletes it. Returns an error if one occurs. -func (c *FakeDeployments) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(deploymentsResource, c.ns, name, opts), &v1beta1.Deployment{}) - - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeDeployments) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionActionWithOptions(deploymentsResource, c.ns, opts, listOpts) - - _, err := c.Fake.Invokes(action, &v1beta1.DeploymentList{}) - return err -} - -// Patch applies the patch and returns the patched deployment. -func (c *FakeDeployments) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Deployment, err error) { - emptyResult := &v1beta1.Deployment{} - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceActionWithOptions(deploymentsResource, c.ns, name, pt, data, opts, subresources...), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.Deployment), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied deployment. -func (c *FakeDeployments) Apply(ctx context.Context, deployment *appsv1beta1.DeploymentApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.Deployment, err error) { - if deployment == nil { - return nil, fmt.Errorf("deployment provided to Apply must not be nil") - } - data, err := json.Marshal(deployment) - if err != nil { - return nil, err - } - name := deployment.Name - if name == nil { - return nil, fmt.Errorf("deployment.Name must be provided to Apply") - } - emptyResult := &v1beta1.Deployment{} - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceActionWithOptions(deploymentsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.Deployment), err -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
-func (c *FakeDeployments) ApplyStatus(ctx context.Context, deployment *appsv1beta1.DeploymentApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.Deployment, err error) { - if deployment == nil { - return nil, fmt.Errorf("deployment provided to Apply must not be nil") - } - data, err := json.Marshal(deployment) - if err != nil { - return nil, err - } - name := deployment.Name - if name == nil { - return nil, fmt.Errorf("deployment.Name must be provided to Apply") - } - emptyResult := &v1beta1.Deployment{} - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceActionWithOptions(deploymentsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) - - if obj == nil { - return emptyResult, err +func newFakeDeployments(fake *FakeAppsV1beta1, namespace string) typedappsv1beta1.DeploymentInterface { + return &fakeDeployments{ + gentype.NewFakeClientWithListAndApply[*v1beta1.Deployment, *v1beta1.DeploymentList, *appsv1beta1.DeploymentApplyConfiguration]( + fake.Fake, + namespace, + v1beta1.SchemeGroupVersion.WithResource("deployments"), + v1beta1.SchemeGroupVersion.WithKind("Deployment"), + func() *v1beta1.Deployment { return &v1beta1.Deployment{} }, + func() *v1beta1.DeploymentList { return &v1beta1.DeploymentList{} }, + func(dst, src *v1beta1.DeploymentList) { dst.ListMeta = src.ListMeta }, + func(list *v1beta1.DeploymentList) []*v1beta1.Deployment { return gentype.ToPointerSlice(list.Items) }, + func(list *v1beta1.DeploymentList, items []*v1beta1.Deployment) { + list.Items = gentype.FromPointerSlice(items) + }, + ), + fake, } - return obj.(*v1beta1.Deployment), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_statefulset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_statefulset.go index c38690554..e6a87f590 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_statefulset.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_statefulset.go @@ -19,179 +19,33 @@ limitations under the License. package fake import ( - "context" - json "encoding/json" - "fmt" - v1beta1 "k8s.io/api/apps/v1beta1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" appsv1beta1 "k8s.io/client-go/applyconfigurations/apps/v1beta1" - testing "k8s.io/client-go/testing" + gentype "k8s.io/client-go/gentype" + typedappsv1beta1 "k8s.io/client-go/kubernetes/typed/apps/v1beta1" ) -// FakeStatefulSets implements StatefulSetInterface -type FakeStatefulSets struct { +// fakeStatefulSets implements StatefulSetInterface +type fakeStatefulSets struct { + *gentype.FakeClientWithListAndApply[*v1beta1.StatefulSet, *v1beta1.StatefulSetList, *appsv1beta1.StatefulSetApplyConfiguration] Fake *FakeAppsV1beta1 - ns string -} - -var statefulsetsResource = v1beta1.SchemeGroupVersion.WithResource("statefulsets") - -var statefulsetsKind = v1beta1.SchemeGroupVersion.WithKind("StatefulSet") - -// Get takes name of the statefulSet, and returns the corresponding statefulSet object, and an error if there is any. -func (c *FakeStatefulSets) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.StatefulSet, err error) { - emptyResult := &v1beta1.StatefulSet{} - obj, err := c.Fake. 
- Invokes(testing.NewGetActionWithOptions(statefulsetsResource, c.ns, name, options), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.StatefulSet), err -} - -// List takes label and field selectors, and returns the list of StatefulSets that match those selectors. -func (c *FakeStatefulSets) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.StatefulSetList, err error) { - emptyResult := &v1beta1.StatefulSetList{} - obj, err := c.Fake. - Invokes(testing.NewListActionWithOptions(statefulsetsResource, statefulsetsKind, c.ns, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1beta1.StatefulSetList{ListMeta: obj.(*v1beta1.StatefulSetList).ListMeta} - for _, item := range obj.(*v1beta1.StatefulSetList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested statefulSets. -func (c *FakeStatefulSets) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchActionWithOptions(statefulsetsResource, c.ns, opts)) - -} - -// Create takes the representation of a statefulSet and creates it. Returns the server's representation of the statefulSet, and an error, if there is any. -func (c *FakeStatefulSets) Create(ctx context.Context, statefulSet *v1beta1.StatefulSet, opts v1.CreateOptions) (result *v1beta1.StatefulSet, err error) { - emptyResult := &v1beta1.StatefulSet{} - obj, err := c.Fake. - Invokes(testing.NewCreateActionWithOptions(statefulsetsResource, c.ns, statefulSet, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.StatefulSet), err -} - -// Update takes the representation of a statefulSet and updates it. Returns the server's representation of the statefulSet, and an error, if there is any. -func (c *FakeStatefulSets) Update(ctx context.Context, statefulSet *v1beta1.StatefulSet, opts v1.UpdateOptions) (result *v1beta1.StatefulSet, err error) { - emptyResult := &v1beta1.StatefulSet{} - obj, err := c.Fake. - Invokes(testing.NewUpdateActionWithOptions(statefulsetsResource, c.ns, statefulSet, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.StatefulSet), err } -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeStatefulSets) UpdateStatus(ctx context.Context, statefulSet *v1beta1.StatefulSet, opts v1.UpdateOptions) (result *v1beta1.StatefulSet, err error) { - emptyResult := &v1beta1.StatefulSet{} - obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceActionWithOptions(statefulsetsResource, "status", c.ns, statefulSet, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.StatefulSet), err -} - -// Delete takes name of the statefulSet and deletes it. Returns an error if one occurs. -func (c *FakeStatefulSets) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(statefulsetsResource, c.ns, name, opts), &v1beta1.StatefulSet{}) - - return err -} - -// DeleteCollection deletes a collection of objects. 
-func (c *FakeStatefulSets) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionActionWithOptions(statefulsetsResource, c.ns, opts, listOpts) - - _, err := c.Fake.Invokes(action, &v1beta1.StatefulSetList{}) - return err -} - -// Patch applies the patch and returns the patched statefulSet. -func (c *FakeStatefulSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.StatefulSet, err error) { - emptyResult := &v1beta1.StatefulSet{} - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceActionWithOptions(statefulsetsResource, c.ns, name, pt, data, opts, subresources...), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.StatefulSet), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied statefulSet. -func (c *FakeStatefulSets) Apply(ctx context.Context, statefulSet *appsv1beta1.StatefulSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.StatefulSet, err error) { - if statefulSet == nil { - return nil, fmt.Errorf("statefulSet provided to Apply must not be nil") - } - data, err := json.Marshal(statefulSet) - if err != nil { - return nil, err - } - name := statefulSet.Name - if name == nil { - return nil, fmt.Errorf("statefulSet.Name must be provided to Apply") - } - emptyResult := &v1beta1.StatefulSet{} - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceActionWithOptions(statefulsetsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.StatefulSet), err -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *FakeStatefulSets) ApplyStatus(ctx context.Context, statefulSet *appsv1beta1.StatefulSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.StatefulSet, err error) { - if statefulSet == nil { - return nil, fmt.Errorf("statefulSet provided to Apply must not be nil") - } - data, err := json.Marshal(statefulSet) - if err != nil { - return nil, err - } - name := statefulSet.Name - if name == nil { - return nil, fmt.Errorf("statefulSet.Name must be provided to Apply") - } - emptyResult := &v1beta1.StatefulSet{} - obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceActionWithOptions(statefulsetsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) - - if obj == nil { - return emptyResult, err +func newFakeStatefulSets(fake *FakeAppsV1beta1, namespace string) typedappsv1beta1.StatefulSetInterface { + return &fakeStatefulSets{ + gentype.NewFakeClientWithListAndApply[*v1beta1.StatefulSet, *v1beta1.StatefulSetList, *appsv1beta1.StatefulSetApplyConfiguration]( + fake.Fake, + namespace, + v1beta1.SchemeGroupVersion.WithResource("statefulsets"), + v1beta1.SchemeGroupVersion.WithKind("StatefulSet"), + func() *v1beta1.StatefulSet { return &v1beta1.StatefulSet{} }, + func() *v1beta1.StatefulSetList { return &v1beta1.StatefulSetList{} }, + func(dst, src *v1beta1.StatefulSetList) { dst.ListMeta = src.ListMeta }, + func(list *v1beta1.StatefulSetList) []*v1beta1.StatefulSet { return gentype.ToPointerSlice(list.Items) }, + func(list *v1beta1.StatefulSetList, items []*v1beta1.StatefulSet) { + list.Items = gentype.FromPointerSlice(items) + }, + ), + fake, } - return obj.(*v1beta1.StatefulSet), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/statefulset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/statefulset.go index 1ff69eb99..b88acdeb0 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/statefulset.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/statefulset.go @@ -19,13 +19,13 @@ limitations under the License. package v1beta1 import ( - "context" + context "context" - v1beta1 "k8s.io/api/apps/v1beta1" + appsv1beta1 "k8s.io/api/apps/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - appsv1beta1 "k8s.io/client-go/applyconfigurations/apps/v1beta1" + applyconfigurationsappsv1beta1 "k8s.io/client-go/applyconfigurations/apps/v1beta1" gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" ) @@ -38,36 +38,38 @@ type StatefulSetsGetter interface { // StatefulSetInterface has methods to work with StatefulSet resources. type StatefulSetInterface interface { - Create(ctx context.Context, statefulSet *v1beta1.StatefulSet, opts v1.CreateOptions) (*v1beta1.StatefulSet, error) - Update(ctx context.Context, statefulSet *v1beta1.StatefulSet, opts v1.UpdateOptions) (*v1beta1.StatefulSet, error) + Create(ctx context.Context, statefulSet *appsv1beta1.StatefulSet, opts v1.CreateOptions) (*appsv1beta1.StatefulSet, error) + Update(ctx context.Context, statefulSet *appsv1beta1.StatefulSet, opts v1.UpdateOptions) (*appsv1beta1.StatefulSet, error) // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
- UpdateStatus(ctx context.Context, statefulSet *v1beta1.StatefulSet, opts v1.UpdateOptions) (*v1beta1.StatefulSet, error) + UpdateStatus(ctx context.Context, statefulSet *appsv1beta1.StatefulSet, opts v1.UpdateOptions) (*appsv1beta1.StatefulSet, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.StatefulSet, error) - List(ctx context.Context, opts v1.ListOptions) (*v1beta1.StatefulSetList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*appsv1beta1.StatefulSet, error) + List(ctx context.Context, opts v1.ListOptions) (*appsv1beta1.StatefulSetList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.StatefulSet, err error) - Apply(ctx context.Context, statefulSet *appsv1beta1.StatefulSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.StatefulSet, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *appsv1beta1.StatefulSet, err error) + Apply(ctx context.Context, statefulSet *applyconfigurationsappsv1beta1.StatefulSetApplyConfiguration, opts v1.ApplyOptions) (result *appsv1beta1.StatefulSet, err error) // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). - ApplyStatus(ctx context.Context, statefulSet *appsv1beta1.StatefulSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.StatefulSet, err error) + ApplyStatus(ctx context.Context, statefulSet *applyconfigurationsappsv1beta1.StatefulSetApplyConfiguration, opts v1.ApplyOptions) (result *appsv1beta1.StatefulSet, err error) StatefulSetExpansion } // statefulSets implements StatefulSetInterface type statefulSets struct { - *gentype.ClientWithListAndApply[*v1beta1.StatefulSet, *v1beta1.StatefulSetList, *appsv1beta1.StatefulSetApplyConfiguration] + *gentype.ClientWithListAndApply[*appsv1beta1.StatefulSet, *appsv1beta1.StatefulSetList, *applyconfigurationsappsv1beta1.StatefulSetApplyConfiguration] } // newStatefulSets returns a StatefulSets func newStatefulSets(c *AppsV1beta1Client, namespace string) *statefulSets { return &statefulSets{ - gentype.NewClientWithListAndApply[*v1beta1.StatefulSet, *v1beta1.StatefulSetList, *appsv1beta1.StatefulSetApplyConfiguration]( + gentype.NewClientWithListAndApply[*appsv1beta1.StatefulSet, *appsv1beta1.StatefulSetList, *applyconfigurationsappsv1beta1.StatefulSetApplyConfiguration]( "statefulsets", c.RESTClient(), scheme.ParameterCodec, namespace, - func() *v1beta1.StatefulSet { return &v1beta1.StatefulSet{} }, - func() *v1beta1.StatefulSetList { return &v1beta1.StatefulSetList{} }), + func() *appsv1beta1.StatefulSet { return &appsv1beta1.StatefulSet{} }, + func() *appsv1beta1.StatefulSetList { return &appsv1beta1.StatefulSetList{} }, + gentype.PrefersProtobuf[*appsv1beta1.StatefulSet](), + ), } } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/apps_client.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/apps_client.go index 968abc56f..e13d12a76 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/apps_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/apps_client.go @@ -19,10 +19,10 @@ limitations under the License. 
package v1beta2 import ( - "net/http" + http "net/http" - v1beta2 "k8s.io/api/apps/v1beta2" - "k8s.io/client-go/kubernetes/scheme" + appsv1beta2 "k8s.io/api/apps/v1beta2" + scheme "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -105,10 +105,10 @@ func New(c rest.Interface) *AppsV1beta2Client { } func setConfigDefaults(config *rest.Config) error { - gv := v1beta2.SchemeGroupVersion + gv := appsv1beta2.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/controllerrevision.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/controllerrevision.go index 6caee6a72..a170805b3 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/controllerrevision.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/controllerrevision.go @@ -19,13 +19,13 @@ limitations under the License. package v1beta2 import ( - "context" + context "context" - v1beta2 "k8s.io/api/apps/v1beta2" + appsv1beta2 "k8s.io/api/apps/v1beta2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - appsv1beta2 "k8s.io/client-go/applyconfigurations/apps/v1beta2" + applyconfigurationsappsv1beta2 "k8s.io/client-go/applyconfigurations/apps/v1beta2" gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" ) @@ -38,32 +38,34 @@ type ControllerRevisionsGetter interface { // ControllerRevisionInterface has methods to work with ControllerRevision resources. 
type ControllerRevisionInterface interface { - Create(ctx context.Context, controllerRevision *v1beta2.ControllerRevision, opts v1.CreateOptions) (*v1beta2.ControllerRevision, error) - Update(ctx context.Context, controllerRevision *v1beta2.ControllerRevision, opts v1.UpdateOptions) (*v1beta2.ControllerRevision, error) + Create(ctx context.Context, controllerRevision *appsv1beta2.ControllerRevision, opts v1.CreateOptions) (*appsv1beta2.ControllerRevision, error) + Update(ctx context.Context, controllerRevision *appsv1beta2.ControllerRevision, opts v1.UpdateOptions) (*appsv1beta2.ControllerRevision, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta2.ControllerRevision, error) - List(ctx context.Context, opts v1.ListOptions) (*v1beta2.ControllerRevisionList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*appsv1beta2.ControllerRevision, error) + List(ctx context.Context, opts v1.ListOptions) (*appsv1beta2.ControllerRevisionList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.ControllerRevision, err error) - Apply(ctx context.Context, controllerRevision *appsv1beta2.ControllerRevisionApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.ControllerRevision, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *appsv1beta2.ControllerRevision, err error) + Apply(ctx context.Context, controllerRevision *applyconfigurationsappsv1beta2.ControllerRevisionApplyConfiguration, opts v1.ApplyOptions) (result *appsv1beta2.ControllerRevision, err error) ControllerRevisionExpansion } // controllerRevisions implements ControllerRevisionInterface type controllerRevisions struct { - *gentype.ClientWithListAndApply[*v1beta2.ControllerRevision, *v1beta2.ControllerRevisionList, *appsv1beta2.ControllerRevisionApplyConfiguration] + *gentype.ClientWithListAndApply[*appsv1beta2.ControllerRevision, *appsv1beta2.ControllerRevisionList, *applyconfigurationsappsv1beta2.ControllerRevisionApplyConfiguration] } // newControllerRevisions returns a ControllerRevisions func newControllerRevisions(c *AppsV1beta2Client, namespace string) *controllerRevisions { return &controllerRevisions{ - gentype.NewClientWithListAndApply[*v1beta2.ControllerRevision, *v1beta2.ControllerRevisionList, *appsv1beta2.ControllerRevisionApplyConfiguration]( + gentype.NewClientWithListAndApply[*appsv1beta2.ControllerRevision, *appsv1beta2.ControllerRevisionList, *applyconfigurationsappsv1beta2.ControllerRevisionApplyConfiguration]( "controllerrevisions", c.RESTClient(), scheme.ParameterCodec, namespace, - func() *v1beta2.ControllerRevision { return &v1beta2.ControllerRevision{} }, - func() *v1beta2.ControllerRevisionList { return &v1beta2.ControllerRevisionList{} }), + func() *appsv1beta2.ControllerRevision { return &appsv1beta2.ControllerRevision{} }, + func() *appsv1beta2.ControllerRevisionList { return &appsv1beta2.ControllerRevisionList{} }, + gentype.PrefersProtobuf[*appsv1beta2.ControllerRevision](), + ), } } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/daemonset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/daemonset.go index 766dc6d43..f078121b0 
100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/daemonset.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/daemonset.go @@ -19,13 +19,13 @@ limitations under the License. package v1beta2 import ( - "context" + context "context" - v1beta2 "k8s.io/api/apps/v1beta2" + appsv1beta2 "k8s.io/api/apps/v1beta2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - appsv1beta2 "k8s.io/client-go/applyconfigurations/apps/v1beta2" + applyconfigurationsappsv1beta2 "k8s.io/client-go/applyconfigurations/apps/v1beta2" gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" ) @@ -38,36 +38,38 @@ type DaemonSetsGetter interface { // DaemonSetInterface has methods to work with DaemonSet resources. type DaemonSetInterface interface { - Create(ctx context.Context, daemonSet *v1beta2.DaemonSet, opts v1.CreateOptions) (*v1beta2.DaemonSet, error) - Update(ctx context.Context, daemonSet *v1beta2.DaemonSet, opts v1.UpdateOptions) (*v1beta2.DaemonSet, error) + Create(ctx context.Context, daemonSet *appsv1beta2.DaemonSet, opts v1.CreateOptions) (*appsv1beta2.DaemonSet, error) + Update(ctx context.Context, daemonSet *appsv1beta2.DaemonSet, opts v1.UpdateOptions) (*appsv1beta2.DaemonSet, error) // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - UpdateStatus(ctx context.Context, daemonSet *v1beta2.DaemonSet, opts v1.UpdateOptions) (*v1beta2.DaemonSet, error) + UpdateStatus(ctx context.Context, daemonSet *appsv1beta2.DaemonSet, opts v1.UpdateOptions) (*appsv1beta2.DaemonSet, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta2.DaemonSet, error) - List(ctx context.Context, opts v1.ListOptions) (*v1beta2.DaemonSetList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*appsv1beta2.DaemonSet, error) + List(ctx context.Context, opts v1.ListOptions) (*appsv1beta2.DaemonSetList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.DaemonSet, err error) - Apply(ctx context.Context, daemonSet *appsv1beta2.DaemonSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.DaemonSet, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *appsv1beta2.DaemonSet, err error) + Apply(ctx context.Context, daemonSet *applyconfigurationsappsv1beta2.DaemonSetApplyConfiguration, opts v1.ApplyOptions) (result *appsv1beta2.DaemonSet, err error) // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
- ApplyStatus(ctx context.Context, daemonSet *appsv1beta2.DaemonSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.DaemonSet, err error) + ApplyStatus(ctx context.Context, daemonSet *applyconfigurationsappsv1beta2.DaemonSetApplyConfiguration, opts v1.ApplyOptions) (result *appsv1beta2.DaemonSet, err error) DaemonSetExpansion } // daemonSets implements DaemonSetInterface type daemonSets struct { - *gentype.ClientWithListAndApply[*v1beta2.DaemonSet, *v1beta2.DaemonSetList, *appsv1beta2.DaemonSetApplyConfiguration] + *gentype.ClientWithListAndApply[*appsv1beta2.DaemonSet, *appsv1beta2.DaemonSetList, *applyconfigurationsappsv1beta2.DaemonSetApplyConfiguration] } // newDaemonSets returns a DaemonSets func newDaemonSets(c *AppsV1beta2Client, namespace string) *daemonSets { return &daemonSets{ - gentype.NewClientWithListAndApply[*v1beta2.DaemonSet, *v1beta2.DaemonSetList, *appsv1beta2.DaemonSetApplyConfiguration]( + gentype.NewClientWithListAndApply[*appsv1beta2.DaemonSet, *appsv1beta2.DaemonSetList, *applyconfigurationsappsv1beta2.DaemonSetApplyConfiguration]( "daemonsets", c.RESTClient(), scheme.ParameterCodec, namespace, - func() *v1beta2.DaemonSet { return &v1beta2.DaemonSet{} }, - func() *v1beta2.DaemonSetList { return &v1beta2.DaemonSetList{} }), + func() *appsv1beta2.DaemonSet { return &appsv1beta2.DaemonSet{} }, + func() *appsv1beta2.DaemonSetList { return &appsv1beta2.DaemonSetList{} }, + gentype.PrefersProtobuf[*appsv1beta2.DaemonSet](), + ), } } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/deployment.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/deployment.go index 6592ee8cd..1be57edb2 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/deployment.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/deployment.go @@ -19,13 +19,13 @@ limitations under the License. package v1beta2 import ( - "context" + context "context" - v1beta2 "k8s.io/api/apps/v1beta2" + appsv1beta2 "k8s.io/api/apps/v1beta2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - appsv1beta2 "k8s.io/client-go/applyconfigurations/apps/v1beta2" + applyconfigurationsappsv1beta2 "k8s.io/client-go/applyconfigurations/apps/v1beta2" gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" ) @@ -38,36 +38,38 @@ type DeploymentsGetter interface { // DeploymentInterface has methods to work with Deployment resources. type DeploymentInterface interface { - Create(ctx context.Context, deployment *v1beta2.Deployment, opts v1.CreateOptions) (*v1beta2.Deployment, error) - Update(ctx context.Context, deployment *v1beta2.Deployment, opts v1.UpdateOptions) (*v1beta2.Deployment, error) + Create(ctx context.Context, deployment *appsv1beta2.Deployment, opts v1.CreateOptions) (*appsv1beta2.Deployment, error) + Update(ctx context.Context, deployment *appsv1beta2.Deployment, opts v1.UpdateOptions) (*appsv1beta2.Deployment, error) // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
- UpdateStatus(ctx context.Context, deployment *v1beta2.Deployment, opts v1.UpdateOptions) (*v1beta2.Deployment, error) + UpdateStatus(ctx context.Context, deployment *appsv1beta2.Deployment, opts v1.UpdateOptions) (*appsv1beta2.Deployment, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta2.Deployment, error) - List(ctx context.Context, opts v1.ListOptions) (*v1beta2.DeploymentList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*appsv1beta2.Deployment, error) + List(ctx context.Context, opts v1.ListOptions) (*appsv1beta2.DeploymentList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.Deployment, err error) - Apply(ctx context.Context, deployment *appsv1beta2.DeploymentApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.Deployment, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *appsv1beta2.Deployment, err error) + Apply(ctx context.Context, deployment *applyconfigurationsappsv1beta2.DeploymentApplyConfiguration, opts v1.ApplyOptions) (result *appsv1beta2.Deployment, err error) // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). - ApplyStatus(ctx context.Context, deployment *appsv1beta2.DeploymentApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.Deployment, err error) + ApplyStatus(ctx context.Context, deployment *applyconfigurationsappsv1beta2.DeploymentApplyConfiguration, opts v1.ApplyOptions) (result *appsv1beta2.Deployment, err error) DeploymentExpansion } // deployments implements DeploymentInterface type deployments struct { - *gentype.ClientWithListAndApply[*v1beta2.Deployment, *v1beta2.DeploymentList, *appsv1beta2.DeploymentApplyConfiguration] + *gentype.ClientWithListAndApply[*appsv1beta2.Deployment, *appsv1beta2.DeploymentList, *applyconfigurationsappsv1beta2.DeploymentApplyConfiguration] } // newDeployments returns a Deployments func newDeployments(c *AppsV1beta2Client, namespace string) *deployments { return &deployments{ - gentype.NewClientWithListAndApply[*v1beta2.Deployment, *v1beta2.DeploymentList, *appsv1beta2.DeploymentApplyConfiguration]( + gentype.NewClientWithListAndApply[*appsv1beta2.Deployment, *appsv1beta2.DeploymentList, *applyconfigurationsappsv1beta2.DeploymentApplyConfiguration]( "deployments", c.RESTClient(), scheme.ParameterCodec, namespace, - func() *v1beta2.Deployment { return &v1beta2.Deployment{} }, - func() *v1beta2.DeploymentList { return &v1beta2.DeploymentList{} }), + func() *appsv1beta2.Deployment { return &appsv1beta2.Deployment{} }, + func() *appsv1beta2.DeploymentList { return &appsv1beta2.DeploymentList{} }, + gentype.PrefersProtobuf[*appsv1beta2.Deployment](), + ), } } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_apps_client.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_apps_client.go index 0ec34a2cd..2a5c3ed98 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_apps_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_apps_client.go @@ -29,23 +29,23 @@ type FakeAppsV1beta2 struct { } func (c *FakeAppsV1beta2) 
ControllerRevisions(namespace string) v1beta2.ControllerRevisionInterface { - return &FakeControllerRevisions{c, namespace} + return newFakeControllerRevisions(c, namespace) } func (c *FakeAppsV1beta2) DaemonSets(namespace string) v1beta2.DaemonSetInterface { - return &FakeDaemonSets{c, namespace} + return newFakeDaemonSets(c, namespace) } func (c *FakeAppsV1beta2) Deployments(namespace string) v1beta2.DeploymentInterface { - return &FakeDeployments{c, namespace} + return newFakeDeployments(c, namespace) } func (c *FakeAppsV1beta2) ReplicaSets(namespace string) v1beta2.ReplicaSetInterface { - return &FakeReplicaSets{c, namespace} + return newFakeReplicaSets(c, namespace) } func (c *FakeAppsV1beta2) StatefulSets(namespace string) v1beta2.StatefulSetInterface { - return &FakeStatefulSets{c, namespace} + return newFakeStatefulSets(c, namespace) } // RESTClient returns a RESTClient that is used to communicate diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_controllerrevision.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_controllerrevision.go index 45b205070..f9de53c16 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_controllerrevision.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_controllerrevision.go @@ -19,142 +19,35 @@ limitations under the License. package fake import ( - "context" - json "encoding/json" - "fmt" - v1beta2 "k8s.io/api/apps/v1beta2" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" appsv1beta2 "k8s.io/client-go/applyconfigurations/apps/v1beta2" - testing "k8s.io/client-go/testing" + gentype "k8s.io/client-go/gentype" + typedappsv1beta2 "k8s.io/client-go/kubernetes/typed/apps/v1beta2" ) -// FakeControllerRevisions implements ControllerRevisionInterface -type FakeControllerRevisions struct { +// fakeControllerRevisions implements ControllerRevisionInterface +type fakeControllerRevisions struct { + *gentype.FakeClientWithListAndApply[*v1beta2.ControllerRevision, *v1beta2.ControllerRevisionList, *appsv1beta2.ControllerRevisionApplyConfiguration] Fake *FakeAppsV1beta2 - ns string -} - -var controllerrevisionsResource = v1beta2.SchemeGroupVersion.WithResource("controllerrevisions") - -var controllerrevisionsKind = v1beta2.SchemeGroupVersion.WithKind("ControllerRevision") - -// Get takes name of the controllerRevision, and returns the corresponding controllerRevision object, and an error if there is any. -func (c *FakeControllerRevisions) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta2.ControllerRevision, err error) { - emptyResult := &v1beta2.ControllerRevision{} - obj, err := c.Fake. - Invokes(testing.NewGetActionWithOptions(controllerrevisionsResource, c.ns, name, options), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta2.ControllerRevision), err -} - -// List takes label and field selectors, and returns the list of ControllerRevisions that match those selectors. -func (c *FakeControllerRevisions) List(ctx context.Context, opts v1.ListOptions) (result *v1beta2.ControllerRevisionList, err error) { - emptyResult := &v1beta2.ControllerRevisionList{} - obj, err := c.Fake. 
- Invokes(testing.NewListActionWithOptions(controllerrevisionsResource, controllerrevisionsKind, c.ns, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1beta2.ControllerRevisionList{ListMeta: obj.(*v1beta2.ControllerRevisionList).ListMeta} - for _, item := range obj.(*v1beta2.ControllerRevisionList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested controllerRevisions. -func (c *FakeControllerRevisions) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchActionWithOptions(controllerrevisionsResource, c.ns, opts)) - } -// Create takes the representation of a controllerRevision and creates it. Returns the server's representation of the controllerRevision, and an error, if there is any. -func (c *FakeControllerRevisions) Create(ctx context.Context, controllerRevision *v1beta2.ControllerRevision, opts v1.CreateOptions) (result *v1beta2.ControllerRevision, err error) { - emptyResult := &v1beta2.ControllerRevision{} - obj, err := c.Fake. - Invokes(testing.NewCreateActionWithOptions(controllerrevisionsResource, c.ns, controllerRevision, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta2.ControllerRevision), err -} - -// Update takes the representation of a controllerRevision and updates it. Returns the server's representation of the controllerRevision, and an error, if there is any. -func (c *FakeControllerRevisions) Update(ctx context.Context, controllerRevision *v1beta2.ControllerRevision, opts v1.UpdateOptions) (result *v1beta2.ControllerRevision, err error) { - emptyResult := &v1beta2.ControllerRevision{} - obj, err := c.Fake. - Invokes(testing.NewUpdateActionWithOptions(controllerrevisionsResource, c.ns, controllerRevision, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta2.ControllerRevision), err -} - -// Delete takes name of the controllerRevision and deletes it. Returns an error if one occurs. -func (c *FakeControllerRevisions) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(controllerrevisionsResource, c.ns, name, opts), &v1beta2.ControllerRevision{}) - - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeControllerRevisions) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionActionWithOptions(controllerrevisionsResource, c.ns, opts, listOpts) - - _, err := c.Fake.Invokes(action, &v1beta2.ControllerRevisionList{}) - return err -} - -// Patch applies the patch and returns the patched controllerRevision. -func (c *FakeControllerRevisions) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.ControllerRevision, err error) { - emptyResult := &v1beta2.ControllerRevision{} - obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceActionWithOptions(controllerrevisionsResource, c.ns, name, pt, data, opts, subresources...), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta2.ControllerRevision), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied controllerRevision. -func (c *FakeControllerRevisions) Apply(ctx context.Context, controllerRevision *appsv1beta2.ControllerRevisionApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.ControllerRevision, err error) { - if controllerRevision == nil { - return nil, fmt.Errorf("controllerRevision provided to Apply must not be nil") - } - data, err := json.Marshal(controllerRevision) - if err != nil { - return nil, err - } - name := controllerRevision.Name - if name == nil { - return nil, fmt.Errorf("controllerRevision.Name must be provided to Apply") - } - emptyResult := &v1beta2.ControllerRevision{} - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceActionWithOptions(controllerrevisionsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) - - if obj == nil { - return emptyResult, err +func newFakeControllerRevisions(fake *FakeAppsV1beta2, namespace string) typedappsv1beta2.ControllerRevisionInterface { + return &fakeControllerRevisions{ + gentype.NewFakeClientWithListAndApply[*v1beta2.ControllerRevision, *v1beta2.ControllerRevisionList, *appsv1beta2.ControllerRevisionApplyConfiguration]( + fake.Fake, + namespace, + v1beta2.SchemeGroupVersion.WithResource("controllerrevisions"), + v1beta2.SchemeGroupVersion.WithKind("ControllerRevision"), + func() *v1beta2.ControllerRevision { return &v1beta2.ControllerRevision{} }, + func() *v1beta2.ControllerRevisionList { return &v1beta2.ControllerRevisionList{} }, + func(dst, src *v1beta2.ControllerRevisionList) { dst.ListMeta = src.ListMeta }, + func(list *v1beta2.ControllerRevisionList) []*v1beta2.ControllerRevision { + return gentype.ToPointerSlice(list.Items) + }, + func(list *v1beta2.ControllerRevisionList, items []*v1beta2.ControllerRevision) { + list.Items = gentype.FromPointerSlice(items) + }, + ), + fake, } - return obj.(*v1beta2.ControllerRevision), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_daemonset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_daemonset.go index 61ceeb141..e6ed84e2d 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_daemonset.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_daemonset.go @@ -19,179 +19,33 @@ limitations under the License. 
package fake import ( - "context" - json "encoding/json" - "fmt" - v1beta2 "k8s.io/api/apps/v1beta2" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" appsv1beta2 "k8s.io/client-go/applyconfigurations/apps/v1beta2" - testing "k8s.io/client-go/testing" + gentype "k8s.io/client-go/gentype" + typedappsv1beta2 "k8s.io/client-go/kubernetes/typed/apps/v1beta2" ) -// FakeDaemonSets implements DaemonSetInterface -type FakeDaemonSets struct { +// fakeDaemonSets implements DaemonSetInterface +type fakeDaemonSets struct { + *gentype.FakeClientWithListAndApply[*v1beta2.DaemonSet, *v1beta2.DaemonSetList, *appsv1beta2.DaemonSetApplyConfiguration] Fake *FakeAppsV1beta2 - ns string -} - -var daemonsetsResource = v1beta2.SchemeGroupVersion.WithResource("daemonsets") - -var daemonsetsKind = v1beta2.SchemeGroupVersion.WithKind("DaemonSet") - -// Get takes name of the daemonSet, and returns the corresponding daemonSet object, and an error if there is any. -func (c *FakeDaemonSets) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta2.DaemonSet, err error) { - emptyResult := &v1beta2.DaemonSet{} - obj, err := c.Fake. - Invokes(testing.NewGetActionWithOptions(daemonsetsResource, c.ns, name, options), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta2.DaemonSet), err -} - -// List takes label and field selectors, and returns the list of DaemonSets that match those selectors. -func (c *FakeDaemonSets) List(ctx context.Context, opts v1.ListOptions) (result *v1beta2.DaemonSetList, err error) { - emptyResult := &v1beta2.DaemonSetList{} - obj, err := c.Fake. - Invokes(testing.NewListActionWithOptions(daemonsetsResource, daemonsetsKind, c.ns, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1beta2.DaemonSetList{ListMeta: obj.(*v1beta2.DaemonSetList).ListMeta} - for _, item := range obj.(*v1beta2.DaemonSetList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested daemonSets. -func (c *FakeDaemonSets) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchActionWithOptions(daemonsetsResource, c.ns, opts)) - -} - -// Create takes the representation of a daemonSet and creates it. Returns the server's representation of the daemonSet, and an error, if there is any. -func (c *FakeDaemonSets) Create(ctx context.Context, daemonSet *v1beta2.DaemonSet, opts v1.CreateOptions) (result *v1beta2.DaemonSet, err error) { - emptyResult := &v1beta2.DaemonSet{} - obj, err := c.Fake. - Invokes(testing.NewCreateActionWithOptions(daemonsetsResource, c.ns, daemonSet, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta2.DaemonSet), err -} - -// Update takes the representation of a daemonSet and updates it. Returns the server's representation of the daemonSet, and an error, if there is any. -func (c *FakeDaemonSets) Update(ctx context.Context, daemonSet *v1beta2.DaemonSet, opts v1.UpdateOptions) (result *v1beta2.DaemonSet, err error) { - emptyResult := &v1beta2.DaemonSet{} - obj, err := c.Fake. 
- Invokes(testing.NewUpdateActionWithOptions(daemonsetsResource, c.ns, daemonSet, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta2.DaemonSet), err } -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeDaemonSets) UpdateStatus(ctx context.Context, daemonSet *v1beta2.DaemonSet, opts v1.UpdateOptions) (result *v1beta2.DaemonSet, err error) { - emptyResult := &v1beta2.DaemonSet{} - obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceActionWithOptions(daemonsetsResource, "status", c.ns, daemonSet, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta2.DaemonSet), err -} - -// Delete takes name of the daemonSet and deletes it. Returns an error if one occurs. -func (c *FakeDaemonSets) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(daemonsetsResource, c.ns, name, opts), &v1beta2.DaemonSet{}) - - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeDaemonSets) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionActionWithOptions(daemonsetsResource, c.ns, opts, listOpts) - - _, err := c.Fake.Invokes(action, &v1beta2.DaemonSetList{}) - return err -} - -// Patch applies the patch and returns the patched daemonSet. -func (c *FakeDaemonSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.DaemonSet, err error) { - emptyResult := &v1beta2.DaemonSet{} - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceActionWithOptions(daemonsetsResource, c.ns, name, pt, data, opts, subresources...), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta2.DaemonSet), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied daemonSet. -func (c *FakeDaemonSets) Apply(ctx context.Context, daemonSet *appsv1beta2.DaemonSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.DaemonSet, err error) { - if daemonSet == nil { - return nil, fmt.Errorf("daemonSet provided to Apply must not be nil") - } - data, err := json.Marshal(daemonSet) - if err != nil { - return nil, err - } - name := daemonSet.Name - if name == nil { - return nil, fmt.Errorf("daemonSet.Name must be provided to Apply") - } - emptyResult := &v1beta2.DaemonSet{} - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceActionWithOptions(daemonsetsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta2.DaemonSet), err -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
-func (c *FakeDaemonSets) ApplyStatus(ctx context.Context, daemonSet *appsv1beta2.DaemonSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.DaemonSet, err error) { - if daemonSet == nil { - return nil, fmt.Errorf("daemonSet provided to Apply must not be nil") - } - data, err := json.Marshal(daemonSet) - if err != nil { - return nil, err - } - name := daemonSet.Name - if name == nil { - return nil, fmt.Errorf("daemonSet.Name must be provided to Apply") - } - emptyResult := &v1beta2.DaemonSet{} - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceActionWithOptions(daemonsetsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) - - if obj == nil { - return emptyResult, err +func newFakeDaemonSets(fake *FakeAppsV1beta2, namespace string) typedappsv1beta2.DaemonSetInterface { + return &fakeDaemonSets{ + gentype.NewFakeClientWithListAndApply[*v1beta2.DaemonSet, *v1beta2.DaemonSetList, *appsv1beta2.DaemonSetApplyConfiguration]( + fake.Fake, + namespace, + v1beta2.SchemeGroupVersion.WithResource("daemonsets"), + v1beta2.SchemeGroupVersion.WithKind("DaemonSet"), + func() *v1beta2.DaemonSet { return &v1beta2.DaemonSet{} }, + func() *v1beta2.DaemonSetList { return &v1beta2.DaemonSetList{} }, + func(dst, src *v1beta2.DaemonSetList) { dst.ListMeta = src.ListMeta }, + func(list *v1beta2.DaemonSetList) []*v1beta2.DaemonSet { return gentype.ToPointerSlice(list.Items) }, + func(list *v1beta2.DaemonSetList, items []*v1beta2.DaemonSet) { + list.Items = gentype.FromPointerSlice(items) + }, + ), + fake, } - return obj.(*v1beta2.DaemonSet), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_deployment.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_deployment.go index d849856a4..b240a7d55 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_deployment.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_deployment.go @@ -19,179 +19,33 @@ limitations under the License. package fake import ( - "context" - json "encoding/json" - "fmt" - v1beta2 "k8s.io/api/apps/v1beta2" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" appsv1beta2 "k8s.io/client-go/applyconfigurations/apps/v1beta2" - testing "k8s.io/client-go/testing" + gentype "k8s.io/client-go/gentype" + typedappsv1beta2 "k8s.io/client-go/kubernetes/typed/apps/v1beta2" ) -// FakeDeployments implements DeploymentInterface -type FakeDeployments struct { +// fakeDeployments implements DeploymentInterface +type fakeDeployments struct { + *gentype.FakeClientWithListAndApply[*v1beta2.Deployment, *v1beta2.DeploymentList, *appsv1beta2.DeploymentApplyConfiguration] Fake *FakeAppsV1beta2 - ns string -} - -var deploymentsResource = v1beta2.SchemeGroupVersion.WithResource("deployments") - -var deploymentsKind = v1beta2.SchemeGroupVersion.WithKind("Deployment") - -// Get takes name of the deployment, and returns the corresponding deployment object, and an error if there is any. -func (c *FakeDeployments) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta2.Deployment, err error) { - emptyResult := &v1beta2.Deployment{} - obj, err := c.Fake. 
- Invokes(testing.NewGetActionWithOptions(deploymentsResource, c.ns, name, options), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta2.Deployment), err -} - -// List takes label and field selectors, and returns the list of Deployments that match those selectors. -func (c *FakeDeployments) List(ctx context.Context, opts v1.ListOptions) (result *v1beta2.DeploymentList, err error) { - emptyResult := &v1beta2.DeploymentList{} - obj, err := c.Fake. - Invokes(testing.NewListActionWithOptions(deploymentsResource, deploymentsKind, c.ns, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1beta2.DeploymentList{ListMeta: obj.(*v1beta2.DeploymentList).ListMeta} - for _, item := range obj.(*v1beta2.DeploymentList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested deployments. -func (c *FakeDeployments) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchActionWithOptions(deploymentsResource, c.ns, opts)) - -} - -// Create takes the representation of a deployment and creates it. Returns the server's representation of the deployment, and an error, if there is any. -func (c *FakeDeployments) Create(ctx context.Context, deployment *v1beta2.Deployment, opts v1.CreateOptions) (result *v1beta2.Deployment, err error) { - emptyResult := &v1beta2.Deployment{} - obj, err := c.Fake. - Invokes(testing.NewCreateActionWithOptions(deploymentsResource, c.ns, deployment, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta2.Deployment), err -} - -// Update takes the representation of a deployment and updates it. Returns the server's representation of the deployment, and an error, if there is any. -func (c *FakeDeployments) Update(ctx context.Context, deployment *v1beta2.Deployment, opts v1.UpdateOptions) (result *v1beta2.Deployment, err error) { - emptyResult := &v1beta2.Deployment{} - obj, err := c.Fake. - Invokes(testing.NewUpdateActionWithOptions(deploymentsResource, c.ns, deployment, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta2.Deployment), err } -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeDeployments) UpdateStatus(ctx context.Context, deployment *v1beta2.Deployment, opts v1.UpdateOptions) (result *v1beta2.Deployment, err error) { - emptyResult := &v1beta2.Deployment{} - obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceActionWithOptions(deploymentsResource, "status", c.ns, deployment, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta2.Deployment), err -} - -// Delete takes name of the deployment and deletes it. Returns an error if one occurs. -func (c *FakeDeployments) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(deploymentsResource, c.ns, name, opts), &v1beta2.Deployment{}) - - return err -} - -// DeleteCollection deletes a collection of objects. 
-func (c *FakeDeployments) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionActionWithOptions(deploymentsResource, c.ns, opts, listOpts) - - _, err := c.Fake.Invokes(action, &v1beta2.DeploymentList{}) - return err -} - -// Patch applies the patch and returns the patched deployment. -func (c *FakeDeployments) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.Deployment, err error) { - emptyResult := &v1beta2.Deployment{} - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceActionWithOptions(deploymentsResource, c.ns, name, pt, data, opts, subresources...), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta2.Deployment), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied deployment. -func (c *FakeDeployments) Apply(ctx context.Context, deployment *appsv1beta2.DeploymentApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.Deployment, err error) { - if deployment == nil { - return nil, fmt.Errorf("deployment provided to Apply must not be nil") - } - data, err := json.Marshal(deployment) - if err != nil { - return nil, err - } - name := deployment.Name - if name == nil { - return nil, fmt.Errorf("deployment.Name must be provided to Apply") - } - emptyResult := &v1beta2.Deployment{} - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceActionWithOptions(deploymentsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta2.Deployment), err -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *FakeDeployments) ApplyStatus(ctx context.Context, deployment *appsv1beta2.DeploymentApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.Deployment, err error) { - if deployment == nil { - return nil, fmt.Errorf("deployment provided to Apply must not be nil") - } - data, err := json.Marshal(deployment) - if err != nil { - return nil, err - } - name := deployment.Name - if name == nil { - return nil, fmt.Errorf("deployment.Name must be provided to Apply") - } - emptyResult := &v1beta2.Deployment{} - obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceActionWithOptions(deploymentsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) - - if obj == nil { - return emptyResult, err +func newFakeDeployments(fake *FakeAppsV1beta2, namespace string) typedappsv1beta2.DeploymentInterface { + return &fakeDeployments{ + gentype.NewFakeClientWithListAndApply[*v1beta2.Deployment, *v1beta2.DeploymentList, *appsv1beta2.DeploymentApplyConfiguration]( + fake.Fake, + namespace, + v1beta2.SchemeGroupVersion.WithResource("deployments"), + v1beta2.SchemeGroupVersion.WithKind("Deployment"), + func() *v1beta2.Deployment { return &v1beta2.Deployment{} }, + func() *v1beta2.DeploymentList { return &v1beta2.DeploymentList{} }, + func(dst, src *v1beta2.DeploymentList) { dst.ListMeta = src.ListMeta }, + func(list *v1beta2.DeploymentList) []*v1beta2.Deployment { return gentype.ToPointerSlice(list.Items) }, + func(list *v1beta2.DeploymentList, items []*v1beta2.Deployment) { + list.Items = gentype.FromPointerSlice(items) + }, + ), + fake, } - return obj.(*v1beta2.Deployment), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_replicaset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_replicaset.go index 1f957f084..ec886dc79 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_replicaset.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_replicaset.go @@ -19,179 +19,33 @@ limitations under the License. package fake import ( - "context" - json "encoding/json" - "fmt" - v1beta2 "k8s.io/api/apps/v1beta2" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" appsv1beta2 "k8s.io/client-go/applyconfigurations/apps/v1beta2" - testing "k8s.io/client-go/testing" + gentype "k8s.io/client-go/gentype" + typedappsv1beta2 "k8s.io/client-go/kubernetes/typed/apps/v1beta2" ) -// FakeReplicaSets implements ReplicaSetInterface -type FakeReplicaSets struct { +// fakeReplicaSets implements ReplicaSetInterface +type fakeReplicaSets struct { + *gentype.FakeClientWithListAndApply[*v1beta2.ReplicaSet, *v1beta2.ReplicaSetList, *appsv1beta2.ReplicaSetApplyConfiguration] Fake *FakeAppsV1beta2 - ns string -} - -var replicasetsResource = v1beta2.SchemeGroupVersion.WithResource("replicasets") - -var replicasetsKind = v1beta2.SchemeGroupVersion.WithKind("ReplicaSet") - -// Get takes name of the replicaSet, and returns the corresponding replicaSet object, and an error if there is any. -func (c *FakeReplicaSets) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta2.ReplicaSet, err error) { - emptyResult := &v1beta2.ReplicaSet{} - obj, err := c.Fake. - Invokes(testing.NewGetActionWithOptions(replicasetsResource, c.ns, name, options), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta2.ReplicaSet), err -} - -// List takes label and field selectors, and returns the list of ReplicaSets that match those selectors. -func (c *FakeReplicaSets) List(ctx context.Context, opts v1.ListOptions) (result *v1beta2.ReplicaSetList, err error) { - emptyResult := &v1beta2.ReplicaSetList{} - obj, err := c.Fake. 
- Invokes(testing.NewListActionWithOptions(replicasetsResource, replicasetsKind, c.ns, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1beta2.ReplicaSetList{ListMeta: obj.(*v1beta2.ReplicaSetList).ListMeta} - for _, item := range obj.(*v1beta2.ReplicaSetList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested replicaSets. -func (c *FakeReplicaSets) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchActionWithOptions(replicasetsResource, c.ns, opts)) - -} - -// Create takes the representation of a replicaSet and creates it. Returns the server's representation of the replicaSet, and an error, if there is any. -func (c *FakeReplicaSets) Create(ctx context.Context, replicaSet *v1beta2.ReplicaSet, opts v1.CreateOptions) (result *v1beta2.ReplicaSet, err error) { - emptyResult := &v1beta2.ReplicaSet{} - obj, err := c.Fake. - Invokes(testing.NewCreateActionWithOptions(replicasetsResource, c.ns, replicaSet, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta2.ReplicaSet), err -} - -// Update takes the representation of a replicaSet and updates it. Returns the server's representation of the replicaSet, and an error, if there is any. -func (c *FakeReplicaSets) Update(ctx context.Context, replicaSet *v1beta2.ReplicaSet, opts v1.UpdateOptions) (result *v1beta2.ReplicaSet, err error) { - emptyResult := &v1beta2.ReplicaSet{} - obj, err := c.Fake. - Invokes(testing.NewUpdateActionWithOptions(replicasetsResource, c.ns, replicaSet, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta2.ReplicaSet), err } -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeReplicaSets) UpdateStatus(ctx context.Context, replicaSet *v1beta2.ReplicaSet, opts v1.UpdateOptions) (result *v1beta2.ReplicaSet, err error) { - emptyResult := &v1beta2.ReplicaSet{} - obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceActionWithOptions(replicasetsResource, "status", c.ns, replicaSet, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta2.ReplicaSet), err -} - -// Delete takes name of the replicaSet and deletes it. Returns an error if one occurs. -func (c *FakeReplicaSets) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(replicasetsResource, c.ns, name, opts), &v1beta2.ReplicaSet{}) - - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeReplicaSets) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionActionWithOptions(replicasetsResource, c.ns, opts, listOpts) - - _, err := c.Fake.Invokes(action, &v1beta2.ReplicaSetList{}) - return err -} - -// Patch applies the patch and returns the patched replicaSet. -func (c *FakeReplicaSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.ReplicaSet, err error) { - emptyResult := &v1beta2.ReplicaSet{} - obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceActionWithOptions(replicasetsResource, c.ns, name, pt, data, opts, subresources...), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta2.ReplicaSet), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied replicaSet. -func (c *FakeReplicaSets) Apply(ctx context.Context, replicaSet *appsv1beta2.ReplicaSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.ReplicaSet, err error) { - if replicaSet == nil { - return nil, fmt.Errorf("replicaSet provided to Apply must not be nil") - } - data, err := json.Marshal(replicaSet) - if err != nil { - return nil, err - } - name := replicaSet.Name - if name == nil { - return nil, fmt.Errorf("replicaSet.Name must be provided to Apply") - } - emptyResult := &v1beta2.ReplicaSet{} - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceActionWithOptions(replicasetsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta2.ReplicaSet), err -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *FakeReplicaSets) ApplyStatus(ctx context.Context, replicaSet *appsv1beta2.ReplicaSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.ReplicaSet, err error) { - if replicaSet == nil { - return nil, fmt.Errorf("replicaSet provided to Apply must not be nil") - } - data, err := json.Marshal(replicaSet) - if err != nil { - return nil, err - } - name := replicaSet.Name - if name == nil { - return nil, fmt.Errorf("replicaSet.Name must be provided to Apply") - } - emptyResult := &v1beta2.ReplicaSet{} - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceActionWithOptions(replicasetsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) - - if obj == nil { - return emptyResult, err +func newFakeReplicaSets(fake *FakeAppsV1beta2, namespace string) typedappsv1beta2.ReplicaSetInterface { + return &fakeReplicaSets{ + gentype.NewFakeClientWithListAndApply[*v1beta2.ReplicaSet, *v1beta2.ReplicaSetList, *appsv1beta2.ReplicaSetApplyConfiguration]( + fake.Fake, + namespace, + v1beta2.SchemeGroupVersion.WithResource("replicasets"), + v1beta2.SchemeGroupVersion.WithKind("ReplicaSet"), + func() *v1beta2.ReplicaSet { return &v1beta2.ReplicaSet{} }, + func() *v1beta2.ReplicaSetList { return &v1beta2.ReplicaSetList{} }, + func(dst, src *v1beta2.ReplicaSetList) { dst.ListMeta = src.ListMeta }, + func(list *v1beta2.ReplicaSetList) []*v1beta2.ReplicaSet { return gentype.ToPointerSlice(list.Items) }, + func(list *v1beta2.ReplicaSetList, items []*v1beta2.ReplicaSet) { + list.Items = gentype.FromPointerSlice(items) + }, + ), + fake, } - return obj.(*v1beta2.ReplicaSet), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_statefulset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_statefulset.go index ac8945aa7..6e2cbbf5c 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_statefulset.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_statefulset.go @@ -19,188 +19,49 @@ limitations under the License. 
package fake import ( - "context" + context "context" json "encoding/json" - "fmt" + fmt "fmt" v1beta2 "k8s.io/api/apps/v1beta2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" appsv1beta2 "k8s.io/client-go/applyconfigurations/apps/v1beta2" + gentype "k8s.io/client-go/gentype" + typedappsv1beta2 "k8s.io/client-go/kubernetes/typed/apps/v1beta2" testing "k8s.io/client-go/testing" ) -// FakeStatefulSets implements StatefulSetInterface -type FakeStatefulSets struct { +// fakeStatefulSets implements StatefulSetInterface +type fakeStatefulSets struct { + *gentype.FakeClientWithListAndApply[*v1beta2.StatefulSet, *v1beta2.StatefulSetList, *appsv1beta2.StatefulSetApplyConfiguration] Fake *FakeAppsV1beta2 - ns string } -var statefulsetsResource = v1beta2.SchemeGroupVersion.WithResource("statefulsets") - -var statefulsetsKind = v1beta2.SchemeGroupVersion.WithKind("StatefulSet") - -// Get takes name of the statefulSet, and returns the corresponding statefulSet object, and an error if there is any. -func (c *FakeStatefulSets) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta2.StatefulSet, err error) { - emptyResult := &v1beta2.StatefulSet{} - obj, err := c.Fake. - Invokes(testing.NewGetActionWithOptions(statefulsetsResource, c.ns, name, options), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta2.StatefulSet), err -} - -// List takes label and field selectors, and returns the list of StatefulSets that match those selectors. -func (c *FakeStatefulSets) List(ctx context.Context, opts v1.ListOptions) (result *v1beta2.StatefulSetList, err error) { - emptyResult := &v1beta2.StatefulSetList{} - obj, err := c.Fake. - Invokes(testing.NewListActionWithOptions(statefulsetsResource, statefulsetsKind, c.ns, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1beta2.StatefulSetList{ListMeta: obj.(*v1beta2.StatefulSetList).ListMeta} - for _, item := range obj.(*v1beta2.StatefulSetList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested statefulSets. -func (c *FakeStatefulSets) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchActionWithOptions(statefulsetsResource, c.ns, opts)) - -} - -// Create takes the representation of a statefulSet and creates it. Returns the server's representation of the statefulSet, and an error, if there is any. -func (c *FakeStatefulSets) Create(ctx context.Context, statefulSet *v1beta2.StatefulSet, opts v1.CreateOptions) (result *v1beta2.StatefulSet, err error) { - emptyResult := &v1beta2.StatefulSet{} - obj, err := c.Fake. - Invokes(testing.NewCreateActionWithOptions(statefulsetsResource, c.ns, statefulSet, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta2.StatefulSet), err -} - -// Update takes the representation of a statefulSet and updates it. Returns the server's representation of the statefulSet, and an error, if there is any. 
-func (c *FakeStatefulSets) Update(ctx context.Context, statefulSet *v1beta2.StatefulSet, opts v1.UpdateOptions) (result *v1beta2.StatefulSet, err error) { - emptyResult := &v1beta2.StatefulSet{} - obj, err := c.Fake. - Invokes(testing.NewUpdateActionWithOptions(statefulsetsResource, c.ns, statefulSet, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta2.StatefulSet), err -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeStatefulSets) UpdateStatus(ctx context.Context, statefulSet *v1beta2.StatefulSet, opts v1.UpdateOptions) (result *v1beta2.StatefulSet, err error) { - emptyResult := &v1beta2.StatefulSet{} - obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceActionWithOptions(statefulsetsResource, "status", c.ns, statefulSet, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta2.StatefulSet), err -} - -// Delete takes name of the statefulSet and deletes it. Returns an error if one occurs. -func (c *FakeStatefulSets) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(statefulsetsResource, c.ns, name, opts), &v1beta2.StatefulSet{}) - - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeStatefulSets) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionActionWithOptions(statefulsetsResource, c.ns, opts, listOpts) - - _, err := c.Fake.Invokes(action, &v1beta2.StatefulSetList{}) - return err -} - -// Patch applies the patch and returns the patched statefulSet. -func (c *FakeStatefulSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.StatefulSet, err error) { - emptyResult := &v1beta2.StatefulSet{} - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceActionWithOptions(statefulsetsResource, c.ns, name, pt, data, opts, subresources...), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta2.StatefulSet), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied statefulSet. -func (c *FakeStatefulSets) Apply(ctx context.Context, statefulSet *appsv1beta2.StatefulSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.StatefulSet, err error) { - if statefulSet == nil { - return nil, fmt.Errorf("statefulSet provided to Apply must not be nil") - } - data, err := json.Marshal(statefulSet) - if err != nil { - return nil, err - } - name := statefulSet.Name - if name == nil { - return nil, fmt.Errorf("statefulSet.Name must be provided to Apply") - } - emptyResult := &v1beta2.StatefulSet{} - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceActionWithOptions(statefulsetsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta2.StatefulSet), err -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
-func (c *FakeStatefulSets) ApplyStatus(ctx context.Context, statefulSet *appsv1beta2.StatefulSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.StatefulSet, err error) { - if statefulSet == nil { - return nil, fmt.Errorf("statefulSet provided to Apply must not be nil") - } - data, err := json.Marshal(statefulSet) - if err != nil { - return nil, err - } - name := statefulSet.Name - if name == nil { - return nil, fmt.Errorf("statefulSet.Name must be provided to Apply") - } - emptyResult := &v1beta2.StatefulSet{} - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceActionWithOptions(statefulsetsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) - - if obj == nil { - return emptyResult, err +func newFakeStatefulSets(fake *FakeAppsV1beta2, namespace string) typedappsv1beta2.StatefulSetInterface { + return &fakeStatefulSets{ + gentype.NewFakeClientWithListAndApply[*v1beta2.StatefulSet, *v1beta2.StatefulSetList, *appsv1beta2.StatefulSetApplyConfiguration]( + fake.Fake, + namespace, + v1beta2.SchemeGroupVersion.WithResource("statefulsets"), + v1beta2.SchemeGroupVersion.WithKind("StatefulSet"), + func() *v1beta2.StatefulSet { return &v1beta2.StatefulSet{} }, + func() *v1beta2.StatefulSetList { return &v1beta2.StatefulSetList{} }, + func(dst, src *v1beta2.StatefulSetList) { dst.ListMeta = src.ListMeta }, + func(list *v1beta2.StatefulSetList) []*v1beta2.StatefulSet { return gentype.ToPointerSlice(list.Items) }, + func(list *v1beta2.StatefulSetList, items []*v1beta2.StatefulSet) { + list.Items = gentype.FromPointerSlice(items) + }, + ), + fake, } - return obj.(*v1beta2.StatefulSet), err } // GetScale takes name of the statefulSet, and returns the corresponding scale object, and an error if there is any. -func (c *FakeStatefulSets) GetScale(ctx context.Context, statefulSetName string, options v1.GetOptions) (result *v1beta2.Scale, err error) { +func (c *fakeStatefulSets) GetScale(ctx context.Context, statefulSetName string, options v1.GetOptions) (result *v1beta2.Scale, err error) { emptyResult := &v1beta2.Scale{} obj, err := c.Fake. - Invokes(testing.NewGetSubresourceActionWithOptions(statefulsetsResource, c.ns, "scale", statefulSetName, options), emptyResult) + Invokes(testing.NewGetSubresourceActionWithOptions(c.Resource(), c.Namespace(), "scale", statefulSetName, options), emptyResult) if obj == nil { return emptyResult, err @@ -209,10 +70,10 @@ func (c *FakeStatefulSets) GetScale(ctx context.Context, statefulSetName string, } // UpdateScale takes the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any. -func (c *FakeStatefulSets) UpdateScale(ctx context.Context, statefulSetName string, scale *v1beta2.Scale, opts v1.UpdateOptions) (result *v1beta2.Scale, err error) { +func (c *fakeStatefulSets) UpdateScale(ctx context.Context, statefulSetName string, scale *v1beta2.Scale, opts v1.UpdateOptions) (result *v1beta2.Scale, err error) { emptyResult := &v1beta2.Scale{} obj, err := c.Fake. 
- Invokes(testing.NewUpdateSubresourceActionWithOptions(statefulsetsResource, "scale", c.ns, scale, opts), &v1beta2.Scale{}) + Invokes(testing.NewUpdateSubresourceActionWithOptions(c.Resource(), "scale", c.Namespace(), scale, opts), &v1beta2.Scale{}) if obj == nil { return emptyResult, err @@ -222,7 +83,7 @@ func (c *FakeStatefulSets) UpdateScale(ctx context.Context, statefulSetName stri // ApplyScale takes top resource name and the apply declarative configuration for scale, // applies it and returns the applied scale, and an error, if there is any. -func (c *FakeStatefulSets) ApplyScale(ctx context.Context, statefulSetName string, scale *appsv1beta2.ScaleApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.Scale, err error) { +func (c *fakeStatefulSets) ApplyScale(ctx context.Context, statefulSetName string, scale *appsv1beta2.ScaleApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.Scale, err error) { if scale == nil { return nil, fmt.Errorf("scale provided to ApplyScale must not be nil") } @@ -232,7 +93,7 @@ func (c *FakeStatefulSets) ApplyScale(ctx context.Context, statefulSetName strin } emptyResult := &v1beta2.Scale{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceActionWithOptions(statefulsetsResource, c.ns, statefulSetName, types.ApplyPatchType, data, opts.ToPatchOptions(), "scale"), emptyResult) + Invokes(testing.NewPatchSubresourceActionWithOptions(c.Resource(), c.Namespace(), statefulSetName, types.ApplyPatchType, data, opts.ToPatchOptions(), "scale"), emptyResult) if obj == nil { return emptyResult, err diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/replicaset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/replicaset.go index 90380ca98..12bac0923 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/replicaset.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/replicaset.go @@ -19,13 +19,13 @@ limitations under the License. package v1beta2 import ( - "context" + context "context" - v1beta2 "k8s.io/api/apps/v1beta2" + appsv1beta2 "k8s.io/api/apps/v1beta2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - appsv1beta2 "k8s.io/client-go/applyconfigurations/apps/v1beta2" + applyconfigurationsappsv1beta2 "k8s.io/client-go/applyconfigurations/apps/v1beta2" gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" ) @@ -38,36 +38,38 @@ type ReplicaSetsGetter interface { // ReplicaSetInterface has methods to work with ReplicaSet resources. type ReplicaSetInterface interface { - Create(ctx context.Context, replicaSet *v1beta2.ReplicaSet, opts v1.CreateOptions) (*v1beta2.ReplicaSet, error) - Update(ctx context.Context, replicaSet *v1beta2.ReplicaSet, opts v1.UpdateOptions) (*v1beta2.ReplicaSet, error) + Create(ctx context.Context, replicaSet *appsv1beta2.ReplicaSet, opts v1.CreateOptions) (*appsv1beta2.ReplicaSet, error) + Update(ctx context.Context, replicaSet *appsv1beta2.ReplicaSet, opts v1.UpdateOptions) (*appsv1beta2.ReplicaSet, error) // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
- UpdateStatus(ctx context.Context, replicaSet *v1beta2.ReplicaSet, opts v1.UpdateOptions) (*v1beta2.ReplicaSet, error) + UpdateStatus(ctx context.Context, replicaSet *appsv1beta2.ReplicaSet, opts v1.UpdateOptions) (*appsv1beta2.ReplicaSet, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta2.ReplicaSet, error) - List(ctx context.Context, opts v1.ListOptions) (*v1beta2.ReplicaSetList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*appsv1beta2.ReplicaSet, error) + List(ctx context.Context, opts v1.ListOptions) (*appsv1beta2.ReplicaSetList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.ReplicaSet, err error) - Apply(ctx context.Context, replicaSet *appsv1beta2.ReplicaSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.ReplicaSet, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *appsv1beta2.ReplicaSet, err error) + Apply(ctx context.Context, replicaSet *applyconfigurationsappsv1beta2.ReplicaSetApplyConfiguration, opts v1.ApplyOptions) (result *appsv1beta2.ReplicaSet, err error) // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). - ApplyStatus(ctx context.Context, replicaSet *appsv1beta2.ReplicaSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.ReplicaSet, err error) + ApplyStatus(ctx context.Context, replicaSet *applyconfigurationsappsv1beta2.ReplicaSetApplyConfiguration, opts v1.ApplyOptions) (result *appsv1beta2.ReplicaSet, err error) ReplicaSetExpansion } // replicaSets implements ReplicaSetInterface type replicaSets struct { - *gentype.ClientWithListAndApply[*v1beta2.ReplicaSet, *v1beta2.ReplicaSetList, *appsv1beta2.ReplicaSetApplyConfiguration] + *gentype.ClientWithListAndApply[*appsv1beta2.ReplicaSet, *appsv1beta2.ReplicaSetList, *applyconfigurationsappsv1beta2.ReplicaSetApplyConfiguration] } // newReplicaSets returns a ReplicaSets func newReplicaSets(c *AppsV1beta2Client, namespace string) *replicaSets { return &replicaSets{ - gentype.NewClientWithListAndApply[*v1beta2.ReplicaSet, *v1beta2.ReplicaSetList, *appsv1beta2.ReplicaSetApplyConfiguration]( + gentype.NewClientWithListAndApply[*appsv1beta2.ReplicaSet, *appsv1beta2.ReplicaSetList, *applyconfigurationsappsv1beta2.ReplicaSetApplyConfiguration]( "replicasets", c.RESTClient(), scheme.ParameterCodec, namespace, - func() *v1beta2.ReplicaSet { return &v1beta2.ReplicaSet{} }, - func() *v1beta2.ReplicaSetList { return &v1beta2.ReplicaSetList{} }), + func() *appsv1beta2.ReplicaSet { return &appsv1beta2.ReplicaSet{} }, + func() *appsv1beta2.ReplicaSetList { return &appsv1beta2.ReplicaSetList{} }, + gentype.PrefersProtobuf[*appsv1beta2.ReplicaSet](), + ), } } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/statefulset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/statefulset.go index f2d673abb..c71e93494 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/statefulset.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/statefulset.go @@ -19,17 +19,17 @@ limitations under the License. 
package v1beta2 import ( - "context" - json "encoding/json" - "fmt" + context "context" + fmt "fmt" - v1beta2 "k8s.io/api/apps/v1beta2" + appsv1beta2 "k8s.io/api/apps/v1beta2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - appsv1beta2 "k8s.io/client-go/applyconfigurations/apps/v1beta2" + applyconfigurationsappsv1beta2 "k8s.io/client-go/applyconfigurations/apps/v1beta2" gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" + apply "k8s.io/client-go/util/apply" ) // StatefulSetsGetter has a method to return a StatefulSetInterface. @@ -40,48 +40,51 @@ type StatefulSetsGetter interface { // StatefulSetInterface has methods to work with StatefulSet resources. type StatefulSetInterface interface { - Create(ctx context.Context, statefulSet *v1beta2.StatefulSet, opts v1.CreateOptions) (*v1beta2.StatefulSet, error) - Update(ctx context.Context, statefulSet *v1beta2.StatefulSet, opts v1.UpdateOptions) (*v1beta2.StatefulSet, error) + Create(ctx context.Context, statefulSet *appsv1beta2.StatefulSet, opts v1.CreateOptions) (*appsv1beta2.StatefulSet, error) + Update(ctx context.Context, statefulSet *appsv1beta2.StatefulSet, opts v1.UpdateOptions) (*appsv1beta2.StatefulSet, error) // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - UpdateStatus(ctx context.Context, statefulSet *v1beta2.StatefulSet, opts v1.UpdateOptions) (*v1beta2.StatefulSet, error) + UpdateStatus(ctx context.Context, statefulSet *appsv1beta2.StatefulSet, opts v1.UpdateOptions) (*appsv1beta2.StatefulSet, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta2.StatefulSet, error) - List(ctx context.Context, opts v1.ListOptions) (*v1beta2.StatefulSetList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*appsv1beta2.StatefulSet, error) + List(ctx context.Context, opts v1.ListOptions) (*appsv1beta2.StatefulSetList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.StatefulSet, err error) - Apply(ctx context.Context, statefulSet *appsv1beta2.StatefulSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.StatefulSet, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *appsv1beta2.StatefulSet, err error) + Apply(ctx context.Context, statefulSet *applyconfigurationsappsv1beta2.StatefulSetApplyConfiguration, opts v1.ApplyOptions) (result *appsv1beta2.StatefulSet, err error) // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
- ApplyStatus(ctx context.Context, statefulSet *appsv1beta2.StatefulSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.StatefulSet, err error) - GetScale(ctx context.Context, statefulSetName string, options v1.GetOptions) (*v1beta2.Scale, error) - UpdateScale(ctx context.Context, statefulSetName string, scale *v1beta2.Scale, opts v1.UpdateOptions) (*v1beta2.Scale, error) - ApplyScale(ctx context.Context, statefulSetName string, scale *appsv1beta2.ScaleApplyConfiguration, opts v1.ApplyOptions) (*v1beta2.Scale, error) + ApplyStatus(ctx context.Context, statefulSet *applyconfigurationsappsv1beta2.StatefulSetApplyConfiguration, opts v1.ApplyOptions) (result *appsv1beta2.StatefulSet, err error) + GetScale(ctx context.Context, statefulSetName string, options v1.GetOptions) (*appsv1beta2.Scale, error) + UpdateScale(ctx context.Context, statefulSetName string, scale *appsv1beta2.Scale, opts v1.UpdateOptions) (*appsv1beta2.Scale, error) + ApplyScale(ctx context.Context, statefulSetName string, scale *applyconfigurationsappsv1beta2.ScaleApplyConfiguration, opts v1.ApplyOptions) (*appsv1beta2.Scale, error) StatefulSetExpansion } // statefulSets implements StatefulSetInterface type statefulSets struct { - *gentype.ClientWithListAndApply[*v1beta2.StatefulSet, *v1beta2.StatefulSetList, *appsv1beta2.StatefulSetApplyConfiguration] + *gentype.ClientWithListAndApply[*appsv1beta2.StatefulSet, *appsv1beta2.StatefulSetList, *applyconfigurationsappsv1beta2.StatefulSetApplyConfiguration] } // newStatefulSets returns a StatefulSets func newStatefulSets(c *AppsV1beta2Client, namespace string) *statefulSets { return &statefulSets{ - gentype.NewClientWithListAndApply[*v1beta2.StatefulSet, *v1beta2.StatefulSetList, *appsv1beta2.StatefulSetApplyConfiguration]( + gentype.NewClientWithListAndApply[*appsv1beta2.StatefulSet, *appsv1beta2.StatefulSetList, *applyconfigurationsappsv1beta2.StatefulSetApplyConfiguration]( "statefulsets", c.RESTClient(), scheme.ParameterCodec, namespace, - func() *v1beta2.StatefulSet { return &v1beta2.StatefulSet{} }, - func() *v1beta2.StatefulSetList { return &v1beta2.StatefulSetList{} }), + func() *appsv1beta2.StatefulSet { return &appsv1beta2.StatefulSet{} }, + func() *appsv1beta2.StatefulSetList { return &appsv1beta2.StatefulSetList{} }, + gentype.PrefersProtobuf[*appsv1beta2.StatefulSet](), + ), } } -// GetScale takes name of the statefulSet, and returns the corresponding v1beta2.Scale object, and an error if there is any. -func (c *statefulSets) GetScale(ctx context.Context, statefulSetName string, options v1.GetOptions) (result *v1beta2.Scale, err error) { - result = &v1beta2.Scale{} +// GetScale takes name of the statefulSet, and returns the corresponding appsv1beta2.Scale object, and an error if there is any. +func (c *statefulSets) GetScale(ctx context.Context, statefulSetName string, options v1.GetOptions) (result *appsv1beta2.Scale, err error) { + result = &appsv1beta2.Scale{} err = c.GetClient().Get(). + UseProtobufAsDefault(). Namespace(c.GetNamespace()). Resource("statefulsets"). Name(statefulSetName). @@ -93,9 +96,10 @@ func (c *statefulSets) GetScale(ctx context.Context, statefulSetName string, opt } // UpdateScale takes the top resource name and the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any. 
-func (c *statefulSets) UpdateScale(ctx context.Context, statefulSetName string, scale *v1beta2.Scale, opts v1.UpdateOptions) (result *v1beta2.Scale, err error) { - result = &v1beta2.Scale{} +func (c *statefulSets) UpdateScale(ctx context.Context, statefulSetName string, scale *appsv1beta2.Scale, opts v1.UpdateOptions) (result *appsv1beta2.Scale, err error) { + result = &appsv1beta2.Scale{} err = c.GetClient().Put(). + UseProtobufAsDefault(). Namespace(c.GetNamespace()). Resource("statefulsets"). Name(statefulSetName). @@ -109,24 +113,24 @@ func (c *statefulSets) UpdateScale(ctx context.Context, statefulSetName string, // ApplyScale takes top resource name and the apply declarative configuration for scale, // applies it and returns the applied scale, and an error, if there is any. -func (c *statefulSets) ApplyScale(ctx context.Context, statefulSetName string, scale *appsv1beta2.ScaleApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.Scale, err error) { +func (c *statefulSets) ApplyScale(ctx context.Context, statefulSetName string, scale *applyconfigurationsappsv1beta2.ScaleApplyConfiguration, opts v1.ApplyOptions) (result *appsv1beta2.Scale, err error) { if scale == nil { return nil, fmt.Errorf("scale provided to ApplyScale must not be nil") } patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(scale) + request, err := apply.NewRequest(c.GetClient(), scale) if err != nil { return nil, err } - result = &v1beta2.Scale{} - err = c.GetClient().Patch(types.ApplyPatchType). + result = &appsv1beta2.Scale{} + err = request. + UseProtobufAsDefault(). Namespace(c.GetNamespace()). Resource("statefulsets"). Name(statefulSetName). SubResource("scale"). VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). Do(ctx). Into(result) return diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/authentication_client.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/authentication_client.go index 81be8b2e0..bd5df7798 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/authentication_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/authentication_client.go @@ -19,10 +19,10 @@ limitations under the License. 
package v1 import ( - "net/http" + http "net/http" - v1 "k8s.io/api/authentication/v1" - "k8s.io/client-go/kubernetes/scheme" + authenticationv1 "k8s.io/api/authentication/v1" + scheme "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -90,10 +90,10 @@ func New(c rest.Interface) *AuthenticationV1Client { } func setConfigDefaults(config *rest.Config) error { - gv := v1.SchemeGroupVersion + gv := authenticationv1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/fake/fake_authentication_client.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/fake/fake_authentication_client.go index 865239ff6..569782ff2 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/fake/fake_authentication_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/fake/fake_authentication_client.go @@ -29,11 +29,11 @@ type FakeAuthenticationV1 struct { } func (c *FakeAuthenticationV1) SelfSubjectReviews() v1.SelfSubjectReviewInterface { - return &FakeSelfSubjectReviews{c} + return newFakeSelfSubjectReviews(c) } func (c *FakeAuthenticationV1) TokenReviews() v1.TokenReviewInterface { - return &FakeTokenReviews{c} + return newFakeTokenReviews(c) } // RESTClient returns a RESTClient that is used to communicate diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/fake/fake_selfsubjectreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/fake/fake_selfsubjectreview.go index 7e7c3138a..3a101363f 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/fake/fake_selfsubjectreview.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/fake/fake_selfsubjectreview.go @@ -19,29 +19,26 @@ limitations under the License. package fake import ( - "context" - v1 "k8s.io/api/authentication/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - testing "k8s.io/client-go/testing" + gentype "k8s.io/client-go/gentype" + authenticationv1 "k8s.io/client-go/kubernetes/typed/authentication/v1" ) -// FakeSelfSubjectReviews implements SelfSubjectReviewInterface -type FakeSelfSubjectReviews struct { +// fakeSelfSubjectReviews implements SelfSubjectReviewInterface +type fakeSelfSubjectReviews struct { + *gentype.FakeClient[*v1.SelfSubjectReview] Fake *FakeAuthenticationV1 } -var selfsubjectreviewsResource = v1.SchemeGroupVersion.WithResource("selfsubjectreviews") - -var selfsubjectreviewsKind = v1.SchemeGroupVersion.WithKind("SelfSubjectReview") - -// Create takes the representation of a selfSubjectReview and creates it. Returns the server's representation of the selfSubjectReview, and an error, if there is any. -func (c *FakeSelfSubjectReviews) Create(ctx context.Context, selfSubjectReview *v1.SelfSubjectReview, opts metav1.CreateOptions) (result *v1.SelfSubjectReview, err error) { - emptyResult := &v1.SelfSubjectReview{} - obj, err := c.Fake. 
- Invokes(testing.NewRootCreateActionWithOptions(selfsubjectreviewsResource, selfSubjectReview, opts), emptyResult) - if obj == nil { - return emptyResult, err +func newFakeSelfSubjectReviews(fake *FakeAuthenticationV1) authenticationv1.SelfSubjectReviewInterface { + return &fakeSelfSubjectReviews{ + gentype.NewFakeClient[*v1.SelfSubjectReview]( + fake.Fake, + "", + v1.SchemeGroupVersion.WithResource("selfsubjectreviews"), + v1.SchemeGroupVersion.WithKind("SelfSubjectReview"), + func() *v1.SelfSubjectReview { return &v1.SelfSubjectReview{} }, + ), + fake, } - return obj.(*v1.SelfSubjectReview), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/fake/fake_tokenreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/fake/fake_tokenreview.go index a22f33542..26d5d3733 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/fake/fake_tokenreview.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/fake/fake_tokenreview.go @@ -19,29 +19,26 @@ limitations under the License. package fake import ( - "context" - v1 "k8s.io/api/authentication/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - testing "k8s.io/client-go/testing" + gentype "k8s.io/client-go/gentype" + authenticationv1 "k8s.io/client-go/kubernetes/typed/authentication/v1" ) -// FakeTokenReviews implements TokenReviewInterface -type FakeTokenReviews struct { +// fakeTokenReviews implements TokenReviewInterface +type fakeTokenReviews struct { + *gentype.FakeClient[*v1.TokenReview] Fake *FakeAuthenticationV1 } -var tokenreviewsResource = v1.SchemeGroupVersion.WithResource("tokenreviews") - -var tokenreviewsKind = v1.SchemeGroupVersion.WithKind("TokenReview") - -// Create takes the representation of a tokenReview and creates it. Returns the server's representation of the tokenReview, and an error, if there is any. -func (c *FakeTokenReviews) Create(ctx context.Context, tokenReview *v1.TokenReview, opts metav1.CreateOptions) (result *v1.TokenReview, err error) { - emptyResult := &v1.TokenReview{} - obj, err := c.Fake. - Invokes(testing.NewRootCreateActionWithOptions(tokenreviewsResource, tokenReview, opts), emptyResult) - if obj == nil { - return emptyResult, err +func newFakeTokenReviews(fake *FakeAuthenticationV1) authenticationv1.TokenReviewInterface { + return &fakeTokenReviews{ + gentype.NewFakeClient[*v1.TokenReview]( + fake.Fake, + "", + v1.SchemeGroupVersion.WithResource("tokenreviews"), + v1.SchemeGroupVersion.WithKind("TokenReview"), + func() *v1.TokenReview { return &v1.TokenReview{} }, + ), + fake, } - return obj.(*v1.TokenReview), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/selfsubjectreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/selfsubjectreview.go index 720dd9e7e..9113b6a6d 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/selfsubjectreview.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/selfsubjectreview.go @@ -19,9 +19,9 @@ limitations under the License. package v1 import ( - "context" + context "context" - v1 "k8s.io/api/authentication/v1" + authenticationv1 "k8s.io/api/authentication/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" @@ -35,23 +35,25 @@ type SelfSubjectReviewsGetter interface { // SelfSubjectReviewInterface has methods to work with SelfSubjectReview resources. 
type SelfSubjectReviewInterface interface { - Create(ctx context.Context, selfSubjectReview *v1.SelfSubjectReview, opts metav1.CreateOptions) (*v1.SelfSubjectReview, error) + Create(ctx context.Context, selfSubjectReview *authenticationv1.SelfSubjectReview, opts metav1.CreateOptions) (*authenticationv1.SelfSubjectReview, error) SelfSubjectReviewExpansion } // selfSubjectReviews implements SelfSubjectReviewInterface type selfSubjectReviews struct { - *gentype.Client[*v1.SelfSubjectReview] + *gentype.Client[*authenticationv1.SelfSubjectReview] } // newSelfSubjectReviews returns a SelfSubjectReviews func newSelfSubjectReviews(c *AuthenticationV1Client) *selfSubjectReviews { return &selfSubjectReviews{ - gentype.NewClient[*v1.SelfSubjectReview]( + gentype.NewClient[*authenticationv1.SelfSubjectReview]( "selfsubjectreviews", c.RESTClient(), scheme.ParameterCodec, "", - func() *v1.SelfSubjectReview { return &v1.SelfSubjectReview{} }), + func() *authenticationv1.SelfSubjectReview { return &authenticationv1.SelfSubjectReview{} }, + gentype.PrefersProtobuf[*authenticationv1.SelfSubjectReview](), + ), } } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/tokenreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/tokenreview.go index 52c55fab0..ce8b62d1b 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/tokenreview.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/tokenreview.go @@ -19,9 +19,9 @@ limitations under the License. package v1 import ( - "context" + context "context" - v1 "k8s.io/api/authentication/v1" + authenticationv1 "k8s.io/api/authentication/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" @@ -35,23 +35,25 @@ type TokenReviewsGetter interface { // TokenReviewInterface has methods to work with TokenReview resources. type TokenReviewInterface interface { - Create(ctx context.Context, tokenReview *v1.TokenReview, opts metav1.CreateOptions) (*v1.TokenReview, error) + Create(ctx context.Context, tokenReview *authenticationv1.TokenReview, opts metav1.CreateOptions) (*authenticationv1.TokenReview, error) TokenReviewExpansion } // tokenReviews implements TokenReviewInterface type tokenReviews struct { - *gentype.Client[*v1.TokenReview] + *gentype.Client[*authenticationv1.TokenReview] } // newTokenReviews returns a TokenReviews func newTokenReviews(c *AuthenticationV1Client) *tokenReviews { return &tokenReviews{ - gentype.NewClient[*v1.TokenReview]( + gentype.NewClient[*authenticationv1.TokenReview]( "tokenreviews", c.RESTClient(), scheme.ParameterCodec, "", - func() *v1.TokenReview { return &v1.TokenReview{} }), + func() *authenticationv1.TokenReview { return &authenticationv1.TokenReview{} }, + gentype.PrefersProtobuf[*authenticationv1.TokenReview](), + ), } } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1alpha1/authentication_client.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1alpha1/authentication_client.go index 187392661..821265859 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1alpha1/authentication_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1alpha1/authentication_client.go @@ -19,10 +19,10 @@ limitations under the License. 
package v1alpha1 import ( - "net/http" + http "net/http" - v1alpha1 "k8s.io/api/authentication/v1alpha1" - "k8s.io/client-go/kubernetes/scheme" + authenticationv1alpha1 "k8s.io/api/authentication/v1alpha1" + scheme "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -85,10 +85,10 @@ func New(c rest.Interface) *AuthenticationV1alpha1Client { } func setConfigDefaults(config *rest.Config) error { - gv := v1alpha1.SchemeGroupVersion + gv := authenticationv1alpha1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1alpha1/fake/fake_authentication_client.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1alpha1/fake/fake_authentication_client.go index 1a1a04f41..3c76aa5a5 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1alpha1/fake/fake_authentication_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1alpha1/fake/fake_authentication_client.go @@ -29,7 +29,7 @@ type FakeAuthenticationV1alpha1 struct { } func (c *FakeAuthenticationV1alpha1) SelfSubjectReviews() v1alpha1.SelfSubjectReviewInterface { - return &FakeSelfSubjectReviews{c} + return newFakeSelfSubjectReviews(c) } // RESTClient returns a RESTClient that is used to communicate diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1alpha1/fake/fake_selfsubjectreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1alpha1/fake/fake_selfsubjectreview.go index 680460f45..1c0ebe2f0 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1alpha1/fake/fake_selfsubjectreview.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1alpha1/fake/fake_selfsubjectreview.go @@ -19,29 +19,26 @@ limitations under the License. package fake import ( - "context" - v1alpha1 "k8s.io/api/authentication/v1alpha1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - testing "k8s.io/client-go/testing" + gentype "k8s.io/client-go/gentype" + authenticationv1alpha1 "k8s.io/client-go/kubernetes/typed/authentication/v1alpha1" ) -// FakeSelfSubjectReviews implements SelfSubjectReviewInterface -type FakeSelfSubjectReviews struct { +// fakeSelfSubjectReviews implements SelfSubjectReviewInterface +type fakeSelfSubjectReviews struct { + *gentype.FakeClient[*v1alpha1.SelfSubjectReview] Fake *FakeAuthenticationV1alpha1 } -var selfsubjectreviewsResource = v1alpha1.SchemeGroupVersion.WithResource("selfsubjectreviews") - -var selfsubjectreviewsKind = v1alpha1.SchemeGroupVersion.WithKind("SelfSubjectReview") - -// Create takes the representation of a selfSubjectReview and creates it. Returns the server's representation of the selfSubjectReview, and an error, if there is any. -func (c *FakeSelfSubjectReviews) Create(ctx context.Context, selfSubjectReview *v1alpha1.SelfSubjectReview, opts v1.CreateOptions) (result *v1alpha1.SelfSubjectReview, err error) { - emptyResult := &v1alpha1.SelfSubjectReview{} - obj, err := c.Fake. 
- Invokes(testing.NewRootCreateActionWithOptions(selfsubjectreviewsResource, selfSubjectReview, opts), emptyResult) - if obj == nil { - return emptyResult, err +func newFakeSelfSubjectReviews(fake *FakeAuthenticationV1alpha1) authenticationv1alpha1.SelfSubjectReviewInterface { + return &fakeSelfSubjectReviews{ + gentype.NewFakeClient[*v1alpha1.SelfSubjectReview]( + fake.Fake, + "", + v1alpha1.SchemeGroupVersion.WithResource("selfsubjectreviews"), + v1alpha1.SchemeGroupVersion.WithKind("SelfSubjectReview"), + func() *v1alpha1.SelfSubjectReview { return &v1alpha1.SelfSubjectReview{} }, + ), + fake, } - return obj.(*v1alpha1.SelfSubjectReview), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1alpha1/selfsubjectreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1alpha1/selfsubjectreview.go index f034bcdbe..8d5b176f7 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1alpha1/selfsubjectreview.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1alpha1/selfsubjectreview.go @@ -19,9 +19,9 @@ limitations under the License. package v1alpha1 import ( - "context" + context "context" - v1alpha1 "k8s.io/api/authentication/v1alpha1" + authenticationv1alpha1 "k8s.io/api/authentication/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" @@ -35,23 +35,25 @@ type SelfSubjectReviewsGetter interface { // SelfSubjectReviewInterface has methods to work with SelfSubjectReview resources. type SelfSubjectReviewInterface interface { - Create(ctx context.Context, selfSubjectReview *v1alpha1.SelfSubjectReview, opts v1.CreateOptions) (*v1alpha1.SelfSubjectReview, error) + Create(ctx context.Context, selfSubjectReview *authenticationv1alpha1.SelfSubjectReview, opts v1.CreateOptions) (*authenticationv1alpha1.SelfSubjectReview, error) SelfSubjectReviewExpansion } // selfSubjectReviews implements SelfSubjectReviewInterface type selfSubjectReviews struct { - *gentype.Client[*v1alpha1.SelfSubjectReview] + *gentype.Client[*authenticationv1alpha1.SelfSubjectReview] } // newSelfSubjectReviews returns a SelfSubjectReviews func newSelfSubjectReviews(c *AuthenticationV1alpha1Client) *selfSubjectReviews { return &selfSubjectReviews{ - gentype.NewClient[*v1alpha1.SelfSubjectReview]( + gentype.NewClient[*authenticationv1alpha1.SelfSubjectReview]( "selfsubjectreviews", c.RESTClient(), scheme.ParameterCodec, "", - func() *v1alpha1.SelfSubjectReview { return &v1alpha1.SelfSubjectReview{} }), + func() *authenticationv1alpha1.SelfSubjectReview { return &authenticationv1alpha1.SelfSubjectReview{} }, + gentype.PrefersProtobuf[*authenticationv1alpha1.SelfSubjectReview](), + ), } } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/authentication_client.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/authentication_client.go index 7823729e0..7b22e46e3 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/authentication_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/authentication_client.go @@ -19,10 +19,10 @@ limitations under the License. 
package v1beta1 import ( - "net/http" + http "net/http" - v1beta1 "k8s.io/api/authentication/v1beta1" - "k8s.io/client-go/kubernetes/scheme" + authenticationv1beta1 "k8s.io/api/authentication/v1beta1" + scheme "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -90,10 +90,10 @@ func New(c rest.Interface) *AuthenticationV1beta1Client { } func setConfigDefaults(config *rest.Config) error { - gv := v1beta1.SchemeGroupVersion + gv := authenticationv1beta1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake/fake_authentication_client.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake/fake_authentication_client.go index 1d72cf22f..28b5517ec 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake/fake_authentication_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake/fake_authentication_client.go @@ -29,11 +29,11 @@ type FakeAuthenticationV1beta1 struct { } func (c *FakeAuthenticationV1beta1) SelfSubjectReviews() v1beta1.SelfSubjectReviewInterface { - return &FakeSelfSubjectReviews{c} + return newFakeSelfSubjectReviews(c) } func (c *FakeAuthenticationV1beta1) TokenReviews() v1beta1.TokenReviewInterface { - return &FakeTokenReviews{c} + return newFakeTokenReviews(c) } // RESTClient returns a RESTClient that is used to communicate diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake/fake_selfsubjectreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake/fake_selfsubjectreview.go index 33e130e9c..416c288b8 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake/fake_selfsubjectreview.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake/fake_selfsubjectreview.go @@ -19,29 +19,26 @@ limitations under the License. package fake import ( - "context" - v1beta1 "k8s.io/api/authentication/v1beta1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - testing "k8s.io/client-go/testing" + gentype "k8s.io/client-go/gentype" + authenticationv1beta1 "k8s.io/client-go/kubernetes/typed/authentication/v1beta1" ) -// FakeSelfSubjectReviews implements SelfSubjectReviewInterface -type FakeSelfSubjectReviews struct { +// fakeSelfSubjectReviews implements SelfSubjectReviewInterface +type fakeSelfSubjectReviews struct { + *gentype.FakeClient[*v1beta1.SelfSubjectReview] Fake *FakeAuthenticationV1beta1 } -var selfsubjectreviewsResource = v1beta1.SchemeGroupVersion.WithResource("selfsubjectreviews") - -var selfsubjectreviewsKind = v1beta1.SchemeGroupVersion.WithKind("SelfSubjectReview") - -// Create takes the representation of a selfSubjectReview and creates it. Returns the server's representation of the selfSubjectReview, and an error, if there is any. -func (c *FakeSelfSubjectReviews) Create(ctx context.Context, selfSubjectReview *v1beta1.SelfSubjectReview, opts v1.CreateOptions) (result *v1beta1.SelfSubjectReview, err error) { - emptyResult := &v1beta1.SelfSubjectReview{} - obj, err := c.Fake. 
- Invokes(testing.NewRootCreateActionWithOptions(selfsubjectreviewsResource, selfSubjectReview, opts), emptyResult) - if obj == nil { - return emptyResult, err +func newFakeSelfSubjectReviews(fake *FakeAuthenticationV1beta1) authenticationv1beta1.SelfSubjectReviewInterface { + return &fakeSelfSubjectReviews{ + gentype.NewFakeClient[*v1beta1.SelfSubjectReview]( + fake.Fake, + "", + v1beta1.SchemeGroupVersion.WithResource("selfsubjectreviews"), + v1beta1.SchemeGroupVersion.WithKind("SelfSubjectReview"), + func() *v1beta1.SelfSubjectReview { return &v1beta1.SelfSubjectReview{} }, + ), + fake, } - return obj.(*v1beta1.SelfSubjectReview), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake/fake_tokenreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake/fake_tokenreview.go index b512f5c14..daafb9be1 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake/fake_tokenreview.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake/fake_tokenreview.go @@ -19,29 +19,26 @@ limitations under the License. package fake import ( - "context" - v1beta1 "k8s.io/api/authentication/v1beta1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - testing "k8s.io/client-go/testing" + gentype "k8s.io/client-go/gentype" + authenticationv1beta1 "k8s.io/client-go/kubernetes/typed/authentication/v1beta1" ) -// FakeTokenReviews implements TokenReviewInterface -type FakeTokenReviews struct { +// fakeTokenReviews implements TokenReviewInterface +type fakeTokenReviews struct { + *gentype.FakeClient[*v1beta1.TokenReview] Fake *FakeAuthenticationV1beta1 } -var tokenreviewsResource = v1beta1.SchemeGroupVersion.WithResource("tokenreviews") - -var tokenreviewsKind = v1beta1.SchemeGroupVersion.WithKind("TokenReview") - -// Create takes the representation of a tokenReview and creates it. Returns the server's representation of the tokenReview, and an error, if there is any. -func (c *FakeTokenReviews) Create(ctx context.Context, tokenReview *v1beta1.TokenReview, opts v1.CreateOptions) (result *v1beta1.TokenReview, err error) { - emptyResult := &v1beta1.TokenReview{} - obj, err := c.Fake. - Invokes(testing.NewRootCreateActionWithOptions(tokenreviewsResource, tokenReview, opts), emptyResult) - if obj == nil { - return emptyResult, err +func newFakeTokenReviews(fake *FakeAuthenticationV1beta1) authenticationv1beta1.TokenReviewInterface { + return &fakeTokenReviews{ + gentype.NewFakeClient[*v1beta1.TokenReview]( + fake.Fake, + "", + v1beta1.SchemeGroupVersion.WithResource("tokenreviews"), + v1beta1.SchemeGroupVersion.WithKind("TokenReview"), + func() *v1beta1.TokenReview { return &v1beta1.TokenReview{} }, + ), + fake, } - return obj.(*v1beta1.TokenReview), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/selfsubjectreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/selfsubjectreview.go index d083ba8fa..e29f81451 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/selfsubjectreview.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/selfsubjectreview.go @@ -19,9 +19,9 @@ limitations under the License. 
package v1beta1 import ( - "context" + context "context" - v1beta1 "k8s.io/api/authentication/v1beta1" + authenticationv1beta1 "k8s.io/api/authentication/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" @@ -35,23 +35,25 @@ type SelfSubjectReviewsGetter interface { // SelfSubjectReviewInterface has methods to work with SelfSubjectReview resources. type SelfSubjectReviewInterface interface { - Create(ctx context.Context, selfSubjectReview *v1beta1.SelfSubjectReview, opts v1.CreateOptions) (*v1beta1.SelfSubjectReview, error) + Create(ctx context.Context, selfSubjectReview *authenticationv1beta1.SelfSubjectReview, opts v1.CreateOptions) (*authenticationv1beta1.SelfSubjectReview, error) SelfSubjectReviewExpansion } // selfSubjectReviews implements SelfSubjectReviewInterface type selfSubjectReviews struct { - *gentype.Client[*v1beta1.SelfSubjectReview] + *gentype.Client[*authenticationv1beta1.SelfSubjectReview] } // newSelfSubjectReviews returns a SelfSubjectReviews func newSelfSubjectReviews(c *AuthenticationV1beta1Client) *selfSubjectReviews { return &selfSubjectReviews{ - gentype.NewClient[*v1beta1.SelfSubjectReview]( + gentype.NewClient[*authenticationv1beta1.SelfSubjectReview]( "selfsubjectreviews", c.RESTClient(), scheme.ParameterCodec, "", - func() *v1beta1.SelfSubjectReview { return &v1beta1.SelfSubjectReview{} }), + func() *authenticationv1beta1.SelfSubjectReview { return &authenticationv1beta1.SelfSubjectReview{} }, + gentype.PrefersProtobuf[*authenticationv1beta1.SelfSubjectReview](), + ), } } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/tokenreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/tokenreview.go index 982534935..5e1e002be 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/tokenreview.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/tokenreview.go @@ -19,9 +19,9 @@ limitations under the License. package v1beta1 import ( - "context" + context "context" - v1beta1 "k8s.io/api/authentication/v1beta1" + authenticationv1beta1 "k8s.io/api/authentication/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" @@ -35,23 +35,25 @@ type TokenReviewsGetter interface { // TokenReviewInterface has methods to work with TokenReview resources. 
type TokenReviewInterface interface { - Create(ctx context.Context, tokenReview *v1beta1.TokenReview, opts v1.CreateOptions) (*v1beta1.TokenReview, error) + Create(ctx context.Context, tokenReview *authenticationv1beta1.TokenReview, opts v1.CreateOptions) (*authenticationv1beta1.TokenReview, error) TokenReviewExpansion } // tokenReviews implements TokenReviewInterface type tokenReviews struct { - *gentype.Client[*v1beta1.TokenReview] + *gentype.Client[*authenticationv1beta1.TokenReview] } // newTokenReviews returns a TokenReviews func newTokenReviews(c *AuthenticationV1beta1Client) *tokenReviews { return &tokenReviews{ - gentype.NewClient[*v1beta1.TokenReview]( + gentype.NewClient[*authenticationv1beta1.TokenReview]( "tokenreviews", c.RESTClient(), scheme.ParameterCodec, "", - func() *v1beta1.TokenReview { return &v1beta1.TokenReview{} }), + func() *authenticationv1beta1.TokenReview { return &authenticationv1beta1.TokenReview{} }, + gentype.PrefersProtobuf[*authenticationv1beta1.TokenReview](), + ), } } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/authorization_client.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/authorization_client.go index edfc90346..71fb89b38 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/authorization_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/authorization_client.go @@ -19,10 +19,10 @@ limitations under the License. package v1 import ( - "net/http" + http "net/http" - v1 "k8s.io/api/authorization/v1" - "k8s.io/client-go/kubernetes/scheme" + authorizationv1 "k8s.io/api/authorization/v1" + scheme "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -100,10 +100,10 @@ func New(c rest.Interface) *AuthorizationV1Client { } func setConfigDefaults(config *rest.Config) error { - gv := v1.SchemeGroupVersion + gv := authorizationv1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_authorization_client.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_authorization_client.go index f7e823450..f96956bb4 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_authorization_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_authorization_client.go @@ -29,19 +29,19 @@ type FakeAuthorizationV1 struct { } func (c *FakeAuthorizationV1) LocalSubjectAccessReviews(namespace string) v1.LocalSubjectAccessReviewInterface { - return &FakeLocalSubjectAccessReviews{c, namespace} + return newFakeLocalSubjectAccessReviews(c, namespace) } func (c *FakeAuthorizationV1) SelfSubjectAccessReviews() v1.SelfSubjectAccessReviewInterface { - return &FakeSelfSubjectAccessReviews{c} + return newFakeSelfSubjectAccessReviews(c) } func (c *FakeAuthorizationV1) SelfSubjectRulesReviews() v1.SelfSubjectRulesReviewInterface { - return &FakeSelfSubjectRulesReviews{c} + return newFakeSelfSubjectRulesReviews(c) } func (c *FakeAuthorizationV1) SubjectAccessReviews() v1.SubjectAccessReviewInterface { - return &FakeSubjectAccessReviews{c} + return newFakeSubjectAccessReviews(c) } // RESTClient returns a RESTClient that is used to communicate diff --git 
a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_localsubjectaccessreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_localsubjectaccessreview.go index dd23481d3..4b07d8763 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_localsubjectaccessreview.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_localsubjectaccessreview.go @@ -19,31 +19,26 @@ limitations under the License. package fake import ( - "context" - v1 "k8s.io/api/authorization/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - testing "k8s.io/client-go/testing" + gentype "k8s.io/client-go/gentype" + authorizationv1 "k8s.io/client-go/kubernetes/typed/authorization/v1" ) -// FakeLocalSubjectAccessReviews implements LocalSubjectAccessReviewInterface -type FakeLocalSubjectAccessReviews struct { +// fakeLocalSubjectAccessReviews implements LocalSubjectAccessReviewInterface +type fakeLocalSubjectAccessReviews struct { + *gentype.FakeClient[*v1.LocalSubjectAccessReview] Fake *FakeAuthorizationV1 - ns string } -var localsubjectaccessreviewsResource = v1.SchemeGroupVersion.WithResource("localsubjectaccessreviews") - -var localsubjectaccessreviewsKind = v1.SchemeGroupVersion.WithKind("LocalSubjectAccessReview") - -// Create takes the representation of a localSubjectAccessReview and creates it. Returns the server's representation of the localSubjectAccessReview, and an error, if there is any. -func (c *FakeLocalSubjectAccessReviews) Create(ctx context.Context, localSubjectAccessReview *v1.LocalSubjectAccessReview, opts metav1.CreateOptions) (result *v1.LocalSubjectAccessReview, err error) { - emptyResult := &v1.LocalSubjectAccessReview{} - obj, err := c.Fake. - Invokes(testing.NewCreateActionWithOptions(localsubjectaccessreviewsResource, c.ns, localSubjectAccessReview, opts), emptyResult) - - if obj == nil { - return emptyResult, err +func newFakeLocalSubjectAccessReviews(fake *FakeAuthorizationV1, namespace string) authorizationv1.LocalSubjectAccessReviewInterface { + return &fakeLocalSubjectAccessReviews{ + gentype.NewFakeClient[*v1.LocalSubjectAccessReview]( + fake.Fake, + namespace, + v1.SchemeGroupVersion.WithResource("localsubjectaccessreviews"), + v1.SchemeGroupVersion.WithKind("LocalSubjectAccessReview"), + func() *v1.LocalSubjectAccessReview { return &v1.LocalSubjectAccessReview{} }, + ), + fake, } - return obj.(*v1.LocalSubjectAccessReview), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_selfsubjectaccessreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_selfsubjectaccessreview.go index d04b8502f..d55d555d4 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_selfsubjectaccessreview.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_selfsubjectaccessreview.go @@ -19,29 +19,26 @@ limitations under the License. 
package fake import ( - "context" - v1 "k8s.io/api/authorization/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - testing "k8s.io/client-go/testing" + gentype "k8s.io/client-go/gentype" + authorizationv1 "k8s.io/client-go/kubernetes/typed/authorization/v1" ) -// FakeSelfSubjectAccessReviews implements SelfSubjectAccessReviewInterface -type FakeSelfSubjectAccessReviews struct { +// fakeSelfSubjectAccessReviews implements SelfSubjectAccessReviewInterface +type fakeSelfSubjectAccessReviews struct { + *gentype.FakeClient[*v1.SelfSubjectAccessReview] Fake *FakeAuthorizationV1 } -var selfsubjectaccessreviewsResource = v1.SchemeGroupVersion.WithResource("selfsubjectaccessreviews") - -var selfsubjectaccessreviewsKind = v1.SchemeGroupVersion.WithKind("SelfSubjectAccessReview") - -// Create takes the representation of a selfSubjectAccessReview and creates it. Returns the server's representation of the selfSubjectAccessReview, and an error, if there is any. -func (c *FakeSelfSubjectAccessReviews) Create(ctx context.Context, selfSubjectAccessReview *v1.SelfSubjectAccessReview, opts metav1.CreateOptions) (result *v1.SelfSubjectAccessReview, err error) { - emptyResult := &v1.SelfSubjectAccessReview{} - obj, err := c.Fake. - Invokes(testing.NewRootCreateActionWithOptions(selfsubjectaccessreviewsResource, selfSubjectAccessReview, opts), emptyResult) - if obj == nil { - return emptyResult, err +func newFakeSelfSubjectAccessReviews(fake *FakeAuthorizationV1) authorizationv1.SelfSubjectAccessReviewInterface { + return &fakeSelfSubjectAccessReviews{ + gentype.NewFakeClient[*v1.SelfSubjectAccessReview]( + fake.Fake, + "", + v1.SchemeGroupVersion.WithResource("selfsubjectaccessreviews"), + v1.SchemeGroupVersion.WithKind("SelfSubjectAccessReview"), + func() *v1.SelfSubjectAccessReview { return &v1.SelfSubjectAccessReview{} }, + ), + fake, } - return obj.(*v1.SelfSubjectAccessReview), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_selfsubjectrulesreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_selfsubjectrulesreview.go index 71ed326f8..dcd1e05ea 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_selfsubjectrulesreview.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_selfsubjectrulesreview.go @@ -19,29 +19,26 @@ limitations under the License. package fake import ( - "context" - v1 "k8s.io/api/authorization/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - testing "k8s.io/client-go/testing" + gentype "k8s.io/client-go/gentype" + authorizationv1 "k8s.io/client-go/kubernetes/typed/authorization/v1" ) -// FakeSelfSubjectRulesReviews implements SelfSubjectRulesReviewInterface -type FakeSelfSubjectRulesReviews struct { +// fakeSelfSubjectRulesReviews implements SelfSubjectRulesReviewInterface +type fakeSelfSubjectRulesReviews struct { + *gentype.FakeClient[*v1.SelfSubjectRulesReview] Fake *FakeAuthorizationV1 } -var selfsubjectrulesreviewsResource = v1.SchemeGroupVersion.WithResource("selfsubjectrulesreviews") - -var selfsubjectrulesreviewsKind = v1.SchemeGroupVersion.WithKind("SelfSubjectRulesReview") - -// Create takes the representation of a selfSubjectRulesReview and creates it. Returns the server's representation of the selfSubjectRulesReview, and an error, if there is any. 
-func (c *FakeSelfSubjectRulesReviews) Create(ctx context.Context, selfSubjectRulesReview *v1.SelfSubjectRulesReview, opts metav1.CreateOptions) (result *v1.SelfSubjectRulesReview, err error) { - emptyResult := &v1.SelfSubjectRulesReview{} - obj, err := c.Fake. - Invokes(testing.NewRootCreateActionWithOptions(selfsubjectrulesreviewsResource, selfSubjectRulesReview, opts), emptyResult) - if obj == nil { - return emptyResult, err +func newFakeSelfSubjectRulesReviews(fake *FakeAuthorizationV1) authorizationv1.SelfSubjectRulesReviewInterface { + return &fakeSelfSubjectRulesReviews{ + gentype.NewFakeClient[*v1.SelfSubjectRulesReview]( + fake.Fake, + "", + v1.SchemeGroupVersion.WithResource("selfsubjectrulesreviews"), + v1.SchemeGroupVersion.WithKind("SelfSubjectRulesReview"), + func() *v1.SelfSubjectRulesReview { return &v1.SelfSubjectRulesReview{} }, + ), + fake, } - return obj.(*v1.SelfSubjectRulesReview), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_subjectaccessreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_subjectaccessreview.go index 358ba9aa7..4710ca6d9 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_subjectaccessreview.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_subjectaccessreview.go @@ -19,29 +19,26 @@ limitations under the License. package fake import ( - "context" - v1 "k8s.io/api/authorization/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - testing "k8s.io/client-go/testing" + gentype "k8s.io/client-go/gentype" + authorizationv1 "k8s.io/client-go/kubernetes/typed/authorization/v1" ) -// FakeSubjectAccessReviews implements SubjectAccessReviewInterface -type FakeSubjectAccessReviews struct { +// fakeSubjectAccessReviews implements SubjectAccessReviewInterface +type fakeSubjectAccessReviews struct { + *gentype.FakeClient[*v1.SubjectAccessReview] Fake *FakeAuthorizationV1 } -var subjectaccessreviewsResource = v1.SchemeGroupVersion.WithResource("subjectaccessreviews") - -var subjectaccessreviewsKind = v1.SchemeGroupVersion.WithKind("SubjectAccessReview") - -// Create takes the representation of a subjectAccessReview and creates it. Returns the server's representation of the subjectAccessReview, and an error, if there is any. -func (c *FakeSubjectAccessReviews) Create(ctx context.Context, subjectAccessReview *v1.SubjectAccessReview, opts metav1.CreateOptions) (result *v1.SubjectAccessReview, err error) { - emptyResult := &v1.SubjectAccessReview{} - obj, err := c.Fake. 
- Invokes(testing.NewRootCreateActionWithOptions(subjectaccessreviewsResource, subjectAccessReview, opts), emptyResult) - if obj == nil { - return emptyResult, err +func newFakeSubjectAccessReviews(fake *FakeAuthorizationV1) authorizationv1.SubjectAccessReviewInterface { + return &fakeSubjectAccessReviews{ + gentype.NewFakeClient[*v1.SubjectAccessReview]( + fake.Fake, + "", + v1.SchemeGroupVersion.WithResource("subjectaccessreviews"), + v1.SchemeGroupVersion.WithKind("SubjectAccessReview"), + func() *v1.SubjectAccessReview { return &v1.SubjectAccessReview{} }, + ), + fake, } - return obj.(*v1.SubjectAccessReview), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/localsubjectaccessreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/localsubjectaccessreview.go index 3d058941a..24327e87f 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/localsubjectaccessreview.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/localsubjectaccessreview.go @@ -19,9 +19,9 @@ limitations under the License. package v1 import ( - "context" + context "context" - v1 "k8s.io/api/authorization/v1" + authorizationv1 "k8s.io/api/authorization/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" @@ -35,23 +35,25 @@ type LocalSubjectAccessReviewsGetter interface { // LocalSubjectAccessReviewInterface has methods to work with LocalSubjectAccessReview resources. type LocalSubjectAccessReviewInterface interface { - Create(ctx context.Context, localSubjectAccessReview *v1.LocalSubjectAccessReview, opts metav1.CreateOptions) (*v1.LocalSubjectAccessReview, error) + Create(ctx context.Context, localSubjectAccessReview *authorizationv1.LocalSubjectAccessReview, opts metav1.CreateOptions) (*authorizationv1.LocalSubjectAccessReview, error) LocalSubjectAccessReviewExpansion } // localSubjectAccessReviews implements LocalSubjectAccessReviewInterface type localSubjectAccessReviews struct { - *gentype.Client[*v1.LocalSubjectAccessReview] + *gentype.Client[*authorizationv1.LocalSubjectAccessReview] } // newLocalSubjectAccessReviews returns a LocalSubjectAccessReviews func newLocalSubjectAccessReviews(c *AuthorizationV1Client, namespace string) *localSubjectAccessReviews { return &localSubjectAccessReviews{ - gentype.NewClient[*v1.LocalSubjectAccessReview]( + gentype.NewClient[*authorizationv1.LocalSubjectAccessReview]( "localsubjectaccessreviews", c.RESTClient(), scheme.ParameterCodec, namespace, - func() *v1.LocalSubjectAccessReview { return &v1.LocalSubjectAccessReview{} }), + func() *authorizationv1.LocalSubjectAccessReview { return &authorizationv1.LocalSubjectAccessReview{} }, + gentype.PrefersProtobuf[*authorizationv1.LocalSubjectAccessReview](), + ), } } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectaccessreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectaccessreview.go index 9e874bee5..014faeffb 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectaccessreview.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectaccessreview.go @@ -19,9 +19,9 @@ limitations under the License. 
package v1 import ( - "context" + context "context" - v1 "k8s.io/api/authorization/v1" + authorizationv1 "k8s.io/api/authorization/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" @@ -35,23 +35,25 @@ type SelfSubjectAccessReviewsGetter interface { // SelfSubjectAccessReviewInterface has methods to work with SelfSubjectAccessReview resources. type SelfSubjectAccessReviewInterface interface { - Create(ctx context.Context, selfSubjectAccessReview *v1.SelfSubjectAccessReview, opts metav1.CreateOptions) (*v1.SelfSubjectAccessReview, error) + Create(ctx context.Context, selfSubjectAccessReview *authorizationv1.SelfSubjectAccessReview, opts metav1.CreateOptions) (*authorizationv1.SelfSubjectAccessReview, error) SelfSubjectAccessReviewExpansion } // selfSubjectAccessReviews implements SelfSubjectAccessReviewInterface type selfSubjectAccessReviews struct { - *gentype.Client[*v1.SelfSubjectAccessReview] + *gentype.Client[*authorizationv1.SelfSubjectAccessReview] } // newSelfSubjectAccessReviews returns a SelfSubjectAccessReviews func newSelfSubjectAccessReviews(c *AuthorizationV1Client) *selfSubjectAccessReviews { return &selfSubjectAccessReviews{ - gentype.NewClient[*v1.SelfSubjectAccessReview]( + gentype.NewClient[*authorizationv1.SelfSubjectAccessReview]( "selfsubjectaccessreviews", c.RESTClient(), scheme.ParameterCodec, "", - func() *v1.SelfSubjectAccessReview { return &v1.SelfSubjectAccessReview{} }), + func() *authorizationv1.SelfSubjectAccessReview { return &authorizationv1.SelfSubjectAccessReview{} }, + gentype.PrefersProtobuf[*authorizationv1.SelfSubjectAccessReview](), + ), } } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectrulesreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectrulesreview.go index 567b63ec4..a14b2d7d5 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectrulesreview.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectrulesreview.go @@ -19,9 +19,9 @@ limitations under the License. package v1 import ( - "context" + context "context" - v1 "k8s.io/api/authorization/v1" + authorizationv1 "k8s.io/api/authorization/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" @@ -35,23 +35,25 @@ type SelfSubjectRulesReviewsGetter interface { // SelfSubjectRulesReviewInterface has methods to work with SelfSubjectRulesReview resources. 
type SelfSubjectRulesReviewInterface interface { - Create(ctx context.Context, selfSubjectRulesReview *v1.SelfSubjectRulesReview, opts metav1.CreateOptions) (*v1.SelfSubjectRulesReview, error) + Create(ctx context.Context, selfSubjectRulesReview *authorizationv1.SelfSubjectRulesReview, opts metav1.CreateOptions) (*authorizationv1.SelfSubjectRulesReview, error) SelfSubjectRulesReviewExpansion } // selfSubjectRulesReviews implements SelfSubjectRulesReviewInterface type selfSubjectRulesReviews struct { - *gentype.Client[*v1.SelfSubjectRulesReview] + *gentype.Client[*authorizationv1.SelfSubjectRulesReview] } // newSelfSubjectRulesReviews returns a SelfSubjectRulesReviews func newSelfSubjectRulesReviews(c *AuthorizationV1Client) *selfSubjectRulesReviews { return &selfSubjectRulesReviews{ - gentype.NewClient[*v1.SelfSubjectRulesReview]( + gentype.NewClient[*authorizationv1.SelfSubjectRulesReview]( "selfsubjectrulesreviews", c.RESTClient(), scheme.ParameterCodec, "", - func() *v1.SelfSubjectRulesReview { return &v1.SelfSubjectRulesReview{} }), + func() *authorizationv1.SelfSubjectRulesReview { return &authorizationv1.SelfSubjectRulesReview{} }, + gentype.PrefersProtobuf[*authorizationv1.SelfSubjectRulesReview](), + ), } } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/subjectaccessreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/subjectaccessreview.go index 52e8d74e5..bdc9955ad 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/subjectaccessreview.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/subjectaccessreview.go @@ -19,9 +19,9 @@ limitations under the License. package v1 import ( - "context" + context "context" - v1 "k8s.io/api/authorization/v1" + authorizationv1 "k8s.io/api/authorization/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" @@ -35,23 +35,25 @@ type SubjectAccessReviewsGetter interface { // SubjectAccessReviewInterface has methods to work with SubjectAccessReview resources. 
type SubjectAccessReviewInterface interface { - Create(ctx context.Context, subjectAccessReview *v1.SubjectAccessReview, opts metav1.CreateOptions) (*v1.SubjectAccessReview, error) + Create(ctx context.Context, subjectAccessReview *authorizationv1.SubjectAccessReview, opts metav1.CreateOptions) (*authorizationv1.SubjectAccessReview, error) SubjectAccessReviewExpansion } // subjectAccessReviews implements SubjectAccessReviewInterface type subjectAccessReviews struct { - *gentype.Client[*v1.SubjectAccessReview] + *gentype.Client[*authorizationv1.SubjectAccessReview] } // newSubjectAccessReviews returns a SubjectAccessReviews func newSubjectAccessReviews(c *AuthorizationV1Client) *subjectAccessReviews { return &subjectAccessReviews{ - gentype.NewClient[*v1.SubjectAccessReview]( + gentype.NewClient[*authorizationv1.SubjectAccessReview]( "subjectaccessreviews", c.RESTClient(), scheme.ParameterCodec, "", - func() *v1.SubjectAccessReview { return &v1.SubjectAccessReview{} }), + func() *authorizationv1.SubjectAccessReview { return &authorizationv1.SubjectAccessReview{} }, + gentype.PrefersProtobuf[*authorizationv1.SubjectAccessReview](), + ), } } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/authorization_client.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/authorization_client.go index 23b0edf27..f33619eb3 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/authorization_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/authorization_client.go @@ -19,10 +19,10 @@ limitations under the License. package v1beta1 import ( - "net/http" + http "net/http" - v1beta1 "k8s.io/api/authorization/v1beta1" - "k8s.io/client-go/kubernetes/scheme" + authorizationv1beta1 "k8s.io/api/authorization/v1beta1" + scheme "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -100,10 +100,10 @@ func New(c rest.Interface) *AuthorizationV1beta1Client { } func setConfigDefaults(config *rest.Config) error { - gv := v1beta1.SchemeGroupVersion + gv := authorizationv1beta1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_authorization_client.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_authorization_client.go index 8e328a57b..38fa676f4 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_authorization_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_authorization_client.go @@ -29,19 +29,19 @@ type FakeAuthorizationV1beta1 struct { } func (c *FakeAuthorizationV1beta1) LocalSubjectAccessReviews(namespace string) v1beta1.LocalSubjectAccessReviewInterface { - return &FakeLocalSubjectAccessReviews{c, namespace} + return newFakeLocalSubjectAccessReviews(c, namespace) } func (c *FakeAuthorizationV1beta1) SelfSubjectAccessReviews() v1beta1.SelfSubjectAccessReviewInterface { - return &FakeSelfSubjectAccessReviews{c} + return newFakeSelfSubjectAccessReviews(c) } func (c *FakeAuthorizationV1beta1) SelfSubjectRulesReviews() v1beta1.SelfSubjectRulesReviewInterface { - return &FakeSelfSubjectRulesReviews{c} + return newFakeSelfSubjectRulesReviews(c) } 
func (c *FakeAuthorizationV1beta1) SubjectAccessReviews() v1beta1.SubjectAccessReviewInterface { - return &FakeSubjectAccessReviews{c} + return newFakeSubjectAccessReviews(c) } // RESTClient returns a RESTClient that is used to communicate diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_localsubjectaccessreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_localsubjectaccessreview.go index e2bf62773..7a874c5ba 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_localsubjectaccessreview.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_localsubjectaccessreview.go @@ -19,31 +19,26 @@ limitations under the License. package fake import ( - "context" - v1beta1 "k8s.io/api/authorization/v1beta1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - testing "k8s.io/client-go/testing" + gentype "k8s.io/client-go/gentype" + authorizationv1beta1 "k8s.io/client-go/kubernetes/typed/authorization/v1beta1" ) -// FakeLocalSubjectAccessReviews implements LocalSubjectAccessReviewInterface -type FakeLocalSubjectAccessReviews struct { +// fakeLocalSubjectAccessReviews implements LocalSubjectAccessReviewInterface +type fakeLocalSubjectAccessReviews struct { + *gentype.FakeClient[*v1beta1.LocalSubjectAccessReview] Fake *FakeAuthorizationV1beta1 - ns string } -var localsubjectaccessreviewsResource = v1beta1.SchemeGroupVersion.WithResource("localsubjectaccessreviews") - -var localsubjectaccessreviewsKind = v1beta1.SchemeGroupVersion.WithKind("LocalSubjectAccessReview") - -// Create takes the representation of a localSubjectAccessReview and creates it. Returns the server's representation of the localSubjectAccessReview, and an error, if there is any. -func (c *FakeLocalSubjectAccessReviews) Create(ctx context.Context, localSubjectAccessReview *v1beta1.LocalSubjectAccessReview, opts v1.CreateOptions) (result *v1beta1.LocalSubjectAccessReview, err error) { - emptyResult := &v1beta1.LocalSubjectAccessReview{} - obj, err := c.Fake. - Invokes(testing.NewCreateActionWithOptions(localsubjectaccessreviewsResource, c.ns, localSubjectAccessReview, opts), emptyResult) - - if obj == nil { - return emptyResult, err +func newFakeLocalSubjectAccessReviews(fake *FakeAuthorizationV1beta1, namespace string) authorizationv1beta1.LocalSubjectAccessReviewInterface { + return &fakeLocalSubjectAccessReviews{ + gentype.NewFakeClient[*v1beta1.LocalSubjectAccessReview]( + fake.Fake, + namespace, + v1beta1.SchemeGroupVersion.WithResource("localsubjectaccessreviews"), + v1beta1.SchemeGroupVersion.WithKind("LocalSubjectAccessReview"), + func() *v1beta1.LocalSubjectAccessReview { return &v1beta1.LocalSubjectAccessReview{} }, + ), + fake, } - return obj.(*v1beta1.LocalSubjectAccessReview), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_selfsubjectaccessreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_selfsubjectaccessreview.go index 996e4d410..321a4bf36 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_selfsubjectaccessreview.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_selfsubjectaccessreview.go @@ -19,29 +19,26 @@ limitations under the License. 
package fake import ( - "context" - v1beta1 "k8s.io/api/authorization/v1beta1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - testing "k8s.io/client-go/testing" + gentype "k8s.io/client-go/gentype" + authorizationv1beta1 "k8s.io/client-go/kubernetes/typed/authorization/v1beta1" ) -// FakeSelfSubjectAccessReviews implements SelfSubjectAccessReviewInterface -type FakeSelfSubjectAccessReviews struct { +// fakeSelfSubjectAccessReviews implements SelfSubjectAccessReviewInterface +type fakeSelfSubjectAccessReviews struct { + *gentype.FakeClient[*v1beta1.SelfSubjectAccessReview] Fake *FakeAuthorizationV1beta1 } -var selfsubjectaccessreviewsResource = v1beta1.SchemeGroupVersion.WithResource("selfsubjectaccessreviews") - -var selfsubjectaccessreviewsKind = v1beta1.SchemeGroupVersion.WithKind("SelfSubjectAccessReview") - -// Create takes the representation of a selfSubjectAccessReview and creates it. Returns the server's representation of the selfSubjectAccessReview, and an error, if there is any. -func (c *FakeSelfSubjectAccessReviews) Create(ctx context.Context, selfSubjectAccessReview *v1beta1.SelfSubjectAccessReview, opts v1.CreateOptions) (result *v1beta1.SelfSubjectAccessReview, err error) { - emptyResult := &v1beta1.SelfSubjectAccessReview{} - obj, err := c.Fake. - Invokes(testing.NewRootCreateActionWithOptions(selfsubjectaccessreviewsResource, selfSubjectAccessReview, opts), emptyResult) - if obj == nil { - return emptyResult, err +func newFakeSelfSubjectAccessReviews(fake *FakeAuthorizationV1beta1) authorizationv1beta1.SelfSubjectAccessReviewInterface { + return &fakeSelfSubjectAccessReviews{ + gentype.NewFakeClient[*v1beta1.SelfSubjectAccessReview]( + fake.Fake, + "", + v1beta1.SchemeGroupVersion.WithResource("selfsubjectaccessreviews"), + v1beta1.SchemeGroupVersion.WithKind("SelfSubjectAccessReview"), + func() *v1beta1.SelfSubjectAccessReview { return &v1beta1.SelfSubjectAccessReview{} }, + ), + fake, } - return obj.(*v1beta1.SelfSubjectAccessReview), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_selfsubjectrulesreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_selfsubjectrulesreview.go index 6e4c75890..96a737c33 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_selfsubjectrulesreview.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_selfsubjectrulesreview.go @@ -19,29 +19,26 @@ limitations under the License. package fake import ( - "context" - v1beta1 "k8s.io/api/authorization/v1beta1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - testing "k8s.io/client-go/testing" + gentype "k8s.io/client-go/gentype" + authorizationv1beta1 "k8s.io/client-go/kubernetes/typed/authorization/v1beta1" ) -// FakeSelfSubjectRulesReviews implements SelfSubjectRulesReviewInterface -type FakeSelfSubjectRulesReviews struct { +// fakeSelfSubjectRulesReviews implements SelfSubjectRulesReviewInterface +type fakeSelfSubjectRulesReviews struct { + *gentype.FakeClient[*v1beta1.SelfSubjectRulesReview] Fake *FakeAuthorizationV1beta1 } -var selfsubjectrulesreviewsResource = v1beta1.SchemeGroupVersion.WithResource("selfsubjectrulesreviews") - -var selfsubjectrulesreviewsKind = v1beta1.SchemeGroupVersion.WithKind("SelfSubjectRulesReview") - -// Create takes the representation of a selfSubjectRulesReview and creates it. Returns the server's representation of the selfSubjectRulesReview, and an error, if there is any. 
-func (c *FakeSelfSubjectRulesReviews) Create(ctx context.Context, selfSubjectRulesReview *v1beta1.SelfSubjectRulesReview, opts v1.CreateOptions) (result *v1beta1.SelfSubjectRulesReview, err error) { - emptyResult := &v1beta1.SelfSubjectRulesReview{} - obj, err := c.Fake. - Invokes(testing.NewRootCreateActionWithOptions(selfsubjectrulesreviewsResource, selfSubjectRulesReview, opts), emptyResult) - if obj == nil { - return emptyResult, err +func newFakeSelfSubjectRulesReviews(fake *FakeAuthorizationV1beta1) authorizationv1beta1.SelfSubjectRulesReviewInterface { + return &fakeSelfSubjectRulesReviews{ + gentype.NewFakeClient[*v1beta1.SelfSubjectRulesReview]( + fake.Fake, + "", + v1beta1.SchemeGroupVersion.WithResource("selfsubjectrulesreviews"), + v1beta1.SchemeGroupVersion.WithKind("SelfSubjectRulesReview"), + func() *v1beta1.SelfSubjectRulesReview { return &v1beta1.SelfSubjectRulesReview{} }, + ), + fake, } - return obj.(*v1beta1.SelfSubjectRulesReview), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_subjectaccessreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_subjectaccessreview.go index aab6e08dc..fdb2e1727 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_subjectaccessreview.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_subjectaccessreview.go @@ -19,29 +19,26 @@ limitations under the License. package fake import ( - "context" - v1beta1 "k8s.io/api/authorization/v1beta1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - testing "k8s.io/client-go/testing" + gentype "k8s.io/client-go/gentype" + authorizationv1beta1 "k8s.io/client-go/kubernetes/typed/authorization/v1beta1" ) -// FakeSubjectAccessReviews implements SubjectAccessReviewInterface -type FakeSubjectAccessReviews struct { +// fakeSubjectAccessReviews implements SubjectAccessReviewInterface +type fakeSubjectAccessReviews struct { + *gentype.FakeClient[*v1beta1.SubjectAccessReview] Fake *FakeAuthorizationV1beta1 } -var subjectaccessreviewsResource = v1beta1.SchemeGroupVersion.WithResource("subjectaccessreviews") - -var subjectaccessreviewsKind = v1beta1.SchemeGroupVersion.WithKind("SubjectAccessReview") - -// Create takes the representation of a subjectAccessReview and creates it. Returns the server's representation of the subjectAccessReview, and an error, if there is any. -func (c *FakeSubjectAccessReviews) Create(ctx context.Context, subjectAccessReview *v1beta1.SubjectAccessReview, opts v1.CreateOptions) (result *v1beta1.SubjectAccessReview, err error) { - emptyResult := &v1beta1.SubjectAccessReview{} - obj, err := c.Fake. 
- Invokes(testing.NewRootCreateActionWithOptions(subjectaccessreviewsResource, subjectAccessReview, opts), emptyResult) - if obj == nil { - return emptyResult, err +func newFakeSubjectAccessReviews(fake *FakeAuthorizationV1beta1) authorizationv1beta1.SubjectAccessReviewInterface { + return &fakeSubjectAccessReviews{ + gentype.NewFakeClient[*v1beta1.SubjectAccessReview]( + fake.Fake, + "", + v1beta1.SchemeGroupVersion.WithResource("subjectaccessreviews"), + v1beta1.SchemeGroupVersion.WithKind("SubjectAccessReview"), + func() *v1beta1.SubjectAccessReview { return &v1beta1.SubjectAccessReview{} }, + ), + fake, } - return obj.(*v1beta1.SubjectAccessReview), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/localsubjectaccessreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/localsubjectaccessreview.go index 302c094b3..8dcc984f7 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/localsubjectaccessreview.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/localsubjectaccessreview.go @@ -19,9 +19,9 @@ limitations under the License. package v1beta1 import ( - "context" + context "context" - v1beta1 "k8s.io/api/authorization/v1beta1" + authorizationv1beta1 "k8s.io/api/authorization/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" @@ -35,23 +35,27 @@ type LocalSubjectAccessReviewsGetter interface { // LocalSubjectAccessReviewInterface has methods to work with LocalSubjectAccessReview resources. type LocalSubjectAccessReviewInterface interface { - Create(ctx context.Context, localSubjectAccessReview *v1beta1.LocalSubjectAccessReview, opts v1.CreateOptions) (*v1beta1.LocalSubjectAccessReview, error) + Create(ctx context.Context, localSubjectAccessReview *authorizationv1beta1.LocalSubjectAccessReview, opts v1.CreateOptions) (*authorizationv1beta1.LocalSubjectAccessReview, error) LocalSubjectAccessReviewExpansion } // localSubjectAccessReviews implements LocalSubjectAccessReviewInterface type localSubjectAccessReviews struct { - *gentype.Client[*v1beta1.LocalSubjectAccessReview] + *gentype.Client[*authorizationv1beta1.LocalSubjectAccessReview] } // newLocalSubjectAccessReviews returns a LocalSubjectAccessReviews func newLocalSubjectAccessReviews(c *AuthorizationV1beta1Client, namespace string) *localSubjectAccessReviews { return &localSubjectAccessReviews{ - gentype.NewClient[*v1beta1.LocalSubjectAccessReview]( + gentype.NewClient[*authorizationv1beta1.LocalSubjectAccessReview]( "localsubjectaccessreviews", c.RESTClient(), scheme.ParameterCodec, namespace, - func() *v1beta1.LocalSubjectAccessReview { return &v1beta1.LocalSubjectAccessReview{} }), + func() *authorizationv1beta1.LocalSubjectAccessReview { + return &authorizationv1beta1.LocalSubjectAccessReview{} + }, + gentype.PrefersProtobuf[*authorizationv1beta1.LocalSubjectAccessReview](), + ), } } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectaccessreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectaccessreview.go index 4b413dc4f..b1f111f3f 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectaccessreview.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectaccessreview.go @@ -19,9 +19,9 @@ limitations under the License. 
package v1beta1 import ( - "context" + context "context" - v1beta1 "k8s.io/api/authorization/v1beta1" + authorizationv1beta1 "k8s.io/api/authorization/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" @@ -35,23 +35,27 @@ type SelfSubjectAccessReviewsGetter interface { // SelfSubjectAccessReviewInterface has methods to work with SelfSubjectAccessReview resources. type SelfSubjectAccessReviewInterface interface { - Create(ctx context.Context, selfSubjectAccessReview *v1beta1.SelfSubjectAccessReview, opts v1.CreateOptions) (*v1beta1.SelfSubjectAccessReview, error) + Create(ctx context.Context, selfSubjectAccessReview *authorizationv1beta1.SelfSubjectAccessReview, opts v1.CreateOptions) (*authorizationv1beta1.SelfSubjectAccessReview, error) SelfSubjectAccessReviewExpansion } // selfSubjectAccessReviews implements SelfSubjectAccessReviewInterface type selfSubjectAccessReviews struct { - *gentype.Client[*v1beta1.SelfSubjectAccessReview] + *gentype.Client[*authorizationv1beta1.SelfSubjectAccessReview] } // newSelfSubjectAccessReviews returns a SelfSubjectAccessReviews func newSelfSubjectAccessReviews(c *AuthorizationV1beta1Client) *selfSubjectAccessReviews { return &selfSubjectAccessReviews{ - gentype.NewClient[*v1beta1.SelfSubjectAccessReview]( + gentype.NewClient[*authorizationv1beta1.SelfSubjectAccessReview]( "selfsubjectaccessreviews", c.RESTClient(), scheme.ParameterCodec, "", - func() *v1beta1.SelfSubjectAccessReview { return &v1beta1.SelfSubjectAccessReview{} }), + func() *authorizationv1beta1.SelfSubjectAccessReview { + return &authorizationv1beta1.SelfSubjectAccessReview{} + }, + gentype.PrefersProtobuf[*authorizationv1beta1.SelfSubjectAccessReview](), + ), } } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectrulesreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectrulesreview.go index b64cec301..11a11b8e6 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectrulesreview.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectrulesreview.go @@ -19,9 +19,9 @@ limitations under the License. package v1beta1 import ( - "context" + context "context" - v1beta1 "k8s.io/api/authorization/v1beta1" + authorizationv1beta1 "k8s.io/api/authorization/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" @@ -35,23 +35,27 @@ type SelfSubjectRulesReviewsGetter interface { // SelfSubjectRulesReviewInterface has methods to work with SelfSubjectRulesReview resources. 
type SelfSubjectRulesReviewInterface interface { - Create(ctx context.Context, selfSubjectRulesReview *v1beta1.SelfSubjectRulesReview, opts v1.CreateOptions) (*v1beta1.SelfSubjectRulesReview, error) + Create(ctx context.Context, selfSubjectRulesReview *authorizationv1beta1.SelfSubjectRulesReview, opts v1.CreateOptions) (*authorizationv1beta1.SelfSubjectRulesReview, error) SelfSubjectRulesReviewExpansion } // selfSubjectRulesReviews implements SelfSubjectRulesReviewInterface type selfSubjectRulesReviews struct { - *gentype.Client[*v1beta1.SelfSubjectRulesReview] + *gentype.Client[*authorizationv1beta1.SelfSubjectRulesReview] } // newSelfSubjectRulesReviews returns a SelfSubjectRulesReviews func newSelfSubjectRulesReviews(c *AuthorizationV1beta1Client) *selfSubjectRulesReviews { return &selfSubjectRulesReviews{ - gentype.NewClient[*v1beta1.SelfSubjectRulesReview]( + gentype.NewClient[*authorizationv1beta1.SelfSubjectRulesReview]( "selfsubjectrulesreviews", c.RESTClient(), scheme.ParameterCodec, "", - func() *v1beta1.SelfSubjectRulesReview { return &v1beta1.SelfSubjectRulesReview{} }), + func() *authorizationv1beta1.SelfSubjectRulesReview { + return &authorizationv1beta1.SelfSubjectRulesReview{} + }, + gentype.PrefersProtobuf[*authorizationv1beta1.SelfSubjectRulesReview](), + ), } } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/subjectaccessreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/subjectaccessreview.go index 3fca833a1..b62537521 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/subjectaccessreview.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/subjectaccessreview.go @@ -19,9 +19,9 @@ limitations under the License. package v1beta1 import ( - "context" + context "context" - v1beta1 "k8s.io/api/authorization/v1beta1" + authorizationv1beta1 "k8s.io/api/authorization/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" @@ -35,23 +35,25 @@ type SubjectAccessReviewsGetter interface { // SubjectAccessReviewInterface has methods to work with SubjectAccessReview resources. 
type SubjectAccessReviewInterface interface { - Create(ctx context.Context, subjectAccessReview *v1beta1.SubjectAccessReview, opts v1.CreateOptions) (*v1beta1.SubjectAccessReview, error) + Create(ctx context.Context, subjectAccessReview *authorizationv1beta1.SubjectAccessReview, opts v1.CreateOptions) (*authorizationv1beta1.SubjectAccessReview, error) SubjectAccessReviewExpansion } // subjectAccessReviews implements SubjectAccessReviewInterface type subjectAccessReviews struct { - *gentype.Client[*v1beta1.SubjectAccessReview] + *gentype.Client[*authorizationv1beta1.SubjectAccessReview] } // newSubjectAccessReviews returns a SubjectAccessReviews func newSubjectAccessReviews(c *AuthorizationV1beta1Client) *subjectAccessReviews { return &subjectAccessReviews{ - gentype.NewClient[*v1beta1.SubjectAccessReview]( + gentype.NewClient[*authorizationv1beta1.SubjectAccessReview]( "subjectaccessreviews", c.RESTClient(), scheme.ParameterCodec, "", - func() *v1beta1.SubjectAccessReview { return &v1beta1.SubjectAccessReview{} }), + func() *authorizationv1beta1.SubjectAccessReview { return &authorizationv1beta1.SubjectAccessReview{} }, + gentype.PrefersProtobuf[*authorizationv1beta1.SubjectAccessReview](), + ), } } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/autoscaling_client.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/autoscaling_client.go index f3a2752cb..6ceaaf82a 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/autoscaling_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/autoscaling_client.go @@ -19,10 +19,10 @@ limitations under the License. package v1 import ( - "net/http" + http "net/http" - v1 "k8s.io/api/autoscaling/v1" - "k8s.io/client-go/kubernetes/scheme" + autoscalingv1 "k8s.io/api/autoscaling/v1" + scheme "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -85,10 +85,10 @@ func New(c rest.Interface) *AutoscalingV1Client { } func setConfigDefaults(config *rest.Config) error { - gv := v1.SchemeGroupVersion + gv := autoscalingv1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/fake/fake_autoscaling_client.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/fake/fake_autoscaling_client.go index 99e26fcf3..3af0d3467 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/fake/fake_autoscaling_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/fake/fake_autoscaling_client.go @@ -29,7 +29,7 @@ type FakeAutoscalingV1 struct { } func (c *FakeAutoscalingV1) HorizontalPodAutoscalers(namespace string) v1.HorizontalPodAutoscalerInterface { - return &FakeHorizontalPodAutoscalers{c, namespace} + return newFakeHorizontalPodAutoscalers(c, namespace) } // RESTClient returns a RESTClient that is used to communicate diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/fake/fake_horizontalpodautoscaler.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/fake/fake_horizontalpodautoscaler.go index 23e2c391d..4f04d3256 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/fake/fake_horizontalpodautoscaler.go +++ 
b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/fake/fake_horizontalpodautoscaler.go @@ -19,179 +19,35 @@ limitations under the License. package fake import ( - "context" - json "encoding/json" - "fmt" - v1 "k8s.io/api/autoscaling/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" autoscalingv1 "k8s.io/client-go/applyconfigurations/autoscaling/v1" - testing "k8s.io/client-go/testing" + gentype "k8s.io/client-go/gentype" + typedautoscalingv1 "k8s.io/client-go/kubernetes/typed/autoscaling/v1" ) -// FakeHorizontalPodAutoscalers implements HorizontalPodAutoscalerInterface -type FakeHorizontalPodAutoscalers struct { +// fakeHorizontalPodAutoscalers implements HorizontalPodAutoscalerInterface +type fakeHorizontalPodAutoscalers struct { + *gentype.FakeClientWithListAndApply[*v1.HorizontalPodAutoscaler, *v1.HorizontalPodAutoscalerList, *autoscalingv1.HorizontalPodAutoscalerApplyConfiguration] Fake *FakeAutoscalingV1 - ns string -} - -var horizontalpodautoscalersResource = v1.SchemeGroupVersion.WithResource("horizontalpodautoscalers") - -var horizontalpodautoscalersKind = v1.SchemeGroupVersion.WithKind("HorizontalPodAutoscaler") - -// Get takes name of the horizontalPodAutoscaler, and returns the corresponding horizontalPodAutoscaler object, and an error if there is any. -func (c *FakeHorizontalPodAutoscalers) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.HorizontalPodAutoscaler, err error) { - emptyResult := &v1.HorizontalPodAutoscaler{} - obj, err := c.Fake. - Invokes(testing.NewGetActionWithOptions(horizontalpodautoscalersResource, c.ns, name, options), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1.HorizontalPodAutoscaler), err -} - -// List takes label and field selectors, and returns the list of HorizontalPodAutoscalers that match those selectors. -func (c *FakeHorizontalPodAutoscalers) List(ctx context.Context, opts metav1.ListOptions) (result *v1.HorizontalPodAutoscalerList, err error) { - emptyResult := &v1.HorizontalPodAutoscalerList{} - obj, err := c.Fake. - Invokes(testing.NewListActionWithOptions(horizontalpodautoscalersResource, horizontalpodautoscalersKind, c.ns, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1.HorizontalPodAutoscalerList{ListMeta: obj.(*v1.HorizontalPodAutoscalerList).ListMeta} - for _, item := range obj.(*v1.HorizontalPodAutoscalerList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested horizontalPodAutoscalers. -func (c *FakeHorizontalPodAutoscalers) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchActionWithOptions(horizontalpodautoscalersResource, c.ns, opts)) - -} - -// Create takes the representation of a horizontalPodAutoscaler and creates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any. -func (c *FakeHorizontalPodAutoscalers) Create(ctx context.Context, horizontalPodAutoscaler *v1.HorizontalPodAutoscaler, opts metav1.CreateOptions) (result *v1.HorizontalPodAutoscaler, err error) { - emptyResult := &v1.HorizontalPodAutoscaler{} - obj, err := c.Fake. 
- Invokes(testing.NewCreateActionWithOptions(horizontalpodautoscalersResource, c.ns, horizontalPodAutoscaler, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1.HorizontalPodAutoscaler), err -} - -// Update takes the representation of a horizontalPodAutoscaler and updates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any. -func (c *FakeHorizontalPodAutoscalers) Update(ctx context.Context, horizontalPodAutoscaler *v1.HorizontalPodAutoscaler, opts metav1.UpdateOptions) (result *v1.HorizontalPodAutoscaler, err error) { - emptyResult := &v1.HorizontalPodAutoscaler{} - obj, err := c.Fake. - Invokes(testing.NewUpdateActionWithOptions(horizontalpodautoscalersResource, c.ns, horizontalPodAutoscaler, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1.HorizontalPodAutoscaler), err } -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeHorizontalPodAutoscalers) UpdateStatus(ctx context.Context, horizontalPodAutoscaler *v1.HorizontalPodAutoscaler, opts metav1.UpdateOptions) (result *v1.HorizontalPodAutoscaler, err error) { - emptyResult := &v1.HorizontalPodAutoscaler{} - obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceActionWithOptions(horizontalpodautoscalersResource, "status", c.ns, horizontalPodAutoscaler, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1.HorizontalPodAutoscaler), err -} - -// Delete takes name of the horizontalPodAutoscaler and deletes it. Returns an error if one occurs. -func (c *FakeHorizontalPodAutoscalers) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(horizontalpodautoscalersResource, c.ns, name, opts), &v1.HorizontalPodAutoscaler{}) - - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeHorizontalPodAutoscalers) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewDeleteCollectionActionWithOptions(horizontalpodautoscalersResource, c.ns, opts, listOpts) - - _, err := c.Fake.Invokes(action, &v1.HorizontalPodAutoscalerList{}) - return err -} - -// Patch applies the patch and returns the patched horizontalPodAutoscaler. -func (c *FakeHorizontalPodAutoscalers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.HorizontalPodAutoscaler, err error) { - emptyResult := &v1.HorizontalPodAutoscaler{} - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceActionWithOptions(horizontalpodautoscalersResource, c.ns, name, pt, data, opts, subresources...), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1.HorizontalPodAutoscaler), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied horizontalPodAutoscaler. 
-func (c *FakeHorizontalPodAutoscalers) Apply(ctx context.Context, horizontalPodAutoscaler *autoscalingv1.HorizontalPodAutoscalerApplyConfiguration, opts metav1.ApplyOptions) (result *v1.HorizontalPodAutoscaler, err error) { - if horizontalPodAutoscaler == nil { - return nil, fmt.Errorf("horizontalPodAutoscaler provided to Apply must not be nil") - } - data, err := json.Marshal(horizontalPodAutoscaler) - if err != nil { - return nil, err - } - name := horizontalPodAutoscaler.Name - if name == nil { - return nil, fmt.Errorf("horizontalPodAutoscaler.Name must be provided to Apply") - } - emptyResult := &v1.HorizontalPodAutoscaler{} - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceActionWithOptions(horizontalpodautoscalersResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1.HorizontalPodAutoscaler), err -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *FakeHorizontalPodAutoscalers) ApplyStatus(ctx context.Context, horizontalPodAutoscaler *autoscalingv1.HorizontalPodAutoscalerApplyConfiguration, opts metav1.ApplyOptions) (result *v1.HorizontalPodAutoscaler, err error) { - if horizontalPodAutoscaler == nil { - return nil, fmt.Errorf("horizontalPodAutoscaler provided to Apply must not be nil") - } - data, err := json.Marshal(horizontalPodAutoscaler) - if err != nil { - return nil, err - } - name := horizontalPodAutoscaler.Name - if name == nil { - return nil, fmt.Errorf("horizontalPodAutoscaler.Name must be provided to Apply") - } - emptyResult := &v1.HorizontalPodAutoscaler{} - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceActionWithOptions(horizontalpodautoscalersResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) - - if obj == nil { - return emptyResult, err +func newFakeHorizontalPodAutoscalers(fake *FakeAutoscalingV1, namespace string) typedautoscalingv1.HorizontalPodAutoscalerInterface { + return &fakeHorizontalPodAutoscalers{ + gentype.NewFakeClientWithListAndApply[*v1.HorizontalPodAutoscaler, *v1.HorizontalPodAutoscalerList, *autoscalingv1.HorizontalPodAutoscalerApplyConfiguration]( + fake.Fake, + namespace, + v1.SchemeGroupVersion.WithResource("horizontalpodautoscalers"), + v1.SchemeGroupVersion.WithKind("HorizontalPodAutoscaler"), + func() *v1.HorizontalPodAutoscaler { return &v1.HorizontalPodAutoscaler{} }, + func() *v1.HorizontalPodAutoscalerList { return &v1.HorizontalPodAutoscalerList{} }, + func(dst, src *v1.HorizontalPodAutoscalerList) { dst.ListMeta = src.ListMeta }, + func(list *v1.HorizontalPodAutoscalerList) []*v1.HorizontalPodAutoscaler { + return gentype.ToPointerSlice(list.Items) + }, + func(list *v1.HorizontalPodAutoscalerList, items []*v1.HorizontalPodAutoscaler) { + list.Items = gentype.FromPointerSlice(items) + }, + ), + fake, } - return obj.(*v1.HorizontalPodAutoscaler), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/horizontalpodautoscaler.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/horizontalpodautoscaler.go index 4d29ac522..c5c69b7c6 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/horizontalpodautoscaler.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/horizontalpodautoscaler.go @@ -19,13 +19,13 @@ limitations under the License. 
package v1 import ( - "context" + context "context" - v1 "k8s.io/api/autoscaling/v1" + autoscalingv1 "k8s.io/api/autoscaling/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - autoscalingv1 "k8s.io/client-go/applyconfigurations/autoscaling/v1" + applyconfigurationsautoscalingv1 "k8s.io/client-go/applyconfigurations/autoscaling/v1" gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" ) @@ -38,36 +38,38 @@ type HorizontalPodAutoscalersGetter interface { // HorizontalPodAutoscalerInterface has methods to work with HorizontalPodAutoscaler resources. type HorizontalPodAutoscalerInterface interface { - Create(ctx context.Context, horizontalPodAutoscaler *v1.HorizontalPodAutoscaler, opts metav1.CreateOptions) (*v1.HorizontalPodAutoscaler, error) - Update(ctx context.Context, horizontalPodAutoscaler *v1.HorizontalPodAutoscaler, opts metav1.UpdateOptions) (*v1.HorizontalPodAutoscaler, error) + Create(ctx context.Context, horizontalPodAutoscaler *autoscalingv1.HorizontalPodAutoscaler, opts metav1.CreateOptions) (*autoscalingv1.HorizontalPodAutoscaler, error) + Update(ctx context.Context, horizontalPodAutoscaler *autoscalingv1.HorizontalPodAutoscaler, opts metav1.UpdateOptions) (*autoscalingv1.HorizontalPodAutoscaler, error) // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - UpdateStatus(ctx context.Context, horizontalPodAutoscaler *v1.HorizontalPodAutoscaler, opts metav1.UpdateOptions) (*v1.HorizontalPodAutoscaler, error) + UpdateStatus(ctx context.Context, horizontalPodAutoscaler *autoscalingv1.HorizontalPodAutoscaler, opts metav1.UpdateOptions) (*autoscalingv1.HorizontalPodAutoscaler, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error - Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.HorizontalPodAutoscaler, error) - List(ctx context.Context, opts metav1.ListOptions) (*v1.HorizontalPodAutoscalerList, error) + Get(ctx context.Context, name string, opts metav1.GetOptions) (*autoscalingv1.HorizontalPodAutoscaler, error) + List(ctx context.Context, opts metav1.ListOptions) (*autoscalingv1.HorizontalPodAutoscalerList, error) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.HorizontalPodAutoscaler, err error) - Apply(ctx context.Context, horizontalPodAutoscaler *autoscalingv1.HorizontalPodAutoscalerApplyConfiguration, opts metav1.ApplyOptions) (result *v1.HorizontalPodAutoscaler, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *autoscalingv1.HorizontalPodAutoscaler, err error) + Apply(ctx context.Context, horizontalPodAutoscaler *applyconfigurationsautoscalingv1.HorizontalPodAutoscalerApplyConfiguration, opts metav1.ApplyOptions) (result *autoscalingv1.HorizontalPodAutoscaler, err error) // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
- ApplyStatus(ctx context.Context, horizontalPodAutoscaler *autoscalingv1.HorizontalPodAutoscalerApplyConfiguration, opts metav1.ApplyOptions) (result *v1.HorizontalPodAutoscaler, err error) + ApplyStatus(ctx context.Context, horizontalPodAutoscaler *applyconfigurationsautoscalingv1.HorizontalPodAutoscalerApplyConfiguration, opts metav1.ApplyOptions) (result *autoscalingv1.HorizontalPodAutoscaler, err error) HorizontalPodAutoscalerExpansion } // horizontalPodAutoscalers implements HorizontalPodAutoscalerInterface type horizontalPodAutoscalers struct { - *gentype.ClientWithListAndApply[*v1.HorizontalPodAutoscaler, *v1.HorizontalPodAutoscalerList, *autoscalingv1.HorizontalPodAutoscalerApplyConfiguration] + *gentype.ClientWithListAndApply[*autoscalingv1.HorizontalPodAutoscaler, *autoscalingv1.HorizontalPodAutoscalerList, *applyconfigurationsautoscalingv1.HorizontalPodAutoscalerApplyConfiguration] } // newHorizontalPodAutoscalers returns a HorizontalPodAutoscalers func newHorizontalPodAutoscalers(c *AutoscalingV1Client, namespace string) *horizontalPodAutoscalers { return &horizontalPodAutoscalers{ - gentype.NewClientWithListAndApply[*v1.HorizontalPodAutoscaler, *v1.HorizontalPodAutoscalerList, *autoscalingv1.HorizontalPodAutoscalerApplyConfiguration]( + gentype.NewClientWithListAndApply[*autoscalingv1.HorizontalPodAutoscaler, *autoscalingv1.HorizontalPodAutoscalerList, *applyconfigurationsautoscalingv1.HorizontalPodAutoscalerApplyConfiguration]( "horizontalpodautoscalers", c.RESTClient(), scheme.ParameterCodec, namespace, - func() *v1.HorizontalPodAutoscaler { return &v1.HorizontalPodAutoscaler{} }, - func() *v1.HorizontalPodAutoscalerList { return &v1.HorizontalPodAutoscalerList{} }), + func() *autoscalingv1.HorizontalPodAutoscaler { return &autoscalingv1.HorizontalPodAutoscaler{} }, + func() *autoscalingv1.HorizontalPodAutoscalerList { return &autoscalingv1.HorizontalPodAutoscalerList{} }, + gentype.PrefersProtobuf[*autoscalingv1.HorizontalPodAutoscaler](), + ), } } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2/autoscaling_client.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2/autoscaling_client.go index 04d5d0f94..78a2609bf 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2/autoscaling_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2/autoscaling_client.go @@ -19,10 +19,10 @@ limitations under the License. 
package v2 import ( - "net/http" + http "net/http" - v2 "k8s.io/api/autoscaling/v2" - "k8s.io/client-go/kubernetes/scheme" + autoscalingv2 "k8s.io/api/autoscaling/v2" + scheme "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -85,10 +85,10 @@ func New(c rest.Interface) *AutoscalingV2Client { } func setConfigDefaults(config *rest.Config) error { - gv := v2.SchemeGroupVersion + gv := autoscalingv2.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2/fake/fake_autoscaling_client.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2/fake/fake_autoscaling_client.go index d4b907f4b..b0012fb24 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2/fake/fake_autoscaling_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2/fake/fake_autoscaling_client.go @@ -29,7 +29,7 @@ type FakeAutoscalingV2 struct { } func (c *FakeAutoscalingV2) HorizontalPodAutoscalers(namespace string) v2.HorizontalPodAutoscalerInterface { - return &FakeHorizontalPodAutoscalers{c, namespace} + return newFakeHorizontalPodAutoscalers(c, namespace) } // RESTClient returns a RESTClient that is used to communicate diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2/fake/fake_horizontalpodautoscaler.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2/fake/fake_horizontalpodautoscaler.go index 2ca3d27c9..66a1f56c7 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2/fake/fake_horizontalpodautoscaler.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2/fake/fake_horizontalpodautoscaler.go @@ -19,179 +19,35 @@ limitations under the License. package fake import ( - "context" - json "encoding/json" - "fmt" - v2 "k8s.io/api/autoscaling/v2" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" autoscalingv2 "k8s.io/client-go/applyconfigurations/autoscaling/v2" - testing "k8s.io/client-go/testing" + gentype "k8s.io/client-go/gentype" + typedautoscalingv2 "k8s.io/client-go/kubernetes/typed/autoscaling/v2" ) -// FakeHorizontalPodAutoscalers implements HorizontalPodAutoscalerInterface -type FakeHorizontalPodAutoscalers struct { +// fakeHorizontalPodAutoscalers implements HorizontalPodAutoscalerInterface +type fakeHorizontalPodAutoscalers struct { + *gentype.FakeClientWithListAndApply[*v2.HorizontalPodAutoscaler, *v2.HorizontalPodAutoscalerList, *autoscalingv2.HorizontalPodAutoscalerApplyConfiguration] Fake *FakeAutoscalingV2 - ns string -} - -var horizontalpodautoscalersResource = v2.SchemeGroupVersion.WithResource("horizontalpodautoscalers") - -var horizontalpodautoscalersKind = v2.SchemeGroupVersion.WithKind("HorizontalPodAutoscaler") - -// Get takes name of the horizontalPodAutoscaler, and returns the corresponding horizontalPodAutoscaler object, and an error if there is any. -func (c *FakeHorizontalPodAutoscalers) Get(ctx context.Context, name string, options v1.GetOptions) (result *v2.HorizontalPodAutoscaler, err error) { - emptyResult := &v2.HorizontalPodAutoscaler{} - obj, err := c.Fake. 
- Invokes(testing.NewGetActionWithOptions(horizontalpodautoscalersResource, c.ns, name, options), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v2.HorizontalPodAutoscaler), err -} - -// List takes label and field selectors, and returns the list of HorizontalPodAutoscalers that match those selectors. -func (c *FakeHorizontalPodAutoscalers) List(ctx context.Context, opts v1.ListOptions) (result *v2.HorizontalPodAutoscalerList, err error) { - emptyResult := &v2.HorizontalPodAutoscalerList{} - obj, err := c.Fake. - Invokes(testing.NewListActionWithOptions(horizontalpodautoscalersResource, horizontalpodautoscalersKind, c.ns, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v2.HorizontalPodAutoscalerList{ListMeta: obj.(*v2.HorizontalPodAutoscalerList).ListMeta} - for _, item := range obj.(*v2.HorizontalPodAutoscalerList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested horizontalPodAutoscalers. -func (c *FakeHorizontalPodAutoscalers) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchActionWithOptions(horizontalpodautoscalersResource, c.ns, opts)) - -} - -// Create takes the representation of a horizontalPodAutoscaler and creates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any. -func (c *FakeHorizontalPodAutoscalers) Create(ctx context.Context, horizontalPodAutoscaler *v2.HorizontalPodAutoscaler, opts v1.CreateOptions) (result *v2.HorizontalPodAutoscaler, err error) { - emptyResult := &v2.HorizontalPodAutoscaler{} - obj, err := c.Fake. - Invokes(testing.NewCreateActionWithOptions(horizontalpodautoscalersResource, c.ns, horizontalPodAutoscaler, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v2.HorizontalPodAutoscaler), err -} - -// Update takes the representation of a horizontalPodAutoscaler and updates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any. -func (c *FakeHorizontalPodAutoscalers) Update(ctx context.Context, horizontalPodAutoscaler *v2.HorizontalPodAutoscaler, opts v1.UpdateOptions) (result *v2.HorizontalPodAutoscaler, err error) { - emptyResult := &v2.HorizontalPodAutoscaler{} - obj, err := c.Fake. - Invokes(testing.NewUpdateActionWithOptions(horizontalpodautoscalersResource, c.ns, horizontalPodAutoscaler, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v2.HorizontalPodAutoscaler), err } -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeHorizontalPodAutoscalers) UpdateStatus(ctx context.Context, horizontalPodAutoscaler *v2.HorizontalPodAutoscaler, opts v1.UpdateOptions) (result *v2.HorizontalPodAutoscaler, err error) { - emptyResult := &v2.HorizontalPodAutoscaler{} - obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceActionWithOptions(horizontalpodautoscalersResource, "status", c.ns, horizontalPodAutoscaler, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v2.HorizontalPodAutoscaler), err -} - -// Delete takes name of the horizontalPodAutoscaler and deletes it. 
Returns an error if one occurs. -func (c *FakeHorizontalPodAutoscalers) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(horizontalpodautoscalersResource, c.ns, name, opts), &v2.HorizontalPodAutoscaler{}) - - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeHorizontalPodAutoscalers) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionActionWithOptions(horizontalpodautoscalersResource, c.ns, opts, listOpts) - - _, err := c.Fake.Invokes(action, &v2.HorizontalPodAutoscalerList{}) - return err -} - -// Patch applies the patch and returns the patched horizontalPodAutoscaler. -func (c *FakeHorizontalPodAutoscalers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2.HorizontalPodAutoscaler, err error) { - emptyResult := &v2.HorizontalPodAutoscaler{} - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceActionWithOptions(horizontalpodautoscalersResource, c.ns, name, pt, data, opts, subresources...), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v2.HorizontalPodAutoscaler), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied horizontalPodAutoscaler. -func (c *FakeHorizontalPodAutoscalers) Apply(ctx context.Context, horizontalPodAutoscaler *autoscalingv2.HorizontalPodAutoscalerApplyConfiguration, opts v1.ApplyOptions) (result *v2.HorizontalPodAutoscaler, err error) { - if horizontalPodAutoscaler == nil { - return nil, fmt.Errorf("horizontalPodAutoscaler provided to Apply must not be nil") - } - data, err := json.Marshal(horizontalPodAutoscaler) - if err != nil { - return nil, err - } - name := horizontalPodAutoscaler.Name - if name == nil { - return nil, fmt.Errorf("horizontalPodAutoscaler.Name must be provided to Apply") - } - emptyResult := &v2.HorizontalPodAutoscaler{} - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceActionWithOptions(horizontalpodautoscalersResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v2.HorizontalPodAutoscaler), err -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *FakeHorizontalPodAutoscalers) ApplyStatus(ctx context.Context, horizontalPodAutoscaler *autoscalingv2.HorizontalPodAutoscalerApplyConfiguration, opts v1.ApplyOptions) (result *v2.HorizontalPodAutoscaler, err error) { - if horizontalPodAutoscaler == nil { - return nil, fmt.Errorf("horizontalPodAutoscaler provided to Apply must not be nil") - } - data, err := json.Marshal(horizontalPodAutoscaler) - if err != nil { - return nil, err - } - name := horizontalPodAutoscaler.Name - if name == nil { - return nil, fmt.Errorf("horizontalPodAutoscaler.Name must be provided to Apply") - } - emptyResult := &v2.HorizontalPodAutoscaler{} - obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceActionWithOptions(horizontalpodautoscalersResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) - - if obj == nil { - return emptyResult, err +func newFakeHorizontalPodAutoscalers(fake *FakeAutoscalingV2, namespace string) typedautoscalingv2.HorizontalPodAutoscalerInterface { + return &fakeHorizontalPodAutoscalers{ + gentype.NewFakeClientWithListAndApply[*v2.HorizontalPodAutoscaler, *v2.HorizontalPodAutoscalerList, *autoscalingv2.HorizontalPodAutoscalerApplyConfiguration]( + fake.Fake, + namespace, + v2.SchemeGroupVersion.WithResource("horizontalpodautoscalers"), + v2.SchemeGroupVersion.WithKind("HorizontalPodAutoscaler"), + func() *v2.HorizontalPodAutoscaler { return &v2.HorizontalPodAutoscaler{} }, + func() *v2.HorizontalPodAutoscalerList { return &v2.HorizontalPodAutoscalerList{} }, + func(dst, src *v2.HorizontalPodAutoscalerList) { dst.ListMeta = src.ListMeta }, + func(list *v2.HorizontalPodAutoscalerList) []*v2.HorizontalPodAutoscaler { + return gentype.ToPointerSlice(list.Items) + }, + func(list *v2.HorizontalPodAutoscalerList, items []*v2.HorizontalPodAutoscaler) { + list.Items = gentype.FromPointerSlice(items) + }, + ), + fake, } - return obj.(*v2.HorizontalPodAutoscaler), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2/horizontalpodautoscaler.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2/horizontalpodautoscaler.go index dbce8d102..9eb4a6d93 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2/horizontalpodautoscaler.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2/horizontalpodautoscaler.go @@ -19,13 +19,13 @@ limitations under the License. package v2 import ( - "context" + context "context" - v2 "k8s.io/api/autoscaling/v2" + autoscalingv2 "k8s.io/api/autoscaling/v2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - autoscalingv2 "k8s.io/client-go/applyconfigurations/autoscaling/v2" + applyconfigurationsautoscalingv2 "k8s.io/client-go/applyconfigurations/autoscaling/v2" gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" ) @@ -38,36 +38,38 @@ type HorizontalPodAutoscalersGetter interface { // HorizontalPodAutoscalerInterface has methods to work with HorizontalPodAutoscaler resources. type HorizontalPodAutoscalerInterface interface { - Create(ctx context.Context, horizontalPodAutoscaler *v2.HorizontalPodAutoscaler, opts v1.CreateOptions) (*v2.HorizontalPodAutoscaler, error) - Update(ctx context.Context, horizontalPodAutoscaler *v2.HorizontalPodAutoscaler, opts v1.UpdateOptions) (*v2.HorizontalPodAutoscaler, error) + Create(ctx context.Context, horizontalPodAutoscaler *autoscalingv2.HorizontalPodAutoscaler, opts v1.CreateOptions) (*autoscalingv2.HorizontalPodAutoscaler, error) + Update(ctx context.Context, horizontalPodAutoscaler *autoscalingv2.HorizontalPodAutoscaler, opts v1.UpdateOptions) (*autoscalingv2.HorizontalPodAutoscaler, error) // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
- UpdateStatus(ctx context.Context, horizontalPodAutoscaler *v2.HorizontalPodAutoscaler, opts v1.UpdateOptions) (*v2.HorizontalPodAutoscaler, error) + UpdateStatus(ctx context.Context, horizontalPodAutoscaler *autoscalingv2.HorizontalPodAutoscaler, opts v1.UpdateOptions) (*autoscalingv2.HorizontalPodAutoscaler, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v2.HorizontalPodAutoscaler, error) - List(ctx context.Context, opts v1.ListOptions) (*v2.HorizontalPodAutoscalerList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*autoscalingv2.HorizontalPodAutoscaler, error) + List(ctx context.Context, opts v1.ListOptions) (*autoscalingv2.HorizontalPodAutoscalerList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2.HorizontalPodAutoscaler, err error) - Apply(ctx context.Context, horizontalPodAutoscaler *autoscalingv2.HorizontalPodAutoscalerApplyConfiguration, opts v1.ApplyOptions) (result *v2.HorizontalPodAutoscaler, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *autoscalingv2.HorizontalPodAutoscaler, err error) + Apply(ctx context.Context, horizontalPodAutoscaler *applyconfigurationsautoscalingv2.HorizontalPodAutoscalerApplyConfiguration, opts v1.ApplyOptions) (result *autoscalingv2.HorizontalPodAutoscaler, err error) // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
- ApplyStatus(ctx context.Context, horizontalPodAutoscaler *autoscalingv2.HorizontalPodAutoscalerApplyConfiguration, opts v1.ApplyOptions) (result *v2.HorizontalPodAutoscaler, err error) + ApplyStatus(ctx context.Context, horizontalPodAutoscaler *applyconfigurationsautoscalingv2.HorizontalPodAutoscalerApplyConfiguration, opts v1.ApplyOptions) (result *autoscalingv2.HorizontalPodAutoscaler, err error) HorizontalPodAutoscalerExpansion } // horizontalPodAutoscalers implements HorizontalPodAutoscalerInterface type horizontalPodAutoscalers struct { - *gentype.ClientWithListAndApply[*v2.HorizontalPodAutoscaler, *v2.HorizontalPodAutoscalerList, *autoscalingv2.HorizontalPodAutoscalerApplyConfiguration] + *gentype.ClientWithListAndApply[*autoscalingv2.HorizontalPodAutoscaler, *autoscalingv2.HorizontalPodAutoscalerList, *applyconfigurationsautoscalingv2.HorizontalPodAutoscalerApplyConfiguration] } // newHorizontalPodAutoscalers returns a HorizontalPodAutoscalers func newHorizontalPodAutoscalers(c *AutoscalingV2Client, namespace string) *horizontalPodAutoscalers { return &horizontalPodAutoscalers{ - gentype.NewClientWithListAndApply[*v2.HorizontalPodAutoscaler, *v2.HorizontalPodAutoscalerList, *autoscalingv2.HorizontalPodAutoscalerApplyConfiguration]( + gentype.NewClientWithListAndApply[*autoscalingv2.HorizontalPodAutoscaler, *autoscalingv2.HorizontalPodAutoscalerList, *applyconfigurationsautoscalingv2.HorizontalPodAutoscalerApplyConfiguration]( "horizontalpodautoscalers", c.RESTClient(), scheme.ParameterCodec, namespace, - func() *v2.HorizontalPodAutoscaler { return &v2.HorizontalPodAutoscaler{} }, - func() *v2.HorizontalPodAutoscalerList { return &v2.HorizontalPodAutoscalerList{} }), + func() *autoscalingv2.HorizontalPodAutoscaler { return &autoscalingv2.HorizontalPodAutoscaler{} }, + func() *autoscalingv2.HorizontalPodAutoscalerList { return &autoscalingv2.HorizontalPodAutoscalerList{} }, + gentype.PrefersProtobuf[*autoscalingv2.HorizontalPodAutoscaler](), + ), } } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/autoscaling_client.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/autoscaling_client.go index d1dde5ed1..1fcda17c8 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/autoscaling_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/autoscaling_client.go @@ -19,10 +19,10 @@ limitations under the License. 
package v2beta1 import ( - "net/http" + http "net/http" - v2beta1 "k8s.io/api/autoscaling/v2beta1" - "k8s.io/client-go/kubernetes/scheme" + autoscalingv2beta1 "k8s.io/api/autoscaling/v2beta1" + scheme "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -85,10 +85,10 @@ func New(c rest.Interface) *AutoscalingV2beta1Client { } func setConfigDefaults(config *rest.Config) error { - gv := v2beta1.SchemeGroupVersion + gv := autoscalingv2beta1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/fake/fake_autoscaling_client.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/fake/fake_autoscaling_client.go index be8e0f48e..ca4b461c2 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/fake/fake_autoscaling_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/fake/fake_autoscaling_client.go @@ -29,7 +29,7 @@ type FakeAutoscalingV2beta1 struct { } func (c *FakeAutoscalingV2beta1) HorizontalPodAutoscalers(namespace string) v2beta1.HorizontalPodAutoscalerInterface { - return &FakeHorizontalPodAutoscalers{c, namespace} + return newFakeHorizontalPodAutoscalers(c, namespace) } // RESTClient returns a RESTClient that is used to communicate diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/fake/fake_horizontalpodautoscaler.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/fake/fake_horizontalpodautoscaler.go index 7f99b5e8f..8f28fb30c 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/fake/fake_horizontalpodautoscaler.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/fake/fake_horizontalpodautoscaler.go @@ -19,179 +19,35 @@ limitations under the License. package fake import ( - "context" - json "encoding/json" - "fmt" - v2beta1 "k8s.io/api/autoscaling/v2beta1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" autoscalingv2beta1 "k8s.io/client-go/applyconfigurations/autoscaling/v2beta1" - testing "k8s.io/client-go/testing" + gentype "k8s.io/client-go/gentype" + typedautoscalingv2beta1 "k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1" ) -// FakeHorizontalPodAutoscalers implements HorizontalPodAutoscalerInterface -type FakeHorizontalPodAutoscalers struct { +// fakeHorizontalPodAutoscalers implements HorizontalPodAutoscalerInterface +type fakeHorizontalPodAutoscalers struct { + *gentype.FakeClientWithListAndApply[*v2beta1.HorizontalPodAutoscaler, *v2beta1.HorizontalPodAutoscalerList, *autoscalingv2beta1.HorizontalPodAutoscalerApplyConfiguration] Fake *FakeAutoscalingV2beta1 - ns string -} - -var horizontalpodautoscalersResource = v2beta1.SchemeGroupVersion.WithResource("horizontalpodautoscalers") - -var horizontalpodautoscalersKind = v2beta1.SchemeGroupVersion.WithKind("HorizontalPodAutoscaler") - -// Get takes name of the horizontalPodAutoscaler, and returns the corresponding horizontalPodAutoscaler object, and an error if there is any. 
-func (c *FakeHorizontalPodAutoscalers) Get(ctx context.Context, name string, options v1.GetOptions) (result *v2beta1.HorizontalPodAutoscaler, err error) { - emptyResult := &v2beta1.HorizontalPodAutoscaler{} - obj, err := c.Fake. - Invokes(testing.NewGetActionWithOptions(horizontalpodautoscalersResource, c.ns, name, options), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v2beta1.HorizontalPodAutoscaler), err -} - -// List takes label and field selectors, and returns the list of HorizontalPodAutoscalers that match those selectors. -func (c *FakeHorizontalPodAutoscalers) List(ctx context.Context, opts v1.ListOptions) (result *v2beta1.HorizontalPodAutoscalerList, err error) { - emptyResult := &v2beta1.HorizontalPodAutoscalerList{} - obj, err := c.Fake. - Invokes(testing.NewListActionWithOptions(horizontalpodautoscalersResource, horizontalpodautoscalersKind, c.ns, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v2beta1.HorizontalPodAutoscalerList{ListMeta: obj.(*v2beta1.HorizontalPodAutoscalerList).ListMeta} - for _, item := range obj.(*v2beta1.HorizontalPodAutoscalerList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested horizontalPodAutoscalers. -func (c *FakeHorizontalPodAutoscalers) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchActionWithOptions(horizontalpodautoscalersResource, c.ns, opts)) - -} - -// Create takes the representation of a horizontalPodAutoscaler and creates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any. -func (c *FakeHorizontalPodAutoscalers) Create(ctx context.Context, horizontalPodAutoscaler *v2beta1.HorizontalPodAutoscaler, opts v1.CreateOptions) (result *v2beta1.HorizontalPodAutoscaler, err error) { - emptyResult := &v2beta1.HorizontalPodAutoscaler{} - obj, err := c.Fake. - Invokes(testing.NewCreateActionWithOptions(horizontalpodautoscalersResource, c.ns, horizontalPodAutoscaler, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v2beta1.HorizontalPodAutoscaler), err -} - -// Update takes the representation of a horizontalPodAutoscaler and updates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any. -func (c *FakeHorizontalPodAutoscalers) Update(ctx context.Context, horizontalPodAutoscaler *v2beta1.HorizontalPodAutoscaler, opts v1.UpdateOptions) (result *v2beta1.HorizontalPodAutoscaler, err error) { - emptyResult := &v2beta1.HorizontalPodAutoscaler{} - obj, err := c.Fake. - Invokes(testing.NewUpdateActionWithOptions(horizontalpodautoscalersResource, c.ns, horizontalPodAutoscaler, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v2beta1.HorizontalPodAutoscaler), err } -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeHorizontalPodAutoscalers) UpdateStatus(ctx context.Context, horizontalPodAutoscaler *v2beta1.HorizontalPodAutoscaler, opts v1.UpdateOptions) (result *v2beta1.HorizontalPodAutoscaler, err error) { - emptyResult := &v2beta1.HorizontalPodAutoscaler{} - obj, err := c.Fake. 
- Invokes(testing.NewUpdateSubresourceActionWithOptions(horizontalpodautoscalersResource, "status", c.ns, horizontalPodAutoscaler, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v2beta1.HorizontalPodAutoscaler), err -} - -// Delete takes name of the horizontalPodAutoscaler and deletes it. Returns an error if one occurs. -func (c *FakeHorizontalPodAutoscalers) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(horizontalpodautoscalersResource, c.ns, name, opts), &v2beta1.HorizontalPodAutoscaler{}) - - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeHorizontalPodAutoscalers) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionActionWithOptions(horizontalpodautoscalersResource, c.ns, opts, listOpts) - - _, err := c.Fake.Invokes(action, &v2beta1.HorizontalPodAutoscalerList{}) - return err -} - -// Patch applies the patch and returns the patched horizontalPodAutoscaler. -func (c *FakeHorizontalPodAutoscalers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2beta1.HorizontalPodAutoscaler, err error) { - emptyResult := &v2beta1.HorizontalPodAutoscaler{} - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceActionWithOptions(horizontalpodautoscalersResource, c.ns, name, pt, data, opts, subresources...), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v2beta1.HorizontalPodAutoscaler), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied horizontalPodAutoscaler. -func (c *FakeHorizontalPodAutoscalers) Apply(ctx context.Context, horizontalPodAutoscaler *autoscalingv2beta1.HorizontalPodAutoscalerApplyConfiguration, opts v1.ApplyOptions) (result *v2beta1.HorizontalPodAutoscaler, err error) { - if horizontalPodAutoscaler == nil { - return nil, fmt.Errorf("horizontalPodAutoscaler provided to Apply must not be nil") - } - data, err := json.Marshal(horizontalPodAutoscaler) - if err != nil { - return nil, err - } - name := horizontalPodAutoscaler.Name - if name == nil { - return nil, fmt.Errorf("horizontalPodAutoscaler.Name must be provided to Apply") - } - emptyResult := &v2beta1.HorizontalPodAutoscaler{} - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceActionWithOptions(horizontalpodautoscalersResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v2beta1.HorizontalPodAutoscaler), err -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
-func (c *FakeHorizontalPodAutoscalers) ApplyStatus(ctx context.Context, horizontalPodAutoscaler *autoscalingv2beta1.HorizontalPodAutoscalerApplyConfiguration, opts v1.ApplyOptions) (result *v2beta1.HorizontalPodAutoscaler, err error) { - if horizontalPodAutoscaler == nil { - return nil, fmt.Errorf("horizontalPodAutoscaler provided to Apply must not be nil") - } - data, err := json.Marshal(horizontalPodAutoscaler) - if err != nil { - return nil, err - } - name := horizontalPodAutoscaler.Name - if name == nil { - return nil, fmt.Errorf("horizontalPodAutoscaler.Name must be provided to Apply") - } - emptyResult := &v2beta1.HorizontalPodAutoscaler{} - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceActionWithOptions(horizontalpodautoscalersResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) - - if obj == nil { - return emptyResult, err +func newFakeHorizontalPodAutoscalers(fake *FakeAutoscalingV2beta1, namespace string) typedautoscalingv2beta1.HorizontalPodAutoscalerInterface { + return &fakeHorizontalPodAutoscalers{ + gentype.NewFakeClientWithListAndApply[*v2beta1.HorizontalPodAutoscaler, *v2beta1.HorizontalPodAutoscalerList, *autoscalingv2beta1.HorizontalPodAutoscalerApplyConfiguration]( + fake.Fake, + namespace, + v2beta1.SchemeGroupVersion.WithResource("horizontalpodautoscalers"), + v2beta1.SchemeGroupVersion.WithKind("HorizontalPodAutoscaler"), + func() *v2beta1.HorizontalPodAutoscaler { return &v2beta1.HorizontalPodAutoscaler{} }, + func() *v2beta1.HorizontalPodAutoscalerList { return &v2beta1.HorizontalPodAutoscalerList{} }, + func(dst, src *v2beta1.HorizontalPodAutoscalerList) { dst.ListMeta = src.ListMeta }, + func(list *v2beta1.HorizontalPodAutoscalerList) []*v2beta1.HorizontalPodAutoscaler { + return gentype.ToPointerSlice(list.Items) + }, + func(list *v2beta1.HorizontalPodAutoscalerList, items []*v2beta1.HorizontalPodAutoscaler) { + list.Items = gentype.FromPointerSlice(items) + }, + ), + fake, } - return obj.(*v2beta1.HorizontalPodAutoscaler), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/horizontalpodautoscaler.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/horizontalpodautoscaler.go index 6bc1b7776..c1dc75ccc 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/horizontalpodautoscaler.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/horizontalpodautoscaler.go @@ -19,13 +19,13 @@ limitations under the License. package v2beta1 import ( - "context" + context "context" - v2beta1 "k8s.io/api/autoscaling/v2beta1" + autoscalingv2beta1 "k8s.io/api/autoscaling/v2beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - autoscalingv2beta1 "k8s.io/client-go/applyconfigurations/autoscaling/v2beta1" + applyconfigurationsautoscalingv2beta1 "k8s.io/client-go/applyconfigurations/autoscaling/v2beta1" gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" ) @@ -38,36 +38,42 @@ type HorizontalPodAutoscalersGetter interface { // HorizontalPodAutoscalerInterface has methods to work with HorizontalPodAutoscaler resources. 
type HorizontalPodAutoscalerInterface interface { - Create(ctx context.Context, horizontalPodAutoscaler *v2beta1.HorizontalPodAutoscaler, opts v1.CreateOptions) (*v2beta1.HorizontalPodAutoscaler, error) - Update(ctx context.Context, horizontalPodAutoscaler *v2beta1.HorizontalPodAutoscaler, opts v1.UpdateOptions) (*v2beta1.HorizontalPodAutoscaler, error) + Create(ctx context.Context, horizontalPodAutoscaler *autoscalingv2beta1.HorizontalPodAutoscaler, opts v1.CreateOptions) (*autoscalingv2beta1.HorizontalPodAutoscaler, error) + Update(ctx context.Context, horizontalPodAutoscaler *autoscalingv2beta1.HorizontalPodAutoscaler, opts v1.UpdateOptions) (*autoscalingv2beta1.HorizontalPodAutoscaler, error) // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - UpdateStatus(ctx context.Context, horizontalPodAutoscaler *v2beta1.HorizontalPodAutoscaler, opts v1.UpdateOptions) (*v2beta1.HorizontalPodAutoscaler, error) + UpdateStatus(ctx context.Context, horizontalPodAutoscaler *autoscalingv2beta1.HorizontalPodAutoscaler, opts v1.UpdateOptions) (*autoscalingv2beta1.HorizontalPodAutoscaler, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v2beta1.HorizontalPodAutoscaler, error) - List(ctx context.Context, opts v1.ListOptions) (*v2beta1.HorizontalPodAutoscalerList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*autoscalingv2beta1.HorizontalPodAutoscaler, error) + List(ctx context.Context, opts v1.ListOptions) (*autoscalingv2beta1.HorizontalPodAutoscalerList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2beta1.HorizontalPodAutoscaler, err error) - Apply(ctx context.Context, horizontalPodAutoscaler *autoscalingv2beta1.HorizontalPodAutoscalerApplyConfiguration, opts v1.ApplyOptions) (result *v2beta1.HorizontalPodAutoscaler, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *autoscalingv2beta1.HorizontalPodAutoscaler, err error) + Apply(ctx context.Context, horizontalPodAutoscaler *applyconfigurationsautoscalingv2beta1.HorizontalPodAutoscalerApplyConfiguration, opts v1.ApplyOptions) (result *autoscalingv2beta1.HorizontalPodAutoscaler, err error) // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
- ApplyStatus(ctx context.Context, horizontalPodAutoscaler *autoscalingv2beta1.HorizontalPodAutoscalerApplyConfiguration, opts v1.ApplyOptions) (result *v2beta1.HorizontalPodAutoscaler, err error) + ApplyStatus(ctx context.Context, horizontalPodAutoscaler *applyconfigurationsautoscalingv2beta1.HorizontalPodAutoscalerApplyConfiguration, opts v1.ApplyOptions) (result *autoscalingv2beta1.HorizontalPodAutoscaler, err error) HorizontalPodAutoscalerExpansion } // horizontalPodAutoscalers implements HorizontalPodAutoscalerInterface type horizontalPodAutoscalers struct { - *gentype.ClientWithListAndApply[*v2beta1.HorizontalPodAutoscaler, *v2beta1.HorizontalPodAutoscalerList, *autoscalingv2beta1.HorizontalPodAutoscalerApplyConfiguration] + *gentype.ClientWithListAndApply[*autoscalingv2beta1.HorizontalPodAutoscaler, *autoscalingv2beta1.HorizontalPodAutoscalerList, *applyconfigurationsautoscalingv2beta1.HorizontalPodAutoscalerApplyConfiguration] } // newHorizontalPodAutoscalers returns a HorizontalPodAutoscalers func newHorizontalPodAutoscalers(c *AutoscalingV2beta1Client, namespace string) *horizontalPodAutoscalers { return &horizontalPodAutoscalers{ - gentype.NewClientWithListAndApply[*v2beta1.HorizontalPodAutoscaler, *v2beta1.HorizontalPodAutoscalerList, *autoscalingv2beta1.HorizontalPodAutoscalerApplyConfiguration]( + gentype.NewClientWithListAndApply[*autoscalingv2beta1.HorizontalPodAutoscaler, *autoscalingv2beta1.HorizontalPodAutoscalerList, *applyconfigurationsautoscalingv2beta1.HorizontalPodAutoscalerApplyConfiguration]( "horizontalpodautoscalers", c.RESTClient(), scheme.ParameterCodec, namespace, - func() *v2beta1.HorizontalPodAutoscaler { return &v2beta1.HorizontalPodAutoscaler{} }, - func() *v2beta1.HorizontalPodAutoscalerList { return &v2beta1.HorizontalPodAutoscalerList{} }), + func() *autoscalingv2beta1.HorizontalPodAutoscaler { + return &autoscalingv2beta1.HorizontalPodAutoscaler{} + }, + func() *autoscalingv2beta1.HorizontalPodAutoscalerList { + return &autoscalingv2beta1.HorizontalPodAutoscalerList{} + }, + gentype.PrefersProtobuf[*autoscalingv2beta1.HorizontalPodAutoscaler](), + ), } } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/autoscaling_client.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/autoscaling_client.go index cae1b4e43..62f5b743c 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/autoscaling_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/autoscaling_client.go @@ -19,10 +19,10 @@ limitations under the License. 
package v2beta2 import ( - "net/http" + http "net/http" - v2beta2 "k8s.io/api/autoscaling/v2beta2" - "k8s.io/client-go/kubernetes/scheme" + autoscalingv2beta2 "k8s.io/api/autoscaling/v2beta2" + scheme "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -85,10 +85,10 @@ func New(c rest.Interface) *AutoscalingV2beta2Client { } func setConfigDefaults(config *rest.Config) error { - gv := v2beta2.SchemeGroupVersion + gv := autoscalingv2beta2.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/fake/fake_autoscaling_client.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/fake/fake_autoscaling_client.go index 8c36e0e81..99b5026bf 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/fake/fake_autoscaling_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/fake/fake_autoscaling_client.go @@ -29,7 +29,7 @@ type FakeAutoscalingV2beta2 struct { } func (c *FakeAutoscalingV2beta2) HorizontalPodAutoscalers(namespace string) v2beta2.HorizontalPodAutoscalerInterface { - return &FakeHorizontalPodAutoscalers{c, namespace} + return newFakeHorizontalPodAutoscalers(c, namespace) } // RESTClient returns a RESTClient that is used to communicate diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/fake/fake_horizontalpodautoscaler.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/fake/fake_horizontalpodautoscaler.go index e037e8ac4..f747da9d0 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/fake/fake_horizontalpodautoscaler.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/fake/fake_horizontalpodautoscaler.go @@ -19,179 +19,35 @@ limitations under the License. package fake import ( - "context" - json "encoding/json" - "fmt" - v2beta2 "k8s.io/api/autoscaling/v2beta2" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" autoscalingv2beta2 "k8s.io/client-go/applyconfigurations/autoscaling/v2beta2" - testing "k8s.io/client-go/testing" + gentype "k8s.io/client-go/gentype" + typedautoscalingv2beta2 "k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2" ) -// FakeHorizontalPodAutoscalers implements HorizontalPodAutoscalerInterface -type FakeHorizontalPodAutoscalers struct { +// fakeHorizontalPodAutoscalers implements HorizontalPodAutoscalerInterface +type fakeHorizontalPodAutoscalers struct { + *gentype.FakeClientWithListAndApply[*v2beta2.HorizontalPodAutoscaler, *v2beta2.HorizontalPodAutoscalerList, *autoscalingv2beta2.HorizontalPodAutoscalerApplyConfiguration] Fake *FakeAutoscalingV2beta2 - ns string -} - -var horizontalpodautoscalersResource = v2beta2.SchemeGroupVersion.WithResource("horizontalpodautoscalers") - -var horizontalpodautoscalersKind = v2beta2.SchemeGroupVersion.WithKind("HorizontalPodAutoscaler") - -// Get takes name of the horizontalPodAutoscaler, and returns the corresponding horizontalPodAutoscaler object, and an error if there is any. 
-func (c *FakeHorizontalPodAutoscalers) Get(ctx context.Context, name string, options v1.GetOptions) (result *v2beta2.HorizontalPodAutoscaler, err error) { - emptyResult := &v2beta2.HorizontalPodAutoscaler{} - obj, err := c.Fake. - Invokes(testing.NewGetActionWithOptions(horizontalpodautoscalersResource, c.ns, name, options), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v2beta2.HorizontalPodAutoscaler), err -} - -// List takes label and field selectors, and returns the list of HorizontalPodAutoscalers that match those selectors. -func (c *FakeHorizontalPodAutoscalers) List(ctx context.Context, opts v1.ListOptions) (result *v2beta2.HorizontalPodAutoscalerList, err error) { - emptyResult := &v2beta2.HorizontalPodAutoscalerList{} - obj, err := c.Fake. - Invokes(testing.NewListActionWithOptions(horizontalpodautoscalersResource, horizontalpodautoscalersKind, c.ns, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v2beta2.HorizontalPodAutoscalerList{ListMeta: obj.(*v2beta2.HorizontalPodAutoscalerList).ListMeta} - for _, item := range obj.(*v2beta2.HorizontalPodAutoscalerList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested horizontalPodAutoscalers. -func (c *FakeHorizontalPodAutoscalers) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchActionWithOptions(horizontalpodautoscalersResource, c.ns, opts)) - -} - -// Create takes the representation of a horizontalPodAutoscaler and creates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any. -func (c *FakeHorizontalPodAutoscalers) Create(ctx context.Context, horizontalPodAutoscaler *v2beta2.HorizontalPodAutoscaler, opts v1.CreateOptions) (result *v2beta2.HorizontalPodAutoscaler, err error) { - emptyResult := &v2beta2.HorizontalPodAutoscaler{} - obj, err := c.Fake. - Invokes(testing.NewCreateActionWithOptions(horizontalpodautoscalersResource, c.ns, horizontalPodAutoscaler, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v2beta2.HorizontalPodAutoscaler), err -} - -// Update takes the representation of a horizontalPodAutoscaler and updates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any. -func (c *FakeHorizontalPodAutoscalers) Update(ctx context.Context, horizontalPodAutoscaler *v2beta2.HorizontalPodAutoscaler, opts v1.UpdateOptions) (result *v2beta2.HorizontalPodAutoscaler, err error) { - emptyResult := &v2beta2.HorizontalPodAutoscaler{} - obj, err := c.Fake. - Invokes(testing.NewUpdateActionWithOptions(horizontalpodautoscalersResource, c.ns, horizontalPodAutoscaler, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v2beta2.HorizontalPodAutoscaler), err } -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeHorizontalPodAutoscalers) UpdateStatus(ctx context.Context, horizontalPodAutoscaler *v2beta2.HorizontalPodAutoscaler, opts v1.UpdateOptions) (result *v2beta2.HorizontalPodAutoscaler, err error) { - emptyResult := &v2beta2.HorizontalPodAutoscaler{} - obj, err := c.Fake. 
- Invokes(testing.NewUpdateSubresourceActionWithOptions(horizontalpodautoscalersResource, "status", c.ns, horizontalPodAutoscaler, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v2beta2.HorizontalPodAutoscaler), err -} - -// Delete takes name of the horizontalPodAutoscaler and deletes it. Returns an error if one occurs. -func (c *FakeHorizontalPodAutoscalers) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(horizontalpodautoscalersResource, c.ns, name, opts), &v2beta2.HorizontalPodAutoscaler{}) - - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeHorizontalPodAutoscalers) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionActionWithOptions(horizontalpodautoscalersResource, c.ns, opts, listOpts) - - _, err := c.Fake.Invokes(action, &v2beta2.HorizontalPodAutoscalerList{}) - return err -} - -// Patch applies the patch and returns the patched horizontalPodAutoscaler. -func (c *FakeHorizontalPodAutoscalers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2beta2.HorizontalPodAutoscaler, err error) { - emptyResult := &v2beta2.HorizontalPodAutoscaler{} - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceActionWithOptions(horizontalpodautoscalersResource, c.ns, name, pt, data, opts, subresources...), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v2beta2.HorizontalPodAutoscaler), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied horizontalPodAutoscaler. -func (c *FakeHorizontalPodAutoscalers) Apply(ctx context.Context, horizontalPodAutoscaler *autoscalingv2beta2.HorizontalPodAutoscalerApplyConfiguration, opts v1.ApplyOptions) (result *v2beta2.HorizontalPodAutoscaler, err error) { - if horizontalPodAutoscaler == nil { - return nil, fmt.Errorf("horizontalPodAutoscaler provided to Apply must not be nil") - } - data, err := json.Marshal(horizontalPodAutoscaler) - if err != nil { - return nil, err - } - name := horizontalPodAutoscaler.Name - if name == nil { - return nil, fmt.Errorf("horizontalPodAutoscaler.Name must be provided to Apply") - } - emptyResult := &v2beta2.HorizontalPodAutoscaler{} - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceActionWithOptions(horizontalpodautoscalersResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v2beta2.HorizontalPodAutoscaler), err -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
-func (c *FakeHorizontalPodAutoscalers) ApplyStatus(ctx context.Context, horizontalPodAutoscaler *autoscalingv2beta2.HorizontalPodAutoscalerApplyConfiguration, opts v1.ApplyOptions) (result *v2beta2.HorizontalPodAutoscaler, err error) { - if horizontalPodAutoscaler == nil { - return nil, fmt.Errorf("horizontalPodAutoscaler provided to Apply must not be nil") - } - data, err := json.Marshal(horizontalPodAutoscaler) - if err != nil { - return nil, err - } - name := horizontalPodAutoscaler.Name - if name == nil { - return nil, fmt.Errorf("horizontalPodAutoscaler.Name must be provided to Apply") - } - emptyResult := &v2beta2.HorizontalPodAutoscaler{} - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceActionWithOptions(horizontalpodautoscalersResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) - - if obj == nil { - return emptyResult, err +func newFakeHorizontalPodAutoscalers(fake *FakeAutoscalingV2beta2, namespace string) typedautoscalingv2beta2.HorizontalPodAutoscalerInterface { + return &fakeHorizontalPodAutoscalers{ + gentype.NewFakeClientWithListAndApply[*v2beta2.HorizontalPodAutoscaler, *v2beta2.HorizontalPodAutoscalerList, *autoscalingv2beta2.HorizontalPodAutoscalerApplyConfiguration]( + fake.Fake, + namespace, + v2beta2.SchemeGroupVersion.WithResource("horizontalpodautoscalers"), + v2beta2.SchemeGroupVersion.WithKind("HorizontalPodAutoscaler"), + func() *v2beta2.HorizontalPodAutoscaler { return &v2beta2.HorizontalPodAutoscaler{} }, + func() *v2beta2.HorizontalPodAutoscalerList { return &v2beta2.HorizontalPodAutoscalerList{} }, + func(dst, src *v2beta2.HorizontalPodAutoscalerList) { dst.ListMeta = src.ListMeta }, + func(list *v2beta2.HorizontalPodAutoscalerList) []*v2beta2.HorizontalPodAutoscaler { + return gentype.ToPointerSlice(list.Items) + }, + func(list *v2beta2.HorizontalPodAutoscalerList, items []*v2beta2.HorizontalPodAutoscaler) { + list.Items = gentype.FromPointerSlice(items) + }, + ), + fake, } - return obj.(*v2beta2.HorizontalPodAutoscaler), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/horizontalpodautoscaler.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/horizontalpodautoscaler.go index 6f464661a..017b3e1fc 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/horizontalpodautoscaler.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/horizontalpodautoscaler.go @@ -19,13 +19,13 @@ limitations under the License. package v2beta2 import ( - "context" + context "context" - v2beta2 "k8s.io/api/autoscaling/v2beta2" + autoscalingv2beta2 "k8s.io/api/autoscaling/v2beta2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - autoscalingv2beta2 "k8s.io/client-go/applyconfigurations/autoscaling/v2beta2" + applyconfigurationsautoscalingv2beta2 "k8s.io/client-go/applyconfigurations/autoscaling/v2beta2" gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" ) @@ -38,36 +38,42 @@ type HorizontalPodAutoscalersGetter interface { // HorizontalPodAutoscalerInterface has methods to work with HorizontalPodAutoscaler resources. 
type HorizontalPodAutoscalerInterface interface { - Create(ctx context.Context, horizontalPodAutoscaler *v2beta2.HorizontalPodAutoscaler, opts v1.CreateOptions) (*v2beta2.HorizontalPodAutoscaler, error) - Update(ctx context.Context, horizontalPodAutoscaler *v2beta2.HorizontalPodAutoscaler, opts v1.UpdateOptions) (*v2beta2.HorizontalPodAutoscaler, error) + Create(ctx context.Context, horizontalPodAutoscaler *autoscalingv2beta2.HorizontalPodAutoscaler, opts v1.CreateOptions) (*autoscalingv2beta2.HorizontalPodAutoscaler, error) + Update(ctx context.Context, horizontalPodAutoscaler *autoscalingv2beta2.HorizontalPodAutoscaler, opts v1.UpdateOptions) (*autoscalingv2beta2.HorizontalPodAutoscaler, error) // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - UpdateStatus(ctx context.Context, horizontalPodAutoscaler *v2beta2.HorizontalPodAutoscaler, opts v1.UpdateOptions) (*v2beta2.HorizontalPodAutoscaler, error) + UpdateStatus(ctx context.Context, horizontalPodAutoscaler *autoscalingv2beta2.HorizontalPodAutoscaler, opts v1.UpdateOptions) (*autoscalingv2beta2.HorizontalPodAutoscaler, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v2beta2.HorizontalPodAutoscaler, error) - List(ctx context.Context, opts v1.ListOptions) (*v2beta2.HorizontalPodAutoscalerList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*autoscalingv2beta2.HorizontalPodAutoscaler, error) + List(ctx context.Context, opts v1.ListOptions) (*autoscalingv2beta2.HorizontalPodAutoscalerList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2beta2.HorizontalPodAutoscaler, err error) - Apply(ctx context.Context, horizontalPodAutoscaler *autoscalingv2beta2.HorizontalPodAutoscalerApplyConfiguration, opts v1.ApplyOptions) (result *v2beta2.HorizontalPodAutoscaler, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *autoscalingv2beta2.HorizontalPodAutoscaler, err error) + Apply(ctx context.Context, horizontalPodAutoscaler *applyconfigurationsautoscalingv2beta2.HorizontalPodAutoscalerApplyConfiguration, opts v1.ApplyOptions) (result *autoscalingv2beta2.HorizontalPodAutoscaler, err error) // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
- ApplyStatus(ctx context.Context, horizontalPodAutoscaler *autoscalingv2beta2.HorizontalPodAutoscalerApplyConfiguration, opts v1.ApplyOptions) (result *v2beta2.HorizontalPodAutoscaler, err error) + ApplyStatus(ctx context.Context, horizontalPodAutoscaler *applyconfigurationsautoscalingv2beta2.HorizontalPodAutoscalerApplyConfiguration, opts v1.ApplyOptions) (result *autoscalingv2beta2.HorizontalPodAutoscaler, err error) HorizontalPodAutoscalerExpansion } // horizontalPodAutoscalers implements HorizontalPodAutoscalerInterface type horizontalPodAutoscalers struct { - *gentype.ClientWithListAndApply[*v2beta2.HorizontalPodAutoscaler, *v2beta2.HorizontalPodAutoscalerList, *autoscalingv2beta2.HorizontalPodAutoscalerApplyConfiguration] + *gentype.ClientWithListAndApply[*autoscalingv2beta2.HorizontalPodAutoscaler, *autoscalingv2beta2.HorizontalPodAutoscalerList, *applyconfigurationsautoscalingv2beta2.HorizontalPodAutoscalerApplyConfiguration] } // newHorizontalPodAutoscalers returns a HorizontalPodAutoscalers func newHorizontalPodAutoscalers(c *AutoscalingV2beta2Client, namespace string) *horizontalPodAutoscalers { return &horizontalPodAutoscalers{ - gentype.NewClientWithListAndApply[*v2beta2.HorizontalPodAutoscaler, *v2beta2.HorizontalPodAutoscalerList, *autoscalingv2beta2.HorizontalPodAutoscalerApplyConfiguration]( + gentype.NewClientWithListAndApply[*autoscalingv2beta2.HorizontalPodAutoscaler, *autoscalingv2beta2.HorizontalPodAutoscalerList, *applyconfigurationsautoscalingv2beta2.HorizontalPodAutoscalerApplyConfiguration]( "horizontalpodautoscalers", c.RESTClient(), scheme.ParameterCodec, namespace, - func() *v2beta2.HorizontalPodAutoscaler { return &v2beta2.HorizontalPodAutoscaler{} }, - func() *v2beta2.HorizontalPodAutoscalerList { return &v2beta2.HorizontalPodAutoscalerList{} }), + func() *autoscalingv2beta2.HorizontalPodAutoscaler { + return &autoscalingv2beta2.HorizontalPodAutoscaler{} + }, + func() *autoscalingv2beta2.HorizontalPodAutoscalerList { + return &autoscalingv2beta2.HorizontalPodAutoscalerList{} + }, + gentype.PrefersProtobuf[*autoscalingv2beta2.HorizontalPodAutoscaler](), + ), } } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/batch_client.go b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/batch_client.go index eee144f71..614d049f3 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/batch_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/batch_client.go @@ -19,10 +19,10 @@ limitations under the License. 
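Note on the regenerated autoscaling clients above: the hand-rolled constructors are replaced by gentype.NewClientWithListAndApply, the generated code now registers gentype.PrefersProtobuf, and setConfigDefaults builds its serializer through rest.CodecFactoryForGeneratedClient. None of this changes the caller-facing surface. The sketch below is an illustration only, not code from this PR: the listHPAs helper, the kubeconfig path, and the "default" namespace are invented, and it simply shows that a typed clientset is still constructed and used exactly as before, with the protobuf preference handled inside the generated client.

// Illustrative only: consumer-side usage of the regenerated AutoscalingV2
// client is unchanged; protobuf negotiation happens inside the generated code.
package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

// listHPAs is a hypothetical helper used only for this example.
func listHPAs(kubeconfig, namespace string) error {
	cfg, err := clientcmd.BuildConfigFromFlags("", kubeconfig)
	if err != nil {
		return err
	}
	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		return err
	}
	// Same HorizontalPodAutoscalerInterface methods as before the regeneration.
	hpas, err := cs.AutoscalingV2().HorizontalPodAutoscalers(namespace).List(context.TODO(), metav1.ListOptions{})
	if err != nil {
		return err
	}
	for _, hpa := range hpas.Items {
		fmt.Println(hpa.Name)
	}
	return nil
}

func main() {
	if err := listHPAs("/path/to/kubeconfig", "default"); err != nil {
		fmt.Println(err)
	}
}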
package v1 import ( - "net/http" + http "net/http" - v1 "k8s.io/api/batch/v1" - "k8s.io/client-go/kubernetes/scheme" + batchv1 "k8s.io/api/batch/v1" + scheme "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -90,10 +90,10 @@ func New(c rest.Interface) *BatchV1Client { } func setConfigDefaults(config *rest.Config) error { - gv := v1.SchemeGroupVersion + gv := batchv1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/cronjob.go b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/cronjob.go index 7907a5bf5..29ef3e9b7 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/cronjob.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/cronjob.go @@ -19,13 +19,13 @@ limitations under the License. package v1 import ( - "context" + context "context" - v1 "k8s.io/api/batch/v1" + batchv1 "k8s.io/api/batch/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - batchv1 "k8s.io/client-go/applyconfigurations/batch/v1" + applyconfigurationsbatchv1 "k8s.io/client-go/applyconfigurations/batch/v1" gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" ) @@ -38,36 +38,38 @@ type CronJobsGetter interface { // CronJobInterface has methods to work with CronJob resources. type CronJobInterface interface { - Create(ctx context.Context, cronJob *v1.CronJob, opts metav1.CreateOptions) (*v1.CronJob, error) - Update(ctx context.Context, cronJob *v1.CronJob, opts metav1.UpdateOptions) (*v1.CronJob, error) + Create(ctx context.Context, cronJob *batchv1.CronJob, opts metav1.CreateOptions) (*batchv1.CronJob, error) + Update(ctx context.Context, cronJob *batchv1.CronJob, opts metav1.UpdateOptions) (*batchv1.CronJob, error) // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
- UpdateStatus(ctx context.Context, cronJob *v1.CronJob, opts metav1.UpdateOptions) (*v1.CronJob, error) + UpdateStatus(ctx context.Context, cronJob *batchv1.CronJob, opts metav1.UpdateOptions) (*batchv1.CronJob, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error - Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.CronJob, error) - List(ctx context.Context, opts metav1.ListOptions) (*v1.CronJobList, error) + Get(ctx context.Context, name string, opts metav1.GetOptions) (*batchv1.CronJob, error) + List(ctx context.Context, opts metav1.ListOptions) (*batchv1.CronJobList, error) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CronJob, err error) - Apply(ctx context.Context, cronJob *batchv1.CronJobApplyConfiguration, opts metav1.ApplyOptions) (result *v1.CronJob, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *batchv1.CronJob, err error) + Apply(ctx context.Context, cronJob *applyconfigurationsbatchv1.CronJobApplyConfiguration, opts metav1.ApplyOptions) (result *batchv1.CronJob, err error) // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). - ApplyStatus(ctx context.Context, cronJob *batchv1.CronJobApplyConfiguration, opts metav1.ApplyOptions) (result *v1.CronJob, err error) + ApplyStatus(ctx context.Context, cronJob *applyconfigurationsbatchv1.CronJobApplyConfiguration, opts metav1.ApplyOptions) (result *batchv1.CronJob, err error) CronJobExpansion } // cronJobs implements CronJobInterface type cronJobs struct { - *gentype.ClientWithListAndApply[*v1.CronJob, *v1.CronJobList, *batchv1.CronJobApplyConfiguration] + *gentype.ClientWithListAndApply[*batchv1.CronJob, *batchv1.CronJobList, *applyconfigurationsbatchv1.CronJobApplyConfiguration] } // newCronJobs returns a CronJobs func newCronJobs(c *BatchV1Client, namespace string) *cronJobs { return &cronJobs{ - gentype.NewClientWithListAndApply[*v1.CronJob, *v1.CronJobList, *batchv1.CronJobApplyConfiguration]( + gentype.NewClientWithListAndApply[*batchv1.CronJob, *batchv1.CronJobList, *applyconfigurationsbatchv1.CronJobApplyConfiguration]( "cronjobs", c.RESTClient(), scheme.ParameterCodec, namespace, - func() *v1.CronJob { return &v1.CronJob{} }, - func() *v1.CronJobList { return &v1.CronJobList{} }), + func() *batchv1.CronJob { return &batchv1.CronJob{} }, + func() *batchv1.CronJobList { return &batchv1.CronJobList{} }, + gentype.PrefersProtobuf[*batchv1.CronJob](), + ), } } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/fake/fake_batch_client.go b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/fake/fake_batch_client.go index 43d5b0d30..8fb3d18d8 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/fake/fake_batch_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/fake/fake_batch_client.go @@ -29,11 +29,11 @@ type FakeBatchV1 struct { } func (c *FakeBatchV1) CronJobs(namespace string) v1.CronJobInterface { - return &FakeCronJobs{c, namespace} + return newFakeCronJobs(c, namespace) } func (c *FakeBatchV1) Jobs(namespace string) v1.JobInterface { - return &FakeJobs{c, namespace} + return newFakeJobs(c, namespace) } // RESTClient returns a RESTClient that is used to 
communicate diff --git a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/fake/fake_cronjob.go b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/fake/fake_cronjob.go index 171bb8232..3624a73bd 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/fake/fake_cronjob.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/fake/fake_cronjob.go @@ -19,179 +19,31 @@ limitations under the License. package fake import ( - "context" - json "encoding/json" - "fmt" - v1 "k8s.io/api/batch/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" batchv1 "k8s.io/client-go/applyconfigurations/batch/v1" - testing "k8s.io/client-go/testing" + gentype "k8s.io/client-go/gentype" + typedbatchv1 "k8s.io/client-go/kubernetes/typed/batch/v1" ) -// FakeCronJobs implements CronJobInterface -type FakeCronJobs struct { +// fakeCronJobs implements CronJobInterface +type fakeCronJobs struct { + *gentype.FakeClientWithListAndApply[*v1.CronJob, *v1.CronJobList, *batchv1.CronJobApplyConfiguration] Fake *FakeBatchV1 - ns string -} - -var cronjobsResource = v1.SchemeGroupVersion.WithResource("cronjobs") - -var cronjobsKind = v1.SchemeGroupVersion.WithKind("CronJob") - -// Get takes name of the cronJob, and returns the corresponding cronJob object, and an error if there is any. -func (c *FakeCronJobs) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.CronJob, err error) { - emptyResult := &v1.CronJob{} - obj, err := c.Fake. - Invokes(testing.NewGetActionWithOptions(cronjobsResource, c.ns, name, options), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1.CronJob), err -} - -// List takes label and field selectors, and returns the list of CronJobs that match those selectors. -func (c *FakeCronJobs) List(ctx context.Context, opts metav1.ListOptions) (result *v1.CronJobList, err error) { - emptyResult := &v1.CronJobList{} - obj, err := c.Fake. - Invokes(testing.NewListActionWithOptions(cronjobsResource, cronjobsKind, c.ns, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1.CronJobList{ListMeta: obj.(*v1.CronJobList).ListMeta} - for _, item := range obj.(*v1.CronJobList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested cronJobs. -func (c *FakeCronJobs) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchActionWithOptions(cronjobsResource, c.ns, opts)) - -} - -// Create takes the representation of a cronJob and creates it. Returns the server's representation of the cronJob, and an error, if there is any. -func (c *FakeCronJobs) Create(ctx context.Context, cronJob *v1.CronJob, opts metav1.CreateOptions) (result *v1.CronJob, err error) { - emptyResult := &v1.CronJob{} - obj, err := c.Fake. - Invokes(testing.NewCreateActionWithOptions(cronjobsResource, c.ns, cronJob, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1.CronJob), err -} - -// Update takes the representation of a cronJob and updates it. Returns the server's representation of the cronJob, and an error, if there is any. 
-func (c *FakeCronJobs) Update(ctx context.Context, cronJob *v1.CronJob, opts metav1.UpdateOptions) (result *v1.CronJob, err error) { - emptyResult := &v1.CronJob{} - obj, err := c.Fake. - Invokes(testing.NewUpdateActionWithOptions(cronjobsResource, c.ns, cronJob, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1.CronJob), err } -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeCronJobs) UpdateStatus(ctx context.Context, cronJob *v1.CronJob, opts metav1.UpdateOptions) (result *v1.CronJob, err error) { - emptyResult := &v1.CronJob{} - obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceActionWithOptions(cronjobsResource, "status", c.ns, cronJob, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1.CronJob), err -} - -// Delete takes name of the cronJob and deletes it. Returns an error if one occurs. -func (c *FakeCronJobs) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(cronjobsResource, c.ns, name, opts), &v1.CronJob{}) - - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeCronJobs) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewDeleteCollectionActionWithOptions(cronjobsResource, c.ns, opts, listOpts) - - _, err := c.Fake.Invokes(action, &v1.CronJobList{}) - return err -} - -// Patch applies the patch and returns the patched cronJob. -func (c *FakeCronJobs) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CronJob, err error) { - emptyResult := &v1.CronJob{} - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceActionWithOptions(cronjobsResource, c.ns, name, pt, data, opts, subresources...), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1.CronJob), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied cronJob. -func (c *FakeCronJobs) Apply(ctx context.Context, cronJob *batchv1.CronJobApplyConfiguration, opts metav1.ApplyOptions) (result *v1.CronJob, err error) { - if cronJob == nil { - return nil, fmt.Errorf("cronJob provided to Apply must not be nil") - } - data, err := json.Marshal(cronJob) - if err != nil { - return nil, err - } - name := cronJob.Name - if name == nil { - return nil, fmt.Errorf("cronJob.Name must be provided to Apply") - } - emptyResult := &v1.CronJob{} - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceActionWithOptions(cronjobsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1.CronJob), err -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
-func (c *FakeCronJobs) ApplyStatus(ctx context.Context, cronJob *batchv1.CronJobApplyConfiguration, opts metav1.ApplyOptions) (result *v1.CronJob, err error) { - if cronJob == nil { - return nil, fmt.Errorf("cronJob provided to Apply must not be nil") - } - data, err := json.Marshal(cronJob) - if err != nil { - return nil, err - } - name := cronJob.Name - if name == nil { - return nil, fmt.Errorf("cronJob.Name must be provided to Apply") - } - emptyResult := &v1.CronJob{} - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceActionWithOptions(cronjobsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) - - if obj == nil { - return emptyResult, err +func newFakeCronJobs(fake *FakeBatchV1, namespace string) typedbatchv1.CronJobInterface { + return &fakeCronJobs{ + gentype.NewFakeClientWithListAndApply[*v1.CronJob, *v1.CronJobList, *batchv1.CronJobApplyConfiguration]( + fake.Fake, + namespace, + v1.SchemeGroupVersion.WithResource("cronjobs"), + v1.SchemeGroupVersion.WithKind("CronJob"), + func() *v1.CronJob { return &v1.CronJob{} }, + func() *v1.CronJobList { return &v1.CronJobList{} }, + func(dst, src *v1.CronJobList) { dst.ListMeta = src.ListMeta }, + func(list *v1.CronJobList) []*v1.CronJob { return gentype.ToPointerSlice(list.Items) }, + func(list *v1.CronJobList, items []*v1.CronJob) { list.Items = gentype.FromPointerSlice(items) }, + ), + fake, } - return obj.(*v1.CronJob), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/fake/fake_job.go b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/fake/fake_job.go index 23e66953c..33baee563 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/fake/fake_job.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/fake/fake_job.go @@ -19,179 +19,31 @@ limitations under the License. package fake import ( - "context" - json "encoding/json" - "fmt" - v1 "k8s.io/api/batch/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" batchv1 "k8s.io/client-go/applyconfigurations/batch/v1" - testing "k8s.io/client-go/testing" + gentype "k8s.io/client-go/gentype" + typedbatchv1 "k8s.io/client-go/kubernetes/typed/batch/v1" ) -// FakeJobs implements JobInterface -type FakeJobs struct { +// fakeJobs implements JobInterface +type fakeJobs struct { + *gentype.FakeClientWithListAndApply[*v1.Job, *v1.JobList, *batchv1.JobApplyConfiguration] Fake *FakeBatchV1 - ns string -} - -var jobsResource = v1.SchemeGroupVersion.WithResource("jobs") - -var jobsKind = v1.SchemeGroupVersion.WithKind("Job") - -// Get takes name of the job, and returns the corresponding job object, and an error if there is any. -func (c *FakeJobs) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Job, err error) { - emptyResult := &v1.Job{} - obj, err := c.Fake. - Invokes(testing.NewGetActionWithOptions(jobsResource, c.ns, name, options), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1.Job), err -} - -// List takes label and field selectors, and returns the list of Jobs that match those selectors. -func (c *FakeJobs) List(ctx context.Context, opts metav1.ListOptions) (result *v1.JobList, err error) { - emptyResult := &v1.JobList{} - obj, err := c.Fake. 
- Invokes(testing.NewListActionWithOptions(jobsResource, jobsKind, c.ns, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1.JobList{ListMeta: obj.(*v1.JobList).ListMeta} - for _, item := range obj.(*v1.JobList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested jobs. -func (c *FakeJobs) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchActionWithOptions(jobsResource, c.ns, opts)) - -} - -// Create takes the representation of a job and creates it. Returns the server's representation of the job, and an error, if there is any. -func (c *FakeJobs) Create(ctx context.Context, job *v1.Job, opts metav1.CreateOptions) (result *v1.Job, err error) { - emptyResult := &v1.Job{} - obj, err := c.Fake. - Invokes(testing.NewCreateActionWithOptions(jobsResource, c.ns, job, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1.Job), err -} - -// Update takes the representation of a job and updates it. Returns the server's representation of the job, and an error, if there is any. -func (c *FakeJobs) Update(ctx context.Context, job *v1.Job, opts metav1.UpdateOptions) (result *v1.Job, err error) { - emptyResult := &v1.Job{} - obj, err := c.Fake. - Invokes(testing.NewUpdateActionWithOptions(jobsResource, c.ns, job, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1.Job), err } -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeJobs) UpdateStatus(ctx context.Context, job *v1.Job, opts metav1.UpdateOptions) (result *v1.Job, err error) { - emptyResult := &v1.Job{} - obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceActionWithOptions(jobsResource, "status", c.ns, job, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1.Job), err -} - -// Delete takes name of the job and deletes it. Returns an error if one occurs. -func (c *FakeJobs) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(jobsResource, c.ns, name, opts), &v1.Job{}) - - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeJobs) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewDeleteCollectionActionWithOptions(jobsResource, c.ns, opts, listOpts) - - _, err := c.Fake.Invokes(action, &v1.JobList{}) - return err -} - -// Patch applies the patch and returns the patched job. -func (c *FakeJobs) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Job, err error) { - emptyResult := &v1.Job{} - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceActionWithOptions(jobsResource, c.ns, name, pt, data, opts, subresources...), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1.Job), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied job. 
-func (c *FakeJobs) Apply(ctx context.Context, job *batchv1.JobApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Job, err error) { - if job == nil { - return nil, fmt.Errorf("job provided to Apply must not be nil") - } - data, err := json.Marshal(job) - if err != nil { - return nil, err - } - name := job.Name - if name == nil { - return nil, fmt.Errorf("job.Name must be provided to Apply") - } - emptyResult := &v1.Job{} - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceActionWithOptions(jobsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1.Job), err -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *FakeJobs) ApplyStatus(ctx context.Context, job *batchv1.JobApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Job, err error) { - if job == nil { - return nil, fmt.Errorf("job provided to Apply must not be nil") - } - data, err := json.Marshal(job) - if err != nil { - return nil, err - } - name := job.Name - if name == nil { - return nil, fmt.Errorf("job.Name must be provided to Apply") - } - emptyResult := &v1.Job{} - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceActionWithOptions(jobsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) - - if obj == nil { - return emptyResult, err +func newFakeJobs(fake *FakeBatchV1, namespace string) typedbatchv1.JobInterface { + return &fakeJobs{ + gentype.NewFakeClientWithListAndApply[*v1.Job, *v1.JobList, *batchv1.JobApplyConfiguration]( + fake.Fake, + namespace, + v1.SchemeGroupVersion.WithResource("jobs"), + v1.SchemeGroupVersion.WithKind("Job"), + func() *v1.Job { return &v1.Job{} }, + func() *v1.JobList { return &v1.JobList{} }, + func(dst, src *v1.JobList) { dst.ListMeta = src.ListMeta }, + func(list *v1.JobList) []*v1.Job { return gentype.ToPointerSlice(list.Items) }, + func(list *v1.JobList, items []*v1.Job) { list.Items = gentype.FromPointerSlice(items) }, + ), + fake, } - return obj.(*v1.Job), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/job.go b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/job.go index 83dbe6fa4..d77aa0f03 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/job.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/job.go @@ -19,13 +19,13 @@ limitations under the License. package v1 import ( - "context" + context "context" - v1 "k8s.io/api/batch/v1" + batchv1 "k8s.io/api/batch/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - batchv1 "k8s.io/client-go/applyconfigurations/batch/v1" + applyconfigurationsbatchv1 "k8s.io/client-go/applyconfigurations/batch/v1" gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" ) @@ -38,36 +38,38 @@ type JobsGetter interface { // JobInterface has methods to work with Job resources. 
type JobInterface interface { - Create(ctx context.Context, job *v1.Job, opts metav1.CreateOptions) (*v1.Job, error) - Update(ctx context.Context, job *v1.Job, opts metav1.UpdateOptions) (*v1.Job, error) + Create(ctx context.Context, job *batchv1.Job, opts metav1.CreateOptions) (*batchv1.Job, error) + Update(ctx context.Context, job *batchv1.Job, opts metav1.UpdateOptions) (*batchv1.Job, error) // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - UpdateStatus(ctx context.Context, job *v1.Job, opts metav1.UpdateOptions) (*v1.Job, error) + UpdateStatus(ctx context.Context, job *batchv1.Job, opts metav1.UpdateOptions) (*batchv1.Job, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error - Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.Job, error) - List(ctx context.Context, opts metav1.ListOptions) (*v1.JobList, error) + Get(ctx context.Context, name string, opts metav1.GetOptions) (*batchv1.Job, error) + List(ctx context.Context, opts metav1.ListOptions) (*batchv1.JobList, error) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Job, err error) - Apply(ctx context.Context, job *batchv1.JobApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Job, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *batchv1.Job, err error) + Apply(ctx context.Context, job *applyconfigurationsbatchv1.JobApplyConfiguration, opts metav1.ApplyOptions) (result *batchv1.Job, err error) // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). - ApplyStatus(ctx context.Context, job *batchv1.JobApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Job, err error) + ApplyStatus(ctx context.Context, job *applyconfigurationsbatchv1.JobApplyConfiguration, opts metav1.ApplyOptions) (result *batchv1.Job, err error) JobExpansion } // jobs implements JobInterface type jobs struct { - *gentype.ClientWithListAndApply[*v1.Job, *v1.JobList, *batchv1.JobApplyConfiguration] + *gentype.ClientWithListAndApply[*batchv1.Job, *batchv1.JobList, *applyconfigurationsbatchv1.JobApplyConfiguration] } // newJobs returns a Jobs func newJobs(c *BatchV1Client, namespace string) *jobs { return &jobs{ - gentype.NewClientWithListAndApply[*v1.Job, *v1.JobList, *batchv1.JobApplyConfiguration]( + gentype.NewClientWithListAndApply[*batchv1.Job, *batchv1.JobList, *applyconfigurationsbatchv1.JobApplyConfiguration]( "jobs", c.RESTClient(), scheme.ParameterCodec, namespace, - func() *v1.Job { return &v1.Job{} }, - func() *v1.JobList { return &v1.JobList{} }), + func() *batchv1.Job { return &batchv1.Job{} }, + func() *batchv1.JobList { return &batchv1.JobList{} }, + gentype.PrefersProtobuf[*batchv1.Job](), + ), } } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/batch_client.go b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/batch_client.go index ebbf063ec..2da9e4135 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/batch_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/batch_client.go @@ -19,10 +19,10 @@ limitations under the License. 
package v1beta1 import ( - "net/http" + http "net/http" - v1beta1 "k8s.io/api/batch/v1beta1" - "k8s.io/client-go/kubernetes/scheme" + batchv1beta1 "k8s.io/api/batch/v1beta1" + scheme "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -85,10 +85,10 @@ func New(c rest.Interface) *BatchV1beta1Client { } func setConfigDefaults(config *rest.Config) error { - gv := v1beta1.SchemeGroupVersion + gv := batchv1beta1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/cronjob.go b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/cronjob.go index a6f7399d8..3091020ba 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/cronjob.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/cronjob.go @@ -19,13 +19,13 @@ limitations under the License. package v1beta1 import ( - "context" + context "context" - v1beta1 "k8s.io/api/batch/v1beta1" + batchv1beta1 "k8s.io/api/batch/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - batchv1beta1 "k8s.io/client-go/applyconfigurations/batch/v1beta1" + applyconfigurationsbatchv1beta1 "k8s.io/client-go/applyconfigurations/batch/v1beta1" gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" ) @@ -38,36 +38,38 @@ type CronJobsGetter interface { // CronJobInterface has methods to work with CronJob resources. type CronJobInterface interface { - Create(ctx context.Context, cronJob *v1beta1.CronJob, opts v1.CreateOptions) (*v1beta1.CronJob, error) - Update(ctx context.Context, cronJob *v1beta1.CronJob, opts v1.UpdateOptions) (*v1beta1.CronJob, error) + Create(ctx context.Context, cronJob *batchv1beta1.CronJob, opts v1.CreateOptions) (*batchv1beta1.CronJob, error) + Update(ctx context.Context, cronJob *batchv1beta1.CronJob, opts v1.UpdateOptions) (*batchv1beta1.CronJob, error) // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
- UpdateStatus(ctx context.Context, cronJob *v1beta1.CronJob, opts v1.UpdateOptions) (*v1beta1.CronJob, error) + UpdateStatus(ctx context.Context, cronJob *batchv1beta1.CronJob, opts v1.UpdateOptions) (*batchv1beta1.CronJob, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.CronJob, error) - List(ctx context.Context, opts v1.ListOptions) (*v1beta1.CronJobList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*batchv1beta1.CronJob, error) + List(ctx context.Context, opts v1.ListOptions) (*batchv1beta1.CronJobList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.CronJob, err error) - Apply(ctx context.Context, cronJob *batchv1beta1.CronJobApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.CronJob, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *batchv1beta1.CronJob, err error) + Apply(ctx context.Context, cronJob *applyconfigurationsbatchv1beta1.CronJobApplyConfiguration, opts v1.ApplyOptions) (result *batchv1beta1.CronJob, err error) // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). - ApplyStatus(ctx context.Context, cronJob *batchv1beta1.CronJobApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.CronJob, err error) + ApplyStatus(ctx context.Context, cronJob *applyconfigurationsbatchv1beta1.CronJobApplyConfiguration, opts v1.ApplyOptions) (result *batchv1beta1.CronJob, err error) CronJobExpansion } // cronJobs implements CronJobInterface type cronJobs struct { - *gentype.ClientWithListAndApply[*v1beta1.CronJob, *v1beta1.CronJobList, *batchv1beta1.CronJobApplyConfiguration] + *gentype.ClientWithListAndApply[*batchv1beta1.CronJob, *batchv1beta1.CronJobList, *applyconfigurationsbatchv1beta1.CronJobApplyConfiguration] } // newCronJobs returns a CronJobs func newCronJobs(c *BatchV1beta1Client, namespace string) *cronJobs { return &cronJobs{ - gentype.NewClientWithListAndApply[*v1beta1.CronJob, *v1beta1.CronJobList, *batchv1beta1.CronJobApplyConfiguration]( + gentype.NewClientWithListAndApply[*batchv1beta1.CronJob, *batchv1beta1.CronJobList, *applyconfigurationsbatchv1beta1.CronJobApplyConfiguration]( "cronjobs", c.RESTClient(), scheme.ParameterCodec, namespace, - func() *v1beta1.CronJob { return &v1beta1.CronJob{} }, - func() *v1beta1.CronJobList { return &v1beta1.CronJobList{} }), + func() *batchv1beta1.CronJob { return &batchv1beta1.CronJob{} }, + func() *batchv1beta1.CronJobList { return &batchv1beta1.CronJobList{} }, + gentype.PrefersProtobuf[*batchv1beta1.CronJob](), + ), } } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/fake/fake_batch_client.go b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/fake/fake_batch_client.go index 6f350aed9..48cabb71e 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/fake/fake_batch_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/fake/fake_batch_client.go @@ -29,7 +29,7 @@ type FakeBatchV1beta1 struct { } func (c *FakeBatchV1beta1) CronJobs(namespace string) v1beta1.CronJobInterface { - return &FakeCronJobs{c, namespace} + return newFakeCronJobs(c, namespace) } // RESTClient 
returns a RESTClient that is used to communicate diff --git a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/fake/fake_cronjob.go b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/fake/fake_cronjob.go index 71cd4f165..05b99fadc 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/fake/fake_cronjob.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/fake/fake_cronjob.go @@ -19,179 +19,33 @@ limitations under the License. package fake import ( - "context" - json "encoding/json" - "fmt" - v1beta1 "k8s.io/api/batch/v1beta1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" batchv1beta1 "k8s.io/client-go/applyconfigurations/batch/v1beta1" - testing "k8s.io/client-go/testing" + gentype "k8s.io/client-go/gentype" + typedbatchv1beta1 "k8s.io/client-go/kubernetes/typed/batch/v1beta1" ) -// FakeCronJobs implements CronJobInterface -type FakeCronJobs struct { +// fakeCronJobs implements CronJobInterface +type fakeCronJobs struct { + *gentype.FakeClientWithListAndApply[*v1beta1.CronJob, *v1beta1.CronJobList, *batchv1beta1.CronJobApplyConfiguration] Fake *FakeBatchV1beta1 - ns string -} - -var cronjobsResource = v1beta1.SchemeGroupVersion.WithResource("cronjobs") - -var cronjobsKind = v1beta1.SchemeGroupVersion.WithKind("CronJob") - -// Get takes name of the cronJob, and returns the corresponding cronJob object, and an error if there is any. -func (c *FakeCronJobs) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.CronJob, err error) { - emptyResult := &v1beta1.CronJob{} - obj, err := c.Fake. - Invokes(testing.NewGetActionWithOptions(cronjobsResource, c.ns, name, options), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.CronJob), err -} - -// List takes label and field selectors, and returns the list of CronJobs that match those selectors. -func (c *FakeCronJobs) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.CronJobList, err error) { - emptyResult := &v1beta1.CronJobList{} - obj, err := c.Fake. - Invokes(testing.NewListActionWithOptions(cronjobsResource, cronjobsKind, c.ns, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1beta1.CronJobList{ListMeta: obj.(*v1beta1.CronJobList).ListMeta} - for _, item := range obj.(*v1beta1.CronJobList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested cronJobs. -func (c *FakeCronJobs) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchActionWithOptions(cronjobsResource, c.ns, opts)) - -} - -// Create takes the representation of a cronJob and creates it. Returns the server's representation of the cronJob, and an error, if there is any. -func (c *FakeCronJobs) Create(ctx context.Context, cronJob *v1beta1.CronJob, opts v1.CreateOptions) (result *v1beta1.CronJob, err error) { - emptyResult := &v1beta1.CronJob{} - obj, err := c.Fake. - Invokes(testing.NewCreateActionWithOptions(cronjobsResource, c.ns, cronJob, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.CronJob), err -} - -// Update takes the representation of a cronJob and updates it. 
Returns the server's representation of the cronJob, and an error, if there is any. -func (c *FakeCronJobs) Update(ctx context.Context, cronJob *v1beta1.CronJob, opts v1.UpdateOptions) (result *v1beta1.CronJob, err error) { - emptyResult := &v1beta1.CronJob{} - obj, err := c.Fake. - Invokes(testing.NewUpdateActionWithOptions(cronjobsResource, c.ns, cronJob, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.CronJob), err } -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeCronJobs) UpdateStatus(ctx context.Context, cronJob *v1beta1.CronJob, opts v1.UpdateOptions) (result *v1beta1.CronJob, err error) { - emptyResult := &v1beta1.CronJob{} - obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceActionWithOptions(cronjobsResource, "status", c.ns, cronJob, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.CronJob), err -} - -// Delete takes name of the cronJob and deletes it. Returns an error if one occurs. -func (c *FakeCronJobs) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(cronjobsResource, c.ns, name, opts), &v1beta1.CronJob{}) - - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeCronJobs) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionActionWithOptions(cronjobsResource, c.ns, opts, listOpts) - - _, err := c.Fake.Invokes(action, &v1beta1.CronJobList{}) - return err -} - -// Patch applies the patch and returns the patched cronJob. -func (c *FakeCronJobs) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.CronJob, err error) { - emptyResult := &v1beta1.CronJob{} - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceActionWithOptions(cronjobsResource, c.ns, name, pt, data, opts, subresources...), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.CronJob), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied cronJob. -func (c *FakeCronJobs) Apply(ctx context.Context, cronJob *batchv1beta1.CronJobApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.CronJob, err error) { - if cronJob == nil { - return nil, fmt.Errorf("cronJob provided to Apply must not be nil") - } - data, err := json.Marshal(cronJob) - if err != nil { - return nil, err - } - name := cronJob.Name - if name == nil { - return nil, fmt.Errorf("cronJob.Name must be provided to Apply") - } - emptyResult := &v1beta1.CronJob{} - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceActionWithOptions(cronjobsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.CronJob), err -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
-func (c *FakeCronJobs) ApplyStatus(ctx context.Context, cronJob *batchv1beta1.CronJobApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.CronJob, err error) { - if cronJob == nil { - return nil, fmt.Errorf("cronJob provided to Apply must not be nil") - } - data, err := json.Marshal(cronJob) - if err != nil { - return nil, err - } - name := cronJob.Name - if name == nil { - return nil, fmt.Errorf("cronJob.Name must be provided to Apply") - } - emptyResult := &v1beta1.CronJob{} - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceActionWithOptions(cronjobsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) - - if obj == nil { - return emptyResult, err +func newFakeCronJobs(fake *FakeBatchV1beta1, namespace string) typedbatchv1beta1.CronJobInterface { + return &fakeCronJobs{ + gentype.NewFakeClientWithListAndApply[*v1beta1.CronJob, *v1beta1.CronJobList, *batchv1beta1.CronJobApplyConfiguration]( + fake.Fake, + namespace, + v1beta1.SchemeGroupVersion.WithResource("cronjobs"), + v1beta1.SchemeGroupVersion.WithKind("CronJob"), + func() *v1beta1.CronJob { return &v1beta1.CronJob{} }, + func() *v1beta1.CronJobList { return &v1beta1.CronJobList{} }, + func(dst, src *v1beta1.CronJobList) { dst.ListMeta = src.ListMeta }, + func(list *v1beta1.CronJobList) []*v1beta1.CronJob { return gentype.ToPointerSlice(list.Items) }, + func(list *v1beta1.CronJobList, items []*v1beta1.CronJob) { + list.Items = gentype.FromPointerSlice(items) + }, + ), + fake, } - return obj.(*v1beta1.CronJob), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1/certificates_client.go b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1/certificates_client.go index 6d87c539e..60337cd23 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1/certificates_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1/certificates_client.go @@ -19,10 +19,10 @@ limitations under the License. package v1 import ( - "net/http" + http "net/http" - v1 "k8s.io/api/certificates/v1" - "k8s.io/client-go/kubernetes/scheme" + certificatesv1 "k8s.io/api/certificates/v1" + scheme "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -85,10 +85,10 @@ func New(c rest.Interface) *CertificatesV1Client { } func setConfigDefaults(config *rest.Config) error { - gv := v1.SchemeGroupVersion + gv := certificatesv1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1/certificatesigningrequest.go b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1/certificatesigningrequest.go index 9fa3300e6..6863a22d1 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1/certificatesigningrequest.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1/certificatesigningrequest.go @@ -19,13 +19,13 @@ limitations under the License. 
package v1 import ( - "context" + context "context" - v1 "k8s.io/api/certificates/v1" + certificatesv1 "k8s.io/api/certificates/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - certificatesv1 "k8s.io/client-go/applyconfigurations/certificates/v1" + applyconfigurationscertificatesv1 "k8s.io/client-go/applyconfigurations/certificates/v1" gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" ) @@ -38,46 +38,51 @@ type CertificateSigningRequestsGetter interface { // CertificateSigningRequestInterface has methods to work with CertificateSigningRequest resources. type CertificateSigningRequestInterface interface { - Create(ctx context.Context, certificateSigningRequest *v1.CertificateSigningRequest, opts metav1.CreateOptions) (*v1.CertificateSigningRequest, error) - Update(ctx context.Context, certificateSigningRequest *v1.CertificateSigningRequest, opts metav1.UpdateOptions) (*v1.CertificateSigningRequest, error) + Create(ctx context.Context, certificateSigningRequest *certificatesv1.CertificateSigningRequest, opts metav1.CreateOptions) (*certificatesv1.CertificateSigningRequest, error) + Update(ctx context.Context, certificateSigningRequest *certificatesv1.CertificateSigningRequest, opts metav1.UpdateOptions) (*certificatesv1.CertificateSigningRequest, error) // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - UpdateStatus(ctx context.Context, certificateSigningRequest *v1.CertificateSigningRequest, opts metav1.UpdateOptions) (*v1.CertificateSigningRequest, error) + UpdateStatus(ctx context.Context, certificateSigningRequest *certificatesv1.CertificateSigningRequest, opts metav1.UpdateOptions) (*certificatesv1.CertificateSigningRequest, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error - Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.CertificateSigningRequest, error) - List(ctx context.Context, opts metav1.ListOptions) (*v1.CertificateSigningRequestList, error) + Get(ctx context.Context, name string, opts metav1.GetOptions) (*certificatesv1.CertificateSigningRequest, error) + List(ctx context.Context, opts metav1.ListOptions) (*certificatesv1.CertificateSigningRequestList, error) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CertificateSigningRequest, err error) - Apply(ctx context.Context, certificateSigningRequest *certificatesv1.CertificateSigningRequestApplyConfiguration, opts metav1.ApplyOptions) (result *v1.CertificateSigningRequest, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *certificatesv1.CertificateSigningRequest, err error) + Apply(ctx context.Context, certificateSigningRequest *applyconfigurationscertificatesv1.CertificateSigningRequestApplyConfiguration, opts metav1.ApplyOptions) (result *certificatesv1.CertificateSigningRequest, err error) // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
- ApplyStatus(ctx context.Context, certificateSigningRequest *certificatesv1.CertificateSigningRequestApplyConfiguration, opts metav1.ApplyOptions) (result *v1.CertificateSigningRequest, err error) - UpdateApproval(ctx context.Context, certificateSigningRequestName string, certificateSigningRequest *v1.CertificateSigningRequest, opts metav1.UpdateOptions) (*v1.CertificateSigningRequest, error) + ApplyStatus(ctx context.Context, certificateSigningRequest *applyconfigurationscertificatesv1.CertificateSigningRequestApplyConfiguration, opts metav1.ApplyOptions) (result *certificatesv1.CertificateSigningRequest, err error) + UpdateApproval(ctx context.Context, certificateSigningRequestName string, certificateSigningRequest *certificatesv1.CertificateSigningRequest, opts metav1.UpdateOptions) (*certificatesv1.CertificateSigningRequest, error) CertificateSigningRequestExpansion } // certificateSigningRequests implements CertificateSigningRequestInterface type certificateSigningRequests struct { - *gentype.ClientWithListAndApply[*v1.CertificateSigningRequest, *v1.CertificateSigningRequestList, *certificatesv1.CertificateSigningRequestApplyConfiguration] + *gentype.ClientWithListAndApply[*certificatesv1.CertificateSigningRequest, *certificatesv1.CertificateSigningRequestList, *applyconfigurationscertificatesv1.CertificateSigningRequestApplyConfiguration] } // newCertificateSigningRequests returns a CertificateSigningRequests func newCertificateSigningRequests(c *CertificatesV1Client) *certificateSigningRequests { return &certificateSigningRequests{ - gentype.NewClientWithListAndApply[*v1.CertificateSigningRequest, *v1.CertificateSigningRequestList, *certificatesv1.CertificateSigningRequestApplyConfiguration]( + gentype.NewClientWithListAndApply[*certificatesv1.CertificateSigningRequest, *certificatesv1.CertificateSigningRequestList, *applyconfigurationscertificatesv1.CertificateSigningRequestApplyConfiguration]( "certificatesigningrequests", c.RESTClient(), scheme.ParameterCodec, "", - func() *v1.CertificateSigningRequest { return &v1.CertificateSigningRequest{} }, - func() *v1.CertificateSigningRequestList { return &v1.CertificateSigningRequestList{} }), + func() *certificatesv1.CertificateSigningRequest { return &certificatesv1.CertificateSigningRequest{} }, + func() *certificatesv1.CertificateSigningRequestList { + return &certificatesv1.CertificateSigningRequestList{} + }, + gentype.PrefersProtobuf[*certificatesv1.CertificateSigningRequest](), + ), } } // UpdateApproval takes the top resource name and the representation of a certificateSigningRequest and updates it. Returns the server's representation of the certificateSigningRequest, and an error, if there is any. -func (c *certificateSigningRequests) UpdateApproval(ctx context.Context, certificateSigningRequestName string, certificateSigningRequest *v1.CertificateSigningRequest, opts metav1.UpdateOptions) (result *v1.CertificateSigningRequest, err error) { - result = &v1.CertificateSigningRequest{} +func (c *certificateSigningRequests) UpdateApproval(ctx context.Context, certificateSigningRequestName string, certificateSigningRequest *certificatesv1.CertificateSigningRequest, opts metav1.UpdateOptions) (result *certificatesv1.CertificateSigningRequest, err error) { + result = &certificatesv1.CertificateSigningRequest{} err = c.GetClient().Put(). + UseProtobufAsDefault(). Resource("certificatesigningrequests"). Name(certificateSigningRequestName). SubResource("approval"). 
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1/fake/fake_certificates_client.go b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1/fake/fake_certificates_client.go index 4779d6169..782ebd95e 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1/fake/fake_certificates_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1/fake/fake_certificates_client.go @@ -29,7 +29,7 @@ type FakeCertificatesV1 struct { } func (c *FakeCertificatesV1) CertificateSigningRequests() v1.CertificateSigningRequestInterface { - return &FakeCertificateSigningRequests{c} + return newFakeCertificateSigningRequests(c) } // RESTClient returns a RESTClient that is used to communicate diff --git a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1/fake/fake_certificatesigningrequest.go b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1/fake/fake_certificatesigningrequest.go index f3fc99f83..784e74a37 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1/fake/fake_certificatesigningrequest.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1/fake/fake_certificatesigningrequest.go @@ -19,177 +19,48 @@ limitations under the License. package fake import ( - "context" - json "encoding/json" - "fmt" + context "context" v1 "k8s.io/api/certificates/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" certificatesv1 "k8s.io/client-go/applyconfigurations/certificates/v1" + gentype "k8s.io/client-go/gentype" + typedcertificatesv1 "k8s.io/client-go/kubernetes/typed/certificates/v1" testing "k8s.io/client-go/testing" ) -// FakeCertificateSigningRequests implements CertificateSigningRequestInterface -type FakeCertificateSigningRequests struct { +// fakeCertificateSigningRequests implements CertificateSigningRequestInterface +type fakeCertificateSigningRequests struct { + *gentype.FakeClientWithListAndApply[*v1.CertificateSigningRequest, *v1.CertificateSigningRequestList, *certificatesv1.CertificateSigningRequestApplyConfiguration] Fake *FakeCertificatesV1 } -var certificatesigningrequestsResource = v1.SchemeGroupVersion.WithResource("certificatesigningrequests") - -var certificatesigningrequestsKind = v1.SchemeGroupVersion.WithKind("CertificateSigningRequest") - -// Get takes name of the certificateSigningRequest, and returns the corresponding certificateSigningRequest object, and an error if there is any. -func (c *FakeCertificateSigningRequests) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.CertificateSigningRequest, err error) { - emptyResult := &v1.CertificateSigningRequest{} - obj, err := c.Fake. - Invokes(testing.NewRootGetActionWithOptions(certificatesigningrequestsResource, name, options), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1.CertificateSigningRequest), err -} - -// List takes label and field selectors, and returns the list of CertificateSigningRequests that match those selectors. -func (c *FakeCertificateSigningRequests) List(ctx context.Context, opts metav1.ListOptions) (result *v1.CertificateSigningRequestList, err error) { - emptyResult := &v1.CertificateSigningRequestList{} - obj, err := c.Fake. 
- Invokes(testing.NewRootListActionWithOptions(certificatesigningrequestsResource, certificatesigningrequestsKind, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() +func newFakeCertificateSigningRequests(fake *FakeCertificatesV1) typedcertificatesv1.CertificateSigningRequestInterface { + return &fakeCertificateSigningRequests{ + gentype.NewFakeClientWithListAndApply[*v1.CertificateSigningRequest, *v1.CertificateSigningRequestList, *certificatesv1.CertificateSigningRequestApplyConfiguration]( + fake.Fake, + "", + v1.SchemeGroupVersion.WithResource("certificatesigningrequests"), + v1.SchemeGroupVersion.WithKind("CertificateSigningRequest"), + func() *v1.CertificateSigningRequest { return &v1.CertificateSigningRequest{} }, + func() *v1.CertificateSigningRequestList { return &v1.CertificateSigningRequestList{} }, + func(dst, src *v1.CertificateSigningRequestList) { dst.ListMeta = src.ListMeta }, + func(list *v1.CertificateSigningRequestList) []*v1.CertificateSigningRequest { + return gentype.ToPointerSlice(list.Items) + }, + func(list *v1.CertificateSigningRequestList, items []*v1.CertificateSigningRequest) { + list.Items = gentype.FromPointerSlice(items) + }, + ), + fake, } - list := &v1.CertificateSigningRequestList{ListMeta: obj.(*v1.CertificateSigningRequestList).ListMeta} - for _, item := range obj.(*v1.CertificateSigningRequestList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested certificateSigningRequests. -func (c *FakeCertificateSigningRequests) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewRootWatchActionWithOptions(certificatesigningrequestsResource, opts)) -} - -// Create takes the representation of a certificateSigningRequest and creates it. Returns the server's representation of the certificateSigningRequest, and an error, if there is any. -func (c *FakeCertificateSigningRequests) Create(ctx context.Context, certificateSigningRequest *v1.CertificateSigningRequest, opts metav1.CreateOptions) (result *v1.CertificateSigningRequest, err error) { - emptyResult := &v1.CertificateSigningRequest{} - obj, err := c.Fake. - Invokes(testing.NewRootCreateActionWithOptions(certificatesigningrequestsResource, certificateSigningRequest, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1.CertificateSigningRequest), err -} - -// Update takes the representation of a certificateSigningRequest and updates it. Returns the server's representation of the certificateSigningRequest, and an error, if there is any. -func (c *FakeCertificateSigningRequests) Update(ctx context.Context, certificateSigningRequest *v1.CertificateSigningRequest, opts metav1.UpdateOptions) (result *v1.CertificateSigningRequest, err error) { - emptyResult := &v1.CertificateSigningRequest{} - obj, err := c.Fake. - Invokes(testing.NewRootUpdateActionWithOptions(certificatesigningrequestsResource, certificateSigningRequest, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1.CertificateSigningRequest), err -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
-func (c *FakeCertificateSigningRequests) UpdateStatus(ctx context.Context, certificateSigningRequest *v1.CertificateSigningRequest, opts metav1.UpdateOptions) (result *v1.CertificateSigningRequest, err error) { - emptyResult := &v1.CertificateSigningRequest{} - obj, err := c.Fake. - Invokes(testing.NewRootUpdateSubresourceActionWithOptions(certificatesigningrequestsResource, "status", certificateSigningRequest, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1.CertificateSigningRequest), err -} - -// Delete takes name of the certificateSigningRequest and deletes it. Returns an error if one occurs. -func (c *FakeCertificateSigningRequests) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewRootDeleteActionWithOptions(certificatesigningrequestsResource, name, opts), &v1.CertificateSigningRequest{}) - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeCertificateSigningRequests) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewRootDeleteCollectionActionWithOptions(certificatesigningrequestsResource, opts, listOpts) - - _, err := c.Fake.Invokes(action, &v1.CertificateSigningRequestList{}) - return err -} - -// Patch applies the patch and returns the patched certificateSigningRequest. -func (c *FakeCertificateSigningRequests) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CertificateSigningRequest, err error) { - emptyResult := &v1.CertificateSigningRequest{} - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceActionWithOptions(certificatesigningrequestsResource, name, pt, data, opts, subresources...), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1.CertificateSigningRequest), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied certificateSigningRequest. -func (c *FakeCertificateSigningRequests) Apply(ctx context.Context, certificateSigningRequest *certificatesv1.CertificateSigningRequestApplyConfiguration, opts metav1.ApplyOptions) (result *v1.CertificateSigningRequest, err error) { - if certificateSigningRequest == nil { - return nil, fmt.Errorf("certificateSigningRequest provided to Apply must not be nil") - } - data, err := json.Marshal(certificateSigningRequest) - if err != nil { - return nil, err - } - name := certificateSigningRequest.Name - if name == nil { - return nil, fmt.Errorf("certificateSigningRequest.Name must be provided to Apply") - } - emptyResult := &v1.CertificateSigningRequest{} - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceActionWithOptions(certificatesigningrequestsResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1.CertificateSigningRequest), err -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
-func (c *FakeCertificateSigningRequests) ApplyStatus(ctx context.Context, certificateSigningRequest *certificatesv1.CertificateSigningRequestApplyConfiguration, opts metav1.ApplyOptions) (result *v1.CertificateSigningRequest, err error) { - if certificateSigningRequest == nil { - return nil, fmt.Errorf("certificateSigningRequest provided to Apply must not be nil") - } - data, err := json.Marshal(certificateSigningRequest) - if err != nil { - return nil, err - } - name := certificateSigningRequest.Name - if name == nil { - return nil, fmt.Errorf("certificateSigningRequest.Name must be provided to Apply") - } - emptyResult := &v1.CertificateSigningRequest{} - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceActionWithOptions(certificatesigningrequestsResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1.CertificateSigningRequest), err } // UpdateApproval takes the representation of a certificateSigningRequest and updates it. Returns the server's representation of the certificateSigningRequest, and an error, if there is any. -func (c *FakeCertificateSigningRequests) UpdateApproval(ctx context.Context, certificateSigningRequestName string, certificateSigningRequest *v1.CertificateSigningRequest, opts metav1.UpdateOptions) (result *v1.CertificateSigningRequest, err error) { +func (c *fakeCertificateSigningRequests) UpdateApproval(ctx context.Context, certificateSigningRequestName string, certificateSigningRequest *v1.CertificateSigningRequest, opts metav1.UpdateOptions) (result *v1.CertificateSigningRequest, err error) { emptyResult := &v1.CertificateSigningRequest{} obj, err := c.Fake. - Invokes(testing.NewRootUpdateSubresourceActionWithOptions(certificatesigningrequestsResource, "approval", certificateSigningRequest, opts), emptyResult) + Invokes(testing.NewRootUpdateSubresourceActionWithOptions(c.Resource(), "approval", certificateSigningRequest, opts), emptyResult) if obj == nil { return emptyResult, err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1alpha1/certificates_client.go b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1alpha1/certificates_client.go index a9050af94..36e08253a 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1alpha1/certificates_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1alpha1/certificates_client.go @@ -19,10 +19,10 @@ limitations under the License. 
package v1alpha1 import ( - "net/http" + http "net/http" - v1alpha1 "k8s.io/api/certificates/v1alpha1" - "k8s.io/client-go/kubernetes/scheme" + certificatesv1alpha1 "k8s.io/api/certificates/v1alpha1" + scheme "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -85,10 +85,10 @@ func New(c rest.Interface) *CertificatesV1alpha1Client { } func setConfigDefaults(config *rest.Config) error { - gv := v1alpha1.SchemeGroupVersion + gv := certificatesv1alpha1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1alpha1/clustertrustbundle.go b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1alpha1/clustertrustbundle.go index 74fe9fa14..df215ff53 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1alpha1/clustertrustbundle.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1alpha1/clustertrustbundle.go @@ -19,13 +19,13 @@ limitations under the License. package v1alpha1 import ( - "context" + context "context" - v1alpha1 "k8s.io/api/certificates/v1alpha1" + certificatesv1alpha1 "k8s.io/api/certificates/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - certificatesv1alpha1 "k8s.io/client-go/applyconfigurations/certificates/v1alpha1" + applyconfigurationscertificatesv1alpha1 "k8s.io/client-go/applyconfigurations/certificates/v1alpha1" gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" ) @@ -38,32 +38,36 @@ type ClusterTrustBundlesGetter interface { // ClusterTrustBundleInterface has methods to work with ClusterTrustBundle resources. 
type ClusterTrustBundleInterface interface { - Create(ctx context.Context, clusterTrustBundle *v1alpha1.ClusterTrustBundle, opts v1.CreateOptions) (*v1alpha1.ClusterTrustBundle, error) - Update(ctx context.Context, clusterTrustBundle *v1alpha1.ClusterTrustBundle, opts v1.UpdateOptions) (*v1alpha1.ClusterTrustBundle, error) + Create(ctx context.Context, clusterTrustBundle *certificatesv1alpha1.ClusterTrustBundle, opts v1.CreateOptions) (*certificatesv1alpha1.ClusterTrustBundle, error) + Update(ctx context.Context, clusterTrustBundle *certificatesv1alpha1.ClusterTrustBundle, opts v1.UpdateOptions) (*certificatesv1alpha1.ClusterTrustBundle, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.ClusterTrustBundle, error) - List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.ClusterTrustBundleList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*certificatesv1alpha1.ClusterTrustBundle, error) + List(ctx context.Context, opts v1.ListOptions) (*certificatesv1alpha1.ClusterTrustBundleList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ClusterTrustBundle, err error) - Apply(ctx context.Context, clusterTrustBundle *certificatesv1alpha1.ClusterTrustBundleApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ClusterTrustBundle, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *certificatesv1alpha1.ClusterTrustBundle, err error) + Apply(ctx context.Context, clusterTrustBundle *applyconfigurationscertificatesv1alpha1.ClusterTrustBundleApplyConfiguration, opts v1.ApplyOptions) (result *certificatesv1alpha1.ClusterTrustBundle, err error) ClusterTrustBundleExpansion } // clusterTrustBundles implements ClusterTrustBundleInterface type clusterTrustBundles struct { - *gentype.ClientWithListAndApply[*v1alpha1.ClusterTrustBundle, *v1alpha1.ClusterTrustBundleList, *certificatesv1alpha1.ClusterTrustBundleApplyConfiguration] + *gentype.ClientWithListAndApply[*certificatesv1alpha1.ClusterTrustBundle, *certificatesv1alpha1.ClusterTrustBundleList, *applyconfigurationscertificatesv1alpha1.ClusterTrustBundleApplyConfiguration] } // newClusterTrustBundles returns a ClusterTrustBundles func newClusterTrustBundles(c *CertificatesV1alpha1Client) *clusterTrustBundles { return &clusterTrustBundles{ - gentype.NewClientWithListAndApply[*v1alpha1.ClusterTrustBundle, *v1alpha1.ClusterTrustBundleList, *certificatesv1alpha1.ClusterTrustBundleApplyConfiguration]( + gentype.NewClientWithListAndApply[*certificatesv1alpha1.ClusterTrustBundle, *certificatesv1alpha1.ClusterTrustBundleList, *applyconfigurationscertificatesv1alpha1.ClusterTrustBundleApplyConfiguration]( "clustertrustbundles", c.RESTClient(), scheme.ParameterCodec, "", - func() *v1alpha1.ClusterTrustBundle { return &v1alpha1.ClusterTrustBundle{} }, - func() *v1alpha1.ClusterTrustBundleList { return &v1alpha1.ClusterTrustBundleList{} }), + func() *certificatesv1alpha1.ClusterTrustBundle { return &certificatesv1alpha1.ClusterTrustBundle{} }, + func() *certificatesv1alpha1.ClusterTrustBundleList { + return &certificatesv1alpha1.ClusterTrustBundleList{} + }, + 
gentype.PrefersProtobuf[*certificatesv1alpha1.ClusterTrustBundle](), + ), } } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1alpha1/fake/fake_certificates_client.go b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1alpha1/fake/fake_certificates_client.go index 8ff02cdbb..491e38100 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1alpha1/fake/fake_certificates_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1alpha1/fake/fake_certificates_client.go @@ -29,7 +29,7 @@ type FakeCertificatesV1alpha1 struct { } func (c *FakeCertificatesV1alpha1) ClusterTrustBundles() v1alpha1.ClusterTrustBundleInterface { - return &FakeClusterTrustBundles{c} + return newFakeClusterTrustBundles(c) } // RESTClient returns a RESTClient that is used to communicate diff --git a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1alpha1/fake/fake_clustertrustbundle.go b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1alpha1/fake/fake_clustertrustbundle.go index 1c4e97bd4..f2b5fa292 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1alpha1/fake/fake_clustertrustbundle.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1alpha1/fake/fake_clustertrustbundle.go @@ -19,133 +19,35 @@ limitations under the License. package fake import ( - "context" - json "encoding/json" - "fmt" - v1alpha1 "k8s.io/api/certificates/v1alpha1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" certificatesv1alpha1 "k8s.io/client-go/applyconfigurations/certificates/v1alpha1" - testing "k8s.io/client-go/testing" + gentype "k8s.io/client-go/gentype" + typedcertificatesv1alpha1 "k8s.io/client-go/kubernetes/typed/certificates/v1alpha1" ) -// FakeClusterTrustBundles implements ClusterTrustBundleInterface -type FakeClusterTrustBundles struct { +// fakeClusterTrustBundles implements ClusterTrustBundleInterface +type fakeClusterTrustBundles struct { + *gentype.FakeClientWithListAndApply[*v1alpha1.ClusterTrustBundle, *v1alpha1.ClusterTrustBundleList, *certificatesv1alpha1.ClusterTrustBundleApplyConfiguration] Fake *FakeCertificatesV1alpha1 } -var clustertrustbundlesResource = v1alpha1.SchemeGroupVersion.WithResource("clustertrustbundles") - -var clustertrustbundlesKind = v1alpha1.SchemeGroupVersion.WithKind("ClusterTrustBundle") - -// Get takes name of the clusterTrustBundle, and returns the corresponding clusterTrustBundle object, and an error if there is any. -func (c *FakeClusterTrustBundles) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ClusterTrustBundle, err error) { - emptyResult := &v1alpha1.ClusterTrustBundle{} - obj, err := c.Fake. - Invokes(testing.NewRootGetActionWithOptions(clustertrustbundlesResource, name, options), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1alpha1.ClusterTrustBundle), err -} - -// List takes label and field selectors, and returns the list of ClusterTrustBundles that match those selectors. -func (c *FakeClusterTrustBundles) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ClusterTrustBundleList, err error) { - emptyResult := &v1alpha1.ClusterTrustBundleList{} - obj, err := c.Fake. 
- Invokes(testing.NewRootListActionWithOptions(clustertrustbundlesResource, clustertrustbundlesKind, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1alpha1.ClusterTrustBundleList{ListMeta: obj.(*v1alpha1.ClusterTrustBundleList).ListMeta} - for _, item := range obj.(*v1alpha1.ClusterTrustBundleList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested clusterTrustBundles. -func (c *FakeClusterTrustBundles) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewRootWatchActionWithOptions(clustertrustbundlesResource, opts)) -} - -// Create takes the representation of a clusterTrustBundle and creates it. Returns the server's representation of the clusterTrustBundle, and an error, if there is any. -func (c *FakeClusterTrustBundles) Create(ctx context.Context, clusterTrustBundle *v1alpha1.ClusterTrustBundle, opts v1.CreateOptions) (result *v1alpha1.ClusterTrustBundle, err error) { - emptyResult := &v1alpha1.ClusterTrustBundle{} - obj, err := c.Fake. - Invokes(testing.NewRootCreateActionWithOptions(clustertrustbundlesResource, clusterTrustBundle, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1alpha1.ClusterTrustBundle), err -} - -// Update takes the representation of a clusterTrustBundle and updates it. Returns the server's representation of the clusterTrustBundle, and an error, if there is any. -func (c *FakeClusterTrustBundles) Update(ctx context.Context, clusterTrustBundle *v1alpha1.ClusterTrustBundle, opts v1.UpdateOptions) (result *v1alpha1.ClusterTrustBundle, err error) { - emptyResult := &v1alpha1.ClusterTrustBundle{} - obj, err := c.Fake. - Invokes(testing.NewRootUpdateActionWithOptions(clustertrustbundlesResource, clusterTrustBundle, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1alpha1.ClusterTrustBundle), err -} - -// Delete takes name of the clusterTrustBundle and deletes it. Returns an error if one occurs. -func (c *FakeClusterTrustBundles) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewRootDeleteActionWithOptions(clustertrustbundlesResource, name, opts), &v1alpha1.ClusterTrustBundle{}) - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeClusterTrustBundles) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewRootDeleteCollectionActionWithOptions(clustertrustbundlesResource, opts, listOpts) - - _, err := c.Fake.Invokes(action, &v1alpha1.ClusterTrustBundleList{}) - return err -} - -// Patch applies the patch and returns the patched clusterTrustBundle. -func (c *FakeClusterTrustBundles) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ClusterTrustBundle, err error) { - emptyResult := &v1alpha1.ClusterTrustBundle{} - obj, err := c.Fake. 
- Invokes(testing.NewRootPatchSubresourceActionWithOptions(clustertrustbundlesResource, name, pt, data, opts, subresources...), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1alpha1.ClusterTrustBundle), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied clusterTrustBundle. -func (c *FakeClusterTrustBundles) Apply(ctx context.Context, clusterTrustBundle *certificatesv1alpha1.ClusterTrustBundleApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ClusterTrustBundle, err error) { - if clusterTrustBundle == nil { - return nil, fmt.Errorf("clusterTrustBundle provided to Apply must not be nil") - } - data, err := json.Marshal(clusterTrustBundle) - if err != nil { - return nil, err - } - name := clusterTrustBundle.Name - if name == nil { - return nil, fmt.Errorf("clusterTrustBundle.Name must be provided to Apply") - } - emptyResult := &v1alpha1.ClusterTrustBundle{} - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceActionWithOptions(clustertrustbundlesResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) - if obj == nil { - return emptyResult, err +func newFakeClusterTrustBundles(fake *FakeCertificatesV1alpha1) typedcertificatesv1alpha1.ClusterTrustBundleInterface { + return &fakeClusterTrustBundles{ + gentype.NewFakeClientWithListAndApply[*v1alpha1.ClusterTrustBundle, *v1alpha1.ClusterTrustBundleList, *certificatesv1alpha1.ClusterTrustBundleApplyConfiguration]( + fake.Fake, + "", + v1alpha1.SchemeGroupVersion.WithResource("clustertrustbundles"), + v1alpha1.SchemeGroupVersion.WithKind("ClusterTrustBundle"), + func() *v1alpha1.ClusterTrustBundle { return &v1alpha1.ClusterTrustBundle{} }, + func() *v1alpha1.ClusterTrustBundleList { return &v1alpha1.ClusterTrustBundleList{} }, + func(dst, src *v1alpha1.ClusterTrustBundleList) { dst.ListMeta = src.ListMeta }, + func(list *v1alpha1.ClusterTrustBundleList) []*v1alpha1.ClusterTrustBundle { + return gentype.ToPointerSlice(list.Items) + }, + func(list *v1alpha1.ClusterTrustBundleList, items []*v1alpha1.ClusterTrustBundle) { + list.Items = gentype.FromPointerSlice(items) + }, + ), + fake, } - return obj.(*v1alpha1.ClusterTrustBundle), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificates_client.go b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificates_client.go index fa97b441d..f040e7664 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificates_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificates_client.go @@ -19,10 +19,10 @@ limitations under the License. 
package v1beta1 import ( - "net/http" + http "net/http" - v1beta1 "k8s.io/api/certificates/v1beta1" - "k8s.io/client-go/kubernetes/scheme" + certificatesv1beta1 "k8s.io/api/certificates/v1beta1" + scheme "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -85,10 +85,10 @@ func New(c rest.Interface) *CertificatesV1beta1Client { } func setConfigDefaults(config *rest.Config) error { - gv := v1beta1.SchemeGroupVersion + gv := certificatesv1beta1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificatesigningrequest.go b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificatesigningrequest.go index de9915c5d..4c6e28c65 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificatesigningrequest.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificatesigningrequest.go @@ -19,13 +19,13 @@ limitations under the License. package v1beta1 import ( - "context" + context "context" - v1beta1 "k8s.io/api/certificates/v1beta1" + certificatesv1beta1 "k8s.io/api/certificates/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - certificatesv1beta1 "k8s.io/client-go/applyconfigurations/certificates/v1beta1" + applyconfigurationscertificatesv1beta1 "k8s.io/client-go/applyconfigurations/certificates/v1beta1" gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" ) @@ -38,36 +38,42 @@ type CertificateSigningRequestsGetter interface { // CertificateSigningRequestInterface has methods to work with CertificateSigningRequest resources. type CertificateSigningRequestInterface interface { - Create(ctx context.Context, certificateSigningRequest *v1beta1.CertificateSigningRequest, opts v1.CreateOptions) (*v1beta1.CertificateSigningRequest, error) - Update(ctx context.Context, certificateSigningRequest *v1beta1.CertificateSigningRequest, opts v1.UpdateOptions) (*v1beta1.CertificateSigningRequest, error) + Create(ctx context.Context, certificateSigningRequest *certificatesv1beta1.CertificateSigningRequest, opts v1.CreateOptions) (*certificatesv1beta1.CertificateSigningRequest, error) + Update(ctx context.Context, certificateSigningRequest *certificatesv1beta1.CertificateSigningRequest, opts v1.UpdateOptions) (*certificatesv1beta1.CertificateSigningRequest, error) // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
- UpdateStatus(ctx context.Context, certificateSigningRequest *v1beta1.CertificateSigningRequest, opts v1.UpdateOptions) (*v1beta1.CertificateSigningRequest, error) + UpdateStatus(ctx context.Context, certificateSigningRequest *certificatesv1beta1.CertificateSigningRequest, opts v1.UpdateOptions) (*certificatesv1beta1.CertificateSigningRequest, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.CertificateSigningRequest, error) - List(ctx context.Context, opts v1.ListOptions) (*v1beta1.CertificateSigningRequestList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*certificatesv1beta1.CertificateSigningRequest, error) + List(ctx context.Context, opts v1.ListOptions) (*certificatesv1beta1.CertificateSigningRequestList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.CertificateSigningRequest, err error) - Apply(ctx context.Context, certificateSigningRequest *certificatesv1beta1.CertificateSigningRequestApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.CertificateSigningRequest, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *certificatesv1beta1.CertificateSigningRequest, err error) + Apply(ctx context.Context, certificateSigningRequest *applyconfigurationscertificatesv1beta1.CertificateSigningRequestApplyConfiguration, opts v1.ApplyOptions) (result *certificatesv1beta1.CertificateSigningRequest, err error) // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
- ApplyStatus(ctx context.Context, certificateSigningRequest *certificatesv1beta1.CertificateSigningRequestApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.CertificateSigningRequest, err error) + ApplyStatus(ctx context.Context, certificateSigningRequest *applyconfigurationscertificatesv1beta1.CertificateSigningRequestApplyConfiguration, opts v1.ApplyOptions) (result *certificatesv1beta1.CertificateSigningRequest, err error) CertificateSigningRequestExpansion } // certificateSigningRequests implements CertificateSigningRequestInterface type certificateSigningRequests struct { - *gentype.ClientWithListAndApply[*v1beta1.CertificateSigningRequest, *v1beta1.CertificateSigningRequestList, *certificatesv1beta1.CertificateSigningRequestApplyConfiguration] + *gentype.ClientWithListAndApply[*certificatesv1beta1.CertificateSigningRequest, *certificatesv1beta1.CertificateSigningRequestList, *applyconfigurationscertificatesv1beta1.CertificateSigningRequestApplyConfiguration] } // newCertificateSigningRequests returns a CertificateSigningRequests func newCertificateSigningRequests(c *CertificatesV1beta1Client) *certificateSigningRequests { return &certificateSigningRequests{ - gentype.NewClientWithListAndApply[*v1beta1.CertificateSigningRequest, *v1beta1.CertificateSigningRequestList, *certificatesv1beta1.CertificateSigningRequestApplyConfiguration]( + gentype.NewClientWithListAndApply[*certificatesv1beta1.CertificateSigningRequest, *certificatesv1beta1.CertificateSigningRequestList, *applyconfigurationscertificatesv1beta1.CertificateSigningRequestApplyConfiguration]( "certificatesigningrequests", c.RESTClient(), scheme.ParameterCodec, "", - func() *v1beta1.CertificateSigningRequest { return &v1beta1.CertificateSigningRequest{} }, - func() *v1beta1.CertificateSigningRequestList { return &v1beta1.CertificateSigningRequestList{} }), + func() *certificatesv1beta1.CertificateSigningRequest { + return &certificatesv1beta1.CertificateSigningRequest{} + }, + func() *certificatesv1beta1.CertificateSigningRequestList { + return &certificatesv1beta1.CertificateSigningRequestList{} + }, + gentype.PrefersProtobuf[*certificatesv1beta1.CertificateSigningRequest](), + ), } } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake/fake_certificates_client.go b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake/fake_certificates_client.go index 29d8b088e..313df7abd 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake/fake_certificates_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake/fake_certificates_client.go @@ -29,7 +29,7 @@ type FakeCertificatesV1beta1 struct { } func (c *FakeCertificatesV1beta1) CertificateSigningRequests() v1beta1.CertificateSigningRequestInterface { - return &FakeCertificateSigningRequests{c} + return newFakeCertificateSigningRequests(c) } // RESTClient returns a RESTClient that is used to communicate diff --git a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake/fake_certificatesigningrequest.go b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake/fake_certificatesigningrequest.go index ff5a9bd4c..a5f144cb8 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake/fake_certificatesigningrequest.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake/fake_certificatesigningrequest.go @@ -19,168 +19,35 @@ limitations under the License. 
package fake import ( - "context" - json "encoding/json" - "fmt" - v1beta1 "k8s.io/api/certificates/v1beta1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" certificatesv1beta1 "k8s.io/client-go/applyconfigurations/certificates/v1beta1" - testing "k8s.io/client-go/testing" + gentype "k8s.io/client-go/gentype" + typedcertificatesv1beta1 "k8s.io/client-go/kubernetes/typed/certificates/v1beta1" ) -// FakeCertificateSigningRequests implements CertificateSigningRequestInterface -type FakeCertificateSigningRequests struct { +// fakeCertificateSigningRequests implements CertificateSigningRequestInterface +type fakeCertificateSigningRequests struct { + *gentype.FakeClientWithListAndApply[*v1beta1.CertificateSigningRequest, *v1beta1.CertificateSigningRequestList, *certificatesv1beta1.CertificateSigningRequestApplyConfiguration] Fake *FakeCertificatesV1beta1 } -var certificatesigningrequestsResource = v1beta1.SchemeGroupVersion.WithResource("certificatesigningrequests") - -var certificatesigningrequestsKind = v1beta1.SchemeGroupVersion.WithKind("CertificateSigningRequest") - -// Get takes name of the certificateSigningRequest, and returns the corresponding certificateSigningRequest object, and an error if there is any. -func (c *FakeCertificateSigningRequests) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.CertificateSigningRequest, err error) { - emptyResult := &v1beta1.CertificateSigningRequest{} - obj, err := c.Fake. - Invokes(testing.NewRootGetActionWithOptions(certificatesigningrequestsResource, name, options), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.CertificateSigningRequest), err -} - -// List takes label and field selectors, and returns the list of CertificateSigningRequests that match those selectors. -func (c *FakeCertificateSigningRequests) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.CertificateSigningRequestList, err error) { - emptyResult := &v1beta1.CertificateSigningRequestList{} - obj, err := c.Fake. - Invokes(testing.NewRootListActionWithOptions(certificatesigningrequestsResource, certificatesigningrequestsKind, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1beta1.CertificateSigningRequestList{ListMeta: obj.(*v1beta1.CertificateSigningRequestList).ListMeta} - for _, item := range obj.(*v1beta1.CertificateSigningRequestList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested certificateSigningRequests. -func (c *FakeCertificateSigningRequests) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewRootWatchActionWithOptions(certificatesigningrequestsResource, opts)) -} - -// Create takes the representation of a certificateSigningRequest and creates it. Returns the server's representation of the certificateSigningRequest, and an error, if there is any. -func (c *FakeCertificateSigningRequests) Create(ctx context.Context, certificateSigningRequest *v1beta1.CertificateSigningRequest, opts v1.CreateOptions) (result *v1beta1.CertificateSigningRequest, err error) { - emptyResult := &v1beta1.CertificateSigningRequest{} - obj, err := c.Fake. 
- Invokes(testing.NewRootCreateActionWithOptions(certificatesigningrequestsResource, certificateSigningRequest, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.CertificateSigningRequest), err -} - -// Update takes the representation of a certificateSigningRequest and updates it. Returns the server's representation of the certificateSigningRequest, and an error, if there is any. -func (c *FakeCertificateSigningRequests) Update(ctx context.Context, certificateSigningRequest *v1beta1.CertificateSigningRequest, opts v1.UpdateOptions) (result *v1beta1.CertificateSigningRequest, err error) { - emptyResult := &v1beta1.CertificateSigningRequest{} - obj, err := c.Fake. - Invokes(testing.NewRootUpdateActionWithOptions(certificatesigningrequestsResource, certificateSigningRequest, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.CertificateSigningRequest), err -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeCertificateSigningRequests) UpdateStatus(ctx context.Context, certificateSigningRequest *v1beta1.CertificateSigningRequest, opts v1.UpdateOptions) (result *v1beta1.CertificateSigningRequest, err error) { - emptyResult := &v1beta1.CertificateSigningRequest{} - obj, err := c.Fake. - Invokes(testing.NewRootUpdateSubresourceActionWithOptions(certificatesigningrequestsResource, "status", certificateSigningRequest, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.CertificateSigningRequest), err -} - -// Delete takes name of the certificateSigningRequest and deletes it. Returns an error if one occurs. -func (c *FakeCertificateSigningRequests) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewRootDeleteActionWithOptions(certificatesigningrequestsResource, name, opts), &v1beta1.CertificateSigningRequest{}) - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeCertificateSigningRequests) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewRootDeleteCollectionActionWithOptions(certificatesigningrequestsResource, opts, listOpts) - - _, err := c.Fake.Invokes(action, &v1beta1.CertificateSigningRequestList{}) - return err -} - -// Patch applies the patch and returns the patched certificateSigningRequest. -func (c *FakeCertificateSigningRequests) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.CertificateSigningRequest, err error) { - emptyResult := &v1beta1.CertificateSigningRequest{} - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceActionWithOptions(certificatesigningrequestsResource, name, pt, data, opts, subresources...), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.CertificateSigningRequest), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied certificateSigningRequest. 
-func (c *FakeCertificateSigningRequests) Apply(ctx context.Context, certificateSigningRequest *certificatesv1beta1.CertificateSigningRequestApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.CertificateSigningRequest, err error) { - if certificateSigningRequest == nil { - return nil, fmt.Errorf("certificateSigningRequest provided to Apply must not be nil") - } - data, err := json.Marshal(certificateSigningRequest) - if err != nil { - return nil, err - } - name := certificateSigningRequest.Name - if name == nil { - return nil, fmt.Errorf("certificateSigningRequest.Name must be provided to Apply") - } - emptyResult := &v1beta1.CertificateSigningRequest{} - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceActionWithOptions(certificatesigningrequestsResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.CertificateSigningRequest), err -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *FakeCertificateSigningRequests) ApplyStatus(ctx context.Context, certificateSigningRequest *certificatesv1beta1.CertificateSigningRequestApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.CertificateSigningRequest, err error) { - if certificateSigningRequest == nil { - return nil, fmt.Errorf("certificateSigningRequest provided to Apply must not be nil") - } - data, err := json.Marshal(certificateSigningRequest) - if err != nil { - return nil, err - } - name := certificateSigningRequest.Name - if name == nil { - return nil, fmt.Errorf("certificateSigningRequest.Name must be provided to Apply") - } - emptyResult := &v1beta1.CertificateSigningRequest{} - obj, err := c.Fake. 
- Invokes(testing.NewRootPatchSubresourceActionWithOptions(certificatesigningrequestsResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) - if obj == nil { - return emptyResult, err +func newFakeCertificateSigningRequests(fake *FakeCertificatesV1beta1) typedcertificatesv1beta1.CertificateSigningRequestInterface { + return &fakeCertificateSigningRequests{ + gentype.NewFakeClientWithListAndApply[*v1beta1.CertificateSigningRequest, *v1beta1.CertificateSigningRequestList, *certificatesv1beta1.CertificateSigningRequestApplyConfiguration]( + fake.Fake, + "", + v1beta1.SchemeGroupVersion.WithResource("certificatesigningrequests"), + v1beta1.SchemeGroupVersion.WithKind("CertificateSigningRequest"), + func() *v1beta1.CertificateSigningRequest { return &v1beta1.CertificateSigningRequest{} }, + func() *v1beta1.CertificateSigningRequestList { return &v1beta1.CertificateSigningRequestList{} }, + func(dst, src *v1beta1.CertificateSigningRequestList) { dst.ListMeta = src.ListMeta }, + func(list *v1beta1.CertificateSigningRequestList) []*v1beta1.CertificateSigningRequest { + return gentype.ToPointerSlice(list.Items) + }, + func(list *v1beta1.CertificateSigningRequestList, items []*v1beta1.CertificateSigningRequest) { + list.Items = gentype.FromPointerSlice(items) + }, + ), + fake, } - return obj.(*v1beta1.CertificateSigningRequest), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake/fake_certificatesigningrequest_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake/fake_certificatesigningrequest_expansion.go index 2c3eaf971..5d881e45e 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake/fake_certificatesigningrequest_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake/fake_certificatesigningrequest_expansion.go @@ -24,9 +24,9 @@ import ( core "k8s.io/client-go/testing" ) -func (c *FakeCertificateSigningRequests) UpdateApproval(ctx context.Context, certificateSigningRequest *certificates.CertificateSigningRequest, opts metav1.UpdateOptions) (result *certificates.CertificateSigningRequest, err error) { +func (c *fakeCertificateSigningRequests) UpdateApproval(ctx context.Context, certificateSigningRequest *certificates.CertificateSigningRequest, opts metav1.UpdateOptions) (result *certificates.CertificateSigningRequest, err error) { obj, err := c.Fake. - Invokes(core.NewRootUpdateSubresourceAction(certificatesigningrequestsResource, "approval", certificateSigningRequest), &certificates.CertificateSigningRequest{}) + Invokes(core.NewRootUpdateSubresourceAction(c.Resource(), "approval", certificateSigningRequest), &certificates.CertificateSigningRequest{}) if obj == nil { return nil, err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/coordination_client.go b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/coordination_client.go index e19469d53..427cb7e93 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/coordination_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/coordination_client.go @@ -19,10 +19,10 @@ limitations under the License. 
package v1 import ( - "net/http" + http "net/http" - v1 "k8s.io/api/coordination/v1" - "k8s.io/client-go/kubernetes/scheme" + coordinationv1 "k8s.io/api/coordination/v1" + scheme "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -85,10 +85,10 @@ func New(c rest.Interface) *CoordinationV1Client { } func setConfigDefaults(config *rest.Config) error { - gv := v1.SchemeGroupVersion + gv := coordinationv1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/fake/fake_coordination_client.go b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/fake/fake_coordination_client.go index 6920275b2..fba319348 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/fake/fake_coordination_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/fake/fake_coordination_client.go @@ -29,7 +29,7 @@ type FakeCoordinationV1 struct { } func (c *FakeCoordinationV1) Leases(namespace string) v1.LeaseInterface { - return &FakeLeases{c, namespace} + return newFakeLeases(c, namespace) } // RESTClient returns a RESTClient that is used to communicate diff --git a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/fake/fake_lease.go b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/fake/fake_lease.go index 03f833f37..1f0f6acd3 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/fake/fake_lease.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/fake/fake_lease.go @@ -19,142 +19,31 @@ limitations under the License. package fake import ( - "context" - json "encoding/json" - "fmt" - v1 "k8s.io/api/coordination/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" coordinationv1 "k8s.io/client-go/applyconfigurations/coordination/v1" - testing "k8s.io/client-go/testing" + gentype "k8s.io/client-go/gentype" + typedcoordinationv1 "k8s.io/client-go/kubernetes/typed/coordination/v1" ) -// FakeLeases implements LeaseInterface -type FakeLeases struct { +// fakeLeases implements LeaseInterface +type fakeLeases struct { + *gentype.FakeClientWithListAndApply[*v1.Lease, *v1.LeaseList, *coordinationv1.LeaseApplyConfiguration] Fake *FakeCoordinationV1 - ns string -} - -var leasesResource = v1.SchemeGroupVersion.WithResource("leases") - -var leasesKind = v1.SchemeGroupVersion.WithKind("Lease") - -// Get takes name of the lease, and returns the corresponding lease object, and an error if there is any. -func (c *FakeLeases) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Lease, err error) { - emptyResult := &v1.Lease{} - obj, err := c.Fake. - Invokes(testing.NewGetActionWithOptions(leasesResource, c.ns, name, options), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1.Lease), err -} - -// List takes label and field selectors, and returns the list of Leases that match those selectors. -func (c *FakeLeases) List(ctx context.Context, opts metav1.ListOptions) (result *v1.LeaseList, err error) { - emptyResult := &v1.LeaseList{} - obj, err := c.Fake. 
- Invokes(testing.NewListActionWithOptions(leasesResource, leasesKind, c.ns, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1.LeaseList{ListMeta: obj.(*v1.LeaseList).ListMeta} - for _, item := range obj.(*v1.LeaseList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested leases. -func (c *FakeLeases) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchActionWithOptions(leasesResource, c.ns, opts)) - } -// Create takes the representation of a lease and creates it. Returns the server's representation of the lease, and an error, if there is any. -func (c *FakeLeases) Create(ctx context.Context, lease *v1.Lease, opts metav1.CreateOptions) (result *v1.Lease, err error) { - emptyResult := &v1.Lease{} - obj, err := c.Fake. - Invokes(testing.NewCreateActionWithOptions(leasesResource, c.ns, lease, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1.Lease), err -} - -// Update takes the representation of a lease and updates it. Returns the server's representation of the lease, and an error, if there is any. -func (c *FakeLeases) Update(ctx context.Context, lease *v1.Lease, opts metav1.UpdateOptions) (result *v1.Lease, err error) { - emptyResult := &v1.Lease{} - obj, err := c.Fake. - Invokes(testing.NewUpdateActionWithOptions(leasesResource, c.ns, lease, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1.Lease), err -} - -// Delete takes name of the lease and deletes it. Returns an error if one occurs. -func (c *FakeLeases) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(leasesResource, c.ns, name, opts), &v1.Lease{}) - - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeLeases) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewDeleteCollectionActionWithOptions(leasesResource, c.ns, opts, listOpts) - - _, err := c.Fake.Invokes(action, &v1.LeaseList{}) - return err -} - -// Patch applies the patch and returns the patched lease. -func (c *FakeLeases) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Lease, err error) { - emptyResult := &v1.Lease{} - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceActionWithOptions(leasesResource, c.ns, name, pt, data, opts, subresources...), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1.Lease), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied lease. -func (c *FakeLeases) Apply(ctx context.Context, lease *coordinationv1.LeaseApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Lease, err error) { - if lease == nil { - return nil, fmt.Errorf("lease provided to Apply must not be nil") - } - data, err := json.Marshal(lease) - if err != nil { - return nil, err - } - name := lease.Name - if name == nil { - return nil, fmt.Errorf("lease.Name must be provided to Apply") - } - emptyResult := &v1.Lease{} - obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceActionWithOptions(leasesResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) - - if obj == nil { - return emptyResult, err +func newFakeLeases(fake *FakeCoordinationV1, namespace string) typedcoordinationv1.LeaseInterface { + return &fakeLeases{ + gentype.NewFakeClientWithListAndApply[*v1.Lease, *v1.LeaseList, *coordinationv1.LeaseApplyConfiguration]( + fake.Fake, + namespace, + v1.SchemeGroupVersion.WithResource("leases"), + v1.SchemeGroupVersion.WithKind("Lease"), + func() *v1.Lease { return &v1.Lease{} }, + func() *v1.LeaseList { return &v1.LeaseList{} }, + func(dst, src *v1.LeaseList) { dst.ListMeta = src.ListMeta }, + func(list *v1.LeaseList) []*v1.Lease { return gentype.ToPointerSlice(list.Items) }, + func(list *v1.LeaseList, items []*v1.Lease) { list.Items = gentype.FromPointerSlice(items) }, + ), + fake, } - return obj.(*v1.Lease), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/lease.go b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/lease.go index 97834d6ac..6e7784d6a 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/lease.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/lease.go @@ -19,13 +19,13 @@ limitations under the License. package v1 import ( - "context" + context "context" - v1 "k8s.io/api/coordination/v1" + coordinationv1 "k8s.io/api/coordination/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - coordinationv1 "k8s.io/client-go/applyconfigurations/coordination/v1" + applyconfigurationscoordinationv1 "k8s.io/client-go/applyconfigurations/coordination/v1" gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" ) @@ -38,32 +38,34 @@ type LeasesGetter interface { // LeaseInterface has methods to work with Lease resources. 
type LeaseInterface interface { - Create(ctx context.Context, lease *v1.Lease, opts metav1.CreateOptions) (*v1.Lease, error) - Update(ctx context.Context, lease *v1.Lease, opts metav1.UpdateOptions) (*v1.Lease, error) + Create(ctx context.Context, lease *coordinationv1.Lease, opts metav1.CreateOptions) (*coordinationv1.Lease, error) + Update(ctx context.Context, lease *coordinationv1.Lease, opts metav1.UpdateOptions) (*coordinationv1.Lease, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error - Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.Lease, error) - List(ctx context.Context, opts metav1.ListOptions) (*v1.LeaseList, error) + Get(ctx context.Context, name string, opts metav1.GetOptions) (*coordinationv1.Lease, error) + List(ctx context.Context, opts metav1.ListOptions) (*coordinationv1.LeaseList, error) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Lease, err error) - Apply(ctx context.Context, lease *coordinationv1.LeaseApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Lease, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *coordinationv1.Lease, err error) + Apply(ctx context.Context, lease *applyconfigurationscoordinationv1.LeaseApplyConfiguration, opts metav1.ApplyOptions) (result *coordinationv1.Lease, err error) LeaseExpansion } // leases implements LeaseInterface type leases struct { - *gentype.ClientWithListAndApply[*v1.Lease, *v1.LeaseList, *coordinationv1.LeaseApplyConfiguration] + *gentype.ClientWithListAndApply[*coordinationv1.Lease, *coordinationv1.LeaseList, *applyconfigurationscoordinationv1.LeaseApplyConfiguration] } // newLeases returns a Leases func newLeases(c *CoordinationV1Client, namespace string) *leases { return &leases{ - gentype.NewClientWithListAndApply[*v1.Lease, *v1.LeaseList, *coordinationv1.LeaseApplyConfiguration]( + gentype.NewClientWithListAndApply[*coordinationv1.Lease, *coordinationv1.LeaseList, *applyconfigurationscoordinationv1.LeaseApplyConfiguration]( "leases", c.RESTClient(), scheme.ParameterCodec, namespace, - func() *v1.Lease { return &v1.Lease{} }, - func() *v1.LeaseList { return &v1.LeaseList{} }), + func() *coordinationv1.Lease { return &coordinationv1.Lease{} }, + func() *coordinationv1.LeaseList { return &coordinationv1.LeaseList{} }, + gentype.PrefersProtobuf[*coordinationv1.Lease](), + ), } } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1alpha1/fake/fake_leasecandidate.go b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1alpha1/fake/fake_leasecandidate.go deleted file mode 100644 index c3de2303c..000000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1alpha1/fake/fake_leasecandidate.go +++ /dev/null @@ -1,160 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - "context" - json "encoding/json" - "fmt" - - v1alpha1 "k8s.io/api/coordination/v1alpha1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - coordinationv1alpha1 "k8s.io/client-go/applyconfigurations/coordination/v1alpha1" - testing "k8s.io/client-go/testing" -) - -// FakeLeaseCandidates implements LeaseCandidateInterface -type FakeLeaseCandidates struct { - Fake *FakeCoordinationV1alpha1 - ns string -} - -var leasecandidatesResource = v1alpha1.SchemeGroupVersion.WithResource("leasecandidates") - -var leasecandidatesKind = v1alpha1.SchemeGroupVersion.WithKind("LeaseCandidate") - -// Get takes name of the leaseCandidate, and returns the corresponding leaseCandidate object, and an error if there is any. -func (c *FakeLeaseCandidates) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.LeaseCandidate, err error) { - emptyResult := &v1alpha1.LeaseCandidate{} - obj, err := c.Fake. - Invokes(testing.NewGetActionWithOptions(leasecandidatesResource, c.ns, name, options), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1alpha1.LeaseCandidate), err -} - -// List takes label and field selectors, and returns the list of LeaseCandidates that match those selectors. -func (c *FakeLeaseCandidates) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.LeaseCandidateList, err error) { - emptyResult := &v1alpha1.LeaseCandidateList{} - obj, err := c.Fake. - Invokes(testing.NewListActionWithOptions(leasecandidatesResource, leasecandidatesKind, c.ns, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1alpha1.LeaseCandidateList{ListMeta: obj.(*v1alpha1.LeaseCandidateList).ListMeta} - for _, item := range obj.(*v1alpha1.LeaseCandidateList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested leaseCandidates. -func (c *FakeLeaseCandidates) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchActionWithOptions(leasecandidatesResource, c.ns, opts)) - -} - -// Create takes the representation of a leaseCandidate and creates it. Returns the server's representation of the leaseCandidate, and an error, if there is any. -func (c *FakeLeaseCandidates) Create(ctx context.Context, leaseCandidate *v1alpha1.LeaseCandidate, opts v1.CreateOptions) (result *v1alpha1.LeaseCandidate, err error) { - emptyResult := &v1alpha1.LeaseCandidate{} - obj, err := c.Fake. 
- Invokes(testing.NewCreateActionWithOptions(leasecandidatesResource, c.ns, leaseCandidate, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1alpha1.LeaseCandidate), err -} - -// Update takes the representation of a leaseCandidate and updates it. Returns the server's representation of the leaseCandidate, and an error, if there is any. -func (c *FakeLeaseCandidates) Update(ctx context.Context, leaseCandidate *v1alpha1.LeaseCandidate, opts v1.UpdateOptions) (result *v1alpha1.LeaseCandidate, err error) { - emptyResult := &v1alpha1.LeaseCandidate{} - obj, err := c.Fake. - Invokes(testing.NewUpdateActionWithOptions(leasecandidatesResource, c.ns, leaseCandidate, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1alpha1.LeaseCandidate), err -} - -// Delete takes name of the leaseCandidate and deletes it. Returns an error if one occurs. -func (c *FakeLeaseCandidates) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(leasecandidatesResource, c.ns, name, opts), &v1alpha1.LeaseCandidate{}) - - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeLeaseCandidates) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionActionWithOptions(leasecandidatesResource, c.ns, opts, listOpts) - - _, err := c.Fake.Invokes(action, &v1alpha1.LeaseCandidateList{}) - return err -} - -// Patch applies the patch and returns the patched leaseCandidate. -func (c *FakeLeaseCandidates) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.LeaseCandidate, err error) { - emptyResult := &v1alpha1.LeaseCandidate{} - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceActionWithOptions(leasecandidatesResource, c.ns, name, pt, data, opts, subresources...), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1alpha1.LeaseCandidate), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied leaseCandidate. -func (c *FakeLeaseCandidates) Apply(ctx context.Context, leaseCandidate *coordinationv1alpha1.LeaseCandidateApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.LeaseCandidate, err error) { - if leaseCandidate == nil { - return nil, fmt.Errorf("leaseCandidate provided to Apply must not be nil") - } - data, err := json.Marshal(leaseCandidate) - if err != nil { - return nil, err - } - name := leaseCandidate.Name - if name == nil { - return nil, fmt.Errorf("leaseCandidate.Name must be provided to Apply") - } - emptyResult := &v1alpha1.LeaseCandidate{} - obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceActionWithOptions(leasecandidatesResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1alpha1.LeaseCandidate), err -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1alpha1/coordination_client.go b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1alpha2/coordination_client.go similarity index 65% rename from vendor/k8s.io/client-go/kubernetes/typed/coordination/v1alpha1/coordination_client.go rename to vendor/k8s.io/client-go/kubernetes/typed/coordination/v1alpha2/coordination_client.go index dd75e5d01..4c286d463 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1alpha1/coordination_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1alpha2/coordination_client.go @@ -16,34 +16,34 @@ limitations under the License. // Code generated by client-gen. DO NOT EDIT. -package v1alpha1 +package v1alpha2 import ( - "net/http" + http "net/http" - v1alpha1 "k8s.io/api/coordination/v1alpha1" - "k8s.io/client-go/kubernetes/scheme" + coordinationv1alpha2 "k8s.io/api/coordination/v1alpha2" + scheme "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) -type CoordinationV1alpha1Interface interface { +type CoordinationV1alpha2Interface interface { RESTClient() rest.Interface LeaseCandidatesGetter } -// CoordinationV1alpha1Client is used to interact with features provided by the coordination.k8s.io group. -type CoordinationV1alpha1Client struct { +// CoordinationV1alpha2Client is used to interact with features provided by the coordination.k8s.io group. +type CoordinationV1alpha2Client struct { restClient rest.Interface } -func (c *CoordinationV1alpha1Client) LeaseCandidates(namespace string) LeaseCandidateInterface { +func (c *CoordinationV1alpha2Client) LeaseCandidates(namespace string) LeaseCandidateInterface { return newLeaseCandidates(c, namespace) } -// NewForConfig creates a new CoordinationV1alpha1Client for the given config. +// NewForConfig creates a new CoordinationV1alpha2Client for the given config. // NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), // where httpClient was generated with rest.HTTPClientFor(c). -func NewForConfig(c *rest.Config) (*CoordinationV1alpha1Client, error) { +func NewForConfig(c *rest.Config) (*CoordinationV1alpha2Client, error) { config := *c if err := setConfigDefaults(&config); err != nil { return nil, err @@ -55,9 +55,9 @@ func NewForConfig(c *rest.Config) (*CoordinationV1alpha1Client, error) { return NewForConfigAndClient(&config, httpClient) } -// NewForConfigAndClient creates a new CoordinationV1alpha1Client for the given config and http client. +// NewForConfigAndClient creates a new CoordinationV1alpha2Client for the given config and http client. // Note the http client provided takes precedence over the configured transport values. 
-func NewForConfigAndClient(c *rest.Config, h *http.Client) (*CoordinationV1alpha1Client, error) { +func NewForConfigAndClient(c *rest.Config, h *http.Client) (*CoordinationV1alpha2Client, error) { config := *c if err := setConfigDefaults(&config); err != nil { return nil, err @@ -66,12 +66,12 @@ func NewForConfigAndClient(c *rest.Config, h *http.Client) (*CoordinationV1alpha if err != nil { return nil, err } - return &CoordinationV1alpha1Client{client}, nil + return &CoordinationV1alpha2Client{client}, nil } -// NewForConfigOrDie creates a new CoordinationV1alpha1Client for the given config and +// NewForConfigOrDie creates a new CoordinationV1alpha2Client for the given config and // panics if there is an error in the config. -func NewForConfigOrDie(c *rest.Config) *CoordinationV1alpha1Client { +func NewForConfigOrDie(c *rest.Config) *CoordinationV1alpha2Client { client, err := NewForConfig(c) if err != nil { panic(err) @@ -79,16 +79,16 @@ func NewForConfigOrDie(c *rest.Config) *CoordinationV1alpha1Client { return client } -// New creates a new CoordinationV1alpha1Client for the given RESTClient. -func New(c rest.Interface) *CoordinationV1alpha1Client { - return &CoordinationV1alpha1Client{c} +// New creates a new CoordinationV1alpha2Client for the given RESTClient. +func New(c rest.Interface) *CoordinationV1alpha2Client { + return &CoordinationV1alpha2Client{c} } func setConfigDefaults(config *rest.Config) error { - gv := v1alpha1.SchemeGroupVersion + gv := coordinationv1alpha2.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() @@ -99,7 +99,7 @@ func setConfigDefaults(config *rest.Config) error { // RESTClient returns a RESTClient that is used to communicate // with API server by this client implementation. -func (c *CoordinationV1alpha1Client) RESTClient() rest.Interface { +func (c *CoordinationV1alpha2Client) RESTClient() rest.Interface { if c == nil { return nil } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1alpha1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1alpha2/doc.go similarity index 97% rename from vendor/k8s.io/client-go/kubernetes/typed/coordination/v1alpha1/doc.go rename to vendor/k8s.io/client-go/kubernetes/typed/coordination/v1alpha2/doc.go index df51baa4d..baaf2d985 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1alpha1/doc.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1alpha2/doc.go @@ -17,4 +17,4 @@ limitations under the License. // Code generated by client-gen. DO NOT EDIT. // This package has the automatically generated typed clients. 
-package v1alpha1 +package v1alpha2 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1alpha1/fake/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1alpha2/fake/doc.go similarity index 100% rename from vendor/k8s.io/client-go/kubernetes/typed/coordination/v1alpha1/fake/doc.go rename to vendor/k8s.io/client-go/kubernetes/typed/coordination/v1alpha2/fake/doc.go diff --git a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1alpha1/fake/fake_coordination_client.go b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1alpha2/fake/fake_coordination_client.go similarity index 73% rename from vendor/k8s.io/client-go/kubernetes/typed/coordination/v1alpha1/fake/fake_coordination_client.go rename to vendor/k8s.io/client-go/kubernetes/typed/coordination/v1alpha2/fake/fake_coordination_client.go index 2e7d4be26..6b73b6744 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1alpha1/fake/fake_coordination_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1alpha2/fake/fake_coordination_client.go @@ -19,22 +19,22 @@ limitations under the License. package fake import ( - v1alpha1 "k8s.io/client-go/kubernetes/typed/coordination/v1alpha1" + v1alpha2 "k8s.io/client-go/kubernetes/typed/coordination/v1alpha2" rest "k8s.io/client-go/rest" testing "k8s.io/client-go/testing" ) -type FakeCoordinationV1alpha1 struct { +type FakeCoordinationV1alpha2 struct { *testing.Fake } -func (c *FakeCoordinationV1alpha1) LeaseCandidates(namespace string) v1alpha1.LeaseCandidateInterface { - return &FakeLeaseCandidates{c, namespace} +func (c *FakeCoordinationV1alpha2) LeaseCandidates(namespace string) v1alpha2.LeaseCandidateInterface { + return newFakeLeaseCandidates(c, namespace) } // RESTClient returns a RESTClient that is used to communicate // with API server by this client implementation. -func (c *FakeCoordinationV1alpha1) RESTClient() rest.Interface { +func (c *FakeCoordinationV1alpha2) RESTClient() rest.Interface { var ret *rest.RESTClient return ret } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1alpha2/fake/fake_leasecandidate.go b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1alpha2/fake/fake_leasecandidate.go new file mode 100644 index 000000000..671a6df0c --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1alpha2/fake/fake_leasecandidate.go @@ -0,0 +1,53 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package fake + +import ( + v1alpha2 "k8s.io/api/coordination/v1alpha2" + coordinationv1alpha2 "k8s.io/client-go/applyconfigurations/coordination/v1alpha2" + gentype "k8s.io/client-go/gentype" + typedcoordinationv1alpha2 "k8s.io/client-go/kubernetes/typed/coordination/v1alpha2" +) + +// fakeLeaseCandidates implements LeaseCandidateInterface +type fakeLeaseCandidates struct { + *gentype.FakeClientWithListAndApply[*v1alpha2.LeaseCandidate, *v1alpha2.LeaseCandidateList, *coordinationv1alpha2.LeaseCandidateApplyConfiguration] + Fake *FakeCoordinationV1alpha2 +} + +func newFakeLeaseCandidates(fake *FakeCoordinationV1alpha2, namespace string) typedcoordinationv1alpha2.LeaseCandidateInterface { + return &fakeLeaseCandidates{ + gentype.NewFakeClientWithListAndApply[*v1alpha2.LeaseCandidate, *v1alpha2.LeaseCandidateList, *coordinationv1alpha2.LeaseCandidateApplyConfiguration]( + fake.Fake, + namespace, + v1alpha2.SchemeGroupVersion.WithResource("leasecandidates"), + v1alpha2.SchemeGroupVersion.WithKind("LeaseCandidate"), + func() *v1alpha2.LeaseCandidate { return &v1alpha2.LeaseCandidate{} }, + func() *v1alpha2.LeaseCandidateList { return &v1alpha2.LeaseCandidateList{} }, + func(dst, src *v1alpha2.LeaseCandidateList) { dst.ListMeta = src.ListMeta }, + func(list *v1alpha2.LeaseCandidateList) []*v1alpha2.LeaseCandidate { + return gentype.ToPointerSlice(list.Items) + }, + func(list *v1alpha2.LeaseCandidateList, items []*v1alpha2.LeaseCandidate) { + list.Items = gentype.FromPointerSlice(items) + }, + ), + fake, + } +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1alpha1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1alpha2/generated_expansion.go similarity index 97% rename from vendor/k8s.io/client-go/kubernetes/typed/coordination/v1alpha1/generated_expansion.go rename to vendor/k8s.io/client-go/kubernetes/typed/coordination/v1alpha2/generated_expansion.go index 2dc2f30cf..52af4786c 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1alpha1/generated_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1alpha2/generated_expansion.go @@ -16,6 +16,6 @@ limitations under the License. // Code generated by client-gen. DO NOT EDIT. -package v1alpha1 +package v1alpha2 type LeaseCandidateExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1alpha1/leasecandidate.go b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1alpha2/leasecandidate.go similarity index 52% rename from vendor/k8s.io/client-go/kubernetes/typed/coordination/v1alpha1/leasecandidate.go rename to vendor/k8s.io/client-go/kubernetes/typed/coordination/v1alpha2/leasecandidate.go index 868185135..c994a8893 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1alpha1/leasecandidate.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1alpha2/leasecandidate.go @@ -16,16 +16,16 @@ limitations under the License. // Code generated by client-gen. DO NOT EDIT. 
-package v1alpha1 +package v1alpha2 import ( - "context" + context "context" - v1alpha1 "k8s.io/api/coordination/v1alpha1" + coordinationv1alpha2 "k8s.io/api/coordination/v1alpha2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - coordinationv1alpha1 "k8s.io/client-go/applyconfigurations/coordination/v1alpha1" + applyconfigurationscoordinationv1alpha2 "k8s.io/client-go/applyconfigurations/coordination/v1alpha2" gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" ) @@ -38,32 +38,34 @@ type LeaseCandidatesGetter interface { // LeaseCandidateInterface has methods to work with LeaseCandidate resources. type LeaseCandidateInterface interface { - Create(ctx context.Context, leaseCandidate *v1alpha1.LeaseCandidate, opts v1.CreateOptions) (*v1alpha1.LeaseCandidate, error) - Update(ctx context.Context, leaseCandidate *v1alpha1.LeaseCandidate, opts v1.UpdateOptions) (*v1alpha1.LeaseCandidate, error) + Create(ctx context.Context, leaseCandidate *coordinationv1alpha2.LeaseCandidate, opts v1.CreateOptions) (*coordinationv1alpha2.LeaseCandidate, error) + Update(ctx context.Context, leaseCandidate *coordinationv1alpha2.LeaseCandidate, opts v1.UpdateOptions) (*coordinationv1alpha2.LeaseCandidate, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.LeaseCandidate, error) - List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.LeaseCandidateList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*coordinationv1alpha2.LeaseCandidate, error) + List(ctx context.Context, opts v1.ListOptions) (*coordinationv1alpha2.LeaseCandidateList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.LeaseCandidate, err error) - Apply(ctx context.Context, leaseCandidate *coordinationv1alpha1.LeaseCandidateApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.LeaseCandidate, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *coordinationv1alpha2.LeaseCandidate, err error) + Apply(ctx context.Context, leaseCandidate *applyconfigurationscoordinationv1alpha2.LeaseCandidateApplyConfiguration, opts v1.ApplyOptions) (result *coordinationv1alpha2.LeaseCandidate, err error) LeaseCandidateExpansion } // leaseCandidates implements LeaseCandidateInterface type leaseCandidates struct { - *gentype.ClientWithListAndApply[*v1alpha1.LeaseCandidate, *v1alpha1.LeaseCandidateList, *coordinationv1alpha1.LeaseCandidateApplyConfiguration] + *gentype.ClientWithListAndApply[*coordinationv1alpha2.LeaseCandidate, *coordinationv1alpha2.LeaseCandidateList, *applyconfigurationscoordinationv1alpha2.LeaseCandidateApplyConfiguration] } // newLeaseCandidates returns a LeaseCandidates -func newLeaseCandidates(c *CoordinationV1alpha1Client, namespace string) *leaseCandidates { +func newLeaseCandidates(c *CoordinationV1alpha2Client, namespace string) *leaseCandidates { return &leaseCandidates{ - gentype.NewClientWithListAndApply[*v1alpha1.LeaseCandidate, *v1alpha1.LeaseCandidateList, *coordinationv1alpha1.LeaseCandidateApplyConfiguration]( + 
gentype.NewClientWithListAndApply[*coordinationv1alpha2.LeaseCandidate, *coordinationv1alpha2.LeaseCandidateList, *applyconfigurationscoordinationv1alpha2.LeaseCandidateApplyConfiguration]( "leasecandidates", c.RESTClient(), scheme.ParameterCodec, namespace, - func() *v1alpha1.LeaseCandidate { return &v1alpha1.LeaseCandidate{} }, - func() *v1alpha1.LeaseCandidateList { return &v1alpha1.LeaseCandidateList{} }), + func() *coordinationv1alpha2.LeaseCandidate { return &coordinationv1alpha2.LeaseCandidate{} }, + func() *coordinationv1alpha2.LeaseCandidateList { return &coordinationv1alpha2.LeaseCandidateList{} }, + gentype.PrefersProtobuf[*coordinationv1alpha2.LeaseCandidate](), + ), } } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/coordination_client.go b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/coordination_client.go index 27d674e23..1f1afba24 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/coordination_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/coordination_client.go @@ -19,10 +19,10 @@ limitations under the License. package v1beta1 import ( - "net/http" + http "net/http" - v1beta1 "k8s.io/api/coordination/v1beta1" - "k8s.io/client-go/kubernetes/scheme" + coordinationv1beta1 "k8s.io/api/coordination/v1beta1" + scheme "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -85,10 +85,10 @@ func New(c rest.Interface) *CoordinationV1beta1Client { } func setConfigDefaults(config *rest.Config) error { - gv := v1beta1.SchemeGroupVersion + gv := coordinationv1beta1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/fake/fake_coordination_client.go b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/fake/fake_coordination_client.go index f583b466e..41b3ce06b 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/fake/fake_coordination_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/fake/fake_coordination_client.go @@ -29,7 +29,7 @@ type FakeCoordinationV1beta1 struct { } func (c *FakeCoordinationV1beta1) Leases(namespace string) v1beta1.LeaseInterface { - return &FakeLeases{c, namespace} + return newFakeLeases(c, namespace) } // RESTClient returns a RESTClient that is used to communicate diff --git a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/fake/fake_lease.go b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/fake/fake_lease.go index 112784af9..bb4b8e04f 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/fake/fake_lease.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/fake/fake_lease.go @@ -19,142 +19,31 @@ limitations under the License. 
package fake import ( - "context" - json "encoding/json" - "fmt" - v1beta1 "k8s.io/api/coordination/v1beta1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" coordinationv1beta1 "k8s.io/client-go/applyconfigurations/coordination/v1beta1" - testing "k8s.io/client-go/testing" + gentype "k8s.io/client-go/gentype" + typedcoordinationv1beta1 "k8s.io/client-go/kubernetes/typed/coordination/v1beta1" ) -// FakeLeases implements LeaseInterface -type FakeLeases struct { +// fakeLeases implements LeaseInterface +type fakeLeases struct { + *gentype.FakeClientWithListAndApply[*v1beta1.Lease, *v1beta1.LeaseList, *coordinationv1beta1.LeaseApplyConfiguration] Fake *FakeCoordinationV1beta1 - ns string -} - -var leasesResource = v1beta1.SchemeGroupVersion.WithResource("leases") - -var leasesKind = v1beta1.SchemeGroupVersion.WithKind("Lease") - -// Get takes name of the lease, and returns the corresponding lease object, and an error if there is any. -func (c *FakeLeases) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.Lease, err error) { - emptyResult := &v1beta1.Lease{} - obj, err := c.Fake. - Invokes(testing.NewGetActionWithOptions(leasesResource, c.ns, name, options), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.Lease), err -} - -// List takes label and field selectors, and returns the list of Leases that match those selectors. -func (c *FakeLeases) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.LeaseList, err error) { - emptyResult := &v1beta1.LeaseList{} - obj, err := c.Fake. - Invokes(testing.NewListActionWithOptions(leasesResource, leasesKind, c.ns, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1beta1.LeaseList{ListMeta: obj.(*v1beta1.LeaseList).ListMeta} - for _, item := range obj.(*v1beta1.LeaseList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested leases. -func (c *FakeLeases) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchActionWithOptions(leasesResource, c.ns, opts)) - } -// Create takes the representation of a lease and creates it. Returns the server's representation of the lease, and an error, if there is any. -func (c *FakeLeases) Create(ctx context.Context, lease *v1beta1.Lease, opts v1.CreateOptions) (result *v1beta1.Lease, err error) { - emptyResult := &v1beta1.Lease{} - obj, err := c.Fake. - Invokes(testing.NewCreateActionWithOptions(leasesResource, c.ns, lease, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.Lease), err -} - -// Update takes the representation of a lease and updates it. Returns the server's representation of the lease, and an error, if there is any. -func (c *FakeLeases) Update(ctx context.Context, lease *v1beta1.Lease, opts v1.UpdateOptions) (result *v1beta1.Lease, err error) { - emptyResult := &v1beta1.Lease{} - obj, err := c.Fake. - Invokes(testing.NewUpdateActionWithOptions(leasesResource, c.ns, lease, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.Lease), err -} - -// Delete takes name of the lease and deletes it. 
Returns an error if one occurs. -func (c *FakeLeases) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(leasesResource, c.ns, name, opts), &v1beta1.Lease{}) - - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeLeases) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionActionWithOptions(leasesResource, c.ns, opts, listOpts) - - _, err := c.Fake.Invokes(action, &v1beta1.LeaseList{}) - return err -} - -// Patch applies the patch and returns the patched lease. -func (c *FakeLeases) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Lease, err error) { - emptyResult := &v1beta1.Lease{} - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceActionWithOptions(leasesResource, c.ns, name, pt, data, opts, subresources...), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.Lease), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied lease. -func (c *FakeLeases) Apply(ctx context.Context, lease *coordinationv1beta1.LeaseApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.Lease, err error) { - if lease == nil { - return nil, fmt.Errorf("lease provided to Apply must not be nil") - } - data, err := json.Marshal(lease) - if err != nil { - return nil, err - } - name := lease.Name - if name == nil { - return nil, fmt.Errorf("lease.Name must be provided to Apply") - } - emptyResult := &v1beta1.Lease{} - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceActionWithOptions(leasesResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) - - if obj == nil { - return emptyResult, err +func newFakeLeases(fake *FakeCoordinationV1beta1, namespace string) typedcoordinationv1beta1.LeaseInterface { + return &fakeLeases{ + gentype.NewFakeClientWithListAndApply[*v1beta1.Lease, *v1beta1.LeaseList, *coordinationv1beta1.LeaseApplyConfiguration]( + fake.Fake, + namespace, + v1beta1.SchemeGroupVersion.WithResource("leases"), + v1beta1.SchemeGroupVersion.WithKind("Lease"), + func() *v1beta1.Lease { return &v1beta1.Lease{} }, + func() *v1beta1.LeaseList { return &v1beta1.LeaseList{} }, + func(dst, src *v1beta1.LeaseList) { dst.ListMeta = src.ListMeta }, + func(list *v1beta1.LeaseList) []*v1beta1.Lease { return gentype.ToPointerSlice(list.Items) }, + func(list *v1beta1.LeaseList, items []*v1beta1.Lease) { list.Items = gentype.FromPointerSlice(items) }, + ), + fake, } - return obj.(*v1beta1.Lease), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/lease.go b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/lease.go index 62341e53b..18ca9823c 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/lease.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/lease.go @@ -19,13 +19,13 @@ limitations under the License. 
package v1beta1 import ( - "context" + context "context" - v1beta1 "k8s.io/api/coordination/v1beta1" + coordinationv1beta1 "k8s.io/api/coordination/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - coordinationv1beta1 "k8s.io/client-go/applyconfigurations/coordination/v1beta1" + applyconfigurationscoordinationv1beta1 "k8s.io/client-go/applyconfigurations/coordination/v1beta1" gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" ) @@ -38,32 +38,34 @@ type LeasesGetter interface { // LeaseInterface has methods to work with Lease resources. type LeaseInterface interface { - Create(ctx context.Context, lease *v1beta1.Lease, opts v1.CreateOptions) (*v1beta1.Lease, error) - Update(ctx context.Context, lease *v1beta1.Lease, opts v1.UpdateOptions) (*v1beta1.Lease, error) + Create(ctx context.Context, lease *coordinationv1beta1.Lease, opts v1.CreateOptions) (*coordinationv1beta1.Lease, error) + Update(ctx context.Context, lease *coordinationv1beta1.Lease, opts v1.UpdateOptions) (*coordinationv1beta1.Lease, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.Lease, error) - List(ctx context.Context, opts v1.ListOptions) (*v1beta1.LeaseList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*coordinationv1beta1.Lease, error) + List(ctx context.Context, opts v1.ListOptions) (*coordinationv1beta1.LeaseList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Lease, err error) - Apply(ctx context.Context, lease *coordinationv1beta1.LeaseApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.Lease, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *coordinationv1beta1.Lease, err error) + Apply(ctx context.Context, lease *applyconfigurationscoordinationv1beta1.LeaseApplyConfiguration, opts v1.ApplyOptions) (result *coordinationv1beta1.Lease, err error) LeaseExpansion } // leases implements LeaseInterface type leases struct { - *gentype.ClientWithListAndApply[*v1beta1.Lease, *v1beta1.LeaseList, *coordinationv1beta1.LeaseApplyConfiguration] + *gentype.ClientWithListAndApply[*coordinationv1beta1.Lease, *coordinationv1beta1.LeaseList, *applyconfigurationscoordinationv1beta1.LeaseApplyConfiguration] } // newLeases returns a Leases func newLeases(c *CoordinationV1beta1Client, namespace string) *leases { return &leases{ - gentype.NewClientWithListAndApply[*v1beta1.Lease, *v1beta1.LeaseList, *coordinationv1beta1.LeaseApplyConfiguration]( + gentype.NewClientWithListAndApply[*coordinationv1beta1.Lease, *coordinationv1beta1.LeaseList, *applyconfigurationscoordinationv1beta1.LeaseApplyConfiguration]( "leases", c.RESTClient(), scheme.ParameterCodec, namespace, - func() *v1beta1.Lease { return &v1beta1.Lease{} }, - func() *v1beta1.LeaseList { return &v1beta1.LeaseList{} }), + func() *coordinationv1beta1.Lease { return &coordinationv1beta1.Lease{} }, + func() *coordinationv1beta1.LeaseList { return &coordinationv1beta1.LeaseList{} }, + gentype.PrefersProtobuf[*coordinationv1beta1.Lease](), + ), } } diff --git 
a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/componentstatus.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/componentstatus.go index ab9458a5c..b8e58cd15 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/componentstatus.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/componentstatus.go @@ -19,13 +19,13 @@ limitations under the License. package v1 import ( - "context" + context "context" - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - corev1 "k8s.io/client-go/applyconfigurations/core/v1" + applyconfigurationscorev1 "k8s.io/client-go/applyconfigurations/core/v1" gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" ) @@ -38,32 +38,34 @@ type ComponentStatusesGetter interface { // ComponentStatusInterface has methods to work with ComponentStatus resources. type ComponentStatusInterface interface { - Create(ctx context.Context, componentStatus *v1.ComponentStatus, opts metav1.CreateOptions) (*v1.ComponentStatus, error) - Update(ctx context.Context, componentStatus *v1.ComponentStatus, opts metav1.UpdateOptions) (*v1.ComponentStatus, error) + Create(ctx context.Context, componentStatus *corev1.ComponentStatus, opts metav1.CreateOptions) (*corev1.ComponentStatus, error) + Update(ctx context.Context, componentStatus *corev1.ComponentStatus, opts metav1.UpdateOptions) (*corev1.ComponentStatus, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error - Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.ComponentStatus, error) - List(ctx context.Context, opts metav1.ListOptions) (*v1.ComponentStatusList, error) + Get(ctx context.Context, name string, opts metav1.GetOptions) (*corev1.ComponentStatus, error) + List(ctx context.Context, opts metav1.ListOptions) (*corev1.ComponentStatusList, error) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ComponentStatus, err error) - Apply(ctx context.Context, componentStatus *corev1.ComponentStatusApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ComponentStatus, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *corev1.ComponentStatus, err error) + Apply(ctx context.Context, componentStatus *applyconfigurationscorev1.ComponentStatusApplyConfiguration, opts metav1.ApplyOptions) (result *corev1.ComponentStatus, err error) ComponentStatusExpansion } // componentStatuses implements ComponentStatusInterface type componentStatuses struct { - *gentype.ClientWithListAndApply[*v1.ComponentStatus, *v1.ComponentStatusList, *corev1.ComponentStatusApplyConfiguration] + *gentype.ClientWithListAndApply[*corev1.ComponentStatus, *corev1.ComponentStatusList, *applyconfigurationscorev1.ComponentStatusApplyConfiguration] } // newComponentStatuses returns a ComponentStatuses func newComponentStatuses(c *CoreV1Client) *componentStatuses { return &componentStatuses{ - gentype.NewClientWithListAndApply[*v1.ComponentStatus, *v1.ComponentStatusList, *corev1.ComponentStatusApplyConfiguration]( + gentype.NewClientWithListAndApply[*corev1.ComponentStatus, *corev1.ComponentStatusList, 
*applyconfigurationscorev1.ComponentStatusApplyConfiguration]( "componentstatuses", c.RESTClient(), scheme.ParameterCodec, "", - func() *v1.ComponentStatus { return &v1.ComponentStatus{} }, - func() *v1.ComponentStatusList { return &v1.ComponentStatusList{} }), + func() *corev1.ComponentStatus { return &corev1.ComponentStatus{} }, + func() *corev1.ComponentStatusList { return &corev1.ComponentStatusList{} }, + gentype.PrefersProtobuf[*corev1.ComponentStatus](), + ), } } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/configmap.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/configmap.go index 72aa2361f..74d321193 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/configmap.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/configmap.go @@ -19,13 +19,13 @@ limitations under the License. package v1 import ( - "context" + context "context" - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - corev1 "k8s.io/client-go/applyconfigurations/core/v1" + applyconfigurationscorev1 "k8s.io/client-go/applyconfigurations/core/v1" gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" ) @@ -38,32 +38,34 @@ type ConfigMapsGetter interface { // ConfigMapInterface has methods to work with ConfigMap resources. type ConfigMapInterface interface { - Create(ctx context.Context, configMap *v1.ConfigMap, opts metav1.CreateOptions) (*v1.ConfigMap, error) - Update(ctx context.Context, configMap *v1.ConfigMap, opts metav1.UpdateOptions) (*v1.ConfigMap, error) + Create(ctx context.Context, configMap *corev1.ConfigMap, opts metav1.CreateOptions) (*corev1.ConfigMap, error) + Update(ctx context.Context, configMap *corev1.ConfigMap, opts metav1.UpdateOptions) (*corev1.ConfigMap, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error - Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.ConfigMap, error) - List(ctx context.Context, opts metav1.ListOptions) (*v1.ConfigMapList, error) + Get(ctx context.Context, name string, opts metav1.GetOptions) (*corev1.ConfigMap, error) + List(ctx context.Context, opts metav1.ListOptions) (*corev1.ConfigMapList, error) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ConfigMap, err error) - Apply(ctx context.Context, configMap *corev1.ConfigMapApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ConfigMap, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *corev1.ConfigMap, err error) + Apply(ctx context.Context, configMap *applyconfigurationscorev1.ConfigMapApplyConfiguration, opts metav1.ApplyOptions) (result *corev1.ConfigMap, err error) ConfigMapExpansion } // configMaps implements ConfigMapInterface type configMaps struct { - *gentype.ClientWithListAndApply[*v1.ConfigMap, *v1.ConfigMapList, *corev1.ConfigMapApplyConfiguration] + *gentype.ClientWithListAndApply[*corev1.ConfigMap, *corev1.ConfigMapList, *applyconfigurationscorev1.ConfigMapApplyConfiguration] } // newConfigMaps returns a ConfigMaps func newConfigMaps(c *CoreV1Client, namespace string) *configMaps { return &configMaps{ - 
gentype.NewClientWithListAndApply[*v1.ConfigMap, *v1.ConfigMapList, *corev1.ConfigMapApplyConfiguration]( + gentype.NewClientWithListAndApply[*corev1.ConfigMap, *corev1.ConfigMapList, *applyconfigurationscorev1.ConfigMapApplyConfiguration]( "configmaps", c.RESTClient(), scheme.ParameterCodec, namespace, - func() *v1.ConfigMap { return &v1.ConfigMap{} }, - func() *v1.ConfigMapList { return &v1.ConfigMapList{} }), + func() *corev1.ConfigMap { return &corev1.ConfigMap{} }, + func() *corev1.ConfigMapList { return &corev1.ConfigMapList{} }, + gentype.PrefersProtobuf[*corev1.ConfigMap](), + ), } } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/core_client.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/core_client.go index 6e59e4cc6..abf85cba6 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/core_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/core_client.go @@ -19,10 +19,10 @@ limitations under the License. package v1 import ( - "net/http" + http "net/http" - v1 "k8s.io/api/core/v1" - "k8s.io/client-go/kubernetes/scheme" + corev1 "k8s.io/api/core/v1" + scheme "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -160,10 +160,10 @@ func New(c rest.Interface) *CoreV1Client { } func setConfigDefaults(config *rest.Config) error { - gv := v1.SchemeGroupVersion + gv := corev1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/api" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/endpoints.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/endpoints.go index 9b9fc5fc1..b96a8b385 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/endpoints.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/endpoints.go @@ -19,13 +19,13 @@ limitations under the License. package v1 import ( - "context" + context "context" - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - corev1 "k8s.io/client-go/applyconfigurations/core/v1" + applyconfigurationscorev1 "k8s.io/client-go/applyconfigurations/core/v1" gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" ) @@ -38,32 +38,34 @@ type EndpointsGetter interface { // EndpointsInterface has methods to work with Endpoints resources. 
type EndpointsInterface interface { - Create(ctx context.Context, endpoints *v1.Endpoints, opts metav1.CreateOptions) (*v1.Endpoints, error) - Update(ctx context.Context, endpoints *v1.Endpoints, opts metav1.UpdateOptions) (*v1.Endpoints, error) + Create(ctx context.Context, endpoints *corev1.Endpoints, opts metav1.CreateOptions) (*corev1.Endpoints, error) + Update(ctx context.Context, endpoints *corev1.Endpoints, opts metav1.UpdateOptions) (*corev1.Endpoints, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error - Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.Endpoints, error) - List(ctx context.Context, opts metav1.ListOptions) (*v1.EndpointsList, error) + Get(ctx context.Context, name string, opts metav1.GetOptions) (*corev1.Endpoints, error) + List(ctx context.Context, opts metav1.ListOptions) (*corev1.EndpointsList, error) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Endpoints, err error) - Apply(ctx context.Context, endpoints *corev1.EndpointsApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Endpoints, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *corev1.Endpoints, err error) + Apply(ctx context.Context, endpoints *applyconfigurationscorev1.EndpointsApplyConfiguration, opts metav1.ApplyOptions) (result *corev1.Endpoints, err error) EndpointsExpansion } // endpoints implements EndpointsInterface type endpoints struct { - *gentype.ClientWithListAndApply[*v1.Endpoints, *v1.EndpointsList, *corev1.EndpointsApplyConfiguration] + *gentype.ClientWithListAndApply[*corev1.Endpoints, *corev1.EndpointsList, *applyconfigurationscorev1.EndpointsApplyConfiguration] } // newEndpoints returns a Endpoints func newEndpoints(c *CoreV1Client, namespace string) *endpoints { return &endpoints{ - gentype.NewClientWithListAndApply[*v1.Endpoints, *v1.EndpointsList, *corev1.EndpointsApplyConfiguration]( + gentype.NewClientWithListAndApply[*corev1.Endpoints, *corev1.EndpointsList, *applyconfigurationscorev1.EndpointsApplyConfiguration]( "endpoints", c.RESTClient(), scheme.ParameterCodec, namespace, - func() *v1.Endpoints { return &v1.Endpoints{} }, - func() *v1.EndpointsList { return &v1.EndpointsList{} }), + func() *corev1.Endpoints { return &corev1.Endpoints{} }, + func() *corev1.EndpointsList { return &corev1.EndpointsList{} }, + gentype.PrefersProtobuf[*corev1.Endpoints](), + ), } } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/event.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/event.go index 5ff0f0690..dd0cc80b8 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/event.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/event.go @@ -19,13 +19,13 @@ limitations under the License. 
package v1 import ( - "context" + context "context" - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - corev1 "k8s.io/client-go/applyconfigurations/core/v1" + applyconfigurationscorev1 "k8s.io/client-go/applyconfigurations/core/v1" gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" ) @@ -38,32 +38,34 @@ type EventsGetter interface { // EventInterface has methods to work with Event resources. type EventInterface interface { - Create(ctx context.Context, event *v1.Event, opts metav1.CreateOptions) (*v1.Event, error) - Update(ctx context.Context, event *v1.Event, opts metav1.UpdateOptions) (*v1.Event, error) + Create(ctx context.Context, event *corev1.Event, opts metav1.CreateOptions) (*corev1.Event, error) + Update(ctx context.Context, event *corev1.Event, opts metav1.UpdateOptions) (*corev1.Event, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error - Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.Event, error) - List(ctx context.Context, opts metav1.ListOptions) (*v1.EventList, error) + Get(ctx context.Context, name string, opts metav1.GetOptions) (*corev1.Event, error) + List(ctx context.Context, opts metav1.ListOptions) (*corev1.EventList, error) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Event, err error) - Apply(ctx context.Context, event *corev1.EventApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Event, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *corev1.Event, err error) + Apply(ctx context.Context, event *applyconfigurationscorev1.EventApplyConfiguration, opts metav1.ApplyOptions) (result *corev1.Event, err error) EventExpansion } // events implements EventInterface type events struct { - *gentype.ClientWithListAndApply[*v1.Event, *v1.EventList, *corev1.EventApplyConfiguration] + *gentype.ClientWithListAndApply[*corev1.Event, *corev1.EventList, *applyconfigurationscorev1.EventApplyConfiguration] } // newEvents returns a Events func newEvents(c *CoreV1Client, namespace string) *events { return &events{ - gentype.NewClientWithListAndApply[*v1.Event, *v1.EventList, *corev1.EventApplyConfiguration]( + gentype.NewClientWithListAndApply[*corev1.Event, *corev1.EventList, *applyconfigurationscorev1.EventApplyConfiguration]( "events", c.RESTClient(), scheme.ParameterCodec, namespace, - func() *v1.Event { return &v1.Event{} }, - func() *v1.EventList { return &v1.EventList{} }), + func() *corev1.Event { return &corev1.Event{} }, + func() *corev1.EventList { return &corev1.EventList{} }, + gentype.PrefersProtobuf[*corev1.Event](), + ), } } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_componentstatus.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_componentstatus.go index dbd305280..550277ed4 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_componentstatus.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_componentstatus.go @@ -19,133 +19,33 @@ limitations under the License. 
package fake import ( - "context" - json "encoding/json" - "fmt" - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" corev1 "k8s.io/client-go/applyconfigurations/core/v1" - testing "k8s.io/client-go/testing" + gentype "k8s.io/client-go/gentype" + typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1" ) -// FakeComponentStatuses implements ComponentStatusInterface -type FakeComponentStatuses struct { +// fakeComponentStatuses implements ComponentStatusInterface +type fakeComponentStatuses struct { + *gentype.FakeClientWithListAndApply[*v1.ComponentStatus, *v1.ComponentStatusList, *corev1.ComponentStatusApplyConfiguration] Fake *FakeCoreV1 } -var componentstatusesResource = v1.SchemeGroupVersion.WithResource("componentstatuses") - -var componentstatusesKind = v1.SchemeGroupVersion.WithKind("ComponentStatus") - -// Get takes name of the componentStatus, and returns the corresponding componentStatus object, and an error if there is any. -func (c *FakeComponentStatuses) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ComponentStatus, err error) { - emptyResult := &v1.ComponentStatus{} - obj, err := c.Fake. - Invokes(testing.NewRootGetActionWithOptions(componentstatusesResource, name, options), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1.ComponentStatus), err -} - -// List takes label and field selectors, and returns the list of ComponentStatuses that match those selectors. -func (c *FakeComponentStatuses) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ComponentStatusList, err error) { - emptyResult := &v1.ComponentStatusList{} - obj, err := c.Fake. - Invokes(testing.NewRootListActionWithOptions(componentstatusesResource, componentstatusesKind, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1.ComponentStatusList{ListMeta: obj.(*v1.ComponentStatusList).ListMeta} - for _, item := range obj.(*v1.ComponentStatusList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested componentStatuses. -func (c *FakeComponentStatuses) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewRootWatchActionWithOptions(componentstatusesResource, opts)) -} - -// Create takes the representation of a componentStatus and creates it. Returns the server's representation of the componentStatus, and an error, if there is any. -func (c *FakeComponentStatuses) Create(ctx context.Context, componentStatus *v1.ComponentStatus, opts metav1.CreateOptions) (result *v1.ComponentStatus, err error) { - emptyResult := &v1.ComponentStatus{} - obj, err := c.Fake. - Invokes(testing.NewRootCreateActionWithOptions(componentstatusesResource, componentStatus, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1.ComponentStatus), err -} - -// Update takes the representation of a componentStatus and updates it. Returns the server's representation of the componentStatus, and an error, if there is any. 
-func (c *FakeComponentStatuses) Update(ctx context.Context, componentStatus *v1.ComponentStatus, opts metav1.UpdateOptions) (result *v1.ComponentStatus, err error) { - emptyResult := &v1.ComponentStatus{} - obj, err := c.Fake. - Invokes(testing.NewRootUpdateActionWithOptions(componentstatusesResource, componentStatus, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1.ComponentStatus), err -} - -// Delete takes name of the componentStatus and deletes it. Returns an error if one occurs. -func (c *FakeComponentStatuses) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewRootDeleteActionWithOptions(componentstatusesResource, name, opts), &v1.ComponentStatus{}) - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeComponentStatuses) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewRootDeleteCollectionActionWithOptions(componentstatusesResource, opts, listOpts) - - _, err := c.Fake.Invokes(action, &v1.ComponentStatusList{}) - return err -} - -// Patch applies the patch and returns the patched componentStatus. -func (c *FakeComponentStatuses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ComponentStatus, err error) { - emptyResult := &v1.ComponentStatus{} - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceActionWithOptions(componentstatusesResource, name, pt, data, opts, subresources...), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1.ComponentStatus), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied componentStatus. -func (c *FakeComponentStatuses) Apply(ctx context.Context, componentStatus *corev1.ComponentStatusApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ComponentStatus, err error) { - if componentStatus == nil { - return nil, fmt.Errorf("componentStatus provided to Apply must not be nil") - } - data, err := json.Marshal(componentStatus) - if err != nil { - return nil, err - } - name := componentStatus.Name - if name == nil { - return nil, fmt.Errorf("componentStatus.Name must be provided to Apply") - } - emptyResult := &v1.ComponentStatus{} - obj, err := c.Fake. 
- Invokes(testing.NewRootPatchSubresourceActionWithOptions(componentstatusesResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) - if obj == nil { - return emptyResult, err +func newFakeComponentStatuses(fake *FakeCoreV1) typedcorev1.ComponentStatusInterface { + return &fakeComponentStatuses{ + gentype.NewFakeClientWithListAndApply[*v1.ComponentStatus, *v1.ComponentStatusList, *corev1.ComponentStatusApplyConfiguration]( + fake.Fake, + "", + v1.SchemeGroupVersion.WithResource("componentstatuses"), + v1.SchemeGroupVersion.WithKind("ComponentStatus"), + func() *v1.ComponentStatus { return &v1.ComponentStatus{} }, + func() *v1.ComponentStatusList { return &v1.ComponentStatusList{} }, + func(dst, src *v1.ComponentStatusList) { dst.ListMeta = src.ListMeta }, + func(list *v1.ComponentStatusList) []*v1.ComponentStatus { return gentype.ToPointerSlice(list.Items) }, + func(list *v1.ComponentStatusList, items []*v1.ComponentStatus) { + list.Items = gentype.FromPointerSlice(items) + }, + ), + fake, } - return obj.(*v1.ComponentStatus), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_configmap.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_configmap.go index ae760add7..1fd5bf40c 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_configmap.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_configmap.go @@ -19,142 +19,31 @@ limitations under the License. package fake import ( - "context" - json "encoding/json" - "fmt" - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" corev1 "k8s.io/client-go/applyconfigurations/core/v1" - testing "k8s.io/client-go/testing" + gentype "k8s.io/client-go/gentype" + typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1" ) -// FakeConfigMaps implements ConfigMapInterface -type FakeConfigMaps struct { +// fakeConfigMaps implements ConfigMapInterface +type fakeConfigMaps struct { + *gentype.FakeClientWithListAndApply[*v1.ConfigMap, *v1.ConfigMapList, *corev1.ConfigMapApplyConfiguration] Fake *FakeCoreV1 - ns string -} - -var configmapsResource = v1.SchemeGroupVersion.WithResource("configmaps") - -var configmapsKind = v1.SchemeGroupVersion.WithKind("ConfigMap") - -// Get takes name of the configMap, and returns the corresponding configMap object, and an error if there is any. -func (c *FakeConfigMaps) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ConfigMap, err error) { - emptyResult := &v1.ConfigMap{} - obj, err := c.Fake. - Invokes(testing.NewGetActionWithOptions(configmapsResource, c.ns, name, options), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1.ConfigMap), err -} - -// List takes label and field selectors, and returns the list of ConfigMaps that match those selectors. -func (c *FakeConfigMaps) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ConfigMapList, err error) { - emptyResult := &v1.ConfigMapList{} - obj, err := c.Fake. 
- Invokes(testing.NewListActionWithOptions(configmapsResource, configmapsKind, c.ns, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1.ConfigMapList{ListMeta: obj.(*v1.ConfigMapList).ListMeta} - for _, item := range obj.(*v1.ConfigMapList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested configMaps. -func (c *FakeConfigMaps) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchActionWithOptions(configmapsResource, c.ns, opts)) - } -// Create takes the representation of a configMap and creates it. Returns the server's representation of the configMap, and an error, if there is any. -func (c *FakeConfigMaps) Create(ctx context.Context, configMap *v1.ConfigMap, opts metav1.CreateOptions) (result *v1.ConfigMap, err error) { - emptyResult := &v1.ConfigMap{} - obj, err := c.Fake. - Invokes(testing.NewCreateActionWithOptions(configmapsResource, c.ns, configMap, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1.ConfigMap), err -} - -// Update takes the representation of a configMap and updates it. Returns the server's representation of the configMap, and an error, if there is any. -func (c *FakeConfigMaps) Update(ctx context.Context, configMap *v1.ConfigMap, opts metav1.UpdateOptions) (result *v1.ConfigMap, err error) { - emptyResult := &v1.ConfigMap{} - obj, err := c.Fake. - Invokes(testing.NewUpdateActionWithOptions(configmapsResource, c.ns, configMap, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1.ConfigMap), err -} - -// Delete takes name of the configMap and deletes it. Returns an error if one occurs. -func (c *FakeConfigMaps) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(configmapsResource, c.ns, name, opts), &v1.ConfigMap{}) - - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeConfigMaps) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewDeleteCollectionActionWithOptions(configmapsResource, c.ns, opts, listOpts) - - _, err := c.Fake.Invokes(action, &v1.ConfigMapList{}) - return err -} - -// Patch applies the patch and returns the patched configMap. -func (c *FakeConfigMaps) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ConfigMap, err error) { - emptyResult := &v1.ConfigMap{} - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceActionWithOptions(configmapsResource, c.ns, name, pt, data, opts, subresources...), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1.ConfigMap), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied configMap. 
-func (c *FakeConfigMaps) Apply(ctx context.Context, configMap *corev1.ConfigMapApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ConfigMap, err error) { - if configMap == nil { - return nil, fmt.Errorf("configMap provided to Apply must not be nil") - } - data, err := json.Marshal(configMap) - if err != nil { - return nil, err - } - name := configMap.Name - if name == nil { - return nil, fmt.Errorf("configMap.Name must be provided to Apply") - } - emptyResult := &v1.ConfigMap{} - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceActionWithOptions(configmapsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) - - if obj == nil { - return emptyResult, err +func newFakeConfigMaps(fake *FakeCoreV1, namespace string) typedcorev1.ConfigMapInterface { + return &fakeConfigMaps{ + gentype.NewFakeClientWithListAndApply[*v1.ConfigMap, *v1.ConfigMapList, *corev1.ConfigMapApplyConfiguration]( + fake.Fake, + namespace, + v1.SchemeGroupVersion.WithResource("configmaps"), + v1.SchemeGroupVersion.WithKind("ConfigMap"), + func() *v1.ConfigMap { return &v1.ConfigMap{} }, + func() *v1.ConfigMapList { return &v1.ConfigMapList{} }, + func(dst, src *v1.ConfigMapList) { dst.ListMeta = src.ListMeta }, + func(list *v1.ConfigMapList) []*v1.ConfigMap { return gentype.ToPointerSlice(list.Items) }, + func(list *v1.ConfigMapList, items []*v1.ConfigMap) { list.Items = gentype.FromPointerSlice(items) }, + ), + fake, } - return obj.(*v1.ConfigMap), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_core_client.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_core_client.go index 5ad90943c..e7f7412d0 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_core_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_core_client.go @@ -29,67 +29,67 @@ type FakeCoreV1 struct { } func (c *FakeCoreV1) ComponentStatuses() v1.ComponentStatusInterface { - return &FakeComponentStatuses{c} + return newFakeComponentStatuses(c) } func (c *FakeCoreV1) ConfigMaps(namespace string) v1.ConfigMapInterface { - return &FakeConfigMaps{c, namespace} + return newFakeConfigMaps(c, namespace) } func (c *FakeCoreV1) Endpoints(namespace string) v1.EndpointsInterface { - return &FakeEndpoints{c, namespace} + return newFakeEndpoints(c, namespace) } func (c *FakeCoreV1) Events(namespace string) v1.EventInterface { - return &FakeEvents{c, namespace} + return newFakeEvents(c, namespace) } func (c *FakeCoreV1) LimitRanges(namespace string) v1.LimitRangeInterface { - return &FakeLimitRanges{c, namespace} + return newFakeLimitRanges(c, namespace) } func (c *FakeCoreV1) Namespaces() v1.NamespaceInterface { - return &FakeNamespaces{c} + return newFakeNamespaces(c) } func (c *FakeCoreV1) Nodes() v1.NodeInterface { - return &FakeNodes{c} + return newFakeNodes(c) } func (c *FakeCoreV1) PersistentVolumes() v1.PersistentVolumeInterface { - return &FakePersistentVolumes{c} + return newFakePersistentVolumes(c) } func (c *FakeCoreV1) PersistentVolumeClaims(namespace string) v1.PersistentVolumeClaimInterface { - return &FakePersistentVolumeClaims{c, namespace} + return newFakePersistentVolumeClaims(c, namespace) } func (c *FakeCoreV1) Pods(namespace string) v1.PodInterface { - return &FakePods{c, namespace} + return newFakePods(c, namespace) } func (c *FakeCoreV1) PodTemplates(namespace string) v1.PodTemplateInterface { - return &FakePodTemplates{c, namespace} + return newFakePodTemplates(c, namespace) } func (c *FakeCoreV1) 
ReplicationControllers(namespace string) v1.ReplicationControllerInterface { - return &FakeReplicationControllers{c, namespace} + return newFakeReplicationControllers(c, namespace) } func (c *FakeCoreV1) ResourceQuotas(namespace string) v1.ResourceQuotaInterface { - return &FakeResourceQuotas{c, namespace} + return newFakeResourceQuotas(c, namespace) } func (c *FakeCoreV1) Secrets(namespace string) v1.SecretInterface { - return &FakeSecrets{c, namespace} + return newFakeSecrets(c, namespace) } func (c *FakeCoreV1) Services(namespace string) v1.ServiceInterface { - return &FakeServices{c, namespace} + return newFakeServices(c, namespace) } func (c *FakeCoreV1) ServiceAccounts(namespace string) v1.ServiceAccountInterface { - return &FakeServiceAccounts{c, namespace} + return newFakeServiceAccounts(c, namespace) } // RESTClient returns a RESTClient that is used to communicate diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_endpoints.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_endpoints.go index 7e2e91cfa..d58c2dab4 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_endpoints.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_endpoints.go @@ -19,142 +19,31 @@ limitations under the License. package fake import ( - "context" - json "encoding/json" - "fmt" - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" corev1 "k8s.io/client-go/applyconfigurations/core/v1" - testing "k8s.io/client-go/testing" + gentype "k8s.io/client-go/gentype" + typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1" ) -// FakeEndpoints implements EndpointsInterface -type FakeEndpoints struct { +// fakeEndpoints implements EndpointsInterface +type fakeEndpoints struct { + *gentype.FakeClientWithListAndApply[*v1.Endpoints, *v1.EndpointsList, *corev1.EndpointsApplyConfiguration] Fake *FakeCoreV1 - ns string -} - -var endpointsResource = v1.SchemeGroupVersion.WithResource("endpoints") - -var endpointsKind = v1.SchemeGroupVersion.WithKind("Endpoints") - -// Get takes name of the endpoints, and returns the corresponding endpoints object, and an error if there is any. -func (c *FakeEndpoints) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Endpoints, err error) { - emptyResult := &v1.Endpoints{} - obj, err := c.Fake. - Invokes(testing.NewGetActionWithOptions(endpointsResource, c.ns, name, options), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1.Endpoints), err -} - -// List takes label and field selectors, and returns the list of Endpoints that match those selectors. -func (c *FakeEndpoints) List(ctx context.Context, opts metav1.ListOptions) (result *v1.EndpointsList, err error) { - emptyResult := &v1.EndpointsList{} - obj, err := c.Fake. - Invokes(testing.NewListActionWithOptions(endpointsResource, endpointsKind, c.ns, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1.EndpointsList{ListMeta: obj.(*v1.EndpointsList).ListMeta} - for _, item := range obj.(*v1.EndpointsList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested endpoints. 
-func (c *FakeEndpoints) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchActionWithOptions(endpointsResource, c.ns, opts)) - } -// Create takes the representation of a endpoints and creates it. Returns the server's representation of the endpoints, and an error, if there is any. -func (c *FakeEndpoints) Create(ctx context.Context, endpoints *v1.Endpoints, opts metav1.CreateOptions) (result *v1.Endpoints, err error) { - emptyResult := &v1.Endpoints{} - obj, err := c.Fake. - Invokes(testing.NewCreateActionWithOptions(endpointsResource, c.ns, endpoints, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1.Endpoints), err -} - -// Update takes the representation of a endpoints and updates it. Returns the server's representation of the endpoints, and an error, if there is any. -func (c *FakeEndpoints) Update(ctx context.Context, endpoints *v1.Endpoints, opts metav1.UpdateOptions) (result *v1.Endpoints, err error) { - emptyResult := &v1.Endpoints{} - obj, err := c.Fake. - Invokes(testing.NewUpdateActionWithOptions(endpointsResource, c.ns, endpoints, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1.Endpoints), err -} - -// Delete takes name of the endpoints and deletes it. Returns an error if one occurs. -func (c *FakeEndpoints) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(endpointsResource, c.ns, name, opts), &v1.Endpoints{}) - - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeEndpoints) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewDeleteCollectionActionWithOptions(endpointsResource, c.ns, opts, listOpts) - - _, err := c.Fake.Invokes(action, &v1.EndpointsList{}) - return err -} - -// Patch applies the patch and returns the patched endpoints. -func (c *FakeEndpoints) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Endpoints, err error) { - emptyResult := &v1.Endpoints{} - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceActionWithOptions(endpointsResource, c.ns, name, pt, data, opts, subresources...), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1.Endpoints), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied endpoints. -func (c *FakeEndpoints) Apply(ctx context.Context, endpoints *corev1.EndpointsApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Endpoints, err error) { - if endpoints == nil { - return nil, fmt.Errorf("endpoints provided to Apply must not be nil") - } - data, err := json.Marshal(endpoints) - if err != nil { - return nil, err - } - name := endpoints.Name - if name == nil { - return nil, fmt.Errorf("endpoints.Name must be provided to Apply") - } - emptyResult := &v1.Endpoints{} - obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceActionWithOptions(endpointsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) - - if obj == nil { - return emptyResult, err +func newFakeEndpoints(fake *FakeCoreV1, namespace string) typedcorev1.EndpointsInterface { + return &fakeEndpoints{ + gentype.NewFakeClientWithListAndApply[*v1.Endpoints, *v1.EndpointsList, *corev1.EndpointsApplyConfiguration]( + fake.Fake, + namespace, + v1.SchemeGroupVersion.WithResource("endpoints"), + v1.SchemeGroupVersion.WithKind("Endpoints"), + func() *v1.Endpoints { return &v1.Endpoints{} }, + func() *v1.EndpointsList { return &v1.EndpointsList{} }, + func(dst, src *v1.EndpointsList) { dst.ListMeta = src.ListMeta }, + func(list *v1.EndpointsList) []*v1.Endpoints { return gentype.ToPointerSlice(list.Items) }, + func(list *v1.EndpointsList, items []*v1.Endpoints) { list.Items = gentype.FromPointerSlice(items) }, + ), + fake, } - return obj.(*v1.Endpoints), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_event.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_event.go index a438ba473..36ee633c0 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_event.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_event.go @@ -19,142 +19,31 @@ limitations under the License. package fake import ( - "context" - json "encoding/json" - "fmt" - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" corev1 "k8s.io/client-go/applyconfigurations/core/v1" - testing "k8s.io/client-go/testing" + gentype "k8s.io/client-go/gentype" + typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1" ) -// FakeEvents implements EventInterface -type FakeEvents struct { +// fakeEvents implements EventInterface +type fakeEvents struct { + *gentype.FakeClientWithListAndApply[*v1.Event, *v1.EventList, *corev1.EventApplyConfiguration] Fake *FakeCoreV1 - ns string -} - -var eventsResource = v1.SchemeGroupVersion.WithResource("events") - -var eventsKind = v1.SchemeGroupVersion.WithKind("Event") - -// Get takes name of the event, and returns the corresponding event object, and an error if there is any. -func (c *FakeEvents) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Event, err error) { - emptyResult := &v1.Event{} - obj, err := c.Fake. - Invokes(testing.NewGetActionWithOptions(eventsResource, c.ns, name, options), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1.Event), err -} - -// List takes label and field selectors, and returns the list of Events that match those selectors. -func (c *FakeEvents) List(ctx context.Context, opts metav1.ListOptions) (result *v1.EventList, err error) { - emptyResult := &v1.EventList{} - obj, err := c.Fake. - Invokes(testing.NewListActionWithOptions(eventsResource, eventsKind, c.ns, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1.EventList{ListMeta: obj.(*v1.EventList).ListMeta} - for _, item := range obj.(*v1.EventList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested events. 
-func (c *FakeEvents) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchActionWithOptions(eventsResource, c.ns, opts)) - } -// Create takes the representation of a event and creates it. Returns the server's representation of the event, and an error, if there is any. -func (c *FakeEvents) Create(ctx context.Context, event *v1.Event, opts metav1.CreateOptions) (result *v1.Event, err error) { - emptyResult := &v1.Event{} - obj, err := c.Fake. - Invokes(testing.NewCreateActionWithOptions(eventsResource, c.ns, event, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1.Event), err -} - -// Update takes the representation of a event and updates it. Returns the server's representation of the event, and an error, if there is any. -func (c *FakeEvents) Update(ctx context.Context, event *v1.Event, opts metav1.UpdateOptions) (result *v1.Event, err error) { - emptyResult := &v1.Event{} - obj, err := c.Fake. - Invokes(testing.NewUpdateActionWithOptions(eventsResource, c.ns, event, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1.Event), err -} - -// Delete takes name of the event and deletes it. Returns an error if one occurs. -func (c *FakeEvents) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(eventsResource, c.ns, name, opts), &v1.Event{}) - - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeEvents) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewDeleteCollectionActionWithOptions(eventsResource, c.ns, opts, listOpts) - - _, err := c.Fake.Invokes(action, &v1.EventList{}) - return err -} - -// Patch applies the patch and returns the patched event. -func (c *FakeEvents) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Event, err error) { - emptyResult := &v1.Event{} - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceActionWithOptions(eventsResource, c.ns, name, pt, data, opts, subresources...), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1.Event), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied event. -func (c *FakeEvents) Apply(ctx context.Context, event *corev1.EventApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Event, err error) { - if event == nil { - return nil, fmt.Errorf("event provided to Apply must not be nil") - } - data, err := json.Marshal(event) - if err != nil { - return nil, err - } - name := event.Name - if name == nil { - return nil, fmt.Errorf("event.Name must be provided to Apply") - } - emptyResult := &v1.Event{} - obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceActionWithOptions(eventsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) - - if obj == nil { - return emptyResult, err +func newFakeEvents(fake *FakeCoreV1, namespace string) typedcorev1.EventInterface { + return &fakeEvents{ + gentype.NewFakeClientWithListAndApply[*v1.Event, *v1.EventList, *corev1.EventApplyConfiguration]( + fake.Fake, + namespace, + v1.SchemeGroupVersion.WithResource("events"), + v1.SchemeGroupVersion.WithKind("Event"), + func() *v1.Event { return &v1.Event{} }, + func() *v1.EventList { return &v1.EventList{} }, + func(dst, src *v1.EventList) { dst.ListMeta = src.ListMeta }, + func(list *v1.EventList) []*v1.Event { return gentype.ToPointerSlice(list.Items) }, + func(list *v1.EventList, items []*v1.Event) { list.Items = gentype.FromPointerSlice(items) }, + ), + fake, } - return obj.(*v1.Event), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_event_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_event_expansion.go index 48282f86e..3840f6323 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_event_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_event_expansion.go @@ -25,12 +25,12 @@ import ( core "k8s.io/client-go/testing" ) -func (c *FakeEvents) CreateWithEventNamespace(event *v1.Event) (*v1.Event, error) { +func (c *fakeEvents) CreateWithEventNamespace(event *v1.Event) (*v1.Event, error) { var action core.CreateActionImpl - if c.ns != "" { - action = core.NewCreateAction(eventsResource, c.ns, event) + if c.Namespace() != "" { + action = core.NewCreateAction(c.Resource(), c.Namespace(), event) } else { - action = core.NewCreateAction(eventsResource, event.GetNamespace(), event) + action = core.NewCreateAction(c.Resource(), event.GetNamespace(), event) } obj, err := c.Fake.Invokes(action, event) if obj == nil { @@ -41,12 +41,12 @@ func (c *FakeEvents) CreateWithEventNamespace(event *v1.Event) (*v1.Event, error } // Update replaces an existing event. Returns the copy of the event the server returns, or an error. -func (c *FakeEvents) UpdateWithEventNamespace(event *v1.Event) (*v1.Event, error) { +func (c *fakeEvents) UpdateWithEventNamespace(event *v1.Event) (*v1.Event, error) { var action core.UpdateActionImpl - if c.ns != "" { - action = core.NewUpdateAction(eventsResource, c.ns, event) + if c.Namespace() != "" { + action = core.NewUpdateAction(c.Resource(), c.Namespace(), event) } else { - action = core.NewUpdateAction(eventsResource, event.GetNamespace(), event) + action = core.NewUpdateAction(c.Resource(), event.GetNamespace(), event) } obj, err := c.Fake.Invokes(action, event) if obj == nil { @@ -58,14 +58,14 @@ func (c *FakeEvents) UpdateWithEventNamespace(event *v1.Event) (*v1.Event, error // PatchWithEventNamespace patches an existing event. Returns the copy of the event the server returns, or an error. // TODO: Should take a PatchType as an argument probably. -func (c *FakeEvents) PatchWithEventNamespace(event *v1.Event, data []byte) (*v1.Event, error) { +func (c *fakeEvents) PatchWithEventNamespace(event *v1.Event, data []byte) (*v1.Event, error) { // TODO: Should be configurable to support additional patch strategies. 
pt := types.StrategicMergePatchType var action core.PatchActionImpl - if c.ns != "" { - action = core.NewPatchAction(eventsResource, c.ns, event.Name, pt, data) + if c.Namespace() != "" { + action = core.NewPatchAction(c.Resource(), c.Namespace(), event.Name, pt, data) } else { - action = core.NewPatchAction(eventsResource, event.GetNamespace(), event.Name, pt, data) + action = core.NewPatchAction(c.Resource(), event.GetNamespace(), event.Name, pt, data) } obj, err := c.Fake.Invokes(action, event) if obj == nil { @@ -76,12 +76,12 @@ func (c *FakeEvents) PatchWithEventNamespace(event *v1.Event, data []byte) (*v1. } // Search returns a list of events matching the specified object. -func (c *FakeEvents) Search(scheme *runtime.Scheme, objOrRef runtime.Object) (*v1.EventList, error) { +func (c *fakeEvents) Search(scheme *runtime.Scheme, objOrRef runtime.Object) (*v1.EventList, error) { var action core.ListActionImpl - if c.ns != "" { - action = core.NewListAction(eventsResource, eventsKind, c.ns, metav1.ListOptions{}) + if c.Namespace() != "" { + action = core.NewListAction(c.Resource(), c.Kind(), c.Namespace(), metav1.ListOptions{}) } else { - action = core.NewListAction(eventsResource, eventsKind, v1.NamespaceDefault, metav1.ListOptions{}) + action = core.NewListAction(c.Resource(), c.Kind(), v1.NamespaceDefault, metav1.ListOptions{}) } obj, err := c.Fake.Invokes(action, &v1.EventList{}) if obj == nil { @@ -91,10 +91,10 @@ func (c *FakeEvents) Search(scheme *runtime.Scheme, objOrRef runtime.Object) (*v return obj.(*v1.EventList), err } -func (c *FakeEvents) GetFieldSelector(involvedObjectName, involvedObjectNamespace, involvedObjectKind, involvedObjectUID *string) fields.Selector { +func (c *fakeEvents) GetFieldSelector(involvedObjectName, involvedObjectNamespace, involvedObjectKind, involvedObjectUID *string) fields.Selector { action := core.GenericActionImpl{} action.Verb = "get-field-selector" - action.Resource = eventsResource + action.Resource = c.Resource() c.Fake.Invokes(action, nil) return fields.Everything() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_limitrange.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_limitrange.go index 4cc36131a..377581f10 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_limitrange.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_limitrange.go @@ -19,142 +19,31 @@ limitations under the License. package fake import ( - "context" - json "encoding/json" - "fmt" - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" corev1 "k8s.io/client-go/applyconfigurations/core/v1" - testing "k8s.io/client-go/testing" + gentype "k8s.io/client-go/gentype" + typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1" ) -// FakeLimitRanges implements LimitRangeInterface -type FakeLimitRanges struct { +// fakeLimitRanges implements LimitRangeInterface +type fakeLimitRanges struct { + *gentype.FakeClientWithListAndApply[*v1.LimitRange, *v1.LimitRangeList, *corev1.LimitRangeApplyConfiguration] Fake *FakeCoreV1 - ns string -} - -var limitrangesResource = v1.SchemeGroupVersion.WithResource("limitranges") - -var limitrangesKind = v1.SchemeGroupVersion.WithKind("LimitRange") - -// Get takes name of the limitRange, and returns the corresponding limitRange object, and an error if there is any. 
-func (c *FakeLimitRanges) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.LimitRange, err error) { - emptyResult := &v1.LimitRange{} - obj, err := c.Fake. - Invokes(testing.NewGetActionWithOptions(limitrangesResource, c.ns, name, options), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1.LimitRange), err -} - -// List takes label and field selectors, and returns the list of LimitRanges that match those selectors. -func (c *FakeLimitRanges) List(ctx context.Context, opts metav1.ListOptions) (result *v1.LimitRangeList, err error) { - emptyResult := &v1.LimitRangeList{} - obj, err := c.Fake. - Invokes(testing.NewListActionWithOptions(limitrangesResource, limitrangesKind, c.ns, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1.LimitRangeList{ListMeta: obj.(*v1.LimitRangeList).ListMeta} - for _, item := range obj.(*v1.LimitRangeList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested limitRanges. -func (c *FakeLimitRanges) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchActionWithOptions(limitrangesResource, c.ns, opts)) - } -// Create takes the representation of a limitRange and creates it. Returns the server's representation of the limitRange, and an error, if there is any. -func (c *FakeLimitRanges) Create(ctx context.Context, limitRange *v1.LimitRange, opts metav1.CreateOptions) (result *v1.LimitRange, err error) { - emptyResult := &v1.LimitRange{} - obj, err := c.Fake. - Invokes(testing.NewCreateActionWithOptions(limitrangesResource, c.ns, limitRange, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1.LimitRange), err -} - -// Update takes the representation of a limitRange and updates it. Returns the server's representation of the limitRange, and an error, if there is any. -func (c *FakeLimitRanges) Update(ctx context.Context, limitRange *v1.LimitRange, opts metav1.UpdateOptions) (result *v1.LimitRange, err error) { - emptyResult := &v1.LimitRange{} - obj, err := c.Fake. - Invokes(testing.NewUpdateActionWithOptions(limitrangesResource, c.ns, limitRange, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1.LimitRange), err -} - -// Delete takes name of the limitRange and deletes it. Returns an error if one occurs. -func (c *FakeLimitRanges) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(limitrangesResource, c.ns, name, opts), &v1.LimitRange{}) - - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeLimitRanges) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewDeleteCollectionActionWithOptions(limitrangesResource, c.ns, opts, listOpts) - - _, err := c.Fake.Invokes(action, &v1.LimitRangeList{}) - return err -} - -// Patch applies the patch and returns the patched limitRange. 
-func (c *FakeLimitRanges) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.LimitRange, err error) { - emptyResult := &v1.LimitRange{} - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceActionWithOptions(limitrangesResource, c.ns, name, pt, data, opts, subresources...), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1.LimitRange), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied limitRange. -func (c *FakeLimitRanges) Apply(ctx context.Context, limitRange *corev1.LimitRangeApplyConfiguration, opts metav1.ApplyOptions) (result *v1.LimitRange, err error) { - if limitRange == nil { - return nil, fmt.Errorf("limitRange provided to Apply must not be nil") - } - data, err := json.Marshal(limitRange) - if err != nil { - return nil, err - } - name := limitRange.Name - if name == nil { - return nil, fmt.Errorf("limitRange.Name must be provided to Apply") - } - emptyResult := &v1.LimitRange{} - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceActionWithOptions(limitrangesResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) - - if obj == nil { - return emptyResult, err +func newFakeLimitRanges(fake *FakeCoreV1, namespace string) typedcorev1.LimitRangeInterface { + return &fakeLimitRanges{ + gentype.NewFakeClientWithListAndApply[*v1.LimitRange, *v1.LimitRangeList, *corev1.LimitRangeApplyConfiguration]( + fake.Fake, + namespace, + v1.SchemeGroupVersion.WithResource("limitranges"), + v1.SchemeGroupVersion.WithKind("LimitRange"), + func() *v1.LimitRange { return &v1.LimitRange{} }, + func() *v1.LimitRangeList { return &v1.LimitRangeList{} }, + func(dst, src *v1.LimitRangeList) { dst.ListMeta = src.ListMeta }, + func(list *v1.LimitRangeList) []*v1.LimitRange { return gentype.ToPointerSlice(list.Items) }, + func(list *v1.LimitRangeList, items []*v1.LimitRange) { list.Items = gentype.FromPointerSlice(items) }, + ), + fake, } - return obj.(*v1.LimitRange), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_namespace.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_namespace.go index 093990571..2e0ac2d8a 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_namespace.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_namespace.go @@ -19,160 +19,31 @@ limitations under the License. package fake import ( - "context" - json "encoding/json" - "fmt" - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" corev1 "k8s.io/client-go/applyconfigurations/core/v1" - testing "k8s.io/client-go/testing" + gentype "k8s.io/client-go/gentype" + typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1" ) -// FakeNamespaces implements NamespaceInterface -type FakeNamespaces struct { +// fakeNamespaces implements NamespaceInterface +type fakeNamespaces struct { + *gentype.FakeClientWithListAndApply[*v1.Namespace, *v1.NamespaceList, *corev1.NamespaceApplyConfiguration] Fake *FakeCoreV1 } -var namespacesResource = v1.SchemeGroupVersion.WithResource("namespaces") - -var namespacesKind = v1.SchemeGroupVersion.WithKind("Namespace") - -// Get takes name of the namespace, and returns the corresponding namespace object, and an error if there is any. 
-func (c *FakeNamespaces) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Namespace, err error) { - emptyResult := &v1.Namespace{} - obj, err := c.Fake. - Invokes(testing.NewRootGetActionWithOptions(namespacesResource, name, options), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1.Namespace), err -} - -// List takes label and field selectors, and returns the list of Namespaces that match those selectors. -func (c *FakeNamespaces) List(ctx context.Context, opts metav1.ListOptions) (result *v1.NamespaceList, err error) { - emptyResult := &v1.NamespaceList{} - obj, err := c.Fake. - Invokes(testing.NewRootListActionWithOptions(namespacesResource, namespacesKind, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1.NamespaceList{ListMeta: obj.(*v1.NamespaceList).ListMeta} - for _, item := range obj.(*v1.NamespaceList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested namespaces. -func (c *FakeNamespaces) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewRootWatchActionWithOptions(namespacesResource, opts)) -} - -// Create takes the representation of a namespace and creates it. Returns the server's representation of the namespace, and an error, if there is any. -func (c *FakeNamespaces) Create(ctx context.Context, namespace *v1.Namespace, opts metav1.CreateOptions) (result *v1.Namespace, err error) { - emptyResult := &v1.Namespace{} - obj, err := c.Fake. - Invokes(testing.NewRootCreateActionWithOptions(namespacesResource, namespace, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1.Namespace), err -} - -// Update takes the representation of a namespace and updates it. Returns the server's representation of the namespace, and an error, if there is any. -func (c *FakeNamespaces) Update(ctx context.Context, namespace *v1.Namespace, opts metav1.UpdateOptions) (result *v1.Namespace, err error) { - emptyResult := &v1.Namespace{} - obj, err := c.Fake. - Invokes(testing.NewRootUpdateActionWithOptions(namespacesResource, namespace, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1.Namespace), err -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeNamespaces) UpdateStatus(ctx context.Context, namespace *v1.Namespace, opts metav1.UpdateOptions) (result *v1.Namespace, err error) { - emptyResult := &v1.Namespace{} - obj, err := c.Fake. - Invokes(testing.NewRootUpdateSubresourceActionWithOptions(namespacesResource, "status", namespace, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1.Namespace), err -} - -// Delete takes name of the namespace and deletes it. Returns an error if one occurs. -func (c *FakeNamespaces) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewRootDeleteActionWithOptions(namespacesResource, name, opts), &v1.Namespace{}) - return err -} - -// Patch applies the patch and returns the patched namespace. 
-func (c *FakeNamespaces) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Namespace, err error) { - emptyResult := &v1.Namespace{} - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceActionWithOptions(namespacesResource, name, pt, data, opts, subresources...), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1.Namespace), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied namespace. -func (c *FakeNamespaces) Apply(ctx context.Context, namespace *corev1.NamespaceApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Namespace, err error) { - if namespace == nil { - return nil, fmt.Errorf("namespace provided to Apply must not be nil") - } - data, err := json.Marshal(namespace) - if err != nil { - return nil, err - } - name := namespace.Name - if name == nil { - return nil, fmt.Errorf("namespace.Name must be provided to Apply") - } - emptyResult := &v1.Namespace{} - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceActionWithOptions(namespacesResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1.Namespace), err -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *FakeNamespaces) ApplyStatus(ctx context.Context, namespace *corev1.NamespaceApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Namespace, err error) { - if namespace == nil { - return nil, fmt.Errorf("namespace provided to Apply must not be nil") - } - data, err := json.Marshal(namespace) - if err != nil { - return nil, err - } - name := namespace.Name - if name == nil { - return nil, fmt.Errorf("namespace.Name must be provided to Apply") - } - emptyResult := &v1.Namespace{} - obj, err := c.Fake. 
- Invokes(testing.NewRootPatchSubresourceActionWithOptions(namespacesResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) - if obj == nil { - return emptyResult, err +func newFakeNamespaces(fake *FakeCoreV1) typedcorev1.NamespaceInterface { + return &fakeNamespaces{ + gentype.NewFakeClientWithListAndApply[*v1.Namespace, *v1.NamespaceList, *corev1.NamespaceApplyConfiguration]( + fake.Fake, + "", + v1.SchemeGroupVersion.WithResource("namespaces"), + v1.SchemeGroupVersion.WithKind("Namespace"), + func() *v1.Namespace { return &v1.Namespace{} }, + func() *v1.NamespaceList { return &v1.NamespaceList{} }, + func(dst, src *v1.NamespaceList) { dst.ListMeta = src.ListMeta }, + func(list *v1.NamespaceList) []*v1.Namespace { return gentype.ToPointerSlice(list.Items) }, + func(list *v1.NamespaceList, items []*v1.Namespace) { list.Items = gentype.FromPointerSlice(items) }, + ), + fake, } - return obj.(*v1.Namespace), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_namespace_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_namespace_expansion.go index d86b328a4..adc2624b5 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_namespace_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_namespace_expansion.go @@ -19,15 +19,15 @@ package fake import ( "context" - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" core "k8s.io/client-go/testing" ) -func (c *FakeNamespaces) Finalize(ctx context.Context, namespace *v1.Namespace, opts metav1.UpdateOptions) (*v1.Namespace, error) { +func (c *fakeNamespaces) Finalize(ctx context.Context, namespace *v1.Namespace, opts metav1.UpdateOptions) (*v1.Namespace, error) { action := core.CreateActionImpl{} action.Verb = "create" - action.Resource = namespacesResource + action.Resource = c.Resource() action.Subresource = "finalize" action.Object = namespace diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_node.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_node.go index 451f992da..8e59a61e0 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_node.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_node.go @@ -19,168 +19,31 @@ limitations under the License. package fake import ( - "context" - json "encoding/json" - "fmt" - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" corev1 "k8s.io/client-go/applyconfigurations/core/v1" - testing "k8s.io/client-go/testing" + gentype "k8s.io/client-go/gentype" + typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1" ) -// FakeNodes implements NodeInterface -type FakeNodes struct { +// fakeNodes implements NodeInterface +type fakeNodes struct { + *gentype.FakeClientWithListAndApply[*v1.Node, *v1.NodeList, *corev1.NodeApplyConfiguration] Fake *FakeCoreV1 } -var nodesResource = v1.SchemeGroupVersion.WithResource("nodes") - -var nodesKind = v1.SchemeGroupVersion.WithKind("Node") - -// Get takes name of the node, and returns the corresponding node object, and an error if there is any. -func (c *FakeNodes) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Node, err error) { - emptyResult := &v1.Node{} - obj, err := c.Fake. 
- Invokes(testing.NewRootGetActionWithOptions(nodesResource, name, options), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1.Node), err -} - -// List takes label and field selectors, and returns the list of Nodes that match those selectors. -func (c *FakeNodes) List(ctx context.Context, opts metav1.ListOptions) (result *v1.NodeList, err error) { - emptyResult := &v1.NodeList{} - obj, err := c.Fake. - Invokes(testing.NewRootListActionWithOptions(nodesResource, nodesKind, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1.NodeList{ListMeta: obj.(*v1.NodeList).ListMeta} - for _, item := range obj.(*v1.NodeList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested nodes. -func (c *FakeNodes) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewRootWatchActionWithOptions(nodesResource, opts)) -} - -// Create takes the representation of a node and creates it. Returns the server's representation of the node, and an error, if there is any. -func (c *FakeNodes) Create(ctx context.Context, node *v1.Node, opts metav1.CreateOptions) (result *v1.Node, err error) { - emptyResult := &v1.Node{} - obj, err := c.Fake. - Invokes(testing.NewRootCreateActionWithOptions(nodesResource, node, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1.Node), err -} - -// Update takes the representation of a node and updates it. Returns the server's representation of the node, and an error, if there is any. -func (c *FakeNodes) Update(ctx context.Context, node *v1.Node, opts metav1.UpdateOptions) (result *v1.Node, err error) { - emptyResult := &v1.Node{} - obj, err := c.Fake. - Invokes(testing.NewRootUpdateActionWithOptions(nodesResource, node, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1.Node), err -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeNodes) UpdateStatus(ctx context.Context, node *v1.Node, opts metav1.UpdateOptions) (result *v1.Node, err error) { - emptyResult := &v1.Node{} - obj, err := c.Fake. - Invokes(testing.NewRootUpdateSubresourceActionWithOptions(nodesResource, "status", node, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1.Node), err -} - -// Delete takes name of the node and deletes it. Returns an error if one occurs. -func (c *FakeNodes) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewRootDeleteActionWithOptions(nodesResource, name, opts), &v1.Node{}) - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeNodes) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewRootDeleteCollectionActionWithOptions(nodesResource, opts, listOpts) - - _, err := c.Fake.Invokes(action, &v1.NodeList{}) - return err -} - -// Patch applies the patch and returns the patched node. 
-func (c *FakeNodes) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Node, err error) { - emptyResult := &v1.Node{} - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceActionWithOptions(nodesResource, name, pt, data, opts, subresources...), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1.Node), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied node. -func (c *FakeNodes) Apply(ctx context.Context, node *corev1.NodeApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Node, err error) { - if node == nil { - return nil, fmt.Errorf("node provided to Apply must not be nil") - } - data, err := json.Marshal(node) - if err != nil { - return nil, err - } - name := node.Name - if name == nil { - return nil, fmt.Errorf("node.Name must be provided to Apply") - } - emptyResult := &v1.Node{} - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceActionWithOptions(nodesResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1.Node), err -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *FakeNodes) ApplyStatus(ctx context.Context, node *corev1.NodeApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Node, err error) { - if node == nil { - return nil, fmt.Errorf("node provided to Apply must not be nil") - } - data, err := json.Marshal(node) - if err != nil { - return nil, err - } - name := node.Name - if name == nil { - return nil, fmt.Errorf("node.Name must be provided to Apply") - } - emptyResult := &v1.Node{} - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceActionWithOptions(nodesResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) - if obj == nil { - return emptyResult, err +func newFakeNodes(fake *FakeCoreV1) typedcorev1.NodeInterface { + return &fakeNodes{ + gentype.NewFakeClientWithListAndApply[*v1.Node, *v1.NodeList, *corev1.NodeApplyConfiguration]( + fake.Fake, + "", + v1.SchemeGroupVersion.WithResource("nodes"), + v1.SchemeGroupVersion.WithKind("Node"), + func() *v1.Node { return &v1.Node{} }, + func() *v1.NodeList { return &v1.NodeList{} }, + func(dst, src *v1.NodeList) { dst.ListMeta = src.ListMeta }, + func(list *v1.NodeList) []*v1.Node { return gentype.ToPointerSlice(list.Items) }, + func(list *v1.NodeList, items []*v1.Node) { list.Items = gentype.FromPointerSlice(items) }, + ), + fake, } - return obj.(*v1.Node), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_node_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_node_expansion.go index eccf9fec6..0e5be8495 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_node_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_node_expansion.go @@ -25,11 +25,11 @@ import ( ) // TODO: Should take a PatchType as an argument probably. -func (c *FakeNodes) PatchStatus(_ context.Context, nodeName string, data []byte) (*v1.Node, error) { +func (c *fakeNodes) PatchStatus(_ context.Context, nodeName string, data []byte) (*v1.Node, error) { // TODO: Should be configurable to support additional patch strategies. 
pt := types.StrategicMergePatchType obj, err := c.Fake.Invokes( - core.NewRootPatchSubresourceAction(nodesResource, nodeName, pt, data, "status"), &v1.Node{}) + core.NewRootPatchSubresourceAction(c.Resource(), nodeName, pt, data, "status"), &v1.Node{}) if obj == nil { return nil, err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_persistentvolume.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_persistentvolume.go index 16a1f2201..d4cbfcbfb 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_persistentvolume.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_persistentvolume.go @@ -19,168 +19,33 @@ limitations under the License. package fake import ( - "context" - json "encoding/json" - "fmt" - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" corev1 "k8s.io/client-go/applyconfigurations/core/v1" - testing "k8s.io/client-go/testing" + gentype "k8s.io/client-go/gentype" + typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1" ) -// FakePersistentVolumes implements PersistentVolumeInterface -type FakePersistentVolumes struct { +// fakePersistentVolumes implements PersistentVolumeInterface +type fakePersistentVolumes struct { + *gentype.FakeClientWithListAndApply[*v1.PersistentVolume, *v1.PersistentVolumeList, *corev1.PersistentVolumeApplyConfiguration] Fake *FakeCoreV1 } -var persistentvolumesResource = v1.SchemeGroupVersion.WithResource("persistentvolumes") - -var persistentvolumesKind = v1.SchemeGroupVersion.WithKind("PersistentVolume") - -// Get takes name of the persistentVolume, and returns the corresponding persistentVolume object, and an error if there is any. -func (c *FakePersistentVolumes) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.PersistentVolume, err error) { - emptyResult := &v1.PersistentVolume{} - obj, err := c.Fake. - Invokes(testing.NewRootGetActionWithOptions(persistentvolumesResource, name, options), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1.PersistentVolume), err -} - -// List takes label and field selectors, and returns the list of PersistentVolumes that match those selectors. -func (c *FakePersistentVolumes) List(ctx context.Context, opts metav1.ListOptions) (result *v1.PersistentVolumeList, err error) { - emptyResult := &v1.PersistentVolumeList{} - obj, err := c.Fake. - Invokes(testing.NewRootListActionWithOptions(persistentvolumesResource, persistentvolumesKind, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1.PersistentVolumeList{ListMeta: obj.(*v1.PersistentVolumeList).ListMeta} - for _, item := range obj.(*v1.PersistentVolumeList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested persistentVolumes. -func (c *FakePersistentVolumes) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewRootWatchActionWithOptions(persistentvolumesResource, opts)) -} - -// Create takes the representation of a persistentVolume and creates it. Returns the server's representation of the persistentVolume, and an error, if there is any. 
-func (c *FakePersistentVolumes) Create(ctx context.Context, persistentVolume *v1.PersistentVolume, opts metav1.CreateOptions) (result *v1.PersistentVolume, err error) { - emptyResult := &v1.PersistentVolume{} - obj, err := c.Fake. - Invokes(testing.NewRootCreateActionWithOptions(persistentvolumesResource, persistentVolume, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1.PersistentVolume), err -} - -// Update takes the representation of a persistentVolume and updates it. Returns the server's representation of the persistentVolume, and an error, if there is any. -func (c *FakePersistentVolumes) Update(ctx context.Context, persistentVolume *v1.PersistentVolume, opts metav1.UpdateOptions) (result *v1.PersistentVolume, err error) { - emptyResult := &v1.PersistentVolume{} - obj, err := c.Fake. - Invokes(testing.NewRootUpdateActionWithOptions(persistentvolumesResource, persistentVolume, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1.PersistentVolume), err -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakePersistentVolumes) UpdateStatus(ctx context.Context, persistentVolume *v1.PersistentVolume, opts metav1.UpdateOptions) (result *v1.PersistentVolume, err error) { - emptyResult := &v1.PersistentVolume{} - obj, err := c.Fake. - Invokes(testing.NewRootUpdateSubresourceActionWithOptions(persistentvolumesResource, "status", persistentVolume, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1.PersistentVolume), err -} - -// Delete takes name of the persistentVolume and deletes it. Returns an error if one occurs. -func (c *FakePersistentVolumes) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewRootDeleteActionWithOptions(persistentvolumesResource, name, opts), &v1.PersistentVolume{}) - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakePersistentVolumes) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewRootDeleteCollectionActionWithOptions(persistentvolumesResource, opts, listOpts) - - _, err := c.Fake.Invokes(action, &v1.PersistentVolumeList{}) - return err -} - -// Patch applies the patch and returns the patched persistentVolume. -func (c *FakePersistentVolumes) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.PersistentVolume, err error) { - emptyResult := &v1.PersistentVolume{} - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceActionWithOptions(persistentvolumesResource, name, pt, data, opts, subresources...), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1.PersistentVolume), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied persistentVolume. 
-func (c *FakePersistentVolumes) Apply(ctx context.Context, persistentVolume *corev1.PersistentVolumeApplyConfiguration, opts metav1.ApplyOptions) (result *v1.PersistentVolume, err error) { - if persistentVolume == nil { - return nil, fmt.Errorf("persistentVolume provided to Apply must not be nil") - } - data, err := json.Marshal(persistentVolume) - if err != nil { - return nil, err - } - name := persistentVolume.Name - if name == nil { - return nil, fmt.Errorf("persistentVolume.Name must be provided to Apply") - } - emptyResult := &v1.PersistentVolume{} - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceActionWithOptions(persistentvolumesResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1.PersistentVolume), err -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *FakePersistentVolumes) ApplyStatus(ctx context.Context, persistentVolume *corev1.PersistentVolumeApplyConfiguration, opts metav1.ApplyOptions) (result *v1.PersistentVolume, err error) { - if persistentVolume == nil { - return nil, fmt.Errorf("persistentVolume provided to Apply must not be nil") - } - data, err := json.Marshal(persistentVolume) - if err != nil { - return nil, err - } - name := persistentVolume.Name - if name == nil { - return nil, fmt.Errorf("persistentVolume.Name must be provided to Apply") - } - emptyResult := &v1.PersistentVolume{} - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceActionWithOptions(persistentvolumesResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) - if obj == nil { - return emptyResult, err +func newFakePersistentVolumes(fake *FakeCoreV1) typedcorev1.PersistentVolumeInterface { + return &fakePersistentVolumes{ + gentype.NewFakeClientWithListAndApply[*v1.PersistentVolume, *v1.PersistentVolumeList, *corev1.PersistentVolumeApplyConfiguration]( + fake.Fake, + "", + v1.SchemeGroupVersion.WithResource("persistentvolumes"), + v1.SchemeGroupVersion.WithKind("PersistentVolume"), + func() *v1.PersistentVolume { return &v1.PersistentVolume{} }, + func() *v1.PersistentVolumeList { return &v1.PersistentVolumeList{} }, + func(dst, src *v1.PersistentVolumeList) { dst.ListMeta = src.ListMeta }, + func(list *v1.PersistentVolumeList) []*v1.PersistentVolume { return gentype.ToPointerSlice(list.Items) }, + func(list *v1.PersistentVolumeList, items []*v1.PersistentVolume) { + list.Items = gentype.FromPointerSlice(items) + }, + ), + fake, } - return obj.(*v1.PersistentVolume), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_persistentvolumeclaim.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_persistentvolumeclaim.go index 12617c243..3b2511337 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_persistentvolumeclaim.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_persistentvolumeclaim.go @@ -19,179 +19,35 @@ limitations under the License. 
package fake import ( - "context" - json "encoding/json" - "fmt" - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" corev1 "k8s.io/client-go/applyconfigurations/core/v1" - testing "k8s.io/client-go/testing" + gentype "k8s.io/client-go/gentype" + typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1" ) -// FakePersistentVolumeClaims implements PersistentVolumeClaimInterface -type FakePersistentVolumeClaims struct { +// fakePersistentVolumeClaims implements PersistentVolumeClaimInterface +type fakePersistentVolumeClaims struct { + *gentype.FakeClientWithListAndApply[*v1.PersistentVolumeClaim, *v1.PersistentVolumeClaimList, *corev1.PersistentVolumeClaimApplyConfiguration] Fake *FakeCoreV1 - ns string -} - -var persistentvolumeclaimsResource = v1.SchemeGroupVersion.WithResource("persistentvolumeclaims") - -var persistentvolumeclaimsKind = v1.SchemeGroupVersion.WithKind("PersistentVolumeClaim") - -// Get takes name of the persistentVolumeClaim, and returns the corresponding persistentVolumeClaim object, and an error if there is any. -func (c *FakePersistentVolumeClaims) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.PersistentVolumeClaim, err error) { - emptyResult := &v1.PersistentVolumeClaim{} - obj, err := c.Fake. - Invokes(testing.NewGetActionWithOptions(persistentvolumeclaimsResource, c.ns, name, options), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1.PersistentVolumeClaim), err -} - -// List takes label and field selectors, and returns the list of PersistentVolumeClaims that match those selectors. -func (c *FakePersistentVolumeClaims) List(ctx context.Context, opts metav1.ListOptions) (result *v1.PersistentVolumeClaimList, err error) { - emptyResult := &v1.PersistentVolumeClaimList{} - obj, err := c.Fake. - Invokes(testing.NewListActionWithOptions(persistentvolumeclaimsResource, persistentvolumeclaimsKind, c.ns, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1.PersistentVolumeClaimList{ListMeta: obj.(*v1.PersistentVolumeClaimList).ListMeta} - for _, item := range obj.(*v1.PersistentVolumeClaimList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested persistentVolumeClaims. -func (c *FakePersistentVolumeClaims) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchActionWithOptions(persistentvolumeclaimsResource, c.ns, opts)) - -} - -// Create takes the representation of a persistentVolumeClaim and creates it. Returns the server's representation of the persistentVolumeClaim, and an error, if there is any. -func (c *FakePersistentVolumeClaims) Create(ctx context.Context, persistentVolumeClaim *v1.PersistentVolumeClaim, opts metav1.CreateOptions) (result *v1.PersistentVolumeClaim, err error) { - emptyResult := &v1.PersistentVolumeClaim{} - obj, err := c.Fake. 
- Invokes(testing.NewCreateActionWithOptions(persistentvolumeclaimsResource, c.ns, persistentVolumeClaim, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1.PersistentVolumeClaim), err -} - -// Update takes the representation of a persistentVolumeClaim and updates it. Returns the server's representation of the persistentVolumeClaim, and an error, if there is any. -func (c *FakePersistentVolumeClaims) Update(ctx context.Context, persistentVolumeClaim *v1.PersistentVolumeClaim, opts metav1.UpdateOptions) (result *v1.PersistentVolumeClaim, err error) { - emptyResult := &v1.PersistentVolumeClaim{} - obj, err := c.Fake. - Invokes(testing.NewUpdateActionWithOptions(persistentvolumeclaimsResource, c.ns, persistentVolumeClaim, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1.PersistentVolumeClaim), err } -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakePersistentVolumeClaims) UpdateStatus(ctx context.Context, persistentVolumeClaim *v1.PersistentVolumeClaim, opts metav1.UpdateOptions) (result *v1.PersistentVolumeClaim, err error) { - emptyResult := &v1.PersistentVolumeClaim{} - obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceActionWithOptions(persistentvolumeclaimsResource, "status", c.ns, persistentVolumeClaim, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1.PersistentVolumeClaim), err -} - -// Delete takes name of the persistentVolumeClaim and deletes it. Returns an error if one occurs. -func (c *FakePersistentVolumeClaims) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(persistentvolumeclaimsResource, c.ns, name, opts), &v1.PersistentVolumeClaim{}) - - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakePersistentVolumeClaims) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewDeleteCollectionActionWithOptions(persistentvolumeclaimsResource, c.ns, opts, listOpts) - - _, err := c.Fake.Invokes(action, &v1.PersistentVolumeClaimList{}) - return err -} - -// Patch applies the patch and returns the patched persistentVolumeClaim. -func (c *FakePersistentVolumeClaims) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.PersistentVolumeClaim, err error) { - emptyResult := &v1.PersistentVolumeClaim{} - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceActionWithOptions(persistentvolumeclaimsResource, c.ns, name, pt, data, opts, subresources...), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1.PersistentVolumeClaim), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied persistentVolumeClaim. 
-func (c *FakePersistentVolumeClaims) Apply(ctx context.Context, persistentVolumeClaim *corev1.PersistentVolumeClaimApplyConfiguration, opts metav1.ApplyOptions) (result *v1.PersistentVolumeClaim, err error) { - if persistentVolumeClaim == nil { - return nil, fmt.Errorf("persistentVolumeClaim provided to Apply must not be nil") - } - data, err := json.Marshal(persistentVolumeClaim) - if err != nil { - return nil, err - } - name := persistentVolumeClaim.Name - if name == nil { - return nil, fmt.Errorf("persistentVolumeClaim.Name must be provided to Apply") - } - emptyResult := &v1.PersistentVolumeClaim{} - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceActionWithOptions(persistentvolumeclaimsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1.PersistentVolumeClaim), err -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *FakePersistentVolumeClaims) ApplyStatus(ctx context.Context, persistentVolumeClaim *corev1.PersistentVolumeClaimApplyConfiguration, opts metav1.ApplyOptions) (result *v1.PersistentVolumeClaim, err error) { - if persistentVolumeClaim == nil { - return nil, fmt.Errorf("persistentVolumeClaim provided to Apply must not be nil") - } - data, err := json.Marshal(persistentVolumeClaim) - if err != nil { - return nil, err - } - name := persistentVolumeClaim.Name - if name == nil { - return nil, fmt.Errorf("persistentVolumeClaim.Name must be provided to Apply") - } - emptyResult := &v1.PersistentVolumeClaim{} - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceActionWithOptions(persistentvolumeclaimsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) - - if obj == nil { - return emptyResult, err +func newFakePersistentVolumeClaims(fake *FakeCoreV1, namespace string) typedcorev1.PersistentVolumeClaimInterface { + return &fakePersistentVolumeClaims{ + gentype.NewFakeClientWithListAndApply[*v1.PersistentVolumeClaim, *v1.PersistentVolumeClaimList, *corev1.PersistentVolumeClaimApplyConfiguration]( + fake.Fake, + namespace, + v1.SchemeGroupVersion.WithResource("persistentvolumeclaims"), + v1.SchemeGroupVersion.WithKind("PersistentVolumeClaim"), + func() *v1.PersistentVolumeClaim { return &v1.PersistentVolumeClaim{} }, + func() *v1.PersistentVolumeClaimList { return &v1.PersistentVolumeClaimList{} }, + func(dst, src *v1.PersistentVolumeClaimList) { dst.ListMeta = src.ListMeta }, + func(list *v1.PersistentVolumeClaimList) []*v1.PersistentVolumeClaim { + return gentype.ToPointerSlice(list.Items) + }, + func(list *v1.PersistentVolumeClaimList, items []*v1.PersistentVolumeClaim) { + list.Items = gentype.FromPointerSlice(items) + }, + ), + fake, } - return obj.(*v1.PersistentVolumeClaim), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_pod.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_pod.go index d2b46e8e3..7d353bcf1 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_pod.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_pod.go @@ -19,176 +19,44 @@ limitations under the License. 
package fake import ( - "context" - json "encoding/json" - "fmt" + context "context" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" corev1 "k8s.io/client-go/applyconfigurations/core/v1" + gentype "k8s.io/client-go/gentype" + typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1" testing "k8s.io/client-go/testing" ) -// FakePods implements PodInterface -type FakePods struct { +// fakePods implements PodInterface +type fakePods struct { + *gentype.FakeClientWithListAndApply[*v1.Pod, *v1.PodList, *corev1.PodApplyConfiguration] Fake *FakeCoreV1 - ns string } -var podsResource = v1.SchemeGroupVersion.WithResource("pods") - -var podsKind = v1.SchemeGroupVersion.WithKind("Pod") - -// Get takes name of the pod, and returns the corresponding pod object, and an error if there is any. -func (c *FakePods) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Pod, err error) { - emptyResult := &v1.Pod{} - obj, err := c.Fake. - Invokes(testing.NewGetActionWithOptions(podsResource, c.ns, name, options), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1.Pod), err -} - -// List takes label and field selectors, and returns the list of Pods that match those selectors. -func (c *FakePods) List(ctx context.Context, opts metav1.ListOptions) (result *v1.PodList, err error) { - emptyResult := &v1.PodList{} - obj, err := c.Fake. - Invokes(testing.NewListActionWithOptions(podsResource, podsKind, c.ns, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1.PodList{ListMeta: obj.(*v1.PodList).ListMeta} - for _, item := range obj.(*v1.PodList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested pods. -func (c *FakePods) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchActionWithOptions(podsResource, c.ns, opts)) - -} - -// Create takes the representation of a pod and creates it. Returns the server's representation of the pod, and an error, if there is any. -func (c *FakePods) Create(ctx context.Context, pod *v1.Pod, opts metav1.CreateOptions) (result *v1.Pod, err error) { - emptyResult := &v1.Pod{} - obj, err := c.Fake. - Invokes(testing.NewCreateActionWithOptions(podsResource, c.ns, pod, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1.Pod), err -} - -// Update takes the representation of a pod and updates it. Returns the server's representation of the pod, and an error, if there is any. -func (c *FakePods) Update(ctx context.Context, pod *v1.Pod, opts metav1.UpdateOptions) (result *v1.Pod, err error) { - emptyResult := &v1.Pod{} - obj, err := c.Fake. - Invokes(testing.NewUpdateActionWithOptions(podsResource, c.ns, pod, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1.Pod), err -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
-func (c *FakePods) UpdateStatus(ctx context.Context, pod *v1.Pod, opts metav1.UpdateOptions) (result *v1.Pod, err error) { - emptyResult := &v1.Pod{} - obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceActionWithOptions(podsResource, "status", c.ns, pod, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1.Pod), err -} - -// Delete takes name of the pod and deletes it. Returns an error if one occurs. -func (c *FakePods) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(podsResource, c.ns, name, opts), &v1.Pod{}) - - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakePods) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewDeleteCollectionActionWithOptions(podsResource, c.ns, opts, listOpts) - - _, err := c.Fake.Invokes(action, &v1.PodList{}) - return err -} - -// Patch applies the patch and returns the patched pod. -func (c *FakePods) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Pod, err error) { - emptyResult := &v1.Pod{} - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceActionWithOptions(podsResource, c.ns, name, pt, data, opts, subresources...), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1.Pod), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied pod. -func (c *FakePods) Apply(ctx context.Context, pod *corev1.PodApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Pod, err error) { - if pod == nil { - return nil, fmt.Errorf("pod provided to Apply must not be nil") - } - data, err := json.Marshal(pod) - if err != nil { - return nil, err - } - name := pod.Name - if name == nil { - return nil, fmt.Errorf("pod.Name must be provided to Apply") - } - emptyResult := &v1.Pod{} - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceActionWithOptions(podsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) - - if obj == nil { - return emptyResult, err +func newFakePods(fake *FakeCoreV1, namespace string) typedcorev1.PodInterface { + return &fakePods{ + gentype.NewFakeClientWithListAndApply[*v1.Pod, *v1.PodList, *corev1.PodApplyConfiguration]( + fake.Fake, + namespace, + v1.SchemeGroupVersion.WithResource("pods"), + v1.SchemeGroupVersion.WithKind("Pod"), + func() *v1.Pod { return &v1.Pod{} }, + func() *v1.PodList { return &v1.PodList{} }, + func(dst, src *v1.PodList) { dst.ListMeta = src.ListMeta }, + func(list *v1.PodList) []*v1.Pod { return gentype.ToPointerSlice(list.Items) }, + func(list *v1.PodList, items []*v1.Pod) { list.Items = gentype.FromPointerSlice(items) }, + ), + fake, } - return obj.(*v1.Pod), err } -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
-func (c *FakePods) ApplyStatus(ctx context.Context, pod *corev1.PodApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Pod, err error) { - if pod == nil { - return nil, fmt.Errorf("pod provided to Apply must not be nil") - } - data, err := json.Marshal(pod) - if err != nil { - return nil, err - } - name := pod.Name - if name == nil { - return nil, fmt.Errorf("pod.Name must be provided to Apply") - } +// UpdateEphemeralContainers takes the representation of a pod and updates it. Returns the server's representation of the pod, and an error, if there is any. +func (c *fakePods) UpdateEphemeralContainers(ctx context.Context, podName string, pod *v1.Pod, opts metav1.UpdateOptions) (result *v1.Pod, err error) { emptyResult := &v1.Pod{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceActionWithOptions(podsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) + Invokes(testing.NewUpdateSubresourceActionWithOptions(c.Resource(), "ephemeralcontainers", c.Namespace(), pod, opts), &v1.Pod{}) if obj == nil { return emptyResult, err @@ -196,11 +64,11 @@ func (c *FakePods) ApplyStatus(ctx context.Context, pod *corev1.PodApplyConfigur return obj.(*v1.Pod), err } -// UpdateEphemeralContainers takes the representation of a pod and updates it. Returns the server's representation of the pod, and an error, if there is any. -func (c *FakePods) UpdateEphemeralContainers(ctx context.Context, podName string, pod *v1.Pod, opts metav1.UpdateOptions) (result *v1.Pod, err error) { +// UpdateResize takes the representation of a pod and updates it. Returns the server's representation of the pod, and an error, if there is any. +func (c *fakePods) UpdateResize(ctx context.Context, podName string, pod *v1.Pod, opts metav1.UpdateOptions) (result *v1.Pod, err error) { emptyResult := &v1.Pod{} obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceActionWithOptions(podsResource, "ephemeralcontainers", c.ns, pod, opts), &v1.Pod{}) + Invokes(testing.NewUpdateSubresourceActionWithOptions(c.Resource(), "resize", c.Namespace(), pod, opts), &v1.Pod{}) if obj == nil { return emptyResult, err diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_pod_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_pod_expansion.go index c814cadb0..3fbb89ad4 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_pod_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_pod_expansion.go @@ -33,11 +33,11 @@ import ( core "k8s.io/client-go/testing" ) -func (c *FakePods) Bind(ctx context.Context, binding *v1.Binding, opts metav1.CreateOptions) error { +func (c *fakePods) Bind(ctx context.Context, binding *v1.Binding, opts metav1.CreateOptions) error { action := core.CreateActionImpl{} action.Verb = "create" action.Namespace = binding.Namespace - action.Resource = podsResource + action.Resource = c.Resource() action.Subresource = "binding" action.Object = binding @@ -45,9 +45,9 @@ func (c *FakePods) Bind(ctx context.Context, binding *v1.Binding, opts metav1.Cr return err } -func (c *FakePods) GetBinding(name string) (result *v1.Binding, err error) { +func (c *fakePods) GetBinding(name string) (result *v1.Binding, err error) { obj, err := c.Fake. 
- Invokes(core.NewGetSubresourceAction(podsResource, c.ns, "binding", name), &v1.Binding{}) + Invokes(core.NewGetSubresourceAction(c.Resource(), c.Namespace(), "binding", name), &v1.Binding{}) if obj == nil { return nil, err @@ -55,11 +55,11 @@ func (c *FakePods) GetBinding(name string) (result *v1.Binding, err error) { return obj.(*v1.Binding), err } -func (c *FakePods) GetLogs(name string, opts *v1.PodLogOptions) *restclient.Request { +func (c *fakePods) GetLogs(name string, opts *v1.PodLogOptions) *restclient.Request { action := core.GenericActionImpl{} action.Verb = "get" - action.Namespace = c.ns - action.Resource = podsResource + action.Namespace = c.Namespace() + action.Resource = c.Resource() action.Subresource = "log" action.Value = opts @@ -73,21 +73,21 @@ func (c *FakePods) GetLogs(name string, opts *v1.PodLogOptions) *restclient.Requ return resp, nil }), NegotiatedSerializer: scheme.Codecs.WithoutConversion(), - GroupVersion: podsKind.GroupVersion(), - VersionedAPIPath: fmt.Sprintf("/api/v1/namespaces/%s/pods/%s/log", c.ns, name), + GroupVersion: c.Kind().GroupVersion(), + VersionedAPIPath: fmt.Sprintf("/api/v1/namespaces/%s/pods/%s/log", c.Namespace(), name), } return fakeClient.Request() } -func (c *FakePods) Evict(ctx context.Context, eviction *policyv1beta1.Eviction) error { +func (c *fakePods) Evict(ctx context.Context, eviction *policyv1beta1.Eviction) error { return c.EvictV1beta1(ctx, eviction) } -func (c *FakePods) EvictV1(ctx context.Context, eviction *policyv1.Eviction) error { +func (c *fakePods) EvictV1(ctx context.Context, eviction *policyv1.Eviction) error { action := core.CreateActionImpl{} action.Verb = "create" - action.Namespace = c.ns - action.Resource = podsResource + action.Namespace = c.Namespace() + action.Resource = c.Resource() action.Subresource = "eviction" action.Object = eviction @@ -95,11 +95,11 @@ func (c *FakePods) EvictV1(ctx context.Context, eviction *policyv1.Eviction) err return err } -func (c *FakePods) EvictV1beta1(ctx context.Context, eviction *policyv1beta1.Eviction) error { +func (c *fakePods) EvictV1beta1(ctx context.Context, eviction *policyv1beta1.Eviction) error { action := core.CreateActionImpl{} action.Verb = "create" - action.Namespace = c.ns - action.Resource = podsResource + action.Namespace = c.Namespace() + action.Resource = c.Resource() action.Subresource = "eviction" action.Object = eviction @@ -107,6 +107,6 @@ func (c *FakePods) EvictV1beta1(ctx context.Context, eviction *policyv1beta1.Evi return err } -func (c *FakePods) ProxyGet(scheme, name, port, path string, params map[string]string) restclient.ResponseWrapper { - return c.Fake.InvokesProxy(core.NewProxyGetAction(podsResource, c.ns, scheme, name, port, path, params)) +func (c *fakePods) ProxyGet(scheme, name, port, path string, params map[string]string) restclient.ResponseWrapper { + return c.Fake.InvokesProxy(core.NewProxyGetAction(c.Resource(), c.Namespace(), scheme, name, port, path, params)) } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_podtemplate.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_podtemplate.go index dc9affdd0..d3ff2c412 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_podtemplate.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_podtemplate.go @@ -19,142 +19,31 @@ limitations under the License. 
package fake import ( - "context" - json "encoding/json" - "fmt" - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" corev1 "k8s.io/client-go/applyconfigurations/core/v1" - testing "k8s.io/client-go/testing" + gentype "k8s.io/client-go/gentype" + typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1" ) -// FakePodTemplates implements PodTemplateInterface -type FakePodTemplates struct { +// fakePodTemplates implements PodTemplateInterface +type fakePodTemplates struct { + *gentype.FakeClientWithListAndApply[*v1.PodTemplate, *v1.PodTemplateList, *corev1.PodTemplateApplyConfiguration] Fake *FakeCoreV1 - ns string -} - -var podtemplatesResource = v1.SchemeGroupVersion.WithResource("podtemplates") - -var podtemplatesKind = v1.SchemeGroupVersion.WithKind("PodTemplate") - -// Get takes name of the podTemplate, and returns the corresponding podTemplate object, and an error if there is any. -func (c *FakePodTemplates) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.PodTemplate, err error) { - emptyResult := &v1.PodTemplate{} - obj, err := c.Fake. - Invokes(testing.NewGetActionWithOptions(podtemplatesResource, c.ns, name, options), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1.PodTemplate), err -} - -// List takes label and field selectors, and returns the list of PodTemplates that match those selectors. -func (c *FakePodTemplates) List(ctx context.Context, opts metav1.ListOptions) (result *v1.PodTemplateList, err error) { - emptyResult := &v1.PodTemplateList{} - obj, err := c.Fake. - Invokes(testing.NewListActionWithOptions(podtemplatesResource, podtemplatesKind, c.ns, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1.PodTemplateList{ListMeta: obj.(*v1.PodTemplateList).ListMeta} - for _, item := range obj.(*v1.PodTemplateList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested podTemplates. -func (c *FakePodTemplates) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchActionWithOptions(podtemplatesResource, c.ns, opts)) - } -// Create takes the representation of a podTemplate and creates it. Returns the server's representation of the podTemplate, and an error, if there is any. -func (c *FakePodTemplates) Create(ctx context.Context, podTemplate *v1.PodTemplate, opts metav1.CreateOptions) (result *v1.PodTemplate, err error) { - emptyResult := &v1.PodTemplate{} - obj, err := c.Fake. - Invokes(testing.NewCreateActionWithOptions(podtemplatesResource, c.ns, podTemplate, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1.PodTemplate), err -} - -// Update takes the representation of a podTemplate and updates it. Returns the server's representation of the podTemplate, and an error, if there is any. -func (c *FakePodTemplates) Update(ctx context.Context, podTemplate *v1.PodTemplate, opts metav1.UpdateOptions) (result *v1.PodTemplate, err error) { - emptyResult := &v1.PodTemplate{} - obj, err := c.Fake. 
- Invokes(testing.NewUpdateActionWithOptions(podtemplatesResource, c.ns, podTemplate, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1.PodTemplate), err -} - -// Delete takes name of the podTemplate and deletes it. Returns an error if one occurs. -func (c *FakePodTemplates) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(podtemplatesResource, c.ns, name, opts), &v1.PodTemplate{}) - - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakePodTemplates) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewDeleteCollectionActionWithOptions(podtemplatesResource, c.ns, opts, listOpts) - - _, err := c.Fake.Invokes(action, &v1.PodTemplateList{}) - return err -} - -// Patch applies the patch and returns the patched podTemplate. -func (c *FakePodTemplates) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.PodTemplate, err error) { - emptyResult := &v1.PodTemplate{} - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceActionWithOptions(podtemplatesResource, c.ns, name, pt, data, opts, subresources...), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1.PodTemplate), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied podTemplate. -func (c *FakePodTemplates) Apply(ctx context.Context, podTemplate *corev1.PodTemplateApplyConfiguration, opts metav1.ApplyOptions) (result *v1.PodTemplate, err error) { - if podTemplate == nil { - return nil, fmt.Errorf("podTemplate provided to Apply must not be nil") - } - data, err := json.Marshal(podTemplate) - if err != nil { - return nil, err - } - name := podTemplate.Name - if name == nil { - return nil, fmt.Errorf("podTemplate.Name must be provided to Apply") - } - emptyResult := &v1.PodTemplate{} - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceActionWithOptions(podtemplatesResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) - - if obj == nil { - return emptyResult, err +func newFakePodTemplates(fake *FakeCoreV1, namespace string) typedcorev1.PodTemplateInterface { + return &fakePodTemplates{ + gentype.NewFakeClientWithListAndApply[*v1.PodTemplate, *v1.PodTemplateList, *corev1.PodTemplateApplyConfiguration]( + fake.Fake, + namespace, + v1.SchemeGroupVersion.WithResource("podtemplates"), + v1.SchemeGroupVersion.WithKind("PodTemplate"), + func() *v1.PodTemplate { return &v1.PodTemplate{} }, + func() *v1.PodTemplateList { return &v1.PodTemplateList{} }, + func(dst, src *v1.PodTemplateList) { dst.ListMeta = src.ListMeta }, + func(list *v1.PodTemplateList) []*v1.PodTemplate { return gentype.ToPointerSlice(list.Items) }, + func(list *v1.PodTemplateList, items []*v1.PodTemplate) { list.Items = gentype.FromPointerSlice(items) }, + ), + fake, } - return obj.(*v1.PodTemplate), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_replicationcontroller.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_replicationcontroller.go index 6b3497f08..454f09962 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_replicationcontroller.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_replicationcontroller.go @@ -19,189 +19,49 @@ limitations under the License. 
package fake import ( - "context" - json "encoding/json" - "fmt" + context "context" autoscalingv1 "k8s.io/api/autoscaling/v1" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" corev1 "k8s.io/client-go/applyconfigurations/core/v1" + gentype "k8s.io/client-go/gentype" + typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1" testing "k8s.io/client-go/testing" ) -// FakeReplicationControllers implements ReplicationControllerInterface -type FakeReplicationControllers struct { +// fakeReplicationControllers implements ReplicationControllerInterface +type fakeReplicationControllers struct { + *gentype.FakeClientWithListAndApply[*v1.ReplicationController, *v1.ReplicationControllerList, *corev1.ReplicationControllerApplyConfiguration] Fake *FakeCoreV1 - ns string } -var replicationcontrollersResource = v1.SchemeGroupVersion.WithResource("replicationcontrollers") - -var replicationcontrollersKind = v1.SchemeGroupVersion.WithKind("ReplicationController") - -// Get takes name of the replicationController, and returns the corresponding replicationController object, and an error if there is any. -func (c *FakeReplicationControllers) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ReplicationController, err error) { - emptyResult := &v1.ReplicationController{} - obj, err := c.Fake. - Invokes(testing.NewGetActionWithOptions(replicationcontrollersResource, c.ns, name, options), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1.ReplicationController), err -} - -// List takes label and field selectors, and returns the list of ReplicationControllers that match those selectors. -func (c *FakeReplicationControllers) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ReplicationControllerList, err error) { - emptyResult := &v1.ReplicationControllerList{} - obj, err := c.Fake. - Invokes(testing.NewListActionWithOptions(replicationcontrollersResource, replicationcontrollersKind, c.ns, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1.ReplicationControllerList{ListMeta: obj.(*v1.ReplicationControllerList).ListMeta} - for _, item := range obj.(*v1.ReplicationControllerList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested replicationControllers. -func (c *FakeReplicationControllers) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchActionWithOptions(replicationcontrollersResource, c.ns, opts)) - -} - -// Create takes the representation of a replicationController and creates it. Returns the server's representation of the replicationController, and an error, if there is any. -func (c *FakeReplicationControllers) Create(ctx context.Context, replicationController *v1.ReplicationController, opts metav1.CreateOptions) (result *v1.ReplicationController, err error) { - emptyResult := &v1.ReplicationController{} - obj, err := c.Fake. 
- Invokes(testing.NewCreateActionWithOptions(replicationcontrollersResource, c.ns, replicationController, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1.ReplicationController), err -} - -// Update takes the representation of a replicationController and updates it. Returns the server's representation of the replicationController, and an error, if there is any. -func (c *FakeReplicationControllers) Update(ctx context.Context, replicationController *v1.ReplicationController, opts metav1.UpdateOptions) (result *v1.ReplicationController, err error) { - emptyResult := &v1.ReplicationController{} - obj, err := c.Fake. - Invokes(testing.NewUpdateActionWithOptions(replicationcontrollersResource, c.ns, replicationController, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1.ReplicationController), err -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeReplicationControllers) UpdateStatus(ctx context.Context, replicationController *v1.ReplicationController, opts metav1.UpdateOptions) (result *v1.ReplicationController, err error) { - emptyResult := &v1.ReplicationController{} - obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceActionWithOptions(replicationcontrollersResource, "status", c.ns, replicationController, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1.ReplicationController), err -} - -// Delete takes name of the replicationController and deletes it. Returns an error if one occurs. -func (c *FakeReplicationControllers) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(replicationcontrollersResource, c.ns, name, opts), &v1.ReplicationController{}) - - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeReplicationControllers) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewDeleteCollectionActionWithOptions(replicationcontrollersResource, c.ns, opts, listOpts) - - _, err := c.Fake.Invokes(action, &v1.ReplicationControllerList{}) - return err -} - -// Patch applies the patch and returns the patched replicationController. -func (c *FakeReplicationControllers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ReplicationController, err error) { - emptyResult := &v1.ReplicationController{} - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceActionWithOptions(replicationcontrollersResource, c.ns, name, pt, data, opts, subresources...), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1.ReplicationController), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied replicationController. 
-func (c *FakeReplicationControllers) Apply(ctx context.Context, replicationController *corev1.ReplicationControllerApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ReplicationController, err error) { - if replicationController == nil { - return nil, fmt.Errorf("replicationController provided to Apply must not be nil") - } - data, err := json.Marshal(replicationController) - if err != nil { - return nil, err - } - name := replicationController.Name - if name == nil { - return nil, fmt.Errorf("replicationController.Name must be provided to Apply") - } - emptyResult := &v1.ReplicationController{} - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceActionWithOptions(replicationcontrollersResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1.ReplicationController), err -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *FakeReplicationControllers) ApplyStatus(ctx context.Context, replicationController *corev1.ReplicationControllerApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ReplicationController, err error) { - if replicationController == nil { - return nil, fmt.Errorf("replicationController provided to Apply must not be nil") - } - data, err := json.Marshal(replicationController) - if err != nil { - return nil, err - } - name := replicationController.Name - if name == nil { - return nil, fmt.Errorf("replicationController.Name must be provided to Apply") - } - emptyResult := &v1.ReplicationController{} - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceActionWithOptions(replicationcontrollersResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) - - if obj == nil { - return emptyResult, err +func newFakeReplicationControllers(fake *FakeCoreV1, namespace string) typedcorev1.ReplicationControllerInterface { + return &fakeReplicationControllers{ + gentype.NewFakeClientWithListAndApply[*v1.ReplicationController, *v1.ReplicationControllerList, *corev1.ReplicationControllerApplyConfiguration]( + fake.Fake, + namespace, + v1.SchemeGroupVersion.WithResource("replicationcontrollers"), + v1.SchemeGroupVersion.WithKind("ReplicationController"), + func() *v1.ReplicationController { return &v1.ReplicationController{} }, + func() *v1.ReplicationControllerList { return &v1.ReplicationControllerList{} }, + func(dst, src *v1.ReplicationControllerList) { dst.ListMeta = src.ListMeta }, + func(list *v1.ReplicationControllerList) []*v1.ReplicationController { + return gentype.ToPointerSlice(list.Items) + }, + func(list *v1.ReplicationControllerList, items []*v1.ReplicationController) { + list.Items = gentype.FromPointerSlice(items) + }, + ), + fake, } - return obj.(*v1.ReplicationController), err } // GetScale takes name of the replicationController, and returns the corresponding scale object, and an error if there is any. -func (c *FakeReplicationControllers) GetScale(ctx context.Context, replicationControllerName string, options metav1.GetOptions) (result *autoscalingv1.Scale, err error) { +func (c *fakeReplicationControllers) GetScale(ctx context.Context, replicationControllerName string, options metav1.GetOptions) (result *autoscalingv1.Scale, err error) { emptyResult := &autoscalingv1.Scale{} obj, err := c.Fake. 
- Invokes(testing.NewGetSubresourceActionWithOptions(replicationcontrollersResource, c.ns, "scale", replicationControllerName, options), emptyResult) + Invokes(testing.NewGetSubresourceActionWithOptions(c.Resource(), c.Namespace(), "scale", replicationControllerName, options), emptyResult) if obj == nil { return emptyResult, err @@ -210,10 +70,10 @@ func (c *FakeReplicationControllers) GetScale(ctx context.Context, replicationCo } // UpdateScale takes the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any. -func (c *FakeReplicationControllers) UpdateScale(ctx context.Context, replicationControllerName string, scale *autoscalingv1.Scale, opts metav1.UpdateOptions) (result *autoscalingv1.Scale, err error) { +func (c *fakeReplicationControllers) UpdateScale(ctx context.Context, replicationControllerName string, scale *autoscalingv1.Scale, opts metav1.UpdateOptions) (result *autoscalingv1.Scale, err error) { emptyResult := &autoscalingv1.Scale{} obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceActionWithOptions(replicationcontrollersResource, "scale", c.ns, scale, opts), &autoscalingv1.Scale{}) + Invokes(testing.NewUpdateSubresourceActionWithOptions(c.Resource(), "scale", c.Namespace(), scale, opts), &autoscalingv1.Scale{}) if obj == nil { return emptyResult, err diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_resourcequota.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_resourcequota.go index 5e2e02afc..4c98389d3 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_resourcequota.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_resourcequota.go @@ -19,179 +19,33 @@ limitations under the License. package fake import ( - "context" - json "encoding/json" - "fmt" - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" corev1 "k8s.io/client-go/applyconfigurations/core/v1" - testing "k8s.io/client-go/testing" + gentype "k8s.io/client-go/gentype" + typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1" ) -// FakeResourceQuotas implements ResourceQuotaInterface -type FakeResourceQuotas struct { +// fakeResourceQuotas implements ResourceQuotaInterface +type fakeResourceQuotas struct { + *gentype.FakeClientWithListAndApply[*v1.ResourceQuota, *v1.ResourceQuotaList, *corev1.ResourceQuotaApplyConfiguration] Fake *FakeCoreV1 - ns string -} - -var resourcequotasResource = v1.SchemeGroupVersion.WithResource("resourcequotas") - -var resourcequotasKind = v1.SchemeGroupVersion.WithKind("ResourceQuota") - -// Get takes name of the resourceQuota, and returns the corresponding resourceQuota object, and an error if there is any. -func (c *FakeResourceQuotas) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ResourceQuota, err error) { - emptyResult := &v1.ResourceQuota{} - obj, err := c.Fake. - Invokes(testing.NewGetActionWithOptions(resourcequotasResource, c.ns, name, options), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1.ResourceQuota), err -} - -// List takes label and field selectors, and returns the list of ResourceQuotas that match those selectors. -func (c *FakeResourceQuotas) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ResourceQuotaList, err error) { - emptyResult := &v1.ResourceQuotaList{} - obj, err := c.Fake. 
- Invokes(testing.NewListActionWithOptions(resourcequotasResource, resourcequotasKind, c.ns, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1.ResourceQuotaList{ListMeta: obj.(*v1.ResourceQuotaList).ListMeta} - for _, item := range obj.(*v1.ResourceQuotaList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested resourceQuotas. -func (c *FakeResourceQuotas) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchActionWithOptions(resourcequotasResource, c.ns, opts)) - -} - -// Create takes the representation of a resourceQuota and creates it. Returns the server's representation of the resourceQuota, and an error, if there is any. -func (c *FakeResourceQuotas) Create(ctx context.Context, resourceQuota *v1.ResourceQuota, opts metav1.CreateOptions) (result *v1.ResourceQuota, err error) { - emptyResult := &v1.ResourceQuota{} - obj, err := c.Fake. - Invokes(testing.NewCreateActionWithOptions(resourcequotasResource, c.ns, resourceQuota, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1.ResourceQuota), err -} - -// Update takes the representation of a resourceQuota and updates it. Returns the server's representation of the resourceQuota, and an error, if there is any. -func (c *FakeResourceQuotas) Update(ctx context.Context, resourceQuota *v1.ResourceQuota, opts metav1.UpdateOptions) (result *v1.ResourceQuota, err error) { - emptyResult := &v1.ResourceQuota{} - obj, err := c.Fake. - Invokes(testing.NewUpdateActionWithOptions(resourcequotasResource, c.ns, resourceQuota, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1.ResourceQuota), err } -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeResourceQuotas) UpdateStatus(ctx context.Context, resourceQuota *v1.ResourceQuota, opts metav1.UpdateOptions) (result *v1.ResourceQuota, err error) { - emptyResult := &v1.ResourceQuota{} - obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceActionWithOptions(resourcequotasResource, "status", c.ns, resourceQuota, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1.ResourceQuota), err -} - -// Delete takes name of the resourceQuota and deletes it. Returns an error if one occurs. -func (c *FakeResourceQuotas) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(resourcequotasResource, c.ns, name, opts), &v1.ResourceQuota{}) - - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeResourceQuotas) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewDeleteCollectionActionWithOptions(resourcequotasResource, c.ns, opts, listOpts) - - _, err := c.Fake.Invokes(action, &v1.ResourceQuotaList{}) - return err -} - -// Patch applies the patch and returns the patched resourceQuota. 
-func (c *FakeResourceQuotas) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ResourceQuota, err error) { - emptyResult := &v1.ResourceQuota{} - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceActionWithOptions(resourcequotasResource, c.ns, name, pt, data, opts, subresources...), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1.ResourceQuota), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied resourceQuota. -func (c *FakeResourceQuotas) Apply(ctx context.Context, resourceQuota *corev1.ResourceQuotaApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ResourceQuota, err error) { - if resourceQuota == nil { - return nil, fmt.Errorf("resourceQuota provided to Apply must not be nil") - } - data, err := json.Marshal(resourceQuota) - if err != nil { - return nil, err - } - name := resourceQuota.Name - if name == nil { - return nil, fmt.Errorf("resourceQuota.Name must be provided to Apply") - } - emptyResult := &v1.ResourceQuota{} - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceActionWithOptions(resourcequotasResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1.ResourceQuota), err -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *FakeResourceQuotas) ApplyStatus(ctx context.Context, resourceQuota *corev1.ResourceQuotaApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ResourceQuota, err error) { - if resourceQuota == nil { - return nil, fmt.Errorf("resourceQuota provided to Apply must not be nil") - } - data, err := json.Marshal(resourceQuota) - if err != nil { - return nil, err - } - name := resourceQuota.Name - if name == nil { - return nil, fmt.Errorf("resourceQuota.Name must be provided to Apply") - } - emptyResult := &v1.ResourceQuota{} - obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceActionWithOptions(resourcequotasResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) - - if obj == nil { - return emptyResult, err +func newFakeResourceQuotas(fake *FakeCoreV1, namespace string) typedcorev1.ResourceQuotaInterface { + return &fakeResourceQuotas{ + gentype.NewFakeClientWithListAndApply[*v1.ResourceQuota, *v1.ResourceQuotaList, *corev1.ResourceQuotaApplyConfiguration]( + fake.Fake, + namespace, + v1.SchemeGroupVersion.WithResource("resourcequotas"), + v1.SchemeGroupVersion.WithKind("ResourceQuota"), + func() *v1.ResourceQuota { return &v1.ResourceQuota{} }, + func() *v1.ResourceQuotaList { return &v1.ResourceQuotaList{} }, + func(dst, src *v1.ResourceQuotaList) { dst.ListMeta = src.ListMeta }, + func(list *v1.ResourceQuotaList) []*v1.ResourceQuota { return gentype.ToPointerSlice(list.Items) }, + func(list *v1.ResourceQuotaList, items []*v1.ResourceQuota) { + list.Items = gentype.FromPointerSlice(items) + }, + ), + fake, } - return obj.(*v1.ResourceQuota), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_secret.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_secret.go index ec0fc65b5..779c12c38 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_secret.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_secret.go @@ -19,142 +19,31 @@ limitations under the License. package fake import ( - "context" - json "encoding/json" - "fmt" - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" corev1 "k8s.io/client-go/applyconfigurations/core/v1" - testing "k8s.io/client-go/testing" + gentype "k8s.io/client-go/gentype" + typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1" ) -// FakeSecrets implements SecretInterface -type FakeSecrets struct { +// fakeSecrets implements SecretInterface +type fakeSecrets struct { + *gentype.FakeClientWithListAndApply[*v1.Secret, *v1.SecretList, *corev1.SecretApplyConfiguration] Fake *FakeCoreV1 - ns string -} - -var secretsResource = v1.SchemeGroupVersion.WithResource("secrets") - -var secretsKind = v1.SchemeGroupVersion.WithKind("Secret") - -// Get takes name of the secret, and returns the corresponding secret object, and an error if there is any. -func (c *FakeSecrets) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Secret, err error) { - emptyResult := &v1.Secret{} - obj, err := c.Fake. - Invokes(testing.NewGetActionWithOptions(secretsResource, c.ns, name, options), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1.Secret), err -} - -// List takes label and field selectors, and returns the list of Secrets that match those selectors. -func (c *FakeSecrets) List(ctx context.Context, opts metav1.ListOptions) (result *v1.SecretList, err error) { - emptyResult := &v1.SecretList{} - obj, err := c.Fake. 
- Invokes(testing.NewListActionWithOptions(secretsResource, secretsKind, c.ns, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1.SecretList{ListMeta: obj.(*v1.SecretList).ListMeta} - for _, item := range obj.(*v1.SecretList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested secrets. -func (c *FakeSecrets) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchActionWithOptions(secretsResource, c.ns, opts)) - } -// Create takes the representation of a secret and creates it. Returns the server's representation of the secret, and an error, if there is any. -func (c *FakeSecrets) Create(ctx context.Context, secret *v1.Secret, opts metav1.CreateOptions) (result *v1.Secret, err error) { - emptyResult := &v1.Secret{} - obj, err := c.Fake. - Invokes(testing.NewCreateActionWithOptions(secretsResource, c.ns, secret, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1.Secret), err -} - -// Update takes the representation of a secret and updates it. Returns the server's representation of the secret, and an error, if there is any. -func (c *FakeSecrets) Update(ctx context.Context, secret *v1.Secret, opts metav1.UpdateOptions) (result *v1.Secret, err error) { - emptyResult := &v1.Secret{} - obj, err := c.Fake. - Invokes(testing.NewUpdateActionWithOptions(secretsResource, c.ns, secret, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1.Secret), err -} - -// Delete takes name of the secret and deletes it. Returns an error if one occurs. -func (c *FakeSecrets) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(secretsResource, c.ns, name, opts), &v1.Secret{}) - - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeSecrets) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewDeleteCollectionActionWithOptions(secretsResource, c.ns, opts, listOpts) - - _, err := c.Fake.Invokes(action, &v1.SecretList{}) - return err -} - -// Patch applies the patch and returns the patched secret. -func (c *FakeSecrets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Secret, err error) { - emptyResult := &v1.Secret{} - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceActionWithOptions(secretsResource, c.ns, name, pt, data, opts, subresources...), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1.Secret), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied secret. -func (c *FakeSecrets) Apply(ctx context.Context, secret *corev1.SecretApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Secret, err error) { - if secret == nil { - return nil, fmt.Errorf("secret provided to Apply must not be nil") - } - data, err := json.Marshal(secret) - if err != nil { - return nil, err - } - name := secret.Name - if name == nil { - return nil, fmt.Errorf("secret.Name must be provided to Apply") - } - emptyResult := &v1.Secret{} - obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceActionWithOptions(secretsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) - - if obj == nil { - return emptyResult, err +func newFakeSecrets(fake *FakeCoreV1, namespace string) typedcorev1.SecretInterface { + return &fakeSecrets{ + gentype.NewFakeClientWithListAndApply[*v1.Secret, *v1.SecretList, *corev1.SecretApplyConfiguration]( + fake.Fake, + namespace, + v1.SchemeGroupVersion.WithResource("secrets"), + v1.SchemeGroupVersion.WithKind("Secret"), + func() *v1.Secret { return &v1.Secret{} }, + func() *v1.SecretList { return &v1.SecretList{} }, + func(dst, src *v1.SecretList) { dst.ListMeta = src.ListMeta }, + func(list *v1.SecretList) []*v1.Secret { return gentype.ToPointerSlice(list.Items) }, + func(list *v1.SecretList, items []*v1.Secret) { list.Items = gentype.FromPointerSlice(items) }, + ), + fake, } - return obj.(*v1.Secret), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_service.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_service.go index 2a3cf45fb..6bab944a4 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_service.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_service.go @@ -19,171 +19,31 @@ limitations under the License. package fake import ( - "context" - json "encoding/json" - "fmt" - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" corev1 "k8s.io/client-go/applyconfigurations/core/v1" - testing "k8s.io/client-go/testing" + gentype "k8s.io/client-go/gentype" + typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1" ) -// FakeServices implements ServiceInterface -type FakeServices struct { +// fakeServices implements ServiceInterface +type fakeServices struct { + *gentype.FakeClientWithListAndApply[*v1.Service, *v1.ServiceList, *corev1.ServiceApplyConfiguration] Fake *FakeCoreV1 - ns string -} - -var servicesResource = v1.SchemeGroupVersion.WithResource("services") - -var servicesKind = v1.SchemeGroupVersion.WithKind("Service") - -// Get takes name of the service, and returns the corresponding service object, and an error if there is any. -func (c *FakeServices) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Service, err error) { - emptyResult := &v1.Service{} - obj, err := c.Fake. - Invokes(testing.NewGetActionWithOptions(servicesResource, c.ns, name, options), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1.Service), err -} - -// List takes label and field selectors, and returns the list of Services that match those selectors. -func (c *FakeServices) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ServiceList, err error) { - emptyResult := &v1.ServiceList{} - obj, err := c.Fake. - Invokes(testing.NewListActionWithOptions(servicesResource, servicesKind, c.ns, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1.ServiceList{ListMeta: obj.(*v1.ServiceList).ListMeta} - for _, item := range obj.(*v1.ServiceList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested services. 
-func (c *FakeServices) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchActionWithOptions(servicesResource, c.ns, opts)) - -} - -// Create takes the representation of a service and creates it. Returns the server's representation of the service, and an error, if there is any. -func (c *FakeServices) Create(ctx context.Context, service *v1.Service, opts metav1.CreateOptions) (result *v1.Service, err error) { - emptyResult := &v1.Service{} - obj, err := c.Fake. - Invokes(testing.NewCreateActionWithOptions(servicesResource, c.ns, service, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1.Service), err } -// Update takes the representation of a service and updates it. Returns the server's representation of the service, and an error, if there is any. -func (c *FakeServices) Update(ctx context.Context, service *v1.Service, opts metav1.UpdateOptions) (result *v1.Service, err error) { - emptyResult := &v1.Service{} - obj, err := c.Fake. - Invokes(testing.NewUpdateActionWithOptions(servicesResource, c.ns, service, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1.Service), err -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeServices) UpdateStatus(ctx context.Context, service *v1.Service, opts metav1.UpdateOptions) (result *v1.Service, err error) { - emptyResult := &v1.Service{} - obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceActionWithOptions(servicesResource, "status", c.ns, service, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1.Service), err -} - -// Delete takes name of the service and deletes it. Returns an error if one occurs. -func (c *FakeServices) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(servicesResource, c.ns, name, opts), &v1.Service{}) - - return err -} - -// Patch applies the patch and returns the patched service. -func (c *FakeServices) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Service, err error) { - emptyResult := &v1.Service{} - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceActionWithOptions(servicesResource, c.ns, name, pt, data, opts, subresources...), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1.Service), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied service. -func (c *FakeServices) Apply(ctx context.Context, service *corev1.ServiceApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Service, err error) { - if service == nil { - return nil, fmt.Errorf("service provided to Apply must not be nil") - } - data, err := json.Marshal(service) - if err != nil { - return nil, err - } - name := service.Name - if name == nil { - return nil, fmt.Errorf("service.Name must be provided to Apply") - } - emptyResult := &v1.Service{} - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceActionWithOptions(servicesResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1.Service), err -} - -// ApplyStatus was generated because the type contains a Status member. 
-// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *FakeServices) ApplyStatus(ctx context.Context, service *corev1.ServiceApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Service, err error) { - if service == nil { - return nil, fmt.Errorf("service provided to Apply must not be nil") - } - data, err := json.Marshal(service) - if err != nil { - return nil, err - } - name := service.Name - if name == nil { - return nil, fmt.Errorf("service.Name must be provided to Apply") - } - emptyResult := &v1.Service{} - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceActionWithOptions(servicesResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) - - if obj == nil { - return emptyResult, err +func newFakeServices(fake *FakeCoreV1, namespace string) typedcorev1.ServiceInterface { + return &fakeServices{ + gentype.NewFakeClientWithListAndApply[*v1.Service, *v1.ServiceList, *corev1.ServiceApplyConfiguration]( + fake.Fake, + namespace, + v1.SchemeGroupVersion.WithResource("services"), + v1.SchemeGroupVersion.WithKind("Service"), + func() *v1.Service { return &v1.Service{} }, + func() *v1.ServiceList { return &v1.ServiceList{} }, + func(dst, src *v1.ServiceList) { dst.ListMeta = src.ListMeta }, + func(list *v1.ServiceList) []*v1.Service { return gentype.ToPointerSlice(list.Items) }, + func(list *v1.ServiceList, items []*v1.Service) { list.Items = gentype.FromPointerSlice(items) }, + ), + fake, } - return obj.(*v1.Service), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_service_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_service_expansion.go index 92e4930d7..ebd39c460 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_service_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_service_expansion.go @@ -21,6 +21,6 @@ import ( core "k8s.io/client-go/testing" ) -func (c *FakeServices) ProxyGet(scheme, name, port, path string, params map[string]string) restclient.ResponseWrapper { - return c.Fake.InvokesProxy(core.NewProxyGetAction(servicesResource, c.ns, scheme, name, port, path, params)) +func (c *fakeServices) ProxyGet(scheme, name, port, path string, params map[string]string) restclient.ResponseWrapper { + return c.Fake.InvokesProxy(core.NewProxyGetAction(c.Resource(), c.Namespace(), scheme, name, port, path, params)) } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_serviceaccount.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_serviceaccount.go index f3ad8d40f..76df9d62b 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_serviceaccount.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_serviceaccount.go @@ -19,152 +19,47 @@ limitations under the License. 
package fake import ( - "context" - json "encoding/json" - "fmt" + context "context" authenticationv1 "k8s.io/api/authentication/v1" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" corev1 "k8s.io/client-go/applyconfigurations/core/v1" + gentype "k8s.io/client-go/gentype" + typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1" testing "k8s.io/client-go/testing" ) -// FakeServiceAccounts implements ServiceAccountInterface -type FakeServiceAccounts struct { +// fakeServiceAccounts implements ServiceAccountInterface +type fakeServiceAccounts struct { + *gentype.FakeClientWithListAndApply[*v1.ServiceAccount, *v1.ServiceAccountList, *corev1.ServiceAccountApplyConfiguration] Fake *FakeCoreV1 - ns string } -var serviceaccountsResource = v1.SchemeGroupVersion.WithResource("serviceaccounts") - -var serviceaccountsKind = v1.SchemeGroupVersion.WithKind("ServiceAccount") - -// Get takes name of the serviceAccount, and returns the corresponding serviceAccount object, and an error if there is any. -func (c *FakeServiceAccounts) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ServiceAccount, err error) { - emptyResult := &v1.ServiceAccount{} - obj, err := c.Fake. - Invokes(testing.NewGetActionWithOptions(serviceaccountsResource, c.ns, name, options), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1.ServiceAccount), err -} - -// List takes label and field selectors, and returns the list of ServiceAccounts that match those selectors. -func (c *FakeServiceAccounts) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ServiceAccountList, err error) { - emptyResult := &v1.ServiceAccountList{} - obj, err := c.Fake. - Invokes(testing.NewListActionWithOptions(serviceaccountsResource, serviceaccountsKind, c.ns, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1.ServiceAccountList{ListMeta: obj.(*v1.ServiceAccountList).ListMeta} - for _, item := range obj.(*v1.ServiceAccountList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested serviceAccounts. -func (c *FakeServiceAccounts) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchActionWithOptions(serviceaccountsResource, c.ns, opts)) - -} - -// Create takes the representation of a serviceAccount and creates it. Returns the server's representation of the serviceAccount, and an error, if there is any. -func (c *FakeServiceAccounts) Create(ctx context.Context, serviceAccount *v1.ServiceAccount, opts metav1.CreateOptions) (result *v1.ServiceAccount, err error) { - emptyResult := &v1.ServiceAccount{} - obj, err := c.Fake. - Invokes(testing.NewCreateActionWithOptions(serviceaccountsResource, c.ns, serviceAccount, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1.ServiceAccount), err -} - -// Update takes the representation of a serviceAccount and updates it. Returns the server's representation of the serviceAccount, and an error, if there is any. 
-func (c *FakeServiceAccounts) Update(ctx context.Context, serviceAccount *v1.ServiceAccount, opts metav1.UpdateOptions) (result *v1.ServiceAccount, err error) { - emptyResult := &v1.ServiceAccount{} - obj, err := c.Fake. - Invokes(testing.NewUpdateActionWithOptions(serviceaccountsResource, c.ns, serviceAccount, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1.ServiceAccount), err -} - -// Delete takes name of the serviceAccount and deletes it. Returns an error if one occurs. -func (c *FakeServiceAccounts) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(serviceaccountsResource, c.ns, name, opts), &v1.ServiceAccount{}) - - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeServiceAccounts) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewDeleteCollectionActionWithOptions(serviceaccountsResource, c.ns, opts, listOpts) - - _, err := c.Fake.Invokes(action, &v1.ServiceAccountList{}) - return err -} - -// Patch applies the patch and returns the patched serviceAccount. -func (c *FakeServiceAccounts) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ServiceAccount, err error) { - emptyResult := &v1.ServiceAccount{} - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceActionWithOptions(serviceaccountsResource, c.ns, name, pt, data, opts, subresources...), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1.ServiceAccount), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied serviceAccount. -func (c *FakeServiceAccounts) Apply(ctx context.Context, serviceAccount *corev1.ServiceAccountApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ServiceAccount, err error) { - if serviceAccount == nil { - return nil, fmt.Errorf("serviceAccount provided to Apply must not be nil") - } - data, err := json.Marshal(serviceAccount) - if err != nil { - return nil, err - } - name := serviceAccount.Name - if name == nil { - return nil, fmt.Errorf("serviceAccount.Name must be provided to Apply") - } - emptyResult := &v1.ServiceAccount{} - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceActionWithOptions(serviceaccountsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) - - if obj == nil { - return emptyResult, err +func newFakeServiceAccounts(fake *FakeCoreV1, namespace string) typedcorev1.ServiceAccountInterface { + return &fakeServiceAccounts{ + gentype.NewFakeClientWithListAndApply[*v1.ServiceAccount, *v1.ServiceAccountList, *corev1.ServiceAccountApplyConfiguration]( + fake.Fake, + namespace, + v1.SchemeGroupVersion.WithResource("serviceaccounts"), + v1.SchemeGroupVersion.WithKind("ServiceAccount"), + func() *v1.ServiceAccount { return &v1.ServiceAccount{} }, + func() *v1.ServiceAccountList { return &v1.ServiceAccountList{} }, + func(dst, src *v1.ServiceAccountList) { dst.ListMeta = src.ListMeta }, + func(list *v1.ServiceAccountList) []*v1.ServiceAccount { return gentype.ToPointerSlice(list.Items) }, + func(list *v1.ServiceAccountList, items []*v1.ServiceAccount) { + list.Items = gentype.FromPointerSlice(items) + }, + ), + fake, } - return obj.(*v1.ServiceAccount), err } // CreateToken takes the representation of a tokenRequest and creates it. 
Returns the server's representation of the tokenRequest, and an error, if there is any. -func (c *FakeServiceAccounts) CreateToken(ctx context.Context, serviceAccountName string, tokenRequest *authenticationv1.TokenRequest, opts metav1.CreateOptions) (result *authenticationv1.TokenRequest, err error) { +func (c *fakeServiceAccounts) CreateToken(ctx context.Context, serviceAccountName string, tokenRequest *authenticationv1.TokenRequest, opts metav1.CreateOptions) (result *authenticationv1.TokenRequest, err error) { emptyResult := &authenticationv1.TokenRequest{} obj, err := c.Fake. - Invokes(testing.NewCreateSubresourceActionWithOptions(serviceaccountsResource, serviceAccountName, "token", c.ns, tokenRequest, opts), emptyResult) + Invokes(testing.NewCreateSubresourceActionWithOptions(c.Resource(), serviceAccountName, "token", c.Namespace(), tokenRequest, opts), emptyResult) if obj == nil { return emptyResult, err diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/limitrange.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/limitrange.go index f8e4048f9..51fa11d1b 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/limitrange.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/limitrange.go @@ -19,13 +19,13 @@ limitations under the License. package v1 import ( - "context" + context "context" - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - corev1 "k8s.io/client-go/applyconfigurations/core/v1" + applyconfigurationscorev1 "k8s.io/client-go/applyconfigurations/core/v1" gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" ) @@ -38,32 +38,34 @@ type LimitRangesGetter interface { // LimitRangeInterface has methods to work with LimitRange resources. 
type LimitRangeInterface interface { - Create(ctx context.Context, limitRange *v1.LimitRange, opts metav1.CreateOptions) (*v1.LimitRange, error) - Update(ctx context.Context, limitRange *v1.LimitRange, opts metav1.UpdateOptions) (*v1.LimitRange, error) + Create(ctx context.Context, limitRange *corev1.LimitRange, opts metav1.CreateOptions) (*corev1.LimitRange, error) + Update(ctx context.Context, limitRange *corev1.LimitRange, opts metav1.UpdateOptions) (*corev1.LimitRange, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error - Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.LimitRange, error) - List(ctx context.Context, opts metav1.ListOptions) (*v1.LimitRangeList, error) + Get(ctx context.Context, name string, opts metav1.GetOptions) (*corev1.LimitRange, error) + List(ctx context.Context, opts metav1.ListOptions) (*corev1.LimitRangeList, error) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.LimitRange, err error) - Apply(ctx context.Context, limitRange *corev1.LimitRangeApplyConfiguration, opts metav1.ApplyOptions) (result *v1.LimitRange, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *corev1.LimitRange, err error) + Apply(ctx context.Context, limitRange *applyconfigurationscorev1.LimitRangeApplyConfiguration, opts metav1.ApplyOptions) (result *corev1.LimitRange, err error) LimitRangeExpansion } // limitRanges implements LimitRangeInterface type limitRanges struct { - *gentype.ClientWithListAndApply[*v1.LimitRange, *v1.LimitRangeList, *corev1.LimitRangeApplyConfiguration] + *gentype.ClientWithListAndApply[*corev1.LimitRange, *corev1.LimitRangeList, *applyconfigurationscorev1.LimitRangeApplyConfiguration] } // newLimitRanges returns a LimitRanges func newLimitRanges(c *CoreV1Client, namespace string) *limitRanges { return &limitRanges{ - gentype.NewClientWithListAndApply[*v1.LimitRange, *v1.LimitRangeList, *corev1.LimitRangeApplyConfiguration]( + gentype.NewClientWithListAndApply[*corev1.LimitRange, *corev1.LimitRangeList, *applyconfigurationscorev1.LimitRangeApplyConfiguration]( "limitranges", c.RESTClient(), scheme.ParameterCodec, namespace, - func() *v1.LimitRange { return &v1.LimitRange{} }, - func() *v1.LimitRangeList { return &v1.LimitRangeList{} }), + func() *corev1.LimitRange { return &corev1.LimitRange{} }, + func() *corev1.LimitRangeList { return &corev1.LimitRangeList{} }, + gentype.PrefersProtobuf[*corev1.LimitRange](), + ), } } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/namespace.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/namespace.go index 75d20648f..323934938 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/namespace.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/namespace.go @@ -19,13 +19,13 @@ limitations under the License. 
package v1 import ( - "context" + context "context" - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - corev1 "k8s.io/client-go/applyconfigurations/core/v1" + applyconfigurationscorev1 "k8s.io/client-go/applyconfigurations/core/v1" gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" ) @@ -38,35 +38,37 @@ type NamespacesGetter interface { // NamespaceInterface has methods to work with Namespace resources. type NamespaceInterface interface { - Create(ctx context.Context, namespace *v1.Namespace, opts metav1.CreateOptions) (*v1.Namespace, error) - Update(ctx context.Context, namespace *v1.Namespace, opts metav1.UpdateOptions) (*v1.Namespace, error) + Create(ctx context.Context, namespace *corev1.Namespace, opts metav1.CreateOptions) (*corev1.Namespace, error) + Update(ctx context.Context, namespace *corev1.Namespace, opts metav1.UpdateOptions) (*corev1.Namespace, error) // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - UpdateStatus(ctx context.Context, namespace *v1.Namespace, opts metav1.UpdateOptions) (*v1.Namespace, error) + UpdateStatus(ctx context.Context, namespace *corev1.Namespace, opts metav1.UpdateOptions) (*corev1.Namespace, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error - Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.Namespace, error) - List(ctx context.Context, opts metav1.ListOptions) (*v1.NamespaceList, error) + Get(ctx context.Context, name string, opts metav1.GetOptions) (*corev1.Namespace, error) + List(ctx context.Context, opts metav1.ListOptions) (*corev1.NamespaceList, error) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Namespace, err error) - Apply(ctx context.Context, namespace *corev1.NamespaceApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Namespace, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *corev1.Namespace, err error) + Apply(ctx context.Context, namespace *applyconfigurationscorev1.NamespaceApplyConfiguration, opts metav1.ApplyOptions) (result *corev1.Namespace, err error) // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
- ApplyStatus(ctx context.Context, namespace *corev1.NamespaceApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Namespace, err error) + ApplyStatus(ctx context.Context, namespace *applyconfigurationscorev1.NamespaceApplyConfiguration, opts metav1.ApplyOptions) (result *corev1.Namespace, err error) NamespaceExpansion } // namespaces implements NamespaceInterface type namespaces struct { - *gentype.ClientWithListAndApply[*v1.Namespace, *v1.NamespaceList, *corev1.NamespaceApplyConfiguration] + *gentype.ClientWithListAndApply[*corev1.Namespace, *corev1.NamespaceList, *applyconfigurationscorev1.NamespaceApplyConfiguration] } // newNamespaces returns a Namespaces func newNamespaces(c *CoreV1Client) *namespaces { return &namespaces{ - gentype.NewClientWithListAndApply[*v1.Namespace, *v1.NamespaceList, *corev1.NamespaceApplyConfiguration]( + gentype.NewClientWithListAndApply[*corev1.Namespace, *corev1.NamespaceList, *applyconfigurationscorev1.NamespaceApplyConfiguration]( "namespaces", c.RESTClient(), scheme.ParameterCodec, "", - func() *v1.Namespace { return &v1.Namespace{} }, - func() *v1.NamespaceList { return &v1.NamespaceList{} }), + func() *corev1.Namespace { return &corev1.Namespace{} }, + func() *corev1.NamespaceList { return &corev1.NamespaceList{} }, + gentype.PrefersProtobuf[*corev1.Namespace](), + ), } } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/node.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/node.go index df1a7817f..1851b025f 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/node.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/node.go @@ -19,13 +19,13 @@ limitations under the License. package v1 import ( - "context" + context "context" - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - corev1 "k8s.io/client-go/applyconfigurations/core/v1" + applyconfigurationscorev1 "k8s.io/client-go/applyconfigurations/core/v1" gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" ) @@ -38,36 +38,38 @@ type NodesGetter interface { // NodeInterface has methods to work with Node resources. type NodeInterface interface { - Create(ctx context.Context, node *v1.Node, opts metav1.CreateOptions) (*v1.Node, error) - Update(ctx context.Context, node *v1.Node, opts metav1.UpdateOptions) (*v1.Node, error) + Create(ctx context.Context, node *corev1.Node, opts metav1.CreateOptions) (*corev1.Node, error) + Update(ctx context.Context, node *corev1.Node, opts metav1.UpdateOptions) (*corev1.Node, error) // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
- UpdateStatus(ctx context.Context, node *v1.Node, opts metav1.UpdateOptions) (*v1.Node, error) + UpdateStatus(ctx context.Context, node *corev1.Node, opts metav1.UpdateOptions) (*corev1.Node, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error - Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.Node, error) - List(ctx context.Context, opts metav1.ListOptions) (*v1.NodeList, error) + Get(ctx context.Context, name string, opts metav1.GetOptions) (*corev1.Node, error) + List(ctx context.Context, opts metav1.ListOptions) (*corev1.NodeList, error) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Node, err error) - Apply(ctx context.Context, node *corev1.NodeApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Node, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *corev1.Node, err error) + Apply(ctx context.Context, node *applyconfigurationscorev1.NodeApplyConfiguration, opts metav1.ApplyOptions) (result *corev1.Node, err error) // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). - ApplyStatus(ctx context.Context, node *corev1.NodeApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Node, err error) + ApplyStatus(ctx context.Context, node *applyconfigurationscorev1.NodeApplyConfiguration, opts metav1.ApplyOptions) (result *corev1.Node, err error) NodeExpansion } // nodes implements NodeInterface type nodes struct { - *gentype.ClientWithListAndApply[*v1.Node, *v1.NodeList, *corev1.NodeApplyConfiguration] + *gentype.ClientWithListAndApply[*corev1.Node, *corev1.NodeList, *applyconfigurationscorev1.NodeApplyConfiguration] } // newNodes returns a Nodes func newNodes(c *CoreV1Client) *nodes { return &nodes{ - gentype.NewClientWithListAndApply[*v1.Node, *v1.NodeList, *corev1.NodeApplyConfiguration]( + gentype.NewClientWithListAndApply[*corev1.Node, *corev1.NodeList, *applyconfigurationscorev1.NodeApplyConfiguration]( "nodes", c.RESTClient(), scheme.ParameterCodec, "", - func() *v1.Node { return &v1.Node{} }, - func() *v1.NodeList { return &v1.NodeList{} }), + func() *corev1.Node { return &corev1.Node{} }, + func() *corev1.NodeList { return &corev1.NodeList{} }, + gentype.PrefersProtobuf[*corev1.Node](), + ), } } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolume.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolume.go index 8be40f866..077a1ba4f 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolume.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolume.go @@ -19,13 +19,13 @@ limitations under the License. 
package v1 import ( - "context" + context "context" - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - corev1 "k8s.io/client-go/applyconfigurations/core/v1" + applyconfigurationscorev1 "k8s.io/client-go/applyconfigurations/core/v1" gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" ) @@ -38,36 +38,38 @@ type PersistentVolumesGetter interface { // PersistentVolumeInterface has methods to work with PersistentVolume resources. type PersistentVolumeInterface interface { - Create(ctx context.Context, persistentVolume *v1.PersistentVolume, opts metav1.CreateOptions) (*v1.PersistentVolume, error) - Update(ctx context.Context, persistentVolume *v1.PersistentVolume, opts metav1.UpdateOptions) (*v1.PersistentVolume, error) + Create(ctx context.Context, persistentVolume *corev1.PersistentVolume, opts metav1.CreateOptions) (*corev1.PersistentVolume, error) + Update(ctx context.Context, persistentVolume *corev1.PersistentVolume, opts metav1.UpdateOptions) (*corev1.PersistentVolume, error) // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - UpdateStatus(ctx context.Context, persistentVolume *v1.PersistentVolume, opts metav1.UpdateOptions) (*v1.PersistentVolume, error) + UpdateStatus(ctx context.Context, persistentVolume *corev1.PersistentVolume, opts metav1.UpdateOptions) (*corev1.PersistentVolume, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error - Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.PersistentVolume, error) - List(ctx context.Context, opts metav1.ListOptions) (*v1.PersistentVolumeList, error) + Get(ctx context.Context, name string, opts metav1.GetOptions) (*corev1.PersistentVolume, error) + List(ctx context.Context, opts metav1.ListOptions) (*corev1.PersistentVolumeList, error) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.PersistentVolume, err error) - Apply(ctx context.Context, persistentVolume *corev1.PersistentVolumeApplyConfiguration, opts metav1.ApplyOptions) (result *v1.PersistentVolume, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *corev1.PersistentVolume, err error) + Apply(ctx context.Context, persistentVolume *applyconfigurationscorev1.PersistentVolumeApplyConfiguration, opts metav1.ApplyOptions) (result *corev1.PersistentVolume, err error) // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
- ApplyStatus(ctx context.Context, persistentVolume *corev1.PersistentVolumeApplyConfiguration, opts metav1.ApplyOptions) (result *v1.PersistentVolume, err error) + ApplyStatus(ctx context.Context, persistentVolume *applyconfigurationscorev1.PersistentVolumeApplyConfiguration, opts metav1.ApplyOptions) (result *corev1.PersistentVolume, err error) PersistentVolumeExpansion } // persistentVolumes implements PersistentVolumeInterface type persistentVolumes struct { - *gentype.ClientWithListAndApply[*v1.PersistentVolume, *v1.PersistentVolumeList, *corev1.PersistentVolumeApplyConfiguration] + *gentype.ClientWithListAndApply[*corev1.PersistentVolume, *corev1.PersistentVolumeList, *applyconfigurationscorev1.PersistentVolumeApplyConfiguration] } // newPersistentVolumes returns a PersistentVolumes func newPersistentVolumes(c *CoreV1Client) *persistentVolumes { return &persistentVolumes{ - gentype.NewClientWithListAndApply[*v1.PersistentVolume, *v1.PersistentVolumeList, *corev1.PersistentVolumeApplyConfiguration]( + gentype.NewClientWithListAndApply[*corev1.PersistentVolume, *corev1.PersistentVolumeList, *applyconfigurationscorev1.PersistentVolumeApplyConfiguration]( "persistentvolumes", c.RESTClient(), scheme.ParameterCodec, "", - func() *v1.PersistentVolume { return &v1.PersistentVolume{} }, - func() *v1.PersistentVolumeList { return &v1.PersistentVolumeList{} }), + func() *corev1.PersistentVolume { return &corev1.PersistentVolume{} }, + func() *corev1.PersistentVolumeList { return &corev1.PersistentVolumeList{} }, + gentype.PrefersProtobuf[*corev1.PersistentVolume](), + ), } } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolumeclaim.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolumeclaim.go index 7721b0092..cbe75e812 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolumeclaim.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolumeclaim.go @@ -19,13 +19,13 @@ limitations under the License. package v1 import ( - "context" + context "context" - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - corev1 "k8s.io/client-go/applyconfigurations/core/v1" + applyconfigurationscorev1 "k8s.io/client-go/applyconfigurations/core/v1" gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" ) @@ -38,36 +38,38 @@ type PersistentVolumeClaimsGetter interface { // PersistentVolumeClaimInterface has methods to work with PersistentVolumeClaim resources. type PersistentVolumeClaimInterface interface { - Create(ctx context.Context, persistentVolumeClaim *v1.PersistentVolumeClaim, opts metav1.CreateOptions) (*v1.PersistentVolumeClaim, error) - Update(ctx context.Context, persistentVolumeClaim *v1.PersistentVolumeClaim, opts metav1.UpdateOptions) (*v1.PersistentVolumeClaim, error) + Create(ctx context.Context, persistentVolumeClaim *corev1.PersistentVolumeClaim, opts metav1.CreateOptions) (*corev1.PersistentVolumeClaim, error) + Update(ctx context.Context, persistentVolumeClaim *corev1.PersistentVolumeClaim, opts metav1.UpdateOptions) (*corev1.PersistentVolumeClaim, error) // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
- UpdateStatus(ctx context.Context, persistentVolumeClaim *v1.PersistentVolumeClaim, opts metav1.UpdateOptions) (*v1.PersistentVolumeClaim, error) + UpdateStatus(ctx context.Context, persistentVolumeClaim *corev1.PersistentVolumeClaim, opts metav1.UpdateOptions) (*corev1.PersistentVolumeClaim, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error - Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.PersistentVolumeClaim, error) - List(ctx context.Context, opts metav1.ListOptions) (*v1.PersistentVolumeClaimList, error) + Get(ctx context.Context, name string, opts metav1.GetOptions) (*corev1.PersistentVolumeClaim, error) + List(ctx context.Context, opts metav1.ListOptions) (*corev1.PersistentVolumeClaimList, error) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.PersistentVolumeClaim, err error) - Apply(ctx context.Context, persistentVolumeClaim *corev1.PersistentVolumeClaimApplyConfiguration, opts metav1.ApplyOptions) (result *v1.PersistentVolumeClaim, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *corev1.PersistentVolumeClaim, err error) + Apply(ctx context.Context, persistentVolumeClaim *applyconfigurationscorev1.PersistentVolumeClaimApplyConfiguration, opts metav1.ApplyOptions) (result *corev1.PersistentVolumeClaim, err error) // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). - ApplyStatus(ctx context.Context, persistentVolumeClaim *corev1.PersistentVolumeClaimApplyConfiguration, opts metav1.ApplyOptions) (result *v1.PersistentVolumeClaim, err error) + ApplyStatus(ctx context.Context, persistentVolumeClaim *applyconfigurationscorev1.PersistentVolumeClaimApplyConfiguration, opts metav1.ApplyOptions) (result *corev1.PersistentVolumeClaim, err error) PersistentVolumeClaimExpansion } // persistentVolumeClaims implements PersistentVolumeClaimInterface type persistentVolumeClaims struct { - *gentype.ClientWithListAndApply[*v1.PersistentVolumeClaim, *v1.PersistentVolumeClaimList, *corev1.PersistentVolumeClaimApplyConfiguration] + *gentype.ClientWithListAndApply[*corev1.PersistentVolumeClaim, *corev1.PersistentVolumeClaimList, *applyconfigurationscorev1.PersistentVolumeClaimApplyConfiguration] } // newPersistentVolumeClaims returns a PersistentVolumeClaims func newPersistentVolumeClaims(c *CoreV1Client, namespace string) *persistentVolumeClaims { return &persistentVolumeClaims{ - gentype.NewClientWithListAndApply[*v1.PersistentVolumeClaim, *v1.PersistentVolumeClaimList, *corev1.PersistentVolumeClaimApplyConfiguration]( + gentype.NewClientWithListAndApply[*corev1.PersistentVolumeClaim, *corev1.PersistentVolumeClaimList, *applyconfigurationscorev1.PersistentVolumeClaimApplyConfiguration]( "persistentvolumeclaims", c.RESTClient(), scheme.ParameterCodec, namespace, - func() *v1.PersistentVolumeClaim { return &v1.PersistentVolumeClaim{} }, - func() *v1.PersistentVolumeClaimList { return &v1.PersistentVolumeClaimList{} }), + func() *corev1.PersistentVolumeClaim { return &corev1.PersistentVolumeClaim{} }, + func() *corev1.PersistentVolumeClaimList { return &corev1.PersistentVolumeClaimList{} }, + gentype.PrefersProtobuf[*corev1.PersistentVolumeClaim](), + ), } } diff 
--git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod.go index 470b7de7b..072a55941 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod.go @@ -19,13 +19,13 @@ limitations under the License. package v1 import ( - "context" + context "context" - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - corev1 "k8s.io/client-go/applyconfigurations/core/v1" + applyconfigurationscorev1 "k8s.io/client-go/applyconfigurations/core/v1" gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" ) @@ -38,46 +38,50 @@ type PodsGetter interface { // PodInterface has methods to work with Pod resources. type PodInterface interface { - Create(ctx context.Context, pod *v1.Pod, opts metav1.CreateOptions) (*v1.Pod, error) - Update(ctx context.Context, pod *v1.Pod, opts metav1.UpdateOptions) (*v1.Pod, error) + Create(ctx context.Context, pod *corev1.Pod, opts metav1.CreateOptions) (*corev1.Pod, error) + Update(ctx context.Context, pod *corev1.Pod, opts metav1.UpdateOptions) (*corev1.Pod, error) // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - UpdateStatus(ctx context.Context, pod *v1.Pod, opts metav1.UpdateOptions) (*v1.Pod, error) + UpdateStatus(ctx context.Context, pod *corev1.Pod, opts metav1.UpdateOptions) (*corev1.Pod, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error - Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.Pod, error) - List(ctx context.Context, opts metav1.ListOptions) (*v1.PodList, error) + Get(ctx context.Context, name string, opts metav1.GetOptions) (*corev1.Pod, error) + List(ctx context.Context, opts metav1.ListOptions) (*corev1.PodList, error) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Pod, err error) - Apply(ctx context.Context, pod *corev1.PodApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Pod, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *corev1.Pod, err error) + Apply(ctx context.Context, pod *applyconfigurationscorev1.PodApplyConfiguration, opts metav1.ApplyOptions) (result *corev1.Pod, err error) // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
- ApplyStatus(ctx context.Context, pod *corev1.PodApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Pod, err error) - UpdateEphemeralContainers(ctx context.Context, podName string, pod *v1.Pod, opts metav1.UpdateOptions) (*v1.Pod, error) + ApplyStatus(ctx context.Context, pod *applyconfigurationscorev1.PodApplyConfiguration, opts metav1.ApplyOptions) (result *corev1.Pod, err error) + UpdateEphemeralContainers(ctx context.Context, podName string, pod *corev1.Pod, opts metav1.UpdateOptions) (*corev1.Pod, error) + UpdateResize(ctx context.Context, podName string, pod *corev1.Pod, opts metav1.UpdateOptions) (*corev1.Pod, error) PodExpansion } // pods implements PodInterface type pods struct { - *gentype.ClientWithListAndApply[*v1.Pod, *v1.PodList, *corev1.PodApplyConfiguration] + *gentype.ClientWithListAndApply[*corev1.Pod, *corev1.PodList, *applyconfigurationscorev1.PodApplyConfiguration] } // newPods returns a Pods func newPods(c *CoreV1Client, namespace string) *pods { return &pods{ - gentype.NewClientWithListAndApply[*v1.Pod, *v1.PodList, *corev1.PodApplyConfiguration]( + gentype.NewClientWithListAndApply[*corev1.Pod, *corev1.PodList, *applyconfigurationscorev1.PodApplyConfiguration]( "pods", c.RESTClient(), scheme.ParameterCodec, namespace, - func() *v1.Pod { return &v1.Pod{} }, - func() *v1.PodList { return &v1.PodList{} }), + func() *corev1.Pod { return &corev1.Pod{} }, + func() *corev1.PodList { return &corev1.PodList{} }, + gentype.PrefersProtobuf[*corev1.Pod](), + ), } } // UpdateEphemeralContainers takes the top resource name and the representation of a pod and updates it. Returns the server's representation of the pod, and an error, if there is any. -func (c *pods) UpdateEphemeralContainers(ctx context.Context, podName string, pod *v1.Pod, opts metav1.UpdateOptions) (result *v1.Pod, err error) { - result = &v1.Pod{} +func (c *pods) UpdateEphemeralContainers(ctx context.Context, podName string, pod *corev1.Pod, opts metav1.UpdateOptions) (result *corev1.Pod, err error) { + result = &corev1.Pod{} err = c.GetClient().Put(). + UseProtobufAsDefault(). Namespace(c.GetNamespace()). Resource("pods"). Name(podName). @@ -88,3 +92,19 @@ func (c *pods) UpdateEphemeralContainers(ctx context.Context, podName string, po Into(result) return } + +// UpdateResize takes the top resource name and the representation of a pod and updates it. Returns the server's representation of the pod, and an error, if there is any. +func (c *pods) UpdateResize(ctx context.Context, podName string, pod *corev1.Pod, opts metav1.UpdateOptions) (result *corev1.Pod, err error) { + result = &corev1.Pod{} + err = c.GetClient().Put(). + UseProtobufAsDefault(). + Namespace(c.GetNamespace()). + Resource("pods"). + Name(podName). + SubResource("resize"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(pod). + Do(ctx). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/podtemplate.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/podtemplate.go index 060a05909..b0cfa1bc1 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/podtemplate.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/podtemplate.go @@ -19,13 +19,13 @@ limitations under the License. 
package v1 import ( - "context" + context "context" - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - corev1 "k8s.io/client-go/applyconfigurations/core/v1" + applyconfigurationscorev1 "k8s.io/client-go/applyconfigurations/core/v1" gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" ) @@ -38,32 +38,34 @@ type PodTemplatesGetter interface { // PodTemplateInterface has methods to work with PodTemplate resources. type PodTemplateInterface interface { - Create(ctx context.Context, podTemplate *v1.PodTemplate, opts metav1.CreateOptions) (*v1.PodTemplate, error) - Update(ctx context.Context, podTemplate *v1.PodTemplate, opts metav1.UpdateOptions) (*v1.PodTemplate, error) + Create(ctx context.Context, podTemplate *corev1.PodTemplate, opts metav1.CreateOptions) (*corev1.PodTemplate, error) + Update(ctx context.Context, podTemplate *corev1.PodTemplate, opts metav1.UpdateOptions) (*corev1.PodTemplate, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error - Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.PodTemplate, error) - List(ctx context.Context, opts metav1.ListOptions) (*v1.PodTemplateList, error) + Get(ctx context.Context, name string, opts metav1.GetOptions) (*corev1.PodTemplate, error) + List(ctx context.Context, opts metav1.ListOptions) (*corev1.PodTemplateList, error) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.PodTemplate, err error) - Apply(ctx context.Context, podTemplate *corev1.PodTemplateApplyConfiguration, opts metav1.ApplyOptions) (result *v1.PodTemplate, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *corev1.PodTemplate, err error) + Apply(ctx context.Context, podTemplate *applyconfigurationscorev1.PodTemplateApplyConfiguration, opts metav1.ApplyOptions) (result *corev1.PodTemplate, err error) PodTemplateExpansion } // podTemplates implements PodTemplateInterface type podTemplates struct { - *gentype.ClientWithListAndApply[*v1.PodTemplate, *v1.PodTemplateList, *corev1.PodTemplateApplyConfiguration] + *gentype.ClientWithListAndApply[*corev1.PodTemplate, *corev1.PodTemplateList, *applyconfigurationscorev1.PodTemplateApplyConfiguration] } // newPodTemplates returns a PodTemplates func newPodTemplates(c *CoreV1Client, namespace string) *podTemplates { return &podTemplates{ - gentype.NewClientWithListAndApply[*v1.PodTemplate, *v1.PodTemplateList, *corev1.PodTemplateApplyConfiguration]( + gentype.NewClientWithListAndApply[*corev1.PodTemplate, *corev1.PodTemplateList, *applyconfigurationscorev1.PodTemplateApplyConfiguration]( "podtemplates", c.RESTClient(), scheme.ParameterCodec, namespace, - func() *v1.PodTemplate { return &v1.PodTemplate{} }, - func() *v1.PodTemplateList { return &v1.PodTemplateList{} }), + func() *corev1.PodTemplate { return &corev1.PodTemplate{} }, + func() *corev1.PodTemplateList { return &corev1.PodTemplateList{} }, + gentype.PrefersProtobuf[*corev1.PodTemplate](), + ), } } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/replicationcontroller.go 
b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/replicationcontroller.go index 9b275ed1b..f8a7c9285 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/replicationcontroller.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/replicationcontroller.go @@ -19,14 +19,14 @@ limitations under the License. package v1 import ( - "context" + context "context" autoscalingv1 "k8s.io/api/autoscaling/v1" - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - corev1 "k8s.io/client-go/applyconfigurations/core/v1" + applyconfigurationscorev1 "k8s.io/client-go/applyconfigurations/core/v1" gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" ) @@ -39,19 +39,19 @@ type ReplicationControllersGetter interface { // ReplicationControllerInterface has methods to work with ReplicationController resources. type ReplicationControllerInterface interface { - Create(ctx context.Context, replicationController *v1.ReplicationController, opts metav1.CreateOptions) (*v1.ReplicationController, error) - Update(ctx context.Context, replicationController *v1.ReplicationController, opts metav1.UpdateOptions) (*v1.ReplicationController, error) + Create(ctx context.Context, replicationController *corev1.ReplicationController, opts metav1.CreateOptions) (*corev1.ReplicationController, error) + Update(ctx context.Context, replicationController *corev1.ReplicationController, opts metav1.UpdateOptions) (*corev1.ReplicationController, error) // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - UpdateStatus(ctx context.Context, replicationController *v1.ReplicationController, opts metav1.UpdateOptions) (*v1.ReplicationController, error) + UpdateStatus(ctx context.Context, replicationController *corev1.ReplicationController, opts metav1.UpdateOptions) (*corev1.ReplicationController, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error - Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.ReplicationController, error) - List(ctx context.Context, opts metav1.ListOptions) (*v1.ReplicationControllerList, error) + Get(ctx context.Context, name string, opts metav1.GetOptions) (*corev1.ReplicationController, error) + List(ctx context.Context, opts metav1.ListOptions) (*corev1.ReplicationControllerList, error) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ReplicationController, err error) - Apply(ctx context.Context, replicationController *corev1.ReplicationControllerApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ReplicationController, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *corev1.ReplicationController, err error) + Apply(ctx context.Context, replicationController *applyconfigurationscorev1.ReplicationControllerApplyConfiguration, opts metav1.ApplyOptions) (result *corev1.ReplicationController, err error) // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
- ApplyStatus(ctx context.Context, replicationController *corev1.ReplicationControllerApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ReplicationController, err error) + ApplyStatus(ctx context.Context, replicationController *applyconfigurationscorev1.ReplicationControllerApplyConfiguration, opts metav1.ApplyOptions) (result *corev1.ReplicationController, err error) GetScale(ctx context.Context, replicationControllerName string, options metav1.GetOptions) (*autoscalingv1.Scale, error) UpdateScale(ctx context.Context, replicationControllerName string, scale *autoscalingv1.Scale, opts metav1.UpdateOptions) (*autoscalingv1.Scale, error) @@ -60,19 +60,21 @@ type ReplicationControllerInterface interface { // replicationControllers implements ReplicationControllerInterface type replicationControllers struct { - *gentype.ClientWithListAndApply[*v1.ReplicationController, *v1.ReplicationControllerList, *corev1.ReplicationControllerApplyConfiguration] + *gentype.ClientWithListAndApply[*corev1.ReplicationController, *corev1.ReplicationControllerList, *applyconfigurationscorev1.ReplicationControllerApplyConfiguration] } // newReplicationControllers returns a ReplicationControllers func newReplicationControllers(c *CoreV1Client, namespace string) *replicationControllers { return &replicationControllers{ - gentype.NewClientWithListAndApply[*v1.ReplicationController, *v1.ReplicationControllerList, *corev1.ReplicationControllerApplyConfiguration]( + gentype.NewClientWithListAndApply[*corev1.ReplicationController, *corev1.ReplicationControllerList, *applyconfigurationscorev1.ReplicationControllerApplyConfiguration]( "replicationcontrollers", c.RESTClient(), scheme.ParameterCodec, namespace, - func() *v1.ReplicationController { return &v1.ReplicationController{} }, - func() *v1.ReplicationControllerList { return &v1.ReplicationControllerList{} }), + func() *corev1.ReplicationController { return &corev1.ReplicationController{} }, + func() *corev1.ReplicationControllerList { return &corev1.ReplicationControllerList{} }, + gentype.PrefersProtobuf[*corev1.ReplicationController](), + ), } } @@ -80,6 +82,7 @@ func newReplicationControllers(c *CoreV1Client, namespace string) *replicationCo func (c *replicationControllers) GetScale(ctx context.Context, replicationControllerName string, options metav1.GetOptions) (result *autoscalingv1.Scale, err error) { result = &autoscalingv1.Scale{} err = c.GetClient().Get(). + UseProtobufAsDefault(). Namespace(c.GetNamespace()). Resource("replicationcontrollers"). Name(replicationControllerName). @@ -94,6 +97,7 @@ func (c *replicationControllers) GetScale(ctx context.Context, replicationContro func (c *replicationControllers) UpdateScale(ctx context.Context, replicationControllerName string, scale *autoscalingv1.Scale, opts metav1.UpdateOptions) (result *autoscalingv1.Scale, err error) { result = &autoscalingv1.Scale{} err = c.GetClient().Put(). + UseProtobufAsDefault(). Namespace(c.GetNamespace()). Resource("replicationcontrollers"). Name(replicationControllerName). diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/resourcequota.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/resourcequota.go index 4b2dcd3b5..a0435accc 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/resourcequota.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/resourcequota.go @@ -19,13 +19,13 @@ limitations under the License. 
package v1 import ( - "context" + context "context" - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - corev1 "k8s.io/client-go/applyconfigurations/core/v1" + applyconfigurationscorev1 "k8s.io/client-go/applyconfigurations/core/v1" gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" ) @@ -38,36 +38,38 @@ type ResourceQuotasGetter interface { // ResourceQuotaInterface has methods to work with ResourceQuota resources. type ResourceQuotaInterface interface { - Create(ctx context.Context, resourceQuota *v1.ResourceQuota, opts metav1.CreateOptions) (*v1.ResourceQuota, error) - Update(ctx context.Context, resourceQuota *v1.ResourceQuota, opts metav1.UpdateOptions) (*v1.ResourceQuota, error) + Create(ctx context.Context, resourceQuota *corev1.ResourceQuota, opts metav1.CreateOptions) (*corev1.ResourceQuota, error) + Update(ctx context.Context, resourceQuota *corev1.ResourceQuota, opts metav1.UpdateOptions) (*corev1.ResourceQuota, error) // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - UpdateStatus(ctx context.Context, resourceQuota *v1.ResourceQuota, opts metav1.UpdateOptions) (*v1.ResourceQuota, error) + UpdateStatus(ctx context.Context, resourceQuota *corev1.ResourceQuota, opts metav1.UpdateOptions) (*corev1.ResourceQuota, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error - Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.ResourceQuota, error) - List(ctx context.Context, opts metav1.ListOptions) (*v1.ResourceQuotaList, error) + Get(ctx context.Context, name string, opts metav1.GetOptions) (*corev1.ResourceQuota, error) + List(ctx context.Context, opts metav1.ListOptions) (*corev1.ResourceQuotaList, error) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ResourceQuota, err error) - Apply(ctx context.Context, resourceQuota *corev1.ResourceQuotaApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ResourceQuota, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *corev1.ResourceQuota, err error) + Apply(ctx context.Context, resourceQuota *applyconfigurationscorev1.ResourceQuotaApplyConfiguration, opts metav1.ApplyOptions) (result *corev1.ResourceQuota, err error) // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
- ApplyStatus(ctx context.Context, resourceQuota *corev1.ResourceQuotaApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ResourceQuota, err error) + ApplyStatus(ctx context.Context, resourceQuota *applyconfigurationscorev1.ResourceQuotaApplyConfiguration, opts metav1.ApplyOptions) (result *corev1.ResourceQuota, err error) ResourceQuotaExpansion } // resourceQuotas implements ResourceQuotaInterface type resourceQuotas struct { - *gentype.ClientWithListAndApply[*v1.ResourceQuota, *v1.ResourceQuotaList, *corev1.ResourceQuotaApplyConfiguration] + *gentype.ClientWithListAndApply[*corev1.ResourceQuota, *corev1.ResourceQuotaList, *applyconfigurationscorev1.ResourceQuotaApplyConfiguration] } // newResourceQuotas returns a ResourceQuotas func newResourceQuotas(c *CoreV1Client, namespace string) *resourceQuotas { return &resourceQuotas{ - gentype.NewClientWithListAndApply[*v1.ResourceQuota, *v1.ResourceQuotaList, *corev1.ResourceQuotaApplyConfiguration]( + gentype.NewClientWithListAndApply[*corev1.ResourceQuota, *corev1.ResourceQuotaList, *applyconfigurationscorev1.ResourceQuotaApplyConfiguration]( "resourcequotas", c.RESTClient(), scheme.ParameterCodec, namespace, - func() *v1.ResourceQuota { return &v1.ResourceQuota{} }, - func() *v1.ResourceQuotaList { return &v1.ResourceQuotaList{} }), + func() *corev1.ResourceQuota { return &corev1.ResourceQuota{} }, + func() *corev1.ResourceQuotaList { return &corev1.ResourceQuotaList{} }, + gentype.PrefersProtobuf[*corev1.ResourceQuota](), + ), } } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/secret.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/secret.go index 12a8d1178..a7ab56a27 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/secret.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/secret.go @@ -19,13 +19,13 @@ limitations under the License. package v1 import ( - "context" + context "context" - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - corev1 "k8s.io/client-go/applyconfigurations/core/v1" + applyconfigurationscorev1 "k8s.io/client-go/applyconfigurations/core/v1" gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" ) @@ -38,32 +38,34 @@ type SecretsGetter interface { // SecretInterface has methods to work with Secret resources. 
type SecretInterface interface { - Create(ctx context.Context, secret *v1.Secret, opts metav1.CreateOptions) (*v1.Secret, error) - Update(ctx context.Context, secret *v1.Secret, opts metav1.UpdateOptions) (*v1.Secret, error) + Create(ctx context.Context, secret *corev1.Secret, opts metav1.CreateOptions) (*corev1.Secret, error) + Update(ctx context.Context, secret *corev1.Secret, opts metav1.UpdateOptions) (*corev1.Secret, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error - Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.Secret, error) - List(ctx context.Context, opts metav1.ListOptions) (*v1.SecretList, error) + Get(ctx context.Context, name string, opts metav1.GetOptions) (*corev1.Secret, error) + List(ctx context.Context, opts metav1.ListOptions) (*corev1.SecretList, error) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Secret, err error) - Apply(ctx context.Context, secret *corev1.SecretApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Secret, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *corev1.Secret, err error) + Apply(ctx context.Context, secret *applyconfigurationscorev1.SecretApplyConfiguration, opts metav1.ApplyOptions) (result *corev1.Secret, err error) SecretExpansion } // secrets implements SecretInterface type secrets struct { - *gentype.ClientWithListAndApply[*v1.Secret, *v1.SecretList, *corev1.SecretApplyConfiguration] + *gentype.ClientWithListAndApply[*corev1.Secret, *corev1.SecretList, *applyconfigurationscorev1.SecretApplyConfiguration] } // newSecrets returns a Secrets func newSecrets(c *CoreV1Client, namespace string) *secrets { return &secrets{ - gentype.NewClientWithListAndApply[*v1.Secret, *v1.SecretList, *corev1.SecretApplyConfiguration]( + gentype.NewClientWithListAndApply[*corev1.Secret, *corev1.SecretList, *applyconfigurationscorev1.SecretApplyConfiguration]( "secrets", c.RESTClient(), scheme.ParameterCodec, namespace, - func() *v1.Secret { return &v1.Secret{} }, - func() *v1.SecretList { return &v1.SecretList{} }), + func() *corev1.Secret { return &corev1.Secret{} }, + func() *corev1.SecretList { return &corev1.SecretList{} }, + gentype.PrefersProtobuf[*corev1.Secret](), + ), } } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/service.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/service.go index ec935a324..f145a137c 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/service.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/service.go @@ -19,13 +19,13 @@ limitations under the License. package v1 import ( - "context" + context "context" - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - corev1 "k8s.io/client-go/applyconfigurations/core/v1" + applyconfigurationscorev1 "k8s.io/client-go/applyconfigurations/core/v1" gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" ) @@ -38,35 +38,37 @@ type ServicesGetter interface { // ServiceInterface has methods to work with Service resources. 
type ServiceInterface interface { - Create(ctx context.Context, service *v1.Service, opts metav1.CreateOptions) (*v1.Service, error) - Update(ctx context.Context, service *v1.Service, opts metav1.UpdateOptions) (*v1.Service, error) + Create(ctx context.Context, service *corev1.Service, opts metav1.CreateOptions) (*corev1.Service, error) + Update(ctx context.Context, service *corev1.Service, opts metav1.UpdateOptions) (*corev1.Service, error) // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - UpdateStatus(ctx context.Context, service *v1.Service, opts metav1.UpdateOptions) (*v1.Service, error) + UpdateStatus(ctx context.Context, service *corev1.Service, opts metav1.UpdateOptions) (*corev1.Service, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error - Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.Service, error) - List(ctx context.Context, opts metav1.ListOptions) (*v1.ServiceList, error) + Get(ctx context.Context, name string, opts metav1.GetOptions) (*corev1.Service, error) + List(ctx context.Context, opts metav1.ListOptions) (*corev1.ServiceList, error) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Service, err error) - Apply(ctx context.Context, service *corev1.ServiceApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Service, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *corev1.Service, err error) + Apply(ctx context.Context, service *applyconfigurationscorev1.ServiceApplyConfiguration, opts metav1.ApplyOptions) (result *corev1.Service, err error) // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
- ApplyStatus(ctx context.Context, service *corev1.ServiceApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Service, err error) + ApplyStatus(ctx context.Context, service *applyconfigurationscorev1.ServiceApplyConfiguration, opts metav1.ApplyOptions) (result *corev1.Service, err error) ServiceExpansion } // services implements ServiceInterface type services struct { - *gentype.ClientWithListAndApply[*v1.Service, *v1.ServiceList, *corev1.ServiceApplyConfiguration] + *gentype.ClientWithListAndApply[*corev1.Service, *corev1.ServiceList, *applyconfigurationscorev1.ServiceApplyConfiguration] } // newServices returns a Services func newServices(c *CoreV1Client, namespace string) *services { return &services{ - gentype.NewClientWithListAndApply[*v1.Service, *v1.ServiceList, *corev1.ServiceApplyConfiguration]( + gentype.NewClientWithListAndApply[*corev1.Service, *corev1.ServiceList, *applyconfigurationscorev1.ServiceApplyConfiguration]( "services", c.RESTClient(), scheme.ParameterCodec, namespace, - func() *v1.Service { return &v1.Service{} }, - func() *v1.ServiceList { return &v1.ServiceList{} }), + func() *corev1.Service { return &corev1.Service{} }, + func() *corev1.ServiceList { return &corev1.ServiceList{} }, + gentype.PrefersProtobuf[*corev1.Service](), + ), } } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/serviceaccount.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/serviceaccount.go index eb995d454..8458b6d9b 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/serviceaccount.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/serviceaccount.go @@ -19,14 +19,14 @@ limitations under the License. package v1 import ( - "context" + context "context" authenticationv1 "k8s.io/api/authentication/v1" - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - corev1 "k8s.io/client-go/applyconfigurations/core/v1" + applyconfigurationscorev1 "k8s.io/client-go/applyconfigurations/core/v1" gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" ) @@ -39,15 +39,15 @@ type ServiceAccountsGetter interface { // ServiceAccountInterface has methods to work with ServiceAccount resources. 
type ServiceAccountInterface interface { - Create(ctx context.Context, serviceAccount *v1.ServiceAccount, opts metav1.CreateOptions) (*v1.ServiceAccount, error) - Update(ctx context.Context, serviceAccount *v1.ServiceAccount, opts metav1.UpdateOptions) (*v1.ServiceAccount, error) + Create(ctx context.Context, serviceAccount *corev1.ServiceAccount, opts metav1.CreateOptions) (*corev1.ServiceAccount, error) + Update(ctx context.Context, serviceAccount *corev1.ServiceAccount, opts metav1.UpdateOptions) (*corev1.ServiceAccount, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error - Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.ServiceAccount, error) - List(ctx context.Context, opts metav1.ListOptions) (*v1.ServiceAccountList, error) + Get(ctx context.Context, name string, opts metav1.GetOptions) (*corev1.ServiceAccount, error) + List(ctx context.Context, opts metav1.ListOptions) (*corev1.ServiceAccountList, error) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ServiceAccount, err error) - Apply(ctx context.Context, serviceAccount *corev1.ServiceAccountApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ServiceAccount, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *corev1.ServiceAccount, err error) + Apply(ctx context.Context, serviceAccount *applyconfigurationscorev1.ServiceAccountApplyConfiguration, opts metav1.ApplyOptions) (result *corev1.ServiceAccount, err error) CreateToken(ctx context.Context, serviceAccountName string, tokenRequest *authenticationv1.TokenRequest, opts metav1.CreateOptions) (*authenticationv1.TokenRequest, error) ServiceAccountExpansion @@ -55,19 +55,21 @@ type ServiceAccountInterface interface { // serviceAccounts implements ServiceAccountInterface type serviceAccounts struct { - *gentype.ClientWithListAndApply[*v1.ServiceAccount, *v1.ServiceAccountList, *corev1.ServiceAccountApplyConfiguration] + *gentype.ClientWithListAndApply[*corev1.ServiceAccount, *corev1.ServiceAccountList, *applyconfigurationscorev1.ServiceAccountApplyConfiguration] } // newServiceAccounts returns a ServiceAccounts func newServiceAccounts(c *CoreV1Client, namespace string) *serviceAccounts { return &serviceAccounts{ - gentype.NewClientWithListAndApply[*v1.ServiceAccount, *v1.ServiceAccountList, *corev1.ServiceAccountApplyConfiguration]( + gentype.NewClientWithListAndApply[*corev1.ServiceAccount, *corev1.ServiceAccountList, *applyconfigurationscorev1.ServiceAccountApplyConfiguration]( "serviceaccounts", c.RESTClient(), scheme.ParameterCodec, namespace, - func() *v1.ServiceAccount { return &v1.ServiceAccount{} }, - func() *v1.ServiceAccountList { return &v1.ServiceAccountList{} }), + func() *corev1.ServiceAccount { return &corev1.ServiceAccount{} }, + func() *corev1.ServiceAccountList { return &corev1.ServiceAccountList{} }, + gentype.PrefersProtobuf[*corev1.ServiceAccount](), + ), } } @@ -75,6 +77,7 @@ func newServiceAccounts(c *CoreV1Client, namespace string) *serviceAccounts { func (c *serviceAccounts) CreateToken(ctx context.Context, serviceAccountName string, tokenRequest *authenticationv1.TokenRequest, opts metav1.CreateOptions) (result *authenticationv1.TokenRequest, err error) 
{ result = &authenticationv1.TokenRequest{} err = c.GetClient().Post(). + UseProtobufAsDefault(). Namespace(c.GetNamespace()). Resource("serviceaccounts"). Name(serviceAccountName). diff --git a/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1/discovery_client.go b/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1/discovery_client.go index 9041443b3..fbc685df8 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1/discovery_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1/discovery_client.go @@ -19,10 +19,10 @@ limitations under the License. package v1 import ( - "net/http" + http "net/http" - v1 "k8s.io/api/discovery/v1" - "k8s.io/client-go/kubernetes/scheme" + discoveryv1 "k8s.io/api/discovery/v1" + scheme "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -85,10 +85,10 @@ func New(c rest.Interface) *DiscoveryV1Client { } func setConfigDefaults(config *rest.Config) error { - gv := v1.SchemeGroupVersion + gv := discoveryv1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1/endpointslice.go b/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1/endpointslice.go index 1f927055c..75b9a559e 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1/endpointslice.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1/endpointslice.go @@ -19,13 +19,13 @@ limitations under the License. package v1 import ( - "context" + context "context" - v1 "k8s.io/api/discovery/v1" + discoveryv1 "k8s.io/api/discovery/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - discoveryv1 "k8s.io/client-go/applyconfigurations/discovery/v1" + applyconfigurationsdiscoveryv1 "k8s.io/client-go/applyconfigurations/discovery/v1" gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" ) @@ -38,32 +38,34 @@ type EndpointSlicesGetter interface { // EndpointSliceInterface has methods to work with EndpointSlice resources. 
type EndpointSliceInterface interface { - Create(ctx context.Context, endpointSlice *v1.EndpointSlice, opts metav1.CreateOptions) (*v1.EndpointSlice, error) - Update(ctx context.Context, endpointSlice *v1.EndpointSlice, opts metav1.UpdateOptions) (*v1.EndpointSlice, error) + Create(ctx context.Context, endpointSlice *discoveryv1.EndpointSlice, opts metav1.CreateOptions) (*discoveryv1.EndpointSlice, error) + Update(ctx context.Context, endpointSlice *discoveryv1.EndpointSlice, opts metav1.UpdateOptions) (*discoveryv1.EndpointSlice, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error - Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.EndpointSlice, error) - List(ctx context.Context, opts metav1.ListOptions) (*v1.EndpointSliceList, error) + Get(ctx context.Context, name string, opts metav1.GetOptions) (*discoveryv1.EndpointSlice, error) + List(ctx context.Context, opts metav1.ListOptions) (*discoveryv1.EndpointSliceList, error) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.EndpointSlice, err error) - Apply(ctx context.Context, endpointSlice *discoveryv1.EndpointSliceApplyConfiguration, opts metav1.ApplyOptions) (result *v1.EndpointSlice, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *discoveryv1.EndpointSlice, err error) + Apply(ctx context.Context, endpointSlice *applyconfigurationsdiscoveryv1.EndpointSliceApplyConfiguration, opts metav1.ApplyOptions) (result *discoveryv1.EndpointSlice, err error) EndpointSliceExpansion } // endpointSlices implements EndpointSliceInterface type endpointSlices struct { - *gentype.ClientWithListAndApply[*v1.EndpointSlice, *v1.EndpointSliceList, *discoveryv1.EndpointSliceApplyConfiguration] + *gentype.ClientWithListAndApply[*discoveryv1.EndpointSlice, *discoveryv1.EndpointSliceList, *applyconfigurationsdiscoveryv1.EndpointSliceApplyConfiguration] } // newEndpointSlices returns a EndpointSlices func newEndpointSlices(c *DiscoveryV1Client, namespace string) *endpointSlices { return &endpointSlices{ - gentype.NewClientWithListAndApply[*v1.EndpointSlice, *v1.EndpointSliceList, *discoveryv1.EndpointSliceApplyConfiguration]( + gentype.NewClientWithListAndApply[*discoveryv1.EndpointSlice, *discoveryv1.EndpointSliceList, *applyconfigurationsdiscoveryv1.EndpointSliceApplyConfiguration]( "endpointslices", c.RESTClient(), scheme.ParameterCodec, namespace, - func() *v1.EndpointSlice { return &v1.EndpointSlice{} }, - func() *v1.EndpointSliceList { return &v1.EndpointSliceList{} }), + func() *discoveryv1.EndpointSlice { return &discoveryv1.EndpointSlice{} }, + func() *discoveryv1.EndpointSliceList { return &discoveryv1.EndpointSliceList{} }, + gentype.PrefersProtobuf[*discoveryv1.EndpointSlice](), + ), } } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1/fake/fake_discovery_client.go b/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1/fake/fake_discovery_client.go index 1ca9b23f5..b64eabdad 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1/fake/fake_discovery_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1/fake/fake_discovery_client.go @@ -29,7 +29,7 @@ type FakeDiscoveryV1 struct { } func (c *FakeDiscoveryV1) 
EndpointSlices(namespace string) v1.EndpointSliceInterface { - return &FakeEndpointSlices{c, namespace} + return newFakeEndpointSlices(c, namespace) } // RESTClient returns a RESTClient that is used to communicate diff --git a/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1/fake/fake_endpointslice.go b/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1/fake/fake_endpointslice.go index 6bbbde82e..a2b048fd9 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1/fake/fake_endpointslice.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1/fake/fake_endpointslice.go @@ -19,142 +19,33 @@ limitations under the License. package fake import ( - "context" - json "encoding/json" - "fmt" - v1 "k8s.io/api/discovery/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" discoveryv1 "k8s.io/client-go/applyconfigurations/discovery/v1" - testing "k8s.io/client-go/testing" + gentype "k8s.io/client-go/gentype" + typeddiscoveryv1 "k8s.io/client-go/kubernetes/typed/discovery/v1" ) -// FakeEndpointSlices implements EndpointSliceInterface -type FakeEndpointSlices struct { +// fakeEndpointSlices implements EndpointSliceInterface +type fakeEndpointSlices struct { + *gentype.FakeClientWithListAndApply[*v1.EndpointSlice, *v1.EndpointSliceList, *discoveryv1.EndpointSliceApplyConfiguration] Fake *FakeDiscoveryV1 - ns string -} - -var endpointslicesResource = v1.SchemeGroupVersion.WithResource("endpointslices") - -var endpointslicesKind = v1.SchemeGroupVersion.WithKind("EndpointSlice") - -// Get takes name of the endpointSlice, and returns the corresponding endpointSlice object, and an error if there is any. -func (c *FakeEndpointSlices) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.EndpointSlice, err error) { - emptyResult := &v1.EndpointSlice{} - obj, err := c.Fake. - Invokes(testing.NewGetActionWithOptions(endpointslicesResource, c.ns, name, options), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1.EndpointSlice), err -} - -// List takes label and field selectors, and returns the list of EndpointSlices that match those selectors. -func (c *FakeEndpointSlices) List(ctx context.Context, opts metav1.ListOptions) (result *v1.EndpointSliceList, err error) { - emptyResult := &v1.EndpointSliceList{} - obj, err := c.Fake. - Invokes(testing.NewListActionWithOptions(endpointslicesResource, endpointslicesKind, c.ns, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1.EndpointSliceList{ListMeta: obj.(*v1.EndpointSliceList).ListMeta} - for _, item := range obj.(*v1.EndpointSliceList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested endpointSlices. -func (c *FakeEndpointSlices) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchActionWithOptions(endpointslicesResource, c.ns, opts)) - } -// Create takes the representation of a endpointSlice and creates it. Returns the server's representation of the endpointSlice, and an error, if there is any. 
-func (c *FakeEndpointSlices) Create(ctx context.Context, endpointSlice *v1.EndpointSlice, opts metav1.CreateOptions) (result *v1.EndpointSlice, err error) { - emptyResult := &v1.EndpointSlice{} - obj, err := c.Fake. - Invokes(testing.NewCreateActionWithOptions(endpointslicesResource, c.ns, endpointSlice, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1.EndpointSlice), err -} - -// Update takes the representation of a endpointSlice and updates it. Returns the server's representation of the endpointSlice, and an error, if there is any. -func (c *FakeEndpointSlices) Update(ctx context.Context, endpointSlice *v1.EndpointSlice, opts metav1.UpdateOptions) (result *v1.EndpointSlice, err error) { - emptyResult := &v1.EndpointSlice{} - obj, err := c.Fake. - Invokes(testing.NewUpdateActionWithOptions(endpointslicesResource, c.ns, endpointSlice, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1.EndpointSlice), err -} - -// Delete takes name of the endpointSlice and deletes it. Returns an error if one occurs. -func (c *FakeEndpointSlices) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(endpointslicesResource, c.ns, name, opts), &v1.EndpointSlice{}) - - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeEndpointSlices) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewDeleteCollectionActionWithOptions(endpointslicesResource, c.ns, opts, listOpts) - - _, err := c.Fake.Invokes(action, &v1.EndpointSliceList{}) - return err -} - -// Patch applies the patch and returns the patched endpointSlice. -func (c *FakeEndpointSlices) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.EndpointSlice, err error) { - emptyResult := &v1.EndpointSlice{} - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceActionWithOptions(endpointslicesResource, c.ns, name, pt, data, opts, subresources...), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1.EndpointSlice), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied endpointSlice. -func (c *FakeEndpointSlices) Apply(ctx context.Context, endpointSlice *discoveryv1.EndpointSliceApplyConfiguration, opts metav1.ApplyOptions) (result *v1.EndpointSlice, err error) { - if endpointSlice == nil { - return nil, fmt.Errorf("endpointSlice provided to Apply must not be nil") - } - data, err := json.Marshal(endpointSlice) - if err != nil { - return nil, err - } - name := endpointSlice.Name - if name == nil { - return nil, fmt.Errorf("endpointSlice.Name must be provided to Apply") - } - emptyResult := &v1.EndpointSlice{} - obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceActionWithOptions(endpointslicesResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) - - if obj == nil { - return emptyResult, err +func newFakeEndpointSlices(fake *FakeDiscoveryV1, namespace string) typeddiscoveryv1.EndpointSliceInterface { + return &fakeEndpointSlices{ + gentype.NewFakeClientWithListAndApply[*v1.EndpointSlice, *v1.EndpointSliceList, *discoveryv1.EndpointSliceApplyConfiguration]( + fake.Fake, + namespace, + v1.SchemeGroupVersion.WithResource("endpointslices"), + v1.SchemeGroupVersion.WithKind("EndpointSlice"), + func() *v1.EndpointSlice { return &v1.EndpointSlice{} }, + func() *v1.EndpointSliceList { return &v1.EndpointSliceList{} }, + func(dst, src *v1.EndpointSliceList) { dst.ListMeta = src.ListMeta }, + func(list *v1.EndpointSliceList) []*v1.EndpointSlice { return gentype.ToPointerSlice(list.Items) }, + func(list *v1.EndpointSliceList, items []*v1.EndpointSlice) { + list.Items = gentype.FromPointerSlice(items) + }, + ), + fake, } - return obj.(*v1.EndpointSlice), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/discovery_client.go b/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/discovery_client.go index 193d5e9eb..908446c6d 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/discovery_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/discovery_client.go @@ -19,10 +19,10 @@ limitations under the License. package v1beta1 import ( - "net/http" + http "net/http" - v1beta1 "k8s.io/api/discovery/v1beta1" - "k8s.io/client-go/kubernetes/scheme" + discoveryv1beta1 "k8s.io/api/discovery/v1beta1" + scheme "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -85,10 +85,10 @@ func New(c rest.Interface) *DiscoveryV1beta1Client { } func setConfigDefaults(config *rest.Config) error { - gv := v1beta1.SchemeGroupVersion + gv := discoveryv1beta1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/endpointslice.go b/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/endpointslice.go index 298cfbc87..4ef2752e7 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/endpointslice.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/endpointslice.go @@ -19,13 +19,13 @@ limitations under the License. package v1beta1 import ( - "context" + context "context" - v1beta1 "k8s.io/api/discovery/v1beta1" + discoveryv1beta1 "k8s.io/api/discovery/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - discoveryv1beta1 "k8s.io/client-go/applyconfigurations/discovery/v1beta1" + applyconfigurationsdiscoveryv1beta1 "k8s.io/client-go/applyconfigurations/discovery/v1beta1" gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" ) @@ -38,32 +38,34 @@ type EndpointSlicesGetter interface { // EndpointSliceInterface has methods to work with EndpointSlice resources. 
type EndpointSliceInterface interface { - Create(ctx context.Context, endpointSlice *v1beta1.EndpointSlice, opts v1.CreateOptions) (*v1beta1.EndpointSlice, error) - Update(ctx context.Context, endpointSlice *v1beta1.EndpointSlice, opts v1.UpdateOptions) (*v1beta1.EndpointSlice, error) + Create(ctx context.Context, endpointSlice *discoveryv1beta1.EndpointSlice, opts v1.CreateOptions) (*discoveryv1beta1.EndpointSlice, error) + Update(ctx context.Context, endpointSlice *discoveryv1beta1.EndpointSlice, opts v1.UpdateOptions) (*discoveryv1beta1.EndpointSlice, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.EndpointSlice, error) - List(ctx context.Context, opts v1.ListOptions) (*v1beta1.EndpointSliceList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*discoveryv1beta1.EndpointSlice, error) + List(ctx context.Context, opts v1.ListOptions) (*discoveryv1beta1.EndpointSliceList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.EndpointSlice, err error) - Apply(ctx context.Context, endpointSlice *discoveryv1beta1.EndpointSliceApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.EndpointSlice, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *discoveryv1beta1.EndpointSlice, err error) + Apply(ctx context.Context, endpointSlice *applyconfigurationsdiscoveryv1beta1.EndpointSliceApplyConfiguration, opts v1.ApplyOptions) (result *discoveryv1beta1.EndpointSlice, err error) EndpointSliceExpansion } // endpointSlices implements EndpointSliceInterface type endpointSlices struct { - *gentype.ClientWithListAndApply[*v1beta1.EndpointSlice, *v1beta1.EndpointSliceList, *discoveryv1beta1.EndpointSliceApplyConfiguration] + *gentype.ClientWithListAndApply[*discoveryv1beta1.EndpointSlice, *discoveryv1beta1.EndpointSliceList, *applyconfigurationsdiscoveryv1beta1.EndpointSliceApplyConfiguration] } // newEndpointSlices returns a EndpointSlices func newEndpointSlices(c *DiscoveryV1beta1Client, namespace string) *endpointSlices { return &endpointSlices{ - gentype.NewClientWithListAndApply[*v1beta1.EndpointSlice, *v1beta1.EndpointSliceList, *discoveryv1beta1.EndpointSliceApplyConfiguration]( + gentype.NewClientWithListAndApply[*discoveryv1beta1.EndpointSlice, *discoveryv1beta1.EndpointSliceList, *applyconfigurationsdiscoveryv1beta1.EndpointSliceApplyConfiguration]( "endpointslices", c.RESTClient(), scheme.ParameterCodec, namespace, - func() *v1beta1.EndpointSlice { return &v1beta1.EndpointSlice{} }, - func() *v1beta1.EndpointSliceList { return &v1beta1.EndpointSliceList{} }), + func() *discoveryv1beta1.EndpointSlice { return &discoveryv1beta1.EndpointSlice{} }, + func() *discoveryv1beta1.EndpointSliceList { return &discoveryv1beta1.EndpointSliceList{} }, + gentype.PrefersProtobuf[*discoveryv1beta1.EndpointSlice](), + ), } } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/fake/fake_discovery_client.go b/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/fake/fake_discovery_client.go index e285de647..53fdfe62e 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/fake/fake_discovery_client.go +++ 
b/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/fake/fake_discovery_client.go @@ -29,7 +29,7 @@ type FakeDiscoveryV1beta1 struct { } func (c *FakeDiscoveryV1beta1) EndpointSlices(namespace string) v1beta1.EndpointSliceInterface { - return &FakeEndpointSlices{c, namespace} + return newFakeEndpointSlices(c, namespace) } // RESTClient returns a RESTClient that is used to communicate diff --git a/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/fake/fake_endpointslice.go b/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/fake/fake_endpointslice.go index 65cf69b9d..b36aeb4d0 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/fake/fake_endpointslice.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/fake/fake_endpointslice.go @@ -19,142 +19,35 @@ limitations under the License. package fake import ( - "context" - json "encoding/json" - "fmt" - v1beta1 "k8s.io/api/discovery/v1beta1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" discoveryv1beta1 "k8s.io/client-go/applyconfigurations/discovery/v1beta1" - testing "k8s.io/client-go/testing" + gentype "k8s.io/client-go/gentype" + typeddiscoveryv1beta1 "k8s.io/client-go/kubernetes/typed/discovery/v1beta1" ) -// FakeEndpointSlices implements EndpointSliceInterface -type FakeEndpointSlices struct { +// fakeEndpointSlices implements EndpointSliceInterface +type fakeEndpointSlices struct { + *gentype.FakeClientWithListAndApply[*v1beta1.EndpointSlice, *v1beta1.EndpointSliceList, *discoveryv1beta1.EndpointSliceApplyConfiguration] Fake *FakeDiscoveryV1beta1 - ns string -} - -var endpointslicesResource = v1beta1.SchemeGroupVersion.WithResource("endpointslices") - -var endpointslicesKind = v1beta1.SchemeGroupVersion.WithKind("EndpointSlice") - -// Get takes name of the endpointSlice, and returns the corresponding endpointSlice object, and an error if there is any. -func (c *FakeEndpointSlices) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.EndpointSlice, err error) { - emptyResult := &v1beta1.EndpointSlice{} - obj, err := c.Fake. - Invokes(testing.NewGetActionWithOptions(endpointslicesResource, c.ns, name, options), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.EndpointSlice), err -} - -// List takes label and field selectors, and returns the list of EndpointSlices that match those selectors. -func (c *FakeEndpointSlices) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.EndpointSliceList, err error) { - emptyResult := &v1beta1.EndpointSliceList{} - obj, err := c.Fake. - Invokes(testing.NewListActionWithOptions(endpointslicesResource, endpointslicesKind, c.ns, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1beta1.EndpointSliceList{ListMeta: obj.(*v1beta1.EndpointSliceList).ListMeta} - for _, item := range obj.(*v1beta1.EndpointSliceList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested endpointSlices. -func (c *FakeEndpointSlices) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. 
- InvokesWatch(testing.NewWatchActionWithOptions(endpointslicesResource, c.ns, opts)) - } -// Create takes the representation of a endpointSlice and creates it. Returns the server's representation of the endpointSlice, and an error, if there is any. -func (c *FakeEndpointSlices) Create(ctx context.Context, endpointSlice *v1beta1.EndpointSlice, opts v1.CreateOptions) (result *v1beta1.EndpointSlice, err error) { - emptyResult := &v1beta1.EndpointSlice{} - obj, err := c.Fake. - Invokes(testing.NewCreateActionWithOptions(endpointslicesResource, c.ns, endpointSlice, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.EndpointSlice), err -} - -// Update takes the representation of a endpointSlice and updates it. Returns the server's representation of the endpointSlice, and an error, if there is any. -func (c *FakeEndpointSlices) Update(ctx context.Context, endpointSlice *v1beta1.EndpointSlice, opts v1.UpdateOptions) (result *v1beta1.EndpointSlice, err error) { - emptyResult := &v1beta1.EndpointSlice{} - obj, err := c.Fake. - Invokes(testing.NewUpdateActionWithOptions(endpointslicesResource, c.ns, endpointSlice, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.EndpointSlice), err -} - -// Delete takes name of the endpointSlice and deletes it. Returns an error if one occurs. -func (c *FakeEndpointSlices) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(endpointslicesResource, c.ns, name, opts), &v1beta1.EndpointSlice{}) - - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeEndpointSlices) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionActionWithOptions(endpointslicesResource, c.ns, opts, listOpts) - - _, err := c.Fake.Invokes(action, &v1beta1.EndpointSliceList{}) - return err -} - -// Patch applies the patch and returns the patched endpointSlice. -func (c *FakeEndpointSlices) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.EndpointSlice, err error) { - emptyResult := &v1beta1.EndpointSlice{} - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceActionWithOptions(endpointslicesResource, c.ns, name, pt, data, opts, subresources...), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.EndpointSlice), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied endpointSlice. -func (c *FakeEndpointSlices) Apply(ctx context.Context, endpointSlice *discoveryv1beta1.EndpointSliceApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.EndpointSlice, err error) { - if endpointSlice == nil { - return nil, fmt.Errorf("endpointSlice provided to Apply must not be nil") - } - data, err := json.Marshal(endpointSlice) - if err != nil { - return nil, err - } - name := endpointSlice.Name - if name == nil { - return nil, fmt.Errorf("endpointSlice.Name must be provided to Apply") - } - emptyResult := &v1beta1.EndpointSlice{} - obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceActionWithOptions(endpointslicesResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) - - if obj == nil { - return emptyResult, err +func newFakeEndpointSlices(fake *FakeDiscoveryV1beta1, namespace string) typeddiscoveryv1beta1.EndpointSliceInterface { + return &fakeEndpointSlices{ + gentype.NewFakeClientWithListAndApply[*v1beta1.EndpointSlice, *v1beta1.EndpointSliceList, *discoveryv1beta1.EndpointSliceApplyConfiguration]( + fake.Fake, + namespace, + v1beta1.SchemeGroupVersion.WithResource("endpointslices"), + v1beta1.SchemeGroupVersion.WithKind("EndpointSlice"), + func() *v1beta1.EndpointSlice { return &v1beta1.EndpointSlice{} }, + func() *v1beta1.EndpointSliceList { return &v1beta1.EndpointSliceList{} }, + func(dst, src *v1beta1.EndpointSliceList) { dst.ListMeta = src.ListMeta }, + func(list *v1beta1.EndpointSliceList) []*v1beta1.EndpointSlice { + return gentype.ToPointerSlice(list.Items) + }, + func(list *v1beta1.EndpointSliceList, items []*v1beta1.EndpointSlice) { + list.Items = gentype.FromPointerSlice(items) + }, + ), + fake, } - return obj.(*v1beta1.EndpointSlice), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/events/v1/event.go b/vendor/k8s.io/client-go/kubernetes/typed/events/v1/event.go index d021a76c4..fd3358476 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/events/v1/event.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/events/v1/event.go @@ -19,13 +19,13 @@ limitations under the License. package v1 import ( - "context" + context "context" - v1 "k8s.io/api/events/v1" + eventsv1 "k8s.io/api/events/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - eventsv1 "k8s.io/client-go/applyconfigurations/events/v1" + applyconfigurationseventsv1 "k8s.io/client-go/applyconfigurations/events/v1" gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" ) @@ -38,32 +38,34 @@ type EventsGetter interface { // EventInterface has methods to work with Event resources. 
type EventInterface interface { - Create(ctx context.Context, event *v1.Event, opts metav1.CreateOptions) (*v1.Event, error) - Update(ctx context.Context, event *v1.Event, opts metav1.UpdateOptions) (*v1.Event, error) + Create(ctx context.Context, event *eventsv1.Event, opts metav1.CreateOptions) (*eventsv1.Event, error) + Update(ctx context.Context, event *eventsv1.Event, opts metav1.UpdateOptions) (*eventsv1.Event, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error - Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.Event, error) - List(ctx context.Context, opts metav1.ListOptions) (*v1.EventList, error) + Get(ctx context.Context, name string, opts metav1.GetOptions) (*eventsv1.Event, error) + List(ctx context.Context, opts metav1.ListOptions) (*eventsv1.EventList, error) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Event, err error) - Apply(ctx context.Context, event *eventsv1.EventApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Event, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *eventsv1.Event, err error) + Apply(ctx context.Context, event *applyconfigurationseventsv1.EventApplyConfiguration, opts metav1.ApplyOptions) (result *eventsv1.Event, err error) EventExpansion } // events implements EventInterface type events struct { - *gentype.ClientWithListAndApply[*v1.Event, *v1.EventList, *eventsv1.EventApplyConfiguration] + *gentype.ClientWithListAndApply[*eventsv1.Event, *eventsv1.EventList, *applyconfigurationseventsv1.EventApplyConfiguration] } // newEvents returns a Events func newEvents(c *EventsV1Client, namespace string) *events { return &events{ - gentype.NewClientWithListAndApply[*v1.Event, *v1.EventList, *eventsv1.EventApplyConfiguration]( + gentype.NewClientWithListAndApply[*eventsv1.Event, *eventsv1.EventList, *applyconfigurationseventsv1.EventApplyConfiguration]( "events", c.RESTClient(), scheme.ParameterCodec, namespace, - func() *v1.Event { return &v1.Event{} }, - func() *v1.EventList { return &v1.EventList{} }), + func() *eventsv1.Event { return &eventsv1.Event{} }, + func() *eventsv1.EventList { return &eventsv1.EventList{} }, + gentype.PrefersProtobuf[*eventsv1.Event](), + ), } } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/events/v1/events_client.go b/vendor/k8s.io/client-go/kubernetes/typed/events/v1/events_client.go index 8c73918d1..959ff5f81 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/events/v1/events_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/events/v1/events_client.go @@ -19,10 +19,10 @@ limitations under the License. 
package v1 import ( - "net/http" + http "net/http" - v1 "k8s.io/api/events/v1" - "k8s.io/client-go/kubernetes/scheme" + eventsv1 "k8s.io/api/events/v1" + scheme "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -85,10 +85,10 @@ func New(c rest.Interface) *EventsV1Client { } func setConfigDefaults(config *rest.Config) error { - gv := v1.SchemeGroupVersion + gv := eventsv1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/events/v1/fake/fake_event.go b/vendor/k8s.io/client-go/kubernetes/typed/events/v1/fake/fake_event.go index 1e79eb984..b9f176d14 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/events/v1/fake/fake_event.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/events/v1/fake/fake_event.go @@ -19,142 +19,31 @@ limitations under the License. package fake import ( - "context" - json "encoding/json" - "fmt" - v1 "k8s.io/api/events/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" eventsv1 "k8s.io/client-go/applyconfigurations/events/v1" - testing "k8s.io/client-go/testing" + gentype "k8s.io/client-go/gentype" + typedeventsv1 "k8s.io/client-go/kubernetes/typed/events/v1" ) -// FakeEvents implements EventInterface -type FakeEvents struct { +// fakeEvents implements EventInterface +type fakeEvents struct { + *gentype.FakeClientWithListAndApply[*v1.Event, *v1.EventList, *eventsv1.EventApplyConfiguration] Fake *FakeEventsV1 - ns string -} - -var eventsResource = v1.SchemeGroupVersion.WithResource("events") - -var eventsKind = v1.SchemeGroupVersion.WithKind("Event") - -// Get takes name of the event, and returns the corresponding event object, and an error if there is any. -func (c *FakeEvents) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Event, err error) { - emptyResult := &v1.Event{} - obj, err := c.Fake. - Invokes(testing.NewGetActionWithOptions(eventsResource, c.ns, name, options), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1.Event), err -} - -// List takes label and field selectors, and returns the list of Events that match those selectors. -func (c *FakeEvents) List(ctx context.Context, opts metav1.ListOptions) (result *v1.EventList, err error) { - emptyResult := &v1.EventList{} - obj, err := c.Fake. - Invokes(testing.NewListActionWithOptions(eventsResource, eventsKind, c.ns, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1.EventList{ListMeta: obj.(*v1.EventList).ListMeta} - for _, item := range obj.(*v1.EventList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested events. -func (c *FakeEvents) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchActionWithOptions(eventsResource, c.ns, opts)) - } -// Create takes the representation of a event and creates it. 
Returns the server's representation of the event, and an error, if there is any. -func (c *FakeEvents) Create(ctx context.Context, event *v1.Event, opts metav1.CreateOptions) (result *v1.Event, err error) { - emptyResult := &v1.Event{} - obj, err := c.Fake. - Invokes(testing.NewCreateActionWithOptions(eventsResource, c.ns, event, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1.Event), err -} - -// Update takes the representation of a event and updates it. Returns the server's representation of the event, and an error, if there is any. -func (c *FakeEvents) Update(ctx context.Context, event *v1.Event, opts metav1.UpdateOptions) (result *v1.Event, err error) { - emptyResult := &v1.Event{} - obj, err := c.Fake. - Invokes(testing.NewUpdateActionWithOptions(eventsResource, c.ns, event, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1.Event), err -} - -// Delete takes name of the event and deletes it. Returns an error if one occurs. -func (c *FakeEvents) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(eventsResource, c.ns, name, opts), &v1.Event{}) - - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeEvents) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewDeleteCollectionActionWithOptions(eventsResource, c.ns, opts, listOpts) - - _, err := c.Fake.Invokes(action, &v1.EventList{}) - return err -} - -// Patch applies the patch and returns the patched event. -func (c *FakeEvents) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Event, err error) { - emptyResult := &v1.Event{} - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceActionWithOptions(eventsResource, c.ns, name, pt, data, opts, subresources...), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1.Event), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied event. -func (c *FakeEvents) Apply(ctx context.Context, event *eventsv1.EventApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Event, err error) { - if event == nil { - return nil, fmt.Errorf("event provided to Apply must not be nil") - } - data, err := json.Marshal(event) - if err != nil { - return nil, err - } - name := event.Name - if name == nil { - return nil, fmt.Errorf("event.Name must be provided to Apply") - } - emptyResult := &v1.Event{} - obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceActionWithOptions(eventsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) - - if obj == nil { - return emptyResult, err +func newFakeEvents(fake *FakeEventsV1, namespace string) typedeventsv1.EventInterface { + return &fakeEvents{ + gentype.NewFakeClientWithListAndApply[*v1.Event, *v1.EventList, *eventsv1.EventApplyConfiguration]( + fake.Fake, + namespace, + v1.SchemeGroupVersion.WithResource("events"), + v1.SchemeGroupVersion.WithKind("Event"), + func() *v1.Event { return &v1.Event{} }, + func() *v1.EventList { return &v1.EventList{} }, + func(dst, src *v1.EventList) { dst.ListMeta = src.ListMeta }, + func(list *v1.EventList) []*v1.Event { return gentype.ToPointerSlice(list.Items) }, + func(list *v1.EventList, items []*v1.Event) { list.Items = gentype.FromPointerSlice(items) }, + ), + fake, } - return obj.(*v1.Event), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/events/v1/fake/fake_events_client.go b/vendor/k8s.io/client-go/kubernetes/typed/events/v1/fake/fake_events_client.go index 95ef2b307..ab0ca22b4 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/events/v1/fake/fake_events_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/events/v1/fake/fake_events_client.go @@ -29,7 +29,7 @@ type FakeEventsV1 struct { } func (c *FakeEventsV1) Events(namespace string) v1.EventInterface { - return &FakeEvents{c, namespace} + return newFakeEvents(c, namespace) } // RESTClient returns a RESTClient that is used to communicate diff --git a/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/event.go b/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/event.go index 77ca2e775..c18a1aeb6 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/event.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/event.go @@ -19,13 +19,13 @@ limitations under the License. package v1beta1 import ( - "context" + context "context" - v1beta1 "k8s.io/api/events/v1beta1" + eventsv1beta1 "k8s.io/api/events/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - eventsv1beta1 "k8s.io/client-go/applyconfigurations/events/v1beta1" + applyconfigurationseventsv1beta1 "k8s.io/client-go/applyconfigurations/events/v1beta1" gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" ) @@ -38,32 +38,34 @@ type EventsGetter interface { // EventInterface has methods to work with Event resources. 
type EventInterface interface { - Create(ctx context.Context, event *v1beta1.Event, opts v1.CreateOptions) (*v1beta1.Event, error) - Update(ctx context.Context, event *v1beta1.Event, opts v1.UpdateOptions) (*v1beta1.Event, error) + Create(ctx context.Context, event *eventsv1beta1.Event, opts v1.CreateOptions) (*eventsv1beta1.Event, error) + Update(ctx context.Context, event *eventsv1beta1.Event, opts v1.UpdateOptions) (*eventsv1beta1.Event, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.Event, error) - List(ctx context.Context, opts v1.ListOptions) (*v1beta1.EventList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*eventsv1beta1.Event, error) + List(ctx context.Context, opts v1.ListOptions) (*eventsv1beta1.EventList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Event, err error) - Apply(ctx context.Context, event *eventsv1beta1.EventApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.Event, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *eventsv1beta1.Event, err error) + Apply(ctx context.Context, event *applyconfigurationseventsv1beta1.EventApplyConfiguration, opts v1.ApplyOptions) (result *eventsv1beta1.Event, err error) EventExpansion } // events implements EventInterface type events struct { - *gentype.ClientWithListAndApply[*v1beta1.Event, *v1beta1.EventList, *eventsv1beta1.EventApplyConfiguration] + *gentype.ClientWithListAndApply[*eventsv1beta1.Event, *eventsv1beta1.EventList, *applyconfigurationseventsv1beta1.EventApplyConfiguration] } // newEvents returns a Events func newEvents(c *EventsV1beta1Client, namespace string) *events { return &events{ - gentype.NewClientWithListAndApply[*v1beta1.Event, *v1beta1.EventList, *eventsv1beta1.EventApplyConfiguration]( + gentype.NewClientWithListAndApply[*eventsv1beta1.Event, *eventsv1beta1.EventList, *applyconfigurationseventsv1beta1.EventApplyConfiguration]( "events", c.RESTClient(), scheme.ParameterCodec, namespace, - func() *v1beta1.Event { return &v1beta1.Event{} }, - func() *v1beta1.EventList { return &v1beta1.EventList{} }), + func() *eventsv1beta1.Event { return &eventsv1beta1.Event{} }, + func() *eventsv1beta1.EventList { return &eventsv1beta1.EventList{} }, + gentype.PrefersProtobuf[*eventsv1beta1.Event](), + ), } } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/events_client.go b/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/events_client.go index 66506bf88..0bfc3cb60 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/events_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/events_client.go @@ -19,10 +19,10 @@ limitations under the License. 
package v1beta1 import ( - "net/http" + http "net/http" - v1beta1 "k8s.io/api/events/v1beta1" - "k8s.io/client-go/kubernetes/scheme" + eventsv1beta1 "k8s.io/api/events/v1beta1" + scheme "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -85,10 +85,10 @@ func New(c rest.Interface) *EventsV1beta1Client { } func setConfigDefaults(config *rest.Config) error { - gv := v1beta1.SchemeGroupVersion + gv := eventsv1beta1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/fake/fake_event.go b/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/fake/fake_event.go index b00f2126a..9c9a57326 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/fake/fake_event.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/fake/fake_event.go @@ -19,142 +19,31 @@ limitations under the License. package fake import ( - "context" - json "encoding/json" - "fmt" - v1beta1 "k8s.io/api/events/v1beta1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" eventsv1beta1 "k8s.io/client-go/applyconfigurations/events/v1beta1" - testing "k8s.io/client-go/testing" + gentype "k8s.io/client-go/gentype" + typedeventsv1beta1 "k8s.io/client-go/kubernetes/typed/events/v1beta1" ) -// FakeEvents implements EventInterface -type FakeEvents struct { +// fakeEvents implements EventInterface +type fakeEvents struct { + *gentype.FakeClientWithListAndApply[*v1beta1.Event, *v1beta1.EventList, *eventsv1beta1.EventApplyConfiguration] Fake *FakeEventsV1beta1 - ns string -} - -var eventsResource = v1beta1.SchemeGroupVersion.WithResource("events") - -var eventsKind = v1beta1.SchemeGroupVersion.WithKind("Event") - -// Get takes name of the event, and returns the corresponding event object, and an error if there is any. -func (c *FakeEvents) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.Event, err error) { - emptyResult := &v1beta1.Event{} - obj, err := c.Fake. - Invokes(testing.NewGetActionWithOptions(eventsResource, c.ns, name, options), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.Event), err -} - -// List takes label and field selectors, and returns the list of Events that match those selectors. -func (c *FakeEvents) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.EventList, err error) { - emptyResult := &v1beta1.EventList{} - obj, err := c.Fake. - Invokes(testing.NewListActionWithOptions(eventsResource, eventsKind, c.ns, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1beta1.EventList{ListMeta: obj.(*v1beta1.EventList).ListMeta} - for _, item := range obj.(*v1beta1.EventList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested events. -func (c *FakeEvents) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. 
- InvokesWatch(testing.NewWatchActionWithOptions(eventsResource, c.ns, opts)) - } -// Create takes the representation of a event and creates it. Returns the server's representation of the event, and an error, if there is any. -func (c *FakeEvents) Create(ctx context.Context, event *v1beta1.Event, opts v1.CreateOptions) (result *v1beta1.Event, err error) { - emptyResult := &v1beta1.Event{} - obj, err := c.Fake. - Invokes(testing.NewCreateActionWithOptions(eventsResource, c.ns, event, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.Event), err -} - -// Update takes the representation of a event and updates it. Returns the server's representation of the event, and an error, if there is any. -func (c *FakeEvents) Update(ctx context.Context, event *v1beta1.Event, opts v1.UpdateOptions) (result *v1beta1.Event, err error) { - emptyResult := &v1beta1.Event{} - obj, err := c.Fake. - Invokes(testing.NewUpdateActionWithOptions(eventsResource, c.ns, event, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.Event), err -} - -// Delete takes name of the event and deletes it. Returns an error if one occurs. -func (c *FakeEvents) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(eventsResource, c.ns, name, opts), &v1beta1.Event{}) - - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeEvents) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionActionWithOptions(eventsResource, c.ns, opts, listOpts) - - _, err := c.Fake.Invokes(action, &v1beta1.EventList{}) - return err -} - -// Patch applies the patch and returns the patched event. -func (c *FakeEvents) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Event, err error) { - emptyResult := &v1beta1.Event{} - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceActionWithOptions(eventsResource, c.ns, name, pt, data, opts, subresources...), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.Event), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied event. -func (c *FakeEvents) Apply(ctx context.Context, event *eventsv1beta1.EventApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.Event, err error) { - if event == nil { - return nil, fmt.Errorf("event provided to Apply must not be nil") - } - data, err := json.Marshal(event) - if err != nil { - return nil, err - } - name := event.Name - if name == nil { - return nil, fmt.Errorf("event.Name must be provided to Apply") - } - emptyResult := &v1beta1.Event{} - obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceActionWithOptions(eventsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) - - if obj == nil { - return emptyResult, err +func newFakeEvents(fake *FakeEventsV1beta1, namespace string) typedeventsv1beta1.EventInterface { + return &fakeEvents{ + gentype.NewFakeClientWithListAndApply[*v1beta1.Event, *v1beta1.EventList, *eventsv1beta1.EventApplyConfiguration]( + fake.Fake, + namespace, + v1beta1.SchemeGroupVersion.WithResource("events"), + v1beta1.SchemeGroupVersion.WithKind("Event"), + func() *v1beta1.Event { return &v1beta1.Event{} }, + func() *v1beta1.EventList { return &v1beta1.EventList{} }, + func(dst, src *v1beta1.EventList) { dst.ListMeta = src.ListMeta }, + func(list *v1beta1.EventList) []*v1beta1.Event { return gentype.ToPointerSlice(list.Items) }, + func(list *v1beta1.EventList, items []*v1beta1.Event) { list.Items = gentype.FromPointerSlice(items) }, + ), + fake, } - return obj.(*v1beta1.Event), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/fake/fake_event_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/fake/fake_event_expansion.go index 19c1b4415..248ff6ea1 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/fake/fake_event_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/fake/fake_event_expansion.go @@ -23,10 +23,10 @@ import ( ) // CreateWithEventNamespace creats a new event. Returns the copy of the event the server returns, or an error. -func (c *FakeEvents) CreateWithEventNamespace(event *v1beta1.Event) (*v1beta1.Event, error) { - action := core.NewRootCreateAction(eventsResource, event) - if c.ns != "" { - action = core.NewCreateAction(eventsResource, c.ns, event) +func (c *fakeEvents) CreateWithEventNamespace(event *v1beta1.Event) (*v1beta1.Event, error) { + action := core.NewRootCreateAction(c.Resource(), event) + if c.Namespace() != "" { + action = core.NewCreateAction(c.Resource(), c.Namespace(), event) } obj, err := c.Fake.Invokes(action, event) if obj == nil { @@ -37,10 +37,10 @@ func (c *FakeEvents) CreateWithEventNamespace(event *v1beta1.Event) (*v1beta1.Ev } // UpdateWithEventNamespace replaces an existing event. Returns the copy of the event the server returns, or an error. -func (c *FakeEvents) UpdateWithEventNamespace(event *v1beta1.Event) (*v1beta1.Event, error) { - action := core.NewRootUpdateAction(eventsResource, event) - if c.ns != "" { - action = core.NewUpdateAction(eventsResource, c.ns, event) +func (c *fakeEvents) UpdateWithEventNamespace(event *v1beta1.Event) (*v1beta1.Event, error) { + action := core.NewRootUpdateAction(c.Resource(), event) + if c.Namespace() != "" { + action = core.NewUpdateAction(c.Resource(), c.Namespace(), event) } obj, err := c.Fake.Invokes(action, event) if obj == nil { @@ -51,11 +51,11 @@ func (c *FakeEvents) UpdateWithEventNamespace(event *v1beta1.Event) (*v1beta1.Ev } // PatchWithEventNamespace patches an existing event. Returns the copy of the event the server returns, or an error. 
-func (c *FakeEvents) PatchWithEventNamespace(event *v1beta1.Event, data []byte) (*v1beta1.Event, error) { +func (c *fakeEvents) PatchWithEventNamespace(event *v1beta1.Event, data []byte) (*v1beta1.Event, error) { pt := types.StrategicMergePatchType - action := core.NewRootPatchAction(eventsResource, event.Name, pt, data) - if c.ns != "" { - action = core.NewPatchAction(eventsResource, c.ns, event.Name, pt, data) + action := core.NewRootPatchAction(c.Resource(), event.Name, pt, data) + if c.Namespace() != "" { + action = core.NewPatchAction(c.Resource(), c.Namespace(), event.Name, pt, data) } obj, err := c.Fake.Invokes(action, event) if obj == nil { diff --git a/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/fake/fake_events_client.go b/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/fake/fake_events_client.go index 875c774e3..0b4db4d51 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/fake/fake_events_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/fake/fake_events_client.go @@ -29,7 +29,7 @@ type FakeEventsV1beta1 struct { } func (c *FakeEventsV1beta1) Events(namespace string) v1beta1.EventInterface { - return &FakeEvents{c, namespace} + return newFakeEvents(c, namespace) } // RESTClient returns a RESTClient that is used to communicate diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/daemonset.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/daemonset.go index f86194bf0..c04be73a3 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/daemonset.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/daemonset.go @@ -19,13 +19,13 @@ limitations under the License. package v1beta1 import ( - "context" + context "context" - v1beta1 "k8s.io/api/extensions/v1beta1" + extensionsv1beta1 "k8s.io/api/extensions/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - extensionsv1beta1 "k8s.io/client-go/applyconfigurations/extensions/v1beta1" + applyconfigurationsextensionsv1beta1 "k8s.io/client-go/applyconfigurations/extensions/v1beta1" gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" ) @@ -38,36 +38,38 @@ type DaemonSetsGetter interface { // DaemonSetInterface has methods to work with DaemonSet resources. type DaemonSetInterface interface { - Create(ctx context.Context, daemonSet *v1beta1.DaemonSet, opts v1.CreateOptions) (*v1beta1.DaemonSet, error) - Update(ctx context.Context, daemonSet *v1beta1.DaemonSet, opts v1.UpdateOptions) (*v1beta1.DaemonSet, error) + Create(ctx context.Context, daemonSet *extensionsv1beta1.DaemonSet, opts v1.CreateOptions) (*extensionsv1beta1.DaemonSet, error) + Update(ctx context.Context, daemonSet *extensionsv1beta1.DaemonSet, opts v1.UpdateOptions) (*extensionsv1beta1.DaemonSet, error) // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
- UpdateStatus(ctx context.Context, daemonSet *v1beta1.DaemonSet, opts v1.UpdateOptions) (*v1beta1.DaemonSet, error) + UpdateStatus(ctx context.Context, daemonSet *extensionsv1beta1.DaemonSet, opts v1.UpdateOptions) (*extensionsv1beta1.DaemonSet, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.DaemonSet, error) - List(ctx context.Context, opts v1.ListOptions) (*v1beta1.DaemonSetList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*extensionsv1beta1.DaemonSet, error) + List(ctx context.Context, opts v1.ListOptions) (*extensionsv1beta1.DaemonSetList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.DaemonSet, err error) - Apply(ctx context.Context, daemonSet *extensionsv1beta1.DaemonSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.DaemonSet, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *extensionsv1beta1.DaemonSet, err error) + Apply(ctx context.Context, daemonSet *applyconfigurationsextensionsv1beta1.DaemonSetApplyConfiguration, opts v1.ApplyOptions) (result *extensionsv1beta1.DaemonSet, err error) // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). - ApplyStatus(ctx context.Context, daemonSet *extensionsv1beta1.DaemonSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.DaemonSet, err error) + ApplyStatus(ctx context.Context, daemonSet *applyconfigurationsextensionsv1beta1.DaemonSetApplyConfiguration, opts v1.ApplyOptions) (result *extensionsv1beta1.DaemonSet, err error) DaemonSetExpansion } // daemonSets implements DaemonSetInterface type daemonSets struct { - *gentype.ClientWithListAndApply[*v1beta1.DaemonSet, *v1beta1.DaemonSetList, *extensionsv1beta1.DaemonSetApplyConfiguration] + *gentype.ClientWithListAndApply[*extensionsv1beta1.DaemonSet, *extensionsv1beta1.DaemonSetList, *applyconfigurationsextensionsv1beta1.DaemonSetApplyConfiguration] } // newDaemonSets returns a DaemonSets func newDaemonSets(c *ExtensionsV1beta1Client, namespace string) *daemonSets { return &daemonSets{ - gentype.NewClientWithListAndApply[*v1beta1.DaemonSet, *v1beta1.DaemonSetList, *extensionsv1beta1.DaemonSetApplyConfiguration]( + gentype.NewClientWithListAndApply[*extensionsv1beta1.DaemonSet, *extensionsv1beta1.DaemonSetList, *applyconfigurationsextensionsv1beta1.DaemonSetApplyConfiguration]( "daemonsets", c.RESTClient(), scheme.ParameterCodec, namespace, - func() *v1beta1.DaemonSet { return &v1beta1.DaemonSet{} }, - func() *v1beta1.DaemonSetList { return &v1beta1.DaemonSetList{} }), + func() *extensionsv1beta1.DaemonSet { return &extensionsv1beta1.DaemonSet{} }, + func() *extensionsv1beta1.DaemonSetList { return &extensionsv1beta1.DaemonSetList{} }, + gentype.PrefersProtobuf[*extensionsv1beta1.DaemonSet](), + ), } } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/deployment.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/deployment.go index 021fbb3b3..1bcf3cbc8 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/deployment.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/deployment.go @@ -19,17 +19,17 @@ 
limitations under the License. package v1beta1 import ( - "context" - json "encoding/json" - "fmt" + context "context" + fmt "fmt" - v1beta1 "k8s.io/api/extensions/v1beta1" + extensionsv1beta1 "k8s.io/api/extensions/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - extensionsv1beta1 "k8s.io/client-go/applyconfigurations/extensions/v1beta1" + applyconfigurationsextensionsv1beta1 "k8s.io/client-go/applyconfigurations/extensions/v1beta1" gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" + apply "k8s.io/client-go/util/apply" ) // DeploymentsGetter has a method to return a DeploymentInterface. @@ -40,48 +40,51 @@ type DeploymentsGetter interface { // DeploymentInterface has methods to work with Deployment resources. type DeploymentInterface interface { - Create(ctx context.Context, deployment *v1beta1.Deployment, opts v1.CreateOptions) (*v1beta1.Deployment, error) - Update(ctx context.Context, deployment *v1beta1.Deployment, opts v1.UpdateOptions) (*v1beta1.Deployment, error) + Create(ctx context.Context, deployment *extensionsv1beta1.Deployment, opts v1.CreateOptions) (*extensionsv1beta1.Deployment, error) + Update(ctx context.Context, deployment *extensionsv1beta1.Deployment, opts v1.UpdateOptions) (*extensionsv1beta1.Deployment, error) // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - UpdateStatus(ctx context.Context, deployment *v1beta1.Deployment, opts v1.UpdateOptions) (*v1beta1.Deployment, error) + UpdateStatus(ctx context.Context, deployment *extensionsv1beta1.Deployment, opts v1.UpdateOptions) (*extensionsv1beta1.Deployment, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.Deployment, error) - List(ctx context.Context, opts v1.ListOptions) (*v1beta1.DeploymentList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*extensionsv1beta1.Deployment, error) + List(ctx context.Context, opts v1.ListOptions) (*extensionsv1beta1.DeploymentList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Deployment, err error) - Apply(ctx context.Context, deployment *extensionsv1beta1.DeploymentApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.Deployment, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *extensionsv1beta1.Deployment, err error) + Apply(ctx context.Context, deployment *applyconfigurationsextensionsv1beta1.DeploymentApplyConfiguration, opts v1.ApplyOptions) (result *extensionsv1beta1.Deployment, err error) // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
- ApplyStatus(ctx context.Context, deployment *extensionsv1beta1.DeploymentApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.Deployment, err error) - GetScale(ctx context.Context, deploymentName string, options v1.GetOptions) (*v1beta1.Scale, error) - UpdateScale(ctx context.Context, deploymentName string, scale *v1beta1.Scale, opts v1.UpdateOptions) (*v1beta1.Scale, error) - ApplyScale(ctx context.Context, deploymentName string, scale *extensionsv1beta1.ScaleApplyConfiguration, opts v1.ApplyOptions) (*v1beta1.Scale, error) + ApplyStatus(ctx context.Context, deployment *applyconfigurationsextensionsv1beta1.DeploymentApplyConfiguration, opts v1.ApplyOptions) (result *extensionsv1beta1.Deployment, err error) + GetScale(ctx context.Context, deploymentName string, options v1.GetOptions) (*extensionsv1beta1.Scale, error) + UpdateScale(ctx context.Context, deploymentName string, scale *extensionsv1beta1.Scale, opts v1.UpdateOptions) (*extensionsv1beta1.Scale, error) + ApplyScale(ctx context.Context, deploymentName string, scale *applyconfigurationsextensionsv1beta1.ScaleApplyConfiguration, opts v1.ApplyOptions) (*extensionsv1beta1.Scale, error) DeploymentExpansion } // deployments implements DeploymentInterface type deployments struct { - *gentype.ClientWithListAndApply[*v1beta1.Deployment, *v1beta1.DeploymentList, *extensionsv1beta1.DeploymentApplyConfiguration] + *gentype.ClientWithListAndApply[*extensionsv1beta1.Deployment, *extensionsv1beta1.DeploymentList, *applyconfigurationsextensionsv1beta1.DeploymentApplyConfiguration] } // newDeployments returns a Deployments func newDeployments(c *ExtensionsV1beta1Client, namespace string) *deployments { return &deployments{ - gentype.NewClientWithListAndApply[*v1beta1.Deployment, *v1beta1.DeploymentList, *extensionsv1beta1.DeploymentApplyConfiguration]( + gentype.NewClientWithListAndApply[*extensionsv1beta1.Deployment, *extensionsv1beta1.DeploymentList, *applyconfigurationsextensionsv1beta1.DeploymentApplyConfiguration]( "deployments", c.RESTClient(), scheme.ParameterCodec, namespace, - func() *v1beta1.Deployment { return &v1beta1.Deployment{} }, - func() *v1beta1.DeploymentList { return &v1beta1.DeploymentList{} }), + func() *extensionsv1beta1.Deployment { return &extensionsv1beta1.Deployment{} }, + func() *extensionsv1beta1.DeploymentList { return &extensionsv1beta1.DeploymentList{} }, + gentype.PrefersProtobuf[*extensionsv1beta1.Deployment](), + ), } } -// GetScale takes name of the deployment, and returns the corresponding v1beta1.Scale object, and an error if there is any. -func (c *deployments) GetScale(ctx context.Context, deploymentName string, options v1.GetOptions) (result *v1beta1.Scale, err error) { - result = &v1beta1.Scale{} +// GetScale takes name of the deployment, and returns the corresponding extensionsv1beta1.Scale object, and an error if there is any. +func (c *deployments) GetScale(ctx context.Context, deploymentName string, options v1.GetOptions) (result *extensionsv1beta1.Scale, err error) { + result = &extensionsv1beta1.Scale{} err = c.GetClient().Get(). + UseProtobufAsDefault(). Namespace(c.GetNamespace()). Resource("deployments"). Name(deploymentName). @@ -93,9 +96,10 @@ func (c *deployments) GetScale(ctx context.Context, deploymentName string, optio } // UpdateScale takes the top resource name and the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any. 
-func (c *deployments) UpdateScale(ctx context.Context, deploymentName string, scale *v1beta1.Scale, opts v1.UpdateOptions) (result *v1beta1.Scale, err error) { - result = &v1beta1.Scale{} +func (c *deployments) UpdateScale(ctx context.Context, deploymentName string, scale *extensionsv1beta1.Scale, opts v1.UpdateOptions) (result *extensionsv1beta1.Scale, err error) { + result = &extensionsv1beta1.Scale{} err = c.GetClient().Put(). + UseProtobufAsDefault(). Namespace(c.GetNamespace()). Resource("deployments"). Name(deploymentName). @@ -109,24 +113,24 @@ func (c *deployments) UpdateScale(ctx context.Context, deploymentName string, sc // ApplyScale takes top resource name and the apply declarative configuration for scale, // applies it and returns the applied scale, and an error, if there is any. -func (c *deployments) ApplyScale(ctx context.Context, deploymentName string, scale *extensionsv1beta1.ScaleApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.Scale, err error) { +func (c *deployments) ApplyScale(ctx context.Context, deploymentName string, scale *applyconfigurationsextensionsv1beta1.ScaleApplyConfiguration, opts v1.ApplyOptions) (result *extensionsv1beta1.Scale, err error) { if scale == nil { return nil, fmt.Errorf("scale provided to ApplyScale must not be nil") } patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(scale) + request, err := apply.NewRequest(c.GetClient(), scale) if err != nil { return nil, err } - result = &v1beta1.Scale{} - err = c.GetClient().Patch(types.ApplyPatchType). + result = &extensionsv1beta1.Scale{} + err = request. + UseProtobufAsDefault(). Namespace(c.GetNamespace()). Resource("deployments"). Name(deploymentName). SubResource("scale"). VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). Do(ctx). Into(result) return diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/extensions_client.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/extensions_client.go index 4725d2cd1..88f2279bb 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/extensions_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/extensions_client.go @@ -19,10 +19,10 @@ limitations under the License. package v1beta1 import ( - "net/http" + http "net/http" - v1beta1 "k8s.io/api/extensions/v1beta1" - "k8s.io/client-go/kubernetes/scheme" + extensionsv1beta1 "k8s.io/api/extensions/v1beta1" + scheme "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -105,10 +105,10 @@ func New(c rest.Interface) *ExtensionsV1beta1Client { } func setConfigDefaults(config *rest.Config) error { - gv := v1beta1.SchemeGroupVersion + gv := extensionsv1beta1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_daemonset.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_daemonset.go index f14943082..6b6244ca8 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_daemonset.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_daemonset.go @@ -19,179 +19,33 @@ limitations under the License. 
package fake import ( - "context" - json "encoding/json" - "fmt" - v1beta1 "k8s.io/api/extensions/v1beta1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" extensionsv1beta1 "k8s.io/client-go/applyconfigurations/extensions/v1beta1" - testing "k8s.io/client-go/testing" + gentype "k8s.io/client-go/gentype" + typedextensionsv1beta1 "k8s.io/client-go/kubernetes/typed/extensions/v1beta1" ) -// FakeDaemonSets implements DaemonSetInterface -type FakeDaemonSets struct { +// fakeDaemonSets implements DaemonSetInterface +type fakeDaemonSets struct { + *gentype.FakeClientWithListAndApply[*v1beta1.DaemonSet, *v1beta1.DaemonSetList, *extensionsv1beta1.DaemonSetApplyConfiguration] Fake *FakeExtensionsV1beta1 - ns string -} - -var daemonsetsResource = v1beta1.SchemeGroupVersion.WithResource("daemonsets") - -var daemonsetsKind = v1beta1.SchemeGroupVersion.WithKind("DaemonSet") - -// Get takes name of the daemonSet, and returns the corresponding daemonSet object, and an error if there is any. -func (c *FakeDaemonSets) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.DaemonSet, err error) { - emptyResult := &v1beta1.DaemonSet{} - obj, err := c.Fake. - Invokes(testing.NewGetActionWithOptions(daemonsetsResource, c.ns, name, options), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.DaemonSet), err -} - -// List takes label and field selectors, and returns the list of DaemonSets that match those selectors. -func (c *FakeDaemonSets) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.DaemonSetList, err error) { - emptyResult := &v1beta1.DaemonSetList{} - obj, err := c.Fake. - Invokes(testing.NewListActionWithOptions(daemonsetsResource, daemonsetsKind, c.ns, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1beta1.DaemonSetList{ListMeta: obj.(*v1beta1.DaemonSetList).ListMeta} - for _, item := range obj.(*v1beta1.DaemonSetList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested daemonSets. -func (c *FakeDaemonSets) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchActionWithOptions(daemonsetsResource, c.ns, opts)) - -} - -// Create takes the representation of a daemonSet and creates it. Returns the server's representation of the daemonSet, and an error, if there is any. -func (c *FakeDaemonSets) Create(ctx context.Context, daemonSet *v1beta1.DaemonSet, opts v1.CreateOptions) (result *v1beta1.DaemonSet, err error) { - emptyResult := &v1beta1.DaemonSet{} - obj, err := c.Fake. - Invokes(testing.NewCreateActionWithOptions(daemonsetsResource, c.ns, daemonSet, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.DaemonSet), err -} - -// Update takes the representation of a daemonSet and updates it. Returns the server's representation of the daemonSet, and an error, if there is any. -func (c *FakeDaemonSets) Update(ctx context.Context, daemonSet *v1beta1.DaemonSet, opts v1.UpdateOptions) (result *v1beta1.DaemonSet, err error) { - emptyResult := &v1beta1.DaemonSet{} - obj, err := c.Fake. 
- Invokes(testing.NewUpdateActionWithOptions(daemonsetsResource, c.ns, daemonSet, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.DaemonSet), err } -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeDaemonSets) UpdateStatus(ctx context.Context, daemonSet *v1beta1.DaemonSet, opts v1.UpdateOptions) (result *v1beta1.DaemonSet, err error) { - emptyResult := &v1beta1.DaemonSet{} - obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceActionWithOptions(daemonsetsResource, "status", c.ns, daemonSet, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.DaemonSet), err -} - -// Delete takes name of the daemonSet and deletes it. Returns an error if one occurs. -func (c *FakeDaemonSets) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(daemonsetsResource, c.ns, name, opts), &v1beta1.DaemonSet{}) - - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeDaemonSets) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionActionWithOptions(daemonsetsResource, c.ns, opts, listOpts) - - _, err := c.Fake.Invokes(action, &v1beta1.DaemonSetList{}) - return err -} - -// Patch applies the patch and returns the patched daemonSet. -func (c *FakeDaemonSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.DaemonSet, err error) { - emptyResult := &v1beta1.DaemonSet{} - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceActionWithOptions(daemonsetsResource, c.ns, name, pt, data, opts, subresources...), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.DaemonSet), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied daemonSet. -func (c *FakeDaemonSets) Apply(ctx context.Context, daemonSet *extensionsv1beta1.DaemonSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.DaemonSet, err error) { - if daemonSet == nil { - return nil, fmt.Errorf("daemonSet provided to Apply must not be nil") - } - data, err := json.Marshal(daemonSet) - if err != nil { - return nil, err - } - name := daemonSet.Name - if name == nil { - return nil, fmt.Errorf("daemonSet.Name must be provided to Apply") - } - emptyResult := &v1beta1.DaemonSet{} - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceActionWithOptions(daemonsetsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.DaemonSet), err -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
-func (c *FakeDaemonSets) ApplyStatus(ctx context.Context, daemonSet *extensionsv1beta1.DaemonSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.DaemonSet, err error) { - if daemonSet == nil { - return nil, fmt.Errorf("daemonSet provided to Apply must not be nil") - } - data, err := json.Marshal(daemonSet) - if err != nil { - return nil, err - } - name := daemonSet.Name - if name == nil { - return nil, fmt.Errorf("daemonSet.Name must be provided to Apply") - } - emptyResult := &v1beta1.DaemonSet{} - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceActionWithOptions(daemonsetsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) - - if obj == nil { - return emptyResult, err +func newFakeDaemonSets(fake *FakeExtensionsV1beta1, namespace string) typedextensionsv1beta1.DaemonSetInterface { + return &fakeDaemonSets{ + gentype.NewFakeClientWithListAndApply[*v1beta1.DaemonSet, *v1beta1.DaemonSetList, *extensionsv1beta1.DaemonSetApplyConfiguration]( + fake.Fake, + namespace, + v1beta1.SchemeGroupVersion.WithResource("daemonsets"), + v1beta1.SchemeGroupVersion.WithKind("DaemonSet"), + func() *v1beta1.DaemonSet { return &v1beta1.DaemonSet{} }, + func() *v1beta1.DaemonSetList { return &v1beta1.DaemonSetList{} }, + func(dst, src *v1beta1.DaemonSetList) { dst.ListMeta = src.ListMeta }, + func(list *v1beta1.DaemonSetList) []*v1beta1.DaemonSet { return gentype.ToPointerSlice(list.Items) }, + func(list *v1beta1.DaemonSetList, items []*v1beta1.DaemonSet) { + list.Items = gentype.FromPointerSlice(items) + }, + ), + fake, } - return obj.(*v1beta1.DaemonSet), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_deployment.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_deployment.go index b81d4a96c..c8fd8281a 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_deployment.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_deployment.go @@ -19,188 +19,49 @@ limitations under the License. package fake import ( - "context" + context "context" json "encoding/json" - "fmt" + fmt "fmt" v1beta1 "k8s.io/api/extensions/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" extensionsv1beta1 "k8s.io/client-go/applyconfigurations/extensions/v1beta1" + gentype "k8s.io/client-go/gentype" + typedextensionsv1beta1 "k8s.io/client-go/kubernetes/typed/extensions/v1beta1" testing "k8s.io/client-go/testing" ) -// FakeDeployments implements DeploymentInterface -type FakeDeployments struct { +// fakeDeployments implements DeploymentInterface +type fakeDeployments struct { + *gentype.FakeClientWithListAndApply[*v1beta1.Deployment, *v1beta1.DeploymentList, *extensionsv1beta1.DeploymentApplyConfiguration] Fake *FakeExtensionsV1beta1 - ns string } -var deploymentsResource = v1beta1.SchemeGroupVersion.WithResource("deployments") - -var deploymentsKind = v1beta1.SchemeGroupVersion.WithKind("Deployment") - -// Get takes name of the deployment, and returns the corresponding deployment object, and an error if there is any. -func (c *FakeDeployments) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.Deployment, err error) { - emptyResult := &v1beta1.Deployment{} - obj, err := c.Fake. 
- Invokes(testing.NewGetActionWithOptions(deploymentsResource, c.ns, name, options), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.Deployment), err -} - -// List takes label and field selectors, and returns the list of Deployments that match those selectors. -func (c *FakeDeployments) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.DeploymentList, err error) { - emptyResult := &v1beta1.DeploymentList{} - obj, err := c.Fake. - Invokes(testing.NewListActionWithOptions(deploymentsResource, deploymentsKind, c.ns, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1beta1.DeploymentList{ListMeta: obj.(*v1beta1.DeploymentList).ListMeta} - for _, item := range obj.(*v1beta1.DeploymentList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested deployments. -func (c *FakeDeployments) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchActionWithOptions(deploymentsResource, c.ns, opts)) - -} - -// Create takes the representation of a deployment and creates it. Returns the server's representation of the deployment, and an error, if there is any. -func (c *FakeDeployments) Create(ctx context.Context, deployment *v1beta1.Deployment, opts v1.CreateOptions) (result *v1beta1.Deployment, err error) { - emptyResult := &v1beta1.Deployment{} - obj, err := c.Fake. - Invokes(testing.NewCreateActionWithOptions(deploymentsResource, c.ns, deployment, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.Deployment), err -} - -// Update takes the representation of a deployment and updates it. Returns the server's representation of the deployment, and an error, if there is any. -func (c *FakeDeployments) Update(ctx context.Context, deployment *v1beta1.Deployment, opts v1.UpdateOptions) (result *v1beta1.Deployment, err error) { - emptyResult := &v1beta1.Deployment{} - obj, err := c.Fake. - Invokes(testing.NewUpdateActionWithOptions(deploymentsResource, c.ns, deployment, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.Deployment), err -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeDeployments) UpdateStatus(ctx context.Context, deployment *v1beta1.Deployment, opts v1.UpdateOptions) (result *v1beta1.Deployment, err error) { - emptyResult := &v1beta1.Deployment{} - obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceActionWithOptions(deploymentsResource, "status", c.ns, deployment, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.Deployment), err -} - -// Delete takes name of the deployment and deletes it. Returns an error if one occurs. -func (c *FakeDeployments) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(deploymentsResource, c.ns, name, opts), &v1beta1.Deployment{}) - - return err -} - -// DeleteCollection deletes a collection of objects. 
-func (c *FakeDeployments) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionActionWithOptions(deploymentsResource, c.ns, opts, listOpts) - - _, err := c.Fake.Invokes(action, &v1beta1.DeploymentList{}) - return err -} - -// Patch applies the patch and returns the patched deployment. -func (c *FakeDeployments) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Deployment, err error) { - emptyResult := &v1beta1.Deployment{} - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceActionWithOptions(deploymentsResource, c.ns, name, pt, data, opts, subresources...), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.Deployment), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied deployment. -func (c *FakeDeployments) Apply(ctx context.Context, deployment *extensionsv1beta1.DeploymentApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.Deployment, err error) { - if deployment == nil { - return nil, fmt.Errorf("deployment provided to Apply must not be nil") - } - data, err := json.Marshal(deployment) - if err != nil { - return nil, err - } - name := deployment.Name - if name == nil { - return nil, fmt.Errorf("deployment.Name must be provided to Apply") - } - emptyResult := &v1beta1.Deployment{} - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceActionWithOptions(deploymentsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.Deployment), err -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *FakeDeployments) ApplyStatus(ctx context.Context, deployment *extensionsv1beta1.DeploymentApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.Deployment, err error) { - if deployment == nil { - return nil, fmt.Errorf("deployment provided to Apply must not be nil") - } - data, err := json.Marshal(deployment) - if err != nil { - return nil, err - } - name := deployment.Name - if name == nil { - return nil, fmt.Errorf("deployment.Name must be provided to Apply") - } - emptyResult := &v1beta1.Deployment{} - obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceActionWithOptions(deploymentsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) - - if obj == nil { - return emptyResult, err +func newFakeDeployments(fake *FakeExtensionsV1beta1, namespace string) typedextensionsv1beta1.DeploymentInterface { + return &fakeDeployments{ + gentype.NewFakeClientWithListAndApply[*v1beta1.Deployment, *v1beta1.DeploymentList, *extensionsv1beta1.DeploymentApplyConfiguration]( + fake.Fake, + namespace, + v1beta1.SchemeGroupVersion.WithResource("deployments"), + v1beta1.SchemeGroupVersion.WithKind("Deployment"), + func() *v1beta1.Deployment { return &v1beta1.Deployment{} }, + func() *v1beta1.DeploymentList { return &v1beta1.DeploymentList{} }, + func(dst, src *v1beta1.DeploymentList) { dst.ListMeta = src.ListMeta }, + func(list *v1beta1.DeploymentList) []*v1beta1.Deployment { return gentype.ToPointerSlice(list.Items) }, + func(list *v1beta1.DeploymentList, items []*v1beta1.Deployment) { + list.Items = gentype.FromPointerSlice(items) + }, + ), + fake, } - return obj.(*v1beta1.Deployment), err } // GetScale takes name of the deployment, and returns the corresponding scale object, and an error if there is any. -func (c *FakeDeployments) GetScale(ctx context.Context, deploymentName string, options v1.GetOptions) (result *v1beta1.Scale, err error) { +func (c *fakeDeployments) GetScale(ctx context.Context, deploymentName string, options v1.GetOptions) (result *v1beta1.Scale, err error) { emptyResult := &v1beta1.Scale{} obj, err := c.Fake. - Invokes(testing.NewGetSubresourceActionWithOptions(deploymentsResource, c.ns, "scale", deploymentName, options), emptyResult) + Invokes(testing.NewGetSubresourceActionWithOptions(c.Resource(), c.Namespace(), "scale", deploymentName, options), emptyResult) if obj == nil { return emptyResult, err @@ -209,10 +70,10 @@ func (c *FakeDeployments) GetScale(ctx context.Context, deploymentName string, o } // UpdateScale takes the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any. -func (c *FakeDeployments) UpdateScale(ctx context.Context, deploymentName string, scale *v1beta1.Scale, opts v1.UpdateOptions) (result *v1beta1.Scale, err error) { +func (c *fakeDeployments) UpdateScale(ctx context.Context, deploymentName string, scale *v1beta1.Scale, opts v1.UpdateOptions) (result *v1beta1.Scale, err error) { emptyResult := &v1beta1.Scale{} obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceActionWithOptions(deploymentsResource, "scale", c.ns, scale, opts), &v1beta1.Scale{}) + Invokes(testing.NewUpdateSubresourceActionWithOptions(c.Resource(), "scale", c.Namespace(), scale, opts), &v1beta1.Scale{}) if obj == nil { return emptyResult, err @@ -222,7 +83,7 @@ func (c *FakeDeployments) UpdateScale(ctx context.Context, deploymentName string // ApplyScale takes top resource name and the apply declarative configuration for scale, // applies it and returns the applied scale, and an error, if there is any. 
-func (c *FakeDeployments) ApplyScale(ctx context.Context, deploymentName string, scale *extensionsv1beta1.ScaleApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.Scale, err error) { +func (c *fakeDeployments) ApplyScale(ctx context.Context, deploymentName string, scale *extensionsv1beta1.ScaleApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.Scale, err error) { if scale == nil { return nil, fmt.Errorf("scale provided to ApplyScale must not be nil") } @@ -232,7 +93,7 @@ func (c *FakeDeployments) ApplyScale(ctx context.Context, deploymentName string, } emptyResult := &v1beta1.Scale{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceActionWithOptions(deploymentsResource, c.ns, deploymentName, types.ApplyPatchType, data, opts.ToPatchOptions(), "scale"), emptyResult) + Invokes(testing.NewPatchSubresourceActionWithOptions(c.Resource(), c.Namespace(), deploymentName, types.ApplyPatchType, data, opts.ToPatchOptions(), "scale"), emptyResult) if obj == nil { return emptyResult, err diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_deployment_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_deployment_expansion.go index 6ea1acd85..faa1cc810 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_deployment_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_deployment_expansion.go @@ -24,10 +24,10 @@ import ( core "k8s.io/client-go/testing" ) -func (c *FakeDeployments) Rollback(ctx context.Context, deploymentRollback *v1beta1.DeploymentRollback, opts metav1.CreateOptions) error { +func (c *fakeDeployments) Rollback(ctx context.Context, deploymentRollback *v1beta1.DeploymentRollback, opts metav1.CreateOptions) error { action := core.CreateActionImpl{} action.Verb = "create" - action.Resource = deploymentsResource + action.Resource = c.Resource() action.Subresource = "rollback" action.Object = deploymentRollback diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_extensions_client.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_extensions_client.go index a54c182ea..87a5ed86e 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_extensions_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_extensions_client.go @@ -29,23 +29,23 @@ type FakeExtensionsV1beta1 struct { } func (c *FakeExtensionsV1beta1) DaemonSets(namespace string) v1beta1.DaemonSetInterface { - return &FakeDaemonSets{c, namespace} + return newFakeDaemonSets(c, namespace) } func (c *FakeExtensionsV1beta1) Deployments(namespace string) v1beta1.DeploymentInterface { - return &FakeDeployments{c, namespace} + return newFakeDeployments(c, namespace) } func (c *FakeExtensionsV1beta1) Ingresses(namespace string) v1beta1.IngressInterface { - return &FakeIngresses{c, namespace} + return newFakeIngresses(c, namespace) } func (c *FakeExtensionsV1beta1) NetworkPolicies(namespace string) v1beta1.NetworkPolicyInterface { - return &FakeNetworkPolicies{c, namespace} + return newFakeNetworkPolicies(c, namespace) } func (c *FakeExtensionsV1beta1) ReplicaSets(namespace string) v1beta1.ReplicaSetInterface { - return &FakeReplicaSets{c, namespace} + return newFakeReplicaSets(c, namespace) } // RESTClient returns a RESTClient that is used to communicate diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_ingress.go 
b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_ingress.go index ae95682fc..aaa43d78a 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_ingress.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_ingress.go @@ -19,179 +19,33 @@ limitations under the License. package fake import ( - "context" - json "encoding/json" - "fmt" - v1beta1 "k8s.io/api/extensions/v1beta1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" extensionsv1beta1 "k8s.io/client-go/applyconfigurations/extensions/v1beta1" - testing "k8s.io/client-go/testing" + gentype "k8s.io/client-go/gentype" + typedextensionsv1beta1 "k8s.io/client-go/kubernetes/typed/extensions/v1beta1" ) -// FakeIngresses implements IngressInterface -type FakeIngresses struct { +// fakeIngresses implements IngressInterface +type fakeIngresses struct { + *gentype.FakeClientWithListAndApply[*v1beta1.Ingress, *v1beta1.IngressList, *extensionsv1beta1.IngressApplyConfiguration] Fake *FakeExtensionsV1beta1 - ns string -} - -var ingressesResource = v1beta1.SchemeGroupVersion.WithResource("ingresses") - -var ingressesKind = v1beta1.SchemeGroupVersion.WithKind("Ingress") - -// Get takes name of the ingress, and returns the corresponding ingress object, and an error if there is any. -func (c *FakeIngresses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.Ingress, err error) { - emptyResult := &v1beta1.Ingress{} - obj, err := c.Fake. - Invokes(testing.NewGetActionWithOptions(ingressesResource, c.ns, name, options), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.Ingress), err -} - -// List takes label and field selectors, and returns the list of Ingresses that match those selectors. -func (c *FakeIngresses) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.IngressList, err error) { - emptyResult := &v1beta1.IngressList{} - obj, err := c.Fake. - Invokes(testing.NewListActionWithOptions(ingressesResource, ingressesKind, c.ns, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1beta1.IngressList{ListMeta: obj.(*v1beta1.IngressList).ListMeta} - for _, item := range obj.(*v1beta1.IngressList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested ingresses. -func (c *FakeIngresses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchActionWithOptions(ingressesResource, c.ns, opts)) - -} - -// Create takes the representation of a ingress and creates it. Returns the server's representation of the ingress, and an error, if there is any. -func (c *FakeIngresses) Create(ctx context.Context, ingress *v1beta1.Ingress, opts v1.CreateOptions) (result *v1beta1.Ingress, err error) { - emptyResult := &v1beta1.Ingress{} - obj, err := c.Fake. - Invokes(testing.NewCreateActionWithOptions(ingressesResource, c.ns, ingress, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.Ingress), err -} - -// Update takes the representation of a ingress and updates it. 
Returns the server's representation of the ingress, and an error, if there is any. -func (c *FakeIngresses) Update(ctx context.Context, ingress *v1beta1.Ingress, opts v1.UpdateOptions) (result *v1beta1.Ingress, err error) { - emptyResult := &v1beta1.Ingress{} - obj, err := c.Fake. - Invokes(testing.NewUpdateActionWithOptions(ingressesResource, c.ns, ingress, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.Ingress), err } -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeIngresses) UpdateStatus(ctx context.Context, ingress *v1beta1.Ingress, opts v1.UpdateOptions) (result *v1beta1.Ingress, err error) { - emptyResult := &v1beta1.Ingress{} - obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceActionWithOptions(ingressesResource, "status", c.ns, ingress, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.Ingress), err -} - -// Delete takes name of the ingress and deletes it. Returns an error if one occurs. -func (c *FakeIngresses) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(ingressesResource, c.ns, name, opts), &v1beta1.Ingress{}) - - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeIngresses) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionActionWithOptions(ingressesResource, c.ns, opts, listOpts) - - _, err := c.Fake.Invokes(action, &v1beta1.IngressList{}) - return err -} - -// Patch applies the patch and returns the patched ingress. -func (c *FakeIngresses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Ingress, err error) { - emptyResult := &v1beta1.Ingress{} - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceActionWithOptions(ingressesResource, c.ns, name, pt, data, opts, subresources...), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.Ingress), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied ingress. -func (c *FakeIngresses) Apply(ctx context.Context, ingress *extensionsv1beta1.IngressApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.Ingress, err error) { - if ingress == nil { - return nil, fmt.Errorf("ingress provided to Apply must not be nil") - } - data, err := json.Marshal(ingress) - if err != nil { - return nil, err - } - name := ingress.Name - if name == nil { - return nil, fmt.Errorf("ingress.Name must be provided to Apply") - } - emptyResult := &v1beta1.Ingress{} - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceActionWithOptions(ingressesResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.Ingress), err -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
-func (c *FakeIngresses) ApplyStatus(ctx context.Context, ingress *extensionsv1beta1.IngressApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.Ingress, err error) { - if ingress == nil { - return nil, fmt.Errorf("ingress provided to Apply must not be nil") - } - data, err := json.Marshal(ingress) - if err != nil { - return nil, err - } - name := ingress.Name - if name == nil { - return nil, fmt.Errorf("ingress.Name must be provided to Apply") - } - emptyResult := &v1beta1.Ingress{} - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceActionWithOptions(ingressesResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) - - if obj == nil { - return emptyResult, err +func newFakeIngresses(fake *FakeExtensionsV1beta1, namespace string) typedextensionsv1beta1.IngressInterface { + return &fakeIngresses{ + gentype.NewFakeClientWithListAndApply[*v1beta1.Ingress, *v1beta1.IngressList, *extensionsv1beta1.IngressApplyConfiguration]( + fake.Fake, + namespace, + v1beta1.SchemeGroupVersion.WithResource("ingresses"), + v1beta1.SchemeGroupVersion.WithKind("Ingress"), + func() *v1beta1.Ingress { return &v1beta1.Ingress{} }, + func() *v1beta1.IngressList { return &v1beta1.IngressList{} }, + func(dst, src *v1beta1.IngressList) { dst.ListMeta = src.ListMeta }, + func(list *v1beta1.IngressList) []*v1beta1.Ingress { return gentype.ToPointerSlice(list.Items) }, + func(list *v1beta1.IngressList, items []*v1beta1.Ingress) { + list.Items = gentype.FromPointerSlice(items) + }, + ), + fake, } - return obj.(*v1beta1.Ingress), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_networkpolicy.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_networkpolicy.go index d829a0c63..30f53b997 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_networkpolicy.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_networkpolicy.go @@ -19,142 +19,35 @@ limitations under the License. package fake import ( - "context" - json "encoding/json" - "fmt" - v1beta1 "k8s.io/api/extensions/v1beta1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" extensionsv1beta1 "k8s.io/client-go/applyconfigurations/extensions/v1beta1" - testing "k8s.io/client-go/testing" + gentype "k8s.io/client-go/gentype" + typedextensionsv1beta1 "k8s.io/client-go/kubernetes/typed/extensions/v1beta1" ) -// FakeNetworkPolicies implements NetworkPolicyInterface -type FakeNetworkPolicies struct { +// fakeNetworkPolicies implements NetworkPolicyInterface +type fakeNetworkPolicies struct { + *gentype.FakeClientWithListAndApply[*v1beta1.NetworkPolicy, *v1beta1.NetworkPolicyList, *extensionsv1beta1.NetworkPolicyApplyConfiguration] Fake *FakeExtensionsV1beta1 - ns string -} - -var networkpoliciesResource = v1beta1.SchemeGroupVersion.WithResource("networkpolicies") - -var networkpoliciesKind = v1beta1.SchemeGroupVersion.WithKind("NetworkPolicy") - -// Get takes name of the networkPolicy, and returns the corresponding networkPolicy object, and an error if there is any. -func (c *FakeNetworkPolicies) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.NetworkPolicy, err error) { - emptyResult := &v1beta1.NetworkPolicy{} - obj, err := c.Fake. 
- Invokes(testing.NewGetActionWithOptions(networkpoliciesResource, c.ns, name, options), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.NetworkPolicy), err -} - -// List takes label and field selectors, and returns the list of NetworkPolicies that match those selectors. -func (c *FakeNetworkPolicies) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.NetworkPolicyList, err error) { - emptyResult := &v1beta1.NetworkPolicyList{} - obj, err := c.Fake. - Invokes(testing.NewListActionWithOptions(networkpoliciesResource, networkpoliciesKind, c.ns, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1beta1.NetworkPolicyList{ListMeta: obj.(*v1beta1.NetworkPolicyList).ListMeta} - for _, item := range obj.(*v1beta1.NetworkPolicyList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested networkPolicies. -func (c *FakeNetworkPolicies) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchActionWithOptions(networkpoliciesResource, c.ns, opts)) - } -// Create takes the representation of a networkPolicy and creates it. Returns the server's representation of the networkPolicy, and an error, if there is any. -func (c *FakeNetworkPolicies) Create(ctx context.Context, networkPolicy *v1beta1.NetworkPolicy, opts v1.CreateOptions) (result *v1beta1.NetworkPolicy, err error) { - emptyResult := &v1beta1.NetworkPolicy{} - obj, err := c.Fake. - Invokes(testing.NewCreateActionWithOptions(networkpoliciesResource, c.ns, networkPolicy, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.NetworkPolicy), err -} - -// Update takes the representation of a networkPolicy and updates it. Returns the server's representation of the networkPolicy, and an error, if there is any. -func (c *FakeNetworkPolicies) Update(ctx context.Context, networkPolicy *v1beta1.NetworkPolicy, opts v1.UpdateOptions) (result *v1beta1.NetworkPolicy, err error) { - emptyResult := &v1beta1.NetworkPolicy{} - obj, err := c.Fake. - Invokes(testing.NewUpdateActionWithOptions(networkpoliciesResource, c.ns, networkPolicy, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.NetworkPolicy), err -} - -// Delete takes name of the networkPolicy and deletes it. Returns an error if one occurs. -func (c *FakeNetworkPolicies) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(networkpoliciesResource, c.ns, name, opts), &v1beta1.NetworkPolicy{}) - - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeNetworkPolicies) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionActionWithOptions(networkpoliciesResource, c.ns, opts, listOpts) - - _, err := c.Fake.Invokes(action, &v1beta1.NetworkPolicyList{}) - return err -} - -// Patch applies the patch and returns the patched networkPolicy. 
-func (c *FakeNetworkPolicies) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.NetworkPolicy, err error) { - emptyResult := &v1beta1.NetworkPolicy{} - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceActionWithOptions(networkpoliciesResource, c.ns, name, pt, data, opts, subresources...), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.NetworkPolicy), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied networkPolicy. -func (c *FakeNetworkPolicies) Apply(ctx context.Context, networkPolicy *extensionsv1beta1.NetworkPolicyApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.NetworkPolicy, err error) { - if networkPolicy == nil { - return nil, fmt.Errorf("networkPolicy provided to Apply must not be nil") - } - data, err := json.Marshal(networkPolicy) - if err != nil { - return nil, err - } - name := networkPolicy.Name - if name == nil { - return nil, fmt.Errorf("networkPolicy.Name must be provided to Apply") - } - emptyResult := &v1beta1.NetworkPolicy{} - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceActionWithOptions(networkpoliciesResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) - - if obj == nil { - return emptyResult, err +func newFakeNetworkPolicies(fake *FakeExtensionsV1beta1, namespace string) typedextensionsv1beta1.NetworkPolicyInterface { + return &fakeNetworkPolicies{ + gentype.NewFakeClientWithListAndApply[*v1beta1.NetworkPolicy, *v1beta1.NetworkPolicyList, *extensionsv1beta1.NetworkPolicyApplyConfiguration]( + fake.Fake, + namespace, + v1beta1.SchemeGroupVersion.WithResource("networkpolicies"), + v1beta1.SchemeGroupVersion.WithKind("NetworkPolicy"), + func() *v1beta1.NetworkPolicy { return &v1beta1.NetworkPolicy{} }, + func() *v1beta1.NetworkPolicyList { return &v1beta1.NetworkPolicyList{} }, + func(dst, src *v1beta1.NetworkPolicyList) { dst.ListMeta = src.ListMeta }, + func(list *v1beta1.NetworkPolicyList) []*v1beta1.NetworkPolicy { + return gentype.ToPointerSlice(list.Items) + }, + func(list *v1beta1.NetworkPolicyList, items []*v1beta1.NetworkPolicy) { + list.Items = gentype.FromPointerSlice(items) + }, + ), + fake, } - return obj.(*v1beta1.NetworkPolicy), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_replicaset.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_replicaset.go index 5d94ba73b..86ae1621d 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_replicaset.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_replicaset.go @@ -19,188 +19,49 @@ limitations under the License. 
package fake import ( - "context" + context "context" json "encoding/json" - "fmt" + fmt "fmt" v1beta1 "k8s.io/api/extensions/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" extensionsv1beta1 "k8s.io/client-go/applyconfigurations/extensions/v1beta1" + gentype "k8s.io/client-go/gentype" + typedextensionsv1beta1 "k8s.io/client-go/kubernetes/typed/extensions/v1beta1" testing "k8s.io/client-go/testing" ) -// FakeReplicaSets implements ReplicaSetInterface -type FakeReplicaSets struct { +// fakeReplicaSets implements ReplicaSetInterface +type fakeReplicaSets struct { + *gentype.FakeClientWithListAndApply[*v1beta1.ReplicaSet, *v1beta1.ReplicaSetList, *extensionsv1beta1.ReplicaSetApplyConfiguration] Fake *FakeExtensionsV1beta1 - ns string } -var replicasetsResource = v1beta1.SchemeGroupVersion.WithResource("replicasets") - -var replicasetsKind = v1beta1.SchemeGroupVersion.WithKind("ReplicaSet") - -// Get takes name of the replicaSet, and returns the corresponding replicaSet object, and an error if there is any. -func (c *FakeReplicaSets) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.ReplicaSet, err error) { - emptyResult := &v1beta1.ReplicaSet{} - obj, err := c.Fake. - Invokes(testing.NewGetActionWithOptions(replicasetsResource, c.ns, name, options), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.ReplicaSet), err -} - -// List takes label and field selectors, and returns the list of ReplicaSets that match those selectors. -func (c *FakeReplicaSets) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.ReplicaSetList, err error) { - emptyResult := &v1beta1.ReplicaSetList{} - obj, err := c.Fake. - Invokes(testing.NewListActionWithOptions(replicasetsResource, replicasetsKind, c.ns, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1beta1.ReplicaSetList{ListMeta: obj.(*v1beta1.ReplicaSetList).ListMeta} - for _, item := range obj.(*v1beta1.ReplicaSetList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested replicaSets. -func (c *FakeReplicaSets) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchActionWithOptions(replicasetsResource, c.ns, opts)) - -} - -// Create takes the representation of a replicaSet and creates it. Returns the server's representation of the replicaSet, and an error, if there is any. -func (c *FakeReplicaSets) Create(ctx context.Context, replicaSet *v1beta1.ReplicaSet, opts v1.CreateOptions) (result *v1beta1.ReplicaSet, err error) { - emptyResult := &v1beta1.ReplicaSet{} - obj, err := c.Fake. - Invokes(testing.NewCreateActionWithOptions(replicasetsResource, c.ns, replicaSet, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.ReplicaSet), err -} - -// Update takes the representation of a replicaSet and updates it. Returns the server's representation of the replicaSet, and an error, if there is any. 
-func (c *FakeReplicaSets) Update(ctx context.Context, replicaSet *v1beta1.ReplicaSet, opts v1.UpdateOptions) (result *v1beta1.ReplicaSet, err error) { - emptyResult := &v1beta1.ReplicaSet{} - obj, err := c.Fake. - Invokes(testing.NewUpdateActionWithOptions(replicasetsResource, c.ns, replicaSet, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.ReplicaSet), err -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeReplicaSets) UpdateStatus(ctx context.Context, replicaSet *v1beta1.ReplicaSet, opts v1.UpdateOptions) (result *v1beta1.ReplicaSet, err error) { - emptyResult := &v1beta1.ReplicaSet{} - obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceActionWithOptions(replicasetsResource, "status", c.ns, replicaSet, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.ReplicaSet), err -} - -// Delete takes name of the replicaSet and deletes it. Returns an error if one occurs. -func (c *FakeReplicaSets) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(replicasetsResource, c.ns, name, opts), &v1beta1.ReplicaSet{}) - - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeReplicaSets) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionActionWithOptions(replicasetsResource, c.ns, opts, listOpts) - - _, err := c.Fake.Invokes(action, &v1beta1.ReplicaSetList{}) - return err -} - -// Patch applies the patch and returns the patched replicaSet. -func (c *FakeReplicaSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ReplicaSet, err error) { - emptyResult := &v1beta1.ReplicaSet{} - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceActionWithOptions(replicasetsResource, c.ns, name, pt, data, opts, subresources...), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.ReplicaSet), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied replicaSet. -func (c *FakeReplicaSets) Apply(ctx context.Context, replicaSet *extensionsv1beta1.ReplicaSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.ReplicaSet, err error) { - if replicaSet == nil { - return nil, fmt.Errorf("replicaSet provided to Apply must not be nil") - } - data, err := json.Marshal(replicaSet) - if err != nil { - return nil, err - } - name := replicaSet.Name - if name == nil { - return nil, fmt.Errorf("replicaSet.Name must be provided to Apply") - } - emptyResult := &v1beta1.ReplicaSet{} - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceActionWithOptions(replicasetsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.ReplicaSet), err -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
-func (c *FakeReplicaSets) ApplyStatus(ctx context.Context, replicaSet *extensionsv1beta1.ReplicaSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.ReplicaSet, err error) { - if replicaSet == nil { - return nil, fmt.Errorf("replicaSet provided to Apply must not be nil") - } - data, err := json.Marshal(replicaSet) - if err != nil { - return nil, err - } - name := replicaSet.Name - if name == nil { - return nil, fmt.Errorf("replicaSet.Name must be provided to Apply") - } - emptyResult := &v1beta1.ReplicaSet{} - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceActionWithOptions(replicasetsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) - - if obj == nil { - return emptyResult, err +func newFakeReplicaSets(fake *FakeExtensionsV1beta1, namespace string) typedextensionsv1beta1.ReplicaSetInterface { + return &fakeReplicaSets{ + gentype.NewFakeClientWithListAndApply[*v1beta1.ReplicaSet, *v1beta1.ReplicaSetList, *extensionsv1beta1.ReplicaSetApplyConfiguration]( + fake.Fake, + namespace, + v1beta1.SchemeGroupVersion.WithResource("replicasets"), + v1beta1.SchemeGroupVersion.WithKind("ReplicaSet"), + func() *v1beta1.ReplicaSet { return &v1beta1.ReplicaSet{} }, + func() *v1beta1.ReplicaSetList { return &v1beta1.ReplicaSetList{} }, + func(dst, src *v1beta1.ReplicaSetList) { dst.ListMeta = src.ListMeta }, + func(list *v1beta1.ReplicaSetList) []*v1beta1.ReplicaSet { return gentype.ToPointerSlice(list.Items) }, + func(list *v1beta1.ReplicaSetList, items []*v1beta1.ReplicaSet) { + list.Items = gentype.FromPointerSlice(items) + }, + ), + fake, } - return obj.(*v1beta1.ReplicaSet), err } // GetScale takes name of the replicaSet, and returns the corresponding scale object, and an error if there is any. -func (c *FakeReplicaSets) GetScale(ctx context.Context, replicaSetName string, options v1.GetOptions) (result *v1beta1.Scale, err error) { +func (c *fakeReplicaSets) GetScale(ctx context.Context, replicaSetName string, options v1.GetOptions) (result *v1beta1.Scale, err error) { emptyResult := &v1beta1.Scale{} obj, err := c.Fake. - Invokes(testing.NewGetSubresourceActionWithOptions(replicasetsResource, c.ns, "scale", replicaSetName, options), emptyResult) + Invokes(testing.NewGetSubresourceActionWithOptions(c.Resource(), c.Namespace(), "scale", replicaSetName, options), emptyResult) if obj == nil { return emptyResult, err @@ -209,10 +70,10 @@ func (c *FakeReplicaSets) GetScale(ctx context.Context, replicaSetName string, o } // UpdateScale takes the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any. -func (c *FakeReplicaSets) UpdateScale(ctx context.Context, replicaSetName string, scale *v1beta1.Scale, opts v1.UpdateOptions) (result *v1beta1.Scale, err error) { +func (c *fakeReplicaSets) UpdateScale(ctx context.Context, replicaSetName string, scale *v1beta1.Scale, opts v1.UpdateOptions) (result *v1beta1.Scale, err error) { emptyResult := &v1beta1.Scale{} obj, err := c.Fake. 
- Invokes(testing.NewUpdateSubresourceActionWithOptions(replicasetsResource, "scale", c.ns, scale, opts), &v1beta1.Scale{}) + Invokes(testing.NewUpdateSubresourceActionWithOptions(c.Resource(), "scale", c.Namespace(), scale, opts), &v1beta1.Scale{}) if obj == nil { return emptyResult, err @@ -222,7 +83,7 @@ func (c *FakeReplicaSets) UpdateScale(ctx context.Context, replicaSetName string // ApplyScale takes top resource name and the apply declarative configuration for scale, // applies it and returns the applied scale, and an error, if there is any. -func (c *FakeReplicaSets) ApplyScale(ctx context.Context, replicaSetName string, scale *extensionsv1beta1.ScaleApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.Scale, err error) { +func (c *fakeReplicaSets) ApplyScale(ctx context.Context, replicaSetName string, scale *extensionsv1beta1.ScaleApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.Scale, err error) { if scale == nil { return nil, fmt.Errorf("scale provided to ApplyScale must not be nil") } @@ -232,7 +93,7 @@ func (c *FakeReplicaSets) ApplyScale(ctx context.Context, replicaSetName string, } emptyResult := &v1beta1.Scale{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceActionWithOptions(replicasetsResource, c.ns, replicaSetName, types.ApplyPatchType, data, opts.ToPatchOptions(), "scale"), emptyResult) + Invokes(testing.NewPatchSubresourceActionWithOptions(c.Resource(), c.Namespace(), replicaSetName, types.ApplyPatchType, data, opts.ToPatchOptions(), "scale"), emptyResult) if obj == nil { return emptyResult, err diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/ingress.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/ingress.go index 4511c93fc..9a24621ea 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/ingress.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/ingress.go @@ -19,13 +19,13 @@ limitations under the License. package v1beta1 import ( - "context" + context "context" - v1beta1 "k8s.io/api/extensions/v1beta1" + extensionsv1beta1 "k8s.io/api/extensions/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - extensionsv1beta1 "k8s.io/client-go/applyconfigurations/extensions/v1beta1" + applyconfigurationsextensionsv1beta1 "k8s.io/client-go/applyconfigurations/extensions/v1beta1" gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" ) @@ -38,36 +38,38 @@ type IngressesGetter interface { // IngressInterface has methods to work with Ingress resources. type IngressInterface interface { - Create(ctx context.Context, ingress *v1beta1.Ingress, opts v1.CreateOptions) (*v1beta1.Ingress, error) - Update(ctx context.Context, ingress *v1beta1.Ingress, opts v1.UpdateOptions) (*v1beta1.Ingress, error) + Create(ctx context.Context, ingress *extensionsv1beta1.Ingress, opts v1.CreateOptions) (*extensionsv1beta1.Ingress, error) + Update(ctx context.Context, ingress *extensionsv1beta1.Ingress, opts v1.UpdateOptions) (*extensionsv1beta1.Ingress, error) // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
- UpdateStatus(ctx context.Context, ingress *v1beta1.Ingress, opts v1.UpdateOptions) (*v1beta1.Ingress, error) + UpdateStatus(ctx context.Context, ingress *extensionsv1beta1.Ingress, opts v1.UpdateOptions) (*extensionsv1beta1.Ingress, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.Ingress, error) - List(ctx context.Context, opts v1.ListOptions) (*v1beta1.IngressList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*extensionsv1beta1.Ingress, error) + List(ctx context.Context, opts v1.ListOptions) (*extensionsv1beta1.IngressList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Ingress, err error) - Apply(ctx context.Context, ingress *extensionsv1beta1.IngressApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.Ingress, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *extensionsv1beta1.Ingress, err error) + Apply(ctx context.Context, ingress *applyconfigurationsextensionsv1beta1.IngressApplyConfiguration, opts v1.ApplyOptions) (result *extensionsv1beta1.Ingress, err error) // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). - ApplyStatus(ctx context.Context, ingress *extensionsv1beta1.IngressApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.Ingress, err error) + ApplyStatus(ctx context.Context, ingress *applyconfigurationsextensionsv1beta1.IngressApplyConfiguration, opts v1.ApplyOptions) (result *extensionsv1beta1.Ingress, err error) IngressExpansion } // ingresses implements IngressInterface type ingresses struct { - *gentype.ClientWithListAndApply[*v1beta1.Ingress, *v1beta1.IngressList, *extensionsv1beta1.IngressApplyConfiguration] + *gentype.ClientWithListAndApply[*extensionsv1beta1.Ingress, *extensionsv1beta1.IngressList, *applyconfigurationsextensionsv1beta1.IngressApplyConfiguration] } // newIngresses returns a Ingresses func newIngresses(c *ExtensionsV1beta1Client, namespace string) *ingresses { return &ingresses{ - gentype.NewClientWithListAndApply[*v1beta1.Ingress, *v1beta1.IngressList, *extensionsv1beta1.IngressApplyConfiguration]( + gentype.NewClientWithListAndApply[*extensionsv1beta1.Ingress, *extensionsv1beta1.IngressList, *applyconfigurationsextensionsv1beta1.IngressApplyConfiguration]( "ingresses", c.RESTClient(), scheme.ParameterCodec, namespace, - func() *v1beta1.Ingress { return &v1beta1.Ingress{} }, - func() *v1beta1.IngressList { return &v1beta1.IngressList{} }), + func() *extensionsv1beta1.Ingress { return &extensionsv1beta1.Ingress{} }, + func() *extensionsv1beta1.IngressList { return &extensionsv1beta1.IngressList{} }, + gentype.PrefersProtobuf[*extensionsv1beta1.Ingress](), + ), } } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/networkpolicy.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/networkpolicy.go index afa8203c3..ce6a45a27 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/networkpolicy.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/networkpolicy.go @@ -19,13 +19,13 @@ limitations under the License. 
package v1beta1 import ( - "context" + context "context" - v1beta1 "k8s.io/api/extensions/v1beta1" + extensionsv1beta1 "k8s.io/api/extensions/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - extensionsv1beta1 "k8s.io/client-go/applyconfigurations/extensions/v1beta1" + applyconfigurationsextensionsv1beta1 "k8s.io/client-go/applyconfigurations/extensions/v1beta1" gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" ) @@ -38,32 +38,34 @@ type NetworkPoliciesGetter interface { // NetworkPolicyInterface has methods to work with NetworkPolicy resources. type NetworkPolicyInterface interface { - Create(ctx context.Context, networkPolicy *v1beta1.NetworkPolicy, opts v1.CreateOptions) (*v1beta1.NetworkPolicy, error) - Update(ctx context.Context, networkPolicy *v1beta1.NetworkPolicy, opts v1.UpdateOptions) (*v1beta1.NetworkPolicy, error) + Create(ctx context.Context, networkPolicy *extensionsv1beta1.NetworkPolicy, opts v1.CreateOptions) (*extensionsv1beta1.NetworkPolicy, error) + Update(ctx context.Context, networkPolicy *extensionsv1beta1.NetworkPolicy, opts v1.UpdateOptions) (*extensionsv1beta1.NetworkPolicy, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.NetworkPolicy, error) - List(ctx context.Context, opts v1.ListOptions) (*v1beta1.NetworkPolicyList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*extensionsv1beta1.NetworkPolicy, error) + List(ctx context.Context, opts v1.ListOptions) (*extensionsv1beta1.NetworkPolicyList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.NetworkPolicy, err error) - Apply(ctx context.Context, networkPolicy *extensionsv1beta1.NetworkPolicyApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.NetworkPolicy, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *extensionsv1beta1.NetworkPolicy, err error) + Apply(ctx context.Context, networkPolicy *applyconfigurationsextensionsv1beta1.NetworkPolicyApplyConfiguration, opts v1.ApplyOptions) (result *extensionsv1beta1.NetworkPolicy, err error) NetworkPolicyExpansion } // networkPolicies implements NetworkPolicyInterface type networkPolicies struct { - *gentype.ClientWithListAndApply[*v1beta1.NetworkPolicy, *v1beta1.NetworkPolicyList, *extensionsv1beta1.NetworkPolicyApplyConfiguration] + *gentype.ClientWithListAndApply[*extensionsv1beta1.NetworkPolicy, *extensionsv1beta1.NetworkPolicyList, *applyconfigurationsextensionsv1beta1.NetworkPolicyApplyConfiguration] } // newNetworkPolicies returns a NetworkPolicies func newNetworkPolicies(c *ExtensionsV1beta1Client, namespace string) *networkPolicies { return &networkPolicies{ - gentype.NewClientWithListAndApply[*v1beta1.NetworkPolicy, *v1beta1.NetworkPolicyList, *extensionsv1beta1.NetworkPolicyApplyConfiguration]( + gentype.NewClientWithListAndApply[*extensionsv1beta1.NetworkPolicy, *extensionsv1beta1.NetworkPolicyList, *applyconfigurationsextensionsv1beta1.NetworkPolicyApplyConfiguration]( "networkpolicies", c.RESTClient(), scheme.ParameterCodec, namespace, - func() *v1beta1.NetworkPolicy { return 
&v1beta1.NetworkPolicy{} }, - func() *v1beta1.NetworkPolicyList { return &v1beta1.NetworkPolicyList{} }), + func() *extensionsv1beta1.NetworkPolicy { return &extensionsv1beta1.NetworkPolicy{} }, + func() *extensionsv1beta1.NetworkPolicyList { return &extensionsv1beta1.NetworkPolicyList{} }, + gentype.PrefersProtobuf[*extensionsv1beta1.NetworkPolicy](), + ), } } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/replicaset.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/replicaset.go index 8973948f3..f918be417 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/replicaset.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/replicaset.go @@ -19,17 +19,17 @@ limitations under the License. package v1beta1 import ( - "context" - json "encoding/json" - "fmt" + context "context" + fmt "fmt" - v1beta1 "k8s.io/api/extensions/v1beta1" + extensionsv1beta1 "k8s.io/api/extensions/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - extensionsv1beta1 "k8s.io/client-go/applyconfigurations/extensions/v1beta1" + applyconfigurationsextensionsv1beta1 "k8s.io/client-go/applyconfigurations/extensions/v1beta1" gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" + apply "k8s.io/client-go/util/apply" ) // ReplicaSetsGetter has a method to return a ReplicaSetInterface. @@ -40,48 +40,51 @@ type ReplicaSetsGetter interface { // ReplicaSetInterface has methods to work with ReplicaSet resources. type ReplicaSetInterface interface { - Create(ctx context.Context, replicaSet *v1beta1.ReplicaSet, opts v1.CreateOptions) (*v1beta1.ReplicaSet, error) - Update(ctx context.Context, replicaSet *v1beta1.ReplicaSet, opts v1.UpdateOptions) (*v1beta1.ReplicaSet, error) + Create(ctx context.Context, replicaSet *extensionsv1beta1.ReplicaSet, opts v1.CreateOptions) (*extensionsv1beta1.ReplicaSet, error) + Update(ctx context.Context, replicaSet *extensionsv1beta1.ReplicaSet, opts v1.UpdateOptions) (*extensionsv1beta1.ReplicaSet, error) // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
- UpdateStatus(ctx context.Context, replicaSet *v1beta1.ReplicaSet, opts v1.UpdateOptions) (*v1beta1.ReplicaSet, error) + UpdateStatus(ctx context.Context, replicaSet *extensionsv1beta1.ReplicaSet, opts v1.UpdateOptions) (*extensionsv1beta1.ReplicaSet, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.ReplicaSet, error) - List(ctx context.Context, opts v1.ListOptions) (*v1beta1.ReplicaSetList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*extensionsv1beta1.ReplicaSet, error) + List(ctx context.Context, opts v1.ListOptions) (*extensionsv1beta1.ReplicaSetList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ReplicaSet, err error) - Apply(ctx context.Context, replicaSet *extensionsv1beta1.ReplicaSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.ReplicaSet, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *extensionsv1beta1.ReplicaSet, err error) + Apply(ctx context.Context, replicaSet *applyconfigurationsextensionsv1beta1.ReplicaSetApplyConfiguration, opts v1.ApplyOptions) (result *extensionsv1beta1.ReplicaSet, err error) // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). - ApplyStatus(ctx context.Context, replicaSet *extensionsv1beta1.ReplicaSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.ReplicaSet, err error) - GetScale(ctx context.Context, replicaSetName string, options v1.GetOptions) (*v1beta1.Scale, error) - UpdateScale(ctx context.Context, replicaSetName string, scale *v1beta1.Scale, opts v1.UpdateOptions) (*v1beta1.Scale, error) - ApplyScale(ctx context.Context, replicaSetName string, scale *extensionsv1beta1.ScaleApplyConfiguration, opts v1.ApplyOptions) (*v1beta1.Scale, error) + ApplyStatus(ctx context.Context, replicaSet *applyconfigurationsextensionsv1beta1.ReplicaSetApplyConfiguration, opts v1.ApplyOptions) (result *extensionsv1beta1.ReplicaSet, err error) + GetScale(ctx context.Context, replicaSetName string, options v1.GetOptions) (*extensionsv1beta1.Scale, error) + UpdateScale(ctx context.Context, replicaSetName string, scale *extensionsv1beta1.Scale, opts v1.UpdateOptions) (*extensionsv1beta1.Scale, error) + ApplyScale(ctx context.Context, replicaSetName string, scale *applyconfigurationsextensionsv1beta1.ScaleApplyConfiguration, opts v1.ApplyOptions) (*extensionsv1beta1.Scale, error) ReplicaSetExpansion } // replicaSets implements ReplicaSetInterface type replicaSets struct { - *gentype.ClientWithListAndApply[*v1beta1.ReplicaSet, *v1beta1.ReplicaSetList, *extensionsv1beta1.ReplicaSetApplyConfiguration] + *gentype.ClientWithListAndApply[*extensionsv1beta1.ReplicaSet, *extensionsv1beta1.ReplicaSetList, *applyconfigurationsextensionsv1beta1.ReplicaSetApplyConfiguration] } // newReplicaSets returns a ReplicaSets func newReplicaSets(c *ExtensionsV1beta1Client, namespace string) *replicaSets { return &replicaSets{ - gentype.NewClientWithListAndApply[*v1beta1.ReplicaSet, *v1beta1.ReplicaSetList, *extensionsv1beta1.ReplicaSetApplyConfiguration]( + gentype.NewClientWithListAndApply[*extensionsv1beta1.ReplicaSet, *extensionsv1beta1.ReplicaSetList, 
*applyconfigurationsextensionsv1beta1.ReplicaSetApplyConfiguration]( "replicasets", c.RESTClient(), scheme.ParameterCodec, namespace, - func() *v1beta1.ReplicaSet { return &v1beta1.ReplicaSet{} }, - func() *v1beta1.ReplicaSetList { return &v1beta1.ReplicaSetList{} }), + func() *extensionsv1beta1.ReplicaSet { return &extensionsv1beta1.ReplicaSet{} }, + func() *extensionsv1beta1.ReplicaSetList { return &extensionsv1beta1.ReplicaSetList{} }, + gentype.PrefersProtobuf[*extensionsv1beta1.ReplicaSet](), + ), } } -// GetScale takes name of the replicaSet, and returns the corresponding v1beta1.Scale object, and an error if there is any. -func (c *replicaSets) GetScale(ctx context.Context, replicaSetName string, options v1.GetOptions) (result *v1beta1.Scale, err error) { - result = &v1beta1.Scale{} +// GetScale takes name of the replicaSet, and returns the corresponding extensionsv1beta1.Scale object, and an error if there is any. +func (c *replicaSets) GetScale(ctx context.Context, replicaSetName string, options v1.GetOptions) (result *extensionsv1beta1.Scale, err error) { + result = &extensionsv1beta1.Scale{} err = c.GetClient().Get(). + UseProtobufAsDefault(). Namespace(c.GetNamespace()). Resource("replicasets"). Name(replicaSetName). @@ -93,9 +96,10 @@ func (c *replicaSets) GetScale(ctx context.Context, replicaSetName string, optio } // UpdateScale takes the top resource name and the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any. -func (c *replicaSets) UpdateScale(ctx context.Context, replicaSetName string, scale *v1beta1.Scale, opts v1.UpdateOptions) (result *v1beta1.Scale, err error) { - result = &v1beta1.Scale{} +func (c *replicaSets) UpdateScale(ctx context.Context, replicaSetName string, scale *extensionsv1beta1.Scale, opts v1.UpdateOptions) (result *extensionsv1beta1.Scale, err error) { + result = &extensionsv1beta1.Scale{} err = c.GetClient().Put(). + UseProtobufAsDefault(). Namespace(c.GetNamespace()). Resource("replicasets"). Name(replicaSetName). @@ -109,24 +113,24 @@ func (c *replicaSets) UpdateScale(ctx context.Context, replicaSetName string, sc // ApplyScale takes top resource name and the apply declarative configuration for scale, // applies it and returns the applied scale, and an error, if there is any. -func (c *replicaSets) ApplyScale(ctx context.Context, replicaSetName string, scale *extensionsv1beta1.ScaleApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.Scale, err error) { +func (c *replicaSets) ApplyScale(ctx context.Context, replicaSetName string, scale *applyconfigurationsextensionsv1beta1.ScaleApplyConfiguration, opts v1.ApplyOptions) (result *extensionsv1beta1.Scale, err error) { if scale == nil { return nil, fmt.Errorf("scale provided to ApplyScale must not be nil") } patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(scale) + request, err := apply.NewRequest(c.GetClient(), scale) if err != nil { return nil, err } - result = &v1beta1.Scale{} - err = c.GetClient().Patch(types.ApplyPatchType). + result = &extensionsv1beta1.Scale{} + err = request. + UseProtobufAsDefault(). Namespace(c.GetNamespace()). Resource("replicasets"). Name(replicaSetName). SubResource("scale"). VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). Do(ctx). 
Into(result) return diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/fake/fake_flowcontrol_client.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/fake/fake_flowcontrol_client.go index d15f4b242..76238d617 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/fake/fake_flowcontrol_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/fake/fake_flowcontrol_client.go @@ -29,11 +29,11 @@ type FakeFlowcontrolV1 struct { } func (c *FakeFlowcontrolV1) FlowSchemas() v1.FlowSchemaInterface { - return &FakeFlowSchemas{c} + return newFakeFlowSchemas(c) } func (c *FakeFlowcontrolV1) PriorityLevelConfigurations() v1.PriorityLevelConfigurationInterface { - return &FakePriorityLevelConfigurations{c} + return newFakePriorityLevelConfigurations(c) } // RESTClient returns a RESTClient that is used to communicate diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/fake/fake_flowschema.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/fake/fake_flowschema.go index bf2b63fb2..6f9405f51 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/fake/fake_flowschema.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/fake/fake_flowschema.go @@ -19,168 +19,31 @@ limitations under the License. package fake import ( - "context" - json "encoding/json" - "fmt" - v1 "k8s.io/api/flowcontrol/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" flowcontrolv1 "k8s.io/client-go/applyconfigurations/flowcontrol/v1" - testing "k8s.io/client-go/testing" + gentype "k8s.io/client-go/gentype" + typedflowcontrolv1 "k8s.io/client-go/kubernetes/typed/flowcontrol/v1" ) -// FakeFlowSchemas implements FlowSchemaInterface -type FakeFlowSchemas struct { +// fakeFlowSchemas implements FlowSchemaInterface +type fakeFlowSchemas struct { + *gentype.FakeClientWithListAndApply[*v1.FlowSchema, *v1.FlowSchemaList, *flowcontrolv1.FlowSchemaApplyConfiguration] Fake *FakeFlowcontrolV1 } -var flowschemasResource = v1.SchemeGroupVersion.WithResource("flowschemas") - -var flowschemasKind = v1.SchemeGroupVersion.WithKind("FlowSchema") - -// Get takes name of the flowSchema, and returns the corresponding flowSchema object, and an error if there is any. -func (c *FakeFlowSchemas) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.FlowSchema, err error) { - emptyResult := &v1.FlowSchema{} - obj, err := c.Fake. - Invokes(testing.NewRootGetActionWithOptions(flowschemasResource, name, options), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1.FlowSchema), err -} - -// List takes label and field selectors, and returns the list of FlowSchemas that match those selectors. -func (c *FakeFlowSchemas) List(ctx context.Context, opts metav1.ListOptions) (result *v1.FlowSchemaList, err error) { - emptyResult := &v1.FlowSchemaList{} - obj, err := c.Fake. 
- Invokes(testing.NewRootListActionWithOptions(flowschemasResource, flowschemasKind, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1.FlowSchemaList{ListMeta: obj.(*v1.FlowSchemaList).ListMeta} - for _, item := range obj.(*v1.FlowSchemaList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested flowSchemas. -func (c *FakeFlowSchemas) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewRootWatchActionWithOptions(flowschemasResource, opts)) -} - -// Create takes the representation of a flowSchema and creates it. Returns the server's representation of the flowSchema, and an error, if there is any. -func (c *FakeFlowSchemas) Create(ctx context.Context, flowSchema *v1.FlowSchema, opts metav1.CreateOptions) (result *v1.FlowSchema, err error) { - emptyResult := &v1.FlowSchema{} - obj, err := c.Fake. - Invokes(testing.NewRootCreateActionWithOptions(flowschemasResource, flowSchema, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1.FlowSchema), err -} - -// Update takes the representation of a flowSchema and updates it. Returns the server's representation of the flowSchema, and an error, if there is any. -func (c *FakeFlowSchemas) Update(ctx context.Context, flowSchema *v1.FlowSchema, opts metav1.UpdateOptions) (result *v1.FlowSchema, err error) { - emptyResult := &v1.FlowSchema{} - obj, err := c.Fake. - Invokes(testing.NewRootUpdateActionWithOptions(flowschemasResource, flowSchema, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1.FlowSchema), err -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeFlowSchemas) UpdateStatus(ctx context.Context, flowSchema *v1.FlowSchema, opts metav1.UpdateOptions) (result *v1.FlowSchema, err error) { - emptyResult := &v1.FlowSchema{} - obj, err := c.Fake. - Invokes(testing.NewRootUpdateSubresourceActionWithOptions(flowschemasResource, "status", flowSchema, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1.FlowSchema), err -} - -// Delete takes name of the flowSchema and deletes it. Returns an error if one occurs. -func (c *FakeFlowSchemas) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewRootDeleteActionWithOptions(flowschemasResource, name, opts), &v1.FlowSchema{}) - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeFlowSchemas) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewRootDeleteCollectionActionWithOptions(flowschemasResource, opts, listOpts) - - _, err := c.Fake.Invokes(action, &v1.FlowSchemaList{}) - return err -} - -// Patch applies the patch and returns the patched flowSchema. -func (c *FakeFlowSchemas) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.FlowSchema, err error) { - emptyResult := &v1.FlowSchema{} - obj, err := c.Fake. 
- Invokes(testing.NewRootPatchSubresourceActionWithOptions(flowschemasResource, name, pt, data, opts, subresources...), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1.FlowSchema), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied flowSchema. -func (c *FakeFlowSchemas) Apply(ctx context.Context, flowSchema *flowcontrolv1.FlowSchemaApplyConfiguration, opts metav1.ApplyOptions) (result *v1.FlowSchema, err error) { - if flowSchema == nil { - return nil, fmt.Errorf("flowSchema provided to Apply must not be nil") - } - data, err := json.Marshal(flowSchema) - if err != nil { - return nil, err - } - name := flowSchema.Name - if name == nil { - return nil, fmt.Errorf("flowSchema.Name must be provided to Apply") - } - emptyResult := &v1.FlowSchema{} - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceActionWithOptions(flowschemasResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1.FlowSchema), err -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *FakeFlowSchemas) ApplyStatus(ctx context.Context, flowSchema *flowcontrolv1.FlowSchemaApplyConfiguration, opts metav1.ApplyOptions) (result *v1.FlowSchema, err error) { - if flowSchema == nil { - return nil, fmt.Errorf("flowSchema provided to Apply must not be nil") - } - data, err := json.Marshal(flowSchema) - if err != nil { - return nil, err - } - name := flowSchema.Name - if name == nil { - return nil, fmt.Errorf("flowSchema.Name must be provided to Apply") - } - emptyResult := &v1.FlowSchema{} - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceActionWithOptions(flowschemasResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) - if obj == nil { - return emptyResult, err +func newFakeFlowSchemas(fake *FakeFlowcontrolV1) typedflowcontrolv1.FlowSchemaInterface { + return &fakeFlowSchemas{ + gentype.NewFakeClientWithListAndApply[*v1.FlowSchema, *v1.FlowSchemaList, *flowcontrolv1.FlowSchemaApplyConfiguration]( + fake.Fake, + "", + v1.SchemeGroupVersion.WithResource("flowschemas"), + v1.SchemeGroupVersion.WithKind("FlowSchema"), + func() *v1.FlowSchema { return &v1.FlowSchema{} }, + func() *v1.FlowSchemaList { return &v1.FlowSchemaList{} }, + func(dst, src *v1.FlowSchemaList) { dst.ListMeta = src.ListMeta }, + func(list *v1.FlowSchemaList) []*v1.FlowSchema { return gentype.ToPointerSlice(list.Items) }, + func(list *v1.FlowSchemaList, items []*v1.FlowSchema) { list.Items = gentype.FromPointerSlice(items) }, + ), + fake, } - return obj.(*v1.FlowSchema), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/fake/fake_prioritylevelconfiguration.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/fake/fake_prioritylevelconfiguration.go index 053de56ed..a4be23679 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/fake/fake_prioritylevelconfiguration.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/fake/fake_prioritylevelconfiguration.go @@ -19,168 +19,35 @@ limitations under the License. 
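The fake clients above are rewritten from hand-expanded reactor plumbing into thin wrappers over gentype.FakeClientWithListAndApply, but they are still returned through the same typed interfaces, so caller code is unaffected. A minimal sketch of exercising the rewritten fakes, assuming the vendored fake clientset (k8s.io/client-go/kubernetes/fake) and an illustrative helper name:

package example

import (
	"context"

	flowcontrolv1 "k8s.io/api/flowcontrol/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
)

// exerciseFlowSchemaFake round-trips a FlowSchema through the fake clientset;
// the gentype-backed fakes behave the same as the old hand-written ones here.
func exerciseFlowSchemaFake(ctx context.Context) error {
	cs := fake.NewSimpleClientset()

	fs := &flowcontrolv1.FlowSchema{ObjectMeta: metav1.ObjectMeta{Name: "example"}}
	if _, err := cs.FlowcontrolV1().FlowSchemas().Create(ctx, fs, metav1.CreateOptions{}); err != nil {
		return err
	}
	_, err := cs.FlowcontrolV1().FlowSchemas().Get(ctx, "example", metav1.GetOptions{})
	return err
}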
package fake import ( - "context" - json "encoding/json" - "fmt" - v1 "k8s.io/api/flowcontrol/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" flowcontrolv1 "k8s.io/client-go/applyconfigurations/flowcontrol/v1" - testing "k8s.io/client-go/testing" + gentype "k8s.io/client-go/gentype" + typedflowcontrolv1 "k8s.io/client-go/kubernetes/typed/flowcontrol/v1" ) -// FakePriorityLevelConfigurations implements PriorityLevelConfigurationInterface -type FakePriorityLevelConfigurations struct { +// fakePriorityLevelConfigurations implements PriorityLevelConfigurationInterface +type fakePriorityLevelConfigurations struct { + *gentype.FakeClientWithListAndApply[*v1.PriorityLevelConfiguration, *v1.PriorityLevelConfigurationList, *flowcontrolv1.PriorityLevelConfigurationApplyConfiguration] Fake *FakeFlowcontrolV1 } -var prioritylevelconfigurationsResource = v1.SchemeGroupVersion.WithResource("prioritylevelconfigurations") - -var prioritylevelconfigurationsKind = v1.SchemeGroupVersion.WithKind("PriorityLevelConfiguration") - -// Get takes name of the priorityLevelConfiguration, and returns the corresponding priorityLevelConfiguration object, and an error if there is any. -func (c *FakePriorityLevelConfigurations) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.PriorityLevelConfiguration, err error) { - emptyResult := &v1.PriorityLevelConfiguration{} - obj, err := c.Fake. - Invokes(testing.NewRootGetActionWithOptions(prioritylevelconfigurationsResource, name, options), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1.PriorityLevelConfiguration), err -} - -// List takes label and field selectors, and returns the list of PriorityLevelConfigurations that match those selectors. -func (c *FakePriorityLevelConfigurations) List(ctx context.Context, opts metav1.ListOptions) (result *v1.PriorityLevelConfigurationList, err error) { - emptyResult := &v1.PriorityLevelConfigurationList{} - obj, err := c.Fake. - Invokes(testing.NewRootListActionWithOptions(prioritylevelconfigurationsResource, prioritylevelconfigurationsKind, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1.PriorityLevelConfigurationList{ListMeta: obj.(*v1.PriorityLevelConfigurationList).ListMeta} - for _, item := range obj.(*v1.PriorityLevelConfigurationList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested priorityLevelConfigurations. -func (c *FakePriorityLevelConfigurations) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewRootWatchActionWithOptions(prioritylevelconfigurationsResource, opts)) -} - -// Create takes the representation of a priorityLevelConfiguration and creates it. Returns the server's representation of the priorityLevelConfiguration, and an error, if there is any. -func (c *FakePriorityLevelConfigurations) Create(ctx context.Context, priorityLevelConfiguration *v1.PriorityLevelConfiguration, opts metav1.CreateOptions) (result *v1.PriorityLevelConfiguration, err error) { - emptyResult := &v1.PriorityLevelConfiguration{} - obj, err := c.Fake. 
- Invokes(testing.NewRootCreateActionWithOptions(prioritylevelconfigurationsResource, priorityLevelConfiguration, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1.PriorityLevelConfiguration), err -} - -// Update takes the representation of a priorityLevelConfiguration and updates it. Returns the server's representation of the priorityLevelConfiguration, and an error, if there is any. -func (c *FakePriorityLevelConfigurations) Update(ctx context.Context, priorityLevelConfiguration *v1.PriorityLevelConfiguration, opts metav1.UpdateOptions) (result *v1.PriorityLevelConfiguration, err error) { - emptyResult := &v1.PriorityLevelConfiguration{} - obj, err := c.Fake. - Invokes(testing.NewRootUpdateActionWithOptions(prioritylevelconfigurationsResource, priorityLevelConfiguration, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1.PriorityLevelConfiguration), err -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakePriorityLevelConfigurations) UpdateStatus(ctx context.Context, priorityLevelConfiguration *v1.PriorityLevelConfiguration, opts metav1.UpdateOptions) (result *v1.PriorityLevelConfiguration, err error) { - emptyResult := &v1.PriorityLevelConfiguration{} - obj, err := c.Fake. - Invokes(testing.NewRootUpdateSubresourceActionWithOptions(prioritylevelconfigurationsResource, "status", priorityLevelConfiguration, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1.PriorityLevelConfiguration), err -} - -// Delete takes name of the priorityLevelConfiguration and deletes it. Returns an error if one occurs. -func (c *FakePriorityLevelConfigurations) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewRootDeleteActionWithOptions(prioritylevelconfigurationsResource, name, opts), &v1.PriorityLevelConfiguration{}) - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakePriorityLevelConfigurations) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewRootDeleteCollectionActionWithOptions(prioritylevelconfigurationsResource, opts, listOpts) - - _, err := c.Fake.Invokes(action, &v1.PriorityLevelConfigurationList{}) - return err -} - -// Patch applies the patch and returns the patched priorityLevelConfiguration. -func (c *FakePriorityLevelConfigurations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.PriorityLevelConfiguration, err error) { - emptyResult := &v1.PriorityLevelConfiguration{} - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceActionWithOptions(prioritylevelconfigurationsResource, name, pt, data, opts, subresources...), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1.PriorityLevelConfiguration), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied priorityLevelConfiguration. 
-func (c *FakePriorityLevelConfigurations) Apply(ctx context.Context, priorityLevelConfiguration *flowcontrolv1.PriorityLevelConfigurationApplyConfiguration, opts metav1.ApplyOptions) (result *v1.PriorityLevelConfiguration, err error) { - if priorityLevelConfiguration == nil { - return nil, fmt.Errorf("priorityLevelConfiguration provided to Apply must not be nil") - } - data, err := json.Marshal(priorityLevelConfiguration) - if err != nil { - return nil, err - } - name := priorityLevelConfiguration.Name - if name == nil { - return nil, fmt.Errorf("priorityLevelConfiguration.Name must be provided to Apply") - } - emptyResult := &v1.PriorityLevelConfiguration{} - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceActionWithOptions(prioritylevelconfigurationsResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1.PriorityLevelConfiguration), err -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *FakePriorityLevelConfigurations) ApplyStatus(ctx context.Context, priorityLevelConfiguration *flowcontrolv1.PriorityLevelConfigurationApplyConfiguration, opts metav1.ApplyOptions) (result *v1.PriorityLevelConfiguration, err error) { - if priorityLevelConfiguration == nil { - return nil, fmt.Errorf("priorityLevelConfiguration provided to Apply must not be nil") - } - data, err := json.Marshal(priorityLevelConfiguration) - if err != nil { - return nil, err - } - name := priorityLevelConfiguration.Name - if name == nil { - return nil, fmt.Errorf("priorityLevelConfiguration.Name must be provided to Apply") - } - emptyResult := &v1.PriorityLevelConfiguration{} - obj, err := c.Fake. 
- Invokes(testing.NewRootPatchSubresourceActionWithOptions(prioritylevelconfigurationsResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) - if obj == nil { - return emptyResult, err +func newFakePriorityLevelConfigurations(fake *FakeFlowcontrolV1) typedflowcontrolv1.PriorityLevelConfigurationInterface { + return &fakePriorityLevelConfigurations{ + gentype.NewFakeClientWithListAndApply[*v1.PriorityLevelConfiguration, *v1.PriorityLevelConfigurationList, *flowcontrolv1.PriorityLevelConfigurationApplyConfiguration]( + fake.Fake, + "", + v1.SchemeGroupVersion.WithResource("prioritylevelconfigurations"), + v1.SchemeGroupVersion.WithKind("PriorityLevelConfiguration"), + func() *v1.PriorityLevelConfiguration { return &v1.PriorityLevelConfiguration{} }, + func() *v1.PriorityLevelConfigurationList { return &v1.PriorityLevelConfigurationList{} }, + func(dst, src *v1.PriorityLevelConfigurationList) { dst.ListMeta = src.ListMeta }, + func(list *v1.PriorityLevelConfigurationList) []*v1.PriorityLevelConfiguration { + return gentype.ToPointerSlice(list.Items) + }, + func(list *v1.PriorityLevelConfigurationList, items []*v1.PriorityLevelConfiguration) { + list.Items = gentype.FromPointerSlice(items) + }, + ), + fake, } - return obj.(*v1.PriorityLevelConfiguration), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/flowcontrol_client.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/flowcontrol_client.go index 3d7d93ef1..3b19586e9 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/flowcontrol_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/flowcontrol_client.go @@ -19,10 +19,10 @@ limitations under the License. package v1 import ( - "net/http" + http "net/http" - v1 "k8s.io/api/flowcontrol/v1" - "k8s.io/client-go/kubernetes/scheme" + flowcontrolv1 "k8s.io/api/flowcontrol/v1" + scheme "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -90,10 +90,10 @@ func New(c rest.Interface) *FlowcontrolV1Client { } func setConfigDefaults(config *rest.Config) error { - gv := v1.SchemeGroupVersion + gv := flowcontrolv1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/flowschema.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/flowschema.go index 2606cee07..56d4d8065 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/flowschema.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/flowschema.go @@ -19,13 +19,13 @@ limitations under the License. package v1 import ( - "context" + context "context" - v1 "k8s.io/api/flowcontrol/v1" + flowcontrolv1 "k8s.io/api/flowcontrol/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - flowcontrolv1 "k8s.io/client-go/applyconfigurations/flowcontrol/v1" + applyconfigurationsflowcontrolv1 "k8s.io/client-go/applyconfigurations/flowcontrol/v1" gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" ) @@ -38,36 +38,38 @@ type FlowSchemasGetter interface { // FlowSchemaInterface has methods to work with FlowSchema resources. 
type FlowSchemaInterface interface { - Create(ctx context.Context, flowSchema *v1.FlowSchema, opts metav1.CreateOptions) (*v1.FlowSchema, error) - Update(ctx context.Context, flowSchema *v1.FlowSchema, opts metav1.UpdateOptions) (*v1.FlowSchema, error) + Create(ctx context.Context, flowSchema *flowcontrolv1.FlowSchema, opts metav1.CreateOptions) (*flowcontrolv1.FlowSchema, error) + Update(ctx context.Context, flowSchema *flowcontrolv1.FlowSchema, opts metav1.UpdateOptions) (*flowcontrolv1.FlowSchema, error) // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - UpdateStatus(ctx context.Context, flowSchema *v1.FlowSchema, opts metav1.UpdateOptions) (*v1.FlowSchema, error) + UpdateStatus(ctx context.Context, flowSchema *flowcontrolv1.FlowSchema, opts metav1.UpdateOptions) (*flowcontrolv1.FlowSchema, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error - Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.FlowSchema, error) - List(ctx context.Context, opts metav1.ListOptions) (*v1.FlowSchemaList, error) + Get(ctx context.Context, name string, opts metav1.GetOptions) (*flowcontrolv1.FlowSchema, error) + List(ctx context.Context, opts metav1.ListOptions) (*flowcontrolv1.FlowSchemaList, error) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.FlowSchema, err error) - Apply(ctx context.Context, flowSchema *flowcontrolv1.FlowSchemaApplyConfiguration, opts metav1.ApplyOptions) (result *v1.FlowSchema, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *flowcontrolv1.FlowSchema, err error) + Apply(ctx context.Context, flowSchema *applyconfigurationsflowcontrolv1.FlowSchemaApplyConfiguration, opts metav1.ApplyOptions) (result *flowcontrolv1.FlowSchema, err error) // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
- ApplyStatus(ctx context.Context, flowSchema *flowcontrolv1.FlowSchemaApplyConfiguration, opts metav1.ApplyOptions) (result *v1.FlowSchema, err error) + ApplyStatus(ctx context.Context, flowSchema *applyconfigurationsflowcontrolv1.FlowSchemaApplyConfiguration, opts metav1.ApplyOptions) (result *flowcontrolv1.FlowSchema, err error) FlowSchemaExpansion } // flowSchemas implements FlowSchemaInterface type flowSchemas struct { - *gentype.ClientWithListAndApply[*v1.FlowSchema, *v1.FlowSchemaList, *flowcontrolv1.FlowSchemaApplyConfiguration] + *gentype.ClientWithListAndApply[*flowcontrolv1.FlowSchema, *flowcontrolv1.FlowSchemaList, *applyconfigurationsflowcontrolv1.FlowSchemaApplyConfiguration] } // newFlowSchemas returns a FlowSchemas func newFlowSchemas(c *FlowcontrolV1Client) *flowSchemas { return &flowSchemas{ - gentype.NewClientWithListAndApply[*v1.FlowSchema, *v1.FlowSchemaList, *flowcontrolv1.FlowSchemaApplyConfiguration]( + gentype.NewClientWithListAndApply[*flowcontrolv1.FlowSchema, *flowcontrolv1.FlowSchemaList, *applyconfigurationsflowcontrolv1.FlowSchemaApplyConfiguration]( "flowschemas", c.RESTClient(), scheme.ParameterCodec, "", - func() *v1.FlowSchema { return &v1.FlowSchema{} }, - func() *v1.FlowSchemaList { return &v1.FlowSchemaList{} }), + func() *flowcontrolv1.FlowSchema { return &flowcontrolv1.FlowSchema{} }, + func() *flowcontrolv1.FlowSchemaList { return &flowcontrolv1.FlowSchemaList{} }, + gentype.PrefersProtobuf[*flowcontrolv1.FlowSchema](), + ), } } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/prioritylevelconfiguration.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/prioritylevelconfiguration.go index 64907af60..5d25f393a 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/prioritylevelconfiguration.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/prioritylevelconfiguration.go @@ -19,13 +19,13 @@ limitations under the License. package v1 import ( - "context" + context "context" - v1 "k8s.io/api/flowcontrol/v1" + flowcontrolv1 "k8s.io/api/flowcontrol/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - flowcontrolv1 "k8s.io/client-go/applyconfigurations/flowcontrol/v1" + applyconfigurationsflowcontrolv1 "k8s.io/client-go/applyconfigurations/flowcontrol/v1" gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" ) @@ -38,36 +38,40 @@ type PriorityLevelConfigurationsGetter interface { // PriorityLevelConfigurationInterface has methods to work with PriorityLevelConfiguration resources. type PriorityLevelConfigurationInterface interface { - Create(ctx context.Context, priorityLevelConfiguration *v1.PriorityLevelConfiguration, opts metav1.CreateOptions) (*v1.PriorityLevelConfiguration, error) - Update(ctx context.Context, priorityLevelConfiguration *v1.PriorityLevelConfiguration, opts metav1.UpdateOptions) (*v1.PriorityLevelConfiguration, error) + Create(ctx context.Context, priorityLevelConfiguration *flowcontrolv1.PriorityLevelConfiguration, opts metav1.CreateOptions) (*flowcontrolv1.PriorityLevelConfiguration, error) + Update(ctx context.Context, priorityLevelConfiguration *flowcontrolv1.PriorityLevelConfiguration, opts metav1.UpdateOptions) (*flowcontrolv1.PriorityLevelConfiguration, error) // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
- UpdateStatus(ctx context.Context, priorityLevelConfiguration *v1.PriorityLevelConfiguration, opts metav1.UpdateOptions) (*v1.PriorityLevelConfiguration, error) + UpdateStatus(ctx context.Context, priorityLevelConfiguration *flowcontrolv1.PriorityLevelConfiguration, opts metav1.UpdateOptions) (*flowcontrolv1.PriorityLevelConfiguration, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error - Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.PriorityLevelConfiguration, error) - List(ctx context.Context, opts metav1.ListOptions) (*v1.PriorityLevelConfigurationList, error) + Get(ctx context.Context, name string, opts metav1.GetOptions) (*flowcontrolv1.PriorityLevelConfiguration, error) + List(ctx context.Context, opts metav1.ListOptions) (*flowcontrolv1.PriorityLevelConfigurationList, error) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.PriorityLevelConfiguration, err error) - Apply(ctx context.Context, priorityLevelConfiguration *flowcontrolv1.PriorityLevelConfigurationApplyConfiguration, opts metav1.ApplyOptions) (result *v1.PriorityLevelConfiguration, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *flowcontrolv1.PriorityLevelConfiguration, err error) + Apply(ctx context.Context, priorityLevelConfiguration *applyconfigurationsflowcontrolv1.PriorityLevelConfigurationApplyConfiguration, opts metav1.ApplyOptions) (result *flowcontrolv1.PriorityLevelConfiguration, err error) // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
- ApplyStatus(ctx context.Context, priorityLevelConfiguration *flowcontrolv1.PriorityLevelConfigurationApplyConfiguration, opts metav1.ApplyOptions) (result *v1.PriorityLevelConfiguration, err error) + ApplyStatus(ctx context.Context, priorityLevelConfiguration *applyconfigurationsflowcontrolv1.PriorityLevelConfigurationApplyConfiguration, opts metav1.ApplyOptions) (result *flowcontrolv1.PriorityLevelConfiguration, err error) PriorityLevelConfigurationExpansion } // priorityLevelConfigurations implements PriorityLevelConfigurationInterface type priorityLevelConfigurations struct { - *gentype.ClientWithListAndApply[*v1.PriorityLevelConfiguration, *v1.PriorityLevelConfigurationList, *flowcontrolv1.PriorityLevelConfigurationApplyConfiguration] + *gentype.ClientWithListAndApply[*flowcontrolv1.PriorityLevelConfiguration, *flowcontrolv1.PriorityLevelConfigurationList, *applyconfigurationsflowcontrolv1.PriorityLevelConfigurationApplyConfiguration] } // newPriorityLevelConfigurations returns a PriorityLevelConfigurations func newPriorityLevelConfigurations(c *FlowcontrolV1Client) *priorityLevelConfigurations { return &priorityLevelConfigurations{ - gentype.NewClientWithListAndApply[*v1.PriorityLevelConfiguration, *v1.PriorityLevelConfigurationList, *flowcontrolv1.PriorityLevelConfigurationApplyConfiguration]( + gentype.NewClientWithListAndApply[*flowcontrolv1.PriorityLevelConfiguration, *flowcontrolv1.PriorityLevelConfigurationList, *applyconfigurationsflowcontrolv1.PriorityLevelConfigurationApplyConfiguration]( "prioritylevelconfigurations", c.RESTClient(), scheme.ParameterCodec, "", - func() *v1.PriorityLevelConfiguration { return &v1.PriorityLevelConfiguration{} }, - func() *v1.PriorityLevelConfigurationList { return &v1.PriorityLevelConfigurationList{} }), + func() *flowcontrolv1.PriorityLevelConfiguration { return &flowcontrolv1.PriorityLevelConfiguration{} }, + func() *flowcontrolv1.PriorityLevelConfigurationList { + return &flowcontrolv1.PriorityLevelConfigurationList{} + }, + gentype.PrefersProtobuf[*flowcontrolv1.PriorityLevelConfiguration](), + ), } } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/fake/fake_flowcontrol_client.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/fake/fake_flowcontrol_client.go index 1bd58d088..b70c07cde 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/fake/fake_flowcontrol_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/fake/fake_flowcontrol_client.go @@ -29,11 +29,11 @@ type FakeFlowcontrolV1beta1 struct { } func (c *FakeFlowcontrolV1beta1) FlowSchemas() v1beta1.FlowSchemaInterface { - return &FakeFlowSchemas{c} + return newFakeFlowSchemas(c) } func (c *FakeFlowcontrolV1beta1) PriorityLevelConfigurations() v1beta1.PriorityLevelConfigurationInterface { - return &FakePriorityLevelConfigurations{c} + return newFakePriorityLevelConfigurations(c) } // RESTClient returns a RESTClient that is used to communicate diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/fake/fake_flowschema.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/fake/fake_flowschema.go index 8b4435a8a..600f740c3 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/fake/fake_flowschema.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/fake/fake_flowschema.go @@ -19,168 +19,33 @@ limitations under the License. 
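The recurring addition in these constructors is the gentype.PrefersProtobuf option, which makes the generated clients negotiate protobuf for built-in types; the rest of the constructor is unchanged. A sketch that mirrors the flowcontrol/v1 FlowSchema constructor shown above, with orientation comments added here (the helper name and the bare rest.Interface parameter are illustrative, not part of the generated code):

import (
	flowcontrolv1 "k8s.io/api/flowcontrol/v1"
	applyconfigurationsflowcontrolv1 "k8s.io/client-go/applyconfigurations/flowcontrol/v1"
	gentype "k8s.io/client-go/gentype"
	scheme "k8s.io/client-go/kubernetes/scheme"
	rest "k8s.io/client-go/rest"
)

func newFlowSchemaClient(rc rest.Interface) *gentype.ClientWithListAndApply[*flowcontrolv1.FlowSchema, *flowcontrolv1.FlowSchemaList, *applyconfigurationsflowcontrolv1.FlowSchemaApplyConfiguration] {
	return gentype.NewClientWithListAndApply[*flowcontrolv1.FlowSchema, *flowcontrolv1.FlowSchemaList, *applyconfigurationsflowcontrolv1.FlowSchemaApplyConfiguration](
		"flowschemas",         // resource name used to build request paths
		rc,                    // group/version REST client
		scheme.ParameterCodec, // encodes Get/List/Delete options
		"",                    // FlowSchema is cluster-scoped, so no namespace
		func() *flowcontrolv1.FlowSchema { return &flowcontrolv1.FlowSchema{} },
		func() *flowcontrolv1.FlowSchemaList { return &flowcontrolv1.FlowSchemaList{} },
		gentype.PrefersProtobuf[*flowcontrolv1.FlowSchema](), // prefer protobuf when talking to the API server
	)
}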
package fake import ( - "context" - json "encoding/json" - "fmt" - v1beta1 "k8s.io/api/flowcontrol/v1beta1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" flowcontrolv1beta1 "k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1" - testing "k8s.io/client-go/testing" + gentype "k8s.io/client-go/gentype" + typedflowcontrolv1beta1 "k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1" ) -// FakeFlowSchemas implements FlowSchemaInterface -type FakeFlowSchemas struct { +// fakeFlowSchemas implements FlowSchemaInterface +type fakeFlowSchemas struct { + *gentype.FakeClientWithListAndApply[*v1beta1.FlowSchema, *v1beta1.FlowSchemaList, *flowcontrolv1beta1.FlowSchemaApplyConfiguration] Fake *FakeFlowcontrolV1beta1 } -var flowschemasResource = v1beta1.SchemeGroupVersion.WithResource("flowschemas") - -var flowschemasKind = v1beta1.SchemeGroupVersion.WithKind("FlowSchema") - -// Get takes name of the flowSchema, and returns the corresponding flowSchema object, and an error if there is any. -func (c *FakeFlowSchemas) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.FlowSchema, err error) { - emptyResult := &v1beta1.FlowSchema{} - obj, err := c.Fake. - Invokes(testing.NewRootGetActionWithOptions(flowschemasResource, name, options), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.FlowSchema), err -} - -// List takes label and field selectors, and returns the list of FlowSchemas that match those selectors. -func (c *FakeFlowSchemas) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.FlowSchemaList, err error) { - emptyResult := &v1beta1.FlowSchemaList{} - obj, err := c.Fake. - Invokes(testing.NewRootListActionWithOptions(flowschemasResource, flowschemasKind, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1beta1.FlowSchemaList{ListMeta: obj.(*v1beta1.FlowSchemaList).ListMeta} - for _, item := range obj.(*v1beta1.FlowSchemaList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested flowSchemas. -func (c *FakeFlowSchemas) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewRootWatchActionWithOptions(flowschemasResource, opts)) -} - -// Create takes the representation of a flowSchema and creates it. Returns the server's representation of the flowSchema, and an error, if there is any. -func (c *FakeFlowSchemas) Create(ctx context.Context, flowSchema *v1beta1.FlowSchema, opts v1.CreateOptions) (result *v1beta1.FlowSchema, err error) { - emptyResult := &v1beta1.FlowSchema{} - obj, err := c.Fake. - Invokes(testing.NewRootCreateActionWithOptions(flowschemasResource, flowSchema, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.FlowSchema), err -} - -// Update takes the representation of a flowSchema and updates it. Returns the server's representation of the flowSchema, and an error, if there is any. -func (c *FakeFlowSchemas) Update(ctx context.Context, flowSchema *v1beta1.FlowSchema, opts v1.UpdateOptions) (result *v1beta1.FlowSchema, err error) { - emptyResult := &v1beta1.FlowSchema{} - obj, err := c.Fake. 
- Invokes(testing.NewRootUpdateActionWithOptions(flowschemasResource, flowSchema, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.FlowSchema), err -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeFlowSchemas) UpdateStatus(ctx context.Context, flowSchema *v1beta1.FlowSchema, opts v1.UpdateOptions) (result *v1beta1.FlowSchema, err error) { - emptyResult := &v1beta1.FlowSchema{} - obj, err := c.Fake. - Invokes(testing.NewRootUpdateSubresourceActionWithOptions(flowschemasResource, "status", flowSchema, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.FlowSchema), err -} - -// Delete takes name of the flowSchema and deletes it. Returns an error if one occurs. -func (c *FakeFlowSchemas) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewRootDeleteActionWithOptions(flowschemasResource, name, opts), &v1beta1.FlowSchema{}) - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeFlowSchemas) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewRootDeleteCollectionActionWithOptions(flowschemasResource, opts, listOpts) - - _, err := c.Fake.Invokes(action, &v1beta1.FlowSchemaList{}) - return err -} - -// Patch applies the patch and returns the patched flowSchema. -func (c *FakeFlowSchemas) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.FlowSchema, err error) { - emptyResult := &v1beta1.FlowSchema{} - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceActionWithOptions(flowschemasResource, name, pt, data, opts, subresources...), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.FlowSchema), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied flowSchema. -func (c *FakeFlowSchemas) Apply(ctx context.Context, flowSchema *flowcontrolv1beta1.FlowSchemaApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.FlowSchema, err error) { - if flowSchema == nil { - return nil, fmt.Errorf("flowSchema provided to Apply must not be nil") - } - data, err := json.Marshal(flowSchema) - if err != nil { - return nil, err - } - name := flowSchema.Name - if name == nil { - return nil, fmt.Errorf("flowSchema.Name must be provided to Apply") - } - emptyResult := &v1beta1.FlowSchema{} - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceActionWithOptions(flowschemasResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.FlowSchema), err -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
-func (c *FakeFlowSchemas) ApplyStatus(ctx context.Context, flowSchema *flowcontrolv1beta1.FlowSchemaApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.FlowSchema, err error) { - if flowSchema == nil { - return nil, fmt.Errorf("flowSchema provided to Apply must not be nil") - } - data, err := json.Marshal(flowSchema) - if err != nil { - return nil, err - } - name := flowSchema.Name - if name == nil { - return nil, fmt.Errorf("flowSchema.Name must be provided to Apply") - } - emptyResult := &v1beta1.FlowSchema{} - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceActionWithOptions(flowschemasResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) - if obj == nil { - return emptyResult, err +func newFakeFlowSchemas(fake *FakeFlowcontrolV1beta1) typedflowcontrolv1beta1.FlowSchemaInterface { + return &fakeFlowSchemas{ + gentype.NewFakeClientWithListAndApply[*v1beta1.FlowSchema, *v1beta1.FlowSchemaList, *flowcontrolv1beta1.FlowSchemaApplyConfiguration]( + fake.Fake, + "", + v1beta1.SchemeGroupVersion.WithResource("flowschemas"), + v1beta1.SchemeGroupVersion.WithKind("FlowSchema"), + func() *v1beta1.FlowSchema { return &v1beta1.FlowSchema{} }, + func() *v1beta1.FlowSchemaList { return &v1beta1.FlowSchemaList{} }, + func(dst, src *v1beta1.FlowSchemaList) { dst.ListMeta = src.ListMeta }, + func(list *v1beta1.FlowSchemaList) []*v1beta1.FlowSchema { return gentype.ToPointerSlice(list.Items) }, + func(list *v1beta1.FlowSchemaList, items []*v1beta1.FlowSchema) { + list.Items = gentype.FromPointerSlice(items) + }, + ), + fake, } - return obj.(*v1beta1.FlowSchema), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/fake/fake_prioritylevelconfiguration.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/fake/fake_prioritylevelconfiguration.go index e139e4dce..170c4df9b 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/fake/fake_prioritylevelconfiguration.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/fake/fake_prioritylevelconfiguration.go @@ -19,168 +19,35 @@ limitations under the License. package fake import ( - "context" - json "encoding/json" - "fmt" - v1beta1 "k8s.io/api/flowcontrol/v1beta1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" flowcontrolv1beta1 "k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1" - testing "k8s.io/client-go/testing" + gentype "k8s.io/client-go/gentype" + typedflowcontrolv1beta1 "k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1" ) -// FakePriorityLevelConfigurations implements PriorityLevelConfigurationInterface -type FakePriorityLevelConfigurations struct { +// fakePriorityLevelConfigurations implements PriorityLevelConfigurationInterface +type fakePriorityLevelConfigurations struct { + *gentype.FakeClientWithListAndApply[*v1beta1.PriorityLevelConfiguration, *v1beta1.PriorityLevelConfigurationList, *flowcontrolv1beta1.PriorityLevelConfigurationApplyConfiguration] Fake *FakeFlowcontrolV1beta1 } -var prioritylevelconfigurationsResource = v1beta1.SchemeGroupVersion.WithResource("prioritylevelconfigurations") - -var prioritylevelconfigurationsKind = v1beta1.SchemeGroupVersion.WithKind("PriorityLevelConfiguration") - -// Get takes name of the priorityLevelConfiguration, and returns the corresponding priorityLevelConfiguration object, and an error if there is any. 
-func (c *FakePriorityLevelConfigurations) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.PriorityLevelConfiguration, err error) { - emptyResult := &v1beta1.PriorityLevelConfiguration{} - obj, err := c.Fake. - Invokes(testing.NewRootGetActionWithOptions(prioritylevelconfigurationsResource, name, options), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.PriorityLevelConfiguration), err -} - -// List takes label and field selectors, and returns the list of PriorityLevelConfigurations that match those selectors. -func (c *FakePriorityLevelConfigurations) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.PriorityLevelConfigurationList, err error) { - emptyResult := &v1beta1.PriorityLevelConfigurationList{} - obj, err := c.Fake. - Invokes(testing.NewRootListActionWithOptions(prioritylevelconfigurationsResource, prioritylevelconfigurationsKind, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1beta1.PriorityLevelConfigurationList{ListMeta: obj.(*v1beta1.PriorityLevelConfigurationList).ListMeta} - for _, item := range obj.(*v1beta1.PriorityLevelConfigurationList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested priorityLevelConfigurations. -func (c *FakePriorityLevelConfigurations) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewRootWatchActionWithOptions(prioritylevelconfigurationsResource, opts)) -} - -// Create takes the representation of a priorityLevelConfiguration and creates it. Returns the server's representation of the priorityLevelConfiguration, and an error, if there is any. -func (c *FakePriorityLevelConfigurations) Create(ctx context.Context, priorityLevelConfiguration *v1beta1.PriorityLevelConfiguration, opts v1.CreateOptions) (result *v1beta1.PriorityLevelConfiguration, err error) { - emptyResult := &v1beta1.PriorityLevelConfiguration{} - obj, err := c.Fake. - Invokes(testing.NewRootCreateActionWithOptions(prioritylevelconfigurationsResource, priorityLevelConfiguration, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.PriorityLevelConfiguration), err -} - -// Update takes the representation of a priorityLevelConfiguration and updates it. Returns the server's representation of the priorityLevelConfiguration, and an error, if there is any. -func (c *FakePriorityLevelConfigurations) Update(ctx context.Context, priorityLevelConfiguration *v1beta1.PriorityLevelConfiguration, opts v1.UpdateOptions) (result *v1beta1.PriorityLevelConfiguration, err error) { - emptyResult := &v1beta1.PriorityLevelConfiguration{} - obj, err := c.Fake. - Invokes(testing.NewRootUpdateActionWithOptions(prioritylevelconfigurationsResource, priorityLevelConfiguration, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.PriorityLevelConfiguration), err -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
-func (c *FakePriorityLevelConfigurations) UpdateStatus(ctx context.Context, priorityLevelConfiguration *v1beta1.PriorityLevelConfiguration, opts v1.UpdateOptions) (result *v1beta1.PriorityLevelConfiguration, err error) { - emptyResult := &v1beta1.PriorityLevelConfiguration{} - obj, err := c.Fake. - Invokes(testing.NewRootUpdateSubresourceActionWithOptions(prioritylevelconfigurationsResource, "status", priorityLevelConfiguration, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.PriorityLevelConfiguration), err -} - -// Delete takes name of the priorityLevelConfiguration and deletes it. Returns an error if one occurs. -func (c *FakePriorityLevelConfigurations) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewRootDeleteActionWithOptions(prioritylevelconfigurationsResource, name, opts), &v1beta1.PriorityLevelConfiguration{}) - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakePriorityLevelConfigurations) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewRootDeleteCollectionActionWithOptions(prioritylevelconfigurationsResource, opts, listOpts) - - _, err := c.Fake.Invokes(action, &v1beta1.PriorityLevelConfigurationList{}) - return err -} - -// Patch applies the patch and returns the patched priorityLevelConfiguration. -func (c *FakePriorityLevelConfigurations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.PriorityLevelConfiguration, err error) { - emptyResult := &v1beta1.PriorityLevelConfiguration{} - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceActionWithOptions(prioritylevelconfigurationsResource, name, pt, data, opts, subresources...), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.PriorityLevelConfiguration), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied priorityLevelConfiguration. -func (c *FakePriorityLevelConfigurations) Apply(ctx context.Context, priorityLevelConfiguration *flowcontrolv1beta1.PriorityLevelConfigurationApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.PriorityLevelConfiguration, err error) { - if priorityLevelConfiguration == nil { - return nil, fmt.Errorf("priorityLevelConfiguration provided to Apply must not be nil") - } - data, err := json.Marshal(priorityLevelConfiguration) - if err != nil { - return nil, err - } - name := priorityLevelConfiguration.Name - if name == nil { - return nil, fmt.Errorf("priorityLevelConfiguration.Name must be provided to Apply") - } - emptyResult := &v1beta1.PriorityLevelConfiguration{} - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceActionWithOptions(prioritylevelconfigurationsResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.PriorityLevelConfiguration), err -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
-func (c *FakePriorityLevelConfigurations) ApplyStatus(ctx context.Context, priorityLevelConfiguration *flowcontrolv1beta1.PriorityLevelConfigurationApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.PriorityLevelConfiguration, err error) { - if priorityLevelConfiguration == nil { - return nil, fmt.Errorf("priorityLevelConfiguration provided to Apply must not be nil") - } - data, err := json.Marshal(priorityLevelConfiguration) - if err != nil { - return nil, err - } - name := priorityLevelConfiguration.Name - if name == nil { - return nil, fmt.Errorf("priorityLevelConfiguration.Name must be provided to Apply") - } - emptyResult := &v1beta1.PriorityLevelConfiguration{} - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceActionWithOptions(prioritylevelconfigurationsResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) - if obj == nil { - return emptyResult, err +func newFakePriorityLevelConfigurations(fake *FakeFlowcontrolV1beta1) typedflowcontrolv1beta1.PriorityLevelConfigurationInterface { + return &fakePriorityLevelConfigurations{ + gentype.NewFakeClientWithListAndApply[*v1beta1.PriorityLevelConfiguration, *v1beta1.PriorityLevelConfigurationList, *flowcontrolv1beta1.PriorityLevelConfigurationApplyConfiguration]( + fake.Fake, + "", + v1beta1.SchemeGroupVersion.WithResource("prioritylevelconfigurations"), + v1beta1.SchemeGroupVersion.WithKind("PriorityLevelConfiguration"), + func() *v1beta1.PriorityLevelConfiguration { return &v1beta1.PriorityLevelConfiguration{} }, + func() *v1beta1.PriorityLevelConfigurationList { return &v1beta1.PriorityLevelConfigurationList{} }, + func(dst, src *v1beta1.PriorityLevelConfigurationList) { dst.ListMeta = src.ListMeta }, + func(list *v1beta1.PriorityLevelConfigurationList) []*v1beta1.PriorityLevelConfiguration { + return gentype.ToPointerSlice(list.Items) + }, + func(list *v1beta1.PriorityLevelConfigurationList, items []*v1beta1.PriorityLevelConfiguration) { + list.Items = gentype.FromPointerSlice(items) + }, + ), + fake, } - return obj.(*v1beta1.PriorityLevelConfiguration), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/flowcontrol_client.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/flowcontrol_client.go index c29cfca95..ac3f5ffe8 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/flowcontrol_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/flowcontrol_client.go @@ -19,10 +19,10 @@ limitations under the License. 
package v1beta1 import ( - "net/http" + http "net/http" - v1beta1 "k8s.io/api/flowcontrol/v1beta1" - "k8s.io/client-go/kubernetes/scheme" + flowcontrolv1beta1 "k8s.io/api/flowcontrol/v1beta1" + scheme "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -90,10 +90,10 @@ func New(c rest.Interface) *FlowcontrolV1beta1Client { } func setConfigDefaults(config *rest.Config) error { - gv := v1beta1.SchemeGroupVersion + gv := flowcontrolv1beta1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/flowschema.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/flowschema.go index 3c6805b9b..f0def3947 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/flowschema.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/flowschema.go @@ -19,13 +19,13 @@ limitations under the License. package v1beta1 import ( - "context" + context "context" - v1beta1 "k8s.io/api/flowcontrol/v1beta1" + flowcontrolv1beta1 "k8s.io/api/flowcontrol/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - flowcontrolv1beta1 "k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1" + applyconfigurationsflowcontrolv1beta1 "k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1" gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" ) @@ -38,36 +38,38 @@ type FlowSchemasGetter interface { // FlowSchemaInterface has methods to work with FlowSchema resources. type FlowSchemaInterface interface { - Create(ctx context.Context, flowSchema *v1beta1.FlowSchema, opts v1.CreateOptions) (*v1beta1.FlowSchema, error) - Update(ctx context.Context, flowSchema *v1beta1.FlowSchema, opts v1.UpdateOptions) (*v1beta1.FlowSchema, error) + Create(ctx context.Context, flowSchema *flowcontrolv1beta1.FlowSchema, opts v1.CreateOptions) (*flowcontrolv1beta1.FlowSchema, error) + Update(ctx context.Context, flowSchema *flowcontrolv1beta1.FlowSchema, opts v1.UpdateOptions) (*flowcontrolv1beta1.FlowSchema, error) // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
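For consumers, the setConfigDefaults change above (rest.CodecFactoryForGeneratedClient replacing the plain scheme codecs) is transparent: the per-group defaults are applied whenever the typed client is built from a rest.Config. A minimal sketch, assuming in-cluster config as one way to obtain the rest.Config and an illustrative function name:

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	flowcontrolv1beta1client "k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1"
	"k8s.io/client-go/rest"
)

func listFlowSchemas(ctx context.Context) error {
	cfg, err := rest.InClusterConfig()
	if err != nil {
		return err
	}
	// NewForConfig runs setConfigDefaults, which now wires in
	// rest.CodecFactoryForGeneratedClient for content negotiation.
	client, err := flowcontrolv1beta1client.NewForConfig(cfg)
	if err != nil {
		return err
	}
	_, err = client.FlowSchemas().List(ctx, metav1.ListOptions{})
	return err
}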
- UpdateStatus(ctx context.Context, flowSchema *v1beta1.FlowSchema, opts v1.UpdateOptions) (*v1beta1.FlowSchema, error) + UpdateStatus(ctx context.Context, flowSchema *flowcontrolv1beta1.FlowSchema, opts v1.UpdateOptions) (*flowcontrolv1beta1.FlowSchema, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.FlowSchema, error) - List(ctx context.Context, opts v1.ListOptions) (*v1beta1.FlowSchemaList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*flowcontrolv1beta1.FlowSchema, error) + List(ctx context.Context, opts v1.ListOptions) (*flowcontrolv1beta1.FlowSchemaList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.FlowSchema, err error) - Apply(ctx context.Context, flowSchema *flowcontrolv1beta1.FlowSchemaApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.FlowSchema, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *flowcontrolv1beta1.FlowSchema, err error) + Apply(ctx context.Context, flowSchema *applyconfigurationsflowcontrolv1beta1.FlowSchemaApplyConfiguration, opts v1.ApplyOptions) (result *flowcontrolv1beta1.FlowSchema, err error) // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). - ApplyStatus(ctx context.Context, flowSchema *flowcontrolv1beta1.FlowSchemaApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.FlowSchema, err error) + ApplyStatus(ctx context.Context, flowSchema *applyconfigurationsflowcontrolv1beta1.FlowSchemaApplyConfiguration, opts v1.ApplyOptions) (result *flowcontrolv1beta1.FlowSchema, err error) FlowSchemaExpansion } // flowSchemas implements FlowSchemaInterface type flowSchemas struct { - *gentype.ClientWithListAndApply[*v1beta1.FlowSchema, *v1beta1.FlowSchemaList, *flowcontrolv1beta1.FlowSchemaApplyConfiguration] + *gentype.ClientWithListAndApply[*flowcontrolv1beta1.FlowSchema, *flowcontrolv1beta1.FlowSchemaList, *applyconfigurationsflowcontrolv1beta1.FlowSchemaApplyConfiguration] } // newFlowSchemas returns a FlowSchemas func newFlowSchemas(c *FlowcontrolV1beta1Client) *flowSchemas { return &flowSchemas{ - gentype.NewClientWithListAndApply[*v1beta1.FlowSchema, *v1beta1.FlowSchemaList, *flowcontrolv1beta1.FlowSchemaApplyConfiguration]( + gentype.NewClientWithListAndApply[*flowcontrolv1beta1.FlowSchema, *flowcontrolv1beta1.FlowSchemaList, *applyconfigurationsflowcontrolv1beta1.FlowSchemaApplyConfiguration]( "flowschemas", c.RESTClient(), scheme.ParameterCodec, "", - func() *v1beta1.FlowSchema { return &v1beta1.FlowSchema{} }, - func() *v1beta1.FlowSchemaList { return &v1beta1.FlowSchemaList{} }), + func() *flowcontrolv1beta1.FlowSchema { return &flowcontrolv1beta1.FlowSchema{} }, + func() *flowcontrolv1beta1.FlowSchemaList { return &flowcontrolv1beta1.FlowSchemaList{} }, + gentype.PrefersProtobuf[*flowcontrolv1beta1.FlowSchema](), + ), } } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/prioritylevelconfiguration.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/prioritylevelconfiguration.go index 049f4049d..15ee1b8b5 100644 --- 
a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/prioritylevelconfiguration.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/prioritylevelconfiguration.go @@ -19,13 +19,13 @@ limitations under the License. package v1beta1 import ( - "context" + context "context" - v1beta1 "k8s.io/api/flowcontrol/v1beta1" + flowcontrolv1beta1 "k8s.io/api/flowcontrol/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - flowcontrolv1beta1 "k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1" + applyconfigurationsflowcontrolv1beta1 "k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1" gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" ) @@ -38,36 +38,42 @@ type PriorityLevelConfigurationsGetter interface { // PriorityLevelConfigurationInterface has methods to work with PriorityLevelConfiguration resources. type PriorityLevelConfigurationInterface interface { - Create(ctx context.Context, priorityLevelConfiguration *v1beta1.PriorityLevelConfiguration, opts v1.CreateOptions) (*v1beta1.PriorityLevelConfiguration, error) - Update(ctx context.Context, priorityLevelConfiguration *v1beta1.PriorityLevelConfiguration, opts v1.UpdateOptions) (*v1beta1.PriorityLevelConfiguration, error) + Create(ctx context.Context, priorityLevelConfiguration *flowcontrolv1beta1.PriorityLevelConfiguration, opts v1.CreateOptions) (*flowcontrolv1beta1.PriorityLevelConfiguration, error) + Update(ctx context.Context, priorityLevelConfiguration *flowcontrolv1beta1.PriorityLevelConfiguration, opts v1.UpdateOptions) (*flowcontrolv1beta1.PriorityLevelConfiguration, error) // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
- UpdateStatus(ctx context.Context, priorityLevelConfiguration *v1beta1.PriorityLevelConfiguration, opts v1.UpdateOptions) (*v1beta1.PriorityLevelConfiguration, error) + UpdateStatus(ctx context.Context, priorityLevelConfiguration *flowcontrolv1beta1.PriorityLevelConfiguration, opts v1.UpdateOptions) (*flowcontrolv1beta1.PriorityLevelConfiguration, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.PriorityLevelConfiguration, error) - List(ctx context.Context, opts v1.ListOptions) (*v1beta1.PriorityLevelConfigurationList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*flowcontrolv1beta1.PriorityLevelConfiguration, error) + List(ctx context.Context, opts v1.ListOptions) (*flowcontrolv1beta1.PriorityLevelConfigurationList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.PriorityLevelConfiguration, err error) - Apply(ctx context.Context, priorityLevelConfiguration *flowcontrolv1beta1.PriorityLevelConfigurationApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.PriorityLevelConfiguration, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *flowcontrolv1beta1.PriorityLevelConfiguration, err error) + Apply(ctx context.Context, priorityLevelConfiguration *applyconfigurationsflowcontrolv1beta1.PriorityLevelConfigurationApplyConfiguration, opts v1.ApplyOptions) (result *flowcontrolv1beta1.PriorityLevelConfiguration, err error) // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
- ApplyStatus(ctx context.Context, priorityLevelConfiguration *flowcontrolv1beta1.PriorityLevelConfigurationApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.PriorityLevelConfiguration, err error) + ApplyStatus(ctx context.Context, priorityLevelConfiguration *applyconfigurationsflowcontrolv1beta1.PriorityLevelConfigurationApplyConfiguration, opts v1.ApplyOptions) (result *flowcontrolv1beta1.PriorityLevelConfiguration, err error) PriorityLevelConfigurationExpansion } // priorityLevelConfigurations implements PriorityLevelConfigurationInterface type priorityLevelConfigurations struct { - *gentype.ClientWithListAndApply[*v1beta1.PriorityLevelConfiguration, *v1beta1.PriorityLevelConfigurationList, *flowcontrolv1beta1.PriorityLevelConfigurationApplyConfiguration] + *gentype.ClientWithListAndApply[*flowcontrolv1beta1.PriorityLevelConfiguration, *flowcontrolv1beta1.PriorityLevelConfigurationList, *applyconfigurationsflowcontrolv1beta1.PriorityLevelConfigurationApplyConfiguration] } // newPriorityLevelConfigurations returns a PriorityLevelConfigurations func newPriorityLevelConfigurations(c *FlowcontrolV1beta1Client) *priorityLevelConfigurations { return &priorityLevelConfigurations{ - gentype.NewClientWithListAndApply[*v1beta1.PriorityLevelConfiguration, *v1beta1.PriorityLevelConfigurationList, *flowcontrolv1beta1.PriorityLevelConfigurationApplyConfiguration]( + gentype.NewClientWithListAndApply[*flowcontrolv1beta1.PriorityLevelConfiguration, *flowcontrolv1beta1.PriorityLevelConfigurationList, *applyconfigurationsflowcontrolv1beta1.PriorityLevelConfigurationApplyConfiguration]( "prioritylevelconfigurations", c.RESTClient(), scheme.ParameterCodec, "", - func() *v1beta1.PriorityLevelConfiguration { return &v1beta1.PriorityLevelConfiguration{} }, - func() *v1beta1.PriorityLevelConfigurationList { return &v1beta1.PriorityLevelConfigurationList{} }), + func() *flowcontrolv1beta1.PriorityLevelConfiguration { + return &flowcontrolv1beta1.PriorityLevelConfiguration{} + }, + func() *flowcontrolv1beta1.PriorityLevelConfigurationList { + return &flowcontrolv1beta1.PriorityLevelConfigurationList{} + }, + gentype.PrefersProtobuf[*flowcontrolv1beta1.PriorityLevelConfiguration](), + ), } } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/fake/fake_flowcontrol_client.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/fake/fake_flowcontrol_client.go index 9f36b3b7a..1114568d3 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/fake/fake_flowcontrol_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/fake/fake_flowcontrol_client.go @@ -29,11 +29,11 @@ type FakeFlowcontrolV1beta2 struct { } func (c *FakeFlowcontrolV1beta2) FlowSchemas() v1beta2.FlowSchemaInterface { - return &FakeFlowSchemas{c} + return newFakeFlowSchemas(c) } func (c *FakeFlowcontrolV1beta2) PriorityLevelConfigurations() v1beta2.PriorityLevelConfigurationInterface { - return &FakePriorityLevelConfigurations{c} + return newFakePriorityLevelConfigurations(c) } // RESTClient returns a RESTClient that is used to communicate diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/fake/fake_flowschema.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/fake/fake_flowschema.go index 41cad9b7a..73ed29687 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/fake/fake_flowschema.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/fake/fake_flowschema.go @@ -19,168 +19,33 @@ limitations under 
the License. package fake import ( - "context" - json "encoding/json" - "fmt" - v1beta2 "k8s.io/api/flowcontrol/v1beta2" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" flowcontrolv1beta2 "k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2" - testing "k8s.io/client-go/testing" + gentype "k8s.io/client-go/gentype" + typedflowcontrolv1beta2 "k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2" ) -// FakeFlowSchemas implements FlowSchemaInterface -type FakeFlowSchemas struct { +// fakeFlowSchemas implements FlowSchemaInterface +type fakeFlowSchemas struct { + *gentype.FakeClientWithListAndApply[*v1beta2.FlowSchema, *v1beta2.FlowSchemaList, *flowcontrolv1beta2.FlowSchemaApplyConfiguration] Fake *FakeFlowcontrolV1beta2 } -var flowschemasResource = v1beta2.SchemeGroupVersion.WithResource("flowschemas") - -var flowschemasKind = v1beta2.SchemeGroupVersion.WithKind("FlowSchema") - -// Get takes name of the flowSchema, and returns the corresponding flowSchema object, and an error if there is any. -func (c *FakeFlowSchemas) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta2.FlowSchema, err error) { - emptyResult := &v1beta2.FlowSchema{} - obj, err := c.Fake. - Invokes(testing.NewRootGetActionWithOptions(flowschemasResource, name, options), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta2.FlowSchema), err -} - -// List takes label and field selectors, and returns the list of FlowSchemas that match those selectors. -func (c *FakeFlowSchemas) List(ctx context.Context, opts v1.ListOptions) (result *v1beta2.FlowSchemaList, err error) { - emptyResult := &v1beta2.FlowSchemaList{} - obj, err := c.Fake. - Invokes(testing.NewRootListActionWithOptions(flowschemasResource, flowschemasKind, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1beta2.FlowSchemaList{ListMeta: obj.(*v1beta2.FlowSchemaList).ListMeta} - for _, item := range obj.(*v1beta2.FlowSchemaList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested flowSchemas. -func (c *FakeFlowSchemas) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewRootWatchActionWithOptions(flowschemasResource, opts)) -} - -// Create takes the representation of a flowSchema and creates it. Returns the server's representation of the flowSchema, and an error, if there is any. -func (c *FakeFlowSchemas) Create(ctx context.Context, flowSchema *v1beta2.FlowSchema, opts v1.CreateOptions) (result *v1beta2.FlowSchema, err error) { - emptyResult := &v1beta2.FlowSchema{} - obj, err := c.Fake. - Invokes(testing.NewRootCreateActionWithOptions(flowschemasResource, flowSchema, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta2.FlowSchema), err -} - -// Update takes the representation of a flowSchema and updates it. Returns the server's representation of the flowSchema, and an error, if there is any. -func (c *FakeFlowSchemas) Update(ctx context.Context, flowSchema *v1beta2.FlowSchema, opts v1.UpdateOptions) (result *v1beta2.FlowSchema, err error) { - emptyResult := &v1beta2.FlowSchema{} - obj, err := c.Fake. 
- Invokes(testing.NewRootUpdateActionWithOptions(flowschemasResource, flowSchema, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta2.FlowSchema), err -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeFlowSchemas) UpdateStatus(ctx context.Context, flowSchema *v1beta2.FlowSchema, opts v1.UpdateOptions) (result *v1beta2.FlowSchema, err error) { - emptyResult := &v1beta2.FlowSchema{} - obj, err := c.Fake. - Invokes(testing.NewRootUpdateSubresourceActionWithOptions(flowschemasResource, "status", flowSchema, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta2.FlowSchema), err -} - -// Delete takes name of the flowSchema and deletes it. Returns an error if one occurs. -func (c *FakeFlowSchemas) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewRootDeleteActionWithOptions(flowschemasResource, name, opts), &v1beta2.FlowSchema{}) - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeFlowSchemas) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewRootDeleteCollectionActionWithOptions(flowschemasResource, opts, listOpts) - - _, err := c.Fake.Invokes(action, &v1beta2.FlowSchemaList{}) - return err -} - -// Patch applies the patch and returns the patched flowSchema. -func (c *FakeFlowSchemas) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.FlowSchema, err error) { - emptyResult := &v1beta2.FlowSchema{} - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceActionWithOptions(flowschemasResource, name, pt, data, opts, subresources...), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta2.FlowSchema), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied flowSchema. -func (c *FakeFlowSchemas) Apply(ctx context.Context, flowSchema *flowcontrolv1beta2.FlowSchemaApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.FlowSchema, err error) { - if flowSchema == nil { - return nil, fmt.Errorf("flowSchema provided to Apply must not be nil") - } - data, err := json.Marshal(flowSchema) - if err != nil { - return nil, err - } - name := flowSchema.Name - if name == nil { - return nil, fmt.Errorf("flowSchema.Name must be provided to Apply") - } - emptyResult := &v1beta2.FlowSchema{} - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceActionWithOptions(flowschemasResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta2.FlowSchema), err -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
-func (c *FakeFlowSchemas) ApplyStatus(ctx context.Context, flowSchema *flowcontrolv1beta2.FlowSchemaApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.FlowSchema, err error) { - if flowSchema == nil { - return nil, fmt.Errorf("flowSchema provided to Apply must not be nil") - } - data, err := json.Marshal(flowSchema) - if err != nil { - return nil, err - } - name := flowSchema.Name - if name == nil { - return nil, fmt.Errorf("flowSchema.Name must be provided to Apply") - } - emptyResult := &v1beta2.FlowSchema{} - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceActionWithOptions(flowschemasResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) - if obj == nil { - return emptyResult, err +func newFakeFlowSchemas(fake *FakeFlowcontrolV1beta2) typedflowcontrolv1beta2.FlowSchemaInterface { + return &fakeFlowSchemas{ + gentype.NewFakeClientWithListAndApply[*v1beta2.FlowSchema, *v1beta2.FlowSchemaList, *flowcontrolv1beta2.FlowSchemaApplyConfiguration]( + fake.Fake, + "", + v1beta2.SchemeGroupVersion.WithResource("flowschemas"), + v1beta2.SchemeGroupVersion.WithKind("FlowSchema"), + func() *v1beta2.FlowSchema { return &v1beta2.FlowSchema{} }, + func() *v1beta2.FlowSchemaList { return &v1beta2.FlowSchemaList{} }, + func(dst, src *v1beta2.FlowSchemaList) { dst.ListMeta = src.ListMeta }, + func(list *v1beta2.FlowSchemaList) []*v1beta2.FlowSchema { return gentype.ToPointerSlice(list.Items) }, + func(list *v1beta2.FlowSchemaList, items []*v1beta2.FlowSchema) { + list.Items = gentype.FromPointerSlice(items) + }, + ), + fake, } - return obj.(*v1beta2.FlowSchema), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/fake/fake_prioritylevelconfiguration.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/fake/fake_prioritylevelconfiguration.go index f9eac85d5..8ebb58e36 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/fake/fake_prioritylevelconfiguration.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/fake/fake_prioritylevelconfiguration.go @@ -19,168 +19,35 @@ limitations under the License. package fake import ( - "context" - json "encoding/json" - "fmt" - v1beta2 "k8s.io/api/flowcontrol/v1beta2" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" flowcontrolv1beta2 "k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2" - testing "k8s.io/client-go/testing" + gentype "k8s.io/client-go/gentype" + typedflowcontrolv1beta2 "k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2" ) -// FakePriorityLevelConfigurations implements PriorityLevelConfigurationInterface -type FakePriorityLevelConfigurations struct { +// fakePriorityLevelConfigurations implements PriorityLevelConfigurationInterface +type fakePriorityLevelConfigurations struct { + *gentype.FakeClientWithListAndApply[*v1beta2.PriorityLevelConfiguration, *v1beta2.PriorityLevelConfigurationList, *flowcontrolv1beta2.PriorityLevelConfigurationApplyConfiguration] Fake *FakeFlowcontrolV1beta2 } -var prioritylevelconfigurationsResource = v1beta2.SchemeGroupVersion.WithResource("prioritylevelconfigurations") - -var prioritylevelconfigurationsKind = v1beta2.SchemeGroupVersion.WithKind("PriorityLevelConfiguration") - -// Get takes name of the priorityLevelConfiguration, and returns the corresponding priorityLevelConfiguration object, and an error if there is any. 
-func (c *FakePriorityLevelConfigurations) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta2.PriorityLevelConfiguration, err error) { - emptyResult := &v1beta2.PriorityLevelConfiguration{} - obj, err := c.Fake. - Invokes(testing.NewRootGetActionWithOptions(prioritylevelconfigurationsResource, name, options), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta2.PriorityLevelConfiguration), err -} - -// List takes label and field selectors, and returns the list of PriorityLevelConfigurations that match those selectors. -func (c *FakePriorityLevelConfigurations) List(ctx context.Context, opts v1.ListOptions) (result *v1beta2.PriorityLevelConfigurationList, err error) { - emptyResult := &v1beta2.PriorityLevelConfigurationList{} - obj, err := c.Fake. - Invokes(testing.NewRootListActionWithOptions(prioritylevelconfigurationsResource, prioritylevelconfigurationsKind, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1beta2.PriorityLevelConfigurationList{ListMeta: obj.(*v1beta2.PriorityLevelConfigurationList).ListMeta} - for _, item := range obj.(*v1beta2.PriorityLevelConfigurationList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested priorityLevelConfigurations. -func (c *FakePriorityLevelConfigurations) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewRootWatchActionWithOptions(prioritylevelconfigurationsResource, opts)) -} - -// Create takes the representation of a priorityLevelConfiguration and creates it. Returns the server's representation of the priorityLevelConfiguration, and an error, if there is any. -func (c *FakePriorityLevelConfigurations) Create(ctx context.Context, priorityLevelConfiguration *v1beta2.PriorityLevelConfiguration, opts v1.CreateOptions) (result *v1beta2.PriorityLevelConfiguration, err error) { - emptyResult := &v1beta2.PriorityLevelConfiguration{} - obj, err := c.Fake. - Invokes(testing.NewRootCreateActionWithOptions(prioritylevelconfigurationsResource, priorityLevelConfiguration, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta2.PriorityLevelConfiguration), err -} - -// Update takes the representation of a priorityLevelConfiguration and updates it. Returns the server's representation of the priorityLevelConfiguration, and an error, if there is any. -func (c *FakePriorityLevelConfigurations) Update(ctx context.Context, priorityLevelConfiguration *v1beta2.PriorityLevelConfiguration, opts v1.UpdateOptions) (result *v1beta2.PriorityLevelConfiguration, err error) { - emptyResult := &v1beta2.PriorityLevelConfiguration{} - obj, err := c.Fake. - Invokes(testing.NewRootUpdateActionWithOptions(prioritylevelconfigurationsResource, priorityLevelConfiguration, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta2.PriorityLevelConfiguration), err -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
-func (c *FakePriorityLevelConfigurations) UpdateStatus(ctx context.Context, priorityLevelConfiguration *v1beta2.PriorityLevelConfiguration, opts v1.UpdateOptions) (result *v1beta2.PriorityLevelConfiguration, err error) { - emptyResult := &v1beta2.PriorityLevelConfiguration{} - obj, err := c.Fake. - Invokes(testing.NewRootUpdateSubresourceActionWithOptions(prioritylevelconfigurationsResource, "status", priorityLevelConfiguration, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta2.PriorityLevelConfiguration), err -} - -// Delete takes name of the priorityLevelConfiguration and deletes it. Returns an error if one occurs. -func (c *FakePriorityLevelConfigurations) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewRootDeleteActionWithOptions(prioritylevelconfigurationsResource, name, opts), &v1beta2.PriorityLevelConfiguration{}) - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakePriorityLevelConfigurations) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewRootDeleteCollectionActionWithOptions(prioritylevelconfigurationsResource, opts, listOpts) - - _, err := c.Fake.Invokes(action, &v1beta2.PriorityLevelConfigurationList{}) - return err -} - -// Patch applies the patch and returns the patched priorityLevelConfiguration. -func (c *FakePriorityLevelConfigurations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.PriorityLevelConfiguration, err error) { - emptyResult := &v1beta2.PriorityLevelConfiguration{} - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceActionWithOptions(prioritylevelconfigurationsResource, name, pt, data, opts, subresources...), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta2.PriorityLevelConfiguration), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied priorityLevelConfiguration. -func (c *FakePriorityLevelConfigurations) Apply(ctx context.Context, priorityLevelConfiguration *flowcontrolv1beta2.PriorityLevelConfigurationApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.PriorityLevelConfiguration, err error) { - if priorityLevelConfiguration == nil { - return nil, fmt.Errorf("priorityLevelConfiguration provided to Apply must not be nil") - } - data, err := json.Marshal(priorityLevelConfiguration) - if err != nil { - return nil, err - } - name := priorityLevelConfiguration.Name - if name == nil { - return nil, fmt.Errorf("priorityLevelConfiguration.Name must be provided to Apply") - } - emptyResult := &v1beta2.PriorityLevelConfiguration{} - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceActionWithOptions(prioritylevelconfigurationsResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta2.PriorityLevelConfiguration), err -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
-func (c *FakePriorityLevelConfigurations) ApplyStatus(ctx context.Context, priorityLevelConfiguration *flowcontrolv1beta2.PriorityLevelConfigurationApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.PriorityLevelConfiguration, err error) { - if priorityLevelConfiguration == nil { - return nil, fmt.Errorf("priorityLevelConfiguration provided to Apply must not be nil") - } - data, err := json.Marshal(priorityLevelConfiguration) - if err != nil { - return nil, err - } - name := priorityLevelConfiguration.Name - if name == nil { - return nil, fmt.Errorf("priorityLevelConfiguration.Name must be provided to Apply") - } - emptyResult := &v1beta2.PriorityLevelConfiguration{} - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceActionWithOptions(prioritylevelconfigurationsResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) - if obj == nil { - return emptyResult, err +func newFakePriorityLevelConfigurations(fake *FakeFlowcontrolV1beta2) typedflowcontrolv1beta2.PriorityLevelConfigurationInterface { + return &fakePriorityLevelConfigurations{ + gentype.NewFakeClientWithListAndApply[*v1beta2.PriorityLevelConfiguration, *v1beta2.PriorityLevelConfigurationList, *flowcontrolv1beta2.PriorityLevelConfigurationApplyConfiguration]( + fake.Fake, + "", + v1beta2.SchemeGroupVersion.WithResource("prioritylevelconfigurations"), + v1beta2.SchemeGroupVersion.WithKind("PriorityLevelConfiguration"), + func() *v1beta2.PriorityLevelConfiguration { return &v1beta2.PriorityLevelConfiguration{} }, + func() *v1beta2.PriorityLevelConfigurationList { return &v1beta2.PriorityLevelConfigurationList{} }, + func(dst, src *v1beta2.PriorityLevelConfigurationList) { dst.ListMeta = src.ListMeta }, + func(list *v1beta2.PriorityLevelConfigurationList) []*v1beta2.PriorityLevelConfiguration { + return gentype.ToPointerSlice(list.Items) + }, + func(list *v1beta2.PriorityLevelConfigurationList, items []*v1beta2.PriorityLevelConfiguration) { + list.Items = gentype.FromPointerSlice(items) + }, + ), + fake, } - return obj.(*v1beta2.PriorityLevelConfiguration), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/flowcontrol_client.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/flowcontrol_client.go index f3cca4fc7..7652d4f39 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/flowcontrol_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/flowcontrol_client.go @@ -19,10 +19,10 @@ limitations under the License. 
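// ---- Editor's note (illustrative sketch, not part of the vendored patch) ----
// The setConfigDefaults hunks below replace scheme.Codecs with
// rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs); together with the
// gentype.PrefersProtobuf option added to the typed client constructors, this lets the
// regenerated clients negotiate protobuf with the API server while keeping JSON as the
// fallback. The change is internal to the generated code, so callers keep building the
// clientset the usual way. A minimal caller-side sketch (the kubeconfig path is a
// placeholder, and the beta flowcontrol versions may not be served on newer clusters):
package example

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func listFlowSchemas(kubeconfig string) error {
	cfg, err := clientcmd.BuildConfigFromFlags("", kubeconfig) // placeholder path
	if err != nil {
		return err
	}
	// setConfigDefaults runs inside NewForConfig for every group/version client.
	clientset, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		return err
	}
	list, err := clientset.FlowcontrolV1beta2().FlowSchemas().
		List(context.TODO(), metav1.ListOptions{})
	if err != nil {
		return err
	}
	fmt.Println("flow schemas:", len(list.Items))
	return nil
}
// ---- end editor's note ----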
package v1beta2 import ( - "net/http" + http "net/http" - v1beta2 "k8s.io/api/flowcontrol/v1beta2" - "k8s.io/client-go/kubernetes/scheme" + flowcontrolv1beta2 "k8s.io/api/flowcontrol/v1beta2" + scheme "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -90,10 +90,10 @@ func New(c rest.Interface) *FlowcontrolV1beta2Client { } func setConfigDefaults(config *rest.Config) error { - gv := v1beta2.SchemeGroupVersion + gv := flowcontrolv1beta2.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/flowschema.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/flowschema.go index 270615762..780cf030e 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/flowschema.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/flowschema.go @@ -19,13 +19,13 @@ limitations under the License. package v1beta2 import ( - "context" + context "context" - v1beta2 "k8s.io/api/flowcontrol/v1beta2" + flowcontrolv1beta2 "k8s.io/api/flowcontrol/v1beta2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - flowcontrolv1beta2 "k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2" + applyconfigurationsflowcontrolv1beta2 "k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2" gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" ) @@ -38,36 +38,38 @@ type FlowSchemasGetter interface { // FlowSchemaInterface has methods to work with FlowSchema resources. type FlowSchemaInterface interface { - Create(ctx context.Context, flowSchema *v1beta2.FlowSchema, opts v1.CreateOptions) (*v1beta2.FlowSchema, error) - Update(ctx context.Context, flowSchema *v1beta2.FlowSchema, opts v1.UpdateOptions) (*v1beta2.FlowSchema, error) + Create(ctx context.Context, flowSchema *flowcontrolv1beta2.FlowSchema, opts v1.CreateOptions) (*flowcontrolv1beta2.FlowSchema, error) + Update(ctx context.Context, flowSchema *flowcontrolv1beta2.FlowSchema, opts v1.UpdateOptions) (*flowcontrolv1beta2.FlowSchema, error) // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
- UpdateStatus(ctx context.Context, flowSchema *v1beta2.FlowSchema, opts v1.UpdateOptions) (*v1beta2.FlowSchema, error) + UpdateStatus(ctx context.Context, flowSchema *flowcontrolv1beta2.FlowSchema, opts v1.UpdateOptions) (*flowcontrolv1beta2.FlowSchema, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta2.FlowSchema, error) - List(ctx context.Context, opts v1.ListOptions) (*v1beta2.FlowSchemaList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*flowcontrolv1beta2.FlowSchema, error) + List(ctx context.Context, opts v1.ListOptions) (*flowcontrolv1beta2.FlowSchemaList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.FlowSchema, err error) - Apply(ctx context.Context, flowSchema *flowcontrolv1beta2.FlowSchemaApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.FlowSchema, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *flowcontrolv1beta2.FlowSchema, err error) + Apply(ctx context.Context, flowSchema *applyconfigurationsflowcontrolv1beta2.FlowSchemaApplyConfiguration, opts v1.ApplyOptions) (result *flowcontrolv1beta2.FlowSchema, err error) // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). - ApplyStatus(ctx context.Context, flowSchema *flowcontrolv1beta2.FlowSchemaApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.FlowSchema, err error) + ApplyStatus(ctx context.Context, flowSchema *applyconfigurationsflowcontrolv1beta2.FlowSchemaApplyConfiguration, opts v1.ApplyOptions) (result *flowcontrolv1beta2.FlowSchema, err error) FlowSchemaExpansion } // flowSchemas implements FlowSchemaInterface type flowSchemas struct { - *gentype.ClientWithListAndApply[*v1beta2.FlowSchema, *v1beta2.FlowSchemaList, *flowcontrolv1beta2.FlowSchemaApplyConfiguration] + *gentype.ClientWithListAndApply[*flowcontrolv1beta2.FlowSchema, *flowcontrolv1beta2.FlowSchemaList, *applyconfigurationsflowcontrolv1beta2.FlowSchemaApplyConfiguration] } // newFlowSchemas returns a FlowSchemas func newFlowSchemas(c *FlowcontrolV1beta2Client) *flowSchemas { return &flowSchemas{ - gentype.NewClientWithListAndApply[*v1beta2.FlowSchema, *v1beta2.FlowSchemaList, *flowcontrolv1beta2.FlowSchemaApplyConfiguration]( + gentype.NewClientWithListAndApply[*flowcontrolv1beta2.FlowSchema, *flowcontrolv1beta2.FlowSchemaList, *applyconfigurationsflowcontrolv1beta2.FlowSchemaApplyConfiguration]( "flowschemas", c.RESTClient(), scheme.ParameterCodec, "", - func() *v1beta2.FlowSchema { return &v1beta2.FlowSchema{} }, - func() *v1beta2.FlowSchemaList { return &v1beta2.FlowSchemaList{} }), + func() *flowcontrolv1beta2.FlowSchema { return &flowcontrolv1beta2.FlowSchema{} }, + func() *flowcontrolv1beta2.FlowSchemaList { return &flowcontrolv1beta2.FlowSchemaList{} }, + gentype.PrefersProtobuf[*flowcontrolv1beta2.FlowSchema](), + ), } } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/prioritylevelconfiguration.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/prioritylevelconfiguration.go index 00ead4c60..65b9feafa 100644 --- 
a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/prioritylevelconfiguration.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/prioritylevelconfiguration.go @@ -19,13 +19,13 @@ limitations under the License. package v1beta2 import ( - "context" + context "context" - v1beta2 "k8s.io/api/flowcontrol/v1beta2" + flowcontrolv1beta2 "k8s.io/api/flowcontrol/v1beta2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - flowcontrolv1beta2 "k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2" + applyconfigurationsflowcontrolv1beta2 "k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2" gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" ) @@ -38,36 +38,42 @@ type PriorityLevelConfigurationsGetter interface { // PriorityLevelConfigurationInterface has methods to work with PriorityLevelConfiguration resources. type PriorityLevelConfigurationInterface interface { - Create(ctx context.Context, priorityLevelConfiguration *v1beta2.PriorityLevelConfiguration, opts v1.CreateOptions) (*v1beta2.PriorityLevelConfiguration, error) - Update(ctx context.Context, priorityLevelConfiguration *v1beta2.PriorityLevelConfiguration, opts v1.UpdateOptions) (*v1beta2.PriorityLevelConfiguration, error) + Create(ctx context.Context, priorityLevelConfiguration *flowcontrolv1beta2.PriorityLevelConfiguration, opts v1.CreateOptions) (*flowcontrolv1beta2.PriorityLevelConfiguration, error) + Update(ctx context.Context, priorityLevelConfiguration *flowcontrolv1beta2.PriorityLevelConfiguration, opts v1.UpdateOptions) (*flowcontrolv1beta2.PriorityLevelConfiguration, error) // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
- UpdateStatus(ctx context.Context, priorityLevelConfiguration *v1beta2.PriorityLevelConfiguration, opts v1.UpdateOptions) (*v1beta2.PriorityLevelConfiguration, error) + UpdateStatus(ctx context.Context, priorityLevelConfiguration *flowcontrolv1beta2.PriorityLevelConfiguration, opts v1.UpdateOptions) (*flowcontrolv1beta2.PriorityLevelConfiguration, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta2.PriorityLevelConfiguration, error) - List(ctx context.Context, opts v1.ListOptions) (*v1beta2.PriorityLevelConfigurationList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*flowcontrolv1beta2.PriorityLevelConfiguration, error) + List(ctx context.Context, opts v1.ListOptions) (*flowcontrolv1beta2.PriorityLevelConfigurationList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.PriorityLevelConfiguration, err error) - Apply(ctx context.Context, priorityLevelConfiguration *flowcontrolv1beta2.PriorityLevelConfigurationApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.PriorityLevelConfiguration, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *flowcontrolv1beta2.PriorityLevelConfiguration, err error) + Apply(ctx context.Context, priorityLevelConfiguration *applyconfigurationsflowcontrolv1beta2.PriorityLevelConfigurationApplyConfiguration, opts v1.ApplyOptions) (result *flowcontrolv1beta2.PriorityLevelConfiguration, err error) // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
- ApplyStatus(ctx context.Context, priorityLevelConfiguration *flowcontrolv1beta2.PriorityLevelConfigurationApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.PriorityLevelConfiguration, err error) + ApplyStatus(ctx context.Context, priorityLevelConfiguration *applyconfigurationsflowcontrolv1beta2.PriorityLevelConfigurationApplyConfiguration, opts v1.ApplyOptions) (result *flowcontrolv1beta2.PriorityLevelConfiguration, err error) PriorityLevelConfigurationExpansion } // priorityLevelConfigurations implements PriorityLevelConfigurationInterface type priorityLevelConfigurations struct { - *gentype.ClientWithListAndApply[*v1beta2.PriorityLevelConfiguration, *v1beta2.PriorityLevelConfigurationList, *flowcontrolv1beta2.PriorityLevelConfigurationApplyConfiguration] + *gentype.ClientWithListAndApply[*flowcontrolv1beta2.PriorityLevelConfiguration, *flowcontrolv1beta2.PriorityLevelConfigurationList, *applyconfigurationsflowcontrolv1beta2.PriorityLevelConfigurationApplyConfiguration] } // newPriorityLevelConfigurations returns a PriorityLevelConfigurations func newPriorityLevelConfigurations(c *FlowcontrolV1beta2Client) *priorityLevelConfigurations { return &priorityLevelConfigurations{ - gentype.NewClientWithListAndApply[*v1beta2.PriorityLevelConfiguration, *v1beta2.PriorityLevelConfigurationList, *flowcontrolv1beta2.PriorityLevelConfigurationApplyConfiguration]( + gentype.NewClientWithListAndApply[*flowcontrolv1beta2.PriorityLevelConfiguration, *flowcontrolv1beta2.PriorityLevelConfigurationList, *applyconfigurationsflowcontrolv1beta2.PriorityLevelConfigurationApplyConfiguration]( "prioritylevelconfigurations", c.RESTClient(), scheme.ParameterCodec, "", - func() *v1beta2.PriorityLevelConfiguration { return &v1beta2.PriorityLevelConfiguration{} }, - func() *v1beta2.PriorityLevelConfigurationList { return &v1beta2.PriorityLevelConfigurationList{} }), + func() *flowcontrolv1beta2.PriorityLevelConfiguration { + return &flowcontrolv1beta2.PriorityLevelConfiguration{} + }, + func() *flowcontrolv1beta2.PriorityLevelConfigurationList { + return &flowcontrolv1beta2.PriorityLevelConfigurationList{} + }, + gentype.PrefersProtobuf[*flowcontrolv1beta2.PriorityLevelConfiguration](), + ), } } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3/fake/fake_flowcontrol_client.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3/fake/fake_flowcontrol_client.go index 1cb0198d0..8f13bc94c 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3/fake/fake_flowcontrol_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3/fake/fake_flowcontrol_client.go @@ -29,11 +29,11 @@ type FakeFlowcontrolV1beta3 struct { } func (c *FakeFlowcontrolV1beta3) FlowSchemas() v1beta3.FlowSchemaInterface { - return &FakeFlowSchemas{c} + return newFakeFlowSchemas(c) } func (c *FakeFlowcontrolV1beta3) PriorityLevelConfigurations() v1beta3.PriorityLevelConfigurationInterface { - return &FakePriorityLevelConfigurations{c} + return newFakePriorityLevelConfigurations(c) } // RESTClient returns a RESTClient that is used to communicate diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3/fake/fake_flowschema.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3/fake/fake_flowschema.go index 70dca796a..887e8c97c 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3/fake/fake_flowschema.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3/fake/fake_flowschema.go @@ -19,168 +19,33 @@ limitations under 
the License. package fake import ( - "context" - json "encoding/json" - "fmt" - v1beta3 "k8s.io/api/flowcontrol/v1beta3" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" flowcontrolv1beta3 "k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3" - testing "k8s.io/client-go/testing" + gentype "k8s.io/client-go/gentype" + typedflowcontrolv1beta3 "k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3" ) -// FakeFlowSchemas implements FlowSchemaInterface -type FakeFlowSchemas struct { +// fakeFlowSchemas implements FlowSchemaInterface +type fakeFlowSchemas struct { + *gentype.FakeClientWithListAndApply[*v1beta3.FlowSchema, *v1beta3.FlowSchemaList, *flowcontrolv1beta3.FlowSchemaApplyConfiguration] Fake *FakeFlowcontrolV1beta3 } -var flowschemasResource = v1beta3.SchemeGroupVersion.WithResource("flowschemas") - -var flowschemasKind = v1beta3.SchemeGroupVersion.WithKind("FlowSchema") - -// Get takes name of the flowSchema, and returns the corresponding flowSchema object, and an error if there is any. -func (c *FakeFlowSchemas) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta3.FlowSchema, err error) { - emptyResult := &v1beta3.FlowSchema{} - obj, err := c.Fake. - Invokes(testing.NewRootGetActionWithOptions(flowschemasResource, name, options), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta3.FlowSchema), err -} - -// List takes label and field selectors, and returns the list of FlowSchemas that match those selectors. -func (c *FakeFlowSchemas) List(ctx context.Context, opts v1.ListOptions) (result *v1beta3.FlowSchemaList, err error) { - emptyResult := &v1beta3.FlowSchemaList{} - obj, err := c.Fake. - Invokes(testing.NewRootListActionWithOptions(flowschemasResource, flowschemasKind, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1beta3.FlowSchemaList{ListMeta: obj.(*v1beta3.FlowSchemaList).ListMeta} - for _, item := range obj.(*v1beta3.FlowSchemaList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested flowSchemas. -func (c *FakeFlowSchemas) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewRootWatchActionWithOptions(flowschemasResource, opts)) -} - -// Create takes the representation of a flowSchema and creates it. Returns the server's representation of the flowSchema, and an error, if there is any. -func (c *FakeFlowSchemas) Create(ctx context.Context, flowSchema *v1beta3.FlowSchema, opts v1.CreateOptions) (result *v1beta3.FlowSchema, err error) { - emptyResult := &v1beta3.FlowSchema{} - obj, err := c.Fake. - Invokes(testing.NewRootCreateActionWithOptions(flowschemasResource, flowSchema, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta3.FlowSchema), err -} - -// Update takes the representation of a flowSchema and updates it. Returns the server's representation of the flowSchema, and an error, if there is any. -func (c *FakeFlowSchemas) Update(ctx context.Context, flowSchema *v1beta3.FlowSchema, opts v1.UpdateOptions) (result *v1beta3.FlowSchema, err error) { - emptyResult := &v1beta3.FlowSchema{} - obj, err := c.Fake. 
- Invokes(testing.NewRootUpdateActionWithOptions(flowschemasResource, flowSchema, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta3.FlowSchema), err -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeFlowSchemas) UpdateStatus(ctx context.Context, flowSchema *v1beta3.FlowSchema, opts v1.UpdateOptions) (result *v1beta3.FlowSchema, err error) { - emptyResult := &v1beta3.FlowSchema{} - obj, err := c.Fake. - Invokes(testing.NewRootUpdateSubresourceActionWithOptions(flowschemasResource, "status", flowSchema, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta3.FlowSchema), err -} - -// Delete takes name of the flowSchema and deletes it. Returns an error if one occurs. -func (c *FakeFlowSchemas) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewRootDeleteActionWithOptions(flowschemasResource, name, opts), &v1beta3.FlowSchema{}) - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeFlowSchemas) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewRootDeleteCollectionActionWithOptions(flowschemasResource, opts, listOpts) - - _, err := c.Fake.Invokes(action, &v1beta3.FlowSchemaList{}) - return err -} - -// Patch applies the patch and returns the patched flowSchema. -func (c *FakeFlowSchemas) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta3.FlowSchema, err error) { - emptyResult := &v1beta3.FlowSchema{} - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceActionWithOptions(flowschemasResource, name, pt, data, opts, subresources...), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta3.FlowSchema), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied flowSchema. -func (c *FakeFlowSchemas) Apply(ctx context.Context, flowSchema *flowcontrolv1beta3.FlowSchemaApplyConfiguration, opts v1.ApplyOptions) (result *v1beta3.FlowSchema, err error) { - if flowSchema == nil { - return nil, fmt.Errorf("flowSchema provided to Apply must not be nil") - } - data, err := json.Marshal(flowSchema) - if err != nil { - return nil, err - } - name := flowSchema.Name - if name == nil { - return nil, fmt.Errorf("flowSchema.Name must be provided to Apply") - } - emptyResult := &v1beta3.FlowSchema{} - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceActionWithOptions(flowschemasResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta3.FlowSchema), err -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
-func (c *FakeFlowSchemas) ApplyStatus(ctx context.Context, flowSchema *flowcontrolv1beta3.FlowSchemaApplyConfiguration, opts v1.ApplyOptions) (result *v1beta3.FlowSchema, err error) { - if flowSchema == nil { - return nil, fmt.Errorf("flowSchema provided to Apply must not be nil") - } - data, err := json.Marshal(flowSchema) - if err != nil { - return nil, err - } - name := flowSchema.Name - if name == nil { - return nil, fmt.Errorf("flowSchema.Name must be provided to Apply") - } - emptyResult := &v1beta3.FlowSchema{} - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceActionWithOptions(flowschemasResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) - if obj == nil { - return emptyResult, err +func newFakeFlowSchemas(fake *FakeFlowcontrolV1beta3) typedflowcontrolv1beta3.FlowSchemaInterface { + return &fakeFlowSchemas{ + gentype.NewFakeClientWithListAndApply[*v1beta3.FlowSchema, *v1beta3.FlowSchemaList, *flowcontrolv1beta3.FlowSchemaApplyConfiguration]( + fake.Fake, + "", + v1beta3.SchemeGroupVersion.WithResource("flowschemas"), + v1beta3.SchemeGroupVersion.WithKind("FlowSchema"), + func() *v1beta3.FlowSchema { return &v1beta3.FlowSchema{} }, + func() *v1beta3.FlowSchemaList { return &v1beta3.FlowSchemaList{} }, + func(dst, src *v1beta3.FlowSchemaList) { dst.ListMeta = src.ListMeta }, + func(list *v1beta3.FlowSchemaList) []*v1beta3.FlowSchema { return gentype.ToPointerSlice(list.Items) }, + func(list *v1beta3.FlowSchemaList, items []*v1beta3.FlowSchema) { + list.Items = gentype.FromPointerSlice(items) + }, + ), + fake, } - return obj.(*v1beta3.FlowSchema), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3/fake/fake_prioritylevelconfiguration.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3/fake/fake_prioritylevelconfiguration.go index 45836a645..57ea20cfc 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3/fake/fake_prioritylevelconfiguration.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3/fake/fake_prioritylevelconfiguration.go @@ -19,168 +19,35 @@ limitations under the License. package fake import ( - "context" - json "encoding/json" - "fmt" - v1beta3 "k8s.io/api/flowcontrol/v1beta3" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" flowcontrolv1beta3 "k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3" - testing "k8s.io/client-go/testing" + gentype "k8s.io/client-go/gentype" + typedflowcontrolv1beta3 "k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3" ) -// FakePriorityLevelConfigurations implements PriorityLevelConfigurationInterface -type FakePriorityLevelConfigurations struct { +// fakePriorityLevelConfigurations implements PriorityLevelConfigurationInterface +type fakePriorityLevelConfigurations struct { + *gentype.FakeClientWithListAndApply[*v1beta3.PriorityLevelConfiguration, *v1beta3.PriorityLevelConfigurationList, *flowcontrolv1beta3.PriorityLevelConfigurationApplyConfiguration] Fake *FakeFlowcontrolV1beta3 } -var prioritylevelconfigurationsResource = v1beta3.SchemeGroupVersion.WithResource("prioritylevelconfigurations") - -var prioritylevelconfigurationsKind = v1beta3.SchemeGroupVersion.WithKind("PriorityLevelConfiguration") - -// Get takes name of the priorityLevelConfiguration, and returns the corresponding priorityLevelConfiguration object, and an error if there is any. 
-func (c *FakePriorityLevelConfigurations) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta3.PriorityLevelConfiguration, err error) { - emptyResult := &v1beta3.PriorityLevelConfiguration{} - obj, err := c.Fake. - Invokes(testing.NewRootGetActionWithOptions(prioritylevelconfigurationsResource, name, options), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta3.PriorityLevelConfiguration), err -} - -// List takes label and field selectors, and returns the list of PriorityLevelConfigurations that match those selectors. -func (c *FakePriorityLevelConfigurations) List(ctx context.Context, opts v1.ListOptions) (result *v1beta3.PriorityLevelConfigurationList, err error) { - emptyResult := &v1beta3.PriorityLevelConfigurationList{} - obj, err := c.Fake. - Invokes(testing.NewRootListActionWithOptions(prioritylevelconfigurationsResource, prioritylevelconfigurationsKind, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1beta3.PriorityLevelConfigurationList{ListMeta: obj.(*v1beta3.PriorityLevelConfigurationList).ListMeta} - for _, item := range obj.(*v1beta3.PriorityLevelConfigurationList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested priorityLevelConfigurations. -func (c *FakePriorityLevelConfigurations) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewRootWatchActionWithOptions(prioritylevelconfigurationsResource, opts)) -} - -// Create takes the representation of a priorityLevelConfiguration and creates it. Returns the server's representation of the priorityLevelConfiguration, and an error, if there is any. -func (c *FakePriorityLevelConfigurations) Create(ctx context.Context, priorityLevelConfiguration *v1beta3.PriorityLevelConfiguration, opts v1.CreateOptions) (result *v1beta3.PriorityLevelConfiguration, err error) { - emptyResult := &v1beta3.PriorityLevelConfiguration{} - obj, err := c.Fake. - Invokes(testing.NewRootCreateActionWithOptions(prioritylevelconfigurationsResource, priorityLevelConfiguration, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta3.PriorityLevelConfiguration), err -} - -// Update takes the representation of a priorityLevelConfiguration and updates it. Returns the server's representation of the priorityLevelConfiguration, and an error, if there is any. -func (c *FakePriorityLevelConfigurations) Update(ctx context.Context, priorityLevelConfiguration *v1beta3.PriorityLevelConfiguration, opts v1.UpdateOptions) (result *v1beta3.PriorityLevelConfiguration, err error) { - emptyResult := &v1beta3.PriorityLevelConfiguration{} - obj, err := c.Fake. - Invokes(testing.NewRootUpdateActionWithOptions(prioritylevelconfigurationsResource, priorityLevelConfiguration, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta3.PriorityLevelConfiguration), err -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
-func (c *FakePriorityLevelConfigurations) UpdateStatus(ctx context.Context, priorityLevelConfiguration *v1beta3.PriorityLevelConfiguration, opts v1.UpdateOptions) (result *v1beta3.PriorityLevelConfiguration, err error) { - emptyResult := &v1beta3.PriorityLevelConfiguration{} - obj, err := c.Fake. - Invokes(testing.NewRootUpdateSubresourceActionWithOptions(prioritylevelconfigurationsResource, "status", priorityLevelConfiguration, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta3.PriorityLevelConfiguration), err -} - -// Delete takes name of the priorityLevelConfiguration and deletes it. Returns an error if one occurs. -func (c *FakePriorityLevelConfigurations) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewRootDeleteActionWithOptions(prioritylevelconfigurationsResource, name, opts), &v1beta3.PriorityLevelConfiguration{}) - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakePriorityLevelConfigurations) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewRootDeleteCollectionActionWithOptions(prioritylevelconfigurationsResource, opts, listOpts) - - _, err := c.Fake.Invokes(action, &v1beta3.PriorityLevelConfigurationList{}) - return err -} - -// Patch applies the patch and returns the patched priorityLevelConfiguration. -func (c *FakePriorityLevelConfigurations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta3.PriorityLevelConfiguration, err error) { - emptyResult := &v1beta3.PriorityLevelConfiguration{} - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceActionWithOptions(prioritylevelconfigurationsResource, name, pt, data, opts, subresources...), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta3.PriorityLevelConfiguration), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied priorityLevelConfiguration. -func (c *FakePriorityLevelConfigurations) Apply(ctx context.Context, priorityLevelConfiguration *flowcontrolv1beta3.PriorityLevelConfigurationApplyConfiguration, opts v1.ApplyOptions) (result *v1beta3.PriorityLevelConfiguration, err error) { - if priorityLevelConfiguration == nil { - return nil, fmt.Errorf("priorityLevelConfiguration provided to Apply must not be nil") - } - data, err := json.Marshal(priorityLevelConfiguration) - if err != nil { - return nil, err - } - name := priorityLevelConfiguration.Name - if name == nil { - return nil, fmt.Errorf("priorityLevelConfiguration.Name must be provided to Apply") - } - emptyResult := &v1beta3.PriorityLevelConfiguration{} - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceActionWithOptions(prioritylevelconfigurationsResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta3.PriorityLevelConfiguration), err -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
-func (c *FakePriorityLevelConfigurations) ApplyStatus(ctx context.Context, priorityLevelConfiguration *flowcontrolv1beta3.PriorityLevelConfigurationApplyConfiguration, opts v1.ApplyOptions) (result *v1beta3.PriorityLevelConfiguration, err error) { - if priorityLevelConfiguration == nil { - return nil, fmt.Errorf("priorityLevelConfiguration provided to Apply must not be nil") - } - data, err := json.Marshal(priorityLevelConfiguration) - if err != nil { - return nil, err - } - name := priorityLevelConfiguration.Name - if name == nil { - return nil, fmt.Errorf("priorityLevelConfiguration.Name must be provided to Apply") - } - emptyResult := &v1beta3.PriorityLevelConfiguration{} - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceActionWithOptions(prioritylevelconfigurationsResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) - if obj == nil { - return emptyResult, err +func newFakePriorityLevelConfigurations(fake *FakeFlowcontrolV1beta3) typedflowcontrolv1beta3.PriorityLevelConfigurationInterface { + return &fakePriorityLevelConfigurations{ + gentype.NewFakeClientWithListAndApply[*v1beta3.PriorityLevelConfiguration, *v1beta3.PriorityLevelConfigurationList, *flowcontrolv1beta3.PriorityLevelConfigurationApplyConfiguration]( + fake.Fake, + "", + v1beta3.SchemeGroupVersion.WithResource("prioritylevelconfigurations"), + v1beta3.SchemeGroupVersion.WithKind("PriorityLevelConfiguration"), + func() *v1beta3.PriorityLevelConfiguration { return &v1beta3.PriorityLevelConfiguration{} }, + func() *v1beta3.PriorityLevelConfigurationList { return &v1beta3.PriorityLevelConfigurationList{} }, + func(dst, src *v1beta3.PriorityLevelConfigurationList) { dst.ListMeta = src.ListMeta }, + func(list *v1beta3.PriorityLevelConfigurationList) []*v1beta3.PriorityLevelConfiguration { + return gentype.ToPointerSlice(list.Items) + }, + func(list *v1beta3.PriorityLevelConfigurationList, items []*v1beta3.PriorityLevelConfiguration) { + list.Items = gentype.FromPointerSlice(items) + }, + ), + fake, } - return obj.(*v1beta3.PriorityLevelConfiguration), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3/flowcontrol_client.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3/flowcontrol_client.go index 461120bd3..b32dc911c 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3/flowcontrol_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3/flowcontrol_client.go @@ -19,10 +19,10 @@ limitations under the License. 
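
// Editor's note (annotation, not part of the generated diff): the hunks above replace the
// hand-written FakePriorityLevelConfigurations methods with a thin wrapper around
// gentype.FakeClientWithListAndApply, so Get/List/Watch/Create/Update/UpdateStatus/Delete/
// DeleteCollection/Patch/Apply/ApplyStatus are now provided generically. Tests that reach the
// fakes through the fake clientset keep working unchanged. A minimal sketch of that path
// (function and object names here are illustrative, not taken from this repository):
//
//	import (
//		"context"
//
//		flowcontrolv1beta3 "k8s.io/api/flowcontrol/v1beta3"
//		metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
//		"k8s.io/client-go/kubernetes/fake"
//	)
//
//	func exampleFakePriorityLevelConfiguration(ctx context.Context) error {
//		cs := fake.NewSimpleClientset() // fake clientset backed by the regenerated fakes
//		plc := &flowcontrolv1beta3.PriorityLevelConfiguration{
//			ObjectMeta: metav1.ObjectMeta{Name: "example"},
//		}
//		// Create and Get are served by the embedded gentype fake client.
//		if _, err := cs.FlowcontrolV1beta3().PriorityLevelConfigurations().Create(ctx, plc, metav1.CreateOptions{}); err != nil {
//			return err
//		}
//		_, err := cs.FlowcontrolV1beta3().PriorityLevelConfigurations().Get(ctx, "example", metav1.GetOptions{})
//		return err
//	}
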
package v1beta3 import ( - "net/http" + http "net/http" - v1beta3 "k8s.io/api/flowcontrol/v1beta3" - "k8s.io/client-go/kubernetes/scheme" + flowcontrolv1beta3 "k8s.io/api/flowcontrol/v1beta3" + scheme "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -90,10 +90,10 @@ func New(c rest.Interface) *FlowcontrolV1beta3Client { } func setConfigDefaults(config *rest.Config) error { - gv := v1beta3.SchemeGroupVersion + gv := flowcontrolv1beta3.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3/flowschema.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3/flowschema.go index 35f600cdf..1e0d9feb1 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3/flowschema.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3/flowschema.go @@ -19,13 +19,13 @@ limitations under the License. package v1beta3 import ( - "context" + context "context" - v1beta3 "k8s.io/api/flowcontrol/v1beta3" + flowcontrolv1beta3 "k8s.io/api/flowcontrol/v1beta3" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - flowcontrolv1beta3 "k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3" + applyconfigurationsflowcontrolv1beta3 "k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3" gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" ) @@ -38,36 +38,38 @@ type FlowSchemasGetter interface { // FlowSchemaInterface has methods to work with FlowSchema resources. type FlowSchemaInterface interface { - Create(ctx context.Context, flowSchema *v1beta3.FlowSchema, opts v1.CreateOptions) (*v1beta3.FlowSchema, error) - Update(ctx context.Context, flowSchema *v1beta3.FlowSchema, opts v1.UpdateOptions) (*v1beta3.FlowSchema, error) + Create(ctx context.Context, flowSchema *flowcontrolv1beta3.FlowSchema, opts v1.CreateOptions) (*flowcontrolv1beta3.FlowSchema, error) + Update(ctx context.Context, flowSchema *flowcontrolv1beta3.FlowSchema, opts v1.UpdateOptions) (*flowcontrolv1beta3.FlowSchema, error) // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
- UpdateStatus(ctx context.Context, flowSchema *v1beta3.FlowSchema, opts v1.UpdateOptions) (*v1beta3.FlowSchema, error) + UpdateStatus(ctx context.Context, flowSchema *flowcontrolv1beta3.FlowSchema, opts v1.UpdateOptions) (*flowcontrolv1beta3.FlowSchema, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta3.FlowSchema, error) - List(ctx context.Context, opts v1.ListOptions) (*v1beta3.FlowSchemaList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*flowcontrolv1beta3.FlowSchema, error) + List(ctx context.Context, opts v1.ListOptions) (*flowcontrolv1beta3.FlowSchemaList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta3.FlowSchema, err error) - Apply(ctx context.Context, flowSchema *flowcontrolv1beta3.FlowSchemaApplyConfiguration, opts v1.ApplyOptions) (result *v1beta3.FlowSchema, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *flowcontrolv1beta3.FlowSchema, err error) + Apply(ctx context.Context, flowSchema *applyconfigurationsflowcontrolv1beta3.FlowSchemaApplyConfiguration, opts v1.ApplyOptions) (result *flowcontrolv1beta3.FlowSchema, err error) // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). - ApplyStatus(ctx context.Context, flowSchema *flowcontrolv1beta3.FlowSchemaApplyConfiguration, opts v1.ApplyOptions) (result *v1beta3.FlowSchema, err error) + ApplyStatus(ctx context.Context, flowSchema *applyconfigurationsflowcontrolv1beta3.FlowSchemaApplyConfiguration, opts v1.ApplyOptions) (result *flowcontrolv1beta3.FlowSchema, err error) FlowSchemaExpansion } // flowSchemas implements FlowSchemaInterface type flowSchemas struct { - *gentype.ClientWithListAndApply[*v1beta3.FlowSchema, *v1beta3.FlowSchemaList, *flowcontrolv1beta3.FlowSchemaApplyConfiguration] + *gentype.ClientWithListAndApply[*flowcontrolv1beta3.FlowSchema, *flowcontrolv1beta3.FlowSchemaList, *applyconfigurationsflowcontrolv1beta3.FlowSchemaApplyConfiguration] } // newFlowSchemas returns a FlowSchemas func newFlowSchemas(c *FlowcontrolV1beta3Client) *flowSchemas { return &flowSchemas{ - gentype.NewClientWithListAndApply[*v1beta3.FlowSchema, *v1beta3.FlowSchemaList, *flowcontrolv1beta3.FlowSchemaApplyConfiguration]( + gentype.NewClientWithListAndApply[*flowcontrolv1beta3.FlowSchema, *flowcontrolv1beta3.FlowSchemaList, *applyconfigurationsflowcontrolv1beta3.FlowSchemaApplyConfiguration]( "flowschemas", c.RESTClient(), scheme.ParameterCodec, "", - func() *v1beta3.FlowSchema { return &v1beta3.FlowSchema{} }, - func() *v1beta3.FlowSchemaList { return &v1beta3.FlowSchemaList{} }), + func() *flowcontrolv1beta3.FlowSchema { return &flowcontrolv1beta3.FlowSchema{} }, + func() *flowcontrolv1beta3.FlowSchemaList { return &flowcontrolv1beta3.FlowSchemaList{} }, + gentype.PrefersProtobuf[*flowcontrolv1beta3.FlowSchema](), + ), } } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3/prioritylevelconfiguration.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3/prioritylevelconfiguration.go index 93842e0cf..91bbf3fb1 100644 --- 
a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3/prioritylevelconfiguration.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3/prioritylevelconfiguration.go @@ -19,13 +19,13 @@ limitations under the License. package v1beta3 import ( - "context" + context "context" - v1beta3 "k8s.io/api/flowcontrol/v1beta3" + flowcontrolv1beta3 "k8s.io/api/flowcontrol/v1beta3" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - flowcontrolv1beta3 "k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3" + applyconfigurationsflowcontrolv1beta3 "k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3" gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" ) @@ -38,36 +38,42 @@ type PriorityLevelConfigurationsGetter interface { // PriorityLevelConfigurationInterface has methods to work with PriorityLevelConfiguration resources. type PriorityLevelConfigurationInterface interface { - Create(ctx context.Context, priorityLevelConfiguration *v1beta3.PriorityLevelConfiguration, opts v1.CreateOptions) (*v1beta3.PriorityLevelConfiguration, error) - Update(ctx context.Context, priorityLevelConfiguration *v1beta3.PriorityLevelConfiguration, opts v1.UpdateOptions) (*v1beta3.PriorityLevelConfiguration, error) + Create(ctx context.Context, priorityLevelConfiguration *flowcontrolv1beta3.PriorityLevelConfiguration, opts v1.CreateOptions) (*flowcontrolv1beta3.PriorityLevelConfiguration, error) + Update(ctx context.Context, priorityLevelConfiguration *flowcontrolv1beta3.PriorityLevelConfiguration, opts v1.UpdateOptions) (*flowcontrolv1beta3.PriorityLevelConfiguration, error) // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
- UpdateStatus(ctx context.Context, priorityLevelConfiguration *v1beta3.PriorityLevelConfiguration, opts v1.UpdateOptions) (*v1beta3.PriorityLevelConfiguration, error) + UpdateStatus(ctx context.Context, priorityLevelConfiguration *flowcontrolv1beta3.PriorityLevelConfiguration, opts v1.UpdateOptions) (*flowcontrolv1beta3.PriorityLevelConfiguration, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta3.PriorityLevelConfiguration, error) - List(ctx context.Context, opts v1.ListOptions) (*v1beta3.PriorityLevelConfigurationList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*flowcontrolv1beta3.PriorityLevelConfiguration, error) + List(ctx context.Context, opts v1.ListOptions) (*flowcontrolv1beta3.PriorityLevelConfigurationList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta3.PriorityLevelConfiguration, err error) - Apply(ctx context.Context, priorityLevelConfiguration *flowcontrolv1beta3.PriorityLevelConfigurationApplyConfiguration, opts v1.ApplyOptions) (result *v1beta3.PriorityLevelConfiguration, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *flowcontrolv1beta3.PriorityLevelConfiguration, err error) + Apply(ctx context.Context, priorityLevelConfiguration *applyconfigurationsflowcontrolv1beta3.PriorityLevelConfigurationApplyConfiguration, opts v1.ApplyOptions) (result *flowcontrolv1beta3.PriorityLevelConfiguration, err error) // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
- ApplyStatus(ctx context.Context, priorityLevelConfiguration *flowcontrolv1beta3.PriorityLevelConfigurationApplyConfiguration, opts v1.ApplyOptions) (result *v1beta3.PriorityLevelConfiguration, err error) + ApplyStatus(ctx context.Context, priorityLevelConfiguration *applyconfigurationsflowcontrolv1beta3.PriorityLevelConfigurationApplyConfiguration, opts v1.ApplyOptions) (result *flowcontrolv1beta3.PriorityLevelConfiguration, err error) PriorityLevelConfigurationExpansion } // priorityLevelConfigurations implements PriorityLevelConfigurationInterface type priorityLevelConfigurations struct { - *gentype.ClientWithListAndApply[*v1beta3.PriorityLevelConfiguration, *v1beta3.PriorityLevelConfigurationList, *flowcontrolv1beta3.PriorityLevelConfigurationApplyConfiguration] + *gentype.ClientWithListAndApply[*flowcontrolv1beta3.PriorityLevelConfiguration, *flowcontrolv1beta3.PriorityLevelConfigurationList, *applyconfigurationsflowcontrolv1beta3.PriorityLevelConfigurationApplyConfiguration] } // newPriorityLevelConfigurations returns a PriorityLevelConfigurations func newPriorityLevelConfigurations(c *FlowcontrolV1beta3Client) *priorityLevelConfigurations { return &priorityLevelConfigurations{ - gentype.NewClientWithListAndApply[*v1beta3.PriorityLevelConfiguration, *v1beta3.PriorityLevelConfigurationList, *flowcontrolv1beta3.PriorityLevelConfigurationApplyConfiguration]( + gentype.NewClientWithListAndApply[*flowcontrolv1beta3.PriorityLevelConfiguration, *flowcontrolv1beta3.PriorityLevelConfigurationList, *applyconfigurationsflowcontrolv1beta3.PriorityLevelConfigurationApplyConfiguration]( "prioritylevelconfigurations", c.RESTClient(), scheme.ParameterCodec, "", - func() *v1beta3.PriorityLevelConfiguration { return &v1beta3.PriorityLevelConfiguration{} }, - func() *v1beta3.PriorityLevelConfigurationList { return &v1beta3.PriorityLevelConfigurationList{} }), + func() *flowcontrolv1beta3.PriorityLevelConfiguration { + return &flowcontrolv1beta3.PriorityLevelConfiguration{} + }, + func() *flowcontrolv1beta3.PriorityLevelConfigurationList { + return &flowcontrolv1beta3.PriorityLevelConfigurationList{} + }, + gentype.PrefersProtobuf[*flowcontrolv1beta3.PriorityLevelConfiguration](), + ), } } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/fake/fake_ingress.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/fake/fake_ingress.go index a9693338b..7346dde66 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/fake/fake_ingress.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/fake/fake_ingress.go @@ -19,179 +19,31 @@ limitations under the License. 
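
// Editor's note (annotation, not part of the generated diff): besides the mechanical import
// aliasing, the regenerated typed clients in this group now pass gentype.PrefersProtobuf[...]()
// and setConfigDefaults switches to rest.CodecFactoryForGeneratedClient, so the generated
// clients prefer protobuf encoding for built-in types when talking to the API server. Callers
// construct and use the client exactly as before; a minimal sketch, assuming an in-cluster
// config (illustrative, not from this repository):
//
//	import (
//		"context"
//
//		metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
//		flowcontrolv1beta3client "k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3"
//		"k8s.io/client-go/rest"
//	)
//
//	func exampleListFlowSchemas(ctx context.Context) error {
//		cfg, err := rest.InClusterConfig()
//		if err != nil {
//			return err
//		}
//		c, err := flowcontrolv1beta3client.NewForConfig(cfg) // protobuf preference is applied transparently
//		if err != nil {
//			return err
//		}
//		_, err = c.FlowSchemas().List(ctx, metav1.ListOptions{})
//		return err
//	}
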
package fake import ( - "context" - json "encoding/json" - "fmt" - v1 "k8s.io/api/networking/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" networkingv1 "k8s.io/client-go/applyconfigurations/networking/v1" - testing "k8s.io/client-go/testing" + gentype "k8s.io/client-go/gentype" + typednetworkingv1 "k8s.io/client-go/kubernetes/typed/networking/v1" ) -// FakeIngresses implements IngressInterface -type FakeIngresses struct { +// fakeIngresses implements IngressInterface +type fakeIngresses struct { + *gentype.FakeClientWithListAndApply[*v1.Ingress, *v1.IngressList, *networkingv1.IngressApplyConfiguration] Fake *FakeNetworkingV1 - ns string -} - -var ingressesResource = v1.SchemeGroupVersion.WithResource("ingresses") - -var ingressesKind = v1.SchemeGroupVersion.WithKind("Ingress") - -// Get takes name of the ingress, and returns the corresponding ingress object, and an error if there is any. -func (c *FakeIngresses) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Ingress, err error) { - emptyResult := &v1.Ingress{} - obj, err := c.Fake. - Invokes(testing.NewGetActionWithOptions(ingressesResource, c.ns, name, options), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1.Ingress), err -} - -// List takes label and field selectors, and returns the list of Ingresses that match those selectors. -func (c *FakeIngresses) List(ctx context.Context, opts metav1.ListOptions) (result *v1.IngressList, err error) { - emptyResult := &v1.IngressList{} - obj, err := c.Fake. - Invokes(testing.NewListActionWithOptions(ingressesResource, ingressesKind, c.ns, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1.IngressList{ListMeta: obj.(*v1.IngressList).ListMeta} - for _, item := range obj.(*v1.IngressList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested ingresses. -func (c *FakeIngresses) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchActionWithOptions(ingressesResource, c.ns, opts)) - -} - -// Create takes the representation of a ingress and creates it. Returns the server's representation of the ingress, and an error, if there is any. -func (c *FakeIngresses) Create(ctx context.Context, ingress *v1.Ingress, opts metav1.CreateOptions) (result *v1.Ingress, err error) { - emptyResult := &v1.Ingress{} - obj, err := c.Fake. - Invokes(testing.NewCreateActionWithOptions(ingressesResource, c.ns, ingress, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1.Ingress), err -} - -// Update takes the representation of a ingress and updates it. Returns the server's representation of the ingress, and an error, if there is any. -func (c *FakeIngresses) Update(ctx context.Context, ingress *v1.Ingress, opts metav1.UpdateOptions) (result *v1.Ingress, err error) { - emptyResult := &v1.Ingress{} - obj, err := c.Fake. 
- Invokes(testing.NewUpdateActionWithOptions(ingressesResource, c.ns, ingress, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1.Ingress), err } -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeIngresses) UpdateStatus(ctx context.Context, ingress *v1.Ingress, opts metav1.UpdateOptions) (result *v1.Ingress, err error) { - emptyResult := &v1.Ingress{} - obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceActionWithOptions(ingressesResource, "status", c.ns, ingress, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1.Ingress), err -} - -// Delete takes name of the ingress and deletes it. Returns an error if one occurs. -func (c *FakeIngresses) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(ingressesResource, c.ns, name, opts), &v1.Ingress{}) - - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeIngresses) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewDeleteCollectionActionWithOptions(ingressesResource, c.ns, opts, listOpts) - - _, err := c.Fake.Invokes(action, &v1.IngressList{}) - return err -} - -// Patch applies the patch and returns the patched ingress. -func (c *FakeIngresses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Ingress, err error) { - emptyResult := &v1.Ingress{} - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceActionWithOptions(ingressesResource, c.ns, name, pt, data, opts, subresources...), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1.Ingress), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied ingress. -func (c *FakeIngresses) Apply(ctx context.Context, ingress *networkingv1.IngressApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Ingress, err error) { - if ingress == nil { - return nil, fmt.Errorf("ingress provided to Apply must not be nil") - } - data, err := json.Marshal(ingress) - if err != nil { - return nil, err - } - name := ingress.Name - if name == nil { - return nil, fmt.Errorf("ingress.Name must be provided to Apply") - } - emptyResult := &v1.Ingress{} - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceActionWithOptions(ingressesResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1.Ingress), err -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *FakeIngresses) ApplyStatus(ctx context.Context, ingress *networkingv1.IngressApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Ingress, err error) { - if ingress == nil { - return nil, fmt.Errorf("ingress provided to Apply must not be nil") - } - data, err := json.Marshal(ingress) - if err != nil { - return nil, err - } - name := ingress.Name - if name == nil { - return nil, fmt.Errorf("ingress.Name must be provided to Apply") - } - emptyResult := &v1.Ingress{} - obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceActionWithOptions(ingressesResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) - - if obj == nil { - return emptyResult, err +func newFakeIngresses(fake *FakeNetworkingV1, namespace string) typednetworkingv1.IngressInterface { + return &fakeIngresses{ + gentype.NewFakeClientWithListAndApply[*v1.Ingress, *v1.IngressList, *networkingv1.IngressApplyConfiguration]( + fake.Fake, + namespace, + v1.SchemeGroupVersion.WithResource("ingresses"), + v1.SchemeGroupVersion.WithKind("Ingress"), + func() *v1.Ingress { return &v1.Ingress{} }, + func() *v1.IngressList { return &v1.IngressList{} }, + func(dst, src *v1.IngressList) { dst.ListMeta = src.ListMeta }, + func(list *v1.IngressList) []*v1.Ingress { return gentype.ToPointerSlice(list.Items) }, + func(list *v1.IngressList, items []*v1.Ingress) { list.Items = gentype.FromPointerSlice(items) }, + ), + fake, } - return obj.(*v1.Ingress), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/fake/fake_ingressclass.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/fake/fake_ingressclass.go index cdbd59445..c4700a45f 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/fake/fake_ingressclass.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/fake/fake_ingressclass.go @@ -19,133 +19,33 @@ limitations under the License. package fake import ( - "context" - json "encoding/json" - "fmt" - v1 "k8s.io/api/networking/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" networkingv1 "k8s.io/client-go/applyconfigurations/networking/v1" - testing "k8s.io/client-go/testing" + gentype "k8s.io/client-go/gentype" + typednetworkingv1 "k8s.io/client-go/kubernetes/typed/networking/v1" ) -// FakeIngressClasses implements IngressClassInterface -type FakeIngressClasses struct { +// fakeIngressClasses implements IngressClassInterface +type fakeIngressClasses struct { + *gentype.FakeClientWithListAndApply[*v1.IngressClass, *v1.IngressClassList, *networkingv1.IngressClassApplyConfiguration] Fake *FakeNetworkingV1 } -var ingressclassesResource = v1.SchemeGroupVersion.WithResource("ingressclasses") - -var ingressclassesKind = v1.SchemeGroupVersion.WithKind("IngressClass") - -// Get takes name of the ingressClass, and returns the corresponding ingressClass object, and an error if there is any. -func (c *FakeIngressClasses) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.IngressClass, err error) { - emptyResult := &v1.IngressClass{} - obj, err := c.Fake. - Invokes(testing.NewRootGetActionWithOptions(ingressclassesResource, name, options), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1.IngressClass), err -} - -// List takes label and field selectors, and returns the list of IngressClasses that match those selectors. -func (c *FakeIngressClasses) List(ctx context.Context, opts metav1.ListOptions) (result *v1.IngressClassList, err error) { - emptyResult := &v1.IngressClassList{} - obj, err := c.Fake. 
- Invokes(testing.NewRootListActionWithOptions(ingressclassesResource, ingressclassesKind, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1.IngressClassList{ListMeta: obj.(*v1.IngressClassList).ListMeta} - for _, item := range obj.(*v1.IngressClassList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested ingressClasses. -func (c *FakeIngressClasses) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewRootWatchActionWithOptions(ingressclassesResource, opts)) -} - -// Create takes the representation of a ingressClass and creates it. Returns the server's representation of the ingressClass, and an error, if there is any. -func (c *FakeIngressClasses) Create(ctx context.Context, ingressClass *v1.IngressClass, opts metav1.CreateOptions) (result *v1.IngressClass, err error) { - emptyResult := &v1.IngressClass{} - obj, err := c.Fake. - Invokes(testing.NewRootCreateActionWithOptions(ingressclassesResource, ingressClass, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1.IngressClass), err -} - -// Update takes the representation of a ingressClass and updates it. Returns the server's representation of the ingressClass, and an error, if there is any. -func (c *FakeIngressClasses) Update(ctx context.Context, ingressClass *v1.IngressClass, opts metav1.UpdateOptions) (result *v1.IngressClass, err error) { - emptyResult := &v1.IngressClass{} - obj, err := c.Fake. - Invokes(testing.NewRootUpdateActionWithOptions(ingressclassesResource, ingressClass, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1.IngressClass), err -} - -// Delete takes name of the ingressClass and deletes it. Returns an error if one occurs. -func (c *FakeIngressClasses) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewRootDeleteActionWithOptions(ingressclassesResource, name, opts), &v1.IngressClass{}) - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeIngressClasses) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewRootDeleteCollectionActionWithOptions(ingressclassesResource, opts, listOpts) - - _, err := c.Fake.Invokes(action, &v1.IngressClassList{}) - return err -} - -// Patch applies the patch and returns the patched ingressClass. -func (c *FakeIngressClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.IngressClass, err error) { - emptyResult := &v1.IngressClass{} - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceActionWithOptions(ingressclassesResource, name, pt, data, opts, subresources...), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1.IngressClass), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied ingressClass. 
-func (c *FakeIngressClasses) Apply(ctx context.Context, ingressClass *networkingv1.IngressClassApplyConfiguration, opts metav1.ApplyOptions) (result *v1.IngressClass, err error) { - if ingressClass == nil { - return nil, fmt.Errorf("ingressClass provided to Apply must not be nil") - } - data, err := json.Marshal(ingressClass) - if err != nil { - return nil, err - } - name := ingressClass.Name - if name == nil { - return nil, fmt.Errorf("ingressClass.Name must be provided to Apply") - } - emptyResult := &v1.IngressClass{} - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceActionWithOptions(ingressclassesResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) - if obj == nil { - return emptyResult, err +func newFakeIngressClasses(fake *FakeNetworkingV1) typednetworkingv1.IngressClassInterface { + return &fakeIngressClasses{ + gentype.NewFakeClientWithListAndApply[*v1.IngressClass, *v1.IngressClassList, *networkingv1.IngressClassApplyConfiguration]( + fake.Fake, + "", + v1.SchemeGroupVersion.WithResource("ingressclasses"), + v1.SchemeGroupVersion.WithKind("IngressClass"), + func() *v1.IngressClass { return &v1.IngressClass{} }, + func() *v1.IngressClassList { return &v1.IngressClassList{} }, + func(dst, src *v1.IngressClassList) { dst.ListMeta = src.ListMeta }, + func(list *v1.IngressClassList) []*v1.IngressClass { return gentype.ToPointerSlice(list.Items) }, + func(list *v1.IngressClassList, items []*v1.IngressClass) { + list.Items = gentype.FromPointerSlice(items) + }, + ), + fake, } - return obj.(*v1.IngressClass), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/fake/fake_networking_client.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/fake/fake_networking_client.go index ed1639e2f..3b6a36ffe 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/fake/fake_networking_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/fake/fake_networking_client.go @@ -29,15 +29,15 @@ type FakeNetworkingV1 struct { } func (c *FakeNetworkingV1) Ingresses(namespace string) v1.IngressInterface { - return &FakeIngresses{c, namespace} + return newFakeIngresses(c, namespace) } func (c *FakeNetworkingV1) IngressClasses() v1.IngressClassInterface { - return &FakeIngressClasses{c} + return newFakeIngressClasses(c) } func (c *FakeNetworkingV1) NetworkPolicies(namespace string) v1.NetworkPolicyInterface { - return &FakeNetworkPolicies{c, namespace} + return newFakeNetworkPolicies(c, namespace) } // RESTClient returns a RESTClient that is used to communicate diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/fake/fake_networkpolicy.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/fake/fake_networkpolicy.go index 9098bf42e..8b017351a 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/fake/fake_networkpolicy.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/fake/fake_networkpolicy.go @@ -19,142 +19,33 @@ limitations under the License. 
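
// Editor's note (annotation, not part of the generated diff): the networking/v1 fakes
// (Ingresses, IngressClasses, NetworkPolicies) follow the same pattern: the exported FakeXxx
// structs become unexported wrappers around gentype.FakeClientWithListAndApply and are reached
// only through the fake clientset. That is the path unit tests for NetworkPolicy handling
// would take; a minimal sketch (namespace and object name are illustrative, not taken from
// this change):
//
//	import (
//		"context"
//
//		networkingv1 "k8s.io/api/networking/v1"
//		metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
//		"k8s.io/client-go/kubernetes/fake"
//	)
//
//	func exampleFakeNetworkPolicy(ctx context.Context) error {
//		cs := fake.NewSimpleClientset()
//		np := &networkingv1.NetworkPolicy{
//			ObjectMeta: metav1.ObjectMeta{Name: "deny-all", Namespace: "test-ns"},
//		}
//		if _, err := cs.NetworkingV1().NetworkPolicies("test-ns").Create(ctx, np, metav1.CreateOptions{}); err != nil {
//			return err
//		}
//		_, err := cs.NetworkingV1().NetworkPolicies("test-ns").Get(ctx, "deny-all", metav1.GetOptions{})
//		return err
//	}
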
package fake import ( - "context" - json "encoding/json" - "fmt" - v1 "k8s.io/api/networking/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" networkingv1 "k8s.io/client-go/applyconfigurations/networking/v1" - testing "k8s.io/client-go/testing" + gentype "k8s.io/client-go/gentype" + typednetworkingv1 "k8s.io/client-go/kubernetes/typed/networking/v1" ) -// FakeNetworkPolicies implements NetworkPolicyInterface -type FakeNetworkPolicies struct { +// fakeNetworkPolicies implements NetworkPolicyInterface +type fakeNetworkPolicies struct { + *gentype.FakeClientWithListAndApply[*v1.NetworkPolicy, *v1.NetworkPolicyList, *networkingv1.NetworkPolicyApplyConfiguration] Fake *FakeNetworkingV1 - ns string -} - -var networkpoliciesResource = v1.SchemeGroupVersion.WithResource("networkpolicies") - -var networkpoliciesKind = v1.SchemeGroupVersion.WithKind("NetworkPolicy") - -// Get takes name of the networkPolicy, and returns the corresponding networkPolicy object, and an error if there is any. -func (c *FakeNetworkPolicies) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.NetworkPolicy, err error) { - emptyResult := &v1.NetworkPolicy{} - obj, err := c.Fake. - Invokes(testing.NewGetActionWithOptions(networkpoliciesResource, c.ns, name, options), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1.NetworkPolicy), err -} - -// List takes label and field selectors, and returns the list of NetworkPolicies that match those selectors. -func (c *FakeNetworkPolicies) List(ctx context.Context, opts metav1.ListOptions) (result *v1.NetworkPolicyList, err error) { - emptyResult := &v1.NetworkPolicyList{} - obj, err := c.Fake. - Invokes(testing.NewListActionWithOptions(networkpoliciesResource, networkpoliciesKind, c.ns, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1.NetworkPolicyList{ListMeta: obj.(*v1.NetworkPolicyList).ListMeta} - for _, item := range obj.(*v1.NetworkPolicyList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested networkPolicies. -func (c *FakeNetworkPolicies) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchActionWithOptions(networkpoliciesResource, c.ns, opts)) - } -// Create takes the representation of a networkPolicy and creates it. Returns the server's representation of the networkPolicy, and an error, if there is any. -func (c *FakeNetworkPolicies) Create(ctx context.Context, networkPolicy *v1.NetworkPolicy, opts metav1.CreateOptions) (result *v1.NetworkPolicy, err error) { - emptyResult := &v1.NetworkPolicy{} - obj, err := c.Fake. - Invokes(testing.NewCreateActionWithOptions(networkpoliciesResource, c.ns, networkPolicy, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1.NetworkPolicy), err -} - -// Update takes the representation of a networkPolicy and updates it. Returns the server's representation of the networkPolicy, and an error, if there is any. 
-func (c *FakeNetworkPolicies) Update(ctx context.Context, networkPolicy *v1.NetworkPolicy, opts metav1.UpdateOptions) (result *v1.NetworkPolicy, err error) { - emptyResult := &v1.NetworkPolicy{} - obj, err := c.Fake. - Invokes(testing.NewUpdateActionWithOptions(networkpoliciesResource, c.ns, networkPolicy, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1.NetworkPolicy), err -} - -// Delete takes name of the networkPolicy and deletes it. Returns an error if one occurs. -func (c *FakeNetworkPolicies) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(networkpoliciesResource, c.ns, name, opts), &v1.NetworkPolicy{}) - - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeNetworkPolicies) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewDeleteCollectionActionWithOptions(networkpoliciesResource, c.ns, opts, listOpts) - - _, err := c.Fake.Invokes(action, &v1.NetworkPolicyList{}) - return err -} - -// Patch applies the patch and returns the patched networkPolicy. -func (c *FakeNetworkPolicies) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.NetworkPolicy, err error) { - emptyResult := &v1.NetworkPolicy{} - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceActionWithOptions(networkpoliciesResource, c.ns, name, pt, data, opts, subresources...), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1.NetworkPolicy), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied networkPolicy. -func (c *FakeNetworkPolicies) Apply(ctx context.Context, networkPolicy *networkingv1.NetworkPolicyApplyConfiguration, opts metav1.ApplyOptions) (result *v1.NetworkPolicy, err error) { - if networkPolicy == nil { - return nil, fmt.Errorf("networkPolicy provided to Apply must not be nil") - } - data, err := json.Marshal(networkPolicy) - if err != nil { - return nil, err - } - name := networkPolicy.Name - if name == nil { - return nil, fmt.Errorf("networkPolicy.Name must be provided to Apply") - } - emptyResult := &v1.NetworkPolicy{} - obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceActionWithOptions(networkpoliciesResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) - - if obj == nil { - return emptyResult, err +func newFakeNetworkPolicies(fake *FakeNetworkingV1, namespace string) typednetworkingv1.NetworkPolicyInterface { + return &fakeNetworkPolicies{ + gentype.NewFakeClientWithListAndApply[*v1.NetworkPolicy, *v1.NetworkPolicyList, *networkingv1.NetworkPolicyApplyConfiguration]( + fake.Fake, + namespace, + v1.SchemeGroupVersion.WithResource("networkpolicies"), + v1.SchemeGroupVersion.WithKind("NetworkPolicy"), + func() *v1.NetworkPolicy { return &v1.NetworkPolicy{} }, + func() *v1.NetworkPolicyList { return &v1.NetworkPolicyList{} }, + func(dst, src *v1.NetworkPolicyList) { dst.ListMeta = src.ListMeta }, + func(list *v1.NetworkPolicyList) []*v1.NetworkPolicy { return gentype.ToPointerSlice(list.Items) }, + func(list *v1.NetworkPolicyList, items []*v1.NetworkPolicy) { + list.Items = gentype.FromPointerSlice(items) + }, + ), + fake, } - return obj.(*v1.NetworkPolicy), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/ingress.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/ingress.go index afaff4912..76d416249 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/ingress.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/ingress.go @@ -19,13 +19,13 @@ limitations under the License. package v1 import ( - "context" + context "context" - v1 "k8s.io/api/networking/v1" + networkingv1 "k8s.io/api/networking/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - networkingv1 "k8s.io/client-go/applyconfigurations/networking/v1" + applyconfigurationsnetworkingv1 "k8s.io/client-go/applyconfigurations/networking/v1" gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" ) @@ -38,36 +38,38 @@ type IngressesGetter interface { // IngressInterface has methods to work with Ingress resources. type IngressInterface interface { - Create(ctx context.Context, ingress *v1.Ingress, opts metav1.CreateOptions) (*v1.Ingress, error) - Update(ctx context.Context, ingress *v1.Ingress, opts metav1.UpdateOptions) (*v1.Ingress, error) + Create(ctx context.Context, ingress *networkingv1.Ingress, opts metav1.CreateOptions) (*networkingv1.Ingress, error) + Update(ctx context.Context, ingress *networkingv1.Ingress, opts metav1.UpdateOptions) (*networkingv1.Ingress, error) // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
- UpdateStatus(ctx context.Context, ingress *v1.Ingress, opts metav1.UpdateOptions) (*v1.Ingress, error) + UpdateStatus(ctx context.Context, ingress *networkingv1.Ingress, opts metav1.UpdateOptions) (*networkingv1.Ingress, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error - Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.Ingress, error) - List(ctx context.Context, opts metav1.ListOptions) (*v1.IngressList, error) + Get(ctx context.Context, name string, opts metav1.GetOptions) (*networkingv1.Ingress, error) + List(ctx context.Context, opts metav1.ListOptions) (*networkingv1.IngressList, error) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Ingress, err error) - Apply(ctx context.Context, ingress *networkingv1.IngressApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Ingress, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *networkingv1.Ingress, err error) + Apply(ctx context.Context, ingress *applyconfigurationsnetworkingv1.IngressApplyConfiguration, opts metav1.ApplyOptions) (result *networkingv1.Ingress, err error) // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). - ApplyStatus(ctx context.Context, ingress *networkingv1.IngressApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Ingress, err error) + ApplyStatus(ctx context.Context, ingress *applyconfigurationsnetworkingv1.IngressApplyConfiguration, opts metav1.ApplyOptions) (result *networkingv1.Ingress, err error) IngressExpansion } // ingresses implements IngressInterface type ingresses struct { - *gentype.ClientWithListAndApply[*v1.Ingress, *v1.IngressList, *networkingv1.IngressApplyConfiguration] + *gentype.ClientWithListAndApply[*networkingv1.Ingress, *networkingv1.IngressList, *applyconfigurationsnetworkingv1.IngressApplyConfiguration] } // newIngresses returns a Ingresses func newIngresses(c *NetworkingV1Client, namespace string) *ingresses { return &ingresses{ - gentype.NewClientWithListAndApply[*v1.Ingress, *v1.IngressList, *networkingv1.IngressApplyConfiguration]( + gentype.NewClientWithListAndApply[*networkingv1.Ingress, *networkingv1.IngressList, *applyconfigurationsnetworkingv1.IngressApplyConfiguration]( "ingresses", c.RESTClient(), scheme.ParameterCodec, namespace, - func() *v1.Ingress { return &v1.Ingress{} }, - func() *v1.IngressList { return &v1.IngressList{} }), + func() *networkingv1.Ingress { return &networkingv1.Ingress{} }, + func() *networkingv1.IngressList { return &networkingv1.IngressList{} }, + gentype.PrefersProtobuf[*networkingv1.Ingress](), + ), } } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/ingressclass.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/ingressclass.go index 3301e8799..3bbbf9e15 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/ingressclass.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/ingressclass.go @@ -19,13 +19,13 @@ limitations under the License. 
package v1 import ( - "context" + context "context" - v1 "k8s.io/api/networking/v1" + networkingv1 "k8s.io/api/networking/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - networkingv1 "k8s.io/client-go/applyconfigurations/networking/v1" + applyconfigurationsnetworkingv1 "k8s.io/client-go/applyconfigurations/networking/v1" gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" ) @@ -38,32 +38,34 @@ type IngressClassesGetter interface { // IngressClassInterface has methods to work with IngressClass resources. type IngressClassInterface interface { - Create(ctx context.Context, ingressClass *v1.IngressClass, opts metav1.CreateOptions) (*v1.IngressClass, error) - Update(ctx context.Context, ingressClass *v1.IngressClass, opts metav1.UpdateOptions) (*v1.IngressClass, error) + Create(ctx context.Context, ingressClass *networkingv1.IngressClass, opts metav1.CreateOptions) (*networkingv1.IngressClass, error) + Update(ctx context.Context, ingressClass *networkingv1.IngressClass, opts metav1.UpdateOptions) (*networkingv1.IngressClass, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error - Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.IngressClass, error) - List(ctx context.Context, opts metav1.ListOptions) (*v1.IngressClassList, error) + Get(ctx context.Context, name string, opts metav1.GetOptions) (*networkingv1.IngressClass, error) + List(ctx context.Context, opts metav1.ListOptions) (*networkingv1.IngressClassList, error) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.IngressClass, err error) - Apply(ctx context.Context, ingressClass *networkingv1.IngressClassApplyConfiguration, opts metav1.ApplyOptions) (result *v1.IngressClass, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *networkingv1.IngressClass, err error) + Apply(ctx context.Context, ingressClass *applyconfigurationsnetworkingv1.IngressClassApplyConfiguration, opts metav1.ApplyOptions) (result *networkingv1.IngressClass, err error) IngressClassExpansion } // ingressClasses implements IngressClassInterface type ingressClasses struct { - *gentype.ClientWithListAndApply[*v1.IngressClass, *v1.IngressClassList, *networkingv1.IngressClassApplyConfiguration] + *gentype.ClientWithListAndApply[*networkingv1.IngressClass, *networkingv1.IngressClassList, *applyconfigurationsnetworkingv1.IngressClassApplyConfiguration] } // newIngressClasses returns a IngressClasses func newIngressClasses(c *NetworkingV1Client) *ingressClasses { return &ingressClasses{ - gentype.NewClientWithListAndApply[*v1.IngressClass, *v1.IngressClassList, *networkingv1.IngressClassApplyConfiguration]( + gentype.NewClientWithListAndApply[*networkingv1.IngressClass, *networkingv1.IngressClassList, *applyconfigurationsnetworkingv1.IngressClassApplyConfiguration]( "ingressclasses", c.RESTClient(), scheme.ParameterCodec, "", - func() *v1.IngressClass { return &v1.IngressClass{} }, - func() *v1.IngressClassList { return &v1.IngressClassList{} }), + func() *networkingv1.IngressClass { return &networkingv1.IngressClass{} }, + func() *networkingv1.IngressClassList { return 
&networkingv1.IngressClassList{} }, + gentype.PrefersProtobuf[*networkingv1.IngressClass](), + ), } } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/networking_client.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/networking_client.go index 3b72a7ae9..692b52f02 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/networking_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/networking_client.go @@ -19,10 +19,10 @@ limitations under the License. package v1 import ( - "net/http" + http "net/http" - v1 "k8s.io/api/networking/v1" - "k8s.io/client-go/kubernetes/scheme" + networkingv1 "k8s.io/api/networking/v1" + scheme "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -95,10 +95,10 @@ func New(c rest.Interface) *NetworkingV1Client { } func setConfigDefaults(config *rest.Config) error { - gv := v1.SchemeGroupVersion + gv := networkingv1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/networkpolicy.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/networkpolicy.go index ba2ef32db..2758c2bfb 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/networkpolicy.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/networkpolicy.go @@ -19,13 +19,13 @@ limitations under the License. package v1 import ( - "context" + context "context" - v1 "k8s.io/api/networking/v1" + networkingv1 "k8s.io/api/networking/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - networkingv1 "k8s.io/client-go/applyconfigurations/networking/v1" + applyconfigurationsnetworkingv1 "k8s.io/client-go/applyconfigurations/networking/v1" gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" ) @@ -38,32 +38,34 @@ type NetworkPoliciesGetter interface { // NetworkPolicyInterface has methods to work with NetworkPolicy resources. 
type NetworkPolicyInterface interface { - Create(ctx context.Context, networkPolicy *v1.NetworkPolicy, opts metav1.CreateOptions) (*v1.NetworkPolicy, error) - Update(ctx context.Context, networkPolicy *v1.NetworkPolicy, opts metav1.UpdateOptions) (*v1.NetworkPolicy, error) + Create(ctx context.Context, networkPolicy *networkingv1.NetworkPolicy, opts metav1.CreateOptions) (*networkingv1.NetworkPolicy, error) + Update(ctx context.Context, networkPolicy *networkingv1.NetworkPolicy, opts metav1.UpdateOptions) (*networkingv1.NetworkPolicy, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error - Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.NetworkPolicy, error) - List(ctx context.Context, opts metav1.ListOptions) (*v1.NetworkPolicyList, error) + Get(ctx context.Context, name string, opts metav1.GetOptions) (*networkingv1.NetworkPolicy, error) + List(ctx context.Context, opts metav1.ListOptions) (*networkingv1.NetworkPolicyList, error) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.NetworkPolicy, err error) - Apply(ctx context.Context, networkPolicy *networkingv1.NetworkPolicyApplyConfiguration, opts metav1.ApplyOptions) (result *v1.NetworkPolicy, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *networkingv1.NetworkPolicy, err error) + Apply(ctx context.Context, networkPolicy *applyconfigurationsnetworkingv1.NetworkPolicyApplyConfiguration, opts metav1.ApplyOptions) (result *networkingv1.NetworkPolicy, err error) NetworkPolicyExpansion } // networkPolicies implements NetworkPolicyInterface type networkPolicies struct { - *gentype.ClientWithListAndApply[*v1.NetworkPolicy, *v1.NetworkPolicyList, *networkingv1.NetworkPolicyApplyConfiguration] + *gentype.ClientWithListAndApply[*networkingv1.NetworkPolicy, *networkingv1.NetworkPolicyList, *applyconfigurationsnetworkingv1.NetworkPolicyApplyConfiguration] } // newNetworkPolicies returns a NetworkPolicies func newNetworkPolicies(c *NetworkingV1Client, namespace string) *networkPolicies { return &networkPolicies{ - gentype.NewClientWithListAndApply[*v1.NetworkPolicy, *v1.NetworkPolicyList, *networkingv1.NetworkPolicyApplyConfiguration]( + gentype.NewClientWithListAndApply[*networkingv1.NetworkPolicy, *networkingv1.NetworkPolicyList, *applyconfigurationsnetworkingv1.NetworkPolicyApplyConfiguration]( "networkpolicies", c.RESTClient(), scheme.ParameterCodec, namespace, - func() *v1.NetworkPolicy { return &v1.NetworkPolicy{} }, - func() *v1.NetworkPolicyList { return &v1.NetworkPolicyList{} }), + func() *networkingv1.NetworkPolicy { return &networkingv1.NetworkPolicy{} }, + func() *networkingv1.NetworkPolicyList { return &networkingv1.NetworkPolicyList{} }, + gentype.PrefersProtobuf[*networkingv1.NetworkPolicy](), + ), } } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/fake/fake_ipaddress.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/fake/fake_ipaddress.go index 6ce62b331..b0a5570a9 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/fake/fake_ipaddress.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/fake/fake_ipaddress.go @@ -19,133 +19,33 @@ limitations under the 
License. package fake import ( - "context" - json "encoding/json" - "fmt" - v1alpha1 "k8s.io/api/networking/v1alpha1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" networkingv1alpha1 "k8s.io/client-go/applyconfigurations/networking/v1alpha1" - testing "k8s.io/client-go/testing" + gentype "k8s.io/client-go/gentype" + typednetworkingv1alpha1 "k8s.io/client-go/kubernetes/typed/networking/v1alpha1" ) -// FakeIPAddresses implements IPAddressInterface -type FakeIPAddresses struct { +// fakeIPAddresses implements IPAddressInterface +type fakeIPAddresses struct { + *gentype.FakeClientWithListAndApply[*v1alpha1.IPAddress, *v1alpha1.IPAddressList, *networkingv1alpha1.IPAddressApplyConfiguration] Fake *FakeNetworkingV1alpha1 } -var ipaddressesResource = v1alpha1.SchemeGroupVersion.WithResource("ipaddresses") - -var ipaddressesKind = v1alpha1.SchemeGroupVersion.WithKind("IPAddress") - -// Get takes name of the iPAddress, and returns the corresponding iPAddress object, and an error if there is any. -func (c *FakeIPAddresses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.IPAddress, err error) { - emptyResult := &v1alpha1.IPAddress{} - obj, err := c.Fake. - Invokes(testing.NewRootGetActionWithOptions(ipaddressesResource, name, options), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1alpha1.IPAddress), err -} - -// List takes label and field selectors, and returns the list of IPAddresses that match those selectors. -func (c *FakeIPAddresses) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.IPAddressList, err error) { - emptyResult := &v1alpha1.IPAddressList{} - obj, err := c.Fake. - Invokes(testing.NewRootListActionWithOptions(ipaddressesResource, ipaddressesKind, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1alpha1.IPAddressList{ListMeta: obj.(*v1alpha1.IPAddressList).ListMeta} - for _, item := range obj.(*v1alpha1.IPAddressList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested iPAddresses. -func (c *FakeIPAddresses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewRootWatchActionWithOptions(ipaddressesResource, opts)) -} - -// Create takes the representation of a iPAddress and creates it. Returns the server's representation of the iPAddress, and an error, if there is any. -func (c *FakeIPAddresses) Create(ctx context.Context, iPAddress *v1alpha1.IPAddress, opts v1.CreateOptions) (result *v1alpha1.IPAddress, err error) { - emptyResult := &v1alpha1.IPAddress{} - obj, err := c.Fake. - Invokes(testing.NewRootCreateActionWithOptions(ipaddressesResource, iPAddress, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1alpha1.IPAddress), err -} - -// Update takes the representation of a iPAddress and updates it. Returns the server's representation of the iPAddress, and an error, if there is any. -func (c *FakeIPAddresses) Update(ctx context.Context, iPAddress *v1alpha1.IPAddress, opts v1.UpdateOptions) (result *v1alpha1.IPAddress, err error) { - emptyResult := &v1alpha1.IPAddress{} - obj, err := c.Fake. 
- Invokes(testing.NewRootUpdateActionWithOptions(ipaddressesResource, iPAddress, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1alpha1.IPAddress), err -} - -// Delete takes name of the iPAddress and deletes it. Returns an error if one occurs. -func (c *FakeIPAddresses) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewRootDeleteActionWithOptions(ipaddressesResource, name, opts), &v1alpha1.IPAddress{}) - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeIPAddresses) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewRootDeleteCollectionActionWithOptions(ipaddressesResource, opts, listOpts) - - _, err := c.Fake.Invokes(action, &v1alpha1.IPAddressList{}) - return err -} - -// Patch applies the patch and returns the patched iPAddress. -func (c *FakeIPAddresses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.IPAddress, err error) { - emptyResult := &v1alpha1.IPAddress{} - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceActionWithOptions(ipaddressesResource, name, pt, data, opts, subresources...), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1alpha1.IPAddress), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied iPAddress. -func (c *FakeIPAddresses) Apply(ctx context.Context, iPAddress *networkingv1alpha1.IPAddressApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.IPAddress, err error) { - if iPAddress == nil { - return nil, fmt.Errorf("iPAddress provided to Apply must not be nil") - } - data, err := json.Marshal(iPAddress) - if err != nil { - return nil, err - } - name := iPAddress.Name - if name == nil { - return nil, fmt.Errorf("iPAddress.Name must be provided to Apply") - } - emptyResult := &v1alpha1.IPAddress{} - obj, err := c.Fake. 
- Invokes(testing.NewRootPatchSubresourceActionWithOptions(ipaddressesResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) - if obj == nil { - return emptyResult, err +func newFakeIPAddresses(fake *FakeNetworkingV1alpha1) typednetworkingv1alpha1.IPAddressInterface { + return &fakeIPAddresses{ + gentype.NewFakeClientWithListAndApply[*v1alpha1.IPAddress, *v1alpha1.IPAddressList, *networkingv1alpha1.IPAddressApplyConfiguration]( + fake.Fake, + "", + v1alpha1.SchemeGroupVersion.WithResource("ipaddresses"), + v1alpha1.SchemeGroupVersion.WithKind("IPAddress"), + func() *v1alpha1.IPAddress { return &v1alpha1.IPAddress{} }, + func() *v1alpha1.IPAddressList { return &v1alpha1.IPAddressList{} }, + func(dst, src *v1alpha1.IPAddressList) { dst.ListMeta = src.ListMeta }, + func(list *v1alpha1.IPAddressList) []*v1alpha1.IPAddress { return gentype.ToPointerSlice(list.Items) }, + func(list *v1alpha1.IPAddressList, items []*v1alpha1.IPAddress) { + list.Items = gentype.FromPointerSlice(items) + }, + ), + fake, } - return obj.(*v1alpha1.IPAddress), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/fake/fake_networking_client.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/fake/fake_networking_client.go index 80ad184bb..e0f55f744 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/fake/fake_networking_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/fake/fake_networking_client.go @@ -29,11 +29,11 @@ type FakeNetworkingV1alpha1 struct { } func (c *FakeNetworkingV1alpha1) IPAddresses() v1alpha1.IPAddressInterface { - return &FakeIPAddresses{c} + return newFakeIPAddresses(c) } func (c *FakeNetworkingV1alpha1) ServiceCIDRs() v1alpha1.ServiceCIDRInterface { - return &FakeServiceCIDRs{c} + return newFakeServiceCIDRs(c) } // RESTClient returns a RESTClient that is used to communicate diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/fake/fake_servicecidr.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/fake/fake_servicecidr.go index 27a78e1ba..fe6c0cdb6 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/fake/fake_servicecidr.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/fake/fake_servicecidr.go @@ -19,168 +19,35 @@ limitations under the License. package fake import ( - "context" - json "encoding/json" - "fmt" - v1alpha1 "k8s.io/api/networking/v1alpha1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" networkingv1alpha1 "k8s.io/client-go/applyconfigurations/networking/v1alpha1" - testing "k8s.io/client-go/testing" + gentype "k8s.io/client-go/gentype" + typednetworkingv1alpha1 "k8s.io/client-go/kubernetes/typed/networking/v1alpha1" ) -// FakeServiceCIDRs implements ServiceCIDRInterface -type FakeServiceCIDRs struct { +// fakeServiceCIDRs implements ServiceCIDRInterface +type fakeServiceCIDRs struct { + *gentype.FakeClientWithListAndApply[*v1alpha1.ServiceCIDR, *v1alpha1.ServiceCIDRList, *networkingv1alpha1.ServiceCIDRApplyConfiguration] Fake *FakeNetworkingV1alpha1 } -var servicecidrsResource = v1alpha1.SchemeGroupVersion.WithResource("servicecidrs") - -var servicecidrsKind = v1alpha1.SchemeGroupVersion.WithKind("ServiceCIDR") - -// Get takes name of the serviceCIDR, and returns the corresponding serviceCIDR object, and an error if there is any. 
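// The sketch below (a test-style example, not part of the vendored diff) shows how callers
// typically exercise the regenerated fakes; it assumes the standard fake clientset constructor
// from k8s.io/client-go/kubernetes/fake. The typed accessors now return the gentype-backed
// fakes, but tests still interact with them through the same IPAddressInterface as before.
package fake_test

import (
	"context"
	"testing"

	networkingv1alpha1 "k8s.io/api/networking/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
)

// TestFakeIPAddresses is a minimal sketch: the gentype-backed fake still records
// actions on the shared clientset.Fake and serves objects from the object tracker,
// so existing create/get assertions keep working. The name "192-168-1-5" is illustrative.
func TestFakeIPAddresses(t *testing.T) {
	clientset := fake.NewSimpleClientset()
	ctx := context.Background()

	ip := &networkingv1alpha1.IPAddress{ObjectMeta: metav1.ObjectMeta{Name: "192-168-1-5"}}
	if _, err := clientset.NetworkingV1alpha1().IPAddresses().Create(ctx, ip, metav1.CreateOptions{}); err != nil {
		t.Fatalf("create: %v", err)
	}
	got, err := clientset.NetworkingV1alpha1().IPAddresses().Get(ctx, "192-168-1-5", metav1.GetOptions{})
	if err != nil || got.Name != "192-168-1-5" {
		t.Fatalf("get: %+v, %v", got, err)
	}
}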
-func (c *FakeServiceCIDRs) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ServiceCIDR, err error) { - emptyResult := &v1alpha1.ServiceCIDR{} - obj, err := c.Fake. - Invokes(testing.NewRootGetActionWithOptions(servicecidrsResource, name, options), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1alpha1.ServiceCIDR), err -} - -// List takes label and field selectors, and returns the list of ServiceCIDRs that match those selectors. -func (c *FakeServiceCIDRs) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ServiceCIDRList, err error) { - emptyResult := &v1alpha1.ServiceCIDRList{} - obj, err := c.Fake. - Invokes(testing.NewRootListActionWithOptions(servicecidrsResource, servicecidrsKind, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1alpha1.ServiceCIDRList{ListMeta: obj.(*v1alpha1.ServiceCIDRList).ListMeta} - for _, item := range obj.(*v1alpha1.ServiceCIDRList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested serviceCIDRs. -func (c *FakeServiceCIDRs) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewRootWatchActionWithOptions(servicecidrsResource, opts)) -} - -// Create takes the representation of a serviceCIDR and creates it. Returns the server's representation of the serviceCIDR, and an error, if there is any. -func (c *FakeServiceCIDRs) Create(ctx context.Context, serviceCIDR *v1alpha1.ServiceCIDR, opts v1.CreateOptions) (result *v1alpha1.ServiceCIDR, err error) { - emptyResult := &v1alpha1.ServiceCIDR{} - obj, err := c.Fake. - Invokes(testing.NewRootCreateActionWithOptions(servicecidrsResource, serviceCIDR, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1alpha1.ServiceCIDR), err -} - -// Update takes the representation of a serviceCIDR and updates it. Returns the server's representation of the serviceCIDR, and an error, if there is any. -func (c *FakeServiceCIDRs) Update(ctx context.Context, serviceCIDR *v1alpha1.ServiceCIDR, opts v1.UpdateOptions) (result *v1alpha1.ServiceCIDR, err error) { - emptyResult := &v1alpha1.ServiceCIDR{} - obj, err := c.Fake. - Invokes(testing.NewRootUpdateActionWithOptions(servicecidrsResource, serviceCIDR, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1alpha1.ServiceCIDR), err -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeServiceCIDRs) UpdateStatus(ctx context.Context, serviceCIDR *v1alpha1.ServiceCIDR, opts v1.UpdateOptions) (result *v1alpha1.ServiceCIDR, err error) { - emptyResult := &v1alpha1.ServiceCIDR{} - obj, err := c.Fake. - Invokes(testing.NewRootUpdateSubresourceActionWithOptions(servicecidrsResource, "status", serviceCIDR, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1alpha1.ServiceCIDR), err -} - -// Delete takes name of the serviceCIDR and deletes it. Returns an error if one occurs. -func (c *FakeServiceCIDRs) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. 
- Invokes(testing.NewRootDeleteActionWithOptions(servicecidrsResource, name, opts), &v1alpha1.ServiceCIDR{}) - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeServiceCIDRs) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewRootDeleteCollectionActionWithOptions(servicecidrsResource, opts, listOpts) - - _, err := c.Fake.Invokes(action, &v1alpha1.ServiceCIDRList{}) - return err -} - -// Patch applies the patch and returns the patched serviceCIDR. -func (c *FakeServiceCIDRs) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ServiceCIDR, err error) { - emptyResult := &v1alpha1.ServiceCIDR{} - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceActionWithOptions(servicecidrsResource, name, pt, data, opts, subresources...), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1alpha1.ServiceCIDR), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied serviceCIDR. -func (c *FakeServiceCIDRs) Apply(ctx context.Context, serviceCIDR *networkingv1alpha1.ServiceCIDRApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ServiceCIDR, err error) { - if serviceCIDR == nil { - return nil, fmt.Errorf("serviceCIDR provided to Apply must not be nil") - } - data, err := json.Marshal(serviceCIDR) - if err != nil { - return nil, err - } - name := serviceCIDR.Name - if name == nil { - return nil, fmt.Errorf("serviceCIDR.Name must be provided to Apply") - } - emptyResult := &v1alpha1.ServiceCIDR{} - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceActionWithOptions(servicecidrsResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1alpha1.ServiceCIDR), err -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *FakeServiceCIDRs) ApplyStatus(ctx context.Context, serviceCIDR *networkingv1alpha1.ServiceCIDRApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ServiceCIDR, err error) { - if serviceCIDR == nil { - return nil, fmt.Errorf("serviceCIDR provided to Apply must not be nil") - } - data, err := json.Marshal(serviceCIDR) - if err != nil { - return nil, err - } - name := serviceCIDR.Name - if name == nil { - return nil, fmt.Errorf("serviceCIDR.Name must be provided to Apply") - } - emptyResult := &v1alpha1.ServiceCIDR{} - obj, err := c.Fake. 
- Invokes(testing.NewRootPatchSubresourceActionWithOptions(servicecidrsResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) - if obj == nil { - return emptyResult, err +func newFakeServiceCIDRs(fake *FakeNetworkingV1alpha1) typednetworkingv1alpha1.ServiceCIDRInterface { + return &fakeServiceCIDRs{ + gentype.NewFakeClientWithListAndApply[*v1alpha1.ServiceCIDR, *v1alpha1.ServiceCIDRList, *networkingv1alpha1.ServiceCIDRApplyConfiguration]( + fake.Fake, + "", + v1alpha1.SchemeGroupVersion.WithResource("servicecidrs"), + v1alpha1.SchemeGroupVersion.WithKind("ServiceCIDR"), + func() *v1alpha1.ServiceCIDR { return &v1alpha1.ServiceCIDR{} }, + func() *v1alpha1.ServiceCIDRList { return &v1alpha1.ServiceCIDRList{} }, + func(dst, src *v1alpha1.ServiceCIDRList) { dst.ListMeta = src.ListMeta }, + func(list *v1alpha1.ServiceCIDRList) []*v1alpha1.ServiceCIDR { + return gentype.ToPointerSlice(list.Items) + }, + func(list *v1alpha1.ServiceCIDRList, items []*v1alpha1.ServiceCIDR) { + list.Items = gentype.FromPointerSlice(items) + }, + ), + fake, } - return obj.(*v1alpha1.ServiceCIDR), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/ipaddress.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/ipaddress.go index 33e90d18a..e96a564ab 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/ipaddress.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/ipaddress.go @@ -19,13 +19,13 @@ limitations under the License. package v1alpha1 import ( - "context" + context "context" - v1alpha1 "k8s.io/api/networking/v1alpha1" + networkingv1alpha1 "k8s.io/api/networking/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - networkingv1alpha1 "k8s.io/client-go/applyconfigurations/networking/v1alpha1" + applyconfigurationsnetworkingv1alpha1 "k8s.io/client-go/applyconfigurations/networking/v1alpha1" gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" ) @@ -38,32 +38,34 @@ type IPAddressesGetter interface { // IPAddressInterface has methods to work with IPAddress resources. 
type IPAddressInterface interface { - Create(ctx context.Context, iPAddress *v1alpha1.IPAddress, opts v1.CreateOptions) (*v1alpha1.IPAddress, error) - Update(ctx context.Context, iPAddress *v1alpha1.IPAddress, opts v1.UpdateOptions) (*v1alpha1.IPAddress, error) + Create(ctx context.Context, iPAddress *networkingv1alpha1.IPAddress, opts v1.CreateOptions) (*networkingv1alpha1.IPAddress, error) + Update(ctx context.Context, iPAddress *networkingv1alpha1.IPAddress, opts v1.UpdateOptions) (*networkingv1alpha1.IPAddress, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.IPAddress, error) - List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.IPAddressList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*networkingv1alpha1.IPAddress, error) + List(ctx context.Context, opts v1.ListOptions) (*networkingv1alpha1.IPAddressList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.IPAddress, err error) - Apply(ctx context.Context, iPAddress *networkingv1alpha1.IPAddressApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.IPAddress, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *networkingv1alpha1.IPAddress, err error) + Apply(ctx context.Context, iPAddress *applyconfigurationsnetworkingv1alpha1.IPAddressApplyConfiguration, opts v1.ApplyOptions) (result *networkingv1alpha1.IPAddress, err error) IPAddressExpansion } // iPAddresses implements IPAddressInterface type iPAddresses struct { - *gentype.ClientWithListAndApply[*v1alpha1.IPAddress, *v1alpha1.IPAddressList, *networkingv1alpha1.IPAddressApplyConfiguration] + *gentype.ClientWithListAndApply[*networkingv1alpha1.IPAddress, *networkingv1alpha1.IPAddressList, *applyconfigurationsnetworkingv1alpha1.IPAddressApplyConfiguration] } // newIPAddresses returns a IPAddresses func newIPAddresses(c *NetworkingV1alpha1Client) *iPAddresses { return &iPAddresses{ - gentype.NewClientWithListAndApply[*v1alpha1.IPAddress, *v1alpha1.IPAddressList, *networkingv1alpha1.IPAddressApplyConfiguration]( + gentype.NewClientWithListAndApply[*networkingv1alpha1.IPAddress, *networkingv1alpha1.IPAddressList, *applyconfigurationsnetworkingv1alpha1.IPAddressApplyConfiguration]( "ipaddresses", c.RESTClient(), scheme.ParameterCodec, "", - func() *v1alpha1.IPAddress { return &v1alpha1.IPAddress{} }, - func() *v1alpha1.IPAddressList { return &v1alpha1.IPAddressList{} }), + func() *networkingv1alpha1.IPAddress { return &networkingv1alpha1.IPAddress{} }, + func() *networkingv1alpha1.IPAddressList { return &networkingv1alpha1.IPAddressList{} }, + gentype.PrefersProtobuf[*networkingv1alpha1.IPAddress](), + ), } } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/networking_client.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/networking_client.go index c730e6246..9e1b3064d 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/networking_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/networking_client.go @@ -19,10 +19,10 @@ limitations under the License. 
package v1alpha1 import ( - "net/http" + http "net/http" - v1alpha1 "k8s.io/api/networking/v1alpha1" - "k8s.io/client-go/kubernetes/scheme" + networkingv1alpha1 "k8s.io/api/networking/v1alpha1" + scheme "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -90,10 +90,10 @@ func New(c rest.Interface) *NetworkingV1alpha1Client { } func setConfigDefaults(config *rest.Config) error { - gv := v1alpha1.SchemeGroupVersion + gv := networkingv1alpha1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/servicecidr.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/servicecidr.go index b72fe5b69..38cc26010 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/servicecidr.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/servicecidr.go @@ -19,13 +19,13 @@ limitations under the License. package v1alpha1 import ( - "context" + context "context" - v1alpha1 "k8s.io/api/networking/v1alpha1" + networkingv1alpha1 "k8s.io/api/networking/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - networkingv1alpha1 "k8s.io/client-go/applyconfigurations/networking/v1alpha1" + applyconfigurationsnetworkingv1alpha1 "k8s.io/client-go/applyconfigurations/networking/v1alpha1" gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" ) @@ -38,36 +38,38 @@ type ServiceCIDRsGetter interface { // ServiceCIDRInterface has methods to work with ServiceCIDR resources. type ServiceCIDRInterface interface { - Create(ctx context.Context, serviceCIDR *v1alpha1.ServiceCIDR, opts v1.CreateOptions) (*v1alpha1.ServiceCIDR, error) - Update(ctx context.Context, serviceCIDR *v1alpha1.ServiceCIDR, opts v1.UpdateOptions) (*v1alpha1.ServiceCIDR, error) + Create(ctx context.Context, serviceCIDR *networkingv1alpha1.ServiceCIDR, opts v1.CreateOptions) (*networkingv1alpha1.ServiceCIDR, error) + Update(ctx context.Context, serviceCIDR *networkingv1alpha1.ServiceCIDR, opts v1.UpdateOptions) (*networkingv1alpha1.ServiceCIDR, error) // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
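// A hedged sketch of constructing and using the real (non-fake) typed client; the kubeconfig
// location and the listed resource are assumptions for illustration. Callers build the clientset
// exactly as before; the gentype.PrefersProtobuf option added in this diff only affects content
// negotiation with servers that support protobuf for these types.
package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Assumes a kubeconfig at the default home location; adjust for in-cluster use.
	config, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	clientset, err := kubernetes.NewForConfig(config)
	if err != nil {
		panic(err)
	}
	// List ServiceCIDRs through the regenerated typed client; the call surface is unchanged.
	cidrs, err := clientset.NetworkingV1beta1().ServiceCIDRs().List(context.Background(), metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Println("service CIDRs:", len(cidrs.Items))
}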
- UpdateStatus(ctx context.Context, serviceCIDR *v1alpha1.ServiceCIDR, opts v1.UpdateOptions) (*v1alpha1.ServiceCIDR, error) + UpdateStatus(ctx context.Context, serviceCIDR *networkingv1alpha1.ServiceCIDR, opts v1.UpdateOptions) (*networkingv1alpha1.ServiceCIDR, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.ServiceCIDR, error) - List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.ServiceCIDRList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*networkingv1alpha1.ServiceCIDR, error) + List(ctx context.Context, opts v1.ListOptions) (*networkingv1alpha1.ServiceCIDRList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ServiceCIDR, err error) - Apply(ctx context.Context, serviceCIDR *networkingv1alpha1.ServiceCIDRApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ServiceCIDR, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *networkingv1alpha1.ServiceCIDR, err error) + Apply(ctx context.Context, serviceCIDR *applyconfigurationsnetworkingv1alpha1.ServiceCIDRApplyConfiguration, opts v1.ApplyOptions) (result *networkingv1alpha1.ServiceCIDR, err error) // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). - ApplyStatus(ctx context.Context, serviceCIDR *networkingv1alpha1.ServiceCIDRApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ServiceCIDR, err error) + ApplyStatus(ctx context.Context, serviceCIDR *applyconfigurationsnetworkingv1alpha1.ServiceCIDRApplyConfiguration, opts v1.ApplyOptions) (result *networkingv1alpha1.ServiceCIDR, err error) ServiceCIDRExpansion } // serviceCIDRs implements ServiceCIDRInterface type serviceCIDRs struct { - *gentype.ClientWithListAndApply[*v1alpha1.ServiceCIDR, *v1alpha1.ServiceCIDRList, *networkingv1alpha1.ServiceCIDRApplyConfiguration] + *gentype.ClientWithListAndApply[*networkingv1alpha1.ServiceCIDR, *networkingv1alpha1.ServiceCIDRList, *applyconfigurationsnetworkingv1alpha1.ServiceCIDRApplyConfiguration] } // newServiceCIDRs returns a ServiceCIDRs func newServiceCIDRs(c *NetworkingV1alpha1Client) *serviceCIDRs { return &serviceCIDRs{ - gentype.NewClientWithListAndApply[*v1alpha1.ServiceCIDR, *v1alpha1.ServiceCIDRList, *networkingv1alpha1.ServiceCIDRApplyConfiguration]( + gentype.NewClientWithListAndApply[*networkingv1alpha1.ServiceCIDR, *networkingv1alpha1.ServiceCIDRList, *applyconfigurationsnetworkingv1alpha1.ServiceCIDRApplyConfiguration]( "servicecidrs", c.RESTClient(), scheme.ParameterCodec, "", - func() *v1alpha1.ServiceCIDR { return &v1alpha1.ServiceCIDR{} }, - func() *v1alpha1.ServiceCIDRList { return &v1alpha1.ServiceCIDRList{} }), + func() *networkingv1alpha1.ServiceCIDR { return &networkingv1alpha1.ServiceCIDR{} }, + func() *networkingv1alpha1.ServiceCIDRList { return &networkingv1alpha1.ServiceCIDRList{} }, + gentype.PrefersProtobuf[*networkingv1alpha1.ServiceCIDR](), + ), } } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/fake/fake_ingress.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/fake/fake_ingress.go index 59bf762a0..0d5dc9d64 100644 --- 
a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/fake/fake_ingress.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/fake/fake_ingress.go @@ -19,179 +19,33 @@ limitations under the License. package fake import ( - "context" - json "encoding/json" - "fmt" - v1beta1 "k8s.io/api/networking/v1beta1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" networkingv1beta1 "k8s.io/client-go/applyconfigurations/networking/v1beta1" - testing "k8s.io/client-go/testing" + gentype "k8s.io/client-go/gentype" + typednetworkingv1beta1 "k8s.io/client-go/kubernetes/typed/networking/v1beta1" ) -// FakeIngresses implements IngressInterface -type FakeIngresses struct { +// fakeIngresses implements IngressInterface +type fakeIngresses struct { + *gentype.FakeClientWithListAndApply[*v1beta1.Ingress, *v1beta1.IngressList, *networkingv1beta1.IngressApplyConfiguration] Fake *FakeNetworkingV1beta1 - ns string -} - -var ingressesResource = v1beta1.SchemeGroupVersion.WithResource("ingresses") - -var ingressesKind = v1beta1.SchemeGroupVersion.WithKind("Ingress") - -// Get takes name of the ingress, and returns the corresponding ingress object, and an error if there is any. -func (c *FakeIngresses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.Ingress, err error) { - emptyResult := &v1beta1.Ingress{} - obj, err := c.Fake. - Invokes(testing.NewGetActionWithOptions(ingressesResource, c.ns, name, options), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.Ingress), err -} - -// List takes label and field selectors, and returns the list of Ingresses that match those selectors. -func (c *FakeIngresses) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.IngressList, err error) { - emptyResult := &v1beta1.IngressList{} - obj, err := c.Fake. - Invokes(testing.NewListActionWithOptions(ingressesResource, ingressesKind, c.ns, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1beta1.IngressList{ListMeta: obj.(*v1beta1.IngressList).ListMeta} - for _, item := range obj.(*v1beta1.IngressList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested ingresses. -func (c *FakeIngresses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchActionWithOptions(ingressesResource, c.ns, opts)) - -} - -// Create takes the representation of a ingress and creates it. Returns the server's representation of the ingress, and an error, if there is any. -func (c *FakeIngresses) Create(ctx context.Context, ingress *v1beta1.Ingress, opts v1.CreateOptions) (result *v1beta1.Ingress, err error) { - emptyResult := &v1beta1.Ingress{} - obj, err := c.Fake. - Invokes(testing.NewCreateActionWithOptions(ingressesResource, c.ns, ingress, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.Ingress), err -} - -// Update takes the representation of a ingress and updates it. Returns the server's representation of the ingress, and an error, if there is any. 
-func (c *FakeIngresses) Update(ctx context.Context, ingress *v1beta1.Ingress, opts v1.UpdateOptions) (result *v1beta1.Ingress, err error) { - emptyResult := &v1beta1.Ingress{} - obj, err := c.Fake. - Invokes(testing.NewUpdateActionWithOptions(ingressesResource, c.ns, ingress, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.Ingress), err } -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeIngresses) UpdateStatus(ctx context.Context, ingress *v1beta1.Ingress, opts v1.UpdateOptions) (result *v1beta1.Ingress, err error) { - emptyResult := &v1beta1.Ingress{} - obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceActionWithOptions(ingressesResource, "status", c.ns, ingress, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.Ingress), err -} - -// Delete takes name of the ingress and deletes it. Returns an error if one occurs. -func (c *FakeIngresses) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(ingressesResource, c.ns, name, opts), &v1beta1.Ingress{}) - - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeIngresses) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionActionWithOptions(ingressesResource, c.ns, opts, listOpts) - - _, err := c.Fake.Invokes(action, &v1beta1.IngressList{}) - return err -} - -// Patch applies the patch and returns the patched ingress. -func (c *FakeIngresses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Ingress, err error) { - emptyResult := &v1beta1.Ingress{} - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceActionWithOptions(ingressesResource, c.ns, name, pt, data, opts, subresources...), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.Ingress), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied ingress. -func (c *FakeIngresses) Apply(ctx context.Context, ingress *networkingv1beta1.IngressApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.Ingress, err error) { - if ingress == nil { - return nil, fmt.Errorf("ingress provided to Apply must not be nil") - } - data, err := json.Marshal(ingress) - if err != nil { - return nil, err - } - name := ingress.Name - if name == nil { - return nil, fmt.Errorf("ingress.Name must be provided to Apply") - } - emptyResult := &v1beta1.Ingress{} - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceActionWithOptions(ingressesResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.Ingress), err -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
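// A minimal sketch of the namespaced case, assuming the standard fake clientset: unlike the
// cluster-scoped fakes above, the Ingress fake is constructed with the namespace passed to
// Ingresses(namespace) (see newFakeIngresses below), so objects created in one namespace are
// not visible from another. Names and namespaces here are illustrative only.
package fake_test

import (
	"context"
	"testing"

	networkingv1beta1 "k8s.io/api/networking/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
)

func TestFakeNamespacedIngress(t *testing.T) {
	clientset := fake.NewSimpleClientset()
	ctx := context.Background()

	ing := &networkingv1beta1.Ingress{ObjectMeta: metav1.ObjectMeta{Name: "web", Namespace: "default"}}
	if _, err := clientset.NetworkingV1beta1().Ingresses("default").Create(ctx, ing, metav1.CreateOptions{}); err != nil {
		t.Fatalf("create: %v", err)
	}
	// Reading from a different namespace should miss: the fake scopes actions by namespace.
	if _, err := clientset.NetworkingV1beta1().Ingresses("other").Get(ctx, "web", metav1.GetOptions{}); err == nil {
		t.Fatalf("expected NotFound when reading from a different namespace")
	}
}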
-func (c *FakeIngresses) ApplyStatus(ctx context.Context, ingress *networkingv1beta1.IngressApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.Ingress, err error) { - if ingress == nil { - return nil, fmt.Errorf("ingress provided to Apply must not be nil") - } - data, err := json.Marshal(ingress) - if err != nil { - return nil, err - } - name := ingress.Name - if name == nil { - return nil, fmt.Errorf("ingress.Name must be provided to Apply") - } - emptyResult := &v1beta1.Ingress{} - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceActionWithOptions(ingressesResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) - - if obj == nil { - return emptyResult, err +func newFakeIngresses(fake *FakeNetworkingV1beta1, namespace string) typednetworkingv1beta1.IngressInterface { + return &fakeIngresses{ + gentype.NewFakeClientWithListAndApply[*v1beta1.Ingress, *v1beta1.IngressList, *networkingv1beta1.IngressApplyConfiguration]( + fake.Fake, + namespace, + v1beta1.SchemeGroupVersion.WithResource("ingresses"), + v1beta1.SchemeGroupVersion.WithKind("Ingress"), + func() *v1beta1.Ingress { return &v1beta1.Ingress{} }, + func() *v1beta1.IngressList { return &v1beta1.IngressList{} }, + func(dst, src *v1beta1.IngressList) { dst.ListMeta = src.ListMeta }, + func(list *v1beta1.IngressList) []*v1beta1.Ingress { return gentype.ToPointerSlice(list.Items) }, + func(list *v1beta1.IngressList, items []*v1beta1.Ingress) { + list.Items = gentype.FromPointerSlice(items) + }, + ), + fake, } - return obj.(*v1beta1.Ingress), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/fake/fake_ingressclass.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/fake/fake_ingressclass.go index 3001de8e4..1c7822184 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/fake/fake_ingressclass.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/fake/fake_ingressclass.go @@ -19,133 +19,35 @@ limitations under the License. package fake import ( - "context" - json "encoding/json" - "fmt" - v1beta1 "k8s.io/api/networking/v1beta1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" networkingv1beta1 "k8s.io/client-go/applyconfigurations/networking/v1beta1" - testing "k8s.io/client-go/testing" + gentype "k8s.io/client-go/gentype" + typednetworkingv1beta1 "k8s.io/client-go/kubernetes/typed/networking/v1beta1" ) -// FakeIngressClasses implements IngressClassInterface -type FakeIngressClasses struct { +// fakeIngressClasses implements IngressClassInterface +type fakeIngressClasses struct { + *gentype.FakeClientWithListAndApply[*v1beta1.IngressClass, *v1beta1.IngressClassList, *networkingv1beta1.IngressClassApplyConfiguration] Fake *FakeNetworkingV1beta1 } -var ingressclassesResource = v1beta1.SchemeGroupVersion.WithResource("ingressclasses") - -var ingressclassesKind = v1beta1.SchemeGroupVersion.WithKind("IngressClass") - -// Get takes name of the ingressClass, and returns the corresponding ingressClass object, and an error if there is any. -func (c *FakeIngressClasses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.IngressClass, err error) { - emptyResult := &v1beta1.IngressClass{} - obj, err := c.Fake. 
- Invokes(testing.NewRootGetActionWithOptions(ingressclassesResource, name, options), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.IngressClass), err -} - -// List takes label and field selectors, and returns the list of IngressClasses that match those selectors. -func (c *FakeIngressClasses) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.IngressClassList, err error) { - emptyResult := &v1beta1.IngressClassList{} - obj, err := c.Fake. - Invokes(testing.NewRootListActionWithOptions(ingressclassesResource, ingressclassesKind, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1beta1.IngressClassList{ListMeta: obj.(*v1beta1.IngressClassList).ListMeta} - for _, item := range obj.(*v1beta1.IngressClassList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested ingressClasses. -func (c *FakeIngressClasses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewRootWatchActionWithOptions(ingressclassesResource, opts)) -} - -// Create takes the representation of a ingressClass and creates it. Returns the server's representation of the ingressClass, and an error, if there is any. -func (c *FakeIngressClasses) Create(ctx context.Context, ingressClass *v1beta1.IngressClass, opts v1.CreateOptions) (result *v1beta1.IngressClass, err error) { - emptyResult := &v1beta1.IngressClass{} - obj, err := c.Fake. - Invokes(testing.NewRootCreateActionWithOptions(ingressclassesResource, ingressClass, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.IngressClass), err -} - -// Update takes the representation of a ingressClass and updates it. Returns the server's representation of the ingressClass, and an error, if there is any. -func (c *FakeIngressClasses) Update(ctx context.Context, ingressClass *v1beta1.IngressClass, opts v1.UpdateOptions) (result *v1beta1.IngressClass, err error) { - emptyResult := &v1beta1.IngressClass{} - obj, err := c.Fake. - Invokes(testing.NewRootUpdateActionWithOptions(ingressclassesResource, ingressClass, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.IngressClass), err -} - -// Delete takes name of the ingressClass and deletes it. Returns an error if one occurs. -func (c *FakeIngressClasses) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewRootDeleteActionWithOptions(ingressclassesResource, name, opts), &v1beta1.IngressClass{}) - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeIngressClasses) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewRootDeleteCollectionActionWithOptions(ingressclassesResource, opts, listOpts) - - _, err := c.Fake.Invokes(action, &v1beta1.IngressClassList{}) - return err -} - -// Patch applies the patch and returns the patched ingressClass. -func (c *FakeIngressClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.IngressClass, err error) { - emptyResult := &v1beta1.IngressClass{} - obj, err := c.Fake. 
- Invokes(testing.NewRootPatchSubresourceActionWithOptions(ingressclassesResource, name, pt, data, opts, subresources...), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.IngressClass), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied ingressClass. -func (c *FakeIngressClasses) Apply(ctx context.Context, ingressClass *networkingv1beta1.IngressClassApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.IngressClass, err error) { - if ingressClass == nil { - return nil, fmt.Errorf("ingressClass provided to Apply must not be nil") - } - data, err := json.Marshal(ingressClass) - if err != nil { - return nil, err - } - name := ingressClass.Name - if name == nil { - return nil, fmt.Errorf("ingressClass.Name must be provided to Apply") - } - emptyResult := &v1beta1.IngressClass{} - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceActionWithOptions(ingressclassesResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) - if obj == nil { - return emptyResult, err +func newFakeIngressClasses(fake *FakeNetworkingV1beta1) typednetworkingv1beta1.IngressClassInterface { + return &fakeIngressClasses{ + gentype.NewFakeClientWithListAndApply[*v1beta1.IngressClass, *v1beta1.IngressClassList, *networkingv1beta1.IngressClassApplyConfiguration]( + fake.Fake, + "", + v1beta1.SchemeGroupVersion.WithResource("ingressclasses"), + v1beta1.SchemeGroupVersion.WithKind("IngressClass"), + func() *v1beta1.IngressClass { return &v1beta1.IngressClass{} }, + func() *v1beta1.IngressClassList { return &v1beta1.IngressClassList{} }, + func(dst, src *v1beta1.IngressClassList) { dst.ListMeta = src.ListMeta }, + func(list *v1beta1.IngressClassList) []*v1beta1.IngressClass { + return gentype.ToPointerSlice(list.Items) + }, + func(list *v1beta1.IngressClassList, items []*v1beta1.IngressClass) { + list.Items = gentype.FromPointerSlice(items) + }, + ), + fake, } - return obj.(*v1beta1.IngressClass), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/fake/fake_ipaddress.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/fake/fake_ipaddress.go index d8352bb79..1dc63cd69 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/fake/fake_ipaddress.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/fake/fake_ipaddress.go @@ -19,133 +19,33 @@ limitations under the License. 
package fake import ( - "context" - json "encoding/json" - "fmt" - v1beta1 "k8s.io/api/networking/v1beta1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" networkingv1beta1 "k8s.io/client-go/applyconfigurations/networking/v1beta1" - testing "k8s.io/client-go/testing" + gentype "k8s.io/client-go/gentype" + typednetworkingv1beta1 "k8s.io/client-go/kubernetes/typed/networking/v1beta1" ) -// FakeIPAddresses implements IPAddressInterface -type FakeIPAddresses struct { +// fakeIPAddresses implements IPAddressInterface +type fakeIPAddresses struct { + *gentype.FakeClientWithListAndApply[*v1beta1.IPAddress, *v1beta1.IPAddressList, *networkingv1beta1.IPAddressApplyConfiguration] Fake *FakeNetworkingV1beta1 } -var ipaddressesResource = v1beta1.SchemeGroupVersion.WithResource("ipaddresses") - -var ipaddressesKind = v1beta1.SchemeGroupVersion.WithKind("IPAddress") - -// Get takes name of the iPAddress, and returns the corresponding iPAddress object, and an error if there is any. -func (c *FakeIPAddresses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.IPAddress, err error) { - emptyResult := &v1beta1.IPAddress{} - obj, err := c.Fake. - Invokes(testing.NewRootGetActionWithOptions(ipaddressesResource, name, options), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.IPAddress), err -} - -// List takes label and field selectors, and returns the list of IPAddresses that match those selectors. -func (c *FakeIPAddresses) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.IPAddressList, err error) { - emptyResult := &v1beta1.IPAddressList{} - obj, err := c.Fake. - Invokes(testing.NewRootListActionWithOptions(ipaddressesResource, ipaddressesKind, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1beta1.IPAddressList{ListMeta: obj.(*v1beta1.IPAddressList).ListMeta} - for _, item := range obj.(*v1beta1.IPAddressList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested iPAddresses. -func (c *FakeIPAddresses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewRootWatchActionWithOptions(ipaddressesResource, opts)) -} - -// Create takes the representation of a iPAddress and creates it. Returns the server's representation of the iPAddress, and an error, if there is any. -func (c *FakeIPAddresses) Create(ctx context.Context, iPAddress *v1beta1.IPAddress, opts v1.CreateOptions) (result *v1beta1.IPAddress, err error) { - emptyResult := &v1beta1.IPAddress{} - obj, err := c.Fake. - Invokes(testing.NewRootCreateActionWithOptions(ipaddressesResource, iPAddress, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.IPAddress), err -} - -// Update takes the representation of a iPAddress and updates it. Returns the server's representation of the iPAddress, and an error, if there is any. -func (c *FakeIPAddresses) Update(ctx context.Context, iPAddress *v1beta1.IPAddress, opts v1.UpdateOptions) (result *v1beta1.IPAddress, err error) { - emptyResult := &v1beta1.IPAddress{} - obj, err := c.Fake. 
- Invokes(testing.NewRootUpdateActionWithOptions(ipaddressesResource, iPAddress, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.IPAddress), err -} - -// Delete takes name of the iPAddress and deletes it. Returns an error if one occurs. -func (c *FakeIPAddresses) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewRootDeleteActionWithOptions(ipaddressesResource, name, opts), &v1beta1.IPAddress{}) - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeIPAddresses) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewRootDeleteCollectionActionWithOptions(ipaddressesResource, opts, listOpts) - - _, err := c.Fake.Invokes(action, &v1beta1.IPAddressList{}) - return err -} - -// Patch applies the patch and returns the patched iPAddress. -func (c *FakeIPAddresses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.IPAddress, err error) { - emptyResult := &v1beta1.IPAddress{} - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceActionWithOptions(ipaddressesResource, name, pt, data, opts, subresources...), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.IPAddress), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied iPAddress. -func (c *FakeIPAddresses) Apply(ctx context.Context, iPAddress *networkingv1beta1.IPAddressApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.IPAddress, err error) { - if iPAddress == nil { - return nil, fmt.Errorf("iPAddress provided to Apply must not be nil") - } - data, err := json.Marshal(iPAddress) - if err != nil { - return nil, err - } - name := iPAddress.Name - if name == nil { - return nil, fmt.Errorf("iPAddress.Name must be provided to Apply") - } - emptyResult := &v1beta1.IPAddress{} - obj, err := c.Fake. 
- Invokes(testing.NewRootPatchSubresourceActionWithOptions(ipaddressesResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) - if obj == nil { - return emptyResult, err +func newFakeIPAddresses(fake *FakeNetworkingV1beta1) typednetworkingv1beta1.IPAddressInterface { + return &fakeIPAddresses{ + gentype.NewFakeClientWithListAndApply[*v1beta1.IPAddress, *v1beta1.IPAddressList, *networkingv1beta1.IPAddressApplyConfiguration]( + fake.Fake, + "", + v1beta1.SchemeGroupVersion.WithResource("ipaddresses"), + v1beta1.SchemeGroupVersion.WithKind("IPAddress"), + func() *v1beta1.IPAddress { return &v1beta1.IPAddress{} }, + func() *v1beta1.IPAddressList { return &v1beta1.IPAddressList{} }, + func(dst, src *v1beta1.IPAddressList) { dst.ListMeta = src.ListMeta }, + func(list *v1beta1.IPAddressList) []*v1beta1.IPAddress { return gentype.ToPointerSlice(list.Items) }, + func(list *v1beta1.IPAddressList, items []*v1beta1.IPAddress) { + list.Items = gentype.FromPointerSlice(items) + }, + ), + fake, } - return obj.(*v1beta1.IPAddress), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/fake/fake_networking_client.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/fake/fake_networking_client.go index bd72d5929..59305ee81 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/fake/fake_networking_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/fake/fake_networking_client.go @@ -29,19 +29,19 @@ type FakeNetworkingV1beta1 struct { } func (c *FakeNetworkingV1beta1) IPAddresses() v1beta1.IPAddressInterface { - return &FakeIPAddresses{c} + return newFakeIPAddresses(c) } func (c *FakeNetworkingV1beta1) Ingresses(namespace string) v1beta1.IngressInterface { - return &FakeIngresses{c, namespace} + return newFakeIngresses(c, namespace) } func (c *FakeNetworkingV1beta1) IngressClasses() v1beta1.IngressClassInterface { - return &FakeIngressClasses{c} + return newFakeIngressClasses(c) } func (c *FakeNetworkingV1beta1) ServiceCIDRs() v1beta1.ServiceCIDRInterface { - return &FakeServiceCIDRs{c} + return newFakeServiceCIDRs(c) } // RESTClient returns a RESTClient that is used to communicate diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/fake/fake_servicecidr.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/fake/fake_servicecidr.go index 0eb5b2f2b..d8b15206b 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/fake/fake_servicecidr.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/fake/fake_servicecidr.go @@ -19,168 +19,33 @@ limitations under the License. 
package fake import ( - "context" - json "encoding/json" - "fmt" - v1beta1 "k8s.io/api/networking/v1beta1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" networkingv1beta1 "k8s.io/client-go/applyconfigurations/networking/v1beta1" - testing "k8s.io/client-go/testing" + gentype "k8s.io/client-go/gentype" + typednetworkingv1beta1 "k8s.io/client-go/kubernetes/typed/networking/v1beta1" ) -// FakeServiceCIDRs implements ServiceCIDRInterface -type FakeServiceCIDRs struct { +// fakeServiceCIDRs implements ServiceCIDRInterface +type fakeServiceCIDRs struct { + *gentype.FakeClientWithListAndApply[*v1beta1.ServiceCIDR, *v1beta1.ServiceCIDRList, *networkingv1beta1.ServiceCIDRApplyConfiguration] Fake *FakeNetworkingV1beta1 } -var servicecidrsResource = v1beta1.SchemeGroupVersion.WithResource("servicecidrs") - -var servicecidrsKind = v1beta1.SchemeGroupVersion.WithKind("ServiceCIDR") - -// Get takes name of the serviceCIDR, and returns the corresponding serviceCIDR object, and an error if there is any. -func (c *FakeServiceCIDRs) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.ServiceCIDR, err error) { - emptyResult := &v1beta1.ServiceCIDR{} - obj, err := c.Fake. - Invokes(testing.NewRootGetActionWithOptions(servicecidrsResource, name, options), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.ServiceCIDR), err -} - -// List takes label and field selectors, and returns the list of ServiceCIDRs that match those selectors. -func (c *FakeServiceCIDRs) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.ServiceCIDRList, err error) { - emptyResult := &v1beta1.ServiceCIDRList{} - obj, err := c.Fake. - Invokes(testing.NewRootListActionWithOptions(servicecidrsResource, servicecidrsKind, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1beta1.ServiceCIDRList{ListMeta: obj.(*v1beta1.ServiceCIDRList).ListMeta} - for _, item := range obj.(*v1beta1.ServiceCIDRList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested serviceCIDRs. -func (c *FakeServiceCIDRs) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewRootWatchActionWithOptions(servicecidrsResource, opts)) -} - -// Create takes the representation of a serviceCIDR and creates it. Returns the server's representation of the serviceCIDR, and an error, if there is any. -func (c *FakeServiceCIDRs) Create(ctx context.Context, serviceCIDR *v1beta1.ServiceCIDR, opts v1.CreateOptions) (result *v1beta1.ServiceCIDR, err error) { - emptyResult := &v1beta1.ServiceCIDR{} - obj, err := c.Fake. - Invokes(testing.NewRootCreateActionWithOptions(servicecidrsResource, serviceCIDR, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.ServiceCIDR), err -} - -// Update takes the representation of a serviceCIDR and updates it. Returns the server's representation of the serviceCIDR, and an error, if there is any. 
-func (c *FakeServiceCIDRs) Update(ctx context.Context, serviceCIDR *v1beta1.ServiceCIDR, opts v1.UpdateOptions) (result *v1beta1.ServiceCIDR, err error) { - emptyResult := &v1beta1.ServiceCIDR{} - obj, err := c.Fake. - Invokes(testing.NewRootUpdateActionWithOptions(servicecidrsResource, serviceCIDR, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.ServiceCIDR), err -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeServiceCIDRs) UpdateStatus(ctx context.Context, serviceCIDR *v1beta1.ServiceCIDR, opts v1.UpdateOptions) (result *v1beta1.ServiceCIDR, err error) { - emptyResult := &v1beta1.ServiceCIDR{} - obj, err := c.Fake. - Invokes(testing.NewRootUpdateSubresourceActionWithOptions(servicecidrsResource, "status", serviceCIDR, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.ServiceCIDR), err -} - -// Delete takes name of the serviceCIDR and deletes it. Returns an error if one occurs. -func (c *FakeServiceCIDRs) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewRootDeleteActionWithOptions(servicecidrsResource, name, opts), &v1beta1.ServiceCIDR{}) - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeServiceCIDRs) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewRootDeleteCollectionActionWithOptions(servicecidrsResource, opts, listOpts) - - _, err := c.Fake.Invokes(action, &v1beta1.ServiceCIDRList{}) - return err -} - -// Patch applies the patch and returns the patched serviceCIDR. -func (c *FakeServiceCIDRs) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ServiceCIDR, err error) { - emptyResult := &v1beta1.ServiceCIDR{} - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceActionWithOptions(servicecidrsResource, name, pt, data, opts, subresources...), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.ServiceCIDR), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied serviceCIDR. -func (c *FakeServiceCIDRs) Apply(ctx context.Context, serviceCIDR *networkingv1beta1.ServiceCIDRApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.ServiceCIDR, err error) { - if serviceCIDR == nil { - return nil, fmt.Errorf("serviceCIDR provided to Apply must not be nil") - } - data, err := json.Marshal(serviceCIDR) - if err != nil { - return nil, err - } - name := serviceCIDR.Name - if name == nil { - return nil, fmt.Errorf("serviceCIDR.Name must be provided to Apply") - } - emptyResult := &v1beta1.ServiceCIDR{} - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceActionWithOptions(servicecidrsResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.ServiceCIDR), err -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
-func (c *FakeServiceCIDRs) ApplyStatus(ctx context.Context, serviceCIDR *networkingv1beta1.ServiceCIDRApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.ServiceCIDR, err error) { - if serviceCIDR == nil { - return nil, fmt.Errorf("serviceCIDR provided to Apply must not be nil") - } - data, err := json.Marshal(serviceCIDR) - if err != nil { - return nil, err - } - name := serviceCIDR.Name - if name == nil { - return nil, fmt.Errorf("serviceCIDR.Name must be provided to Apply") - } - emptyResult := &v1beta1.ServiceCIDR{} - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceActionWithOptions(servicecidrsResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) - if obj == nil { - return emptyResult, err +func newFakeServiceCIDRs(fake *FakeNetworkingV1beta1) typednetworkingv1beta1.ServiceCIDRInterface { + return &fakeServiceCIDRs{ + gentype.NewFakeClientWithListAndApply[*v1beta1.ServiceCIDR, *v1beta1.ServiceCIDRList, *networkingv1beta1.ServiceCIDRApplyConfiguration]( + fake.Fake, + "", + v1beta1.SchemeGroupVersion.WithResource("servicecidrs"), + v1beta1.SchemeGroupVersion.WithKind("ServiceCIDR"), + func() *v1beta1.ServiceCIDR { return &v1beta1.ServiceCIDR{} }, + func() *v1beta1.ServiceCIDRList { return &v1beta1.ServiceCIDRList{} }, + func(dst, src *v1beta1.ServiceCIDRList) { dst.ListMeta = src.ListMeta }, + func(list *v1beta1.ServiceCIDRList) []*v1beta1.ServiceCIDR { return gentype.ToPointerSlice(list.Items) }, + func(list *v1beta1.ServiceCIDRList, items []*v1beta1.ServiceCIDR) { + list.Items = gentype.FromPointerSlice(items) + }, + ), + fake, } - return obj.(*v1beta1.ServiceCIDR), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/ingress.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/ingress.go index 90be275ad..b2d5aa2ce 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/ingress.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/ingress.go @@ -19,13 +19,13 @@ limitations under the License. package v1beta1 import ( - "context" + context "context" - v1beta1 "k8s.io/api/networking/v1beta1" + networkingv1beta1 "k8s.io/api/networking/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - networkingv1beta1 "k8s.io/client-go/applyconfigurations/networking/v1beta1" + applyconfigurationsnetworkingv1beta1 "k8s.io/client-go/applyconfigurations/networking/v1beta1" gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" ) @@ -38,36 +38,38 @@ type IngressesGetter interface { // IngressInterface has methods to work with Ingress resources. type IngressInterface interface { - Create(ctx context.Context, ingress *v1beta1.Ingress, opts v1.CreateOptions) (*v1beta1.Ingress, error) - Update(ctx context.Context, ingress *v1beta1.Ingress, opts v1.UpdateOptions) (*v1beta1.Ingress, error) + Create(ctx context.Context, ingress *networkingv1beta1.Ingress, opts v1.CreateOptions) (*networkingv1beta1.Ingress, error) + Update(ctx context.Context, ingress *networkingv1beta1.Ingress, opts v1.UpdateOptions) (*networkingv1beta1.Ingress, error) // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
- UpdateStatus(ctx context.Context, ingress *v1beta1.Ingress, opts v1.UpdateOptions) (*v1beta1.Ingress, error) + UpdateStatus(ctx context.Context, ingress *networkingv1beta1.Ingress, opts v1.UpdateOptions) (*networkingv1beta1.Ingress, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.Ingress, error) - List(ctx context.Context, opts v1.ListOptions) (*v1beta1.IngressList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*networkingv1beta1.Ingress, error) + List(ctx context.Context, opts v1.ListOptions) (*networkingv1beta1.IngressList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Ingress, err error) - Apply(ctx context.Context, ingress *networkingv1beta1.IngressApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.Ingress, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *networkingv1beta1.Ingress, err error) + Apply(ctx context.Context, ingress *applyconfigurationsnetworkingv1beta1.IngressApplyConfiguration, opts v1.ApplyOptions) (result *networkingv1beta1.Ingress, err error) // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). - ApplyStatus(ctx context.Context, ingress *networkingv1beta1.IngressApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.Ingress, err error) + ApplyStatus(ctx context.Context, ingress *applyconfigurationsnetworkingv1beta1.IngressApplyConfiguration, opts v1.ApplyOptions) (result *networkingv1beta1.Ingress, err error) IngressExpansion } // ingresses implements IngressInterface type ingresses struct { - *gentype.ClientWithListAndApply[*v1beta1.Ingress, *v1beta1.IngressList, *networkingv1beta1.IngressApplyConfiguration] + *gentype.ClientWithListAndApply[*networkingv1beta1.Ingress, *networkingv1beta1.IngressList, *applyconfigurationsnetworkingv1beta1.IngressApplyConfiguration] } // newIngresses returns a Ingresses func newIngresses(c *NetworkingV1beta1Client, namespace string) *ingresses { return &ingresses{ - gentype.NewClientWithListAndApply[*v1beta1.Ingress, *v1beta1.IngressList, *networkingv1beta1.IngressApplyConfiguration]( + gentype.NewClientWithListAndApply[*networkingv1beta1.Ingress, *networkingv1beta1.IngressList, *applyconfigurationsnetworkingv1beta1.IngressApplyConfiguration]( "ingresses", c.RESTClient(), scheme.ParameterCodec, namespace, - func() *v1beta1.Ingress { return &v1beta1.Ingress{} }, - func() *v1beta1.IngressList { return &v1beta1.IngressList{} }), + func() *networkingv1beta1.Ingress { return &networkingv1beta1.Ingress{} }, + func() *networkingv1beta1.IngressList { return &networkingv1beta1.IngressList{} }, + gentype.PrefersProtobuf[*networkingv1beta1.Ingress](), + ), } } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/ingressclass.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/ingressclass.go index c55da4168..dd37fc5cd 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/ingressclass.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/ingressclass.go @@ -19,13 +19,13 @@ limitations under the License. 
package v1beta1 import ( - "context" + context "context" - v1beta1 "k8s.io/api/networking/v1beta1" + networkingv1beta1 "k8s.io/api/networking/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - networkingv1beta1 "k8s.io/client-go/applyconfigurations/networking/v1beta1" + applyconfigurationsnetworkingv1beta1 "k8s.io/client-go/applyconfigurations/networking/v1beta1" gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" ) @@ -38,32 +38,34 @@ type IngressClassesGetter interface { // IngressClassInterface has methods to work with IngressClass resources. type IngressClassInterface interface { - Create(ctx context.Context, ingressClass *v1beta1.IngressClass, opts v1.CreateOptions) (*v1beta1.IngressClass, error) - Update(ctx context.Context, ingressClass *v1beta1.IngressClass, opts v1.UpdateOptions) (*v1beta1.IngressClass, error) + Create(ctx context.Context, ingressClass *networkingv1beta1.IngressClass, opts v1.CreateOptions) (*networkingv1beta1.IngressClass, error) + Update(ctx context.Context, ingressClass *networkingv1beta1.IngressClass, opts v1.UpdateOptions) (*networkingv1beta1.IngressClass, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.IngressClass, error) - List(ctx context.Context, opts v1.ListOptions) (*v1beta1.IngressClassList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*networkingv1beta1.IngressClass, error) + List(ctx context.Context, opts v1.ListOptions) (*networkingv1beta1.IngressClassList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.IngressClass, err error) - Apply(ctx context.Context, ingressClass *networkingv1beta1.IngressClassApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.IngressClass, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *networkingv1beta1.IngressClass, err error) + Apply(ctx context.Context, ingressClass *applyconfigurationsnetworkingv1beta1.IngressClassApplyConfiguration, opts v1.ApplyOptions) (result *networkingv1beta1.IngressClass, err error) IngressClassExpansion } // ingressClasses implements IngressClassInterface type ingressClasses struct { - *gentype.ClientWithListAndApply[*v1beta1.IngressClass, *v1beta1.IngressClassList, *networkingv1beta1.IngressClassApplyConfiguration] + *gentype.ClientWithListAndApply[*networkingv1beta1.IngressClass, *networkingv1beta1.IngressClassList, *applyconfigurationsnetworkingv1beta1.IngressClassApplyConfiguration] } // newIngressClasses returns a IngressClasses func newIngressClasses(c *NetworkingV1beta1Client) *ingressClasses { return &ingressClasses{ - gentype.NewClientWithListAndApply[*v1beta1.IngressClass, *v1beta1.IngressClassList, *networkingv1beta1.IngressClassApplyConfiguration]( + gentype.NewClientWithListAndApply[*networkingv1beta1.IngressClass, *networkingv1beta1.IngressClassList, *applyconfigurationsnetworkingv1beta1.IngressClassApplyConfiguration]( "ingressclasses", c.RESTClient(), scheme.ParameterCodec, "", - func() *v1beta1.IngressClass { return &v1beta1.IngressClass{} }, - func() *v1beta1.IngressClassList { return 
&v1beta1.IngressClassList{} }), + func() *networkingv1beta1.IngressClass { return &networkingv1beta1.IngressClass{} }, + func() *networkingv1beta1.IngressClassList { return &networkingv1beta1.IngressClassList{} }, + gentype.PrefersProtobuf[*networkingv1beta1.IngressClass](), + ), } } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/ipaddress.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/ipaddress.go index 09e4139e7..0b7ffff72 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/ipaddress.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/ipaddress.go @@ -19,13 +19,13 @@ limitations under the License. package v1beta1 import ( - "context" + context "context" - v1beta1 "k8s.io/api/networking/v1beta1" + networkingv1beta1 "k8s.io/api/networking/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - networkingv1beta1 "k8s.io/client-go/applyconfigurations/networking/v1beta1" + applyconfigurationsnetworkingv1beta1 "k8s.io/client-go/applyconfigurations/networking/v1beta1" gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" ) @@ -38,32 +38,34 @@ type IPAddressesGetter interface { // IPAddressInterface has methods to work with IPAddress resources. type IPAddressInterface interface { - Create(ctx context.Context, iPAddress *v1beta1.IPAddress, opts v1.CreateOptions) (*v1beta1.IPAddress, error) - Update(ctx context.Context, iPAddress *v1beta1.IPAddress, opts v1.UpdateOptions) (*v1beta1.IPAddress, error) + Create(ctx context.Context, iPAddress *networkingv1beta1.IPAddress, opts v1.CreateOptions) (*networkingv1beta1.IPAddress, error) + Update(ctx context.Context, iPAddress *networkingv1beta1.IPAddress, opts v1.UpdateOptions) (*networkingv1beta1.IPAddress, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.IPAddress, error) - List(ctx context.Context, opts v1.ListOptions) (*v1beta1.IPAddressList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*networkingv1beta1.IPAddress, error) + List(ctx context.Context, opts v1.ListOptions) (*networkingv1beta1.IPAddressList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.IPAddress, err error) - Apply(ctx context.Context, iPAddress *networkingv1beta1.IPAddressApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.IPAddress, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *networkingv1beta1.IPAddress, err error) + Apply(ctx context.Context, iPAddress *applyconfigurationsnetworkingv1beta1.IPAddressApplyConfiguration, opts v1.ApplyOptions) (result *networkingv1beta1.IPAddress, err error) IPAddressExpansion } // iPAddresses implements IPAddressInterface type iPAddresses struct { - *gentype.ClientWithListAndApply[*v1beta1.IPAddress, *v1beta1.IPAddressList, *networkingv1beta1.IPAddressApplyConfiguration] + *gentype.ClientWithListAndApply[*networkingv1beta1.IPAddress, *networkingv1beta1.IPAddressList, *applyconfigurationsnetworkingv1beta1.IPAddressApplyConfiguration] } // newIPAddresses returns a IPAddresses func newIPAddresses(c 
*NetworkingV1beta1Client) *iPAddresses { return &iPAddresses{ - gentype.NewClientWithListAndApply[*v1beta1.IPAddress, *v1beta1.IPAddressList, *networkingv1beta1.IPAddressApplyConfiguration]( + gentype.NewClientWithListAndApply[*networkingv1beta1.IPAddress, *networkingv1beta1.IPAddressList, *applyconfigurationsnetworkingv1beta1.IPAddressApplyConfiguration]( "ipaddresses", c.RESTClient(), scheme.ParameterCodec, "", - func() *v1beta1.IPAddress { return &v1beta1.IPAddress{} }, - func() *v1beta1.IPAddressList { return &v1beta1.IPAddressList{} }), + func() *networkingv1beta1.IPAddress { return &networkingv1beta1.IPAddress{} }, + func() *networkingv1beta1.IPAddressList { return &networkingv1beta1.IPAddressList{} }, + gentype.PrefersProtobuf[*networkingv1beta1.IPAddress](), + ), } } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/networking_client.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/networking_client.go index d35225abd..cb4b0c601 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/networking_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/networking_client.go @@ -19,10 +19,10 @@ limitations under the License. package v1beta1 import ( - "net/http" + http "net/http" - v1beta1 "k8s.io/api/networking/v1beta1" - "k8s.io/client-go/kubernetes/scheme" + networkingv1beta1 "k8s.io/api/networking/v1beta1" + scheme "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -100,10 +100,10 @@ func New(c rest.Interface) *NetworkingV1beta1Client { } func setConfigDefaults(config *rest.Config) error { - gv := v1beta1.SchemeGroupVersion + gv := networkingv1beta1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/servicecidr.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/servicecidr.go index d3336f2ec..6ad1daf74 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/servicecidr.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/servicecidr.go @@ -19,13 +19,13 @@ limitations under the License. package v1beta1 import ( - "context" + context "context" - v1beta1 "k8s.io/api/networking/v1beta1" + networkingv1beta1 "k8s.io/api/networking/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - networkingv1beta1 "k8s.io/client-go/applyconfigurations/networking/v1beta1" + applyconfigurationsnetworkingv1beta1 "k8s.io/client-go/applyconfigurations/networking/v1beta1" gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" ) @@ -38,36 +38,38 @@ type ServiceCIDRsGetter interface { // ServiceCIDRInterface has methods to work with ServiceCIDR resources. 
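Two changes recur across every regenerated typed client in this bump: the constructors now pass `gentype.PrefersProtobuf[...]()`, and `setConfigDefaults` builds its serializer via `rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs)`. The net effect is that the generated clients ask for protobuf on the wire when the caller has not pinned a content type. Below is a hedged sketch of how a consumer could keep JSON explicitly; `newClientsetPinnedToJSON` is a hypothetical helper, and the assumption that an explicit `rest.Config.ContentType` takes precedence over the generated protobuf default is my reading of the 1.32 gentype/rest behaviour and should be verified against the vendored sources.

```go
package example

import (
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

// newClientsetPinnedToJSON builds a clientset that keeps JSON on the wire even
// though the regenerated typed clients prefer protobuf by default.
// Assumption: an explicitly set ContentType on the rest.Config overrides the
// gentype.PrefersProtobuf default negotiation.
func newClientsetPinnedToJSON(cfg *rest.Config) (*kubernetes.Clientset, error) {
	cfgCopy := rest.CopyConfig(cfg)
	cfgCopy.ContentType = runtime.ContentTypeJSON
	return kubernetes.NewForConfig(cfgCopy)
}
```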
type ServiceCIDRInterface interface { - Create(ctx context.Context, serviceCIDR *v1beta1.ServiceCIDR, opts v1.CreateOptions) (*v1beta1.ServiceCIDR, error) - Update(ctx context.Context, serviceCIDR *v1beta1.ServiceCIDR, opts v1.UpdateOptions) (*v1beta1.ServiceCIDR, error) + Create(ctx context.Context, serviceCIDR *networkingv1beta1.ServiceCIDR, opts v1.CreateOptions) (*networkingv1beta1.ServiceCIDR, error) + Update(ctx context.Context, serviceCIDR *networkingv1beta1.ServiceCIDR, opts v1.UpdateOptions) (*networkingv1beta1.ServiceCIDR, error) // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - UpdateStatus(ctx context.Context, serviceCIDR *v1beta1.ServiceCIDR, opts v1.UpdateOptions) (*v1beta1.ServiceCIDR, error) + UpdateStatus(ctx context.Context, serviceCIDR *networkingv1beta1.ServiceCIDR, opts v1.UpdateOptions) (*networkingv1beta1.ServiceCIDR, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.ServiceCIDR, error) - List(ctx context.Context, opts v1.ListOptions) (*v1beta1.ServiceCIDRList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*networkingv1beta1.ServiceCIDR, error) + List(ctx context.Context, opts v1.ListOptions) (*networkingv1beta1.ServiceCIDRList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ServiceCIDR, err error) - Apply(ctx context.Context, serviceCIDR *networkingv1beta1.ServiceCIDRApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.ServiceCIDR, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *networkingv1beta1.ServiceCIDR, err error) + Apply(ctx context.Context, serviceCIDR *applyconfigurationsnetworkingv1beta1.ServiceCIDRApplyConfiguration, opts v1.ApplyOptions) (result *networkingv1beta1.ServiceCIDR, err error) // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
- ApplyStatus(ctx context.Context, serviceCIDR *networkingv1beta1.ServiceCIDRApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.ServiceCIDR, err error) + ApplyStatus(ctx context.Context, serviceCIDR *applyconfigurationsnetworkingv1beta1.ServiceCIDRApplyConfiguration, opts v1.ApplyOptions) (result *networkingv1beta1.ServiceCIDR, err error) ServiceCIDRExpansion } // serviceCIDRs implements ServiceCIDRInterface type serviceCIDRs struct { - *gentype.ClientWithListAndApply[*v1beta1.ServiceCIDR, *v1beta1.ServiceCIDRList, *networkingv1beta1.ServiceCIDRApplyConfiguration] + *gentype.ClientWithListAndApply[*networkingv1beta1.ServiceCIDR, *networkingv1beta1.ServiceCIDRList, *applyconfigurationsnetworkingv1beta1.ServiceCIDRApplyConfiguration] } // newServiceCIDRs returns a ServiceCIDRs func newServiceCIDRs(c *NetworkingV1beta1Client) *serviceCIDRs { return &serviceCIDRs{ - gentype.NewClientWithListAndApply[*v1beta1.ServiceCIDR, *v1beta1.ServiceCIDRList, *networkingv1beta1.ServiceCIDRApplyConfiguration]( + gentype.NewClientWithListAndApply[*networkingv1beta1.ServiceCIDR, *networkingv1beta1.ServiceCIDRList, *applyconfigurationsnetworkingv1beta1.ServiceCIDRApplyConfiguration]( "servicecidrs", c.RESTClient(), scheme.ParameterCodec, "", - func() *v1beta1.ServiceCIDR { return &v1beta1.ServiceCIDR{} }, - func() *v1beta1.ServiceCIDRList { return &v1beta1.ServiceCIDRList{} }), + func() *networkingv1beta1.ServiceCIDR { return &networkingv1beta1.ServiceCIDR{} }, + func() *networkingv1beta1.ServiceCIDRList { return &networkingv1beta1.ServiceCIDRList{} }, + gentype.PrefersProtobuf[*networkingv1beta1.ServiceCIDR](), + ), } } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/node/v1/fake/fake_node_client.go b/vendor/k8s.io/client-go/kubernetes/typed/node/v1/fake/fake_node_client.go index dea10cbad..72e675bd7 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/node/v1/fake/fake_node_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/node/v1/fake/fake_node_client.go @@ -29,7 +29,7 @@ type FakeNodeV1 struct { } func (c *FakeNodeV1) RuntimeClasses() v1.RuntimeClassInterface { - return &FakeRuntimeClasses{c} + return newFakeRuntimeClasses(c) } // RESTClient returns a RESTClient that is used to communicate diff --git a/vendor/k8s.io/client-go/kubernetes/typed/node/v1/fake/fake_runtimeclass.go b/vendor/k8s.io/client-go/kubernetes/typed/node/v1/fake/fake_runtimeclass.go index 0a5270628..a9739f725 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/node/v1/fake/fake_runtimeclass.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/node/v1/fake/fake_runtimeclass.go @@ -19,133 +19,33 @@ limitations under the License. 
package fake import ( - "context" - json "encoding/json" - "fmt" - v1 "k8s.io/api/node/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" nodev1 "k8s.io/client-go/applyconfigurations/node/v1" - testing "k8s.io/client-go/testing" + gentype "k8s.io/client-go/gentype" + typednodev1 "k8s.io/client-go/kubernetes/typed/node/v1" ) -// FakeRuntimeClasses implements RuntimeClassInterface -type FakeRuntimeClasses struct { +// fakeRuntimeClasses implements RuntimeClassInterface +type fakeRuntimeClasses struct { + *gentype.FakeClientWithListAndApply[*v1.RuntimeClass, *v1.RuntimeClassList, *nodev1.RuntimeClassApplyConfiguration] Fake *FakeNodeV1 } -var runtimeclassesResource = v1.SchemeGroupVersion.WithResource("runtimeclasses") - -var runtimeclassesKind = v1.SchemeGroupVersion.WithKind("RuntimeClass") - -// Get takes name of the runtimeClass, and returns the corresponding runtimeClass object, and an error if there is any. -func (c *FakeRuntimeClasses) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.RuntimeClass, err error) { - emptyResult := &v1.RuntimeClass{} - obj, err := c.Fake. - Invokes(testing.NewRootGetActionWithOptions(runtimeclassesResource, name, options), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1.RuntimeClass), err -} - -// List takes label and field selectors, and returns the list of RuntimeClasses that match those selectors. -func (c *FakeRuntimeClasses) List(ctx context.Context, opts metav1.ListOptions) (result *v1.RuntimeClassList, err error) { - emptyResult := &v1.RuntimeClassList{} - obj, err := c.Fake. - Invokes(testing.NewRootListActionWithOptions(runtimeclassesResource, runtimeclassesKind, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1.RuntimeClassList{ListMeta: obj.(*v1.RuntimeClassList).ListMeta} - for _, item := range obj.(*v1.RuntimeClassList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested runtimeClasses. -func (c *FakeRuntimeClasses) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewRootWatchActionWithOptions(runtimeclassesResource, opts)) -} - -// Create takes the representation of a runtimeClass and creates it. Returns the server's representation of the runtimeClass, and an error, if there is any. -func (c *FakeRuntimeClasses) Create(ctx context.Context, runtimeClass *v1.RuntimeClass, opts metav1.CreateOptions) (result *v1.RuntimeClass, err error) { - emptyResult := &v1.RuntimeClass{} - obj, err := c.Fake. - Invokes(testing.NewRootCreateActionWithOptions(runtimeclassesResource, runtimeClass, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1.RuntimeClass), err -} - -// Update takes the representation of a runtimeClass and updates it. Returns the server's representation of the runtimeClass, and an error, if there is any. -func (c *FakeRuntimeClasses) Update(ctx context.Context, runtimeClass *v1.RuntimeClass, opts metav1.UpdateOptions) (result *v1.RuntimeClass, err error) { - emptyResult := &v1.RuntimeClass{} - obj, err := c.Fake. 
- Invokes(testing.NewRootUpdateActionWithOptions(runtimeclassesResource, runtimeClass, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1.RuntimeClass), err -} - -// Delete takes name of the runtimeClass and deletes it. Returns an error if one occurs. -func (c *FakeRuntimeClasses) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewRootDeleteActionWithOptions(runtimeclassesResource, name, opts), &v1.RuntimeClass{}) - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeRuntimeClasses) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewRootDeleteCollectionActionWithOptions(runtimeclassesResource, opts, listOpts) - - _, err := c.Fake.Invokes(action, &v1.RuntimeClassList{}) - return err -} - -// Patch applies the patch and returns the patched runtimeClass. -func (c *FakeRuntimeClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.RuntimeClass, err error) { - emptyResult := &v1.RuntimeClass{} - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceActionWithOptions(runtimeclassesResource, name, pt, data, opts, subresources...), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1.RuntimeClass), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied runtimeClass. -func (c *FakeRuntimeClasses) Apply(ctx context.Context, runtimeClass *nodev1.RuntimeClassApplyConfiguration, opts metav1.ApplyOptions) (result *v1.RuntimeClass, err error) { - if runtimeClass == nil { - return nil, fmt.Errorf("runtimeClass provided to Apply must not be nil") - } - data, err := json.Marshal(runtimeClass) - if err != nil { - return nil, err - } - name := runtimeClass.Name - if name == nil { - return nil, fmt.Errorf("runtimeClass.Name must be provided to Apply") - } - emptyResult := &v1.RuntimeClass{} - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceActionWithOptions(runtimeclassesResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) - if obj == nil { - return emptyResult, err +func newFakeRuntimeClasses(fake *FakeNodeV1) typednodev1.RuntimeClassInterface { + return &fakeRuntimeClasses{ + gentype.NewFakeClientWithListAndApply[*v1.RuntimeClass, *v1.RuntimeClassList, *nodev1.RuntimeClassApplyConfiguration]( + fake.Fake, + "", + v1.SchemeGroupVersion.WithResource("runtimeclasses"), + v1.SchemeGroupVersion.WithKind("RuntimeClass"), + func() *v1.RuntimeClass { return &v1.RuntimeClass{} }, + func() *v1.RuntimeClassList { return &v1.RuntimeClassList{} }, + func(dst, src *v1.RuntimeClassList) { dst.ListMeta = src.ListMeta }, + func(list *v1.RuntimeClassList) []*v1.RuntimeClass { return gentype.ToPointerSlice(list.Items) }, + func(list *v1.RuntimeClassList, items []*v1.RuntimeClass) { + list.Items = gentype.FromPointerSlice(items) + }, + ), + fake, } - return obj.(*v1.RuntimeClass), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/node/v1/node_client.go b/vendor/k8s.io/client-go/kubernetes/typed/node/v1/node_client.go index 844f9fc70..3bde21171 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/node/v1/node_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/node/v1/node_client.go @@ -19,10 +19,10 @@ limitations under the License. 
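The fake_runtimeclass.go rewrite above is representative of every regenerated fake in this bump: the hand-written Get/List/Watch/Create/Update/Delete/Patch/Apply bodies are replaced by an embedded `gentype.FakeClientWithListAndApply` plus small list copy/extract closures. Test code that consumes the fakes through the public interfaces should not need changes; a minimal sketch (the RuntimeClass name and handler are illustrative, not from this repo):

```go
package example

import (
	"context"
	"testing"

	nodev1 "k8s.io/api/node/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
)

// TestRuntimeClassFake exercises the regenerated fake client exactly the way
// tests did before the bump; behaviour at the call site is unchanged.
func TestRuntimeClassFake(t *testing.T) {
	ctx := context.Background()
	clientset := fake.NewSimpleClientset()

	rc := &nodev1.RuntimeClass{
		ObjectMeta: metav1.ObjectMeta{Name: "kata"},
		Handler:    "kata-runtime",
	}
	if _, err := clientset.NodeV1().RuntimeClasses().Create(ctx, rc, metav1.CreateOptions{}); err != nil {
		t.Fatalf("create: %v", err)
	}

	got, err := clientset.NodeV1().RuntimeClasses().Get(ctx, "kata", metav1.GetOptions{})
	if err != nil {
		t.Fatalf("get: %v", err)
	}
	if got.Handler != "kata-runtime" {
		t.Fatalf("unexpected handler %q", got.Handler)
	}
}
```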
package v1 import ( - "net/http" + http "net/http" - v1 "k8s.io/api/node/v1" - "k8s.io/client-go/kubernetes/scheme" + nodev1 "k8s.io/api/node/v1" + scheme "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -85,10 +85,10 @@ func New(c rest.Interface) *NodeV1Client { } func setConfigDefaults(config *rest.Config) error { - gv := v1.SchemeGroupVersion + gv := nodev1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/node/v1/runtimeclass.go b/vendor/k8s.io/client-go/kubernetes/typed/node/v1/runtimeclass.go index 6c8110640..77311fab7 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/node/v1/runtimeclass.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/node/v1/runtimeclass.go @@ -19,13 +19,13 @@ limitations under the License. package v1 import ( - "context" + context "context" - v1 "k8s.io/api/node/v1" + nodev1 "k8s.io/api/node/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - nodev1 "k8s.io/client-go/applyconfigurations/node/v1" + applyconfigurationsnodev1 "k8s.io/client-go/applyconfigurations/node/v1" gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" ) @@ -38,32 +38,34 @@ type RuntimeClassesGetter interface { // RuntimeClassInterface has methods to work with RuntimeClass resources. type RuntimeClassInterface interface { - Create(ctx context.Context, runtimeClass *v1.RuntimeClass, opts metav1.CreateOptions) (*v1.RuntimeClass, error) - Update(ctx context.Context, runtimeClass *v1.RuntimeClass, opts metav1.UpdateOptions) (*v1.RuntimeClass, error) + Create(ctx context.Context, runtimeClass *nodev1.RuntimeClass, opts metav1.CreateOptions) (*nodev1.RuntimeClass, error) + Update(ctx context.Context, runtimeClass *nodev1.RuntimeClass, opts metav1.UpdateOptions) (*nodev1.RuntimeClass, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error - Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.RuntimeClass, error) - List(ctx context.Context, opts metav1.ListOptions) (*v1.RuntimeClassList, error) + Get(ctx context.Context, name string, opts metav1.GetOptions) (*nodev1.RuntimeClass, error) + List(ctx context.Context, opts metav1.ListOptions) (*nodev1.RuntimeClassList, error) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.RuntimeClass, err error) - Apply(ctx context.Context, runtimeClass *nodev1.RuntimeClassApplyConfiguration, opts metav1.ApplyOptions) (result *v1.RuntimeClass, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *nodev1.RuntimeClass, err error) + Apply(ctx context.Context, runtimeClass *applyconfigurationsnodev1.RuntimeClassApplyConfiguration, opts metav1.ApplyOptions) (result *nodev1.RuntimeClass, err error) RuntimeClassExpansion } // runtimeClasses implements RuntimeClassInterface type runtimeClasses struct { - 
*gentype.ClientWithListAndApply[*v1.RuntimeClass, *v1.RuntimeClassList, *nodev1.RuntimeClassApplyConfiguration] + *gentype.ClientWithListAndApply[*nodev1.RuntimeClass, *nodev1.RuntimeClassList, *applyconfigurationsnodev1.RuntimeClassApplyConfiguration] } // newRuntimeClasses returns a RuntimeClasses func newRuntimeClasses(c *NodeV1Client) *runtimeClasses { return &runtimeClasses{ - gentype.NewClientWithListAndApply[*v1.RuntimeClass, *v1.RuntimeClassList, *nodev1.RuntimeClassApplyConfiguration]( + gentype.NewClientWithListAndApply[*nodev1.RuntimeClass, *nodev1.RuntimeClassList, *applyconfigurationsnodev1.RuntimeClassApplyConfiguration]( "runtimeclasses", c.RESTClient(), scheme.ParameterCodec, "", - func() *v1.RuntimeClass { return &v1.RuntimeClass{} }, - func() *v1.RuntimeClassList { return &v1.RuntimeClassList{} }), + func() *nodev1.RuntimeClass { return &nodev1.RuntimeClass{} }, + func() *nodev1.RuntimeClassList { return &nodev1.RuntimeClassList{} }, + gentype.PrefersProtobuf[*nodev1.RuntimeClass](), + ), } } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/fake/fake_node_client.go b/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/fake/fake_node_client.go index 21ab9de33..6a63f99da 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/fake/fake_node_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/fake/fake_node_client.go @@ -29,7 +29,7 @@ type FakeNodeV1alpha1 struct { } func (c *FakeNodeV1alpha1) RuntimeClasses() v1alpha1.RuntimeClassInterface { - return &FakeRuntimeClasses{c} + return newFakeRuntimeClasses(c) } // RESTClient returns a RESTClient that is used to communicate diff --git a/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/fake/fake_runtimeclass.go b/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/fake/fake_runtimeclass.go index bcd261d00..6761098c6 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/fake/fake_runtimeclass.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/fake/fake_runtimeclass.go @@ -19,133 +19,35 @@ limitations under the License. package fake import ( - "context" - json "encoding/json" - "fmt" - v1alpha1 "k8s.io/api/node/v1alpha1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" nodev1alpha1 "k8s.io/client-go/applyconfigurations/node/v1alpha1" - testing "k8s.io/client-go/testing" + gentype "k8s.io/client-go/gentype" + typednodev1alpha1 "k8s.io/client-go/kubernetes/typed/node/v1alpha1" ) -// FakeRuntimeClasses implements RuntimeClassInterface -type FakeRuntimeClasses struct { +// fakeRuntimeClasses implements RuntimeClassInterface +type fakeRuntimeClasses struct { + *gentype.FakeClientWithListAndApply[*v1alpha1.RuntimeClass, *v1alpha1.RuntimeClassList, *nodev1alpha1.RuntimeClassApplyConfiguration] Fake *FakeNodeV1alpha1 } -var runtimeclassesResource = v1alpha1.SchemeGroupVersion.WithResource("runtimeclasses") - -var runtimeclassesKind = v1alpha1.SchemeGroupVersion.WithKind("RuntimeClass") - -// Get takes name of the runtimeClass, and returns the corresponding runtimeClass object, and an error if there is any. -func (c *FakeRuntimeClasses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.RuntimeClass, err error) { - emptyResult := &v1alpha1.RuntimeClass{} - obj, err := c.Fake. 
- Invokes(testing.NewRootGetActionWithOptions(runtimeclassesResource, name, options), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1alpha1.RuntimeClass), err -} - -// List takes label and field selectors, and returns the list of RuntimeClasses that match those selectors. -func (c *FakeRuntimeClasses) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.RuntimeClassList, err error) { - emptyResult := &v1alpha1.RuntimeClassList{} - obj, err := c.Fake. - Invokes(testing.NewRootListActionWithOptions(runtimeclassesResource, runtimeclassesKind, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1alpha1.RuntimeClassList{ListMeta: obj.(*v1alpha1.RuntimeClassList).ListMeta} - for _, item := range obj.(*v1alpha1.RuntimeClassList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested runtimeClasses. -func (c *FakeRuntimeClasses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewRootWatchActionWithOptions(runtimeclassesResource, opts)) -} - -// Create takes the representation of a runtimeClass and creates it. Returns the server's representation of the runtimeClass, and an error, if there is any. -func (c *FakeRuntimeClasses) Create(ctx context.Context, runtimeClass *v1alpha1.RuntimeClass, opts v1.CreateOptions) (result *v1alpha1.RuntimeClass, err error) { - emptyResult := &v1alpha1.RuntimeClass{} - obj, err := c.Fake. - Invokes(testing.NewRootCreateActionWithOptions(runtimeclassesResource, runtimeClass, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1alpha1.RuntimeClass), err -} - -// Update takes the representation of a runtimeClass and updates it. Returns the server's representation of the runtimeClass, and an error, if there is any. -func (c *FakeRuntimeClasses) Update(ctx context.Context, runtimeClass *v1alpha1.RuntimeClass, opts v1.UpdateOptions) (result *v1alpha1.RuntimeClass, err error) { - emptyResult := &v1alpha1.RuntimeClass{} - obj, err := c.Fake. - Invokes(testing.NewRootUpdateActionWithOptions(runtimeclassesResource, runtimeClass, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1alpha1.RuntimeClass), err -} - -// Delete takes name of the runtimeClass and deletes it. Returns an error if one occurs. -func (c *FakeRuntimeClasses) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewRootDeleteActionWithOptions(runtimeclassesResource, name, opts), &v1alpha1.RuntimeClass{}) - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeRuntimeClasses) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewRootDeleteCollectionActionWithOptions(runtimeclassesResource, opts, listOpts) - - _, err := c.Fake.Invokes(action, &v1alpha1.RuntimeClassList{}) - return err -} - -// Patch applies the patch and returns the patched runtimeClass. -func (c *FakeRuntimeClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.RuntimeClass, err error) { - emptyResult := &v1alpha1.RuntimeClass{} - obj, err := c.Fake. 
- Invokes(testing.NewRootPatchSubresourceActionWithOptions(runtimeclassesResource, name, pt, data, opts, subresources...), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1alpha1.RuntimeClass), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied runtimeClass. -func (c *FakeRuntimeClasses) Apply(ctx context.Context, runtimeClass *nodev1alpha1.RuntimeClassApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.RuntimeClass, err error) { - if runtimeClass == nil { - return nil, fmt.Errorf("runtimeClass provided to Apply must not be nil") - } - data, err := json.Marshal(runtimeClass) - if err != nil { - return nil, err - } - name := runtimeClass.Name - if name == nil { - return nil, fmt.Errorf("runtimeClass.Name must be provided to Apply") - } - emptyResult := &v1alpha1.RuntimeClass{} - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceActionWithOptions(runtimeclassesResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) - if obj == nil { - return emptyResult, err +func newFakeRuntimeClasses(fake *FakeNodeV1alpha1) typednodev1alpha1.RuntimeClassInterface { + return &fakeRuntimeClasses{ + gentype.NewFakeClientWithListAndApply[*v1alpha1.RuntimeClass, *v1alpha1.RuntimeClassList, *nodev1alpha1.RuntimeClassApplyConfiguration]( + fake.Fake, + "", + v1alpha1.SchemeGroupVersion.WithResource("runtimeclasses"), + v1alpha1.SchemeGroupVersion.WithKind("RuntimeClass"), + func() *v1alpha1.RuntimeClass { return &v1alpha1.RuntimeClass{} }, + func() *v1alpha1.RuntimeClassList { return &v1alpha1.RuntimeClassList{} }, + func(dst, src *v1alpha1.RuntimeClassList) { dst.ListMeta = src.ListMeta }, + func(list *v1alpha1.RuntimeClassList) []*v1alpha1.RuntimeClass { + return gentype.ToPointerSlice(list.Items) + }, + func(list *v1alpha1.RuntimeClassList, items []*v1alpha1.RuntimeClass) { + list.Items = gentype.FromPointerSlice(items) + }, + ), + fake, } - return obj.(*v1alpha1.RuntimeClass), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/node_client.go b/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/node_client.go index 2a197d58e..e47ef3548 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/node_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/node_client.go @@ -19,10 +19,10 @@ limitations under the License. 
package v1alpha1 import ( - "net/http" + http "net/http" - v1alpha1 "k8s.io/api/node/v1alpha1" - "k8s.io/client-go/kubernetes/scheme" + nodev1alpha1 "k8s.io/api/node/v1alpha1" + scheme "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -85,10 +85,10 @@ func New(c rest.Interface) *NodeV1alpha1Client { } func setConfigDefaults(config *rest.Config) error { - gv := v1alpha1.SchemeGroupVersion + gv := nodev1alpha1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/runtimeclass.go b/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/runtimeclass.go index 60aa4a213..f9da4f07b 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/runtimeclass.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/runtimeclass.go @@ -19,13 +19,13 @@ limitations under the License. package v1alpha1 import ( - "context" + context "context" - v1alpha1 "k8s.io/api/node/v1alpha1" + nodev1alpha1 "k8s.io/api/node/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - nodev1alpha1 "k8s.io/client-go/applyconfigurations/node/v1alpha1" + applyconfigurationsnodev1alpha1 "k8s.io/client-go/applyconfigurations/node/v1alpha1" gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" ) @@ -38,32 +38,34 @@ type RuntimeClassesGetter interface { // RuntimeClassInterface has methods to work with RuntimeClass resources. 
type RuntimeClassInterface interface { - Create(ctx context.Context, runtimeClass *v1alpha1.RuntimeClass, opts v1.CreateOptions) (*v1alpha1.RuntimeClass, error) - Update(ctx context.Context, runtimeClass *v1alpha1.RuntimeClass, opts v1.UpdateOptions) (*v1alpha1.RuntimeClass, error) + Create(ctx context.Context, runtimeClass *nodev1alpha1.RuntimeClass, opts v1.CreateOptions) (*nodev1alpha1.RuntimeClass, error) + Update(ctx context.Context, runtimeClass *nodev1alpha1.RuntimeClass, opts v1.UpdateOptions) (*nodev1alpha1.RuntimeClass, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.RuntimeClass, error) - List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.RuntimeClassList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*nodev1alpha1.RuntimeClass, error) + List(ctx context.Context, opts v1.ListOptions) (*nodev1alpha1.RuntimeClassList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.RuntimeClass, err error) - Apply(ctx context.Context, runtimeClass *nodev1alpha1.RuntimeClassApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.RuntimeClass, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *nodev1alpha1.RuntimeClass, err error) + Apply(ctx context.Context, runtimeClass *applyconfigurationsnodev1alpha1.RuntimeClassApplyConfiguration, opts v1.ApplyOptions) (result *nodev1alpha1.RuntimeClass, err error) RuntimeClassExpansion } // runtimeClasses implements RuntimeClassInterface type runtimeClasses struct { - *gentype.ClientWithListAndApply[*v1alpha1.RuntimeClass, *v1alpha1.RuntimeClassList, *nodev1alpha1.RuntimeClassApplyConfiguration] + *gentype.ClientWithListAndApply[*nodev1alpha1.RuntimeClass, *nodev1alpha1.RuntimeClassList, *applyconfigurationsnodev1alpha1.RuntimeClassApplyConfiguration] } // newRuntimeClasses returns a RuntimeClasses func newRuntimeClasses(c *NodeV1alpha1Client) *runtimeClasses { return &runtimeClasses{ - gentype.NewClientWithListAndApply[*v1alpha1.RuntimeClass, *v1alpha1.RuntimeClassList, *nodev1alpha1.RuntimeClassApplyConfiguration]( + gentype.NewClientWithListAndApply[*nodev1alpha1.RuntimeClass, *nodev1alpha1.RuntimeClassList, *applyconfigurationsnodev1alpha1.RuntimeClassApplyConfiguration]( "runtimeclasses", c.RESTClient(), scheme.ParameterCodec, "", - func() *v1alpha1.RuntimeClass { return &v1alpha1.RuntimeClass{} }, - func() *v1alpha1.RuntimeClassList { return &v1alpha1.RuntimeClassList{} }), + func() *nodev1alpha1.RuntimeClass { return &nodev1alpha1.RuntimeClass{} }, + func() *nodev1alpha1.RuntimeClassList { return &nodev1alpha1.RuntimeClassList{} }, + gentype.PrefersProtobuf[*nodev1alpha1.RuntimeClass](), + ), } } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/fake/fake_node_client.go b/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/fake/fake_node_client.go index 36976ce54..6612a9889 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/fake/fake_node_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/fake/fake_node_client.go @@ -29,7 +29,7 @@ type FakeNodeV1beta1 struct { } func (c *FakeNodeV1beta1) RuntimeClasses() 
v1beta1.RuntimeClassInterface { - return &FakeRuntimeClasses{c} + return newFakeRuntimeClasses(c) } // RESTClient returns a RESTClient that is used to communicate diff --git a/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/fake/fake_runtimeclass.go b/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/fake/fake_runtimeclass.go index a3c8c018c..113ae8852 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/fake/fake_runtimeclass.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/fake/fake_runtimeclass.go @@ -19,133 +19,35 @@ limitations under the License. package fake import ( - "context" - json "encoding/json" - "fmt" - v1beta1 "k8s.io/api/node/v1beta1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" nodev1beta1 "k8s.io/client-go/applyconfigurations/node/v1beta1" - testing "k8s.io/client-go/testing" + gentype "k8s.io/client-go/gentype" + typednodev1beta1 "k8s.io/client-go/kubernetes/typed/node/v1beta1" ) -// FakeRuntimeClasses implements RuntimeClassInterface -type FakeRuntimeClasses struct { +// fakeRuntimeClasses implements RuntimeClassInterface +type fakeRuntimeClasses struct { + *gentype.FakeClientWithListAndApply[*v1beta1.RuntimeClass, *v1beta1.RuntimeClassList, *nodev1beta1.RuntimeClassApplyConfiguration] Fake *FakeNodeV1beta1 } -var runtimeclassesResource = v1beta1.SchemeGroupVersion.WithResource("runtimeclasses") - -var runtimeclassesKind = v1beta1.SchemeGroupVersion.WithKind("RuntimeClass") - -// Get takes name of the runtimeClass, and returns the corresponding runtimeClass object, and an error if there is any. -func (c *FakeRuntimeClasses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.RuntimeClass, err error) { - emptyResult := &v1beta1.RuntimeClass{} - obj, err := c.Fake. - Invokes(testing.NewRootGetActionWithOptions(runtimeclassesResource, name, options), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.RuntimeClass), err -} - -// List takes label and field selectors, and returns the list of RuntimeClasses that match those selectors. -func (c *FakeRuntimeClasses) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.RuntimeClassList, err error) { - emptyResult := &v1beta1.RuntimeClassList{} - obj, err := c.Fake. - Invokes(testing.NewRootListActionWithOptions(runtimeclassesResource, runtimeclassesKind, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1beta1.RuntimeClassList{ListMeta: obj.(*v1beta1.RuntimeClassList).ListMeta} - for _, item := range obj.(*v1beta1.RuntimeClassList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested runtimeClasses. -func (c *FakeRuntimeClasses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewRootWatchActionWithOptions(runtimeclassesResource, opts)) -} - -// Create takes the representation of a runtimeClass and creates it. Returns the server's representation of the runtimeClass, and an error, if there is any. 
-func (c *FakeRuntimeClasses) Create(ctx context.Context, runtimeClass *v1beta1.RuntimeClass, opts v1.CreateOptions) (result *v1beta1.RuntimeClass, err error) { - emptyResult := &v1beta1.RuntimeClass{} - obj, err := c.Fake. - Invokes(testing.NewRootCreateActionWithOptions(runtimeclassesResource, runtimeClass, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.RuntimeClass), err -} - -// Update takes the representation of a runtimeClass and updates it. Returns the server's representation of the runtimeClass, and an error, if there is any. -func (c *FakeRuntimeClasses) Update(ctx context.Context, runtimeClass *v1beta1.RuntimeClass, opts v1.UpdateOptions) (result *v1beta1.RuntimeClass, err error) { - emptyResult := &v1beta1.RuntimeClass{} - obj, err := c.Fake. - Invokes(testing.NewRootUpdateActionWithOptions(runtimeclassesResource, runtimeClass, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.RuntimeClass), err -} - -// Delete takes name of the runtimeClass and deletes it. Returns an error if one occurs. -func (c *FakeRuntimeClasses) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewRootDeleteActionWithOptions(runtimeclassesResource, name, opts), &v1beta1.RuntimeClass{}) - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeRuntimeClasses) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewRootDeleteCollectionActionWithOptions(runtimeclassesResource, opts, listOpts) - - _, err := c.Fake.Invokes(action, &v1beta1.RuntimeClassList{}) - return err -} - -// Patch applies the patch and returns the patched runtimeClass. -func (c *FakeRuntimeClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.RuntimeClass, err error) { - emptyResult := &v1beta1.RuntimeClass{} - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceActionWithOptions(runtimeclassesResource, name, pt, data, opts, subresources...), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.RuntimeClass), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied runtimeClass. -func (c *FakeRuntimeClasses) Apply(ctx context.Context, runtimeClass *nodev1beta1.RuntimeClassApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.RuntimeClass, err error) { - if runtimeClass == nil { - return nil, fmt.Errorf("runtimeClass provided to Apply must not be nil") - } - data, err := json.Marshal(runtimeClass) - if err != nil { - return nil, err - } - name := runtimeClass.Name - if name == nil { - return nil, fmt.Errorf("runtimeClass.Name must be provided to Apply") - } - emptyResult := &v1beta1.RuntimeClass{} - obj, err := c.Fake. 
- Invokes(testing.NewRootPatchSubresourceActionWithOptions(runtimeclassesResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) - if obj == nil { - return emptyResult, err +func newFakeRuntimeClasses(fake *FakeNodeV1beta1) typednodev1beta1.RuntimeClassInterface { + return &fakeRuntimeClasses{ + gentype.NewFakeClientWithListAndApply[*v1beta1.RuntimeClass, *v1beta1.RuntimeClassList, *nodev1beta1.RuntimeClassApplyConfiguration]( + fake.Fake, + "", + v1beta1.SchemeGroupVersion.WithResource("runtimeclasses"), + v1beta1.SchemeGroupVersion.WithKind("RuntimeClass"), + func() *v1beta1.RuntimeClass { return &v1beta1.RuntimeClass{} }, + func() *v1beta1.RuntimeClassList { return &v1beta1.RuntimeClassList{} }, + func(dst, src *v1beta1.RuntimeClassList) { dst.ListMeta = src.ListMeta }, + func(list *v1beta1.RuntimeClassList) []*v1beta1.RuntimeClass { + return gentype.ToPointerSlice(list.Items) + }, + func(list *v1beta1.RuntimeClassList, items []*v1beta1.RuntimeClass) { + list.Items = gentype.FromPointerSlice(items) + }, + ), + fake, } - return obj.(*v1beta1.RuntimeClass), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/node_client.go b/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/node_client.go index 4f6802ffa..c7864a479 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/node_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/node_client.go @@ -19,10 +19,10 @@ limitations under the License. package v1beta1 import ( - "net/http" + http "net/http" - v1beta1 "k8s.io/api/node/v1beta1" - "k8s.io/client-go/kubernetes/scheme" + nodev1beta1 "k8s.io/api/node/v1beta1" + scheme "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -85,10 +85,10 @@ func New(c rest.Interface) *NodeV1beta1Client { } func setConfigDefaults(config *rest.Config) error { - gv := v1beta1.SchemeGroupVersion + gv := nodev1beta1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/runtimeclass.go b/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/runtimeclass.go index 8e15d5288..18089defd 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/runtimeclass.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/runtimeclass.go @@ -19,13 +19,13 @@ limitations under the License. package v1beta1 import ( - "context" + context "context" - v1beta1 "k8s.io/api/node/v1beta1" + nodev1beta1 "k8s.io/api/node/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - nodev1beta1 "k8s.io/client-go/applyconfigurations/node/v1beta1" + applyconfigurationsnodev1beta1 "k8s.io/client-go/applyconfigurations/node/v1beta1" gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" ) @@ -38,32 +38,34 @@ type RuntimeClassesGetter interface { // RuntimeClassInterface has methods to work with RuntimeClass resources. 
type RuntimeClassInterface interface { - Create(ctx context.Context, runtimeClass *v1beta1.RuntimeClass, opts v1.CreateOptions) (*v1beta1.RuntimeClass, error) - Update(ctx context.Context, runtimeClass *v1beta1.RuntimeClass, opts v1.UpdateOptions) (*v1beta1.RuntimeClass, error) + Create(ctx context.Context, runtimeClass *nodev1beta1.RuntimeClass, opts v1.CreateOptions) (*nodev1beta1.RuntimeClass, error) + Update(ctx context.Context, runtimeClass *nodev1beta1.RuntimeClass, opts v1.UpdateOptions) (*nodev1beta1.RuntimeClass, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.RuntimeClass, error) - List(ctx context.Context, opts v1.ListOptions) (*v1beta1.RuntimeClassList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*nodev1beta1.RuntimeClass, error) + List(ctx context.Context, opts v1.ListOptions) (*nodev1beta1.RuntimeClassList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.RuntimeClass, err error) - Apply(ctx context.Context, runtimeClass *nodev1beta1.RuntimeClassApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.RuntimeClass, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *nodev1beta1.RuntimeClass, err error) + Apply(ctx context.Context, runtimeClass *applyconfigurationsnodev1beta1.RuntimeClassApplyConfiguration, opts v1.ApplyOptions) (result *nodev1beta1.RuntimeClass, err error) RuntimeClassExpansion } // runtimeClasses implements RuntimeClassInterface type runtimeClasses struct { - *gentype.ClientWithListAndApply[*v1beta1.RuntimeClass, *v1beta1.RuntimeClassList, *nodev1beta1.RuntimeClassApplyConfiguration] + *gentype.ClientWithListAndApply[*nodev1beta1.RuntimeClass, *nodev1beta1.RuntimeClassList, *applyconfigurationsnodev1beta1.RuntimeClassApplyConfiguration] } // newRuntimeClasses returns a RuntimeClasses func newRuntimeClasses(c *NodeV1beta1Client) *runtimeClasses { return &runtimeClasses{ - gentype.NewClientWithListAndApply[*v1beta1.RuntimeClass, *v1beta1.RuntimeClassList, *nodev1beta1.RuntimeClassApplyConfiguration]( + gentype.NewClientWithListAndApply[*nodev1beta1.RuntimeClass, *nodev1beta1.RuntimeClassList, *applyconfigurationsnodev1beta1.RuntimeClassApplyConfiguration]( "runtimeclasses", c.RESTClient(), scheme.ParameterCodec, "", - func() *v1beta1.RuntimeClass { return &v1beta1.RuntimeClass{} }, - func() *v1beta1.RuntimeClassList { return &v1beta1.RuntimeClassList{} }), + func() *nodev1beta1.RuntimeClass { return &nodev1beta1.RuntimeClass{} }, + func() *nodev1beta1.RuntimeClassList { return &nodev1beta1.RuntimeClassList{} }, + gentype.PrefersProtobuf[*nodev1beta1.RuntimeClass](), + ), } } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1/eviction.go b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1/eviction.go index 22173d36d..6cf60806a 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1/eviction.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1/eviction.go @@ -19,7 +19,7 @@ limitations under the License. 
package v1 import ( - v1 "k8s.io/api/policy/v1" + policyv1 "k8s.io/api/policy/v1" gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" ) @@ -37,17 +37,19 @@ type EvictionInterface interface { // evictions implements EvictionInterface type evictions struct { - *gentype.Client[*v1.Eviction] + *gentype.Client[*policyv1.Eviction] } // newEvictions returns a Evictions func newEvictions(c *PolicyV1Client, namespace string) *evictions { return &evictions{ - gentype.NewClient[*v1.Eviction]( + gentype.NewClient[*policyv1.Eviction]( "evictions", c.RESTClient(), scheme.ParameterCodec, namespace, - func() *v1.Eviction { return &v1.Eviction{} }), + func() *policyv1.Eviction { return &policyv1.Eviction{} }, + gentype.PrefersProtobuf[*policyv1.Eviction](), + ), } } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1/fake/fake_eviction.go b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1/fake/fake_eviction.go index a579067ce..247cf19dd 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1/fake/fake_eviction.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1/fake/fake_eviction.go @@ -18,8 +18,27 @@ limitations under the License. package fake -// FakeEvictions implements EvictionInterface -type FakeEvictions struct { +import ( + v1 "k8s.io/api/policy/v1" + gentype "k8s.io/client-go/gentype" + policyv1 "k8s.io/client-go/kubernetes/typed/policy/v1" +) + +// fakeEvictions implements EvictionInterface +type fakeEvictions struct { + *gentype.FakeClient[*v1.Eviction] Fake *FakePolicyV1 - ns string +} + +func newFakeEvictions(fake *FakePolicyV1, namespace string) policyv1.EvictionInterface { + return &fakeEvictions{ + gentype.NewFakeClient[*v1.Eviction]( + fake.Fake, + namespace, + v1.SchemeGroupVersion.WithResource("evictions"), + v1.SchemeGroupVersion.WithKind("Eviction"), + func() *v1.Eviction { return &v1.Eviction{} }, + ), + fake, + } } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1/fake/fake_eviction_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1/fake/fake_eviction_expansion.go index 1b6b4ade1..1e1d5e15b 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1/fake/fake_eviction_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1/fake/fake_eviction_expansion.go @@ -24,10 +24,10 @@ import ( core "k8s.io/client-go/testing" ) -func (c *FakeEvictions) Evict(ctx context.Context, eviction *policy.Eviction) error { +func (c *fakeEvictions) Evict(ctx context.Context, eviction *policy.Eviction) error { action := core.CreateActionImpl{} action.Verb = "create" - action.Namespace = c.ns + action.Namespace = c.Namespace() action.Resource = schema.GroupVersionResource{Group: "", Version: "v1", Resource: "pods"} action.Subresource = "eviction" action.Object = eviction diff --git a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1/fake/fake_poddisruptionbudget.go b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1/fake/fake_poddisruptionbudget.go index de2bcc1b0..98e3b1adb 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1/fake/fake_poddisruptionbudget.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1/fake/fake_poddisruptionbudget.go @@ -19,179 +19,35 @@ limitations under the License. 
package fake import ( - "context" - json "encoding/json" - "fmt" - v1 "k8s.io/api/policy/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" policyv1 "k8s.io/client-go/applyconfigurations/policy/v1" - testing "k8s.io/client-go/testing" + gentype "k8s.io/client-go/gentype" + typedpolicyv1 "k8s.io/client-go/kubernetes/typed/policy/v1" ) -// FakePodDisruptionBudgets implements PodDisruptionBudgetInterface -type FakePodDisruptionBudgets struct { +// fakePodDisruptionBudgets implements PodDisruptionBudgetInterface +type fakePodDisruptionBudgets struct { + *gentype.FakeClientWithListAndApply[*v1.PodDisruptionBudget, *v1.PodDisruptionBudgetList, *policyv1.PodDisruptionBudgetApplyConfiguration] Fake *FakePolicyV1 - ns string -} - -var poddisruptionbudgetsResource = v1.SchemeGroupVersion.WithResource("poddisruptionbudgets") - -var poddisruptionbudgetsKind = v1.SchemeGroupVersion.WithKind("PodDisruptionBudget") - -// Get takes name of the podDisruptionBudget, and returns the corresponding podDisruptionBudget object, and an error if there is any. -func (c *FakePodDisruptionBudgets) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.PodDisruptionBudget, err error) { - emptyResult := &v1.PodDisruptionBudget{} - obj, err := c.Fake. - Invokes(testing.NewGetActionWithOptions(poddisruptionbudgetsResource, c.ns, name, options), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1.PodDisruptionBudget), err -} - -// List takes label and field selectors, and returns the list of PodDisruptionBudgets that match those selectors. -func (c *FakePodDisruptionBudgets) List(ctx context.Context, opts metav1.ListOptions) (result *v1.PodDisruptionBudgetList, err error) { - emptyResult := &v1.PodDisruptionBudgetList{} - obj, err := c.Fake. - Invokes(testing.NewListActionWithOptions(poddisruptionbudgetsResource, poddisruptionbudgetsKind, c.ns, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1.PodDisruptionBudgetList{ListMeta: obj.(*v1.PodDisruptionBudgetList).ListMeta} - for _, item := range obj.(*v1.PodDisruptionBudgetList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested podDisruptionBudgets. -func (c *FakePodDisruptionBudgets) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchActionWithOptions(poddisruptionbudgetsResource, c.ns, opts)) - -} - -// Create takes the representation of a podDisruptionBudget and creates it. Returns the server's representation of the podDisruptionBudget, and an error, if there is any. -func (c *FakePodDisruptionBudgets) Create(ctx context.Context, podDisruptionBudget *v1.PodDisruptionBudget, opts metav1.CreateOptions) (result *v1.PodDisruptionBudget, err error) { - emptyResult := &v1.PodDisruptionBudget{} - obj, err := c.Fake. - Invokes(testing.NewCreateActionWithOptions(poddisruptionbudgetsResource, c.ns, podDisruptionBudget, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1.PodDisruptionBudget), err -} - -// Update takes the representation of a podDisruptionBudget and updates it. 
Returns the server's representation of the podDisruptionBudget, and an error, if there is any. -func (c *FakePodDisruptionBudgets) Update(ctx context.Context, podDisruptionBudget *v1.PodDisruptionBudget, opts metav1.UpdateOptions) (result *v1.PodDisruptionBudget, err error) { - emptyResult := &v1.PodDisruptionBudget{} - obj, err := c.Fake. - Invokes(testing.NewUpdateActionWithOptions(poddisruptionbudgetsResource, c.ns, podDisruptionBudget, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1.PodDisruptionBudget), err } -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakePodDisruptionBudgets) UpdateStatus(ctx context.Context, podDisruptionBudget *v1.PodDisruptionBudget, opts metav1.UpdateOptions) (result *v1.PodDisruptionBudget, err error) { - emptyResult := &v1.PodDisruptionBudget{} - obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceActionWithOptions(poddisruptionbudgetsResource, "status", c.ns, podDisruptionBudget, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1.PodDisruptionBudget), err -} - -// Delete takes name of the podDisruptionBudget and deletes it. Returns an error if one occurs. -func (c *FakePodDisruptionBudgets) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(poddisruptionbudgetsResource, c.ns, name, opts), &v1.PodDisruptionBudget{}) - - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakePodDisruptionBudgets) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewDeleteCollectionActionWithOptions(poddisruptionbudgetsResource, c.ns, opts, listOpts) - - _, err := c.Fake.Invokes(action, &v1.PodDisruptionBudgetList{}) - return err -} - -// Patch applies the patch and returns the patched podDisruptionBudget. -func (c *FakePodDisruptionBudgets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.PodDisruptionBudget, err error) { - emptyResult := &v1.PodDisruptionBudget{} - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceActionWithOptions(poddisruptionbudgetsResource, c.ns, name, pt, data, opts, subresources...), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1.PodDisruptionBudget), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied podDisruptionBudget. -func (c *FakePodDisruptionBudgets) Apply(ctx context.Context, podDisruptionBudget *policyv1.PodDisruptionBudgetApplyConfiguration, opts metav1.ApplyOptions) (result *v1.PodDisruptionBudget, err error) { - if podDisruptionBudget == nil { - return nil, fmt.Errorf("podDisruptionBudget provided to Apply must not be nil") - } - data, err := json.Marshal(podDisruptionBudget) - if err != nil { - return nil, err - } - name := podDisruptionBudget.Name - if name == nil { - return nil, fmt.Errorf("podDisruptionBudget.Name must be provided to Apply") - } - emptyResult := &v1.PodDisruptionBudget{} - obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceActionWithOptions(poddisruptionbudgetsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1.PodDisruptionBudget), err -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *FakePodDisruptionBudgets) ApplyStatus(ctx context.Context, podDisruptionBudget *policyv1.PodDisruptionBudgetApplyConfiguration, opts metav1.ApplyOptions) (result *v1.PodDisruptionBudget, err error) { - if podDisruptionBudget == nil { - return nil, fmt.Errorf("podDisruptionBudget provided to Apply must not be nil") - } - data, err := json.Marshal(podDisruptionBudget) - if err != nil { - return nil, err - } - name := podDisruptionBudget.Name - if name == nil { - return nil, fmt.Errorf("podDisruptionBudget.Name must be provided to Apply") - } - emptyResult := &v1.PodDisruptionBudget{} - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceActionWithOptions(poddisruptionbudgetsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) - - if obj == nil { - return emptyResult, err +func newFakePodDisruptionBudgets(fake *FakePolicyV1, namespace string) typedpolicyv1.PodDisruptionBudgetInterface { + return &fakePodDisruptionBudgets{ + gentype.NewFakeClientWithListAndApply[*v1.PodDisruptionBudget, *v1.PodDisruptionBudgetList, *policyv1.PodDisruptionBudgetApplyConfiguration]( + fake.Fake, + namespace, + v1.SchemeGroupVersion.WithResource("poddisruptionbudgets"), + v1.SchemeGroupVersion.WithKind("PodDisruptionBudget"), + func() *v1.PodDisruptionBudget { return &v1.PodDisruptionBudget{} }, + func() *v1.PodDisruptionBudgetList { return &v1.PodDisruptionBudgetList{} }, + func(dst, src *v1.PodDisruptionBudgetList) { dst.ListMeta = src.ListMeta }, + func(list *v1.PodDisruptionBudgetList) []*v1.PodDisruptionBudget { + return gentype.ToPointerSlice(list.Items) + }, + func(list *v1.PodDisruptionBudgetList, items []*v1.PodDisruptionBudget) { + list.Items = gentype.FromPointerSlice(items) + }, + ), + fake, } - return obj.(*v1.PodDisruptionBudget), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1/fake/fake_policy_client.go b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1/fake/fake_policy_client.go index d5bb3d549..49390c273 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1/fake/fake_policy_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1/fake/fake_policy_client.go @@ -29,11 +29,11 @@ type FakePolicyV1 struct { } func (c *FakePolicyV1) Evictions(namespace string) v1.EvictionInterface { - return &FakeEvictions{c, namespace} + return newFakeEvictions(c, namespace) } func (c *FakePolicyV1) PodDisruptionBudgets(namespace string) v1.PodDisruptionBudgetInterface { - return &FakePodDisruptionBudgets{c, namespace} + return newFakePodDisruptionBudgets(c, namespace) } // RESTClient returns a RESTClient that is used to communicate diff --git a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1/poddisruptionbudget.go b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1/poddisruptionbudget.go index 6d011cbce..d45ed21f5 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1/poddisruptionbudget.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1/poddisruptionbudget.go @@ -19,13 +19,13 @@ limitations under the License. 
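
The hand-rolled label filtering that FakePodDisruptionBudgets.List used to implement is now delegated to the shared gentype fake machinery (the getItems/setItems/copyListMeta callbacks passed to NewFakeClientWithListAndApply above). A minimal sketch of a test that relies on that behavior, with illustrative object names:

package example

import (
	"context"
	"testing"

	policyv1 "k8s.io/api/policy/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
)

// Sketch: list PodDisruptionBudgets through the fake clientset and expect
// label-selector filtering to be applied by the embedded gentype fake.
func TestListPDBsByLabel(t *testing.T) {
	clientset := fake.NewSimpleClientset(
		&policyv1.PodDisruptionBudget{ObjectMeta: metav1.ObjectMeta{
			Name: "pdb-a", Namespace: "default", Labels: map[string]string{"app": "a"},
		}},
		&policyv1.PodDisruptionBudget{ObjectMeta: metav1.ObjectMeta{
			Name: "pdb-b", Namespace: "default", Labels: map[string]string{"app": "b"},
		}},
	)

	list, err := clientset.PolicyV1().PodDisruptionBudgets("default").List(
		context.Background(), metav1.ListOptions{LabelSelector: "app=a"})
	if err != nil {
		t.Fatalf("list: %v", err)
	}
	if len(list.Items) != 1 || list.Items[0].Name != "pdb-a" {
		t.Fatalf("expected only pdb-a, got %d items", len(list.Items))
	}
}
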
package v1 import ( - "context" + context "context" - v1 "k8s.io/api/policy/v1" + policyv1 "k8s.io/api/policy/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - policyv1 "k8s.io/client-go/applyconfigurations/policy/v1" + applyconfigurationspolicyv1 "k8s.io/client-go/applyconfigurations/policy/v1" gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" ) @@ -38,36 +38,38 @@ type PodDisruptionBudgetsGetter interface { // PodDisruptionBudgetInterface has methods to work with PodDisruptionBudget resources. type PodDisruptionBudgetInterface interface { - Create(ctx context.Context, podDisruptionBudget *v1.PodDisruptionBudget, opts metav1.CreateOptions) (*v1.PodDisruptionBudget, error) - Update(ctx context.Context, podDisruptionBudget *v1.PodDisruptionBudget, opts metav1.UpdateOptions) (*v1.PodDisruptionBudget, error) + Create(ctx context.Context, podDisruptionBudget *policyv1.PodDisruptionBudget, opts metav1.CreateOptions) (*policyv1.PodDisruptionBudget, error) + Update(ctx context.Context, podDisruptionBudget *policyv1.PodDisruptionBudget, opts metav1.UpdateOptions) (*policyv1.PodDisruptionBudget, error) // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - UpdateStatus(ctx context.Context, podDisruptionBudget *v1.PodDisruptionBudget, opts metav1.UpdateOptions) (*v1.PodDisruptionBudget, error) + UpdateStatus(ctx context.Context, podDisruptionBudget *policyv1.PodDisruptionBudget, opts metav1.UpdateOptions) (*policyv1.PodDisruptionBudget, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error - Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.PodDisruptionBudget, error) - List(ctx context.Context, opts metav1.ListOptions) (*v1.PodDisruptionBudgetList, error) + Get(ctx context.Context, name string, opts metav1.GetOptions) (*policyv1.PodDisruptionBudget, error) + List(ctx context.Context, opts metav1.ListOptions) (*policyv1.PodDisruptionBudgetList, error) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.PodDisruptionBudget, err error) - Apply(ctx context.Context, podDisruptionBudget *policyv1.PodDisruptionBudgetApplyConfiguration, opts metav1.ApplyOptions) (result *v1.PodDisruptionBudget, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *policyv1.PodDisruptionBudget, err error) + Apply(ctx context.Context, podDisruptionBudget *applyconfigurationspolicyv1.PodDisruptionBudgetApplyConfiguration, opts metav1.ApplyOptions) (result *policyv1.PodDisruptionBudget, err error) // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
- ApplyStatus(ctx context.Context, podDisruptionBudget *policyv1.PodDisruptionBudgetApplyConfiguration, opts metav1.ApplyOptions) (result *v1.PodDisruptionBudget, err error) + ApplyStatus(ctx context.Context, podDisruptionBudget *applyconfigurationspolicyv1.PodDisruptionBudgetApplyConfiguration, opts metav1.ApplyOptions) (result *policyv1.PodDisruptionBudget, err error) PodDisruptionBudgetExpansion } // podDisruptionBudgets implements PodDisruptionBudgetInterface type podDisruptionBudgets struct { - *gentype.ClientWithListAndApply[*v1.PodDisruptionBudget, *v1.PodDisruptionBudgetList, *policyv1.PodDisruptionBudgetApplyConfiguration] + *gentype.ClientWithListAndApply[*policyv1.PodDisruptionBudget, *policyv1.PodDisruptionBudgetList, *applyconfigurationspolicyv1.PodDisruptionBudgetApplyConfiguration] } // newPodDisruptionBudgets returns a PodDisruptionBudgets func newPodDisruptionBudgets(c *PolicyV1Client, namespace string) *podDisruptionBudgets { return &podDisruptionBudgets{ - gentype.NewClientWithListAndApply[*v1.PodDisruptionBudget, *v1.PodDisruptionBudgetList, *policyv1.PodDisruptionBudgetApplyConfiguration]( + gentype.NewClientWithListAndApply[*policyv1.PodDisruptionBudget, *policyv1.PodDisruptionBudgetList, *applyconfigurationspolicyv1.PodDisruptionBudgetApplyConfiguration]( "poddisruptionbudgets", c.RESTClient(), scheme.ParameterCodec, namespace, - func() *v1.PodDisruptionBudget { return &v1.PodDisruptionBudget{} }, - func() *v1.PodDisruptionBudgetList { return &v1.PodDisruptionBudgetList{} }), + func() *policyv1.PodDisruptionBudget { return &policyv1.PodDisruptionBudget{} }, + func() *policyv1.PodDisruptionBudgetList { return &policyv1.PodDisruptionBudgetList{} }, + gentype.PrefersProtobuf[*policyv1.PodDisruptionBudget](), + ), } } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1/policy_client.go b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1/policy_client.go index 9bfd98aa9..8d84f460b 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1/policy_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1/policy_client.go @@ -19,10 +19,10 @@ limitations under the License. package v1 import ( - "net/http" + http "net/http" - v1 "k8s.io/api/policy/v1" - "k8s.io/client-go/kubernetes/scheme" + policyv1 "k8s.io/api/policy/v1" + scheme "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -90,10 +90,10 @@ func New(c rest.Interface) *PolicyV1Client { } func setConfigDefaults(config *rest.Config) error { - gv := v1.SchemeGroupVersion + gv := policyv1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/eviction.go b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/eviction.go index e003ece6b..de4c35e76 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/eviction.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/eviction.go @@ -19,7 +19,7 @@ limitations under the License. 
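
The serializer change in policy_client.go (CodecFactoryForGeneratedClient) and the PrefersProtobuf option are wired up inside the generated clients, so typical construction of a clientset is unchanged. A short sketch, assuming a kubeconfig path supplied by the caller:

package example

import (
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

// Sketch: build a clientset from a kubeconfig; the protobuf preference for
// built-in types such as PodDisruptionBudget is an internal detail of the
// generated policy clients and needs no caller-side changes.
func newClientset(kubeconfigPath string) (*kubernetes.Clientset, error) {
	config, err := clientcmd.BuildConfigFromFlags("", kubeconfigPath)
	if err != nil {
		return nil, err
	}
	return kubernetes.NewForConfig(config)
}
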
package v1beta1 import ( - v1beta1 "k8s.io/api/policy/v1beta1" + policyv1beta1 "k8s.io/api/policy/v1beta1" gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" ) @@ -37,17 +37,19 @@ type EvictionInterface interface { // evictions implements EvictionInterface type evictions struct { - *gentype.Client[*v1beta1.Eviction] + *gentype.Client[*policyv1beta1.Eviction] } // newEvictions returns a Evictions func newEvictions(c *PolicyV1beta1Client, namespace string) *evictions { return &evictions{ - gentype.NewClient[*v1beta1.Eviction]( + gentype.NewClient[*policyv1beta1.Eviction]( "evictions", c.RESTClient(), scheme.ParameterCodec, namespace, - func() *v1beta1.Eviction { return &v1beta1.Eviction{} }), + func() *policyv1beta1.Eviction { return &policyv1beta1.Eviction{} }, + gentype.PrefersProtobuf[*policyv1beta1.Eviction](), + ), } } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_eviction.go b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_eviction.go index b8f6f3eae..fb2a1de08 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_eviction.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_eviction.go @@ -18,8 +18,27 @@ limitations under the License. package fake -// FakeEvictions implements EvictionInterface -type FakeEvictions struct { +import ( + v1beta1 "k8s.io/api/policy/v1beta1" + gentype "k8s.io/client-go/gentype" + policyv1beta1 "k8s.io/client-go/kubernetes/typed/policy/v1beta1" +) + +// fakeEvictions implements EvictionInterface +type fakeEvictions struct { + *gentype.FakeClient[*v1beta1.Eviction] Fake *FakePolicyV1beta1 - ns string +} + +func newFakeEvictions(fake *FakePolicyV1beta1, namespace string) policyv1beta1.EvictionInterface { + return &fakeEvictions{ + gentype.NewFakeClient[*v1beta1.Eviction]( + fake.Fake, + namespace, + v1beta1.SchemeGroupVersion.WithResource("evictions"), + v1beta1.SchemeGroupVersion.WithKind("Eviction"), + func() *v1beta1.Eviction { return &v1beta1.Eviction{} }, + ), + fake, + } } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_eviction_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_eviction_expansion.go index f97522bb3..30c5df30b 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_eviction_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_eviction_expansion.go @@ -24,10 +24,10 @@ import ( core "k8s.io/client-go/testing" ) -func (c *FakeEvictions) Evict(ctx context.Context, eviction *policy.Eviction) error { +func (c *fakeEvictions) Evict(ctx context.Context, eviction *policy.Eviction) error { action := core.CreateActionImpl{} action.Verb = "create" - action.Namespace = c.ns + action.Namespace = c.Namespace() action.Resource = schema.GroupVersionResource{Group: "", Version: "v1", Resource: "pods"} action.Subresource = "eviction" action.Object = eviction diff --git a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_poddisruptionbudget.go b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_poddisruptionbudget.go index fbd9d01e0..d4cab0b4a 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_poddisruptionbudget.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_poddisruptionbudget.go @@ -19,179 +19,35 @@ limitations under the License. 
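
The policy/v1beta1 fakes get the same treatment as policy/v1, and the previously exported FakeEvictions / FakePodDisruptionBudgets struct types are now unexported, so tests that constructed them directly no longer compile. A hypothetical migration sketch going through the fake clientset instead (names are illustrative):

package example

import (
	"context"
	"testing"

	policyv1beta1 "k8s.io/api/policy/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
)

// Sketch: instead of building &FakePodDisruptionBudgets{...} by hand, obtain
// the interface from the fake clientset's PolicyV1beta1 group client.
func TestV1beta1PDBThroughFakeClientset(t *testing.T) {
	clientset := fake.NewSimpleClientset()

	pdb := &policyv1beta1.PodDisruptionBudget{
		ObjectMeta: metav1.ObjectMeta{Name: "example-pdb", Namespace: "default"},
	}
	if _, err := clientset.PolicyV1beta1().PodDisruptionBudgets("default").
		Create(context.Background(), pdb, metav1.CreateOptions{}); err != nil {
		t.Fatalf("create: %v", err)
	}
}
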
package fake import ( - "context" - json "encoding/json" - "fmt" - v1beta1 "k8s.io/api/policy/v1beta1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" policyv1beta1 "k8s.io/client-go/applyconfigurations/policy/v1beta1" - testing "k8s.io/client-go/testing" + gentype "k8s.io/client-go/gentype" + typedpolicyv1beta1 "k8s.io/client-go/kubernetes/typed/policy/v1beta1" ) -// FakePodDisruptionBudgets implements PodDisruptionBudgetInterface -type FakePodDisruptionBudgets struct { +// fakePodDisruptionBudgets implements PodDisruptionBudgetInterface +type fakePodDisruptionBudgets struct { + *gentype.FakeClientWithListAndApply[*v1beta1.PodDisruptionBudget, *v1beta1.PodDisruptionBudgetList, *policyv1beta1.PodDisruptionBudgetApplyConfiguration] Fake *FakePolicyV1beta1 - ns string -} - -var poddisruptionbudgetsResource = v1beta1.SchemeGroupVersion.WithResource("poddisruptionbudgets") - -var poddisruptionbudgetsKind = v1beta1.SchemeGroupVersion.WithKind("PodDisruptionBudget") - -// Get takes name of the podDisruptionBudget, and returns the corresponding podDisruptionBudget object, and an error if there is any. -func (c *FakePodDisruptionBudgets) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.PodDisruptionBudget, err error) { - emptyResult := &v1beta1.PodDisruptionBudget{} - obj, err := c.Fake. - Invokes(testing.NewGetActionWithOptions(poddisruptionbudgetsResource, c.ns, name, options), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.PodDisruptionBudget), err -} - -// List takes label and field selectors, and returns the list of PodDisruptionBudgets that match those selectors. -func (c *FakePodDisruptionBudgets) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.PodDisruptionBudgetList, err error) { - emptyResult := &v1beta1.PodDisruptionBudgetList{} - obj, err := c.Fake. - Invokes(testing.NewListActionWithOptions(poddisruptionbudgetsResource, poddisruptionbudgetsKind, c.ns, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1beta1.PodDisruptionBudgetList{ListMeta: obj.(*v1beta1.PodDisruptionBudgetList).ListMeta} - for _, item := range obj.(*v1beta1.PodDisruptionBudgetList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested podDisruptionBudgets. -func (c *FakePodDisruptionBudgets) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchActionWithOptions(poddisruptionbudgetsResource, c.ns, opts)) - -} - -// Create takes the representation of a podDisruptionBudget and creates it. Returns the server's representation of the podDisruptionBudget, and an error, if there is any. -func (c *FakePodDisruptionBudgets) Create(ctx context.Context, podDisruptionBudget *v1beta1.PodDisruptionBudget, opts v1.CreateOptions) (result *v1beta1.PodDisruptionBudget, err error) { - emptyResult := &v1beta1.PodDisruptionBudget{} - obj, err := c.Fake. 
- Invokes(testing.NewCreateActionWithOptions(poddisruptionbudgetsResource, c.ns, podDisruptionBudget, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.PodDisruptionBudget), err -} - -// Update takes the representation of a podDisruptionBudget and updates it. Returns the server's representation of the podDisruptionBudget, and an error, if there is any. -func (c *FakePodDisruptionBudgets) Update(ctx context.Context, podDisruptionBudget *v1beta1.PodDisruptionBudget, opts v1.UpdateOptions) (result *v1beta1.PodDisruptionBudget, err error) { - emptyResult := &v1beta1.PodDisruptionBudget{} - obj, err := c.Fake. - Invokes(testing.NewUpdateActionWithOptions(poddisruptionbudgetsResource, c.ns, podDisruptionBudget, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.PodDisruptionBudget), err } -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakePodDisruptionBudgets) UpdateStatus(ctx context.Context, podDisruptionBudget *v1beta1.PodDisruptionBudget, opts v1.UpdateOptions) (result *v1beta1.PodDisruptionBudget, err error) { - emptyResult := &v1beta1.PodDisruptionBudget{} - obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceActionWithOptions(poddisruptionbudgetsResource, "status", c.ns, podDisruptionBudget, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.PodDisruptionBudget), err -} - -// Delete takes name of the podDisruptionBudget and deletes it. Returns an error if one occurs. -func (c *FakePodDisruptionBudgets) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(poddisruptionbudgetsResource, c.ns, name, opts), &v1beta1.PodDisruptionBudget{}) - - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakePodDisruptionBudgets) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionActionWithOptions(poddisruptionbudgetsResource, c.ns, opts, listOpts) - - _, err := c.Fake.Invokes(action, &v1beta1.PodDisruptionBudgetList{}) - return err -} - -// Patch applies the patch and returns the patched podDisruptionBudget. -func (c *FakePodDisruptionBudgets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.PodDisruptionBudget, err error) { - emptyResult := &v1beta1.PodDisruptionBudget{} - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceActionWithOptions(poddisruptionbudgetsResource, c.ns, name, pt, data, opts, subresources...), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.PodDisruptionBudget), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied podDisruptionBudget. 
-func (c *FakePodDisruptionBudgets) Apply(ctx context.Context, podDisruptionBudget *policyv1beta1.PodDisruptionBudgetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.PodDisruptionBudget, err error) { - if podDisruptionBudget == nil { - return nil, fmt.Errorf("podDisruptionBudget provided to Apply must not be nil") - } - data, err := json.Marshal(podDisruptionBudget) - if err != nil { - return nil, err - } - name := podDisruptionBudget.Name - if name == nil { - return nil, fmt.Errorf("podDisruptionBudget.Name must be provided to Apply") - } - emptyResult := &v1beta1.PodDisruptionBudget{} - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceActionWithOptions(poddisruptionbudgetsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.PodDisruptionBudget), err -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *FakePodDisruptionBudgets) ApplyStatus(ctx context.Context, podDisruptionBudget *policyv1beta1.PodDisruptionBudgetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.PodDisruptionBudget, err error) { - if podDisruptionBudget == nil { - return nil, fmt.Errorf("podDisruptionBudget provided to Apply must not be nil") - } - data, err := json.Marshal(podDisruptionBudget) - if err != nil { - return nil, err - } - name := podDisruptionBudget.Name - if name == nil { - return nil, fmt.Errorf("podDisruptionBudget.Name must be provided to Apply") - } - emptyResult := &v1beta1.PodDisruptionBudget{} - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceActionWithOptions(poddisruptionbudgetsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) - - if obj == nil { - return emptyResult, err +func newFakePodDisruptionBudgets(fake *FakePolicyV1beta1, namespace string) typedpolicyv1beta1.PodDisruptionBudgetInterface { + return &fakePodDisruptionBudgets{ + gentype.NewFakeClientWithListAndApply[*v1beta1.PodDisruptionBudget, *v1beta1.PodDisruptionBudgetList, *policyv1beta1.PodDisruptionBudgetApplyConfiguration]( + fake.Fake, + namespace, + v1beta1.SchemeGroupVersion.WithResource("poddisruptionbudgets"), + v1beta1.SchemeGroupVersion.WithKind("PodDisruptionBudget"), + func() *v1beta1.PodDisruptionBudget { return &v1beta1.PodDisruptionBudget{} }, + func() *v1beta1.PodDisruptionBudgetList { return &v1beta1.PodDisruptionBudgetList{} }, + func(dst, src *v1beta1.PodDisruptionBudgetList) { dst.ListMeta = src.ListMeta }, + func(list *v1beta1.PodDisruptionBudgetList) []*v1beta1.PodDisruptionBudget { + return gentype.ToPointerSlice(list.Items) + }, + func(list *v1beta1.PodDisruptionBudgetList, items []*v1beta1.PodDisruptionBudget) { + list.Items = gentype.FromPointerSlice(items) + }, + ), + fake, } - return obj.(*v1beta1.PodDisruptionBudget), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_policy_client.go b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_policy_client.go index 90670b113..136935dd4 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_policy_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_policy_client.go @@ -29,11 +29,11 @@ type FakePolicyV1beta1 struct { } func (c *FakePolicyV1beta1) Evictions(namespace string) v1beta1.EvictionInterface { - return &FakeEvictions{c, namespace} + return 
newFakeEvictions(c, namespace) } func (c *FakePolicyV1beta1) PodDisruptionBudgets(namespace string) v1beta1.PodDisruptionBudgetInterface { - return &FakePodDisruptionBudgets{c, namespace} + return newFakePodDisruptionBudgets(c, namespace) } // RESTClient returns a RESTClient that is used to communicate diff --git a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/poddisruptionbudget.go b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/poddisruptionbudget.go index 411181237..00e044961 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/poddisruptionbudget.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/poddisruptionbudget.go @@ -19,13 +19,13 @@ limitations under the License. package v1beta1 import ( - "context" + context "context" - v1beta1 "k8s.io/api/policy/v1beta1" + policyv1beta1 "k8s.io/api/policy/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - policyv1beta1 "k8s.io/client-go/applyconfigurations/policy/v1beta1" + applyconfigurationspolicyv1beta1 "k8s.io/client-go/applyconfigurations/policy/v1beta1" gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" ) @@ -38,36 +38,38 @@ type PodDisruptionBudgetsGetter interface { // PodDisruptionBudgetInterface has methods to work with PodDisruptionBudget resources. type PodDisruptionBudgetInterface interface { - Create(ctx context.Context, podDisruptionBudget *v1beta1.PodDisruptionBudget, opts v1.CreateOptions) (*v1beta1.PodDisruptionBudget, error) - Update(ctx context.Context, podDisruptionBudget *v1beta1.PodDisruptionBudget, opts v1.UpdateOptions) (*v1beta1.PodDisruptionBudget, error) + Create(ctx context.Context, podDisruptionBudget *policyv1beta1.PodDisruptionBudget, opts v1.CreateOptions) (*policyv1beta1.PodDisruptionBudget, error) + Update(ctx context.Context, podDisruptionBudget *policyv1beta1.PodDisruptionBudget, opts v1.UpdateOptions) (*policyv1beta1.PodDisruptionBudget, error) // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
- UpdateStatus(ctx context.Context, podDisruptionBudget *v1beta1.PodDisruptionBudget, opts v1.UpdateOptions) (*v1beta1.PodDisruptionBudget, error) + UpdateStatus(ctx context.Context, podDisruptionBudget *policyv1beta1.PodDisruptionBudget, opts v1.UpdateOptions) (*policyv1beta1.PodDisruptionBudget, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.PodDisruptionBudget, error) - List(ctx context.Context, opts v1.ListOptions) (*v1beta1.PodDisruptionBudgetList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*policyv1beta1.PodDisruptionBudget, error) + List(ctx context.Context, opts v1.ListOptions) (*policyv1beta1.PodDisruptionBudgetList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.PodDisruptionBudget, err error) - Apply(ctx context.Context, podDisruptionBudget *policyv1beta1.PodDisruptionBudgetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.PodDisruptionBudget, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *policyv1beta1.PodDisruptionBudget, err error) + Apply(ctx context.Context, podDisruptionBudget *applyconfigurationspolicyv1beta1.PodDisruptionBudgetApplyConfiguration, opts v1.ApplyOptions) (result *policyv1beta1.PodDisruptionBudget, err error) // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). - ApplyStatus(ctx context.Context, podDisruptionBudget *policyv1beta1.PodDisruptionBudgetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.PodDisruptionBudget, err error) + ApplyStatus(ctx context.Context, podDisruptionBudget *applyconfigurationspolicyv1beta1.PodDisruptionBudgetApplyConfiguration, opts v1.ApplyOptions) (result *policyv1beta1.PodDisruptionBudget, err error) PodDisruptionBudgetExpansion } // podDisruptionBudgets implements PodDisruptionBudgetInterface type podDisruptionBudgets struct { - *gentype.ClientWithListAndApply[*v1beta1.PodDisruptionBudget, *v1beta1.PodDisruptionBudgetList, *policyv1beta1.PodDisruptionBudgetApplyConfiguration] + *gentype.ClientWithListAndApply[*policyv1beta1.PodDisruptionBudget, *policyv1beta1.PodDisruptionBudgetList, *applyconfigurationspolicyv1beta1.PodDisruptionBudgetApplyConfiguration] } // newPodDisruptionBudgets returns a PodDisruptionBudgets func newPodDisruptionBudgets(c *PolicyV1beta1Client, namespace string) *podDisruptionBudgets { return &podDisruptionBudgets{ - gentype.NewClientWithListAndApply[*v1beta1.PodDisruptionBudget, *v1beta1.PodDisruptionBudgetList, *policyv1beta1.PodDisruptionBudgetApplyConfiguration]( + gentype.NewClientWithListAndApply[*policyv1beta1.PodDisruptionBudget, *policyv1beta1.PodDisruptionBudgetList, *applyconfigurationspolicyv1beta1.PodDisruptionBudgetApplyConfiguration]( "poddisruptionbudgets", c.RESTClient(), scheme.ParameterCodec, namespace, - func() *v1beta1.PodDisruptionBudget { return &v1beta1.PodDisruptionBudget{} }, - func() *v1beta1.PodDisruptionBudgetList { return &v1beta1.PodDisruptionBudgetList{} }), + func() *policyv1beta1.PodDisruptionBudget { return &policyv1beta1.PodDisruptionBudget{} }, + func() *policyv1beta1.PodDisruptionBudgetList { return &policyv1beta1.PodDisruptionBudgetList{} }, + 
gentype.PrefersProtobuf[*policyv1beta1.PodDisruptionBudget](), + ), } } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/policy_client.go b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/policy_client.go index fdb509321..d8e78627e 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/policy_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/policy_client.go @@ -19,10 +19,10 @@ limitations under the License. package v1beta1 import ( - "net/http" + http "net/http" - v1beta1 "k8s.io/api/policy/v1beta1" - "k8s.io/client-go/kubernetes/scheme" + policyv1beta1 "k8s.io/api/policy/v1beta1" + scheme "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -90,10 +90,10 @@ func New(c rest.Interface) *PolicyV1beta1Client { } func setConfigDefaults(config *rest.Config) error { - gv := v1beta1.SchemeGroupVersion + gv := policyv1beta1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrole.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrole.go index 19fff0ee4..cccad0487 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrole.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrole.go @@ -19,13 +19,13 @@ limitations under the License. package v1 import ( - "context" + context "context" - v1 "k8s.io/api/rbac/v1" + rbacv1 "k8s.io/api/rbac/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - rbacv1 "k8s.io/client-go/applyconfigurations/rbac/v1" + applyconfigurationsrbacv1 "k8s.io/client-go/applyconfigurations/rbac/v1" gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" ) @@ -38,32 +38,34 @@ type ClusterRolesGetter interface { // ClusterRoleInterface has methods to work with ClusterRole resources. 
type ClusterRoleInterface interface { - Create(ctx context.Context, clusterRole *v1.ClusterRole, opts metav1.CreateOptions) (*v1.ClusterRole, error) - Update(ctx context.Context, clusterRole *v1.ClusterRole, opts metav1.UpdateOptions) (*v1.ClusterRole, error) + Create(ctx context.Context, clusterRole *rbacv1.ClusterRole, opts metav1.CreateOptions) (*rbacv1.ClusterRole, error) + Update(ctx context.Context, clusterRole *rbacv1.ClusterRole, opts metav1.UpdateOptions) (*rbacv1.ClusterRole, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error - Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.ClusterRole, error) - List(ctx context.Context, opts metav1.ListOptions) (*v1.ClusterRoleList, error) + Get(ctx context.Context, name string, opts metav1.GetOptions) (*rbacv1.ClusterRole, error) + List(ctx context.Context, opts metav1.ListOptions) (*rbacv1.ClusterRoleList, error) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ClusterRole, err error) - Apply(ctx context.Context, clusterRole *rbacv1.ClusterRoleApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ClusterRole, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *rbacv1.ClusterRole, err error) + Apply(ctx context.Context, clusterRole *applyconfigurationsrbacv1.ClusterRoleApplyConfiguration, opts metav1.ApplyOptions) (result *rbacv1.ClusterRole, err error) ClusterRoleExpansion } // clusterRoles implements ClusterRoleInterface type clusterRoles struct { - *gentype.ClientWithListAndApply[*v1.ClusterRole, *v1.ClusterRoleList, *rbacv1.ClusterRoleApplyConfiguration] + *gentype.ClientWithListAndApply[*rbacv1.ClusterRole, *rbacv1.ClusterRoleList, *applyconfigurationsrbacv1.ClusterRoleApplyConfiguration] } // newClusterRoles returns a ClusterRoles func newClusterRoles(c *RbacV1Client) *clusterRoles { return &clusterRoles{ - gentype.NewClientWithListAndApply[*v1.ClusterRole, *v1.ClusterRoleList, *rbacv1.ClusterRoleApplyConfiguration]( + gentype.NewClientWithListAndApply[*rbacv1.ClusterRole, *rbacv1.ClusterRoleList, *applyconfigurationsrbacv1.ClusterRoleApplyConfiguration]( "clusterroles", c.RESTClient(), scheme.ParameterCodec, "", - func() *v1.ClusterRole { return &v1.ClusterRole{} }, - func() *v1.ClusterRoleList { return &v1.ClusterRoleList{} }), + func() *rbacv1.ClusterRole { return &rbacv1.ClusterRole{} }, + func() *rbacv1.ClusterRoleList { return &rbacv1.ClusterRoleList{} }, + gentype.PrefersProtobuf[*rbacv1.ClusterRole](), + ), } } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrolebinding.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrolebinding.go index 77fb3785e..4a75fdcb5 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrolebinding.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrolebinding.go @@ -19,13 +19,13 @@ limitations under the License. 
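
In the new constructors, cluster scope is expressed by passing "" as the namespace to gentype, so actions recorded by the fake carry an empty namespace just as the old root actions did. A minimal sketch of a test relying on that, with an illustrative ClusterRole name:

package example

import (
	"context"
	"testing"

	rbacv1 "k8s.io/api/rbac/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
)

// Sketch: create a ClusterRole through the fake clientset and confirm the
// recorded actions have no namespace, as expected for a cluster-scoped type.
func TestClusterRoleActionsHaveNoNamespace(t *testing.T) {
	clientset := fake.NewSimpleClientset()

	cr := &rbacv1.ClusterRole{ObjectMeta: metav1.ObjectMeta{Name: "example-reader"}}
	if _, err := clientset.RbacV1().ClusterRoles().Create(
		context.Background(), cr, metav1.CreateOptions{}); err != nil {
		t.Fatalf("create: %v", err)
	}

	for _, action := range clientset.Actions() {
		if action.GetNamespace() != "" {
			t.Fatalf("expected cluster-scoped action, got namespace %q", action.GetNamespace())
		}
	}
}
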
package v1 import ( - "context" + context "context" - v1 "k8s.io/api/rbac/v1" + rbacv1 "k8s.io/api/rbac/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - rbacv1 "k8s.io/client-go/applyconfigurations/rbac/v1" + applyconfigurationsrbacv1 "k8s.io/client-go/applyconfigurations/rbac/v1" gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" ) @@ -38,32 +38,34 @@ type ClusterRoleBindingsGetter interface { // ClusterRoleBindingInterface has methods to work with ClusterRoleBinding resources. type ClusterRoleBindingInterface interface { - Create(ctx context.Context, clusterRoleBinding *v1.ClusterRoleBinding, opts metav1.CreateOptions) (*v1.ClusterRoleBinding, error) - Update(ctx context.Context, clusterRoleBinding *v1.ClusterRoleBinding, opts metav1.UpdateOptions) (*v1.ClusterRoleBinding, error) + Create(ctx context.Context, clusterRoleBinding *rbacv1.ClusterRoleBinding, opts metav1.CreateOptions) (*rbacv1.ClusterRoleBinding, error) + Update(ctx context.Context, clusterRoleBinding *rbacv1.ClusterRoleBinding, opts metav1.UpdateOptions) (*rbacv1.ClusterRoleBinding, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error - Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.ClusterRoleBinding, error) - List(ctx context.Context, opts metav1.ListOptions) (*v1.ClusterRoleBindingList, error) + Get(ctx context.Context, name string, opts metav1.GetOptions) (*rbacv1.ClusterRoleBinding, error) + List(ctx context.Context, opts metav1.ListOptions) (*rbacv1.ClusterRoleBindingList, error) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ClusterRoleBinding, err error) - Apply(ctx context.Context, clusterRoleBinding *rbacv1.ClusterRoleBindingApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ClusterRoleBinding, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *rbacv1.ClusterRoleBinding, err error) + Apply(ctx context.Context, clusterRoleBinding *applyconfigurationsrbacv1.ClusterRoleBindingApplyConfiguration, opts metav1.ApplyOptions) (result *rbacv1.ClusterRoleBinding, err error) ClusterRoleBindingExpansion } // clusterRoleBindings implements ClusterRoleBindingInterface type clusterRoleBindings struct { - *gentype.ClientWithListAndApply[*v1.ClusterRoleBinding, *v1.ClusterRoleBindingList, *rbacv1.ClusterRoleBindingApplyConfiguration] + *gentype.ClientWithListAndApply[*rbacv1.ClusterRoleBinding, *rbacv1.ClusterRoleBindingList, *applyconfigurationsrbacv1.ClusterRoleBindingApplyConfiguration] } // newClusterRoleBindings returns a ClusterRoleBindings func newClusterRoleBindings(c *RbacV1Client) *clusterRoleBindings { return &clusterRoleBindings{ - gentype.NewClientWithListAndApply[*v1.ClusterRoleBinding, *v1.ClusterRoleBindingList, *rbacv1.ClusterRoleBindingApplyConfiguration]( + gentype.NewClientWithListAndApply[*rbacv1.ClusterRoleBinding, *rbacv1.ClusterRoleBindingList, *applyconfigurationsrbacv1.ClusterRoleBindingApplyConfiguration]( "clusterrolebindings", c.RESTClient(), scheme.ParameterCodec, "", - func() *v1.ClusterRoleBinding { return &v1.ClusterRoleBinding{} }, - func() *v1.ClusterRoleBindingList { return 
&v1.ClusterRoleBindingList{} }), + func() *rbacv1.ClusterRoleBinding { return &rbacv1.ClusterRoleBinding{} }, + func() *rbacv1.ClusterRoleBindingList { return &rbacv1.ClusterRoleBindingList{} }, + gentype.PrefersProtobuf[*rbacv1.ClusterRoleBinding](), + ), } } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_clusterrole.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_clusterrole.go index 6df91b1a8..82fa39d06 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_clusterrole.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_clusterrole.go @@ -19,133 +19,31 @@ limitations under the License. package fake import ( - "context" - json "encoding/json" - "fmt" - v1 "k8s.io/api/rbac/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" rbacv1 "k8s.io/client-go/applyconfigurations/rbac/v1" - testing "k8s.io/client-go/testing" + gentype "k8s.io/client-go/gentype" + typedrbacv1 "k8s.io/client-go/kubernetes/typed/rbac/v1" ) -// FakeClusterRoles implements ClusterRoleInterface -type FakeClusterRoles struct { +// fakeClusterRoles implements ClusterRoleInterface +type fakeClusterRoles struct { + *gentype.FakeClientWithListAndApply[*v1.ClusterRole, *v1.ClusterRoleList, *rbacv1.ClusterRoleApplyConfiguration] Fake *FakeRbacV1 } -var clusterrolesResource = v1.SchemeGroupVersion.WithResource("clusterroles") - -var clusterrolesKind = v1.SchemeGroupVersion.WithKind("ClusterRole") - -// Get takes name of the clusterRole, and returns the corresponding clusterRole object, and an error if there is any. -func (c *FakeClusterRoles) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ClusterRole, err error) { - emptyResult := &v1.ClusterRole{} - obj, err := c.Fake. - Invokes(testing.NewRootGetActionWithOptions(clusterrolesResource, name, options), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1.ClusterRole), err -} - -// List takes label and field selectors, and returns the list of ClusterRoles that match those selectors. -func (c *FakeClusterRoles) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ClusterRoleList, err error) { - emptyResult := &v1.ClusterRoleList{} - obj, err := c.Fake. - Invokes(testing.NewRootListActionWithOptions(clusterrolesResource, clusterrolesKind, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1.ClusterRoleList{ListMeta: obj.(*v1.ClusterRoleList).ListMeta} - for _, item := range obj.(*v1.ClusterRoleList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested clusterRoles. -func (c *FakeClusterRoles) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewRootWatchActionWithOptions(clusterrolesResource, opts)) -} - -// Create takes the representation of a clusterRole and creates it. Returns the server's representation of the clusterRole, and an error, if there is any. -func (c *FakeClusterRoles) Create(ctx context.Context, clusterRole *v1.ClusterRole, opts metav1.CreateOptions) (result *v1.ClusterRole, err error) { - emptyResult := &v1.ClusterRole{} - obj, err := c.Fake. 
- Invokes(testing.NewRootCreateActionWithOptions(clusterrolesResource, clusterRole, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1.ClusterRole), err -} - -// Update takes the representation of a clusterRole and updates it. Returns the server's representation of the clusterRole, and an error, if there is any. -func (c *FakeClusterRoles) Update(ctx context.Context, clusterRole *v1.ClusterRole, opts metav1.UpdateOptions) (result *v1.ClusterRole, err error) { - emptyResult := &v1.ClusterRole{} - obj, err := c.Fake. - Invokes(testing.NewRootUpdateActionWithOptions(clusterrolesResource, clusterRole, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1.ClusterRole), err -} - -// Delete takes name of the clusterRole and deletes it. Returns an error if one occurs. -func (c *FakeClusterRoles) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewRootDeleteActionWithOptions(clusterrolesResource, name, opts), &v1.ClusterRole{}) - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeClusterRoles) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewRootDeleteCollectionActionWithOptions(clusterrolesResource, opts, listOpts) - - _, err := c.Fake.Invokes(action, &v1.ClusterRoleList{}) - return err -} - -// Patch applies the patch and returns the patched clusterRole. -func (c *FakeClusterRoles) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ClusterRole, err error) { - emptyResult := &v1.ClusterRole{} - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceActionWithOptions(clusterrolesResource, name, pt, data, opts, subresources...), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1.ClusterRole), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied clusterRole. -func (c *FakeClusterRoles) Apply(ctx context.Context, clusterRole *rbacv1.ClusterRoleApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ClusterRole, err error) { - if clusterRole == nil { - return nil, fmt.Errorf("clusterRole provided to Apply must not be nil") - } - data, err := json.Marshal(clusterRole) - if err != nil { - return nil, err - } - name := clusterRole.Name - if name == nil { - return nil, fmt.Errorf("clusterRole.Name must be provided to Apply") - } - emptyResult := &v1.ClusterRole{} - obj, err := c.Fake. 
- Invokes(testing.NewRootPatchSubresourceActionWithOptions(clusterrolesResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) - if obj == nil { - return emptyResult, err +func newFakeClusterRoles(fake *FakeRbacV1) typedrbacv1.ClusterRoleInterface { + return &fakeClusterRoles{ + gentype.NewFakeClientWithListAndApply[*v1.ClusterRole, *v1.ClusterRoleList, *rbacv1.ClusterRoleApplyConfiguration]( + fake.Fake, + "", + v1.SchemeGroupVersion.WithResource("clusterroles"), + v1.SchemeGroupVersion.WithKind("ClusterRole"), + func() *v1.ClusterRole { return &v1.ClusterRole{} }, + func() *v1.ClusterRoleList { return &v1.ClusterRoleList{} }, + func(dst, src *v1.ClusterRoleList) { dst.ListMeta = src.ListMeta }, + func(list *v1.ClusterRoleList) []*v1.ClusterRole { return gentype.ToPointerSlice(list.Items) }, + func(list *v1.ClusterRoleList, items []*v1.ClusterRole) { list.Items = gentype.FromPointerSlice(items) }, + ), + fake, } - return obj.(*v1.ClusterRole), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_clusterrolebinding.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_clusterrolebinding.go index 6f3251408..3d1f0d4a9 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_clusterrolebinding.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_clusterrolebinding.go @@ -19,133 +19,35 @@ limitations under the License. package fake import ( - "context" - json "encoding/json" - "fmt" - v1 "k8s.io/api/rbac/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" rbacv1 "k8s.io/client-go/applyconfigurations/rbac/v1" - testing "k8s.io/client-go/testing" + gentype "k8s.io/client-go/gentype" + typedrbacv1 "k8s.io/client-go/kubernetes/typed/rbac/v1" ) -// FakeClusterRoleBindings implements ClusterRoleBindingInterface -type FakeClusterRoleBindings struct { +// fakeClusterRoleBindings implements ClusterRoleBindingInterface +type fakeClusterRoleBindings struct { + *gentype.FakeClientWithListAndApply[*v1.ClusterRoleBinding, *v1.ClusterRoleBindingList, *rbacv1.ClusterRoleBindingApplyConfiguration] Fake *FakeRbacV1 } -var clusterrolebindingsResource = v1.SchemeGroupVersion.WithResource("clusterrolebindings") - -var clusterrolebindingsKind = v1.SchemeGroupVersion.WithKind("ClusterRoleBinding") - -// Get takes name of the clusterRoleBinding, and returns the corresponding clusterRoleBinding object, and an error if there is any. -func (c *FakeClusterRoleBindings) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ClusterRoleBinding, err error) { - emptyResult := &v1.ClusterRoleBinding{} - obj, err := c.Fake. - Invokes(testing.NewRootGetActionWithOptions(clusterrolebindingsResource, name, options), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1.ClusterRoleBinding), err -} - -// List takes label and field selectors, and returns the list of ClusterRoleBindings that match those selectors. -func (c *FakeClusterRoleBindings) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ClusterRoleBindingList, err error) { - emptyResult := &v1.ClusterRoleBindingList{} - obj, err := c.Fake. 
- Invokes(testing.NewRootListActionWithOptions(clusterrolebindingsResource, clusterrolebindingsKind, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1.ClusterRoleBindingList{ListMeta: obj.(*v1.ClusterRoleBindingList).ListMeta} - for _, item := range obj.(*v1.ClusterRoleBindingList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested clusterRoleBindings. -func (c *FakeClusterRoleBindings) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewRootWatchActionWithOptions(clusterrolebindingsResource, opts)) -} - -// Create takes the representation of a clusterRoleBinding and creates it. Returns the server's representation of the clusterRoleBinding, and an error, if there is any. -func (c *FakeClusterRoleBindings) Create(ctx context.Context, clusterRoleBinding *v1.ClusterRoleBinding, opts metav1.CreateOptions) (result *v1.ClusterRoleBinding, err error) { - emptyResult := &v1.ClusterRoleBinding{} - obj, err := c.Fake. - Invokes(testing.NewRootCreateActionWithOptions(clusterrolebindingsResource, clusterRoleBinding, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1.ClusterRoleBinding), err -} - -// Update takes the representation of a clusterRoleBinding and updates it. Returns the server's representation of the clusterRoleBinding, and an error, if there is any. -func (c *FakeClusterRoleBindings) Update(ctx context.Context, clusterRoleBinding *v1.ClusterRoleBinding, opts metav1.UpdateOptions) (result *v1.ClusterRoleBinding, err error) { - emptyResult := &v1.ClusterRoleBinding{} - obj, err := c.Fake. - Invokes(testing.NewRootUpdateActionWithOptions(clusterrolebindingsResource, clusterRoleBinding, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1.ClusterRoleBinding), err -} - -// Delete takes name of the clusterRoleBinding and deletes it. Returns an error if one occurs. -func (c *FakeClusterRoleBindings) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewRootDeleteActionWithOptions(clusterrolebindingsResource, name, opts), &v1.ClusterRoleBinding{}) - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeClusterRoleBindings) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewRootDeleteCollectionActionWithOptions(clusterrolebindingsResource, opts, listOpts) - - _, err := c.Fake.Invokes(action, &v1.ClusterRoleBindingList{}) - return err -} - -// Patch applies the patch and returns the patched clusterRoleBinding. -func (c *FakeClusterRoleBindings) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ClusterRoleBinding, err error) { - emptyResult := &v1.ClusterRoleBinding{} - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceActionWithOptions(clusterrolebindingsResource, name, pt, data, opts, subresources...), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1.ClusterRoleBinding), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied clusterRoleBinding. 
-func (c *FakeClusterRoleBindings) Apply(ctx context.Context, clusterRoleBinding *rbacv1.ClusterRoleBindingApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ClusterRoleBinding, err error) { - if clusterRoleBinding == nil { - return nil, fmt.Errorf("clusterRoleBinding provided to Apply must not be nil") - } - data, err := json.Marshal(clusterRoleBinding) - if err != nil { - return nil, err - } - name := clusterRoleBinding.Name - if name == nil { - return nil, fmt.Errorf("clusterRoleBinding.Name must be provided to Apply") - } - emptyResult := &v1.ClusterRoleBinding{} - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceActionWithOptions(clusterrolebindingsResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) - if obj == nil { - return emptyResult, err +func newFakeClusterRoleBindings(fake *FakeRbacV1) typedrbacv1.ClusterRoleBindingInterface { + return &fakeClusterRoleBindings{ + gentype.NewFakeClientWithListAndApply[*v1.ClusterRoleBinding, *v1.ClusterRoleBindingList, *rbacv1.ClusterRoleBindingApplyConfiguration]( + fake.Fake, + "", + v1.SchemeGroupVersion.WithResource("clusterrolebindings"), + v1.SchemeGroupVersion.WithKind("ClusterRoleBinding"), + func() *v1.ClusterRoleBinding { return &v1.ClusterRoleBinding{} }, + func() *v1.ClusterRoleBindingList { return &v1.ClusterRoleBindingList{} }, + func(dst, src *v1.ClusterRoleBindingList) { dst.ListMeta = src.ListMeta }, + func(list *v1.ClusterRoleBindingList) []*v1.ClusterRoleBinding { + return gentype.ToPointerSlice(list.Items) + }, + func(list *v1.ClusterRoleBindingList, items []*v1.ClusterRoleBinding) { + list.Items = gentype.FromPointerSlice(items) + }, + ), + fake, } - return obj.(*v1.ClusterRoleBinding), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_rbac_client.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_rbac_client.go index 426fd70d6..db2b31d38 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_rbac_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_rbac_client.go @@ -29,19 +29,19 @@ type FakeRbacV1 struct { } func (c *FakeRbacV1) ClusterRoles() v1.ClusterRoleInterface { - return &FakeClusterRoles{c} + return newFakeClusterRoles(c) } func (c *FakeRbacV1) ClusterRoleBindings() v1.ClusterRoleBindingInterface { - return &FakeClusterRoleBindings{c} + return newFakeClusterRoleBindings(c) } func (c *FakeRbacV1) Roles(namespace string) v1.RoleInterface { - return &FakeRoles{c, namespace} + return newFakeRoles(c, namespace) } func (c *FakeRbacV1) RoleBindings(namespace string) v1.RoleBindingInterface { - return &FakeRoleBindings{c, namespace} + return newFakeRoleBindings(c, namespace) } // RESTClient returns a RESTClient that is used to communicate diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_role.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_role.go index ba9161940..3baf41cc7 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_role.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_role.go @@ -19,142 +19,31 @@ limitations under the License. 
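
Apply support is now provided by the embedded gentype.FakeClientWithListAndApply rather than the hand-written marshal-and-patch code removed above. A sketch of exercising it from a test, assuming fake.NewClientset (the managed-fields-aware fake) is used so apply patches are honored; all names are illustrative:

package example

import (
	"context"
	"testing"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	rbacv1ac "k8s.io/client-go/applyconfigurations/rbac/v1"
	"k8s.io/client-go/kubernetes/fake"
)

// Sketch: server-side apply of a ClusterRoleBinding through the fake clientset.
func TestApplyClusterRoleBinding(t *testing.T) {
	clientset := fake.NewClientset()

	crb := rbacv1ac.ClusterRoleBinding("example-binding").
		WithRoleRef(rbacv1ac.RoleRef().
			WithAPIGroup("rbac.authorization.k8s.io").
			WithKind("ClusterRole").
			WithName("example-reader")).
		WithSubjects(rbacv1ac.Subject().
			WithKind("ServiceAccount").
			WithName("example-sa").
			WithNamespace("default"))

	applied, err := clientset.RbacV1().ClusterRoleBindings().Apply(
		context.Background(), crb, metav1.ApplyOptions{FieldManager: "example-test", Force: true})
	if err != nil {
		t.Fatalf("apply: %v", err)
	}
	if applied.RoleRef.Name != "example-reader" {
		t.Fatalf("unexpected role ref %q", applied.RoleRef.Name)
	}
}
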
package fake import ( - "context" - json "encoding/json" - "fmt" - v1 "k8s.io/api/rbac/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" rbacv1 "k8s.io/client-go/applyconfigurations/rbac/v1" - testing "k8s.io/client-go/testing" + gentype "k8s.io/client-go/gentype" + typedrbacv1 "k8s.io/client-go/kubernetes/typed/rbac/v1" ) -// FakeRoles implements RoleInterface -type FakeRoles struct { +// fakeRoles implements RoleInterface +type fakeRoles struct { + *gentype.FakeClientWithListAndApply[*v1.Role, *v1.RoleList, *rbacv1.RoleApplyConfiguration] Fake *FakeRbacV1 - ns string -} - -var rolesResource = v1.SchemeGroupVersion.WithResource("roles") - -var rolesKind = v1.SchemeGroupVersion.WithKind("Role") - -// Get takes name of the role, and returns the corresponding role object, and an error if there is any. -func (c *FakeRoles) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Role, err error) { - emptyResult := &v1.Role{} - obj, err := c.Fake. - Invokes(testing.NewGetActionWithOptions(rolesResource, c.ns, name, options), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1.Role), err -} - -// List takes label and field selectors, and returns the list of Roles that match those selectors. -func (c *FakeRoles) List(ctx context.Context, opts metav1.ListOptions) (result *v1.RoleList, err error) { - emptyResult := &v1.RoleList{} - obj, err := c.Fake. - Invokes(testing.NewListActionWithOptions(rolesResource, rolesKind, c.ns, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1.RoleList{ListMeta: obj.(*v1.RoleList).ListMeta} - for _, item := range obj.(*v1.RoleList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested roles. -func (c *FakeRoles) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchActionWithOptions(rolesResource, c.ns, opts)) - } -// Create takes the representation of a role and creates it. Returns the server's representation of the role, and an error, if there is any. -func (c *FakeRoles) Create(ctx context.Context, role *v1.Role, opts metav1.CreateOptions) (result *v1.Role, err error) { - emptyResult := &v1.Role{} - obj, err := c.Fake. - Invokes(testing.NewCreateActionWithOptions(rolesResource, c.ns, role, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1.Role), err -} - -// Update takes the representation of a role and updates it. Returns the server's representation of the role, and an error, if there is any. -func (c *FakeRoles) Update(ctx context.Context, role *v1.Role, opts metav1.UpdateOptions) (result *v1.Role, err error) { - emptyResult := &v1.Role{} - obj, err := c.Fake. - Invokes(testing.NewUpdateActionWithOptions(rolesResource, c.ns, role, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1.Role), err -} - -// Delete takes name of the role and deletes it. Returns an error if one occurs. -func (c *FakeRoles) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - _, err := c.Fake. 
- Invokes(testing.NewDeleteActionWithOptions(rolesResource, c.ns, name, opts), &v1.Role{}) - - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeRoles) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewDeleteCollectionActionWithOptions(rolesResource, c.ns, opts, listOpts) - - _, err := c.Fake.Invokes(action, &v1.RoleList{}) - return err -} - -// Patch applies the patch and returns the patched role. -func (c *FakeRoles) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Role, err error) { - emptyResult := &v1.Role{} - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceActionWithOptions(rolesResource, c.ns, name, pt, data, opts, subresources...), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1.Role), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied role. -func (c *FakeRoles) Apply(ctx context.Context, role *rbacv1.RoleApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Role, err error) { - if role == nil { - return nil, fmt.Errorf("role provided to Apply must not be nil") - } - data, err := json.Marshal(role) - if err != nil { - return nil, err - } - name := role.Name - if name == nil { - return nil, fmt.Errorf("role.Name must be provided to Apply") - } - emptyResult := &v1.Role{} - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceActionWithOptions(rolesResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) - - if obj == nil { - return emptyResult, err +func newFakeRoles(fake *FakeRbacV1, namespace string) typedrbacv1.RoleInterface { + return &fakeRoles{ + gentype.NewFakeClientWithListAndApply[*v1.Role, *v1.RoleList, *rbacv1.RoleApplyConfiguration]( + fake.Fake, + namespace, + v1.SchemeGroupVersion.WithResource("roles"), + v1.SchemeGroupVersion.WithKind("Role"), + func() *v1.Role { return &v1.Role{} }, + func() *v1.RoleList { return &v1.RoleList{} }, + func(dst, src *v1.RoleList) { dst.ListMeta = src.ListMeta }, + func(list *v1.RoleList) []*v1.Role { return gentype.ToPointerSlice(list.Items) }, + func(list *v1.RoleList, items []*v1.Role) { list.Items = gentype.FromPointerSlice(items) }, + ), + fake, } - return obj.(*v1.Role), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_rolebinding.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_rolebinding.go index 6d7d7d193..28246da57 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_rolebinding.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_rolebinding.go @@ -19,142 +19,31 @@ limitations under the License. 
package fake import ( - "context" - json "encoding/json" - "fmt" - v1 "k8s.io/api/rbac/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" rbacv1 "k8s.io/client-go/applyconfigurations/rbac/v1" - testing "k8s.io/client-go/testing" + gentype "k8s.io/client-go/gentype" + typedrbacv1 "k8s.io/client-go/kubernetes/typed/rbac/v1" ) -// FakeRoleBindings implements RoleBindingInterface -type FakeRoleBindings struct { +// fakeRoleBindings implements RoleBindingInterface +type fakeRoleBindings struct { + *gentype.FakeClientWithListAndApply[*v1.RoleBinding, *v1.RoleBindingList, *rbacv1.RoleBindingApplyConfiguration] Fake *FakeRbacV1 - ns string -} - -var rolebindingsResource = v1.SchemeGroupVersion.WithResource("rolebindings") - -var rolebindingsKind = v1.SchemeGroupVersion.WithKind("RoleBinding") - -// Get takes name of the roleBinding, and returns the corresponding roleBinding object, and an error if there is any. -func (c *FakeRoleBindings) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.RoleBinding, err error) { - emptyResult := &v1.RoleBinding{} - obj, err := c.Fake. - Invokes(testing.NewGetActionWithOptions(rolebindingsResource, c.ns, name, options), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1.RoleBinding), err -} - -// List takes label and field selectors, and returns the list of RoleBindings that match those selectors. -func (c *FakeRoleBindings) List(ctx context.Context, opts metav1.ListOptions) (result *v1.RoleBindingList, err error) { - emptyResult := &v1.RoleBindingList{} - obj, err := c.Fake. - Invokes(testing.NewListActionWithOptions(rolebindingsResource, rolebindingsKind, c.ns, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1.RoleBindingList{ListMeta: obj.(*v1.RoleBindingList).ListMeta} - for _, item := range obj.(*v1.RoleBindingList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested roleBindings. -func (c *FakeRoleBindings) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchActionWithOptions(rolebindingsResource, c.ns, opts)) - } -// Create takes the representation of a roleBinding and creates it. Returns the server's representation of the roleBinding, and an error, if there is any. -func (c *FakeRoleBindings) Create(ctx context.Context, roleBinding *v1.RoleBinding, opts metav1.CreateOptions) (result *v1.RoleBinding, err error) { - emptyResult := &v1.RoleBinding{} - obj, err := c.Fake. - Invokes(testing.NewCreateActionWithOptions(rolebindingsResource, c.ns, roleBinding, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1.RoleBinding), err -} - -// Update takes the representation of a roleBinding and updates it. Returns the server's representation of the roleBinding, and an error, if there is any. -func (c *FakeRoleBindings) Update(ctx context.Context, roleBinding *v1.RoleBinding, opts metav1.UpdateOptions) (result *v1.RoleBinding, err error) { - emptyResult := &v1.RoleBinding{} - obj, err := c.Fake. 
- Invokes(testing.NewUpdateActionWithOptions(rolebindingsResource, c.ns, roleBinding, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1.RoleBinding), err -} - -// Delete takes name of the roleBinding and deletes it. Returns an error if one occurs. -func (c *FakeRoleBindings) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(rolebindingsResource, c.ns, name, opts), &v1.RoleBinding{}) - - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeRoleBindings) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewDeleteCollectionActionWithOptions(rolebindingsResource, c.ns, opts, listOpts) - - _, err := c.Fake.Invokes(action, &v1.RoleBindingList{}) - return err -} - -// Patch applies the patch and returns the patched roleBinding. -func (c *FakeRoleBindings) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.RoleBinding, err error) { - emptyResult := &v1.RoleBinding{} - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceActionWithOptions(rolebindingsResource, c.ns, name, pt, data, opts, subresources...), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1.RoleBinding), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied roleBinding. -func (c *FakeRoleBindings) Apply(ctx context.Context, roleBinding *rbacv1.RoleBindingApplyConfiguration, opts metav1.ApplyOptions) (result *v1.RoleBinding, err error) { - if roleBinding == nil { - return nil, fmt.Errorf("roleBinding provided to Apply must not be nil") - } - data, err := json.Marshal(roleBinding) - if err != nil { - return nil, err - } - name := roleBinding.Name - if name == nil { - return nil, fmt.Errorf("roleBinding.Name must be provided to Apply") - } - emptyResult := &v1.RoleBinding{} - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceActionWithOptions(rolebindingsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) - - if obj == nil { - return emptyResult, err +func newFakeRoleBindings(fake *FakeRbacV1, namespace string) typedrbacv1.RoleBindingInterface { + return &fakeRoleBindings{ + gentype.NewFakeClientWithListAndApply[*v1.RoleBinding, *v1.RoleBindingList, *rbacv1.RoleBindingApplyConfiguration]( + fake.Fake, + namespace, + v1.SchemeGroupVersion.WithResource("rolebindings"), + v1.SchemeGroupVersion.WithKind("RoleBinding"), + func() *v1.RoleBinding { return &v1.RoleBinding{} }, + func() *v1.RoleBindingList { return &v1.RoleBindingList{} }, + func(dst, src *v1.RoleBindingList) { dst.ListMeta = src.ListMeta }, + func(list *v1.RoleBindingList) []*v1.RoleBinding { return gentype.ToPointerSlice(list.Items) }, + func(list *v1.RoleBindingList, items []*v1.RoleBinding) { list.Items = gentype.FromPointerSlice(items) }, + ), + fake, } - return obj.(*v1.RoleBinding), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/rbac_client.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/rbac_client.go index a02f0357d..c586ee638 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/rbac_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/rbac_client.go @@ -19,10 +19,10 @@ limitations under the License. 
package v1 import ( - "net/http" + http "net/http" - v1 "k8s.io/api/rbac/v1" - "k8s.io/client-go/kubernetes/scheme" + rbacv1 "k8s.io/api/rbac/v1" + scheme "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -100,10 +100,10 @@ func New(c rest.Interface) *RbacV1Client { } func setConfigDefaults(config *rest.Config) error { - gv := v1.SchemeGroupVersion + gv := rbacv1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/role.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/role.go index b75b055f0..c3a9ba135 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/role.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/role.go @@ -19,13 +19,13 @@ limitations under the License. package v1 import ( - "context" + context "context" - v1 "k8s.io/api/rbac/v1" + rbacv1 "k8s.io/api/rbac/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - rbacv1 "k8s.io/client-go/applyconfigurations/rbac/v1" + applyconfigurationsrbacv1 "k8s.io/client-go/applyconfigurations/rbac/v1" gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" ) @@ -38,32 +38,34 @@ type RolesGetter interface { // RoleInterface has methods to work with Role resources. type RoleInterface interface { - Create(ctx context.Context, role *v1.Role, opts metav1.CreateOptions) (*v1.Role, error) - Update(ctx context.Context, role *v1.Role, opts metav1.UpdateOptions) (*v1.Role, error) + Create(ctx context.Context, role *rbacv1.Role, opts metav1.CreateOptions) (*rbacv1.Role, error) + Update(ctx context.Context, role *rbacv1.Role, opts metav1.UpdateOptions) (*rbacv1.Role, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error - Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.Role, error) - List(ctx context.Context, opts metav1.ListOptions) (*v1.RoleList, error) + Get(ctx context.Context, name string, opts metav1.GetOptions) (*rbacv1.Role, error) + List(ctx context.Context, opts metav1.ListOptions) (*rbacv1.RoleList, error) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Role, err error) - Apply(ctx context.Context, role *rbacv1.RoleApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Role, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *rbacv1.Role, err error) + Apply(ctx context.Context, role *applyconfigurationsrbacv1.RoleApplyConfiguration, opts metav1.ApplyOptions) (result *rbacv1.Role, err error) RoleExpansion } // roles implements RoleInterface type roles struct { - *gentype.ClientWithListAndApply[*v1.Role, *v1.RoleList, *rbacv1.RoleApplyConfiguration] + *gentype.ClientWithListAndApply[*rbacv1.Role, *rbacv1.RoleList, *applyconfigurationsrbacv1.RoleApplyConfiguration] } // newRoles returns a Roles func newRoles(c *RbacV1Client, namespace string) *roles { return &roles{ - 
gentype.NewClientWithListAndApply[*v1.Role, *v1.RoleList, *rbacv1.RoleApplyConfiguration]( + gentype.NewClientWithListAndApply[*rbacv1.Role, *rbacv1.RoleList, *applyconfigurationsrbacv1.RoleApplyConfiguration]( "roles", c.RESTClient(), scheme.ParameterCodec, namespace, - func() *v1.Role { return &v1.Role{} }, - func() *v1.RoleList { return &v1.RoleList{} }), + func() *rbacv1.Role { return &rbacv1.Role{} }, + func() *rbacv1.RoleList { return &rbacv1.RoleList{} }, + gentype.PrefersProtobuf[*rbacv1.Role](), + ), } } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/rolebinding.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/rolebinding.go index fcbb1c0e2..1f5a39490 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/rolebinding.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/rolebinding.go @@ -19,13 +19,13 @@ limitations under the License. package v1 import ( - "context" + context "context" - v1 "k8s.io/api/rbac/v1" + rbacv1 "k8s.io/api/rbac/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - rbacv1 "k8s.io/client-go/applyconfigurations/rbac/v1" + applyconfigurationsrbacv1 "k8s.io/client-go/applyconfigurations/rbac/v1" gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" ) @@ -38,32 +38,34 @@ type RoleBindingsGetter interface { // RoleBindingInterface has methods to work with RoleBinding resources. type RoleBindingInterface interface { - Create(ctx context.Context, roleBinding *v1.RoleBinding, opts metav1.CreateOptions) (*v1.RoleBinding, error) - Update(ctx context.Context, roleBinding *v1.RoleBinding, opts metav1.UpdateOptions) (*v1.RoleBinding, error) + Create(ctx context.Context, roleBinding *rbacv1.RoleBinding, opts metav1.CreateOptions) (*rbacv1.RoleBinding, error) + Update(ctx context.Context, roleBinding *rbacv1.RoleBinding, opts metav1.UpdateOptions) (*rbacv1.RoleBinding, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error - Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.RoleBinding, error) - List(ctx context.Context, opts metav1.ListOptions) (*v1.RoleBindingList, error) + Get(ctx context.Context, name string, opts metav1.GetOptions) (*rbacv1.RoleBinding, error) + List(ctx context.Context, opts metav1.ListOptions) (*rbacv1.RoleBindingList, error) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.RoleBinding, err error) - Apply(ctx context.Context, roleBinding *rbacv1.RoleBindingApplyConfiguration, opts metav1.ApplyOptions) (result *v1.RoleBinding, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *rbacv1.RoleBinding, err error) + Apply(ctx context.Context, roleBinding *applyconfigurationsrbacv1.RoleBindingApplyConfiguration, opts metav1.ApplyOptions) (result *rbacv1.RoleBinding, err error) RoleBindingExpansion } // roleBindings implements RoleBindingInterface type roleBindings struct { - *gentype.ClientWithListAndApply[*v1.RoleBinding, *v1.RoleBindingList, *rbacv1.RoleBindingApplyConfiguration] + *gentype.ClientWithListAndApply[*rbacv1.RoleBinding, *rbacv1.RoleBindingList, *applyconfigurationsrbacv1.RoleBindingApplyConfiguration] } // 
newRoleBindings returns a RoleBindings func newRoleBindings(c *RbacV1Client, namespace string) *roleBindings { return &roleBindings{ - gentype.NewClientWithListAndApply[*v1.RoleBinding, *v1.RoleBindingList, *rbacv1.RoleBindingApplyConfiguration]( + gentype.NewClientWithListAndApply[*rbacv1.RoleBinding, *rbacv1.RoleBindingList, *applyconfigurationsrbacv1.RoleBindingApplyConfiguration]( "rolebindings", c.RESTClient(), scheme.ParameterCodec, namespace, - func() *v1.RoleBinding { return &v1.RoleBinding{} }, - func() *v1.RoleBindingList { return &v1.RoleBindingList{} }), + func() *rbacv1.RoleBinding { return &rbacv1.RoleBinding{} }, + func() *rbacv1.RoleBindingList { return &rbacv1.RoleBindingList{} }, + gentype.PrefersProtobuf[*rbacv1.RoleBinding](), + ), } } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrole.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrole.go index f91e2c50a..3874f9dee 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrole.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrole.go @@ -19,13 +19,13 @@ limitations under the License. package v1alpha1 import ( - "context" + context "context" - v1alpha1 "k8s.io/api/rbac/v1alpha1" + rbacv1alpha1 "k8s.io/api/rbac/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - rbacv1alpha1 "k8s.io/client-go/applyconfigurations/rbac/v1alpha1" + applyconfigurationsrbacv1alpha1 "k8s.io/client-go/applyconfigurations/rbac/v1alpha1" gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" ) @@ -38,32 +38,34 @@ type ClusterRolesGetter interface { // ClusterRoleInterface has methods to work with ClusterRole resources. type ClusterRoleInterface interface { - Create(ctx context.Context, clusterRole *v1alpha1.ClusterRole, opts v1.CreateOptions) (*v1alpha1.ClusterRole, error) - Update(ctx context.Context, clusterRole *v1alpha1.ClusterRole, opts v1.UpdateOptions) (*v1alpha1.ClusterRole, error) + Create(ctx context.Context, clusterRole *rbacv1alpha1.ClusterRole, opts v1.CreateOptions) (*rbacv1alpha1.ClusterRole, error) + Update(ctx context.Context, clusterRole *rbacv1alpha1.ClusterRole, opts v1.UpdateOptions) (*rbacv1alpha1.ClusterRole, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.ClusterRole, error) - List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.ClusterRoleList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*rbacv1alpha1.ClusterRole, error) + List(ctx context.Context, opts v1.ListOptions) (*rbacv1alpha1.ClusterRoleList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ClusterRole, err error) - Apply(ctx context.Context, clusterRole *rbacv1alpha1.ClusterRoleApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ClusterRole, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *rbacv1alpha1.ClusterRole, err error) + Apply(ctx context.Context, clusterRole *applyconfigurationsrbacv1alpha1.ClusterRoleApplyConfiguration, opts v1.ApplyOptions) (result *rbacv1alpha1.ClusterRole, err error) 
ClusterRoleExpansion } // clusterRoles implements ClusterRoleInterface type clusterRoles struct { - *gentype.ClientWithListAndApply[*v1alpha1.ClusterRole, *v1alpha1.ClusterRoleList, *rbacv1alpha1.ClusterRoleApplyConfiguration] + *gentype.ClientWithListAndApply[*rbacv1alpha1.ClusterRole, *rbacv1alpha1.ClusterRoleList, *applyconfigurationsrbacv1alpha1.ClusterRoleApplyConfiguration] } // newClusterRoles returns a ClusterRoles func newClusterRoles(c *RbacV1alpha1Client) *clusterRoles { return &clusterRoles{ - gentype.NewClientWithListAndApply[*v1alpha1.ClusterRole, *v1alpha1.ClusterRoleList, *rbacv1alpha1.ClusterRoleApplyConfiguration]( + gentype.NewClientWithListAndApply[*rbacv1alpha1.ClusterRole, *rbacv1alpha1.ClusterRoleList, *applyconfigurationsrbacv1alpha1.ClusterRoleApplyConfiguration]( "clusterroles", c.RESTClient(), scheme.ParameterCodec, "", - func() *v1alpha1.ClusterRole { return &v1alpha1.ClusterRole{} }, - func() *v1alpha1.ClusterRoleList { return &v1alpha1.ClusterRoleList{} }), + func() *rbacv1alpha1.ClusterRole { return &rbacv1alpha1.ClusterRole{} }, + func() *rbacv1alpha1.ClusterRoleList { return &rbacv1alpha1.ClusterRoleList{} }, + gentype.PrefersProtobuf[*rbacv1alpha1.ClusterRole](), + ), } } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrolebinding.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrolebinding.go index 3f04526f0..434f875f8 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrolebinding.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrolebinding.go @@ -19,13 +19,13 @@ limitations under the License. package v1alpha1 import ( - "context" + context "context" - v1alpha1 "k8s.io/api/rbac/v1alpha1" + rbacv1alpha1 "k8s.io/api/rbac/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - rbacv1alpha1 "k8s.io/client-go/applyconfigurations/rbac/v1alpha1" + applyconfigurationsrbacv1alpha1 "k8s.io/client-go/applyconfigurations/rbac/v1alpha1" gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" ) @@ -38,32 +38,34 @@ type ClusterRoleBindingsGetter interface { // ClusterRoleBindingInterface has methods to work with ClusterRoleBinding resources. 
type ClusterRoleBindingInterface interface { - Create(ctx context.Context, clusterRoleBinding *v1alpha1.ClusterRoleBinding, opts v1.CreateOptions) (*v1alpha1.ClusterRoleBinding, error) - Update(ctx context.Context, clusterRoleBinding *v1alpha1.ClusterRoleBinding, opts v1.UpdateOptions) (*v1alpha1.ClusterRoleBinding, error) + Create(ctx context.Context, clusterRoleBinding *rbacv1alpha1.ClusterRoleBinding, opts v1.CreateOptions) (*rbacv1alpha1.ClusterRoleBinding, error) + Update(ctx context.Context, clusterRoleBinding *rbacv1alpha1.ClusterRoleBinding, opts v1.UpdateOptions) (*rbacv1alpha1.ClusterRoleBinding, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.ClusterRoleBinding, error) - List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.ClusterRoleBindingList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*rbacv1alpha1.ClusterRoleBinding, error) + List(ctx context.Context, opts v1.ListOptions) (*rbacv1alpha1.ClusterRoleBindingList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ClusterRoleBinding, err error) - Apply(ctx context.Context, clusterRoleBinding *rbacv1alpha1.ClusterRoleBindingApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ClusterRoleBinding, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *rbacv1alpha1.ClusterRoleBinding, err error) + Apply(ctx context.Context, clusterRoleBinding *applyconfigurationsrbacv1alpha1.ClusterRoleBindingApplyConfiguration, opts v1.ApplyOptions) (result *rbacv1alpha1.ClusterRoleBinding, err error) ClusterRoleBindingExpansion } // clusterRoleBindings implements ClusterRoleBindingInterface type clusterRoleBindings struct { - *gentype.ClientWithListAndApply[*v1alpha1.ClusterRoleBinding, *v1alpha1.ClusterRoleBindingList, *rbacv1alpha1.ClusterRoleBindingApplyConfiguration] + *gentype.ClientWithListAndApply[*rbacv1alpha1.ClusterRoleBinding, *rbacv1alpha1.ClusterRoleBindingList, *applyconfigurationsrbacv1alpha1.ClusterRoleBindingApplyConfiguration] } // newClusterRoleBindings returns a ClusterRoleBindings func newClusterRoleBindings(c *RbacV1alpha1Client) *clusterRoleBindings { return &clusterRoleBindings{ - gentype.NewClientWithListAndApply[*v1alpha1.ClusterRoleBinding, *v1alpha1.ClusterRoleBindingList, *rbacv1alpha1.ClusterRoleBindingApplyConfiguration]( + gentype.NewClientWithListAndApply[*rbacv1alpha1.ClusterRoleBinding, *rbacv1alpha1.ClusterRoleBindingList, *applyconfigurationsrbacv1alpha1.ClusterRoleBindingApplyConfiguration]( "clusterrolebindings", c.RESTClient(), scheme.ParameterCodec, "", - func() *v1alpha1.ClusterRoleBinding { return &v1alpha1.ClusterRoleBinding{} }, - func() *v1alpha1.ClusterRoleBindingList { return &v1alpha1.ClusterRoleBindingList{} }), + func() *rbacv1alpha1.ClusterRoleBinding { return &rbacv1alpha1.ClusterRoleBinding{} }, + func() *rbacv1alpha1.ClusterRoleBindingList { return &rbacv1alpha1.ClusterRoleBindingList{} }, + gentype.PrefersProtobuf[*rbacv1alpha1.ClusterRoleBinding](), + ), } } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_clusterrole.go 
b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_clusterrole.go index 34c9a853e..668999da5 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_clusterrole.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_clusterrole.go @@ -19,133 +19,35 @@ limitations under the License. package fake import ( - "context" - json "encoding/json" - "fmt" - v1alpha1 "k8s.io/api/rbac/v1alpha1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" rbacv1alpha1 "k8s.io/client-go/applyconfigurations/rbac/v1alpha1" - testing "k8s.io/client-go/testing" + gentype "k8s.io/client-go/gentype" + typedrbacv1alpha1 "k8s.io/client-go/kubernetes/typed/rbac/v1alpha1" ) -// FakeClusterRoles implements ClusterRoleInterface -type FakeClusterRoles struct { +// fakeClusterRoles implements ClusterRoleInterface +type fakeClusterRoles struct { + *gentype.FakeClientWithListAndApply[*v1alpha1.ClusterRole, *v1alpha1.ClusterRoleList, *rbacv1alpha1.ClusterRoleApplyConfiguration] Fake *FakeRbacV1alpha1 } -var clusterrolesResource = v1alpha1.SchemeGroupVersion.WithResource("clusterroles") - -var clusterrolesKind = v1alpha1.SchemeGroupVersion.WithKind("ClusterRole") - -// Get takes name of the clusterRole, and returns the corresponding clusterRole object, and an error if there is any. -func (c *FakeClusterRoles) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ClusterRole, err error) { - emptyResult := &v1alpha1.ClusterRole{} - obj, err := c.Fake. - Invokes(testing.NewRootGetActionWithOptions(clusterrolesResource, name, options), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1alpha1.ClusterRole), err -} - -// List takes label and field selectors, and returns the list of ClusterRoles that match those selectors. -func (c *FakeClusterRoles) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ClusterRoleList, err error) { - emptyResult := &v1alpha1.ClusterRoleList{} - obj, err := c.Fake. - Invokes(testing.NewRootListActionWithOptions(clusterrolesResource, clusterrolesKind, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1alpha1.ClusterRoleList{ListMeta: obj.(*v1alpha1.ClusterRoleList).ListMeta} - for _, item := range obj.(*v1alpha1.ClusterRoleList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested clusterRoles. -func (c *FakeClusterRoles) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewRootWatchActionWithOptions(clusterrolesResource, opts)) -} - -// Create takes the representation of a clusterRole and creates it. Returns the server's representation of the clusterRole, and an error, if there is any. -func (c *FakeClusterRoles) Create(ctx context.Context, clusterRole *v1alpha1.ClusterRole, opts v1.CreateOptions) (result *v1alpha1.ClusterRole, err error) { - emptyResult := &v1alpha1.ClusterRole{} - obj, err := c.Fake. 
- Invokes(testing.NewRootCreateActionWithOptions(clusterrolesResource, clusterRole, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1alpha1.ClusterRole), err -} - -// Update takes the representation of a clusterRole and updates it. Returns the server's representation of the clusterRole, and an error, if there is any. -func (c *FakeClusterRoles) Update(ctx context.Context, clusterRole *v1alpha1.ClusterRole, opts v1.UpdateOptions) (result *v1alpha1.ClusterRole, err error) { - emptyResult := &v1alpha1.ClusterRole{} - obj, err := c.Fake. - Invokes(testing.NewRootUpdateActionWithOptions(clusterrolesResource, clusterRole, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1alpha1.ClusterRole), err -} - -// Delete takes name of the clusterRole and deletes it. Returns an error if one occurs. -func (c *FakeClusterRoles) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewRootDeleteActionWithOptions(clusterrolesResource, name, opts), &v1alpha1.ClusterRole{}) - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeClusterRoles) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewRootDeleteCollectionActionWithOptions(clusterrolesResource, opts, listOpts) - - _, err := c.Fake.Invokes(action, &v1alpha1.ClusterRoleList{}) - return err -} - -// Patch applies the patch and returns the patched clusterRole. -func (c *FakeClusterRoles) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ClusterRole, err error) { - emptyResult := &v1alpha1.ClusterRole{} - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceActionWithOptions(clusterrolesResource, name, pt, data, opts, subresources...), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1alpha1.ClusterRole), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied clusterRole. -func (c *FakeClusterRoles) Apply(ctx context.Context, clusterRole *rbacv1alpha1.ClusterRoleApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ClusterRole, err error) { - if clusterRole == nil { - return nil, fmt.Errorf("clusterRole provided to Apply must not be nil") - } - data, err := json.Marshal(clusterRole) - if err != nil { - return nil, err - } - name := clusterRole.Name - if name == nil { - return nil, fmt.Errorf("clusterRole.Name must be provided to Apply") - } - emptyResult := &v1alpha1.ClusterRole{} - obj, err := c.Fake. 
- Invokes(testing.NewRootPatchSubresourceActionWithOptions(clusterrolesResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) - if obj == nil { - return emptyResult, err +func newFakeClusterRoles(fake *FakeRbacV1alpha1) typedrbacv1alpha1.ClusterRoleInterface { + return &fakeClusterRoles{ + gentype.NewFakeClientWithListAndApply[*v1alpha1.ClusterRole, *v1alpha1.ClusterRoleList, *rbacv1alpha1.ClusterRoleApplyConfiguration]( + fake.Fake, + "", + v1alpha1.SchemeGroupVersion.WithResource("clusterroles"), + v1alpha1.SchemeGroupVersion.WithKind("ClusterRole"), + func() *v1alpha1.ClusterRole { return &v1alpha1.ClusterRole{} }, + func() *v1alpha1.ClusterRoleList { return &v1alpha1.ClusterRoleList{} }, + func(dst, src *v1alpha1.ClusterRoleList) { dst.ListMeta = src.ListMeta }, + func(list *v1alpha1.ClusterRoleList) []*v1alpha1.ClusterRole { + return gentype.ToPointerSlice(list.Items) + }, + func(list *v1alpha1.ClusterRoleList, items []*v1alpha1.ClusterRole) { + list.Items = gentype.FromPointerSlice(items) + }, + ), + fake, } - return obj.(*v1alpha1.ClusterRole), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_clusterrolebinding.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_clusterrolebinding.go index d42f76342..6c275537d 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_clusterrolebinding.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_clusterrolebinding.go @@ -19,133 +19,35 @@ limitations under the License. package fake import ( - "context" - json "encoding/json" - "fmt" - v1alpha1 "k8s.io/api/rbac/v1alpha1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" rbacv1alpha1 "k8s.io/client-go/applyconfigurations/rbac/v1alpha1" - testing "k8s.io/client-go/testing" + gentype "k8s.io/client-go/gentype" + typedrbacv1alpha1 "k8s.io/client-go/kubernetes/typed/rbac/v1alpha1" ) -// FakeClusterRoleBindings implements ClusterRoleBindingInterface -type FakeClusterRoleBindings struct { +// fakeClusterRoleBindings implements ClusterRoleBindingInterface +type fakeClusterRoleBindings struct { + *gentype.FakeClientWithListAndApply[*v1alpha1.ClusterRoleBinding, *v1alpha1.ClusterRoleBindingList, *rbacv1alpha1.ClusterRoleBindingApplyConfiguration] Fake *FakeRbacV1alpha1 } -var clusterrolebindingsResource = v1alpha1.SchemeGroupVersion.WithResource("clusterrolebindings") - -var clusterrolebindingsKind = v1alpha1.SchemeGroupVersion.WithKind("ClusterRoleBinding") - -// Get takes name of the clusterRoleBinding, and returns the corresponding clusterRoleBinding object, and an error if there is any. -func (c *FakeClusterRoleBindings) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ClusterRoleBinding, err error) { - emptyResult := &v1alpha1.ClusterRoleBinding{} - obj, err := c.Fake. - Invokes(testing.NewRootGetActionWithOptions(clusterrolebindingsResource, name, options), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1alpha1.ClusterRoleBinding), err -} - -// List takes label and field selectors, and returns the list of ClusterRoleBindings that match those selectors. -func (c *FakeClusterRoleBindings) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ClusterRoleBindingList, err error) { - emptyResult := &v1alpha1.ClusterRoleBindingList{} - obj, err := c.Fake. 
- Invokes(testing.NewRootListActionWithOptions(clusterrolebindingsResource, clusterrolebindingsKind, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1alpha1.ClusterRoleBindingList{ListMeta: obj.(*v1alpha1.ClusterRoleBindingList).ListMeta} - for _, item := range obj.(*v1alpha1.ClusterRoleBindingList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested clusterRoleBindings. -func (c *FakeClusterRoleBindings) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewRootWatchActionWithOptions(clusterrolebindingsResource, opts)) -} - -// Create takes the representation of a clusterRoleBinding and creates it. Returns the server's representation of the clusterRoleBinding, and an error, if there is any. -func (c *FakeClusterRoleBindings) Create(ctx context.Context, clusterRoleBinding *v1alpha1.ClusterRoleBinding, opts v1.CreateOptions) (result *v1alpha1.ClusterRoleBinding, err error) { - emptyResult := &v1alpha1.ClusterRoleBinding{} - obj, err := c.Fake. - Invokes(testing.NewRootCreateActionWithOptions(clusterrolebindingsResource, clusterRoleBinding, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1alpha1.ClusterRoleBinding), err -} - -// Update takes the representation of a clusterRoleBinding and updates it. Returns the server's representation of the clusterRoleBinding, and an error, if there is any. -func (c *FakeClusterRoleBindings) Update(ctx context.Context, clusterRoleBinding *v1alpha1.ClusterRoleBinding, opts v1.UpdateOptions) (result *v1alpha1.ClusterRoleBinding, err error) { - emptyResult := &v1alpha1.ClusterRoleBinding{} - obj, err := c.Fake. - Invokes(testing.NewRootUpdateActionWithOptions(clusterrolebindingsResource, clusterRoleBinding, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1alpha1.ClusterRoleBinding), err -} - -// Delete takes name of the clusterRoleBinding and deletes it. Returns an error if one occurs. -func (c *FakeClusterRoleBindings) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewRootDeleteActionWithOptions(clusterrolebindingsResource, name, opts), &v1alpha1.ClusterRoleBinding{}) - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeClusterRoleBindings) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewRootDeleteCollectionActionWithOptions(clusterrolebindingsResource, opts, listOpts) - - _, err := c.Fake.Invokes(action, &v1alpha1.ClusterRoleBindingList{}) - return err -} - -// Patch applies the patch and returns the patched clusterRoleBinding. -func (c *FakeClusterRoleBindings) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ClusterRoleBinding, err error) { - emptyResult := &v1alpha1.ClusterRoleBinding{} - obj, err := c.Fake. 
- Invokes(testing.NewRootPatchSubresourceActionWithOptions(clusterrolebindingsResource, name, pt, data, opts, subresources...), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1alpha1.ClusterRoleBinding), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied clusterRoleBinding. -func (c *FakeClusterRoleBindings) Apply(ctx context.Context, clusterRoleBinding *rbacv1alpha1.ClusterRoleBindingApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ClusterRoleBinding, err error) { - if clusterRoleBinding == nil { - return nil, fmt.Errorf("clusterRoleBinding provided to Apply must not be nil") - } - data, err := json.Marshal(clusterRoleBinding) - if err != nil { - return nil, err - } - name := clusterRoleBinding.Name - if name == nil { - return nil, fmt.Errorf("clusterRoleBinding.Name must be provided to Apply") - } - emptyResult := &v1alpha1.ClusterRoleBinding{} - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceActionWithOptions(clusterrolebindingsResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) - if obj == nil { - return emptyResult, err +func newFakeClusterRoleBindings(fake *FakeRbacV1alpha1) typedrbacv1alpha1.ClusterRoleBindingInterface { + return &fakeClusterRoleBindings{ + gentype.NewFakeClientWithListAndApply[*v1alpha1.ClusterRoleBinding, *v1alpha1.ClusterRoleBindingList, *rbacv1alpha1.ClusterRoleBindingApplyConfiguration]( + fake.Fake, + "", + v1alpha1.SchemeGroupVersion.WithResource("clusterrolebindings"), + v1alpha1.SchemeGroupVersion.WithKind("ClusterRoleBinding"), + func() *v1alpha1.ClusterRoleBinding { return &v1alpha1.ClusterRoleBinding{} }, + func() *v1alpha1.ClusterRoleBindingList { return &v1alpha1.ClusterRoleBindingList{} }, + func(dst, src *v1alpha1.ClusterRoleBindingList) { dst.ListMeta = src.ListMeta }, + func(list *v1alpha1.ClusterRoleBindingList) []*v1alpha1.ClusterRoleBinding { + return gentype.ToPointerSlice(list.Items) + }, + func(list *v1alpha1.ClusterRoleBindingList, items []*v1alpha1.ClusterRoleBinding) { + list.Items = gentype.FromPointerSlice(items) + }, + ), + fake, } - return obj.(*v1alpha1.ClusterRoleBinding), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_rbac_client.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_rbac_client.go index 3447e9be8..df66b5ea9 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_rbac_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_rbac_client.go @@ -29,19 +29,19 @@ type FakeRbacV1alpha1 struct { } func (c *FakeRbacV1alpha1) ClusterRoles() v1alpha1.ClusterRoleInterface { - return &FakeClusterRoles{c} + return newFakeClusterRoles(c) } func (c *FakeRbacV1alpha1) ClusterRoleBindings() v1alpha1.ClusterRoleBindingInterface { - return &FakeClusterRoleBindings{c} + return newFakeClusterRoleBindings(c) } func (c *FakeRbacV1alpha1) Roles(namespace string) v1alpha1.RoleInterface { - return &FakeRoles{c, namespace} + return newFakeRoles(c, namespace) } func (c *FakeRbacV1alpha1) RoleBindings(namespace string) v1alpha1.RoleBindingInterface { - return &FakeRoleBindings{c, namespace} + return newFakeRoleBindings(c, namespace) } // RESTClient returns a RESTClient that is used to communicate diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_role.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_role.go index 9b0ba7cac..21ed226cd 100644 --- 
a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_role.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_role.go @@ -19,142 +19,31 @@ limitations under the License. package fake import ( - "context" - json "encoding/json" - "fmt" - v1alpha1 "k8s.io/api/rbac/v1alpha1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" rbacv1alpha1 "k8s.io/client-go/applyconfigurations/rbac/v1alpha1" - testing "k8s.io/client-go/testing" + gentype "k8s.io/client-go/gentype" + typedrbacv1alpha1 "k8s.io/client-go/kubernetes/typed/rbac/v1alpha1" ) -// FakeRoles implements RoleInterface -type FakeRoles struct { +// fakeRoles implements RoleInterface +type fakeRoles struct { + *gentype.FakeClientWithListAndApply[*v1alpha1.Role, *v1alpha1.RoleList, *rbacv1alpha1.RoleApplyConfiguration] Fake *FakeRbacV1alpha1 - ns string -} - -var rolesResource = v1alpha1.SchemeGroupVersion.WithResource("roles") - -var rolesKind = v1alpha1.SchemeGroupVersion.WithKind("Role") - -// Get takes name of the role, and returns the corresponding role object, and an error if there is any. -func (c *FakeRoles) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.Role, err error) { - emptyResult := &v1alpha1.Role{} - obj, err := c.Fake. - Invokes(testing.NewGetActionWithOptions(rolesResource, c.ns, name, options), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1alpha1.Role), err -} - -// List takes label and field selectors, and returns the list of Roles that match those selectors. -func (c *FakeRoles) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.RoleList, err error) { - emptyResult := &v1alpha1.RoleList{} - obj, err := c.Fake. - Invokes(testing.NewListActionWithOptions(rolesResource, rolesKind, c.ns, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1alpha1.RoleList{ListMeta: obj.(*v1alpha1.RoleList).ListMeta} - for _, item := range obj.(*v1alpha1.RoleList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested roles. -func (c *FakeRoles) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchActionWithOptions(rolesResource, c.ns, opts)) - } -// Create takes the representation of a role and creates it. Returns the server's representation of the role, and an error, if there is any. -func (c *FakeRoles) Create(ctx context.Context, role *v1alpha1.Role, opts v1.CreateOptions) (result *v1alpha1.Role, err error) { - emptyResult := &v1alpha1.Role{} - obj, err := c.Fake. - Invokes(testing.NewCreateActionWithOptions(rolesResource, c.ns, role, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1alpha1.Role), err -} - -// Update takes the representation of a role and updates it. Returns the server's representation of the role, and an error, if there is any. -func (c *FakeRoles) Update(ctx context.Context, role *v1alpha1.Role, opts v1.UpdateOptions) (result *v1alpha1.Role, err error) { - emptyResult := &v1alpha1.Role{} - obj, err := c.Fake. 
- Invokes(testing.NewUpdateActionWithOptions(rolesResource, c.ns, role, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1alpha1.Role), err -} - -// Delete takes name of the role and deletes it. Returns an error if one occurs. -func (c *FakeRoles) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(rolesResource, c.ns, name, opts), &v1alpha1.Role{}) - - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeRoles) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionActionWithOptions(rolesResource, c.ns, opts, listOpts) - - _, err := c.Fake.Invokes(action, &v1alpha1.RoleList{}) - return err -} - -// Patch applies the patch and returns the patched role. -func (c *FakeRoles) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.Role, err error) { - emptyResult := &v1alpha1.Role{} - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceActionWithOptions(rolesResource, c.ns, name, pt, data, opts, subresources...), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1alpha1.Role), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied role. -func (c *FakeRoles) Apply(ctx context.Context, role *rbacv1alpha1.RoleApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.Role, err error) { - if role == nil { - return nil, fmt.Errorf("role provided to Apply must not be nil") - } - data, err := json.Marshal(role) - if err != nil { - return nil, err - } - name := role.Name - if name == nil { - return nil, fmt.Errorf("role.Name must be provided to Apply") - } - emptyResult := &v1alpha1.Role{} - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceActionWithOptions(rolesResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) - - if obj == nil { - return emptyResult, err +func newFakeRoles(fake *FakeRbacV1alpha1, namespace string) typedrbacv1alpha1.RoleInterface { + return &fakeRoles{ + gentype.NewFakeClientWithListAndApply[*v1alpha1.Role, *v1alpha1.RoleList, *rbacv1alpha1.RoleApplyConfiguration]( + fake.Fake, + namespace, + v1alpha1.SchemeGroupVersion.WithResource("roles"), + v1alpha1.SchemeGroupVersion.WithKind("Role"), + func() *v1alpha1.Role { return &v1alpha1.Role{} }, + func() *v1alpha1.RoleList { return &v1alpha1.RoleList{} }, + func(dst, src *v1alpha1.RoleList) { dst.ListMeta = src.ListMeta }, + func(list *v1alpha1.RoleList) []*v1alpha1.Role { return gentype.ToPointerSlice(list.Items) }, + func(list *v1alpha1.RoleList, items []*v1alpha1.Role) { list.Items = gentype.FromPointerSlice(items) }, + ), + fake, } - return obj.(*v1alpha1.Role), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_rolebinding.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_rolebinding.go index f572945ac..c27d5f262 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_rolebinding.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_rolebinding.go @@ -19,142 +19,35 @@ limitations under the License. 
package fake import ( - "context" - json "encoding/json" - "fmt" - v1alpha1 "k8s.io/api/rbac/v1alpha1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" rbacv1alpha1 "k8s.io/client-go/applyconfigurations/rbac/v1alpha1" - testing "k8s.io/client-go/testing" + gentype "k8s.io/client-go/gentype" + typedrbacv1alpha1 "k8s.io/client-go/kubernetes/typed/rbac/v1alpha1" ) -// FakeRoleBindings implements RoleBindingInterface -type FakeRoleBindings struct { +// fakeRoleBindings implements RoleBindingInterface +type fakeRoleBindings struct { + *gentype.FakeClientWithListAndApply[*v1alpha1.RoleBinding, *v1alpha1.RoleBindingList, *rbacv1alpha1.RoleBindingApplyConfiguration] Fake *FakeRbacV1alpha1 - ns string -} - -var rolebindingsResource = v1alpha1.SchemeGroupVersion.WithResource("rolebindings") - -var rolebindingsKind = v1alpha1.SchemeGroupVersion.WithKind("RoleBinding") - -// Get takes name of the roleBinding, and returns the corresponding roleBinding object, and an error if there is any. -func (c *FakeRoleBindings) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.RoleBinding, err error) { - emptyResult := &v1alpha1.RoleBinding{} - obj, err := c.Fake. - Invokes(testing.NewGetActionWithOptions(rolebindingsResource, c.ns, name, options), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1alpha1.RoleBinding), err -} - -// List takes label and field selectors, and returns the list of RoleBindings that match those selectors. -func (c *FakeRoleBindings) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.RoleBindingList, err error) { - emptyResult := &v1alpha1.RoleBindingList{} - obj, err := c.Fake. - Invokes(testing.NewListActionWithOptions(rolebindingsResource, rolebindingsKind, c.ns, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1alpha1.RoleBindingList{ListMeta: obj.(*v1alpha1.RoleBindingList).ListMeta} - for _, item := range obj.(*v1alpha1.RoleBindingList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested roleBindings. -func (c *FakeRoleBindings) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchActionWithOptions(rolebindingsResource, c.ns, opts)) - } -// Create takes the representation of a roleBinding and creates it. Returns the server's representation of the roleBinding, and an error, if there is any. -func (c *FakeRoleBindings) Create(ctx context.Context, roleBinding *v1alpha1.RoleBinding, opts v1.CreateOptions) (result *v1alpha1.RoleBinding, err error) { - emptyResult := &v1alpha1.RoleBinding{} - obj, err := c.Fake. - Invokes(testing.NewCreateActionWithOptions(rolebindingsResource, c.ns, roleBinding, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1alpha1.RoleBinding), err -} - -// Update takes the representation of a roleBinding and updates it. Returns the server's representation of the roleBinding, and an error, if there is any. 
-func (c *FakeRoleBindings) Update(ctx context.Context, roleBinding *v1alpha1.RoleBinding, opts v1.UpdateOptions) (result *v1alpha1.RoleBinding, err error) { - emptyResult := &v1alpha1.RoleBinding{} - obj, err := c.Fake. - Invokes(testing.NewUpdateActionWithOptions(rolebindingsResource, c.ns, roleBinding, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1alpha1.RoleBinding), err -} - -// Delete takes name of the roleBinding and deletes it. Returns an error if one occurs. -func (c *FakeRoleBindings) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(rolebindingsResource, c.ns, name, opts), &v1alpha1.RoleBinding{}) - - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeRoleBindings) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionActionWithOptions(rolebindingsResource, c.ns, opts, listOpts) - - _, err := c.Fake.Invokes(action, &v1alpha1.RoleBindingList{}) - return err -} - -// Patch applies the patch and returns the patched roleBinding. -func (c *FakeRoleBindings) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.RoleBinding, err error) { - emptyResult := &v1alpha1.RoleBinding{} - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceActionWithOptions(rolebindingsResource, c.ns, name, pt, data, opts, subresources...), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1alpha1.RoleBinding), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied roleBinding. -func (c *FakeRoleBindings) Apply(ctx context.Context, roleBinding *rbacv1alpha1.RoleBindingApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.RoleBinding, err error) { - if roleBinding == nil { - return nil, fmt.Errorf("roleBinding provided to Apply must not be nil") - } - data, err := json.Marshal(roleBinding) - if err != nil { - return nil, err - } - name := roleBinding.Name - if name == nil { - return nil, fmt.Errorf("roleBinding.Name must be provided to Apply") - } - emptyResult := &v1alpha1.RoleBinding{} - obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceActionWithOptions(rolebindingsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) - - if obj == nil { - return emptyResult, err +func newFakeRoleBindings(fake *FakeRbacV1alpha1, namespace string) typedrbacv1alpha1.RoleBindingInterface { + return &fakeRoleBindings{ + gentype.NewFakeClientWithListAndApply[*v1alpha1.RoleBinding, *v1alpha1.RoleBindingList, *rbacv1alpha1.RoleBindingApplyConfiguration]( + fake.Fake, + namespace, + v1alpha1.SchemeGroupVersion.WithResource("rolebindings"), + v1alpha1.SchemeGroupVersion.WithKind("RoleBinding"), + func() *v1alpha1.RoleBinding { return &v1alpha1.RoleBinding{} }, + func() *v1alpha1.RoleBindingList { return &v1alpha1.RoleBindingList{} }, + func(dst, src *v1alpha1.RoleBindingList) { dst.ListMeta = src.ListMeta }, + func(list *v1alpha1.RoleBindingList) []*v1alpha1.RoleBinding { + return gentype.ToPointerSlice(list.Items) + }, + func(list *v1alpha1.RoleBindingList, items []*v1alpha1.RoleBinding) { + list.Items = gentype.FromPointerSlice(items) + }, + ), + fake, } - return obj.(*v1alpha1.RoleBinding), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/rbac_client.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/rbac_client.go index cc5b309e9..df46fc3aa 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/rbac_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/rbac_client.go @@ -19,10 +19,10 @@ limitations under the License. package v1alpha1 import ( - "net/http" + http "net/http" - v1alpha1 "k8s.io/api/rbac/v1alpha1" - "k8s.io/client-go/kubernetes/scheme" + rbacv1alpha1 "k8s.io/api/rbac/v1alpha1" + scheme "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -100,10 +100,10 @@ func New(c rest.Interface) *RbacV1alpha1Client { } func setConfigDefaults(config *rest.Config) error { - gv := v1alpha1.SchemeGroupVersion + gv := rbacv1alpha1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/role.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/role.go index 4a1876a7d..3a47f673e 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/role.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/role.go @@ -19,13 +19,13 @@ limitations under the License. package v1alpha1 import ( - "context" + context "context" - v1alpha1 "k8s.io/api/rbac/v1alpha1" + rbacv1alpha1 "k8s.io/api/rbac/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - rbacv1alpha1 "k8s.io/client-go/applyconfigurations/rbac/v1alpha1" + applyconfigurationsrbacv1alpha1 "k8s.io/client-go/applyconfigurations/rbac/v1alpha1" gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" ) @@ -38,32 +38,34 @@ type RolesGetter interface { // RoleInterface has methods to work with Role resources. 
type RoleInterface interface { - Create(ctx context.Context, role *v1alpha1.Role, opts v1.CreateOptions) (*v1alpha1.Role, error) - Update(ctx context.Context, role *v1alpha1.Role, opts v1.UpdateOptions) (*v1alpha1.Role, error) + Create(ctx context.Context, role *rbacv1alpha1.Role, opts v1.CreateOptions) (*rbacv1alpha1.Role, error) + Update(ctx context.Context, role *rbacv1alpha1.Role, opts v1.UpdateOptions) (*rbacv1alpha1.Role, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.Role, error) - List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.RoleList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*rbacv1alpha1.Role, error) + List(ctx context.Context, opts v1.ListOptions) (*rbacv1alpha1.RoleList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.Role, err error) - Apply(ctx context.Context, role *rbacv1alpha1.RoleApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.Role, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *rbacv1alpha1.Role, err error) + Apply(ctx context.Context, role *applyconfigurationsrbacv1alpha1.RoleApplyConfiguration, opts v1.ApplyOptions) (result *rbacv1alpha1.Role, err error) RoleExpansion } // roles implements RoleInterface type roles struct { - *gentype.ClientWithListAndApply[*v1alpha1.Role, *v1alpha1.RoleList, *rbacv1alpha1.RoleApplyConfiguration] + *gentype.ClientWithListAndApply[*rbacv1alpha1.Role, *rbacv1alpha1.RoleList, *applyconfigurationsrbacv1alpha1.RoleApplyConfiguration] } // newRoles returns a Roles func newRoles(c *RbacV1alpha1Client, namespace string) *roles { return &roles{ - gentype.NewClientWithListAndApply[*v1alpha1.Role, *v1alpha1.RoleList, *rbacv1alpha1.RoleApplyConfiguration]( + gentype.NewClientWithListAndApply[*rbacv1alpha1.Role, *rbacv1alpha1.RoleList, *applyconfigurationsrbacv1alpha1.RoleApplyConfiguration]( "roles", c.RESTClient(), scheme.ParameterCodec, namespace, - func() *v1alpha1.Role { return &v1alpha1.Role{} }, - func() *v1alpha1.RoleList { return &v1alpha1.RoleList{} }), + func() *rbacv1alpha1.Role { return &rbacv1alpha1.Role{} }, + func() *rbacv1alpha1.RoleList { return &rbacv1alpha1.RoleList{} }, + gentype.PrefersProtobuf[*rbacv1alpha1.Role](), + ), } } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/rolebinding.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/rolebinding.go index 6473132f1..a6293171d 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/rolebinding.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/rolebinding.go @@ -19,13 +19,13 @@ limitations under the License. 
package v1alpha1 import ( - "context" + context "context" - v1alpha1 "k8s.io/api/rbac/v1alpha1" + rbacv1alpha1 "k8s.io/api/rbac/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - rbacv1alpha1 "k8s.io/client-go/applyconfigurations/rbac/v1alpha1" + applyconfigurationsrbacv1alpha1 "k8s.io/client-go/applyconfigurations/rbac/v1alpha1" gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" ) @@ -38,32 +38,34 @@ type RoleBindingsGetter interface { // RoleBindingInterface has methods to work with RoleBinding resources. type RoleBindingInterface interface { - Create(ctx context.Context, roleBinding *v1alpha1.RoleBinding, opts v1.CreateOptions) (*v1alpha1.RoleBinding, error) - Update(ctx context.Context, roleBinding *v1alpha1.RoleBinding, opts v1.UpdateOptions) (*v1alpha1.RoleBinding, error) + Create(ctx context.Context, roleBinding *rbacv1alpha1.RoleBinding, opts v1.CreateOptions) (*rbacv1alpha1.RoleBinding, error) + Update(ctx context.Context, roleBinding *rbacv1alpha1.RoleBinding, opts v1.UpdateOptions) (*rbacv1alpha1.RoleBinding, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.RoleBinding, error) - List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.RoleBindingList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*rbacv1alpha1.RoleBinding, error) + List(ctx context.Context, opts v1.ListOptions) (*rbacv1alpha1.RoleBindingList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.RoleBinding, err error) - Apply(ctx context.Context, roleBinding *rbacv1alpha1.RoleBindingApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.RoleBinding, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *rbacv1alpha1.RoleBinding, err error) + Apply(ctx context.Context, roleBinding *applyconfigurationsrbacv1alpha1.RoleBindingApplyConfiguration, opts v1.ApplyOptions) (result *rbacv1alpha1.RoleBinding, err error) RoleBindingExpansion } // roleBindings implements RoleBindingInterface type roleBindings struct { - *gentype.ClientWithListAndApply[*v1alpha1.RoleBinding, *v1alpha1.RoleBindingList, *rbacv1alpha1.RoleBindingApplyConfiguration] + *gentype.ClientWithListAndApply[*rbacv1alpha1.RoleBinding, *rbacv1alpha1.RoleBindingList, *applyconfigurationsrbacv1alpha1.RoleBindingApplyConfiguration] } // newRoleBindings returns a RoleBindings func newRoleBindings(c *RbacV1alpha1Client, namespace string) *roleBindings { return &roleBindings{ - gentype.NewClientWithListAndApply[*v1alpha1.RoleBinding, *v1alpha1.RoleBindingList, *rbacv1alpha1.RoleBindingApplyConfiguration]( + gentype.NewClientWithListAndApply[*rbacv1alpha1.RoleBinding, *rbacv1alpha1.RoleBindingList, *applyconfigurationsrbacv1alpha1.RoleBindingApplyConfiguration]( "rolebindings", c.RESTClient(), scheme.ParameterCodec, namespace, - func() *v1alpha1.RoleBinding { return &v1alpha1.RoleBinding{} }, - func() *v1alpha1.RoleBindingList { return &v1alpha1.RoleBindingList{} }), + func() *rbacv1alpha1.RoleBinding { return &rbacv1alpha1.RoleBinding{} }, + func() *rbacv1alpha1.RoleBindingList { return 
&rbacv1alpha1.RoleBindingList{} }, + gentype.PrefersProtobuf[*rbacv1alpha1.RoleBinding](), + ), } } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrole.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrole.go index ed398333a..92388f2f1 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrole.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrole.go @@ -19,13 +19,13 @@ limitations under the License. package v1beta1 import ( - "context" + context "context" - v1beta1 "k8s.io/api/rbac/v1beta1" + rbacv1beta1 "k8s.io/api/rbac/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - rbacv1beta1 "k8s.io/client-go/applyconfigurations/rbac/v1beta1" + applyconfigurationsrbacv1beta1 "k8s.io/client-go/applyconfigurations/rbac/v1beta1" gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" ) @@ -38,32 +38,34 @@ type ClusterRolesGetter interface { // ClusterRoleInterface has methods to work with ClusterRole resources. type ClusterRoleInterface interface { - Create(ctx context.Context, clusterRole *v1beta1.ClusterRole, opts v1.CreateOptions) (*v1beta1.ClusterRole, error) - Update(ctx context.Context, clusterRole *v1beta1.ClusterRole, opts v1.UpdateOptions) (*v1beta1.ClusterRole, error) + Create(ctx context.Context, clusterRole *rbacv1beta1.ClusterRole, opts v1.CreateOptions) (*rbacv1beta1.ClusterRole, error) + Update(ctx context.Context, clusterRole *rbacv1beta1.ClusterRole, opts v1.UpdateOptions) (*rbacv1beta1.ClusterRole, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.ClusterRole, error) - List(ctx context.Context, opts v1.ListOptions) (*v1beta1.ClusterRoleList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*rbacv1beta1.ClusterRole, error) + List(ctx context.Context, opts v1.ListOptions) (*rbacv1beta1.ClusterRoleList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ClusterRole, err error) - Apply(ctx context.Context, clusterRole *rbacv1beta1.ClusterRoleApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.ClusterRole, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *rbacv1beta1.ClusterRole, err error) + Apply(ctx context.Context, clusterRole *applyconfigurationsrbacv1beta1.ClusterRoleApplyConfiguration, opts v1.ApplyOptions) (result *rbacv1beta1.ClusterRole, err error) ClusterRoleExpansion } // clusterRoles implements ClusterRoleInterface type clusterRoles struct { - *gentype.ClientWithListAndApply[*v1beta1.ClusterRole, *v1beta1.ClusterRoleList, *rbacv1beta1.ClusterRoleApplyConfiguration] + *gentype.ClientWithListAndApply[*rbacv1beta1.ClusterRole, *rbacv1beta1.ClusterRoleList, *applyconfigurationsrbacv1beta1.ClusterRoleApplyConfiguration] } // newClusterRoles returns a ClusterRoles func newClusterRoles(c *RbacV1beta1Client) *clusterRoles { return &clusterRoles{ - gentype.NewClientWithListAndApply[*v1beta1.ClusterRole, *v1beta1.ClusterRoleList, *rbacv1beta1.ClusterRoleApplyConfiguration]( + gentype.NewClientWithListAndApply[*rbacv1beta1.ClusterRole, 
*rbacv1beta1.ClusterRoleList, *applyconfigurationsrbacv1beta1.ClusterRoleApplyConfiguration]( "clusterroles", c.RESTClient(), scheme.ParameterCodec, "", - func() *v1beta1.ClusterRole { return &v1beta1.ClusterRole{} }, - func() *v1beta1.ClusterRoleList { return &v1beta1.ClusterRoleList{} }), + func() *rbacv1beta1.ClusterRole { return &rbacv1beta1.ClusterRole{} }, + func() *rbacv1beta1.ClusterRoleList { return &rbacv1beta1.ClusterRoleList{} }, + gentype.PrefersProtobuf[*rbacv1beta1.ClusterRole](), + ), } } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrolebinding.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrolebinding.go index 3010a99ae..beb50f7b7 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrolebinding.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrolebinding.go @@ -19,13 +19,13 @@ limitations under the License. package v1beta1 import ( - "context" + context "context" - v1beta1 "k8s.io/api/rbac/v1beta1" + rbacv1beta1 "k8s.io/api/rbac/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - rbacv1beta1 "k8s.io/client-go/applyconfigurations/rbac/v1beta1" + applyconfigurationsrbacv1beta1 "k8s.io/client-go/applyconfigurations/rbac/v1beta1" gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" ) @@ -38,32 +38,34 @@ type ClusterRoleBindingsGetter interface { // ClusterRoleBindingInterface has methods to work with ClusterRoleBinding resources. type ClusterRoleBindingInterface interface { - Create(ctx context.Context, clusterRoleBinding *v1beta1.ClusterRoleBinding, opts v1.CreateOptions) (*v1beta1.ClusterRoleBinding, error) - Update(ctx context.Context, clusterRoleBinding *v1beta1.ClusterRoleBinding, opts v1.UpdateOptions) (*v1beta1.ClusterRoleBinding, error) + Create(ctx context.Context, clusterRoleBinding *rbacv1beta1.ClusterRoleBinding, opts v1.CreateOptions) (*rbacv1beta1.ClusterRoleBinding, error) + Update(ctx context.Context, clusterRoleBinding *rbacv1beta1.ClusterRoleBinding, opts v1.UpdateOptions) (*rbacv1beta1.ClusterRoleBinding, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.ClusterRoleBinding, error) - List(ctx context.Context, opts v1.ListOptions) (*v1beta1.ClusterRoleBindingList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*rbacv1beta1.ClusterRoleBinding, error) + List(ctx context.Context, opts v1.ListOptions) (*rbacv1beta1.ClusterRoleBindingList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ClusterRoleBinding, err error) - Apply(ctx context.Context, clusterRoleBinding *rbacv1beta1.ClusterRoleBindingApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.ClusterRoleBinding, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *rbacv1beta1.ClusterRoleBinding, err error) + Apply(ctx context.Context, clusterRoleBinding *applyconfigurationsrbacv1beta1.ClusterRoleBindingApplyConfiguration, opts v1.ApplyOptions) (result *rbacv1beta1.ClusterRoleBinding, err error) ClusterRoleBindingExpansion } // clusterRoleBindings implements 
ClusterRoleBindingInterface type clusterRoleBindings struct { - *gentype.ClientWithListAndApply[*v1beta1.ClusterRoleBinding, *v1beta1.ClusterRoleBindingList, *rbacv1beta1.ClusterRoleBindingApplyConfiguration] + *gentype.ClientWithListAndApply[*rbacv1beta1.ClusterRoleBinding, *rbacv1beta1.ClusterRoleBindingList, *applyconfigurationsrbacv1beta1.ClusterRoleBindingApplyConfiguration] } // newClusterRoleBindings returns a ClusterRoleBindings func newClusterRoleBindings(c *RbacV1beta1Client) *clusterRoleBindings { return &clusterRoleBindings{ - gentype.NewClientWithListAndApply[*v1beta1.ClusterRoleBinding, *v1beta1.ClusterRoleBindingList, *rbacv1beta1.ClusterRoleBindingApplyConfiguration]( + gentype.NewClientWithListAndApply[*rbacv1beta1.ClusterRoleBinding, *rbacv1beta1.ClusterRoleBindingList, *applyconfigurationsrbacv1beta1.ClusterRoleBindingApplyConfiguration]( "clusterrolebindings", c.RESTClient(), scheme.ParameterCodec, "", - func() *v1beta1.ClusterRoleBinding { return &v1beta1.ClusterRoleBinding{} }, - func() *v1beta1.ClusterRoleBindingList { return &v1beta1.ClusterRoleBindingList{} }), + func() *rbacv1beta1.ClusterRoleBinding { return &rbacv1beta1.ClusterRoleBinding{} }, + func() *rbacv1beta1.ClusterRoleBindingList { return &rbacv1beta1.ClusterRoleBindingList{} }, + gentype.PrefersProtobuf[*rbacv1beta1.ClusterRoleBinding](), + ), } } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_clusterrole.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_clusterrole.go index b7996c106..55314691a 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_clusterrole.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_clusterrole.go @@ -19,133 +19,33 @@ limitations under the License. package fake import ( - "context" - json "encoding/json" - "fmt" - v1beta1 "k8s.io/api/rbac/v1beta1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" rbacv1beta1 "k8s.io/client-go/applyconfigurations/rbac/v1beta1" - testing "k8s.io/client-go/testing" + gentype "k8s.io/client-go/gentype" + typedrbacv1beta1 "k8s.io/client-go/kubernetes/typed/rbac/v1beta1" ) -// FakeClusterRoles implements ClusterRoleInterface -type FakeClusterRoles struct { +// fakeClusterRoles implements ClusterRoleInterface +type fakeClusterRoles struct { + *gentype.FakeClientWithListAndApply[*v1beta1.ClusterRole, *v1beta1.ClusterRoleList, *rbacv1beta1.ClusterRoleApplyConfiguration] Fake *FakeRbacV1beta1 } -var clusterrolesResource = v1beta1.SchemeGroupVersion.WithResource("clusterroles") - -var clusterrolesKind = v1beta1.SchemeGroupVersion.WithKind("ClusterRole") - -// Get takes name of the clusterRole, and returns the corresponding clusterRole object, and an error if there is any. -func (c *FakeClusterRoles) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.ClusterRole, err error) { - emptyResult := &v1beta1.ClusterRole{} - obj, err := c.Fake. - Invokes(testing.NewRootGetActionWithOptions(clusterrolesResource, name, options), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.ClusterRole), err -} - -// List takes label and field selectors, and returns the list of ClusterRoles that match those selectors. -func (c *FakeClusterRoles) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.ClusterRoleList, err error) { - emptyResult := &v1beta1.ClusterRoleList{} - obj, err := c.Fake. 
- Invokes(testing.NewRootListActionWithOptions(clusterrolesResource, clusterrolesKind, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1beta1.ClusterRoleList{ListMeta: obj.(*v1beta1.ClusterRoleList).ListMeta} - for _, item := range obj.(*v1beta1.ClusterRoleList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested clusterRoles. -func (c *FakeClusterRoles) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewRootWatchActionWithOptions(clusterrolesResource, opts)) -} - -// Create takes the representation of a clusterRole and creates it. Returns the server's representation of the clusterRole, and an error, if there is any. -func (c *FakeClusterRoles) Create(ctx context.Context, clusterRole *v1beta1.ClusterRole, opts v1.CreateOptions) (result *v1beta1.ClusterRole, err error) { - emptyResult := &v1beta1.ClusterRole{} - obj, err := c.Fake. - Invokes(testing.NewRootCreateActionWithOptions(clusterrolesResource, clusterRole, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.ClusterRole), err -} - -// Update takes the representation of a clusterRole and updates it. Returns the server's representation of the clusterRole, and an error, if there is any. -func (c *FakeClusterRoles) Update(ctx context.Context, clusterRole *v1beta1.ClusterRole, opts v1.UpdateOptions) (result *v1beta1.ClusterRole, err error) { - emptyResult := &v1beta1.ClusterRole{} - obj, err := c.Fake. - Invokes(testing.NewRootUpdateActionWithOptions(clusterrolesResource, clusterRole, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.ClusterRole), err -} - -// Delete takes name of the clusterRole and deletes it. Returns an error if one occurs. -func (c *FakeClusterRoles) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewRootDeleteActionWithOptions(clusterrolesResource, name, opts), &v1beta1.ClusterRole{}) - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeClusterRoles) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewRootDeleteCollectionActionWithOptions(clusterrolesResource, opts, listOpts) - - _, err := c.Fake.Invokes(action, &v1beta1.ClusterRoleList{}) - return err -} - -// Patch applies the patch and returns the patched clusterRole. -func (c *FakeClusterRoles) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ClusterRole, err error) { - emptyResult := &v1beta1.ClusterRole{} - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceActionWithOptions(clusterrolesResource, name, pt, data, opts, subresources...), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.ClusterRole), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied clusterRole. 
-func (c *FakeClusterRoles) Apply(ctx context.Context, clusterRole *rbacv1beta1.ClusterRoleApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.ClusterRole, err error) { - if clusterRole == nil { - return nil, fmt.Errorf("clusterRole provided to Apply must not be nil") - } - data, err := json.Marshal(clusterRole) - if err != nil { - return nil, err - } - name := clusterRole.Name - if name == nil { - return nil, fmt.Errorf("clusterRole.Name must be provided to Apply") - } - emptyResult := &v1beta1.ClusterRole{} - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceActionWithOptions(clusterrolesResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) - if obj == nil { - return emptyResult, err +func newFakeClusterRoles(fake *FakeRbacV1beta1) typedrbacv1beta1.ClusterRoleInterface { + return &fakeClusterRoles{ + gentype.NewFakeClientWithListAndApply[*v1beta1.ClusterRole, *v1beta1.ClusterRoleList, *rbacv1beta1.ClusterRoleApplyConfiguration]( + fake.Fake, + "", + v1beta1.SchemeGroupVersion.WithResource("clusterroles"), + v1beta1.SchemeGroupVersion.WithKind("ClusterRole"), + func() *v1beta1.ClusterRole { return &v1beta1.ClusterRole{} }, + func() *v1beta1.ClusterRoleList { return &v1beta1.ClusterRoleList{} }, + func(dst, src *v1beta1.ClusterRoleList) { dst.ListMeta = src.ListMeta }, + func(list *v1beta1.ClusterRoleList) []*v1beta1.ClusterRole { return gentype.ToPointerSlice(list.Items) }, + func(list *v1beta1.ClusterRoleList, items []*v1beta1.ClusterRole) { + list.Items = gentype.FromPointerSlice(items) + }, + ), + fake, } - return obj.(*v1beta1.ClusterRole), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_clusterrolebinding.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_clusterrolebinding.go index 8843757ac..63c194567 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_clusterrolebinding.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_clusterrolebinding.go @@ -19,133 +19,35 @@ limitations under the License. package fake import ( - "context" - json "encoding/json" - "fmt" - v1beta1 "k8s.io/api/rbac/v1beta1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" rbacv1beta1 "k8s.io/client-go/applyconfigurations/rbac/v1beta1" - testing "k8s.io/client-go/testing" + gentype "k8s.io/client-go/gentype" + typedrbacv1beta1 "k8s.io/client-go/kubernetes/typed/rbac/v1beta1" ) -// FakeClusterRoleBindings implements ClusterRoleBindingInterface -type FakeClusterRoleBindings struct { +// fakeClusterRoleBindings implements ClusterRoleBindingInterface +type fakeClusterRoleBindings struct { + *gentype.FakeClientWithListAndApply[*v1beta1.ClusterRoleBinding, *v1beta1.ClusterRoleBindingList, *rbacv1beta1.ClusterRoleBindingApplyConfiguration] Fake *FakeRbacV1beta1 } -var clusterrolebindingsResource = v1beta1.SchemeGroupVersion.WithResource("clusterrolebindings") - -var clusterrolebindingsKind = v1beta1.SchemeGroupVersion.WithKind("ClusterRoleBinding") - -// Get takes name of the clusterRoleBinding, and returns the corresponding clusterRoleBinding object, and an error if there is any. -func (c *FakeClusterRoleBindings) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.ClusterRoleBinding, err error) { - emptyResult := &v1beta1.ClusterRoleBinding{} - obj, err := c.Fake. 
- Invokes(testing.NewRootGetActionWithOptions(clusterrolebindingsResource, name, options), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.ClusterRoleBinding), err -} - -// List takes label and field selectors, and returns the list of ClusterRoleBindings that match those selectors. -func (c *FakeClusterRoleBindings) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.ClusterRoleBindingList, err error) { - emptyResult := &v1beta1.ClusterRoleBindingList{} - obj, err := c.Fake. - Invokes(testing.NewRootListActionWithOptions(clusterrolebindingsResource, clusterrolebindingsKind, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1beta1.ClusterRoleBindingList{ListMeta: obj.(*v1beta1.ClusterRoleBindingList).ListMeta} - for _, item := range obj.(*v1beta1.ClusterRoleBindingList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested clusterRoleBindings. -func (c *FakeClusterRoleBindings) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewRootWatchActionWithOptions(clusterrolebindingsResource, opts)) -} - -// Create takes the representation of a clusterRoleBinding and creates it. Returns the server's representation of the clusterRoleBinding, and an error, if there is any. -func (c *FakeClusterRoleBindings) Create(ctx context.Context, clusterRoleBinding *v1beta1.ClusterRoleBinding, opts v1.CreateOptions) (result *v1beta1.ClusterRoleBinding, err error) { - emptyResult := &v1beta1.ClusterRoleBinding{} - obj, err := c.Fake. - Invokes(testing.NewRootCreateActionWithOptions(clusterrolebindingsResource, clusterRoleBinding, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.ClusterRoleBinding), err -} - -// Update takes the representation of a clusterRoleBinding and updates it. Returns the server's representation of the clusterRoleBinding, and an error, if there is any. -func (c *FakeClusterRoleBindings) Update(ctx context.Context, clusterRoleBinding *v1beta1.ClusterRoleBinding, opts v1.UpdateOptions) (result *v1beta1.ClusterRoleBinding, err error) { - emptyResult := &v1beta1.ClusterRoleBinding{} - obj, err := c.Fake. - Invokes(testing.NewRootUpdateActionWithOptions(clusterrolebindingsResource, clusterRoleBinding, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.ClusterRoleBinding), err -} - -// Delete takes name of the clusterRoleBinding and deletes it. Returns an error if one occurs. -func (c *FakeClusterRoleBindings) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewRootDeleteActionWithOptions(clusterrolebindingsResource, name, opts), &v1beta1.ClusterRoleBinding{}) - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeClusterRoleBindings) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewRootDeleteCollectionActionWithOptions(clusterrolebindingsResource, opts, listOpts) - - _, err := c.Fake.Invokes(action, &v1beta1.ClusterRoleBindingList{}) - return err -} - -// Patch applies the patch and returns the patched clusterRoleBinding. 
-func (c *FakeClusterRoleBindings) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ClusterRoleBinding, err error) { - emptyResult := &v1beta1.ClusterRoleBinding{} - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceActionWithOptions(clusterrolebindingsResource, name, pt, data, opts, subresources...), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.ClusterRoleBinding), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied clusterRoleBinding. -func (c *FakeClusterRoleBindings) Apply(ctx context.Context, clusterRoleBinding *rbacv1beta1.ClusterRoleBindingApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.ClusterRoleBinding, err error) { - if clusterRoleBinding == nil { - return nil, fmt.Errorf("clusterRoleBinding provided to Apply must not be nil") - } - data, err := json.Marshal(clusterRoleBinding) - if err != nil { - return nil, err - } - name := clusterRoleBinding.Name - if name == nil { - return nil, fmt.Errorf("clusterRoleBinding.Name must be provided to Apply") - } - emptyResult := &v1beta1.ClusterRoleBinding{} - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceActionWithOptions(clusterrolebindingsResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) - if obj == nil { - return emptyResult, err +func newFakeClusterRoleBindings(fake *FakeRbacV1beta1) typedrbacv1beta1.ClusterRoleBindingInterface { + return &fakeClusterRoleBindings{ + gentype.NewFakeClientWithListAndApply[*v1beta1.ClusterRoleBinding, *v1beta1.ClusterRoleBindingList, *rbacv1beta1.ClusterRoleBindingApplyConfiguration]( + fake.Fake, + "", + v1beta1.SchemeGroupVersion.WithResource("clusterrolebindings"), + v1beta1.SchemeGroupVersion.WithKind("ClusterRoleBinding"), + func() *v1beta1.ClusterRoleBinding { return &v1beta1.ClusterRoleBinding{} }, + func() *v1beta1.ClusterRoleBindingList { return &v1beta1.ClusterRoleBindingList{} }, + func(dst, src *v1beta1.ClusterRoleBindingList) { dst.ListMeta = src.ListMeta }, + func(list *v1beta1.ClusterRoleBindingList) []*v1beta1.ClusterRoleBinding { + return gentype.ToPointerSlice(list.Items) + }, + func(list *v1beta1.ClusterRoleBindingList, items []*v1beta1.ClusterRoleBinding) { + list.Items = gentype.FromPointerSlice(items) + }, + ), + fake, } - return obj.(*v1beta1.ClusterRoleBinding), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_rbac_client.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_rbac_client.go index bdbc246b7..7cfbbe619 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_rbac_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_rbac_client.go @@ -29,19 +29,19 @@ type FakeRbacV1beta1 struct { } func (c *FakeRbacV1beta1) ClusterRoles() v1beta1.ClusterRoleInterface { - return &FakeClusterRoles{c} + return newFakeClusterRoles(c) } func (c *FakeRbacV1beta1) ClusterRoleBindings() v1beta1.ClusterRoleBindingInterface { - return &FakeClusterRoleBindings{c} + return newFakeClusterRoleBindings(c) } func (c *FakeRbacV1beta1) Roles(namespace string) v1beta1.RoleInterface { - return &FakeRoles{c, namespace} + return newFakeRoles(c, namespace) } func (c *FakeRbacV1beta1) RoleBindings(namespace string) v1beta1.RoleBindingInterface { - return &FakeRoleBindings{c, namespace} + return newFakeRoleBindings(c, namespace) } // RESTClient returns a RESTClient that is 
used to communicate diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_role.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_role.go index aa0fe28a1..44367168c 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_role.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_role.go @@ -19,142 +19,31 @@ limitations under the License. package fake import ( - "context" - json "encoding/json" - "fmt" - v1beta1 "k8s.io/api/rbac/v1beta1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" rbacv1beta1 "k8s.io/client-go/applyconfigurations/rbac/v1beta1" - testing "k8s.io/client-go/testing" + gentype "k8s.io/client-go/gentype" + typedrbacv1beta1 "k8s.io/client-go/kubernetes/typed/rbac/v1beta1" ) -// FakeRoles implements RoleInterface -type FakeRoles struct { +// fakeRoles implements RoleInterface +type fakeRoles struct { + *gentype.FakeClientWithListAndApply[*v1beta1.Role, *v1beta1.RoleList, *rbacv1beta1.RoleApplyConfiguration] Fake *FakeRbacV1beta1 - ns string -} - -var rolesResource = v1beta1.SchemeGroupVersion.WithResource("roles") - -var rolesKind = v1beta1.SchemeGroupVersion.WithKind("Role") - -// Get takes name of the role, and returns the corresponding role object, and an error if there is any. -func (c *FakeRoles) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.Role, err error) { - emptyResult := &v1beta1.Role{} - obj, err := c.Fake. - Invokes(testing.NewGetActionWithOptions(rolesResource, c.ns, name, options), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.Role), err -} - -// List takes label and field selectors, and returns the list of Roles that match those selectors. -func (c *FakeRoles) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.RoleList, err error) { - emptyResult := &v1beta1.RoleList{} - obj, err := c.Fake. - Invokes(testing.NewListActionWithOptions(rolesResource, rolesKind, c.ns, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1beta1.RoleList{ListMeta: obj.(*v1beta1.RoleList).ListMeta} - for _, item := range obj.(*v1beta1.RoleList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested roles. -func (c *FakeRoles) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchActionWithOptions(rolesResource, c.ns, opts)) - } -// Create takes the representation of a role and creates it. Returns the server's representation of the role, and an error, if there is any. -func (c *FakeRoles) Create(ctx context.Context, role *v1beta1.Role, opts v1.CreateOptions) (result *v1beta1.Role, err error) { - emptyResult := &v1beta1.Role{} - obj, err := c.Fake. - Invokes(testing.NewCreateActionWithOptions(rolesResource, c.ns, role, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.Role), err -} - -// Update takes the representation of a role and updates it. Returns the server's representation of the role, and an error, if there is any. 
-func (c *FakeRoles) Update(ctx context.Context, role *v1beta1.Role, opts v1.UpdateOptions) (result *v1beta1.Role, err error) { - emptyResult := &v1beta1.Role{} - obj, err := c.Fake. - Invokes(testing.NewUpdateActionWithOptions(rolesResource, c.ns, role, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.Role), err -} - -// Delete takes name of the role and deletes it. Returns an error if one occurs. -func (c *FakeRoles) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(rolesResource, c.ns, name, opts), &v1beta1.Role{}) - - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeRoles) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionActionWithOptions(rolesResource, c.ns, opts, listOpts) - - _, err := c.Fake.Invokes(action, &v1beta1.RoleList{}) - return err -} - -// Patch applies the patch and returns the patched role. -func (c *FakeRoles) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Role, err error) { - emptyResult := &v1beta1.Role{} - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceActionWithOptions(rolesResource, c.ns, name, pt, data, opts, subresources...), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.Role), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied role. -func (c *FakeRoles) Apply(ctx context.Context, role *rbacv1beta1.RoleApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.Role, err error) { - if role == nil { - return nil, fmt.Errorf("role provided to Apply must not be nil") - } - data, err := json.Marshal(role) - if err != nil { - return nil, err - } - name := role.Name - if name == nil { - return nil, fmt.Errorf("role.Name must be provided to Apply") - } - emptyResult := &v1beta1.Role{} - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceActionWithOptions(rolesResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) - - if obj == nil { - return emptyResult, err +func newFakeRoles(fake *FakeRbacV1beta1, namespace string) typedrbacv1beta1.RoleInterface { + return &fakeRoles{ + gentype.NewFakeClientWithListAndApply[*v1beta1.Role, *v1beta1.RoleList, *rbacv1beta1.RoleApplyConfiguration]( + fake.Fake, + namespace, + v1beta1.SchemeGroupVersion.WithResource("roles"), + v1beta1.SchemeGroupVersion.WithKind("Role"), + func() *v1beta1.Role { return &v1beta1.Role{} }, + func() *v1beta1.RoleList { return &v1beta1.RoleList{} }, + func(dst, src *v1beta1.RoleList) { dst.ListMeta = src.ListMeta }, + func(list *v1beta1.RoleList) []*v1beta1.Role { return gentype.ToPointerSlice(list.Items) }, + func(list *v1beta1.RoleList, items []*v1beta1.Role) { list.Items = gentype.FromPointerSlice(items) }, + ), + fake, } - return obj.(*v1beta1.Role), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_rolebinding.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_rolebinding.go index 26c3c8045..c42560b1d 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_rolebinding.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_rolebinding.go @@ -19,142 +19,33 @@ limitations under the License. 
package fake import ( - "context" - json "encoding/json" - "fmt" - v1beta1 "k8s.io/api/rbac/v1beta1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" rbacv1beta1 "k8s.io/client-go/applyconfigurations/rbac/v1beta1" - testing "k8s.io/client-go/testing" + gentype "k8s.io/client-go/gentype" + typedrbacv1beta1 "k8s.io/client-go/kubernetes/typed/rbac/v1beta1" ) -// FakeRoleBindings implements RoleBindingInterface -type FakeRoleBindings struct { +// fakeRoleBindings implements RoleBindingInterface +type fakeRoleBindings struct { + *gentype.FakeClientWithListAndApply[*v1beta1.RoleBinding, *v1beta1.RoleBindingList, *rbacv1beta1.RoleBindingApplyConfiguration] Fake *FakeRbacV1beta1 - ns string -} - -var rolebindingsResource = v1beta1.SchemeGroupVersion.WithResource("rolebindings") - -var rolebindingsKind = v1beta1.SchemeGroupVersion.WithKind("RoleBinding") - -// Get takes name of the roleBinding, and returns the corresponding roleBinding object, and an error if there is any. -func (c *FakeRoleBindings) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.RoleBinding, err error) { - emptyResult := &v1beta1.RoleBinding{} - obj, err := c.Fake. - Invokes(testing.NewGetActionWithOptions(rolebindingsResource, c.ns, name, options), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.RoleBinding), err -} - -// List takes label and field selectors, and returns the list of RoleBindings that match those selectors. -func (c *FakeRoleBindings) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.RoleBindingList, err error) { - emptyResult := &v1beta1.RoleBindingList{} - obj, err := c.Fake. - Invokes(testing.NewListActionWithOptions(rolebindingsResource, rolebindingsKind, c.ns, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1beta1.RoleBindingList{ListMeta: obj.(*v1beta1.RoleBindingList).ListMeta} - for _, item := range obj.(*v1beta1.RoleBindingList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested roleBindings. -func (c *FakeRoleBindings) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchActionWithOptions(rolebindingsResource, c.ns, opts)) - } -// Create takes the representation of a roleBinding and creates it. Returns the server's representation of the roleBinding, and an error, if there is any. -func (c *FakeRoleBindings) Create(ctx context.Context, roleBinding *v1beta1.RoleBinding, opts v1.CreateOptions) (result *v1beta1.RoleBinding, err error) { - emptyResult := &v1beta1.RoleBinding{} - obj, err := c.Fake. - Invokes(testing.NewCreateActionWithOptions(rolebindingsResource, c.ns, roleBinding, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.RoleBinding), err -} - -// Update takes the representation of a roleBinding and updates it. Returns the server's representation of the roleBinding, and an error, if there is any. -func (c *FakeRoleBindings) Update(ctx context.Context, roleBinding *v1beta1.RoleBinding, opts v1.UpdateOptions) (result *v1beta1.RoleBinding, err error) { - emptyResult := &v1beta1.RoleBinding{} - obj, err := c.Fake. 
- Invokes(testing.NewUpdateActionWithOptions(rolebindingsResource, c.ns, roleBinding, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.RoleBinding), err -} - -// Delete takes name of the roleBinding and deletes it. Returns an error if one occurs. -func (c *FakeRoleBindings) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(rolebindingsResource, c.ns, name, opts), &v1beta1.RoleBinding{}) - - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeRoleBindings) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionActionWithOptions(rolebindingsResource, c.ns, opts, listOpts) - - _, err := c.Fake.Invokes(action, &v1beta1.RoleBindingList{}) - return err -} - -// Patch applies the patch and returns the patched roleBinding. -func (c *FakeRoleBindings) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.RoleBinding, err error) { - emptyResult := &v1beta1.RoleBinding{} - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceActionWithOptions(rolebindingsResource, c.ns, name, pt, data, opts, subresources...), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.RoleBinding), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied roleBinding. -func (c *FakeRoleBindings) Apply(ctx context.Context, roleBinding *rbacv1beta1.RoleBindingApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.RoleBinding, err error) { - if roleBinding == nil { - return nil, fmt.Errorf("roleBinding provided to Apply must not be nil") - } - data, err := json.Marshal(roleBinding) - if err != nil { - return nil, err - } - name := roleBinding.Name - if name == nil { - return nil, fmt.Errorf("roleBinding.Name must be provided to Apply") - } - emptyResult := &v1beta1.RoleBinding{} - obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceActionWithOptions(rolebindingsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) - - if obj == nil { - return emptyResult, err +func newFakeRoleBindings(fake *FakeRbacV1beta1, namespace string) typedrbacv1beta1.RoleBindingInterface { + return &fakeRoleBindings{ + gentype.NewFakeClientWithListAndApply[*v1beta1.RoleBinding, *v1beta1.RoleBindingList, *rbacv1beta1.RoleBindingApplyConfiguration]( + fake.Fake, + namespace, + v1beta1.SchemeGroupVersion.WithResource("rolebindings"), + v1beta1.SchemeGroupVersion.WithKind("RoleBinding"), + func() *v1beta1.RoleBinding { return &v1beta1.RoleBinding{} }, + func() *v1beta1.RoleBindingList { return &v1beta1.RoleBindingList{} }, + func(dst, src *v1beta1.RoleBindingList) { dst.ListMeta = src.ListMeta }, + func(list *v1beta1.RoleBindingList) []*v1beta1.RoleBinding { return gentype.ToPointerSlice(list.Items) }, + func(list *v1beta1.RoleBindingList, items []*v1beta1.RoleBinding) { + list.Items = gentype.FromPointerSlice(items) + }, + ), + fake, } - return obj.(*v1beta1.RoleBinding), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/rbac_client.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/rbac_client.go index 8dac5c1d4..5739bb289 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/rbac_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/rbac_client.go @@ -19,10 +19,10 @@ limitations under the License. package v1beta1 import ( - "net/http" + http "net/http" - v1beta1 "k8s.io/api/rbac/v1beta1" - "k8s.io/client-go/kubernetes/scheme" + rbacv1beta1 "k8s.io/api/rbac/v1beta1" + scheme "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -100,10 +100,10 @@ func New(c rest.Interface) *RbacV1beta1Client { } func setConfigDefaults(config *rest.Config) error { - gv := v1beta1.SchemeGroupVersion + gv := rbacv1beta1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/role.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/role.go index 92e51da1b..700fc6d22 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/role.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/role.go @@ -19,13 +19,13 @@ limitations under the License. package v1beta1 import ( - "context" + context "context" - v1beta1 "k8s.io/api/rbac/v1beta1" + rbacv1beta1 "k8s.io/api/rbac/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - rbacv1beta1 "k8s.io/client-go/applyconfigurations/rbac/v1beta1" + applyconfigurationsrbacv1beta1 "k8s.io/client-go/applyconfigurations/rbac/v1beta1" gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" ) @@ -38,32 +38,34 @@ type RolesGetter interface { // RoleInterface has methods to work with Role resources. 
type RoleInterface interface { - Create(ctx context.Context, role *v1beta1.Role, opts v1.CreateOptions) (*v1beta1.Role, error) - Update(ctx context.Context, role *v1beta1.Role, opts v1.UpdateOptions) (*v1beta1.Role, error) + Create(ctx context.Context, role *rbacv1beta1.Role, opts v1.CreateOptions) (*rbacv1beta1.Role, error) + Update(ctx context.Context, role *rbacv1beta1.Role, opts v1.UpdateOptions) (*rbacv1beta1.Role, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.Role, error) - List(ctx context.Context, opts v1.ListOptions) (*v1beta1.RoleList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*rbacv1beta1.Role, error) + List(ctx context.Context, opts v1.ListOptions) (*rbacv1beta1.RoleList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Role, err error) - Apply(ctx context.Context, role *rbacv1beta1.RoleApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.Role, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *rbacv1beta1.Role, err error) + Apply(ctx context.Context, role *applyconfigurationsrbacv1beta1.RoleApplyConfiguration, opts v1.ApplyOptions) (result *rbacv1beta1.Role, err error) RoleExpansion } // roles implements RoleInterface type roles struct { - *gentype.ClientWithListAndApply[*v1beta1.Role, *v1beta1.RoleList, *rbacv1beta1.RoleApplyConfiguration] + *gentype.ClientWithListAndApply[*rbacv1beta1.Role, *rbacv1beta1.RoleList, *applyconfigurationsrbacv1beta1.RoleApplyConfiguration] } // newRoles returns a Roles func newRoles(c *RbacV1beta1Client, namespace string) *roles { return &roles{ - gentype.NewClientWithListAndApply[*v1beta1.Role, *v1beta1.RoleList, *rbacv1beta1.RoleApplyConfiguration]( + gentype.NewClientWithListAndApply[*rbacv1beta1.Role, *rbacv1beta1.RoleList, *applyconfigurationsrbacv1beta1.RoleApplyConfiguration]( "roles", c.RESTClient(), scheme.ParameterCodec, namespace, - func() *v1beta1.Role { return &v1beta1.Role{} }, - func() *v1beta1.RoleList { return &v1beta1.RoleList{} }), + func() *rbacv1beta1.Role { return &rbacv1beta1.Role{} }, + func() *rbacv1beta1.RoleList { return &rbacv1beta1.RoleList{} }, + gentype.PrefersProtobuf[*rbacv1beta1.Role](), + ), } } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/rolebinding.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/rolebinding.go index ad31bd051..0f423a0d9 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/rolebinding.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/rolebinding.go @@ -19,13 +19,13 @@ limitations under the License. 
package v1beta1 import ( - "context" + context "context" - v1beta1 "k8s.io/api/rbac/v1beta1" + rbacv1beta1 "k8s.io/api/rbac/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - rbacv1beta1 "k8s.io/client-go/applyconfigurations/rbac/v1beta1" + applyconfigurationsrbacv1beta1 "k8s.io/client-go/applyconfigurations/rbac/v1beta1" gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" ) @@ -38,32 +38,34 @@ type RoleBindingsGetter interface { // RoleBindingInterface has methods to work with RoleBinding resources. type RoleBindingInterface interface { - Create(ctx context.Context, roleBinding *v1beta1.RoleBinding, opts v1.CreateOptions) (*v1beta1.RoleBinding, error) - Update(ctx context.Context, roleBinding *v1beta1.RoleBinding, opts v1.UpdateOptions) (*v1beta1.RoleBinding, error) + Create(ctx context.Context, roleBinding *rbacv1beta1.RoleBinding, opts v1.CreateOptions) (*rbacv1beta1.RoleBinding, error) + Update(ctx context.Context, roleBinding *rbacv1beta1.RoleBinding, opts v1.UpdateOptions) (*rbacv1beta1.RoleBinding, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.RoleBinding, error) - List(ctx context.Context, opts v1.ListOptions) (*v1beta1.RoleBindingList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*rbacv1beta1.RoleBinding, error) + List(ctx context.Context, opts v1.ListOptions) (*rbacv1beta1.RoleBindingList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.RoleBinding, err error) - Apply(ctx context.Context, roleBinding *rbacv1beta1.RoleBindingApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.RoleBinding, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *rbacv1beta1.RoleBinding, err error) + Apply(ctx context.Context, roleBinding *applyconfigurationsrbacv1beta1.RoleBindingApplyConfiguration, opts v1.ApplyOptions) (result *rbacv1beta1.RoleBinding, err error) RoleBindingExpansion } // roleBindings implements RoleBindingInterface type roleBindings struct { - *gentype.ClientWithListAndApply[*v1beta1.RoleBinding, *v1beta1.RoleBindingList, *rbacv1beta1.RoleBindingApplyConfiguration] + *gentype.ClientWithListAndApply[*rbacv1beta1.RoleBinding, *rbacv1beta1.RoleBindingList, *applyconfigurationsrbacv1beta1.RoleBindingApplyConfiguration] } // newRoleBindings returns a RoleBindings func newRoleBindings(c *RbacV1beta1Client, namespace string) *roleBindings { return &roleBindings{ - gentype.NewClientWithListAndApply[*v1beta1.RoleBinding, *v1beta1.RoleBindingList, *rbacv1beta1.RoleBindingApplyConfiguration]( + gentype.NewClientWithListAndApply[*rbacv1beta1.RoleBinding, *rbacv1beta1.RoleBindingList, *applyconfigurationsrbacv1beta1.RoleBindingApplyConfiguration]( "rolebindings", c.RESTClient(), scheme.ParameterCodec, namespace, - func() *v1beta1.RoleBinding { return &v1beta1.RoleBinding{} }, - func() *v1beta1.RoleBindingList { return &v1beta1.RoleBindingList{} }), + func() *rbacv1beta1.RoleBinding { return &rbacv1beta1.RoleBinding{} }, + func() *rbacv1beta1.RoleBindingList { return &rbacv1beta1.RoleBindingList{} }, + 
gentype.PrefersProtobuf[*rbacv1beta1.RoleBinding](), + ), } } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/deviceclass.go b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/deviceclass.go index 35455dfa3..6cdf57c53 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/deviceclass.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/deviceclass.go @@ -19,13 +19,13 @@ limitations under the License. package v1alpha3 import ( - "context" + context "context" - v1alpha3 "k8s.io/api/resource/v1alpha3" + resourcev1alpha3 "k8s.io/api/resource/v1alpha3" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - resourcev1alpha3 "k8s.io/client-go/applyconfigurations/resource/v1alpha3" + applyconfigurationsresourcev1alpha3 "k8s.io/client-go/applyconfigurations/resource/v1alpha3" gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" ) @@ -38,32 +38,34 @@ type DeviceClassesGetter interface { // DeviceClassInterface has methods to work with DeviceClass resources. type DeviceClassInterface interface { - Create(ctx context.Context, deviceClass *v1alpha3.DeviceClass, opts v1.CreateOptions) (*v1alpha3.DeviceClass, error) - Update(ctx context.Context, deviceClass *v1alpha3.DeviceClass, opts v1.UpdateOptions) (*v1alpha3.DeviceClass, error) + Create(ctx context.Context, deviceClass *resourcev1alpha3.DeviceClass, opts v1.CreateOptions) (*resourcev1alpha3.DeviceClass, error) + Update(ctx context.Context, deviceClass *resourcev1alpha3.DeviceClass, opts v1.UpdateOptions) (*resourcev1alpha3.DeviceClass, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha3.DeviceClass, error) - List(ctx context.Context, opts v1.ListOptions) (*v1alpha3.DeviceClassList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*resourcev1alpha3.DeviceClass, error) + List(ctx context.Context, opts v1.ListOptions) (*resourcev1alpha3.DeviceClassList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha3.DeviceClass, err error) - Apply(ctx context.Context, deviceClass *resourcev1alpha3.DeviceClassApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha3.DeviceClass, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *resourcev1alpha3.DeviceClass, err error) + Apply(ctx context.Context, deviceClass *applyconfigurationsresourcev1alpha3.DeviceClassApplyConfiguration, opts v1.ApplyOptions) (result *resourcev1alpha3.DeviceClass, err error) DeviceClassExpansion } // deviceClasses implements DeviceClassInterface type deviceClasses struct { - *gentype.ClientWithListAndApply[*v1alpha3.DeviceClass, *v1alpha3.DeviceClassList, *resourcev1alpha3.DeviceClassApplyConfiguration] + *gentype.ClientWithListAndApply[*resourcev1alpha3.DeviceClass, *resourcev1alpha3.DeviceClassList, *applyconfigurationsresourcev1alpha3.DeviceClassApplyConfiguration] } // newDeviceClasses returns a DeviceClasses func newDeviceClasses(c *ResourceV1alpha3Client) *deviceClasses { return &deviceClasses{ - gentype.NewClientWithListAndApply[*v1alpha3.DeviceClass, 
*v1alpha3.DeviceClassList, *resourcev1alpha3.DeviceClassApplyConfiguration]( + gentype.NewClientWithListAndApply[*resourcev1alpha3.DeviceClass, *resourcev1alpha3.DeviceClassList, *applyconfigurationsresourcev1alpha3.DeviceClassApplyConfiguration]( "deviceclasses", c.RESTClient(), scheme.ParameterCodec, "", - func() *v1alpha3.DeviceClass { return &v1alpha3.DeviceClass{} }, - func() *v1alpha3.DeviceClassList { return &v1alpha3.DeviceClassList{} }), + func() *resourcev1alpha3.DeviceClass { return &resourcev1alpha3.DeviceClass{} }, + func() *resourcev1alpha3.DeviceClassList { return &resourcev1alpha3.DeviceClassList{} }, + gentype.PrefersProtobuf[*resourcev1alpha3.DeviceClass](), + ), } } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/fake/fake_deviceclass.go b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/fake/fake_deviceclass.go index d96cbd221..0f365b29b 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/fake/fake_deviceclass.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/fake/fake_deviceclass.go @@ -19,133 +19,35 @@ limitations under the License. package fake import ( - "context" - json "encoding/json" - "fmt" - v1alpha3 "k8s.io/api/resource/v1alpha3" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" resourcev1alpha3 "k8s.io/client-go/applyconfigurations/resource/v1alpha3" - testing "k8s.io/client-go/testing" + gentype "k8s.io/client-go/gentype" + typedresourcev1alpha3 "k8s.io/client-go/kubernetes/typed/resource/v1alpha3" ) -// FakeDeviceClasses implements DeviceClassInterface -type FakeDeviceClasses struct { +// fakeDeviceClasses implements DeviceClassInterface +type fakeDeviceClasses struct { + *gentype.FakeClientWithListAndApply[*v1alpha3.DeviceClass, *v1alpha3.DeviceClassList, *resourcev1alpha3.DeviceClassApplyConfiguration] Fake *FakeResourceV1alpha3 } -var deviceclassesResource = v1alpha3.SchemeGroupVersion.WithResource("deviceclasses") - -var deviceclassesKind = v1alpha3.SchemeGroupVersion.WithKind("DeviceClass") - -// Get takes name of the deviceClass, and returns the corresponding deviceClass object, and an error if there is any. -func (c *FakeDeviceClasses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha3.DeviceClass, err error) { - emptyResult := &v1alpha3.DeviceClass{} - obj, err := c.Fake. - Invokes(testing.NewRootGetActionWithOptions(deviceclassesResource, name, options), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1alpha3.DeviceClass), err -} - -// List takes label and field selectors, and returns the list of DeviceClasses that match those selectors. -func (c *FakeDeviceClasses) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha3.DeviceClassList, err error) { - emptyResult := &v1alpha3.DeviceClassList{} - obj, err := c.Fake. 
- Invokes(testing.NewRootListActionWithOptions(deviceclassesResource, deviceclassesKind, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1alpha3.DeviceClassList{ListMeta: obj.(*v1alpha3.DeviceClassList).ListMeta} - for _, item := range obj.(*v1alpha3.DeviceClassList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested deviceClasses. -func (c *FakeDeviceClasses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewRootWatchActionWithOptions(deviceclassesResource, opts)) -} - -// Create takes the representation of a deviceClass and creates it. Returns the server's representation of the deviceClass, and an error, if there is any. -func (c *FakeDeviceClasses) Create(ctx context.Context, deviceClass *v1alpha3.DeviceClass, opts v1.CreateOptions) (result *v1alpha3.DeviceClass, err error) { - emptyResult := &v1alpha3.DeviceClass{} - obj, err := c.Fake. - Invokes(testing.NewRootCreateActionWithOptions(deviceclassesResource, deviceClass, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1alpha3.DeviceClass), err -} - -// Update takes the representation of a deviceClass and updates it. Returns the server's representation of the deviceClass, and an error, if there is any. -func (c *FakeDeviceClasses) Update(ctx context.Context, deviceClass *v1alpha3.DeviceClass, opts v1.UpdateOptions) (result *v1alpha3.DeviceClass, err error) { - emptyResult := &v1alpha3.DeviceClass{} - obj, err := c.Fake. - Invokes(testing.NewRootUpdateActionWithOptions(deviceclassesResource, deviceClass, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1alpha3.DeviceClass), err -} - -// Delete takes name of the deviceClass and deletes it. Returns an error if one occurs. -func (c *FakeDeviceClasses) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewRootDeleteActionWithOptions(deviceclassesResource, name, opts), &v1alpha3.DeviceClass{}) - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeDeviceClasses) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewRootDeleteCollectionActionWithOptions(deviceclassesResource, opts, listOpts) - - _, err := c.Fake.Invokes(action, &v1alpha3.DeviceClassList{}) - return err -} - -// Patch applies the patch and returns the patched deviceClass. -func (c *FakeDeviceClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha3.DeviceClass, err error) { - emptyResult := &v1alpha3.DeviceClass{} - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceActionWithOptions(deviceclassesResource, name, pt, data, opts, subresources...), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1alpha3.DeviceClass), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied deviceClass. 
-func (c *FakeDeviceClasses) Apply(ctx context.Context, deviceClass *resourcev1alpha3.DeviceClassApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha3.DeviceClass, err error) { - if deviceClass == nil { - return nil, fmt.Errorf("deviceClass provided to Apply must not be nil") - } - data, err := json.Marshal(deviceClass) - if err != nil { - return nil, err - } - name := deviceClass.Name - if name == nil { - return nil, fmt.Errorf("deviceClass.Name must be provided to Apply") - } - emptyResult := &v1alpha3.DeviceClass{} - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceActionWithOptions(deviceclassesResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) - if obj == nil { - return emptyResult, err +func newFakeDeviceClasses(fake *FakeResourceV1alpha3) typedresourcev1alpha3.DeviceClassInterface { + return &fakeDeviceClasses{ + gentype.NewFakeClientWithListAndApply[*v1alpha3.DeviceClass, *v1alpha3.DeviceClassList, *resourcev1alpha3.DeviceClassApplyConfiguration]( + fake.Fake, + "", + v1alpha3.SchemeGroupVersion.WithResource("deviceclasses"), + v1alpha3.SchemeGroupVersion.WithKind("DeviceClass"), + func() *v1alpha3.DeviceClass { return &v1alpha3.DeviceClass{} }, + func() *v1alpha3.DeviceClassList { return &v1alpha3.DeviceClassList{} }, + func(dst, src *v1alpha3.DeviceClassList) { dst.ListMeta = src.ListMeta }, + func(list *v1alpha3.DeviceClassList) []*v1alpha3.DeviceClass { + return gentype.ToPointerSlice(list.Items) + }, + func(list *v1alpha3.DeviceClassList, items []*v1alpha3.DeviceClass) { + list.Items = gentype.FromPointerSlice(items) + }, + ), + fake, } - return obj.(*v1alpha3.DeviceClass), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/fake/fake_podschedulingcontext.go b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/fake/fake_podschedulingcontext.go deleted file mode 100644 index 54898993e..000000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/fake/fake_podschedulingcontext.go +++ /dev/null @@ -1,197 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. 
- -package fake - -import ( - "context" - json "encoding/json" - "fmt" - - v1alpha3 "k8s.io/api/resource/v1alpha3" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - resourcev1alpha3 "k8s.io/client-go/applyconfigurations/resource/v1alpha3" - testing "k8s.io/client-go/testing" -) - -// FakePodSchedulingContexts implements PodSchedulingContextInterface -type FakePodSchedulingContexts struct { - Fake *FakeResourceV1alpha3 - ns string -} - -var podschedulingcontextsResource = v1alpha3.SchemeGroupVersion.WithResource("podschedulingcontexts") - -var podschedulingcontextsKind = v1alpha3.SchemeGroupVersion.WithKind("PodSchedulingContext") - -// Get takes name of the podSchedulingContext, and returns the corresponding podSchedulingContext object, and an error if there is any. -func (c *FakePodSchedulingContexts) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha3.PodSchedulingContext, err error) { - emptyResult := &v1alpha3.PodSchedulingContext{} - obj, err := c.Fake. - Invokes(testing.NewGetActionWithOptions(podschedulingcontextsResource, c.ns, name, options), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1alpha3.PodSchedulingContext), err -} - -// List takes label and field selectors, and returns the list of PodSchedulingContexts that match those selectors. -func (c *FakePodSchedulingContexts) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha3.PodSchedulingContextList, err error) { - emptyResult := &v1alpha3.PodSchedulingContextList{} - obj, err := c.Fake. - Invokes(testing.NewListActionWithOptions(podschedulingcontextsResource, podschedulingcontextsKind, c.ns, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1alpha3.PodSchedulingContextList{ListMeta: obj.(*v1alpha3.PodSchedulingContextList).ListMeta} - for _, item := range obj.(*v1alpha3.PodSchedulingContextList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested podSchedulingContexts. -func (c *FakePodSchedulingContexts) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchActionWithOptions(podschedulingcontextsResource, c.ns, opts)) - -} - -// Create takes the representation of a podSchedulingContext and creates it. Returns the server's representation of the podSchedulingContext, and an error, if there is any. -func (c *FakePodSchedulingContexts) Create(ctx context.Context, podSchedulingContext *v1alpha3.PodSchedulingContext, opts v1.CreateOptions) (result *v1alpha3.PodSchedulingContext, err error) { - emptyResult := &v1alpha3.PodSchedulingContext{} - obj, err := c.Fake. - Invokes(testing.NewCreateActionWithOptions(podschedulingcontextsResource, c.ns, podSchedulingContext, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1alpha3.PodSchedulingContext), err -} - -// Update takes the representation of a podSchedulingContext and updates it. Returns the server's representation of the podSchedulingContext, and an error, if there is any. 
-func (c *FakePodSchedulingContexts) Update(ctx context.Context, podSchedulingContext *v1alpha3.PodSchedulingContext, opts v1.UpdateOptions) (result *v1alpha3.PodSchedulingContext, err error) { - emptyResult := &v1alpha3.PodSchedulingContext{} - obj, err := c.Fake. - Invokes(testing.NewUpdateActionWithOptions(podschedulingcontextsResource, c.ns, podSchedulingContext, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1alpha3.PodSchedulingContext), err -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakePodSchedulingContexts) UpdateStatus(ctx context.Context, podSchedulingContext *v1alpha3.PodSchedulingContext, opts v1.UpdateOptions) (result *v1alpha3.PodSchedulingContext, err error) { - emptyResult := &v1alpha3.PodSchedulingContext{} - obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceActionWithOptions(podschedulingcontextsResource, "status", c.ns, podSchedulingContext, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1alpha3.PodSchedulingContext), err -} - -// Delete takes name of the podSchedulingContext and deletes it. Returns an error if one occurs. -func (c *FakePodSchedulingContexts) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(podschedulingcontextsResource, c.ns, name, opts), &v1alpha3.PodSchedulingContext{}) - - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakePodSchedulingContexts) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionActionWithOptions(podschedulingcontextsResource, c.ns, opts, listOpts) - - _, err := c.Fake.Invokes(action, &v1alpha3.PodSchedulingContextList{}) - return err -} - -// Patch applies the patch and returns the patched podSchedulingContext. -func (c *FakePodSchedulingContexts) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha3.PodSchedulingContext, err error) { - emptyResult := &v1alpha3.PodSchedulingContext{} - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceActionWithOptions(podschedulingcontextsResource, c.ns, name, pt, data, opts, subresources...), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1alpha3.PodSchedulingContext), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied podSchedulingContext. -func (c *FakePodSchedulingContexts) Apply(ctx context.Context, podSchedulingContext *resourcev1alpha3.PodSchedulingContextApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha3.PodSchedulingContext, err error) { - if podSchedulingContext == nil { - return nil, fmt.Errorf("podSchedulingContext provided to Apply must not be nil") - } - data, err := json.Marshal(podSchedulingContext) - if err != nil { - return nil, err - } - name := podSchedulingContext.Name - if name == nil { - return nil, fmt.Errorf("podSchedulingContext.Name must be provided to Apply") - } - emptyResult := &v1alpha3.PodSchedulingContext{} - obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceActionWithOptions(podschedulingcontextsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1alpha3.PodSchedulingContext), err -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *FakePodSchedulingContexts) ApplyStatus(ctx context.Context, podSchedulingContext *resourcev1alpha3.PodSchedulingContextApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha3.PodSchedulingContext, err error) { - if podSchedulingContext == nil { - return nil, fmt.Errorf("podSchedulingContext provided to Apply must not be nil") - } - data, err := json.Marshal(podSchedulingContext) - if err != nil { - return nil, err - } - name := podSchedulingContext.Name - if name == nil { - return nil, fmt.Errorf("podSchedulingContext.Name must be provided to Apply") - } - emptyResult := &v1alpha3.PodSchedulingContext{} - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceActionWithOptions(podschedulingcontextsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1alpha3.PodSchedulingContext), err -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/fake/fake_resource_client.go b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/fake/fake_resource_client.go index 4523d9f09..83dfdb2b9 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/fake/fake_resource_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/fake/fake_resource_client.go @@ -29,23 +29,19 @@ type FakeResourceV1alpha3 struct { } func (c *FakeResourceV1alpha3) DeviceClasses() v1alpha3.DeviceClassInterface { - return &FakeDeviceClasses{c} -} - -func (c *FakeResourceV1alpha3) PodSchedulingContexts(namespace string) v1alpha3.PodSchedulingContextInterface { - return &FakePodSchedulingContexts{c, namespace} + return newFakeDeviceClasses(c) } func (c *FakeResourceV1alpha3) ResourceClaims(namespace string) v1alpha3.ResourceClaimInterface { - return &FakeResourceClaims{c, namespace} + return newFakeResourceClaims(c, namespace) } func (c *FakeResourceV1alpha3) ResourceClaimTemplates(namespace string) v1alpha3.ResourceClaimTemplateInterface { - return &FakeResourceClaimTemplates{c, namespace} + return newFakeResourceClaimTemplates(c, namespace) } func (c *FakeResourceV1alpha3) ResourceSlices() v1alpha3.ResourceSliceInterface { - return &FakeResourceSlices{c} + return newFakeResourceSlices(c) } // RESTClient returns a RESTClient that is used to communicate diff --git a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/fake/fake_resourceclaim.go b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/fake/fake_resourceclaim.go index db38b3d60..502d4f2e2 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/fake/fake_resourceclaim.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/fake/fake_resourceclaim.go @@ -19,179 +19,35 @@ limitations under the License. 
package fake import ( - "context" - json "encoding/json" - "fmt" - v1alpha3 "k8s.io/api/resource/v1alpha3" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" resourcev1alpha3 "k8s.io/client-go/applyconfigurations/resource/v1alpha3" - testing "k8s.io/client-go/testing" + gentype "k8s.io/client-go/gentype" + typedresourcev1alpha3 "k8s.io/client-go/kubernetes/typed/resource/v1alpha3" ) -// FakeResourceClaims implements ResourceClaimInterface -type FakeResourceClaims struct { +// fakeResourceClaims implements ResourceClaimInterface +type fakeResourceClaims struct { + *gentype.FakeClientWithListAndApply[*v1alpha3.ResourceClaim, *v1alpha3.ResourceClaimList, *resourcev1alpha3.ResourceClaimApplyConfiguration] Fake *FakeResourceV1alpha3 - ns string -} - -var resourceclaimsResource = v1alpha3.SchemeGroupVersion.WithResource("resourceclaims") - -var resourceclaimsKind = v1alpha3.SchemeGroupVersion.WithKind("ResourceClaim") - -// Get takes name of the resourceClaim, and returns the corresponding resourceClaim object, and an error if there is any. -func (c *FakeResourceClaims) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha3.ResourceClaim, err error) { - emptyResult := &v1alpha3.ResourceClaim{} - obj, err := c.Fake. - Invokes(testing.NewGetActionWithOptions(resourceclaimsResource, c.ns, name, options), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1alpha3.ResourceClaim), err -} - -// List takes label and field selectors, and returns the list of ResourceClaims that match those selectors. -func (c *FakeResourceClaims) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha3.ResourceClaimList, err error) { - emptyResult := &v1alpha3.ResourceClaimList{} - obj, err := c.Fake. - Invokes(testing.NewListActionWithOptions(resourceclaimsResource, resourceclaimsKind, c.ns, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1alpha3.ResourceClaimList{ListMeta: obj.(*v1alpha3.ResourceClaimList).ListMeta} - for _, item := range obj.(*v1alpha3.ResourceClaimList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested resourceClaims. -func (c *FakeResourceClaims) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchActionWithOptions(resourceclaimsResource, c.ns, opts)) - -} - -// Create takes the representation of a resourceClaim and creates it. Returns the server's representation of the resourceClaim, and an error, if there is any. -func (c *FakeResourceClaims) Create(ctx context.Context, resourceClaim *v1alpha3.ResourceClaim, opts v1.CreateOptions) (result *v1alpha3.ResourceClaim, err error) { - emptyResult := &v1alpha3.ResourceClaim{} - obj, err := c.Fake. - Invokes(testing.NewCreateActionWithOptions(resourceclaimsResource, c.ns, resourceClaim, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1alpha3.ResourceClaim), err -} - -// Update takes the representation of a resourceClaim and updates it. Returns the server's representation of the resourceClaim, and an error, if there is any. 
-func (c *FakeResourceClaims) Update(ctx context.Context, resourceClaim *v1alpha3.ResourceClaim, opts v1.UpdateOptions) (result *v1alpha3.ResourceClaim, err error) { - emptyResult := &v1alpha3.ResourceClaim{} - obj, err := c.Fake. - Invokes(testing.NewUpdateActionWithOptions(resourceclaimsResource, c.ns, resourceClaim, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1alpha3.ResourceClaim), err } -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeResourceClaims) UpdateStatus(ctx context.Context, resourceClaim *v1alpha3.ResourceClaim, opts v1.UpdateOptions) (result *v1alpha3.ResourceClaim, err error) { - emptyResult := &v1alpha3.ResourceClaim{} - obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceActionWithOptions(resourceclaimsResource, "status", c.ns, resourceClaim, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1alpha3.ResourceClaim), err -} - -// Delete takes name of the resourceClaim and deletes it. Returns an error if one occurs. -func (c *FakeResourceClaims) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(resourceclaimsResource, c.ns, name, opts), &v1alpha3.ResourceClaim{}) - - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeResourceClaims) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionActionWithOptions(resourceclaimsResource, c.ns, opts, listOpts) - - _, err := c.Fake.Invokes(action, &v1alpha3.ResourceClaimList{}) - return err -} - -// Patch applies the patch and returns the patched resourceClaim. -func (c *FakeResourceClaims) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha3.ResourceClaim, err error) { - emptyResult := &v1alpha3.ResourceClaim{} - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceActionWithOptions(resourceclaimsResource, c.ns, name, pt, data, opts, subresources...), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1alpha3.ResourceClaim), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied resourceClaim. -func (c *FakeResourceClaims) Apply(ctx context.Context, resourceClaim *resourcev1alpha3.ResourceClaimApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha3.ResourceClaim, err error) { - if resourceClaim == nil { - return nil, fmt.Errorf("resourceClaim provided to Apply must not be nil") - } - data, err := json.Marshal(resourceClaim) - if err != nil { - return nil, err - } - name := resourceClaim.Name - if name == nil { - return nil, fmt.Errorf("resourceClaim.Name must be provided to Apply") - } - emptyResult := &v1alpha3.ResourceClaim{} - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceActionWithOptions(resourceclaimsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1alpha3.ResourceClaim), err -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
-func (c *FakeResourceClaims) ApplyStatus(ctx context.Context, resourceClaim *resourcev1alpha3.ResourceClaimApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha3.ResourceClaim, err error) { - if resourceClaim == nil { - return nil, fmt.Errorf("resourceClaim provided to Apply must not be nil") - } - data, err := json.Marshal(resourceClaim) - if err != nil { - return nil, err - } - name := resourceClaim.Name - if name == nil { - return nil, fmt.Errorf("resourceClaim.Name must be provided to Apply") - } - emptyResult := &v1alpha3.ResourceClaim{} - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceActionWithOptions(resourceclaimsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) - - if obj == nil { - return emptyResult, err +func newFakeResourceClaims(fake *FakeResourceV1alpha3, namespace string) typedresourcev1alpha3.ResourceClaimInterface { + return &fakeResourceClaims{ + gentype.NewFakeClientWithListAndApply[*v1alpha3.ResourceClaim, *v1alpha3.ResourceClaimList, *resourcev1alpha3.ResourceClaimApplyConfiguration]( + fake.Fake, + namespace, + v1alpha3.SchemeGroupVersion.WithResource("resourceclaims"), + v1alpha3.SchemeGroupVersion.WithKind("ResourceClaim"), + func() *v1alpha3.ResourceClaim { return &v1alpha3.ResourceClaim{} }, + func() *v1alpha3.ResourceClaimList { return &v1alpha3.ResourceClaimList{} }, + func(dst, src *v1alpha3.ResourceClaimList) { dst.ListMeta = src.ListMeta }, + func(list *v1alpha3.ResourceClaimList) []*v1alpha3.ResourceClaim { + return gentype.ToPointerSlice(list.Items) + }, + func(list *v1alpha3.ResourceClaimList, items []*v1alpha3.ResourceClaim) { + list.Items = gentype.FromPointerSlice(items) + }, + ), + fake, } - return obj.(*v1alpha3.ResourceClaim), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/fake/fake_resourceclaimtemplate.go b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/fake/fake_resourceclaimtemplate.go index 28db7261f..0ecd4c25e 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/fake/fake_resourceclaimtemplate.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/fake/fake_resourceclaimtemplate.go @@ -19,142 +19,35 @@ limitations under the License. package fake import ( - "context" - json "encoding/json" - "fmt" - v1alpha3 "k8s.io/api/resource/v1alpha3" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" resourcev1alpha3 "k8s.io/client-go/applyconfigurations/resource/v1alpha3" - testing "k8s.io/client-go/testing" + gentype "k8s.io/client-go/gentype" + typedresourcev1alpha3 "k8s.io/client-go/kubernetes/typed/resource/v1alpha3" ) -// FakeResourceClaimTemplates implements ResourceClaimTemplateInterface -type FakeResourceClaimTemplates struct { +// fakeResourceClaimTemplates implements ResourceClaimTemplateInterface +type fakeResourceClaimTemplates struct { + *gentype.FakeClientWithListAndApply[*v1alpha3.ResourceClaimTemplate, *v1alpha3.ResourceClaimTemplateList, *resourcev1alpha3.ResourceClaimTemplateApplyConfiguration] Fake *FakeResourceV1alpha3 - ns string -} - -var resourceclaimtemplatesResource = v1alpha3.SchemeGroupVersion.WithResource("resourceclaimtemplates") - -var resourceclaimtemplatesKind = v1alpha3.SchemeGroupVersion.WithKind("ResourceClaimTemplate") - -// Get takes name of the resourceClaimTemplate, and returns the corresponding resourceClaimTemplate object, and an error if there is any. 
-func (c *FakeResourceClaimTemplates) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha3.ResourceClaimTemplate, err error) { - emptyResult := &v1alpha3.ResourceClaimTemplate{} - obj, err := c.Fake. - Invokes(testing.NewGetActionWithOptions(resourceclaimtemplatesResource, c.ns, name, options), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1alpha3.ResourceClaimTemplate), err -} - -// List takes label and field selectors, and returns the list of ResourceClaimTemplates that match those selectors. -func (c *FakeResourceClaimTemplates) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha3.ResourceClaimTemplateList, err error) { - emptyResult := &v1alpha3.ResourceClaimTemplateList{} - obj, err := c.Fake. - Invokes(testing.NewListActionWithOptions(resourceclaimtemplatesResource, resourceclaimtemplatesKind, c.ns, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1alpha3.ResourceClaimTemplateList{ListMeta: obj.(*v1alpha3.ResourceClaimTemplateList).ListMeta} - for _, item := range obj.(*v1alpha3.ResourceClaimTemplateList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested resourceClaimTemplates. -func (c *FakeResourceClaimTemplates) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchActionWithOptions(resourceclaimtemplatesResource, c.ns, opts)) - } -// Create takes the representation of a resourceClaimTemplate and creates it. Returns the server's representation of the resourceClaimTemplate, and an error, if there is any. -func (c *FakeResourceClaimTemplates) Create(ctx context.Context, resourceClaimTemplate *v1alpha3.ResourceClaimTemplate, opts v1.CreateOptions) (result *v1alpha3.ResourceClaimTemplate, err error) { - emptyResult := &v1alpha3.ResourceClaimTemplate{} - obj, err := c.Fake. - Invokes(testing.NewCreateActionWithOptions(resourceclaimtemplatesResource, c.ns, resourceClaimTemplate, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1alpha3.ResourceClaimTemplate), err -} - -// Update takes the representation of a resourceClaimTemplate and updates it. Returns the server's representation of the resourceClaimTemplate, and an error, if there is any. -func (c *FakeResourceClaimTemplates) Update(ctx context.Context, resourceClaimTemplate *v1alpha3.ResourceClaimTemplate, opts v1.UpdateOptions) (result *v1alpha3.ResourceClaimTemplate, err error) { - emptyResult := &v1alpha3.ResourceClaimTemplate{} - obj, err := c.Fake. - Invokes(testing.NewUpdateActionWithOptions(resourceclaimtemplatesResource, c.ns, resourceClaimTemplate, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1alpha3.ResourceClaimTemplate), err -} - -// Delete takes name of the resourceClaimTemplate and deletes it. Returns an error if one occurs. -func (c *FakeResourceClaimTemplates) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(resourceclaimtemplatesResource, c.ns, name, opts), &v1alpha3.ResourceClaimTemplate{}) - - return err -} - -// DeleteCollection deletes a collection of objects. 
-func (c *FakeResourceClaimTemplates) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionActionWithOptions(resourceclaimtemplatesResource, c.ns, opts, listOpts) - - _, err := c.Fake.Invokes(action, &v1alpha3.ResourceClaimTemplateList{}) - return err -} - -// Patch applies the patch and returns the patched resourceClaimTemplate. -func (c *FakeResourceClaimTemplates) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha3.ResourceClaimTemplate, err error) { - emptyResult := &v1alpha3.ResourceClaimTemplate{} - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceActionWithOptions(resourceclaimtemplatesResource, c.ns, name, pt, data, opts, subresources...), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1alpha3.ResourceClaimTemplate), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied resourceClaimTemplate. -func (c *FakeResourceClaimTemplates) Apply(ctx context.Context, resourceClaimTemplate *resourcev1alpha3.ResourceClaimTemplateApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha3.ResourceClaimTemplate, err error) { - if resourceClaimTemplate == nil { - return nil, fmt.Errorf("resourceClaimTemplate provided to Apply must not be nil") - } - data, err := json.Marshal(resourceClaimTemplate) - if err != nil { - return nil, err - } - name := resourceClaimTemplate.Name - if name == nil { - return nil, fmt.Errorf("resourceClaimTemplate.Name must be provided to Apply") - } - emptyResult := &v1alpha3.ResourceClaimTemplate{} - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceActionWithOptions(resourceclaimtemplatesResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) - - if obj == nil { - return emptyResult, err +func newFakeResourceClaimTemplates(fake *FakeResourceV1alpha3, namespace string) typedresourcev1alpha3.ResourceClaimTemplateInterface { + return &fakeResourceClaimTemplates{ + gentype.NewFakeClientWithListAndApply[*v1alpha3.ResourceClaimTemplate, *v1alpha3.ResourceClaimTemplateList, *resourcev1alpha3.ResourceClaimTemplateApplyConfiguration]( + fake.Fake, + namespace, + v1alpha3.SchemeGroupVersion.WithResource("resourceclaimtemplates"), + v1alpha3.SchemeGroupVersion.WithKind("ResourceClaimTemplate"), + func() *v1alpha3.ResourceClaimTemplate { return &v1alpha3.ResourceClaimTemplate{} }, + func() *v1alpha3.ResourceClaimTemplateList { return &v1alpha3.ResourceClaimTemplateList{} }, + func(dst, src *v1alpha3.ResourceClaimTemplateList) { dst.ListMeta = src.ListMeta }, + func(list *v1alpha3.ResourceClaimTemplateList) []*v1alpha3.ResourceClaimTemplate { + return gentype.ToPointerSlice(list.Items) + }, + func(list *v1alpha3.ResourceClaimTemplateList, items []*v1alpha3.ResourceClaimTemplate) { + list.Items = gentype.FromPointerSlice(items) + }, + ), + fake, } - return obj.(*v1alpha3.ResourceClaimTemplate), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/fake/fake_resourceslice.go b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/fake/fake_resourceslice.go index c355fc454..0b9b01c00 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/fake/fake_resourceslice.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/fake/fake_resourceslice.go @@ -19,133 +19,35 @@ limitations under the License. 
package fake import ( - "context" - json "encoding/json" - "fmt" - v1alpha3 "k8s.io/api/resource/v1alpha3" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" resourcev1alpha3 "k8s.io/client-go/applyconfigurations/resource/v1alpha3" - testing "k8s.io/client-go/testing" + gentype "k8s.io/client-go/gentype" + typedresourcev1alpha3 "k8s.io/client-go/kubernetes/typed/resource/v1alpha3" ) -// FakeResourceSlices implements ResourceSliceInterface -type FakeResourceSlices struct { +// fakeResourceSlices implements ResourceSliceInterface +type fakeResourceSlices struct { + *gentype.FakeClientWithListAndApply[*v1alpha3.ResourceSlice, *v1alpha3.ResourceSliceList, *resourcev1alpha3.ResourceSliceApplyConfiguration] Fake *FakeResourceV1alpha3 } -var resourceslicesResource = v1alpha3.SchemeGroupVersion.WithResource("resourceslices") - -var resourceslicesKind = v1alpha3.SchemeGroupVersion.WithKind("ResourceSlice") - -// Get takes name of the resourceSlice, and returns the corresponding resourceSlice object, and an error if there is any. -func (c *FakeResourceSlices) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha3.ResourceSlice, err error) { - emptyResult := &v1alpha3.ResourceSlice{} - obj, err := c.Fake. - Invokes(testing.NewRootGetActionWithOptions(resourceslicesResource, name, options), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1alpha3.ResourceSlice), err -} - -// List takes label and field selectors, and returns the list of ResourceSlices that match those selectors. -func (c *FakeResourceSlices) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha3.ResourceSliceList, err error) { - emptyResult := &v1alpha3.ResourceSliceList{} - obj, err := c.Fake. - Invokes(testing.NewRootListActionWithOptions(resourceslicesResource, resourceslicesKind, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1alpha3.ResourceSliceList{ListMeta: obj.(*v1alpha3.ResourceSliceList).ListMeta} - for _, item := range obj.(*v1alpha3.ResourceSliceList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested resourceSlices. -func (c *FakeResourceSlices) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewRootWatchActionWithOptions(resourceslicesResource, opts)) -} - -// Create takes the representation of a resourceSlice and creates it. Returns the server's representation of the resourceSlice, and an error, if there is any. -func (c *FakeResourceSlices) Create(ctx context.Context, resourceSlice *v1alpha3.ResourceSlice, opts v1.CreateOptions) (result *v1alpha3.ResourceSlice, err error) { - emptyResult := &v1alpha3.ResourceSlice{} - obj, err := c.Fake. - Invokes(testing.NewRootCreateActionWithOptions(resourceslicesResource, resourceSlice, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1alpha3.ResourceSlice), err -} - -// Update takes the representation of a resourceSlice and updates it. Returns the server's representation of the resourceSlice, and an error, if there is any. 
-func (c *FakeResourceSlices) Update(ctx context.Context, resourceSlice *v1alpha3.ResourceSlice, opts v1.UpdateOptions) (result *v1alpha3.ResourceSlice, err error) { - emptyResult := &v1alpha3.ResourceSlice{} - obj, err := c.Fake. - Invokes(testing.NewRootUpdateActionWithOptions(resourceslicesResource, resourceSlice, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1alpha3.ResourceSlice), err -} - -// Delete takes name of the resourceSlice and deletes it. Returns an error if one occurs. -func (c *FakeResourceSlices) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewRootDeleteActionWithOptions(resourceslicesResource, name, opts), &v1alpha3.ResourceSlice{}) - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeResourceSlices) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewRootDeleteCollectionActionWithOptions(resourceslicesResource, opts, listOpts) - - _, err := c.Fake.Invokes(action, &v1alpha3.ResourceSliceList{}) - return err -} - -// Patch applies the patch and returns the patched resourceSlice. -func (c *FakeResourceSlices) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha3.ResourceSlice, err error) { - emptyResult := &v1alpha3.ResourceSlice{} - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceActionWithOptions(resourceslicesResource, name, pt, data, opts, subresources...), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1alpha3.ResourceSlice), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied resourceSlice. -func (c *FakeResourceSlices) Apply(ctx context.Context, resourceSlice *resourcev1alpha3.ResourceSliceApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha3.ResourceSlice, err error) { - if resourceSlice == nil { - return nil, fmt.Errorf("resourceSlice provided to Apply must not be nil") - } - data, err := json.Marshal(resourceSlice) - if err != nil { - return nil, err - } - name := resourceSlice.Name - if name == nil { - return nil, fmt.Errorf("resourceSlice.Name must be provided to Apply") - } - emptyResult := &v1alpha3.ResourceSlice{} - obj, err := c.Fake. 
- Invokes(testing.NewRootPatchSubresourceActionWithOptions(resourceslicesResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) - if obj == nil { - return emptyResult, err +func newFakeResourceSlices(fake *FakeResourceV1alpha3) typedresourcev1alpha3.ResourceSliceInterface { + return &fakeResourceSlices{ + gentype.NewFakeClientWithListAndApply[*v1alpha3.ResourceSlice, *v1alpha3.ResourceSliceList, *resourcev1alpha3.ResourceSliceApplyConfiguration]( + fake.Fake, + "", + v1alpha3.SchemeGroupVersion.WithResource("resourceslices"), + v1alpha3.SchemeGroupVersion.WithKind("ResourceSlice"), + func() *v1alpha3.ResourceSlice { return &v1alpha3.ResourceSlice{} }, + func() *v1alpha3.ResourceSliceList { return &v1alpha3.ResourceSliceList{} }, + func(dst, src *v1alpha3.ResourceSliceList) { dst.ListMeta = src.ListMeta }, + func(list *v1alpha3.ResourceSliceList) []*v1alpha3.ResourceSlice { + return gentype.ToPointerSlice(list.Items) + }, + func(list *v1alpha3.ResourceSliceList, items []*v1alpha3.ResourceSlice) { + list.Items = gentype.FromPointerSlice(items) + }, + ), + fake, } - return obj.(*v1alpha3.ResourceSlice), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/generated_expansion.go index 747e564b7..cd8862ea8 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/generated_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/generated_expansion.go @@ -20,8 +20,6 @@ package v1alpha3 type DeviceClassExpansion interface{} -type PodSchedulingContextExpansion interface{} - type ResourceClaimExpansion interface{} type ResourceClaimTemplateExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/podschedulingcontext.go b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/podschedulingcontext.go deleted file mode 100644 index af5984321..000000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/podschedulingcontext.go +++ /dev/null @@ -1,73 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1alpha3 - -import ( - "context" - - v1alpha3 "k8s.io/api/resource/v1alpha3" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - resourcev1alpha3 "k8s.io/client-go/applyconfigurations/resource/v1alpha3" - gentype "k8s.io/client-go/gentype" - scheme "k8s.io/client-go/kubernetes/scheme" -) - -// PodSchedulingContextsGetter has a method to return a PodSchedulingContextInterface. -// A group's client should implement this interface. -type PodSchedulingContextsGetter interface { - PodSchedulingContexts(namespace string) PodSchedulingContextInterface -} - -// PodSchedulingContextInterface has methods to work with PodSchedulingContext resources. 
-type PodSchedulingContextInterface interface { - Create(ctx context.Context, podSchedulingContext *v1alpha3.PodSchedulingContext, opts v1.CreateOptions) (*v1alpha3.PodSchedulingContext, error) - Update(ctx context.Context, podSchedulingContext *v1alpha3.PodSchedulingContext, opts v1.UpdateOptions) (*v1alpha3.PodSchedulingContext, error) - // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - UpdateStatus(ctx context.Context, podSchedulingContext *v1alpha3.PodSchedulingContext, opts v1.UpdateOptions) (*v1alpha3.PodSchedulingContext, error) - Delete(ctx context.Context, name string, opts v1.DeleteOptions) error - DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha3.PodSchedulingContext, error) - List(ctx context.Context, opts v1.ListOptions) (*v1alpha3.PodSchedulingContextList, error) - Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha3.PodSchedulingContext, err error) - Apply(ctx context.Context, podSchedulingContext *resourcev1alpha3.PodSchedulingContextApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha3.PodSchedulingContext, err error) - // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). - ApplyStatus(ctx context.Context, podSchedulingContext *resourcev1alpha3.PodSchedulingContextApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha3.PodSchedulingContext, err error) - PodSchedulingContextExpansion -} - -// podSchedulingContexts implements PodSchedulingContextInterface -type podSchedulingContexts struct { - *gentype.ClientWithListAndApply[*v1alpha3.PodSchedulingContext, *v1alpha3.PodSchedulingContextList, *resourcev1alpha3.PodSchedulingContextApplyConfiguration] -} - -// newPodSchedulingContexts returns a PodSchedulingContexts -func newPodSchedulingContexts(c *ResourceV1alpha3Client, namespace string) *podSchedulingContexts { - return &podSchedulingContexts{ - gentype.NewClientWithListAndApply[*v1alpha3.PodSchedulingContext, *v1alpha3.PodSchedulingContextList, *resourcev1alpha3.PodSchedulingContextApplyConfiguration]( - "podschedulingcontexts", - c.RESTClient(), - scheme.ParameterCodec, - namespace, - func() *v1alpha3.PodSchedulingContext { return &v1alpha3.PodSchedulingContext{} }, - func() *v1alpha3.PodSchedulingContextList { return &v1alpha3.PodSchedulingContextList{} }), - } -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/resource_client.go b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/resource_client.go index 879f0990d..acc9b97c2 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/resource_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/resource_client.go @@ -19,17 +19,16 @@ limitations under the License. 
package v1alpha3 import ( - "net/http" + http "net/http" - v1alpha3 "k8s.io/api/resource/v1alpha3" - "k8s.io/client-go/kubernetes/scheme" + resourcev1alpha3 "k8s.io/api/resource/v1alpha3" + scheme "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) type ResourceV1alpha3Interface interface { RESTClient() rest.Interface DeviceClassesGetter - PodSchedulingContextsGetter ResourceClaimsGetter ResourceClaimTemplatesGetter ResourceSlicesGetter @@ -44,10 +43,6 @@ func (c *ResourceV1alpha3Client) DeviceClasses() DeviceClassInterface { return newDeviceClasses(c) } -func (c *ResourceV1alpha3Client) PodSchedulingContexts(namespace string) PodSchedulingContextInterface { - return newPodSchedulingContexts(c, namespace) -} - func (c *ResourceV1alpha3Client) ResourceClaims(namespace string) ResourceClaimInterface { return newResourceClaims(c, namespace) } @@ -105,10 +100,10 @@ func New(c rest.Interface) *ResourceV1alpha3Client { } func setConfigDefaults(config *rest.Config) error { - gv := v1alpha3.SchemeGroupVersion + gv := resourcev1alpha3.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/resourceclaim.go b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/resourceclaim.go index 2ac65c005..a95ac56d3 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/resourceclaim.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/resourceclaim.go @@ -19,13 +19,13 @@ limitations under the License. package v1alpha3 import ( - "context" + context "context" - v1alpha3 "k8s.io/api/resource/v1alpha3" + resourcev1alpha3 "k8s.io/api/resource/v1alpha3" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - resourcev1alpha3 "k8s.io/client-go/applyconfigurations/resource/v1alpha3" + applyconfigurationsresourcev1alpha3 "k8s.io/client-go/applyconfigurations/resource/v1alpha3" gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" ) @@ -38,36 +38,38 @@ type ResourceClaimsGetter interface { // ResourceClaimInterface has methods to work with ResourceClaim resources. type ResourceClaimInterface interface { - Create(ctx context.Context, resourceClaim *v1alpha3.ResourceClaim, opts v1.CreateOptions) (*v1alpha3.ResourceClaim, error) - Update(ctx context.Context, resourceClaim *v1alpha3.ResourceClaim, opts v1.UpdateOptions) (*v1alpha3.ResourceClaim, error) + Create(ctx context.Context, resourceClaim *resourcev1alpha3.ResourceClaim, opts v1.CreateOptions) (*resourcev1alpha3.ResourceClaim, error) + Update(ctx context.Context, resourceClaim *resourcev1alpha3.ResourceClaim, opts v1.UpdateOptions) (*resourcev1alpha3.ResourceClaim, error) // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
- UpdateStatus(ctx context.Context, resourceClaim *v1alpha3.ResourceClaim, opts v1.UpdateOptions) (*v1alpha3.ResourceClaim, error) + UpdateStatus(ctx context.Context, resourceClaim *resourcev1alpha3.ResourceClaim, opts v1.UpdateOptions) (*resourcev1alpha3.ResourceClaim, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha3.ResourceClaim, error) - List(ctx context.Context, opts v1.ListOptions) (*v1alpha3.ResourceClaimList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*resourcev1alpha3.ResourceClaim, error) + List(ctx context.Context, opts v1.ListOptions) (*resourcev1alpha3.ResourceClaimList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha3.ResourceClaim, err error) - Apply(ctx context.Context, resourceClaim *resourcev1alpha3.ResourceClaimApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha3.ResourceClaim, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *resourcev1alpha3.ResourceClaim, err error) + Apply(ctx context.Context, resourceClaim *applyconfigurationsresourcev1alpha3.ResourceClaimApplyConfiguration, opts v1.ApplyOptions) (result *resourcev1alpha3.ResourceClaim, err error) // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). - ApplyStatus(ctx context.Context, resourceClaim *resourcev1alpha3.ResourceClaimApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha3.ResourceClaim, err error) + ApplyStatus(ctx context.Context, resourceClaim *applyconfigurationsresourcev1alpha3.ResourceClaimApplyConfiguration, opts v1.ApplyOptions) (result *resourcev1alpha3.ResourceClaim, err error) ResourceClaimExpansion } // resourceClaims implements ResourceClaimInterface type resourceClaims struct { - *gentype.ClientWithListAndApply[*v1alpha3.ResourceClaim, *v1alpha3.ResourceClaimList, *resourcev1alpha3.ResourceClaimApplyConfiguration] + *gentype.ClientWithListAndApply[*resourcev1alpha3.ResourceClaim, *resourcev1alpha3.ResourceClaimList, *applyconfigurationsresourcev1alpha3.ResourceClaimApplyConfiguration] } // newResourceClaims returns a ResourceClaims func newResourceClaims(c *ResourceV1alpha3Client, namespace string) *resourceClaims { return &resourceClaims{ - gentype.NewClientWithListAndApply[*v1alpha3.ResourceClaim, *v1alpha3.ResourceClaimList, *resourcev1alpha3.ResourceClaimApplyConfiguration]( + gentype.NewClientWithListAndApply[*resourcev1alpha3.ResourceClaim, *resourcev1alpha3.ResourceClaimList, *applyconfigurationsresourcev1alpha3.ResourceClaimApplyConfiguration]( "resourceclaims", c.RESTClient(), scheme.ParameterCodec, namespace, - func() *v1alpha3.ResourceClaim { return &v1alpha3.ResourceClaim{} }, - func() *v1alpha3.ResourceClaimList { return &v1alpha3.ResourceClaimList{} }), + func() *resourcev1alpha3.ResourceClaim { return &resourcev1alpha3.ResourceClaim{} }, + func() *resourcev1alpha3.ResourceClaimList { return &resourcev1alpha3.ResourceClaimList{} }, + gentype.PrefersProtobuf[*resourcev1alpha3.ResourceClaim](), + ), } } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/resourceclaimtemplate.go 
b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/resourceclaimtemplate.go index 87997bfee..a8ba1f696 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/resourceclaimtemplate.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/resourceclaimtemplate.go @@ -19,13 +19,13 @@ limitations under the License. package v1alpha3 import ( - "context" + context "context" - v1alpha3 "k8s.io/api/resource/v1alpha3" + resourcev1alpha3 "k8s.io/api/resource/v1alpha3" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - resourcev1alpha3 "k8s.io/client-go/applyconfigurations/resource/v1alpha3" + applyconfigurationsresourcev1alpha3 "k8s.io/client-go/applyconfigurations/resource/v1alpha3" gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" ) @@ -38,32 +38,36 @@ type ResourceClaimTemplatesGetter interface { // ResourceClaimTemplateInterface has methods to work with ResourceClaimTemplate resources. type ResourceClaimTemplateInterface interface { - Create(ctx context.Context, resourceClaimTemplate *v1alpha3.ResourceClaimTemplate, opts v1.CreateOptions) (*v1alpha3.ResourceClaimTemplate, error) - Update(ctx context.Context, resourceClaimTemplate *v1alpha3.ResourceClaimTemplate, opts v1.UpdateOptions) (*v1alpha3.ResourceClaimTemplate, error) + Create(ctx context.Context, resourceClaimTemplate *resourcev1alpha3.ResourceClaimTemplate, opts v1.CreateOptions) (*resourcev1alpha3.ResourceClaimTemplate, error) + Update(ctx context.Context, resourceClaimTemplate *resourcev1alpha3.ResourceClaimTemplate, opts v1.UpdateOptions) (*resourcev1alpha3.ResourceClaimTemplate, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha3.ResourceClaimTemplate, error) - List(ctx context.Context, opts v1.ListOptions) (*v1alpha3.ResourceClaimTemplateList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*resourcev1alpha3.ResourceClaimTemplate, error) + List(ctx context.Context, opts v1.ListOptions) (*resourcev1alpha3.ResourceClaimTemplateList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha3.ResourceClaimTemplate, err error) - Apply(ctx context.Context, resourceClaimTemplate *resourcev1alpha3.ResourceClaimTemplateApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha3.ResourceClaimTemplate, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *resourcev1alpha3.ResourceClaimTemplate, err error) + Apply(ctx context.Context, resourceClaimTemplate *applyconfigurationsresourcev1alpha3.ResourceClaimTemplateApplyConfiguration, opts v1.ApplyOptions) (result *resourcev1alpha3.ResourceClaimTemplate, err error) ResourceClaimTemplateExpansion } // resourceClaimTemplates implements ResourceClaimTemplateInterface type resourceClaimTemplates struct { - *gentype.ClientWithListAndApply[*v1alpha3.ResourceClaimTemplate, *v1alpha3.ResourceClaimTemplateList, *resourcev1alpha3.ResourceClaimTemplateApplyConfiguration] + *gentype.ClientWithListAndApply[*resourcev1alpha3.ResourceClaimTemplate, *resourcev1alpha3.ResourceClaimTemplateList, 
*applyconfigurationsresourcev1alpha3.ResourceClaimTemplateApplyConfiguration] } // newResourceClaimTemplates returns a ResourceClaimTemplates func newResourceClaimTemplates(c *ResourceV1alpha3Client, namespace string) *resourceClaimTemplates { return &resourceClaimTemplates{ - gentype.NewClientWithListAndApply[*v1alpha3.ResourceClaimTemplate, *v1alpha3.ResourceClaimTemplateList, *resourcev1alpha3.ResourceClaimTemplateApplyConfiguration]( + gentype.NewClientWithListAndApply[*resourcev1alpha3.ResourceClaimTemplate, *resourcev1alpha3.ResourceClaimTemplateList, *applyconfigurationsresourcev1alpha3.ResourceClaimTemplateApplyConfiguration]( "resourceclaimtemplates", c.RESTClient(), scheme.ParameterCodec, namespace, - func() *v1alpha3.ResourceClaimTemplate { return &v1alpha3.ResourceClaimTemplate{} }, - func() *v1alpha3.ResourceClaimTemplateList { return &v1alpha3.ResourceClaimTemplateList{} }), + func() *resourcev1alpha3.ResourceClaimTemplate { return &resourcev1alpha3.ResourceClaimTemplate{} }, + func() *resourcev1alpha3.ResourceClaimTemplateList { + return &resourcev1alpha3.ResourceClaimTemplateList{} + }, + gentype.PrefersProtobuf[*resourcev1alpha3.ResourceClaimTemplate](), + ), } } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/resourceslice.go b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/resourceslice.go index 081904140..91dfce5ec 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/resourceslice.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/resourceslice.go @@ -19,13 +19,13 @@ limitations under the License. package v1alpha3 import ( - "context" + context "context" - v1alpha3 "k8s.io/api/resource/v1alpha3" + resourcev1alpha3 "k8s.io/api/resource/v1alpha3" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - resourcev1alpha3 "k8s.io/client-go/applyconfigurations/resource/v1alpha3" + applyconfigurationsresourcev1alpha3 "k8s.io/client-go/applyconfigurations/resource/v1alpha3" gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" ) @@ -38,32 +38,34 @@ type ResourceSlicesGetter interface { // ResourceSliceInterface has methods to work with ResourceSlice resources. 
type ResourceSliceInterface interface { - Create(ctx context.Context, resourceSlice *v1alpha3.ResourceSlice, opts v1.CreateOptions) (*v1alpha3.ResourceSlice, error) - Update(ctx context.Context, resourceSlice *v1alpha3.ResourceSlice, opts v1.UpdateOptions) (*v1alpha3.ResourceSlice, error) + Create(ctx context.Context, resourceSlice *resourcev1alpha3.ResourceSlice, opts v1.CreateOptions) (*resourcev1alpha3.ResourceSlice, error) + Update(ctx context.Context, resourceSlice *resourcev1alpha3.ResourceSlice, opts v1.UpdateOptions) (*resourcev1alpha3.ResourceSlice, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha3.ResourceSlice, error) - List(ctx context.Context, opts v1.ListOptions) (*v1alpha3.ResourceSliceList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*resourcev1alpha3.ResourceSlice, error) + List(ctx context.Context, opts v1.ListOptions) (*resourcev1alpha3.ResourceSliceList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha3.ResourceSlice, err error) - Apply(ctx context.Context, resourceSlice *resourcev1alpha3.ResourceSliceApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha3.ResourceSlice, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *resourcev1alpha3.ResourceSlice, err error) + Apply(ctx context.Context, resourceSlice *applyconfigurationsresourcev1alpha3.ResourceSliceApplyConfiguration, opts v1.ApplyOptions) (result *resourcev1alpha3.ResourceSlice, err error) ResourceSliceExpansion } // resourceSlices implements ResourceSliceInterface type resourceSlices struct { - *gentype.ClientWithListAndApply[*v1alpha3.ResourceSlice, *v1alpha3.ResourceSliceList, *resourcev1alpha3.ResourceSliceApplyConfiguration] + *gentype.ClientWithListAndApply[*resourcev1alpha3.ResourceSlice, *resourcev1alpha3.ResourceSliceList, *applyconfigurationsresourcev1alpha3.ResourceSliceApplyConfiguration] } // newResourceSlices returns a ResourceSlices func newResourceSlices(c *ResourceV1alpha3Client) *resourceSlices { return &resourceSlices{ - gentype.NewClientWithListAndApply[*v1alpha3.ResourceSlice, *v1alpha3.ResourceSliceList, *resourcev1alpha3.ResourceSliceApplyConfiguration]( + gentype.NewClientWithListAndApply[*resourcev1alpha3.ResourceSlice, *resourcev1alpha3.ResourceSliceList, *applyconfigurationsresourcev1alpha3.ResourceSliceApplyConfiguration]( "resourceslices", c.RESTClient(), scheme.ParameterCodec, "", - func() *v1alpha3.ResourceSlice { return &v1alpha3.ResourceSlice{} }, - func() *v1alpha3.ResourceSliceList { return &v1alpha3.ResourceSliceList{} }), + func() *resourcev1alpha3.ResourceSlice { return &resourcev1alpha3.ResourceSlice{} }, + func() *resourcev1alpha3.ResourceSliceList { return &resourcev1alpha3.ResourceSliceList{} }, + gentype.PrefersProtobuf[*resourcev1alpha3.ResourceSlice](), + ), } } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1beta1/deviceclass.go b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1beta1/deviceclass.go new file mode 100644 index 000000000..e41416d39 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1beta1/deviceclass.go @@ -0,0 +1,71 @@ +/* +Copyright The 
Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta1 + +import ( + context "context" + + resourcev1beta1 "k8s.io/api/resource/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + applyconfigurationsresourcev1beta1 "k8s.io/client-go/applyconfigurations/resource/v1beta1" + gentype "k8s.io/client-go/gentype" + scheme "k8s.io/client-go/kubernetes/scheme" +) + +// DeviceClassesGetter has a method to return a DeviceClassInterface. +// A group's client should implement this interface. +type DeviceClassesGetter interface { + DeviceClasses() DeviceClassInterface +} + +// DeviceClassInterface has methods to work with DeviceClass resources. +type DeviceClassInterface interface { + Create(ctx context.Context, deviceClass *resourcev1beta1.DeviceClass, opts v1.CreateOptions) (*resourcev1beta1.DeviceClass, error) + Update(ctx context.Context, deviceClass *resourcev1beta1.DeviceClass, opts v1.UpdateOptions) (*resourcev1beta1.DeviceClass, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*resourcev1beta1.DeviceClass, error) + List(ctx context.Context, opts v1.ListOptions) (*resourcev1beta1.DeviceClassList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *resourcev1beta1.DeviceClass, err error) + Apply(ctx context.Context, deviceClass *applyconfigurationsresourcev1beta1.DeviceClassApplyConfiguration, opts v1.ApplyOptions) (result *resourcev1beta1.DeviceClass, err error) + DeviceClassExpansion +} + +// deviceClasses implements DeviceClassInterface +type deviceClasses struct { + *gentype.ClientWithListAndApply[*resourcev1beta1.DeviceClass, *resourcev1beta1.DeviceClassList, *applyconfigurationsresourcev1beta1.DeviceClassApplyConfiguration] +} + +// newDeviceClasses returns a DeviceClasses +func newDeviceClasses(c *ResourceV1beta1Client) *deviceClasses { + return &deviceClasses{ + gentype.NewClientWithListAndApply[*resourcev1beta1.DeviceClass, *resourcev1beta1.DeviceClassList, *applyconfigurationsresourcev1beta1.DeviceClassApplyConfiguration]( + "deviceclasses", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *resourcev1beta1.DeviceClass { return &resourcev1beta1.DeviceClass{} }, + func() *resourcev1beta1.DeviceClassList { return &resourcev1beta1.DeviceClassList{} }, + gentype.PrefersProtobuf[*resourcev1beta1.DeviceClass](), + ), + } +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1beta1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1beta1/doc.go new file mode 100644 index 000000000..771101956 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1beta1/doc.go @@ -0,0 
+1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. +package v1beta1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1beta1/fake/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1beta1/fake/doc.go new file mode 100644 index 000000000..16f443990 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1beta1/fake/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// Package fake has the automatically generated clients. +package fake diff --git a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1beta1/fake/fake_deviceclass.go b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1beta1/fake/fake_deviceclass.go new file mode 100644 index 000000000..335629899 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1beta1/fake/fake_deviceclass.go @@ -0,0 +1,51 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package fake + +import ( + v1beta1 "k8s.io/api/resource/v1beta1" + resourcev1beta1 "k8s.io/client-go/applyconfigurations/resource/v1beta1" + gentype "k8s.io/client-go/gentype" + typedresourcev1beta1 "k8s.io/client-go/kubernetes/typed/resource/v1beta1" +) + +// fakeDeviceClasses implements DeviceClassInterface +type fakeDeviceClasses struct { + *gentype.FakeClientWithListAndApply[*v1beta1.DeviceClass, *v1beta1.DeviceClassList, *resourcev1beta1.DeviceClassApplyConfiguration] + Fake *FakeResourceV1beta1 +} + +func newFakeDeviceClasses(fake *FakeResourceV1beta1) typedresourcev1beta1.DeviceClassInterface { + return &fakeDeviceClasses{ + gentype.NewFakeClientWithListAndApply[*v1beta1.DeviceClass, *v1beta1.DeviceClassList, *resourcev1beta1.DeviceClassApplyConfiguration]( + fake.Fake, + "", + v1beta1.SchemeGroupVersion.WithResource("deviceclasses"), + v1beta1.SchemeGroupVersion.WithKind("DeviceClass"), + func() *v1beta1.DeviceClass { return &v1beta1.DeviceClass{} }, + func() *v1beta1.DeviceClassList { return &v1beta1.DeviceClassList{} }, + func(dst, src *v1beta1.DeviceClassList) { dst.ListMeta = src.ListMeta }, + func(list *v1beta1.DeviceClassList) []*v1beta1.DeviceClass { return gentype.ToPointerSlice(list.Items) }, + func(list *v1beta1.DeviceClassList, items []*v1beta1.DeviceClass) { + list.Items = gentype.FromPointerSlice(items) + }, + ), + fake, + } +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1beta1/fake/fake_resource_client.go b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1beta1/fake/fake_resource_client.go new file mode 100644 index 000000000..3cb54e9ce --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1beta1/fake/fake_resource_client.go @@ -0,0 +1,52 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + v1beta1 "k8s.io/client-go/kubernetes/typed/resource/v1beta1" + rest "k8s.io/client-go/rest" + testing "k8s.io/client-go/testing" +) + +type FakeResourceV1beta1 struct { + *testing.Fake +} + +func (c *FakeResourceV1beta1) DeviceClasses() v1beta1.DeviceClassInterface { + return newFakeDeviceClasses(c) +} + +func (c *FakeResourceV1beta1) ResourceClaims(namespace string) v1beta1.ResourceClaimInterface { + return newFakeResourceClaims(c, namespace) +} + +func (c *FakeResourceV1beta1) ResourceClaimTemplates(namespace string) v1beta1.ResourceClaimTemplateInterface { + return newFakeResourceClaimTemplates(c, namespace) +} + +func (c *FakeResourceV1beta1) ResourceSlices() v1beta1.ResourceSliceInterface { + return newFakeResourceSlices(c) +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. 
+func (c *FakeResourceV1beta1) RESTClient() rest.Interface { + var ret *rest.RESTClient + return ret +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1beta1/fake/fake_resourceclaim.go b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1beta1/fake/fake_resourceclaim.go new file mode 100644 index 000000000..ca03121a8 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1beta1/fake/fake_resourceclaim.go @@ -0,0 +1,53 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + v1beta1 "k8s.io/api/resource/v1beta1" + resourcev1beta1 "k8s.io/client-go/applyconfigurations/resource/v1beta1" + gentype "k8s.io/client-go/gentype" + typedresourcev1beta1 "k8s.io/client-go/kubernetes/typed/resource/v1beta1" +) + +// fakeResourceClaims implements ResourceClaimInterface +type fakeResourceClaims struct { + *gentype.FakeClientWithListAndApply[*v1beta1.ResourceClaim, *v1beta1.ResourceClaimList, *resourcev1beta1.ResourceClaimApplyConfiguration] + Fake *FakeResourceV1beta1 +} + +func newFakeResourceClaims(fake *FakeResourceV1beta1, namespace string) typedresourcev1beta1.ResourceClaimInterface { + return &fakeResourceClaims{ + gentype.NewFakeClientWithListAndApply[*v1beta1.ResourceClaim, *v1beta1.ResourceClaimList, *resourcev1beta1.ResourceClaimApplyConfiguration]( + fake.Fake, + namespace, + v1beta1.SchemeGroupVersion.WithResource("resourceclaims"), + v1beta1.SchemeGroupVersion.WithKind("ResourceClaim"), + func() *v1beta1.ResourceClaim { return &v1beta1.ResourceClaim{} }, + func() *v1beta1.ResourceClaimList { return &v1beta1.ResourceClaimList{} }, + func(dst, src *v1beta1.ResourceClaimList) { dst.ListMeta = src.ListMeta }, + func(list *v1beta1.ResourceClaimList) []*v1beta1.ResourceClaim { + return gentype.ToPointerSlice(list.Items) + }, + func(list *v1beta1.ResourceClaimList, items []*v1beta1.ResourceClaim) { + list.Items = gentype.FromPointerSlice(items) + }, + ), + fake, + } +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1beta1/fake/fake_resourceclaimtemplate.go b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1beta1/fake/fake_resourceclaimtemplate.go new file mode 100644 index 000000000..2ee43d458 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1beta1/fake/fake_resourceclaimtemplate.go @@ -0,0 +1,53 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package fake + +import ( + v1beta1 "k8s.io/api/resource/v1beta1" + resourcev1beta1 "k8s.io/client-go/applyconfigurations/resource/v1beta1" + gentype "k8s.io/client-go/gentype" + typedresourcev1beta1 "k8s.io/client-go/kubernetes/typed/resource/v1beta1" +) + +// fakeResourceClaimTemplates implements ResourceClaimTemplateInterface +type fakeResourceClaimTemplates struct { + *gentype.FakeClientWithListAndApply[*v1beta1.ResourceClaimTemplate, *v1beta1.ResourceClaimTemplateList, *resourcev1beta1.ResourceClaimTemplateApplyConfiguration] + Fake *FakeResourceV1beta1 +} + +func newFakeResourceClaimTemplates(fake *FakeResourceV1beta1, namespace string) typedresourcev1beta1.ResourceClaimTemplateInterface { + return &fakeResourceClaimTemplates{ + gentype.NewFakeClientWithListAndApply[*v1beta1.ResourceClaimTemplate, *v1beta1.ResourceClaimTemplateList, *resourcev1beta1.ResourceClaimTemplateApplyConfiguration]( + fake.Fake, + namespace, + v1beta1.SchemeGroupVersion.WithResource("resourceclaimtemplates"), + v1beta1.SchemeGroupVersion.WithKind("ResourceClaimTemplate"), + func() *v1beta1.ResourceClaimTemplate { return &v1beta1.ResourceClaimTemplate{} }, + func() *v1beta1.ResourceClaimTemplateList { return &v1beta1.ResourceClaimTemplateList{} }, + func(dst, src *v1beta1.ResourceClaimTemplateList) { dst.ListMeta = src.ListMeta }, + func(list *v1beta1.ResourceClaimTemplateList) []*v1beta1.ResourceClaimTemplate { + return gentype.ToPointerSlice(list.Items) + }, + func(list *v1beta1.ResourceClaimTemplateList, items []*v1beta1.ResourceClaimTemplate) { + list.Items = gentype.FromPointerSlice(items) + }, + ), + fake, + } +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1beta1/fake/fake_resourceslice.go b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1beta1/fake/fake_resourceslice.go new file mode 100644 index 000000000..d6d64b7d3 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1beta1/fake/fake_resourceslice.go @@ -0,0 +1,53 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package fake + +import ( + v1beta1 "k8s.io/api/resource/v1beta1" + resourcev1beta1 "k8s.io/client-go/applyconfigurations/resource/v1beta1" + gentype "k8s.io/client-go/gentype" + typedresourcev1beta1 "k8s.io/client-go/kubernetes/typed/resource/v1beta1" +) + +// fakeResourceSlices implements ResourceSliceInterface +type fakeResourceSlices struct { + *gentype.FakeClientWithListAndApply[*v1beta1.ResourceSlice, *v1beta1.ResourceSliceList, *resourcev1beta1.ResourceSliceApplyConfiguration] + Fake *FakeResourceV1beta1 +} + +func newFakeResourceSlices(fake *FakeResourceV1beta1) typedresourcev1beta1.ResourceSliceInterface { + return &fakeResourceSlices{ + gentype.NewFakeClientWithListAndApply[*v1beta1.ResourceSlice, *v1beta1.ResourceSliceList, *resourcev1beta1.ResourceSliceApplyConfiguration]( + fake.Fake, + "", + v1beta1.SchemeGroupVersion.WithResource("resourceslices"), + v1beta1.SchemeGroupVersion.WithKind("ResourceSlice"), + func() *v1beta1.ResourceSlice { return &v1beta1.ResourceSlice{} }, + func() *v1beta1.ResourceSliceList { return &v1beta1.ResourceSliceList{} }, + func(dst, src *v1beta1.ResourceSliceList) { dst.ListMeta = src.ListMeta }, + func(list *v1beta1.ResourceSliceList) []*v1beta1.ResourceSlice { + return gentype.ToPointerSlice(list.Items) + }, + func(list *v1beta1.ResourceSliceList, items []*v1beta1.ResourceSlice) { + list.Items = gentype.FromPointerSlice(items) + }, + ), + fake, + } +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1beta1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1beta1/generated_expansion.go new file mode 100644 index 000000000..d5fcfc214 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1beta1/generated_expansion.go @@ -0,0 +1,27 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta1 + +type DeviceClassExpansion interface{} + +type ResourceClaimExpansion interface{} + +type ResourceClaimTemplateExpansion interface{} + +type ResourceSliceExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1beta1/resource_client.go b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1beta1/resource_client.go new file mode 100644 index 000000000..c6a3b2836 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1beta1/resource_client.go @@ -0,0 +1,122 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package v1beta1 + +import ( + http "net/http" + + resourcev1beta1 "k8s.io/api/resource/v1beta1" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +type ResourceV1beta1Interface interface { + RESTClient() rest.Interface + DeviceClassesGetter + ResourceClaimsGetter + ResourceClaimTemplatesGetter + ResourceSlicesGetter +} + +// ResourceV1beta1Client is used to interact with features provided by the resource.k8s.io group. +type ResourceV1beta1Client struct { + restClient rest.Interface +} + +func (c *ResourceV1beta1Client) DeviceClasses() DeviceClassInterface { + return newDeviceClasses(c) +} + +func (c *ResourceV1beta1Client) ResourceClaims(namespace string) ResourceClaimInterface { + return newResourceClaims(c, namespace) +} + +func (c *ResourceV1beta1Client) ResourceClaimTemplates(namespace string) ResourceClaimTemplateInterface { + return newResourceClaimTemplates(c, namespace) +} + +func (c *ResourceV1beta1Client) ResourceSlices() ResourceSliceInterface { + return newResourceSlices(c) +} + +// NewForConfig creates a new ResourceV1beta1Client for the given config. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). +func NewForConfig(c *rest.Config) (*ResourceV1beta1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + httpClient, err := rest.HTTPClientFor(&config) + if err != nil { + return nil, err + } + return NewForConfigAndClient(&config, httpClient) +} + +// NewForConfigAndClient creates a new ResourceV1beta1Client for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. +func NewForConfigAndClient(c *rest.Config, h *http.Client) (*ResourceV1beta1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientForConfigAndClient(&config, h) + if err != nil { + return nil, err + } + return &ResourceV1beta1Client{client}, nil +} + +// NewForConfigOrDie creates a new ResourceV1beta1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *ResourceV1beta1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new ResourceV1beta1Client for the given RESTClient. +func New(c rest.Interface) *ResourceV1beta1Client { + return &ResourceV1beta1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := resourcev1beta1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion() + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *ResourceV1beta1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1beta1/resourceclaim.go b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1beta1/resourceclaim.go new file mode 100644 index 000000000..3172ab5df --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1beta1/resourceclaim.go @@ -0,0 +1,75 @@ +/* +Copyright The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta1 + +import ( + context "context" + + resourcev1beta1 "k8s.io/api/resource/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + applyconfigurationsresourcev1beta1 "k8s.io/client-go/applyconfigurations/resource/v1beta1" + gentype "k8s.io/client-go/gentype" + scheme "k8s.io/client-go/kubernetes/scheme" +) + +// ResourceClaimsGetter has a method to return a ResourceClaimInterface. +// A group's client should implement this interface. +type ResourceClaimsGetter interface { + ResourceClaims(namespace string) ResourceClaimInterface +} + +// ResourceClaimInterface has methods to work with ResourceClaim resources. +type ResourceClaimInterface interface { + Create(ctx context.Context, resourceClaim *resourcev1beta1.ResourceClaim, opts v1.CreateOptions) (*resourcev1beta1.ResourceClaim, error) + Update(ctx context.Context, resourceClaim *resourcev1beta1.ResourceClaim, opts v1.UpdateOptions) (*resourcev1beta1.ResourceClaim, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + UpdateStatus(ctx context.Context, resourceClaim *resourcev1beta1.ResourceClaim, opts v1.UpdateOptions) (*resourcev1beta1.ResourceClaim, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*resourcev1beta1.ResourceClaim, error) + List(ctx context.Context, opts v1.ListOptions) (*resourcev1beta1.ResourceClaimList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *resourcev1beta1.ResourceClaim, err error) + Apply(ctx context.Context, resourceClaim *applyconfigurationsresourcev1beta1.ResourceClaimApplyConfiguration, opts v1.ApplyOptions) (result *resourcev1beta1.ResourceClaim, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
+ ApplyStatus(ctx context.Context, resourceClaim *applyconfigurationsresourcev1beta1.ResourceClaimApplyConfiguration, opts v1.ApplyOptions) (result *resourcev1beta1.ResourceClaim, err error) + ResourceClaimExpansion +} + +// resourceClaims implements ResourceClaimInterface +type resourceClaims struct { + *gentype.ClientWithListAndApply[*resourcev1beta1.ResourceClaim, *resourcev1beta1.ResourceClaimList, *applyconfigurationsresourcev1beta1.ResourceClaimApplyConfiguration] +} + +// newResourceClaims returns a ResourceClaims +func newResourceClaims(c *ResourceV1beta1Client, namespace string) *resourceClaims { + return &resourceClaims{ + gentype.NewClientWithListAndApply[*resourcev1beta1.ResourceClaim, *resourcev1beta1.ResourceClaimList, *applyconfigurationsresourcev1beta1.ResourceClaimApplyConfiguration]( + "resourceclaims", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *resourcev1beta1.ResourceClaim { return &resourcev1beta1.ResourceClaim{} }, + func() *resourcev1beta1.ResourceClaimList { return &resourcev1beta1.ResourceClaimList{} }, + gentype.PrefersProtobuf[*resourcev1beta1.ResourceClaim](), + ), + } +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1beta1/resourceclaimtemplate.go b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1beta1/resourceclaimtemplate.go new file mode 100644 index 000000000..26c6fe829 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1beta1/resourceclaimtemplate.go @@ -0,0 +1,71 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta1 + +import ( + context "context" + + resourcev1beta1 "k8s.io/api/resource/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + applyconfigurationsresourcev1beta1 "k8s.io/client-go/applyconfigurations/resource/v1beta1" + gentype "k8s.io/client-go/gentype" + scheme "k8s.io/client-go/kubernetes/scheme" +) + +// ResourceClaimTemplatesGetter has a method to return a ResourceClaimTemplateInterface. +// A group's client should implement this interface. +type ResourceClaimTemplatesGetter interface { + ResourceClaimTemplates(namespace string) ResourceClaimTemplateInterface +} + +// ResourceClaimTemplateInterface has methods to work with ResourceClaimTemplate resources. 
+type ResourceClaimTemplateInterface interface { + Create(ctx context.Context, resourceClaimTemplate *resourcev1beta1.ResourceClaimTemplate, opts v1.CreateOptions) (*resourcev1beta1.ResourceClaimTemplate, error) + Update(ctx context.Context, resourceClaimTemplate *resourcev1beta1.ResourceClaimTemplate, opts v1.UpdateOptions) (*resourcev1beta1.ResourceClaimTemplate, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*resourcev1beta1.ResourceClaimTemplate, error) + List(ctx context.Context, opts v1.ListOptions) (*resourcev1beta1.ResourceClaimTemplateList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *resourcev1beta1.ResourceClaimTemplate, err error) + Apply(ctx context.Context, resourceClaimTemplate *applyconfigurationsresourcev1beta1.ResourceClaimTemplateApplyConfiguration, opts v1.ApplyOptions) (result *resourcev1beta1.ResourceClaimTemplate, err error) + ResourceClaimTemplateExpansion +} + +// resourceClaimTemplates implements ResourceClaimTemplateInterface +type resourceClaimTemplates struct { + *gentype.ClientWithListAndApply[*resourcev1beta1.ResourceClaimTemplate, *resourcev1beta1.ResourceClaimTemplateList, *applyconfigurationsresourcev1beta1.ResourceClaimTemplateApplyConfiguration] +} + +// newResourceClaimTemplates returns a ResourceClaimTemplates +func newResourceClaimTemplates(c *ResourceV1beta1Client, namespace string) *resourceClaimTemplates { + return &resourceClaimTemplates{ + gentype.NewClientWithListAndApply[*resourcev1beta1.ResourceClaimTemplate, *resourcev1beta1.ResourceClaimTemplateList, *applyconfigurationsresourcev1beta1.ResourceClaimTemplateApplyConfiguration]( + "resourceclaimtemplates", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *resourcev1beta1.ResourceClaimTemplate { return &resourcev1beta1.ResourceClaimTemplate{} }, + func() *resourcev1beta1.ResourceClaimTemplateList { return &resourcev1beta1.ResourceClaimTemplateList{} }, + gentype.PrefersProtobuf[*resourcev1beta1.ResourceClaimTemplate](), + ), + } +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1beta1/resourceslice.go b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1beta1/resourceslice.go new file mode 100644 index 000000000..c4e985ea4 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1beta1/resourceslice.go @@ -0,0 +1,71 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package v1beta1 + +import ( + context "context" + + resourcev1beta1 "k8s.io/api/resource/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + applyconfigurationsresourcev1beta1 "k8s.io/client-go/applyconfigurations/resource/v1beta1" + gentype "k8s.io/client-go/gentype" + scheme "k8s.io/client-go/kubernetes/scheme" +) + +// ResourceSlicesGetter has a method to return a ResourceSliceInterface. +// A group's client should implement this interface. +type ResourceSlicesGetter interface { + ResourceSlices() ResourceSliceInterface +} + +// ResourceSliceInterface has methods to work with ResourceSlice resources. +type ResourceSliceInterface interface { + Create(ctx context.Context, resourceSlice *resourcev1beta1.ResourceSlice, opts v1.CreateOptions) (*resourcev1beta1.ResourceSlice, error) + Update(ctx context.Context, resourceSlice *resourcev1beta1.ResourceSlice, opts v1.UpdateOptions) (*resourcev1beta1.ResourceSlice, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*resourcev1beta1.ResourceSlice, error) + List(ctx context.Context, opts v1.ListOptions) (*resourcev1beta1.ResourceSliceList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *resourcev1beta1.ResourceSlice, err error) + Apply(ctx context.Context, resourceSlice *applyconfigurationsresourcev1beta1.ResourceSliceApplyConfiguration, opts v1.ApplyOptions) (result *resourcev1beta1.ResourceSlice, err error) + ResourceSliceExpansion +} + +// resourceSlices implements ResourceSliceInterface +type resourceSlices struct { + *gentype.ClientWithListAndApply[*resourcev1beta1.ResourceSlice, *resourcev1beta1.ResourceSliceList, *applyconfigurationsresourcev1beta1.ResourceSliceApplyConfiguration] +} + +// newResourceSlices returns a ResourceSlices +func newResourceSlices(c *ResourceV1beta1Client) *resourceSlices { + return &resourceSlices{ + gentype.NewClientWithListAndApply[*resourcev1beta1.ResourceSlice, *resourcev1beta1.ResourceSliceList, *applyconfigurationsresourcev1beta1.ResourceSliceApplyConfiguration]( + "resourceslices", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *resourcev1beta1.ResourceSlice { return &resourcev1beta1.ResourceSlice{} }, + func() *resourcev1beta1.ResourceSliceList { return &resourcev1beta1.ResourceSliceList{} }, + gentype.PrefersProtobuf[*resourcev1beta1.ResourceSlice](), + ), + } +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/fake/fake_priorityclass.go b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/fake/fake_priorityclass.go index 92847184b..ea8270eac 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/fake/fake_priorityclass.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/fake/fake_priorityclass.go @@ -19,133 +19,33 @@ limitations under the License. 
package fake import ( - "context" - json "encoding/json" - "fmt" - v1 "k8s.io/api/scheduling/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" schedulingv1 "k8s.io/client-go/applyconfigurations/scheduling/v1" - testing "k8s.io/client-go/testing" + gentype "k8s.io/client-go/gentype" + typedschedulingv1 "k8s.io/client-go/kubernetes/typed/scheduling/v1" ) -// FakePriorityClasses implements PriorityClassInterface -type FakePriorityClasses struct { +// fakePriorityClasses implements PriorityClassInterface +type fakePriorityClasses struct { + *gentype.FakeClientWithListAndApply[*v1.PriorityClass, *v1.PriorityClassList, *schedulingv1.PriorityClassApplyConfiguration] Fake *FakeSchedulingV1 } -var priorityclassesResource = v1.SchemeGroupVersion.WithResource("priorityclasses") - -var priorityclassesKind = v1.SchemeGroupVersion.WithKind("PriorityClass") - -// Get takes name of the priorityClass, and returns the corresponding priorityClass object, and an error if there is any. -func (c *FakePriorityClasses) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.PriorityClass, err error) { - emptyResult := &v1.PriorityClass{} - obj, err := c.Fake. - Invokes(testing.NewRootGetActionWithOptions(priorityclassesResource, name, options), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1.PriorityClass), err -} - -// List takes label and field selectors, and returns the list of PriorityClasses that match those selectors. -func (c *FakePriorityClasses) List(ctx context.Context, opts metav1.ListOptions) (result *v1.PriorityClassList, err error) { - emptyResult := &v1.PriorityClassList{} - obj, err := c.Fake. - Invokes(testing.NewRootListActionWithOptions(priorityclassesResource, priorityclassesKind, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1.PriorityClassList{ListMeta: obj.(*v1.PriorityClassList).ListMeta} - for _, item := range obj.(*v1.PriorityClassList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested priorityClasses. -func (c *FakePriorityClasses) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewRootWatchActionWithOptions(priorityclassesResource, opts)) -} - -// Create takes the representation of a priorityClass and creates it. Returns the server's representation of the priorityClass, and an error, if there is any. -func (c *FakePriorityClasses) Create(ctx context.Context, priorityClass *v1.PriorityClass, opts metav1.CreateOptions) (result *v1.PriorityClass, err error) { - emptyResult := &v1.PriorityClass{} - obj, err := c.Fake. - Invokes(testing.NewRootCreateActionWithOptions(priorityclassesResource, priorityClass, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1.PriorityClass), err -} - -// Update takes the representation of a priorityClass and updates it. Returns the server's representation of the priorityClass, and an error, if there is any. 
-func (c *FakePriorityClasses) Update(ctx context.Context, priorityClass *v1.PriorityClass, opts metav1.UpdateOptions) (result *v1.PriorityClass, err error) { - emptyResult := &v1.PriorityClass{} - obj, err := c.Fake. - Invokes(testing.NewRootUpdateActionWithOptions(priorityclassesResource, priorityClass, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1.PriorityClass), err -} - -// Delete takes name of the priorityClass and deletes it. Returns an error if one occurs. -func (c *FakePriorityClasses) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewRootDeleteActionWithOptions(priorityclassesResource, name, opts), &v1.PriorityClass{}) - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakePriorityClasses) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewRootDeleteCollectionActionWithOptions(priorityclassesResource, opts, listOpts) - - _, err := c.Fake.Invokes(action, &v1.PriorityClassList{}) - return err -} - -// Patch applies the patch and returns the patched priorityClass. -func (c *FakePriorityClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.PriorityClass, err error) { - emptyResult := &v1.PriorityClass{} - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceActionWithOptions(priorityclassesResource, name, pt, data, opts, subresources...), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1.PriorityClass), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied priorityClass. -func (c *FakePriorityClasses) Apply(ctx context.Context, priorityClass *schedulingv1.PriorityClassApplyConfiguration, opts metav1.ApplyOptions) (result *v1.PriorityClass, err error) { - if priorityClass == nil { - return nil, fmt.Errorf("priorityClass provided to Apply must not be nil") - } - data, err := json.Marshal(priorityClass) - if err != nil { - return nil, err - } - name := priorityClass.Name - if name == nil { - return nil, fmt.Errorf("priorityClass.Name must be provided to Apply") - } - emptyResult := &v1.PriorityClass{} - obj, err := c.Fake. 
- Invokes(testing.NewRootPatchSubresourceActionWithOptions(priorityclassesResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) - if obj == nil { - return emptyResult, err +func newFakePriorityClasses(fake *FakeSchedulingV1) typedschedulingv1.PriorityClassInterface { + return &fakePriorityClasses{ + gentype.NewFakeClientWithListAndApply[*v1.PriorityClass, *v1.PriorityClassList, *schedulingv1.PriorityClassApplyConfiguration]( + fake.Fake, + "", + v1.SchemeGroupVersion.WithResource("priorityclasses"), + v1.SchemeGroupVersion.WithKind("PriorityClass"), + func() *v1.PriorityClass { return &v1.PriorityClass{} }, + func() *v1.PriorityClassList { return &v1.PriorityClassList{} }, + func(dst, src *v1.PriorityClassList) { dst.ListMeta = src.ListMeta }, + func(list *v1.PriorityClassList) []*v1.PriorityClass { return gentype.ToPointerSlice(list.Items) }, + func(list *v1.PriorityClassList, items []*v1.PriorityClass) { + list.Items = gentype.FromPointerSlice(items) + }, + ), + fake, } - return obj.(*v1.PriorityClass), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/fake/fake_scheduling_client.go b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/fake/fake_scheduling_client.go index a64ac945b..75f903cfe 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/fake/fake_scheduling_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/fake/fake_scheduling_client.go @@ -29,7 +29,7 @@ type FakeSchedulingV1 struct { } func (c *FakeSchedulingV1) PriorityClasses() v1.PriorityClassInterface { - return &FakePriorityClasses{c} + return newFakePriorityClasses(c) } // RESTClient returns a RESTClient that is used to communicate diff --git a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/priorityclass.go b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/priorityclass.go index a28ef2fd4..3642b404a 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/priorityclass.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/priorityclass.go @@ -19,13 +19,13 @@ limitations under the License. package v1 import ( - "context" + context "context" - v1 "k8s.io/api/scheduling/v1" + schedulingv1 "k8s.io/api/scheduling/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - schedulingv1 "k8s.io/client-go/applyconfigurations/scheduling/v1" + applyconfigurationsschedulingv1 "k8s.io/client-go/applyconfigurations/scheduling/v1" gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" ) @@ -38,32 +38,34 @@ type PriorityClassesGetter interface { // PriorityClassInterface has methods to work with PriorityClass resources. 
type PriorityClassInterface interface { - Create(ctx context.Context, priorityClass *v1.PriorityClass, opts metav1.CreateOptions) (*v1.PriorityClass, error) - Update(ctx context.Context, priorityClass *v1.PriorityClass, opts metav1.UpdateOptions) (*v1.PriorityClass, error) + Create(ctx context.Context, priorityClass *schedulingv1.PriorityClass, opts metav1.CreateOptions) (*schedulingv1.PriorityClass, error) + Update(ctx context.Context, priorityClass *schedulingv1.PriorityClass, opts metav1.UpdateOptions) (*schedulingv1.PriorityClass, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error - Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.PriorityClass, error) - List(ctx context.Context, opts metav1.ListOptions) (*v1.PriorityClassList, error) + Get(ctx context.Context, name string, opts metav1.GetOptions) (*schedulingv1.PriorityClass, error) + List(ctx context.Context, opts metav1.ListOptions) (*schedulingv1.PriorityClassList, error) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.PriorityClass, err error) - Apply(ctx context.Context, priorityClass *schedulingv1.PriorityClassApplyConfiguration, opts metav1.ApplyOptions) (result *v1.PriorityClass, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *schedulingv1.PriorityClass, err error) + Apply(ctx context.Context, priorityClass *applyconfigurationsschedulingv1.PriorityClassApplyConfiguration, opts metav1.ApplyOptions) (result *schedulingv1.PriorityClass, err error) PriorityClassExpansion } // priorityClasses implements PriorityClassInterface type priorityClasses struct { - *gentype.ClientWithListAndApply[*v1.PriorityClass, *v1.PriorityClassList, *schedulingv1.PriorityClassApplyConfiguration] + *gentype.ClientWithListAndApply[*schedulingv1.PriorityClass, *schedulingv1.PriorityClassList, *applyconfigurationsschedulingv1.PriorityClassApplyConfiguration] } // newPriorityClasses returns a PriorityClasses func newPriorityClasses(c *SchedulingV1Client) *priorityClasses { return &priorityClasses{ - gentype.NewClientWithListAndApply[*v1.PriorityClass, *v1.PriorityClassList, *schedulingv1.PriorityClassApplyConfiguration]( + gentype.NewClientWithListAndApply[*schedulingv1.PriorityClass, *schedulingv1.PriorityClassList, *applyconfigurationsschedulingv1.PriorityClassApplyConfiguration]( "priorityclasses", c.RESTClient(), scheme.ParameterCodec, "", - func() *v1.PriorityClass { return &v1.PriorityClass{} }, - func() *v1.PriorityClassList { return &v1.PriorityClassList{} }), + func() *schedulingv1.PriorityClass { return &schedulingv1.PriorityClass{} }, + func() *schedulingv1.PriorityClassList { return &schedulingv1.PriorityClassList{} }, + gentype.PrefersProtobuf[*schedulingv1.PriorityClass](), + ), } } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/scheduling_client.go b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/scheduling_client.go index 11fc4b9f3..bbb46a9de 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/scheduling_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/scheduling_client.go @@ -19,10 +19,10 @@ limitations under the License. 
package v1 import ( - "net/http" + http "net/http" - v1 "k8s.io/api/scheduling/v1" - "k8s.io/client-go/kubernetes/scheme" + schedulingv1 "k8s.io/api/scheduling/v1" + scheme "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -85,10 +85,10 @@ func New(c rest.Interface) *SchedulingV1Client { } func setConfigDefaults(config *rest.Config) error { - gv := v1.SchemeGroupVersion + gv := schedulingv1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/fake/fake_priorityclass.go b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/fake/fake_priorityclass.go index 055d458a3..7d5a8e263 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/fake/fake_priorityclass.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/fake/fake_priorityclass.go @@ -19,133 +19,35 @@ limitations under the License. package fake import ( - "context" - json "encoding/json" - "fmt" - v1alpha1 "k8s.io/api/scheduling/v1alpha1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" schedulingv1alpha1 "k8s.io/client-go/applyconfigurations/scheduling/v1alpha1" - testing "k8s.io/client-go/testing" + gentype "k8s.io/client-go/gentype" + typedschedulingv1alpha1 "k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1" ) -// FakePriorityClasses implements PriorityClassInterface -type FakePriorityClasses struct { +// fakePriorityClasses implements PriorityClassInterface +type fakePriorityClasses struct { + *gentype.FakeClientWithListAndApply[*v1alpha1.PriorityClass, *v1alpha1.PriorityClassList, *schedulingv1alpha1.PriorityClassApplyConfiguration] Fake *FakeSchedulingV1alpha1 } -var priorityclassesResource = v1alpha1.SchemeGroupVersion.WithResource("priorityclasses") - -var priorityclassesKind = v1alpha1.SchemeGroupVersion.WithKind("PriorityClass") - -// Get takes name of the priorityClass, and returns the corresponding priorityClass object, and an error if there is any. -func (c *FakePriorityClasses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.PriorityClass, err error) { - emptyResult := &v1alpha1.PriorityClass{} - obj, err := c.Fake. - Invokes(testing.NewRootGetActionWithOptions(priorityclassesResource, name, options), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1alpha1.PriorityClass), err -} - -// List takes label and field selectors, and returns the list of PriorityClasses that match those selectors. -func (c *FakePriorityClasses) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.PriorityClassList, err error) { - emptyResult := &v1alpha1.PriorityClassList{} - obj, err := c.Fake. 
- Invokes(testing.NewRootListActionWithOptions(priorityclassesResource, priorityclassesKind, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1alpha1.PriorityClassList{ListMeta: obj.(*v1alpha1.PriorityClassList).ListMeta} - for _, item := range obj.(*v1alpha1.PriorityClassList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested priorityClasses. -func (c *FakePriorityClasses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewRootWatchActionWithOptions(priorityclassesResource, opts)) -} - -// Create takes the representation of a priorityClass and creates it. Returns the server's representation of the priorityClass, and an error, if there is any. -func (c *FakePriorityClasses) Create(ctx context.Context, priorityClass *v1alpha1.PriorityClass, opts v1.CreateOptions) (result *v1alpha1.PriorityClass, err error) { - emptyResult := &v1alpha1.PriorityClass{} - obj, err := c.Fake. - Invokes(testing.NewRootCreateActionWithOptions(priorityclassesResource, priorityClass, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1alpha1.PriorityClass), err -} - -// Update takes the representation of a priorityClass and updates it. Returns the server's representation of the priorityClass, and an error, if there is any. -func (c *FakePriorityClasses) Update(ctx context.Context, priorityClass *v1alpha1.PriorityClass, opts v1.UpdateOptions) (result *v1alpha1.PriorityClass, err error) { - emptyResult := &v1alpha1.PriorityClass{} - obj, err := c.Fake. - Invokes(testing.NewRootUpdateActionWithOptions(priorityclassesResource, priorityClass, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1alpha1.PriorityClass), err -} - -// Delete takes name of the priorityClass and deletes it. Returns an error if one occurs. -func (c *FakePriorityClasses) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewRootDeleteActionWithOptions(priorityclassesResource, name, opts), &v1alpha1.PriorityClass{}) - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakePriorityClasses) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewRootDeleteCollectionActionWithOptions(priorityclassesResource, opts, listOpts) - - _, err := c.Fake.Invokes(action, &v1alpha1.PriorityClassList{}) - return err -} - -// Patch applies the patch and returns the patched priorityClass. -func (c *FakePriorityClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.PriorityClass, err error) { - emptyResult := &v1alpha1.PriorityClass{} - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceActionWithOptions(priorityclassesResource, name, pt, data, opts, subresources...), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1alpha1.PriorityClass), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied priorityClass. 
-func (c *FakePriorityClasses) Apply(ctx context.Context, priorityClass *schedulingv1alpha1.PriorityClassApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.PriorityClass, err error) { - if priorityClass == nil { - return nil, fmt.Errorf("priorityClass provided to Apply must not be nil") - } - data, err := json.Marshal(priorityClass) - if err != nil { - return nil, err - } - name := priorityClass.Name - if name == nil { - return nil, fmt.Errorf("priorityClass.Name must be provided to Apply") - } - emptyResult := &v1alpha1.PriorityClass{} - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceActionWithOptions(priorityclassesResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) - if obj == nil { - return emptyResult, err +func newFakePriorityClasses(fake *FakeSchedulingV1alpha1) typedschedulingv1alpha1.PriorityClassInterface { + return &fakePriorityClasses{ + gentype.NewFakeClientWithListAndApply[*v1alpha1.PriorityClass, *v1alpha1.PriorityClassList, *schedulingv1alpha1.PriorityClassApplyConfiguration]( + fake.Fake, + "", + v1alpha1.SchemeGroupVersion.WithResource("priorityclasses"), + v1alpha1.SchemeGroupVersion.WithKind("PriorityClass"), + func() *v1alpha1.PriorityClass { return &v1alpha1.PriorityClass{} }, + func() *v1alpha1.PriorityClassList { return &v1alpha1.PriorityClassList{} }, + func(dst, src *v1alpha1.PriorityClassList) { dst.ListMeta = src.ListMeta }, + func(list *v1alpha1.PriorityClassList) []*v1alpha1.PriorityClass { + return gentype.ToPointerSlice(list.Items) + }, + func(list *v1alpha1.PriorityClassList, items []*v1alpha1.PriorityClass) { + list.Items = gentype.FromPointerSlice(items) + }, + ), + fake, } - return obj.(*v1alpha1.PriorityClass), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/fake/fake_scheduling_client.go b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/fake/fake_scheduling_client.go index 974ba193f..34e8ad9bd 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/fake/fake_scheduling_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/fake/fake_scheduling_client.go @@ -29,7 +29,7 @@ type FakeSchedulingV1alpha1 struct { } func (c *FakeSchedulingV1alpha1) PriorityClasses() v1alpha1.PriorityClassInterface { - return &FakePriorityClasses{c} + return newFakePriorityClasses(c) } // RESTClient returns a RESTClient that is used to communicate diff --git a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/priorityclass.go b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/priorityclass.go index 5c78f3de9..e7125f9fc 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/priorityclass.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/priorityclass.go @@ -19,13 +19,13 @@ limitations under the License. package v1alpha1 import ( - "context" + context "context" - v1alpha1 "k8s.io/api/scheduling/v1alpha1" + schedulingv1alpha1 "k8s.io/api/scheduling/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - schedulingv1alpha1 "k8s.io/client-go/applyconfigurations/scheduling/v1alpha1" + applyconfigurationsschedulingv1alpha1 "k8s.io/client-go/applyconfigurations/scheduling/v1alpha1" gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" ) @@ -38,32 +38,34 @@ type PriorityClassesGetter interface { // PriorityClassInterface has methods to work with PriorityClass resources. 
type PriorityClassInterface interface { - Create(ctx context.Context, priorityClass *v1alpha1.PriorityClass, opts v1.CreateOptions) (*v1alpha1.PriorityClass, error) - Update(ctx context.Context, priorityClass *v1alpha1.PriorityClass, opts v1.UpdateOptions) (*v1alpha1.PriorityClass, error) + Create(ctx context.Context, priorityClass *schedulingv1alpha1.PriorityClass, opts v1.CreateOptions) (*schedulingv1alpha1.PriorityClass, error) + Update(ctx context.Context, priorityClass *schedulingv1alpha1.PriorityClass, opts v1.UpdateOptions) (*schedulingv1alpha1.PriorityClass, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.PriorityClass, error) - List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.PriorityClassList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*schedulingv1alpha1.PriorityClass, error) + List(ctx context.Context, opts v1.ListOptions) (*schedulingv1alpha1.PriorityClassList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.PriorityClass, err error) - Apply(ctx context.Context, priorityClass *schedulingv1alpha1.PriorityClassApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.PriorityClass, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *schedulingv1alpha1.PriorityClass, err error) + Apply(ctx context.Context, priorityClass *applyconfigurationsschedulingv1alpha1.PriorityClassApplyConfiguration, opts v1.ApplyOptions) (result *schedulingv1alpha1.PriorityClass, err error) PriorityClassExpansion } // priorityClasses implements PriorityClassInterface type priorityClasses struct { - *gentype.ClientWithListAndApply[*v1alpha1.PriorityClass, *v1alpha1.PriorityClassList, *schedulingv1alpha1.PriorityClassApplyConfiguration] + *gentype.ClientWithListAndApply[*schedulingv1alpha1.PriorityClass, *schedulingv1alpha1.PriorityClassList, *applyconfigurationsschedulingv1alpha1.PriorityClassApplyConfiguration] } // newPriorityClasses returns a PriorityClasses func newPriorityClasses(c *SchedulingV1alpha1Client) *priorityClasses { return &priorityClasses{ - gentype.NewClientWithListAndApply[*v1alpha1.PriorityClass, *v1alpha1.PriorityClassList, *schedulingv1alpha1.PriorityClassApplyConfiguration]( + gentype.NewClientWithListAndApply[*schedulingv1alpha1.PriorityClass, *schedulingv1alpha1.PriorityClassList, *applyconfigurationsschedulingv1alpha1.PriorityClassApplyConfiguration]( "priorityclasses", c.RESTClient(), scheme.ParameterCodec, "", - func() *v1alpha1.PriorityClass { return &v1alpha1.PriorityClass{} }, - func() *v1alpha1.PriorityClassList { return &v1alpha1.PriorityClassList{} }), + func() *schedulingv1alpha1.PriorityClass { return &schedulingv1alpha1.PriorityClass{} }, + func() *schedulingv1alpha1.PriorityClassList { return &schedulingv1alpha1.PriorityClassList{} }, + gentype.PrefersProtobuf[*schedulingv1alpha1.PriorityClass](), + ), } } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/scheduling_client.go b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/scheduling_client.go index 47fb774a3..056ab855e 100644 --- 
a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/scheduling_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/scheduling_client.go @@ -19,10 +19,10 @@ limitations under the License. package v1alpha1 import ( - "net/http" + http "net/http" - v1alpha1 "k8s.io/api/scheduling/v1alpha1" - "k8s.io/client-go/kubernetes/scheme" + schedulingv1alpha1 "k8s.io/api/scheduling/v1alpha1" + scheme "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -85,10 +85,10 @@ func New(c rest.Interface) *SchedulingV1alpha1Client { } func setConfigDefaults(config *rest.Config) error { - gv := v1alpha1.SchemeGroupVersion + gv := schedulingv1alpha1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/fake/fake_priorityclass.go b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/fake/fake_priorityclass.go index 49d82a7ed..9a62b17f2 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/fake/fake_priorityclass.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/fake/fake_priorityclass.go @@ -19,133 +19,35 @@ limitations under the License. package fake import ( - "context" - json "encoding/json" - "fmt" - v1beta1 "k8s.io/api/scheduling/v1beta1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" schedulingv1beta1 "k8s.io/client-go/applyconfigurations/scheduling/v1beta1" - testing "k8s.io/client-go/testing" + gentype "k8s.io/client-go/gentype" + typedschedulingv1beta1 "k8s.io/client-go/kubernetes/typed/scheduling/v1beta1" ) -// FakePriorityClasses implements PriorityClassInterface -type FakePriorityClasses struct { +// fakePriorityClasses implements PriorityClassInterface +type fakePriorityClasses struct { + *gentype.FakeClientWithListAndApply[*v1beta1.PriorityClass, *v1beta1.PriorityClassList, *schedulingv1beta1.PriorityClassApplyConfiguration] Fake *FakeSchedulingV1beta1 } -var priorityclassesResource = v1beta1.SchemeGroupVersion.WithResource("priorityclasses") - -var priorityclassesKind = v1beta1.SchemeGroupVersion.WithKind("PriorityClass") - -// Get takes name of the priorityClass, and returns the corresponding priorityClass object, and an error if there is any. -func (c *FakePriorityClasses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.PriorityClass, err error) { - emptyResult := &v1beta1.PriorityClass{} - obj, err := c.Fake. - Invokes(testing.NewRootGetActionWithOptions(priorityclassesResource, name, options), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.PriorityClass), err -} - -// List takes label and field selectors, and returns the list of PriorityClasses that match those selectors. -func (c *FakePriorityClasses) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.PriorityClassList, err error) { - emptyResult := &v1beta1.PriorityClassList{} - obj, err := c.Fake. 
- Invokes(testing.NewRootListActionWithOptions(priorityclassesResource, priorityclassesKind, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1beta1.PriorityClassList{ListMeta: obj.(*v1beta1.PriorityClassList).ListMeta} - for _, item := range obj.(*v1beta1.PriorityClassList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested priorityClasses. -func (c *FakePriorityClasses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewRootWatchActionWithOptions(priorityclassesResource, opts)) -} - -// Create takes the representation of a priorityClass and creates it. Returns the server's representation of the priorityClass, and an error, if there is any. -func (c *FakePriorityClasses) Create(ctx context.Context, priorityClass *v1beta1.PriorityClass, opts v1.CreateOptions) (result *v1beta1.PriorityClass, err error) { - emptyResult := &v1beta1.PriorityClass{} - obj, err := c.Fake. - Invokes(testing.NewRootCreateActionWithOptions(priorityclassesResource, priorityClass, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.PriorityClass), err -} - -// Update takes the representation of a priorityClass and updates it. Returns the server's representation of the priorityClass, and an error, if there is any. -func (c *FakePriorityClasses) Update(ctx context.Context, priorityClass *v1beta1.PriorityClass, opts v1.UpdateOptions) (result *v1beta1.PriorityClass, err error) { - emptyResult := &v1beta1.PriorityClass{} - obj, err := c.Fake. - Invokes(testing.NewRootUpdateActionWithOptions(priorityclassesResource, priorityClass, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.PriorityClass), err -} - -// Delete takes name of the priorityClass and deletes it. Returns an error if one occurs. -func (c *FakePriorityClasses) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewRootDeleteActionWithOptions(priorityclassesResource, name, opts), &v1beta1.PriorityClass{}) - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakePriorityClasses) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewRootDeleteCollectionActionWithOptions(priorityclassesResource, opts, listOpts) - - _, err := c.Fake.Invokes(action, &v1beta1.PriorityClassList{}) - return err -} - -// Patch applies the patch and returns the patched priorityClass. -func (c *FakePriorityClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.PriorityClass, err error) { - emptyResult := &v1beta1.PriorityClass{} - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceActionWithOptions(priorityclassesResource, name, pt, data, opts, subresources...), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.PriorityClass), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied priorityClass. 
-func (c *FakePriorityClasses) Apply(ctx context.Context, priorityClass *schedulingv1beta1.PriorityClassApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.PriorityClass, err error) { - if priorityClass == nil { - return nil, fmt.Errorf("priorityClass provided to Apply must not be nil") - } - data, err := json.Marshal(priorityClass) - if err != nil { - return nil, err - } - name := priorityClass.Name - if name == nil { - return nil, fmt.Errorf("priorityClass.Name must be provided to Apply") - } - emptyResult := &v1beta1.PriorityClass{} - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceActionWithOptions(priorityclassesResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) - if obj == nil { - return emptyResult, err +func newFakePriorityClasses(fake *FakeSchedulingV1beta1) typedschedulingv1beta1.PriorityClassInterface { + return &fakePriorityClasses{ + gentype.NewFakeClientWithListAndApply[*v1beta1.PriorityClass, *v1beta1.PriorityClassList, *schedulingv1beta1.PriorityClassApplyConfiguration]( + fake.Fake, + "", + v1beta1.SchemeGroupVersion.WithResource("priorityclasses"), + v1beta1.SchemeGroupVersion.WithKind("PriorityClass"), + func() *v1beta1.PriorityClass { return &v1beta1.PriorityClass{} }, + func() *v1beta1.PriorityClassList { return &v1beta1.PriorityClassList{} }, + func(dst, src *v1beta1.PriorityClassList) { dst.ListMeta = src.ListMeta }, + func(list *v1beta1.PriorityClassList) []*v1beta1.PriorityClass { + return gentype.ToPointerSlice(list.Items) + }, + func(list *v1beta1.PriorityClassList, items []*v1beta1.PriorityClass) { + list.Items = gentype.FromPointerSlice(items) + }, + ), + fake, } - return obj.(*v1beta1.PriorityClass), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/fake/fake_scheduling_client.go b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/fake/fake_scheduling_client.go index 4a6878a45..f06fdab11 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/fake/fake_scheduling_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/fake/fake_scheduling_client.go @@ -29,7 +29,7 @@ type FakeSchedulingV1beta1 struct { } func (c *FakeSchedulingV1beta1) PriorityClasses() v1beta1.PriorityClassInterface { - return &FakePriorityClasses{c} + return newFakePriorityClasses(c) } // RESTClient returns a RESTClient that is used to communicate diff --git a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/priorityclass.go b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/priorityclass.go index 9fef1d759..dcba291e3 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/priorityclass.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/priorityclass.go @@ -19,13 +19,13 @@ limitations under the License. package v1beta1 import ( - "context" + context "context" - v1beta1 "k8s.io/api/scheduling/v1beta1" + schedulingv1beta1 "k8s.io/api/scheduling/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - schedulingv1beta1 "k8s.io/client-go/applyconfigurations/scheduling/v1beta1" + applyconfigurationsschedulingv1beta1 "k8s.io/client-go/applyconfigurations/scheduling/v1beta1" gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" ) @@ -38,32 +38,34 @@ type PriorityClassesGetter interface { // PriorityClassInterface has methods to work with PriorityClass resources. 
type PriorityClassInterface interface { - Create(ctx context.Context, priorityClass *v1beta1.PriorityClass, opts v1.CreateOptions) (*v1beta1.PriorityClass, error) - Update(ctx context.Context, priorityClass *v1beta1.PriorityClass, opts v1.UpdateOptions) (*v1beta1.PriorityClass, error) + Create(ctx context.Context, priorityClass *schedulingv1beta1.PriorityClass, opts v1.CreateOptions) (*schedulingv1beta1.PriorityClass, error) + Update(ctx context.Context, priorityClass *schedulingv1beta1.PriorityClass, opts v1.UpdateOptions) (*schedulingv1beta1.PriorityClass, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.PriorityClass, error) - List(ctx context.Context, opts v1.ListOptions) (*v1beta1.PriorityClassList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*schedulingv1beta1.PriorityClass, error) + List(ctx context.Context, opts v1.ListOptions) (*schedulingv1beta1.PriorityClassList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.PriorityClass, err error) - Apply(ctx context.Context, priorityClass *schedulingv1beta1.PriorityClassApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.PriorityClass, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *schedulingv1beta1.PriorityClass, err error) + Apply(ctx context.Context, priorityClass *applyconfigurationsschedulingv1beta1.PriorityClassApplyConfiguration, opts v1.ApplyOptions) (result *schedulingv1beta1.PriorityClass, err error) PriorityClassExpansion } // priorityClasses implements PriorityClassInterface type priorityClasses struct { - *gentype.ClientWithListAndApply[*v1beta1.PriorityClass, *v1beta1.PriorityClassList, *schedulingv1beta1.PriorityClassApplyConfiguration] + *gentype.ClientWithListAndApply[*schedulingv1beta1.PriorityClass, *schedulingv1beta1.PriorityClassList, *applyconfigurationsschedulingv1beta1.PriorityClassApplyConfiguration] } // newPriorityClasses returns a PriorityClasses func newPriorityClasses(c *SchedulingV1beta1Client) *priorityClasses { return &priorityClasses{ - gentype.NewClientWithListAndApply[*v1beta1.PriorityClass, *v1beta1.PriorityClassList, *schedulingv1beta1.PriorityClassApplyConfiguration]( + gentype.NewClientWithListAndApply[*schedulingv1beta1.PriorityClass, *schedulingv1beta1.PriorityClassList, *applyconfigurationsschedulingv1beta1.PriorityClassApplyConfiguration]( "priorityclasses", c.RESTClient(), scheme.ParameterCodec, "", - func() *v1beta1.PriorityClass { return &v1beta1.PriorityClass{} }, - func() *v1beta1.PriorityClassList { return &v1beta1.PriorityClassList{} }), + func() *schedulingv1beta1.PriorityClass { return &schedulingv1beta1.PriorityClass{} }, + func() *schedulingv1beta1.PriorityClassList { return &schedulingv1beta1.PriorityClassList{} }, + gentype.PrefersProtobuf[*schedulingv1beta1.PriorityClass](), + ), } } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/scheduling_client.go b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/scheduling_client.go index dbaf69414..9e383398e 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/scheduling_client.go +++ 
b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/scheduling_client.go @@ -19,10 +19,10 @@ limitations under the License. package v1beta1 import ( - "net/http" + http "net/http" - v1beta1 "k8s.io/api/scheduling/v1beta1" - "k8s.io/client-go/kubernetes/scheme" + schedulingv1beta1 "k8s.io/api/scheduling/v1beta1" + scheme "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -85,10 +85,10 @@ func New(c rest.Interface) *SchedulingV1beta1Client { } func setConfigDefaults(config *rest.Config) error { - gv := v1beta1.SchemeGroupVersion + gv := schedulingv1beta1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/csidriver.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/csidriver.go index 2e14db600..9eb82f9ed 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/csidriver.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/csidriver.go @@ -19,13 +19,13 @@ limitations under the License. package v1 import ( - "context" + context "context" - v1 "k8s.io/api/storage/v1" + storagev1 "k8s.io/api/storage/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - storagev1 "k8s.io/client-go/applyconfigurations/storage/v1" + applyconfigurationsstoragev1 "k8s.io/client-go/applyconfigurations/storage/v1" gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" ) @@ -38,32 +38,34 @@ type CSIDriversGetter interface { // CSIDriverInterface has methods to work with CSIDriver resources. 
type CSIDriverInterface interface { - Create(ctx context.Context, cSIDriver *v1.CSIDriver, opts metav1.CreateOptions) (*v1.CSIDriver, error) - Update(ctx context.Context, cSIDriver *v1.CSIDriver, opts metav1.UpdateOptions) (*v1.CSIDriver, error) + Create(ctx context.Context, cSIDriver *storagev1.CSIDriver, opts metav1.CreateOptions) (*storagev1.CSIDriver, error) + Update(ctx context.Context, cSIDriver *storagev1.CSIDriver, opts metav1.UpdateOptions) (*storagev1.CSIDriver, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error - Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.CSIDriver, error) - List(ctx context.Context, opts metav1.ListOptions) (*v1.CSIDriverList, error) + Get(ctx context.Context, name string, opts metav1.GetOptions) (*storagev1.CSIDriver, error) + List(ctx context.Context, opts metav1.ListOptions) (*storagev1.CSIDriverList, error) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CSIDriver, err error) - Apply(ctx context.Context, cSIDriver *storagev1.CSIDriverApplyConfiguration, opts metav1.ApplyOptions) (result *v1.CSIDriver, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *storagev1.CSIDriver, err error) + Apply(ctx context.Context, cSIDriver *applyconfigurationsstoragev1.CSIDriverApplyConfiguration, opts metav1.ApplyOptions) (result *storagev1.CSIDriver, err error) CSIDriverExpansion } // cSIDrivers implements CSIDriverInterface type cSIDrivers struct { - *gentype.ClientWithListAndApply[*v1.CSIDriver, *v1.CSIDriverList, *storagev1.CSIDriverApplyConfiguration] + *gentype.ClientWithListAndApply[*storagev1.CSIDriver, *storagev1.CSIDriverList, *applyconfigurationsstoragev1.CSIDriverApplyConfiguration] } // newCSIDrivers returns a CSIDrivers func newCSIDrivers(c *StorageV1Client) *cSIDrivers { return &cSIDrivers{ - gentype.NewClientWithListAndApply[*v1.CSIDriver, *v1.CSIDriverList, *storagev1.CSIDriverApplyConfiguration]( + gentype.NewClientWithListAndApply[*storagev1.CSIDriver, *storagev1.CSIDriverList, *applyconfigurationsstoragev1.CSIDriverApplyConfiguration]( "csidrivers", c.RESTClient(), scheme.ParameterCodec, "", - func() *v1.CSIDriver { return &v1.CSIDriver{} }, - func() *v1.CSIDriverList { return &v1.CSIDriverList{} }), + func() *storagev1.CSIDriver { return &storagev1.CSIDriver{} }, + func() *storagev1.CSIDriverList { return &storagev1.CSIDriverList{} }, + gentype.PrefersProtobuf[*storagev1.CSIDriver](), + ), } } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/csinode.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/csinode.go index 6d28d7ed1..a4fe6a0ee 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/csinode.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/csinode.go @@ -19,13 +19,13 @@ limitations under the License. 
package v1 import ( - "context" + context "context" - v1 "k8s.io/api/storage/v1" + storagev1 "k8s.io/api/storage/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - storagev1 "k8s.io/client-go/applyconfigurations/storage/v1" + applyconfigurationsstoragev1 "k8s.io/client-go/applyconfigurations/storage/v1" gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" ) @@ -38,32 +38,34 @@ type CSINodesGetter interface { // CSINodeInterface has methods to work with CSINode resources. type CSINodeInterface interface { - Create(ctx context.Context, cSINode *v1.CSINode, opts metav1.CreateOptions) (*v1.CSINode, error) - Update(ctx context.Context, cSINode *v1.CSINode, opts metav1.UpdateOptions) (*v1.CSINode, error) + Create(ctx context.Context, cSINode *storagev1.CSINode, opts metav1.CreateOptions) (*storagev1.CSINode, error) + Update(ctx context.Context, cSINode *storagev1.CSINode, opts metav1.UpdateOptions) (*storagev1.CSINode, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error - Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.CSINode, error) - List(ctx context.Context, opts metav1.ListOptions) (*v1.CSINodeList, error) + Get(ctx context.Context, name string, opts metav1.GetOptions) (*storagev1.CSINode, error) + List(ctx context.Context, opts metav1.ListOptions) (*storagev1.CSINodeList, error) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CSINode, err error) - Apply(ctx context.Context, cSINode *storagev1.CSINodeApplyConfiguration, opts metav1.ApplyOptions) (result *v1.CSINode, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *storagev1.CSINode, err error) + Apply(ctx context.Context, cSINode *applyconfigurationsstoragev1.CSINodeApplyConfiguration, opts metav1.ApplyOptions) (result *storagev1.CSINode, err error) CSINodeExpansion } // cSINodes implements CSINodeInterface type cSINodes struct { - *gentype.ClientWithListAndApply[*v1.CSINode, *v1.CSINodeList, *storagev1.CSINodeApplyConfiguration] + *gentype.ClientWithListAndApply[*storagev1.CSINode, *storagev1.CSINodeList, *applyconfigurationsstoragev1.CSINodeApplyConfiguration] } // newCSINodes returns a CSINodes func newCSINodes(c *StorageV1Client) *cSINodes { return &cSINodes{ - gentype.NewClientWithListAndApply[*v1.CSINode, *v1.CSINodeList, *storagev1.CSINodeApplyConfiguration]( + gentype.NewClientWithListAndApply[*storagev1.CSINode, *storagev1.CSINodeList, *applyconfigurationsstoragev1.CSINodeApplyConfiguration]( "csinodes", c.RESTClient(), scheme.ParameterCodec, "", - func() *v1.CSINode { return &v1.CSINode{} }, - func() *v1.CSINodeList { return &v1.CSINodeList{} }), + func() *storagev1.CSINode { return &storagev1.CSINode{} }, + func() *storagev1.CSINodeList { return &storagev1.CSINodeList{} }, + gentype.PrefersProtobuf[*storagev1.CSINode](), + ), } } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/csistoragecapacity.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/csistoragecapacity.go index 8a762b9ff..50a942978 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/csistoragecapacity.go +++ 
b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/csistoragecapacity.go @@ -19,13 +19,13 @@ limitations under the License. package v1 import ( - "context" + context "context" - v1 "k8s.io/api/storage/v1" + storagev1 "k8s.io/api/storage/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - storagev1 "k8s.io/client-go/applyconfigurations/storage/v1" + applyconfigurationsstoragev1 "k8s.io/client-go/applyconfigurations/storage/v1" gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" ) @@ -38,32 +38,34 @@ type CSIStorageCapacitiesGetter interface { // CSIStorageCapacityInterface has methods to work with CSIStorageCapacity resources. type CSIStorageCapacityInterface interface { - Create(ctx context.Context, cSIStorageCapacity *v1.CSIStorageCapacity, opts metav1.CreateOptions) (*v1.CSIStorageCapacity, error) - Update(ctx context.Context, cSIStorageCapacity *v1.CSIStorageCapacity, opts metav1.UpdateOptions) (*v1.CSIStorageCapacity, error) + Create(ctx context.Context, cSIStorageCapacity *storagev1.CSIStorageCapacity, opts metav1.CreateOptions) (*storagev1.CSIStorageCapacity, error) + Update(ctx context.Context, cSIStorageCapacity *storagev1.CSIStorageCapacity, opts metav1.UpdateOptions) (*storagev1.CSIStorageCapacity, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error - Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.CSIStorageCapacity, error) - List(ctx context.Context, opts metav1.ListOptions) (*v1.CSIStorageCapacityList, error) + Get(ctx context.Context, name string, opts metav1.GetOptions) (*storagev1.CSIStorageCapacity, error) + List(ctx context.Context, opts metav1.ListOptions) (*storagev1.CSIStorageCapacityList, error) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CSIStorageCapacity, err error) - Apply(ctx context.Context, cSIStorageCapacity *storagev1.CSIStorageCapacityApplyConfiguration, opts metav1.ApplyOptions) (result *v1.CSIStorageCapacity, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *storagev1.CSIStorageCapacity, err error) + Apply(ctx context.Context, cSIStorageCapacity *applyconfigurationsstoragev1.CSIStorageCapacityApplyConfiguration, opts metav1.ApplyOptions) (result *storagev1.CSIStorageCapacity, err error) CSIStorageCapacityExpansion } // cSIStorageCapacities implements CSIStorageCapacityInterface type cSIStorageCapacities struct { - *gentype.ClientWithListAndApply[*v1.CSIStorageCapacity, *v1.CSIStorageCapacityList, *storagev1.CSIStorageCapacityApplyConfiguration] + *gentype.ClientWithListAndApply[*storagev1.CSIStorageCapacity, *storagev1.CSIStorageCapacityList, *applyconfigurationsstoragev1.CSIStorageCapacityApplyConfiguration] } // newCSIStorageCapacities returns a CSIStorageCapacities func newCSIStorageCapacities(c *StorageV1Client, namespace string) *cSIStorageCapacities { return &cSIStorageCapacities{ - gentype.NewClientWithListAndApply[*v1.CSIStorageCapacity, *v1.CSIStorageCapacityList, *storagev1.CSIStorageCapacityApplyConfiguration]( + gentype.NewClientWithListAndApply[*storagev1.CSIStorageCapacity, *storagev1.CSIStorageCapacityList, 
*applyconfigurationsstoragev1.CSIStorageCapacityApplyConfiguration]( "csistoragecapacities", c.RESTClient(), scheme.ParameterCodec, namespace, - func() *v1.CSIStorageCapacity { return &v1.CSIStorageCapacity{} }, - func() *v1.CSIStorageCapacityList { return &v1.CSIStorageCapacityList{} }), + func() *storagev1.CSIStorageCapacity { return &storagev1.CSIStorageCapacity{} }, + func() *storagev1.CSIStorageCapacityList { return &storagev1.CSIStorageCapacityList{} }, + gentype.PrefersProtobuf[*storagev1.CSIStorageCapacity](), + ), } } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_csidriver.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_csidriver.go index 1df7c034b..9c6a70725 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_csidriver.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_csidriver.go @@ -19,133 +19,31 @@ limitations under the License. package fake import ( - "context" - json "encoding/json" - "fmt" - v1 "k8s.io/api/storage/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" storagev1 "k8s.io/client-go/applyconfigurations/storage/v1" - testing "k8s.io/client-go/testing" + gentype "k8s.io/client-go/gentype" + typedstoragev1 "k8s.io/client-go/kubernetes/typed/storage/v1" ) -// FakeCSIDrivers implements CSIDriverInterface -type FakeCSIDrivers struct { +// fakeCSIDrivers implements CSIDriverInterface +type fakeCSIDrivers struct { + *gentype.FakeClientWithListAndApply[*v1.CSIDriver, *v1.CSIDriverList, *storagev1.CSIDriverApplyConfiguration] Fake *FakeStorageV1 } -var csidriversResource = v1.SchemeGroupVersion.WithResource("csidrivers") - -var csidriversKind = v1.SchemeGroupVersion.WithKind("CSIDriver") - -// Get takes name of the cSIDriver, and returns the corresponding cSIDriver object, and an error if there is any. -func (c *FakeCSIDrivers) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.CSIDriver, err error) { - emptyResult := &v1.CSIDriver{} - obj, err := c.Fake. - Invokes(testing.NewRootGetActionWithOptions(csidriversResource, name, options), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1.CSIDriver), err -} - -// List takes label and field selectors, and returns the list of CSIDrivers that match those selectors. -func (c *FakeCSIDrivers) List(ctx context.Context, opts metav1.ListOptions) (result *v1.CSIDriverList, err error) { - emptyResult := &v1.CSIDriverList{} - obj, err := c.Fake. - Invokes(testing.NewRootListActionWithOptions(csidriversResource, csidriversKind, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1.CSIDriverList{ListMeta: obj.(*v1.CSIDriverList).ListMeta} - for _, item := range obj.(*v1.CSIDriverList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested cSIDrivers. -func (c *FakeCSIDrivers) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewRootWatchActionWithOptions(csidriversResource, opts)) -} - -// Create takes the representation of a cSIDriver and creates it. Returns the server's representation of the cSIDriver, and an error, if there is any. 
-func (c *FakeCSIDrivers) Create(ctx context.Context, cSIDriver *v1.CSIDriver, opts metav1.CreateOptions) (result *v1.CSIDriver, err error) { - emptyResult := &v1.CSIDriver{} - obj, err := c.Fake. - Invokes(testing.NewRootCreateActionWithOptions(csidriversResource, cSIDriver, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1.CSIDriver), err -} - -// Update takes the representation of a cSIDriver and updates it. Returns the server's representation of the cSIDriver, and an error, if there is any. -func (c *FakeCSIDrivers) Update(ctx context.Context, cSIDriver *v1.CSIDriver, opts metav1.UpdateOptions) (result *v1.CSIDriver, err error) { - emptyResult := &v1.CSIDriver{} - obj, err := c.Fake. - Invokes(testing.NewRootUpdateActionWithOptions(csidriversResource, cSIDriver, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1.CSIDriver), err -} - -// Delete takes name of the cSIDriver and deletes it. Returns an error if one occurs. -func (c *FakeCSIDrivers) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewRootDeleteActionWithOptions(csidriversResource, name, opts), &v1.CSIDriver{}) - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeCSIDrivers) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewRootDeleteCollectionActionWithOptions(csidriversResource, opts, listOpts) - - _, err := c.Fake.Invokes(action, &v1.CSIDriverList{}) - return err -} - -// Patch applies the patch and returns the patched cSIDriver. -func (c *FakeCSIDrivers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CSIDriver, err error) { - emptyResult := &v1.CSIDriver{} - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceActionWithOptions(csidriversResource, name, pt, data, opts, subresources...), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1.CSIDriver), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied cSIDriver. -func (c *FakeCSIDrivers) Apply(ctx context.Context, cSIDriver *storagev1.CSIDriverApplyConfiguration, opts metav1.ApplyOptions) (result *v1.CSIDriver, err error) { - if cSIDriver == nil { - return nil, fmt.Errorf("cSIDriver provided to Apply must not be nil") - } - data, err := json.Marshal(cSIDriver) - if err != nil { - return nil, err - } - name := cSIDriver.Name - if name == nil { - return nil, fmt.Errorf("cSIDriver.Name must be provided to Apply") - } - emptyResult := &v1.CSIDriver{} - obj, err := c.Fake. 
- Invokes(testing.NewRootPatchSubresourceActionWithOptions(csidriversResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) - if obj == nil { - return emptyResult, err +func newFakeCSIDrivers(fake *FakeStorageV1) typedstoragev1.CSIDriverInterface { + return &fakeCSIDrivers{ + gentype.NewFakeClientWithListAndApply[*v1.CSIDriver, *v1.CSIDriverList, *storagev1.CSIDriverApplyConfiguration]( + fake.Fake, + "", + v1.SchemeGroupVersion.WithResource("csidrivers"), + v1.SchemeGroupVersion.WithKind("CSIDriver"), + func() *v1.CSIDriver { return &v1.CSIDriver{} }, + func() *v1.CSIDriverList { return &v1.CSIDriverList{} }, + func(dst, src *v1.CSIDriverList) { dst.ListMeta = src.ListMeta }, + func(list *v1.CSIDriverList) []*v1.CSIDriver { return gentype.ToPointerSlice(list.Items) }, + func(list *v1.CSIDriverList, items []*v1.CSIDriver) { list.Items = gentype.FromPointerSlice(items) }, + ), + fake, } - return obj.(*v1.CSIDriver), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_csinode.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_csinode.go index e2b8e8cc8..2827e6dd6 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_csinode.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_csinode.go @@ -19,133 +19,31 @@ limitations under the License. package fake import ( - "context" - json "encoding/json" - "fmt" - v1 "k8s.io/api/storage/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" storagev1 "k8s.io/client-go/applyconfigurations/storage/v1" - testing "k8s.io/client-go/testing" + gentype "k8s.io/client-go/gentype" + typedstoragev1 "k8s.io/client-go/kubernetes/typed/storage/v1" ) -// FakeCSINodes implements CSINodeInterface -type FakeCSINodes struct { +// fakeCSINodes implements CSINodeInterface +type fakeCSINodes struct { + *gentype.FakeClientWithListAndApply[*v1.CSINode, *v1.CSINodeList, *storagev1.CSINodeApplyConfiguration] Fake *FakeStorageV1 } -var csinodesResource = v1.SchemeGroupVersion.WithResource("csinodes") - -var csinodesKind = v1.SchemeGroupVersion.WithKind("CSINode") - -// Get takes name of the cSINode, and returns the corresponding cSINode object, and an error if there is any. -func (c *FakeCSINodes) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.CSINode, err error) { - emptyResult := &v1.CSINode{} - obj, err := c.Fake. - Invokes(testing.NewRootGetActionWithOptions(csinodesResource, name, options), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1.CSINode), err -} - -// List takes label and field selectors, and returns the list of CSINodes that match those selectors. -func (c *FakeCSINodes) List(ctx context.Context, opts metav1.ListOptions) (result *v1.CSINodeList, err error) { - emptyResult := &v1.CSINodeList{} - obj, err := c.Fake. - Invokes(testing.NewRootListActionWithOptions(csinodesResource, csinodesKind, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1.CSINodeList{ListMeta: obj.(*v1.CSINodeList).ListMeta} - for _, item := range obj.(*v1.CSINodeList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested cSINodes. 
-func (c *FakeCSINodes) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewRootWatchActionWithOptions(csinodesResource, opts)) -} - -// Create takes the representation of a cSINode and creates it. Returns the server's representation of the cSINode, and an error, if there is any. -func (c *FakeCSINodes) Create(ctx context.Context, cSINode *v1.CSINode, opts metav1.CreateOptions) (result *v1.CSINode, err error) { - emptyResult := &v1.CSINode{} - obj, err := c.Fake. - Invokes(testing.NewRootCreateActionWithOptions(csinodesResource, cSINode, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1.CSINode), err -} - -// Update takes the representation of a cSINode and updates it. Returns the server's representation of the cSINode, and an error, if there is any. -func (c *FakeCSINodes) Update(ctx context.Context, cSINode *v1.CSINode, opts metav1.UpdateOptions) (result *v1.CSINode, err error) { - emptyResult := &v1.CSINode{} - obj, err := c.Fake. - Invokes(testing.NewRootUpdateActionWithOptions(csinodesResource, cSINode, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1.CSINode), err -} - -// Delete takes name of the cSINode and deletes it. Returns an error if one occurs. -func (c *FakeCSINodes) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewRootDeleteActionWithOptions(csinodesResource, name, opts), &v1.CSINode{}) - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeCSINodes) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewRootDeleteCollectionActionWithOptions(csinodesResource, opts, listOpts) - - _, err := c.Fake.Invokes(action, &v1.CSINodeList{}) - return err -} - -// Patch applies the patch and returns the patched cSINode. -func (c *FakeCSINodes) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CSINode, err error) { - emptyResult := &v1.CSINode{} - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceActionWithOptions(csinodesResource, name, pt, data, opts, subresources...), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1.CSINode), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied cSINode. -func (c *FakeCSINodes) Apply(ctx context.Context, cSINode *storagev1.CSINodeApplyConfiguration, opts metav1.ApplyOptions) (result *v1.CSINode, err error) { - if cSINode == nil { - return nil, fmt.Errorf("cSINode provided to Apply must not be nil") - } - data, err := json.Marshal(cSINode) - if err != nil { - return nil, err - } - name := cSINode.Name - if name == nil { - return nil, fmt.Errorf("cSINode.Name must be provided to Apply") - } - emptyResult := &v1.CSINode{} - obj, err := c.Fake. 
- Invokes(testing.NewRootPatchSubresourceActionWithOptions(csinodesResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) - if obj == nil { - return emptyResult, err +func newFakeCSINodes(fake *FakeStorageV1) typedstoragev1.CSINodeInterface { + return &fakeCSINodes{ + gentype.NewFakeClientWithListAndApply[*v1.CSINode, *v1.CSINodeList, *storagev1.CSINodeApplyConfiguration]( + fake.Fake, + "", + v1.SchemeGroupVersion.WithResource("csinodes"), + v1.SchemeGroupVersion.WithKind("CSINode"), + func() *v1.CSINode { return &v1.CSINode{} }, + func() *v1.CSINodeList { return &v1.CSINodeList{} }, + func(dst, src *v1.CSINodeList) { dst.ListMeta = src.ListMeta }, + func(list *v1.CSINodeList) []*v1.CSINode { return gentype.ToPointerSlice(list.Items) }, + func(list *v1.CSINodeList, items []*v1.CSINode) { list.Items = gentype.FromPointerSlice(items) }, + ), + fake, } - return obj.(*v1.CSINode), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_csistoragecapacity.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_csistoragecapacity.go index a86014855..ba778c250 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_csistoragecapacity.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_csistoragecapacity.go @@ -19,142 +19,35 @@ limitations under the License. package fake import ( - "context" - json "encoding/json" - "fmt" - v1 "k8s.io/api/storage/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" storagev1 "k8s.io/client-go/applyconfigurations/storage/v1" - testing "k8s.io/client-go/testing" + gentype "k8s.io/client-go/gentype" + typedstoragev1 "k8s.io/client-go/kubernetes/typed/storage/v1" ) -// FakeCSIStorageCapacities implements CSIStorageCapacityInterface -type FakeCSIStorageCapacities struct { +// fakeCSIStorageCapacities implements CSIStorageCapacityInterface +type fakeCSIStorageCapacities struct { + *gentype.FakeClientWithListAndApply[*v1.CSIStorageCapacity, *v1.CSIStorageCapacityList, *storagev1.CSIStorageCapacityApplyConfiguration] Fake *FakeStorageV1 - ns string -} - -var csistoragecapacitiesResource = v1.SchemeGroupVersion.WithResource("csistoragecapacities") - -var csistoragecapacitiesKind = v1.SchemeGroupVersion.WithKind("CSIStorageCapacity") - -// Get takes name of the cSIStorageCapacity, and returns the corresponding cSIStorageCapacity object, and an error if there is any. -func (c *FakeCSIStorageCapacities) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.CSIStorageCapacity, err error) { - emptyResult := &v1.CSIStorageCapacity{} - obj, err := c.Fake. - Invokes(testing.NewGetActionWithOptions(csistoragecapacitiesResource, c.ns, name, options), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1.CSIStorageCapacity), err -} - -// List takes label and field selectors, and returns the list of CSIStorageCapacities that match those selectors. -func (c *FakeCSIStorageCapacities) List(ctx context.Context, opts metav1.ListOptions) (result *v1.CSIStorageCapacityList, err error) { - emptyResult := &v1.CSIStorageCapacityList{} - obj, err := c.Fake. 
- Invokes(testing.NewListActionWithOptions(csistoragecapacitiesResource, csistoragecapacitiesKind, c.ns, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1.CSIStorageCapacityList{ListMeta: obj.(*v1.CSIStorageCapacityList).ListMeta} - for _, item := range obj.(*v1.CSIStorageCapacityList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested cSIStorageCapacities. -func (c *FakeCSIStorageCapacities) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchActionWithOptions(csistoragecapacitiesResource, c.ns, opts)) - } -// Create takes the representation of a cSIStorageCapacity and creates it. Returns the server's representation of the cSIStorageCapacity, and an error, if there is any. -func (c *FakeCSIStorageCapacities) Create(ctx context.Context, cSIStorageCapacity *v1.CSIStorageCapacity, opts metav1.CreateOptions) (result *v1.CSIStorageCapacity, err error) { - emptyResult := &v1.CSIStorageCapacity{} - obj, err := c.Fake. - Invokes(testing.NewCreateActionWithOptions(csistoragecapacitiesResource, c.ns, cSIStorageCapacity, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1.CSIStorageCapacity), err -} - -// Update takes the representation of a cSIStorageCapacity and updates it. Returns the server's representation of the cSIStorageCapacity, and an error, if there is any. -func (c *FakeCSIStorageCapacities) Update(ctx context.Context, cSIStorageCapacity *v1.CSIStorageCapacity, opts metav1.UpdateOptions) (result *v1.CSIStorageCapacity, err error) { - emptyResult := &v1.CSIStorageCapacity{} - obj, err := c.Fake. - Invokes(testing.NewUpdateActionWithOptions(csistoragecapacitiesResource, c.ns, cSIStorageCapacity, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1.CSIStorageCapacity), err -} - -// Delete takes name of the cSIStorageCapacity and deletes it. Returns an error if one occurs. -func (c *FakeCSIStorageCapacities) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(csistoragecapacitiesResource, c.ns, name, opts), &v1.CSIStorageCapacity{}) - - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeCSIStorageCapacities) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewDeleteCollectionActionWithOptions(csistoragecapacitiesResource, c.ns, opts, listOpts) - - _, err := c.Fake.Invokes(action, &v1.CSIStorageCapacityList{}) - return err -} - -// Patch applies the patch and returns the patched cSIStorageCapacity. -func (c *FakeCSIStorageCapacities) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CSIStorageCapacity, err error) { - emptyResult := &v1.CSIStorageCapacity{} - obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceActionWithOptions(csistoragecapacitiesResource, c.ns, name, pt, data, opts, subresources...), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1.CSIStorageCapacity), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied cSIStorageCapacity. -func (c *FakeCSIStorageCapacities) Apply(ctx context.Context, cSIStorageCapacity *storagev1.CSIStorageCapacityApplyConfiguration, opts metav1.ApplyOptions) (result *v1.CSIStorageCapacity, err error) { - if cSIStorageCapacity == nil { - return nil, fmt.Errorf("cSIStorageCapacity provided to Apply must not be nil") - } - data, err := json.Marshal(cSIStorageCapacity) - if err != nil { - return nil, err - } - name := cSIStorageCapacity.Name - if name == nil { - return nil, fmt.Errorf("cSIStorageCapacity.Name must be provided to Apply") - } - emptyResult := &v1.CSIStorageCapacity{} - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceActionWithOptions(csistoragecapacitiesResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) - - if obj == nil { - return emptyResult, err +func newFakeCSIStorageCapacities(fake *FakeStorageV1, namespace string) typedstoragev1.CSIStorageCapacityInterface { + return &fakeCSIStorageCapacities{ + gentype.NewFakeClientWithListAndApply[*v1.CSIStorageCapacity, *v1.CSIStorageCapacityList, *storagev1.CSIStorageCapacityApplyConfiguration]( + fake.Fake, + namespace, + v1.SchemeGroupVersion.WithResource("csistoragecapacities"), + v1.SchemeGroupVersion.WithKind("CSIStorageCapacity"), + func() *v1.CSIStorageCapacity { return &v1.CSIStorageCapacity{} }, + func() *v1.CSIStorageCapacityList { return &v1.CSIStorageCapacityList{} }, + func(dst, src *v1.CSIStorageCapacityList) { dst.ListMeta = src.ListMeta }, + func(list *v1.CSIStorageCapacityList) []*v1.CSIStorageCapacity { + return gentype.ToPointerSlice(list.Items) + }, + func(list *v1.CSIStorageCapacityList, items []*v1.CSIStorageCapacity) { + list.Items = gentype.FromPointerSlice(items) + }, + ), + fake, } - return obj.(*v1.CSIStorageCapacity), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_storage_client.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_storage_client.go index 5cb91b516..d922d6a09 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_storage_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_storage_client.go @@ -29,23 +29,23 @@ type FakeStorageV1 struct { } func (c *FakeStorageV1) CSIDrivers() v1.CSIDriverInterface { - return &FakeCSIDrivers{c} + return newFakeCSIDrivers(c) } func (c *FakeStorageV1) CSINodes() v1.CSINodeInterface { - return &FakeCSINodes{c} + return newFakeCSINodes(c) } func (c *FakeStorageV1) CSIStorageCapacities(namespace string) v1.CSIStorageCapacityInterface { - return &FakeCSIStorageCapacities{c, namespace} + return newFakeCSIStorageCapacities(c, namespace) } func (c *FakeStorageV1) StorageClasses() v1.StorageClassInterface { - return &FakeStorageClasses{c} + return newFakeStorageClasses(c) } func (c *FakeStorageV1) VolumeAttachments() v1.VolumeAttachmentInterface { - return &FakeVolumeAttachments{c} + return newFakeVolumeAttachments(c) } // RESTClient returns a RESTClient that is used to communicate diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_storageclass.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_storageclass.go index 8910be1db..18cb8eba1 100644 
--- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_storageclass.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_storageclass.go @@ -19,133 +19,33 @@ limitations under the License. package fake import ( - "context" - json "encoding/json" - "fmt" - v1 "k8s.io/api/storage/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" storagev1 "k8s.io/client-go/applyconfigurations/storage/v1" - testing "k8s.io/client-go/testing" + gentype "k8s.io/client-go/gentype" + typedstoragev1 "k8s.io/client-go/kubernetes/typed/storage/v1" ) -// FakeStorageClasses implements StorageClassInterface -type FakeStorageClasses struct { +// fakeStorageClasses implements StorageClassInterface +type fakeStorageClasses struct { + *gentype.FakeClientWithListAndApply[*v1.StorageClass, *v1.StorageClassList, *storagev1.StorageClassApplyConfiguration] Fake *FakeStorageV1 } -var storageclassesResource = v1.SchemeGroupVersion.WithResource("storageclasses") - -var storageclassesKind = v1.SchemeGroupVersion.WithKind("StorageClass") - -// Get takes name of the storageClass, and returns the corresponding storageClass object, and an error if there is any. -func (c *FakeStorageClasses) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.StorageClass, err error) { - emptyResult := &v1.StorageClass{} - obj, err := c.Fake. - Invokes(testing.NewRootGetActionWithOptions(storageclassesResource, name, options), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1.StorageClass), err -} - -// List takes label and field selectors, and returns the list of StorageClasses that match those selectors. -func (c *FakeStorageClasses) List(ctx context.Context, opts metav1.ListOptions) (result *v1.StorageClassList, err error) { - emptyResult := &v1.StorageClassList{} - obj, err := c.Fake. - Invokes(testing.NewRootListActionWithOptions(storageclassesResource, storageclassesKind, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1.StorageClassList{ListMeta: obj.(*v1.StorageClassList).ListMeta} - for _, item := range obj.(*v1.StorageClassList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested storageClasses. -func (c *FakeStorageClasses) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewRootWatchActionWithOptions(storageclassesResource, opts)) -} - -// Create takes the representation of a storageClass and creates it. Returns the server's representation of the storageClass, and an error, if there is any. -func (c *FakeStorageClasses) Create(ctx context.Context, storageClass *v1.StorageClass, opts metav1.CreateOptions) (result *v1.StorageClass, err error) { - emptyResult := &v1.StorageClass{} - obj, err := c.Fake. - Invokes(testing.NewRootCreateActionWithOptions(storageclassesResource, storageClass, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1.StorageClass), err -} - -// Update takes the representation of a storageClass and updates it. Returns the server's representation of the storageClass, and an error, if there is any. 
-func (c *FakeStorageClasses) Update(ctx context.Context, storageClass *v1.StorageClass, opts metav1.UpdateOptions) (result *v1.StorageClass, err error) { - emptyResult := &v1.StorageClass{} - obj, err := c.Fake. - Invokes(testing.NewRootUpdateActionWithOptions(storageclassesResource, storageClass, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1.StorageClass), err -} - -// Delete takes name of the storageClass and deletes it. Returns an error if one occurs. -func (c *FakeStorageClasses) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewRootDeleteActionWithOptions(storageclassesResource, name, opts), &v1.StorageClass{}) - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeStorageClasses) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewRootDeleteCollectionActionWithOptions(storageclassesResource, opts, listOpts) - - _, err := c.Fake.Invokes(action, &v1.StorageClassList{}) - return err -} - -// Patch applies the patch and returns the patched storageClass. -func (c *FakeStorageClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.StorageClass, err error) { - emptyResult := &v1.StorageClass{} - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceActionWithOptions(storageclassesResource, name, pt, data, opts, subresources...), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1.StorageClass), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied storageClass. -func (c *FakeStorageClasses) Apply(ctx context.Context, storageClass *storagev1.StorageClassApplyConfiguration, opts metav1.ApplyOptions) (result *v1.StorageClass, err error) { - if storageClass == nil { - return nil, fmt.Errorf("storageClass provided to Apply must not be nil") - } - data, err := json.Marshal(storageClass) - if err != nil { - return nil, err - } - name := storageClass.Name - if name == nil { - return nil, fmt.Errorf("storageClass.Name must be provided to Apply") - } - emptyResult := &v1.StorageClass{} - obj, err := c.Fake. 
- Invokes(testing.NewRootPatchSubresourceActionWithOptions(storageclassesResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) - if obj == nil { - return emptyResult, err +func newFakeStorageClasses(fake *FakeStorageV1) typedstoragev1.StorageClassInterface { + return &fakeStorageClasses{ + gentype.NewFakeClientWithListAndApply[*v1.StorageClass, *v1.StorageClassList, *storagev1.StorageClassApplyConfiguration]( + fake.Fake, + "", + v1.SchemeGroupVersion.WithResource("storageclasses"), + v1.SchemeGroupVersion.WithKind("StorageClass"), + func() *v1.StorageClass { return &v1.StorageClass{} }, + func() *v1.StorageClassList { return &v1.StorageClassList{} }, + func(dst, src *v1.StorageClassList) { dst.ListMeta = src.ListMeta }, + func(list *v1.StorageClassList) []*v1.StorageClass { return gentype.ToPointerSlice(list.Items) }, + func(list *v1.StorageClassList, items []*v1.StorageClass) { + list.Items = gentype.FromPointerSlice(items) + }, + ), + fake, } - return obj.(*v1.StorageClass), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_volumeattachment.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_volumeattachment.go index 3d3d71ec5..e3bc8d889 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_volumeattachment.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_volumeattachment.go @@ -19,168 +19,33 @@ limitations under the License. package fake import ( - "context" - json "encoding/json" - "fmt" - v1 "k8s.io/api/storage/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" storagev1 "k8s.io/client-go/applyconfigurations/storage/v1" - testing "k8s.io/client-go/testing" + gentype "k8s.io/client-go/gentype" + typedstoragev1 "k8s.io/client-go/kubernetes/typed/storage/v1" ) -// FakeVolumeAttachments implements VolumeAttachmentInterface -type FakeVolumeAttachments struct { +// fakeVolumeAttachments implements VolumeAttachmentInterface +type fakeVolumeAttachments struct { + *gentype.FakeClientWithListAndApply[*v1.VolumeAttachment, *v1.VolumeAttachmentList, *storagev1.VolumeAttachmentApplyConfiguration] Fake *FakeStorageV1 } -var volumeattachmentsResource = v1.SchemeGroupVersion.WithResource("volumeattachments") - -var volumeattachmentsKind = v1.SchemeGroupVersion.WithKind("VolumeAttachment") - -// Get takes name of the volumeAttachment, and returns the corresponding volumeAttachment object, and an error if there is any. -func (c *FakeVolumeAttachments) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.VolumeAttachment, err error) { - emptyResult := &v1.VolumeAttachment{} - obj, err := c.Fake. - Invokes(testing.NewRootGetActionWithOptions(volumeattachmentsResource, name, options), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1.VolumeAttachment), err -} - -// List takes label and field selectors, and returns the list of VolumeAttachments that match those selectors. -func (c *FakeVolumeAttachments) List(ctx context.Context, opts metav1.ListOptions) (result *v1.VolumeAttachmentList, err error) { - emptyResult := &v1.VolumeAttachmentList{} - obj, err := c.Fake. 
- Invokes(testing.NewRootListActionWithOptions(volumeattachmentsResource, volumeattachmentsKind, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1.VolumeAttachmentList{ListMeta: obj.(*v1.VolumeAttachmentList).ListMeta} - for _, item := range obj.(*v1.VolumeAttachmentList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested volumeAttachments. -func (c *FakeVolumeAttachments) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewRootWatchActionWithOptions(volumeattachmentsResource, opts)) -} - -// Create takes the representation of a volumeAttachment and creates it. Returns the server's representation of the volumeAttachment, and an error, if there is any. -func (c *FakeVolumeAttachments) Create(ctx context.Context, volumeAttachment *v1.VolumeAttachment, opts metav1.CreateOptions) (result *v1.VolumeAttachment, err error) { - emptyResult := &v1.VolumeAttachment{} - obj, err := c.Fake. - Invokes(testing.NewRootCreateActionWithOptions(volumeattachmentsResource, volumeAttachment, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1.VolumeAttachment), err -} - -// Update takes the representation of a volumeAttachment and updates it. Returns the server's representation of the volumeAttachment, and an error, if there is any. -func (c *FakeVolumeAttachments) Update(ctx context.Context, volumeAttachment *v1.VolumeAttachment, opts metav1.UpdateOptions) (result *v1.VolumeAttachment, err error) { - emptyResult := &v1.VolumeAttachment{} - obj, err := c.Fake. - Invokes(testing.NewRootUpdateActionWithOptions(volumeattachmentsResource, volumeAttachment, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1.VolumeAttachment), err -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeVolumeAttachments) UpdateStatus(ctx context.Context, volumeAttachment *v1.VolumeAttachment, opts metav1.UpdateOptions) (result *v1.VolumeAttachment, err error) { - emptyResult := &v1.VolumeAttachment{} - obj, err := c.Fake. - Invokes(testing.NewRootUpdateSubresourceActionWithOptions(volumeattachmentsResource, "status", volumeAttachment, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1.VolumeAttachment), err -} - -// Delete takes name of the volumeAttachment and deletes it. Returns an error if one occurs. -func (c *FakeVolumeAttachments) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewRootDeleteActionWithOptions(volumeattachmentsResource, name, opts), &v1.VolumeAttachment{}) - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeVolumeAttachments) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewRootDeleteCollectionActionWithOptions(volumeattachmentsResource, opts, listOpts) - - _, err := c.Fake.Invokes(action, &v1.VolumeAttachmentList{}) - return err -} - -// Patch applies the patch and returns the patched volumeAttachment. 
-func (c *FakeVolumeAttachments) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.VolumeAttachment, err error) { - emptyResult := &v1.VolumeAttachment{} - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceActionWithOptions(volumeattachmentsResource, name, pt, data, opts, subresources...), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1.VolumeAttachment), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied volumeAttachment. -func (c *FakeVolumeAttachments) Apply(ctx context.Context, volumeAttachment *storagev1.VolumeAttachmentApplyConfiguration, opts metav1.ApplyOptions) (result *v1.VolumeAttachment, err error) { - if volumeAttachment == nil { - return nil, fmt.Errorf("volumeAttachment provided to Apply must not be nil") - } - data, err := json.Marshal(volumeAttachment) - if err != nil { - return nil, err - } - name := volumeAttachment.Name - if name == nil { - return nil, fmt.Errorf("volumeAttachment.Name must be provided to Apply") - } - emptyResult := &v1.VolumeAttachment{} - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceActionWithOptions(volumeattachmentsResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1.VolumeAttachment), err -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *FakeVolumeAttachments) ApplyStatus(ctx context.Context, volumeAttachment *storagev1.VolumeAttachmentApplyConfiguration, opts metav1.ApplyOptions) (result *v1.VolumeAttachment, err error) { - if volumeAttachment == nil { - return nil, fmt.Errorf("volumeAttachment provided to Apply must not be nil") - } - data, err := json.Marshal(volumeAttachment) - if err != nil { - return nil, err - } - name := volumeAttachment.Name - if name == nil { - return nil, fmt.Errorf("volumeAttachment.Name must be provided to Apply") - } - emptyResult := &v1.VolumeAttachment{} - obj, err := c.Fake. 
- Invokes(testing.NewRootPatchSubresourceActionWithOptions(volumeattachmentsResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) - if obj == nil { - return emptyResult, err +func newFakeVolumeAttachments(fake *FakeStorageV1) typedstoragev1.VolumeAttachmentInterface { + return &fakeVolumeAttachments{ + gentype.NewFakeClientWithListAndApply[*v1.VolumeAttachment, *v1.VolumeAttachmentList, *storagev1.VolumeAttachmentApplyConfiguration]( + fake.Fake, + "", + v1.SchemeGroupVersion.WithResource("volumeattachments"), + v1.SchemeGroupVersion.WithKind("VolumeAttachment"), + func() *v1.VolumeAttachment { return &v1.VolumeAttachment{} }, + func() *v1.VolumeAttachmentList { return &v1.VolumeAttachmentList{} }, + func(dst, src *v1.VolumeAttachmentList) { dst.ListMeta = src.ListMeta }, + func(list *v1.VolumeAttachmentList) []*v1.VolumeAttachment { return gentype.ToPointerSlice(list.Items) }, + func(list *v1.VolumeAttachmentList, items []*v1.VolumeAttachment) { + list.Items = gentype.FromPointerSlice(items) + }, + ), + fake, } - return obj.(*v1.VolumeAttachment), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storage_client.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storage_client.go index 750fe8b62..70aaff169 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storage_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storage_client.go @@ -19,10 +19,10 @@ limitations under the License. package v1 import ( - "net/http" + http "net/http" - v1 "k8s.io/api/storage/v1" - "k8s.io/client-go/kubernetes/scheme" + storagev1 "k8s.io/api/storage/v1" + scheme "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -105,10 +105,10 @@ func New(c rest.Interface) *StorageV1Client { } func setConfigDefaults(config *rest.Config) error { - gv := v1.SchemeGroupVersion + gv := storagev1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storageclass.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storageclass.go index d7b6ff68a..f33a351f1 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storageclass.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storageclass.go @@ -19,13 +19,13 @@ limitations under the License. package v1 import ( - "context" + context "context" - v1 "k8s.io/api/storage/v1" + storagev1 "k8s.io/api/storage/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - storagev1 "k8s.io/client-go/applyconfigurations/storage/v1" + applyconfigurationsstoragev1 "k8s.io/client-go/applyconfigurations/storage/v1" gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" ) @@ -38,32 +38,34 @@ type StorageClassesGetter interface { // StorageClassInterface has methods to work with StorageClass resources. 
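// Illustrative sketch, not part of the vendored diff: the regenerated typed clients in
// the hunks around this point only rename import aliases (v1 -> storagev1,
// storagev1 -> applyconfigurationsstoragev1) and add the gentype.PrefersProtobuf option,
// so callers that go through the public interfaces are unchanged. Assuming a
// kubernetes.Interface value named "client" (a name chosen for illustration), usage stays:
//
//	sc, err := client.StorageV1().StorageClasses().Get(ctx, "standard", metav1.GetOptions{})
//	if err != nil {
//		return err
//	}
//	_ = sc.Provisioner // fields of storagev1.StorageClass are read exactly as before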
type StorageClassInterface interface { - Create(ctx context.Context, storageClass *v1.StorageClass, opts metav1.CreateOptions) (*v1.StorageClass, error) - Update(ctx context.Context, storageClass *v1.StorageClass, opts metav1.UpdateOptions) (*v1.StorageClass, error) + Create(ctx context.Context, storageClass *storagev1.StorageClass, opts metav1.CreateOptions) (*storagev1.StorageClass, error) + Update(ctx context.Context, storageClass *storagev1.StorageClass, opts metav1.UpdateOptions) (*storagev1.StorageClass, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error - Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.StorageClass, error) - List(ctx context.Context, opts metav1.ListOptions) (*v1.StorageClassList, error) + Get(ctx context.Context, name string, opts metav1.GetOptions) (*storagev1.StorageClass, error) + List(ctx context.Context, opts metav1.ListOptions) (*storagev1.StorageClassList, error) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.StorageClass, err error) - Apply(ctx context.Context, storageClass *storagev1.StorageClassApplyConfiguration, opts metav1.ApplyOptions) (result *v1.StorageClass, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *storagev1.StorageClass, err error) + Apply(ctx context.Context, storageClass *applyconfigurationsstoragev1.StorageClassApplyConfiguration, opts metav1.ApplyOptions) (result *storagev1.StorageClass, err error) StorageClassExpansion } // storageClasses implements StorageClassInterface type storageClasses struct { - *gentype.ClientWithListAndApply[*v1.StorageClass, *v1.StorageClassList, *storagev1.StorageClassApplyConfiguration] + *gentype.ClientWithListAndApply[*storagev1.StorageClass, *storagev1.StorageClassList, *applyconfigurationsstoragev1.StorageClassApplyConfiguration] } // newStorageClasses returns a StorageClasses func newStorageClasses(c *StorageV1Client) *storageClasses { return &storageClasses{ - gentype.NewClientWithListAndApply[*v1.StorageClass, *v1.StorageClassList, *storagev1.StorageClassApplyConfiguration]( + gentype.NewClientWithListAndApply[*storagev1.StorageClass, *storagev1.StorageClassList, *applyconfigurationsstoragev1.StorageClassApplyConfiguration]( "storageclasses", c.RESTClient(), scheme.ParameterCodec, "", - func() *v1.StorageClass { return &v1.StorageClass{} }, - func() *v1.StorageClassList { return &v1.StorageClassList{} }), + func() *storagev1.StorageClass { return &storagev1.StorageClass{} }, + func() *storagev1.StorageClassList { return &storagev1.StorageClassList{} }, + gentype.PrefersProtobuf[*storagev1.StorageClass](), + ), } } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/volumeattachment.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/volumeattachment.go index 3a0404284..60db4844f 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/volumeattachment.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/volumeattachment.go @@ -19,13 +19,13 @@ limitations under the License. 
package v1 import ( - "context" + context "context" - v1 "k8s.io/api/storage/v1" + storagev1 "k8s.io/api/storage/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - storagev1 "k8s.io/client-go/applyconfigurations/storage/v1" + applyconfigurationsstoragev1 "k8s.io/client-go/applyconfigurations/storage/v1" gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" ) @@ -38,36 +38,38 @@ type VolumeAttachmentsGetter interface { // VolumeAttachmentInterface has methods to work with VolumeAttachment resources. type VolumeAttachmentInterface interface { - Create(ctx context.Context, volumeAttachment *v1.VolumeAttachment, opts metav1.CreateOptions) (*v1.VolumeAttachment, error) - Update(ctx context.Context, volumeAttachment *v1.VolumeAttachment, opts metav1.UpdateOptions) (*v1.VolumeAttachment, error) + Create(ctx context.Context, volumeAttachment *storagev1.VolumeAttachment, opts metav1.CreateOptions) (*storagev1.VolumeAttachment, error) + Update(ctx context.Context, volumeAttachment *storagev1.VolumeAttachment, opts metav1.UpdateOptions) (*storagev1.VolumeAttachment, error) // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - UpdateStatus(ctx context.Context, volumeAttachment *v1.VolumeAttachment, opts metav1.UpdateOptions) (*v1.VolumeAttachment, error) + UpdateStatus(ctx context.Context, volumeAttachment *storagev1.VolumeAttachment, opts metav1.UpdateOptions) (*storagev1.VolumeAttachment, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error - Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.VolumeAttachment, error) - List(ctx context.Context, opts metav1.ListOptions) (*v1.VolumeAttachmentList, error) + Get(ctx context.Context, name string, opts metav1.GetOptions) (*storagev1.VolumeAttachment, error) + List(ctx context.Context, opts metav1.ListOptions) (*storagev1.VolumeAttachmentList, error) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.VolumeAttachment, err error) - Apply(ctx context.Context, volumeAttachment *storagev1.VolumeAttachmentApplyConfiguration, opts metav1.ApplyOptions) (result *v1.VolumeAttachment, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *storagev1.VolumeAttachment, err error) + Apply(ctx context.Context, volumeAttachment *applyconfigurationsstoragev1.VolumeAttachmentApplyConfiguration, opts metav1.ApplyOptions) (result *storagev1.VolumeAttachment, err error) // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
- ApplyStatus(ctx context.Context, volumeAttachment *storagev1.VolumeAttachmentApplyConfiguration, opts metav1.ApplyOptions) (result *v1.VolumeAttachment, err error) + ApplyStatus(ctx context.Context, volumeAttachment *applyconfigurationsstoragev1.VolumeAttachmentApplyConfiguration, opts metav1.ApplyOptions) (result *storagev1.VolumeAttachment, err error) VolumeAttachmentExpansion } // volumeAttachments implements VolumeAttachmentInterface type volumeAttachments struct { - *gentype.ClientWithListAndApply[*v1.VolumeAttachment, *v1.VolumeAttachmentList, *storagev1.VolumeAttachmentApplyConfiguration] + *gentype.ClientWithListAndApply[*storagev1.VolumeAttachment, *storagev1.VolumeAttachmentList, *applyconfigurationsstoragev1.VolumeAttachmentApplyConfiguration] } // newVolumeAttachments returns a VolumeAttachments func newVolumeAttachments(c *StorageV1Client) *volumeAttachments { return &volumeAttachments{ - gentype.NewClientWithListAndApply[*v1.VolumeAttachment, *v1.VolumeAttachmentList, *storagev1.VolumeAttachmentApplyConfiguration]( + gentype.NewClientWithListAndApply[*storagev1.VolumeAttachment, *storagev1.VolumeAttachmentList, *applyconfigurationsstoragev1.VolumeAttachmentApplyConfiguration]( "volumeattachments", c.RESTClient(), scheme.ParameterCodec, "", - func() *v1.VolumeAttachment { return &v1.VolumeAttachment{} }, - func() *v1.VolumeAttachmentList { return &v1.VolumeAttachmentList{} }), + func() *storagev1.VolumeAttachment { return &storagev1.VolumeAttachment{} }, + func() *storagev1.VolumeAttachmentList { return &storagev1.VolumeAttachmentList{} }, + gentype.PrefersProtobuf[*storagev1.VolumeAttachment](), + ), } } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/csistoragecapacity.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/csistoragecapacity.go index 6819deff6..63ca27fa4 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/csistoragecapacity.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/csistoragecapacity.go @@ -19,13 +19,13 @@ limitations under the License. package v1alpha1 import ( - "context" + context "context" - v1alpha1 "k8s.io/api/storage/v1alpha1" + storagev1alpha1 "k8s.io/api/storage/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - storagev1alpha1 "k8s.io/client-go/applyconfigurations/storage/v1alpha1" + applyconfigurationsstoragev1alpha1 "k8s.io/client-go/applyconfigurations/storage/v1alpha1" gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" ) @@ -38,32 +38,34 @@ type CSIStorageCapacitiesGetter interface { // CSIStorageCapacityInterface has methods to work with CSIStorageCapacity resources. 
type CSIStorageCapacityInterface interface { - Create(ctx context.Context, cSIStorageCapacity *v1alpha1.CSIStorageCapacity, opts v1.CreateOptions) (*v1alpha1.CSIStorageCapacity, error) - Update(ctx context.Context, cSIStorageCapacity *v1alpha1.CSIStorageCapacity, opts v1.UpdateOptions) (*v1alpha1.CSIStorageCapacity, error) + Create(ctx context.Context, cSIStorageCapacity *storagev1alpha1.CSIStorageCapacity, opts v1.CreateOptions) (*storagev1alpha1.CSIStorageCapacity, error) + Update(ctx context.Context, cSIStorageCapacity *storagev1alpha1.CSIStorageCapacity, opts v1.UpdateOptions) (*storagev1alpha1.CSIStorageCapacity, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.CSIStorageCapacity, error) - List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.CSIStorageCapacityList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*storagev1alpha1.CSIStorageCapacity, error) + List(ctx context.Context, opts v1.ListOptions) (*storagev1alpha1.CSIStorageCapacityList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.CSIStorageCapacity, err error) - Apply(ctx context.Context, cSIStorageCapacity *storagev1alpha1.CSIStorageCapacityApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.CSIStorageCapacity, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *storagev1alpha1.CSIStorageCapacity, err error) + Apply(ctx context.Context, cSIStorageCapacity *applyconfigurationsstoragev1alpha1.CSIStorageCapacityApplyConfiguration, opts v1.ApplyOptions) (result *storagev1alpha1.CSIStorageCapacity, err error) CSIStorageCapacityExpansion } // cSIStorageCapacities implements CSIStorageCapacityInterface type cSIStorageCapacities struct { - *gentype.ClientWithListAndApply[*v1alpha1.CSIStorageCapacity, *v1alpha1.CSIStorageCapacityList, *storagev1alpha1.CSIStorageCapacityApplyConfiguration] + *gentype.ClientWithListAndApply[*storagev1alpha1.CSIStorageCapacity, *storagev1alpha1.CSIStorageCapacityList, *applyconfigurationsstoragev1alpha1.CSIStorageCapacityApplyConfiguration] } // newCSIStorageCapacities returns a CSIStorageCapacities func newCSIStorageCapacities(c *StorageV1alpha1Client, namespace string) *cSIStorageCapacities { return &cSIStorageCapacities{ - gentype.NewClientWithListAndApply[*v1alpha1.CSIStorageCapacity, *v1alpha1.CSIStorageCapacityList, *storagev1alpha1.CSIStorageCapacityApplyConfiguration]( + gentype.NewClientWithListAndApply[*storagev1alpha1.CSIStorageCapacity, *storagev1alpha1.CSIStorageCapacityList, *applyconfigurationsstoragev1alpha1.CSIStorageCapacityApplyConfiguration]( "csistoragecapacities", c.RESTClient(), scheme.ParameterCodec, namespace, - func() *v1alpha1.CSIStorageCapacity { return &v1alpha1.CSIStorageCapacity{} }, - func() *v1alpha1.CSIStorageCapacityList { return &v1alpha1.CSIStorageCapacityList{} }), + func() *storagev1alpha1.CSIStorageCapacity { return &storagev1alpha1.CSIStorageCapacity{} }, + func() *storagev1alpha1.CSIStorageCapacityList { return &storagev1alpha1.CSIStorageCapacityList{} }, + gentype.PrefersProtobuf[*storagev1alpha1.CSIStorageCapacity](), + ), } } diff --git 
a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake/fake_csistoragecapacity.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake/fake_csistoragecapacity.go index 0bcaccd20..3dc542754 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake/fake_csistoragecapacity.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake/fake_csistoragecapacity.go @@ -19,142 +19,35 @@ limitations under the License. package fake import ( - "context" - json "encoding/json" - "fmt" - v1alpha1 "k8s.io/api/storage/v1alpha1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" storagev1alpha1 "k8s.io/client-go/applyconfigurations/storage/v1alpha1" - testing "k8s.io/client-go/testing" + gentype "k8s.io/client-go/gentype" + typedstoragev1alpha1 "k8s.io/client-go/kubernetes/typed/storage/v1alpha1" ) -// FakeCSIStorageCapacities implements CSIStorageCapacityInterface -type FakeCSIStorageCapacities struct { +// fakeCSIStorageCapacities implements CSIStorageCapacityInterface +type fakeCSIStorageCapacities struct { + *gentype.FakeClientWithListAndApply[*v1alpha1.CSIStorageCapacity, *v1alpha1.CSIStorageCapacityList, *storagev1alpha1.CSIStorageCapacityApplyConfiguration] Fake *FakeStorageV1alpha1 - ns string -} - -var csistoragecapacitiesResource = v1alpha1.SchemeGroupVersion.WithResource("csistoragecapacities") - -var csistoragecapacitiesKind = v1alpha1.SchemeGroupVersion.WithKind("CSIStorageCapacity") - -// Get takes name of the cSIStorageCapacity, and returns the corresponding cSIStorageCapacity object, and an error if there is any. -func (c *FakeCSIStorageCapacities) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.CSIStorageCapacity, err error) { - emptyResult := &v1alpha1.CSIStorageCapacity{} - obj, err := c.Fake. - Invokes(testing.NewGetActionWithOptions(csistoragecapacitiesResource, c.ns, name, options), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1alpha1.CSIStorageCapacity), err -} - -// List takes label and field selectors, and returns the list of CSIStorageCapacities that match those selectors. -func (c *FakeCSIStorageCapacities) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.CSIStorageCapacityList, err error) { - emptyResult := &v1alpha1.CSIStorageCapacityList{} - obj, err := c.Fake. - Invokes(testing.NewListActionWithOptions(csistoragecapacitiesResource, csistoragecapacitiesKind, c.ns, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1alpha1.CSIStorageCapacityList{ListMeta: obj.(*v1alpha1.CSIStorageCapacityList).ListMeta} - for _, item := range obj.(*v1alpha1.CSIStorageCapacityList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested cSIStorageCapacities. -func (c *FakeCSIStorageCapacities) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchActionWithOptions(csistoragecapacitiesResource, c.ns, opts)) - } -// Create takes the representation of a cSIStorageCapacity and creates it. Returns the server's representation of the cSIStorageCapacity, and an error, if there is any. 
-func (c *FakeCSIStorageCapacities) Create(ctx context.Context, cSIStorageCapacity *v1alpha1.CSIStorageCapacity, opts v1.CreateOptions) (result *v1alpha1.CSIStorageCapacity, err error) { - emptyResult := &v1alpha1.CSIStorageCapacity{} - obj, err := c.Fake. - Invokes(testing.NewCreateActionWithOptions(csistoragecapacitiesResource, c.ns, cSIStorageCapacity, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1alpha1.CSIStorageCapacity), err -} - -// Update takes the representation of a cSIStorageCapacity and updates it. Returns the server's representation of the cSIStorageCapacity, and an error, if there is any. -func (c *FakeCSIStorageCapacities) Update(ctx context.Context, cSIStorageCapacity *v1alpha1.CSIStorageCapacity, opts v1.UpdateOptions) (result *v1alpha1.CSIStorageCapacity, err error) { - emptyResult := &v1alpha1.CSIStorageCapacity{} - obj, err := c.Fake. - Invokes(testing.NewUpdateActionWithOptions(csistoragecapacitiesResource, c.ns, cSIStorageCapacity, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1alpha1.CSIStorageCapacity), err -} - -// Delete takes name of the cSIStorageCapacity and deletes it. Returns an error if one occurs. -func (c *FakeCSIStorageCapacities) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(csistoragecapacitiesResource, c.ns, name, opts), &v1alpha1.CSIStorageCapacity{}) - - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeCSIStorageCapacities) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionActionWithOptions(csistoragecapacitiesResource, c.ns, opts, listOpts) - - _, err := c.Fake.Invokes(action, &v1alpha1.CSIStorageCapacityList{}) - return err -} - -// Patch applies the patch and returns the patched cSIStorageCapacity. -func (c *FakeCSIStorageCapacities) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.CSIStorageCapacity, err error) { - emptyResult := &v1alpha1.CSIStorageCapacity{} - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceActionWithOptions(csistoragecapacitiesResource, c.ns, name, pt, data, opts, subresources...), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1alpha1.CSIStorageCapacity), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied cSIStorageCapacity. -func (c *FakeCSIStorageCapacities) Apply(ctx context.Context, cSIStorageCapacity *storagev1alpha1.CSIStorageCapacityApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.CSIStorageCapacity, err error) { - if cSIStorageCapacity == nil { - return nil, fmt.Errorf("cSIStorageCapacity provided to Apply must not be nil") - } - data, err := json.Marshal(cSIStorageCapacity) - if err != nil { - return nil, err - } - name := cSIStorageCapacity.Name - if name == nil { - return nil, fmt.Errorf("cSIStorageCapacity.Name must be provided to Apply") - } - emptyResult := &v1alpha1.CSIStorageCapacity{} - obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceActionWithOptions(csistoragecapacitiesResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) - - if obj == nil { - return emptyResult, err +func newFakeCSIStorageCapacities(fake *FakeStorageV1alpha1, namespace string) typedstoragev1alpha1.CSIStorageCapacityInterface { + return &fakeCSIStorageCapacities{ + gentype.NewFakeClientWithListAndApply[*v1alpha1.CSIStorageCapacity, *v1alpha1.CSIStorageCapacityList, *storagev1alpha1.CSIStorageCapacityApplyConfiguration]( + fake.Fake, + namespace, + v1alpha1.SchemeGroupVersion.WithResource("csistoragecapacities"), + v1alpha1.SchemeGroupVersion.WithKind("CSIStorageCapacity"), + func() *v1alpha1.CSIStorageCapacity { return &v1alpha1.CSIStorageCapacity{} }, + func() *v1alpha1.CSIStorageCapacityList { return &v1alpha1.CSIStorageCapacityList{} }, + func(dst, src *v1alpha1.CSIStorageCapacityList) { dst.ListMeta = src.ListMeta }, + func(list *v1alpha1.CSIStorageCapacityList) []*v1alpha1.CSIStorageCapacity { + return gentype.ToPointerSlice(list.Items) + }, + func(list *v1alpha1.CSIStorageCapacityList, items []*v1alpha1.CSIStorageCapacity) { + list.Items = gentype.FromPointerSlice(items) + }, + ), + fake, } - return obj.(*v1alpha1.CSIStorageCapacity), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake/fake_storage_client.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake/fake_storage_client.go index 0e078f348..b5670cbdd 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake/fake_storage_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake/fake_storage_client.go @@ -29,15 +29,15 @@ type FakeStorageV1alpha1 struct { } func (c *FakeStorageV1alpha1) CSIStorageCapacities(namespace string) v1alpha1.CSIStorageCapacityInterface { - return &FakeCSIStorageCapacities{c, namespace} + return newFakeCSIStorageCapacities(c, namespace) } func (c *FakeStorageV1alpha1) VolumeAttachments() v1alpha1.VolumeAttachmentInterface { - return &FakeVolumeAttachments{c} + return newFakeVolumeAttachments(c) } func (c *FakeStorageV1alpha1) VolumeAttributesClasses() v1alpha1.VolumeAttributesClassInterface { - return &FakeVolumeAttributesClasses{c} + return newFakeVolumeAttributesClasses(c) } // RESTClient returns a RESTClient that is used to communicate diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake/fake_volumeattachment.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake/fake_volumeattachment.go index a07247f8f..a4c230709 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake/fake_volumeattachment.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake/fake_volumeattachment.go @@ -19,168 +19,35 @@ limitations under the License. 
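// Minimal test sketch, an illustration rather than part of the vendored diff: the
// rewritten fakes are now unexported and reached only through the newFakeXxx
// constructors called by the fake clientset getters above, so existing tests that use
// the generic fake package keep working unchanged (names below are assumptions):
//
//	import (
//		storagev1 "k8s.io/api/storage/v1"
//		metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
//		clientfake "k8s.io/client-go/kubernetes/fake"
//	)
//
//	cs := clientfake.NewSimpleClientset()
//	_, err := cs.StorageV1().StorageClasses().Create(ctx,
//		&storagev1.StorageClass{
//			ObjectMeta:  metav1.ObjectMeta{Name: "fast"},
//			Provisioner: "example.com/csi",
//		},
//		metav1.CreateOptions{})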
package fake import ( - "context" - json "encoding/json" - "fmt" - v1alpha1 "k8s.io/api/storage/v1alpha1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" storagev1alpha1 "k8s.io/client-go/applyconfigurations/storage/v1alpha1" - testing "k8s.io/client-go/testing" + gentype "k8s.io/client-go/gentype" + typedstoragev1alpha1 "k8s.io/client-go/kubernetes/typed/storage/v1alpha1" ) -// FakeVolumeAttachments implements VolumeAttachmentInterface -type FakeVolumeAttachments struct { +// fakeVolumeAttachments implements VolumeAttachmentInterface +type fakeVolumeAttachments struct { + *gentype.FakeClientWithListAndApply[*v1alpha1.VolumeAttachment, *v1alpha1.VolumeAttachmentList, *storagev1alpha1.VolumeAttachmentApplyConfiguration] Fake *FakeStorageV1alpha1 } -var volumeattachmentsResource = v1alpha1.SchemeGroupVersion.WithResource("volumeattachments") - -var volumeattachmentsKind = v1alpha1.SchemeGroupVersion.WithKind("VolumeAttachment") - -// Get takes name of the volumeAttachment, and returns the corresponding volumeAttachment object, and an error if there is any. -func (c *FakeVolumeAttachments) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.VolumeAttachment, err error) { - emptyResult := &v1alpha1.VolumeAttachment{} - obj, err := c.Fake. - Invokes(testing.NewRootGetActionWithOptions(volumeattachmentsResource, name, options), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1alpha1.VolumeAttachment), err -} - -// List takes label and field selectors, and returns the list of VolumeAttachments that match those selectors. -func (c *FakeVolumeAttachments) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.VolumeAttachmentList, err error) { - emptyResult := &v1alpha1.VolumeAttachmentList{} - obj, err := c.Fake. - Invokes(testing.NewRootListActionWithOptions(volumeattachmentsResource, volumeattachmentsKind, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1alpha1.VolumeAttachmentList{ListMeta: obj.(*v1alpha1.VolumeAttachmentList).ListMeta} - for _, item := range obj.(*v1alpha1.VolumeAttachmentList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested volumeAttachments. -func (c *FakeVolumeAttachments) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewRootWatchActionWithOptions(volumeattachmentsResource, opts)) -} - -// Create takes the representation of a volumeAttachment and creates it. Returns the server's representation of the volumeAttachment, and an error, if there is any. -func (c *FakeVolumeAttachments) Create(ctx context.Context, volumeAttachment *v1alpha1.VolumeAttachment, opts v1.CreateOptions) (result *v1alpha1.VolumeAttachment, err error) { - emptyResult := &v1alpha1.VolumeAttachment{} - obj, err := c.Fake. - Invokes(testing.NewRootCreateActionWithOptions(volumeattachmentsResource, volumeAttachment, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1alpha1.VolumeAttachment), err -} - -// Update takes the representation of a volumeAttachment and updates it. 
Returns the server's representation of the volumeAttachment, and an error, if there is any. -func (c *FakeVolumeAttachments) Update(ctx context.Context, volumeAttachment *v1alpha1.VolumeAttachment, opts v1.UpdateOptions) (result *v1alpha1.VolumeAttachment, err error) { - emptyResult := &v1alpha1.VolumeAttachment{} - obj, err := c.Fake. - Invokes(testing.NewRootUpdateActionWithOptions(volumeattachmentsResource, volumeAttachment, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1alpha1.VolumeAttachment), err -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeVolumeAttachments) UpdateStatus(ctx context.Context, volumeAttachment *v1alpha1.VolumeAttachment, opts v1.UpdateOptions) (result *v1alpha1.VolumeAttachment, err error) { - emptyResult := &v1alpha1.VolumeAttachment{} - obj, err := c.Fake. - Invokes(testing.NewRootUpdateSubresourceActionWithOptions(volumeattachmentsResource, "status", volumeAttachment, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1alpha1.VolumeAttachment), err -} - -// Delete takes name of the volumeAttachment and deletes it. Returns an error if one occurs. -func (c *FakeVolumeAttachments) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewRootDeleteActionWithOptions(volumeattachmentsResource, name, opts), &v1alpha1.VolumeAttachment{}) - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeVolumeAttachments) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewRootDeleteCollectionActionWithOptions(volumeattachmentsResource, opts, listOpts) - - _, err := c.Fake.Invokes(action, &v1alpha1.VolumeAttachmentList{}) - return err -} - -// Patch applies the patch and returns the patched volumeAttachment. -func (c *FakeVolumeAttachments) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.VolumeAttachment, err error) { - emptyResult := &v1alpha1.VolumeAttachment{} - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceActionWithOptions(volumeattachmentsResource, name, pt, data, opts, subresources...), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1alpha1.VolumeAttachment), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied volumeAttachment. -func (c *FakeVolumeAttachments) Apply(ctx context.Context, volumeAttachment *storagev1alpha1.VolumeAttachmentApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.VolumeAttachment, err error) { - if volumeAttachment == nil { - return nil, fmt.Errorf("volumeAttachment provided to Apply must not be nil") - } - data, err := json.Marshal(volumeAttachment) - if err != nil { - return nil, err - } - name := volumeAttachment.Name - if name == nil { - return nil, fmt.Errorf("volumeAttachment.Name must be provided to Apply") - } - emptyResult := &v1alpha1.VolumeAttachment{} - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceActionWithOptions(volumeattachmentsResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1alpha1.VolumeAttachment), err -} - -// ApplyStatus was generated because the type contains a Status member. 
-// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *FakeVolumeAttachments) ApplyStatus(ctx context.Context, volumeAttachment *storagev1alpha1.VolumeAttachmentApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.VolumeAttachment, err error) { - if volumeAttachment == nil { - return nil, fmt.Errorf("volumeAttachment provided to Apply must not be nil") - } - data, err := json.Marshal(volumeAttachment) - if err != nil { - return nil, err - } - name := volumeAttachment.Name - if name == nil { - return nil, fmt.Errorf("volumeAttachment.Name must be provided to Apply") - } - emptyResult := &v1alpha1.VolumeAttachment{} - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceActionWithOptions(volumeattachmentsResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) - if obj == nil { - return emptyResult, err +func newFakeVolumeAttachments(fake *FakeStorageV1alpha1) typedstoragev1alpha1.VolumeAttachmentInterface { + return &fakeVolumeAttachments{ + gentype.NewFakeClientWithListAndApply[*v1alpha1.VolumeAttachment, *v1alpha1.VolumeAttachmentList, *storagev1alpha1.VolumeAttachmentApplyConfiguration]( + fake.Fake, + "", + v1alpha1.SchemeGroupVersion.WithResource("volumeattachments"), + v1alpha1.SchemeGroupVersion.WithKind("VolumeAttachment"), + func() *v1alpha1.VolumeAttachment { return &v1alpha1.VolumeAttachment{} }, + func() *v1alpha1.VolumeAttachmentList { return &v1alpha1.VolumeAttachmentList{} }, + func(dst, src *v1alpha1.VolumeAttachmentList) { dst.ListMeta = src.ListMeta }, + func(list *v1alpha1.VolumeAttachmentList) []*v1alpha1.VolumeAttachment { + return gentype.ToPointerSlice(list.Items) + }, + func(list *v1alpha1.VolumeAttachmentList, items []*v1alpha1.VolumeAttachment) { + list.Items = gentype.FromPointerSlice(items) + }, + ), + fake, } - return obj.(*v1alpha1.VolumeAttachment), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake/fake_volumeattributesclass.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake/fake_volumeattributesclass.go index 0d7fe9aa8..8fab41a73 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake/fake_volumeattributesclass.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake/fake_volumeattributesclass.go @@ -19,133 +19,35 @@ limitations under the License. 
package fake import ( - "context" - json "encoding/json" - "fmt" - v1alpha1 "k8s.io/api/storage/v1alpha1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" storagev1alpha1 "k8s.io/client-go/applyconfigurations/storage/v1alpha1" - testing "k8s.io/client-go/testing" + gentype "k8s.io/client-go/gentype" + typedstoragev1alpha1 "k8s.io/client-go/kubernetes/typed/storage/v1alpha1" ) -// FakeVolumeAttributesClasses implements VolumeAttributesClassInterface -type FakeVolumeAttributesClasses struct { +// fakeVolumeAttributesClasses implements VolumeAttributesClassInterface +type fakeVolumeAttributesClasses struct { + *gentype.FakeClientWithListAndApply[*v1alpha1.VolumeAttributesClass, *v1alpha1.VolumeAttributesClassList, *storagev1alpha1.VolumeAttributesClassApplyConfiguration] Fake *FakeStorageV1alpha1 } -var volumeattributesclassesResource = v1alpha1.SchemeGroupVersion.WithResource("volumeattributesclasses") - -var volumeattributesclassesKind = v1alpha1.SchemeGroupVersion.WithKind("VolumeAttributesClass") - -// Get takes name of the volumeAttributesClass, and returns the corresponding volumeAttributesClass object, and an error if there is any. -func (c *FakeVolumeAttributesClasses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.VolumeAttributesClass, err error) { - emptyResult := &v1alpha1.VolumeAttributesClass{} - obj, err := c.Fake. - Invokes(testing.NewRootGetActionWithOptions(volumeattributesclassesResource, name, options), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1alpha1.VolumeAttributesClass), err -} - -// List takes label and field selectors, and returns the list of VolumeAttributesClasses that match those selectors. -func (c *FakeVolumeAttributesClasses) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.VolumeAttributesClassList, err error) { - emptyResult := &v1alpha1.VolumeAttributesClassList{} - obj, err := c.Fake. - Invokes(testing.NewRootListActionWithOptions(volumeattributesclassesResource, volumeattributesclassesKind, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1alpha1.VolumeAttributesClassList{ListMeta: obj.(*v1alpha1.VolumeAttributesClassList).ListMeta} - for _, item := range obj.(*v1alpha1.VolumeAttributesClassList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested volumeAttributesClasses. -func (c *FakeVolumeAttributesClasses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewRootWatchActionWithOptions(volumeattributesclassesResource, opts)) -} - -// Create takes the representation of a volumeAttributesClass and creates it. Returns the server's representation of the volumeAttributesClass, and an error, if there is any. -func (c *FakeVolumeAttributesClasses) Create(ctx context.Context, volumeAttributesClass *v1alpha1.VolumeAttributesClass, opts v1.CreateOptions) (result *v1alpha1.VolumeAttributesClass, err error) { - emptyResult := &v1alpha1.VolumeAttributesClass{} - obj, err := c.Fake. 
- Invokes(testing.NewRootCreateActionWithOptions(volumeattributesclassesResource, volumeAttributesClass, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1alpha1.VolumeAttributesClass), err -} - -// Update takes the representation of a volumeAttributesClass and updates it. Returns the server's representation of the volumeAttributesClass, and an error, if there is any. -func (c *FakeVolumeAttributesClasses) Update(ctx context.Context, volumeAttributesClass *v1alpha1.VolumeAttributesClass, opts v1.UpdateOptions) (result *v1alpha1.VolumeAttributesClass, err error) { - emptyResult := &v1alpha1.VolumeAttributesClass{} - obj, err := c.Fake. - Invokes(testing.NewRootUpdateActionWithOptions(volumeattributesclassesResource, volumeAttributesClass, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1alpha1.VolumeAttributesClass), err -} - -// Delete takes name of the volumeAttributesClass and deletes it. Returns an error if one occurs. -func (c *FakeVolumeAttributesClasses) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewRootDeleteActionWithOptions(volumeattributesclassesResource, name, opts), &v1alpha1.VolumeAttributesClass{}) - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeVolumeAttributesClasses) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewRootDeleteCollectionActionWithOptions(volumeattributesclassesResource, opts, listOpts) - - _, err := c.Fake.Invokes(action, &v1alpha1.VolumeAttributesClassList{}) - return err -} - -// Patch applies the patch and returns the patched volumeAttributesClass. -func (c *FakeVolumeAttributesClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.VolumeAttributesClass, err error) { - emptyResult := &v1alpha1.VolumeAttributesClass{} - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceActionWithOptions(volumeattributesclassesResource, name, pt, data, opts, subresources...), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1alpha1.VolumeAttributesClass), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied volumeAttributesClass. -func (c *FakeVolumeAttributesClasses) Apply(ctx context.Context, volumeAttributesClass *storagev1alpha1.VolumeAttributesClassApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.VolumeAttributesClass, err error) { - if volumeAttributesClass == nil { - return nil, fmt.Errorf("volumeAttributesClass provided to Apply must not be nil") - } - data, err := json.Marshal(volumeAttributesClass) - if err != nil { - return nil, err - } - name := volumeAttributesClass.Name - if name == nil { - return nil, fmt.Errorf("volumeAttributesClass.Name must be provided to Apply") - } - emptyResult := &v1alpha1.VolumeAttributesClass{} - obj, err := c.Fake. 
- Invokes(testing.NewRootPatchSubresourceActionWithOptions(volumeattributesclassesResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) - if obj == nil { - return emptyResult, err +func newFakeVolumeAttributesClasses(fake *FakeStorageV1alpha1) typedstoragev1alpha1.VolumeAttributesClassInterface { + return &fakeVolumeAttributesClasses{ + gentype.NewFakeClientWithListAndApply[*v1alpha1.VolumeAttributesClass, *v1alpha1.VolumeAttributesClassList, *storagev1alpha1.VolumeAttributesClassApplyConfiguration]( + fake.Fake, + "", + v1alpha1.SchemeGroupVersion.WithResource("volumeattributesclasses"), + v1alpha1.SchemeGroupVersion.WithKind("VolumeAttributesClass"), + func() *v1alpha1.VolumeAttributesClass { return &v1alpha1.VolumeAttributesClass{} }, + func() *v1alpha1.VolumeAttributesClassList { return &v1alpha1.VolumeAttributesClassList{} }, + func(dst, src *v1alpha1.VolumeAttributesClassList) { dst.ListMeta = src.ListMeta }, + func(list *v1alpha1.VolumeAttributesClassList) []*v1alpha1.VolumeAttributesClass { + return gentype.ToPointerSlice(list.Items) + }, + func(list *v1alpha1.VolumeAttributesClassList, items []*v1alpha1.VolumeAttributesClass) { + list.Items = gentype.FromPointerSlice(items) + }, + ), + fake, } - return obj.(*v1alpha1.VolumeAttributesClass), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/storage_client.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/storage_client.go index 63e3fc243..17b680d19 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/storage_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/storage_client.go @@ -19,10 +19,10 @@ limitations under the License. package v1alpha1 import ( - "net/http" + http "net/http" - v1alpha1 "k8s.io/api/storage/v1alpha1" - "k8s.io/client-go/kubernetes/scheme" + storagev1alpha1 "k8s.io/api/storage/v1alpha1" + scheme "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -95,10 +95,10 @@ func New(c rest.Interface) *StorageV1alpha1Client { } func setConfigDefaults(config *rest.Config) error { - gv := v1alpha1.SchemeGroupVersion + gv := storagev1alpha1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/volumeattachment.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/volumeattachment.go index 0982d5568..d9c24ab5b 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/volumeattachment.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/volumeattachment.go @@ -19,13 +19,13 @@ limitations under the License. 
package v1alpha1 import ( - "context" + context "context" - v1alpha1 "k8s.io/api/storage/v1alpha1" + storagev1alpha1 "k8s.io/api/storage/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - storagev1alpha1 "k8s.io/client-go/applyconfigurations/storage/v1alpha1" + applyconfigurationsstoragev1alpha1 "k8s.io/client-go/applyconfigurations/storage/v1alpha1" gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" ) @@ -38,36 +38,38 @@ type VolumeAttachmentsGetter interface { // VolumeAttachmentInterface has methods to work with VolumeAttachment resources. type VolumeAttachmentInterface interface { - Create(ctx context.Context, volumeAttachment *v1alpha1.VolumeAttachment, opts v1.CreateOptions) (*v1alpha1.VolumeAttachment, error) - Update(ctx context.Context, volumeAttachment *v1alpha1.VolumeAttachment, opts v1.UpdateOptions) (*v1alpha1.VolumeAttachment, error) + Create(ctx context.Context, volumeAttachment *storagev1alpha1.VolumeAttachment, opts v1.CreateOptions) (*storagev1alpha1.VolumeAttachment, error) + Update(ctx context.Context, volumeAttachment *storagev1alpha1.VolumeAttachment, opts v1.UpdateOptions) (*storagev1alpha1.VolumeAttachment, error) // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - UpdateStatus(ctx context.Context, volumeAttachment *v1alpha1.VolumeAttachment, opts v1.UpdateOptions) (*v1alpha1.VolumeAttachment, error) + UpdateStatus(ctx context.Context, volumeAttachment *storagev1alpha1.VolumeAttachment, opts v1.UpdateOptions) (*storagev1alpha1.VolumeAttachment, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.VolumeAttachment, error) - List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.VolumeAttachmentList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*storagev1alpha1.VolumeAttachment, error) + List(ctx context.Context, opts v1.ListOptions) (*storagev1alpha1.VolumeAttachmentList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.VolumeAttachment, err error) - Apply(ctx context.Context, volumeAttachment *storagev1alpha1.VolumeAttachmentApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.VolumeAttachment, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *storagev1alpha1.VolumeAttachment, err error) + Apply(ctx context.Context, volumeAttachment *applyconfigurationsstoragev1alpha1.VolumeAttachmentApplyConfiguration, opts v1.ApplyOptions) (result *storagev1alpha1.VolumeAttachment, err error) // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
- ApplyStatus(ctx context.Context, volumeAttachment *storagev1alpha1.VolumeAttachmentApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.VolumeAttachment, err error) + ApplyStatus(ctx context.Context, volumeAttachment *applyconfigurationsstoragev1alpha1.VolumeAttachmentApplyConfiguration, opts v1.ApplyOptions) (result *storagev1alpha1.VolumeAttachment, err error) VolumeAttachmentExpansion } // volumeAttachments implements VolumeAttachmentInterface type volumeAttachments struct { - *gentype.ClientWithListAndApply[*v1alpha1.VolumeAttachment, *v1alpha1.VolumeAttachmentList, *storagev1alpha1.VolumeAttachmentApplyConfiguration] + *gentype.ClientWithListAndApply[*storagev1alpha1.VolumeAttachment, *storagev1alpha1.VolumeAttachmentList, *applyconfigurationsstoragev1alpha1.VolumeAttachmentApplyConfiguration] } // newVolumeAttachments returns a VolumeAttachments func newVolumeAttachments(c *StorageV1alpha1Client) *volumeAttachments { return &volumeAttachments{ - gentype.NewClientWithListAndApply[*v1alpha1.VolumeAttachment, *v1alpha1.VolumeAttachmentList, *storagev1alpha1.VolumeAttachmentApplyConfiguration]( + gentype.NewClientWithListAndApply[*storagev1alpha1.VolumeAttachment, *storagev1alpha1.VolumeAttachmentList, *applyconfigurationsstoragev1alpha1.VolumeAttachmentApplyConfiguration]( "volumeattachments", c.RESTClient(), scheme.ParameterCodec, "", - func() *v1alpha1.VolumeAttachment { return &v1alpha1.VolumeAttachment{} }, - func() *v1alpha1.VolumeAttachmentList { return &v1alpha1.VolumeAttachmentList{} }), + func() *storagev1alpha1.VolumeAttachment { return &storagev1alpha1.VolumeAttachment{} }, + func() *storagev1alpha1.VolumeAttachmentList { return &storagev1alpha1.VolumeAttachmentList{} }, + gentype.PrefersProtobuf[*storagev1alpha1.VolumeAttachment](), + ), } } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/volumeattributesclass.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/volumeattributesclass.go index 40cff7588..ef7d6f4ba 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/volumeattributesclass.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/volumeattributesclass.go @@ -19,13 +19,13 @@ limitations under the License. package v1alpha1 import ( - "context" + context "context" - v1alpha1 "k8s.io/api/storage/v1alpha1" + storagev1alpha1 "k8s.io/api/storage/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - storagev1alpha1 "k8s.io/client-go/applyconfigurations/storage/v1alpha1" + applyconfigurationsstoragev1alpha1 "k8s.io/client-go/applyconfigurations/storage/v1alpha1" gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" ) @@ -38,32 +38,34 @@ type VolumeAttributesClassesGetter interface { // VolumeAttributesClassInterface has methods to work with VolumeAttributesClass resources. 
type VolumeAttributesClassInterface interface { - Create(ctx context.Context, volumeAttributesClass *v1alpha1.VolumeAttributesClass, opts v1.CreateOptions) (*v1alpha1.VolumeAttributesClass, error) - Update(ctx context.Context, volumeAttributesClass *v1alpha1.VolumeAttributesClass, opts v1.UpdateOptions) (*v1alpha1.VolumeAttributesClass, error) + Create(ctx context.Context, volumeAttributesClass *storagev1alpha1.VolumeAttributesClass, opts v1.CreateOptions) (*storagev1alpha1.VolumeAttributesClass, error) + Update(ctx context.Context, volumeAttributesClass *storagev1alpha1.VolumeAttributesClass, opts v1.UpdateOptions) (*storagev1alpha1.VolumeAttributesClass, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.VolumeAttributesClass, error) - List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.VolumeAttributesClassList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*storagev1alpha1.VolumeAttributesClass, error) + List(ctx context.Context, opts v1.ListOptions) (*storagev1alpha1.VolumeAttributesClassList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.VolumeAttributesClass, err error) - Apply(ctx context.Context, volumeAttributesClass *storagev1alpha1.VolumeAttributesClassApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.VolumeAttributesClass, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *storagev1alpha1.VolumeAttributesClass, err error) + Apply(ctx context.Context, volumeAttributesClass *applyconfigurationsstoragev1alpha1.VolumeAttributesClassApplyConfiguration, opts v1.ApplyOptions) (result *storagev1alpha1.VolumeAttributesClass, err error) VolumeAttributesClassExpansion } // volumeAttributesClasses implements VolumeAttributesClassInterface type volumeAttributesClasses struct { - *gentype.ClientWithListAndApply[*v1alpha1.VolumeAttributesClass, *v1alpha1.VolumeAttributesClassList, *storagev1alpha1.VolumeAttributesClassApplyConfiguration] + *gentype.ClientWithListAndApply[*storagev1alpha1.VolumeAttributesClass, *storagev1alpha1.VolumeAttributesClassList, *applyconfigurationsstoragev1alpha1.VolumeAttributesClassApplyConfiguration] } // newVolumeAttributesClasses returns a VolumeAttributesClasses func newVolumeAttributesClasses(c *StorageV1alpha1Client) *volumeAttributesClasses { return &volumeAttributesClasses{ - gentype.NewClientWithListAndApply[*v1alpha1.VolumeAttributesClass, *v1alpha1.VolumeAttributesClassList, *storagev1alpha1.VolumeAttributesClassApplyConfiguration]( + gentype.NewClientWithListAndApply[*storagev1alpha1.VolumeAttributesClass, *storagev1alpha1.VolumeAttributesClassList, *applyconfigurationsstoragev1alpha1.VolumeAttributesClassApplyConfiguration]( "volumeattributesclasses", c.RESTClient(), scheme.ParameterCodec, "", - func() *v1alpha1.VolumeAttributesClass { return &v1alpha1.VolumeAttributesClass{} }, - func() *v1alpha1.VolumeAttributesClassList { return &v1alpha1.VolumeAttributesClassList{} }), + func() *storagev1alpha1.VolumeAttributesClass { return &storagev1alpha1.VolumeAttributesClass{} }, + func() *storagev1alpha1.VolumeAttributesClassList { return 
&storagev1alpha1.VolumeAttributesClassList{} }, + gentype.PrefersProtobuf[*storagev1alpha1.VolumeAttributesClass](), + ), } } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/csidriver.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/csidriver.go index 2748919b4..063fdb8d1 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/csidriver.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/csidriver.go @@ -19,13 +19,13 @@ limitations under the License. package v1beta1 import ( - "context" + context "context" - v1beta1 "k8s.io/api/storage/v1beta1" + storagev1beta1 "k8s.io/api/storage/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - storagev1beta1 "k8s.io/client-go/applyconfigurations/storage/v1beta1" + applyconfigurationsstoragev1beta1 "k8s.io/client-go/applyconfigurations/storage/v1beta1" gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" ) @@ -38,32 +38,34 @@ type CSIDriversGetter interface { // CSIDriverInterface has methods to work with CSIDriver resources. type CSIDriverInterface interface { - Create(ctx context.Context, cSIDriver *v1beta1.CSIDriver, opts v1.CreateOptions) (*v1beta1.CSIDriver, error) - Update(ctx context.Context, cSIDriver *v1beta1.CSIDriver, opts v1.UpdateOptions) (*v1beta1.CSIDriver, error) + Create(ctx context.Context, cSIDriver *storagev1beta1.CSIDriver, opts v1.CreateOptions) (*storagev1beta1.CSIDriver, error) + Update(ctx context.Context, cSIDriver *storagev1beta1.CSIDriver, opts v1.UpdateOptions) (*storagev1beta1.CSIDriver, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.CSIDriver, error) - List(ctx context.Context, opts v1.ListOptions) (*v1beta1.CSIDriverList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*storagev1beta1.CSIDriver, error) + List(ctx context.Context, opts v1.ListOptions) (*storagev1beta1.CSIDriverList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.CSIDriver, err error) - Apply(ctx context.Context, cSIDriver *storagev1beta1.CSIDriverApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.CSIDriver, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *storagev1beta1.CSIDriver, err error) + Apply(ctx context.Context, cSIDriver *applyconfigurationsstoragev1beta1.CSIDriverApplyConfiguration, opts v1.ApplyOptions) (result *storagev1beta1.CSIDriver, err error) CSIDriverExpansion } // cSIDrivers implements CSIDriverInterface type cSIDrivers struct { - *gentype.ClientWithListAndApply[*v1beta1.CSIDriver, *v1beta1.CSIDriverList, *storagev1beta1.CSIDriverApplyConfiguration] + *gentype.ClientWithListAndApply[*storagev1beta1.CSIDriver, *storagev1beta1.CSIDriverList, *applyconfigurationsstoragev1beta1.CSIDriverApplyConfiguration] } // newCSIDrivers returns a CSIDrivers func newCSIDrivers(c *StorageV1beta1Client) *cSIDrivers { return &cSIDrivers{ - gentype.NewClientWithListAndApply[*v1beta1.CSIDriver, *v1beta1.CSIDriverList, *storagev1beta1.CSIDriverApplyConfiguration]( + 
gentype.NewClientWithListAndApply[*storagev1beta1.CSIDriver, *storagev1beta1.CSIDriverList, *applyconfigurationsstoragev1beta1.CSIDriverApplyConfiguration]( "csidrivers", c.RESTClient(), scheme.ParameterCodec, "", - func() *v1beta1.CSIDriver { return &v1beta1.CSIDriver{} }, - func() *v1beta1.CSIDriverList { return &v1beta1.CSIDriverList{} }), + func() *storagev1beta1.CSIDriver { return &storagev1beta1.CSIDriver{} }, + func() *storagev1beta1.CSIDriverList { return &storagev1beta1.CSIDriverList{} }, + gentype.PrefersProtobuf[*storagev1beta1.CSIDriver](), + ), } } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/csinode.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/csinode.go index fe6fe228e..5e8eb2e37 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/csinode.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/csinode.go @@ -19,13 +19,13 @@ limitations under the License. package v1beta1 import ( - "context" + context "context" - v1beta1 "k8s.io/api/storage/v1beta1" + storagev1beta1 "k8s.io/api/storage/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - storagev1beta1 "k8s.io/client-go/applyconfigurations/storage/v1beta1" + applyconfigurationsstoragev1beta1 "k8s.io/client-go/applyconfigurations/storage/v1beta1" gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" ) @@ -38,32 +38,34 @@ type CSINodesGetter interface { // CSINodeInterface has methods to work with CSINode resources. type CSINodeInterface interface { - Create(ctx context.Context, cSINode *v1beta1.CSINode, opts v1.CreateOptions) (*v1beta1.CSINode, error) - Update(ctx context.Context, cSINode *v1beta1.CSINode, opts v1.UpdateOptions) (*v1beta1.CSINode, error) + Create(ctx context.Context, cSINode *storagev1beta1.CSINode, opts v1.CreateOptions) (*storagev1beta1.CSINode, error) + Update(ctx context.Context, cSINode *storagev1beta1.CSINode, opts v1.UpdateOptions) (*storagev1beta1.CSINode, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.CSINode, error) - List(ctx context.Context, opts v1.ListOptions) (*v1beta1.CSINodeList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*storagev1beta1.CSINode, error) + List(ctx context.Context, opts v1.ListOptions) (*storagev1beta1.CSINodeList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.CSINode, err error) - Apply(ctx context.Context, cSINode *storagev1beta1.CSINodeApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.CSINode, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *storagev1beta1.CSINode, err error) + Apply(ctx context.Context, cSINode *applyconfigurationsstoragev1beta1.CSINodeApplyConfiguration, opts v1.ApplyOptions) (result *storagev1beta1.CSINode, err error) CSINodeExpansion } // cSINodes implements CSINodeInterface type cSINodes struct { - *gentype.ClientWithListAndApply[*v1beta1.CSINode, *v1beta1.CSINodeList, *storagev1beta1.CSINodeApplyConfiguration] + *gentype.ClientWithListAndApply[*storagev1beta1.CSINode, *storagev1beta1.CSINodeList, 
*applyconfigurationsstoragev1beta1.CSINodeApplyConfiguration] } // newCSINodes returns a CSINodes func newCSINodes(c *StorageV1beta1Client) *cSINodes { return &cSINodes{ - gentype.NewClientWithListAndApply[*v1beta1.CSINode, *v1beta1.CSINodeList, *storagev1beta1.CSINodeApplyConfiguration]( + gentype.NewClientWithListAndApply[*storagev1beta1.CSINode, *storagev1beta1.CSINodeList, *applyconfigurationsstoragev1beta1.CSINodeApplyConfiguration]( "csinodes", c.RESTClient(), scheme.ParameterCodec, "", - func() *v1beta1.CSINode { return &v1beta1.CSINode{} }, - func() *v1beta1.CSINodeList { return &v1beta1.CSINodeList{} }), + func() *storagev1beta1.CSINode { return &storagev1beta1.CSINode{} }, + func() *storagev1beta1.CSINodeList { return &storagev1beta1.CSINodeList{} }, + gentype.PrefersProtobuf[*storagev1beta1.CSINode](), + ), } } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/csistoragecapacity.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/csistoragecapacity.go index e9ffc1df9..d1f5a7029 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/csistoragecapacity.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/csistoragecapacity.go @@ -19,13 +19,13 @@ limitations under the License. package v1beta1 import ( - "context" + context "context" - v1beta1 "k8s.io/api/storage/v1beta1" + storagev1beta1 "k8s.io/api/storage/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - storagev1beta1 "k8s.io/client-go/applyconfigurations/storage/v1beta1" + applyconfigurationsstoragev1beta1 "k8s.io/client-go/applyconfigurations/storage/v1beta1" gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" ) @@ -38,32 +38,34 @@ type CSIStorageCapacitiesGetter interface { // CSIStorageCapacityInterface has methods to work with CSIStorageCapacity resources. 
type CSIStorageCapacityInterface interface { - Create(ctx context.Context, cSIStorageCapacity *v1beta1.CSIStorageCapacity, opts v1.CreateOptions) (*v1beta1.CSIStorageCapacity, error) - Update(ctx context.Context, cSIStorageCapacity *v1beta1.CSIStorageCapacity, opts v1.UpdateOptions) (*v1beta1.CSIStorageCapacity, error) + Create(ctx context.Context, cSIStorageCapacity *storagev1beta1.CSIStorageCapacity, opts v1.CreateOptions) (*storagev1beta1.CSIStorageCapacity, error) + Update(ctx context.Context, cSIStorageCapacity *storagev1beta1.CSIStorageCapacity, opts v1.UpdateOptions) (*storagev1beta1.CSIStorageCapacity, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.CSIStorageCapacity, error) - List(ctx context.Context, opts v1.ListOptions) (*v1beta1.CSIStorageCapacityList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*storagev1beta1.CSIStorageCapacity, error) + List(ctx context.Context, opts v1.ListOptions) (*storagev1beta1.CSIStorageCapacityList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.CSIStorageCapacity, err error) - Apply(ctx context.Context, cSIStorageCapacity *storagev1beta1.CSIStorageCapacityApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.CSIStorageCapacity, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *storagev1beta1.CSIStorageCapacity, err error) + Apply(ctx context.Context, cSIStorageCapacity *applyconfigurationsstoragev1beta1.CSIStorageCapacityApplyConfiguration, opts v1.ApplyOptions) (result *storagev1beta1.CSIStorageCapacity, err error) CSIStorageCapacityExpansion } // cSIStorageCapacities implements CSIStorageCapacityInterface type cSIStorageCapacities struct { - *gentype.ClientWithListAndApply[*v1beta1.CSIStorageCapacity, *v1beta1.CSIStorageCapacityList, *storagev1beta1.CSIStorageCapacityApplyConfiguration] + *gentype.ClientWithListAndApply[*storagev1beta1.CSIStorageCapacity, *storagev1beta1.CSIStorageCapacityList, *applyconfigurationsstoragev1beta1.CSIStorageCapacityApplyConfiguration] } // newCSIStorageCapacities returns a CSIStorageCapacities func newCSIStorageCapacities(c *StorageV1beta1Client, namespace string) *cSIStorageCapacities { return &cSIStorageCapacities{ - gentype.NewClientWithListAndApply[*v1beta1.CSIStorageCapacity, *v1beta1.CSIStorageCapacityList, *storagev1beta1.CSIStorageCapacityApplyConfiguration]( + gentype.NewClientWithListAndApply[*storagev1beta1.CSIStorageCapacity, *storagev1beta1.CSIStorageCapacityList, *applyconfigurationsstoragev1beta1.CSIStorageCapacityApplyConfiguration]( "csistoragecapacities", c.RESTClient(), scheme.ParameterCodec, namespace, - func() *v1beta1.CSIStorageCapacity { return &v1beta1.CSIStorageCapacity{} }, - func() *v1beta1.CSIStorageCapacityList { return &v1beta1.CSIStorageCapacityList{} }), + func() *storagev1beta1.CSIStorageCapacity { return &storagev1beta1.CSIStorageCapacity{} }, + func() *storagev1beta1.CSIStorageCapacityList { return &storagev1beta1.CSIStorageCapacityList{} }, + gentype.PrefersProtobuf[*storagev1beta1.CSIStorageCapacity](), + ), } } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_csidriver.go 
b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_csidriver.go index 2b230707f..e67366921 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_csidriver.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_csidriver.go @@ -19,133 +19,33 @@ limitations under the License. package fake import ( - "context" - json "encoding/json" - "fmt" - v1beta1 "k8s.io/api/storage/v1beta1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" storagev1beta1 "k8s.io/client-go/applyconfigurations/storage/v1beta1" - testing "k8s.io/client-go/testing" + gentype "k8s.io/client-go/gentype" + typedstoragev1beta1 "k8s.io/client-go/kubernetes/typed/storage/v1beta1" ) -// FakeCSIDrivers implements CSIDriverInterface -type FakeCSIDrivers struct { +// fakeCSIDrivers implements CSIDriverInterface +type fakeCSIDrivers struct { + *gentype.FakeClientWithListAndApply[*v1beta1.CSIDriver, *v1beta1.CSIDriverList, *storagev1beta1.CSIDriverApplyConfiguration] Fake *FakeStorageV1beta1 } -var csidriversResource = v1beta1.SchemeGroupVersion.WithResource("csidrivers") - -var csidriversKind = v1beta1.SchemeGroupVersion.WithKind("CSIDriver") - -// Get takes name of the cSIDriver, and returns the corresponding cSIDriver object, and an error if there is any. -func (c *FakeCSIDrivers) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.CSIDriver, err error) { - emptyResult := &v1beta1.CSIDriver{} - obj, err := c.Fake. - Invokes(testing.NewRootGetActionWithOptions(csidriversResource, name, options), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.CSIDriver), err -} - -// List takes label and field selectors, and returns the list of CSIDrivers that match those selectors. -func (c *FakeCSIDrivers) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.CSIDriverList, err error) { - emptyResult := &v1beta1.CSIDriverList{} - obj, err := c.Fake. - Invokes(testing.NewRootListActionWithOptions(csidriversResource, csidriversKind, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1beta1.CSIDriverList{ListMeta: obj.(*v1beta1.CSIDriverList).ListMeta} - for _, item := range obj.(*v1beta1.CSIDriverList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested cSIDrivers. -func (c *FakeCSIDrivers) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewRootWatchActionWithOptions(csidriversResource, opts)) -} - -// Create takes the representation of a cSIDriver and creates it. Returns the server's representation of the cSIDriver, and an error, if there is any. -func (c *FakeCSIDrivers) Create(ctx context.Context, cSIDriver *v1beta1.CSIDriver, opts v1.CreateOptions) (result *v1beta1.CSIDriver, err error) { - emptyResult := &v1beta1.CSIDriver{} - obj, err := c.Fake. - Invokes(testing.NewRootCreateActionWithOptions(csidriversResource, cSIDriver, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.CSIDriver), err -} - -// Update takes the representation of a cSIDriver and updates it. 
Returns the server's representation of the cSIDriver, and an error, if there is any. -func (c *FakeCSIDrivers) Update(ctx context.Context, cSIDriver *v1beta1.CSIDriver, opts v1.UpdateOptions) (result *v1beta1.CSIDriver, err error) { - emptyResult := &v1beta1.CSIDriver{} - obj, err := c.Fake. - Invokes(testing.NewRootUpdateActionWithOptions(csidriversResource, cSIDriver, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.CSIDriver), err -} - -// Delete takes name of the cSIDriver and deletes it. Returns an error if one occurs. -func (c *FakeCSIDrivers) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewRootDeleteActionWithOptions(csidriversResource, name, opts), &v1beta1.CSIDriver{}) - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeCSIDrivers) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewRootDeleteCollectionActionWithOptions(csidriversResource, opts, listOpts) - - _, err := c.Fake.Invokes(action, &v1beta1.CSIDriverList{}) - return err -} - -// Patch applies the patch and returns the patched cSIDriver. -func (c *FakeCSIDrivers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.CSIDriver, err error) { - emptyResult := &v1beta1.CSIDriver{} - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceActionWithOptions(csidriversResource, name, pt, data, opts, subresources...), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.CSIDriver), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied cSIDriver. -func (c *FakeCSIDrivers) Apply(ctx context.Context, cSIDriver *storagev1beta1.CSIDriverApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.CSIDriver, err error) { - if cSIDriver == nil { - return nil, fmt.Errorf("cSIDriver provided to Apply must not be nil") - } - data, err := json.Marshal(cSIDriver) - if err != nil { - return nil, err - } - name := cSIDriver.Name - if name == nil { - return nil, fmt.Errorf("cSIDriver.Name must be provided to Apply") - } - emptyResult := &v1beta1.CSIDriver{} - obj, err := c.Fake. 
- Invokes(testing.NewRootPatchSubresourceActionWithOptions(csidriversResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) - if obj == nil { - return emptyResult, err +func newFakeCSIDrivers(fake *FakeStorageV1beta1) typedstoragev1beta1.CSIDriverInterface { + return &fakeCSIDrivers{ + gentype.NewFakeClientWithListAndApply[*v1beta1.CSIDriver, *v1beta1.CSIDriverList, *storagev1beta1.CSIDriverApplyConfiguration]( + fake.Fake, + "", + v1beta1.SchemeGroupVersion.WithResource("csidrivers"), + v1beta1.SchemeGroupVersion.WithKind("CSIDriver"), + func() *v1beta1.CSIDriver { return &v1beta1.CSIDriver{} }, + func() *v1beta1.CSIDriverList { return &v1beta1.CSIDriverList{} }, + func(dst, src *v1beta1.CSIDriverList) { dst.ListMeta = src.ListMeta }, + func(list *v1beta1.CSIDriverList) []*v1beta1.CSIDriver { return gentype.ToPointerSlice(list.Items) }, + func(list *v1beta1.CSIDriverList, items []*v1beta1.CSIDriver) { + list.Items = gentype.FromPointerSlice(items) + }, + ), + fake, } - return obj.(*v1beta1.CSIDriver), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_csinode.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_csinode.go index c5c2b5825..f9c857024 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_csinode.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_csinode.go @@ -19,133 +19,33 @@ limitations under the License. package fake import ( - "context" - json "encoding/json" - "fmt" - v1beta1 "k8s.io/api/storage/v1beta1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" storagev1beta1 "k8s.io/client-go/applyconfigurations/storage/v1beta1" - testing "k8s.io/client-go/testing" + gentype "k8s.io/client-go/gentype" + typedstoragev1beta1 "k8s.io/client-go/kubernetes/typed/storage/v1beta1" ) -// FakeCSINodes implements CSINodeInterface -type FakeCSINodes struct { +// fakeCSINodes implements CSINodeInterface +type fakeCSINodes struct { + *gentype.FakeClientWithListAndApply[*v1beta1.CSINode, *v1beta1.CSINodeList, *storagev1beta1.CSINodeApplyConfiguration] Fake *FakeStorageV1beta1 } -var csinodesResource = v1beta1.SchemeGroupVersion.WithResource("csinodes") - -var csinodesKind = v1beta1.SchemeGroupVersion.WithKind("CSINode") - -// Get takes name of the cSINode, and returns the corresponding cSINode object, and an error if there is any. -func (c *FakeCSINodes) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.CSINode, err error) { - emptyResult := &v1beta1.CSINode{} - obj, err := c.Fake. - Invokes(testing.NewRootGetActionWithOptions(csinodesResource, name, options), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.CSINode), err -} - -// List takes label and field selectors, and returns the list of CSINodes that match those selectors. -func (c *FakeCSINodes) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.CSINodeList, err error) { - emptyResult := &v1beta1.CSINodeList{} - obj, err := c.Fake. 
- Invokes(testing.NewRootListActionWithOptions(csinodesResource, csinodesKind, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1beta1.CSINodeList{ListMeta: obj.(*v1beta1.CSINodeList).ListMeta} - for _, item := range obj.(*v1beta1.CSINodeList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested cSINodes. -func (c *FakeCSINodes) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewRootWatchActionWithOptions(csinodesResource, opts)) -} - -// Create takes the representation of a cSINode and creates it. Returns the server's representation of the cSINode, and an error, if there is any. -func (c *FakeCSINodes) Create(ctx context.Context, cSINode *v1beta1.CSINode, opts v1.CreateOptions) (result *v1beta1.CSINode, err error) { - emptyResult := &v1beta1.CSINode{} - obj, err := c.Fake. - Invokes(testing.NewRootCreateActionWithOptions(csinodesResource, cSINode, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.CSINode), err -} - -// Update takes the representation of a cSINode and updates it. Returns the server's representation of the cSINode, and an error, if there is any. -func (c *FakeCSINodes) Update(ctx context.Context, cSINode *v1beta1.CSINode, opts v1.UpdateOptions) (result *v1beta1.CSINode, err error) { - emptyResult := &v1beta1.CSINode{} - obj, err := c.Fake. - Invokes(testing.NewRootUpdateActionWithOptions(csinodesResource, cSINode, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.CSINode), err -} - -// Delete takes name of the cSINode and deletes it. Returns an error if one occurs. -func (c *FakeCSINodes) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewRootDeleteActionWithOptions(csinodesResource, name, opts), &v1beta1.CSINode{}) - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeCSINodes) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewRootDeleteCollectionActionWithOptions(csinodesResource, opts, listOpts) - - _, err := c.Fake.Invokes(action, &v1beta1.CSINodeList{}) - return err -} - -// Patch applies the patch and returns the patched cSINode. -func (c *FakeCSINodes) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.CSINode, err error) { - emptyResult := &v1beta1.CSINode{} - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceActionWithOptions(csinodesResource, name, pt, data, opts, subresources...), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.CSINode), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied cSINode. 
-func (c *FakeCSINodes) Apply(ctx context.Context, cSINode *storagev1beta1.CSINodeApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.CSINode, err error) { - if cSINode == nil { - return nil, fmt.Errorf("cSINode provided to Apply must not be nil") - } - data, err := json.Marshal(cSINode) - if err != nil { - return nil, err - } - name := cSINode.Name - if name == nil { - return nil, fmt.Errorf("cSINode.Name must be provided to Apply") - } - emptyResult := &v1beta1.CSINode{} - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceActionWithOptions(csinodesResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) - if obj == nil { - return emptyResult, err +func newFakeCSINodes(fake *FakeStorageV1beta1) typedstoragev1beta1.CSINodeInterface { + return &fakeCSINodes{ + gentype.NewFakeClientWithListAndApply[*v1beta1.CSINode, *v1beta1.CSINodeList, *storagev1beta1.CSINodeApplyConfiguration]( + fake.Fake, + "", + v1beta1.SchemeGroupVersion.WithResource("csinodes"), + v1beta1.SchemeGroupVersion.WithKind("CSINode"), + func() *v1beta1.CSINode { return &v1beta1.CSINode{} }, + func() *v1beta1.CSINodeList { return &v1beta1.CSINodeList{} }, + func(dst, src *v1beta1.CSINodeList) { dst.ListMeta = src.ListMeta }, + func(list *v1beta1.CSINodeList) []*v1beta1.CSINode { return gentype.ToPointerSlice(list.Items) }, + func(list *v1beta1.CSINodeList, items []*v1beta1.CSINode) { + list.Items = gentype.FromPointerSlice(items) + }, + ), + fake, } - return obj.(*v1beta1.CSINode), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_csistoragecapacity.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_csistoragecapacity.go index 59a9aaf9d..9a8c2054b 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_csistoragecapacity.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_csistoragecapacity.go @@ -19,142 +19,35 @@ limitations under the License. package fake import ( - "context" - json "encoding/json" - "fmt" - v1beta1 "k8s.io/api/storage/v1beta1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" storagev1beta1 "k8s.io/client-go/applyconfigurations/storage/v1beta1" - testing "k8s.io/client-go/testing" + gentype "k8s.io/client-go/gentype" + typedstoragev1beta1 "k8s.io/client-go/kubernetes/typed/storage/v1beta1" ) -// FakeCSIStorageCapacities implements CSIStorageCapacityInterface -type FakeCSIStorageCapacities struct { +// fakeCSIStorageCapacities implements CSIStorageCapacityInterface +type fakeCSIStorageCapacities struct { + *gentype.FakeClientWithListAndApply[*v1beta1.CSIStorageCapacity, *v1beta1.CSIStorageCapacityList, *storagev1beta1.CSIStorageCapacityApplyConfiguration] Fake *FakeStorageV1beta1 - ns string -} - -var csistoragecapacitiesResource = v1beta1.SchemeGroupVersion.WithResource("csistoragecapacities") - -var csistoragecapacitiesKind = v1beta1.SchemeGroupVersion.WithKind("CSIStorageCapacity") - -// Get takes name of the cSIStorageCapacity, and returns the corresponding cSIStorageCapacity object, and an error if there is any. -func (c *FakeCSIStorageCapacities) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.CSIStorageCapacity, err error) { - emptyResult := &v1beta1.CSIStorageCapacity{} - obj, err := c.Fake. 
- Invokes(testing.NewGetActionWithOptions(csistoragecapacitiesResource, c.ns, name, options), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.CSIStorageCapacity), err -} - -// List takes label and field selectors, and returns the list of CSIStorageCapacities that match those selectors. -func (c *FakeCSIStorageCapacities) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.CSIStorageCapacityList, err error) { - emptyResult := &v1beta1.CSIStorageCapacityList{} - obj, err := c.Fake. - Invokes(testing.NewListActionWithOptions(csistoragecapacitiesResource, csistoragecapacitiesKind, c.ns, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1beta1.CSIStorageCapacityList{ListMeta: obj.(*v1beta1.CSIStorageCapacityList).ListMeta} - for _, item := range obj.(*v1beta1.CSIStorageCapacityList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested cSIStorageCapacities. -func (c *FakeCSIStorageCapacities) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchActionWithOptions(csistoragecapacitiesResource, c.ns, opts)) - } -// Create takes the representation of a cSIStorageCapacity and creates it. Returns the server's representation of the cSIStorageCapacity, and an error, if there is any. -func (c *FakeCSIStorageCapacities) Create(ctx context.Context, cSIStorageCapacity *v1beta1.CSIStorageCapacity, opts v1.CreateOptions) (result *v1beta1.CSIStorageCapacity, err error) { - emptyResult := &v1beta1.CSIStorageCapacity{} - obj, err := c.Fake. - Invokes(testing.NewCreateActionWithOptions(csistoragecapacitiesResource, c.ns, cSIStorageCapacity, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.CSIStorageCapacity), err -} - -// Update takes the representation of a cSIStorageCapacity and updates it. Returns the server's representation of the cSIStorageCapacity, and an error, if there is any. -func (c *FakeCSIStorageCapacities) Update(ctx context.Context, cSIStorageCapacity *v1beta1.CSIStorageCapacity, opts v1.UpdateOptions) (result *v1beta1.CSIStorageCapacity, err error) { - emptyResult := &v1beta1.CSIStorageCapacity{} - obj, err := c.Fake. - Invokes(testing.NewUpdateActionWithOptions(csistoragecapacitiesResource, c.ns, cSIStorageCapacity, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.CSIStorageCapacity), err -} - -// Delete takes name of the cSIStorageCapacity and deletes it. Returns an error if one occurs. -func (c *FakeCSIStorageCapacities) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(csistoragecapacitiesResource, c.ns, name, opts), &v1beta1.CSIStorageCapacity{}) - - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeCSIStorageCapacities) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionActionWithOptions(csistoragecapacitiesResource, c.ns, opts, listOpts) - - _, err := c.Fake.Invokes(action, &v1beta1.CSIStorageCapacityList{}) - return err -} - -// Patch applies the patch and returns the patched cSIStorageCapacity. 
-func (c *FakeCSIStorageCapacities) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.CSIStorageCapacity, err error) { - emptyResult := &v1beta1.CSIStorageCapacity{} - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceActionWithOptions(csistoragecapacitiesResource, c.ns, name, pt, data, opts, subresources...), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.CSIStorageCapacity), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied cSIStorageCapacity. -func (c *FakeCSIStorageCapacities) Apply(ctx context.Context, cSIStorageCapacity *storagev1beta1.CSIStorageCapacityApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.CSIStorageCapacity, err error) { - if cSIStorageCapacity == nil { - return nil, fmt.Errorf("cSIStorageCapacity provided to Apply must not be nil") - } - data, err := json.Marshal(cSIStorageCapacity) - if err != nil { - return nil, err - } - name := cSIStorageCapacity.Name - if name == nil { - return nil, fmt.Errorf("cSIStorageCapacity.Name must be provided to Apply") - } - emptyResult := &v1beta1.CSIStorageCapacity{} - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceActionWithOptions(csistoragecapacitiesResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) - - if obj == nil { - return emptyResult, err +func newFakeCSIStorageCapacities(fake *FakeStorageV1beta1, namespace string) typedstoragev1beta1.CSIStorageCapacityInterface { + return &fakeCSIStorageCapacities{ + gentype.NewFakeClientWithListAndApply[*v1beta1.CSIStorageCapacity, *v1beta1.CSIStorageCapacityList, *storagev1beta1.CSIStorageCapacityApplyConfiguration]( + fake.Fake, + namespace, + v1beta1.SchemeGroupVersion.WithResource("csistoragecapacities"), + v1beta1.SchemeGroupVersion.WithKind("CSIStorageCapacity"), + func() *v1beta1.CSIStorageCapacity { return &v1beta1.CSIStorageCapacity{} }, + func() *v1beta1.CSIStorageCapacityList { return &v1beta1.CSIStorageCapacityList{} }, + func(dst, src *v1beta1.CSIStorageCapacityList) { dst.ListMeta = src.ListMeta }, + func(list *v1beta1.CSIStorageCapacityList) []*v1beta1.CSIStorageCapacity { + return gentype.ToPointerSlice(list.Items) + }, + func(list *v1beta1.CSIStorageCapacityList, items []*v1beta1.CSIStorageCapacity) { + list.Items = gentype.FromPointerSlice(items) + }, + ), + fake, } - return obj.(*v1beta1.CSIStorageCapacity), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_storage_client.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_storage_client.go index 470281607..8c74de727 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_storage_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_storage_client.go @@ -29,27 +29,27 @@ type FakeStorageV1beta1 struct { } func (c *FakeStorageV1beta1) CSIDrivers() v1beta1.CSIDriverInterface { - return &FakeCSIDrivers{c} + return newFakeCSIDrivers(c) } func (c *FakeStorageV1beta1) CSINodes() v1beta1.CSINodeInterface { - return &FakeCSINodes{c} + return newFakeCSINodes(c) } func (c *FakeStorageV1beta1) CSIStorageCapacities(namespace string) v1beta1.CSIStorageCapacityInterface { - return &FakeCSIStorageCapacities{c, namespace} + return newFakeCSIStorageCapacities(c, namespace) } func (c *FakeStorageV1beta1) StorageClasses() v1beta1.StorageClassInterface { - return &FakeStorageClasses{c} + return 
newFakeStorageClasses(c) } func (c *FakeStorageV1beta1) VolumeAttachments() v1beta1.VolumeAttachmentInterface { - return &FakeVolumeAttachments{c} + return newFakeVolumeAttachments(c) } func (c *FakeStorageV1beta1) VolumeAttributesClasses() v1beta1.VolumeAttributesClassInterface { - return &FakeVolumeAttributesClasses{c} + return newFakeVolumeAttributesClasses(c) } // RESTClient returns a RESTClient that is used to communicate diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_storageclass.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_storageclass.go index 954a34608..0ce78cc74 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_storageclass.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_storageclass.go @@ -19,133 +19,35 @@ limitations under the License. package fake import ( - "context" - json "encoding/json" - "fmt" - v1beta1 "k8s.io/api/storage/v1beta1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" storagev1beta1 "k8s.io/client-go/applyconfigurations/storage/v1beta1" - testing "k8s.io/client-go/testing" + gentype "k8s.io/client-go/gentype" + typedstoragev1beta1 "k8s.io/client-go/kubernetes/typed/storage/v1beta1" ) -// FakeStorageClasses implements StorageClassInterface -type FakeStorageClasses struct { +// fakeStorageClasses implements StorageClassInterface +type fakeStorageClasses struct { + *gentype.FakeClientWithListAndApply[*v1beta1.StorageClass, *v1beta1.StorageClassList, *storagev1beta1.StorageClassApplyConfiguration] Fake *FakeStorageV1beta1 } -var storageclassesResource = v1beta1.SchemeGroupVersion.WithResource("storageclasses") - -var storageclassesKind = v1beta1.SchemeGroupVersion.WithKind("StorageClass") - -// Get takes name of the storageClass, and returns the corresponding storageClass object, and an error if there is any. -func (c *FakeStorageClasses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.StorageClass, err error) { - emptyResult := &v1beta1.StorageClass{} - obj, err := c.Fake. - Invokes(testing.NewRootGetActionWithOptions(storageclassesResource, name, options), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.StorageClass), err -} - -// List takes label and field selectors, and returns the list of StorageClasses that match those selectors. -func (c *FakeStorageClasses) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.StorageClassList, err error) { - emptyResult := &v1beta1.StorageClassList{} - obj, err := c.Fake. - Invokes(testing.NewRootListActionWithOptions(storageclassesResource, storageclassesKind, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1beta1.StorageClassList{ListMeta: obj.(*v1beta1.StorageClassList).ListMeta} - for _, item := range obj.(*v1beta1.StorageClassList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested storageClasses. -func (c *FakeStorageClasses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. 
- InvokesWatch(testing.NewRootWatchActionWithOptions(storageclassesResource, opts)) -} - -// Create takes the representation of a storageClass and creates it. Returns the server's representation of the storageClass, and an error, if there is any. -func (c *FakeStorageClasses) Create(ctx context.Context, storageClass *v1beta1.StorageClass, opts v1.CreateOptions) (result *v1beta1.StorageClass, err error) { - emptyResult := &v1beta1.StorageClass{} - obj, err := c.Fake. - Invokes(testing.NewRootCreateActionWithOptions(storageclassesResource, storageClass, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.StorageClass), err -} - -// Update takes the representation of a storageClass and updates it. Returns the server's representation of the storageClass, and an error, if there is any. -func (c *FakeStorageClasses) Update(ctx context.Context, storageClass *v1beta1.StorageClass, opts v1.UpdateOptions) (result *v1beta1.StorageClass, err error) { - emptyResult := &v1beta1.StorageClass{} - obj, err := c.Fake. - Invokes(testing.NewRootUpdateActionWithOptions(storageclassesResource, storageClass, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.StorageClass), err -} - -// Delete takes name of the storageClass and deletes it. Returns an error if one occurs. -func (c *FakeStorageClasses) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewRootDeleteActionWithOptions(storageclassesResource, name, opts), &v1beta1.StorageClass{}) - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeStorageClasses) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewRootDeleteCollectionActionWithOptions(storageclassesResource, opts, listOpts) - - _, err := c.Fake.Invokes(action, &v1beta1.StorageClassList{}) - return err -} - -// Patch applies the patch and returns the patched storageClass. -func (c *FakeStorageClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.StorageClass, err error) { - emptyResult := &v1beta1.StorageClass{} - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceActionWithOptions(storageclassesResource, name, pt, data, opts, subresources...), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.StorageClass), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied storageClass. -func (c *FakeStorageClasses) Apply(ctx context.Context, storageClass *storagev1beta1.StorageClassApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.StorageClass, err error) { - if storageClass == nil { - return nil, fmt.Errorf("storageClass provided to Apply must not be nil") - } - data, err := json.Marshal(storageClass) - if err != nil { - return nil, err - } - name := storageClass.Name - if name == nil { - return nil, fmt.Errorf("storageClass.Name must be provided to Apply") - } - emptyResult := &v1beta1.StorageClass{} - obj, err := c.Fake. 
- Invokes(testing.NewRootPatchSubresourceActionWithOptions(storageclassesResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) - if obj == nil { - return emptyResult, err +func newFakeStorageClasses(fake *FakeStorageV1beta1) typedstoragev1beta1.StorageClassInterface { + return &fakeStorageClasses{ + gentype.NewFakeClientWithListAndApply[*v1beta1.StorageClass, *v1beta1.StorageClassList, *storagev1beta1.StorageClassApplyConfiguration]( + fake.Fake, + "", + v1beta1.SchemeGroupVersion.WithResource("storageclasses"), + v1beta1.SchemeGroupVersion.WithKind("StorageClass"), + func() *v1beta1.StorageClass { return &v1beta1.StorageClass{} }, + func() *v1beta1.StorageClassList { return &v1beta1.StorageClassList{} }, + func(dst, src *v1beta1.StorageClassList) { dst.ListMeta = src.ListMeta }, + func(list *v1beta1.StorageClassList) []*v1beta1.StorageClass { + return gentype.ToPointerSlice(list.Items) + }, + func(list *v1beta1.StorageClassList, items []*v1beta1.StorageClass) { + list.Items = gentype.FromPointerSlice(items) + }, + ), + fake, } - return obj.(*v1beta1.StorageClass), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_volumeattachment.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_volumeattachment.go index 247f7ca62..29ac6c4c7 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_volumeattachment.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_volumeattachment.go @@ -19,168 +19,35 @@ limitations under the License. package fake import ( - "context" - json "encoding/json" - "fmt" - v1beta1 "k8s.io/api/storage/v1beta1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" storagev1beta1 "k8s.io/client-go/applyconfigurations/storage/v1beta1" - testing "k8s.io/client-go/testing" + gentype "k8s.io/client-go/gentype" + typedstoragev1beta1 "k8s.io/client-go/kubernetes/typed/storage/v1beta1" ) -// FakeVolumeAttachments implements VolumeAttachmentInterface -type FakeVolumeAttachments struct { +// fakeVolumeAttachments implements VolumeAttachmentInterface +type fakeVolumeAttachments struct { + *gentype.FakeClientWithListAndApply[*v1beta1.VolumeAttachment, *v1beta1.VolumeAttachmentList, *storagev1beta1.VolumeAttachmentApplyConfiguration] Fake *FakeStorageV1beta1 } -var volumeattachmentsResource = v1beta1.SchemeGroupVersion.WithResource("volumeattachments") - -var volumeattachmentsKind = v1beta1.SchemeGroupVersion.WithKind("VolumeAttachment") - -// Get takes name of the volumeAttachment, and returns the corresponding volumeAttachment object, and an error if there is any. -func (c *FakeVolumeAttachments) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.VolumeAttachment, err error) { - emptyResult := &v1beta1.VolumeAttachment{} - obj, err := c.Fake. - Invokes(testing.NewRootGetActionWithOptions(volumeattachmentsResource, name, options), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.VolumeAttachment), err -} - -// List takes label and field selectors, and returns the list of VolumeAttachments that match those selectors. -func (c *FakeVolumeAttachments) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.VolumeAttachmentList, err error) { - emptyResult := &v1beta1.VolumeAttachmentList{} - obj, err := c.Fake. 
- Invokes(testing.NewRootListActionWithOptions(volumeattachmentsResource, volumeattachmentsKind, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1beta1.VolumeAttachmentList{ListMeta: obj.(*v1beta1.VolumeAttachmentList).ListMeta} - for _, item := range obj.(*v1beta1.VolumeAttachmentList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested volumeAttachments. -func (c *FakeVolumeAttachments) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewRootWatchActionWithOptions(volumeattachmentsResource, opts)) -} - -// Create takes the representation of a volumeAttachment and creates it. Returns the server's representation of the volumeAttachment, and an error, if there is any. -func (c *FakeVolumeAttachments) Create(ctx context.Context, volumeAttachment *v1beta1.VolumeAttachment, opts v1.CreateOptions) (result *v1beta1.VolumeAttachment, err error) { - emptyResult := &v1beta1.VolumeAttachment{} - obj, err := c.Fake. - Invokes(testing.NewRootCreateActionWithOptions(volumeattachmentsResource, volumeAttachment, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.VolumeAttachment), err -} - -// Update takes the representation of a volumeAttachment and updates it. Returns the server's representation of the volumeAttachment, and an error, if there is any. -func (c *FakeVolumeAttachments) Update(ctx context.Context, volumeAttachment *v1beta1.VolumeAttachment, opts v1.UpdateOptions) (result *v1beta1.VolumeAttachment, err error) { - emptyResult := &v1beta1.VolumeAttachment{} - obj, err := c.Fake. - Invokes(testing.NewRootUpdateActionWithOptions(volumeattachmentsResource, volumeAttachment, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.VolumeAttachment), err -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeVolumeAttachments) UpdateStatus(ctx context.Context, volumeAttachment *v1beta1.VolumeAttachment, opts v1.UpdateOptions) (result *v1beta1.VolumeAttachment, err error) { - emptyResult := &v1beta1.VolumeAttachment{} - obj, err := c.Fake. - Invokes(testing.NewRootUpdateSubresourceActionWithOptions(volumeattachmentsResource, "status", volumeAttachment, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.VolumeAttachment), err -} - -// Delete takes name of the volumeAttachment and deletes it. Returns an error if one occurs. -func (c *FakeVolumeAttachments) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewRootDeleteActionWithOptions(volumeattachmentsResource, name, opts), &v1beta1.VolumeAttachment{}) - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeVolumeAttachments) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewRootDeleteCollectionActionWithOptions(volumeattachmentsResource, opts, listOpts) - - _, err := c.Fake.Invokes(action, &v1beta1.VolumeAttachmentList{}) - return err -} - -// Patch applies the patch and returns the patched volumeAttachment. 
-func (c *FakeVolumeAttachments) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.VolumeAttachment, err error) { - emptyResult := &v1beta1.VolumeAttachment{} - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceActionWithOptions(volumeattachmentsResource, name, pt, data, opts, subresources...), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.VolumeAttachment), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied volumeAttachment. -func (c *FakeVolumeAttachments) Apply(ctx context.Context, volumeAttachment *storagev1beta1.VolumeAttachmentApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.VolumeAttachment, err error) { - if volumeAttachment == nil { - return nil, fmt.Errorf("volumeAttachment provided to Apply must not be nil") - } - data, err := json.Marshal(volumeAttachment) - if err != nil { - return nil, err - } - name := volumeAttachment.Name - if name == nil { - return nil, fmt.Errorf("volumeAttachment.Name must be provided to Apply") - } - emptyResult := &v1beta1.VolumeAttachment{} - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceActionWithOptions(volumeattachmentsResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.VolumeAttachment), err -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *FakeVolumeAttachments) ApplyStatus(ctx context.Context, volumeAttachment *storagev1beta1.VolumeAttachmentApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.VolumeAttachment, err error) { - if volumeAttachment == nil { - return nil, fmt.Errorf("volumeAttachment provided to Apply must not be nil") - } - data, err := json.Marshal(volumeAttachment) - if err != nil { - return nil, err - } - name := volumeAttachment.Name - if name == nil { - return nil, fmt.Errorf("volumeAttachment.Name must be provided to Apply") - } - emptyResult := &v1beta1.VolumeAttachment{} - obj, err := c.Fake. 
- Invokes(testing.NewRootPatchSubresourceActionWithOptions(volumeattachmentsResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) - if obj == nil { - return emptyResult, err +func newFakeVolumeAttachments(fake *FakeStorageV1beta1) typedstoragev1beta1.VolumeAttachmentInterface { + return &fakeVolumeAttachments{ + gentype.NewFakeClientWithListAndApply[*v1beta1.VolumeAttachment, *v1beta1.VolumeAttachmentList, *storagev1beta1.VolumeAttachmentApplyConfiguration]( + fake.Fake, + "", + v1beta1.SchemeGroupVersion.WithResource("volumeattachments"), + v1beta1.SchemeGroupVersion.WithKind("VolumeAttachment"), + func() *v1beta1.VolumeAttachment { return &v1beta1.VolumeAttachment{} }, + func() *v1beta1.VolumeAttachmentList { return &v1beta1.VolumeAttachmentList{} }, + func(dst, src *v1beta1.VolumeAttachmentList) { dst.ListMeta = src.ListMeta }, + func(list *v1beta1.VolumeAttachmentList) []*v1beta1.VolumeAttachment { + return gentype.ToPointerSlice(list.Items) + }, + func(list *v1beta1.VolumeAttachmentList, items []*v1beta1.VolumeAttachment) { + list.Items = gentype.FromPointerSlice(items) + }, + ), + fake, } - return obj.(*v1beta1.VolumeAttachment), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_volumeattributesclass.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_volumeattributesclass.go index 3cef7291a..0197e0b37 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_volumeattributesclass.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_volumeattributesclass.go @@ -19,133 +19,35 @@ limitations under the License. package fake import ( - "context" - json "encoding/json" - "fmt" - v1beta1 "k8s.io/api/storage/v1beta1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" storagev1beta1 "k8s.io/client-go/applyconfigurations/storage/v1beta1" - testing "k8s.io/client-go/testing" + gentype "k8s.io/client-go/gentype" + typedstoragev1beta1 "k8s.io/client-go/kubernetes/typed/storage/v1beta1" ) -// FakeVolumeAttributesClasses implements VolumeAttributesClassInterface -type FakeVolumeAttributesClasses struct { +// fakeVolumeAttributesClasses implements VolumeAttributesClassInterface +type fakeVolumeAttributesClasses struct { + *gentype.FakeClientWithListAndApply[*v1beta1.VolumeAttributesClass, *v1beta1.VolumeAttributesClassList, *storagev1beta1.VolumeAttributesClassApplyConfiguration] Fake *FakeStorageV1beta1 } -var volumeattributesclassesResource = v1beta1.SchemeGroupVersion.WithResource("volumeattributesclasses") - -var volumeattributesclassesKind = v1beta1.SchemeGroupVersion.WithKind("VolumeAttributesClass") - -// Get takes name of the volumeAttributesClass, and returns the corresponding volumeAttributesClass object, and an error if there is any. -func (c *FakeVolumeAttributesClasses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.VolumeAttributesClass, err error) { - emptyResult := &v1beta1.VolumeAttributesClass{} - obj, err := c.Fake. - Invokes(testing.NewRootGetActionWithOptions(volumeattributesclassesResource, name, options), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.VolumeAttributesClass), err -} - -// List takes label and field selectors, and returns the list of VolumeAttributesClasses that match those selectors. 
-func (c *FakeVolumeAttributesClasses) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.VolumeAttributesClassList, err error) { - emptyResult := &v1beta1.VolumeAttributesClassList{} - obj, err := c.Fake. - Invokes(testing.NewRootListActionWithOptions(volumeattributesclassesResource, volumeattributesclassesKind, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1beta1.VolumeAttributesClassList{ListMeta: obj.(*v1beta1.VolumeAttributesClassList).ListMeta} - for _, item := range obj.(*v1beta1.VolumeAttributesClassList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested volumeAttributesClasses. -func (c *FakeVolumeAttributesClasses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewRootWatchActionWithOptions(volumeattributesclassesResource, opts)) -} - -// Create takes the representation of a volumeAttributesClass and creates it. Returns the server's representation of the volumeAttributesClass, and an error, if there is any. -func (c *FakeVolumeAttributesClasses) Create(ctx context.Context, volumeAttributesClass *v1beta1.VolumeAttributesClass, opts v1.CreateOptions) (result *v1beta1.VolumeAttributesClass, err error) { - emptyResult := &v1beta1.VolumeAttributesClass{} - obj, err := c.Fake. - Invokes(testing.NewRootCreateActionWithOptions(volumeattributesclassesResource, volumeAttributesClass, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.VolumeAttributesClass), err -} - -// Update takes the representation of a volumeAttributesClass and updates it. Returns the server's representation of the volumeAttributesClass, and an error, if there is any. -func (c *FakeVolumeAttributesClasses) Update(ctx context.Context, volumeAttributesClass *v1beta1.VolumeAttributesClass, opts v1.UpdateOptions) (result *v1beta1.VolumeAttributesClass, err error) { - emptyResult := &v1beta1.VolumeAttributesClass{} - obj, err := c.Fake. - Invokes(testing.NewRootUpdateActionWithOptions(volumeattributesclassesResource, volumeAttributesClass, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.VolumeAttributesClass), err -} - -// Delete takes name of the volumeAttributesClass and deletes it. Returns an error if one occurs. -func (c *FakeVolumeAttributesClasses) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewRootDeleteActionWithOptions(volumeattributesclassesResource, name, opts), &v1beta1.VolumeAttributesClass{}) - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeVolumeAttributesClasses) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewRootDeleteCollectionActionWithOptions(volumeattributesclassesResource, opts, listOpts) - - _, err := c.Fake.Invokes(action, &v1beta1.VolumeAttributesClassList{}) - return err -} - -// Patch applies the patch and returns the patched volumeAttributesClass. 
-func (c *FakeVolumeAttributesClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.VolumeAttributesClass, err error) { - emptyResult := &v1beta1.VolumeAttributesClass{} - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceActionWithOptions(volumeattributesclassesResource, name, pt, data, opts, subresources...), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.VolumeAttributesClass), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied volumeAttributesClass. -func (c *FakeVolumeAttributesClasses) Apply(ctx context.Context, volumeAttributesClass *storagev1beta1.VolumeAttributesClassApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.VolumeAttributesClass, err error) { - if volumeAttributesClass == nil { - return nil, fmt.Errorf("volumeAttributesClass provided to Apply must not be nil") - } - data, err := json.Marshal(volumeAttributesClass) - if err != nil { - return nil, err - } - name := volumeAttributesClass.Name - if name == nil { - return nil, fmt.Errorf("volumeAttributesClass.Name must be provided to Apply") - } - emptyResult := &v1beta1.VolumeAttributesClass{} - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceActionWithOptions(volumeattributesclassesResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) - if obj == nil { - return emptyResult, err +func newFakeVolumeAttributesClasses(fake *FakeStorageV1beta1) typedstoragev1beta1.VolumeAttributesClassInterface { + return &fakeVolumeAttributesClasses{ + gentype.NewFakeClientWithListAndApply[*v1beta1.VolumeAttributesClass, *v1beta1.VolumeAttributesClassList, *storagev1beta1.VolumeAttributesClassApplyConfiguration]( + fake.Fake, + "", + v1beta1.SchemeGroupVersion.WithResource("volumeattributesclasses"), + v1beta1.SchemeGroupVersion.WithKind("VolumeAttributesClass"), + func() *v1beta1.VolumeAttributesClass { return &v1beta1.VolumeAttributesClass{} }, + func() *v1beta1.VolumeAttributesClassList { return &v1beta1.VolumeAttributesClassList{} }, + func(dst, src *v1beta1.VolumeAttributesClassList) { dst.ListMeta = src.ListMeta }, + func(list *v1beta1.VolumeAttributesClassList) []*v1beta1.VolumeAttributesClass { + return gentype.ToPointerSlice(list.Items) + }, + func(list *v1beta1.VolumeAttributesClassList, items []*v1beta1.VolumeAttributesClass) { + list.Items = gentype.FromPointerSlice(items) + }, + ), + fake, } - return obj.(*v1beta1.VolumeAttributesClass), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storage_client.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storage_client.go index 3d1b59e36..63b1d42a3 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storage_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storage_client.go @@ -19,10 +19,10 @@ limitations under the License. 
package v1beta1 import ( - "net/http" + http "net/http" - v1beta1 "k8s.io/api/storage/v1beta1" - "k8s.io/client-go/kubernetes/scheme" + storagev1beta1 "k8s.io/api/storage/v1beta1" + scheme "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -110,10 +110,10 @@ func New(c rest.Interface) *StorageV1beta1Client { } func setConfigDefaults(config *rest.Config) error { - gv := v1beta1.SchemeGroupVersion + gv := storagev1beta1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storageclass.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storageclass.go index fed699cc8..341d5ba82 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storageclass.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storageclass.go @@ -19,13 +19,13 @@ limitations under the License. package v1beta1 import ( - "context" + context "context" - v1beta1 "k8s.io/api/storage/v1beta1" + storagev1beta1 "k8s.io/api/storage/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - storagev1beta1 "k8s.io/client-go/applyconfigurations/storage/v1beta1" + applyconfigurationsstoragev1beta1 "k8s.io/client-go/applyconfigurations/storage/v1beta1" gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" ) @@ -38,32 +38,34 @@ type StorageClassesGetter interface { // StorageClassInterface has methods to work with StorageClass resources. 
type StorageClassInterface interface { - Create(ctx context.Context, storageClass *v1beta1.StorageClass, opts v1.CreateOptions) (*v1beta1.StorageClass, error) - Update(ctx context.Context, storageClass *v1beta1.StorageClass, opts v1.UpdateOptions) (*v1beta1.StorageClass, error) + Create(ctx context.Context, storageClass *storagev1beta1.StorageClass, opts v1.CreateOptions) (*storagev1beta1.StorageClass, error) + Update(ctx context.Context, storageClass *storagev1beta1.StorageClass, opts v1.UpdateOptions) (*storagev1beta1.StorageClass, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.StorageClass, error) - List(ctx context.Context, opts v1.ListOptions) (*v1beta1.StorageClassList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*storagev1beta1.StorageClass, error) + List(ctx context.Context, opts v1.ListOptions) (*storagev1beta1.StorageClassList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.StorageClass, err error) - Apply(ctx context.Context, storageClass *storagev1beta1.StorageClassApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.StorageClass, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *storagev1beta1.StorageClass, err error) + Apply(ctx context.Context, storageClass *applyconfigurationsstoragev1beta1.StorageClassApplyConfiguration, opts v1.ApplyOptions) (result *storagev1beta1.StorageClass, err error) StorageClassExpansion } // storageClasses implements StorageClassInterface type storageClasses struct { - *gentype.ClientWithListAndApply[*v1beta1.StorageClass, *v1beta1.StorageClassList, *storagev1beta1.StorageClassApplyConfiguration] + *gentype.ClientWithListAndApply[*storagev1beta1.StorageClass, *storagev1beta1.StorageClassList, *applyconfigurationsstoragev1beta1.StorageClassApplyConfiguration] } // newStorageClasses returns a StorageClasses func newStorageClasses(c *StorageV1beta1Client) *storageClasses { return &storageClasses{ - gentype.NewClientWithListAndApply[*v1beta1.StorageClass, *v1beta1.StorageClassList, *storagev1beta1.StorageClassApplyConfiguration]( + gentype.NewClientWithListAndApply[*storagev1beta1.StorageClass, *storagev1beta1.StorageClassList, *applyconfigurationsstoragev1beta1.StorageClassApplyConfiguration]( "storageclasses", c.RESTClient(), scheme.ParameterCodec, "", - func() *v1beta1.StorageClass { return &v1beta1.StorageClass{} }, - func() *v1beta1.StorageClassList { return &v1beta1.StorageClassList{} }), + func() *storagev1beta1.StorageClass { return &storagev1beta1.StorageClass{} }, + func() *storagev1beta1.StorageClassList { return &storagev1beta1.StorageClassList{} }, + gentype.PrefersProtobuf[*storagev1beta1.StorageClass](), + ), } } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/volumeattachment.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/volumeattachment.go index 01024ce48..42c1bd7e0 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/volumeattachment.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/volumeattachment.go @@ -19,13 +19,13 @@ limitations under the License. 
package v1beta1 import ( - "context" + context "context" - v1beta1 "k8s.io/api/storage/v1beta1" + storagev1beta1 "k8s.io/api/storage/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - storagev1beta1 "k8s.io/client-go/applyconfigurations/storage/v1beta1" + applyconfigurationsstoragev1beta1 "k8s.io/client-go/applyconfigurations/storage/v1beta1" gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" ) @@ -38,36 +38,38 @@ type VolumeAttachmentsGetter interface { // VolumeAttachmentInterface has methods to work with VolumeAttachment resources. type VolumeAttachmentInterface interface { - Create(ctx context.Context, volumeAttachment *v1beta1.VolumeAttachment, opts v1.CreateOptions) (*v1beta1.VolumeAttachment, error) - Update(ctx context.Context, volumeAttachment *v1beta1.VolumeAttachment, opts v1.UpdateOptions) (*v1beta1.VolumeAttachment, error) + Create(ctx context.Context, volumeAttachment *storagev1beta1.VolumeAttachment, opts v1.CreateOptions) (*storagev1beta1.VolumeAttachment, error) + Update(ctx context.Context, volumeAttachment *storagev1beta1.VolumeAttachment, opts v1.UpdateOptions) (*storagev1beta1.VolumeAttachment, error) // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - UpdateStatus(ctx context.Context, volumeAttachment *v1beta1.VolumeAttachment, opts v1.UpdateOptions) (*v1beta1.VolumeAttachment, error) + UpdateStatus(ctx context.Context, volumeAttachment *storagev1beta1.VolumeAttachment, opts v1.UpdateOptions) (*storagev1beta1.VolumeAttachment, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.VolumeAttachment, error) - List(ctx context.Context, opts v1.ListOptions) (*v1beta1.VolumeAttachmentList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*storagev1beta1.VolumeAttachment, error) + List(ctx context.Context, opts v1.ListOptions) (*storagev1beta1.VolumeAttachmentList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.VolumeAttachment, err error) - Apply(ctx context.Context, volumeAttachment *storagev1beta1.VolumeAttachmentApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.VolumeAttachment, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *storagev1beta1.VolumeAttachment, err error) + Apply(ctx context.Context, volumeAttachment *applyconfigurationsstoragev1beta1.VolumeAttachmentApplyConfiguration, opts v1.ApplyOptions) (result *storagev1beta1.VolumeAttachment, err error) // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
- ApplyStatus(ctx context.Context, volumeAttachment *storagev1beta1.VolumeAttachmentApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.VolumeAttachment, err error) + ApplyStatus(ctx context.Context, volumeAttachment *applyconfigurationsstoragev1beta1.VolumeAttachmentApplyConfiguration, opts v1.ApplyOptions) (result *storagev1beta1.VolumeAttachment, err error) VolumeAttachmentExpansion } // volumeAttachments implements VolumeAttachmentInterface type volumeAttachments struct { - *gentype.ClientWithListAndApply[*v1beta1.VolumeAttachment, *v1beta1.VolumeAttachmentList, *storagev1beta1.VolumeAttachmentApplyConfiguration] + *gentype.ClientWithListAndApply[*storagev1beta1.VolumeAttachment, *storagev1beta1.VolumeAttachmentList, *applyconfigurationsstoragev1beta1.VolumeAttachmentApplyConfiguration] } // newVolumeAttachments returns a VolumeAttachments func newVolumeAttachments(c *StorageV1beta1Client) *volumeAttachments { return &volumeAttachments{ - gentype.NewClientWithListAndApply[*v1beta1.VolumeAttachment, *v1beta1.VolumeAttachmentList, *storagev1beta1.VolumeAttachmentApplyConfiguration]( + gentype.NewClientWithListAndApply[*storagev1beta1.VolumeAttachment, *storagev1beta1.VolumeAttachmentList, *applyconfigurationsstoragev1beta1.VolumeAttachmentApplyConfiguration]( "volumeattachments", c.RESTClient(), scheme.ParameterCodec, "", - func() *v1beta1.VolumeAttachment { return &v1beta1.VolumeAttachment{} }, - func() *v1beta1.VolumeAttachmentList { return &v1beta1.VolumeAttachmentList{} }), + func() *storagev1beta1.VolumeAttachment { return &storagev1beta1.VolumeAttachment{} }, + func() *storagev1beta1.VolumeAttachmentList { return &storagev1beta1.VolumeAttachmentList{} }, + gentype.PrefersProtobuf[*storagev1beta1.VolumeAttachment](), + ), } } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/volumeattributesclass.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/volumeattributesclass.go index 47eadcac6..09f9f1178 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/volumeattributesclass.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/volumeattributesclass.go @@ -19,13 +19,13 @@ limitations under the License. package v1beta1 import ( - "context" + context "context" - v1beta1 "k8s.io/api/storage/v1beta1" + storagev1beta1 "k8s.io/api/storage/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - storagev1beta1 "k8s.io/client-go/applyconfigurations/storage/v1beta1" + applyconfigurationsstoragev1beta1 "k8s.io/client-go/applyconfigurations/storage/v1beta1" gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" ) @@ -38,32 +38,34 @@ type VolumeAttributesClassesGetter interface { // VolumeAttributesClassInterface has methods to work with VolumeAttributesClass resources. 
type VolumeAttributesClassInterface interface { - Create(ctx context.Context, volumeAttributesClass *v1beta1.VolumeAttributesClass, opts v1.CreateOptions) (*v1beta1.VolumeAttributesClass, error) - Update(ctx context.Context, volumeAttributesClass *v1beta1.VolumeAttributesClass, opts v1.UpdateOptions) (*v1beta1.VolumeAttributesClass, error) + Create(ctx context.Context, volumeAttributesClass *storagev1beta1.VolumeAttributesClass, opts v1.CreateOptions) (*storagev1beta1.VolumeAttributesClass, error) + Update(ctx context.Context, volumeAttributesClass *storagev1beta1.VolumeAttributesClass, opts v1.UpdateOptions) (*storagev1beta1.VolumeAttributesClass, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.VolumeAttributesClass, error) - List(ctx context.Context, opts v1.ListOptions) (*v1beta1.VolumeAttributesClassList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*storagev1beta1.VolumeAttributesClass, error) + List(ctx context.Context, opts v1.ListOptions) (*storagev1beta1.VolumeAttributesClassList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.VolumeAttributesClass, err error) - Apply(ctx context.Context, volumeAttributesClass *storagev1beta1.VolumeAttributesClassApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.VolumeAttributesClass, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *storagev1beta1.VolumeAttributesClass, err error) + Apply(ctx context.Context, volumeAttributesClass *applyconfigurationsstoragev1beta1.VolumeAttributesClassApplyConfiguration, opts v1.ApplyOptions) (result *storagev1beta1.VolumeAttributesClass, err error) VolumeAttributesClassExpansion } // volumeAttributesClasses implements VolumeAttributesClassInterface type volumeAttributesClasses struct { - *gentype.ClientWithListAndApply[*v1beta1.VolumeAttributesClass, *v1beta1.VolumeAttributesClassList, *storagev1beta1.VolumeAttributesClassApplyConfiguration] + *gentype.ClientWithListAndApply[*storagev1beta1.VolumeAttributesClass, *storagev1beta1.VolumeAttributesClassList, *applyconfigurationsstoragev1beta1.VolumeAttributesClassApplyConfiguration] } // newVolumeAttributesClasses returns a VolumeAttributesClasses func newVolumeAttributesClasses(c *StorageV1beta1Client) *volumeAttributesClasses { return &volumeAttributesClasses{ - gentype.NewClientWithListAndApply[*v1beta1.VolumeAttributesClass, *v1beta1.VolumeAttributesClassList, *storagev1beta1.VolumeAttributesClassApplyConfiguration]( + gentype.NewClientWithListAndApply[*storagev1beta1.VolumeAttributesClass, *storagev1beta1.VolumeAttributesClassList, *applyconfigurationsstoragev1beta1.VolumeAttributesClassApplyConfiguration]( "volumeattributesclasses", c.RESTClient(), scheme.ParameterCodec, "", - func() *v1beta1.VolumeAttributesClass { return &v1beta1.VolumeAttributesClass{} }, - func() *v1beta1.VolumeAttributesClassList { return &v1beta1.VolumeAttributesClassList{} }), + func() *storagev1beta1.VolumeAttributesClass { return &storagev1beta1.VolumeAttributesClass{} }, + func() *storagev1beta1.VolumeAttributesClassList { return &storagev1beta1.VolumeAttributesClassList{} }, + 
gentype.PrefersProtobuf[*storagev1beta1.VolumeAttributesClass](), + ), } } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storagemigration/v1alpha1/fake/fake_storagemigration_client.go b/vendor/k8s.io/client-go/kubernetes/typed/storagemigration/v1alpha1/fake/fake_storagemigration_client.go index 3ae8f4ae5..c33a1c016 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storagemigration/v1alpha1/fake/fake_storagemigration_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storagemigration/v1alpha1/fake/fake_storagemigration_client.go @@ -29,7 +29,7 @@ type FakeStoragemigrationV1alpha1 struct { } func (c *FakeStoragemigrationV1alpha1) StorageVersionMigrations() v1alpha1.StorageVersionMigrationInterface { - return &FakeStorageVersionMigrations{c} + return newFakeStorageVersionMigrations(c) } // RESTClient returns a RESTClient that is used to communicate diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storagemigration/v1alpha1/fake/fake_storageversionmigration.go b/vendor/k8s.io/client-go/kubernetes/typed/storagemigration/v1alpha1/fake/fake_storageversionmigration.go index c3ff23591..02de9f302 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storagemigration/v1alpha1/fake/fake_storageversionmigration.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storagemigration/v1alpha1/fake/fake_storageversionmigration.go @@ -19,168 +19,35 @@ limitations under the License. package fake import ( - "context" - json "encoding/json" - "fmt" - v1alpha1 "k8s.io/api/storagemigration/v1alpha1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" storagemigrationv1alpha1 "k8s.io/client-go/applyconfigurations/storagemigration/v1alpha1" - testing "k8s.io/client-go/testing" + gentype "k8s.io/client-go/gentype" + typedstoragemigrationv1alpha1 "k8s.io/client-go/kubernetes/typed/storagemigration/v1alpha1" ) -// FakeStorageVersionMigrations implements StorageVersionMigrationInterface -type FakeStorageVersionMigrations struct { +// fakeStorageVersionMigrations implements StorageVersionMigrationInterface +type fakeStorageVersionMigrations struct { + *gentype.FakeClientWithListAndApply[*v1alpha1.StorageVersionMigration, *v1alpha1.StorageVersionMigrationList, *storagemigrationv1alpha1.StorageVersionMigrationApplyConfiguration] Fake *FakeStoragemigrationV1alpha1 } -var storageversionmigrationsResource = v1alpha1.SchemeGroupVersion.WithResource("storageversionmigrations") - -var storageversionmigrationsKind = v1alpha1.SchemeGroupVersion.WithKind("StorageVersionMigration") - -// Get takes name of the storageVersionMigration, and returns the corresponding storageVersionMigration object, and an error if there is any. -func (c *FakeStorageVersionMigrations) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.StorageVersionMigration, err error) { - emptyResult := &v1alpha1.StorageVersionMigration{} - obj, err := c.Fake. - Invokes(testing.NewRootGetActionWithOptions(storageversionmigrationsResource, name, options), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1alpha1.StorageVersionMigration), err -} - -// List takes label and field selectors, and returns the list of StorageVersionMigrations that match those selectors. -func (c *FakeStorageVersionMigrations) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.StorageVersionMigrationList, err error) { - emptyResult := &v1alpha1.StorageVersionMigrationList{} - obj, err := c.Fake. 
- Invokes(testing.NewRootListActionWithOptions(storageversionmigrationsResource, storageversionmigrationsKind, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1alpha1.StorageVersionMigrationList{ListMeta: obj.(*v1alpha1.StorageVersionMigrationList).ListMeta} - for _, item := range obj.(*v1alpha1.StorageVersionMigrationList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested storageVersionMigrations. -func (c *FakeStorageVersionMigrations) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewRootWatchActionWithOptions(storageversionmigrationsResource, opts)) -} - -// Create takes the representation of a storageVersionMigration and creates it. Returns the server's representation of the storageVersionMigration, and an error, if there is any. -func (c *FakeStorageVersionMigrations) Create(ctx context.Context, storageVersionMigration *v1alpha1.StorageVersionMigration, opts v1.CreateOptions) (result *v1alpha1.StorageVersionMigration, err error) { - emptyResult := &v1alpha1.StorageVersionMigration{} - obj, err := c.Fake. - Invokes(testing.NewRootCreateActionWithOptions(storageversionmigrationsResource, storageVersionMigration, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1alpha1.StorageVersionMigration), err -} - -// Update takes the representation of a storageVersionMigration and updates it. Returns the server's representation of the storageVersionMigration, and an error, if there is any. -func (c *FakeStorageVersionMigrations) Update(ctx context.Context, storageVersionMigration *v1alpha1.StorageVersionMigration, opts v1.UpdateOptions) (result *v1alpha1.StorageVersionMigration, err error) { - emptyResult := &v1alpha1.StorageVersionMigration{} - obj, err := c.Fake. - Invokes(testing.NewRootUpdateActionWithOptions(storageversionmigrationsResource, storageVersionMigration, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1alpha1.StorageVersionMigration), err -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeStorageVersionMigrations) UpdateStatus(ctx context.Context, storageVersionMigration *v1alpha1.StorageVersionMigration, opts v1.UpdateOptions) (result *v1alpha1.StorageVersionMigration, err error) { - emptyResult := &v1alpha1.StorageVersionMigration{} - obj, err := c.Fake. - Invokes(testing.NewRootUpdateSubresourceActionWithOptions(storageversionmigrationsResource, "status", storageVersionMigration, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1alpha1.StorageVersionMigration), err -} - -// Delete takes name of the storageVersionMigration and deletes it. Returns an error if one occurs. -func (c *FakeStorageVersionMigrations) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewRootDeleteActionWithOptions(storageversionmigrationsResource, name, opts), &v1alpha1.StorageVersionMigration{}) - return err -} - -// DeleteCollection deletes a collection of objects. 
-func (c *FakeStorageVersionMigrations) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewRootDeleteCollectionActionWithOptions(storageversionmigrationsResource, opts, listOpts) - - _, err := c.Fake.Invokes(action, &v1alpha1.StorageVersionMigrationList{}) - return err -} - -// Patch applies the patch and returns the patched storageVersionMigration. -func (c *FakeStorageVersionMigrations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.StorageVersionMigration, err error) { - emptyResult := &v1alpha1.StorageVersionMigration{} - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceActionWithOptions(storageversionmigrationsResource, name, pt, data, opts, subresources...), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1alpha1.StorageVersionMigration), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied storageVersionMigration. -func (c *FakeStorageVersionMigrations) Apply(ctx context.Context, storageVersionMigration *storagemigrationv1alpha1.StorageVersionMigrationApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.StorageVersionMigration, err error) { - if storageVersionMigration == nil { - return nil, fmt.Errorf("storageVersionMigration provided to Apply must not be nil") - } - data, err := json.Marshal(storageVersionMigration) - if err != nil { - return nil, err - } - name := storageVersionMigration.Name - if name == nil { - return nil, fmt.Errorf("storageVersionMigration.Name must be provided to Apply") - } - emptyResult := &v1alpha1.StorageVersionMigration{} - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceActionWithOptions(storageversionmigrationsResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1alpha1.StorageVersionMigration), err -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *FakeStorageVersionMigrations) ApplyStatus(ctx context.Context, storageVersionMigration *storagemigrationv1alpha1.StorageVersionMigrationApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.StorageVersionMigration, err error) { - if storageVersionMigration == nil { - return nil, fmt.Errorf("storageVersionMigration provided to Apply must not be nil") - } - data, err := json.Marshal(storageVersionMigration) - if err != nil { - return nil, err - } - name := storageVersionMigration.Name - if name == nil { - return nil, fmt.Errorf("storageVersionMigration.Name must be provided to Apply") - } - emptyResult := &v1alpha1.StorageVersionMigration{} - obj, err := c.Fake. 
- Invokes(testing.NewRootPatchSubresourceActionWithOptions(storageversionmigrationsResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) - if obj == nil { - return emptyResult, err +func newFakeStorageVersionMigrations(fake *FakeStoragemigrationV1alpha1) typedstoragemigrationv1alpha1.StorageVersionMigrationInterface { + return &fakeStorageVersionMigrations{ + gentype.NewFakeClientWithListAndApply[*v1alpha1.StorageVersionMigration, *v1alpha1.StorageVersionMigrationList, *storagemigrationv1alpha1.StorageVersionMigrationApplyConfiguration]( + fake.Fake, + "", + v1alpha1.SchemeGroupVersion.WithResource("storageversionmigrations"), + v1alpha1.SchemeGroupVersion.WithKind("StorageVersionMigration"), + func() *v1alpha1.StorageVersionMigration { return &v1alpha1.StorageVersionMigration{} }, + func() *v1alpha1.StorageVersionMigrationList { return &v1alpha1.StorageVersionMigrationList{} }, + func(dst, src *v1alpha1.StorageVersionMigrationList) { dst.ListMeta = src.ListMeta }, + func(list *v1alpha1.StorageVersionMigrationList) []*v1alpha1.StorageVersionMigration { + return gentype.ToPointerSlice(list.Items) + }, + func(list *v1alpha1.StorageVersionMigrationList, items []*v1alpha1.StorageVersionMigration) { + list.Items = gentype.FromPointerSlice(items) + }, + ), + fake, } - return obj.(*v1alpha1.StorageVersionMigration), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storagemigration/v1alpha1/storagemigration_client.go b/vendor/k8s.io/client-go/kubernetes/typed/storagemigration/v1alpha1/storagemigration_client.go index 613e45355..dcd5a4bf8 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storagemigration/v1alpha1/storagemigration_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storagemigration/v1alpha1/storagemigration_client.go @@ -19,10 +19,10 @@ limitations under the License. package v1alpha1 import ( - "net/http" + http "net/http" - v1alpha1 "k8s.io/api/storagemigration/v1alpha1" - "k8s.io/client-go/kubernetes/scheme" + storagemigrationv1alpha1 "k8s.io/api/storagemigration/v1alpha1" + scheme "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -85,10 +85,10 @@ func New(c rest.Interface) *StoragemigrationV1alpha1Client { } func setConfigDefaults(config *rest.Config) error { - gv := v1alpha1.SchemeGroupVersion + gv := storagemigrationv1alpha1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storagemigration/v1alpha1/storageversionmigration.go b/vendor/k8s.io/client-go/kubernetes/typed/storagemigration/v1alpha1/storageversionmigration.go index 5fc0fd519..5c6981ec8 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storagemigration/v1alpha1/storageversionmigration.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storagemigration/v1alpha1/storageversionmigration.go @@ -19,13 +19,13 @@ limitations under the License. 
package v1alpha1 import ( - "context" + context "context" - v1alpha1 "k8s.io/api/storagemigration/v1alpha1" + storagemigrationv1alpha1 "k8s.io/api/storagemigration/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - storagemigrationv1alpha1 "k8s.io/client-go/applyconfigurations/storagemigration/v1alpha1" + applyconfigurationsstoragemigrationv1alpha1 "k8s.io/client-go/applyconfigurations/storagemigration/v1alpha1" gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" ) @@ -38,36 +38,42 @@ type StorageVersionMigrationsGetter interface { // StorageVersionMigrationInterface has methods to work with StorageVersionMigration resources. type StorageVersionMigrationInterface interface { - Create(ctx context.Context, storageVersionMigration *v1alpha1.StorageVersionMigration, opts v1.CreateOptions) (*v1alpha1.StorageVersionMigration, error) - Update(ctx context.Context, storageVersionMigration *v1alpha1.StorageVersionMigration, opts v1.UpdateOptions) (*v1alpha1.StorageVersionMigration, error) + Create(ctx context.Context, storageVersionMigration *storagemigrationv1alpha1.StorageVersionMigration, opts v1.CreateOptions) (*storagemigrationv1alpha1.StorageVersionMigration, error) + Update(ctx context.Context, storageVersionMigration *storagemigrationv1alpha1.StorageVersionMigration, opts v1.UpdateOptions) (*storagemigrationv1alpha1.StorageVersionMigration, error) // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - UpdateStatus(ctx context.Context, storageVersionMigration *v1alpha1.StorageVersionMigration, opts v1.UpdateOptions) (*v1alpha1.StorageVersionMigration, error) + UpdateStatus(ctx context.Context, storageVersionMigration *storagemigrationv1alpha1.StorageVersionMigration, opts v1.UpdateOptions) (*storagemigrationv1alpha1.StorageVersionMigration, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.StorageVersionMigration, error) - List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.StorageVersionMigrationList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*storagemigrationv1alpha1.StorageVersionMigration, error) + List(ctx context.Context, opts v1.ListOptions) (*storagemigrationv1alpha1.StorageVersionMigrationList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.StorageVersionMigration, err error) - Apply(ctx context.Context, storageVersionMigration *storagemigrationv1alpha1.StorageVersionMigrationApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.StorageVersionMigration, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *storagemigrationv1alpha1.StorageVersionMigration, err error) + Apply(ctx context.Context, storageVersionMigration *applyconfigurationsstoragemigrationv1alpha1.StorageVersionMigrationApplyConfiguration, opts v1.ApplyOptions) (result *storagemigrationv1alpha1.StorageVersionMigration, err error) // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
- ApplyStatus(ctx context.Context, storageVersionMigration *storagemigrationv1alpha1.StorageVersionMigrationApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.StorageVersionMigration, err error) + ApplyStatus(ctx context.Context, storageVersionMigration *applyconfigurationsstoragemigrationv1alpha1.StorageVersionMigrationApplyConfiguration, opts v1.ApplyOptions) (result *storagemigrationv1alpha1.StorageVersionMigration, err error) StorageVersionMigrationExpansion } // storageVersionMigrations implements StorageVersionMigrationInterface type storageVersionMigrations struct { - *gentype.ClientWithListAndApply[*v1alpha1.StorageVersionMigration, *v1alpha1.StorageVersionMigrationList, *storagemigrationv1alpha1.StorageVersionMigrationApplyConfiguration] + *gentype.ClientWithListAndApply[*storagemigrationv1alpha1.StorageVersionMigration, *storagemigrationv1alpha1.StorageVersionMigrationList, *applyconfigurationsstoragemigrationv1alpha1.StorageVersionMigrationApplyConfiguration] } // newStorageVersionMigrations returns a StorageVersionMigrations func newStorageVersionMigrations(c *StoragemigrationV1alpha1Client) *storageVersionMigrations { return &storageVersionMigrations{ - gentype.NewClientWithListAndApply[*v1alpha1.StorageVersionMigration, *v1alpha1.StorageVersionMigrationList, *storagemigrationv1alpha1.StorageVersionMigrationApplyConfiguration]( + gentype.NewClientWithListAndApply[*storagemigrationv1alpha1.StorageVersionMigration, *storagemigrationv1alpha1.StorageVersionMigrationList, *applyconfigurationsstoragemigrationv1alpha1.StorageVersionMigrationApplyConfiguration]( "storageversionmigrations", c.RESTClient(), scheme.ParameterCodec, "", - func() *v1alpha1.StorageVersionMigration { return &v1alpha1.StorageVersionMigration{} }, - func() *v1alpha1.StorageVersionMigrationList { return &v1alpha1.StorageVersionMigrationList{} }), + func() *storagemigrationv1alpha1.StorageVersionMigration { + return &storagemigrationv1alpha1.StorageVersionMigration{} + }, + func() *storagemigrationv1alpha1.StorageVersionMigrationList { + return &storagemigrationv1alpha1.StorageVersionMigrationList{} + }, + gentype.PrefersProtobuf[*storagemigrationv1alpha1.StorageVersionMigration](), + ), } } diff --git a/vendor/k8s.io/client-go/listers/admissionregistration/v1/mutatingwebhookconfiguration.go b/vendor/k8s.io/client-go/listers/admissionregistration/v1/mutatingwebhookconfiguration.go index 4ab267e42..b3efc72a4 100644 --- a/vendor/k8s.io/client-go/listers/admissionregistration/v1/mutatingwebhookconfiguration.go +++ b/vendor/k8s.io/client-go/listers/admissionregistration/v1/mutatingwebhookconfiguration.go @@ -19,10 +19,10 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/admissionregistration/v1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/listers" - "k8s.io/client-go/tools/cache" + admissionregistrationv1 "k8s.io/api/admissionregistration/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // MutatingWebhookConfigurationLister helps list MutatingWebhookConfigurations. @@ -30,19 +30,19 @@ import ( type MutatingWebhookConfigurationLister interface { // List lists all MutatingWebhookConfigurations in the indexer. // Objects returned here must be treated as read-only. 
- List(selector labels.Selector) (ret []*v1.MutatingWebhookConfiguration, err error) + List(selector labels.Selector) (ret []*admissionregistrationv1.MutatingWebhookConfiguration, err error) // Get retrieves the MutatingWebhookConfiguration from the index for a given name. // Objects returned here must be treated as read-only. - Get(name string) (*v1.MutatingWebhookConfiguration, error) + Get(name string) (*admissionregistrationv1.MutatingWebhookConfiguration, error) MutatingWebhookConfigurationListerExpansion } // mutatingWebhookConfigurationLister implements the MutatingWebhookConfigurationLister interface. type mutatingWebhookConfigurationLister struct { - listers.ResourceIndexer[*v1.MutatingWebhookConfiguration] + listers.ResourceIndexer[*admissionregistrationv1.MutatingWebhookConfiguration] } // NewMutatingWebhookConfigurationLister returns a new MutatingWebhookConfigurationLister. func NewMutatingWebhookConfigurationLister(indexer cache.Indexer) MutatingWebhookConfigurationLister { - return &mutatingWebhookConfigurationLister{listers.New[*v1.MutatingWebhookConfiguration](indexer, v1.Resource("mutatingwebhookconfiguration"))} + return &mutatingWebhookConfigurationLister{listers.New[*admissionregistrationv1.MutatingWebhookConfiguration](indexer, admissionregistrationv1.Resource("mutatingwebhookconfiguration"))} } diff --git a/vendor/k8s.io/client-go/listers/admissionregistration/v1/validatingadmissionpolicy.go b/vendor/k8s.io/client-go/listers/admissionregistration/v1/validatingadmissionpolicy.go index f233cdbe8..07a181cd4 100644 --- a/vendor/k8s.io/client-go/listers/admissionregistration/v1/validatingadmissionpolicy.go +++ b/vendor/k8s.io/client-go/listers/admissionregistration/v1/validatingadmissionpolicy.go @@ -19,10 +19,10 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/admissionregistration/v1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/listers" - "k8s.io/client-go/tools/cache" + admissionregistrationv1 "k8s.io/api/admissionregistration/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // ValidatingAdmissionPolicyLister helps list ValidatingAdmissionPolicies. @@ -30,19 +30,19 @@ import ( type ValidatingAdmissionPolicyLister interface { // List lists all ValidatingAdmissionPolicies in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.ValidatingAdmissionPolicy, err error) + List(selector labels.Selector) (ret []*admissionregistrationv1.ValidatingAdmissionPolicy, err error) // Get retrieves the ValidatingAdmissionPolicy from the index for a given name. // Objects returned here must be treated as read-only. - Get(name string) (*v1.ValidatingAdmissionPolicy, error) + Get(name string) (*admissionregistrationv1.ValidatingAdmissionPolicy, error) ValidatingAdmissionPolicyListerExpansion } // validatingAdmissionPolicyLister implements the ValidatingAdmissionPolicyLister interface. type validatingAdmissionPolicyLister struct { - listers.ResourceIndexer[*v1.ValidatingAdmissionPolicy] + listers.ResourceIndexer[*admissionregistrationv1.ValidatingAdmissionPolicy] } // NewValidatingAdmissionPolicyLister returns a new ValidatingAdmissionPolicyLister. 
func NewValidatingAdmissionPolicyLister(indexer cache.Indexer) ValidatingAdmissionPolicyLister { - return &validatingAdmissionPolicyLister{listers.New[*v1.ValidatingAdmissionPolicy](indexer, v1.Resource("validatingadmissionpolicy"))} + return &validatingAdmissionPolicyLister{listers.New[*admissionregistrationv1.ValidatingAdmissionPolicy](indexer, admissionregistrationv1.Resource("validatingadmissionpolicy"))} } diff --git a/vendor/k8s.io/client-go/listers/admissionregistration/v1/validatingadmissionpolicybinding.go b/vendor/k8s.io/client-go/listers/admissionregistration/v1/validatingadmissionpolicybinding.go index 450a06672..131eb12b2 100644 --- a/vendor/k8s.io/client-go/listers/admissionregistration/v1/validatingadmissionpolicybinding.go +++ b/vendor/k8s.io/client-go/listers/admissionregistration/v1/validatingadmissionpolicybinding.go @@ -19,10 +19,10 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/admissionregistration/v1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/listers" - "k8s.io/client-go/tools/cache" + admissionregistrationv1 "k8s.io/api/admissionregistration/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // ValidatingAdmissionPolicyBindingLister helps list ValidatingAdmissionPolicyBindings. @@ -30,19 +30,19 @@ import ( type ValidatingAdmissionPolicyBindingLister interface { // List lists all ValidatingAdmissionPolicyBindings in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.ValidatingAdmissionPolicyBinding, err error) + List(selector labels.Selector) (ret []*admissionregistrationv1.ValidatingAdmissionPolicyBinding, err error) // Get retrieves the ValidatingAdmissionPolicyBinding from the index for a given name. // Objects returned here must be treated as read-only. - Get(name string) (*v1.ValidatingAdmissionPolicyBinding, error) + Get(name string) (*admissionregistrationv1.ValidatingAdmissionPolicyBinding, error) ValidatingAdmissionPolicyBindingListerExpansion } // validatingAdmissionPolicyBindingLister implements the ValidatingAdmissionPolicyBindingLister interface. type validatingAdmissionPolicyBindingLister struct { - listers.ResourceIndexer[*v1.ValidatingAdmissionPolicyBinding] + listers.ResourceIndexer[*admissionregistrationv1.ValidatingAdmissionPolicyBinding] } // NewValidatingAdmissionPolicyBindingLister returns a new ValidatingAdmissionPolicyBindingLister. func NewValidatingAdmissionPolicyBindingLister(indexer cache.Indexer) ValidatingAdmissionPolicyBindingLister { - return &validatingAdmissionPolicyBindingLister{listers.New[*v1.ValidatingAdmissionPolicyBinding](indexer, v1.Resource("validatingadmissionpolicybinding"))} + return &validatingAdmissionPolicyBindingLister{listers.New[*admissionregistrationv1.ValidatingAdmissionPolicyBinding](indexer, admissionregistrationv1.Resource("validatingadmissionpolicybinding"))} } diff --git a/vendor/k8s.io/client-go/listers/admissionregistration/v1/validatingwebhookconfiguration.go b/vendor/k8s.io/client-go/listers/admissionregistration/v1/validatingwebhookconfiguration.go index 99045a675..50e1624c9 100644 --- a/vendor/k8s.io/client-go/listers/admissionregistration/v1/validatingwebhookconfiguration.go +++ b/vendor/k8s.io/client-go/listers/admissionregistration/v1/validatingwebhookconfiguration.go @@ -19,10 +19,10 @@ limitations under the License. 
 package v1

 import (
-	v1 "k8s.io/api/admissionregistration/v1"
-	"k8s.io/apimachinery/pkg/labels"
-	"k8s.io/client-go/listers"
-	"k8s.io/client-go/tools/cache"
+	admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
+	labels "k8s.io/apimachinery/pkg/labels"
+	listers "k8s.io/client-go/listers"
+	cache "k8s.io/client-go/tools/cache"
 )

 // ValidatingWebhookConfigurationLister helps list ValidatingWebhookConfigurations.
@@ -30,19 +30,19 @@ import (
 type ValidatingWebhookConfigurationLister interface {
 	// List lists all ValidatingWebhookConfigurations in the indexer.
 	// Objects returned here must be treated as read-only.
-	List(selector labels.Selector) (ret []*v1.ValidatingWebhookConfiguration, err error)
+	List(selector labels.Selector) (ret []*admissionregistrationv1.ValidatingWebhookConfiguration, err error)
 	// Get retrieves the ValidatingWebhookConfiguration from the index for a given name.
 	// Objects returned here must be treated as read-only.
-	Get(name string) (*v1.ValidatingWebhookConfiguration, error)
+	Get(name string) (*admissionregistrationv1.ValidatingWebhookConfiguration, error)
 	ValidatingWebhookConfigurationListerExpansion
 }

 // validatingWebhookConfigurationLister implements the ValidatingWebhookConfigurationLister interface.
 type validatingWebhookConfigurationLister struct {
-	listers.ResourceIndexer[*v1.ValidatingWebhookConfiguration]
+	listers.ResourceIndexer[*admissionregistrationv1.ValidatingWebhookConfiguration]
 }

 // NewValidatingWebhookConfigurationLister returns a new ValidatingWebhookConfigurationLister.
 func NewValidatingWebhookConfigurationLister(indexer cache.Indexer) ValidatingWebhookConfigurationLister {
-	return &validatingWebhookConfigurationLister{listers.New[*v1.ValidatingWebhookConfiguration](indexer, v1.Resource("validatingwebhookconfiguration"))}
+	return &validatingWebhookConfigurationLister{listers.New[*admissionregistrationv1.ValidatingWebhookConfiguration](indexer, admissionregistrationv1.Resource("validatingwebhookconfiguration"))}
 }
diff --git a/vendor/k8s.io/client-go/listers/admissionregistration/v1alpha1/expansion_generated.go b/vendor/k8s.io/client-go/listers/admissionregistration/v1alpha1/expansion_generated.go
index 3f8b7819c..701784de0 100644
--- a/vendor/k8s.io/client-go/listers/admissionregistration/v1alpha1/expansion_generated.go
+++ b/vendor/k8s.io/client-go/listers/admissionregistration/v1alpha1/expansion_generated.go
@@ -18,6 +18,14 @@ limitations under the License.

 package v1alpha1

+// MutatingAdmissionPolicyListerExpansion allows custom methods to be added to
+// MutatingAdmissionPolicyLister.
+type MutatingAdmissionPolicyListerExpansion interface{}
+
+// MutatingAdmissionPolicyBindingListerExpansion allows custom methods to be added to
+// MutatingAdmissionPolicyBindingLister.
+type MutatingAdmissionPolicyBindingListerExpansion interface{}
+
 // ValidatingAdmissionPolicyListerExpansion allows custom methods to be added to
 // ValidatingAdmissionPolicyLister.
 type ValidatingAdmissionPolicyListerExpansion interface{}
diff --git a/vendor/k8s.io/client-go/listers/admissionregistration/v1alpha1/mutatingadmissionpolicy.go b/vendor/k8s.io/client-go/listers/admissionregistration/v1alpha1/mutatingadmissionpolicy.go
new file mode 100644
index 000000000..debeb79d2
--- /dev/null
+++ b/vendor/k8s.io/client-go/listers/admissionregistration/v1alpha1/mutatingadmissionpolicy.go
@@ -0,0 +1,48 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by lister-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+	admissionregistrationv1alpha1 "k8s.io/api/admissionregistration/v1alpha1"
+	labels "k8s.io/apimachinery/pkg/labels"
+	listers "k8s.io/client-go/listers"
+	cache "k8s.io/client-go/tools/cache"
+)
+
+// MutatingAdmissionPolicyLister helps list MutatingAdmissionPolicies.
+// All objects returned here must be treated as read-only.
+type MutatingAdmissionPolicyLister interface {
+	// List lists all MutatingAdmissionPolicies in the indexer.
+	// Objects returned here must be treated as read-only.
+	List(selector labels.Selector) (ret []*admissionregistrationv1alpha1.MutatingAdmissionPolicy, err error)
+	// Get retrieves the MutatingAdmissionPolicy from the index for a given name.
+	// Objects returned here must be treated as read-only.
+	Get(name string) (*admissionregistrationv1alpha1.MutatingAdmissionPolicy, error)
+	MutatingAdmissionPolicyListerExpansion
+}
+
+// mutatingAdmissionPolicyLister implements the MutatingAdmissionPolicyLister interface.
+type mutatingAdmissionPolicyLister struct {
+	listers.ResourceIndexer[*admissionregistrationv1alpha1.MutatingAdmissionPolicy]
+}
+
+// NewMutatingAdmissionPolicyLister returns a new MutatingAdmissionPolicyLister.
+func NewMutatingAdmissionPolicyLister(indexer cache.Indexer) MutatingAdmissionPolicyLister {
+	return &mutatingAdmissionPolicyLister{listers.New[*admissionregistrationv1alpha1.MutatingAdmissionPolicy](indexer, admissionregistrationv1alpha1.Resource("mutatingadmissionpolicy"))}
+}
diff --git a/vendor/k8s.io/client-go/listers/admissionregistration/v1alpha1/mutatingadmissionpolicybinding.go b/vendor/k8s.io/client-go/listers/admissionregistration/v1alpha1/mutatingadmissionpolicybinding.go
new file mode 100644
index 000000000..bcad29048
--- /dev/null
+++ b/vendor/k8s.io/client-go/listers/admissionregistration/v1alpha1/mutatingadmissionpolicybinding.go
@@ -0,0 +1,48 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by lister-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+	admissionregistrationv1alpha1 "k8s.io/api/admissionregistration/v1alpha1"
+	labels "k8s.io/apimachinery/pkg/labels"
+	listers "k8s.io/client-go/listers"
+	cache "k8s.io/client-go/tools/cache"
+)
+
+// MutatingAdmissionPolicyBindingLister helps list MutatingAdmissionPolicyBindings.
+// All objects returned here must be treated as read-only.
+type MutatingAdmissionPolicyBindingLister interface {
+	// List lists all MutatingAdmissionPolicyBindings in the indexer.
+	// Objects returned here must be treated as read-only.
+	List(selector labels.Selector) (ret []*admissionregistrationv1alpha1.MutatingAdmissionPolicyBinding, err error)
+	// Get retrieves the MutatingAdmissionPolicyBinding from the index for a given name.
+	// Objects returned here must be treated as read-only.
+	Get(name string) (*admissionregistrationv1alpha1.MutatingAdmissionPolicyBinding, error)
+	MutatingAdmissionPolicyBindingListerExpansion
+}
+
+// mutatingAdmissionPolicyBindingLister implements the MutatingAdmissionPolicyBindingLister interface.
+type mutatingAdmissionPolicyBindingLister struct {
+	listers.ResourceIndexer[*admissionregistrationv1alpha1.MutatingAdmissionPolicyBinding]
+}
+
+// NewMutatingAdmissionPolicyBindingLister returns a new MutatingAdmissionPolicyBindingLister.
+func NewMutatingAdmissionPolicyBindingLister(indexer cache.Indexer) MutatingAdmissionPolicyBindingLister {
+	return &mutatingAdmissionPolicyBindingLister{listers.New[*admissionregistrationv1alpha1.MutatingAdmissionPolicyBinding](indexer, admissionregistrationv1alpha1.Resource("mutatingadmissionpolicybinding"))}
+}
diff --git a/vendor/k8s.io/client-go/listers/admissionregistration/v1alpha1/validatingadmissionpolicy.go b/vendor/k8s.io/client-go/listers/admissionregistration/v1alpha1/validatingadmissionpolicy.go
index c3aec2d73..aa4d320cc 100644
--- a/vendor/k8s.io/client-go/listers/admissionregistration/v1alpha1/validatingadmissionpolicy.go
+++ b/vendor/k8s.io/client-go/listers/admissionregistration/v1alpha1/validatingadmissionpolicy.go
@@ -19,10 +19,10 @@ limitations under the License.
 package v1alpha1

 import (
-	v1alpha1 "k8s.io/api/admissionregistration/v1alpha1"
-	"k8s.io/apimachinery/pkg/labels"
-	"k8s.io/client-go/listers"
-	"k8s.io/client-go/tools/cache"
+	admissionregistrationv1alpha1 "k8s.io/api/admissionregistration/v1alpha1"
+	labels "k8s.io/apimachinery/pkg/labels"
+	listers "k8s.io/client-go/listers"
+	cache "k8s.io/client-go/tools/cache"
 )

 // ValidatingAdmissionPolicyLister helps list ValidatingAdmissionPolicies.
@@ -30,19 +30,19 @@ import (
 type ValidatingAdmissionPolicyLister interface {
 	// List lists all ValidatingAdmissionPolicies in the indexer.
 	// Objects returned here must be treated as read-only.
-	List(selector labels.Selector) (ret []*v1alpha1.ValidatingAdmissionPolicy, err error)
+	List(selector labels.Selector) (ret []*admissionregistrationv1alpha1.ValidatingAdmissionPolicy, err error)
 	// Get retrieves the ValidatingAdmissionPolicy from the index for a given name.
 	// Objects returned here must be treated as read-only.
-	Get(name string) (*v1alpha1.ValidatingAdmissionPolicy, error)
+	Get(name string) (*admissionregistrationv1alpha1.ValidatingAdmissionPolicy, error)
 	ValidatingAdmissionPolicyListerExpansion
 }

 // validatingAdmissionPolicyLister implements the ValidatingAdmissionPolicyLister interface.
 type validatingAdmissionPolicyLister struct {
-	listers.ResourceIndexer[*v1alpha1.ValidatingAdmissionPolicy]
+	listers.ResourceIndexer[*admissionregistrationv1alpha1.ValidatingAdmissionPolicy]
 }

 // NewValidatingAdmissionPolicyLister returns a new ValidatingAdmissionPolicyLister.
func NewValidatingAdmissionPolicyLister(indexer cache.Indexer) ValidatingAdmissionPolicyLister { - return &validatingAdmissionPolicyLister{listers.New[*v1alpha1.ValidatingAdmissionPolicy](indexer, v1alpha1.Resource("validatingadmissionpolicy"))} + return &validatingAdmissionPolicyLister{listers.New[*admissionregistrationv1alpha1.ValidatingAdmissionPolicy](indexer, admissionregistrationv1alpha1.Resource("validatingadmissionpolicy"))} } diff --git a/vendor/k8s.io/client-go/listers/admissionregistration/v1alpha1/validatingadmissionpolicybinding.go b/vendor/k8s.io/client-go/listers/admissionregistration/v1alpha1/validatingadmissionpolicybinding.go index 5a2cf79c5..84edc9fb7 100644 --- a/vendor/k8s.io/client-go/listers/admissionregistration/v1alpha1/validatingadmissionpolicybinding.go +++ b/vendor/k8s.io/client-go/listers/admissionregistration/v1alpha1/validatingadmissionpolicybinding.go @@ -19,10 +19,10 @@ limitations under the License. package v1alpha1 import ( - v1alpha1 "k8s.io/api/admissionregistration/v1alpha1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/listers" - "k8s.io/client-go/tools/cache" + admissionregistrationv1alpha1 "k8s.io/api/admissionregistration/v1alpha1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // ValidatingAdmissionPolicyBindingLister helps list ValidatingAdmissionPolicyBindings. @@ -30,19 +30,19 @@ import ( type ValidatingAdmissionPolicyBindingLister interface { // List lists all ValidatingAdmissionPolicyBindings in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1alpha1.ValidatingAdmissionPolicyBinding, err error) + List(selector labels.Selector) (ret []*admissionregistrationv1alpha1.ValidatingAdmissionPolicyBinding, err error) // Get retrieves the ValidatingAdmissionPolicyBinding from the index for a given name. // Objects returned here must be treated as read-only. - Get(name string) (*v1alpha1.ValidatingAdmissionPolicyBinding, error) + Get(name string) (*admissionregistrationv1alpha1.ValidatingAdmissionPolicyBinding, error) ValidatingAdmissionPolicyBindingListerExpansion } // validatingAdmissionPolicyBindingLister implements the ValidatingAdmissionPolicyBindingLister interface. type validatingAdmissionPolicyBindingLister struct { - listers.ResourceIndexer[*v1alpha1.ValidatingAdmissionPolicyBinding] + listers.ResourceIndexer[*admissionregistrationv1alpha1.ValidatingAdmissionPolicyBinding] } // NewValidatingAdmissionPolicyBindingLister returns a new ValidatingAdmissionPolicyBindingLister. 
func NewValidatingAdmissionPolicyBindingLister(indexer cache.Indexer) ValidatingAdmissionPolicyBindingLister { - return &validatingAdmissionPolicyBindingLister{listers.New[*v1alpha1.ValidatingAdmissionPolicyBinding](indexer, v1alpha1.Resource("validatingadmissionpolicybinding"))} + return &validatingAdmissionPolicyBindingLister{listers.New[*admissionregistrationv1alpha1.ValidatingAdmissionPolicyBinding](indexer, admissionregistrationv1alpha1.Resource("validatingadmissionpolicybinding"))} } diff --git a/vendor/k8s.io/client-go/listers/admissionregistration/v1beta1/mutatingwebhookconfiguration.go b/vendor/k8s.io/client-go/listers/admissionregistration/v1beta1/mutatingwebhookconfiguration.go index 3bad49ac0..67588f13b 100644 --- a/vendor/k8s.io/client-go/listers/admissionregistration/v1beta1/mutatingwebhookconfiguration.go +++ b/vendor/k8s.io/client-go/listers/admissionregistration/v1beta1/mutatingwebhookconfiguration.go @@ -19,10 +19,10 @@ limitations under the License. package v1beta1 import ( - v1beta1 "k8s.io/api/admissionregistration/v1beta1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/listers" - "k8s.io/client-go/tools/cache" + admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // MutatingWebhookConfigurationLister helps list MutatingWebhookConfigurations. @@ -30,19 +30,19 @@ import ( type MutatingWebhookConfigurationLister interface { // List lists all MutatingWebhookConfigurations in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1beta1.MutatingWebhookConfiguration, err error) + List(selector labels.Selector) (ret []*admissionregistrationv1beta1.MutatingWebhookConfiguration, err error) // Get retrieves the MutatingWebhookConfiguration from the index for a given name. // Objects returned here must be treated as read-only. - Get(name string) (*v1beta1.MutatingWebhookConfiguration, error) + Get(name string) (*admissionregistrationv1beta1.MutatingWebhookConfiguration, error) MutatingWebhookConfigurationListerExpansion } // mutatingWebhookConfigurationLister implements the MutatingWebhookConfigurationLister interface. type mutatingWebhookConfigurationLister struct { - listers.ResourceIndexer[*v1beta1.MutatingWebhookConfiguration] + listers.ResourceIndexer[*admissionregistrationv1beta1.MutatingWebhookConfiguration] } // NewMutatingWebhookConfigurationLister returns a new MutatingWebhookConfigurationLister. func NewMutatingWebhookConfigurationLister(indexer cache.Indexer) MutatingWebhookConfigurationLister { - return &mutatingWebhookConfigurationLister{listers.New[*v1beta1.MutatingWebhookConfiguration](indexer, v1beta1.Resource("mutatingwebhookconfiguration"))} + return &mutatingWebhookConfigurationLister{listers.New[*admissionregistrationv1beta1.MutatingWebhookConfiguration](indexer, admissionregistrationv1beta1.Resource("mutatingwebhookconfiguration"))} } diff --git a/vendor/k8s.io/client-go/listers/admissionregistration/v1beta1/validatingadmissionpolicy.go b/vendor/k8s.io/client-go/listers/admissionregistration/v1beta1/validatingadmissionpolicy.go index 74d7c6ce3..38237220b 100644 --- a/vendor/k8s.io/client-go/listers/admissionregistration/v1beta1/validatingadmissionpolicy.go +++ b/vendor/k8s.io/client-go/listers/admissionregistration/v1beta1/validatingadmissionpolicy.go @@ -19,10 +19,10 @@ limitations under the License. 
package v1beta1 import ( - v1beta1 "k8s.io/api/admissionregistration/v1beta1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/listers" - "k8s.io/client-go/tools/cache" + admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // ValidatingAdmissionPolicyLister helps list ValidatingAdmissionPolicies. @@ -30,19 +30,19 @@ import ( type ValidatingAdmissionPolicyLister interface { // List lists all ValidatingAdmissionPolicies in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1beta1.ValidatingAdmissionPolicy, err error) + List(selector labels.Selector) (ret []*admissionregistrationv1beta1.ValidatingAdmissionPolicy, err error) // Get retrieves the ValidatingAdmissionPolicy from the index for a given name. // Objects returned here must be treated as read-only. - Get(name string) (*v1beta1.ValidatingAdmissionPolicy, error) + Get(name string) (*admissionregistrationv1beta1.ValidatingAdmissionPolicy, error) ValidatingAdmissionPolicyListerExpansion } // validatingAdmissionPolicyLister implements the ValidatingAdmissionPolicyLister interface. type validatingAdmissionPolicyLister struct { - listers.ResourceIndexer[*v1beta1.ValidatingAdmissionPolicy] + listers.ResourceIndexer[*admissionregistrationv1beta1.ValidatingAdmissionPolicy] } // NewValidatingAdmissionPolicyLister returns a new ValidatingAdmissionPolicyLister. func NewValidatingAdmissionPolicyLister(indexer cache.Indexer) ValidatingAdmissionPolicyLister { - return &validatingAdmissionPolicyLister{listers.New[*v1beta1.ValidatingAdmissionPolicy](indexer, v1beta1.Resource("validatingadmissionpolicy"))} + return &validatingAdmissionPolicyLister{listers.New[*admissionregistrationv1beta1.ValidatingAdmissionPolicy](indexer, admissionregistrationv1beta1.Resource("validatingadmissionpolicy"))} } diff --git a/vendor/k8s.io/client-go/listers/admissionregistration/v1beta1/validatingadmissionpolicybinding.go b/vendor/k8s.io/client-go/listers/admissionregistration/v1beta1/validatingadmissionpolicybinding.go index 668d652bb..071b1188d 100644 --- a/vendor/k8s.io/client-go/listers/admissionregistration/v1beta1/validatingadmissionpolicybinding.go +++ b/vendor/k8s.io/client-go/listers/admissionregistration/v1beta1/validatingadmissionpolicybinding.go @@ -19,10 +19,10 @@ limitations under the License. package v1beta1 import ( - v1beta1 "k8s.io/api/admissionregistration/v1beta1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/listers" - "k8s.io/client-go/tools/cache" + admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // ValidatingAdmissionPolicyBindingLister helps list ValidatingAdmissionPolicyBindings. @@ -30,19 +30,19 @@ import ( type ValidatingAdmissionPolicyBindingLister interface { // List lists all ValidatingAdmissionPolicyBindings in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1beta1.ValidatingAdmissionPolicyBinding, err error) + List(selector labels.Selector) (ret []*admissionregistrationv1beta1.ValidatingAdmissionPolicyBinding, err error) // Get retrieves the ValidatingAdmissionPolicyBinding from the index for a given name. // Objects returned here must be treated as read-only. 
- Get(name string) (*v1beta1.ValidatingAdmissionPolicyBinding, error) + Get(name string) (*admissionregistrationv1beta1.ValidatingAdmissionPolicyBinding, error) ValidatingAdmissionPolicyBindingListerExpansion } // validatingAdmissionPolicyBindingLister implements the ValidatingAdmissionPolicyBindingLister interface. type validatingAdmissionPolicyBindingLister struct { - listers.ResourceIndexer[*v1beta1.ValidatingAdmissionPolicyBinding] + listers.ResourceIndexer[*admissionregistrationv1beta1.ValidatingAdmissionPolicyBinding] } // NewValidatingAdmissionPolicyBindingLister returns a new ValidatingAdmissionPolicyBindingLister. func NewValidatingAdmissionPolicyBindingLister(indexer cache.Indexer) ValidatingAdmissionPolicyBindingLister { - return &validatingAdmissionPolicyBindingLister{listers.New[*v1beta1.ValidatingAdmissionPolicyBinding](indexer, v1beta1.Resource("validatingadmissionpolicybinding"))} + return &validatingAdmissionPolicyBindingLister{listers.New[*admissionregistrationv1beta1.ValidatingAdmissionPolicyBinding](indexer, admissionregistrationv1beta1.Resource("validatingadmissionpolicybinding"))} } diff --git a/vendor/k8s.io/client-go/listers/admissionregistration/v1beta1/validatingwebhookconfiguration.go b/vendor/k8s.io/client-go/listers/admissionregistration/v1beta1/validatingwebhookconfiguration.go index 16167d573..b16546b6d 100644 --- a/vendor/k8s.io/client-go/listers/admissionregistration/v1beta1/validatingwebhookconfiguration.go +++ b/vendor/k8s.io/client-go/listers/admissionregistration/v1beta1/validatingwebhookconfiguration.go @@ -19,10 +19,10 @@ limitations under the License. package v1beta1 import ( - v1beta1 "k8s.io/api/admissionregistration/v1beta1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/listers" - "k8s.io/client-go/tools/cache" + admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // ValidatingWebhookConfigurationLister helps list ValidatingWebhookConfigurations. @@ -30,19 +30,19 @@ import ( type ValidatingWebhookConfigurationLister interface { // List lists all ValidatingWebhookConfigurations in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1beta1.ValidatingWebhookConfiguration, err error) + List(selector labels.Selector) (ret []*admissionregistrationv1beta1.ValidatingWebhookConfiguration, err error) // Get retrieves the ValidatingWebhookConfiguration from the index for a given name. // Objects returned here must be treated as read-only. - Get(name string) (*v1beta1.ValidatingWebhookConfiguration, error) + Get(name string) (*admissionregistrationv1beta1.ValidatingWebhookConfiguration, error) ValidatingWebhookConfigurationListerExpansion } // validatingWebhookConfigurationLister implements the ValidatingWebhookConfigurationLister interface. type validatingWebhookConfigurationLister struct { - listers.ResourceIndexer[*v1beta1.ValidatingWebhookConfiguration] + listers.ResourceIndexer[*admissionregistrationv1beta1.ValidatingWebhookConfiguration] } // NewValidatingWebhookConfigurationLister returns a new ValidatingWebhookConfigurationLister. 
func NewValidatingWebhookConfigurationLister(indexer cache.Indexer) ValidatingWebhookConfigurationLister { - return &validatingWebhookConfigurationLister{listers.New[*v1beta1.ValidatingWebhookConfiguration](indexer, v1beta1.Resource("validatingwebhookconfiguration"))} + return &validatingWebhookConfigurationLister{listers.New[*admissionregistrationv1beta1.ValidatingWebhookConfiguration](indexer, admissionregistrationv1beta1.Resource("validatingwebhookconfiguration"))} } diff --git a/vendor/k8s.io/client-go/listers/apiserverinternal/v1alpha1/storageversion.go b/vendor/k8s.io/client-go/listers/apiserverinternal/v1alpha1/storageversion.go index ce51b88f2..1d24fb893 100644 --- a/vendor/k8s.io/client-go/listers/apiserverinternal/v1alpha1/storageversion.go +++ b/vendor/k8s.io/client-go/listers/apiserverinternal/v1alpha1/storageversion.go @@ -19,10 +19,10 @@ limitations under the License. package v1alpha1 import ( - v1alpha1 "k8s.io/api/apiserverinternal/v1alpha1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/listers" - "k8s.io/client-go/tools/cache" + apiserverinternalv1alpha1 "k8s.io/api/apiserverinternal/v1alpha1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // StorageVersionLister helps list StorageVersions. @@ -30,19 +30,19 @@ import ( type StorageVersionLister interface { // List lists all StorageVersions in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1alpha1.StorageVersion, err error) + List(selector labels.Selector) (ret []*apiserverinternalv1alpha1.StorageVersion, err error) // Get retrieves the StorageVersion from the index for a given name. // Objects returned here must be treated as read-only. - Get(name string) (*v1alpha1.StorageVersion, error) + Get(name string) (*apiserverinternalv1alpha1.StorageVersion, error) StorageVersionListerExpansion } // storageVersionLister implements the StorageVersionLister interface. type storageVersionLister struct { - listers.ResourceIndexer[*v1alpha1.StorageVersion] + listers.ResourceIndexer[*apiserverinternalv1alpha1.StorageVersion] } // NewStorageVersionLister returns a new StorageVersionLister. func NewStorageVersionLister(indexer cache.Indexer) StorageVersionLister { - return &storageVersionLister{listers.New[*v1alpha1.StorageVersion](indexer, v1alpha1.Resource("storageversion"))} + return &storageVersionLister{listers.New[*apiserverinternalv1alpha1.StorageVersion](indexer, apiserverinternalv1alpha1.Resource("storageversion"))} } diff --git a/vendor/k8s.io/client-go/listers/apps/v1/controllerrevision.go b/vendor/k8s.io/client-go/listers/apps/v1/controllerrevision.go index b9061b159..58ab1357e 100644 --- a/vendor/k8s.io/client-go/listers/apps/v1/controllerrevision.go +++ b/vendor/k8s.io/client-go/listers/apps/v1/controllerrevision.go @@ -19,10 +19,10 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/apps/v1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/listers" - "k8s.io/client-go/tools/cache" + appsv1 "k8s.io/api/apps/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // ControllerRevisionLister helps list ControllerRevisions. @@ -30,7 +30,7 @@ import ( type ControllerRevisionLister interface { // List lists all ControllerRevisions in the indexer. // Objects returned here must be treated as read-only. 
- List(selector labels.Selector) (ret []*v1.ControllerRevision, err error) + List(selector labels.Selector) (ret []*appsv1.ControllerRevision, err error) // ControllerRevisions returns an object that can list and get ControllerRevisions. ControllerRevisions(namespace string) ControllerRevisionNamespaceLister ControllerRevisionListerExpansion @@ -38,17 +38,17 @@ type ControllerRevisionLister interface { // controllerRevisionLister implements the ControllerRevisionLister interface. type controllerRevisionLister struct { - listers.ResourceIndexer[*v1.ControllerRevision] + listers.ResourceIndexer[*appsv1.ControllerRevision] } // NewControllerRevisionLister returns a new ControllerRevisionLister. func NewControllerRevisionLister(indexer cache.Indexer) ControllerRevisionLister { - return &controllerRevisionLister{listers.New[*v1.ControllerRevision](indexer, v1.Resource("controllerrevision"))} + return &controllerRevisionLister{listers.New[*appsv1.ControllerRevision](indexer, appsv1.Resource("controllerrevision"))} } // ControllerRevisions returns an object that can list and get ControllerRevisions. func (s *controllerRevisionLister) ControllerRevisions(namespace string) ControllerRevisionNamespaceLister { - return controllerRevisionNamespaceLister{listers.NewNamespaced[*v1.ControllerRevision](s.ResourceIndexer, namespace)} + return controllerRevisionNamespaceLister{listers.NewNamespaced[*appsv1.ControllerRevision](s.ResourceIndexer, namespace)} } // ControllerRevisionNamespaceLister helps list and get ControllerRevisions. @@ -56,15 +56,15 @@ func (s *controllerRevisionLister) ControllerRevisions(namespace string) Control type ControllerRevisionNamespaceLister interface { // List lists all ControllerRevisions in the indexer for a given namespace. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.ControllerRevision, err error) + List(selector labels.Selector) (ret []*appsv1.ControllerRevision, err error) // Get retrieves the ControllerRevision from the indexer for a given namespace and name. // Objects returned here must be treated as read-only. - Get(name string) (*v1.ControllerRevision, error) + Get(name string) (*appsv1.ControllerRevision, error) ControllerRevisionNamespaceListerExpansion } // controllerRevisionNamespaceLister implements the ControllerRevisionNamespaceLister // interface. type controllerRevisionNamespaceLister struct { - listers.ResourceIndexer[*v1.ControllerRevision] + listers.ResourceIndexer[*appsv1.ControllerRevision] } diff --git a/vendor/k8s.io/client-go/listers/apps/v1/daemonset.go b/vendor/k8s.io/client-go/listers/apps/v1/daemonset.go index 4240cb624..b6ba54f80 100644 --- a/vendor/k8s.io/client-go/listers/apps/v1/daemonset.go +++ b/vendor/k8s.io/client-go/listers/apps/v1/daemonset.go @@ -19,10 +19,10 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/apps/v1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/listers" - "k8s.io/client-go/tools/cache" + appsv1 "k8s.io/api/apps/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // DaemonSetLister helps list DaemonSets. @@ -30,7 +30,7 @@ import ( type DaemonSetLister interface { // List lists all DaemonSets in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.DaemonSet, err error) + List(selector labels.Selector) (ret []*appsv1.DaemonSet, err error) // DaemonSets returns an object that can list and get DaemonSets. 
DaemonSets(namespace string) DaemonSetNamespaceLister DaemonSetListerExpansion @@ -38,17 +38,17 @@ type DaemonSetLister interface { // daemonSetLister implements the DaemonSetLister interface. type daemonSetLister struct { - listers.ResourceIndexer[*v1.DaemonSet] + listers.ResourceIndexer[*appsv1.DaemonSet] } // NewDaemonSetLister returns a new DaemonSetLister. func NewDaemonSetLister(indexer cache.Indexer) DaemonSetLister { - return &daemonSetLister{listers.New[*v1.DaemonSet](indexer, v1.Resource("daemonset"))} + return &daemonSetLister{listers.New[*appsv1.DaemonSet](indexer, appsv1.Resource("daemonset"))} } // DaemonSets returns an object that can list and get DaemonSets. func (s *daemonSetLister) DaemonSets(namespace string) DaemonSetNamespaceLister { - return daemonSetNamespaceLister{listers.NewNamespaced[*v1.DaemonSet](s.ResourceIndexer, namespace)} + return daemonSetNamespaceLister{listers.NewNamespaced[*appsv1.DaemonSet](s.ResourceIndexer, namespace)} } // DaemonSetNamespaceLister helps list and get DaemonSets. @@ -56,15 +56,15 @@ func (s *daemonSetLister) DaemonSets(namespace string) DaemonSetNamespaceLister type DaemonSetNamespaceLister interface { // List lists all DaemonSets in the indexer for a given namespace. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.DaemonSet, err error) + List(selector labels.Selector) (ret []*appsv1.DaemonSet, err error) // Get retrieves the DaemonSet from the indexer for a given namespace and name. // Objects returned here must be treated as read-only. - Get(name string) (*v1.DaemonSet, error) + Get(name string) (*appsv1.DaemonSet, error) DaemonSetNamespaceListerExpansion } // daemonSetNamespaceLister implements the DaemonSetNamespaceLister // interface. type daemonSetNamespaceLister struct { - listers.ResourceIndexer[*v1.DaemonSet] + listers.ResourceIndexer[*appsv1.DaemonSet] } diff --git a/vendor/k8s.io/client-go/listers/apps/v1/deployment.go b/vendor/k8s.io/client-go/listers/apps/v1/deployment.go index 3337026b7..ed1b899e2 100644 --- a/vendor/k8s.io/client-go/listers/apps/v1/deployment.go +++ b/vendor/k8s.io/client-go/listers/apps/v1/deployment.go @@ -19,10 +19,10 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/apps/v1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/listers" - "k8s.io/client-go/tools/cache" + appsv1 "k8s.io/api/apps/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // DeploymentLister helps list Deployments. @@ -30,7 +30,7 @@ import ( type DeploymentLister interface { // List lists all Deployments in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.Deployment, err error) + List(selector labels.Selector) (ret []*appsv1.Deployment, err error) // Deployments returns an object that can list and get Deployments. Deployments(namespace string) DeploymentNamespaceLister DeploymentListerExpansion @@ -38,17 +38,17 @@ type DeploymentLister interface { // deploymentLister implements the DeploymentLister interface. type deploymentLister struct { - listers.ResourceIndexer[*v1.Deployment] + listers.ResourceIndexer[*appsv1.Deployment] } // NewDeploymentLister returns a new DeploymentLister. 
func NewDeploymentLister(indexer cache.Indexer) DeploymentLister { - return &deploymentLister{listers.New[*v1.Deployment](indexer, v1.Resource("deployment"))} + return &deploymentLister{listers.New[*appsv1.Deployment](indexer, appsv1.Resource("deployment"))} } // Deployments returns an object that can list and get Deployments. func (s *deploymentLister) Deployments(namespace string) DeploymentNamespaceLister { - return deploymentNamespaceLister{listers.NewNamespaced[*v1.Deployment](s.ResourceIndexer, namespace)} + return deploymentNamespaceLister{listers.NewNamespaced[*appsv1.Deployment](s.ResourceIndexer, namespace)} } // DeploymentNamespaceLister helps list and get Deployments. @@ -56,15 +56,15 @@ func (s *deploymentLister) Deployments(namespace string) DeploymentNamespaceList type DeploymentNamespaceLister interface { // List lists all Deployments in the indexer for a given namespace. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.Deployment, err error) + List(selector labels.Selector) (ret []*appsv1.Deployment, err error) // Get retrieves the Deployment from the indexer for a given namespace and name. // Objects returned here must be treated as read-only. - Get(name string) (*v1.Deployment, error) + Get(name string) (*appsv1.Deployment, error) DeploymentNamespaceListerExpansion } // deploymentNamespaceLister implements the DeploymentNamespaceLister // interface. type deploymentNamespaceLister struct { - listers.ResourceIndexer[*v1.Deployment] + listers.ResourceIndexer[*appsv1.Deployment] } diff --git a/vendor/k8s.io/client-go/listers/apps/v1/replicaset.go b/vendor/k8s.io/client-go/listers/apps/v1/replicaset.go index 244df1d33..68308e352 100644 --- a/vendor/k8s.io/client-go/listers/apps/v1/replicaset.go +++ b/vendor/k8s.io/client-go/listers/apps/v1/replicaset.go @@ -19,10 +19,10 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/apps/v1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/listers" - "k8s.io/client-go/tools/cache" + appsv1 "k8s.io/api/apps/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // ReplicaSetLister helps list ReplicaSets. @@ -30,7 +30,7 @@ import ( type ReplicaSetLister interface { // List lists all ReplicaSets in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.ReplicaSet, err error) + List(selector labels.Selector) (ret []*appsv1.ReplicaSet, err error) // ReplicaSets returns an object that can list and get ReplicaSets. ReplicaSets(namespace string) ReplicaSetNamespaceLister ReplicaSetListerExpansion @@ -38,17 +38,17 @@ type ReplicaSetLister interface { // replicaSetLister implements the ReplicaSetLister interface. type replicaSetLister struct { - listers.ResourceIndexer[*v1.ReplicaSet] + listers.ResourceIndexer[*appsv1.ReplicaSet] } // NewReplicaSetLister returns a new ReplicaSetLister. func NewReplicaSetLister(indexer cache.Indexer) ReplicaSetLister { - return &replicaSetLister{listers.New[*v1.ReplicaSet](indexer, v1.Resource("replicaset"))} + return &replicaSetLister{listers.New[*appsv1.ReplicaSet](indexer, appsv1.Resource("replicaset"))} } // ReplicaSets returns an object that can list and get ReplicaSets. 
func (s *replicaSetLister) ReplicaSets(namespace string) ReplicaSetNamespaceLister { - return replicaSetNamespaceLister{listers.NewNamespaced[*v1.ReplicaSet](s.ResourceIndexer, namespace)} + return replicaSetNamespaceLister{listers.NewNamespaced[*appsv1.ReplicaSet](s.ResourceIndexer, namespace)} } // ReplicaSetNamespaceLister helps list and get ReplicaSets. @@ -56,15 +56,15 @@ func (s *replicaSetLister) ReplicaSets(namespace string) ReplicaSetNamespaceList type ReplicaSetNamespaceLister interface { // List lists all ReplicaSets in the indexer for a given namespace. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.ReplicaSet, err error) + List(selector labels.Selector) (ret []*appsv1.ReplicaSet, err error) // Get retrieves the ReplicaSet from the indexer for a given namespace and name. // Objects returned here must be treated as read-only. - Get(name string) (*v1.ReplicaSet, error) + Get(name string) (*appsv1.ReplicaSet, error) ReplicaSetNamespaceListerExpansion } // replicaSetNamespaceLister implements the ReplicaSetNamespaceLister // interface. type replicaSetNamespaceLister struct { - listers.ResourceIndexer[*v1.ReplicaSet] + listers.ResourceIndexer[*appsv1.ReplicaSet] } diff --git a/vendor/k8s.io/client-go/listers/apps/v1/statefulset.go b/vendor/k8s.io/client-go/listers/apps/v1/statefulset.go index a8dc1b022..7366b5a3d 100644 --- a/vendor/k8s.io/client-go/listers/apps/v1/statefulset.go +++ b/vendor/k8s.io/client-go/listers/apps/v1/statefulset.go @@ -19,10 +19,10 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/apps/v1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/listers" - "k8s.io/client-go/tools/cache" + appsv1 "k8s.io/api/apps/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // StatefulSetLister helps list StatefulSets. @@ -30,7 +30,7 @@ import ( type StatefulSetLister interface { // List lists all StatefulSets in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.StatefulSet, err error) + List(selector labels.Selector) (ret []*appsv1.StatefulSet, err error) // StatefulSets returns an object that can list and get StatefulSets. StatefulSets(namespace string) StatefulSetNamespaceLister StatefulSetListerExpansion @@ -38,17 +38,17 @@ type StatefulSetLister interface { // statefulSetLister implements the StatefulSetLister interface. type statefulSetLister struct { - listers.ResourceIndexer[*v1.StatefulSet] + listers.ResourceIndexer[*appsv1.StatefulSet] } // NewStatefulSetLister returns a new StatefulSetLister. func NewStatefulSetLister(indexer cache.Indexer) StatefulSetLister { - return &statefulSetLister{listers.New[*v1.StatefulSet](indexer, v1.Resource("statefulset"))} + return &statefulSetLister{listers.New[*appsv1.StatefulSet](indexer, appsv1.Resource("statefulset"))} } // StatefulSets returns an object that can list and get StatefulSets. func (s *statefulSetLister) StatefulSets(namespace string) StatefulSetNamespaceLister { - return statefulSetNamespaceLister{listers.NewNamespaced[*v1.StatefulSet](s.ResourceIndexer, namespace)} + return statefulSetNamespaceLister{listers.NewNamespaced[*appsv1.StatefulSet](s.ResourceIndexer, namespace)} } // StatefulSetNamespaceLister helps list and get StatefulSets. 
@@ -56,15 +56,15 @@ func (s *statefulSetLister) StatefulSets(namespace string) StatefulSetNamespaceL type StatefulSetNamespaceLister interface { // List lists all StatefulSets in the indexer for a given namespace. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.StatefulSet, err error) + List(selector labels.Selector) (ret []*appsv1.StatefulSet, err error) // Get retrieves the StatefulSet from the indexer for a given namespace and name. // Objects returned here must be treated as read-only. - Get(name string) (*v1.StatefulSet, error) + Get(name string) (*appsv1.StatefulSet, error) StatefulSetNamespaceListerExpansion } // statefulSetNamespaceLister implements the StatefulSetNamespaceLister // interface. type statefulSetNamespaceLister struct { - listers.ResourceIndexer[*v1.StatefulSet] + listers.ResourceIndexer[*appsv1.StatefulSet] } diff --git a/vendor/k8s.io/client-go/listers/apps/v1beta1/controllerrevision.go b/vendor/k8s.io/client-go/listers/apps/v1beta1/controllerrevision.go index c5e8fb373..7c7f530b6 100644 --- a/vendor/k8s.io/client-go/listers/apps/v1beta1/controllerrevision.go +++ b/vendor/k8s.io/client-go/listers/apps/v1beta1/controllerrevision.go @@ -19,10 +19,10 @@ limitations under the License. package v1beta1 import ( - v1beta1 "k8s.io/api/apps/v1beta1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/listers" - "k8s.io/client-go/tools/cache" + appsv1beta1 "k8s.io/api/apps/v1beta1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // ControllerRevisionLister helps list ControllerRevisions. @@ -30,7 +30,7 @@ import ( type ControllerRevisionLister interface { // List lists all ControllerRevisions in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1beta1.ControllerRevision, err error) + List(selector labels.Selector) (ret []*appsv1beta1.ControllerRevision, err error) // ControllerRevisions returns an object that can list and get ControllerRevisions. ControllerRevisions(namespace string) ControllerRevisionNamespaceLister ControllerRevisionListerExpansion @@ -38,17 +38,17 @@ type ControllerRevisionLister interface { // controllerRevisionLister implements the ControllerRevisionLister interface. type controllerRevisionLister struct { - listers.ResourceIndexer[*v1beta1.ControllerRevision] + listers.ResourceIndexer[*appsv1beta1.ControllerRevision] } // NewControllerRevisionLister returns a new ControllerRevisionLister. func NewControllerRevisionLister(indexer cache.Indexer) ControllerRevisionLister { - return &controllerRevisionLister{listers.New[*v1beta1.ControllerRevision](indexer, v1beta1.Resource("controllerrevision"))} + return &controllerRevisionLister{listers.New[*appsv1beta1.ControllerRevision](indexer, appsv1beta1.Resource("controllerrevision"))} } // ControllerRevisions returns an object that can list and get ControllerRevisions. func (s *controllerRevisionLister) ControllerRevisions(namespace string) ControllerRevisionNamespaceLister { - return controllerRevisionNamespaceLister{listers.NewNamespaced[*v1beta1.ControllerRevision](s.ResourceIndexer, namespace)} + return controllerRevisionNamespaceLister{listers.NewNamespaced[*appsv1beta1.ControllerRevision](s.ResourceIndexer, namespace)} } // ControllerRevisionNamespaceLister helps list and get ControllerRevisions. 
@@ -56,15 +56,15 @@ func (s *controllerRevisionLister) ControllerRevisions(namespace string) Control type ControllerRevisionNamespaceLister interface { // List lists all ControllerRevisions in the indexer for a given namespace. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1beta1.ControllerRevision, err error) + List(selector labels.Selector) (ret []*appsv1beta1.ControllerRevision, err error) // Get retrieves the ControllerRevision from the indexer for a given namespace and name. // Objects returned here must be treated as read-only. - Get(name string) (*v1beta1.ControllerRevision, error) + Get(name string) (*appsv1beta1.ControllerRevision, error) ControllerRevisionNamespaceListerExpansion } // controllerRevisionNamespaceLister implements the ControllerRevisionNamespaceLister // interface. type controllerRevisionNamespaceLister struct { - listers.ResourceIndexer[*v1beta1.ControllerRevision] + listers.ResourceIndexer[*appsv1beta1.ControllerRevision] } diff --git a/vendor/k8s.io/client-go/listers/apps/v1beta1/deployment.go b/vendor/k8s.io/client-go/listers/apps/v1beta1/deployment.go index 1bc6d45ad..6803fc8cd 100644 --- a/vendor/k8s.io/client-go/listers/apps/v1beta1/deployment.go +++ b/vendor/k8s.io/client-go/listers/apps/v1beta1/deployment.go @@ -19,10 +19,10 @@ limitations under the License. package v1beta1 import ( - v1beta1 "k8s.io/api/apps/v1beta1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/listers" - "k8s.io/client-go/tools/cache" + appsv1beta1 "k8s.io/api/apps/v1beta1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // DeploymentLister helps list Deployments. @@ -30,7 +30,7 @@ import ( type DeploymentLister interface { // List lists all Deployments in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1beta1.Deployment, err error) + List(selector labels.Selector) (ret []*appsv1beta1.Deployment, err error) // Deployments returns an object that can list and get Deployments. Deployments(namespace string) DeploymentNamespaceLister DeploymentListerExpansion @@ -38,17 +38,17 @@ type DeploymentLister interface { // deploymentLister implements the DeploymentLister interface. type deploymentLister struct { - listers.ResourceIndexer[*v1beta1.Deployment] + listers.ResourceIndexer[*appsv1beta1.Deployment] } // NewDeploymentLister returns a new DeploymentLister. func NewDeploymentLister(indexer cache.Indexer) DeploymentLister { - return &deploymentLister{listers.New[*v1beta1.Deployment](indexer, v1beta1.Resource("deployment"))} + return &deploymentLister{listers.New[*appsv1beta1.Deployment](indexer, appsv1beta1.Resource("deployment"))} } // Deployments returns an object that can list and get Deployments. func (s *deploymentLister) Deployments(namespace string) DeploymentNamespaceLister { - return deploymentNamespaceLister{listers.NewNamespaced[*v1beta1.Deployment](s.ResourceIndexer, namespace)} + return deploymentNamespaceLister{listers.NewNamespaced[*appsv1beta1.Deployment](s.ResourceIndexer, namespace)} } // DeploymentNamespaceLister helps list and get Deployments. @@ -56,15 +56,15 @@ func (s *deploymentLister) Deployments(namespace string) DeploymentNamespaceList type DeploymentNamespaceLister interface { // List lists all Deployments in the indexer for a given namespace. // Objects returned here must be treated as read-only. 
- List(selector labels.Selector) (ret []*v1beta1.Deployment, err error) + List(selector labels.Selector) (ret []*appsv1beta1.Deployment, err error) // Get retrieves the Deployment from the indexer for a given namespace and name. // Objects returned here must be treated as read-only. - Get(name string) (*v1beta1.Deployment, error) + Get(name string) (*appsv1beta1.Deployment, error) DeploymentNamespaceListerExpansion } // deploymentNamespaceLister implements the DeploymentNamespaceLister // interface. type deploymentNamespaceLister struct { - listers.ResourceIndexer[*v1beta1.Deployment] + listers.ResourceIndexer[*appsv1beta1.Deployment] } diff --git a/vendor/k8s.io/client-go/listers/apps/v1beta1/statefulset.go b/vendor/k8s.io/client-go/listers/apps/v1beta1/statefulset.go index 4bf103aef..040b65dc1 100644 --- a/vendor/k8s.io/client-go/listers/apps/v1beta1/statefulset.go +++ b/vendor/k8s.io/client-go/listers/apps/v1beta1/statefulset.go @@ -19,10 +19,10 @@ limitations under the License. package v1beta1 import ( - v1beta1 "k8s.io/api/apps/v1beta1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/listers" - "k8s.io/client-go/tools/cache" + appsv1beta1 "k8s.io/api/apps/v1beta1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // StatefulSetLister helps list StatefulSets. @@ -30,7 +30,7 @@ import ( type StatefulSetLister interface { // List lists all StatefulSets in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1beta1.StatefulSet, err error) + List(selector labels.Selector) (ret []*appsv1beta1.StatefulSet, err error) // StatefulSets returns an object that can list and get StatefulSets. StatefulSets(namespace string) StatefulSetNamespaceLister StatefulSetListerExpansion @@ -38,17 +38,17 @@ type StatefulSetLister interface { // statefulSetLister implements the StatefulSetLister interface. type statefulSetLister struct { - listers.ResourceIndexer[*v1beta1.StatefulSet] + listers.ResourceIndexer[*appsv1beta1.StatefulSet] } // NewStatefulSetLister returns a new StatefulSetLister. func NewStatefulSetLister(indexer cache.Indexer) StatefulSetLister { - return &statefulSetLister{listers.New[*v1beta1.StatefulSet](indexer, v1beta1.Resource("statefulset"))} + return &statefulSetLister{listers.New[*appsv1beta1.StatefulSet](indexer, appsv1beta1.Resource("statefulset"))} } // StatefulSets returns an object that can list and get StatefulSets. func (s *statefulSetLister) StatefulSets(namespace string) StatefulSetNamespaceLister { - return statefulSetNamespaceLister{listers.NewNamespaced[*v1beta1.StatefulSet](s.ResourceIndexer, namespace)} + return statefulSetNamespaceLister{listers.NewNamespaced[*appsv1beta1.StatefulSet](s.ResourceIndexer, namespace)} } // StatefulSetNamespaceLister helps list and get StatefulSets. @@ -56,15 +56,15 @@ func (s *statefulSetLister) StatefulSets(namespace string) StatefulSetNamespaceL type StatefulSetNamespaceLister interface { // List lists all StatefulSets in the indexer for a given namespace. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1beta1.StatefulSet, err error) + List(selector labels.Selector) (ret []*appsv1beta1.StatefulSet, err error) // Get retrieves the StatefulSet from the indexer for a given namespace and name. // Objects returned here must be treated as read-only. 
- Get(name string) (*v1beta1.StatefulSet, error) + Get(name string) (*appsv1beta1.StatefulSet, error) StatefulSetNamespaceListerExpansion } // statefulSetNamespaceLister implements the StatefulSetNamespaceLister // interface. type statefulSetNamespaceLister struct { - listers.ResourceIndexer[*v1beta1.StatefulSet] + listers.ResourceIndexer[*appsv1beta1.StatefulSet] } diff --git a/vendor/k8s.io/client-go/listers/apps/v1beta2/controllerrevision.go b/vendor/k8s.io/client-go/listers/apps/v1beta2/controllerrevision.go index de941bc69..9e72122f3 100644 --- a/vendor/k8s.io/client-go/listers/apps/v1beta2/controllerrevision.go +++ b/vendor/k8s.io/client-go/listers/apps/v1beta2/controllerrevision.go @@ -19,10 +19,10 @@ limitations under the License. package v1beta2 import ( - v1beta2 "k8s.io/api/apps/v1beta2" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/listers" - "k8s.io/client-go/tools/cache" + appsv1beta2 "k8s.io/api/apps/v1beta2" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // ControllerRevisionLister helps list ControllerRevisions. @@ -30,7 +30,7 @@ import ( type ControllerRevisionLister interface { // List lists all ControllerRevisions in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1beta2.ControllerRevision, err error) + List(selector labels.Selector) (ret []*appsv1beta2.ControllerRevision, err error) // ControllerRevisions returns an object that can list and get ControllerRevisions. ControllerRevisions(namespace string) ControllerRevisionNamespaceLister ControllerRevisionListerExpansion @@ -38,17 +38,17 @@ type ControllerRevisionLister interface { // controllerRevisionLister implements the ControllerRevisionLister interface. type controllerRevisionLister struct { - listers.ResourceIndexer[*v1beta2.ControllerRevision] + listers.ResourceIndexer[*appsv1beta2.ControllerRevision] } // NewControllerRevisionLister returns a new ControllerRevisionLister. func NewControllerRevisionLister(indexer cache.Indexer) ControllerRevisionLister { - return &controllerRevisionLister{listers.New[*v1beta2.ControllerRevision](indexer, v1beta2.Resource("controllerrevision"))} + return &controllerRevisionLister{listers.New[*appsv1beta2.ControllerRevision](indexer, appsv1beta2.Resource("controllerrevision"))} } // ControllerRevisions returns an object that can list and get ControllerRevisions. func (s *controllerRevisionLister) ControllerRevisions(namespace string) ControllerRevisionNamespaceLister { - return controllerRevisionNamespaceLister{listers.NewNamespaced[*v1beta2.ControllerRevision](s.ResourceIndexer, namespace)} + return controllerRevisionNamespaceLister{listers.NewNamespaced[*appsv1beta2.ControllerRevision](s.ResourceIndexer, namespace)} } // ControllerRevisionNamespaceLister helps list and get ControllerRevisions. @@ -56,15 +56,15 @@ func (s *controllerRevisionLister) ControllerRevisions(namespace string) Control type ControllerRevisionNamespaceLister interface { // List lists all ControllerRevisions in the indexer for a given namespace. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1beta2.ControllerRevision, err error) + List(selector labels.Selector) (ret []*appsv1beta2.ControllerRevision, err error) // Get retrieves the ControllerRevision from the indexer for a given namespace and name. // Objects returned here must be treated as read-only. 
- Get(name string) (*v1beta2.ControllerRevision, error) + Get(name string) (*appsv1beta2.ControllerRevision, error) ControllerRevisionNamespaceListerExpansion } // controllerRevisionNamespaceLister implements the ControllerRevisionNamespaceLister // interface. type controllerRevisionNamespaceLister struct { - listers.ResourceIndexer[*v1beta2.ControllerRevision] + listers.ResourceIndexer[*appsv1beta2.ControllerRevision] } diff --git a/vendor/k8s.io/client-go/listers/apps/v1beta2/daemonset.go b/vendor/k8s.io/client-go/listers/apps/v1beta2/daemonset.go index 37784fe88..16b76b613 100644 --- a/vendor/k8s.io/client-go/listers/apps/v1beta2/daemonset.go +++ b/vendor/k8s.io/client-go/listers/apps/v1beta2/daemonset.go @@ -19,10 +19,10 @@ limitations under the License. package v1beta2 import ( - v1beta2 "k8s.io/api/apps/v1beta2" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/listers" - "k8s.io/client-go/tools/cache" + appsv1beta2 "k8s.io/api/apps/v1beta2" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // DaemonSetLister helps list DaemonSets. @@ -30,7 +30,7 @@ import ( type DaemonSetLister interface { // List lists all DaemonSets in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1beta2.DaemonSet, err error) + List(selector labels.Selector) (ret []*appsv1beta2.DaemonSet, err error) // DaemonSets returns an object that can list and get DaemonSets. DaemonSets(namespace string) DaemonSetNamespaceLister DaemonSetListerExpansion @@ -38,17 +38,17 @@ type DaemonSetLister interface { // daemonSetLister implements the DaemonSetLister interface. type daemonSetLister struct { - listers.ResourceIndexer[*v1beta2.DaemonSet] + listers.ResourceIndexer[*appsv1beta2.DaemonSet] } // NewDaemonSetLister returns a new DaemonSetLister. func NewDaemonSetLister(indexer cache.Indexer) DaemonSetLister { - return &daemonSetLister{listers.New[*v1beta2.DaemonSet](indexer, v1beta2.Resource("daemonset"))} + return &daemonSetLister{listers.New[*appsv1beta2.DaemonSet](indexer, appsv1beta2.Resource("daemonset"))} } // DaemonSets returns an object that can list and get DaemonSets. func (s *daemonSetLister) DaemonSets(namespace string) DaemonSetNamespaceLister { - return daemonSetNamespaceLister{listers.NewNamespaced[*v1beta2.DaemonSet](s.ResourceIndexer, namespace)} + return daemonSetNamespaceLister{listers.NewNamespaced[*appsv1beta2.DaemonSet](s.ResourceIndexer, namespace)} } // DaemonSetNamespaceLister helps list and get DaemonSets. @@ -56,15 +56,15 @@ func (s *daemonSetLister) DaemonSets(namespace string) DaemonSetNamespaceLister type DaemonSetNamespaceLister interface { // List lists all DaemonSets in the indexer for a given namespace. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1beta2.DaemonSet, err error) + List(selector labels.Selector) (ret []*appsv1beta2.DaemonSet, err error) // Get retrieves the DaemonSet from the indexer for a given namespace and name. // Objects returned here must be treated as read-only. - Get(name string) (*v1beta2.DaemonSet, error) + Get(name string) (*appsv1beta2.DaemonSet, error) DaemonSetNamespaceListerExpansion } // daemonSetNamespaceLister implements the DaemonSetNamespaceLister // interface. 
type daemonSetNamespaceLister struct { - listers.ResourceIndexer[*v1beta2.DaemonSet] + listers.ResourceIndexer[*appsv1beta2.DaemonSet] } diff --git a/vendor/k8s.io/client-go/listers/apps/v1beta2/deployment.go b/vendor/k8s.io/client-go/listers/apps/v1beta2/deployment.go index 75acc1693..c0b5b2ab1 100644 --- a/vendor/k8s.io/client-go/listers/apps/v1beta2/deployment.go +++ b/vendor/k8s.io/client-go/listers/apps/v1beta2/deployment.go @@ -19,10 +19,10 @@ limitations under the License. package v1beta2 import ( - v1beta2 "k8s.io/api/apps/v1beta2" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/listers" - "k8s.io/client-go/tools/cache" + appsv1beta2 "k8s.io/api/apps/v1beta2" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // DeploymentLister helps list Deployments. @@ -30,7 +30,7 @@ import ( type DeploymentLister interface { // List lists all Deployments in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1beta2.Deployment, err error) + List(selector labels.Selector) (ret []*appsv1beta2.Deployment, err error) // Deployments returns an object that can list and get Deployments. Deployments(namespace string) DeploymentNamespaceLister DeploymentListerExpansion @@ -38,17 +38,17 @@ type DeploymentLister interface { // deploymentLister implements the DeploymentLister interface. type deploymentLister struct { - listers.ResourceIndexer[*v1beta2.Deployment] + listers.ResourceIndexer[*appsv1beta2.Deployment] } // NewDeploymentLister returns a new DeploymentLister. func NewDeploymentLister(indexer cache.Indexer) DeploymentLister { - return &deploymentLister{listers.New[*v1beta2.Deployment](indexer, v1beta2.Resource("deployment"))} + return &deploymentLister{listers.New[*appsv1beta2.Deployment](indexer, appsv1beta2.Resource("deployment"))} } // Deployments returns an object that can list and get Deployments. func (s *deploymentLister) Deployments(namespace string) DeploymentNamespaceLister { - return deploymentNamespaceLister{listers.NewNamespaced[*v1beta2.Deployment](s.ResourceIndexer, namespace)} + return deploymentNamespaceLister{listers.NewNamespaced[*appsv1beta2.Deployment](s.ResourceIndexer, namespace)} } // DeploymentNamespaceLister helps list and get Deployments. @@ -56,15 +56,15 @@ func (s *deploymentLister) Deployments(namespace string) DeploymentNamespaceList type DeploymentNamespaceLister interface { // List lists all Deployments in the indexer for a given namespace. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1beta2.Deployment, err error) + List(selector labels.Selector) (ret []*appsv1beta2.Deployment, err error) // Get retrieves the Deployment from the indexer for a given namespace and name. // Objects returned here must be treated as read-only. - Get(name string) (*v1beta2.Deployment, error) + Get(name string) (*appsv1beta2.Deployment, error) DeploymentNamespaceListerExpansion } // deploymentNamespaceLister implements the DeploymentNamespaceLister // interface. 
type deploymentNamespaceLister struct { - listers.ResourceIndexer[*v1beta2.Deployment] + listers.ResourceIndexer[*appsv1beta2.Deployment] } diff --git a/vendor/k8s.io/client-go/listers/apps/v1beta2/replicaset.go b/vendor/k8s.io/client-go/listers/apps/v1beta2/replicaset.go index 37ea97630..7d5f3c80e 100644 --- a/vendor/k8s.io/client-go/listers/apps/v1beta2/replicaset.go +++ b/vendor/k8s.io/client-go/listers/apps/v1beta2/replicaset.go @@ -19,10 +19,10 @@ limitations under the License. package v1beta2 import ( - v1beta2 "k8s.io/api/apps/v1beta2" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/listers" - "k8s.io/client-go/tools/cache" + appsv1beta2 "k8s.io/api/apps/v1beta2" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // ReplicaSetLister helps list ReplicaSets. @@ -30,7 +30,7 @@ import ( type ReplicaSetLister interface { // List lists all ReplicaSets in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1beta2.ReplicaSet, err error) + List(selector labels.Selector) (ret []*appsv1beta2.ReplicaSet, err error) // ReplicaSets returns an object that can list and get ReplicaSets. ReplicaSets(namespace string) ReplicaSetNamespaceLister ReplicaSetListerExpansion @@ -38,17 +38,17 @@ type ReplicaSetLister interface { // replicaSetLister implements the ReplicaSetLister interface. type replicaSetLister struct { - listers.ResourceIndexer[*v1beta2.ReplicaSet] + listers.ResourceIndexer[*appsv1beta2.ReplicaSet] } // NewReplicaSetLister returns a new ReplicaSetLister. func NewReplicaSetLister(indexer cache.Indexer) ReplicaSetLister { - return &replicaSetLister{listers.New[*v1beta2.ReplicaSet](indexer, v1beta2.Resource("replicaset"))} + return &replicaSetLister{listers.New[*appsv1beta2.ReplicaSet](indexer, appsv1beta2.Resource("replicaset"))} } // ReplicaSets returns an object that can list and get ReplicaSets. func (s *replicaSetLister) ReplicaSets(namespace string) ReplicaSetNamespaceLister { - return replicaSetNamespaceLister{listers.NewNamespaced[*v1beta2.ReplicaSet](s.ResourceIndexer, namespace)} + return replicaSetNamespaceLister{listers.NewNamespaced[*appsv1beta2.ReplicaSet](s.ResourceIndexer, namespace)} } // ReplicaSetNamespaceLister helps list and get ReplicaSets. @@ -56,15 +56,15 @@ func (s *replicaSetLister) ReplicaSets(namespace string) ReplicaSetNamespaceList type ReplicaSetNamespaceLister interface { // List lists all ReplicaSets in the indexer for a given namespace. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1beta2.ReplicaSet, err error) + List(selector labels.Selector) (ret []*appsv1beta2.ReplicaSet, err error) // Get retrieves the ReplicaSet from the indexer for a given namespace and name. // Objects returned here must be treated as read-only. - Get(name string) (*v1beta2.ReplicaSet, error) + Get(name string) (*appsv1beta2.ReplicaSet, error) ReplicaSetNamespaceListerExpansion } // replicaSetNamespaceLister implements the ReplicaSetNamespaceLister // interface. 
type replicaSetNamespaceLister struct { - listers.ResourceIndexer[*v1beta2.ReplicaSet] + listers.ResourceIndexer[*appsv1beta2.ReplicaSet] } diff --git a/vendor/k8s.io/client-go/listers/apps/v1beta2/statefulset.go b/vendor/k8s.io/client-go/listers/apps/v1beta2/statefulset.go index cc48a1473..04dffb2d8 100644 --- a/vendor/k8s.io/client-go/listers/apps/v1beta2/statefulset.go +++ b/vendor/k8s.io/client-go/listers/apps/v1beta2/statefulset.go @@ -19,10 +19,10 @@ limitations under the License. package v1beta2 import ( - v1beta2 "k8s.io/api/apps/v1beta2" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/listers" - "k8s.io/client-go/tools/cache" + appsv1beta2 "k8s.io/api/apps/v1beta2" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // StatefulSetLister helps list StatefulSets. @@ -30,7 +30,7 @@ import ( type StatefulSetLister interface { // List lists all StatefulSets in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1beta2.StatefulSet, err error) + List(selector labels.Selector) (ret []*appsv1beta2.StatefulSet, err error) // StatefulSets returns an object that can list and get StatefulSets. StatefulSets(namespace string) StatefulSetNamespaceLister StatefulSetListerExpansion @@ -38,17 +38,17 @@ type StatefulSetLister interface { // statefulSetLister implements the StatefulSetLister interface. type statefulSetLister struct { - listers.ResourceIndexer[*v1beta2.StatefulSet] + listers.ResourceIndexer[*appsv1beta2.StatefulSet] } // NewStatefulSetLister returns a new StatefulSetLister. func NewStatefulSetLister(indexer cache.Indexer) StatefulSetLister { - return &statefulSetLister{listers.New[*v1beta2.StatefulSet](indexer, v1beta2.Resource("statefulset"))} + return &statefulSetLister{listers.New[*appsv1beta2.StatefulSet](indexer, appsv1beta2.Resource("statefulset"))} } // StatefulSets returns an object that can list and get StatefulSets. func (s *statefulSetLister) StatefulSets(namespace string) StatefulSetNamespaceLister { - return statefulSetNamespaceLister{listers.NewNamespaced[*v1beta2.StatefulSet](s.ResourceIndexer, namespace)} + return statefulSetNamespaceLister{listers.NewNamespaced[*appsv1beta2.StatefulSet](s.ResourceIndexer, namespace)} } // StatefulSetNamespaceLister helps list and get StatefulSets. @@ -56,15 +56,15 @@ func (s *statefulSetLister) StatefulSets(namespace string) StatefulSetNamespaceL type StatefulSetNamespaceLister interface { // List lists all StatefulSets in the indexer for a given namespace. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1beta2.StatefulSet, err error) + List(selector labels.Selector) (ret []*appsv1beta2.StatefulSet, err error) // Get retrieves the StatefulSet from the indexer for a given namespace and name. // Objects returned here must be treated as read-only. - Get(name string) (*v1beta2.StatefulSet, error) + Get(name string) (*appsv1beta2.StatefulSet, error) StatefulSetNamespaceListerExpansion } // statefulSetNamespaceLister implements the StatefulSetNamespaceLister // interface. 
type statefulSetNamespaceLister struct { - listers.ResourceIndexer[*v1beta2.StatefulSet] + listers.ResourceIndexer[*appsv1beta2.StatefulSet] } diff --git a/vendor/k8s.io/client-go/listers/autoscaling/v1/horizontalpodautoscaler.go b/vendor/k8s.io/client-go/listers/autoscaling/v1/horizontalpodautoscaler.go index 2cd4cc87b..b4e577064 100644 --- a/vendor/k8s.io/client-go/listers/autoscaling/v1/horizontalpodautoscaler.go +++ b/vendor/k8s.io/client-go/listers/autoscaling/v1/horizontalpodautoscaler.go @@ -19,10 +19,10 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/autoscaling/v1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/listers" - "k8s.io/client-go/tools/cache" + autoscalingv1 "k8s.io/api/autoscaling/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // HorizontalPodAutoscalerLister helps list HorizontalPodAutoscalers. @@ -30,7 +30,7 @@ import ( type HorizontalPodAutoscalerLister interface { // List lists all HorizontalPodAutoscalers in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.HorizontalPodAutoscaler, err error) + List(selector labels.Selector) (ret []*autoscalingv1.HorizontalPodAutoscaler, err error) // HorizontalPodAutoscalers returns an object that can list and get HorizontalPodAutoscalers. HorizontalPodAutoscalers(namespace string) HorizontalPodAutoscalerNamespaceLister HorizontalPodAutoscalerListerExpansion @@ -38,17 +38,17 @@ type HorizontalPodAutoscalerLister interface { // horizontalPodAutoscalerLister implements the HorizontalPodAutoscalerLister interface. type horizontalPodAutoscalerLister struct { - listers.ResourceIndexer[*v1.HorizontalPodAutoscaler] + listers.ResourceIndexer[*autoscalingv1.HorizontalPodAutoscaler] } // NewHorizontalPodAutoscalerLister returns a new HorizontalPodAutoscalerLister. func NewHorizontalPodAutoscalerLister(indexer cache.Indexer) HorizontalPodAutoscalerLister { - return &horizontalPodAutoscalerLister{listers.New[*v1.HorizontalPodAutoscaler](indexer, v1.Resource("horizontalpodautoscaler"))} + return &horizontalPodAutoscalerLister{listers.New[*autoscalingv1.HorizontalPodAutoscaler](indexer, autoscalingv1.Resource("horizontalpodautoscaler"))} } // HorizontalPodAutoscalers returns an object that can list and get HorizontalPodAutoscalers. func (s *horizontalPodAutoscalerLister) HorizontalPodAutoscalers(namespace string) HorizontalPodAutoscalerNamespaceLister { - return horizontalPodAutoscalerNamespaceLister{listers.NewNamespaced[*v1.HorizontalPodAutoscaler](s.ResourceIndexer, namespace)} + return horizontalPodAutoscalerNamespaceLister{listers.NewNamespaced[*autoscalingv1.HorizontalPodAutoscaler](s.ResourceIndexer, namespace)} } // HorizontalPodAutoscalerNamespaceLister helps list and get HorizontalPodAutoscalers. @@ -56,15 +56,15 @@ func (s *horizontalPodAutoscalerLister) HorizontalPodAutoscalers(namespace strin type HorizontalPodAutoscalerNamespaceLister interface { // List lists all HorizontalPodAutoscalers in the indexer for a given namespace. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.HorizontalPodAutoscaler, err error) + List(selector labels.Selector) (ret []*autoscalingv1.HorizontalPodAutoscaler, err error) // Get retrieves the HorizontalPodAutoscaler from the indexer for a given namespace and name. // Objects returned here must be treated as read-only. 
- Get(name string) (*v1.HorizontalPodAutoscaler, error) + Get(name string) (*autoscalingv1.HorizontalPodAutoscaler, error) HorizontalPodAutoscalerNamespaceListerExpansion } // horizontalPodAutoscalerNamespaceLister implements the HorizontalPodAutoscalerNamespaceLister // interface. type horizontalPodAutoscalerNamespaceLister struct { - listers.ResourceIndexer[*v1.HorizontalPodAutoscaler] + listers.ResourceIndexer[*autoscalingv1.HorizontalPodAutoscaler] } diff --git a/vendor/k8s.io/client-go/listers/autoscaling/v2/horizontalpodautoscaler.go b/vendor/k8s.io/client-go/listers/autoscaling/v2/horizontalpodautoscaler.go index 7c2806af2..aafc57683 100644 --- a/vendor/k8s.io/client-go/listers/autoscaling/v2/horizontalpodautoscaler.go +++ b/vendor/k8s.io/client-go/listers/autoscaling/v2/horizontalpodautoscaler.go @@ -19,10 +19,10 @@ limitations under the License. package v2 import ( - v2 "k8s.io/api/autoscaling/v2" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/listers" - "k8s.io/client-go/tools/cache" + autoscalingv2 "k8s.io/api/autoscaling/v2" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // HorizontalPodAutoscalerLister helps list HorizontalPodAutoscalers. @@ -30,7 +30,7 @@ import ( type HorizontalPodAutoscalerLister interface { // List lists all HorizontalPodAutoscalers in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v2.HorizontalPodAutoscaler, err error) + List(selector labels.Selector) (ret []*autoscalingv2.HorizontalPodAutoscaler, err error) // HorizontalPodAutoscalers returns an object that can list and get HorizontalPodAutoscalers. HorizontalPodAutoscalers(namespace string) HorizontalPodAutoscalerNamespaceLister HorizontalPodAutoscalerListerExpansion @@ -38,17 +38,17 @@ type HorizontalPodAutoscalerLister interface { // horizontalPodAutoscalerLister implements the HorizontalPodAutoscalerLister interface. type horizontalPodAutoscalerLister struct { - listers.ResourceIndexer[*v2.HorizontalPodAutoscaler] + listers.ResourceIndexer[*autoscalingv2.HorizontalPodAutoscaler] } // NewHorizontalPodAutoscalerLister returns a new HorizontalPodAutoscalerLister. func NewHorizontalPodAutoscalerLister(indexer cache.Indexer) HorizontalPodAutoscalerLister { - return &horizontalPodAutoscalerLister{listers.New[*v2.HorizontalPodAutoscaler](indexer, v2.Resource("horizontalpodautoscaler"))} + return &horizontalPodAutoscalerLister{listers.New[*autoscalingv2.HorizontalPodAutoscaler](indexer, autoscalingv2.Resource("horizontalpodautoscaler"))} } // HorizontalPodAutoscalers returns an object that can list and get HorizontalPodAutoscalers. func (s *horizontalPodAutoscalerLister) HorizontalPodAutoscalers(namespace string) HorizontalPodAutoscalerNamespaceLister { - return horizontalPodAutoscalerNamespaceLister{listers.NewNamespaced[*v2.HorizontalPodAutoscaler](s.ResourceIndexer, namespace)} + return horizontalPodAutoscalerNamespaceLister{listers.NewNamespaced[*autoscalingv2.HorizontalPodAutoscaler](s.ResourceIndexer, namespace)} } // HorizontalPodAutoscalerNamespaceLister helps list and get HorizontalPodAutoscalers. @@ -56,15 +56,15 @@ func (s *horizontalPodAutoscalerLister) HorizontalPodAutoscalers(namespace strin type HorizontalPodAutoscalerNamespaceLister interface { // List lists all HorizontalPodAutoscalers in the indexer for a given namespace. // Objects returned here must be treated as read-only. 
- List(selector labels.Selector) (ret []*v2.HorizontalPodAutoscaler, err error) + List(selector labels.Selector) (ret []*autoscalingv2.HorizontalPodAutoscaler, err error) // Get retrieves the HorizontalPodAutoscaler from the indexer for a given namespace and name. // Objects returned here must be treated as read-only. - Get(name string) (*v2.HorizontalPodAutoscaler, error) + Get(name string) (*autoscalingv2.HorizontalPodAutoscaler, error) HorizontalPodAutoscalerNamespaceListerExpansion } // horizontalPodAutoscalerNamespaceLister implements the HorizontalPodAutoscalerNamespaceLister // interface. type horizontalPodAutoscalerNamespaceLister struct { - listers.ResourceIndexer[*v2.HorizontalPodAutoscaler] + listers.ResourceIndexer[*autoscalingv2.HorizontalPodAutoscaler] } diff --git a/vendor/k8s.io/client-go/listers/autoscaling/v2beta1/horizontalpodautoscaler.go b/vendor/k8s.io/client-go/listers/autoscaling/v2beta1/horizontalpodautoscaler.go index a2befd606..b7ad8e79b 100644 --- a/vendor/k8s.io/client-go/listers/autoscaling/v2beta1/horizontalpodautoscaler.go +++ b/vendor/k8s.io/client-go/listers/autoscaling/v2beta1/horizontalpodautoscaler.go @@ -19,10 +19,10 @@ limitations under the License. package v2beta1 import ( - v2beta1 "k8s.io/api/autoscaling/v2beta1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/listers" - "k8s.io/client-go/tools/cache" + autoscalingv2beta1 "k8s.io/api/autoscaling/v2beta1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // HorizontalPodAutoscalerLister helps list HorizontalPodAutoscalers. @@ -30,7 +30,7 @@ import ( type HorizontalPodAutoscalerLister interface { // List lists all HorizontalPodAutoscalers in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v2beta1.HorizontalPodAutoscaler, err error) + List(selector labels.Selector) (ret []*autoscalingv2beta1.HorizontalPodAutoscaler, err error) // HorizontalPodAutoscalers returns an object that can list and get HorizontalPodAutoscalers. HorizontalPodAutoscalers(namespace string) HorizontalPodAutoscalerNamespaceLister HorizontalPodAutoscalerListerExpansion @@ -38,17 +38,17 @@ type HorizontalPodAutoscalerLister interface { // horizontalPodAutoscalerLister implements the HorizontalPodAutoscalerLister interface. type horizontalPodAutoscalerLister struct { - listers.ResourceIndexer[*v2beta1.HorizontalPodAutoscaler] + listers.ResourceIndexer[*autoscalingv2beta1.HorizontalPodAutoscaler] } // NewHorizontalPodAutoscalerLister returns a new HorizontalPodAutoscalerLister. func NewHorizontalPodAutoscalerLister(indexer cache.Indexer) HorizontalPodAutoscalerLister { - return &horizontalPodAutoscalerLister{listers.New[*v2beta1.HorizontalPodAutoscaler](indexer, v2beta1.Resource("horizontalpodautoscaler"))} + return &horizontalPodAutoscalerLister{listers.New[*autoscalingv2beta1.HorizontalPodAutoscaler](indexer, autoscalingv2beta1.Resource("horizontalpodautoscaler"))} } // HorizontalPodAutoscalers returns an object that can list and get HorizontalPodAutoscalers. 
func (s *horizontalPodAutoscalerLister) HorizontalPodAutoscalers(namespace string) HorizontalPodAutoscalerNamespaceLister { - return horizontalPodAutoscalerNamespaceLister{listers.NewNamespaced[*v2beta1.HorizontalPodAutoscaler](s.ResourceIndexer, namespace)} + return horizontalPodAutoscalerNamespaceLister{listers.NewNamespaced[*autoscalingv2beta1.HorizontalPodAutoscaler](s.ResourceIndexer, namespace)} } // HorizontalPodAutoscalerNamespaceLister helps list and get HorizontalPodAutoscalers. @@ -56,15 +56,15 @@ func (s *horizontalPodAutoscalerLister) HorizontalPodAutoscalers(namespace strin type HorizontalPodAutoscalerNamespaceLister interface { // List lists all HorizontalPodAutoscalers in the indexer for a given namespace. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v2beta1.HorizontalPodAutoscaler, err error) + List(selector labels.Selector) (ret []*autoscalingv2beta1.HorizontalPodAutoscaler, err error) // Get retrieves the HorizontalPodAutoscaler from the indexer for a given namespace and name. // Objects returned here must be treated as read-only. - Get(name string) (*v2beta1.HorizontalPodAutoscaler, error) + Get(name string) (*autoscalingv2beta1.HorizontalPodAutoscaler, error) HorizontalPodAutoscalerNamespaceListerExpansion } // horizontalPodAutoscalerNamespaceLister implements the HorizontalPodAutoscalerNamespaceLister // interface. type horizontalPodAutoscalerNamespaceLister struct { - listers.ResourceIndexer[*v2beta1.HorizontalPodAutoscaler] + listers.ResourceIndexer[*autoscalingv2beta1.HorizontalPodAutoscaler] } diff --git a/vendor/k8s.io/client-go/listers/autoscaling/v2beta2/horizontalpodautoscaler.go b/vendor/k8s.io/client-go/listers/autoscaling/v2beta2/horizontalpodautoscaler.go index 52bae849b..8d0fa6e79 100644 --- a/vendor/k8s.io/client-go/listers/autoscaling/v2beta2/horizontalpodautoscaler.go +++ b/vendor/k8s.io/client-go/listers/autoscaling/v2beta2/horizontalpodautoscaler.go @@ -19,10 +19,10 @@ limitations under the License. package v2beta2 import ( - v2beta2 "k8s.io/api/autoscaling/v2beta2" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/listers" - "k8s.io/client-go/tools/cache" + autoscalingv2beta2 "k8s.io/api/autoscaling/v2beta2" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // HorizontalPodAutoscalerLister helps list HorizontalPodAutoscalers. @@ -30,7 +30,7 @@ import ( type HorizontalPodAutoscalerLister interface { // List lists all HorizontalPodAutoscalers in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v2beta2.HorizontalPodAutoscaler, err error) + List(selector labels.Selector) (ret []*autoscalingv2beta2.HorizontalPodAutoscaler, err error) // HorizontalPodAutoscalers returns an object that can list and get HorizontalPodAutoscalers. HorizontalPodAutoscalers(namespace string) HorizontalPodAutoscalerNamespaceLister HorizontalPodAutoscalerListerExpansion @@ -38,17 +38,17 @@ type HorizontalPodAutoscalerLister interface { // horizontalPodAutoscalerLister implements the HorizontalPodAutoscalerLister interface. type horizontalPodAutoscalerLister struct { - listers.ResourceIndexer[*v2beta2.HorizontalPodAutoscaler] + listers.ResourceIndexer[*autoscalingv2beta2.HorizontalPodAutoscaler] } // NewHorizontalPodAutoscalerLister returns a new HorizontalPodAutoscalerLister. 
func NewHorizontalPodAutoscalerLister(indexer cache.Indexer) HorizontalPodAutoscalerLister { - return &horizontalPodAutoscalerLister{listers.New[*v2beta2.HorizontalPodAutoscaler](indexer, v2beta2.Resource("horizontalpodautoscaler"))} + return &horizontalPodAutoscalerLister{listers.New[*autoscalingv2beta2.HorizontalPodAutoscaler](indexer, autoscalingv2beta2.Resource("horizontalpodautoscaler"))} } // HorizontalPodAutoscalers returns an object that can list and get HorizontalPodAutoscalers. func (s *horizontalPodAutoscalerLister) HorizontalPodAutoscalers(namespace string) HorizontalPodAutoscalerNamespaceLister { - return horizontalPodAutoscalerNamespaceLister{listers.NewNamespaced[*v2beta2.HorizontalPodAutoscaler](s.ResourceIndexer, namespace)} + return horizontalPodAutoscalerNamespaceLister{listers.NewNamespaced[*autoscalingv2beta2.HorizontalPodAutoscaler](s.ResourceIndexer, namespace)} } // HorizontalPodAutoscalerNamespaceLister helps list and get HorizontalPodAutoscalers. @@ -56,15 +56,15 @@ func (s *horizontalPodAutoscalerLister) HorizontalPodAutoscalers(namespace strin type HorizontalPodAutoscalerNamespaceLister interface { // List lists all HorizontalPodAutoscalers in the indexer for a given namespace. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v2beta2.HorizontalPodAutoscaler, err error) + List(selector labels.Selector) (ret []*autoscalingv2beta2.HorizontalPodAutoscaler, err error) // Get retrieves the HorizontalPodAutoscaler from the indexer for a given namespace and name. // Objects returned here must be treated as read-only. - Get(name string) (*v2beta2.HorizontalPodAutoscaler, error) + Get(name string) (*autoscalingv2beta2.HorizontalPodAutoscaler, error) HorizontalPodAutoscalerNamespaceListerExpansion } // horizontalPodAutoscalerNamespaceLister implements the HorizontalPodAutoscalerNamespaceLister // interface. type horizontalPodAutoscalerNamespaceLister struct { - listers.ResourceIndexer[*v2beta2.HorizontalPodAutoscaler] + listers.ResourceIndexer[*autoscalingv2beta2.HorizontalPodAutoscaler] } diff --git a/vendor/k8s.io/client-go/listers/batch/v1/cronjob.go b/vendor/k8s.io/client-go/listers/batch/v1/cronjob.go index a7a3abbfa..7ad2b87f0 100644 --- a/vendor/k8s.io/client-go/listers/batch/v1/cronjob.go +++ b/vendor/k8s.io/client-go/listers/batch/v1/cronjob.go @@ -19,10 +19,10 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/batch/v1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/listers" - "k8s.io/client-go/tools/cache" + batchv1 "k8s.io/api/batch/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // CronJobLister helps list CronJobs. @@ -30,7 +30,7 @@ import ( type CronJobLister interface { // List lists all CronJobs in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.CronJob, err error) + List(selector labels.Selector) (ret []*batchv1.CronJob, err error) // CronJobs returns an object that can list and get CronJobs. CronJobs(namespace string) CronJobNamespaceLister CronJobListerExpansion @@ -38,17 +38,17 @@ type CronJobLister interface { // cronJobLister implements the CronJobLister interface. type cronJobLister struct { - listers.ResourceIndexer[*v1.CronJob] + listers.ResourceIndexer[*batchv1.CronJob] } // NewCronJobLister returns a new CronJobLister. 
func NewCronJobLister(indexer cache.Indexer) CronJobLister { - return &cronJobLister{listers.New[*v1.CronJob](indexer, v1.Resource("cronjob"))} + return &cronJobLister{listers.New[*batchv1.CronJob](indexer, batchv1.Resource("cronjob"))} } // CronJobs returns an object that can list and get CronJobs. func (s *cronJobLister) CronJobs(namespace string) CronJobNamespaceLister { - return cronJobNamespaceLister{listers.NewNamespaced[*v1.CronJob](s.ResourceIndexer, namespace)} + return cronJobNamespaceLister{listers.NewNamespaced[*batchv1.CronJob](s.ResourceIndexer, namespace)} } // CronJobNamespaceLister helps list and get CronJobs. @@ -56,15 +56,15 @@ func (s *cronJobLister) CronJobs(namespace string) CronJobNamespaceLister { type CronJobNamespaceLister interface { // List lists all CronJobs in the indexer for a given namespace. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.CronJob, err error) + List(selector labels.Selector) (ret []*batchv1.CronJob, err error) // Get retrieves the CronJob from the indexer for a given namespace and name. // Objects returned here must be treated as read-only. - Get(name string) (*v1.CronJob, error) + Get(name string) (*batchv1.CronJob, error) CronJobNamespaceListerExpansion } // cronJobNamespaceLister implements the CronJobNamespaceLister // interface. type cronJobNamespaceLister struct { - listers.ResourceIndexer[*v1.CronJob] + listers.ResourceIndexer[*batchv1.CronJob] } diff --git a/vendor/k8s.io/client-go/listers/batch/v1/job.go b/vendor/k8s.io/client-go/listers/batch/v1/job.go index 4078a9f7d..eb9f300d9 100644 --- a/vendor/k8s.io/client-go/listers/batch/v1/job.go +++ b/vendor/k8s.io/client-go/listers/batch/v1/job.go @@ -19,10 +19,10 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/batch/v1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/listers" - "k8s.io/client-go/tools/cache" + batchv1 "k8s.io/api/batch/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // JobLister helps list Jobs. @@ -30,7 +30,7 @@ import ( type JobLister interface { // List lists all Jobs in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.Job, err error) + List(selector labels.Selector) (ret []*batchv1.Job, err error) // Jobs returns an object that can list and get Jobs. Jobs(namespace string) JobNamespaceLister JobListerExpansion @@ -38,17 +38,17 @@ type JobLister interface { // jobLister implements the JobLister interface. type jobLister struct { - listers.ResourceIndexer[*v1.Job] + listers.ResourceIndexer[*batchv1.Job] } // NewJobLister returns a new JobLister. func NewJobLister(indexer cache.Indexer) JobLister { - return &jobLister{listers.New[*v1.Job](indexer, v1.Resource("job"))} + return &jobLister{listers.New[*batchv1.Job](indexer, batchv1.Resource("job"))} } // Jobs returns an object that can list and get Jobs. func (s *jobLister) Jobs(namespace string) JobNamespaceLister { - return jobNamespaceLister{listers.NewNamespaced[*v1.Job](s.ResourceIndexer, namespace)} + return jobNamespaceLister{listers.NewNamespaced[*batchv1.Job](s.ResourceIndexer, namespace)} } // JobNamespaceLister helps list and get Jobs. @@ -56,15 +56,15 @@ func (s *jobLister) Jobs(namespace string) JobNamespaceLister { type JobNamespaceLister interface { // List lists all Jobs in the indexer for a given namespace. // Objects returned here must be treated as read-only. 
- List(selector labels.Selector) (ret []*v1.Job, err error) + List(selector labels.Selector) (ret []*batchv1.Job, err error) // Get retrieves the Job from the indexer for a given namespace and name. // Objects returned here must be treated as read-only. - Get(name string) (*v1.Job, error) + Get(name string) (*batchv1.Job, error) JobNamespaceListerExpansion } // jobNamespaceLister implements the JobNamespaceLister // interface. type jobNamespaceLister struct { - listers.ResourceIndexer[*v1.Job] + listers.ResourceIndexer[*batchv1.Job] } diff --git a/vendor/k8s.io/client-go/listers/batch/v1beta1/cronjob.go b/vendor/k8s.io/client-go/listers/batch/v1beta1/cronjob.go index 33ed8219e..d5e0ec2c8 100644 --- a/vendor/k8s.io/client-go/listers/batch/v1beta1/cronjob.go +++ b/vendor/k8s.io/client-go/listers/batch/v1beta1/cronjob.go @@ -19,10 +19,10 @@ limitations under the License. package v1beta1 import ( - v1beta1 "k8s.io/api/batch/v1beta1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/listers" - "k8s.io/client-go/tools/cache" + batchv1beta1 "k8s.io/api/batch/v1beta1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // CronJobLister helps list CronJobs. @@ -30,7 +30,7 @@ import ( type CronJobLister interface { // List lists all CronJobs in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1beta1.CronJob, err error) + List(selector labels.Selector) (ret []*batchv1beta1.CronJob, err error) // CronJobs returns an object that can list and get CronJobs. CronJobs(namespace string) CronJobNamespaceLister CronJobListerExpansion @@ -38,17 +38,17 @@ type CronJobLister interface { // cronJobLister implements the CronJobLister interface. type cronJobLister struct { - listers.ResourceIndexer[*v1beta1.CronJob] + listers.ResourceIndexer[*batchv1beta1.CronJob] } // NewCronJobLister returns a new CronJobLister. func NewCronJobLister(indexer cache.Indexer) CronJobLister { - return &cronJobLister{listers.New[*v1beta1.CronJob](indexer, v1beta1.Resource("cronjob"))} + return &cronJobLister{listers.New[*batchv1beta1.CronJob](indexer, batchv1beta1.Resource("cronjob"))} } // CronJobs returns an object that can list and get CronJobs. func (s *cronJobLister) CronJobs(namespace string) CronJobNamespaceLister { - return cronJobNamespaceLister{listers.NewNamespaced[*v1beta1.CronJob](s.ResourceIndexer, namespace)} + return cronJobNamespaceLister{listers.NewNamespaced[*batchv1beta1.CronJob](s.ResourceIndexer, namespace)} } // CronJobNamespaceLister helps list and get CronJobs. @@ -56,15 +56,15 @@ func (s *cronJobLister) CronJobs(namespace string) CronJobNamespaceLister { type CronJobNamespaceLister interface { // List lists all CronJobs in the indexer for a given namespace. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1beta1.CronJob, err error) + List(selector labels.Selector) (ret []*batchv1beta1.CronJob, err error) // Get retrieves the CronJob from the indexer for a given namespace and name. // Objects returned here must be treated as read-only. - Get(name string) (*v1beta1.CronJob, error) + Get(name string) (*batchv1beta1.CronJob, error) CronJobNamespaceListerExpansion } // cronJobNamespaceLister implements the CronJobNamespaceLister // interface. 
type cronJobNamespaceLister struct { - listers.ResourceIndexer[*v1beta1.CronJob] + listers.ResourceIndexer[*batchv1beta1.CronJob] } diff --git a/vendor/k8s.io/client-go/listers/certificates/v1/certificatesigningrequest.go b/vendor/k8s.io/client-go/listers/certificates/v1/certificatesigningrequest.go index 38e4a3a65..7dbb0f3f5 100644 --- a/vendor/k8s.io/client-go/listers/certificates/v1/certificatesigningrequest.go +++ b/vendor/k8s.io/client-go/listers/certificates/v1/certificatesigningrequest.go @@ -19,10 +19,10 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/certificates/v1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/listers" - "k8s.io/client-go/tools/cache" + certificatesv1 "k8s.io/api/certificates/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // CertificateSigningRequestLister helps list CertificateSigningRequests. @@ -30,19 +30,19 @@ import ( type CertificateSigningRequestLister interface { // List lists all CertificateSigningRequests in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.CertificateSigningRequest, err error) + List(selector labels.Selector) (ret []*certificatesv1.CertificateSigningRequest, err error) // Get retrieves the CertificateSigningRequest from the index for a given name. // Objects returned here must be treated as read-only. - Get(name string) (*v1.CertificateSigningRequest, error) + Get(name string) (*certificatesv1.CertificateSigningRequest, error) CertificateSigningRequestListerExpansion } // certificateSigningRequestLister implements the CertificateSigningRequestLister interface. type certificateSigningRequestLister struct { - listers.ResourceIndexer[*v1.CertificateSigningRequest] + listers.ResourceIndexer[*certificatesv1.CertificateSigningRequest] } // NewCertificateSigningRequestLister returns a new CertificateSigningRequestLister. func NewCertificateSigningRequestLister(indexer cache.Indexer) CertificateSigningRequestLister { - return &certificateSigningRequestLister{listers.New[*v1.CertificateSigningRequest](indexer, v1.Resource("certificatesigningrequest"))} + return &certificateSigningRequestLister{listers.New[*certificatesv1.CertificateSigningRequest](indexer, certificatesv1.Resource("certificatesigningrequest"))} } diff --git a/vendor/k8s.io/client-go/listers/certificates/v1alpha1/clustertrustbundle.go b/vendor/k8s.io/client-go/listers/certificates/v1alpha1/clustertrustbundle.go index 88e5365f4..0e7f47d80 100644 --- a/vendor/k8s.io/client-go/listers/certificates/v1alpha1/clustertrustbundle.go +++ b/vendor/k8s.io/client-go/listers/certificates/v1alpha1/clustertrustbundle.go @@ -19,10 +19,10 @@ limitations under the License. package v1alpha1 import ( - v1alpha1 "k8s.io/api/certificates/v1alpha1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/listers" - "k8s.io/client-go/tools/cache" + certificatesv1alpha1 "k8s.io/api/certificates/v1alpha1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // ClusterTrustBundleLister helps list ClusterTrustBundles. @@ -30,19 +30,19 @@ import ( type ClusterTrustBundleLister interface { // List lists all ClusterTrustBundles in the indexer. // Objects returned here must be treated as read-only. 
- List(selector labels.Selector) (ret []*v1alpha1.ClusterTrustBundle, err error) + List(selector labels.Selector) (ret []*certificatesv1alpha1.ClusterTrustBundle, err error) // Get retrieves the ClusterTrustBundle from the index for a given name. // Objects returned here must be treated as read-only. - Get(name string) (*v1alpha1.ClusterTrustBundle, error) + Get(name string) (*certificatesv1alpha1.ClusterTrustBundle, error) ClusterTrustBundleListerExpansion } // clusterTrustBundleLister implements the ClusterTrustBundleLister interface. type clusterTrustBundleLister struct { - listers.ResourceIndexer[*v1alpha1.ClusterTrustBundle] + listers.ResourceIndexer[*certificatesv1alpha1.ClusterTrustBundle] } // NewClusterTrustBundleLister returns a new ClusterTrustBundleLister. func NewClusterTrustBundleLister(indexer cache.Indexer) ClusterTrustBundleLister { - return &clusterTrustBundleLister{listers.New[*v1alpha1.ClusterTrustBundle](indexer, v1alpha1.Resource("clustertrustbundle"))} + return &clusterTrustBundleLister{listers.New[*certificatesv1alpha1.ClusterTrustBundle](indexer, certificatesv1alpha1.Resource("clustertrustbundle"))} } diff --git a/vendor/k8s.io/client-go/listers/certificates/v1beta1/certificatesigningrequest.go b/vendor/k8s.io/client-go/listers/certificates/v1beta1/certificatesigningrequest.go index 84b5ac4a9..3b4742c67 100644 --- a/vendor/k8s.io/client-go/listers/certificates/v1beta1/certificatesigningrequest.go +++ b/vendor/k8s.io/client-go/listers/certificates/v1beta1/certificatesigningrequest.go @@ -19,10 +19,10 @@ limitations under the License. package v1beta1 import ( - v1beta1 "k8s.io/api/certificates/v1beta1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/listers" - "k8s.io/client-go/tools/cache" + certificatesv1beta1 "k8s.io/api/certificates/v1beta1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // CertificateSigningRequestLister helps list CertificateSigningRequests. @@ -30,19 +30,19 @@ import ( type CertificateSigningRequestLister interface { // List lists all CertificateSigningRequests in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1beta1.CertificateSigningRequest, err error) + List(selector labels.Selector) (ret []*certificatesv1beta1.CertificateSigningRequest, err error) // Get retrieves the CertificateSigningRequest from the index for a given name. // Objects returned here must be treated as read-only. - Get(name string) (*v1beta1.CertificateSigningRequest, error) + Get(name string) (*certificatesv1beta1.CertificateSigningRequest, error) CertificateSigningRequestListerExpansion } // certificateSigningRequestLister implements the CertificateSigningRequestLister interface. type certificateSigningRequestLister struct { - listers.ResourceIndexer[*v1beta1.CertificateSigningRequest] + listers.ResourceIndexer[*certificatesv1beta1.CertificateSigningRequest] } // NewCertificateSigningRequestLister returns a new CertificateSigningRequestLister. 
func NewCertificateSigningRequestLister(indexer cache.Indexer) CertificateSigningRequestLister { - return &certificateSigningRequestLister{listers.New[*v1beta1.CertificateSigningRequest](indexer, v1beta1.Resource("certificatesigningrequest"))} + return &certificateSigningRequestLister{listers.New[*certificatesv1beta1.CertificateSigningRequest](indexer, certificatesv1beta1.Resource("certificatesigningrequest"))} } diff --git a/vendor/k8s.io/client-go/listers/coordination/v1/lease.go b/vendor/k8s.io/client-go/listers/coordination/v1/lease.go index b36d8800e..7fdab2bff 100644 --- a/vendor/k8s.io/client-go/listers/coordination/v1/lease.go +++ b/vendor/k8s.io/client-go/listers/coordination/v1/lease.go @@ -19,10 +19,10 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/coordination/v1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/listers" - "k8s.io/client-go/tools/cache" + coordinationv1 "k8s.io/api/coordination/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // LeaseLister helps list Leases. @@ -30,7 +30,7 @@ import ( type LeaseLister interface { // List lists all Leases in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.Lease, err error) + List(selector labels.Selector) (ret []*coordinationv1.Lease, err error) // Leases returns an object that can list and get Leases. Leases(namespace string) LeaseNamespaceLister LeaseListerExpansion @@ -38,17 +38,17 @@ type LeaseLister interface { // leaseLister implements the LeaseLister interface. type leaseLister struct { - listers.ResourceIndexer[*v1.Lease] + listers.ResourceIndexer[*coordinationv1.Lease] } // NewLeaseLister returns a new LeaseLister. func NewLeaseLister(indexer cache.Indexer) LeaseLister { - return &leaseLister{listers.New[*v1.Lease](indexer, v1.Resource("lease"))} + return &leaseLister{listers.New[*coordinationv1.Lease](indexer, coordinationv1.Resource("lease"))} } // Leases returns an object that can list and get Leases. func (s *leaseLister) Leases(namespace string) LeaseNamespaceLister { - return leaseNamespaceLister{listers.NewNamespaced[*v1.Lease](s.ResourceIndexer, namespace)} + return leaseNamespaceLister{listers.NewNamespaced[*coordinationv1.Lease](s.ResourceIndexer, namespace)} } // LeaseNamespaceLister helps list and get Leases. @@ -56,15 +56,15 @@ func (s *leaseLister) Leases(namespace string) LeaseNamespaceLister { type LeaseNamespaceLister interface { // List lists all Leases in the indexer for a given namespace. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.Lease, err error) + List(selector labels.Selector) (ret []*coordinationv1.Lease, err error) // Get retrieves the Lease from the indexer for a given namespace and name. // Objects returned here must be treated as read-only. - Get(name string) (*v1.Lease, error) + Get(name string) (*coordinationv1.Lease, error) LeaseNamespaceListerExpansion } // leaseNamespaceLister implements the LeaseNamespaceLister // interface. 
type leaseNamespaceLister struct { - listers.ResourceIndexer[*v1.Lease] + listers.ResourceIndexer[*coordinationv1.Lease] } diff --git a/vendor/k8s.io/client-go/listers/coordination/v1alpha1/expansion_generated.go b/vendor/k8s.io/client-go/listers/coordination/v1alpha2/expansion_generated.go similarity index 98% rename from vendor/k8s.io/client-go/listers/coordination/v1alpha1/expansion_generated.go rename to vendor/k8s.io/client-go/listers/coordination/v1alpha2/expansion_generated.go index 233bda975..f22e7d483 100644 --- a/vendor/k8s.io/client-go/listers/coordination/v1alpha1/expansion_generated.go +++ b/vendor/k8s.io/client-go/listers/coordination/v1alpha2/expansion_generated.go @@ -16,7 +16,7 @@ limitations under the License. // Code generated by lister-gen. DO NOT EDIT. -package v1alpha1 +package v1alpha2 // LeaseCandidateListerExpansion allows custom methods to be added to // LeaseCandidateLister. diff --git a/vendor/k8s.io/client-go/listers/coordination/v1alpha1/leasecandidate.go b/vendor/k8s.io/client-go/listers/coordination/v1alpha2/leasecandidate.go similarity index 71% rename from vendor/k8s.io/client-go/listers/coordination/v1alpha1/leasecandidate.go rename to vendor/k8s.io/client-go/listers/coordination/v1alpha2/leasecandidate.go index b5e5fac9e..26a3be476 100644 --- a/vendor/k8s.io/client-go/listers/coordination/v1alpha1/leasecandidate.go +++ b/vendor/k8s.io/client-go/listers/coordination/v1alpha2/leasecandidate.go @@ -16,13 +16,13 @@ limitations under the License. // Code generated by lister-gen. DO NOT EDIT. -package v1alpha1 +package v1alpha2 import ( - v1alpha1 "k8s.io/api/coordination/v1alpha1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/listers" - "k8s.io/client-go/tools/cache" + coordinationv1alpha2 "k8s.io/api/coordination/v1alpha2" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // LeaseCandidateLister helps list LeaseCandidates. @@ -30,7 +30,7 @@ import ( type LeaseCandidateLister interface { // List lists all LeaseCandidates in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1alpha1.LeaseCandidate, err error) + List(selector labels.Selector) (ret []*coordinationv1alpha2.LeaseCandidate, err error) // LeaseCandidates returns an object that can list and get LeaseCandidates. LeaseCandidates(namespace string) LeaseCandidateNamespaceLister LeaseCandidateListerExpansion @@ -38,17 +38,17 @@ type LeaseCandidateLister interface { // leaseCandidateLister implements the LeaseCandidateLister interface. type leaseCandidateLister struct { - listers.ResourceIndexer[*v1alpha1.LeaseCandidate] + listers.ResourceIndexer[*coordinationv1alpha2.LeaseCandidate] } // NewLeaseCandidateLister returns a new LeaseCandidateLister. func NewLeaseCandidateLister(indexer cache.Indexer) LeaseCandidateLister { - return &leaseCandidateLister{listers.New[*v1alpha1.LeaseCandidate](indexer, v1alpha1.Resource("leasecandidate"))} + return &leaseCandidateLister{listers.New[*coordinationv1alpha2.LeaseCandidate](indexer, coordinationv1alpha2.Resource("leasecandidate"))} } // LeaseCandidates returns an object that can list and get LeaseCandidates. 
func (s *leaseCandidateLister) LeaseCandidates(namespace string) LeaseCandidateNamespaceLister { - return leaseCandidateNamespaceLister{listers.NewNamespaced[*v1alpha1.LeaseCandidate](s.ResourceIndexer, namespace)} + return leaseCandidateNamespaceLister{listers.NewNamespaced[*coordinationv1alpha2.LeaseCandidate](s.ResourceIndexer, namespace)} } // LeaseCandidateNamespaceLister helps list and get LeaseCandidates. @@ -56,15 +56,15 @@ func (s *leaseCandidateLister) LeaseCandidates(namespace string) LeaseCandidateN type LeaseCandidateNamespaceLister interface { // List lists all LeaseCandidates in the indexer for a given namespace. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1alpha1.LeaseCandidate, err error) + List(selector labels.Selector) (ret []*coordinationv1alpha2.LeaseCandidate, err error) // Get retrieves the LeaseCandidate from the indexer for a given namespace and name. // Objects returned here must be treated as read-only. - Get(name string) (*v1alpha1.LeaseCandidate, error) + Get(name string) (*coordinationv1alpha2.LeaseCandidate, error) LeaseCandidateNamespaceListerExpansion } // leaseCandidateNamespaceLister implements the LeaseCandidateNamespaceLister // interface. type leaseCandidateNamespaceLister struct { - listers.ResourceIndexer[*v1alpha1.LeaseCandidate] + listers.ResourceIndexer[*coordinationv1alpha2.LeaseCandidate] } diff --git a/vendor/k8s.io/client-go/listers/coordination/v1beta1/lease.go b/vendor/k8s.io/client-go/listers/coordination/v1beta1/lease.go index dbe132696..dfbd02acf 100644 --- a/vendor/k8s.io/client-go/listers/coordination/v1beta1/lease.go +++ b/vendor/k8s.io/client-go/listers/coordination/v1beta1/lease.go @@ -19,10 +19,10 @@ limitations under the License. package v1beta1 import ( - v1beta1 "k8s.io/api/coordination/v1beta1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/listers" - "k8s.io/client-go/tools/cache" + coordinationv1beta1 "k8s.io/api/coordination/v1beta1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // LeaseLister helps list Leases. @@ -30,7 +30,7 @@ import ( type LeaseLister interface { // List lists all Leases in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1beta1.Lease, err error) + List(selector labels.Selector) (ret []*coordinationv1beta1.Lease, err error) // Leases returns an object that can list and get Leases. Leases(namespace string) LeaseNamespaceLister LeaseListerExpansion @@ -38,17 +38,17 @@ type LeaseLister interface { // leaseLister implements the LeaseLister interface. type leaseLister struct { - listers.ResourceIndexer[*v1beta1.Lease] + listers.ResourceIndexer[*coordinationv1beta1.Lease] } // NewLeaseLister returns a new LeaseLister. func NewLeaseLister(indexer cache.Indexer) LeaseLister { - return &leaseLister{listers.New[*v1beta1.Lease](indexer, v1beta1.Resource("lease"))} + return &leaseLister{listers.New[*coordinationv1beta1.Lease](indexer, coordinationv1beta1.Resource("lease"))} } // Leases returns an object that can list and get Leases. func (s *leaseLister) Leases(namespace string) LeaseNamespaceLister { - return leaseNamespaceLister{listers.NewNamespaced[*v1beta1.Lease](s.ResourceIndexer, namespace)} + return leaseNamespaceLister{listers.NewNamespaced[*coordinationv1beta1.Lease](s.ResourceIndexer, namespace)} } // LeaseNamespaceLister helps list and get Leases. 
@@ -56,15 +56,15 @@ func (s *leaseLister) Leases(namespace string) LeaseNamespaceLister { type LeaseNamespaceLister interface { // List lists all Leases in the indexer for a given namespace. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1beta1.Lease, err error) + List(selector labels.Selector) (ret []*coordinationv1beta1.Lease, err error) // Get retrieves the Lease from the indexer for a given namespace and name. // Objects returned here must be treated as read-only. - Get(name string) (*v1beta1.Lease, error) + Get(name string) (*coordinationv1beta1.Lease, error) LeaseNamespaceListerExpansion } // leaseNamespaceLister implements the LeaseNamespaceLister // interface. type leaseNamespaceLister struct { - listers.ResourceIndexer[*v1beta1.Lease] + listers.ResourceIndexer[*coordinationv1beta1.Lease] } diff --git a/vendor/k8s.io/client-go/listers/core/v1/componentstatus.go b/vendor/k8s.io/client-go/listers/core/v1/componentstatus.go index 9e3274b5a..c88836706 100644 --- a/vendor/k8s.io/client-go/listers/core/v1/componentstatus.go +++ b/vendor/k8s.io/client-go/listers/core/v1/componentstatus.go @@ -19,10 +19,10 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/listers" - "k8s.io/client-go/tools/cache" + corev1 "k8s.io/api/core/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // ComponentStatusLister helps list ComponentStatuses. @@ -30,19 +30,19 @@ import ( type ComponentStatusLister interface { // List lists all ComponentStatuses in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.ComponentStatus, err error) + List(selector labels.Selector) (ret []*corev1.ComponentStatus, err error) // Get retrieves the ComponentStatus from the index for a given name. // Objects returned here must be treated as read-only. - Get(name string) (*v1.ComponentStatus, error) + Get(name string) (*corev1.ComponentStatus, error) ComponentStatusListerExpansion } // componentStatusLister implements the ComponentStatusLister interface. type componentStatusLister struct { - listers.ResourceIndexer[*v1.ComponentStatus] + listers.ResourceIndexer[*corev1.ComponentStatus] } // NewComponentStatusLister returns a new ComponentStatusLister. func NewComponentStatusLister(indexer cache.Indexer) ComponentStatusLister { - return &componentStatusLister{listers.New[*v1.ComponentStatus](indexer, v1.Resource("componentstatus"))} + return &componentStatusLister{listers.New[*corev1.ComponentStatus](indexer, corev1.Resource("componentstatus"))} } diff --git a/vendor/k8s.io/client-go/listers/core/v1/configmap.go b/vendor/k8s.io/client-go/listers/core/v1/configmap.go index 0dde404f2..54d25412e 100644 --- a/vendor/k8s.io/client-go/listers/core/v1/configmap.go +++ b/vendor/k8s.io/client-go/listers/core/v1/configmap.go @@ -19,10 +19,10 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/listers" - "k8s.io/client-go/tools/cache" + corev1 "k8s.io/api/core/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // ConfigMapLister helps list ConfigMaps. @@ -30,7 +30,7 @@ import ( type ConfigMapLister interface { // List lists all ConfigMaps in the indexer. // Objects returned here must be treated as read-only. 
- List(selector labels.Selector) (ret []*v1.ConfigMap, err error) + List(selector labels.Selector) (ret []*corev1.ConfigMap, err error) // ConfigMaps returns an object that can list and get ConfigMaps. ConfigMaps(namespace string) ConfigMapNamespaceLister ConfigMapListerExpansion @@ -38,17 +38,17 @@ type ConfigMapLister interface { // configMapLister implements the ConfigMapLister interface. type configMapLister struct { - listers.ResourceIndexer[*v1.ConfigMap] + listers.ResourceIndexer[*corev1.ConfigMap] } // NewConfigMapLister returns a new ConfigMapLister. func NewConfigMapLister(indexer cache.Indexer) ConfigMapLister { - return &configMapLister{listers.New[*v1.ConfigMap](indexer, v1.Resource("configmap"))} + return &configMapLister{listers.New[*corev1.ConfigMap](indexer, corev1.Resource("configmap"))} } // ConfigMaps returns an object that can list and get ConfigMaps. func (s *configMapLister) ConfigMaps(namespace string) ConfigMapNamespaceLister { - return configMapNamespaceLister{listers.NewNamespaced[*v1.ConfigMap](s.ResourceIndexer, namespace)} + return configMapNamespaceLister{listers.NewNamespaced[*corev1.ConfigMap](s.ResourceIndexer, namespace)} } // ConfigMapNamespaceLister helps list and get ConfigMaps. @@ -56,15 +56,15 @@ func (s *configMapLister) ConfigMaps(namespace string) ConfigMapNamespaceLister type ConfigMapNamespaceLister interface { // List lists all ConfigMaps in the indexer for a given namespace. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.ConfigMap, err error) + List(selector labels.Selector) (ret []*corev1.ConfigMap, err error) // Get retrieves the ConfigMap from the indexer for a given namespace and name. // Objects returned here must be treated as read-only. - Get(name string) (*v1.ConfigMap, error) + Get(name string) (*corev1.ConfigMap, error) ConfigMapNamespaceListerExpansion } // configMapNamespaceLister implements the ConfigMapNamespaceLister // interface. type configMapNamespaceLister struct { - listers.ResourceIndexer[*v1.ConfigMap] + listers.ResourceIndexer[*corev1.ConfigMap] } diff --git a/vendor/k8s.io/client-go/listers/core/v1/endpoints.go b/vendor/k8s.io/client-go/listers/core/v1/endpoints.go index 726b43255..a9d4e45a1 100644 --- a/vendor/k8s.io/client-go/listers/core/v1/endpoints.go +++ b/vendor/k8s.io/client-go/listers/core/v1/endpoints.go @@ -19,10 +19,10 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/listers" - "k8s.io/client-go/tools/cache" + corev1 "k8s.io/api/core/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // EndpointsLister helps list Endpoints. @@ -30,7 +30,7 @@ import ( type EndpointsLister interface { // List lists all Endpoints in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.Endpoints, err error) + List(selector labels.Selector) (ret []*corev1.Endpoints, err error) // Endpoints returns an object that can list and get Endpoints. Endpoints(namespace string) EndpointsNamespaceLister EndpointsListerExpansion @@ -38,17 +38,17 @@ type EndpointsLister interface { // endpointsLister implements the EndpointsLister interface. type endpointsLister struct { - listers.ResourceIndexer[*v1.Endpoints] + listers.ResourceIndexer[*corev1.Endpoints] } // NewEndpointsLister returns a new EndpointsLister. 
func NewEndpointsLister(indexer cache.Indexer) EndpointsLister { - return &endpointsLister{listers.New[*v1.Endpoints](indexer, v1.Resource("endpoints"))} + return &endpointsLister{listers.New[*corev1.Endpoints](indexer, corev1.Resource("endpoints"))} } // Endpoints returns an object that can list and get Endpoints. func (s *endpointsLister) Endpoints(namespace string) EndpointsNamespaceLister { - return endpointsNamespaceLister{listers.NewNamespaced[*v1.Endpoints](s.ResourceIndexer, namespace)} + return endpointsNamespaceLister{listers.NewNamespaced[*corev1.Endpoints](s.ResourceIndexer, namespace)} } // EndpointsNamespaceLister helps list and get Endpoints. @@ -56,15 +56,15 @@ func (s *endpointsLister) Endpoints(namespace string) EndpointsNamespaceLister { type EndpointsNamespaceLister interface { // List lists all Endpoints in the indexer for a given namespace. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.Endpoints, err error) + List(selector labels.Selector) (ret []*corev1.Endpoints, err error) // Get retrieves the Endpoints from the indexer for a given namespace and name. // Objects returned here must be treated as read-only. - Get(name string) (*v1.Endpoints, error) + Get(name string) (*corev1.Endpoints, error) EndpointsNamespaceListerExpansion } // endpointsNamespaceLister implements the EndpointsNamespaceLister // interface. type endpointsNamespaceLister struct { - listers.ResourceIndexer[*v1.Endpoints] + listers.ResourceIndexer[*corev1.Endpoints] } diff --git a/vendor/k8s.io/client-go/listers/core/v1/event.go b/vendor/k8s.io/client-go/listers/core/v1/event.go index 5ab3a1932..0e13fa311 100644 --- a/vendor/k8s.io/client-go/listers/core/v1/event.go +++ b/vendor/k8s.io/client-go/listers/core/v1/event.go @@ -19,10 +19,10 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/listers" - "k8s.io/client-go/tools/cache" + corev1 "k8s.io/api/core/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // EventLister helps list Events. @@ -30,7 +30,7 @@ import ( type EventLister interface { // List lists all Events in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.Event, err error) + List(selector labels.Selector) (ret []*corev1.Event, err error) // Events returns an object that can list and get Events. Events(namespace string) EventNamespaceLister EventListerExpansion @@ -38,17 +38,17 @@ type EventLister interface { // eventLister implements the EventLister interface. type eventLister struct { - listers.ResourceIndexer[*v1.Event] + listers.ResourceIndexer[*corev1.Event] } // NewEventLister returns a new EventLister. func NewEventLister(indexer cache.Indexer) EventLister { - return &eventLister{listers.New[*v1.Event](indexer, v1.Resource("event"))} + return &eventLister{listers.New[*corev1.Event](indexer, corev1.Resource("event"))} } // Events returns an object that can list and get Events. func (s *eventLister) Events(namespace string) EventNamespaceLister { - return eventNamespaceLister{listers.NewNamespaced[*v1.Event](s.ResourceIndexer, namespace)} + return eventNamespaceLister{listers.NewNamespaced[*corev1.Event](s.ResourceIndexer, namespace)} } // EventNamespaceLister helps list and get Events. 
@@ -56,15 +56,15 @@ func (s *eventLister) Events(namespace string) EventNamespaceLister { type EventNamespaceLister interface { // List lists all Events in the indexer for a given namespace. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.Event, err error) + List(selector labels.Selector) (ret []*corev1.Event, err error) // Get retrieves the Event from the indexer for a given namespace and name. // Objects returned here must be treated as read-only. - Get(name string) (*v1.Event, error) + Get(name string) (*corev1.Event, error) EventNamespaceListerExpansion } // eventNamespaceLister implements the EventNamespaceLister // interface. type eventNamespaceLister struct { - listers.ResourceIndexer[*v1.Event] + listers.ResourceIndexer[*corev1.Event] } diff --git a/vendor/k8s.io/client-go/listers/core/v1/limitrange.go b/vendor/k8s.io/client-go/listers/core/v1/limitrange.go index 5c7593cfa..26402c822 100644 --- a/vendor/k8s.io/client-go/listers/core/v1/limitrange.go +++ b/vendor/k8s.io/client-go/listers/core/v1/limitrange.go @@ -19,10 +19,10 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/listers" - "k8s.io/client-go/tools/cache" + corev1 "k8s.io/api/core/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // LimitRangeLister helps list LimitRanges. @@ -30,7 +30,7 @@ import ( type LimitRangeLister interface { // List lists all LimitRanges in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.LimitRange, err error) + List(selector labels.Selector) (ret []*corev1.LimitRange, err error) // LimitRanges returns an object that can list and get LimitRanges. LimitRanges(namespace string) LimitRangeNamespaceLister LimitRangeListerExpansion @@ -38,17 +38,17 @@ type LimitRangeLister interface { // limitRangeLister implements the LimitRangeLister interface. type limitRangeLister struct { - listers.ResourceIndexer[*v1.LimitRange] + listers.ResourceIndexer[*corev1.LimitRange] } // NewLimitRangeLister returns a new LimitRangeLister. func NewLimitRangeLister(indexer cache.Indexer) LimitRangeLister { - return &limitRangeLister{listers.New[*v1.LimitRange](indexer, v1.Resource("limitrange"))} + return &limitRangeLister{listers.New[*corev1.LimitRange](indexer, corev1.Resource("limitrange"))} } // LimitRanges returns an object that can list and get LimitRanges. func (s *limitRangeLister) LimitRanges(namespace string) LimitRangeNamespaceLister { - return limitRangeNamespaceLister{listers.NewNamespaced[*v1.LimitRange](s.ResourceIndexer, namespace)} + return limitRangeNamespaceLister{listers.NewNamespaced[*corev1.LimitRange](s.ResourceIndexer, namespace)} } // LimitRangeNamespaceLister helps list and get LimitRanges. @@ -56,15 +56,15 @@ func (s *limitRangeLister) LimitRanges(namespace string) LimitRangeNamespaceList type LimitRangeNamespaceLister interface { // List lists all LimitRanges in the indexer for a given namespace. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.LimitRange, err error) + List(selector labels.Selector) (ret []*corev1.LimitRange, err error) // Get retrieves the LimitRange from the indexer for a given namespace and name. // Objects returned here must be treated as read-only. 
- Get(name string) (*v1.LimitRange, error) + Get(name string) (*corev1.LimitRange, error) LimitRangeNamespaceListerExpansion } // limitRangeNamespaceLister implements the LimitRangeNamespaceLister // interface. type limitRangeNamespaceLister struct { - listers.ResourceIndexer[*v1.LimitRange] + listers.ResourceIndexer[*corev1.LimitRange] } diff --git a/vendor/k8s.io/client-go/listers/core/v1/namespace.go b/vendor/k8s.io/client-go/listers/core/v1/namespace.go index a016447cf..d0cd4e5c7 100644 --- a/vendor/k8s.io/client-go/listers/core/v1/namespace.go +++ b/vendor/k8s.io/client-go/listers/core/v1/namespace.go @@ -19,10 +19,10 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/listers" - "k8s.io/client-go/tools/cache" + corev1 "k8s.io/api/core/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // NamespaceLister helps list Namespaces. @@ -30,19 +30,19 @@ import ( type NamespaceLister interface { // List lists all Namespaces in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.Namespace, err error) + List(selector labels.Selector) (ret []*corev1.Namespace, err error) // Get retrieves the Namespace from the index for a given name. // Objects returned here must be treated as read-only. - Get(name string) (*v1.Namespace, error) + Get(name string) (*corev1.Namespace, error) NamespaceListerExpansion } // namespaceLister implements the NamespaceLister interface. type namespaceLister struct { - listers.ResourceIndexer[*v1.Namespace] + listers.ResourceIndexer[*corev1.Namespace] } // NewNamespaceLister returns a new NamespaceLister. func NewNamespaceLister(indexer cache.Indexer) NamespaceLister { - return &namespaceLister{listers.New[*v1.Namespace](indexer, v1.Resource("namespace"))} + return &namespaceLister{listers.New[*corev1.Namespace](indexer, corev1.Resource("namespace"))} } diff --git a/vendor/k8s.io/client-go/listers/core/v1/node.go b/vendor/k8s.io/client-go/listers/core/v1/node.go index 495c6d79d..c9ffe2e52 100644 --- a/vendor/k8s.io/client-go/listers/core/v1/node.go +++ b/vendor/k8s.io/client-go/listers/core/v1/node.go @@ -19,10 +19,10 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/listers" - "k8s.io/client-go/tools/cache" + corev1 "k8s.io/api/core/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // NodeLister helps list Nodes. @@ -30,19 +30,19 @@ import ( type NodeLister interface { // List lists all Nodes in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.Node, err error) + List(selector labels.Selector) (ret []*corev1.Node, err error) // Get retrieves the Node from the index for a given name. // Objects returned here must be treated as read-only. - Get(name string) (*v1.Node, error) + Get(name string) (*corev1.Node, error) NodeListerExpansion } // nodeLister implements the NodeLister interface. type nodeLister struct { - listers.ResourceIndexer[*v1.Node] + listers.ResourceIndexer[*corev1.Node] } // NewNodeLister returns a new NodeLister. 
func NewNodeLister(indexer cache.Indexer) NodeLister { - return &nodeLister{listers.New[*v1.Node](indexer, v1.Resource("node"))} + return &nodeLister{listers.New[*corev1.Node](indexer, corev1.Resource("node"))} } diff --git a/vendor/k8s.io/client-go/listers/core/v1/persistentvolume.go b/vendor/k8s.io/client-go/listers/core/v1/persistentvolume.go index 17f19bb7a..3f3a17cba 100644 --- a/vendor/k8s.io/client-go/listers/core/v1/persistentvolume.go +++ b/vendor/k8s.io/client-go/listers/core/v1/persistentvolume.go @@ -19,10 +19,10 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/listers" - "k8s.io/client-go/tools/cache" + corev1 "k8s.io/api/core/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // PersistentVolumeLister helps list PersistentVolumes. @@ -30,19 +30,19 @@ import ( type PersistentVolumeLister interface { // List lists all PersistentVolumes in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.PersistentVolume, err error) + List(selector labels.Selector) (ret []*corev1.PersistentVolume, err error) // Get retrieves the PersistentVolume from the index for a given name. // Objects returned here must be treated as read-only. - Get(name string) (*v1.PersistentVolume, error) + Get(name string) (*corev1.PersistentVolume, error) PersistentVolumeListerExpansion } // persistentVolumeLister implements the PersistentVolumeLister interface. type persistentVolumeLister struct { - listers.ResourceIndexer[*v1.PersistentVolume] + listers.ResourceIndexer[*corev1.PersistentVolume] } // NewPersistentVolumeLister returns a new PersistentVolumeLister. func NewPersistentVolumeLister(indexer cache.Indexer) PersistentVolumeLister { - return &persistentVolumeLister{listers.New[*v1.PersistentVolume](indexer, v1.Resource("persistentvolume"))} + return &persistentVolumeLister{listers.New[*corev1.PersistentVolume](indexer, corev1.Resource("persistentvolume"))} } diff --git a/vendor/k8s.io/client-go/listers/core/v1/persistentvolumeclaim.go b/vendor/k8s.io/client-go/listers/core/v1/persistentvolumeclaim.go index ce9df9031..0665e6172 100644 --- a/vendor/k8s.io/client-go/listers/core/v1/persistentvolumeclaim.go +++ b/vendor/k8s.io/client-go/listers/core/v1/persistentvolumeclaim.go @@ -19,10 +19,10 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/listers" - "k8s.io/client-go/tools/cache" + corev1 "k8s.io/api/core/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // PersistentVolumeClaimLister helps list PersistentVolumeClaims. @@ -30,7 +30,7 @@ import ( type PersistentVolumeClaimLister interface { // List lists all PersistentVolumeClaims in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.PersistentVolumeClaim, err error) + List(selector labels.Selector) (ret []*corev1.PersistentVolumeClaim, err error) // PersistentVolumeClaims returns an object that can list and get PersistentVolumeClaims. PersistentVolumeClaims(namespace string) PersistentVolumeClaimNamespaceLister PersistentVolumeClaimListerExpansion @@ -38,17 +38,17 @@ type PersistentVolumeClaimLister interface { // persistentVolumeClaimLister implements the PersistentVolumeClaimLister interface. 
type persistentVolumeClaimLister struct { - listers.ResourceIndexer[*v1.PersistentVolumeClaim] + listers.ResourceIndexer[*corev1.PersistentVolumeClaim] } // NewPersistentVolumeClaimLister returns a new PersistentVolumeClaimLister. func NewPersistentVolumeClaimLister(indexer cache.Indexer) PersistentVolumeClaimLister { - return &persistentVolumeClaimLister{listers.New[*v1.PersistentVolumeClaim](indexer, v1.Resource("persistentvolumeclaim"))} + return &persistentVolumeClaimLister{listers.New[*corev1.PersistentVolumeClaim](indexer, corev1.Resource("persistentvolumeclaim"))} } // PersistentVolumeClaims returns an object that can list and get PersistentVolumeClaims. func (s *persistentVolumeClaimLister) PersistentVolumeClaims(namespace string) PersistentVolumeClaimNamespaceLister { - return persistentVolumeClaimNamespaceLister{listers.NewNamespaced[*v1.PersistentVolumeClaim](s.ResourceIndexer, namespace)} + return persistentVolumeClaimNamespaceLister{listers.NewNamespaced[*corev1.PersistentVolumeClaim](s.ResourceIndexer, namespace)} } // PersistentVolumeClaimNamespaceLister helps list and get PersistentVolumeClaims. @@ -56,15 +56,15 @@ func (s *persistentVolumeClaimLister) PersistentVolumeClaims(namespace string) P type PersistentVolumeClaimNamespaceLister interface { // List lists all PersistentVolumeClaims in the indexer for a given namespace. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.PersistentVolumeClaim, err error) + List(selector labels.Selector) (ret []*corev1.PersistentVolumeClaim, err error) // Get retrieves the PersistentVolumeClaim from the indexer for a given namespace and name. // Objects returned here must be treated as read-only. - Get(name string) (*v1.PersistentVolumeClaim, error) + Get(name string) (*corev1.PersistentVolumeClaim, error) PersistentVolumeClaimNamespaceListerExpansion } // persistentVolumeClaimNamespaceLister implements the PersistentVolumeClaimNamespaceLister // interface. type persistentVolumeClaimNamespaceLister struct { - listers.ResourceIndexer[*v1.PersistentVolumeClaim] + listers.ResourceIndexer[*corev1.PersistentVolumeClaim] } diff --git a/vendor/k8s.io/client-go/listers/core/v1/pod.go b/vendor/k8s.io/client-go/listers/core/v1/pod.go index b17a8382a..0ea9d678b 100644 --- a/vendor/k8s.io/client-go/listers/core/v1/pod.go +++ b/vendor/k8s.io/client-go/listers/core/v1/pod.go @@ -19,10 +19,10 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/listers" - "k8s.io/client-go/tools/cache" + corev1 "k8s.io/api/core/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // PodLister helps list Pods. @@ -30,7 +30,7 @@ import ( type PodLister interface { // List lists all Pods in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.Pod, err error) + List(selector labels.Selector) (ret []*corev1.Pod, err error) // Pods returns an object that can list and get Pods. Pods(namespace string) PodNamespaceLister PodListerExpansion @@ -38,17 +38,17 @@ type PodLister interface { // podLister implements the PodLister interface. type podLister struct { - listers.ResourceIndexer[*v1.Pod] + listers.ResourceIndexer[*corev1.Pod] } // NewPodLister returns a new PodLister. 
func NewPodLister(indexer cache.Indexer) PodLister { - return &podLister{listers.New[*v1.Pod](indexer, v1.Resource("pod"))} + return &podLister{listers.New[*corev1.Pod](indexer, corev1.Resource("pod"))} } // Pods returns an object that can list and get Pods. func (s *podLister) Pods(namespace string) PodNamespaceLister { - return podNamespaceLister{listers.NewNamespaced[*v1.Pod](s.ResourceIndexer, namespace)} + return podNamespaceLister{listers.NewNamespaced[*corev1.Pod](s.ResourceIndexer, namespace)} } // PodNamespaceLister helps list and get Pods. @@ -56,15 +56,15 @@ func (s *podLister) Pods(namespace string) PodNamespaceLister { type PodNamespaceLister interface { // List lists all Pods in the indexer for a given namespace. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.Pod, err error) + List(selector labels.Selector) (ret []*corev1.Pod, err error) // Get retrieves the Pod from the indexer for a given namespace and name. // Objects returned here must be treated as read-only. - Get(name string) (*v1.Pod, error) + Get(name string) (*corev1.Pod, error) PodNamespaceListerExpansion } // podNamespaceLister implements the PodNamespaceLister // interface. type podNamespaceLister struct { - listers.ResourceIndexer[*v1.Pod] + listers.ResourceIndexer[*corev1.Pod] } diff --git a/vendor/k8s.io/client-go/listers/core/v1/podtemplate.go b/vendor/k8s.io/client-go/listers/core/v1/podtemplate.go index 8ac93148f..6ee5daf08 100644 --- a/vendor/k8s.io/client-go/listers/core/v1/podtemplate.go +++ b/vendor/k8s.io/client-go/listers/core/v1/podtemplate.go @@ -19,10 +19,10 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/listers" - "k8s.io/client-go/tools/cache" + corev1 "k8s.io/api/core/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // PodTemplateLister helps list PodTemplates. @@ -30,7 +30,7 @@ import ( type PodTemplateLister interface { // List lists all PodTemplates in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.PodTemplate, err error) + List(selector labels.Selector) (ret []*corev1.PodTemplate, err error) // PodTemplates returns an object that can list and get PodTemplates. PodTemplates(namespace string) PodTemplateNamespaceLister PodTemplateListerExpansion @@ -38,17 +38,17 @@ type PodTemplateLister interface { // podTemplateLister implements the PodTemplateLister interface. type podTemplateLister struct { - listers.ResourceIndexer[*v1.PodTemplate] + listers.ResourceIndexer[*corev1.PodTemplate] } // NewPodTemplateLister returns a new PodTemplateLister. func NewPodTemplateLister(indexer cache.Indexer) PodTemplateLister { - return &podTemplateLister{listers.New[*v1.PodTemplate](indexer, v1.Resource("podtemplate"))} + return &podTemplateLister{listers.New[*corev1.PodTemplate](indexer, corev1.Resource("podtemplate"))} } // PodTemplates returns an object that can list and get PodTemplates. func (s *podTemplateLister) PodTemplates(namespace string) PodTemplateNamespaceLister { - return podTemplateNamespaceLister{listers.NewNamespaced[*v1.PodTemplate](s.ResourceIndexer, namespace)} + return podTemplateNamespaceLister{listers.NewNamespaced[*corev1.PodTemplate](s.ResourceIndexer, namespace)} } // PodTemplateNamespaceLister helps list and get PodTemplates. 
@@ -56,15 +56,15 @@ func (s *podTemplateLister) PodTemplates(namespace string) PodTemplateNamespaceL type PodTemplateNamespaceLister interface { // List lists all PodTemplates in the indexer for a given namespace. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.PodTemplate, err error) + List(selector labels.Selector) (ret []*corev1.PodTemplate, err error) // Get retrieves the PodTemplate from the indexer for a given namespace and name. // Objects returned here must be treated as read-only. - Get(name string) (*v1.PodTemplate, error) + Get(name string) (*corev1.PodTemplate, error) PodTemplateNamespaceListerExpansion } // podTemplateNamespaceLister implements the PodTemplateNamespaceLister // interface. type podTemplateNamespaceLister struct { - listers.ResourceIndexer[*v1.PodTemplate] + listers.ResourceIndexer[*corev1.PodTemplate] } diff --git a/vendor/k8s.io/client-go/listers/core/v1/replicationcontroller.go b/vendor/k8s.io/client-go/listers/core/v1/replicationcontroller.go index 8ce23fc09..8d366f740 100644 --- a/vendor/k8s.io/client-go/listers/core/v1/replicationcontroller.go +++ b/vendor/k8s.io/client-go/listers/core/v1/replicationcontroller.go @@ -19,10 +19,10 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/listers" - "k8s.io/client-go/tools/cache" + corev1 "k8s.io/api/core/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // ReplicationControllerLister helps list ReplicationControllers. @@ -30,7 +30,7 @@ import ( type ReplicationControllerLister interface { // List lists all ReplicationControllers in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.ReplicationController, err error) + List(selector labels.Selector) (ret []*corev1.ReplicationController, err error) // ReplicationControllers returns an object that can list and get ReplicationControllers. ReplicationControllers(namespace string) ReplicationControllerNamespaceLister ReplicationControllerListerExpansion @@ -38,17 +38,17 @@ type ReplicationControllerLister interface { // replicationControllerLister implements the ReplicationControllerLister interface. type replicationControllerLister struct { - listers.ResourceIndexer[*v1.ReplicationController] + listers.ResourceIndexer[*corev1.ReplicationController] } // NewReplicationControllerLister returns a new ReplicationControllerLister. func NewReplicationControllerLister(indexer cache.Indexer) ReplicationControllerLister { - return &replicationControllerLister{listers.New[*v1.ReplicationController](indexer, v1.Resource("replicationcontroller"))} + return &replicationControllerLister{listers.New[*corev1.ReplicationController](indexer, corev1.Resource("replicationcontroller"))} } // ReplicationControllers returns an object that can list and get ReplicationControllers. func (s *replicationControllerLister) ReplicationControllers(namespace string) ReplicationControllerNamespaceLister { - return replicationControllerNamespaceLister{listers.NewNamespaced[*v1.ReplicationController](s.ResourceIndexer, namespace)} + return replicationControllerNamespaceLister{listers.NewNamespaced[*corev1.ReplicationController](s.ResourceIndexer, namespace)} } // ReplicationControllerNamespaceLister helps list and get ReplicationControllers. 
@@ -56,15 +56,15 @@ func (s *replicationControllerLister) ReplicationControllers(namespace string) R type ReplicationControllerNamespaceLister interface { // List lists all ReplicationControllers in the indexer for a given namespace. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.ReplicationController, err error) + List(selector labels.Selector) (ret []*corev1.ReplicationController, err error) // Get retrieves the ReplicationController from the indexer for a given namespace and name. // Objects returned here must be treated as read-only. - Get(name string) (*v1.ReplicationController, error) + Get(name string) (*corev1.ReplicationController, error) ReplicationControllerNamespaceListerExpansion } // replicationControllerNamespaceLister implements the ReplicationControllerNamespaceLister // interface. type replicationControllerNamespaceLister struct { - listers.ResourceIndexer[*v1.ReplicationController] + listers.ResourceIndexer[*corev1.ReplicationController] } diff --git a/vendor/k8s.io/client-go/listers/core/v1/resourcequota.go b/vendor/k8s.io/client-go/listers/core/v1/resourcequota.go index 4b46194a2..67dd9a286 100644 --- a/vendor/k8s.io/client-go/listers/core/v1/resourcequota.go +++ b/vendor/k8s.io/client-go/listers/core/v1/resourcequota.go @@ -19,10 +19,10 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/listers" - "k8s.io/client-go/tools/cache" + corev1 "k8s.io/api/core/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // ResourceQuotaLister helps list ResourceQuotas. @@ -30,7 +30,7 @@ import ( type ResourceQuotaLister interface { // List lists all ResourceQuotas in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.ResourceQuota, err error) + List(selector labels.Selector) (ret []*corev1.ResourceQuota, err error) // ResourceQuotas returns an object that can list and get ResourceQuotas. ResourceQuotas(namespace string) ResourceQuotaNamespaceLister ResourceQuotaListerExpansion @@ -38,17 +38,17 @@ type ResourceQuotaLister interface { // resourceQuotaLister implements the ResourceQuotaLister interface. type resourceQuotaLister struct { - listers.ResourceIndexer[*v1.ResourceQuota] + listers.ResourceIndexer[*corev1.ResourceQuota] } // NewResourceQuotaLister returns a new ResourceQuotaLister. func NewResourceQuotaLister(indexer cache.Indexer) ResourceQuotaLister { - return &resourceQuotaLister{listers.New[*v1.ResourceQuota](indexer, v1.Resource("resourcequota"))} + return &resourceQuotaLister{listers.New[*corev1.ResourceQuota](indexer, corev1.Resource("resourcequota"))} } // ResourceQuotas returns an object that can list and get ResourceQuotas. func (s *resourceQuotaLister) ResourceQuotas(namespace string) ResourceQuotaNamespaceLister { - return resourceQuotaNamespaceLister{listers.NewNamespaced[*v1.ResourceQuota](s.ResourceIndexer, namespace)} + return resourceQuotaNamespaceLister{listers.NewNamespaced[*corev1.ResourceQuota](s.ResourceIndexer, namespace)} } // ResourceQuotaNamespaceLister helps list and get ResourceQuotas. @@ -56,15 +56,15 @@ func (s *resourceQuotaLister) ResourceQuotas(namespace string) ResourceQuotaName type ResourceQuotaNamespaceLister interface { // List lists all ResourceQuotas in the indexer for a given namespace. // Objects returned here must be treated as read-only. 
- List(selector labels.Selector) (ret []*v1.ResourceQuota, err error) + List(selector labels.Selector) (ret []*corev1.ResourceQuota, err error) // Get retrieves the ResourceQuota from the indexer for a given namespace and name. // Objects returned here must be treated as read-only. - Get(name string) (*v1.ResourceQuota, error) + Get(name string) (*corev1.ResourceQuota, error) ResourceQuotaNamespaceListerExpansion } // resourceQuotaNamespaceLister implements the ResourceQuotaNamespaceLister // interface. type resourceQuotaNamespaceLister struct { - listers.ResourceIndexer[*v1.ResourceQuota] + listers.ResourceIndexer[*corev1.ResourceQuota] } diff --git a/vendor/k8s.io/client-go/listers/core/v1/secret.go b/vendor/k8s.io/client-go/listers/core/v1/secret.go index 47a0c9a08..16a8da382 100644 --- a/vendor/k8s.io/client-go/listers/core/v1/secret.go +++ b/vendor/k8s.io/client-go/listers/core/v1/secret.go @@ -19,10 +19,10 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/listers" - "k8s.io/client-go/tools/cache" + corev1 "k8s.io/api/core/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // SecretLister helps list Secrets. @@ -30,7 +30,7 @@ import ( type SecretLister interface { // List lists all Secrets in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.Secret, err error) + List(selector labels.Selector) (ret []*corev1.Secret, err error) // Secrets returns an object that can list and get Secrets. Secrets(namespace string) SecretNamespaceLister SecretListerExpansion @@ -38,17 +38,17 @@ type SecretLister interface { // secretLister implements the SecretLister interface. type secretLister struct { - listers.ResourceIndexer[*v1.Secret] + listers.ResourceIndexer[*corev1.Secret] } // NewSecretLister returns a new SecretLister. func NewSecretLister(indexer cache.Indexer) SecretLister { - return &secretLister{listers.New[*v1.Secret](indexer, v1.Resource("secret"))} + return &secretLister{listers.New[*corev1.Secret](indexer, corev1.Resource("secret"))} } // Secrets returns an object that can list and get Secrets. func (s *secretLister) Secrets(namespace string) SecretNamespaceLister { - return secretNamespaceLister{listers.NewNamespaced[*v1.Secret](s.ResourceIndexer, namespace)} + return secretNamespaceLister{listers.NewNamespaced[*corev1.Secret](s.ResourceIndexer, namespace)} } // SecretNamespaceLister helps list and get Secrets. @@ -56,15 +56,15 @@ func (s *secretLister) Secrets(namespace string) SecretNamespaceLister { type SecretNamespaceLister interface { // List lists all Secrets in the indexer for a given namespace. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.Secret, err error) + List(selector labels.Selector) (ret []*corev1.Secret, err error) // Get retrieves the Secret from the indexer for a given namespace and name. // Objects returned here must be treated as read-only. - Get(name string) (*v1.Secret, error) + Get(name string) (*corev1.Secret, error) SecretNamespaceListerExpansion } // secretNamespaceLister implements the SecretNamespaceLister // interface. 
type secretNamespaceLister struct { - listers.ResourceIndexer[*v1.Secret] + listers.ResourceIndexer[*corev1.Secret] } diff --git a/vendor/k8s.io/client-go/listers/core/v1/service.go b/vendor/k8s.io/client-go/listers/core/v1/service.go index 536fb337f..dcd894031 100644 --- a/vendor/k8s.io/client-go/listers/core/v1/service.go +++ b/vendor/k8s.io/client-go/listers/core/v1/service.go @@ -19,10 +19,10 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/listers" - "k8s.io/client-go/tools/cache" + corev1 "k8s.io/api/core/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // ServiceLister helps list Services. @@ -30,7 +30,7 @@ import ( type ServiceLister interface { // List lists all Services in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.Service, err error) + List(selector labels.Selector) (ret []*corev1.Service, err error) // Services returns an object that can list and get Services. Services(namespace string) ServiceNamespaceLister ServiceListerExpansion @@ -38,17 +38,17 @@ type ServiceLister interface { // serviceLister implements the ServiceLister interface. type serviceLister struct { - listers.ResourceIndexer[*v1.Service] + listers.ResourceIndexer[*corev1.Service] } // NewServiceLister returns a new ServiceLister. func NewServiceLister(indexer cache.Indexer) ServiceLister { - return &serviceLister{listers.New[*v1.Service](indexer, v1.Resource("service"))} + return &serviceLister{listers.New[*corev1.Service](indexer, corev1.Resource("service"))} } // Services returns an object that can list and get Services. func (s *serviceLister) Services(namespace string) ServiceNamespaceLister { - return serviceNamespaceLister{listers.NewNamespaced[*v1.Service](s.ResourceIndexer, namespace)} + return serviceNamespaceLister{listers.NewNamespaced[*corev1.Service](s.ResourceIndexer, namespace)} } // ServiceNamespaceLister helps list and get Services. @@ -56,15 +56,15 @@ func (s *serviceLister) Services(namespace string) ServiceNamespaceLister { type ServiceNamespaceLister interface { // List lists all Services in the indexer for a given namespace. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.Service, err error) + List(selector labels.Selector) (ret []*corev1.Service, err error) // Get retrieves the Service from the indexer for a given namespace and name. // Objects returned here must be treated as read-only. - Get(name string) (*v1.Service, error) + Get(name string) (*corev1.Service, error) ServiceNamespaceListerExpansion } // serviceNamespaceLister implements the ServiceNamespaceLister // interface. type serviceNamespaceLister struct { - listers.ResourceIndexer[*v1.Service] + listers.ResourceIndexer[*corev1.Service] } diff --git a/vendor/k8s.io/client-go/listers/core/v1/serviceaccount.go b/vendor/k8s.io/client-go/listers/core/v1/serviceaccount.go index 8a4af4f4c..aaf888a82 100644 --- a/vendor/k8s.io/client-go/listers/core/v1/serviceaccount.go +++ b/vendor/k8s.io/client-go/listers/core/v1/serviceaccount.go @@ -19,10 +19,10 @@ limitations under the License. 
package v1 import ( - v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/listers" - "k8s.io/client-go/tools/cache" + corev1 "k8s.io/api/core/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // ServiceAccountLister helps list ServiceAccounts. @@ -30,7 +30,7 @@ import ( type ServiceAccountLister interface { // List lists all ServiceAccounts in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.ServiceAccount, err error) + List(selector labels.Selector) (ret []*corev1.ServiceAccount, err error) // ServiceAccounts returns an object that can list and get ServiceAccounts. ServiceAccounts(namespace string) ServiceAccountNamespaceLister ServiceAccountListerExpansion @@ -38,17 +38,17 @@ type ServiceAccountLister interface { // serviceAccountLister implements the ServiceAccountLister interface. type serviceAccountLister struct { - listers.ResourceIndexer[*v1.ServiceAccount] + listers.ResourceIndexer[*corev1.ServiceAccount] } // NewServiceAccountLister returns a new ServiceAccountLister. func NewServiceAccountLister(indexer cache.Indexer) ServiceAccountLister { - return &serviceAccountLister{listers.New[*v1.ServiceAccount](indexer, v1.Resource("serviceaccount"))} + return &serviceAccountLister{listers.New[*corev1.ServiceAccount](indexer, corev1.Resource("serviceaccount"))} } // ServiceAccounts returns an object that can list and get ServiceAccounts. func (s *serviceAccountLister) ServiceAccounts(namespace string) ServiceAccountNamespaceLister { - return serviceAccountNamespaceLister{listers.NewNamespaced[*v1.ServiceAccount](s.ResourceIndexer, namespace)} + return serviceAccountNamespaceLister{listers.NewNamespaced[*corev1.ServiceAccount](s.ResourceIndexer, namespace)} } // ServiceAccountNamespaceLister helps list and get ServiceAccounts. @@ -56,15 +56,15 @@ func (s *serviceAccountLister) ServiceAccounts(namespace string) ServiceAccountN type ServiceAccountNamespaceLister interface { // List lists all ServiceAccounts in the indexer for a given namespace. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.ServiceAccount, err error) + List(selector labels.Selector) (ret []*corev1.ServiceAccount, err error) // Get retrieves the ServiceAccount from the indexer for a given namespace and name. // Objects returned here must be treated as read-only. - Get(name string) (*v1.ServiceAccount, error) + Get(name string) (*corev1.ServiceAccount, error) ServiceAccountNamespaceListerExpansion } // serviceAccountNamespaceLister implements the ServiceAccountNamespaceLister // interface. type serviceAccountNamespaceLister struct { - listers.ResourceIndexer[*v1.ServiceAccount] + listers.ResourceIndexer[*corev1.ServiceAccount] } diff --git a/vendor/k8s.io/client-go/listers/discovery/v1/endpointslice.go b/vendor/k8s.io/client-go/listers/discovery/v1/endpointslice.go index dcb18f19a..0255ef8bb 100644 --- a/vendor/k8s.io/client-go/listers/discovery/v1/endpointslice.go +++ b/vendor/k8s.io/client-go/listers/discovery/v1/endpointslice.go @@ -19,10 +19,10 @@ limitations under the License. 
package v1 import ( - v1 "k8s.io/api/discovery/v1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/listers" - "k8s.io/client-go/tools/cache" + discoveryv1 "k8s.io/api/discovery/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // EndpointSliceLister helps list EndpointSlices. @@ -30,7 +30,7 @@ import ( type EndpointSliceLister interface { // List lists all EndpointSlices in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.EndpointSlice, err error) + List(selector labels.Selector) (ret []*discoveryv1.EndpointSlice, err error) // EndpointSlices returns an object that can list and get EndpointSlices. EndpointSlices(namespace string) EndpointSliceNamespaceLister EndpointSliceListerExpansion @@ -38,17 +38,17 @@ type EndpointSliceLister interface { // endpointSliceLister implements the EndpointSliceLister interface. type endpointSliceLister struct { - listers.ResourceIndexer[*v1.EndpointSlice] + listers.ResourceIndexer[*discoveryv1.EndpointSlice] } // NewEndpointSliceLister returns a new EndpointSliceLister. func NewEndpointSliceLister(indexer cache.Indexer) EndpointSliceLister { - return &endpointSliceLister{listers.New[*v1.EndpointSlice](indexer, v1.Resource("endpointslice"))} + return &endpointSliceLister{listers.New[*discoveryv1.EndpointSlice](indexer, discoveryv1.Resource("endpointslice"))} } // EndpointSlices returns an object that can list and get EndpointSlices. func (s *endpointSliceLister) EndpointSlices(namespace string) EndpointSliceNamespaceLister { - return endpointSliceNamespaceLister{listers.NewNamespaced[*v1.EndpointSlice](s.ResourceIndexer, namespace)} + return endpointSliceNamespaceLister{listers.NewNamespaced[*discoveryv1.EndpointSlice](s.ResourceIndexer, namespace)} } // EndpointSliceNamespaceLister helps list and get EndpointSlices. @@ -56,15 +56,15 @@ func (s *endpointSliceLister) EndpointSlices(namespace string) EndpointSliceName type EndpointSliceNamespaceLister interface { // List lists all EndpointSlices in the indexer for a given namespace. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.EndpointSlice, err error) + List(selector labels.Selector) (ret []*discoveryv1.EndpointSlice, err error) // Get retrieves the EndpointSlice from the indexer for a given namespace and name. // Objects returned here must be treated as read-only. - Get(name string) (*v1.EndpointSlice, error) + Get(name string) (*discoveryv1.EndpointSlice, error) EndpointSliceNamespaceListerExpansion } // endpointSliceNamespaceLister implements the EndpointSliceNamespaceLister // interface. type endpointSliceNamespaceLister struct { - listers.ResourceIndexer[*v1.EndpointSlice] + listers.ResourceIndexer[*discoveryv1.EndpointSlice] } diff --git a/vendor/k8s.io/client-go/listers/discovery/v1beta1/endpointslice.go b/vendor/k8s.io/client-go/listers/discovery/v1beta1/endpointslice.go index d3762f5c2..2fb6f0549 100644 --- a/vendor/k8s.io/client-go/listers/discovery/v1beta1/endpointslice.go +++ b/vendor/k8s.io/client-go/listers/discovery/v1beta1/endpointslice.go @@ -19,10 +19,10 @@ limitations under the License. 
package v1beta1 import ( - v1beta1 "k8s.io/api/discovery/v1beta1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/listers" - "k8s.io/client-go/tools/cache" + discoveryv1beta1 "k8s.io/api/discovery/v1beta1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // EndpointSliceLister helps list EndpointSlices. @@ -30,7 +30,7 @@ import ( type EndpointSliceLister interface { // List lists all EndpointSlices in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1beta1.EndpointSlice, err error) + List(selector labels.Selector) (ret []*discoveryv1beta1.EndpointSlice, err error) // EndpointSlices returns an object that can list and get EndpointSlices. EndpointSlices(namespace string) EndpointSliceNamespaceLister EndpointSliceListerExpansion @@ -38,17 +38,17 @@ type EndpointSliceLister interface { // endpointSliceLister implements the EndpointSliceLister interface. type endpointSliceLister struct { - listers.ResourceIndexer[*v1beta1.EndpointSlice] + listers.ResourceIndexer[*discoveryv1beta1.EndpointSlice] } // NewEndpointSliceLister returns a new EndpointSliceLister. func NewEndpointSliceLister(indexer cache.Indexer) EndpointSliceLister { - return &endpointSliceLister{listers.New[*v1beta1.EndpointSlice](indexer, v1beta1.Resource("endpointslice"))} + return &endpointSliceLister{listers.New[*discoveryv1beta1.EndpointSlice](indexer, discoveryv1beta1.Resource("endpointslice"))} } // EndpointSlices returns an object that can list and get EndpointSlices. func (s *endpointSliceLister) EndpointSlices(namespace string) EndpointSliceNamespaceLister { - return endpointSliceNamespaceLister{listers.NewNamespaced[*v1beta1.EndpointSlice](s.ResourceIndexer, namespace)} + return endpointSliceNamespaceLister{listers.NewNamespaced[*discoveryv1beta1.EndpointSlice](s.ResourceIndexer, namespace)} } // EndpointSliceNamespaceLister helps list and get EndpointSlices. @@ -56,15 +56,15 @@ func (s *endpointSliceLister) EndpointSlices(namespace string) EndpointSliceName type EndpointSliceNamespaceLister interface { // List lists all EndpointSlices in the indexer for a given namespace. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1beta1.EndpointSlice, err error) + List(selector labels.Selector) (ret []*discoveryv1beta1.EndpointSlice, err error) // Get retrieves the EndpointSlice from the indexer for a given namespace and name. // Objects returned here must be treated as read-only. - Get(name string) (*v1beta1.EndpointSlice, error) + Get(name string) (*discoveryv1beta1.EndpointSlice, error) EndpointSliceNamespaceListerExpansion } // endpointSliceNamespaceLister implements the EndpointSliceNamespaceLister // interface. type endpointSliceNamespaceLister struct { - listers.ResourceIndexer[*v1beta1.EndpointSlice] + listers.ResourceIndexer[*discoveryv1beta1.EndpointSlice] } diff --git a/vendor/k8s.io/client-go/listers/events/v1/event.go b/vendor/k8s.io/client-go/listers/events/v1/event.go index 66e3c6466..9ea5bbaf8 100644 --- a/vendor/k8s.io/client-go/listers/events/v1/event.go +++ b/vendor/k8s.io/client-go/listers/events/v1/event.go @@ -19,10 +19,10 @@ limitations under the License. 
package v1 import ( - v1 "k8s.io/api/events/v1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/listers" - "k8s.io/client-go/tools/cache" + eventsv1 "k8s.io/api/events/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // EventLister helps list Events. @@ -30,7 +30,7 @@ import ( type EventLister interface { // List lists all Events in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.Event, err error) + List(selector labels.Selector) (ret []*eventsv1.Event, err error) // Events returns an object that can list and get Events. Events(namespace string) EventNamespaceLister EventListerExpansion @@ -38,17 +38,17 @@ type EventLister interface { // eventLister implements the EventLister interface. type eventLister struct { - listers.ResourceIndexer[*v1.Event] + listers.ResourceIndexer[*eventsv1.Event] } // NewEventLister returns a new EventLister. func NewEventLister(indexer cache.Indexer) EventLister { - return &eventLister{listers.New[*v1.Event](indexer, v1.Resource("event"))} + return &eventLister{listers.New[*eventsv1.Event](indexer, eventsv1.Resource("event"))} } // Events returns an object that can list and get Events. func (s *eventLister) Events(namespace string) EventNamespaceLister { - return eventNamespaceLister{listers.NewNamespaced[*v1.Event](s.ResourceIndexer, namespace)} + return eventNamespaceLister{listers.NewNamespaced[*eventsv1.Event](s.ResourceIndexer, namespace)} } // EventNamespaceLister helps list and get Events. @@ -56,15 +56,15 @@ func (s *eventLister) Events(namespace string) EventNamespaceLister { type EventNamespaceLister interface { // List lists all Events in the indexer for a given namespace. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.Event, err error) + List(selector labels.Selector) (ret []*eventsv1.Event, err error) // Get retrieves the Event from the indexer for a given namespace and name. // Objects returned here must be treated as read-only. - Get(name string) (*v1.Event, error) + Get(name string) (*eventsv1.Event, error) EventNamespaceListerExpansion } // eventNamespaceLister implements the EventNamespaceLister // interface. type eventNamespaceLister struct { - listers.ResourceIndexer[*v1.Event] + listers.ResourceIndexer[*eventsv1.Event] } diff --git a/vendor/k8s.io/client-go/listers/events/v1beta1/event.go b/vendor/k8s.io/client-go/listers/events/v1beta1/event.go index 3d51bb265..24fc96261 100644 --- a/vendor/k8s.io/client-go/listers/events/v1beta1/event.go +++ b/vendor/k8s.io/client-go/listers/events/v1beta1/event.go @@ -19,10 +19,10 @@ limitations under the License. package v1beta1 import ( - v1beta1 "k8s.io/api/events/v1beta1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/listers" - "k8s.io/client-go/tools/cache" + eventsv1beta1 "k8s.io/api/events/v1beta1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // EventLister helps list Events. @@ -30,7 +30,7 @@ import ( type EventLister interface { // List lists all Events in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1beta1.Event, err error) + List(selector labels.Selector) (ret []*eventsv1beta1.Event, err error) // Events returns an object that can list and get Events. 
Events(namespace string) EventNamespaceLister EventListerExpansion @@ -38,17 +38,17 @@ type EventLister interface { // eventLister implements the EventLister interface. type eventLister struct { - listers.ResourceIndexer[*v1beta1.Event] + listers.ResourceIndexer[*eventsv1beta1.Event] } // NewEventLister returns a new EventLister. func NewEventLister(indexer cache.Indexer) EventLister { - return &eventLister{listers.New[*v1beta1.Event](indexer, v1beta1.Resource("event"))} + return &eventLister{listers.New[*eventsv1beta1.Event](indexer, eventsv1beta1.Resource("event"))} } // Events returns an object that can list and get Events. func (s *eventLister) Events(namespace string) EventNamespaceLister { - return eventNamespaceLister{listers.NewNamespaced[*v1beta1.Event](s.ResourceIndexer, namespace)} + return eventNamespaceLister{listers.NewNamespaced[*eventsv1beta1.Event](s.ResourceIndexer, namespace)} } // EventNamespaceLister helps list and get Events. @@ -56,15 +56,15 @@ func (s *eventLister) Events(namespace string) EventNamespaceLister { type EventNamespaceLister interface { // List lists all Events in the indexer for a given namespace. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1beta1.Event, err error) + List(selector labels.Selector) (ret []*eventsv1beta1.Event, err error) // Get retrieves the Event from the indexer for a given namespace and name. // Objects returned here must be treated as read-only. - Get(name string) (*v1beta1.Event, error) + Get(name string) (*eventsv1beta1.Event, error) EventNamespaceListerExpansion } // eventNamespaceLister implements the EventNamespaceLister // interface. type eventNamespaceLister struct { - listers.ResourceIndexer[*v1beta1.Event] + listers.ResourceIndexer[*eventsv1beta1.Event] } diff --git a/vendor/k8s.io/client-go/listers/extensions/v1beta1/daemonset.go b/vendor/k8s.io/client-go/listers/extensions/v1beta1/daemonset.go index 4510b4236..c78576088 100644 --- a/vendor/k8s.io/client-go/listers/extensions/v1beta1/daemonset.go +++ b/vendor/k8s.io/client-go/listers/extensions/v1beta1/daemonset.go @@ -19,10 +19,10 @@ limitations under the License. package v1beta1 import ( - v1beta1 "k8s.io/api/extensions/v1beta1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/listers" - "k8s.io/client-go/tools/cache" + extensionsv1beta1 "k8s.io/api/extensions/v1beta1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // DaemonSetLister helps list DaemonSets. @@ -30,7 +30,7 @@ import ( type DaemonSetLister interface { // List lists all DaemonSets in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1beta1.DaemonSet, err error) + List(selector labels.Selector) (ret []*extensionsv1beta1.DaemonSet, err error) // DaemonSets returns an object that can list and get DaemonSets. DaemonSets(namespace string) DaemonSetNamespaceLister DaemonSetListerExpansion @@ -38,17 +38,17 @@ type DaemonSetLister interface { // daemonSetLister implements the DaemonSetLister interface. type daemonSetLister struct { - listers.ResourceIndexer[*v1beta1.DaemonSet] + listers.ResourceIndexer[*extensionsv1beta1.DaemonSet] } // NewDaemonSetLister returns a new DaemonSetLister. 
func NewDaemonSetLister(indexer cache.Indexer) DaemonSetLister { - return &daemonSetLister{listers.New[*v1beta1.DaemonSet](indexer, v1beta1.Resource("daemonset"))} + return &daemonSetLister{listers.New[*extensionsv1beta1.DaemonSet](indexer, extensionsv1beta1.Resource("daemonset"))} } // DaemonSets returns an object that can list and get DaemonSets. func (s *daemonSetLister) DaemonSets(namespace string) DaemonSetNamespaceLister { - return daemonSetNamespaceLister{listers.NewNamespaced[*v1beta1.DaemonSet](s.ResourceIndexer, namespace)} + return daemonSetNamespaceLister{listers.NewNamespaced[*extensionsv1beta1.DaemonSet](s.ResourceIndexer, namespace)} } // DaemonSetNamespaceLister helps list and get DaemonSets. @@ -56,15 +56,15 @@ func (s *daemonSetLister) DaemonSets(namespace string) DaemonSetNamespaceLister type DaemonSetNamespaceLister interface { // List lists all DaemonSets in the indexer for a given namespace. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1beta1.DaemonSet, err error) + List(selector labels.Selector) (ret []*extensionsv1beta1.DaemonSet, err error) // Get retrieves the DaemonSet from the indexer for a given namespace and name. // Objects returned here must be treated as read-only. - Get(name string) (*v1beta1.DaemonSet, error) + Get(name string) (*extensionsv1beta1.DaemonSet, error) DaemonSetNamespaceListerExpansion } // daemonSetNamespaceLister implements the DaemonSetNamespaceLister // interface. type daemonSetNamespaceLister struct { - listers.ResourceIndexer[*v1beta1.DaemonSet] + listers.ResourceIndexer[*extensionsv1beta1.DaemonSet] } diff --git a/vendor/k8s.io/client-go/listers/extensions/v1beta1/deployment.go b/vendor/k8s.io/client-go/listers/extensions/v1beta1/deployment.go index 149047c97..efaea3991 100644 --- a/vendor/k8s.io/client-go/listers/extensions/v1beta1/deployment.go +++ b/vendor/k8s.io/client-go/listers/extensions/v1beta1/deployment.go @@ -19,10 +19,10 @@ limitations under the License. package v1beta1 import ( - v1beta1 "k8s.io/api/extensions/v1beta1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/listers" - "k8s.io/client-go/tools/cache" + extensionsv1beta1 "k8s.io/api/extensions/v1beta1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // DeploymentLister helps list Deployments. @@ -30,7 +30,7 @@ import ( type DeploymentLister interface { // List lists all Deployments in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1beta1.Deployment, err error) + List(selector labels.Selector) (ret []*extensionsv1beta1.Deployment, err error) // Deployments returns an object that can list and get Deployments. Deployments(namespace string) DeploymentNamespaceLister DeploymentListerExpansion @@ -38,17 +38,17 @@ type DeploymentLister interface { // deploymentLister implements the DeploymentLister interface. type deploymentLister struct { - listers.ResourceIndexer[*v1beta1.Deployment] + listers.ResourceIndexer[*extensionsv1beta1.Deployment] } // NewDeploymentLister returns a new DeploymentLister. func NewDeploymentLister(indexer cache.Indexer) DeploymentLister { - return &deploymentLister{listers.New[*v1beta1.Deployment](indexer, v1beta1.Resource("deployment"))} + return &deploymentLister{listers.New[*extensionsv1beta1.Deployment](indexer, extensionsv1beta1.Resource("deployment"))} } // Deployments returns an object that can list and get Deployments. 
func (s *deploymentLister) Deployments(namespace string) DeploymentNamespaceLister { - return deploymentNamespaceLister{listers.NewNamespaced[*v1beta1.Deployment](s.ResourceIndexer, namespace)} + return deploymentNamespaceLister{listers.NewNamespaced[*extensionsv1beta1.Deployment](s.ResourceIndexer, namespace)} } // DeploymentNamespaceLister helps list and get Deployments. @@ -56,15 +56,15 @@ func (s *deploymentLister) Deployments(namespace string) DeploymentNamespaceList type DeploymentNamespaceLister interface { // List lists all Deployments in the indexer for a given namespace. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1beta1.Deployment, err error) + List(selector labels.Selector) (ret []*extensionsv1beta1.Deployment, err error) // Get retrieves the Deployment from the indexer for a given namespace and name. // Objects returned here must be treated as read-only. - Get(name string) (*v1beta1.Deployment, error) + Get(name string) (*extensionsv1beta1.Deployment, error) DeploymentNamespaceListerExpansion } // deploymentNamespaceLister implements the DeploymentNamespaceLister // interface. type deploymentNamespaceLister struct { - listers.ResourceIndexer[*v1beta1.Deployment] + listers.ResourceIndexer[*extensionsv1beta1.Deployment] } diff --git a/vendor/k8s.io/client-go/listers/extensions/v1beta1/ingress.go b/vendor/k8s.io/client-go/listers/extensions/v1beta1/ingress.go index b714eebb3..929a13faa 100644 --- a/vendor/k8s.io/client-go/listers/extensions/v1beta1/ingress.go +++ b/vendor/k8s.io/client-go/listers/extensions/v1beta1/ingress.go @@ -19,10 +19,10 @@ limitations under the License. package v1beta1 import ( - v1beta1 "k8s.io/api/extensions/v1beta1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/listers" - "k8s.io/client-go/tools/cache" + extensionsv1beta1 "k8s.io/api/extensions/v1beta1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // IngressLister helps list Ingresses. @@ -30,7 +30,7 @@ import ( type IngressLister interface { // List lists all Ingresses in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1beta1.Ingress, err error) + List(selector labels.Selector) (ret []*extensionsv1beta1.Ingress, err error) // Ingresses returns an object that can list and get Ingresses. Ingresses(namespace string) IngressNamespaceLister IngressListerExpansion @@ -38,17 +38,17 @@ type IngressLister interface { // ingressLister implements the IngressLister interface. type ingressLister struct { - listers.ResourceIndexer[*v1beta1.Ingress] + listers.ResourceIndexer[*extensionsv1beta1.Ingress] } // NewIngressLister returns a new IngressLister. func NewIngressLister(indexer cache.Indexer) IngressLister { - return &ingressLister{listers.New[*v1beta1.Ingress](indexer, v1beta1.Resource("ingress"))} + return &ingressLister{listers.New[*extensionsv1beta1.Ingress](indexer, extensionsv1beta1.Resource("ingress"))} } // Ingresses returns an object that can list and get Ingresses. func (s *ingressLister) Ingresses(namespace string) IngressNamespaceLister { - return ingressNamespaceLister{listers.NewNamespaced[*v1beta1.Ingress](s.ResourceIndexer, namespace)} + return ingressNamespaceLister{listers.NewNamespaced[*extensionsv1beta1.Ingress](s.ResourceIndexer, namespace)} } // IngressNamespaceLister helps list and get Ingresses. 
@@ -56,15 +56,15 @@ func (s *ingressLister) Ingresses(namespace string) IngressNamespaceLister { type IngressNamespaceLister interface { // List lists all Ingresses in the indexer for a given namespace. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1beta1.Ingress, err error) + List(selector labels.Selector) (ret []*extensionsv1beta1.Ingress, err error) // Get retrieves the Ingress from the indexer for a given namespace and name. // Objects returned here must be treated as read-only. - Get(name string) (*v1beta1.Ingress, error) + Get(name string) (*extensionsv1beta1.Ingress, error) IngressNamespaceListerExpansion } // ingressNamespaceLister implements the IngressNamespaceLister // interface. type ingressNamespaceLister struct { - listers.ResourceIndexer[*v1beta1.Ingress] + listers.ResourceIndexer[*extensionsv1beta1.Ingress] } diff --git a/vendor/k8s.io/client-go/listers/extensions/v1beta1/networkpolicy.go b/vendor/k8s.io/client-go/listers/extensions/v1beta1/networkpolicy.go index b31099c26..ee88fd974 100644 --- a/vendor/k8s.io/client-go/listers/extensions/v1beta1/networkpolicy.go +++ b/vendor/k8s.io/client-go/listers/extensions/v1beta1/networkpolicy.go @@ -19,10 +19,10 @@ limitations under the License. package v1beta1 import ( - v1beta1 "k8s.io/api/extensions/v1beta1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/listers" - "k8s.io/client-go/tools/cache" + extensionsv1beta1 "k8s.io/api/extensions/v1beta1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // NetworkPolicyLister helps list NetworkPolicies. @@ -30,7 +30,7 @@ import ( type NetworkPolicyLister interface { // List lists all NetworkPolicies in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1beta1.NetworkPolicy, err error) + List(selector labels.Selector) (ret []*extensionsv1beta1.NetworkPolicy, err error) // NetworkPolicies returns an object that can list and get NetworkPolicies. NetworkPolicies(namespace string) NetworkPolicyNamespaceLister NetworkPolicyListerExpansion @@ -38,17 +38,17 @@ type NetworkPolicyLister interface { // networkPolicyLister implements the NetworkPolicyLister interface. type networkPolicyLister struct { - listers.ResourceIndexer[*v1beta1.NetworkPolicy] + listers.ResourceIndexer[*extensionsv1beta1.NetworkPolicy] } // NewNetworkPolicyLister returns a new NetworkPolicyLister. func NewNetworkPolicyLister(indexer cache.Indexer) NetworkPolicyLister { - return &networkPolicyLister{listers.New[*v1beta1.NetworkPolicy](indexer, v1beta1.Resource("networkpolicy"))} + return &networkPolicyLister{listers.New[*extensionsv1beta1.NetworkPolicy](indexer, extensionsv1beta1.Resource("networkpolicy"))} } // NetworkPolicies returns an object that can list and get NetworkPolicies. func (s *networkPolicyLister) NetworkPolicies(namespace string) NetworkPolicyNamespaceLister { - return networkPolicyNamespaceLister{listers.NewNamespaced[*v1beta1.NetworkPolicy](s.ResourceIndexer, namespace)} + return networkPolicyNamespaceLister{listers.NewNamespaced[*extensionsv1beta1.NetworkPolicy](s.ResourceIndexer, namespace)} } // NetworkPolicyNamespaceLister helps list and get NetworkPolicies. @@ -56,15 +56,15 @@ func (s *networkPolicyLister) NetworkPolicies(namespace string) NetworkPolicyNam type NetworkPolicyNamespaceLister interface { // List lists all NetworkPolicies in the indexer for a given namespace. 
// Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1beta1.NetworkPolicy, err error) + List(selector labels.Selector) (ret []*extensionsv1beta1.NetworkPolicy, err error) // Get retrieves the NetworkPolicy from the indexer for a given namespace and name. // Objects returned here must be treated as read-only. - Get(name string) (*v1beta1.NetworkPolicy, error) + Get(name string) (*extensionsv1beta1.NetworkPolicy, error) NetworkPolicyNamespaceListerExpansion } // networkPolicyNamespaceLister implements the NetworkPolicyNamespaceLister // interface. type networkPolicyNamespaceLister struct { - listers.ResourceIndexer[*v1beta1.NetworkPolicy] + listers.ResourceIndexer[*extensionsv1beta1.NetworkPolicy] } diff --git a/vendor/k8s.io/client-go/listers/extensions/v1beta1/replicaset.go b/vendor/k8s.io/client-go/listers/extensions/v1beta1/replicaset.go index 604bee80b..853cc2bcd 100644 --- a/vendor/k8s.io/client-go/listers/extensions/v1beta1/replicaset.go +++ b/vendor/k8s.io/client-go/listers/extensions/v1beta1/replicaset.go @@ -19,10 +19,10 @@ limitations under the License. package v1beta1 import ( - v1beta1 "k8s.io/api/extensions/v1beta1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/listers" - "k8s.io/client-go/tools/cache" + extensionsv1beta1 "k8s.io/api/extensions/v1beta1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // ReplicaSetLister helps list ReplicaSets. @@ -30,7 +30,7 @@ import ( type ReplicaSetLister interface { // List lists all ReplicaSets in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1beta1.ReplicaSet, err error) + List(selector labels.Selector) (ret []*extensionsv1beta1.ReplicaSet, err error) // ReplicaSets returns an object that can list and get ReplicaSets. ReplicaSets(namespace string) ReplicaSetNamespaceLister ReplicaSetListerExpansion @@ -38,17 +38,17 @@ type ReplicaSetLister interface { // replicaSetLister implements the ReplicaSetLister interface. type replicaSetLister struct { - listers.ResourceIndexer[*v1beta1.ReplicaSet] + listers.ResourceIndexer[*extensionsv1beta1.ReplicaSet] } // NewReplicaSetLister returns a new ReplicaSetLister. func NewReplicaSetLister(indexer cache.Indexer) ReplicaSetLister { - return &replicaSetLister{listers.New[*v1beta1.ReplicaSet](indexer, v1beta1.Resource("replicaset"))} + return &replicaSetLister{listers.New[*extensionsv1beta1.ReplicaSet](indexer, extensionsv1beta1.Resource("replicaset"))} } // ReplicaSets returns an object that can list and get ReplicaSets. func (s *replicaSetLister) ReplicaSets(namespace string) ReplicaSetNamespaceLister { - return replicaSetNamespaceLister{listers.NewNamespaced[*v1beta1.ReplicaSet](s.ResourceIndexer, namespace)} + return replicaSetNamespaceLister{listers.NewNamespaced[*extensionsv1beta1.ReplicaSet](s.ResourceIndexer, namespace)} } // ReplicaSetNamespaceLister helps list and get ReplicaSets. @@ -56,15 +56,15 @@ func (s *replicaSetLister) ReplicaSets(namespace string) ReplicaSetNamespaceList type ReplicaSetNamespaceLister interface { // List lists all ReplicaSets in the indexer for a given namespace. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1beta1.ReplicaSet, err error) + List(selector labels.Selector) (ret []*extensionsv1beta1.ReplicaSet, err error) // Get retrieves the ReplicaSet from the indexer for a given namespace and name. 
// Objects returned here must be treated as read-only. - Get(name string) (*v1beta1.ReplicaSet, error) + Get(name string) (*extensionsv1beta1.ReplicaSet, error) ReplicaSetNamespaceListerExpansion } // replicaSetNamespaceLister implements the ReplicaSetNamespaceLister // interface. type replicaSetNamespaceLister struct { - listers.ResourceIndexer[*v1beta1.ReplicaSet] + listers.ResourceIndexer[*extensionsv1beta1.ReplicaSet] } diff --git a/vendor/k8s.io/client-go/listers/flowcontrol/v1/flowschema.go b/vendor/k8s.io/client-go/listers/flowcontrol/v1/flowschema.go index ba7e51487..cccb3022b 100644 --- a/vendor/k8s.io/client-go/listers/flowcontrol/v1/flowschema.go +++ b/vendor/k8s.io/client-go/listers/flowcontrol/v1/flowschema.go @@ -19,10 +19,10 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/flowcontrol/v1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/listers" - "k8s.io/client-go/tools/cache" + flowcontrolv1 "k8s.io/api/flowcontrol/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // FlowSchemaLister helps list FlowSchemas. @@ -30,19 +30,19 @@ import ( type FlowSchemaLister interface { // List lists all FlowSchemas in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.FlowSchema, err error) + List(selector labels.Selector) (ret []*flowcontrolv1.FlowSchema, err error) // Get retrieves the FlowSchema from the index for a given name. // Objects returned here must be treated as read-only. - Get(name string) (*v1.FlowSchema, error) + Get(name string) (*flowcontrolv1.FlowSchema, error) FlowSchemaListerExpansion } // flowSchemaLister implements the FlowSchemaLister interface. type flowSchemaLister struct { - listers.ResourceIndexer[*v1.FlowSchema] + listers.ResourceIndexer[*flowcontrolv1.FlowSchema] } // NewFlowSchemaLister returns a new FlowSchemaLister. func NewFlowSchemaLister(indexer cache.Indexer) FlowSchemaLister { - return &flowSchemaLister{listers.New[*v1.FlowSchema](indexer, v1.Resource("flowschema"))} + return &flowSchemaLister{listers.New[*flowcontrolv1.FlowSchema](indexer, flowcontrolv1.Resource("flowschema"))} } diff --git a/vendor/k8s.io/client-go/listers/flowcontrol/v1/prioritylevelconfiguration.go b/vendor/k8s.io/client-go/listers/flowcontrol/v1/prioritylevelconfiguration.go index 61f5b9fe6..cc8db4a46 100644 --- a/vendor/k8s.io/client-go/listers/flowcontrol/v1/prioritylevelconfiguration.go +++ b/vendor/k8s.io/client-go/listers/flowcontrol/v1/prioritylevelconfiguration.go @@ -19,10 +19,10 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/flowcontrol/v1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/listers" - "k8s.io/client-go/tools/cache" + flowcontrolv1 "k8s.io/api/flowcontrol/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // PriorityLevelConfigurationLister helps list PriorityLevelConfigurations. @@ -30,19 +30,19 @@ import ( type PriorityLevelConfigurationLister interface { // List lists all PriorityLevelConfigurations in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.PriorityLevelConfiguration, err error) + List(selector labels.Selector) (ret []*flowcontrolv1.PriorityLevelConfiguration, err error) // Get retrieves the PriorityLevelConfiguration from the index for a given name. // Objects returned here must be treated as read-only. 
- Get(name string) (*v1.PriorityLevelConfiguration, error) + Get(name string) (*flowcontrolv1.PriorityLevelConfiguration, error) PriorityLevelConfigurationListerExpansion } // priorityLevelConfigurationLister implements the PriorityLevelConfigurationLister interface. type priorityLevelConfigurationLister struct { - listers.ResourceIndexer[*v1.PriorityLevelConfiguration] + listers.ResourceIndexer[*flowcontrolv1.PriorityLevelConfiguration] } // NewPriorityLevelConfigurationLister returns a new PriorityLevelConfigurationLister. func NewPriorityLevelConfigurationLister(indexer cache.Indexer) PriorityLevelConfigurationLister { - return &priorityLevelConfigurationLister{listers.New[*v1.PriorityLevelConfiguration](indexer, v1.Resource("prioritylevelconfiguration"))} + return &priorityLevelConfigurationLister{listers.New[*flowcontrolv1.PriorityLevelConfiguration](indexer, flowcontrolv1.Resource("prioritylevelconfiguration"))} } diff --git a/vendor/k8s.io/client-go/listers/flowcontrol/v1beta1/flowschema.go b/vendor/k8s.io/client-go/listers/flowcontrol/v1beta1/flowschema.go index 59bca6ce4..b85907d03 100644 --- a/vendor/k8s.io/client-go/listers/flowcontrol/v1beta1/flowschema.go +++ b/vendor/k8s.io/client-go/listers/flowcontrol/v1beta1/flowschema.go @@ -19,10 +19,10 @@ limitations under the License. package v1beta1 import ( - v1beta1 "k8s.io/api/flowcontrol/v1beta1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/listers" - "k8s.io/client-go/tools/cache" + flowcontrolv1beta1 "k8s.io/api/flowcontrol/v1beta1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // FlowSchemaLister helps list FlowSchemas. @@ -30,19 +30,19 @@ import ( type FlowSchemaLister interface { // List lists all FlowSchemas in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1beta1.FlowSchema, err error) + List(selector labels.Selector) (ret []*flowcontrolv1beta1.FlowSchema, err error) // Get retrieves the FlowSchema from the index for a given name. // Objects returned here must be treated as read-only. - Get(name string) (*v1beta1.FlowSchema, error) + Get(name string) (*flowcontrolv1beta1.FlowSchema, error) FlowSchemaListerExpansion } // flowSchemaLister implements the FlowSchemaLister interface. type flowSchemaLister struct { - listers.ResourceIndexer[*v1beta1.FlowSchema] + listers.ResourceIndexer[*flowcontrolv1beta1.FlowSchema] } // NewFlowSchemaLister returns a new FlowSchemaLister. func NewFlowSchemaLister(indexer cache.Indexer) FlowSchemaLister { - return &flowSchemaLister{listers.New[*v1beta1.FlowSchema](indexer, v1beta1.Resource("flowschema"))} + return &flowSchemaLister{listers.New[*flowcontrolv1beta1.FlowSchema](indexer, flowcontrolv1beta1.Resource("flowschema"))} } diff --git a/vendor/k8s.io/client-go/listers/flowcontrol/v1beta1/prioritylevelconfiguration.go b/vendor/k8s.io/client-go/listers/flowcontrol/v1beta1/prioritylevelconfiguration.go index 902f7cc4b..338aef8e2 100644 --- a/vendor/k8s.io/client-go/listers/flowcontrol/v1beta1/prioritylevelconfiguration.go +++ b/vendor/k8s.io/client-go/listers/flowcontrol/v1beta1/prioritylevelconfiguration.go @@ -19,10 +19,10 @@ limitations under the License. 
package v1beta1 import ( - v1beta1 "k8s.io/api/flowcontrol/v1beta1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/listers" - "k8s.io/client-go/tools/cache" + flowcontrolv1beta1 "k8s.io/api/flowcontrol/v1beta1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // PriorityLevelConfigurationLister helps list PriorityLevelConfigurations. @@ -30,19 +30,19 @@ import ( type PriorityLevelConfigurationLister interface { // List lists all PriorityLevelConfigurations in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1beta1.PriorityLevelConfiguration, err error) + List(selector labels.Selector) (ret []*flowcontrolv1beta1.PriorityLevelConfiguration, err error) // Get retrieves the PriorityLevelConfiguration from the index for a given name. // Objects returned here must be treated as read-only. - Get(name string) (*v1beta1.PriorityLevelConfiguration, error) + Get(name string) (*flowcontrolv1beta1.PriorityLevelConfiguration, error) PriorityLevelConfigurationListerExpansion } // priorityLevelConfigurationLister implements the PriorityLevelConfigurationLister interface. type priorityLevelConfigurationLister struct { - listers.ResourceIndexer[*v1beta1.PriorityLevelConfiguration] + listers.ResourceIndexer[*flowcontrolv1beta1.PriorityLevelConfiguration] } // NewPriorityLevelConfigurationLister returns a new PriorityLevelConfigurationLister. func NewPriorityLevelConfigurationLister(indexer cache.Indexer) PriorityLevelConfigurationLister { - return &priorityLevelConfigurationLister{listers.New[*v1beta1.PriorityLevelConfiguration](indexer, v1beta1.Resource("prioritylevelconfiguration"))} + return &priorityLevelConfigurationLister{listers.New[*flowcontrolv1beta1.PriorityLevelConfiguration](indexer, flowcontrolv1beta1.Resource("prioritylevelconfiguration"))} } diff --git a/vendor/k8s.io/client-go/listers/flowcontrol/v1beta2/flowschema.go b/vendor/k8s.io/client-go/listers/flowcontrol/v1beta2/flowschema.go index 721c5f6bd..5894a5a29 100644 --- a/vendor/k8s.io/client-go/listers/flowcontrol/v1beta2/flowschema.go +++ b/vendor/k8s.io/client-go/listers/flowcontrol/v1beta2/flowschema.go @@ -19,10 +19,10 @@ limitations under the License. package v1beta2 import ( - v1beta2 "k8s.io/api/flowcontrol/v1beta2" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/listers" - "k8s.io/client-go/tools/cache" + flowcontrolv1beta2 "k8s.io/api/flowcontrol/v1beta2" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // FlowSchemaLister helps list FlowSchemas. @@ -30,19 +30,19 @@ import ( type FlowSchemaLister interface { // List lists all FlowSchemas in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1beta2.FlowSchema, err error) + List(selector labels.Selector) (ret []*flowcontrolv1beta2.FlowSchema, err error) // Get retrieves the FlowSchema from the index for a given name. // Objects returned here must be treated as read-only. - Get(name string) (*v1beta2.FlowSchema, error) + Get(name string) (*flowcontrolv1beta2.FlowSchema, error) FlowSchemaListerExpansion } // flowSchemaLister implements the FlowSchemaLister interface. type flowSchemaLister struct { - listers.ResourceIndexer[*v1beta2.FlowSchema] + listers.ResourceIndexer[*flowcontrolv1beta2.FlowSchema] } // NewFlowSchemaLister returns a new FlowSchemaLister. 
func NewFlowSchemaLister(indexer cache.Indexer) FlowSchemaLister { - return &flowSchemaLister{listers.New[*v1beta2.FlowSchema](indexer, v1beta2.Resource("flowschema"))} + return &flowSchemaLister{listers.New[*flowcontrolv1beta2.FlowSchema](indexer, flowcontrolv1beta2.Resource("flowschema"))} } diff --git a/vendor/k8s.io/client-go/listers/flowcontrol/v1beta2/prioritylevelconfiguration.go b/vendor/k8s.io/client-go/listers/flowcontrol/v1beta2/prioritylevelconfiguration.go index 3e8a2134f..1236cb384 100644 --- a/vendor/k8s.io/client-go/listers/flowcontrol/v1beta2/prioritylevelconfiguration.go +++ b/vendor/k8s.io/client-go/listers/flowcontrol/v1beta2/prioritylevelconfiguration.go @@ -19,10 +19,10 @@ limitations under the License. package v1beta2 import ( - v1beta2 "k8s.io/api/flowcontrol/v1beta2" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/listers" - "k8s.io/client-go/tools/cache" + flowcontrolv1beta2 "k8s.io/api/flowcontrol/v1beta2" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // PriorityLevelConfigurationLister helps list PriorityLevelConfigurations. @@ -30,19 +30,19 @@ import ( type PriorityLevelConfigurationLister interface { // List lists all PriorityLevelConfigurations in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1beta2.PriorityLevelConfiguration, err error) + List(selector labels.Selector) (ret []*flowcontrolv1beta2.PriorityLevelConfiguration, err error) // Get retrieves the PriorityLevelConfiguration from the index for a given name. // Objects returned here must be treated as read-only. - Get(name string) (*v1beta2.PriorityLevelConfiguration, error) + Get(name string) (*flowcontrolv1beta2.PriorityLevelConfiguration, error) PriorityLevelConfigurationListerExpansion } // priorityLevelConfigurationLister implements the PriorityLevelConfigurationLister interface. type priorityLevelConfigurationLister struct { - listers.ResourceIndexer[*v1beta2.PriorityLevelConfiguration] + listers.ResourceIndexer[*flowcontrolv1beta2.PriorityLevelConfiguration] } // NewPriorityLevelConfigurationLister returns a new PriorityLevelConfigurationLister. func NewPriorityLevelConfigurationLister(indexer cache.Indexer) PriorityLevelConfigurationLister { - return &priorityLevelConfigurationLister{listers.New[*v1beta2.PriorityLevelConfiguration](indexer, v1beta2.Resource("prioritylevelconfiguration"))} + return &priorityLevelConfigurationLister{listers.New[*flowcontrolv1beta2.PriorityLevelConfiguration](indexer, flowcontrolv1beta2.Resource("prioritylevelconfiguration"))} } diff --git a/vendor/k8s.io/client-go/listers/flowcontrol/v1beta3/flowschema.go b/vendor/k8s.io/client-go/listers/flowcontrol/v1beta3/flowschema.go index c5555fd64..5f127d84b 100644 --- a/vendor/k8s.io/client-go/listers/flowcontrol/v1beta3/flowschema.go +++ b/vendor/k8s.io/client-go/listers/flowcontrol/v1beta3/flowschema.go @@ -19,10 +19,10 @@ limitations under the License. package v1beta3 import ( - v1beta3 "k8s.io/api/flowcontrol/v1beta3" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/listers" - "k8s.io/client-go/tools/cache" + flowcontrolv1beta3 "k8s.io/api/flowcontrol/v1beta3" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // FlowSchemaLister helps list FlowSchemas. @@ -30,19 +30,19 @@ import ( type FlowSchemaLister interface { // List lists all FlowSchemas in the indexer. 
// Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1beta3.FlowSchema, err error) + List(selector labels.Selector) (ret []*flowcontrolv1beta3.FlowSchema, err error) // Get retrieves the FlowSchema from the index for a given name. // Objects returned here must be treated as read-only. - Get(name string) (*v1beta3.FlowSchema, error) + Get(name string) (*flowcontrolv1beta3.FlowSchema, error) FlowSchemaListerExpansion } // flowSchemaLister implements the FlowSchemaLister interface. type flowSchemaLister struct { - listers.ResourceIndexer[*v1beta3.FlowSchema] + listers.ResourceIndexer[*flowcontrolv1beta3.FlowSchema] } // NewFlowSchemaLister returns a new FlowSchemaLister. func NewFlowSchemaLister(indexer cache.Indexer) FlowSchemaLister { - return &flowSchemaLister{listers.New[*v1beta3.FlowSchema](indexer, v1beta3.Resource("flowschema"))} + return &flowSchemaLister{listers.New[*flowcontrolv1beta3.FlowSchema](indexer, flowcontrolv1beta3.Resource("flowschema"))} } diff --git a/vendor/k8s.io/client-go/listers/flowcontrol/v1beta3/prioritylevelconfiguration.go b/vendor/k8s.io/client-go/listers/flowcontrol/v1beta3/prioritylevelconfiguration.go index 9f7d89c54..d50fc8376 100644 --- a/vendor/k8s.io/client-go/listers/flowcontrol/v1beta3/prioritylevelconfiguration.go +++ b/vendor/k8s.io/client-go/listers/flowcontrol/v1beta3/prioritylevelconfiguration.go @@ -19,10 +19,10 @@ limitations under the License. package v1beta3 import ( - v1beta3 "k8s.io/api/flowcontrol/v1beta3" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/listers" - "k8s.io/client-go/tools/cache" + flowcontrolv1beta3 "k8s.io/api/flowcontrol/v1beta3" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // PriorityLevelConfigurationLister helps list PriorityLevelConfigurations. @@ -30,19 +30,19 @@ import ( type PriorityLevelConfigurationLister interface { // List lists all PriorityLevelConfigurations in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1beta3.PriorityLevelConfiguration, err error) + List(selector labels.Selector) (ret []*flowcontrolv1beta3.PriorityLevelConfiguration, err error) // Get retrieves the PriorityLevelConfiguration from the index for a given name. // Objects returned here must be treated as read-only. - Get(name string) (*v1beta3.PriorityLevelConfiguration, error) + Get(name string) (*flowcontrolv1beta3.PriorityLevelConfiguration, error) PriorityLevelConfigurationListerExpansion } // priorityLevelConfigurationLister implements the PriorityLevelConfigurationLister interface. type priorityLevelConfigurationLister struct { - listers.ResourceIndexer[*v1beta3.PriorityLevelConfiguration] + listers.ResourceIndexer[*flowcontrolv1beta3.PriorityLevelConfiguration] } // NewPriorityLevelConfigurationLister returns a new PriorityLevelConfigurationLister. 
func NewPriorityLevelConfigurationLister(indexer cache.Indexer) PriorityLevelConfigurationLister { - return &priorityLevelConfigurationLister{listers.New[*v1beta3.PriorityLevelConfiguration](indexer, v1beta3.Resource("prioritylevelconfiguration"))} + return &priorityLevelConfigurationLister{listers.New[*flowcontrolv1beta3.PriorityLevelConfiguration](indexer, flowcontrolv1beta3.Resource("prioritylevelconfiguration"))} } diff --git a/vendor/k8s.io/client-go/listers/networking/v1/ingress.go b/vendor/k8s.io/client-go/listers/networking/v1/ingress.go index 3007cd349..7d31b09c1 100644 --- a/vendor/k8s.io/client-go/listers/networking/v1/ingress.go +++ b/vendor/k8s.io/client-go/listers/networking/v1/ingress.go @@ -19,10 +19,10 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/networking/v1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/listers" - "k8s.io/client-go/tools/cache" + networkingv1 "k8s.io/api/networking/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // IngressLister helps list Ingresses. @@ -30,7 +30,7 @@ import ( type IngressLister interface { // List lists all Ingresses in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.Ingress, err error) + List(selector labels.Selector) (ret []*networkingv1.Ingress, err error) // Ingresses returns an object that can list and get Ingresses. Ingresses(namespace string) IngressNamespaceLister IngressListerExpansion @@ -38,17 +38,17 @@ type IngressLister interface { // ingressLister implements the IngressLister interface. type ingressLister struct { - listers.ResourceIndexer[*v1.Ingress] + listers.ResourceIndexer[*networkingv1.Ingress] } // NewIngressLister returns a new IngressLister. func NewIngressLister(indexer cache.Indexer) IngressLister { - return &ingressLister{listers.New[*v1.Ingress](indexer, v1.Resource("ingress"))} + return &ingressLister{listers.New[*networkingv1.Ingress](indexer, networkingv1.Resource("ingress"))} } // Ingresses returns an object that can list and get Ingresses. func (s *ingressLister) Ingresses(namespace string) IngressNamespaceLister { - return ingressNamespaceLister{listers.NewNamespaced[*v1.Ingress](s.ResourceIndexer, namespace)} + return ingressNamespaceLister{listers.NewNamespaced[*networkingv1.Ingress](s.ResourceIndexer, namespace)} } // IngressNamespaceLister helps list and get Ingresses. @@ -56,15 +56,15 @@ func (s *ingressLister) Ingresses(namespace string) IngressNamespaceLister { type IngressNamespaceLister interface { // List lists all Ingresses in the indexer for a given namespace. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.Ingress, err error) + List(selector labels.Selector) (ret []*networkingv1.Ingress, err error) // Get retrieves the Ingress from the indexer for a given namespace and name. // Objects returned here must be treated as read-only. - Get(name string) (*v1.Ingress, error) + Get(name string) (*networkingv1.Ingress, error) IngressNamespaceListerExpansion } // ingressNamespaceLister implements the IngressNamespaceLister // interface. 
type ingressNamespaceLister struct { - listers.ResourceIndexer[*v1.Ingress] + listers.ResourceIndexer[*networkingv1.Ingress] } diff --git a/vendor/k8s.io/client-go/listers/networking/v1/ingressclass.go b/vendor/k8s.io/client-go/listers/networking/v1/ingressclass.go index a8efe5c5e..71d432164 100644 --- a/vendor/k8s.io/client-go/listers/networking/v1/ingressclass.go +++ b/vendor/k8s.io/client-go/listers/networking/v1/ingressclass.go @@ -19,10 +19,10 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/networking/v1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/listers" - "k8s.io/client-go/tools/cache" + networkingv1 "k8s.io/api/networking/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // IngressClassLister helps list IngressClasses. @@ -30,19 +30,19 @@ import ( type IngressClassLister interface { // List lists all IngressClasses in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.IngressClass, err error) + List(selector labels.Selector) (ret []*networkingv1.IngressClass, err error) // Get retrieves the IngressClass from the index for a given name. // Objects returned here must be treated as read-only. - Get(name string) (*v1.IngressClass, error) + Get(name string) (*networkingv1.IngressClass, error) IngressClassListerExpansion } // ingressClassLister implements the IngressClassLister interface. type ingressClassLister struct { - listers.ResourceIndexer[*v1.IngressClass] + listers.ResourceIndexer[*networkingv1.IngressClass] } // NewIngressClassLister returns a new IngressClassLister. func NewIngressClassLister(indexer cache.Indexer) IngressClassLister { - return &ingressClassLister{listers.New[*v1.IngressClass](indexer, v1.Resource("ingressclass"))} + return &ingressClassLister{listers.New[*networkingv1.IngressClass](indexer, networkingv1.Resource("ingressclass"))} } diff --git a/vendor/k8s.io/client-go/listers/networking/v1/networkpolicy.go b/vendor/k8s.io/client-go/listers/networking/v1/networkpolicy.go index 9a3e3172e..5a38a74a0 100644 --- a/vendor/k8s.io/client-go/listers/networking/v1/networkpolicy.go +++ b/vendor/k8s.io/client-go/listers/networking/v1/networkpolicy.go @@ -19,10 +19,10 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/networking/v1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/listers" - "k8s.io/client-go/tools/cache" + networkingv1 "k8s.io/api/networking/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // NetworkPolicyLister helps list NetworkPolicies. @@ -30,7 +30,7 @@ import ( type NetworkPolicyLister interface { // List lists all NetworkPolicies in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.NetworkPolicy, err error) + List(selector labels.Selector) (ret []*networkingv1.NetworkPolicy, err error) // NetworkPolicies returns an object that can list and get NetworkPolicies. NetworkPolicies(namespace string) NetworkPolicyNamespaceLister NetworkPolicyListerExpansion @@ -38,17 +38,17 @@ type NetworkPolicyLister interface { // networkPolicyLister implements the NetworkPolicyLister interface. type networkPolicyLister struct { - listers.ResourceIndexer[*v1.NetworkPolicy] + listers.ResourceIndexer[*networkingv1.NetworkPolicy] } // NewNetworkPolicyLister returns a new NetworkPolicyLister. 
func NewNetworkPolicyLister(indexer cache.Indexer) NetworkPolicyLister { - return &networkPolicyLister{listers.New[*v1.NetworkPolicy](indexer, v1.Resource("networkpolicy"))} + return &networkPolicyLister{listers.New[*networkingv1.NetworkPolicy](indexer, networkingv1.Resource("networkpolicy"))} } // NetworkPolicies returns an object that can list and get NetworkPolicies. func (s *networkPolicyLister) NetworkPolicies(namespace string) NetworkPolicyNamespaceLister { - return networkPolicyNamespaceLister{listers.NewNamespaced[*v1.NetworkPolicy](s.ResourceIndexer, namespace)} + return networkPolicyNamespaceLister{listers.NewNamespaced[*networkingv1.NetworkPolicy](s.ResourceIndexer, namespace)} } // NetworkPolicyNamespaceLister helps list and get NetworkPolicies. @@ -56,15 +56,15 @@ func (s *networkPolicyLister) NetworkPolicies(namespace string) NetworkPolicyNam type NetworkPolicyNamespaceLister interface { // List lists all NetworkPolicies in the indexer for a given namespace. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.NetworkPolicy, err error) + List(selector labels.Selector) (ret []*networkingv1.NetworkPolicy, err error) // Get retrieves the NetworkPolicy from the indexer for a given namespace and name. // Objects returned here must be treated as read-only. - Get(name string) (*v1.NetworkPolicy, error) + Get(name string) (*networkingv1.NetworkPolicy, error) NetworkPolicyNamespaceListerExpansion } // networkPolicyNamespaceLister implements the NetworkPolicyNamespaceLister // interface. type networkPolicyNamespaceLister struct { - listers.ResourceIndexer[*v1.NetworkPolicy] + listers.ResourceIndexer[*networkingv1.NetworkPolicy] } diff --git a/vendor/k8s.io/client-go/listers/networking/v1alpha1/ipaddress.go b/vendor/k8s.io/client-go/listers/networking/v1alpha1/ipaddress.go index 749affd7b..953265eca 100644 --- a/vendor/k8s.io/client-go/listers/networking/v1alpha1/ipaddress.go +++ b/vendor/k8s.io/client-go/listers/networking/v1alpha1/ipaddress.go @@ -19,10 +19,10 @@ limitations under the License. package v1alpha1 import ( - v1alpha1 "k8s.io/api/networking/v1alpha1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/listers" - "k8s.io/client-go/tools/cache" + networkingv1alpha1 "k8s.io/api/networking/v1alpha1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // IPAddressLister helps list IPAddresses. @@ -30,19 +30,19 @@ import ( type IPAddressLister interface { // List lists all IPAddresses in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1alpha1.IPAddress, err error) + List(selector labels.Selector) (ret []*networkingv1alpha1.IPAddress, err error) // Get retrieves the IPAddress from the index for a given name. // Objects returned here must be treated as read-only. - Get(name string) (*v1alpha1.IPAddress, error) + Get(name string) (*networkingv1alpha1.IPAddress, error) IPAddressListerExpansion } // iPAddressLister implements the IPAddressLister interface. type iPAddressLister struct { - listers.ResourceIndexer[*v1alpha1.IPAddress] + listers.ResourceIndexer[*networkingv1alpha1.IPAddress] } // NewIPAddressLister returns a new IPAddressLister. 
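For context on the regenerated networking/v1 NetworkPolicy lister above: the import-alias rename does not change the exported lister surface, so consumers keep constructing and reading it the same way. The following is a minimal usage sketch (illustrative only, not part of the vendored files), assuming a cache.Indexer that an informer has already populated; the namespace and policy name are hypothetical.

package main

import (
	"fmt"

	networkingv1 "k8s.io/api/networking/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	networkinglisters "k8s.io/client-go/listers/networking/v1"
	"k8s.io/client-go/tools/cache"
)

func main() {
	// Indexer keyed by namespace/name, as SharedIndexInformers use internally.
	indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{
		cache.NamespaceIndex: cache.MetaNamespaceIndexFunc,
	})

	// Hypothetical object standing in for what an informer would have cached.
	_ = indexer.Add(&networkingv1.NetworkPolicy{
		ObjectMeta: metav1.ObjectMeta{Name: "deny-all", Namespace: "example-ns"},
	})

	lister := networkinglisters.NewNetworkPolicyLister(indexer)

	// Objects returned by listers are shared cache state and must be treated as read-only.
	policies, err := lister.NetworkPolicies("example-ns").List(labels.Everything())
	if err != nil {
		panic(err)
	}
	fmt.Println("cached NetworkPolicies:", len(policies))
}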
func NewIPAddressLister(indexer cache.Indexer) IPAddressLister { - return &iPAddressLister{listers.New[*v1alpha1.IPAddress](indexer, v1alpha1.Resource("ipaddress"))} + return &iPAddressLister{listers.New[*networkingv1alpha1.IPAddress](indexer, networkingv1alpha1.Resource("ipaddress"))} } diff --git a/vendor/k8s.io/client-go/listers/networking/v1alpha1/servicecidr.go b/vendor/k8s.io/client-go/listers/networking/v1alpha1/servicecidr.go index 8be2d11af..0c4cb2ebf 100644 --- a/vendor/k8s.io/client-go/listers/networking/v1alpha1/servicecidr.go +++ b/vendor/k8s.io/client-go/listers/networking/v1alpha1/servicecidr.go @@ -19,10 +19,10 @@ limitations under the License. package v1alpha1 import ( - v1alpha1 "k8s.io/api/networking/v1alpha1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/listers" - "k8s.io/client-go/tools/cache" + networkingv1alpha1 "k8s.io/api/networking/v1alpha1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // ServiceCIDRLister helps list ServiceCIDRs. @@ -30,19 +30,19 @@ import ( type ServiceCIDRLister interface { // List lists all ServiceCIDRs in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1alpha1.ServiceCIDR, err error) + List(selector labels.Selector) (ret []*networkingv1alpha1.ServiceCIDR, err error) // Get retrieves the ServiceCIDR from the index for a given name. // Objects returned here must be treated as read-only. - Get(name string) (*v1alpha1.ServiceCIDR, error) + Get(name string) (*networkingv1alpha1.ServiceCIDR, error) ServiceCIDRListerExpansion } // serviceCIDRLister implements the ServiceCIDRLister interface. type serviceCIDRLister struct { - listers.ResourceIndexer[*v1alpha1.ServiceCIDR] + listers.ResourceIndexer[*networkingv1alpha1.ServiceCIDR] } // NewServiceCIDRLister returns a new ServiceCIDRLister. func NewServiceCIDRLister(indexer cache.Indexer) ServiceCIDRLister { - return &serviceCIDRLister{listers.New[*v1alpha1.ServiceCIDR](indexer, v1alpha1.Resource("servicecidr"))} + return &serviceCIDRLister{listers.New[*networkingv1alpha1.ServiceCIDR](indexer, networkingv1alpha1.Resource("servicecidr"))} } diff --git a/vendor/k8s.io/client-go/listers/networking/v1beta1/ingress.go b/vendor/k8s.io/client-go/listers/networking/v1beta1/ingress.go index b8fe99e24..f8c172487 100644 --- a/vendor/k8s.io/client-go/listers/networking/v1beta1/ingress.go +++ b/vendor/k8s.io/client-go/listers/networking/v1beta1/ingress.go @@ -19,10 +19,10 @@ limitations under the License. package v1beta1 import ( - v1beta1 "k8s.io/api/networking/v1beta1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/listers" - "k8s.io/client-go/tools/cache" + networkingv1beta1 "k8s.io/api/networking/v1beta1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // IngressLister helps list Ingresses. @@ -30,7 +30,7 @@ import ( type IngressLister interface { // List lists all Ingresses in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1beta1.Ingress, err error) + List(selector labels.Selector) (ret []*networkingv1beta1.Ingress, err error) // Ingresses returns an object that can list and get Ingresses. Ingresses(namespace string) IngressNamespaceLister IngressListerExpansion @@ -38,17 +38,17 @@ type IngressLister interface { // ingressLister implements the IngressLister interface. 
type ingressLister struct { - listers.ResourceIndexer[*v1beta1.Ingress] + listers.ResourceIndexer[*networkingv1beta1.Ingress] } // NewIngressLister returns a new IngressLister. func NewIngressLister(indexer cache.Indexer) IngressLister { - return &ingressLister{listers.New[*v1beta1.Ingress](indexer, v1beta1.Resource("ingress"))} + return &ingressLister{listers.New[*networkingv1beta1.Ingress](indexer, networkingv1beta1.Resource("ingress"))} } // Ingresses returns an object that can list and get Ingresses. func (s *ingressLister) Ingresses(namespace string) IngressNamespaceLister { - return ingressNamespaceLister{listers.NewNamespaced[*v1beta1.Ingress](s.ResourceIndexer, namespace)} + return ingressNamespaceLister{listers.NewNamespaced[*networkingv1beta1.Ingress](s.ResourceIndexer, namespace)} } // IngressNamespaceLister helps list and get Ingresses. @@ -56,15 +56,15 @@ func (s *ingressLister) Ingresses(namespace string) IngressNamespaceLister { type IngressNamespaceLister interface { // List lists all Ingresses in the indexer for a given namespace. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1beta1.Ingress, err error) + List(selector labels.Selector) (ret []*networkingv1beta1.Ingress, err error) // Get retrieves the Ingress from the indexer for a given namespace and name. // Objects returned here must be treated as read-only. - Get(name string) (*v1beta1.Ingress, error) + Get(name string) (*networkingv1beta1.Ingress, error) IngressNamespaceListerExpansion } // ingressNamespaceLister implements the IngressNamespaceLister // interface. type ingressNamespaceLister struct { - listers.ResourceIndexer[*v1beta1.Ingress] + listers.ResourceIndexer[*networkingv1beta1.Ingress] } diff --git a/vendor/k8s.io/client-go/listers/networking/v1beta1/ingressclass.go b/vendor/k8s.io/client-go/listers/networking/v1beta1/ingressclass.go index a5e33525f..0e87e0397 100644 --- a/vendor/k8s.io/client-go/listers/networking/v1beta1/ingressclass.go +++ b/vendor/k8s.io/client-go/listers/networking/v1beta1/ingressclass.go @@ -19,10 +19,10 @@ limitations under the License. package v1beta1 import ( - v1beta1 "k8s.io/api/networking/v1beta1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/listers" - "k8s.io/client-go/tools/cache" + networkingv1beta1 "k8s.io/api/networking/v1beta1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // IngressClassLister helps list IngressClasses. @@ -30,19 +30,19 @@ import ( type IngressClassLister interface { // List lists all IngressClasses in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1beta1.IngressClass, err error) + List(selector labels.Selector) (ret []*networkingv1beta1.IngressClass, err error) // Get retrieves the IngressClass from the index for a given name. // Objects returned here must be treated as read-only. - Get(name string) (*v1beta1.IngressClass, error) + Get(name string) (*networkingv1beta1.IngressClass, error) IngressClassListerExpansion } // ingressClassLister implements the IngressClassLister interface. type ingressClassLister struct { - listers.ResourceIndexer[*v1beta1.IngressClass] + listers.ResourceIndexer[*networkingv1beta1.IngressClass] } // NewIngressClassLister returns a new IngressClassLister. 
func NewIngressClassLister(indexer cache.Indexer) IngressClassLister { - return &ingressClassLister{listers.New[*v1beta1.IngressClass](indexer, v1beta1.Resource("ingressclass"))} + return &ingressClassLister{listers.New[*networkingv1beta1.IngressClass](indexer, networkingv1beta1.Resource("ingressclass"))} } diff --git a/vendor/k8s.io/client-go/listers/networking/v1beta1/ipaddress.go b/vendor/k8s.io/client-go/listers/networking/v1beta1/ipaddress.go index 361406670..41a4bf927 100644 --- a/vendor/k8s.io/client-go/listers/networking/v1beta1/ipaddress.go +++ b/vendor/k8s.io/client-go/listers/networking/v1beta1/ipaddress.go @@ -19,10 +19,10 @@ limitations under the License. package v1beta1 import ( - v1beta1 "k8s.io/api/networking/v1beta1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/listers" - "k8s.io/client-go/tools/cache" + networkingv1beta1 "k8s.io/api/networking/v1beta1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // IPAddressLister helps list IPAddresses. @@ -30,19 +30,19 @@ import ( type IPAddressLister interface { // List lists all IPAddresses in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1beta1.IPAddress, err error) + List(selector labels.Selector) (ret []*networkingv1beta1.IPAddress, err error) // Get retrieves the IPAddress from the index for a given name. // Objects returned here must be treated as read-only. - Get(name string) (*v1beta1.IPAddress, error) + Get(name string) (*networkingv1beta1.IPAddress, error) IPAddressListerExpansion } // iPAddressLister implements the IPAddressLister interface. type iPAddressLister struct { - listers.ResourceIndexer[*v1beta1.IPAddress] + listers.ResourceIndexer[*networkingv1beta1.IPAddress] } // NewIPAddressLister returns a new IPAddressLister. func NewIPAddressLister(indexer cache.Indexer) IPAddressLister { - return &iPAddressLister{listers.New[*v1beta1.IPAddress](indexer, v1beta1.Resource("ipaddress"))} + return &iPAddressLister{listers.New[*networkingv1beta1.IPAddress](indexer, networkingv1beta1.Resource("ipaddress"))} } diff --git a/vendor/k8s.io/client-go/listers/networking/v1beta1/servicecidr.go b/vendor/k8s.io/client-go/listers/networking/v1beta1/servicecidr.go index 2902fa7f1..5c9a0eac1 100644 --- a/vendor/k8s.io/client-go/listers/networking/v1beta1/servicecidr.go +++ b/vendor/k8s.io/client-go/listers/networking/v1beta1/servicecidr.go @@ -19,10 +19,10 @@ limitations under the License. package v1beta1 import ( - v1beta1 "k8s.io/api/networking/v1beta1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/listers" - "k8s.io/client-go/tools/cache" + networkingv1beta1 "k8s.io/api/networking/v1beta1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // ServiceCIDRLister helps list ServiceCIDRs. @@ -30,19 +30,19 @@ import ( type ServiceCIDRLister interface { // List lists all ServiceCIDRs in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1beta1.ServiceCIDR, err error) + List(selector labels.Selector) (ret []*networkingv1beta1.ServiceCIDR, err error) // Get retrieves the ServiceCIDR from the index for a given name. // Objects returned here must be treated as read-only. 
- Get(name string) (*v1beta1.ServiceCIDR, error) + Get(name string) (*networkingv1beta1.ServiceCIDR, error) ServiceCIDRListerExpansion } // serviceCIDRLister implements the ServiceCIDRLister interface. type serviceCIDRLister struct { - listers.ResourceIndexer[*v1beta1.ServiceCIDR] + listers.ResourceIndexer[*networkingv1beta1.ServiceCIDR] } // NewServiceCIDRLister returns a new ServiceCIDRLister. func NewServiceCIDRLister(indexer cache.Indexer) ServiceCIDRLister { - return &serviceCIDRLister{listers.New[*v1beta1.ServiceCIDR](indexer, v1beta1.Resource("servicecidr"))} + return &serviceCIDRLister{listers.New[*networkingv1beta1.ServiceCIDR](indexer, networkingv1beta1.Resource("servicecidr"))} } diff --git a/vendor/k8s.io/client-go/listers/node/v1/runtimeclass.go b/vendor/k8s.io/client-go/listers/node/v1/runtimeclass.go index 17b88687e..b8322dbb4 100644 --- a/vendor/k8s.io/client-go/listers/node/v1/runtimeclass.go +++ b/vendor/k8s.io/client-go/listers/node/v1/runtimeclass.go @@ -19,10 +19,10 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/node/v1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/listers" - "k8s.io/client-go/tools/cache" + nodev1 "k8s.io/api/node/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // RuntimeClassLister helps list RuntimeClasses. @@ -30,19 +30,19 @@ import ( type RuntimeClassLister interface { // List lists all RuntimeClasses in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.RuntimeClass, err error) + List(selector labels.Selector) (ret []*nodev1.RuntimeClass, err error) // Get retrieves the RuntimeClass from the index for a given name. // Objects returned here must be treated as read-only. - Get(name string) (*v1.RuntimeClass, error) + Get(name string) (*nodev1.RuntimeClass, error) RuntimeClassListerExpansion } // runtimeClassLister implements the RuntimeClassLister interface. type runtimeClassLister struct { - listers.ResourceIndexer[*v1.RuntimeClass] + listers.ResourceIndexer[*nodev1.RuntimeClass] } // NewRuntimeClassLister returns a new RuntimeClassLister. func NewRuntimeClassLister(indexer cache.Indexer) RuntimeClassLister { - return &runtimeClassLister{listers.New[*v1.RuntimeClass](indexer, v1.Resource("runtimeclass"))} + return &runtimeClassLister{listers.New[*nodev1.RuntimeClass](indexer, nodev1.Resource("runtimeclass"))} } diff --git a/vendor/k8s.io/client-go/listers/node/v1alpha1/runtimeclass.go b/vendor/k8s.io/client-go/listers/node/v1alpha1/runtimeclass.go index 1f6e06f48..b3d4ad46c 100644 --- a/vendor/k8s.io/client-go/listers/node/v1alpha1/runtimeclass.go +++ b/vendor/k8s.io/client-go/listers/node/v1alpha1/runtimeclass.go @@ -19,10 +19,10 @@ limitations under the License. package v1alpha1 import ( - v1alpha1 "k8s.io/api/node/v1alpha1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/listers" - "k8s.io/client-go/tools/cache" + nodev1alpha1 "k8s.io/api/node/v1alpha1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // RuntimeClassLister helps list RuntimeClasses. @@ -30,19 +30,19 @@ import ( type RuntimeClassLister interface { // List lists all RuntimeClasses in the indexer. // Objects returned here must be treated as read-only. 
- List(selector labels.Selector) (ret []*v1alpha1.RuntimeClass, err error) + List(selector labels.Selector) (ret []*nodev1alpha1.RuntimeClass, err error) // Get retrieves the RuntimeClass from the index for a given name. // Objects returned here must be treated as read-only. - Get(name string) (*v1alpha1.RuntimeClass, error) + Get(name string) (*nodev1alpha1.RuntimeClass, error) RuntimeClassListerExpansion } // runtimeClassLister implements the RuntimeClassLister interface. type runtimeClassLister struct { - listers.ResourceIndexer[*v1alpha1.RuntimeClass] + listers.ResourceIndexer[*nodev1alpha1.RuntimeClass] } // NewRuntimeClassLister returns a new RuntimeClassLister. func NewRuntimeClassLister(indexer cache.Indexer) RuntimeClassLister { - return &runtimeClassLister{listers.New[*v1alpha1.RuntimeClass](indexer, v1alpha1.Resource("runtimeclass"))} + return &runtimeClassLister{listers.New[*nodev1alpha1.RuntimeClass](indexer, nodev1alpha1.Resource("runtimeclass"))} } diff --git a/vendor/k8s.io/client-go/listers/node/v1beta1/runtimeclass.go b/vendor/k8s.io/client-go/listers/node/v1beta1/runtimeclass.go index cd0cdf3c5..1b9f8d799 100644 --- a/vendor/k8s.io/client-go/listers/node/v1beta1/runtimeclass.go +++ b/vendor/k8s.io/client-go/listers/node/v1beta1/runtimeclass.go @@ -19,10 +19,10 @@ limitations under the License. package v1beta1 import ( - v1beta1 "k8s.io/api/node/v1beta1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/listers" - "k8s.io/client-go/tools/cache" + nodev1beta1 "k8s.io/api/node/v1beta1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // RuntimeClassLister helps list RuntimeClasses. @@ -30,19 +30,19 @@ import ( type RuntimeClassLister interface { // List lists all RuntimeClasses in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1beta1.RuntimeClass, err error) + List(selector labels.Selector) (ret []*nodev1beta1.RuntimeClass, err error) // Get retrieves the RuntimeClass from the index for a given name. // Objects returned here must be treated as read-only. - Get(name string) (*v1beta1.RuntimeClass, error) + Get(name string) (*nodev1beta1.RuntimeClass, error) RuntimeClassListerExpansion } // runtimeClassLister implements the RuntimeClassLister interface. type runtimeClassLister struct { - listers.ResourceIndexer[*v1beta1.RuntimeClass] + listers.ResourceIndexer[*nodev1beta1.RuntimeClass] } // NewRuntimeClassLister returns a new RuntimeClassLister. func NewRuntimeClassLister(indexer cache.Indexer) RuntimeClassLister { - return &runtimeClassLister{listers.New[*v1beta1.RuntimeClass](indexer, v1beta1.Resource("runtimeclass"))} + return &runtimeClassLister{listers.New[*nodev1beta1.RuntimeClass](indexer, nodev1beta1.Resource("runtimeclass"))} } diff --git a/vendor/k8s.io/client-go/listers/policy/v1/eviction.go b/vendor/k8s.io/client-go/listers/policy/v1/eviction.go index 83695668f..8dccd7313 100644 --- a/vendor/k8s.io/client-go/listers/policy/v1/eviction.go +++ b/vendor/k8s.io/client-go/listers/policy/v1/eviction.go @@ -19,10 +19,10 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/policy/v1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/listers" - "k8s.io/client-go/tools/cache" + policyv1 "k8s.io/api/policy/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // EvictionLister helps list Evictions. 
@@ -30,7 +30,7 @@ import ( type EvictionLister interface { // List lists all Evictions in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.Eviction, err error) + List(selector labels.Selector) (ret []*policyv1.Eviction, err error) // Evictions returns an object that can list and get Evictions. Evictions(namespace string) EvictionNamespaceLister EvictionListerExpansion @@ -38,17 +38,17 @@ type EvictionLister interface { // evictionLister implements the EvictionLister interface. type evictionLister struct { - listers.ResourceIndexer[*v1.Eviction] + listers.ResourceIndexer[*policyv1.Eviction] } // NewEvictionLister returns a new EvictionLister. func NewEvictionLister(indexer cache.Indexer) EvictionLister { - return &evictionLister{listers.New[*v1.Eviction](indexer, v1.Resource("eviction"))} + return &evictionLister{listers.New[*policyv1.Eviction](indexer, policyv1.Resource("eviction"))} } // Evictions returns an object that can list and get Evictions. func (s *evictionLister) Evictions(namespace string) EvictionNamespaceLister { - return evictionNamespaceLister{listers.NewNamespaced[*v1.Eviction](s.ResourceIndexer, namespace)} + return evictionNamespaceLister{listers.NewNamespaced[*policyv1.Eviction](s.ResourceIndexer, namespace)} } // EvictionNamespaceLister helps list and get Evictions. @@ -56,15 +56,15 @@ func (s *evictionLister) Evictions(namespace string) EvictionNamespaceLister { type EvictionNamespaceLister interface { // List lists all Evictions in the indexer for a given namespace. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.Eviction, err error) + List(selector labels.Selector) (ret []*policyv1.Eviction, err error) // Get retrieves the Eviction from the indexer for a given namespace and name. // Objects returned here must be treated as read-only. - Get(name string) (*v1.Eviction, error) + Get(name string) (*policyv1.Eviction, error) EvictionNamespaceListerExpansion } // evictionNamespaceLister implements the EvictionNamespaceLister // interface. type evictionNamespaceLister struct { - listers.ResourceIndexer[*v1.Eviction] + listers.ResourceIndexer[*policyv1.Eviction] } diff --git a/vendor/k8s.io/client-go/listers/policy/v1/poddisruptionbudget.go b/vendor/k8s.io/client-go/listers/policy/v1/poddisruptionbudget.go index 38ed8144e..1a6273b3e 100644 --- a/vendor/k8s.io/client-go/listers/policy/v1/poddisruptionbudget.go +++ b/vendor/k8s.io/client-go/listers/policy/v1/poddisruptionbudget.go @@ -19,10 +19,10 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/policy/v1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/listers" - "k8s.io/client-go/tools/cache" + policyv1 "k8s.io/api/policy/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // PodDisruptionBudgetLister helps list PodDisruptionBudgets. @@ -30,7 +30,7 @@ import ( type PodDisruptionBudgetLister interface { // List lists all PodDisruptionBudgets in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.PodDisruptionBudget, err error) + List(selector labels.Selector) (ret []*policyv1.PodDisruptionBudget, err error) // PodDisruptionBudgets returns an object that can list and get PodDisruptionBudgets. 
PodDisruptionBudgets(namespace string) PodDisruptionBudgetNamespaceLister PodDisruptionBudgetListerExpansion @@ -38,17 +38,17 @@ type PodDisruptionBudgetLister interface { // podDisruptionBudgetLister implements the PodDisruptionBudgetLister interface. type podDisruptionBudgetLister struct { - listers.ResourceIndexer[*v1.PodDisruptionBudget] + listers.ResourceIndexer[*policyv1.PodDisruptionBudget] } // NewPodDisruptionBudgetLister returns a new PodDisruptionBudgetLister. func NewPodDisruptionBudgetLister(indexer cache.Indexer) PodDisruptionBudgetLister { - return &podDisruptionBudgetLister{listers.New[*v1.PodDisruptionBudget](indexer, v1.Resource("poddisruptionbudget"))} + return &podDisruptionBudgetLister{listers.New[*policyv1.PodDisruptionBudget](indexer, policyv1.Resource("poddisruptionbudget"))} } // PodDisruptionBudgets returns an object that can list and get PodDisruptionBudgets. func (s *podDisruptionBudgetLister) PodDisruptionBudgets(namespace string) PodDisruptionBudgetNamespaceLister { - return podDisruptionBudgetNamespaceLister{listers.NewNamespaced[*v1.PodDisruptionBudget](s.ResourceIndexer, namespace)} + return podDisruptionBudgetNamespaceLister{listers.NewNamespaced[*policyv1.PodDisruptionBudget](s.ResourceIndexer, namespace)} } // PodDisruptionBudgetNamespaceLister helps list and get PodDisruptionBudgets. @@ -56,15 +56,15 @@ func (s *podDisruptionBudgetLister) PodDisruptionBudgets(namespace string) PodDi type PodDisruptionBudgetNamespaceLister interface { // List lists all PodDisruptionBudgets in the indexer for a given namespace. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.PodDisruptionBudget, err error) + List(selector labels.Selector) (ret []*policyv1.PodDisruptionBudget, err error) // Get retrieves the PodDisruptionBudget from the indexer for a given namespace and name. // Objects returned here must be treated as read-only. - Get(name string) (*v1.PodDisruptionBudget, error) + Get(name string) (*policyv1.PodDisruptionBudget, error) PodDisruptionBudgetNamespaceListerExpansion } // podDisruptionBudgetNamespaceLister implements the PodDisruptionBudgetNamespaceLister // interface. type podDisruptionBudgetNamespaceLister struct { - listers.ResourceIndexer[*v1.PodDisruptionBudget] + listers.ResourceIndexer[*policyv1.PodDisruptionBudget] } diff --git a/vendor/k8s.io/client-go/listers/policy/v1beta1/eviction.go b/vendor/k8s.io/client-go/listers/policy/v1beta1/eviction.go index 0aff83352..318c380fa 100644 --- a/vendor/k8s.io/client-go/listers/policy/v1beta1/eviction.go +++ b/vendor/k8s.io/client-go/listers/policy/v1beta1/eviction.go @@ -19,10 +19,10 @@ limitations under the License. package v1beta1 import ( - v1beta1 "k8s.io/api/policy/v1beta1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/listers" - "k8s.io/client-go/tools/cache" + policyv1beta1 "k8s.io/api/policy/v1beta1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // EvictionLister helps list Evictions. @@ -30,7 +30,7 @@ import ( type EvictionLister interface { // List lists all Evictions in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1beta1.Eviction, err error) + List(selector labels.Selector) (ret []*policyv1beta1.Eviction, err error) // Evictions returns an object that can list and get Evictions. 
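In practice these listers are rarely built from a raw indexer; a shared informer factory wires the indexer in and hands back the typed lister. Below is a sketch of that path for the networking/v1 and policy/v1 listers touched above (illustrative only; the kubeconfig location and resync period are assumptions, and it requires a reachable cluster).

package main

import (
	"time"

	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Assumes a kubeconfig in the default home location; in-cluster config works the same way.
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(cfg)

	// The factory owns the shared indexers that the generated listers read from.
	factory := informers.NewSharedInformerFactory(client, 10*time.Minute)
	npLister := factory.Networking().V1().NetworkPolicies().Lister()
	pdbLister := factory.Policy().V1().PodDisruptionBudgets().Lister()

	stop := make(chan struct{})
	defer close(stop)
	factory.Start(stop)
	factory.WaitForCacheSync(stop)

	// Both listers now serve reads from the local cache without extra API calls.
	_, _ = npLister, pdbLister
}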
Evictions(namespace string) EvictionNamespaceLister EvictionListerExpansion @@ -38,17 +38,17 @@ type EvictionLister interface { // evictionLister implements the EvictionLister interface. type evictionLister struct { - listers.ResourceIndexer[*v1beta1.Eviction] + listers.ResourceIndexer[*policyv1beta1.Eviction] } // NewEvictionLister returns a new EvictionLister. func NewEvictionLister(indexer cache.Indexer) EvictionLister { - return &evictionLister{listers.New[*v1beta1.Eviction](indexer, v1beta1.Resource("eviction"))} + return &evictionLister{listers.New[*policyv1beta1.Eviction](indexer, policyv1beta1.Resource("eviction"))} } // Evictions returns an object that can list and get Evictions. func (s *evictionLister) Evictions(namespace string) EvictionNamespaceLister { - return evictionNamespaceLister{listers.NewNamespaced[*v1beta1.Eviction](s.ResourceIndexer, namespace)} + return evictionNamespaceLister{listers.NewNamespaced[*policyv1beta1.Eviction](s.ResourceIndexer, namespace)} } // EvictionNamespaceLister helps list and get Evictions. @@ -56,15 +56,15 @@ func (s *evictionLister) Evictions(namespace string) EvictionNamespaceLister { type EvictionNamespaceLister interface { // List lists all Evictions in the indexer for a given namespace. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1beta1.Eviction, err error) + List(selector labels.Selector) (ret []*policyv1beta1.Eviction, err error) // Get retrieves the Eviction from the indexer for a given namespace and name. // Objects returned here must be treated as read-only. - Get(name string) (*v1beta1.Eviction, error) + Get(name string) (*policyv1beta1.Eviction, error) EvictionNamespaceListerExpansion } // evictionNamespaceLister implements the EvictionNamespaceLister // interface. type evictionNamespaceLister struct { - listers.ResourceIndexer[*v1beta1.Eviction] + listers.ResourceIndexer[*policyv1beta1.Eviction] } diff --git a/vendor/k8s.io/client-go/listers/policy/v1beta1/poddisruptionbudget.go b/vendor/k8s.io/client-go/listers/policy/v1beta1/poddisruptionbudget.go index 55ae892e2..fb156e97b 100644 --- a/vendor/k8s.io/client-go/listers/policy/v1beta1/poddisruptionbudget.go +++ b/vendor/k8s.io/client-go/listers/policy/v1beta1/poddisruptionbudget.go @@ -19,10 +19,10 @@ limitations under the License. package v1beta1 import ( - v1beta1 "k8s.io/api/policy/v1beta1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/listers" - "k8s.io/client-go/tools/cache" + policyv1beta1 "k8s.io/api/policy/v1beta1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // PodDisruptionBudgetLister helps list PodDisruptionBudgets. @@ -30,7 +30,7 @@ import ( type PodDisruptionBudgetLister interface { // List lists all PodDisruptionBudgets in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1beta1.PodDisruptionBudget, err error) + List(selector labels.Selector) (ret []*policyv1beta1.PodDisruptionBudget, err error) // PodDisruptionBudgets returns an object that can list and get PodDisruptionBudgets. PodDisruptionBudgets(namespace string) PodDisruptionBudgetNamespaceLister PodDisruptionBudgetListerExpansion @@ -38,17 +38,17 @@ type PodDisruptionBudgetLister interface { // podDisruptionBudgetLister implements the PodDisruptionBudgetLister interface. 
type podDisruptionBudgetLister struct { - listers.ResourceIndexer[*v1beta1.PodDisruptionBudget] + listers.ResourceIndexer[*policyv1beta1.PodDisruptionBudget] } // NewPodDisruptionBudgetLister returns a new PodDisruptionBudgetLister. func NewPodDisruptionBudgetLister(indexer cache.Indexer) PodDisruptionBudgetLister { - return &podDisruptionBudgetLister{listers.New[*v1beta1.PodDisruptionBudget](indexer, v1beta1.Resource("poddisruptionbudget"))} + return &podDisruptionBudgetLister{listers.New[*policyv1beta1.PodDisruptionBudget](indexer, policyv1beta1.Resource("poddisruptionbudget"))} } // PodDisruptionBudgets returns an object that can list and get PodDisruptionBudgets. func (s *podDisruptionBudgetLister) PodDisruptionBudgets(namespace string) PodDisruptionBudgetNamespaceLister { - return podDisruptionBudgetNamespaceLister{listers.NewNamespaced[*v1beta1.PodDisruptionBudget](s.ResourceIndexer, namespace)} + return podDisruptionBudgetNamespaceLister{listers.NewNamespaced[*policyv1beta1.PodDisruptionBudget](s.ResourceIndexer, namespace)} } // PodDisruptionBudgetNamespaceLister helps list and get PodDisruptionBudgets. @@ -56,15 +56,15 @@ func (s *podDisruptionBudgetLister) PodDisruptionBudgets(namespace string) PodDi type PodDisruptionBudgetNamespaceLister interface { // List lists all PodDisruptionBudgets in the indexer for a given namespace. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1beta1.PodDisruptionBudget, err error) + List(selector labels.Selector) (ret []*policyv1beta1.PodDisruptionBudget, err error) // Get retrieves the PodDisruptionBudget from the indexer for a given namespace and name. // Objects returned here must be treated as read-only. - Get(name string) (*v1beta1.PodDisruptionBudget, error) + Get(name string) (*policyv1beta1.PodDisruptionBudget, error) PodDisruptionBudgetNamespaceListerExpansion } // podDisruptionBudgetNamespaceLister implements the PodDisruptionBudgetNamespaceLister // interface. type podDisruptionBudgetNamespaceLister struct { - listers.ResourceIndexer[*v1beta1.PodDisruptionBudget] + listers.ResourceIndexer[*policyv1beta1.PodDisruptionBudget] } diff --git a/vendor/k8s.io/client-go/listers/rbac/v1/clusterrole.go b/vendor/k8s.io/client-go/listers/rbac/v1/clusterrole.go index 11a4cb4db..456393aee 100644 --- a/vendor/k8s.io/client-go/listers/rbac/v1/clusterrole.go +++ b/vendor/k8s.io/client-go/listers/rbac/v1/clusterrole.go @@ -19,10 +19,10 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/rbac/v1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/listers" - "k8s.io/client-go/tools/cache" + rbacv1 "k8s.io/api/rbac/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // ClusterRoleLister helps list ClusterRoles. @@ -30,19 +30,19 @@ import ( type ClusterRoleLister interface { // List lists all ClusterRoles in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.ClusterRole, err error) + List(selector labels.Selector) (ret []*rbacv1.ClusterRole, err error) // Get retrieves the ClusterRole from the index for a given name. // Objects returned here must be treated as read-only. - Get(name string) (*v1.ClusterRole, error) + Get(name string) (*rbacv1.ClusterRole, error) ClusterRoleListerExpansion } // clusterRoleLister implements the ClusterRoleLister interface. 
type clusterRoleLister struct { - listers.ResourceIndexer[*v1.ClusterRole] + listers.ResourceIndexer[*rbacv1.ClusterRole] } // NewClusterRoleLister returns a new ClusterRoleLister. func NewClusterRoleLister(indexer cache.Indexer) ClusterRoleLister { - return &clusterRoleLister{listers.New[*v1.ClusterRole](indexer, v1.Resource("clusterrole"))} + return &clusterRoleLister{listers.New[*rbacv1.ClusterRole](indexer, rbacv1.Resource("clusterrole"))} } diff --git a/vendor/k8s.io/client-go/listers/rbac/v1/clusterrolebinding.go b/vendor/k8s.io/client-go/listers/rbac/v1/clusterrolebinding.go index 4c3583bb9..bf84144a7 100644 --- a/vendor/k8s.io/client-go/listers/rbac/v1/clusterrolebinding.go +++ b/vendor/k8s.io/client-go/listers/rbac/v1/clusterrolebinding.go @@ -19,10 +19,10 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/rbac/v1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/listers" - "k8s.io/client-go/tools/cache" + rbacv1 "k8s.io/api/rbac/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // ClusterRoleBindingLister helps list ClusterRoleBindings. @@ -30,19 +30,19 @@ import ( type ClusterRoleBindingLister interface { // List lists all ClusterRoleBindings in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.ClusterRoleBinding, err error) + List(selector labels.Selector) (ret []*rbacv1.ClusterRoleBinding, err error) // Get retrieves the ClusterRoleBinding from the index for a given name. // Objects returned here must be treated as read-only. - Get(name string) (*v1.ClusterRoleBinding, error) + Get(name string) (*rbacv1.ClusterRoleBinding, error) ClusterRoleBindingListerExpansion } // clusterRoleBindingLister implements the ClusterRoleBindingLister interface. type clusterRoleBindingLister struct { - listers.ResourceIndexer[*v1.ClusterRoleBinding] + listers.ResourceIndexer[*rbacv1.ClusterRoleBinding] } // NewClusterRoleBindingLister returns a new ClusterRoleBindingLister. func NewClusterRoleBindingLister(indexer cache.Indexer) ClusterRoleBindingLister { - return &clusterRoleBindingLister{listers.New[*v1.ClusterRoleBinding](indexer, v1.Resource("clusterrolebinding"))} + return &clusterRoleBindingLister{listers.New[*rbacv1.ClusterRoleBinding](indexer, rbacv1.Resource("clusterrolebinding"))} } diff --git a/vendor/k8s.io/client-go/listers/rbac/v1/role.go b/vendor/k8s.io/client-go/listers/rbac/v1/role.go index 3e9425321..d0077e3ce 100644 --- a/vendor/k8s.io/client-go/listers/rbac/v1/role.go +++ b/vendor/k8s.io/client-go/listers/rbac/v1/role.go @@ -19,10 +19,10 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/rbac/v1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/listers" - "k8s.io/client-go/tools/cache" + rbacv1 "k8s.io/api/rbac/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // RoleLister helps list Roles. @@ -30,7 +30,7 @@ import ( type RoleLister interface { // List lists all Roles in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.Role, err error) + List(selector labels.Selector) (ret []*rbacv1.Role, err error) // Roles returns an object that can list and get Roles. Roles(namespace string) RoleNamespaceLister RoleListerExpansion @@ -38,17 +38,17 @@ type RoleLister interface { // roleLister implements the RoleLister interface. 
type roleLister struct { - listers.ResourceIndexer[*v1.Role] + listers.ResourceIndexer[*rbacv1.Role] } // NewRoleLister returns a new RoleLister. func NewRoleLister(indexer cache.Indexer) RoleLister { - return &roleLister{listers.New[*v1.Role](indexer, v1.Resource("role"))} + return &roleLister{listers.New[*rbacv1.Role](indexer, rbacv1.Resource("role"))} } // Roles returns an object that can list and get Roles. func (s *roleLister) Roles(namespace string) RoleNamespaceLister { - return roleNamespaceLister{listers.NewNamespaced[*v1.Role](s.ResourceIndexer, namespace)} + return roleNamespaceLister{listers.NewNamespaced[*rbacv1.Role](s.ResourceIndexer, namespace)} } // RoleNamespaceLister helps list and get Roles. @@ -56,15 +56,15 @@ func (s *roleLister) Roles(namespace string) RoleNamespaceLister { type RoleNamespaceLister interface { // List lists all Roles in the indexer for a given namespace. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.Role, err error) + List(selector labels.Selector) (ret []*rbacv1.Role, err error) // Get retrieves the Role from the indexer for a given namespace and name. // Objects returned here must be treated as read-only. - Get(name string) (*v1.Role, error) + Get(name string) (*rbacv1.Role, error) RoleNamespaceListerExpansion } // roleNamespaceLister implements the RoleNamespaceLister // interface. type roleNamespaceLister struct { - listers.ResourceIndexer[*v1.Role] + listers.ResourceIndexer[*rbacv1.Role] } diff --git a/vendor/k8s.io/client-go/listers/rbac/v1/rolebinding.go b/vendor/k8s.io/client-go/listers/rbac/v1/rolebinding.go index 1b3162a11..a0e366156 100644 --- a/vendor/k8s.io/client-go/listers/rbac/v1/rolebinding.go +++ b/vendor/k8s.io/client-go/listers/rbac/v1/rolebinding.go @@ -19,10 +19,10 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/rbac/v1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/listers" - "k8s.io/client-go/tools/cache" + rbacv1 "k8s.io/api/rbac/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // RoleBindingLister helps list RoleBindings. @@ -30,7 +30,7 @@ import ( type RoleBindingLister interface { // List lists all RoleBindings in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.RoleBinding, err error) + List(selector labels.Selector) (ret []*rbacv1.RoleBinding, err error) // RoleBindings returns an object that can list and get RoleBindings. RoleBindings(namespace string) RoleBindingNamespaceLister RoleBindingListerExpansion @@ -38,17 +38,17 @@ type RoleBindingLister interface { // roleBindingLister implements the RoleBindingLister interface. type roleBindingLister struct { - listers.ResourceIndexer[*v1.RoleBinding] + listers.ResourceIndexer[*rbacv1.RoleBinding] } // NewRoleBindingLister returns a new RoleBindingLister. func NewRoleBindingLister(indexer cache.Indexer) RoleBindingLister { - return &roleBindingLister{listers.New[*v1.RoleBinding](indexer, v1.Resource("rolebinding"))} + return &roleBindingLister{listers.New[*rbacv1.RoleBinding](indexer, rbacv1.Resource("rolebinding"))} } // RoleBindings returns an object that can list and get RoleBindings. 
func (s *roleBindingLister) RoleBindings(namespace string) RoleBindingNamespaceLister { - return roleBindingNamespaceLister{listers.NewNamespaced[*v1.RoleBinding](s.ResourceIndexer, namespace)} + return roleBindingNamespaceLister{listers.NewNamespaced[*rbacv1.RoleBinding](s.ResourceIndexer, namespace)} } // RoleBindingNamespaceLister helps list and get RoleBindings. @@ -56,15 +56,15 @@ func (s *roleBindingLister) RoleBindings(namespace string) RoleBindingNamespaceL type RoleBindingNamespaceLister interface { // List lists all RoleBindings in the indexer for a given namespace. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.RoleBinding, err error) + List(selector labels.Selector) (ret []*rbacv1.RoleBinding, err error) // Get retrieves the RoleBinding from the indexer for a given namespace and name. // Objects returned here must be treated as read-only. - Get(name string) (*v1.RoleBinding, error) + Get(name string) (*rbacv1.RoleBinding, error) RoleBindingNamespaceListerExpansion } // roleBindingNamespaceLister implements the RoleBindingNamespaceLister // interface. type roleBindingNamespaceLister struct { - listers.ResourceIndexer[*v1.RoleBinding] + listers.ResourceIndexer[*rbacv1.RoleBinding] } diff --git a/vendor/k8s.io/client-go/listers/rbac/v1alpha1/clusterrole.go b/vendor/k8s.io/client-go/listers/rbac/v1alpha1/clusterrole.go index 5e5bbaa5a..1783b7161 100644 --- a/vendor/k8s.io/client-go/listers/rbac/v1alpha1/clusterrole.go +++ b/vendor/k8s.io/client-go/listers/rbac/v1alpha1/clusterrole.go @@ -19,10 +19,10 @@ limitations under the License. package v1alpha1 import ( - v1alpha1 "k8s.io/api/rbac/v1alpha1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/listers" - "k8s.io/client-go/tools/cache" + rbacv1alpha1 "k8s.io/api/rbac/v1alpha1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // ClusterRoleLister helps list ClusterRoles. @@ -30,19 +30,19 @@ import ( type ClusterRoleLister interface { // List lists all ClusterRoles in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1alpha1.ClusterRole, err error) + List(selector labels.Selector) (ret []*rbacv1alpha1.ClusterRole, err error) // Get retrieves the ClusterRole from the index for a given name. // Objects returned here must be treated as read-only. - Get(name string) (*v1alpha1.ClusterRole, error) + Get(name string) (*rbacv1alpha1.ClusterRole, error) ClusterRoleListerExpansion } // clusterRoleLister implements the ClusterRoleLister interface. type clusterRoleLister struct { - listers.ResourceIndexer[*v1alpha1.ClusterRole] + listers.ResourceIndexer[*rbacv1alpha1.ClusterRole] } // NewClusterRoleLister returns a new ClusterRoleLister. func NewClusterRoleLister(indexer cache.Indexer) ClusterRoleLister { - return &clusterRoleLister{listers.New[*v1alpha1.ClusterRole](indexer, v1alpha1.Resource("clusterrole"))} + return &clusterRoleLister{listers.New[*rbacv1alpha1.ClusterRole](indexer, rbacv1alpha1.Resource("clusterrole"))} } diff --git a/vendor/k8s.io/client-go/listers/rbac/v1alpha1/clusterrolebinding.go b/vendor/k8s.io/client-go/listers/rbac/v1alpha1/clusterrolebinding.go index d825d0a2f..be80c7585 100644 --- a/vendor/k8s.io/client-go/listers/rbac/v1alpha1/clusterrolebinding.go +++ b/vendor/k8s.io/client-go/listers/rbac/v1alpha1/clusterrolebinding.go @@ -19,10 +19,10 @@ limitations under the License. 
package v1alpha1 import ( - v1alpha1 "k8s.io/api/rbac/v1alpha1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/listers" - "k8s.io/client-go/tools/cache" + rbacv1alpha1 "k8s.io/api/rbac/v1alpha1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // ClusterRoleBindingLister helps list ClusterRoleBindings. @@ -30,19 +30,19 @@ import ( type ClusterRoleBindingLister interface { // List lists all ClusterRoleBindings in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1alpha1.ClusterRoleBinding, err error) + List(selector labels.Selector) (ret []*rbacv1alpha1.ClusterRoleBinding, err error) // Get retrieves the ClusterRoleBinding from the index for a given name. // Objects returned here must be treated as read-only. - Get(name string) (*v1alpha1.ClusterRoleBinding, error) + Get(name string) (*rbacv1alpha1.ClusterRoleBinding, error) ClusterRoleBindingListerExpansion } // clusterRoleBindingLister implements the ClusterRoleBindingLister interface. type clusterRoleBindingLister struct { - listers.ResourceIndexer[*v1alpha1.ClusterRoleBinding] + listers.ResourceIndexer[*rbacv1alpha1.ClusterRoleBinding] } // NewClusterRoleBindingLister returns a new ClusterRoleBindingLister. func NewClusterRoleBindingLister(indexer cache.Indexer) ClusterRoleBindingLister { - return &clusterRoleBindingLister{listers.New[*v1alpha1.ClusterRoleBinding](indexer, v1alpha1.Resource("clusterrolebinding"))} + return &clusterRoleBindingLister{listers.New[*rbacv1alpha1.ClusterRoleBinding](indexer, rbacv1alpha1.Resource("clusterrolebinding"))} } diff --git a/vendor/k8s.io/client-go/listers/rbac/v1alpha1/role.go b/vendor/k8s.io/client-go/listers/rbac/v1alpha1/role.go index f3d2b2838..28a1ede19 100644 --- a/vendor/k8s.io/client-go/listers/rbac/v1alpha1/role.go +++ b/vendor/k8s.io/client-go/listers/rbac/v1alpha1/role.go @@ -19,10 +19,10 @@ limitations under the License. package v1alpha1 import ( - v1alpha1 "k8s.io/api/rbac/v1alpha1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/listers" - "k8s.io/client-go/tools/cache" + rbacv1alpha1 "k8s.io/api/rbac/v1alpha1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // RoleLister helps list Roles. @@ -30,7 +30,7 @@ import ( type RoleLister interface { // List lists all Roles in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1alpha1.Role, err error) + List(selector labels.Selector) (ret []*rbacv1alpha1.Role, err error) // Roles returns an object that can list and get Roles. Roles(namespace string) RoleNamespaceLister RoleListerExpansion @@ -38,17 +38,17 @@ type RoleLister interface { // roleLister implements the RoleLister interface. type roleLister struct { - listers.ResourceIndexer[*v1alpha1.Role] + listers.ResourceIndexer[*rbacv1alpha1.Role] } // NewRoleLister returns a new RoleLister. func NewRoleLister(indexer cache.Indexer) RoleLister { - return &roleLister{listers.New[*v1alpha1.Role](indexer, v1alpha1.Resource("role"))} + return &roleLister{listers.New[*rbacv1alpha1.Role](indexer, rbacv1alpha1.Resource("role"))} } // Roles returns an object that can list and get Roles. 
func (s *roleLister) Roles(namespace string) RoleNamespaceLister { - return roleNamespaceLister{listers.NewNamespaced[*v1alpha1.Role](s.ResourceIndexer, namespace)} + return roleNamespaceLister{listers.NewNamespaced[*rbacv1alpha1.Role](s.ResourceIndexer, namespace)} } // RoleNamespaceLister helps list and get Roles. @@ -56,15 +56,15 @@ func (s *roleLister) Roles(namespace string) RoleNamespaceLister { type RoleNamespaceLister interface { // List lists all Roles in the indexer for a given namespace. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1alpha1.Role, err error) + List(selector labels.Selector) (ret []*rbacv1alpha1.Role, err error) // Get retrieves the Role from the indexer for a given namespace and name. // Objects returned here must be treated as read-only. - Get(name string) (*v1alpha1.Role, error) + Get(name string) (*rbacv1alpha1.Role, error) RoleNamespaceListerExpansion } // roleNamespaceLister implements the RoleNamespaceLister // interface. type roleNamespaceLister struct { - listers.ResourceIndexer[*v1alpha1.Role] + listers.ResourceIndexer[*rbacv1alpha1.Role] } diff --git a/vendor/k8s.io/client-go/listers/rbac/v1alpha1/rolebinding.go b/vendor/k8s.io/client-go/listers/rbac/v1alpha1/rolebinding.go index 6d6f7b700..67e123f63 100644 --- a/vendor/k8s.io/client-go/listers/rbac/v1alpha1/rolebinding.go +++ b/vendor/k8s.io/client-go/listers/rbac/v1alpha1/rolebinding.go @@ -19,10 +19,10 @@ limitations under the License. package v1alpha1 import ( - v1alpha1 "k8s.io/api/rbac/v1alpha1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/listers" - "k8s.io/client-go/tools/cache" + rbacv1alpha1 "k8s.io/api/rbac/v1alpha1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // RoleBindingLister helps list RoleBindings. @@ -30,7 +30,7 @@ import ( type RoleBindingLister interface { // List lists all RoleBindings in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1alpha1.RoleBinding, err error) + List(selector labels.Selector) (ret []*rbacv1alpha1.RoleBinding, err error) // RoleBindings returns an object that can list and get RoleBindings. RoleBindings(namespace string) RoleBindingNamespaceLister RoleBindingListerExpansion @@ -38,17 +38,17 @@ type RoleBindingLister interface { // roleBindingLister implements the RoleBindingLister interface. type roleBindingLister struct { - listers.ResourceIndexer[*v1alpha1.RoleBinding] + listers.ResourceIndexer[*rbacv1alpha1.RoleBinding] } // NewRoleBindingLister returns a new RoleBindingLister. func NewRoleBindingLister(indexer cache.Indexer) RoleBindingLister { - return &roleBindingLister{listers.New[*v1alpha1.RoleBinding](indexer, v1alpha1.Resource("rolebinding"))} + return &roleBindingLister{listers.New[*rbacv1alpha1.RoleBinding](indexer, rbacv1alpha1.Resource("rolebinding"))} } // RoleBindings returns an object that can list and get RoleBindings. func (s *roleBindingLister) RoleBindings(namespace string) RoleBindingNamespaceLister { - return roleBindingNamespaceLister{listers.NewNamespaced[*v1alpha1.RoleBinding](s.ResourceIndexer, namespace)} + return roleBindingNamespaceLister{listers.NewNamespaced[*rbacv1alpha1.RoleBinding](s.ResourceIndexer, namespace)} } // RoleBindingNamespaceLister helps list and get RoleBindings. 
@@ -56,15 +56,15 @@ func (s *roleBindingLister) RoleBindings(namespace string) RoleBindingNamespaceL type RoleBindingNamespaceLister interface { // List lists all RoleBindings in the indexer for a given namespace. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1alpha1.RoleBinding, err error) + List(selector labels.Selector) (ret []*rbacv1alpha1.RoleBinding, err error) // Get retrieves the RoleBinding from the indexer for a given namespace and name. // Objects returned here must be treated as read-only. - Get(name string) (*v1alpha1.RoleBinding, error) + Get(name string) (*rbacv1alpha1.RoleBinding, error) RoleBindingNamespaceListerExpansion } // roleBindingNamespaceLister implements the RoleBindingNamespaceLister // interface. type roleBindingNamespaceLister struct { - listers.ResourceIndexer[*v1alpha1.RoleBinding] + listers.ResourceIndexer[*rbacv1alpha1.RoleBinding] } diff --git a/vendor/k8s.io/client-go/listers/rbac/v1beta1/clusterrole.go b/vendor/k8s.io/client-go/listers/rbac/v1beta1/clusterrole.go index bade03262..9cf996b86 100644 --- a/vendor/k8s.io/client-go/listers/rbac/v1beta1/clusterrole.go +++ b/vendor/k8s.io/client-go/listers/rbac/v1beta1/clusterrole.go @@ -19,10 +19,10 @@ limitations under the License. package v1beta1 import ( - v1beta1 "k8s.io/api/rbac/v1beta1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/listers" - "k8s.io/client-go/tools/cache" + rbacv1beta1 "k8s.io/api/rbac/v1beta1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // ClusterRoleLister helps list ClusterRoles. @@ -30,19 +30,19 @@ import ( type ClusterRoleLister interface { // List lists all ClusterRoles in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1beta1.ClusterRole, err error) + List(selector labels.Selector) (ret []*rbacv1beta1.ClusterRole, err error) // Get retrieves the ClusterRole from the index for a given name. // Objects returned here must be treated as read-only. - Get(name string) (*v1beta1.ClusterRole, error) + Get(name string) (*rbacv1beta1.ClusterRole, error) ClusterRoleListerExpansion } // clusterRoleLister implements the ClusterRoleLister interface. type clusterRoleLister struct { - listers.ResourceIndexer[*v1beta1.ClusterRole] + listers.ResourceIndexer[*rbacv1beta1.ClusterRole] } // NewClusterRoleLister returns a new ClusterRoleLister. func NewClusterRoleLister(indexer cache.Indexer) ClusterRoleLister { - return &clusterRoleLister{listers.New[*v1beta1.ClusterRole](indexer, v1beta1.Resource("clusterrole"))} + return &clusterRoleLister{listers.New[*rbacv1beta1.ClusterRole](indexer, rbacv1beta1.Resource("clusterrole"))} } diff --git a/vendor/k8s.io/client-go/listers/rbac/v1beta1/clusterrolebinding.go b/vendor/k8s.io/client-go/listers/rbac/v1beta1/clusterrolebinding.go index 1f4d391be..41418f762 100644 --- a/vendor/k8s.io/client-go/listers/rbac/v1beta1/clusterrolebinding.go +++ b/vendor/k8s.io/client-go/listers/rbac/v1beta1/clusterrolebinding.go @@ -19,10 +19,10 @@ limitations under the License. package v1beta1 import ( - v1beta1 "k8s.io/api/rbac/v1beta1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/listers" - "k8s.io/client-go/tools/cache" + rbacv1beta1 "k8s.io/api/rbac/v1beta1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // ClusterRoleBindingLister helps list ClusterRoleBindings. 
@@ -30,19 +30,19 @@ import ( type ClusterRoleBindingLister interface { // List lists all ClusterRoleBindings in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1beta1.ClusterRoleBinding, err error) + List(selector labels.Selector) (ret []*rbacv1beta1.ClusterRoleBinding, err error) // Get retrieves the ClusterRoleBinding from the index for a given name. // Objects returned here must be treated as read-only. - Get(name string) (*v1beta1.ClusterRoleBinding, error) + Get(name string) (*rbacv1beta1.ClusterRoleBinding, error) ClusterRoleBindingListerExpansion } // clusterRoleBindingLister implements the ClusterRoleBindingLister interface. type clusterRoleBindingLister struct { - listers.ResourceIndexer[*v1beta1.ClusterRoleBinding] + listers.ResourceIndexer[*rbacv1beta1.ClusterRoleBinding] } // NewClusterRoleBindingLister returns a new ClusterRoleBindingLister. func NewClusterRoleBindingLister(indexer cache.Indexer) ClusterRoleBindingLister { - return &clusterRoleBindingLister{listers.New[*v1beta1.ClusterRoleBinding](indexer, v1beta1.Resource("clusterrolebinding"))} + return &clusterRoleBindingLister{listers.New[*rbacv1beta1.ClusterRoleBinding](indexer, rbacv1beta1.Resource("clusterrolebinding"))} } diff --git a/vendor/k8s.io/client-go/listers/rbac/v1beta1/role.go b/vendor/k8s.io/client-go/listers/rbac/v1beta1/role.go index 71666a9a0..04d4ab772 100644 --- a/vendor/k8s.io/client-go/listers/rbac/v1beta1/role.go +++ b/vendor/k8s.io/client-go/listers/rbac/v1beta1/role.go @@ -19,10 +19,10 @@ limitations under the License. package v1beta1 import ( - v1beta1 "k8s.io/api/rbac/v1beta1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/listers" - "k8s.io/client-go/tools/cache" + rbacv1beta1 "k8s.io/api/rbac/v1beta1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // RoleLister helps list Roles. @@ -30,7 +30,7 @@ import ( type RoleLister interface { // List lists all Roles in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1beta1.Role, err error) + List(selector labels.Selector) (ret []*rbacv1beta1.Role, err error) // Roles returns an object that can list and get Roles. Roles(namespace string) RoleNamespaceLister RoleListerExpansion @@ -38,17 +38,17 @@ type RoleLister interface { // roleLister implements the RoleLister interface. type roleLister struct { - listers.ResourceIndexer[*v1beta1.Role] + listers.ResourceIndexer[*rbacv1beta1.Role] } // NewRoleLister returns a new RoleLister. func NewRoleLister(indexer cache.Indexer) RoleLister { - return &roleLister{listers.New[*v1beta1.Role](indexer, v1beta1.Resource("role"))} + return &roleLister{listers.New[*rbacv1beta1.Role](indexer, rbacv1beta1.Resource("role"))} } // Roles returns an object that can list and get Roles. func (s *roleLister) Roles(namespace string) RoleNamespaceLister { - return roleNamespaceLister{listers.NewNamespaced[*v1beta1.Role](s.ResourceIndexer, namespace)} + return roleNamespaceLister{listers.NewNamespaced[*rbacv1beta1.Role](s.ResourceIndexer, namespace)} } // RoleNamespaceLister helps list and get Roles. @@ -56,15 +56,15 @@ func (s *roleLister) Roles(namespace string) RoleNamespaceLister { type RoleNamespaceLister interface { // List lists all Roles in the indexer for a given namespace. // Objects returned here must be treated as read-only. 
- List(selector labels.Selector) (ret []*v1beta1.Role, err error) + List(selector labels.Selector) (ret []*rbacv1beta1.Role, err error) // Get retrieves the Role from the indexer for a given namespace and name. // Objects returned here must be treated as read-only. - Get(name string) (*v1beta1.Role, error) + Get(name string) (*rbacv1beta1.Role, error) RoleNamespaceListerExpansion } // roleNamespaceLister implements the RoleNamespaceLister // interface. type roleNamespaceLister struct { - listers.ResourceIndexer[*v1beta1.Role] + listers.ResourceIndexer[*rbacv1beta1.Role] } diff --git a/vendor/k8s.io/client-go/listers/rbac/v1beta1/rolebinding.go b/vendor/k8s.io/client-go/listers/rbac/v1beta1/rolebinding.go index 00f8542cb..816673229 100644 --- a/vendor/k8s.io/client-go/listers/rbac/v1beta1/rolebinding.go +++ b/vendor/k8s.io/client-go/listers/rbac/v1beta1/rolebinding.go @@ -19,10 +19,10 @@ limitations under the License. package v1beta1 import ( - v1beta1 "k8s.io/api/rbac/v1beta1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/listers" - "k8s.io/client-go/tools/cache" + rbacv1beta1 "k8s.io/api/rbac/v1beta1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // RoleBindingLister helps list RoleBindings. @@ -30,7 +30,7 @@ import ( type RoleBindingLister interface { // List lists all RoleBindings in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1beta1.RoleBinding, err error) + List(selector labels.Selector) (ret []*rbacv1beta1.RoleBinding, err error) // RoleBindings returns an object that can list and get RoleBindings. RoleBindings(namespace string) RoleBindingNamespaceLister RoleBindingListerExpansion @@ -38,17 +38,17 @@ type RoleBindingLister interface { // roleBindingLister implements the RoleBindingLister interface. type roleBindingLister struct { - listers.ResourceIndexer[*v1beta1.RoleBinding] + listers.ResourceIndexer[*rbacv1beta1.RoleBinding] } // NewRoleBindingLister returns a new RoleBindingLister. func NewRoleBindingLister(indexer cache.Indexer) RoleBindingLister { - return &roleBindingLister{listers.New[*v1beta1.RoleBinding](indexer, v1beta1.Resource("rolebinding"))} + return &roleBindingLister{listers.New[*rbacv1beta1.RoleBinding](indexer, rbacv1beta1.Resource("rolebinding"))} } // RoleBindings returns an object that can list and get RoleBindings. func (s *roleBindingLister) RoleBindings(namespace string) RoleBindingNamespaceLister { - return roleBindingNamespaceLister{listers.NewNamespaced[*v1beta1.RoleBinding](s.ResourceIndexer, namespace)} + return roleBindingNamespaceLister{listers.NewNamespaced[*rbacv1beta1.RoleBinding](s.ResourceIndexer, namespace)} } // RoleBindingNamespaceLister helps list and get RoleBindings. @@ -56,15 +56,15 @@ func (s *roleBindingLister) RoleBindings(namespace string) RoleBindingNamespaceL type RoleBindingNamespaceLister interface { // List lists all RoleBindings in the indexer for a given namespace. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1beta1.RoleBinding, err error) + List(selector labels.Selector) (ret []*rbacv1beta1.RoleBinding, err error) // Get retrieves the RoleBinding from the indexer for a given namespace and name. // Objects returned here must be treated as read-only. 
- Get(name string) (*v1beta1.RoleBinding, error) + Get(name string) (*rbacv1beta1.RoleBinding, error) RoleBindingNamespaceListerExpansion } // roleBindingNamespaceLister implements the RoleBindingNamespaceLister // interface. type roleBindingNamespaceLister struct { - listers.ResourceIndexer[*v1beta1.RoleBinding] + listers.ResourceIndexer[*rbacv1beta1.RoleBinding] } diff --git a/vendor/k8s.io/client-go/listers/resource/v1alpha3/deviceclass.go b/vendor/k8s.io/client-go/listers/resource/v1alpha3/deviceclass.go index 0950691e2..05032833b 100644 --- a/vendor/k8s.io/client-go/listers/resource/v1alpha3/deviceclass.go +++ b/vendor/k8s.io/client-go/listers/resource/v1alpha3/deviceclass.go @@ -19,10 +19,10 @@ limitations under the License. package v1alpha3 import ( - v1alpha3 "k8s.io/api/resource/v1alpha3" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/listers" - "k8s.io/client-go/tools/cache" + resourcev1alpha3 "k8s.io/api/resource/v1alpha3" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // DeviceClassLister helps list DeviceClasses. @@ -30,19 +30,19 @@ import ( type DeviceClassLister interface { // List lists all DeviceClasses in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1alpha3.DeviceClass, err error) + List(selector labels.Selector) (ret []*resourcev1alpha3.DeviceClass, err error) // Get retrieves the DeviceClass from the index for a given name. // Objects returned here must be treated as read-only. - Get(name string) (*v1alpha3.DeviceClass, error) + Get(name string) (*resourcev1alpha3.DeviceClass, error) DeviceClassListerExpansion } // deviceClassLister implements the DeviceClassLister interface. type deviceClassLister struct { - listers.ResourceIndexer[*v1alpha3.DeviceClass] + listers.ResourceIndexer[*resourcev1alpha3.DeviceClass] } // NewDeviceClassLister returns a new DeviceClassLister. func NewDeviceClassLister(indexer cache.Indexer) DeviceClassLister { - return &deviceClassLister{listers.New[*v1alpha3.DeviceClass](indexer, v1alpha3.Resource("deviceclass"))} + return &deviceClassLister{listers.New[*resourcev1alpha3.DeviceClass](indexer, resourcev1alpha3.Resource("deviceclass"))} } diff --git a/vendor/k8s.io/client-go/listers/resource/v1alpha3/expansion_generated.go b/vendor/k8s.io/client-go/listers/resource/v1alpha3/expansion_generated.go index b6642f635..f626c9283 100644 --- a/vendor/k8s.io/client-go/listers/resource/v1alpha3/expansion_generated.go +++ b/vendor/k8s.io/client-go/listers/resource/v1alpha3/expansion_generated.go @@ -22,14 +22,6 @@ package v1alpha3 // DeviceClassLister. type DeviceClassListerExpansion interface{} -// PodSchedulingContextListerExpansion allows custom methods to be added to -// PodSchedulingContextLister. -type PodSchedulingContextListerExpansion interface{} - -// PodSchedulingContextNamespaceListerExpansion allows custom methods to be added to -// PodSchedulingContextNamespaceLister. -type PodSchedulingContextNamespaceListerExpansion interface{} - // ResourceClaimListerExpansion allows custom methods to be added to // ResourceClaimLister. 
type ResourceClaimListerExpansion interface{} diff --git a/vendor/k8s.io/client-go/listers/resource/v1alpha3/podschedulingcontext.go b/vendor/k8s.io/client-go/listers/resource/v1alpha3/podschedulingcontext.go deleted file mode 100644 index ed9b04943..000000000 --- a/vendor/k8s.io/client-go/listers/resource/v1alpha3/podschedulingcontext.go +++ /dev/null @@ -1,70 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by lister-gen. DO NOT EDIT. - -package v1alpha3 - -import ( - v1alpha3 "k8s.io/api/resource/v1alpha3" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/listers" - "k8s.io/client-go/tools/cache" -) - -// PodSchedulingContextLister helps list PodSchedulingContexts. -// All objects returned here must be treated as read-only. -type PodSchedulingContextLister interface { - // List lists all PodSchedulingContexts in the indexer. - // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1alpha3.PodSchedulingContext, err error) - // PodSchedulingContexts returns an object that can list and get PodSchedulingContexts. - PodSchedulingContexts(namespace string) PodSchedulingContextNamespaceLister - PodSchedulingContextListerExpansion -} - -// podSchedulingContextLister implements the PodSchedulingContextLister interface. -type podSchedulingContextLister struct { - listers.ResourceIndexer[*v1alpha3.PodSchedulingContext] -} - -// NewPodSchedulingContextLister returns a new PodSchedulingContextLister. -func NewPodSchedulingContextLister(indexer cache.Indexer) PodSchedulingContextLister { - return &podSchedulingContextLister{listers.New[*v1alpha3.PodSchedulingContext](indexer, v1alpha3.Resource("podschedulingcontext"))} -} - -// PodSchedulingContexts returns an object that can list and get PodSchedulingContexts. -func (s *podSchedulingContextLister) PodSchedulingContexts(namespace string) PodSchedulingContextNamespaceLister { - return podSchedulingContextNamespaceLister{listers.NewNamespaced[*v1alpha3.PodSchedulingContext](s.ResourceIndexer, namespace)} -} - -// PodSchedulingContextNamespaceLister helps list and get PodSchedulingContexts. -// All objects returned here must be treated as read-only. -type PodSchedulingContextNamespaceLister interface { - // List lists all PodSchedulingContexts in the indexer for a given namespace. - // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1alpha3.PodSchedulingContext, err error) - // Get retrieves the PodSchedulingContext from the indexer for a given namespace and name. - // Objects returned here must be treated as read-only. - Get(name string) (*v1alpha3.PodSchedulingContext, error) - PodSchedulingContextNamespaceListerExpansion -} - -// podSchedulingContextNamespaceLister implements the PodSchedulingContextNamespaceLister -// interface. 
-type podSchedulingContextNamespaceLister struct { - listers.ResourceIndexer[*v1alpha3.PodSchedulingContext] -} diff --git a/vendor/k8s.io/client-go/listers/resource/v1alpha3/resourceclaim.go b/vendor/k8s.io/client-go/listers/resource/v1alpha3/resourceclaim.go index ac6a3e156..9de229bff 100644 --- a/vendor/k8s.io/client-go/listers/resource/v1alpha3/resourceclaim.go +++ b/vendor/k8s.io/client-go/listers/resource/v1alpha3/resourceclaim.go @@ -19,10 +19,10 @@ limitations under the License. package v1alpha3 import ( - v1alpha3 "k8s.io/api/resource/v1alpha3" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/listers" - "k8s.io/client-go/tools/cache" + resourcev1alpha3 "k8s.io/api/resource/v1alpha3" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // ResourceClaimLister helps list ResourceClaims. @@ -30,7 +30,7 @@ import ( type ResourceClaimLister interface { // List lists all ResourceClaims in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1alpha3.ResourceClaim, err error) + List(selector labels.Selector) (ret []*resourcev1alpha3.ResourceClaim, err error) // ResourceClaims returns an object that can list and get ResourceClaims. ResourceClaims(namespace string) ResourceClaimNamespaceLister ResourceClaimListerExpansion @@ -38,17 +38,17 @@ type ResourceClaimLister interface { // resourceClaimLister implements the ResourceClaimLister interface. type resourceClaimLister struct { - listers.ResourceIndexer[*v1alpha3.ResourceClaim] + listers.ResourceIndexer[*resourcev1alpha3.ResourceClaim] } // NewResourceClaimLister returns a new ResourceClaimLister. func NewResourceClaimLister(indexer cache.Indexer) ResourceClaimLister { - return &resourceClaimLister{listers.New[*v1alpha3.ResourceClaim](indexer, v1alpha3.Resource("resourceclaim"))} + return &resourceClaimLister{listers.New[*resourcev1alpha3.ResourceClaim](indexer, resourcev1alpha3.Resource("resourceclaim"))} } // ResourceClaims returns an object that can list and get ResourceClaims. func (s *resourceClaimLister) ResourceClaims(namespace string) ResourceClaimNamespaceLister { - return resourceClaimNamespaceLister{listers.NewNamespaced[*v1alpha3.ResourceClaim](s.ResourceIndexer, namespace)} + return resourceClaimNamespaceLister{listers.NewNamespaced[*resourcev1alpha3.ResourceClaim](s.ResourceIndexer, namespace)} } // ResourceClaimNamespaceLister helps list and get ResourceClaims. @@ -56,15 +56,15 @@ func (s *resourceClaimLister) ResourceClaims(namespace string) ResourceClaimName type ResourceClaimNamespaceLister interface { // List lists all ResourceClaims in the indexer for a given namespace. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1alpha3.ResourceClaim, err error) + List(selector labels.Selector) (ret []*resourcev1alpha3.ResourceClaim, err error) // Get retrieves the ResourceClaim from the indexer for a given namespace and name. // Objects returned here must be treated as read-only. - Get(name string) (*v1alpha3.ResourceClaim, error) + Get(name string) (*resourcev1alpha3.ResourceClaim, error) ResourceClaimNamespaceListerExpansion } // resourceClaimNamespaceLister implements the ResourceClaimNamespaceLister // interface. 
type resourceClaimNamespaceLister struct { - listers.ResourceIndexer[*v1alpha3.ResourceClaim] + listers.ResourceIndexer[*resourcev1alpha3.ResourceClaim] } diff --git a/vendor/k8s.io/client-go/listers/resource/v1alpha3/resourceclaimtemplate.go b/vendor/k8s.io/client-go/listers/resource/v1alpha3/resourceclaimtemplate.go index 6c15f82bb..b0895edd2 100644 --- a/vendor/k8s.io/client-go/listers/resource/v1alpha3/resourceclaimtemplate.go +++ b/vendor/k8s.io/client-go/listers/resource/v1alpha3/resourceclaimtemplate.go @@ -19,10 +19,10 @@ limitations under the License. package v1alpha3 import ( - v1alpha3 "k8s.io/api/resource/v1alpha3" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/listers" - "k8s.io/client-go/tools/cache" + resourcev1alpha3 "k8s.io/api/resource/v1alpha3" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // ResourceClaimTemplateLister helps list ResourceClaimTemplates. @@ -30,7 +30,7 @@ import ( type ResourceClaimTemplateLister interface { // List lists all ResourceClaimTemplates in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1alpha3.ResourceClaimTemplate, err error) + List(selector labels.Selector) (ret []*resourcev1alpha3.ResourceClaimTemplate, err error) // ResourceClaimTemplates returns an object that can list and get ResourceClaimTemplates. ResourceClaimTemplates(namespace string) ResourceClaimTemplateNamespaceLister ResourceClaimTemplateListerExpansion @@ -38,17 +38,17 @@ type ResourceClaimTemplateLister interface { // resourceClaimTemplateLister implements the ResourceClaimTemplateLister interface. type resourceClaimTemplateLister struct { - listers.ResourceIndexer[*v1alpha3.ResourceClaimTemplate] + listers.ResourceIndexer[*resourcev1alpha3.ResourceClaimTemplate] } // NewResourceClaimTemplateLister returns a new ResourceClaimTemplateLister. func NewResourceClaimTemplateLister(indexer cache.Indexer) ResourceClaimTemplateLister { - return &resourceClaimTemplateLister{listers.New[*v1alpha3.ResourceClaimTemplate](indexer, v1alpha3.Resource("resourceclaimtemplate"))} + return &resourceClaimTemplateLister{listers.New[*resourcev1alpha3.ResourceClaimTemplate](indexer, resourcev1alpha3.Resource("resourceclaimtemplate"))} } // ResourceClaimTemplates returns an object that can list and get ResourceClaimTemplates. func (s *resourceClaimTemplateLister) ResourceClaimTemplates(namespace string) ResourceClaimTemplateNamespaceLister { - return resourceClaimTemplateNamespaceLister{listers.NewNamespaced[*v1alpha3.ResourceClaimTemplate](s.ResourceIndexer, namespace)} + return resourceClaimTemplateNamespaceLister{listers.NewNamespaced[*resourcev1alpha3.ResourceClaimTemplate](s.ResourceIndexer, namespace)} } // ResourceClaimTemplateNamespaceLister helps list and get ResourceClaimTemplates. @@ -56,15 +56,15 @@ func (s *resourceClaimTemplateLister) ResourceClaimTemplates(namespace string) R type ResourceClaimTemplateNamespaceLister interface { // List lists all ResourceClaimTemplates in the indexer for a given namespace. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1alpha3.ResourceClaimTemplate, err error) + List(selector labels.Selector) (ret []*resourcev1alpha3.ResourceClaimTemplate, err error) // Get retrieves the ResourceClaimTemplate from the indexer for a given namespace and name. // Objects returned here must be treated as read-only. 
- Get(name string) (*v1alpha3.ResourceClaimTemplate, error) + Get(name string) (*resourcev1alpha3.ResourceClaimTemplate, error) ResourceClaimTemplateNamespaceListerExpansion } // resourceClaimTemplateNamespaceLister implements the ResourceClaimTemplateNamespaceLister // interface. type resourceClaimTemplateNamespaceLister struct { - listers.ResourceIndexer[*v1alpha3.ResourceClaimTemplate] + listers.ResourceIndexer[*resourcev1alpha3.ResourceClaimTemplate] } diff --git a/vendor/k8s.io/client-go/listers/resource/v1alpha3/resourceslice.go b/vendor/k8s.io/client-go/listers/resource/v1alpha3/resourceslice.go index ae87b8b66..66b1b9e06 100644 --- a/vendor/k8s.io/client-go/listers/resource/v1alpha3/resourceslice.go +++ b/vendor/k8s.io/client-go/listers/resource/v1alpha3/resourceslice.go @@ -19,10 +19,10 @@ limitations under the License. package v1alpha3 import ( - v1alpha3 "k8s.io/api/resource/v1alpha3" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/listers" - "k8s.io/client-go/tools/cache" + resourcev1alpha3 "k8s.io/api/resource/v1alpha3" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // ResourceSliceLister helps list ResourceSlices. @@ -30,19 +30,19 @@ import ( type ResourceSliceLister interface { // List lists all ResourceSlices in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1alpha3.ResourceSlice, err error) + List(selector labels.Selector) (ret []*resourcev1alpha3.ResourceSlice, err error) // Get retrieves the ResourceSlice from the index for a given name. // Objects returned here must be treated as read-only. - Get(name string) (*v1alpha3.ResourceSlice, error) + Get(name string) (*resourcev1alpha3.ResourceSlice, error) ResourceSliceListerExpansion } // resourceSliceLister implements the ResourceSliceLister interface. type resourceSliceLister struct { - listers.ResourceIndexer[*v1alpha3.ResourceSlice] + listers.ResourceIndexer[*resourcev1alpha3.ResourceSlice] } // NewResourceSliceLister returns a new ResourceSliceLister. func NewResourceSliceLister(indexer cache.Indexer) ResourceSliceLister { - return &resourceSliceLister{listers.New[*v1alpha3.ResourceSlice](indexer, v1alpha3.Resource("resourceslice"))} + return &resourceSliceLister{listers.New[*resourcev1alpha3.ResourceSlice](indexer, resourcev1alpha3.Resource("resourceslice"))} } diff --git a/vendor/k8s.io/client-go/listers/resource/v1beta1/deviceclass.go b/vendor/k8s.io/client-go/listers/resource/v1beta1/deviceclass.go new file mode 100644 index 000000000..a386fb269 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/resource/v1beta1/deviceclass.go @@ -0,0 +1,48 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. 
+ +package v1beta1 + +import ( + resourcev1beta1 "k8s.io/api/resource/v1beta1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" +) + +// DeviceClassLister helps list DeviceClasses. +// All objects returned here must be treated as read-only. +type DeviceClassLister interface { + // List lists all DeviceClasses in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*resourcev1beta1.DeviceClass, err error) + // Get retrieves the DeviceClass from the index for a given name. + // Objects returned here must be treated as read-only. + Get(name string) (*resourcev1beta1.DeviceClass, error) + DeviceClassListerExpansion +} + +// deviceClassLister implements the DeviceClassLister interface. +type deviceClassLister struct { + listers.ResourceIndexer[*resourcev1beta1.DeviceClass] +} + +// NewDeviceClassLister returns a new DeviceClassLister. +func NewDeviceClassLister(indexer cache.Indexer) DeviceClassLister { + return &deviceClassLister{listers.New[*resourcev1beta1.DeviceClass](indexer, resourcev1beta1.Resource("deviceclass"))} +} diff --git a/vendor/k8s.io/client-go/listers/resource/v1beta1/expansion_generated.go b/vendor/k8s.io/client-go/listers/resource/v1beta1/expansion_generated.go new file mode 100644 index 000000000..c50a006d8 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/resource/v1beta1/expansion_generated.go @@ -0,0 +1,43 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1beta1 + +// DeviceClassListerExpansion allows custom methods to be added to +// DeviceClassLister. +type DeviceClassListerExpansion interface{} + +// ResourceClaimListerExpansion allows custom methods to be added to +// ResourceClaimLister. +type ResourceClaimListerExpansion interface{} + +// ResourceClaimNamespaceListerExpansion allows custom methods to be added to +// ResourceClaimNamespaceLister. +type ResourceClaimNamespaceListerExpansion interface{} + +// ResourceClaimTemplateListerExpansion allows custom methods to be added to +// ResourceClaimTemplateLister. +type ResourceClaimTemplateListerExpansion interface{} + +// ResourceClaimTemplateNamespaceListerExpansion allows custom methods to be added to +// ResourceClaimTemplateNamespaceLister. +type ResourceClaimTemplateNamespaceListerExpansion interface{} + +// ResourceSliceListerExpansion allows custom methods to be added to +// ResourceSliceLister. +type ResourceSliceListerExpansion interface{} diff --git a/vendor/k8s.io/client-go/listers/resource/v1beta1/resourceclaim.go b/vendor/k8s.io/client-go/listers/resource/v1beta1/resourceclaim.go new file mode 100644 index 000000000..434227394 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/resource/v1beta1/resourceclaim.go @@ -0,0 +1,70 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1beta1 + +import ( + resourcev1beta1 "k8s.io/api/resource/v1beta1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" +) + +// ResourceClaimLister helps list ResourceClaims. +// All objects returned here must be treated as read-only. +type ResourceClaimLister interface { + // List lists all ResourceClaims in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*resourcev1beta1.ResourceClaim, err error) + // ResourceClaims returns an object that can list and get ResourceClaims. + ResourceClaims(namespace string) ResourceClaimNamespaceLister + ResourceClaimListerExpansion +} + +// resourceClaimLister implements the ResourceClaimLister interface. +type resourceClaimLister struct { + listers.ResourceIndexer[*resourcev1beta1.ResourceClaim] +} + +// NewResourceClaimLister returns a new ResourceClaimLister. +func NewResourceClaimLister(indexer cache.Indexer) ResourceClaimLister { + return &resourceClaimLister{listers.New[*resourcev1beta1.ResourceClaim](indexer, resourcev1beta1.Resource("resourceclaim"))} +} + +// ResourceClaims returns an object that can list and get ResourceClaims. +func (s *resourceClaimLister) ResourceClaims(namespace string) ResourceClaimNamespaceLister { + return resourceClaimNamespaceLister{listers.NewNamespaced[*resourcev1beta1.ResourceClaim](s.ResourceIndexer, namespace)} +} + +// ResourceClaimNamespaceLister helps list and get ResourceClaims. +// All objects returned here must be treated as read-only. +type ResourceClaimNamespaceLister interface { + // List lists all ResourceClaims in the indexer for a given namespace. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*resourcev1beta1.ResourceClaim, err error) + // Get retrieves the ResourceClaim from the indexer for a given namespace and name. + // Objects returned here must be treated as read-only. + Get(name string) (*resourcev1beta1.ResourceClaim, error) + ResourceClaimNamespaceListerExpansion +} + +// resourceClaimNamespaceLister implements the ResourceClaimNamespaceLister +// interface. +type resourceClaimNamespaceLister struct { + listers.ResourceIndexer[*resourcev1beta1.ResourceClaim] +} diff --git a/vendor/k8s.io/client-go/listers/resource/v1beta1/resourceclaimtemplate.go b/vendor/k8s.io/client-go/listers/resource/v1beta1/resourceclaimtemplate.go new file mode 100644 index 000000000..152646a90 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/resource/v1beta1/resourceclaimtemplate.go @@ -0,0 +1,70 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1beta1 + +import ( + resourcev1beta1 "k8s.io/api/resource/v1beta1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" +) + +// ResourceClaimTemplateLister helps list ResourceClaimTemplates. +// All objects returned here must be treated as read-only. +type ResourceClaimTemplateLister interface { + // List lists all ResourceClaimTemplates in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*resourcev1beta1.ResourceClaimTemplate, err error) + // ResourceClaimTemplates returns an object that can list and get ResourceClaimTemplates. + ResourceClaimTemplates(namespace string) ResourceClaimTemplateNamespaceLister + ResourceClaimTemplateListerExpansion +} + +// resourceClaimTemplateLister implements the ResourceClaimTemplateLister interface. +type resourceClaimTemplateLister struct { + listers.ResourceIndexer[*resourcev1beta1.ResourceClaimTemplate] +} + +// NewResourceClaimTemplateLister returns a new ResourceClaimTemplateLister. +func NewResourceClaimTemplateLister(indexer cache.Indexer) ResourceClaimTemplateLister { + return &resourceClaimTemplateLister{listers.New[*resourcev1beta1.ResourceClaimTemplate](indexer, resourcev1beta1.Resource("resourceclaimtemplate"))} +} + +// ResourceClaimTemplates returns an object that can list and get ResourceClaimTemplates. +func (s *resourceClaimTemplateLister) ResourceClaimTemplates(namespace string) ResourceClaimTemplateNamespaceLister { + return resourceClaimTemplateNamespaceLister{listers.NewNamespaced[*resourcev1beta1.ResourceClaimTemplate](s.ResourceIndexer, namespace)} +} + +// ResourceClaimTemplateNamespaceLister helps list and get ResourceClaimTemplates. +// All objects returned here must be treated as read-only. +type ResourceClaimTemplateNamespaceLister interface { + // List lists all ResourceClaimTemplates in the indexer for a given namespace. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*resourcev1beta1.ResourceClaimTemplate, err error) + // Get retrieves the ResourceClaimTemplate from the indexer for a given namespace and name. + // Objects returned here must be treated as read-only. + Get(name string) (*resourcev1beta1.ResourceClaimTemplate, error) + ResourceClaimTemplateNamespaceListerExpansion +} + +// resourceClaimTemplateNamespaceLister implements the ResourceClaimTemplateNamespaceLister +// interface. +type resourceClaimTemplateNamespaceLister struct { + listers.ResourceIndexer[*resourcev1beta1.ResourceClaimTemplate] +} diff --git a/vendor/k8s.io/client-go/listers/resource/v1beta1/resourceslice.go b/vendor/k8s.io/client-go/listers/resource/v1beta1/resourceslice.go new file mode 100644 index 000000000..fbe108476 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/resource/v1beta1/resourceslice.go @@ -0,0 +1,48 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1beta1 + +import ( + resourcev1beta1 "k8s.io/api/resource/v1beta1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" +) + +// ResourceSliceLister helps list ResourceSlices. +// All objects returned here must be treated as read-only. +type ResourceSliceLister interface { + // List lists all ResourceSlices in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*resourcev1beta1.ResourceSlice, err error) + // Get retrieves the ResourceSlice from the index for a given name. + // Objects returned here must be treated as read-only. + Get(name string) (*resourcev1beta1.ResourceSlice, error) + ResourceSliceListerExpansion +} + +// resourceSliceLister implements the ResourceSliceLister interface. +type resourceSliceLister struct { + listers.ResourceIndexer[*resourcev1beta1.ResourceSlice] +} + +// NewResourceSliceLister returns a new ResourceSliceLister. +func NewResourceSliceLister(indexer cache.Indexer) ResourceSliceLister { + return &resourceSliceLister{listers.New[*resourcev1beta1.ResourceSlice](indexer, resourcev1beta1.Resource("resourceslice"))} +} diff --git a/vendor/k8s.io/client-go/listers/scheduling/v1/priorityclass.go b/vendor/k8s.io/client-go/listers/scheduling/v1/priorityclass.go index b9179b568..deadbe2f8 100644 --- a/vendor/k8s.io/client-go/listers/scheduling/v1/priorityclass.go +++ b/vendor/k8s.io/client-go/listers/scheduling/v1/priorityclass.go @@ -19,10 +19,10 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/scheduling/v1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/listers" - "k8s.io/client-go/tools/cache" + schedulingv1 "k8s.io/api/scheduling/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // PriorityClassLister helps list PriorityClasses. @@ -30,19 +30,19 @@ import ( type PriorityClassLister interface { // List lists all PriorityClasses in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.PriorityClass, err error) + List(selector labels.Selector) (ret []*schedulingv1.PriorityClass, err error) // Get retrieves the PriorityClass from the index for a given name. // Objects returned here must be treated as read-only. - Get(name string) (*v1.PriorityClass, error) + Get(name string) (*schedulingv1.PriorityClass, error) PriorityClassListerExpansion } // priorityClassLister implements the PriorityClassLister interface. type priorityClassLister struct { - listers.ResourceIndexer[*v1.PriorityClass] + listers.ResourceIndexer[*schedulingv1.PriorityClass] } // NewPriorityClassLister returns a new PriorityClassLister. 
func NewPriorityClassLister(indexer cache.Indexer) PriorityClassLister { - return &priorityClassLister{listers.New[*v1.PriorityClass](indexer, v1.Resource("priorityclass"))} + return &priorityClassLister{listers.New[*schedulingv1.PriorityClass](indexer, schedulingv1.Resource("priorityclass"))} } diff --git a/vendor/k8s.io/client-go/listers/scheduling/v1alpha1/priorityclass.go b/vendor/k8s.io/client-go/listers/scheduling/v1alpha1/priorityclass.go index 776ad5ae2..7beaf4632 100644 --- a/vendor/k8s.io/client-go/listers/scheduling/v1alpha1/priorityclass.go +++ b/vendor/k8s.io/client-go/listers/scheduling/v1alpha1/priorityclass.go @@ -19,10 +19,10 @@ limitations under the License. package v1alpha1 import ( - v1alpha1 "k8s.io/api/scheduling/v1alpha1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/listers" - "k8s.io/client-go/tools/cache" + schedulingv1alpha1 "k8s.io/api/scheduling/v1alpha1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // PriorityClassLister helps list PriorityClasses. @@ -30,19 +30,19 @@ import ( type PriorityClassLister interface { // List lists all PriorityClasses in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1alpha1.PriorityClass, err error) + List(selector labels.Selector) (ret []*schedulingv1alpha1.PriorityClass, err error) // Get retrieves the PriorityClass from the index for a given name. // Objects returned here must be treated as read-only. - Get(name string) (*v1alpha1.PriorityClass, error) + Get(name string) (*schedulingv1alpha1.PriorityClass, error) PriorityClassListerExpansion } // priorityClassLister implements the PriorityClassLister interface. type priorityClassLister struct { - listers.ResourceIndexer[*v1alpha1.PriorityClass] + listers.ResourceIndexer[*schedulingv1alpha1.PriorityClass] } // NewPriorityClassLister returns a new PriorityClassLister. func NewPriorityClassLister(indexer cache.Indexer) PriorityClassLister { - return &priorityClassLister{listers.New[*v1alpha1.PriorityClass](indexer, v1alpha1.Resource("priorityclass"))} + return &priorityClassLister{listers.New[*schedulingv1alpha1.PriorityClass](indexer, schedulingv1alpha1.Resource("priorityclass"))} } diff --git a/vendor/k8s.io/client-go/listers/scheduling/v1beta1/priorityclass.go b/vendor/k8s.io/client-go/listers/scheduling/v1beta1/priorityclass.go index 966064e5d..92fd17de6 100644 --- a/vendor/k8s.io/client-go/listers/scheduling/v1beta1/priorityclass.go +++ b/vendor/k8s.io/client-go/listers/scheduling/v1beta1/priorityclass.go @@ -19,10 +19,10 @@ limitations under the License. package v1beta1 import ( - v1beta1 "k8s.io/api/scheduling/v1beta1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/listers" - "k8s.io/client-go/tools/cache" + schedulingv1beta1 "k8s.io/api/scheduling/v1beta1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // PriorityClassLister helps list PriorityClasses. @@ -30,19 +30,19 @@ import ( type PriorityClassLister interface { // List lists all PriorityClasses in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1beta1.PriorityClass, err error) + List(selector labels.Selector) (ret []*schedulingv1beta1.PriorityClass, err error) // Get retrieves the PriorityClass from the index for a given name. // Objects returned here must be treated as read-only. 
- Get(name string) (*v1beta1.PriorityClass, error) + Get(name string) (*schedulingv1beta1.PriorityClass, error) PriorityClassListerExpansion } // priorityClassLister implements the PriorityClassLister interface. type priorityClassLister struct { - listers.ResourceIndexer[*v1beta1.PriorityClass] + listers.ResourceIndexer[*schedulingv1beta1.PriorityClass] } // NewPriorityClassLister returns a new PriorityClassLister. func NewPriorityClassLister(indexer cache.Indexer) PriorityClassLister { - return &priorityClassLister{listers.New[*v1beta1.PriorityClass](indexer, v1beta1.Resource("priorityclass"))} + return &priorityClassLister{listers.New[*schedulingv1beta1.PriorityClass](indexer, schedulingv1beta1.Resource("priorityclass"))} } diff --git a/vendor/k8s.io/client-go/listers/storage/v1/csidriver.go b/vendor/k8s.io/client-go/listers/storage/v1/csidriver.go index db64f4588..dff96e691 100644 --- a/vendor/k8s.io/client-go/listers/storage/v1/csidriver.go +++ b/vendor/k8s.io/client-go/listers/storage/v1/csidriver.go @@ -19,10 +19,10 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/storage/v1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/listers" - "k8s.io/client-go/tools/cache" + storagev1 "k8s.io/api/storage/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // CSIDriverLister helps list CSIDrivers. @@ -30,19 +30,19 @@ import ( type CSIDriverLister interface { // List lists all CSIDrivers in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.CSIDriver, err error) + List(selector labels.Selector) (ret []*storagev1.CSIDriver, err error) // Get retrieves the CSIDriver from the index for a given name. // Objects returned here must be treated as read-only. - Get(name string) (*v1.CSIDriver, error) + Get(name string) (*storagev1.CSIDriver, error) CSIDriverListerExpansion } // cSIDriverLister implements the CSIDriverLister interface. type cSIDriverLister struct { - listers.ResourceIndexer[*v1.CSIDriver] + listers.ResourceIndexer[*storagev1.CSIDriver] } // NewCSIDriverLister returns a new CSIDriverLister. func NewCSIDriverLister(indexer cache.Indexer) CSIDriverLister { - return &cSIDriverLister{listers.New[*v1.CSIDriver](indexer, v1.Resource("csidriver"))} + return &cSIDriverLister{listers.New[*storagev1.CSIDriver](indexer, storagev1.Resource("csidriver"))} } diff --git a/vendor/k8s.io/client-go/listers/storage/v1/csinode.go b/vendor/k8s.io/client-go/listers/storage/v1/csinode.go index 5bfd2a43a..85a2ad6b5 100644 --- a/vendor/k8s.io/client-go/listers/storage/v1/csinode.go +++ b/vendor/k8s.io/client-go/listers/storage/v1/csinode.go @@ -19,10 +19,10 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/storage/v1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/listers" - "k8s.io/client-go/tools/cache" + storagev1 "k8s.io/api/storage/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // CSINodeLister helps list CSINodes. @@ -30,19 +30,19 @@ import ( type CSINodeLister interface { // List lists all CSINodes in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.CSINode, err error) + List(selector labels.Selector) (ret []*storagev1.CSINode, err error) // Get retrieves the CSINode from the index for a given name. // Objects returned here must be treated as read-only. 
- Get(name string) (*v1.CSINode, error) + Get(name string) (*storagev1.CSINode, error) CSINodeListerExpansion } // cSINodeLister implements the CSINodeLister interface. type cSINodeLister struct { - listers.ResourceIndexer[*v1.CSINode] + listers.ResourceIndexer[*storagev1.CSINode] } // NewCSINodeLister returns a new CSINodeLister. func NewCSINodeLister(indexer cache.Indexer) CSINodeLister { - return &cSINodeLister{listers.New[*v1.CSINode](indexer, v1.Resource("csinode"))} + return &cSINodeLister{listers.New[*storagev1.CSINode](indexer, storagev1.Resource("csinode"))} } diff --git a/vendor/k8s.io/client-go/listers/storage/v1/csistoragecapacity.go b/vendor/k8s.io/client-go/listers/storage/v1/csistoragecapacity.go index c2acfa115..3ed903a17 100644 --- a/vendor/k8s.io/client-go/listers/storage/v1/csistoragecapacity.go +++ b/vendor/k8s.io/client-go/listers/storage/v1/csistoragecapacity.go @@ -19,10 +19,10 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/storage/v1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/listers" - "k8s.io/client-go/tools/cache" + storagev1 "k8s.io/api/storage/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // CSIStorageCapacityLister helps list CSIStorageCapacities. @@ -30,7 +30,7 @@ import ( type CSIStorageCapacityLister interface { // List lists all CSIStorageCapacities in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.CSIStorageCapacity, err error) + List(selector labels.Selector) (ret []*storagev1.CSIStorageCapacity, err error) // CSIStorageCapacities returns an object that can list and get CSIStorageCapacities. CSIStorageCapacities(namespace string) CSIStorageCapacityNamespaceLister CSIStorageCapacityListerExpansion @@ -38,17 +38,17 @@ type CSIStorageCapacityLister interface { // cSIStorageCapacityLister implements the CSIStorageCapacityLister interface. type cSIStorageCapacityLister struct { - listers.ResourceIndexer[*v1.CSIStorageCapacity] + listers.ResourceIndexer[*storagev1.CSIStorageCapacity] } // NewCSIStorageCapacityLister returns a new CSIStorageCapacityLister. func NewCSIStorageCapacityLister(indexer cache.Indexer) CSIStorageCapacityLister { - return &cSIStorageCapacityLister{listers.New[*v1.CSIStorageCapacity](indexer, v1.Resource("csistoragecapacity"))} + return &cSIStorageCapacityLister{listers.New[*storagev1.CSIStorageCapacity](indexer, storagev1.Resource("csistoragecapacity"))} } // CSIStorageCapacities returns an object that can list and get CSIStorageCapacities. func (s *cSIStorageCapacityLister) CSIStorageCapacities(namespace string) CSIStorageCapacityNamespaceLister { - return cSIStorageCapacityNamespaceLister{listers.NewNamespaced[*v1.CSIStorageCapacity](s.ResourceIndexer, namespace)} + return cSIStorageCapacityNamespaceLister{listers.NewNamespaced[*storagev1.CSIStorageCapacity](s.ResourceIndexer, namespace)} } // CSIStorageCapacityNamespaceLister helps list and get CSIStorageCapacities. @@ -56,15 +56,15 @@ func (s *cSIStorageCapacityLister) CSIStorageCapacities(namespace string) CSISto type CSIStorageCapacityNamespaceLister interface { // List lists all CSIStorageCapacities in the indexer for a given namespace. // Objects returned here must be treated as read-only. 
- List(selector labels.Selector) (ret []*v1.CSIStorageCapacity, err error) + List(selector labels.Selector) (ret []*storagev1.CSIStorageCapacity, err error) // Get retrieves the CSIStorageCapacity from the indexer for a given namespace and name. // Objects returned here must be treated as read-only. - Get(name string) (*v1.CSIStorageCapacity, error) + Get(name string) (*storagev1.CSIStorageCapacity, error) CSIStorageCapacityNamespaceListerExpansion } // cSIStorageCapacityNamespaceLister implements the CSIStorageCapacityNamespaceLister // interface. type cSIStorageCapacityNamespaceLister struct { - listers.ResourceIndexer[*v1.CSIStorageCapacity] + listers.ResourceIndexer[*storagev1.CSIStorageCapacity] } diff --git a/vendor/k8s.io/client-go/listers/storage/v1/storageclass.go b/vendor/k8s.io/client-go/listers/storage/v1/storageclass.go index fc3759444..8d595a856 100644 --- a/vendor/k8s.io/client-go/listers/storage/v1/storageclass.go +++ b/vendor/k8s.io/client-go/listers/storage/v1/storageclass.go @@ -19,10 +19,10 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/storage/v1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/listers" - "k8s.io/client-go/tools/cache" + storagev1 "k8s.io/api/storage/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // StorageClassLister helps list StorageClasses. @@ -30,19 +30,19 @@ import ( type StorageClassLister interface { // List lists all StorageClasses in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.StorageClass, err error) + List(selector labels.Selector) (ret []*storagev1.StorageClass, err error) // Get retrieves the StorageClass from the index for a given name. // Objects returned here must be treated as read-only. - Get(name string) (*v1.StorageClass, error) + Get(name string) (*storagev1.StorageClass, error) StorageClassListerExpansion } // storageClassLister implements the StorageClassLister interface. type storageClassLister struct { - listers.ResourceIndexer[*v1.StorageClass] + listers.ResourceIndexer[*storagev1.StorageClass] } // NewStorageClassLister returns a new StorageClassLister. func NewStorageClassLister(indexer cache.Indexer) StorageClassLister { - return &storageClassLister{listers.New[*v1.StorageClass](indexer, v1.Resource("storageclass"))} + return &storageClassLister{listers.New[*storagev1.StorageClass](indexer, storagev1.Resource("storageclass"))} } diff --git a/vendor/k8s.io/client-go/listers/storage/v1/volumeattachment.go b/vendor/k8s.io/client-go/listers/storage/v1/volumeattachment.go index 44754d6f2..26247faad 100644 --- a/vendor/k8s.io/client-go/listers/storage/v1/volumeattachment.go +++ b/vendor/k8s.io/client-go/listers/storage/v1/volumeattachment.go @@ -19,10 +19,10 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/storage/v1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/listers" - "k8s.io/client-go/tools/cache" + storagev1 "k8s.io/api/storage/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // VolumeAttachmentLister helps list VolumeAttachments. @@ -30,19 +30,19 @@ import ( type VolumeAttachmentLister interface { // List lists all VolumeAttachments in the indexer. // Objects returned here must be treated as read-only. 
- List(selector labels.Selector) (ret []*v1.VolumeAttachment, err error) + List(selector labels.Selector) (ret []*storagev1.VolumeAttachment, err error) // Get retrieves the VolumeAttachment from the index for a given name. // Objects returned here must be treated as read-only. - Get(name string) (*v1.VolumeAttachment, error) + Get(name string) (*storagev1.VolumeAttachment, error) VolumeAttachmentListerExpansion } // volumeAttachmentLister implements the VolumeAttachmentLister interface. type volumeAttachmentLister struct { - listers.ResourceIndexer[*v1.VolumeAttachment] + listers.ResourceIndexer[*storagev1.VolumeAttachment] } // NewVolumeAttachmentLister returns a new VolumeAttachmentLister. func NewVolumeAttachmentLister(indexer cache.Indexer) VolumeAttachmentLister { - return &volumeAttachmentLister{listers.New[*v1.VolumeAttachment](indexer, v1.Resource("volumeattachment"))} + return &volumeAttachmentLister{listers.New[*storagev1.VolumeAttachment](indexer, storagev1.Resource("volumeattachment"))} } diff --git a/vendor/k8s.io/client-go/listers/storage/v1alpha1/csistoragecapacity.go b/vendor/k8s.io/client-go/listers/storage/v1alpha1/csistoragecapacity.go index 7f75aae2c..62127edf9 100644 --- a/vendor/k8s.io/client-go/listers/storage/v1alpha1/csistoragecapacity.go +++ b/vendor/k8s.io/client-go/listers/storage/v1alpha1/csistoragecapacity.go @@ -19,10 +19,10 @@ limitations under the License. package v1alpha1 import ( - v1alpha1 "k8s.io/api/storage/v1alpha1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/listers" - "k8s.io/client-go/tools/cache" + storagev1alpha1 "k8s.io/api/storage/v1alpha1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // CSIStorageCapacityLister helps list CSIStorageCapacities. @@ -30,7 +30,7 @@ import ( type CSIStorageCapacityLister interface { // List lists all CSIStorageCapacities in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1alpha1.CSIStorageCapacity, err error) + List(selector labels.Selector) (ret []*storagev1alpha1.CSIStorageCapacity, err error) // CSIStorageCapacities returns an object that can list and get CSIStorageCapacities. CSIStorageCapacities(namespace string) CSIStorageCapacityNamespaceLister CSIStorageCapacityListerExpansion @@ -38,17 +38,17 @@ type CSIStorageCapacityLister interface { // cSIStorageCapacityLister implements the CSIStorageCapacityLister interface. type cSIStorageCapacityLister struct { - listers.ResourceIndexer[*v1alpha1.CSIStorageCapacity] + listers.ResourceIndexer[*storagev1alpha1.CSIStorageCapacity] } // NewCSIStorageCapacityLister returns a new CSIStorageCapacityLister. func NewCSIStorageCapacityLister(indexer cache.Indexer) CSIStorageCapacityLister { - return &cSIStorageCapacityLister{listers.New[*v1alpha1.CSIStorageCapacity](indexer, v1alpha1.Resource("csistoragecapacity"))} + return &cSIStorageCapacityLister{listers.New[*storagev1alpha1.CSIStorageCapacity](indexer, storagev1alpha1.Resource("csistoragecapacity"))} } // CSIStorageCapacities returns an object that can list and get CSIStorageCapacities. 
func (s *cSIStorageCapacityLister) CSIStorageCapacities(namespace string) CSIStorageCapacityNamespaceLister { - return cSIStorageCapacityNamespaceLister{listers.NewNamespaced[*v1alpha1.CSIStorageCapacity](s.ResourceIndexer, namespace)} + return cSIStorageCapacityNamespaceLister{listers.NewNamespaced[*storagev1alpha1.CSIStorageCapacity](s.ResourceIndexer, namespace)} } // CSIStorageCapacityNamespaceLister helps list and get CSIStorageCapacities. @@ -56,15 +56,15 @@ func (s *cSIStorageCapacityLister) CSIStorageCapacities(namespace string) CSISto type CSIStorageCapacityNamespaceLister interface { // List lists all CSIStorageCapacities in the indexer for a given namespace. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1alpha1.CSIStorageCapacity, err error) + List(selector labels.Selector) (ret []*storagev1alpha1.CSIStorageCapacity, err error) // Get retrieves the CSIStorageCapacity from the indexer for a given namespace and name. // Objects returned here must be treated as read-only. - Get(name string) (*v1alpha1.CSIStorageCapacity, error) + Get(name string) (*storagev1alpha1.CSIStorageCapacity, error) CSIStorageCapacityNamespaceListerExpansion } // cSIStorageCapacityNamespaceLister implements the CSIStorageCapacityNamespaceLister // interface. type cSIStorageCapacityNamespaceLister struct { - listers.ResourceIndexer[*v1alpha1.CSIStorageCapacity] + listers.ResourceIndexer[*storagev1alpha1.CSIStorageCapacity] } diff --git a/vendor/k8s.io/client-go/listers/storage/v1alpha1/volumeattachment.go b/vendor/k8s.io/client-go/listers/storage/v1alpha1/volumeattachment.go index 122864ffe..9604e4c1b 100644 --- a/vendor/k8s.io/client-go/listers/storage/v1alpha1/volumeattachment.go +++ b/vendor/k8s.io/client-go/listers/storage/v1alpha1/volumeattachment.go @@ -19,10 +19,10 @@ limitations under the License. package v1alpha1 import ( - v1alpha1 "k8s.io/api/storage/v1alpha1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/listers" - "k8s.io/client-go/tools/cache" + storagev1alpha1 "k8s.io/api/storage/v1alpha1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // VolumeAttachmentLister helps list VolumeAttachments. @@ -30,19 +30,19 @@ import ( type VolumeAttachmentLister interface { // List lists all VolumeAttachments in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1alpha1.VolumeAttachment, err error) + List(selector labels.Selector) (ret []*storagev1alpha1.VolumeAttachment, err error) // Get retrieves the VolumeAttachment from the index for a given name. // Objects returned here must be treated as read-only. - Get(name string) (*v1alpha1.VolumeAttachment, error) + Get(name string) (*storagev1alpha1.VolumeAttachment, error) VolumeAttachmentListerExpansion } // volumeAttachmentLister implements the VolumeAttachmentLister interface. type volumeAttachmentLister struct { - listers.ResourceIndexer[*v1alpha1.VolumeAttachment] + listers.ResourceIndexer[*storagev1alpha1.VolumeAttachment] } // NewVolumeAttachmentLister returns a new VolumeAttachmentLister. 
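Review note: the lister changes above are a mechanical re-alias of the API imports (v1 becomes storagev1, v1alpha1 becomes storagev1alpha1, and so on) around the generic listers.ResourceIndexer plumbing, so existing callers are unaffected. For orientation, a minimal sketch of how one of these generated listers is consumed; the standalone indexer and the sample StorageClass object are illustrative only (real code would take the indexer from a shared informer):

package main

import (
    "fmt"

    storagev1 "k8s.io/api/storage/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/labels"
    storagev1listers "k8s.io/client-go/listers/storage/v1"
    "k8s.io/client-go/tools/cache"
)

func main() {
    // In real code the indexer comes from a SharedIndexInformer; a standalone
    // indexer is enough to show the lister surface.
    indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{})
    _ = indexer.Add(&storagev1.StorageClass{
        ObjectMeta: metav1.ObjectMeta{Name: "standard"}, // example object, not from this change
    })

    lister := storagev1listers.NewStorageClassLister(indexer)

    classes, err := lister.List(labels.Everything())
    if err != nil {
        panic(err)
    }
    for _, sc := range classes {
        // Objects returned by the lister are shared and must be treated as read-only.
        fmt.Println(sc.Name)
    }

    if _, err := lister.Get("standard"); err != nil {
        panic(err)
    }
}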
func NewVolumeAttachmentLister(indexer cache.Indexer) VolumeAttachmentLister { - return &volumeAttachmentLister{listers.New[*v1alpha1.VolumeAttachment](indexer, v1alpha1.Resource("volumeattachment"))} + return &volumeAttachmentLister{listers.New[*storagev1alpha1.VolumeAttachment](indexer, storagev1alpha1.Resource("volumeattachment"))} } diff --git a/vendor/k8s.io/client-go/listers/storage/v1alpha1/volumeattributesclass.go b/vendor/k8s.io/client-go/listers/storage/v1alpha1/volumeattributesclass.go index 5d8ae09d7..08710c981 100644 --- a/vendor/k8s.io/client-go/listers/storage/v1alpha1/volumeattributesclass.go +++ b/vendor/k8s.io/client-go/listers/storage/v1alpha1/volumeattributesclass.go @@ -19,10 +19,10 @@ limitations under the License. package v1alpha1 import ( - v1alpha1 "k8s.io/api/storage/v1alpha1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/listers" - "k8s.io/client-go/tools/cache" + storagev1alpha1 "k8s.io/api/storage/v1alpha1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // VolumeAttributesClassLister helps list VolumeAttributesClasses. @@ -30,19 +30,19 @@ import ( type VolumeAttributesClassLister interface { // List lists all VolumeAttributesClasses in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1alpha1.VolumeAttributesClass, err error) + List(selector labels.Selector) (ret []*storagev1alpha1.VolumeAttributesClass, err error) // Get retrieves the VolumeAttributesClass from the index for a given name. // Objects returned here must be treated as read-only. - Get(name string) (*v1alpha1.VolumeAttributesClass, error) + Get(name string) (*storagev1alpha1.VolumeAttributesClass, error) VolumeAttributesClassListerExpansion } // volumeAttributesClassLister implements the VolumeAttributesClassLister interface. type volumeAttributesClassLister struct { - listers.ResourceIndexer[*v1alpha1.VolumeAttributesClass] + listers.ResourceIndexer[*storagev1alpha1.VolumeAttributesClass] } // NewVolumeAttributesClassLister returns a new VolumeAttributesClassLister. func NewVolumeAttributesClassLister(indexer cache.Indexer) VolumeAttributesClassLister { - return &volumeAttributesClassLister{listers.New[*v1alpha1.VolumeAttributesClass](indexer, v1alpha1.Resource("volumeattributesclass"))} + return &volumeAttributesClassLister{listers.New[*storagev1alpha1.VolumeAttributesClass](indexer, storagev1alpha1.Resource("volumeattributesclass"))} } diff --git a/vendor/k8s.io/client-go/listers/storage/v1beta1/csidriver.go b/vendor/k8s.io/client-go/listers/storage/v1beta1/csidriver.go index 660038674..31a7e5a2b 100644 --- a/vendor/k8s.io/client-go/listers/storage/v1beta1/csidriver.go +++ b/vendor/k8s.io/client-go/listers/storage/v1beta1/csidriver.go @@ -19,10 +19,10 @@ limitations under the License. package v1beta1 import ( - v1beta1 "k8s.io/api/storage/v1beta1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/listers" - "k8s.io/client-go/tools/cache" + storagev1beta1 "k8s.io/api/storage/v1beta1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // CSIDriverLister helps list CSIDrivers. @@ -30,19 +30,19 @@ import ( type CSIDriverLister interface { // List lists all CSIDrivers in the indexer. // Objects returned here must be treated as read-only. 
- List(selector labels.Selector) (ret []*v1beta1.CSIDriver, err error) + List(selector labels.Selector) (ret []*storagev1beta1.CSIDriver, err error) // Get retrieves the CSIDriver from the index for a given name. // Objects returned here must be treated as read-only. - Get(name string) (*v1beta1.CSIDriver, error) + Get(name string) (*storagev1beta1.CSIDriver, error) CSIDriverListerExpansion } // cSIDriverLister implements the CSIDriverLister interface. type cSIDriverLister struct { - listers.ResourceIndexer[*v1beta1.CSIDriver] + listers.ResourceIndexer[*storagev1beta1.CSIDriver] } // NewCSIDriverLister returns a new CSIDriverLister. func NewCSIDriverLister(indexer cache.Indexer) CSIDriverLister { - return &cSIDriverLister{listers.New[*v1beta1.CSIDriver](indexer, v1beta1.Resource("csidriver"))} + return &cSIDriverLister{listers.New[*storagev1beta1.CSIDriver](indexer, storagev1beta1.Resource("csidriver"))} } diff --git a/vendor/k8s.io/client-go/listers/storage/v1beta1/csinode.go b/vendor/k8s.io/client-go/listers/storage/v1beta1/csinode.go index 2c29ccabf..1ab0942c3 100644 --- a/vendor/k8s.io/client-go/listers/storage/v1beta1/csinode.go +++ b/vendor/k8s.io/client-go/listers/storage/v1beta1/csinode.go @@ -19,10 +19,10 @@ limitations under the License. package v1beta1 import ( - v1beta1 "k8s.io/api/storage/v1beta1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/listers" - "k8s.io/client-go/tools/cache" + storagev1beta1 "k8s.io/api/storage/v1beta1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // CSINodeLister helps list CSINodes. @@ -30,19 +30,19 @@ import ( type CSINodeLister interface { // List lists all CSINodes in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1beta1.CSINode, err error) + List(selector labels.Selector) (ret []*storagev1beta1.CSINode, err error) // Get retrieves the CSINode from the index for a given name. // Objects returned here must be treated as read-only. - Get(name string) (*v1beta1.CSINode, error) + Get(name string) (*storagev1beta1.CSINode, error) CSINodeListerExpansion } // cSINodeLister implements the CSINodeLister interface. type cSINodeLister struct { - listers.ResourceIndexer[*v1beta1.CSINode] + listers.ResourceIndexer[*storagev1beta1.CSINode] } // NewCSINodeLister returns a new CSINodeLister. func NewCSINodeLister(indexer cache.Indexer) CSINodeLister { - return &cSINodeLister{listers.New[*v1beta1.CSINode](indexer, v1beta1.Resource("csinode"))} + return &cSINodeLister{listers.New[*storagev1beta1.CSINode](indexer, storagev1beta1.Resource("csinode"))} } diff --git a/vendor/k8s.io/client-go/listers/storage/v1beta1/csistoragecapacity.go b/vendor/k8s.io/client-go/listers/storage/v1beta1/csistoragecapacity.go index 365304df1..25ff95237 100644 --- a/vendor/k8s.io/client-go/listers/storage/v1beta1/csistoragecapacity.go +++ b/vendor/k8s.io/client-go/listers/storage/v1beta1/csistoragecapacity.go @@ -19,10 +19,10 @@ limitations under the License. package v1beta1 import ( - v1beta1 "k8s.io/api/storage/v1beta1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/listers" - "k8s.io/client-go/tools/cache" + storagev1beta1 "k8s.io/api/storage/v1beta1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // CSIStorageCapacityLister helps list CSIStorageCapacities. 
@@ -30,7 +30,7 @@ import ( type CSIStorageCapacityLister interface { // List lists all CSIStorageCapacities in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1beta1.CSIStorageCapacity, err error) + List(selector labels.Selector) (ret []*storagev1beta1.CSIStorageCapacity, err error) // CSIStorageCapacities returns an object that can list and get CSIStorageCapacities. CSIStorageCapacities(namespace string) CSIStorageCapacityNamespaceLister CSIStorageCapacityListerExpansion @@ -38,17 +38,17 @@ type CSIStorageCapacityLister interface { // cSIStorageCapacityLister implements the CSIStorageCapacityLister interface. type cSIStorageCapacityLister struct { - listers.ResourceIndexer[*v1beta1.CSIStorageCapacity] + listers.ResourceIndexer[*storagev1beta1.CSIStorageCapacity] } // NewCSIStorageCapacityLister returns a new CSIStorageCapacityLister. func NewCSIStorageCapacityLister(indexer cache.Indexer) CSIStorageCapacityLister { - return &cSIStorageCapacityLister{listers.New[*v1beta1.CSIStorageCapacity](indexer, v1beta1.Resource("csistoragecapacity"))} + return &cSIStorageCapacityLister{listers.New[*storagev1beta1.CSIStorageCapacity](indexer, storagev1beta1.Resource("csistoragecapacity"))} } // CSIStorageCapacities returns an object that can list and get CSIStorageCapacities. func (s *cSIStorageCapacityLister) CSIStorageCapacities(namespace string) CSIStorageCapacityNamespaceLister { - return cSIStorageCapacityNamespaceLister{listers.NewNamespaced[*v1beta1.CSIStorageCapacity](s.ResourceIndexer, namespace)} + return cSIStorageCapacityNamespaceLister{listers.NewNamespaced[*storagev1beta1.CSIStorageCapacity](s.ResourceIndexer, namespace)} } // CSIStorageCapacityNamespaceLister helps list and get CSIStorageCapacities. @@ -56,15 +56,15 @@ func (s *cSIStorageCapacityLister) CSIStorageCapacities(namespace string) CSISto type CSIStorageCapacityNamespaceLister interface { // List lists all CSIStorageCapacities in the indexer for a given namespace. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1beta1.CSIStorageCapacity, err error) + List(selector labels.Selector) (ret []*storagev1beta1.CSIStorageCapacity, err error) // Get retrieves the CSIStorageCapacity from the indexer for a given namespace and name. // Objects returned here must be treated as read-only. - Get(name string) (*v1beta1.CSIStorageCapacity, error) + Get(name string) (*storagev1beta1.CSIStorageCapacity, error) CSIStorageCapacityNamespaceListerExpansion } // cSIStorageCapacityNamespaceLister implements the CSIStorageCapacityNamespaceLister // interface. type cSIStorageCapacityNamespaceLister struct { - listers.ResourceIndexer[*v1beta1.CSIStorageCapacity] + listers.ResourceIndexer[*storagev1beta1.CSIStorageCapacity] } diff --git a/vendor/k8s.io/client-go/listers/storage/v1beta1/storageclass.go b/vendor/k8s.io/client-go/listers/storage/v1beta1/storageclass.go index 070c061bc..3928cbacd 100644 --- a/vendor/k8s.io/client-go/listers/storage/v1beta1/storageclass.go +++ b/vendor/k8s.io/client-go/listers/storage/v1beta1/storageclass.go @@ -19,10 +19,10 @@ limitations under the License. 
package v1beta1 import ( - v1beta1 "k8s.io/api/storage/v1beta1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/listers" - "k8s.io/client-go/tools/cache" + storagev1beta1 "k8s.io/api/storage/v1beta1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // StorageClassLister helps list StorageClasses. @@ -30,19 +30,19 @@ import ( type StorageClassLister interface { // List lists all StorageClasses in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1beta1.StorageClass, err error) + List(selector labels.Selector) (ret []*storagev1beta1.StorageClass, err error) // Get retrieves the StorageClass from the index for a given name. // Objects returned here must be treated as read-only. - Get(name string) (*v1beta1.StorageClass, error) + Get(name string) (*storagev1beta1.StorageClass, error) StorageClassListerExpansion } // storageClassLister implements the StorageClassLister interface. type storageClassLister struct { - listers.ResourceIndexer[*v1beta1.StorageClass] + listers.ResourceIndexer[*storagev1beta1.StorageClass] } // NewStorageClassLister returns a new StorageClassLister. func NewStorageClassLister(indexer cache.Indexer) StorageClassLister { - return &storageClassLister{listers.New[*v1beta1.StorageClass](indexer, v1beta1.Resource("storageclass"))} + return &storageClassLister{listers.New[*storagev1beta1.StorageClass](indexer, storagev1beta1.Resource("storageclass"))} } diff --git a/vendor/k8s.io/client-go/listers/storage/v1beta1/volumeattachment.go b/vendor/k8s.io/client-go/listers/storage/v1beta1/volumeattachment.go index d437c1eae..aab4bef13 100644 --- a/vendor/k8s.io/client-go/listers/storage/v1beta1/volumeattachment.go +++ b/vendor/k8s.io/client-go/listers/storage/v1beta1/volumeattachment.go @@ -19,10 +19,10 @@ limitations under the License. package v1beta1 import ( - v1beta1 "k8s.io/api/storage/v1beta1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/listers" - "k8s.io/client-go/tools/cache" + storagev1beta1 "k8s.io/api/storage/v1beta1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // VolumeAttachmentLister helps list VolumeAttachments. @@ -30,19 +30,19 @@ import ( type VolumeAttachmentLister interface { // List lists all VolumeAttachments in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1beta1.VolumeAttachment, err error) + List(selector labels.Selector) (ret []*storagev1beta1.VolumeAttachment, err error) // Get retrieves the VolumeAttachment from the index for a given name. // Objects returned here must be treated as read-only. - Get(name string) (*v1beta1.VolumeAttachment, error) + Get(name string) (*storagev1beta1.VolumeAttachment, error) VolumeAttachmentListerExpansion } // volumeAttachmentLister implements the VolumeAttachmentLister interface. type volumeAttachmentLister struct { - listers.ResourceIndexer[*v1beta1.VolumeAttachment] + listers.ResourceIndexer[*storagev1beta1.VolumeAttachment] } // NewVolumeAttachmentLister returns a new VolumeAttachmentLister. 
func NewVolumeAttachmentLister(indexer cache.Indexer) VolumeAttachmentLister { - return &volumeAttachmentLister{listers.New[*v1beta1.VolumeAttachment](indexer, v1beta1.Resource("volumeattachment"))} + return &volumeAttachmentLister{listers.New[*storagev1beta1.VolumeAttachment](indexer, storagev1beta1.Resource("volumeattachment"))} } diff --git a/vendor/k8s.io/client-go/listers/storage/v1beta1/volumeattributesclass.go b/vendor/k8s.io/client-go/listers/storage/v1beta1/volumeattributesclass.go index 2ff71e3d7..8b9724ed9 100644 --- a/vendor/k8s.io/client-go/listers/storage/v1beta1/volumeattributesclass.go +++ b/vendor/k8s.io/client-go/listers/storage/v1beta1/volumeattributesclass.go @@ -19,10 +19,10 @@ limitations under the License. package v1beta1 import ( - v1beta1 "k8s.io/api/storage/v1beta1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/listers" - "k8s.io/client-go/tools/cache" + storagev1beta1 "k8s.io/api/storage/v1beta1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // VolumeAttributesClassLister helps list VolumeAttributesClasses. @@ -30,19 +30,19 @@ import ( type VolumeAttributesClassLister interface { // List lists all VolumeAttributesClasses in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1beta1.VolumeAttributesClass, err error) + List(selector labels.Selector) (ret []*storagev1beta1.VolumeAttributesClass, err error) // Get retrieves the VolumeAttributesClass from the index for a given name. // Objects returned here must be treated as read-only. - Get(name string) (*v1beta1.VolumeAttributesClass, error) + Get(name string) (*storagev1beta1.VolumeAttributesClass, error) VolumeAttributesClassListerExpansion } // volumeAttributesClassLister implements the VolumeAttributesClassLister interface. type volumeAttributesClassLister struct { - listers.ResourceIndexer[*v1beta1.VolumeAttributesClass] + listers.ResourceIndexer[*storagev1beta1.VolumeAttributesClass] } // NewVolumeAttributesClassLister returns a new VolumeAttributesClassLister. func NewVolumeAttributesClassLister(indexer cache.Indexer) VolumeAttributesClassLister { - return &volumeAttributesClassLister{listers.New[*v1beta1.VolumeAttributesClass](indexer, v1beta1.Resource("volumeattributesclass"))} + return &volumeAttributesClassLister{listers.New[*storagev1beta1.VolumeAttributesClass](indexer, storagev1beta1.Resource("volumeattributesclass"))} } diff --git a/vendor/k8s.io/client-go/listers/storagemigration/v1alpha1/storageversionmigration.go b/vendor/k8s.io/client-go/listers/storagemigration/v1alpha1/storageversionmigration.go index 794dba25c..e7d164d04 100644 --- a/vendor/k8s.io/client-go/listers/storagemigration/v1alpha1/storageversionmigration.go +++ b/vendor/k8s.io/client-go/listers/storagemigration/v1alpha1/storageversionmigration.go @@ -19,10 +19,10 @@ limitations under the License. package v1alpha1 import ( - v1alpha1 "k8s.io/api/storagemigration/v1alpha1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/listers" - "k8s.io/client-go/tools/cache" + storagemigrationv1alpha1 "k8s.io/api/storagemigration/v1alpha1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // StorageVersionMigrationLister helps list StorageVersionMigrations. @@ -30,19 +30,19 @@ import ( type StorageVersionMigrationLister interface { // List lists all StorageVersionMigrations in the indexer. 
// Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1alpha1.StorageVersionMigration, err error) + List(selector labels.Selector) (ret []*storagemigrationv1alpha1.StorageVersionMigration, err error) // Get retrieves the StorageVersionMigration from the index for a given name. // Objects returned here must be treated as read-only. - Get(name string) (*v1alpha1.StorageVersionMigration, error) + Get(name string) (*storagemigrationv1alpha1.StorageVersionMigration, error) StorageVersionMigrationListerExpansion } // storageVersionMigrationLister implements the StorageVersionMigrationLister interface. type storageVersionMigrationLister struct { - listers.ResourceIndexer[*v1alpha1.StorageVersionMigration] + listers.ResourceIndexer[*storagemigrationv1alpha1.StorageVersionMigration] } // NewStorageVersionMigrationLister returns a new StorageVersionMigrationLister. func NewStorageVersionMigrationLister(indexer cache.Indexer) StorageVersionMigrationLister { - return &storageVersionMigrationLister{listers.New[*v1alpha1.StorageVersionMigration](indexer, v1alpha1.Resource("storageversionmigration"))} + return &storageVersionMigrationLister{listers.New[*storagemigrationv1alpha1.StorageVersionMigration](indexer, storagemigrationv1alpha1.Resource("storageversionmigration"))} } diff --git a/vendor/k8s.io/client-go/metadata/metadata.go b/vendor/k8s.io/client-go/metadata/metadata.go index 2cc7e22ad..a19ba1304 100644 --- a/vendor/k8s.io/client-go/metadata/metadata.go +++ b/vendor/k8s.io/client-go/metadata/metadata.go @@ -33,6 +33,8 @@ import ( "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/watch" "k8s.io/client-go/rest" + "k8s.io/client-go/util/consistencydetector" + "k8s.io/client-go/util/watchlist" ) var deleteScheme = runtime.NewScheme() @@ -218,6 +220,24 @@ func (c *client) Get(ctx context.Context, name string, opts metav1.GetOptions, s // List returns all resources within the specified scope (namespace or cluster). func (c *client) List(ctx context.Context, opts metav1.ListOptions) (*metav1.PartialObjectMetadataList, error) { + if watchListOptions, hasWatchListOptionsPrepared, watchListOptionsErr := watchlist.PrepareWatchListOptionsFromListOptions(opts); watchListOptionsErr != nil { + klog.FromContext(ctx).Error(watchListOptionsErr, "Failed preparing watchlist options, falling back to the standard LIST semantics", "resource", c.resource) + } else if hasWatchListOptionsPrepared { + result, err := c.watchList(ctx, watchListOptions) + if err == nil { + consistencydetector.CheckWatchListFromCacheDataConsistencyIfRequested(ctx, fmt.Sprintf("watchlist request for %v", c.resource), c.list, opts, result) + return result, nil + } + klog.FromContext(ctx).Error(err, "The watchlist request ended with an error, falling back to the standard LIST semantics", "resource", c.resource) + } + result, err := c.list(ctx, opts) + if err == nil { + consistencydetector.CheckListFromCacheDataConsistencyIfRequested(ctx, fmt.Sprintf("list request for %v", c.resource), c.list, opts, result) + } + return result, err +} + +func (c *client) list(ctx context.Context, opts metav1.ListOptions) (*metav1.PartialObjectMetadataList, error) { result := c.client.client.Get().AbsPath(c.makeURLSegments("")...). SetHeader("Accept", "application/vnd.kubernetes.protobuf;as=PartialObjectMetadataList;g=meta.k8s.io;v=v1,application/json;as=PartialObjectMetadataList;g=meta.k8s.io;v=v1,application/json"). SpecificallyVersionedParams(&opts, dynamicParameterCodec, versionV1). 
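Review note: the metadata client's List now tries the streaming watch-list path first (when the list options allow it), runs the optional cache-consistency check, and falls back to a plain LIST on error, all behind the same public List call. A rough consumer-side sketch; the GroupVersionResource and the cert-manager namespace are placeholders:

package main

import (
    "context"
    "fmt"

    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/runtime/schema"
    "k8s.io/client-go/metadata"
    "k8s.io/client-go/rest"
)

func main() {
    cfg, err := rest.InClusterConfig()
    if err != nil {
        panic(err)
    }

    mc, err := metadata.NewForConfig(cfg)
    if err != nil {
        panic(err)
    }

    // Placeholder resource and namespace; any list call goes through the new
    // watch-list-with-fallback path added above.
    gvr := schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "deployments"}
    list, err := mc.Resource(gvr).Namespace("cert-manager").List(context.TODO(), metav1.ListOptions{})
    if err != nil {
        panic(err)
    }
    for _, item := range list.Items {
        fmt.Println(item.Namespace, item.Name) // PartialObjectMetadata only, no spec/status
    }
}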
@@ -249,6 +269,25 @@ func (c *client) List(ctx context.Context, opts metav1.ListOptions) (*metav1.Par return partial, nil } +// watchList establishes a watch stream with the server and returns PartialObjectMetadataList. +func (c *client) watchList(ctx context.Context, opts metav1.ListOptions) (*metav1.PartialObjectMetadataList, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + + result := &metav1.PartialObjectMetadataList{} + err := c.client.client.Get(). + AbsPath(c.makeURLSegments("")...). + SetHeader("Accept", "application/vnd.kubernetes.protobuf;as=PartialObjectMetadata;g=meta.k8s.io;v=v1,application/json;as=PartialObjectMetadata;g=meta.k8s.io;v=v1,application/json"). + SpecificallyVersionedParams(&opts, dynamicParameterCodec, versionV1). + Timeout(timeout). + WatchList(ctx). + Into(result) + + return result, err +} + // Watch finds all changes to the resources in the specified scope (namespace or cluster). func (c *client) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { var timeout time.Duration diff --git a/vendor/k8s.io/client-go/openapi/cached/groupversion.go b/vendor/k8s.io/client-go/openapi/cached/groupversion.go index 65a4189f7..73730c51b 100644 --- a/vendor/k8s.io/client-go/openapi/cached/groupversion.go +++ b/vendor/k8s.io/client-go/openapi/cached/groupversion.go @@ -56,3 +56,7 @@ func (g *groupversion) Schema(contentType string) ([]byte, error) { return cachedInfo.data, cachedInfo.err } + +func (c *groupversion) ServerRelativeURL() string { + return c.delegate.ServerRelativeURL() +} diff --git a/vendor/k8s.io/client-go/openapi/groupversion.go b/vendor/k8s.io/client-go/openapi/groupversion.go index 601dcbe3c..40d91b9a5 100644 --- a/vendor/k8s.io/client-go/openapi/groupversion.go +++ b/vendor/k8s.io/client-go/openapi/groupversion.go @@ -27,6 +27,12 @@ const ContentTypeOpenAPIV3PB = "application/com.github.proto-openapi.spec.v3@v1. type GroupVersion interface { Schema(contentType string) ([]byte, error) + + // ServerRelativeURL. Returns the path and parameters used to fetch the schema. + // You should use the Schema method to fetch it, but this value can be used + // to key the current version of the schema in a cache since it contains a + // hash string which changes upon schema update. + ServerRelativeURL() string } type groupversion struct { @@ -68,3 +74,9 @@ func (g *groupversion) Schema(contentType string) ([]byte, error) { return path.Do(context.TODO()).Raw() } + +// URL used for fetching the schema. The URL includes a hash and can be used +// to key the current version of the schema in a cache. +func (g *groupversion) ServerRelativeURL() string { + return g.item.ServerRelativeURL +} diff --git a/vendor/k8s.io/client-go/rest/client.go b/vendor/k8s.io/client-go/rest/client.go index 60df7e568..159caa13f 100644 --- a/vendor/k8s.io/client-go/rest/client.go +++ b/vendor/k8s.io/client-go/rest/client.go @@ -17,16 +17,21 @@ limitations under the License. package rest import ( + "fmt" + "mime" "net/http" "net/url" "os" "strconv" "strings" + "sync/atomic" "time" + "github.com/munnerz/goautoneg" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" + clientfeatures "k8s.io/client-go/features" "k8s.io/client-go/util/flowcontrol" ) @@ -85,7 +90,7 @@ type RESTClient struct { versionedAPIPath string // content describes how a RESTClient encodes and decodes responses. 
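Review note: the ServerRelativeURL accessor added to the openapi GroupVersion interface above exposes the hash-bearing discovery path, which makes a natural cache key for fetched schemas. A small sketch under the assumption that the openapi.Client is obtained through the discovery client and that the path key of interest looks like "apis/apps/v1":

package main

import (
    "fmt"

    "k8s.io/client-go/discovery"
    "k8s.io/client-go/openapi"
    "k8s.io/client-go/rest"
)

func main() {
    cfg, err := rest.InClusterConfig()
    if err != nil {
        panic(err)
    }
    dc, err := discovery.NewDiscoveryClientForConfig(cfg)
    if err != nil {
        panic(err)
    }

    paths, err := dc.OpenAPIV3().Paths()
    if err != nil {
        panic(err)
    }

    schemaCache := map[string][]byte{} // keyed by ServerRelativeURL, which embeds a content hash
    if gv, ok := paths["apis/apps/v1"]; ok {
        key := gv.ServerRelativeURL()
        if _, cached := schemaCache[key]; !cached {
            data, err := gv.Schema(openapi.ContentTypeOpenAPIV3PB)
            if err != nil {
                panic(err)
            }
            schemaCache[key] = data
        }
        fmt.Println("cached schema bytes:", len(schemaCache[key]))
    }
}

A changed hash in ServerRelativeURL signals a schema update, so stale cache entries can simply be dropped when the key no longer matches.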
- content ClientContentConfig + content requestClientContentConfigProvider // creates BackoffManager that is passed to requests. createBackoffMgr func() BackoffManager @@ -105,10 +110,6 @@ type RESTClient struct { // NewRESTClient creates a new RESTClient. This client performs generic REST functions // such as Get, Put, Post, and Delete on specified paths. func NewRESTClient(baseURL *url.URL, versionedAPIPath string, config ClientContentConfig, rateLimiter flowcontrol.RateLimiter, client *http.Client) (*RESTClient, error) { - if len(config.ContentType) == 0 { - config.ContentType = "application/json" - } - base := *baseURL if !strings.HasSuffix(base.Path, "/") { base.Path += "/" @@ -119,14 +120,53 @@ func NewRESTClient(baseURL *url.URL, versionedAPIPath string, config ClientConte return &RESTClient{ base: &base, versionedAPIPath: versionedAPIPath, - content: config, + content: requestClientContentConfigProvider{base: scrubCBORContentConfigIfDisabled(config)}, createBackoffMgr: readExpBackoffConfig, rateLimiter: rateLimiter, - - Client: client, + Client: client, }, nil } +func scrubCBORContentConfigIfDisabled(content ClientContentConfig) ClientContentConfig { + if clientfeatures.FeatureGates().Enabled(clientfeatures.ClientsAllowCBOR) { + content.Negotiator = clientNegotiatorWithCBORSequenceStreamDecoder{content.Negotiator} + return content + } + + if mediatype, _, err := mime.ParseMediaType(content.ContentType); err == nil && mediatype == "application/cbor" { + content.ContentType = "application/json" + } + + clauses := goautoneg.ParseAccept(content.AcceptContentTypes) + scrubbed := false + for i, clause := range clauses { + if clause.Type == "application" && clause.SubType == "cbor" { + scrubbed = true + clauses[i].SubType = "json" + } + } + if !scrubbed { + // No application/cbor in AcceptContentTypes, nothing more to do. + return content + } + + parts := make([]string, 0, len(clauses)) + for _, clause := range clauses { + // ParseAccept does not store the parameter "q" in Params. + params := clause.Params + if clause.Q < 1 { // omit q=1, it's the default + if params == nil { + params = make(map[string]string, 1) + } + params["q"] = strconv.FormatFloat(clause.Q, 'g', 3, 32) + } + parts = append(parts, mime.FormatMediaType(fmt.Sprintf("%s/%s", clause.Type, clause.SubType), params)) + } + content.AcceptContentTypes = strings.Join(parts, ",") + + return content +} + // GetRateLimiter returns rate limiter for a given client, or nil if it's called on a nil client func (c *RESTClient) GetRateLimiter() flowcontrol.RateLimiter { if c == nil { @@ -198,5 +238,106 @@ func (c *RESTClient) Delete() *Request { // APIVersion returns the APIVersion this RESTClient is expected to use. func (c *RESTClient) APIVersion() schema.GroupVersion { - return c.content.GroupVersion + config, _ := c.content.GetClientContentConfig() + return config.GroupVersion +} + +// requestClientContentConfigProvider observes HTTP 415 (Unsupported Media Type) responses to detect +// that the server does not understand CBOR. Once this has happened, future requests are forced to +// use JSON so they can succeed. This is convenient for client users that want to prefer CBOR, but +// also need to interoperate with older servers so requests do not permanently fail. The clients +// will not default to using CBOR until at least all supported kube-apiservers have enable-CBOR +// locked to true, so this path will be rarely taken. 
Additionally, all generated clients accessing +// built-in kube resources are forced to protobuf, so those will not degrade to JSON. +type requestClientContentConfigProvider struct { + base ClientContentConfig + + // Becomes permanently true if a server responds with HTTP 415 (Unsupported Media Type) to a + // request with "Content-Type" header containing the CBOR media type. + sawUnsupportedMediaTypeForCBOR atomic.Bool +} + +// GetClientContentConfig returns the ClientContentConfig that should be used for new requests by +// this client and true if the request ContentType was selected by default. +func (p *requestClientContentConfigProvider) GetClientContentConfig() (ClientContentConfig, bool) { + config := p.base + + defaulted := config.ContentType == "" + if defaulted { + config.ContentType = "application/json" + } + + if !clientfeatures.FeatureGates().Enabled(clientfeatures.ClientsAllowCBOR) { + return config, defaulted + } + + if defaulted && clientfeatures.FeatureGates().Enabled(clientfeatures.ClientsPreferCBOR) { + config.ContentType = "application/cbor" + } + + if sawUnsupportedMediaTypeForCBOR := p.sawUnsupportedMediaTypeForCBOR.Load(); !sawUnsupportedMediaTypeForCBOR { + return config, defaulted + } + + if mediaType, _, _ := mime.ParseMediaType(config.ContentType); mediaType != runtime.ContentTypeCBOR { + return config, defaulted + } + + // The effective ContentType is CBOR and the client has previously received an HTTP 415 in + // response to a CBOR request. Override ContentType to JSON. + config.ContentType = runtime.ContentTypeJSON + return config, defaulted +} + +// UnsupportedMediaType reports that the server has responded to a request with HTTP 415 Unsupported +// Media Type. +func (p *requestClientContentConfigProvider) UnsupportedMediaType(requestContentType string) { + if !clientfeatures.FeatureGates().Enabled(clientfeatures.ClientsAllowCBOR) { + return + } + + // This could be extended to consider the Content-Encoding request header, the Accept and + // Accept-Encoding response headers, the request method, and URI (as mentioned in + // https://www.rfc-editor.org/rfc/rfc9110.html#section-15.5.16). The request Content-Type + // header is sufficient to implement a blanket CBOR fallback mechanism. + requestContentType, _, _ = mime.ParseMediaType(requestContentType) + switch requestContentType { + case runtime.ContentTypeCBOR, string(types.ApplyCBORPatchType): + p.sawUnsupportedMediaTypeForCBOR.Store(true) + } +} + +// clientNegotiatorWithCBORSequenceStreamDecoder is a ClientNegotiator that delegates to another +// ClientNegotiator to select the appropriate Encoder or Decoder for a given media type. As a +// special case, it will resolve "application/cbor-seq" (a CBOR Sequence, the concatenation of zero +// or more CBOR data items) as an alias for "application/cbor" (exactly one CBOR data item) when +// selecting a stream decoder. 
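Review note: all of the CBOR negotiation above is gated. Unless the ClientsAllowCBOR / ClientsPreferCBOR client-go feature gates are enabled, the content config is scrubbed back to JSON and behaviour is unchanged. For reference, a tiny sketch that inspects the same gates the vendored code checks, using the same FeatureGates() API that appears in the diff:

package main

import (
    "fmt"

    clientfeatures "k8s.io/client-go/features"
)

func main() {
    gates := clientfeatures.FeatureGates()
    // These gates drive the CBOR scrubbing and the 415 fallback shown above.
    fmt.Println("ClientsAllowCBOR:", gates.Enabled(clientfeatures.ClientsAllowCBOR))
    fmt.Println("ClientsPreferCBOR:", gates.Enabled(clientfeatures.ClientsPreferCBOR))
    // WatchListClient gates the WatchList path used further down in request.go.
    fmt.Println("WatchListClient:", gates.Enabled(clientfeatures.WatchListClient))
}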
+type clientNegotiatorWithCBORSequenceStreamDecoder struct { + negotiator runtime.ClientNegotiator +} + +func (n clientNegotiatorWithCBORSequenceStreamDecoder) Encoder(contentType string, params map[string]string) (runtime.Encoder, error) { + return n.negotiator.Encoder(contentType, params) +} + +func (n clientNegotiatorWithCBORSequenceStreamDecoder) Decoder(contentType string, params map[string]string) (runtime.Decoder, error) { + return n.negotiator.Decoder(contentType, params) +} + +func (n clientNegotiatorWithCBORSequenceStreamDecoder) StreamDecoder(contentType string, params map[string]string) (runtime.Decoder, runtime.Serializer, runtime.Framer, error) { + if !clientfeatures.FeatureGates().Enabled(clientfeatures.ClientsAllowCBOR) { + return n.negotiator.StreamDecoder(contentType, params) + } + + switch contentType { + case runtime.ContentTypeCBORSequence: + return n.negotiator.StreamDecoder(runtime.ContentTypeCBOR, params) + case runtime.ContentTypeCBOR: + // This media type is only appropriate for exactly one data item, not the zero or + // more events of a watch stream. + return nil, nil, nil, runtime.NegotiateError{ContentType: contentType, Stream: true} + default: + return n.negotiator.StreamDecoder(contentType, params) + } + } diff --git a/vendor/k8s.io/client-go/rest/config.go b/vendor/k8s.io/client-go/rest/config.go index f8ff7e928..f2e813d07 100644 --- a/vendor/k8s.io/client-go/rest/config.go +++ b/vendor/k8s.io/client-go/rest/config.go @@ -32,6 +32,9 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/apimachinery/pkg/runtime/serializer/cbor" + "k8s.io/client-go/features" "k8s.io/client-go/pkg/version" clientcmdapi "k8s.io/client-go/tools/clientcmd/api" "k8s.io/client-go/transport" @@ -113,6 +116,9 @@ type Config struct { // QPS indicates the maximum QPS to the master from this client. // If it's zero, the created RESTClient will use DefaultQPS: 5 + // + // Setting this to a negative value will disable client-side ratelimiting + // unless `Ratelimiter` is also set. QPS float32 // Maximum burst for throttle. @@ -669,3 +675,19 @@ func CopyConfig(config *Config) *Config { } return c } + +// CodecFactoryForGeneratedClient returns the provided CodecFactory if there are no enabled client +// feature gates affecting serialization. Otherwise, it constructs and returns a new CodecFactory +// from the provided Scheme. +// +// This is supported ONLY for use by clients generated with client-gen. The caller is responsible +// for ensuring that the CodecFactory argument was constructed using the Scheme argument. +func CodecFactoryForGeneratedClient(scheme *runtime.Scheme, codecs serializer.CodecFactory) serializer.CodecFactory { + if !features.FeatureGates().Enabled(features.ClientsAllowCBOR) { + // NOTE: This assumes client-gen will not generate CBOR-enabled Codecs as long as + // the feature gate exists. 
+ return codecs + } + + return serializer.NewCodecFactory(scheme, serializer.WithSerializer(cbor.NewSerializerInfo)) +} diff --git a/vendor/k8s.io/client-go/rest/request.go b/vendor/k8s.io/client-go/rest/request.go index f5a9f68ca..0ec90ad18 100644 --- a/vendor/k8s.io/client-go/rest/request.go +++ b/vendor/k8s.io/client-go/rest/request.go @@ -19,6 +19,7 @@ package rest import ( "bytes" "context" + "encoding/base64" "encoding/hex" "fmt" "io" @@ -99,6 +100,9 @@ func defaultRequestRetryFn(maxRetries int) WithRetry { type Request struct { c *RESTClient + contentConfig ClientContentConfig + contentTypeNotSet bool + warningHandler WarningHandler rateLimiter flowcontrol.RateLimiter @@ -123,7 +127,7 @@ type Request struct { // output err error - // only one of body / bodyBytes may be set. requests using body are not retriable. + // only one of body / bodyBytes may be set. requests using body are not retryable. body io.Reader bodyBytes []byte @@ -152,6 +156,11 @@ func NewRequest(c *RESTClient) *Request { timeout = c.Client.Timeout } + // A request needs to know whether the content type was explicitly configured or selected by + // default in order to support the per-request Protobuf override used by clients generated + // with --prefers-protobuf. + contentConfig, contentTypeDefaulted := c.content.GetClientContentConfig() + r := &Request{ c: c, rateLimiter: c.rateLimiter, @@ -161,14 +170,12 @@ func NewRequest(c *RESTClient) *Request { maxRetries: 10, retryFn: defaultRequestRetryFn, warningHandler: c.warningHandler, - } - switch { - case len(c.content.AcceptContentTypes) > 0: - r.SetHeader("Accept", c.content.AcceptContentTypes) - case len(c.content.ContentType) > 0: - r.SetHeader("Accept", c.content.ContentType+", */*") + contentConfig: contentConfig, + contentTypeNotSet: contentTypeDefaulted, } + + r.setAcceptHeader() return r } @@ -177,11 +184,36 @@ func NewRequestWithClient(base *url.URL, versionedAPIPath string, content Client return NewRequest(&RESTClient{ base: base, versionedAPIPath: versionedAPIPath, - content: content, + content: requestClientContentConfigProvider{base: content}, Client: client, }) } +func (r *Request) UseProtobufAsDefaultIfPreferred(prefersProtobuf bool) *Request { + if prefersProtobuf { + return r.UseProtobufAsDefault() + } + return r +} + +func (r *Request) UseProtobufAsDefault() *Request { + if r.contentTypeNotSet && len(r.contentConfig.AcceptContentTypes) == 0 { + r.contentConfig.AcceptContentTypes = "application/vnd.kubernetes.protobuf,application/json" + r.contentConfig.ContentType = "application/vnd.kubernetes.protobuf" + r.setAcceptHeader() + } + return r +} + +func (r *Request) setAcceptHeader() { + switch { + case len(r.contentConfig.AcceptContentTypes) > 0: + r.SetHeader("Accept", r.contentConfig.AcceptContentTypes) + case len(r.contentConfig.ContentType) > 0: + r.SetHeader("Accept", r.contentConfig.ContentType+", */*") + } +} + // Verb sets the verb this request will use. func (r *Request) Verb(verb string) *Request { r.verb = verb @@ -370,7 +402,7 @@ func (r *Request) Param(paramName, s string) *Request { // VersionedParams will not write query parameters that have omitempty set and are empty. If a // parameter has already been set it is appended to (Params and VersionedParams are additive). 
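Review note: the doc addition on Config.QPS in rest/config.go above spells out existing behaviour: a negative QPS disables client-side rate limiting as long as no RateLimiter is set. A short sketch of an in-cluster config tweaked accordingly; whether disabling the client-side limiter is appropriate depends on the controller and the cluster's priority-and-fairness budget:

package main

import (
    "k8s.io/client-go/kubernetes"
    "k8s.io/client-go/rest"
)

func main() {
    cfg, err := rest.InClusterConfig()
    if err != nil {
        panic(err)
    }

    // Per the Config.QPS comment: a negative QPS turns off client-side
    // rate limiting as long as cfg.RateLimiter is left nil.
    cfg = rest.CopyConfig(cfg)
    cfg.QPS = -1

    if _, err := kubernetes.NewForConfig(cfg); err != nil {
        panic(err)
    }
}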
func (r *Request) VersionedParams(obj runtime.Object, codec runtime.ParameterCodec) *Request { - return r.SpecificallyVersionedParams(obj, codec, r.c.content.GroupVersion) + return r.SpecificallyVersionedParams(obj, codec, r.contentConfig.GroupVersion) } func (r *Request) SpecificallyVersionedParams(obj runtime.Object, codec runtime.ParameterCodec, version schema.GroupVersion) *Request { @@ -450,11 +482,9 @@ func (r *Request) Body(obj interface{}) *Request { r.err = err return r } - glogBody("Request Body", data) r.body = nil r.bodyBytes = data case []byte: - glogBody("Request Body", t) r.body = nil r.bodyBytes = t case io.Reader: @@ -465,7 +495,7 @@ func (r *Request) Body(obj interface{}) *Request { if reflect.ValueOf(t).IsNil() { return r } - encoder, err := r.c.content.Negotiator.Encoder(r.c.content.ContentType, nil) + encoder, err := r.contentConfig.Negotiator.Encoder(r.contentConfig.ContentType, nil) if err != nil { r.err = err return r @@ -475,10 +505,9 @@ func (r *Request) Body(obj interface{}) *Request { r.err = err return r } - glogBody("Request Body", data) r.body = nil r.bodyBytes = data - r.SetHeader("Content-Type", r.c.content.ContentType) + r.SetHeader("Content-Type", r.contentConfig.ContentType) default: r.err = fmt.Errorf("unknown type used for body: %+v", obj) } @@ -704,10 +733,19 @@ func (b *throttledLogger) Infof(message string, args ...interface{}) { // Watch attempts to begin watching the requested location. // Returns a watch.Interface, or an error. func (r *Request) Watch(ctx context.Context) (watch.Interface, error) { + w, _, e := r.watchInternal(ctx) + return w, e +} + +func (r *Request) watchInternal(ctx context.Context) (watch.Interface, runtime.Decoder, error) { + if r.body == nil { + logBody(ctx, 2, "Request Body", r.bodyBytes) + } + // We specifically don't want to rate limit watches, so we // don't use r.rateLimiter here. if r.err != nil { - return nil, r.err + return nil, nil, r.err } client := r.c.Client @@ -727,12 +765,12 @@ func (r *Request) Watch(ctx context.Context) (watch.Interface, error) { url := r.URL().String() for { if err := retry.Before(ctx, r); err != nil { - return nil, retry.WrapPreviousError(err) + return nil, nil, retry.WrapPreviousError(err) } req, err := r.newHTTPRequest(ctx) if err != nil { - return nil, err + return nil, nil, err } resp, err := client.Do(req) @@ -752,21 +790,22 @@ func (r *Request) Watch(ctx context.Context) (watch.Interface, error) { // the server must have sent us an error in 'err' return true, nil } - if result := r.transformResponse(resp, req); result.err != nil { - return true, result.err + result := r.transformResponse(ctx, resp, req) + if err := result.Error(); err != nil { + return true, err } return true, fmt.Errorf("for request %s, got status: %v", url, resp.StatusCode) }() if done { if isErrRetryableFunc(req, err) { - return watch.NewEmptyWatch(), nil + return watch.NewEmptyWatch(), nil, nil } if err == nil { // if the server sent us an HTTP Response object, // we need to return the error object from that. err = transformErr } - return nil, retry.WrapPreviousError(err) + return nil, nil, retry.WrapPreviousError(err) } } } @@ -784,22 +823,35 @@ type WatchListResult struct { // the end of the stream. 
initialEventsEndBookmarkRV string - // gv represents the API version - // it is used to construct the final list response - // normally this information is filled by the server - gv schema.GroupVersion + // negotiatedObjectDecoder knows how to decode + // the initialEventsListBlueprint + negotiatedObjectDecoder runtime.Decoder + + // base64EncodedInitialEventsListBlueprint contains an empty, + // versioned list encoded in the requested format + // (e.g., protobuf, JSON, CBOR) and stored as a base64-encoded string + base64EncodedInitialEventsListBlueprint string } +// Into stores the result into obj. The passed obj parameter must be a pointer to a list type. +// +// Note: +// +// Special attention should be given to the type *unstructured.Unstructured, +// which represents a list type but does not have an "Items" field. +// Users who directly use RESTClient may store the response in such an object. +// This particular case is not handled by the current implementation of this function, +// but may be considered for future updates. func (r WatchListResult) Into(obj runtime.Object) error { if r.err != nil { return r.err } - listPtr, err := meta.GetItemsPtr(obj) + listItemsPtr, err := meta.GetItemsPtr(obj) if err != nil { return err } - listVal, err := conversion.EnforcePtr(listPtr) + listVal, err := conversion.EnforcePtr(listItemsPtr) if err != nil { return err } @@ -807,6 +859,16 @@ func (r WatchListResult) Into(obj runtime.Object) error { return fmt.Errorf("need a pointer to slice, got %v", listVal.Kind()) } + encodedInitialEventsListBlueprint, err := base64.StdEncoding.DecodeString(r.base64EncodedInitialEventsListBlueprint) + if err != nil { + return fmt.Errorf("failed to decode the received blueprint list, err %w", err) + } + + err = runtime.DecodeInto(r.negotiatedObjectDecoder, encodedInitialEventsListBlueprint, obj) + if err != nil { + return err + } + if len(r.items) == 0 { listVal.Set(reflect.MakeSlice(listVal.Type(), 0, 0)) } else { @@ -824,15 +886,6 @@ func (r WatchListResult) Into(obj runtime.Object) error { return err } listMeta.SetResourceVersion(r.initialEventsEndBookmarkRV) - - typeMeta, err := meta.TypeAccessor(obj) - if err != nil { - return err - } - version := r.gv.String() - typeMeta.SetAPIVersion(version) - typeMeta.SetKind(reflect.TypeOf(obj).Elem().Name()) - return nil } @@ -844,6 +897,10 @@ func (r WatchListResult) Into(obj runtime.Object) error { // Check the documentation https://kubernetes.io/docs/reference/using-api/api-concepts/#streaming-lists // to see what parameters are currently required. func (r *Request) WatchList(ctx context.Context) WatchListResult { + if r.body == nil { + logBody(ctx, 2, "Request Body", r.bodyBytes) + } + if !clientfeatures.FeatureGates().Enabled(clientfeatures.WatchListClient) { return WatchListResult{err: fmt.Errorf("%q feature gate is not enabled", clientfeatures.WatchListClient)} } @@ -851,16 +908,16 @@ func (r *Request) WatchList(ctx context.Context) WatchListResult { // Most users use the generated client, which handles the proper setting of parameters. // We don't have validation for other methods (e.g., the Watch) // thus, for symmetry, we haven't added additional checks for the WatchList method. - w, err := r.Watch(ctx) + w, d, err := r.watchInternal(ctx) if err != nil { return WatchListResult{err: err} } - return r.handleWatchList(ctx, w) + return r.handleWatchList(ctx, w, d) } // handleWatchList holds the actual logic for easier unit testing. // Note that this function will close the passed watch. 
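Review note: WatchListResult.Into now rebuilds the returned list from the base64-encoded "initial events list blueprint" carried on the bookmark event, decoded with the negotiated decoder, instead of stitching TypeMeta together client-side. A rough end-to-end sketch of driving WatchList through a typed clientset's RESTClient; it assumes the WatchListClient feature gate is enabled and uses the list options the streaming-list API is documented to require (the authoritative set is the API-concepts page linked in the code):

package main

import (
    "context"

    corev1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/client-go/kubernetes"
    "k8s.io/client-go/kubernetes/scheme"
    "k8s.io/client-go/rest"
    "k8s.io/utils/ptr"
)

func main() {
    cfg, err := rest.InClusterConfig()
    if err != nil {
        panic(err)
    }
    cs, err := kubernetes.NewForConfig(cfg)
    if err != nil {
        panic(err)
    }

    // Options believed to be required for the streaming list path; WatchList
    // itself does not validate them, as noted in the code above.
    opts := metav1.ListOptions{
        Watch:                true,
        AllowWatchBookmarks:  true,
        SendInitialEvents:    ptr.To(true),
        ResourceVersionMatch: metav1.ResourceVersionMatchNotOlderThan,
    }

    var cms corev1.ConfigMapList
    result := cs.CoreV1().RESTClient().Get().
        Namespace("cert-manager"). // placeholder namespace
        Resource("configmaps").
        VersionedParams(&opts, scheme.ParameterCodec).
        WatchList(context.TODO())
    if err := result.Into(&cms); err != nil {
        // Fails with an explanatory error when the WatchListClient gate is off.
        panic(err)
    }
}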
-func (r *Request) handleWatchList(ctx context.Context, w watch.Interface) WatchListResult { +func (r *Request) handleWatchList(ctx context.Context, w watch.Interface, negotiatedObjectDecoder runtime.Decoder) WatchListResult { defer w.Stop() var lastKey string var items []runtime.Object @@ -894,10 +951,15 @@ func (r *Request) handleWatchList(ctx context.Context, w watch.Interface) WatchL lastKey = key case watch.Bookmark: if meta.GetAnnotations()[metav1.InitialEventsAnnotationKey] == "true" { + base64EncodedInitialEventsListBlueprint := meta.GetAnnotations()[metav1.InitialEventsListBlueprintAnnotationKey] + if len(base64EncodedInitialEventsListBlueprint) == 0 { + return WatchListResult{err: fmt.Errorf("%q annotation is missing content", metav1.InitialEventsListBlueprintAnnotationKey)} + } return WatchListResult{ - items: items, - initialEventsEndBookmarkRV: meta.GetResourceVersion(), - gv: r.c.content.GroupVersion, + items: items, + initialEventsEndBookmarkRV: meta.GetResourceVersion(), + negotiatedObjectDecoder: negotiatedObjectDecoder, + base64EncodedInitialEventsListBlueprint: base64EncodedInitialEventsListBlueprint, } } default: @@ -907,15 +969,15 @@ func (r *Request) handleWatchList(ctx context.Context, w watch.Interface) WatchL } } -func (r *Request) newStreamWatcher(resp *http.Response) (watch.Interface, error) { +func (r *Request) newStreamWatcher(resp *http.Response) (watch.Interface, runtime.Decoder, error) { contentType := resp.Header.Get("Content-Type") mediaType, params, err := mime.ParseMediaType(contentType) if err != nil { klog.V(4).Infof("Unexpected content type from the server: %q: %v", contentType, err) } - objectDecoder, streamingSerializer, framer, err := r.c.content.Negotiator.StreamDecoder(mediaType, params) + objectDecoder, streamingSerializer, framer, err := r.contentConfig.Negotiator.StreamDecoder(mediaType, params) if err != nil { - return nil, err + return nil, nil, err } handleWarnings(resp.Header, r.warningHandler) @@ -928,7 +990,7 @@ func (r *Request) newStreamWatcher(resp *http.Response) (watch.Interface, error) // use 500 to indicate that the cause of the error is unknown - other error codes // are more specific to HTTP interactions, and set a reason errors.NewClientErrorReporter(http.StatusInternalServerError, r.verb, "ClientWatchDecoding"), - ), nil + ), objectDecoder, nil } // updateRequestResultMetric increments the RequestResult metric counter, @@ -968,6 +1030,10 @@ func sanitize(req *Request, resp *http.Response, err error) (string, string) { // Any non-2xx http status code causes an error. If we get a non-2xx code, we try to convert the body into an APIStatus object. // If we can, we return that as an error. Otherwise, we create an error that lists the http status and the content of the response. func (r *Request) Stream(ctx context.Context) (io.ReadCloser, error) { + if r.body == nil { + logBody(ctx, 2, "Request Body", r.bodyBytes) + } + if r.err != nil { return nil, r.err } @@ -1011,7 +1077,7 @@ func (r *Request) Stream(ctx context.Context) (io.ReadCloser, error) { if retry.IsNextRetry(ctx, r, req, resp, err, neverRetryError) { return false, nil } - result := r.transformResponse(resp, req) + result := r.transformResponse(ctx, resp, req) if err := result.Error(); err != nil { return true, err } @@ -1143,7 +1209,7 @@ func (r *Request) request(ctx context.Context, fn func(*http.Request, *http.Resp return false } // For connection errors and apiserver shutdown errors retry. 
- if net.IsConnectionReset(err) || net.IsProbableEOF(err) { + if net.IsConnectionReset(err) || net.IsProbableEOF(err) || net.IsHTTP2ConnectionLost(err) { return true } return false @@ -1165,6 +1231,9 @@ func (r *Request) request(ctx context.Context, fn func(*http.Request, *http.Resp if req.ContentLength >= 0 && !(req.Body != nil && req.ContentLength == 0) { metrics.RequestSize.Observe(ctx, r.verb, r.URL().Host, float64(req.ContentLength)) } + if resp != nil && resp.StatusCode == http.StatusUnsupportedMediaType { + r.c.content.UnsupportedMediaType(resp.Request.Header.Get("Content-Type")) + } retry.After(ctx, r, resp, err) done := func() bool { @@ -1198,9 +1267,13 @@ func (r *Request) request(ctx context.Context, fn func(*http.Request, *http.Resp // - If the server responds with a status: *errors.StatusError or *errors.UnexpectedObjectError // - http.Client.Do errors are returned directly. func (r *Request) Do(ctx context.Context) Result { + if r.body == nil { + logBody(ctx, 2, "Request Body", r.bodyBytes) + } + var result Result err := r.request(ctx, func(req *http.Request, resp *http.Response) { - result = r.transformResponse(resp, req) + result = r.transformResponse(ctx, resp, req) }) if err != nil { return Result{err: err} @@ -1213,10 +1286,14 @@ func (r *Request) Do(ctx context.Context) Result { // DoRaw executes the request but does not process the response body. func (r *Request) DoRaw(ctx context.Context) ([]byte, error) { + if r.body == nil { + logBody(ctx, 2, "Request Body", r.bodyBytes) + } + var result Result err := r.request(ctx, func(req *http.Request, resp *http.Response) { result.body, result.err = io.ReadAll(resp.Body) - glogBody("Response Body", result.body) + logBody(ctx, 2, "Response Body", result.body) if resp.StatusCode < http.StatusOK || resp.StatusCode > http.StatusPartialContent { result.err = r.transformUnstructuredResponseError(resp, req, result.body) } @@ -1231,7 +1308,7 @@ func (r *Request) DoRaw(ctx context.Context) ([]byte, error) { } // transformResponse converts an API response into a structured API object -func (r *Request) transformResponse(resp *http.Response, req *http.Request) Result { +func (r *Request) transformResponse(ctx context.Context, resp *http.Response, req *http.Request) Result { var body []byte if resp.Body != nil { data, err := io.ReadAll(resp.Body) @@ -1260,13 +1337,14 @@ func (r *Request) transformResponse(resp *http.Response, req *http.Request) Resu } } - glogBody("Response Body", body) + // Call depth is tricky. This one is okay for Do and DoRaw. + logBody(ctx, 7, "Response Body", body) // verify the content type is accurate var decoder runtime.Decoder contentType := resp.Header.Get("Content-Type") if len(contentType) == 0 { - contentType = r.c.content.ContentType + contentType = r.contentConfig.ContentType } if len(contentType) > 0 { var err error @@ -1274,7 +1352,7 @@ func (r *Request) transformResponse(resp *http.Response, req *http.Request) Resu if err != nil { return Result{err: errors.NewInternalError(err)} } - decoder, err = r.c.content.Negotiator.Decoder(mediaType, params) + decoder, err = r.contentConfig.Negotiator.Decoder(mediaType, params) if err != nil { // if we fail to negotiate a decoder, treat this as an unstructured error switch { @@ -1320,14 +1398,14 @@ func (r *Request) transformResponse(resp *http.Response, req *http.Request) Resu } // truncateBody decides if the body should be truncated, based on the glog Verbosity. 
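Because glogBody has been replaced by the context-aware logBody in the request paths above, body logging now flows through whatever logger is attached to the request context (at verbosity 8 and above, truncated per the rules that follow). A hedged sketch of opting into that from calling code; the logger name and the resource being listed are arbitrary.

package example

import (
    "context"

    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/client-go/kubernetes"
    "k8s.io/client-go/rest"
    "k8s.io/klog/v2"
)

// listWithBodyLogging attaches a named logger to the context so that, at
// -v=8 and above, request and response bodies are emitted through it rather
// than through the global klog functions.
func listWithBodyLogging(cfg *rest.Config) error {
    logger := klog.Background().WithName("rest-client")
    ctx := klog.NewContext(context.Background(), logger)

    cs, err := kubernetes.NewForConfig(cfg)
    if err != nil {
        return err
    }
    _, err = cs.CoreV1().ConfigMaps("default").List(ctx, metav1.ListOptions{})
    return err
}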
-func truncateBody(body string) string { +func truncateBody(logger klog.Logger, body string) string { max := 0 switch { - case bool(klog.V(10).Enabled()): + case bool(logger.V(10).Enabled()): return body - case bool(klog.V(9).Enabled()): + case bool(logger.V(9).Enabled()): max = 10240 - case bool(klog.V(8).Enabled()): + case bool(logger.V(8).Enabled()): max = 1024 } @@ -1338,17 +1416,21 @@ func truncateBody(body string) string { return body[:max] + fmt.Sprintf(" [truncated %d chars]", len(body)-max) } -// glogBody logs a body output that could be either JSON or protobuf. It explicitly guards against +// logBody logs a body output that could be either JSON or protobuf. It explicitly guards against // allocating a new string for the body output unless necessary. Uses a simple heuristic to determine // whether the body is printable. -func glogBody(prefix string, body []byte) { - if klogV := klog.V(8); klogV.Enabled() { +// +// It needs to be called by all functions which send or receive the data. +func logBody(ctx context.Context, callDepth int, prefix string, body []byte) { + logger := klog.FromContext(ctx) + if loggerV := logger.V(8); loggerV.Enabled() { + loggerV := loggerV.WithCallDepth(callDepth) if bytes.IndexFunc(body, func(r rune) bool { return r < 0x0a }) != -1 { - klogV.Infof("%s:\n%s", prefix, truncateBody(hex.Dump(body))) + loggerV.Info(prefix, "body", truncateBody(logger, hex.Dump(body))) } else { - klogV.Infof("%s: %s", prefix, truncateBody(string(body))) + loggerV.Info(prefix, "body", truncateBody(logger, string(body))) } } } @@ -1397,7 +1479,7 @@ func (r *Request) newUnstructuredResponseError(body []byte, isTextResponse bool, } var groupResource schema.GroupResource if len(r.resource) > 0 { - groupResource.Group = r.c.content.GroupVersion.Group + groupResource.Group = r.contentConfig.GroupVersion.Group groupResource.Resource = r.resource } return errors.NewGenericServerResponse( diff --git a/vendor/k8s.io/client-go/rest/url_utils.go b/vendor/k8s.io/client-go/rest/url_utils.go index c4ce6e3b8..0a0ab7917 100644 --- a/vendor/k8s.io/client-go/rest/url_utils.go +++ b/vendor/k8s.io/client-go/rest/url_utils.go @@ -61,7 +61,7 @@ func DefaultServerURL(host, apiPath string, groupVersion schema.GroupVersion, de return hostURL, versionedAPIPath, nil } -// DefaultVersionedAPIPathFor constructs the default path for the given group version, assuming the given +// DefaultVersionedAPIPath constructs the default path for the given group version, assuming the given // API path, following the standard conventions of the Kubernetes API. func DefaultVersionedAPIPath(apiPath string, groupVersion schema.GroupVersion) string { versionedAPIPath := path.Join("/", apiPath) diff --git a/vendor/k8s.io/client-go/rest/watch/decoder.go b/vendor/k8s.io/client-go/rest/watch/decoder.go index 9e1e04d14..c2b68cbcb 100644 --- a/vendor/k8s.io/client-go/rest/watch/decoder.go +++ b/vendor/k8s.io/client-go/rest/watch/decoder.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package versioned +package watch import ( "fmt" diff --git a/vendor/k8s.io/client-go/rest/watch/encoder.go b/vendor/k8s.io/client-go/rest/watch/encoder.go index e55aa12d9..a95b4985c 100644 --- a/vendor/k8s.io/client-go/rest/watch/encoder.go +++ b/vendor/k8s.io/client-go/rest/watch/encoder.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package versioned +package watch import ( "encoding/json" diff --git a/vendor/k8s.io/client-go/testing/actions.go b/vendor/k8s.io/client-go/testing/actions.go index 270cc4ddb..e7af4d6e8 100644 --- a/vendor/k8s.io/client-go/testing/actions.go +++ b/vendor/k8s.io/client-go/testing/actions.go @@ -29,6 +29,10 @@ import ( "k8s.io/apimachinery/pkg/types" ) +// All NewRoot... functions return non-namespaced actions, and are equivalent to +// calling the corresponding New... function with an empty namespace. +// This is assumed by the fake client generator. + func NewRootGetAction(resource schema.GroupVersionResource, name string) GetActionImpl { return NewRootGetActionWithOptions(resource, name, metav1.GetOptions{}) } diff --git a/vendor/k8s.io/client-go/testing/fixture.go b/vendor/k8s.io/client-go/testing/fixture.go index d288a3aa4..15b3e5334 100644 --- a/vendor/k8s.io/client-go/testing/fixture.go +++ b/vendor/k8s.io/client-go/testing/fixture.go @@ -214,6 +214,7 @@ func (o objectTrackerReact) Apply(action PatchActionImpl) (runtime.Object, error if err := yaml.Unmarshal(action.GetPatch(), &patchObj.Object); err != nil { return nil, err } + patchObj.SetName(action.GetName()) err := o.tracker.Apply(gvr, patchObj, ns, action.PatchOptions) if err != nil { return nil, err diff --git a/vendor/k8s.io/client-go/tools/cache/OWNERS b/vendor/k8s.io/client-go/tools/cache/OWNERS index 921ac2fa0..fc441e0ef 100644 --- a/vendor/k8s.io/client-go/tools/cache/OWNERS +++ b/vendor/k8s.io/client-go/tools/cache/OWNERS @@ -7,7 +7,6 @@ approvers: - deads2k - caesarxuchao - liggitt - - ncdc reviewers: - thockin - smarterclayton @@ -23,6 +22,6 @@ reviewers: - jsafrane - dims - ingvagabund - - ncdc emeritus_approvers: - lavalamp + - ncdc diff --git a/vendor/k8s.io/client-go/tools/cache/reflector.go b/vendor/k8s.io/client-go/tools/cache/reflector.go index 5e7dd5740..030b45297 100644 --- a/vendor/k8s.io/client-go/tools/cache/reflector.go +++ b/vendor/k8s.io/client-go/tools/cache/reflector.go @@ -57,7 +57,7 @@ var ( // Reflector watches a specified resource and causes all changes to be reflected in the given store. type Reflector struct { - // name identifies this reflector. By default it will be a file:line if possible. + // name identifies this reflector. By default, it will be a file:line if possible. name string // The name of the type we expect to place in the store. The name // will be the stringification of expectedGVK if provided, and the @@ -121,6 +121,14 @@ type Reflector struct { UseWatchList *bool } +func (r *Reflector) Name() string { + return r.name +} + +func (r *Reflector) TypeDescription() string { + return r.typeDescription +} + // ResourceVersionUpdater is an interface that allows store implementation to // track the current resource version of the reflector. This is especially // important if storage bookmarks are enabled. 
diff --git a/vendor/k8s.io/client-go/tools/clientcmd/api/latest/latest.go b/vendor/k8s.io/client-go/tools/clientcmd/api/latest/latest.go index 35bb5dde1..c575652b1 100644 --- a/vendor/k8s.io/client-go/tools/clientcmd/api/latest/latest.go +++ b/vendor/k8s.io/client-go/tools/clientcmd/api/latest/latest.go @@ -50,7 +50,7 @@ func init() { Scheme = runtime.NewScheme() utilruntime.Must(api.AddToScheme(Scheme)) utilruntime.Must(v1.AddToScheme(Scheme)) - yamlSerializer := json.NewYAMLSerializer(json.DefaultMetaFactory, Scheme, Scheme) + yamlSerializer := json.NewSerializerWithOptions(json.DefaultMetaFactory, Scheme, Scheme, json.SerializerOptions{Yaml: true}) Codec = versioning.NewDefaultingCodecForScheme( Scheme, yamlSerializer, diff --git a/vendor/k8s.io/client-go/tools/clientcmd/client_config.go b/vendor/k8s.io/client-go/tools/clientcmd/client_config.go index 952f6d7eb..cd0a8649b 100644 --- a/vendor/k8s.io/client-go/tools/clientcmd/client_config.go +++ b/vendor/k8s.io/client-go/tools/clientcmd/client_config.go @@ -29,8 +29,6 @@ import ( clientauth "k8s.io/client-go/tools/auth" clientcmdapi "k8s.io/client-go/tools/clientcmd/api" "k8s.io/klog/v2" - - "github.com/imdario/mergo" ) const ( @@ -241,45 +239,37 @@ func (config *DirectClientConfig) ClientConfig() (*restclient.Config, error) { if err != nil { return nil, err } - mergo.Merge(clientConfig, userAuthPartialConfig, mergo.WithOverride) + if err := merge(clientConfig, userAuthPartialConfig); err != nil { + return nil, err + } - serverAuthPartialConfig, err := getServerIdentificationPartialConfig(configAuthInfo, configClusterInfo) - if err != nil { + serverAuthPartialConfig := getServerIdentificationPartialConfig(configClusterInfo) + if err := merge(clientConfig, serverAuthPartialConfig); err != nil { return nil, err } - mergo.Merge(clientConfig, serverAuthPartialConfig, mergo.WithOverride) } return clientConfig, nil } // clientauth.Info object contain both user identification and server identification. We want different precedence orders for -// both, so we have to split the objects and merge them separately -// we want this order of precedence for the server identification -// 1. configClusterInfo (the final result of command line flags and merged .kubeconfig files) -// 2. configAuthInfo.auth-path (this file can contain information that conflicts with #1, and we want #1 to win the priority) -// 3. load the ~/.kubernetes_auth file as a default -func getServerIdentificationPartialConfig(configAuthInfo clientcmdapi.AuthInfo, configClusterInfo clientcmdapi.Cluster) (*restclient.Config, error) { - mergedConfig := &restclient.Config{} +// both, so we have to split the objects and merge them separately. - // configClusterInfo holds the information identify the server provided by .kubeconfig +// getServerIdentificationPartialConfig extracts server identification information from configClusterInfo +// (the final result of command line flags and merged .kubeconfig files). 
+func getServerIdentificationPartialConfig(configClusterInfo clientcmdapi.Cluster) *restclient.Config { configClientConfig := &restclient.Config{} configClientConfig.CAFile = configClusterInfo.CertificateAuthority configClientConfig.CAData = configClusterInfo.CertificateAuthorityData configClientConfig.Insecure = configClusterInfo.InsecureSkipTLSVerify configClientConfig.ServerName = configClusterInfo.TLSServerName - mergo.Merge(mergedConfig, configClientConfig, mergo.WithOverride) - return mergedConfig, nil + return configClientConfig } -// clientauth.Info object contain both user identification and server identification. We want different precedence orders for -// both, so we have to split the objects and merge them separately -// we want this order of precedence for user identification -// 1. configAuthInfo minus auth-path (the final result of command line flags and merged .kubeconfig files) -// 2. configAuthInfo.auth-path (this file can contain information that conflicts with #1, and we want #1 to win the priority) -// 3. if there is not enough information to identify the user, load try the ~/.kubernetes_auth file -// 4. if there is not enough information to identify the user, prompt if possible +// getUserIdentificationPartialConfig extracts user identification information from configAuthInfo +// (the final result of command line flags and merged .kubeconfig files); +// if the information available there is insufficient, it prompts (if possible) for additional information. func (config *DirectClientConfig) getUserIdentificationPartialConfig(configAuthInfo clientcmdapi.AuthInfo, fallbackReader io.Reader, persistAuthConfig restclient.AuthProviderConfigPersister, configClusterInfo clientcmdapi.Cluster) (*restclient.Config, error) { mergedConfig := &restclient.Config{} @@ -338,8 +328,12 @@ func (config *DirectClientConfig) getUserIdentificationPartialConfig(configAuthI promptedConfig := makeUserIdentificationConfig(*promptedAuthInfo) previouslyMergedConfig := mergedConfig mergedConfig = &restclient.Config{} - mergo.Merge(mergedConfig, promptedConfig, mergo.WithOverride) - mergo.Merge(mergedConfig, previouslyMergedConfig, mergo.WithOverride) + if err := merge(mergedConfig, promptedConfig); err != nil { + return nil, err + } + if err := merge(mergedConfig, previouslyMergedConfig); err != nil { + return nil, err + } config.promptedCredentials.username = mergedConfig.Username config.promptedCredentials.password = mergedConfig.Password } @@ -347,7 +341,7 @@ func (config *DirectClientConfig) getUserIdentificationPartialConfig(configAuthI return mergedConfig, nil } -// makeUserIdentificationFieldsConfig returns a client.Config capable of being merged using mergo for only user identification information +// makeUserIdentificationFieldsConfig returns a client.Config capable of being merged for only user identification information func makeUserIdentificationConfig(info clientauth.Info) *restclient.Config { config := &restclient.Config{} config.Username = info.User @@ -507,12 +501,16 @@ func (config *DirectClientConfig) getContext() (clientcmdapi.Context, error) { mergedContext := clientcmdapi.NewContext() if configContext, exists := contexts[contextName]; exists { - mergo.Merge(mergedContext, configContext, mergo.WithOverride) + if err := merge(mergedContext, configContext); err != nil { + return clientcmdapi.Context{}, err + } } else if required { return clientcmdapi.Context{}, fmt.Errorf("context %q does not exist", contextName) } if config.overrides != nil { - mergo.Merge(mergedContext, 
config.overrides.Context, mergo.WithOverride) + if err := merge(mergedContext, &config.overrides.Context); err != nil { + return clientcmdapi.Context{}, err + } } return *mergedContext, nil @@ -525,12 +523,16 @@ func (config *DirectClientConfig) getAuthInfo() (clientcmdapi.AuthInfo, error) { mergedAuthInfo := clientcmdapi.NewAuthInfo() if configAuthInfo, exists := authInfos[authInfoName]; exists { - mergo.Merge(mergedAuthInfo, configAuthInfo, mergo.WithOverride) + if err := merge(mergedAuthInfo, configAuthInfo); err != nil { + return clientcmdapi.AuthInfo{}, err + } } else if required { return clientcmdapi.AuthInfo{}, fmt.Errorf("auth info %q does not exist", authInfoName) } if config.overrides != nil { - mergo.Merge(mergedAuthInfo, config.overrides.AuthInfo, mergo.WithOverride) + if err := merge(mergedAuthInfo, &config.overrides.AuthInfo); err != nil { + return clientcmdapi.AuthInfo{}, err + } } return *mergedAuthInfo, nil @@ -543,15 +545,21 @@ func (config *DirectClientConfig) getCluster() (clientcmdapi.Cluster, error) { mergedClusterInfo := clientcmdapi.NewCluster() if config.overrides != nil { - mergo.Merge(mergedClusterInfo, config.overrides.ClusterDefaults, mergo.WithOverride) + if err := merge(mergedClusterInfo, &config.overrides.ClusterDefaults); err != nil { + return clientcmdapi.Cluster{}, err + } } if configClusterInfo, exists := clusterInfos[clusterInfoName]; exists { - mergo.Merge(mergedClusterInfo, configClusterInfo, mergo.WithOverride) + if err := merge(mergedClusterInfo, configClusterInfo); err != nil { + return clientcmdapi.Cluster{}, err + } } else if required { return clientcmdapi.Cluster{}, fmt.Errorf("cluster %q does not exist", clusterInfoName) } if config.overrides != nil { - mergo.Merge(mergedClusterInfo, config.overrides.ClusterInfo, mergo.WithOverride) + if err := merge(mergedClusterInfo, &config.overrides.ClusterInfo); err != nil { + return clientcmdapi.Cluster{}, err + } } // * An override of --insecure-skip-tls-verify=true and no accompanying CA/CA data should clear already-set CA/CA data diff --git a/vendor/k8s.io/client-go/tools/clientcmd/loader.go b/vendor/k8s.io/client-go/tools/clientcmd/loader.go index b75737f1c..c900e5fd1 100644 --- a/vendor/k8s.io/client-go/tools/clientcmd/loader.go +++ b/vendor/k8s.io/client-go/tools/clientcmd/loader.go @@ -24,7 +24,6 @@ import ( goruntime "runtime" "strings" - "github.com/imdario/mergo" "k8s.io/klog/v2" "k8s.io/apimachinery/pkg/runtime" @@ -248,7 +247,9 @@ func (rules *ClientConfigLoadingRules) Load() (*clientcmdapi.Config, error) { mapConfig := clientcmdapi.NewConfig() for _, kubeconfig := range kubeconfigs { - mergo.Merge(mapConfig, kubeconfig, mergo.WithOverride) + if err := merge(mapConfig, kubeconfig); err != nil { + return nil, err + } } // merge all of the struct values in the reverse order so that priority is given correctly @@ -256,14 +257,20 @@ func (rules *ClientConfigLoadingRules) Load() (*clientcmdapi.Config, error) { nonMapConfig := clientcmdapi.NewConfig() for i := len(kubeconfigs) - 1; i >= 0; i-- { kubeconfig := kubeconfigs[i] - mergo.Merge(nonMapConfig, kubeconfig, mergo.WithOverride) + if err := merge(nonMapConfig, kubeconfig); err != nil { + return nil, err + } } // since values are overwritten, but maps values are not, we can merge the non-map config on top of the map config and // get the values we expect. 
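To make that ordering concrete, here is a small illustration of the semantics of the generic merge helper that replaces mergo (added below in clientcmd/merge.go): maps are shallow-merged with src keys winning, while zero-valued scalar fields in src leave dst untouched. The helper is unexported, so this hypothetical function only compiles inside package clientcmd; the values are invented.

package clientcmd

import (
    clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
)

// exampleMergeSemantics is a hypothetical illustration, not part of the patch.
func exampleMergeSemantics() (*clientcmdapi.Config, error) {
    dst := clientcmdapi.NewConfig()
    dst.CurrentContext = "old-context"
    dst.Clusters["a"] = &clientcmdapi.Cluster{Server: "https://a.example"}

    src := clientcmdapi.NewConfig()
    src.Clusters["b"] = &clientcmdapi.Cluster{Server: "https://b.example"}
    // src.CurrentContext is empty (zero), so it does not clobber dst's value.

    if err := merge(dst, src); err != nil {
        return nil, err
    }
    // dst.CurrentContext is still "old-context"; dst.Clusters holds "a" and "b".
    return dst, nil
}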
config := clientcmdapi.NewConfig() - mergo.Merge(config, mapConfig, mergo.WithOverride) - mergo.Merge(config, nonMapConfig, mergo.WithOverride) + if err := merge(config, mapConfig); err != nil { + return nil, err + } + if err := merge(config, nonMapConfig); err != nil { + return nil, err + } if rules.ResolvePaths() { if err := ResolveLocalPaths(config); err != nil { diff --git a/vendor/k8s.io/client-go/tools/clientcmd/merge.go b/vendor/k8s.io/client-go/tools/clientcmd/merge.go new file mode 100644 index 000000000..3d74e6029 --- /dev/null +++ b/vendor/k8s.io/client-go/tools/clientcmd/merge.go @@ -0,0 +1,121 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package clientcmd + +import ( + "fmt" + "reflect" + "strings" +) + +// recursively merges src into dst: +// - non-pointer struct fields with any exported fields are recursively merged +// - non-pointer struct fields with only unexported fields prefer src if the field is non-zero +// - maps are shallow merged with src keys taking priority over dst +// - non-zero src fields encountered during recursion that are not maps or structs overwrite and recursion stops +func merge[T any](dst, src *T) error { + if dst == nil { + return fmt.Errorf("cannot merge into nil pointer") + } + if src == nil { + return nil + } + return mergeValues(nil, reflect.ValueOf(dst).Elem(), reflect.ValueOf(src).Elem()) +} + +func mergeValues(fieldNames []string, dst, src reflect.Value) error { + dstType := dst.Type() + // no-op if we can't read the src + if !src.IsValid() { + return nil + } + // sanity check types match + if srcType := src.Type(); dstType != srcType { + return fmt.Errorf("cannot merge mismatched types (%s, %s) at %s", dstType, srcType, strings.Join(fieldNames, ".")) + } + + switch dstType.Kind() { + case reflect.Struct: + if hasExportedField(dstType) { + // recursively merge + for i, n := 0, dstType.NumField(); i < n; i++ { + if err := mergeValues(append(fieldNames, dstType.Field(i).Name), dst.Field(i), src.Field(i)); err != nil { + return err + } + } + } else if dst.CanSet() { + // If all fields are unexported, overwrite with src. + // Using src.IsZero() would make more sense but that's not what mergo did. 
+ dst.Set(src) + } + + case reflect.Map: + if dst.CanSet() && !src.IsZero() { + // initialize dst if needed + if dst.IsZero() { + dst.Set(reflect.MakeMap(dstType)) + } + // shallow-merge overwriting dst keys with src keys + for _, mapKey := range src.MapKeys() { + dst.SetMapIndex(mapKey, src.MapIndex(mapKey)) + } + } + + case reflect.Slice: + if dst.CanSet() && src.Len() > 0 { + // overwrite dst with non-empty src slice + dst.Set(src) + } + + case reflect.Pointer: + if dst.CanSet() && !src.IsZero() { + // overwrite dst with non-zero values for other types + if dstType.Elem().Kind() == reflect.Struct { + // use struct pointer as-is + dst.Set(src) + } else { + // shallow-copy non-struct pointer (interfaces, primitives, etc) + dst.Set(reflect.New(dstType.Elem())) + dst.Elem().Set(src.Elem()) + } + } + + default: + if dst.CanSet() && !src.IsZero() { + // overwrite dst with non-zero values for other types + dst.Set(src) + } + } + + return nil +} + +// hasExportedField returns true if the given type has any exported fields, +// or if it has any anonymous/embedded struct fields with exported fields +func hasExportedField(dstType reflect.Type) bool { + for i, n := 0, dstType.NumField(); i < n; i++ { + field := dstType.Field(i) + if field.Anonymous && field.Type.Kind() == reflect.Struct { + if hasExportedField(dstType.Field(i).Type) { + return true + } + } else if len(field.PkgPath) == 0 { + return true + } + } + return false +} diff --git a/vendor/k8s.io/client-go/tools/leaderelection/OWNERS b/vendor/k8s.io/client-go/tools/leaderelection/OWNERS index 908bdacdf..70787f2b5 100644 --- a/vendor/k8s.io/client-go/tools/leaderelection/OWNERS +++ b/vendor/k8s.io/client-go/tools/leaderelection/OWNERS @@ -2,10 +2,12 @@ approvers: - mikedanese + - jefftree reviewers: - wojtek-t - deads2k - mikedanese - ingvagabund + - jefftree emeritus_approvers: - timothysc diff --git a/vendor/k8s.io/client-go/tools/leaderelection/leaderelection.go b/vendor/k8s.io/client-go/tools/leaderelection/leaderelection.go index d9d87d55f..c3c1d9be1 100644 --- a/vendor/k8s.io/client-go/tools/leaderelection/leaderelection.go +++ b/vendor/k8s.io/client-go/tools/leaderelection/leaderelection.go @@ -173,7 +173,10 @@ type LeaderElectionConfig struct { type LeaderCallbacks struct { // OnStartedLeading is called when a LeaderElector client starts leading OnStartedLeading func(context.Context) - // OnStoppedLeading is called when a LeaderElector client stops leading + // OnStoppedLeading is called when a LeaderElector client stops leading. + // This callback is always called when the LeaderElector exits, even if it did not start leading. + // Users should not assume that OnStoppedLeading is only called after OnStartedLeading. + // see: https://github.com/kubernetes/kubernetes/pull/127675#discussion_r1780059887 OnStoppedLeading func() // OnNewLeader is called when the client observes a leader that is // not the previously observed leader. 
This includes the first observed @@ -277,16 +280,13 @@ func (le *LeaderElector) renew(ctx context.Context) { ctx, cancel := context.WithCancel(ctx) defer cancel() wait.Until(func() { - timeoutCtx, timeoutCancel := context.WithTimeout(ctx, le.config.RenewDeadline) - defer timeoutCancel() - err := wait.PollImmediateUntil(le.config.RetryPeriod, func() (bool, error) { + err := wait.PollUntilContextTimeout(ctx, le.config.RetryPeriod, le.config.RenewDeadline, true, func(ctx context.Context) (done bool, err error) { if !le.config.Coordinated { - return le.tryAcquireOrRenew(timeoutCtx), nil + return le.tryAcquireOrRenew(ctx), nil } else { - return le.tryCoordinatedRenew(timeoutCtx), nil + return le.tryCoordinatedRenew(ctx), nil } - }, timeoutCtx.Done()) - + }) le.maybeReportTransition() desc := le.config.Lock.Describe() if err == nil { @@ -426,7 +426,7 @@ func (le *LeaderElector) tryAcquireOrRenew(ctx context.Context) bool { le.setObservedRecord(&leaderElectionRecord) return true } - klog.Errorf("Failed to update lock optimitically: %v, falling back to slow path", err) + klog.Errorf("Failed to update lock optimistically: %v, falling back to slow path", err) } // 2. obtain or create the ElectionRecord diff --git a/vendor/k8s.io/client-go/tools/leaderelection/leasecandidate.go b/vendor/k8s.io/client-go/tools/leaderelection/leasecandidate.go index 74cf5bb5c..6ccd4cfbe 100644 --- a/vendor/k8s.io/client-go/tools/leaderelection/leasecandidate.go +++ b/vendor/k8s.io/client-go/tools/leaderelection/leasecandidate.go @@ -22,14 +22,14 @@ import ( "time" v1 "k8s.io/api/coordination/v1" - v1alpha1 "k8s.io/api/coordination/v1alpha1" + v1alpha2 "k8s.io/api/coordination/v1alpha2" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/fields" utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/client-go/informers" "k8s.io/client-go/kubernetes" - coordinationv1alpha1client "k8s.io/client-go/kubernetes/typed/coordination/v1alpha1" + coordinationv1alpha2client "k8s.io/client-go/kubernetes/typed/coordination/v1alpha2" "k8s.io/client-go/tools/cache" "k8s.io/client-go/util/workqueue" "k8s.io/klog/v2" @@ -43,7 +43,7 @@ type CacheSyncWaiter interface { } type LeaseCandidate struct { - leaseClient coordinationv1alpha1client.LeaseCandidateInterface + leaseClient coordinationv1alpha2client.LeaseCandidateInterface leaseCandidateInformer cache.SharedIndexInformer informerFactory informers.SharedInformerFactory hasSynced cache.InformerSynced @@ -60,7 +60,7 @@ type LeaseCandidate struct { clock clock.Clock binaryVersion, emulationVersion string - preferredStrategies []v1.CoordinatedLeaseStrategy + strategy v1.CoordinatedLeaseStrategy } // NewCandidate creates new LeaseCandidate controller that creates a @@ -73,7 +73,7 @@ func NewCandidate(clientset kubernetes.Interface, candidateName string, targetLease string, binaryVersion, emulationVersion string, - preferredStrategies []v1.CoordinatedLeaseStrategy, + strategy v1.CoordinatedLeaseStrategy, ) (*LeaseCandidate, CacheSyncWaiter, error) { fieldSelector := fields.OneTermEqualSelector("metadata.name", candidateName).String() // A separate informer factory is required because this must start before informerFactories @@ -84,10 +84,10 @@ func NewCandidate(clientset kubernetes.Interface, options.FieldSelector = fieldSelector }), ) - leaseCandidateInformer := informerFactory.Coordination().V1alpha1().LeaseCandidates().Informer() + leaseCandidateInformer := 
informerFactory.Coordination().V1alpha2().LeaseCandidates().Informer() lc := &LeaseCandidate{ - leaseClient: clientset.CoordinationV1alpha1().LeaseCandidates(candidateNamespace), + leaseClient: clientset.CoordinationV1alpha2().LeaseCandidates(candidateNamespace), leaseCandidateInformer: leaseCandidateInformer, informerFactory: informerFactory, name: candidateName, @@ -96,13 +96,13 @@ func NewCandidate(clientset kubernetes.Interface, clock: clock.RealClock{}, binaryVersion: binaryVersion, emulationVersion: emulationVersion, - preferredStrategies: preferredStrategies, + strategy: strategy, } lc.queue = workqueue.NewTypedRateLimitingQueueWithConfig(workqueue.DefaultTypedControllerRateLimiter[int](), workqueue.TypedRateLimitingQueueConfig[int]{Name: "leasecandidate"}) h, err := leaseCandidateInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{ UpdateFunc: func(oldObj, newObj interface{}) { - if leasecandidate, ok := newObj.(*v1alpha1.LeaseCandidate); ok { + if leasecandidate, ok := newObj.(*v1alpha2.LeaseCandidate); ok { if leasecandidate.Spec.PingTime != nil && leasecandidate.Spec.PingTime.After(leasecandidate.Spec.RenewTime.Time) { lc.enqueueLease() } @@ -184,17 +184,17 @@ func (c *LeaseCandidate) ensureLease(ctx context.Context) error { return nil } -func (c *LeaseCandidate) newLeaseCandidate() *v1alpha1.LeaseCandidate { - lc := &v1alpha1.LeaseCandidate{ +func (c *LeaseCandidate) newLeaseCandidate() *v1alpha2.LeaseCandidate { + lc := &v1alpha2.LeaseCandidate{ ObjectMeta: metav1.ObjectMeta{ Name: c.name, Namespace: c.namespace, }, - Spec: v1alpha1.LeaseCandidateSpec{ - LeaseName: c.leaseName, - BinaryVersion: c.binaryVersion, - EmulationVersion: c.emulationVersion, - PreferredStrategies: c.preferredStrategies, + Spec: v1alpha2.LeaseCandidateSpec{ + LeaseName: c.leaseName, + BinaryVersion: c.binaryVersion, + EmulationVersion: c.emulationVersion, + Strategy: c.strategy, }, } lc.Spec.RenewTime = &metav1.MicroTime{Time: c.clock.Now()} diff --git a/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/interface.go b/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/interface.go index 053a7570d..2a1eb9caa 100644 --- a/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/interface.go +++ b/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/interface.go @@ -35,74 +35,8 @@ const ( endpointsResourceLock = "endpoints" configMapsResourceLock = "configmaps" LeasesResourceLock = "leases" - // When using endpointsLeasesResourceLock, you need to ensure that - // API Priority & Fairness is configured with non-default flow-schema - // that will catch the necessary operations on leader-election related - // endpoint objects. 
- // - // The example of such flow scheme could look like this: - // apiVersion: flowcontrol.apiserver.k8s.io/v1beta2 - // kind: FlowSchema - // metadata: - // name: my-leader-election - // spec: - // distinguisherMethod: - // type: ByUser - // matchingPrecedence: 200 - // priorityLevelConfiguration: - // name: leader-election # reference the PL - // rules: - // - resourceRules: - // - apiGroups: - // - "" - // namespaces: - // - '*' - // resources: - // - endpoints - // verbs: - // - get - // - create - // - update - // subjects: - // - kind: ServiceAccount - // serviceAccount: - // name: '*' - // namespace: kube-system - endpointsLeasesResourceLock = "endpointsleases" - // When using configMapsLeasesResourceLock, you need to ensure that - // API Priority & Fairness is configured with non-default flow-schema - // that will catch the necessary operations on leader-election related - // configmap objects. - // - // The example of such flow scheme could look like this: - // apiVersion: flowcontrol.apiserver.k8s.io/v1beta2 - // kind: FlowSchema - // metadata: - // name: my-leader-election - // spec: - // distinguisherMethod: - // type: ByUser - // matchingPrecedence: 200 - // priorityLevelConfiguration: - // name: leader-election # reference the PL - // rules: - // - resourceRules: - // - apiGroups: - // - "" - // namespaces: - // - '*' - // resources: - // - configmaps - // verbs: - // - get - // - create - // - update - // subjects: - // - kind: ServiceAccount - // serviceAccount: - // name: '*' - // namespace: kube-system - configMapsLeasesResourceLock = "configmapsleases" + endpointsLeasesResourceLock = "endpointsleases" + configMapsLeasesResourceLock = "configmapsleases" ) // LeaderElectionRecord is the record that is stored in the leader election annotation. @@ -177,9 +111,9 @@ func New(lockType string, ns string, name string, coreClient corev1.CoreV1Interf } switch lockType { case endpointsResourceLock: - return nil, fmt.Errorf("endpoints lock is removed, migrate to %s (using version v0.27.x)", endpointsLeasesResourceLock) + return nil, fmt.Errorf("endpoints lock is removed, migrate to %s", LeasesResourceLock) case configMapsResourceLock: - return nil, fmt.Errorf("configmaps lock is removed, migrate to %s (using version v0.27.x)", configMapsLeasesResourceLock) + return nil, fmt.Errorf("configmaps lock is removed, migrate to %s", LeasesResourceLock) case LeasesResourceLock: return leaseLock, nil case endpointsLeasesResourceLock: diff --git a/vendor/k8s.io/client-go/tools/record/events_cache.go b/vendor/k8s.io/client-go/tools/record/events_cache.go index abba06362..170074d4b 100644 --- a/vendor/k8s.io/client-go/tools/record/events_cache.go +++ b/vendor/k8s.io/client-go/tools/record/events_cache.go @@ -23,14 +23,13 @@ import ( "sync" "time" - "github.com/golang/groupcache/lru" - v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/strategicpatch" "k8s.io/client-go/util/flowcontrol" "k8s.io/utils/clock" + "k8s.io/utils/lru" ) const ( @@ -77,6 +76,7 @@ func getSpamKey(event *v1.Event) string { event.InvolvedObject.Name, string(event.InvolvedObject.UID), event.InvolvedObject.APIVersion, + event.Type, }, "") } @@ -90,8 +90,6 @@ type EventFilterFunc func(event *v1.Event) bool // EventSourceObjectSpamFilter is responsible for throttling // the amount of events a source and object can produce. 
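Pulling the leader-election changes above together, a hedged end-to-end sketch: only the leases lock remains usable (the endpoints and configmaps types now return migration errors), and OnStoppedLeading must be written to tolerate firing even when leadership was never acquired. Namespace, lock name, and durations below are invented.

package example

import (
    "context"
    "time"

    "k8s.io/client-go/kubernetes"
    "k8s.io/client-go/tools/leaderelection"
    "k8s.io/client-go/tools/leaderelection/resourcelock"
    "k8s.io/klog/v2"
)

func runWithLeaderElection(ctx context.Context, cs kubernetes.Interface, id string, run func(context.Context)) error {
    lock, err := resourcelock.New(
        resourcelock.LeasesResourceLock,
        "kube-system", "example-controller",
        cs.CoreV1(), cs.CoordinationV1(),
        resourcelock.ResourceLockConfig{Identity: id},
    )
    if err != nil {
        return err
    }

    leaderelection.RunOrDie(ctx, leaderelection.LeaderElectionConfig{
        Lock:            lock,
        LeaseDuration:   15 * time.Second,
        RenewDeadline:   10 * time.Second,
        RetryPeriod:     2 * time.Second,
        ReleaseOnCancel: true,
        Callbacks: leaderelection.LeaderCallbacks{
            OnStartedLeading: run,
            // Per the clarified contract: this runs whenever the elector
            // exits, even if OnStartedLeading was never called.
            OnStoppedLeading: func() { klog.Info("no longer the leader") },
        },
    })
    return nil
}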
type EventSourceObjectSpamFilter struct { - sync.RWMutex - // the cache that manages last synced state cache *lru.Cache @@ -133,8 +131,6 @@ func (f *EventSourceObjectSpamFilter) Filter(event *v1.Event) bool { eventKey := f.spamKeyFunc(event) // do we have a record of similar events in our cache? - f.Lock() - defer f.Unlock() value, found := f.cache.Get(eventKey) if found { record = value.(spamRecord) diff --git a/vendor/k8s.io/client-go/transport/cache_go118.go b/vendor/k8s.io/client-go/transport/cache_go118.go index d21d5137d..babdaf8b5 100644 --- a/vendor/k8s.io/client-go/transport/cache_go118.go +++ b/vendor/k8s.io/client-go/transport/cache_go118.go @@ -18,7 +18,29 @@ limitations under the License. package transport +// this is just to make the "unused" linter rule happy +var _ = isCacheKeyComparable[tlsCacheKey] + // assert at compile time that tlsCacheKey is comparable in a way that will never panic at runtime. -var _ = isComparable[tlsCacheKey] +// +// Golang 1.20 introduced an exception to type constraints that allows comparable, but not +// necessarily strictly comparable type arguments to satisfy the `comparable` type constraint, +// thus allowing interfaces to fulfil the `comparable` constraint. +// However, by definition, "A comparison of two interface values with identical +// dynamic types causes a run-time panic if that type is not comparable". +// +// We want to make sure that comparing two `tlsCacheKey` elements won't cause a +// runtime panic. In order to do that, we'll force the `tlsCacheKey` to be strictly +// comparable, thus making it impossible for it to contain interfaces. +// To assert strict comparability, we'll use another definition: "Type +// parameters are comparable if they are strictly comparable". +// Below, we first construct a type parameter from the `tlsCacheKey` type so that +// we can then push this type parameter to a comparable check, thus checking these +// are strictly comparable. +// +// Original suggestion from https://github.com/golang/go/issues/56548#issuecomment-1317673963 +func isCacheKeyComparable[K tlsCacheKey]() { + _ = isComparable[K] +} func isComparable[T comparable]() {} diff --git a/vendor/k8s.io/client-go/transport/round_trippers.go b/vendor/k8s.io/client-go/transport/round_trippers.go index e2d1dcc9a..52fefb531 100644 --- a/vendor/k8s.io/client-go/transport/round_trippers.go +++ b/vendor/k8s.io/client-go/transport/round_trippers.go @@ -86,6 +86,7 @@ func DebugWrappers(rt http.RoundTripper) http.RoundTripper { type authProxyRoundTripper struct { username string + uid string groups []string extra map[string][]string @@ -98,15 +99,17 @@ var _ utilnet.RoundTripperWrapper = &authProxyRoundTripper{} // authentication terminating proxy cases // assuming you pull the user from the context: // username is the user.Info.GetName() of the user +// uid is the user.Info.GetUID() of the user // groups is the user.Info.GetGroups() of the user // extra is the user.Info.GetExtra() of the user // extra can contain any additional information that the authenticator // thought was interesting, for example authorization scopes. // In order to faithfully round-trip through an impersonation flow, these keys // MUST be lowercase. 
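The auth-proxy round tripper now also carries a uid and sets X-Remote-Uid when it is non-empty. A minimal sketch of the updated constructor call; every value below is invented.

package example

import (
    "net/http"

    "k8s.io/client-go/transport"
)

// newImpersonatingProxyTransport forwards the authenticated user's identity,
// including the new uid, through the X-Remote-* headers.
func newImpersonatingProxyTransport(username, uid string, groups []string) http.RoundTripper {
    extra := map[string][]string{
        // keys must be lowercase to round-trip through impersonation
        "scopes": {"view"},
    }
    return transport.NewAuthProxyRoundTripper(username, uid, groups, extra, http.DefaultTransport)
}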
-func NewAuthProxyRoundTripper(username string, groups []string, extra map[string][]string, rt http.RoundTripper) http.RoundTripper { +func NewAuthProxyRoundTripper(username, uid string, groups []string, extra map[string][]string, rt http.RoundTripper) http.RoundTripper { return &authProxyRoundTripper{ username: username, + uid: uid, groups: groups, extra: extra, rt: rt, @@ -115,14 +118,15 @@ func NewAuthProxyRoundTripper(username string, groups []string, extra map[string func (rt *authProxyRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { req = utilnet.CloneRequest(req) - SetAuthProxyHeaders(req, rt.username, rt.groups, rt.extra) + SetAuthProxyHeaders(req, rt.username, rt.uid, rt.groups, rt.extra) return rt.rt.RoundTrip(req) } // SetAuthProxyHeaders stomps the auth proxy header fields. It mutates its argument. -func SetAuthProxyHeaders(req *http.Request, username string, groups []string, extra map[string][]string) { +func SetAuthProxyHeaders(req *http.Request, username, uid string, groups []string, extra map[string][]string) { req.Header.Del("X-Remote-User") + req.Header.Del("X-Remote-Uid") req.Header.Del("X-Remote-Group") for key := range req.Header { if strings.HasPrefix(strings.ToLower(key), strings.ToLower("X-Remote-Extra-")) { @@ -131,6 +135,9 @@ func SetAuthProxyHeaders(req *http.Request, username string, groups []string, ex } req.Header.Set("X-Remote-User", username) + if len(uid) > 0 { + req.Header.Set("X-Remote-Uid", uid) + } for _, group := range groups { req.Header.Add("X-Remote-Group", group) } diff --git a/vendor/k8s.io/client-go/util/apply/apply.go b/vendor/k8s.io/client-go/util/apply/apply.go new file mode 100644 index 000000000..0cc85df6c --- /dev/null +++ b/vendor/k8s.io/client-go/util/apply/apply.go @@ -0,0 +1,49 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package apply + +import ( + "fmt" + + cbor "k8s.io/apimachinery/pkg/runtime/serializer/cbor/direct" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/json" + "k8s.io/client-go/features" + "k8s.io/client-go/rest" +) + +// NewRequest builds a new server-side apply request. The provided apply configuration object will +// be marshalled to the request's body using the default encoding, and the Content-Type header will +// be set to application/apply-patch with the appropriate structured syntax name suffix (today, +// either +yaml or +cbor, see +// https://www.iana.org/assignments/media-type-structured-suffix/media-type-structured-suffix.xhtml). 
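A hedged sketch of calling the new apply helper whose body follows below: NewRequest only encodes the apply configuration and picks the patch type (+yaml, or +cbor when both CBOR client gates are enabled); the caller still sets the resource path and field manager. The apply configuration and names here are hypothetical.

package example

import (
    "context"
    "fmt"

    corev1ac "k8s.io/client-go/applyconfigurations/core/v1"
    "k8s.io/client-go/rest"
    "k8s.io/client-go/util/apply"
)

func applyConfigMap(ctx context.Context, restClient rest.Interface) error {
    ac := corev1ac.ConfigMap("demo", "default").
        WithData(map[string]string{"key": "value"})

    req, err := apply.NewRequest(restClient, ac)
    if err != nil {
        return err
    }

    result := req.
        Namespace("default").
        Resource("configmaps").
        Name("demo").
        Param("fieldManager", "example-controller").
        Do(ctx)
    if err := result.Error(); err != nil {
        return fmt.Errorf("apply failed: %w", err)
    }
    return nil
}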
+func NewRequest(client rest.Interface, applyConfiguration interface{}) (*rest.Request, error) { + pt := types.ApplyYAMLPatchType + marshal := json.Marshal + + if features.FeatureGates().Enabled(features.ClientsAllowCBOR) && features.FeatureGates().Enabled(features.ClientsPreferCBOR) { + pt = types.ApplyCBORPatchType + marshal = cbor.Marshal + } + + body, err := marshal(applyConfiguration) + if err != nil { + return nil, fmt.Errorf("failed to marshal apply configuration: %w", err) + } + + return client.Patch(pt).Body(body), nil +} diff --git a/vendor/k8s.io/client-go/util/consistencydetector/list_data_consistency_detector.go b/vendor/k8s.io/client-go/util/consistencydetector/list_data_consistency_detector.go index 7610c05c2..61b8fe28b 100644 --- a/vendor/k8s.io/client-go/util/consistencydetector/list_data_consistency_detector.go +++ b/vendor/k8s.io/client-go/util/consistencydetector/list_data_consistency_detector.go @@ -32,6 +32,12 @@ func init() { dataConsistencyDetectionForListFromCacheEnabled, _ = strconv.ParseBool(os.Getenv("KUBE_LIST_FROM_CACHE_INCONSISTENCY_DETECTOR")) } +// IsDataConsistencyDetectionForListEnabled returns true when +// the KUBE_LIST_FROM_CACHE_INCONSISTENCY_DETECTOR environment variable was set during a binary startup. +func IsDataConsistencyDetectionForListEnabled() bool { + return dataConsistencyDetectionForListFromCacheEnabled +} + // CheckListFromCacheDataConsistencyIfRequested performs a data consistency check only when // the KUBE_LIST_FROM_CACHE_INCONSISTENCY_DETECTOR environment variable was set during a binary startup // for requests that have a high chance of being served from the watch-cache. @@ -50,7 +56,7 @@ func init() { // the cache (even though this might not be true for some requests) // and issue the second call to get data from etcd for comparison. func CheckListFromCacheDataConsistencyIfRequested[T runtime.Object](ctx context.Context, identity string, listItemsFn ListFunc[T], optionsUsedToReceiveList metav1.ListOptions, receivedList runtime.Object) { - if !dataConsistencyDetectionForListFromCacheEnabled { + if !IsDataConsistencyDetectionForListEnabled() { return } checkListFromCacheDataConsistencyIfRequestedInternal(ctx, identity, listItemsFn, optionsUsedToReceiveList, receivedList) diff --git a/vendor/k8s.io/client-go/util/flowcontrol/backoff.go b/vendor/k8s.io/client-go/util/flowcontrol/backoff.go index 82e4c4c40..899b8e34e 100644 --- a/vendor/k8s.io/client-go/util/flowcontrol/backoff.go +++ b/vendor/k8s.io/client-go/util/flowcontrol/backoff.go @@ -32,7 +32,12 @@ type backoffEntry struct { type Backoff struct { sync.RWMutex - Clock clock.Clock + Clock clock.Clock + // HasExpiredFunc controls the logic that determines whether the backoff + // counter should be reset, and when to GC old backoff entries. 
If nil, the + // default hasExpired function will restart the backoff factor to the + // beginning after observing time has passed at least equal to 2*maxDuration + HasExpiredFunc func(eventTime time.Time, lastUpdate time.Time, maxDuration time.Duration) bool defaultDuration time.Duration maxDuration time.Duration perItemBackoff map[string]*backoffEntry @@ -93,7 +98,7 @@ func (p *Backoff) Next(id string, eventTime time.Time) { p.Lock() defer p.Unlock() entry, ok := p.perItemBackoff[id] - if !ok || hasExpired(eventTime, entry.lastUpdate, p.maxDuration) { + if !ok || p.hasExpired(eventTime, entry.lastUpdate, p.maxDuration) { entry = p.initEntryUnsafe(id) entry.backoff += p.jitter(entry.backoff) } else { @@ -119,7 +124,7 @@ func (p *Backoff) IsInBackOffSince(id string, eventTime time.Time) bool { if !ok { return false } - if hasExpired(eventTime, entry.lastUpdate, p.maxDuration) { + if p.hasExpired(eventTime, entry.lastUpdate, p.maxDuration) { return false } return p.Clock.Since(eventTime) < entry.backoff @@ -133,21 +138,21 @@ func (p *Backoff) IsInBackOffSinceUpdate(id string, eventTime time.Time) bool { if !ok { return false } - if hasExpired(eventTime, entry.lastUpdate, p.maxDuration) { + if p.hasExpired(eventTime, entry.lastUpdate, p.maxDuration) { return false } return eventTime.Sub(entry.lastUpdate) < entry.backoff } -// Garbage collect records that have aged past maxDuration. Backoff users are expected -// to invoke this periodically. +// Garbage collect records that have aged past their expiration, which defaults +// to 2*maxDuration (see hasExpired godoc). Backoff users are expected to invoke +// this periodically. func (p *Backoff) GC() { p.Lock() defer p.Unlock() now := p.Clock.Now() for id, entry := range p.perItemBackoff { - if now.Sub(entry.lastUpdate) > p.maxDuration*2 { - // GC when entry has not been updated for 2*maxDuration + if p.hasExpired(now, entry.lastUpdate, p.maxDuration) { delete(p.perItemBackoff, id) } } @@ -174,7 +179,10 @@ func (p *Backoff) jitter(delay time.Duration) time.Duration { return time.Duration(p.rand.Float64() * p.maxJitterFactor * float64(delay)) } -// After 2*maxDuration we restart the backoff factor to the beginning -func hasExpired(eventTime time.Time, lastUpdate time.Time, maxDuration time.Duration) bool { +// Unless an alternate function is provided, after 2*maxDuration we restart the backoff factor to the beginning +func (p *Backoff) hasExpired(eventTime time.Time, lastUpdate time.Time, maxDuration time.Duration) bool { + if p.HasExpiredFunc != nil { + return p.HasExpiredFunc(eventTime, lastUpdate, maxDuration) + } return eventTime.Sub(lastUpdate) > maxDuration*2 // consider stable if it's ok for twice the maxDuration } diff --git a/vendor/k8s.io/client-go/util/workqueue/delaying_queue.go b/vendor/k8s.io/client-go/util/workqueue/delaying_queue.go index 958b96a80..e33a6c692 100644 --- a/vendor/k8s.io/client-go/util/workqueue/delaying_queue.go +++ b/vendor/k8s.io/client-go/util/workqueue/delaying_queue.go @@ -64,26 +64,33 @@ type TypedDelayingQueueConfig[T comparable] struct { // NewDelayingQueue does not emit metrics. For use with a MetricsProvider, please use // NewDelayingQueueWithConfig instead and specify a name. // -// Deprecated: use TypedNewDelayingQueue instead. +// Deprecated: use NewTypedDelayingQueue instead. func NewDelayingQueue() DelayingInterface { return NewDelayingQueueWithConfig(DelayingQueueConfig{}) } -// TypedNewDelayingQueue constructs a new workqueue with delayed queuing ability. 
-// TypedNewDelayingQueue does not emit metrics. For use with a MetricsProvider, please use -// TypedNewDelayingQueueWithConfig instead and specify a name. -func TypedNewDelayingQueue[T comparable]() TypedDelayingInterface[T] { +// NewTypedDelayingQueue constructs a new workqueue with delayed queuing ability. +// NewTypedDelayingQueue does not emit metrics. For use with a MetricsProvider, please use +// NewTypedDelayingQueueWithConfig instead and specify a name. +func NewTypedDelayingQueue[T comparable]() TypedDelayingInterface[T] { return NewTypedDelayingQueueWithConfig(TypedDelayingQueueConfig[T]{}) } // NewDelayingQueueWithConfig constructs a new workqueue with options to // customize different properties. // -// Deprecated: use TypedNewDelayingQueueWithConfig instead. +// Deprecated: use NewTypedDelayingQueueWithConfig instead. func NewDelayingQueueWithConfig(config DelayingQueueConfig) DelayingInterface { return NewTypedDelayingQueueWithConfig[any](config) } +// TypedNewDelayingQueue exists for backwards compatibility only. +// +// Deprecated: use NewTypedDelayingQueueWithConfig instead. +func TypedNewDelayingQueue[T comparable]() TypedDelayingInterface[T] { + return NewTypedDelayingQueue[T]() +} + // NewTypedDelayingQueueWithConfig constructs a new workqueue with options to // customize different properties. func NewTypedDelayingQueueWithConfig[T comparable](config TypedDelayingQueueConfig[T]) TypedDelayingInterface[T] { @@ -134,7 +141,7 @@ func newDelayingQueue[T comparable](clock clock.WithTicker, q TypedInterface[T], clock: clock, heartbeat: clock.NewTicker(maxWait), stopCh: make(chan struct{}), - waitingForAddCh: make(chan *waitFor, 1000), + waitingForAddCh: make(chan *waitFor[T], 1000), metrics: newRetryMetrics(name, provider), } @@ -158,15 +165,15 @@ type delayingType[T comparable] struct { heartbeat clock.Ticker // waitingForAddCh is a buffered channel that feeds waitingForAdd - waitingForAddCh chan *waitFor + waitingForAddCh chan *waitFor[T] // metrics counts the number of retries metrics retryMetrics } // waitFor holds the data to add and the time it should be added -type waitFor struct { - data t +type waitFor[T any] struct { + data T readyAt time.Time // index in the priority queue (heap) index int @@ -180,15 +187,15 @@ type waitFor struct { // it has been removed from the queue and placed at index Len()-1 by // container/heap. Push adds an item at index Len(), and container/heap // percolates it into the correct location. -type waitForPriorityQueue []*waitFor +type waitForPriorityQueue[T any] []*waitFor[T] -func (pq waitForPriorityQueue) Len() int { +func (pq waitForPriorityQueue[T]) Len() int { return len(pq) } -func (pq waitForPriorityQueue) Less(i, j int) bool { +func (pq waitForPriorityQueue[T]) Less(i, j int) bool { return pq[i].readyAt.Before(pq[j].readyAt) } -func (pq waitForPriorityQueue) Swap(i, j int) { +func (pq waitForPriorityQueue[T]) Swap(i, j int) { pq[i], pq[j] = pq[j], pq[i] pq[i].index = i pq[j].index = j @@ -196,16 +203,16 @@ func (pq waitForPriorityQueue) Swap(i, j int) { // Push adds an item to the queue. Push should not be called directly; instead, // use `heap.Push`. -func (pq *waitForPriorityQueue) Push(x interface{}) { +func (pq *waitForPriorityQueue[T]) Push(x interface{}) { n := len(*pq) - item := x.(*waitFor) + item := x.(*waitFor[T]) item.index = n *pq = append(*pq, item) } // Pop removes an item from the queue. Pop should not be called directly; // instead, use `heap.Pop`. 
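With waitFor and the priority queue now parameterized, the typed constructors no longer go through interface{} at all. A small hedged sketch of the renamed NewTypedDelayingQueue; the item type and values are arbitrary.

package example

import (
    "fmt"
    "time"

    "k8s.io/client-go/util/workqueue"
)

func delayingQueueExample() {
    q := workqueue.NewTypedDelayingQueue[string]()
    defer q.ShutDown()

    q.Add("ready-now")
    q.AddAfter("ready-later", 500*time.Millisecond)

    for i := 0; i < 2; i++ {
        item, shutdown := q.Get() // item is a string, no type assertion needed
        if shutdown {
            return
        }
        fmt.Println("processing", item)
        q.Done(item)
    }
}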
-func (pq *waitForPriorityQueue) Pop() interface{} { +func (pq *waitForPriorityQueue[T]) Pop() interface{} { n := len(*pq) item := (*pq)[n-1] item.index = -1 @@ -215,7 +222,7 @@ func (pq *waitForPriorityQueue) Pop() interface{} { // Peek returns the item at the beginning of the queue, without removing the // item or otherwise mutating the queue. It is safe to call directly. -func (pq waitForPriorityQueue) Peek() interface{} { +func (pq waitForPriorityQueue[T]) Peek() interface{} { return pq[0] } @@ -247,7 +254,7 @@ func (q *delayingType[T]) AddAfter(item T, duration time.Duration) { select { case <-q.stopCh: // unblock if ShutDown() is called - case q.waitingForAddCh <- &waitFor{data: item, readyAt: q.clock.Now().Add(duration)}: + case q.waitingForAddCh <- &waitFor[T]{data: item, readyAt: q.clock.Now().Add(duration)}: } } @@ -266,10 +273,10 @@ func (q *delayingType[T]) waitingLoop() { // Make a timer that expires when the item at the head of the waiting queue is ready var nextReadyAtTimer clock.Timer - waitingForQueue := &waitForPriorityQueue{} + waitingForQueue := &waitForPriorityQueue[T]{} heap.Init(waitingForQueue) - waitingEntryByData := map[t]*waitFor{} + waitingEntryByData := map[T]*waitFor[T]{} for { if q.TypedInterface.ShuttingDown() { @@ -280,13 +287,13 @@ func (q *delayingType[T]) waitingLoop() { // Add ready entries for waitingForQueue.Len() > 0 { - entry := waitingForQueue.Peek().(*waitFor) + entry := waitingForQueue.Peek().(*waitFor[T]) if entry.readyAt.After(now) { break } - entry = heap.Pop(waitingForQueue).(*waitFor) - q.Add(entry.data.(T)) + entry = heap.Pop(waitingForQueue).(*waitFor[T]) + q.Add(entry.data) delete(waitingEntryByData, entry.data) } @@ -296,7 +303,7 @@ func (q *delayingType[T]) waitingLoop() { if nextReadyAtTimer != nil { nextReadyAtTimer.Stop() } - entry := waitingForQueue.Peek().(*waitFor) + entry := waitingForQueue.Peek().(*waitFor[T]) nextReadyAtTimer = q.clock.NewTimer(entry.readyAt.Sub(now)) nextReadyAt = nextReadyAtTimer.C() } @@ -315,7 +322,7 @@ func (q *delayingType[T]) waitingLoop() { if waitEntry.readyAt.After(q.clock.Now()) { insert(waitingForQueue, waitingEntryByData, waitEntry) } else { - q.Add(waitEntry.data.(T)) + q.Add(waitEntry.data) } drained := false @@ -325,7 +332,7 @@ func (q *delayingType[T]) waitingLoop() { if waitEntry.readyAt.After(q.clock.Now()) { insert(waitingForQueue, waitingEntryByData, waitEntry) } else { - q.Add(waitEntry.data.(T)) + q.Add(waitEntry.data) } default: drained = true @@ -336,7 +343,7 @@ func (q *delayingType[T]) waitingLoop() { } // insert adds the entry to the priority queue, or updates the readyAt if it already exists in the queue -func insert(q *waitForPriorityQueue, knownEntries map[t]*waitFor, entry *waitFor) { +func insert[T comparable](q *waitForPriorityQueue[T], knownEntries map[T]*waitFor[T], entry *waitFor[T]) { // if the entry already exists, update the time only if it would cause the item to be queued sooner existing, exists := knownEntries[entry.data] if exists { diff --git a/vendor/k8s.io/client-go/util/workqueue/metrics.go b/vendor/k8s.io/client-go/util/workqueue/metrics.go index f012ccc55..4400cb65e 100644 --- a/vendor/k8s.io/client-go/util/workqueue/metrics.go +++ b/vendor/k8s.io/client-go/util/workqueue/metrics.go @@ -26,10 +26,10 @@ import ( // This file provides abstractions for setting the provider (e.g., prometheus) // of metrics. 
-type queueMetrics interface { - add(item t) - get(item t) - done(item t) +type queueMetrics[T comparable] interface { + add(item T) + get(item T) + done(item T) updateUnfinishedWork() } @@ -70,7 +70,7 @@ func (noopMetric) Set(float64) {} func (noopMetric) Observe(float64) {} // defaultQueueMetrics expects the caller to lock before setting any metrics. -type defaultQueueMetrics struct { +type defaultQueueMetrics[T comparable] struct { clock clock.Clock // current depth of a workqueue @@ -81,15 +81,15 @@ type defaultQueueMetrics struct { latency HistogramMetric // how long processing an item from a workqueue takes workDuration HistogramMetric - addTimes map[t]time.Time - processingStartTimes map[t]time.Time + addTimes map[T]time.Time + processingStartTimes map[T]time.Time // how long have current threads been working? unfinishedWorkSeconds SettableGaugeMetric longestRunningProcessor SettableGaugeMetric } -func (m *defaultQueueMetrics) add(item t) { +func (m *defaultQueueMetrics[T]) add(item T) { if m == nil { return } @@ -101,7 +101,7 @@ func (m *defaultQueueMetrics) add(item t) { } } -func (m *defaultQueueMetrics) get(item t) { +func (m *defaultQueueMetrics[T]) get(item T) { if m == nil { return } @@ -114,7 +114,7 @@ func (m *defaultQueueMetrics) get(item t) { } } -func (m *defaultQueueMetrics) done(item t) { +func (m *defaultQueueMetrics[T]) done(item T) { if m == nil { return } @@ -125,7 +125,7 @@ func (m *defaultQueueMetrics) done(item t) { } } -func (m *defaultQueueMetrics) updateUnfinishedWork() { +func (m *defaultQueueMetrics[T]) updateUnfinishedWork() { // Note that a summary metric would be better for this, but prometheus // doesn't seem to have non-hacky ways to reset the summary metrics. var total float64 @@ -141,15 +141,15 @@ func (m *defaultQueueMetrics) updateUnfinishedWork() { m.longestRunningProcessor.Set(oldest) } -type noMetrics struct{} +type noMetrics[T any] struct{} -func (noMetrics) add(item t) {} -func (noMetrics) get(item t) {} -func (noMetrics) done(item t) {} -func (noMetrics) updateUnfinishedWork() {} +func (noMetrics[T]) add(item T) {} +func (noMetrics[T]) get(item T) {} +func (noMetrics[T]) done(item T) {} +func (noMetrics[T]) updateUnfinishedWork() {} // Gets the time since the specified start in seconds. 
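Although queueMetrics is now parameterised, the behaviour is unchanged: newQueueMetrics (further down in this file) still returns the no-op noMetrics[T] unless the queue has a name and a non-noop provider, and the typed queue constructor skips the unfinished-work goroutine in that case. A short sketch, with an illustrative queue name:

    // Unnamed queue: metrics stay no-op and updateUnfinishedWorkLoop is never started.
    plain := workqueue.NewTypedDelayingQueue[string]()

    // Named queue: it gets defaultQueueMetrics[T] once a real MetricsProvider has been
    // registered (globally via SetProvider, or per-config as shown in the next hunk).
    named := workqueue.NewTypedDelayingQueueWithConfig(workqueue.TypedDelayingQueueConfig[string]{
        Name: "cert-manager-operator",
    })
    _, _ = plain, named
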
-func (m *defaultQueueMetrics) sinceInSeconds(start time.Time) float64 { +func (m *defaultQueueMetrics[T]) sinceInSeconds(start time.Time) float64 { return m.clock.Since(start).Seconds() } @@ -210,28 +210,15 @@ func (_ noopMetricsProvider) NewRetriesMetric(name string) CounterMetric { return noopMetric{} } -var globalMetricsFactory = queueMetricsFactory{ - metricsProvider: noopMetricsProvider{}, -} - -type queueMetricsFactory struct { - metricsProvider MetricsProvider +var globalMetricsProvider MetricsProvider = noopMetricsProvider{} - onlyOnce sync.Once -} +var setGlobalMetricsProviderOnce sync.Once -func (f *queueMetricsFactory) setProvider(mp MetricsProvider) { - f.onlyOnce.Do(func() { - f.metricsProvider = mp - }) -} - -func (f *queueMetricsFactory) newQueueMetrics(name string, clock clock.Clock) queueMetrics { - mp := f.metricsProvider +func newQueueMetrics[T comparable](mp MetricsProvider, name string, clock clock.Clock) queueMetrics[T] { if len(name) == 0 || mp == (noopMetricsProvider{}) { - return noMetrics{} + return noMetrics[T]{} } - return &defaultQueueMetrics{ + return &defaultQueueMetrics[T]{ clock: clock, depth: mp.NewDepthMetric(name), adds: mp.NewAddsMetric(name), @@ -239,8 +226,8 @@ func (f *queueMetricsFactory) newQueueMetrics(name string, clock clock.Clock) qu workDuration: mp.NewWorkDurationMetric(name), unfinishedWorkSeconds: mp.NewUnfinishedWorkSecondsMetric(name), longestRunningProcessor: mp.NewLongestRunningProcessorSecondsMetric(name), - addTimes: map[t]time.Time{}, - processingStartTimes: map[t]time.Time{}, + addTimes: map[T]time.Time{}, + processingStartTimes: map[T]time.Time{}, } } @@ -251,7 +238,7 @@ func newRetryMetrics(name string, provider MetricsProvider) retryMetrics { } if provider == nil { - provider = globalMetricsFactory.metricsProvider + provider = globalMetricsProvider } return &defaultRetryMetrics{ @@ -262,5 +249,7 @@ func newRetryMetrics(name string, provider MetricsProvider) retryMetrics { // SetProvider sets the metrics provider for all subsequently created work // queues. Only the first call has an effect. 
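The queueMetricsFactory indirection is replaced by a package-level globalMetricsProvider guarded by a sync.Once, so the contract stays the same: only the first SetProvider call takes effect, and a MetricsProvider set directly in a queue config still wins over the global one. Sketch, assuming prometheusProvider and otherProvider are types implementing workqueue.MetricsProvider (they are not part of this diff):

    workqueue.SetProvider(prometheusProvider{}) // applies to queues created after this point
    workqueue.SetProvider(otherProvider{})      // no effect: the once has already fired

    // A provider in the config bypasses the global one for this queue entirely.
    q := workqueue.NewTypedDelayingQueueWithConfig(workqueue.TypedDelayingQueueConfig[string]{
        Name:            "cert-manager-operator",
        MetricsProvider: prometheusProvider{},
    })
    _ = q
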
func SetProvider(metricsProvider MetricsProvider) { - globalMetricsFactory.setProvider(metricsProvider) + setGlobalMetricsProviderOnce.Do(func() { + globalMetricsProvider = metricsProvider + }) } diff --git a/vendor/k8s.io/client-go/util/workqueue/queue.go b/vendor/k8s.io/client-go/util/workqueue/queue.go index ff715482c..3cec1768a 100644 --- a/vendor/k8s.io/client-go/util/workqueue/queue.go +++ b/vendor/k8s.io/client-go/util/workqueue/queue.go @@ -138,13 +138,9 @@ func NewNamed(name string) *Type { // newQueueWithConfig constructs a new named workqueue // with the ability to customize different properties for testing purposes func newQueueWithConfig[T comparable](config TypedQueueConfig[T], updatePeriod time.Duration) *Typed[T] { - var metricsFactory *queueMetricsFactory + metricsProvider := globalMetricsProvider if config.MetricsProvider != nil { - metricsFactory = &queueMetricsFactory{ - metricsProvider: config.MetricsProvider, - } - } else { - metricsFactory = &globalMetricsFactory + metricsProvider = config.MetricsProvider } if config.Clock == nil { @@ -158,12 +154,12 @@ func newQueueWithConfig[T comparable](config TypedQueueConfig[T], updatePeriod t return newQueue( config.Clock, config.Queue, - metricsFactory.newQueueMetrics(config.Name, config.Clock), + newQueueMetrics[T](metricsProvider, config.Name, config.Clock), updatePeriod, ) } -func newQueue[T comparable](c clock.WithTicker, queue Queue[T], metrics queueMetrics, updatePeriod time.Duration) *Typed[T] { +func newQueue[T comparable](c clock.WithTicker, queue Queue[T], metrics queueMetrics[T], updatePeriod time.Duration) *Typed[T] { t := &Typed[T]{ clock: c, queue: queue, @@ -176,7 +172,7 @@ func newQueue[T comparable](c clock.WithTicker, queue Queue[T], metrics queueMet // Don't start the goroutine for a type of noMetrics so we don't consume // resources unnecessarily - if _, ok := metrics.(noMetrics); !ok { + if _, ok := metrics.(noMetrics[T]); !ok { go t.updateUnfinishedWorkLoop() } @@ -209,14 +205,13 @@ type Typed[t comparable] struct { shuttingDown bool drain bool - metrics queueMetrics + metrics queueMetrics[t] unfinishedWorkUpdatePeriod time.Duration clock clock.WithTicker } type empty struct{} -type t interface{} type set[t comparable] map[t]empty func (s set[t]) has(item t) bool { diff --git a/vendor/k8s.io/code-generator/cmd/applyconfiguration-gen/generators/applyconfiguration.go b/vendor/k8s.io/code-generator/cmd/applyconfiguration-gen/generators/applyconfiguration.go index 89f7ed4f0..9b6aa7cec 100644 --- a/vendor/k8s.io/code-generator/cmd/applyconfiguration-gen/generators/applyconfiguration.go +++ b/vendor/k8s.io/code-generator/cmd/applyconfiguration-gen/generators/applyconfiguration.go @@ -19,6 +19,7 @@ package generators import ( "io" "path" + "slices" "strings" "k8s.io/gengo/v2/generator" @@ -114,7 +115,7 @@ func (g *applyConfigurationGenerator) GenerateType(c *generator.Context, t *type sw.Do(constructor, typeParams) } } - g.generateWithFuncs(t, typeParams, sw, nil) + g.generateWithFuncs(t, typeParams, sw, nil, &[]string{}) g.generateGetters(t, typeParams, sw, nil) return sw.Error() } @@ -176,7 +177,8 @@ func (g *applyConfigurationGenerator) generateGetters(t *types.Type, typeParams } } -func (g *applyConfigurationGenerator) generateWithFuncs(t *types.Type, typeParams TypeParams, sw *generator.SnippetWriter, embed *memberParams) { +func (g *applyConfigurationGenerator) generateWithFuncs(t *types.Type, typeParams TypeParams, sw *generator.SnippetWriter, embed *memberParams, + generated *[]string) { for _, member 
:= range t.Members { if blocklisted(t, member) { continue @@ -186,6 +188,11 @@ func (g *applyConfigurationGenerator) generateWithFuncs(t *types.Type, typeParam memberType = &types.Type{Kind: types.Pointer, Elem: memberType} } if jsonTags, ok := lookupJSONTags(member); ok { + if slices.Contains(*generated, member.Name) { + klog.V(5).Infof("With%s already generated on %s, skipping\n", member.Name, t.Name) + continue + } + *generated = append(*generated, member.Name) memberParams := memberParams{ TypeParams: typeParams, Member: member, @@ -194,7 +201,7 @@ func (g *applyConfigurationGenerator) generateWithFuncs(t *types.Type, typeParam EmbeddedIn: embed, } if memberParams.Member.Embedded { - g.generateWithFuncs(member.Type, typeParams, sw, &memberParams) + g.generateWithFuncs(member.Type, typeParams, sw, &memberParams, generated) if !jsonTags.inline { // non-inlined embeds are nillable and need a "ensure exists" utility function sw.Do(ensureEmbedExists, memberParams) @@ -281,9 +288,9 @@ func (g *applyConfigurationGenerator) generateMemberWith(sw *generator.SnippetWr sw.Do("func (b *$.ApplyConfig.ApplyConfiguration|public$) With$.Member.Name$(value $.MemberType|raw$) *$.ApplyConfig.ApplyConfiguration|public$ {\n", memberParams) g.ensureEmbedExistsIfApplicable(sw, memberParams) if g.refGraph.isApplyConfig(memberParams.Member.Type) || isNillable(memberParams.Member.Type) { - sw.Do("b.$.Member.Name$ = value\n", memberParams) + sw.Do("b$if ne .EmbeddedIn nil$.$.EmbeddedIn.MemberType.Elem.Name.Name$$end$.$.Member.Name$ = value\n", memberParams) } else { - sw.Do("b.$.Member.Name$ = &value\n", memberParams) + sw.Do("b$if ne .EmbeddedIn nil$.$.EmbeddedIn.MemberType.Elem.Name.Name$$end$.$.Member.Name$ = &value\n", memberParams) } sw.Do(" return b\n", memberParams) sw.Do("}\n", memberParams) @@ -297,7 +304,7 @@ func (g *applyConfigurationGenerator) generateMemberGetter(sw *generator.Snippet sw.Do("func (b *$.ApplyConfig.ApplyConfiguration|public$) Get$.Member.Name$() *$.MemberType|raw$ {\n", memberParams) } g.ensureEmbedExistsIfApplicable(sw, memberParams) - sw.Do(" return b.$.Member.Name$\n", memberParams) + sw.Do(" return b$if ne .EmbeddedIn nil$.$.EmbeddedIn.MemberType.Elem.Name.Name$$end$.$.Member.Name$\n", memberParams) sw.Do("}\n", memberParams) } @@ -324,15 +331,15 @@ func (g *applyConfigurationGenerator) generateMemberWithForSlice(sw *generator.S sw.Do("}\n", memberParams) if memberIsPointerToSlice { - sw.Do("*b.$.Member.Name$ = append(*b.$.Member.Name$, *values[i])\n", memberParams) + sw.Do("*b$if ne .EmbeddedIn nil$.$.EmbeddedIn.MemberType.Elem.Name.Name$$end$.$.Member.Name$ = append(*b$if ne .EmbeddedIn nil$.$.EmbeddedIn.MemberType.Elem.Name.Name$$end$.$.Member.Name$, *values[i])\n", memberParams) } else { - sw.Do("b.$.Member.Name$ = append(b.$.Member.Name$, *values[i])\n", memberParams) + sw.Do("b$if ne .EmbeddedIn nil$.$.EmbeddedIn.MemberType.Elem.Name.Name$$end$.$.Member.Name$ = append(b$if ne .EmbeddedIn nil$.$.EmbeddedIn.MemberType.Elem.Name.Name$$end$.$.Member.Name$, *values[i])\n", memberParams) } } else { if memberIsPointerToSlice { - sw.Do("*b.$.Member.Name$ = append(*b.$.Member.Name$, values[i])\n", memberParams) + sw.Do("*b$if ne .EmbeddedIn nil$.$.EmbeddedIn.MemberType.Elem.Name.Name$$end$.$.Member.Name$ = append(*b$if ne .EmbeddedIn nil$.$.EmbeddedIn.MemberType.Elem.Name.Name$$end$.$.Member.Name$, values[i])\n", memberParams) } else { - sw.Do("b.$.Member.Name$ = append(b.$.Member.Name$, values[i])\n", memberParams) + sw.Do("b$if ne .EmbeddedIn 
nil$.$.EmbeddedIn.MemberType.Elem.Name.Name$$end$.$.Member.Name$ = append(b$if ne .EmbeddedIn nil$.$.EmbeddedIn.MemberType.Elem.Name.Name$$end$.$.Member.Name$, values[i])\n", memberParams) } } sw.Do(" }\n", memberParams) @@ -347,11 +354,11 @@ func (g *applyConfigurationGenerator) generateMemberWithForMap(sw *generator.Sni sw.Do("// overwriting an existing map entries in $.Member.Name$ field with the same key.\n", memberParams) sw.Do("func (b *$.ApplyConfig.ApplyConfiguration|public$) With$.Member.Name$(entries $.MemberType|raw$) *$.ApplyConfig.ApplyConfiguration|public$ {\n", memberParams) g.ensureEmbedExistsIfApplicable(sw, memberParams) - sw.Do(" if b.$.Member.Name$ == nil && len(entries) > 0 {\n", memberParams) - sw.Do(" b.$.Member.Name$ = make($.MemberType|raw$, len(entries))\n", memberParams) + sw.Do(" if b$if ne .EmbeddedIn nil$.$.EmbeddedIn.MemberType.Elem.Name.Name$$end$.$.Member.Name$ == nil && len(entries) > 0 {\n", memberParams) + sw.Do(" b$if ne .EmbeddedIn nil$.$.EmbeddedIn.MemberType.Elem.Name.Name$$end$.$.Member.Name$ = make($.MemberType|raw$, len(entries))\n", memberParams) sw.Do(" }\n", memberParams) sw.Do(" for k, v := range entries {\n", memberParams) - sw.Do(" b.$.Member.Name$[k] = v\n", memberParams) + sw.Do(" b$if ne .EmbeddedIn nil$.$.EmbeddedIn.MemberType.Elem.Name.Name$$end$.$.Member.Name$[k] = v\n", memberParams) sw.Do(" }\n", memberParams) sw.Do(" return b\n", memberParams) sw.Do("}\n", memberParams) diff --git a/vendor/k8s.io/code-generator/cmd/applyconfiguration-gen/generators/internal.go b/vendor/k8s.io/code-generator/cmd/applyconfiguration-gen/generators/internal.go index abd650100..3ccc7ce83 100644 --- a/vendor/k8s.io/code-generator/cmd/applyconfiguration-gen/generators/internal.go +++ b/vendor/k8s.io/code-generator/cmd/applyconfiguration-gen/generators/internal.go @@ -19,13 +19,11 @@ package generators import ( "io" - "gopkg.in/yaml.v2" - - "k8s.io/kube-openapi/pkg/schemaconv" - "k8s.io/gengo/v2/generator" "k8s.io/gengo/v2/namer" "k8s.io/gengo/v2/types" + "k8s.io/kube-openapi/pkg/schemaconv" + yaml "sigs.k8s.io/yaml/goyaml.v2" ) // utilGenerator generates the ForKind() utility function. 
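Two behavioural fixes land in applyconfiguration.go above: a generated-members slice stops the generator from emitting duplicate With/Get funcs when an embedded type repeats a member name, and the templates now address members through the embedded struct (the .EmbeddedIn branch) instead of relying on Go field promotion. Roughly, a setter for a member promoted from an embedded spec now comes out in the shape below; the type and field names are hypothetical, not this repository's actual generated output:

    type CertManagerSpecApplyConfiguration struct {
        LogLevel *string
    }

    // The apply configuration embeds its spec; generated setters now spell out the
    // embedded struct instead of writing through field promotion.
    type CertManagerApplyConfiguration struct {
        CertManagerSpecApplyConfiguration
    }

    func (b *CertManagerApplyConfiguration) WithLogLevel(value string) *CertManagerApplyConfiguration {
        b.CertManagerSpecApplyConfiguration.LogLevel = &value // was: b.LogLevel = &value
        return b
    }
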
@@ -71,11 +69,12 @@ func (g *internalGenerator) GenerateType(c *generator.Context, _ *types.Type, w return err } sw.Do(schemaBlock, map[string]interface{}{ - "schemaYAML": string(schemaYAML), - "smdParser": smdParser, - "smdNewParser": smdNewParser, - "yamlObject": yamlObject, - "yamlUnmarshal": yamlUnmarshal, + "schemaYAML": string(schemaYAML), + "smdParser": smdParser, + "smdNewParser": smdNewParser, + "fmtSprintf": fmtSprintf, + "syncOnce": syncOnce, + "yamlObject": yamlObject, }) return sw.Error() @@ -87,13 +86,13 @@ func Parser() *{{.smdParser|raw}} { var err error parser, err = {{.smdNewParser|raw}}(schemaYAML) if err != nil { - panic(fmt.Sprintf("Failed to parse schema: %v", err)) + panic({{.fmtSprintf|raw}}("Failed to parse schema: %v", err)) } }) return parser } -var parserOnce sync.Once +var parserOnce {{.syncOnce|raw}} var parser *{{.smdParser|raw}} var schemaYAML = {{.yamlObject|raw}}(` + "`{{.schemaYAML}}`" + `) ` diff --git a/vendor/k8s.io/code-generator/cmd/applyconfiguration-gen/generators/targets.go b/vendor/k8s.io/code-generator/cmd/applyconfiguration-gen/generators/targets.go index 197e93251..b033d22d8 100644 --- a/vendor/k8s.io/code-generator/cmd/applyconfiguration-gen/generators/targets.go +++ b/vendor/k8s.io/code-generator/cmd/applyconfiguration-gen/generators/targets.go @@ -188,7 +188,7 @@ func targetForApplyConfigurationsPackage(outputDirBase, outputPkgBase, pkgSubdir localPkg: outputPkg, groupVersion: gv, applyConfig: toGenerate, - imports: generator.NewImportTracker(), + imports: generator.NewImportTrackerForPackage(outputPkg), refGraph: refs, openAPIType: openAPIType, }) @@ -211,7 +211,7 @@ func targetForUtils(outputDirBase, outputPkgBase string, boilerplate []byte, gro OutputFilename: "utils.go", }, outputPackage: outputPkgBase, - imports: generator.NewImportTracker(), + imports: generator.NewImportTrackerForPackage(outputPkgBase), groupVersions: groupVersions, typesForGroupVersion: applyConfigsForGroupVersion, groupGoNames: groupGoNames, @@ -236,7 +236,7 @@ func targetForInternal(outputDirBase, outputPkgBase string, boilerplate []byte, OutputFilename: "internal.go", }, outputPackage: outputPkgBase, - imports: generator.NewImportTracker(), + imports: generator.NewImportTrackerForPackage(outputPkg), typeModels: models, }) return generators diff --git a/vendor/k8s.io/code-generator/cmd/applyconfiguration-gen/generators/types.go b/vendor/k8s.io/code-generator/cmd/applyconfiguration-gen/generators/types.go index bc0b54d03..08c53c2a4 100644 --- a/vendor/k8s.io/code-generator/cmd/applyconfiguration-gen/generators/types.go +++ b/vendor/k8s.io/code-generator/cmd/applyconfiguration-gen/generators/types.go @@ -19,6 +19,8 @@ package generators import "k8s.io/gengo/v2/types" var ( + fmtSprintf = types.Ref("fmt", "Sprintf") + syncOnce = types.Ref("sync", "Once") applyConfiguration = types.Ref("k8s.io/apimachinery/pkg/runtime", "ApplyConfiguration") groupVersionKind = types.Ref("k8s.io/apimachinery/pkg/runtime/schema", "GroupVersionKind") typeMeta = types.Ref("k8s.io/apimachinery/pkg/apis/meta/v1", "TypeMeta") @@ -31,5 +33,4 @@ var ( smdParser = types.Ref("sigs.k8s.io/structured-merge-diff/v4/typed", "Parser") testingTypeConverter = types.Ref("k8s.io/client-go/testing", "TypeConverter") yamlObject = types.Ref("sigs.k8s.io/structured-merge-diff/v4/typed", "YAMLObject") - yamlUnmarshal = types.Ref("gopkg.in/yaml.v2", "Unmarshal") ) diff --git a/vendor/k8s.io/code-generator/cmd/client-gen/args/args.go b/vendor/k8s.io/code-generator/cmd/client-gen/args/args.go index 
e0914e69d..5620fc0c2 100644 --- a/vendor/k8s.io/code-generator/cmd/client-gen/args/args.go +++ b/vendor/k8s.io/code-generator/cmd/client-gen/args/args.go @@ -61,6 +61,9 @@ type Args struct { // If non-empty, Apply functions are generated for each type and reference the apply builders. // If empty (""), Apply functions are not generated. ApplyConfigurationPackage string + + // PrefersProtobuf determines if the generated clientset uses protobuf for API requests. + PrefersProtobuf bool } func New() *Args { @@ -99,6 +102,8 @@ func (args *Args) AddFlags(fs *pflag.FlagSet, inputBase string) { "list of comma separated plural exception definitions in Type:PluralizedType form") fs.StringVar(&args.ApplyConfigurationPackage, "apply-configuration-package", args.ApplyConfigurationPackage, "optional package of apply configurations, generated by applyconfiguration-gen, that are required to generate Apply functions for each type in the clientset. By default Apply functions are not generated.") + fs.BoolVar(&args.PrefersProtobuf, "prefers-protobuf", args.PrefersProtobuf, + "when set, client-gen will generate a clientset that uses protobuf for API requests") // support old flags fs.SetNormalizeFunc(mapFlagName("clientset-path", "output-pkg", fs.GetNormalizeFunc())) diff --git a/vendor/k8s.io/code-generator/cmd/client-gen/generators/client_generator.go b/vendor/k8s.io/code-generator/cmd/client-gen/generators/client_generator.go index 12a4afdbf..c8fa01a8e 100644 --- a/vendor/k8s.io/code-generator/cmd/client-gen/generators/client_generator.go +++ b/vendor/k8s.io/code-generator/cmd/client-gen/generators/client_generator.go @@ -128,7 +128,7 @@ func DefaultNameSystem() string { return "public" } -func targetForGroup(gv clientgentypes.GroupVersion, typeList []*types.Type, clientsetDir, clientsetPkg string, groupPkgName string, groupGoName string, apiPath string, inputPkg string, applyBuilderPkg string, boilerplate []byte) generator.Target { +func targetForGroup(gv clientgentypes.GroupVersion, typeList []*types.Type, clientsetDir, clientsetPkg string, groupPkgName string, groupGoName string, apiPath string, inputPkg string, applyBuilderPkg string, boilerplate []byte, prefersProtobuf bool) generator.Target { subdir := []string{"typed", strings.ToLower(groupPkgName), strings.ToLower(gv.Version.NonEmpty())} gvDir := filepath.Join(clientsetDir, filepath.Join(subdir...)) gvPkg := path.Join(clientsetPkg, path.Join(subdir...)) @@ -160,8 +160,9 @@ func targetForGroup(gv clientgentypes.GroupVersion, typeList []*types.Type, clie group: gv.Group.NonEmpty(), version: gv.Version.String(), groupGoName: groupGoName, + prefersProtobuf: prefersProtobuf, typeToMatch: t, - imports: generator.NewImportTracker(), + imports: generator.NewImportTrackerForPackage(gvPkg), }) } @@ -177,7 +178,7 @@ func targetForGroup(gv clientgentypes.GroupVersion, typeList []*types.Type, clie groupGoName: groupGoName, apiPath: apiPath, types: typeList, - imports: generator.NewImportTracker(), + imports: generator.NewImportTrackerForPackage(gvPkg), }) expansionFileName := "generated_expansion.go" @@ -214,7 +215,7 @@ func targetForClientset(args *args.Args, clientsetDir, clientsetPkg string, grou groups: args.Groups, groupGoNames: groupGoNames, clientsetPackage: clientsetPkg, - imports: generator.NewImportTracker(), + imports: generator.NewImportTrackerForPackage(clientsetPkg), }, } return generators @@ -260,7 +261,7 @@ NextGroup: OutputPath: schemeDir, Groups: args.Groups, GroupGoNames: groupGoNames, - ImportTracker: generator.NewImportTracker(), + 
ImportTracker: generator.NewImportTrackerForPackage(schemePkg), CreateRegistry: internalClient, }, } @@ -424,7 +425,7 @@ func GetTargets(context *generator.Context, args *args.Args) []generator.Target targetForGroup( gv, orderer.OrderTypes(types), clientsetDir, clientsetPkg, group.PackageName, groupGoNames[gv], args.ClientsetAPIPath, - inputPath, args.ApplyConfigurationPackage, boilerplate)) + inputPath, args.ApplyConfigurationPackage, boilerplate, args.PrefersProtobuf)) if args.FakeClient { targetList = append(targetList, fake.TargetForGroup(gv, orderer.OrderTypes(types), clientsetDir, clientsetPkg, group.PackageName, groupGoNames[gv], inputPath, args.ApplyConfigurationPackage, boilerplate)) diff --git a/vendor/k8s.io/code-generator/cmd/client-gen/generators/fake/fake_client_generator.go b/vendor/k8s.io/code-generator/cmd/client-gen/generators/fake/fake_client_generator.go index 74c98232c..935efec21 100644 --- a/vendor/k8s.io/code-generator/cmd/client-gen/generators/fake/fake_client_generator.go +++ b/vendor/k8s.io/code-generator/cmd/client-gen/generators/fake/fake_client_generator.go @@ -58,12 +58,12 @@ func TargetForGroup(gv clientgentypes.GroupVersion, typeList []*types.Type, clie OutputFilename: "fake_" + strings.ToLower(c.Namers["private"].Name(t)) + ".go", }, outputPackage: outputPkg, + realClientPackage: realClientPkg, inputPackage: inputPkg, - group: gv.Group.NonEmpty(), version: gv.Version.String(), groupGoName: groupGoName, typeToMatch: t, - imports: generator.NewImportTracker(), + imports: generator.NewImportTrackerForPackage(outputPkg), applyConfigurationPackage: applyBuilderPackage, }) } @@ -74,11 +74,10 @@ func TargetForGroup(gv clientgentypes.GroupVersion, typeList []*types.Type, clie }, outputPackage: outputPkg, realClientPackage: realClientPkg, - group: gv.Group.NonEmpty(), version: gv.Version.String(), groupGoName: groupGoName, types: typeList, - imports: generator.NewImportTracker(), + imports: generator.NewImportTrackerForPackage(outputPkg), }) return generators }, @@ -111,7 +110,7 @@ func TargetForClientset(args *args.Args, clientsetDir, clientsetPkg string, appl groups: args.Groups, groupGoNames: groupGoNames, fakeClientsetPackage: clientsetPkg, - imports: generator.NewImportTracker(), + imports: generator.NewImportTrackerForPackage(clientsetPkg), realClientsetPackage: clientsetPkg, applyConfigurationPackage: applyConfigurationPkg, }, @@ -123,7 +122,7 @@ func TargetForClientset(args *args.Args, clientsetDir, clientsetPkg string, appl OutputPkg: clientsetPkg, Groups: args.Groups, GroupGoNames: groupGoNames, - ImportTracker: generator.NewImportTracker(), + ImportTracker: generator.NewImportTrackerForPackage(clientsetPkg), PrivateScheme: true, }, } diff --git a/vendor/k8s.io/code-generator/cmd/client-gen/generators/fake/generator_fake_for_clientset.go b/vendor/k8s.io/code-generator/cmd/client-gen/generators/fake/generator_fake_for_clientset.go index 3bf7aa12b..ffb02ebba 100644 --- a/vendor/k8s.io/code-generator/cmd/client-gen/generators/fake/generator_fake_for_clientset.go +++ b/vendor/k8s.io/code-generator/cmd/client-gen/generators/fake/generator_fake_for_clientset.go @@ -77,7 +77,6 @@ func (g *genClientset) Imports(c *generator.Context) (imports []string) { "fakediscovery \"k8s.io/client-go/discovery/fake\"", "k8s.io/apimachinery/pkg/runtime", "k8s.io/apimachinery/pkg/watch", - "k8s.io/apimachinery/pkg/api/meta/testrestmapper", ) return diff --git a/vendor/k8s.io/code-generator/cmd/client-gen/generators/fake/generator_fake_for_group.go 
b/vendor/k8s.io/code-generator/cmd/client-gen/generators/fake/generator_fake_for_group.go index d9c9b8bac..04c586a0e 100644 --- a/vendor/k8s.io/code-generator/cmd/client-gen/generators/fake/generator_fake_for_group.go +++ b/vendor/k8s.io/code-generator/cmd/client-gen/generators/fake/generator_fake_for_group.go @@ -34,7 +34,6 @@ type genFakeForGroup struct { generator.GoGenerator outputPackage string // must be a Go import-path realClientPackage string // must be a Go import-path - group string version string groupGoName string // types in this group @@ -78,6 +77,8 @@ func (g *genFakeForGroup) GenerateType(c *generator.Context, t *types.Type, w io "Fake": c.Universe.Type(types.Name{Package: "k8s.io/client-go/testing", Name: "Fake"}), "RESTClientInterface": c.Universe.Type(types.Name{Package: "k8s.io/client-go/rest", Name: "Interface"}), "RESTClient": c.Universe.Type(types.Name{Package: "k8s.io/client-go/rest", Name: "RESTClient"}), + "FakeClient": c.Universe.Type(types.Name{Package: "k8s.io/client-go/gentype", Name: "FakeClient"}), + "NewFakeClient": c.Universe.Function(types.Name{Package: "k8s.io/client-go/gentype", Name: "NewFakeClient"}), } sw.Do(groupClientTemplate, m) @@ -110,13 +111,13 @@ type Fake$.GroupGoName$$.Version$ struct { var getterImplNamespaced = ` func (c *Fake$.GroupGoName$$.Version$) $.type|publicPlural$(namespace string) $.realClientPackage$.$.type|public$Interface { - return &Fake$.type|publicPlural${c, namespace} + return newFake$.type|publicPlural$(c, namespace) } ` var getterImplNonNamespaced = ` func (c *Fake$.GroupGoName$$.Version$) $.type|publicPlural$() $.realClientPackage$.$.type|public$Interface { - return &Fake$.type|publicPlural${c} + return newFake$.type|publicPlural$(c) } ` diff --git a/vendor/k8s.io/code-generator/cmd/client-gen/generators/fake/generator_fake_for_type.go b/vendor/k8s.io/code-generator/cmd/client-gen/generators/fake/generator_fake_for_type.go index 20499cf8c..6c1410039 100644 --- a/vendor/k8s.io/code-generator/cmd/client-gen/generators/fake/generator_fake_for_type.go +++ b/vendor/k8s.io/code-generator/cmd/client-gen/generators/fake/generator_fake_for_type.go @@ -35,7 +35,7 @@ import ( type genFakeForType struct { generator.GoGenerator outputPackage string // Must be a Go import-path - group string + realClientPackage string // Must be a Go import-path version string groupGoName string inputPackage string @@ -61,37 +61,9 @@ func (g *genFakeForType) Imports(c *generator.Context) (imports []string) { return g.imports.ImportLines() } -// Ideally, we'd like genStatus to return true if there is a subresource path -// registered for "status" in the API server, but we do not have that -// information, so genStatus returns true if the type has a status field. -func genStatus(t *types.Type) bool { - // Default to true if we have a Status member - hasStatus := false - for _, m := range t.Members { - if m.Name == "Status" { - hasStatus = true - break - } - } - - tags := util.MustParseClientGenTags(append(t.SecondClosestCommentLines, t.CommentLines...)) - return hasStatus && !tags.NoStatus -} - -// hasObjectMeta returns true if the type has a ObjectMeta field. -func hasObjectMeta(t *types.Type) bool { - for _, m := range t.Members { - if m.Embedded && m.Name == "ObjectMeta" { - return true - } - } - return false -} - // GenerateType makes the body of a file implementing the individual typed client for type t. 
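With the group template changes above, the generated Fake<Group><Version> getters stop building a struct literal and instead call a per-type constructor that wraps the fake client from k8s.io/client-go/gentype; note also that the new --prefers-protobuf flag from the args.go hunk earlier is threaded into targetForGroup for the real typed clients and not into this fake target. For a cluster-scoped resource the emitted getter now looks roughly like this (the CertManager names illustrate the rendered template and are not copied from generated code); namespaced resources get the same shape with a namespace argument forwarded:

    func (c *FakeOperatorV1alpha1) CertManagers() v1alpha1.CertManagerInterface {
        return newFakeCertManagers(c)
    }
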
func (g *genFakeForType) GenerateType(c *generator.Context, t *types.Type, w io.Writer) error { sw := generator.NewSnippetWriter(w, c, "$", "$") - pkg := path.Base(t.Name.Package) tags, err := util.ParseClientGenTags(append(t.SecondClosestCommentLines, t.CommentLines...)) if err != nil { return err @@ -99,30 +71,28 @@ func (g *genFakeForType) GenerateType(c *generator.Context, t *types.Type, w io. const pkgClientGoTesting = "k8s.io/client-go/testing" m := map[string]interface{}{ - "type": t, - "inputType": t, - "resultType": t, - "subresourcePath": "", - "package": pkg, - "Package": namer.IC(pkg), - "namespaced": !tags.NonNamespaced, - "Group": namer.IC(g.group), - "GroupGoName": g.groupGoName, - "Version": namer.IC(g.version), - "version": g.version, - "SchemeGroupVersion": c.Universe.Type(types.Name{Package: t.Name.Package, Name: "SchemeGroupVersion"}), - "CreateOptions": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/apis/meta/v1", Name: "CreateOptions"}), - "DeleteOptions": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/apis/meta/v1", Name: "DeleteOptions"}), - "GetOptions": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/apis/meta/v1", Name: "GetOptions"}), - "ListOptions": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/apis/meta/v1", Name: "ListOptions"}), - "PatchOptions": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/apis/meta/v1", Name: "PatchOptions"}), - "ApplyOptions": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/apis/meta/v1", Name: "ApplyOptions"}), - "UpdateOptions": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/apis/meta/v1", Name: "UpdateOptions"}), - "Everything": c.Universe.Function(types.Name{Package: "k8s.io/apimachinery/pkg/labels", Name: "Everything"}), - "PatchType": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/types", Name: "PatchType"}), - "ApplyPatchType": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/types", Name: "ApplyPatchType"}), - "watchInterface": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/watch", Name: "Interface"}), - "jsonMarshal": c.Universe.Type(types.Name{Package: "encoding/json", Name: "Marshal"}), + "type": t, + "inputType": t, + "resultType": t, + "subresourcePath": "", + "namespaced": !tags.NonNamespaced, + "GroupGoName": g.groupGoName, + "Version": namer.IC(g.version), + "realClientInterface": c.Universe.Type(types.Name{Package: g.realClientPackage, Name: t.Name.Name + "Interface"}), + "SchemeGroupVersion": c.Universe.Type(types.Name{Package: t.Name.Package, Name: "SchemeGroupVersion"}), + "CreateOptions": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/apis/meta/v1", Name: "CreateOptions"}), + "DeleteOptions": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/apis/meta/v1", Name: "DeleteOptions"}), + "GetOptions": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/apis/meta/v1", Name: "GetOptions"}), + "ListOptions": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/apis/meta/v1", Name: "ListOptions"}), + "PatchOptions": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/apis/meta/v1", Name: "PatchOptions"}), + "ApplyOptions": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/apis/meta/v1", Name: "ApplyOptions"}), + "UpdateOptions": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/apis/meta/v1", Name: "UpdateOptions"}), + "PatchType": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/types", Name: 
"PatchType"}), + "ApplyPatchType": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/types", Name: "ApplyPatchType"}), + "watchInterface": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/watch", Name: "Interface"}), + "jsonMarshal": c.Universe.Type(types.Name{Package: "encoding/json", Name: "Marshal"}), + "fmtErrorf": c.Universe.Type(types.Name{Package: "fmt", Name: "Errorf"}), + "contextContext": c.Universe.Type(types.Name{Package: "context", Name: "Context"}), "NewRootListActionWithOptions": c.Universe.Function(types.Name{Package: pkgClientGoTesting, Name: "NewRootListActionWithOptions"}), "NewListActionWithOptions": c.Universe.Function(types.Name{Package: pkgClientGoTesting, Name: "NewListActionWithOptions"}), @@ -130,8 +100,6 @@ func (g *genFakeForType) GenerateType(c *generator.Context, t *types.Type, w io. "NewGetActionWithOptions": c.Universe.Function(types.Name{Package: pkgClientGoTesting, Name: "NewGetActionWithOptions"}), "NewRootDeleteActionWithOptions": c.Universe.Function(types.Name{Package: pkgClientGoTesting, Name: "NewRootDeleteActionWithOptions"}), "NewDeleteActionWithOptions": c.Universe.Function(types.Name{Package: pkgClientGoTesting, Name: "NewDeleteActionWithOptions"}), - "NewRootDeleteCollectionActionWithOptions": c.Universe.Function(types.Name{Package: pkgClientGoTesting, Name: "NewRootDeleteCollectionActionWithOptions"}), - "NewDeleteCollectionActionWithOptions": c.Universe.Function(types.Name{Package: pkgClientGoTesting, Name: "NewDeleteCollectionActionWithOptions"}), "NewRootUpdateActionWithOptions": c.Universe.Function(types.Name{Package: pkgClientGoTesting, Name: "NewRootUpdateActionWithOptions"}), "NewUpdateActionWithOptions": c.Universe.Function(types.Name{Package: pkgClientGoTesting, Name: "NewUpdateActionWithOptions"}), "NewRootCreateActionWithOptions": c.Universe.Function(types.Name{Package: pkgClientGoTesting, Name: "NewRootCreateActionWithOptions"}), @@ -144,11 +112,16 @@ func (g *genFakeForType) GenerateType(c *generator.Context, t *types.Type, w io. 
"NewGetSubresourceActionWithOptions": c.Universe.Function(types.Name{Package: pkgClientGoTesting, Name: "NewGetSubresourceActionWithOptions"}), "NewRootGetSubresourceActionWithOptions": c.Universe.Function(types.Name{Package: pkgClientGoTesting, Name: "NewRootGetSubresourceActionWithOptions"}), "NewRootUpdateSubresourceActionWithOptions": c.Universe.Function(types.Name{Package: pkgClientGoTesting, Name: "NewRootUpdateSubresourceActionWithOptions"}), - "NewRootPatchActionWithOptions": c.Universe.Function(types.Name{Package: pkgClientGoTesting, Name: "NewRootPatchActionWithOptions"}), - "NewPatchActionWithOptions": c.Universe.Function(types.Name{Package: pkgClientGoTesting, Name: "NewPatchActionWithOptions"}), "NewRootPatchSubresourceActionWithOptions": c.Universe.Function(types.Name{Package: pkgClientGoTesting, Name: "NewRootPatchSubresourceActionWithOptions"}), "NewPatchSubresourceActionWithOptions": c.Universe.Function(types.Name{Package: pkgClientGoTesting, Name: "NewPatchSubresourceActionWithOptions"}), - "ExtractFromListOptions": c.Universe.Function(types.Name{Package: pkgClientGoTesting, Name: "ExtractFromListOptions"}), + "FakeClient": c.Universe.Type(types.Name{Package: "k8s.io/client-go/gentype", Name: "FakeClient"}), + "NewFakeClient": c.Universe.Function(types.Name{Package: "k8s.io/client-go/gentype", Name: "NewFakeClient"}), + "FakeClientWithApply": c.Universe.Type(types.Name{Package: "k8s.io/client-go/gentype", Name: "FakeClientWithApply"}), + "NewFakeClientWithApply": c.Universe.Function(types.Name{Package: "k8s.io/client-go/gentype", Name: "NewFakeClientWithApply"}), + "FakeClientWithList": c.Universe.Type(types.Name{Package: "k8s.io/client-go/gentype", Name: "FakeClientWithList"}), + "NewFakeClientWithList": c.Universe.Function(types.Name{Package: "k8s.io/client-go/gentype", Name: "NewFakeClientWithList"}), + "FakeClientWithListAndApply": c.Universe.Type(types.Name{Package: "k8s.io/client-go/gentype", Name: "FakeClientWithListAndApply"}), + "NewFakeClientWithListAndApply": c.Universe.Function(types.Name{Package: "k8s.io/client-go/gentype", Name: "NewFakeClientWithListAndApply"}), } generateApply := len(g.applyConfigurationPackage) > 0 @@ -158,56 +131,23 @@ func (g *genFakeForType) GenerateType(c *generator.Context, t *types.Type, w io. 
m["inputApplyConfig"] = types.Ref(path.Join(g.applyConfigurationPackage, gvString), t.Name.Name+"ApplyConfiguration") } - if tags.NonNamespaced { - sw.Do(structNonNamespaced, m) - } else { - sw.Do(structNamespaced, m) - } + listableOrAppliable := noList | noApply - if tags.NoVerbs { - return sw.Error() + if !tags.NoVerbs && tags.HasVerb("list") { + listableOrAppliable |= withList } - sw.Do(resource, m) - sw.Do(kind, m) - if tags.HasVerb("get") { - sw.Do(getTemplate, m) - } - if tags.HasVerb("list") { - if hasObjectMeta(t) { - sw.Do(listUsingOptionsTemplate, m) - } else { - sw.Do(listTemplate, m) - } - } - if tags.HasVerb("watch") { - sw.Do(watchTemplate, m) + if !tags.NoVerbs && tags.HasVerb("apply") && generateApply { + listableOrAppliable |= withApply } - if tags.HasVerb("create") { - sw.Do(createTemplate, m) - } - if tags.HasVerb("update") { - sw.Do(updateTemplate, m) - } - if tags.HasVerb("updateStatus") && genStatus(t) { - sw.Do(updateStatusTemplate, m) - } - if tags.HasVerb("delete") { - sw.Do(deleteTemplate, m) - } - if tags.HasVerb("deleteCollection") { - sw.Do(deleteCollectionTemplate, m) - } - if tags.HasVerb("patch") { - sw.Do(patchTemplate, m) - } - if tags.HasVerb("apply") && generateApply { - sw.Do(applyTemplate, m) - } - if tags.HasVerb("applyStatus") && generateApply && genStatus(t) { - sw.Do(applyStatusTemplate, m) + sw.Do(structType[listableOrAppliable], m) + sw.Do(newStruct[listableOrAppliable], m) + + if tags.NoVerbs { + return sw.Error() } + _, typeGVString := util.ParsePathGroupVersion(g.inputPackage) // generate extended client methods @@ -251,7 +191,6 @@ func (g *genFakeForType) GenerateType(c *generator.Context, t *types.Type, w io. } if e.HasVerb("list") { - sw.Do(adjustTemplate(e.VerbName, e.VerbType, listTemplate), m) } @@ -305,77 +244,147 @@ func adjustTemplate(name, verbType, template string) string { return strings.ReplaceAll(template, " "+titler.String(verbType), " "+name) } -// template for the struct that implements the type's interface -var structNamespaced = ` -// Fake$.type|publicPlural$ implements $.type|public$Interface -type Fake$.type|publicPlural$ struct { - Fake *Fake$.GroupGoName$$.Version$ - ns string -} -` +// struct and constructor variants +const ( + // The following values are bits in a bitmask. + // The values which can be set indicate list support and apply support; + // to make the declarations easier to read (like a truth table), corresponding zero-values + // are also declared. + noList = 0 + noApply = 0 + withList = 1 << iota + withApply +) -// template for the struct that implements the type's interface -var structNonNamespaced = ` -// Fake$.type|publicPlural$ implements $.type|public$Interface -type Fake$.type|publicPlural$ struct { - Fake *Fake$.GroupGoName$$.Version$ +// The following string slices are similar to maps, but with combinable keys used as indices. +// Each entry defines whether it supports lists and/or apply; each bit is then toggled: +// * noList, noApply: index 0; +// * withList, noApply: index 1; +// * noList, withApply: index 2; +// * withList, withApply: index 3. +// Go enforces index unicity in these kinds of declarations. 
+ +// struct declarations +var structType = []string{ + noList | noApply: ` + // fake$.type|publicPlural$ implements $.type|public$Interface + type fake$.type|publicPlural$ struct { + *$.FakeClient|raw$[*$.type|raw$] + Fake *Fake$.GroupGoName$$.Version$ + } + `, + withList | noApply: ` + // fake$.type|publicPlural$ implements $.type|public$Interface + type fake$.type|publicPlural$ struct { + *$.FakeClientWithList|raw$[*$.type|raw$, *$.type|raw$List] + Fake *Fake$.GroupGoName$$.Version$ + } + `, + noList | withApply: ` + // fake$.type|publicPlural$ implements $.type|public$Interface + type fake$.type|publicPlural$ struct { + *$.FakeClientWithApply|raw$[*$.type|raw$, *$.inputApplyConfig|raw$] + Fake *Fake$.GroupGoName$$.Version$ + } + `, + withList | withApply: ` + // fake$.type|publicPlural$ implements $.type|public$Interface + type fake$.type|publicPlural$ struct { + *$.FakeClientWithListAndApply|raw$[*$.type|raw$, *$.type|raw$List, *$.inputApplyConfig|raw$] + Fake *Fake$.GroupGoName$$.Version$ + } + `, } -` -var resource = ` -var $.type|allLowercasePlural$Resource = $.SchemeGroupVersion|raw$.WithResource("$.type|resource$") -` - -var kind = ` -var $.type|allLowercasePlural$Kind = $.SchemeGroupVersion|raw$.WithKind("$.type|singularKind$") -` - -var listTemplate = ` -// List takes label and field selectors, and returns the list of $.type|publicPlural$ that match those selectors. -func (c *Fake$.type|publicPlural$) List(ctx context.Context, opts $.ListOptions|raw$) (result *$.type|raw$List, err error) { - emptyResult := &$.type|raw$List{} - obj, err := c.Fake. - $if .namespaced$Invokes($.NewListActionWithOptions|raw$($.type|allLowercasePlural$Resource, $.type|allLowercasePlural$Kind, c.ns, opts), emptyResult) - $else$Invokes($.NewRootListActionWithOptions|raw$($.type|allLowercasePlural$Resource, $.type|allLowercasePlural$Kind, opts), emptyResult)$end$ - if obj == nil { - return emptyResult, err +// Constructors for the struct, in all variants +var newStruct = []string{ + noList | noApply: ` + func newFake$.type|publicPlural$(fake *Fake$.GroupGoName$$.Version$$if .namespaced$, namespace string$end$) $.realClientInterface|raw$ { + return &fake$.type|publicPlural${ + $.NewFakeClient|raw$[*$.type|raw$]( + fake.Fake, + $if .namespaced$namespace$else$""$end$, + $.SchemeGroupVersion|raw$.WithResource("$.type|resource$"), + $.SchemeGroupVersion|raw$.WithKind("$.type|singularKind$"), + func() *$.type|raw$ {return &$.type|raw${}}, + ), + fake, + } } - return obj.(*$.type|raw$List), err + `, + noList | withApply: ` + func newFake$.type|publicPlural$(fake *Fake$.GroupGoName$$.Version$$if .namespaced$, namespace string$end$) $.realClientInterface|raw$ { + return &fake$.type|publicPlural${ + $.NewFakeClientWithApply|raw$[*$.type|raw$, *$.inputApplyConfig|raw$]( + fake.Fake, + $if .namespaced$namespace$else$""$end$, + $.SchemeGroupVersion|raw$.WithResource("$.type|resource$"), + $.SchemeGroupVersion|raw$.WithKind("$.type|singularKind$"), + func() *$.type|raw$ {return &$.type|raw${}}, + ), + fake, + } + } + `, + withList | noApply: ` + func newFake$.type|publicPlural$(fake *Fake$.GroupGoName$$.Version$$if .namespaced$, namespace string$end$) $.realClientInterface|raw$ { + return &fake$.type|publicPlural${ + $.NewFakeClientWithList|raw$[*$.type|raw$, *$.type|raw$List]( + fake.Fake, + $if .namespaced$namespace$else$""$end$, + $.SchemeGroupVersion|raw$.WithResource("$.type|resource$"), + $.SchemeGroupVersion|raw$.WithKind("$.type|singularKind$"), + func() *$.type|raw$ {return &$.type|raw${}}, + func() 
*$.type|raw$List {return &$.type|raw$List{}}, + func(dst, src *$.type|raw$List) {dst.ListMeta = src.ListMeta}, + func(list *$.type|raw$List) []*$.type|raw$ {return gentype.ToPointerSlice(list.Items)}, + func(list *$.type|raw$List, items []*$.type|raw$) {list.Items = gentype.FromPointerSlice(items)}, + ), + fake, + } + } + `, + withList | withApply: ` + func newFake$.type|publicPlural$(fake *Fake$.GroupGoName$$.Version$$if .namespaced$, namespace string$end$) $.realClientInterface|raw$ { + return &fake$.type|publicPlural${ + $.NewFakeClientWithListAndApply|raw$[*$.type|raw$, *$.type|raw$List, *$.inputApplyConfig|raw$]( + fake.Fake, + $if .namespaced$namespace$else$""$end$, + $.SchemeGroupVersion|raw$.WithResource("$.type|resource$"), + $.SchemeGroupVersion|raw$.WithKind("$.type|singularKind$"), + func() *$.type|raw$ {return &$.type|raw${}}, + func() *$.type|raw$List {return &$.type|raw$List{}}, + func(dst, src *$.type|raw$List) {dst.ListMeta = src.ListMeta}, + func(list *$.type|raw$List) []*$.type|raw$ {return gentype.ToPointerSlice(list.Items)}, + func(list *$.type|raw$List, items []*$.type|raw$) {list.Items = gentype.FromPointerSlice(items)}, + ), + fake, + } + } + `, } -` -var listUsingOptionsTemplate = ` +var listTemplate = ` // List takes label and field selectors, and returns the list of $.type|publicPlural$ that match those selectors. -func (c *Fake$.type|publicPlural$) List(ctx context.Context, opts $.ListOptions|raw$) (result *$.type|raw$List, err error) { +func (c *fake$.type|publicPlural$) List(ctx $.contextContext|raw$, opts $.ListOptions|raw$) (result *$.type|raw$List, err error) { emptyResult := &$.type|raw$List{} obj, err := c.Fake. - $if .namespaced$Invokes($.NewListActionWithOptions|raw$($.type|allLowercasePlural$Resource, $.type|allLowercasePlural$Kind, c.ns, opts), emptyResult) - $else$Invokes($.NewRootListActionWithOptions|raw$($.type|allLowercasePlural$Resource, $.type|allLowercasePlural$Kind, opts), emptyResult)$end$ + $if .namespaced$Invokes($.NewListActionWithOptions|raw$(c.Resource(), c.Kind(), c.Namespace(), opts), emptyResult) + $else$Invokes($.NewRootListActionWithOptions|raw$(c.Resource(), c.Kind(), opts), emptyResult)$end$ if obj == nil { return emptyResult, err } - - label, _, _ := $.ExtractFromListOptions|raw$(opts) - if label == nil { - label = $.Everything|raw$() - } - list := &$.type|raw$List{ListMeta: obj.(*$.type|raw$List).ListMeta} - for _, item := range obj.(*$.type|raw$List).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err + return obj.(*$.type|raw$List), err } ` var getTemplate = ` // Get takes name of the $.type|private$, and returns the corresponding $.resultType|private$ object, and an error if there is any. -func (c *Fake$.type|publicPlural$) Get(ctx context.Context, name string, options $.GetOptions|raw$) (result *$.resultType|raw$, err error) { +func (c *fake$.type|publicPlural$) Get(ctx $.contextContext|raw$, name string, options $.GetOptions|raw$) (result *$.resultType|raw$, err error) { emptyResult := &$.resultType|raw${} obj, err := c.Fake. 
- $if .namespaced$Invokes($.NewGetActionWithOptions|raw$($.type|allLowercasePlural$Resource, c.ns, name, options), emptyResult) - $else$Invokes($.NewRootGetActionWithOptions|raw$($.type|allLowercasePlural$Resource, name, options), emptyResult)$end$ + $if .namespaced$Invokes($.NewGetActionWithOptions|raw$(c.Resource(), c.Namespace(), name, options), emptyResult) + $else$Invokes($.NewRootGetActionWithOptions|raw$(c.Resource(), name, options), emptyResult)$end$ if obj == nil { return emptyResult, err } @@ -385,11 +394,11 @@ func (c *Fake$.type|publicPlural$) Get(ctx context.Context, name string, options var getSubresourceTemplate = ` // Get takes name of the $.type|private$, and returns the corresponding $.resultType|private$ object, and an error if there is any. -func (c *Fake$.type|publicPlural$) Get(ctx context.Context, $.type|private$Name string, options $.GetOptions|raw$) (result *$.resultType|raw$, err error) { +func (c *fake$.type|publicPlural$) Get(ctx $.contextContext|raw$, $.type|private$Name string, options $.GetOptions|raw$) (result *$.resultType|raw$, err error) { emptyResult := &$.resultType|raw${} obj, err := c.Fake. - $if .namespaced$Invokes($.NewGetSubresourceActionWithOptions|raw$($.type|allLowercasePlural$Resource, c.ns, "$.subresourcePath$", $.type|private$Name, options), emptyResult) - $else$Invokes($.NewRootGetSubresourceActionWithOptions|raw$($.type|allLowercasePlural$Resource, "$.subresourcePath$", $.type|private$Name, options), emptyResult)$end$ + $if .namespaced$Invokes($.NewGetSubresourceActionWithOptions|raw$(c.Resource(), c.Namespace(), "$.subresourcePath$", $.type|private$Name, options), emptyResult) + $else$Invokes($.NewRootGetSubresourceActionWithOptions|raw$(c.Resource(), "$.subresourcePath$", $.type|private$Name, options), emptyResult)$end$ if obj == nil { return emptyResult, err } @@ -399,31 +408,21 @@ func (c *Fake$.type|publicPlural$) Get(ctx context.Context, $.type|private$Name var deleteTemplate = ` // Delete takes name of the $.type|private$ and deletes it. Returns an error if one occurs. -func (c *Fake$.type|publicPlural$) Delete(ctx context.Context, name string, opts $.DeleteOptions|raw$) error { +func (c *fake$.type|publicPlural$) Delete(ctx $.contextContext|raw$, name string, opts $.DeleteOptions|raw$) error { _, err := c.Fake. - $if .namespaced$Invokes($.NewDeleteActionWithOptions|raw$($.type|allLowercasePlural$Resource, c.ns, name, opts), &$.type|raw${}) - $else$Invokes($.NewRootDeleteActionWithOptions|raw$($.type|allLowercasePlural$Resource, name, opts), &$.type|raw${})$end$ + $if .namespaced$Invokes($.NewDeleteActionWithOptions|raw$(c.Resource(), c.Namespace(), name, opts), &$.type|raw${}) + $else$Invokes($.NewRootDeleteActionWithOptions|raw$(c.Resource(), name, opts), &$.type|raw${})$end$ return err } ` -var deleteCollectionTemplate = ` -// DeleteCollection deletes a collection of objects. -func (c *Fake$.type|publicPlural$) DeleteCollection(ctx context.Context, opts $.DeleteOptions|raw$, listOpts $.ListOptions|raw$) error { - $if .namespaced$action := $.NewDeleteCollectionActionWithOptions|raw$($.type|allLowercasePlural$Resource, c.ns, opts, listOpts) - $else$action := $.NewRootDeleteCollectionActionWithOptions|raw$($.type|allLowercasePlural$Resource, opts, listOpts) - $end$ - _, err := c.Fake.Invokes(action, &$.type|raw$List{}) - return err -} -` var createTemplate = ` // Create takes the representation of a $.inputType|private$ and creates it. Returns the server's representation of the $.resultType|private$, and an error, if there is any. 
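Rendered, the templates above produce fake verbs that fetch resource, kind and namespace from the embedded gentype client instead of package-level vars, and DeleteCollection no longer gets a generated body at all (presumably satisfied by the embedded gentype client). A rough sketch of the emitted Get for a cluster-scoped type; the CertManager names are illustrative only:

    // Get takes name of the certManager, and returns the corresponding certManager object,
    // and an error if there is any.
    func (c *fakeCertManagers) Get(ctx context.Context, name string, options metav1.GetOptions) (*v1alpha1.CertManager, error) {
        emptyResult := &v1alpha1.CertManager{}
        obj, err := c.Fake.
            Invokes(testing.NewRootGetActionWithOptions(c.Resource(), name, options), emptyResult)
        if obj == nil {
            return emptyResult, err
        }
        return obj.(*v1alpha1.CertManager), err
    }
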
-func (c *Fake$.type|publicPlural$) Create(ctx context.Context, $.inputType|private$ *$.inputType|raw$, opts $.CreateOptions|raw$) (result *$.resultType|raw$, err error) { +func (c *fake$.type|publicPlural$) Create(ctx $.contextContext|raw$, $.inputType|private$ *$.inputType|raw$, opts $.CreateOptions|raw$) (result *$.resultType|raw$, err error) { emptyResult := &$.resultType|raw${} obj, err := c.Fake. - $if .namespaced$Invokes($.NewCreateActionWithOptions|raw$($.inputType|allLowercasePlural$Resource, c.ns, $.inputType|private$, opts), emptyResult) - $else$Invokes($.NewRootCreateActionWithOptions|raw$($.inputType|allLowercasePlural$Resource, $.inputType|private$, opts), emptyResult)$end$ + $if .namespaced$Invokes($.NewCreateActionWithOptions|raw$(c.Resource(), c.Namespace(), $.inputType|private$, opts), emptyResult) + $else$Invokes($.NewRootCreateActionWithOptions|raw$(c.Resource(), $.inputType|private$, opts), emptyResult)$end$ if obj == nil { return emptyResult, err } @@ -433,11 +432,11 @@ func (c *Fake$.type|publicPlural$) Create(ctx context.Context, $.inputType|priva var createSubresourceTemplate = ` // Create takes the representation of a $.inputType|private$ and creates it. Returns the server's representation of the $.resultType|private$, and an error, if there is any. -func (c *Fake$.type|publicPlural$) Create(ctx context.Context, $.type|private$Name string, $.inputType|private$ *$.inputType|raw$, opts $.CreateOptions|raw$) (result *$.resultType|raw$, err error) { +func (c *fake$.type|publicPlural$) Create(ctx $.contextContext|raw$, $.type|private$Name string, $.inputType|private$ *$.inputType|raw$, opts $.CreateOptions|raw$) (result *$.resultType|raw$, err error) { emptyResult := &$.resultType|raw${} obj, err := c.Fake. - $if .namespaced$Invokes($.NewCreateSubresourceActionWithOptions|raw$($.type|allLowercasePlural$Resource, $.type|private$Name, "$.subresourcePath$", c.ns, $.inputType|private$, opts), emptyResult) - $else$Invokes($.NewRootCreateSubresourceActionWithOptions|raw$($.type|allLowercasePlural$Resource, $.type|private$Name, "$.subresourcePath$", $.inputType|private$, opts), emptyResult)$end$ + $if .namespaced$Invokes($.NewCreateSubresourceActionWithOptions|raw$(c.Resource(), $.type|private$Name, "$.subresourcePath$", c.Namespace(), $.inputType|private$, opts), emptyResult) + $else$Invokes($.NewRootCreateSubresourceActionWithOptions|raw$(c.Resource(), $.type|private$Name, "$.subresourcePath$", $.inputType|private$, opts), emptyResult)$end$ if obj == nil { return emptyResult, err } @@ -447,11 +446,11 @@ func (c *Fake$.type|publicPlural$) Create(ctx context.Context, $.type|private$Na var updateTemplate = ` // Update takes the representation of a $.inputType|private$ and updates it. Returns the server's representation of the $.resultType|private$, and an error, if there is any. -func (c *Fake$.type|publicPlural$) Update(ctx context.Context, $.inputType|private$ *$.inputType|raw$, opts $.UpdateOptions|raw$) (result *$.resultType|raw$, err error) { +func (c *fake$.type|publicPlural$) Update(ctx $.contextContext|raw$, $.inputType|private$ *$.inputType|raw$, opts $.UpdateOptions|raw$) (result *$.resultType|raw$, err error) { emptyResult := &$.resultType|raw${} obj, err := c.Fake. 
- $if .namespaced$Invokes($.NewUpdateActionWithOptions|raw$($.inputType|allLowercasePlural$Resource, c.ns, $.inputType|private$, opts), emptyResult) - $else$Invokes($.NewRootUpdateActionWithOptions|raw$($.inputType|allLowercasePlural$Resource, $.inputType|private$, opts), emptyResult)$end$ + $if .namespaced$Invokes($.NewUpdateActionWithOptions|raw$(c.Resource(), c.Namespace(), $.inputType|private$, opts), emptyResult) + $else$Invokes($.NewRootUpdateActionWithOptions|raw$(c.Resource(), $.inputType|private$, opts), emptyResult)$end$ if obj == nil { return emptyResult, err } @@ -461,11 +460,11 @@ func (c *Fake$.type|publicPlural$) Update(ctx context.Context, $.inputType|priva var updateSubresourceTemplate = ` // Update takes the representation of a $.inputType|private$ and updates it. Returns the server's representation of the $.resultType|private$, and an error, if there is any. -func (c *Fake$.type|publicPlural$) Update(ctx context.Context, $.type|private$Name string, $.inputType|private$ *$.inputType|raw$, opts $.UpdateOptions|raw$) (result *$.resultType|raw$, err error) { +func (c *fake$.type|publicPlural$) Update(ctx $.contextContext|raw$, $.type|private$Name string, $.inputType|private$ *$.inputType|raw$, opts $.UpdateOptions|raw$) (result *$.resultType|raw$, err error) { emptyResult := &$.resultType|raw${} obj, err := c.Fake. - $if .namespaced$Invokes($.NewUpdateSubresourceActionWithOptions|raw$($.type|allLowercasePlural$Resource, "$.subresourcePath$", c.ns, $.inputType|private$, opts), &$.inputType|raw${}) - $else$Invokes($.NewRootUpdateSubresourceActionWithOptions|raw$($.type|allLowercasePlural$Resource, "$.subresourcePath$", $.inputType|private$, opts), emptyResult)$end$ + $if .namespaced$Invokes($.NewUpdateSubresourceActionWithOptions|raw$(c.Resource(), "$.subresourcePath$", c.Namespace(), $.inputType|private$, opts), &$.inputType|raw${}) + $else$Invokes($.NewRootUpdateSubresourceActionWithOptions|raw$(c.Resource(), "$.subresourcePath$", $.inputType|private$, opts), emptyResult)$end$ if obj == nil { return emptyResult, err } @@ -473,37 +472,22 @@ func (c *Fake$.type|publicPlural$) Update(ctx context.Context, $.type|private$Na } ` -var updateStatusTemplate = ` -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *Fake$.type|publicPlural$) UpdateStatus(ctx context.Context, $.type|private$ *$.type|raw$, opts $.UpdateOptions|raw$) (result *$.type|raw$, err error) { - emptyResult := &$.type|raw${} - obj, err := c.Fake. - $if .namespaced$Invokes($.NewUpdateSubresourceActionWithOptions|raw$($.type|allLowercasePlural$Resource, "status", c.ns, $.type|private$, opts), emptyResult) - $else$Invokes($.NewRootUpdateSubresourceActionWithOptions|raw$($.type|allLowercasePlural$Resource, "status", $.type|private$, opts), emptyResult)$end$ - if obj == nil { - return emptyResult, err - } - return obj.(*$.type|raw$), err -} -` - var watchTemplate = ` // Watch returns a $.watchInterface|raw$ that watches the requested $.type|privatePlural$. -func (c *Fake$.type|publicPlural$) Watch(ctx context.Context, opts $.ListOptions|raw$) ($.watchInterface|raw$, error) { +func (c *fake$.type|publicPlural$) Watch(ctx $.contextContext|raw$, opts $.ListOptions|raw$) ($.watchInterface|raw$, error) { return c.Fake. 
- $if .namespaced$InvokesWatch($.NewWatchActionWithOptions|raw$($.type|allLowercasePlural$Resource, c.ns, opts)) - $else$InvokesWatch($.NewRootWatchActionWithOptions|raw$($.type|allLowercasePlural$Resource, opts))$end$ + $if .namespaced$InvokesWatch($.NewWatchActionWithOptions|raw$(c.Resource(), c.Namespace(), opts)) + $else$InvokesWatch($.NewRootWatchActionWithOptions|raw$(c.Resource(), opts))$end$ } ` var patchTemplate = ` // Patch applies the patch and returns the patched $.resultType|private$. -func (c *Fake$.type|publicPlural$) Patch(ctx context.Context, name string, pt $.PatchType|raw$, data []byte, opts $.PatchOptions|raw$, subresources ...string) (result *$.resultType|raw$, err error) { +func (c *fake$.type|publicPlural$) Patch(ctx $.contextContext|raw$, name string, pt $.PatchType|raw$, data []byte, opts $.PatchOptions|raw$, subresources ...string) (result *$.resultType|raw$, err error) { emptyResult := &$.resultType|raw${} obj, err := c.Fake. - $if .namespaced$Invokes($.NewPatchSubresourceActionWithOptions|raw$($.type|allLowercasePlural$Resource, c.ns, name, pt, data, opts, subresources... ), emptyResult) - $else$Invokes($.NewRootPatchSubresourceActionWithOptions|raw$($.type|allLowercasePlural$Resource, name, pt, data, opts, subresources...), emptyResult)$end$ + $if .namespaced$Invokes($.NewPatchSubresourceActionWithOptions|raw$(c.Resource(), c.Namespace(), name, pt, data, opts, subresources... ), emptyResult) + $else$Invokes($.NewRootPatchSubresourceActionWithOptions|raw$(c.Resource(), name, pt, data, opts, subresources...), emptyResult)$end$ if obj == nil { return emptyResult, err } @@ -513,35 +497,9 @@ func (c *Fake$.type|publicPlural$) Patch(ctx context.Context, name string, pt $. var applyTemplate = ` // Apply takes the given apply declarative configuration, applies it and returns the applied $.resultType|private$. -func (c *Fake$.type|publicPlural$) Apply(ctx context.Context, $.inputType|private$ *$.inputApplyConfig|raw$, opts $.ApplyOptions|raw$) (result *$.resultType|raw$, err error) { - if $.inputType|private$ == nil { - return nil, fmt.Errorf("$.inputType|private$ provided to Apply must not be nil") - } - data, err := $.jsonMarshal|raw$($.inputType|private$) - if err != nil { - return nil, err - } - name := $.inputType|private$.Name - if name == nil { - return nil, fmt.Errorf("$.inputType|private$.Name must be provided to Apply") - } - emptyResult := &$.resultType|raw${} - obj, err := c.Fake. - $if .namespaced$Invokes($.NewPatchSubresourceActionWithOptions|raw$($.type|allLowercasePlural$Resource, c.ns, *name, $.ApplyPatchType|raw$, data, opts.ToPatchOptions()), emptyResult) - $else$Invokes($.NewRootPatchSubresourceActionWithOptions|raw$($.type|allLowercasePlural$Resource, *name, $.ApplyPatchType|raw$, data, opts.ToPatchOptions()), emptyResult)$end$ - if obj == nil { - return emptyResult, err - } - return obj.(*$.resultType|raw$), err -} -` - -var applyStatusTemplate = ` -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
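All of these rewritten templates still funnel every verb through Invokes on the shared testing.Fake, so existing reactor-based tests keep working; server-side apply in particular still shows up as a patch action carrying types.ApplyPatchType. A minimal sketch of intercepting it from a test; the resource name is illustrative:

    import (
        "fmt"

        "k8s.io/apimachinery/pkg/runtime"
        clienttesting "k8s.io/client-go/testing"
    )

    // stubApplyFailure works against any generated fake clientset, since they all
    // embed clienttesting.Fake and the templates above call Invokes on it.
    func stubApplyFailure(f *clienttesting.Fake) {
        f.PrependReactor("patch", "certmanagers",
            func(action clienttesting.Action) (bool, runtime.Object, error) {
                return true, nil, fmt.Errorf("injected apply failure")
            })
    }
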
-func (c *Fake$.type|publicPlural$) ApplyStatus(ctx context.Context, $.inputType|private$ *$.inputApplyConfig|raw$, opts $.ApplyOptions|raw$) (result *$.resultType|raw$, err error) { +func (c *fake$.type|publicPlural$) Apply(ctx $.contextContext|raw$, $.inputType|private$ *$.inputApplyConfig|raw$, opts $.ApplyOptions|raw$) (result *$.resultType|raw$, err error) { if $.inputType|private$ == nil { - return nil, fmt.Errorf("$.inputType|private$ provided to Apply must not be nil") + return nil, $.fmtErrorf|raw$("$.inputType|private$ provided to Apply must not be nil") } data, err := $.jsonMarshal|raw$($.inputType|private$) if err != nil { @@ -549,12 +507,12 @@ func (c *Fake$.type|publicPlural$) ApplyStatus(ctx context.Context, $.inputType| } name := $.inputType|private$.Name if name == nil { - return nil, fmt.Errorf("$.inputType|private$.Name must be provided to Apply") + return nil, $.fmtErrorf|raw$("$.inputType|private$.Name must be provided to Apply") } emptyResult := &$.resultType|raw${} obj, err := c.Fake. - $if .namespaced$Invokes($.NewPatchSubresourceActionWithOptions|raw$($.type|allLowercasePlural$Resource, c.ns, *name, $.ApplyPatchType|raw$, data, opts.ToPatchOptions(), "status"), emptyResult) - $else$Invokes($.NewRootPatchSubresourceActionWithOptions|raw$($.type|allLowercasePlural$Resource, *name, $.ApplyPatchType|raw$, data, opts.ToPatchOptions(), "status"), emptyResult)$end$ + $if .namespaced$Invokes($.NewPatchSubresourceActionWithOptions|raw$(c.Resource(), c.Namespace(), *name, $.ApplyPatchType|raw$, data, opts.ToPatchOptions()), emptyResult) + $else$Invokes($.NewRootPatchSubresourceActionWithOptions|raw$(c.Resource(), *name, $.ApplyPatchType|raw$, data, opts.ToPatchOptions()), emptyResult)$end$ if obj == nil { return emptyResult, err } @@ -565,9 +523,9 @@ func (c *Fake$.type|publicPlural$) ApplyStatus(ctx context.Context, $.inputType| var applySubresourceTemplate = ` // Apply takes top resource name and the apply declarative configuration for $.subresourcePath$, // applies it and returns the applied $.resultType|private$, and an error, if there is any. -func (c *Fake$.type|publicPlural$) Apply(ctx context.Context, $.type|private$Name string, $.inputType|private$ *$.inputApplyConfig|raw$, opts $.ApplyOptions|raw$) (result *$.resultType|raw$, err error) { +func (c *fake$.type|publicPlural$) Apply(ctx $.contextContext|raw$, $.type|private$Name string, $.inputType|private$ *$.inputApplyConfig|raw$, opts $.ApplyOptions|raw$) (result *$.resultType|raw$, err error) { if $.inputType|private$ == nil { - return nil, fmt.Errorf("$.inputType|private$ provided to Apply must not be nil") + return nil, $.fmtErrorf|raw$("$.inputType|private$ provided to Apply must not be nil") } data, err := $.jsonMarshal|raw$($.inputType|private$) if err != nil { @@ -575,8 +533,8 @@ func (c *Fake$.type|publicPlural$) Apply(ctx context.Context, $.type|private$Nam } emptyResult := &$.resultType|raw${} obj, err := c.Fake. 
- $if .namespaced$Invokes($.NewPatchSubresourceActionWithOptions|raw$($.type|allLowercasePlural$Resource, c.ns, $.type|private$Name, $.ApplyPatchType|raw$, data, opts.ToPatchOptions(), "$.inputType|private$"), emptyResult) - $else$Invokes($.NewRootPatchSubresourceActionWithOptions|raw$($.type|allLowercasePlural$Resource, $.type|private$Name, $.ApplyPatchType|raw$, data, opts.ToPatchOptions(), "$.inputType|private$"), emptyResult)$end$ + $if .namespaced$Invokes($.NewPatchSubresourceActionWithOptions|raw$(c.Resource(), c.Namespace(), $.type|private$Name, $.ApplyPatchType|raw$, data, opts.ToPatchOptions(), "$.inputType|private$"), emptyResult) + $else$Invokes($.NewRootPatchSubresourceActionWithOptions|raw$(c.Resource(), $.type|private$Name, $.ApplyPatchType|raw$, data, opts.ToPatchOptions(), "$.inputType|private$"), emptyResult)$end$ if obj == nil { return emptyResult, err } diff --git a/vendor/k8s.io/code-generator/cmd/client-gen/generators/generator_for_clientset.go b/vendor/k8s.io/code-generator/cmd/client-gen/generators/generator_for_clientset.go index 0c043cee9..e84205604 100644 --- a/vendor/k8s.io/code-generator/cmd/client-gen/generators/generator_for_clientset.go +++ b/vendor/k8s.io/code-generator/cmd/client-gen/generators/generator_for_clientset.go @@ -73,12 +73,14 @@ func (g *genClientset) GenerateType(c *generator.Context, t *types.Type, w io.Wr allGroups := clientgentypes.ToGroupVersionInfo(g.groups, g.groupGoNames) m := map[string]interface{}{ "allGroups": allGroups, + "fmtErrorf": c.Universe.Type(types.Name{Package: "fmt", Name: "Errorf"}), "Config": c.Universe.Type(types.Name{Package: "k8s.io/client-go/rest", Name: "Config"}), "DefaultKubernetesUserAgent": c.Universe.Function(types.Name{Package: "k8s.io/client-go/rest", Name: "DefaultKubernetesUserAgent"}), "RESTClientInterface": c.Universe.Type(types.Name{Package: "k8s.io/client-go/rest", Name: "Interface"}), "RESTHTTPClientFor": c.Universe.Function(types.Name{Package: "k8s.io/client-go/rest", Name: "HTTPClientFor"}), "DiscoveryInterface": c.Universe.Type(types.Name{Package: "k8s.io/client-go/discovery", Name: "DiscoveryInterface"}), "DiscoveryClient": c.Universe.Type(types.Name{Package: "k8s.io/client-go/discovery", Name: "DiscoveryClient"}), + "httpClient": c.Universe.Type(types.Name{Package: "net/http", Name: "Client"}), "NewDiscoveryClientForConfigAndClient": c.Universe.Function(types.Name{Package: "k8s.io/client-go/discovery", Name: "NewDiscoveryClientForConfigAndClient"}), "NewDiscoveryClientForConfigOrDie": c.Universe.Function(types.Name{Package: "k8s.io/client-go/discovery", Name: "NewDiscoveryClientForConfigOrDie"}), "NewDiscoveryClient": c.Universe.Function(types.Name{Package: "k8s.io/client-go/discovery", Name: "NewDiscoveryClient"}), @@ -160,11 +162,11 @@ var newClientsetForConfigAndClientTemplate = ` // Note the http client provided takes precedence over the configured transport values. // If config's RateLimiter is not set and QPS and Burst are acceptable, // NewForConfigAndClient will generate a rate-limiter in configShallowCopy. 
-func NewForConfigAndClient(c *$.Config|raw$, httpClient *http.Client) (*Clientset, error) { +func NewForConfigAndClient(c *$.Config|raw$, httpClient *$.httpClient|raw$) (*Clientset, error) { configShallowCopy := *c if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 { if configShallowCopy.Burst <= 0 { - return nil, fmt.Errorf("burst is required to be greater than 0 when RateLimiter is not set and QPS is set to greater than 0") + return nil, $.fmtErrorf|raw$("burst is required to be greater than 0 when RateLimiter is not set and QPS is set to greater than 0") } configShallowCopy.RateLimiter = $.flowcontrolNewTokenBucketRateLimiter|raw$(configShallowCopy.QPS, configShallowCopy.Burst) } diff --git a/vendor/k8s.io/code-generator/cmd/client-gen/generators/generator_for_group.go b/vendor/k8s.io/code-generator/cmd/client-gen/generators/generator_for_group.go index 83b13b11c..819687175 100644 --- a/vendor/k8s.io/code-generator/cmd/client-gen/generators/generator_for_group.go +++ b/vendor/k8s.io/code-generator/cmd/client-gen/generators/generator_for_group.go @@ -64,7 +64,6 @@ func (g *genGroup) Namers(c *generator.Context) namer.NameSystems { func (g *genGroup) Imports(c *generator.Context) (imports []string) { imports = append(imports, g.imports.ImportLines()...) - imports = append(imports, path.Join(g.clientsetPackage, "scheme")) return } @@ -82,23 +81,28 @@ func (g *genGroup) GenerateType(c *generator.Context, t *types.Type, w io.Writer if groupName == "" { apiPath = `"/api"` } - + schemePackage := path.Join(g.clientsetPackage, "scheme") m := map[string]interface{}{ - "version": g.version, - "groupName": groupName, - "GroupGoName": g.groupGoName, - "Version": namer.IC(g.version), - "types": g.types, - "apiPath": apiPath, - "schemaGroupVersion": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/runtime/schema", Name: "GroupVersion"}), - "runtimeAPIVersionInternal": c.Universe.Variable(types.Name{Package: "k8s.io/apimachinery/pkg/runtime", Name: "APIVersionInternal"}), - "restConfig": c.Universe.Type(types.Name{Package: "k8s.io/client-go/rest", Name: "Config"}), - "restDefaultKubernetesUserAgent": c.Universe.Function(types.Name{Package: "k8s.io/client-go/rest", Name: "DefaultKubernetesUserAgent"}), - "restRESTClientInterface": c.Universe.Type(types.Name{Package: "k8s.io/client-go/rest", Name: "Interface"}), - "RESTHTTPClientFor": c.Universe.Function(types.Name{Package: "k8s.io/client-go/rest", Name: "HTTPClientFor"}), - "restRESTClientFor": c.Universe.Function(types.Name{Package: "k8s.io/client-go/rest", Name: "RESTClientFor"}), - "restRESTClientForConfigAndClient": c.Universe.Function(types.Name{Package: "k8s.io/client-go/rest", Name: "RESTClientForConfigAndClient"}), - "SchemeGroupVersion": c.Universe.Variable(types.Name{Package: g.inputPackage, Name: "SchemeGroupVersion"}), + "version": g.version, + "groupName": groupName, + "GroupGoName": g.groupGoName, + "Version": namer.IC(g.version), + "types": g.types, + "apiPath": apiPath, + "httpClient": c.Universe.Type(types.Name{Package: "net/http", Name: "Client"}), + "schemaGroupVersion": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/runtime/schema", Name: "GroupVersion"}), + "runtimeAPIVersionInternal": c.Universe.Variable(types.Name{Package: "k8s.io/apimachinery/pkg/runtime", Name: "APIVersionInternal"}), + "restConfig": c.Universe.Type(types.Name{Package: "k8s.io/client-go/rest", Name: "Config"}), + "restDefaultKubernetesUserAgent": c.Universe.Function(types.Name{Package: "k8s.io/client-go/rest", Name: 
"DefaultKubernetesUserAgent"}), + "restRESTClientInterface": c.Universe.Type(types.Name{Package: "k8s.io/client-go/rest", Name: "Interface"}), + "RESTHTTPClientFor": c.Universe.Function(types.Name{Package: "k8s.io/client-go/rest", Name: "HTTPClientFor"}), + "restRESTClientFor": c.Universe.Function(types.Name{Package: "k8s.io/client-go/rest", Name: "RESTClientFor"}), + "restRESTClientForConfigAndClient": c.Universe.Function(types.Name{Package: "k8s.io/client-go/rest", Name: "RESTClientForConfigAndClient"}), + "restCodecFactoryForGeneratedClient": c.Universe.Function(types.Name{Package: "k8s.io/client-go/rest", Name: "CodecFactoryForGeneratedClient"}), + "SchemeGroupVersion": c.Universe.Variable(types.Name{Package: g.inputPackage, Name: "SchemeGroupVersion"}), + "SchemePrioritizedVersionsForGroup": c.Universe.Variable(types.Name{Package: schemePackage, Name: "Scheme.PrioritizedVersionsForGroup"}), + "Codecs": c.Universe.Variable(types.Name{Package: schemePackage, Name: "Codecs"}), + "Scheme": c.Universe.Variable(types.Name{Package: schemePackage, Name: "Scheme"}), } sw.Do(groupInterfaceTemplate, m) sw.Do(groupClientTemplate, m) @@ -179,7 +183,7 @@ func NewForConfig(c *$.restConfig|raw$) (*$.GroupGoName$$.Version$Client, error) var newClientForConfigAndClientTemplate = ` // NewForConfigAndClient creates a new $.GroupGoName$$.Version$Client for the given config and http client. // Note the http client provided takes precedence over the configured transport values. -func NewForConfigAndClient(c *$.restConfig|raw$, h *http.Client) (*$.GroupGoName$$.Version$Client, error) { +func NewForConfigAndClient(c *$.restConfig|raw$, h *$.httpClient|raw$) (*$.GroupGoName$$.Version$Client, error) { config := *c if err := setConfigDefaults(&config); err != nil { return nil, err @@ -228,11 +232,11 @@ func setConfigDefaults(config *$.restConfig|raw$) error { if config.UserAgent == "" { config.UserAgent = $.restDefaultKubernetesUserAgent|raw$() } - if config.GroupVersion == nil || config.GroupVersion.Group != scheme.Scheme.PrioritizedVersionsForGroup("$.groupName$")[0].Group { - gv := scheme.Scheme.PrioritizedVersionsForGroup("$.groupName$")[0] + if config.GroupVersion == nil || config.GroupVersion.Group != $.SchemePrioritizedVersionsForGroup|raw$("$.groupName$")[0].Group { + gv := $.SchemePrioritizedVersionsForGroup|raw$("$.groupName$")[0] config.GroupVersion = &gv } - config.NegotiatedSerializer = scheme.Codecs + config.NegotiatedSerializer = $.restCodecFactoryForGeneratedClient|raw$($.Scheme|raw$, $.Codecs|raw$) if config.QPS == 0 { config.QPS = 5 @@ -250,7 +254,7 @@ func setConfigDefaults(config *$.restConfig|raw$) error { gv := $.SchemeGroupVersion|raw$ config.GroupVersion = &gv config.APIPath = $.apiPath$ - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + config.NegotiatedSerializer = $.restCodecFactoryForGeneratedClient|raw$($.Scheme|raw$, $.Codecs|raw$).WithoutConversion() if config.UserAgent == "" { config.UserAgent = $.restDefaultKubernetesUserAgent|raw$() diff --git a/vendor/k8s.io/code-generator/cmd/client-gen/generators/generator_for_type.go b/vendor/k8s.io/code-generator/cmd/client-gen/generators/generator_for_type.go index 7361c48f1..00b949a77 100644 --- a/vendor/k8s.io/code-generator/cmd/client-gen/generators/generator_for_type.go +++ b/vendor/k8s.io/code-generator/cmd/client-gen/generators/generator_for_type.go @@ -23,6 +23,7 @@ import ( "golang.org/x/text/cases" "golang.org/x/text/language" + "k8s.io/gengo/v2/generator" "k8s.io/gengo/v2/namer" "k8s.io/gengo/v2/types" @@ -40,6 
+41,7 @@ type genClientForType struct { group string version string groupGoName string + prefersProtobuf bool typeToMatch *types.Type imports namer.ImportTracker } @@ -139,7 +141,8 @@ func (g *genClientForType) GenerateType(c *generator.Context, t *types.Type, w i "UpdateOptions": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/apis/meta/v1", Name: "UpdateOptions"}), "ApplyOptions": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/apis/meta/v1", Name: "ApplyOptions"}), "PatchType": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/types", Name: "PatchType"}), - "jsonMarshal": c.Universe.Type(types.Name{Package: "encoding/json", Name: "Marshal"}), + "PatchOptions": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/apis/meta/v1", Name: "PatchOptions"}), + "context": c.Universe.Type(types.Name{Package: "context", Name: "Context"}), }, } if e.HasVerb("apply") { @@ -158,6 +161,7 @@ func (g *genClientForType) GenerateType(c *generator.Context, t *types.Type, w i "subresource": false, "subresourcePath": "", "GroupGoName": g.groupGoName, + "prefersProtobuf": g.prefersProtobuf, "Version": namer.IC(g.version), "CreateOptions": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/apis/meta/v1", Name: "CreateOptions"}), "DeleteOptions": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/apis/meta/v1", Name: "DeleteOptions"}), @@ -167,19 +171,27 @@ func (g *genClientForType) GenerateType(c *generator.Context, t *types.Type, w i "ApplyOptions": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/apis/meta/v1", Name: "ApplyOptions"}), "UpdateOptions": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/apis/meta/v1", Name: "UpdateOptions"}), "PatchType": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/types", Name: "PatchType"}), - "ApplyPatchType": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/types", Name: "ApplyPatchType"}), "watchInterface": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/watch", Name: "Interface"}), "RESTClientInterface": c.Universe.Type(types.Name{Package: "k8s.io/client-go/rest", Name: "Interface"}), "schemeParameterCodec": c.Universe.Variable(types.Name{Package: path.Join(g.clientsetPackage, "scheme"), Name: "ParameterCodec"}), - "jsonMarshal": c.Universe.Type(types.Name{Package: "encoding/json", Name: "Marshal"}), + "fmtErrorf": c.Universe.Function(types.Name{Package: "fmt", Name: "Errorf"}), + "klogWarningf": c.Universe.Function(types.Name{Package: "k8s.io/klog/v2", Name: "Warningf"}), + "context": c.Universe.Type(types.Name{Package: "context", Name: "Context"}), + "timeDuration": c.Universe.Type(types.Name{Package: "time", Name: "Duration"}), + "timeSecond": c.Universe.Type(types.Name{Package: "time", Name: "Second"}), "resourceVersionMatchNotOlderThan": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/apis/meta/v1", Name: "ResourceVersionMatchNotOlderThan"}), "CheckListFromCacheDataConsistencyIfRequested": c.Universe.Function(types.Name{Package: "k8s.io/client-go/util/consistencydetector", Name: "CheckListFromCacheDataConsistencyIfRequested"}), "CheckWatchListFromCacheDataConsistencyIfRequested": c.Universe.Function(types.Name{Package: "k8s.io/client-go/util/consistencydetector", Name: "CheckWatchListFromCacheDataConsistencyIfRequested"}), "PrepareWatchListOptionsFromListOptions": c.Universe.Function(types.Name{Package: "k8s.io/client-go/util/watchlist", Name: "PrepareWatchListOptionsFromListOptions"}), + "applyNewRequest": 
c.Universe.Function(types.Name{Package: "k8s.io/client-go/util/apply", Name: "NewRequest"}), "Client": c.Universe.Type(types.Name{Package: "k8s.io/client-go/gentype", Name: "Client"}), "ClientWithList": c.Universe.Type(types.Name{Package: "k8s.io/client-go/gentype", Name: "ClientWithList"}), "ClientWithApply": c.Universe.Type(types.Name{Package: "k8s.io/client-go/gentype", Name: "ClientWithApply"}), "ClientWithListAndApply": c.Universe.Type(types.Name{Package: "k8s.io/client-go/gentype", Name: "ClientWithListAndApply"}), + "NewClient": c.Universe.Function(types.Name{Package: "k8s.io/client-go/gentype", Name: "NewClient"}), + "NewClientWithApply": c.Universe.Function(types.Name{Package: "k8s.io/client-go/gentype", Name: "NewClientWithApply"}), + "NewClientWithList": c.Universe.Function(types.Name{Package: "k8s.io/client-go/gentype", Name: "NewClientWithList"}), + "NewClientWithListAndApply": c.Universe.Function(types.Name{Package: "k8s.io/client-go/gentype", Name: "NewClientWithListAndApply"}), } if generateApply { @@ -267,64 +279,65 @@ func (g *genClientForType) GenerateType(c *generator.Context, t *types.Type, w i m["inputType"] = &inputType m["resultType"] = &resultType m["subresourcePath"] = e.SubResourcePath + m["verb"] = e.VerbName if e.HasVerb("apply") { m["inputApplyConfig"] = types.Ref(path.Join(g.applyConfigurationPackage, inputGVString), inputType.Name.Name+"ApplyConfiguration") } if e.HasVerb("get") { if e.IsSubresource() { - sw.Do(adjustTemplate(e.VerbName, e.VerbType, getSubresourceTemplate), m) + sw.Do(getSubresourceTemplate, m) } else { - sw.Do(adjustTemplate(e.VerbName, e.VerbType, getTemplate), m) + sw.Do(getTemplate, m) } } if e.HasVerb("list") { if e.IsSubresource() { - sw.Do(adjustTemplate(e.VerbName, e.VerbType, listSubresourceTemplate), m) + sw.Do(listSubresourceTemplate, m) } else { - sw.Do(adjustTemplate(e.VerbName, e.VerbType, listTemplate), m) - sw.Do(adjustTemplate(e.VerbName, e.VerbType, privateListTemplate), m) - sw.Do(adjustTemplate(e.VerbName, e.VerbType, watchListTemplate), m) + sw.Do(listTemplate, m) + sw.Do(privateListTemplate, m) + sw.Do(watchListTemplate, m) } } // TODO: Figure out schemantic for watching a sub-resource. if e.HasVerb("watch") { - sw.Do(adjustTemplate(e.VerbName, e.VerbType, watchTemplate), m) + sw.Do(watchTemplate, m) } if e.HasVerb("create") { if e.IsSubresource() { - sw.Do(adjustTemplate(e.VerbName, e.VerbType, createSubresourceTemplate), m) + sw.Do(createSubresourceTemplate, m) } else { - sw.Do(adjustTemplate(e.VerbName, e.VerbType, createTemplate), m) + sw.Do(createTemplate, m) } } if e.HasVerb("update") { if e.IsSubresource() { - sw.Do(adjustTemplate(e.VerbName, e.VerbType, updateSubresourceTemplate), m) + sw.Do(updateSubresourceTemplate, m) } else { - sw.Do(adjustTemplate(e.VerbName, e.VerbType, updateTemplate), m) + sw.Do(updateTemplate, m) } } // TODO: Figure out schemantic for deleting a sub-resource (what arguments // are passed, does it need two names? etc. 
if e.HasVerb("delete") { - sw.Do(adjustTemplate(e.VerbName, e.VerbType, deleteTemplate), m) + sw.Do(deleteTemplate, m) } if e.HasVerb("patch") { - sw.Do(adjustTemplate(e.VerbName, e.VerbType, patchTemplate), m) + sw.Do(patchTemplate, m) } if e.HasVerb("apply") { if e.IsSubresource() { - sw.Do(adjustTemplate(e.VerbName, e.VerbType, applySubresourceTemplate), m) + sw.Do(applySubresourceTemplate, m) } else { - sw.Do(adjustTemplate(e.VerbName, e.VerbType, applyTemplate), m) + sw.Do(applyTemplate, m) } } } @@ -332,13 +345,6 @@ func (g *genClientForType) GenerateType(c *generator.Context, t *types.Type, w i return sw.Error() } -// adjustTemplate adjust the origin verb template using the expansion name. -// TODO: Make the verbs in templates parametrized so the strings.Replace() is -// not needed. -func adjustTemplate(name, verbType, template string) string { - return strings.ReplaceAll(template, " "+titler.String(verbType), " "+name) -} - func generateInterface(defaultVerbTemplates map[string]string, tags util.Tags) string { // need an ordered list here to guarantee order of generated methods. out := []string{} @@ -352,34 +358,34 @@ func generateInterface(defaultVerbTemplates map[string]string, tags util.Tags) s func buildSubresourceDefaultVerbTemplates(generateApply bool) map[string]string { m := map[string]string{ - "create": `Create(ctx context.Context, $.type|private$Name string, $.inputType|private$ *$.inputType|raw$, opts $.CreateOptions|raw$) (*$.resultType|raw$, error)`, - "list": `List(ctx context.Context, $.type|private$Name string, opts $.ListOptions|raw$) (*$.resultType|raw$List, error)`, - "update": `Update(ctx context.Context, $.type|private$Name string, $.inputType|private$ *$.inputType|raw$, opts $.UpdateOptions|raw$) (*$.resultType|raw$, error)`, - "get": `Get(ctx context.Context, $.type|private$Name string, options $.GetOptions|raw$) (*$.resultType|raw$, error)`, + "create": `Create(ctx $.context|raw$, $.type|private$Name string, $.inputType|private$ *$.inputType|raw$, opts $.CreateOptions|raw$) (*$.resultType|raw$, error)`, + "list": `List(ctx $.context|raw$, $.type|private$Name string, opts $.ListOptions|raw$) (*$.resultType|raw$List, error)`, + "update": `Update(ctx $.context|raw$, $.type|private$Name string, $.inputType|private$ *$.inputType|raw$, opts $.UpdateOptions|raw$) (*$.resultType|raw$, error)`, + "get": `Get(ctx $.context|raw$, $.type|private$Name string, options $.GetOptions|raw$) (*$.resultType|raw$, error)`, } if generateApply { - m["apply"] = `Apply(ctx context.Context, $.type|private$Name string, $.inputType|private$ *$.inputApplyConfig|raw$, opts $.ApplyOptions|raw$) (*$.resultType|raw$, error)` + m["apply"] = `Apply(ctx $.context|raw$, $.type|private$Name string, $.inputType|private$ *$.inputApplyConfig|raw$, opts $.ApplyOptions|raw$) (*$.resultType|raw$, error)` } return m } func buildDefaultVerbTemplates(generateApply bool) map[string]string { m := map[string]string{ - "create": `Create(ctx context.Context, $.inputType|private$ *$.inputType|raw$, opts $.CreateOptions|raw$) (*$.resultType|raw$, error)`, - "update": `Update(ctx context.Context, $.inputType|private$ *$.inputType|raw$, opts $.UpdateOptions|raw$) (*$.resultType|raw$, error)`, + "create": `Create(ctx $.context|raw$, $.inputType|private$ *$.inputType|raw$, opts $.CreateOptions|raw$) (*$.resultType|raw$, error)`, + "update": `Update(ctx $.context|raw$, $.inputType|private$ *$.inputType|raw$, opts $.UpdateOptions|raw$) (*$.resultType|raw$, error)`, "updateStatus": `// Add a +genclient:noStatus 
comment above the type to avoid generating UpdateStatus(). -UpdateStatus(ctx context.Context, $.inputType|private$ *$.type|raw$, opts $.UpdateOptions|raw$) (*$.type|raw$, error)`, - "delete": `Delete(ctx context.Context, name string, opts $.DeleteOptions|raw$) error`, - "deleteCollection": `DeleteCollection(ctx context.Context, opts $.DeleteOptions|raw$, listOpts $.ListOptions|raw$) error`, - "get": `Get(ctx context.Context, name string, opts $.GetOptions|raw$) (*$.resultType|raw$, error)`, - "list": `List(ctx context.Context, opts $.ListOptions|raw$) (*$.resultType|raw$List, error)`, - "watch": `Watch(ctx context.Context, opts $.ListOptions|raw$) ($.watchInterface|raw$, error)`, - "patch": `Patch(ctx context.Context, name string, pt $.PatchType|raw$, data []byte, opts $.PatchOptions|raw$, subresources ...string) (result *$.resultType|raw$, err error)`, +UpdateStatus(ctx $.context|raw$, $.inputType|private$ *$.type|raw$, opts $.UpdateOptions|raw$) (*$.type|raw$, error)`, + "delete": `Delete(ctx $.context|raw$, name string, opts $.DeleteOptions|raw$) error`, + "deleteCollection": `DeleteCollection(ctx $.context|raw$, opts $.DeleteOptions|raw$, listOpts $.ListOptions|raw$) error`, + "get": `Get(ctx $.context|raw$, name string, opts $.GetOptions|raw$) (*$.resultType|raw$, error)`, + "list": `List(ctx $.context|raw$, opts $.ListOptions|raw$) (*$.resultType|raw$List, error)`, + "watch": `Watch(ctx $.context|raw$, opts $.ListOptions|raw$) ($.watchInterface|raw$, error)`, + "patch": `Patch(ctx $.context|raw$, name string, pt $.PatchType|raw$, data []byte, opts $.PatchOptions|raw$, subresources ...string) (result *$.resultType|raw$, err error)`, } if generateApply { - m["apply"] = `Apply(ctx context.Context, $.inputType|private$ *$.inputApplyConfig|raw$, opts $.ApplyOptions|raw$) (result *$.resultType|raw$, err error)` + m["apply"] = `Apply(ctx $.context|raw$, $.inputType|private$ *$.inputApplyConfig|raw$, opts $.ApplyOptions|raw$) (result *$.resultType|raw$, err error)` m["applyStatus"] = `// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
-ApplyStatus(ctx context.Context, $.inputType|private$ *$.inputApplyConfig|raw$, opts $.ApplyOptions|raw$) (result *$.resultType|raw$, err error)` +ApplyStatus(ctx $.context|raw$, $.inputType|private$ *$.inputApplyConfig|raw$, opts $.ApplyOptions|raw$) (result *$.resultType|raw$, err error)` } return m } @@ -472,12 +478,14 @@ var newStruct = []string{ // new$.type|publicPlural$ returns a $.type|publicPlural$ func new$.type|publicPlural$(c *$.GroupGoName$$.Version$Client, namespace string) *$.type|privatePlural$ { return &$.type|privatePlural${ - gentype.NewClient[*$.resultType|raw$]( + $.NewClient|raw$[*$.resultType|raw$]( "$.type|resource$", c.RESTClient(), $.schemeParameterCodec|raw$, namespace, - func() *$.resultType|raw$ { return &$.resultType|raw${} }), + func() *$.resultType|raw$ { return &$.resultType|raw${} }, + $if .prefersProtobuf$gentype.PrefersProtobuf[*$.resultType|raw$](),$end$ + ), } } `, @@ -485,12 +493,14 @@ var newStruct = []string{ // new$.type|publicPlural$ returns a $.type|publicPlural$ func new$.type|publicPlural$(c *$.GroupGoName$$.Version$Client, namespace string) *$.type|privatePlural$ { return &$.type|privatePlural${ - gentype.NewClientWithApply[*$.resultType|raw$, *$.inputApplyConfig|raw$]( + $.NewClientWithApply|raw$[*$.resultType|raw$, *$.inputApplyConfig|raw$]( "$.type|resource$", c.RESTClient(), $.schemeParameterCodec|raw$, namespace, - func() *$.resultType|raw$ { return &$.resultType|raw${} }), + func() *$.resultType|raw$ { return &$.resultType|raw${} }, + $if .prefersProtobuf$gentype.PrefersProtobuf[*$.resultType|raw$](),$end$ + ), } } `, @@ -498,13 +508,15 @@ var newStruct = []string{ // new$.type|publicPlural$ returns a $.type|publicPlural$ func new$.type|publicPlural$(c *$.GroupGoName$$.Version$Client, namespace string) *$.type|privatePlural$ { return &$.type|privatePlural${ - gentype.NewClientWithList[*$.resultType|raw$, *$.resultType|raw$List]( + $.NewClientWithList|raw$[*$.resultType|raw$, *$.resultType|raw$List]( "$.type|resource$", c.RESTClient(), $.schemeParameterCodec|raw$, namespace, func() *$.resultType|raw$ { return &$.resultType|raw${} }, - func() *$.resultType|raw$List { return &$.resultType|raw$List{} }), + func() *$.resultType|raw$List { return &$.resultType|raw$List{} }, + $if .prefersProtobuf$gentype.PrefersProtobuf[*$.resultType|raw$](),$end$ + ), } } `, @@ -512,13 +524,15 @@ var newStruct = []string{ // new$.type|publicPlural$ returns a $.type|publicPlural$ func new$.type|publicPlural$(c *$.GroupGoName$$.Version$Client, namespace string) *$.type|privatePlural$ { return &$.type|privatePlural${ - gentype.NewClientWithListAndApply[*$.resultType|raw$, *$.resultType|raw$List, *$.inputApplyConfig|raw$]( + $.NewClientWithListAndApply|raw$[*$.resultType|raw$, *$.resultType|raw$List, *$.inputApplyConfig|raw$]( "$.type|resource$", c.RESTClient(), $.schemeParameterCodec|raw$, namespace, func() *$.resultType|raw$ { return &$.resultType|raw${} }, - func() *$.resultType|raw$List { return &$.resultType|raw$List{} }), + func() *$.resultType|raw$List { return &$.resultType|raw$List{} }, + $if .prefersProtobuf$gentype.PrefersProtobuf[*$.resultType|raw$](),$end$ + ), } } `, @@ -526,12 +540,14 @@ var newStruct = []string{ // new$.type|publicPlural$ returns a $.type|publicPlural$ func new$.type|publicPlural$(c *$.GroupGoName$$.Version$Client) *$.type|privatePlural$ { return &$.type|privatePlural${ - gentype.NewClient[*$.resultType|raw$]( + $.NewClient|raw$[*$.resultType|raw$]( "$.type|resource$", c.RESTClient(), $.schemeParameterCodec|raw$, "", - func() 
*$.resultType|raw$ { return &$.resultType|raw${} }), + func() *$.resultType|raw$ { return &$.resultType|raw${} }, + $if .prefersProtobuf$gentype.PrefersProtobuf[*$.resultType|raw$](),$end$ + ), } } `, @@ -539,12 +555,14 @@ var newStruct = []string{ // new$.type|publicPlural$ returns a $.type|publicPlural$ func new$.type|publicPlural$(c *$.GroupGoName$$.Version$Client) *$.type|privatePlural$ { return &$.type|privatePlural${ - gentype.NewClientWithApply[*$.resultType|raw$, *$.inputApplyConfig|raw$]( + $.NewClientWithApply|raw$[*$.resultType|raw$, *$.inputApplyConfig|raw$]( "$.type|resource$", c.RESTClient(), $.schemeParameterCodec|raw$, "", - func() *$.resultType|raw$ { return &$.resultType|raw${} }), + func() *$.resultType|raw$ { return &$.resultType|raw${} }, + $if .prefersProtobuf$gentype.PrefersProtobuf[*$.resultType|raw$](),$end$ + ), } } `, @@ -552,13 +570,15 @@ var newStruct = []string{ // new$.type|publicPlural$ returns a $.type|publicPlural$ func new$.type|publicPlural$(c *$.GroupGoName$$.Version$Client) *$.type|privatePlural$ { return &$.type|privatePlural${ - gentype.NewClientWithList[*$.resultType|raw$, *$.resultType|raw$List]( + $.NewClientWithList|raw$[*$.resultType|raw$, *$.resultType|raw$List]( "$.type|resource$", c.RESTClient(), $.schemeParameterCodec|raw$, "", func() *$.resultType|raw$ { return &$.resultType|raw${} }, - func() *$.resultType|raw$List { return &$.resultType|raw$List{} }), + func() *$.resultType|raw$List { return &$.resultType|raw$List{} }, + $if .prefersProtobuf$gentype.PrefersProtobuf[*$.resultType|raw$](),$end$ + ), } } `, @@ -566,30 +586,32 @@ var newStruct = []string{ // new$.type|publicPlural$ returns a $.type|publicPlural$ func new$.type|publicPlural$(c *$.GroupGoName$$.Version$Client) *$.type|privatePlural$ { return &$.type|privatePlural${ - gentype.NewClientWithListAndApply[*$.resultType|raw$, *$.resultType|raw$List, *$.inputApplyConfig|raw$]( + $.NewClientWithListAndApply|raw$[*$.resultType|raw$, *$.resultType|raw$List, *$.inputApplyConfig|raw$]( "$.type|resource$", c.RESTClient(), $.schemeParameterCodec|raw$, "", func() *$.resultType|raw$ { return &$.resultType|raw${} }, - func() *$.resultType|raw$List { return &$.resultType|raw$List{} }), + func() *$.resultType|raw$List { return &$.resultType|raw$List{} }, + $if .prefersProtobuf$gentype.PrefersProtobuf[*$.resultType|raw$](),$end$ + ), } } `, } var listTemplate = ` -// List takes label and field selectors, and returns the list of $.resultType|publicPlural$ that match those selectors. -func (c *$.type|privatePlural$) List(ctx context.Context, opts $.ListOptions|raw$) (*$.resultType|raw$List, error) { +// $.verb$ takes label and field selectors, and returns the list of $.resultType|publicPlural$ that match those selectors. 
+func (c *$.type|privatePlural$) $.verb$(ctx $.context|raw$, opts $.ListOptions|raw$) (*$.resultType|raw$List, error) { if watchListOptions, hasWatchListOptionsPrepared, watchListOptionsErr := $.PrepareWatchListOptionsFromListOptions|raw$(opts); watchListOptionsErr != nil { - klog.Warningf("Failed preparing watchlist options for $.type|resource$, falling back to the standard LIST semantics, err = %v", watchListOptionsErr ) + $.klogWarningf|raw$("Failed preparing watchlist options for $.type|resource$, falling back to the standard LIST semantics, err = %v", watchListOptionsErr ) } else if hasWatchListOptionsPrepared { result, err := c.watchList(ctx, watchListOptions) if err == nil { $.CheckWatchListFromCacheDataConsistencyIfRequested|raw$(ctx, "watchlist request for $.type|resource$", c.list, opts, result) return result, nil } - klog.Warningf("The watchlist request for $.type|resource$ ended with an error, falling back to the standard LIST semantics, err = %v", err) + $.klogWarningf|raw$("The watchlist request for $.type|resource$ ended with an error, falling back to the standard LIST semantics, err = %v", err) } result, err := c.list(ctx, opts) if err == nil { @@ -601,13 +623,14 @@ func (c *$.type|privatePlural$) List(ctx context.Context, opts $.ListOptions|raw var privateListTemplate = ` // list takes label and field selectors, and returns the list of $.resultType|publicPlural$ that match those selectors. -func (c *$.type|privatePlural$) list(ctx context.Context, opts $.ListOptions|raw$) (result *$.resultType|raw$List, err error) { - var timeout time.Duration +func (c *$.type|privatePlural$) list(ctx $.context|raw$, opts $.ListOptions|raw$) (result *$.resultType|raw$List, err error) { + var timeout $.timeDuration|raw$ if opts.TimeoutSeconds != nil{ - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + timeout = $.timeDuration|raw$(*opts.TimeoutSeconds) * $.timeSecond|raw$ } result = &$.resultType|raw$List{} err = c.GetClient().Get(). + $if .prefersProtobuf$UseProtobufAsDefault().$end$ $if .namespaced$Namespace(c.GetNamespace()).$end$ Resource("$.type|resource$"). VersionedParams(&opts, $.schemeParameterCodec|raw$). @@ -619,14 +642,15 @@ func (c *$.type|privatePlural$) list(ctx context.Context, opts $.ListOptions|raw ` var listSubresourceTemplate = ` -// List takes $.type|raw$ name, label and field selectors, and returns the list of $.resultType|publicPlural$ that match those selectors. -func (c *$.type|privatePlural$) List(ctx context.Context, $.type|private$Name string, opts $.ListOptions|raw$) (result *$.resultType|raw$List, err error) { - var timeout time.Duration +// $.verb$ takes $.type|raw$ name, label and field selectors, and returns the list of $.resultType|publicPlural$ that match those selectors. +func (c *$.type|privatePlural$) $.verb$(ctx $.context|raw$, $.type|private$Name string, opts $.ListOptions|raw$) (result *$.resultType|raw$List, err error) { + var timeout $.timeDuration|raw$ if opts.TimeoutSeconds != nil{ - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + timeout = $.timeDuration|raw$(*opts.TimeoutSeconds) * $.timeSecond|raw$ } result = &$.resultType|raw$List{} err = c.GetClient().Get(). + $if .prefersProtobuf$UseProtobufAsDefault().$end$ $if .namespaced$Namespace(c.GetNamespace()).$end$ Resource("$.type|resource$"). Name($.type|private$Name). 
@@ -640,10 +664,11 @@ func (c *$.type|privatePlural$) List(ctx context.Context, $.type|private$Name st ` var getTemplate = ` -// Get takes name of the $.type|private$, and returns the corresponding $.resultType|private$ object, and an error if there is any. -func (c *$.type|privatePlural$) Get(ctx context.Context, name string, options $.GetOptions|raw$) (result *$.resultType|raw$, err error) { +// $.verb$ takes name of the $.type|private$, and returns the corresponding $.resultType|private$ object, and an error if there is any. +func (c *$.type|privatePlural$) $.verb$(ctx $.context|raw$, name string, options $.GetOptions|raw$) (result *$.resultType|raw$, err error) { result = &$.resultType|raw${} err = c.GetClient().Get(). + $if .prefersProtobuf$UseProtobufAsDefault().$end$ $if .namespaced$Namespace(c.GetNamespace()).$end$ Resource("$.type|resource$"). Name(name). @@ -655,10 +680,11 @@ func (c *$.type|privatePlural$) Get(ctx context.Context, name string, options $. ` var getSubresourceTemplate = ` -// Get takes name of the $.type|private$, and returns the corresponding $.resultType|raw$ object, and an error if there is any. -func (c *$.type|privatePlural$) Get(ctx context.Context, $.type|private$Name string, options $.GetOptions|raw$) (result *$.resultType|raw$, err error) { +// $.verb$ takes name of the $.type|private$, and returns the corresponding $.resultType|raw$ object, and an error if there is any. +func (c *$.type|privatePlural$) $.verb$(ctx $.context|raw$, $.type|private$Name string, options $.GetOptions|raw$) (result *$.resultType|raw$, err error) { result = &$.resultType|raw${} err = c.GetClient().Get(). + $if .prefersProtobuf$UseProtobufAsDefault().$end$ $if .namespaced$Namespace(c.GetNamespace()).$end$ Resource("$.type|resource$"). Name($.type|private$Name). @@ -671,9 +697,10 @@ func (c *$.type|privatePlural$) Get(ctx context.Context, $.type|private$Name str ` var deleteTemplate = ` -// Delete takes name of the $.type|private$ and deletes it. Returns an error if one occurs. -func (c *$.type|privatePlural$) Delete(ctx context.Context, name string, opts $.DeleteOptions|raw$) error { +// $.verb$ takes name of the $.type|private$ and deletes it. Returns an error if one occurs. +func (c *$.type|privatePlural$) $.verb$(ctx $.context|raw$, name string, opts $.DeleteOptions|raw$) error { return c.GetClient().Delete(). + $if .prefersProtobuf$UseProtobufAsDefault().$end$ $if .namespaced$Namespace(c.GetNamespace()).$end$ Resource("$.type|resource$"). Name(name). @@ -684,10 +711,11 @@ func (c *$.type|privatePlural$) Delete(ctx context.Context, name string, opts $. ` var createSubresourceTemplate = ` -// Create takes the representation of a $.inputType|private$ and creates it. Returns the server's representation of the $.resultType|private$, and an error, if there is any. -func (c *$.type|privatePlural$) Create(ctx context.Context, $.type|private$Name string, $.inputType|private$ *$.inputType|raw$, opts $.CreateOptions|raw$) (result *$.resultType|raw$, err error) { +// $.verb$ takes the representation of a $.inputType|private$ and creates it. Returns the server's representation of the $.resultType|private$, and an error, if there is any. +func (c *$.type|privatePlural$) $.verb$(ctx $.context|raw$, $.type|private$Name string, $.inputType|private$ *$.inputType|raw$, opts $.CreateOptions|raw$) (result *$.resultType|raw$, err error) { result = &$.resultType|raw${} err = c.GetClient().Post(). 
+ $if .prefersProtobuf$UseProtobufAsDefault().$end$ $if .namespaced$Namespace(c.GetNamespace()).$end$ Resource("$.type|resource$"). Name($.type|private$Name). @@ -701,10 +729,11 @@ func (c *$.type|privatePlural$) Create(ctx context.Context, $.type|private$Name ` var createTemplate = ` -// Create takes the representation of a $.inputType|private$ and creates it. Returns the server's representation of the $.resultType|private$, and an error, if there is any. -func (c *$.type|privatePlural$) Create(ctx context.Context, $.inputType|private$ *$.inputType|raw$, opts $.CreateOptions|raw$) (result *$.resultType|raw$, err error) { +// $.verb$ takes the representation of a $.inputType|private$ and creates it. Returns the server's representation of the $.resultType|private$, and an error, if there is any. +func (c *$.type|privatePlural$) $.verb$(ctx $.context|raw$, $.inputType|private$ *$.inputType|raw$, opts $.CreateOptions|raw$) (result *$.resultType|raw$, err error) { result = &$.resultType|raw${} err = c.GetClient().Post(). + $if .prefersProtobuf$UseProtobufAsDefault().$end$ $if .namespaced$Namespace(c.GetNamespace()).$end$ Resource("$.type|resource$"). VersionedParams(&opts, $.schemeParameterCodec|raw$). @@ -716,10 +745,11 @@ func (c *$.type|privatePlural$) Create(ctx context.Context, $.inputType|private$ ` var updateSubresourceTemplate = ` -// Update takes the top resource name and the representation of a $.inputType|private$ and updates it. Returns the server's representation of the $.resultType|private$, and an error, if there is any. -func (c *$.type|privatePlural$) Update(ctx context.Context, $.type|private$Name string, $.inputType|private$ *$.inputType|raw$, opts $.UpdateOptions|raw$) (result *$.resultType|raw$, err error) { +// $.verb$ takes the top resource name and the representation of a $.inputType|private$ and updates it. Returns the server's representation of the $.resultType|private$, and an error, if there is any. +func (c *$.type|privatePlural$) $.verb$(ctx $.context|raw$, $.type|private$Name string, $.inputType|private$ *$.inputType|raw$, opts $.UpdateOptions|raw$) (result *$.resultType|raw$, err error) { result = &$.resultType|raw${} err = c.GetClient().Put(). + $if .prefersProtobuf$UseProtobufAsDefault().$end$ $if .namespaced$Namespace(c.GetNamespace()).$end$ Resource("$.type|resource$"). Name($.type|private$Name). @@ -733,10 +763,11 @@ func (c *$.type|privatePlural$) Update(ctx context.Context, $.type|private$Name ` var updateTemplate = ` -// Update takes the representation of a $.inputType|private$ and updates it. Returns the server's representation of the $.resultType|private$, and an error, if there is any. -func (c *$.type|privatePlural$) Update(ctx context.Context, $.inputType|private$ *$.inputType|raw$, opts $.UpdateOptions|raw$) (result *$.resultType|raw$, err error) { +// $.verb$ takes the representation of a $.inputType|private$ and updates it. Returns the server's representation of the $.resultType|private$, and an error, if there is any. +func (c *$.type|privatePlural$) $.verb$(ctx $.context|raw$, $.inputType|private$ *$.inputType|raw$, opts $.UpdateOptions|raw$) (result *$.resultType|raw$, err error) { result = &$.resultType|raw${} err = c.GetClient().Put(). + $if .prefersProtobuf$UseProtobufAsDefault().$end$ $if .namespaced$Namespace(c.GetNamespace()).$end$ Resource("$.type|resource$"). Name($.inputType|private$.Name). 
@@ -749,14 +780,15 @@ func (c *$.type|privatePlural$) Update(ctx context.Context, $.inputType|private$ ` var watchTemplate = ` -// Watch returns a $.watchInterface|raw$ that watches the requested $.type|privatePlural$. -func (c *$.type|privatePlural$) Watch(ctx context.Context, opts $.ListOptions|raw$) ($.watchInterface|raw$, error) { - var timeout time.Duration +// $.verb$ returns a $.watchInterface|raw$ that watches the requested $.type|privatePlural$. +func (c *$.type|privatePlural$) $.verb$(ctx $.context|raw$, opts $.ListOptions|raw$) ($.watchInterface|raw$, error) { + var timeout $.timeDuration|raw$ if opts.TimeoutSeconds != nil{ - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + timeout = $.timeDuration|raw$(*opts.TimeoutSeconds) * $.timeSecond|raw$ } opts.Watch = true return c.GetClient().Get(). + $if .prefersProtobuf$UseProtobufAsDefault().$end$ $if .namespaced$Namespace(c.GetNamespace()).$end$ VersionedParams(&opts, $.schemeParameterCodec|raw$). Timeout(timeout). @@ -766,14 +798,15 @@ func (c *$.type|privatePlural$) Watch(ctx context.Context, opts $.ListOptions|ra var watchListTemplate = ` // watchList establishes a watch stream with the server and returns the list of $.resultType|publicPlural$ -func (c *$.type|privatePlural$) watchList(ctx context.Context, opts $.ListOptions|raw$) (result *$.resultType|raw$List, err error) { - var timeout time.Duration +func (c *$.type|privatePlural$) watchList(ctx $.context|raw$, opts $.ListOptions|raw$) (result *$.resultType|raw$List, err error) { + var timeout $.timeDuration|raw$ if opts.TimeoutSeconds != nil{ - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + timeout = $.timeDuration|raw$(*opts.TimeoutSeconds) * $.timeSecond|raw$ } result = &$.resultType|raw$List{} - err = c.client.Get(). - $if .namespaced$Namespace(c.ns).$end$ + err = c.GetClient().Get(). + $if .prefersProtobuf$UseProtobufAsDefault().$end$ + $if .namespaced$Namespace(c.GetNamespace()).$end$ Resource("$.type|resource$"). VersionedParams(&opts, $.schemeParameterCodec|raw$). Timeout(timeout). @@ -784,10 +817,11 @@ func (c *$.type|privatePlural$) watchList(ctx context.Context, opts $.ListOption ` var patchTemplate = ` -// Patch applies the patch and returns the patched $.resultType|private$. -func (c *$.type|privatePlural$) Patch(ctx context.Context, name string, pt $.PatchType|raw$, data []byte, opts $.PatchOptions|raw$, subresources ...string) (result *$.resultType|raw$, err error) { +// $.verb$ applies the patch and returns the patched $.resultType|private$. +func (c *$.type|privatePlural$) $.verb$(ctx $.context|raw$, name string, pt $.PatchType|raw$, data []byte, opts $.PatchOptions|raw$, subresources ...string) (result *$.resultType|raw$, err error) { result = &$.resultType|raw${} err = c.GetClient().Patch(pt). + $if .prefersProtobuf$UseProtobufAsDefault().$end$ $if .namespaced$Namespace(c.GetNamespace()).$end$ Resource("$.type|resource$"). Name(name). @@ -801,27 +835,27 @@ func (c *$.type|privatePlural$) Patch(ctx context.Context, name string, pt $.Pat ` var applyTemplate = ` -// Apply takes the given apply declarative configuration, applies it and returns the applied $.resultType|private$. -func (c *$.type|privatePlural$) Apply(ctx context.Context, $.inputType|private$ *$.inputApplyConfig|raw$, opts $.ApplyOptions|raw$) (result *$.resultType|raw$, err error) { +// $.verb$ takes the given apply declarative configuration, applies it and returns the applied $.resultType|private$. 
+func (c *$.type|privatePlural$) $.verb$(ctx $.context|raw$, $.inputType|private$ *$.inputApplyConfig|raw$, opts $.ApplyOptions|raw$) (result *$.resultType|raw$, err error) { if $.inputType|private$ == nil { - return nil, fmt.Errorf("$.inputType|private$ provided to Apply must not be nil") + return nil, $.fmtErrorf|raw$("$.inputType|private$ provided to $.verb$ must not be nil") } patchOpts := opts.ToPatchOptions() - data, err := $.jsonMarshal|raw$($.inputType|private$) + name := $.inputType|private$.Name + if name == nil { + return nil, $.fmtErrorf|raw$("$.inputType|private$.Name must be provided to $.verb$") + } + request, err := $.applyNewRequest|raw$(c.GetClient(), $.inputType|private$) if err != nil { return nil, err } - name := $.inputType|private$.Name - if name == nil { - return nil, fmt.Errorf("$.inputType|private$.Name must be provided to Apply") - } result = &$.resultType|raw${} - err = c.GetClient().Patch($.ApplyPatchType|raw$). + err = request. + $if .prefersProtobuf$UseProtobufAsDefault().$end$ $if .namespaced$Namespace(c.GetNamespace()).$end$ Resource("$.type|resource$"). Name(*name). VersionedParams(&patchOpts, $.schemeParameterCodec|raw$). - Body(data). Do(ctx). Into(result) return @@ -829,26 +863,26 @@ func (c *$.type|privatePlural$) Apply(ctx context.Context, $.inputType|private$ ` var applySubresourceTemplate = ` -// Apply takes top resource name and the apply declarative configuration for $.subresourcePath$, +// $.verb$ takes top resource name and the apply declarative configuration for $.subresourcePath$, // applies it and returns the applied $.resultType|private$, and an error, if there is any. -func (c *$.type|privatePlural$) Apply(ctx context.Context, $.type|private$Name string, $.inputType|private$ *$.inputApplyConfig|raw$, opts $.ApplyOptions|raw$) (result *$.resultType|raw$, err error) { +func (c *$.type|privatePlural$) $.verb$(ctx $.context|raw$, $.type|private$Name string, $.inputType|private$ *$.inputApplyConfig|raw$, opts $.ApplyOptions|raw$) (result *$.resultType|raw$, err error) { if $.inputType|private$ == nil { - return nil, fmt.Errorf("$.inputType|private$ provided to Apply must not be nil") + return nil, $.fmtErrorf|raw$("$.inputType|private$ provided to $.verb$ must not be nil") } patchOpts := opts.ToPatchOptions() - data, err := $.jsonMarshal|raw$($.inputType|private$) + request, err := $.applyNewRequest|raw$(c.GetClient(), $.inputType|private$) if err != nil { return nil, err } result = &$.resultType|raw${} - err = c.GetClient().Patch($.ApplyPatchType|raw$). + err = request. + $if .prefersProtobuf$UseProtobufAsDefault().$end$ $if .namespaced$Namespace(c.GetNamespace()).$end$ Resource("$.type|resource$"). Name($.type|private$Name). SubResource("$.subresourcePath$"). VersionedParams(&patchOpts, $.schemeParameterCodec|raw$). - Body(data). Do(ctx). Into(result) return diff --git a/vendor/k8s.io/code-generator/cmd/conversion-gen/args/args.go b/vendor/k8s.io/code-generator/cmd/conversion-gen/args/args.go index cc61c48d4..eaadaa1b9 100644 --- a/vendor/k8s.io/code-generator/cmd/conversion-gen/args/args.go +++ b/vendor/k8s.io/code-generator/cmd/conversion-gen/args/args.go @@ -20,6 +20,8 @@ import ( "fmt" "github.com/spf13/pflag" + + "k8s.io/gengo/v2" ) // DefaultBasePeerDirs are the peer-dirs nearly everybody will use, i.e. those coming from @@ -43,11 +45,6 @@ type Args struct { // generator pick up manually written conversion funcs from external packages. ExtraPeerDirs []string - // Additional dirs to parse and load, but not consider for peers. 
This is - // useful when packages depend on other packages and want to call - // conversions across them. - ExtraDirs []string - // SkipUnsafe indicates whether to generate unsafe conversions to improve the efficiency // of these operations. The unsafe operation is a direct pointer assignment via unsafe // (within the allowed uses of unsafe) and is equivalent to a proposed Golang change to @@ -57,13 +54,20 @@ type Args struct { // GoHeaderFile is the path to a boilerplate header file for generated // code. GoHeaderFile string + + // GeneratedBuildTag is the tag used to identify code generated by execution + // of this type. Each generator should use a different tag, and different + // groups of generators (external API that depends on Kube generations) should + // keep tags distinct as well. + GeneratedBuildTag string } // New returns default arguments for the generator. func New() *Args { return &Args{ - BasePeerDirs: DefaultBasePeerDirs, - SkipUnsafe: false, + BasePeerDirs: DefaultBasePeerDirs, + SkipUnsafe: false, + GeneratedBuildTag: gengo.StdBuildTag, } } @@ -75,12 +79,11 @@ func (args *Args) AddFlags(fs *pflag.FlagSet) { "Comma-separated list of apimachinery import paths which are considered, after tag-specified peers, for conversions. Only change these if you have very good reasons.") fs.StringSliceVar(&args.ExtraPeerDirs, "extra-peer-dirs", args.ExtraPeerDirs, "Application specific comma-separated list of import paths which are considered, after tag-specified peers and base-peer-dirs, for conversions.") - fs.StringSliceVar(&args.ExtraDirs, "extra-dirs", args.ExtraDirs, - "Application specific comma-separated list of import paths which are loaded and considered for callable conversions, but are not considered peers for conversion.") fs.BoolVar(&args.SkipUnsafe, "skip-unsafe", args.SkipUnsafe, "If true, will not generate code using unsafe pointer conversions; resulting code may be slower.") fs.StringVar(&args.GoHeaderFile, "go-header-file", "", "the path to a file containing boilerplate header text; the string \"YEAR\" will be replaced with the current 4-digit year") + fs.StringVar(&args.GeneratedBuildTag, "build-tag", args.GeneratedBuildTag, "A Go build tag to use to identify files generated by this command. Should be unique.") } // Validate checks the given arguments. 
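
Note on the conversion-gen args change above: args.New() now seeds GeneratedBuildTag with gengo.StdBuildTag, AddFlags exposes it as --build-tag, and the generator stamps its boilerplate with that tag instead of the previously hard-coded constant. Below is a minimal, illustrative sketch (not part of the vendored diff) of how a caller might wire the new field; the flag-set name and the final print are assumptions, while args.New, Args.AddFlags, Args.GoHeaderFile, Args.GeneratedBuildTag, gengo.StdBuildTag, gengo.StdGeneratedBy and gengo.GoBoilerplate are taken from this diff.

package main

import (
	"fmt"
	"os"

	"github.com/spf13/pflag"
	"k8s.io/gengo/v2"

	"k8s.io/code-generator/cmd/conversion-gen/args"
)

func main() {
	// New() now defaults GeneratedBuildTag to gengo.StdBuildTag.
	a := args.New()

	// AddFlags registers --build-tag alongside the existing flags.
	fs := pflag.NewFlagSet("conversion-gen-sketch", pflag.ExitOnError) // hypothetical flag-set name
	a.AddFlags(fs)
	if err := fs.Parse(os.Args[1:]); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}

	// The boilerplate header is built with the caller-chosen build tag
	// rather than the hard-coded gengo.StdBuildTag used before this change.
	boilerplate, err := gengo.GoBoilerplate(a.GoHeaderFile, a.GeneratedBuildTag, gengo.StdGeneratedBy)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Printf("boilerplate (%d bytes) will be tagged with %q\n", len(boilerplate), a.GeneratedBuildTag)
}
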
diff --git a/vendor/k8s.io/code-generator/cmd/conversion-gen/generators/conversion.go b/vendor/k8s.io/code-generator/cmd/conversion-gen/generators/conversion.go index f58130af2..a5f1b0cfb 100644 --- a/vendor/k8s.io/code-generator/cmd/conversion-gen/generators/conversion.go +++ b/vendor/k8s.io/code-generator/cmd/conversion-gen/generators/conversion.go @@ -158,16 +158,16 @@ func getManualConversionFunctions(context *generator.Context, pkg *types.Package klog.V(6).Infof("%s has a receiver", f.Name) continue } - if len(signature.Parameters) != 3 || signature.Parameters[2].Name != scopeName { + if len(signature.Parameters) != 3 || signature.Parameters[2].Type.Name != scopeName { klog.V(6).Infof("%s has wrong parameters", f.Name) continue } - if len(signature.Results) != 1 || signature.Results[0].Name != errorName { + if len(signature.Results) != 1 || signature.Results[0].Type.Name != errorName { klog.V(6).Infof("%s has wrong results", f.Name) continue } - inType := signature.Parameters[0] - outType := signature.Parameters[1] + inType := signature.Parameters[0].Type + outType := signature.Parameters[1].Type if inType.Kind != types.Pointer || outType.Kind != types.Pointer { klog.V(6).Infof("%s has wrong parameter types", f.Name) continue @@ -196,7 +196,7 @@ func getManualConversionFunctions(context *generator.Context, pkg *types.Package } func GetTargets(context *generator.Context, args *args.Args) []generator.Target { - boilerplate, err := gengo.GoBoilerplate(args.GoHeaderFile, gengo.StdBuildTag, gengo.StdGeneratedBy) + boilerplate, err := gengo.GoBoilerplate(args.GoHeaderFile, args.GeneratedBuildTag, gengo.StdGeneratedBy) if err != nil { klog.Fatalf("Failed loading boilerplate: %v", err) } @@ -478,7 +478,7 @@ func NewGenConversion(outputFilename, typesPackage, outputPackage string, manual outputPackage: outputPackage, peerPackages: peerPkgs, manualConversions: manualConversions, - imports: generator.NewImportTracker(), + imports: generator.NewImportTrackerForPackage(outputPackage), types: []*types.Type{}, explicitConversions: []conversionPair{}, skippedFields: map[*types.Type][]string{}, diff --git a/vendor/k8s.io/code-generator/cmd/conversion-gen/main.go b/vendor/k8s.io/code-generator/cmd/conversion-gen/main.go index cd52a9b96..5aec5025d 100644 --- a/vendor/k8s.io/code-generator/cmd/conversion-gen/main.go +++ b/vendor/k8s.io/code-generator/cmd/conversion-gen/main.go @@ -128,7 +128,7 @@ func main() { generators.NameSystems(), generators.DefaultNameSystem(), myTargets, - gengo.StdBuildTag, + args.GeneratedBuildTag, pflag.Args(), ); err != nil { klog.Fatalf("Error: %v", err) diff --git a/vendor/k8s.io/code-generator/cmd/deepcopy-gen/generators/deepcopy.go b/vendor/k8s.io/code-generator/cmd/deepcopy-gen/generators/deepcopy.go index c6047aaca..e1a8e4c16 100644 --- a/vendor/k8s.io/code-generator/cmd/deepcopy-gen/generators/deepcopy.go +++ b/vendor/k8s.io/code-generator/cmd/deepcopy-gen/generators/deepcopy.go @@ -222,7 +222,7 @@ func NewGenDeepCopy(outputFilename, targetPackage string, boundingDirs []string, boundingDirs: boundingDirs, allTypes: allTypes, registerTypes: registerTypes, - imports: generator.NewImportTracker(), + imports: generator.NewImportTrackerForPackage(targetPackage), typesForInit: make([]*types.Type, 0), } } @@ -277,8 +277,8 @@ func deepCopyMethod(t *types.Type) (*types.Signature, error) { return nil, fmt.Errorf("type %v: invalid DeepCopy signature, expected exactly one result", t) } - ptrResult := f.Signature.Results[0].Kind == types.Pointer && f.Signature.Results[0].Elem.Name 
== t.Name - nonPtrResult := f.Signature.Results[0].Name == t.Name + ptrResult := f.Signature.Results[0].Type.Kind == types.Pointer && f.Signature.Results[0].Type.Elem.Name == t.Name + nonPtrResult := f.Signature.Results[0].Type.Name == t.Name if !ptrResult && !nonPtrResult { return nil, fmt.Errorf("type %v: invalid DeepCopy signature, expected to return %s or *%s", t, t.Name.Name, t.Name.Name) @@ -329,7 +329,7 @@ func deepCopyIntoMethod(t *types.Type) (*types.Signature, error) { return nil, fmt.Errorf("type %v: invalid DeepCopy signature, expected no result type", t) } - ptrParam := f.Signature.Parameters[0].Kind == types.Pointer && f.Signature.Parameters[0].Elem.Name == t.Name + ptrParam := f.Signature.Parameters[0].Type.Kind == types.Pointer && f.Signature.Parameters[0].Type.Elem.Name == t.Name if !ptrParam { return nil, fmt.Errorf("type %v: invalid DeepCopy signature, expected parameter of type *%s", t, t.Name.Name) @@ -696,7 +696,7 @@ func (g *genDeepCopy) doMap(t *types.Type, sw *generator.SnippetWriter) { leftPointer := ut.Elem.Kind == types.Pointer rightPointer := !isReference(ut.Elem) if dc != nil { - rightPointer = dc.Results[0].Kind == types.Pointer + rightPointer = dc.Results[0].Type.Kind == types.Pointer } if leftPointer == rightPointer { sw.Do("(*out)[key] = val.DeepCopy()\n", nil) @@ -812,7 +812,7 @@ func (g *genDeepCopy) doStruct(t *types.Type, sw *generator.SnippetWriter) { leftPointer := ft.Kind == types.Pointer rightPointer := !isReference(ft) if dc != nil { - rightPointer = dc.Results[0].Kind == types.Pointer + rightPointer = dc.Results[0].Type.Kind == types.Pointer } if leftPointer == rightPointer { sw.Do("out.$.name$ = in.$.name$.DeepCopy()\n", args) @@ -866,7 +866,7 @@ func (g *genDeepCopy) doPointer(t *types.Type, sw *generator.SnippetWriter) { case dc != nil || dci != nil: rightPointer := !isReference(ut.Elem) if dc != nil { - rightPointer = dc.Results[0].Kind == types.Pointer + rightPointer = dc.Results[0].Type.Kind == types.Pointer } if rightPointer { sw.Do("*out = (*in).DeepCopy()\n", nil) diff --git a/vendor/k8s.io/code-generator/cmd/defaulter-gen/args/args.go b/vendor/k8s.io/code-generator/cmd/defaulter-gen/args/args.go index 52a9d1c67..8d8dfe97f 100644 --- a/vendor/k8s.io/code-generator/cmd/defaulter-gen/args/args.go +++ b/vendor/k8s.io/code-generator/cmd/defaulter-gen/args/args.go @@ -20,17 +20,27 @@ import ( "fmt" "github.com/spf13/pflag" + + "k8s.io/gengo/v2" ) type Args struct { OutputFile string ExtraPeerDirs []string // Always consider these as last-ditch possibilities for conversions. GoHeaderFile string + + // GeneratedBuildTag is the tag used to identify code generated by execution + // of this type. Each generator should use a different tag, and different + // groups of generators (external API that depends on Kube generations) should + // keep tags distinct as well. + GeneratedBuildTag string } // New returns default arguments for the generator. func New() *Args { - return &Args{} + return &Args{ + GeneratedBuildTag: gengo.StdBuildTag, + } } // AddFlags add the generator flags to the flag set. 
@@ -41,6 +51,7 @@ func (args *Args) AddFlags(fs *pflag.FlagSet) { "Comma-separated list of import paths which are considered, after tag-specified peers, for conversions.") fs.StringVar(&args.GoHeaderFile, "go-header-file", "", "the path to a file containing boilerplate header text; the string \"YEAR\" will be replaced with the current 4-digit year") + fs.StringVar(&args.GeneratedBuildTag, "build-tag", args.GeneratedBuildTag, "A Go build tag to use to identify files generated by this command. Should be unique.") } // Validate checks the given arguments. diff --git a/vendor/k8s.io/code-generator/cmd/defaulter-gen/generators/defaulter.go b/vendor/k8s.io/code-generator/cmd/defaulter-gen/generators/defaulter.go index 64b9ff299..82e1cdd20 100644 --- a/vendor/k8s.io/code-generator/cmd/defaulter-gen/generators/defaulter.go +++ b/vendor/k8s.io/code-generator/cmd/defaulter-gen/generators/defaulter.go @@ -165,7 +165,7 @@ func getManualDefaultingFunctions(context *generator.Context, pkg *types.Package if len(signature.Results) != 0 { continue } - inType := signature.Parameters[0] + inType := signature.Parameters[0].Type if inType.Kind != types.Pointer { continue } @@ -222,7 +222,7 @@ func getManualDefaultingFunctions(context *generator.Context, pkg *types.Package } func GetTargets(context *generator.Context, args *args.Args) []generator.Target { - boilerplate, err := gengo.GoBoilerplate(args.GoHeaderFile, gengo.StdBuildTag, gengo.StdGeneratedBy) + boilerplate, err := gengo.GoBoilerplate(args.GoHeaderFile, args.GeneratedBuildTag, gengo.StdGeneratedBy) if err != nil { klog.Fatalf("Failed loading boilerplate: %v", err) } @@ -823,7 +823,7 @@ func (g *genDefaulter) GenerateType(c *generator.Context, t *types.Type, w io.Wr }) sw := generator.NewSnippetWriter(w, c, "$", "$") - g.generateDefaulter(t, callTree, sw) + g.generateDefaulter(c, t, callTree, sw) return sw.Error() } @@ -833,9 +833,9 @@ func defaultingArgsFromType(inType *types.Type) generator.Args { } } -func (g *genDefaulter) generateDefaulter(inType *types.Type, callTree *callNode, sw *generator.SnippetWriter) { +func (g *genDefaulter) generateDefaulter(c *generator.Context, inType *types.Type, callTree *callNode, sw *generator.SnippetWriter) { sw.Do("func $.inType|objectdefaultfn$(in *$.inType|raw$) {\n", defaultingArgsFromType(inType)) - callTree.WriteMethod("in", 0, nil, sw) + callTree.WriteMethod(c, "in", 0, nil, sw) sw.Do("}\n\n", nil) } @@ -996,15 +996,19 @@ func getTypeZeroValue(t string) (interface{}, error) { return defaultZero, nil } -func (n *callNode) writeDefaulter(varName string, index string, isVarPointer bool, sw *generator.SnippetWriter) { +func (n *callNode) writeDefaulter(c *generator.Context, varName string, index string, isVarPointer bool, sw *generator.SnippetWriter) { if n.defaultValue.IsEmpty() { return } + + jsonUnmarshalType := c.Universe.Type(types.Name{Package: "encoding/json", Name: "Unmarshal"}) + args := generator.Args{ - "defaultValue": n.defaultValue.Resolved(), - "varName": varName, - "index": index, - "varTopType": n.defaultTopLevelType, + "defaultValue": n.defaultValue.Resolved(), + "varName": varName, + "index": index, + "varTopType": n.defaultTopLevelType, + "jsonUnmarshal": jsonUnmarshalType, } variablePlaceholder := "" @@ -1101,13 +1105,13 @@ func (n *callNode) writeDefaulter(varName string, index string, isVarPointer boo // This applies to maps with non-primitive values (eg: map[string]SubStruct) if n.key { sw.Do("$.mapDefaultVar$ := $.varName$[$.index$]\n", args) - sw.Do("if err := 
json.Unmarshal([]byte(`$.defaultValue$`), &$.mapDefaultVar$); err != nil {\n", args) + sw.Do("if err := $.jsonUnmarshal|raw$([]byte(`$.defaultValue$`), &$.mapDefaultVar$); err != nil {\n", args) } else { variablePointer := variablePlaceholder if !isVarPointer { variablePointer = "&" + variablePointer } - sw.Do(fmt.Sprintf("if err := json.Unmarshal([]byte(`$.defaultValue$`), %s); err != nil {\n", variablePointer), args) + sw.Do(fmt.Sprintf("if err := $.jsonUnmarshal|raw$([]byte(`$.defaultValue$`), %s); err != nil {\n", variablePointer), args) } sw.Do("panic(err)\n", nil) sw.Do("}\n", nil) @@ -1121,7 +1125,7 @@ func (n *callNode) writeDefaulter(varName string, index string, isVarPointer boo // WriteMethod performs an in-order traversal of the calltree, generating loops and if blocks as necessary // to correctly turn the call tree into a method body that invokes all calls on all child nodes of the call tree. // Depth is used to generate local variables at the proper depth. -func (n *callNode) WriteMethod(varName string, depth int, ancestors []*callNode, sw *generator.SnippetWriter) { +func (n *callNode) WriteMethod(c *generator.Context, varName string, depth int, ancestors []*callNode, sw *generator.SnippetWriter) { // if len(n.call) > 0 { // sw.Do(fmt.Sprintf("// %s\n", callPath(append(ancestors, n)).String()), nil) // } @@ -1153,10 +1157,10 @@ func (n *callNode) WriteMethod(varName string, depth int, ancestors []*callNode, } } - n.writeDefaulter(varName, index, isPointer, sw) + n.writeDefaulter(c, varName, index, isPointer, sw) n.writeCalls(local, true, sw) for i := range n.children { - n.children[i].WriteMethod(local, depth+1, append(ancestors, n), sw) + n.children[i].WriteMethod(c, local, depth+1, append(ancestors, n), sw) } sw.Do("}\n", nil) case n.key: @@ -1165,14 +1169,14 @@ func (n *callNode) WriteMethod(varName string, depth int, ancestors []*callNode, index = index + "_" + ancestors[len(ancestors)-1].field vars["index"] = index sw.Do("for $.index$ := range $.var$ {\n", vars) - n.writeDefaulter(varName, index, isPointer, sw) + n.writeDefaulter(c, varName, index, isPointer, sw) sw.Do("}\n", nil) } default: - n.writeDefaulter(varName, index, isPointer, sw) + n.writeDefaulter(c, varName, index, isPointer, sw) n.writeCalls(varName, isPointer, sw) for i := range n.children { - n.children[i].WriteMethod(varName, depth, append(ancestors, n), sw) + n.children[i].WriteMethod(c, varName, depth, append(ancestors, n), sw) } } diff --git a/vendor/k8s.io/code-generator/cmd/defaulter-gen/main.go b/vendor/k8s.io/code-generator/cmd/defaulter-gen/main.go index d57ca0666..0250c1328 100644 --- a/vendor/k8s.io/code-generator/cmd/defaulter-gen/main.go +++ b/vendor/k8s.io/code-generator/cmd/defaulter-gen/main.go @@ -74,7 +74,7 @@ func main() { generators.NameSystems(), generators.DefaultNameSystem(), myTargets, - gengo.StdBuildTag, + args.GeneratedBuildTag, pflag.Args(), ); err != nil { klog.Fatalf("Error: %v", err) diff --git a/vendor/k8s.io/code-generator/cmd/go-to-protobuf/protobuf/generator.go b/vendor/k8s.io/code-generator/cmd/go-to-protobuf/protobuf/generator.go index 0fc653dda..c9eadd812 100644 --- a/vendor/k8s.io/code-generator/cmd/go-to-protobuf/protobuf/generator.go +++ b/vendor/k8s.io/code-generator/cmd/go-to-protobuf/protobuf/generator.go @@ -145,10 +145,7 @@ func isOptionalAlias(t *types.Type) bool { if t.Underlying == nil || (t.Underlying.Kind != types.Map && t.Underlying.Kind != types.Slice) { return false } - if extractBoolTagOrDie("protobuf.nullable", t.CommentLines) == false { - return 
false - } - return true + return extractBoolTagOrDie("protobuf.nullable", t.CommentLines) } func (g *genProtoIDL) Imports(c *generator.Context) (imports []string) { @@ -187,7 +184,7 @@ func (g *genProtoIDL) GenerateType(c *generator.Context, t *types.Type, w io.Wri case types.Struct: return b.doStruct(sw) default: - return b.unknown(sw) + return b.unknown() } } @@ -262,7 +259,7 @@ type bodyGen struct { t *types.Type } -func (b bodyGen) unknown(sw *generator.SnippetWriter) error { +func (b bodyGen) unknown() error { return fmt.Errorf("not sure how to generate: %#v", b.t) } diff --git a/vendor/k8s.io/code-generator/cmd/informer-gen/generators/generic.go b/vendor/k8s.io/code-generator/cmd/informer-gen/generators/generic.go index d8e8873aa..e863b1d2f 100644 --- a/vendor/k8s.io/code-generator/cmd/informer-gen/generators/generic.go +++ b/vendor/k8s.io/code-generator/cmd/informer-gen/generators/generic.go @@ -61,7 +61,6 @@ func (g *genericGenerator) Namers(c *generator.Context) namer.NameSystems { func (g *genericGenerator) Imports(c *generator.Context) (imports []string) { imports = append(imports, g.imports.ImportLines()...) - imports = append(imports, "fmt") return } @@ -127,6 +126,7 @@ func (g *genericGenerator) GenerateType(c *generator.Context, t *types.Type, w i "cacheGenericLister": c.Universe.Type(cacheGenericLister), "cacheNewGenericLister": c.Universe.Function(cacheNewGenericLister), "cacheSharedIndexInformer": c.Universe.Type(cacheSharedIndexInformer), + "fmtErrorf": c.Universe.Type(fmtErrorfFunc), "groups": groups, "schemeGVs": schemeGVs, "schemaGroupResource": c.Universe.Type(schemaGroupResource), @@ -179,6 +179,6 @@ func (f *sharedInformerFactory) ForResource(resource {{.schemaGroupVersionResour {{end -}} } - return nil, fmt.Errorf("no informer found for %v", resource) + return nil, {{.fmtErrorf|raw}}("no informer found for %v", resource) } ` diff --git a/vendor/k8s.io/code-generator/cmd/informer-gen/generators/informer.go b/vendor/k8s.io/code-generator/cmd/informer-gen/generators/informer.go index 9d786ea07..4034695dd 100644 --- a/vendor/k8s.io/code-generator/cmd/informer-gen/generators/informer.go +++ b/vendor/k8s.io/code-generator/cmd/informer-gen/generators/informer.go @@ -86,6 +86,7 @@ func (g *informerGenerator) GenerateType(c *generator.Context, t *types.Type, w "cacheNewSharedIndexInformer": c.Universe.Function(cacheNewSharedIndexInformer), "cacheSharedIndexInformer": c.Universe.Type(cacheSharedIndexInformer), "clientSetInterface": clientSetInterface, + "contextTODO": c.Universe.Type(contextTODOFunc), "group": namer.IC(g.groupGoName), "informerFor": informerFor, "interfacesTweakListOptionsFunc": c.Universe.Type(types.Name{Package: g.internalInterfacesPackage, Name: "TweakListOptionsFunc"}), @@ -151,13 +152,13 @@ func NewFiltered$.type|public$Informer(client $.clientSetInterface|raw$$if .name if tweakListOptions != nil { tweakListOptions(&options) } - return client.$.group$$.version$().$.type|publicPlural$($if .namespaced$namespace$end$).List(context.TODO(), options) + return client.$.group$$.version$().$.type|publicPlural$($if .namespaced$namespace$end$).List($.contextTODO|raw$(), options) }, WatchFunc: func(options $.v1ListOptions|raw$) ($.watchInterface|raw$, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.$.group$$.version$().$.type|publicPlural$($if .namespaced$namespace$end$).Watch(context.TODO(), options) + return client.$.group$$.version$().$.type|publicPlural$($if .namespaced$namespace$end$).Watch($.contextTODO|raw$(), options) }, 
}, &$.type|raw${}, diff --git a/vendor/k8s.io/code-generator/cmd/informer-gen/generators/targets.go b/vendor/k8s.io/code-generator/cmd/informer-gen/generators/targets.go index 65dc89d03..2e886d4e0 100644 --- a/vendor/k8s.io/code-generator/cmd/informer-gen/generators/targets.go +++ b/vendor/k8s.io/code-generator/cmd/informer-gen/generators/targets.go @@ -249,7 +249,7 @@ func factoryTarget(outputDirBase, outputPkgBase string, boilerplate []byte, grou OutputFilename: "factory.go", }, outputPackage: outputPkgBase, - imports: generator.NewImportTracker(), + imports: generator.NewImportTrackerForPackage(outputPkgBase), groupVersions: groupVersions, clientSetPackage: clientSetPackage, internalInterfacesPackage: path.Join(outputPkgBase, subdirForInternalInterfaces), @@ -261,7 +261,7 @@ func factoryTarget(outputDirBase, outputPkgBase string, boilerplate []byte, grou OutputFilename: "generic.go", }, outputPackage: outputPkgBase, - imports: generator.NewImportTracker(), + imports: generator.NewImportTrackerForPackage(outputPkgBase), groupVersions: groupVersions, pluralExceptions: pluralExceptions, typesForGroupVersion: typesForGroupVersion, @@ -288,7 +288,7 @@ func factoryInterfaceTarget(outputDirBase, outputPkgBase string, boilerplate []b OutputFilename: "factory_interfaces.go", }, outputPackage: outputPkg, - imports: generator.NewImportTracker(), + imports: generator.NewImportTrackerForPackage(outputPkg), clientSetPackage: clientSetPackage, }) @@ -314,7 +314,7 @@ func groupTarget(outputDirBase, outputPackageBase string, groupVersions clientge }, outputPackage: outputPkg, groupVersions: groupVersions, - imports: generator.NewImportTracker(), + imports: generator.NewImportTrackerForPackage(outputPkg), internalInterfacesPackage: path.Join(outputPackageBase, subdirForInternalInterfaces), }) return generators @@ -342,7 +342,7 @@ func versionTarget(outputDirBase, outputPkgBase string, groupPkgName string, gv OutputFilename: "interface.go", }, outputPackage: outputPkg, - imports: generator.NewImportTracker(), + imports: generator.NewImportTrackerForPackage(outputPkg), types: typesToGenerate, internalInterfacesPackage: path.Join(outputPkgBase, subdirForInternalInterfaces), }) @@ -357,7 +357,7 @@ func versionTarget(outputDirBase, outputPkgBase string, groupPkgName string, gv groupVersion: gv, groupGoName: groupGoName, typeToGenerate: t, - imports: generator.NewImportTracker(), + imports: generator.NewImportTrackerForPackage(outputPkg), clientSetPackage: clientSetPackage, listersPackage: listersPackage, internalInterfacesPackage: path.Join(outputPkgBase, subdirForInternalInterfaces), diff --git a/vendor/k8s.io/code-generator/cmd/informer-gen/generators/types.go b/vendor/k8s.io/code-generator/cmd/informer-gen/generators/types.go index 4ca511ea4..b717adfd3 100644 --- a/vendor/k8s.io/code-generator/cmd/informer-gen/generators/types.go +++ b/vendor/k8s.io/code-generator/cmd/informer-gen/generators/types.go @@ -29,6 +29,8 @@ var ( cacheNewSharedIndexInformer = types.Name{Package: "k8s.io/client-go/tools/cache", Name: "NewSharedIndexInformer"} cacheSharedIndexInformer = types.Name{Package: "k8s.io/client-go/tools/cache", Name: "SharedIndexInformer"} cacheTransformFunc = types.Name{Package: "k8s.io/client-go/tools/cache", Name: "TransformFunc"} + contextTODOFunc = types.Name{Package: "context", Name: "TODO"} + fmtErrorfFunc = types.Name{Package: "fmt", Name: "Errorf"} listOptions = types.Name{Package: "k8s.io/kubernetes/pkg/apis/core", Name: "ListOptions"} reflectType = types.Name{Package: "reflect", Name: "Type"} 
runtimeObject = types.Name{Package: "k8s.io/apimachinery/pkg/runtime", Name: "Object"} diff --git a/vendor/k8s.io/code-generator/cmd/lister-gen/generators/lister.go b/vendor/k8s.io/code-generator/cmd/lister-gen/generators/lister.go index 8955e076f..a5beb2a7f 100644 --- a/vendor/k8s.io/code-generator/cmd/lister-gen/generators/lister.go +++ b/vendor/k8s.io/code-generator/cmd/lister-gen/generators/lister.go @@ -149,7 +149,7 @@ func GetTargets(context *generator.Context, args *args.Args) []generator.Target groupVersion: gv, internalGVPkg: internalGVPkg, typeToGenerate: t, - imports: generator.NewImportTracker(), + imports: generator.NewImportTrackerForPackage(outputPkg), objectMeta: objectMeta, }) } @@ -213,11 +213,6 @@ func (g *listerGenerator) Namers(c *generator.Context) namer.NameSystems { func (g *listerGenerator) Imports(c *generator.Context) (imports []string) { imports = append(imports, g.imports.ImportLines()...) - imports = append(imports, "k8s.io/apimachinery/pkg/api/errors") - imports = append(imports, "k8s.io/apimachinery/pkg/labels") - imports = append(imports, "k8s.io/client-go/listers") - // for Indexer - imports = append(imports, "k8s.io/client-go/tools/cache") return } @@ -226,9 +221,14 @@ func (g *listerGenerator) GenerateType(c *generator.Context, t *types.Type, w io klog.V(5).Infof("processing type %v", t) m := map[string]interface{}{ - "Resource": c.Universe.Function(types.Name{Package: t.Name.Package, Name: "Resource"}), - "type": t, - "objectMeta": g.objectMeta, + "Resource": c.Universe.Function(types.Name{Package: t.Name.Package, Name: "Resource"}), + "labelsSelector": c.Universe.Function(types.Name{Package: "k8s.io/apimachinery/pkg/labels", Name: "Selector"}), + "listersResourceIndexer": c.Universe.Function(types.Name{Package: "k8s.io/client-go/listers", Name: "ResourceIndexer"}), + "listersNew": c.Universe.Function(types.Name{Package: "k8s.io/client-go/listers", Name: "New"}), + "listersNewNamespaced": c.Universe.Function(types.Name{Package: "k8s.io/client-go/listers", Name: "NewNamespaced"}), + "cacheIndexer": c.Universe.Type(types.Name{Package: "k8s.io/client-go/tools/cache", Name: "Indexer"}), + "type": t, + "objectMeta": g.objectMeta, } tags, err := util.ParseClientGenTags(append(t.SecondClosestCommentLines, t.CommentLines...)) @@ -262,7 +262,7 @@ var typeListerInterface = ` type $.type|public$Lister interface { // List lists all $.type|publicPlural$ in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*$.type|raw$, err error) + List(selector $.labelsSelector|raw$) (ret []*$.type|raw$, err error) // $.type|publicPlural$ returns an object that can list and get $.type|publicPlural$. $.type|publicPlural$(namespace string) $.type|public$NamespaceLister $.type|public$ListerExpansion @@ -275,7 +275,7 @@ var typeListerInterfaceNonNamespaced = ` type $.type|public$Lister interface { // List lists all $.type|publicPlural$ in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*$.type|raw$, err error) + List(selector $.labelsSelector|raw$) (ret []*$.type|raw$, err error) // Get retrieves the $.type|public$ from the index for a given name. // Objects returned here must be treated as read-only. Get(name string) (*$.type|raw$, error) @@ -289,21 +289,21 @@ type $.type|public$Lister interface { var typeListerStruct = ` // $.type|private$Lister implements the $.type|public$Lister interface. 
type $.type|private$Lister struct { - listers.ResourceIndexer[*$.type|raw$] + $.listersResourceIndexer|raw$[*$.type|raw$] } ` var typeListerConstructor = ` // New$.type|public$Lister returns a new $.type|public$Lister. -func New$.type|public$Lister(indexer cache.Indexer) $.type|public$Lister { - return &$.type|private$Lister{listers.New[*$.type|raw$](indexer, $.Resource|raw$("$.type|lowercaseSingular$"))} +func New$.type|public$Lister(indexer $.cacheIndexer|raw$) $.type|public$Lister { + return &$.type|private$Lister{$.listersNew|raw$[*$.type|raw$](indexer, $.Resource|raw$("$.type|lowercaseSingular$"))} } ` var typeListerNamespaceLister = ` // $.type|publicPlural$ returns an object that can list and get $.type|publicPlural$. func (s *$.type|private$Lister) $.type|publicPlural$(namespace string) $.type|public$NamespaceLister { - return $.type|private$NamespaceLister{listers.NewNamespaced[*$.type|raw$](s.ResourceIndexer, namespace)} + return $.type|private$NamespaceLister{$.listersNewNamespaced|raw$[*$.type|raw$](s.ResourceIndexer, namespace)} } ` @@ -313,7 +313,7 @@ var namespaceListerInterface = ` type $.type|public$NamespaceLister interface { // List lists all $.type|publicPlural$ in the indexer for a given namespace. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*$.type|raw$, err error) + List(selector $.labelsSelector|raw$) (ret []*$.type|raw$, err error) // Get retrieves the $.type|public$ from the indexer for a given namespace and name. // Objects returned here must be treated as read-only. Get(name string) (*$.type|raw$, error) @@ -328,6 +328,6 @@ var namespaceListerStruct = ` // $.type|private$NamespaceLister implements the $.type|public$NamespaceLister // interface. type $.type|private$NamespaceLister struct { - listers.ResourceIndexer[*$.type|raw$] + $.listersResourceIndexer|raw$[*$.type|raw$] } ` diff --git a/vendor/k8s.io/code-generator/cmd/register-gen/generators/targets.go b/vendor/k8s.io/code-generator/cmd/register-gen/generators/targets.go index d2998261c..952e276f7 100644 --- a/vendor/k8s.io/code-generator/cmd/register-gen/generators/targets.go +++ b/vendor/k8s.io/code-generator/cmd/register-gen/generators/targets.go @@ -115,7 +115,7 @@ func GetTargets(context *generator.Context, args *args.Args) []generator.Target gv: gv, typesToGenerate: typesToRegister, outputPackage: pkg.Path, - imports: generator.NewImportTracker(), + imports: generator.NewImportTrackerForPackage(pkg.Path), }, } }, diff --git a/vendor/k8s.io/code-generator/kube_codegen.sh b/vendor/k8s.io/code-generator/kube_codegen.sh index f57c52cae..8207da5ed 100644 --- a/vendor/k8s.io/code-generator/kube_codegen.sh +++ b/vendor/k8s.io/code-generator/kube_codegen.sh @@ -433,6 +433,9 @@ function kube::codegen::gen_openapi() { # --plural-exceptions # An optional list of comma separated plural exception definitions in Type:PluralizedType form. # +# --prefers-protobuf +# Enables generation of clientsets that use protobuf for API requests. 
+# function kube::codegen::gen_client() { local in_dir="" local one_input_api="" @@ -450,6 +453,7 @@ function kube::codegen::gen_client() { local boilerplate="${KUBE_CODEGEN_ROOT}/hack/boilerplate.go.txt" local plural_exceptions="" local v="${KUBE_VERBOSE:-0}" + local prefers_protobuf="false" while [ "$#" -gt 0 ]; do case "$1" in @@ -509,6 +513,10 @@ function kube::codegen::gen_client() { plural_exceptions="$2" shift 2 ;; + "--prefers-protobuf") + prefers_protobuf="true" + shift + ;; *) if [[ "$1" =~ ^-- ]]; then echo "unknown argument: $1" >&2 @@ -625,6 +633,7 @@ function kube::codegen::gen_client() { --apply-configuration-package "${applyconfig_pkg}" \ --input-base "$(cd "${in_dir}" && pwd -P)" `# must be absolute path or Go import path"` \ --plural-exceptions "${plural_exceptions}" \ + --prefers-protobuf="${prefers_protobuf}" \ "${inputs[@]}" if [ "${watchable}" == "true" ]; then diff --git a/vendor/k8s.io/component-base/cli/flag/flags.go b/vendor/k8s.io/component-base/cli/flag/flags.go index 2388340d5..8d4a59ce9 100644 --- a/vendor/k8s.io/component-base/cli/flag/flags.go +++ b/vendor/k8s.io/component-base/cli/flag/flags.go @@ -24,7 +24,7 @@ import ( "k8s.io/klog/v2" ) -var underscoreWarnings = make(map[string]bool) +var underscoreWarnings = make(map[string]struct{}) // WordSepNormalizeFunc changes all flags that contain "_" separators func WordSepNormalizeFunc(f *pflag.FlagSet, name string) pflag.NormalizedName { @@ -40,7 +40,7 @@ func WarnWordSepNormalizeFunc(f *pflag.FlagSet, name string) pflag.NormalizedNam nname := strings.Replace(name, "_", "-", -1) if _, alreadyWarned := underscoreWarnings[name]; !alreadyWarned { klog.Warningf("using an underscore in a flag name is not supported. %s has been converted to %s.", name, nname) - underscoreWarnings[name] = true + underscoreWarnings[name] = struct{}{} } return pflag.NormalizedName(nname) diff --git a/vendor/k8s.io/component-base/config/options/leaderelectionconfig.go b/vendor/k8s.io/component-base/config/options/leaderelectionconfig.go index bf2a44a0a..bd90b6481 100644 --- a/vendor/k8s.io/component-base/config/options/leaderelectionconfig.go +++ b/vendor/k8s.io/component-base/config/options/leaderelectionconfig.go @@ -42,8 +42,7 @@ func BindLeaderElectionFlags(l *config.LeaderElectionConfiguration, fs *pflag.Fl "of a leadership. This is only applicable if leader election is enabled.") fs.StringVar(&l.ResourceLock, "leader-elect-resource-lock", l.ResourceLock, ""+ "The type of resource object that is used for locking during "+ - "leader election. Supported options are 'leases', 'endpointsleases' "+ - "and 'configmapsleases'.") + "leader election. Supported options are 'leases'.") fs.StringVar(&l.ResourceName, "leader-elect-resource-name", l.ResourceName, ""+ "The name of resource object that is used for locking during "+ "leader election.") diff --git a/vendor/k8s.io/apiserver/pkg/util/version/registry.go b/vendor/k8s.io/component-base/featuregate/registry.go similarity index 94% rename from vendor/k8s.io/apiserver/pkg/util/version/registry.go rename to vendor/k8s.io/component-base/featuregate/registry.go index 0db9c48a6..cf35403da 100644 --- a/vendor/k8s.io/apiserver/pkg/util/version/registry.go +++ b/vendor/k8s.io/component-base/featuregate/registry.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package version +package featuregate import ( "fmt" @@ -26,7 +26,7 @@ import ( utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/version" cliflag "k8s.io/component-base/cli/flag" - "k8s.io/component-base/featuregate" + baseversion "k8s.io/component-base/version" "k8s.io/klog/v2" ) @@ -67,8 +67,8 @@ type VersionMapping func(from *version.Version) *version.Version // ComponentGlobals stores the global variables for a component for easy access. type ComponentGlobals struct { - effectiveVersion MutableEffectiveVersion - featureGate featuregate.MutableVersionedFeatureGate + effectiveVersion baseversion.MutableEffectiveVersion + featureGate MutableVersionedFeatureGate // emulationVersionMapping contains the mapping from the emulation version of this component // to the emulation version of another component. @@ -87,16 +87,16 @@ type ComponentGlobals struct { type ComponentGlobalsRegistry interface { // EffectiveVersionFor returns the EffectiveVersion registered under the component. // Returns nil if the component is not registered. - EffectiveVersionFor(component string) EffectiveVersion + EffectiveVersionFor(component string) baseversion.EffectiveVersion // FeatureGateFor returns the FeatureGate registered under the component. // Returns nil if the component is not registered. - FeatureGateFor(component string) featuregate.FeatureGate + FeatureGateFor(component string) FeatureGate // Register registers the EffectiveVersion and FeatureGate for a component. // returns error if the component is already registered. - Register(component string, effectiveVersion MutableEffectiveVersion, featureGate featuregate.MutableVersionedFeatureGate) error + Register(component string, effectiveVersion baseversion.MutableEffectiveVersion, featureGate MutableVersionedFeatureGate) error // ComponentGlobalsOrRegister would return the registered global variables for the component if it already exists in the registry. // Otherwise, the provided variables would be registered under the component, and the same variables would be returned. - ComponentGlobalsOrRegister(component string, effectiveVersion MutableEffectiveVersion, featureGate featuregate.MutableVersionedFeatureGate) (MutableEffectiveVersion, featuregate.MutableVersionedFeatureGate) + ComponentGlobalsOrRegister(component string, effectiveVersion baseversion.MutableEffectiveVersion, featureGate MutableVersionedFeatureGate) (baseversion.MutableEffectiveVersion, MutableVersionedFeatureGate) // AddFlags adds flags of "--emulated-version" and "--feature-gates" AddFlags(fs *pflag.FlagSet) // Set sets the flags for all global variables for all components registered. @@ -143,7 +143,7 @@ func (r *componentGlobalsRegistry) Reset() { r.set = false } -func (r *componentGlobalsRegistry) EffectiveVersionFor(component string) EffectiveVersion { +func (r *componentGlobalsRegistry) EffectiveVersionFor(component string) baseversion.EffectiveVersion { r.mutex.RLock() defer r.mutex.RUnlock() globals, ok := r.componentGlobals[component] @@ -153,7 +153,7 @@ func (r *componentGlobalsRegistry) EffectiveVersionFor(component string) Effecti return globals.effectiveVersion } -func (r *componentGlobalsRegistry) FeatureGateFor(component string) featuregate.FeatureGate { +func (r *componentGlobalsRegistry) FeatureGateFor(component string) FeatureGate { r.mutex.RLock() defer r.mutex.RUnlock() globals, ok := r.componentGlobals[component] @@ -163,7 +163,7 @@ func (r *componentGlobalsRegistry) FeatureGateFor(component string) featuregate. 
return globals.featureGate } -func (r *componentGlobalsRegistry) unsafeRegister(component string, effectiveVersion MutableEffectiveVersion, featureGate featuregate.MutableVersionedFeatureGate) error { +func (r *componentGlobalsRegistry) unsafeRegister(component string, effectiveVersion baseversion.MutableEffectiveVersion, featureGate MutableVersionedFeatureGate) error { if _, ok := r.componentGlobals[component]; ok { return fmt.Errorf("component globals of %s already registered", component) } @@ -182,7 +182,7 @@ func (r *componentGlobalsRegistry) unsafeRegister(component string, effectiveVer return nil } -func (r *componentGlobalsRegistry) Register(component string, effectiveVersion MutableEffectiveVersion, featureGate featuregate.MutableVersionedFeatureGate) error { +func (r *componentGlobalsRegistry) Register(component string, effectiveVersion baseversion.MutableEffectiveVersion, featureGate MutableVersionedFeatureGate) error { if effectiveVersion == nil { return fmt.Errorf("cannot register nil effectiveVersion") } @@ -191,7 +191,7 @@ func (r *componentGlobalsRegistry) Register(component string, effectiveVersion M return r.unsafeRegister(component, effectiveVersion, featureGate) } -func (r *componentGlobalsRegistry) ComponentGlobalsOrRegister(component string, effectiveVersion MutableEffectiveVersion, featureGate featuregate.MutableVersionedFeatureGate) (MutableEffectiveVersion, featuregate.MutableVersionedFeatureGate) { +func (r *componentGlobalsRegistry) ComponentGlobalsOrRegister(component string, effectiveVersion baseversion.MutableEffectiveVersion, featureGate MutableVersionedFeatureGate) (baseversion.MutableEffectiveVersion, MutableVersionedFeatureGate) { r.mutex.Lock() defer r.mutex.Unlock() globals, ok := r.componentGlobals[component] diff --git a/vendor/k8s.io/component-base/metrics/counter.go b/vendor/k8s.io/component-base/metrics/counter.go index 5664a68a9..8a7dd7154 100644 --- a/vendor/k8s.io/component-base/metrics/counter.go +++ b/vendor/k8s.io/component-base/metrics/counter.go @@ -18,15 +18,19 @@ package metrics import ( "context" + "sync" "github.com/blang/semver/v4" "github.com/prometheus/client_golang/prometheus" + "go.opentelemetry.io/otel/trace" + dto "github.com/prometheus/client_model/go" ) // Counter is our internal representation for our wrapping struct around prometheus // counters. Counter implements both kubeCollector and CounterMetric. type Counter struct { + ctx context.Context CounterMetric *CounterOpts lazyMetric @@ -36,6 +40,14 @@ type Counter struct { // The implementation of the Metric interface is expected by testutil.GetCounterMetricValue. var _ Metric = &Counter{} +// All supported exemplar metric types implement the metricWithExemplar interface. +var _ metricWithExemplar = &Counter{} + +// exemplarCounterMetric holds a context to extract exemplar labels from, and a counter metric to attach them to. It implements the metricWithExemplar interface. +type exemplarCounterMetric struct { + *Counter +} + // NewCounter returns an object which satisfies the kubeCollector and CounterMetric interfaces. // However, the object returned will not measure anything unless the collector is first // registered, since the metric is lazily instantiated. @@ -93,11 +105,42 @@ func (c *Counter) initializeDeprecatedMetric() { c.initializeMetric() } -// WithContext allows the normal Counter metric to pass in context. The context is no-op now. +// WithContext allows the normal Counter metric to pass in context. 
func (c *Counter) WithContext(ctx context.Context) CounterMetric { + c.ctx = ctx return c.CounterMetric } +// withExemplar initializes the exemplarMetric object and sets the exemplar value. +func (c *Counter) withExemplar(v float64) { + (&exemplarCounterMetric{c}).withExemplar(v) +} + +func (c *Counter) Add(v float64) { + c.withExemplar(v) +} + +func (c *Counter) Inc() { + c.withExemplar(1) +} + +// withExemplar attaches an exemplar to the metric. +func (e *exemplarCounterMetric) withExemplar(v float64) { + if m, ok := e.CounterMetric.(prometheus.ExemplarAdder); ok { + maybeSpanCtx := trace.SpanContextFromContext(e.ctx) + if maybeSpanCtx.IsValid() && maybeSpanCtx.IsSampled() { + exemplarLabels := prometheus.Labels{ + "trace_id": maybeSpanCtx.TraceID().String(), + "span_id": maybeSpanCtx.SpanID().String(), + } + m.AddWithExemplar(v, exemplarLabels) + return + } + } + + e.CounterMetric.Add(v) +} + // CounterVec is the internal representation of our wrapping struct around prometheus // counterVecs. CounterVec implements both kubeCollector and CounterVecMetric. type CounterVec struct { @@ -119,11 +162,6 @@ func NewCounterVec(opts *CounterOpts, labels []string) *CounterVec { opts.StabilityLevel.setDefaults() fqName := BuildFQName(opts.Namespace, opts.Subsystem, opts.Name) - allowListLock.RLock() - if allowList, ok := labelValueAllowLists[fqName]; ok { - opts.LabelValueAllowLists = allowList - } - allowListLock.RUnlock() cv := &CounterVec{ CounterVec: noopCounterVec, @@ -176,7 +214,17 @@ func (v *CounterVec) WithLabelValues(lvs ...string) CounterMetric { } if v.LabelValueAllowLists != nil { v.LabelValueAllowLists.ConstrainToAllowedList(v.originalLabels, lvs) + } else { + v.initializeLabelAllowListsOnce.Do(func() { + allowListLock.RLock() + if allowList, ok := labelValueAllowLists[v.FQName()]; ok { + v.LabelValueAllowLists = allowList + allowList.ConstrainToAllowedList(v.originalLabels, lvs) + } + allowListLock.RUnlock() + }) } + return v.CounterVec.WithLabelValues(lvs...) } @@ -190,6 +238,15 @@ func (v *CounterVec) With(labels map[string]string) CounterMetric { } if v.LabelValueAllowLists != nil { v.LabelValueAllowLists.ConstrainLabelMap(labels) + } else { + v.initializeLabelAllowListsOnce.Do(func() { + allowListLock.RLock() + if allowList, ok := labelValueAllowLists[v.FQName()]; ok { + v.LabelValueAllowLists = allowList + allowList.ConstrainLabelMap(labels) + } + allowListLock.RUnlock() + }) } return v.CounterVec.With(labels) } @@ -217,6 +274,13 @@ func (v *CounterVec) Reset() { v.CounterVec.Reset() } +// ResetLabelAllowLists resets the label allow list for the CounterVec. +// NOTE: This should only be used in test. +func (v *CounterVec) ResetLabelAllowLists() { + v.initializeLabelAllowListsOnce = sync.Once{} + v.LabelValueAllowLists = nil +} + // WithContext returns wrapped CounterVec with context func (v *CounterVec) WithContext(ctx context.Context) *CounterVecWithContext { return &CounterVecWithContext{ diff --git a/vendor/k8s.io/component-base/metrics/features/kube_features.go b/vendor/k8s.io/component-base/metrics/features/kube_features.go index 3cd6c22af..5f41802cb 100644 --- a/vendor/k8s.io/component-base/metrics/features/kube_features.go +++ b/vendor/k8s.io/component-base/metrics/features/kube_features.go @@ -17,23 +17,29 @@ limitations under the License. 
package features import ( + "k8s.io/apimachinery/pkg/util/version" "k8s.io/component-base/featuregate" ) const ( // owner: @logicalhan // kep: https://kep.k8s.io/3466 - // alpha: v1.26 ComponentSLIs featuregate.Feature = "ComponentSLIs" ) -func featureGates() map[featuregate.Feature]featuregate.FeatureSpec { - return map[featuregate.Feature]featuregate.FeatureSpec{ - ComponentSLIs: {Default: true, PreRelease: featuregate.Beta}, +func featureGates() map[featuregate.Feature]featuregate.VersionedSpecs { + return map[featuregate.Feature]featuregate.VersionedSpecs{ + ComponentSLIs: { + {Version: version.MustParse("1.26"), Default: false, PreRelease: featuregate.Alpha}, + {Version: version.MustParse("1.27"), Default: true, PreRelease: featuregate.Beta}, + // ComponentSLIs officially graduated to GA in v1.29 but the gate was not updated until v1.32. + // To support emulated versions, keep the gate until v1.35. + {Version: version.MustParse("1.32"), Default: true, PreRelease: featuregate.GA, LockToDefault: true}, + }, } } // AddFeatureGates adds all feature gates used by this package. -func AddFeatureGates(mutableFeatureGate featuregate.MutableFeatureGate) error { - return mutableFeatureGate.Add(featureGates()) +func AddFeatureGates(mutableFeatureGate featuregate.MutableVersionedFeatureGate) error { + return mutableFeatureGate.AddVersioned(featureGates()) } diff --git a/vendor/k8s.io/component-base/metrics/gauge.go b/vendor/k8s.io/component-base/metrics/gauge.go index 89631115a..0621560d0 100644 --- a/vendor/k8s.io/component-base/metrics/gauge.go +++ b/vendor/k8s.io/component-base/metrics/gauge.go @@ -18,6 +18,7 @@ package metrics import ( "context" + "sync" "github.com/blang/semver/v4" "github.com/prometheus/client_golang/prometheus" @@ -105,11 +106,6 @@ func NewGaugeVec(opts *GaugeOpts, labels []string) *GaugeVec { opts.StabilityLevel.setDefaults() fqName := BuildFQName(opts.Namespace, opts.Subsystem, opts.Name) - allowListLock.RLock() - if allowList, ok := labelValueAllowLists[fqName]; ok { - opts.LabelValueAllowLists = allowList - } - allowListLock.RUnlock() cv := &GaugeVec{ GaugeVec: noopGaugeVec, @@ -149,6 +145,15 @@ func (v *GaugeVec) WithLabelValuesChecked(lvs ...string) (GaugeMetric, error) { } if v.LabelValueAllowLists != nil { v.LabelValueAllowLists.ConstrainToAllowedList(v.originalLabels, lvs) + } else { + v.initializeLabelAllowListsOnce.Do(func() { + allowListLock.RLock() + if allowList, ok := labelValueAllowLists[v.FQName()]; ok { + v.LabelValueAllowLists = allowList + allowList.ConstrainToAllowedList(v.originalLabels, lvs) + } + allowListLock.RUnlock() + }) } elt, err := v.GaugeVec.GetMetricWithLabelValues(lvs...) return elt, err @@ -186,6 +191,15 @@ func (v *GaugeVec) WithChecked(labels map[string]string) (GaugeMetric, error) { } if v.LabelValueAllowLists != nil { v.LabelValueAllowLists.ConstrainLabelMap(labels) + } else { + v.initializeLabelAllowListsOnce.Do(func() { + allowListLock.RLock() + if allowList, ok := labelValueAllowLists[v.FQName()]; ok { + v.LabelValueAllowLists = allowList + allowList.ConstrainLabelMap(labels) + } + allowListLock.RUnlock() + }) } elt, err := v.GaugeVec.GetMetricWith(labels) return elt, err @@ -226,6 +240,13 @@ func (v *GaugeVec) Reset() { v.GaugeVec.Reset() } +// ResetLabelAllowLists resets the label allow list for the GaugeVec. +// NOTE: This should only be used in test. 
+func (v *GaugeVec) ResetLabelAllowLists() { + v.initializeLabelAllowListsOnce = sync.Once{} + v.LabelValueAllowLists = nil +} + func newGaugeFunc(opts *GaugeOpts, function func() float64, v semver.Version) GaugeFunc { g := NewGauge(opts) diff --git a/vendor/k8s.io/component-base/metrics/histogram.go b/vendor/k8s.io/component-base/metrics/histogram.go index e6884f35c..3065486ab 100644 --- a/vendor/k8s.io/component-base/metrics/histogram.go +++ b/vendor/k8s.io/component-base/metrics/histogram.go @@ -18,20 +18,59 @@ package metrics import ( "context" + "sync" "github.com/blang/semver/v4" "github.com/prometheus/client_golang/prometheus" + "go.opentelemetry.io/otel/trace" ) // Histogram is our internal representation for our wrapping struct around prometheus // histograms. Summary implements both kubeCollector and ObserverMetric type Histogram struct { + ctx context.Context ObserverMetric *HistogramOpts lazyMetric selfCollector } +// exemplarHistogramMetric holds a context to extract exemplar labels from, and a historgram metric to attach them to. It implements the metricWithExemplar interface. +type exemplarHistogramMetric struct { + *Histogram +} + +type exemplarHistogramVec struct { + *HistogramVecWithContext + observer prometheus.Observer +} + +func (h *Histogram) Observe(v float64) { + h.withExemplar(v) +} + +// withExemplar initializes the exemplarMetric object and sets the exemplar value. +func (h *Histogram) withExemplar(v float64) { + (&exemplarHistogramMetric{h}).withExemplar(v) +} + +// withExemplar attaches an exemplar to the metric. +func (e *exemplarHistogramMetric) withExemplar(v float64) { + if m, ok := e.Histogram.ObserverMetric.(prometheus.ExemplarObserver); ok { + maybeSpanCtx := trace.SpanContextFromContext(e.ctx) + if maybeSpanCtx.IsValid() && maybeSpanCtx.IsSampled() { + exemplarLabels := prometheus.Labels{ + "trace_id": maybeSpanCtx.TraceID().String(), + "span_id": maybeSpanCtx.SpanID().String(), + } + m.ObserveWithExemplar(v, exemplarLabels) + return + } + } + + e.ObserverMetric.Observe(v) +} + // NewHistogram returns an object which is Histogram-like. However, nothing // will be measured until the histogram is registered somewhere. func NewHistogram(opts *HistogramOpts) *Histogram { @@ -74,6 +113,7 @@ func (h *Histogram) initializeDeprecatedMetric() { // WithContext allows the normal Histogram metric to pass in context. The context is no-op now. func (h *Histogram) WithContext(ctx context.Context) ObserverMetric { + h.ctx = ctx return h.ObserverMetric } @@ -96,11 +136,6 @@ func NewHistogramVec(opts *HistogramOpts, labels []string) *HistogramVec { opts.StabilityLevel.setDefaults() fqName := BuildFQName(opts.Namespace, opts.Subsystem, opts.Name) - allowListLock.RLock() - if allowList, ok := labelValueAllowLists[fqName]; ok { - opts.LabelValueAllowLists = allowList - } - allowListLock.RUnlock() v := &HistogramVec{ HistogramVec: noopHistogramVec, @@ -148,6 +183,15 @@ func (v *HistogramVec) WithLabelValues(lvs ...string) ObserverMetric { } if v.LabelValueAllowLists != nil { v.LabelValueAllowLists.ConstrainToAllowedList(v.originalLabels, lvs) + } else { + v.initializeLabelAllowListsOnce.Do(func() { + allowListLock.RLock() + if allowList, ok := labelValueAllowLists[v.FQName()]; ok { + v.LabelValueAllowLists = allowList + allowList.ConstrainToAllowedList(v.originalLabels, lvs) + } + allowListLock.RUnlock() + }) } return v.HistogramVec.WithLabelValues(lvs...) 
} @@ -162,6 +206,15 @@ func (v *HistogramVec) With(labels map[string]string) ObserverMetric { } if v.LabelValueAllowLists != nil { v.LabelValueAllowLists.ConstrainLabelMap(labels) + } else { + v.initializeLabelAllowListsOnce.Do(func() { + allowListLock.RLock() + if allowList, ok := labelValueAllowLists[v.FQName()]; ok { + v.LabelValueAllowLists = allowList + allowList.ConstrainLabelMap(labels) + } + allowListLock.RUnlock() + }) } return v.HistogramVec.With(labels) } @@ -189,6 +242,13 @@ func (v *HistogramVec) Reset() { v.HistogramVec.Reset() } +// ResetLabelAllowLists resets the label allow list for the HistogramVec. +// NOTE: This should only be used in test. +func (v *HistogramVec) ResetLabelAllowLists() { + v.initializeLabelAllowListsOnce = sync.Once{} + v.LabelValueAllowLists = nil +} + // WithContext returns wrapped HistogramVec with context func (v *HistogramVec) WithContext(ctx context.Context) *HistogramVecWithContext { return &HistogramVecWithContext{ @@ -203,12 +263,37 @@ type HistogramVecWithContext struct { ctx context.Context } +func (h *exemplarHistogramVec) Observe(v float64) { + h.withExemplar(v) +} + +func (h *exemplarHistogramVec) withExemplar(v float64) { + if m, ok := h.observer.(prometheus.ExemplarObserver); ok { + maybeSpanCtx := trace.SpanContextFromContext(h.HistogramVecWithContext.ctx) + if maybeSpanCtx.IsValid() && maybeSpanCtx.IsSampled() { + m.ObserveWithExemplar(v, prometheus.Labels{ + "trace_id": maybeSpanCtx.TraceID().String(), + "span_id": maybeSpanCtx.SpanID().String(), + }) + return + } + } + + h.observer.Observe(v) +} + // WithLabelValues is the wrapper of HistogramVec.WithLabelValues. -func (vc *HistogramVecWithContext) WithLabelValues(lvs ...string) ObserverMetric { - return vc.HistogramVec.WithLabelValues(lvs...) +func (vc *HistogramVecWithContext) WithLabelValues(lvs ...string) *exemplarHistogramVec { + return &exemplarHistogramVec{ + HistogramVecWithContext: vc, + observer: vc.HistogramVec.WithLabelValues(lvs...), + } } // With is the wrapper of HistogramVec.With. -func (vc *HistogramVecWithContext) With(labels map[string]string) ObserverMetric { - return vc.HistogramVec.With(labels) +func (vc *HistogramVecWithContext) With(labels map[string]string) *exemplarHistogramVec { + return &exemplarHistogramVec{ + HistogramVecWithContext: vc, + observer: vc.HistogramVec.With(labels), + } } diff --git a/vendor/k8s.io/component-base/metrics/metric.go b/vendor/k8s.io/component-base/metrics/metric.go index d68a98c44..c8b083995 100644 --- a/vendor/k8s.io/component-base/metrics/metric.go +++ b/vendor/k8s.io/component-base/metrics/metric.go @@ -22,8 +22,8 @@ import ( "github.com/blang/semver/v4" "github.com/prometheus/client_golang/prometheus" dto "github.com/prometheus/client_model/go" - promext "k8s.io/component-base/metrics/prometheusextension" + promext "k8s.io/component-base/metrics/prometheusextension" "k8s.io/klog/v2" ) @@ -210,6 +210,11 @@ func (c *selfCollector) Collect(ch chan<- prometheus.Metric) { ch <- c.metric } +// metricWithExemplar is an interface that knows how to attach an exemplar to certain supported metric types. 
+type metricWithExemplar interface { + withExemplar(v float64) +} + // no-op vecs for convenience var noopCounterVec = &prometheus.CounterVec{} var noopHistogramVec = &prometheus.HistogramVec{} diff --git a/vendor/k8s.io/component-base/metrics/options.go b/vendor/k8s.io/component-base/metrics/options.go index 2c72cb48f..17f44ef2a 100644 --- a/vendor/k8s.io/component-base/metrics/options.go +++ b/vendor/k8s.io/component-base/metrics/options.go @@ -129,7 +129,7 @@ func validateAllowMetricLabel(allowListMapping map[string]string) error { for k := range allowListMapping { reg := regexp.MustCompile(metricNameRegex + `,` + labelRegex) if reg.FindString(k) != k { - return fmt.Errorf("--allow-metric-labels must have a list of kv pair with format `metricName:labelName=labelValue, labelValue,...`") + return fmt.Errorf("--allow-metric-labels must have a list of kv pair with format `metricName,labelName=labelValue, labelValue,...`") } } return nil diff --git a/vendor/k8s.io/component-base/metrics/opts.go b/vendor/k8s.io/component-base/metrics/opts.go index 30dfd2e3d..43015169e 100644 --- a/vendor/k8s.io/component-base/metrics/opts.go +++ b/vendor/k8s.io/component-base/metrics/opts.go @@ -25,11 +25,11 @@ import ( "time" "github.com/prometheus/client_golang/prometheus" - "gopkg.in/yaml.v2" "k8s.io/apimachinery/pkg/util/sets" promext "k8s.io/component-base/metrics/prometheusextension" "k8s.io/klog/v2" + yaml "sigs.k8s.io/yaml/goyaml.v2" ) var ( @@ -37,6 +37,14 @@ var ( allowListLock sync.RWMutex ) +// ResetLabelValueAllowLists resets the allow lists for label values. +// NOTE: This should only be used in test. +func ResetLabelValueAllowLists() { + allowListLock.Lock() + defer allowListLock.Unlock() + labelValueAllowLists = map[string]*MetricLabelAllowList{} +} + // KubeOpts is superset struct for prometheus.Opts. The prometheus Opts structure // is purposefully not embedded here because that would change struct initialization // in the manner which people are currently accustomed. @@ -44,16 +52,17 @@ var ( // Name must be set to a non-empty string. DeprecatedVersion is defined only // if the metric for which this options applies is, in fact, deprecated. type KubeOpts struct { - Namespace string - Subsystem string - Name string - Help string - ConstLabels map[string]string - DeprecatedVersion string - deprecateOnce sync.Once - annotateOnce sync.Once - StabilityLevel StabilityLevel - LabelValueAllowLists *MetricLabelAllowList + Namespace string + Subsystem string + Name string + Help string + ConstLabels map[string]string + DeprecatedVersion string + deprecateOnce sync.Once + annotateOnce sync.Once + StabilityLevel StabilityLevel + initializeLabelAllowListsOnce sync.Once + LabelValueAllowLists *MetricLabelAllowList } // BuildFQName joins the given three name components by "_". Empty name @@ -160,17 +169,18 @@ func (o *GaugeOpts) toPromGaugeOpts() prometheus.GaugeOpts { // and can safely be left at their zero value, although it is strongly // encouraged to set a Help string. 
type HistogramOpts struct { - Namespace string - Subsystem string - Name string - Help string - ConstLabels map[string]string - Buckets []float64 - DeprecatedVersion string - deprecateOnce sync.Once - annotateOnce sync.Once - StabilityLevel StabilityLevel - LabelValueAllowLists *MetricLabelAllowList + Namespace string + Subsystem string + Name string + Help string + ConstLabels map[string]string + Buckets []float64 + DeprecatedVersion string + deprecateOnce sync.Once + annotateOnce sync.Once + StabilityLevel StabilityLevel + initializeLabelAllowListsOnce sync.Once + LabelValueAllowLists *MetricLabelAllowList } // Modify help description on the metric description. @@ -206,18 +216,19 @@ func (o *HistogramOpts) toPromHistogramOpts() prometheus.HistogramOpts { // and can safely be left at their zero value, although it is strongly // encouraged to set a Help string. type TimingHistogramOpts struct { - Namespace string - Subsystem string - Name string - Help string - ConstLabels map[string]string - Buckets []float64 - InitialValue float64 - DeprecatedVersion string - deprecateOnce sync.Once - annotateOnce sync.Once - StabilityLevel StabilityLevel - LabelValueAllowLists *MetricLabelAllowList + Namespace string + Subsystem string + Name string + Help string + ConstLabels map[string]string + Buckets []float64 + InitialValue float64 + DeprecatedVersion string + deprecateOnce sync.Once + annotateOnce sync.Once + StabilityLevel StabilityLevel + initializeLabelAllowListsOnce sync.Once + LabelValueAllowLists *MetricLabelAllowList } // Modify help description on the metric description. @@ -255,20 +266,21 @@ func (o *TimingHistogramOpts) toPromHistogramOpts() promext.TimingHistogramOpts // a help string and to explicitly set the Objectives field to the desired value // as the default value will change in the upcoming v0.10 of the library. type SummaryOpts struct { - Namespace string - Subsystem string - Name string - Help string - ConstLabels map[string]string - Objectives map[float64]float64 - MaxAge time.Duration - AgeBuckets uint32 - BufCap uint32 - DeprecatedVersion string - deprecateOnce sync.Once - annotateOnce sync.Once - StabilityLevel StabilityLevel - LabelValueAllowLists *MetricLabelAllowList + Namespace string + Subsystem string + Name string + Help string + ConstLabels map[string]string + Objectives map[float64]float64 + MaxAge time.Duration + AgeBuckets uint32 + BufCap uint32 + DeprecatedVersion string + deprecateOnce sync.Once + annotateOnce sync.Once + StabilityLevel StabilityLevel + initializeLabelAllowListsOnce sync.Once + LabelValueAllowLists *MetricLabelAllowList } // Modify help description on the metric description. @@ -315,7 +327,7 @@ func (o *SummaryOpts) toPromSummaryOpts() prometheus.SummaryOpts { } type MetricLabelAllowList struct { - labelToAllowList map[string]sets.String + labelToAllowList map[string]sets.Set[string] } func (allowList *MetricLabelAllowList) ConstrainToAllowedList(labelNameList, labelValueList []string) { @@ -347,13 +359,13 @@ func SetLabelAllowListFromCLI(allowListMapping map[string]string) { for metricLabelName, labelValues := range allowListMapping { metricName := strings.Split(metricLabelName, ",")[0] labelName := strings.Split(metricLabelName, ",")[1] - valueSet := sets.NewString(strings.Split(labelValues, ",")...) + valueSet := sets.New[string](strings.Split(labelValues, ",")...) 
allowList, ok := labelValueAllowLists[metricName] if ok { allowList.labelToAllowList[labelName] = valueSet } else { - labelToAllowList := make(map[string]sets.String) + labelToAllowList := make(map[string]sets.Set[string]) labelToAllowList[labelName] = valueSet labelValueAllowLists[metricName] = &MetricLabelAllowList{ labelToAllowList, @@ -363,8 +375,6 @@ func SetLabelAllowListFromCLI(allowListMapping map[string]string) { } func SetLabelAllowListFromManifest(manifest string) { - allowListLock.Lock() - defer allowListLock.Unlock() allowListMapping := make(map[string]string) data, err := os.ReadFile(filepath.Clean(manifest)) if err != nil { diff --git a/vendor/k8s.io/component-base/metrics/processstarttime.go b/vendor/k8s.io/component-base/metrics/processstarttime.go index 4b5e76935..f4b98f8eb 100644 --- a/vendor/k8s.io/component-base/metrics/processstarttime.go +++ b/vendor/k8s.io/component-base/metrics/processstarttime.go @@ -35,7 +35,7 @@ var processStartTime = NewGaugeVec( // a prometheus registry. This metric needs to be included to ensure counter // data fidelity. func RegisterProcessStartTime(registrationFunc func(Registerable) error) error { - start, err := getProcessStart() + start, err := GetProcessStart() if err != nil { klog.Errorf("Could not get process start time, %v", err) start = float64(time.Now().Unix()) diff --git a/vendor/k8s.io/component-base/metrics/processstarttime_others.go b/vendor/k8s.io/component-base/metrics/processstarttime_others.go index a14cd8833..611a12906 100644 --- a/vendor/k8s.io/component-base/metrics/processstarttime_others.go +++ b/vendor/k8s.io/component-base/metrics/processstarttime_others.go @@ -25,7 +25,7 @@ import ( "github.com/prometheus/procfs" ) -func getProcessStart() (float64, error) { +func GetProcessStart() (float64, error) { pid := os.Getpid() p, err := procfs.NewProc(pid) if err != nil { diff --git a/vendor/k8s.io/component-base/metrics/processstarttime_windows.go b/vendor/k8s.io/component-base/metrics/processstarttime_windows.go index 7813115e7..afee6f9b1 100644 --- a/vendor/k8s.io/component-base/metrics/processstarttime_windows.go +++ b/vendor/k8s.io/component-base/metrics/processstarttime_windows.go @@ -23,7 +23,7 @@ import ( "golang.org/x/sys/windows" ) -func getProcessStart() (float64, error) { +func GetProcessStart() (float64, error) { processHandle := windows.CurrentProcess() var creationTime, exitTime, kernelTime, userTime windows.Filetime diff --git a/vendor/k8s.io/component-base/metrics/summary.go b/vendor/k8s.io/component-base/metrics/summary.go index d40421645..f1af12175 100644 --- a/vendor/k8s.io/component-base/metrics/summary.go +++ b/vendor/k8s.io/component-base/metrics/summary.go @@ -18,6 +18,7 @@ package metrics import ( "context" + "sync" "github.com/blang/semver/v4" "github.com/prometheus/client_golang/prometheus" @@ -109,11 +110,6 @@ func NewSummaryVec(opts *SummaryOpts, labels []string) *SummaryVec { opts.StabilityLevel.setDefaults() fqName := BuildFQName(opts.Namespace, opts.Subsystem, opts.Name) - allowListLock.RLock() - if allowList, ok := labelValueAllowLists[fqName]; ok { - opts.LabelValueAllowLists = allowList - } - allowListLock.RUnlock() v := &SummaryVec{ SummaryOpts: opts, @@ -160,6 +156,15 @@ func (v *SummaryVec) WithLabelValues(lvs ...string) ObserverMetric { } if v.LabelValueAllowLists != nil { v.LabelValueAllowLists.ConstrainToAllowedList(v.originalLabels, lvs) + } else { + v.initializeLabelAllowListsOnce.Do(func() { + allowListLock.RLock() + if allowList, ok := labelValueAllowLists[v.FQName()]; ok { + 
v.LabelValueAllowLists = allowList + allowList.ConstrainToAllowedList(v.originalLabels, lvs) + } + allowListLock.RUnlock() + }) } return v.SummaryVec.WithLabelValues(lvs...) } @@ -174,6 +179,15 @@ func (v *SummaryVec) With(labels map[string]string) ObserverMetric { } if v.LabelValueAllowLists != nil { v.LabelValueAllowLists.ConstrainLabelMap(labels) + } else { + v.initializeLabelAllowListsOnce.Do(func() { + allowListLock.RLock() + if allowList, ok := labelValueAllowLists[v.FQName()]; ok { + v.LabelValueAllowLists = allowList + allowList.ConstrainLabelMap(labels) + } + allowListLock.RUnlock() + }) } return v.SummaryVec.With(labels) } @@ -201,6 +215,13 @@ func (v *SummaryVec) Reset() { v.SummaryVec.Reset() } +// ResetLabelAllowLists resets the label allow list for the SummaryVec. +// NOTE: This should only be used in test. +func (v *SummaryVec) ResetLabelAllowLists() { + v.initializeLabelAllowListsOnce = sync.Once{} + v.LabelValueAllowLists = nil +} + // WithContext returns wrapped SummaryVec with context func (v *SummaryVec) WithContext(ctx context.Context) *SummaryVecWithContext { return &SummaryVecWithContext{ diff --git a/vendor/k8s.io/component-base/metrics/testutil/metrics.go b/vendor/k8s.io/component-base/metrics/testutil/metrics.go index c595f55d6..05d15b08d 100644 --- a/vendor/k8s.io/component-base/metrics/testutil/metrics.go +++ b/vendor/k8s.io/component-base/metrics/testutil/metrics.go @@ -258,12 +258,8 @@ func GetHistogramVecFromGatherer(gatherer metrics.Gatherer, metricName string, l if err != nil { return nil, err } - for _, mFamily := range m { - if mFamily.GetName() == metricName { - metricFamily = mFamily - break - } - } + + metricFamily = findMetricFamily(m, metricName) if metricFamily == nil { return nil, fmt.Errorf("metric %q not found", metricName) @@ -433,3 +429,47 @@ func LabelsMatch(metric *dto.Metric, labelFilter map[string]string) bool { return true } + +// GetCounterVecFromGatherer collects a counter that matches the given name +// from a gatherer implementing k8s.io/component-base/metrics.Gatherer interface. +// It returns all counter values that had a label with a certain name in a map +// that uses the label value as keys. +// +// Used only for testing purposes where we need to gather metrics directly from a running binary (without metrics endpoint). 
+func GetCounterValuesFromGatherer(gatherer metrics.Gatherer, metricName string, lvMap map[string]string, labelName string) (map[string]float64, error) { + m, err := gatherer.Gather() + if err != nil { + return nil, err + } + + metricFamily := findMetricFamily(m, metricName) + if metricFamily == nil { + return nil, fmt.Errorf("metric %q not found", metricName) + } + if len(metricFamily.GetMetric()) == 0 { + return nil, fmt.Errorf("metric %q is empty", metricName) + } + + values := make(map[string]float64) + for _, metric := range metricFamily.GetMetric() { + if LabelsMatch(metric, lvMap) { + if counter := metric.GetCounter(); counter != nil { + for _, labelPair := range metric.Label { + if labelPair.GetName() == labelName { + values[labelPair.GetValue()] = counter.GetValue() + } + } + } + } + } + return values, nil +} + +func findMetricFamily(metricFamilies []*dto.MetricFamily, metricName string) *dto.MetricFamily { + for _, mFamily := range metricFamilies { + if mFamily.GetName() == metricName { + return mFamily + } + } + return nil +} diff --git a/vendor/k8s.io/component-base/metrics/timing_histogram.go b/vendor/k8s.io/component-base/metrics/timing_histogram.go index a0f0b253c..4fc757473 100644 --- a/vendor/k8s.io/component-base/metrics/timing_histogram.go +++ b/vendor/k8s.io/component-base/metrics/timing_histogram.go @@ -18,6 +18,7 @@ package metrics import ( "context" + "sync" "time" "github.com/blang/semver/v4" @@ -125,11 +126,6 @@ func NewTestableTimingHistogramVec(nowFunc func() time.Time, opts *TimingHistogr opts.StabilityLevel.setDefaults() fqName := BuildFQName(opts.Namespace, opts.Subsystem, opts.Name) - allowListLock.RLock() - if allowList, ok := labelValueAllowLists[fqName]; ok { - opts.LabelValueAllowLists = allowList - } - allowListLock.RUnlock() v := &TimingHistogramVec{ TimingHistogramVec: noopTimingHistogramVec, @@ -175,6 +171,15 @@ func (v *TimingHistogramVec) WithLabelValuesChecked(lvs ...string) (GaugeMetric, } if v.LabelValueAllowLists != nil { v.LabelValueAllowLists.ConstrainToAllowedList(v.originalLabels, lvs) + } else { + v.initializeLabelAllowListsOnce.Do(func() { + allowListLock.RLock() + if allowList, ok := labelValueAllowLists[v.FQName()]; ok { + v.LabelValueAllowLists = allowList + allowList.ConstrainToAllowedList(v.originalLabels, lvs) + } + allowListLock.RUnlock() + }) } ops, err := v.TimingHistogramVec.GetMetricWithLabelValues(lvs...) if err != nil { @@ -214,6 +219,15 @@ func (v *TimingHistogramVec) WithChecked(labels map[string]string) (GaugeMetric, } if v.LabelValueAllowLists != nil { v.LabelValueAllowLists.ConstrainLabelMap(labels) + } else { + v.initializeLabelAllowListsOnce.Do(func() { + allowListLock.RLock() + if allowList, ok := labelValueAllowLists[v.FQName()]; ok { + v.LabelValueAllowLists = allowList + allowList.ConstrainLabelMap(labels) + } + allowListLock.RUnlock() + }) } ops, err := v.TimingHistogramVec.GetMetricWith(labels) return ops.(GaugeMetric), err @@ -254,6 +268,13 @@ func (v *TimingHistogramVec) Reset() { v.TimingHistogramVec.Reset() } +// ResetLabelAllowLists resets the label allow list for the TimingHistogramVec. +// NOTE: This should only be used in test. 
+func (v *TimingHistogramVec) ResetLabelAllowLists() { + v.initializeLabelAllowListsOnce = sync.Once{} + v.LabelValueAllowLists = nil +} + // WithContext returns wrapped TimingHistogramVec with context func (v *TimingHistogramVec) InterfaceWithContext(ctx context.Context) GaugeVecMetric { return &TimingHistogramVecWithContext{ diff --git a/vendor/k8s.io/component-base/version/base.go b/vendor/k8s.io/component-base/version/base.go index 601546782..46500118a 100644 --- a/vendor/k8s.io/component-base/version/base.go +++ b/vendor/k8s.io/component-base/version/base.go @@ -66,5 +66,5 @@ const ( // DefaultKubeBinaryVersion is the hard coded k8 binary version based on the latest K8s release. // It is supposed to be consistent with gitMajor and gitMinor, except for local tests, where gitMajor and gitMinor are "". // Should update for each minor release! - DefaultKubeBinaryVersion = "1.31" + DefaultKubeBinaryVersion = "1.32" ) diff --git a/vendor/k8s.io/component-base/version/version.go b/vendor/k8s.io/component-base/version/version.go index 1d268d4c6..99d368534 100644 --- a/vendor/k8s.io/component-base/version/version.go +++ b/vendor/k8s.io/component-base/version/version.go @@ -19,10 +19,41 @@ package version import ( "fmt" "runtime" + "sync/atomic" + "k8s.io/apimachinery/pkg/util/version" apimachineryversion "k8s.io/apimachinery/pkg/version" ) +type EffectiveVersion interface { + BinaryVersion() *version.Version + EmulationVersion() *version.Version + MinCompatibilityVersion() *version.Version + EqualTo(other EffectiveVersion) bool + String() string + Validate() []error +} + +type MutableEffectiveVersion interface { + EffectiveVersion + Set(binaryVersion, emulationVersion, minCompatibilityVersion *version.Version) + SetEmulationVersion(emulationVersion *version.Version) + SetMinCompatibilityVersion(minCompatibilityVersion *version.Version) +} + +type effectiveVersion struct { + // When true, BinaryVersion() returns the current binary version + useDefaultBuildBinaryVersion atomic.Bool + // Holds the last binary version stored in Set() + binaryVersion atomic.Pointer[version.Version] + // If the emulationVersion is set by the users, it could only contain major and minor versions. + // In tests, emulationVersion could be the same as the binary version, or set directly, + // which can have "alpha" as pre-release to continue serving expired apis while we clean up the test. + emulationVersion atomic.Pointer[version.Version] + // minCompatibilityVersion could only contain major and minor versions. + minCompatibilityVersion atomic.Pointer[version.Version] +} + // Get returns the overall codebase version. It's for detecting // what code a binary was built from. func Get() apimachineryversion.Info { @@ -40,3 +71,129 @@ func Get() apimachineryversion.Info { Platform: fmt.Sprintf("%s/%s", runtime.GOOS, runtime.GOARCH), } } + +func (m *effectiveVersion) BinaryVersion() *version.Version { + if m.useDefaultBuildBinaryVersion.Load() { + return defaultBuildBinaryVersion() + } + return m.binaryVersion.Load() +} + +func (m *effectiveVersion) EmulationVersion() *version.Version { + ver := m.emulationVersion.Load() + if ver != nil { + // Emulation version can have "alpha" as pre-release to continue serving expired apis while we clean up the test. + // The pre-release should not be accessible to the users. 
+ return ver.WithPreRelease(m.BinaryVersion().PreRelease()) + } + return ver +} + +func (m *effectiveVersion) MinCompatibilityVersion() *version.Version { + return m.minCompatibilityVersion.Load() +} + +func (m *effectiveVersion) EqualTo(other EffectiveVersion) bool { + return m.BinaryVersion().EqualTo(other.BinaryVersion()) && m.EmulationVersion().EqualTo(other.EmulationVersion()) && m.MinCompatibilityVersion().EqualTo(other.MinCompatibilityVersion()) +} + +func (m *effectiveVersion) String() string { + if m == nil { + return "" + } + return fmt.Sprintf("{BinaryVersion: %s, EmulationVersion: %s, MinCompatibilityVersion: %s}", + m.BinaryVersion().String(), m.EmulationVersion().String(), m.MinCompatibilityVersion().String()) +} + +func majorMinor(ver *version.Version) *version.Version { + if ver == nil { + return ver + } + return version.MajorMinor(ver.Major(), ver.Minor()) +} + +func (m *effectiveVersion) Set(binaryVersion, emulationVersion, minCompatibilityVersion *version.Version) { + m.binaryVersion.Store(binaryVersion) + m.useDefaultBuildBinaryVersion.Store(false) + m.emulationVersion.Store(majorMinor(emulationVersion)) + m.minCompatibilityVersion.Store(majorMinor(minCompatibilityVersion)) +} + +func (m *effectiveVersion) SetEmulationVersion(emulationVersion *version.Version) { + m.emulationVersion.Store(majorMinor(emulationVersion)) +} + +func (m *effectiveVersion) SetMinCompatibilityVersion(minCompatibilityVersion *version.Version) { + m.minCompatibilityVersion.Store(majorMinor(minCompatibilityVersion)) +} + +func (m *effectiveVersion) Validate() []error { + var errs []error + // Validate only checks the major and minor versions. + binaryVersion := m.BinaryVersion().WithPatch(0) + emulationVersion := m.emulationVersion.Load() + minCompatibilityVersion := m.minCompatibilityVersion.Load() + + // emulationVersion can only be 1.{binaryMinor-1}...1.{binaryMinor}. + maxEmuVer := binaryVersion + minEmuVer := binaryVersion.SubtractMinor(1) + if emulationVersion.GreaterThan(maxEmuVer) || emulationVersion.LessThan(minEmuVer) { + errs = append(errs, fmt.Errorf("emulation version %s is not between [%s, %s]", emulationVersion.String(), minEmuVer.String(), maxEmuVer.String())) + } + // minCompatibilityVersion can only be 1.{binaryMinor-1} for alpha. + maxCompVer := binaryVersion.SubtractMinor(1) + minCompVer := binaryVersion.SubtractMinor(1) + if minCompatibilityVersion.GreaterThan(maxCompVer) || minCompatibilityVersion.LessThan(minCompVer) { + errs = append(errs, fmt.Errorf("minCompatibilityVersion version %s is not between [%s, %s]", minCompatibilityVersion.String(), minCompVer.String(), maxCompVer.String())) + } + return errs +} + +func newEffectiveVersion(binaryVersion *version.Version, useDefaultBuildBinaryVersion bool) MutableEffectiveVersion { + effective := &effectiveVersion{} + compatVersion := binaryVersion.SubtractMinor(1) + effective.Set(binaryVersion, binaryVersion, compatVersion) + effective.useDefaultBuildBinaryVersion.Store(useDefaultBuildBinaryVersion) + return effective +} + +func NewEffectiveVersion(binaryVer string) MutableEffectiveVersion { + if binaryVer == "" { + return &effectiveVersion{} + } + binaryVersion := version.MustParse(binaryVer) + return newEffectiveVersion(binaryVersion, false) +} + +func defaultBuildBinaryVersion() *version.Version { + verInfo := Get() + return version.MustParse(verInfo.String()).WithInfo(verInfo) +} + +// DefaultBuildEffectiveVersion returns the MutableEffectiveVersion based on the +// current build information. 
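Illustrative sketch (not part of the vendored patch): exercising the EffectiveVersion plumbing above. The version strings are examples, and baseversion is only a local import alias for the vendored package.

package example

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/version"
	baseversion "k8s.io/component-base/version"
)

func exampleEffectiveVersion() error {
	// Binary version 1.32 emulating the previous minor release.
	ev := baseversion.NewEffectiveVersion("1.32")
	ev.SetEmulationVersion(version.MustParse("1.31"))

	// Validate enforces that the emulation version stays within
	// [binary-1, binary] and that minCompatibilityVersion is binary-1.
	if errs := ev.Validate(); len(errs) > 0 {
		return fmt.Errorf("invalid effective version %s: %v", ev.String(), errs)
	}
	fmt.Println(ev.EmulationVersion().String()) // "1.31"
	return nil
}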
+func DefaultBuildEffectiveVersion() MutableEffectiveVersion { + binaryVersion := defaultBuildBinaryVersion() + if binaryVersion.Major() == 0 && binaryVersion.Minor() == 0 { + return DefaultKubeEffectiveVersion() + } + return newEffectiveVersion(binaryVersion, true) +} + +// DefaultKubeEffectiveVersion returns the MutableEffectiveVersion based on the +// latest K8s release. +func DefaultKubeEffectiveVersion() MutableEffectiveVersion { + binaryVersion := version.MustParse(DefaultKubeBinaryVersion).WithInfo(Get()) + return newEffectiveVersion(binaryVersion, false) +} + +// ValidateKubeEffectiveVersion validates the EmulationVersion is equal to the binary version at 1.31 for kube components. +// emulationVersion is introduced in 1.31, so it is only allowed to be equal to the binary version at 1.31. +func ValidateKubeEffectiveVersion(effectiveVersion EffectiveVersion) error { + binaryVersion := version.MajorMinor(effectiveVersion.BinaryVersion().Major(), effectiveVersion.BinaryVersion().Minor()) + if binaryVersion.EqualTo(version.MajorMinor(1, 31)) && !effectiveVersion.EmulationVersion().EqualTo(binaryVersion) { + return fmt.Errorf("emulation version needs to be equal to binary version(%s) in compatibility-version alpha, got %s", + binaryVersion.String(), effectiveVersion.EmulationVersion().String()) + } + return nil +} diff --git a/vendor/k8s.io/component-base/zpages/features/doc.go b/vendor/k8s.io/component-base/zpages/features/doc.go new file mode 100644 index 000000000..b2fa2809a --- /dev/null +++ b/vendor/k8s.io/component-base/zpages/features/doc.go @@ -0,0 +1,22 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package features contains a separate feature set specifically designed for +// managing zpages related features. These feature gates control the +// availability and behavior of various zpages within Kubernetes components. +// New zpages added to Kubernetes components should utilize this feature set +// to ensure proper management of their availability. +package features diff --git a/vendor/k8s.io/component-base/zpages/features/kube_features.go b/vendor/k8s.io/component-base/zpages/features/kube_features.go new file mode 100644 index 000000000..ed5e41e01 --- /dev/null +++ b/vendor/k8s.io/component-base/zpages/features/kube_features.go @@ -0,0 +1,52 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package features + +import ( + "k8s.io/apimachinery/pkg/util/version" + "k8s.io/component-base/featuregate" +) + +const ( + // owner: @richabanker + // kep: https://kep.k8s.io/4828 + ComponentFlagz featuregate.Feature = "ComponentFlagz" + + // owner: @richabanker + // kep: https://kep.k8s.io/4827 + // alpha: v1.32 + // + // Enables /statusz endpoint for a component making it accessible to + // users with the system:monitoring cluster role. + ComponentStatusz featuregate.Feature = "ComponentStatusz" +) + +func featureGates() map[featuregate.Feature]featuregate.VersionedSpecs { + return map[featuregate.Feature]featuregate.VersionedSpecs{ + ComponentFlagz: { + {Version: version.MustParse("1.32"), Default: false, PreRelease: featuregate.Alpha}, + }, + ComponentStatusz: { + {Version: version.MustParse("1.32"), Default: false, PreRelease: featuregate.Alpha}, + }, + } +} + +// AddFeatureGates adds all feature gates used by this package. +func AddFeatureGates(mutableFeatureGate featuregate.MutableVersionedFeatureGate) error { + return mutableFeatureGate.AddVersioned(featureGates()) +} diff --git a/vendor/k8s.io/component-base/zpages/flagz/flagreader.go b/vendor/k8s.io/component-base/zpages/flagz/flagreader.go new file mode 100644 index 000000000..7e05928eb --- /dev/null +++ b/vendor/k8s.io/component-base/zpages/flagz/flagreader.go @@ -0,0 +1,52 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package flagz + +import ( + "github.com/spf13/pflag" + cliflag "k8s.io/component-base/cli/flag" +) + +type Reader interface { + GetFlagz() map[string]string +} + +// NamedFlagSetsGetter implements Reader for cliflag.NamedFlagSets +type NamedFlagSetsReader struct { + FlagSets cliflag.NamedFlagSets +} + +func (n NamedFlagSetsReader) GetFlagz() map[string]string { + return convertNamedFlagSetToFlags(&n.FlagSets) +} + +func convertNamedFlagSetToFlags(flagSets *cliflag.NamedFlagSets) map[string]string { + flags := make(map[string]string) + for _, fs := range flagSets.FlagSets { + fs.VisitAll(func(flag *pflag.Flag) { + if flag.Value != nil { + value := flag.Value.String() + if set, ok := flag.Annotations["classified"]; ok && len(set) > 0 { + value = "CLASSIFIED" + } + flags[flag.Name] = value + } + }) + } + + return flags +} diff --git a/vendor/k8s.io/component-base/zpages/flagz/flagz.go b/vendor/k8s.io/component-base/zpages/flagz/flagz.go new file mode 100644 index 000000000..e6b52c30a --- /dev/null +++ b/vendor/k8s.io/component-base/zpages/flagz/flagz.go @@ -0,0 +1,126 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package flagz + +import ( + "bytes" + "fmt" + "io" + "math/rand" + "net/http" + "sort" + "strings" + "sync" + + "github.com/munnerz/goautoneg" + + "k8s.io/klog/v2" +) + +const ( + flagzHeaderFmt = ` +%s flags +Warning: This endpoint is not meant to be machine parseable, has no formatting compatibility guarantees and is for debugging purposes only. + +` +) + +var ( + flagzSeparators = []string{":", ": ", "=", " "} + errUnsupportedMediaType = fmt.Errorf("media type not acceptable, must be: text/plain") +) + +type registry struct { + response bytes.Buffer + once sync.Once +} + +type mux interface { + Handle(path string, handler http.Handler) +} + +func Install(m mux, componentName string, flagReader Reader) { + var reg registry + reg.installHandler(m, componentName, flagReader) +} + +func (reg *registry) installHandler(m mux, componentName string, flagReader Reader) { + m.Handle("/flagz", reg.handleFlags(componentName, flagReader)) +} + +func (reg *registry) handleFlags(componentName string, flagReader Reader) http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + if !acceptableMediaType(r) { + http.Error(w, errUnsupportedMediaType.Error(), http.StatusNotAcceptable) + return + } + + reg.once.Do(func() { + fmt.Fprintf(®.response, flagzHeaderFmt, componentName) + if flagReader == nil { + klog.Error("received nil flagReader") + return + } + + randomIndex := rand.Intn(len(flagzSeparators)) + separator := flagzSeparators[randomIndex] + // Randomize the delimiter for printing to prevent scraping of the response. + printSortedFlags(®.response, flagReader.GetFlagz(), separator) + }) + w.Header().Set("Content-Type", "text/plain; charset=utf-8") + _, err := w.Write(reg.response.Bytes()) + if err != nil { + klog.Errorf("error writing response: %v", err) + http.Error(w, "error writing response", http.StatusInternalServerError) + } + } +} + +func acceptableMediaType(r *http.Request) bool { + accepts := goautoneg.ParseAccept(r.Header.Get("Accept")) + for _, accept := range accepts { + if !mediaTypeMatches(accept) { + continue + } + if len(accept.Params) == 0 { + return true + } + if len(accept.Params) == 1 { + if charset, ok := accept.Params["charset"]; ok && strings.EqualFold(charset, "utf-8") { + return true + } + } + } + return false +} + +func mediaTypeMatches(a goautoneg.Accept) bool { + return (a.Type == "text" || a.Type == "*") && + (a.SubType == "plain" || a.SubType == "*") +} + +func printSortedFlags(w io.Writer, flags map[string]string, separator string) { + var sortedKeys []string + for key := range flags { + sortedKeys = append(sortedKeys, key) + } + + sort.Strings(sortedKeys) + for _, key := range sortedKeys { + fmt.Fprintf(w, "%s%s%s\n", key, separator, flags[key]) + } +} diff --git a/vendor/k8s.io/component-helpers/resource/OWNERS b/vendor/k8s.io/component-helpers/resource/OWNERS new file mode 100644 index 000000000..d238eb22d --- /dev/null +++ b/vendor/k8s.io/component-helpers/resource/OWNERS @@ -0,0 +1,13 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +options: + no_parent_owners: true +approvers: + - api-approvers +reviewers: + - sig-node-reviewers + - sig-scheduling +labels: + - sig/node + - sig/scheduling + - kind/api-change diff --git a/vendor/k8s.io/component-helpers/resource/helpers.go b/vendor/k8s.io/component-helpers/resource/helpers.go new file mode 100644 index 000000000..3bdcef6e8 --- /dev/null +++ 
b/vendor/k8s.io/component-helpers/resource/helpers.go @@ -0,0 +1,382 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package resource + +import ( + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/util/sets" +) + +// ContainerType signifies container type +type ContainerType int + +const ( + // Containers is for normal containers + Containers ContainerType = 1 << iota + // InitContainers is for init containers + InitContainers +) + +// PodResourcesOptions controls the behavior of PodRequests and PodLimits. +type PodResourcesOptions struct { + // Reuse, if provided will be reused to accumulate resources and returned by the PodRequests or PodLimits + // functions. All existing values in Reuse will be lost. + Reuse v1.ResourceList + // UseStatusResources indicates whether resources reported by the PodStatus should be considered + // when evaluating the pod resources. This MUST be false if the InPlacePodVerticalScaling + // feature is not enabled. + UseStatusResources bool + // ExcludeOverhead controls if pod overhead is excluded from the calculation. + ExcludeOverhead bool + // ContainerFn is called with the effective resources required for each container within the pod. + ContainerFn func(res v1.ResourceList, containerType ContainerType) + // NonMissingContainerRequests if provided will replace any missing container level requests for the specified resources + // with the given values. If the requests for those resources are explicitly set, even if zero, they will not be modified. + NonMissingContainerRequests v1.ResourceList + // SkipPodLevelResources controls whether pod-level resources should be skipped + // from the calculation. If pod-level resources are not set in PodSpec, + // pod-level resources will always be skipped. + SkipPodLevelResources bool +} + +var supportedPodLevelResources = sets.New(v1.ResourceCPU, v1.ResourceMemory) + +func SupportedPodLevelResources() sets.Set[v1.ResourceName] { + return supportedPodLevelResources +} + +// IsSupportedPodLevelResources checks if a given resource is supported by pod-level +// resource management through the PodLevelResources feature. Returns true if +// the resource is supported. +func IsSupportedPodLevelResource(name v1.ResourceName) bool { + return supportedPodLevelResources.Has(name) +} + +// IsPodLevelResourcesSet check if PodLevelResources pod-level resources are set. +// It returns true if either the Requests or Limits maps are non-empty. +func IsPodLevelResourcesSet(pod *v1.Pod) bool { + if pod.Spec.Resources == nil { + return false + } + + if (len(pod.Spec.Resources.Requests) + len(pod.Spec.Resources.Limits)) == 0 { + return false + } + + for resourceName := range pod.Spec.Resources.Requests { + if IsSupportedPodLevelResource(resourceName) { + return true + } + } + + for resourceName := range pod.Spec.Resources.Limits { + if IsSupportedPodLevelResource(resourceName) { + return true + } + } + + return false +} + +// IsPodLevelRequestsSet checks if pod-level requests are set. 
It returns true if +// Requests map is non-empty. +func IsPodLevelRequestsSet(pod *v1.Pod) bool { + if pod.Spec.Resources == nil { + return false + } + + if len(pod.Spec.Resources.Requests) == 0 { + return false + } + + for resourceName := range pod.Spec.Resources.Requests { + if IsSupportedPodLevelResource(resourceName) { + return true + } + } + + return false +} + +// PodRequests computes the total pod requests per the PodResourcesOptions supplied. +// If PodResourcesOptions is nil, then the requests are returned including pod overhead. +// If the PodLevelResources feature is enabled AND the pod-level resources are set, +// those pod-level values are used in calculating Pod Requests. +// The computation is part of the API and must be reviewed as an API change. +func PodRequests(pod *v1.Pod, opts PodResourcesOptions) v1.ResourceList { + reqs := AggregateContainerRequests(pod, opts) + if !opts.SkipPodLevelResources && IsPodLevelRequestsSet(pod) { + for resourceName, quantity := range pod.Spec.Resources.Requests { + if IsSupportedPodLevelResource(resourceName) { + reqs[resourceName] = quantity + } + } + } + + // Add overhead for running a pod to the sum of requests if requested: + if !opts.ExcludeOverhead && pod.Spec.Overhead != nil { + addResourceList(reqs, pod.Spec.Overhead) + } + + return reqs +} + +// AggregateContainerRequests computes the total resource requests of all the containers +// in a pod. This computation folows the formula defined in the KEP for sidecar +// containers. See https://github.com/kubernetes/enhancements/tree/master/keps/sig-node/753-sidecar-containers#resources-calculation-for-scheduling-and-pod-admission +// for more details. +func AggregateContainerRequests(pod *v1.Pod, opts PodResourcesOptions) v1.ResourceList { + // attempt to reuse the maps if passed, or allocate otherwise + reqs := reuseOrClearResourceList(opts.Reuse) + var containerStatuses map[string]*v1.ContainerStatus + if opts.UseStatusResources { + containerStatuses = make(map[string]*v1.ContainerStatus, len(pod.Status.ContainerStatuses)) + for i := range pod.Status.ContainerStatuses { + containerStatuses[pod.Status.ContainerStatuses[i].Name] = &pod.Status.ContainerStatuses[i] + } + } + + for _, container := range pod.Spec.Containers { + containerReqs := container.Resources.Requests + if opts.UseStatusResources { + cs, found := containerStatuses[container.Name] + if found && cs.Resources != nil { + if pod.Status.Resize == v1.PodResizeStatusInfeasible { + containerReqs = cs.Resources.Requests.DeepCopy() + } else { + containerReqs = max(container.Resources.Requests, cs.Resources.Requests) + } + } + } + + if len(opts.NonMissingContainerRequests) > 0 { + containerReqs = applyNonMissing(containerReqs, opts.NonMissingContainerRequests) + } + + if opts.ContainerFn != nil { + opts.ContainerFn(containerReqs, Containers) + } + + addResourceList(reqs, containerReqs) + } + + restartableInitContainerReqs := v1.ResourceList{} + initContainerReqs := v1.ResourceList{} + // init containers define the minimum of any resource + // Note: In-place resize is not allowed for InitContainers, so no need to check for ResizeStatus value + // + // Let's say `InitContainerUse(i)` is the resource requirements when the i-th + // init container is initializing, then + // `InitContainerUse(i) = sum(Resources of restartable init containers with index < i) + Resources of i-th init container`. 
+ // + // See https://github.com/kubernetes/enhancements/tree/master/keps/sig-node/753-sidecar-containers#exposing-pod-resource-requirements for the detail. + for _, container := range pod.Spec.InitContainers { + containerReqs := container.Resources.Requests + if len(opts.NonMissingContainerRequests) > 0 { + containerReqs = applyNonMissing(containerReqs, opts.NonMissingContainerRequests) + } + + if container.RestartPolicy != nil && *container.RestartPolicy == v1.ContainerRestartPolicyAlways { + // and add them to the resulting cumulative container requests + addResourceList(reqs, containerReqs) + + // track our cumulative restartable init container resources + addResourceList(restartableInitContainerReqs, containerReqs) + containerReqs = restartableInitContainerReqs + } else { + tmp := v1.ResourceList{} + addResourceList(tmp, containerReqs) + addResourceList(tmp, restartableInitContainerReqs) + containerReqs = tmp + } + + if opts.ContainerFn != nil { + opts.ContainerFn(containerReqs, InitContainers) + } + maxResourceList(initContainerReqs, containerReqs) + } + + maxResourceList(reqs, initContainerReqs) + return reqs +} + +// applyNonMissing will return a copy of the given resource list with any missing values replaced by the nonMissing values +func applyNonMissing(reqs v1.ResourceList, nonMissing v1.ResourceList) v1.ResourceList { + cp := v1.ResourceList{} + for k, v := range reqs { + cp[k] = v.DeepCopy() + } + + for k, v := range nonMissing { + if _, found := reqs[k]; !found { + rk := cp[k] + rk.Add(v) + cp[k] = rk + } + } + return cp +} + +// PodLimits computes the pod limits per the PodResourcesOptions supplied. If PodResourcesOptions is nil, then +// the limits are returned including pod overhead for any non-zero limits. The computation is part of the API and must be reviewed +// as an API change. +func PodLimits(pod *v1.Pod, opts PodResourcesOptions) v1.ResourceList { + // attempt to reuse the maps if passed, or allocate otherwise + limits := AggregateContainerLimits(pod, opts) + if !opts.SkipPodLevelResources && IsPodLevelResourcesSet(pod) { + for resourceName, quantity := range pod.Spec.Resources.Limits { + if IsSupportedPodLevelResource(resourceName) { + limits[resourceName] = quantity + } + } + } + + // Add overhead to non-zero limits if requested: + if !opts.ExcludeOverhead && pod.Spec.Overhead != nil { + for name, quantity := range pod.Spec.Overhead { + if value, ok := limits[name]; ok && !value.IsZero() { + value.Add(quantity) + limits[name] = value + } + } + } + + return limits +} + +// AggregateContainerLimits computes the aggregated resource limits of all the containers +// in a pod. This computation follows the formula defined in the KEP for sidecar +// containers. See https://github.com/kubernetes/enhancements/tree/master/keps/sig-node/753-sidecar-containers#resources-calculation-for-scheduling-and-pod-admission +// for more details. 
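Illustrative sketch (not part of the vendored patch): how PodRequests layers pod-level requests over the aggregated container requests. The pod below is a made-up example using the 1.32 pod-level Resources field; for supported resources (CPU, memory) the pod-level value wins, while unspecified resources fall back to the container aggregate.

package example

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	apiresource "k8s.io/apimachinery/pkg/api/resource"
	resourcehelpers "k8s.io/component-helpers/resource"
)

func examplePodRequests() {
	pod := &corev1.Pod{
		Spec: corev1.PodSpec{
			// Pod-level requests (PodLevelResources) override the container sum for CPU.
			Resources: &corev1.ResourceRequirements{
				Requests: corev1.ResourceList{
					corev1.ResourceCPU: apiresource.MustParse("750m"),
				},
			},
			Containers: []corev1.Container{
				{
					Name: "app",
					Resources: corev1.ResourceRequirements{
						Requests: corev1.ResourceList{
							corev1.ResourceCPU:    apiresource.MustParse("500m"),
							corev1.ResourceMemory: apiresource.MustParse("128Mi"),
						},
					},
				},
			},
		},
	}

	reqs := resourcehelpers.PodRequests(pod, resourcehelpers.PodResourcesOptions{})
	// CPU comes from the pod-level request, memory from the container aggregate.
	fmt.Println(reqs.Cpu().String(), reqs.Memory().String()) // 750m 128Mi
}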
+func AggregateContainerLimits(pod *v1.Pod, opts PodResourcesOptions) v1.ResourceList { + // attempt to reuse the maps if passed, or allocate otherwise + limits := reuseOrClearResourceList(opts.Reuse) + var containerStatuses map[string]*v1.ContainerStatus + if opts.UseStatusResources { + containerStatuses = make(map[string]*v1.ContainerStatus, len(pod.Status.ContainerStatuses)) + for i := range pod.Status.ContainerStatuses { + containerStatuses[pod.Status.ContainerStatuses[i].Name] = &pod.Status.ContainerStatuses[i] + } + } + + for _, container := range pod.Spec.Containers { + containerLimits := container.Resources.Limits + if opts.UseStatusResources { + cs, found := containerStatuses[container.Name] + if found && cs.Resources != nil { + if pod.Status.Resize == v1.PodResizeStatusInfeasible { + containerLimits = cs.Resources.Limits.DeepCopy() + } else { + containerLimits = max(container.Resources.Limits, cs.Resources.Limits) + } + } + } + + if opts.ContainerFn != nil { + opts.ContainerFn(containerLimits, Containers) + } + addResourceList(limits, containerLimits) + } + + restartableInitContainerLimits := v1.ResourceList{} + initContainerLimits := v1.ResourceList{} + // init containers define the minimum of any resource + // + // Let's say `InitContainerUse(i)` is the resource requirements when the i-th + // init container is initializing, then + // `InitContainerUse(i) = sum(Resources of restartable init containers with index < i) + Resources of i-th init container`. + // + // See https://github.com/kubernetes/enhancements/tree/master/keps/sig-node/753-sidecar-containers#exposing-pod-resource-requirements for the detail. + for _, container := range pod.Spec.InitContainers { + containerLimits := container.Resources.Limits + // Is the init container marked as a restartable init container? + if container.RestartPolicy != nil && *container.RestartPolicy == v1.ContainerRestartPolicyAlways { + addResourceList(limits, containerLimits) + + // track our cumulative restartable init container resources + addResourceList(restartableInitContainerLimits, containerLimits) + containerLimits = restartableInitContainerLimits + } else { + tmp := v1.ResourceList{} + addResourceList(tmp, containerLimits) + addResourceList(tmp, restartableInitContainerLimits) + containerLimits = tmp + } + + if opts.ContainerFn != nil { + opts.ContainerFn(containerLimits, InitContainers) + } + maxResourceList(initContainerLimits, containerLimits) + } + + maxResourceList(limits, initContainerLimits) + return limits +} + +// addResourceList adds the resources in newList to list. 
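Illustrative sketch (not part of the vendored patch): the KEP-753 sidecar rule applied by the aggregation functions above. A restartable init container (RestartPolicy: Always) keeps running, so its requests are added to the running total; a plain init container only raises the peak via the max step. The container names and quantities are arbitrary.

package example

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	apiresource "k8s.io/apimachinery/pkg/api/resource"
	resourcehelpers "k8s.io/component-helpers/resource"
)

func exampleSidecarRequests() {
	always := corev1.ContainerRestartPolicyAlways
	pod := &corev1.Pod{
		Spec: corev1.PodSpec{
			InitContainers: []corev1.Container{
				{
					Name:          "sidecar",
					RestartPolicy: &always, // restartable: adds to the cumulative sum
					Resources: corev1.ResourceRequirements{
						Requests: corev1.ResourceList{corev1.ResourceCPU: apiresource.MustParse("100m")},
					},
				},
				{
					Name: "init", // plain init container: only raises the peak
					Resources: corev1.ResourceRequirements{
						Requests: corev1.ResourceList{corev1.ResourceCPU: apiresource.MustParse("300m")},
					},
				},
			},
			Containers: []corev1.Container{
				{
					Name: "app",
					Resources: corev1.ResourceRequirements{
						Requests: corev1.ResourceList{corev1.ResourceCPU: apiresource.MustParse("200m")},
					},
				},
			},
		},
	}

	reqs := resourcehelpers.AggregateContainerRequests(pod, resourcehelpers.PodResourcesOptions{})
	// max(app 200m + sidecar 100m, sidecar 100m + init 300m) = 400m
	fmt.Println(reqs.Cpu().String()) // 400m
}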
+func addResourceList(list, newList v1.ResourceList) { + for name, quantity := range newList { + if value, ok := list[name]; !ok { + list[name] = quantity.DeepCopy() + } else { + value.Add(quantity) + list[name] = value + } + } +} + +// maxResourceList sets list to the greater of list/newList for every resource in newList +func maxResourceList(list, newList v1.ResourceList) { + for name, quantity := range newList { + if value, ok := list[name]; !ok || quantity.Cmp(value) > 0 { + list[name] = quantity.DeepCopy() + } + } +} + +// max returns the result of max(a, b) for each named resource and is only used if we can't +// accumulate into an existing resource list +func max(a v1.ResourceList, b v1.ResourceList) v1.ResourceList { + result := v1.ResourceList{} + for key, value := range a { + if other, found := b[key]; found { + if value.Cmp(other) <= 0 { + result[key] = other.DeepCopy() + continue + } + } + result[key] = value.DeepCopy() + } + for key, value := range b { + if _, found := result[key]; !found { + result[key] = value.DeepCopy() + } + } + return result +} + +// reuseOrClearResourceList is a helper for avoiding excessive allocations of +// resource lists within the inner loop of resource calculations. +func reuseOrClearResourceList(reuse v1.ResourceList) v1.ResourceList { + if reuse == nil { + return make(v1.ResourceList, 4) + } + for k := range reuse { + delete(reuse, k) + } + return reuse +} diff --git a/vendor/k8s.io/component-helpers/scheduling/corev1/nodeaffinity/nodeaffinity.go b/vendor/k8s.io/component-helpers/scheduling/corev1/nodeaffinity/nodeaffinity.go index 0e3b99163..88aa8efc2 100644 --- a/vendor/k8s.io/component-helpers/scheduling/corev1/nodeaffinity/nodeaffinity.go +++ b/vendor/k8s.io/component-helpers/scheduling/corev1/nodeaffinity/nodeaffinity.go @@ -200,13 +200,13 @@ func (t *nodeSelectorTerm) match(nodeLabels labels.Set, nodeFields fields.Set) ( return true, nil } -var validSelectorOperators = []string{ - string(v1.NodeSelectorOpIn), - string(v1.NodeSelectorOpNotIn), - string(v1.NodeSelectorOpExists), - string(v1.NodeSelectorOpDoesNotExist), - string(v1.NodeSelectorOpGt), - string(v1.NodeSelectorOpLt), +var validSelectorOperators = []v1.NodeSelectorOperator{ + v1.NodeSelectorOpIn, + v1.NodeSelectorOpNotIn, + v1.NodeSelectorOpExists, + v1.NodeSelectorOpDoesNotExist, + v1.NodeSelectorOpGt, + v1.NodeSelectorOpLt, } // nodeSelectorRequirementsAsSelector converts the []NodeSelectorRequirement api type into a struct that implements @@ -250,9 +250,9 @@ func nodeSelectorRequirementsAsSelector(nsm []v1.NodeSelectorRequirement, path * return selector, nil } -var validFieldSelectorOperators = []string{ - string(v1.NodeSelectorOpIn), - string(v1.NodeSelectorOpNotIn), +var validFieldSelectorOperators = []v1.NodeSelectorOperator{ + v1.NodeSelectorOpIn, + v1.NodeSelectorOpNotIn, } // nodeSelectorRequirementsAsFieldSelector converts the []NodeSelectorRequirement core type into a struct that implements diff --git a/vendor/k8s.io/controller-manager/pkg/features/kube_features.go b/vendor/k8s.io/controller-manager/pkg/features/kube_features.go index 65950dd8f..417045075 100644 --- a/vendor/k8s.io/controller-manager/pkg/features/kube_features.go +++ b/vendor/k8s.io/controller-manager/pkg/features/kube_features.go @@ -17,6 +17,7 @@ limitations under the License. 
package features import ( + "k8s.io/apimachinery/pkg/util/version" "k8s.io/component-base/featuregate" ) @@ -24,7 +25,6 @@ const ( // Every feature gate should add method here following this template: // // // owner: @username - // // alpha: v1.4 // MyFeature featuregate.Feature = "MyFeature" // // Feature gates should be listed in alphabetical, case-sensitive @@ -34,37 +34,18 @@ const ( // owner: @nckturner // kep: http://kep.k8s.io/2699 - // alpha: v1.27 // Enable webhook in cloud controller manager CloudControllerManagerWebhook featuregate.Feature = "CloudControllerManagerWebhook" - - // owner: @danwinship - // alpha: v1.27 - // beta: v1.29 - // GA: v1.30 - // - // Enables dual-stack values in the - // `alpha.kubernetes.io/provided-node-ip` annotation - CloudDualStackNodeIPs featuregate.Feature = "CloudDualStackNodeIPs" - - // owner: @alexanderConstantinescu - // kep: http://kep.k8s.io/3458 - // beta: v1.27 - // GA: v1.30 - // - // Enables less load balancer re-configurations by the service controller - // (KCCM) as an effect of changing node state. - StableLoadBalancerNodeSet featuregate.Feature = "StableLoadBalancerNodeSet" ) -func SetupCurrentKubernetesSpecificFeatureGates(featuregates featuregate.MutableFeatureGate) error { - return featuregates.Add(cloudPublicFeatureGates) +func SetupCurrentKubernetesSpecificFeatureGates(featuregates featuregate.MutableVersionedFeatureGate) error { + return featuregates.AddVersioned(versionedCloudPublicFeatureGates) } -// cloudPublicFeatureGates consists of cloud-specific feature keys. -// To add a new feature, define a key for it at k8s.io/api/pkg/features and add it here. -var cloudPublicFeatureGates = map[featuregate.Feature]featuregate.FeatureSpec{ - CloudControllerManagerWebhook: {Default: false, PreRelease: featuregate.Alpha}, - CloudDualStackNodeIPs: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.32 - StableLoadBalancerNodeSet: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // GA in 1.30, remove in 1.31 +// versionedCloudPublicFeatureGates consists of versioned cloud-specific feature keys. +// To add a new feature, define a key for it above and add it here. 
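Illustrative sketch (not part of the vendored patch): how a consumer might wire the versioned cloud feature gates alongside a component-local gate, using the same VersionedSpecs pattern shown in this file. MyControllerFeature is a hypothetical gate name, and the MutableVersionedFeatureGate is assumed to be supplied by the component.

package example

import (
	"k8s.io/apimachinery/pkg/util/version"
	"k8s.io/component-base/featuregate"
	ccmfeatures "k8s.io/controller-manager/pkg/features"
)

// Hypothetical component-local feature gate.
const MyControllerFeature featuregate.Feature = "MyControllerFeature"

// registerFeatureGates adds the shared cloud gates plus a component-local
// versioned gate to a mutable gate provided by the component.
func registerFeatureGates(fg featuregate.MutableVersionedFeatureGate) error {
	if err := ccmfeatures.SetupCurrentKubernetesSpecificFeatureGates(fg); err != nil {
		return err
	}
	return fg.AddVersioned(map[featuregate.Feature]featuregate.VersionedSpecs{
		MyControllerFeature: {
			{Version: version.MustParse("1.32"), Default: false, PreRelease: featuregate.Alpha},
		},
	})
}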
+var versionedCloudPublicFeatureGates = map[featuregate.Feature]featuregate.VersionedSpecs{ + CloudControllerManagerWebhook: { + {Version: version.MustParse("1.27"), Default: false, PreRelease: featuregate.Alpha}, + }, } diff --git a/vendor/k8s.io/controller-manager/pkg/leadermigration/config/config.go b/vendor/k8s.io/controller-manager/pkg/leadermigration/config/config.go index fc8cf1714..3a5d139ea 100644 --- a/vendor/k8s.io/controller-manager/pkg/leadermigration/config/config.go +++ b/vendor/k8s.io/controller-manager/pkg/leadermigration/config/config.go @@ -25,7 +25,7 @@ import ( util "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/validation/field" internal "k8s.io/controller-manager/config" - "k8s.io/controller-manager/config/v1" + v1 "k8s.io/controller-manager/config/v1" "k8s.io/controller-manager/config/v1alpha1" "k8s.io/controller-manager/config/v1beta1" ) @@ -63,7 +63,7 @@ func ReadLeaderMigrationConfiguration(configFilePath string) (*internal.LeaderMi if err != nil { return nil, fmt.Errorf("unable to read leader migration configuration from %q: %w", configFilePath, err) } - config, gvk, err := serializer.NewCodecFactory(cfgScheme).UniversalDecoder().Decode(data, nil, nil) + config, gvk, err := serializer.NewCodecFactory(cfgScheme, serializer.EnableStrict).UniversalDecoder().Decode(data, nil, nil) if err != nil { return nil, err } diff --git a/vendor/k8s.io/gengo/v2/Makefile b/vendor/k8s.io/gengo/v2/Makefile new file mode 100644 index 000000000..8d0fbdaa8 --- /dev/null +++ b/vendor/k8s.io/gengo/v2/Makefile @@ -0,0 +1,14 @@ +all: + go build ./... + +test: + GODEBUG=gotypesalias=0 go test -race ./... -count=1 + GODEBUG=gotypesalias=1 go test -race ./... -count=1 + +# We verify for the maximum version of the go directive as 1.20 +# here because the oldest go directive that exists on our supported +# release branches in k/k is 1.20. 
+verify: + GODEBUG=gotypesalias=0 ./hack/verify-examples.sh + GODEBUG=gotypesalias=1 ./hack/verify-examples.sh + ./hack/verify-go-directive.sh 1.20 diff --git a/vendor/k8s.io/gengo/v2/generator/execute.go b/vendor/k8s.io/gengo/v2/generator/execute.go index 02b4c3318..a1e052f5c 100644 --- a/vendor/k8s.io/gengo/v2/generator/execute.go +++ b/vendor/k8s.io/gengo/v2/generator/execute.go @@ -26,6 +26,7 @@ import ( "strings" "golang.org/x/tools/imports" + "k8s.io/gengo/v2/namer" "k8s.io/gengo/v2/types" "k8s.io/klog/v2" @@ -114,7 +115,13 @@ func assembleGoFile(w io.Writer, f *File) { } func importsWrapper(src []byte) ([]byte, error) { - return imports.Process("", src, nil) + opt := imports.Options{ + Comments: true, + TabIndent: true, + TabWidth: 8, + FormatOnly: true, // Disable the insertion and deletion of imports + } + return imports.Process("", src, &opt) } func NewGoFile() *DefaultFileType { diff --git a/vendor/k8s.io/gengo/v2/generator/import_tracker.go b/vendor/k8s.io/gengo/v2/generator/import_tracker.go index 70b86cf56..22393e4d4 100644 --- a/vendor/k8s.io/gengo/v2/generator/import_tracker.go +++ b/vendor/k8s.io/gengo/v2/generator/import_tracker.go @@ -18,6 +18,7 @@ package generator import ( "go/token" + "path/filepath" "strings" "k8s.io/klog/v2" @@ -45,7 +46,7 @@ import ( func NewImportTrackerForPackage(local string, typesToAdd ...*types.Type) *namer.DefaultImportTracker { tracker := namer.NewDefaultImportTracker(types.Name{Package: local}) tracker.IsInvalidType = func(*types.Type) bool { return false } - tracker.LocalName = func(name types.Name) string { return goTrackerLocalName(&tracker, name) } + tracker.LocalName = func(name types.Name) string { return goTrackerLocalName(&tracker, local, name) } tracker.PrintImport = func(path, name string) string { return name + " \"" + path + "\"" } tracker.AddTypes(typesToAdd...) @@ -56,7 +57,7 @@ func NewImportTracker(typesToAdd ...*types.Type) *namer.DefaultImportTracker { return NewImportTrackerForPackage("", typesToAdd...) } -func goTrackerLocalName(tracker namer.ImportTracker, t types.Name) string { +func goTrackerLocalName(tracker namer.ImportTracker, localPkg string, t types.Name) string { path := t.Package // Using backslashes in package names causes gengo to produce Go code which @@ -64,6 +65,7 @@ func goTrackerLocalName(tracker namer.ImportTracker, t types.Name) string { if strings.ContainsRune(path, '\\') { klog.Warningf("Warning: backslash used in import path '%v', this is unsupported.\n", path) } + localLeaf := filepath.Base(localPkg) dirs := strings.Split(path, namer.GoSeperator) for n := len(dirs) - 1; n >= 0; n-- { @@ -74,8 +76,13 @@ func goTrackerLocalName(tracker namer.ImportTracker, t types.Name) string { // packages, but aren't legal go names. So we'll sanitize. name = strings.ReplaceAll(name, ".", "") name = strings.ReplaceAll(name, "-", "") - if _, found := tracker.PathOf(name); found { - // This name collides with some other package + if _, found := tracker.PathOf(name); found || name == localLeaf { + // This name collides with some other package. + // Or, this name is tne same name as the local package, + // which we avoid because it can be confusing. For example, + // if the local package is v1, we to avoid importing + // another package using the v1 name, and instead import + // it with a more qualified name, such as metav1. 
continue } diff --git a/vendor/k8s.io/gengo/v2/parser/parse.go b/vendor/k8s.io/gengo/v2/parser/parse.go index da9488b8e..d4de19e76 100644 --- a/vendor/k8s.io/gengo/v2/parser/parse.go +++ b/vendor/k8s.io/gengo/v2/parser/parse.go @@ -29,6 +29,7 @@ import ( "time" "golang.org/x/tools/go/packages" + "k8s.io/gengo/v2/types" "k8s.io/klog/v2" ) @@ -642,6 +643,12 @@ func (p *Parser) walkType(u types.Universe, useName *types.Name, in gotypes.Type name = *useName } + // Handle alias types conditionally on go1.22+. + // Inline this once the minimum supported version is go1.22 + if out := p.walkAliasType(u, in); out != nil { + return out + } + switch t := in.(type) { case *gotypes.Struct: out := u.Type(name) diff --git a/vendor/k8s.io/gengo/v2/parser/parse_122.go b/vendor/k8s.io/gengo/v2/parser/parse_122.go new file mode 100644 index 000000000..ec2064958 --- /dev/null +++ b/vendor/k8s.io/gengo/v2/parser/parse_122.go @@ -0,0 +1,33 @@ +//go:build go1.22 +// +build go1.22 + +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package parser + +import ( + gotypes "go/types" + + "k8s.io/gengo/v2/types" +) + +func (p *Parser) walkAliasType(u types.Universe, in gotypes.Type) *types.Type { + if t, isAlias := in.(*gotypes.Alias); isAlias { + return p.walkType(u, nil, gotypes.Unalias(t)) + } + return nil +} diff --git a/vendor/k8s.io/gengo/v2/parser/parse_pre_122.go b/vendor/k8s.io/gengo/v2/parser/parse_pre_122.go new file mode 100644 index 000000000..6f62100c0 --- /dev/null +++ b/vendor/k8s.io/gengo/v2/parser/parse_pre_122.go @@ -0,0 +1,30 @@ +//go:build !go1.22 +// +build !go1.22 + +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package parser + +import ( + gotypes "go/types" + + "k8s.io/gengo/v2/types" +) + +func (p *Parser) walkAliasType(u types.Universe, in gotypes.Type) *types.Type { + return nil +} diff --git a/vendor/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1/apiregistration_client.go b/vendor/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1/apiregistration_client.go index f6dc74aa9..b2f256175 100644 --- a/vendor/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1/apiregistration_client.go +++ b/vendor/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1/apiregistration_client.go @@ -19,11 +19,11 @@ limitations under the License. 
package v1 import ( - "net/http" + http "net/http" rest "k8s.io/client-go/rest" - v1 "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1" - "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" + apiregistrationv1 "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1" + scheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" ) type ApiregistrationV1Interface interface { @@ -85,10 +85,10 @@ func New(c rest.Interface) *ApiregistrationV1Client { } func setConfigDefaults(config *rest.Config) error { - gv := v1.SchemeGroupVersion + gv := apiregistrationv1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1/apiservice.go b/vendor/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1/apiservice.go index b248437dc..a27b9848d 100644 --- a/vendor/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1/apiservice.go +++ b/vendor/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1/apiservice.go @@ -19,13 +19,13 @@ limitations under the License. package v1 import ( - "context" + context "context" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" gentype "k8s.io/client-go/gentype" - v1 "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1" + apiregistrationv1 "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1" scheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" ) @@ -37,33 +37,35 @@ type APIServicesGetter interface { // APIServiceInterface has methods to work with APIService resources. type APIServiceInterface interface { - Create(ctx context.Context, aPIService *v1.APIService, opts metav1.CreateOptions) (*v1.APIService, error) - Update(ctx context.Context, aPIService *v1.APIService, opts metav1.UpdateOptions) (*v1.APIService, error) + Create(ctx context.Context, aPIService *apiregistrationv1.APIService, opts metav1.CreateOptions) (*apiregistrationv1.APIService, error) + Update(ctx context.Context, aPIService *apiregistrationv1.APIService, opts metav1.UpdateOptions) (*apiregistrationv1.APIService, error) // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
- UpdateStatus(ctx context.Context, aPIService *v1.APIService, opts metav1.UpdateOptions) (*v1.APIService, error) + UpdateStatus(ctx context.Context, aPIService *apiregistrationv1.APIService, opts metav1.UpdateOptions) (*apiregistrationv1.APIService, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error - Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.APIService, error) - List(ctx context.Context, opts metav1.ListOptions) (*v1.APIServiceList, error) + Get(ctx context.Context, name string, opts metav1.GetOptions) (*apiregistrationv1.APIService, error) + List(ctx context.Context, opts metav1.ListOptions) (*apiregistrationv1.APIServiceList, error) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.APIService, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *apiregistrationv1.APIService, err error) APIServiceExpansion } // aPIServices implements APIServiceInterface type aPIServices struct { - *gentype.ClientWithList[*v1.APIService, *v1.APIServiceList] + *gentype.ClientWithList[*apiregistrationv1.APIService, *apiregistrationv1.APIServiceList] } // newAPIServices returns a APIServices func newAPIServices(c *ApiregistrationV1Client) *aPIServices { return &aPIServices{ - gentype.NewClientWithList[*v1.APIService, *v1.APIServiceList]( + gentype.NewClientWithList[*apiregistrationv1.APIService, *apiregistrationv1.APIServiceList]( "apiservices", c.RESTClient(), scheme.ParameterCodec, "", - func() *v1.APIService { return &v1.APIService{} }, - func() *v1.APIServiceList { return &v1.APIServiceList{} }), + func() *apiregistrationv1.APIService { return &apiregistrationv1.APIService{} }, + func() *apiregistrationv1.APIServiceList { return &apiregistrationv1.APIServiceList{} }, + gentype.PrefersProtobuf[*apiregistrationv1.APIService](), + ), } } diff --git a/vendor/k8s.io/kube-openapi/pkg/generators/api_linter.go b/vendor/k8s.io/kube-openapi/pkg/generators/api_linter.go index 5deff4d5a..fc854a641 100644 --- a/vendor/k8s.io/kube-openapi/pkg/generators/api_linter.go +++ b/vendor/k8s.io/kube-openapi/pkg/generators/api_linter.go @@ -139,6 +139,9 @@ func newAPILinter() *apiLinter { &rules.NamesMatch{}, &rules.OmitEmptyMatchCase{}, &rules.ListTypeMissing{}, + &rules.StreamingListTypeFieldOrder{}, + &rules.StreamingListTypeJSONTags{}, + &rules.StreamingListTypeProtoTags{}, }, } } diff --git a/vendor/k8s.io/kube-openapi/pkg/generators/markers.go b/vendor/k8s.io/kube-openapi/pkg/generators/markers.go index c4dd67d3b..a8af60b6c 100644 --- a/vendor/k8s.io/kube-openapi/pkg/generators/markers.go +++ b/vendor/k8s.io/kube-openapi/pkg/generators/markers.go @@ -20,9 +20,11 @@ import ( "encoding/json" "errors" "fmt" + "reflect" "regexp" "strconv" "strings" + "sync" "k8s.io/gengo/v2/types" openapi "k8s.io/kube-openapi/pkg/common" @@ -61,6 +63,34 @@ func (c *CELTag) Validate() error { return nil } +// isKnownTagCommentKey returns true if the given key is a known comment tag key. +// Known keys are identified by the json field tags in the commentTags struct. +// If the key is a composite key, only the first key part is checked, and is +// expected to be separated by the remainder of the key by a ':' or '[' delimiter. 
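Standalone sketch (not part of the vendored patch) of the key-splitting rule used by the unexported helper below: only the segment before the first ':' or '[' decides whether a marker key is known. The knownKeys set here is a representative placeholder; the real function derives its key set from the commentTags struct via reflection.

package example

import "strings"

// knownKeys stands in for the json tag names reflected from commentTags.
var knownKeys = map[string]struct{}{
	"minimum": {}, "maximum": {}, "pattern": {}, "cel": {},
}

// isKnownKey mirrors the lookup: split on ':' or '[' and check the first
// segment, so "cel[0]:rule" and "cel" resolve to the same key.
func isKnownKey(key string) bool {
	parts := strings.FieldsFunc(key, func(r rune) bool { return r == ':' || r == '[' })
	if len(parts) == 0 {
		return false
	}
	_, ok := knownKeys[parts[0]]
	return ok
}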
+func isKnownTagCommentKey(key string) bool { + split := func(r rune) bool { return r == ':' || r == '[' } + commentTags := strings.FieldsFunc(key, split) + if len(commentTags) == 0 { + return false + } + _, ok := tagKeys()[commentTags[0]] + return ok +} + +var tagKeys = sync.OnceValue(func() map[string]struct{} { + result := map[string]struct{}{} + t := reflect.TypeOf(commentTags{}) + for i := 0; i < t.NumField(); i++ { + field := t.Field(i) + if jsonTag := field.Tag.Get("json"); jsonTag != "" { + if key, _, _ := strings.Cut(jsonTag, ","); key != "" { + result[key] = struct{}{} + } + } + } + return result +}) + // commentTags represents the parsed comment tags for a given type. These types are then used to generate schema validations. // These only include the newer prefixed tags. The older tags are still supported, // but are not included in this struct. Comment Tags are transformed into a @@ -385,12 +415,11 @@ func memberWithJSONName(t *types.Type, key string) *types.Member { return nil } -// Parses the given comments into a CommentTags type. Validates the parsed comment tags, and returns the result. +// ParseCommentTags parses the given comments into a CommentTags type. Validates the parsed comment tags, and returns the result. // Accepts an optional type to validate against, and a prefix to filter out markers not related to validation. // Accepts a prefix to filter out markers not related to validation. // Returns any errors encountered while parsing or validating the comment tags. func ParseCommentTags(t *types.Type, comments []string, prefix string) (*spec.Schema, error) { - markers, err := parseMarkers(comments, prefix) if err != nil { return nil, fmt.Errorf("failed to parse marker comments: %w", err) @@ -610,6 +639,8 @@ func parseMarkers(markerComments []string, prefix string) (map[string]any, error if len(key) == 0 { return nil, fmt.Errorf("cannot have empty key for marker comment") + } else if !isKnownTagCommentKey(key) { + continue } else if _, ok := parseSymbolReference(value, ""); ok { // Skip ref markers continue diff --git a/vendor/k8s.io/kube-openapi/pkg/generators/rules/list_type_streaming_tags.go b/vendor/k8s.io/kube-openapi/pkg/generators/rules/list_type_streaming_tags.go new file mode 100644 index 000000000..4f34d42ec --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/generators/rules/list_type_streaming_tags.go @@ -0,0 +1,117 @@ +/* +Copyright 2025 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package rules + +import ( + "reflect" + + "k8s.io/gengo/v2/types" +) + +// StreamingListTypeFieldOrder implements APIRule interface. 
+// Fields must be ordered TypeMeta, ListMeta, Items +type StreamingListTypeFieldOrder struct{} + +func (l *StreamingListTypeFieldOrder) Name() string { + return "streaming_list_type_field_order" +} +func (l *StreamingListTypeFieldOrder) Validate(t *types.Type) ([]string, error) { + if !isListType(t) { + return nil, nil + } + var fields []string + if t.Members[0].Name != "TypeMeta" { + fields = append(fields, "TypeMeta") + } + if t.Members[1].Name != "ListMeta" { + fields = append(fields, "ListMeta") + } + if t.Members[2].Name != "Items" { + fields = append(fields, "Items") + } + return fields, nil +} + +// StreamingListTypeJSONTags implements APIRule interface. +// Fields must be JSON-tagged +type StreamingListTypeJSONTags struct{} + +func (l *StreamingListTypeJSONTags) Name() string { + return "streaming_list_type_json_tags" +} + +func (l *StreamingListTypeJSONTags) Validate(t *types.Type) ([]string, error) { + if !isListType(t) { + return nil, nil + } + var fields []string + for _, m := range t.Members { + switch m.Name { + case "TypeMeta": + if reflect.StructTag(m.Tags).Get("json") != ",inline" { + fields = append(fields, "TypeMeta") + } + case "ListMeta": + if reflect.StructTag(m.Tags).Get("json") != "metadata,omitempty" { + fields = append(fields, "ListMeta") + } + case "Items": + if reflect.StructTag(m.Tags).Get("json") != "items" { + fields = append(fields, "Items") + } + } + } + return fields, nil +} + +// StreamingListTypeProtoTags implements APIRule interface. +// Fields must be Proto-tagged with specific tags for streaming to work. +type StreamingListTypeProtoTags struct{} + +func (l *StreamingListTypeProtoTags) Name() string { + return "streaming_list_type_proto_tags" +} +func (l *StreamingListTypeProtoTags) Validate(t *types.Type) ([]string, error) { + if !isListType(t) { + return nil, nil + } + var fields []string + for _, m := range t.Members { + switch m.Name { + case "TypeMeta": + if v := reflect.StructTag(m.Tags).Get("protobuf"); v != "" { + fields = append(fields, "TypeMeta") + } + case "ListMeta": + if v := reflect.StructTag(m.Tags).Get("protobuf"); v != "" && v != "bytes,1,opt,name=metadata" { + fields = append(fields, "ListMeta") + } + case "Items": + if v := reflect.StructTag(m.Tags).Get("protobuf"); v != "" && v != "bytes,2,rep,name=items" { + fields = append(fields, "Items") + } + } + } + return fields, nil +} + +func isListType(t *types.Type) bool { + return len(t.Members) == 3 && + hasNamedMember(t, "TypeMeta") && + hasNamedMember(t, "ListMeta") && + hasNamedMember(t, "Items") +} diff --git a/vendor/k8s.io/kube-openapi/pkg/handler/handler.go b/vendor/k8s.io/kube-openapi/pkg/handler/handler.go index 5fc629773..5102e7125 100644 --- a/vendor/k8s.io/kube-openapi/pkg/handler/handler.go +++ b/vendor/k8s.io/kube-openapi/pkg/handler/handler.go @@ -26,10 +26,10 @@ import ( "github.com/NYTimes/gziphandler" "github.com/emicklei/go-restful/v3" - "github.com/golang/protobuf/proto" openapi_v2 "github.com/google/gnostic-models/openapiv2" "github.com/google/uuid" "github.com/munnerz/goautoneg" + "google.golang.org/protobuf/proto" klog "k8s.io/klog/v2" "k8s.io/kube-openapi/pkg/builder" diff --git a/vendor/k8s.io/kube-openapi/pkg/handler3/handler.go b/vendor/k8s.io/kube-openapi/pkg/handler3/handler.go index fc4563488..10f0b385f 100644 --- a/vendor/k8s.io/kube-openapi/pkg/handler3/handler.go +++ b/vendor/k8s.io/kube-openapi/pkg/handler3/handler.go @@ -29,10 +29,10 @@ import ( "sync" "time" - "github.com/golang/protobuf/proto" openapi_v3 "github.com/google/gnostic-models/openapiv3" 
"github.com/google/uuid" "github.com/munnerz/goautoneg" + "google.golang.org/protobuf/proto" "k8s.io/klog/v2" "k8s.io/kube-openapi/pkg/cached" diff --git a/vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/qri-io/starlib/util/LICENSE b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/govalidator/LICENSE similarity index 88% rename from vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/qri-io/starlib/util/LICENSE rename to vendor/k8s.io/kube-openapi/pkg/internal/third_party/govalidator/LICENSE index 31f292dce..2f9a31fad 100644 --- a/vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/qri-io/starlib/util/LICENSE +++ b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/govalidator/LICENSE @@ -1,6 +1,6 @@ The MIT License (MIT) -Copyright (c) 2018 QRI, Inc. +Copyright (c) 2014 Alex Saskevich Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal @@ -9,13 +9,13 @@ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
\ No newline at end of file diff --git a/vendor/k8s.io/kube-openapi/pkg/internal/third_party/govalidator/patterns.go b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/govalidator/patterns.go new file mode 100644 index 000000000..6e02f2d00 --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/govalidator/patterns.go @@ -0,0 +1,26 @@ +package govalidator + +import "regexp" + +// Basic regular expressions for validating strings +const ( + CreditCard string = "^(?:4[0-9]{12}(?:[0-9]{3})?|5[1-5][0-9]{14}|6(?:011|5[0-9][0-9])[0-9]{12}|3[47][0-9]{13}|3(?:0[0-5]|[68][0-9])[0-9]{11}|(?:2131|1800|35\\d{3})\\d{11})$" + ISBN10 string = "^(?:[0-9]{9}X|[0-9]{10})$" + ISBN13 string = "^(?:[0-9]{13})$" + Hexcolor string = "^#?([0-9a-fA-F]{3}|[0-9a-fA-F]{6})$" + RGBcolor string = "^rgb\\(\\s*(0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*,\\s*(0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*,\\s*(0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*\\)$" + Base64 string = "^(?:[A-Za-z0-9+\\/]{4})*(?:[A-Za-z0-9+\\/]{2}==|[A-Za-z0-9+\\/]{3}=|[A-Za-z0-9+\\/]{4})$" + SSN string = `^\d{3}[- ]?\d{2}[- ]?\d{4}$` + Int string = "^(?:[-+]?(?:0|[1-9][0-9]*))$" +) + +var ( + rxCreditCard = regexp.MustCompile(CreditCard) + rxInt = regexp.MustCompile(Int) + rxISBN10 = regexp.MustCompile(ISBN10) + rxISBN13 = regexp.MustCompile(ISBN13) + rxHexcolor = regexp.MustCompile(Hexcolor) + rxRGBcolor = regexp.MustCompile(RGBcolor) + rxBase64 = regexp.MustCompile(Base64) + rxSSN = regexp.MustCompile(SSN) +) diff --git a/vendor/k8s.io/kube-openapi/pkg/internal/third_party/govalidator/validator.go b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/govalidator/validator.go new file mode 100644 index 000000000..4d089508a --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/govalidator/validator.go @@ -0,0 +1,181 @@ +// Package govalidator is package of validators and sanitizers for strings, structs and collections. +package govalidator + +import ( + "fmt" + "net" + "net/url" + "reflect" + "regexp" + "strconv" + "strings" +) + +var ( + notNumberRegexp = regexp.MustCompile("[^0-9]+") + whiteSpacesAndMinus = regexp.MustCompile(`[\s-]+`) +) + +// IsRequestURI check if the string rawurl, assuming +// it was received in an HTTP request, is an +// absolute URI or an absolute path. +func IsRequestURI(rawurl string) bool { + _, err := url.ParseRequestURI(rawurl) + return err == nil +} + +// IsHexcolor check if the string is a hexadecimal color. +func IsHexcolor(str string) bool { + return rxHexcolor.MatchString(str) +} + +// IsRGBcolor check if the string is a valid RGB color in form rgb(RRR, GGG, BBB). +func IsRGBcolor(str string) bool { + return rxRGBcolor.MatchString(str) +} + +// IsCreditCard check if the string is a credit card. +func IsCreditCard(str string) bool { + sanitized := notNumberRegexp.ReplaceAllString(str, "") + if !rxCreditCard.MatchString(sanitized) { + return false + } + var sum int64 + var digit string + var tmpNum int64 + var shouldDouble bool + for i := len(sanitized) - 1; i >= 0; i-- { + digit = sanitized[i:(i + 1)] + tmpNum, _ = ToInt(digit) + if shouldDouble { + tmpNum *= 2 + if tmpNum >= 10 { + sum += (tmpNum % 10) + 1 + } else { + sum += tmpNum + } + } else { + sum += tmpNum + } + shouldDouble = !shouldDouble + } + + return sum%10 == 0 +} + +// IsISBN10 check if the string is an ISBN version 10. +func IsISBN10(str string) bool { + return IsISBN(str, 10) +} + +// IsISBN13 check if the string is an ISBN version 13. 
+func IsISBN13(str string) bool { + return IsISBN(str, 13) +} + +// IsISBN check if the string is an ISBN (version 10 or 13). +// If version value is not equal to 10 or 13, it will be check both variants. +func IsISBN(str string, version int) bool { + sanitized := whiteSpacesAndMinus.ReplaceAllString(str, "") + var checksum int32 + var i int32 + if version == 10 { + if !rxISBN10.MatchString(sanitized) { + return false + } + for i = 0; i < 9; i++ { + checksum += (i + 1) * int32(sanitized[i]-'0') + } + if sanitized[9] == 'X' { + checksum += 10 * 10 + } else { + checksum += 10 * int32(sanitized[9]-'0') + } + if checksum%11 == 0 { + return true + } + return false + } else if version == 13 { + if !rxISBN13.MatchString(sanitized) { + return false + } + factor := []int32{1, 3} + for i = 0; i < 12; i++ { + checksum += factor[i%2] * int32(sanitized[i]-'0') + } + return (int32(sanitized[12]-'0'))-((10-(checksum%10))%10) == 0 + } + return IsISBN(str, 10) || IsISBN(str, 13) +} + +// IsBase64 check if a string is base64 encoded. +func IsBase64(str string) bool { + return rxBase64.MatchString(str) +} + +// IsIPv6 check if the string is an IP version 6. +func IsIPv6(str string) bool { + ip := net.ParseIP(str) + return ip != nil && strings.Contains(str, ":") +} + +// IsMAC check if a string is valid MAC address. +// Possible MAC formats: +// 01:23:45:67:89:ab +// 01:23:45:67:89:ab:cd:ef +// 01-23-45-67-89-ab +// 01-23-45-67-89-ab-cd-ef +// 0123.4567.89ab +// 0123.4567.89ab.cdef +func IsMAC(str string) bool { + _, err := net.ParseMAC(str) + return err == nil +} + +// IsSSN will validate the given string as a U.S. Social Security Number +func IsSSN(str string) bool { + if str == "" || len(str) != 11 { + return false + } + return rxSSN.MatchString(str) +} + +// ToInt convert the input string or any int type to an integer type 64, or 0 if the input is not an integer. +func ToInt(value interface{}) (res int64, err error) { + val := reflect.ValueOf(value) + + switch value.(type) { + case int, int8, int16, int32, int64: + res = val.Int() + case uint, uint8, uint16, uint32, uint64: + res = int64(val.Uint()) + case string: + if IsInt(val.String()) { + res, err = strconv.ParseInt(val.String(), 0, 64) + if err != nil { + res = 0 + } + } else { + err = fmt.Errorf("math: square root of negative number %g", value) + res = 0 + } + default: + err = fmt.Errorf("math: square root of negative number %g", value) + res = 0 + } + + return +} + +// IsInt check if the string is an integer. Empty string is valid. +func IsInt(str string) bool { + if IsNull(str) { + return true + } + return rxInt.MatchString(str) +} + +// IsNull check if the string is null. +func IsNull(str string) bool { + return len(str) == 0 +} diff --git a/vendor/k8s.io/kube-openapi/pkg/spec3/fuzz.go b/vendor/k8s.io/kube-openapi/pkg/spec3/fuzz.go index 08b6246ce..25e4fd09e 100644 --- a/vendor/k8s.io/kube-openapi/pkg/spec3/fuzz.go +++ b/vendor/k8s.io/kube-openapi/pkg/spec3/fuzz.go @@ -4,7 +4,7 @@ import ( "math/rand" "strings" - fuzz "github.com/google/gofuzz" + "sigs.k8s.io/randfill" "k8s.io/kube-openapi/pkg/validation/spec" ) @@ -25,15 +25,15 @@ func randAlphanumString() string { } var OpenAPIV3FuzzFuncs []interface{} = []interface{}{ - func(s *string, c fuzz.Continue) { + func(s *string, c randfill.Continue) { // All OpenAPI V3 map keys must follow the corresponding // regex. Note that this restricts the range for all other // string values as well. 
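As a sanity check on the ISBN-13 branch above, the same weighted checksum worked out in isolation for the commonly cited ISBN 9780306406157; this is a standalone sketch, not part of the vendored package.

package main

import "fmt"

func main() {
	const isbn = "9780306406157"
	sum := 0
	for i := 0; i < 12; i++ {
		// Weights alternate 1, 3 over the first twelve digits.
		w := 1
		if i%2 == 1 {
			w = 3
		}
		sum += w * int(isbn[i]-'0')
	}
	check := (10 - sum%10) % 10          // sum is 93, so the check digit is 7
	fmt.Println(check == int(isbn[12]-'0')) // true: 9780306406157 is valid
}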
str := randAlphanumString() *s = str }, - func(o *OpenAPI, c fuzz.Continue) { - c.FuzzNoCustom(o) + func(o *OpenAPI, c randfill.Continue) { + c.FillNoCustom(o) o.Version = "3.0.0" for i, val := range o.SecurityRequirement { if val == nil { @@ -48,45 +48,45 @@ var OpenAPIV3FuzzFuncs []interface{} = []interface{}{ } }, - func(r *interface{}, c fuzz.Continue) { + func(r *interface{}, c randfill.Continue) { switch c.Intn(3) { case 0: *r = nil case 1: - n := c.RandString() + "x" + n := c.String(0) + "x" *r = n case 2: n := c.Float64() *r = n } }, - func(v **spec.Info, c fuzz.Continue) { + func(v **spec.Info, c randfill.Continue) { // Info is never nil *v = &spec.Info{} - c.FuzzNoCustom(*v) - (*v).Title = c.RandString() + "x" + c.FillNoCustom(*v) + (*v).Title = c.String(0) + "x" }, - func(v *Paths, c fuzz.Continue) { - c.Fuzz(&v.VendorExtensible) + func(v *Paths, c randfill.Continue) { + c.Fill(&v.VendorExtensible) num := c.Intn(5) if num > 0 { v.Paths = make(map[string]*Path) } for i := 0; i < num; i++ { val := Path{} - c.Fuzz(&val) - v.Paths["/"+c.RandString()] = &val + c.Fill(&val) + v.Paths["/"+c.String(0)] = &val } }, - func(v *SecurityScheme, c fuzz.Continue) { + func(v *SecurityScheme, c randfill.Continue) { if c.Intn(refChance) == 0 { - c.Fuzz(&v.Refable) + c.Fill(&v.Refable) return } switch c.Intn(4) { case 0: v.Type = "apiKey" - v.Name = c.RandString() + "x" + v.Name = c.String(0) + "x" switch c.Intn(3) { case 0: v.In = "query" @@ -101,17 +101,17 @@ var OpenAPIV3FuzzFuncs []interface{} = []interface{}{ v.Type = "oauth2" v.Flows = make(map[string]*OAuthFlow) flow := OAuthFlow{} - flow.AuthorizationUrl = c.RandString() + "x" + flow.AuthorizationUrl = c.String(0) + "x" v.Flows["implicit"] = &flow flow.Scopes = make(map[string]string) flow.Scopes["foo"] = "bar" case 3: v.Type = "openIdConnect" - v.OpenIdConnectUrl = "https://" + c.RandString() + v.OpenIdConnectUrl = "https://" + c.String(0) } v.Scheme = "basic" }, - func(v *spec.Ref, c fuzz.Continue) { + func(v *spec.Ref, c randfill.Continue) { switch c.Intn(7) { case 0: *v = spec.MustCreateRef("#/components/schemas/" + randAlphanumString()) @@ -127,13 +127,13 @@ var OpenAPIV3FuzzFuncs []interface{} = []interface{}{ *v = spec.MustCreateRef("#/components/requestBodies/" + randAlphanumString()) } }, - func(v *Parameter, c fuzz.Continue) { + func(v *Parameter, c randfill.Continue) { if c.Intn(refChance) == 0 { - c.Fuzz(&v.Refable) + c.Fill(&v.Refable) return } - c.Fuzz(&v.ParameterProps) - c.Fuzz(&v.VendorExtensible) + c.Fill(&v.ParameterProps) + c.Fill(&v.VendorExtensible) switch c.Intn(3) { case 0: @@ -145,44 +145,44 @@ var OpenAPIV3FuzzFuncs []interface{} = []interface{}{ v.In = "cookie" } }, - func(v *RequestBody, c fuzz.Continue) { + func(v *RequestBody, c randfill.Continue) { if c.Intn(refChance) == 0 { - c.Fuzz(&v.Refable) + c.Fill(&v.Refable) return } - c.Fuzz(&v.RequestBodyProps) - c.Fuzz(&v.VendorExtensible) + c.Fill(&v.RequestBodyProps) + c.Fill(&v.VendorExtensible) }, - func(v *Header, c fuzz.Continue) { + func(v *Header, c randfill.Continue) { if c.Intn(refChance) == 0 { - c.Fuzz(&v.Refable) + c.Fill(&v.Refable) return } - c.Fuzz(&v.HeaderProps) - c.Fuzz(&v.VendorExtensible) + c.Fill(&v.HeaderProps) + c.Fill(&v.VendorExtensible) }, - func(v *ResponsesProps, c fuzz.Continue) { - c.Fuzz(&v.Default) + func(v *ResponsesProps, c randfill.Continue) { + c.Fill(&v.Default) n := c.Intn(5) for i := 0; i < n; i++ { r2 := Response{} - c.Fuzz(&r2) + c.Fill(&r2) // HTTP Status code in 100-599 Range code := c.Intn(500) + 100 
v.StatusCodeResponses = make(map[int]*Response) v.StatusCodeResponses[code] = &r2 } }, - func(v *Response, c fuzz.Continue) { + func(v *Response, c randfill.Continue) { if c.Intn(refChance) == 0 { - c.Fuzz(&v.Refable) + c.Fill(&v.Refable) return } - c.Fuzz(&v.ResponseProps) - c.Fuzz(&v.VendorExtensible) + c.Fill(&v.ResponseProps) + c.Fill(&v.VendorExtensible) }, - func(v *Operation, c fuzz.Continue) { - c.FuzzNoCustom(v) + func(v *Operation, c randfill.Continue) { + c.FillNoCustom(v) // Do not fuzz null values into the array. for i, val := range v.SecurityRequirement { if val == nil { @@ -196,85 +196,85 @@ var OpenAPIV3FuzzFuncs []interface{} = []interface{}{ } } }, - func(v *spec.Extensions, c fuzz.Continue) { + func(v *spec.Extensions, c randfill.Continue) { numChildren := c.Intn(5) for i := 0; i < numChildren; i++ { if *v == nil { *v = spec.Extensions{} } - (*v)["x-"+c.RandString()] = c.RandString() + (*v)["x-"+c.String(0)] = c.String(0) } }, - func(v *spec.ExternalDocumentation, c fuzz.Continue) { - c.Fuzz(&v.Description) + func(v *spec.ExternalDocumentation, c randfill.Continue) { + c.Fill(&v.Description) v.URL = "https://" + randAlphanumString() }, - func(v *spec.SchemaURL, c fuzz.Continue) { + func(v *spec.SchemaURL, c randfill.Continue) { *v = spec.SchemaURL("https://" + randAlphanumString()) }, - func(v *spec.SchemaOrBool, c fuzz.Continue) { + func(v *spec.SchemaOrBool, c randfill.Continue) { *v = spec.SchemaOrBool{} - if c.RandBool() { - v.Allows = c.RandBool() + if c.Bool() { + v.Allows = c.Bool() } else { v.Schema = &spec.Schema{} v.Allows = true - c.Fuzz(&v.Schema) + c.Fill(&v.Schema) } }, - func(v *spec.SchemaOrArray, c fuzz.Continue) { + func(v *spec.SchemaOrArray, c randfill.Continue) { *v = spec.SchemaOrArray{} - if c.RandBool() { + if c.Bool() { schema := spec.Schema{} - c.Fuzz(&schema) + c.Fill(&schema) v.Schema = &schema } else { v.Schemas = []spec.Schema{} numChildren := c.Intn(5) for i := 0; i < numChildren; i++ { schema := spec.Schema{} - c.Fuzz(&schema) + c.Fill(&schema) v.Schemas = append(v.Schemas, schema) } } }, - func(v *spec.SchemaOrStringArray, c fuzz.Continue) { - if c.RandBool() { + func(v *spec.SchemaOrStringArray, c randfill.Continue) { + if c.Bool() { *v = spec.SchemaOrStringArray{} - if c.RandBool() { - c.Fuzz(&v.Property) + if c.Bool() { + c.Fill(&v.Property) } else { - c.Fuzz(&v.Schema) + c.Fill(&v.Schema) } } }, - func(v *spec.Schema, c fuzz.Continue) { + func(v *spec.Schema, c randfill.Continue) { if c.Intn(refChance) == 0 { - c.Fuzz(&v.Ref) + c.Fill(&v.Ref) return } - if c.RandBool() { + if c.Bool() { // file schema - c.Fuzz(&v.Default) - c.Fuzz(&v.Description) - c.Fuzz(&v.Example) - c.Fuzz(&v.ExternalDocs) + c.Fill(&v.Default) + c.Fill(&v.Description) + c.Fill(&v.Example) + c.Fill(&v.ExternalDocs) - c.Fuzz(&v.Format) - c.Fuzz(&v.ReadOnly) - c.Fuzz(&v.Required) - c.Fuzz(&v.Title) + c.Fill(&v.Format) + c.Fill(&v.ReadOnly) + c.Fill(&v.Required) + c.Fill(&v.Title) v.Type = spec.StringOrArray{"file"} } else { // normal schema - c.Fuzz(&v.SchemaProps) - c.Fuzz(&v.SwaggerSchemaProps) - c.Fuzz(&v.VendorExtensible) - c.Fuzz(&v.ExtraProps) + c.Fill(&v.SchemaProps) + c.Fill(&v.SwaggerSchemaProps) + c.Fill(&v.VendorExtensible) + c.Fill(&v.ExtraProps) } }, diff --git a/vendor/k8s.io/kube-openapi/pkg/util/proto/document.go b/vendor/k8s.io/kube-openapi/pkg/util/proto/document.go index 5789e67ab..1b758ab25 100644 --- a/vendor/k8s.io/kube-openapi/pkg/util/proto/document.go +++ b/vendor/k8s.io/kube-openapi/pkg/util/proto/document.go @@ -22,7 +22,7 @@ import ( 
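The fuzz.go hunks above mechanically rename the gofuzz API to sigs.k8s.io/randfill (Fuzz becomes Fill, FuzzNoCustom becomes FillNoCustom, RandString becomes String(0), RandBool becomes Bool). A rough usage sketch, assuming randfill keeps gofuzz's constructor shape (New, NilChance, Funcs):

package example

import (
	"sigs.k8s.io/randfill"

	"k8s.io/kube-openapi/pkg/spec3"
)

// fillOpenAPI populates an OpenAPI document using the custom fuzz funcs above.
func fillOpenAPI() *spec3.OpenAPI {
	o := &spec3.OpenAPI{}
	f := randfill.New().NilChance(0).Funcs(spec3.OpenAPIV3FuzzFuncs...)
	f.Fill(o)
	return o
}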
"strings" openapi_v2 "github.com/google/gnostic-models/openapiv2" - "gopkg.in/yaml.v2" + yaml "sigs.k8s.io/yaml/goyaml.v2" ) func newSchemaError(path *Path, format string, a ...interface{}) error { diff --git a/vendor/k8s.io/kube-openapi/pkg/validation/strfmt/default.go b/vendor/k8s.io/kube-openapi/pkg/validation/strfmt/default.go index e85b0f1b4..97b2f989e 100644 --- a/vendor/k8s.io/kube-openapi/pkg/validation/strfmt/default.go +++ b/vendor/k8s.io/kube-openapi/pkg/validation/strfmt/default.go @@ -22,9 +22,9 @@ import ( "regexp" "strings" - "github.com/asaskevich/govalidator" - netutils "k8s.io/utils/net" + + "k8s.io/kube-openapi/pkg/internal/third_party/govalidator" ) const ( diff --git a/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/OWNERS b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/OWNERS index ab572136e..9978d2ceb 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/OWNERS +++ b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/OWNERS @@ -7,5 +7,6 @@ reviewers: - deads2k - caesarxuchao - sttts - - ncdc - dims +emeritus_reviewers: + - ncdc diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/helper/qos/qos.go b/vendor/k8s.io/kubernetes/pkg/apis/core/helper/qos/qos.go index b32fffa0e..4f81d646b 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/core/helper/qos/qos.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/core/helper/qos/qos.go @@ -21,7 +21,9 @@ package qos import ( "k8s.io/apimachinery/pkg/api/resource" "k8s.io/apimachinery/pkg/util/sets" + utilfeature "k8s.io/apiserver/pkg/util/feature" "k8s.io/kubernetes/pkg/apis/core" + "k8s.io/kubernetes/pkg/features" ) var supportedQoSComputeResources = sets.NewString(string(core.ResourceCPU), string(core.ResourceMemory)) @@ -39,6 +41,45 @@ func GetPodQOS(pod *core.Pod) core.PodQOSClass { return ComputePodQOS(pod) } +// zeroQuantity represents a resource.Quantity with value "0", used as a baseline +// for resource comparisons. +var zeroQuantity = resource.MustParse("0") + +// processResourceList adds non-zero quantities for supported QoS compute resources +// quantities from newList to list. +func processResourceList(list, newList core.ResourceList) { + for name, quantity := range newList { + if !isSupportedQoSComputeResource(name) { + continue + } + if quantity.Cmp(zeroQuantity) == 1 { + delta := quantity.DeepCopy() + if _, exists := list[name]; !exists { + list[name] = delta + } else { + delta.Add(list[name]) + list[name] = delta + } + } + } +} + +// getQOSResources returns a set of resource names from the provided resource list that: +// 1. Are supported QoS compute resources +// 2. Have quantities greater than zero +func getQOSResources(list core.ResourceList) sets.Set[string] { + qosResources := sets.New[string]() + for name, quantity := range list { + if !isSupportedQoSComputeResource(name) { + continue + } + if quantity.Cmp(zeroQuantity) == 1 { + qosResources.Insert(string(name)) + } + } + return qosResources +} + // ComputePodQOS evaluates the list of containers to determine a pod's QoS class. This function is more // expensive than GetPodQOS which should be used for pods having a non-empty .Status.QOSClass. // A pod is besteffort if none of its containers have specified any requests or limits. 
@@ -48,54 +89,72 @@ func GetPodQOS(pod *core.Pod) core.PodQOSClass { func ComputePodQOS(pod *core.Pod) core.PodQOSClass { requests := core.ResourceList{} limits := core.ResourceList{} - zeroQuantity := resource.MustParse("0") isGuaranteed := true - // note, ephemeral containers are not considered for QoS as they cannot define resources - allContainers := []core.Container{} - allContainers = append(allContainers, pod.Spec.Containers...) - allContainers = append(allContainers, pod.Spec.InitContainers...) - for _, container := range allContainers { - // process requests - for name, quantity := range container.Resources.Requests { - if !isSupportedQoSComputeResource(name) { - continue - } - if quantity.Cmp(zeroQuantity) == 1 { - delta := quantity.DeepCopy() - if _, exists := requests[name]; !exists { - requests[name] = delta - } else { - delta.Add(requests[name]) - requests[name] = delta - } + // When pod-level resources are specified, we use them to determine QoS class. + if utilfeature.DefaultFeatureGate.Enabled(features.PodLevelResources) && + pod.Spec.Resources != nil { + if len(pod.Spec.Resources.Requests) > 0 { + // process requests + processResourceList(requests, pod.Spec.Resources.Requests) + } + + if len(pod.Spec.Resources.Limits) > 0 { + // process limits + processResourceList(limits, pod.Spec.Resources.Limits) + qosLimitResources := getQOSResources(pod.Spec.Resources.Limits) + if !qosLimitResources.HasAll(string(core.ResourceMemory), string(core.ResourceCPU)) { + isGuaranteed = false } } - // process limits - qosLimitsFound := sets.NewString() - for name, quantity := range container.Resources.Limits { - if !isSupportedQoSComputeResource(name) { - continue + } else { + // note, ephemeral containers are not considered for QoS as they cannot define resources + allContainers := []core.Container{} + allContainers = append(allContainers, pod.Spec.Containers...) + allContainers = append(allContainers, pod.Spec.InitContainers...) + for _, container := range allContainers { + // process requests + for name, quantity := range container.Resources.Requests { + if !isSupportedQoSComputeResource(name) { + continue + } + if quantity.Cmp(zeroQuantity) == 1 { + delta := quantity.DeepCopy() + if _, exists := requests[name]; !exists { + requests[name] = delta + } else { + delta.Add(requests[name]) + requests[name] = delta + } + } } - if quantity.Cmp(zeroQuantity) == 1 { - qosLimitsFound.Insert(string(name)) - delta := quantity.DeepCopy() - if _, exists := limits[name]; !exists { - limits[name] = delta - } else { - delta.Add(limits[name]) - limits[name] = delta + // process limits + qosLimitsFound := sets.NewString() + for name, quantity := range container.Resources.Limits { + if !isSupportedQoSComputeResource(name) { + continue + } + if quantity.Cmp(zeroQuantity) == 1 { + qosLimitsFound.Insert(string(name)) + delta := quantity.DeepCopy() + if _, exists := limits[name]; !exists { + limits[name] = delta + } else { + delta.Add(limits[name]) + limits[name] = delta + } } } - } - if !qosLimitsFound.HasAll(string(core.ResourceMemory), string(core.ResourceCPU)) { - isGuaranteed = false + if !qosLimitsFound.HasAll(string(core.ResourceMemory), string(core.ResourceCPU)) { + isGuaranteed = false + } } } + if len(requests) == 0 && len(limits) == 0 { return core.PodQOSBestEffort } - // Check is requests match limits for all resources. + // Check if requests match limits for all resources. 
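With the PodLevelResources gate enabled, the new branch above derives the QoS class from pod-level Resources instead of per-container values. A sketch of a pod that this logic would classify as Guaranteed (pod-level requests equal to limits for both cpu and memory); the container name and image are placeholders:

package example

import (
	"k8s.io/apimachinery/pkg/api/resource"
	"k8s.io/kubernetes/pkg/apis/core"
)

// guaranteedByPodLevelResources builds a pod whose pod-level requests and
// limits match for cpu and memory, so ComputePodQOS above reports Guaranteed
// when the PodLevelResources feature gate is on.
func guaranteedByPodLevelResources() *core.Pod {
	rl := core.ResourceList{
		core.ResourceCPU:    resource.MustParse("1"),
		core.ResourceMemory: resource.MustParse("1Gi"),
	}
	return &core.Pod{
		Spec: core.PodSpec{
			Resources:  &core.ResourceRequirements{Requests: rl, Limits: rl},
			Containers: []core.Container{{Name: "app", Image: "example.invalid/app"}},
		},
	}
}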
if isGuaranteed { for name, req := range requests { if lim, exists := limits[name]; !exists || lim.Cmp(req) != 0 { diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/types.go b/vendor/k8s.io/kubernetes/pkg/apis/core/types.go index 96e9b0a5c..664eb4897 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/core/types.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/core/types.go @@ -56,7 +56,7 @@ type Volume struct { // VolumeSource represents the source location of a volume to mount. // Only one of its members may be specified. type VolumeSource struct { - // HostPath represents file or directory on the host machine that is + // hostPath represents file or directory on the host machine that is // directly exposed to the container. This is generally used for system // agents or other privileged things that are allowed to see the host // machine. Most containers will NOT need this. @@ -65,99 +65,123 @@ type VolumeSource struct { // mount host directories as read/write. // +optional HostPath *HostPathVolumeSource - // EmptyDir represents a temporary directory that shares a pod's lifetime. + // emptyDir represents a temporary directory that shares a pod's lifetime. // +optional EmptyDir *EmptyDirVolumeSource - // GCEPersistentDisk represents a GCE Disk resource that is attached to a + // gcePersistentDisk represents a GCE Disk resource that is attached to a // kubelet's host machine and then exposed to the pod. + // Deprecated: GCEPersistentDisk is deprecated. All operations for the in-tree + // gcePersistentDisk type are redirected to the pd.csi.storage.gke.io CSI driver. // +optional GCEPersistentDisk *GCEPersistentDiskVolumeSource - // AWSElasticBlockStore represents an AWS EBS disk that is attached to a + // awsElasticBlockStore represents an AWS EBS disk that is attached to a // kubelet's host machine and then exposed to the pod. + // Deprecated: AWSElasticBlockStore is deprecated. All operations for the in-tree + // awsElasticBlockStore type are redirected to the ebs.csi.aws.com CSI driver. // +optional AWSElasticBlockStore *AWSElasticBlockStoreVolumeSource - // GitRepo represents a git repository at a particular revision. - // DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an + // gitRepo represents a git repository at a particular revision. + // Deprecated: GitRepo is deprecated. To provision a container with a git repo, mount an // EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir // into the Pod's container. // +optional GitRepo *GitRepoVolumeSource - // Secret represents a secret that should populate this volume. + // secret represents a secret that should populate this volume. // +optional Secret *SecretVolumeSource - // NFS represents an NFS mount on the host that shares a pod's lifetime + // nfs represents an NFS mount on the host that shares a pod's lifetime // +optional NFS *NFSVolumeSource - // ISCSIVolumeSource represents an ISCSI Disk resource that is attached to a + // iscsi represents an ISCSI Disk resource that is attached to a // kubelet's host machine and then exposed to the pod. // +optional ISCSI *ISCSIVolumeSource - // Glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime + // glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. + // Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported. 
// +optional Glusterfs *GlusterfsVolumeSource - // PersistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace + // persistentVolumeClaim represents a reference to a PersistentVolumeClaim in the same namespace // +optional PersistentVolumeClaim *PersistentVolumeClaimVolumeSource - // RBD represents a Rados Block Device mount on the host that shares a pod's lifetime + // rdb represents a Rados Block Device mount on the host that shares a pod's lifetime. + // Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported. // +optional RBD *RBDVolumeSource - // Quobyte represents a Quobyte mount on the host that shares a pod's lifetime + // quobyte represents a Quobyte mount on the host that shares a pod's lifetime. + // Deprecated: Quobyte is deprecated and the in-tree quobyte type is no longer supported. // +optional Quobyte *QuobyteVolumeSource - // FlexVolume represents a generic volume resource that is + // flexVolume represents a generic volume resource that is // provisioned/attached using an exec based plugin. + // Deprecated: FlexVolume is deprecated. Consider using a CSIDriver instead. // +optional FlexVolume *FlexVolumeSource - // Cinder represents a cinder volume attached and mounted on kubelet's host machine. + // cinder represents a cinder volume attached and mounted on kubelet's host machine. + // Deprecated: Cinder is deprecated. All operations for the in-tree cinder type + // are redirected to the cinder.csi.openstack.org CSI driver. // +optional Cinder *CinderVolumeSource - // CephFS represents a Cephfs mount on the host that shares a pod's lifetime + // cephFS represents a Cephfs mount on the host that shares a pod's lifetime. + // Deprecated: CephFS is deprecated and the in-tree cephfs type is no longer supported. // +optional CephFS *CephFSVolumeSource - // Flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running + // flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running. + // Deprecated: Flocker is deprecated and the in-tree flocker type is no longer supported. // +optional Flocker *FlockerVolumeSource - // DownwardAPI represents metadata about the pod that should populate this volume + // downwardAPI represents metadata about the pod that should populate this volume // +optional DownwardAPI *DownwardAPIVolumeSource - // FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod. + // fc represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod. // +optional FC *FCVolumeSource - // AzureFile represents an Azure File Service mount on the host and bind mount to the pod. + // azureFile represents an Azure File Service mount on the host and bind mount to the pod. + // Deprecated: AzureFile is deprecated. All operations for the in-tree azureFile type + // are redirected to the file.csi.azure.com CSI driver. // +optional AzureFile *AzureFileVolumeSource // ConfigMap represents a configMap that should populate this volume // +optional ConfigMap *ConfigMapVolumeSource - // VsphereVolume represents a vSphere volume attached and mounted on kubelet's host machine + // vsphereVolume represents a vSphere volume attached and mounted on kubelet's host machine. + // Deprecated: VsphereVolume is deprecated. 
All operations for the in-tree vsphereVolume type + // are redirected to the csi.vsphere.vmware.com CSI driver. // +optional VsphereVolume *VsphereVirtualDiskVolumeSource - // AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. + // azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. + // Deprecated: AzureDisk is deprecated. All operations for the in-tree azureDisk type + // are redirected to the disk.csi.azure.com CSI driver. // +optional AzureDisk *AzureDiskVolumeSource - // PhotonPersistentDisk represents a Photon Controller persistent disk attached and mounted on kubelet's host machine + // photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine. + // Deprecated: PhotonPersistentDisk is deprecated and the in-tree photonPersistentDisk type is no longer supported. PhotonPersistentDisk *PhotonPersistentDiskVolumeSource // Items for all in one resources secrets, configmaps, and downward API Projected *ProjectedVolumeSource - // PortworxVolume represents a portworx volume attached and mounted on kubelet's host machine + // portworxVolume represents a portworx volume attached and mounted on kubelets host machine. + // Deprecated: PortworxVolume is deprecated. All operations for the in-tree portworxVolume type + // are redirected to the pxd.portworx.com CSI driver when the CSIMigrationPortworx feature-gate + // is on. // +optional PortworxVolume *PortworxVolumeSource - // ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. + // scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. + // Deprecated: ScaleIO is deprecated and the in-tree scaleIO type is no longer supported. // +optional ScaleIO *ScaleIOVolumeSource - // StorageOS represents a StorageOS volume that is attached to the kubelet's host machine and mounted into the pod + // storageOS represents a StorageOS volume that is attached to the kubelet's host machine and mounted into the pod. + // Deprecated: StorageOS is deprecated and the in-tree storageos type is no longer supported. // +optional StorageOS *StorageOSVolumeSource - // CSI (Container Storage Interface) represents ephemeral storage that is handled by certain external CSI drivers. + // csi (Container Storage Interface) represents ephemeral storage that is handled by certain external CSI drivers. // +optional CSI *CSIVolumeSource - // Ephemeral represents a volume that is handled by a cluster storage driver. + // ephemeral represents a volume that is handled by a cluster storage driver. // The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts, // and deleted when the pod is removed. // @@ -184,7 +208,7 @@ type VolumeSource struct { // // +optional Ephemeral *EphemeralVolumeSource - // Image represents an OCI object (a container image or artifact) pulled and mounted on the kubelet's host machine. + // image represents an OCI object (a container image or artifact) pulled and mounted on the kubelet's host machine. // The volume is resolved at pod startup depending on which PullPolicy value is provided: // // - Always: the kubelet always attempts to pull the reference. Container creation will fail If the pull fails. @@ -206,77 +230,101 @@ type VolumeSource struct { // PersistentVolumeSource is similar to VolumeSource but meant for the administrator who creates PVs. // Exactly one of its members must be set. 
type PersistentVolumeSource struct { - // GCEPersistentDisk represents a GCE Disk resource that is attached to a - // kubelet's host machine and then exposed to the pod. + // gcePersistentDisk represents a GCE Disk resource that is attached to a + // kubelet's host machine and then exposed to the pod. Provisioned by an admin. + // Deprecated: GCEPersistentDisk is deprecated. All operations for the in-tree + // gcePersistentDisk type are redirected to the pd.csi.storage.gke.io CSI driver. // +optional GCEPersistentDisk *GCEPersistentDiskVolumeSource - // AWSElasticBlockStore represents an AWS EBS disk that is attached to a + // awsElasticBlockStore represents an AWS Disk resource that is attached to a // kubelet's host machine and then exposed to the pod. + // Deprecated: AWSElasticBlockStore is deprecated. All operations for the in-tree + // awsElasticBlockStore type are redirected to the ebs.csi.aws.com CSI driver. // +optional AWSElasticBlockStore *AWSElasticBlockStoreVolumeSource - // HostPath represents a directory on the host. + // hostPath represents a directory on the host. // Provisioned by a developer or tester. // This is useful for single-node development and testing only! // On-host storage is not supported in any way and WILL NOT WORK in a multi-node cluster. // +optional HostPath *HostPathVolumeSource - // Glusterfs represents a Glusterfs volume that is attached to a host and exposed to the pod + // glusterfs represents a Glusterfs volume that is attached to a host and + // exposed to the pod. Provisioned by an admin. + // Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported. // +optional Glusterfs *GlusterfsPersistentVolumeSource - // NFS represents an NFS mount on the host that shares a pod's lifetime + // nfs represents an NFS mount on the host that shares a pod's lifetime // +optional NFS *NFSVolumeSource - // RBD represents a Rados Block Device mount on the host that shares a pod's lifetime + // rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. + // Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported. // +optional RBD *RBDPersistentVolumeSource - // Quobyte represents a Quobyte mount on the host that shares a pod's lifetime + // quobyte represents a Quobyte mount on the host that shares a pod's lifetime. + // Deprecated: Quobyte is deprecated and the in-tree quobyte type is no longer supported. // +optional Quobyte *QuobyteVolumeSource - // ISCSIPersistentVolumeSource represents an ISCSI resource that is attached to a + // iscsi represents an ISCSI resource that is attached to a // kubelet's host machine and then exposed to the pod. // +optional ISCSI *ISCSIPersistentVolumeSource - // FlexVolume represents a generic volume resource that is + // flexVolume represents a generic volume resource that is // provisioned/attached using an exec based plugin. + // Deprecated: FlexVolume is deprecated. Consider using a CSIDriver instead. // +optional FlexVolume *FlexPersistentVolumeSource - // Cinder represents a cinder volume attached and mounted on kubelet's host machine. + // cinder represents a cinder volume attached and mounted on kubelets host machine. + // Deprecated: Cinder is deprecated. All operations for the in-tree cinder type + // are redirected to the cinder.csi.openstack.org CSI driver. 
// +optional Cinder *CinderPersistentVolumeSource - // CephFS represents a Ceph FS mount on the host that shares a pod's lifetime + // cephFS represents a Ceph FS mount on the host that shares a pod's lifetime. + // Deprecated: CephFS is deprecated and the in-tree cephfs type is no longer supported. // +optional CephFS *CephFSPersistentVolumeSource - // FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod. + // fc represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod. // +optional FC *FCVolumeSource - // Flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running + // flocker represents a Flocker volume attached to a kubelet's host machine and exposed to the pod for its usage. This depends on the Flocker control service being running. + // Deprecated: Flocker is deprecated and the in-tree flocker type is no longer supported. // +optional Flocker *FlockerVolumeSource - // AzureFile represents an Azure File Service mount on the host and bind mount to the pod. + // azureFile represents an Azure File Service mount on the host and bind mount to the pod. + // Deprecated: AzureFile is deprecated. All operations for the in-tree azureFile type + // are redirected to the file.csi.azure.com CSI driver. // +optional AzureFile *AzureFilePersistentVolumeSource - // VsphereVolume represents a vSphere volume attached and mounted on kubelet's host machine + // vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine. + // Deprecated: VsphereVolume is deprecated. All operations for the in-tree vsphereVolume type + // are redirected to the csi.vsphere.vmware.com CSI driver. // +optional VsphereVolume *VsphereVirtualDiskVolumeSource - // AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. + // azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. + // Deprecated: AzureDisk is deprecated. All operations for the in-tree azureDisk type + // are redirected to the disk.csi.azure.com CSI driver. // +optional AzureDisk *AzureDiskVolumeSource - // PhotonPersistentDisk represents a Photon Controller persistent disk attached and mounted on kubelet's host machine + // photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine. + // Deprecated: PhotonPersistentDisk is deprecated and the in-tree photonPersistentDisk type is no longer supported. PhotonPersistentDisk *PhotonPersistentDiskVolumeSource - // PortworxVolume represents a portworx volume attached and mounted on kubelet's host machine + // portworxVolume represents a portworx volume attached and mounted on kubelets host machine. + // Deprecated: PortworxVolume is deprecated. All operations for the in-tree portworxVolume type + // are redirected to the pxd.portworx.com CSI driver when the CSIMigrationPortworx feature-gate + // is on. // +optional PortworxVolume *PortworxVolumeSource - // ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. + // scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. + // Deprecated: ScaleIO is deprecated and the in-tree scaleIO type is no longer supported. 
// +optional ScaleIO *ScaleIOPersistentVolumeSource - // Local represents directly-attached storage with node affinity + // local represents directly-attached storage with node affinity // +optional Local *LocalVolumeSource - // StorageOS represents a StorageOS volume that is attached to the kubelet's host machine and mounted into the pod - // More info: https://examples.k8s.io/volumes/storageos/README.md + // storageOS represents a StorageOS volume that is attached to the kubelet's host machine and mounted into the pod. + // Deprecated: StorageOS is deprecated and the in-tree storageos type is no longer supported. // +optional StorageOS *StorageOSPersistentVolumeSource - // CSI (Container Storage Interface) represents storage that is handled by an external CSI driver. + // csi represents storage that is handled by an external CSI driver. // +optional CSI *CSIPersistentVolumeSource } @@ -1857,7 +1905,7 @@ type KeyToPath struct { Mode *int32 } -// LocalVolumeSource represents directly-attached storage with node affinity (Beta feature) +// LocalVolumeSource represents directly-attached storage with node affinity type LocalVolumeSource struct { // The full path to the volume on the node. // It can be either a directory or block device (disk, partition, ...). @@ -2763,7 +2811,7 @@ type ContainerStatus struct { // AllocatedResources represents the compute resources allocated for this container by the // node. Kubelet sets this value to Container.Resources.Requests upon successful pod admission // and after successfully admitting desired pod resize. - // +featureGate=InPlacePodVerticalScaling + // +featureGate=InPlacePodVerticalScalingAllocatedStatus // +optional AllocatedResources ResourceList // Resources represents the compute resource requests and limits that have been successfully @@ -2788,11 +2836,17 @@ type ContainerStatus struct { } type ResourceStatus struct { + // Name of the resource. Must be unique within the pod and in case of non-DRA resource, match one of the resources from the pod spec. + // For DRA resources, the value must be "claim:/". + // When this status is reported about a container, the "claim_name" and "request" must match one of the claims of this container. + // +required Name ResourceName - // List of unique Resources health. Each element in the list contains a unique resource ID and resource health. - // At a minimum, ResourceID must uniquely identify the Resource - // allocated to the Pod on the Node for the lifetime of a Pod. - // See ResourceID type for it's definition. + // List of unique resources health. Each element in the list contains an unique resource ID and its health. + // At a minimum, for the lifetime of a Pod, resource ID must uniquely identify the resource allocated to the Pod on the Node. + // If other Pod on the same Node reports the status with the same resource ID, it must be the same resource they share. + // See ResourceID type definition for a specific format it has in various use cases. + // +listType=map + // +listMapKey=resourceID Resources []ResourceHealth // allow to extend this struct in future with the overall health fields or things like Device Plugin version @@ -2801,12 +2855,13 @@ type ResourceStatus struct { // ResourceID is calculated based on the source of this resource health information. 
// For DevicePlugin: // -// deviceplugin:DeviceID, where DeviceID is from the Device structure of DevicePlugin's ListAndWatchResponse type: https://github.com/kubernetes/kubernetes/blob/eda1c780543a27c078450e2f17d674471e00f494/staging/src/k8s.io/kubelet/pkg/apis/deviceplugin/v1alpha/api.proto#L61-L73 +// DeviceID, where DeviceID is how device plugin identifies the device. The same DeviceID can be found in PodResources API. // // DevicePlugin ID is usually a constant for the lifetime of a Node and typically can be used to uniquely identify the device on the node. +// // For DRA: // -// dra://: such a device can be looked up in the information published by that DRA driver to learn more about it. It is designed to be globally unique in a cluster. +// //: such a device can be looked up in the information published by that DRA driver to learn more about it. It is designed to be globally unique in a cluster. type ResourceID string type ResourceHealthStatus string @@ -2818,7 +2873,7 @@ const ( ) // ResourceHealth represents the health of a resource. It has the latest device health information. -// This is a part of KEP https://kep.k8s.io/4680 and historical health changes are planned to be added in future iterations of a KEP. +// This is a part of KEP https://kep.k8s.io/4680. type ResourceHealth struct { // ResourceID is the unique identifier of the resource. See the ResourceID type for more information. ResourceID ResourceID @@ -3554,6 +3609,20 @@ type PodSpec struct { // +featureGate=DynamicResourceAllocation // +optional ResourceClaims []PodResourceClaim + // Resources is the total amount of CPU and Memory resources required by all + // containers in the pod. It supports specifying Requests and Limits for + // "cpu" and "memory" resource names only. ResourceClaims are not supported. + // + // This field enables fine-grained control over resource allocation for the + // entire pod, allowing resource sharing among containers in a pod. + // TODO: For beta graduation, expand this comment with a detailed explanation. + // + // This is an alpha field and requires enabling the PodLevelResources feature + // gate. + // + // +featureGate=PodLevelResources + // +optional + Resources *ResourceRequirements } // PodResourceClaim references exactly one ResourceClaim through a ClaimSource. @@ -3677,6 +3746,22 @@ const ( SupplementalGroupsPolicyStrict SupplementalGroupsPolicy = "Strict" ) +// PodSELinuxChangePolicy defines how the container's SELinux label is applied to all volumes used by the Pod. +type PodSELinuxChangePolicy string + +const ( + // Recursive relabeling of all Pod volumes by the container runtime. + // This may be slow for large volumes, but allows mixing privileged and unprivileged Pods sharing the same volume on the same node. + SELinuxChangePolicyRecursive PodSELinuxChangePolicy = "Recursive" + // MountOption mounts all eligible Pod volumes with `-o context` mount option. + // This requires all Pods that share the same volume to use the same SELinux label. + // It is not possible to share the same volume among privileged and unprivileged Pods. + // Eligible volumes are in-tree FibreChannel and iSCSI volumes, and all CSI volumes + // whose CSI driver announces SELinux support by setting spec.seLinuxMount: true in their + // CSIDriver instance. Other volumes are always re-labelled recursively. + SELinuxChangePolicyMountOption PodSELinuxChangePolicy = "MountOption" +) + // PodSecurityContext holds pod-level security attributes and common container settings. 
// Some fields are also present in container.securityContext. Field values of // container.securityContext take precedence over field values of PodSecurityContext. @@ -3813,6 +3898,32 @@ type PodSecurityContext struct { // Note that this field cannot be set when spec.os.name is windows. // +optional AppArmorProfile *AppArmorProfile + // seLinuxChangePolicy defines how the container's SELinux label is applied to all volumes used by the Pod. + // It has no effect on nodes that do not support SELinux or to volumes does not support SELinux. + // Valid values are "MountOption" and "Recursive". + // + // "Recursive" means relabeling of all files on all Pod volumes by the container runtime. + // This may be slow for large volumes, but allows mixing privileged and unprivileged Pods sharing the same volume on the same node. + // + // "MountOption" mounts all eligible Pod volumes with `-o context` mount option. + // This requires all Pods that share the same volume to use the same SELinux label. + // It is not possible to share the same volume among privileged and unprivileged Pods. + // Eligible volumes are in-tree FibreChannel and iSCSI volumes, and all CSI volumes + // whose CSI driver announces SELinux support by setting spec.seLinuxMount: true in their + // CSIDriver instance. Other volumes are always re-labelled recursively. + // "MountOption" value is allowed only when SELinuxMount feature gate is enabled. + // + // If not specified and SELinuxMount feature gate is enabled, "MountOption" is used. + // If not specified and SELinuxMount feature gate is disabled, "MountOption" is used for ReadWriteOncePod volumes + // and "Recursive" for all other volumes. + // + // This field affects only Pods that have SELinux label set, either in PodSecurityContext or in SecurityContext of all containers. + // + // All Pods that use the same volume should use the same seLinuxChangePolicy, otherwise some pods can get stuck in ContainerCreating state. + // Note that this field cannot be set when spec.os.name is windows. + // +featureGate=SELinuxChangePolicy + // +optional + SELinuxChangePolicy *PodSELinuxChangePolicy } // SeccompProfile defines a pod/container's seccomp profile settings. @@ -4099,16 +4210,37 @@ type PodStatus struct { // +optional QOSClass PodQOSClass - // The list has one entry per init container in the manifest. The most recent successful + // Statuses of init containers in this pod. The most recent successful non-restartable // init container will have ready = true, the most recently started container will have // startTime set. + // Each init container in the pod should have at most one status in this list, + // and all statuses should be for containers in the pod. + // However this is not enforced. + // If a status for a non-existent container is present in the list, or the list has duplicate names, + // the behavior of various Kubernetes components is not defined and those statuses might be + // ignored. // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#pod-and-container-status InitContainerStatuses []ContainerStatus - // The list has one entry per app container in the manifest. + + // Statuses of containers in this pod. + // Each container in the pod should have at most one status in this list, + // and all statuses should be for containers in the pod. + // However this is not enforced. 
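A short sketch of opting a pod into the new seLinuxChangePolicy field introduced above (gated by SELinuxChangePolicy), here choosing recursive relabeling:

package example

import "k8s.io/kubernetes/pkg/apis/core"

// withRecursiveRelabeling asks the container runtime to relabel all of the
// pod's volumes recursively instead of relying on the -o context mount option.
func withRecursiveRelabeling(spec *core.PodSpec) {
	policy := core.SELinuxChangePolicyRecursive
	if spec.SecurityContext == nil {
		spec.SecurityContext = &core.PodSecurityContext{}
	}
	spec.SecurityContext.SELinuxChangePolicy = &policy
}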
+ // If a status for a non-existent container is present in the list, or the list has duplicate names, + // the behavior of various Kubernetes components is not defined and those statuses might be + // ignored. + // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status // +optional ContainerStatuses []ContainerStatus - // Status for any ephemeral containers that have run in this pod. + // Statuses for any ephemeral containers that have run in this pod. + // Each ephemeral container in the pod should have at most one status in this list, + // and all statuses should be for containers in the pod. + // However this is not enforced. + // If a status for a non-existent container is present in the list, or the list has duplicate names, + // the behavior of various Kubernetes components is not defined and those statuses might be + // ignored. + // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status // +optional EphemeralContainerStatuses []ContainerStatus @@ -4773,6 +4905,8 @@ type ServiceAccount struct { // Secrets is a list of the secrets in the same namespace that pods running using this ServiceAccount are allowed to use. // Pods are only limited to this list if this service account has a "kubernetes.io/enforce-mountable-secrets" annotation set to "true". + // The "kubernetes.io/enforce-mountable-secrets" annotation is deprecated since v1.32. + // Prefer separate namespaces to isolate access to mounted secrets. // This field should not be used to find auto-generated service account token secrets for use outside of pods. // Instead, tokens can be requested directly using the TokenRequest API, or service account token secrets can be manually created. Secrets []ObjectReference @@ -5435,7 +5569,6 @@ type NamespaceList struct { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // Binding ties one object to another; for example, a pod is bound to a node by a scheduler. -// Deprecated in 1.7, please use the bindings subresource of pods instead. type Binding struct { metav1.TypeMeta // ObjectMeta describes the object that is being bound. @@ -5453,6 +5586,15 @@ type Preconditions struct { UID *types.UID } +const ( + // LogStreamStdout is the stream type for stdout. + LogStreamStdout = "Stdout" + // LogStreamStderr is the stream type for stderr. + LogStreamStderr = "Stderr" + // LogStreamAll represents the combined stdout and stderr. + LogStreamAll = "All" +) + // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // PodLogOptions is the query options for a Pod's logs REST call @@ -5479,7 +5621,8 @@ type PodLogOptions struct { // of log output. Timestamps bool // If set, the number of lines from the end of the logs to show. If not specified, - // logs are shown from the creation of the container or sinceSeconds or sinceTime + // logs are shown from the creation of the container or sinceSeconds or sinceTime. + // Note that when "TailLines" is specified, "Stream" can only be set to nil or "All". TailLines *int64 // If set, the number of bytes to read from the server before terminating the // log output. This may not display a complete final line of logging, and may return @@ -5494,6 +5637,14 @@ type PodLogOptions struct { // the actual log data coming from the real kubelet). // +optional InsecureSkipTLSVerifyBackend bool + + // Specify which container log stream to return to the client. + // Acceptable values are "All", "Stdout" and "Stderr". 
If not specified, "All" is used, and both stdout and stderr + // are returned interleaved. + // Note that when "TailLines" is specified, "Stream" can only be set to nil or "All". + // +featureGate=PodLogsQuerySplitStreams + // +optional + Stream *string } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -5683,7 +5834,6 @@ type Event struct { metav1.ObjectMeta // The object that this event is about. Mapped to events.Event.regarding - // +optional InvolvedObject ObjectReference // Optional; this should be a short, machine understandable string that gives the reason diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/v1/OWNERS b/vendor/k8s.io/kubernetes/pkg/apis/core/v1/OWNERS index a47166dce..274fc5464 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/core/v1/OWNERS +++ b/vendor/k8s.io/kubernetes/pkg/apis/core/v1/OWNERS @@ -16,8 +16,9 @@ reviewers: - luxas - janetkuo - justinsb - - ncdc - tallclair - jsafrane - dims - jayunit100 +emeritus_reviewers: + - ncdc diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/v1/conversion.go b/vendor/k8s.io/kubernetes/pkg/apis/core/v1/conversion.go index 9980be6ea..2ec47908e 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/core/v1/conversion.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/core/v1/conversion.go @@ -20,15 +20,15 @@ import ( "fmt" "reflect" - v1 "k8s.io/api/core/v1" + "k8s.io/utils/ptr" + v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/conversion" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/validation/field" "k8s.io/kubernetes/pkg/apis/apps" "k8s.io/kubernetes/pkg/apis/core" - utilpointer "k8s.io/utils/pointer" ) func addConversionFuncs(scheme *runtime.Scheme) error { @@ -380,7 +380,7 @@ func Convert_v1_Pod_To_core_Pod(in *v1.Pod, out *core.Pod, s conversion.Scope) e // Forcing the value of TerminationGracePeriodSeconds to 1 if it is negative. // Just for Pod, not for PodSpec, because we don't want to change the behavior of the PodTemplate. if in.Spec.TerminationGracePeriodSeconds != nil && *in.Spec.TerminationGracePeriodSeconds < 0 { - out.Spec.TerminationGracePeriodSeconds = utilpointer.Int64(1) + out.Spec.TerminationGracePeriodSeconds = ptr.To[int64](1) } return nil } @@ -397,7 +397,7 @@ func Convert_core_Pod_To_v1_Pod(in *core.Pod, out *v1.Pod, s conversion.Scope) e // Forcing the value of TerminationGracePeriodSeconds to 1 if it is negative. // Just for Pod, not for PodSpec, because we don't want to change the behavior of the PodTemplate. if in.Spec.TerminationGracePeriodSeconds != nil && *in.Spec.TerminationGracePeriodSeconds < 0 { - out.Spec.TerminationGracePeriodSeconds = utilpointer.Int64(1) + out.Spec.TerminationGracePeriodSeconds = ptr.To[int64](1) } return nil } @@ -554,3 +554,13 @@ func Convert_core_PersistentVolumeSpec_To_v1_PersistentVolumeSpec(in *core.Persi func Convert_v1_PersistentVolumeSpec_To_core_PersistentVolumeSpec(in *v1.PersistentVolumeSpec, out *core.PersistentVolumeSpec, s conversion.Scope) error { return autoConvert_v1_PersistentVolumeSpec_To_core_PersistentVolumeSpec(in, out, s) } + +// Convert_Slice_string_To_Pointer_string is needed because decoding URL parameters requires manual assistance. 
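A sketch of the new PodLogOptions.Stream field added above, requesting only stderr; per the field's comment, TailLines may only be combined with a nil or "All" stream:

package example

import "k8s.io/kubernetes/pkg/apis/core"

// stderrOnlyLogOptions returns log options that select the stderr stream,
// relying on the PodLogsQuerySplitStreams feature gate described above.
func stderrOnlyLogOptions() *core.PodLogOptions {
	stream := core.LogStreamStderr
	return &core.PodLogOptions{Stream: &stream}
}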
+func Convert_Slice_string_To_Pointer_string(in *[]string, out **string, s conversion.Scope) error { + if len(*in) == 0 { + return nil + } + temp := (*in)[0] + *out = &temp + return nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/v1/defaults.go b/vendor/k8s.io/kubernetes/pkg/apis/core/v1/defaults.go index 72fd55bb2..a058c5f7c 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/core/v1/defaults.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/core/v1/defaults.go @@ -19,14 +19,16 @@ package v1 import ( "time" + "k8s.io/utils/ptr" + v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/intstr" utilfeature "k8s.io/apiserver/pkg/util/feature" + resourcehelper "k8s.io/component-helpers/resource" "k8s.io/kubernetes/pkg/api/v1/service" "k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/util/parsers" - "k8s.io/utils/pointer" ) func addDefaultingFuncs(scheme *runtime.Scheme) error { @@ -64,7 +66,7 @@ func SetDefaults_ReplicationController(obj *v1.ReplicationController) { } } func SetDefaults_Volume(obj *v1.Volume) { - if pointer.AllPtrFieldsNil(&obj.VolumeSource) { + if ptr.AllPtrFieldsNil(&obj.VolumeSource) { obj.VolumeSource = v1.VolumeSource{ EmptyDir: &v1.EmptyDirVolumeSource{}, } @@ -147,7 +149,7 @@ func SetDefaults_Service(obj *v1.Service) { if obj.Spec.Type == v1.ServiceTypeLoadBalancer { if obj.Spec.AllocateLoadBalancerNodePorts == nil { - obj.Spec.AllocateLoadBalancerNodePorts = pointer.Bool(true) + obj.Spec.AllocateLoadBalancerNodePorts = ptr.To(true) } } @@ -216,6 +218,13 @@ func SetDefaults_Pod(obj *v1.Pod) { } } } + + // Pod Requests default values must be applied after container-level default values + // have been populated. + if utilfeature.DefaultFeatureGate.Enabled(features.PodLevelResources) { + defaultPodRequests(obj) + } + if obj.Spec.EnableServiceLinks == nil { enableServiceLinks := v1.DefaultEnableServiceLinks obj.Spec.EnableServiceLinks = &enableServiceLinks @@ -429,3 +438,64 @@ func SetDefaults_HostPathVolumeSource(obj *v1.HostPathVolumeSource) { obj.Type = &typeVol } } + +func SetDefaults_PodLogOptions(obj *v1.PodLogOptions) { + if utilfeature.DefaultFeatureGate.Enabled(features.PodLogsQuerySplitStreams) { + if obj.Stream == nil { + obj.Stream = ptr.To(v1.LogStreamAll) + } + } +} + +// defaultPodRequests applies default values for pod-level requests, only when +// pod-level limits are set, in following scenarios: +// 1. When at least one container (regular, init or sidecar) has requests set: +// The pod-level requests become equal to the effective requests of all containers +// in the pod. +// 2. When no containers have requests set: The pod-level requests become equal to +// pod-level limits. +// This defaulting behavior ensures consistent resource accounting at the pod-level +// while maintaining compatibility with the container-level specifications, as detailed +// in KEP-2837: https://github.com/kubernetes/enhancements/blob/master/keps/sig-node/2837-pod-level-resource-spec/README.md#proposed-validation--defaulting-rules +func defaultPodRequests(obj *v1.Pod) { + // We only populate defaults when the pod-level resources are partly specified already. 
+ if obj.Spec.Resources == nil { + return + } + + if len(obj.Spec.Resources.Limits) == 0 { + return + } + + var podReqs v1.ResourceList + podReqs = obj.Spec.Resources.Requests + if podReqs == nil { + podReqs = make(v1.ResourceList) + } + + aggrCtrReqs := resourcehelper.AggregateContainerRequests(obj, resourcehelper.PodResourcesOptions{}) + + // When containers specify requests for a resource (supported by + // PodLevelResources feature) and pod-level requests are not set, the pod-level requests + // default to the effective requests of all the containers for that resource. + for key, aggrCtrLim := range aggrCtrReqs { + if _, exists := podReqs[key]; !exists && resourcehelper.IsSupportedPodLevelResource(key) { + podReqs[key] = aggrCtrLim.DeepCopy() + } + } + + // When no containers specify requests for a resource, the pod-level requests + // will default to match the pod-level limits, if pod-level + // limits exist for that resource. + for key, podLim := range obj.Spec.Resources.Limits { + if _, exists := podReqs[key]; !exists && resourcehelper.IsSupportedPodLevelResource(key) { + podReqs[key] = podLim.DeepCopy() + } + } + + // Only set pod-level resource requests in the PodSpec if the requirements map + // contains entries after collecting container-level requests and pod-level limits. + if len(podReqs) > 0 { + obj.Spec.Resources.Requests = podReqs + } +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/v1/zz_generated.conversion.go b/vendor/k8s.io/kubernetes/pkg/apis/core/v1/zz_generated.conversion.go index 19b869eba..6a842771a 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/core/v1/zz_generated.conversion.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/core/v1/zz_generated.conversion.go @@ -25,7 +25,7 @@ import ( url "net/url" unsafe "unsafe" - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" resource "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" conversion "k8s.io/apimachinery/pkg/conversion" @@ -42,2380 +42,2385 @@ func init() { // RegisterConversions adds conversion functions to the given scheme. // Public to allow building arbitrary schemes. 
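Everything that follows in zz_generated.conversion.go is a single mechanical change: the import alias for k8s.io/api/core/v1 switches from v1 to corev1 (presumably the output of a newer conversion-gen run), and every generated registration and conversion signature is rewritten to use the new alias; none of the hunks shown here change conversion logic. A hedged sketch of what the alias rename means for calling code, reusing the PodLogOptions.Stream field introduced above (the helper name is hypothetical, not part of the diff):

package example

import (
	corev1 "k8s.io/api/core/v1" // was imported as: v1 "k8s.io/api/core/v1"
)

// wantsAllStreams reports whether a PodLogOptions asks for the combined
// stdout/stderr stream, i.e. Stream is unset or set to "All".
func wantsAllStreams(opts *corev1.PodLogOptions) bool {
	return opts.Stream == nil || *opts.Stream == corev1.LogStreamAll
}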
func RegisterConversions(s *runtime.Scheme) error { - if err := s.AddGeneratedConversionFunc((*v1.AWSElasticBlockStoreVolumeSource)(nil), (*core.AWSElasticBlockStoreVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_AWSElasticBlockStoreVolumeSource_To_core_AWSElasticBlockStoreVolumeSource(a.(*v1.AWSElasticBlockStoreVolumeSource), b.(*core.AWSElasticBlockStoreVolumeSource), scope) + if err := s.AddGeneratedConversionFunc((*corev1.AWSElasticBlockStoreVolumeSource)(nil), (*core.AWSElasticBlockStoreVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_AWSElasticBlockStoreVolumeSource_To_core_AWSElasticBlockStoreVolumeSource(a.(*corev1.AWSElasticBlockStoreVolumeSource), b.(*core.AWSElasticBlockStoreVolumeSource), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.AWSElasticBlockStoreVolumeSource)(nil), (*v1.AWSElasticBlockStoreVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_AWSElasticBlockStoreVolumeSource_To_v1_AWSElasticBlockStoreVolumeSource(a.(*core.AWSElasticBlockStoreVolumeSource), b.(*v1.AWSElasticBlockStoreVolumeSource), scope) + if err := s.AddGeneratedConversionFunc((*core.AWSElasticBlockStoreVolumeSource)(nil), (*corev1.AWSElasticBlockStoreVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_AWSElasticBlockStoreVolumeSource_To_v1_AWSElasticBlockStoreVolumeSource(a.(*core.AWSElasticBlockStoreVolumeSource), b.(*corev1.AWSElasticBlockStoreVolumeSource), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.Affinity)(nil), (*core.Affinity)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_Affinity_To_core_Affinity(a.(*v1.Affinity), b.(*core.Affinity), scope) + if err := s.AddGeneratedConversionFunc((*corev1.Affinity)(nil), (*core.Affinity)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_Affinity_To_core_Affinity(a.(*corev1.Affinity), b.(*core.Affinity), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.Affinity)(nil), (*v1.Affinity)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_Affinity_To_v1_Affinity(a.(*core.Affinity), b.(*v1.Affinity), scope) + if err := s.AddGeneratedConversionFunc((*core.Affinity)(nil), (*corev1.Affinity)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_Affinity_To_v1_Affinity(a.(*core.Affinity), b.(*corev1.Affinity), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.AppArmorProfile)(nil), (*core.AppArmorProfile)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_AppArmorProfile_To_core_AppArmorProfile(a.(*v1.AppArmorProfile), b.(*core.AppArmorProfile), scope) + if err := s.AddGeneratedConversionFunc((*corev1.AppArmorProfile)(nil), (*core.AppArmorProfile)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_AppArmorProfile_To_core_AppArmorProfile(a.(*corev1.AppArmorProfile), b.(*core.AppArmorProfile), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.AppArmorProfile)(nil), (*v1.AppArmorProfile)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_AppArmorProfile_To_v1_AppArmorProfile(a.(*core.AppArmorProfile), b.(*v1.AppArmorProfile), scope) + if err := 
s.AddGeneratedConversionFunc((*core.AppArmorProfile)(nil), (*corev1.AppArmorProfile)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_AppArmorProfile_To_v1_AppArmorProfile(a.(*core.AppArmorProfile), b.(*corev1.AppArmorProfile), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.AttachedVolume)(nil), (*core.AttachedVolume)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_AttachedVolume_To_core_AttachedVolume(a.(*v1.AttachedVolume), b.(*core.AttachedVolume), scope) + if err := s.AddGeneratedConversionFunc((*corev1.AttachedVolume)(nil), (*core.AttachedVolume)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_AttachedVolume_To_core_AttachedVolume(a.(*corev1.AttachedVolume), b.(*core.AttachedVolume), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.AttachedVolume)(nil), (*v1.AttachedVolume)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_AttachedVolume_To_v1_AttachedVolume(a.(*core.AttachedVolume), b.(*v1.AttachedVolume), scope) + if err := s.AddGeneratedConversionFunc((*core.AttachedVolume)(nil), (*corev1.AttachedVolume)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_AttachedVolume_To_v1_AttachedVolume(a.(*core.AttachedVolume), b.(*corev1.AttachedVolume), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.AvoidPods)(nil), (*core.AvoidPods)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_AvoidPods_To_core_AvoidPods(a.(*v1.AvoidPods), b.(*core.AvoidPods), scope) + if err := s.AddGeneratedConversionFunc((*corev1.AvoidPods)(nil), (*core.AvoidPods)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_AvoidPods_To_core_AvoidPods(a.(*corev1.AvoidPods), b.(*core.AvoidPods), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.AvoidPods)(nil), (*v1.AvoidPods)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_AvoidPods_To_v1_AvoidPods(a.(*core.AvoidPods), b.(*v1.AvoidPods), scope) + if err := s.AddGeneratedConversionFunc((*core.AvoidPods)(nil), (*corev1.AvoidPods)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_AvoidPods_To_v1_AvoidPods(a.(*core.AvoidPods), b.(*corev1.AvoidPods), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.AzureDiskVolumeSource)(nil), (*core.AzureDiskVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_AzureDiskVolumeSource_To_core_AzureDiskVolumeSource(a.(*v1.AzureDiskVolumeSource), b.(*core.AzureDiskVolumeSource), scope) + if err := s.AddGeneratedConversionFunc((*corev1.AzureDiskVolumeSource)(nil), (*core.AzureDiskVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_AzureDiskVolumeSource_To_core_AzureDiskVolumeSource(a.(*corev1.AzureDiskVolumeSource), b.(*core.AzureDiskVolumeSource), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.AzureDiskVolumeSource)(nil), (*v1.AzureDiskVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_AzureDiskVolumeSource_To_v1_AzureDiskVolumeSource(a.(*core.AzureDiskVolumeSource), b.(*v1.AzureDiskVolumeSource), scope) + if err := s.AddGeneratedConversionFunc((*core.AzureDiskVolumeSource)(nil), (*corev1.AzureDiskVolumeSource)(nil), func(a, 
b interface{}, scope conversion.Scope) error { + return Convert_core_AzureDiskVolumeSource_To_v1_AzureDiskVolumeSource(a.(*core.AzureDiskVolumeSource), b.(*corev1.AzureDiskVolumeSource), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.AzureFilePersistentVolumeSource)(nil), (*core.AzureFilePersistentVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_AzureFilePersistentVolumeSource_To_core_AzureFilePersistentVolumeSource(a.(*v1.AzureFilePersistentVolumeSource), b.(*core.AzureFilePersistentVolumeSource), scope) + if err := s.AddGeneratedConversionFunc((*corev1.AzureFilePersistentVolumeSource)(nil), (*core.AzureFilePersistentVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_AzureFilePersistentVolumeSource_To_core_AzureFilePersistentVolumeSource(a.(*corev1.AzureFilePersistentVolumeSource), b.(*core.AzureFilePersistentVolumeSource), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.AzureFilePersistentVolumeSource)(nil), (*v1.AzureFilePersistentVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_AzureFilePersistentVolumeSource_To_v1_AzureFilePersistentVolumeSource(a.(*core.AzureFilePersistentVolumeSource), b.(*v1.AzureFilePersistentVolumeSource), scope) + if err := s.AddGeneratedConversionFunc((*core.AzureFilePersistentVolumeSource)(nil), (*corev1.AzureFilePersistentVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_AzureFilePersistentVolumeSource_To_v1_AzureFilePersistentVolumeSource(a.(*core.AzureFilePersistentVolumeSource), b.(*corev1.AzureFilePersistentVolumeSource), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.AzureFileVolumeSource)(nil), (*core.AzureFileVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_AzureFileVolumeSource_To_core_AzureFileVolumeSource(a.(*v1.AzureFileVolumeSource), b.(*core.AzureFileVolumeSource), scope) + if err := s.AddGeneratedConversionFunc((*corev1.AzureFileVolumeSource)(nil), (*core.AzureFileVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_AzureFileVolumeSource_To_core_AzureFileVolumeSource(a.(*corev1.AzureFileVolumeSource), b.(*core.AzureFileVolumeSource), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.AzureFileVolumeSource)(nil), (*v1.AzureFileVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_AzureFileVolumeSource_To_v1_AzureFileVolumeSource(a.(*core.AzureFileVolumeSource), b.(*v1.AzureFileVolumeSource), scope) + if err := s.AddGeneratedConversionFunc((*core.AzureFileVolumeSource)(nil), (*corev1.AzureFileVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_AzureFileVolumeSource_To_v1_AzureFileVolumeSource(a.(*core.AzureFileVolumeSource), b.(*corev1.AzureFileVolumeSource), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.Binding)(nil), (*core.Binding)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_Binding_To_core_Binding(a.(*v1.Binding), b.(*core.Binding), scope) + if err := s.AddGeneratedConversionFunc((*corev1.Binding)(nil), (*core.Binding)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_Binding_To_core_Binding(a.(*corev1.Binding), b.(*core.Binding), scope) }); err 
!= nil { return err } - if err := s.AddGeneratedConversionFunc((*core.Binding)(nil), (*v1.Binding)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_Binding_To_v1_Binding(a.(*core.Binding), b.(*v1.Binding), scope) + if err := s.AddGeneratedConversionFunc((*core.Binding)(nil), (*corev1.Binding)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_Binding_To_v1_Binding(a.(*core.Binding), b.(*corev1.Binding), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.CSIPersistentVolumeSource)(nil), (*core.CSIPersistentVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_CSIPersistentVolumeSource_To_core_CSIPersistentVolumeSource(a.(*v1.CSIPersistentVolumeSource), b.(*core.CSIPersistentVolumeSource), scope) + if err := s.AddGeneratedConversionFunc((*corev1.CSIPersistentVolumeSource)(nil), (*core.CSIPersistentVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_CSIPersistentVolumeSource_To_core_CSIPersistentVolumeSource(a.(*corev1.CSIPersistentVolumeSource), b.(*core.CSIPersistentVolumeSource), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.CSIPersistentVolumeSource)(nil), (*v1.CSIPersistentVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_CSIPersistentVolumeSource_To_v1_CSIPersistentVolumeSource(a.(*core.CSIPersistentVolumeSource), b.(*v1.CSIPersistentVolumeSource), scope) + if err := s.AddGeneratedConversionFunc((*core.CSIPersistentVolumeSource)(nil), (*corev1.CSIPersistentVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_CSIPersistentVolumeSource_To_v1_CSIPersistentVolumeSource(a.(*core.CSIPersistentVolumeSource), b.(*corev1.CSIPersistentVolumeSource), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.CSIVolumeSource)(nil), (*core.CSIVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_CSIVolumeSource_To_core_CSIVolumeSource(a.(*v1.CSIVolumeSource), b.(*core.CSIVolumeSource), scope) + if err := s.AddGeneratedConversionFunc((*corev1.CSIVolumeSource)(nil), (*core.CSIVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_CSIVolumeSource_To_core_CSIVolumeSource(a.(*corev1.CSIVolumeSource), b.(*core.CSIVolumeSource), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.CSIVolumeSource)(nil), (*v1.CSIVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_CSIVolumeSource_To_v1_CSIVolumeSource(a.(*core.CSIVolumeSource), b.(*v1.CSIVolumeSource), scope) + if err := s.AddGeneratedConversionFunc((*core.CSIVolumeSource)(nil), (*corev1.CSIVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_CSIVolumeSource_To_v1_CSIVolumeSource(a.(*core.CSIVolumeSource), b.(*corev1.CSIVolumeSource), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.Capabilities)(nil), (*core.Capabilities)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_Capabilities_To_core_Capabilities(a.(*v1.Capabilities), b.(*core.Capabilities), scope) + if err := s.AddGeneratedConversionFunc((*corev1.Capabilities)(nil), (*core.Capabilities)(nil), func(a, b interface{}, scope conversion.Scope) error { + return 
Convert_v1_Capabilities_To_core_Capabilities(a.(*corev1.Capabilities), b.(*core.Capabilities), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.Capabilities)(nil), (*v1.Capabilities)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_Capabilities_To_v1_Capabilities(a.(*core.Capabilities), b.(*v1.Capabilities), scope) + if err := s.AddGeneratedConversionFunc((*core.Capabilities)(nil), (*corev1.Capabilities)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_Capabilities_To_v1_Capabilities(a.(*core.Capabilities), b.(*corev1.Capabilities), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.CephFSPersistentVolumeSource)(nil), (*core.CephFSPersistentVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_CephFSPersistentVolumeSource_To_core_CephFSPersistentVolumeSource(a.(*v1.CephFSPersistentVolumeSource), b.(*core.CephFSPersistentVolumeSource), scope) + if err := s.AddGeneratedConversionFunc((*corev1.CephFSPersistentVolumeSource)(nil), (*core.CephFSPersistentVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_CephFSPersistentVolumeSource_To_core_CephFSPersistentVolumeSource(a.(*corev1.CephFSPersistentVolumeSource), b.(*core.CephFSPersistentVolumeSource), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.CephFSPersistentVolumeSource)(nil), (*v1.CephFSPersistentVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_CephFSPersistentVolumeSource_To_v1_CephFSPersistentVolumeSource(a.(*core.CephFSPersistentVolumeSource), b.(*v1.CephFSPersistentVolumeSource), scope) + if err := s.AddGeneratedConversionFunc((*core.CephFSPersistentVolumeSource)(nil), (*corev1.CephFSPersistentVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_CephFSPersistentVolumeSource_To_v1_CephFSPersistentVolumeSource(a.(*core.CephFSPersistentVolumeSource), b.(*corev1.CephFSPersistentVolumeSource), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.CephFSVolumeSource)(nil), (*core.CephFSVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_CephFSVolumeSource_To_core_CephFSVolumeSource(a.(*v1.CephFSVolumeSource), b.(*core.CephFSVolumeSource), scope) + if err := s.AddGeneratedConversionFunc((*corev1.CephFSVolumeSource)(nil), (*core.CephFSVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_CephFSVolumeSource_To_core_CephFSVolumeSource(a.(*corev1.CephFSVolumeSource), b.(*core.CephFSVolumeSource), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.CephFSVolumeSource)(nil), (*v1.CephFSVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_CephFSVolumeSource_To_v1_CephFSVolumeSource(a.(*core.CephFSVolumeSource), b.(*v1.CephFSVolumeSource), scope) + if err := s.AddGeneratedConversionFunc((*core.CephFSVolumeSource)(nil), (*corev1.CephFSVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_CephFSVolumeSource_To_v1_CephFSVolumeSource(a.(*core.CephFSVolumeSource), b.(*corev1.CephFSVolumeSource), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.CinderPersistentVolumeSource)(nil), (*core.CinderPersistentVolumeSource)(nil), func(a, b interface{}, scope 
conversion.Scope) error { - return Convert_v1_CinderPersistentVolumeSource_To_core_CinderPersistentVolumeSource(a.(*v1.CinderPersistentVolumeSource), b.(*core.CinderPersistentVolumeSource), scope) + if err := s.AddGeneratedConversionFunc((*corev1.CinderPersistentVolumeSource)(nil), (*core.CinderPersistentVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_CinderPersistentVolumeSource_To_core_CinderPersistentVolumeSource(a.(*corev1.CinderPersistentVolumeSource), b.(*core.CinderPersistentVolumeSource), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.CinderPersistentVolumeSource)(nil), (*v1.CinderPersistentVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_CinderPersistentVolumeSource_To_v1_CinderPersistentVolumeSource(a.(*core.CinderPersistentVolumeSource), b.(*v1.CinderPersistentVolumeSource), scope) + if err := s.AddGeneratedConversionFunc((*core.CinderPersistentVolumeSource)(nil), (*corev1.CinderPersistentVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_CinderPersistentVolumeSource_To_v1_CinderPersistentVolumeSource(a.(*core.CinderPersistentVolumeSource), b.(*corev1.CinderPersistentVolumeSource), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.CinderVolumeSource)(nil), (*core.CinderVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_CinderVolumeSource_To_core_CinderVolumeSource(a.(*v1.CinderVolumeSource), b.(*core.CinderVolumeSource), scope) + if err := s.AddGeneratedConversionFunc((*corev1.CinderVolumeSource)(nil), (*core.CinderVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_CinderVolumeSource_To_core_CinderVolumeSource(a.(*corev1.CinderVolumeSource), b.(*core.CinderVolumeSource), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.CinderVolumeSource)(nil), (*v1.CinderVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_CinderVolumeSource_To_v1_CinderVolumeSource(a.(*core.CinderVolumeSource), b.(*v1.CinderVolumeSource), scope) + if err := s.AddGeneratedConversionFunc((*core.CinderVolumeSource)(nil), (*corev1.CinderVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_CinderVolumeSource_To_v1_CinderVolumeSource(a.(*core.CinderVolumeSource), b.(*corev1.CinderVolumeSource), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.ClientIPConfig)(nil), (*core.ClientIPConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_ClientIPConfig_To_core_ClientIPConfig(a.(*v1.ClientIPConfig), b.(*core.ClientIPConfig), scope) + if err := s.AddGeneratedConversionFunc((*corev1.ClientIPConfig)(nil), (*core.ClientIPConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_ClientIPConfig_To_core_ClientIPConfig(a.(*corev1.ClientIPConfig), b.(*core.ClientIPConfig), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.ClientIPConfig)(nil), (*v1.ClientIPConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_ClientIPConfig_To_v1_ClientIPConfig(a.(*core.ClientIPConfig), b.(*v1.ClientIPConfig), scope) + if err := s.AddGeneratedConversionFunc((*core.ClientIPConfig)(nil), (*corev1.ClientIPConfig)(nil), func(a, b interface{}, scope conversion.Scope) 
error { + return Convert_core_ClientIPConfig_To_v1_ClientIPConfig(a.(*core.ClientIPConfig), b.(*corev1.ClientIPConfig), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.ClusterTrustBundleProjection)(nil), (*core.ClusterTrustBundleProjection)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_ClusterTrustBundleProjection_To_core_ClusterTrustBundleProjection(a.(*v1.ClusterTrustBundleProjection), b.(*core.ClusterTrustBundleProjection), scope) + if err := s.AddGeneratedConversionFunc((*corev1.ClusterTrustBundleProjection)(nil), (*core.ClusterTrustBundleProjection)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_ClusterTrustBundleProjection_To_core_ClusterTrustBundleProjection(a.(*corev1.ClusterTrustBundleProjection), b.(*core.ClusterTrustBundleProjection), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.ClusterTrustBundleProjection)(nil), (*v1.ClusterTrustBundleProjection)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_ClusterTrustBundleProjection_To_v1_ClusterTrustBundleProjection(a.(*core.ClusterTrustBundleProjection), b.(*v1.ClusterTrustBundleProjection), scope) + if err := s.AddGeneratedConversionFunc((*core.ClusterTrustBundleProjection)(nil), (*corev1.ClusterTrustBundleProjection)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_ClusterTrustBundleProjection_To_v1_ClusterTrustBundleProjection(a.(*core.ClusterTrustBundleProjection), b.(*corev1.ClusterTrustBundleProjection), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.ComponentCondition)(nil), (*core.ComponentCondition)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_ComponentCondition_To_core_ComponentCondition(a.(*v1.ComponentCondition), b.(*core.ComponentCondition), scope) + if err := s.AddGeneratedConversionFunc((*corev1.ComponentCondition)(nil), (*core.ComponentCondition)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_ComponentCondition_To_core_ComponentCondition(a.(*corev1.ComponentCondition), b.(*core.ComponentCondition), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.ComponentCondition)(nil), (*v1.ComponentCondition)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_ComponentCondition_To_v1_ComponentCondition(a.(*core.ComponentCondition), b.(*v1.ComponentCondition), scope) + if err := s.AddGeneratedConversionFunc((*core.ComponentCondition)(nil), (*corev1.ComponentCondition)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_ComponentCondition_To_v1_ComponentCondition(a.(*core.ComponentCondition), b.(*corev1.ComponentCondition), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.ComponentStatus)(nil), (*core.ComponentStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_ComponentStatus_To_core_ComponentStatus(a.(*v1.ComponentStatus), b.(*core.ComponentStatus), scope) + if err := s.AddGeneratedConversionFunc((*corev1.ComponentStatus)(nil), (*core.ComponentStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_ComponentStatus_To_core_ComponentStatus(a.(*corev1.ComponentStatus), b.(*core.ComponentStatus), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.ComponentStatus)(nil), 
(*v1.ComponentStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_ComponentStatus_To_v1_ComponentStatus(a.(*core.ComponentStatus), b.(*v1.ComponentStatus), scope) + if err := s.AddGeneratedConversionFunc((*core.ComponentStatus)(nil), (*corev1.ComponentStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_ComponentStatus_To_v1_ComponentStatus(a.(*core.ComponentStatus), b.(*corev1.ComponentStatus), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.ComponentStatusList)(nil), (*core.ComponentStatusList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_ComponentStatusList_To_core_ComponentStatusList(a.(*v1.ComponentStatusList), b.(*core.ComponentStatusList), scope) + if err := s.AddGeneratedConversionFunc((*corev1.ComponentStatusList)(nil), (*core.ComponentStatusList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_ComponentStatusList_To_core_ComponentStatusList(a.(*corev1.ComponentStatusList), b.(*core.ComponentStatusList), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.ComponentStatusList)(nil), (*v1.ComponentStatusList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_ComponentStatusList_To_v1_ComponentStatusList(a.(*core.ComponentStatusList), b.(*v1.ComponentStatusList), scope) + if err := s.AddGeneratedConversionFunc((*core.ComponentStatusList)(nil), (*corev1.ComponentStatusList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_ComponentStatusList_To_v1_ComponentStatusList(a.(*core.ComponentStatusList), b.(*corev1.ComponentStatusList), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.ConfigMap)(nil), (*core.ConfigMap)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_ConfigMap_To_core_ConfigMap(a.(*v1.ConfigMap), b.(*core.ConfigMap), scope) + if err := s.AddGeneratedConversionFunc((*corev1.ConfigMap)(nil), (*core.ConfigMap)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_ConfigMap_To_core_ConfigMap(a.(*corev1.ConfigMap), b.(*core.ConfigMap), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.ConfigMap)(nil), (*v1.ConfigMap)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_ConfigMap_To_v1_ConfigMap(a.(*core.ConfigMap), b.(*v1.ConfigMap), scope) + if err := s.AddGeneratedConversionFunc((*core.ConfigMap)(nil), (*corev1.ConfigMap)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_ConfigMap_To_v1_ConfigMap(a.(*core.ConfigMap), b.(*corev1.ConfigMap), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.ConfigMapEnvSource)(nil), (*core.ConfigMapEnvSource)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_ConfigMapEnvSource_To_core_ConfigMapEnvSource(a.(*v1.ConfigMapEnvSource), b.(*core.ConfigMapEnvSource), scope) + if err := s.AddGeneratedConversionFunc((*corev1.ConfigMapEnvSource)(nil), (*core.ConfigMapEnvSource)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_ConfigMapEnvSource_To_core_ConfigMapEnvSource(a.(*corev1.ConfigMapEnvSource), b.(*core.ConfigMapEnvSource), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.ConfigMapEnvSource)(nil), (*v1.ConfigMapEnvSource)(nil), func(a, b interface{}, scope 
conversion.Scope) error { - return Convert_core_ConfigMapEnvSource_To_v1_ConfigMapEnvSource(a.(*core.ConfigMapEnvSource), b.(*v1.ConfigMapEnvSource), scope) + if err := s.AddGeneratedConversionFunc((*core.ConfigMapEnvSource)(nil), (*corev1.ConfigMapEnvSource)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_ConfigMapEnvSource_To_v1_ConfigMapEnvSource(a.(*core.ConfigMapEnvSource), b.(*corev1.ConfigMapEnvSource), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.ConfigMapKeySelector)(nil), (*core.ConfigMapKeySelector)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_ConfigMapKeySelector_To_core_ConfigMapKeySelector(a.(*v1.ConfigMapKeySelector), b.(*core.ConfigMapKeySelector), scope) + if err := s.AddGeneratedConversionFunc((*corev1.ConfigMapKeySelector)(nil), (*core.ConfigMapKeySelector)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_ConfigMapKeySelector_To_core_ConfigMapKeySelector(a.(*corev1.ConfigMapKeySelector), b.(*core.ConfigMapKeySelector), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.ConfigMapKeySelector)(nil), (*v1.ConfigMapKeySelector)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_ConfigMapKeySelector_To_v1_ConfigMapKeySelector(a.(*core.ConfigMapKeySelector), b.(*v1.ConfigMapKeySelector), scope) + if err := s.AddGeneratedConversionFunc((*core.ConfigMapKeySelector)(nil), (*corev1.ConfigMapKeySelector)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_ConfigMapKeySelector_To_v1_ConfigMapKeySelector(a.(*core.ConfigMapKeySelector), b.(*corev1.ConfigMapKeySelector), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.ConfigMapList)(nil), (*core.ConfigMapList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_ConfigMapList_To_core_ConfigMapList(a.(*v1.ConfigMapList), b.(*core.ConfigMapList), scope) + if err := s.AddGeneratedConversionFunc((*corev1.ConfigMapList)(nil), (*core.ConfigMapList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_ConfigMapList_To_core_ConfigMapList(a.(*corev1.ConfigMapList), b.(*core.ConfigMapList), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.ConfigMapList)(nil), (*v1.ConfigMapList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_ConfigMapList_To_v1_ConfigMapList(a.(*core.ConfigMapList), b.(*v1.ConfigMapList), scope) + if err := s.AddGeneratedConversionFunc((*core.ConfigMapList)(nil), (*corev1.ConfigMapList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_ConfigMapList_To_v1_ConfigMapList(a.(*core.ConfigMapList), b.(*corev1.ConfigMapList), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.ConfigMapNodeConfigSource)(nil), (*core.ConfigMapNodeConfigSource)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_ConfigMapNodeConfigSource_To_core_ConfigMapNodeConfigSource(a.(*v1.ConfigMapNodeConfigSource), b.(*core.ConfigMapNodeConfigSource), scope) + if err := s.AddGeneratedConversionFunc((*corev1.ConfigMapNodeConfigSource)(nil), (*core.ConfigMapNodeConfigSource)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_ConfigMapNodeConfigSource_To_core_ConfigMapNodeConfigSource(a.(*corev1.ConfigMapNodeConfigSource), 
b.(*core.ConfigMapNodeConfigSource), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.ConfigMapNodeConfigSource)(nil), (*v1.ConfigMapNodeConfigSource)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_ConfigMapNodeConfigSource_To_v1_ConfigMapNodeConfigSource(a.(*core.ConfigMapNodeConfigSource), b.(*v1.ConfigMapNodeConfigSource), scope) + if err := s.AddGeneratedConversionFunc((*core.ConfigMapNodeConfigSource)(nil), (*corev1.ConfigMapNodeConfigSource)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_ConfigMapNodeConfigSource_To_v1_ConfigMapNodeConfigSource(a.(*core.ConfigMapNodeConfigSource), b.(*corev1.ConfigMapNodeConfigSource), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.ConfigMapProjection)(nil), (*core.ConfigMapProjection)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_ConfigMapProjection_To_core_ConfigMapProjection(a.(*v1.ConfigMapProjection), b.(*core.ConfigMapProjection), scope) + if err := s.AddGeneratedConversionFunc((*corev1.ConfigMapProjection)(nil), (*core.ConfigMapProjection)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_ConfigMapProjection_To_core_ConfigMapProjection(a.(*corev1.ConfigMapProjection), b.(*core.ConfigMapProjection), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.ConfigMapProjection)(nil), (*v1.ConfigMapProjection)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_ConfigMapProjection_To_v1_ConfigMapProjection(a.(*core.ConfigMapProjection), b.(*v1.ConfigMapProjection), scope) + if err := s.AddGeneratedConversionFunc((*core.ConfigMapProjection)(nil), (*corev1.ConfigMapProjection)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_ConfigMapProjection_To_v1_ConfigMapProjection(a.(*core.ConfigMapProjection), b.(*corev1.ConfigMapProjection), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.ConfigMapVolumeSource)(nil), (*core.ConfigMapVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_ConfigMapVolumeSource_To_core_ConfigMapVolumeSource(a.(*v1.ConfigMapVolumeSource), b.(*core.ConfigMapVolumeSource), scope) + if err := s.AddGeneratedConversionFunc((*corev1.ConfigMapVolumeSource)(nil), (*core.ConfigMapVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_ConfigMapVolumeSource_To_core_ConfigMapVolumeSource(a.(*corev1.ConfigMapVolumeSource), b.(*core.ConfigMapVolumeSource), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.ConfigMapVolumeSource)(nil), (*v1.ConfigMapVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_ConfigMapVolumeSource_To_v1_ConfigMapVolumeSource(a.(*core.ConfigMapVolumeSource), b.(*v1.ConfigMapVolumeSource), scope) + if err := s.AddGeneratedConversionFunc((*core.ConfigMapVolumeSource)(nil), (*corev1.ConfigMapVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_ConfigMapVolumeSource_To_v1_ConfigMapVolumeSource(a.(*core.ConfigMapVolumeSource), b.(*corev1.ConfigMapVolumeSource), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.Container)(nil), (*core.Container)(nil), func(a, b interface{}, scope conversion.Scope) error { - return 
Convert_v1_Container_To_core_Container(a.(*v1.Container), b.(*core.Container), scope) + if err := s.AddGeneratedConversionFunc((*corev1.Container)(nil), (*core.Container)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_Container_To_core_Container(a.(*corev1.Container), b.(*core.Container), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.Container)(nil), (*v1.Container)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_Container_To_v1_Container(a.(*core.Container), b.(*v1.Container), scope) + if err := s.AddGeneratedConversionFunc((*core.Container)(nil), (*corev1.Container)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_Container_To_v1_Container(a.(*core.Container), b.(*corev1.Container), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.ContainerImage)(nil), (*core.ContainerImage)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_ContainerImage_To_core_ContainerImage(a.(*v1.ContainerImage), b.(*core.ContainerImage), scope) + if err := s.AddGeneratedConversionFunc((*corev1.ContainerImage)(nil), (*core.ContainerImage)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_ContainerImage_To_core_ContainerImage(a.(*corev1.ContainerImage), b.(*core.ContainerImage), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.ContainerImage)(nil), (*v1.ContainerImage)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_ContainerImage_To_v1_ContainerImage(a.(*core.ContainerImage), b.(*v1.ContainerImage), scope) + if err := s.AddGeneratedConversionFunc((*core.ContainerImage)(nil), (*corev1.ContainerImage)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_ContainerImage_To_v1_ContainerImage(a.(*core.ContainerImage), b.(*corev1.ContainerImage), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.ContainerPort)(nil), (*core.ContainerPort)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_ContainerPort_To_core_ContainerPort(a.(*v1.ContainerPort), b.(*core.ContainerPort), scope) + if err := s.AddGeneratedConversionFunc((*corev1.ContainerPort)(nil), (*core.ContainerPort)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_ContainerPort_To_core_ContainerPort(a.(*corev1.ContainerPort), b.(*core.ContainerPort), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.ContainerPort)(nil), (*v1.ContainerPort)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_ContainerPort_To_v1_ContainerPort(a.(*core.ContainerPort), b.(*v1.ContainerPort), scope) + if err := s.AddGeneratedConversionFunc((*core.ContainerPort)(nil), (*corev1.ContainerPort)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_ContainerPort_To_v1_ContainerPort(a.(*core.ContainerPort), b.(*corev1.ContainerPort), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.ContainerResizePolicy)(nil), (*core.ContainerResizePolicy)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_ContainerResizePolicy_To_core_ContainerResizePolicy(a.(*v1.ContainerResizePolicy), b.(*core.ContainerResizePolicy), scope) + if err := s.AddGeneratedConversionFunc((*corev1.ContainerResizePolicy)(nil), 
(*core.ContainerResizePolicy)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_ContainerResizePolicy_To_core_ContainerResizePolicy(a.(*corev1.ContainerResizePolicy), b.(*core.ContainerResizePolicy), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.ContainerResizePolicy)(nil), (*v1.ContainerResizePolicy)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_ContainerResizePolicy_To_v1_ContainerResizePolicy(a.(*core.ContainerResizePolicy), b.(*v1.ContainerResizePolicy), scope) + if err := s.AddGeneratedConversionFunc((*core.ContainerResizePolicy)(nil), (*corev1.ContainerResizePolicy)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_ContainerResizePolicy_To_v1_ContainerResizePolicy(a.(*core.ContainerResizePolicy), b.(*corev1.ContainerResizePolicy), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.ContainerState)(nil), (*core.ContainerState)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_ContainerState_To_core_ContainerState(a.(*v1.ContainerState), b.(*core.ContainerState), scope) + if err := s.AddGeneratedConversionFunc((*corev1.ContainerState)(nil), (*core.ContainerState)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_ContainerState_To_core_ContainerState(a.(*corev1.ContainerState), b.(*core.ContainerState), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.ContainerState)(nil), (*v1.ContainerState)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_ContainerState_To_v1_ContainerState(a.(*core.ContainerState), b.(*v1.ContainerState), scope) + if err := s.AddGeneratedConversionFunc((*core.ContainerState)(nil), (*corev1.ContainerState)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_ContainerState_To_v1_ContainerState(a.(*core.ContainerState), b.(*corev1.ContainerState), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.ContainerStateRunning)(nil), (*core.ContainerStateRunning)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_ContainerStateRunning_To_core_ContainerStateRunning(a.(*v1.ContainerStateRunning), b.(*core.ContainerStateRunning), scope) + if err := s.AddGeneratedConversionFunc((*corev1.ContainerStateRunning)(nil), (*core.ContainerStateRunning)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_ContainerStateRunning_To_core_ContainerStateRunning(a.(*corev1.ContainerStateRunning), b.(*core.ContainerStateRunning), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.ContainerStateRunning)(nil), (*v1.ContainerStateRunning)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_ContainerStateRunning_To_v1_ContainerStateRunning(a.(*core.ContainerStateRunning), b.(*v1.ContainerStateRunning), scope) + if err := s.AddGeneratedConversionFunc((*core.ContainerStateRunning)(nil), (*corev1.ContainerStateRunning)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_ContainerStateRunning_To_v1_ContainerStateRunning(a.(*core.ContainerStateRunning), b.(*corev1.ContainerStateRunning), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.ContainerStateTerminated)(nil), (*core.ContainerStateTerminated)(nil), func(a, b interface{}, scope conversion.Scope) error { - 
return Convert_v1_ContainerStateTerminated_To_core_ContainerStateTerminated(a.(*v1.ContainerStateTerminated), b.(*core.ContainerStateTerminated), scope) + if err := s.AddGeneratedConversionFunc((*corev1.ContainerStateTerminated)(nil), (*core.ContainerStateTerminated)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_ContainerStateTerminated_To_core_ContainerStateTerminated(a.(*corev1.ContainerStateTerminated), b.(*core.ContainerStateTerminated), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.ContainerStateTerminated)(nil), (*v1.ContainerStateTerminated)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_ContainerStateTerminated_To_v1_ContainerStateTerminated(a.(*core.ContainerStateTerminated), b.(*v1.ContainerStateTerminated), scope) + if err := s.AddGeneratedConversionFunc((*core.ContainerStateTerminated)(nil), (*corev1.ContainerStateTerminated)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_ContainerStateTerminated_To_v1_ContainerStateTerminated(a.(*core.ContainerStateTerminated), b.(*corev1.ContainerStateTerminated), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.ContainerStateWaiting)(nil), (*core.ContainerStateWaiting)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_ContainerStateWaiting_To_core_ContainerStateWaiting(a.(*v1.ContainerStateWaiting), b.(*core.ContainerStateWaiting), scope) + if err := s.AddGeneratedConversionFunc((*corev1.ContainerStateWaiting)(nil), (*core.ContainerStateWaiting)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_ContainerStateWaiting_To_core_ContainerStateWaiting(a.(*corev1.ContainerStateWaiting), b.(*core.ContainerStateWaiting), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.ContainerStateWaiting)(nil), (*v1.ContainerStateWaiting)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_ContainerStateWaiting_To_v1_ContainerStateWaiting(a.(*core.ContainerStateWaiting), b.(*v1.ContainerStateWaiting), scope) + if err := s.AddGeneratedConversionFunc((*core.ContainerStateWaiting)(nil), (*corev1.ContainerStateWaiting)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_ContainerStateWaiting_To_v1_ContainerStateWaiting(a.(*core.ContainerStateWaiting), b.(*corev1.ContainerStateWaiting), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.ContainerStatus)(nil), (*core.ContainerStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_ContainerStatus_To_core_ContainerStatus(a.(*v1.ContainerStatus), b.(*core.ContainerStatus), scope) + if err := s.AddGeneratedConversionFunc((*corev1.ContainerStatus)(nil), (*core.ContainerStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_ContainerStatus_To_core_ContainerStatus(a.(*corev1.ContainerStatus), b.(*core.ContainerStatus), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.ContainerStatus)(nil), (*v1.ContainerStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_ContainerStatus_To_v1_ContainerStatus(a.(*core.ContainerStatus), b.(*v1.ContainerStatus), scope) + if err := s.AddGeneratedConversionFunc((*core.ContainerStatus)(nil), (*corev1.ContainerStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return 
Convert_core_ContainerStatus_To_v1_ContainerStatus(a.(*core.ContainerStatus), b.(*corev1.ContainerStatus), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.ContainerUser)(nil), (*core.ContainerUser)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_ContainerUser_To_core_ContainerUser(a.(*v1.ContainerUser), b.(*core.ContainerUser), scope) + if err := s.AddGeneratedConversionFunc((*corev1.ContainerUser)(nil), (*core.ContainerUser)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_ContainerUser_To_core_ContainerUser(a.(*corev1.ContainerUser), b.(*core.ContainerUser), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.ContainerUser)(nil), (*v1.ContainerUser)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_ContainerUser_To_v1_ContainerUser(a.(*core.ContainerUser), b.(*v1.ContainerUser), scope) + if err := s.AddGeneratedConversionFunc((*core.ContainerUser)(nil), (*corev1.ContainerUser)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_ContainerUser_To_v1_ContainerUser(a.(*core.ContainerUser), b.(*corev1.ContainerUser), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.DaemonEndpoint)(nil), (*core.DaemonEndpoint)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_DaemonEndpoint_To_core_DaemonEndpoint(a.(*v1.DaemonEndpoint), b.(*core.DaemonEndpoint), scope) + if err := s.AddGeneratedConversionFunc((*corev1.DaemonEndpoint)(nil), (*core.DaemonEndpoint)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_DaemonEndpoint_To_core_DaemonEndpoint(a.(*corev1.DaemonEndpoint), b.(*core.DaemonEndpoint), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.DaemonEndpoint)(nil), (*v1.DaemonEndpoint)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_DaemonEndpoint_To_v1_DaemonEndpoint(a.(*core.DaemonEndpoint), b.(*v1.DaemonEndpoint), scope) + if err := s.AddGeneratedConversionFunc((*core.DaemonEndpoint)(nil), (*corev1.DaemonEndpoint)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_DaemonEndpoint_To_v1_DaemonEndpoint(a.(*core.DaemonEndpoint), b.(*corev1.DaemonEndpoint), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.DownwardAPIProjection)(nil), (*core.DownwardAPIProjection)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_DownwardAPIProjection_To_core_DownwardAPIProjection(a.(*v1.DownwardAPIProjection), b.(*core.DownwardAPIProjection), scope) + if err := s.AddGeneratedConversionFunc((*corev1.DownwardAPIProjection)(nil), (*core.DownwardAPIProjection)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_DownwardAPIProjection_To_core_DownwardAPIProjection(a.(*corev1.DownwardAPIProjection), b.(*core.DownwardAPIProjection), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.DownwardAPIProjection)(nil), (*v1.DownwardAPIProjection)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_DownwardAPIProjection_To_v1_DownwardAPIProjection(a.(*core.DownwardAPIProjection), b.(*v1.DownwardAPIProjection), scope) + if err := s.AddGeneratedConversionFunc((*core.DownwardAPIProjection)(nil), (*corev1.DownwardAPIProjection)(nil), func(a, b interface{}, scope conversion.Scope) error { + return 
Convert_core_DownwardAPIProjection_To_v1_DownwardAPIProjection(a.(*core.DownwardAPIProjection), b.(*corev1.DownwardAPIProjection), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.DownwardAPIVolumeFile)(nil), (*core.DownwardAPIVolumeFile)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_DownwardAPIVolumeFile_To_core_DownwardAPIVolumeFile(a.(*v1.DownwardAPIVolumeFile), b.(*core.DownwardAPIVolumeFile), scope) + if err := s.AddGeneratedConversionFunc((*corev1.DownwardAPIVolumeFile)(nil), (*core.DownwardAPIVolumeFile)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_DownwardAPIVolumeFile_To_core_DownwardAPIVolumeFile(a.(*corev1.DownwardAPIVolumeFile), b.(*core.DownwardAPIVolumeFile), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.DownwardAPIVolumeFile)(nil), (*v1.DownwardAPIVolumeFile)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_DownwardAPIVolumeFile_To_v1_DownwardAPIVolumeFile(a.(*core.DownwardAPIVolumeFile), b.(*v1.DownwardAPIVolumeFile), scope) + if err := s.AddGeneratedConversionFunc((*core.DownwardAPIVolumeFile)(nil), (*corev1.DownwardAPIVolumeFile)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_DownwardAPIVolumeFile_To_v1_DownwardAPIVolumeFile(a.(*core.DownwardAPIVolumeFile), b.(*corev1.DownwardAPIVolumeFile), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.DownwardAPIVolumeSource)(nil), (*core.DownwardAPIVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_DownwardAPIVolumeSource_To_core_DownwardAPIVolumeSource(a.(*v1.DownwardAPIVolumeSource), b.(*core.DownwardAPIVolumeSource), scope) + if err := s.AddGeneratedConversionFunc((*corev1.DownwardAPIVolumeSource)(nil), (*core.DownwardAPIVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_DownwardAPIVolumeSource_To_core_DownwardAPIVolumeSource(a.(*corev1.DownwardAPIVolumeSource), b.(*core.DownwardAPIVolumeSource), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.DownwardAPIVolumeSource)(nil), (*v1.DownwardAPIVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_DownwardAPIVolumeSource_To_v1_DownwardAPIVolumeSource(a.(*core.DownwardAPIVolumeSource), b.(*v1.DownwardAPIVolumeSource), scope) + if err := s.AddGeneratedConversionFunc((*core.DownwardAPIVolumeSource)(nil), (*corev1.DownwardAPIVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_DownwardAPIVolumeSource_To_v1_DownwardAPIVolumeSource(a.(*core.DownwardAPIVolumeSource), b.(*corev1.DownwardAPIVolumeSource), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.EmptyDirVolumeSource)(nil), (*core.EmptyDirVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_EmptyDirVolumeSource_To_core_EmptyDirVolumeSource(a.(*v1.EmptyDirVolumeSource), b.(*core.EmptyDirVolumeSource), scope) + if err := s.AddGeneratedConversionFunc((*corev1.EmptyDirVolumeSource)(nil), (*core.EmptyDirVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_EmptyDirVolumeSource_To_core_EmptyDirVolumeSource(a.(*corev1.EmptyDirVolumeSource), b.(*core.EmptyDirVolumeSource), scope) }); err != nil { return err } - if err := 
s.AddGeneratedConversionFunc((*core.EmptyDirVolumeSource)(nil), (*v1.EmptyDirVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_EmptyDirVolumeSource_To_v1_EmptyDirVolumeSource(a.(*core.EmptyDirVolumeSource), b.(*v1.EmptyDirVolumeSource), scope) + if err := s.AddGeneratedConversionFunc((*core.EmptyDirVolumeSource)(nil), (*corev1.EmptyDirVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_EmptyDirVolumeSource_To_v1_EmptyDirVolumeSource(a.(*core.EmptyDirVolumeSource), b.(*corev1.EmptyDirVolumeSource), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.EndpointAddress)(nil), (*core.EndpointAddress)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_EndpointAddress_To_core_EndpointAddress(a.(*v1.EndpointAddress), b.(*core.EndpointAddress), scope) + if err := s.AddGeneratedConversionFunc((*corev1.EndpointAddress)(nil), (*core.EndpointAddress)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_EndpointAddress_To_core_EndpointAddress(a.(*corev1.EndpointAddress), b.(*core.EndpointAddress), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.EndpointAddress)(nil), (*v1.EndpointAddress)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_EndpointAddress_To_v1_EndpointAddress(a.(*core.EndpointAddress), b.(*v1.EndpointAddress), scope) + if err := s.AddGeneratedConversionFunc((*core.EndpointAddress)(nil), (*corev1.EndpointAddress)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_EndpointAddress_To_v1_EndpointAddress(a.(*core.EndpointAddress), b.(*corev1.EndpointAddress), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.EndpointPort)(nil), (*core.EndpointPort)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_EndpointPort_To_core_EndpointPort(a.(*v1.EndpointPort), b.(*core.EndpointPort), scope) + if err := s.AddGeneratedConversionFunc((*corev1.EndpointPort)(nil), (*core.EndpointPort)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_EndpointPort_To_core_EndpointPort(a.(*corev1.EndpointPort), b.(*core.EndpointPort), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.EndpointPort)(nil), (*v1.EndpointPort)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_EndpointPort_To_v1_EndpointPort(a.(*core.EndpointPort), b.(*v1.EndpointPort), scope) + if err := s.AddGeneratedConversionFunc((*core.EndpointPort)(nil), (*corev1.EndpointPort)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_EndpointPort_To_v1_EndpointPort(a.(*core.EndpointPort), b.(*corev1.EndpointPort), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.EndpointSubset)(nil), (*core.EndpointSubset)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_EndpointSubset_To_core_EndpointSubset(a.(*v1.EndpointSubset), b.(*core.EndpointSubset), scope) + if err := s.AddGeneratedConversionFunc((*corev1.EndpointSubset)(nil), (*core.EndpointSubset)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_EndpointSubset_To_core_EndpointSubset(a.(*corev1.EndpointSubset), b.(*core.EndpointSubset), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.EndpointSubset)(nil), 
(*v1.EndpointSubset)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_EndpointSubset_To_v1_EndpointSubset(a.(*core.EndpointSubset), b.(*v1.EndpointSubset), scope) + if err := s.AddGeneratedConversionFunc((*core.EndpointSubset)(nil), (*corev1.EndpointSubset)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_EndpointSubset_To_v1_EndpointSubset(a.(*core.EndpointSubset), b.(*corev1.EndpointSubset), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.Endpoints)(nil), (*core.Endpoints)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_Endpoints_To_core_Endpoints(a.(*v1.Endpoints), b.(*core.Endpoints), scope) + if err := s.AddGeneratedConversionFunc((*corev1.Endpoints)(nil), (*core.Endpoints)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_Endpoints_To_core_Endpoints(a.(*corev1.Endpoints), b.(*core.Endpoints), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.Endpoints)(nil), (*v1.Endpoints)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_Endpoints_To_v1_Endpoints(a.(*core.Endpoints), b.(*v1.Endpoints), scope) + if err := s.AddGeneratedConversionFunc((*core.Endpoints)(nil), (*corev1.Endpoints)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_Endpoints_To_v1_Endpoints(a.(*core.Endpoints), b.(*corev1.Endpoints), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.EndpointsList)(nil), (*core.EndpointsList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_EndpointsList_To_core_EndpointsList(a.(*v1.EndpointsList), b.(*core.EndpointsList), scope) + if err := s.AddGeneratedConversionFunc((*corev1.EndpointsList)(nil), (*core.EndpointsList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_EndpointsList_To_core_EndpointsList(a.(*corev1.EndpointsList), b.(*core.EndpointsList), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.EndpointsList)(nil), (*v1.EndpointsList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_EndpointsList_To_v1_EndpointsList(a.(*core.EndpointsList), b.(*v1.EndpointsList), scope) + if err := s.AddGeneratedConversionFunc((*core.EndpointsList)(nil), (*corev1.EndpointsList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_EndpointsList_To_v1_EndpointsList(a.(*core.EndpointsList), b.(*corev1.EndpointsList), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.EnvFromSource)(nil), (*core.EnvFromSource)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_EnvFromSource_To_core_EnvFromSource(a.(*v1.EnvFromSource), b.(*core.EnvFromSource), scope) + if err := s.AddGeneratedConversionFunc((*corev1.EnvFromSource)(nil), (*core.EnvFromSource)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_EnvFromSource_To_core_EnvFromSource(a.(*corev1.EnvFromSource), b.(*core.EnvFromSource), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.EnvFromSource)(nil), (*v1.EnvFromSource)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_EnvFromSource_To_v1_EnvFromSource(a.(*core.EnvFromSource), b.(*v1.EnvFromSource), scope) + if err := s.AddGeneratedConversionFunc((*core.EnvFromSource)(nil), 
(*corev1.EnvFromSource)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_EnvFromSource_To_v1_EnvFromSource(a.(*core.EnvFromSource), b.(*corev1.EnvFromSource), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.EnvVar)(nil), (*core.EnvVar)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_EnvVar_To_core_EnvVar(a.(*v1.EnvVar), b.(*core.EnvVar), scope) + if err := s.AddGeneratedConversionFunc((*corev1.EnvVar)(nil), (*core.EnvVar)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_EnvVar_To_core_EnvVar(a.(*corev1.EnvVar), b.(*core.EnvVar), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.EnvVar)(nil), (*v1.EnvVar)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_EnvVar_To_v1_EnvVar(a.(*core.EnvVar), b.(*v1.EnvVar), scope) + if err := s.AddGeneratedConversionFunc((*core.EnvVar)(nil), (*corev1.EnvVar)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_EnvVar_To_v1_EnvVar(a.(*core.EnvVar), b.(*corev1.EnvVar), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.EnvVarSource)(nil), (*core.EnvVarSource)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_EnvVarSource_To_core_EnvVarSource(a.(*v1.EnvVarSource), b.(*core.EnvVarSource), scope) + if err := s.AddGeneratedConversionFunc((*corev1.EnvVarSource)(nil), (*core.EnvVarSource)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_EnvVarSource_To_core_EnvVarSource(a.(*corev1.EnvVarSource), b.(*core.EnvVarSource), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.EnvVarSource)(nil), (*v1.EnvVarSource)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_EnvVarSource_To_v1_EnvVarSource(a.(*core.EnvVarSource), b.(*v1.EnvVarSource), scope) + if err := s.AddGeneratedConversionFunc((*core.EnvVarSource)(nil), (*corev1.EnvVarSource)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_EnvVarSource_To_v1_EnvVarSource(a.(*core.EnvVarSource), b.(*corev1.EnvVarSource), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.EphemeralContainer)(nil), (*core.EphemeralContainer)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_EphemeralContainer_To_core_EphemeralContainer(a.(*v1.EphemeralContainer), b.(*core.EphemeralContainer), scope) + if err := s.AddGeneratedConversionFunc((*corev1.EphemeralContainer)(nil), (*core.EphemeralContainer)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_EphemeralContainer_To_core_EphemeralContainer(a.(*corev1.EphemeralContainer), b.(*core.EphemeralContainer), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.EphemeralContainer)(nil), (*v1.EphemeralContainer)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_EphemeralContainer_To_v1_EphemeralContainer(a.(*core.EphemeralContainer), b.(*v1.EphemeralContainer), scope) + if err := s.AddGeneratedConversionFunc((*core.EphemeralContainer)(nil), (*corev1.EphemeralContainer)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_EphemeralContainer_To_v1_EphemeralContainer(a.(*core.EphemeralContainer), b.(*corev1.EphemeralContainer), scope) }); err != nil { return err } - if err := 
s.AddGeneratedConversionFunc((*v1.EphemeralContainerCommon)(nil), (*core.EphemeralContainerCommon)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_EphemeralContainerCommon_To_core_EphemeralContainerCommon(a.(*v1.EphemeralContainerCommon), b.(*core.EphemeralContainerCommon), scope) + if err := s.AddGeneratedConversionFunc((*corev1.EphemeralContainerCommon)(nil), (*core.EphemeralContainerCommon)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_EphemeralContainerCommon_To_core_EphemeralContainerCommon(a.(*corev1.EphemeralContainerCommon), b.(*core.EphemeralContainerCommon), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.EphemeralContainerCommon)(nil), (*v1.EphemeralContainerCommon)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_EphemeralContainerCommon_To_v1_EphemeralContainerCommon(a.(*core.EphemeralContainerCommon), b.(*v1.EphemeralContainerCommon), scope) + if err := s.AddGeneratedConversionFunc((*core.EphemeralContainerCommon)(nil), (*corev1.EphemeralContainerCommon)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_EphemeralContainerCommon_To_v1_EphemeralContainerCommon(a.(*core.EphemeralContainerCommon), b.(*corev1.EphemeralContainerCommon), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.EphemeralVolumeSource)(nil), (*core.EphemeralVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_EphemeralVolumeSource_To_core_EphemeralVolumeSource(a.(*v1.EphemeralVolumeSource), b.(*core.EphemeralVolumeSource), scope) + if err := s.AddGeneratedConversionFunc((*corev1.EphemeralVolumeSource)(nil), (*core.EphemeralVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_EphemeralVolumeSource_To_core_EphemeralVolumeSource(a.(*corev1.EphemeralVolumeSource), b.(*core.EphemeralVolumeSource), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.EphemeralVolumeSource)(nil), (*v1.EphemeralVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_EphemeralVolumeSource_To_v1_EphemeralVolumeSource(a.(*core.EphemeralVolumeSource), b.(*v1.EphemeralVolumeSource), scope) + if err := s.AddGeneratedConversionFunc((*core.EphemeralVolumeSource)(nil), (*corev1.EphemeralVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_EphemeralVolumeSource_To_v1_EphemeralVolumeSource(a.(*core.EphemeralVolumeSource), b.(*corev1.EphemeralVolumeSource), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.Event)(nil), (*core.Event)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_Event_To_core_Event(a.(*v1.Event), b.(*core.Event), scope) + if err := s.AddGeneratedConversionFunc((*corev1.Event)(nil), (*core.Event)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_Event_To_core_Event(a.(*corev1.Event), b.(*core.Event), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.Event)(nil), (*v1.Event)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_Event_To_v1_Event(a.(*core.Event), b.(*v1.Event), scope) + if err := s.AddGeneratedConversionFunc((*core.Event)(nil), (*corev1.Event)(nil), func(a, b interface{}, scope conversion.Scope) error { + return 
Convert_core_Event_To_v1_Event(a.(*core.Event), b.(*corev1.Event), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.EventList)(nil), (*core.EventList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_EventList_To_core_EventList(a.(*v1.EventList), b.(*core.EventList), scope) + if err := s.AddGeneratedConversionFunc((*corev1.EventList)(nil), (*core.EventList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_EventList_To_core_EventList(a.(*corev1.EventList), b.(*core.EventList), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.EventList)(nil), (*v1.EventList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_EventList_To_v1_EventList(a.(*core.EventList), b.(*v1.EventList), scope) + if err := s.AddGeneratedConversionFunc((*core.EventList)(nil), (*corev1.EventList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_EventList_To_v1_EventList(a.(*core.EventList), b.(*corev1.EventList), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.EventSeries)(nil), (*core.EventSeries)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_EventSeries_To_core_EventSeries(a.(*v1.EventSeries), b.(*core.EventSeries), scope) + if err := s.AddGeneratedConversionFunc((*corev1.EventSeries)(nil), (*core.EventSeries)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_EventSeries_To_core_EventSeries(a.(*corev1.EventSeries), b.(*core.EventSeries), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.EventSeries)(nil), (*v1.EventSeries)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_EventSeries_To_v1_EventSeries(a.(*core.EventSeries), b.(*v1.EventSeries), scope) + if err := s.AddGeneratedConversionFunc((*core.EventSeries)(nil), (*corev1.EventSeries)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_EventSeries_To_v1_EventSeries(a.(*core.EventSeries), b.(*corev1.EventSeries), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.EventSource)(nil), (*core.EventSource)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_EventSource_To_core_EventSource(a.(*v1.EventSource), b.(*core.EventSource), scope) + if err := s.AddGeneratedConversionFunc((*corev1.EventSource)(nil), (*core.EventSource)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_EventSource_To_core_EventSource(a.(*corev1.EventSource), b.(*core.EventSource), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.EventSource)(nil), (*v1.EventSource)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_EventSource_To_v1_EventSource(a.(*core.EventSource), b.(*v1.EventSource), scope) + if err := s.AddGeneratedConversionFunc((*core.EventSource)(nil), (*corev1.EventSource)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_EventSource_To_v1_EventSource(a.(*core.EventSource), b.(*corev1.EventSource), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.ExecAction)(nil), (*core.ExecAction)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_ExecAction_To_core_ExecAction(a.(*v1.ExecAction), b.(*core.ExecAction), scope) + if err := 
s.AddGeneratedConversionFunc((*corev1.ExecAction)(nil), (*core.ExecAction)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_ExecAction_To_core_ExecAction(a.(*corev1.ExecAction), b.(*core.ExecAction), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.ExecAction)(nil), (*v1.ExecAction)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_ExecAction_To_v1_ExecAction(a.(*core.ExecAction), b.(*v1.ExecAction), scope) + if err := s.AddGeneratedConversionFunc((*core.ExecAction)(nil), (*corev1.ExecAction)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_ExecAction_To_v1_ExecAction(a.(*core.ExecAction), b.(*corev1.ExecAction), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.FCVolumeSource)(nil), (*core.FCVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_FCVolumeSource_To_core_FCVolumeSource(a.(*v1.FCVolumeSource), b.(*core.FCVolumeSource), scope) + if err := s.AddGeneratedConversionFunc((*corev1.FCVolumeSource)(nil), (*core.FCVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_FCVolumeSource_To_core_FCVolumeSource(a.(*corev1.FCVolumeSource), b.(*core.FCVolumeSource), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.FCVolumeSource)(nil), (*v1.FCVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_FCVolumeSource_To_v1_FCVolumeSource(a.(*core.FCVolumeSource), b.(*v1.FCVolumeSource), scope) + if err := s.AddGeneratedConversionFunc((*core.FCVolumeSource)(nil), (*corev1.FCVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_FCVolumeSource_To_v1_FCVolumeSource(a.(*core.FCVolumeSource), b.(*corev1.FCVolumeSource), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.FlexPersistentVolumeSource)(nil), (*core.FlexPersistentVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_FlexPersistentVolumeSource_To_core_FlexPersistentVolumeSource(a.(*v1.FlexPersistentVolumeSource), b.(*core.FlexPersistentVolumeSource), scope) + if err := s.AddGeneratedConversionFunc((*corev1.FlexPersistentVolumeSource)(nil), (*core.FlexPersistentVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_FlexPersistentVolumeSource_To_core_FlexPersistentVolumeSource(a.(*corev1.FlexPersistentVolumeSource), b.(*core.FlexPersistentVolumeSource), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.FlexPersistentVolumeSource)(nil), (*v1.FlexPersistentVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_FlexPersistentVolumeSource_To_v1_FlexPersistentVolumeSource(a.(*core.FlexPersistentVolumeSource), b.(*v1.FlexPersistentVolumeSource), scope) + if err := s.AddGeneratedConversionFunc((*core.FlexPersistentVolumeSource)(nil), (*corev1.FlexPersistentVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_FlexPersistentVolumeSource_To_v1_FlexPersistentVolumeSource(a.(*core.FlexPersistentVolumeSource), b.(*corev1.FlexPersistentVolumeSource), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.FlexVolumeSource)(nil), (*core.FlexVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { - return 
Convert_v1_FlexVolumeSource_To_core_FlexVolumeSource(a.(*v1.FlexVolumeSource), b.(*core.FlexVolumeSource), scope) + if err := s.AddGeneratedConversionFunc((*corev1.FlexVolumeSource)(nil), (*core.FlexVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_FlexVolumeSource_To_core_FlexVolumeSource(a.(*corev1.FlexVolumeSource), b.(*core.FlexVolumeSource), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.FlexVolumeSource)(nil), (*v1.FlexVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_FlexVolumeSource_To_v1_FlexVolumeSource(a.(*core.FlexVolumeSource), b.(*v1.FlexVolumeSource), scope) + if err := s.AddGeneratedConversionFunc((*core.FlexVolumeSource)(nil), (*corev1.FlexVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_FlexVolumeSource_To_v1_FlexVolumeSource(a.(*core.FlexVolumeSource), b.(*corev1.FlexVolumeSource), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.FlockerVolumeSource)(nil), (*core.FlockerVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_FlockerVolumeSource_To_core_FlockerVolumeSource(a.(*v1.FlockerVolumeSource), b.(*core.FlockerVolumeSource), scope) + if err := s.AddGeneratedConversionFunc((*corev1.FlockerVolumeSource)(nil), (*core.FlockerVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_FlockerVolumeSource_To_core_FlockerVolumeSource(a.(*corev1.FlockerVolumeSource), b.(*core.FlockerVolumeSource), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.FlockerVolumeSource)(nil), (*v1.FlockerVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_FlockerVolumeSource_To_v1_FlockerVolumeSource(a.(*core.FlockerVolumeSource), b.(*v1.FlockerVolumeSource), scope) + if err := s.AddGeneratedConversionFunc((*core.FlockerVolumeSource)(nil), (*corev1.FlockerVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_FlockerVolumeSource_To_v1_FlockerVolumeSource(a.(*core.FlockerVolumeSource), b.(*corev1.FlockerVolumeSource), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.GCEPersistentDiskVolumeSource)(nil), (*core.GCEPersistentDiskVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_GCEPersistentDiskVolumeSource_To_core_GCEPersistentDiskVolumeSource(a.(*v1.GCEPersistentDiskVolumeSource), b.(*core.GCEPersistentDiskVolumeSource), scope) + if err := s.AddGeneratedConversionFunc((*corev1.GCEPersistentDiskVolumeSource)(nil), (*core.GCEPersistentDiskVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_GCEPersistentDiskVolumeSource_To_core_GCEPersistentDiskVolumeSource(a.(*corev1.GCEPersistentDiskVolumeSource), b.(*core.GCEPersistentDiskVolumeSource), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.GCEPersistentDiskVolumeSource)(nil), (*v1.GCEPersistentDiskVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_GCEPersistentDiskVolumeSource_To_v1_GCEPersistentDiskVolumeSource(a.(*core.GCEPersistentDiskVolumeSource), b.(*v1.GCEPersistentDiskVolumeSource), scope) + if err := s.AddGeneratedConversionFunc((*core.GCEPersistentDiskVolumeSource)(nil), (*corev1.GCEPersistentDiskVolumeSource)(nil), func(a, b interface{}, 
scope conversion.Scope) error { + return Convert_core_GCEPersistentDiskVolumeSource_To_v1_GCEPersistentDiskVolumeSource(a.(*core.GCEPersistentDiskVolumeSource), b.(*corev1.GCEPersistentDiskVolumeSource), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.GRPCAction)(nil), (*core.GRPCAction)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_GRPCAction_To_core_GRPCAction(a.(*v1.GRPCAction), b.(*core.GRPCAction), scope) + if err := s.AddGeneratedConversionFunc((*corev1.GRPCAction)(nil), (*core.GRPCAction)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_GRPCAction_To_core_GRPCAction(a.(*corev1.GRPCAction), b.(*core.GRPCAction), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.GRPCAction)(nil), (*v1.GRPCAction)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_GRPCAction_To_v1_GRPCAction(a.(*core.GRPCAction), b.(*v1.GRPCAction), scope) + if err := s.AddGeneratedConversionFunc((*core.GRPCAction)(nil), (*corev1.GRPCAction)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_GRPCAction_To_v1_GRPCAction(a.(*core.GRPCAction), b.(*corev1.GRPCAction), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.GitRepoVolumeSource)(nil), (*core.GitRepoVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_GitRepoVolumeSource_To_core_GitRepoVolumeSource(a.(*v1.GitRepoVolumeSource), b.(*core.GitRepoVolumeSource), scope) + if err := s.AddGeneratedConversionFunc((*corev1.GitRepoVolumeSource)(nil), (*core.GitRepoVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_GitRepoVolumeSource_To_core_GitRepoVolumeSource(a.(*corev1.GitRepoVolumeSource), b.(*core.GitRepoVolumeSource), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.GitRepoVolumeSource)(nil), (*v1.GitRepoVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_GitRepoVolumeSource_To_v1_GitRepoVolumeSource(a.(*core.GitRepoVolumeSource), b.(*v1.GitRepoVolumeSource), scope) + if err := s.AddGeneratedConversionFunc((*core.GitRepoVolumeSource)(nil), (*corev1.GitRepoVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_GitRepoVolumeSource_To_v1_GitRepoVolumeSource(a.(*core.GitRepoVolumeSource), b.(*corev1.GitRepoVolumeSource), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.GlusterfsPersistentVolumeSource)(nil), (*core.GlusterfsPersistentVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_GlusterfsPersistentVolumeSource_To_core_GlusterfsPersistentVolumeSource(a.(*v1.GlusterfsPersistentVolumeSource), b.(*core.GlusterfsPersistentVolumeSource), scope) + if err := s.AddGeneratedConversionFunc((*corev1.GlusterfsPersistentVolumeSource)(nil), (*core.GlusterfsPersistentVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_GlusterfsPersistentVolumeSource_To_core_GlusterfsPersistentVolumeSource(a.(*corev1.GlusterfsPersistentVolumeSource), b.(*core.GlusterfsPersistentVolumeSource), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.GlusterfsPersistentVolumeSource)(nil), (*v1.GlusterfsPersistentVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { - return 
Convert_core_GlusterfsPersistentVolumeSource_To_v1_GlusterfsPersistentVolumeSource(a.(*core.GlusterfsPersistentVolumeSource), b.(*v1.GlusterfsPersistentVolumeSource), scope) + if err := s.AddGeneratedConversionFunc((*core.GlusterfsPersistentVolumeSource)(nil), (*corev1.GlusterfsPersistentVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_GlusterfsPersistentVolumeSource_To_v1_GlusterfsPersistentVolumeSource(a.(*core.GlusterfsPersistentVolumeSource), b.(*corev1.GlusterfsPersistentVolumeSource), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.GlusterfsVolumeSource)(nil), (*core.GlusterfsVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_GlusterfsVolumeSource_To_core_GlusterfsVolumeSource(a.(*v1.GlusterfsVolumeSource), b.(*core.GlusterfsVolumeSource), scope) + if err := s.AddGeneratedConversionFunc((*corev1.GlusterfsVolumeSource)(nil), (*core.GlusterfsVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_GlusterfsVolumeSource_To_core_GlusterfsVolumeSource(a.(*corev1.GlusterfsVolumeSource), b.(*core.GlusterfsVolumeSource), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.GlusterfsVolumeSource)(nil), (*v1.GlusterfsVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_GlusterfsVolumeSource_To_v1_GlusterfsVolumeSource(a.(*core.GlusterfsVolumeSource), b.(*v1.GlusterfsVolumeSource), scope) + if err := s.AddGeneratedConversionFunc((*core.GlusterfsVolumeSource)(nil), (*corev1.GlusterfsVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_GlusterfsVolumeSource_To_v1_GlusterfsVolumeSource(a.(*core.GlusterfsVolumeSource), b.(*corev1.GlusterfsVolumeSource), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.HTTPGetAction)(nil), (*core.HTTPGetAction)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_HTTPGetAction_To_core_HTTPGetAction(a.(*v1.HTTPGetAction), b.(*core.HTTPGetAction), scope) + if err := s.AddGeneratedConversionFunc((*corev1.HTTPGetAction)(nil), (*core.HTTPGetAction)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_HTTPGetAction_To_core_HTTPGetAction(a.(*corev1.HTTPGetAction), b.(*core.HTTPGetAction), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.HTTPGetAction)(nil), (*v1.HTTPGetAction)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_HTTPGetAction_To_v1_HTTPGetAction(a.(*core.HTTPGetAction), b.(*v1.HTTPGetAction), scope) + if err := s.AddGeneratedConversionFunc((*core.HTTPGetAction)(nil), (*corev1.HTTPGetAction)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_HTTPGetAction_To_v1_HTTPGetAction(a.(*core.HTTPGetAction), b.(*corev1.HTTPGetAction), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.HTTPHeader)(nil), (*core.HTTPHeader)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_HTTPHeader_To_core_HTTPHeader(a.(*v1.HTTPHeader), b.(*core.HTTPHeader), scope) + if err := s.AddGeneratedConversionFunc((*corev1.HTTPHeader)(nil), (*core.HTTPHeader)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_HTTPHeader_To_core_HTTPHeader(a.(*corev1.HTTPHeader), b.(*core.HTTPHeader), scope) }); err != nil { return err } - if err := 
s.AddGeneratedConversionFunc((*core.HTTPHeader)(nil), (*v1.HTTPHeader)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_HTTPHeader_To_v1_HTTPHeader(a.(*core.HTTPHeader), b.(*v1.HTTPHeader), scope) + if err := s.AddGeneratedConversionFunc((*core.HTTPHeader)(nil), (*corev1.HTTPHeader)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_HTTPHeader_To_v1_HTTPHeader(a.(*core.HTTPHeader), b.(*corev1.HTTPHeader), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.HostAlias)(nil), (*core.HostAlias)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_HostAlias_To_core_HostAlias(a.(*v1.HostAlias), b.(*core.HostAlias), scope) + if err := s.AddGeneratedConversionFunc((*corev1.HostAlias)(nil), (*core.HostAlias)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_HostAlias_To_core_HostAlias(a.(*corev1.HostAlias), b.(*core.HostAlias), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.HostAlias)(nil), (*v1.HostAlias)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_HostAlias_To_v1_HostAlias(a.(*core.HostAlias), b.(*v1.HostAlias), scope) + if err := s.AddGeneratedConversionFunc((*core.HostAlias)(nil), (*corev1.HostAlias)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_HostAlias_To_v1_HostAlias(a.(*core.HostAlias), b.(*corev1.HostAlias), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.HostIP)(nil), (*core.HostIP)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_HostIP_To_core_HostIP(a.(*v1.HostIP), b.(*core.HostIP), scope) + if err := s.AddGeneratedConversionFunc((*corev1.HostIP)(nil), (*core.HostIP)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_HostIP_To_core_HostIP(a.(*corev1.HostIP), b.(*core.HostIP), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.HostIP)(nil), (*v1.HostIP)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_HostIP_To_v1_HostIP(a.(*core.HostIP), b.(*v1.HostIP), scope) + if err := s.AddGeneratedConversionFunc((*core.HostIP)(nil), (*corev1.HostIP)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_HostIP_To_v1_HostIP(a.(*core.HostIP), b.(*corev1.HostIP), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.HostPathVolumeSource)(nil), (*core.HostPathVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_HostPathVolumeSource_To_core_HostPathVolumeSource(a.(*v1.HostPathVolumeSource), b.(*core.HostPathVolumeSource), scope) + if err := s.AddGeneratedConversionFunc((*corev1.HostPathVolumeSource)(nil), (*core.HostPathVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_HostPathVolumeSource_To_core_HostPathVolumeSource(a.(*corev1.HostPathVolumeSource), b.(*core.HostPathVolumeSource), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.HostPathVolumeSource)(nil), (*v1.HostPathVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_HostPathVolumeSource_To_v1_HostPathVolumeSource(a.(*core.HostPathVolumeSource), b.(*v1.HostPathVolumeSource), scope) + if err := s.AddGeneratedConversionFunc((*core.HostPathVolumeSource)(nil), (*corev1.HostPathVolumeSource)(nil), 
func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_HostPathVolumeSource_To_v1_HostPathVolumeSource(a.(*core.HostPathVolumeSource), b.(*corev1.HostPathVolumeSource), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.ISCSIPersistentVolumeSource)(nil), (*core.ISCSIPersistentVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_ISCSIPersistentVolumeSource_To_core_ISCSIPersistentVolumeSource(a.(*v1.ISCSIPersistentVolumeSource), b.(*core.ISCSIPersistentVolumeSource), scope) + if err := s.AddGeneratedConversionFunc((*corev1.ISCSIPersistentVolumeSource)(nil), (*core.ISCSIPersistentVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_ISCSIPersistentVolumeSource_To_core_ISCSIPersistentVolumeSource(a.(*corev1.ISCSIPersistentVolumeSource), b.(*core.ISCSIPersistentVolumeSource), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.ISCSIPersistentVolumeSource)(nil), (*v1.ISCSIPersistentVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_ISCSIPersistentVolumeSource_To_v1_ISCSIPersistentVolumeSource(a.(*core.ISCSIPersistentVolumeSource), b.(*v1.ISCSIPersistentVolumeSource), scope) + if err := s.AddGeneratedConversionFunc((*core.ISCSIPersistentVolumeSource)(nil), (*corev1.ISCSIPersistentVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_ISCSIPersistentVolumeSource_To_v1_ISCSIPersistentVolumeSource(a.(*core.ISCSIPersistentVolumeSource), b.(*corev1.ISCSIPersistentVolumeSource), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.ISCSIVolumeSource)(nil), (*core.ISCSIVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_ISCSIVolumeSource_To_core_ISCSIVolumeSource(a.(*v1.ISCSIVolumeSource), b.(*core.ISCSIVolumeSource), scope) + if err := s.AddGeneratedConversionFunc((*corev1.ISCSIVolumeSource)(nil), (*core.ISCSIVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_ISCSIVolumeSource_To_core_ISCSIVolumeSource(a.(*corev1.ISCSIVolumeSource), b.(*core.ISCSIVolumeSource), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.ISCSIVolumeSource)(nil), (*v1.ISCSIVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_ISCSIVolumeSource_To_v1_ISCSIVolumeSource(a.(*core.ISCSIVolumeSource), b.(*v1.ISCSIVolumeSource), scope) + if err := s.AddGeneratedConversionFunc((*core.ISCSIVolumeSource)(nil), (*corev1.ISCSIVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_ISCSIVolumeSource_To_v1_ISCSIVolumeSource(a.(*core.ISCSIVolumeSource), b.(*corev1.ISCSIVolumeSource), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.ImageVolumeSource)(nil), (*core.ImageVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_ImageVolumeSource_To_core_ImageVolumeSource(a.(*v1.ImageVolumeSource), b.(*core.ImageVolumeSource), scope) + if err := s.AddGeneratedConversionFunc((*corev1.ImageVolumeSource)(nil), (*core.ImageVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_ImageVolumeSource_To_core_ImageVolumeSource(a.(*corev1.ImageVolumeSource), b.(*core.ImageVolumeSource), scope) }); err != nil { return err } - if err := 
s.AddGeneratedConversionFunc((*core.ImageVolumeSource)(nil), (*v1.ImageVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_ImageVolumeSource_To_v1_ImageVolumeSource(a.(*core.ImageVolumeSource), b.(*v1.ImageVolumeSource), scope) + if err := s.AddGeneratedConversionFunc((*core.ImageVolumeSource)(nil), (*corev1.ImageVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_ImageVolumeSource_To_v1_ImageVolumeSource(a.(*core.ImageVolumeSource), b.(*corev1.ImageVolumeSource), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.KeyToPath)(nil), (*core.KeyToPath)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_KeyToPath_To_core_KeyToPath(a.(*v1.KeyToPath), b.(*core.KeyToPath), scope) + if err := s.AddGeneratedConversionFunc((*corev1.KeyToPath)(nil), (*core.KeyToPath)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_KeyToPath_To_core_KeyToPath(a.(*corev1.KeyToPath), b.(*core.KeyToPath), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.KeyToPath)(nil), (*v1.KeyToPath)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_KeyToPath_To_v1_KeyToPath(a.(*core.KeyToPath), b.(*v1.KeyToPath), scope) + if err := s.AddGeneratedConversionFunc((*core.KeyToPath)(nil), (*corev1.KeyToPath)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_KeyToPath_To_v1_KeyToPath(a.(*core.KeyToPath), b.(*corev1.KeyToPath), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.Lifecycle)(nil), (*core.Lifecycle)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_Lifecycle_To_core_Lifecycle(a.(*v1.Lifecycle), b.(*core.Lifecycle), scope) + if err := s.AddGeneratedConversionFunc((*corev1.Lifecycle)(nil), (*core.Lifecycle)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_Lifecycle_To_core_Lifecycle(a.(*corev1.Lifecycle), b.(*core.Lifecycle), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.Lifecycle)(nil), (*v1.Lifecycle)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_Lifecycle_To_v1_Lifecycle(a.(*core.Lifecycle), b.(*v1.Lifecycle), scope) + if err := s.AddGeneratedConversionFunc((*core.Lifecycle)(nil), (*corev1.Lifecycle)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_Lifecycle_To_v1_Lifecycle(a.(*core.Lifecycle), b.(*corev1.Lifecycle), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.LifecycleHandler)(nil), (*core.LifecycleHandler)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_LifecycleHandler_To_core_LifecycleHandler(a.(*v1.LifecycleHandler), b.(*core.LifecycleHandler), scope) + if err := s.AddGeneratedConversionFunc((*corev1.LifecycleHandler)(nil), (*core.LifecycleHandler)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_LifecycleHandler_To_core_LifecycleHandler(a.(*corev1.LifecycleHandler), b.(*core.LifecycleHandler), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.LifecycleHandler)(nil), (*v1.LifecycleHandler)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_LifecycleHandler_To_v1_LifecycleHandler(a.(*core.LifecycleHandler), b.(*v1.LifecycleHandler), scope) + if err := 
s.AddGeneratedConversionFunc((*core.LifecycleHandler)(nil), (*corev1.LifecycleHandler)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_LifecycleHandler_To_v1_LifecycleHandler(a.(*core.LifecycleHandler), b.(*corev1.LifecycleHandler), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.LimitRange)(nil), (*core.LimitRange)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_LimitRange_To_core_LimitRange(a.(*v1.LimitRange), b.(*core.LimitRange), scope) + if err := s.AddGeneratedConversionFunc((*corev1.LimitRange)(nil), (*core.LimitRange)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_LimitRange_To_core_LimitRange(a.(*corev1.LimitRange), b.(*core.LimitRange), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.LimitRange)(nil), (*v1.LimitRange)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_LimitRange_To_v1_LimitRange(a.(*core.LimitRange), b.(*v1.LimitRange), scope) + if err := s.AddGeneratedConversionFunc((*core.LimitRange)(nil), (*corev1.LimitRange)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_LimitRange_To_v1_LimitRange(a.(*core.LimitRange), b.(*corev1.LimitRange), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.LimitRangeItem)(nil), (*core.LimitRangeItem)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_LimitRangeItem_To_core_LimitRangeItem(a.(*v1.LimitRangeItem), b.(*core.LimitRangeItem), scope) + if err := s.AddGeneratedConversionFunc((*corev1.LimitRangeItem)(nil), (*core.LimitRangeItem)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_LimitRangeItem_To_core_LimitRangeItem(a.(*corev1.LimitRangeItem), b.(*core.LimitRangeItem), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.LimitRangeItem)(nil), (*v1.LimitRangeItem)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_LimitRangeItem_To_v1_LimitRangeItem(a.(*core.LimitRangeItem), b.(*v1.LimitRangeItem), scope) + if err := s.AddGeneratedConversionFunc((*core.LimitRangeItem)(nil), (*corev1.LimitRangeItem)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_LimitRangeItem_To_v1_LimitRangeItem(a.(*core.LimitRangeItem), b.(*corev1.LimitRangeItem), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.LimitRangeList)(nil), (*core.LimitRangeList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_LimitRangeList_To_core_LimitRangeList(a.(*v1.LimitRangeList), b.(*core.LimitRangeList), scope) + if err := s.AddGeneratedConversionFunc((*corev1.LimitRangeList)(nil), (*core.LimitRangeList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_LimitRangeList_To_core_LimitRangeList(a.(*corev1.LimitRangeList), b.(*core.LimitRangeList), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.LimitRangeList)(nil), (*v1.LimitRangeList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_LimitRangeList_To_v1_LimitRangeList(a.(*core.LimitRangeList), b.(*v1.LimitRangeList), scope) + if err := s.AddGeneratedConversionFunc((*core.LimitRangeList)(nil), (*corev1.LimitRangeList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return 
Convert_core_LimitRangeList_To_v1_LimitRangeList(a.(*core.LimitRangeList), b.(*corev1.LimitRangeList), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.LimitRangeSpec)(nil), (*core.LimitRangeSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_LimitRangeSpec_To_core_LimitRangeSpec(a.(*v1.LimitRangeSpec), b.(*core.LimitRangeSpec), scope) + if err := s.AddGeneratedConversionFunc((*corev1.LimitRangeSpec)(nil), (*core.LimitRangeSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_LimitRangeSpec_To_core_LimitRangeSpec(a.(*corev1.LimitRangeSpec), b.(*core.LimitRangeSpec), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.LimitRangeSpec)(nil), (*v1.LimitRangeSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_LimitRangeSpec_To_v1_LimitRangeSpec(a.(*core.LimitRangeSpec), b.(*v1.LimitRangeSpec), scope) + if err := s.AddGeneratedConversionFunc((*core.LimitRangeSpec)(nil), (*corev1.LimitRangeSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_LimitRangeSpec_To_v1_LimitRangeSpec(a.(*core.LimitRangeSpec), b.(*corev1.LimitRangeSpec), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.LinuxContainerUser)(nil), (*core.LinuxContainerUser)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_LinuxContainerUser_To_core_LinuxContainerUser(a.(*v1.LinuxContainerUser), b.(*core.LinuxContainerUser), scope) + if err := s.AddGeneratedConversionFunc((*corev1.LinuxContainerUser)(nil), (*core.LinuxContainerUser)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_LinuxContainerUser_To_core_LinuxContainerUser(a.(*corev1.LinuxContainerUser), b.(*core.LinuxContainerUser), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.LinuxContainerUser)(nil), (*v1.LinuxContainerUser)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_LinuxContainerUser_To_v1_LinuxContainerUser(a.(*core.LinuxContainerUser), b.(*v1.LinuxContainerUser), scope) + if err := s.AddGeneratedConversionFunc((*core.LinuxContainerUser)(nil), (*corev1.LinuxContainerUser)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_LinuxContainerUser_To_v1_LinuxContainerUser(a.(*core.LinuxContainerUser), b.(*corev1.LinuxContainerUser), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.List)(nil), (*core.List)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_List_To_core_List(a.(*v1.List), b.(*core.List), scope) + if err := s.AddGeneratedConversionFunc((*corev1.List)(nil), (*core.List)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_List_To_core_List(a.(*corev1.List), b.(*core.List), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.List)(nil), (*v1.List)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_List_To_v1_List(a.(*core.List), b.(*v1.List), scope) + if err := s.AddGeneratedConversionFunc((*core.List)(nil), (*corev1.List)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_List_To_v1_List(a.(*core.List), b.(*corev1.List), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.LoadBalancerIngress)(nil), (*core.LoadBalancerIngress)(nil), func(a, b 
interface{}, scope conversion.Scope) error { - return Convert_v1_LoadBalancerIngress_To_core_LoadBalancerIngress(a.(*v1.LoadBalancerIngress), b.(*core.LoadBalancerIngress), scope) + if err := s.AddGeneratedConversionFunc((*corev1.LoadBalancerIngress)(nil), (*core.LoadBalancerIngress)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_LoadBalancerIngress_To_core_LoadBalancerIngress(a.(*corev1.LoadBalancerIngress), b.(*core.LoadBalancerIngress), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.LoadBalancerIngress)(nil), (*v1.LoadBalancerIngress)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_LoadBalancerIngress_To_v1_LoadBalancerIngress(a.(*core.LoadBalancerIngress), b.(*v1.LoadBalancerIngress), scope) + if err := s.AddGeneratedConversionFunc((*core.LoadBalancerIngress)(nil), (*corev1.LoadBalancerIngress)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_LoadBalancerIngress_To_v1_LoadBalancerIngress(a.(*core.LoadBalancerIngress), b.(*corev1.LoadBalancerIngress), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.LoadBalancerStatus)(nil), (*core.LoadBalancerStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_LoadBalancerStatus_To_core_LoadBalancerStatus(a.(*v1.LoadBalancerStatus), b.(*core.LoadBalancerStatus), scope) + if err := s.AddGeneratedConversionFunc((*corev1.LoadBalancerStatus)(nil), (*core.LoadBalancerStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_LoadBalancerStatus_To_core_LoadBalancerStatus(a.(*corev1.LoadBalancerStatus), b.(*core.LoadBalancerStatus), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.LoadBalancerStatus)(nil), (*v1.LoadBalancerStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_LoadBalancerStatus_To_v1_LoadBalancerStatus(a.(*core.LoadBalancerStatus), b.(*v1.LoadBalancerStatus), scope) + if err := s.AddGeneratedConversionFunc((*core.LoadBalancerStatus)(nil), (*corev1.LoadBalancerStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_LoadBalancerStatus_To_v1_LoadBalancerStatus(a.(*core.LoadBalancerStatus), b.(*corev1.LoadBalancerStatus), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.LocalObjectReference)(nil), (*core.LocalObjectReference)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_LocalObjectReference_To_core_LocalObjectReference(a.(*v1.LocalObjectReference), b.(*core.LocalObjectReference), scope) + if err := s.AddGeneratedConversionFunc((*corev1.LocalObjectReference)(nil), (*core.LocalObjectReference)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_LocalObjectReference_To_core_LocalObjectReference(a.(*corev1.LocalObjectReference), b.(*core.LocalObjectReference), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.LocalObjectReference)(nil), (*v1.LocalObjectReference)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_LocalObjectReference_To_v1_LocalObjectReference(a.(*core.LocalObjectReference), b.(*v1.LocalObjectReference), scope) + if err := s.AddGeneratedConversionFunc((*core.LocalObjectReference)(nil), (*corev1.LocalObjectReference)(nil), func(a, b interface{}, scope conversion.Scope) error { + return 
Convert_core_LocalObjectReference_To_v1_LocalObjectReference(a.(*core.LocalObjectReference), b.(*corev1.LocalObjectReference), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.LocalVolumeSource)(nil), (*core.LocalVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_LocalVolumeSource_To_core_LocalVolumeSource(a.(*v1.LocalVolumeSource), b.(*core.LocalVolumeSource), scope) + if err := s.AddGeneratedConversionFunc((*corev1.LocalVolumeSource)(nil), (*core.LocalVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_LocalVolumeSource_To_core_LocalVolumeSource(a.(*corev1.LocalVolumeSource), b.(*core.LocalVolumeSource), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.LocalVolumeSource)(nil), (*v1.LocalVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_LocalVolumeSource_To_v1_LocalVolumeSource(a.(*core.LocalVolumeSource), b.(*v1.LocalVolumeSource), scope) + if err := s.AddGeneratedConversionFunc((*core.LocalVolumeSource)(nil), (*corev1.LocalVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_LocalVolumeSource_To_v1_LocalVolumeSource(a.(*core.LocalVolumeSource), b.(*corev1.LocalVolumeSource), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.ModifyVolumeStatus)(nil), (*core.ModifyVolumeStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_ModifyVolumeStatus_To_core_ModifyVolumeStatus(a.(*v1.ModifyVolumeStatus), b.(*core.ModifyVolumeStatus), scope) + if err := s.AddGeneratedConversionFunc((*corev1.ModifyVolumeStatus)(nil), (*core.ModifyVolumeStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_ModifyVolumeStatus_To_core_ModifyVolumeStatus(a.(*corev1.ModifyVolumeStatus), b.(*core.ModifyVolumeStatus), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.ModifyVolumeStatus)(nil), (*v1.ModifyVolumeStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_ModifyVolumeStatus_To_v1_ModifyVolumeStatus(a.(*core.ModifyVolumeStatus), b.(*v1.ModifyVolumeStatus), scope) + if err := s.AddGeneratedConversionFunc((*core.ModifyVolumeStatus)(nil), (*corev1.ModifyVolumeStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_ModifyVolumeStatus_To_v1_ModifyVolumeStatus(a.(*core.ModifyVolumeStatus), b.(*corev1.ModifyVolumeStatus), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.NFSVolumeSource)(nil), (*core.NFSVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_NFSVolumeSource_To_core_NFSVolumeSource(a.(*v1.NFSVolumeSource), b.(*core.NFSVolumeSource), scope) + if err := s.AddGeneratedConversionFunc((*corev1.NFSVolumeSource)(nil), (*core.NFSVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_NFSVolumeSource_To_core_NFSVolumeSource(a.(*corev1.NFSVolumeSource), b.(*core.NFSVolumeSource), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.NFSVolumeSource)(nil), (*v1.NFSVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_NFSVolumeSource_To_v1_NFSVolumeSource(a.(*core.NFSVolumeSource), b.(*v1.NFSVolumeSource), scope) + if err := s.AddGeneratedConversionFunc((*core.NFSVolumeSource)(nil), 
(*corev1.NFSVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_NFSVolumeSource_To_v1_NFSVolumeSource(a.(*core.NFSVolumeSource), b.(*corev1.NFSVolumeSource), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.Namespace)(nil), (*core.Namespace)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_Namespace_To_core_Namespace(a.(*v1.Namespace), b.(*core.Namespace), scope) + if err := s.AddGeneratedConversionFunc((*corev1.Namespace)(nil), (*core.Namespace)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_Namespace_To_core_Namespace(a.(*corev1.Namespace), b.(*core.Namespace), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.Namespace)(nil), (*v1.Namespace)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_Namespace_To_v1_Namespace(a.(*core.Namespace), b.(*v1.Namespace), scope) + if err := s.AddGeneratedConversionFunc((*core.Namespace)(nil), (*corev1.Namespace)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_Namespace_To_v1_Namespace(a.(*core.Namespace), b.(*corev1.Namespace), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.NamespaceCondition)(nil), (*core.NamespaceCondition)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_NamespaceCondition_To_core_NamespaceCondition(a.(*v1.NamespaceCondition), b.(*core.NamespaceCondition), scope) + if err := s.AddGeneratedConversionFunc((*corev1.NamespaceCondition)(nil), (*core.NamespaceCondition)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_NamespaceCondition_To_core_NamespaceCondition(a.(*corev1.NamespaceCondition), b.(*core.NamespaceCondition), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.NamespaceCondition)(nil), (*v1.NamespaceCondition)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_NamespaceCondition_To_v1_NamespaceCondition(a.(*core.NamespaceCondition), b.(*v1.NamespaceCondition), scope) + if err := s.AddGeneratedConversionFunc((*core.NamespaceCondition)(nil), (*corev1.NamespaceCondition)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_NamespaceCondition_To_v1_NamespaceCondition(a.(*core.NamespaceCondition), b.(*corev1.NamespaceCondition), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.NamespaceList)(nil), (*core.NamespaceList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_NamespaceList_To_core_NamespaceList(a.(*v1.NamespaceList), b.(*core.NamespaceList), scope) + if err := s.AddGeneratedConversionFunc((*corev1.NamespaceList)(nil), (*core.NamespaceList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_NamespaceList_To_core_NamespaceList(a.(*corev1.NamespaceList), b.(*core.NamespaceList), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.NamespaceList)(nil), (*v1.NamespaceList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_NamespaceList_To_v1_NamespaceList(a.(*core.NamespaceList), b.(*v1.NamespaceList), scope) + if err := s.AddGeneratedConversionFunc((*core.NamespaceList)(nil), (*corev1.NamespaceList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return 
Convert_core_NamespaceList_To_v1_NamespaceList(a.(*core.NamespaceList), b.(*corev1.NamespaceList), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.NamespaceSpec)(nil), (*core.NamespaceSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_NamespaceSpec_To_core_NamespaceSpec(a.(*v1.NamespaceSpec), b.(*core.NamespaceSpec), scope) + if err := s.AddGeneratedConversionFunc((*corev1.NamespaceSpec)(nil), (*core.NamespaceSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_NamespaceSpec_To_core_NamespaceSpec(a.(*corev1.NamespaceSpec), b.(*core.NamespaceSpec), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.NamespaceSpec)(nil), (*v1.NamespaceSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_NamespaceSpec_To_v1_NamespaceSpec(a.(*core.NamespaceSpec), b.(*v1.NamespaceSpec), scope) + if err := s.AddGeneratedConversionFunc((*core.NamespaceSpec)(nil), (*corev1.NamespaceSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_NamespaceSpec_To_v1_NamespaceSpec(a.(*core.NamespaceSpec), b.(*corev1.NamespaceSpec), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.NamespaceStatus)(nil), (*core.NamespaceStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_NamespaceStatus_To_core_NamespaceStatus(a.(*v1.NamespaceStatus), b.(*core.NamespaceStatus), scope) + if err := s.AddGeneratedConversionFunc((*corev1.NamespaceStatus)(nil), (*core.NamespaceStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_NamespaceStatus_To_core_NamespaceStatus(a.(*corev1.NamespaceStatus), b.(*core.NamespaceStatus), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.NamespaceStatus)(nil), (*v1.NamespaceStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_NamespaceStatus_To_v1_NamespaceStatus(a.(*core.NamespaceStatus), b.(*v1.NamespaceStatus), scope) + if err := s.AddGeneratedConversionFunc((*core.NamespaceStatus)(nil), (*corev1.NamespaceStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_NamespaceStatus_To_v1_NamespaceStatus(a.(*core.NamespaceStatus), b.(*corev1.NamespaceStatus), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.Node)(nil), (*core.Node)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_Node_To_core_Node(a.(*v1.Node), b.(*core.Node), scope) + if err := s.AddGeneratedConversionFunc((*corev1.Node)(nil), (*core.Node)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_Node_To_core_Node(a.(*corev1.Node), b.(*core.Node), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.Node)(nil), (*v1.Node)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_Node_To_v1_Node(a.(*core.Node), b.(*v1.Node), scope) + if err := s.AddGeneratedConversionFunc((*core.Node)(nil), (*corev1.Node)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_Node_To_v1_Node(a.(*core.Node), b.(*corev1.Node), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.NodeAddress)(nil), (*core.NodeAddress)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_NodeAddress_To_core_NodeAddress(a.(*v1.NodeAddress), 
b.(*core.NodeAddress), scope) + if err := s.AddGeneratedConversionFunc((*corev1.NodeAddress)(nil), (*core.NodeAddress)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_NodeAddress_To_core_NodeAddress(a.(*corev1.NodeAddress), b.(*core.NodeAddress), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.NodeAddress)(nil), (*v1.NodeAddress)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_NodeAddress_To_v1_NodeAddress(a.(*core.NodeAddress), b.(*v1.NodeAddress), scope) + if err := s.AddGeneratedConversionFunc((*core.NodeAddress)(nil), (*corev1.NodeAddress)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_NodeAddress_To_v1_NodeAddress(a.(*core.NodeAddress), b.(*corev1.NodeAddress), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.NodeAffinity)(nil), (*core.NodeAffinity)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_NodeAffinity_To_core_NodeAffinity(a.(*v1.NodeAffinity), b.(*core.NodeAffinity), scope) + if err := s.AddGeneratedConversionFunc((*corev1.NodeAffinity)(nil), (*core.NodeAffinity)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_NodeAffinity_To_core_NodeAffinity(a.(*corev1.NodeAffinity), b.(*core.NodeAffinity), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.NodeAffinity)(nil), (*v1.NodeAffinity)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_NodeAffinity_To_v1_NodeAffinity(a.(*core.NodeAffinity), b.(*v1.NodeAffinity), scope) + if err := s.AddGeneratedConversionFunc((*core.NodeAffinity)(nil), (*corev1.NodeAffinity)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_NodeAffinity_To_v1_NodeAffinity(a.(*core.NodeAffinity), b.(*corev1.NodeAffinity), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.NodeCondition)(nil), (*core.NodeCondition)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_NodeCondition_To_core_NodeCondition(a.(*v1.NodeCondition), b.(*core.NodeCondition), scope) + if err := s.AddGeneratedConversionFunc((*corev1.NodeCondition)(nil), (*core.NodeCondition)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_NodeCondition_To_core_NodeCondition(a.(*corev1.NodeCondition), b.(*core.NodeCondition), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.NodeCondition)(nil), (*v1.NodeCondition)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_NodeCondition_To_v1_NodeCondition(a.(*core.NodeCondition), b.(*v1.NodeCondition), scope) + if err := s.AddGeneratedConversionFunc((*core.NodeCondition)(nil), (*corev1.NodeCondition)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_NodeCondition_To_v1_NodeCondition(a.(*core.NodeCondition), b.(*corev1.NodeCondition), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.NodeConfigSource)(nil), (*core.NodeConfigSource)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_NodeConfigSource_To_core_NodeConfigSource(a.(*v1.NodeConfigSource), b.(*core.NodeConfigSource), scope) + if err := s.AddGeneratedConversionFunc((*corev1.NodeConfigSource)(nil), (*core.NodeConfigSource)(nil), func(a, b interface{}, scope conversion.Scope) error { + return 
Convert_v1_NodeConfigSource_To_core_NodeConfigSource(a.(*corev1.NodeConfigSource), b.(*core.NodeConfigSource), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.NodeConfigSource)(nil), (*v1.NodeConfigSource)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_NodeConfigSource_To_v1_NodeConfigSource(a.(*core.NodeConfigSource), b.(*v1.NodeConfigSource), scope) + if err := s.AddGeneratedConversionFunc((*core.NodeConfigSource)(nil), (*corev1.NodeConfigSource)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_NodeConfigSource_To_v1_NodeConfigSource(a.(*core.NodeConfigSource), b.(*corev1.NodeConfigSource), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.NodeConfigStatus)(nil), (*core.NodeConfigStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_NodeConfigStatus_To_core_NodeConfigStatus(a.(*v1.NodeConfigStatus), b.(*core.NodeConfigStatus), scope) + if err := s.AddGeneratedConversionFunc((*corev1.NodeConfigStatus)(nil), (*core.NodeConfigStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_NodeConfigStatus_To_core_NodeConfigStatus(a.(*corev1.NodeConfigStatus), b.(*core.NodeConfigStatus), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.NodeConfigStatus)(nil), (*v1.NodeConfigStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_NodeConfigStatus_To_v1_NodeConfigStatus(a.(*core.NodeConfigStatus), b.(*v1.NodeConfigStatus), scope) + if err := s.AddGeneratedConversionFunc((*core.NodeConfigStatus)(nil), (*corev1.NodeConfigStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_NodeConfigStatus_To_v1_NodeConfigStatus(a.(*core.NodeConfigStatus), b.(*corev1.NodeConfigStatus), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.NodeDaemonEndpoints)(nil), (*core.NodeDaemonEndpoints)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_NodeDaemonEndpoints_To_core_NodeDaemonEndpoints(a.(*v1.NodeDaemonEndpoints), b.(*core.NodeDaemonEndpoints), scope) + if err := s.AddGeneratedConversionFunc((*corev1.NodeDaemonEndpoints)(nil), (*core.NodeDaemonEndpoints)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_NodeDaemonEndpoints_To_core_NodeDaemonEndpoints(a.(*corev1.NodeDaemonEndpoints), b.(*core.NodeDaemonEndpoints), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.NodeDaemonEndpoints)(nil), (*v1.NodeDaemonEndpoints)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_NodeDaemonEndpoints_To_v1_NodeDaemonEndpoints(a.(*core.NodeDaemonEndpoints), b.(*v1.NodeDaemonEndpoints), scope) + if err := s.AddGeneratedConversionFunc((*core.NodeDaemonEndpoints)(nil), (*corev1.NodeDaemonEndpoints)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_NodeDaemonEndpoints_To_v1_NodeDaemonEndpoints(a.(*core.NodeDaemonEndpoints), b.(*corev1.NodeDaemonEndpoints), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.NodeFeatures)(nil), (*core.NodeFeatures)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_NodeFeatures_To_core_NodeFeatures(a.(*v1.NodeFeatures), b.(*core.NodeFeatures), scope) + if err := s.AddGeneratedConversionFunc((*corev1.NodeFeatures)(nil), (*core.NodeFeatures)(nil), 
func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_NodeFeatures_To_core_NodeFeatures(a.(*corev1.NodeFeatures), b.(*core.NodeFeatures), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.NodeFeatures)(nil), (*v1.NodeFeatures)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_NodeFeatures_To_v1_NodeFeatures(a.(*core.NodeFeatures), b.(*v1.NodeFeatures), scope) + if err := s.AddGeneratedConversionFunc((*core.NodeFeatures)(nil), (*corev1.NodeFeatures)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_NodeFeatures_To_v1_NodeFeatures(a.(*core.NodeFeatures), b.(*corev1.NodeFeatures), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.NodeList)(nil), (*core.NodeList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_NodeList_To_core_NodeList(a.(*v1.NodeList), b.(*core.NodeList), scope) + if err := s.AddGeneratedConversionFunc((*corev1.NodeList)(nil), (*core.NodeList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_NodeList_To_core_NodeList(a.(*corev1.NodeList), b.(*core.NodeList), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.NodeList)(nil), (*v1.NodeList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_NodeList_To_v1_NodeList(a.(*core.NodeList), b.(*v1.NodeList), scope) + if err := s.AddGeneratedConversionFunc((*core.NodeList)(nil), (*corev1.NodeList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_NodeList_To_v1_NodeList(a.(*core.NodeList), b.(*corev1.NodeList), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.NodeProxyOptions)(nil), (*core.NodeProxyOptions)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_NodeProxyOptions_To_core_NodeProxyOptions(a.(*v1.NodeProxyOptions), b.(*core.NodeProxyOptions), scope) + if err := s.AddGeneratedConversionFunc((*corev1.NodeProxyOptions)(nil), (*core.NodeProxyOptions)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_NodeProxyOptions_To_core_NodeProxyOptions(a.(*corev1.NodeProxyOptions), b.(*core.NodeProxyOptions), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.NodeProxyOptions)(nil), (*v1.NodeProxyOptions)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_NodeProxyOptions_To_v1_NodeProxyOptions(a.(*core.NodeProxyOptions), b.(*v1.NodeProxyOptions), scope) + if err := s.AddGeneratedConversionFunc((*core.NodeProxyOptions)(nil), (*corev1.NodeProxyOptions)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_NodeProxyOptions_To_v1_NodeProxyOptions(a.(*core.NodeProxyOptions), b.(*corev1.NodeProxyOptions), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.NodeRuntimeHandler)(nil), (*core.NodeRuntimeHandler)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_NodeRuntimeHandler_To_core_NodeRuntimeHandler(a.(*v1.NodeRuntimeHandler), b.(*core.NodeRuntimeHandler), scope) + if err := s.AddGeneratedConversionFunc((*corev1.NodeRuntimeHandler)(nil), (*core.NodeRuntimeHandler)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_NodeRuntimeHandler_To_core_NodeRuntimeHandler(a.(*corev1.NodeRuntimeHandler), b.(*core.NodeRuntimeHandler), scope) }); err != nil { return err 
} - if err := s.AddGeneratedConversionFunc((*core.NodeRuntimeHandler)(nil), (*v1.NodeRuntimeHandler)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_NodeRuntimeHandler_To_v1_NodeRuntimeHandler(a.(*core.NodeRuntimeHandler), b.(*v1.NodeRuntimeHandler), scope) + if err := s.AddGeneratedConversionFunc((*core.NodeRuntimeHandler)(nil), (*corev1.NodeRuntimeHandler)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_NodeRuntimeHandler_To_v1_NodeRuntimeHandler(a.(*core.NodeRuntimeHandler), b.(*corev1.NodeRuntimeHandler), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.NodeRuntimeHandlerFeatures)(nil), (*core.NodeRuntimeHandlerFeatures)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_NodeRuntimeHandlerFeatures_To_core_NodeRuntimeHandlerFeatures(a.(*v1.NodeRuntimeHandlerFeatures), b.(*core.NodeRuntimeHandlerFeatures), scope) + if err := s.AddGeneratedConversionFunc((*corev1.NodeRuntimeHandlerFeatures)(nil), (*core.NodeRuntimeHandlerFeatures)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_NodeRuntimeHandlerFeatures_To_core_NodeRuntimeHandlerFeatures(a.(*corev1.NodeRuntimeHandlerFeatures), b.(*core.NodeRuntimeHandlerFeatures), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.NodeRuntimeHandlerFeatures)(nil), (*v1.NodeRuntimeHandlerFeatures)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_NodeRuntimeHandlerFeatures_To_v1_NodeRuntimeHandlerFeatures(a.(*core.NodeRuntimeHandlerFeatures), b.(*v1.NodeRuntimeHandlerFeatures), scope) + if err := s.AddGeneratedConversionFunc((*core.NodeRuntimeHandlerFeatures)(nil), (*corev1.NodeRuntimeHandlerFeatures)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_NodeRuntimeHandlerFeatures_To_v1_NodeRuntimeHandlerFeatures(a.(*core.NodeRuntimeHandlerFeatures), b.(*corev1.NodeRuntimeHandlerFeatures), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.NodeSelector)(nil), (*core.NodeSelector)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_NodeSelector_To_core_NodeSelector(a.(*v1.NodeSelector), b.(*core.NodeSelector), scope) + if err := s.AddGeneratedConversionFunc((*corev1.NodeSelector)(nil), (*core.NodeSelector)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_NodeSelector_To_core_NodeSelector(a.(*corev1.NodeSelector), b.(*core.NodeSelector), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.NodeSelector)(nil), (*v1.NodeSelector)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_NodeSelector_To_v1_NodeSelector(a.(*core.NodeSelector), b.(*v1.NodeSelector), scope) + if err := s.AddGeneratedConversionFunc((*core.NodeSelector)(nil), (*corev1.NodeSelector)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_NodeSelector_To_v1_NodeSelector(a.(*core.NodeSelector), b.(*corev1.NodeSelector), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.NodeSelectorRequirement)(nil), (*core.NodeSelectorRequirement)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_NodeSelectorRequirement_To_core_NodeSelectorRequirement(a.(*v1.NodeSelectorRequirement), b.(*core.NodeSelectorRequirement), scope) + if err := s.AddGeneratedConversionFunc((*corev1.NodeSelectorRequirement)(nil), 
(*core.NodeSelectorRequirement)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_NodeSelectorRequirement_To_core_NodeSelectorRequirement(a.(*corev1.NodeSelectorRequirement), b.(*core.NodeSelectorRequirement), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.NodeSelectorRequirement)(nil), (*v1.NodeSelectorRequirement)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_NodeSelectorRequirement_To_v1_NodeSelectorRequirement(a.(*core.NodeSelectorRequirement), b.(*v1.NodeSelectorRequirement), scope) + if err := s.AddGeneratedConversionFunc((*core.NodeSelectorRequirement)(nil), (*corev1.NodeSelectorRequirement)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_NodeSelectorRequirement_To_v1_NodeSelectorRequirement(a.(*core.NodeSelectorRequirement), b.(*corev1.NodeSelectorRequirement), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.NodeSelectorTerm)(nil), (*core.NodeSelectorTerm)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_NodeSelectorTerm_To_core_NodeSelectorTerm(a.(*v1.NodeSelectorTerm), b.(*core.NodeSelectorTerm), scope) + if err := s.AddGeneratedConversionFunc((*corev1.NodeSelectorTerm)(nil), (*core.NodeSelectorTerm)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_NodeSelectorTerm_To_core_NodeSelectorTerm(a.(*corev1.NodeSelectorTerm), b.(*core.NodeSelectorTerm), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.NodeSelectorTerm)(nil), (*v1.NodeSelectorTerm)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_NodeSelectorTerm_To_v1_NodeSelectorTerm(a.(*core.NodeSelectorTerm), b.(*v1.NodeSelectorTerm), scope) + if err := s.AddGeneratedConversionFunc((*core.NodeSelectorTerm)(nil), (*corev1.NodeSelectorTerm)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_NodeSelectorTerm_To_v1_NodeSelectorTerm(a.(*core.NodeSelectorTerm), b.(*corev1.NodeSelectorTerm), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.NodeStatus)(nil), (*core.NodeStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_NodeStatus_To_core_NodeStatus(a.(*v1.NodeStatus), b.(*core.NodeStatus), scope) + if err := s.AddGeneratedConversionFunc((*corev1.NodeStatus)(nil), (*core.NodeStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_NodeStatus_To_core_NodeStatus(a.(*corev1.NodeStatus), b.(*core.NodeStatus), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.NodeStatus)(nil), (*v1.NodeStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_NodeStatus_To_v1_NodeStatus(a.(*core.NodeStatus), b.(*v1.NodeStatus), scope) + if err := s.AddGeneratedConversionFunc((*core.NodeStatus)(nil), (*corev1.NodeStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_NodeStatus_To_v1_NodeStatus(a.(*core.NodeStatus), b.(*corev1.NodeStatus), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.NodeSystemInfo)(nil), (*core.NodeSystemInfo)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_NodeSystemInfo_To_core_NodeSystemInfo(a.(*v1.NodeSystemInfo), b.(*core.NodeSystemInfo), scope) + if err := s.AddGeneratedConversionFunc((*corev1.NodeSystemInfo)(nil), 
(*core.NodeSystemInfo)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_NodeSystemInfo_To_core_NodeSystemInfo(a.(*corev1.NodeSystemInfo), b.(*core.NodeSystemInfo), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.NodeSystemInfo)(nil), (*v1.NodeSystemInfo)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_NodeSystemInfo_To_v1_NodeSystemInfo(a.(*core.NodeSystemInfo), b.(*v1.NodeSystemInfo), scope) + if err := s.AddGeneratedConversionFunc((*core.NodeSystemInfo)(nil), (*corev1.NodeSystemInfo)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_NodeSystemInfo_To_v1_NodeSystemInfo(a.(*core.NodeSystemInfo), b.(*corev1.NodeSystemInfo), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.ObjectFieldSelector)(nil), (*core.ObjectFieldSelector)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_ObjectFieldSelector_To_core_ObjectFieldSelector(a.(*v1.ObjectFieldSelector), b.(*core.ObjectFieldSelector), scope) + if err := s.AddGeneratedConversionFunc((*corev1.ObjectFieldSelector)(nil), (*core.ObjectFieldSelector)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_ObjectFieldSelector_To_core_ObjectFieldSelector(a.(*corev1.ObjectFieldSelector), b.(*core.ObjectFieldSelector), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.ObjectFieldSelector)(nil), (*v1.ObjectFieldSelector)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_ObjectFieldSelector_To_v1_ObjectFieldSelector(a.(*core.ObjectFieldSelector), b.(*v1.ObjectFieldSelector), scope) + if err := s.AddGeneratedConversionFunc((*core.ObjectFieldSelector)(nil), (*corev1.ObjectFieldSelector)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_ObjectFieldSelector_To_v1_ObjectFieldSelector(a.(*core.ObjectFieldSelector), b.(*corev1.ObjectFieldSelector), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.ObjectReference)(nil), (*core.ObjectReference)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_ObjectReference_To_core_ObjectReference(a.(*v1.ObjectReference), b.(*core.ObjectReference), scope) + if err := s.AddGeneratedConversionFunc((*corev1.ObjectReference)(nil), (*core.ObjectReference)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_ObjectReference_To_core_ObjectReference(a.(*corev1.ObjectReference), b.(*core.ObjectReference), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.ObjectReference)(nil), (*v1.ObjectReference)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_ObjectReference_To_v1_ObjectReference(a.(*core.ObjectReference), b.(*v1.ObjectReference), scope) + if err := s.AddGeneratedConversionFunc((*core.ObjectReference)(nil), (*corev1.ObjectReference)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_ObjectReference_To_v1_ObjectReference(a.(*core.ObjectReference), b.(*corev1.ObjectReference), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.PersistentVolume)(nil), (*core.PersistentVolume)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_PersistentVolume_To_core_PersistentVolume(a.(*v1.PersistentVolume), b.(*core.PersistentVolume), scope) + if err := 
s.AddGeneratedConversionFunc((*corev1.PersistentVolume)(nil), (*core.PersistentVolume)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_PersistentVolume_To_core_PersistentVolume(a.(*corev1.PersistentVolume), b.(*core.PersistentVolume), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.PersistentVolume)(nil), (*v1.PersistentVolume)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_PersistentVolume_To_v1_PersistentVolume(a.(*core.PersistentVolume), b.(*v1.PersistentVolume), scope) + if err := s.AddGeneratedConversionFunc((*core.PersistentVolume)(nil), (*corev1.PersistentVolume)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_PersistentVolume_To_v1_PersistentVolume(a.(*core.PersistentVolume), b.(*corev1.PersistentVolume), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.PersistentVolumeClaim)(nil), (*core.PersistentVolumeClaim)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_PersistentVolumeClaim_To_core_PersistentVolumeClaim(a.(*v1.PersistentVolumeClaim), b.(*core.PersistentVolumeClaim), scope) + if err := s.AddGeneratedConversionFunc((*corev1.PersistentVolumeClaim)(nil), (*core.PersistentVolumeClaim)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_PersistentVolumeClaim_To_core_PersistentVolumeClaim(a.(*corev1.PersistentVolumeClaim), b.(*core.PersistentVolumeClaim), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.PersistentVolumeClaim)(nil), (*v1.PersistentVolumeClaim)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_PersistentVolumeClaim_To_v1_PersistentVolumeClaim(a.(*core.PersistentVolumeClaim), b.(*v1.PersistentVolumeClaim), scope) + if err := s.AddGeneratedConversionFunc((*core.PersistentVolumeClaim)(nil), (*corev1.PersistentVolumeClaim)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_PersistentVolumeClaim_To_v1_PersistentVolumeClaim(a.(*core.PersistentVolumeClaim), b.(*corev1.PersistentVolumeClaim), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.PersistentVolumeClaimCondition)(nil), (*core.PersistentVolumeClaimCondition)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_PersistentVolumeClaimCondition_To_core_PersistentVolumeClaimCondition(a.(*v1.PersistentVolumeClaimCondition), b.(*core.PersistentVolumeClaimCondition), scope) + if err := s.AddGeneratedConversionFunc((*corev1.PersistentVolumeClaimCondition)(nil), (*core.PersistentVolumeClaimCondition)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_PersistentVolumeClaimCondition_To_core_PersistentVolumeClaimCondition(a.(*corev1.PersistentVolumeClaimCondition), b.(*core.PersistentVolumeClaimCondition), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.PersistentVolumeClaimCondition)(nil), (*v1.PersistentVolumeClaimCondition)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_PersistentVolumeClaimCondition_To_v1_PersistentVolumeClaimCondition(a.(*core.PersistentVolumeClaimCondition), b.(*v1.PersistentVolumeClaimCondition), scope) + if err := s.AddGeneratedConversionFunc((*core.PersistentVolumeClaimCondition)(nil), (*corev1.PersistentVolumeClaimCondition)(nil), func(a, b interface{}, scope conversion.Scope) error { + return 
Convert_core_PersistentVolumeClaimCondition_To_v1_PersistentVolumeClaimCondition(a.(*core.PersistentVolumeClaimCondition), b.(*corev1.PersistentVolumeClaimCondition), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.PersistentVolumeClaimList)(nil), (*core.PersistentVolumeClaimList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_PersistentVolumeClaimList_To_core_PersistentVolumeClaimList(a.(*v1.PersistentVolumeClaimList), b.(*core.PersistentVolumeClaimList), scope) + if err := s.AddGeneratedConversionFunc((*corev1.PersistentVolumeClaimList)(nil), (*core.PersistentVolumeClaimList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_PersistentVolumeClaimList_To_core_PersistentVolumeClaimList(a.(*corev1.PersistentVolumeClaimList), b.(*core.PersistentVolumeClaimList), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.PersistentVolumeClaimList)(nil), (*v1.PersistentVolumeClaimList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_PersistentVolumeClaimList_To_v1_PersistentVolumeClaimList(a.(*core.PersistentVolumeClaimList), b.(*v1.PersistentVolumeClaimList), scope) + if err := s.AddGeneratedConversionFunc((*core.PersistentVolumeClaimList)(nil), (*corev1.PersistentVolumeClaimList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_PersistentVolumeClaimList_To_v1_PersistentVolumeClaimList(a.(*core.PersistentVolumeClaimList), b.(*corev1.PersistentVolumeClaimList), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.PersistentVolumeClaimSpec)(nil), (*core.PersistentVolumeClaimSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_PersistentVolumeClaimSpec_To_core_PersistentVolumeClaimSpec(a.(*v1.PersistentVolumeClaimSpec), b.(*core.PersistentVolumeClaimSpec), scope) + if err := s.AddGeneratedConversionFunc((*corev1.PersistentVolumeClaimSpec)(nil), (*core.PersistentVolumeClaimSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_PersistentVolumeClaimSpec_To_core_PersistentVolumeClaimSpec(a.(*corev1.PersistentVolumeClaimSpec), b.(*core.PersistentVolumeClaimSpec), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.PersistentVolumeClaimSpec)(nil), (*v1.PersistentVolumeClaimSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_PersistentVolumeClaimSpec_To_v1_PersistentVolumeClaimSpec(a.(*core.PersistentVolumeClaimSpec), b.(*v1.PersistentVolumeClaimSpec), scope) + if err := s.AddGeneratedConversionFunc((*core.PersistentVolumeClaimSpec)(nil), (*corev1.PersistentVolumeClaimSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_PersistentVolumeClaimSpec_To_v1_PersistentVolumeClaimSpec(a.(*core.PersistentVolumeClaimSpec), b.(*corev1.PersistentVolumeClaimSpec), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.PersistentVolumeClaimStatus)(nil), (*core.PersistentVolumeClaimStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_PersistentVolumeClaimStatus_To_core_PersistentVolumeClaimStatus(a.(*v1.PersistentVolumeClaimStatus), b.(*core.PersistentVolumeClaimStatus), scope) + if err := s.AddGeneratedConversionFunc((*corev1.PersistentVolumeClaimStatus)(nil), (*core.PersistentVolumeClaimStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + 
return Convert_v1_PersistentVolumeClaimStatus_To_core_PersistentVolumeClaimStatus(a.(*corev1.PersistentVolumeClaimStatus), b.(*core.PersistentVolumeClaimStatus), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.PersistentVolumeClaimStatus)(nil), (*v1.PersistentVolumeClaimStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_PersistentVolumeClaimStatus_To_v1_PersistentVolumeClaimStatus(a.(*core.PersistentVolumeClaimStatus), b.(*v1.PersistentVolumeClaimStatus), scope) + if err := s.AddGeneratedConversionFunc((*core.PersistentVolumeClaimStatus)(nil), (*corev1.PersistentVolumeClaimStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_PersistentVolumeClaimStatus_To_v1_PersistentVolumeClaimStatus(a.(*core.PersistentVolumeClaimStatus), b.(*corev1.PersistentVolumeClaimStatus), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.PersistentVolumeClaimTemplate)(nil), (*core.PersistentVolumeClaimTemplate)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_PersistentVolumeClaimTemplate_To_core_PersistentVolumeClaimTemplate(a.(*v1.PersistentVolumeClaimTemplate), b.(*core.PersistentVolumeClaimTemplate), scope) + if err := s.AddGeneratedConversionFunc((*corev1.PersistentVolumeClaimTemplate)(nil), (*core.PersistentVolumeClaimTemplate)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_PersistentVolumeClaimTemplate_To_core_PersistentVolumeClaimTemplate(a.(*corev1.PersistentVolumeClaimTemplate), b.(*core.PersistentVolumeClaimTemplate), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.PersistentVolumeClaimTemplate)(nil), (*v1.PersistentVolumeClaimTemplate)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_PersistentVolumeClaimTemplate_To_v1_PersistentVolumeClaimTemplate(a.(*core.PersistentVolumeClaimTemplate), b.(*v1.PersistentVolumeClaimTemplate), scope) + if err := s.AddGeneratedConversionFunc((*core.PersistentVolumeClaimTemplate)(nil), (*corev1.PersistentVolumeClaimTemplate)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_PersistentVolumeClaimTemplate_To_v1_PersistentVolumeClaimTemplate(a.(*core.PersistentVolumeClaimTemplate), b.(*corev1.PersistentVolumeClaimTemplate), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.PersistentVolumeClaimVolumeSource)(nil), (*core.PersistentVolumeClaimVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_PersistentVolumeClaimVolumeSource_To_core_PersistentVolumeClaimVolumeSource(a.(*v1.PersistentVolumeClaimVolumeSource), b.(*core.PersistentVolumeClaimVolumeSource), scope) + if err := s.AddGeneratedConversionFunc((*corev1.PersistentVolumeClaimVolumeSource)(nil), (*core.PersistentVolumeClaimVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_PersistentVolumeClaimVolumeSource_To_core_PersistentVolumeClaimVolumeSource(a.(*corev1.PersistentVolumeClaimVolumeSource), b.(*core.PersistentVolumeClaimVolumeSource), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.PersistentVolumeClaimVolumeSource)(nil), (*v1.PersistentVolumeClaimVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { - return 
Convert_core_PersistentVolumeClaimVolumeSource_To_v1_PersistentVolumeClaimVolumeSource(a.(*core.PersistentVolumeClaimVolumeSource), b.(*v1.PersistentVolumeClaimVolumeSource), scope) + if err := s.AddGeneratedConversionFunc((*core.PersistentVolumeClaimVolumeSource)(nil), (*corev1.PersistentVolumeClaimVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_PersistentVolumeClaimVolumeSource_To_v1_PersistentVolumeClaimVolumeSource(a.(*core.PersistentVolumeClaimVolumeSource), b.(*corev1.PersistentVolumeClaimVolumeSource), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.PersistentVolumeList)(nil), (*core.PersistentVolumeList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_PersistentVolumeList_To_core_PersistentVolumeList(a.(*v1.PersistentVolumeList), b.(*core.PersistentVolumeList), scope) + if err := s.AddGeneratedConversionFunc((*corev1.PersistentVolumeList)(nil), (*core.PersistentVolumeList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_PersistentVolumeList_To_core_PersistentVolumeList(a.(*corev1.PersistentVolumeList), b.(*core.PersistentVolumeList), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.PersistentVolumeList)(nil), (*v1.PersistentVolumeList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_PersistentVolumeList_To_v1_PersistentVolumeList(a.(*core.PersistentVolumeList), b.(*v1.PersistentVolumeList), scope) + if err := s.AddGeneratedConversionFunc((*core.PersistentVolumeList)(nil), (*corev1.PersistentVolumeList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_PersistentVolumeList_To_v1_PersistentVolumeList(a.(*core.PersistentVolumeList), b.(*corev1.PersistentVolumeList), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.PersistentVolumeSource)(nil), (*core.PersistentVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_PersistentVolumeSource_To_core_PersistentVolumeSource(a.(*v1.PersistentVolumeSource), b.(*core.PersistentVolumeSource), scope) + if err := s.AddGeneratedConversionFunc((*corev1.PersistentVolumeSource)(nil), (*core.PersistentVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_PersistentVolumeSource_To_core_PersistentVolumeSource(a.(*corev1.PersistentVolumeSource), b.(*core.PersistentVolumeSource), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.PersistentVolumeSource)(nil), (*v1.PersistentVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_PersistentVolumeSource_To_v1_PersistentVolumeSource(a.(*core.PersistentVolumeSource), b.(*v1.PersistentVolumeSource), scope) + if err := s.AddGeneratedConversionFunc((*core.PersistentVolumeSource)(nil), (*corev1.PersistentVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_PersistentVolumeSource_To_v1_PersistentVolumeSource(a.(*core.PersistentVolumeSource), b.(*corev1.PersistentVolumeSource), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.PersistentVolumeStatus)(nil), (*core.PersistentVolumeStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_PersistentVolumeStatus_To_core_PersistentVolumeStatus(a.(*v1.PersistentVolumeStatus), b.(*core.PersistentVolumeStatus), scope) + if err := 
s.AddGeneratedConversionFunc((*corev1.PersistentVolumeStatus)(nil), (*core.PersistentVolumeStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_PersistentVolumeStatus_To_core_PersistentVolumeStatus(a.(*corev1.PersistentVolumeStatus), b.(*core.PersistentVolumeStatus), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.PersistentVolumeStatus)(nil), (*v1.PersistentVolumeStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_PersistentVolumeStatus_To_v1_PersistentVolumeStatus(a.(*core.PersistentVolumeStatus), b.(*v1.PersistentVolumeStatus), scope) + if err := s.AddGeneratedConversionFunc((*core.PersistentVolumeStatus)(nil), (*corev1.PersistentVolumeStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_PersistentVolumeStatus_To_v1_PersistentVolumeStatus(a.(*core.PersistentVolumeStatus), b.(*corev1.PersistentVolumeStatus), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.PhotonPersistentDiskVolumeSource)(nil), (*core.PhotonPersistentDiskVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_PhotonPersistentDiskVolumeSource_To_core_PhotonPersistentDiskVolumeSource(a.(*v1.PhotonPersistentDiskVolumeSource), b.(*core.PhotonPersistentDiskVolumeSource), scope) + if err := s.AddGeneratedConversionFunc((*corev1.PhotonPersistentDiskVolumeSource)(nil), (*core.PhotonPersistentDiskVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_PhotonPersistentDiskVolumeSource_To_core_PhotonPersistentDiskVolumeSource(a.(*corev1.PhotonPersistentDiskVolumeSource), b.(*core.PhotonPersistentDiskVolumeSource), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.PhotonPersistentDiskVolumeSource)(nil), (*v1.PhotonPersistentDiskVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_PhotonPersistentDiskVolumeSource_To_v1_PhotonPersistentDiskVolumeSource(a.(*core.PhotonPersistentDiskVolumeSource), b.(*v1.PhotonPersistentDiskVolumeSource), scope) + if err := s.AddGeneratedConversionFunc((*core.PhotonPersistentDiskVolumeSource)(nil), (*corev1.PhotonPersistentDiskVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_PhotonPersistentDiskVolumeSource_To_v1_PhotonPersistentDiskVolumeSource(a.(*core.PhotonPersistentDiskVolumeSource), b.(*corev1.PhotonPersistentDiskVolumeSource), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.PodAffinity)(nil), (*core.PodAffinity)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_PodAffinity_To_core_PodAffinity(a.(*v1.PodAffinity), b.(*core.PodAffinity), scope) + if err := s.AddGeneratedConversionFunc((*corev1.PodAffinity)(nil), (*core.PodAffinity)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_PodAffinity_To_core_PodAffinity(a.(*corev1.PodAffinity), b.(*core.PodAffinity), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.PodAffinity)(nil), (*v1.PodAffinity)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_PodAffinity_To_v1_PodAffinity(a.(*core.PodAffinity), b.(*v1.PodAffinity), scope) + if err := s.AddGeneratedConversionFunc((*core.PodAffinity)(nil), (*corev1.PodAffinity)(nil), func(a, b interface{}, scope conversion.Scope) error { + return 
Convert_core_PodAffinity_To_v1_PodAffinity(a.(*core.PodAffinity), b.(*corev1.PodAffinity), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.PodAffinityTerm)(nil), (*core.PodAffinityTerm)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_PodAffinityTerm_To_core_PodAffinityTerm(a.(*v1.PodAffinityTerm), b.(*core.PodAffinityTerm), scope) + if err := s.AddGeneratedConversionFunc((*corev1.PodAffinityTerm)(nil), (*core.PodAffinityTerm)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_PodAffinityTerm_To_core_PodAffinityTerm(a.(*corev1.PodAffinityTerm), b.(*core.PodAffinityTerm), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.PodAffinityTerm)(nil), (*v1.PodAffinityTerm)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_PodAffinityTerm_To_v1_PodAffinityTerm(a.(*core.PodAffinityTerm), b.(*v1.PodAffinityTerm), scope) + if err := s.AddGeneratedConversionFunc((*core.PodAffinityTerm)(nil), (*corev1.PodAffinityTerm)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_PodAffinityTerm_To_v1_PodAffinityTerm(a.(*core.PodAffinityTerm), b.(*corev1.PodAffinityTerm), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.PodAntiAffinity)(nil), (*core.PodAntiAffinity)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_PodAntiAffinity_To_core_PodAntiAffinity(a.(*v1.PodAntiAffinity), b.(*core.PodAntiAffinity), scope) + if err := s.AddGeneratedConversionFunc((*corev1.PodAntiAffinity)(nil), (*core.PodAntiAffinity)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_PodAntiAffinity_To_core_PodAntiAffinity(a.(*corev1.PodAntiAffinity), b.(*core.PodAntiAffinity), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.PodAntiAffinity)(nil), (*v1.PodAntiAffinity)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_PodAntiAffinity_To_v1_PodAntiAffinity(a.(*core.PodAntiAffinity), b.(*v1.PodAntiAffinity), scope) + if err := s.AddGeneratedConversionFunc((*core.PodAntiAffinity)(nil), (*corev1.PodAntiAffinity)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_PodAntiAffinity_To_v1_PodAntiAffinity(a.(*core.PodAntiAffinity), b.(*corev1.PodAntiAffinity), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.PodAttachOptions)(nil), (*core.PodAttachOptions)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_PodAttachOptions_To_core_PodAttachOptions(a.(*v1.PodAttachOptions), b.(*core.PodAttachOptions), scope) + if err := s.AddGeneratedConversionFunc((*corev1.PodAttachOptions)(nil), (*core.PodAttachOptions)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_PodAttachOptions_To_core_PodAttachOptions(a.(*corev1.PodAttachOptions), b.(*core.PodAttachOptions), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.PodAttachOptions)(nil), (*v1.PodAttachOptions)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_PodAttachOptions_To_v1_PodAttachOptions(a.(*core.PodAttachOptions), b.(*v1.PodAttachOptions), scope) + if err := s.AddGeneratedConversionFunc((*core.PodAttachOptions)(nil), (*corev1.PodAttachOptions)(nil), func(a, b interface{}, scope conversion.Scope) error { + return 
Convert_core_PodAttachOptions_To_v1_PodAttachOptions(a.(*core.PodAttachOptions), b.(*corev1.PodAttachOptions), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.PodCondition)(nil), (*core.PodCondition)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_PodCondition_To_core_PodCondition(a.(*v1.PodCondition), b.(*core.PodCondition), scope) + if err := s.AddGeneratedConversionFunc((*corev1.PodCondition)(nil), (*core.PodCondition)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_PodCondition_To_core_PodCondition(a.(*corev1.PodCondition), b.(*core.PodCondition), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.PodCondition)(nil), (*v1.PodCondition)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_PodCondition_To_v1_PodCondition(a.(*core.PodCondition), b.(*v1.PodCondition), scope) + if err := s.AddGeneratedConversionFunc((*core.PodCondition)(nil), (*corev1.PodCondition)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_PodCondition_To_v1_PodCondition(a.(*core.PodCondition), b.(*corev1.PodCondition), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.PodDNSConfig)(nil), (*core.PodDNSConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_PodDNSConfig_To_core_PodDNSConfig(a.(*v1.PodDNSConfig), b.(*core.PodDNSConfig), scope) + if err := s.AddGeneratedConversionFunc((*corev1.PodDNSConfig)(nil), (*core.PodDNSConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_PodDNSConfig_To_core_PodDNSConfig(a.(*corev1.PodDNSConfig), b.(*core.PodDNSConfig), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.PodDNSConfig)(nil), (*v1.PodDNSConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_PodDNSConfig_To_v1_PodDNSConfig(a.(*core.PodDNSConfig), b.(*v1.PodDNSConfig), scope) + if err := s.AddGeneratedConversionFunc((*core.PodDNSConfig)(nil), (*corev1.PodDNSConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_PodDNSConfig_To_v1_PodDNSConfig(a.(*core.PodDNSConfig), b.(*corev1.PodDNSConfig), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.PodDNSConfigOption)(nil), (*core.PodDNSConfigOption)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_PodDNSConfigOption_To_core_PodDNSConfigOption(a.(*v1.PodDNSConfigOption), b.(*core.PodDNSConfigOption), scope) + if err := s.AddGeneratedConversionFunc((*corev1.PodDNSConfigOption)(nil), (*core.PodDNSConfigOption)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_PodDNSConfigOption_To_core_PodDNSConfigOption(a.(*corev1.PodDNSConfigOption), b.(*core.PodDNSConfigOption), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.PodDNSConfigOption)(nil), (*v1.PodDNSConfigOption)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_PodDNSConfigOption_To_v1_PodDNSConfigOption(a.(*core.PodDNSConfigOption), b.(*v1.PodDNSConfigOption), scope) + if err := s.AddGeneratedConversionFunc((*core.PodDNSConfigOption)(nil), (*corev1.PodDNSConfigOption)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_PodDNSConfigOption_To_v1_PodDNSConfigOption(a.(*core.PodDNSConfigOption), b.(*corev1.PodDNSConfigOption), scope) }); 
err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.PodExecOptions)(nil), (*core.PodExecOptions)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_PodExecOptions_To_core_PodExecOptions(a.(*v1.PodExecOptions), b.(*core.PodExecOptions), scope) + if err := s.AddGeneratedConversionFunc((*corev1.PodExecOptions)(nil), (*core.PodExecOptions)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_PodExecOptions_To_core_PodExecOptions(a.(*corev1.PodExecOptions), b.(*core.PodExecOptions), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.PodExecOptions)(nil), (*v1.PodExecOptions)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_PodExecOptions_To_v1_PodExecOptions(a.(*core.PodExecOptions), b.(*v1.PodExecOptions), scope) + if err := s.AddGeneratedConversionFunc((*core.PodExecOptions)(nil), (*corev1.PodExecOptions)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_PodExecOptions_To_v1_PodExecOptions(a.(*core.PodExecOptions), b.(*corev1.PodExecOptions), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.PodIP)(nil), (*core.PodIP)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_PodIP_To_core_PodIP(a.(*v1.PodIP), b.(*core.PodIP), scope) + if err := s.AddGeneratedConversionFunc((*corev1.PodIP)(nil), (*core.PodIP)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_PodIP_To_core_PodIP(a.(*corev1.PodIP), b.(*core.PodIP), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.PodIP)(nil), (*v1.PodIP)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_PodIP_To_v1_PodIP(a.(*core.PodIP), b.(*v1.PodIP), scope) + if err := s.AddGeneratedConversionFunc((*core.PodIP)(nil), (*corev1.PodIP)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_PodIP_To_v1_PodIP(a.(*core.PodIP), b.(*corev1.PodIP), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.PodList)(nil), (*core.PodList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_PodList_To_core_PodList(a.(*v1.PodList), b.(*core.PodList), scope) + if err := s.AddGeneratedConversionFunc((*corev1.PodList)(nil), (*core.PodList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_PodList_To_core_PodList(a.(*corev1.PodList), b.(*core.PodList), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.PodList)(nil), (*v1.PodList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_PodList_To_v1_PodList(a.(*core.PodList), b.(*v1.PodList), scope) + if err := s.AddGeneratedConversionFunc((*core.PodList)(nil), (*corev1.PodList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_PodList_To_v1_PodList(a.(*core.PodList), b.(*corev1.PodList), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.PodLogOptions)(nil), (*core.PodLogOptions)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_PodLogOptions_To_core_PodLogOptions(a.(*v1.PodLogOptions), b.(*core.PodLogOptions), scope) + if err := s.AddGeneratedConversionFunc((*corev1.PodLogOptions)(nil), (*core.PodLogOptions)(nil), func(a, b interface{}, scope conversion.Scope) error { + return 
Convert_v1_PodLogOptions_To_core_PodLogOptions(a.(*corev1.PodLogOptions), b.(*core.PodLogOptions), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.PodLogOptions)(nil), (*v1.PodLogOptions)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_PodLogOptions_To_v1_PodLogOptions(a.(*core.PodLogOptions), b.(*v1.PodLogOptions), scope) + if err := s.AddGeneratedConversionFunc((*core.PodLogOptions)(nil), (*corev1.PodLogOptions)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_PodLogOptions_To_v1_PodLogOptions(a.(*core.PodLogOptions), b.(*corev1.PodLogOptions), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.PodOS)(nil), (*core.PodOS)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_PodOS_To_core_PodOS(a.(*v1.PodOS), b.(*core.PodOS), scope) + if err := s.AddGeneratedConversionFunc((*corev1.PodOS)(nil), (*core.PodOS)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_PodOS_To_core_PodOS(a.(*corev1.PodOS), b.(*core.PodOS), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.PodOS)(nil), (*v1.PodOS)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_PodOS_To_v1_PodOS(a.(*core.PodOS), b.(*v1.PodOS), scope) + if err := s.AddGeneratedConversionFunc((*core.PodOS)(nil), (*corev1.PodOS)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_PodOS_To_v1_PodOS(a.(*core.PodOS), b.(*corev1.PodOS), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.PodPortForwardOptions)(nil), (*core.PodPortForwardOptions)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_PodPortForwardOptions_To_core_PodPortForwardOptions(a.(*v1.PodPortForwardOptions), b.(*core.PodPortForwardOptions), scope) + if err := s.AddGeneratedConversionFunc((*corev1.PodPortForwardOptions)(nil), (*core.PodPortForwardOptions)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_PodPortForwardOptions_To_core_PodPortForwardOptions(a.(*corev1.PodPortForwardOptions), b.(*core.PodPortForwardOptions), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.PodPortForwardOptions)(nil), (*v1.PodPortForwardOptions)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_PodPortForwardOptions_To_v1_PodPortForwardOptions(a.(*core.PodPortForwardOptions), b.(*v1.PodPortForwardOptions), scope) + if err := s.AddGeneratedConversionFunc((*core.PodPortForwardOptions)(nil), (*corev1.PodPortForwardOptions)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_PodPortForwardOptions_To_v1_PodPortForwardOptions(a.(*core.PodPortForwardOptions), b.(*corev1.PodPortForwardOptions), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.PodProxyOptions)(nil), (*core.PodProxyOptions)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_PodProxyOptions_To_core_PodProxyOptions(a.(*v1.PodProxyOptions), b.(*core.PodProxyOptions), scope) + if err := s.AddGeneratedConversionFunc((*corev1.PodProxyOptions)(nil), (*core.PodProxyOptions)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_PodProxyOptions_To_core_PodProxyOptions(a.(*corev1.PodProxyOptions), b.(*core.PodProxyOptions), scope) }); err != nil { return err } - if err := 
s.AddGeneratedConversionFunc((*core.PodProxyOptions)(nil), (*v1.PodProxyOptions)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_PodProxyOptions_To_v1_PodProxyOptions(a.(*core.PodProxyOptions), b.(*v1.PodProxyOptions), scope) + if err := s.AddGeneratedConversionFunc((*core.PodProxyOptions)(nil), (*corev1.PodProxyOptions)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_PodProxyOptions_To_v1_PodProxyOptions(a.(*core.PodProxyOptions), b.(*corev1.PodProxyOptions), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.PodReadinessGate)(nil), (*core.PodReadinessGate)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_PodReadinessGate_To_core_PodReadinessGate(a.(*v1.PodReadinessGate), b.(*core.PodReadinessGate), scope) + if err := s.AddGeneratedConversionFunc((*corev1.PodReadinessGate)(nil), (*core.PodReadinessGate)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_PodReadinessGate_To_core_PodReadinessGate(a.(*corev1.PodReadinessGate), b.(*core.PodReadinessGate), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.PodReadinessGate)(nil), (*v1.PodReadinessGate)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_PodReadinessGate_To_v1_PodReadinessGate(a.(*core.PodReadinessGate), b.(*v1.PodReadinessGate), scope) + if err := s.AddGeneratedConversionFunc((*core.PodReadinessGate)(nil), (*corev1.PodReadinessGate)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_PodReadinessGate_To_v1_PodReadinessGate(a.(*core.PodReadinessGate), b.(*corev1.PodReadinessGate), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.PodResourceClaim)(nil), (*core.PodResourceClaim)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_PodResourceClaim_To_core_PodResourceClaim(a.(*v1.PodResourceClaim), b.(*core.PodResourceClaim), scope) + if err := s.AddGeneratedConversionFunc((*corev1.PodResourceClaim)(nil), (*core.PodResourceClaim)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_PodResourceClaim_To_core_PodResourceClaim(a.(*corev1.PodResourceClaim), b.(*core.PodResourceClaim), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.PodResourceClaim)(nil), (*v1.PodResourceClaim)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_PodResourceClaim_To_v1_PodResourceClaim(a.(*core.PodResourceClaim), b.(*v1.PodResourceClaim), scope) + if err := s.AddGeneratedConversionFunc((*core.PodResourceClaim)(nil), (*corev1.PodResourceClaim)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_PodResourceClaim_To_v1_PodResourceClaim(a.(*core.PodResourceClaim), b.(*corev1.PodResourceClaim), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.PodResourceClaimStatus)(nil), (*core.PodResourceClaimStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_PodResourceClaimStatus_To_core_PodResourceClaimStatus(a.(*v1.PodResourceClaimStatus), b.(*core.PodResourceClaimStatus), scope) + if err := s.AddGeneratedConversionFunc((*corev1.PodResourceClaimStatus)(nil), (*core.PodResourceClaimStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_PodResourceClaimStatus_To_core_PodResourceClaimStatus(a.(*corev1.PodResourceClaimStatus), 
b.(*core.PodResourceClaimStatus), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.PodResourceClaimStatus)(nil), (*v1.PodResourceClaimStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_PodResourceClaimStatus_To_v1_PodResourceClaimStatus(a.(*core.PodResourceClaimStatus), b.(*v1.PodResourceClaimStatus), scope) + if err := s.AddGeneratedConversionFunc((*core.PodResourceClaimStatus)(nil), (*corev1.PodResourceClaimStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_PodResourceClaimStatus_To_v1_PodResourceClaimStatus(a.(*core.PodResourceClaimStatus), b.(*corev1.PodResourceClaimStatus), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.PodSchedulingGate)(nil), (*core.PodSchedulingGate)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_PodSchedulingGate_To_core_PodSchedulingGate(a.(*v1.PodSchedulingGate), b.(*core.PodSchedulingGate), scope) + if err := s.AddGeneratedConversionFunc((*corev1.PodSchedulingGate)(nil), (*core.PodSchedulingGate)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_PodSchedulingGate_To_core_PodSchedulingGate(a.(*corev1.PodSchedulingGate), b.(*core.PodSchedulingGate), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.PodSchedulingGate)(nil), (*v1.PodSchedulingGate)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_PodSchedulingGate_To_v1_PodSchedulingGate(a.(*core.PodSchedulingGate), b.(*v1.PodSchedulingGate), scope) + if err := s.AddGeneratedConversionFunc((*core.PodSchedulingGate)(nil), (*corev1.PodSchedulingGate)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_PodSchedulingGate_To_v1_PodSchedulingGate(a.(*core.PodSchedulingGate), b.(*corev1.PodSchedulingGate), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.PodSecurityContext)(nil), (*core.PodSecurityContext)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_PodSecurityContext_To_core_PodSecurityContext(a.(*v1.PodSecurityContext), b.(*core.PodSecurityContext), scope) + if err := s.AddGeneratedConversionFunc((*corev1.PodSecurityContext)(nil), (*core.PodSecurityContext)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_PodSecurityContext_To_core_PodSecurityContext(a.(*corev1.PodSecurityContext), b.(*core.PodSecurityContext), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.PodSecurityContext)(nil), (*v1.PodSecurityContext)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_PodSecurityContext_To_v1_PodSecurityContext(a.(*core.PodSecurityContext), b.(*v1.PodSecurityContext), scope) + if err := s.AddGeneratedConversionFunc((*core.PodSecurityContext)(nil), (*corev1.PodSecurityContext)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_PodSecurityContext_To_v1_PodSecurityContext(a.(*core.PodSecurityContext), b.(*corev1.PodSecurityContext), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.PodSignature)(nil), (*core.PodSignature)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_PodSignature_To_core_PodSignature(a.(*v1.PodSignature), b.(*core.PodSignature), scope) + if err := s.AddGeneratedConversionFunc((*corev1.PodSignature)(nil), (*core.PodSignature)(nil), func(a, 
b interface{}, scope conversion.Scope) error { + return Convert_v1_PodSignature_To_core_PodSignature(a.(*corev1.PodSignature), b.(*core.PodSignature), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.PodSignature)(nil), (*v1.PodSignature)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_PodSignature_To_v1_PodSignature(a.(*core.PodSignature), b.(*v1.PodSignature), scope) + if err := s.AddGeneratedConversionFunc((*core.PodSignature)(nil), (*corev1.PodSignature)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_PodSignature_To_v1_PodSignature(a.(*core.PodSignature), b.(*corev1.PodSignature), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.PodStatusResult)(nil), (*core.PodStatusResult)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_PodStatusResult_To_core_PodStatusResult(a.(*v1.PodStatusResult), b.(*core.PodStatusResult), scope) + if err := s.AddGeneratedConversionFunc((*corev1.PodStatusResult)(nil), (*core.PodStatusResult)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_PodStatusResult_To_core_PodStatusResult(a.(*corev1.PodStatusResult), b.(*core.PodStatusResult), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.PodStatusResult)(nil), (*v1.PodStatusResult)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_PodStatusResult_To_v1_PodStatusResult(a.(*core.PodStatusResult), b.(*v1.PodStatusResult), scope) + if err := s.AddGeneratedConversionFunc((*core.PodStatusResult)(nil), (*corev1.PodStatusResult)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_PodStatusResult_To_v1_PodStatusResult(a.(*core.PodStatusResult), b.(*corev1.PodStatusResult), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.PodTemplate)(nil), (*core.PodTemplate)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_PodTemplate_To_core_PodTemplate(a.(*v1.PodTemplate), b.(*core.PodTemplate), scope) + if err := s.AddGeneratedConversionFunc((*corev1.PodTemplate)(nil), (*core.PodTemplate)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_PodTemplate_To_core_PodTemplate(a.(*corev1.PodTemplate), b.(*core.PodTemplate), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.PodTemplate)(nil), (*v1.PodTemplate)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_PodTemplate_To_v1_PodTemplate(a.(*core.PodTemplate), b.(*v1.PodTemplate), scope) + if err := s.AddGeneratedConversionFunc((*core.PodTemplate)(nil), (*corev1.PodTemplate)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_PodTemplate_To_v1_PodTemplate(a.(*core.PodTemplate), b.(*corev1.PodTemplate), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.PodTemplateList)(nil), (*core.PodTemplateList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_PodTemplateList_To_core_PodTemplateList(a.(*v1.PodTemplateList), b.(*core.PodTemplateList), scope) + if err := s.AddGeneratedConversionFunc((*corev1.PodTemplateList)(nil), (*core.PodTemplateList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_PodTemplateList_To_core_PodTemplateList(a.(*corev1.PodTemplateList), b.(*core.PodTemplateList), scope) }); err != nil { return 
err } - if err := s.AddGeneratedConversionFunc((*core.PodTemplateList)(nil), (*v1.PodTemplateList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_PodTemplateList_To_v1_PodTemplateList(a.(*core.PodTemplateList), b.(*v1.PodTemplateList), scope) + if err := s.AddGeneratedConversionFunc((*core.PodTemplateList)(nil), (*corev1.PodTemplateList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_PodTemplateList_To_v1_PodTemplateList(a.(*core.PodTemplateList), b.(*corev1.PodTemplateList), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.PortStatus)(nil), (*core.PortStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_PortStatus_To_core_PortStatus(a.(*v1.PortStatus), b.(*core.PortStatus), scope) + if err := s.AddGeneratedConversionFunc((*corev1.PortStatus)(nil), (*core.PortStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_PortStatus_To_core_PortStatus(a.(*corev1.PortStatus), b.(*core.PortStatus), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.PortStatus)(nil), (*v1.PortStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_PortStatus_To_v1_PortStatus(a.(*core.PortStatus), b.(*v1.PortStatus), scope) + if err := s.AddGeneratedConversionFunc((*core.PortStatus)(nil), (*corev1.PortStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_PortStatus_To_v1_PortStatus(a.(*core.PortStatus), b.(*corev1.PortStatus), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.PortworxVolumeSource)(nil), (*core.PortworxVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_PortworxVolumeSource_To_core_PortworxVolumeSource(a.(*v1.PortworxVolumeSource), b.(*core.PortworxVolumeSource), scope) + if err := s.AddGeneratedConversionFunc((*corev1.PortworxVolumeSource)(nil), (*core.PortworxVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_PortworxVolumeSource_To_core_PortworxVolumeSource(a.(*corev1.PortworxVolumeSource), b.(*core.PortworxVolumeSource), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.PortworxVolumeSource)(nil), (*v1.PortworxVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_PortworxVolumeSource_To_v1_PortworxVolumeSource(a.(*core.PortworxVolumeSource), b.(*v1.PortworxVolumeSource), scope) + if err := s.AddGeneratedConversionFunc((*core.PortworxVolumeSource)(nil), (*corev1.PortworxVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_PortworxVolumeSource_To_v1_PortworxVolumeSource(a.(*core.PortworxVolumeSource), b.(*corev1.PortworxVolumeSource), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.Preconditions)(nil), (*core.Preconditions)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_Preconditions_To_core_Preconditions(a.(*v1.Preconditions), b.(*core.Preconditions), scope) + if err := s.AddGeneratedConversionFunc((*corev1.Preconditions)(nil), (*core.Preconditions)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_Preconditions_To_core_Preconditions(a.(*corev1.Preconditions), b.(*core.Preconditions), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.Preconditions)(nil), 
(*v1.Preconditions)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_Preconditions_To_v1_Preconditions(a.(*core.Preconditions), b.(*v1.Preconditions), scope) + if err := s.AddGeneratedConversionFunc((*core.Preconditions)(nil), (*corev1.Preconditions)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_Preconditions_To_v1_Preconditions(a.(*core.Preconditions), b.(*corev1.Preconditions), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.PreferAvoidPodsEntry)(nil), (*core.PreferAvoidPodsEntry)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_PreferAvoidPodsEntry_To_core_PreferAvoidPodsEntry(a.(*v1.PreferAvoidPodsEntry), b.(*core.PreferAvoidPodsEntry), scope) + if err := s.AddGeneratedConversionFunc((*corev1.PreferAvoidPodsEntry)(nil), (*core.PreferAvoidPodsEntry)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_PreferAvoidPodsEntry_To_core_PreferAvoidPodsEntry(a.(*corev1.PreferAvoidPodsEntry), b.(*core.PreferAvoidPodsEntry), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.PreferAvoidPodsEntry)(nil), (*v1.PreferAvoidPodsEntry)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_PreferAvoidPodsEntry_To_v1_PreferAvoidPodsEntry(a.(*core.PreferAvoidPodsEntry), b.(*v1.PreferAvoidPodsEntry), scope) + if err := s.AddGeneratedConversionFunc((*core.PreferAvoidPodsEntry)(nil), (*corev1.PreferAvoidPodsEntry)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_PreferAvoidPodsEntry_To_v1_PreferAvoidPodsEntry(a.(*core.PreferAvoidPodsEntry), b.(*corev1.PreferAvoidPodsEntry), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.PreferredSchedulingTerm)(nil), (*core.PreferredSchedulingTerm)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_PreferredSchedulingTerm_To_core_PreferredSchedulingTerm(a.(*v1.PreferredSchedulingTerm), b.(*core.PreferredSchedulingTerm), scope) + if err := s.AddGeneratedConversionFunc((*corev1.PreferredSchedulingTerm)(nil), (*core.PreferredSchedulingTerm)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_PreferredSchedulingTerm_To_core_PreferredSchedulingTerm(a.(*corev1.PreferredSchedulingTerm), b.(*core.PreferredSchedulingTerm), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.PreferredSchedulingTerm)(nil), (*v1.PreferredSchedulingTerm)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_PreferredSchedulingTerm_To_v1_PreferredSchedulingTerm(a.(*core.PreferredSchedulingTerm), b.(*v1.PreferredSchedulingTerm), scope) + if err := s.AddGeneratedConversionFunc((*core.PreferredSchedulingTerm)(nil), (*corev1.PreferredSchedulingTerm)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_PreferredSchedulingTerm_To_v1_PreferredSchedulingTerm(a.(*core.PreferredSchedulingTerm), b.(*corev1.PreferredSchedulingTerm), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.Probe)(nil), (*core.Probe)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_Probe_To_core_Probe(a.(*v1.Probe), b.(*core.Probe), scope) + if err := s.AddGeneratedConversionFunc((*corev1.Probe)(nil), (*core.Probe)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_Probe_To_core_Probe(a.(*corev1.Probe), 
b.(*core.Probe), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.Probe)(nil), (*v1.Probe)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_Probe_To_v1_Probe(a.(*core.Probe), b.(*v1.Probe), scope) + if err := s.AddGeneratedConversionFunc((*core.Probe)(nil), (*corev1.Probe)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_Probe_To_v1_Probe(a.(*core.Probe), b.(*corev1.Probe), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.ProbeHandler)(nil), (*core.ProbeHandler)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_ProbeHandler_To_core_ProbeHandler(a.(*v1.ProbeHandler), b.(*core.ProbeHandler), scope) + if err := s.AddGeneratedConversionFunc((*corev1.ProbeHandler)(nil), (*core.ProbeHandler)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_ProbeHandler_To_core_ProbeHandler(a.(*corev1.ProbeHandler), b.(*core.ProbeHandler), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.ProbeHandler)(nil), (*v1.ProbeHandler)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_ProbeHandler_To_v1_ProbeHandler(a.(*core.ProbeHandler), b.(*v1.ProbeHandler), scope) + if err := s.AddGeneratedConversionFunc((*core.ProbeHandler)(nil), (*corev1.ProbeHandler)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_ProbeHandler_To_v1_ProbeHandler(a.(*core.ProbeHandler), b.(*corev1.ProbeHandler), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.ProjectedVolumeSource)(nil), (*core.ProjectedVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_ProjectedVolumeSource_To_core_ProjectedVolumeSource(a.(*v1.ProjectedVolumeSource), b.(*core.ProjectedVolumeSource), scope) + if err := s.AddGeneratedConversionFunc((*corev1.ProjectedVolumeSource)(nil), (*core.ProjectedVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_ProjectedVolumeSource_To_core_ProjectedVolumeSource(a.(*corev1.ProjectedVolumeSource), b.(*core.ProjectedVolumeSource), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.ProjectedVolumeSource)(nil), (*v1.ProjectedVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_ProjectedVolumeSource_To_v1_ProjectedVolumeSource(a.(*core.ProjectedVolumeSource), b.(*v1.ProjectedVolumeSource), scope) + if err := s.AddGeneratedConversionFunc((*core.ProjectedVolumeSource)(nil), (*corev1.ProjectedVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_ProjectedVolumeSource_To_v1_ProjectedVolumeSource(a.(*core.ProjectedVolumeSource), b.(*corev1.ProjectedVolumeSource), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.QuobyteVolumeSource)(nil), (*core.QuobyteVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_QuobyteVolumeSource_To_core_QuobyteVolumeSource(a.(*v1.QuobyteVolumeSource), b.(*core.QuobyteVolumeSource), scope) + if err := s.AddGeneratedConversionFunc((*corev1.QuobyteVolumeSource)(nil), (*core.QuobyteVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_QuobyteVolumeSource_To_core_QuobyteVolumeSource(a.(*corev1.QuobyteVolumeSource), b.(*core.QuobyteVolumeSource), scope) }); err != nil { return 
err } - if err := s.AddGeneratedConversionFunc((*core.QuobyteVolumeSource)(nil), (*v1.QuobyteVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_QuobyteVolumeSource_To_v1_QuobyteVolumeSource(a.(*core.QuobyteVolumeSource), b.(*v1.QuobyteVolumeSource), scope) + if err := s.AddGeneratedConversionFunc((*core.QuobyteVolumeSource)(nil), (*corev1.QuobyteVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_QuobyteVolumeSource_To_v1_QuobyteVolumeSource(a.(*core.QuobyteVolumeSource), b.(*corev1.QuobyteVolumeSource), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.RBDPersistentVolumeSource)(nil), (*core.RBDPersistentVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_RBDPersistentVolumeSource_To_core_RBDPersistentVolumeSource(a.(*v1.RBDPersistentVolumeSource), b.(*core.RBDPersistentVolumeSource), scope) + if err := s.AddGeneratedConversionFunc((*corev1.RBDPersistentVolumeSource)(nil), (*core.RBDPersistentVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_RBDPersistentVolumeSource_To_core_RBDPersistentVolumeSource(a.(*corev1.RBDPersistentVolumeSource), b.(*core.RBDPersistentVolumeSource), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.RBDPersistentVolumeSource)(nil), (*v1.RBDPersistentVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_RBDPersistentVolumeSource_To_v1_RBDPersistentVolumeSource(a.(*core.RBDPersistentVolumeSource), b.(*v1.RBDPersistentVolumeSource), scope) + if err := s.AddGeneratedConversionFunc((*core.RBDPersistentVolumeSource)(nil), (*corev1.RBDPersistentVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_RBDPersistentVolumeSource_To_v1_RBDPersistentVolumeSource(a.(*core.RBDPersistentVolumeSource), b.(*corev1.RBDPersistentVolumeSource), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.RBDVolumeSource)(nil), (*core.RBDVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_RBDVolumeSource_To_core_RBDVolumeSource(a.(*v1.RBDVolumeSource), b.(*core.RBDVolumeSource), scope) + if err := s.AddGeneratedConversionFunc((*corev1.RBDVolumeSource)(nil), (*core.RBDVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_RBDVolumeSource_To_core_RBDVolumeSource(a.(*corev1.RBDVolumeSource), b.(*core.RBDVolumeSource), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.RBDVolumeSource)(nil), (*v1.RBDVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_RBDVolumeSource_To_v1_RBDVolumeSource(a.(*core.RBDVolumeSource), b.(*v1.RBDVolumeSource), scope) + if err := s.AddGeneratedConversionFunc((*core.RBDVolumeSource)(nil), (*corev1.RBDVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_RBDVolumeSource_To_v1_RBDVolumeSource(a.(*core.RBDVolumeSource), b.(*corev1.RBDVolumeSource), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.RangeAllocation)(nil), (*core.RangeAllocation)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_RangeAllocation_To_core_RangeAllocation(a.(*v1.RangeAllocation), b.(*core.RangeAllocation), scope) + if err := 
s.AddGeneratedConversionFunc((*corev1.RangeAllocation)(nil), (*core.RangeAllocation)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_RangeAllocation_To_core_RangeAllocation(a.(*corev1.RangeAllocation), b.(*core.RangeAllocation), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.RangeAllocation)(nil), (*v1.RangeAllocation)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_RangeAllocation_To_v1_RangeAllocation(a.(*core.RangeAllocation), b.(*v1.RangeAllocation), scope) + if err := s.AddGeneratedConversionFunc((*core.RangeAllocation)(nil), (*corev1.RangeAllocation)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_RangeAllocation_To_v1_RangeAllocation(a.(*core.RangeAllocation), b.(*corev1.RangeAllocation), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.ReplicationController)(nil), (*core.ReplicationController)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_ReplicationController_To_core_ReplicationController(a.(*v1.ReplicationController), b.(*core.ReplicationController), scope) + if err := s.AddGeneratedConversionFunc((*corev1.ReplicationController)(nil), (*core.ReplicationController)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_ReplicationController_To_core_ReplicationController(a.(*corev1.ReplicationController), b.(*core.ReplicationController), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.ReplicationController)(nil), (*v1.ReplicationController)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_ReplicationController_To_v1_ReplicationController(a.(*core.ReplicationController), b.(*v1.ReplicationController), scope) + if err := s.AddGeneratedConversionFunc((*core.ReplicationController)(nil), (*corev1.ReplicationController)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_ReplicationController_To_v1_ReplicationController(a.(*core.ReplicationController), b.(*corev1.ReplicationController), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.ReplicationControllerCondition)(nil), (*core.ReplicationControllerCondition)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_ReplicationControllerCondition_To_core_ReplicationControllerCondition(a.(*v1.ReplicationControllerCondition), b.(*core.ReplicationControllerCondition), scope) + if err := s.AddGeneratedConversionFunc((*corev1.ReplicationControllerCondition)(nil), (*core.ReplicationControllerCondition)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_ReplicationControllerCondition_To_core_ReplicationControllerCondition(a.(*corev1.ReplicationControllerCondition), b.(*core.ReplicationControllerCondition), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.ReplicationControllerCondition)(nil), (*v1.ReplicationControllerCondition)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_ReplicationControllerCondition_To_v1_ReplicationControllerCondition(a.(*core.ReplicationControllerCondition), b.(*v1.ReplicationControllerCondition), scope) + if err := s.AddGeneratedConversionFunc((*core.ReplicationControllerCondition)(nil), (*corev1.ReplicationControllerCondition)(nil), func(a, b interface{}, scope conversion.Scope) error { + return 
Convert_core_ReplicationControllerCondition_To_v1_ReplicationControllerCondition(a.(*core.ReplicationControllerCondition), b.(*corev1.ReplicationControllerCondition), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.ReplicationControllerList)(nil), (*core.ReplicationControllerList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_ReplicationControllerList_To_core_ReplicationControllerList(a.(*v1.ReplicationControllerList), b.(*core.ReplicationControllerList), scope) + if err := s.AddGeneratedConversionFunc((*corev1.ReplicationControllerList)(nil), (*core.ReplicationControllerList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_ReplicationControllerList_To_core_ReplicationControllerList(a.(*corev1.ReplicationControllerList), b.(*core.ReplicationControllerList), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.ReplicationControllerList)(nil), (*v1.ReplicationControllerList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_ReplicationControllerList_To_v1_ReplicationControllerList(a.(*core.ReplicationControllerList), b.(*v1.ReplicationControllerList), scope) + if err := s.AddGeneratedConversionFunc((*core.ReplicationControllerList)(nil), (*corev1.ReplicationControllerList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_ReplicationControllerList_To_v1_ReplicationControllerList(a.(*core.ReplicationControllerList), b.(*corev1.ReplicationControllerList), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.ReplicationControllerStatus)(nil), (*core.ReplicationControllerStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_ReplicationControllerStatus_To_core_ReplicationControllerStatus(a.(*v1.ReplicationControllerStatus), b.(*core.ReplicationControllerStatus), scope) + if err := s.AddGeneratedConversionFunc((*corev1.ReplicationControllerStatus)(nil), (*core.ReplicationControllerStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_ReplicationControllerStatus_To_core_ReplicationControllerStatus(a.(*corev1.ReplicationControllerStatus), b.(*core.ReplicationControllerStatus), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.ReplicationControllerStatus)(nil), (*v1.ReplicationControllerStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_ReplicationControllerStatus_To_v1_ReplicationControllerStatus(a.(*core.ReplicationControllerStatus), b.(*v1.ReplicationControllerStatus), scope) + if err := s.AddGeneratedConversionFunc((*core.ReplicationControllerStatus)(nil), (*corev1.ReplicationControllerStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_ReplicationControllerStatus_To_v1_ReplicationControllerStatus(a.(*core.ReplicationControllerStatus), b.(*corev1.ReplicationControllerStatus), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.ResourceClaim)(nil), (*core.ResourceClaim)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_ResourceClaim_To_core_ResourceClaim(a.(*v1.ResourceClaim), b.(*core.ResourceClaim), scope) + if err := s.AddGeneratedConversionFunc((*corev1.ResourceClaim)(nil), (*core.ResourceClaim)(nil), func(a, b interface{}, scope conversion.Scope) error { + return 
Convert_v1_ResourceClaim_To_core_ResourceClaim(a.(*corev1.ResourceClaim), b.(*core.ResourceClaim), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.ResourceClaim)(nil), (*v1.ResourceClaim)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_ResourceClaim_To_v1_ResourceClaim(a.(*core.ResourceClaim), b.(*v1.ResourceClaim), scope) + if err := s.AddGeneratedConversionFunc((*core.ResourceClaim)(nil), (*corev1.ResourceClaim)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_ResourceClaim_To_v1_ResourceClaim(a.(*core.ResourceClaim), b.(*corev1.ResourceClaim), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.ResourceFieldSelector)(nil), (*core.ResourceFieldSelector)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_ResourceFieldSelector_To_core_ResourceFieldSelector(a.(*v1.ResourceFieldSelector), b.(*core.ResourceFieldSelector), scope) + if err := s.AddGeneratedConversionFunc((*corev1.ResourceFieldSelector)(nil), (*core.ResourceFieldSelector)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_ResourceFieldSelector_To_core_ResourceFieldSelector(a.(*corev1.ResourceFieldSelector), b.(*core.ResourceFieldSelector), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.ResourceFieldSelector)(nil), (*v1.ResourceFieldSelector)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_ResourceFieldSelector_To_v1_ResourceFieldSelector(a.(*core.ResourceFieldSelector), b.(*v1.ResourceFieldSelector), scope) + if err := s.AddGeneratedConversionFunc((*core.ResourceFieldSelector)(nil), (*corev1.ResourceFieldSelector)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_ResourceFieldSelector_To_v1_ResourceFieldSelector(a.(*core.ResourceFieldSelector), b.(*corev1.ResourceFieldSelector), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.ResourceHealth)(nil), (*core.ResourceHealth)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_ResourceHealth_To_core_ResourceHealth(a.(*v1.ResourceHealth), b.(*core.ResourceHealth), scope) + if err := s.AddGeneratedConversionFunc((*corev1.ResourceHealth)(nil), (*core.ResourceHealth)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_ResourceHealth_To_core_ResourceHealth(a.(*corev1.ResourceHealth), b.(*core.ResourceHealth), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.ResourceHealth)(nil), (*v1.ResourceHealth)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_ResourceHealth_To_v1_ResourceHealth(a.(*core.ResourceHealth), b.(*v1.ResourceHealth), scope) + if err := s.AddGeneratedConversionFunc((*core.ResourceHealth)(nil), (*corev1.ResourceHealth)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_ResourceHealth_To_v1_ResourceHealth(a.(*core.ResourceHealth), b.(*corev1.ResourceHealth), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.ResourceQuota)(nil), (*core.ResourceQuota)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_ResourceQuota_To_core_ResourceQuota(a.(*v1.ResourceQuota), b.(*core.ResourceQuota), scope) + if err := s.AddGeneratedConversionFunc((*corev1.ResourceQuota)(nil), (*core.ResourceQuota)(nil), func(a, b interface{}, scope 
conversion.Scope) error { + return Convert_v1_ResourceQuota_To_core_ResourceQuota(a.(*corev1.ResourceQuota), b.(*core.ResourceQuota), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.ResourceQuota)(nil), (*v1.ResourceQuota)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_ResourceQuota_To_v1_ResourceQuota(a.(*core.ResourceQuota), b.(*v1.ResourceQuota), scope) + if err := s.AddGeneratedConversionFunc((*core.ResourceQuota)(nil), (*corev1.ResourceQuota)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_ResourceQuota_To_v1_ResourceQuota(a.(*core.ResourceQuota), b.(*corev1.ResourceQuota), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.ResourceQuotaList)(nil), (*core.ResourceQuotaList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_ResourceQuotaList_To_core_ResourceQuotaList(a.(*v1.ResourceQuotaList), b.(*core.ResourceQuotaList), scope) + if err := s.AddGeneratedConversionFunc((*corev1.ResourceQuotaList)(nil), (*core.ResourceQuotaList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_ResourceQuotaList_To_core_ResourceQuotaList(a.(*corev1.ResourceQuotaList), b.(*core.ResourceQuotaList), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.ResourceQuotaList)(nil), (*v1.ResourceQuotaList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_ResourceQuotaList_To_v1_ResourceQuotaList(a.(*core.ResourceQuotaList), b.(*v1.ResourceQuotaList), scope) + if err := s.AddGeneratedConversionFunc((*core.ResourceQuotaList)(nil), (*corev1.ResourceQuotaList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_ResourceQuotaList_To_v1_ResourceQuotaList(a.(*core.ResourceQuotaList), b.(*corev1.ResourceQuotaList), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.ResourceQuotaSpec)(nil), (*core.ResourceQuotaSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_ResourceQuotaSpec_To_core_ResourceQuotaSpec(a.(*v1.ResourceQuotaSpec), b.(*core.ResourceQuotaSpec), scope) + if err := s.AddGeneratedConversionFunc((*corev1.ResourceQuotaSpec)(nil), (*core.ResourceQuotaSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_ResourceQuotaSpec_To_core_ResourceQuotaSpec(a.(*corev1.ResourceQuotaSpec), b.(*core.ResourceQuotaSpec), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.ResourceQuotaSpec)(nil), (*v1.ResourceQuotaSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_ResourceQuotaSpec_To_v1_ResourceQuotaSpec(a.(*core.ResourceQuotaSpec), b.(*v1.ResourceQuotaSpec), scope) + if err := s.AddGeneratedConversionFunc((*core.ResourceQuotaSpec)(nil), (*corev1.ResourceQuotaSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_ResourceQuotaSpec_To_v1_ResourceQuotaSpec(a.(*core.ResourceQuotaSpec), b.(*corev1.ResourceQuotaSpec), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.ResourceQuotaStatus)(nil), (*core.ResourceQuotaStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_ResourceQuotaStatus_To_core_ResourceQuotaStatus(a.(*v1.ResourceQuotaStatus), b.(*core.ResourceQuotaStatus), scope) + if err := s.AddGeneratedConversionFunc((*corev1.ResourceQuotaStatus)(nil), 
(*core.ResourceQuotaStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_ResourceQuotaStatus_To_core_ResourceQuotaStatus(a.(*corev1.ResourceQuotaStatus), b.(*core.ResourceQuotaStatus), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.ResourceQuotaStatus)(nil), (*v1.ResourceQuotaStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_ResourceQuotaStatus_To_v1_ResourceQuotaStatus(a.(*core.ResourceQuotaStatus), b.(*v1.ResourceQuotaStatus), scope) + if err := s.AddGeneratedConversionFunc((*core.ResourceQuotaStatus)(nil), (*corev1.ResourceQuotaStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_ResourceQuotaStatus_To_v1_ResourceQuotaStatus(a.(*core.ResourceQuotaStatus), b.(*corev1.ResourceQuotaStatus), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.ResourceRequirements)(nil), (*core.ResourceRequirements)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_ResourceRequirements_To_core_ResourceRequirements(a.(*v1.ResourceRequirements), b.(*core.ResourceRequirements), scope) + if err := s.AddGeneratedConversionFunc((*corev1.ResourceRequirements)(nil), (*core.ResourceRequirements)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_ResourceRequirements_To_core_ResourceRequirements(a.(*corev1.ResourceRequirements), b.(*core.ResourceRequirements), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.ResourceRequirements)(nil), (*v1.ResourceRequirements)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_ResourceRequirements_To_v1_ResourceRequirements(a.(*core.ResourceRequirements), b.(*v1.ResourceRequirements), scope) + if err := s.AddGeneratedConversionFunc((*core.ResourceRequirements)(nil), (*corev1.ResourceRequirements)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_ResourceRequirements_To_v1_ResourceRequirements(a.(*core.ResourceRequirements), b.(*corev1.ResourceRequirements), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.ResourceStatus)(nil), (*core.ResourceStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_ResourceStatus_To_core_ResourceStatus(a.(*v1.ResourceStatus), b.(*core.ResourceStatus), scope) + if err := s.AddGeneratedConversionFunc((*corev1.ResourceStatus)(nil), (*core.ResourceStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_ResourceStatus_To_core_ResourceStatus(a.(*corev1.ResourceStatus), b.(*core.ResourceStatus), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.ResourceStatus)(nil), (*v1.ResourceStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_ResourceStatus_To_v1_ResourceStatus(a.(*core.ResourceStatus), b.(*v1.ResourceStatus), scope) + if err := s.AddGeneratedConversionFunc((*core.ResourceStatus)(nil), (*corev1.ResourceStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_ResourceStatus_To_v1_ResourceStatus(a.(*core.ResourceStatus), b.(*corev1.ResourceStatus), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.SELinuxOptions)(nil), (*core.SELinuxOptions)(nil), func(a, b interface{}, scope conversion.Scope) error { - return 
Convert_v1_SELinuxOptions_To_core_SELinuxOptions(a.(*v1.SELinuxOptions), b.(*core.SELinuxOptions), scope) + if err := s.AddGeneratedConversionFunc((*corev1.SELinuxOptions)(nil), (*core.SELinuxOptions)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_SELinuxOptions_To_core_SELinuxOptions(a.(*corev1.SELinuxOptions), b.(*core.SELinuxOptions), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.SELinuxOptions)(nil), (*v1.SELinuxOptions)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_SELinuxOptions_To_v1_SELinuxOptions(a.(*core.SELinuxOptions), b.(*v1.SELinuxOptions), scope) + if err := s.AddGeneratedConversionFunc((*core.SELinuxOptions)(nil), (*corev1.SELinuxOptions)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_SELinuxOptions_To_v1_SELinuxOptions(a.(*core.SELinuxOptions), b.(*corev1.SELinuxOptions), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.ScaleIOPersistentVolumeSource)(nil), (*core.ScaleIOPersistentVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_ScaleIOPersistentVolumeSource_To_core_ScaleIOPersistentVolumeSource(a.(*v1.ScaleIOPersistentVolumeSource), b.(*core.ScaleIOPersistentVolumeSource), scope) + if err := s.AddGeneratedConversionFunc((*corev1.ScaleIOPersistentVolumeSource)(nil), (*core.ScaleIOPersistentVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_ScaleIOPersistentVolumeSource_To_core_ScaleIOPersistentVolumeSource(a.(*corev1.ScaleIOPersistentVolumeSource), b.(*core.ScaleIOPersistentVolumeSource), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.ScaleIOPersistentVolumeSource)(nil), (*v1.ScaleIOPersistentVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_ScaleIOPersistentVolumeSource_To_v1_ScaleIOPersistentVolumeSource(a.(*core.ScaleIOPersistentVolumeSource), b.(*v1.ScaleIOPersistentVolumeSource), scope) + if err := s.AddGeneratedConversionFunc((*core.ScaleIOPersistentVolumeSource)(nil), (*corev1.ScaleIOPersistentVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_ScaleIOPersistentVolumeSource_To_v1_ScaleIOPersistentVolumeSource(a.(*core.ScaleIOPersistentVolumeSource), b.(*corev1.ScaleIOPersistentVolumeSource), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.ScaleIOVolumeSource)(nil), (*core.ScaleIOVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_ScaleIOVolumeSource_To_core_ScaleIOVolumeSource(a.(*v1.ScaleIOVolumeSource), b.(*core.ScaleIOVolumeSource), scope) + if err := s.AddGeneratedConversionFunc((*corev1.ScaleIOVolumeSource)(nil), (*core.ScaleIOVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_ScaleIOVolumeSource_To_core_ScaleIOVolumeSource(a.(*corev1.ScaleIOVolumeSource), b.(*core.ScaleIOVolumeSource), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.ScaleIOVolumeSource)(nil), (*v1.ScaleIOVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_ScaleIOVolumeSource_To_v1_ScaleIOVolumeSource(a.(*core.ScaleIOVolumeSource), b.(*v1.ScaleIOVolumeSource), scope) + if err := s.AddGeneratedConversionFunc((*core.ScaleIOVolumeSource)(nil), (*corev1.ScaleIOVolumeSource)(nil), func(a, b interface{}, 
scope conversion.Scope) error { + return Convert_core_ScaleIOVolumeSource_To_v1_ScaleIOVolumeSource(a.(*core.ScaleIOVolumeSource), b.(*corev1.ScaleIOVolumeSource), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.ScopeSelector)(nil), (*core.ScopeSelector)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_ScopeSelector_To_core_ScopeSelector(a.(*v1.ScopeSelector), b.(*core.ScopeSelector), scope) + if err := s.AddGeneratedConversionFunc((*corev1.ScopeSelector)(nil), (*core.ScopeSelector)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_ScopeSelector_To_core_ScopeSelector(a.(*corev1.ScopeSelector), b.(*core.ScopeSelector), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.ScopeSelector)(nil), (*v1.ScopeSelector)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_ScopeSelector_To_v1_ScopeSelector(a.(*core.ScopeSelector), b.(*v1.ScopeSelector), scope) + if err := s.AddGeneratedConversionFunc((*core.ScopeSelector)(nil), (*corev1.ScopeSelector)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_ScopeSelector_To_v1_ScopeSelector(a.(*core.ScopeSelector), b.(*corev1.ScopeSelector), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.ScopedResourceSelectorRequirement)(nil), (*core.ScopedResourceSelectorRequirement)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_ScopedResourceSelectorRequirement_To_core_ScopedResourceSelectorRequirement(a.(*v1.ScopedResourceSelectorRequirement), b.(*core.ScopedResourceSelectorRequirement), scope) + if err := s.AddGeneratedConversionFunc((*corev1.ScopedResourceSelectorRequirement)(nil), (*core.ScopedResourceSelectorRequirement)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_ScopedResourceSelectorRequirement_To_core_ScopedResourceSelectorRequirement(a.(*corev1.ScopedResourceSelectorRequirement), b.(*core.ScopedResourceSelectorRequirement), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.ScopedResourceSelectorRequirement)(nil), (*v1.ScopedResourceSelectorRequirement)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_ScopedResourceSelectorRequirement_To_v1_ScopedResourceSelectorRequirement(a.(*core.ScopedResourceSelectorRequirement), b.(*v1.ScopedResourceSelectorRequirement), scope) + if err := s.AddGeneratedConversionFunc((*core.ScopedResourceSelectorRequirement)(nil), (*corev1.ScopedResourceSelectorRequirement)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_ScopedResourceSelectorRequirement_To_v1_ScopedResourceSelectorRequirement(a.(*core.ScopedResourceSelectorRequirement), b.(*corev1.ScopedResourceSelectorRequirement), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.SeccompProfile)(nil), (*core.SeccompProfile)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_SeccompProfile_To_core_SeccompProfile(a.(*v1.SeccompProfile), b.(*core.SeccompProfile), scope) + if err := s.AddGeneratedConversionFunc((*corev1.SeccompProfile)(nil), (*core.SeccompProfile)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_SeccompProfile_To_core_SeccompProfile(a.(*corev1.SeccompProfile), b.(*core.SeccompProfile), scope) }); err != nil { return err } - if err := 
s.AddGeneratedConversionFunc((*core.SeccompProfile)(nil), (*v1.SeccompProfile)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_SeccompProfile_To_v1_SeccompProfile(a.(*core.SeccompProfile), b.(*v1.SeccompProfile), scope) + if err := s.AddGeneratedConversionFunc((*core.SeccompProfile)(nil), (*corev1.SeccompProfile)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_SeccompProfile_To_v1_SeccompProfile(a.(*core.SeccompProfile), b.(*corev1.SeccompProfile), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.Secret)(nil), (*v1.Secret)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_Secret_To_v1_Secret(a.(*core.Secret), b.(*v1.Secret), scope) + if err := s.AddGeneratedConversionFunc((*core.Secret)(nil), (*corev1.Secret)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_Secret_To_v1_Secret(a.(*core.Secret), b.(*corev1.Secret), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.SecretEnvSource)(nil), (*core.SecretEnvSource)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_SecretEnvSource_To_core_SecretEnvSource(a.(*v1.SecretEnvSource), b.(*core.SecretEnvSource), scope) + if err := s.AddGeneratedConversionFunc((*corev1.SecretEnvSource)(nil), (*core.SecretEnvSource)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_SecretEnvSource_To_core_SecretEnvSource(a.(*corev1.SecretEnvSource), b.(*core.SecretEnvSource), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.SecretEnvSource)(nil), (*v1.SecretEnvSource)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_SecretEnvSource_To_v1_SecretEnvSource(a.(*core.SecretEnvSource), b.(*v1.SecretEnvSource), scope) + if err := s.AddGeneratedConversionFunc((*core.SecretEnvSource)(nil), (*corev1.SecretEnvSource)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_SecretEnvSource_To_v1_SecretEnvSource(a.(*core.SecretEnvSource), b.(*corev1.SecretEnvSource), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.SecretKeySelector)(nil), (*core.SecretKeySelector)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_SecretKeySelector_To_core_SecretKeySelector(a.(*v1.SecretKeySelector), b.(*core.SecretKeySelector), scope) + if err := s.AddGeneratedConversionFunc((*corev1.SecretKeySelector)(nil), (*core.SecretKeySelector)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_SecretKeySelector_To_core_SecretKeySelector(a.(*corev1.SecretKeySelector), b.(*core.SecretKeySelector), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.SecretKeySelector)(nil), (*v1.SecretKeySelector)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_SecretKeySelector_To_v1_SecretKeySelector(a.(*core.SecretKeySelector), b.(*v1.SecretKeySelector), scope) + if err := s.AddGeneratedConversionFunc((*core.SecretKeySelector)(nil), (*corev1.SecretKeySelector)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_SecretKeySelector_To_v1_SecretKeySelector(a.(*core.SecretKeySelector), b.(*corev1.SecretKeySelector), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.SecretList)(nil), (*core.SecretList)(nil), func(a, b interface{}, scope 
conversion.Scope) error { - return Convert_v1_SecretList_To_core_SecretList(a.(*v1.SecretList), b.(*core.SecretList), scope) + if err := s.AddGeneratedConversionFunc((*corev1.SecretList)(nil), (*core.SecretList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_SecretList_To_core_SecretList(a.(*corev1.SecretList), b.(*core.SecretList), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.SecretList)(nil), (*v1.SecretList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_SecretList_To_v1_SecretList(a.(*core.SecretList), b.(*v1.SecretList), scope) + if err := s.AddGeneratedConversionFunc((*core.SecretList)(nil), (*corev1.SecretList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_SecretList_To_v1_SecretList(a.(*core.SecretList), b.(*corev1.SecretList), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.SecretProjection)(nil), (*core.SecretProjection)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_SecretProjection_To_core_SecretProjection(a.(*v1.SecretProjection), b.(*core.SecretProjection), scope) + if err := s.AddGeneratedConversionFunc((*corev1.SecretProjection)(nil), (*core.SecretProjection)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_SecretProjection_To_core_SecretProjection(a.(*corev1.SecretProjection), b.(*core.SecretProjection), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.SecretProjection)(nil), (*v1.SecretProjection)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_SecretProjection_To_v1_SecretProjection(a.(*core.SecretProjection), b.(*v1.SecretProjection), scope) + if err := s.AddGeneratedConversionFunc((*core.SecretProjection)(nil), (*corev1.SecretProjection)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_SecretProjection_To_v1_SecretProjection(a.(*core.SecretProjection), b.(*corev1.SecretProjection), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.SecretReference)(nil), (*core.SecretReference)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_SecretReference_To_core_SecretReference(a.(*v1.SecretReference), b.(*core.SecretReference), scope) + if err := s.AddGeneratedConversionFunc((*corev1.SecretReference)(nil), (*core.SecretReference)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_SecretReference_To_core_SecretReference(a.(*corev1.SecretReference), b.(*core.SecretReference), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.SecretReference)(nil), (*v1.SecretReference)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_SecretReference_To_v1_SecretReference(a.(*core.SecretReference), b.(*v1.SecretReference), scope) + if err := s.AddGeneratedConversionFunc((*core.SecretReference)(nil), (*corev1.SecretReference)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_SecretReference_To_v1_SecretReference(a.(*core.SecretReference), b.(*corev1.SecretReference), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.SecretVolumeSource)(nil), (*core.SecretVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_SecretVolumeSource_To_core_SecretVolumeSource(a.(*v1.SecretVolumeSource), 
b.(*core.SecretVolumeSource), scope) + if err := s.AddGeneratedConversionFunc((*corev1.SecretVolumeSource)(nil), (*core.SecretVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_SecretVolumeSource_To_core_SecretVolumeSource(a.(*corev1.SecretVolumeSource), b.(*core.SecretVolumeSource), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.SecretVolumeSource)(nil), (*v1.SecretVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_SecretVolumeSource_To_v1_SecretVolumeSource(a.(*core.SecretVolumeSource), b.(*v1.SecretVolumeSource), scope) + if err := s.AddGeneratedConversionFunc((*core.SecretVolumeSource)(nil), (*corev1.SecretVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_SecretVolumeSource_To_v1_SecretVolumeSource(a.(*core.SecretVolumeSource), b.(*corev1.SecretVolumeSource), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.SecurityContext)(nil), (*core.SecurityContext)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_SecurityContext_To_core_SecurityContext(a.(*v1.SecurityContext), b.(*core.SecurityContext), scope) + if err := s.AddGeneratedConversionFunc((*corev1.SecurityContext)(nil), (*core.SecurityContext)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_SecurityContext_To_core_SecurityContext(a.(*corev1.SecurityContext), b.(*core.SecurityContext), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.SecurityContext)(nil), (*v1.SecurityContext)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_SecurityContext_To_v1_SecurityContext(a.(*core.SecurityContext), b.(*v1.SecurityContext), scope) + if err := s.AddGeneratedConversionFunc((*core.SecurityContext)(nil), (*corev1.SecurityContext)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_SecurityContext_To_v1_SecurityContext(a.(*core.SecurityContext), b.(*corev1.SecurityContext), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.SerializedReference)(nil), (*core.SerializedReference)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_SerializedReference_To_core_SerializedReference(a.(*v1.SerializedReference), b.(*core.SerializedReference), scope) + if err := s.AddGeneratedConversionFunc((*corev1.SerializedReference)(nil), (*core.SerializedReference)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_SerializedReference_To_core_SerializedReference(a.(*corev1.SerializedReference), b.(*core.SerializedReference), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.SerializedReference)(nil), (*v1.SerializedReference)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_SerializedReference_To_v1_SerializedReference(a.(*core.SerializedReference), b.(*v1.SerializedReference), scope) + if err := s.AddGeneratedConversionFunc((*core.SerializedReference)(nil), (*corev1.SerializedReference)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_SerializedReference_To_v1_SerializedReference(a.(*core.SerializedReference), b.(*corev1.SerializedReference), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.Service)(nil), (*core.Service)(nil), func(a, b interface{}, scope conversion.Scope) error 
{ - return Convert_v1_Service_To_core_Service(a.(*v1.Service), b.(*core.Service), scope) + if err := s.AddGeneratedConversionFunc((*corev1.Service)(nil), (*core.Service)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_Service_To_core_Service(a.(*corev1.Service), b.(*core.Service), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.Service)(nil), (*v1.Service)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_Service_To_v1_Service(a.(*core.Service), b.(*v1.Service), scope) + if err := s.AddGeneratedConversionFunc((*core.Service)(nil), (*corev1.Service)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_Service_To_v1_Service(a.(*core.Service), b.(*corev1.Service), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.ServiceAccount)(nil), (*core.ServiceAccount)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_ServiceAccount_To_core_ServiceAccount(a.(*v1.ServiceAccount), b.(*core.ServiceAccount), scope) + if err := s.AddGeneratedConversionFunc((*corev1.ServiceAccount)(nil), (*core.ServiceAccount)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_ServiceAccount_To_core_ServiceAccount(a.(*corev1.ServiceAccount), b.(*core.ServiceAccount), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.ServiceAccount)(nil), (*v1.ServiceAccount)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_ServiceAccount_To_v1_ServiceAccount(a.(*core.ServiceAccount), b.(*v1.ServiceAccount), scope) + if err := s.AddGeneratedConversionFunc((*core.ServiceAccount)(nil), (*corev1.ServiceAccount)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_ServiceAccount_To_v1_ServiceAccount(a.(*core.ServiceAccount), b.(*corev1.ServiceAccount), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.ServiceAccountList)(nil), (*core.ServiceAccountList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_ServiceAccountList_To_core_ServiceAccountList(a.(*v1.ServiceAccountList), b.(*core.ServiceAccountList), scope) + if err := s.AddGeneratedConversionFunc((*corev1.ServiceAccountList)(nil), (*core.ServiceAccountList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_ServiceAccountList_To_core_ServiceAccountList(a.(*corev1.ServiceAccountList), b.(*core.ServiceAccountList), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.ServiceAccountList)(nil), (*v1.ServiceAccountList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_ServiceAccountList_To_v1_ServiceAccountList(a.(*core.ServiceAccountList), b.(*v1.ServiceAccountList), scope) + if err := s.AddGeneratedConversionFunc((*core.ServiceAccountList)(nil), (*corev1.ServiceAccountList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_ServiceAccountList_To_v1_ServiceAccountList(a.(*core.ServiceAccountList), b.(*corev1.ServiceAccountList), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.ServiceAccountTokenProjection)(nil), (*core.ServiceAccountTokenProjection)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_ServiceAccountTokenProjection_To_core_ServiceAccountTokenProjection(a.(*v1.ServiceAccountTokenProjection), 
b.(*core.ServiceAccountTokenProjection), scope) + if err := s.AddGeneratedConversionFunc((*corev1.ServiceAccountTokenProjection)(nil), (*core.ServiceAccountTokenProjection)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_ServiceAccountTokenProjection_To_core_ServiceAccountTokenProjection(a.(*corev1.ServiceAccountTokenProjection), b.(*core.ServiceAccountTokenProjection), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.ServiceAccountTokenProjection)(nil), (*v1.ServiceAccountTokenProjection)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_ServiceAccountTokenProjection_To_v1_ServiceAccountTokenProjection(a.(*core.ServiceAccountTokenProjection), b.(*v1.ServiceAccountTokenProjection), scope) + if err := s.AddGeneratedConversionFunc((*core.ServiceAccountTokenProjection)(nil), (*corev1.ServiceAccountTokenProjection)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_ServiceAccountTokenProjection_To_v1_ServiceAccountTokenProjection(a.(*core.ServiceAccountTokenProjection), b.(*corev1.ServiceAccountTokenProjection), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.ServiceList)(nil), (*core.ServiceList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_ServiceList_To_core_ServiceList(a.(*v1.ServiceList), b.(*core.ServiceList), scope) + if err := s.AddGeneratedConversionFunc((*corev1.ServiceList)(nil), (*core.ServiceList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_ServiceList_To_core_ServiceList(a.(*corev1.ServiceList), b.(*core.ServiceList), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.ServiceList)(nil), (*v1.ServiceList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_ServiceList_To_v1_ServiceList(a.(*core.ServiceList), b.(*v1.ServiceList), scope) + if err := s.AddGeneratedConversionFunc((*core.ServiceList)(nil), (*corev1.ServiceList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_ServiceList_To_v1_ServiceList(a.(*core.ServiceList), b.(*corev1.ServiceList), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.ServicePort)(nil), (*core.ServicePort)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_ServicePort_To_core_ServicePort(a.(*v1.ServicePort), b.(*core.ServicePort), scope) + if err := s.AddGeneratedConversionFunc((*corev1.ServicePort)(nil), (*core.ServicePort)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_ServicePort_To_core_ServicePort(a.(*corev1.ServicePort), b.(*core.ServicePort), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.ServicePort)(nil), (*v1.ServicePort)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_ServicePort_To_v1_ServicePort(a.(*core.ServicePort), b.(*v1.ServicePort), scope) + if err := s.AddGeneratedConversionFunc((*core.ServicePort)(nil), (*corev1.ServicePort)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_ServicePort_To_v1_ServicePort(a.(*core.ServicePort), b.(*corev1.ServicePort), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.ServiceProxyOptions)(nil), (*core.ServiceProxyOptions)(nil), func(a, b interface{}, scope conversion.Scope) error { - return 
Convert_v1_ServiceProxyOptions_To_core_ServiceProxyOptions(a.(*v1.ServiceProxyOptions), b.(*core.ServiceProxyOptions), scope) + if err := s.AddGeneratedConversionFunc((*corev1.ServiceProxyOptions)(nil), (*core.ServiceProxyOptions)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_ServiceProxyOptions_To_core_ServiceProxyOptions(a.(*corev1.ServiceProxyOptions), b.(*core.ServiceProxyOptions), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.ServiceProxyOptions)(nil), (*v1.ServiceProxyOptions)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_ServiceProxyOptions_To_v1_ServiceProxyOptions(a.(*core.ServiceProxyOptions), b.(*v1.ServiceProxyOptions), scope) + if err := s.AddGeneratedConversionFunc((*core.ServiceProxyOptions)(nil), (*corev1.ServiceProxyOptions)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_ServiceProxyOptions_To_v1_ServiceProxyOptions(a.(*core.ServiceProxyOptions), b.(*corev1.ServiceProxyOptions), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.ServiceSpec)(nil), (*core.ServiceSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_ServiceSpec_To_core_ServiceSpec(a.(*v1.ServiceSpec), b.(*core.ServiceSpec), scope) + if err := s.AddGeneratedConversionFunc((*corev1.ServiceSpec)(nil), (*core.ServiceSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_ServiceSpec_To_core_ServiceSpec(a.(*corev1.ServiceSpec), b.(*core.ServiceSpec), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.ServiceSpec)(nil), (*v1.ServiceSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_ServiceSpec_To_v1_ServiceSpec(a.(*core.ServiceSpec), b.(*v1.ServiceSpec), scope) + if err := s.AddGeneratedConversionFunc((*core.ServiceSpec)(nil), (*corev1.ServiceSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_ServiceSpec_To_v1_ServiceSpec(a.(*core.ServiceSpec), b.(*corev1.ServiceSpec), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.ServiceStatus)(nil), (*core.ServiceStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_ServiceStatus_To_core_ServiceStatus(a.(*v1.ServiceStatus), b.(*core.ServiceStatus), scope) + if err := s.AddGeneratedConversionFunc((*corev1.ServiceStatus)(nil), (*core.ServiceStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_ServiceStatus_To_core_ServiceStatus(a.(*corev1.ServiceStatus), b.(*core.ServiceStatus), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.ServiceStatus)(nil), (*v1.ServiceStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_ServiceStatus_To_v1_ServiceStatus(a.(*core.ServiceStatus), b.(*v1.ServiceStatus), scope) + if err := s.AddGeneratedConversionFunc((*core.ServiceStatus)(nil), (*corev1.ServiceStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_ServiceStatus_To_v1_ServiceStatus(a.(*core.ServiceStatus), b.(*corev1.ServiceStatus), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.SessionAffinityConfig)(nil), (*core.SessionAffinityConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_SessionAffinityConfig_To_core_SessionAffinityConfig(a.(*v1.SessionAffinityConfig), 
b.(*core.SessionAffinityConfig), scope) + if err := s.AddGeneratedConversionFunc((*corev1.SessionAffinityConfig)(nil), (*core.SessionAffinityConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_SessionAffinityConfig_To_core_SessionAffinityConfig(a.(*corev1.SessionAffinityConfig), b.(*core.SessionAffinityConfig), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.SessionAffinityConfig)(nil), (*v1.SessionAffinityConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_SessionAffinityConfig_To_v1_SessionAffinityConfig(a.(*core.SessionAffinityConfig), b.(*v1.SessionAffinityConfig), scope) + if err := s.AddGeneratedConversionFunc((*core.SessionAffinityConfig)(nil), (*corev1.SessionAffinityConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_SessionAffinityConfig_To_v1_SessionAffinityConfig(a.(*core.SessionAffinityConfig), b.(*corev1.SessionAffinityConfig), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.SleepAction)(nil), (*core.SleepAction)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_SleepAction_To_core_SleepAction(a.(*v1.SleepAction), b.(*core.SleepAction), scope) + if err := s.AddGeneratedConversionFunc((*corev1.SleepAction)(nil), (*core.SleepAction)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_SleepAction_To_core_SleepAction(a.(*corev1.SleepAction), b.(*core.SleepAction), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.SleepAction)(nil), (*v1.SleepAction)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_SleepAction_To_v1_SleepAction(a.(*core.SleepAction), b.(*v1.SleepAction), scope) + if err := s.AddGeneratedConversionFunc((*core.SleepAction)(nil), (*corev1.SleepAction)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_SleepAction_To_v1_SleepAction(a.(*core.SleepAction), b.(*corev1.SleepAction), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.StorageOSPersistentVolumeSource)(nil), (*core.StorageOSPersistentVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_StorageOSPersistentVolumeSource_To_core_StorageOSPersistentVolumeSource(a.(*v1.StorageOSPersistentVolumeSource), b.(*core.StorageOSPersistentVolumeSource), scope) + if err := s.AddGeneratedConversionFunc((*corev1.StorageOSPersistentVolumeSource)(nil), (*core.StorageOSPersistentVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_StorageOSPersistentVolumeSource_To_core_StorageOSPersistentVolumeSource(a.(*corev1.StorageOSPersistentVolumeSource), b.(*core.StorageOSPersistentVolumeSource), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.StorageOSPersistentVolumeSource)(nil), (*v1.StorageOSPersistentVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_StorageOSPersistentVolumeSource_To_v1_StorageOSPersistentVolumeSource(a.(*core.StorageOSPersistentVolumeSource), b.(*v1.StorageOSPersistentVolumeSource), scope) + if err := s.AddGeneratedConversionFunc((*core.StorageOSPersistentVolumeSource)(nil), (*corev1.StorageOSPersistentVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { + return 
Convert_core_StorageOSPersistentVolumeSource_To_v1_StorageOSPersistentVolumeSource(a.(*core.StorageOSPersistentVolumeSource), b.(*corev1.StorageOSPersistentVolumeSource), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.StorageOSVolumeSource)(nil), (*core.StorageOSVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_StorageOSVolumeSource_To_core_StorageOSVolumeSource(a.(*v1.StorageOSVolumeSource), b.(*core.StorageOSVolumeSource), scope) + if err := s.AddGeneratedConversionFunc((*corev1.StorageOSVolumeSource)(nil), (*core.StorageOSVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_StorageOSVolumeSource_To_core_StorageOSVolumeSource(a.(*corev1.StorageOSVolumeSource), b.(*core.StorageOSVolumeSource), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.StorageOSVolumeSource)(nil), (*v1.StorageOSVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_StorageOSVolumeSource_To_v1_StorageOSVolumeSource(a.(*core.StorageOSVolumeSource), b.(*v1.StorageOSVolumeSource), scope) + if err := s.AddGeneratedConversionFunc((*core.StorageOSVolumeSource)(nil), (*corev1.StorageOSVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_StorageOSVolumeSource_To_v1_StorageOSVolumeSource(a.(*core.StorageOSVolumeSource), b.(*corev1.StorageOSVolumeSource), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.Sysctl)(nil), (*core.Sysctl)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_Sysctl_To_core_Sysctl(a.(*v1.Sysctl), b.(*core.Sysctl), scope) + if err := s.AddGeneratedConversionFunc((*corev1.Sysctl)(nil), (*core.Sysctl)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_Sysctl_To_core_Sysctl(a.(*corev1.Sysctl), b.(*core.Sysctl), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.Sysctl)(nil), (*v1.Sysctl)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_Sysctl_To_v1_Sysctl(a.(*core.Sysctl), b.(*v1.Sysctl), scope) + if err := s.AddGeneratedConversionFunc((*core.Sysctl)(nil), (*corev1.Sysctl)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_Sysctl_To_v1_Sysctl(a.(*core.Sysctl), b.(*corev1.Sysctl), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.TCPSocketAction)(nil), (*core.TCPSocketAction)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_TCPSocketAction_To_core_TCPSocketAction(a.(*v1.TCPSocketAction), b.(*core.TCPSocketAction), scope) + if err := s.AddGeneratedConversionFunc((*corev1.TCPSocketAction)(nil), (*core.TCPSocketAction)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_TCPSocketAction_To_core_TCPSocketAction(a.(*corev1.TCPSocketAction), b.(*core.TCPSocketAction), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.TCPSocketAction)(nil), (*v1.TCPSocketAction)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_TCPSocketAction_To_v1_TCPSocketAction(a.(*core.TCPSocketAction), b.(*v1.TCPSocketAction), scope) + if err := s.AddGeneratedConversionFunc((*core.TCPSocketAction)(nil), (*corev1.TCPSocketAction)(nil), func(a, b interface{}, scope conversion.Scope) error { + return 
Convert_core_TCPSocketAction_To_v1_TCPSocketAction(a.(*core.TCPSocketAction), b.(*corev1.TCPSocketAction), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.Taint)(nil), (*core.Taint)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_Taint_To_core_Taint(a.(*v1.Taint), b.(*core.Taint), scope) + if err := s.AddGeneratedConversionFunc((*corev1.Taint)(nil), (*core.Taint)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_Taint_To_core_Taint(a.(*corev1.Taint), b.(*core.Taint), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.Taint)(nil), (*v1.Taint)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_Taint_To_v1_Taint(a.(*core.Taint), b.(*v1.Taint), scope) + if err := s.AddGeneratedConversionFunc((*core.Taint)(nil), (*corev1.Taint)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_Taint_To_v1_Taint(a.(*core.Taint), b.(*corev1.Taint), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.Toleration)(nil), (*core.Toleration)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_Toleration_To_core_Toleration(a.(*v1.Toleration), b.(*core.Toleration), scope) + if err := s.AddGeneratedConversionFunc((*corev1.Toleration)(nil), (*core.Toleration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_Toleration_To_core_Toleration(a.(*corev1.Toleration), b.(*core.Toleration), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.Toleration)(nil), (*v1.Toleration)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_Toleration_To_v1_Toleration(a.(*core.Toleration), b.(*v1.Toleration), scope) + if err := s.AddGeneratedConversionFunc((*core.Toleration)(nil), (*corev1.Toleration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_Toleration_To_v1_Toleration(a.(*core.Toleration), b.(*corev1.Toleration), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.TopologySelectorLabelRequirement)(nil), (*core.TopologySelectorLabelRequirement)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_TopologySelectorLabelRequirement_To_core_TopologySelectorLabelRequirement(a.(*v1.TopologySelectorLabelRequirement), b.(*core.TopologySelectorLabelRequirement), scope) + if err := s.AddGeneratedConversionFunc((*corev1.TopologySelectorLabelRequirement)(nil), (*core.TopologySelectorLabelRequirement)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_TopologySelectorLabelRequirement_To_core_TopologySelectorLabelRequirement(a.(*corev1.TopologySelectorLabelRequirement), b.(*core.TopologySelectorLabelRequirement), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.TopologySelectorLabelRequirement)(nil), (*v1.TopologySelectorLabelRequirement)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_TopologySelectorLabelRequirement_To_v1_TopologySelectorLabelRequirement(a.(*core.TopologySelectorLabelRequirement), b.(*v1.TopologySelectorLabelRequirement), scope) + if err := s.AddGeneratedConversionFunc((*core.TopologySelectorLabelRequirement)(nil), (*corev1.TopologySelectorLabelRequirement)(nil), func(a, b interface{}, scope conversion.Scope) error { + return 
Convert_core_TopologySelectorLabelRequirement_To_v1_TopologySelectorLabelRequirement(a.(*core.TopologySelectorLabelRequirement), b.(*corev1.TopologySelectorLabelRequirement), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.TopologySelectorTerm)(nil), (*core.TopologySelectorTerm)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_TopologySelectorTerm_To_core_TopologySelectorTerm(a.(*v1.TopologySelectorTerm), b.(*core.TopologySelectorTerm), scope) + if err := s.AddGeneratedConversionFunc((*corev1.TopologySelectorTerm)(nil), (*core.TopologySelectorTerm)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_TopologySelectorTerm_To_core_TopologySelectorTerm(a.(*corev1.TopologySelectorTerm), b.(*core.TopologySelectorTerm), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.TopologySelectorTerm)(nil), (*v1.TopologySelectorTerm)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_TopologySelectorTerm_To_v1_TopologySelectorTerm(a.(*core.TopologySelectorTerm), b.(*v1.TopologySelectorTerm), scope) + if err := s.AddGeneratedConversionFunc((*core.TopologySelectorTerm)(nil), (*corev1.TopologySelectorTerm)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_TopologySelectorTerm_To_v1_TopologySelectorTerm(a.(*core.TopologySelectorTerm), b.(*corev1.TopologySelectorTerm), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.TopologySpreadConstraint)(nil), (*core.TopologySpreadConstraint)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_TopologySpreadConstraint_To_core_TopologySpreadConstraint(a.(*v1.TopologySpreadConstraint), b.(*core.TopologySpreadConstraint), scope) + if err := s.AddGeneratedConversionFunc((*corev1.TopologySpreadConstraint)(nil), (*core.TopologySpreadConstraint)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_TopologySpreadConstraint_To_core_TopologySpreadConstraint(a.(*corev1.TopologySpreadConstraint), b.(*core.TopologySpreadConstraint), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.TopologySpreadConstraint)(nil), (*v1.TopologySpreadConstraint)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_TopologySpreadConstraint_To_v1_TopologySpreadConstraint(a.(*core.TopologySpreadConstraint), b.(*v1.TopologySpreadConstraint), scope) + if err := s.AddGeneratedConversionFunc((*core.TopologySpreadConstraint)(nil), (*corev1.TopologySpreadConstraint)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_TopologySpreadConstraint_To_v1_TopologySpreadConstraint(a.(*core.TopologySpreadConstraint), b.(*corev1.TopologySpreadConstraint), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.TypedLocalObjectReference)(nil), (*core.TypedLocalObjectReference)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_TypedLocalObjectReference_To_core_TypedLocalObjectReference(a.(*v1.TypedLocalObjectReference), b.(*core.TypedLocalObjectReference), scope) + if err := s.AddGeneratedConversionFunc((*corev1.TypedLocalObjectReference)(nil), (*core.TypedLocalObjectReference)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_TypedLocalObjectReference_To_core_TypedLocalObjectReference(a.(*corev1.TypedLocalObjectReference), b.(*core.TypedLocalObjectReference), 
scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.TypedLocalObjectReference)(nil), (*v1.TypedLocalObjectReference)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_TypedLocalObjectReference_To_v1_TypedLocalObjectReference(a.(*core.TypedLocalObjectReference), b.(*v1.TypedLocalObjectReference), scope) + if err := s.AddGeneratedConversionFunc((*core.TypedLocalObjectReference)(nil), (*corev1.TypedLocalObjectReference)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_TypedLocalObjectReference_To_v1_TypedLocalObjectReference(a.(*core.TypedLocalObjectReference), b.(*corev1.TypedLocalObjectReference), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.TypedObjectReference)(nil), (*core.TypedObjectReference)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_TypedObjectReference_To_core_TypedObjectReference(a.(*v1.TypedObjectReference), b.(*core.TypedObjectReference), scope) + if err := s.AddGeneratedConversionFunc((*corev1.TypedObjectReference)(nil), (*core.TypedObjectReference)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_TypedObjectReference_To_core_TypedObjectReference(a.(*corev1.TypedObjectReference), b.(*core.TypedObjectReference), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.TypedObjectReference)(nil), (*v1.TypedObjectReference)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_TypedObjectReference_To_v1_TypedObjectReference(a.(*core.TypedObjectReference), b.(*v1.TypedObjectReference), scope) + if err := s.AddGeneratedConversionFunc((*core.TypedObjectReference)(nil), (*corev1.TypedObjectReference)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_TypedObjectReference_To_v1_TypedObjectReference(a.(*core.TypedObjectReference), b.(*corev1.TypedObjectReference), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.Volume)(nil), (*core.Volume)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_Volume_To_core_Volume(a.(*v1.Volume), b.(*core.Volume), scope) + if err := s.AddGeneratedConversionFunc((*corev1.Volume)(nil), (*core.Volume)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_Volume_To_core_Volume(a.(*corev1.Volume), b.(*core.Volume), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.Volume)(nil), (*v1.Volume)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_Volume_To_v1_Volume(a.(*core.Volume), b.(*v1.Volume), scope) + if err := s.AddGeneratedConversionFunc((*core.Volume)(nil), (*corev1.Volume)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_Volume_To_v1_Volume(a.(*core.Volume), b.(*corev1.Volume), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.VolumeDevice)(nil), (*core.VolumeDevice)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_VolumeDevice_To_core_VolumeDevice(a.(*v1.VolumeDevice), b.(*core.VolumeDevice), scope) + if err := s.AddGeneratedConversionFunc((*corev1.VolumeDevice)(nil), (*core.VolumeDevice)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_VolumeDevice_To_core_VolumeDevice(a.(*corev1.VolumeDevice), b.(*core.VolumeDevice), scope) }); err != nil { return err } - if err := 
s.AddGeneratedConversionFunc((*core.VolumeDevice)(nil), (*v1.VolumeDevice)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_VolumeDevice_To_v1_VolumeDevice(a.(*core.VolumeDevice), b.(*v1.VolumeDevice), scope) + if err := s.AddGeneratedConversionFunc((*core.VolumeDevice)(nil), (*corev1.VolumeDevice)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_VolumeDevice_To_v1_VolumeDevice(a.(*core.VolumeDevice), b.(*corev1.VolumeDevice), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.VolumeMount)(nil), (*core.VolumeMount)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_VolumeMount_To_core_VolumeMount(a.(*v1.VolumeMount), b.(*core.VolumeMount), scope) + if err := s.AddGeneratedConversionFunc((*corev1.VolumeMount)(nil), (*core.VolumeMount)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_VolumeMount_To_core_VolumeMount(a.(*corev1.VolumeMount), b.(*core.VolumeMount), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.VolumeMount)(nil), (*v1.VolumeMount)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_VolumeMount_To_v1_VolumeMount(a.(*core.VolumeMount), b.(*v1.VolumeMount), scope) + if err := s.AddGeneratedConversionFunc((*core.VolumeMount)(nil), (*corev1.VolumeMount)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_VolumeMount_To_v1_VolumeMount(a.(*core.VolumeMount), b.(*corev1.VolumeMount), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.VolumeMountStatus)(nil), (*core.VolumeMountStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_VolumeMountStatus_To_core_VolumeMountStatus(a.(*v1.VolumeMountStatus), b.(*core.VolumeMountStatus), scope) + if err := s.AddGeneratedConversionFunc((*corev1.VolumeMountStatus)(nil), (*core.VolumeMountStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_VolumeMountStatus_To_core_VolumeMountStatus(a.(*corev1.VolumeMountStatus), b.(*core.VolumeMountStatus), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.VolumeMountStatus)(nil), (*v1.VolumeMountStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_VolumeMountStatus_To_v1_VolumeMountStatus(a.(*core.VolumeMountStatus), b.(*v1.VolumeMountStatus), scope) + if err := s.AddGeneratedConversionFunc((*core.VolumeMountStatus)(nil), (*corev1.VolumeMountStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_VolumeMountStatus_To_v1_VolumeMountStatus(a.(*core.VolumeMountStatus), b.(*corev1.VolumeMountStatus), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.VolumeNodeAffinity)(nil), (*core.VolumeNodeAffinity)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_VolumeNodeAffinity_To_core_VolumeNodeAffinity(a.(*v1.VolumeNodeAffinity), b.(*core.VolumeNodeAffinity), scope) + if err := s.AddGeneratedConversionFunc((*corev1.VolumeNodeAffinity)(nil), (*core.VolumeNodeAffinity)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_VolumeNodeAffinity_To_core_VolumeNodeAffinity(a.(*corev1.VolumeNodeAffinity), b.(*core.VolumeNodeAffinity), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.VolumeNodeAffinity)(nil), (*v1.VolumeNodeAffinity)(nil), 
func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_VolumeNodeAffinity_To_v1_VolumeNodeAffinity(a.(*core.VolumeNodeAffinity), b.(*v1.VolumeNodeAffinity), scope) + if err := s.AddGeneratedConversionFunc((*core.VolumeNodeAffinity)(nil), (*corev1.VolumeNodeAffinity)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_VolumeNodeAffinity_To_v1_VolumeNodeAffinity(a.(*core.VolumeNodeAffinity), b.(*corev1.VolumeNodeAffinity), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.VolumeProjection)(nil), (*core.VolumeProjection)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_VolumeProjection_To_core_VolumeProjection(a.(*v1.VolumeProjection), b.(*core.VolumeProjection), scope) + if err := s.AddGeneratedConversionFunc((*corev1.VolumeProjection)(nil), (*core.VolumeProjection)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_VolumeProjection_To_core_VolumeProjection(a.(*corev1.VolumeProjection), b.(*core.VolumeProjection), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.VolumeProjection)(nil), (*v1.VolumeProjection)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_VolumeProjection_To_v1_VolumeProjection(a.(*core.VolumeProjection), b.(*v1.VolumeProjection), scope) + if err := s.AddGeneratedConversionFunc((*core.VolumeProjection)(nil), (*corev1.VolumeProjection)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_VolumeProjection_To_v1_VolumeProjection(a.(*core.VolumeProjection), b.(*corev1.VolumeProjection), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.VolumeResourceRequirements)(nil), (*core.VolumeResourceRequirements)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_VolumeResourceRequirements_To_core_VolumeResourceRequirements(a.(*v1.VolumeResourceRequirements), b.(*core.VolumeResourceRequirements), scope) + if err := s.AddGeneratedConversionFunc((*corev1.VolumeResourceRequirements)(nil), (*core.VolumeResourceRequirements)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_VolumeResourceRequirements_To_core_VolumeResourceRequirements(a.(*corev1.VolumeResourceRequirements), b.(*core.VolumeResourceRequirements), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.VolumeResourceRequirements)(nil), (*v1.VolumeResourceRequirements)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_VolumeResourceRequirements_To_v1_VolumeResourceRequirements(a.(*core.VolumeResourceRequirements), b.(*v1.VolumeResourceRequirements), scope) + if err := s.AddGeneratedConversionFunc((*core.VolumeResourceRequirements)(nil), (*corev1.VolumeResourceRequirements)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_VolumeResourceRequirements_To_v1_VolumeResourceRequirements(a.(*core.VolumeResourceRequirements), b.(*corev1.VolumeResourceRequirements), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.VolumeSource)(nil), (*core.VolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_VolumeSource_To_core_VolumeSource(a.(*v1.VolumeSource), b.(*core.VolumeSource), scope) + if err := s.AddGeneratedConversionFunc((*corev1.VolumeSource)(nil), (*core.VolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { + return 
Convert_v1_VolumeSource_To_core_VolumeSource(a.(*corev1.VolumeSource), b.(*core.VolumeSource), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.VolumeSource)(nil), (*v1.VolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_VolumeSource_To_v1_VolumeSource(a.(*core.VolumeSource), b.(*v1.VolumeSource), scope) + if err := s.AddGeneratedConversionFunc((*core.VolumeSource)(nil), (*corev1.VolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_VolumeSource_To_v1_VolumeSource(a.(*core.VolumeSource), b.(*corev1.VolumeSource), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.VsphereVirtualDiskVolumeSource)(nil), (*core.VsphereVirtualDiskVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_VsphereVirtualDiskVolumeSource_To_core_VsphereVirtualDiskVolumeSource(a.(*v1.VsphereVirtualDiskVolumeSource), b.(*core.VsphereVirtualDiskVolumeSource), scope) + if err := s.AddGeneratedConversionFunc((*corev1.VsphereVirtualDiskVolumeSource)(nil), (*core.VsphereVirtualDiskVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_VsphereVirtualDiskVolumeSource_To_core_VsphereVirtualDiskVolumeSource(a.(*corev1.VsphereVirtualDiskVolumeSource), b.(*core.VsphereVirtualDiskVolumeSource), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.VsphereVirtualDiskVolumeSource)(nil), (*v1.VsphereVirtualDiskVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_VsphereVirtualDiskVolumeSource_To_v1_VsphereVirtualDiskVolumeSource(a.(*core.VsphereVirtualDiskVolumeSource), b.(*v1.VsphereVirtualDiskVolumeSource), scope) + if err := s.AddGeneratedConversionFunc((*core.VsphereVirtualDiskVolumeSource)(nil), (*corev1.VsphereVirtualDiskVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_VsphereVirtualDiskVolumeSource_To_v1_VsphereVirtualDiskVolumeSource(a.(*core.VsphereVirtualDiskVolumeSource), b.(*corev1.VsphereVirtualDiskVolumeSource), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.WeightedPodAffinityTerm)(nil), (*core.WeightedPodAffinityTerm)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_WeightedPodAffinityTerm_To_core_WeightedPodAffinityTerm(a.(*v1.WeightedPodAffinityTerm), b.(*core.WeightedPodAffinityTerm), scope) + if err := s.AddGeneratedConversionFunc((*corev1.WeightedPodAffinityTerm)(nil), (*core.WeightedPodAffinityTerm)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_WeightedPodAffinityTerm_To_core_WeightedPodAffinityTerm(a.(*corev1.WeightedPodAffinityTerm), b.(*core.WeightedPodAffinityTerm), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.WeightedPodAffinityTerm)(nil), (*v1.WeightedPodAffinityTerm)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_WeightedPodAffinityTerm_To_v1_WeightedPodAffinityTerm(a.(*core.WeightedPodAffinityTerm), b.(*v1.WeightedPodAffinityTerm), scope) + if err := s.AddGeneratedConversionFunc((*core.WeightedPodAffinityTerm)(nil), (*corev1.WeightedPodAffinityTerm)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_WeightedPodAffinityTerm_To_v1_WeightedPodAffinityTerm(a.(*core.WeightedPodAffinityTerm), b.(*corev1.WeightedPodAffinityTerm), scope) }); err != nil { 
return err } - if err := s.AddGeneratedConversionFunc((*v1.WindowsSecurityContextOptions)(nil), (*core.WindowsSecurityContextOptions)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_WindowsSecurityContextOptions_To_core_WindowsSecurityContextOptions(a.(*v1.WindowsSecurityContextOptions), b.(*core.WindowsSecurityContextOptions), scope) + if err := s.AddGeneratedConversionFunc((*corev1.WindowsSecurityContextOptions)(nil), (*core.WindowsSecurityContextOptions)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_WindowsSecurityContextOptions_To_core_WindowsSecurityContextOptions(a.(*corev1.WindowsSecurityContextOptions), b.(*core.WindowsSecurityContextOptions), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.WindowsSecurityContextOptions)(nil), (*v1.WindowsSecurityContextOptions)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_WindowsSecurityContextOptions_To_v1_WindowsSecurityContextOptions(a.(*core.WindowsSecurityContextOptions), b.(*v1.WindowsSecurityContextOptions), scope) + if err := s.AddGeneratedConversionFunc((*core.WindowsSecurityContextOptions)(nil), (*corev1.WindowsSecurityContextOptions)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_WindowsSecurityContextOptions_To_v1_WindowsSecurityContextOptions(a.(*core.WindowsSecurityContextOptions), b.(*corev1.WindowsSecurityContextOptions), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*url.Values)(nil), (*v1.NodeProxyOptions)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_url_Values_To_v1_NodeProxyOptions(a.(*url.Values), b.(*v1.NodeProxyOptions), scope) + if err := s.AddGeneratedConversionFunc((*url.Values)(nil), (*corev1.NodeProxyOptions)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_url_Values_To_v1_NodeProxyOptions(a.(*url.Values), b.(*corev1.NodeProxyOptions), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*url.Values)(nil), (*v1.PodAttachOptions)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_url_Values_To_v1_PodAttachOptions(a.(*url.Values), b.(*v1.PodAttachOptions), scope) + if err := s.AddGeneratedConversionFunc((*url.Values)(nil), (*corev1.PodAttachOptions)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_url_Values_To_v1_PodAttachOptions(a.(*url.Values), b.(*corev1.PodAttachOptions), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*url.Values)(nil), (*v1.PodExecOptions)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_url_Values_To_v1_PodExecOptions(a.(*url.Values), b.(*v1.PodExecOptions), scope) + if err := s.AddGeneratedConversionFunc((*url.Values)(nil), (*corev1.PodExecOptions)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_url_Values_To_v1_PodExecOptions(a.(*url.Values), b.(*corev1.PodExecOptions), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*url.Values)(nil), (*v1.PodLogOptions)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_url_Values_To_v1_PodLogOptions(a.(*url.Values), b.(*v1.PodLogOptions), scope) + if err := s.AddGeneratedConversionFunc((*url.Values)(nil), (*corev1.PodLogOptions)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_url_Values_To_v1_PodLogOptions(a.(*url.Values), 
b.(*corev1.PodLogOptions), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*url.Values)(nil), (*v1.PodPortForwardOptions)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_url_Values_To_v1_PodPortForwardOptions(a.(*url.Values), b.(*v1.PodPortForwardOptions), scope) + if err := s.AddGeneratedConversionFunc((*url.Values)(nil), (*corev1.PodPortForwardOptions)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_url_Values_To_v1_PodPortForwardOptions(a.(*url.Values), b.(*corev1.PodPortForwardOptions), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*url.Values)(nil), (*v1.PodProxyOptions)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_url_Values_To_v1_PodProxyOptions(a.(*url.Values), b.(*v1.PodProxyOptions), scope) + if err := s.AddGeneratedConversionFunc((*url.Values)(nil), (*corev1.PodProxyOptions)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_url_Values_To_v1_PodProxyOptions(a.(*url.Values), b.(*corev1.PodProxyOptions), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*url.Values)(nil), (*v1.ServiceProxyOptions)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_url_Values_To_v1_ServiceProxyOptions(a.(*url.Values), b.(*v1.ServiceProxyOptions), scope) + if err := s.AddGeneratedConversionFunc((*url.Values)(nil), (*corev1.ServiceProxyOptions)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_url_Values_To_v1_ServiceProxyOptions(a.(*url.Values), b.(*corev1.ServiceProxyOptions), scope) }); err != nil { return err } - if err := s.AddConversionFunc((*apps.ReplicaSetSpec)(nil), (*v1.ReplicationControllerSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_apps_ReplicaSetSpec_To_v1_ReplicationControllerSpec(a.(*apps.ReplicaSetSpec), b.(*v1.ReplicationControllerSpec), scope) + if err := s.AddConversionFunc((*[]string)(nil), (**string)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_Slice_string_To_Pointer_string(a.(*[]string), b.(**string), scope) }); err != nil { return err } - if err := s.AddConversionFunc((*apps.ReplicaSetStatus)(nil), (*v1.ReplicationControllerStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_apps_ReplicaSetStatus_To_v1_ReplicationControllerStatus(a.(*apps.ReplicaSetStatus), b.(*v1.ReplicationControllerStatus), scope) + if err := s.AddConversionFunc((*apps.ReplicaSetSpec)(nil), (*corev1.ReplicationControllerSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_ReplicaSetSpec_To_v1_ReplicationControllerSpec(a.(*apps.ReplicaSetSpec), b.(*corev1.ReplicationControllerSpec), scope) }); err != nil { return err } - if err := s.AddConversionFunc((*apps.ReplicaSet)(nil), (*v1.ReplicationController)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_apps_ReplicaSet_To_v1_ReplicationController(a.(*apps.ReplicaSet), b.(*v1.ReplicationController), scope) + if err := s.AddConversionFunc((*apps.ReplicaSetStatus)(nil), (*corev1.ReplicationControllerStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_ReplicaSetStatus_To_v1_ReplicationControllerStatus(a.(*apps.ReplicaSetStatus), b.(*corev1.ReplicationControllerStatus), scope) }); err != nil { return err } - if err := s.AddConversionFunc((*core.NodeSpec)(nil), (*v1.NodeSpec)(nil), func(a, b 
interface{}, scope conversion.Scope) error { - return Convert_core_NodeSpec_To_v1_NodeSpec(a.(*core.NodeSpec), b.(*v1.NodeSpec), scope) + if err := s.AddConversionFunc((*apps.ReplicaSet)(nil), (*corev1.ReplicationController)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_ReplicaSet_To_v1_ReplicationController(a.(*apps.ReplicaSet), b.(*corev1.ReplicationController), scope) }); err != nil { return err } - if err := s.AddConversionFunc((*core.PersistentVolumeSpec)(nil), (*v1.PersistentVolumeSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_PersistentVolumeSpec_To_v1_PersistentVolumeSpec(a.(*core.PersistentVolumeSpec), b.(*v1.PersistentVolumeSpec), scope) + if err := s.AddConversionFunc((*core.NodeSpec)(nil), (*corev1.NodeSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_NodeSpec_To_v1_NodeSpec(a.(*core.NodeSpec), b.(*corev1.NodeSpec), scope) }); err != nil { return err } - if err := s.AddConversionFunc((*core.PodSpec)(nil), (*v1.PodSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_PodSpec_To_v1_PodSpec(a.(*core.PodSpec), b.(*v1.PodSpec), scope) + if err := s.AddConversionFunc((*core.PersistentVolumeSpec)(nil), (*corev1.PersistentVolumeSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_PersistentVolumeSpec_To_v1_PersistentVolumeSpec(a.(*core.PersistentVolumeSpec), b.(*corev1.PersistentVolumeSpec), scope) }); err != nil { return err } - if err := s.AddConversionFunc((*core.PodStatus)(nil), (*v1.PodStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_PodStatus_To_v1_PodStatus(a.(*core.PodStatus), b.(*v1.PodStatus), scope) + if err := s.AddConversionFunc((*core.PodSpec)(nil), (*corev1.PodSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_PodSpec_To_v1_PodSpec(a.(*core.PodSpec), b.(*corev1.PodSpec), scope) }); err != nil { return err } - if err := s.AddConversionFunc((*core.PodTemplateSpec)(nil), (*v1.PodTemplateSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_PodTemplateSpec_To_v1_PodTemplateSpec(a.(*core.PodTemplateSpec), b.(*v1.PodTemplateSpec), scope) + if err := s.AddConversionFunc((*core.PodStatus)(nil), (*corev1.PodStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_PodStatus_To_v1_PodStatus(a.(*core.PodStatus), b.(*corev1.PodStatus), scope) }); err != nil { return err } - if err := s.AddConversionFunc((*core.Pod)(nil), (*v1.Pod)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_Pod_To_v1_Pod(a.(*core.Pod), b.(*v1.Pod), scope) + if err := s.AddConversionFunc((*core.PodTemplateSpec)(nil), (*corev1.PodTemplateSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_PodTemplateSpec_To_v1_PodTemplateSpec(a.(*core.PodTemplateSpec), b.(*corev1.PodTemplateSpec), scope) }); err != nil { return err } - if err := s.AddConversionFunc((*core.ReplicationControllerSpec)(nil), (*v1.ReplicationControllerSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_ReplicationControllerSpec_To_v1_ReplicationControllerSpec(a.(*core.ReplicationControllerSpec), b.(*v1.ReplicationControllerSpec), scope) + if err := s.AddConversionFunc((*core.Pod)(nil), (*corev1.Pod)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_Pod_To_v1_Pod(a.(*core.Pod), 
b.(*corev1.Pod), scope) }); err != nil { return err } - if err := s.AddConversionFunc((*v1.NodeSpec)(nil), (*core.NodeSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_NodeSpec_To_core_NodeSpec(a.(*v1.NodeSpec), b.(*core.NodeSpec), scope) + if err := s.AddConversionFunc((*core.ReplicationControllerSpec)(nil), (*corev1.ReplicationControllerSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_ReplicationControllerSpec_To_v1_ReplicationControllerSpec(a.(*core.ReplicationControllerSpec), b.(*corev1.ReplicationControllerSpec), scope) }); err != nil { return err } - if err := s.AddConversionFunc((*v1.PersistentVolumeSpec)(nil), (*core.PersistentVolumeSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_PersistentVolumeSpec_To_core_PersistentVolumeSpec(a.(*v1.PersistentVolumeSpec), b.(*core.PersistentVolumeSpec), scope) + if err := s.AddConversionFunc((*corev1.NodeSpec)(nil), (*core.NodeSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_NodeSpec_To_core_NodeSpec(a.(*corev1.NodeSpec), b.(*core.NodeSpec), scope) }); err != nil { return err } - if err := s.AddConversionFunc((*v1.PodSpec)(nil), (*core.PodSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_PodSpec_To_core_PodSpec(a.(*v1.PodSpec), b.(*core.PodSpec), scope) + if err := s.AddConversionFunc((*corev1.PersistentVolumeSpec)(nil), (*core.PersistentVolumeSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_PersistentVolumeSpec_To_core_PersistentVolumeSpec(a.(*corev1.PersistentVolumeSpec), b.(*core.PersistentVolumeSpec), scope) }); err != nil { return err } - if err := s.AddConversionFunc((*v1.PodStatus)(nil), (*core.PodStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_PodStatus_To_core_PodStatus(a.(*v1.PodStatus), b.(*core.PodStatus), scope) + if err := s.AddConversionFunc((*corev1.PodSpec)(nil), (*core.PodSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_PodSpec_To_core_PodSpec(a.(*corev1.PodSpec), b.(*core.PodSpec), scope) }); err != nil { return err } - if err := s.AddConversionFunc((*v1.PodTemplateSpec)(nil), (*core.PodTemplateSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_PodTemplateSpec_To_core_PodTemplateSpec(a.(*v1.PodTemplateSpec), b.(*core.PodTemplateSpec), scope) + if err := s.AddConversionFunc((*corev1.PodStatus)(nil), (*core.PodStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_PodStatus_To_core_PodStatus(a.(*corev1.PodStatus), b.(*core.PodStatus), scope) }); err != nil { return err } - if err := s.AddConversionFunc((*v1.Pod)(nil), (*core.Pod)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_Pod_To_core_Pod(a.(*v1.Pod), b.(*core.Pod), scope) + if err := s.AddConversionFunc((*corev1.PodTemplateSpec)(nil), (*core.PodTemplateSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_PodTemplateSpec_To_core_PodTemplateSpec(a.(*corev1.PodTemplateSpec), b.(*core.PodTemplateSpec), scope) }); err != nil { return err } - if err := s.AddConversionFunc((*v1.ReplicationControllerSpec)(nil), (*apps.ReplicaSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_ReplicationControllerSpec_To_apps_ReplicaSetSpec(a.(*v1.ReplicationControllerSpec), b.(*apps.ReplicaSetSpec), scope) + if err := 
s.AddConversionFunc((*corev1.Pod)(nil), (*core.Pod)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_Pod_To_core_Pod(a.(*corev1.Pod), b.(*core.Pod), scope) }); err != nil { return err } - if err := s.AddConversionFunc((*v1.ReplicationControllerSpec)(nil), (*core.ReplicationControllerSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_ReplicationControllerSpec_To_core_ReplicationControllerSpec(a.(*v1.ReplicationControllerSpec), b.(*core.ReplicationControllerSpec), scope) + if err := s.AddConversionFunc((*corev1.ReplicationControllerSpec)(nil), (*apps.ReplicaSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_ReplicationControllerSpec_To_apps_ReplicaSetSpec(a.(*corev1.ReplicationControllerSpec), b.(*apps.ReplicaSetSpec), scope) }); err != nil { return err } - if err := s.AddConversionFunc((*v1.ReplicationControllerStatus)(nil), (*apps.ReplicaSetStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_ReplicationControllerStatus_To_apps_ReplicaSetStatus(a.(*v1.ReplicationControllerStatus), b.(*apps.ReplicaSetStatus), scope) + if err := s.AddConversionFunc((*corev1.ReplicationControllerSpec)(nil), (*core.ReplicationControllerSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_ReplicationControllerSpec_To_core_ReplicationControllerSpec(a.(*corev1.ReplicationControllerSpec), b.(*core.ReplicationControllerSpec), scope) }); err != nil { return err } - if err := s.AddConversionFunc((*v1.ReplicationController)(nil), (*apps.ReplicaSet)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_ReplicationController_To_apps_ReplicaSet(a.(*v1.ReplicationController), b.(*apps.ReplicaSet), scope) + if err := s.AddConversionFunc((*corev1.ReplicationControllerStatus)(nil), (*apps.ReplicaSetStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_ReplicationControllerStatus_To_apps_ReplicaSetStatus(a.(*corev1.ReplicationControllerStatus), b.(*apps.ReplicaSetStatus), scope) }); err != nil { return err } - if err := s.AddConversionFunc((*v1.ResourceList)(nil), (*core.ResourceList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_ResourceList_To_core_ResourceList(a.(*v1.ResourceList), b.(*core.ResourceList), scope) + if err := s.AddConversionFunc((*corev1.ReplicationController)(nil), (*apps.ReplicaSet)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_ReplicationController_To_apps_ReplicaSet(a.(*corev1.ReplicationController), b.(*apps.ReplicaSet), scope) }); err != nil { return err } - if err := s.AddConversionFunc((*v1.Secret)(nil), (*core.Secret)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_Secret_To_core_Secret(a.(*v1.Secret), b.(*core.Secret), scope) + if err := s.AddConversionFunc((*corev1.ResourceList)(nil), (*core.ResourceList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_ResourceList_To_core_ResourceList(a.(*corev1.ResourceList), b.(*core.ResourceList), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*corev1.Secret)(nil), (*core.Secret)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_Secret_To_core_Secret(a.(*corev1.Secret), b.(*core.Secret), scope) }); err != nil { return err } return nil } -func autoConvert_v1_AWSElasticBlockStoreVolumeSource_To_core_AWSElasticBlockStoreVolumeSource(in 
*v1.AWSElasticBlockStoreVolumeSource, out *core.AWSElasticBlockStoreVolumeSource, s conversion.Scope) error {
+func autoConvert_v1_AWSElasticBlockStoreVolumeSource_To_core_AWSElasticBlockStoreVolumeSource(in *corev1.AWSElasticBlockStoreVolumeSource, out *core.AWSElasticBlockStoreVolumeSource, s conversion.Scope) error {
	out.VolumeID = in.VolumeID
	out.FSType = in.FSType
	out.Partition = in.Partition
@@ -2424,11 +2429,11 @@ func autoConvert_v1_AWSElasticBlockStoreVolumeSource_To_core_AWSElasticBlockStor
}

// Convert_v1_AWSElasticBlockStoreVolumeSource_To_core_AWSElasticBlockStoreVolumeSource is an autogenerated conversion function.
-func Convert_v1_AWSElasticBlockStoreVolumeSource_To_core_AWSElasticBlockStoreVolumeSource(in *v1.AWSElasticBlockStoreVolumeSource, out *core.AWSElasticBlockStoreVolumeSource, s conversion.Scope) error {
+func Convert_v1_AWSElasticBlockStoreVolumeSource_To_core_AWSElasticBlockStoreVolumeSource(in *corev1.AWSElasticBlockStoreVolumeSource, out *core.AWSElasticBlockStoreVolumeSource, s conversion.Scope) error {
	return autoConvert_v1_AWSElasticBlockStoreVolumeSource_To_core_AWSElasticBlockStoreVolumeSource(in, out, s)
}

-func autoConvert_core_AWSElasticBlockStoreVolumeSource_To_v1_AWSElasticBlockStoreVolumeSource(in *core.AWSElasticBlockStoreVolumeSource, out *v1.AWSElasticBlockStoreVolumeSource, s conversion.Scope) error {
+func autoConvert_core_AWSElasticBlockStoreVolumeSource_To_v1_AWSElasticBlockStoreVolumeSource(in *core.AWSElasticBlockStoreVolumeSource, out *corev1.AWSElasticBlockStoreVolumeSource, s conversion.Scope) error {
	out.VolumeID = in.VolumeID
	out.FSType = in.FSType
	out.Partition = in.Partition
@@ -2437,11 +2442,11 @@ func autoConvert_core_AWSElasticBlockStoreVolumeSource_To_v1_AWSElasticBlockStor
}

// Convert_core_AWSElasticBlockStoreVolumeSource_To_v1_AWSElasticBlockStoreVolumeSource is an autogenerated conversion function.
-func Convert_core_AWSElasticBlockStoreVolumeSource_To_v1_AWSElasticBlockStoreVolumeSource(in *core.AWSElasticBlockStoreVolumeSource, out *v1.AWSElasticBlockStoreVolumeSource, s conversion.Scope) error {
+func Convert_core_AWSElasticBlockStoreVolumeSource_To_v1_AWSElasticBlockStoreVolumeSource(in *core.AWSElasticBlockStoreVolumeSource, out *corev1.AWSElasticBlockStoreVolumeSource, s conversion.Scope) error {
	return autoConvert_core_AWSElasticBlockStoreVolumeSource_To_v1_AWSElasticBlockStoreVolumeSource(in, out, s)
}

-func autoConvert_v1_Affinity_To_core_Affinity(in *v1.Affinity, out *core.Affinity, s conversion.Scope) error {
+func autoConvert_v1_Affinity_To_core_Affinity(in *corev1.Affinity, out *core.Affinity, s conversion.Scope) error {
	out.NodeAffinity = (*core.NodeAffinity)(unsafe.Pointer(in.NodeAffinity))
	out.PodAffinity = (*core.PodAffinity)(unsafe.Pointer(in.PodAffinity))
	out.PodAntiAffinity = (*core.PodAntiAffinity)(unsafe.Pointer(in.PodAntiAffinity))
@@ -2449,87 +2454,87 @@ func autoConvert_v1_Affinity_To_core_Affinity(in *v1.Affinity, out *core.Affinit
}

// Convert_v1_Affinity_To_core_Affinity is an autogenerated conversion function.
-func Convert_v1_Affinity_To_core_Affinity(in *v1.Affinity, out *core.Affinity, s conversion.Scope) error {
+func Convert_v1_Affinity_To_core_Affinity(in *corev1.Affinity, out *core.Affinity, s conversion.Scope) error {
	return autoConvert_v1_Affinity_To_core_Affinity(in, out, s)
}

-func autoConvert_core_Affinity_To_v1_Affinity(in *core.Affinity, out *v1.Affinity, s conversion.Scope) error {
-	out.NodeAffinity = (*v1.NodeAffinity)(unsafe.Pointer(in.NodeAffinity))
-	out.PodAffinity = (*v1.PodAffinity)(unsafe.Pointer(in.PodAffinity))
-	out.PodAntiAffinity = (*v1.PodAntiAffinity)(unsafe.Pointer(in.PodAntiAffinity))
+func autoConvert_core_Affinity_To_v1_Affinity(in *core.Affinity, out *corev1.Affinity, s conversion.Scope) error {
+	out.NodeAffinity = (*corev1.NodeAffinity)(unsafe.Pointer(in.NodeAffinity))
+	out.PodAffinity = (*corev1.PodAffinity)(unsafe.Pointer(in.PodAffinity))
+	out.PodAntiAffinity = (*corev1.PodAntiAffinity)(unsafe.Pointer(in.PodAntiAffinity))
	return nil
}

// Convert_core_Affinity_To_v1_Affinity is an autogenerated conversion function.
-func Convert_core_Affinity_To_v1_Affinity(in *core.Affinity, out *v1.Affinity, s conversion.Scope) error {
+func Convert_core_Affinity_To_v1_Affinity(in *core.Affinity, out *corev1.Affinity, s conversion.Scope) error {
	return autoConvert_core_Affinity_To_v1_Affinity(in, out, s)
}

-func autoConvert_v1_AppArmorProfile_To_core_AppArmorProfile(in *v1.AppArmorProfile, out *core.AppArmorProfile, s conversion.Scope) error {
+func autoConvert_v1_AppArmorProfile_To_core_AppArmorProfile(in *corev1.AppArmorProfile, out *core.AppArmorProfile, s conversion.Scope) error {
	out.Type = core.AppArmorProfileType(in.Type)
	out.LocalhostProfile = (*string)(unsafe.Pointer(in.LocalhostProfile))
	return nil
}

// Convert_v1_AppArmorProfile_To_core_AppArmorProfile is an autogenerated conversion function.
-func Convert_v1_AppArmorProfile_To_core_AppArmorProfile(in *v1.AppArmorProfile, out *core.AppArmorProfile, s conversion.Scope) error {
+func Convert_v1_AppArmorProfile_To_core_AppArmorProfile(in *corev1.AppArmorProfile, out *core.AppArmorProfile, s conversion.Scope) error {
	return autoConvert_v1_AppArmorProfile_To_core_AppArmorProfile(in, out, s)
}

-func autoConvert_core_AppArmorProfile_To_v1_AppArmorProfile(in *core.AppArmorProfile, out *v1.AppArmorProfile, s conversion.Scope) error {
-	out.Type = v1.AppArmorProfileType(in.Type)
+func autoConvert_core_AppArmorProfile_To_v1_AppArmorProfile(in *core.AppArmorProfile, out *corev1.AppArmorProfile, s conversion.Scope) error {
+	out.Type = corev1.AppArmorProfileType(in.Type)
	out.LocalhostProfile = (*string)(unsafe.Pointer(in.LocalhostProfile))
	return nil
}

// Convert_core_AppArmorProfile_To_v1_AppArmorProfile is an autogenerated conversion function.
-func Convert_core_AppArmorProfile_To_v1_AppArmorProfile(in *core.AppArmorProfile, out *v1.AppArmorProfile, s conversion.Scope) error {
+func Convert_core_AppArmorProfile_To_v1_AppArmorProfile(in *core.AppArmorProfile, out *corev1.AppArmorProfile, s conversion.Scope) error {
	return autoConvert_core_AppArmorProfile_To_v1_AppArmorProfile(in, out, s)
}

-func autoConvert_v1_AttachedVolume_To_core_AttachedVolume(in *v1.AttachedVolume, out *core.AttachedVolume, s conversion.Scope) error {
+func autoConvert_v1_AttachedVolume_To_core_AttachedVolume(in *corev1.AttachedVolume, out *core.AttachedVolume, s conversion.Scope) error {
	out.Name = core.UniqueVolumeName(in.Name)
	out.DevicePath = in.DevicePath
	return nil
}

// Convert_v1_AttachedVolume_To_core_AttachedVolume is an autogenerated conversion function.
-func Convert_v1_AttachedVolume_To_core_AttachedVolume(in *v1.AttachedVolume, out *core.AttachedVolume, s conversion.Scope) error {
+func Convert_v1_AttachedVolume_To_core_AttachedVolume(in *corev1.AttachedVolume, out *core.AttachedVolume, s conversion.Scope) error {
	return autoConvert_v1_AttachedVolume_To_core_AttachedVolume(in, out, s)
}

-func autoConvert_core_AttachedVolume_To_v1_AttachedVolume(in *core.AttachedVolume, out *v1.AttachedVolume, s conversion.Scope) error {
-	out.Name = v1.UniqueVolumeName(in.Name)
+func autoConvert_core_AttachedVolume_To_v1_AttachedVolume(in *core.AttachedVolume, out *corev1.AttachedVolume, s conversion.Scope) error {
+	out.Name = corev1.UniqueVolumeName(in.Name)
	out.DevicePath = in.DevicePath
	return nil
}

// Convert_core_AttachedVolume_To_v1_AttachedVolume is an autogenerated conversion function.
-func Convert_core_AttachedVolume_To_v1_AttachedVolume(in *core.AttachedVolume, out *v1.AttachedVolume, s conversion.Scope) error {
+func Convert_core_AttachedVolume_To_v1_AttachedVolume(in *core.AttachedVolume, out *corev1.AttachedVolume, s conversion.Scope) error {
	return autoConvert_core_AttachedVolume_To_v1_AttachedVolume(in, out, s)
}

-func autoConvert_v1_AvoidPods_To_core_AvoidPods(in *v1.AvoidPods, out *core.AvoidPods, s conversion.Scope) error {
+func autoConvert_v1_AvoidPods_To_core_AvoidPods(in *corev1.AvoidPods, out *core.AvoidPods, s conversion.Scope) error {
	out.PreferAvoidPods = *(*[]core.PreferAvoidPodsEntry)(unsafe.Pointer(&in.PreferAvoidPods))
	return nil
}

// Convert_v1_AvoidPods_To_core_AvoidPods is an autogenerated conversion function.
-func Convert_v1_AvoidPods_To_core_AvoidPods(in *v1.AvoidPods, out *core.AvoidPods, s conversion.Scope) error {
+func Convert_v1_AvoidPods_To_core_AvoidPods(in *corev1.AvoidPods, out *core.AvoidPods, s conversion.Scope) error {
	return autoConvert_v1_AvoidPods_To_core_AvoidPods(in, out, s)
}

-func autoConvert_core_AvoidPods_To_v1_AvoidPods(in *core.AvoidPods, out *v1.AvoidPods, s conversion.Scope) error {
-	out.PreferAvoidPods = *(*[]v1.PreferAvoidPodsEntry)(unsafe.Pointer(&in.PreferAvoidPods))
+func autoConvert_core_AvoidPods_To_v1_AvoidPods(in *core.AvoidPods, out *corev1.AvoidPods, s conversion.Scope) error {
+	out.PreferAvoidPods = *(*[]corev1.PreferAvoidPodsEntry)(unsafe.Pointer(&in.PreferAvoidPods))
	return nil
}

// Convert_core_AvoidPods_To_v1_AvoidPods is an autogenerated conversion function.
-func Convert_core_AvoidPods_To_v1_AvoidPods(in *core.AvoidPods, out *v1.AvoidPods, s conversion.Scope) error {
+func Convert_core_AvoidPods_To_v1_AvoidPods(in *core.AvoidPods, out *corev1.AvoidPods, s conversion.Scope) error {
	return autoConvert_core_AvoidPods_To_v1_AvoidPods(in, out, s)
}

-func autoConvert_v1_AzureDiskVolumeSource_To_core_AzureDiskVolumeSource(in *v1.AzureDiskVolumeSource, out *core.AzureDiskVolumeSource, s conversion.Scope) error {
+func autoConvert_v1_AzureDiskVolumeSource_To_core_AzureDiskVolumeSource(in *corev1.AzureDiskVolumeSource, out *core.AzureDiskVolumeSource, s conversion.Scope) error {
	out.DiskName = in.DiskName
	out.DataDiskURI = in.DataDiskURI
	out.CachingMode = (*core.AzureDataDiskCachingMode)(unsafe.Pointer(in.CachingMode))
@@ -2540,26 +2545,26 @@ func autoConvert_v1_AzureDiskVolumeSource_To_core_AzureDiskVolumeSource(in *v1.A
}

// Convert_v1_AzureDiskVolumeSource_To_core_AzureDiskVolumeSource is an autogenerated conversion function.
-func Convert_v1_AzureDiskVolumeSource_To_core_AzureDiskVolumeSource(in *v1.AzureDiskVolumeSource, out *core.AzureDiskVolumeSource, s conversion.Scope) error {
+func Convert_v1_AzureDiskVolumeSource_To_core_AzureDiskVolumeSource(in *corev1.AzureDiskVolumeSource, out *core.AzureDiskVolumeSource, s conversion.Scope) error {
	return autoConvert_v1_AzureDiskVolumeSource_To_core_AzureDiskVolumeSource(in, out, s)
}

-func autoConvert_core_AzureDiskVolumeSource_To_v1_AzureDiskVolumeSource(in *core.AzureDiskVolumeSource, out *v1.AzureDiskVolumeSource, s conversion.Scope) error {
+func autoConvert_core_AzureDiskVolumeSource_To_v1_AzureDiskVolumeSource(in *core.AzureDiskVolumeSource, out *corev1.AzureDiskVolumeSource, s conversion.Scope) error {
	out.DiskName = in.DiskName
	out.DataDiskURI = in.DataDiskURI
-	out.CachingMode = (*v1.AzureDataDiskCachingMode)(unsafe.Pointer(in.CachingMode))
+	out.CachingMode = (*corev1.AzureDataDiskCachingMode)(unsafe.Pointer(in.CachingMode))
	out.FSType = (*string)(unsafe.Pointer(in.FSType))
	out.ReadOnly = (*bool)(unsafe.Pointer(in.ReadOnly))
-	out.Kind = (*v1.AzureDataDiskKind)(unsafe.Pointer(in.Kind))
+	out.Kind = (*corev1.AzureDataDiskKind)(unsafe.Pointer(in.Kind))
	return nil
}

// Convert_core_AzureDiskVolumeSource_To_v1_AzureDiskVolumeSource is an autogenerated conversion function.
-func Convert_core_AzureDiskVolumeSource_To_v1_AzureDiskVolumeSource(in *core.AzureDiskVolumeSource, out *v1.AzureDiskVolumeSource, s conversion.Scope) error {
+func Convert_core_AzureDiskVolumeSource_To_v1_AzureDiskVolumeSource(in *core.AzureDiskVolumeSource, out *corev1.AzureDiskVolumeSource, s conversion.Scope) error {
	return autoConvert_core_AzureDiskVolumeSource_To_v1_AzureDiskVolumeSource(in, out, s)
}

-func autoConvert_v1_AzureFilePersistentVolumeSource_To_core_AzureFilePersistentVolumeSource(in *v1.AzureFilePersistentVolumeSource, out *core.AzureFilePersistentVolumeSource, s conversion.Scope) error {
+func autoConvert_v1_AzureFilePersistentVolumeSource_To_core_AzureFilePersistentVolumeSource(in *corev1.AzureFilePersistentVolumeSource, out *core.AzureFilePersistentVolumeSource, s conversion.Scope) error {
	out.SecretName = in.SecretName
	out.ShareName = in.ShareName
	out.ReadOnly = in.ReadOnly
@@ -2568,11 +2573,11 @@ func autoConvert_v1_AzureFilePersistentVolumeSource_To_core_AzureFilePersistentV
}

// Convert_v1_AzureFilePersistentVolumeSource_To_core_AzureFilePersistentVolumeSource is an autogenerated conversion function.
-func Convert_v1_AzureFilePersistentVolumeSource_To_core_AzureFilePersistentVolumeSource(in *v1.AzureFilePersistentVolumeSource, out *core.AzureFilePersistentVolumeSource, s conversion.Scope) error {
+func Convert_v1_AzureFilePersistentVolumeSource_To_core_AzureFilePersistentVolumeSource(in *corev1.AzureFilePersistentVolumeSource, out *core.AzureFilePersistentVolumeSource, s conversion.Scope) error {
	return autoConvert_v1_AzureFilePersistentVolumeSource_To_core_AzureFilePersistentVolumeSource(in, out, s)
}

-func autoConvert_core_AzureFilePersistentVolumeSource_To_v1_AzureFilePersistentVolumeSource(in *core.AzureFilePersistentVolumeSource, out *v1.AzureFilePersistentVolumeSource, s conversion.Scope) error {
+func autoConvert_core_AzureFilePersistentVolumeSource_To_v1_AzureFilePersistentVolumeSource(in *core.AzureFilePersistentVolumeSource, out *corev1.AzureFilePersistentVolumeSource, s conversion.Scope) error {
	out.SecretName = in.SecretName
	out.ShareName = in.ShareName
	out.ReadOnly = in.ReadOnly
@@ -2581,11 +2586,11 @@ func autoConvert_core_AzureFilePersistentVolumeSource_To_v1_AzureFilePersistentV
}

// Convert_core_AzureFilePersistentVolumeSource_To_v1_AzureFilePersistentVolumeSource is an autogenerated conversion function.
-func Convert_core_AzureFilePersistentVolumeSource_To_v1_AzureFilePersistentVolumeSource(in *core.AzureFilePersistentVolumeSource, out *v1.AzureFilePersistentVolumeSource, s conversion.Scope) error {
+func Convert_core_AzureFilePersistentVolumeSource_To_v1_AzureFilePersistentVolumeSource(in *core.AzureFilePersistentVolumeSource, out *corev1.AzureFilePersistentVolumeSource, s conversion.Scope) error {
	return autoConvert_core_AzureFilePersistentVolumeSource_To_v1_AzureFilePersistentVolumeSource(in, out, s)
}

-func autoConvert_v1_AzureFileVolumeSource_To_core_AzureFileVolumeSource(in *v1.AzureFileVolumeSource, out *core.AzureFileVolumeSource, s conversion.Scope) error {
+func autoConvert_v1_AzureFileVolumeSource_To_core_AzureFileVolumeSource(in *corev1.AzureFileVolumeSource, out *core.AzureFileVolumeSource, s conversion.Scope) error {
	out.SecretName = in.SecretName
	out.ShareName = in.ShareName
	out.ReadOnly = in.ReadOnly
@@ -2593,11 +2598,11 @@ func autoConvert_v1_AzureFileVolumeSource_To_core_AzureFileVolumeSource(in *v1.A
}

// Convert_v1_AzureFileVolumeSource_To_core_AzureFileVolumeSource is an autogenerated conversion function.
-func Convert_v1_AzureFileVolumeSource_To_core_AzureFileVolumeSource(in *v1.AzureFileVolumeSource, out *core.AzureFileVolumeSource, s conversion.Scope) error {
+func Convert_v1_AzureFileVolumeSource_To_core_AzureFileVolumeSource(in *corev1.AzureFileVolumeSource, out *core.AzureFileVolumeSource, s conversion.Scope) error {
	return autoConvert_v1_AzureFileVolumeSource_To_core_AzureFileVolumeSource(in, out, s)
}

-func autoConvert_core_AzureFileVolumeSource_To_v1_AzureFileVolumeSource(in *core.AzureFileVolumeSource, out *v1.AzureFileVolumeSource, s conversion.Scope) error {
+func autoConvert_core_AzureFileVolumeSource_To_v1_AzureFileVolumeSource(in *core.AzureFileVolumeSource, out *corev1.AzureFileVolumeSource, s conversion.Scope) error {
	out.SecretName = in.SecretName
	out.ShareName = in.ShareName
	out.ReadOnly = in.ReadOnly
@@ -2605,11 +2610,11 @@ func autoConvert_core_AzureFileVolumeSource_To_v1_AzureFileVolumeSource(in *core
}

// Convert_core_AzureFileVolumeSource_To_v1_AzureFileVolumeSource is an autogenerated conversion function.
-func Convert_core_AzureFileVolumeSource_To_v1_AzureFileVolumeSource(in *core.AzureFileVolumeSource, out *v1.AzureFileVolumeSource, s conversion.Scope) error { +func Convert_core_AzureFileVolumeSource_To_v1_AzureFileVolumeSource(in *core.AzureFileVolumeSource, out *corev1.AzureFileVolumeSource, s conversion.Scope) error { return autoConvert_core_AzureFileVolumeSource_To_v1_AzureFileVolumeSource(in, out, s) } -func autoConvert_v1_Binding_To_core_Binding(in *v1.Binding, out *core.Binding, s conversion.Scope) error { +func autoConvert_v1_Binding_To_core_Binding(in *corev1.Binding, out *core.Binding, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta if err := Convert_v1_ObjectReference_To_core_ObjectReference(&in.Target, &out.Target, s); err != nil { return err @@ -2618,11 +2623,11 @@ func autoConvert_v1_Binding_To_core_Binding(in *v1.Binding, out *core.Binding, s } // Convert_v1_Binding_To_core_Binding is an autogenerated conversion function. -func Convert_v1_Binding_To_core_Binding(in *v1.Binding, out *core.Binding, s conversion.Scope) error { +func Convert_v1_Binding_To_core_Binding(in *corev1.Binding, out *core.Binding, s conversion.Scope) error { return autoConvert_v1_Binding_To_core_Binding(in, out, s) } -func autoConvert_core_Binding_To_v1_Binding(in *core.Binding, out *v1.Binding, s conversion.Scope) error { +func autoConvert_core_Binding_To_v1_Binding(in *core.Binding, out *corev1.Binding, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta if err := Convert_core_ObjectReference_To_v1_ObjectReference(&in.Target, &out.Target, s); err != nil { return err @@ -2631,11 +2636,11 @@ func autoConvert_core_Binding_To_v1_Binding(in *core.Binding, out *v1.Binding, s } // Convert_core_Binding_To_v1_Binding is an autogenerated conversion function. -func Convert_core_Binding_To_v1_Binding(in *core.Binding, out *v1.Binding, s conversion.Scope) error { +func Convert_core_Binding_To_v1_Binding(in *core.Binding, out *corev1.Binding, s conversion.Scope) error { return autoConvert_core_Binding_To_v1_Binding(in, out, s) } -func autoConvert_v1_CSIPersistentVolumeSource_To_core_CSIPersistentVolumeSource(in *v1.CSIPersistentVolumeSource, out *core.CSIPersistentVolumeSource, s conversion.Scope) error { +func autoConvert_v1_CSIPersistentVolumeSource_To_core_CSIPersistentVolumeSource(in *corev1.CSIPersistentVolumeSource, out *core.CSIPersistentVolumeSource, s conversion.Scope) error { out.Driver = in.Driver out.VolumeHandle = in.VolumeHandle out.ReadOnly = in.ReadOnly @@ -2650,30 +2655,30 @@ func autoConvert_v1_CSIPersistentVolumeSource_To_core_CSIPersistentVolumeSource( } // Convert_v1_CSIPersistentVolumeSource_To_core_CSIPersistentVolumeSource is an autogenerated conversion function. 
-func Convert_v1_CSIPersistentVolumeSource_To_core_CSIPersistentVolumeSource(in *v1.CSIPersistentVolumeSource, out *core.CSIPersistentVolumeSource, s conversion.Scope) error { +func Convert_v1_CSIPersistentVolumeSource_To_core_CSIPersistentVolumeSource(in *corev1.CSIPersistentVolumeSource, out *core.CSIPersistentVolumeSource, s conversion.Scope) error { return autoConvert_v1_CSIPersistentVolumeSource_To_core_CSIPersistentVolumeSource(in, out, s) } -func autoConvert_core_CSIPersistentVolumeSource_To_v1_CSIPersistentVolumeSource(in *core.CSIPersistentVolumeSource, out *v1.CSIPersistentVolumeSource, s conversion.Scope) error { +func autoConvert_core_CSIPersistentVolumeSource_To_v1_CSIPersistentVolumeSource(in *core.CSIPersistentVolumeSource, out *corev1.CSIPersistentVolumeSource, s conversion.Scope) error { out.Driver = in.Driver out.VolumeHandle = in.VolumeHandle out.ReadOnly = in.ReadOnly out.FSType = in.FSType out.VolumeAttributes = *(*map[string]string)(unsafe.Pointer(&in.VolumeAttributes)) - out.ControllerPublishSecretRef = (*v1.SecretReference)(unsafe.Pointer(in.ControllerPublishSecretRef)) - out.NodeStageSecretRef = (*v1.SecretReference)(unsafe.Pointer(in.NodeStageSecretRef)) - out.NodePublishSecretRef = (*v1.SecretReference)(unsafe.Pointer(in.NodePublishSecretRef)) - out.ControllerExpandSecretRef = (*v1.SecretReference)(unsafe.Pointer(in.ControllerExpandSecretRef)) - out.NodeExpandSecretRef = (*v1.SecretReference)(unsafe.Pointer(in.NodeExpandSecretRef)) + out.ControllerPublishSecretRef = (*corev1.SecretReference)(unsafe.Pointer(in.ControllerPublishSecretRef)) + out.NodeStageSecretRef = (*corev1.SecretReference)(unsafe.Pointer(in.NodeStageSecretRef)) + out.NodePublishSecretRef = (*corev1.SecretReference)(unsafe.Pointer(in.NodePublishSecretRef)) + out.ControllerExpandSecretRef = (*corev1.SecretReference)(unsafe.Pointer(in.ControllerExpandSecretRef)) + out.NodeExpandSecretRef = (*corev1.SecretReference)(unsafe.Pointer(in.NodeExpandSecretRef)) return nil } // Convert_core_CSIPersistentVolumeSource_To_v1_CSIPersistentVolumeSource is an autogenerated conversion function. -func Convert_core_CSIPersistentVolumeSource_To_v1_CSIPersistentVolumeSource(in *core.CSIPersistentVolumeSource, out *v1.CSIPersistentVolumeSource, s conversion.Scope) error { +func Convert_core_CSIPersistentVolumeSource_To_v1_CSIPersistentVolumeSource(in *core.CSIPersistentVolumeSource, out *corev1.CSIPersistentVolumeSource, s conversion.Scope) error { return autoConvert_core_CSIPersistentVolumeSource_To_v1_CSIPersistentVolumeSource(in, out, s) } -func autoConvert_v1_CSIVolumeSource_To_core_CSIVolumeSource(in *v1.CSIVolumeSource, out *core.CSIVolumeSource, s conversion.Scope) error { +func autoConvert_v1_CSIVolumeSource_To_core_CSIVolumeSource(in *corev1.CSIVolumeSource, out *core.CSIVolumeSource, s conversion.Scope) error { out.Driver = in.Driver out.ReadOnly = (*bool)(unsafe.Pointer(in.ReadOnly)) out.FSType = (*string)(unsafe.Pointer(in.FSType)) @@ -2683,47 +2688,47 @@ func autoConvert_v1_CSIVolumeSource_To_core_CSIVolumeSource(in *v1.CSIVolumeSour } // Convert_v1_CSIVolumeSource_To_core_CSIVolumeSource is an autogenerated conversion function. 
-func Convert_v1_CSIVolumeSource_To_core_CSIVolumeSource(in *v1.CSIVolumeSource, out *core.CSIVolumeSource, s conversion.Scope) error { +func Convert_v1_CSIVolumeSource_To_core_CSIVolumeSource(in *corev1.CSIVolumeSource, out *core.CSIVolumeSource, s conversion.Scope) error { return autoConvert_v1_CSIVolumeSource_To_core_CSIVolumeSource(in, out, s) } -func autoConvert_core_CSIVolumeSource_To_v1_CSIVolumeSource(in *core.CSIVolumeSource, out *v1.CSIVolumeSource, s conversion.Scope) error { +func autoConvert_core_CSIVolumeSource_To_v1_CSIVolumeSource(in *core.CSIVolumeSource, out *corev1.CSIVolumeSource, s conversion.Scope) error { out.Driver = in.Driver out.ReadOnly = (*bool)(unsafe.Pointer(in.ReadOnly)) out.FSType = (*string)(unsafe.Pointer(in.FSType)) out.VolumeAttributes = *(*map[string]string)(unsafe.Pointer(&in.VolumeAttributes)) - out.NodePublishSecretRef = (*v1.LocalObjectReference)(unsafe.Pointer(in.NodePublishSecretRef)) + out.NodePublishSecretRef = (*corev1.LocalObjectReference)(unsafe.Pointer(in.NodePublishSecretRef)) return nil } // Convert_core_CSIVolumeSource_To_v1_CSIVolumeSource is an autogenerated conversion function. -func Convert_core_CSIVolumeSource_To_v1_CSIVolumeSource(in *core.CSIVolumeSource, out *v1.CSIVolumeSource, s conversion.Scope) error { +func Convert_core_CSIVolumeSource_To_v1_CSIVolumeSource(in *core.CSIVolumeSource, out *corev1.CSIVolumeSource, s conversion.Scope) error { return autoConvert_core_CSIVolumeSource_To_v1_CSIVolumeSource(in, out, s) } -func autoConvert_v1_Capabilities_To_core_Capabilities(in *v1.Capabilities, out *core.Capabilities, s conversion.Scope) error { +func autoConvert_v1_Capabilities_To_core_Capabilities(in *corev1.Capabilities, out *core.Capabilities, s conversion.Scope) error { out.Add = *(*[]core.Capability)(unsafe.Pointer(&in.Add)) out.Drop = *(*[]core.Capability)(unsafe.Pointer(&in.Drop)) return nil } // Convert_v1_Capabilities_To_core_Capabilities is an autogenerated conversion function. -func Convert_v1_Capabilities_To_core_Capabilities(in *v1.Capabilities, out *core.Capabilities, s conversion.Scope) error { +func Convert_v1_Capabilities_To_core_Capabilities(in *corev1.Capabilities, out *core.Capabilities, s conversion.Scope) error { return autoConvert_v1_Capabilities_To_core_Capabilities(in, out, s) } -func autoConvert_core_Capabilities_To_v1_Capabilities(in *core.Capabilities, out *v1.Capabilities, s conversion.Scope) error { - out.Add = *(*[]v1.Capability)(unsafe.Pointer(&in.Add)) - out.Drop = *(*[]v1.Capability)(unsafe.Pointer(&in.Drop)) +func autoConvert_core_Capabilities_To_v1_Capabilities(in *core.Capabilities, out *corev1.Capabilities, s conversion.Scope) error { + out.Add = *(*[]corev1.Capability)(unsafe.Pointer(&in.Add)) + out.Drop = *(*[]corev1.Capability)(unsafe.Pointer(&in.Drop)) return nil } // Convert_core_Capabilities_To_v1_Capabilities is an autogenerated conversion function. 
-func Convert_core_Capabilities_To_v1_Capabilities(in *core.Capabilities, out *v1.Capabilities, s conversion.Scope) error { +func Convert_core_Capabilities_To_v1_Capabilities(in *core.Capabilities, out *corev1.Capabilities, s conversion.Scope) error { return autoConvert_core_Capabilities_To_v1_Capabilities(in, out, s) } -func autoConvert_v1_CephFSPersistentVolumeSource_To_core_CephFSPersistentVolumeSource(in *v1.CephFSPersistentVolumeSource, out *core.CephFSPersistentVolumeSource, s conversion.Scope) error { +func autoConvert_v1_CephFSPersistentVolumeSource_To_core_CephFSPersistentVolumeSource(in *corev1.CephFSPersistentVolumeSource, out *core.CephFSPersistentVolumeSource, s conversion.Scope) error { out.Monitors = *(*[]string)(unsafe.Pointer(&in.Monitors)) out.Path = in.Path out.User = in.User @@ -2734,26 +2739,26 @@ func autoConvert_v1_CephFSPersistentVolumeSource_To_core_CephFSPersistentVolumeS } // Convert_v1_CephFSPersistentVolumeSource_To_core_CephFSPersistentVolumeSource is an autogenerated conversion function. -func Convert_v1_CephFSPersistentVolumeSource_To_core_CephFSPersistentVolumeSource(in *v1.CephFSPersistentVolumeSource, out *core.CephFSPersistentVolumeSource, s conversion.Scope) error { +func Convert_v1_CephFSPersistentVolumeSource_To_core_CephFSPersistentVolumeSource(in *corev1.CephFSPersistentVolumeSource, out *core.CephFSPersistentVolumeSource, s conversion.Scope) error { return autoConvert_v1_CephFSPersistentVolumeSource_To_core_CephFSPersistentVolumeSource(in, out, s) } -func autoConvert_core_CephFSPersistentVolumeSource_To_v1_CephFSPersistentVolumeSource(in *core.CephFSPersistentVolumeSource, out *v1.CephFSPersistentVolumeSource, s conversion.Scope) error { +func autoConvert_core_CephFSPersistentVolumeSource_To_v1_CephFSPersistentVolumeSource(in *core.CephFSPersistentVolumeSource, out *corev1.CephFSPersistentVolumeSource, s conversion.Scope) error { out.Monitors = *(*[]string)(unsafe.Pointer(&in.Monitors)) out.Path = in.Path out.User = in.User out.SecretFile = in.SecretFile - out.SecretRef = (*v1.SecretReference)(unsafe.Pointer(in.SecretRef)) + out.SecretRef = (*corev1.SecretReference)(unsafe.Pointer(in.SecretRef)) out.ReadOnly = in.ReadOnly return nil } // Convert_core_CephFSPersistentVolumeSource_To_v1_CephFSPersistentVolumeSource is an autogenerated conversion function. -func Convert_core_CephFSPersistentVolumeSource_To_v1_CephFSPersistentVolumeSource(in *core.CephFSPersistentVolumeSource, out *v1.CephFSPersistentVolumeSource, s conversion.Scope) error { +func Convert_core_CephFSPersistentVolumeSource_To_v1_CephFSPersistentVolumeSource(in *core.CephFSPersistentVolumeSource, out *corev1.CephFSPersistentVolumeSource, s conversion.Scope) error { return autoConvert_core_CephFSPersistentVolumeSource_To_v1_CephFSPersistentVolumeSource(in, out, s) } -func autoConvert_v1_CephFSVolumeSource_To_core_CephFSVolumeSource(in *v1.CephFSVolumeSource, out *core.CephFSVolumeSource, s conversion.Scope) error { +func autoConvert_v1_CephFSVolumeSource_To_core_CephFSVolumeSource(in *corev1.CephFSVolumeSource, out *core.CephFSVolumeSource, s conversion.Scope) error { out.Monitors = *(*[]string)(unsafe.Pointer(&in.Monitors)) out.Path = in.Path out.User = in.User @@ -2764,26 +2769,26 @@ func autoConvert_v1_CephFSVolumeSource_To_core_CephFSVolumeSource(in *v1.CephFSV } // Convert_v1_CephFSVolumeSource_To_core_CephFSVolumeSource is an autogenerated conversion function. 
-func Convert_v1_CephFSVolumeSource_To_core_CephFSVolumeSource(in *v1.CephFSVolumeSource, out *core.CephFSVolumeSource, s conversion.Scope) error { +func Convert_v1_CephFSVolumeSource_To_core_CephFSVolumeSource(in *corev1.CephFSVolumeSource, out *core.CephFSVolumeSource, s conversion.Scope) error { return autoConvert_v1_CephFSVolumeSource_To_core_CephFSVolumeSource(in, out, s) } -func autoConvert_core_CephFSVolumeSource_To_v1_CephFSVolumeSource(in *core.CephFSVolumeSource, out *v1.CephFSVolumeSource, s conversion.Scope) error { +func autoConvert_core_CephFSVolumeSource_To_v1_CephFSVolumeSource(in *core.CephFSVolumeSource, out *corev1.CephFSVolumeSource, s conversion.Scope) error { out.Monitors = *(*[]string)(unsafe.Pointer(&in.Monitors)) out.Path = in.Path out.User = in.User out.SecretFile = in.SecretFile - out.SecretRef = (*v1.LocalObjectReference)(unsafe.Pointer(in.SecretRef)) + out.SecretRef = (*corev1.LocalObjectReference)(unsafe.Pointer(in.SecretRef)) out.ReadOnly = in.ReadOnly return nil } // Convert_core_CephFSVolumeSource_To_v1_CephFSVolumeSource is an autogenerated conversion function. -func Convert_core_CephFSVolumeSource_To_v1_CephFSVolumeSource(in *core.CephFSVolumeSource, out *v1.CephFSVolumeSource, s conversion.Scope) error { +func Convert_core_CephFSVolumeSource_To_v1_CephFSVolumeSource(in *core.CephFSVolumeSource, out *corev1.CephFSVolumeSource, s conversion.Scope) error { return autoConvert_core_CephFSVolumeSource_To_v1_CephFSVolumeSource(in, out, s) } -func autoConvert_v1_CinderPersistentVolumeSource_To_core_CinderPersistentVolumeSource(in *v1.CinderPersistentVolumeSource, out *core.CinderPersistentVolumeSource, s conversion.Scope) error { +func autoConvert_v1_CinderPersistentVolumeSource_To_core_CinderPersistentVolumeSource(in *corev1.CinderPersistentVolumeSource, out *core.CinderPersistentVolumeSource, s conversion.Scope) error { out.VolumeID = in.VolumeID out.FSType = in.FSType out.ReadOnly = in.ReadOnly @@ -2792,24 +2797,24 @@ func autoConvert_v1_CinderPersistentVolumeSource_To_core_CinderPersistentVolumeS } // Convert_v1_CinderPersistentVolumeSource_To_core_CinderPersistentVolumeSource is an autogenerated conversion function. -func Convert_v1_CinderPersistentVolumeSource_To_core_CinderPersistentVolumeSource(in *v1.CinderPersistentVolumeSource, out *core.CinderPersistentVolumeSource, s conversion.Scope) error { +func Convert_v1_CinderPersistentVolumeSource_To_core_CinderPersistentVolumeSource(in *corev1.CinderPersistentVolumeSource, out *core.CinderPersistentVolumeSource, s conversion.Scope) error { return autoConvert_v1_CinderPersistentVolumeSource_To_core_CinderPersistentVolumeSource(in, out, s) } -func autoConvert_core_CinderPersistentVolumeSource_To_v1_CinderPersistentVolumeSource(in *core.CinderPersistentVolumeSource, out *v1.CinderPersistentVolumeSource, s conversion.Scope) error { +func autoConvert_core_CinderPersistentVolumeSource_To_v1_CinderPersistentVolumeSource(in *core.CinderPersistentVolumeSource, out *corev1.CinderPersistentVolumeSource, s conversion.Scope) error { out.VolumeID = in.VolumeID out.FSType = in.FSType out.ReadOnly = in.ReadOnly - out.SecretRef = (*v1.SecretReference)(unsafe.Pointer(in.SecretRef)) + out.SecretRef = (*corev1.SecretReference)(unsafe.Pointer(in.SecretRef)) return nil } // Convert_core_CinderPersistentVolumeSource_To_v1_CinderPersistentVolumeSource is an autogenerated conversion function. 
-func Convert_core_CinderPersistentVolumeSource_To_v1_CinderPersistentVolumeSource(in *core.CinderPersistentVolumeSource, out *v1.CinderPersistentVolumeSource, s conversion.Scope) error { +func Convert_core_CinderPersistentVolumeSource_To_v1_CinderPersistentVolumeSource(in *core.CinderPersistentVolumeSource, out *corev1.CinderPersistentVolumeSource, s conversion.Scope) error { return autoConvert_core_CinderPersistentVolumeSource_To_v1_CinderPersistentVolumeSource(in, out, s) } -func autoConvert_v1_CinderVolumeSource_To_core_CinderVolumeSource(in *v1.CinderVolumeSource, out *core.CinderVolumeSource, s conversion.Scope) error { +func autoConvert_v1_CinderVolumeSource_To_core_CinderVolumeSource(in *corev1.CinderVolumeSource, out *core.CinderVolumeSource, s conversion.Scope) error { out.VolumeID = in.VolumeID out.FSType = in.FSType out.ReadOnly = in.ReadOnly @@ -2818,44 +2823,44 @@ func autoConvert_v1_CinderVolumeSource_To_core_CinderVolumeSource(in *v1.CinderV } // Convert_v1_CinderVolumeSource_To_core_CinderVolumeSource is an autogenerated conversion function. -func Convert_v1_CinderVolumeSource_To_core_CinderVolumeSource(in *v1.CinderVolumeSource, out *core.CinderVolumeSource, s conversion.Scope) error { +func Convert_v1_CinderVolumeSource_To_core_CinderVolumeSource(in *corev1.CinderVolumeSource, out *core.CinderVolumeSource, s conversion.Scope) error { return autoConvert_v1_CinderVolumeSource_To_core_CinderVolumeSource(in, out, s) } -func autoConvert_core_CinderVolumeSource_To_v1_CinderVolumeSource(in *core.CinderVolumeSource, out *v1.CinderVolumeSource, s conversion.Scope) error { +func autoConvert_core_CinderVolumeSource_To_v1_CinderVolumeSource(in *core.CinderVolumeSource, out *corev1.CinderVolumeSource, s conversion.Scope) error { out.VolumeID = in.VolumeID out.FSType = in.FSType out.ReadOnly = in.ReadOnly - out.SecretRef = (*v1.LocalObjectReference)(unsafe.Pointer(in.SecretRef)) + out.SecretRef = (*corev1.LocalObjectReference)(unsafe.Pointer(in.SecretRef)) return nil } // Convert_core_CinderVolumeSource_To_v1_CinderVolumeSource is an autogenerated conversion function. -func Convert_core_CinderVolumeSource_To_v1_CinderVolumeSource(in *core.CinderVolumeSource, out *v1.CinderVolumeSource, s conversion.Scope) error { +func Convert_core_CinderVolumeSource_To_v1_CinderVolumeSource(in *core.CinderVolumeSource, out *corev1.CinderVolumeSource, s conversion.Scope) error { return autoConvert_core_CinderVolumeSource_To_v1_CinderVolumeSource(in, out, s) } -func autoConvert_v1_ClientIPConfig_To_core_ClientIPConfig(in *v1.ClientIPConfig, out *core.ClientIPConfig, s conversion.Scope) error { +func autoConvert_v1_ClientIPConfig_To_core_ClientIPConfig(in *corev1.ClientIPConfig, out *core.ClientIPConfig, s conversion.Scope) error { out.TimeoutSeconds = (*int32)(unsafe.Pointer(in.TimeoutSeconds)) return nil } // Convert_v1_ClientIPConfig_To_core_ClientIPConfig is an autogenerated conversion function. 
-func Convert_v1_ClientIPConfig_To_core_ClientIPConfig(in *v1.ClientIPConfig, out *core.ClientIPConfig, s conversion.Scope) error { +func Convert_v1_ClientIPConfig_To_core_ClientIPConfig(in *corev1.ClientIPConfig, out *core.ClientIPConfig, s conversion.Scope) error { return autoConvert_v1_ClientIPConfig_To_core_ClientIPConfig(in, out, s) } -func autoConvert_core_ClientIPConfig_To_v1_ClientIPConfig(in *core.ClientIPConfig, out *v1.ClientIPConfig, s conversion.Scope) error { +func autoConvert_core_ClientIPConfig_To_v1_ClientIPConfig(in *core.ClientIPConfig, out *corev1.ClientIPConfig, s conversion.Scope) error { out.TimeoutSeconds = (*int32)(unsafe.Pointer(in.TimeoutSeconds)) return nil } // Convert_core_ClientIPConfig_To_v1_ClientIPConfig is an autogenerated conversion function. -func Convert_core_ClientIPConfig_To_v1_ClientIPConfig(in *core.ClientIPConfig, out *v1.ClientIPConfig, s conversion.Scope) error { +func Convert_core_ClientIPConfig_To_v1_ClientIPConfig(in *core.ClientIPConfig, out *corev1.ClientIPConfig, s conversion.Scope) error { return autoConvert_core_ClientIPConfig_To_v1_ClientIPConfig(in, out, s) } -func autoConvert_v1_ClusterTrustBundleProjection_To_core_ClusterTrustBundleProjection(in *v1.ClusterTrustBundleProjection, out *core.ClusterTrustBundleProjection, s conversion.Scope) error { +func autoConvert_v1_ClusterTrustBundleProjection_To_core_ClusterTrustBundleProjection(in *corev1.ClusterTrustBundleProjection, out *core.ClusterTrustBundleProjection, s conversion.Scope) error { out.Name = (*string)(unsafe.Pointer(in.Name)) out.SignerName = (*string)(unsafe.Pointer(in.SignerName)) out.LabelSelector = (*metav1.LabelSelector)(unsafe.Pointer(in.LabelSelector)) @@ -2865,11 +2870,11 @@ func autoConvert_v1_ClusterTrustBundleProjection_To_core_ClusterTrustBundleProje } // Convert_v1_ClusterTrustBundleProjection_To_core_ClusterTrustBundleProjection is an autogenerated conversion function. -func Convert_v1_ClusterTrustBundleProjection_To_core_ClusterTrustBundleProjection(in *v1.ClusterTrustBundleProjection, out *core.ClusterTrustBundleProjection, s conversion.Scope) error { +func Convert_v1_ClusterTrustBundleProjection_To_core_ClusterTrustBundleProjection(in *corev1.ClusterTrustBundleProjection, out *core.ClusterTrustBundleProjection, s conversion.Scope) error { return autoConvert_v1_ClusterTrustBundleProjection_To_core_ClusterTrustBundleProjection(in, out, s) } -func autoConvert_core_ClusterTrustBundleProjection_To_v1_ClusterTrustBundleProjection(in *core.ClusterTrustBundleProjection, out *v1.ClusterTrustBundleProjection, s conversion.Scope) error { +func autoConvert_core_ClusterTrustBundleProjection_To_v1_ClusterTrustBundleProjection(in *core.ClusterTrustBundleProjection, out *corev1.ClusterTrustBundleProjection, s conversion.Scope) error { out.Name = (*string)(unsafe.Pointer(in.Name)) out.SignerName = (*string)(unsafe.Pointer(in.SignerName)) out.LabelSelector = (*metav1.LabelSelector)(unsafe.Pointer(in.LabelSelector)) @@ -2879,11 +2884,11 @@ func autoConvert_core_ClusterTrustBundleProjection_To_v1_ClusterTrustBundleProje } // Convert_core_ClusterTrustBundleProjection_To_v1_ClusterTrustBundleProjection is an autogenerated conversion function. 
-func Convert_core_ClusterTrustBundleProjection_To_v1_ClusterTrustBundleProjection(in *core.ClusterTrustBundleProjection, out *v1.ClusterTrustBundleProjection, s conversion.Scope) error { +func Convert_core_ClusterTrustBundleProjection_To_v1_ClusterTrustBundleProjection(in *core.ClusterTrustBundleProjection, out *corev1.ClusterTrustBundleProjection, s conversion.Scope) error { return autoConvert_core_ClusterTrustBundleProjection_To_v1_ClusterTrustBundleProjection(in, out, s) } -func autoConvert_v1_ComponentCondition_To_core_ComponentCondition(in *v1.ComponentCondition, out *core.ComponentCondition, s conversion.Scope) error { +func autoConvert_v1_ComponentCondition_To_core_ComponentCondition(in *corev1.ComponentCondition, out *core.ComponentCondition, s conversion.Scope) error { out.Type = core.ComponentConditionType(in.Type) out.Status = core.ConditionStatus(in.Status) out.Message = in.Message @@ -2892,68 +2897,68 @@ func autoConvert_v1_ComponentCondition_To_core_ComponentCondition(in *v1.Compone } // Convert_v1_ComponentCondition_To_core_ComponentCondition is an autogenerated conversion function. -func Convert_v1_ComponentCondition_To_core_ComponentCondition(in *v1.ComponentCondition, out *core.ComponentCondition, s conversion.Scope) error { +func Convert_v1_ComponentCondition_To_core_ComponentCondition(in *corev1.ComponentCondition, out *core.ComponentCondition, s conversion.Scope) error { return autoConvert_v1_ComponentCondition_To_core_ComponentCondition(in, out, s) } -func autoConvert_core_ComponentCondition_To_v1_ComponentCondition(in *core.ComponentCondition, out *v1.ComponentCondition, s conversion.Scope) error { - out.Type = v1.ComponentConditionType(in.Type) - out.Status = v1.ConditionStatus(in.Status) +func autoConvert_core_ComponentCondition_To_v1_ComponentCondition(in *core.ComponentCondition, out *corev1.ComponentCondition, s conversion.Scope) error { + out.Type = corev1.ComponentConditionType(in.Type) + out.Status = corev1.ConditionStatus(in.Status) out.Message = in.Message out.Error = in.Error return nil } // Convert_core_ComponentCondition_To_v1_ComponentCondition is an autogenerated conversion function. -func Convert_core_ComponentCondition_To_v1_ComponentCondition(in *core.ComponentCondition, out *v1.ComponentCondition, s conversion.Scope) error { +func Convert_core_ComponentCondition_To_v1_ComponentCondition(in *core.ComponentCondition, out *corev1.ComponentCondition, s conversion.Scope) error { return autoConvert_core_ComponentCondition_To_v1_ComponentCondition(in, out, s) } -func autoConvert_v1_ComponentStatus_To_core_ComponentStatus(in *v1.ComponentStatus, out *core.ComponentStatus, s conversion.Scope) error { +func autoConvert_v1_ComponentStatus_To_core_ComponentStatus(in *corev1.ComponentStatus, out *core.ComponentStatus, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta out.Conditions = *(*[]core.ComponentCondition)(unsafe.Pointer(&in.Conditions)) return nil } // Convert_v1_ComponentStatus_To_core_ComponentStatus is an autogenerated conversion function. 
-func Convert_v1_ComponentStatus_To_core_ComponentStatus(in *v1.ComponentStatus, out *core.ComponentStatus, s conversion.Scope) error { +func Convert_v1_ComponentStatus_To_core_ComponentStatus(in *corev1.ComponentStatus, out *core.ComponentStatus, s conversion.Scope) error { return autoConvert_v1_ComponentStatus_To_core_ComponentStatus(in, out, s) } -func autoConvert_core_ComponentStatus_To_v1_ComponentStatus(in *core.ComponentStatus, out *v1.ComponentStatus, s conversion.Scope) error { +func autoConvert_core_ComponentStatus_To_v1_ComponentStatus(in *core.ComponentStatus, out *corev1.ComponentStatus, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta - out.Conditions = *(*[]v1.ComponentCondition)(unsafe.Pointer(&in.Conditions)) + out.Conditions = *(*[]corev1.ComponentCondition)(unsafe.Pointer(&in.Conditions)) return nil } // Convert_core_ComponentStatus_To_v1_ComponentStatus is an autogenerated conversion function. -func Convert_core_ComponentStatus_To_v1_ComponentStatus(in *core.ComponentStatus, out *v1.ComponentStatus, s conversion.Scope) error { +func Convert_core_ComponentStatus_To_v1_ComponentStatus(in *core.ComponentStatus, out *corev1.ComponentStatus, s conversion.Scope) error { return autoConvert_core_ComponentStatus_To_v1_ComponentStatus(in, out, s) } -func autoConvert_v1_ComponentStatusList_To_core_ComponentStatusList(in *v1.ComponentStatusList, out *core.ComponentStatusList, s conversion.Scope) error { +func autoConvert_v1_ComponentStatusList_To_core_ComponentStatusList(in *corev1.ComponentStatusList, out *core.ComponentStatusList, s conversion.Scope) error { out.ListMeta = in.ListMeta out.Items = *(*[]core.ComponentStatus)(unsafe.Pointer(&in.Items)) return nil } // Convert_v1_ComponentStatusList_To_core_ComponentStatusList is an autogenerated conversion function. -func Convert_v1_ComponentStatusList_To_core_ComponentStatusList(in *v1.ComponentStatusList, out *core.ComponentStatusList, s conversion.Scope) error { +func Convert_v1_ComponentStatusList_To_core_ComponentStatusList(in *corev1.ComponentStatusList, out *core.ComponentStatusList, s conversion.Scope) error { return autoConvert_v1_ComponentStatusList_To_core_ComponentStatusList(in, out, s) } -func autoConvert_core_ComponentStatusList_To_v1_ComponentStatusList(in *core.ComponentStatusList, out *v1.ComponentStatusList, s conversion.Scope) error { +func autoConvert_core_ComponentStatusList_To_v1_ComponentStatusList(in *core.ComponentStatusList, out *corev1.ComponentStatusList, s conversion.Scope) error { out.ListMeta = in.ListMeta - out.Items = *(*[]v1.ComponentStatus)(unsafe.Pointer(&in.Items)) + out.Items = *(*[]corev1.ComponentStatus)(unsafe.Pointer(&in.Items)) return nil } // Convert_core_ComponentStatusList_To_v1_ComponentStatusList is an autogenerated conversion function. 
-func Convert_core_ComponentStatusList_To_v1_ComponentStatusList(in *core.ComponentStatusList, out *v1.ComponentStatusList, s conversion.Scope) error { +func Convert_core_ComponentStatusList_To_v1_ComponentStatusList(in *core.ComponentStatusList, out *corev1.ComponentStatusList, s conversion.Scope) error { return autoConvert_core_ComponentStatusList_To_v1_ComponentStatusList(in, out, s) } -func autoConvert_v1_ConfigMap_To_core_ConfigMap(in *v1.ConfigMap, out *core.ConfigMap, s conversion.Scope) error { +func autoConvert_v1_ConfigMap_To_core_ConfigMap(in *corev1.ConfigMap, out *core.ConfigMap, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta out.Immutable = (*bool)(unsafe.Pointer(in.Immutable)) out.Data = *(*map[string]string)(unsafe.Pointer(&in.Data)) @@ -2962,11 +2967,11 @@ func autoConvert_v1_ConfigMap_To_core_ConfigMap(in *v1.ConfigMap, out *core.Conf } // Convert_v1_ConfigMap_To_core_ConfigMap is an autogenerated conversion function. -func Convert_v1_ConfigMap_To_core_ConfigMap(in *v1.ConfigMap, out *core.ConfigMap, s conversion.Scope) error { +func Convert_v1_ConfigMap_To_core_ConfigMap(in *corev1.ConfigMap, out *core.ConfigMap, s conversion.Scope) error { return autoConvert_v1_ConfigMap_To_core_ConfigMap(in, out, s) } -func autoConvert_core_ConfigMap_To_v1_ConfigMap(in *core.ConfigMap, out *v1.ConfigMap, s conversion.Scope) error { +func autoConvert_core_ConfigMap_To_v1_ConfigMap(in *core.ConfigMap, out *corev1.ConfigMap, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta out.Immutable = (*bool)(unsafe.Pointer(in.Immutable)) out.Data = *(*map[string]string)(unsafe.Pointer(&in.Data)) @@ -2975,11 +2980,11 @@ func autoConvert_core_ConfigMap_To_v1_ConfigMap(in *core.ConfigMap, out *v1.Conf } // Convert_core_ConfigMap_To_v1_ConfigMap is an autogenerated conversion function. -func Convert_core_ConfigMap_To_v1_ConfigMap(in *core.ConfigMap, out *v1.ConfigMap, s conversion.Scope) error { +func Convert_core_ConfigMap_To_v1_ConfigMap(in *core.ConfigMap, out *corev1.ConfigMap, s conversion.Scope) error { return autoConvert_core_ConfigMap_To_v1_ConfigMap(in, out, s) } -func autoConvert_v1_ConfigMapEnvSource_To_core_ConfigMapEnvSource(in *v1.ConfigMapEnvSource, out *core.ConfigMapEnvSource, s conversion.Scope) error { +func autoConvert_v1_ConfigMapEnvSource_To_core_ConfigMapEnvSource(in *corev1.ConfigMapEnvSource, out *core.ConfigMapEnvSource, s conversion.Scope) error { if err := Convert_v1_LocalObjectReference_To_core_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { return err } @@ -2988,11 +2993,11 @@ func autoConvert_v1_ConfigMapEnvSource_To_core_ConfigMapEnvSource(in *v1.ConfigM } // Convert_v1_ConfigMapEnvSource_To_core_ConfigMapEnvSource is an autogenerated conversion function. 
-func Convert_v1_ConfigMapEnvSource_To_core_ConfigMapEnvSource(in *v1.ConfigMapEnvSource, out *core.ConfigMapEnvSource, s conversion.Scope) error { +func Convert_v1_ConfigMapEnvSource_To_core_ConfigMapEnvSource(in *corev1.ConfigMapEnvSource, out *core.ConfigMapEnvSource, s conversion.Scope) error { return autoConvert_v1_ConfigMapEnvSource_To_core_ConfigMapEnvSource(in, out, s) } -func autoConvert_core_ConfigMapEnvSource_To_v1_ConfigMapEnvSource(in *core.ConfigMapEnvSource, out *v1.ConfigMapEnvSource, s conversion.Scope) error { +func autoConvert_core_ConfigMapEnvSource_To_v1_ConfigMapEnvSource(in *core.ConfigMapEnvSource, out *corev1.ConfigMapEnvSource, s conversion.Scope) error { if err := Convert_core_LocalObjectReference_To_v1_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { return err } @@ -3001,11 +3006,11 @@ func autoConvert_core_ConfigMapEnvSource_To_v1_ConfigMapEnvSource(in *core.Confi } // Convert_core_ConfigMapEnvSource_To_v1_ConfigMapEnvSource is an autogenerated conversion function. -func Convert_core_ConfigMapEnvSource_To_v1_ConfigMapEnvSource(in *core.ConfigMapEnvSource, out *v1.ConfigMapEnvSource, s conversion.Scope) error { +func Convert_core_ConfigMapEnvSource_To_v1_ConfigMapEnvSource(in *core.ConfigMapEnvSource, out *corev1.ConfigMapEnvSource, s conversion.Scope) error { return autoConvert_core_ConfigMapEnvSource_To_v1_ConfigMapEnvSource(in, out, s) } -func autoConvert_v1_ConfigMapKeySelector_To_core_ConfigMapKeySelector(in *v1.ConfigMapKeySelector, out *core.ConfigMapKeySelector, s conversion.Scope) error { +func autoConvert_v1_ConfigMapKeySelector_To_core_ConfigMapKeySelector(in *corev1.ConfigMapKeySelector, out *core.ConfigMapKeySelector, s conversion.Scope) error { if err := Convert_v1_LocalObjectReference_To_core_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { return err } @@ -3015,11 +3020,11 @@ func autoConvert_v1_ConfigMapKeySelector_To_core_ConfigMapKeySelector(in *v1.Con } // Convert_v1_ConfigMapKeySelector_To_core_ConfigMapKeySelector is an autogenerated conversion function. -func Convert_v1_ConfigMapKeySelector_To_core_ConfigMapKeySelector(in *v1.ConfigMapKeySelector, out *core.ConfigMapKeySelector, s conversion.Scope) error { +func Convert_v1_ConfigMapKeySelector_To_core_ConfigMapKeySelector(in *corev1.ConfigMapKeySelector, out *core.ConfigMapKeySelector, s conversion.Scope) error { return autoConvert_v1_ConfigMapKeySelector_To_core_ConfigMapKeySelector(in, out, s) } -func autoConvert_core_ConfigMapKeySelector_To_v1_ConfigMapKeySelector(in *core.ConfigMapKeySelector, out *v1.ConfigMapKeySelector, s conversion.Scope) error { +func autoConvert_core_ConfigMapKeySelector_To_v1_ConfigMapKeySelector(in *core.ConfigMapKeySelector, out *corev1.ConfigMapKeySelector, s conversion.Scope) error { if err := Convert_core_LocalObjectReference_To_v1_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { return err } @@ -3029,33 +3034,33 @@ func autoConvert_core_ConfigMapKeySelector_To_v1_ConfigMapKeySelector(in *core.C } // Convert_core_ConfigMapKeySelector_To_v1_ConfigMapKeySelector is an autogenerated conversion function. 
-func Convert_core_ConfigMapKeySelector_To_v1_ConfigMapKeySelector(in *core.ConfigMapKeySelector, out *v1.ConfigMapKeySelector, s conversion.Scope) error { +func Convert_core_ConfigMapKeySelector_To_v1_ConfigMapKeySelector(in *core.ConfigMapKeySelector, out *corev1.ConfigMapKeySelector, s conversion.Scope) error { return autoConvert_core_ConfigMapKeySelector_To_v1_ConfigMapKeySelector(in, out, s) } -func autoConvert_v1_ConfigMapList_To_core_ConfigMapList(in *v1.ConfigMapList, out *core.ConfigMapList, s conversion.Scope) error { +func autoConvert_v1_ConfigMapList_To_core_ConfigMapList(in *corev1.ConfigMapList, out *core.ConfigMapList, s conversion.Scope) error { out.ListMeta = in.ListMeta out.Items = *(*[]core.ConfigMap)(unsafe.Pointer(&in.Items)) return nil } // Convert_v1_ConfigMapList_To_core_ConfigMapList is an autogenerated conversion function. -func Convert_v1_ConfigMapList_To_core_ConfigMapList(in *v1.ConfigMapList, out *core.ConfigMapList, s conversion.Scope) error { +func Convert_v1_ConfigMapList_To_core_ConfigMapList(in *corev1.ConfigMapList, out *core.ConfigMapList, s conversion.Scope) error { return autoConvert_v1_ConfigMapList_To_core_ConfigMapList(in, out, s) } -func autoConvert_core_ConfigMapList_To_v1_ConfigMapList(in *core.ConfigMapList, out *v1.ConfigMapList, s conversion.Scope) error { +func autoConvert_core_ConfigMapList_To_v1_ConfigMapList(in *core.ConfigMapList, out *corev1.ConfigMapList, s conversion.Scope) error { out.ListMeta = in.ListMeta - out.Items = *(*[]v1.ConfigMap)(unsafe.Pointer(&in.Items)) + out.Items = *(*[]corev1.ConfigMap)(unsafe.Pointer(&in.Items)) return nil } // Convert_core_ConfigMapList_To_v1_ConfigMapList is an autogenerated conversion function. -func Convert_core_ConfigMapList_To_v1_ConfigMapList(in *core.ConfigMapList, out *v1.ConfigMapList, s conversion.Scope) error { +func Convert_core_ConfigMapList_To_v1_ConfigMapList(in *core.ConfigMapList, out *corev1.ConfigMapList, s conversion.Scope) error { return autoConvert_core_ConfigMapList_To_v1_ConfigMapList(in, out, s) } -func autoConvert_v1_ConfigMapNodeConfigSource_To_core_ConfigMapNodeConfigSource(in *v1.ConfigMapNodeConfigSource, out *core.ConfigMapNodeConfigSource, s conversion.Scope) error { +func autoConvert_v1_ConfigMapNodeConfigSource_To_core_ConfigMapNodeConfigSource(in *corev1.ConfigMapNodeConfigSource, out *core.ConfigMapNodeConfigSource, s conversion.Scope) error { out.Namespace = in.Namespace out.Name = in.Name out.UID = types.UID(in.UID) @@ -3065,11 +3070,11 @@ func autoConvert_v1_ConfigMapNodeConfigSource_To_core_ConfigMapNodeConfigSource( } // Convert_v1_ConfigMapNodeConfigSource_To_core_ConfigMapNodeConfigSource is an autogenerated conversion function. 
-func Convert_v1_ConfigMapNodeConfigSource_To_core_ConfigMapNodeConfigSource(in *v1.ConfigMapNodeConfigSource, out *core.ConfigMapNodeConfigSource, s conversion.Scope) error { +func Convert_v1_ConfigMapNodeConfigSource_To_core_ConfigMapNodeConfigSource(in *corev1.ConfigMapNodeConfigSource, out *core.ConfigMapNodeConfigSource, s conversion.Scope) error { return autoConvert_v1_ConfigMapNodeConfigSource_To_core_ConfigMapNodeConfigSource(in, out, s) } -func autoConvert_core_ConfigMapNodeConfigSource_To_v1_ConfigMapNodeConfigSource(in *core.ConfigMapNodeConfigSource, out *v1.ConfigMapNodeConfigSource, s conversion.Scope) error { +func autoConvert_core_ConfigMapNodeConfigSource_To_v1_ConfigMapNodeConfigSource(in *core.ConfigMapNodeConfigSource, out *corev1.ConfigMapNodeConfigSource, s conversion.Scope) error { out.Namespace = in.Namespace out.Name = in.Name out.UID = types.UID(in.UID) @@ -3079,11 +3084,11 @@ func autoConvert_core_ConfigMapNodeConfigSource_To_v1_ConfigMapNodeConfigSource( } // Convert_core_ConfigMapNodeConfigSource_To_v1_ConfigMapNodeConfigSource is an autogenerated conversion function. -func Convert_core_ConfigMapNodeConfigSource_To_v1_ConfigMapNodeConfigSource(in *core.ConfigMapNodeConfigSource, out *v1.ConfigMapNodeConfigSource, s conversion.Scope) error { +func Convert_core_ConfigMapNodeConfigSource_To_v1_ConfigMapNodeConfigSource(in *core.ConfigMapNodeConfigSource, out *corev1.ConfigMapNodeConfigSource, s conversion.Scope) error { return autoConvert_core_ConfigMapNodeConfigSource_To_v1_ConfigMapNodeConfigSource(in, out, s) } -func autoConvert_v1_ConfigMapProjection_To_core_ConfigMapProjection(in *v1.ConfigMapProjection, out *core.ConfigMapProjection, s conversion.Scope) error { +func autoConvert_v1_ConfigMapProjection_To_core_ConfigMapProjection(in *corev1.ConfigMapProjection, out *core.ConfigMapProjection, s conversion.Scope) error { if err := Convert_v1_LocalObjectReference_To_core_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { return err } @@ -3093,25 +3098,25 @@ func autoConvert_v1_ConfigMapProjection_To_core_ConfigMapProjection(in *v1.Confi } // Convert_v1_ConfigMapProjection_To_core_ConfigMapProjection is an autogenerated conversion function. -func Convert_v1_ConfigMapProjection_To_core_ConfigMapProjection(in *v1.ConfigMapProjection, out *core.ConfigMapProjection, s conversion.Scope) error { +func Convert_v1_ConfigMapProjection_To_core_ConfigMapProjection(in *corev1.ConfigMapProjection, out *core.ConfigMapProjection, s conversion.Scope) error { return autoConvert_v1_ConfigMapProjection_To_core_ConfigMapProjection(in, out, s) } -func autoConvert_core_ConfigMapProjection_To_v1_ConfigMapProjection(in *core.ConfigMapProjection, out *v1.ConfigMapProjection, s conversion.Scope) error { +func autoConvert_core_ConfigMapProjection_To_v1_ConfigMapProjection(in *core.ConfigMapProjection, out *corev1.ConfigMapProjection, s conversion.Scope) error { if err := Convert_core_LocalObjectReference_To_v1_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { return err } - out.Items = *(*[]v1.KeyToPath)(unsafe.Pointer(&in.Items)) + out.Items = *(*[]corev1.KeyToPath)(unsafe.Pointer(&in.Items)) out.Optional = (*bool)(unsafe.Pointer(in.Optional)) return nil } // Convert_core_ConfigMapProjection_To_v1_ConfigMapProjection is an autogenerated conversion function. 
-func Convert_core_ConfigMapProjection_To_v1_ConfigMapProjection(in *core.ConfigMapProjection, out *v1.ConfigMapProjection, s conversion.Scope) error { +func Convert_core_ConfigMapProjection_To_v1_ConfigMapProjection(in *core.ConfigMapProjection, out *corev1.ConfigMapProjection, s conversion.Scope) error { return autoConvert_core_ConfigMapProjection_To_v1_ConfigMapProjection(in, out, s) } -func autoConvert_v1_ConfigMapVolumeSource_To_core_ConfigMapVolumeSource(in *v1.ConfigMapVolumeSource, out *core.ConfigMapVolumeSource, s conversion.Scope) error { +func autoConvert_v1_ConfigMapVolumeSource_To_core_ConfigMapVolumeSource(in *corev1.ConfigMapVolumeSource, out *core.ConfigMapVolumeSource, s conversion.Scope) error { if err := Convert_v1_LocalObjectReference_To_core_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { return err } @@ -3122,26 +3127,26 @@ func autoConvert_v1_ConfigMapVolumeSource_To_core_ConfigMapVolumeSource(in *v1.C } // Convert_v1_ConfigMapVolumeSource_To_core_ConfigMapVolumeSource is an autogenerated conversion function. -func Convert_v1_ConfigMapVolumeSource_To_core_ConfigMapVolumeSource(in *v1.ConfigMapVolumeSource, out *core.ConfigMapVolumeSource, s conversion.Scope) error { +func Convert_v1_ConfigMapVolumeSource_To_core_ConfigMapVolumeSource(in *corev1.ConfigMapVolumeSource, out *core.ConfigMapVolumeSource, s conversion.Scope) error { return autoConvert_v1_ConfigMapVolumeSource_To_core_ConfigMapVolumeSource(in, out, s) } -func autoConvert_core_ConfigMapVolumeSource_To_v1_ConfigMapVolumeSource(in *core.ConfigMapVolumeSource, out *v1.ConfigMapVolumeSource, s conversion.Scope) error { +func autoConvert_core_ConfigMapVolumeSource_To_v1_ConfigMapVolumeSource(in *core.ConfigMapVolumeSource, out *corev1.ConfigMapVolumeSource, s conversion.Scope) error { if err := Convert_core_LocalObjectReference_To_v1_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { return err } - out.Items = *(*[]v1.KeyToPath)(unsafe.Pointer(&in.Items)) + out.Items = *(*[]corev1.KeyToPath)(unsafe.Pointer(&in.Items)) out.DefaultMode = (*int32)(unsafe.Pointer(in.DefaultMode)) out.Optional = (*bool)(unsafe.Pointer(in.Optional)) return nil } // Convert_core_ConfigMapVolumeSource_To_v1_ConfigMapVolumeSource is an autogenerated conversion function. -func Convert_core_ConfigMapVolumeSource_To_v1_ConfigMapVolumeSource(in *core.ConfigMapVolumeSource, out *v1.ConfigMapVolumeSource, s conversion.Scope) error { +func Convert_core_ConfigMapVolumeSource_To_v1_ConfigMapVolumeSource(in *core.ConfigMapVolumeSource, out *corev1.ConfigMapVolumeSource, s conversion.Scope) error { return autoConvert_core_ConfigMapVolumeSource_To_v1_ConfigMapVolumeSource(in, out, s) } -func autoConvert_v1_Container_To_core_Container(in *v1.Container, out *core.Container, s conversion.Scope) error { +func autoConvert_v1_Container_To_core_Container(in *corev1.Container, out *core.Container, s conversion.Scope) error { out.Name = in.Name out.Image = in.Image out.Command = *(*[]string)(unsafe.Pointer(&in.Command)) @@ -3172,34 +3177,34 @@ func autoConvert_v1_Container_To_core_Container(in *v1.Container, out *core.Cont } // Convert_v1_Container_To_core_Container is an autogenerated conversion function. 
-func Convert_v1_Container_To_core_Container(in *v1.Container, out *core.Container, s conversion.Scope) error { +func Convert_v1_Container_To_core_Container(in *corev1.Container, out *core.Container, s conversion.Scope) error { return autoConvert_v1_Container_To_core_Container(in, out, s) } -func autoConvert_core_Container_To_v1_Container(in *core.Container, out *v1.Container, s conversion.Scope) error { +func autoConvert_core_Container_To_v1_Container(in *core.Container, out *corev1.Container, s conversion.Scope) error { out.Name = in.Name out.Image = in.Image out.Command = *(*[]string)(unsafe.Pointer(&in.Command)) out.Args = *(*[]string)(unsafe.Pointer(&in.Args)) out.WorkingDir = in.WorkingDir - out.Ports = *(*[]v1.ContainerPort)(unsafe.Pointer(&in.Ports)) - out.EnvFrom = *(*[]v1.EnvFromSource)(unsafe.Pointer(&in.EnvFrom)) - out.Env = *(*[]v1.EnvVar)(unsafe.Pointer(&in.Env)) + out.Ports = *(*[]corev1.ContainerPort)(unsafe.Pointer(&in.Ports)) + out.EnvFrom = *(*[]corev1.EnvFromSource)(unsafe.Pointer(&in.EnvFrom)) + out.Env = *(*[]corev1.EnvVar)(unsafe.Pointer(&in.Env)) if err := Convert_core_ResourceRequirements_To_v1_ResourceRequirements(&in.Resources, &out.Resources, s); err != nil { return err } - out.ResizePolicy = *(*[]v1.ContainerResizePolicy)(unsafe.Pointer(&in.ResizePolicy)) - out.RestartPolicy = (*v1.ContainerRestartPolicy)(unsafe.Pointer(in.RestartPolicy)) - out.VolumeMounts = *(*[]v1.VolumeMount)(unsafe.Pointer(&in.VolumeMounts)) - out.VolumeDevices = *(*[]v1.VolumeDevice)(unsafe.Pointer(&in.VolumeDevices)) - out.LivenessProbe = (*v1.Probe)(unsafe.Pointer(in.LivenessProbe)) - out.ReadinessProbe = (*v1.Probe)(unsafe.Pointer(in.ReadinessProbe)) - out.StartupProbe = (*v1.Probe)(unsafe.Pointer(in.StartupProbe)) - out.Lifecycle = (*v1.Lifecycle)(unsafe.Pointer(in.Lifecycle)) + out.ResizePolicy = *(*[]corev1.ContainerResizePolicy)(unsafe.Pointer(&in.ResizePolicy)) + out.RestartPolicy = (*corev1.ContainerRestartPolicy)(unsafe.Pointer(in.RestartPolicy)) + out.VolumeMounts = *(*[]corev1.VolumeMount)(unsafe.Pointer(&in.VolumeMounts)) + out.VolumeDevices = *(*[]corev1.VolumeDevice)(unsafe.Pointer(&in.VolumeDevices)) + out.LivenessProbe = (*corev1.Probe)(unsafe.Pointer(in.LivenessProbe)) + out.ReadinessProbe = (*corev1.Probe)(unsafe.Pointer(in.ReadinessProbe)) + out.StartupProbe = (*corev1.Probe)(unsafe.Pointer(in.StartupProbe)) + out.Lifecycle = (*corev1.Lifecycle)(unsafe.Pointer(in.Lifecycle)) out.TerminationMessagePath = in.TerminationMessagePath - out.TerminationMessagePolicy = v1.TerminationMessagePolicy(in.TerminationMessagePolicy) - out.ImagePullPolicy = v1.PullPolicy(in.ImagePullPolicy) - out.SecurityContext = (*v1.SecurityContext)(unsafe.Pointer(in.SecurityContext)) + out.TerminationMessagePolicy = corev1.TerminationMessagePolicy(in.TerminationMessagePolicy) + out.ImagePullPolicy = corev1.PullPolicy(in.ImagePullPolicy) + out.SecurityContext = (*corev1.SecurityContext)(unsafe.Pointer(in.SecurityContext)) out.Stdin = in.Stdin out.StdinOnce = in.StdinOnce out.TTY = in.TTY @@ -3207,33 +3212,33 @@ func autoConvert_core_Container_To_v1_Container(in *core.Container, out *v1.Cont } // Convert_core_Container_To_v1_Container is an autogenerated conversion function. 
-func Convert_core_Container_To_v1_Container(in *core.Container, out *v1.Container, s conversion.Scope) error { +func Convert_core_Container_To_v1_Container(in *core.Container, out *corev1.Container, s conversion.Scope) error { return autoConvert_core_Container_To_v1_Container(in, out, s) } -func autoConvert_v1_ContainerImage_To_core_ContainerImage(in *v1.ContainerImage, out *core.ContainerImage, s conversion.Scope) error { +func autoConvert_v1_ContainerImage_To_core_ContainerImage(in *corev1.ContainerImage, out *core.ContainerImage, s conversion.Scope) error { out.Names = *(*[]string)(unsafe.Pointer(&in.Names)) out.SizeBytes = in.SizeBytes return nil } // Convert_v1_ContainerImage_To_core_ContainerImage is an autogenerated conversion function. -func Convert_v1_ContainerImage_To_core_ContainerImage(in *v1.ContainerImage, out *core.ContainerImage, s conversion.Scope) error { +func Convert_v1_ContainerImage_To_core_ContainerImage(in *corev1.ContainerImage, out *core.ContainerImage, s conversion.Scope) error { return autoConvert_v1_ContainerImage_To_core_ContainerImage(in, out, s) } -func autoConvert_core_ContainerImage_To_v1_ContainerImage(in *core.ContainerImage, out *v1.ContainerImage, s conversion.Scope) error { +func autoConvert_core_ContainerImage_To_v1_ContainerImage(in *core.ContainerImage, out *corev1.ContainerImage, s conversion.Scope) error { out.Names = *(*[]string)(unsafe.Pointer(&in.Names)) out.SizeBytes = in.SizeBytes return nil } // Convert_core_ContainerImage_To_v1_ContainerImage is an autogenerated conversion function. -func Convert_core_ContainerImage_To_v1_ContainerImage(in *core.ContainerImage, out *v1.ContainerImage, s conversion.Scope) error { +func Convert_core_ContainerImage_To_v1_ContainerImage(in *core.ContainerImage, out *corev1.ContainerImage, s conversion.Scope) error { return autoConvert_core_ContainerImage_To_v1_ContainerImage(in, out, s) } -func autoConvert_v1_ContainerPort_To_core_ContainerPort(in *v1.ContainerPort, out *core.ContainerPort, s conversion.Scope) error { +func autoConvert_v1_ContainerPort_To_core_ContainerPort(in *corev1.ContainerPort, out *core.ContainerPort, s conversion.Scope) error { out.Name = in.Name out.HostPort = in.HostPort out.ContainerPort = in.ContainerPort @@ -3243,47 +3248,47 @@ func autoConvert_v1_ContainerPort_To_core_ContainerPort(in *v1.ContainerPort, ou } // Convert_v1_ContainerPort_To_core_ContainerPort is an autogenerated conversion function. -func Convert_v1_ContainerPort_To_core_ContainerPort(in *v1.ContainerPort, out *core.ContainerPort, s conversion.Scope) error { +func Convert_v1_ContainerPort_To_core_ContainerPort(in *corev1.ContainerPort, out *core.ContainerPort, s conversion.Scope) error { return autoConvert_v1_ContainerPort_To_core_ContainerPort(in, out, s) } -func autoConvert_core_ContainerPort_To_v1_ContainerPort(in *core.ContainerPort, out *v1.ContainerPort, s conversion.Scope) error { +func autoConvert_core_ContainerPort_To_v1_ContainerPort(in *core.ContainerPort, out *corev1.ContainerPort, s conversion.Scope) error { out.Name = in.Name out.HostPort = in.HostPort out.ContainerPort = in.ContainerPort - out.Protocol = v1.Protocol(in.Protocol) + out.Protocol = corev1.Protocol(in.Protocol) out.HostIP = in.HostIP return nil } // Convert_core_ContainerPort_To_v1_ContainerPort is an autogenerated conversion function. 
-func Convert_core_ContainerPort_To_v1_ContainerPort(in *core.ContainerPort, out *v1.ContainerPort, s conversion.Scope) error { +func Convert_core_ContainerPort_To_v1_ContainerPort(in *core.ContainerPort, out *corev1.ContainerPort, s conversion.Scope) error { return autoConvert_core_ContainerPort_To_v1_ContainerPort(in, out, s) } -func autoConvert_v1_ContainerResizePolicy_To_core_ContainerResizePolicy(in *v1.ContainerResizePolicy, out *core.ContainerResizePolicy, s conversion.Scope) error { +func autoConvert_v1_ContainerResizePolicy_To_core_ContainerResizePolicy(in *corev1.ContainerResizePolicy, out *core.ContainerResizePolicy, s conversion.Scope) error { out.ResourceName = core.ResourceName(in.ResourceName) out.RestartPolicy = core.ResourceResizeRestartPolicy(in.RestartPolicy) return nil } // Convert_v1_ContainerResizePolicy_To_core_ContainerResizePolicy is an autogenerated conversion function. -func Convert_v1_ContainerResizePolicy_To_core_ContainerResizePolicy(in *v1.ContainerResizePolicy, out *core.ContainerResizePolicy, s conversion.Scope) error { +func Convert_v1_ContainerResizePolicy_To_core_ContainerResizePolicy(in *corev1.ContainerResizePolicy, out *core.ContainerResizePolicy, s conversion.Scope) error { return autoConvert_v1_ContainerResizePolicy_To_core_ContainerResizePolicy(in, out, s) } -func autoConvert_core_ContainerResizePolicy_To_v1_ContainerResizePolicy(in *core.ContainerResizePolicy, out *v1.ContainerResizePolicy, s conversion.Scope) error { - out.ResourceName = v1.ResourceName(in.ResourceName) - out.RestartPolicy = v1.ResourceResizeRestartPolicy(in.RestartPolicy) +func autoConvert_core_ContainerResizePolicy_To_v1_ContainerResizePolicy(in *core.ContainerResizePolicy, out *corev1.ContainerResizePolicy, s conversion.Scope) error { + out.ResourceName = corev1.ResourceName(in.ResourceName) + out.RestartPolicy = corev1.ResourceResizeRestartPolicy(in.RestartPolicy) return nil } // Convert_core_ContainerResizePolicy_To_v1_ContainerResizePolicy is an autogenerated conversion function. -func Convert_core_ContainerResizePolicy_To_v1_ContainerResizePolicy(in *core.ContainerResizePolicy, out *v1.ContainerResizePolicy, s conversion.Scope) error { +func Convert_core_ContainerResizePolicy_To_v1_ContainerResizePolicy(in *core.ContainerResizePolicy, out *corev1.ContainerResizePolicy, s conversion.Scope) error { return autoConvert_core_ContainerResizePolicy_To_v1_ContainerResizePolicy(in, out, s) } -func autoConvert_v1_ContainerState_To_core_ContainerState(in *v1.ContainerState, out *core.ContainerState, s conversion.Scope) error { +func autoConvert_v1_ContainerState_To_core_ContainerState(in *corev1.ContainerState, out *core.ContainerState, s conversion.Scope) error { out.Waiting = (*core.ContainerStateWaiting)(unsafe.Pointer(in.Waiting)) out.Running = (*core.ContainerStateRunning)(unsafe.Pointer(in.Running)) out.Terminated = (*core.ContainerStateTerminated)(unsafe.Pointer(in.Terminated)) @@ -3291,43 +3296,43 @@ func autoConvert_v1_ContainerState_To_core_ContainerState(in *v1.ContainerState, } // Convert_v1_ContainerState_To_core_ContainerState is an autogenerated conversion function. 
-func Convert_v1_ContainerState_To_core_ContainerState(in *v1.ContainerState, out *core.ContainerState, s conversion.Scope) error { +func Convert_v1_ContainerState_To_core_ContainerState(in *corev1.ContainerState, out *core.ContainerState, s conversion.Scope) error { return autoConvert_v1_ContainerState_To_core_ContainerState(in, out, s) } -func autoConvert_core_ContainerState_To_v1_ContainerState(in *core.ContainerState, out *v1.ContainerState, s conversion.Scope) error { - out.Waiting = (*v1.ContainerStateWaiting)(unsafe.Pointer(in.Waiting)) - out.Running = (*v1.ContainerStateRunning)(unsafe.Pointer(in.Running)) - out.Terminated = (*v1.ContainerStateTerminated)(unsafe.Pointer(in.Terminated)) +func autoConvert_core_ContainerState_To_v1_ContainerState(in *core.ContainerState, out *corev1.ContainerState, s conversion.Scope) error { + out.Waiting = (*corev1.ContainerStateWaiting)(unsafe.Pointer(in.Waiting)) + out.Running = (*corev1.ContainerStateRunning)(unsafe.Pointer(in.Running)) + out.Terminated = (*corev1.ContainerStateTerminated)(unsafe.Pointer(in.Terminated)) return nil } // Convert_core_ContainerState_To_v1_ContainerState is an autogenerated conversion function. -func Convert_core_ContainerState_To_v1_ContainerState(in *core.ContainerState, out *v1.ContainerState, s conversion.Scope) error { +func Convert_core_ContainerState_To_v1_ContainerState(in *core.ContainerState, out *corev1.ContainerState, s conversion.Scope) error { return autoConvert_core_ContainerState_To_v1_ContainerState(in, out, s) } -func autoConvert_v1_ContainerStateRunning_To_core_ContainerStateRunning(in *v1.ContainerStateRunning, out *core.ContainerStateRunning, s conversion.Scope) error { +func autoConvert_v1_ContainerStateRunning_To_core_ContainerStateRunning(in *corev1.ContainerStateRunning, out *core.ContainerStateRunning, s conversion.Scope) error { out.StartedAt = in.StartedAt return nil } // Convert_v1_ContainerStateRunning_To_core_ContainerStateRunning is an autogenerated conversion function. -func Convert_v1_ContainerStateRunning_To_core_ContainerStateRunning(in *v1.ContainerStateRunning, out *core.ContainerStateRunning, s conversion.Scope) error { +func Convert_v1_ContainerStateRunning_To_core_ContainerStateRunning(in *corev1.ContainerStateRunning, out *core.ContainerStateRunning, s conversion.Scope) error { return autoConvert_v1_ContainerStateRunning_To_core_ContainerStateRunning(in, out, s) } -func autoConvert_core_ContainerStateRunning_To_v1_ContainerStateRunning(in *core.ContainerStateRunning, out *v1.ContainerStateRunning, s conversion.Scope) error { +func autoConvert_core_ContainerStateRunning_To_v1_ContainerStateRunning(in *core.ContainerStateRunning, out *corev1.ContainerStateRunning, s conversion.Scope) error { out.StartedAt = in.StartedAt return nil } // Convert_core_ContainerStateRunning_To_v1_ContainerStateRunning is an autogenerated conversion function. 
-func Convert_core_ContainerStateRunning_To_v1_ContainerStateRunning(in *core.ContainerStateRunning, out *v1.ContainerStateRunning, s conversion.Scope) error { +func Convert_core_ContainerStateRunning_To_v1_ContainerStateRunning(in *core.ContainerStateRunning, out *corev1.ContainerStateRunning, s conversion.Scope) error { return autoConvert_core_ContainerStateRunning_To_v1_ContainerStateRunning(in, out, s) } -func autoConvert_v1_ContainerStateTerminated_To_core_ContainerStateTerminated(in *v1.ContainerStateTerminated, out *core.ContainerStateTerminated, s conversion.Scope) error { +func autoConvert_v1_ContainerStateTerminated_To_core_ContainerStateTerminated(in *corev1.ContainerStateTerminated, out *core.ContainerStateTerminated, s conversion.Scope) error { out.ExitCode = in.ExitCode out.Signal = in.Signal out.Reason = in.Reason @@ -3339,11 +3344,11 @@ func autoConvert_v1_ContainerStateTerminated_To_core_ContainerStateTerminated(in } // Convert_v1_ContainerStateTerminated_To_core_ContainerStateTerminated is an autogenerated conversion function. -func Convert_v1_ContainerStateTerminated_To_core_ContainerStateTerminated(in *v1.ContainerStateTerminated, out *core.ContainerStateTerminated, s conversion.Scope) error { +func Convert_v1_ContainerStateTerminated_To_core_ContainerStateTerminated(in *corev1.ContainerStateTerminated, out *core.ContainerStateTerminated, s conversion.Scope) error { return autoConvert_v1_ContainerStateTerminated_To_core_ContainerStateTerminated(in, out, s) } -func autoConvert_core_ContainerStateTerminated_To_v1_ContainerStateTerminated(in *core.ContainerStateTerminated, out *v1.ContainerStateTerminated, s conversion.Scope) error { +func autoConvert_core_ContainerStateTerminated_To_v1_ContainerStateTerminated(in *core.ContainerStateTerminated, out *corev1.ContainerStateTerminated, s conversion.Scope) error { out.ExitCode = in.ExitCode out.Signal = in.Signal out.Reason = in.Reason @@ -3355,33 +3360,33 @@ func autoConvert_core_ContainerStateTerminated_To_v1_ContainerStateTerminated(in } // Convert_core_ContainerStateTerminated_To_v1_ContainerStateTerminated is an autogenerated conversion function. -func Convert_core_ContainerStateTerminated_To_v1_ContainerStateTerminated(in *core.ContainerStateTerminated, out *v1.ContainerStateTerminated, s conversion.Scope) error { +func Convert_core_ContainerStateTerminated_To_v1_ContainerStateTerminated(in *core.ContainerStateTerminated, out *corev1.ContainerStateTerminated, s conversion.Scope) error { return autoConvert_core_ContainerStateTerminated_To_v1_ContainerStateTerminated(in, out, s) } -func autoConvert_v1_ContainerStateWaiting_To_core_ContainerStateWaiting(in *v1.ContainerStateWaiting, out *core.ContainerStateWaiting, s conversion.Scope) error { +func autoConvert_v1_ContainerStateWaiting_To_core_ContainerStateWaiting(in *corev1.ContainerStateWaiting, out *core.ContainerStateWaiting, s conversion.Scope) error { out.Reason = in.Reason out.Message = in.Message return nil } // Convert_v1_ContainerStateWaiting_To_core_ContainerStateWaiting is an autogenerated conversion function. 
-func Convert_v1_ContainerStateWaiting_To_core_ContainerStateWaiting(in *v1.ContainerStateWaiting, out *core.ContainerStateWaiting, s conversion.Scope) error { +func Convert_v1_ContainerStateWaiting_To_core_ContainerStateWaiting(in *corev1.ContainerStateWaiting, out *core.ContainerStateWaiting, s conversion.Scope) error { return autoConvert_v1_ContainerStateWaiting_To_core_ContainerStateWaiting(in, out, s) } -func autoConvert_core_ContainerStateWaiting_To_v1_ContainerStateWaiting(in *core.ContainerStateWaiting, out *v1.ContainerStateWaiting, s conversion.Scope) error { +func autoConvert_core_ContainerStateWaiting_To_v1_ContainerStateWaiting(in *core.ContainerStateWaiting, out *corev1.ContainerStateWaiting, s conversion.Scope) error { out.Reason = in.Reason out.Message = in.Message return nil } // Convert_core_ContainerStateWaiting_To_v1_ContainerStateWaiting is an autogenerated conversion function. -func Convert_core_ContainerStateWaiting_To_v1_ContainerStateWaiting(in *core.ContainerStateWaiting, out *v1.ContainerStateWaiting, s conversion.Scope) error { +func Convert_core_ContainerStateWaiting_To_v1_ContainerStateWaiting(in *core.ContainerStateWaiting, out *corev1.ContainerStateWaiting, s conversion.Scope) error { return autoConvert_core_ContainerStateWaiting_To_v1_ContainerStateWaiting(in, out, s) } -func autoConvert_v1_ContainerStatus_To_core_ContainerStatus(in *v1.ContainerStatus, out *core.ContainerStatus, s conversion.Scope) error { +func autoConvert_v1_ContainerStatus_To_core_ContainerStatus(in *corev1.ContainerStatus, out *core.ContainerStatus, s conversion.Scope) error { out.Name = in.Name if err := Convert_v1_ContainerState_To_core_ContainerState(&in.State, &out.State, s); err != nil { return err @@ -3404,11 +3409,11 @@ func autoConvert_v1_ContainerStatus_To_core_ContainerStatus(in *v1.ContainerStat } // Convert_v1_ContainerStatus_To_core_ContainerStatus is an autogenerated conversion function. 
-func Convert_v1_ContainerStatus_To_core_ContainerStatus(in *v1.ContainerStatus, out *core.ContainerStatus, s conversion.Scope) error { +func Convert_v1_ContainerStatus_To_core_ContainerStatus(in *corev1.ContainerStatus, out *core.ContainerStatus, s conversion.Scope) error { return autoConvert_v1_ContainerStatus_To_core_ContainerStatus(in, out, s) } -func autoConvert_core_ContainerStatus_To_v1_ContainerStatus(in *core.ContainerStatus, out *v1.ContainerStatus, s conversion.Scope) error { +func autoConvert_core_ContainerStatus_To_v1_ContainerStatus(in *core.ContainerStatus, out *corev1.ContainerStatus, s conversion.Scope) error { out.Name = in.Name if err := Convert_core_ContainerState_To_v1_ContainerState(&in.State, &out.State, s); err != nil { return err @@ -3422,80 +3427,80 @@ func autoConvert_core_ContainerStatus_To_v1_ContainerStatus(in *core.ContainerSt out.ImageID = in.ImageID out.ContainerID = in.ContainerID out.Started = (*bool)(unsafe.Pointer(in.Started)) - out.AllocatedResources = *(*v1.ResourceList)(unsafe.Pointer(&in.AllocatedResources)) - out.Resources = (*v1.ResourceRequirements)(unsafe.Pointer(in.Resources)) - out.VolumeMounts = *(*[]v1.VolumeMountStatus)(unsafe.Pointer(&in.VolumeMounts)) - out.User = (*v1.ContainerUser)(unsafe.Pointer(in.User)) - out.AllocatedResourcesStatus = *(*[]v1.ResourceStatus)(unsafe.Pointer(&in.AllocatedResourcesStatus)) + out.AllocatedResources = *(*corev1.ResourceList)(unsafe.Pointer(&in.AllocatedResources)) + out.Resources = (*corev1.ResourceRequirements)(unsafe.Pointer(in.Resources)) + out.VolumeMounts = *(*[]corev1.VolumeMountStatus)(unsafe.Pointer(&in.VolumeMounts)) + out.User = (*corev1.ContainerUser)(unsafe.Pointer(in.User)) + out.AllocatedResourcesStatus = *(*[]corev1.ResourceStatus)(unsafe.Pointer(&in.AllocatedResourcesStatus)) return nil } // Convert_core_ContainerStatus_To_v1_ContainerStatus is an autogenerated conversion function. -func Convert_core_ContainerStatus_To_v1_ContainerStatus(in *core.ContainerStatus, out *v1.ContainerStatus, s conversion.Scope) error { +func Convert_core_ContainerStatus_To_v1_ContainerStatus(in *core.ContainerStatus, out *corev1.ContainerStatus, s conversion.Scope) error { return autoConvert_core_ContainerStatus_To_v1_ContainerStatus(in, out, s) } -func autoConvert_v1_ContainerUser_To_core_ContainerUser(in *v1.ContainerUser, out *core.ContainerUser, s conversion.Scope) error { +func autoConvert_v1_ContainerUser_To_core_ContainerUser(in *corev1.ContainerUser, out *core.ContainerUser, s conversion.Scope) error { out.Linux = (*core.LinuxContainerUser)(unsafe.Pointer(in.Linux)) return nil } // Convert_v1_ContainerUser_To_core_ContainerUser is an autogenerated conversion function. -func Convert_v1_ContainerUser_To_core_ContainerUser(in *v1.ContainerUser, out *core.ContainerUser, s conversion.Scope) error { +func Convert_v1_ContainerUser_To_core_ContainerUser(in *corev1.ContainerUser, out *core.ContainerUser, s conversion.Scope) error { return autoConvert_v1_ContainerUser_To_core_ContainerUser(in, out, s) } -func autoConvert_core_ContainerUser_To_v1_ContainerUser(in *core.ContainerUser, out *v1.ContainerUser, s conversion.Scope) error { - out.Linux = (*v1.LinuxContainerUser)(unsafe.Pointer(in.Linux)) +func autoConvert_core_ContainerUser_To_v1_ContainerUser(in *core.ContainerUser, out *corev1.ContainerUser, s conversion.Scope) error { + out.Linux = (*corev1.LinuxContainerUser)(unsafe.Pointer(in.Linux)) return nil } // Convert_core_ContainerUser_To_v1_ContainerUser is an autogenerated conversion function. 
-func Convert_core_ContainerUser_To_v1_ContainerUser(in *core.ContainerUser, out *v1.ContainerUser, s conversion.Scope) error { +func Convert_core_ContainerUser_To_v1_ContainerUser(in *core.ContainerUser, out *corev1.ContainerUser, s conversion.Scope) error { return autoConvert_core_ContainerUser_To_v1_ContainerUser(in, out, s) } -func autoConvert_v1_DaemonEndpoint_To_core_DaemonEndpoint(in *v1.DaemonEndpoint, out *core.DaemonEndpoint, s conversion.Scope) error { +func autoConvert_v1_DaemonEndpoint_To_core_DaemonEndpoint(in *corev1.DaemonEndpoint, out *core.DaemonEndpoint, s conversion.Scope) error { out.Port = in.Port return nil } // Convert_v1_DaemonEndpoint_To_core_DaemonEndpoint is an autogenerated conversion function. -func Convert_v1_DaemonEndpoint_To_core_DaemonEndpoint(in *v1.DaemonEndpoint, out *core.DaemonEndpoint, s conversion.Scope) error { +func Convert_v1_DaemonEndpoint_To_core_DaemonEndpoint(in *corev1.DaemonEndpoint, out *core.DaemonEndpoint, s conversion.Scope) error { return autoConvert_v1_DaemonEndpoint_To_core_DaemonEndpoint(in, out, s) } -func autoConvert_core_DaemonEndpoint_To_v1_DaemonEndpoint(in *core.DaemonEndpoint, out *v1.DaemonEndpoint, s conversion.Scope) error { +func autoConvert_core_DaemonEndpoint_To_v1_DaemonEndpoint(in *core.DaemonEndpoint, out *corev1.DaemonEndpoint, s conversion.Scope) error { out.Port = in.Port return nil } // Convert_core_DaemonEndpoint_To_v1_DaemonEndpoint is an autogenerated conversion function. -func Convert_core_DaemonEndpoint_To_v1_DaemonEndpoint(in *core.DaemonEndpoint, out *v1.DaemonEndpoint, s conversion.Scope) error { +func Convert_core_DaemonEndpoint_To_v1_DaemonEndpoint(in *core.DaemonEndpoint, out *corev1.DaemonEndpoint, s conversion.Scope) error { return autoConvert_core_DaemonEndpoint_To_v1_DaemonEndpoint(in, out, s) } -func autoConvert_v1_DownwardAPIProjection_To_core_DownwardAPIProjection(in *v1.DownwardAPIProjection, out *core.DownwardAPIProjection, s conversion.Scope) error { +func autoConvert_v1_DownwardAPIProjection_To_core_DownwardAPIProjection(in *corev1.DownwardAPIProjection, out *core.DownwardAPIProjection, s conversion.Scope) error { out.Items = *(*[]core.DownwardAPIVolumeFile)(unsafe.Pointer(&in.Items)) return nil } // Convert_v1_DownwardAPIProjection_To_core_DownwardAPIProjection is an autogenerated conversion function. -func Convert_v1_DownwardAPIProjection_To_core_DownwardAPIProjection(in *v1.DownwardAPIProjection, out *core.DownwardAPIProjection, s conversion.Scope) error { +func Convert_v1_DownwardAPIProjection_To_core_DownwardAPIProjection(in *corev1.DownwardAPIProjection, out *core.DownwardAPIProjection, s conversion.Scope) error { return autoConvert_v1_DownwardAPIProjection_To_core_DownwardAPIProjection(in, out, s) } -func autoConvert_core_DownwardAPIProjection_To_v1_DownwardAPIProjection(in *core.DownwardAPIProjection, out *v1.DownwardAPIProjection, s conversion.Scope) error { - out.Items = *(*[]v1.DownwardAPIVolumeFile)(unsafe.Pointer(&in.Items)) +func autoConvert_core_DownwardAPIProjection_To_v1_DownwardAPIProjection(in *core.DownwardAPIProjection, out *corev1.DownwardAPIProjection, s conversion.Scope) error { + out.Items = *(*[]corev1.DownwardAPIVolumeFile)(unsafe.Pointer(&in.Items)) return nil } // Convert_core_DownwardAPIProjection_To_v1_DownwardAPIProjection is an autogenerated conversion function. 
-func Convert_core_DownwardAPIProjection_To_v1_DownwardAPIProjection(in *core.DownwardAPIProjection, out *v1.DownwardAPIProjection, s conversion.Scope) error { +func Convert_core_DownwardAPIProjection_To_v1_DownwardAPIProjection(in *core.DownwardAPIProjection, out *corev1.DownwardAPIProjection, s conversion.Scope) error { return autoConvert_core_DownwardAPIProjection_To_v1_DownwardAPIProjection(in, out, s) } -func autoConvert_v1_DownwardAPIVolumeFile_To_core_DownwardAPIVolumeFile(in *v1.DownwardAPIVolumeFile, out *core.DownwardAPIVolumeFile, s conversion.Scope) error { +func autoConvert_v1_DownwardAPIVolumeFile_To_core_DownwardAPIVolumeFile(in *corev1.DownwardAPIVolumeFile, out *core.DownwardAPIVolumeFile, s conversion.Scope) error { out.Path = in.Path out.FieldRef = (*core.ObjectFieldSelector)(unsafe.Pointer(in.FieldRef)) out.ResourceFieldRef = (*core.ResourceFieldSelector)(unsafe.Pointer(in.ResourceFieldRef)) @@ -3504,68 +3509,68 @@ func autoConvert_v1_DownwardAPIVolumeFile_To_core_DownwardAPIVolumeFile(in *v1.D } // Convert_v1_DownwardAPIVolumeFile_To_core_DownwardAPIVolumeFile is an autogenerated conversion function. -func Convert_v1_DownwardAPIVolumeFile_To_core_DownwardAPIVolumeFile(in *v1.DownwardAPIVolumeFile, out *core.DownwardAPIVolumeFile, s conversion.Scope) error { +func Convert_v1_DownwardAPIVolumeFile_To_core_DownwardAPIVolumeFile(in *corev1.DownwardAPIVolumeFile, out *core.DownwardAPIVolumeFile, s conversion.Scope) error { return autoConvert_v1_DownwardAPIVolumeFile_To_core_DownwardAPIVolumeFile(in, out, s) } -func autoConvert_core_DownwardAPIVolumeFile_To_v1_DownwardAPIVolumeFile(in *core.DownwardAPIVolumeFile, out *v1.DownwardAPIVolumeFile, s conversion.Scope) error { +func autoConvert_core_DownwardAPIVolumeFile_To_v1_DownwardAPIVolumeFile(in *core.DownwardAPIVolumeFile, out *corev1.DownwardAPIVolumeFile, s conversion.Scope) error { out.Path = in.Path - out.FieldRef = (*v1.ObjectFieldSelector)(unsafe.Pointer(in.FieldRef)) - out.ResourceFieldRef = (*v1.ResourceFieldSelector)(unsafe.Pointer(in.ResourceFieldRef)) + out.FieldRef = (*corev1.ObjectFieldSelector)(unsafe.Pointer(in.FieldRef)) + out.ResourceFieldRef = (*corev1.ResourceFieldSelector)(unsafe.Pointer(in.ResourceFieldRef)) out.Mode = (*int32)(unsafe.Pointer(in.Mode)) return nil } // Convert_core_DownwardAPIVolumeFile_To_v1_DownwardAPIVolumeFile is an autogenerated conversion function. -func Convert_core_DownwardAPIVolumeFile_To_v1_DownwardAPIVolumeFile(in *core.DownwardAPIVolumeFile, out *v1.DownwardAPIVolumeFile, s conversion.Scope) error { +func Convert_core_DownwardAPIVolumeFile_To_v1_DownwardAPIVolumeFile(in *core.DownwardAPIVolumeFile, out *corev1.DownwardAPIVolumeFile, s conversion.Scope) error { return autoConvert_core_DownwardAPIVolumeFile_To_v1_DownwardAPIVolumeFile(in, out, s) } -func autoConvert_v1_DownwardAPIVolumeSource_To_core_DownwardAPIVolumeSource(in *v1.DownwardAPIVolumeSource, out *core.DownwardAPIVolumeSource, s conversion.Scope) error { +func autoConvert_v1_DownwardAPIVolumeSource_To_core_DownwardAPIVolumeSource(in *corev1.DownwardAPIVolumeSource, out *core.DownwardAPIVolumeSource, s conversion.Scope) error { out.Items = *(*[]core.DownwardAPIVolumeFile)(unsafe.Pointer(&in.Items)) out.DefaultMode = (*int32)(unsafe.Pointer(in.DefaultMode)) return nil } // Convert_v1_DownwardAPIVolumeSource_To_core_DownwardAPIVolumeSource is an autogenerated conversion function. 
-func Convert_v1_DownwardAPIVolumeSource_To_core_DownwardAPIVolumeSource(in *v1.DownwardAPIVolumeSource, out *core.DownwardAPIVolumeSource, s conversion.Scope) error { +func Convert_v1_DownwardAPIVolumeSource_To_core_DownwardAPIVolumeSource(in *corev1.DownwardAPIVolumeSource, out *core.DownwardAPIVolumeSource, s conversion.Scope) error { return autoConvert_v1_DownwardAPIVolumeSource_To_core_DownwardAPIVolumeSource(in, out, s) } -func autoConvert_core_DownwardAPIVolumeSource_To_v1_DownwardAPIVolumeSource(in *core.DownwardAPIVolumeSource, out *v1.DownwardAPIVolumeSource, s conversion.Scope) error { - out.Items = *(*[]v1.DownwardAPIVolumeFile)(unsafe.Pointer(&in.Items)) +func autoConvert_core_DownwardAPIVolumeSource_To_v1_DownwardAPIVolumeSource(in *core.DownwardAPIVolumeSource, out *corev1.DownwardAPIVolumeSource, s conversion.Scope) error { + out.Items = *(*[]corev1.DownwardAPIVolumeFile)(unsafe.Pointer(&in.Items)) out.DefaultMode = (*int32)(unsafe.Pointer(in.DefaultMode)) return nil } // Convert_core_DownwardAPIVolumeSource_To_v1_DownwardAPIVolumeSource is an autogenerated conversion function. -func Convert_core_DownwardAPIVolumeSource_To_v1_DownwardAPIVolumeSource(in *core.DownwardAPIVolumeSource, out *v1.DownwardAPIVolumeSource, s conversion.Scope) error { +func Convert_core_DownwardAPIVolumeSource_To_v1_DownwardAPIVolumeSource(in *core.DownwardAPIVolumeSource, out *corev1.DownwardAPIVolumeSource, s conversion.Scope) error { return autoConvert_core_DownwardAPIVolumeSource_To_v1_DownwardAPIVolumeSource(in, out, s) } -func autoConvert_v1_EmptyDirVolumeSource_To_core_EmptyDirVolumeSource(in *v1.EmptyDirVolumeSource, out *core.EmptyDirVolumeSource, s conversion.Scope) error { +func autoConvert_v1_EmptyDirVolumeSource_To_core_EmptyDirVolumeSource(in *corev1.EmptyDirVolumeSource, out *core.EmptyDirVolumeSource, s conversion.Scope) error { out.Medium = core.StorageMedium(in.Medium) out.SizeLimit = (*resource.Quantity)(unsafe.Pointer(in.SizeLimit)) return nil } // Convert_v1_EmptyDirVolumeSource_To_core_EmptyDirVolumeSource is an autogenerated conversion function. -func Convert_v1_EmptyDirVolumeSource_To_core_EmptyDirVolumeSource(in *v1.EmptyDirVolumeSource, out *core.EmptyDirVolumeSource, s conversion.Scope) error { +func Convert_v1_EmptyDirVolumeSource_To_core_EmptyDirVolumeSource(in *corev1.EmptyDirVolumeSource, out *core.EmptyDirVolumeSource, s conversion.Scope) error { return autoConvert_v1_EmptyDirVolumeSource_To_core_EmptyDirVolumeSource(in, out, s) } -func autoConvert_core_EmptyDirVolumeSource_To_v1_EmptyDirVolumeSource(in *core.EmptyDirVolumeSource, out *v1.EmptyDirVolumeSource, s conversion.Scope) error { - out.Medium = v1.StorageMedium(in.Medium) +func autoConvert_core_EmptyDirVolumeSource_To_v1_EmptyDirVolumeSource(in *core.EmptyDirVolumeSource, out *corev1.EmptyDirVolumeSource, s conversion.Scope) error { + out.Medium = corev1.StorageMedium(in.Medium) out.SizeLimit = (*resource.Quantity)(unsafe.Pointer(in.SizeLimit)) return nil } // Convert_core_EmptyDirVolumeSource_To_v1_EmptyDirVolumeSource is an autogenerated conversion function. 
-func Convert_core_EmptyDirVolumeSource_To_v1_EmptyDirVolumeSource(in *core.EmptyDirVolumeSource, out *v1.EmptyDirVolumeSource, s conversion.Scope) error { +func Convert_core_EmptyDirVolumeSource_To_v1_EmptyDirVolumeSource(in *core.EmptyDirVolumeSource, out *corev1.EmptyDirVolumeSource, s conversion.Scope) error { return autoConvert_core_EmptyDirVolumeSource_To_v1_EmptyDirVolumeSource(in, out, s) } -func autoConvert_v1_EndpointAddress_To_core_EndpointAddress(in *v1.EndpointAddress, out *core.EndpointAddress, s conversion.Scope) error { +func autoConvert_v1_EndpointAddress_To_core_EndpointAddress(in *corev1.EndpointAddress, out *core.EndpointAddress, s conversion.Scope) error { out.IP = in.IP out.Hostname = in.Hostname out.NodeName = (*string)(unsafe.Pointer(in.NodeName)) @@ -3574,24 +3579,24 @@ func autoConvert_v1_EndpointAddress_To_core_EndpointAddress(in *v1.EndpointAddre } // Convert_v1_EndpointAddress_To_core_EndpointAddress is an autogenerated conversion function. -func Convert_v1_EndpointAddress_To_core_EndpointAddress(in *v1.EndpointAddress, out *core.EndpointAddress, s conversion.Scope) error { +func Convert_v1_EndpointAddress_To_core_EndpointAddress(in *corev1.EndpointAddress, out *core.EndpointAddress, s conversion.Scope) error { return autoConvert_v1_EndpointAddress_To_core_EndpointAddress(in, out, s) } -func autoConvert_core_EndpointAddress_To_v1_EndpointAddress(in *core.EndpointAddress, out *v1.EndpointAddress, s conversion.Scope) error { +func autoConvert_core_EndpointAddress_To_v1_EndpointAddress(in *core.EndpointAddress, out *corev1.EndpointAddress, s conversion.Scope) error { out.IP = in.IP out.Hostname = in.Hostname out.NodeName = (*string)(unsafe.Pointer(in.NodeName)) - out.TargetRef = (*v1.ObjectReference)(unsafe.Pointer(in.TargetRef)) + out.TargetRef = (*corev1.ObjectReference)(unsafe.Pointer(in.TargetRef)) return nil } // Convert_core_EndpointAddress_To_v1_EndpointAddress is an autogenerated conversion function. -func Convert_core_EndpointAddress_To_v1_EndpointAddress(in *core.EndpointAddress, out *v1.EndpointAddress, s conversion.Scope) error { +func Convert_core_EndpointAddress_To_v1_EndpointAddress(in *core.EndpointAddress, out *corev1.EndpointAddress, s conversion.Scope) error { return autoConvert_core_EndpointAddress_To_v1_EndpointAddress(in, out, s) } -func autoConvert_v1_EndpointPort_To_core_EndpointPort(in *v1.EndpointPort, out *core.EndpointPort, s conversion.Scope) error { +func autoConvert_v1_EndpointPort_To_core_EndpointPort(in *corev1.EndpointPort, out *core.EndpointPort, s conversion.Scope) error { out.Name = in.Name out.Port = in.Port out.Protocol = core.Protocol(in.Protocol) @@ -3600,24 +3605,24 @@ func autoConvert_v1_EndpointPort_To_core_EndpointPort(in *v1.EndpointPort, out * } // Convert_v1_EndpointPort_To_core_EndpointPort is an autogenerated conversion function. 
-func Convert_v1_EndpointPort_To_core_EndpointPort(in *v1.EndpointPort, out *core.EndpointPort, s conversion.Scope) error { +func Convert_v1_EndpointPort_To_core_EndpointPort(in *corev1.EndpointPort, out *core.EndpointPort, s conversion.Scope) error { return autoConvert_v1_EndpointPort_To_core_EndpointPort(in, out, s) } -func autoConvert_core_EndpointPort_To_v1_EndpointPort(in *core.EndpointPort, out *v1.EndpointPort, s conversion.Scope) error { +func autoConvert_core_EndpointPort_To_v1_EndpointPort(in *core.EndpointPort, out *corev1.EndpointPort, s conversion.Scope) error { out.Name = in.Name out.Port = in.Port - out.Protocol = v1.Protocol(in.Protocol) + out.Protocol = corev1.Protocol(in.Protocol) out.AppProtocol = (*string)(unsafe.Pointer(in.AppProtocol)) return nil } // Convert_core_EndpointPort_To_v1_EndpointPort is an autogenerated conversion function. -func Convert_core_EndpointPort_To_v1_EndpointPort(in *core.EndpointPort, out *v1.EndpointPort, s conversion.Scope) error { +func Convert_core_EndpointPort_To_v1_EndpointPort(in *core.EndpointPort, out *corev1.EndpointPort, s conversion.Scope) error { return autoConvert_core_EndpointPort_To_v1_EndpointPort(in, out, s) } -func autoConvert_v1_EndpointSubset_To_core_EndpointSubset(in *v1.EndpointSubset, out *core.EndpointSubset, s conversion.Scope) error { +func autoConvert_v1_EndpointSubset_To_core_EndpointSubset(in *corev1.EndpointSubset, out *core.EndpointSubset, s conversion.Scope) error { out.Addresses = *(*[]core.EndpointAddress)(unsafe.Pointer(&in.Addresses)) out.NotReadyAddresses = *(*[]core.EndpointAddress)(unsafe.Pointer(&in.NotReadyAddresses)) out.Ports = *(*[]core.EndpointPort)(unsafe.Pointer(&in.Ports)) @@ -3625,67 +3630,67 @@ func autoConvert_v1_EndpointSubset_To_core_EndpointSubset(in *v1.EndpointSubset, } // Convert_v1_EndpointSubset_To_core_EndpointSubset is an autogenerated conversion function. -func Convert_v1_EndpointSubset_To_core_EndpointSubset(in *v1.EndpointSubset, out *core.EndpointSubset, s conversion.Scope) error { +func Convert_v1_EndpointSubset_To_core_EndpointSubset(in *corev1.EndpointSubset, out *core.EndpointSubset, s conversion.Scope) error { return autoConvert_v1_EndpointSubset_To_core_EndpointSubset(in, out, s) } -func autoConvert_core_EndpointSubset_To_v1_EndpointSubset(in *core.EndpointSubset, out *v1.EndpointSubset, s conversion.Scope) error { - out.Addresses = *(*[]v1.EndpointAddress)(unsafe.Pointer(&in.Addresses)) - out.NotReadyAddresses = *(*[]v1.EndpointAddress)(unsafe.Pointer(&in.NotReadyAddresses)) - out.Ports = *(*[]v1.EndpointPort)(unsafe.Pointer(&in.Ports)) +func autoConvert_core_EndpointSubset_To_v1_EndpointSubset(in *core.EndpointSubset, out *corev1.EndpointSubset, s conversion.Scope) error { + out.Addresses = *(*[]corev1.EndpointAddress)(unsafe.Pointer(&in.Addresses)) + out.NotReadyAddresses = *(*[]corev1.EndpointAddress)(unsafe.Pointer(&in.NotReadyAddresses)) + out.Ports = *(*[]corev1.EndpointPort)(unsafe.Pointer(&in.Ports)) return nil } // Convert_core_EndpointSubset_To_v1_EndpointSubset is an autogenerated conversion function. 
-func Convert_core_EndpointSubset_To_v1_EndpointSubset(in *core.EndpointSubset, out *v1.EndpointSubset, s conversion.Scope) error { +func Convert_core_EndpointSubset_To_v1_EndpointSubset(in *core.EndpointSubset, out *corev1.EndpointSubset, s conversion.Scope) error { return autoConvert_core_EndpointSubset_To_v1_EndpointSubset(in, out, s) } -func autoConvert_v1_Endpoints_To_core_Endpoints(in *v1.Endpoints, out *core.Endpoints, s conversion.Scope) error { +func autoConvert_v1_Endpoints_To_core_Endpoints(in *corev1.Endpoints, out *core.Endpoints, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta out.Subsets = *(*[]core.EndpointSubset)(unsafe.Pointer(&in.Subsets)) return nil } // Convert_v1_Endpoints_To_core_Endpoints is an autogenerated conversion function. -func Convert_v1_Endpoints_To_core_Endpoints(in *v1.Endpoints, out *core.Endpoints, s conversion.Scope) error { +func Convert_v1_Endpoints_To_core_Endpoints(in *corev1.Endpoints, out *core.Endpoints, s conversion.Scope) error { return autoConvert_v1_Endpoints_To_core_Endpoints(in, out, s) } -func autoConvert_core_Endpoints_To_v1_Endpoints(in *core.Endpoints, out *v1.Endpoints, s conversion.Scope) error { +func autoConvert_core_Endpoints_To_v1_Endpoints(in *core.Endpoints, out *corev1.Endpoints, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta - out.Subsets = *(*[]v1.EndpointSubset)(unsafe.Pointer(&in.Subsets)) + out.Subsets = *(*[]corev1.EndpointSubset)(unsafe.Pointer(&in.Subsets)) return nil } // Convert_core_Endpoints_To_v1_Endpoints is an autogenerated conversion function. -func Convert_core_Endpoints_To_v1_Endpoints(in *core.Endpoints, out *v1.Endpoints, s conversion.Scope) error { +func Convert_core_Endpoints_To_v1_Endpoints(in *core.Endpoints, out *corev1.Endpoints, s conversion.Scope) error { return autoConvert_core_Endpoints_To_v1_Endpoints(in, out, s) } -func autoConvert_v1_EndpointsList_To_core_EndpointsList(in *v1.EndpointsList, out *core.EndpointsList, s conversion.Scope) error { +func autoConvert_v1_EndpointsList_To_core_EndpointsList(in *corev1.EndpointsList, out *core.EndpointsList, s conversion.Scope) error { out.ListMeta = in.ListMeta out.Items = *(*[]core.Endpoints)(unsafe.Pointer(&in.Items)) return nil } // Convert_v1_EndpointsList_To_core_EndpointsList is an autogenerated conversion function. -func Convert_v1_EndpointsList_To_core_EndpointsList(in *v1.EndpointsList, out *core.EndpointsList, s conversion.Scope) error { +func Convert_v1_EndpointsList_To_core_EndpointsList(in *corev1.EndpointsList, out *core.EndpointsList, s conversion.Scope) error { return autoConvert_v1_EndpointsList_To_core_EndpointsList(in, out, s) } -func autoConvert_core_EndpointsList_To_v1_EndpointsList(in *core.EndpointsList, out *v1.EndpointsList, s conversion.Scope) error { +func autoConvert_core_EndpointsList_To_v1_EndpointsList(in *core.EndpointsList, out *corev1.EndpointsList, s conversion.Scope) error { out.ListMeta = in.ListMeta - out.Items = *(*[]v1.Endpoints)(unsafe.Pointer(&in.Items)) + out.Items = *(*[]corev1.Endpoints)(unsafe.Pointer(&in.Items)) return nil } // Convert_core_EndpointsList_To_v1_EndpointsList is an autogenerated conversion function. 
-func Convert_core_EndpointsList_To_v1_EndpointsList(in *core.EndpointsList, out *v1.EndpointsList, s conversion.Scope) error { +func Convert_core_EndpointsList_To_v1_EndpointsList(in *core.EndpointsList, out *corev1.EndpointsList, s conversion.Scope) error { return autoConvert_core_EndpointsList_To_v1_EndpointsList(in, out, s) } -func autoConvert_v1_EnvFromSource_To_core_EnvFromSource(in *v1.EnvFromSource, out *core.EnvFromSource, s conversion.Scope) error { +func autoConvert_v1_EnvFromSource_To_core_EnvFromSource(in *corev1.EnvFromSource, out *core.EnvFromSource, s conversion.Scope) error { out.Prefix = in.Prefix out.ConfigMapRef = (*core.ConfigMapEnvSource)(unsafe.Pointer(in.ConfigMapRef)) out.SecretRef = (*core.SecretEnvSource)(unsafe.Pointer(in.SecretRef)) @@ -3693,23 +3698,23 @@ func autoConvert_v1_EnvFromSource_To_core_EnvFromSource(in *v1.EnvFromSource, ou } // Convert_v1_EnvFromSource_To_core_EnvFromSource is an autogenerated conversion function. -func Convert_v1_EnvFromSource_To_core_EnvFromSource(in *v1.EnvFromSource, out *core.EnvFromSource, s conversion.Scope) error { +func Convert_v1_EnvFromSource_To_core_EnvFromSource(in *corev1.EnvFromSource, out *core.EnvFromSource, s conversion.Scope) error { return autoConvert_v1_EnvFromSource_To_core_EnvFromSource(in, out, s) } -func autoConvert_core_EnvFromSource_To_v1_EnvFromSource(in *core.EnvFromSource, out *v1.EnvFromSource, s conversion.Scope) error { +func autoConvert_core_EnvFromSource_To_v1_EnvFromSource(in *core.EnvFromSource, out *corev1.EnvFromSource, s conversion.Scope) error { out.Prefix = in.Prefix - out.ConfigMapRef = (*v1.ConfigMapEnvSource)(unsafe.Pointer(in.ConfigMapRef)) - out.SecretRef = (*v1.SecretEnvSource)(unsafe.Pointer(in.SecretRef)) + out.ConfigMapRef = (*corev1.ConfigMapEnvSource)(unsafe.Pointer(in.ConfigMapRef)) + out.SecretRef = (*corev1.SecretEnvSource)(unsafe.Pointer(in.SecretRef)) return nil } // Convert_core_EnvFromSource_To_v1_EnvFromSource is an autogenerated conversion function. -func Convert_core_EnvFromSource_To_v1_EnvFromSource(in *core.EnvFromSource, out *v1.EnvFromSource, s conversion.Scope) error { +func Convert_core_EnvFromSource_To_v1_EnvFromSource(in *core.EnvFromSource, out *corev1.EnvFromSource, s conversion.Scope) error { return autoConvert_core_EnvFromSource_To_v1_EnvFromSource(in, out, s) } -func autoConvert_v1_EnvVar_To_core_EnvVar(in *v1.EnvVar, out *core.EnvVar, s conversion.Scope) error { +func autoConvert_v1_EnvVar_To_core_EnvVar(in *corev1.EnvVar, out *core.EnvVar, s conversion.Scope) error { out.Name = in.Name out.Value = in.Value out.ValueFrom = (*core.EnvVarSource)(unsafe.Pointer(in.ValueFrom)) @@ -3717,23 +3722,23 @@ func autoConvert_v1_EnvVar_To_core_EnvVar(in *v1.EnvVar, out *core.EnvVar, s con } // Convert_v1_EnvVar_To_core_EnvVar is an autogenerated conversion function. 
-func Convert_v1_EnvVar_To_core_EnvVar(in *v1.EnvVar, out *core.EnvVar, s conversion.Scope) error { +func Convert_v1_EnvVar_To_core_EnvVar(in *corev1.EnvVar, out *core.EnvVar, s conversion.Scope) error { return autoConvert_v1_EnvVar_To_core_EnvVar(in, out, s) } -func autoConvert_core_EnvVar_To_v1_EnvVar(in *core.EnvVar, out *v1.EnvVar, s conversion.Scope) error { +func autoConvert_core_EnvVar_To_v1_EnvVar(in *core.EnvVar, out *corev1.EnvVar, s conversion.Scope) error { out.Name = in.Name out.Value = in.Value - out.ValueFrom = (*v1.EnvVarSource)(unsafe.Pointer(in.ValueFrom)) + out.ValueFrom = (*corev1.EnvVarSource)(unsafe.Pointer(in.ValueFrom)) return nil } // Convert_core_EnvVar_To_v1_EnvVar is an autogenerated conversion function. -func Convert_core_EnvVar_To_v1_EnvVar(in *core.EnvVar, out *v1.EnvVar, s conversion.Scope) error { +func Convert_core_EnvVar_To_v1_EnvVar(in *core.EnvVar, out *corev1.EnvVar, s conversion.Scope) error { return autoConvert_core_EnvVar_To_v1_EnvVar(in, out, s) } -func autoConvert_v1_EnvVarSource_To_core_EnvVarSource(in *v1.EnvVarSource, out *core.EnvVarSource, s conversion.Scope) error { +func autoConvert_v1_EnvVarSource_To_core_EnvVarSource(in *corev1.EnvVarSource, out *core.EnvVarSource, s conversion.Scope) error { out.FieldRef = (*core.ObjectFieldSelector)(unsafe.Pointer(in.FieldRef)) out.ResourceFieldRef = (*core.ResourceFieldSelector)(unsafe.Pointer(in.ResourceFieldRef)) out.ConfigMapKeyRef = (*core.ConfigMapKeySelector)(unsafe.Pointer(in.ConfigMapKeyRef)) @@ -3742,24 +3747,24 @@ func autoConvert_v1_EnvVarSource_To_core_EnvVarSource(in *v1.EnvVarSource, out * } // Convert_v1_EnvVarSource_To_core_EnvVarSource is an autogenerated conversion function. -func Convert_v1_EnvVarSource_To_core_EnvVarSource(in *v1.EnvVarSource, out *core.EnvVarSource, s conversion.Scope) error { +func Convert_v1_EnvVarSource_To_core_EnvVarSource(in *corev1.EnvVarSource, out *core.EnvVarSource, s conversion.Scope) error { return autoConvert_v1_EnvVarSource_To_core_EnvVarSource(in, out, s) } -func autoConvert_core_EnvVarSource_To_v1_EnvVarSource(in *core.EnvVarSource, out *v1.EnvVarSource, s conversion.Scope) error { - out.FieldRef = (*v1.ObjectFieldSelector)(unsafe.Pointer(in.FieldRef)) - out.ResourceFieldRef = (*v1.ResourceFieldSelector)(unsafe.Pointer(in.ResourceFieldRef)) - out.ConfigMapKeyRef = (*v1.ConfigMapKeySelector)(unsafe.Pointer(in.ConfigMapKeyRef)) - out.SecretKeyRef = (*v1.SecretKeySelector)(unsafe.Pointer(in.SecretKeyRef)) +func autoConvert_core_EnvVarSource_To_v1_EnvVarSource(in *core.EnvVarSource, out *corev1.EnvVarSource, s conversion.Scope) error { + out.FieldRef = (*corev1.ObjectFieldSelector)(unsafe.Pointer(in.FieldRef)) + out.ResourceFieldRef = (*corev1.ResourceFieldSelector)(unsafe.Pointer(in.ResourceFieldRef)) + out.ConfigMapKeyRef = (*corev1.ConfigMapKeySelector)(unsafe.Pointer(in.ConfigMapKeyRef)) + out.SecretKeyRef = (*corev1.SecretKeySelector)(unsafe.Pointer(in.SecretKeyRef)) return nil } // Convert_core_EnvVarSource_To_v1_EnvVarSource is an autogenerated conversion function. 
-func Convert_core_EnvVarSource_To_v1_EnvVarSource(in *core.EnvVarSource, out *v1.EnvVarSource, s conversion.Scope) error { +func Convert_core_EnvVarSource_To_v1_EnvVarSource(in *core.EnvVarSource, out *corev1.EnvVarSource, s conversion.Scope) error { return autoConvert_core_EnvVarSource_To_v1_EnvVarSource(in, out, s) } -func autoConvert_v1_EphemeralContainer_To_core_EphemeralContainer(in *v1.EphemeralContainer, out *core.EphemeralContainer, s conversion.Scope) error { +func autoConvert_v1_EphemeralContainer_To_core_EphemeralContainer(in *corev1.EphemeralContainer, out *core.EphemeralContainer, s conversion.Scope) error { if err := Convert_v1_EphemeralContainerCommon_To_core_EphemeralContainerCommon(&in.EphemeralContainerCommon, &out.EphemeralContainerCommon, s); err != nil { return err } @@ -3768,11 +3773,11 @@ func autoConvert_v1_EphemeralContainer_To_core_EphemeralContainer(in *v1.Ephemer } // Convert_v1_EphemeralContainer_To_core_EphemeralContainer is an autogenerated conversion function. -func Convert_v1_EphemeralContainer_To_core_EphemeralContainer(in *v1.EphemeralContainer, out *core.EphemeralContainer, s conversion.Scope) error { +func Convert_v1_EphemeralContainer_To_core_EphemeralContainer(in *corev1.EphemeralContainer, out *core.EphemeralContainer, s conversion.Scope) error { return autoConvert_v1_EphemeralContainer_To_core_EphemeralContainer(in, out, s) } -func autoConvert_core_EphemeralContainer_To_v1_EphemeralContainer(in *core.EphemeralContainer, out *v1.EphemeralContainer, s conversion.Scope) error { +func autoConvert_core_EphemeralContainer_To_v1_EphemeralContainer(in *core.EphemeralContainer, out *corev1.EphemeralContainer, s conversion.Scope) error { if err := Convert_core_EphemeralContainerCommon_To_v1_EphemeralContainerCommon(&in.EphemeralContainerCommon, &out.EphemeralContainerCommon, s); err != nil { return err } @@ -3781,11 +3786,11 @@ func autoConvert_core_EphemeralContainer_To_v1_EphemeralContainer(in *core.Ephem } // Convert_core_EphemeralContainer_To_v1_EphemeralContainer is an autogenerated conversion function. -func Convert_core_EphemeralContainer_To_v1_EphemeralContainer(in *core.EphemeralContainer, out *v1.EphemeralContainer, s conversion.Scope) error { +func Convert_core_EphemeralContainer_To_v1_EphemeralContainer(in *core.EphemeralContainer, out *corev1.EphemeralContainer, s conversion.Scope) error { return autoConvert_core_EphemeralContainer_To_v1_EphemeralContainer(in, out, s) } -func autoConvert_v1_EphemeralContainerCommon_To_core_EphemeralContainerCommon(in *v1.EphemeralContainerCommon, out *core.EphemeralContainerCommon, s conversion.Scope) error { +func autoConvert_v1_EphemeralContainerCommon_To_core_EphemeralContainerCommon(in *corev1.EphemeralContainerCommon, out *core.EphemeralContainerCommon, s conversion.Scope) error { out.Name = in.Name out.Image = in.Image out.Command = *(*[]string)(unsafe.Pointer(&in.Command)) @@ -3816,34 +3821,34 @@ func autoConvert_v1_EphemeralContainerCommon_To_core_EphemeralContainerCommon(in } // Convert_v1_EphemeralContainerCommon_To_core_EphemeralContainerCommon is an autogenerated conversion function. 
-func Convert_v1_EphemeralContainerCommon_To_core_EphemeralContainerCommon(in *v1.EphemeralContainerCommon, out *core.EphemeralContainerCommon, s conversion.Scope) error { +func Convert_v1_EphemeralContainerCommon_To_core_EphemeralContainerCommon(in *corev1.EphemeralContainerCommon, out *core.EphemeralContainerCommon, s conversion.Scope) error { return autoConvert_v1_EphemeralContainerCommon_To_core_EphemeralContainerCommon(in, out, s) } -func autoConvert_core_EphemeralContainerCommon_To_v1_EphemeralContainerCommon(in *core.EphemeralContainerCommon, out *v1.EphemeralContainerCommon, s conversion.Scope) error { +func autoConvert_core_EphemeralContainerCommon_To_v1_EphemeralContainerCommon(in *core.EphemeralContainerCommon, out *corev1.EphemeralContainerCommon, s conversion.Scope) error { out.Name = in.Name out.Image = in.Image out.Command = *(*[]string)(unsafe.Pointer(&in.Command)) out.Args = *(*[]string)(unsafe.Pointer(&in.Args)) out.WorkingDir = in.WorkingDir - out.Ports = *(*[]v1.ContainerPort)(unsafe.Pointer(&in.Ports)) - out.EnvFrom = *(*[]v1.EnvFromSource)(unsafe.Pointer(&in.EnvFrom)) - out.Env = *(*[]v1.EnvVar)(unsafe.Pointer(&in.Env)) + out.Ports = *(*[]corev1.ContainerPort)(unsafe.Pointer(&in.Ports)) + out.EnvFrom = *(*[]corev1.EnvFromSource)(unsafe.Pointer(&in.EnvFrom)) + out.Env = *(*[]corev1.EnvVar)(unsafe.Pointer(&in.Env)) if err := Convert_core_ResourceRequirements_To_v1_ResourceRequirements(&in.Resources, &out.Resources, s); err != nil { return err } - out.ResizePolicy = *(*[]v1.ContainerResizePolicy)(unsafe.Pointer(&in.ResizePolicy)) - out.RestartPolicy = (*v1.ContainerRestartPolicy)(unsafe.Pointer(in.RestartPolicy)) - out.VolumeMounts = *(*[]v1.VolumeMount)(unsafe.Pointer(&in.VolumeMounts)) - out.VolumeDevices = *(*[]v1.VolumeDevice)(unsafe.Pointer(&in.VolumeDevices)) - out.LivenessProbe = (*v1.Probe)(unsafe.Pointer(in.LivenessProbe)) - out.ReadinessProbe = (*v1.Probe)(unsafe.Pointer(in.ReadinessProbe)) - out.StartupProbe = (*v1.Probe)(unsafe.Pointer(in.StartupProbe)) - out.Lifecycle = (*v1.Lifecycle)(unsafe.Pointer(in.Lifecycle)) + out.ResizePolicy = *(*[]corev1.ContainerResizePolicy)(unsafe.Pointer(&in.ResizePolicy)) + out.RestartPolicy = (*corev1.ContainerRestartPolicy)(unsafe.Pointer(in.RestartPolicy)) + out.VolumeMounts = *(*[]corev1.VolumeMount)(unsafe.Pointer(&in.VolumeMounts)) + out.VolumeDevices = *(*[]corev1.VolumeDevice)(unsafe.Pointer(&in.VolumeDevices)) + out.LivenessProbe = (*corev1.Probe)(unsafe.Pointer(in.LivenessProbe)) + out.ReadinessProbe = (*corev1.Probe)(unsafe.Pointer(in.ReadinessProbe)) + out.StartupProbe = (*corev1.Probe)(unsafe.Pointer(in.StartupProbe)) + out.Lifecycle = (*corev1.Lifecycle)(unsafe.Pointer(in.Lifecycle)) out.TerminationMessagePath = in.TerminationMessagePath - out.TerminationMessagePolicy = v1.TerminationMessagePolicy(in.TerminationMessagePolicy) - out.ImagePullPolicy = v1.PullPolicy(in.ImagePullPolicy) - out.SecurityContext = (*v1.SecurityContext)(unsafe.Pointer(in.SecurityContext)) + out.TerminationMessagePolicy = corev1.TerminationMessagePolicy(in.TerminationMessagePolicy) + out.ImagePullPolicy = corev1.PullPolicy(in.ImagePullPolicy) + out.SecurityContext = (*corev1.SecurityContext)(unsafe.Pointer(in.SecurityContext)) out.Stdin = in.Stdin out.StdinOnce = in.StdinOnce out.TTY = in.TTY @@ -3851,31 +3856,31 @@ func autoConvert_core_EphemeralContainerCommon_To_v1_EphemeralContainerCommon(in } // Convert_core_EphemeralContainerCommon_To_v1_EphemeralContainerCommon is an autogenerated conversion function. 
-func Convert_core_EphemeralContainerCommon_To_v1_EphemeralContainerCommon(in *core.EphemeralContainerCommon, out *v1.EphemeralContainerCommon, s conversion.Scope) error { +func Convert_core_EphemeralContainerCommon_To_v1_EphemeralContainerCommon(in *core.EphemeralContainerCommon, out *corev1.EphemeralContainerCommon, s conversion.Scope) error { return autoConvert_core_EphemeralContainerCommon_To_v1_EphemeralContainerCommon(in, out, s) } -func autoConvert_v1_EphemeralVolumeSource_To_core_EphemeralVolumeSource(in *v1.EphemeralVolumeSource, out *core.EphemeralVolumeSource, s conversion.Scope) error { +func autoConvert_v1_EphemeralVolumeSource_To_core_EphemeralVolumeSource(in *corev1.EphemeralVolumeSource, out *core.EphemeralVolumeSource, s conversion.Scope) error { out.VolumeClaimTemplate = (*core.PersistentVolumeClaimTemplate)(unsafe.Pointer(in.VolumeClaimTemplate)) return nil } // Convert_v1_EphemeralVolumeSource_To_core_EphemeralVolumeSource is an autogenerated conversion function. -func Convert_v1_EphemeralVolumeSource_To_core_EphemeralVolumeSource(in *v1.EphemeralVolumeSource, out *core.EphemeralVolumeSource, s conversion.Scope) error { +func Convert_v1_EphemeralVolumeSource_To_core_EphemeralVolumeSource(in *corev1.EphemeralVolumeSource, out *core.EphemeralVolumeSource, s conversion.Scope) error { return autoConvert_v1_EphemeralVolumeSource_To_core_EphemeralVolumeSource(in, out, s) } -func autoConvert_core_EphemeralVolumeSource_To_v1_EphemeralVolumeSource(in *core.EphemeralVolumeSource, out *v1.EphemeralVolumeSource, s conversion.Scope) error { - out.VolumeClaimTemplate = (*v1.PersistentVolumeClaimTemplate)(unsafe.Pointer(in.VolumeClaimTemplate)) +func autoConvert_core_EphemeralVolumeSource_To_v1_EphemeralVolumeSource(in *core.EphemeralVolumeSource, out *corev1.EphemeralVolumeSource, s conversion.Scope) error { + out.VolumeClaimTemplate = (*corev1.PersistentVolumeClaimTemplate)(unsafe.Pointer(in.VolumeClaimTemplate)) return nil } // Convert_core_EphemeralVolumeSource_To_v1_EphemeralVolumeSource is an autogenerated conversion function. -func Convert_core_EphemeralVolumeSource_To_v1_EphemeralVolumeSource(in *core.EphemeralVolumeSource, out *v1.EphemeralVolumeSource, s conversion.Scope) error { +func Convert_core_EphemeralVolumeSource_To_v1_EphemeralVolumeSource(in *core.EphemeralVolumeSource, out *corev1.EphemeralVolumeSource, s conversion.Scope) error { return autoConvert_core_EphemeralVolumeSource_To_v1_EphemeralVolumeSource(in, out, s) } -func autoConvert_v1_Event_To_core_Event(in *v1.Event, out *core.Event, s conversion.Scope) error { +func autoConvert_v1_Event_To_core_Event(in *corev1.Event, out *core.Event, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta if err := Convert_v1_ObjectReference_To_core_ObjectReference(&in.InvolvedObject, &out.InvolvedObject, s); err != nil { return err @@ -3899,11 +3904,11 @@ func autoConvert_v1_Event_To_core_Event(in *v1.Event, out *core.Event, s convers } // Convert_v1_Event_To_core_Event is an autogenerated conversion function. 
-func Convert_v1_Event_To_core_Event(in *v1.Event, out *core.Event, s conversion.Scope) error { +func Convert_v1_Event_To_core_Event(in *corev1.Event, out *core.Event, s conversion.Scope) error { return autoConvert_v1_Event_To_core_Event(in, out, s) } -func autoConvert_core_Event_To_v1_Event(in *core.Event, out *v1.Event, s conversion.Scope) error { +func autoConvert_core_Event_To_v1_Event(in *core.Event, out *corev1.Event, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta if err := Convert_core_ObjectReference_To_v1_ObjectReference(&in.InvolvedObject, &out.InvolvedObject, s); err != nil { return err @@ -3918,106 +3923,106 @@ func autoConvert_core_Event_To_v1_Event(in *core.Event, out *v1.Event, s convers out.Count = in.Count out.Type = in.Type out.EventTime = in.EventTime - out.Series = (*v1.EventSeries)(unsafe.Pointer(in.Series)) + out.Series = (*corev1.EventSeries)(unsafe.Pointer(in.Series)) out.Action = in.Action - out.Related = (*v1.ObjectReference)(unsafe.Pointer(in.Related)) + out.Related = (*corev1.ObjectReference)(unsafe.Pointer(in.Related)) out.ReportingController = in.ReportingController out.ReportingInstance = in.ReportingInstance return nil } // Convert_core_Event_To_v1_Event is an autogenerated conversion function. -func Convert_core_Event_To_v1_Event(in *core.Event, out *v1.Event, s conversion.Scope) error { +func Convert_core_Event_To_v1_Event(in *core.Event, out *corev1.Event, s conversion.Scope) error { return autoConvert_core_Event_To_v1_Event(in, out, s) } -func autoConvert_v1_EventList_To_core_EventList(in *v1.EventList, out *core.EventList, s conversion.Scope) error { +func autoConvert_v1_EventList_To_core_EventList(in *corev1.EventList, out *core.EventList, s conversion.Scope) error { out.ListMeta = in.ListMeta out.Items = *(*[]core.Event)(unsafe.Pointer(&in.Items)) return nil } // Convert_v1_EventList_To_core_EventList is an autogenerated conversion function. -func Convert_v1_EventList_To_core_EventList(in *v1.EventList, out *core.EventList, s conversion.Scope) error { +func Convert_v1_EventList_To_core_EventList(in *corev1.EventList, out *core.EventList, s conversion.Scope) error { return autoConvert_v1_EventList_To_core_EventList(in, out, s) } -func autoConvert_core_EventList_To_v1_EventList(in *core.EventList, out *v1.EventList, s conversion.Scope) error { +func autoConvert_core_EventList_To_v1_EventList(in *core.EventList, out *corev1.EventList, s conversion.Scope) error { out.ListMeta = in.ListMeta - out.Items = *(*[]v1.Event)(unsafe.Pointer(&in.Items)) + out.Items = *(*[]corev1.Event)(unsafe.Pointer(&in.Items)) return nil } // Convert_core_EventList_To_v1_EventList is an autogenerated conversion function. -func Convert_core_EventList_To_v1_EventList(in *core.EventList, out *v1.EventList, s conversion.Scope) error { +func Convert_core_EventList_To_v1_EventList(in *core.EventList, out *corev1.EventList, s conversion.Scope) error { return autoConvert_core_EventList_To_v1_EventList(in, out, s) } -func autoConvert_v1_EventSeries_To_core_EventSeries(in *v1.EventSeries, out *core.EventSeries, s conversion.Scope) error { +func autoConvert_v1_EventSeries_To_core_EventSeries(in *corev1.EventSeries, out *core.EventSeries, s conversion.Scope) error { out.Count = in.Count out.LastObservedTime = in.LastObservedTime return nil } // Convert_v1_EventSeries_To_core_EventSeries is an autogenerated conversion function. 
-func Convert_v1_EventSeries_To_core_EventSeries(in *v1.EventSeries, out *core.EventSeries, s conversion.Scope) error { +func Convert_v1_EventSeries_To_core_EventSeries(in *corev1.EventSeries, out *core.EventSeries, s conversion.Scope) error { return autoConvert_v1_EventSeries_To_core_EventSeries(in, out, s) } -func autoConvert_core_EventSeries_To_v1_EventSeries(in *core.EventSeries, out *v1.EventSeries, s conversion.Scope) error { +func autoConvert_core_EventSeries_To_v1_EventSeries(in *core.EventSeries, out *corev1.EventSeries, s conversion.Scope) error { out.Count = in.Count out.LastObservedTime = in.LastObservedTime return nil } // Convert_core_EventSeries_To_v1_EventSeries is an autogenerated conversion function. -func Convert_core_EventSeries_To_v1_EventSeries(in *core.EventSeries, out *v1.EventSeries, s conversion.Scope) error { +func Convert_core_EventSeries_To_v1_EventSeries(in *core.EventSeries, out *corev1.EventSeries, s conversion.Scope) error { return autoConvert_core_EventSeries_To_v1_EventSeries(in, out, s) } -func autoConvert_v1_EventSource_To_core_EventSource(in *v1.EventSource, out *core.EventSource, s conversion.Scope) error { +func autoConvert_v1_EventSource_To_core_EventSource(in *corev1.EventSource, out *core.EventSource, s conversion.Scope) error { out.Component = in.Component out.Host = in.Host return nil } // Convert_v1_EventSource_To_core_EventSource is an autogenerated conversion function. -func Convert_v1_EventSource_To_core_EventSource(in *v1.EventSource, out *core.EventSource, s conversion.Scope) error { +func Convert_v1_EventSource_To_core_EventSource(in *corev1.EventSource, out *core.EventSource, s conversion.Scope) error { return autoConvert_v1_EventSource_To_core_EventSource(in, out, s) } -func autoConvert_core_EventSource_To_v1_EventSource(in *core.EventSource, out *v1.EventSource, s conversion.Scope) error { +func autoConvert_core_EventSource_To_v1_EventSource(in *core.EventSource, out *corev1.EventSource, s conversion.Scope) error { out.Component = in.Component out.Host = in.Host return nil } // Convert_core_EventSource_To_v1_EventSource is an autogenerated conversion function. -func Convert_core_EventSource_To_v1_EventSource(in *core.EventSource, out *v1.EventSource, s conversion.Scope) error { +func Convert_core_EventSource_To_v1_EventSource(in *core.EventSource, out *corev1.EventSource, s conversion.Scope) error { return autoConvert_core_EventSource_To_v1_EventSource(in, out, s) } -func autoConvert_v1_ExecAction_To_core_ExecAction(in *v1.ExecAction, out *core.ExecAction, s conversion.Scope) error { +func autoConvert_v1_ExecAction_To_core_ExecAction(in *corev1.ExecAction, out *core.ExecAction, s conversion.Scope) error { out.Command = *(*[]string)(unsafe.Pointer(&in.Command)) return nil } // Convert_v1_ExecAction_To_core_ExecAction is an autogenerated conversion function. 
-func Convert_v1_ExecAction_To_core_ExecAction(in *v1.ExecAction, out *core.ExecAction, s conversion.Scope) error { +func Convert_v1_ExecAction_To_core_ExecAction(in *corev1.ExecAction, out *core.ExecAction, s conversion.Scope) error { return autoConvert_v1_ExecAction_To_core_ExecAction(in, out, s) } -func autoConvert_core_ExecAction_To_v1_ExecAction(in *core.ExecAction, out *v1.ExecAction, s conversion.Scope) error { +func autoConvert_core_ExecAction_To_v1_ExecAction(in *core.ExecAction, out *corev1.ExecAction, s conversion.Scope) error { out.Command = *(*[]string)(unsafe.Pointer(&in.Command)) return nil } // Convert_core_ExecAction_To_v1_ExecAction is an autogenerated conversion function. -func Convert_core_ExecAction_To_v1_ExecAction(in *core.ExecAction, out *v1.ExecAction, s conversion.Scope) error { +func Convert_core_ExecAction_To_v1_ExecAction(in *core.ExecAction, out *corev1.ExecAction, s conversion.Scope) error { return autoConvert_core_ExecAction_To_v1_ExecAction(in, out, s) } -func autoConvert_v1_FCVolumeSource_To_core_FCVolumeSource(in *v1.FCVolumeSource, out *core.FCVolumeSource, s conversion.Scope) error { +func autoConvert_v1_FCVolumeSource_To_core_FCVolumeSource(in *corev1.FCVolumeSource, out *core.FCVolumeSource, s conversion.Scope) error { out.TargetWWNs = *(*[]string)(unsafe.Pointer(&in.TargetWWNs)) out.Lun = (*int32)(unsafe.Pointer(in.Lun)) out.FSType = in.FSType @@ -4027,11 +4032,11 @@ func autoConvert_v1_FCVolumeSource_To_core_FCVolumeSource(in *v1.FCVolumeSource, } // Convert_v1_FCVolumeSource_To_core_FCVolumeSource is an autogenerated conversion function. -func Convert_v1_FCVolumeSource_To_core_FCVolumeSource(in *v1.FCVolumeSource, out *core.FCVolumeSource, s conversion.Scope) error { +func Convert_v1_FCVolumeSource_To_core_FCVolumeSource(in *corev1.FCVolumeSource, out *core.FCVolumeSource, s conversion.Scope) error { return autoConvert_v1_FCVolumeSource_To_core_FCVolumeSource(in, out, s) } -func autoConvert_core_FCVolumeSource_To_v1_FCVolumeSource(in *core.FCVolumeSource, out *v1.FCVolumeSource, s conversion.Scope) error { +func autoConvert_core_FCVolumeSource_To_v1_FCVolumeSource(in *core.FCVolumeSource, out *corev1.FCVolumeSource, s conversion.Scope) error { out.TargetWWNs = *(*[]string)(unsafe.Pointer(&in.TargetWWNs)) out.Lun = (*int32)(unsafe.Pointer(in.Lun)) out.FSType = in.FSType @@ -4041,11 +4046,11 @@ func autoConvert_core_FCVolumeSource_To_v1_FCVolumeSource(in *core.FCVolumeSourc } // Convert_core_FCVolumeSource_To_v1_FCVolumeSource is an autogenerated conversion function. 
-func Convert_core_FCVolumeSource_To_v1_FCVolumeSource(in *core.FCVolumeSource, out *v1.FCVolumeSource, s conversion.Scope) error { +func Convert_core_FCVolumeSource_To_v1_FCVolumeSource(in *core.FCVolumeSource, out *corev1.FCVolumeSource, s conversion.Scope) error { return autoConvert_core_FCVolumeSource_To_v1_FCVolumeSource(in, out, s) } -func autoConvert_v1_FlexPersistentVolumeSource_To_core_FlexPersistentVolumeSource(in *v1.FlexPersistentVolumeSource, out *core.FlexPersistentVolumeSource, s conversion.Scope) error { +func autoConvert_v1_FlexPersistentVolumeSource_To_core_FlexPersistentVolumeSource(in *corev1.FlexPersistentVolumeSource, out *core.FlexPersistentVolumeSource, s conversion.Scope) error { out.Driver = in.Driver out.FSType = in.FSType out.SecretRef = (*core.SecretReference)(unsafe.Pointer(in.SecretRef)) @@ -4055,25 +4060,25 @@ func autoConvert_v1_FlexPersistentVolumeSource_To_core_FlexPersistentVolumeSourc } // Convert_v1_FlexPersistentVolumeSource_To_core_FlexPersistentVolumeSource is an autogenerated conversion function. -func Convert_v1_FlexPersistentVolumeSource_To_core_FlexPersistentVolumeSource(in *v1.FlexPersistentVolumeSource, out *core.FlexPersistentVolumeSource, s conversion.Scope) error { +func Convert_v1_FlexPersistentVolumeSource_To_core_FlexPersistentVolumeSource(in *corev1.FlexPersistentVolumeSource, out *core.FlexPersistentVolumeSource, s conversion.Scope) error { return autoConvert_v1_FlexPersistentVolumeSource_To_core_FlexPersistentVolumeSource(in, out, s) } -func autoConvert_core_FlexPersistentVolumeSource_To_v1_FlexPersistentVolumeSource(in *core.FlexPersistentVolumeSource, out *v1.FlexPersistentVolumeSource, s conversion.Scope) error { +func autoConvert_core_FlexPersistentVolumeSource_To_v1_FlexPersistentVolumeSource(in *core.FlexPersistentVolumeSource, out *corev1.FlexPersistentVolumeSource, s conversion.Scope) error { out.Driver = in.Driver out.FSType = in.FSType - out.SecretRef = (*v1.SecretReference)(unsafe.Pointer(in.SecretRef)) + out.SecretRef = (*corev1.SecretReference)(unsafe.Pointer(in.SecretRef)) out.ReadOnly = in.ReadOnly out.Options = *(*map[string]string)(unsafe.Pointer(&in.Options)) return nil } // Convert_core_FlexPersistentVolumeSource_To_v1_FlexPersistentVolumeSource is an autogenerated conversion function. -func Convert_core_FlexPersistentVolumeSource_To_v1_FlexPersistentVolumeSource(in *core.FlexPersistentVolumeSource, out *v1.FlexPersistentVolumeSource, s conversion.Scope) error { +func Convert_core_FlexPersistentVolumeSource_To_v1_FlexPersistentVolumeSource(in *core.FlexPersistentVolumeSource, out *corev1.FlexPersistentVolumeSource, s conversion.Scope) error { return autoConvert_core_FlexPersistentVolumeSource_To_v1_FlexPersistentVolumeSource(in, out, s) } -func autoConvert_v1_FlexVolumeSource_To_core_FlexVolumeSource(in *v1.FlexVolumeSource, out *core.FlexVolumeSource, s conversion.Scope) error { +func autoConvert_v1_FlexVolumeSource_To_core_FlexVolumeSource(in *corev1.FlexVolumeSource, out *core.FlexVolumeSource, s conversion.Scope) error { out.Driver = in.Driver out.FSType = in.FSType out.SecretRef = (*core.LocalObjectReference)(unsafe.Pointer(in.SecretRef)) @@ -4083,47 +4088,47 @@ func autoConvert_v1_FlexVolumeSource_To_core_FlexVolumeSource(in *v1.FlexVolumeS } // Convert_v1_FlexVolumeSource_To_core_FlexVolumeSource is an autogenerated conversion function. 
-func Convert_v1_FlexVolumeSource_To_core_FlexVolumeSource(in *v1.FlexVolumeSource, out *core.FlexVolumeSource, s conversion.Scope) error { +func Convert_v1_FlexVolumeSource_To_core_FlexVolumeSource(in *corev1.FlexVolumeSource, out *core.FlexVolumeSource, s conversion.Scope) error { return autoConvert_v1_FlexVolumeSource_To_core_FlexVolumeSource(in, out, s) } -func autoConvert_core_FlexVolumeSource_To_v1_FlexVolumeSource(in *core.FlexVolumeSource, out *v1.FlexVolumeSource, s conversion.Scope) error { +func autoConvert_core_FlexVolumeSource_To_v1_FlexVolumeSource(in *core.FlexVolumeSource, out *corev1.FlexVolumeSource, s conversion.Scope) error { out.Driver = in.Driver out.FSType = in.FSType - out.SecretRef = (*v1.LocalObjectReference)(unsafe.Pointer(in.SecretRef)) + out.SecretRef = (*corev1.LocalObjectReference)(unsafe.Pointer(in.SecretRef)) out.ReadOnly = in.ReadOnly out.Options = *(*map[string]string)(unsafe.Pointer(&in.Options)) return nil } // Convert_core_FlexVolumeSource_To_v1_FlexVolumeSource is an autogenerated conversion function. -func Convert_core_FlexVolumeSource_To_v1_FlexVolumeSource(in *core.FlexVolumeSource, out *v1.FlexVolumeSource, s conversion.Scope) error { +func Convert_core_FlexVolumeSource_To_v1_FlexVolumeSource(in *core.FlexVolumeSource, out *corev1.FlexVolumeSource, s conversion.Scope) error { return autoConvert_core_FlexVolumeSource_To_v1_FlexVolumeSource(in, out, s) } -func autoConvert_v1_FlockerVolumeSource_To_core_FlockerVolumeSource(in *v1.FlockerVolumeSource, out *core.FlockerVolumeSource, s conversion.Scope) error { +func autoConvert_v1_FlockerVolumeSource_To_core_FlockerVolumeSource(in *corev1.FlockerVolumeSource, out *core.FlockerVolumeSource, s conversion.Scope) error { out.DatasetName = in.DatasetName out.DatasetUUID = in.DatasetUUID return nil } // Convert_v1_FlockerVolumeSource_To_core_FlockerVolumeSource is an autogenerated conversion function. -func Convert_v1_FlockerVolumeSource_To_core_FlockerVolumeSource(in *v1.FlockerVolumeSource, out *core.FlockerVolumeSource, s conversion.Scope) error { +func Convert_v1_FlockerVolumeSource_To_core_FlockerVolumeSource(in *corev1.FlockerVolumeSource, out *core.FlockerVolumeSource, s conversion.Scope) error { return autoConvert_v1_FlockerVolumeSource_To_core_FlockerVolumeSource(in, out, s) } -func autoConvert_core_FlockerVolumeSource_To_v1_FlockerVolumeSource(in *core.FlockerVolumeSource, out *v1.FlockerVolumeSource, s conversion.Scope) error { +func autoConvert_core_FlockerVolumeSource_To_v1_FlockerVolumeSource(in *core.FlockerVolumeSource, out *corev1.FlockerVolumeSource, s conversion.Scope) error { out.DatasetName = in.DatasetName out.DatasetUUID = in.DatasetUUID return nil } // Convert_core_FlockerVolumeSource_To_v1_FlockerVolumeSource is an autogenerated conversion function. 
-func Convert_core_FlockerVolumeSource_To_v1_FlockerVolumeSource(in *core.FlockerVolumeSource, out *v1.FlockerVolumeSource, s conversion.Scope) error { +func Convert_core_FlockerVolumeSource_To_v1_FlockerVolumeSource(in *core.FlockerVolumeSource, out *corev1.FlockerVolumeSource, s conversion.Scope) error { return autoConvert_core_FlockerVolumeSource_To_v1_FlockerVolumeSource(in, out, s) } -func autoConvert_v1_GCEPersistentDiskVolumeSource_To_core_GCEPersistentDiskVolumeSource(in *v1.GCEPersistentDiskVolumeSource, out *core.GCEPersistentDiskVolumeSource, s conversion.Scope) error { +func autoConvert_v1_GCEPersistentDiskVolumeSource_To_core_GCEPersistentDiskVolumeSource(in *corev1.GCEPersistentDiskVolumeSource, out *core.GCEPersistentDiskVolumeSource, s conversion.Scope) error { out.PDName = in.PDName out.FSType = in.FSType out.Partition = in.Partition @@ -4132,11 +4137,11 @@ func autoConvert_v1_GCEPersistentDiskVolumeSource_To_core_GCEPersistentDiskVolum } // Convert_v1_GCEPersistentDiskVolumeSource_To_core_GCEPersistentDiskVolumeSource is an autogenerated conversion function. -func Convert_v1_GCEPersistentDiskVolumeSource_To_core_GCEPersistentDiskVolumeSource(in *v1.GCEPersistentDiskVolumeSource, out *core.GCEPersistentDiskVolumeSource, s conversion.Scope) error { +func Convert_v1_GCEPersistentDiskVolumeSource_To_core_GCEPersistentDiskVolumeSource(in *corev1.GCEPersistentDiskVolumeSource, out *core.GCEPersistentDiskVolumeSource, s conversion.Scope) error { return autoConvert_v1_GCEPersistentDiskVolumeSource_To_core_GCEPersistentDiskVolumeSource(in, out, s) } -func autoConvert_core_GCEPersistentDiskVolumeSource_To_v1_GCEPersistentDiskVolumeSource(in *core.GCEPersistentDiskVolumeSource, out *v1.GCEPersistentDiskVolumeSource, s conversion.Scope) error { +func autoConvert_core_GCEPersistentDiskVolumeSource_To_v1_GCEPersistentDiskVolumeSource(in *core.GCEPersistentDiskVolumeSource, out *corev1.GCEPersistentDiskVolumeSource, s conversion.Scope) error { out.PDName = in.PDName out.FSType = in.FSType out.Partition = in.Partition @@ -4145,33 +4150,33 @@ func autoConvert_core_GCEPersistentDiskVolumeSource_To_v1_GCEPersistentDiskVolum } // Convert_core_GCEPersistentDiskVolumeSource_To_v1_GCEPersistentDiskVolumeSource is an autogenerated conversion function. -func Convert_core_GCEPersistentDiskVolumeSource_To_v1_GCEPersistentDiskVolumeSource(in *core.GCEPersistentDiskVolumeSource, out *v1.GCEPersistentDiskVolumeSource, s conversion.Scope) error { +func Convert_core_GCEPersistentDiskVolumeSource_To_v1_GCEPersistentDiskVolumeSource(in *core.GCEPersistentDiskVolumeSource, out *corev1.GCEPersistentDiskVolumeSource, s conversion.Scope) error { return autoConvert_core_GCEPersistentDiskVolumeSource_To_v1_GCEPersistentDiskVolumeSource(in, out, s) } -func autoConvert_v1_GRPCAction_To_core_GRPCAction(in *v1.GRPCAction, out *core.GRPCAction, s conversion.Scope) error { +func autoConvert_v1_GRPCAction_To_core_GRPCAction(in *corev1.GRPCAction, out *core.GRPCAction, s conversion.Scope) error { out.Port = in.Port out.Service = (*string)(unsafe.Pointer(in.Service)) return nil } // Convert_v1_GRPCAction_To_core_GRPCAction is an autogenerated conversion function. 
-func Convert_v1_GRPCAction_To_core_GRPCAction(in *v1.GRPCAction, out *core.GRPCAction, s conversion.Scope) error { +func Convert_v1_GRPCAction_To_core_GRPCAction(in *corev1.GRPCAction, out *core.GRPCAction, s conversion.Scope) error { return autoConvert_v1_GRPCAction_To_core_GRPCAction(in, out, s) } -func autoConvert_core_GRPCAction_To_v1_GRPCAction(in *core.GRPCAction, out *v1.GRPCAction, s conversion.Scope) error { +func autoConvert_core_GRPCAction_To_v1_GRPCAction(in *core.GRPCAction, out *corev1.GRPCAction, s conversion.Scope) error { out.Port = in.Port out.Service = (*string)(unsafe.Pointer(in.Service)) return nil } // Convert_core_GRPCAction_To_v1_GRPCAction is an autogenerated conversion function. -func Convert_core_GRPCAction_To_v1_GRPCAction(in *core.GRPCAction, out *v1.GRPCAction, s conversion.Scope) error { +func Convert_core_GRPCAction_To_v1_GRPCAction(in *core.GRPCAction, out *corev1.GRPCAction, s conversion.Scope) error { return autoConvert_core_GRPCAction_To_v1_GRPCAction(in, out, s) } -func autoConvert_v1_GitRepoVolumeSource_To_core_GitRepoVolumeSource(in *v1.GitRepoVolumeSource, out *core.GitRepoVolumeSource, s conversion.Scope) error { +func autoConvert_v1_GitRepoVolumeSource_To_core_GitRepoVolumeSource(in *corev1.GitRepoVolumeSource, out *core.GitRepoVolumeSource, s conversion.Scope) error { out.Repository = in.Repository out.Revision = in.Revision out.Directory = in.Directory @@ -4179,11 +4184,11 @@ func autoConvert_v1_GitRepoVolumeSource_To_core_GitRepoVolumeSource(in *v1.GitRe } // Convert_v1_GitRepoVolumeSource_To_core_GitRepoVolumeSource is an autogenerated conversion function. -func Convert_v1_GitRepoVolumeSource_To_core_GitRepoVolumeSource(in *v1.GitRepoVolumeSource, out *core.GitRepoVolumeSource, s conversion.Scope) error { +func Convert_v1_GitRepoVolumeSource_To_core_GitRepoVolumeSource(in *corev1.GitRepoVolumeSource, out *core.GitRepoVolumeSource, s conversion.Scope) error { return autoConvert_v1_GitRepoVolumeSource_To_core_GitRepoVolumeSource(in, out, s) } -func autoConvert_core_GitRepoVolumeSource_To_v1_GitRepoVolumeSource(in *core.GitRepoVolumeSource, out *v1.GitRepoVolumeSource, s conversion.Scope) error { +func autoConvert_core_GitRepoVolumeSource_To_v1_GitRepoVolumeSource(in *core.GitRepoVolumeSource, out *corev1.GitRepoVolumeSource, s conversion.Scope) error { out.Repository = in.Repository out.Revision = in.Revision out.Directory = in.Directory @@ -4191,11 +4196,11 @@ func autoConvert_core_GitRepoVolumeSource_To_v1_GitRepoVolumeSource(in *core.Git } // Convert_core_GitRepoVolumeSource_To_v1_GitRepoVolumeSource is an autogenerated conversion function. 
-func Convert_core_GitRepoVolumeSource_To_v1_GitRepoVolumeSource(in *core.GitRepoVolumeSource, out *v1.GitRepoVolumeSource, s conversion.Scope) error { +func Convert_core_GitRepoVolumeSource_To_v1_GitRepoVolumeSource(in *core.GitRepoVolumeSource, out *corev1.GitRepoVolumeSource, s conversion.Scope) error { return autoConvert_core_GitRepoVolumeSource_To_v1_GitRepoVolumeSource(in, out, s) } -func autoConvert_v1_GlusterfsPersistentVolumeSource_To_core_GlusterfsPersistentVolumeSource(in *v1.GlusterfsPersistentVolumeSource, out *core.GlusterfsPersistentVolumeSource, s conversion.Scope) error { +func autoConvert_v1_GlusterfsPersistentVolumeSource_To_core_GlusterfsPersistentVolumeSource(in *corev1.GlusterfsPersistentVolumeSource, out *core.GlusterfsPersistentVolumeSource, s conversion.Scope) error { out.EndpointsName = in.EndpointsName out.Path = in.Path out.ReadOnly = in.ReadOnly @@ -4204,11 +4209,11 @@ func autoConvert_v1_GlusterfsPersistentVolumeSource_To_core_GlusterfsPersistentV } // Convert_v1_GlusterfsPersistentVolumeSource_To_core_GlusterfsPersistentVolumeSource is an autogenerated conversion function. -func Convert_v1_GlusterfsPersistentVolumeSource_To_core_GlusterfsPersistentVolumeSource(in *v1.GlusterfsPersistentVolumeSource, out *core.GlusterfsPersistentVolumeSource, s conversion.Scope) error { +func Convert_v1_GlusterfsPersistentVolumeSource_To_core_GlusterfsPersistentVolumeSource(in *corev1.GlusterfsPersistentVolumeSource, out *core.GlusterfsPersistentVolumeSource, s conversion.Scope) error { return autoConvert_v1_GlusterfsPersistentVolumeSource_To_core_GlusterfsPersistentVolumeSource(in, out, s) } -func autoConvert_core_GlusterfsPersistentVolumeSource_To_v1_GlusterfsPersistentVolumeSource(in *core.GlusterfsPersistentVolumeSource, out *v1.GlusterfsPersistentVolumeSource, s conversion.Scope) error { +func autoConvert_core_GlusterfsPersistentVolumeSource_To_v1_GlusterfsPersistentVolumeSource(in *core.GlusterfsPersistentVolumeSource, out *corev1.GlusterfsPersistentVolumeSource, s conversion.Scope) error { out.EndpointsName = in.EndpointsName out.Path = in.Path out.ReadOnly = in.ReadOnly @@ -4217,11 +4222,11 @@ func autoConvert_core_GlusterfsPersistentVolumeSource_To_v1_GlusterfsPersistentV } // Convert_core_GlusterfsPersistentVolumeSource_To_v1_GlusterfsPersistentVolumeSource is an autogenerated conversion function. -func Convert_core_GlusterfsPersistentVolumeSource_To_v1_GlusterfsPersistentVolumeSource(in *core.GlusterfsPersistentVolumeSource, out *v1.GlusterfsPersistentVolumeSource, s conversion.Scope) error { +func Convert_core_GlusterfsPersistentVolumeSource_To_v1_GlusterfsPersistentVolumeSource(in *core.GlusterfsPersistentVolumeSource, out *corev1.GlusterfsPersistentVolumeSource, s conversion.Scope) error { return autoConvert_core_GlusterfsPersistentVolumeSource_To_v1_GlusterfsPersistentVolumeSource(in, out, s) } -func autoConvert_v1_GlusterfsVolumeSource_To_core_GlusterfsVolumeSource(in *v1.GlusterfsVolumeSource, out *core.GlusterfsVolumeSource, s conversion.Scope) error { +func autoConvert_v1_GlusterfsVolumeSource_To_core_GlusterfsVolumeSource(in *corev1.GlusterfsVolumeSource, out *core.GlusterfsVolumeSource, s conversion.Scope) error { out.EndpointsName = in.EndpointsName out.Path = in.Path out.ReadOnly = in.ReadOnly @@ -4229,11 +4234,11 @@ func autoConvert_v1_GlusterfsVolumeSource_To_core_GlusterfsVolumeSource(in *v1.G } // Convert_v1_GlusterfsVolumeSource_To_core_GlusterfsVolumeSource is an autogenerated conversion function. 
-func Convert_v1_GlusterfsVolumeSource_To_core_GlusterfsVolumeSource(in *v1.GlusterfsVolumeSource, out *core.GlusterfsVolumeSource, s conversion.Scope) error { +func Convert_v1_GlusterfsVolumeSource_To_core_GlusterfsVolumeSource(in *corev1.GlusterfsVolumeSource, out *core.GlusterfsVolumeSource, s conversion.Scope) error { return autoConvert_v1_GlusterfsVolumeSource_To_core_GlusterfsVolumeSource(in, out, s) } -func autoConvert_core_GlusterfsVolumeSource_To_v1_GlusterfsVolumeSource(in *core.GlusterfsVolumeSource, out *v1.GlusterfsVolumeSource, s conversion.Scope) error { +func autoConvert_core_GlusterfsVolumeSource_To_v1_GlusterfsVolumeSource(in *core.GlusterfsVolumeSource, out *corev1.GlusterfsVolumeSource, s conversion.Scope) error { out.EndpointsName = in.EndpointsName out.Path = in.Path out.ReadOnly = in.ReadOnly @@ -4241,11 +4246,11 @@ func autoConvert_core_GlusterfsVolumeSource_To_v1_GlusterfsVolumeSource(in *core } // Convert_core_GlusterfsVolumeSource_To_v1_GlusterfsVolumeSource is an autogenerated conversion function. -func Convert_core_GlusterfsVolumeSource_To_v1_GlusterfsVolumeSource(in *core.GlusterfsVolumeSource, out *v1.GlusterfsVolumeSource, s conversion.Scope) error { +func Convert_core_GlusterfsVolumeSource_To_v1_GlusterfsVolumeSource(in *core.GlusterfsVolumeSource, out *corev1.GlusterfsVolumeSource, s conversion.Scope) error { return autoConvert_core_GlusterfsVolumeSource_To_v1_GlusterfsVolumeSource(in, out, s) } -func autoConvert_v1_HTTPGetAction_To_core_HTTPGetAction(in *v1.HTTPGetAction, out *core.HTTPGetAction, s conversion.Scope) error { +func autoConvert_v1_HTTPGetAction_To_core_HTTPGetAction(in *corev1.HTTPGetAction, out *core.HTTPGetAction, s conversion.Scope) error { out.Path = in.Path out.Port = in.Port out.Host = in.Host @@ -4255,111 +4260,111 @@ func autoConvert_v1_HTTPGetAction_To_core_HTTPGetAction(in *v1.HTTPGetAction, ou } // Convert_v1_HTTPGetAction_To_core_HTTPGetAction is an autogenerated conversion function. -func Convert_v1_HTTPGetAction_To_core_HTTPGetAction(in *v1.HTTPGetAction, out *core.HTTPGetAction, s conversion.Scope) error { +func Convert_v1_HTTPGetAction_To_core_HTTPGetAction(in *corev1.HTTPGetAction, out *core.HTTPGetAction, s conversion.Scope) error { return autoConvert_v1_HTTPGetAction_To_core_HTTPGetAction(in, out, s) } -func autoConvert_core_HTTPGetAction_To_v1_HTTPGetAction(in *core.HTTPGetAction, out *v1.HTTPGetAction, s conversion.Scope) error { +func autoConvert_core_HTTPGetAction_To_v1_HTTPGetAction(in *core.HTTPGetAction, out *corev1.HTTPGetAction, s conversion.Scope) error { out.Path = in.Path out.Port = in.Port out.Host = in.Host - out.Scheme = v1.URIScheme(in.Scheme) - out.HTTPHeaders = *(*[]v1.HTTPHeader)(unsafe.Pointer(&in.HTTPHeaders)) + out.Scheme = corev1.URIScheme(in.Scheme) + out.HTTPHeaders = *(*[]corev1.HTTPHeader)(unsafe.Pointer(&in.HTTPHeaders)) return nil } // Convert_core_HTTPGetAction_To_v1_HTTPGetAction is an autogenerated conversion function. 
-func Convert_core_HTTPGetAction_To_v1_HTTPGetAction(in *core.HTTPGetAction, out *v1.HTTPGetAction, s conversion.Scope) error { +func Convert_core_HTTPGetAction_To_v1_HTTPGetAction(in *core.HTTPGetAction, out *corev1.HTTPGetAction, s conversion.Scope) error { return autoConvert_core_HTTPGetAction_To_v1_HTTPGetAction(in, out, s) } -func autoConvert_v1_HTTPHeader_To_core_HTTPHeader(in *v1.HTTPHeader, out *core.HTTPHeader, s conversion.Scope) error { +func autoConvert_v1_HTTPHeader_To_core_HTTPHeader(in *corev1.HTTPHeader, out *core.HTTPHeader, s conversion.Scope) error { out.Name = in.Name out.Value = in.Value return nil } // Convert_v1_HTTPHeader_To_core_HTTPHeader is an autogenerated conversion function. -func Convert_v1_HTTPHeader_To_core_HTTPHeader(in *v1.HTTPHeader, out *core.HTTPHeader, s conversion.Scope) error { +func Convert_v1_HTTPHeader_To_core_HTTPHeader(in *corev1.HTTPHeader, out *core.HTTPHeader, s conversion.Scope) error { return autoConvert_v1_HTTPHeader_To_core_HTTPHeader(in, out, s) } -func autoConvert_core_HTTPHeader_To_v1_HTTPHeader(in *core.HTTPHeader, out *v1.HTTPHeader, s conversion.Scope) error { +func autoConvert_core_HTTPHeader_To_v1_HTTPHeader(in *core.HTTPHeader, out *corev1.HTTPHeader, s conversion.Scope) error { out.Name = in.Name out.Value = in.Value return nil } // Convert_core_HTTPHeader_To_v1_HTTPHeader is an autogenerated conversion function. -func Convert_core_HTTPHeader_To_v1_HTTPHeader(in *core.HTTPHeader, out *v1.HTTPHeader, s conversion.Scope) error { +func Convert_core_HTTPHeader_To_v1_HTTPHeader(in *core.HTTPHeader, out *corev1.HTTPHeader, s conversion.Scope) error { return autoConvert_core_HTTPHeader_To_v1_HTTPHeader(in, out, s) } -func autoConvert_v1_HostAlias_To_core_HostAlias(in *v1.HostAlias, out *core.HostAlias, s conversion.Scope) error { +func autoConvert_v1_HostAlias_To_core_HostAlias(in *corev1.HostAlias, out *core.HostAlias, s conversion.Scope) error { out.IP = in.IP out.Hostnames = *(*[]string)(unsafe.Pointer(&in.Hostnames)) return nil } // Convert_v1_HostAlias_To_core_HostAlias is an autogenerated conversion function. -func Convert_v1_HostAlias_To_core_HostAlias(in *v1.HostAlias, out *core.HostAlias, s conversion.Scope) error { +func Convert_v1_HostAlias_To_core_HostAlias(in *corev1.HostAlias, out *core.HostAlias, s conversion.Scope) error { return autoConvert_v1_HostAlias_To_core_HostAlias(in, out, s) } -func autoConvert_core_HostAlias_To_v1_HostAlias(in *core.HostAlias, out *v1.HostAlias, s conversion.Scope) error { +func autoConvert_core_HostAlias_To_v1_HostAlias(in *core.HostAlias, out *corev1.HostAlias, s conversion.Scope) error { out.IP = in.IP out.Hostnames = *(*[]string)(unsafe.Pointer(&in.Hostnames)) return nil } // Convert_core_HostAlias_To_v1_HostAlias is an autogenerated conversion function. -func Convert_core_HostAlias_To_v1_HostAlias(in *core.HostAlias, out *v1.HostAlias, s conversion.Scope) error { +func Convert_core_HostAlias_To_v1_HostAlias(in *core.HostAlias, out *corev1.HostAlias, s conversion.Scope) error { return autoConvert_core_HostAlias_To_v1_HostAlias(in, out, s) } -func autoConvert_v1_HostIP_To_core_HostIP(in *v1.HostIP, out *core.HostIP, s conversion.Scope) error { +func autoConvert_v1_HostIP_To_core_HostIP(in *corev1.HostIP, out *core.HostIP, s conversion.Scope) error { out.IP = in.IP return nil } // Convert_v1_HostIP_To_core_HostIP is an autogenerated conversion function. 
-func Convert_v1_HostIP_To_core_HostIP(in *v1.HostIP, out *core.HostIP, s conversion.Scope) error { +func Convert_v1_HostIP_To_core_HostIP(in *corev1.HostIP, out *core.HostIP, s conversion.Scope) error { return autoConvert_v1_HostIP_To_core_HostIP(in, out, s) } -func autoConvert_core_HostIP_To_v1_HostIP(in *core.HostIP, out *v1.HostIP, s conversion.Scope) error { +func autoConvert_core_HostIP_To_v1_HostIP(in *core.HostIP, out *corev1.HostIP, s conversion.Scope) error { out.IP = in.IP return nil } // Convert_core_HostIP_To_v1_HostIP is an autogenerated conversion function. -func Convert_core_HostIP_To_v1_HostIP(in *core.HostIP, out *v1.HostIP, s conversion.Scope) error { +func Convert_core_HostIP_To_v1_HostIP(in *core.HostIP, out *corev1.HostIP, s conversion.Scope) error { return autoConvert_core_HostIP_To_v1_HostIP(in, out, s) } -func autoConvert_v1_HostPathVolumeSource_To_core_HostPathVolumeSource(in *v1.HostPathVolumeSource, out *core.HostPathVolumeSource, s conversion.Scope) error { +func autoConvert_v1_HostPathVolumeSource_To_core_HostPathVolumeSource(in *corev1.HostPathVolumeSource, out *core.HostPathVolumeSource, s conversion.Scope) error { out.Path = in.Path out.Type = (*core.HostPathType)(unsafe.Pointer(in.Type)) return nil } // Convert_v1_HostPathVolumeSource_To_core_HostPathVolumeSource is an autogenerated conversion function. -func Convert_v1_HostPathVolumeSource_To_core_HostPathVolumeSource(in *v1.HostPathVolumeSource, out *core.HostPathVolumeSource, s conversion.Scope) error { +func Convert_v1_HostPathVolumeSource_To_core_HostPathVolumeSource(in *corev1.HostPathVolumeSource, out *core.HostPathVolumeSource, s conversion.Scope) error { return autoConvert_v1_HostPathVolumeSource_To_core_HostPathVolumeSource(in, out, s) } -func autoConvert_core_HostPathVolumeSource_To_v1_HostPathVolumeSource(in *core.HostPathVolumeSource, out *v1.HostPathVolumeSource, s conversion.Scope) error { +func autoConvert_core_HostPathVolumeSource_To_v1_HostPathVolumeSource(in *core.HostPathVolumeSource, out *corev1.HostPathVolumeSource, s conversion.Scope) error { out.Path = in.Path - out.Type = (*v1.HostPathType)(unsafe.Pointer(in.Type)) + out.Type = (*corev1.HostPathType)(unsafe.Pointer(in.Type)) return nil } // Convert_core_HostPathVolumeSource_To_v1_HostPathVolumeSource is an autogenerated conversion function. -func Convert_core_HostPathVolumeSource_To_v1_HostPathVolumeSource(in *core.HostPathVolumeSource, out *v1.HostPathVolumeSource, s conversion.Scope) error { +func Convert_core_HostPathVolumeSource_To_v1_HostPathVolumeSource(in *core.HostPathVolumeSource, out *corev1.HostPathVolumeSource, s conversion.Scope) error { return autoConvert_core_HostPathVolumeSource_To_v1_HostPathVolumeSource(in, out, s) } -func autoConvert_v1_ISCSIPersistentVolumeSource_To_core_ISCSIPersistentVolumeSource(in *v1.ISCSIPersistentVolumeSource, out *core.ISCSIPersistentVolumeSource, s conversion.Scope) error { +func autoConvert_v1_ISCSIPersistentVolumeSource_To_core_ISCSIPersistentVolumeSource(in *corev1.ISCSIPersistentVolumeSource, out *core.ISCSIPersistentVolumeSource, s conversion.Scope) error { out.TargetPortal = in.TargetPortal out.IQN = in.IQN out.Lun = in.Lun @@ -4375,11 +4380,11 @@ func autoConvert_v1_ISCSIPersistentVolumeSource_To_core_ISCSIPersistentVolumeSou } // Convert_v1_ISCSIPersistentVolumeSource_To_core_ISCSIPersistentVolumeSource is an autogenerated conversion function. 
-func Convert_v1_ISCSIPersistentVolumeSource_To_core_ISCSIPersistentVolumeSource(in *v1.ISCSIPersistentVolumeSource, out *core.ISCSIPersistentVolumeSource, s conversion.Scope) error { +func Convert_v1_ISCSIPersistentVolumeSource_To_core_ISCSIPersistentVolumeSource(in *corev1.ISCSIPersistentVolumeSource, out *core.ISCSIPersistentVolumeSource, s conversion.Scope) error { return autoConvert_v1_ISCSIPersistentVolumeSource_To_core_ISCSIPersistentVolumeSource(in, out, s) } -func autoConvert_core_ISCSIPersistentVolumeSource_To_v1_ISCSIPersistentVolumeSource(in *core.ISCSIPersistentVolumeSource, out *v1.ISCSIPersistentVolumeSource, s conversion.Scope) error { +func autoConvert_core_ISCSIPersistentVolumeSource_To_v1_ISCSIPersistentVolumeSource(in *core.ISCSIPersistentVolumeSource, out *corev1.ISCSIPersistentVolumeSource, s conversion.Scope) error { out.TargetPortal = in.TargetPortal out.IQN = in.IQN out.Lun = in.Lun @@ -4389,17 +4394,17 @@ func autoConvert_core_ISCSIPersistentVolumeSource_To_v1_ISCSIPersistentVolumeSou out.Portals = *(*[]string)(unsafe.Pointer(&in.Portals)) out.DiscoveryCHAPAuth = in.DiscoveryCHAPAuth out.SessionCHAPAuth = in.SessionCHAPAuth - out.SecretRef = (*v1.SecretReference)(unsafe.Pointer(in.SecretRef)) + out.SecretRef = (*corev1.SecretReference)(unsafe.Pointer(in.SecretRef)) out.InitiatorName = (*string)(unsafe.Pointer(in.InitiatorName)) return nil } // Convert_core_ISCSIPersistentVolumeSource_To_v1_ISCSIPersistentVolumeSource is an autogenerated conversion function. -func Convert_core_ISCSIPersistentVolumeSource_To_v1_ISCSIPersistentVolumeSource(in *core.ISCSIPersistentVolumeSource, out *v1.ISCSIPersistentVolumeSource, s conversion.Scope) error { +func Convert_core_ISCSIPersistentVolumeSource_To_v1_ISCSIPersistentVolumeSource(in *core.ISCSIPersistentVolumeSource, out *corev1.ISCSIPersistentVolumeSource, s conversion.Scope) error { return autoConvert_core_ISCSIPersistentVolumeSource_To_v1_ISCSIPersistentVolumeSource(in, out, s) } -func autoConvert_v1_ISCSIVolumeSource_To_core_ISCSIVolumeSource(in *v1.ISCSIVolumeSource, out *core.ISCSIVolumeSource, s conversion.Scope) error { +func autoConvert_v1_ISCSIVolumeSource_To_core_ISCSIVolumeSource(in *corev1.ISCSIVolumeSource, out *core.ISCSIVolumeSource, s conversion.Scope) error { out.TargetPortal = in.TargetPortal out.IQN = in.IQN out.Lun = in.Lun @@ -4415,11 +4420,11 @@ func autoConvert_v1_ISCSIVolumeSource_To_core_ISCSIVolumeSource(in *v1.ISCSIVolu } // Convert_v1_ISCSIVolumeSource_To_core_ISCSIVolumeSource is an autogenerated conversion function. 
-func Convert_v1_ISCSIVolumeSource_To_core_ISCSIVolumeSource(in *v1.ISCSIVolumeSource, out *core.ISCSIVolumeSource, s conversion.Scope) error { +func Convert_v1_ISCSIVolumeSource_To_core_ISCSIVolumeSource(in *corev1.ISCSIVolumeSource, out *core.ISCSIVolumeSource, s conversion.Scope) error { return autoConvert_v1_ISCSIVolumeSource_To_core_ISCSIVolumeSource(in, out, s) } -func autoConvert_core_ISCSIVolumeSource_To_v1_ISCSIVolumeSource(in *core.ISCSIVolumeSource, out *v1.ISCSIVolumeSource, s conversion.Scope) error { +func autoConvert_core_ISCSIVolumeSource_To_v1_ISCSIVolumeSource(in *core.ISCSIVolumeSource, out *corev1.ISCSIVolumeSource, s conversion.Scope) error { out.TargetPortal = in.TargetPortal out.IQN = in.IQN out.Lun = in.Lun @@ -4429,39 +4434,39 @@ func autoConvert_core_ISCSIVolumeSource_To_v1_ISCSIVolumeSource(in *core.ISCSIVo out.Portals = *(*[]string)(unsafe.Pointer(&in.Portals)) out.DiscoveryCHAPAuth = in.DiscoveryCHAPAuth out.SessionCHAPAuth = in.SessionCHAPAuth - out.SecretRef = (*v1.LocalObjectReference)(unsafe.Pointer(in.SecretRef)) + out.SecretRef = (*corev1.LocalObjectReference)(unsafe.Pointer(in.SecretRef)) out.InitiatorName = (*string)(unsafe.Pointer(in.InitiatorName)) return nil } // Convert_core_ISCSIVolumeSource_To_v1_ISCSIVolumeSource is an autogenerated conversion function. -func Convert_core_ISCSIVolumeSource_To_v1_ISCSIVolumeSource(in *core.ISCSIVolumeSource, out *v1.ISCSIVolumeSource, s conversion.Scope) error { +func Convert_core_ISCSIVolumeSource_To_v1_ISCSIVolumeSource(in *core.ISCSIVolumeSource, out *corev1.ISCSIVolumeSource, s conversion.Scope) error { return autoConvert_core_ISCSIVolumeSource_To_v1_ISCSIVolumeSource(in, out, s) } -func autoConvert_v1_ImageVolumeSource_To_core_ImageVolumeSource(in *v1.ImageVolumeSource, out *core.ImageVolumeSource, s conversion.Scope) error { +func autoConvert_v1_ImageVolumeSource_To_core_ImageVolumeSource(in *corev1.ImageVolumeSource, out *core.ImageVolumeSource, s conversion.Scope) error { out.Reference = in.Reference out.PullPolicy = core.PullPolicy(in.PullPolicy) return nil } // Convert_v1_ImageVolumeSource_To_core_ImageVolumeSource is an autogenerated conversion function. -func Convert_v1_ImageVolumeSource_To_core_ImageVolumeSource(in *v1.ImageVolumeSource, out *core.ImageVolumeSource, s conversion.Scope) error { +func Convert_v1_ImageVolumeSource_To_core_ImageVolumeSource(in *corev1.ImageVolumeSource, out *core.ImageVolumeSource, s conversion.Scope) error { return autoConvert_v1_ImageVolumeSource_To_core_ImageVolumeSource(in, out, s) } -func autoConvert_core_ImageVolumeSource_To_v1_ImageVolumeSource(in *core.ImageVolumeSource, out *v1.ImageVolumeSource, s conversion.Scope) error { +func autoConvert_core_ImageVolumeSource_To_v1_ImageVolumeSource(in *core.ImageVolumeSource, out *corev1.ImageVolumeSource, s conversion.Scope) error { out.Reference = in.Reference - out.PullPolicy = v1.PullPolicy(in.PullPolicy) + out.PullPolicy = corev1.PullPolicy(in.PullPolicy) return nil } // Convert_core_ImageVolumeSource_To_v1_ImageVolumeSource is an autogenerated conversion function. 
-func Convert_core_ImageVolumeSource_To_v1_ImageVolumeSource(in *core.ImageVolumeSource, out *v1.ImageVolumeSource, s conversion.Scope) error { +func Convert_core_ImageVolumeSource_To_v1_ImageVolumeSource(in *core.ImageVolumeSource, out *corev1.ImageVolumeSource, s conversion.Scope) error { return autoConvert_core_ImageVolumeSource_To_v1_ImageVolumeSource(in, out, s) } -func autoConvert_v1_KeyToPath_To_core_KeyToPath(in *v1.KeyToPath, out *core.KeyToPath, s conversion.Scope) error { +func autoConvert_v1_KeyToPath_To_core_KeyToPath(in *corev1.KeyToPath, out *core.KeyToPath, s conversion.Scope) error { out.Key = in.Key out.Path = in.Path out.Mode = (*int32)(unsafe.Pointer(in.Mode)) @@ -4469,11 +4474,11 @@ func autoConvert_v1_KeyToPath_To_core_KeyToPath(in *v1.KeyToPath, out *core.KeyT } // Convert_v1_KeyToPath_To_core_KeyToPath is an autogenerated conversion function. -func Convert_v1_KeyToPath_To_core_KeyToPath(in *v1.KeyToPath, out *core.KeyToPath, s conversion.Scope) error { +func Convert_v1_KeyToPath_To_core_KeyToPath(in *corev1.KeyToPath, out *core.KeyToPath, s conversion.Scope) error { return autoConvert_v1_KeyToPath_To_core_KeyToPath(in, out, s) } -func autoConvert_core_KeyToPath_To_v1_KeyToPath(in *core.KeyToPath, out *v1.KeyToPath, s conversion.Scope) error { +func autoConvert_core_KeyToPath_To_v1_KeyToPath(in *core.KeyToPath, out *corev1.KeyToPath, s conversion.Scope) error { out.Key = in.Key out.Path = in.Path out.Mode = (*int32)(unsafe.Pointer(in.Mode)) @@ -4481,33 +4486,33 @@ func autoConvert_core_KeyToPath_To_v1_KeyToPath(in *core.KeyToPath, out *v1.KeyT } // Convert_core_KeyToPath_To_v1_KeyToPath is an autogenerated conversion function. -func Convert_core_KeyToPath_To_v1_KeyToPath(in *core.KeyToPath, out *v1.KeyToPath, s conversion.Scope) error { +func Convert_core_KeyToPath_To_v1_KeyToPath(in *core.KeyToPath, out *corev1.KeyToPath, s conversion.Scope) error { return autoConvert_core_KeyToPath_To_v1_KeyToPath(in, out, s) } -func autoConvert_v1_Lifecycle_To_core_Lifecycle(in *v1.Lifecycle, out *core.Lifecycle, s conversion.Scope) error { +func autoConvert_v1_Lifecycle_To_core_Lifecycle(in *corev1.Lifecycle, out *core.Lifecycle, s conversion.Scope) error { out.PostStart = (*core.LifecycleHandler)(unsafe.Pointer(in.PostStart)) out.PreStop = (*core.LifecycleHandler)(unsafe.Pointer(in.PreStop)) return nil } // Convert_v1_Lifecycle_To_core_Lifecycle is an autogenerated conversion function. -func Convert_v1_Lifecycle_To_core_Lifecycle(in *v1.Lifecycle, out *core.Lifecycle, s conversion.Scope) error { +func Convert_v1_Lifecycle_To_core_Lifecycle(in *corev1.Lifecycle, out *core.Lifecycle, s conversion.Scope) error { return autoConvert_v1_Lifecycle_To_core_Lifecycle(in, out, s) } -func autoConvert_core_Lifecycle_To_v1_Lifecycle(in *core.Lifecycle, out *v1.Lifecycle, s conversion.Scope) error { - out.PostStart = (*v1.LifecycleHandler)(unsafe.Pointer(in.PostStart)) - out.PreStop = (*v1.LifecycleHandler)(unsafe.Pointer(in.PreStop)) +func autoConvert_core_Lifecycle_To_v1_Lifecycle(in *core.Lifecycle, out *corev1.Lifecycle, s conversion.Scope) error { + out.PostStart = (*corev1.LifecycleHandler)(unsafe.Pointer(in.PostStart)) + out.PreStop = (*corev1.LifecycleHandler)(unsafe.Pointer(in.PreStop)) return nil } // Convert_core_Lifecycle_To_v1_Lifecycle is an autogenerated conversion function. 
-func Convert_core_Lifecycle_To_v1_Lifecycle(in *core.Lifecycle, out *v1.Lifecycle, s conversion.Scope) error { +func Convert_core_Lifecycle_To_v1_Lifecycle(in *core.Lifecycle, out *corev1.Lifecycle, s conversion.Scope) error { return autoConvert_core_Lifecycle_To_v1_Lifecycle(in, out, s) } -func autoConvert_v1_LifecycleHandler_To_core_LifecycleHandler(in *v1.LifecycleHandler, out *core.LifecycleHandler, s conversion.Scope) error { +func autoConvert_v1_LifecycleHandler_To_core_LifecycleHandler(in *corev1.LifecycleHandler, out *core.LifecycleHandler, s conversion.Scope) error { out.Exec = (*core.ExecAction)(unsafe.Pointer(in.Exec)) out.HTTPGet = (*core.HTTPGetAction)(unsafe.Pointer(in.HTTPGet)) out.TCPSocket = (*core.TCPSocketAction)(unsafe.Pointer(in.TCPSocket)) @@ -4516,24 +4521,24 @@ func autoConvert_v1_LifecycleHandler_To_core_LifecycleHandler(in *v1.LifecycleHa } // Convert_v1_LifecycleHandler_To_core_LifecycleHandler is an autogenerated conversion function. -func Convert_v1_LifecycleHandler_To_core_LifecycleHandler(in *v1.LifecycleHandler, out *core.LifecycleHandler, s conversion.Scope) error { +func Convert_v1_LifecycleHandler_To_core_LifecycleHandler(in *corev1.LifecycleHandler, out *core.LifecycleHandler, s conversion.Scope) error { return autoConvert_v1_LifecycleHandler_To_core_LifecycleHandler(in, out, s) } -func autoConvert_core_LifecycleHandler_To_v1_LifecycleHandler(in *core.LifecycleHandler, out *v1.LifecycleHandler, s conversion.Scope) error { - out.Exec = (*v1.ExecAction)(unsafe.Pointer(in.Exec)) - out.HTTPGet = (*v1.HTTPGetAction)(unsafe.Pointer(in.HTTPGet)) - out.TCPSocket = (*v1.TCPSocketAction)(unsafe.Pointer(in.TCPSocket)) - out.Sleep = (*v1.SleepAction)(unsafe.Pointer(in.Sleep)) +func autoConvert_core_LifecycleHandler_To_v1_LifecycleHandler(in *core.LifecycleHandler, out *corev1.LifecycleHandler, s conversion.Scope) error { + out.Exec = (*corev1.ExecAction)(unsafe.Pointer(in.Exec)) + out.HTTPGet = (*corev1.HTTPGetAction)(unsafe.Pointer(in.HTTPGet)) + out.TCPSocket = (*corev1.TCPSocketAction)(unsafe.Pointer(in.TCPSocket)) + out.Sleep = (*corev1.SleepAction)(unsafe.Pointer(in.Sleep)) return nil } // Convert_core_LifecycleHandler_To_v1_LifecycleHandler is an autogenerated conversion function. -func Convert_core_LifecycleHandler_To_v1_LifecycleHandler(in *core.LifecycleHandler, out *v1.LifecycleHandler, s conversion.Scope) error { +func Convert_core_LifecycleHandler_To_v1_LifecycleHandler(in *core.LifecycleHandler, out *corev1.LifecycleHandler, s conversion.Scope) error { return autoConvert_core_LifecycleHandler_To_v1_LifecycleHandler(in, out, s) } -func autoConvert_v1_LimitRange_To_core_LimitRange(in *v1.LimitRange, out *core.LimitRange, s conversion.Scope) error { +func autoConvert_v1_LimitRange_To_core_LimitRange(in *corev1.LimitRange, out *core.LimitRange, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta if err := Convert_v1_LimitRangeSpec_To_core_LimitRangeSpec(&in.Spec, &out.Spec, s); err != nil { return err @@ -4542,11 +4547,11 @@ func autoConvert_v1_LimitRange_To_core_LimitRange(in *v1.LimitRange, out *core.L } // Convert_v1_LimitRange_To_core_LimitRange is an autogenerated conversion function. 
-func Convert_v1_LimitRange_To_core_LimitRange(in *v1.LimitRange, out *core.LimitRange, s conversion.Scope) error { +func Convert_v1_LimitRange_To_core_LimitRange(in *corev1.LimitRange, out *core.LimitRange, s conversion.Scope) error { return autoConvert_v1_LimitRange_To_core_LimitRange(in, out, s) } -func autoConvert_core_LimitRange_To_v1_LimitRange(in *core.LimitRange, out *v1.LimitRange, s conversion.Scope) error { +func autoConvert_core_LimitRange_To_v1_LimitRange(in *core.LimitRange, out *corev1.LimitRange, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta if err := Convert_core_LimitRangeSpec_To_v1_LimitRangeSpec(&in.Spec, &out.Spec, s); err != nil { return err @@ -4555,11 +4560,11 @@ func autoConvert_core_LimitRange_To_v1_LimitRange(in *core.LimitRange, out *v1.L } // Convert_core_LimitRange_To_v1_LimitRange is an autogenerated conversion function. -func Convert_core_LimitRange_To_v1_LimitRange(in *core.LimitRange, out *v1.LimitRange, s conversion.Scope) error { +func Convert_core_LimitRange_To_v1_LimitRange(in *core.LimitRange, out *corev1.LimitRange, s conversion.Scope) error { return autoConvert_core_LimitRange_To_v1_LimitRange(in, out, s) } -func autoConvert_v1_LimitRangeItem_To_core_LimitRangeItem(in *v1.LimitRangeItem, out *core.LimitRangeItem, s conversion.Scope) error { +func autoConvert_v1_LimitRangeItem_To_core_LimitRangeItem(in *corev1.LimitRangeItem, out *core.LimitRangeItem, s conversion.Scope) error { out.Type = core.LimitType(in.Type) out.Max = *(*core.ResourceList)(unsafe.Pointer(&in.Max)) out.Min = *(*core.ResourceList)(unsafe.Pointer(&in.Min)) @@ -4570,68 +4575,68 @@ func autoConvert_v1_LimitRangeItem_To_core_LimitRangeItem(in *v1.LimitRangeItem, } // Convert_v1_LimitRangeItem_To_core_LimitRangeItem is an autogenerated conversion function. -func Convert_v1_LimitRangeItem_To_core_LimitRangeItem(in *v1.LimitRangeItem, out *core.LimitRangeItem, s conversion.Scope) error { +func Convert_v1_LimitRangeItem_To_core_LimitRangeItem(in *corev1.LimitRangeItem, out *core.LimitRangeItem, s conversion.Scope) error { return autoConvert_v1_LimitRangeItem_To_core_LimitRangeItem(in, out, s) } -func autoConvert_core_LimitRangeItem_To_v1_LimitRangeItem(in *core.LimitRangeItem, out *v1.LimitRangeItem, s conversion.Scope) error { - out.Type = v1.LimitType(in.Type) - out.Max = *(*v1.ResourceList)(unsafe.Pointer(&in.Max)) - out.Min = *(*v1.ResourceList)(unsafe.Pointer(&in.Min)) - out.Default = *(*v1.ResourceList)(unsafe.Pointer(&in.Default)) - out.DefaultRequest = *(*v1.ResourceList)(unsafe.Pointer(&in.DefaultRequest)) - out.MaxLimitRequestRatio = *(*v1.ResourceList)(unsafe.Pointer(&in.MaxLimitRequestRatio)) +func autoConvert_core_LimitRangeItem_To_v1_LimitRangeItem(in *core.LimitRangeItem, out *corev1.LimitRangeItem, s conversion.Scope) error { + out.Type = corev1.LimitType(in.Type) + out.Max = *(*corev1.ResourceList)(unsafe.Pointer(&in.Max)) + out.Min = *(*corev1.ResourceList)(unsafe.Pointer(&in.Min)) + out.Default = *(*corev1.ResourceList)(unsafe.Pointer(&in.Default)) + out.DefaultRequest = *(*corev1.ResourceList)(unsafe.Pointer(&in.DefaultRequest)) + out.MaxLimitRequestRatio = *(*corev1.ResourceList)(unsafe.Pointer(&in.MaxLimitRequestRatio)) return nil } // Convert_core_LimitRangeItem_To_v1_LimitRangeItem is an autogenerated conversion function. 
-func Convert_core_LimitRangeItem_To_v1_LimitRangeItem(in *core.LimitRangeItem, out *v1.LimitRangeItem, s conversion.Scope) error { +func Convert_core_LimitRangeItem_To_v1_LimitRangeItem(in *core.LimitRangeItem, out *corev1.LimitRangeItem, s conversion.Scope) error { return autoConvert_core_LimitRangeItem_To_v1_LimitRangeItem(in, out, s) } -func autoConvert_v1_LimitRangeList_To_core_LimitRangeList(in *v1.LimitRangeList, out *core.LimitRangeList, s conversion.Scope) error { +func autoConvert_v1_LimitRangeList_To_core_LimitRangeList(in *corev1.LimitRangeList, out *core.LimitRangeList, s conversion.Scope) error { out.ListMeta = in.ListMeta out.Items = *(*[]core.LimitRange)(unsafe.Pointer(&in.Items)) return nil } // Convert_v1_LimitRangeList_To_core_LimitRangeList is an autogenerated conversion function. -func Convert_v1_LimitRangeList_To_core_LimitRangeList(in *v1.LimitRangeList, out *core.LimitRangeList, s conversion.Scope) error { +func Convert_v1_LimitRangeList_To_core_LimitRangeList(in *corev1.LimitRangeList, out *core.LimitRangeList, s conversion.Scope) error { return autoConvert_v1_LimitRangeList_To_core_LimitRangeList(in, out, s) } -func autoConvert_core_LimitRangeList_To_v1_LimitRangeList(in *core.LimitRangeList, out *v1.LimitRangeList, s conversion.Scope) error { +func autoConvert_core_LimitRangeList_To_v1_LimitRangeList(in *core.LimitRangeList, out *corev1.LimitRangeList, s conversion.Scope) error { out.ListMeta = in.ListMeta - out.Items = *(*[]v1.LimitRange)(unsafe.Pointer(&in.Items)) + out.Items = *(*[]corev1.LimitRange)(unsafe.Pointer(&in.Items)) return nil } // Convert_core_LimitRangeList_To_v1_LimitRangeList is an autogenerated conversion function. -func Convert_core_LimitRangeList_To_v1_LimitRangeList(in *core.LimitRangeList, out *v1.LimitRangeList, s conversion.Scope) error { +func Convert_core_LimitRangeList_To_v1_LimitRangeList(in *core.LimitRangeList, out *corev1.LimitRangeList, s conversion.Scope) error { return autoConvert_core_LimitRangeList_To_v1_LimitRangeList(in, out, s) } -func autoConvert_v1_LimitRangeSpec_To_core_LimitRangeSpec(in *v1.LimitRangeSpec, out *core.LimitRangeSpec, s conversion.Scope) error { +func autoConvert_v1_LimitRangeSpec_To_core_LimitRangeSpec(in *corev1.LimitRangeSpec, out *core.LimitRangeSpec, s conversion.Scope) error { out.Limits = *(*[]core.LimitRangeItem)(unsafe.Pointer(&in.Limits)) return nil } // Convert_v1_LimitRangeSpec_To_core_LimitRangeSpec is an autogenerated conversion function. -func Convert_v1_LimitRangeSpec_To_core_LimitRangeSpec(in *v1.LimitRangeSpec, out *core.LimitRangeSpec, s conversion.Scope) error { +func Convert_v1_LimitRangeSpec_To_core_LimitRangeSpec(in *corev1.LimitRangeSpec, out *core.LimitRangeSpec, s conversion.Scope) error { return autoConvert_v1_LimitRangeSpec_To_core_LimitRangeSpec(in, out, s) } -func autoConvert_core_LimitRangeSpec_To_v1_LimitRangeSpec(in *core.LimitRangeSpec, out *v1.LimitRangeSpec, s conversion.Scope) error { - out.Limits = *(*[]v1.LimitRangeItem)(unsafe.Pointer(&in.Limits)) +func autoConvert_core_LimitRangeSpec_To_v1_LimitRangeSpec(in *core.LimitRangeSpec, out *corev1.LimitRangeSpec, s conversion.Scope) error { + out.Limits = *(*[]corev1.LimitRangeItem)(unsafe.Pointer(&in.Limits)) return nil } // Convert_core_LimitRangeSpec_To_v1_LimitRangeSpec is an autogenerated conversion function. 
-func Convert_core_LimitRangeSpec_To_v1_LimitRangeSpec(in *core.LimitRangeSpec, out *v1.LimitRangeSpec, s conversion.Scope) error { +func Convert_core_LimitRangeSpec_To_v1_LimitRangeSpec(in *core.LimitRangeSpec, out *corev1.LimitRangeSpec, s conversion.Scope) error { return autoConvert_core_LimitRangeSpec_To_v1_LimitRangeSpec(in, out, s) } -func autoConvert_v1_LinuxContainerUser_To_core_LinuxContainerUser(in *v1.LinuxContainerUser, out *core.LinuxContainerUser, s conversion.Scope) error { +func autoConvert_v1_LinuxContainerUser_To_core_LinuxContainerUser(in *corev1.LinuxContainerUser, out *core.LinuxContainerUser, s conversion.Scope) error { out.UID = in.UID out.GID = in.GID out.SupplementalGroups = *(*[]int64)(unsafe.Pointer(&in.SupplementalGroups)) @@ -4639,11 +4644,11 @@ func autoConvert_v1_LinuxContainerUser_To_core_LinuxContainerUser(in *v1.LinuxCo } // Convert_v1_LinuxContainerUser_To_core_LinuxContainerUser is an autogenerated conversion function. -func Convert_v1_LinuxContainerUser_To_core_LinuxContainerUser(in *v1.LinuxContainerUser, out *core.LinuxContainerUser, s conversion.Scope) error { +func Convert_v1_LinuxContainerUser_To_core_LinuxContainerUser(in *corev1.LinuxContainerUser, out *core.LinuxContainerUser, s conversion.Scope) error { return autoConvert_v1_LinuxContainerUser_To_core_LinuxContainerUser(in, out, s) } -func autoConvert_core_LinuxContainerUser_To_v1_LinuxContainerUser(in *core.LinuxContainerUser, out *v1.LinuxContainerUser, s conversion.Scope) error { +func autoConvert_core_LinuxContainerUser_To_v1_LinuxContainerUser(in *core.LinuxContainerUser, out *corev1.LinuxContainerUser, s conversion.Scope) error { out.UID = in.UID out.GID = in.GID out.SupplementalGroups = *(*[]int64)(unsafe.Pointer(&in.SupplementalGroups)) @@ -4651,11 +4656,11 @@ func autoConvert_core_LinuxContainerUser_To_v1_LinuxContainerUser(in *core.Linux } // Convert_core_LinuxContainerUser_To_v1_LinuxContainerUser is an autogenerated conversion function. -func Convert_core_LinuxContainerUser_To_v1_LinuxContainerUser(in *core.LinuxContainerUser, out *v1.LinuxContainerUser, s conversion.Scope) error { +func Convert_core_LinuxContainerUser_To_v1_LinuxContainerUser(in *core.LinuxContainerUser, out *corev1.LinuxContainerUser, s conversion.Scope) error { return autoConvert_core_LinuxContainerUser_To_v1_LinuxContainerUser(in, out, s) } -func autoConvert_v1_List_To_core_List(in *v1.List, out *core.List, s conversion.Scope) error { +func autoConvert_v1_List_To_core_List(in *corev1.List, out *core.List, s conversion.Scope) error { out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items @@ -4672,11 +4677,11 @@ func autoConvert_v1_List_To_core_List(in *v1.List, out *core.List, s conversion. } // Convert_v1_List_To_core_List is an autogenerated conversion function. -func Convert_v1_List_To_core_List(in *v1.List, out *core.List, s conversion.Scope) error { +func Convert_v1_List_To_core_List(in *corev1.List, out *core.List, s conversion.Scope) error { return autoConvert_v1_List_To_core_List(in, out, s) } -func autoConvert_core_List_To_v1_List(in *core.List, out *v1.List, s conversion.Scope) error { +func autoConvert_core_List_To_v1_List(in *core.List, out *corev1.List, s conversion.Scope) error { out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items @@ -4693,11 +4698,11 @@ func autoConvert_core_List_To_v1_List(in *core.List, out *v1.List, s conversion. } // Convert_core_List_To_v1_List is an autogenerated conversion function. 
-func Convert_core_List_To_v1_List(in *core.List, out *v1.List, s conversion.Scope) error { +func Convert_core_List_To_v1_List(in *core.List, out *corev1.List, s conversion.Scope) error { return autoConvert_core_List_To_v1_List(in, out, s) } -func autoConvert_v1_LoadBalancerIngress_To_core_LoadBalancerIngress(in *v1.LoadBalancerIngress, out *core.LoadBalancerIngress, s conversion.Scope) error { +func autoConvert_v1_LoadBalancerIngress_To_core_LoadBalancerIngress(in *corev1.LoadBalancerIngress, out *core.LoadBalancerIngress, s conversion.Scope) error { out.IP = in.IP out.Hostname = in.Hostname out.IPMode = (*core.LoadBalancerIPMode)(unsafe.Pointer(in.IPMode)) @@ -4706,108 +4711,108 @@ func autoConvert_v1_LoadBalancerIngress_To_core_LoadBalancerIngress(in *v1.LoadB } // Convert_v1_LoadBalancerIngress_To_core_LoadBalancerIngress is an autogenerated conversion function. -func Convert_v1_LoadBalancerIngress_To_core_LoadBalancerIngress(in *v1.LoadBalancerIngress, out *core.LoadBalancerIngress, s conversion.Scope) error { +func Convert_v1_LoadBalancerIngress_To_core_LoadBalancerIngress(in *corev1.LoadBalancerIngress, out *core.LoadBalancerIngress, s conversion.Scope) error { return autoConvert_v1_LoadBalancerIngress_To_core_LoadBalancerIngress(in, out, s) } -func autoConvert_core_LoadBalancerIngress_To_v1_LoadBalancerIngress(in *core.LoadBalancerIngress, out *v1.LoadBalancerIngress, s conversion.Scope) error { +func autoConvert_core_LoadBalancerIngress_To_v1_LoadBalancerIngress(in *core.LoadBalancerIngress, out *corev1.LoadBalancerIngress, s conversion.Scope) error { out.IP = in.IP out.Hostname = in.Hostname - out.IPMode = (*v1.LoadBalancerIPMode)(unsafe.Pointer(in.IPMode)) - out.Ports = *(*[]v1.PortStatus)(unsafe.Pointer(&in.Ports)) + out.IPMode = (*corev1.LoadBalancerIPMode)(unsafe.Pointer(in.IPMode)) + out.Ports = *(*[]corev1.PortStatus)(unsafe.Pointer(&in.Ports)) return nil } // Convert_core_LoadBalancerIngress_To_v1_LoadBalancerIngress is an autogenerated conversion function. -func Convert_core_LoadBalancerIngress_To_v1_LoadBalancerIngress(in *core.LoadBalancerIngress, out *v1.LoadBalancerIngress, s conversion.Scope) error { +func Convert_core_LoadBalancerIngress_To_v1_LoadBalancerIngress(in *core.LoadBalancerIngress, out *corev1.LoadBalancerIngress, s conversion.Scope) error { return autoConvert_core_LoadBalancerIngress_To_v1_LoadBalancerIngress(in, out, s) } -func autoConvert_v1_LoadBalancerStatus_To_core_LoadBalancerStatus(in *v1.LoadBalancerStatus, out *core.LoadBalancerStatus, s conversion.Scope) error { +func autoConvert_v1_LoadBalancerStatus_To_core_LoadBalancerStatus(in *corev1.LoadBalancerStatus, out *core.LoadBalancerStatus, s conversion.Scope) error { out.Ingress = *(*[]core.LoadBalancerIngress)(unsafe.Pointer(&in.Ingress)) return nil } // Convert_v1_LoadBalancerStatus_To_core_LoadBalancerStatus is an autogenerated conversion function. 
-func Convert_v1_LoadBalancerStatus_To_core_LoadBalancerStatus(in *v1.LoadBalancerStatus, out *core.LoadBalancerStatus, s conversion.Scope) error { +func Convert_v1_LoadBalancerStatus_To_core_LoadBalancerStatus(in *corev1.LoadBalancerStatus, out *core.LoadBalancerStatus, s conversion.Scope) error { return autoConvert_v1_LoadBalancerStatus_To_core_LoadBalancerStatus(in, out, s) } -func autoConvert_core_LoadBalancerStatus_To_v1_LoadBalancerStatus(in *core.LoadBalancerStatus, out *v1.LoadBalancerStatus, s conversion.Scope) error { - out.Ingress = *(*[]v1.LoadBalancerIngress)(unsafe.Pointer(&in.Ingress)) +func autoConvert_core_LoadBalancerStatus_To_v1_LoadBalancerStatus(in *core.LoadBalancerStatus, out *corev1.LoadBalancerStatus, s conversion.Scope) error { + out.Ingress = *(*[]corev1.LoadBalancerIngress)(unsafe.Pointer(&in.Ingress)) return nil } // Convert_core_LoadBalancerStatus_To_v1_LoadBalancerStatus is an autogenerated conversion function. -func Convert_core_LoadBalancerStatus_To_v1_LoadBalancerStatus(in *core.LoadBalancerStatus, out *v1.LoadBalancerStatus, s conversion.Scope) error { +func Convert_core_LoadBalancerStatus_To_v1_LoadBalancerStatus(in *core.LoadBalancerStatus, out *corev1.LoadBalancerStatus, s conversion.Scope) error { return autoConvert_core_LoadBalancerStatus_To_v1_LoadBalancerStatus(in, out, s) } -func autoConvert_v1_LocalObjectReference_To_core_LocalObjectReference(in *v1.LocalObjectReference, out *core.LocalObjectReference, s conversion.Scope) error { +func autoConvert_v1_LocalObjectReference_To_core_LocalObjectReference(in *corev1.LocalObjectReference, out *core.LocalObjectReference, s conversion.Scope) error { out.Name = in.Name return nil } // Convert_v1_LocalObjectReference_To_core_LocalObjectReference is an autogenerated conversion function. -func Convert_v1_LocalObjectReference_To_core_LocalObjectReference(in *v1.LocalObjectReference, out *core.LocalObjectReference, s conversion.Scope) error { +func Convert_v1_LocalObjectReference_To_core_LocalObjectReference(in *corev1.LocalObjectReference, out *core.LocalObjectReference, s conversion.Scope) error { return autoConvert_v1_LocalObjectReference_To_core_LocalObjectReference(in, out, s) } -func autoConvert_core_LocalObjectReference_To_v1_LocalObjectReference(in *core.LocalObjectReference, out *v1.LocalObjectReference, s conversion.Scope) error { +func autoConvert_core_LocalObjectReference_To_v1_LocalObjectReference(in *core.LocalObjectReference, out *corev1.LocalObjectReference, s conversion.Scope) error { out.Name = in.Name return nil } // Convert_core_LocalObjectReference_To_v1_LocalObjectReference is an autogenerated conversion function. -func Convert_core_LocalObjectReference_To_v1_LocalObjectReference(in *core.LocalObjectReference, out *v1.LocalObjectReference, s conversion.Scope) error { +func Convert_core_LocalObjectReference_To_v1_LocalObjectReference(in *core.LocalObjectReference, out *corev1.LocalObjectReference, s conversion.Scope) error { return autoConvert_core_LocalObjectReference_To_v1_LocalObjectReference(in, out, s) } -func autoConvert_v1_LocalVolumeSource_To_core_LocalVolumeSource(in *v1.LocalVolumeSource, out *core.LocalVolumeSource, s conversion.Scope) error { +func autoConvert_v1_LocalVolumeSource_To_core_LocalVolumeSource(in *corev1.LocalVolumeSource, out *core.LocalVolumeSource, s conversion.Scope) error { out.Path = in.Path out.FSType = (*string)(unsafe.Pointer(in.FSType)) return nil } // Convert_v1_LocalVolumeSource_To_core_LocalVolumeSource is an autogenerated conversion function. 
-func Convert_v1_LocalVolumeSource_To_core_LocalVolumeSource(in *v1.LocalVolumeSource, out *core.LocalVolumeSource, s conversion.Scope) error { +func Convert_v1_LocalVolumeSource_To_core_LocalVolumeSource(in *corev1.LocalVolumeSource, out *core.LocalVolumeSource, s conversion.Scope) error { return autoConvert_v1_LocalVolumeSource_To_core_LocalVolumeSource(in, out, s) } -func autoConvert_core_LocalVolumeSource_To_v1_LocalVolumeSource(in *core.LocalVolumeSource, out *v1.LocalVolumeSource, s conversion.Scope) error { +func autoConvert_core_LocalVolumeSource_To_v1_LocalVolumeSource(in *core.LocalVolumeSource, out *corev1.LocalVolumeSource, s conversion.Scope) error { out.Path = in.Path out.FSType = (*string)(unsafe.Pointer(in.FSType)) return nil } // Convert_core_LocalVolumeSource_To_v1_LocalVolumeSource is an autogenerated conversion function. -func Convert_core_LocalVolumeSource_To_v1_LocalVolumeSource(in *core.LocalVolumeSource, out *v1.LocalVolumeSource, s conversion.Scope) error { +func Convert_core_LocalVolumeSource_To_v1_LocalVolumeSource(in *core.LocalVolumeSource, out *corev1.LocalVolumeSource, s conversion.Scope) error { return autoConvert_core_LocalVolumeSource_To_v1_LocalVolumeSource(in, out, s) } -func autoConvert_v1_ModifyVolumeStatus_To_core_ModifyVolumeStatus(in *v1.ModifyVolumeStatus, out *core.ModifyVolumeStatus, s conversion.Scope) error { +func autoConvert_v1_ModifyVolumeStatus_To_core_ModifyVolumeStatus(in *corev1.ModifyVolumeStatus, out *core.ModifyVolumeStatus, s conversion.Scope) error { out.TargetVolumeAttributesClassName = in.TargetVolumeAttributesClassName out.Status = core.PersistentVolumeClaimModifyVolumeStatus(in.Status) return nil } // Convert_v1_ModifyVolumeStatus_To_core_ModifyVolumeStatus is an autogenerated conversion function. -func Convert_v1_ModifyVolumeStatus_To_core_ModifyVolumeStatus(in *v1.ModifyVolumeStatus, out *core.ModifyVolumeStatus, s conversion.Scope) error { +func Convert_v1_ModifyVolumeStatus_To_core_ModifyVolumeStatus(in *corev1.ModifyVolumeStatus, out *core.ModifyVolumeStatus, s conversion.Scope) error { return autoConvert_v1_ModifyVolumeStatus_To_core_ModifyVolumeStatus(in, out, s) } -func autoConvert_core_ModifyVolumeStatus_To_v1_ModifyVolumeStatus(in *core.ModifyVolumeStatus, out *v1.ModifyVolumeStatus, s conversion.Scope) error { +func autoConvert_core_ModifyVolumeStatus_To_v1_ModifyVolumeStatus(in *core.ModifyVolumeStatus, out *corev1.ModifyVolumeStatus, s conversion.Scope) error { out.TargetVolumeAttributesClassName = in.TargetVolumeAttributesClassName - out.Status = v1.PersistentVolumeClaimModifyVolumeStatus(in.Status) + out.Status = corev1.PersistentVolumeClaimModifyVolumeStatus(in.Status) return nil } // Convert_core_ModifyVolumeStatus_To_v1_ModifyVolumeStatus is an autogenerated conversion function. 
-func Convert_core_ModifyVolumeStatus_To_v1_ModifyVolumeStatus(in *core.ModifyVolumeStatus, out *v1.ModifyVolumeStatus, s conversion.Scope) error { +func Convert_core_ModifyVolumeStatus_To_v1_ModifyVolumeStatus(in *core.ModifyVolumeStatus, out *corev1.ModifyVolumeStatus, s conversion.Scope) error { return autoConvert_core_ModifyVolumeStatus_To_v1_ModifyVolumeStatus(in, out, s) } -func autoConvert_v1_NFSVolumeSource_To_core_NFSVolumeSource(in *v1.NFSVolumeSource, out *core.NFSVolumeSource, s conversion.Scope) error { +func autoConvert_v1_NFSVolumeSource_To_core_NFSVolumeSource(in *corev1.NFSVolumeSource, out *core.NFSVolumeSource, s conversion.Scope) error { out.Server = in.Server out.Path = in.Path out.ReadOnly = in.ReadOnly @@ -4815,11 +4820,11 @@ func autoConvert_v1_NFSVolumeSource_To_core_NFSVolumeSource(in *v1.NFSVolumeSour } // Convert_v1_NFSVolumeSource_To_core_NFSVolumeSource is an autogenerated conversion function. -func Convert_v1_NFSVolumeSource_To_core_NFSVolumeSource(in *v1.NFSVolumeSource, out *core.NFSVolumeSource, s conversion.Scope) error { +func Convert_v1_NFSVolumeSource_To_core_NFSVolumeSource(in *corev1.NFSVolumeSource, out *core.NFSVolumeSource, s conversion.Scope) error { return autoConvert_v1_NFSVolumeSource_To_core_NFSVolumeSource(in, out, s) } -func autoConvert_core_NFSVolumeSource_To_v1_NFSVolumeSource(in *core.NFSVolumeSource, out *v1.NFSVolumeSource, s conversion.Scope) error { +func autoConvert_core_NFSVolumeSource_To_v1_NFSVolumeSource(in *core.NFSVolumeSource, out *corev1.NFSVolumeSource, s conversion.Scope) error { out.Server = in.Server out.Path = in.Path out.ReadOnly = in.ReadOnly @@ -4827,11 +4832,11 @@ func autoConvert_core_NFSVolumeSource_To_v1_NFSVolumeSource(in *core.NFSVolumeSo } // Convert_core_NFSVolumeSource_To_v1_NFSVolumeSource is an autogenerated conversion function. -func Convert_core_NFSVolumeSource_To_v1_NFSVolumeSource(in *core.NFSVolumeSource, out *v1.NFSVolumeSource, s conversion.Scope) error { +func Convert_core_NFSVolumeSource_To_v1_NFSVolumeSource(in *core.NFSVolumeSource, out *corev1.NFSVolumeSource, s conversion.Scope) error { return autoConvert_core_NFSVolumeSource_To_v1_NFSVolumeSource(in, out, s) } -func autoConvert_v1_Namespace_To_core_Namespace(in *v1.Namespace, out *core.Namespace, s conversion.Scope) error { +func autoConvert_v1_Namespace_To_core_Namespace(in *corev1.Namespace, out *core.Namespace, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta if err := Convert_v1_NamespaceSpec_To_core_NamespaceSpec(&in.Spec, &out.Spec, s); err != nil { return err @@ -4843,11 +4848,11 @@ func autoConvert_v1_Namespace_To_core_Namespace(in *v1.Namespace, out *core.Name } // Convert_v1_Namespace_To_core_Namespace is an autogenerated conversion function. 
-func Convert_v1_Namespace_To_core_Namespace(in *v1.Namespace, out *core.Namespace, s conversion.Scope) error { +func Convert_v1_Namespace_To_core_Namespace(in *corev1.Namespace, out *core.Namespace, s conversion.Scope) error { return autoConvert_v1_Namespace_To_core_Namespace(in, out, s) } -func autoConvert_core_Namespace_To_v1_Namespace(in *core.Namespace, out *v1.Namespace, s conversion.Scope) error { +func autoConvert_core_Namespace_To_v1_Namespace(in *core.Namespace, out *corev1.Namespace, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta if err := Convert_core_NamespaceSpec_To_v1_NamespaceSpec(&in.Spec, &out.Spec, s); err != nil { return err @@ -4859,11 +4864,11 @@ func autoConvert_core_Namespace_To_v1_Namespace(in *core.Namespace, out *v1.Name } // Convert_core_Namespace_To_v1_Namespace is an autogenerated conversion function. -func Convert_core_Namespace_To_v1_Namespace(in *core.Namespace, out *v1.Namespace, s conversion.Scope) error { +func Convert_core_Namespace_To_v1_Namespace(in *core.Namespace, out *corev1.Namespace, s conversion.Scope) error { return autoConvert_core_Namespace_To_v1_Namespace(in, out, s) } -func autoConvert_v1_NamespaceCondition_To_core_NamespaceCondition(in *v1.NamespaceCondition, out *core.NamespaceCondition, s conversion.Scope) error { +func autoConvert_v1_NamespaceCondition_To_core_NamespaceCondition(in *corev1.NamespaceCondition, out *core.NamespaceCondition, s conversion.Scope) error { out.Type = core.NamespaceConditionType(in.Type) out.Status = core.ConditionStatus(in.Status) out.LastTransitionTime = in.LastTransitionTime @@ -4873,13 +4878,13 @@ func autoConvert_v1_NamespaceCondition_To_core_NamespaceCondition(in *v1.Namespa } // Convert_v1_NamespaceCondition_To_core_NamespaceCondition is an autogenerated conversion function. -func Convert_v1_NamespaceCondition_To_core_NamespaceCondition(in *v1.NamespaceCondition, out *core.NamespaceCondition, s conversion.Scope) error { +func Convert_v1_NamespaceCondition_To_core_NamespaceCondition(in *corev1.NamespaceCondition, out *core.NamespaceCondition, s conversion.Scope) error { return autoConvert_v1_NamespaceCondition_To_core_NamespaceCondition(in, out, s) } -func autoConvert_core_NamespaceCondition_To_v1_NamespaceCondition(in *core.NamespaceCondition, out *v1.NamespaceCondition, s conversion.Scope) error { - out.Type = v1.NamespaceConditionType(in.Type) - out.Status = v1.ConditionStatus(in.Status) +func autoConvert_core_NamespaceCondition_To_v1_NamespaceCondition(in *core.NamespaceCondition, out *corev1.NamespaceCondition, s conversion.Scope) error { + out.Type = corev1.NamespaceConditionType(in.Type) + out.Status = corev1.ConditionStatus(in.Status) out.LastTransitionTime = in.LastTransitionTime out.Reason = in.Reason out.Message = in.Message @@ -4887,75 +4892,75 @@ func autoConvert_core_NamespaceCondition_To_v1_NamespaceCondition(in *core.Names } // Convert_core_NamespaceCondition_To_v1_NamespaceCondition is an autogenerated conversion function. 
-func Convert_core_NamespaceCondition_To_v1_NamespaceCondition(in *core.NamespaceCondition, out *v1.NamespaceCondition, s conversion.Scope) error { +func Convert_core_NamespaceCondition_To_v1_NamespaceCondition(in *core.NamespaceCondition, out *corev1.NamespaceCondition, s conversion.Scope) error { return autoConvert_core_NamespaceCondition_To_v1_NamespaceCondition(in, out, s) } -func autoConvert_v1_NamespaceList_To_core_NamespaceList(in *v1.NamespaceList, out *core.NamespaceList, s conversion.Scope) error { +func autoConvert_v1_NamespaceList_To_core_NamespaceList(in *corev1.NamespaceList, out *core.NamespaceList, s conversion.Scope) error { out.ListMeta = in.ListMeta out.Items = *(*[]core.Namespace)(unsafe.Pointer(&in.Items)) return nil } // Convert_v1_NamespaceList_To_core_NamespaceList is an autogenerated conversion function. -func Convert_v1_NamespaceList_To_core_NamespaceList(in *v1.NamespaceList, out *core.NamespaceList, s conversion.Scope) error { +func Convert_v1_NamespaceList_To_core_NamespaceList(in *corev1.NamespaceList, out *core.NamespaceList, s conversion.Scope) error { return autoConvert_v1_NamespaceList_To_core_NamespaceList(in, out, s) } -func autoConvert_core_NamespaceList_To_v1_NamespaceList(in *core.NamespaceList, out *v1.NamespaceList, s conversion.Scope) error { +func autoConvert_core_NamespaceList_To_v1_NamespaceList(in *core.NamespaceList, out *corev1.NamespaceList, s conversion.Scope) error { out.ListMeta = in.ListMeta - out.Items = *(*[]v1.Namespace)(unsafe.Pointer(&in.Items)) + out.Items = *(*[]corev1.Namespace)(unsafe.Pointer(&in.Items)) return nil } // Convert_core_NamespaceList_To_v1_NamespaceList is an autogenerated conversion function. -func Convert_core_NamespaceList_To_v1_NamespaceList(in *core.NamespaceList, out *v1.NamespaceList, s conversion.Scope) error { +func Convert_core_NamespaceList_To_v1_NamespaceList(in *core.NamespaceList, out *corev1.NamespaceList, s conversion.Scope) error { return autoConvert_core_NamespaceList_To_v1_NamespaceList(in, out, s) } -func autoConvert_v1_NamespaceSpec_To_core_NamespaceSpec(in *v1.NamespaceSpec, out *core.NamespaceSpec, s conversion.Scope) error { +func autoConvert_v1_NamespaceSpec_To_core_NamespaceSpec(in *corev1.NamespaceSpec, out *core.NamespaceSpec, s conversion.Scope) error { out.Finalizers = *(*[]core.FinalizerName)(unsafe.Pointer(&in.Finalizers)) return nil } // Convert_v1_NamespaceSpec_To_core_NamespaceSpec is an autogenerated conversion function. -func Convert_v1_NamespaceSpec_To_core_NamespaceSpec(in *v1.NamespaceSpec, out *core.NamespaceSpec, s conversion.Scope) error { +func Convert_v1_NamespaceSpec_To_core_NamespaceSpec(in *corev1.NamespaceSpec, out *core.NamespaceSpec, s conversion.Scope) error { return autoConvert_v1_NamespaceSpec_To_core_NamespaceSpec(in, out, s) } -func autoConvert_core_NamespaceSpec_To_v1_NamespaceSpec(in *core.NamespaceSpec, out *v1.NamespaceSpec, s conversion.Scope) error { - out.Finalizers = *(*[]v1.FinalizerName)(unsafe.Pointer(&in.Finalizers)) +func autoConvert_core_NamespaceSpec_To_v1_NamespaceSpec(in *core.NamespaceSpec, out *corev1.NamespaceSpec, s conversion.Scope) error { + out.Finalizers = *(*[]corev1.FinalizerName)(unsafe.Pointer(&in.Finalizers)) return nil } // Convert_core_NamespaceSpec_To_v1_NamespaceSpec is an autogenerated conversion function. 
-func Convert_core_NamespaceSpec_To_v1_NamespaceSpec(in *core.NamespaceSpec, out *v1.NamespaceSpec, s conversion.Scope) error { +func Convert_core_NamespaceSpec_To_v1_NamespaceSpec(in *core.NamespaceSpec, out *corev1.NamespaceSpec, s conversion.Scope) error { return autoConvert_core_NamespaceSpec_To_v1_NamespaceSpec(in, out, s) } -func autoConvert_v1_NamespaceStatus_To_core_NamespaceStatus(in *v1.NamespaceStatus, out *core.NamespaceStatus, s conversion.Scope) error { +func autoConvert_v1_NamespaceStatus_To_core_NamespaceStatus(in *corev1.NamespaceStatus, out *core.NamespaceStatus, s conversion.Scope) error { out.Phase = core.NamespacePhase(in.Phase) out.Conditions = *(*[]core.NamespaceCondition)(unsafe.Pointer(&in.Conditions)) return nil } // Convert_v1_NamespaceStatus_To_core_NamespaceStatus is an autogenerated conversion function. -func Convert_v1_NamespaceStatus_To_core_NamespaceStatus(in *v1.NamespaceStatus, out *core.NamespaceStatus, s conversion.Scope) error { +func Convert_v1_NamespaceStatus_To_core_NamespaceStatus(in *corev1.NamespaceStatus, out *core.NamespaceStatus, s conversion.Scope) error { return autoConvert_v1_NamespaceStatus_To_core_NamespaceStatus(in, out, s) } -func autoConvert_core_NamespaceStatus_To_v1_NamespaceStatus(in *core.NamespaceStatus, out *v1.NamespaceStatus, s conversion.Scope) error { - out.Phase = v1.NamespacePhase(in.Phase) - out.Conditions = *(*[]v1.NamespaceCondition)(unsafe.Pointer(&in.Conditions)) +func autoConvert_core_NamespaceStatus_To_v1_NamespaceStatus(in *core.NamespaceStatus, out *corev1.NamespaceStatus, s conversion.Scope) error { + out.Phase = corev1.NamespacePhase(in.Phase) + out.Conditions = *(*[]corev1.NamespaceCondition)(unsafe.Pointer(&in.Conditions)) return nil } // Convert_core_NamespaceStatus_To_v1_NamespaceStatus is an autogenerated conversion function. -func Convert_core_NamespaceStatus_To_v1_NamespaceStatus(in *core.NamespaceStatus, out *v1.NamespaceStatus, s conversion.Scope) error { +func Convert_core_NamespaceStatus_To_v1_NamespaceStatus(in *core.NamespaceStatus, out *corev1.NamespaceStatus, s conversion.Scope) error { return autoConvert_core_NamespaceStatus_To_v1_NamespaceStatus(in, out, s) } -func autoConvert_v1_Node_To_core_Node(in *v1.Node, out *core.Node, s conversion.Scope) error { +func autoConvert_v1_Node_To_core_Node(in *corev1.Node, out *core.Node, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta if err := Convert_v1_NodeSpec_To_core_NodeSpec(&in.Spec, &out.Spec, s); err != nil { return err @@ -4967,11 +4972,11 @@ func autoConvert_v1_Node_To_core_Node(in *v1.Node, out *core.Node, s conversion. } // Convert_v1_Node_To_core_Node is an autogenerated conversion function. -func Convert_v1_Node_To_core_Node(in *v1.Node, out *core.Node, s conversion.Scope) error { +func Convert_v1_Node_To_core_Node(in *corev1.Node, out *core.Node, s conversion.Scope) error { return autoConvert_v1_Node_To_core_Node(in, out, s) } -func autoConvert_core_Node_To_v1_Node(in *core.Node, out *v1.Node, s conversion.Scope) error { +func autoConvert_core_Node_To_v1_Node(in *core.Node, out *corev1.Node, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta if err := Convert_core_NodeSpec_To_v1_NodeSpec(&in.Spec, &out.Spec, s); err != nil { return err @@ -4983,55 +4988,55 @@ func autoConvert_core_Node_To_v1_Node(in *core.Node, out *v1.Node, s conversion. } // Convert_core_Node_To_v1_Node is an autogenerated conversion function. 
-func Convert_core_Node_To_v1_Node(in *core.Node, out *v1.Node, s conversion.Scope) error { +func Convert_core_Node_To_v1_Node(in *core.Node, out *corev1.Node, s conversion.Scope) error { return autoConvert_core_Node_To_v1_Node(in, out, s) } -func autoConvert_v1_NodeAddress_To_core_NodeAddress(in *v1.NodeAddress, out *core.NodeAddress, s conversion.Scope) error { +func autoConvert_v1_NodeAddress_To_core_NodeAddress(in *corev1.NodeAddress, out *core.NodeAddress, s conversion.Scope) error { out.Type = core.NodeAddressType(in.Type) out.Address = in.Address return nil } // Convert_v1_NodeAddress_To_core_NodeAddress is an autogenerated conversion function. -func Convert_v1_NodeAddress_To_core_NodeAddress(in *v1.NodeAddress, out *core.NodeAddress, s conversion.Scope) error { +func Convert_v1_NodeAddress_To_core_NodeAddress(in *corev1.NodeAddress, out *core.NodeAddress, s conversion.Scope) error { return autoConvert_v1_NodeAddress_To_core_NodeAddress(in, out, s) } -func autoConvert_core_NodeAddress_To_v1_NodeAddress(in *core.NodeAddress, out *v1.NodeAddress, s conversion.Scope) error { - out.Type = v1.NodeAddressType(in.Type) +func autoConvert_core_NodeAddress_To_v1_NodeAddress(in *core.NodeAddress, out *corev1.NodeAddress, s conversion.Scope) error { + out.Type = corev1.NodeAddressType(in.Type) out.Address = in.Address return nil } // Convert_core_NodeAddress_To_v1_NodeAddress is an autogenerated conversion function. -func Convert_core_NodeAddress_To_v1_NodeAddress(in *core.NodeAddress, out *v1.NodeAddress, s conversion.Scope) error { +func Convert_core_NodeAddress_To_v1_NodeAddress(in *core.NodeAddress, out *corev1.NodeAddress, s conversion.Scope) error { return autoConvert_core_NodeAddress_To_v1_NodeAddress(in, out, s) } -func autoConvert_v1_NodeAffinity_To_core_NodeAffinity(in *v1.NodeAffinity, out *core.NodeAffinity, s conversion.Scope) error { +func autoConvert_v1_NodeAffinity_To_core_NodeAffinity(in *corev1.NodeAffinity, out *core.NodeAffinity, s conversion.Scope) error { out.RequiredDuringSchedulingIgnoredDuringExecution = (*core.NodeSelector)(unsafe.Pointer(in.RequiredDuringSchedulingIgnoredDuringExecution)) out.PreferredDuringSchedulingIgnoredDuringExecution = *(*[]core.PreferredSchedulingTerm)(unsafe.Pointer(&in.PreferredDuringSchedulingIgnoredDuringExecution)) return nil } // Convert_v1_NodeAffinity_To_core_NodeAffinity is an autogenerated conversion function. 
-func Convert_v1_NodeAffinity_To_core_NodeAffinity(in *v1.NodeAffinity, out *core.NodeAffinity, s conversion.Scope) error { +func Convert_v1_NodeAffinity_To_core_NodeAffinity(in *corev1.NodeAffinity, out *core.NodeAffinity, s conversion.Scope) error { return autoConvert_v1_NodeAffinity_To_core_NodeAffinity(in, out, s) } -func autoConvert_core_NodeAffinity_To_v1_NodeAffinity(in *core.NodeAffinity, out *v1.NodeAffinity, s conversion.Scope) error { - out.RequiredDuringSchedulingIgnoredDuringExecution = (*v1.NodeSelector)(unsafe.Pointer(in.RequiredDuringSchedulingIgnoredDuringExecution)) - out.PreferredDuringSchedulingIgnoredDuringExecution = *(*[]v1.PreferredSchedulingTerm)(unsafe.Pointer(&in.PreferredDuringSchedulingIgnoredDuringExecution)) +func autoConvert_core_NodeAffinity_To_v1_NodeAffinity(in *core.NodeAffinity, out *corev1.NodeAffinity, s conversion.Scope) error { + out.RequiredDuringSchedulingIgnoredDuringExecution = (*corev1.NodeSelector)(unsafe.Pointer(in.RequiredDuringSchedulingIgnoredDuringExecution)) + out.PreferredDuringSchedulingIgnoredDuringExecution = *(*[]corev1.PreferredSchedulingTerm)(unsafe.Pointer(&in.PreferredDuringSchedulingIgnoredDuringExecution)) return nil } // Convert_core_NodeAffinity_To_v1_NodeAffinity is an autogenerated conversion function. -func Convert_core_NodeAffinity_To_v1_NodeAffinity(in *core.NodeAffinity, out *v1.NodeAffinity, s conversion.Scope) error { +func Convert_core_NodeAffinity_To_v1_NodeAffinity(in *core.NodeAffinity, out *corev1.NodeAffinity, s conversion.Scope) error { return autoConvert_core_NodeAffinity_To_v1_NodeAffinity(in, out, s) } -func autoConvert_v1_NodeCondition_To_core_NodeCondition(in *v1.NodeCondition, out *core.NodeCondition, s conversion.Scope) error { +func autoConvert_v1_NodeCondition_To_core_NodeCondition(in *corev1.NodeCondition, out *core.NodeCondition, s conversion.Scope) error { out.Type = core.NodeConditionType(in.Type) out.Status = core.ConditionStatus(in.Status) out.LastHeartbeatTime = in.LastHeartbeatTime @@ -5042,13 +5047,13 @@ func autoConvert_v1_NodeCondition_To_core_NodeCondition(in *v1.NodeCondition, ou } // Convert_v1_NodeCondition_To_core_NodeCondition is an autogenerated conversion function. -func Convert_v1_NodeCondition_To_core_NodeCondition(in *v1.NodeCondition, out *core.NodeCondition, s conversion.Scope) error { +func Convert_v1_NodeCondition_To_core_NodeCondition(in *corev1.NodeCondition, out *core.NodeCondition, s conversion.Scope) error { return autoConvert_v1_NodeCondition_To_core_NodeCondition(in, out, s) } -func autoConvert_core_NodeCondition_To_v1_NodeCondition(in *core.NodeCondition, out *v1.NodeCondition, s conversion.Scope) error { - out.Type = v1.NodeConditionType(in.Type) - out.Status = v1.ConditionStatus(in.Status) +func autoConvert_core_NodeCondition_To_v1_NodeCondition(in *core.NodeCondition, out *corev1.NodeCondition, s conversion.Scope) error { + out.Type = corev1.NodeConditionType(in.Type) + out.Status = corev1.ConditionStatus(in.Status) out.LastHeartbeatTime = in.LastHeartbeatTime out.LastTransitionTime = in.LastTransitionTime out.Reason = in.Reason @@ -5057,31 +5062,31 @@ func autoConvert_core_NodeCondition_To_v1_NodeCondition(in *core.NodeCondition, } // Convert_core_NodeCondition_To_v1_NodeCondition is an autogenerated conversion function. 
-func Convert_core_NodeCondition_To_v1_NodeCondition(in *core.NodeCondition, out *v1.NodeCondition, s conversion.Scope) error { +func Convert_core_NodeCondition_To_v1_NodeCondition(in *core.NodeCondition, out *corev1.NodeCondition, s conversion.Scope) error { return autoConvert_core_NodeCondition_To_v1_NodeCondition(in, out, s) } -func autoConvert_v1_NodeConfigSource_To_core_NodeConfigSource(in *v1.NodeConfigSource, out *core.NodeConfigSource, s conversion.Scope) error { +func autoConvert_v1_NodeConfigSource_To_core_NodeConfigSource(in *corev1.NodeConfigSource, out *core.NodeConfigSource, s conversion.Scope) error { out.ConfigMap = (*core.ConfigMapNodeConfigSource)(unsafe.Pointer(in.ConfigMap)) return nil } // Convert_v1_NodeConfigSource_To_core_NodeConfigSource is an autogenerated conversion function. -func Convert_v1_NodeConfigSource_To_core_NodeConfigSource(in *v1.NodeConfigSource, out *core.NodeConfigSource, s conversion.Scope) error { +func Convert_v1_NodeConfigSource_To_core_NodeConfigSource(in *corev1.NodeConfigSource, out *core.NodeConfigSource, s conversion.Scope) error { return autoConvert_v1_NodeConfigSource_To_core_NodeConfigSource(in, out, s) } -func autoConvert_core_NodeConfigSource_To_v1_NodeConfigSource(in *core.NodeConfigSource, out *v1.NodeConfigSource, s conversion.Scope) error { - out.ConfigMap = (*v1.ConfigMapNodeConfigSource)(unsafe.Pointer(in.ConfigMap)) +func autoConvert_core_NodeConfigSource_To_v1_NodeConfigSource(in *core.NodeConfigSource, out *corev1.NodeConfigSource, s conversion.Scope) error { + out.ConfigMap = (*corev1.ConfigMapNodeConfigSource)(unsafe.Pointer(in.ConfigMap)) return nil } // Convert_core_NodeConfigSource_To_v1_NodeConfigSource is an autogenerated conversion function. -func Convert_core_NodeConfigSource_To_v1_NodeConfigSource(in *core.NodeConfigSource, out *v1.NodeConfigSource, s conversion.Scope) error { +func Convert_core_NodeConfigSource_To_v1_NodeConfigSource(in *core.NodeConfigSource, out *corev1.NodeConfigSource, s conversion.Scope) error { return autoConvert_core_NodeConfigSource_To_v1_NodeConfigSource(in, out, s) } -func autoConvert_v1_NodeConfigStatus_To_core_NodeConfigStatus(in *v1.NodeConfigStatus, out *core.NodeConfigStatus, s conversion.Scope) error { +func autoConvert_v1_NodeConfigStatus_To_core_NodeConfigStatus(in *corev1.NodeConfigStatus, out *core.NodeConfigStatus, s conversion.Scope) error { out.Assigned = (*core.NodeConfigSource)(unsafe.Pointer(in.Assigned)) out.Active = (*core.NodeConfigSource)(unsafe.Pointer(in.Active)) out.LastKnownGood = (*core.NodeConfigSource)(unsafe.Pointer(in.LastKnownGood)) @@ -5090,24 +5095,24 @@ func autoConvert_v1_NodeConfigStatus_To_core_NodeConfigStatus(in *v1.NodeConfigS } // Convert_v1_NodeConfigStatus_To_core_NodeConfigStatus is an autogenerated conversion function. 
-func Convert_v1_NodeConfigStatus_To_core_NodeConfigStatus(in *v1.NodeConfigStatus, out *core.NodeConfigStatus, s conversion.Scope) error { +func Convert_v1_NodeConfigStatus_To_core_NodeConfigStatus(in *corev1.NodeConfigStatus, out *core.NodeConfigStatus, s conversion.Scope) error { return autoConvert_v1_NodeConfigStatus_To_core_NodeConfigStatus(in, out, s) } -func autoConvert_core_NodeConfigStatus_To_v1_NodeConfigStatus(in *core.NodeConfigStatus, out *v1.NodeConfigStatus, s conversion.Scope) error { - out.Assigned = (*v1.NodeConfigSource)(unsafe.Pointer(in.Assigned)) - out.Active = (*v1.NodeConfigSource)(unsafe.Pointer(in.Active)) - out.LastKnownGood = (*v1.NodeConfigSource)(unsafe.Pointer(in.LastKnownGood)) +func autoConvert_core_NodeConfigStatus_To_v1_NodeConfigStatus(in *core.NodeConfigStatus, out *corev1.NodeConfigStatus, s conversion.Scope) error { + out.Assigned = (*corev1.NodeConfigSource)(unsafe.Pointer(in.Assigned)) + out.Active = (*corev1.NodeConfigSource)(unsafe.Pointer(in.Active)) + out.LastKnownGood = (*corev1.NodeConfigSource)(unsafe.Pointer(in.LastKnownGood)) out.Error = in.Error return nil } // Convert_core_NodeConfigStatus_To_v1_NodeConfigStatus is an autogenerated conversion function. -func Convert_core_NodeConfigStatus_To_v1_NodeConfigStatus(in *core.NodeConfigStatus, out *v1.NodeConfigStatus, s conversion.Scope) error { +func Convert_core_NodeConfigStatus_To_v1_NodeConfigStatus(in *core.NodeConfigStatus, out *corev1.NodeConfigStatus, s conversion.Scope) error { return autoConvert_core_NodeConfigStatus_To_v1_NodeConfigStatus(in, out, s) } -func autoConvert_v1_NodeDaemonEndpoints_To_core_NodeDaemonEndpoints(in *v1.NodeDaemonEndpoints, out *core.NodeDaemonEndpoints, s conversion.Scope) error { +func autoConvert_v1_NodeDaemonEndpoints_To_core_NodeDaemonEndpoints(in *corev1.NodeDaemonEndpoints, out *core.NodeDaemonEndpoints, s conversion.Scope) error { if err := Convert_v1_DaemonEndpoint_To_core_DaemonEndpoint(&in.KubeletEndpoint, &out.KubeletEndpoint, s); err != nil { return err } @@ -5115,11 +5120,11 @@ func autoConvert_v1_NodeDaemonEndpoints_To_core_NodeDaemonEndpoints(in *v1.NodeD } // Convert_v1_NodeDaemonEndpoints_To_core_NodeDaemonEndpoints is an autogenerated conversion function. -func Convert_v1_NodeDaemonEndpoints_To_core_NodeDaemonEndpoints(in *v1.NodeDaemonEndpoints, out *core.NodeDaemonEndpoints, s conversion.Scope) error { +func Convert_v1_NodeDaemonEndpoints_To_core_NodeDaemonEndpoints(in *corev1.NodeDaemonEndpoints, out *core.NodeDaemonEndpoints, s conversion.Scope) error { return autoConvert_v1_NodeDaemonEndpoints_To_core_NodeDaemonEndpoints(in, out, s) } -func autoConvert_core_NodeDaemonEndpoints_To_v1_NodeDaemonEndpoints(in *core.NodeDaemonEndpoints, out *v1.NodeDaemonEndpoints, s conversion.Scope) error { +func autoConvert_core_NodeDaemonEndpoints_To_v1_NodeDaemonEndpoints(in *core.NodeDaemonEndpoints, out *corev1.NodeDaemonEndpoints, s conversion.Scope) error { if err := Convert_core_DaemonEndpoint_To_v1_DaemonEndpoint(&in.KubeletEndpoint, &out.KubeletEndpoint, s); err != nil { return err } @@ -5127,31 +5132,31 @@ func autoConvert_core_NodeDaemonEndpoints_To_v1_NodeDaemonEndpoints(in *core.Nod } // Convert_core_NodeDaemonEndpoints_To_v1_NodeDaemonEndpoints is an autogenerated conversion function. 
-func Convert_core_NodeDaemonEndpoints_To_v1_NodeDaemonEndpoints(in *core.NodeDaemonEndpoints, out *v1.NodeDaemonEndpoints, s conversion.Scope) error { +func Convert_core_NodeDaemonEndpoints_To_v1_NodeDaemonEndpoints(in *core.NodeDaemonEndpoints, out *corev1.NodeDaemonEndpoints, s conversion.Scope) error { return autoConvert_core_NodeDaemonEndpoints_To_v1_NodeDaemonEndpoints(in, out, s) } -func autoConvert_v1_NodeFeatures_To_core_NodeFeatures(in *v1.NodeFeatures, out *core.NodeFeatures, s conversion.Scope) error { +func autoConvert_v1_NodeFeatures_To_core_NodeFeatures(in *corev1.NodeFeatures, out *core.NodeFeatures, s conversion.Scope) error { out.SupplementalGroupsPolicy = (*bool)(unsafe.Pointer(in.SupplementalGroupsPolicy)) return nil } // Convert_v1_NodeFeatures_To_core_NodeFeatures is an autogenerated conversion function. -func Convert_v1_NodeFeatures_To_core_NodeFeatures(in *v1.NodeFeatures, out *core.NodeFeatures, s conversion.Scope) error { +func Convert_v1_NodeFeatures_To_core_NodeFeatures(in *corev1.NodeFeatures, out *core.NodeFeatures, s conversion.Scope) error { return autoConvert_v1_NodeFeatures_To_core_NodeFeatures(in, out, s) } -func autoConvert_core_NodeFeatures_To_v1_NodeFeatures(in *core.NodeFeatures, out *v1.NodeFeatures, s conversion.Scope) error { +func autoConvert_core_NodeFeatures_To_v1_NodeFeatures(in *core.NodeFeatures, out *corev1.NodeFeatures, s conversion.Scope) error { out.SupplementalGroupsPolicy = (*bool)(unsafe.Pointer(in.SupplementalGroupsPolicy)) return nil } // Convert_core_NodeFeatures_To_v1_NodeFeatures is an autogenerated conversion function. -func Convert_core_NodeFeatures_To_v1_NodeFeatures(in *core.NodeFeatures, out *v1.NodeFeatures, s conversion.Scope) error { +func Convert_core_NodeFeatures_To_v1_NodeFeatures(in *core.NodeFeatures, out *corev1.NodeFeatures, s conversion.Scope) error { return autoConvert_core_NodeFeatures_To_v1_NodeFeatures(in, out, s) } -func autoConvert_v1_NodeList_To_core_NodeList(in *v1.NodeList, out *core.NodeList, s conversion.Scope) error { +func autoConvert_v1_NodeList_To_core_NodeList(in *corev1.NodeList, out *core.NodeList, s conversion.Scope) error { out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items @@ -5168,15 +5173,15 @@ func autoConvert_v1_NodeList_To_core_NodeList(in *v1.NodeList, out *core.NodeLis } // Convert_v1_NodeList_To_core_NodeList is an autogenerated conversion function. -func Convert_v1_NodeList_To_core_NodeList(in *v1.NodeList, out *core.NodeList, s conversion.Scope) error { +func Convert_v1_NodeList_To_core_NodeList(in *corev1.NodeList, out *core.NodeList, s conversion.Scope) error { return autoConvert_v1_NodeList_To_core_NodeList(in, out, s) } -func autoConvert_core_NodeList_To_v1_NodeList(in *core.NodeList, out *v1.NodeList, s conversion.Scope) error { +func autoConvert_core_NodeList_To_v1_NodeList(in *core.NodeList, out *corev1.NodeList, s conversion.Scope) error { out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items - *out = make([]v1.Node, len(*in)) + *out = make([]corev1.Node, len(*in)) for i := range *in { if err := Convert_core_Node_To_v1_Node(&(*in)[i], &(*out)[i], s); err != nil { return err @@ -5189,31 +5194,31 @@ func autoConvert_core_NodeList_To_v1_NodeList(in *core.NodeList, out *v1.NodeLis } // Convert_core_NodeList_To_v1_NodeList is an autogenerated conversion function. 
-func Convert_core_NodeList_To_v1_NodeList(in *core.NodeList, out *v1.NodeList, s conversion.Scope) error { +func Convert_core_NodeList_To_v1_NodeList(in *core.NodeList, out *corev1.NodeList, s conversion.Scope) error { return autoConvert_core_NodeList_To_v1_NodeList(in, out, s) } -func autoConvert_v1_NodeProxyOptions_To_core_NodeProxyOptions(in *v1.NodeProxyOptions, out *core.NodeProxyOptions, s conversion.Scope) error { +func autoConvert_v1_NodeProxyOptions_To_core_NodeProxyOptions(in *corev1.NodeProxyOptions, out *core.NodeProxyOptions, s conversion.Scope) error { out.Path = in.Path return nil } // Convert_v1_NodeProxyOptions_To_core_NodeProxyOptions is an autogenerated conversion function. -func Convert_v1_NodeProxyOptions_To_core_NodeProxyOptions(in *v1.NodeProxyOptions, out *core.NodeProxyOptions, s conversion.Scope) error { +func Convert_v1_NodeProxyOptions_To_core_NodeProxyOptions(in *corev1.NodeProxyOptions, out *core.NodeProxyOptions, s conversion.Scope) error { return autoConvert_v1_NodeProxyOptions_To_core_NodeProxyOptions(in, out, s) } -func autoConvert_core_NodeProxyOptions_To_v1_NodeProxyOptions(in *core.NodeProxyOptions, out *v1.NodeProxyOptions, s conversion.Scope) error { +func autoConvert_core_NodeProxyOptions_To_v1_NodeProxyOptions(in *core.NodeProxyOptions, out *corev1.NodeProxyOptions, s conversion.Scope) error { out.Path = in.Path return nil } // Convert_core_NodeProxyOptions_To_v1_NodeProxyOptions is an autogenerated conversion function. -func Convert_core_NodeProxyOptions_To_v1_NodeProxyOptions(in *core.NodeProxyOptions, out *v1.NodeProxyOptions, s conversion.Scope) error { +func Convert_core_NodeProxyOptions_To_v1_NodeProxyOptions(in *core.NodeProxyOptions, out *corev1.NodeProxyOptions, s conversion.Scope) error { return autoConvert_core_NodeProxyOptions_To_v1_NodeProxyOptions(in, out, s) } -func autoConvert_url_Values_To_v1_NodeProxyOptions(in *url.Values, out *v1.NodeProxyOptions, s conversion.Scope) error { +func autoConvert_url_Values_To_v1_NodeProxyOptions(in *url.Values, out *corev1.NodeProxyOptions, s conversion.Scope) error { // WARNING: Field TypeMeta does not have json tag, skipping. if values, ok := map[string][]string(*in)["path"]; ok && len(values) > 0 { @@ -5227,75 +5232,75 @@ func autoConvert_url_Values_To_v1_NodeProxyOptions(in *url.Values, out *v1.NodeP } // Convert_url_Values_To_v1_NodeProxyOptions is an autogenerated conversion function. -func Convert_url_Values_To_v1_NodeProxyOptions(in *url.Values, out *v1.NodeProxyOptions, s conversion.Scope) error { +func Convert_url_Values_To_v1_NodeProxyOptions(in *url.Values, out *corev1.NodeProxyOptions, s conversion.Scope) error { return autoConvert_url_Values_To_v1_NodeProxyOptions(in, out, s) } -func autoConvert_v1_NodeRuntimeHandler_To_core_NodeRuntimeHandler(in *v1.NodeRuntimeHandler, out *core.NodeRuntimeHandler, s conversion.Scope) error { +func autoConvert_v1_NodeRuntimeHandler_To_core_NodeRuntimeHandler(in *corev1.NodeRuntimeHandler, out *core.NodeRuntimeHandler, s conversion.Scope) error { out.Name = in.Name out.Features = (*core.NodeRuntimeHandlerFeatures)(unsafe.Pointer(in.Features)) return nil } // Convert_v1_NodeRuntimeHandler_To_core_NodeRuntimeHandler is an autogenerated conversion function. 
-func Convert_v1_NodeRuntimeHandler_To_core_NodeRuntimeHandler(in *v1.NodeRuntimeHandler, out *core.NodeRuntimeHandler, s conversion.Scope) error { +func Convert_v1_NodeRuntimeHandler_To_core_NodeRuntimeHandler(in *corev1.NodeRuntimeHandler, out *core.NodeRuntimeHandler, s conversion.Scope) error { return autoConvert_v1_NodeRuntimeHandler_To_core_NodeRuntimeHandler(in, out, s) } -func autoConvert_core_NodeRuntimeHandler_To_v1_NodeRuntimeHandler(in *core.NodeRuntimeHandler, out *v1.NodeRuntimeHandler, s conversion.Scope) error { +func autoConvert_core_NodeRuntimeHandler_To_v1_NodeRuntimeHandler(in *core.NodeRuntimeHandler, out *corev1.NodeRuntimeHandler, s conversion.Scope) error { out.Name = in.Name - out.Features = (*v1.NodeRuntimeHandlerFeatures)(unsafe.Pointer(in.Features)) + out.Features = (*corev1.NodeRuntimeHandlerFeatures)(unsafe.Pointer(in.Features)) return nil } // Convert_core_NodeRuntimeHandler_To_v1_NodeRuntimeHandler is an autogenerated conversion function. -func Convert_core_NodeRuntimeHandler_To_v1_NodeRuntimeHandler(in *core.NodeRuntimeHandler, out *v1.NodeRuntimeHandler, s conversion.Scope) error { +func Convert_core_NodeRuntimeHandler_To_v1_NodeRuntimeHandler(in *core.NodeRuntimeHandler, out *corev1.NodeRuntimeHandler, s conversion.Scope) error { return autoConvert_core_NodeRuntimeHandler_To_v1_NodeRuntimeHandler(in, out, s) } -func autoConvert_v1_NodeRuntimeHandlerFeatures_To_core_NodeRuntimeHandlerFeatures(in *v1.NodeRuntimeHandlerFeatures, out *core.NodeRuntimeHandlerFeatures, s conversion.Scope) error { +func autoConvert_v1_NodeRuntimeHandlerFeatures_To_core_NodeRuntimeHandlerFeatures(in *corev1.NodeRuntimeHandlerFeatures, out *core.NodeRuntimeHandlerFeatures, s conversion.Scope) error { out.RecursiveReadOnlyMounts = (*bool)(unsafe.Pointer(in.RecursiveReadOnlyMounts)) out.UserNamespaces = (*bool)(unsafe.Pointer(in.UserNamespaces)) return nil } // Convert_v1_NodeRuntimeHandlerFeatures_To_core_NodeRuntimeHandlerFeatures is an autogenerated conversion function. -func Convert_v1_NodeRuntimeHandlerFeatures_To_core_NodeRuntimeHandlerFeatures(in *v1.NodeRuntimeHandlerFeatures, out *core.NodeRuntimeHandlerFeatures, s conversion.Scope) error { +func Convert_v1_NodeRuntimeHandlerFeatures_To_core_NodeRuntimeHandlerFeatures(in *corev1.NodeRuntimeHandlerFeatures, out *core.NodeRuntimeHandlerFeatures, s conversion.Scope) error { return autoConvert_v1_NodeRuntimeHandlerFeatures_To_core_NodeRuntimeHandlerFeatures(in, out, s) } -func autoConvert_core_NodeRuntimeHandlerFeatures_To_v1_NodeRuntimeHandlerFeatures(in *core.NodeRuntimeHandlerFeatures, out *v1.NodeRuntimeHandlerFeatures, s conversion.Scope) error { +func autoConvert_core_NodeRuntimeHandlerFeatures_To_v1_NodeRuntimeHandlerFeatures(in *core.NodeRuntimeHandlerFeatures, out *corev1.NodeRuntimeHandlerFeatures, s conversion.Scope) error { out.RecursiveReadOnlyMounts = (*bool)(unsafe.Pointer(in.RecursiveReadOnlyMounts)) out.UserNamespaces = (*bool)(unsafe.Pointer(in.UserNamespaces)) return nil } // Convert_core_NodeRuntimeHandlerFeatures_To_v1_NodeRuntimeHandlerFeatures is an autogenerated conversion function. 
-func Convert_core_NodeRuntimeHandlerFeatures_To_v1_NodeRuntimeHandlerFeatures(in *core.NodeRuntimeHandlerFeatures, out *v1.NodeRuntimeHandlerFeatures, s conversion.Scope) error { +func Convert_core_NodeRuntimeHandlerFeatures_To_v1_NodeRuntimeHandlerFeatures(in *core.NodeRuntimeHandlerFeatures, out *corev1.NodeRuntimeHandlerFeatures, s conversion.Scope) error { return autoConvert_core_NodeRuntimeHandlerFeatures_To_v1_NodeRuntimeHandlerFeatures(in, out, s) } -func autoConvert_v1_NodeSelector_To_core_NodeSelector(in *v1.NodeSelector, out *core.NodeSelector, s conversion.Scope) error { +func autoConvert_v1_NodeSelector_To_core_NodeSelector(in *corev1.NodeSelector, out *core.NodeSelector, s conversion.Scope) error { out.NodeSelectorTerms = *(*[]core.NodeSelectorTerm)(unsafe.Pointer(&in.NodeSelectorTerms)) return nil } // Convert_v1_NodeSelector_To_core_NodeSelector is an autogenerated conversion function. -func Convert_v1_NodeSelector_To_core_NodeSelector(in *v1.NodeSelector, out *core.NodeSelector, s conversion.Scope) error { +func Convert_v1_NodeSelector_To_core_NodeSelector(in *corev1.NodeSelector, out *core.NodeSelector, s conversion.Scope) error { return autoConvert_v1_NodeSelector_To_core_NodeSelector(in, out, s) } -func autoConvert_core_NodeSelector_To_v1_NodeSelector(in *core.NodeSelector, out *v1.NodeSelector, s conversion.Scope) error { - out.NodeSelectorTerms = *(*[]v1.NodeSelectorTerm)(unsafe.Pointer(&in.NodeSelectorTerms)) +func autoConvert_core_NodeSelector_To_v1_NodeSelector(in *core.NodeSelector, out *corev1.NodeSelector, s conversion.Scope) error { + out.NodeSelectorTerms = *(*[]corev1.NodeSelectorTerm)(unsafe.Pointer(&in.NodeSelectorTerms)) return nil } // Convert_core_NodeSelector_To_v1_NodeSelector is an autogenerated conversion function. -func Convert_core_NodeSelector_To_v1_NodeSelector(in *core.NodeSelector, out *v1.NodeSelector, s conversion.Scope) error { +func Convert_core_NodeSelector_To_v1_NodeSelector(in *core.NodeSelector, out *corev1.NodeSelector, s conversion.Scope) error { return autoConvert_core_NodeSelector_To_v1_NodeSelector(in, out, s) } -func autoConvert_v1_NodeSelectorRequirement_To_core_NodeSelectorRequirement(in *v1.NodeSelectorRequirement, out *core.NodeSelectorRequirement, s conversion.Scope) error { +func autoConvert_v1_NodeSelectorRequirement_To_core_NodeSelectorRequirement(in *corev1.NodeSelectorRequirement, out *core.NodeSelectorRequirement, s conversion.Scope) error { out.Key = in.Key out.Operator = core.NodeSelectorOperator(in.Operator) out.Values = *(*[]string)(unsafe.Pointer(&in.Values)) @@ -5303,45 +5308,45 @@ func autoConvert_v1_NodeSelectorRequirement_To_core_NodeSelectorRequirement(in * } // Convert_v1_NodeSelectorRequirement_To_core_NodeSelectorRequirement is an autogenerated conversion function. 
-func Convert_v1_NodeSelectorRequirement_To_core_NodeSelectorRequirement(in *v1.NodeSelectorRequirement, out *core.NodeSelectorRequirement, s conversion.Scope) error { +func Convert_v1_NodeSelectorRequirement_To_core_NodeSelectorRequirement(in *corev1.NodeSelectorRequirement, out *core.NodeSelectorRequirement, s conversion.Scope) error { return autoConvert_v1_NodeSelectorRequirement_To_core_NodeSelectorRequirement(in, out, s) } -func autoConvert_core_NodeSelectorRequirement_To_v1_NodeSelectorRequirement(in *core.NodeSelectorRequirement, out *v1.NodeSelectorRequirement, s conversion.Scope) error { +func autoConvert_core_NodeSelectorRequirement_To_v1_NodeSelectorRequirement(in *core.NodeSelectorRequirement, out *corev1.NodeSelectorRequirement, s conversion.Scope) error { out.Key = in.Key - out.Operator = v1.NodeSelectorOperator(in.Operator) + out.Operator = corev1.NodeSelectorOperator(in.Operator) out.Values = *(*[]string)(unsafe.Pointer(&in.Values)) return nil } // Convert_core_NodeSelectorRequirement_To_v1_NodeSelectorRequirement is an autogenerated conversion function. -func Convert_core_NodeSelectorRequirement_To_v1_NodeSelectorRequirement(in *core.NodeSelectorRequirement, out *v1.NodeSelectorRequirement, s conversion.Scope) error { +func Convert_core_NodeSelectorRequirement_To_v1_NodeSelectorRequirement(in *core.NodeSelectorRequirement, out *corev1.NodeSelectorRequirement, s conversion.Scope) error { return autoConvert_core_NodeSelectorRequirement_To_v1_NodeSelectorRequirement(in, out, s) } -func autoConvert_v1_NodeSelectorTerm_To_core_NodeSelectorTerm(in *v1.NodeSelectorTerm, out *core.NodeSelectorTerm, s conversion.Scope) error { +func autoConvert_v1_NodeSelectorTerm_To_core_NodeSelectorTerm(in *corev1.NodeSelectorTerm, out *core.NodeSelectorTerm, s conversion.Scope) error { out.MatchExpressions = *(*[]core.NodeSelectorRequirement)(unsafe.Pointer(&in.MatchExpressions)) out.MatchFields = *(*[]core.NodeSelectorRequirement)(unsafe.Pointer(&in.MatchFields)) return nil } // Convert_v1_NodeSelectorTerm_To_core_NodeSelectorTerm is an autogenerated conversion function. -func Convert_v1_NodeSelectorTerm_To_core_NodeSelectorTerm(in *v1.NodeSelectorTerm, out *core.NodeSelectorTerm, s conversion.Scope) error { +func Convert_v1_NodeSelectorTerm_To_core_NodeSelectorTerm(in *corev1.NodeSelectorTerm, out *core.NodeSelectorTerm, s conversion.Scope) error { return autoConvert_v1_NodeSelectorTerm_To_core_NodeSelectorTerm(in, out, s) } -func autoConvert_core_NodeSelectorTerm_To_v1_NodeSelectorTerm(in *core.NodeSelectorTerm, out *v1.NodeSelectorTerm, s conversion.Scope) error { - out.MatchExpressions = *(*[]v1.NodeSelectorRequirement)(unsafe.Pointer(&in.MatchExpressions)) - out.MatchFields = *(*[]v1.NodeSelectorRequirement)(unsafe.Pointer(&in.MatchFields)) +func autoConvert_core_NodeSelectorTerm_To_v1_NodeSelectorTerm(in *core.NodeSelectorTerm, out *corev1.NodeSelectorTerm, s conversion.Scope) error { + out.MatchExpressions = *(*[]corev1.NodeSelectorRequirement)(unsafe.Pointer(&in.MatchExpressions)) + out.MatchFields = *(*[]corev1.NodeSelectorRequirement)(unsafe.Pointer(&in.MatchFields)) return nil } // Convert_core_NodeSelectorTerm_To_v1_NodeSelectorTerm is an autogenerated conversion function. 
-func Convert_core_NodeSelectorTerm_To_v1_NodeSelectorTerm(in *core.NodeSelectorTerm, out *v1.NodeSelectorTerm, s conversion.Scope) error { +func Convert_core_NodeSelectorTerm_To_v1_NodeSelectorTerm(in *core.NodeSelectorTerm, out *corev1.NodeSelectorTerm, s conversion.Scope) error { return autoConvert_core_NodeSelectorTerm_To_v1_NodeSelectorTerm(in, out, s) } -func autoConvert_v1_NodeSpec_To_core_NodeSpec(in *v1.NodeSpec, out *core.NodeSpec, s conversion.Scope) error { +func autoConvert_v1_NodeSpec_To_core_NodeSpec(in *corev1.NodeSpec, out *core.NodeSpec, s conversion.Scope) error { // WARNING: in.PodCIDR requires manual conversion: does not exist in peer-type out.PodCIDRs = *(*[]string)(unsafe.Pointer(&in.PodCIDRs)) out.ProviderID = in.ProviderID @@ -5352,17 +5357,17 @@ func autoConvert_v1_NodeSpec_To_core_NodeSpec(in *v1.NodeSpec, out *core.NodeSpe return nil } -func autoConvert_core_NodeSpec_To_v1_NodeSpec(in *core.NodeSpec, out *v1.NodeSpec, s conversion.Scope) error { +func autoConvert_core_NodeSpec_To_v1_NodeSpec(in *core.NodeSpec, out *corev1.NodeSpec, s conversion.Scope) error { out.PodCIDRs = *(*[]string)(unsafe.Pointer(&in.PodCIDRs)) out.ProviderID = in.ProviderID out.Unschedulable = in.Unschedulable - out.Taints = *(*[]v1.Taint)(unsafe.Pointer(&in.Taints)) - out.ConfigSource = (*v1.NodeConfigSource)(unsafe.Pointer(in.ConfigSource)) + out.Taints = *(*[]corev1.Taint)(unsafe.Pointer(&in.Taints)) + out.ConfigSource = (*corev1.NodeConfigSource)(unsafe.Pointer(in.ConfigSource)) out.DoNotUseExternalID = in.DoNotUseExternalID return nil } -func autoConvert_v1_NodeStatus_To_core_NodeStatus(in *v1.NodeStatus, out *core.NodeStatus, s conversion.Scope) error { +func autoConvert_v1_NodeStatus_To_core_NodeStatus(in *corev1.NodeStatus, out *core.NodeStatus, s conversion.Scope) error { out.Capacity = *(*core.ResourceList)(unsafe.Pointer(&in.Capacity)) out.Allocatable = *(*core.ResourceList)(unsafe.Pointer(&in.Allocatable)) out.Phase = core.NodePhase(in.Phase) @@ -5384,37 +5389,37 @@ func autoConvert_v1_NodeStatus_To_core_NodeStatus(in *v1.NodeStatus, out *core.N } // Convert_v1_NodeStatus_To_core_NodeStatus is an autogenerated conversion function. 
-func Convert_v1_NodeStatus_To_core_NodeStatus(in *v1.NodeStatus, out *core.NodeStatus, s conversion.Scope) error { +func Convert_v1_NodeStatus_To_core_NodeStatus(in *corev1.NodeStatus, out *core.NodeStatus, s conversion.Scope) error { return autoConvert_v1_NodeStatus_To_core_NodeStatus(in, out, s) } -func autoConvert_core_NodeStatus_To_v1_NodeStatus(in *core.NodeStatus, out *v1.NodeStatus, s conversion.Scope) error { - out.Capacity = *(*v1.ResourceList)(unsafe.Pointer(&in.Capacity)) - out.Allocatable = *(*v1.ResourceList)(unsafe.Pointer(&in.Allocatable)) - out.Phase = v1.NodePhase(in.Phase) - out.Conditions = *(*[]v1.NodeCondition)(unsafe.Pointer(&in.Conditions)) - out.Addresses = *(*[]v1.NodeAddress)(unsafe.Pointer(&in.Addresses)) +func autoConvert_core_NodeStatus_To_v1_NodeStatus(in *core.NodeStatus, out *corev1.NodeStatus, s conversion.Scope) error { + out.Capacity = *(*corev1.ResourceList)(unsafe.Pointer(&in.Capacity)) + out.Allocatable = *(*corev1.ResourceList)(unsafe.Pointer(&in.Allocatable)) + out.Phase = corev1.NodePhase(in.Phase) + out.Conditions = *(*[]corev1.NodeCondition)(unsafe.Pointer(&in.Conditions)) + out.Addresses = *(*[]corev1.NodeAddress)(unsafe.Pointer(&in.Addresses)) if err := Convert_core_NodeDaemonEndpoints_To_v1_NodeDaemonEndpoints(&in.DaemonEndpoints, &out.DaemonEndpoints, s); err != nil { return err } if err := Convert_core_NodeSystemInfo_To_v1_NodeSystemInfo(&in.NodeInfo, &out.NodeInfo, s); err != nil { return err } - out.Images = *(*[]v1.ContainerImage)(unsafe.Pointer(&in.Images)) - out.VolumesInUse = *(*[]v1.UniqueVolumeName)(unsafe.Pointer(&in.VolumesInUse)) - out.VolumesAttached = *(*[]v1.AttachedVolume)(unsafe.Pointer(&in.VolumesAttached)) - out.Config = (*v1.NodeConfigStatus)(unsafe.Pointer(in.Config)) - out.RuntimeHandlers = *(*[]v1.NodeRuntimeHandler)(unsafe.Pointer(&in.RuntimeHandlers)) - out.Features = (*v1.NodeFeatures)(unsafe.Pointer(in.Features)) + out.Images = *(*[]corev1.ContainerImage)(unsafe.Pointer(&in.Images)) + out.VolumesInUse = *(*[]corev1.UniqueVolumeName)(unsafe.Pointer(&in.VolumesInUse)) + out.VolumesAttached = *(*[]corev1.AttachedVolume)(unsafe.Pointer(&in.VolumesAttached)) + out.Config = (*corev1.NodeConfigStatus)(unsafe.Pointer(in.Config)) + out.RuntimeHandlers = *(*[]corev1.NodeRuntimeHandler)(unsafe.Pointer(&in.RuntimeHandlers)) + out.Features = (*corev1.NodeFeatures)(unsafe.Pointer(in.Features)) return nil } // Convert_core_NodeStatus_To_v1_NodeStatus is an autogenerated conversion function. -func Convert_core_NodeStatus_To_v1_NodeStatus(in *core.NodeStatus, out *v1.NodeStatus, s conversion.Scope) error { +func Convert_core_NodeStatus_To_v1_NodeStatus(in *core.NodeStatus, out *corev1.NodeStatus, s conversion.Scope) error { return autoConvert_core_NodeStatus_To_v1_NodeStatus(in, out, s) } -func autoConvert_v1_NodeSystemInfo_To_core_NodeSystemInfo(in *v1.NodeSystemInfo, out *core.NodeSystemInfo, s conversion.Scope) error { +func autoConvert_v1_NodeSystemInfo_To_core_NodeSystemInfo(in *corev1.NodeSystemInfo, out *core.NodeSystemInfo, s conversion.Scope) error { out.MachineID = in.MachineID out.SystemUUID = in.SystemUUID out.BootID = in.BootID @@ -5429,11 +5434,11 @@ func autoConvert_v1_NodeSystemInfo_To_core_NodeSystemInfo(in *v1.NodeSystemInfo, } // Convert_v1_NodeSystemInfo_To_core_NodeSystemInfo is an autogenerated conversion function. 
-func Convert_v1_NodeSystemInfo_To_core_NodeSystemInfo(in *v1.NodeSystemInfo, out *core.NodeSystemInfo, s conversion.Scope) error { +func Convert_v1_NodeSystemInfo_To_core_NodeSystemInfo(in *corev1.NodeSystemInfo, out *core.NodeSystemInfo, s conversion.Scope) error { return autoConvert_v1_NodeSystemInfo_To_core_NodeSystemInfo(in, out, s) } -func autoConvert_core_NodeSystemInfo_To_v1_NodeSystemInfo(in *core.NodeSystemInfo, out *v1.NodeSystemInfo, s conversion.Scope) error { +func autoConvert_core_NodeSystemInfo_To_v1_NodeSystemInfo(in *core.NodeSystemInfo, out *corev1.NodeSystemInfo, s conversion.Scope) error { out.MachineID = in.MachineID out.SystemUUID = in.SystemUUID out.BootID = in.BootID @@ -5448,33 +5453,33 @@ func autoConvert_core_NodeSystemInfo_To_v1_NodeSystemInfo(in *core.NodeSystemInf } // Convert_core_NodeSystemInfo_To_v1_NodeSystemInfo is an autogenerated conversion function. -func Convert_core_NodeSystemInfo_To_v1_NodeSystemInfo(in *core.NodeSystemInfo, out *v1.NodeSystemInfo, s conversion.Scope) error { +func Convert_core_NodeSystemInfo_To_v1_NodeSystemInfo(in *core.NodeSystemInfo, out *corev1.NodeSystemInfo, s conversion.Scope) error { return autoConvert_core_NodeSystemInfo_To_v1_NodeSystemInfo(in, out, s) } -func autoConvert_v1_ObjectFieldSelector_To_core_ObjectFieldSelector(in *v1.ObjectFieldSelector, out *core.ObjectFieldSelector, s conversion.Scope) error { +func autoConvert_v1_ObjectFieldSelector_To_core_ObjectFieldSelector(in *corev1.ObjectFieldSelector, out *core.ObjectFieldSelector, s conversion.Scope) error { out.APIVersion = in.APIVersion out.FieldPath = in.FieldPath return nil } // Convert_v1_ObjectFieldSelector_To_core_ObjectFieldSelector is an autogenerated conversion function. -func Convert_v1_ObjectFieldSelector_To_core_ObjectFieldSelector(in *v1.ObjectFieldSelector, out *core.ObjectFieldSelector, s conversion.Scope) error { +func Convert_v1_ObjectFieldSelector_To_core_ObjectFieldSelector(in *corev1.ObjectFieldSelector, out *core.ObjectFieldSelector, s conversion.Scope) error { return autoConvert_v1_ObjectFieldSelector_To_core_ObjectFieldSelector(in, out, s) } -func autoConvert_core_ObjectFieldSelector_To_v1_ObjectFieldSelector(in *core.ObjectFieldSelector, out *v1.ObjectFieldSelector, s conversion.Scope) error { +func autoConvert_core_ObjectFieldSelector_To_v1_ObjectFieldSelector(in *core.ObjectFieldSelector, out *corev1.ObjectFieldSelector, s conversion.Scope) error { out.APIVersion = in.APIVersion out.FieldPath = in.FieldPath return nil } // Convert_core_ObjectFieldSelector_To_v1_ObjectFieldSelector is an autogenerated conversion function. 
-func Convert_core_ObjectFieldSelector_To_v1_ObjectFieldSelector(in *core.ObjectFieldSelector, out *v1.ObjectFieldSelector, s conversion.Scope) error { +func Convert_core_ObjectFieldSelector_To_v1_ObjectFieldSelector(in *core.ObjectFieldSelector, out *corev1.ObjectFieldSelector, s conversion.Scope) error { return autoConvert_core_ObjectFieldSelector_To_v1_ObjectFieldSelector(in, out, s) } -func autoConvert_v1_ObjectReference_To_core_ObjectReference(in *v1.ObjectReference, out *core.ObjectReference, s conversion.Scope) error { +func autoConvert_v1_ObjectReference_To_core_ObjectReference(in *corev1.ObjectReference, out *core.ObjectReference, s conversion.Scope) error { out.Kind = in.Kind out.Namespace = in.Namespace out.Name = in.Name @@ -5486,11 +5491,11 @@ func autoConvert_v1_ObjectReference_To_core_ObjectReference(in *v1.ObjectReferen } // Convert_v1_ObjectReference_To_core_ObjectReference is an autogenerated conversion function. -func Convert_v1_ObjectReference_To_core_ObjectReference(in *v1.ObjectReference, out *core.ObjectReference, s conversion.Scope) error { +func Convert_v1_ObjectReference_To_core_ObjectReference(in *corev1.ObjectReference, out *core.ObjectReference, s conversion.Scope) error { return autoConvert_v1_ObjectReference_To_core_ObjectReference(in, out, s) } -func autoConvert_core_ObjectReference_To_v1_ObjectReference(in *core.ObjectReference, out *v1.ObjectReference, s conversion.Scope) error { +func autoConvert_core_ObjectReference_To_v1_ObjectReference(in *core.ObjectReference, out *corev1.ObjectReference, s conversion.Scope) error { out.Kind = in.Kind out.Namespace = in.Namespace out.Name = in.Name @@ -5502,11 +5507,11 @@ func autoConvert_core_ObjectReference_To_v1_ObjectReference(in *core.ObjectRefer } // Convert_core_ObjectReference_To_v1_ObjectReference is an autogenerated conversion function. -func Convert_core_ObjectReference_To_v1_ObjectReference(in *core.ObjectReference, out *v1.ObjectReference, s conversion.Scope) error { +func Convert_core_ObjectReference_To_v1_ObjectReference(in *core.ObjectReference, out *corev1.ObjectReference, s conversion.Scope) error { return autoConvert_core_ObjectReference_To_v1_ObjectReference(in, out, s) } -func autoConvert_v1_PersistentVolume_To_core_PersistentVolume(in *v1.PersistentVolume, out *core.PersistentVolume, s conversion.Scope) error { +func autoConvert_v1_PersistentVolume_To_core_PersistentVolume(in *corev1.PersistentVolume, out *core.PersistentVolume, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta if err := Convert_v1_PersistentVolumeSpec_To_core_PersistentVolumeSpec(&in.Spec, &out.Spec, s); err != nil { return err @@ -5518,11 +5523,11 @@ func autoConvert_v1_PersistentVolume_To_core_PersistentVolume(in *v1.PersistentV } // Convert_v1_PersistentVolume_To_core_PersistentVolume is an autogenerated conversion function. 
-func Convert_v1_PersistentVolume_To_core_PersistentVolume(in *v1.PersistentVolume, out *core.PersistentVolume, s conversion.Scope) error { +func Convert_v1_PersistentVolume_To_core_PersistentVolume(in *corev1.PersistentVolume, out *core.PersistentVolume, s conversion.Scope) error { return autoConvert_v1_PersistentVolume_To_core_PersistentVolume(in, out, s) } -func autoConvert_core_PersistentVolume_To_v1_PersistentVolume(in *core.PersistentVolume, out *v1.PersistentVolume, s conversion.Scope) error { +func autoConvert_core_PersistentVolume_To_v1_PersistentVolume(in *core.PersistentVolume, out *corev1.PersistentVolume, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta if err := Convert_core_PersistentVolumeSpec_To_v1_PersistentVolumeSpec(&in.Spec, &out.Spec, s); err != nil { return err @@ -5534,11 +5539,11 @@ func autoConvert_core_PersistentVolume_To_v1_PersistentVolume(in *core.Persisten } // Convert_core_PersistentVolume_To_v1_PersistentVolume is an autogenerated conversion function. -func Convert_core_PersistentVolume_To_v1_PersistentVolume(in *core.PersistentVolume, out *v1.PersistentVolume, s conversion.Scope) error { +func Convert_core_PersistentVolume_To_v1_PersistentVolume(in *core.PersistentVolume, out *corev1.PersistentVolume, s conversion.Scope) error { return autoConvert_core_PersistentVolume_To_v1_PersistentVolume(in, out, s) } -func autoConvert_v1_PersistentVolumeClaim_To_core_PersistentVolumeClaim(in *v1.PersistentVolumeClaim, out *core.PersistentVolumeClaim, s conversion.Scope) error { +func autoConvert_v1_PersistentVolumeClaim_To_core_PersistentVolumeClaim(in *corev1.PersistentVolumeClaim, out *core.PersistentVolumeClaim, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta if err := Convert_v1_PersistentVolumeClaimSpec_To_core_PersistentVolumeClaimSpec(&in.Spec, &out.Spec, s); err != nil { return err @@ -5550,11 +5555,11 @@ func autoConvert_v1_PersistentVolumeClaim_To_core_PersistentVolumeClaim(in *v1.P } // Convert_v1_PersistentVolumeClaim_To_core_PersistentVolumeClaim is an autogenerated conversion function. -func Convert_v1_PersistentVolumeClaim_To_core_PersistentVolumeClaim(in *v1.PersistentVolumeClaim, out *core.PersistentVolumeClaim, s conversion.Scope) error { +func Convert_v1_PersistentVolumeClaim_To_core_PersistentVolumeClaim(in *corev1.PersistentVolumeClaim, out *core.PersistentVolumeClaim, s conversion.Scope) error { return autoConvert_v1_PersistentVolumeClaim_To_core_PersistentVolumeClaim(in, out, s) } -func autoConvert_core_PersistentVolumeClaim_To_v1_PersistentVolumeClaim(in *core.PersistentVolumeClaim, out *v1.PersistentVolumeClaim, s conversion.Scope) error { +func autoConvert_core_PersistentVolumeClaim_To_v1_PersistentVolumeClaim(in *core.PersistentVolumeClaim, out *corev1.PersistentVolumeClaim, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta if err := Convert_core_PersistentVolumeClaimSpec_To_v1_PersistentVolumeClaimSpec(&in.Spec, &out.Spec, s); err != nil { return err @@ -5566,11 +5571,11 @@ func autoConvert_core_PersistentVolumeClaim_To_v1_PersistentVolumeClaim(in *core } // Convert_core_PersistentVolumeClaim_To_v1_PersistentVolumeClaim is an autogenerated conversion function. 
-func Convert_core_PersistentVolumeClaim_To_v1_PersistentVolumeClaim(in *core.PersistentVolumeClaim, out *v1.PersistentVolumeClaim, s conversion.Scope) error { +func Convert_core_PersistentVolumeClaim_To_v1_PersistentVolumeClaim(in *core.PersistentVolumeClaim, out *corev1.PersistentVolumeClaim, s conversion.Scope) error { return autoConvert_core_PersistentVolumeClaim_To_v1_PersistentVolumeClaim(in, out, s) } -func autoConvert_v1_PersistentVolumeClaimCondition_To_core_PersistentVolumeClaimCondition(in *v1.PersistentVolumeClaimCondition, out *core.PersistentVolumeClaimCondition, s conversion.Scope) error { +func autoConvert_v1_PersistentVolumeClaimCondition_To_core_PersistentVolumeClaimCondition(in *corev1.PersistentVolumeClaimCondition, out *core.PersistentVolumeClaimCondition, s conversion.Scope) error { out.Type = core.PersistentVolumeClaimConditionType(in.Type) out.Status = core.ConditionStatus(in.Status) out.LastProbeTime = in.LastProbeTime @@ -5581,13 +5586,13 @@ func autoConvert_v1_PersistentVolumeClaimCondition_To_core_PersistentVolumeClaim } // Convert_v1_PersistentVolumeClaimCondition_To_core_PersistentVolumeClaimCondition is an autogenerated conversion function. -func Convert_v1_PersistentVolumeClaimCondition_To_core_PersistentVolumeClaimCondition(in *v1.PersistentVolumeClaimCondition, out *core.PersistentVolumeClaimCondition, s conversion.Scope) error { +func Convert_v1_PersistentVolumeClaimCondition_To_core_PersistentVolumeClaimCondition(in *corev1.PersistentVolumeClaimCondition, out *core.PersistentVolumeClaimCondition, s conversion.Scope) error { return autoConvert_v1_PersistentVolumeClaimCondition_To_core_PersistentVolumeClaimCondition(in, out, s) } -func autoConvert_core_PersistentVolumeClaimCondition_To_v1_PersistentVolumeClaimCondition(in *core.PersistentVolumeClaimCondition, out *v1.PersistentVolumeClaimCondition, s conversion.Scope) error { - out.Type = v1.PersistentVolumeClaimConditionType(in.Type) - out.Status = v1.ConditionStatus(in.Status) +func autoConvert_core_PersistentVolumeClaimCondition_To_v1_PersistentVolumeClaimCondition(in *core.PersistentVolumeClaimCondition, out *corev1.PersistentVolumeClaimCondition, s conversion.Scope) error { + out.Type = corev1.PersistentVolumeClaimConditionType(in.Type) + out.Status = corev1.ConditionStatus(in.Status) out.LastProbeTime = in.LastProbeTime out.LastTransitionTime = in.LastTransitionTime out.Reason = in.Reason @@ -5596,33 +5601,33 @@ func autoConvert_core_PersistentVolumeClaimCondition_To_v1_PersistentVolumeClaim } // Convert_core_PersistentVolumeClaimCondition_To_v1_PersistentVolumeClaimCondition is an autogenerated conversion function. 
-func Convert_core_PersistentVolumeClaimCondition_To_v1_PersistentVolumeClaimCondition(in *core.PersistentVolumeClaimCondition, out *v1.PersistentVolumeClaimCondition, s conversion.Scope) error { +func Convert_core_PersistentVolumeClaimCondition_To_v1_PersistentVolumeClaimCondition(in *core.PersistentVolumeClaimCondition, out *corev1.PersistentVolumeClaimCondition, s conversion.Scope) error { return autoConvert_core_PersistentVolumeClaimCondition_To_v1_PersistentVolumeClaimCondition(in, out, s) } -func autoConvert_v1_PersistentVolumeClaimList_To_core_PersistentVolumeClaimList(in *v1.PersistentVolumeClaimList, out *core.PersistentVolumeClaimList, s conversion.Scope) error { +func autoConvert_v1_PersistentVolumeClaimList_To_core_PersistentVolumeClaimList(in *corev1.PersistentVolumeClaimList, out *core.PersistentVolumeClaimList, s conversion.Scope) error { out.ListMeta = in.ListMeta out.Items = *(*[]core.PersistentVolumeClaim)(unsafe.Pointer(&in.Items)) return nil } // Convert_v1_PersistentVolumeClaimList_To_core_PersistentVolumeClaimList is an autogenerated conversion function. -func Convert_v1_PersistentVolumeClaimList_To_core_PersistentVolumeClaimList(in *v1.PersistentVolumeClaimList, out *core.PersistentVolumeClaimList, s conversion.Scope) error { +func Convert_v1_PersistentVolumeClaimList_To_core_PersistentVolumeClaimList(in *corev1.PersistentVolumeClaimList, out *core.PersistentVolumeClaimList, s conversion.Scope) error { return autoConvert_v1_PersistentVolumeClaimList_To_core_PersistentVolumeClaimList(in, out, s) } -func autoConvert_core_PersistentVolumeClaimList_To_v1_PersistentVolumeClaimList(in *core.PersistentVolumeClaimList, out *v1.PersistentVolumeClaimList, s conversion.Scope) error { +func autoConvert_core_PersistentVolumeClaimList_To_v1_PersistentVolumeClaimList(in *core.PersistentVolumeClaimList, out *corev1.PersistentVolumeClaimList, s conversion.Scope) error { out.ListMeta = in.ListMeta - out.Items = *(*[]v1.PersistentVolumeClaim)(unsafe.Pointer(&in.Items)) + out.Items = *(*[]corev1.PersistentVolumeClaim)(unsafe.Pointer(&in.Items)) return nil } // Convert_core_PersistentVolumeClaimList_To_v1_PersistentVolumeClaimList is an autogenerated conversion function. -func Convert_core_PersistentVolumeClaimList_To_v1_PersistentVolumeClaimList(in *core.PersistentVolumeClaimList, out *v1.PersistentVolumeClaimList, s conversion.Scope) error { +func Convert_core_PersistentVolumeClaimList_To_v1_PersistentVolumeClaimList(in *core.PersistentVolumeClaimList, out *corev1.PersistentVolumeClaimList, s conversion.Scope) error { return autoConvert_core_PersistentVolumeClaimList_To_v1_PersistentVolumeClaimList(in, out, s) } -func autoConvert_v1_PersistentVolumeClaimSpec_To_core_PersistentVolumeClaimSpec(in *v1.PersistentVolumeClaimSpec, out *core.PersistentVolumeClaimSpec, s conversion.Scope) error { +func autoConvert_v1_PersistentVolumeClaimSpec_To_core_PersistentVolumeClaimSpec(in *corev1.PersistentVolumeClaimSpec, out *core.PersistentVolumeClaimSpec, s conversion.Scope) error { out.AccessModes = *(*[]core.PersistentVolumeAccessMode)(unsafe.Pointer(&in.AccessModes)) out.Selector = (*metav1.LabelSelector)(unsafe.Pointer(in.Selector)) if err := Convert_v1_VolumeResourceRequirements_To_core_VolumeResourceRequirements(&in.Resources, &out.Resources, s); err != nil { @@ -5638,31 +5643,31 @@ func autoConvert_v1_PersistentVolumeClaimSpec_To_core_PersistentVolumeClaimSpec( } // Convert_v1_PersistentVolumeClaimSpec_To_core_PersistentVolumeClaimSpec is an autogenerated conversion function. 
-func Convert_v1_PersistentVolumeClaimSpec_To_core_PersistentVolumeClaimSpec(in *v1.PersistentVolumeClaimSpec, out *core.PersistentVolumeClaimSpec, s conversion.Scope) error { +func Convert_v1_PersistentVolumeClaimSpec_To_core_PersistentVolumeClaimSpec(in *corev1.PersistentVolumeClaimSpec, out *core.PersistentVolumeClaimSpec, s conversion.Scope) error { return autoConvert_v1_PersistentVolumeClaimSpec_To_core_PersistentVolumeClaimSpec(in, out, s) } -func autoConvert_core_PersistentVolumeClaimSpec_To_v1_PersistentVolumeClaimSpec(in *core.PersistentVolumeClaimSpec, out *v1.PersistentVolumeClaimSpec, s conversion.Scope) error { - out.AccessModes = *(*[]v1.PersistentVolumeAccessMode)(unsafe.Pointer(&in.AccessModes)) +func autoConvert_core_PersistentVolumeClaimSpec_To_v1_PersistentVolumeClaimSpec(in *core.PersistentVolumeClaimSpec, out *corev1.PersistentVolumeClaimSpec, s conversion.Scope) error { + out.AccessModes = *(*[]corev1.PersistentVolumeAccessMode)(unsafe.Pointer(&in.AccessModes)) out.Selector = (*metav1.LabelSelector)(unsafe.Pointer(in.Selector)) if err := Convert_core_VolumeResourceRequirements_To_v1_VolumeResourceRequirements(&in.Resources, &out.Resources, s); err != nil { return err } out.VolumeName = in.VolumeName out.StorageClassName = (*string)(unsafe.Pointer(in.StorageClassName)) - out.VolumeMode = (*v1.PersistentVolumeMode)(unsafe.Pointer(in.VolumeMode)) - out.DataSource = (*v1.TypedLocalObjectReference)(unsafe.Pointer(in.DataSource)) - out.DataSourceRef = (*v1.TypedObjectReference)(unsafe.Pointer(in.DataSourceRef)) + out.VolumeMode = (*corev1.PersistentVolumeMode)(unsafe.Pointer(in.VolumeMode)) + out.DataSource = (*corev1.TypedLocalObjectReference)(unsafe.Pointer(in.DataSource)) + out.DataSourceRef = (*corev1.TypedObjectReference)(unsafe.Pointer(in.DataSourceRef)) out.VolumeAttributesClassName = (*string)(unsafe.Pointer(in.VolumeAttributesClassName)) return nil } // Convert_core_PersistentVolumeClaimSpec_To_v1_PersistentVolumeClaimSpec is an autogenerated conversion function. -func Convert_core_PersistentVolumeClaimSpec_To_v1_PersistentVolumeClaimSpec(in *core.PersistentVolumeClaimSpec, out *v1.PersistentVolumeClaimSpec, s conversion.Scope) error { +func Convert_core_PersistentVolumeClaimSpec_To_v1_PersistentVolumeClaimSpec(in *core.PersistentVolumeClaimSpec, out *corev1.PersistentVolumeClaimSpec, s conversion.Scope) error { return autoConvert_core_PersistentVolumeClaimSpec_To_v1_PersistentVolumeClaimSpec(in, out, s) } -func autoConvert_v1_PersistentVolumeClaimStatus_To_core_PersistentVolumeClaimStatus(in *v1.PersistentVolumeClaimStatus, out *core.PersistentVolumeClaimStatus, s conversion.Scope) error { +func autoConvert_v1_PersistentVolumeClaimStatus_To_core_PersistentVolumeClaimStatus(in *corev1.PersistentVolumeClaimStatus, out *core.PersistentVolumeClaimStatus, s conversion.Scope) error { out.Phase = core.PersistentVolumeClaimPhase(in.Phase) out.AccessModes = *(*[]core.PersistentVolumeAccessMode)(unsafe.Pointer(&in.AccessModes)) out.Capacity = *(*core.ResourceList)(unsafe.Pointer(&in.Capacity)) @@ -5675,28 +5680,28 @@ func autoConvert_v1_PersistentVolumeClaimStatus_To_core_PersistentVolumeClaimSta } // Convert_v1_PersistentVolumeClaimStatus_To_core_PersistentVolumeClaimStatus is an autogenerated conversion function. 
-func Convert_v1_PersistentVolumeClaimStatus_To_core_PersistentVolumeClaimStatus(in *v1.PersistentVolumeClaimStatus, out *core.PersistentVolumeClaimStatus, s conversion.Scope) error { +func Convert_v1_PersistentVolumeClaimStatus_To_core_PersistentVolumeClaimStatus(in *corev1.PersistentVolumeClaimStatus, out *core.PersistentVolumeClaimStatus, s conversion.Scope) error { return autoConvert_v1_PersistentVolumeClaimStatus_To_core_PersistentVolumeClaimStatus(in, out, s) } -func autoConvert_core_PersistentVolumeClaimStatus_To_v1_PersistentVolumeClaimStatus(in *core.PersistentVolumeClaimStatus, out *v1.PersistentVolumeClaimStatus, s conversion.Scope) error { - out.Phase = v1.PersistentVolumeClaimPhase(in.Phase) - out.AccessModes = *(*[]v1.PersistentVolumeAccessMode)(unsafe.Pointer(&in.AccessModes)) - out.Capacity = *(*v1.ResourceList)(unsafe.Pointer(&in.Capacity)) - out.Conditions = *(*[]v1.PersistentVolumeClaimCondition)(unsafe.Pointer(&in.Conditions)) - out.AllocatedResources = *(*v1.ResourceList)(unsafe.Pointer(&in.AllocatedResources)) - out.AllocatedResourceStatuses = *(*map[v1.ResourceName]v1.ClaimResourceStatus)(unsafe.Pointer(&in.AllocatedResourceStatuses)) +func autoConvert_core_PersistentVolumeClaimStatus_To_v1_PersistentVolumeClaimStatus(in *core.PersistentVolumeClaimStatus, out *corev1.PersistentVolumeClaimStatus, s conversion.Scope) error { + out.Phase = corev1.PersistentVolumeClaimPhase(in.Phase) + out.AccessModes = *(*[]corev1.PersistentVolumeAccessMode)(unsafe.Pointer(&in.AccessModes)) + out.Capacity = *(*corev1.ResourceList)(unsafe.Pointer(&in.Capacity)) + out.Conditions = *(*[]corev1.PersistentVolumeClaimCondition)(unsafe.Pointer(&in.Conditions)) + out.AllocatedResources = *(*corev1.ResourceList)(unsafe.Pointer(&in.AllocatedResources)) + out.AllocatedResourceStatuses = *(*map[corev1.ResourceName]corev1.ClaimResourceStatus)(unsafe.Pointer(&in.AllocatedResourceStatuses)) out.CurrentVolumeAttributesClassName = (*string)(unsafe.Pointer(in.CurrentVolumeAttributesClassName)) - out.ModifyVolumeStatus = (*v1.ModifyVolumeStatus)(unsafe.Pointer(in.ModifyVolumeStatus)) + out.ModifyVolumeStatus = (*corev1.ModifyVolumeStatus)(unsafe.Pointer(in.ModifyVolumeStatus)) return nil } // Convert_core_PersistentVolumeClaimStatus_To_v1_PersistentVolumeClaimStatus is an autogenerated conversion function. 
-func Convert_core_PersistentVolumeClaimStatus_To_v1_PersistentVolumeClaimStatus(in *core.PersistentVolumeClaimStatus, out *v1.PersistentVolumeClaimStatus, s conversion.Scope) error { +func Convert_core_PersistentVolumeClaimStatus_To_v1_PersistentVolumeClaimStatus(in *core.PersistentVolumeClaimStatus, out *corev1.PersistentVolumeClaimStatus, s conversion.Scope) error { return autoConvert_core_PersistentVolumeClaimStatus_To_v1_PersistentVolumeClaimStatus(in, out, s) } -func autoConvert_v1_PersistentVolumeClaimTemplate_To_core_PersistentVolumeClaimTemplate(in *v1.PersistentVolumeClaimTemplate, out *core.PersistentVolumeClaimTemplate, s conversion.Scope) error { +func autoConvert_v1_PersistentVolumeClaimTemplate_To_core_PersistentVolumeClaimTemplate(in *corev1.PersistentVolumeClaimTemplate, out *core.PersistentVolumeClaimTemplate, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta if err := Convert_v1_PersistentVolumeClaimSpec_To_core_PersistentVolumeClaimSpec(&in.Spec, &out.Spec, s); err != nil { return err @@ -5705,11 +5710,11 @@ func autoConvert_v1_PersistentVolumeClaimTemplate_To_core_PersistentVolumeClaimT } // Convert_v1_PersistentVolumeClaimTemplate_To_core_PersistentVolumeClaimTemplate is an autogenerated conversion function. -func Convert_v1_PersistentVolumeClaimTemplate_To_core_PersistentVolumeClaimTemplate(in *v1.PersistentVolumeClaimTemplate, out *core.PersistentVolumeClaimTemplate, s conversion.Scope) error { +func Convert_v1_PersistentVolumeClaimTemplate_To_core_PersistentVolumeClaimTemplate(in *corev1.PersistentVolumeClaimTemplate, out *core.PersistentVolumeClaimTemplate, s conversion.Scope) error { return autoConvert_v1_PersistentVolumeClaimTemplate_To_core_PersistentVolumeClaimTemplate(in, out, s) } -func autoConvert_core_PersistentVolumeClaimTemplate_To_v1_PersistentVolumeClaimTemplate(in *core.PersistentVolumeClaimTemplate, out *v1.PersistentVolumeClaimTemplate, s conversion.Scope) error { +func autoConvert_core_PersistentVolumeClaimTemplate_To_v1_PersistentVolumeClaimTemplate(in *core.PersistentVolumeClaimTemplate, out *corev1.PersistentVolumeClaimTemplate, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta if err := Convert_core_PersistentVolumeClaimSpec_To_v1_PersistentVolumeClaimSpec(&in.Spec, &out.Spec, s); err != nil { return err @@ -5718,33 +5723,33 @@ func autoConvert_core_PersistentVolumeClaimTemplate_To_v1_PersistentVolumeClaimT } // Convert_core_PersistentVolumeClaimTemplate_To_v1_PersistentVolumeClaimTemplate is an autogenerated conversion function. 
-func Convert_core_PersistentVolumeClaimTemplate_To_v1_PersistentVolumeClaimTemplate(in *core.PersistentVolumeClaimTemplate, out *v1.PersistentVolumeClaimTemplate, s conversion.Scope) error { +func Convert_core_PersistentVolumeClaimTemplate_To_v1_PersistentVolumeClaimTemplate(in *core.PersistentVolumeClaimTemplate, out *corev1.PersistentVolumeClaimTemplate, s conversion.Scope) error { return autoConvert_core_PersistentVolumeClaimTemplate_To_v1_PersistentVolumeClaimTemplate(in, out, s) } -func autoConvert_v1_PersistentVolumeClaimVolumeSource_To_core_PersistentVolumeClaimVolumeSource(in *v1.PersistentVolumeClaimVolumeSource, out *core.PersistentVolumeClaimVolumeSource, s conversion.Scope) error { +func autoConvert_v1_PersistentVolumeClaimVolumeSource_To_core_PersistentVolumeClaimVolumeSource(in *corev1.PersistentVolumeClaimVolumeSource, out *core.PersistentVolumeClaimVolumeSource, s conversion.Scope) error { out.ClaimName = in.ClaimName out.ReadOnly = in.ReadOnly return nil } // Convert_v1_PersistentVolumeClaimVolumeSource_To_core_PersistentVolumeClaimVolumeSource is an autogenerated conversion function. -func Convert_v1_PersistentVolumeClaimVolumeSource_To_core_PersistentVolumeClaimVolumeSource(in *v1.PersistentVolumeClaimVolumeSource, out *core.PersistentVolumeClaimVolumeSource, s conversion.Scope) error { +func Convert_v1_PersistentVolumeClaimVolumeSource_To_core_PersistentVolumeClaimVolumeSource(in *corev1.PersistentVolumeClaimVolumeSource, out *core.PersistentVolumeClaimVolumeSource, s conversion.Scope) error { return autoConvert_v1_PersistentVolumeClaimVolumeSource_To_core_PersistentVolumeClaimVolumeSource(in, out, s) } -func autoConvert_core_PersistentVolumeClaimVolumeSource_To_v1_PersistentVolumeClaimVolumeSource(in *core.PersistentVolumeClaimVolumeSource, out *v1.PersistentVolumeClaimVolumeSource, s conversion.Scope) error { +func autoConvert_core_PersistentVolumeClaimVolumeSource_To_v1_PersistentVolumeClaimVolumeSource(in *core.PersistentVolumeClaimVolumeSource, out *corev1.PersistentVolumeClaimVolumeSource, s conversion.Scope) error { out.ClaimName = in.ClaimName out.ReadOnly = in.ReadOnly return nil } // Convert_core_PersistentVolumeClaimVolumeSource_To_v1_PersistentVolumeClaimVolumeSource is an autogenerated conversion function. -func Convert_core_PersistentVolumeClaimVolumeSource_To_v1_PersistentVolumeClaimVolumeSource(in *core.PersistentVolumeClaimVolumeSource, out *v1.PersistentVolumeClaimVolumeSource, s conversion.Scope) error { +func Convert_core_PersistentVolumeClaimVolumeSource_To_v1_PersistentVolumeClaimVolumeSource(in *core.PersistentVolumeClaimVolumeSource, out *corev1.PersistentVolumeClaimVolumeSource, s conversion.Scope) error { return autoConvert_core_PersistentVolumeClaimVolumeSource_To_v1_PersistentVolumeClaimVolumeSource(in, out, s) } -func autoConvert_v1_PersistentVolumeList_To_core_PersistentVolumeList(in *v1.PersistentVolumeList, out *core.PersistentVolumeList, s conversion.Scope) error { +func autoConvert_v1_PersistentVolumeList_To_core_PersistentVolumeList(in *corev1.PersistentVolumeList, out *core.PersistentVolumeList, s conversion.Scope) error { out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items @@ -5761,15 +5766,15 @@ func autoConvert_v1_PersistentVolumeList_To_core_PersistentVolumeList(in *v1.Per } // Convert_v1_PersistentVolumeList_To_core_PersistentVolumeList is an autogenerated conversion function. 
-func Convert_v1_PersistentVolumeList_To_core_PersistentVolumeList(in *v1.PersistentVolumeList, out *core.PersistentVolumeList, s conversion.Scope) error { +func Convert_v1_PersistentVolumeList_To_core_PersistentVolumeList(in *corev1.PersistentVolumeList, out *core.PersistentVolumeList, s conversion.Scope) error { return autoConvert_v1_PersistentVolumeList_To_core_PersistentVolumeList(in, out, s) } -func autoConvert_core_PersistentVolumeList_To_v1_PersistentVolumeList(in *core.PersistentVolumeList, out *v1.PersistentVolumeList, s conversion.Scope) error { +func autoConvert_core_PersistentVolumeList_To_v1_PersistentVolumeList(in *core.PersistentVolumeList, out *corev1.PersistentVolumeList, s conversion.Scope) error { out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items - *out = make([]v1.PersistentVolume, len(*in)) + *out = make([]corev1.PersistentVolume, len(*in)) for i := range *in { if err := Convert_core_PersistentVolume_To_v1_PersistentVolume(&(*in)[i], &(*out)[i], s); err != nil { return err @@ -5782,11 +5787,11 @@ func autoConvert_core_PersistentVolumeList_To_v1_PersistentVolumeList(in *core.P } // Convert_core_PersistentVolumeList_To_v1_PersistentVolumeList is an autogenerated conversion function. -func Convert_core_PersistentVolumeList_To_v1_PersistentVolumeList(in *core.PersistentVolumeList, out *v1.PersistentVolumeList, s conversion.Scope) error { +func Convert_core_PersistentVolumeList_To_v1_PersistentVolumeList(in *core.PersistentVolumeList, out *corev1.PersistentVolumeList, s conversion.Scope) error { return autoConvert_core_PersistentVolumeList_To_v1_PersistentVolumeList(in, out, s) } -func autoConvert_v1_PersistentVolumeSource_To_core_PersistentVolumeSource(in *v1.PersistentVolumeSource, out *core.PersistentVolumeSource, s conversion.Scope) error { +func autoConvert_v1_PersistentVolumeSource_To_core_PersistentVolumeSource(in *corev1.PersistentVolumeSource, out *core.PersistentVolumeSource, s conversion.Scope) error { out.GCEPersistentDisk = (*core.GCEPersistentDiskVolumeSource)(unsafe.Pointer(in.GCEPersistentDisk)) out.AWSElasticBlockStore = (*core.AWSElasticBlockStoreVolumeSource)(unsafe.Pointer(in.AWSElasticBlockStore)) out.HostPath = (*core.HostPathVolumeSource)(unsafe.Pointer(in.HostPath)) @@ -5813,42 +5818,42 @@ func autoConvert_v1_PersistentVolumeSource_To_core_PersistentVolumeSource(in *v1 } // Convert_v1_PersistentVolumeSource_To_core_PersistentVolumeSource is an autogenerated conversion function. 
-func Convert_v1_PersistentVolumeSource_To_core_PersistentVolumeSource(in *v1.PersistentVolumeSource, out *core.PersistentVolumeSource, s conversion.Scope) error {
+func Convert_v1_PersistentVolumeSource_To_core_PersistentVolumeSource(in *corev1.PersistentVolumeSource, out *core.PersistentVolumeSource, s conversion.Scope) error {
 return autoConvert_v1_PersistentVolumeSource_To_core_PersistentVolumeSource(in, out, s)
 }
-func autoConvert_core_PersistentVolumeSource_To_v1_PersistentVolumeSource(in *core.PersistentVolumeSource, out *v1.PersistentVolumeSource, s conversion.Scope) error {
- out.GCEPersistentDisk = (*v1.GCEPersistentDiskVolumeSource)(unsafe.Pointer(in.GCEPersistentDisk))
- out.AWSElasticBlockStore = (*v1.AWSElasticBlockStoreVolumeSource)(unsafe.Pointer(in.AWSElasticBlockStore))
- out.HostPath = (*v1.HostPathVolumeSource)(unsafe.Pointer(in.HostPath))
- out.Glusterfs = (*v1.GlusterfsPersistentVolumeSource)(unsafe.Pointer(in.Glusterfs))
- out.NFS = (*v1.NFSVolumeSource)(unsafe.Pointer(in.NFS))
- out.RBD = (*v1.RBDPersistentVolumeSource)(unsafe.Pointer(in.RBD))
- out.Quobyte = (*v1.QuobyteVolumeSource)(unsafe.Pointer(in.Quobyte))
- out.ISCSI = (*v1.ISCSIPersistentVolumeSource)(unsafe.Pointer(in.ISCSI))
- out.FlexVolume = (*v1.FlexPersistentVolumeSource)(unsafe.Pointer(in.FlexVolume))
- out.Cinder = (*v1.CinderPersistentVolumeSource)(unsafe.Pointer(in.Cinder))
- out.CephFS = (*v1.CephFSPersistentVolumeSource)(unsafe.Pointer(in.CephFS))
- out.FC = (*v1.FCVolumeSource)(unsafe.Pointer(in.FC))
- out.Flocker = (*v1.FlockerVolumeSource)(unsafe.Pointer(in.Flocker))
- out.AzureFile = (*v1.AzureFilePersistentVolumeSource)(unsafe.Pointer(in.AzureFile))
- out.VsphereVolume = (*v1.VsphereVirtualDiskVolumeSource)(unsafe.Pointer(in.VsphereVolume))
- out.AzureDisk = (*v1.AzureDiskVolumeSource)(unsafe.Pointer(in.AzureDisk))
- out.PhotonPersistentDisk = (*v1.PhotonPersistentDiskVolumeSource)(unsafe.Pointer(in.PhotonPersistentDisk))
- out.PortworxVolume = (*v1.PortworxVolumeSource)(unsafe.Pointer(in.PortworxVolume))
- out.ScaleIO = (*v1.ScaleIOPersistentVolumeSource)(unsafe.Pointer(in.ScaleIO))
- out.Local = (*v1.LocalVolumeSource)(unsafe.Pointer(in.Local))
- out.StorageOS = (*v1.StorageOSPersistentVolumeSource)(unsafe.Pointer(in.StorageOS))
- out.CSI = (*v1.CSIPersistentVolumeSource)(unsafe.Pointer(in.CSI))
+func autoConvert_core_PersistentVolumeSource_To_v1_PersistentVolumeSource(in *core.PersistentVolumeSource, out *corev1.PersistentVolumeSource, s conversion.Scope) error {
+ out.GCEPersistentDisk = (*corev1.GCEPersistentDiskVolumeSource)(unsafe.Pointer(in.GCEPersistentDisk))
+ out.AWSElasticBlockStore = (*corev1.AWSElasticBlockStoreVolumeSource)(unsafe.Pointer(in.AWSElasticBlockStore))
+ out.HostPath = (*corev1.HostPathVolumeSource)(unsafe.Pointer(in.HostPath))
+ out.Glusterfs = (*corev1.GlusterfsPersistentVolumeSource)(unsafe.Pointer(in.Glusterfs))
+ out.NFS = (*corev1.NFSVolumeSource)(unsafe.Pointer(in.NFS))
+ out.RBD = (*corev1.RBDPersistentVolumeSource)(unsafe.Pointer(in.RBD))
+ out.Quobyte = (*corev1.QuobyteVolumeSource)(unsafe.Pointer(in.Quobyte))
+ out.ISCSI = (*corev1.ISCSIPersistentVolumeSource)(unsafe.Pointer(in.ISCSI))
+ out.FlexVolume = (*corev1.FlexPersistentVolumeSource)(unsafe.Pointer(in.FlexVolume))
+ out.Cinder = (*corev1.CinderPersistentVolumeSource)(unsafe.Pointer(in.Cinder))
+ out.CephFS = (*corev1.CephFSPersistentVolumeSource)(unsafe.Pointer(in.CephFS))
+ out.FC = (*corev1.FCVolumeSource)(unsafe.Pointer(in.FC))
+ out.Flocker = (*corev1.FlockerVolumeSource)(unsafe.Pointer(in.Flocker))
+ out.AzureFile = (*corev1.AzureFilePersistentVolumeSource)(unsafe.Pointer(in.AzureFile))
+ out.VsphereVolume = (*corev1.VsphereVirtualDiskVolumeSource)(unsafe.Pointer(in.VsphereVolume))
+ out.AzureDisk = (*corev1.AzureDiskVolumeSource)(unsafe.Pointer(in.AzureDisk))
+ out.PhotonPersistentDisk = (*corev1.PhotonPersistentDiskVolumeSource)(unsafe.Pointer(in.PhotonPersistentDisk))
+ out.PortworxVolume = (*corev1.PortworxVolumeSource)(unsafe.Pointer(in.PortworxVolume))
+ out.ScaleIO = (*corev1.ScaleIOPersistentVolumeSource)(unsafe.Pointer(in.ScaleIO))
+ out.Local = (*corev1.LocalVolumeSource)(unsafe.Pointer(in.Local))
+ out.StorageOS = (*corev1.StorageOSPersistentVolumeSource)(unsafe.Pointer(in.StorageOS))
+ out.CSI = (*corev1.CSIPersistentVolumeSource)(unsafe.Pointer(in.CSI))
 return nil
 }
 // Convert_core_PersistentVolumeSource_To_v1_PersistentVolumeSource is an autogenerated conversion function.
-func Convert_core_PersistentVolumeSource_To_v1_PersistentVolumeSource(in *core.PersistentVolumeSource, out *v1.PersistentVolumeSource, s conversion.Scope) error {
+func Convert_core_PersistentVolumeSource_To_v1_PersistentVolumeSource(in *core.PersistentVolumeSource, out *corev1.PersistentVolumeSource, s conversion.Scope) error {
 return autoConvert_core_PersistentVolumeSource_To_v1_PersistentVolumeSource(in, out, s)
 }
-func autoConvert_v1_PersistentVolumeSpec_To_core_PersistentVolumeSpec(in *v1.PersistentVolumeSpec, out *core.PersistentVolumeSpec, s conversion.Scope) error {
+func autoConvert_v1_PersistentVolumeSpec_To_core_PersistentVolumeSpec(in *corev1.PersistentVolumeSpec, out *core.PersistentVolumeSpec, s conversion.Scope) error {
 out.Capacity = *(*core.ResourceList)(unsafe.Pointer(&in.Capacity))
 if err := Convert_v1_PersistentVolumeSource_To_core_PersistentVolumeSource(&in.PersistentVolumeSource, &out.PersistentVolumeSource, s); err != nil {
 return err
@@ -5864,23 +5869,23 @@ func autoConvert_v1_PersistentVolumeSpec_To_core_PersistentVolumeSpec(in *v1.Per
 return nil
 }
-func autoConvert_core_PersistentVolumeSpec_To_v1_PersistentVolumeSpec(in *core.PersistentVolumeSpec, out *v1.PersistentVolumeSpec, s conversion.Scope) error {
- out.Capacity = *(*v1.ResourceList)(unsafe.Pointer(&in.Capacity))
+func autoConvert_core_PersistentVolumeSpec_To_v1_PersistentVolumeSpec(in *core.PersistentVolumeSpec, out *corev1.PersistentVolumeSpec, s conversion.Scope) error {
+ out.Capacity = *(*corev1.ResourceList)(unsafe.Pointer(&in.Capacity))
 if err := Convert_core_PersistentVolumeSource_To_v1_PersistentVolumeSource(&in.PersistentVolumeSource, &out.PersistentVolumeSource, s); err != nil {
 return err
 }
- out.AccessModes = *(*[]v1.PersistentVolumeAccessMode)(unsafe.Pointer(&in.AccessModes))
- out.ClaimRef = (*v1.ObjectReference)(unsafe.Pointer(in.ClaimRef))
- out.PersistentVolumeReclaimPolicy = v1.PersistentVolumeReclaimPolicy(in.PersistentVolumeReclaimPolicy)
+ out.AccessModes = *(*[]corev1.PersistentVolumeAccessMode)(unsafe.Pointer(&in.AccessModes))
+ out.ClaimRef = (*corev1.ObjectReference)(unsafe.Pointer(in.ClaimRef))
+ out.PersistentVolumeReclaimPolicy = corev1.PersistentVolumeReclaimPolicy(in.PersistentVolumeReclaimPolicy)
 out.StorageClassName = in.StorageClassName
 out.MountOptions = *(*[]string)(unsafe.Pointer(&in.MountOptions))
- out.VolumeMode = (*v1.PersistentVolumeMode)(unsafe.Pointer(in.VolumeMode))
- out.NodeAffinity = (*v1.VolumeNodeAffinity)(unsafe.Pointer(in.NodeAffinity))
+ out.VolumeMode = (*corev1.PersistentVolumeMode)(unsafe.Pointer(in.VolumeMode))
+ out.NodeAffinity = (*corev1.VolumeNodeAffinity)(unsafe.Pointer(in.NodeAffinity))
 out.VolumeAttributesClassName = (*string)(unsafe.Pointer(in.VolumeAttributesClassName))
 return nil
 }
-func autoConvert_v1_PersistentVolumeStatus_To_core_PersistentVolumeStatus(in *v1.PersistentVolumeStatus, out *core.PersistentVolumeStatus, s conversion.Scope) error {
+func autoConvert_v1_PersistentVolumeStatus_To_core_PersistentVolumeStatus(in *corev1.PersistentVolumeStatus, out *core.PersistentVolumeStatus, s conversion.Scope) error {
 out.Phase = core.PersistentVolumePhase(in.Phase)
 out.Message = in.Message
 out.Reason = in.Reason
@@ -5889,12 +5894,12 @@ func autoConvert_v1_PersistentVolumeStatus_To_core_PersistentVolumeStatus(in *v1
 }
 // Convert_v1_PersistentVolumeStatus_To_core_PersistentVolumeStatus is an autogenerated conversion function.
-func Convert_v1_PersistentVolumeStatus_To_core_PersistentVolumeStatus(in *v1.PersistentVolumeStatus, out *core.PersistentVolumeStatus, s conversion.Scope) error {
+func Convert_v1_PersistentVolumeStatus_To_core_PersistentVolumeStatus(in *corev1.PersistentVolumeStatus, out *core.PersistentVolumeStatus, s conversion.Scope) error {
 return autoConvert_v1_PersistentVolumeStatus_To_core_PersistentVolumeStatus(in, out, s)
 }
-func autoConvert_core_PersistentVolumeStatus_To_v1_PersistentVolumeStatus(in *core.PersistentVolumeStatus, out *v1.PersistentVolumeStatus, s conversion.Scope) error {
- out.Phase = v1.PersistentVolumePhase(in.Phase)
+func autoConvert_core_PersistentVolumeStatus_To_v1_PersistentVolumeStatus(in *core.PersistentVolumeStatus, out *corev1.PersistentVolumeStatus, s conversion.Scope) error {
+ out.Phase = corev1.PersistentVolumePhase(in.Phase)
 out.Message = in.Message
 out.Reason = in.Reason
 out.LastPhaseTransitionTime = (*metav1.Time)(unsafe.Pointer(in.LastPhaseTransitionTime))
@@ -5902,33 +5907,33 @@ func autoConvert_core_PersistentVolumeStatus_To_v1_PersistentVolumeStatus(in *co
 }
 // Convert_core_PersistentVolumeStatus_To_v1_PersistentVolumeStatus is an autogenerated conversion function.
-func Convert_core_PersistentVolumeStatus_To_v1_PersistentVolumeStatus(in *core.PersistentVolumeStatus, out *v1.PersistentVolumeStatus, s conversion.Scope) error {
+func Convert_core_PersistentVolumeStatus_To_v1_PersistentVolumeStatus(in *core.PersistentVolumeStatus, out *corev1.PersistentVolumeStatus, s conversion.Scope) error {
 return autoConvert_core_PersistentVolumeStatus_To_v1_PersistentVolumeStatus(in, out, s)
 }
-func autoConvert_v1_PhotonPersistentDiskVolumeSource_To_core_PhotonPersistentDiskVolumeSource(in *v1.PhotonPersistentDiskVolumeSource, out *core.PhotonPersistentDiskVolumeSource, s conversion.Scope) error {
+func autoConvert_v1_PhotonPersistentDiskVolumeSource_To_core_PhotonPersistentDiskVolumeSource(in *corev1.PhotonPersistentDiskVolumeSource, out *core.PhotonPersistentDiskVolumeSource, s conversion.Scope) error {
 out.PdID = in.PdID
 out.FSType = in.FSType
 return nil
 }
 // Convert_v1_PhotonPersistentDiskVolumeSource_To_core_PhotonPersistentDiskVolumeSource is an autogenerated conversion function.
-func Convert_v1_PhotonPersistentDiskVolumeSource_To_core_PhotonPersistentDiskVolumeSource(in *v1.PhotonPersistentDiskVolumeSource, out *core.PhotonPersistentDiskVolumeSource, s conversion.Scope) error { +func Convert_v1_PhotonPersistentDiskVolumeSource_To_core_PhotonPersistentDiskVolumeSource(in *corev1.PhotonPersistentDiskVolumeSource, out *core.PhotonPersistentDiskVolumeSource, s conversion.Scope) error { return autoConvert_v1_PhotonPersistentDiskVolumeSource_To_core_PhotonPersistentDiskVolumeSource(in, out, s) } -func autoConvert_core_PhotonPersistentDiskVolumeSource_To_v1_PhotonPersistentDiskVolumeSource(in *core.PhotonPersistentDiskVolumeSource, out *v1.PhotonPersistentDiskVolumeSource, s conversion.Scope) error { +func autoConvert_core_PhotonPersistentDiskVolumeSource_To_v1_PhotonPersistentDiskVolumeSource(in *core.PhotonPersistentDiskVolumeSource, out *corev1.PhotonPersistentDiskVolumeSource, s conversion.Scope) error { out.PdID = in.PdID out.FSType = in.FSType return nil } // Convert_core_PhotonPersistentDiskVolumeSource_To_v1_PhotonPersistentDiskVolumeSource is an autogenerated conversion function. -func Convert_core_PhotonPersistentDiskVolumeSource_To_v1_PhotonPersistentDiskVolumeSource(in *core.PhotonPersistentDiskVolumeSource, out *v1.PhotonPersistentDiskVolumeSource, s conversion.Scope) error { +func Convert_core_PhotonPersistentDiskVolumeSource_To_v1_PhotonPersistentDiskVolumeSource(in *core.PhotonPersistentDiskVolumeSource, out *corev1.PhotonPersistentDiskVolumeSource, s conversion.Scope) error { return autoConvert_core_PhotonPersistentDiskVolumeSource_To_v1_PhotonPersistentDiskVolumeSource(in, out, s) } -func autoConvert_v1_Pod_To_core_Pod(in *v1.Pod, out *core.Pod, s conversion.Scope) error { +func autoConvert_v1_Pod_To_core_Pod(in *corev1.Pod, out *core.Pod, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta if err := Convert_v1_PodSpec_To_core_PodSpec(&in.Spec, &out.Spec, s); err != nil { return err @@ -5939,7 +5944,7 @@ func autoConvert_v1_Pod_To_core_Pod(in *v1.Pod, out *core.Pod, s conversion.Scop return nil } -func autoConvert_core_Pod_To_v1_Pod(in *core.Pod, out *v1.Pod, s conversion.Scope) error { +func autoConvert_core_Pod_To_v1_Pod(in *core.Pod, out *corev1.Pod, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta if err := Convert_core_PodSpec_To_v1_PodSpec(&in.Spec, &out.Spec, s); err != nil { return err @@ -5950,29 +5955,29 @@ func autoConvert_core_Pod_To_v1_Pod(in *core.Pod, out *v1.Pod, s conversion.Scop return nil } -func autoConvert_v1_PodAffinity_To_core_PodAffinity(in *v1.PodAffinity, out *core.PodAffinity, s conversion.Scope) error { +func autoConvert_v1_PodAffinity_To_core_PodAffinity(in *corev1.PodAffinity, out *core.PodAffinity, s conversion.Scope) error { out.RequiredDuringSchedulingIgnoredDuringExecution = *(*[]core.PodAffinityTerm)(unsafe.Pointer(&in.RequiredDuringSchedulingIgnoredDuringExecution)) out.PreferredDuringSchedulingIgnoredDuringExecution = *(*[]core.WeightedPodAffinityTerm)(unsafe.Pointer(&in.PreferredDuringSchedulingIgnoredDuringExecution)) return nil } // Convert_v1_PodAffinity_To_core_PodAffinity is an autogenerated conversion function. 
-func Convert_v1_PodAffinity_To_core_PodAffinity(in *v1.PodAffinity, out *core.PodAffinity, s conversion.Scope) error { +func Convert_v1_PodAffinity_To_core_PodAffinity(in *corev1.PodAffinity, out *core.PodAffinity, s conversion.Scope) error { return autoConvert_v1_PodAffinity_To_core_PodAffinity(in, out, s) } -func autoConvert_core_PodAffinity_To_v1_PodAffinity(in *core.PodAffinity, out *v1.PodAffinity, s conversion.Scope) error { - out.RequiredDuringSchedulingIgnoredDuringExecution = *(*[]v1.PodAffinityTerm)(unsafe.Pointer(&in.RequiredDuringSchedulingIgnoredDuringExecution)) - out.PreferredDuringSchedulingIgnoredDuringExecution = *(*[]v1.WeightedPodAffinityTerm)(unsafe.Pointer(&in.PreferredDuringSchedulingIgnoredDuringExecution)) +func autoConvert_core_PodAffinity_To_v1_PodAffinity(in *core.PodAffinity, out *corev1.PodAffinity, s conversion.Scope) error { + out.RequiredDuringSchedulingIgnoredDuringExecution = *(*[]corev1.PodAffinityTerm)(unsafe.Pointer(&in.RequiredDuringSchedulingIgnoredDuringExecution)) + out.PreferredDuringSchedulingIgnoredDuringExecution = *(*[]corev1.WeightedPodAffinityTerm)(unsafe.Pointer(&in.PreferredDuringSchedulingIgnoredDuringExecution)) return nil } // Convert_core_PodAffinity_To_v1_PodAffinity is an autogenerated conversion function. -func Convert_core_PodAffinity_To_v1_PodAffinity(in *core.PodAffinity, out *v1.PodAffinity, s conversion.Scope) error { +func Convert_core_PodAffinity_To_v1_PodAffinity(in *core.PodAffinity, out *corev1.PodAffinity, s conversion.Scope) error { return autoConvert_core_PodAffinity_To_v1_PodAffinity(in, out, s) } -func autoConvert_v1_PodAffinityTerm_To_core_PodAffinityTerm(in *v1.PodAffinityTerm, out *core.PodAffinityTerm, s conversion.Scope) error { +func autoConvert_v1_PodAffinityTerm_To_core_PodAffinityTerm(in *corev1.PodAffinityTerm, out *core.PodAffinityTerm, s conversion.Scope) error { out.LabelSelector = (*metav1.LabelSelector)(unsafe.Pointer(in.LabelSelector)) out.Namespaces = *(*[]string)(unsafe.Pointer(&in.Namespaces)) out.TopologyKey = in.TopologyKey @@ -5983,11 +5988,11 @@ func autoConvert_v1_PodAffinityTerm_To_core_PodAffinityTerm(in *v1.PodAffinityTe } // Convert_v1_PodAffinityTerm_To_core_PodAffinityTerm is an autogenerated conversion function. -func Convert_v1_PodAffinityTerm_To_core_PodAffinityTerm(in *v1.PodAffinityTerm, out *core.PodAffinityTerm, s conversion.Scope) error { +func Convert_v1_PodAffinityTerm_To_core_PodAffinityTerm(in *corev1.PodAffinityTerm, out *core.PodAffinityTerm, s conversion.Scope) error { return autoConvert_v1_PodAffinityTerm_To_core_PodAffinityTerm(in, out, s) } -func autoConvert_core_PodAffinityTerm_To_v1_PodAffinityTerm(in *core.PodAffinityTerm, out *v1.PodAffinityTerm, s conversion.Scope) error { +func autoConvert_core_PodAffinityTerm_To_v1_PodAffinityTerm(in *core.PodAffinityTerm, out *corev1.PodAffinityTerm, s conversion.Scope) error { out.LabelSelector = (*metav1.LabelSelector)(unsafe.Pointer(in.LabelSelector)) out.Namespaces = *(*[]string)(unsafe.Pointer(&in.Namespaces)) out.TopologyKey = in.TopologyKey @@ -5998,33 +6003,33 @@ func autoConvert_core_PodAffinityTerm_To_v1_PodAffinityTerm(in *core.PodAffinity } // Convert_core_PodAffinityTerm_To_v1_PodAffinityTerm is an autogenerated conversion function. 
-func Convert_core_PodAffinityTerm_To_v1_PodAffinityTerm(in *core.PodAffinityTerm, out *v1.PodAffinityTerm, s conversion.Scope) error { +func Convert_core_PodAffinityTerm_To_v1_PodAffinityTerm(in *core.PodAffinityTerm, out *corev1.PodAffinityTerm, s conversion.Scope) error { return autoConvert_core_PodAffinityTerm_To_v1_PodAffinityTerm(in, out, s) } -func autoConvert_v1_PodAntiAffinity_To_core_PodAntiAffinity(in *v1.PodAntiAffinity, out *core.PodAntiAffinity, s conversion.Scope) error { +func autoConvert_v1_PodAntiAffinity_To_core_PodAntiAffinity(in *corev1.PodAntiAffinity, out *core.PodAntiAffinity, s conversion.Scope) error { out.RequiredDuringSchedulingIgnoredDuringExecution = *(*[]core.PodAffinityTerm)(unsafe.Pointer(&in.RequiredDuringSchedulingIgnoredDuringExecution)) out.PreferredDuringSchedulingIgnoredDuringExecution = *(*[]core.WeightedPodAffinityTerm)(unsafe.Pointer(&in.PreferredDuringSchedulingIgnoredDuringExecution)) return nil } // Convert_v1_PodAntiAffinity_To_core_PodAntiAffinity is an autogenerated conversion function. -func Convert_v1_PodAntiAffinity_To_core_PodAntiAffinity(in *v1.PodAntiAffinity, out *core.PodAntiAffinity, s conversion.Scope) error { +func Convert_v1_PodAntiAffinity_To_core_PodAntiAffinity(in *corev1.PodAntiAffinity, out *core.PodAntiAffinity, s conversion.Scope) error { return autoConvert_v1_PodAntiAffinity_To_core_PodAntiAffinity(in, out, s) } -func autoConvert_core_PodAntiAffinity_To_v1_PodAntiAffinity(in *core.PodAntiAffinity, out *v1.PodAntiAffinity, s conversion.Scope) error { - out.RequiredDuringSchedulingIgnoredDuringExecution = *(*[]v1.PodAffinityTerm)(unsafe.Pointer(&in.RequiredDuringSchedulingIgnoredDuringExecution)) - out.PreferredDuringSchedulingIgnoredDuringExecution = *(*[]v1.WeightedPodAffinityTerm)(unsafe.Pointer(&in.PreferredDuringSchedulingIgnoredDuringExecution)) +func autoConvert_core_PodAntiAffinity_To_v1_PodAntiAffinity(in *core.PodAntiAffinity, out *corev1.PodAntiAffinity, s conversion.Scope) error { + out.RequiredDuringSchedulingIgnoredDuringExecution = *(*[]corev1.PodAffinityTerm)(unsafe.Pointer(&in.RequiredDuringSchedulingIgnoredDuringExecution)) + out.PreferredDuringSchedulingIgnoredDuringExecution = *(*[]corev1.WeightedPodAffinityTerm)(unsafe.Pointer(&in.PreferredDuringSchedulingIgnoredDuringExecution)) return nil } // Convert_core_PodAntiAffinity_To_v1_PodAntiAffinity is an autogenerated conversion function. -func Convert_core_PodAntiAffinity_To_v1_PodAntiAffinity(in *core.PodAntiAffinity, out *v1.PodAntiAffinity, s conversion.Scope) error { +func Convert_core_PodAntiAffinity_To_v1_PodAntiAffinity(in *core.PodAntiAffinity, out *corev1.PodAntiAffinity, s conversion.Scope) error { return autoConvert_core_PodAntiAffinity_To_v1_PodAntiAffinity(in, out, s) } -func autoConvert_v1_PodAttachOptions_To_core_PodAttachOptions(in *v1.PodAttachOptions, out *core.PodAttachOptions, s conversion.Scope) error { +func autoConvert_v1_PodAttachOptions_To_core_PodAttachOptions(in *corev1.PodAttachOptions, out *core.PodAttachOptions, s conversion.Scope) error { out.Stdin = in.Stdin out.Stdout = in.Stdout out.Stderr = in.Stderr @@ -6034,11 +6039,11 @@ func autoConvert_v1_PodAttachOptions_To_core_PodAttachOptions(in *v1.PodAttachOp } // Convert_v1_PodAttachOptions_To_core_PodAttachOptions is an autogenerated conversion function. 
-func Convert_v1_PodAttachOptions_To_core_PodAttachOptions(in *v1.PodAttachOptions, out *core.PodAttachOptions, s conversion.Scope) error { +func Convert_v1_PodAttachOptions_To_core_PodAttachOptions(in *corev1.PodAttachOptions, out *core.PodAttachOptions, s conversion.Scope) error { return autoConvert_v1_PodAttachOptions_To_core_PodAttachOptions(in, out, s) } -func autoConvert_core_PodAttachOptions_To_v1_PodAttachOptions(in *core.PodAttachOptions, out *v1.PodAttachOptions, s conversion.Scope) error { +func autoConvert_core_PodAttachOptions_To_v1_PodAttachOptions(in *core.PodAttachOptions, out *corev1.PodAttachOptions, s conversion.Scope) error { out.Stdin = in.Stdin out.Stdout = in.Stdout out.Stderr = in.Stderr @@ -6048,11 +6053,11 @@ func autoConvert_core_PodAttachOptions_To_v1_PodAttachOptions(in *core.PodAttach } // Convert_core_PodAttachOptions_To_v1_PodAttachOptions is an autogenerated conversion function. -func Convert_core_PodAttachOptions_To_v1_PodAttachOptions(in *core.PodAttachOptions, out *v1.PodAttachOptions, s conversion.Scope) error { +func Convert_core_PodAttachOptions_To_v1_PodAttachOptions(in *core.PodAttachOptions, out *corev1.PodAttachOptions, s conversion.Scope) error { return autoConvert_core_PodAttachOptions_To_v1_PodAttachOptions(in, out, s) } -func autoConvert_url_Values_To_v1_PodAttachOptions(in *url.Values, out *v1.PodAttachOptions, s conversion.Scope) error { +func autoConvert_url_Values_To_v1_PodAttachOptions(in *url.Values, out *corev1.PodAttachOptions, s conversion.Scope) error { // WARNING: Field TypeMeta does not have json tag, skipping. if values, ok := map[string][]string(*in)["stdin"]; ok && len(values) > 0 { @@ -6094,11 +6099,11 @@ func autoConvert_url_Values_To_v1_PodAttachOptions(in *url.Values, out *v1.PodAt } // Convert_url_Values_To_v1_PodAttachOptions is an autogenerated conversion function. -func Convert_url_Values_To_v1_PodAttachOptions(in *url.Values, out *v1.PodAttachOptions, s conversion.Scope) error { +func Convert_url_Values_To_v1_PodAttachOptions(in *url.Values, out *corev1.PodAttachOptions, s conversion.Scope) error { return autoConvert_url_Values_To_v1_PodAttachOptions(in, out, s) } -func autoConvert_v1_PodCondition_To_core_PodCondition(in *v1.PodCondition, out *core.PodCondition, s conversion.Scope) error { +func autoConvert_v1_PodCondition_To_core_PodCondition(in *corev1.PodCondition, out *core.PodCondition, s conversion.Scope) error { out.Type = core.PodConditionType(in.Type) out.Status = core.ConditionStatus(in.Status) out.LastProbeTime = in.LastProbeTime @@ -6109,13 +6114,13 @@ func autoConvert_v1_PodCondition_To_core_PodCondition(in *v1.PodCondition, out * } // Convert_v1_PodCondition_To_core_PodCondition is an autogenerated conversion function. 
-func Convert_v1_PodCondition_To_core_PodCondition(in *v1.PodCondition, out *core.PodCondition, s conversion.Scope) error { +func Convert_v1_PodCondition_To_core_PodCondition(in *corev1.PodCondition, out *core.PodCondition, s conversion.Scope) error { return autoConvert_v1_PodCondition_To_core_PodCondition(in, out, s) } -func autoConvert_core_PodCondition_To_v1_PodCondition(in *core.PodCondition, out *v1.PodCondition, s conversion.Scope) error { - out.Type = v1.PodConditionType(in.Type) - out.Status = v1.ConditionStatus(in.Status) +func autoConvert_core_PodCondition_To_v1_PodCondition(in *core.PodCondition, out *corev1.PodCondition, s conversion.Scope) error { + out.Type = corev1.PodConditionType(in.Type) + out.Status = corev1.ConditionStatus(in.Status) out.LastProbeTime = in.LastProbeTime out.LastTransitionTime = in.LastTransitionTime out.Reason = in.Reason @@ -6124,11 +6129,11 @@ func autoConvert_core_PodCondition_To_v1_PodCondition(in *core.PodCondition, out } // Convert_core_PodCondition_To_v1_PodCondition is an autogenerated conversion function. -func Convert_core_PodCondition_To_v1_PodCondition(in *core.PodCondition, out *v1.PodCondition, s conversion.Scope) error { +func Convert_core_PodCondition_To_v1_PodCondition(in *core.PodCondition, out *corev1.PodCondition, s conversion.Scope) error { return autoConvert_core_PodCondition_To_v1_PodCondition(in, out, s) } -func autoConvert_v1_PodDNSConfig_To_core_PodDNSConfig(in *v1.PodDNSConfig, out *core.PodDNSConfig, s conversion.Scope) error { +func autoConvert_v1_PodDNSConfig_To_core_PodDNSConfig(in *corev1.PodDNSConfig, out *core.PodDNSConfig, s conversion.Scope) error { out.Nameservers = *(*[]string)(unsafe.Pointer(&in.Nameservers)) out.Searches = *(*[]string)(unsafe.Pointer(&in.Searches)) out.Options = *(*[]core.PodDNSConfigOption)(unsafe.Pointer(&in.Options)) @@ -6136,45 +6141,45 @@ func autoConvert_v1_PodDNSConfig_To_core_PodDNSConfig(in *v1.PodDNSConfig, out * } // Convert_v1_PodDNSConfig_To_core_PodDNSConfig is an autogenerated conversion function. -func Convert_v1_PodDNSConfig_To_core_PodDNSConfig(in *v1.PodDNSConfig, out *core.PodDNSConfig, s conversion.Scope) error { +func Convert_v1_PodDNSConfig_To_core_PodDNSConfig(in *corev1.PodDNSConfig, out *core.PodDNSConfig, s conversion.Scope) error { return autoConvert_v1_PodDNSConfig_To_core_PodDNSConfig(in, out, s) } -func autoConvert_core_PodDNSConfig_To_v1_PodDNSConfig(in *core.PodDNSConfig, out *v1.PodDNSConfig, s conversion.Scope) error { +func autoConvert_core_PodDNSConfig_To_v1_PodDNSConfig(in *core.PodDNSConfig, out *corev1.PodDNSConfig, s conversion.Scope) error { out.Nameservers = *(*[]string)(unsafe.Pointer(&in.Nameservers)) out.Searches = *(*[]string)(unsafe.Pointer(&in.Searches)) - out.Options = *(*[]v1.PodDNSConfigOption)(unsafe.Pointer(&in.Options)) + out.Options = *(*[]corev1.PodDNSConfigOption)(unsafe.Pointer(&in.Options)) return nil } // Convert_core_PodDNSConfig_To_v1_PodDNSConfig is an autogenerated conversion function. 
-func Convert_core_PodDNSConfig_To_v1_PodDNSConfig(in *core.PodDNSConfig, out *v1.PodDNSConfig, s conversion.Scope) error { +func Convert_core_PodDNSConfig_To_v1_PodDNSConfig(in *core.PodDNSConfig, out *corev1.PodDNSConfig, s conversion.Scope) error { return autoConvert_core_PodDNSConfig_To_v1_PodDNSConfig(in, out, s) } -func autoConvert_v1_PodDNSConfigOption_To_core_PodDNSConfigOption(in *v1.PodDNSConfigOption, out *core.PodDNSConfigOption, s conversion.Scope) error { +func autoConvert_v1_PodDNSConfigOption_To_core_PodDNSConfigOption(in *corev1.PodDNSConfigOption, out *core.PodDNSConfigOption, s conversion.Scope) error { out.Name = in.Name out.Value = (*string)(unsafe.Pointer(in.Value)) return nil } // Convert_v1_PodDNSConfigOption_To_core_PodDNSConfigOption is an autogenerated conversion function. -func Convert_v1_PodDNSConfigOption_To_core_PodDNSConfigOption(in *v1.PodDNSConfigOption, out *core.PodDNSConfigOption, s conversion.Scope) error { +func Convert_v1_PodDNSConfigOption_To_core_PodDNSConfigOption(in *corev1.PodDNSConfigOption, out *core.PodDNSConfigOption, s conversion.Scope) error { return autoConvert_v1_PodDNSConfigOption_To_core_PodDNSConfigOption(in, out, s) } -func autoConvert_core_PodDNSConfigOption_To_v1_PodDNSConfigOption(in *core.PodDNSConfigOption, out *v1.PodDNSConfigOption, s conversion.Scope) error { +func autoConvert_core_PodDNSConfigOption_To_v1_PodDNSConfigOption(in *core.PodDNSConfigOption, out *corev1.PodDNSConfigOption, s conversion.Scope) error { out.Name = in.Name out.Value = (*string)(unsafe.Pointer(in.Value)) return nil } // Convert_core_PodDNSConfigOption_To_v1_PodDNSConfigOption is an autogenerated conversion function. -func Convert_core_PodDNSConfigOption_To_v1_PodDNSConfigOption(in *core.PodDNSConfigOption, out *v1.PodDNSConfigOption, s conversion.Scope) error { +func Convert_core_PodDNSConfigOption_To_v1_PodDNSConfigOption(in *core.PodDNSConfigOption, out *corev1.PodDNSConfigOption, s conversion.Scope) error { return autoConvert_core_PodDNSConfigOption_To_v1_PodDNSConfigOption(in, out, s) } -func autoConvert_v1_PodExecOptions_To_core_PodExecOptions(in *v1.PodExecOptions, out *core.PodExecOptions, s conversion.Scope) error { +func autoConvert_v1_PodExecOptions_To_core_PodExecOptions(in *corev1.PodExecOptions, out *core.PodExecOptions, s conversion.Scope) error { out.Stdin = in.Stdin out.Stdout = in.Stdout out.Stderr = in.Stderr @@ -6185,11 +6190,11 @@ func autoConvert_v1_PodExecOptions_To_core_PodExecOptions(in *v1.PodExecOptions, } // Convert_v1_PodExecOptions_To_core_PodExecOptions is an autogenerated conversion function. -func Convert_v1_PodExecOptions_To_core_PodExecOptions(in *v1.PodExecOptions, out *core.PodExecOptions, s conversion.Scope) error { +func Convert_v1_PodExecOptions_To_core_PodExecOptions(in *corev1.PodExecOptions, out *core.PodExecOptions, s conversion.Scope) error { return autoConvert_v1_PodExecOptions_To_core_PodExecOptions(in, out, s) } -func autoConvert_core_PodExecOptions_To_v1_PodExecOptions(in *core.PodExecOptions, out *v1.PodExecOptions, s conversion.Scope) error { +func autoConvert_core_PodExecOptions_To_v1_PodExecOptions(in *core.PodExecOptions, out *corev1.PodExecOptions, s conversion.Scope) error { out.Stdin = in.Stdin out.Stdout = in.Stdout out.Stderr = in.Stderr @@ -6200,11 +6205,11 @@ func autoConvert_core_PodExecOptions_To_v1_PodExecOptions(in *core.PodExecOption } // Convert_core_PodExecOptions_To_v1_PodExecOptions is an autogenerated conversion function. 
-func Convert_core_PodExecOptions_To_v1_PodExecOptions(in *core.PodExecOptions, out *v1.PodExecOptions, s conversion.Scope) error { +func Convert_core_PodExecOptions_To_v1_PodExecOptions(in *core.PodExecOptions, out *corev1.PodExecOptions, s conversion.Scope) error { return autoConvert_core_PodExecOptions_To_v1_PodExecOptions(in, out, s) } -func autoConvert_url_Values_To_v1_PodExecOptions(in *url.Values, out *v1.PodExecOptions, s conversion.Scope) error { +func autoConvert_url_Values_To_v1_PodExecOptions(in *url.Values, out *corev1.PodExecOptions, s conversion.Scope) error { // WARNING: Field TypeMeta does not have json tag, skipping. if values, ok := map[string][]string(*in)["stdin"]; ok && len(values) > 0 { @@ -6251,31 +6256,31 @@ func autoConvert_url_Values_To_v1_PodExecOptions(in *url.Values, out *v1.PodExec } // Convert_url_Values_To_v1_PodExecOptions is an autogenerated conversion function. -func Convert_url_Values_To_v1_PodExecOptions(in *url.Values, out *v1.PodExecOptions, s conversion.Scope) error { +func Convert_url_Values_To_v1_PodExecOptions(in *url.Values, out *corev1.PodExecOptions, s conversion.Scope) error { return autoConvert_url_Values_To_v1_PodExecOptions(in, out, s) } -func autoConvert_v1_PodIP_To_core_PodIP(in *v1.PodIP, out *core.PodIP, s conversion.Scope) error { +func autoConvert_v1_PodIP_To_core_PodIP(in *corev1.PodIP, out *core.PodIP, s conversion.Scope) error { out.IP = in.IP return nil } // Convert_v1_PodIP_To_core_PodIP is an autogenerated conversion function. -func Convert_v1_PodIP_To_core_PodIP(in *v1.PodIP, out *core.PodIP, s conversion.Scope) error { +func Convert_v1_PodIP_To_core_PodIP(in *corev1.PodIP, out *core.PodIP, s conversion.Scope) error { return autoConvert_v1_PodIP_To_core_PodIP(in, out, s) } -func autoConvert_core_PodIP_To_v1_PodIP(in *core.PodIP, out *v1.PodIP, s conversion.Scope) error { +func autoConvert_core_PodIP_To_v1_PodIP(in *core.PodIP, out *corev1.PodIP, s conversion.Scope) error { out.IP = in.IP return nil } // Convert_core_PodIP_To_v1_PodIP is an autogenerated conversion function. -func Convert_core_PodIP_To_v1_PodIP(in *core.PodIP, out *v1.PodIP, s conversion.Scope) error { +func Convert_core_PodIP_To_v1_PodIP(in *core.PodIP, out *corev1.PodIP, s conversion.Scope) error { return autoConvert_core_PodIP_To_v1_PodIP(in, out, s) } -func autoConvert_v1_PodList_To_core_PodList(in *v1.PodList, out *core.PodList, s conversion.Scope) error { +func autoConvert_v1_PodList_To_core_PodList(in *corev1.PodList, out *core.PodList, s conversion.Scope) error { out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items @@ -6292,15 +6297,15 @@ func autoConvert_v1_PodList_To_core_PodList(in *v1.PodList, out *core.PodList, s } // Convert_v1_PodList_To_core_PodList is an autogenerated conversion function. 
-func Convert_v1_PodList_To_core_PodList(in *v1.PodList, out *core.PodList, s conversion.Scope) error { +func Convert_v1_PodList_To_core_PodList(in *corev1.PodList, out *core.PodList, s conversion.Scope) error { return autoConvert_v1_PodList_To_core_PodList(in, out, s) } -func autoConvert_core_PodList_To_v1_PodList(in *core.PodList, out *v1.PodList, s conversion.Scope) error { +func autoConvert_core_PodList_To_v1_PodList(in *core.PodList, out *corev1.PodList, s conversion.Scope) error { out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items - *out = make([]v1.Pod, len(*in)) + *out = make([]corev1.Pod, len(*in)) for i := range *in { if err := Convert_core_Pod_To_v1_Pod(&(*in)[i], &(*out)[i], s); err != nil { return err @@ -6313,11 +6318,11 @@ func autoConvert_core_PodList_To_v1_PodList(in *core.PodList, out *v1.PodList, s } // Convert_core_PodList_To_v1_PodList is an autogenerated conversion function. -func Convert_core_PodList_To_v1_PodList(in *core.PodList, out *v1.PodList, s conversion.Scope) error { +func Convert_core_PodList_To_v1_PodList(in *core.PodList, out *corev1.PodList, s conversion.Scope) error { return autoConvert_core_PodList_To_v1_PodList(in, out, s) } -func autoConvert_v1_PodLogOptions_To_core_PodLogOptions(in *v1.PodLogOptions, out *core.PodLogOptions, s conversion.Scope) error { +func autoConvert_v1_PodLogOptions_To_core_PodLogOptions(in *corev1.PodLogOptions, out *core.PodLogOptions, s conversion.Scope) error { out.Container = in.Container out.Follow = in.Follow out.Previous = in.Previous @@ -6327,15 +6332,16 @@ func autoConvert_v1_PodLogOptions_To_core_PodLogOptions(in *v1.PodLogOptions, ou out.TailLines = (*int64)(unsafe.Pointer(in.TailLines)) out.LimitBytes = (*int64)(unsafe.Pointer(in.LimitBytes)) out.InsecureSkipTLSVerifyBackend = in.InsecureSkipTLSVerifyBackend + out.Stream = (*string)(unsafe.Pointer(in.Stream)) return nil } // Convert_v1_PodLogOptions_To_core_PodLogOptions is an autogenerated conversion function. -func Convert_v1_PodLogOptions_To_core_PodLogOptions(in *v1.PodLogOptions, out *core.PodLogOptions, s conversion.Scope) error { +func Convert_v1_PodLogOptions_To_core_PodLogOptions(in *corev1.PodLogOptions, out *core.PodLogOptions, s conversion.Scope) error { return autoConvert_v1_PodLogOptions_To_core_PodLogOptions(in, out, s) } -func autoConvert_core_PodLogOptions_To_v1_PodLogOptions(in *core.PodLogOptions, out *v1.PodLogOptions, s conversion.Scope) error { +func autoConvert_core_PodLogOptions_To_v1_PodLogOptions(in *core.PodLogOptions, out *corev1.PodLogOptions, s conversion.Scope) error { out.Container = in.Container out.Follow = in.Follow out.Previous = in.Previous @@ -6345,15 +6351,16 @@ func autoConvert_core_PodLogOptions_To_v1_PodLogOptions(in *core.PodLogOptions, out.TailLines = (*int64)(unsafe.Pointer(in.TailLines)) out.LimitBytes = (*int64)(unsafe.Pointer(in.LimitBytes)) out.InsecureSkipTLSVerifyBackend = in.InsecureSkipTLSVerifyBackend + out.Stream = (*string)(unsafe.Pointer(in.Stream)) return nil } // Convert_core_PodLogOptions_To_v1_PodLogOptions is an autogenerated conversion function. 
-func Convert_core_PodLogOptions_To_v1_PodLogOptions(in *core.PodLogOptions, out *v1.PodLogOptions, s conversion.Scope) error { +func Convert_core_PodLogOptions_To_v1_PodLogOptions(in *core.PodLogOptions, out *corev1.PodLogOptions, s conversion.Scope) error { return autoConvert_core_PodLogOptions_To_v1_PodLogOptions(in, out, s) } -func autoConvert_url_Values_To_v1_PodLogOptions(in *url.Values, out *v1.PodLogOptions, s conversion.Scope) error { +func autoConvert_url_Values_To_v1_PodLogOptions(in *url.Values, out *corev1.PodLogOptions, s conversion.Scope) error { // WARNING: Field TypeMeta does not have json tag, skipping. if values, ok := map[string][]string(*in)["container"]; ok && len(values) > 0 { @@ -6419,55 +6426,62 @@ func autoConvert_url_Values_To_v1_PodLogOptions(in *url.Values, out *v1.PodLogOp } else { out.InsecureSkipTLSVerifyBackend = false } + if values, ok := map[string][]string(*in)["stream"]; ok && len(values) > 0 { + if err := Convert_Slice_string_To_Pointer_string(&values, &out.Stream, s); err != nil { + return err + } + } else { + out.Stream = nil + } return nil } // Convert_url_Values_To_v1_PodLogOptions is an autogenerated conversion function. -func Convert_url_Values_To_v1_PodLogOptions(in *url.Values, out *v1.PodLogOptions, s conversion.Scope) error { +func Convert_url_Values_To_v1_PodLogOptions(in *url.Values, out *corev1.PodLogOptions, s conversion.Scope) error { return autoConvert_url_Values_To_v1_PodLogOptions(in, out, s) } -func autoConvert_v1_PodOS_To_core_PodOS(in *v1.PodOS, out *core.PodOS, s conversion.Scope) error { +func autoConvert_v1_PodOS_To_core_PodOS(in *corev1.PodOS, out *core.PodOS, s conversion.Scope) error { out.Name = core.OSName(in.Name) return nil } // Convert_v1_PodOS_To_core_PodOS is an autogenerated conversion function. -func Convert_v1_PodOS_To_core_PodOS(in *v1.PodOS, out *core.PodOS, s conversion.Scope) error { +func Convert_v1_PodOS_To_core_PodOS(in *corev1.PodOS, out *core.PodOS, s conversion.Scope) error { return autoConvert_v1_PodOS_To_core_PodOS(in, out, s) } -func autoConvert_core_PodOS_To_v1_PodOS(in *core.PodOS, out *v1.PodOS, s conversion.Scope) error { - out.Name = v1.OSName(in.Name) +func autoConvert_core_PodOS_To_v1_PodOS(in *core.PodOS, out *corev1.PodOS, s conversion.Scope) error { + out.Name = corev1.OSName(in.Name) return nil } // Convert_core_PodOS_To_v1_PodOS is an autogenerated conversion function. -func Convert_core_PodOS_To_v1_PodOS(in *core.PodOS, out *v1.PodOS, s conversion.Scope) error { +func Convert_core_PodOS_To_v1_PodOS(in *core.PodOS, out *corev1.PodOS, s conversion.Scope) error { return autoConvert_core_PodOS_To_v1_PodOS(in, out, s) } -func autoConvert_v1_PodPortForwardOptions_To_core_PodPortForwardOptions(in *v1.PodPortForwardOptions, out *core.PodPortForwardOptions, s conversion.Scope) error { +func autoConvert_v1_PodPortForwardOptions_To_core_PodPortForwardOptions(in *corev1.PodPortForwardOptions, out *core.PodPortForwardOptions, s conversion.Scope) error { out.Ports = *(*[]int32)(unsafe.Pointer(&in.Ports)) return nil } // Convert_v1_PodPortForwardOptions_To_core_PodPortForwardOptions is an autogenerated conversion function. 
-func Convert_v1_PodPortForwardOptions_To_core_PodPortForwardOptions(in *v1.PodPortForwardOptions, out *core.PodPortForwardOptions, s conversion.Scope) error { +func Convert_v1_PodPortForwardOptions_To_core_PodPortForwardOptions(in *corev1.PodPortForwardOptions, out *core.PodPortForwardOptions, s conversion.Scope) error { return autoConvert_v1_PodPortForwardOptions_To_core_PodPortForwardOptions(in, out, s) } -func autoConvert_core_PodPortForwardOptions_To_v1_PodPortForwardOptions(in *core.PodPortForwardOptions, out *v1.PodPortForwardOptions, s conversion.Scope) error { +func autoConvert_core_PodPortForwardOptions_To_v1_PodPortForwardOptions(in *core.PodPortForwardOptions, out *corev1.PodPortForwardOptions, s conversion.Scope) error { out.Ports = *(*[]int32)(unsafe.Pointer(&in.Ports)) return nil } // Convert_core_PodPortForwardOptions_To_v1_PodPortForwardOptions is an autogenerated conversion function. -func Convert_core_PodPortForwardOptions_To_v1_PodPortForwardOptions(in *core.PodPortForwardOptions, out *v1.PodPortForwardOptions, s conversion.Scope) error { +func Convert_core_PodPortForwardOptions_To_v1_PodPortForwardOptions(in *core.PodPortForwardOptions, out *corev1.PodPortForwardOptions, s conversion.Scope) error { return autoConvert_core_PodPortForwardOptions_To_v1_PodPortForwardOptions(in, out, s) } -func autoConvert_url_Values_To_v1_PodPortForwardOptions(in *url.Values, out *v1.PodPortForwardOptions, s conversion.Scope) error { +func autoConvert_url_Values_To_v1_PodPortForwardOptions(in *url.Values, out *corev1.PodPortForwardOptions, s conversion.Scope) error { // WARNING: Field TypeMeta does not have json tag, skipping. if values, ok := map[string][]string(*in)["ports"]; ok && len(values) > 0 { @@ -6481,31 +6495,31 @@ func autoConvert_url_Values_To_v1_PodPortForwardOptions(in *url.Values, out *v1. } // Convert_url_Values_To_v1_PodPortForwardOptions is an autogenerated conversion function. -func Convert_url_Values_To_v1_PodPortForwardOptions(in *url.Values, out *v1.PodPortForwardOptions, s conversion.Scope) error { +func Convert_url_Values_To_v1_PodPortForwardOptions(in *url.Values, out *corev1.PodPortForwardOptions, s conversion.Scope) error { return autoConvert_url_Values_To_v1_PodPortForwardOptions(in, out, s) } -func autoConvert_v1_PodProxyOptions_To_core_PodProxyOptions(in *v1.PodProxyOptions, out *core.PodProxyOptions, s conversion.Scope) error { +func autoConvert_v1_PodProxyOptions_To_core_PodProxyOptions(in *corev1.PodProxyOptions, out *core.PodProxyOptions, s conversion.Scope) error { out.Path = in.Path return nil } // Convert_v1_PodProxyOptions_To_core_PodProxyOptions is an autogenerated conversion function. -func Convert_v1_PodProxyOptions_To_core_PodProxyOptions(in *v1.PodProxyOptions, out *core.PodProxyOptions, s conversion.Scope) error { +func Convert_v1_PodProxyOptions_To_core_PodProxyOptions(in *corev1.PodProxyOptions, out *core.PodProxyOptions, s conversion.Scope) error { return autoConvert_v1_PodProxyOptions_To_core_PodProxyOptions(in, out, s) } -func autoConvert_core_PodProxyOptions_To_v1_PodProxyOptions(in *core.PodProxyOptions, out *v1.PodProxyOptions, s conversion.Scope) error { +func autoConvert_core_PodProxyOptions_To_v1_PodProxyOptions(in *core.PodProxyOptions, out *corev1.PodProxyOptions, s conversion.Scope) error { out.Path = in.Path return nil } // Convert_core_PodProxyOptions_To_v1_PodProxyOptions is an autogenerated conversion function. 
-func Convert_core_PodProxyOptions_To_v1_PodProxyOptions(in *core.PodProxyOptions, out *v1.PodProxyOptions, s conversion.Scope) error { +func Convert_core_PodProxyOptions_To_v1_PodProxyOptions(in *core.PodProxyOptions, out *corev1.PodProxyOptions, s conversion.Scope) error { return autoConvert_core_PodProxyOptions_To_v1_PodProxyOptions(in, out, s) } -func autoConvert_url_Values_To_v1_PodProxyOptions(in *url.Values, out *v1.PodProxyOptions, s conversion.Scope) error { +func autoConvert_url_Values_To_v1_PodProxyOptions(in *url.Values, out *corev1.PodProxyOptions, s conversion.Scope) error { // WARNING: Field TypeMeta does not have json tag, skipping. if values, ok := map[string][]string(*in)["path"]; ok && len(values) > 0 { @@ -6519,31 +6533,31 @@ func autoConvert_url_Values_To_v1_PodProxyOptions(in *url.Values, out *v1.PodPro } // Convert_url_Values_To_v1_PodProxyOptions is an autogenerated conversion function. -func Convert_url_Values_To_v1_PodProxyOptions(in *url.Values, out *v1.PodProxyOptions, s conversion.Scope) error { +func Convert_url_Values_To_v1_PodProxyOptions(in *url.Values, out *corev1.PodProxyOptions, s conversion.Scope) error { return autoConvert_url_Values_To_v1_PodProxyOptions(in, out, s) } -func autoConvert_v1_PodReadinessGate_To_core_PodReadinessGate(in *v1.PodReadinessGate, out *core.PodReadinessGate, s conversion.Scope) error { +func autoConvert_v1_PodReadinessGate_To_core_PodReadinessGate(in *corev1.PodReadinessGate, out *core.PodReadinessGate, s conversion.Scope) error { out.ConditionType = core.PodConditionType(in.ConditionType) return nil } // Convert_v1_PodReadinessGate_To_core_PodReadinessGate is an autogenerated conversion function. -func Convert_v1_PodReadinessGate_To_core_PodReadinessGate(in *v1.PodReadinessGate, out *core.PodReadinessGate, s conversion.Scope) error { +func Convert_v1_PodReadinessGate_To_core_PodReadinessGate(in *corev1.PodReadinessGate, out *core.PodReadinessGate, s conversion.Scope) error { return autoConvert_v1_PodReadinessGate_To_core_PodReadinessGate(in, out, s) } -func autoConvert_core_PodReadinessGate_To_v1_PodReadinessGate(in *core.PodReadinessGate, out *v1.PodReadinessGate, s conversion.Scope) error { - out.ConditionType = v1.PodConditionType(in.ConditionType) +func autoConvert_core_PodReadinessGate_To_v1_PodReadinessGate(in *core.PodReadinessGate, out *corev1.PodReadinessGate, s conversion.Scope) error { + out.ConditionType = corev1.PodConditionType(in.ConditionType) return nil } // Convert_core_PodReadinessGate_To_v1_PodReadinessGate is an autogenerated conversion function. 
-func Convert_core_PodReadinessGate_To_v1_PodReadinessGate(in *core.PodReadinessGate, out *v1.PodReadinessGate, s conversion.Scope) error { +func Convert_core_PodReadinessGate_To_v1_PodReadinessGate(in *core.PodReadinessGate, out *corev1.PodReadinessGate, s conversion.Scope) error { return autoConvert_core_PodReadinessGate_To_v1_PodReadinessGate(in, out, s) } -func autoConvert_v1_PodResourceClaim_To_core_PodResourceClaim(in *v1.PodResourceClaim, out *core.PodResourceClaim, s conversion.Scope) error { +func autoConvert_v1_PodResourceClaim_To_core_PodResourceClaim(in *corev1.PodResourceClaim, out *core.PodResourceClaim, s conversion.Scope) error { out.Name = in.Name out.ResourceClaimName = (*string)(unsafe.Pointer(in.ResourceClaimName)) out.ResourceClaimTemplateName = (*string)(unsafe.Pointer(in.ResourceClaimTemplateName)) @@ -6551,11 +6565,11 @@ func autoConvert_v1_PodResourceClaim_To_core_PodResourceClaim(in *v1.PodResource } // Convert_v1_PodResourceClaim_To_core_PodResourceClaim is an autogenerated conversion function. -func Convert_v1_PodResourceClaim_To_core_PodResourceClaim(in *v1.PodResourceClaim, out *core.PodResourceClaim, s conversion.Scope) error { +func Convert_v1_PodResourceClaim_To_core_PodResourceClaim(in *corev1.PodResourceClaim, out *core.PodResourceClaim, s conversion.Scope) error { return autoConvert_v1_PodResourceClaim_To_core_PodResourceClaim(in, out, s) } -func autoConvert_core_PodResourceClaim_To_v1_PodResourceClaim(in *core.PodResourceClaim, out *v1.PodResourceClaim, s conversion.Scope) error { +func autoConvert_core_PodResourceClaim_To_v1_PodResourceClaim(in *core.PodResourceClaim, out *corev1.PodResourceClaim, s conversion.Scope) error { out.Name = in.Name out.ResourceClaimName = (*string)(unsafe.Pointer(in.ResourceClaimName)) out.ResourceClaimTemplateName = (*string)(unsafe.Pointer(in.ResourceClaimTemplateName)) @@ -6563,53 +6577,53 @@ func autoConvert_core_PodResourceClaim_To_v1_PodResourceClaim(in *core.PodResour } // Convert_core_PodResourceClaim_To_v1_PodResourceClaim is an autogenerated conversion function. -func Convert_core_PodResourceClaim_To_v1_PodResourceClaim(in *core.PodResourceClaim, out *v1.PodResourceClaim, s conversion.Scope) error { +func Convert_core_PodResourceClaim_To_v1_PodResourceClaim(in *core.PodResourceClaim, out *corev1.PodResourceClaim, s conversion.Scope) error { return autoConvert_core_PodResourceClaim_To_v1_PodResourceClaim(in, out, s) } -func autoConvert_v1_PodResourceClaimStatus_To_core_PodResourceClaimStatus(in *v1.PodResourceClaimStatus, out *core.PodResourceClaimStatus, s conversion.Scope) error { +func autoConvert_v1_PodResourceClaimStatus_To_core_PodResourceClaimStatus(in *corev1.PodResourceClaimStatus, out *core.PodResourceClaimStatus, s conversion.Scope) error { out.Name = in.Name out.ResourceClaimName = (*string)(unsafe.Pointer(in.ResourceClaimName)) return nil } // Convert_v1_PodResourceClaimStatus_To_core_PodResourceClaimStatus is an autogenerated conversion function. 
-func Convert_v1_PodResourceClaimStatus_To_core_PodResourceClaimStatus(in *v1.PodResourceClaimStatus, out *core.PodResourceClaimStatus, s conversion.Scope) error { +func Convert_v1_PodResourceClaimStatus_To_core_PodResourceClaimStatus(in *corev1.PodResourceClaimStatus, out *core.PodResourceClaimStatus, s conversion.Scope) error { return autoConvert_v1_PodResourceClaimStatus_To_core_PodResourceClaimStatus(in, out, s) } -func autoConvert_core_PodResourceClaimStatus_To_v1_PodResourceClaimStatus(in *core.PodResourceClaimStatus, out *v1.PodResourceClaimStatus, s conversion.Scope) error { +func autoConvert_core_PodResourceClaimStatus_To_v1_PodResourceClaimStatus(in *core.PodResourceClaimStatus, out *corev1.PodResourceClaimStatus, s conversion.Scope) error { out.Name = in.Name out.ResourceClaimName = (*string)(unsafe.Pointer(in.ResourceClaimName)) return nil } // Convert_core_PodResourceClaimStatus_To_v1_PodResourceClaimStatus is an autogenerated conversion function. -func Convert_core_PodResourceClaimStatus_To_v1_PodResourceClaimStatus(in *core.PodResourceClaimStatus, out *v1.PodResourceClaimStatus, s conversion.Scope) error { +func Convert_core_PodResourceClaimStatus_To_v1_PodResourceClaimStatus(in *core.PodResourceClaimStatus, out *corev1.PodResourceClaimStatus, s conversion.Scope) error { return autoConvert_core_PodResourceClaimStatus_To_v1_PodResourceClaimStatus(in, out, s) } -func autoConvert_v1_PodSchedulingGate_To_core_PodSchedulingGate(in *v1.PodSchedulingGate, out *core.PodSchedulingGate, s conversion.Scope) error { +func autoConvert_v1_PodSchedulingGate_To_core_PodSchedulingGate(in *corev1.PodSchedulingGate, out *core.PodSchedulingGate, s conversion.Scope) error { out.Name = in.Name return nil } // Convert_v1_PodSchedulingGate_To_core_PodSchedulingGate is an autogenerated conversion function. -func Convert_v1_PodSchedulingGate_To_core_PodSchedulingGate(in *v1.PodSchedulingGate, out *core.PodSchedulingGate, s conversion.Scope) error { +func Convert_v1_PodSchedulingGate_To_core_PodSchedulingGate(in *corev1.PodSchedulingGate, out *core.PodSchedulingGate, s conversion.Scope) error { return autoConvert_v1_PodSchedulingGate_To_core_PodSchedulingGate(in, out, s) } -func autoConvert_core_PodSchedulingGate_To_v1_PodSchedulingGate(in *core.PodSchedulingGate, out *v1.PodSchedulingGate, s conversion.Scope) error { +func autoConvert_core_PodSchedulingGate_To_v1_PodSchedulingGate(in *core.PodSchedulingGate, out *corev1.PodSchedulingGate, s conversion.Scope) error { out.Name = in.Name return nil } // Convert_core_PodSchedulingGate_To_v1_PodSchedulingGate is an autogenerated conversion function. 
-func Convert_core_PodSchedulingGate_To_v1_PodSchedulingGate(in *core.PodSchedulingGate, out *v1.PodSchedulingGate, s conversion.Scope) error { +func Convert_core_PodSchedulingGate_To_v1_PodSchedulingGate(in *core.PodSchedulingGate, out *corev1.PodSchedulingGate, s conversion.Scope) error { return autoConvert_core_PodSchedulingGate_To_v1_PodSchedulingGate(in, out, s) } -func autoConvert_v1_PodSecurityContext_To_core_PodSecurityContext(in *v1.PodSecurityContext, out *core.PodSecurityContext, s conversion.Scope) error { +func autoConvert_v1_PodSecurityContext_To_core_PodSecurityContext(in *corev1.PodSecurityContext, out *core.PodSecurityContext, s conversion.Scope) error { out.SELinuxOptions = (*core.SELinuxOptions)(unsafe.Pointer(in.SELinuxOptions)) out.WindowsOptions = (*core.WindowsSecurityContextOptions)(unsafe.Pointer(in.WindowsOptions)) out.RunAsUser = (*int64)(unsafe.Pointer(in.RunAsUser)) @@ -6622,61 +6636,63 @@ func autoConvert_v1_PodSecurityContext_To_core_PodSecurityContext(in *v1.PodSecu out.FSGroupChangePolicy = (*core.PodFSGroupChangePolicy)(unsafe.Pointer(in.FSGroupChangePolicy)) out.SeccompProfile = (*core.SeccompProfile)(unsafe.Pointer(in.SeccompProfile)) out.AppArmorProfile = (*core.AppArmorProfile)(unsafe.Pointer(in.AppArmorProfile)) + out.SELinuxChangePolicy = (*core.PodSELinuxChangePolicy)(unsafe.Pointer(in.SELinuxChangePolicy)) return nil } // Convert_v1_PodSecurityContext_To_core_PodSecurityContext is an autogenerated conversion function. -func Convert_v1_PodSecurityContext_To_core_PodSecurityContext(in *v1.PodSecurityContext, out *core.PodSecurityContext, s conversion.Scope) error { +func Convert_v1_PodSecurityContext_To_core_PodSecurityContext(in *corev1.PodSecurityContext, out *core.PodSecurityContext, s conversion.Scope) error { return autoConvert_v1_PodSecurityContext_To_core_PodSecurityContext(in, out, s) } -func autoConvert_core_PodSecurityContext_To_v1_PodSecurityContext(in *core.PodSecurityContext, out *v1.PodSecurityContext, s conversion.Scope) error { +func autoConvert_core_PodSecurityContext_To_v1_PodSecurityContext(in *core.PodSecurityContext, out *corev1.PodSecurityContext, s conversion.Scope) error { // INFO: in.HostNetwork opted out of conversion generation // INFO: in.HostPID opted out of conversion generation // INFO: in.HostIPC opted out of conversion generation // INFO: in.ShareProcessNamespace opted out of conversion generation // INFO: in.HostUsers opted out of conversion generation - out.SELinuxOptions = (*v1.SELinuxOptions)(unsafe.Pointer(in.SELinuxOptions)) - out.WindowsOptions = (*v1.WindowsSecurityContextOptions)(unsafe.Pointer(in.WindowsOptions)) + out.SELinuxOptions = (*corev1.SELinuxOptions)(unsafe.Pointer(in.SELinuxOptions)) + out.WindowsOptions = (*corev1.WindowsSecurityContextOptions)(unsafe.Pointer(in.WindowsOptions)) out.RunAsUser = (*int64)(unsafe.Pointer(in.RunAsUser)) out.RunAsGroup = (*int64)(unsafe.Pointer(in.RunAsGroup)) out.RunAsNonRoot = (*bool)(unsafe.Pointer(in.RunAsNonRoot)) out.SupplementalGroups = *(*[]int64)(unsafe.Pointer(&in.SupplementalGroups)) - out.SupplementalGroupsPolicy = (*v1.SupplementalGroupsPolicy)(unsafe.Pointer(in.SupplementalGroupsPolicy)) + out.SupplementalGroupsPolicy = (*corev1.SupplementalGroupsPolicy)(unsafe.Pointer(in.SupplementalGroupsPolicy)) out.FSGroup = (*int64)(unsafe.Pointer(in.FSGroup)) - out.FSGroupChangePolicy = (*v1.PodFSGroupChangePolicy)(unsafe.Pointer(in.FSGroupChangePolicy)) - out.Sysctls = *(*[]v1.Sysctl)(unsafe.Pointer(&in.Sysctls)) - out.SeccompProfile = 
(*v1.SeccompProfile)(unsafe.Pointer(in.SeccompProfile)) - out.AppArmorProfile = (*v1.AppArmorProfile)(unsafe.Pointer(in.AppArmorProfile)) + out.FSGroupChangePolicy = (*corev1.PodFSGroupChangePolicy)(unsafe.Pointer(in.FSGroupChangePolicy)) + out.Sysctls = *(*[]corev1.Sysctl)(unsafe.Pointer(&in.Sysctls)) + out.SeccompProfile = (*corev1.SeccompProfile)(unsafe.Pointer(in.SeccompProfile)) + out.AppArmorProfile = (*corev1.AppArmorProfile)(unsafe.Pointer(in.AppArmorProfile)) + out.SELinuxChangePolicy = (*corev1.PodSELinuxChangePolicy)(unsafe.Pointer(in.SELinuxChangePolicy)) return nil } // Convert_core_PodSecurityContext_To_v1_PodSecurityContext is an autogenerated conversion function. -func Convert_core_PodSecurityContext_To_v1_PodSecurityContext(in *core.PodSecurityContext, out *v1.PodSecurityContext, s conversion.Scope) error { +func Convert_core_PodSecurityContext_To_v1_PodSecurityContext(in *core.PodSecurityContext, out *corev1.PodSecurityContext, s conversion.Scope) error { return autoConvert_core_PodSecurityContext_To_v1_PodSecurityContext(in, out, s) } -func autoConvert_v1_PodSignature_To_core_PodSignature(in *v1.PodSignature, out *core.PodSignature, s conversion.Scope) error { +func autoConvert_v1_PodSignature_To_core_PodSignature(in *corev1.PodSignature, out *core.PodSignature, s conversion.Scope) error { out.PodController = (*metav1.OwnerReference)(unsafe.Pointer(in.PodController)) return nil } // Convert_v1_PodSignature_To_core_PodSignature is an autogenerated conversion function. -func Convert_v1_PodSignature_To_core_PodSignature(in *v1.PodSignature, out *core.PodSignature, s conversion.Scope) error { +func Convert_v1_PodSignature_To_core_PodSignature(in *corev1.PodSignature, out *core.PodSignature, s conversion.Scope) error { return autoConvert_v1_PodSignature_To_core_PodSignature(in, out, s) } -func autoConvert_core_PodSignature_To_v1_PodSignature(in *core.PodSignature, out *v1.PodSignature, s conversion.Scope) error { +func autoConvert_core_PodSignature_To_v1_PodSignature(in *core.PodSignature, out *corev1.PodSignature, s conversion.Scope) error { out.PodController = (*metav1.OwnerReference)(unsafe.Pointer(in.PodController)) return nil } // Convert_core_PodSignature_To_v1_PodSignature is an autogenerated conversion function. 
-func Convert_core_PodSignature_To_v1_PodSignature(in *core.PodSignature, out *v1.PodSignature, s conversion.Scope) error { +func Convert_core_PodSignature_To_v1_PodSignature(in *core.PodSignature, out *corev1.PodSignature, s conversion.Scope) error { return autoConvert_core_PodSignature_To_v1_PodSignature(in, out, s) } -func autoConvert_v1_PodSpec_To_core_PodSpec(in *v1.PodSpec, out *core.PodSpec, s conversion.Scope) error { +func autoConvert_v1_PodSpec_To_core_PodSpec(in *corev1.PodSpec, out *core.PodSpec, s conversion.Scope) error { if in.Volumes != nil { in, out := &in.Volumes, &out.Volumes *out = make([]core.Volume, len(*in)) @@ -6734,13 +6750,14 @@ func autoConvert_v1_PodSpec_To_core_PodSpec(in *v1.PodSpec, out *core.PodSpec, s // INFO: in.HostUsers opted out of conversion generation out.SchedulingGates = *(*[]core.PodSchedulingGate)(unsafe.Pointer(&in.SchedulingGates)) out.ResourceClaims = *(*[]core.PodResourceClaim)(unsafe.Pointer(&in.ResourceClaims)) + out.Resources = (*core.ResourceRequirements)(unsafe.Pointer(in.Resources)) return nil } -func autoConvert_core_PodSpec_To_v1_PodSpec(in *core.PodSpec, out *v1.PodSpec, s conversion.Scope) error { +func autoConvert_core_PodSpec_To_v1_PodSpec(in *core.PodSpec, out *corev1.PodSpec, s conversion.Scope) error { if in.Volumes != nil { in, out := &in.Volumes, &out.Volumes - *out = make([]v1.Volume, len(*in)) + *out = make([]corev1.Volume, len(*in)) for i := range *in { if err := Convert_core_Volume_To_v1_Volume(&(*in)[i], &(*out)[i], s); err != nil { return err @@ -6749,50 +6766,51 @@ func autoConvert_core_PodSpec_To_v1_PodSpec(in *core.PodSpec, out *v1.PodSpec, s } else { out.Volumes = nil } - out.InitContainers = *(*[]v1.Container)(unsafe.Pointer(&in.InitContainers)) - out.Containers = *(*[]v1.Container)(unsafe.Pointer(&in.Containers)) - out.EphemeralContainers = *(*[]v1.EphemeralContainer)(unsafe.Pointer(&in.EphemeralContainers)) - out.RestartPolicy = v1.RestartPolicy(in.RestartPolicy) + out.InitContainers = *(*[]corev1.Container)(unsafe.Pointer(&in.InitContainers)) + out.Containers = *(*[]corev1.Container)(unsafe.Pointer(&in.Containers)) + out.EphemeralContainers = *(*[]corev1.EphemeralContainer)(unsafe.Pointer(&in.EphemeralContainers)) + out.RestartPolicy = corev1.RestartPolicy(in.RestartPolicy) out.TerminationGracePeriodSeconds = (*int64)(unsafe.Pointer(in.TerminationGracePeriodSeconds)) out.ActiveDeadlineSeconds = (*int64)(unsafe.Pointer(in.ActiveDeadlineSeconds)) - out.DNSPolicy = v1.DNSPolicy(in.DNSPolicy) + out.DNSPolicy = corev1.DNSPolicy(in.DNSPolicy) out.NodeSelector = *(*map[string]string)(unsafe.Pointer(&in.NodeSelector)) out.ServiceAccountName = in.ServiceAccountName out.AutomountServiceAccountToken = (*bool)(unsafe.Pointer(in.AutomountServiceAccountToken)) out.NodeName = in.NodeName if in.SecurityContext != nil { in, out := &in.SecurityContext, &out.SecurityContext - *out = new(v1.PodSecurityContext) + *out = new(corev1.PodSecurityContext) if err := Convert_core_PodSecurityContext_To_v1_PodSecurityContext(*in, *out, s); err != nil { return err } } else { out.SecurityContext = nil } - out.ImagePullSecrets = *(*[]v1.LocalObjectReference)(unsafe.Pointer(&in.ImagePullSecrets)) + out.ImagePullSecrets = *(*[]corev1.LocalObjectReference)(unsafe.Pointer(&in.ImagePullSecrets)) out.Hostname = in.Hostname out.Subdomain = in.Subdomain out.SetHostnameAsFQDN = (*bool)(unsafe.Pointer(in.SetHostnameAsFQDN)) - out.Affinity = (*v1.Affinity)(unsafe.Pointer(in.Affinity)) + out.Affinity = (*corev1.Affinity)(unsafe.Pointer(in.Affinity)) 
out.SchedulerName = in.SchedulerName - out.Tolerations = *(*[]v1.Toleration)(unsafe.Pointer(&in.Tolerations)) - out.HostAliases = *(*[]v1.HostAlias)(unsafe.Pointer(&in.HostAliases)) + out.Tolerations = *(*[]corev1.Toleration)(unsafe.Pointer(&in.Tolerations)) + out.HostAliases = *(*[]corev1.HostAlias)(unsafe.Pointer(&in.HostAliases)) out.PriorityClassName = in.PriorityClassName out.Priority = (*int32)(unsafe.Pointer(in.Priority)) - out.PreemptionPolicy = (*v1.PreemptionPolicy)(unsafe.Pointer(in.PreemptionPolicy)) - out.DNSConfig = (*v1.PodDNSConfig)(unsafe.Pointer(in.DNSConfig)) - out.ReadinessGates = *(*[]v1.PodReadinessGate)(unsafe.Pointer(&in.ReadinessGates)) + out.PreemptionPolicy = (*corev1.PreemptionPolicy)(unsafe.Pointer(in.PreemptionPolicy)) + out.DNSConfig = (*corev1.PodDNSConfig)(unsafe.Pointer(in.DNSConfig)) + out.ReadinessGates = *(*[]corev1.PodReadinessGate)(unsafe.Pointer(&in.ReadinessGates)) out.RuntimeClassName = (*string)(unsafe.Pointer(in.RuntimeClassName)) - out.Overhead = *(*v1.ResourceList)(unsafe.Pointer(&in.Overhead)) + out.Overhead = *(*corev1.ResourceList)(unsafe.Pointer(&in.Overhead)) out.EnableServiceLinks = (*bool)(unsafe.Pointer(in.EnableServiceLinks)) - out.TopologySpreadConstraints = *(*[]v1.TopologySpreadConstraint)(unsafe.Pointer(&in.TopologySpreadConstraints)) - out.OS = (*v1.PodOS)(unsafe.Pointer(in.OS)) - out.SchedulingGates = *(*[]v1.PodSchedulingGate)(unsafe.Pointer(&in.SchedulingGates)) - out.ResourceClaims = *(*[]v1.PodResourceClaim)(unsafe.Pointer(&in.ResourceClaims)) + out.TopologySpreadConstraints = *(*[]corev1.TopologySpreadConstraint)(unsafe.Pointer(&in.TopologySpreadConstraints)) + out.OS = (*corev1.PodOS)(unsafe.Pointer(in.OS)) + out.SchedulingGates = *(*[]corev1.PodSchedulingGate)(unsafe.Pointer(&in.SchedulingGates)) + out.ResourceClaims = *(*[]corev1.PodResourceClaim)(unsafe.Pointer(&in.ResourceClaims)) + out.Resources = (*corev1.ResourceRequirements)(unsafe.Pointer(in.Resources)) return nil } -func autoConvert_v1_PodStatus_To_core_PodStatus(in *v1.PodStatus, out *core.PodStatus, s conversion.Scope) error { +func autoConvert_v1_PodStatus_To_core_PodStatus(in *corev1.PodStatus, out *core.PodStatus, s conversion.Scope) error { out.Phase = core.PodPhase(in.Phase) out.Conditions = *(*[]core.PodCondition)(unsafe.Pointer(&in.Conditions)) out.Message = in.Message @@ -6812,26 +6830,26 @@ func autoConvert_v1_PodStatus_To_core_PodStatus(in *v1.PodStatus, out *core.PodS return nil } -func autoConvert_core_PodStatus_To_v1_PodStatus(in *core.PodStatus, out *v1.PodStatus, s conversion.Scope) error { - out.Phase = v1.PodPhase(in.Phase) - out.Conditions = *(*[]v1.PodCondition)(unsafe.Pointer(&in.Conditions)) +func autoConvert_core_PodStatus_To_v1_PodStatus(in *core.PodStatus, out *corev1.PodStatus, s conversion.Scope) error { + out.Phase = corev1.PodPhase(in.Phase) + out.Conditions = *(*[]corev1.PodCondition)(unsafe.Pointer(&in.Conditions)) out.Message = in.Message out.Reason = in.Reason out.NominatedNodeName = in.NominatedNodeName out.HostIP = in.HostIP - out.HostIPs = *(*[]v1.HostIP)(unsafe.Pointer(&in.HostIPs)) - out.PodIPs = *(*[]v1.PodIP)(unsafe.Pointer(&in.PodIPs)) + out.HostIPs = *(*[]corev1.HostIP)(unsafe.Pointer(&in.HostIPs)) + out.PodIPs = *(*[]corev1.PodIP)(unsafe.Pointer(&in.PodIPs)) out.StartTime = (*metav1.Time)(unsafe.Pointer(in.StartTime)) - out.QOSClass = v1.PodQOSClass(in.QOSClass) - out.InitContainerStatuses = *(*[]v1.ContainerStatus)(unsafe.Pointer(&in.InitContainerStatuses)) - out.ContainerStatuses = 
*(*[]v1.ContainerStatus)(unsafe.Pointer(&in.ContainerStatuses)) - out.EphemeralContainerStatuses = *(*[]v1.ContainerStatus)(unsafe.Pointer(&in.EphemeralContainerStatuses)) - out.Resize = v1.PodResizeStatus(in.Resize) - out.ResourceClaimStatuses = *(*[]v1.PodResourceClaimStatus)(unsafe.Pointer(&in.ResourceClaimStatuses)) + out.QOSClass = corev1.PodQOSClass(in.QOSClass) + out.InitContainerStatuses = *(*[]corev1.ContainerStatus)(unsafe.Pointer(&in.InitContainerStatuses)) + out.ContainerStatuses = *(*[]corev1.ContainerStatus)(unsafe.Pointer(&in.ContainerStatuses)) + out.EphemeralContainerStatuses = *(*[]corev1.ContainerStatus)(unsafe.Pointer(&in.EphemeralContainerStatuses)) + out.Resize = corev1.PodResizeStatus(in.Resize) + out.ResourceClaimStatuses = *(*[]corev1.PodResourceClaimStatus)(unsafe.Pointer(&in.ResourceClaimStatuses)) return nil } -func autoConvert_v1_PodStatusResult_To_core_PodStatusResult(in *v1.PodStatusResult, out *core.PodStatusResult, s conversion.Scope) error { +func autoConvert_v1_PodStatusResult_To_core_PodStatusResult(in *corev1.PodStatusResult, out *core.PodStatusResult, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta if err := Convert_v1_PodStatus_To_core_PodStatus(&in.Status, &out.Status, s); err != nil { return err @@ -6840,11 +6858,11 @@ func autoConvert_v1_PodStatusResult_To_core_PodStatusResult(in *v1.PodStatusResu } // Convert_v1_PodStatusResult_To_core_PodStatusResult is an autogenerated conversion function. -func Convert_v1_PodStatusResult_To_core_PodStatusResult(in *v1.PodStatusResult, out *core.PodStatusResult, s conversion.Scope) error { +func Convert_v1_PodStatusResult_To_core_PodStatusResult(in *corev1.PodStatusResult, out *core.PodStatusResult, s conversion.Scope) error { return autoConvert_v1_PodStatusResult_To_core_PodStatusResult(in, out, s) } -func autoConvert_core_PodStatusResult_To_v1_PodStatusResult(in *core.PodStatusResult, out *v1.PodStatusResult, s conversion.Scope) error { +func autoConvert_core_PodStatusResult_To_v1_PodStatusResult(in *core.PodStatusResult, out *corev1.PodStatusResult, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta if err := Convert_core_PodStatus_To_v1_PodStatus(&in.Status, &out.Status, s); err != nil { return err @@ -6853,11 +6871,11 @@ func autoConvert_core_PodStatusResult_To_v1_PodStatusResult(in *core.PodStatusRe } // Convert_core_PodStatusResult_To_v1_PodStatusResult is an autogenerated conversion function. -func Convert_core_PodStatusResult_To_v1_PodStatusResult(in *core.PodStatusResult, out *v1.PodStatusResult, s conversion.Scope) error { +func Convert_core_PodStatusResult_To_v1_PodStatusResult(in *core.PodStatusResult, out *corev1.PodStatusResult, s conversion.Scope) error { return autoConvert_core_PodStatusResult_To_v1_PodStatusResult(in, out, s) } -func autoConvert_v1_PodTemplate_To_core_PodTemplate(in *v1.PodTemplate, out *core.PodTemplate, s conversion.Scope) error { +func autoConvert_v1_PodTemplate_To_core_PodTemplate(in *corev1.PodTemplate, out *core.PodTemplate, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta if err := Convert_v1_PodTemplateSpec_To_core_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { return err @@ -6866,11 +6884,11 @@ func autoConvert_v1_PodTemplate_To_core_PodTemplate(in *v1.PodTemplate, out *cor } // Convert_v1_PodTemplate_To_core_PodTemplate is an autogenerated conversion function. 
-func Convert_v1_PodTemplate_To_core_PodTemplate(in *v1.PodTemplate, out *core.PodTemplate, s conversion.Scope) error { +func Convert_v1_PodTemplate_To_core_PodTemplate(in *corev1.PodTemplate, out *core.PodTemplate, s conversion.Scope) error { return autoConvert_v1_PodTemplate_To_core_PodTemplate(in, out, s) } -func autoConvert_core_PodTemplate_To_v1_PodTemplate(in *core.PodTemplate, out *v1.PodTemplate, s conversion.Scope) error { +func autoConvert_core_PodTemplate_To_v1_PodTemplate(in *core.PodTemplate, out *corev1.PodTemplate, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta if err := Convert_core_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { return err @@ -6879,11 +6897,11 @@ func autoConvert_core_PodTemplate_To_v1_PodTemplate(in *core.PodTemplate, out *v } // Convert_core_PodTemplate_To_v1_PodTemplate is an autogenerated conversion function. -func Convert_core_PodTemplate_To_v1_PodTemplate(in *core.PodTemplate, out *v1.PodTemplate, s conversion.Scope) error { +func Convert_core_PodTemplate_To_v1_PodTemplate(in *core.PodTemplate, out *corev1.PodTemplate, s conversion.Scope) error { return autoConvert_core_PodTemplate_To_v1_PodTemplate(in, out, s) } -func autoConvert_v1_PodTemplateList_To_core_PodTemplateList(in *v1.PodTemplateList, out *core.PodTemplateList, s conversion.Scope) error { +func autoConvert_v1_PodTemplateList_To_core_PodTemplateList(in *corev1.PodTemplateList, out *core.PodTemplateList, s conversion.Scope) error { out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items @@ -6900,15 +6918,15 @@ func autoConvert_v1_PodTemplateList_To_core_PodTemplateList(in *v1.PodTemplateLi } // Convert_v1_PodTemplateList_To_core_PodTemplateList is an autogenerated conversion function. -func Convert_v1_PodTemplateList_To_core_PodTemplateList(in *v1.PodTemplateList, out *core.PodTemplateList, s conversion.Scope) error { +func Convert_v1_PodTemplateList_To_core_PodTemplateList(in *corev1.PodTemplateList, out *core.PodTemplateList, s conversion.Scope) error { return autoConvert_v1_PodTemplateList_To_core_PodTemplateList(in, out, s) } -func autoConvert_core_PodTemplateList_To_v1_PodTemplateList(in *core.PodTemplateList, out *v1.PodTemplateList, s conversion.Scope) error { +func autoConvert_core_PodTemplateList_To_v1_PodTemplateList(in *core.PodTemplateList, out *corev1.PodTemplateList, s conversion.Scope) error { out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items - *out = make([]v1.PodTemplate, len(*in)) + *out = make([]corev1.PodTemplate, len(*in)) for i := range *in { if err := Convert_core_PodTemplate_To_v1_PodTemplate(&(*in)[i], &(*out)[i], s); err != nil { return err @@ -6921,11 +6939,11 @@ func autoConvert_core_PodTemplateList_To_v1_PodTemplateList(in *core.PodTemplate } // Convert_core_PodTemplateList_To_v1_PodTemplateList is an autogenerated conversion function. 
-func Convert_core_PodTemplateList_To_v1_PodTemplateList(in *core.PodTemplateList, out *v1.PodTemplateList, s conversion.Scope) error { +func Convert_core_PodTemplateList_To_v1_PodTemplateList(in *core.PodTemplateList, out *corev1.PodTemplateList, s conversion.Scope) error { return autoConvert_core_PodTemplateList_To_v1_PodTemplateList(in, out, s) } -func autoConvert_v1_PodTemplateSpec_To_core_PodTemplateSpec(in *v1.PodTemplateSpec, out *core.PodTemplateSpec, s conversion.Scope) error { +func autoConvert_v1_PodTemplateSpec_To_core_PodTemplateSpec(in *corev1.PodTemplateSpec, out *core.PodTemplateSpec, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta if err := Convert_v1_PodSpec_To_core_PodSpec(&in.Spec, &out.Spec, s); err != nil { return err @@ -6933,7 +6951,7 @@ func autoConvert_v1_PodTemplateSpec_To_core_PodTemplateSpec(in *v1.PodTemplateSp return nil } -func autoConvert_core_PodTemplateSpec_To_v1_PodTemplateSpec(in *core.PodTemplateSpec, out *v1.PodTemplateSpec, s conversion.Scope) error { +func autoConvert_core_PodTemplateSpec_To_v1_PodTemplateSpec(in *core.PodTemplateSpec, out *corev1.PodTemplateSpec, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta if err := Convert_core_PodSpec_To_v1_PodSpec(&in.Spec, &out.Spec, s); err != nil { return err @@ -6941,7 +6959,7 @@ func autoConvert_core_PodTemplateSpec_To_v1_PodTemplateSpec(in *core.PodTemplate return nil } -func autoConvert_v1_PortStatus_To_core_PortStatus(in *v1.PortStatus, out *core.PortStatus, s conversion.Scope) error { +func autoConvert_v1_PortStatus_To_core_PortStatus(in *corev1.PortStatus, out *core.PortStatus, s conversion.Scope) error { out.Port = in.Port out.Protocol = core.Protocol(in.Protocol) out.Error = (*string)(unsafe.Pointer(in.Error)) @@ -6949,23 +6967,23 @@ func autoConvert_v1_PortStatus_To_core_PortStatus(in *v1.PortStatus, out *core.P } // Convert_v1_PortStatus_To_core_PortStatus is an autogenerated conversion function. -func Convert_v1_PortStatus_To_core_PortStatus(in *v1.PortStatus, out *core.PortStatus, s conversion.Scope) error { +func Convert_v1_PortStatus_To_core_PortStatus(in *corev1.PortStatus, out *core.PortStatus, s conversion.Scope) error { return autoConvert_v1_PortStatus_To_core_PortStatus(in, out, s) } -func autoConvert_core_PortStatus_To_v1_PortStatus(in *core.PortStatus, out *v1.PortStatus, s conversion.Scope) error { +func autoConvert_core_PortStatus_To_v1_PortStatus(in *core.PortStatus, out *corev1.PortStatus, s conversion.Scope) error { out.Port = in.Port - out.Protocol = v1.Protocol(in.Protocol) + out.Protocol = corev1.Protocol(in.Protocol) out.Error = (*string)(unsafe.Pointer(in.Error)) return nil } // Convert_core_PortStatus_To_v1_PortStatus is an autogenerated conversion function. 
-func Convert_core_PortStatus_To_v1_PortStatus(in *core.PortStatus, out *v1.PortStatus, s conversion.Scope) error { +func Convert_core_PortStatus_To_v1_PortStatus(in *core.PortStatus, out *corev1.PortStatus, s conversion.Scope) error { return autoConvert_core_PortStatus_To_v1_PortStatus(in, out, s) } -func autoConvert_v1_PortworxVolumeSource_To_core_PortworxVolumeSource(in *v1.PortworxVolumeSource, out *core.PortworxVolumeSource, s conversion.Scope) error { +func autoConvert_v1_PortworxVolumeSource_To_core_PortworxVolumeSource(in *corev1.PortworxVolumeSource, out *core.PortworxVolumeSource, s conversion.Scope) error { out.VolumeID = in.VolumeID out.FSType = in.FSType out.ReadOnly = in.ReadOnly @@ -6973,11 +6991,11 @@ func autoConvert_v1_PortworxVolumeSource_To_core_PortworxVolumeSource(in *v1.Por } // Convert_v1_PortworxVolumeSource_To_core_PortworxVolumeSource is an autogenerated conversion function. -func Convert_v1_PortworxVolumeSource_To_core_PortworxVolumeSource(in *v1.PortworxVolumeSource, out *core.PortworxVolumeSource, s conversion.Scope) error { +func Convert_v1_PortworxVolumeSource_To_core_PortworxVolumeSource(in *corev1.PortworxVolumeSource, out *core.PortworxVolumeSource, s conversion.Scope) error { return autoConvert_v1_PortworxVolumeSource_To_core_PortworxVolumeSource(in, out, s) } -func autoConvert_core_PortworxVolumeSource_To_v1_PortworxVolumeSource(in *core.PortworxVolumeSource, out *v1.PortworxVolumeSource, s conversion.Scope) error { +func autoConvert_core_PortworxVolumeSource_To_v1_PortworxVolumeSource(in *core.PortworxVolumeSource, out *corev1.PortworxVolumeSource, s conversion.Scope) error { out.VolumeID = in.VolumeID out.FSType = in.FSType out.ReadOnly = in.ReadOnly @@ -6985,31 +7003,31 @@ func autoConvert_core_PortworxVolumeSource_To_v1_PortworxVolumeSource(in *core.P } // Convert_core_PortworxVolumeSource_To_v1_PortworxVolumeSource is an autogenerated conversion function. -func Convert_core_PortworxVolumeSource_To_v1_PortworxVolumeSource(in *core.PortworxVolumeSource, out *v1.PortworxVolumeSource, s conversion.Scope) error { +func Convert_core_PortworxVolumeSource_To_v1_PortworxVolumeSource(in *core.PortworxVolumeSource, out *corev1.PortworxVolumeSource, s conversion.Scope) error { return autoConvert_core_PortworxVolumeSource_To_v1_PortworxVolumeSource(in, out, s) } -func autoConvert_v1_Preconditions_To_core_Preconditions(in *v1.Preconditions, out *core.Preconditions, s conversion.Scope) error { +func autoConvert_v1_Preconditions_To_core_Preconditions(in *corev1.Preconditions, out *core.Preconditions, s conversion.Scope) error { out.UID = (*types.UID)(unsafe.Pointer(in.UID)) return nil } // Convert_v1_Preconditions_To_core_Preconditions is an autogenerated conversion function. -func Convert_v1_Preconditions_To_core_Preconditions(in *v1.Preconditions, out *core.Preconditions, s conversion.Scope) error { +func Convert_v1_Preconditions_To_core_Preconditions(in *corev1.Preconditions, out *core.Preconditions, s conversion.Scope) error { return autoConvert_v1_Preconditions_To_core_Preconditions(in, out, s) } -func autoConvert_core_Preconditions_To_v1_Preconditions(in *core.Preconditions, out *v1.Preconditions, s conversion.Scope) error { +func autoConvert_core_Preconditions_To_v1_Preconditions(in *core.Preconditions, out *corev1.Preconditions, s conversion.Scope) error { out.UID = (*types.UID)(unsafe.Pointer(in.UID)) return nil } // Convert_core_Preconditions_To_v1_Preconditions is an autogenerated conversion function. 
-func Convert_core_Preconditions_To_v1_Preconditions(in *core.Preconditions, out *v1.Preconditions, s conversion.Scope) error { +func Convert_core_Preconditions_To_v1_Preconditions(in *core.Preconditions, out *corev1.Preconditions, s conversion.Scope) error { return autoConvert_core_Preconditions_To_v1_Preconditions(in, out, s) } -func autoConvert_v1_PreferAvoidPodsEntry_To_core_PreferAvoidPodsEntry(in *v1.PreferAvoidPodsEntry, out *core.PreferAvoidPodsEntry, s conversion.Scope) error { +func autoConvert_v1_PreferAvoidPodsEntry_To_core_PreferAvoidPodsEntry(in *corev1.PreferAvoidPodsEntry, out *core.PreferAvoidPodsEntry, s conversion.Scope) error { if err := Convert_v1_PodSignature_To_core_PodSignature(&in.PodSignature, &out.PodSignature, s); err != nil { return err } @@ -7020,11 +7038,11 @@ func autoConvert_v1_PreferAvoidPodsEntry_To_core_PreferAvoidPodsEntry(in *v1.Pre } // Convert_v1_PreferAvoidPodsEntry_To_core_PreferAvoidPodsEntry is an autogenerated conversion function. -func Convert_v1_PreferAvoidPodsEntry_To_core_PreferAvoidPodsEntry(in *v1.PreferAvoidPodsEntry, out *core.PreferAvoidPodsEntry, s conversion.Scope) error { +func Convert_v1_PreferAvoidPodsEntry_To_core_PreferAvoidPodsEntry(in *corev1.PreferAvoidPodsEntry, out *core.PreferAvoidPodsEntry, s conversion.Scope) error { return autoConvert_v1_PreferAvoidPodsEntry_To_core_PreferAvoidPodsEntry(in, out, s) } -func autoConvert_core_PreferAvoidPodsEntry_To_v1_PreferAvoidPodsEntry(in *core.PreferAvoidPodsEntry, out *v1.PreferAvoidPodsEntry, s conversion.Scope) error { +func autoConvert_core_PreferAvoidPodsEntry_To_v1_PreferAvoidPodsEntry(in *core.PreferAvoidPodsEntry, out *corev1.PreferAvoidPodsEntry, s conversion.Scope) error { if err := Convert_core_PodSignature_To_v1_PodSignature(&in.PodSignature, &out.PodSignature, s); err != nil { return err } @@ -7035,11 +7053,11 @@ func autoConvert_core_PreferAvoidPodsEntry_To_v1_PreferAvoidPodsEntry(in *core.P } // Convert_core_PreferAvoidPodsEntry_To_v1_PreferAvoidPodsEntry is an autogenerated conversion function. -func Convert_core_PreferAvoidPodsEntry_To_v1_PreferAvoidPodsEntry(in *core.PreferAvoidPodsEntry, out *v1.PreferAvoidPodsEntry, s conversion.Scope) error { +func Convert_core_PreferAvoidPodsEntry_To_v1_PreferAvoidPodsEntry(in *core.PreferAvoidPodsEntry, out *corev1.PreferAvoidPodsEntry, s conversion.Scope) error { return autoConvert_core_PreferAvoidPodsEntry_To_v1_PreferAvoidPodsEntry(in, out, s) } -func autoConvert_v1_PreferredSchedulingTerm_To_core_PreferredSchedulingTerm(in *v1.PreferredSchedulingTerm, out *core.PreferredSchedulingTerm, s conversion.Scope) error { +func autoConvert_v1_PreferredSchedulingTerm_To_core_PreferredSchedulingTerm(in *corev1.PreferredSchedulingTerm, out *core.PreferredSchedulingTerm, s conversion.Scope) error { out.Weight = in.Weight if err := Convert_v1_NodeSelectorTerm_To_core_NodeSelectorTerm(&in.Preference, &out.Preference, s); err != nil { return err @@ -7048,11 +7066,11 @@ func autoConvert_v1_PreferredSchedulingTerm_To_core_PreferredSchedulingTerm(in * } // Convert_v1_PreferredSchedulingTerm_To_core_PreferredSchedulingTerm is an autogenerated conversion function. 
-func Convert_v1_PreferredSchedulingTerm_To_core_PreferredSchedulingTerm(in *v1.PreferredSchedulingTerm, out *core.PreferredSchedulingTerm, s conversion.Scope) error { +func Convert_v1_PreferredSchedulingTerm_To_core_PreferredSchedulingTerm(in *corev1.PreferredSchedulingTerm, out *core.PreferredSchedulingTerm, s conversion.Scope) error { return autoConvert_v1_PreferredSchedulingTerm_To_core_PreferredSchedulingTerm(in, out, s) } -func autoConvert_core_PreferredSchedulingTerm_To_v1_PreferredSchedulingTerm(in *core.PreferredSchedulingTerm, out *v1.PreferredSchedulingTerm, s conversion.Scope) error { +func autoConvert_core_PreferredSchedulingTerm_To_v1_PreferredSchedulingTerm(in *core.PreferredSchedulingTerm, out *corev1.PreferredSchedulingTerm, s conversion.Scope) error { out.Weight = in.Weight if err := Convert_core_NodeSelectorTerm_To_v1_NodeSelectorTerm(&in.Preference, &out.Preference, s); err != nil { return err @@ -7061,11 +7079,11 @@ func autoConvert_core_PreferredSchedulingTerm_To_v1_PreferredSchedulingTerm(in * } // Convert_core_PreferredSchedulingTerm_To_v1_PreferredSchedulingTerm is an autogenerated conversion function. -func Convert_core_PreferredSchedulingTerm_To_v1_PreferredSchedulingTerm(in *core.PreferredSchedulingTerm, out *v1.PreferredSchedulingTerm, s conversion.Scope) error { +func Convert_core_PreferredSchedulingTerm_To_v1_PreferredSchedulingTerm(in *core.PreferredSchedulingTerm, out *corev1.PreferredSchedulingTerm, s conversion.Scope) error { return autoConvert_core_PreferredSchedulingTerm_To_v1_PreferredSchedulingTerm(in, out, s) } -func autoConvert_v1_Probe_To_core_Probe(in *v1.Probe, out *core.Probe, s conversion.Scope) error { +func autoConvert_v1_Probe_To_core_Probe(in *corev1.Probe, out *core.Probe, s conversion.Scope) error { if err := Convert_v1_ProbeHandler_To_core_ProbeHandler(&in.ProbeHandler, &out.ProbeHandler, s); err != nil { return err } @@ -7079,11 +7097,11 @@ func autoConvert_v1_Probe_To_core_Probe(in *v1.Probe, out *core.Probe, s convers } // Convert_v1_Probe_To_core_Probe is an autogenerated conversion function. -func Convert_v1_Probe_To_core_Probe(in *v1.Probe, out *core.Probe, s conversion.Scope) error { +func Convert_v1_Probe_To_core_Probe(in *corev1.Probe, out *core.Probe, s conversion.Scope) error { return autoConvert_v1_Probe_To_core_Probe(in, out, s) } -func autoConvert_core_Probe_To_v1_Probe(in *core.Probe, out *v1.Probe, s conversion.Scope) error { +func autoConvert_core_Probe_To_v1_Probe(in *core.Probe, out *corev1.Probe, s conversion.Scope) error { if err := Convert_core_ProbeHandler_To_v1_ProbeHandler(&in.ProbeHandler, &out.ProbeHandler, s); err != nil { return err } @@ -7097,11 +7115,11 @@ func autoConvert_core_Probe_To_v1_Probe(in *core.Probe, out *v1.Probe, s convers } // Convert_core_Probe_To_v1_Probe is an autogenerated conversion function. 
-func Convert_core_Probe_To_v1_Probe(in *core.Probe, out *v1.Probe, s conversion.Scope) error { +func Convert_core_Probe_To_v1_Probe(in *core.Probe, out *corev1.Probe, s conversion.Scope) error { return autoConvert_core_Probe_To_v1_Probe(in, out, s) } -func autoConvert_v1_ProbeHandler_To_core_ProbeHandler(in *v1.ProbeHandler, out *core.ProbeHandler, s conversion.Scope) error { +func autoConvert_v1_ProbeHandler_To_core_ProbeHandler(in *corev1.ProbeHandler, out *core.ProbeHandler, s conversion.Scope) error { out.Exec = (*core.ExecAction)(unsafe.Pointer(in.Exec)) out.HTTPGet = (*core.HTTPGetAction)(unsafe.Pointer(in.HTTPGet)) out.TCPSocket = (*core.TCPSocketAction)(unsafe.Pointer(in.TCPSocket)) @@ -7110,24 +7128,24 @@ func autoConvert_v1_ProbeHandler_To_core_ProbeHandler(in *v1.ProbeHandler, out * } // Convert_v1_ProbeHandler_To_core_ProbeHandler is an autogenerated conversion function. -func Convert_v1_ProbeHandler_To_core_ProbeHandler(in *v1.ProbeHandler, out *core.ProbeHandler, s conversion.Scope) error { +func Convert_v1_ProbeHandler_To_core_ProbeHandler(in *corev1.ProbeHandler, out *core.ProbeHandler, s conversion.Scope) error { return autoConvert_v1_ProbeHandler_To_core_ProbeHandler(in, out, s) } -func autoConvert_core_ProbeHandler_To_v1_ProbeHandler(in *core.ProbeHandler, out *v1.ProbeHandler, s conversion.Scope) error { - out.Exec = (*v1.ExecAction)(unsafe.Pointer(in.Exec)) - out.HTTPGet = (*v1.HTTPGetAction)(unsafe.Pointer(in.HTTPGet)) - out.TCPSocket = (*v1.TCPSocketAction)(unsafe.Pointer(in.TCPSocket)) - out.GRPC = (*v1.GRPCAction)(unsafe.Pointer(in.GRPC)) +func autoConvert_core_ProbeHandler_To_v1_ProbeHandler(in *core.ProbeHandler, out *corev1.ProbeHandler, s conversion.Scope) error { + out.Exec = (*corev1.ExecAction)(unsafe.Pointer(in.Exec)) + out.HTTPGet = (*corev1.HTTPGetAction)(unsafe.Pointer(in.HTTPGet)) + out.TCPSocket = (*corev1.TCPSocketAction)(unsafe.Pointer(in.TCPSocket)) + out.GRPC = (*corev1.GRPCAction)(unsafe.Pointer(in.GRPC)) return nil } // Convert_core_ProbeHandler_To_v1_ProbeHandler is an autogenerated conversion function. -func Convert_core_ProbeHandler_To_v1_ProbeHandler(in *core.ProbeHandler, out *v1.ProbeHandler, s conversion.Scope) error { +func Convert_core_ProbeHandler_To_v1_ProbeHandler(in *core.ProbeHandler, out *corev1.ProbeHandler, s conversion.Scope) error { return autoConvert_core_ProbeHandler_To_v1_ProbeHandler(in, out, s) } -func autoConvert_v1_ProjectedVolumeSource_To_core_ProjectedVolumeSource(in *v1.ProjectedVolumeSource, out *core.ProjectedVolumeSource, s conversion.Scope) error { +func autoConvert_v1_ProjectedVolumeSource_To_core_ProjectedVolumeSource(in *corev1.ProjectedVolumeSource, out *core.ProjectedVolumeSource, s conversion.Scope) error { if in.Sources != nil { in, out := &in.Sources, &out.Sources *out = make([]core.VolumeProjection, len(*in)) @@ -7144,14 +7162,14 @@ func autoConvert_v1_ProjectedVolumeSource_To_core_ProjectedVolumeSource(in *v1.P } // Convert_v1_ProjectedVolumeSource_To_core_ProjectedVolumeSource is an autogenerated conversion function. 
-func Convert_v1_ProjectedVolumeSource_To_core_ProjectedVolumeSource(in *v1.ProjectedVolumeSource, out *core.ProjectedVolumeSource, s conversion.Scope) error { +func Convert_v1_ProjectedVolumeSource_To_core_ProjectedVolumeSource(in *corev1.ProjectedVolumeSource, out *core.ProjectedVolumeSource, s conversion.Scope) error { return autoConvert_v1_ProjectedVolumeSource_To_core_ProjectedVolumeSource(in, out, s) } -func autoConvert_core_ProjectedVolumeSource_To_v1_ProjectedVolumeSource(in *core.ProjectedVolumeSource, out *v1.ProjectedVolumeSource, s conversion.Scope) error { +func autoConvert_core_ProjectedVolumeSource_To_v1_ProjectedVolumeSource(in *core.ProjectedVolumeSource, out *corev1.ProjectedVolumeSource, s conversion.Scope) error { if in.Sources != nil { in, out := &in.Sources, &out.Sources - *out = make([]v1.VolumeProjection, len(*in)) + *out = make([]corev1.VolumeProjection, len(*in)) for i := range *in { if err := Convert_core_VolumeProjection_To_v1_VolumeProjection(&(*in)[i], &(*out)[i], s); err != nil { return err @@ -7165,11 +7183,11 @@ func autoConvert_core_ProjectedVolumeSource_To_v1_ProjectedVolumeSource(in *core } // Convert_core_ProjectedVolumeSource_To_v1_ProjectedVolumeSource is an autogenerated conversion function. -func Convert_core_ProjectedVolumeSource_To_v1_ProjectedVolumeSource(in *core.ProjectedVolumeSource, out *v1.ProjectedVolumeSource, s conversion.Scope) error { +func Convert_core_ProjectedVolumeSource_To_v1_ProjectedVolumeSource(in *core.ProjectedVolumeSource, out *corev1.ProjectedVolumeSource, s conversion.Scope) error { return autoConvert_core_ProjectedVolumeSource_To_v1_ProjectedVolumeSource(in, out, s) } -func autoConvert_v1_QuobyteVolumeSource_To_core_QuobyteVolumeSource(in *v1.QuobyteVolumeSource, out *core.QuobyteVolumeSource, s conversion.Scope) error { +func autoConvert_v1_QuobyteVolumeSource_To_core_QuobyteVolumeSource(in *corev1.QuobyteVolumeSource, out *core.QuobyteVolumeSource, s conversion.Scope) error { out.Registry = in.Registry out.Volume = in.Volume out.ReadOnly = in.ReadOnly @@ -7180,11 +7198,11 @@ func autoConvert_v1_QuobyteVolumeSource_To_core_QuobyteVolumeSource(in *v1.Quoby } // Convert_v1_QuobyteVolumeSource_To_core_QuobyteVolumeSource is an autogenerated conversion function. -func Convert_v1_QuobyteVolumeSource_To_core_QuobyteVolumeSource(in *v1.QuobyteVolumeSource, out *core.QuobyteVolumeSource, s conversion.Scope) error { +func Convert_v1_QuobyteVolumeSource_To_core_QuobyteVolumeSource(in *corev1.QuobyteVolumeSource, out *core.QuobyteVolumeSource, s conversion.Scope) error { return autoConvert_v1_QuobyteVolumeSource_To_core_QuobyteVolumeSource(in, out, s) } -func autoConvert_core_QuobyteVolumeSource_To_v1_QuobyteVolumeSource(in *core.QuobyteVolumeSource, out *v1.QuobyteVolumeSource, s conversion.Scope) error { +func autoConvert_core_QuobyteVolumeSource_To_v1_QuobyteVolumeSource(in *core.QuobyteVolumeSource, out *corev1.QuobyteVolumeSource, s conversion.Scope) error { out.Registry = in.Registry out.Volume = in.Volume out.ReadOnly = in.ReadOnly @@ -7195,11 +7213,11 @@ func autoConvert_core_QuobyteVolumeSource_To_v1_QuobyteVolumeSource(in *core.Quo } // Convert_core_QuobyteVolumeSource_To_v1_QuobyteVolumeSource is an autogenerated conversion function. 
-func Convert_core_QuobyteVolumeSource_To_v1_QuobyteVolumeSource(in *core.QuobyteVolumeSource, out *v1.QuobyteVolumeSource, s conversion.Scope) error { +func Convert_core_QuobyteVolumeSource_To_v1_QuobyteVolumeSource(in *core.QuobyteVolumeSource, out *corev1.QuobyteVolumeSource, s conversion.Scope) error { return autoConvert_core_QuobyteVolumeSource_To_v1_QuobyteVolumeSource(in, out, s) } -func autoConvert_v1_RBDPersistentVolumeSource_To_core_RBDPersistentVolumeSource(in *v1.RBDPersistentVolumeSource, out *core.RBDPersistentVolumeSource, s conversion.Scope) error { +func autoConvert_v1_RBDPersistentVolumeSource_To_core_RBDPersistentVolumeSource(in *corev1.RBDPersistentVolumeSource, out *core.RBDPersistentVolumeSource, s conversion.Scope) error { out.CephMonitors = *(*[]string)(unsafe.Pointer(&in.CephMonitors)) out.RBDImage = in.RBDImage out.FSType = in.FSType @@ -7212,28 +7230,28 @@ func autoConvert_v1_RBDPersistentVolumeSource_To_core_RBDPersistentVolumeSource( } // Convert_v1_RBDPersistentVolumeSource_To_core_RBDPersistentVolumeSource is an autogenerated conversion function. -func Convert_v1_RBDPersistentVolumeSource_To_core_RBDPersistentVolumeSource(in *v1.RBDPersistentVolumeSource, out *core.RBDPersistentVolumeSource, s conversion.Scope) error { +func Convert_v1_RBDPersistentVolumeSource_To_core_RBDPersistentVolumeSource(in *corev1.RBDPersistentVolumeSource, out *core.RBDPersistentVolumeSource, s conversion.Scope) error { return autoConvert_v1_RBDPersistentVolumeSource_To_core_RBDPersistentVolumeSource(in, out, s) } -func autoConvert_core_RBDPersistentVolumeSource_To_v1_RBDPersistentVolumeSource(in *core.RBDPersistentVolumeSource, out *v1.RBDPersistentVolumeSource, s conversion.Scope) error { +func autoConvert_core_RBDPersistentVolumeSource_To_v1_RBDPersistentVolumeSource(in *core.RBDPersistentVolumeSource, out *corev1.RBDPersistentVolumeSource, s conversion.Scope) error { out.CephMonitors = *(*[]string)(unsafe.Pointer(&in.CephMonitors)) out.RBDImage = in.RBDImage out.FSType = in.FSType out.RBDPool = in.RBDPool out.RadosUser = in.RadosUser out.Keyring = in.Keyring - out.SecretRef = (*v1.SecretReference)(unsafe.Pointer(in.SecretRef)) + out.SecretRef = (*corev1.SecretReference)(unsafe.Pointer(in.SecretRef)) out.ReadOnly = in.ReadOnly return nil } // Convert_core_RBDPersistentVolumeSource_To_v1_RBDPersistentVolumeSource is an autogenerated conversion function. -func Convert_core_RBDPersistentVolumeSource_To_v1_RBDPersistentVolumeSource(in *core.RBDPersistentVolumeSource, out *v1.RBDPersistentVolumeSource, s conversion.Scope) error { +func Convert_core_RBDPersistentVolumeSource_To_v1_RBDPersistentVolumeSource(in *core.RBDPersistentVolumeSource, out *corev1.RBDPersistentVolumeSource, s conversion.Scope) error { return autoConvert_core_RBDPersistentVolumeSource_To_v1_RBDPersistentVolumeSource(in, out, s) } -func autoConvert_v1_RBDVolumeSource_To_core_RBDVolumeSource(in *v1.RBDVolumeSource, out *core.RBDVolumeSource, s conversion.Scope) error { +func autoConvert_v1_RBDVolumeSource_To_core_RBDVolumeSource(in *corev1.RBDVolumeSource, out *core.RBDVolumeSource, s conversion.Scope) error { out.CephMonitors = *(*[]string)(unsafe.Pointer(&in.CephMonitors)) out.RBDImage = in.RBDImage out.FSType = in.FSType @@ -7246,28 +7264,28 @@ func autoConvert_v1_RBDVolumeSource_To_core_RBDVolumeSource(in *v1.RBDVolumeSour } // Convert_v1_RBDVolumeSource_To_core_RBDVolumeSource is an autogenerated conversion function. 
-func Convert_v1_RBDVolumeSource_To_core_RBDVolumeSource(in *v1.RBDVolumeSource, out *core.RBDVolumeSource, s conversion.Scope) error { +func Convert_v1_RBDVolumeSource_To_core_RBDVolumeSource(in *corev1.RBDVolumeSource, out *core.RBDVolumeSource, s conversion.Scope) error { return autoConvert_v1_RBDVolumeSource_To_core_RBDVolumeSource(in, out, s) } -func autoConvert_core_RBDVolumeSource_To_v1_RBDVolumeSource(in *core.RBDVolumeSource, out *v1.RBDVolumeSource, s conversion.Scope) error { +func autoConvert_core_RBDVolumeSource_To_v1_RBDVolumeSource(in *core.RBDVolumeSource, out *corev1.RBDVolumeSource, s conversion.Scope) error { out.CephMonitors = *(*[]string)(unsafe.Pointer(&in.CephMonitors)) out.RBDImage = in.RBDImage out.FSType = in.FSType out.RBDPool = in.RBDPool out.RadosUser = in.RadosUser out.Keyring = in.Keyring - out.SecretRef = (*v1.LocalObjectReference)(unsafe.Pointer(in.SecretRef)) + out.SecretRef = (*corev1.LocalObjectReference)(unsafe.Pointer(in.SecretRef)) out.ReadOnly = in.ReadOnly return nil } // Convert_core_RBDVolumeSource_To_v1_RBDVolumeSource is an autogenerated conversion function. -func Convert_core_RBDVolumeSource_To_v1_RBDVolumeSource(in *core.RBDVolumeSource, out *v1.RBDVolumeSource, s conversion.Scope) error { +func Convert_core_RBDVolumeSource_To_v1_RBDVolumeSource(in *core.RBDVolumeSource, out *corev1.RBDVolumeSource, s conversion.Scope) error { return autoConvert_core_RBDVolumeSource_To_v1_RBDVolumeSource(in, out, s) } -func autoConvert_v1_RangeAllocation_To_core_RangeAllocation(in *v1.RangeAllocation, out *core.RangeAllocation, s conversion.Scope) error { +func autoConvert_v1_RangeAllocation_To_core_RangeAllocation(in *corev1.RangeAllocation, out *core.RangeAllocation, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta out.Range = in.Range out.Data = *(*[]byte)(unsafe.Pointer(&in.Data)) @@ -7275,11 +7293,11 @@ func autoConvert_v1_RangeAllocation_To_core_RangeAllocation(in *v1.RangeAllocati } // Convert_v1_RangeAllocation_To_core_RangeAllocation is an autogenerated conversion function. -func Convert_v1_RangeAllocation_To_core_RangeAllocation(in *v1.RangeAllocation, out *core.RangeAllocation, s conversion.Scope) error { +func Convert_v1_RangeAllocation_To_core_RangeAllocation(in *corev1.RangeAllocation, out *core.RangeAllocation, s conversion.Scope) error { return autoConvert_v1_RangeAllocation_To_core_RangeAllocation(in, out, s) } -func autoConvert_core_RangeAllocation_To_v1_RangeAllocation(in *core.RangeAllocation, out *v1.RangeAllocation, s conversion.Scope) error { +func autoConvert_core_RangeAllocation_To_v1_RangeAllocation(in *core.RangeAllocation, out *corev1.RangeAllocation, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta out.Range = in.Range out.Data = *(*[]byte)(unsafe.Pointer(&in.Data)) @@ -7287,11 +7305,11 @@ func autoConvert_core_RangeAllocation_To_v1_RangeAllocation(in *core.RangeAlloca } // Convert_core_RangeAllocation_To_v1_RangeAllocation is an autogenerated conversion function. 
-func Convert_core_RangeAllocation_To_v1_RangeAllocation(in *core.RangeAllocation, out *v1.RangeAllocation, s conversion.Scope) error { +func Convert_core_RangeAllocation_To_v1_RangeAllocation(in *core.RangeAllocation, out *corev1.RangeAllocation, s conversion.Scope) error { return autoConvert_core_RangeAllocation_To_v1_RangeAllocation(in, out, s) } -func autoConvert_v1_ReplicationController_To_core_ReplicationController(in *v1.ReplicationController, out *core.ReplicationController, s conversion.Scope) error { +func autoConvert_v1_ReplicationController_To_core_ReplicationController(in *corev1.ReplicationController, out *core.ReplicationController, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta if err := Convert_v1_ReplicationControllerSpec_To_core_ReplicationControllerSpec(&in.Spec, &out.Spec, s); err != nil { return err @@ -7303,11 +7321,11 @@ func autoConvert_v1_ReplicationController_To_core_ReplicationController(in *v1.R } // Convert_v1_ReplicationController_To_core_ReplicationController is an autogenerated conversion function. -func Convert_v1_ReplicationController_To_core_ReplicationController(in *v1.ReplicationController, out *core.ReplicationController, s conversion.Scope) error { +func Convert_v1_ReplicationController_To_core_ReplicationController(in *corev1.ReplicationController, out *core.ReplicationController, s conversion.Scope) error { return autoConvert_v1_ReplicationController_To_core_ReplicationController(in, out, s) } -func autoConvert_core_ReplicationController_To_v1_ReplicationController(in *core.ReplicationController, out *v1.ReplicationController, s conversion.Scope) error { +func autoConvert_core_ReplicationController_To_v1_ReplicationController(in *core.ReplicationController, out *corev1.ReplicationController, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta if err := Convert_core_ReplicationControllerSpec_To_v1_ReplicationControllerSpec(&in.Spec, &out.Spec, s); err != nil { return err @@ -7319,11 +7337,11 @@ func autoConvert_core_ReplicationController_To_v1_ReplicationController(in *core } // Convert_core_ReplicationController_To_v1_ReplicationController is an autogenerated conversion function. -func Convert_core_ReplicationController_To_v1_ReplicationController(in *core.ReplicationController, out *v1.ReplicationController, s conversion.Scope) error { +func Convert_core_ReplicationController_To_v1_ReplicationController(in *core.ReplicationController, out *corev1.ReplicationController, s conversion.Scope) error { return autoConvert_core_ReplicationController_To_v1_ReplicationController(in, out, s) } -func autoConvert_v1_ReplicationControllerCondition_To_core_ReplicationControllerCondition(in *v1.ReplicationControllerCondition, out *core.ReplicationControllerCondition, s conversion.Scope) error { +func autoConvert_v1_ReplicationControllerCondition_To_core_ReplicationControllerCondition(in *corev1.ReplicationControllerCondition, out *core.ReplicationControllerCondition, s conversion.Scope) error { out.Type = core.ReplicationControllerConditionType(in.Type) out.Status = core.ConditionStatus(in.Status) out.LastTransitionTime = in.LastTransitionTime @@ -7333,13 +7351,13 @@ func autoConvert_v1_ReplicationControllerCondition_To_core_ReplicationController } // Convert_v1_ReplicationControllerCondition_To_core_ReplicationControllerCondition is an autogenerated conversion function. 
-func Convert_v1_ReplicationControllerCondition_To_core_ReplicationControllerCondition(in *v1.ReplicationControllerCondition, out *core.ReplicationControllerCondition, s conversion.Scope) error { +func Convert_v1_ReplicationControllerCondition_To_core_ReplicationControllerCondition(in *corev1.ReplicationControllerCondition, out *core.ReplicationControllerCondition, s conversion.Scope) error { return autoConvert_v1_ReplicationControllerCondition_To_core_ReplicationControllerCondition(in, out, s) } -func autoConvert_core_ReplicationControllerCondition_To_v1_ReplicationControllerCondition(in *core.ReplicationControllerCondition, out *v1.ReplicationControllerCondition, s conversion.Scope) error { - out.Type = v1.ReplicationControllerConditionType(in.Type) - out.Status = v1.ConditionStatus(in.Status) +func autoConvert_core_ReplicationControllerCondition_To_v1_ReplicationControllerCondition(in *core.ReplicationControllerCondition, out *corev1.ReplicationControllerCondition, s conversion.Scope) error { + out.Type = corev1.ReplicationControllerConditionType(in.Type) + out.Status = corev1.ConditionStatus(in.Status) out.LastTransitionTime = in.LastTransitionTime out.Reason = in.Reason out.Message = in.Message @@ -7347,11 +7365,11 @@ func autoConvert_core_ReplicationControllerCondition_To_v1_ReplicationController } // Convert_core_ReplicationControllerCondition_To_v1_ReplicationControllerCondition is an autogenerated conversion function. -func Convert_core_ReplicationControllerCondition_To_v1_ReplicationControllerCondition(in *core.ReplicationControllerCondition, out *v1.ReplicationControllerCondition, s conversion.Scope) error { +func Convert_core_ReplicationControllerCondition_To_v1_ReplicationControllerCondition(in *core.ReplicationControllerCondition, out *corev1.ReplicationControllerCondition, s conversion.Scope) error { return autoConvert_core_ReplicationControllerCondition_To_v1_ReplicationControllerCondition(in, out, s) } -func autoConvert_v1_ReplicationControllerList_To_core_ReplicationControllerList(in *v1.ReplicationControllerList, out *core.ReplicationControllerList, s conversion.Scope) error { +func autoConvert_v1_ReplicationControllerList_To_core_ReplicationControllerList(in *corev1.ReplicationControllerList, out *core.ReplicationControllerList, s conversion.Scope) error { out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items @@ -7368,15 +7386,15 @@ func autoConvert_v1_ReplicationControllerList_To_core_ReplicationControllerList( } // Convert_v1_ReplicationControllerList_To_core_ReplicationControllerList is an autogenerated conversion function. 
-func Convert_v1_ReplicationControllerList_To_core_ReplicationControllerList(in *v1.ReplicationControllerList, out *core.ReplicationControllerList, s conversion.Scope) error { +func Convert_v1_ReplicationControllerList_To_core_ReplicationControllerList(in *corev1.ReplicationControllerList, out *core.ReplicationControllerList, s conversion.Scope) error { return autoConvert_v1_ReplicationControllerList_To_core_ReplicationControllerList(in, out, s) } -func autoConvert_core_ReplicationControllerList_To_v1_ReplicationControllerList(in *core.ReplicationControllerList, out *v1.ReplicationControllerList, s conversion.Scope) error { +func autoConvert_core_ReplicationControllerList_To_v1_ReplicationControllerList(in *core.ReplicationControllerList, out *corev1.ReplicationControllerList, s conversion.Scope) error { out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items - *out = make([]v1.ReplicationController, len(*in)) + *out = make([]corev1.ReplicationController, len(*in)) for i := range *in { if err := Convert_core_ReplicationController_To_v1_ReplicationController(&(*in)[i], &(*out)[i], s); err != nil { return err @@ -7389,11 +7407,11 @@ func autoConvert_core_ReplicationControllerList_To_v1_ReplicationControllerList( } // Convert_core_ReplicationControllerList_To_v1_ReplicationControllerList is an autogenerated conversion function. -func Convert_core_ReplicationControllerList_To_v1_ReplicationControllerList(in *core.ReplicationControllerList, out *v1.ReplicationControllerList, s conversion.Scope) error { +func Convert_core_ReplicationControllerList_To_v1_ReplicationControllerList(in *core.ReplicationControllerList, out *corev1.ReplicationControllerList, s conversion.Scope) error { return autoConvert_core_ReplicationControllerList_To_v1_ReplicationControllerList(in, out, s) } -func autoConvert_v1_ReplicationControllerSpec_To_core_ReplicationControllerSpec(in *v1.ReplicationControllerSpec, out *core.ReplicationControllerSpec, s conversion.Scope) error { +func autoConvert_v1_ReplicationControllerSpec_To_core_ReplicationControllerSpec(in *corev1.ReplicationControllerSpec, out *core.ReplicationControllerSpec, s conversion.Scope) error { if err := metav1.Convert_Pointer_int32_To_int32(&in.Replicas, &out.Replicas, s); err != nil { return err } @@ -7411,7 +7429,7 @@ func autoConvert_v1_ReplicationControllerSpec_To_core_ReplicationControllerSpec( return nil } -func autoConvert_core_ReplicationControllerSpec_To_v1_ReplicationControllerSpec(in *core.ReplicationControllerSpec, out *v1.ReplicationControllerSpec, s conversion.Scope) error { +func autoConvert_core_ReplicationControllerSpec_To_v1_ReplicationControllerSpec(in *core.ReplicationControllerSpec, out *corev1.ReplicationControllerSpec, s conversion.Scope) error { if err := metav1.Convert_int32_To_Pointer_int32(&in.Replicas, &out.Replicas, s); err != nil { return err } @@ -7419,7 +7437,7 @@ func autoConvert_core_ReplicationControllerSpec_To_v1_ReplicationControllerSpec( out.Selector = *(*map[string]string)(unsafe.Pointer(&in.Selector)) if in.Template != nil { in, out := &in.Template, &out.Template - *out = new(v1.PodTemplateSpec) + *out = new(corev1.PodTemplateSpec) if err := Convert_core_PodTemplateSpec_To_v1_PodTemplateSpec(*in, *out, s); err != nil { return err } @@ -7429,7 +7447,7 @@ func autoConvert_core_ReplicationControllerSpec_To_v1_ReplicationControllerSpec( return nil } -func autoConvert_v1_ReplicationControllerStatus_To_core_ReplicationControllerStatus(in *v1.ReplicationControllerStatus, out 
*core.ReplicationControllerStatus, s conversion.Scope) error { +func autoConvert_v1_ReplicationControllerStatus_To_core_ReplicationControllerStatus(in *corev1.ReplicationControllerStatus, out *core.ReplicationControllerStatus, s conversion.Scope) error { out.Replicas = in.Replicas out.FullyLabeledReplicas = in.FullyLabeledReplicas out.ReadyReplicas = in.ReadyReplicas @@ -7440,48 +7458,48 @@ func autoConvert_v1_ReplicationControllerStatus_To_core_ReplicationControllerSta } // Convert_v1_ReplicationControllerStatus_To_core_ReplicationControllerStatus is an autogenerated conversion function. -func Convert_v1_ReplicationControllerStatus_To_core_ReplicationControllerStatus(in *v1.ReplicationControllerStatus, out *core.ReplicationControllerStatus, s conversion.Scope) error { +func Convert_v1_ReplicationControllerStatus_To_core_ReplicationControllerStatus(in *corev1.ReplicationControllerStatus, out *core.ReplicationControllerStatus, s conversion.Scope) error { return autoConvert_v1_ReplicationControllerStatus_To_core_ReplicationControllerStatus(in, out, s) } -func autoConvert_core_ReplicationControllerStatus_To_v1_ReplicationControllerStatus(in *core.ReplicationControllerStatus, out *v1.ReplicationControllerStatus, s conversion.Scope) error { +func autoConvert_core_ReplicationControllerStatus_To_v1_ReplicationControllerStatus(in *core.ReplicationControllerStatus, out *corev1.ReplicationControllerStatus, s conversion.Scope) error { out.Replicas = in.Replicas out.FullyLabeledReplicas = in.FullyLabeledReplicas out.ReadyReplicas = in.ReadyReplicas out.AvailableReplicas = in.AvailableReplicas out.ObservedGeneration = in.ObservedGeneration - out.Conditions = *(*[]v1.ReplicationControllerCondition)(unsafe.Pointer(&in.Conditions)) + out.Conditions = *(*[]corev1.ReplicationControllerCondition)(unsafe.Pointer(&in.Conditions)) return nil } // Convert_core_ReplicationControllerStatus_To_v1_ReplicationControllerStatus is an autogenerated conversion function. -func Convert_core_ReplicationControllerStatus_To_v1_ReplicationControllerStatus(in *core.ReplicationControllerStatus, out *v1.ReplicationControllerStatus, s conversion.Scope) error { +func Convert_core_ReplicationControllerStatus_To_v1_ReplicationControllerStatus(in *core.ReplicationControllerStatus, out *corev1.ReplicationControllerStatus, s conversion.Scope) error { return autoConvert_core_ReplicationControllerStatus_To_v1_ReplicationControllerStatus(in, out, s) } -func autoConvert_v1_ResourceClaim_To_core_ResourceClaim(in *v1.ResourceClaim, out *core.ResourceClaim, s conversion.Scope) error { +func autoConvert_v1_ResourceClaim_To_core_ResourceClaim(in *corev1.ResourceClaim, out *core.ResourceClaim, s conversion.Scope) error { out.Name = in.Name out.Request = in.Request return nil } // Convert_v1_ResourceClaim_To_core_ResourceClaim is an autogenerated conversion function. 
-func Convert_v1_ResourceClaim_To_core_ResourceClaim(in *v1.ResourceClaim, out *core.ResourceClaim, s conversion.Scope) error { +func Convert_v1_ResourceClaim_To_core_ResourceClaim(in *corev1.ResourceClaim, out *core.ResourceClaim, s conversion.Scope) error { return autoConvert_v1_ResourceClaim_To_core_ResourceClaim(in, out, s) } -func autoConvert_core_ResourceClaim_To_v1_ResourceClaim(in *core.ResourceClaim, out *v1.ResourceClaim, s conversion.Scope) error { +func autoConvert_core_ResourceClaim_To_v1_ResourceClaim(in *core.ResourceClaim, out *corev1.ResourceClaim, s conversion.Scope) error { out.Name = in.Name out.Request = in.Request return nil } // Convert_core_ResourceClaim_To_v1_ResourceClaim is an autogenerated conversion function. -func Convert_core_ResourceClaim_To_v1_ResourceClaim(in *core.ResourceClaim, out *v1.ResourceClaim, s conversion.Scope) error { +func Convert_core_ResourceClaim_To_v1_ResourceClaim(in *core.ResourceClaim, out *corev1.ResourceClaim, s conversion.Scope) error { return autoConvert_core_ResourceClaim_To_v1_ResourceClaim(in, out, s) } -func autoConvert_v1_ResourceFieldSelector_To_core_ResourceFieldSelector(in *v1.ResourceFieldSelector, out *core.ResourceFieldSelector, s conversion.Scope) error { +func autoConvert_v1_ResourceFieldSelector_To_core_ResourceFieldSelector(in *corev1.ResourceFieldSelector, out *core.ResourceFieldSelector, s conversion.Scope) error { out.ContainerName = in.ContainerName out.Resource = in.Resource out.Divisor = in.Divisor @@ -7489,11 +7507,11 @@ func autoConvert_v1_ResourceFieldSelector_To_core_ResourceFieldSelector(in *v1.R } // Convert_v1_ResourceFieldSelector_To_core_ResourceFieldSelector is an autogenerated conversion function. -func Convert_v1_ResourceFieldSelector_To_core_ResourceFieldSelector(in *v1.ResourceFieldSelector, out *core.ResourceFieldSelector, s conversion.Scope) error { +func Convert_v1_ResourceFieldSelector_To_core_ResourceFieldSelector(in *corev1.ResourceFieldSelector, out *core.ResourceFieldSelector, s conversion.Scope) error { return autoConvert_v1_ResourceFieldSelector_To_core_ResourceFieldSelector(in, out, s) } -func autoConvert_core_ResourceFieldSelector_To_v1_ResourceFieldSelector(in *core.ResourceFieldSelector, out *v1.ResourceFieldSelector, s conversion.Scope) error { +func autoConvert_core_ResourceFieldSelector_To_v1_ResourceFieldSelector(in *core.ResourceFieldSelector, out *corev1.ResourceFieldSelector, s conversion.Scope) error { out.ContainerName = in.ContainerName out.Resource = in.Resource out.Divisor = in.Divisor @@ -7501,33 +7519,33 @@ func autoConvert_core_ResourceFieldSelector_To_v1_ResourceFieldSelector(in *core } // Convert_core_ResourceFieldSelector_To_v1_ResourceFieldSelector is an autogenerated conversion function. 
-func Convert_core_ResourceFieldSelector_To_v1_ResourceFieldSelector(in *core.ResourceFieldSelector, out *v1.ResourceFieldSelector, s conversion.Scope) error { +func Convert_core_ResourceFieldSelector_To_v1_ResourceFieldSelector(in *core.ResourceFieldSelector, out *corev1.ResourceFieldSelector, s conversion.Scope) error { return autoConvert_core_ResourceFieldSelector_To_v1_ResourceFieldSelector(in, out, s) } -func autoConvert_v1_ResourceHealth_To_core_ResourceHealth(in *v1.ResourceHealth, out *core.ResourceHealth, s conversion.Scope) error { +func autoConvert_v1_ResourceHealth_To_core_ResourceHealth(in *corev1.ResourceHealth, out *core.ResourceHealth, s conversion.Scope) error { out.ResourceID = core.ResourceID(in.ResourceID) out.Health = core.ResourceHealthStatus(in.Health) return nil } // Convert_v1_ResourceHealth_To_core_ResourceHealth is an autogenerated conversion function. -func Convert_v1_ResourceHealth_To_core_ResourceHealth(in *v1.ResourceHealth, out *core.ResourceHealth, s conversion.Scope) error { +func Convert_v1_ResourceHealth_To_core_ResourceHealth(in *corev1.ResourceHealth, out *core.ResourceHealth, s conversion.Scope) error { return autoConvert_v1_ResourceHealth_To_core_ResourceHealth(in, out, s) } -func autoConvert_core_ResourceHealth_To_v1_ResourceHealth(in *core.ResourceHealth, out *v1.ResourceHealth, s conversion.Scope) error { - out.ResourceID = v1.ResourceID(in.ResourceID) - out.Health = v1.ResourceHealthStatus(in.Health) +func autoConvert_core_ResourceHealth_To_v1_ResourceHealth(in *core.ResourceHealth, out *corev1.ResourceHealth, s conversion.Scope) error { + out.ResourceID = corev1.ResourceID(in.ResourceID) + out.Health = corev1.ResourceHealthStatus(in.Health) return nil } // Convert_core_ResourceHealth_To_v1_ResourceHealth is an autogenerated conversion function. -func Convert_core_ResourceHealth_To_v1_ResourceHealth(in *core.ResourceHealth, out *v1.ResourceHealth, s conversion.Scope) error { +func Convert_core_ResourceHealth_To_v1_ResourceHealth(in *core.ResourceHealth, out *corev1.ResourceHealth, s conversion.Scope) error { return autoConvert_core_ResourceHealth_To_v1_ResourceHealth(in, out, s) } -func autoConvert_v1_ResourceQuota_To_core_ResourceQuota(in *v1.ResourceQuota, out *core.ResourceQuota, s conversion.Scope) error { +func autoConvert_v1_ResourceQuota_To_core_ResourceQuota(in *corev1.ResourceQuota, out *core.ResourceQuota, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta if err := Convert_v1_ResourceQuotaSpec_To_core_ResourceQuotaSpec(&in.Spec, &out.Spec, s); err != nil { return err @@ -7539,11 +7557,11 @@ func autoConvert_v1_ResourceQuota_To_core_ResourceQuota(in *v1.ResourceQuota, ou } // Convert_v1_ResourceQuota_To_core_ResourceQuota is an autogenerated conversion function. 
-func Convert_v1_ResourceQuota_To_core_ResourceQuota(in *v1.ResourceQuota, out *core.ResourceQuota, s conversion.Scope) error { +func Convert_v1_ResourceQuota_To_core_ResourceQuota(in *corev1.ResourceQuota, out *core.ResourceQuota, s conversion.Scope) error { return autoConvert_v1_ResourceQuota_To_core_ResourceQuota(in, out, s) } -func autoConvert_core_ResourceQuota_To_v1_ResourceQuota(in *core.ResourceQuota, out *v1.ResourceQuota, s conversion.Scope) error { +func autoConvert_core_ResourceQuota_To_v1_ResourceQuota(in *core.ResourceQuota, out *corev1.ResourceQuota, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta if err := Convert_core_ResourceQuotaSpec_To_v1_ResourceQuotaSpec(&in.Spec, &out.Spec, s); err != nil { return err @@ -7555,33 +7573,33 @@ func autoConvert_core_ResourceQuota_To_v1_ResourceQuota(in *core.ResourceQuota, } // Convert_core_ResourceQuota_To_v1_ResourceQuota is an autogenerated conversion function. -func Convert_core_ResourceQuota_To_v1_ResourceQuota(in *core.ResourceQuota, out *v1.ResourceQuota, s conversion.Scope) error { +func Convert_core_ResourceQuota_To_v1_ResourceQuota(in *core.ResourceQuota, out *corev1.ResourceQuota, s conversion.Scope) error { return autoConvert_core_ResourceQuota_To_v1_ResourceQuota(in, out, s) } -func autoConvert_v1_ResourceQuotaList_To_core_ResourceQuotaList(in *v1.ResourceQuotaList, out *core.ResourceQuotaList, s conversion.Scope) error { +func autoConvert_v1_ResourceQuotaList_To_core_ResourceQuotaList(in *corev1.ResourceQuotaList, out *core.ResourceQuotaList, s conversion.Scope) error { out.ListMeta = in.ListMeta out.Items = *(*[]core.ResourceQuota)(unsafe.Pointer(&in.Items)) return nil } // Convert_v1_ResourceQuotaList_To_core_ResourceQuotaList is an autogenerated conversion function. -func Convert_v1_ResourceQuotaList_To_core_ResourceQuotaList(in *v1.ResourceQuotaList, out *core.ResourceQuotaList, s conversion.Scope) error { +func Convert_v1_ResourceQuotaList_To_core_ResourceQuotaList(in *corev1.ResourceQuotaList, out *core.ResourceQuotaList, s conversion.Scope) error { return autoConvert_v1_ResourceQuotaList_To_core_ResourceQuotaList(in, out, s) } -func autoConvert_core_ResourceQuotaList_To_v1_ResourceQuotaList(in *core.ResourceQuotaList, out *v1.ResourceQuotaList, s conversion.Scope) error { +func autoConvert_core_ResourceQuotaList_To_v1_ResourceQuotaList(in *core.ResourceQuotaList, out *corev1.ResourceQuotaList, s conversion.Scope) error { out.ListMeta = in.ListMeta - out.Items = *(*[]v1.ResourceQuota)(unsafe.Pointer(&in.Items)) + out.Items = *(*[]corev1.ResourceQuota)(unsafe.Pointer(&in.Items)) return nil } // Convert_core_ResourceQuotaList_To_v1_ResourceQuotaList is an autogenerated conversion function. 
-func Convert_core_ResourceQuotaList_To_v1_ResourceQuotaList(in *core.ResourceQuotaList, out *v1.ResourceQuotaList, s conversion.Scope) error { +func Convert_core_ResourceQuotaList_To_v1_ResourceQuotaList(in *core.ResourceQuotaList, out *corev1.ResourceQuotaList, s conversion.Scope) error { return autoConvert_core_ResourceQuotaList_To_v1_ResourceQuotaList(in, out, s) } -func autoConvert_v1_ResourceQuotaSpec_To_core_ResourceQuotaSpec(in *v1.ResourceQuotaSpec, out *core.ResourceQuotaSpec, s conversion.Scope) error { +func autoConvert_v1_ResourceQuotaSpec_To_core_ResourceQuotaSpec(in *corev1.ResourceQuotaSpec, out *core.ResourceQuotaSpec, s conversion.Scope) error { out.Hard = *(*core.ResourceList)(unsafe.Pointer(&in.Hard)) out.Scopes = *(*[]core.ResourceQuotaScope)(unsafe.Pointer(&in.Scopes)) out.ScopeSelector = (*core.ScopeSelector)(unsafe.Pointer(in.ScopeSelector)) @@ -7589,45 +7607,45 @@ func autoConvert_v1_ResourceQuotaSpec_To_core_ResourceQuotaSpec(in *v1.ResourceQ } // Convert_v1_ResourceQuotaSpec_To_core_ResourceQuotaSpec is an autogenerated conversion function. -func Convert_v1_ResourceQuotaSpec_To_core_ResourceQuotaSpec(in *v1.ResourceQuotaSpec, out *core.ResourceQuotaSpec, s conversion.Scope) error { +func Convert_v1_ResourceQuotaSpec_To_core_ResourceQuotaSpec(in *corev1.ResourceQuotaSpec, out *core.ResourceQuotaSpec, s conversion.Scope) error { return autoConvert_v1_ResourceQuotaSpec_To_core_ResourceQuotaSpec(in, out, s) } -func autoConvert_core_ResourceQuotaSpec_To_v1_ResourceQuotaSpec(in *core.ResourceQuotaSpec, out *v1.ResourceQuotaSpec, s conversion.Scope) error { - out.Hard = *(*v1.ResourceList)(unsafe.Pointer(&in.Hard)) - out.Scopes = *(*[]v1.ResourceQuotaScope)(unsafe.Pointer(&in.Scopes)) - out.ScopeSelector = (*v1.ScopeSelector)(unsafe.Pointer(in.ScopeSelector)) +func autoConvert_core_ResourceQuotaSpec_To_v1_ResourceQuotaSpec(in *core.ResourceQuotaSpec, out *corev1.ResourceQuotaSpec, s conversion.Scope) error { + out.Hard = *(*corev1.ResourceList)(unsafe.Pointer(&in.Hard)) + out.Scopes = *(*[]corev1.ResourceQuotaScope)(unsafe.Pointer(&in.Scopes)) + out.ScopeSelector = (*corev1.ScopeSelector)(unsafe.Pointer(in.ScopeSelector)) return nil } // Convert_core_ResourceQuotaSpec_To_v1_ResourceQuotaSpec is an autogenerated conversion function. -func Convert_core_ResourceQuotaSpec_To_v1_ResourceQuotaSpec(in *core.ResourceQuotaSpec, out *v1.ResourceQuotaSpec, s conversion.Scope) error { +func Convert_core_ResourceQuotaSpec_To_v1_ResourceQuotaSpec(in *core.ResourceQuotaSpec, out *corev1.ResourceQuotaSpec, s conversion.Scope) error { return autoConvert_core_ResourceQuotaSpec_To_v1_ResourceQuotaSpec(in, out, s) } -func autoConvert_v1_ResourceQuotaStatus_To_core_ResourceQuotaStatus(in *v1.ResourceQuotaStatus, out *core.ResourceQuotaStatus, s conversion.Scope) error { +func autoConvert_v1_ResourceQuotaStatus_To_core_ResourceQuotaStatus(in *corev1.ResourceQuotaStatus, out *core.ResourceQuotaStatus, s conversion.Scope) error { out.Hard = *(*core.ResourceList)(unsafe.Pointer(&in.Hard)) out.Used = *(*core.ResourceList)(unsafe.Pointer(&in.Used)) return nil } // Convert_v1_ResourceQuotaStatus_To_core_ResourceQuotaStatus is an autogenerated conversion function. 
-func Convert_v1_ResourceQuotaStatus_To_core_ResourceQuotaStatus(in *v1.ResourceQuotaStatus, out *core.ResourceQuotaStatus, s conversion.Scope) error { +func Convert_v1_ResourceQuotaStatus_To_core_ResourceQuotaStatus(in *corev1.ResourceQuotaStatus, out *core.ResourceQuotaStatus, s conversion.Scope) error { return autoConvert_v1_ResourceQuotaStatus_To_core_ResourceQuotaStatus(in, out, s) } -func autoConvert_core_ResourceQuotaStatus_To_v1_ResourceQuotaStatus(in *core.ResourceQuotaStatus, out *v1.ResourceQuotaStatus, s conversion.Scope) error { - out.Hard = *(*v1.ResourceList)(unsafe.Pointer(&in.Hard)) - out.Used = *(*v1.ResourceList)(unsafe.Pointer(&in.Used)) +func autoConvert_core_ResourceQuotaStatus_To_v1_ResourceQuotaStatus(in *core.ResourceQuotaStatus, out *corev1.ResourceQuotaStatus, s conversion.Scope) error { + out.Hard = *(*corev1.ResourceList)(unsafe.Pointer(&in.Hard)) + out.Used = *(*corev1.ResourceList)(unsafe.Pointer(&in.Used)) return nil } // Convert_core_ResourceQuotaStatus_To_v1_ResourceQuotaStatus is an autogenerated conversion function. -func Convert_core_ResourceQuotaStatus_To_v1_ResourceQuotaStatus(in *core.ResourceQuotaStatus, out *v1.ResourceQuotaStatus, s conversion.Scope) error { +func Convert_core_ResourceQuotaStatus_To_v1_ResourceQuotaStatus(in *core.ResourceQuotaStatus, out *corev1.ResourceQuotaStatus, s conversion.Scope) error { return autoConvert_core_ResourceQuotaStatus_To_v1_ResourceQuotaStatus(in, out, s) } -func autoConvert_v1_ResourceRequirements_To_core_ResourceRequirements(in *v1.ResourceRequirements, out *core.ResourceRequirements, s conversion.Scope) error { +func autoConvert_v1_ResourceRequirements_To_core_ResourceRequirements(in *corev1.ResourceRequirements, out *core.ResourceRequirements, s conversion.Scope) error { out.Limits = *(*core.ResourceList)(unsafe.Pointer(&in.Limits)) out.Requests = *(*core.ResourceList)(unsafe.Pointer(&in.Requests)) out.Claims = *(*[]core.ResourceClaim)(unsafe.Pointer(&in.Claims)) @@ -7635,45 +7653,45 @@ func autoConvert_v1_ResourceRequirements_To_core_ResourceRequirements(in *v1.Res } // Convert_v1_ResourceRequirements_To_core_ResourceRequirements is an autogenerated conversion function. -func Convert_v1_ResourceRequirements_To_core_ResourceRequirements(in *v1.ResourceRequirements, out *core.ResourceRequirements, s conversion.Scope) error { +func Convert_v1_ResourceRequirements_To_core_ResourceRequirements(in *corev1.ResourceRequirements, out *core.ResourceRequirements, s conversion.Scope) error { return autoConvert_v1_ResourceRequirements_To_core_ResourceRequirements(in, out, s) } -func autoConvert_core_ResourceRequirements_To_v1_ResourceRequirements(in *core.ResourceRequirements, out *v1.ResourceRequirements, s conversion.Scope) error { - out.Limits = *(*v1.ResourceList)(unsafe.Pointer(&in.Limits)) - out.Requests = *(*v1.ResourceList)(unsafe.Pointer(&in.Requests)) - out.Claims = *(*[]v1.ResourceClaim)(unsafe.Pointer(&in.Claims)) +func autoConvert_core_ResourceRequirements_To_v1_ResourceRequirements(in *core.ResourceRequirements, out *corev1.ResourceRequirements, s conversion.Scope) error { + out.Limits = *(*corev1.ResourceList)(unsafe.Pointer(&in.Limits)) + out.Requests = *(*corev1.ResourceList)(unsafe.Pointer(&in.Requests)) + out.Claims = *(*[]corev1.ResourceClaim)(unsafe.Pointer(&in.Claims)) return nil } // Convert_core_ResourceRequirements_To_v1_ResourceRequirements is an autogenerated conversion function. 
-func Convert_core_ResourceRequirements_To_v1_ResourceRequirements(in *core.ResourceRequirements, out *v1.ResourceRequirements, s conversion.Scope) error { +func Convert_core_ResourceRequirements_To_v1_ResourceRequirements(in *core.ResourceRequirements, out *corev1.ResourceRequirements, s conversion.Scope) error { return autoConvert_core_ResourceRequirements_To_v1_ResourceRequirements(in, out, s) } -func autoConvert_v1_ResourceStatus_To_core_ResourceStatus(in *v1.ResourceStatus, out *core.ResourceStatus, s conversion.Scope) error { +func autoConvert_v1_ResourceStatus_To_core_ResourceStatus(in *corev1.ResourceStatus, out *core.ResourceStatus, s conversion.Scope) error { out.Name = core.ResourceName(in.Name) out.Resources = *(*[]core.ResourceHealth)(unsafe.Pointer(&in.Resources)) return nil } // Convert_v1_ResourceStatus_To_core_ResourceStatus is an autogenerated conversion function. -func Convert_v1_ResourceStatus_To_core_ResourceStatus(in *v1.ResourceStatus, out *core.ResourceStatus, s conversion.Scope) error { +func Convert_v1_ResourceStatus_To_core_ResourceStatus(in *corev1.ResourceStatus, out *core.ResourceStatus, s conversion.Scope) error { return autoConvert_v1_ResourceStatus_To_core_ResourceStatus(in, out, s) } -func autoConvert_core_ResourceStatus_To_v1_ResourceStatus(in *core.ResourceStatus, out *v1.ResourceStatus, s conversion.Scope) error { - out.Name = v1.ResourceName(in.Name) - out.Resources = *(*[]v1.ResourceHealth)(unsafe.Pointer(&in.Resources)) +func autoConvert_core_ResourceStatus_To_v1_ResourceStatus(in *core.ResourceStatus, out *corev1.ResourceStatus, s conversion.Scope) error { + out.Name = corev1.ResourceName(in.Name) + out.Resources = *(*[]corev1.ResourceHealth)(unsafe.Pointer(&in.Resources)) return nil } // Convert_core_ResourceStatus_To_v1_ResourceStatus is an autogenerated conversion function. -func Convert_core_ResourceStatus_To_v1_ResourceStatus(in *core.ResourceStatus, out *v1.ResourceStatus, s conversion.Scope) error { +func Convert_core_ResourceStatus_To_v1_ResourceStatus(in *core.ResourceStatus, out *corev1.ResourceStatus, s conversion.Scope) error { return autoConvert_core_ResourceStatus_To_v1_ResourceStatus(in, out, s) } -func autoConvert_v1_SELinuxOptions_To_core_SELinuxOptions(in *v1.SELinuxOptions, out *core.SELinuxOptions, s conversion.Scope) error { +func autoConvert_v1_SELinuxOptions_To_core_SELinuxOptions(in *corev1.SELinuxOptions, out *core.SELinuxOptions, s conversion.Scope) error { out.User = in.User out.Role = in.Role out.Type = in.Type @@ -7682,11 +7700,11 @@ func autoConvert_v1_SELinuxOptions_To_core_SELinuxOptions(in *v1.SELinuxOptions, } // Convert_v1_SELinuxOptions_To_core_SELinuxOptions is an autogenerated conversion function. 
-func Convert_v1_SELinuxOptions_To_core_SELinuxOptions(in *v1.SELinuxOptions, out *core.SELinuxOptions, s conversion.Scope) error { +func Convert_v1_SELinuxOptions_To_core_SELinuxOptions(in *corev1.SELinuxOptions, out *core.SELinuxOptions, s conversion.Scope) error { return autoConvert_v1_SELinuxOptions_To_core_SELinuxOptions(in, out, s) } -func autoConvert_core_SELinuxOptions_To_v1_SELinuxOptions(in *core.SELinuxOptions, out *v1.SELinuxOptions, s conversion.Scope) error { +func autoConvert_core_SELinuxOptions_To_v1_SELinuxOptions(in *core.SELinuxOptions, out *corev1.SELinuxOptions, s conversion.Scope) error { out.User = in.User out.Role = in.Role out.Type = in.Type @@ -7695,11 +7713,11 @@ func autoConvert_core_SELinuxOptions_To_v1_SELinuxOptions(in *core.SELinuxOption } // Convert_core_SELinuxOptions_To_v1_SELinuxOptions is an autogenerated conversion function. -func Convert_core_SELinuxOptions_To_v1_SELinuxOptions(in *core.SELinuxOptions, out *v1.SELinuxOptions, s conversion.Scope) error { +func Convert_core_SELinuxOptions_To_v1_SELinuxOptions(in *core.SELinuxOptions, out *corev1.SELinuxOptions, s conversion.Scope) error { return autoConvert_core_SELinuxOptions_To_v1_SELinuxOptions(in, out, s) } -func autoConvert_v1_ScaleIOPersistentVolumeSource_To_core_ScaleIOPersistentVolumeSource(in *v1.ScaleIOPersistentVolumeSource, out *core.ScaleIOPersistentVolumeSource, s conversion.Scope) error { +func autoConvert_v1_ScaleIOPersistentVolumeSource_To_core_ScaleIOPersistentVolumeSource(in *corev1.ScaleIOPersistentVolumeSource, out *core.ScaleIOPersistentVolumeSource, s conversion.Scope) error { out.Gateway = in.Gateway out.System = in.System out.SecretRef = (*core.SecretReference)(unsafe.Pointer(in.SecretRef)) @@ -7714,14 +7732,14 @@ func autoConvert_v1_ScaleIOPersistentVolumeSource_To_core_ScaleIOPersistentVolum } // Convert_v1_ScaleIOPersistentVolumeSource_To_core_ScaleIOPersistentVolumeSource is an autogenerated conversion function. -func Convert_v1_ScaleIOPersistentVolumeSource_To_core_ScaleIOPersistentVolumeSource(in *v1.ScaleIOPersistentVolumeSource, out *core.ScaleIOPersistentVolumeSource, s conversion.Scope) error { +func Convert_v1_ScaleIOPersistentVolumeSource_To_core_ScaleIOPersistentVolumeSource(in *corev1.ScaleIOPersistentVolumeSource, out *core.ScaleIOPersistentVolumeSource, s conversion.Scope) error { return autoConvert_v1_ScaleIOPersistentVolumeSource_To_core_ScaleIOPersistentVolumeSource(in, out, s) } -func autoConvert_core_ScaleIOPersistentVolumeSource_To_v1_ScaleIOPersistentVolumeSource(in *core.ScaleIOPersistentVolumeSource, out *v1.ScaleIOPersistentVolumeSource, s conversion.Scope) error { +func autoConvert_core_ScaleIOPersistentVolumeSource_To_v1_ScaleIOPersistentVolumeSource(in *core.ScaleIOPersistentVolumeSource, out *corev1.ScaleIOPersistentVolumeSource, s conversion.Scope) error { out.Gateway = in.Gateway out.System = in.System - out.SecretRef = (*v1.SecretReference)(unsafe.Pointer(in.SecretRef)) + out.SecretRef = (*corev1.SecretReference)(unsafe.Pointer(in.SecretRef)) out.SSLEnabled = in.SSLEnabled out.ProtectionDomain = in.ProtectionDomain out.StoragePool = in.StoragePool @@ -7733,11 +7751,11 @@ func autoConvert_core_ScaleIOPersistentVolumeSource_To_v1_ScaleIOPersistentVolum } // Convert_core_ScaleIOPersistentVolumeSource_To_v1_ScaleIOPersistentVolumeSource is an autogenerated conversion function. 
-func Convert_core_ScaleIOPersistentVolumeSource_To_v1_ScaleIOPersistentVolumeSource(in *core.ScaleIOPersistentVolumeSource, out *v1.ScaleIOPersistentVolumeSource, s conversion.Scope) error { +func Convert_core_ScaleIOPersistentVolumeSource_To_v1_ScaleIOPersistentVolumeSource(in *core.ScaleIOPersistentVolumeSource, out *corev1.ScaleIOPersistentVolumeSource, s conversion.Scope) error { return autoConvert_core_ScaleIOPersistentVolumeSource_To_v1_ScaleIOPersistentVolumeSource(in, out, s) } -func autoConvert_v1_ScaleIOVolumeSource_To_core_ScaleIOVolumeSource(in *v1.ScaleIOVolumeSource, out *core.ScaleIOVolumeSource, s conversion.Scope) error { +func autoConvert_v1_ScaleIOVolumeSource_To_core_ScaleIOVolumeSource(in *corev1.ScaleIOVolumeSource, out *core.ScaleIOVolumeSource, s conversion.Scope) error { out.Gateway = in.Gateway out.System = in.System out.SecretRef = (*core.LocalObjectReference)(unsafe.Pointer(in.SecretRef)) @@ -7752,14 +7770,14 @@ func autoConvert_v1_ScaleIOVolumeSource_To_core_ScaleIOVolumeSource(in *v1.Scale } // Convert_v1_ScaleIOVolumeSource_To_core_ScaleIOVolumeSource is an autogenerated conversion function. -func Convert_v1_ScaleIOVolumeSource_To_core_ScaleIOVolumeSource(in *v1.ScaleIOVolumeSource, out *core.ScaleIOVolumeSource, s conversion.Scope) error { +func Convert_v1_ScaleIOVolumeSource_To_core_ScaleIOVolumeSource(in *corev1.ScaleIOVolumeSource, out *core.ScaleIOVolumeSource, s conversion.Scope) error { return autoConvert_v1_ScaleIOVolumeSource_To_core_ScaleIOVolumeSource(in, out, s) } -func autoConvert_core_ScaleIOVolumeSource_To_v1_ScaleIOVolumeSource(in *core.ScaleIOVolumeSource, out *v1.ScaleIOVolumeSource, s conversion.Scope) error { +func autoConvert_core_ScaleIOVolumeSource_To_v1_ScaleIOVolumeSource(in *core.ScaleIOVolumeSource, out *corev1.ScaleIOVolumeSource, s conversion.Scope) error { out.Gateway = in.Gateway out.System = in.System - out.SecretRef = (*v1.LocalObjectReference)(unsafe.Pointer(in.SecretRef)) + out.SecretRef = (*corev1.LocalObjectReference)(unsafe.Pointer(in.SecretRef)) out.SSLEnabled = in.SSLEnabled out.ProtectionDomain = in.ProtectionDomain out.StoragePool = in.StoragePool @@ -7771,31 +7789,31 @@ func autoConvert_core_ScaleIOVolumeSource_To_v1_ScaleIOVolumeSource(in *core.Sca } // Convert_core_ScaleIOVolumeSource_To_v1_ScaleIOVolumeSource is an autogenerated conversion function. -func Convert_core_ScaleIOVolumeSource_To_v1_ScaleIOVolumeSource(in *core.ScaleIOVolumeSource, out *v1.ScaleIOVolumeSource, s conversion.Scope) error { +func Convert_core_ScaleIOVolumeSource_To_v1_ScaleIOVolumeSource(in *core.ScaleIOVolumeSource, out *corev1.ScaleIOVolumeSource, s conversion.Scope) error { return autoConvert_core_ScaleIOVolumeSource_To_v1_ScaleIOVolumeSource(in, out, s) } -func autoConvert_v1_ScopeSelector_To_core_ScopeSelector(in *v1.ScopeSelector, out *core.ScopeSelector, s conversion.Scope) error { +func autoConvert_v1_ScopeSelector_To_core_ScopeSelector(in *corev1.ScopeSelector, out *core.ScopeSelector, s conversion.Scope) error { out.MatchExpressions = *(*[]core.ScopedResourceSelectorRequirement)(unsafe.Pointer(&in.MatchExpressions)) return nil } // Convert_v1_ScopeSelector_To_core_ScopeSelector is an autogenerated conversion function. 
-func Convert_v1_ScopeSelector_To_core_ScopeSelector(in *v1.ScopeSelector, out *core.ScopeSelector, s conversion.Scope) error { +func Convert_v1_ScopeSelector_To_core_ScopeSelector(in *corev1.ScopeSelector, out *core.ScopeSelector, s conversion.Scope) error { return autoConvert_v1_ScopeSelector_To_core_ScopeSelector(in, out, s) } -func autoConvert_core_ScopeSelector_To_v1_ScopeSelector(in *core.ScopeSelector, out *v1.ScopeSelector, s conversion.Scope) error { - out.MatchExpressions = *(*[]v1.ScopedResourceSelectorRequirement)(unsafe.Pointer(&in.MatchExpressions)) +func autoConvert_core_ScopeSelector_To_v1_ScopeSelector(in *core.ScopeSelector, out *corev1.ScopeSelector, s conversion.Scope) error { + out.MatchExpressions = *(*[]corev1.ScopedResourceSelectorRequirement)(unsafe.Pointer(&in.MatchExpressions)) return nil } // Convert_core_ScopeSelector_To_v1_ScopeSelector is an autogenerated conversion function. -func Convert_core_ScopeSelector_To_v1_ScopeSelector(in *core.ScopeSelector, out *v1.ScopeSelector, s conversion.Scope) error { +func Convert_core_ScopeSelector_To_v1_ScopeSelector(in *core.ScopeSelector, out *corev1.ScopeSelector, s conversion.Scope) error { return autoConvert_core_ScopeSelector_To_v1_ScopeSelector(in, out, s) } -func autoConvert_v1_ScopedResourceSelectorRequirement_To_core_ScopedResourceSelectorRequirement(in *v1.ScopedResourceSelectorRequirement, out *core.ScopedResourceSelectorRequirement, s conversion.Scope) error { +func autoConvert_v1_ScopedResourceSelectorRequirement_To_core_ScopedResourceSelectorRequirement(in *corev1.ScopedResourceSelectorRequirement, out *core.ScopedResourceSelectorRequirement, s conversion.Scope) error { out.ScopeName = core.ResourceQuotaScope(in.ScopeName) out.Operator = core.ScopeSelectorOperator(in.Operator) out.Values = *(*[]string)(unsafe.Pointer(&in.Values)) @@ -7803,45 +7821,45 @@ func autoConvert_v1_ScopedResourceSelectorRequirement_To_core_ScopedResourceSele } // Convert_v1_ScopedResourceSelectorRequirement_To_core_ScopedResourceSelectorRequirement is an autogenerated conversion function. -func Convert_v1_ScopedResourceSelectorRequirement_To_core_ScopedResourceSelectorRequirement(in *v1.ScopedResourceSelectorRequirement, out *core.ScopedResourceSelectorRequirement, s conversion.Scope) error { +func Convert_v1_ScopedResourceSelectorRequirement_To_core_ScopedResourceSelectorRequirement(in *corev1.ScopedResourceSelectorRequirement, out *core.ScopedResourceSelectorRequirement, s conversion.Scope) error { return autoConvert_v1_ScopedResourceSelectorRequirement_To_core_ScopedResourceSelectorRequirement(in, out, s) } -func autoConvert_core_ScopedResourceSelectorRequirement_To_v1_ScopedResourceSelectorRequirement(in *core.ScopedResourceSelectorRequirement, out *v1.ScopedResourceSelectorRequirement, s conversion.Scope) error { - out.ScopeName = v1.ResourceQuotaScope(in.ScopeName) - out.Operator = v1.ScopeSelectorOperator(in.Operator) +func autoConvert_core_ScopedResourceSelectorRequirement_To_v1_ScopedResourceSelectorRequirement(in *core.ScopedResourceSelectorRequirement, out *corev1.ScopedResourceSelectorRequirement, s conversion.Scope) error { + out.ScopeName = corev1.ResourceQuotaScope(in.ScopeName) + out.Operator = corev1.ScopeSelectorOperator(in.Operator) out.Values = *(*[]string)(unsafe.Pointer(&in.Values)) return nil } // Convert_core_ScopedResourceSelectorRequirement_To_v1_ScopedResourceSelectorRequirement is an autogenerated conversion function. 
-func Convert_core_ScopedResourceSelectorRequirement_To_v1_ScopedResourceSelectorRequirement(in *core.ScopedResourceSelectorRequirement, out *v1.ScopedResourceSelectorRequirement, s conversion.Scope) error { +func Convert_core_ScopedResourceSelectorRequirement_To_v1_ScopedResourceSelectorRequirement(in *core.ScopedResourceSelectorRequirement, out *corev1.ScopedResourceSelectorRequirement, s conversion.Scope) error { return autoConvert_core_ScopedResourceSelectorRequirement_To_v1_ScopedResourceSelectorRequirement(in, out, s) } -func autoConvert_v1_SeccompProfile_To_core_SeccompProfile(in *v1.SeccompProfile, out *core.SeccompProfile, s conversion.Scope) error { +func autoConvert_v1_SeccompProfile_To_core_SeccompProfile(in *corev1.SeccompProfile, out *core.SeccompProfile, s conversion.Scope) error { out.Type = core.SeccompProfileType(in.Type) out.LocalhostProfile = (*string)(unsafe.Pointer(in.LocalhostProfile)) return nil } // Convert_v1_SeccompProfile_To_core_SeccompProfile is an autogenerated conversion function. -func Convert_v1_SeccompProfile_To_core_SeccompProfile(in *v1.SeccompProfile, out *core.SeccompProfile, s conversion.Scope) error { +func Convert_v1_SeccompProfile_To_core_SeccompProfile(in *corev1.SeccompProfile, out *core.SeccompProfile, s conversion.Scope) error { return autoConvert_v1_SeccompProfile_To_core_SeccompProfile(in, out, s) } -func autoConvert_core_SeccompProfile_To_v1_SeccompProfile(in *core.SeccompProfile, out *v1.SeccompProfile, s conversion.Scope) error { - out.Type = v1.SeccompProfileType(in.Type) +func autoConvert_core_SeccompProfile_To_v1_SeccompProfile(in *core.SeccompProfile, out *corev1.SeccompProfile, s conversion.Scope) error { + out.Type = corev1.SeccompProfileType(in.Type) out.LocalhostProfile = (*string)(unsafe.Pointer(in.LocalhostProfile)) return nil } // Convert_core_SeccompProfile_To_v1_SeccompProfile is an autogenerated conversion function. -func Convert_core_SeccompProfile_To_v1_SeccompProfile(in *core.SeccompProfile, out *v1.SeccompProfile, s conversion.Scope) error { +func Convert_core_SeccompProfile_To_v1_SeccompProfile(in *core.SeccompProfile, out *corev1.SeccompProfile, s conversion.Scope) error { return autoConvert_core_SeccompProfile_To_v1_SeccompProfile(in, out, s) } -func autoConvert_v1_Secret_To_core_Secret(in *v1.Secret, out *core.Secret, s conversion.Scope) error { +func autoConvert_v1_Secret_To_core_Secret(in *corev1.Secret, out *core.Secret, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta out.Immutable = (*bool)(unsafe.Pointer(in.Immutable)) out.Data = *(*map[string][]byte)(unsafe.Pointer(&in.Data)) @@ -7850,20 +7868,20 @@ func autoConvert_v1_Secret_To_core_Secret(in *v1.Secret, out *core.Secret, s con return nil } -func autoConvert_core_Secret_To_v1_Secret(in *core.Secret, out *v1.Secret, s conversion.Scope) error { +func autoConvert_core_Secret_To_v1_Secret(in *core.Secret, out *corev1.Secret, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta out.Immutable = (*bool)(unsafe.Pointer(in.Immutable)) out.Data = *(*map[string][]byte)(unsafe.Pointer(&in.Data)) - out.Type = v1.SecretType(in.Type) + out.Type = corev1.SecretType(in.Type) return nil } // Convert_core_Secret_To_v1_Secret is an autogenerated conversion function. 
-func Convert_core_Secret_To_v1_Secret(in *core.Secret, out *v1.Secret, s conversion.Scope) error { +func Convert_core_Secret_To_v1_Secret(in *core.Secret, out *corev1.Secret, s conversion.Scope) error { return autoConvert_core_Secret_To_v1_Secret(in, out, s) } -func autoConvert_v1_SecretEnvSource_To_core_SecretEnvSource(in *v1.SecretEnvSource, out *core.SecretEnvSource, s conversion.Scope) error { +func autoConvert_v1_SecretEnvSource_To_core_SecretEnvSource(in *corev1.SecretEnvSource, out *core.SecretEnvSource, s conversion.Scope) error { if err := Convert_v1_LocalObjectReference_To_core_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { return err } @@ -7872,11 +7890,11 @@ func autoConvert_v1_SecretEnvSource_To_core_SecretEnvSource(in *v1.SecretEnvSour } // Convert_v1_SecretEnvSource_To_core_SecretEnvSource is an autogenerated conversion function. -func Convert_v1_SecretEnvSource_To_core_SecretEnvSource(in *v1.SecretEnvSource, out *core.SecretEnvSource, s conversion.Scope) error { +func Convert_v1_SecretEnvSource_To_core_SecretEnvSource(in *corev1.SecretEnvSource, out *core.SecretEnvSource, s conversion.Scope) error { return autoConvert_v1_SecretEnvSource_To_core_SecretEnvSource(in, out, s) } -func autoConvert_core_SecretEnvSource_To_v1_SecretEnvSource(in *core.SecretEnvSource, out *v1.SecretEnvSource, s conversion.Scope) error { +func autoConvert_core_SecretEnvSource_To_v1_SecretEnvSource(in *core.SecretEnvSource, out *corev1.SecretEnvSource, s conversion.Scope) error { if err := Convert_core_LocalObjectReference_To_v1_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { return err } @@ -7885,11 +7903,11 @@ func autoConvert_core_SecretEnvSource_To_v1_SecretEnvSource(in *core.SecretEnvSo } // Convert_core_SecretEnvSource_To_v1_SecretEnvSource is an autogenerated conversion function. -func Convert_core_SecretEnvSource_To_v1_SecretEnvSource(in *core.SecretEnvSource, out *v1.SecretEnvSource, s conversion.Scope) error { +func Convert_core_SecretEnvSource_To_v1_SecretEnvSource(in *core.SecretEnvSource, out *corev1.SecretEnvSource, s conversion.Scope) error { return autoConvert_core_SecretEnvSource_To_v1_SecretEnvSource(in, out, s) } -func autoConvert_v1_SecretKeySelector_To_core_SecretKeySelector(in *v1.SecretKeySelector, out *core.SecretKeySelector, s conversion.Scope) error { +func autoConvert_v1_SecretKeySelector_To_core_SecretKeySelector(in *corev1.SecretKeySelector, out *core.SecretKeySelector, s conversion.Scope) error { if err := Convert_v1_LocalObjectReference_To_core_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { return err } @@ -7899,11 +7917,11 @@ func autoConvert_v1_SecretKeySelector_To_core_SecretKeySelector(in *v1.SecretKey } // Convert_v1_SecretKeySelector_To_core_SecretKeySelector is an autogenerated conversion function. 
-func Convert_v1_SecretKeySelector_To_core_SecretKeySelector(in *v1.SecretKeySelector, out *core.SecretKeySelector, s conversion.Scope) error { +func Convert_v1_SecretKeySelector_To_core_SecretKeySelector(in *corev1.SecretKeySelector, out *core.SecretKeySelector, s conversion.Scope) error { return autoConvert_v1_SecretKeySelector_To_core_SecretKeySelector(in, out, s) } -func autoConvert_core_SecretKeySelector_To_v1_SecretKeySelector(in *core.SecretKeySelector, out *v1.SecretKeySelector, s conversion.Scope) error { +func autoConvert_core_SecretKeySelector_To_v1_SecretKeySelector(in *core.SecretKeySelector, out *corev1.SecretKeySelector, s conversion.Scope) error { if err := Convert_core_LocalObjectReference_To_v1_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { return err } @@ -7913,11 +7931,11 @@ func autoConvert_core_SecretKeySelector_To_v1_SecretKeySelector(in *core.SecretK } // Convert_core_SecretKeySelector_To_v1_SecretKeySelector is an autogenerated conversion function. -func Convert_core_SecretKeySelector_To_v1_SecretKeySelector(in *core.SecretKeySelector, out *v1.SecretKeySelector, s conversion.Scope) error { +func Convert_core_SecretKeySelector_To_v1_SecretKeySelector(in *core.SecretKeySelector, out *corev1.SecretKeySelector, s conversion.Scope) error { return autoConvert_core_SecretKeySelector_To_v1_SecretKeySelector(in, out, s) } -func autoConvert_v1_SecretList_To_core_SecretList(in *v1.SecretList, out *core.SecretList, s conversion.Scope) error { +func autoConvert_v1_SecretList_To_core_SecretList(in *corev1.SecretList, out *core.SecretList, s conversion.Scope) error { out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items @@ -7934,15 +7952,15 @@ func autoConvert_v1_SecretList_To_core_SecretList(in *v1.SecretList, out *core.S } // Convert_v1_SecretList_To_core_SecretList is an autogenerated conversion function. -func Convert_v1_SecretList_To_core_SecretList(in *v1.SecretList, out *core.SecretList, s conversion.Scope) error { +func Convert_v1_SecretList_To_core_SecretList(in *corev1.SecretList, out *core.SecretList, s conversion.Scope) error { return autoConvert_v1_SecretList_To_core_SecretList(in, out, s) } -func autoConvert_core_SecretList_To_v1_SecretList(in *core.SecretList, out *v1.SecretList, s conversion.Scope) error { +func autoConvert_core_SecretList_To_v1_SecretList(in *core.SecretList, out *corev1.SecretList, s conversion.Scope) error { out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items - *out = make([]v1.Secret, len(*in)) + *out = make([]corev1.Secret, len(*in)) for i := range *in { if err := Convert_core_Secret_To_v1_Secret(&(*in)[i], &(*out)[i], s); err != nil { return err @@ -7955,11 +7973,11 @@ func autoConvert_core_SecretList_To_v1_SecretList(in *core.SecretList, out *v1.S } // Convert_core_SecretList_To_v1_SecretList is an autogenerated conversion function. 
-func Convert_core_SecretList_To_v1_SecretList(in *core.SecretList, out *v1.SecretList, s conversion.Scope) error { +func Convert_core_SecretList_To_v1_SecretList(in *core.SecretList, out *corev1.SecretList, s conversion.Scope) error { return autoConvert_core_SecretList_To_v1_SecretList(in, out, s) } -func autoConvert_v1_SecretProjection_To_core_SecretProjection(in *v1.SecretProjection, out *core.SecretProjection, s conversion.Scope) error { +func autoConvert_v1_SecretProjection_To_core_SecretProjection(in *corev1.SecretProjection, out *core.SecretProjection, s conversion.Scope) error { if err := Convert_v1_LocalObjectReference_To_core_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { return err } @@ -7969,47 +7987,47 @@ func autoConvert_v1_SecretProjection_To_core_SecretProjection(in *v1.SecretProje } // Convert_v1_SecretProjection_To_core_SecretProjection is an autogenerated conversion function. -func Convert_v1_SecretProjection_To_core_SecretProjection(in *v1.SecretProjection, out *core.SecretProjection, s conversion.Scope) error { +func Convert_v1_SecretProjection_To_core_SecretProjection(in *corev1.SecretProjection, out *core.SecretProjection, s conversion.Scope) error { return autoConvert_v1_SecretProjection_To_core_SecretProjection(in, out, s) } -func autoConvert_core_SecretProjection_To_v1_SecretProjection(in *core.SecretProjection, out *v1.SecretProjection, s conversion.Scope) error { +func autoConvert_core_SecretProjection_To_v1_SecretProjection(in *core.SecretProjection, out *corev1.SecretProjection, s conversion.Scope) error { if err := Convert_core_LocalObjectReference_To_v1_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { return err } - out.Items = *(*[]v1.KeyToPath)(unsafe.Pointer(&in.Items)) + out.Items = *(*[]corev1.KeyToPath)(unsafe.Pointer(&in.Items)) out.Optional = (*bool)(unsafe.Pointer(in.Optional)) return nil } // Convert_core_SecretProjection_To_v1_SecretProjection is an autogenerated conversion function. -func Convert_core_SecretProjection_To_v1_SecretProjection(in *core.SecretProjection, out *v1.SecretProjection, s conversion.Scope) error { +func Convert_core_SecretProjection_To_v1_SecretProjection(in *core.SecretProjection, out *corev1.SecretProjection, s conversion.Scope) error { return autoConvert_core_SecretProjection_To_v1_SecretProjection(in, out, s) } -func autoConvert_v1_SecretReference_To_core_SecretReference(in *v1.SecretReference, out *core.SecretReference, s conversion.Scope) error { +func autoConvert_v1_SecretReference_To_core_SecretReference(in *corev1.SecretReference, out *core.SecretReference, s conversion.Scope) error { out.Name = in.Name out.Namespace = in.Namespace return nil } // Convert_v1_SecretReference_To_core_SecretReference is an autogenerated conversion function. 
-func Convert_v1_SecretReference_To_core_SecretReference(in *v1.SecretReference, out *core.SecretReference, s conversion.Scope) error { +func Convert_v1_SecretReference_To_core_SecretReference(in *corev1.SecretReference, out *core.SecretReference, s conversion.Scope) error { return autoConvert_v1_SecretReference_To_core_SecretReference(in, out, s) } -func autoConvert_core_SecretReference_To_v1_SecretReference(in *core.SecretReference, out *v1.SecretReference, s conversion.Scope) error { +func autoConvert_core_SecretReference_To_v1_SecretReference(in *core.SecretReference, out *corev1.SecretReference, s conversion.Scope) error { out.Name = in.Name out.Namespace = in.Namespace return nil } // Convert_core_SecretReference_To_v1_SecretReference is an autogenerated conversion function. -func Convert_core_SecretReference_To_v1_SecretReference(in *core.SecretReference, out *v1.SecretReference, s conversion.Scope) error { +func Convert_core_SecretReference_To_v1_SecretReference(in *core.SecretReference, out *corev1.SecretReference, s conversion.Scope) error { return autoConvert_core_SecretReference_To_v1_SecretReference(in, out, s) } -func autoConvert_v1_SecretVolumeSource_To_core_SecretVolumeSource(in *v1.SecretVolumeSource, out *core.SecretVolumeSource, s conversion.Scope) error { +func autoConvert_v1_SecretVolumeSource_To_core_SecretVolumeSource(in *corev1.SecretVolumeSource, out *core.SecretVolumeSource, s conversion.Scope) error { out.SecretName = in.SecretName out.Items = *(*[]core.KeyToPath)(unsafe.Pointer(&in.Items)) out.DefaultMode = (*int32)(unsafe.Pointer(in.DefaultMode)) @@ -8018,24 +8036,24 @@ func autoConvert_v1_SecretVolumeSource_To_core_SecretVolumeSource(in *v1.SecretV } // Convert_v1_SecretVolumeSource_To_core_SecretVolumeSource is an autogenerated conversion function. -func Convert_v1_SecretVolumeSource_To_core_SecretVolumeSource(in *v1.SecretVolumeSource, out *core.SecretVolumeSource, s conversion.Scope) error { +func Convert_v1_SecretVolumeSource_To_core_SecretVolumeSource(in *corev1.SecretVolumeSource, out *core.SecretVolumeSource, s conversion.Scope) error { return autoConvert_v1_SecretVolumeSource_To_core_SecretVolumeSource(in, out, s) } -func autoConvert_core_SecretVolumeSource_To_v1_SecretVolumeSource(in *core.SecretVolumeSource, out *v1.SecretVolumeSource, s conversion.Scope) error { +func autoConvert_core_SecretVolumeSource_To_v1_SecretVolumeSource(in *core.SecretVolumeSource, out *corev1.SecretVolumeSource, s conversion.Scope) error { out.SecretName = in.SecretName - out.Items = *(*[]v1.KeyToPath)(unsafe.Pointer(&in.Items)) + out.Items = *(*[]corev1.KeyToPath)(unsafe.Pointer(&in.Items)) out.DefaultMode = (*int32)(unsafe.Pointer(in.DefaultMode)) out.Optional = (*bool)(unsafe.Pointer(in.Optional)) return nil } // Convert_core_SecretVolumeSource_To_v1_SecretVolumeSource is an autogenerated conversion function. 
-func Convert_core_SecretVolumeSource_To_v1_SecretVolumeSource(in *core.SecretVolumeSource, out *v1.SecretVolumeSource, s conversion.Scope) error { +func Convert_core_SecretVolumeSource_To_v1_SecretVolumeSource(in *core.SecretVolumeSource, out *corev1.SecretVolumeSource, s conversion.Scope) error { return autoConvert_core_SecretVolumeSource_To_v1_SecretVolumeSource(in, out, s) } -func autoConvert_v1_SecurityContext_To_core_SecurityContext(in *v1.SecurityContext, out *core.SecurityContext, s conversion.Scope) error { +func autoConvert_v1_SecurityContext_To_core_SecurityContext(in *corev1.SecurityContext, out *core.SecurityContext, s conversion.Scope) error { out.Capabilities = (*core.Capabilities)(unsafe.Pointer(in.Capabilities)) out.Privileged = (*bool)(unsafe.Pointer(in.Privileged)) out.SELinuxOptions = (*core.SELinuxOptions)(unsafe.Pointer(in.SELinuxOptions)) @@ -8052,32 +8070,32 @@ func autoConvert_v1_SecurityContext_To_core_SecurityContext(in *v1.SecurityConte } // Convert_v1_SecurityContext_To_core_SecurityContext is an autogenerated conversion function. -func Convert_v1_SecurityContext_To_core_SecurityContext(in *v1.SecurityContext, out *core.SecurityContext, s conversion.Scope) error { +func Convert_v1_SecurityContext_To_core_SecurityContext(in *corev1.SecurityContext, out *core.SecurityContext, s conversion.Scope) error { return autoConvert_v1_SecurityContext_To_core_SecurityContext(in, out, s) } -func autoConvert_core_SecurityContext_To_v1_SecurityContext(in *core.SecurityContext, out *v1.SecurityContext, s conversion.Scope) error { - out.Capabilities = (*v1.Capabilities)(unsafe.Pointer(in.Capabilities)) +func autoConvert_core_SecurityContext_To_v1_SecurityContext(in *core.SecurityContext, out *corev1.SecurityContext, s conversion.Scope) error { + out.Capabilities = (*corev1.Capabilities)(unsafe.Pointer(in.Capabilities)) out.Privileged = (*bool)(unsafe.Pointer(in.Privileged)) - out.SELinuxOptions = (*v1.SELinuxOptions)(unsafe.Pointer(in.SELinuxOptions)) - out.WindowsOptions = (*v1.WindowsSecurityContextOptions)(unsafe.Pointer(in.WindowsOptions)) + out.SELinuxOptions = (*corev1.SELinuxOptions)(unsafe.Pointer(in.SELinuxOptions)) + out.WindowsOptions = (*corev1.WindowsSecurityContextOptions)(unsafe.Pointer(in.WindowsOptions)) out.RunAsUser = (*int64)(unsafe.Pointer(in.RunAsUser)) out.RunAsGroup = (*int64)(unsafe.Pointer(in.RunAsGroup)) out.RunAsNonRoot = (*bool)(unsafe.Pointer(in.RunAsNonRoot)) out.ReadOnlyRootFilesystem = (*bool)(unsafe.Pointer(in.ReadOnlyRootFilesystem)) out.AllowPrivilegeEscalation = (*bool)(unsafe.Pointer(in.AllowPrivilegeEscalation)) - out.ProcMount = (*v1.ProcMountType)(unsafe.Pointer(in.ProcMount)) - out.SeccompProfile = (*v1.SeccompProfile)(unsafe.Pointer(in.SeccompProfile)) - out.AppArmorProfile = (*v1.AppArmorProfile)(unsafe.Pointer(in.AppArmorProfile)) + out.ProcMount = (*corev1.ProcMountType)(unsafe.Pointer(in.ProcMount)) + out.SeccompProfile = (*corev1.SeccompProfile)(unsafe.Pointer(in.SeccompProfile)) + out.AppArmorProfile = (*corev1.AppArmorProfile)(unsafe.Pointer(in.AppArmorProfile)) return nil } // Convert_core_SecurityContext_To_v1_SecurityContext is an autogenerated conversion function. 
-func Convert_core_SecurityContext_To_v1_SecurityContext(in *core.SecurityContext, out *v1.SecurityContext, s conversion.Scope) error { +func Convert_core_SecurityContext_To_v1_SecurityContext(in *core.SecurityContext, out *corev1.SecurityContext, s conversion.Scope) error { return autoConvert_core_SecurityContext_To_v1_SecurityContext(in, out, s) } -func autoConvert_v1_SerializedReference_To_core_SerializedReference(in *v1.SerializedReference, out *core.SerializedReference, s conversion.Scope) error { +func autoConvert_v1_SerializedReference_To_core_SerializedReference(in *corev1.SerializedReference, out *core.SerializedReference, s conversion.Scope) error { if err := Convert_v1_ObjectReference_To_core_ObjectReference(&in.Reference, &out.Reference, s); err != nil { return err } @@ -8085,11 +8103,11 @@ func autoConvert_v1_SerializedReference_To_core_SerializedReference(in *v1.Seria } // Convert_v1_SerializedReference_To_core_SerializedReference is an autogenerated conversion function. -func Convert_v1_SerializedReference_To_core_SerializedReference(in *v1.SerializedReference, out *core.SerializedReference, s conversion.Scope) error { +func Convert_v1_SerializedReference_To_core_SerializedReference(in *corev1.SerializedReference, out *core.SerializedReference, s conversion.Scope) error { return autoConvert_v1_SerializedReference_To_core_SerializedReference(in, out, s) } -func autoConvert_core_SerializedReference_To_v1_SerializedReference(in *core.SerializedReference, out *v1.SerializedReference, s conversion.Scope) error { +func autoConvert_core_SerializedReference_To_v1_SerializedReference(in *core.SerializedReference, out *corev1.SerializedReference, s conversion.Scope) error { if err := Convert_core_ObjectReference_To_v1_ObjectReference(&in.Reference, &out.Reference, s); err != nil { return err } @@ -8097,11 +8115,11 @@ func autoConvert_core_SerializedReference_To_v1_SerializedReference(in *core.Ser } // Convert_core_SerializedReference_To_v1_SerializedReference is an autogenerated conversion function. -func Convert_core_SerializedReference_To_v1_SerializedReference(in *core.SerializedReference, out *v1.SerializedReference, s conversion.Scope) error { +func Convert_core_SerializedReference_To_v1_SerializedReference(in *core.SerializedReference, out *corev1.SerializedReference, s conversion.Scope) error { return autoConvert_core_SerializedReference_To_v1_SerializedReference(in, out, s) } -func autoConvert_v1_Service_To_core_Service(in *v1.Service, out *core.Service, s conversion.Scope) error { +func autoConvert_v1_Service_To_core_Service(in *corev1.Service, out *core.Service, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta if err := Convert_v1_ServiceSpec_To_core_ServiceSpec(&in.Spec, &out.Spec, s); err != nil { return err @@ -8113,11 +8131,11 @@ func autoConvert_v1_Service_To_core_Service(in *v1.Service, out *core.Service, s } // Convert_v1_Service_To_core_Service is an autogenerated conversion function. 
-func Convert_v1_Service_To_core_Service(in *v1.Service, out *core.Service, s conversion.Scope) error { +func Convert_v1_Service_To_core_Service(in *corev1.Service, out *core.Service, s conversion.Scope) error { return autoConvert_v1_Service_To_core_Service(in, out, s) } -func autoConvert_core_Service_To_v1_Service(in *core.Service, out *v1.Service, s conversion.Scope) error { +func autoConvert_core_Service_To_v1_Service(in *core.Service, out *corev1.Service, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta if err := Convert_core_ServiceSpec_To_v1_ServiceSpec(&in.Spec, &out.Spec, s); err != nil { return err @@ -8129,11 +8147,11 @@ func autoConvert_core_Service_To_v1_Service(in *core.Service, out *v1.Service, s } // Convert_core_Service_To_v1_Service is an autogenerated conversion function. -func Convert_core_Service_To_v1_Service(in *core.Service, out *v1.Service, s conversion.Scope) error { +func Convert_core_Service_To_v1_Service(in *core.Service, out *corev1.Service, s conversion.Scope) error { return autoConvert_core_Service_To_v1_Service(in, out, s) } -func autoConvert_v1_ServiceAccount_To_core_ServiceAccount(in *v1.ServiceAccount, out *core.ServiceAccount, s conversion.Scope) error { +func autoConvert_v1_ServiceAccount_To_core_ServiceAccount(in *corev1.ServiceAccount, out *core.ServiceAccount, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta out.Secrets = *(*[]core.ObjectReference)(unsafe.Pointer(&in.Secrets)) out.ImagePullSecrets = *(*[]core.LocalObjectReference)(unsafe.Pointer(&in.ImagePullSecrets)) @@ -8142,46 +8160,46 @@ func autoConvert_v1_ServiceAccount_To_core_ServiceAccount(in *v1.ServiceAccount, } // Convert_v1_ServiceAccount_To_core_ServiceAccount is an autogenerated conversion function. -func Convert_v1_ServiceAccount_To_core_ServiceAccount(in *v1.ServiceAccount, out *core.ServiceAccount, s conversion.Scope) error { +func Convert_v1_ServiceAccount_To_core_ServiceAccount(in *corev1.ServiceAccount, out *core.ServiceAccount, s conversion.Scope) error { return autoConvert_v1_ServiceAccount_To_core_ServiceAccount(in, out, s) } -func autoConvert_core_ServiceAccount_To_v1_ServiceAccount(in *core.ServiceAccount, out *v1.ServiceAccount, s conversion.Scope) error { +func autoConvert_core_ServiceAccount_To_v1_ServiceAccount(in *core.ServiceAccount, out *corev1.ServiceAccount, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta - out.Secrets = *(*[]v1.ObjectReference)(unsafe.Pointer(&in.Secrets)) - out.ImagePullSecrets = *(*[]v1.LocalObjectReference)(unsafe.Pointer(&in.ImagePullSecrets)) + out.Secrets = *(*[]corev1.ObjectReference)(unsafe.Pointer(&in.Secrets)) + out.ImagePullSecrets = *(*[]corev1.LocalObjectReference)(unsafe.Pointer(&in.ImagePullSecrets)) out.AutomountServiceAccountToken = (*bool)(unsafe.Pointer(in.AutomountServiceAccountToken)) return nil } // Convert_core_ServiceAccount_To_v1_ServiceAccount is an autogenerated conversion function. 
-func Convert_core_ServiceAccount_To_v1_ServiceAccount(in *core.ServiceAccount, out *v1.ServiceAccount, s conversion.Scope) error { +func Convert_core_ServiceAccount_To_v1_ServiceAccount(in *core.ServiceAccount, out *corev1.ServiceAccount, s conversion.Scope) error { return autoConvert_core_ServiceAccount_To_v1_ServiceAccount(in, out, s) } -func autoConvert_v1_ServiceAccountList_To_core_ServiceAccountList(in *v1.ServiceAccountList, out *core.ServiceAccountList, s conversion.Scope) error { +func autoConvert_v1_ServiceAccountList_To_core_ServiceAccountList(in *corev1.ServiceAccountList, out *core.ServiceAccountList, s conversion.Scope) error { out.ListMeta = in.ListMeta out.Items = *(*[]core.ServiceAccount)(unsafe.Pointer(&in.Items)) return nil } // Convert_v1_ServiceAccountList_To_core_ServiceAccountList is an autogenerated conversion function. -func Convert_v1_ServiceAccountList_To_core_ServiceAccountList(in *v1.ServiceAccountList, out *core.ServiceAccountList, s conversion.Scope) error { +func Convert_v1_ServiceAccountList_To_core_ServiceAccountList(in *corev1.ServiceAccountList, out *core.ServiceAccountList, s conversion.Scope) error { return autoConvert_v1_ServiceAccountList_To_core_ServiceAccountList(in, out, s) } -func autoConvert_core_ServiceAccountList_To_v1_ServiceAccountList(in *core.ServiceAccountList, out *v1.ServiceAccountList, s conversion.Scope) error { +func autoConvert_core_ServiceAccountList_To_v1_ServiceAccountList(in *core.ServiceAccountList, out *corev1.ServiceAccountList, s conversion.Scope) error { out.ListMeta = in.ListMeta - out.Items = *(*[]v1.ServiceAccount)(unsafe.Pointer(&in.Items)) + out.Items = *(*[]corev1.ServiceAccount)(unsafe.Pointer(&in.Items)) return nil } // Convert_core_ServiceAccountList_To_v1_ServiceAccountList is an autogenerated conversion function. -func Convert_core_ServiceAccountList_To_v1_ServiceAccountList(in *core.ServiceAccountList, out *v1.ServiceAccountList, s conversion.Scope) error { +func Convert_core_ServiceAccountList_To_v1_ServiceAccountList(in *core.ServiceAccountList, out *corev1.ServiceAccountList, s conversion.Scope) error { return autoConvert_core_ServiceAccountList_To_v1_ServiceAccountList(in, out, s) } -func autoConvert_v1_ServiceAccountTokenProjection_To_core_ServiceAccountTokenProjection(in *v1.ServiceAccountTokenProjection, out *core.ServiceAccountTokenProjection, s conversion.Scope) error { +func autoConvert_v1_ServiceAccountTokenProjection_To_core_ServiceAccountTokenProjection(in *corev1.ServiceAccountTokenProjection, out *core.ServiceAccountTokenProjection, s conversion.Scope) error { out.Audience = in.Audience if err := metav1.Convert_Pointer_int64_To_int64(&in.ExpirationSeconds, &out.ExpirationSeconds, s); err != nil { return err @@ -8191,11 +8209,11 @@ func autoConvert_v1_ServiceAccountTokenProjection_To_core_ServiceAccountTokenPro } // Convert_v1_ServiceAccountTokenProjection_To_core_ServiceAccountTokenProjection is an autogenerated conversion function. 
-func Convert_v1_ServiceAccountTokenProjection_To_core_ServiceAccountTokenProjection(in *v1.ServiceAccountTokenProjection, out *core.ServiceAccountTokenProjection, s conversion.Scope) error { +func Convert_v1_ServiceAccountTokenProjection_To_core_ServiceAccountTokenProjection(in *corev1.ServiceAccountTokenProjection, out *core.ServiceAccountTokenProjection, s conversion.Scope) error { return autoConvert_v1_ServiceAccountTokenProjection_To_core_ServiceAccountTokenProjection(in, out, s) } -func autoConvert_core_ServiceAccountTokenProjection_To_v1_ServiceAccountTokenProjection(in *core.ServiceAccountTokenProjection, out *v1.ServiceAccountTokenProjection, s conversion.Scope) error { +func autoConvert_core_ServiceAccountTokenProjection_To_v1_ServiceAccountTokenProjection(in *core.ServiceAccountTokenProjection, out *corev1.ServiceAccountTokenProjection, s conversion.Scope) error { out.Audience = in.Audience if err := metav1.Convert_int64_To_Pointer_int64(&in.ExpirationSeconds, &out.ExpirationSeconds, s); err != nil { return err @@ -8205,11 +8223,11 @@ func autoConvert_core_ServiceAccountTokenProjection_To_v1_ServiceAccountTokenPro } // Convert_core_ServiceAccountTokenProjection_To_v1_ServiceAccountTokenProjection is an autogenerated conversion function. -func Convert_core_ServiceAccountTokenProjection_To_v1_ServiceAccountTokenProjection(in *core.ServiceAccountTokenProjection, out *v1.ServiceAccountTokenProjection, s conversion.Scope) error { +func Convert_core_ServiceAccountTokenProjection_To_v1_ServiceAccountTokenProjection(in *core.ServiceAccountTokenProjection, out *corev1.ServiceAccountTokenProjection, s conversion.Scope) error { return autoConvert_core_ServiceAccountTokenProjection_To_v1_ServiceAccountTokenProjection(in, out, s) } -func autoConvert_v1_ServiceList_To_core_ServiceList(in *v1.ServiceList, out *core.ServiceList, s conversion.Scope) error { +func autoConvert_v1_ServiceList_To_core_ServiceList(in *corev1.ServiceList, out *core.ServiceList, s conversion.Scope) error { out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items @@ -8226,15 +8244,15 @@ func autoConvert_v1_ServiceList_To_core_ServiceList(in *v1.ServiceList, out *cor } // Convert_v1_ServiceList_To_core_ServiceList is an autogenerated conversion function. -func Convert_v1_ServiceList_To_core_ServiceList(in *v1.ServiceList, out *core.ServiceList, s conversion.Scope) error { +func Convert_v1_ServiceList_To_core_ServiceList(in *corev1.ServiceList, out *core.ServiceList, s conversion.Scope) error { return autoConvert_v1_ServiceList_To_core_ServiceList(in, out, s) } -func autoConvert_core_ServiceList_To_v1_ServiceList(in *core.ServiceList, out *v1.ServiceList, s conversion.Scope) error { +func autoConvert_core_ServiceList_To_v1_ServiceList(in *core.ServiceList, out *corev1.ServiceList, s conversion.Scope) error { out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items - *out = make([]v1.Service, len(*in)) + *out = make([]corev1.Service, len(*in)) for i := range *in { if err := Convert_core_Service_To_v1_Service(&(*in)[i], &(*out)[i], s); err != nil { return err @@ -8247,11 +8265,11 @@ func autoConvert_core_ServiceList_To_v1_ServiceList(in *core.ServiceList, out *v } // Convert_core_ServiceList_To_v1_ServiceList is an autogenerated conversion function. 
-func Convert_core_ServiceList_To_v1_ServiceList(in *core.ServiceList, out *v1.ServiceList, s conversion.Scope) error { +func Convert_core_ServiceList_To_v1_ServiceList(in *core.ServiceList, out *corev1.ServiceList, s conversion.Scope) error { return autoConvert_core_ServiceList_To_v1_ServiceList(in, out, s) } -func autoConvert_v1_ServicePort_To_core_ServicePort(in *v1.ServicePort, out *core.ServicePort, s conversion.Scope) error { +func autoConvert_v1_ServicePort_To_core_ServicePort(in *corev1.ServicePort, out *core.ServicePort, s conversion.Scope) error { out.Name = in.Name out.Protocol = core.Protocol(in.Protocol) out.AppProtocol = (*string)(unsafe.Pointer(in.AppProtocol)) @@ -8262,13 +8280,13 @@ func autoConvert_v1_ServicePort_To_core_ServicePort(in *v1.ServicePort, out *cor } // Convert_v1_ServicePort_To_core_ServicePort is an autogenerated conversion function. -func Convert_v1_ServicePort_To_core_ServicePort(in *v1.ServicePort, out *core.ServicePort, s conversion.Scope) error { +func Convert_v1_ServicePort_To_core_ServicePort(in *corev1.ServicePort, out *core.ServicePort, s conversion.Scope) error { return autoConvert_v1_ServicePort_To_core_ServicePort(in, out, s) } -func autoConvert_core_ServicePort_To_v1_ServicePort(in *core.ServicePort, out *v1.ServicePort, s conversion.Scope) error { +func autoConvert_core_ServicePort_To_v1_ServicePort(in *core.ServicePort, out *corev1.ServicePort, s conversion.Scope) error { out.Name = in.Name - out.Protocol = v1.Protocol(in.Protocol) + out.Protocol = corev1.Protocol(in.Protocol) out.AppProtocol = (*string)(unsafe.Pointer(in.AppProtocol)) out.Port = in.Port out.TargetPort = in.TargetPort @@ -8277,31 +8295,31 @@ func autoConvert_core_ServicePort_To_v1_ServicePort(in *core.ServicePort, out *v } // Convert_core_ServicePort_To_v1_ServicePort is an autogenerated conversion function. -func Convert_core_ServicePort_To_v1_ServicePort(in *core.ServicePort, out *v1.ServicePort, s conversion.Scope) error { +func Convert_core_ServicePort_To_v1_ServicePort(in *core.ServicePort, out *corev1.ServicePort, s conversion.Scope) error { return autoConvert_core_ServicePort_To_v1_ServicePort(in, out, s) } -func autoConvert_v1_ServiceProxyOptions_To_core_ServiceProxyOptions(in *v1.ServiceProxyOptions, out *core.ServiceProxyOptions, s conversion.Scope) error { +func autoConvert_v1_ServiceProxyOptions_To_core_ServiceProxyOptions(in *corev1.ServiceProxyOptions, out *core.ServiceProxyOptions, s conversion.Scope) error { out.Path = in.Path return nil } // Convert_v1_ServiceProxyOptions_To_core_ServiceProxyOptions is an autogenerated conversion function. -func Convert_v1_ServiceProxyOptions_To_core_ServiceProxyOptions(in *v1.ServiceProxyOptions, out *core.ServiceProxyOptions, s conversion.Scope) error { +func Convert_v1_ServiceProxyOptions_To_core_ServiceProxyOptions(in *corev1.ServiceProxyOptions, out *core.ServiceProxyOptions, s conversion.Scope) error { return autoConvert_v1_ServiceProxyOptions_To_core_ServiceProxyOptions(in, out, s) } -func autoConvert_core_ServiceProxyOptions_To_v1_ServiceProxyOptions(in *core.ServiceProxyOptions, out *v1.ServiceProxyOptions, s conversion.Scope) error { +func autoConvert_core_ServiceProxyOptions_To_v1_ServiceProxyOptions(in *core.ServiceProxyOptions, out *corev1.ServiceProxyOptions, s conversion.Scope) error { out.Path = in.Path return nil } // Convert_core_ServiceProxyOptions_To_v1_ServiceProxyOptions is an autogenerated conversion function. 
-func Convert_core_ServiceProxyOptions_To_v1_ServiceProxyOptions(in *core.ServiceProxyOptions, out *v1.ServiceProxyOptions, s conversion.Scope) error { +func Convert_core_ServiceProxyOptions_To_v1_ServiceProxyOptions(in *core.ServiceProxyOptions, out *corev1.ServiceProxyOptions, s conversion.Scope) error { return autoConvert_core_ServiceProxyOptions_To_v1_ServiceProxyOptions(in, out, s) } -func autoConvert_url_Values_To_v1_ServiceProxyOptions(in *url.Values, out *v1.ServiceProxyOptions, s conversion.Scope) error { +func autoConvert_url_Values_To_v1_ServiceProxyOptions(in *url.Values, out *corev1.ServiceProxyOptions, s conversion.Scope) error { // WARNING: Field TypeMeta does not have json tag, skipping. if values, ok := map[string][]string(*in)["path"]; ok && len(values) > 0 { @@ -8315,11 +8333,11 @@ func autoConvert_url_Values_To_v1_ServiceProxyOptions(in *url.Values, out *v1.Se } // Convert_url_Values_To_v1_ServiceProxyOptions is an autogenerated conversion function. -func Convert_url_Values_To_v1_ServiceProxyOptions(in *url.Values, out *v1.ServiceProxyOptions, s conversion.Scope) error { +func Convert_url_Values_To_v1_ServiceProxyOptions(in *url.Values, out *corev1.ServiceProxyOptions, s conversion.Scope) error { return autoConvert_url_Values_To_v1_ServiceProxyOptions(in, out, s) } -func autoConvert_v1_ServiceSpec_To_core_ServiceSpec(in *v1.ServiceSpec, out *core.ServiceSpec, s conversion.Scope) error { +func autoConvert_v1_ServiceSpec_To_core_ServiceSpec(in *corev1.ServiceSpec, out *core.ServiceSpec, s conversion.Scope) error { out.Ports = *(*[]core.ServicePort)(unsafe.Pointer(&in.Ports)) out.Selector = *(*map[string]string)(unsafe.Pointer(&in.Selector)) out.ClusterIP = in.ClusterIP @@ -8344,40 +8362,40 @@ func autoConvert_v1_ServiceSpec_To_core_ServiceSpec(in *v1.ServiceSpec, out *cor } // Convert_v1_ServiceSpec_To_core_ServiceSpec is an autogenerated conversion function. 
-func Convert_v1_ServiceSpec_To_core_ServiceSpec(in *v1.ServiceSpec, out *core.ServiceSpec, s conversion.Scope) error { +func Convert_v1_ServiceSpec_To_core_ServiceSpec(in *corev1.ServiceSpec, out *core.ServiceSpec, s conversion.Scope) error { return autoConvert_v1_ServiceSpec_To_core_ServiceSpec(in, out, s) } -func autoConvert_core_ServiceSpec_To_v1_ServiceSpec(in *core.ServiceSpec, out *v1.ServiceSpec, s conversion.Scope) error { - out.Type = v1.ServiceType(in.Type) - out.Ports = *(*[]v1.ServicePort)(unsafe.Pointer(&in.Ports)) +func autoConvert_core_ServiceSpec_To_v1_ServiceSpec(in *core.ServiceSpec, out *corev1.ServiceSpec, s conversion.Scope) error { + out.Type = corev1.ServiceType(in.Type) + out.Ports = *(*[]corev1.ServicePort)(unsafe.Pointer(&in.Ports)) out.Selector = *(*map[string]string)(unsafe.Pointer(&in.Selector)) out.ClusterIP = in.ClusterIP out.ClusterIPs = *(*[]string)(unsafe.Pointer(&in.ClusterIPs)) - out.IPFamilies = *(*[]v1.IPFamily)(unsafe.Pointer(&in.IPFamilies)) - out.IPFamilyPolicy = (*v1.IPFamilyPolicy)(unsafe.Pointer(in.IPFamilyPolicy)) + out.IPFamilies = *(*[]corev1.IPFamily)(unsafe.Pointer(&in.IPFamilies)) + out.IPFamilyPolicy = (*corev1.IPFamilyPolicy)(unsafe.Pointer(in.IPFamilyPolicy)) out.ExternalName = in.ExternalName out.ExternalIPs = *(*[]string)(unsafe.Pointer(&in.ExternalIPs)) out.LoadBalancerIP = in.LoadBalancerIP - out.SessionAffinity = v1.ServiceAffinity(in.SessionAffinity) - out.SessionAffinityConfig = (*v1.SessionAffinityConfig)(unsafe.Pointer(in.SessionAffinityConfig)) + out.SessionAffinity = corev1.ServiceAffinity(in.SessionAffinity) + out.SessionAffinityConfig = (*corev1.SessionAffinityConfig)(unsafe.Pointer(in.SessionAffinityConfig)) out.LoadBalancerSourceRanges = *(*[]string)(unsafe.Pointer(&in.LoadBalancerSourceRanges)) - out.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicy(in.ExternalTrafficPolicy) + out.ExternalTrafficPolicy = corev1.ServiceExternalTrafficPolicy(in.ExternalTrafficPolicy) out.HealthCheckNodePort = in.HealthCheckNodePort out.PublishNotReadyAddresses = in.PublishNotReadyAddresses out.AllocateLoadBalancerNodePorts = (*bool)(unsafe.Pointer(in.AllocateLoadBalancerNodePorts)) out.LoadBalancerClass = (*string)(unsafe.Pointer(in.LoadBalancerClass)) - out.InternalTrafficPolicy = (*v1.ServiceInternalTrafficPolicy)(unsafe.Pointer(in.InternalTrafficPolicy)) + out.InternalTrafficPolicy = (*corev1.ServiceInternalTrafficPolicy)(unsafe.Pointer(in.InternalTrafficPolicy)) out.TrafficDistribution = (*string)(unsafe.Pointer(in.TrafficDistribution)) return nil } // Convert_core_ServiceSpec_To_v1_ServiceSpec is an autogenerated conversion function. -func Convert_core_ServiceSpec_To_v1_ServiceSpec(in *core.ServiceSpec, out *v1.ServiceSpec, s conversion.Scope) error { +func Convert_core_ServiceSpec_To_v1_ServiceSpec(in *core.ServiceSpec, out *corev1.ServiceSpec, s conversion.Scope) error { return autoConvert_core_ServiceSpec_To_v1_ServiceSpec(in, out, s) } -func autoConvert_v1_ServiceStatus_To_core_ServiceStatus(in *v1.ServiceStatus, out *core.ServiceStatus, s conversion.Scope) error { +func autoConvert_v1_ServiceStatus_To_core_ServiceStatus(in *corev1.ServiceStatus, out *core.ServiceStatus, s conversion.Scope) error { if err := Convert_v1_LoadBalancerStatus_To_core_LoadBalancerStatus(&in.LoadBalancer, &out.LoadBalancer, s); err != nil { return err } @@ -8386,11 +8404,11 @@ func autoConvert_v1_ServiceStatus_To_core_ServiceStatus(in *v1.ServiceStatus, ou } // Convert_v1_ServiceStatus_To_core_ServiceStatus is an autogenerated conversion function. 
-func Convert_v1_ServiceStatus_To_core_ServiceStatus(in *v1.ServiceStatus, out *core.ServiceStatus, s conversion.Scope) error { +func Convert_v1_ServiceStatus_To_core_ServiceStatus(in *corev1.ServiceStatus, out *core.ServiceStatus, s conversion.Scope) error { return autoConvert_v1_ServiceStatus_To_core_ServiceStatus(in, out, s) } -func autoConvert_core_ServiceStatus_To_v1_ServiceStatus(in *core.ServiceStatus, out *v1.ServiceStatus, s conversion.Scope) error { +func autoConvert_core_ServiceStatus_To_v1_ServiceStatus(in *core.ServiceStatus, out *corev1.ServiceStatus, s conversion.Scope) error { if err := Convert_core_LoadBalancerStatus_To_v1_LoadBalancerStatus(&in.LoadBalancer, &out.LoadBalancer, s); err != nil { return err } @@ -8399,51 +8417,51 @@ func autoConvert_core_ServiceStatus_To_v1_ServiceStatus(in *core.ServiceStatus, } // Convert_core_ServiceStatus_To_v1_ServiceStatus is an autogenerated conversion function. -func Convert_core_ServiceStatus_To_v1_ServiceStatus(in *core.ServiceStatus, out *v1.ServiceStatus, s conversion.Scope) error { +func Convert_core_ServiceStatus_To_v1_ServiceStatus(in *core.ServiceStatus, out *corev1.ServiceStatus, s conversion.Scope) error { return autoConvert_core_ServiceStatus_To_v1_ServiceStatus(in, out, s) } -func autoConvert_v1_SessionAffinityConfig_To_core_SessionAffinityConfig(in *v1.SessionAffinityConfig, out *core.SessionAffinityConfig, s conversion.Scope) error { +func autoConvert_v1_SessionAffinityConfig_To_core_SessionAffinityConfig(in *corev1.SessionAffinityConfig, out *core.SessionAffinityConfig, s conversion.Scope) error { out.ClientIP = (*core.ClientIPConfig)(unsafe.Pointer(in.ClientIP)) return nil } // Convert_v1_SessionAffinityConfig_To_core_SessionAffinityConfig is an autogenerated conversion function. -func Convert_v1_SessionAffinityConfig_To_core_SessionAffinityConfig(in *v1.SessionAffinityConfig, out *core.SessionAffinityConfig, s conversion.Scope) error { +func Convert_v1_SessionAffinityConfig_To_core_SessionAffinityConfig(in *corev1.SessionAffinityConfig, out *core.SessionAffinityConfig, s conversion.Scope) error { return autoConvert_v1_SessionAffinityConfig_To_core_SessionAffinityConfig(in, out, s) } -func autoConvert_core_SessionAffinityConfig_To_v1_SessionAffinityConfig(in *core.SessionAffinityConfig, out *v1.SessionAffinityConfig, s conversion.Scope) error { - out.ClientIP = (*v1.ClientIPConfig)(unsafe.Pointer(in.ClientIP)) +func autoConvert_core_SessionAffinityConfig_To_v1_SessionAffinityConfig(in *core.SessionAffinityConfig, out *corev1.SessionAffinityConfig, s conversion.Scope) error { + out.ClientIP = (*corev1.ClientIPConfig)(unsafe.Pointer(in.ClientIP)) return nil } // Convert_core_SessionAffinityConfig_To_v1_SessionAffinityConfig is an autogenerated conversion function. 
-func Convert_core_SessionAffinityConfig_To_v1_SessionAffinityConfig(in *core.SessionAffinityConfig, out *v1.SessionAffinityConfig, s conversion.Scope) error { +func Convert_core_SessionAffinityConfig_To_v1_SessionAffinityConfig(in *core.SessionAffinityConfig, out *corev1.SessionAffinityConfig, s conversion.Scope) error { return autoConvert_core_SessionAffinityConfig_To_v1_SessionAffinityConfig(in, out, s) } -func autoConvert_v1_SleepAction_To_core_SleepAction(in *v1.SleepAction, out *core.SleepAction, s conversion.Scope) error { +func autoConvert_v1_SleepAction_To_core_SleepAction(in *corev1.SleepAction, out *core.SleepAction, s conversion.Scope) error { out.Seconds = in.Seconds return nil } // Convert_v1_SleepAction_To_core_SleepAction is an autogenerated conversion function. -func Convert_v1_SleepAction_To_core_SleepAction(in *v1.SleepAction, out *core.SleepAction, s conversion.Scope) error { +func Convert_v1_SleepAction_To_core_SleepAction(in *corev1.SleepAction, out *core.SleepAction, s conversion.Scope) error { return autoConvert_v1_SleepAction_To_core_SleepAction(in, out, s) } -func autoConvert_core_SleepAction_To_v1_SleepAction(in *core.SleepAction, out *v1.SleepAction, s conversion.Scope) error { +func autoConvert_core_SleepAction_To_v1_SleepAction(in *core.SleepAction, out *corev1.SleepAction, s conversion.Scope) error { out.Seconds = in.Seconds return nil } // Convert_core_SleepAction_To_v1_SleepAction is an autogenerated conversion function. -func Convert_core_SleepAction_To_v1_SleepAction(in *core.SleepAction, out *v1.SleepAction, s conversion.Scope) error { +func Convert_core_SleepAction_To_v1_SleepAction(in *core.SleepAction, out *corev1.SleepAction, s conversion.Scope) error { return autoConvert_core_SleepAction_To_v1_SleepAction(in, out, s) } -func autoConvert_v1_StorageOSPersistentVolumeSource_To_core_StorageOSPersistentVolumeSource(in *v1.StorageOSPersistentVolumeSource, out *core.StorageOSPersistentVolumeSource, s conversion.Scope) error { +func autoConvert_v1_StorageOSPersistentVolumeSource_To_core_StorageOSPersistentVolumeSource(in *corev1.StorageOSPersistentVolumeSource, out *core.StorageOSPersistentVolumeSource, s conversion.Scope) error { out.VolumeName = in.VolumeName out.VolumeNamespace = in.VolumeNamespace out.FSType = in.FSType @@ -8453,25 +8471,25 @@ func autoConvert_v1_StorageOSPersistentVolumeSource_To_core_StorageOSPersistentV } // Convert_v1_StorageOSPersistentVolumeSource_To_core_StorageOSPersistentVolumeSource is an autogenerated conversion function. 
-func Convert_v1_StorageOSPersistentVolumeSource_To_core_StorageOSPersistentVolumeSource(in *v1.StorageOSPersistentVolumeSource, out *core.StorageOSPersistentVolumeSource, s conversion.Scope) error { +func Convert_v1_StorageOSPersistentVolumeSource_To_core_StorageOSPersistentVolumeSource(in *corev1.StorageOSPersistentVolumeSource, out *core.StorageOSPersistentVolumeSource, s conversion.Scope) error { return autoConvert_v1_StorageOSPersistentVolumeSource_To_core_StorageOSPersistentVolumeSource(in, out, s) } -func autoConvert_core_StorageOSPersistentVolumeSource_To_v1_StorageOSPersistentVolumeSource(in *core.StorageOSPersistentVolumeSource, out *v1.StorageOSPersistentVolumeSource, s conversion.Scope) error { +func autoConvert_core_StorageOSPersistentVolumeSource_To_v1_StorageOSPersistentVolumeSource(in *core.StorageOSPersistentVolumeSource, out *corev1.StorageOSPersistentVolumeSource, s conversion.Scope) error { out.VolumeName = in.VolumeName out.VolumeNamespace = in.VolumeNamespace out.FSType = in.FSType out.ReadOnly = in.ReadOnly - out.SecretRef = (*v1.ObjectReference)(unsafe.Pointer(in.SecretRef)) + out.SecretRef = (*corev1.ObjectReference)(unsafe.Pointer(in.SecretRef)) return nil } // Convert_core_StorageOSPersistentVolumeSource_To_v1_StorageOSPersistentVolumeSource is an autogenerated conversion function. -func Convert_core_StorageOSPersistentVolumeSource_To_v1_StorageOSPersistentVolumeSource(in *core.StorageOSPersistentVolumeSource, out *v1.StorageOSPersistentVolumeSource, s conversion.Scope) error { +func Convert_core_StorageOSPersistentVolumeSource_To_v1_StorageOSPersistentVolumeSource(in *core.StorageOSPersistentVolumeSource, out *corev1.StorageOSPersistentVolumeSource, s conversion.Scope) error { return autoConvert_core_StorageOSPersistentVolumeSource_To_v1_StorageOSPersistentVolumeSource(in, out, s) } -func autoConvert_v1_StorageOSVolumeSource_To_core_StorageOSVolumeSource(in *v1.StorageOSVolumeSource, out *core.StorageOSVolumeSource, s conversion.Scope) error { +func autoConvert_v1_StorageOSVolumeSource_To_core_StorageOSVolumeSource(in *corev1.StorageOSVolumeSource, out *core.StorageOSVolumeSource, s conversion.Scope) error { out.VolumeName = in.VolumeName out.VolumeNamespace = in.VolumeNamespace out.FSType = in.FSType @@ -8481,69 +8499,69 @@ func autoConvert_v1_StorageOSVolumeSource_To_core_StorageOSVolumeSource(in *v1.S } // Convert_v1_StorageOSVolumeSource_To_core_StorageOSVolumeSource is an autogenerated conversion function. 
-func Convert_v1_StorageOSVolumeSource_To_core_StorageOSVolumeSource(in *v1.StorageOSVolumeSource, out *core.StorageOSVolumeSource, s conversion.Scope) error { +func Convert_v1_StorageOSVolumeSource_To_core_StorageOSVolumeSource(in *corev1.StorageOSVolumeSource, out *core.StorageOSVolumeSource, s conversion.Scope) error { return autoConvert_v1_StorageOSVolumeSource_To_core_StorageOSVolumeSource(in, out, s) } -func autoConvert_core_StorageOSVolumeSource_To_v1_StorageOSVolumeSource(in *core.StorageOSVolumeSource, out *v1.StorageOSVolumeSource, s conversion.Scope) error { +func autoConvert_core_StorageOSVolumeSource_To_v1_StorageOSVolumeSource(in *core.StorageOSVolumeSource, out *corev1.StorageOSVolumeSource, s conversion.Scope) error { out.VolumeName = in.VolumeName out.VolumeNamespace = in.VolumeNamespace out.FSType = in.FSType out.ReadOnly = in.ReadOnly - out.SecretRef = (*v1.LocalObjectReference)(unsafe.Pointer(in.SecretRef)) + out.SecretRef = (*corev1.LocalObjectReference)(unsafe.Pointer(in.SecretRef)) return nil } // Convert_core_StorageOSVolumeSource_To_v1_StorageOSVolumeSource is an autogenerated conversion function. -func Convert_core_StorageOSVolumeSource_To_v1_StorageOSVolumeSource(in *core.StorageOSVolumeSource, out *v1.StorageOSVolumeSource, s conversion.Scope) error { +func Convert_core_StorageOSVolumeSource_To_v1_StorageOSVolumeSource(in *core.StorageOSVolumeSource, out *corev1.StorageOSVolumeSource, s conversion.Scope) error { return autoConvert_core_StorageOSVolumeSource_To_v1_StorageOSVolumeSource(in, out, s) } -func autoConvert_v1_Sysctl_To_core_Sysctl(in *v1.Sysctl, out *core.Sysctl, s conversion.Scope) error { +func autoConvert_v1_Sysctl_To_core_Sysctl(in *corev1.Sysctl, out *core.Sysctl, s conversion.Scope) error { out.Name = in.Name out.Value = in.Value return nil } // Convert_v1_Sysctl_To_core_Sysctl is an autogenerated conversion function. -func Convert_v1_Sysctl_To_core_Sysctl(in *v1.Sysctl, out *core.Sysctl, s conversion.Scope) error { +func Convert_v1_Sysctl_To_core_Sysctl(in *corev1.Sysctl, out *core.Sysctl, s conversion.Scope) error { return autoConvert_v1_Sysctl_To_core_Sysctl(in, out, s) } -func autoConvert_core_Sysctl_To_v1_Sysctl(in *core.Sysctl, out *v1.Sysctl, s conversion.Scope) error { +func autoConvert_core_Sysctl_To_v1_Sysctl(in *core.Sysctl, out *corev1.Sysctl, s conversion.Scope) error { out.Name = in.Name out.Value = in.Value return nil } // Convert_core_Sysctl_To_v1_Sysctl is an autogenerated conversion function. -func Convert_core_Sysctl_To_v1_Sysctl(in *core.Sysctl, out *v1.Sysctl, s conversion.Scope) error { +func Convert_core_Sysctl_To_v1_Sysctl(in *core.Sysctl, out *corev1.Sysctl, s conversion.Scope) error { return autoConvert_core_Sysctl_To_v1_Sysctl(in, out, s) } -func autoConvert_v1_TCPSocketAction_To_core_TCPSocketAction(in *v1.TCPSocketAction, out *core.TCPSocketAction, s conversion.Scope) error { +func autoConvert_v1_TCPSocketAction_To_core_TCPSocketAction(in *corev1.TCPSocketAction, out *core.TCPSocketAction, s conversion.Scope) error { out.Port = in.Port out.Host = in.Host return nil } // Convert_v1_TCPSocketAction_To_core_TCPSocketAction is an autogenerated conversion function. 
-func Convert_v1_TCPSocketAction_To_core_TCPSocketAction(in *v1.TCPSocketAction, out *core.TCPSocketAction, s conversion.Scope) error { +func Convert_v1_TCPSocketAction_To_core_TCPSocketAction(in *corev1.TCPSocketAction, out *core.TCPSocketAction, s conversion.Scope) error { return autoConvert_v1_TCPSocketAction_To_core_TCPSocketAction(in, out, s) } -func autoConvert_core_TCPSocketAction_To_v1_TCPSocketAction(in *core.TCPSocketAction, out *v1.TCPSocketAction, s conversion.Scope) error { +func autoConvert_core_TCPSocketAction_To_v1_TCPSocketAction(in *core.TCPSocketAction, out *corev1.TCPSocketAction, s conversion.Scope) error { out.Port = in.Port out.Host = in.Host return nil } // Convert_core_TCPSocketAction_To_v1_TCPSocketAction is an autogenerated conversion function. -func Convert_core_TCPSocketAction_To_v1_TCPSocketAction(in *core.TCPSocketAction, out *v1.TCPSocketAction, s conversion.Scope) error { +func Convert_core_TCPSocketAction_To_v1_TCPSocketAction(in *core.TCPSocketAction, out *corev1.TCPSocketAction, s conversion.Scope) error { return autoConvert_core_TCPSocketAction_To_v1_TCPSocketAction(in, out, s) } -func autoConvert_v1_Taint_To_core_Taint(in *v1.Taint, out *core.Taint, s conversion.Scope) error { +func autoConvert_v1_Taint_To_core_Taint(in *corev1.Taint, out *core.Taint, s conversion.Scope) error { out.Key = in.Key out.Value = in.Value out.Effect = core.TaintEffect(in.Effect) @@ -8552,24 +8570,24 @@ func autoConvert_v1_Taint_To_core_Taint(in *v1.Taint, out *core.Taint, s convers } // Convert_v1_Taint_To_core_Taint is an autogenerated conversion function. -func Convert_v1_Taint_To_core_Taint(in *v1.Taint, out *core.Taint, s conversion.Scope) error { +func Convert_v1_Taint_To_core_Taint(in *corev1.Taint, out *core.Taint, s conversion.Scope) error { return autoConvert_v1_Taint_To_core_Taint(in, out, s) } -func autoConvert_core_Taint_To_v1_Taint(in *core.Taint, out *v1.Taint, s conversion.Scope) error { +func autoConvert_core_Taint_To_v1_Taint(in *core.Taint, out *corev1.Taint, s conversion.Scope) error { out.Key = in.Key out.Value = in.Value - out.Effect = v1.TaintEffect(in.Effect) + out.Effect = corev1.TaintEffect(in.Effect) out.TimeAdded = (*metav1.Time)(unsafe.Pointer(in.TimeAdded)) return nil } // Convert_core_Taint_To_v1_Taint is an autogenerated conversion function. -func Convert_core_Taint_To_v1_Taint(in *core.Taint, out *v1.Taint, s conversion.Scope) error { +func Convert_core_Taint_To_v1_Taint(in *core.Taint, out *corev1.Taint, s conversion.Scope) error { return autoConvert_core_Taint_To_v1_Taint(in, out, s) } -func autoConvert_v1_Toleration_To_core_Toleration(in *v1.Toleration, out *core.Toleration, s conversion.Scope) error { +func autoConvert_v1_Toleration_To_core_Toleration(in *corev1.Toleration, out *core.Toleration, s conversion.Scope) error { out.Key = in.Key out.Operator = core.TolerationOperator(in.Operator) out.Value = in.Value @@ -8579,67 +8597,67 @@ func autoConvert_v1_Toleration_To_core_Toleration(in *v1.Toleration, out *core.T } // Convert_v1_Toleration_To_core_Toleration is an autogenerated conversion function. 
-func Convert_v1_Toleration_To_core_Toleration(in *v1.Toleration, out *core.Toleration, s conversion.Scope) error { +func Convert_v1_Toleration_To_core_Toleration(in *corev1.Toleration, out *core.Toleration, s conversion.Scope) error { return autoConvert_v1_Toleration_To_core_Toleration(in, out, s) } -func autoConvert_core_Toleration_To_v1_Toleration(in *core.Toleration, out *v1.Toleration, s conversion.Scope) error { +func autoConvert_core_Toleration_To_v1_Toleration(in *core.Toleration, out *corev1.Toleration, s conversion.Scope) error { out.Key = in.Key - out.Operator = v1.TolerationOperator(in.Operator) + out.Operator = corev1.TolerationOperator(in.Operator) out.Value = in.Value - out.Effect = v1.TaintEffect(in.Effect) + out.Effect = corev1.TaintEffect(in.Effect) out.TolerationSeconds = (*int64)(unsafe.Pointer(in.TolerationSeconds)) return nil } // Convert_core_Toleration_To_v1_Toleration is an autogenerated conversion function. -func Convert_core_Toleration_To_v1_Toleration(in *core.Toleration, out *v1.Toleration, s conversion.Scope) error { +func Convert_core_Toleration_To_v1_Toleration(in *core.Toleration, out *corev1.Toleration, s conversion.Scope) error { return autoConvert_core_Toleration_To_v1_Toleration(in, out, s) } -func autoConvert_v1_TopologySelectorLabelRequirement_To_core_TopologySelectorLabelRequirement(in *v1.TopologySelectorLabelRequirement, out *core.TopologySelectorLabelRequirement, s conversion.Scope) error { +func autoConvert_v1_TopologySelectorLabelRequirement_To_core_TopologySelectorLabelRequirement(in *corev1.TopologySelectorLabelRequirement, out *core.TopologySelectorLabelRequirement, s conversion.Scope) error { out.Key = in.Key out.Values = *(*[]string)(unsafe.Pointer(&in.Values)) return nil } // Convert_v1_TopologySelectorLabelRequirement_To_core_TopologySelectorLabelRequirement is an autogenerated conversion function. -func Convert_v1_TopologySelectorLabelRequirement_To_core_TopologySelectorLabelRequirement(in *v1.TopologySelectorLabelRequirement, out *core.TopologySelectorLabelRequirement, s conversion.Scope) error { +func Convert_v1_TopologySelectorLabelRequirement_To_core_TopologySelectorLabelRequirement(in *corev1.TopologySelectorLabelRequirement, out *core.TopologySelectorLabelRequirement, s conversion.Scope) error { return autoConvert_v1_TopologySelectorLabelRequirement_To_core_TopologySelectorLabelRequirement(in, out, s) } -func autoConvert_core_TopologySelectorLabelRequirement_To_v1_TopologySelectorLabelRequirement(in *core.TopologySelectorLabelRequirement, out *v1.TopologySelectorLabelRequirement, s conversion.Scope) error { +func autoConvert_core_TopologySelectorLabelRequirement_To_v1_TopologySelectorLabelRequirement(in *core.TopologySelectorLabelRequirement, out *corev1.TopologySelectorLabelRequirement, s conversion.Scope) error { out.Key = in.Key out.Values = *(*[]string)(unsafe.Pointer(&in.Values)) return nil } // Convert_core_TopologySelectorLabelRequirement_To_v1_TopologySelectorLabelRequirement is an autogenerated conversion function. 
-func Convert_core_TopologySelectorLabelRequirement_To_v1_TopologySelectorLabelRequirement(in *core.TopologySelectorLabelRequirement, out *v1.TopologySelectorLabelRequirement, s conversion.Scope) error { +func Convert_core_TopologySelectorLabelRequirement_To_v1_TopologySelectorLabelRequirement(in *core.TopologySelectorLabelRequirement, out *corev1.TopologySelectorLabelRequirement, s conversion.Scope) error { return autoConvert_core_TopologySelectorLabelRequirement_To_v1_TopologySelectorLabelRequirement(in, out, s) } -func autoConvert_v1_TopologySelectorTerm_To_core_TopologySelectorTerm(in *v1.TopologySelectorTerm, out *core.TopologySelectorTerm, s conversion.Scope) error { +func autoConvert_v1_TopologySelectorTerm_To_core_TopologySelectorTerm(in *corev1.TopologySelectorTerm, out *core.TopologySelectorTerm, s conversion.Scope) error { out.MatchLabelExpressions = *(*[]core.TopologySelectorLabelRequirement)(unsafe.Pointer(&in.MatchLabelExpressions)) return nil } // Convert_v1_TopologySelectorTerm_To_core_TopologySelectorTerm is an autogenerated conversion function. -func Convert_v1_TopologySelectorTerm_To_core_TopologySelectorTerm(in *v1.TopologySelectorTerm, out *core.TopologySelectorTerm, s conversion.Scope) error { +func Convert_v1_TopologySelectorTerm_To_core_TopologySelectorTerm(in *corev1.TopologySelectorTerm, out *core.TopologySelectorTerm, s conversion.Scope) error { return autoConvert_v1_TopologySelectorTerm_To_core_TopologySelectorTerm(in, out, s) } -func autoConvert_core_TopologySelectorTerm_To_v1_TopologySelectorTerm(in *core.TopologySelectorTerm, out *v1.TopologySelectorTerm, s conversion.Scope) error { - out.MatchLabelExpressions = *(*[]v1.TopologySelectorLabelRequirement)(unsafe.Pointer(&in.MatchLabelExpressions)) +func autoConvert_core_TopologySelectorTerm_To_v1_TopologySelectorTerm(in *core.TopologySelectorTerm, out *corev1.TopologySelectorTerm, s conversion.Scope) error { + out.MatchLabelExpressions = *(*[]corev1.TopologySelectorLabelRequirement)(unsafe.Pointer(&in.MatchLabelExpressions)) return nil } // Convert_core_TopologySelectorTerm_To_v1_TopologySelectorTerm is an autogenerated conversion function. -func Convert_core_TopologySelectorTerm_To_v1_TopologySelectorTerm(in *core.TopologySelectorTerm, out *v1.TopologySelectorTerm, s conversion.Scope) error { +func Convert_core_TopologySelectorTerm_To_v1_TopologySelectorTerm(in *core.TopologySelectorTerm, out *corev1.TopologySelectorTerm, s conversion.Scope) error { return autoConvert_core_TopologySelectorTerm_To_v1_TopologySelectorTerm(in, out, s) } -func autoConvert_v1_TopologySpreadConstraint_To_core_TopologySpreadConstraint(in *v1.TopologySpreadConstraint, out *core.TopologySpreadConstraint, s conversion.Scope) error { +func autoConvert_v1_TopologySpreadConstraint_To_core_TopologySpreadConstraint(in *corev1.TopologySpreadConstraint, out *core.TopologySpreadConstraint, s conversion.Scope) error { out.MaxSkew = in.MaxSkew out.TopologyKey = in.TopologyKey out.WhenUnsatisfiable = core.UnsatisfiableConstraintAction(in.WhenUnsatisfiable) @@ -8652,28 +8670,28 @@ func autoConvert_v1_TopologySpreadConstraint_To_core_TopologySpreadConstraint(in } // Convert_v1_TopologySpreadConstraint_To_core_TopologySpreadConstraint is an autogenerated conversion function. 
-func Convert_v1_TopologySpreadConstraint_To_core_TopologySpreadConstraint(in *v1.TopologySpreadConstraint, out *core.TopologySpreadConstraint, s conversion.Scope) error { +func Convert_v1_TopologySpreadConstraint_To_core_TopologySpreadConstraint(in *corev1.TopologySpreadConstraint, out *core.TopologySpreadConstraint, s conversion.Scope) error { return autoConvert_v1_TopologySpreadConstraint_To_core_TopologySpreadConstraint(in, out, s) } -func autoConvert_core_TopologySpreadConstraint_To_v1_TopologySpreadConstraint(in *core.TopologySpreadConstraint, out *v1.TopologySpreadConstraint, s conversion.Scope) error { +func autoConvert_core_TopologySpreadConstraint_To_v1_TopologySpreadConstraint(in *core.TopologySpreadConstraint, out *corev1.TopologySpreadConstraint, s conversion.Scope) error { out.MaxSkew = in.MaxSkew out.TopologyKey = in.TopologyKey - out.WhenUnsatisfiable = v1.UnsatisfiableConstraintAction(in.WhenUnsatisfiable) + out.WhenUnsatisfiable = corev1.UnsatisfiableConstraintAction(in.WhenUnsatisfiable) out.LabelSelector = (*metav1.LabelSelector)(unsafe.Pointer(in.LabelSelector)) out.MinDomains = (*int32)(unsafe.Pointer(in.MinDomains)) - out.NodeAffinityPolicy = (*v1.NodeInclusionPolicy)(unsafe.Pointer(in.NodeAffinityPolicy)) - out.NodeTaintsPolicy = (*v1.NodeInclusionPolicy)(unsafe.Pointer(in.NodeTaintsPolicy)) + out.NodeAffinityPolicy = (*corev1.NodeInclusionPolicy)(unsafe.Pointer(in.NodeAffinityPolicy)) + out.NodeTaintsPolicy = (*corev1.NodeInclusionPolicy)(unsafe.Pointer(in.NodeTaintsPolicy)) out.MatchLabelKeys = *(*[]string)(unsafe.Pointer(&in.MatchLabelKeys)) return nil } // Convert_core_TopologySpreadConstraint_To_v1_TopologySpreadConstraint is an autogenerated conversion function. -func Convert_core_TopologySpreadConstraint_To_v1_TopologySpreadConstraint(in *core.TopologySpreadConstraint, out *v1.TopologySpreadConstraint, s conversion.Scope) error { +func Convert_core_TopologySpreadConstraint_To_v1_TopologySpreadConstraint(in *core.TopologySpreadConstraint, out *corev1.TopologySpreadConstraint, s conversion.Scope) error { return autoConvert_core_TopologySpreadConstraint_To_v1_TopologySpreadConstraint(in, out, s) } -func autoConvert_v1_TypedLocalObjectReference_To_core_TypedLocalObjectReference(in *v1.TypedLocalObjectReference, out *core.TypedLocalObjectReference, s conversion.Scope) error { +func autoConvert_v1_TypedLocalObjectReference_To_core_TypedLocalObjectReference(in *corev1.TypedLocalObjectReference, out *core.TypedLocalObjectReference, s conversion.Scope) error { out.APIGroup = (*string)(unsafe.Pointer(in.APIGroup)) out.Kind = in.Kind out.Name = in.Name @@ -8681,11 +8699,11 @@ func autoConvert_v1_TypedLocalObjectReference_To_core_TypedLocalObjectReference( } // Convert_v1_TypedLocalObjectReference_To_core_TypedLocalObjectReference is an autogenerated conversion function. 
-func Convert_v1_TypedLocalObjectReference_To_core_TypedLocalObjectReference(in *v1.TypedLocalObjectReference, out *core.TypedLocalObjectReference, s conversion.Scope) error { +func Convert_v1_TypedLocalObjectReference_To_core_TypedLocalObjectReference(in *corev1.TypedLocalObjectReference, out *core.TypedLocalObjectReference, s conversion.Scope) error { return autoConvert_v1_TypedLocalObjectReference_To_core_TypedLocalObjectReference(in, out, s) } -func autoConvert_core_TypedLocalObjectReference_To_v1_TypedLocalObjectReference(in *core.TypedLocalObjectReference, out *v1.TypedLocalObjectReference, s conversion.Scope) error { +func autoConvert_core_TypedLocalObjectReference_To_v1_TypedLocalObjectReference(in *core.TypedLocalObjectReference, out *corev1.TypedLocalObjectReference, s conversion.Scope) error { out.APIGroup = (*string)(unsafe.Pointer(in.APIGroup)) out.Kind = in.Kind out.Name = in.Name @@ -8693,11 +8711,11 @@ func autoConvert_core_TypedLocalObjectReference_To_v1_TypedLocalObjectReference( } // Convert_core_TypedLocalObjectReference_To_v1_TypedLocalObjectReference is an autogenerated conversion function. -func Convert_core_TypedLocalObjectReference_To_v1_TypedLocalObjectReference(in *core.TypedLocalObjectReference, out *v1.TypedLocalObjectReference, s conversion.Scope) error { +func Convert_core_TypedLocalObjectReference_To_v1_TypedLocalObjectReference(in *core.TypedLocalObjectReference, out *corev1.TypedLocalObjectReference, s conversion.Scope) error { return autoConvert_core_TypedLocalObjectReference_To_v1_TypedLocalObjectReference(in, out, s) } -func autoConvert_v1_TypedObjectReference_To_core_TypedObjectReference(in *v1.TypedObjectReference, out *core.TypedObjectReference, s conversion.Scope) error { +func autoConvert_v1_TypedObjectReference_To_core_TypedObjectReference(in *corev1.TypedObjectReference, out *core.TypedObjectReference, s conversion.Scope) error { out.APIGroup = (*string)(unsafe.Pointer(in.APIGroup)) out.Kind = in.Kind out.Name = in.Name @@ -8706,11 +8724,11 @@ func autoConvert_v1_TypedObjectReference_To_core_TypedObjectReference(in *v1.Typ } // Convert_v1_TypedObjectReference_To_core_TypedObjectReference is an autogenerated conversion function. -func Convert_v1_TypedObjectReference_To_core_TypedObjectReference(in *v1.TypedObjectReference, out *core.TypedObjectReference, s conversion.Scope) error { +func Convert_v1_TypedObjectReference_To_core_TypedObjectReference(in *corev1.TypedObjectReference, out *core.TypedObjectReference, s conversion.Scope) error { return autoConvert_v1_TypedObjectReference_To_core_TypedObjectReference(in, out, s) } -func autoConvert_core_TypedObjectReference_To_v1_TypedObjectReference(in *core.TypedObjectReference, out *v1.TypedObjectReference, s conversion.Scope) error { +func autoConvert_core_TypedObjectReference_To_v1_TypedObjectReference(in *core.TypedObjectReference, out *corev1.TypedObjectReference, s conversion.Scope) error { out.APIGroup = (*string)(unsafe.Pointer(in.APIGroup)) out.Kind = in.Kind out.Name = in.Name @@ -8719,11 +8737,11 @@ func autoConvert_core_TypedObjectReference_To_v1_TypedObjectReference(in *core.T } // Convert_core_TypedObjectReference_To_v1_TypedObjectReference is an autogenerated conversion function. 
-func Convert_core_TypedObjectReference_To_v1_TypedObjectReference(in *core.TypedObjectReference, out *v1.TypedObjectReference, s conversion.Scope) error { +func Convert_core_TypedObjectReference_To_v1_TypedObjectReference(in *core.TypedObjectReference, out *corev1.TypedObjectReference, s conversion.Scope) error { return autoConvert_core_TypedObjectReference_To_v1_TypedObjectReference(in, out, s) } -func autoConvert_v1_Volume_To_core_Volume(in *v1.Volume, out *core.Volume, s conversion.Scope) error { +func autoConvert_v1_Volume_To_core_Volume(in *corev1.Volume, out *core.Volume, s conversion.Scope) error { out.Name = in.Name if err := Convert_v1_VolumeSource_To_core_VolumeSource(&in.VolumeSource, &out.VolumeSource, s); err != nil { return err @@ -8732,11 +8750,11 @@ func autoConvert_v1_Volume_To_core_Volume(in *v1.Volume, out *core.Volume, s con } // Convert_v1_Volume_To_core_Volume is an autogenerated conversion function. -func Convert_v1_Volume_To_core_Volume(in *v1.Volume, out *core.Volume, s conversion.Scope) error { +func Convert_v1_Volume_To_core_Volume(in *corev1.Volume, out *core.Volume, s conversion.Scope) error { return autoConvert_v1_Volume_To_core_Volume(in, out, s) } -func autoConvert_core_Volume_To_v1_Volume(in *core.Volume, out *v1.Volume, s conversion.Scope) error { +func autoConvert_core_Volume_To_v1_Volume(in *core.Volume, out *corev1.Volume, s conversion.Scope) error { out.Name = in.Name if err := Convert_core_VolumeSource_To_v1_VolumeSource(&in.VolumeSource, &out.VolumeSource, s); err != nil { return err @@ -8745,33 +8763,33 @@ func autoConvert_core_Volume_To_v1_Volume(in *core.Volume, out *v1.Volume, s con } // Convert_core_Volume_To_v1_Volume is an autogenerated conversion function. -func Convert_core_Volume_To_v1_Volume(in *core.Volume, out *v1.Volume, s conversion.Scope) error { +func Convert_core_Volume_To_v1_Volume(in *core.Volume, out *corev1.Volume, s conversion.Scope) error { return autoConvert_core_Volume_To_v1_Volume(in, out, s) } -func autoConvert_v1_VolumeDevice_To_core_VolumeDevice(in *v1.VolumeDevice, out *core.VolumeDevice, s conversion.Scope) error { +func autoConvert_v1_VolumeDevice_To_core_VolumeDevice(in *corev1.VolumeDevice, out *core.VolumeDevice, s conversion.Scope) error { out.Name = in.Name out.DevicePath = in.DevicePath return nil } // Convert_v1_VolumeDevice_To_core_VolumeDevice is an autogenerated conversion function. -func Convert_v1_VolumeDevice_To_core_VolumeDevice(in *v1.VolumeDevice, out *core.VolumeDevice, s conversion.Scope) error { +func Convert_v1_VolumeDevice_To_core_VolumeDevice(in *corev1.VolumeDevice, out *core.VolumeDevice, s conversion.Scope) error { return autoConvert_v1_VolumeDevice_To_core_VolumeDevice(in, out, s) } -func autoConvert_core_VolumeDevice_To_v1_VolumeDevice(in *core.VolumeDevice, out *v1.VolumeDevice, s conversion.Scope) error { +func autoConvert_core_VolumeDevice_To_v1_VolumeDevice(in *core.VolumeDevice, out *corev1.VolumeDevice, s conversion.Scope) error { out.Name = in.Name out.DevicePath = in.DevicePath return nil } // Convert_core_VolumeDevice_To_v1_VolumeDevice is an autogenerated conversion function. 
-func Convert_core_VolumeDevice_To_v1_VolumeDevice(in *core.VolumeDevice, out *v1.VolumeDevice, s conversion.Scope) error { +func Convert_core_VolumeDevice_To_v1_VolumeDevice(in *core.VolumeDevice, out *corev1.VolumeDevice, s conversion.Scope) error { return autoConvert_core_VolumeDevice_To_v1_VolumeDevice(in, out, s) } -func autoConvert_v1_VolumeMount_To_core_VolumeMount(in *v1.VolumeMount, out *core.VolumeMount, s conversion.Scope) error { +func autoConvert_v1_VolumeMount_To_core_VolumeMount(in *corev1.VolumeMount, out *core.VolumeMount, s conversion.Scope) error { out.Name = in.Name out.ReadOnly = in.ReadOnly out.RecursiveReadOnly = (*core.RecursiveReadOnlyMode)(unsafe.Pointer(in.RecursiveReadOnly)) @@ -8783,27 +8801,27 @@ func autoConvert_v1_VolumeMount_To_core_VolumeMount(in *v1.VolumeMount, out *cor } // Convert_v1_VolumeMount_To_core_VolumeMount is an autogenerated conversion function. -func Convert_v1_VolumeMount_To_core_VolumeMount(in *v1.VolumeMount, out *core.VolumeMount, s conversion.Scope) error { +func Convert_v1_VolumeMount_To_core_VolumeMount(in *corev1.VolumeMount, out *core.VolumeMount, s conversion.Scope) error { return autoConvert_v1_VolumeMount_To_core_VolumeMount(in, out, s) } -func autoConvert_core_VolumeMount_To_v1_VolumeMount(in *core.VolumeMount, out *v1.VolumeMount, s conversion.Scope) error { +func autoConvert_core_VolumeMount_To_v1_VolumeMount(in *core.VolumeMount, out *corev1.VolumeMount, s conversion.Scope) error { out.Name = in.Name out.ReadOnly = in.ReadOnly - out.RecursiveReadOnly = (*v1.RecursiveReadOnlyMode)(unsafe.Pointer(in.RecursiveReadOnly)) + out.RecursiveReadOnly = (*corev1.RecursiveReadOnlyMode)(unsafe.Pointer(in.RecursiveReadOnly)) out.MountPath = in.MountPath out.SubPath = in.SubPath - out.MountPropagation = (*v1.MountPropagationMode)(unsafe.Pointer(in.MountPropagation)) + out.MountPropagation = (*corev1.MountPropagationMode)(unsafe.Pointer(in.MountPropagation)) out.SubPathExpr = in.SubPathExpr return nil } // Convert_core_VolumeMount_To_v1_VolumeMount is an autogenerated conversion function. -func Convert_core_VolumeMount_To_v1_VolumeMount(in *core.VolumeMount, out *v1.VolumeMount, s conversion.Scope) error { +func Convert_core_VolumeMount_To_v1_VolumeMount(in *core.VolumeMount, out *corev1.VolumeMount, s conversion.Scope) error { return autoConvert_core_VolumeMount_To_v1_VolumeMount(in, out, s) } -func autoConvert_v1_VolumeMountStatus_To_core_VolumeMountStatus(in *v1.VolumeMountStatus, out *core.VolumeMountStatus, s conversion.Scope) error { +func autoConvert_v1_VolumeMountStatus_To_core_VolumeMountStatus(in *corev1.VolumeMountStatus, out *core.VolumeMountStatus, s conversion.Scope) error { out.Name = in.Name out.MountPath = in.MountPath out.ReadOnly = in.ReadOnly @@ -8812,44 +8830,44 @@ func autoConvert_v1_VolumeMountStatus_To_core_VolumeMountStatus(in *v1.VolumeMou } // Convert_v1_VolumeMountStatus_To_core_VolumeMountStatus is an autogenerated conversion function. 
-func Convert_v1_VolumeMountStatus_To_core_VolumeMountStatus(in *v1.VolumeMountStatus, out *core.VolumeMountStatus, s conversion.Scope) error { +func Convert_v1_VolumeMountStatus_To_core_VolumeMountStatus(in *corev1.VolumeMountStatus, out *core.VolumeMountStatus, s conversion.Scope) error { return autoConvert_v1_VolumeMountStatus_To_core_VolumeMountStatus(in, out, s) } -func autoConvert_core_VolumeMountStatus_To_v1_VolumeMountStatus(in *core.VolumeMountStatus, out *v1.VolumeMountStatus, s conversion.Scope) error { +func autoConvert_core_VolumeMountStatus_To_v1_VolumeMountStatus(in *core.VolumeMountStatus, out *corev1.VolumeMountStatus, s conversion.Scope) error { out.Name = in.Name out.MountPath = in.MountPath out.ReadOnly = in.ReadOnly - out.RecursiveReadOnly = (*v1.RecursiveReadOnlyMode)(unsafe.Pointer(in.RecursiveReadOnly)) + out.RecursiveReadOnly = (*corev1.RecursiveReadOnlyMode)(unsafe.Pointer(in.RecursiveReadOnly)) return nil } // Convert_core_VolumeMountStatus_To_v1_VolumeMountStatus is an autogenerated conversion function. -func Convert_core_VolumeMountStatus_To_v1_VolumeMountStatus(in *core.VolumeMountStatus, out *v1.VolumeMountStatus, s conversion.Scope) error { +func Convert_core_VolumeMountStatus_To_v1_VolumeMountStatus(in *core.VolumeMountStatus, out *corev1.VolumeMountStatus, s conversion.Scope) error { return autoConvert_core_VolumeMountStatus_To_v1_VolumeMountStatus(in, out, s) } -func autoConvert_v1_VolumeNodeAffinity_To_core_VolumeNodeAffinity(in *v1.VolumeNodeAffinity, out *core.VolumeNodeAffinity, s conversion.Scope) error { +func autoConvert_v1_VolumeNodeAffinity_To_core_VolumeNodeAffinity(in *corev1.VolumeNodeAffinity, out *core.VolumeNodeAffinity, s conversion.Scope) error { out.Required = (*core.NodeSelector)(unsafe.Pointer(in.Required)) return nil } // Convert_v1_VolumeNodeAffinity_To_core_VolumeNodeAffinity is an autogenerated conversion function. -func Convert_v1_VolumeNodeAffinity_To_core_VolumeNodeAffinity(in *v1.VolumeNodeAffinity, out *core.VolumeNodeAffinity, s conversion.Scope) error { +func Convert_v1_VolumeNodeAffinity_To_core_VolumeNodeAffinity(in *corev1.VolumeNodeAffinity, out *core.VolumeNodeAffinity, s conversion.Scope) error { return autoConvert_v1_VolumeNodeAffinity_To_core_VolumeNodeAffinity(in, out, s) } -func autoConvert_core_VolumeNodeAffinity_To_v1_VolumeNodeAffinity(in *core.VolumeNodeAffinity, out *v1.VolumeNodeAffinity, s conversion.Scope) error { - out.Required = (*v1.NodeSelector)(unsafe.Pointer(in.Required)) +func autoConvert_core_VolumeNodeAffinity_To_v1_VolumeNodeAffinity(in *core.VolumeNodeAffinity, out *corev1.VolumeNodeAffinity, s conversion.Scope) error { + out.Required = (*corev1.NodeSelector)(unsafe.Pointer(in.Required)) return nil } // Convert_core_VolumeNodeAffinity_To_v1_VolumeNodeAffinity is an autogenerated conversion function. 
-func Convert_core_VolumeNodeAffinity_To_v1_VolumeNodeAffinity(in *core.VolumeNodeAffinity, out *v1.VolumeNodeAffinity, s conversion.Scope) error { +func Convert_core_VolumeNodeAffinity_To_v1_VolumeNodeAffinity(in *core.VolumeNodeAffinity, out *corev1.VolumeNodeAffinity, s conversion.Scope) error { return autoConvert_core_VolumeNodeAffinity_To_v1_VolumeNodeAffinity(in, out, s) } -func autoConvert_v1_VolumeProjection_To_core_VolumeProjection(in *v1.VolumeProjection, out *core.VolumeProjection, s conversion.Scope) error { +func autoConvert_v1_VolumeProjection_To_core_VolumeProjection(in *corev1.VolumeProjection, out *core.VolumeProjection, s conversion.Scope) error { out.Secret = (*core.SecretProjection)(unsafe.Pointer(in.Secret)) out.DownwardAPI = (*core.DownwardAPIProjection)(unsafe.Pointer(in.DownwardAPI)) out.ConfigMap = (*core.ConfigMapProjection)(unsafe.Pointer(in.ConfigMap)) @@ -8867,55 +8885,55 @@ func autoConvert_v1_VolumeProjection_To_core_VolumeProjection(in *v1.VolumeProje } // Convert_v1_VolumeProjection_To_core_VolumeProjection is an autogenerated conversion function. -func Convert_v1_VolumeProjection_To_core_VolumeProjection(in *v1.VolumeProjection, out *core.VolumeProjection, s conversion.Scope) error { +func Convert_v1_VolumeProjection_To_core_VolumeProjection(in *corev1.VolumeProjection, out *core.VolumeProjection, s conversion.Scope) error { return autoConvert_v1_VolumeProjection_To_core_VolumeProjection(in, out, s) } -func autoConvert_core_VolumeProjection_To_v1_VolumeProjection(in *core.VolumeProjection, out *v1.VolumeProjection, s conversion.Scope) error { - out.Secret = (*v1.SecretProjection)(unsafe.Pointer(in.Secret)) - out.DownwardAPI = (*v1.DownwardAPIProjection)(unsafe.Pointer(in.DownwardAPI)) - out.ConfigMap = (*v1.ConfigMapProjection)(unsafe.Pointer(in.ConfigMap)) +func autoConvert_core_VolumeProjection_To_v1_VolumeProjection(in *core.VolumeProjection, out *corev1.VolumeProjection, s conversion.Scope) error { + out.Secret = (*corev1.SecretProjection)(unsafe.Pointer(in.Secret)) + out.DownwardAPI = (*corev1.DownwardAPIProjection)(unsafe.Pointer(in.DownwardAPI)) + out.ConfigMap = (*corev1.ConfigMapProjection)(unsafe.Pointer(in.ConfigMap)) if in.ServiceAccountToken != nil { in, out := &in.ServiceAccountToken, &out.ServiceAccountToken - *out = new(v1.ServiceAccountTokenProjection) + *out = new(corev1.ServiceAccountTokenProjection) if err := Convert_core_ServiceAccountTokenProjection_To_v1_ServiceAccountTokenProjection(*in, *out, s); err != nil { return err } } else { out.ServiceAccountToken = nil } - out.ClusterTrustBundle = (*v1.ClusterTrustBundleProjection)(unsafe.Pointer(in.ClusterTrustBundle)) + out.ClusterTrustBundle = (*corev1.ClusterTrustBundleProjection)(unsafe.Pointer(in.ClusterTrustBundle)) return nil } // Convert_core_VolumeProjection_To_v1_VolumeProjection is an autogenerated conversion function. 
-func Convert_core_VolumeProjection_To_v1_VolumeProjection(in *core.VolumeProjection, out *v1.VolumeProjection, s conversion.Scope) error { +func Convert_core_VolumeProjection_To_v1_VolumeProjection(in *core.VolumeProjection, out *corev1.VolumeProjection, s conversion.Scope) error { return autoConvert_core_VolumeProjection_To_v1_VolumeProjection(in, out, s) } -func autoConvert_v1_VolumeResourceRequirements_To_core_VolumeResourceRequirements(in *v1.VolumeResourceRequirements, out *core.VolumeResourceRequirements, s conversion.Scope) error { +func autoConvert_v1_VolumeResourceRequirements_To_core_VolumeResourceRequirements(in *corev1.VolumeResourceRequirements, out *core.VolumeResourceRequirements, s conversion.Scope) error { out.Limits = *(*core.ResourceList)(unsafe.Pointer(&in.Limits)) out.Requests = *(*core.ResourceList)(unsafe.Pointer(&in.Requests)) return nil } // Convert_v1_VolumeResourceRequirements_To_core_VolumeResourceRequirements is an autogenerated conversion function. -func Convert_v1_VolumeResourceRequirements_To_core_VolumeResourceRequirements(in *v1.VolumeResourceRequirements, out *core.VolumeResourceRequirements, s conversion.Scope) error { +func Convert_v1_VolumeResourceRequirements_To_core_VolumeResourceRequirements(in *corev1.VolumeResourceRequirements, out *core.VolumeResourceRequirements, s conversion.Scope) error { return autoConvert_v1_VolumeResourceRequirements_To_core_VolumeResourceRequirements(in, out, s) } -func autoConvert_core_VolumeResourceRequirements_To_v1_VolumeResourceRequirements(in *core.VolumeResourceRequirements, out *v1.VolumeResourceRequirements, s conversion.Scope) error { - out.Limits = *(*v1.ResourceList)(unsafe.Pointer(&in.Limits)) - out.Requests = *(*v1.ResourceList)(unsafe.Pointer(&in.Requests)) +func autoConvert_core_VolumeResourceRequirements_To_v1_VolumeResourceRequirements(in *core.VolumeResourceRequirements, out *corev1.VolumeResourceRequirements, s conversion.Scope) error { + out.Limits = *(*corev1.ResourceList)(unsafe.Pointer(&in.Limits)) + out.Requests = *(*corev1.ResourceList)(unsafe.Pointer(&in.Requests)) return nil } // Convert_core_VolumeResourceRequirements_To_v1_VolumeResourceRequirements is an autogenerated conversion function. -func Convert_core_VolumeResourceRequirements_To_v1_VolumeResourceRequirements(in *core.VolumeResourceRequirements, out *v1.VolumeResourceRequirements, s conversion.Scope) error { +func Convert_core_VolumeResourceRequirements_To_v1_VolumeResourceRequirements(in *core.VolumeResourceRequirements, out *corev1.VolumeResourceRequirements, s conversion.Scope) error { return autoConvert_core_VolumeResourceRequirements_To_v1_VolumeResourceRequirements(in, out, s) } -func autoConvert_v1_VolumeSource_To_core_VolumeSource(in *v1.VolumeSource, out *core.VolumeSource, s conversion.Scope) error { +func autoConvert_v1_VolumeSource_To_core_VolumeSource(in *corev1.VolumeSource, out *core.VolumeSource, s conversion.Scope) error { out.HostPath = (*core.HostPathVolumeSource)(unsafe.Pointer(in.HostPath)) out.EmptyDir = (*core.EmptyDirVolumeSource)(unsafe.Pointer(in.EmptyDir)) out.GCEPersistentDisk = (*core.GCEPersistentDiskVolumeSource)(unsafe.Pointer(in.GCEPersistentDisk)) @@ -8958,58 +8976,58 @@ func autoConvert_v1_VolumeSource_To_core_VolumeSource(in *v1.VolumeSource, out * } // Convert_v1_VolumeSource_To_core_VolumeSource is an autogenerated conversion function. 
-func Convert_v1_VolumeSource_To_core_VolumeSource(in *v1.VolumeSource, out *core.VolumeSource, s conversion.Scope) error { +func Convert_v1_VolumeSource_To_core_VolumeSource(in *corev1.VolumeSource, out *core.VolumeSource, s conversion.Scope) error { return autoConvert_v1_VolumeSource_To_core_VolumeSource(in, out, s) } -func autoConvert_core_VolumeSource_To_v1_VolumeSource(in *core.VolumeSource, out *v1.VolumeSource, s conversion.Scope) error { - out.HostPath = (*v1.HostPathVolumeSource)(unsafe.Pointer(in.HostPath)) - out.EmptyDir = (*v1.EmptyDirVolumeSource)(unsafe.Pointer(in.EmptyDir)) - out.GCEPersistentDisk = (*v1.GCEPersistentDiskVolumeSource)(unsafe.Pointer(in.GCEPersistentDisk)) - out.AWSElasticBlockStore = (*v1.AWSElasticBlockStoreVolumeSource)(unsafe.Pointer(in.AWSElasticBlockStore)) - out.GitRepo = (*v1.GitRepoVolumeSource)(unsafe.Pointer(in.GitRepo)) - out.Secret = (*v1.SecretVolumeSource)(unsafe.Pointer(in.Secret)) - out.NFS = (*v1.NFSVolumeSource)(unsafe.Pointer(in.NFS)) - out.ISCSI = (*v1.ISCSIVolumeSource)(unsafe.Pointer(in.ISCSI)) - out.Glusterfs = (*v1.GlusterfsVolumeSource)(unsafe.Pointer(in.Glusterfs)) - out.PersistentVolumeClaim = (*v1.PersistentVolumeClaimVolumeSource)(unsafe.Pointer(in.PersistentVolumeClaim)) - out.RBD = (*v1.RBDVolumeSource)(unsafe.Pointer(in.RBD)) - out.Quobyte = (*v1.QuobyteVolumeSource)(unsafe.Pointer(in.Quobyte)) - out.FlexVolume = (*v1.FlexVolumeSource)(unsafe.Pointer(in.FlexVolume)) - out.Cinder = (*v1.CinderVolumeSource)(unsafe.Pointer(in.Cinder)) - out.CephFS = (*v1.CephFSVolumeSource)(unsafe.Pointer(in.CephFS)) - out.Flocker = (*v1.FlockerVolumeSource)(unsafe.Pointer(in.Flocker)) - out.DownwardAPI = (*v1.DownwardAPIVolumeSource)(unsafe.Pointer(in.DownwardAPI)) - out.FC = (*v1.FCVolumeSource)(unsafe.Pointer(in.FC)) - out.AzureFile = (*v1.AzureFileVolumeSource)(unsafe.Pointer(in.AzureFile)) - out.ConfigMap = (*v1.ConfigMapVolumeSource)(unsafe.Pointer(in.ConfigMap)) - out.VsphereVolume = (*v1.VsphereVirtualDiskVolumeSource)(unsafe.Pointer(in.VsphereVolume)) - out.AzureDisk = (*v1.AzureDiskVolumeSource)(unsafe.Pointer(in.AzureDisk)) - out.PhotonPersistentDisk = (*v1.PhotonPersistentDiskVolumeSource)(unsafe.Pointer(in.PhotonPersistentDisk)) +func autoConvert_core_VolumeSource_To_v1_VolumeSource(in *core.VolumeSource, out *corev1.VolumeSource, s conversion.Scope) error { + out.HostPath = (*corev1.HostPathVolumeSource)(unsafe.Pointer(in.HostPath)) + out.EmptyDir = (*corev1.EmptyDirVolumeSource)(unsafe.Pointer(in.EmptyDir)) + out.GCEPersistentDisk = (*corev1.GCEPersistentDiskVolumeSource)(unsafe.Pointer(in.GCEPersistentDisk)) + out.AWSElasticBlockStore = (*corev1.AWSElasticBlockStoreVolumeSource)(unsafe.Pointer(in.AWSElasticBlockStore)) + out.GitRepo = (*corev1.GitRepoVolumeSource)(unsafe.Pointer(in.GitRepo)) + out.Secret = (*corev1.SecretVolumeSource)(unsafe.Pointer(in.Secret)) + out.NFS = (*corev1.NFSVolumeSource)(unsafe.Pointer(in.NFS)) + out.ISCSI = (*corev1.ISCSIVolumeSource)(unsafe.Pointer(in.ISCSI)) + out.Glusterfs = (*corev1.GlusterfsVolumeSource)(unsafe.Pointer(in.Glusterfs)) + out.PersistentVolumeClaim = (*corev1.PersistentVolumeClaimVolumeSource)(unsafe.Pointer(in.PersistentVolumeClaim)) + out.RBD = (*corev1.RBDVolumeSource)(unsafe.Pointer(in.RBD)) + out.Quobyte = (*corev1.QuobyteVolumeSource)(unsafe.Pointer(in.Quobyte)) + out.FlexVolume = (*corev1.FlexVolumeSource)(unsafe.Pointer(in.FlexVolume)) + out.Cinder = (*corev1.CinderVolumeSource)(unsafe.Pointer(in.Cinder)) + out.CephFS = (*corev1.CephFSVolumeSource)(unsafe.Pointer(in.CephFS)) + 
out.Flocker = (*corev1.FlockerVolumeSource)(unsafe.Pointer(in.Flocker)) + out.DownwardAPI = (*corev1.DownwardAPIVolumeSource)(unsafe.Pointer(in.DownwardAPI)) + out.FC = (*corev1.FCVolumeSource)(unsafe.Pointer(in.FC)) + out.AzureFile = (*corev1.AzureFileVolumeSource)(unsafe.Pointer(in.AzureFile)) + out.ConfigMap = (*corev1.ConfigMapVolumeSource)(unsafe.Pointer(in.ConfigMap)) + out.VsphereVolume = (*corev1.VsphereVirtualDiskVolumeSource)(unsafe.Pointer(in.VsphereVolume)) + out.AzureDisk = (*corev1.AzureDiskVolumeSource)(unsafe.Pointer(in.AzureDisk)) + out.PhotonPersistentDisk = (*corev1.PhotonPersistentDiskVolumeSource)(unsafe.Pointer(in.PhotonPersistentDisk)) if in.Projected != nil { in, out := &in.Projected, &out.Projected - *out = new(v1.ProjectedVolumeSource) + *out = new(corev1.ProjectedVolumeSource) if err := Convert_core_ProjectedVolumeSource_To_v1_ProjectedVolumeSource(*in, *out, s); err != nil { return err } } else { out.Projected = nil } - out.PortworxVolume = (*v1.PortworxVolumeSource)(unsafe.Pointer(in.PortworxVolume)) - out.ScaleIO = (*v1.ScaleIOVolumeSource)(unsafe.Pointer(in.ScaleIO)) - out.StorageOS = (*v1.StorageOSVolumeSource)(unsafe.Pointer(in.StorageOS)) - out.CSI = (*v1.CSIVolumeSource)(unsafe.Pointer(in.CSI)) - out.Ephemeral = (*v1.EphemeralVolumeSource)(unsafe.Pointer(in.Ephemeral)) - out.Image = (*v1.ImageVolumeSource)(unsafe.Pointer(in.Image)) + out.PortworxVolume = (*corev1.PortworxVolumeSource)(unsafe.Pointer(in.PortworxVolume)) + out.ScaleIO = (*corev1.ScaleIOVolumeSource)(unsafe.Pointer(in.ScaleIO)) + out.StorageOS = (*corev1.StorageOSVolumeSource)(unsafe.Pointer(in.StorageOS)) + out.CSI = (*corev1.CSIVolumeSource)(unsafe.Pointer(in.CSI)) + out.Ephemeral = (*corev1.EphemeralVolumeSource)(unsafe.Pointer(in.Ephemeral)) + out.Image = (*corev1.ImageVolumeSource)(unsafe.Pointer(in.Image)) return nil } // Convert_core_VolumeSource_To_v1_VolumeSource is an autogenerated conversion function. -func Convert_core_VolumeSource_To_v1_VolumeSource(in *core.VolumeSource, out *v1.VolumeSource, s conversion.Scope) error { +func Convert_core_VolumeSource_To_v1_VolumeSource(in *core.VolumeSource, out *corev1.VolumeSource, s conversion.Scope) error { return autoConvert_core_VolumeSource_To_v1_VolumeSource(in, out, s) } -func autoConvert_v1_VsphereVirtualDiskVolumeSource_To_core_VsphereVirtualDiskVolumeSource(in *v1.VsphereVirtualDiskVolumeSource, out *core.VsphereVirtualDiskVolumeSource, s conversion.Scope) error { +func autoConvert_v1_VsphereVirtualDiskVolumeSource_To_core_VsphereVirtualDiskVolumeSource(in *corev1.VsphereVirtualDiskVolumeSource, out *core.VsphereVirtualDiskVolumeSource, s conversion.Scope) error { out.VolumePath = in.VolumePath out.FSType = in.FSType out.StoragePolicyName = in.StoragePolicyName @@ -9018,11 +9036,11 @@ func autoConvert_v1_VsphereVirtualDiskVolumeSource_To_core_VsphereVirtualDiskVol } // Convert_v1_VsphereVirtualDiskVolumeSource_To_core_VsphereVirtualDiskVolumeSource is an autogenerated conversion function. 
-func Convert_v1_VsphereVirtualDiskVolumeSource_To_core_VsphereVirtualDiskVolumeSource(in *v1.VsphereVirtualDiskVolumeSource, out *core.VsphereVirtualDiskVolumeSource, s conversion.Scope) error { +func Convert_v1_VsphereVirtualDiskVolumeSource_To_core_VsphereVirtualDiskVolumeSource(in *corev1.VsphereVirtualDiskVolumeSource, out *core.VsphereVirtualDiskVolumeSource, s conversion.Scope) error { return autoConvert_v1_VsphereVirtualDiskVolumeSource_To_core_VsphereVirtualDiskVolumeSource(in, out, s) } -func autoConvert_core_VsphereVirtualDiskVolumeSource_To_v1_VsphereVirtualDiskVolumeSource(in *core.VsphereVirtualDiskVolumeSource, out *v1.VsphereVirtualDiskVolumeSource, s conversion.Scope) error { +func autoConvert_core_VsphereVirtualDiskVolumeSource_To_v1_VsphereVirtualDiskVolumeSource(in *core.VsphereVirtualDiskVolumeSource, out *corev1.VsphereVirtualDiskVolumeSource, s conversion.Scope) error { out.VolumePath = in.VolumePath out.FSType = in.FSType out.StoragePolicyName = in.StoragePolicyName @@ -9031,11 +9049,11 @@ func autoConvert_core_VsphereVirtualDiskVolumeSource_To_v1_VsphereVirtualDiskVol } // Convert_core_VsphereVirtualDiskVolumeSource_To_v1_VsphereVirtualDiskVolumeSource is an autogenerated conversion function. -func Convert_core_VsphereVirtualDiskVolumeSource_To_v1_VsphereVirtualDiskVolumeSource(in *core.VsphereVirtualDiskVolumeSource, out *v1.VsphereVirtualDiskVolumeSource, s conversion.Scope) error { +func Convert_core_VsphereVirtualDiskVolumeSource_To_v1_VsphereVirtualDiskVolumeSource(in *core.VsphereVirtualDiskVolumeSource, out *corev1.VsphereVirtualDiskVolumeSource, s conversion.Scope) error { return autoConvert_core_VsphereVirtualDiskVolumeSource_To_v1_VsphereVirtualDiskVolumeSource(in, out, s) } -func autoConvert_v1_WeightedPodAffinityTerm_To_core_WeightedPodAffinityTerm(in *v1.WeightedPodAffinityTerm, out *core.WeightedPodAffinityTerm, s conversion.Scope) error { +func autoConvert_v1_WeightedPodAffinityTerm_To_core_WeightedPodAffinityTerm(in *corev1.WeightedPodAffinityTerm, out *core.WeightedPodAffinityTerm, s conversion.Scope) error { out.Weight = in.Weight if err := Convert_v1_PodAffinityTerm_To_core_PodAffinityTerm(&in.PodAffinityTerm, &out.PodAffinityTerm, s); err != nil { return err @@ -9044,11 +9062,11 @@ func autoConvert_v1_WeightedPodAffinityTerm_To_core_WeightedPodAffinityTerm(in * } // Convert_v1_WeightedPodAffinityTerm_To_core_WeightedPodAffinityTerm is an autogenerated conversion function. 
-func Convert_v1_WeightedPodAffinityTerm_To_core_WeightedPodAffinityTerm(in *v1.WeightedPodAffinityTerm, out *core.WeightedPodAffinityTerm, s conversion.Scope) error { +func Convert_v1_WeightedPodAffinityTerm_To_core_WeightedPodAffinityTerm(in *corev1.WeightedPodAffinityTerm, out *core.WeightedPodAffinityTerm, s conversion.Scope) error { return autoConvert_v1_WeightedPodAffinityTerm_To_core_WeightedPodAffinityTerm(in, out, s) } -func autoConvert_core_WeightedPodAffinityTerm_To_v1_WeightedPodAffinityTerm(in *core.WeightedPodAffinityTerm, out *v1.WeightedPodAffinityTerm, s conversion.Scope) error { +func autoConvert_core_WeightedPodAffinityTerm_To_v1_WeightedPodAffinityTerm(in *core.WeightedPodAffinityTerm, out *corev1.WeightedPodAffinityTerm, s conversion.Scope) error { out.Weight = in.Weight if err := Convert_core_PodAffinityTerm_To_v1_PodAffinityTerm(&in.PodAffinityTerm, &out.PodAffinityTerm, s); err != nil { return err @@ -9057,11 +9075,11 @@ func autoConvert_core_WeightedPodAffinityTerm_To_v1_WeightedPodAffinityTerm(in * } // Convert_core_WeightedPodAffinityTerm_To_v1_WeightedPodAffinityTerm is an autogenerated conversion function. -func Convert_core_WeightedPodAffinityTerm_To_v1_WeightedPodAffinityTerm(in *core.WeightedPodAffinityTerm, out *v1.WeightedPodAffinityTerm, s conversion.Scope) error { +func Convert_core_WeightedPodAffinityTerm_To_v1_WeightedPodAffinityTerm(in *core.WeightedPodAffinityTerm, out *corev1.WeightedPodAffinityTerm, s conversion.Scope) error { return autoConvert_core_WeightedPodAffinityTerm_To_v1_WeightedPodAffinityTerm(in, out, s) } -func autoConvert_v1_WindowsSecurityContextOptions_To_core_WindowsSecurityContextOptions(in *v1.WindowsSecurityContextOptions, out *core.WindowsSecurityContextOptions, s conversion.Scope) error { +func autoConvert_v1_WindowsSecurityContextOptions_To_core_WindowsSecurityContextOptions(in *corev1.WindowsSecurityContextOptions, out *core.WindowsSecurityContextOptions, s conversion.Scope) error { out.GMSACredentialSpecName = (*string)(unsafe.Pointer(in.GMSACredentialSpecName)) out.GMSACredentialSpec = (*string)(unsafe.Pointer(in.GMSACredentialSpec)) out.RunAsUserName = (*string)(unsafe.Pointer(in.RunAsUserName)) @@ -9070,11 +9088,11 @@ func autoConvert_v1_WindowsSecurityContextOptions_To_core_WindowsSecurityContext } // Convert_v1_WindowsSecurityContextOptions_To_core_WindowsSecurityContextOptions is an autogenerated conversion function. 
-func Convert_v1_WindowsSecurityContextOptions_To_core_WindowsSecurityContextOptions(in *v1.WindowsSecurityContextOptions, out *core.WindowsSecurityContextOptions, s conversion.Scope) error { +func Convert_v1_WindowsSecurityContextOptions_To_core_WindowsSecurityContextOptions(in *corev1.WindowsSecurityContextOptions, out *core.WindowsSecurityContextOptions, s conversion.Scope) error { return autoConvert_v1_WindowsSecurityContextOptions_To_core_WindowsSecurityContextOptions(in, out, s) } -func autoConvert_core_WindowsSecurityContextOptions_To_v1_WindowsSecurityContextOptions(in *core.WindowsSecurityContextOptions, out *v1.WindowsSecurityContextOptions, s conversion.Scope) error { +func autoConvert_core_WindowsSecurityContextOptions_To_v1_WindowsSecurityContextOptions(in *core.WindowsSecurityContextOptions, out *corev1.WindowsSecurityContextOptions, s conversion.Scope) error { out.GMSACredentialSpecName = (*string)(unsafe.Pointer(in.GMSACredentialSpecName)) out.GMSACredentialSpec = (*string)(unsafe.Pointer(in.GMSACredentialSpec)) out.RunAsUserName = (*string)(unsafe.Pointer(in.RunAsUserName)) @@ -9083,6 +9101,6 @@ func autoConvert_core_WindowsSecurityContextOptions_To_v1_WindowsSecurityContext } // Convert_core_WindowsSecurityContextOptions_To_v1_WindowsSecurityContextOptions is an autogenerated conversion function. -func Convert_core_WindowsSecurityContextOptions_To_v1_WindowsSecurityContextOptions(in *core.WindowsSecurityContextOptions, out *v1.WindowsSecurityContextOptions, s conversion.Scope) error { +func Convert_core_WindowsSecurityContextOptions_To_v1_WindowsSecurityContextOptions(in *core.WindowsSecurityContextOptions, out *corev1.WindowsSecurityContextOptions, s conversion.Scope) error { return autoConvert_core_WindowsSecurityContextOptions_To_v1_WindowsSecurityContextOptions(in, out, s) } diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/v1/zz_generated.defaults.go b/vendor/k8s.io/kubernetes/pkg/apis/core/v1/zz_generated.defaults.go index fd65b9517..3b6eb4f0a 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/core/v1/zz_generated.defaults.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/core/v1/zz_generated.defaults.go @@ -22,7 +22,7 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" runtime "k8s.io/apimachinery/pkg/runtime" ) @@ -30,63 +30,64 @@ import ( // Public to allow building arbitrary schemes. // All generated defaulters are covering - they call all nested defaulters. 
func RegisterDefaults(scheme *runtime.Scheme) error { - scheme.AddTypeDefaultingFunc(&v1.ConfigMap{}, func(obj interface{}) { SetObjectDefaults_ConfigMap(obj.(*v1.ConfigMap)) }) - scheme.AddTypeDefaultingFunc(&v1.ConfigMapList{}, func(obj interface{}) { SetObjectDefaults_ConfigMapList(obj.(*v1.ConfigMapList)) }) - scheme.AddTypeDefaultingFunc(&v1.Endpoints{}, func(obj interface{}) { SetObjectDefaults_Endpoints(obj.(*v1.Endpoints)) }) - scheme.AddTypeDefaultingFunc(&v1.EndpointsList{}, func(obj interface{}) { SetObjectDefaults_EndpointsList(obj.(*v1.EndpointsList)) }) - scheme.AddTypeDefaultingFunc(&v1.LimitRange{}, func(obj interface{}) { SetObjectDefaults_LimitRange(obj.(*v1.LimitRange)) }) - scheme.AddTypeDefaultingFunc(&v1.LimitRangeList{}, func(obj interface{}) { SetObjectDefaults_LimitRangeList(obj.(*v1.LimitRangeList)) }) - scheme.AddTypeDefaultingFunc(&v1.Namespace{}, func(obj interface{}) { SetObjectDefaults_Namespace(obj.(*v1.Namespace)) }) - scheme.AddTypeDefaultingFunc(&v1.NamespaceList{}, func(obj interface{}) { SetObjectDefaults_NamespaceList(obj.(*v1.NamespaceList)) }) - scheme.AddTypeDefaultingFunc(&v1.Node{}, func(obj interface{}) { SetObjectDefaults_Node(obj.(*v1.Node)) }) - scheme.AddTypeDefaultingFunc(&v1.NodeList{}, func(obj interface{}) { SetObjectDefaults_NodeList(obj.(*v1.NodeList)) }) - scheme.AddTypeDefaultingFunc(&v1.PersistentVolume{}, func(obj interface{}) { SetObjectDefaults_PersistentVolume(obj.(*v1.PersistentVolume)) }) - scheme.AddTypeDefaultingFunc(&v1.PersistentVolumeClaim{}, func(obj interface{}) { SetObjectDefaults_PersistentVolumeClaim(obj.(*v1.PersistentVolumeClaim)) }) - scheme.AddTypeDefaultingFunc(&v1.PersistentVolumeClaimList{}, func(obj interface{}) { - SetObjectDefaults_PersistentVolumeClaimList(obj.(*v1.PersistentVolumeClaimList)) + scheme.AddTypeDefaultingFunc(&corev1.ConfigMap{}, func(obj interface{}) { SetObjectDefaults_ConfigMap(obj.(*corev1.ConfigMap)) }) + scheme.AddTypeDefaultingFunc(&corev1.ConfigMapList{}, func(obj interface{}) { SetObjectDefaults_ConfigMapList(obj.(*corev1.ConfigMapList)) }) + scheme.AddTypeDefaultingFunc(&corev1.Endpoints{}, func(obj interface{}) { SetObjectDefaults_Endpoints(obj.(*corev1.Endpoints)) }) + scheme.AddTypeDefaultingFunc(&corev1.EndpointsList{}, func(obj interface{}) { SetObjectDefaults_EndpointsList(obj.(*corev1.EndpointsList)) }) + scheme.AddTypeDefaultingFunc(&corev1.LimitRange{}, func(obj interface{}) { SetObjectDefaults_LimitRange(obj.(*corev1.LimitRange)) }) + scheme.AddTypeDefaultingFunc(&corev1.LimitRangeList{}, func(obj interface{}) { SetObjectDefaults_LimitRangeList(obj.(*corev1.LimitRangeList)) }) + scheme.AddTypeDefaultingFunc(&corev1.Namespace{}, func(obj interface{}) { SetObjectDefaults_Namespace(obj.(*corev1.Namespace)) }) + scheme.AddTypeDefaultingFunc(&corev1.NamespaceList{}, func(obj interface{}) { SetObjectDefaults_NamespaceList(obj.(*corev1.NamespaceList)) }) + scheme.AddTypeDefaultingFunc(&corev1.Node{}, func(obj interface{}) { SetObjectDefaults_Node(obj.(*corev1.Node)) }) + scheme.AddTypeDefaultingFunc(&corev1.NodeList{}, func(obj interface{}) { SetObjectDefaults_NodeList(obj.(*corev1.NodeList)) }) + scheme.AddTypeDefaultingFunc(&corev1.PersistentVolume{}, func(obj interface{}) { SetObjectDefaults_PersistentVolume(obj.(*corev1.PersistentVolume)) }) + scheme.AddTypeDefaultingFunc(&corev1.PersistentVolumeClaim{}, func(obj interface{}) { SetObjectDefaults_PersistentVolumeClaim(obj.(*corev1.PersistentVolumeClaim)) }) + scheme.AddTypeDefaultingFunc(&corev1.PersistentVolumeClaimList{}, 
func(obj interface{}) { + SetObjectDefaults_PersistentVolumeClaimList(obj.(*corev1.PersistentVolumeClaimList)) }) - scheme.AddTypeDefaultingFunc(&v1.PersistentVolumeList{}, func(obj interface{}) { SetObjectDefaults_PersistentVolumeList(obj.(*v1.PersistentVolumeList)) }) - scheme.AddTypeDefaultingFunc(&v1.Pod{}, func(obj interface{}) { SetObjectDefaults_Pod(obj.(*v1.Pod)) }) - scheme.AddTypeDefaultingFunc(&v1.PodList{}, func(obj interface{}) { SetObjectDefaults_PodList(obj.(*v1.PodList)) }) - scheme.AddTypeDefaultingFunc(&v1.PodStatusResult{}, func(obj interface{}) { SetObjectDefaults_PodStatusResult(obj.(*v1.PodStatusResult)) }) - scheme.AddTypeDefaultingFunc(&v1.PodTemplate{}, func(obj interface{}) { SetObjectDefaults_PodTemplate(obj.(*v1.PodTemplate)) }) - scheme.AddTypeDefaultingFunc(&v1.PodTemplateList{}, func(obj interface{}) { SetObjectDefaults_PodTemplateList(obj.(*v1.PodTemplateList)) }) - scheme.AddTypeDefaultingFunc(&v1.ReplicationController{}, func(obj interface{}) { SetObjectDefaults_ReplicationController(obj.(*v1.ReplicationController)) }) - scheme.AddTypeDefaultingFunc(&v1.ReplicationControllerList{}, func(obj interface{}) { - SetObjectDefaults_ReplicationControllerList(obj.(*v1.ReplicationControllerList)) + scheme.AddTypeDefaultingFunc(&corev1.PersistentVolumeList{}, func(obj interface{}) { SetObjectDefaults_PersistentVolumeList(obj.(*corev1.PersistentVolumeList)) }) + scheme.AddTypeDefaultingFunc(&corev1.Pod{}, func(obj interface{}) { SetObjectDefaults_Pod(obj.(*corev1.Pod)) }) + scheme.AddTypeDefaultingFunc(&corev1.PodList{}, func(obj interface{}) { SetObjectDefaults_PodList(obj.(*corev1.PodList)) }) + scheme.AddTypeDefaultingFunc(&corev1.PodLogOptions{}, func(obj interface{}) { SetObjectDefaults_PodLogOptions(obj.(*corev1.PodLogOptions)) }) + scheme.AddTypeDefaultingFunc(&corev1.PodStatusResult{}, func(obj interface{}) { SetObjectDefaults_PodStatusResult(obj.(*corev1.PodStatusResult)) }) + scheme.AddTypeDefaultingFunc(&corev1.PodTemplate{}, func(obj interface{}) { SetObjectDefaults_PodTemplate(obj.(*corev1.PodTemplate)) }) + scheme.AddTypeDefaultingFunc(&corev1.PodTemplateList{}, func(obj interface{}) { SetObjectDefaults_PodTemplateList(obj.(*corev1.PodTemplateList)) }) + scheme.AddTypeDefaultingFunc(&corev1.ReplicationController{}, func(obj interface{}) { SetObjectDefaults_ReplicationController(obj.(*corev1.ReplicationController)) }) + scheme.AddTypeDefaultingFunc(&corev1.ReplicationControllerList{}, func(obj interface{}) { + SetObjectDefaults_ReplicationControllerList(obj.(*corev1.ReplicationControllerList)) }) - scheme.AddTypeDefaultingFunc(&v1.ResourceQuota{}, func(obj interface{}) { SetObjectDefaults_ResourceQuota(obj.(*v1.ResourceQuota)) }) - scheme.AddTypeDefaultingFunc(&v1.ResourceQuotaList{}, func(obj interface{}) { SetObjectDefaults_ResourceQuotaList(obj.(*v1.ResourceQuotaList)) }) - scheme.AddTypeDefaultingFunc(&v1.Secret{}, func(obj interface{}) { SetObjectDefaults_Secret(obj.(*v1.Secret)) }) - scheme.AddTypeDefaultingFunc(&v1.SecretList{}, func(obj interface{}) { SetObjectDefaults_SecretList(obj.(*v1.SecretList)) }) - scheme.AddTypeDefaultingFunc(&v1.Service{}, func(obj interface{}) { SetObjectDefaults_Service(obj.(*v1.Service)) }) - scheme.AddTypeDefaultingFunc(&v1.ServiceList{}, func(obj interface{}) { SetObjectDefaults_ServiceList(obj.(*v1.ServiceList)) }) + scheme.AddTypeDefaultingFunc(&corev1.ResourceQuota{}, func(obj interface{}) { SetObjectDefaults_ResourceQuota(obj.(*corev1.ResourceQuota)) }) + scheme.AddTypeDefaultingFunc(&corev1.ResourceQuotaList{}, 
func(obj interface{}) { SetObjectDefaults_ResourceQuotaList(obj.(*corev1.ResourceQuotaList)) }) + scheme.AddTypeDefaultingFunc(&corev1.Secret{}, func(obj interface{}) { SetObjectDefaults_Secret(obj.(*corev1.Secret)) }) + scheme.AddTypeDefaultingFunc(&corev1.SecretList{}, func(obj interface{}) { SetObjectDefaults_SecretList(obj.(*corev1.SecretList)) }) + scheme.AddTypeDefaultingFunc(&corev1.Service{}, func(obj interface{}) { SetObjectDefaults_Service(obj.(*corev1.Service)) }) + scheme.AddTypeDefaultingFunc(&corev1.ServiceList{}, func(obj interface{}) { SetObjectDefaults_ServiceList(obj.(*corev1.ServiceList)) }) return nil } -func SetObjectDefaults_ConfigMap(in *v1.ConfigMap) { +func SetObjectDefaults_ConfigMap(in *corev1.ConfigMap) { SetDefaults_ConfigMap(in) } -func SetObjectDefaults_ConfigMapList(in *v1.ConfigMapList) { +func SetObjectDefaults_ConfigMapList(in *corev1.ConfigMapList) { for i := range in.Items { a := &in.Items[i] SetObjectDefaults_ConfigMap(a) } } -func SetObjectDefaults_Endpoints(in *v1.Endpoints) { +func SetObjectDefaults_Endpoints(in *corev1.Endpoints) { SetDefaults_Endpoints(in) } -func SetObjectDefaults_EndpointsList(in *v1.EndpointsList) { +func SetObjectDefaults_EndpointsList(in *corev1.EndpointsList) { for i := range in.Items { a := &in.Items[i] SetObjectDefaults_Endpoints(a) } } -func SetObjectDefaults_LimitRange(in *v1.LimitRange) { +func SetObjectDefaults_LimitRange(in *corev1.LimitRange) { for i := range in.Spec.Limits { a := &in.Spec.Limits[i] SetDefaults_LimitRangeItem(a) @@ -98,39 +99,39 @@ func SetObjectDefaults_LimitRange(in *v1.LimitRange) { } } -func SetObjectDefaults_LimitRangeList(in *v1.LimitRangeList) { +func SetObjectDefaults_LimitRangeList(in *corev1.LimitRangeList) { for i := range in.Items { a := &in.Items[i] SetObjectDefaults_LimitRange(a) } } -func SetObjectDefaults_Namespace(in *v1.Namespace) { +func SetObjectDefaults_Namespace(in *corev1.Namespace) { SetDefaults_Namespace(in) SetDefaults_NamespaceStatus(&in.Status) } -func SetObjectDefaults_NamespaceList(in *v1.NamespaceList) { +func SetObjectDefaults_NamespaceList(in *corev1.NamespaceList) { for i := range in.Items { a := &in.Items[i] SetObjectDefaults_Namespace(a) } } -func SetObjectDefaults_Node(in *v1.Node) { +func SetObjectDefaults_Node(in *corev1.Node) { SetDefaults_NodeStatus(&in.Status) SetDefaults_ResourceList(&in.Status.Capacity) SetDefaults_ResourceList(&in.Status.Allocatable) } -func SetObjectDefaults_NodeList(in *v1.NodeList) { +func SetObjectDefaults_NodeList(in *corev1.NodeList) { for i := range in.Items { a := &in.Items[i] SetObjectDefaults_Node(a) } } -func SetObjectDefaults_PersistentVolume(in *v1.PersistentVolume) { +func SetObjectDefaults_PersistentVolume(in *corev1.PersistentVolume) { SetDefaults_PersistentVolume(in) SetDefaults_ResourceList(&in.Spec.Capacity) if in.Spec.PersistentVolumeSource.HostPath != nil { @@ -154,7 +155,7 @@ func SetObjectDefaults_PersistentVolume(in *v1.PersistentVolume) { } if in.Spec.PersistentVolumeSource.AzureDisk != nil { if in.Spec.PersistentVolumeSource.AzureDisk.CachingMode == nil { - ptrVar1 := v1.AzureDataDiskCachingMode(v1.AzureDataDiskCachingReadWrite) + ptrVar1 := corev1.AzureDataDiskCachingMode(corev1.AzureDataDiskCachingReadWrite) in.Spec.PersistentVolumeSource.AzureDisk.CachingMode = &ptrVar1 } if in.Spec.PersistentVolumeSource.AzureDisk.FSType == nil { @@ -166,7 +167,7 @@ func SetObjectDefaults_PersistentVolume(in *v1.PersistentVolume) { in.Spec.PersistentVolumeSource.AzureDisk.ReadOnly = &ptrVar1 } if 
in.Spec.PersistentVolumeSource.AzureDisk.Kind == nil { - ptrVar1 := v1.AzureDataDiskKind(v1.AzureSharedBlobDisk) + ptrVar1 := corev1.AzureDataDiskKind(corev1.AzureSharedBlobDisk) in.Spec.PersistentVolumeSource.AzureDisk.Kind = &ptrVar1 } } @@ -180,7 +181,7 @@ func SetObjectDefaults_PersistentVolume(in *v1.PersistentVolume) { } } -func SetObjectDefaults_PersistentVolumeClaim(in *v1.PersistentVolumeClaim) { +func SetObjectDefaults_PersistentVolumeClaim(in *corev1.PersistentVolumeClaim) { SetDefaults_PersistentVolumeClaim(in) SetDefaults_PersistentVolumeClaimSpec(&in.Spec) SetDefaults_ResourceList(&in.Spec.Resources.Limits) @@ -189,21 +190,21 @@ func SetObjectDefaults_PersistentVolumeClaim(in *v1.PersistentVolumeClaim) { SetDefaults_ResourceList(&in.Status.AllocatedResources) } -func SetObjectDefaults_PersistentVolumeClaimList(in *v1.PersistentVolumeClaimList) { +func SetObjectDefaults_PersistentVolumeClaimList(in *corev1.PersistentVolumeClaimList) { for i := range in.Items { a := &in.Items[i] SetObjectDefaults_PersistentVolumeClaim(a) } } -func SetObjectDefaults_PersistentVolumeList(in *v1.PersistentVolumeList) { +func SetObjectDefaults_PersistentVolumeList(in *corev1.PersistentVolumeList) { for i := range in.Items { a := &in.Items[i] SetObjectDefaults_PersistentVolume(a) } } -func SetObjectDefaults_Pod(in *v1.Pod) { +func SetObjectDefaults_Pod(in *corev1.Pod) { SetDefaults_Pod(in) SetDefaults_PodSpec(&in.Spec) for i := range in.Spec.Volumes { @@ -245,7 +246,7 @@ func SetObjectDefaults_Pod(in *v1.Pod) { } if a.VolumeSource.AzureDisk != nil { if a.VolumeSource.AzureDisk.CachingMode == nil { - ptrVar1 := v1.AzureDataDiskCachingMode(v1.AzureDataDiskCachingReadWrite) + ptrVar1 := corev1.AzureDataDiskCachingMode(corev1.AzureDataDiskCachingReadWrite) a.VolumeSource.AzureDisk.CachingMode = &ptrVar1 } if a.VolumeSource.AzureDisk.FSType == nil { @@ -257,7 +258,7 @@ func SetObjectDefaults_Pod(in *v1.Pod) { a.VolumeSource.AzureDisk.ReadOnly = &ptrVar1 } if a.VolumeSource.AzureDisk.Kind == nil { - ptrVar1 := v1.AzureDataDiskKind(v1.AzureSharedBlobDisk) + ptrVar1 := corev1.AzureDataDiskKind(corev1.AzureSharedBlobDisk) a.VolumeSource.AzureDisk.Kind = &ptrVar1 } } @@ -499,6 +500,10 @@ func SetObjectDefaults_Pod(in *v1.Pod) { } } SetDefaults_ResourceList(&in.Spec.Overhead) + if in.Spec.Resources != nil { + SetDefaults_ResourceList(&in.Spec.Resources.Limits) + SetDefaults_ResourceList(&in.Spec.Resources.Requests) + } for i := range in.Status.InitContainerStatuses { a := &in.Status.InitContainerStatuses[i] SetDefaults_ResourceList(&a.AllocatedResources) @@ -525,14 +530,18 @@ func SetObjectDefaults_Pod(in *v1.Pod) { } } -func SetObjectDefaults_PodList(in *v1.PodList) { +func SetObjectDefaults_PodList(in *corev1.PodList) { for i := range in.Items { a := &in.Items[i] SetObjectDefaults_Pod(a) } } -func SetObjectDefaults_PodStatusResult(in *v1.PodStatusResult) { +func SetObjectDefaults_PodLogOptions(in *corev1.PodLogOptions) { + SetDefaults_PodLogOptions(in) +} + +func SetObjectDefaults_PodStatusResult(in *corev1.PodStatusResult) { for i := range in.Status.InitContainerStatuses { a := &in.Status.InitContainerStatuses[i] SetDefaults_ResourceList(&a.AllocatedResources) @@ -559,7 +568,7 @@ func SetObjectDefaults_PodStatusResult(in *v1.PodStatusResult) { } } -func SetObjectDefaults_PodTemplate(in *v1.PodTemplate) { +func SetObjectDefaults_PodTemplate(in *corev1.PodTemplate) { SetDefaults_PodSpec(&in.Template.Spec) for i := range in.Template.Spec.Volumes { a := &in.Template.Spec.Volumes[i] @@ -600,7 +609,7 @@ func 
SetObjectDefaults_PodTemplate(in *v1.PodTemplate) { } if a.VolumeSource.AzureDisk != nil { if a.VolumeSource.AzureDisk.CachingMode == nil { - ptrVar1 := v1.AzureDataDiskCachingMode(v1.AzureDataDiskCachingReadWrite) + ptrVar1 := corev1.AzureDataDiskCachingMode(corev1.AzureDataDiskCachingReadWrite) a.VolumeSource.AzureDisk.CachingMode = &ptrVar1 } if a.VolumeSource.AzureDisk.FSType == nil { @@ -612,7 +621,7 @@ func SetObjectDefaults_PodTemplate(in *v1.PodTemplate) { a.VolumeSource.AzureDisk.ReadOnly = &ptrVar1 } if a.VolumeSource.AzureDisk.Kind == nil { - ptrVar1 := v1.AzureDataDiskKind(v1.AzureSharedBlobDisk) + ptrVar1 := corev1.AzureDataDiskKind(corev1.AzureSharedBlobDisk) a.VolumeSource.AzureDisk.Kind = &ptrVar1 } } @@ -854,16 +863,20 @@ func SetObjectDefaults_PodTemplate(in *v1.PodTemplate) { } } SetDefaults_ResourceList(&in.Template.Spec.Overhead) + if in.Template.Spec.Resources != nil { + SetDefaults_ResourceList(&in.Template.Spec.Resources.Limits) + SetDefaults_ResourceList(&in.Template.Spec.Resources.Requests) + } } -func SetObjectDefaults_PodTemplateList(in *v1.PodTemplateList) { +func SetObjectDefaults_PodTemplateList(in *corev1.PodTemplateList) { for i := range in.Items { a := &in.Items[i] SetObjectDefaults_PodTemplate(a) } } -func SetObjectDefaults_ReplicationController(in *v1.ReplicationController) { +func SetObjectDefaults_ReplicationController(in *corev1.ReplicationController) { SetDefaults_ReplicationController(in) if in.Spec.Template != nil { SetDefaults_PodSpec(&in.Spec.Template.Spec) @@ -906,7 +919,7 @@ func SetObjectDefaults_ReplicationController(in *v1.ReplicationController) { } if a.VolumeSource.AzureDisk != nil { if a.VolumeSource.AzureDisk.CachingMode == nil { - ptrVar1 := v1.AzureDataDiskCachingMode(v1.AzureDataDiskCachingReadWrite) + ptrVar1 := corev1.AzureDataDiskCachingMode(corev1.AzureDataDiskCachingReadWrite) a.VolumeSource.AzureDisk.CachingMode = &ptrVar1 } if a.VolumeSource.AzureDisk.FSType == nil { @@ -918,7 +931,7 @@ func SetObjectDefaults_ReplicationController(in *v1.ReplicationController) { a.VolumeSource.AzureDisk.ReadOnly = &ptrVar1 } if a.VolumeSource.AzureDisk.Kind == nil { - ptrVar1 := v1.AzureDataDiskKind(v1.AzureSharedBlobDisk) + ptrVar1 := corev1.AzureDataDiskKind(corev1.AzureSharedBlobDisk) a.VolumeSource.AzureDisk.Kind = &ptrVar1 } } @@ -1160,41 +1173,45 @@ func SetObjectDefaults_ReplicationController(in *v1.ReplicationController) { } } SetDefaults_ResourceList(&in.Spec.Template.Spec.Overhead) + if in.Spec.Template.Spec.Resources != nil { + SetDefaults_ResourceList(&in.Spec.Template.Spec.Resources.Limits) + SetDefaults_ResourceList(&in.Spec.Template.Spec.Resources.Requests) + } } } -func SetObjectDefaults_ReplicationControllerList(in *v1.ReplicationControllerList) { +func SetObjectDefaults_ReplicationControllerList(in *corev1.ReplicationControllerList) { for i := range in.Items { a := &in.Items[i] SetObjectDefaults_ReplicationController(a) } } -func SetObjectDefaults_ResourceQuota(in *v1.ResourceQuota) { +func SetObjectDefaults_ResourceQuota(in *corev1.ResourceQuota) { SetDefaults_ResourceList(&in.Spec.Hard) SetDefaults_ResourceList(&in.Status.Hard) SetDefaults_ResourceList(&in.Status.Used) } -func SetObjectDefaults_ResourceQuotaList(in *v1.ResourceQuotaList) { +func SetObjectDefaults_ResourceQuotaList(in *corev1.ResourceQuotaList) { for i := range in.Items { a := &in.Items[i] SetObjectDefaults_ResourceQuota(a) } } -func SetObjectDefaults_Secret(in *v1.Secret) { +func SetObjectDefaults_Secret(in *corev1.Secret) { SetDefaults_Secret(in) } -func 
SetObjectDefaults_SecretList(in *v1.SecretList) { +func SetObjectDefaults_SecretList(in *corev1.SecretList) { for i := range in.Items { a := &in.Items[i] SetObjectDefaults_Secret(a) } } -func SetObjectDefaults_Service(in *v1.Service) { +func SetObjectDefaults_Service(in *corev1.Service) { SetDefaults_Service(in) for i := range in.Spec.Ports { a := &in.Spec.Ports[i] @@ -1204,7 +1221,7 @@ func SetObjectDefaults_Service(in *v1.Service) { } } -func SetObjectDefaults_ServiceList(in *v1.ServiceList) { +func SetObjectDefaults_ServiceList(in *corev1.ServiceList) { for i := range in.Items { a := &in.Items[i] SetObjectDefaults_Service(a) diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/validation/names.go b/vendor/k8s.io/kubernetes/pkg/apis/core/validation/names.go index 398a1cb3a..fd9779779 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/core/validation/names.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/core/validation/names.go @@ -54,7 +54,7 @@ func ValidateSignerName(fldPath *field.Path, signerName string) field.ErrorList // validate that segments[0] is less than 253 characters altogether maxDomainSegmentLength := validation.DNS1123SubdomainMaxLength if len(segments[0]) > maxDomainSegmentLength { - el = append(el, field.TooLong(fldPath, segments[0], maxDomainSegmentLength)) + el = append(el, field.TooLong(fldPath, "" /*unused*/, maxDomainSegmentLength)) } // validate that segments[0] consists of valid DNS1123 labels separated by '.' domainLabels := strings.Split(segments[0], ".") @@ -97,7 +97,7 @@ func ValidateSignerName(fldPath *field.Path, signerName string) field.ErrorList maxPathSegmentLength := validation.DNS1123SubdomainMaxLength + validation.DNS1123LabelMaxLength + 1 maxSignerNameLength := maxDomainSegmentLength + maxPathSegmentLength + 1 if len(signerName) > maxSignerNameLength { - el = append(el, field.TooLong(fldPath, signerName, maxSignerNameLength)) + el = append(el, field.TooLong(fldPath, "" /*unused*/, maxSignerNameLength)) } return el diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/validation/validation.go b/vendor/k8s.io/kubernetes/pkg/apis/core/validation/validation.go index 248d57d23..694896ee7 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/core/validation/validation.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/core/validation/validation.go @@ -31,6 +31,8 @@ import ( "unicode/utf8" "github.com/google/go-cmp/cmp" + netutils "k8s.io/utils/net" + v1 "k8s.io/api/core/v1" apiequality "k8s.io/apimachinery/pkg/api/equality" "k8s.io/apimachinery/pkg/api/resource" @@ -45,8 +47,10 @@ import ( "k8s.io/apimachinery/pkg/util/validation/field" utilfeature "k8s.io/apiserver/pkg/util/feature" utilsysctl "k8s.io/component-helpers/node/util/sysctl" + resourcehelper "k8s.io/component-helpers/resource" schedulinghelper "k8s.io/component-helpers/scheduling/corev1" kubeletapis "k8s.io/kubelet/pkg/apis" + apiservice "k8s.io/kubernetes/pkg/api/service" "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/apis/core/helper" @@ -57,7 +61,6 @@ import ( "k8s.io/kubernetes/pkg/cluster/ports" "k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/fieldpath" - netutils "k8s.io/utils/net" ) const isNegativeErrorMsg string = apimachineryvalidation.IsNegativeErrorMsg @@ -145,6 +148,15 @@ func ValidateQualifiedName(value string, fldPath *field.Path) field.ErrorList { return allErrs } +// ValidateDNS1123SubdomainWithUnderScore validates that a name is a proper DNS subdomain but allows for an underscore in the string +func ValidateDNS1123SubdomainWithUnderScore(value string, fldPath *field.Path) 
field.ErrorList { + allErrs := field.ErrorList{} + for _, msg := range validation.IsDNS1123SubdomainWithUnderscore(value) { + allErrs = append(allErrs, field.Invalid(fldPath, value, msg)) + } + return allErrs +} + // ValidateDNS1123Subdomain validates that a name is a proper DNS subdomain. func ValidateDNS1123Subdomain(value string, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} @@ -322,7 +334,7 @@ func ValidateRuntimeClassName(name string, fldPath *field.Path) field.ErrorList // validateOverhead can be used to check whether the given Overhead is valid. func validateOverhead(overhead core.ResourceList, fldPath *field.Path, opts PodValidationOptions) field.ErrorList { // reuse the ResourceRequirements validation logic - return ValidateResourceRequirements(&core.ResourceRequirements{Limits: overhead}, nil, fldPath, opts) + return ValidateContainerResourceRequirements(&core.ResourceRequirements{Limits: overhead}, nil, fldPath, opts) } // Validates that given value is not negative. @@ -732,7 +744,7 @@ func validateVolumeSource(source *core.VolumeSource, fldPath *field.Path, volNam } } } - if opts.AllowImageVolumeSource && source.Image != nil { + if source.Image != nil { if numVolumes > 0 { allErrs = append(allErrs, field.Forbidden(fldPath.Child("image"), "may not specify more than 1 volume type")) } else { @@ -1064,7 +1076,6 @@ func validateDownwardAPIVolumeFile(file *core.DownwardAPIVolumeFile, fldPath *fi if file.ResourceFieldRef != nil { allErrs = append(allErrs, field.Invalid(fldPath, "resource", "fieldRef and resourceFieldRef can not be specified simultaneously")) } - allErrs = append(allErrs, validateDownwardAPIHostIPs(file.FieldRef, fldPath.Child("fieldRef"), opts)...) } else if file.ResourceFieldRef != nil { localValidContainerResourceFieldPathPrefixes := validContainerResourceFieldPathPrefixesWithDownwardAPIHugePages allErrs = append(allErrs, validateContainerResourceFieldSelector(file.ResourceFieldRef, &validContainerResourceFieldPathExpressions, &localValidContainerResourceFieldPathPrefixes, fldPath.Child("resourceFieldRef"), true)...) @@ -1676,7 +1687,7 @@ func ValidateCSIDriverName(driverName string, fldPath *field.Path) field.ErrorLi } if len(driverName) > 63 { - allErrs = append(allErrs, field.TooLong(fldPath, driverName, 63)) + allErrs = append(allErrs, field.TooLong(fldPath, "" /*unused*/, 63)) } for _, msg := range validation.IsDNS1123Subdomain(strings.ToLower(driverName)) { @@ -2646,7 +2657,6 @@ func validateEnvVarValueFrom(ev core.EnvVar, fldPath *field.Path, opts PodValida if ev.ValueFrom.FieldRef != nil { numSources++ allErrs = append(allErrs, validateObjectFieldSelector(ev.ValueFrom.FieldRef, &validEnvDownwardAPIFieldPathExpressions, fldPath.Child("fieldRef"))...) - allErrs = append(allErrs, validateDownwardAPIHostIPs(ev.ValueFrom.FieldRef, fldPath.Child("fieldRef"), opts)...) 
} if ev.ValueFrom.ResourceFieldRef != nil { numSources++ @@ -2710,16 +2720,6 @@ func validateObjectFieldSelector(fs *core.ObjectFieldSelector, expressions *sets return allErrs } -func validateDownwardAPIHostIPs(fieldSel *core.ObjectFieldSelector, fldPath *field.Path, opts PodValidationOptions) field.ErrorList { - allErrs := field.ErrorList{} - if !opts.AllowHostIPsField { - if fieldSel.FieldPath == "status.hostIPs" { - allErrs = append(allErrs, field.Forbidden(fldPath, "may not be set when feature gate 'PodHostIPs' is not enabled")) - } - } - return allErrs -} - func validateContainerResourceFieldSelector(fs *core.ResourceFieldSelector, expressions *sets.Set[string], prefixes *sets.Set[string], fldPath *field.Path, volume bool) field.ErrorList { allErrs := field.ErrorList{} @@ -2931,14 +2931,12 @@ func ValidateVolumeMounts(mounts []core.VolumeMount, voldevices map[string]strin } // Disallow subPath/subPathExpr for image volumes - if opts.AllowImageVolumeSource { - if v, ok := volumes[mnt.Name]; ok && v.Image != nil { - if len(mnt.SubPath) != 0 { - allErrs = append(allErrs, field.Invalid(idxPath.Child("subPath"), mnt.SubPath, "not allowed in image volume sources")) - } - if len(mnt.SubPathExpr) != 0 { - allErrs = append(allErrs, field.Invalid(idxPath.Child("subPathExpr"), mnt.SubPathExpr, "not allowed in image volume sources")) - } + if v, ok := volumes[mnt.Name]; ok && v.Image != nil { + if len(mnt.SubPath) != 0 { + allErrs = append(allErrs, field.Invalid(idxPath.Child("subPath"), mnt.SubPath, "not allowed in image volume sources")) + } + if len(mnt.SubPathExpr) != 0 { + allErrs = append(allErrs, field.Invalid(idxPath.Child("subPathExpr"), mnt.SubPathExpr, "not allowed in image volume sources")) } } @@ -3061,52 +3059,52 @@ func validatePodResourceClaim(podMeta *metav1.ObjectMeta, claim core.PodResource return allErrs } -func validateLivenessProbe(probe *core.Probe, gracePeriod *int64, fldPath *field.Path) field.ErrorList { +func validateLivenessProbe(probe *core.Probe, gracePeriod *int64, fldPath *field.Path, opts PodValidationOptions) field.ErrorList { allErrs := field.ErrorList{} if probe == nil { return allErrs } - allErrs = append(allErrs, validateProbe(probe, gracePeriod, fldPath)...) + allErrs = append(allErrs, validateProbe(probe, gracePeriod, fldPath, opts)...) if probe.SuccessThreshold != 1 { allErrs = append(allErrs, field.Invalid(fldPath.Child("successThreshold"), probe.SuccessThreshold, "must be 1")) } return allErrs } -func validateReadinessProbe(probe *core.Probe, gracePeriod *int64, fldPath *field.Path) field.ErrorList { +func validateReadinessProbe(probe *core.Probe, gracePeriod *int64, fldPath *field.Path, opts PodValidationOptions) field.ErrorList { allErrs := field.ErrorList{} if probe == nil { return allErrs } - allErrs = append(allErrs, validateProbe(probe, gracePeriod, fldPath)...) + allErrs = append(allErrs, validateProbe(probe, gracePeriod, fldPath, opts)...) if probe.TerminationGracePeriodSeconds != nil { allErrs = append(allErrs, field.Invalid(fldPath.Child("terminationGracePeriodSeconds"), probe.TerminationGracePeriodSeconds, "must not be set for readinessProbes")) } return allErrs } -func validateStartupProbe(probe *core.Probe, gracePeriod *int64, fldPath *field.Path) field.ErrorList { +func validateStartupProbe(probe *core.Probe, gracePeriod *int64, fldPath *field.Path, opts PodValidationOptions) field.ErrorList { allErrs := field.ErrorList{} if probe == nil { return allErrs } - allErrs = append(allErrs, validateProbe(probe, gracePeriod, fldPath)...) 
+ allErrs = append(allErrs, validateProbe(probe, gracePeriod, fldPath, opts)...) if probe.SuccessThreshold != 1 { allErrs = append(allErrs, field.Invalid(fldPath.Child("successThreshold"), probe.SuccessThreshold, "must be 1")) } return allErrs } -func validateProbe(probe *core.Probe, gracePeriod *int64, fldPath *field.Path) field.ErrorList { +func validateProbe(probe *core.Probe, gracePeriod *int64, fldPath *field.Path, opts PodValidationOptions) field.ErrorList { allErrs := field.ErrorList{} if probe == nil { return allErrs } - allErrs = append(allErrs, validateHandler(handlerFromProbe(&probe.ProbeHandler), gracePeriod, fldPath)...) + allErrs = append(allErrs, validateHandler(handlerFromProbe(&probe.ProbeHandler), gracePeriod, fldPath, opts)...) allErrs = append(allErrs, ValidateNonnegativeField(int64(probe.InitialDelaySeconds), fldPath.Child("initialDelaySeconds"))...) allErrs = append(allErrs, ValidateNonnegativeField(int64(probe.TimeoutSeconds), fldPath.Child("timeoutSeconds"))...) @@ -3162,14 +3160,21 @@ func handlerFromLifecycle(lh *core.LifecycleHandler) commonHandler { } } -func validateSleepAction(sleep *core.SleepAction, gracePeriod *int64, fldPath *field.Path) field.ErrorList { +func validateSleepAction(sleep *core.SleepAction, gracePeriod *int64, fldPath *field.Path, opts PodValidationOptions) field.ErrorList { allErrors := field.ErrorList{} // We allow gracePeriod to be nil here because the pod in which this SleepAction // is defined might have an invalid grace period defined, and we don't want to // flag another error here when the real problem will already be flagged. - if gracePeriod != nil && sleep.Seconds <= 0 || sleep.Seconds > *gracePeriod { - invalidStr := fmt.Sprintf("must be greater than 0 and less than terminationGracePeriodSeconds (%d)", *gracePeriod) - allErrors = append(allErrors, field.Invalid(fldPath, sleep.Seconds, invalidStr)) + if opts.AllowPodLifecycleSleepActionZeroValue { + if gracePeriod != nil && (sleep.Seconds < 0 || sleep.Seconds > *gracePeriod) { + invalidStr := fmt.Sprintf("must be non-negative and less than terminationGracePeriodSeconds (%d)", *gracePeriod) + allErrors = append(allErrors, field.Invalid(fldPath, sleep.Seconds, invalidStr)) + } + } else { + if gracePeriod != nil && (sleep.Seconds <= 0 || sleep.Seconds > *gracePeriod) { + invalidStr := fmt.Sprintf("must be greater than 0 and less than terminationGracePeriodSeconds (%d). Enable AllowPodLifecycleSleepActionZeroValue feature gate for zero sleep.", *gracePeriod) + allErrors = append(allErrors, field.Invalid(fldPath, sleep.Seconds, invalidStr)) + } } return allErrors } @@ -3282,7 +3287,7 @@ func validateTCPSocketAction(tcp *core.TCPSocketAction, fldPath *field.Path) fie func validateGRPCAction(grpc *core.GRPCAction, fldPath *field.Path) field.ErrorList { return ValidatePortNumOrName(intstr.FromInt32(grpc.Port), fldPath.Child("port")) } -func validateHandler(handler commonHandler, gracePeriod *int64, fldPath *field.Path) field.ErrorList { +func validateHandler(handler commonHandler, gracePeriod *int64, fldPath *field.Path, opts PodValidationOptions) field.ErrorList { numHandlers := 0 allErrors := field.ErrorList{} if handler.Exec != nil { @@ -3322,7 +3327,7 @@ func validateHandler(handler commonHandler, gracePeriod *int64, fldPath *field.P allErrors = append(allErrors, field.Forbidden(fldPath.Child("sleep"), "may not specify more than 1 handler type")) } else { numHandlers++ - allErrors = append(allErrors, validateSleepAction(handler.Sleep, gracePeriod, fldPath.Child("sleep"))...) 
+ allErrors = append(allErrors, validateSleepAction(handler.Sleep, gracePeriod, fldPath.Child("sleep"), opts)...) } } if numHandlers == 0 { @@ -3331,13 +3336,13 @@ func validateHandler(handler commonHandler, gracePeriod *int64, fldPath *field.P return allErrors } -func validateLifecycle(lifecycle *core.Lifecycle, gracePeriod *int64, fldPath *field.Path) field.ErrorList { +func validateLifecycle(lifecycle *core.Lifecycle, gracePeriod *int64, fldPath *field.Path, opts PodValidationOptions) field.ErrorList { allErrs := field.ErrorList{} if lifecycle.PostStart != nil { - allErrs = append(allErrs, validateHandler(handlerFromLifecycle(lifecycle.PostStart), gracePeriod, fldPath.Child("postStart"))...) + allErrs = append(allErrs, validateHandler(handlerFromLifecycle(lifecycle.PostStart), gracePeriod, fldPath.Child("postStart"), opts)...) } if lifecycle.PreStop != nil { - allErrs = append(allErrs, validateHandler(handlerFromLifecycle(lifecycle.PreStop), gracePeriod, fldPath.Child("preStop"))...) + allErrs = append(allErrs, validateHandler(handlerFromLifecycle(lifecycle.PreStop), gracePeriod, fldPath.Child("preStop"), opts)...) } return allErrs } @@ -3516,11 +3521,11 @@ func validateInitContainers(containers []core.Container, regularContainers []cor switch { case restartAlways: if ctr.Lifecycle != nil { - allErrs = append(allErrs, validateLifecycle(ctr.Lifecycle, gracePeriod, idxPath.Child("lifecycle"))...) + allErrs = append(allErrs, validateLifecycle(ctr.Lifecycle, gracePeriod, idxPath.Child("lifecycle"), opts)...) } - allErrs = append(allErrs, validateLivenessProbe(ctr.LivenessProbe, gracePeriod, idxPath.Child("livenessProbe"))...) - allErrs = append(allErrs, validateReadinessProbe(ctr.ReadinessProbe, gracePeriod, idxPath.Child("readinessProbe"))...) - allErrs = append(allErrs, validateStartupProbe(ctr.StartupProbe, gracePeriod, idxPath.Child("startupProbe"))...) + allErrs = append(allErrs, validateLivenessProbe(ctr.LivenessProbe, gracePeriod, idxPath.Child("livenessProbe"), opts)...) + allErrs = append(allErrs, validateReadinessProbe(ctr.ReadinessProbe, gracePeriod, idxPath.Child("readinessProbe"), opts)...) + allErrs = append(allErrs, validateStartupProbe(ctr.StartupProbe, gracePeriod, idxPath.Child("startupProbe"), opts)...) default: // These fields are disallowed for init containers. @@ -3538,7 +3543,7 @@ func validateInitContainers(containers []core.Container, regularContainers []cor } } - if len(ctr.ResizePolicy) > 0 { + if !opts.AllowSidecarResizePolicy && len(ctr.ResizePolicy) > 0 { allErrs = append(allErrs, field.Invalid(idxPath.Child("resizePolicy"), ctr.ResizePolicy, "must not be set for init containers")) } } @@ -3585,7 +3590,7 @@ func validateContainerCommon(ctr *core.Container, volumes map[string]core.Volume allErrs = append(allErrs, ValidateVolumeMounts(ctr.VolumeMounts, volDevices, volumes, ctr, path.Child("volumeMounts"), opts)...) allErrs = append(allErrs, ValidateVolumeDevices(ctr.VolumeDevices, volMounts, volumes, path.Child("volumeDevices"))...) allErrs = append(allErrs, validatePullPolicy(ctr.ImagePullPolicy, path.Child("imagePullPolicy"))...) - allErrs = append(allErrs, ValidateResourceRequirements(&ctr.Resources, podClaimNames, path.Child("resources"), opts)...) + allErrs = append(allErrs, ValidateContainerResourceRequirements(&ctr.Resources, podClaimNames, path.Child("resources"), opts)...) allErrs = append(allErrs, validateResizePolicy(ctr.ResizePolicy, path.Child("resizePolicy"), podRestartPolicy)...) 
allErrs = append(allErrs, ValidateSecurityContext(ctr.SecurityContext, path.Child("securityContext"), hostUsers)...) return allErrs @@ -3648,11 +3653,11 @@ func validateContainers(containers []core.Container, volumes map[string]core.Vol // Regular init container and ephemeral container validation will return // field.Forbidden() for these paths. if ctr.Lifecycle != nil { - allErrs = append(allErrs, validateLifecycle(ctr.Lifecycle, gracePeriod, path.Child("lifecycle"))...) + allErrs = append(allErrs, validateLifecycle(ctr.Lifecycle, gracePeriod, path.Child("lifecycle"), opts)...) } - allErrs = append(allErrs, validateLivenessProbe(ctr.LivenessProbe, gracePeriod, path.Child("livenessProbe"))...) - allErrs = append(allErrs, validateReadinessProbe(ctr.ReadinessProbe, gracePeriod, path.Child("readinessProbe"))...) - allErrs = append(allErrs, validateStartupProbe(ctr.StartupProbe, gracePeriod, path.Child("startupProbe"))...) + allErrs = append(allErrs, validateLivenessProbe(ctr.LivenessProbe, gracePeriod, path.Child("livenessProbe"), opts)...) + allErrs = append(allErrs, validateReadinessProbe(ctr.ReadinessProbe, gracePeriod, path.Child("readinessProbe"), opts)...) + allErrs = append(allErrs, validateStartupProbe(ctr.StartupProbe, gracePeriod, path.Child("startupProbe"), opts)...) // These fields are disallowed for regular containers if ctr.RestartPolicy != nil { @@ -3779,10 +3784,18 @@ func validatePodDNSConfig(dnsConfig *core.PodDNSConfig, dnsPolicy *core.DNSPolic if len(strings.Join(dnsConfig.Searches, " ")) > MaxDNSSearchListChars { allErrs = append(allErrs, field.Invalid(fldPath.Child("searches"), dnsConfig.Searches, fmt.Sprintf("must not have more than %v characters (including spaces) in the search list", MaxDNSSearchListChars))) } + for i, search := range dnsConfig.Searches { - // it is fine to have a trailing dot - search = strings.TrimSuffix(search, ".") - allErrs = append(allErrs, ValidateDNS1123Subdomain(search, fldPath.Child("searches").Index(i))...) + if opts.AllowRelaxedDNSSearchValidation { + if search != "." { + search = strings.TrimSuffix(search, ".") + allErrs = append(allErrs, ValidateDNS1123SubdomainWithUnderScore(search, fldPath.Child("searches").Index(i))...) + } + } else { + search = strings.TrimSuffix(search, ".") + allErrs = append(allErrs, ValidateDNS1123Subdomain(search, fldPath.Child("searches").Index(i))...) + } + } // Validate options. 
for i, option := range dnsConfig.Options { @@ -3975,7 +3988,7 @@ func ValidateTolerations(tolerations []core.Toleration, fldPath *field.Path) fie } case core.TolerationOpExists: if len(toleration.Value) > 0 { - allErrors = append(allErrors, field.Invalid(idxPath.Child("operator"), toleration, "value must be empty when `operator` is 'Exists'")) + allErrors = append(allErrors, field.Invalid(idxPath.Child("operator"), toleration.Value, "value must be empty when `operator` is 'Exists'")) } default: validValues := []core.TolerationOperator{core.TolerationOpEqual, core.TolerationOpExists} @@ -4019,8 +4032,6 @@ type PodValidationOptions struct { AllowInvalidLabelValueInSelector bool // Allow pod spec to use non-integer multiple of huge page unit size AllowIndivisibleHugePagesValues bool - // Allow pod spec to use status.hostIPs in downward API if feature is enabled - AllowHostIPsField bool // Allow invalid topologySpreadConstraint labelSelector for backward compatibility AllowInvalidTopologySpreadConstraintLabelSelector bool // Allow projected token volumes with non-local paths @@ -4032,8 +4043,16 @@ type PodValidationOptions struct { ResourceIsPod bool // Allow relaxed validation of environment variable names AllowRelaxedEnvironmentVariableValidation bool - // Allow the use of the ImageVolumeSource API. - AllowImageVolumeSource bool + // Allow the use of a relaxed DNS search + AllowRelaxedDNSSearchValidation bool + // Allows zero value for Pod Lifecycle Sleep Action + AllowPodLifecycleSleepActionZeroValue bool + // Allow only Recursive value of SELinuxChangePolicy. + AllowOnlyRecursiveSELinuxChangePolicy bool + // Indicates whether PodLevelResources feature is enabled or disabled. + PodLevelResourcesEnabled bool + // Allow sidecar containers resize policy for backward compatibility + AllowSidecarResizePolicy bool } // validatePodMetadataAndSpec tests if required fields in the pod.metadata and pod.spec are set, @@ -4188,6 +4207,11 @@ func ValidatePodSpec(spec *core.PodSpec, podMeta *metav1.ObjectMeta, fldPath *fi allErrs = append(allErrs, validateContainers(spec.Containers, vols, podClaimNames, gracePeriod, fldPath.Child("containers"), opts, &spec.RestartPolicy, hostUsers)...) allErrs = append(allErrs, validateInitContainers(spec.InitContainers, spec.Containers, vols, podClaimNames, gracePeriod, fldPath.Child("initContainers"), opts, &spec.RestartPolicy, hostUsers)...) allErrs = append(allErrs, validateEphemeralContainers(spec.EphemeralContainers, spec.Containers, spec.InitContainers, vols, podClaimNames, fldPath.Child("ephemeralContainers"), opts, &spec.RestartPolicy, hostUsers)...) + + if opts.PodLevelResourcesEnabled { + allErrs = append(allErrs, validatePodResources(spec, podClaimNames, fldPath.Child("resources"), opts)...) + } + allErrs = append(allErrs, validatePodHostNetworkDeps(spec, fldPath, opts)...) allErrs = append(allErrs, validateRestartPolicy(&spec.RestartPolicy, fldPath.Child("restartPolicy"))...) allErrs = append(allErrs, validateDNSPolicy(&spec.DNSPolicy, fldPath.Child("dnsPolicy"))...) 
@@ -4268,6 +4292,77 @@ func ValidatePodSpec(spec *core.PodSpec, podMeta *metav1.ObjectMeta, fldPath *fi return allErrs } +func validatePodResources(spec *core.PodSpec, podClaimNames sets.Set[string], fldPath *field.Path, opts PodValidationOptions) field.ErrorList { + if spec.Resources == nil { + return nil + } + + allErrs := field.ErrorList{} + + if spec.Resources.Claims != nil { + allErrs = append(allErrs, field.Forbidden(fldPath.Child("claims"), "claims cannot be set for Resources at pod-level")) + } + + // validatePodResourceRequirements checks if resource names and quantities are + // valid, and requests are less than limits. + allErrs = append(allErrs, validatePodResourceRequirements(spec.Resources, podClaimNames, fldPath, opts)...) + allErrs = append(allErrs, validatePodResourceConsistency(spec, fldPath)...) + return allErrs +} + +// validatePodResourceConsistency checks if aggregate container-level requests are +// less than or equal to pod-level requests, and individual container-level limits +// are less than or equal to pod-level limits. +func validatePodResourceConsistency(spec *core.PodSpec, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + + // Convert the *core.PodSpec to *v1.PodSpec to satisfy the call to + // resourcehelper.PodRequests method, in the subsequent lines, + // which requires a *v1.Pod object (containing a *v1.PodSpec). + v1PodSpec := &v1.PodSpec{} + // TODO(ndixita): Convert_core_PodSpec_To_v1_PodSpec is risky. Add a copy of + // AggregateContainerRequests against internal core.Pod type for beta release of + // PodLevelResources feature. + if err := corev1.Convert_core_PodSpec_To_v1_PodSpec(spec, v1PodSpec, nil); err != nil { + allErrs = append(allErrs, field.InternalError(fldPath, fmt.Errorf("invalid %q: %v", fldPath, err.Error()))) + } + + reqPath := fldPath.Child("requests") + // resourcehelper.AggregateContainerRequests method requires a Pod object to + // calculate the total requests requirements of a pod. Hence a Pod object using + // v1PodSpec i.e. (&v1.Pod{Spec: *v1PodSpec}, is created on the fly, and passed + // to the AggregateContainerRequests method to facilitate proper resource + // calculation without modifying AggregateContainerRequests method. + aggrContainerReqs := resourcehelper.AggregateContainerRequests(&v1.Pod{Spec: *v1PodSpec}, resourcehelper.PodResourcesOptions{}) + + // Pod-level requests must be >= aggregate requests of all containers in a pod. + for resourceName, ctrReqs := range aggrContainerReqs { + key := resourceName.String() + podSpecRequests := spec.Resources.Requests[core.ResourceName(key)] + + fldPath := reqPath.Key(key) + if ctrReqs.Cmp(podSpecRequests) > 0 { + allErrs = append(allErrs, field.Invalid(fldPath, podSpecRequests.String(), fmt.Sprintf("must be greater than or equal to aggregate container requests of %s", ctrReqs.String()))) + } + } + + // Individual Container limits must be <= Pod-level limits. 
+ for i, ctr := range spec.Containers { + for resourceName, ctrLimit := range ctr.Resources.Limits { + podSpecLimits, exists := spec.Resources.Limits[core.ResourceName(resourceName.String())] + if !exists { + continue + } + + if ctrLimit.Cmp(podSpecLimits) > 0 { + fldPath := fldPath.Child("containers").Index(i).Key(resourceName.String()).Child("limits") + allErrs = append(allErrs, field.Invalid(fldPath, ctrLimit.String(), fmt.Sprintf("must be less than or equal to pod limits of %s", podSpecLimits.String()))) + } + } + } + return allErrs +} + func validateLinux(spec *core.PodSpec, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} securityContext := spec.SecurityContext @@ -4332,6 +4427,9 @@ func validateWindows(spec *core.PodSpec, fldPath *field.Path) field.ErrorList { if securityContext.SupplementalGroupsPolicy != nil { allErrs = append(allErrs, field.Forbidden(fldPath.Child("securityContext").Child("supplementalGroupsPolicy"), "cannot be set for a windows pod")) } + if securityContext.SELinuxChangePolicy != nil { + allErrs = append(allErrs, field.Forbidden(fldPath.Child("securityContext").Child("seLinuxChangePolicy"), "cannot be set for a windows pod")) + } } podshelper.VisitContainersWithPath(spec, fldPath, func(c *core.Container, cFldPath *field.Path) bool { // validate container security context @@ -4379,7 +4477,7 @@ func validateWindows(spec *core.PodSpec, fldPath *field.Path) field.ErrorList { } // ValidateNodeSelectorRequirement tests that the specified NodeSelectorRequirement fields has valid data -func ValidateNodeSelectorRequirement(rq core.NodeSelectorRequirement, fldPath *field.Path) field.ErrorList { +func ValidateNodeSelectorRequirement(rq core.NodeSelectorRequirement, allowInvalidLabelValueInRequiredNodeAffinity bool, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} switch rq.Operator { case core.NodeSelectorOpIn, core.NodeSelectorOpNotIn: @@ -4398,9 +4496,15 @@ func ValidateNodeSelectorRequirement(rq core.NodeSelectorRequirement, fldPath *f default: allErrs = append(allErrs, field.Invalid(fldPath.Child("operator"), rq.Operator, "not a valid selector operator")) } - allErrs = append(allErrs, unversionedvalidation.ValidateLabelName(rq.Key, fldPath.Child("key"))...) - + if !allowInvalidLabelValueInRequiredNodeAffinity { + path := fldPath.Child("values") + for valueIndex, value := range rq.Values { + for _, msg := range validation.IsValidLabelValue(value) { + allErrs = append(allErrs, field.Invalid(path.Index(valueIndex), value, msg)) + } + } + } return allErrs } @@ -4436,11 +4540,11 @@ func ValidateNodeFieldSelectorRequirement(req core.NodeSelectorRequirement, fldP } // ValidateNodeSelectorTerm tests that the specified node selector term has valid data -func ValidateNodeSelectorTerm(term core.NodeSelectorTerm, fldPath *field.Path) field.ErrorList { +func ValidateNodeSelectorTerm(term core.NodeSelectorTerm, allowInvalidLabelValueInRequiredNodeAffinity bool, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} for j, req := range term.MatchExpressions { - allErrs = append(allErrs, ValidateNodeSelectorRequirement(req, fldPath.Child("matchExpressions").Index(j))...) + allErrs = append(allErrs, ValidateNodeSelectorRequirement(req, allowInvalidLabelValueInRequiredNodeAffinity, fldPath.Child("matchExpressions").Index(j))...) 
} for j, req := range term.MatchFields { @@ -4451,7 +4555,7 @@ func ValidateNodeSelectorTerm(term core.NodeSelectorTerm, fldPath *field.Path) f } // ValidateNodeSelector tests that the specified nodeSelector fields has valid data -func ValidateNodeSelector(nodeSelector *core.NodeSelector, fldPath *field.Path) field.ErrorList { +func ValidateNodeSelector(nodeSelector *core.NodeSelector, allowInvalidLabelValueInRequiredNodeAffinity bool, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} termFldPath := fldPath.Child("nodeSelectorTerms") @@ -4460,7 +4564,7 @@ func ValidateNodeSelector(nodeSelector *core.NodeSelector, fldPath *field.Path) } for i, term := range nodeSelector.NodeSelectorTerms { - allErrs = append(allErrs, ValidateNodeSelectorTerm(term, termFldPath.Index(i))...) + allErrs = append(allErrs, ValidateNodeSelectorTerm(term, allowInvalidLabelValueInRequiredNodeAffinity, termFldPath.Index(i))...) } return allErrs @@ -4561,7 +4665,9 @@ func ValidatePreferredSchedulingTerms(terms []core.PreferredSchedulingTerm, fldP allErrs = append(allErrs, field.Invalid(fldPath.Index(i).Child("weight"), term.Weight, "must be in the range 1-100")) } - allErrs = append(allErrs, ValidateNodeSelectorTerm(term.Preference, fldPath.Index(i).Child("preference"))...) + // we always allow invalid label-value for preferred affinity + // as they can success when cluster has only one node + allErrs = append(allErrs, ValidateNodeSelectorTerm(term.Preference, true, fldPath.Index(i).Child("preference"))...) } return allErrs } @@ -4631,7 +4737,7 @@ func validateNodeAffinity(na *core.NodeAffinity, fldPath *field.Path) field.Erro // allErrs = append(allErrs, ValidateNodeSelector(na.RequiredDuringSchedulingRequiredDuringExecution, fldPath.Child("requiredDuringSchedulingRequiredDuringExecution"))...) // } if na.RequiredDuringSchedulingIgnoredDuringExecution != nil { - allErrs = append(allErrs, ValidateNodeSelector(na.RequiredDuringSchedulingIgnoredDuringExecution, fldPath.Child("requiredDuringSchedulingIgnoredDuringExecution"))...) + allErrs = append(allErrs, ValidateNodeSelector(na.RequiredDuringSchedulingIgnoredDuringExecution, true /* TODO: opts.AllowInvalidLabelValueInRequiredNodeAffinity */, fldPath.Child("requiredDuringSchedulingIgnoredDuringExecution"))...) } if len(na.PreferredDuringSchedulingIgnoredDuringExecution) > 0 { allErrs = append(allErrs, ValidatePreferredSchedulingTerms(na.PreferredDuringSchedulingIgnoredDuringExecution, fldPath.Child("preferredDuringSchedulingIgnoredDuringExecution"))...) @@ -4743,7 +4849,7 @@ func ValidateAppArmorProfileField(profile *core.AppArmorProfile, fldPath *field. 
const maxLocalhostProfileLength = 4095 // PATH_MAX - 1 if len(*profile.LocalhostProfile) > maxLocalhostProfileLength { - allErrs = append(allErrs, field.TooLongMaxLength(fldPath.Child("localhostProfile"), *profile.LocalhostProfile, maxLocalhostProfileLength)) + allErrs = append(allErrs, field.TooLong(fldPath.Child("localhostProfile"), "" /*unused*/, maxLocalhostProfileLength)) } } @@ -4917,6 +5023,28 @@ func ValidateHostSysctl(sysctl string, securityContext *core.PodSecurityContext, return nil } +var validSELinuxChangePolicies = sets.New(core.SELinuxChangePolicyRecursive, core.SELinuxChangePolicyMountOption) + +func validateSELinuxChangePolicy(seLinuxChangePolicy *core.PodSELinuxChangePolicy, fldPath *field.Path, opts PodValidationOptions) field.ErrorList { + if seLinuxChangePolicy == nil { + return nil + } + + allErrs := field.ErrorList{} + + if opts.AllowOnlyRecursiveSELinuxChangePolicy { + if *seLinuxChangePolicy != core.SELinuxChangePolicyRecursive { + allErrs = append(allErrs, field.NotSupported(fldPath, *seLinuxChangePolicy, []core.PodSELinuxChangePolicy{core.SELinuxChangePolicyRecursive})) + } + } else { + // Allow any valid SELinuxChangePolicy value. + if !validSELinuxChangePolicies.Has(*seLinuxChangePolicy) { + allErrs = append(allErrs, field.NotSupported(fldPath, *seLinuxChangePolicy, sets.List(validSELinuxChangePolicies))) + } + } + return allErrs +} + // validatePodSpecSecurityContext verifies the SecurityContext of a PodSpec, // whether that is defined in a Pod or in an embedded PodSpec (e.g. a // Deployment's pod template). @@ -4963,6 +5091,10 @@ func validatePodSpecSecurityContext(securityContext *core.PodSecurityContext, sp if securityContext.SupplementalGroupsPolicy != nil { allErrs = append(allErrs, validateSupplementalGroupsPolicy(securityContext.SupplementalGroupsPolicy, fldPath.Child("supplementalGroupsPolicy"))...) } + + if securityContext.SELinuxChangePolicy != nil { + allErrs = append(allErrs, validateSELinuxChangePolicy(securityContext.SELinuxChangePolicy, fldPath.Child("seLinuxChangePolicy"), opts)...) + } } return allErrs @@ -5080,16 +5212,6 @@ var updatablePodSpecFields = []string{ "`spec.activeDeadlineSeconds`", "`spec.tolerations` (only additions to existing tolerations)", "`spec.terminationGracePeriodSeconds` (allow it to be set to 1 if it was previously negative)", - "`spec.containers[*].resources` (for CPU/memory only)", -} - -// TODO(vinaykul,InPlacePodVerticalScaling): Drop this var once InPlacePodVerticalScaling goes GA and featuregate is gone. -var updatablePodSpecFieldsNoResources = []string{ - "`spec.containers[*].image`", - "`spec.initContainers[*].image`", - "`spec.activeDeadlineSeconds`", - "`spec.tolerations` (only additions to existing tolerations)", - "`spec.terminationGracePeriodSeconds` (allow it to be set to 1 if it was previously negative)", } // ValidatePodUpdate tests to see if the update is legal for an end user to make. newPod is updated with fields @@ -5151,45 +5273,12 @@ func ValidatePodUpdate(newPod, oldPod *core.Pod, opts PodValidationOptions) fiel return allErrs } - if qos.GetPodQOS(oldPod) != qos.ComputePodQOS(newPod) { - allErrs = append(allErrs, field.Invalid(fldPath, newPod.Status.QOSClass, "Pod QoS is immutable")) - } - // handle updateable fields by munging those fields prior to deep equal comparison. 
mungedPodSpec := *newPod.Spec.DeepCopy() // munge spec.containers[*].image var newContainers []core.Container for ix, container := range mungedPodSpec.Containers { container.Image = oldPod.Spec.Containers[ix].Image // +k8s:verify-mutation:reason=clone - // When the feature-gate is turned off, any new requests attempting to update CPU or memory - // resource values will result in validation failure. - if utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling) { - // Resources are mutable for CPU & memory only - // - user can now modify Resources to express new desired Resources - mungeCpuMemResources := func(resourceList, oldResourceList core.ResourceList) core.ResourceList { - if oldResourceList == nil { - return nil - } - var mungedResourceList core.ResourceList - if resourceList == nil { - mungedResourceList = make(core.ResourceList) - } else { - mungedResourceList = resourceList.DeepCopy() - } - delete(mungedResourceList, core.ResourceCPU) - delete(mungedResourceList, core.ResourceMemory) - if cpu, found := oldResourceList[core.ResourceCPU]; found { - mungedResourceList[core.ResourceCPU] = cpu - } - if mem, found := oldResourceList[core.ResourceMemory]; found { - mungedResourceList[core.ResourceMemory] = mem - } - return mungedResourceList - } - lim := mungeCpuMemResources(container.Resources.Limits, oldPod.Spec.Containers[ix].Resources.Limits) - req := mungeCpuMemResources(container.Resources.Requests, oldPod.Spec.Containers[ix].Resources.Requests) - container.Resources = core.ResourceRequirements{Limits: lim, Requests: req} - } newContainers = append(newContainers, container) } mungedPodSpec.Containers = newContainers @@ -5266,10 +5355,7 @@ func ValidatePodUpdate(newPod, oldPod *core.Pod, opts PodValidationOptions) fiel // This diff isn't perfect, but it's a helluva lot better an "I'm not going to tell you what the difference is". // TODO: Pinpoint the specific field that causes the invalid error after we have strategic merge diff specDiff := cmp.Diff(oldPod.Spec, mungedPodSpec) - errs := field.Forbidden(specPath, fmt.Sprintf("pod updates may not change fields other than %s\n%v", strings.Join(updatablePodSpecFieldsNoResources, ","), specDiff)) - if utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling) { - errs = field.Forbidden(specPath, fmt.Sprintf("pod updates may not change fields other than %s\n%v", strings.Join(updatablePodSpecFields, ","), specDiff)) - } + errs := field.Forbidden(specPath, fmt.Sprintf("pod updates may not change fields other than %s\n%v", strings.Join(updatablePodSpecFields, ","), specDiff)) allErrs = append(allErrs, errs) } return allErrs @@ -5358,6 +5444,13 @@ func ValidatePodStatusUpdate(newPod, oldPod *core.Pod, opts PodValidationOptions } } + // Pod QoS is immutable + allErrs = append(allErrs, ValidateImmutableField(newPod.Status.QOSClass, oldPod.Status.QOSClass, fldPath.Child("qosClass"))...) + + // Note: there is no check that ContainerStatuses, InitContainerStatuses, and EphemeralContainerStatuses doesn't have duplicate conatainer names + // or statuses of containers that are not defined in the pod spec. Changing this may lead to a breaking changes. So consumers of those fields + // must account for unexpected data. Kubelet will never report statuses like this. + // // If pod should not restart, make sure the status update does not transition // any terminated containers to a non-terminated state. 
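Annotation: ValidatePodUpdate keeps its "munge then deep-compare" structure even after the in-place-resize logic moves out: copy the new spec, write the values of the fields that are allowed to change back from the old spec, and treat any remaining difference as forbidden; the QoS-class check moves into ValidatePodStatusUpdate as a plain immutable-field comparison. A self-contained sketch of that comparison pattern (assumed field names, not the vendored types):

package main

import (
	"fmt"
	"reflect"
)

type spec struct {
	Image                 string // mutable
	ActiveDeadlineSeconds *int64 // mutable
	NodeName              string // immutable
}

// forbiddenChange reverts the updatable fields on a copy of the new spec and
// reports whether anything else differs from the old spec.
func forbiddenChange(oldSpec, newSpec spec) bool {
	munged := newSpec
	munged.Image = oldSpec.Image
	munged.ActiveDeadlineSeconds = oldSpec.ActiveDeadlineSeconds
	return !reflect.DeepEqual(munged, oldSpec)
}

func main() {
	oldSpec := spec{Image: "app:1", NodeName: "node-a"}
	fmt.Println(forbiddenChange(oldSpec, spec{Image: "app:2", NodeName: "node-a"})) // false: only the image changed
	fmt.Println(forbiddenChange(oldSpec, spec{Image: "app:2", NodeName: "node-b"})) // true: nodeName is immutable
}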
allErrs = append(allErrs, ValidateContainerStateTransition(newPod.Status.ContainerStatuses, oldPod.Status.ContainerStatuses, fldPath.Child("containerStatuses"), oldPod.Spec.RestartPolicy)...) @@ -5473,6 +5566,139 @@ func ValidatePodEphemeralContainersUpdate(newPod, oldPod *core.Pod, opts PodVali return allErrs } +// ValidatePodResize tests that a user update to pod container resources is valid. +// newPod and oldPod must only differ in their Containers[*].Resources and +// Containers[*].ResizePolicy field. +func ValidatePodResize(newPod, oldPod *core.Pod, opts PodValidationOptions) field.ErrorList { + // Part 1: Validate newPod's spec and updates to metadata + fldPath := field.NewPath("metadata") + allErrs := ValidateImmutableField(&newPod.ObjectMeta, &oldPod.ObjectMeta, fldPath) + allErrs = append(allErrs, validatePodMetadataAndSpec(newPod, opts)...) + + // pods with pod-level resources cannot be resized + isPodLevelResourcesSet := func(pod *core.Pod) bool { + return pod.Spec.Resources != nil && + (len(pod.Spec.Resources.Requests)+len(pod.Spec.Resources.Limits) > 0) + } + + if isPodLevelResourcesSet(oldPod) || isPodLevelResourcesSet(newPod) { + return field.ErrorList{field.Forbidden(field.NewPath(""), "pods with pod-level resources cannot be resized")} + } + + // static pods cannot be resized. + if _, ok := oldPod.Annotations[core.MirrorPodAnnotationKey]; ok { + return field.ErrorList{field.Forbidden(field.NewPath(""), "static pods cannot be resized")} + } + + // windows pods are not supported. + if oldPod.Spec.OS != nil && oldPod.Spec.OS.Name == core.Windows { + return field.ErrorList{field.Forbidden(field.NewPath(""), "windows pods cannot be resized")} + } + + // Part 2: Validate that the changes between oldPod.Spec.Containers[].Resources and + // newPod.Spec.Containers[].Resources are allowed. + specPath := field.NewPath("spec") + if qos.GetPodQOS(oldPod) != qos.ComputePodQOS(newPod) { + allErrs = append(allErrs, field.Invalid(specPath, newPod.Status.QOSClass, "Pod QOS Class may not change as a result of resizing")) + } + + if !isPodResizeRequestSupported(*oldPod) { + allErrs = append(allErrs, field.Forbidden(specPath, "Pod running on node without support for resize")) + } + + // Do not allow removing resource requests/limits on resize. 
+ if utilfeature.DefaultFeatureGate.Enabled(features.SidecarContainers) { + for ix, ctr := range oldPod.Spec.InitContainers { + if ctr.RestartPolicy != nil && *ctr.RestartPolicy != core.ContainerRestartPolicyAlways { + continue + } + if resourcesRemoved(newPod.Spec.InitContainers[ix].Resources.Requests, ctr.Resources.Requests) { + allErrs = append(allErrs, field.Forbidden(specPath.Child("initContainers").Index(ix).Child("resources").Child("requests"), "resource requests cannot be removed")) + } + if resourcesRemoved(newPod.Spec.InitContainers[ix].Resources.Limits, ctr.Resources.Limits) { + allErrs = append(allErrs, field.Forbidden(specPath.Child("initContainers").Index(ix).Child("resources").Child("limits"), "resource limits cannot be removed")) + } + } + } + for ix, ctr := range oldPod.Spec.Containers { + if resourcesRemoved(newPod.Spec.Containers[ix].Resources.Requests, ctr.Resources.Requests) { + allErrs = append(allErrs, field.Forbidden(specPath.Child("containers").Index(ix).Child("resources").Child("requests"), "resource requests cannot be removed")) + } + if resourcesRemoved(newPod.Spec.Containers[ix].Resources.Limits, ctr.Resources.Limits) { + allErrs = append(allErrs, field.Forbidden(specPath.Child("containers").Index(ix).Child("resources").Child("limits"), "resource limits cannot be removed")) + } + } + + // Ensure that only CPU and memory resources are mutable. + originalCPUMemPodSpec := *newPod.Spec.DeepCopy() + var newContainers []core.Container + for ix, container := range originalCPUMemPodSpec.Containers { + dropCPUMemoryUpdates := func(resourceList, oldResourceList core.ResourceList) core.ResourceList { + if oldResourceList == nil { + return nil + } + var mungedResourceList core.ResourceList + if resourceList == nil { + mungedResourceList = make(core.ResourceList) + } else { + mungedResourceList = resourceList.DeepCopy() + } + delete(mungedResourceList, core.ResourceCPU) + delete(mungedResourceList, core.ResourceMemory) + if cpu, found := oldResourceList[core.ResourceCPU]; found { + mungedResourceList[core.ResourceCPU] = cpu + } + if mem, found := oldResourceList[core.ResourceMemory]; found { + mungedResourceList[core.ResourceMemory] = mem + } + return mungedResourceList + } + lim := dropCPUMemoryUpdates(container.Resources.Limits, oldPod.Spec.Containers[ix].Resources.Limits) + req := dropCPUMemoryUpdates(container.Resources.Requests, oldPod.Spec.Containers[ix].Resources.Requests) + container.Resources = core.ResourceRequirements{Limits: lim, Requests: req} + container.ResizePolicy = oldPod.Spec.Containers[ix].ResizePolicy // +k8s:verify-mutation:reason=clone + newContainers = append(newContainers, container) + } + originalCPUMemPodSpec.Containers = newContainers + if !apiequality.Semantic.DeepEqual(originalCPUMemPodSpec, oldPod.Spec) { + // This likely means that the user has made changes to resources other than CPU and Memory. + specDiff := cmp.Diff(oldPod.Spec, originalCPUMemPodSpec) + errs := field.Forbidden(specPath, fmt.Sprintf("only cpu and memory resources are mutable\n%v", specDiff)) + allErrs = append(allErrs, errs) + } + return allErrs +} + +// isPodResizeRequestSupported checks whether the pod is running on a node with InPlacePodVerticalScaling enabled. +func isPodResizeRequestSupported(pod core.Pod) bool { + // TODO: Remove this after GA+3 releases of InPlacePodVerticalScaling + // This code handles the version skew as described in the KEP. 
+ // For handling version skew we're only allowing to update the Pod's Resources + // if the Pod already has Pod.Status.ContainerStatuses[i].Resources. This means + // that the apiserver would only allow updates to Pods running on Nodes with + // the InPlacePodVerticalScaling feature gate enabled. + for _, c := range pod.Status.ContainerStatuses { + if c.State.Running != nil { + return c.Resources != nil + } + } + // No running containers. We cannot tell whether the node supports resize at this point, so we assume it does. + return true +} + +func resourcesRemoved(resourceList, oldResourceList core.ResourceList) bool { + if len(oldResourceList) > len(resourceList) { + return true + } + for name := range oldResourceList { + if _, ok := resourceList[name]; !ok { + return true + } + } + + return false +} + // ValidatePodBinding tests if required fields in the pod binding are legal. func ValidatePodBinding(binding *core.Binding) field.ErrorList { allErrs := field.ErrorList{} @@ -6333,6 +6559,22 @@ func validateContainerResourceName(value core.ResourceName, fldPath *field.Path) return allErrs } +// validatePodResourceName verifies that: +// 1. The resource name is a valid compute resource name for pod-level specification. +// 2. The resource is supported by the PodLevelResources feature. +func validatePodResourceName(resourceName core.ResourceName, fldPath *field.Path) field.ErrorList { + allErrs := validateResourceName(resourceName, fldPath) + if len(allErrs) != 0 { + return allErrs + } + + if !resourcehelper.IsSupportedPodLevelResource(v1.ResourceName(resourceName)) { + return append(allErrs, field.NotSupported(fldPath, resourceName, sets.List(resourcehelper.SupportedPodLevelResources()))) + } + + return allErrs +} + // Validate resource names that can go in a resource quota // Refer to docs/design/resources.md for more details. func ValidateResourceQuotaResourceName(value core.ResourceName, fldPath *field.Path) field.ErrorList { @@ -6531,7 +6773,7 @@ func ValidateSecret(secret *core.Secret) field.ErrorList { totalSize += len(value) } if totalSize > core.MaxSecretSize { - allErrs = append(allErrs, field.TooLong(dataPath, "", core.MaxSecretSize)) + allErrs = append(allErrs, field.TooLong(dataPath, "" /*unused*/, core.MaxSecretSize)) } switch secret.Type { @@ -6646,7 +6888,7 @@ func ValidateConfigMap(cfg *core.ConfigMap) field.ErrorList { } if totalSize > core.MaxSecretSize { // pass back "" to indicate that the error refers to the whole object. - allErrs = append(allErrs, field.TooLong(field.NewPath(""), cfg, core.MaxSecretSize)) + allErrs = append(allErrs, field.TooLong(field.NewPath(""), "" /*unused*/, core.MaxSecretSize)) } return allErrs @@ -6680,8 +6922,16 @@ func validateBasicResource(quantity resource.Quantity, fldPath *field.Path) fiel return field.ErrorList{} } +func validatePodResourceRequirements(requirements *core.ResourceRequirements, podClaimNames sets.Set[string], fldPath *field.Path, opts PodValidationOptions) field.ErrorList { + return validateResourceRequirements(requirements, validatePodResourceName, podClaimNames, fldPath, opts) +} + +func ValidateContainerResourceRequirements(requirements *core.ResourceRequirements, podClaimNames sets.Set[string], fldPath *field.Path, opts PodValidationOptions) field.ErrorList { + return validateResourceRequirements(requirements, validateContainerResourceName, podClaimNames, fldPath, opts) +} + // Validates resource requirement spec. 
-func ValidateResourceRequirements(requirements *core.ResourceRequirements, podClaimNames sets.Set[string], fldPath *field.Path, opts PodValidationOptions) field.ErrorList { +func validateResourceRequirements(requirements *core.ResourceRequirements, resourceNameFn func(core.ResourceName, *field.Path) field.ErrorList, podClaimNames sets.Set[string], fldPath *field.Path, opts PodValidationOptions) field.ErrorList { allErrs := field.ErrorList{} limPath := fldPath.Child("limits") reqPath := fldPath.Child("requests") @@ -6694,7 +6944,7 @@ func ValidateResourceRequirements(requirements *core.ResourceRequirements, podCl fldPath := limPath.Key(string(resourceName)) // Validate resource name. - allErrs = append(allErrs, validateContainerResourceName(resourceName, fldPath)...) + allErrs = append(allErrs, resourceNameFn(resourceName, fldPath)...) // Validate resource quantity. allErrs = append(allErrs, ValidateResourceQuantityValue(resourceName, quantity, fldPath)...) @@ -6713,7 +6963,8 @@ func ValidateResourceRequirements(requirements *core.ResourceRequirements, podCl for resourceName, quantity := range requirements.Requests { fldPath := reqPath.Key(string(resourceName)) // Validate resource name. - allErrs = append(allErrs, validateContainerResourceName(resourceName, fldPath)...) + allErrs = append(allErrs, resourceNameFn(resourceName, fldPath)...) + // Validate resource quantity. allErrs = append(allErrs, ValidateResourceQuantityValue(resourceName, quantity, fldPath)...) @@ -7449,7 +7700,13 @@ func validateOS(podSpec *core.PodSpec, fldPath *field.Path, opts PodValidationOp return allErrs } -func ValidatePodLogOptions(opts *core.PodLogOptions) field.ErrorList { +var validLogStreams = sets.New[string]( + core.LogStreamStdout, + core.LogStreamStderr, + core.LogStreamAll, +) + +func ValidatePodLogOptions(opts *core.PodLogOptions, allowStreamSelection bool) field.ErrorList { allErrs := field.ErrorList{} if opts.TailLines != nil && *opts.TailLines < 0 { allErrs = append(allErrs, field.Invalid(field.NewPath("tailLines"), *opts.TailLines, isNegativeErrorMsg)) @@ -7465,6 +7722,20 @@ func ValidatePodLogOptions(opts *core.PodLogOptions) field.ErrorList { allErrs = append(allErrs, field.Invalid(field.NewPath("sinceSeconds"), *opts.SinceSeconds, "must be greater than 0")) } } + if allowStreamSelection { + if opts.Stream == nil { + allErrs = append(allErrs, field.Required(field.NewPath("stream"), "must be specified")) + } else { + if !validLogStreams.Has(*opts.Stream) { + allErrs = append(allErrs, field.NotSupported(field.NewPath("stream"), *opts.Stream, validLogStreams.UnsortedList())) + } + if *opts.Stream != core.LogStreamAll && opts.TailLines != nil { + allErrs = append(allErrs, field.Forbidden(field.NewPath(""), "`tailLines` and specific `stream` are mutually exclusive for now")) + } + } + } else if opts.Stream != nil { + allErrs = append(allErrs, field.Forbidden(field.NewPath("stream"), "may not be specified")) + } return allErrs } @@ -7520,7 +7791,7 @@ func validateVolumeNodeAffinity(nodeAffinity *core.VolumeNodeAffinity, fldPath * } if nodeAffinity.Required != nil { - allErrs = append(allErrs, ValidateNodeSelector(nodeAffinity.Required, fldPath.Child("required"))...) + allErrs = append(allErrs, ValidateNodeSelector(nodeAffinity.Required, true /* TODO: opts.AllowInvalidLabelValueInRequiredNodeAffinity */, fldPath.Child("required"))...) 
} else { allErrs = append(allErrs, field.Required(fldPath.Child("required"), "must specify required node constraints")) } @@ -8245,17 +8516,39 @@ func validateContainerStatusAllocatedResourcesStatus(containerStatuses []core.Co // ignore missing container, see https://github.com/kubernetes/kubernetes/issues/124915 if containerFound { found := false + var errorStr string - // get container resources from the spec - containerResources := container.Resources - for resourceName := range containerResources.Requests { - if resourceName == allocatedResource.Name { - found = true - break + if strings.HasPrefix(string(allocatedResource.Name), "claim:") { + // assume it is a claim name + + errorStr = "must match one of the container's resource claims in a format 'claim:/' or 'claim:' if request is empty" + + for _, c := range container.Resources.Claims { + name := "claim:" + c.Name + if c.Request != "" { + name += "/" + c.Request + } + + if name == string(allocatedResource.Name) { + found = true + break + } + } + + } else { + // assume it is a resource name + + errorStr = "must match one of the container's resource requests" + + for resourceName := range container.Resources.Requests { + if resourceName == allocatedResource.Name { + found = true + break + } } } if !found { - allErrors = append(allErrors, field.Invalid(fldPath.Index(i).Child("allocatedResourcesStatus").Index(j).Child("name"), allocatedResource.Name, "must match one of the container's resource requirements")) + allErrors = append(allErrors, field.Invalid(fldPath.Index(i).Child("allocatedResourcesStatus").Index(j).Child("name"), allocatedResource.Name, errorStr)) } } diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/zz_generated.deepcopy.go b/vendor/k8s.io/kubernetes/pkg/apis/core/zz_generated.deepcopy.go index f7ea2a645..a9d32c1ca 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/core/zz_generated.deepcopy.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/core/zz_generated.deepcopy.go @@ -3937,6 +3937,11 @@ func (in *PodLogOptions) DeepCopyInto(out *PodLogOptions) { *out = new(int64) **out = **in } + if in.Stream != nil { + in, out := &in.Stream, &out.Stream + *out = new(string) + **out = **in + } return } @@ -4181,6 +4186,11 @@ func (in *PodSecurityContext) DeepCopyInto(out *PodSecurityContext) { *out = new(AppArmorProfile) (*in).DeepCopyInto(*out) } + if in.SELinuxChangePolicy != nil { + in, out := &in.SELinuxChangePolicy, &out.SELinuxChangePolicy + *out = new(PodSELinuxChangePolicy) + **out = **in + } return } @@ -4363,6 +4373,11 @@ func (in *PodSpec) DeepCopyInto(out *PodSpec) { (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = new(ResourceRequirements) + (*in).DeepCopyInto(*out) + } return } diff --git a/vendor/k8s.io/kubernetes/pkg/capabilities/capabilities.go b/vendor/k8s.io/kubernetes/pkg/capabilities/capabilities.go index eac7560ad..9054a5ced 100644 --- a/vendor/k8s.io/kubernetes/pkg/capabilities/capabilities.go +++ b/vendor/k8s.io/kubernetes/pkg/capabilities/capabilities.go @@ -68,11 +68,13 @@ func Setup(allowPrivileged bool, perConnectionBytesPerSec int64) { }) } -// SetForTests sets capabilities for tests. Convenience method for testing. This should only be called from tests. -func SetForTests(c Capabilities) { +// ResetForTest resets the capabilities to a given state for testing purposes. +// This function should only be called from tests. 
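Annotation: the allocatedResourcesStatus check above now distinguishes two name spaces: entries prefixed with "claim:" must match one of the container's resource claims, rendered as "claim:<claimName>" or "claim:<claimName>/<request>", while everything else must still match a resource in the container's requests. A small, self-contained sketch of that matching (assumed struct shapes, not the vendored types):

package main

import (
	"fmt"
	"strings"
)

type resourceClaim struct{ Name, Request string }

// matchesContainer reports whether an allocatedResourcesStatus name refers to
// one of the container's resource claims or one of its resource requests.
func matchesContainer(allocatedName string, claims []resourceClaim, requests map[string]string) bool {
	if strings.HasPrefix(allocatedName, "claim:") {
		for _, c := range claims {
			name := "claim:" + c.Name
			if c.Request != "" {
				name += "/" + c.Request
			}
			if name == allocatedName {
				return true
			}
		}
		return false
	}
	_, ok := requests[allocatedName]
	return ok
}

func main() {
	claims := []resourceClaim{{Name: "gpu", Request: "large"}}
	requests := map[string]string{"cpu": "1"}
	fmt.Println(matchesContainer("claim:gpu/large", claims, requests)) // true
	fmt.Println(matchesContainer("claim:gpu/small", claims, requests)) // false
	fmt.Println(matchesContainer("cpu", claims, requests))             // true
}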
+func ResetForTest() { capInstance.lock.Lock() defer capInstance.lock.Unlock() - capInstance.capabilities = &c + capInstance.capabilities = nil + capInstance.once = sync.Once{} } // Get returns a read-only copy of the system capabilities. diff --git a/vendor/k8s.io/kubernetes/pkg/features/client_adapter.go b/vendor/k8s.io/kubernetes/pkg/features/client_adapter.go index de03d78ef..a24a1f873 100644 --- a/vendor/k8s.io/kubernetes/pkg/features/client_adapter.go +++ b/vendor/k8s.io/kubernetes/pkg/features/client_adapter.go @@ -67,3 +67,10 @@ func (a *clientAdapter) Add(in map[clientfeatures.Feature]clientfeatures.Feature } return a.mfg.Add(out) } + +// Set implements the unexported interface that client-go feature gate testing expects for +// ek8s.io/client-go/features/testing.SetFeatureDuringTest. This is necessary for integration tests +// to set test overrides for client-go feature gates. +func (a *clientAdapter) Set(name clientfeatures.Feature, enabled bool) error { + return a.mfg.SetFromMap(map[string]bool{string(name): enabled}) +} diff --git a/vendor/k8s.io/kubernetes/pkg/features/kube_features.go b/vendor/k8s.io/kubernetes/pkg/features/kube_features.go index 66adabcd8..262f396b8 100644 --- a/vendor/k8s.io/kubernetes/pkg/features/kube_features.go +++ b/vendor/k8s.io/kubernetes/pkg/features/kube_features.go @@ -17,12 +17,11 @@ limitations under the License. package features import ( - apiextensionsfeatures "k8s.io/apiextensions-apiserver/pkg/features" "k8s.io/apimachinery/pkg/util/runtime" - genericfeatures "k8s.io/apiserver/pkg/features" utilfeature "k8s.io/apiserver/pkg/util/feature" clientfeatures "k8s.io/client-go/features" "k8s.io/component-base/featuregate" + zpagesfeatures "k8s.io/component-base/zpages/features" ) const ( @@ -30,7 +29,6 @@ const ( // // // owner: @username // // kep: https://kep.k8s.io/NNN - // // alpha: v1.X // MyFeature featuregate.Feature = "MyFeature" // // Feature gates should be listed in alphabetical, case-sensitive @@ -38,13 +36,6 @@ const ( // of code conflicts because changes are more likely to be scattered // across the file. - // owner: @ttakahashi21 @mkimuram - // kep: https://kep.k8s.io/3294 - // alpha: v1.26 - // - // Enable usage of Provision of PVCs from snapshots in other namespaces - CrossNamespaceVolumeDataSource featuregate.Feature = "CrossNamespaceVolumeDataSource" - // owner: @aojea // Deprecated: v1.31 // @@ -52,74 +43,58 @@ const ( // with DNS names. AllowDNSOnlyNodeCSR featuregate.Feature = "AllowDNSOnlyNodeCSR" + // owner: @HirazawaUi + // Deprecated: v1.32 + // + // Allow spec.terminationGracePeriodSeconds to be overridden by MaxPodGracePeriodSeconds in soft evictions. + AllowOverwriteTerminationGracePeriodSeconds featuregate.Feature = "AllowOverwriteTerminationGracePeriodSeconds" + // owner: @thockin - // deprecated: v1.29 + // Deprecated: v1.29 // // Enables Service.status.ingress.loadBanace to be set on // services of types other than LoadBalancer. AllowServiceLBStatusOnNonLB featuregate.Feature = "AllowServiceLBStatusOnNonLB" // owner: @bswartz - // alpha: v1.18 - // beta: v1.24 // // Enables usage of any object for volume data source in PVCs AnyVolumeDataSource featuregate.Feature = "AnyVolumeDataSource" // owner: @tallclair - // beta: v1.4 - // GA: v1.31 AppArmor featuregate.Feature = "AppArmor" // owner: @tallclair - // beta: v1.30 - // GA: v1.31 AppArmorFields featuregate.Feature = "AppArmorFields" // owner: @liggitt // kep: - // alpha: v1.31 // // Make the Node authorizer use fine-grained selector authorization. 
// Requires AuthorizeWithSelectors to be enabled. AuthorizeNodeWithSelectors featuregate.Feature = "AuthorizeNodeWithSelectors" - // owner: @danwinship - // alpha: v1.27 - // beta: v1.29 - // GA: v1.30 - // - // Enables dual-stack --node-ip in kubelet with external cloud providers - CloudDualStackNodeIPs featuregate.Feature = "CloudDualStackNodeIPs" - // owner: @ahmedtd - // alpha: v1.26 // // Enable ClusterTrustBundle objects and Kubelet integration. ClusterTrustBundle featuregate.Feature = "ClusterTrustBundle" // owner: @ahmedtd - // alpha: v1.28 // // Enable ClusterTrustBundle Kubelet projected volumes. Depends on ClusterTrustBundle. ClusterTrustBundleProjection featuregate.Feature = "ClusterTrustBundleProjection" // owner: @szuecs - // alpha: v1.12 // // Enable nodes to change CPUCFSQuotaPeriod CPUCFSQuotaPeriod featuregate.Feature = "CustomCPUCFSQuotaPeriod" // owner: @ConnorDoyle, @fromanirh (only for GA graduation) - // alpha: v1.8 - // beta: v1.10 - // GA: v1.26 // // Alternative container-level CPU affinity policies. CPUManager featuregate.Feature = "CPUManager" // owner: @fromanirh - // alpha: v1.23 // beta: see below. // // Allow fine-tuning of cpumanager policies, experimental, alpha-quality options @@ -132,7 +107,6 @@ const ( CPUManagerPolicyAlphaOptions featuregate.Feature = "CPUManagerPolicyAlphaOptions" // owner: @fromanirh - // beta: v1.23 // beta: see below. // // Allow fine-tuning of cpumanager policies, experimental, beta-quality options @@ -146,64 +120,52 @@ const ( CPUManagerPolicyBetaOptions featuregate.Feature = "CPUManagerPolicyBetaOptions" // owner: @fromanirh - // alpha: v1.22 - // beta: v1.23 // // Allow the usage of options to fine-tune the cpumanager policies. CPUManagerPolicyOptions featuregate.Feature = "CPUManagerPolicyOptions" // owner: @jefftree // kep: https://kep.k8s.io/4355 - // alpha: v1.31 // // Enables coordinated leader election in the API server CoordinatedLeaderElection featuregate.Feature = "CoordinatedLeaderElection" // owner: @trierra // kep: http://kep.k8s.io/2589 - // alpha: v1.23 - // beta: v1.25 (off by default) - // beta: v1.31 (on by default) // // Enables the Portworx in-tree driver to Portworx migration feature. CSIMigrationPortworx featuregate.Feature = "CSIMigrationPortworx" // owner: @fengzixu - // alpha: v1.21 // // Enables kubelet to detect CSI volume condition and send the event of the abnormal volume to the corresponding pod that is using it. CSIVolumeHealth featuregate.Feature = "CSIVolumeHealth" - // owner: @nckturner - // kep: http://kep.k8s.io/2699 - // alpha: v1.27 - // Enable webhooks in cloud controller manager - CloudControllerManagerWebhook featuregate.Feature = "CloudControllerManagerWebhook" - // owner: @adrianreber // kep: https://kep.k8s.io/2008 - // alpha: v1.25 - // beta: v1.30 // // Enables container Checkpoint support in the kubelet ContainerCheckpoint featuregate.Feature = "ContainerCheckpoint" // owner: @helayoty - // beta: v1.28 + // kep: https://kep.k8s.io/4026 + // // Set the scheduled time as an annotation in the job. CronJobsScheduledAnnotation featuregate.Feature = "CronJobsScheduledAnnotation" + // owner: @ttakahashi21 @mkimuram + // kep: https://kep.k8s.io/3294 + // + // Enable usage of Provision of PVCs from snapshots in other namespaces + CrossNamespaceVolumeDataSource featuregate.Feature = "CrossNamespaceVolumeDataSource" + // owner: @elezar // kep: http://kep.k8s.io/4009 - // alpha: v1.28 - // beta: v1.29 - // GA: v1.31 // // Add support for CDI Device IDs in the Device Plugin API. 
DevicePluginCDIDevices featuregate.Feature = "DevicePluginCDIDevices" // owner: @aojea - // alpha: v1.31 // // The apiservers with the MultiCIDRServiceAllocator feature enable, in order to support live migration from the old bitmap ClusterIP // allocators to the new IPAddress allocators introduced by the MultiCIDRServiceAllocator feature, performs a dual-write on @@ -211,16 +173,11 @@ const ( DisableAllocatorDualWrite featuregate.Feature = "DisableAllocatorDualWrite" // owner: @andrewsykim - // alpha: v1.22 - // beta: v1.29 - // GA: v1.31 // // Disable any functionality in kube-apiserver, kube-controller-manager and kubelet related to the `--cloud-provider` component flag. DisableCloudProviders featuregate.Feature = "DisableCloudProviders" // owner: @andrewsykim - // alpha: v1.23 - // beta: v1.29 // // Disable in-tree functionality in kubelet to authenticate to cloud provider container registries for image pull credentials. DisableKubeletCloudCredentialProviders featuregate.Feature = "DisableKubeletCloudCredentialProviders" @@ -233,99 +190,119 @@ const ( // Remove in v1.33 AllowInsecureKubeletCertificateSigningRequests featuregate.Feature = "AllowInsecureKubeletCertificateSigningRequests" + // owner: @hoskeri + // Deprecated: v1.32 + // + // Restores previous behavior where Kubelet fails self registration if node create returns 403 Forbidden. + // Remove in v1.34 + KubeletRegistrationGetOnExistsOnly featuregate.Feature = "KubeletRegistrationGetOnExistsOnly" + // owner: @HirazawaUi // kep: http://kep.k8s.io/4004 - // Deprecated: v1.29 (default off) + // Deprecated: v1.31 (default off) + // // DisableNodeKubeProxyVersion disable the status.nodeInfo.kubeProxyVersion field of v1.Node DisableNodeKubeProxyVersion featuregate.Feature = "DisableNodeKubeProxyVersion" // owner: @pohly - // kep: http://kep.k8s.io/3063 - // alpha: v1.26 + // kep: http://kep.k8s.io/4381 // - // Enables support for resources with custom parameters and a lifecycle - // that is independent of a Pod. Resource allocation is done by a DRA driver's - // "control plane controller" in cooperation with the scheduler. - DRAControlPlaneController featuregate.Feature = "DRAControlPlaneController" + // Enables support for requesting admin access in a ResourceClaim. + // Admin access is granted even if a device is already in use and, + // depending on the DRA driver, may enable additional permissions + // when a container uses the allocated device. + // + // This feature gate is currently defined in KEP #4381. The intent + // is to move it into a separate KEP. + DRAAdminAccess featuregate.Feature = "DRAAdminAccess" // owner: @pohly // kep: http://kep.k8s.io/4381 - // alpha: v1.29 // // Enables support for resources with custom parameters and a lifecycle // that is independent of a Pod. Resource allocation is done by the scheduler // based on "structured parameters". DynamicResourceAllocation featuregate.Feature = "DynamicResourceAllocation" + // owner: @LionelJouin + // kep: http://kep.k8s.io/4817 + // alpha: v1.32 + // + // Enables support the ResourceClaim.status.devices field and for setting this + // status from DRA drivers. 
+ DRAResourceClaimDeviceStatus featuregate.Feature = "DRAResourceClaimDeviceStatus" + + // owner: @lauralorenz + // kep: https://kep.k8s.io/4603 + // owner: @lauralorenz + // kep: https://kep.k8s.io/4603 + // + // Enables support for configurable per-node backoff maximums for restarting + // containers (aka containers in CrashLoopBackOff) + KubeletCrashLoopBackOffMax featuregate.Feature = "KubeletCrashLoopBackOffMax" + // owner: @harche // kep: http://kep.k8s.io/3386 - // alpha: v1.25 // // Allows using event-driven PLEG (pod lifecycle event generator) through kubelet // which avoids frequent relisting of containers which helps optimize performance. EventedPLEG featuregate.Feature = "EventedPLEG" // owner: @andrewsykim @SergeyKanzhelev - // GA: v1.20 // // Ensure kubelet respects exec probe timeouts. Feature gate exists in-case existing workloads // may depend on old behavior where exec probe timeouts were ignored. // Lock to default and remove after v1.22 based on user feedback that should be reflected in KEP #1972 update ExecProbeTimeout featuregate.Feature = "ExecProbeTimeout" - // owner: @jpbetz - // alpha: v1.30 - // Resource create requests using generateName are retried automatically by the apiserver - // if the generated name conflicts with an existing resource name, up to a maximum number of 7 retries. - RetryGenerateName featuregate.Feature = "RetryGenerateName" - // owner: @bobbypage - // alpha: v1.20 - // beta: v1.21 // Adds support for kubelet to detect node shutdown and gracefully terminate pods prior to the node being shutdown. GracefulNodeShutdown featuregate.Feature = "GracefulNodeShutdown" // owner: @wzshiming - // alpha: v1.23 - // beta: v1.24 // Make the kubelet use shutdown configuration based on pod priority values for graceful shutdown. GracefulNodeShutdownBasedOnPodPriority featuregate.Feature = "GracefulNodeShutdownBasedOnPodPriority" - // owner: @arjunrn @mwielgus @josephburnett @sanposhiho - // kep: https://kep.k8s.io/1610 - // alpha: v1.20 - // beta: v1.27 - // GA: v1.30 - // - // Add support for the HPA to scale based on metrics from individual containers - // in target pods - HPAContainerMetrics featuregate.Feature = "HPAContainerMetrics" - // owner: @dxist - // alpha: v1.16 // // Enables support of HPA scaling to zero pods when an object or custom metric is configured. HPAScaleToZero featuregate.Feature = "HPAScaleToZero" // owner: @deepakkinni @xing-yang // kep: https://kep.k8s.io/2644 - // alpha: v1.23 - // beta: v1.31 // // Honor Persistent Volume Reclaim Policy when it is "Delete" irrespective of PV-PVC // deletion ordering. HonorPVReclaimPolicy featuregate.Feature = "HonorPVReclaimPolicy" + // owner: @vinaykul,@tallclair + // kep: http://kep.k8s.io/1287 + // + // Enables In-Place Pod Vertical Scaling + InPlacePodVerticalScaling featuregate.Feature = "InPlacePodVerticalScaling" + + // owner: @tallclair + // kep: http://kep.k8s.io/1287 + // + // Enables the AllocatedResources field in container status. This feature requires + // InPlacePodVerticalScaling also be enabled. + InPlacePodVerticalScalingAllocatedStatus featuregate.Feature = "InPlacePodVerticalScalingAllocatedStatus" + + // owner: @tallclair @esotsal + // alpha: v1.32 + // + // Allow resource resize for containers in Guaranteed pods with integer CPU requests ( default false ). + // Applies only in nodes with InPlacePodVerticalScaling and CPU Manager features enabled, and + // CPU Manager Static Policy option set. 
+ InPlacePodVerticalScalingExclusiveCPUs featuregate.Feature = "InPlacePodVerticalScalingExclusiveCPUs" + // owner: @trierra - // alpha: v1.23 // // Disables the Portworx in-tree driver. InTreePluginPortworxUnregister featuregate.Feature = "InTreePluginPortworxUnregister" // owner: @mimowo // kep: https://kep.k8s.io/3850 - // alpha: v1.28 - // beta: v1.29 // // Allows users to specify counting of failed pods per index. JobBackoffLimitPerIndex featuregate.Feature = "JobBackoffLimitPerIndex" @@ -333,15 +310,13 @@ const ( // owner: @mimowo // kep: https://kep.k8s.io/4368 // alpha: v1.30 + // beta: v1.32 // // Allows to delegate reconciliation of a Job object to an external controller. JobManagedBy featuregate.Feature = "JobManagedBy" // owner: @mimowo // kep: https://kep.k8s.io/3329 - // alpha: v1.25 - // beta: v1.26 - // stable: v1.31 // // Allow users to specify handling of pod failures based on container exit codes // and pod conditions. @@ -349,8 +324,6 @@ const ( // owner: @kannon92 // kep : https://kep.k8s.io/3939 - // alpha: v1.28 - // beta: v1.29 // // Allow users to specify recreating pods of a job only when // pods have fully terminated. @@ -358,8 +331,6 @@ const ( // owner: @tenzen-y // kep: https://kep.k8s.io/3998 - // alpha: v1.30 - // beta: v1.31 // // Allow users to specify when a Job can be declared as succeeded // based on the set of succeeded pods. @@ -367,8 +338,6 @@ const ( // owner: @marquiz // kep: http://kep.k8s.io/4033 - // alpha: v1.28 - // beta: v1.31 // // Enable detection of the kubelet cgroup driver configuration option from // the CRI. The CRI runtime also needs to support this feature in which @@ -377,8 +346,14 @@ const ( // fallback to using it's cgroupDriver option. KubeletCgroupDriverFromCRI featuregate.Feature = "KubeletCgroupDriverFromCRI" + // owner: @vinayakankugoyal + // kep: http://kep.k8s.io/2862 + // + // Enable fine-grained kubelet API authorization for webhook based + // authorization. + KubeletFineGrainedAuthz featuregate.Feature = "KubeletFineGrainedAuthz" + // owner: @AkihiroSuda - // alpha: v1.22 // // Enables support for running kubelet in a user namespace. // The user namespace has to be created before running kubelet. @@ -386,21 +361,17 @@ const ( KubeletInUserNamespace featuregate.Feature = "KubeletInUserNamespace" // owner: @moshe010 - // alpha: v1.27 // // Enable POD resources API to return resources allocated by Dynamic Resource Allocation KubeletPodResourcesDynamicResources featuregate.Feature = "KubeletPodResourcesDynamicResources" // owner: @moshe010 - // alpha: v1.27 // // Enable POD resources API with Get method KubeletPodResourcesGet featuregate.Feature = "KubeletPodResourcesGet" // owner: @kannon92 // kep: https://kep.k8s.io/4191 - // alpha: v1.29 - // beta: v1.31 // // The split image filesystem feature enables kubelet to perform garbage collection // of images (read-only layers) and/or containers (writeable layers) deployed on @@ -409,34 +380,18 @@ const ( // owner: @sallyom // kep: https://kep.k8s.io/2832 - // alpha: v1.25 - // beta: v1.27 // // Add support for distributed tracing in the kubelet KubeletTracing featuregate.Feature = "KubeletTracing" // owner: @alexanderConstantinescu // kep: http://kep.k8s.io/3836 - // alpha: v1.28 - // beta: v1.30 - // stable: v1.31 // // Implement connection draining for terminating nodes for // `externalTrafficPolicy: Cluster` services. 
KubeProxyDrainingTerminatingNodes featuregate.Feature = "KubeProxyDrainingTerminatingNodes" - // owner: @yt2985 - // kep: http://kep.k8s.io/2799 - // alpha: v1.28 - // beta: v1.29 - // GA: v1.30 - // - // Enables cleaning up of secret-based service account tokens. - LegacyServiceAccountTokenCleanUp featuregate.Feature = "LegacyServiceAccountTokenCleanUp" - // owner: @RobertKrawitz - // alpha: v1.15 - // beta: v1.31 // // Allow use of filesystems for ephemeral storage monitoring. // Only applies if LocalStorageCapacityIsolation is set. @@ -444,104 +399,59 @@ const ( LocalStorageCapacityIsolationFSQuotaMonitoring featuregate.Feature = "LocalStorageCapacityIsolationFSQuotaMonitoring" // owner: @damemi - // alpha: v1.21 - // beta: v1.22 - // GA: v1.31 // // Enables scaling down replicas via logarithmic comparison of creation/ready timestamps LogarithmicScaleDown featuregate.Feature = "LogarithmicScaleDown" // owner: @sanposhiho // kep: https://kep.k8s.io/3633 - // alpha: v1.29 - // beta: v1.30 // // Enables the MatchLabelKeys and MismatchLabelKeys in PodAffinity and PodAntiAffinity. MatchLabelKeysInPodAffinity featuregate.Feature = "MatchLabelKeysInPodAffinity" // owner: @denkensk // kep: https://kep.k8s.io/3243 - // alpha: v1.25 - // beta: v1.27 // // Enable MatchLabelKeys in PodTopologySpread. MatchLabelKeysInPodTopologySpread featuregate.Feature = "MatchLabelKeysInPodTopologySpread" // owner: @krmayankk - // alpha: v1.24 // // Enables maxUnavailable for StatefulSet MaxUnavailableStatefulSet featuregate.Feature = "MaxUnavailableStatefulSet" - // owner: @cynepco3hahue(alukiano) @cezaryzukowski @k-wiatrzyk - // alpha: v1.21 - // beta: v1.22 + // owner: @cynepco3hahue(alukiano) @cezaryzukowski @k-wiatrzyk, @Tal-or (only for GA graduation) + // // Allows setting memory affinity for a container based on NUMA topology MemoryManager featuregate.Feature = "MemoryManager" // owner: @xiaoxubeii // kep: https://kep.k8s.io/2570 - // alpha: v1.22 // // Enables kubelet to support memory QoS with cgroups v2. MemoryQoS featuregate.Feature = "MemoryQoS" - // owner: @sanposhiho - // kep: https://kep.k8s.io/3022 - // alpha: v1.24 - // beta: v1.25 - // GA: v1.30 - // - // Enable MinDomains in Pod Topology Spread. - MinDomainsInPodTopologySpread featuregate.Feature = "MinDomainsInPodTopologySpread" - // owner: @aojea // kep: https://kep.k8s.io/1880 - // alpha: v1.27 - // beta: v1.31 // // Enables the dynamic configuration of Service IP ranges MultiCIDRServiceAllocator featuregate.Feature = "MultiCIDRServiceAllocator" - // owner: @jsafrane - // kep: https://kep.k8s.io/3756 - // alpha: v1.25 (as part of SELinuxMountReadWriteOncePod) - // beta: v1.27 - // GA: v1.30 - // Robust VolumeManager reconstruction after kubelet restart. - NewVolumeManagerReconstruction featuregate.Feature = "NewVolumeManagerReconstruction" - // owner: @danwinship // kep: https://kep.k8s.io/3866 - // alpha: v1.29 - // beta: v1.31 // // Allows running kube-proxy with `--mode nftables`. NFTablesProxyMode featuregate.Feature = "NFTablesProxyMode" // owner: @aravindhp @LorbusChris // kep: http://kep.k8s.io/2271 - // alpha: v1.27 - // beta: v1.30 // // Enables querying logs of node services using the /logs endpoint. Enabling this feature has security implications. // The recommendation is to enable it on a need basis for debugging purposes and disabling otherwise. 
NodeLogQuery featuregate.Feature = "NodeLogQuery" - // owner: @xing-yang @sonasingh46 - // kep: https://kep.k8s.io/2268 - // alpha: v1.24 - // beta: v1.26 - // GA: v1.28 - // - // Allow pods to failover to a different node in case of non graceful node shutdown - NodeOutOfServiceVolumeDetach featuregate.Feature = "NodeOutOfServiceVolumeDetach" - // owner: @iholder101 @kannon92 // kep: https://kep.k8s.io/2400 - // alpha: v1.22 - // beta1: v1.28 (default=false) - // beta2: v.1.30 (default=true) // Permits kubelet to run with swap enabled. NodeSwap featuregate.Feature = "NodeSwap" @@ -554,42 +464,30 @@ const ( // owner: @mortent, @atiratree, @ravig // kep: http://kep.k8s.io/3018 - // alpha: v1.26 - // beta: v1.27 - // GA: v1.31 // // Enables PDBUnhealthyPodEvictionPolicy for PodDisruptionBudgets PDBUnhealthyPodEvictionPolicy featuregate.Feature = "PDBUnhealthyPodEvictionPolicy" // owner: @RomanBednar // kep: https://kep.k8s.io/3762 - // alpha: v1.28 - // beta: v1.29 - // GA: v1.31 // // Adds a new field to persistent volumes which holds a timestamp of when the volume last transitioned its phase. PersistentVolumeLastPhaseTransitionTime featuregate.Feature = "PersistentVolumeLastPhaseTransitionTime" // owner: @haircommander // kep: https://kep.k8s.io/2364 - // alpha: v1.23 // // Configures the Kubelet to use the CRI to populate pod and container stats, instead of supplimenting with stats from cAdvisor. // Requires the CRI implementation supports supplying the required stats. PodAndContainerStatsFromCRI featuregate.Feature = "PodAndContainerStatsFromCRI" // owner: @ahg-g - // alpha: v1.21 - // beta: v1.22 // // Enables controlling pod ranking on replicaset scale-down. PodDeletionCost featuregate.Feature = "PodDeletionCost" // owner: @mimowo // kep: https://kep.k8s.io/3329 - // alpha: v1.25 - // beta: v1.26 - // stable: v1.31 // // Enables support for appending a dedicated pod condition indicating that // the pod is being deleted due to a disruption. @@ -597,62 +495,53 @@ const ( // owner: @danielvegamyhre // kep: https://kep.k8s.io/4017 - // beta: v1.28 // // Set pod completion index as a pod label for Indexed Jobs. PodIndexLabel featuregate.Feature = "PodIndexLabel" + // owner: @knight42 + // kep: https://kep.k8s.io/3288 + // alpha: v1.32 + // + // Enables only stdout or stderr of the container to be retrievd. 
+ PodLogsQuerySplitStreams featuregate.Feature = "PodLogsQuerySplitStreams" + // owner: @ddebroy, @kannon92 - // alpha: v1.25 - // beta: v1.29 // // Enables reporting of PodReadyToStartContainersCondition condition in pod status after pod // sandbox creation and network configuration completes successfully PodReadyToStartContainersCondition featuregate.Feature = "PodReadyToStartContainersCondition" - // owner: @wzshiming - // kep: http://kep.k8s.io/2681 - // alpha: v1.28 - // beta: v1.29 - // GA: v1.30 - // - // Adds pod.status.hostIPs and downward API - PodHostIPs featuregate.Feature = "PodHostIPs" - // owner: @AxeZhan // kep: http://kep.k8s.io/3960 - // alpha: v1.29 - // beta: v1.30 // // Enables SleepAction in container lifecycle hooks PodLifecycleSleepAction featuregate.Feature = "PodLifecycleSleepAction" + // owner: @sreeram-venkitesh + // kep: http://kep.k8s.io/4818 + // + // Allows zero value for sleep duration in SleepAction in container lifecycle hooks + PodLifecycleSleepActionAllowZero featuregate.Feature = "PodLifecycleSleepActionAllowZero" + // owner: @Huang-Wei // kep: https://kep.k8s.io/3521 - // alpha: v1.26 - // beta: v1.27 - // stable: v1.30 // // Enable users to specify when a Pod is ready for scheduling. PodSchedulingReadiness featuregate.Feature = "PodSchedulingReadiness" // owner: @seans3 // kep: http://kep.k8s.io/4006 - // alpha: v1.30 - // beta: v1.31 // // Enables PortForward to be proxied with a websocket client PortForwardWebsockets featuregate.Feature = "PortForwardWebsockets" // owner: @jessfraz - // alpha: v1.12 - // beta: v1.31 // // Enables control over ProcMountType for containers. ProcMountType featuregate.Feature = "ProcMountType" // owner: @sjenning - // alpha: v1.11 // // Allows resource reservations at the QoS level preventing pods at lower QoS levels from // bursting into resources requested at higher QoS levels (memory only for now) @@ -660,20 +549,30 @@ const ( // owner: @gnufied // kep: https://kep.k8s.io/1790 - // alpha: v1.23 + // beta - v1.32 // // Allow users to recover from volume expansion failure RecoverVolumeExpansionFailure featuregate.Feature = "RecoverVolumeExpansionFailure" + // owner: @AkihiroSuda + // kep: https://kep.k8s.io/3857 + // + // Allows recursive read-only mounts. + RecursiveReadOnlyMounts featuregate.Feature = "RecursiveReadOnlyMounts" + + // owner: @adrianmoisey + // kep: https://kep.k8s.io/4427 + // + // Relaxed DNS search string validation. + RelaxedDNSSearchValidation featuregate.Feature = "RelaxedDNSSearchValidation" + // owner: @HirazawaUi // kep: https://kep.k8s.io/4369 - // alpha: v1.30 // // Allow almost all printable ASCII characters in environment variables RelaxedEnvironmentVariableValidation featuregate.Feature = "RelaxedEnvironmentVariableValidation" // owner: @zhangweikop - // beta: v1.31 // // Enable kubelet tls server to update certificate if the specified certificate files are changed. // This feature is useful when specifying tlsCertFile & tlsPrivateKeyFile in kubelet Configuration. @@ -682,14 +581,11 @@ const ( // owner: @SergeyKanzhelev // kep: https://kep.k8s.io/4680 - // alpha: v1.31 // // Adds the AllocatedResourcesStatus to the container status. 
ResourceHealthStatus featuregate.Feature = "ResourceHealthStatus" // owner: @mikedanese - // alpha: v1.7 - // beta: v1.12 // // Gets a server certificate for the kubelet from the Certificate Signing // Request API instead of generating one self signed and auto rotates the @@ -698,14 +594,12 @@ const ( // owner: @kiashok // kep: https://kep.k8s.io/4216 - // alpha: v1.29 // // Adds support to pull images based on the runtime class specified. RuntimeClassInImageCriAPI featuregate.Feature = "RuntimeClassInImageCriApi" // owner: @danielvegamyhre // kep: https://kep.k8s.io/2413 - // beta: v1.27 // // Allows mutating spec.completions for Indexed job when done in tandem with // spec.parallelism. Specifically, spec.completions is mutable iff spec.completions @@ -714,23 +608,34 @@ const ( // owner: @sanposhiho // kep: http://kep.k8s.io/4247 - // beta: v1.28 // // Enables the scheduler's enhancement called QueueingHints, // which benefits to reduce the useless requeueing. SchedulerQueueingHints featuregate.Feature = "SchedulerQueueingHints" + // owner: @sanposhiho + // kep: http://kep.k8s.io/4832 + // alpha: v1.32 + // + // Running some expensive operation within the scheduler's preemption asynchronously, + // which improves the scheduling latency when the preemption involves in. + SchedulerAsyncPreemption featuregate.Feature = "SchedulerAsyncPreemption" + // owner: @atosatto @yuanchen8911 // kep: http://kep.k8s.io/3902 - // beta: v1.29 // // Decouples Taint Eviction Controller, performing taint-based Pod eviction, from Node Lifecycle Controller. SeparateTaintEvictionController featuregate.Feature = "SeparateTaintEvictionController" + // owner: @aramase + // kep: https://kep.k8s.io/4412 + // + // ServiceAccountNodeAudienceRestriction is used to restrict the audience for which the + // kubelet can request a service account token for. + ServiceAccountNodeAudienceRestriction featuregate.Feature = "ServiceAccountNodeAudienceRestriction" + // owner: @munnerz // kep: http://kep.k8s.io/4193 - // alpha: v1.29 - // beta: v1.30 // // Controls whether JTIs (UUIDs) are embedded into generated service account tokens, and whether these JTIs are // recorded into the audit log for future requests made by these tokens. @@ -738,24 +643,18 @@ const ( // owner: @munnerz // kep: http://kep.k8s.io/4193 - // alpha: v1.29 - // beta: v1.31 // // Controls whether the apiserver supports binding service account tokens to Node objects. ServiceAccountTokenNodeBinding featuregate.Feature = "ServiceAccountTokenNodeBinding" // owner: @munnerz // kep: http://kep.k8s.io/4193 - // alpha: v1.29 - // beta: v1.30 // // Controls whether the apiserver will validate Node claims in service account tokens. ServiceAccountTokenNodeBindingValidation featuregate.Feature = "ServiceAccountTokenNodeBindingValidation" // owner: @munnerz // kep: http://kep.k8s.io/4193 - // alpha: v1.29 - // beta: v1.30 // // Controls whether the apiserver embeds the node name and uid for the associated node when issuing // service account tokens bound to Pod objects. @@ -763,16 +662,12 @@ const ( // owner: @gauravkghildiyal @robscott // kep: https://kep.k8s.io/4444 - // alpha: v1.30 - // beta: v1.31 // // Enables trafficDistribution field on Services. 
ServiceTrafficDistribution featuregate.Feature = "ServiceTrafficDistribution" // owner: @gjkim42 @SergeyKanzhelev @matthyx @tzneal // kep: http://kep.k8s.io/753 - // alpha: v1.28 - // beta: v1.29 // // Introduces sidecar containers, a new type of init container that starts // before other containers but remains running for the full duration of the @@ -780,54 +675,41 @@ const ( SidecarContainers featuregate.Feature = "SidecarContainers" // owner: @derekwaynecarr - // alpha: v1.20 - // beta: v1.22 // // Enables kubelet support to size memory backed volumes + // This is a kubelet only feature gate. + // Code can be removed in 1.35 without any consideration for emulated versions. SizeMemoryBackedVolumes featuregate.Feature = "SizeMemoryBackedVolumes" - // owner: @alexanderConstantinescu - // kep: http://kep.k8s.io/3458 - // beta: v1.27 - // GA: v1.30 - // - // Enables less load balancer re-configurations by the service controller - // (KCCM) as an effect of changing node state. - StableLoadBalancerNodeSet featuregate.Feature = "StableLoadBalancerNodeSet" - // owner: @mattcary - // alpha: v1.22 - // beta: v1.27 // // Enables policies controlling deletion of PVCs created by a StatefulSet. StatefulSetAutoDeletePVC featuregate.Feature = "StatefulSetAutoDeletePVC" // owner: @psch - // alpha: v1.26 - // beta: v1.27 - // stable: v1.31 // // Enables a StatefulSet to start from an arbitrary non zero ordinal StatefulSetStartOrdinal featuregate.Feature = "StatefulSetStartOrdinal" + // owner: @ahutsunshine + // + // Allows namespace indexer for namespace scope resources in apiserver cache to accelerate list operations. + StorageNamespaceIndex featuregate.Feature = "StorageNamespaceIndex" + // owner: @nilekhc // kep: https://kep.k8s.io/4192 - // alpha: v1.30 // Enables support for the StorageVersionMigrator controller. StorageVersionMigrator featuregate.Feature = "StorageVersionMigrator" // owner: @robscott // kep: https://kep.k8s.io/2433 - // alpha: v1.21 - // beta: v1.23 // // Enables topology aware hints for EndpointSlices TopologyAwareHints featuregate.Feature = "TopologyAwareHints" // owner: @PiotrProkop // kep: https://kep.k8s.io/3545 - // alpha: v1.26 // // Allow fine-tuning of topology manager policies with alpha options. // This feature gate: @@ -837,7 +719,6 @@ const ( // owner: @PiotrProkop // kep: https://kep.k8s.io/3545 - // alpha: v1.26 // // Allow fine-tuning of topology manager policies with beta options. // This feature gate: @@ -848,70 +729,68 @@ const ( // owner: @PiotrProkop // kep: https://kep.k8s.io/3545 - // alpha: v1.26 // // Allow the usage of options to fine-tune the topology manager policies. TopologyManagerPolicyOptions featuregate.Feature = "TopologyManagerPolicyOptions" // owner: @seans3 // kep: http://kep.k8s.io/4006 - // alpha: v1.29 - // beta: v1.30 // // Enables StreamTranslator proxy to handle WebSockets upgrade requests for the // version of the RemoteCommand subprotocol that supports the "close" signal. TranslateStreamCloseWebsocketRequests featuregate.Feature = "TranslateStreamCloseWebsocketRequests" // owner: @richabanker - // alpha: v1.28 // // Proxies client to an apiserver capable of serving the request in the event of version skew. UnknownVersionInteroperabilityProxy featuregate.Feature = "UnknownVersionInteroperabilityProxy" // owner: @rata, @giuseppe // kep: https://kep.k8s.io/127 - // alpha: v1.25 - // beta: v1.30 // // Enables user namespace support for stateless pods. 
UserNamespacesSupport featuregate.Feature = "UserNamespacesSupport" // owner: @mattcarry, @sunnylovestiramisu // kep: https://kep.k8s.io/3751 - // alpha: v1.29 - // beta: v1.31 (off by default) // // Enables user specified volume attributes for persistent volumes, like iops and throughput. VolumeAttributesClass featuregate.Feature = "VolumeAttributesClass" // owner: @cofyc - // alpha: v1.21 VolumeCapacityPriority featuregate.Feature = "VolumeCapacityPriority" // owner: @ksubrmnn - // alpha: v1.14 // // Allows kube-proxy to create DSR loadbalancers for Windows WinDSR featuregate.Feature = "WinDSR" + // owner: @zylxjtu + // kep: https://kep.k8s.io/4802 + // alpha: v1.32 + // + // Enables support for graceful shutdown windows node. + WindowsGracefulNodeShutdown featuregate.Feature = "WindowsGracefulNodeShutdown" + // owner: @ksubrmnn - // alpha: v1.14 - // beta: v1.20 // // Allows kube-proxy to run in Overlay mode for Windows WinOverlay featuregate.Feature = "WinOverlay" + // owner: @jsturtevant + // kep: https://kep.k8s.io/4888 + // + // Add CPU and Memory Affinity support to Windows nodes with CPUManager, MemoryManager and Topology manager + WindowsCPUAndMemoryAffinity featuregate.Feature = "WindowsCPUAndMemoryAffinity" + // owner: @marosset // kep: https://kep.k8s.io/3503 - // alpha: v1.26 // // Enables support for joining Windows containers to a hosts' network namespace. WindowsHostNetwork featuregate.Feature = "WindowsHostNetwork" // owner: @kerthcet // kep: https://kep.k8s.io/3094 - // alpha: v1.25 - // beta: v1.26 // // Allow users to specify whether to take nodeAffinity/nodeTaint into consideration when // calculating pod topology spread skew. @@ -919,37 +798,23 @@ const ( // owner: @jsafrane // kep: https://kep.k8s.io/1710 - // alpha: v1.25 - // beta: v1.27 // Speed up container startup by mounting volumes with the correct SELinux label // instead of changing each file on the volumes recursively. // Initial implementation focused on ReadWriteOncePod volumes. SELinuxMountReadWriteOncePod featuregate.Feature = "SELinuxMountReadWriteOncePod" - // owner: @vinaykul - // kep: http://kep.k8s.io/1287 - // alpha: v1.27 - // - // Enables In-Place Pod Vertical Scaling - InPlacePodVerticalScaling featuregate.Feature = "InPlacePodVerticalScaling" - // owner: @Sh4d1,@RyanAoh,@rikatz // kep: http://kep.k8s.io/1860 - // alpha: v1.29 - // beta: v1.30 // LoadBalancerIPMode enables the IPMode field in the LoadBalancerIngress status of a Service LoadBalancerIPMode featuregate.Feature = "LoadBalancerIPMode" // owner: @haircommander // kep: http://kep.k8s.io/4210 - // alpha: v1.29 - // beta: v1.30 // ImageMaximumGCAge enables the Kubelet configuration field of the same name, allowing an admin // to specify the age after which an image will be garbage collected. ImageMaximumGCAge featuregate.Feature = "ImageMaximumGCAge" // owner: @saschagrunert - // alpha: v1.28 // // Enables user namespace support for Pod Security Standards. Enabling this // feature will modify all Pod Security Standard rules to allow setting: @@ -960,45 +825,62 @@ const ( // releases. UserNamespacesPodSecurityStandards featuregate.Feature = "UserNamespacesPodSecurityStandards" - // owner: @ahutsunshine - // beta: v1.30 - // - // Allows namespace indexer for namespace scope resources in apiserver cache to accelerate list operations. 
- StorageNamespaceIndex featuregate.Feature = "StorageNamespaceIndex" - // owner: @jsafrane // kep: https://kep.k8s.io/1710 - // alpha: v1.30 // Speed up container startup by mounting volumes with the correct SELinux label // instead of changing each file on the volumes recursively. SELinuxMount featuregate.Feature = "SELinuxMount" - // owner: @AkihiroSuda - // kep: https://kep.k8s.io/3857 - // alpha: v1.30 - // beta: v1.31 - // - // Allows recursive read-only mounts. - RecursiveReadOnlyMounts featuregate.Feature = "RecursiveReadOnlyMounts" - // owner: @everpeace // kep: https://kep.k8s.io/3619 - // alpha: v1.31 // // Enable SupplementalGroupsPolicy feature in PodSecurityContext SupplementalGroupsPolicy featuregate.Feature = "SupplementalGroupsPolicy" // owner: @saschagrunert // kep: https://kep.k8s.io/4639 - // alpha: v1.31 // // Enables the image volume source. ImageVolume featuregate.Feature = "ImageVolume" + + // owner: @zhifei92 + // beta: v1.32 + // + // Enables the systemd watchdog for the kubelet. When enabled, the kubelet will + // periodically notify the systemd watchdog to indicate that it is still alive. + // This can help prevent the system from restarting the kubelet if it becomes + // unresponsive. The feature gate is enabled by default, but should only be used + // if the system supports the systemd watchdog feature and has it configured properly. + SystemdWatchdog = featuregate.Feature("SystemdWatchdog") + + // owner: @jsafrane + // kep: https://kep.k8s.io/1710 + // alpha: v1.32 + // + // Speed up container startup by mounting volumes with the correct SELinux label + // instead of changing each file on the volumes recursively. + // Enables the SELinuxChangePolicy field in PodSecurityContext before SELinuxMount featgure gate is enabled. + SELinuxChangePolicy featuregate.Feature = "SELinuxChangePolicy" + + // owner: @HarshalNeelkamal + // alpha: v1.32 + // + // Enables external service account JWT signing and key management. + // If enabled, it allows passing --service-account-signing-endpoint flag to configure external signer. + ExternalServiceAccountTokenSigner featuregate.Feature = "ExternalServiceAccountTokenSigner" + + // owner: @ndixita + // key: https://kep.k8s.io/2837 + // alpha: 1.32 + // + // Enables specifying resources at pod-level. + PodLevelResources featuregate.Feature = "PodLevelResources" ) func init() { runtime.Must(utilfeature.DefaultMutableFeatureGate.Add(defaultKubernetesFeatureGates)) runtime.Must(utilfeature.DefaultMutableFeatureGate.AddVersioned(defaultVersionedKubernetesFeatureGates)) + runtime.Must(zpagesfeatures.AddFeatureGates(utilfeature.DefaultMutableFeatureGate)) // Register all client-go features with kube's feature gate instance and make all client-go // feature checks use kube's instance. The effect is that for kube binaries, client-go @@ -1010,336 +892,9 @@ func init() { clientfeatures.ReplaceFeatureGates(ca) } -// defaultKubernetesFeatureGates consists of all known Kubernetes-specific feature keys. -// To add a new feature, define a key for it above and add it here. The features will be -// available throughout Kubernetes binaries. +// defaultKubernetesFeatureGates consists of legacy unversioned Kubernetes-specific feature keys. +// Please do not add to this file and use pkg/features/versioned_kube_features.go instead. // // Entries are separated from each other with blank lines to avoid sweeping gofmt changes // when adding or removing one entry. 
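Annotation: the unversioned defaultKubernetesFeatureGates map below is being drained in favour of versioned registrations, but the registration mechanics stay the same: declare a featuregate.Feature constant, add a FeatureSpec for it to a mutable gate, and query it with Enabled. A hedged, minimal sketch using k8s.io/component-base/featuregate (the gate name and spec here are illustrative, not one of the real defaults):

package main

import (
	"fmt"

	"k8s.io/component-base/featuregate"
)

const MyFeature featuregate.Feature = "MyFeature"

func main() {
	gate := featuregate.NewFeatureGate()
	// Register the feature with its default and maturity level.
	if err := gate.Add(map[featuregate.Feature]featuregate.FeatureSpec{
		MyFeature: {Default: false, PreRelease: featuregate.Alpha},
	}); err != nil {
		panic(err)
	}
	fmt.Println(gate.Enabled(MyFeature)) // false until overridden

	// Overrides (e.g. from --feature-gates) flow through SetFromMap.
	if err := gate.SetFromMap(map[string]bool{string(MyFeature): true}); err != nil {
		panic(err)
	}
	fmt.Println(gate.Enabled(MyFeature)) // true
}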
-var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureSpec{ - CrossNamespaceVolumeDataSource: {Default: false, PreRelease: featuregate.Alpha}, - - AllowDNSOnlyNodeCSR: {Default: false, PreRelease: featuregate.Deprecated}, // remove after 1.33 - - AllowServiceLBStatusOnNonLB: {Default: false, PreRelease: featuregate.Deprecated}, // remove after 1.29 - - AnyVolumeDataSource: {Default: true, PreRelease: featuregate.Beta}, // on by default in 1.24 - - AppArmor: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.33 - - AppArmorFields: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.33 - - AuthorizeNodeWithSelectors: {Default: false, PreRelease: featuregate.Alpha}, - - CloudDualStackNodeIPs: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.32 - - ClusterTrustBundle: {Default: false, PreRelease: featuregate.Alpha}, - - ClusterTrustBundleProjection: {Default: false, PreRelease: featuregate.Alpha}, - - CPUCFSQuotaPeriod: {Default: false, PreRelease: featuregate.Alpha}, - - CPUManager: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // GA in 1.26 - - CPUManagerPolicyAlphaOptions: {Default: false, PreRelease: featuregate.Alpha}, - - CPUManagerPolicyBetaOptions: {Default: true, PreRelease: featuregate.Beta}, - - CPUManagerPolicyOptions: {Default: true, PreRelease: featuregate.Beta}, - - CSIMigrationPortworx: {Default: true, PreRelease: featuregate.Beta}, // On by default (requires Portworx CSI driver) - - CSIVolumeHealth: {Default: false, PreRelease: featuregate.Alpha}, - - CloudControllerManagerWebhook: {Default: false, PreRelease: featuregate.Alpha}, - - ContainerCheckpoint: {Default: true, PreRelease: featuregate.Beta}, - - CronJobsScheduledAnnotation: {Default: true, PreRelease: featuregate.Beta}, - - DisableAllocatorDualWrite: {Default: false, PreRelease: featuregate.Alpha}, // remove after MultiCIDRServiceAllocator is GA - - DisableCloudProviders: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, - - DisableKubeletCloudCredentialProviders: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, - - DisableNodeKubeProxyVersion: {Default: false, PreRelease: featuregate.Deprecated}, // default on in 1.33 - - DevicePluginCDIDevices: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.33 - - DRAControlPlaneController: {Default: false, PreRelease: featuregate.Alpha}, - - DynamicResourceAllocation: {Default: false, PreRelease: featuregate.Alpha}, - - EventedPLEG: {Default: false, PreRelease: featuregate.Alpha}, - - ExecProbeTimeout: {Default: true, PreRelease: featuregate.GA}, // lock to default and remove after v1.22 based on KEP #1972 update - - RetryGenerateName: {Default: true, PreRelease: featuregate.Beta}, - - GracefulNodeShutdown: {Default: true, PreRelease: featuregate.Beta}, - - GracefulNodeShutdownBasedOnPodPriority: {Default: true, PreRelease: featuregate.Beta}, - - HPAContainerMetrics: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.32 - - HonorPVReclaimPolicy: {Default: true, PreRelease: featuregate.Beta}, - - ImageMaximumGCAge: {Default: true, PreRelease: featuregate.Beta}, - - InTreePluginPortworxUnregister: {Default: false, PreRelease: featuregate.Alpha}, - - JobBackoffLimitPerIndex: {Default: true, PreRelease: featuregate.Beta}, - - JobManagedBy: {Default: false, PreRelease: featuregate.Alpha}, - - JobPodFailurePolicy: {Default: true, PreRelease: featuregate.GA, 
LockToDefault: true}, // remove in 1.33 - - JobPodReplacementPolicy: {Default: true, PreRelease: featuregate.Beta}, - - JobSuccessPolicy: {Default: true, PreRelease: featuregate.Beta}, - - KubeletCgroupDriverFromCRI: {Default: true, PreRelease: featuregate.Beta}, - - KubeletInUserNamespace: {Default: false, PreRelease: featuregate.Alpha}, - - KubeletPodResourcesDynamicResources: {Default: false, PreRelease: featuregate.Alpha}, - - KubeletPodResourcesGet: {Default: false, PreRelease: featuregate.Alpha}, - - KubeletSeparateDiskGC: {Default: true, PreRelease: featuregate.Beta}, - - KubeletTracing: {Default: true, PreRelease: featuregate.Beta}, - - KubeProxyDrainingTerminatingNodes: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // GA in 1.31; remove in 1.33 - - LegacyServiceAccountTokenCleanUp: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // GA in 1.30; remove in 1.32 - - LocalStorageCapacityIsolationFSQuotaMonitoring: {Default: false, PreRelease: featuregate.Beta}, - - LogarithmicScaleDown: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, - - MatchLabelKeysInPodAffinity: {Default: true, PreRelease: featuregate.Beta}, - - MatchLabelKeysInPodTopologySpread: {Default: true, PreRelease: featuregate.Beta}, - - MaxUnavailableStatefulSet: {Default: false, PreRelease: featuregate.Alpha}, - - MemoryManager: {Default: true, PreRelease: featuregate.Beta}, - - MemoryQoS: {Default: false, PreRelease: featuregate.Alpha}, - - MinDomainsInPodTopologySpread: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.32 - - MultiCIDRServiceAllocator: {Default: false, PreRelease: featuregate.Beta}, - - NewVolumeManagerReconstruction: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.32 - - NFTablesProxyMode: {Default: true, PreRelease: featuregate.Beta}, - - NodeLogQuery: {Default: false, PreRelease: featuregate.Beta}, - - NodeOutOfServiceVolumeDetach: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.31 - - NodeSwap: {Default: true, PreRelease: featuregate.Beta}, - - OrderedNamespaceDeletion: {Default: false, PreRelease: featuregate.Beta}, - - PDBUnhealthyPodEvictionPolicy: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.33 - - PersistentVolumeLastPhaseTransitionTime: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.33 - - PodAndContainerStatsFromCRI: {Default: false, PreRelease: featuregate.Alpha}, - - PodDeletionCost: {Default: true, PreRelease: featuregate.Beta}, - - PodDisruptionConditions: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.33 - - PodReadyToStartContainersCondition: {Default: true, PreRelease: featuregate.Beta}, - - PodHostIPs: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.32 - - PodLifecycleSleepAction: {Default: true, PreRelease: featuregate.Beta}, - - PodSchedulingReadiness: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // GA in 1.30; remove in 1.32 - - PortForwardWebsockets: {Default: true, PreRelease: featuregate.Beta}, - - ProcMountType: {Default: false, PreRelease: featuregate.Beta}, - - QOSReserved: {Default: false, PreRelease: featuregate.Alpha}, - - RecoverVolumeExpansionFailure: {Default: false, PreRelease: featuregate.Alpha}, - - RelaxedEnvironmentVariableValidation: {Default: false, PreRelease: featuregate.Alpha}, - - ReloadKubeletServerCertificateFile: {Default: true, PreRelease: 
featuregate.Beta}, - - ResourceHealthStatus: {Default: false, PreRelease: featuregate.Alpha}, - - RotateKubeletServerCertificate: {Default: true, PreRelease: featuregate.Beta}, - - RuntimeClassInImageCriAPI: {Default: false, PreRelease: featuregate.Alpha}, - - ElasticIndexedJob: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // GA in 1.31, remove in 1.32 - - SchedulerQueueingHints: {Default: false, PreRelease: featuregate.Beta}, - - SeparateTaintEvictionController: {Default: true, PreRelease: featuregate.Beta}, - - ServiceAccountTokenJTI: {Default: true, PreRelease: featuregate.Beta}, - - ServiceAccountTokenPodNodeInfo: {Default: true, PreRelease: featuregate.Beta}, - - ServiceAccountTokenNodeBinding: {Default: true, PreRelease: featuregate.Beta}, - - ServiceAccountTokenNodeBindingValidation: {Default: true, PreRelease: featuregate.Beta}, - - ServiceTrafficDistribution: {Default: true, PreRelease: featuregate.Beta}, - - SidecarContainers: {Default: true, PreRelease: featuregate.Beta}, - - SizeMemoryBackedVolumes: {Default: true, PreRelease: featuregate.Beta}, - - StableLoadBalancerNodeSet: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // GA in 1.30, remove in 1.32 - - StatefulSetAutoDeletePVC: {Default: true, PreRelease: featuregate.Beta}, - - StatefulSetStartOrdinal: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // GA in 1.31, remove in 1.33 - - StorageVersionMigrator: {Default: false, PreRelease: featuregate.Alpha}, - - TopologyAwareHints: {Default: true, PreRelease: featuregate.Beta}, - - TopologyManagerPolicyAlphaOptions: {Default: false, PreRelease: featuregate.Alpha}, - - TopologyManagerPolicyBetaOptions: {Default: true, PreRelease: featuregate.Beta}, - - TopologyManagerPolicyOptions: {Default: true, PreRelease: featuregate.Beta}, - - TranslateStreamCloseWebsocketRequests: {Default: true, PreRelease: featuregate.Beta}, - - UnknownVersionInteroperabilityProxy: {Default: false, PreRelease: featuregate.Alpha}, - - VolumeAttributesClass: {Default: false, PreRelease: featuregate.Beta}, - - VolumeCapacityPriority: {Default: false, PreRelease: featuregate.Alpha}, - - UserNamespacesSupport: {Default: false, PreRelease: featuregate.Beta}, - - WinDSR: {Default: false, PreRelease: featuregate.Alpha}, - - WinOverlay: {Default: true, PreRelease: featuregate.Beta}, - - WindowsHostNetwork: {Default: true, PreRelease: featuregate.Alpha}, - - NodeInclusionPolicyInPodTopologySpread: {Default: true, PreRelease: featuregate.Beta}, - - SELinuxMountReadWriteOncePod: {Default: true, PreRelease: featuregate.Beta}, - - InPlacePodVerticalScaling: {Default: false, PreRelease: featuregate.Alpha}, - - PodIndexLabel: {Default: true, PreRelease: featuregate.Beta}, - - LoadBalancerIPMode: {Default: true, PreRelease: featuregate.Beta}, - - UserNamespacesPodSecurityStandards: {Default: false, PreRelease: featuregate.Alpha}, - - SELinuxMount: {Default: false, PreRelease: featuregate.Alpha}, - - SupplementalGroupsPolicy: {Default: false, PreRelease: featuregate.Alpha}, - - ImageVolume: {Default: false, PreRelease: featuregate.Alpha}, - - // inherited features from generic apiserver, relisted here to get a conflict if it is changed - // unintentionally on either side: - - genericfeatures.AdmissionWebhookMatchConditions: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.33 - - genericfeatures.AggregatedDiscoveryEndpoint: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.33 - - 
genericfeatures.AnonymousAuthConfigurableEndpoints: {Default: false, PreRelease: featuregate.Alpha}, - - genericfeatures.APIListChunking: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.32 - - genericfeatures.APIResponseCompression: {Default: true, PreRelease: featuregate.Beta}, - - genericfeatures.APIServerIdentity: {Default: true, PreRelease: featuregate.Beta}, - - genericfeatures.APIServerTracing: {Default: true, PreRelease: featuregate.Beta}, - - genericfeatures.APIServingWithRoutine: {Default: false, PreRelease: featuregate.Alpha}, - - genericfeatures.AuthorizeWithSelectors: {Default: false, PreRelease: featuregate.Alpha}, - - genericfeatures.ConcurrentWatchObjectDecode: {Default: false, PreRelease: featuregate.Beta}, - - genericfeatures.ConsistentListFromCache: {Default: true, PreRelease: featuregate.Beta}, - - genericfeatures.CoordinatedLeaderElection: {Default: false, PreRelease: featuregate.Alpha}, - - genericfeatures.EfficientWatchResumption: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, - - genericfeatures.KMSv1: {Default: false, PreRelease: featuregate.Deprecated}, - - genericfeatures.KMSv2: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.31 - - genericfeatures.KMSv2KDF: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.31 - - genericfeatures.MutatingAdmissionPolicy: {Default: false, PreRelease: featuregate.Alpha}, - - genericfeatures.OpenAPIEnums: {Default: true, PreRelease: featuregate.Beta}, - - genericfeatures.RemainingItemCount: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, - - genericfeatures.ResilientWatchCacheInitialization: {Default: true, PreRelease: featuregate.Beta}, - - genericfeatures.SeparateCacheWatchRPC: {Default: true, PreRelease: featuregate.Beta}, - - genericfeatures.ServerSideApply: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.29 - - genericfeatures.ServerSideFieldValidation: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.29 - - genericfeatures.StorageVersionAPI: {Default: false, PreRelease: featuregate.Alpha}, - - genericfeatures.StorageVersionHash: {Default: true, PreRelease: featuregate.Beta}, - - genericfeatures.StrictCostEnforcementForVAP: {Default: false, PreRelease: featuregate.Beta}, - - genericfeatures.StrictCostEnforcementForWebhooks: {Default: false, PreRelease: featuregate.Beta}, - - genericfeatures.StructuredAuthenticationConfiguration: {Default: true, PreRelease: featuregate.Beta}, - - genericfeatures.StructuredAuthorizationConfiguration: {Default: true, PreRelease: featuregate.Beta}, - - genericfeatures.UnauthenticatedHTTP2DOSMitigation: {Default: true, PreRelease: featuregate.Beta}, - - genericfeatures.ValidatingAdmissionPolicy: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.32 - - genericfeatures.WatchBookmark: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, - - genericfeatures.WatchCacheInitializationPostStartHook: {Default: false, PreRelease: featuregate.Beta}, - - genericfeatures.WatchFromStorageWithoutResourceVersion: {Default: false, PreRelease: featuregate.Beta}, - - genericfeatures.WatchList: {Default: false, PreRelease: featuregate.Alpha}, - - genericfeatures.ZeroLimitedNominalConcurrencyShares: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.32 - - // inherited features from apiextensions-apiserver, relisted here to get a conflict if it is 
changed - // unintentionally on either side: - - apiextensionsfeatures.CRDValidationRatcheting: {Default: true, PreRelease: featuregate.Beta}, - - apiextensionsfeatures.CustomResourceFieldSelectors: {Default: true, PreRelease: featuregate.Beta}, - - // features that enable backwards compatibility but are scheduled to be removed - // ... - HPAScaleToZero: {Default: false, PreRelease: featuregate.Alpha}, - - AllowInsecureKubeletCertificateSigningRequests: {Default: false, PreRelease: featuregate.Deprecated}, // remove in 1.33 - - StorageNamespaceIndex: {Default: true, PreRelease: featuregate.Beta}, - - RecursiveReadOnlyMounts: {Default: true, PreRelease: featuregate.Beta}, -} +var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureSpec{} diff --git a/vendor/k8s.io/kubernetes/pkg/features/versioned_kube_features.go b/vendor/k8s.io/kubernetes/pkg/features/versioned_kube_features.go index 7ef538618..30406ae3e 100644 --- a/vendor/k8s.io/kubernetes/pkg/features/versioned_kube_features.go +++ b/vendor/k8s.io/kubernetes/pkg/features/versioned_kube_features.go @@ -17,18 +17,825 @@ limitations under the License. package features import ( + apiextensionsfeatures "k8s.io/apiextensions-apiserver/pkg/features" + "k8s.io/apimachinery/pkg/util/version" + genericfeatures "k8s.io/apiserver/pkg/features" "k8s.io/component-base/featuregate" + zpagesfeatures "k8s.io/component-base/zpages/features" + kcmfeatures "k8s.io/controller-manager/pkg/features" ) // defaultVersionedKubernetesFeatureGates consists of all known Kubernetes-specific feature keys with VersionedSpecs. -// To add a new feature, define a key for it and add it here. The features will be +// To add a new feature, define a key for it in pkg/features/kube_features.go and add it here. The features will be // available throughout Kubernetes binaries. +// For features available via specific kubernetes components like apiserver, +// cloud-controller-manager, etc find the respective kube_features.go file +// (eg:staging/src/apiserver/pkg/features/kube_features.go), define the versioned +// feature gate there, and reference it in this file. +// To support n-3 compatibility version, features may only be removed 3 releases after graduation. // -// Entries are separated from each other with blank lines to avoid sweeping gofmt changes -// when adding or removing one entry. +// Entries are alphabetized. 
var defaultVersionedKubernetesFeatureGates = map[featuregate.Feature]featuregate.VersionedSpecs{ - // Example: - // genericfeatures.EmulationVersion: { - // {Version: version.MustParse("1.30"), Default: false, PreRelease: featuregate.Alpha}, - // }, + AllowDNSOnlyNodeCSR: { + {Version: version.MustParse("1.31"), Default: false, PreRelease: featuregate.Deprecated}, + }, + + AllowInsecureKubeletCertificateSigningRequests: { + {Version: version.MustParse("1.31"), Default: false, PreRelease: featuregate.Deprecated}, + }, + + AllowOverwriteTerminationGracePeriodSeconds: { + {Version: version.MustParse("1.32"), Default: false, PreRelease: featuregate.Deprecated}, + }, + + AllowServiceLBStatusOnNonLB: { + {Version: version.MustParse("1.29"), Default: false, PreRelease: featuregate.Deprecated}, + {Version: version.MustParse("1.32"), Default: false, PreRelease: featuregate.Deprecated, LockToDefault: true}, // remove in 1.35 + }, + + AnyVolumeDataSource: { + {Version: version.MustParse("1.18"), Default: false, PreRelease: featuregate.Alpha}, + {Version: version.MustParse("1.24"), Default: true, PreRelease: featuregate.Beta}, + }, + + AppArmor: { + {Version: version.MustParse("1.4"), Default: true, PreRelease: featuregate.Beta}, + {Version: version.MustParse("1.31"), Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.33 + }, + + AppArmorFields: { + {Version: version.MustParse("1.30"), Default: true, PreRelease: featuregate.Beta}, + {Version: version.MustParse("1.31"), Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.33 + }, + + AuthorizeNodeWithSelectors: { + {Version: version.MustParse("1.31"), Default: false, PreRelease: featuregate.Alpha}, + {Version: version.MustParse("1.32"), Default: true, PreRelease: featuregate.Beta}, + }, + + kcmfeatures.CloudControllerManagerWebhook: { + {Version: version.MustParse("1.27"), Default: false, PreRelease: featuregate.Alpha}, + }, + + ClusterTrustBundle: { + {Version: version.MustParse("1.27"), Default: false, PreRelease: featuregate.Alpha}, + }, + + ClusterTrustBundleProjection: { + {Version: version.MustParse("1.29"), Default: false, PreRelease: featuregate.Alpha}, + }, + + ContainerCheckpoint: { + {Version: version.MustParse("1.25"), Default: false, PreRelease: featuregate.Alpha}, + {Version: version.MustParse("1.30"), Default: true, PreRelease: featuregate.Beta}, + }, + + CPUCFSQuotaPeriod: { + {Version: version.MustParse("1.12"), Default: false, PreRelease: featuregate.Alpha}, + }, + + CPUManager: { + {Version: version.MustParse("1.8"), Default: false, PreRelease: featuregate.Alpha}, + {Version: version.MustParse("1.10"), Default: true, PreRelease: featuregate.Beta}, + {Version: version.MustParse("1.26"), Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // GA in 1.26 + }, + + CPUManagerPolicyAlphaOptions: { + {Version: version.MustParse("1.23"), Default: false, PreRelease: featuregate.Alpha}, + }, + + CPUManagerPolicyBetaOptions: { + {Version: version.MustParse("1.23"), Default: true, PreRelease: featuregate.Beta}, + }, + + CPUManagerPolicyOptions: { + {Version: version.MustParse("1.22"), Default: false, PreRelease: featuregate.Alpha}, + {Version: version.MustParse("1.23"), Default: true, PreRelease: featuregate.Beta}, + }, + + CronJobsScheduledAnnotation: { + {Version: version.MustParse("1.28"), Default: true, PreRelease: featuregate.Beta}, + {Version: version.MustParse("1.32"), Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.35 + }, + + // inherited 
features from apiextensions-apiserver, relisted here to get a conflict if it is changed + // unintentionally on either side: + apiextensionsfeatures.CRDValidationRatcheting: { + {Version: version.MustParse("1.28"), Default: false, PreRelease: featuregate.Alpha}, + {Version: version.MustParse("1.30"), Default: true, PreRelease: featuregate.Beta}, + }, + + CrossNamespaceVolumeDataSource: { + {Version: version.MustParse("1.26"), Default: false, PreRelease: featuregate.Alpha}, + }, + + CSIMigrationPortworx: { + {Version: version.MustParse("1.23"), Default: false, PreRelease: featuregate.Alpha}, + {Version: version.MustParse("1.25"), Default: false, PreRelease: featuregate.Beta}, + {Version: version.MustParse("1.31"), Default: true, PreRelease: featuregate.Beta}, // On by default (requires Portworx CSI driver) + }, + + CSIVolumeHealth: { + {Version: version.MustParse("1.21"), Default: false, PreRelease: featuregate.Alpha}, + }, + + // inherited features from apiextensions-apiserver, relisted here to get a conflict if it is changed + // unintentionally on either side: + apiextensionsfeatures.CustomResourceFieldSelectors: { + {Version: version.MustParse("1.30"), Default: false, PreRelease: featuregate.Alpha}, + {Version: version.MustParse("1.31"), Default: true, PreRelease: featuregate.Beta}, + {Version: version.MustParse("1.32"), Default: true, LockToDefault: true, PreRelease: featuregate.GA}, + }, + + DevicePluginCDIDevices: { + {Version: version.MustParse("1.28"), Default: false, PreRelease: featuregate.Alpha}, + {Version: version.MustParse("1.29"), Default: true, PreRelease: featuregate.Beta}, + {Version: version.MustParse("1.31"), Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.33 + }, + + DisableAllocatorDualWrite: { + {Version: version.MustParse("1.31"), Default: false, PreRelease: featuregate.Alpha}, // remove after MultiCIDRServiceAllocator is GA + }, + + DisableCloudProviders: { + {Version: version.MustParse("1.22"), Default: false, PreRelease: featuregate.Alpha}, + {Version: version.MustParse("1.29"), Default: true, PreRelease: featuregate.Beta}, + {Version: version.MustParse("1.31"), Default: true, PreRelease: featuregate.GA, LockToDefault: true}, + }, + + DisableKubeletCloudCredentialProviders: { + {Version: version.MustParse("1.23"), Default: false, PreRelease: featuregate.Alpha}, + {Version: version.MustParse("1.29"), Default: true, PreRelease: featuregate.Beta}, + {Version: version.MustParse("1.31"), Default: true, PreRelease: featuregate.GA, LockToDefault: true}, + }, + + DisableNodeKubeProxyVersion: { + {Version: version.MustParse("1.29"), Default: false, PreRelease: featuregate.Alpha}, + {Version: version.MustParse("1.31"), Default: false, PreRelease: featuregate.Deprecated}, + }, + + DRAAdminAccess: { + {Version: version.MustParse("1.32"), Default: false, PreRelease: featuregate.Alpha}, + }, + + DynamicResourceAllocation: { + {Version: version.MustParse("1.26"), Default: false, PreRelease: featuregate.Alpha}, + {Version: version.MustParse("1.32"), Default: false, PreRelease: featuregate.Beta}, + }, + + DRAResourceClaimDeviceStatus: { + {Version: version.MustParse("1.32"), Default: false, PreRelease: featuregate.Alpha}, + }, + + KubeletCrashLoopBackOffMax: { + {Version: version.MustParse("1.32"), Default: false, PreRelease: featuregate.Alpha}, + }, + + ElasticIndexedJob: { + {Version: version.MustParse("1.27"), Default: true, PreRelease: featuregate.Beta}, + {Version: version.MustParse("1.31"), Default: true, PreRelease: featuregate.GA, 
LockToDefault: true}, // GA in 1.31, remove in 1.32 + }, + + EventedPLEG: { + {Version: version.MustParse("1.26"), Default: false, PreRelease: featuregate.Alpha}, + }, + + ExecProbeTimeout: { + {Version: version.MustParse("1.20"), Default: true, PreRelease: featuregate.GA}, // lock to default and remove after v1.22 based on KEP #1972 update + }, + + ExternalServiceAccountTokenSigner: { + {Version: version.MustParse("1.32"), Default: false, PreRelease: featuregate.Alpha}, + }, + + genericfeatures.AdmissionWebhookMatchConditions: { + {Version: version.MustParse("1.27"), Default: false, PreRelease: featuregate.Alpha}, + {Version: version.MustParse("1.28"), Default: true, PreRelease: featuregate.Beta}, + {Version: version.MustParse("1.30"), Default: true, PreRelease: featuregate.GA, LockToDefault: true}, + }, + + genericfeatures.AggregatedDiscoveryEndpoint: { + {Version: version.MustParse("1.26"), Default: false, PreRelease: featuregate.Alpha}, + {Version: version.MustParse("1.27"), Default: true, PreRelease: featuregate.Beta}, + {Version: version.MustParse("1.30"), Default: true, PreRelease: featuregate.GA, LockToDefault: true}, + }, + + genericfeatures.AllowUnsafeMalformedObjectDeletion: { + {Version: version.MustParse("1.32"), Default: false, PreRelease: featuregate.Alpha}, + }, + + genericfeatures.AnonymousAuthConfigurableEndpoints: { + {Version: version.MustParse("1.31"), Default: false, PreRelease: featuregate.Alpha}, + {Version: version.MustParse("1.32"), Default: true, PreRelease: featuregate.Beta}, + }, + + genericfeatures.APIListChunking: { + {Version: version.MustParse("1.8"), Default: false, PreRelease: featuregate.Alpha}, + {Version: version.MustParse("1.9"), Default: true, PreRelease: featuregate.Beta}, + {Version: version.MustParse("1.29"), Default: true, PreRelease: featuregate.GA, LockToDefault: true}, + }, + + genericfeatures.APIResponseCompression: { + {Version: version.MustParse("1.8"), Default: false, PreRelease: featuregate.Alpha}, + {Version: version.MustParse("1.16"), Default: true, PreRelease: featuregate.Beta}, + }, + + genericfeatures.APIServerIdentity: { + {Version: version.MustParse("1.20"), Default: false, PreRelease: featuregate.Alpha}, + {Version: version.MustParse("1.26"), Default: true, PreRelease: featuregate.Beta}, + }, + + genericfeatures.APIServerTracing: { + {Version: version.MustParse("1.22"), Default: false, PreRelease: featuregate.Alpha}, + {Version: version.MustParse("1.27"), Default: true, PreRelease: featuregate.Beta}, + }, + + genericfeatures.APIServingWithRoutine: { + {Version: version.MustParse("1.30"), Default: false, PreRelease: featuregate.Alpha}, + }, + + genericfeatures.AuthorizeWithSelectors: { + {Version: version.MustParse("1.31"), Default: false, PreRelease: featuregate.Alpha}, + {Version: version.MustParse("1.32"), Default: true, PreRelease: featuregate.Beta}, + }, + + genericfeatures.BtreeWatchCache: { + {Version: version.MustParse("1.32"), Default: true, PreRelease: featuregate.Beta}, + }, + + genericfeatures.CBORServingAndStorage: { + {Version: version.MustParse("1.32"), Default: false, PreRelease: featuregate.Alpha}, + }, + + genericfeatures.ConcurrentWatchObjectDecode: { + {Version: version.MustParse("1.31"), Default: false, PreRelease: featuregate.Beta}, + }, + + genericfeatures.ConsistentListFromCache: { + {Version: version.MustParse("1.28"), Default: false, PreRelease: featuregate.Alpha}, + {Version: version.MustParse("1.31"), Default: true, PreRelease: featuregate.Beta}, + }, + + genericfeatures.CoordinatedLeaderElection: { + 
{Version: version.MustParse("1.31"), Default: false, PreRelease: featuregate.Alpha}, + }, + + genericfeatures.EfficientWatchResumption: { + {Version: version.MustParse("1.20"), Default: false, PreRelease: featuregate.Alpha}, + {Version: version.MustParse("1.21"), Default: true, PreRelease: featuregate.Beta}, + {Version: version.MustParse("1.24"), Default: true, PreRelease: featuregate.GA, LockToDefault: true}, + }, + + genericfeatures.KMSv1: { + {Version: version.MustParse("1.28"), Default: true, PreRelease: featuregate.Deprecated}, + {Version: version.MustParse("1.29"), Default: false, PreRelease: featuregate.Deprecated}, + }, + + genericfeatures.MutatingAdmissionPolicy: { + {Version: version.MustParse("1.32"), Default: false, PreRelease: featuregate.Alpha}, + }, + + genericfeatures.OpenAPIEnums: { + {Version: version.MustParse("1.23"), Default: false, PreRelease: featuregate.Alpha}, + {Version: version.MustParse("1.24"), Default: true, PreRelease: featuregate.Beta}, + }, + + genericfeatures.RemainingItemCount: { + {Version: version.MustParse("1.15"), Default: false, PreRelease: featuregate.Alpha}, + {Version: version.MustParse("1.16"), Default: true, PreRelease: featuregate.Beta}, + {Version: version.MustParse("1.29"), Default: true, PreRelease: featuregate.GA, LockToDefault: true}, + }, + + genericfeatures.RemoteRequestHeaderUID: { + {Version: version.MustParse("1.32"), Default: false, PreRelease: featuregate.Alpha}, + }, + + genericfeatures.ResilientWatchCacheInitialization: { + {Version: version.MustParse("1.31"), Default: true, PreRelease: featuregate.Beta}, + }, + + genericfeatures.RetryGenerateName: { + {Version: version.MustParse("1.30"), Default: false, PreRelease: featuregate.Alpha}, + {Version: version.MustParse("1.31"), Default: true, PreRelease: featuregate.Beta}, + {Version: version.MustParse("1.32"), Default: true, LockToDefault: true, PreRelease: featuregate.GA}, + }, + + genericfeatures.SeparateCacheWatchRPC: { + {Version: version.MustParse("1.28"), Default: true, PreRelease: featuregate.Beta}, + }, + + genericfeatures.StorageVersionAPI: { + {Version: version.MustParse("1.20"), Default: false, PreRelease: featuregate.Alpha}, + }, + + genericfeatures.StorageVersionHash: { + {Version: version.MustParse("1.14"), Default: false, PreRelease: featuregate.Alpha}, + {Version: version.MustParse("1.15"), Default: true, PreRelease: featuregate.Beta}, + }, + + genericfeatures.StrictCostEnforcementForVAP: { + {Version: version.MustParse("1.30"), Default: false, PreRelease: featuregate.Beta}, + {Version: version.MustParse("1.32"), Default: true, PreRelease: featuregate.GA, LockToDefault: true}, + }, + + genericfeatures.StrictCostEnforcementForWebhooks: { + {Version: version.MustParse("1.30"), Default: false, PreRelease: featuregate.Beta}, + {Version: version.MustParse("1.32"), Default: true, PreRelease: featuregate.GA, LockToDefault: true}, + }, + + genericfeatures.StructuredAuthenticationConfiguration: { + {Version: version.MustParse("1.29"), Default: false, PreRelease: featuregate.Alpha}, + {Version: version.MustParse("1.30"), Default: true, PreRelease: featuregate.Beta}, + }, + + genericfeatures.StructuredAuthorizationConfiguration: { + {Version: version.MustParse("1.29"), Default: false, PreRelease: featuregate.Alpha}, + {Version: version.MustParse("1.30"), Default: true, PreRelease: featuregate.Beta}, + {Version: version.MustParse("1.32"), Default: true, PreRelease: featuregate.GA, LockToDefault: true}, + }, + + genericfeatures.UnauthenticatedHTTP2DOSMitigation: { + {Version: 
version.MustParse("1.25"), Default: false, PreRelease: featuregate.Beta}, + {Version: version.MustParse("1.29"), Default: true, PreRelease: featuregate.Beta}, + }, + + genericfeatures.WatchBookmark: { + {Version: version.MustParse("1.15"), Default: false, PreRelease: featuregate.Alpha}, + {Version: version.MustParse("1.16"), Default: true, PreRelease: featuregate.Beta}, + {Version: version.MustParse("1.17"), Default: true, PreRelease: featuregate.GA, LockToDefault: true}, + }, + + genericfeatures.WatchCacheInitializationPostStartHook: { + {Version: version.MustParse("1.31"), Default: false, PreRelease: featuregate.Beta}, + }, + + genericfeatures.WatchFromStorageWithoutResourceVersion: { + {Version: version.MustParse("1.27"), Default: false, PreRelease: featuregate.Beta}, + }, + + genericfeatures.WatchList: { + {Version: version.MustParse("1.27"), Default: false, PreRelease: featuregate.Alpha}, + {Version: version.MustParse("1.32"), Default: true, PreRelease: featuregate.Beta}, + }, + + GracefulNodeShutdown: { + {Version: version.MustParse("1.20"), Default: false, PreRelease: featuregate.Alpha}, + {Version: version.MustParse("1.21"), Default: true, PreRelease: featuregate.Beta}, + }, + + GracefulNodeShutdownBasedOnPodPriority: { + {Version: version.MustParse("1.23"), Default: false, PreRelease: featuregate.Alpha}, + {Version: version.MustParse("1.24"), Default: true, PreRelease: featuregate.Beta}, + }, + + HonorPVReclaimPolicy: { + {Version: version.MustParse("1.23"), Default: false, PreRelease: featuregate.Alpha}, + {Version: version.MustParse("1.31"), Default: true, PreRelease: featuregate.Beta}, + }, + + HPAScaleToZero: { + {Version: version.MustParse("1.16"), Default: false, PreRelease: featuregate.Alpha}, + }, + + ImageMaximumGCAge: { + {Version: version.MustParse("1.29"), Default: false, PreRelease: featuregate.Alpha}, + {Version: version.MustParse("1.30"), Default: true, PreRelease: featuregate.Beta}, + }, + + ImageVolume: { + {Version: version.MustParse("1.31"), Default: false, PreRelease: featuregate.Alpha}, + }, + + InPlacePodVerticalScaling: { + {Version: version.MustParse("1.27"), Default: false, PreRelease: featuregate.Alpha}, + }, + + InPlacePodVerticalScalingAllocatedStatus: { + {Version: version.MustParse("1.32"), Default: false, PreRelease: featuregate.Alpha}, + }, + + InPlacePodVerticalScalingExclusiveCPUs: { + {Version: version.MustParse("1.32"), Default: false, PreRelease: featuregate.Alpha}, + }, + + InTreePluginPortworxUnregister: { + {Version: version.MustParse("1.23"), Default: false, PreRelease: featuregate.Alpha}, + }, + + JobBackoffLimitPerIndex: { + {Version: version.MustParse("1.28"), Default: false, PreRelease: featuregate.Alpha}, + {Version: version.MustParse("1.29"), Default: true, PreRelease: featuregate.Beta}, + }, + + JobManagedBy: { + {Version: version.MustParse("1.30"), Default: false, PreRelease: featuregate.Alpha}, + {Version: version.MustParse("1.32"), Default: true, PreRelease: featuregate.Beta}, + }, + + JobPodFailurePolicy: { + {Version: version.MustParse("1.25"), Default: false, PreRelease: featuregate.Alpha}, + {Version: version.MustParse("1.26"), Default: true, PreRelease: featuregate.Beta}, + {Version: version.MustParse("1.31"), Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.33 + }, + + JobPodReplacementPolicy: { + {Version: version.MustParse("1.28"), Default: false, PreRelease: featuregate.Alpha}, + {Version: version.MustParse("1.29"), Default: true, PreRelease: featuregate.Beta}, + }, + + JobSuccessPolicy: { + 
{Version: version.MustParse("1.30"), Default: false, PreRelease: featuregate.Alpha}, + {Version: version.MustParse("1.31"), Default: true, PreRelease: featuregate.Beta}, + }, + + KubeletCgroupDriverFromCRI: { + {Version: version.MustParse("1.28"), Default: false, PreRelease: featuregate.Alpha}, + {Version: version.MustParse("1.31"), Default: true, PreRelease: featuregate.Beta}, + }, + + KubeletFineGrainedAuthz: { + {Version: version.MustParse("1.32"), Default: false, PreRelease: featuregate.Alpha}, + }, + + KubeletInUserNamespace: { + {Version: version.MustParse("1.22"), Default: false, PreRelease: featuregate.Alpha}, + }, + + KubeletPodResourcesDynamicResources: { + {Version: version.MustParse("1.27"), Default: false, PreRelease: featuregate.Alpha}, + }, + + KubeletPodResourcesGet: { + {Version: version.MustParse("1.27"), Default: false, PreRelease: featuregate.Alpha}, + }, + + KubeletRegistrationGetOnExistsOnly: { + {Version: version.MustParse("1.32"), Default: false, PreRelease: featuregate.Deprecated}, + }, + + KubeletSeparateDiskGC: { + {Version: version.MustParse("1.29"), Default: false, PreRelease: featuregate.Alpha}, + {Version: version.MustParse("1.31"), Default: true, PreRelease: featuregate.Beta}, + }, + + KubeletTracing: { + {Version: version.MustParse("1.25"), Default: false, PreRelease: featuregate.Alpha}, + {Version: version.MustParse("1.27"), Default: true, PreRelease: featuregate.Beta}, + }, + + KubeProxyDrainingTerminatingNodes: { + {Version: version.MustParse("1.28"), Default: false, PreRelease: featuregate.Alpha}, + {Version: version.MustParse("1.30"), Default: true, PreRelease: featuregate.Beta}, + {Version: version.MustParse("1.31"), Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // GA in 1.31; remove in 1.33 + }, + + LoadBalancerIPMode: { + {Version: version.MustParse("1.29"), Default: false, PreRelease: featuregate.Alpha}, + {Version: version.MustParse("1.30"), Default: true, PreRelease: featuregate.Beta}, + {Version: version.MustParse("1.32"), Default: true, PreRelease: featuregate.GA, LockToDefault: true}, + }, + + LocalStorageCapacityIsolationFSQuotaMonitoring: { + {Version: version.MustParse("1.15"), Default: false, PreRelease: featuregate.Alpha}, + {Version: version.MustParse("1.31"), Default: false, PreRelease: featuregate.Beta}, + }, + + LogarithmicScaleDown: { + {Version: version.MustParse("1.21"), Default: false, PreRelease: featuregate.Alpha}, + {Version: version.MustParse("1.22"), Default: true, PreRelease: featuregate.Beta}, + {Version: version.MustParse("1.31"), Default: true, PreRelease: featuregate.GA, LockToDefault: true}, + }, + + MatchLabelKeysInPodAffinity: { + {Version: version.MustParse("1.29"), Default: false, PreRelease: featuregate.Alpha}, + {Version: version.MustParse("1.31"), Default: true, PreRelease: featuregate.Beta}, + }, + + MatchLabelKeysInPodTopologySpread: { + {Version: version.MustParse("1.25"), Default: false, PreRelease: featuregate.Alpha}, + {Version: version.MustParse("1.27"), Default: true, PreRelease: featuregate.Beta}, + }, + + MaxUnavailableStatefulSet: { + {Version: version.MustParse("1.24"), Default: false, PreRelease: featuregate.Alpha}, + }, + + MemoryManager: { + {Version: version.MustParse("1.21"), Default: false, PreRelease: featuregate.Alpha}, + {Version: version.MustParse("1.22"), Default: true, PreRelease: featuregate.Beta}, + {Version: version.MustParse("1.32"), Default: true, PreRelease: featuregate.GA, LockToDefault: true}, + }, + + MemoryQoS: { + {Version: version.MustParse("1.22"), Default: 
false, PreRelease: featuregate.Alpha}, + }, + + MultiCIDRServiceAllocator: { + {Version: version.MustParse("1.27"), Default: false, PreRelease: featuregate.Alpha}, + {Version: version.MustParse("1.31"), Default: false, PreRelease: featuregate.Beta}, + }, + + NFTablesProxyMode: { + {Version: version.MustParse("1.29"), Default: false, PreRelease: featuregate.Alpha}, + {Version: version.MustParse("1.31"), Default: true, PreRelease: featuregate.Beta}, + }, + + NodeInclusionPolicyInPodTopologySpread: { + {Version: version.MustParse("1.25"), Default: false, PreRelease: featuregate.Alpha}, + {Version: version.MustParse("1.26"), Default: true, PreRelease: featuregate.Beta}, + }, + + NodeLogQuery: { + {Version: version.MustParse("1.27"), Default: false, PreRelease: featuregate.Alpha}, + {Version: version.MustParse("1.30"), Default: false, PreRelease: featuregate.Beta}, + }, + + NodeSwap: { + {Version: version.MustParse("1.22"), Default: false, PreRelease: featuregate.Alpha}, + {Version: version.MustParse("1.28"), Default: false, PreRelease: featuregate.Beta}, + {Version: version.MustParse("1.30"), Default: true, PreRelease: featuregate.Beta}, + }, + + OrderedNamespaceDeletion: { + {Version: version.MustParse("1.30"), Default: false, PreRelease: featuregate.Beta}, + }, + + PDBUnhealthyPodEvictionPolicy: { + {Version: version.MustParse("1.26"), Default: false, PreRelease: featuregate.Alpha}, + {Version: version.MustParse("1.27"), Default: true, PreRelease: featuregate.Beta}, + {Version: version.MustParse("1.31"), Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.33 + }, + + PersistentVolumeLastPhaseTransitionTime: { + {Version: version.MustParse("1.28"), Default: false, PreRelease: featuregate.Alpha}, + {Version: version.MustParse("1.29"), Default: true, PreRelease: featuregate.Beta}, + {Version: version.MustParse("1.31"), Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.33 + }, + + PodAndContainerStatsFromCRI: { + {Version: version.MustParse("1.23"), Default: false, PreRelease: featuregate.Alpha}, + }, + + PodDeletionCost: { + {Version: version.MustParse("1.21"), Default: false, PreRelease: featuregate.Alpha}, + {Version: version.MustParse("1.22"), Default: true, PreRelease: featuregate.Beta}, + }, + + PodDisruptionConditions: { + {Version: version.MustParse("1.25"), Default: false, PreRelease: featuregate.Alpha}, + {Version: version.MustParse("1.26"), Default: true, PreRelease: featuregate.Beta}, + {Version: version.MustParse("1.31"), Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.33 + }, + + PodIndexLabel: { + {Version: version.MustParse("1.28"), Default: true, PreRelease: featuregate.Beta}, + {Version: version.MustParse("1.32"), Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.35 + }, + + PodLevelResources: { + {Version: version.MustParse("1.32"), Default: false, PreRelease: featuregate.Alpha}, + }, + + PodLifecycleSleepAction: { + {Version: version.MustParse("1.29"), Default: false, PreRelease: featuregate.Alpha}, + {Version: version.MustParse("1.30"), Default: true, PreRelease: featuregate.Beta}, + }, + PodReadyToStartContainersCondition: { + {Version: version.MustParse("1.28"), Default: false, PreRelease: featuregate.Alpha}, + {Version: version.MustParse("1.29"), Default: true, PreRelease: featuregate.Beta}, + }, + PodLifecycleSleepActionAllowZero: { + {Version: version.MustParse("1.32"), Default: false, PreRelease: featuregate.Alpha}, + }, + PodSchedulingReadiness: { + 
{Version: version.MustParse("1.26"), Default: false, PreRelease: featuregate.Alpha}, + {Version: version.MustParse("1.27"), Default: true, PreRelease: featuregate.Beta}, + {Version: version.MustParse("1.30"), Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // GA in 1.30; remove in 1.32 + }, + + PortForwardWebsockets: { + {Version: version.MustParse("1.30"), Default: false, PreRelease: featuregate.Alpha}, + {Version: version.MustParse("1.31"), Default: true, PreRelease: featuregate.Beta}, + }, + + ProcMountType: { + {Version: version.MustParse("1.12"), Default: false, PreRelease: featuregate.Alpha}, + {Version: version.MustParse("1.31"), Default: false, PreRelease: featuregate.Beta}, + }, + + QOSReserved: { + {Version: version.MustParse("1.11"), Default: false, PreRelease: featuregate.Alpha}, + }, + + RecoverVolumeExpansionFailure: { + {Version: version.MustParse("1.23"), Default: false, PreRelease: featuregate.Alpha}, + {Version: version.MustParse("1.32"), Default: true, PreRelease: featuregate.Beta}, + }, + RecursiveReadOnlyMounts: { + {Version: version.MustParse("1.30"), Default: false, PreRelease: featuregate.Alpha}, + {Version: version.MustParse("1.31"), Default: true, PreRelease: featuregate.Beta}, + }, + + RelaxedDNSSearchValidation: { + {Version: version.MustParse("1.32"), Default: false, PreRelease: featuregate.Alpha}, + }, + + RelaxedEnvironmentVariableValidation: { + {Version: version.MustParse("1.30"), Default: false, PreRelease: featuregate.Alpha}, + {Version: version.MustParse("1.32"), Default: true, PreRelease: featuregate.Beta}, + }, + + ReloadKubeletServerCertificateFile: { + {Version: version.MustParse("1.31"), Default: true, PreRelease: featuregate.Beta}, + }, + + ResourceHealthStatus: { + {Version: version.MustParse("1.31"), Default: false, PreRelease: featuregate.Alpha}, + }, + + RotateKubeletServerCertificate: { + {Version: version.MustParse("1.7"), Default: false, PreRelease: featuregate.Alpha}, + {Version: version.MustParse("1.12"), Default: true, PreRelease: featuregate.Beta}, + }, + + RuntimeClassInImageCriAPI: { + {Version: version.MustParse("1.29"), Default: false, PreRelease: featuregate.Alpha}, + }, + + SchedulerAsyncPreemption: { + {Version: version.MustParse("1.32"), Default: false, PreRelease: featuregate.Alpha}, + }, + + SchedulerQueueingHints: { + {Version: version.MustParse("1.28"), Default: false, PreRelease: featuregate.Beta}, + {Version: version.MustParse("1.32"), Default: true, PreRelease: featuregate.Beta}, + }, + + SELinuxChangePolicy: { + {Version: version.MustParse("1.32"), Default: false, PreRelease: featuregate.Alpha}, + }, + + SELinuxMount: { + {Version: version.MustParse("1.30"), Default: false, PreRelease: featuregate.Alpha}, + }, + + SELinuxMountReadWriteOncePod: { + {Version: version.MustParse("1.25"), Default: false, PreRelease: featuregate.Alpha}, + {Version: version.MustParse("1.27"), Default: false, PreRelease: featuregate.Beta}, + {Version: version.MustParse("1.28"), Default: true, PreRelease: featuregate.Beta}, + }, + + SeparateTaintEvictionController: { + {Version: version.MustParse("1.29"), Default: true, PreRelease: featuregate.Beta}, + }, + + StorageNamespaceIndex: { + {Version: version.MustParse("1.30"), Default: true, PreRelease: featuregate.Beta}, + }, + + ServiceAccountNodeAudienceRestriction: { + {Version: version.MustParse("1.32"), Default: false, PreRelease: featuregate.Beta}, + }, + + ServiceAccountTokenJTI: { + {Version: version.MustParse("1.29"), Default: false, PreRelease: featuregate.Alpha}, + 
{Version: version.MustParse("1.30"), Default: true, PreRelease: featuregate.Beta}, + {Version: version.MustParse("1.32"), Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.34 + }, + + ServiceAccountTokenNodeBinding: { + {Version: version.MustParse("1.29"), Default: false, PreRelease: featuregate.Alpha}, + {Version: version.MustParse("1.31"), Default: true, PreRelease: featuregate.Beta}, + }, + + ServiceAccountTokenNodeBindingValidation: { + {Version: version.MustParse("1.29"), Default: false, PreRelease: featuregate.Alpha}, + {Version: version.MustParse("1.30"), Default: true, PreRelease: featuregate.Beta}, + {Version: version.MustParse("1.32"), Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.34 + }, + + ServiceAccountTokenPodNodeInfo: { + {Version: version.MustParse("1.29"), Default: false, PreRelease: featuregate.Alpha}, + {Version: version.MustParse("1.30"), Default: true, PreRelease: featuregate.Beta}, + {Version: version.MustParse("1.32"), Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.34 + }, + + ServiceTrafficDistribution: { + {Version: version.MustParse("1.30"), Default: false, PreRelease: featuregate.Alpha}, + {Version: version.MustParse("1.31"), Default: true, PreRelease: featuregate.Beta}, + }, + + SidecarContainers: { + {Version: version.MustParse("1.28"), Default: false, PreRelease: featuregate.Alpha}, + {Version: version.MustParse("1.29"), Default: true, PreRelease: featuregate.Beta}, + }, + + SizeMemoryBackedVolumes: { + {Version: version.MustParse("1.20"), Default: false, PreRelease: featuregate.Alpha}, + {Version: version.MustParse("1.22"), Default: true, PreRelease: featuregate.Beta}, + {Version: version.MustParse("1.32"), Default: true, LockToDefault: true, PreRelease: featuregate.GA}, + }, + + PodLogsQuerySplitStreams: { + {Version: version.MustParse("1.32"), Default: false, PreRelease: featuregate.Alpha}, + }, + + StatefulSetAutoDeletePVC: { + {Version: version.MustParse("1.23"), Default: false, PreRelease: featuregate.Alpha}, + {Version: version.MustParse("1.27"), Default: true, PreRelease: featuregate.Beta}, + {Version: version.MustParse("1.32"), Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // GA in 1.32, remove in 1.35 + }, + + StatefulSetStartOrdinal: { + {Version: version.MustParse("1.26"), Default: false, PreRelease: featuregate.Alpha}, + {Version: version.MustParse("1.27"), Default: true, PreRelease: featuregate.Beta}, + {Version: version.MustParse("1.31"), Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // GA in 1.31, remove in 1.33 + }, + + StorageVersionMigrator: { + {Version: version.MustParse("1.30"), Default: false, PreRelease: featuregate.Alpha}, + }, + + SupplementalGroupsPolicy: { + {Version: version.MustParse("1.31"), Default: false, PreRelease: featuregate.Alpha}, + }, + + SystemdWatchdog: { + {Version: version.MustParse("1.32"), Default: true, PreRelease: featuregate.Beta}, + }, + + TopologyAwareHints: { + {Version: version.MustParse("1.21"), Default: false, PreRelease: featuregate.Alpha}, + {Version: version.MustParse("1.23"), Default: false, PreRelease: featuregate.Beta}, + {Version: version.MustParse("1.24"), Default: true, PreRelease: featuregate.Beta}, + }, + + TopologyManagerPolicyAlphaOptions: { + {Version: version.MustParse("1.26"), Default: false, PreRelease: featuregate.Alpha}, + }, + + TopologyManagerPolicyBetaOptions: { + {Version: version.MustParse("1.26"), Default: false, PreRelease: featuregate.Beta}, + 
{Version: version.MustParse("1.28"), Default: true, PreRelease: featuregate.Beta}, + }, + + TopologyManagerPolicyOptions: { + {Version: version.MustParse("1.26"), Default: false, PreRelease: featuregate.Alpha}, + {Version: version.MustParse("1.28"), Default: true, PreRelease: featuregate.Beta}, + {Version: version.MustParse("1.32"), Default: true, PreRelease: featuregate.GA}, + }, + + TranslateStreamCloseWebsocketRequests: { + {Version: version.MustParse("1.29"), Default: false, PreRelease: featuregate.Alpha}, + {Version: version.MustParse("1.30"), Default: true, PreRelease: featuregate.Beta}, + }, + + UnknownVersionInteroperabilityProxy: { + {Version: version.MustParse("1.28"), Default: false, PreRelease: featuregate.Alpha}, + }, + + UserNamespacesPodSecurityStandards: { + {Version: version.MustParse("1.29"), Default: false, PreRelease: featuregate.Alpha}, + }, + + UserNamespacesSupport: { + {Version: version.MustParse("1.25"), Default: false, PreRelease: featuregate.Alpha}, + {Version: version.MustParse("1.30"), Default: false, PreRelease: featuregate.Beta}, + }, + + VolumeAttributesClass: { + {Version: version.MustParse("1.29"), Default: false, PreRelease: featuregate.Alpha}, + {Version: version.MustParse("1.31"), Default: false, PreRelease: featuregate.Beta}, + }, + + VolumeCapacityPriority: { + {Version: version.MustParse("1.21"), Default: false, PreRelease: featuregate.Alpha}, + }, + + WinDSR: { + {Version: version.MustParse("1.14"), Default: false, PreRelease: featuregate.Alpha}, + }, + WindowsGracefulNodeShutdown: { + {Version: version.MustParse("1.32"), Default: false, PreRelease: featuregate.Alpha}, + }, + WinOverlay: { + {Version: version.MustParse("1.14"), Default: false, PreRelease: featuregate.Alpha}, + {Version: version.MustParse("1.20"), Default: true, PreRelease: featuregate.Beta}, + }, + + WindowsCPUAndMemoryAffinity: { + {Version: version.MustParse("1.32"), Default: false, PreRelease: featuregate.Alpha}, + }, + + WindowsHostNetwork: { + {Version: version.MustParse("1.26"), Default: true, PreRelease: featuregate.Alpha}, + }, + + zpagesfeatures.ComponentFlagz: { + {Version: version.MustParse("1.32"), Default: false, PreRelease: featuregate.Alpha}, + }, + + zpagesfeatures.ComponentStatusz: { + {Version: version.MustParse("1.32"), Default: false, PreRelease: featuregate.Alpha}, + }, } diff --git a/vendor/k8s.io/utils/lru/lru.go b/vendor/k8s.io/utils/lru/lru.go index 47f135281..f0b67462f 100644 --- a/vendor/k8s.io/utils/lru/lru.go +++ b/vendor/k8s.io/utils/lru/lru.go @@ -16,6 +16,7 @@ limitations under the License. package lru import ( + "fmt" "sync" groupcache "k8s.io/utils/internal/third_party/forked/golang/golang-lru" @@ -44,6 +45,15 @@ func NewWithEvictionFunc(size int, f EvictionFunc) *Cache { return c } +// SetEvictionFunc updates the eviction func +func (c *Cache) SetEvictionFunc(f EvictionFunc) error { + if c.cache.OnEvicted != nil { + return fmt.Errorf("lru cache eviction function is already set") + } + c.cache.OnEvicted = f + return nil +} + // Add adds a value to the cache. 
func (c *Cache) Add(key Key, value interface{}) { c.lock.Lock() diff --git a/vendor/modules.txt b/vendor/modules.txt index a6eb195d1..eb7238d79 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1,6 +1,9 @@ # 4d63.com/gochecknoglobals v0.1.0 ## explicit; go 1.15 4d63.com/gochecknoglobals/checknoglobals +# cel.dev/expr v0.19.1 +## explicit; go 1.21.1 +cel.dev/expr # github.com/Abirdcfly/dupword v0.0.7 ## explicit; go 1.19 github.com/Abirdcfly/dupword @@ -128,7 +131,7 @@ github.com/davecgh/go-spew/spew # github.com/denis-tingaikin/go-header v0.4.3 ## explicit; go 1.17 github.com/denis-tingaikin/go-header -# github.com/distribution/reference v0.5.0 +# github.com/distribution/reference v0.6.0 ## explicit; go 1.20 github.com/distribution/reference # github.com/emicklei/go-restful/v3 v3.12.1 @@ -141,9 +144,6 @@ github.com/esimonov/ifshort/pkg/analyzer # github.com/ettle/strcase v0.1.1 ## explicit; go 1.12 github.com/ettle/strcase -# github.com/evanphx/json-patch v5.9.0+incompatible -## explicit -github.com/evanphx/json-patch # github.com/evanphx/json-patch/v5 v5.9.0 ## explicit; go 1.18 github.com/evanphx/json-patch/v5 @@ -279,16 +279,9 @@ github.com/gogo/protobuf/protoc-gen-gogo/plugin github.com/gogo/protobuf/sortkeys github.com/gogo/protobuf/vanity github.com/gogo/protobuf/vanity/command -# github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da -## explicit -github.com/golang/groupcache/lru # github.com/golang/protobuf v1.5.4 ## explicit; go 1.17 github.com/golang/protobuf/proto -github.com/golang/protobuf/ptypes -github.com/golang/protobuf/ptypes/any -github.com/golang/protobuf/ptypes/duration -github.com/golang/protobuf/ptypes/timestamp # github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2 ## explicit github.com/golangci/check/cmd/structcheck @@ -353,8 +346,11 @@ github.com/golangci/revgrep # github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4 ## explicit github.com/golangci/unconvert -# github.com/google/cel-go v0.20.1 +# github.com/google/btree v1.1.3 ## explicit; go 1.18 +github.com/google/btree +# github.com/google/cel-go v0.22.0 +## explicit; go 1.21.1 github.com/google/cel-go/cel github.com/google/cel-go/checker github.com/google/cel-go/checker/decls @@ -377,15 +373,15 @@ github.com/google/cel-go/interpreter github.com/google/cel-go/interpreter/functions github.com/google/cel-go/parser github.com/google/cel-go/parser/gen -# github.com/google/gnostic-models v0.6.8 -## explicit; go 1.18 +# github.com/google/gnostic-models v0.6.9 +## explicit; go 1.21 github.com/google/gnostic-models/compiler github.com/google/gnostic-models/extensions github.com/google/gnostic-models/jsonschema github.com/google/gnostic-models/openapiv2 github.com/google/gnostic-models/openapiv3 -# github.com/google/go-cmp v0.6.0 -## explicit; go 1.13 +# github.com/google/go-cmp v0.7.0 +## explicit; go 1.21 github.com/google/go-cmp/cmp github.com/google/go-cmp/cmp/cmpopts github.com/google/go-cmp/cmp/internal/diff @@ -406,8 +402,8 @@ github.com/google/go-jsonnet/internal/program ## explicit; go 1.12 github.com/google/gofuzz github.com/google/gofuzz/bytesource -# github.com/google/pprof v0.0.0-20240727154555-813a5fbdbec8 -## explicit; go 1.19 +# github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db +## explicit; go 1.22 github.com/google/pprof/profile # github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 ## explicit; go 1.13 @@ -434,8 +430,8 @@ github.com/gostaticanalysis/nilerr # github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 ## explicit 
github.com/grpc-ecosystem/go-grpc-prometheus -# github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 -## explicit; go 1.20 +# github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0 +## explicit; go 1.22.7 github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule github.com/grpc-ecosystem/grpc-gateway/v2/runtime github.com/grpc-ecosystem/grpc-gateway/v2/utilities @@ -465,9 +461,6 @@ github.com/hashicorp/hcl/json/token github.com/hexops/gotextdiff github.com/hexops/gotextdiff/myers github.com/hexops/gotextdiff/span -# github.com/imdario/mergo v0.3.16 -## explicit; go 1.13 -github.com/imdario/mergo # github.com/inconshreveable/mousetrap v1.1.0 ## explicit; go 1.18 github.com/inconshreveable/mousetrap @@ -499,15 +492,6 @@ github.com/kisielk/gotool/internal/load # github.com/kkHAIKE/contextcheck v1.1.3 ## explicit; go 1.15 github.com/kkHAIKE/contextcheck -# github.com/klauspost/compress v1.17.9 -## explicit; go 1.20 -github.com/klauspost/compress -github.com/klauspost/compress/fse -github.com/klauspost/compress/huff0 -github.com/klauspost/compress/internal/cpuinfo -github.com/klauspost/compress/internal/snapref -github.com/klauspost/compress/zstd -github.com/klauspost/compress/zstd/internal/xxhash # github.com/kulti/thelper v0.6.3 ## explicit; go 1.18 github.com/kulti/thelper/pkg/analyzer @@ -628,8 +612,8 @@ github.com/nishanths/predeclared/passes/predeclared # github.com/olekukonko/tablewriter v0.0.5 ## explicit; go 1.12 github.com/olekukonko/tablewriter -# github.com/onsi/ginkgo/v2 v2.19.0 -## explicit; go 1.20 +# github.com/onsi/ginkgo/v2 v2.21.0 +## explicit; go 1.22.0 github.com/onsi/ginkgo/v2 github.com/onsi/ginkgo/v2/config github.com/onsi/ginkgo/v2/formatter @@ -650,8 +634,8 @@ github.com/onsi/ginkgo/v2/internal/parallel_support github.com/onsi/ginkgo/v2/internal/testingtproxy github.com/onsi/ginkgo/v2/reporters github.com/onsi/ginkgo/v2/types -# github.com/onsi/gomega v1.34.0 -## explicit; go 1.20 +# github.com/onsi/gomega v1.35.1 +## explicit; go 1.22 github.com/onsi/gomega github.com/onsi/gomega/format github.com/onsi/gomega/internal @@ -665,8 +649,8 @@ github.com/onsi/gomega/types # github.com/opencontainers/go-digest v1.0.0 ## explicit; go 1.13 github.com/opencontainers/go-digest -# github.com/openshift/api v0.0.0-20241101202457-04eb3fd119d2 -## explicit; go 1.22.0 +# github.com/openshift/api v0.0.0-20250320170726-75d64d71980b +## explicit; go 1.23.0 github.com/openshift/api github.com/openshift/api/apiserver github.com/openshift/api/apiserver/v1 @@ -736,7 +720,7 @@ github.com/openshift/api/template github.com/openshift/api/template/v1 github.com/openshift/api/user github.com/openshift/api/user/v1 -# github.com/openshift/build-machinery-go v0.0.0-20240613134303-8359781da660 +# github.com/openshift/build-machinery-go v0.0.0-20250102153059-e85a1a7ecb5c ## explicit; go 1.22.0 github.com/openshift/build-machinery-go github.com/openshift/build-machinery-go/make @@ -746,8 +730,8 @@ github.com/openshift/build-machinery-go/make/targets/golang github.com/openshift/build-machinery-go/make/targets/openshift github.com/openshift/build-machinery-go/make/targets/openshift/operator github.com/openshift/build-machinery-go/scripts -# github.com/openshift/client-go v0.0.0-20241001162912-da6d55e4611f -## explicit; go 1.22.0 +# github.com/openshift/client-go v0.0.0-20250125113824-8e1f0b8fa9a7 +## explicit; go 1.23.0 github.com/openshift/client-go/config/applyconfigurations/config/v1 github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1 
github.com/openshift/client-go/config/applyconfigurations/internal @@ -764,8 +748,8 @@ github.com/openshift/client-go/config/listers/config/v1 github.com/openshift/client-go/config/listers/config/v1alpha1 github.com/openshift/client-go/operator/applyconfigurations/internal github.com/openshift/client-go/operator/applyconfigurations/operator/v1 -# github.com/openshift/library-go v0.0.0-20250403134058-7c43fdf96c62 -## explicit; go 1.22.0 +# github.com/openshift/library-go v0.0.0-20250609093359-ccdcf648dd95 +## explicit; go 1.23.0 github.com/openshift/library-go/pkg/apiserver/jsonpatch github.com/openshift/library-go/pkg/authorization/hardcodedauthorizer github.com/openshift/library-go/pkg/config/client @@ -817,22 +801,23 @@ github.com/pmezard/go-difflib/difflib # github.com/polyfloyd/go-errorlint v1.0.5 ## explicit; go 1.13 github.com/polyfloyd/go-errorlint/errorlint -# github.com/prometheus/client_golang v1.20.4 -## explicit; go 1.20 +# github.com/prometheus/client_golang v1.22.0 +## explicit; go 1.22 github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/header github.com/prometheus/client_golang/prometheus github.com/prometheus/client_golang/prometheus/collectors github.com/prometheus/client_golang/prometheus/internal github.com/prometheus/client_golang/prometheus/promhttp +github.com/prometheus/client_golang/prometheus/promhttp/internal github.com/prometheus/client_golang/prometheus/testutil github.com/prometheus/client_golang/prometheus/testutil/promlint github.com/prometheus/client_golang/prometheus/testutil/promlint/validations # github.com/prometheus/client_model v0.6.1 ## explicit; go 1.19 github.com/prometheus/client_model/go -# github.com/prometheus/common v0.55.0 -## explicit; go 1.20 +# github.com/prometheus/common v0.62.0 +## explicit; go 1.21 github.com/prometheus/common/expfmt github.com/prometheus/common/model # github.com/prometheus/procfs v0.15.1 @@ -898,6 +883,9 @@ github.com/sashamelentyev/usestdlibvars/pkg/analyzer/internal/mapping github.com/securego/gosec/v2 github.com/securego/gosec/v2/cwe github.com/securego/gosec/v2/rules +# github.com/sergi/go-diff v1.2.0 +## explicit; go 1.12 +github.com/sergi/go-diff/diffmatchpatch # github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c ## explicit github.com/shazow/go-diff/difflib @@ -1024,40 +1012,45 @@ github.com/yeya24/promlinter # gitlab.com/bosi/decorder v0.2.3 ## explicit; go 1.17 gitlab.com/bosi/decorder -# go.etcd.io/etcd/api/v3 v3.5.14 -## explicit; go 1.21 +# go.etcd.io/etcd/api/v3 v3.5.21 +## explicit; go 1.23.0 go.etcd.io/etcd/api/v3/authpb go.etcd.io/etcd/api/v3/etcdserverpb go.etcd.io/etcd/api/v3/membershippb go.etcd.io/etcd/api/v3/mvccpb go.etcd.io/etcd/api/v3/v3rpc/rpctypes go.etcd.io/etcd/api/v3/version -# go.etcd.io/etcd/client/pkg/v3 v3.5.14 -## explicit; go 1.21 +# go.etcd.io/etcd/client/pkg/v3 v3.5.21 +## explicit; go 1.23.0 go.etcd.io/etcd/client/pkg/v3/fileutil go.etcd.io/etcd/client/pkg/v3/logutil go.etcd.io/etcd/client/pkg/v3/systemd go.etcd.io/etcd/client/pkg/v3/tlsutil go.etcd.io/etcd/client/pkg/v3/transport go.etcd.io/etcd/client/pkg/v3/types -# go.etcd.io/etcd/client/v3 v3.5.14 -## explicit; go 1.21 +# go.etcd.io/etcd/client/v3 v3.5.21 +## explicit; go 1.23.0 go.etcd.io/etcd/client/v3 go.etcd.io/etcd/client/v3/credentials go.etcd.io/etcd/client/v3/internal/endpoint go.etcd.io/etcd/client/v3/internal/resolver -# go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0 
-## explicit; go 1.21 +go.etcd.io/etcd/client/v3/kubernetes +# go.opentelemetry.io/auto/sdk v1.1.0 +## explicit; go 1.22.0 +go.opentelemetry.io/auto/sdk +go.opentelemetry.io/auto/sdk/internal/telemetry +# go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.58.0 +## explicit; go 1.22.7 go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal -# go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 -## explicit; go 1.21 +# go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 +## explicit; go 1.22.0 go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil -# go.opentelemetry.io/otel v1.29.0 -## explicit; go 1.21 +# go.opentelemetry.io/otel v1.33.0 +## explicit; go 1.22.0 go.opentelemetry.io/otel go.opentelemetry.io/otel/attribute go.opentelemetry.io/otel/baggage @@ -1072,49 +1065,41 @@ go.opentelemetry.io/otel/semconv/v1.12.0 go.opentelemetry.io/otel/semconv/v1.17.0 go.opentelemetry.io/otel/semconv/v1.20.0 go.opentelemetry.io/otel/semconv/v1.26.0 -# go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 -## explicit; go 1.21 +# go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0 +## explicit; go 1.22.7 go.opentelemetry.io/otel/exporters/otlp/otlptrace go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform -# go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0 -## explicit; go 1.21 +# go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.33.0 +## explicit; go 1.22.7 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry -# go.opentelemetry.io/otel/metric v1.29.0 -## explicit; go 1.21 +# go.opentelemetry.io/otel/metric v1.33.0 +## explicit; go 1.22.0 go.opentelemetry.io/otel/metric go.opentelemetry.io/otel/metric/embedded go.opentelemetry.io/otel/metric/noop -# go.opentelemetry.io/otel/sdk v1.28.0 -## explicit; go 1.21 +# go.opentelemetry.io/otel/sdk v1.33.0 +## explicit; go 1.22.0 go.opentelemetry.io/otel/sdk go.opentelemetry.io/otel/sdk/instrumentation go.opentelemetry.io/otel/sdk/internal/env go.opentelemetry.io/otel/sdk/internal/x go.opentelemetry.io/otel/sdk/resource go.opentelemetry.io/otel/sdk/trace -# go.opentelemetry.io/otel/trace v1.29.0 -## explicit; go 1.21 +# go.opentelemetry.io/otel/trace v1.33.0 +## explicit; go 1.22.0 go.opentelemetry.io/otel/trace go.opentelemetry.io/otel/trace/embedded go.opentelemetry.io/otel/trace/noop -# go.opentelemetry.io/proto/otlp v1.3.1 -## explicit; go 1.17 +# go.opentelemetry.io/proto/otlp v1.4.0 +## explicit; go 1.22.7 go.opentelemetry.io/proto/otlp/collector/trace/v1 go.opentelemetry.io/proto/otlp/common/v1 go.opentelemetry.io/proto/otlp/resource/v1 go.opentelemetry.io/proto/otlp/trace/v1 -# go.starlark.net v0.0.0-20230525235612-a134d8f9ddca -## explicit; go 1.16 -go.starlark.net/internal/compile -go.starlark.net/internal/spell -go.starlark.net/resolve 
-go.starlark.net/starlark -go.starlark.net/starlarkstruct -go.starlark.net/syntax # go.uber.org/multierr v1.11.0 ## explicit; go 1.19 go.uber.org/multierr @@ -1130,8 +1115,8 @@ go.uber.org/zap/internal/pool go.uber.org/zap/internal/stacktrace go.uber.org/zap/zapcore go.uber.org/zap/zapgrpc -# golang.org/x/crypto v0.32.0 -## explicit; go 1.20 +# golang.org/x/crypto v0.36.0 +## explicit; go 1.23.0 golang.org/x/crypto/cryptobyte golang.org/x/crypto/cryptobyte/asn1 golang.org/x/crypto/hkdf @@ -1147,14 +1132,14 @@ golang.org/x/exp/slices # golang.org/x/exp/typeparams v0.0.0-20220827204233-334a2380cb91 ## explicit; go 1.18 golang.org/x/exp/typeparams -# golang.org/x/mod v0.20.0 -## explicit; go 1.18 +# golang.org/x/mod v0.21.0 +## explicit; go 1.22.0 golang.org/x/mod/internal/lazyregexp golang.org/x/mod/modfile golang.org/x/mod/module golang.org/x/mod/semver -# golang.org/x/net v0.34.0 -## explicit; go 1.18 +# golang.org/x/net v0.38.0 +## explicit; go 1.23.0 golang.org/x/net/context golang.org/x/net/html golang.org/x/net/html/atom @@ -1163,30 +1148,31 @@ golang.org/x/net/http/httpguts golang.org/x/net/http2 golang.org/x/net/http2/hpack golang.org/x/net/idna +golang.org/x/net/internal/httpcommon golang.org/x/net/internal/timeseries golang.org/x/net/trace golang.org/x/net/websocket -# golang.org/x/oauth2 v0.23.0 -## explicit; go 1.18 +# golang.org/x/oauth2 v0.27.0 +## explicit; go 1.23.0 golang.org/x/oauth2 golang.org/x/oauth2/internal -# golang.org/x/sync v0.10.0 -## explicit; go 1.18 +# golang.org/x/sync v0.12.0 +## explicit; go 1.23.0 golang.org/x/sync/errgroup golang.org/x/sync/semaphore golang.org/x/sync/singleflight -# golang.org/x/sys v0.29.0 -## explicit; go 1.18 +# golang.org/x/sys v0.31.0 +## explicit; go 1.23.0 golang.org/x/sys/cpu golang.org/x/sys/plan9 golang.org/x/sys/unix golang.org/x/sys/windows golang.org/x/sys/windows/registry -# golang.org/x/term v0.28.0 -## explicit; go 1.18 +# golang.org/x/term v0.30.0 +## explicit; go 1.23.0 golang.org/x/term -# golang.org/x/text v0.21.0 -## explicit; go 1.18 +# golang.org/x/text v0.23.0 +## explicit; go 1.23.0 golang.org/x/text/cases golang.org/x/text/encoding golang.org/x/text/encoding/charmap @@ -1217,11 +1203,11 @@ golang.org/x/text/transform golang.org/x/text/unicode/bidi golang.org/x/text/unicode/norm golang.org/x/text/width -# golang.org/x/time v0.6.0 +# golang.org/x/time v0.9.0 ## explicit; go 1.18 golang.org/x/time/rate -# golang.org/x/tools v0.24.0 -## explicit; go 1.19 +# golang.org/x/tools v0.26.0 +## explicit; go 1.22.0 golang.org/x/tools/cover golang.org/x/tools/go/analysis golang.org/x/tools/go/analysis/passes/asmdecl @@ -1291,25 +1277,24 @@ golang.org/x/tools/internal/imports golang.org/x/tools/internal/packagesinternal golang.org/x/tools/internal/pkgbits golang.org/x/tools/internal/stdlib -golang.org/x/tools/internal/tokeninternal golang.org/x/tools/internal/typeparams golang.org/x/tools/internal/typesinternal golang.org/x/tools/internal/versions # gomodules.xyz/jsonpatch/v2 v2.4.0 ## explicit; go 1.20 gomodules.xyz/jsonpatch/v2 -# google.golang.org/genproto/googleapis/api v0.0.0-20240827150818-7e3bb234dfed +# google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576 ## explicit; go 1.21 google.golang.org/genproto/googleapis/api google.golang.org/genproto/googleapis/api/annotations google.golang.org/genproto/googleapis/api/expr/v1alpha1 google.golang.org/genproto/googleapis/api/httpbody -# google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 +# 
google.golang.org/genproto/googleapis/rpc v0.0.0-20241209162323-e6fa225c2576 ## explicit; go 1.21 google.golang.org/genproto/googleapis/rpc/errdetails google.golang.org/genproto/googleapis/rpc/status -# google.golang.org/grpc v1.66.2 -## explicit; go 1.21 +# google.golang.org/grpc v1.68.1 +## explicit; go 1.22 google.golang.org/grpc google.golang.org/grpc/attributes google.golang.org/grpc/backoff @@ -1317,6 +1302,8 @@ google.golang.org/grpc/balancer google.golang.org/grpc/balancer/base google.golang.org/grpc/balancer/grpclb/state google.golang.org/grpc/balancer/pickfirst +google.golang.org/grpc/balancer/pickfirst/internal +google.golang.org/grpc/balancer/pickfirst/pickfirstleaf google.golang.org/grpc/balancer/roundrobin google.golang.org/grpc/binarylog/grpc_binarylog_v1 google.golang.org/grpc/channelz @@ -1368,8 +1355,8 @@ google.golang.org/grpc/serviceconfig google.golang.org/grpc/stats google.golang.org/grpc/status google.golang.org/grpc/tap -# google.golang.org/protobuf v1.34.2 -## explicit; go 1.20 +# google.golang.org/protobuf v1.36.5 +## explicit; go 1.21 google.golang.org/protobuf/encoding/protodelim google.golang.org/protobuf/encoding/protojson google.golang.org/protobuf/encoding/prototext @@ -1392,6 +1379,7 @@ google.golang.org/protobuf/internal/genid google.golang.org/protobuf/internal/impl google.golang.org/protobuf/internal/order google.golang.org/protobuf/internal/pragma +google.golang.org/protobuf/internal/protolazy google.golang.org/protobuf/internal/set google.golang.org/protobuf/internal/strs google.golang.org/protobuf/internal/version @@ -1460,8 +1448,8 @@ honnef.co/go/tools/staticcheck/fakereflect honnef.co/go/tools/staticcheck/fakexml honnef.co/go/tools/stylecheck honnef.co/go/tools/unused -# k8s.io/api v0.31.7 -## explicit; go 1.22.0 +# k8s.io/api v0.32.5 +## explicit; go 1.23.0 k8s.io/api/admission/v1 k8s.io/api/admission/v1beta1 k8s.io/api/admissionregistration/v1 @@ -1488,7 +1476,7 @@ k8s.io/api/certificates/v1 k8s.io/api/certificates/v1alpha1 k8s.io/api/certificates/v1beta1 k8s.io/api/coordination/v1 -k8s.io/api/coordination/v1alpha1 +k8s.io/api/coordination/v1alpha2 k8s.io/api/coordination/v1beta1 k8s.io/api/core/v1 k8s.io/api/discovery/v1 @@ -1513,6 +1501,7 @@ k8s.io/api/rbac/v1 k8s.io/api/rbac/v1alpha1 k8s.io/api/rbac/v1beta1 k8s.io/api/resource/v1alpha3 +k8s.io/api/resource/v1beta1 k8s.io/api/scheduling/v1 k8s.io/api/scheduling/v1alpha1 k8s.io/api/scheduling/v1beta1 @@ -1520,8 +1509,8 @@ k8s.io/api/storage/v1 k8s.io/api/storage/v1alpha1 k8s.io/api/storage/v1beta1 k8s.io/api/storagemigration/v1alpha1 -# k8s.io/apiextensions-apiserver v0.31.1 -## explicit; go 1.22.0 +# k8s.io/apiextensions-apiserver v0.32.1 +## explicit; go 1.23.0 k8s.io/apiextensions-apiserver/pkg/apis/apiextensions k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1 k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1 @@ -1532,8 +1521,8 @@ k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1 k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1 k8s.io/apiextensions-apiserver/pkg/features -# k8s.io/apimachinery v0.31.7 -## explicit; go 1.22.0 +# k8s.io/apimachinery v0.32.5 +## explicit; go 1.23.0 k8s.io/apimachinery/pkg/api/equality k8s.io/apimachinery/pkg/api/errors k8s.io/apimachinery/pkg/api/meta @@ -1556,6 +1545,7 @@ k8s.io/apimachinery/pkg/labels k8s.io/apimachinery/pkg/runtime k8s.io/apimachinery/pkg/runtime/schema 
k8s.io/apimachinery/pkg/runtime/serializer +k8s.io/apimachinery/pkg/runtime/serializer/cbor k8s.io/apimachinery/pkg/runtime/serializer/cbor/direct k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes k8s.io/apimachinery/pkg/runtime/serializer/json @@ -1597,17 +1587,20 @@ k8s.io/apimachinery/pkg/version k8s.io/apimachinery/pkg/watch k8s.io/apimachinery/third_party/forked/golang/json k8s.io/apimachinery/third_party/forked/golang/reflect -# k8s.io/apiserver v0.31.7 -## explicit; go 1.22.0 +# k8s.io/apiserver v0.32.5 +## explicit; go 1.23.0 k8s.io/apiserver/pkg/admission k8s.io/apiserver/pkg/admission/configuration k8s.io/apiserver/pkg/admission/initializer k8s.io/apiserver/pkg/admission/metrics +k8s.io/apiserver/pkg/admission/plugin/authorizer k8s.io/apiserver/pkg/admission/plugin/cel k8s.io/apiserver/pkg/admission/plugin/namespace/lifecycle k8s.io/apiserver/pkg/admission/plugin/policy/generic k8s.io/apiserver/pkg/admission/plugin/policy/internal/generic k8s.io/apiserver/pkg/admission/plugin/policy/matching +k8s.io/apiserver/pkg/admission/plugin/policy/mutating +k8s.io/apiserver/pkg/admission/plugin/policy/mutating/patch k8s.io/apiserver/pkg/admission/plugin/policy/validating k8s.io/apiserver/pkg/admission/plugin/policy/validating/metrics k8s.io/apiserver/pkg/admission/plugin/webhook @@ -1663,6 +1656,8 @@ k8s.io/apiserver/pkg/cel/common k8s.io/apiserver/pkg/cel/environment k8s.io/apiserver/pkg/cel/lazy k8s.io/apiserver/pkg/cel/library +k8s.io/apiserver/pkg/cel/mutation +k8s.io/apiserver/pkg/cel/mutation/dynamic k8s.io/apiserver/pkg/cel/openapi k8s.io/apiserver/pkg/cel/openapi/resolver k8s.io/apiserver/pkg/endpoints @@ -1737,7 +1732,6 @@ k8s.io/apiserver/pkg/util/flowcontrol/request k8s.io/apiserver/pkg/util/flushwriter k8s.io/apiserver/pkg/util/peerproxy/metrics k8s.io/apiserver/pkg/util/shufflesharding -k8s.io/apiserver/pkg/util/version k8s.io/apiserver/pkg/util/webhook k8s.io/apiserver/pkg/util/x509metrics k8s.io/apiserver/pkg/warning @@ -1748,8 +1742,8 @@ k8s.io/apiserver/plugin/pkg/audit/webhook k8s.io/apiserver/plugin/pkg/authenticator/token/webhook k8s.io/apiserver/plugin/pkg/authorizer/webhook k8s.io/apiserver/plugin/pkg/authorizer/webhook/metrics -# k8s.io/client-go v0.31.7 -## explicit; go 1.22.0 +# k8s.io/client-go v0.32.5 +## explicit; go 1.23.0 k8s.io/client-go/applyconfigurations k8s.io/client-go/applyconfigurations/admissionregistration/v1 k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1 @@ -1768,7 +1762,7 @@ k8s.io/client-go/applyconfigurations/certificates/v1 k8s.io/client-go/applyconfigurations/certificates/v1alpha1 k8s.io/client-go/applyconfigurations/certificates/v1beta1 k8s.io/client-go/applyconfigurations/coordination/v1 -k8s.io/client-go/applyconfigurations/coordination/v1alpha1 +k8s.io/client-go/applyconfigurations/coordination/v1alpha2 k8s.io/client-go/applyconfigurations/coordination/v1beta1 k8s.io/client-go/applyconfigurations/core/v1 k8s.io/client-go/applyconfigurations/discovery/v1 @@ -1795,6 +1789,7 @@ k8s.io/client-go/applyconfigurations/rbac/v1 k8s.io/client-go/applyconfigurations/rbac/v1alpha1 k8s.io/client-go/applyconfigurations/rbac/v1beta1 k8s.io/client-go/applyconfigurations/resource/v1alpha3 +k8s.io/client-go/applyconfigurations/resource/v1beta1 k8s.io/client-go/applyconfigurations/scheduling/v1 k8s.io/client-go/applyconfigurations/scheduling/v1alpha1 k8s.io/client-go/applyconfigurations/scheduling/v1beta1 @@ -1836,7 +1831,7 @@ k8s.io/client-go/informers/certificates/v1alpha1 k8s.io/client-go/informers/certificates/v1beta1 
k8s.io/client-go/informers/coordination k8s.io/client-go/informers/coordination/v1 -k8s.io/client-go/informers/coordination/v1alpha1 +k8s.io/client-go/informers/coordination/v1alpha2 k8s.io/client-go/informers/coordination/v1beta1 k8s.io/client-go/informers/core k8s.io/client-go/informers/core/v1 @@ -1871,6 +1866,7 @@ k8s.io/client-go/informers/rbac/v1alpha1 k8s.io/client-go/informers/rbac/v1beta1 k8s.io/client-go/informers/resource k8s.io/client-go/informers/resource/v1alpha3 +k8s.io/client-go/informers/resource/v1beta1 k8s.io/client-go/informers/scheduling k8s.io/client-go/informers/scheduling/v1 k8s.io/client-go/informers/scheduling/v1alpha1 @@ -1928,8 +1924,8 @@ k8s.io/client-go/kubernetes/typed/certificates/v1beta1 k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake k8s.io/client-go/kubernetes/typed/coordination/v1 k8s.io/client-go/kubernetes/typed/coordination/v1/fake -k8s.io/client-go/kubernetes/typed/coordination/v1alpha1 -k8s.io/client-go/kubernetes/typed/coordination/v1alpha1/fake +k8s.io/client-go/kubernetes/typed/coordination/v1alpha2 +k8s.io/client-go/kubernetes/typed/coordination/v1alpha2/fake k8s.io/client-go/kubernetes/typed/coordination/v1beta1 k8s.io/client-go/kubernetes/typed/coordination/v1beta1/fake k8s.io/client-go/kubernetes/typed/core/v1 @@ -1976,6 +1972,8 @@ k8s.io/client-go/kubernetes/typed/rbac/v1beta1 k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake k8s.io/client-go/kubernetes/typed/resource/v1alpha3 k8s.io/client-go/kubernetes/typed/resource/v1alpha3/fake +k8s.io/client-go/kubernetes/typed/resource/v1beta1 +k8s.io/client-go/kubernetes/typed/resource/v1beta1/fake k8s.io/client-go/kubernetes/typed/scheduling/v1 k8s.io/client-go/kubernetes/typed/scheduling/v1/fake k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1 @@ -2008,7 +2006,7 @@ k8s.io/client-go/listers/certificates/v1 k8s.io/client-go/listers/certificates/v1alpha1 k8s.io/client-go/listers/certificates/v1beta1 k8s.io/client-go/listers/coordination/v1 -k8s.io/client-go/listers/coordination/v1alpha1 +k8s.io/client-go/listers/coordination/v1alpha2 k8s.io/client-go/listers/coordination/v1beta1 k8s.io/client-go/listers/core/v1 k8s.io/client-go/listers/discovery/v1 @@ -2032,6 +2030,7 @@ k8s.io/client-go/listers/rbac/v1 k8s.io/client-go/listers/rbac/v1alpha1 k8s.io/client-go/listers/rbac/v1beta1 k8s.io/client-go/listers/resource/v1alpha3 +k8s.io/client-go/listers/resource/v1beta1 k8s.io/client-go/listers/scheduling/v1 k8s.io/client-go/listers/scheduling/v1alpha1 k8s.io/client-go/listers/scheduling/v1beta1 @@ -2070,6 +2069,7 @@ k8s.io/client-go/tools/record k8s.io/client-go/tools/record/util k8s.io/client-go/tools/reference k8s.io/client-go/transport +k8s.io/client-go/util/apply k8s.io/client-go/util/cert k8s.io/client-go/util/connrotation k8s.io/client-go/util/consistencydetector @@ -2092,8 +2092,8 @@ k8s.io/cloud-provider/controllers/service/config k8s.io/cloud-provider/controllers/service/config/v1alpha1 k8s.io/cloud-provider/names k8s.io/cloud-provider/options -# k8s.io/code-generator v0.31.1 -## explicit; go 1.22.0 +# k8s.io/code-generator v0.32.1 +## explicit; go 1.23.0 k8s.io/code-generator k8s.io/code-generator/cmd/applyconfiguration-gen k8s.io/code-generator/cmd/applyconfiguration-gen/args @@ -2129,8 +2129,8 @@ k8s.io/code-generator/cmd/register-gen/generators k8s.io/code-generator/pkg/namer k8s.io/code-generator/pkg/util k8s.io/code-generator/third_party/forked/golang/reflect -# k8s.io/component-base v0.31.7 -## explicit; go 1.22.0 +# k8s.io/component-base v0.32.5 +## explicit; go 1.23.0 
k8s.io/component-base/cli k8s.io/component-base/cli/flag k8s.io/component-base/config @@ -2155,13 +2155,16 @@ k8s.io/component-base/metrics/testutil k8s.io/component-base/tracing k8s.io/component-base/tracing/api/v1 k8s.io/component-base/version -# k8s.io/component-helpers v0.30.1 -## explicit; go 1.22.0 +k8s.io/component-base/zpages/features +k8s.io/component-base/zpages/flagz +# k8s.io/component-helpers v0.32.1 +## explicit; go 1.23.0 k8s.io/component-helpers/node/util/sysctl +k8s.io/component-helpers/resource k8s.io/component-helpers/scheduling/corev1 k8s.io/component-helpers/scheduling/corev1/nodeaffinity -# k8s.io/controller-manager v0.30.1 -## explicit; go 1.22.0 +# k8s.io/controller-manager v0.32.5 +## explicit; go 1.23.0 k8s.io/controller-manager/config k8s.io/controller-manager/config/v1 k8s.io/controller-manager/config/v1alpha1 @@ -2172,7 +2175,7 @@ k8s.io/controller-manager/pkg/features k8s.io/controller-manager/pkg/features/register k8s.io/controller-manager/pkg/leadermigration/config k8s.io/controller-manager/pkg/leadermigration/options -# k8s.io/gengo/v2 v2.0.0-20240826214909-a7b603a56eb7 +# k8s.io/gengo/v2 v2.0.0-20240911193312-2b36238f13e9 ## explicit; go 1.20 k8s.io/gengo/v2 k8s.io/gengo/v2/generator @@ -2190,21 +2193,21 @@ k8s.io/klog/v2/internal/severity k8s.io/klog/v2/internal/sloghandler k8s.io/klog/v2/internal/verbosity k8s.io/klog/v2/textlogger -# k8s.io/kms v0.31.7 -## explicit; go 1.22.0 +# k8s.io/kms v0.32.5 +## explicit; go 1.23.0 k8s.io/kms/apis/v1beta1 k8s.io/kms/apis/v2 k8s.io/kms/pkg/service k8s.io/kms/pkg/util -# k8s.io/kube-aggregator v0.31.1 -## explicit; go 1.22.0 +# k8s.io/kube-aggregator v0.32.1 +## explicit; go 1.23.0 k8s.io/kube-aggregator/pkg/apis/apiregistration k8s.io/kube-aggregator/pkg/apis/apiregistration/v1 k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1 k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1 -# k8s.io/kube-openapi v0.0.0-20240903163716-9e1beecbcb38 -## explicit; go 1.20 +# k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff +## explicit; go 1.21 k8s.io/kube-openapi/cmd/openapi-gen k8s.io/kube-openapi/cmd/openapi-gen/args k8s.io/kube-openapi/pkg/builder @@ -2219,6 +2222,7 @@ k8s.io/kube-openapi/pkg/handler k8s.io/kube-openapi/pkg/handler3 k8s.io/kube-openapi/pkg/internal k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json +k8s.io/kube-openapi/pkg/internal/third_party/govalidator k8s.io/kube-openapi/pkg/schemaconv k8s.io/kube-openapi/pkg/schemamutation k8s.io/kube-openapi/pkg/spec3 @@ -2232,8 +2236,8 @@ k8s.io/kube-openapi/pkg/validation/strfmt/bson # k8s.io/kubelet v0.30.1 ## explicit; go 1.22.0 k8s.io/kubelet/pkg/apis -# k8s.io/kubernetes v1.31.7 -## explicit; go 1.22.0 +# k8s.io/kubernetes v1.32.5 +## explicit; go 1.23.0 k8s.io/kubernetes/pkg/api/service k8s.io/kubernetes/pkg/api/v1/service k8s.io/kubernetes/pkg/apis/apps @@ -2250,7 +2254,7 @@ k8s.io/kubernetes/pkg/features k8s.io/kubernetes/pkg/fieldpath k8s.io/kubernetes/pkg/util/parsers k8s.io/kubernetes/pkg/util/tolerations -# k8s.io/utils v0.0.0-20240921022957-49e7df575cb6 +# k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 ## explicit; go 1.18 k8s.io/utils/buffer k8s.io/utils/clock @@ -2277,7 +2281,7 @@ mvdan.cc/lint # mvdan.cc/unparam v0.0.0-20220706161116-678bad134442 ## explicit; go 1.17 mvdan.cc/unparam/check -# sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.30.3 +# sigs.k8s.io/apiserver-network-proxy/konnectivity-client 
v0.31.2 ## explicit; go 1.21 sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/client sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/client/metrics @@ -2353,8 +2357,8 @@ sigs.k8s.io/controller-tools/pkg/webhook # sigs.k8s.io/gateway-api v1.1.0 ## explicit; go 1.22.0 sigs.k8s.io/gateway-api/apis/v1 -# sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd -## explicit; go 1.18 +# sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 +## explicit; go 1.21 sigs.k8s.io/json sigs.k8s.io/json/internal/golang/encoding/json # sigs.k8s.io/kube-storage-version-migrator v0.0.6-0.20230721195810-5c8923c5ff96 @@ -2363,8 +2367,8 @@ sigs.k8s.io/kube-storage-version-migrator/pkg/apis/migration/v1alpha1 sigs.k8s.io/kube-storage-version-migrator/pkg/clients/clientset sigs.k8s.io/kube-storage-version-migrator/pkg/clients/clientset/scheme sigs.k8s.io/kube-storage-version-migrator/pkg/clients/clientset/typed/migration/v1alpha1 -# sigs.k8s.io/kustomize/api v0.17.2 -## explicit; go 1.21 +# sigs.k8s.io/kustomize/api v0.18.0 +## explicit; go 1.22.7 sigs.k8s.io/kustomize/api/filters/annotations sigs.k8s.io/kustomize/api/filters/fieldspec sigs.k8s.io/kustomize/api/filters/filtersutil @@ -2413,8 +2417,8 @@ sigs.k8s.io/kustomize/api/provider sigs.k8s.io/kustomize/api/resmap sigs.k8s.io/kustomize/api/resource sigs.k8s.io/kustomize/api/types -# sigs.k8s.io/kustomize/cmd/config v0.14.1 -## explicit; go 1.21 +# sigs.k8s.io/kustomize/cmd/config v0.15.0 +## explicit; go 1.22.7 sigs.k8s.io/kustomize/cmd/config/completion sigs.k8s.io/kustomize/cmd/config/configcobra sigs.k8s.io/kustomize/cmd/config/ext @@ -2424,8 +2428,8 @@ sigs.k8s.io/kustomize/cmd/config/internal/generateddocs/api sigs.k8s.io/kustomize/cmd/config/internal/generateddocs/commands sigs.k8s.io/kustomize/cmd/config/internal/generateddocs/tutorials sigs.k8s.io/kustomize/cmd/config/runner -# sigs.k8s.io/kustomize/kustomize/v5 v5.4.2 -## explicit; go 1.21 +# sigs.k8s.io/kustomize/kustomize/v5 v5.5.0 +## explicit; go 1.22.7 sigs.k8s.io/kustomize/kustomize/v5 sigs.k8s.io/kustomize/kustomize/v5/commands sigs.k8s.io/kustomize/kustomize/v5/commands/build @@ -2443,9 +2447,10 @@ sigs.k8s.io/kustomize/kustomize/v5/commands/openapi sigs.k8s.io/kustomize/kustomize/v5/commands/openapi/fetch sigs.k8s.io/kustomize/kustomize/v5/commands/openapi/info sigs.k8s.io/kustomize/kustomize/v5/commands/version -# sigs.k8s.io/kustomize/kyaml v0.17.1 -## explicit; go 1.21 +# sigs.k8s.io/kustomize/kyaml v0.18.1 +## explicit; go 1.22.7 sigs.k8s.io/kustomize/kyaml/comments +sigs.k8s.io/kustomize/kyaml/copyutil sigs.k8s.io/kustomize/kyaml/errors sigs.k8s.io/kustomize/kyaml/ext sigs.k8s.io/kustomize/kyaml/fieldmeta @@ -2453,8 +2458,6 @@ sigs.k8s.io/kustomize/kyaml/filesys sigs.k8s.io/kustomize/kyaml/fn/runtime/container sigs.k8s.io/kustomize/kyaml/fn/runtime/exec sigs.k8s.io/kustomize/kyaml/fn/runtime/runtimeutil -sigs.k8s.io/kustomize/kyaml/fn/runtime/starlark -sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/qri-io/starlib/util sigs.k8s.io/kustomize/kyaml/kio sigs.k8s.io/kustomize/kyaml/kio/filters sigs.k8s.io/kustomize/kyaml/kio/kioutil @@ -2480,7 +2483,11 @@ sigs.k8s.io/kustomize/kyaml/yaml/merge2 sigs.k8s.io/kustomize/kyaml/yaml/merge3 sigs.k8s.io/kustomize/kyaml/yaml/schema sigs.k8s.io/kustomize/kyaml/yaml/walk -# sigs.k8s.io/structured-merge-diff/v4 v4.4.1 +# sigs.k8s.io/randfill v1.0.0 +## explicit; go 1.18 +sigs.k8s.io/randfill +sigs.k8s.io/randfill/bytesource +# sigs.k8s.io/structured-merge-diff/v4 v4.6.0 ## explicit; go 1.13 
sigs.k8s.io/structured-merge-diff/v4/fieldpath sigs.k8s.io/structured-merge-diff/v4/merge diff --git a/vendor/sigs.k8s.io/apiserver-network-proxy/konnectivity-client/proto/client/client.pb.go b/vendor/sigs.k8s.io/apiserver-network-proxy/konnectivity-client/proto/client/client.pb.go index da1e37c18..0831f3e63 100644 --- a/vendor/sigs.k8s.io/apiserver-network-proxy/konnectivity-client/proto/client/client.pb.go +++ b/vendor/sigs.k8s.io/apiserver-network-proxy/konnectivity-client/proto/client/client.pb.go @@ -43,6 +43,7 @@ const ( PacketType_CLOSE_RSP PacketType = 3 PacketType_DATA PacketType = 4 PacketType_DIAL_CLS PacketType = 5 + PacketType_DRAIN PacketType = 6 ) // Enum value maps for PacketType. @@ -54,6 +55,7 @@ var ( 3: "CLOSE_RSP", 4: "DATA", 5: "DIAL_CLS", + 6: "DRAIN", } PacketType_value = map[string]int32{ "DIAL_REQ": 0, @@ -62,6 +64,7 @@ var ( "CLOSE_RSP": 3, "DATA": 4, "DIAL_CLS": 5, + "DRAIN": 6, } ) @@ -106,6 +109,7 @@ type Packet struct { // *Packet_CloseRequest // *Packet_CloseResponse // *Packet_CloseDial + // *Packet_Drain Payload isPacket_Payload `protobuf_oneof:"payload"` } @@ -197,6 +201,13 @@ func (x *Packet) GetCloseDial() *CloseDial { return nil } +func (x *Packet) GetDrain() *Drain { + if x, ok := x.GetPayload().(*Packet_Drain); ok { + return x.Drain + } + return nil +} + type isPacket_Payload interface { isPacket_Payload() } @@ -225,6 +236,10 @@ type Packet_CloseDial struct { CloseDial *CloseDial `protobuf:"bytes,7,opt,name=closeDial,proto3,oneof"` } +type Packet_Drain struct { + Drain *Drain `protobuf:"bytes,8,opt,name=drain,proto3,oneof"` +} + func (*Packet_DialRequest) isPacket_Payload() {} func (*Packet_DialResponse) isPacket_Payload() {} @@ -237,6 +252,8 @@ func (*Packet_CloseResponse) isPacket_Payload() {} func (*Packet_CloseDial) isPacket_Payload() {} +func (*Packet_Drain) isPacket_Payload() {} + type DialRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -522,6 +539,44 @@ func (x *CloseDial) GetRandom() int64 { return 0 } +type Drain struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *Drain) Reset() { + *x = Drain{} + if protoimpl.UnsafeEnabled { + mi := &file_konnectivity_client_proto_client_client_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Drain) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Drain) ProtoMessage() {} + +func (x *Drain) ProtoReflect() protoreflect.Message { + mi := &file_konnectivity_client_proto_client_client_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Drain.ProtoReflect.Descriptor instead. 
+func (*Drain) Descriptor() ([]byte, []int) { + return file_konnectivity_client_proto_client_client_proto_rawDescGZIP(), []int{6} +} + type Data struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -538,7 +593,7 @@ type Data struct { func (x *Data) Reset() { *x = Data{} if protoimpl.UnsafeEnabled { - mi := &file_konnectivity_client_proto_client_client_proto_msgTypes[6] + mi := &file_konnectivity_client_proto_client_client_proto_msgTypes[7] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -551,7 +606,7 @@ func (x *Data) String() string { func (*Data) ProtoMessage() {} func (x *Data) ProtoReflect() protoreflect.Message { - mi := &file_konnectivity_client_proto_client_client_proto_msgTypes[6] + mi := &file_konnectivity_client_proto_client_client_proto_msgTypes[7] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -564,7 +619,7 @@ func (x *Data) ProtoReflect() protoreflect.Message { // Deprecated: Use Data.ProtoReflect.Descriptor instead. func (*Data) Descriptor() ([]byte, []int) { - return file_konnectivity_client_proto_client_client_proto_rawDescGZIP(), []int{6} + return file_konnectivity_client_proto_client_client_proto_rawDescGZIP(), []int{7} } func (x *Data) GetConnectID() int64 { @@ -594,7 +649,7 @@ var file_konnectivity_client_proto_client_client_proto_rawDesc = []byte{ 0x0a, 0x2d, 0x6b, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x2d, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, - 0xd1, 0x02, 0x0a, 0x06, 0x50, 0x61, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x1f, 0x0a, 0x04, 0x74, 0x79, + 0xf1, 0x02, 0x0a, 0x06, 0x50, 0x61, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x1f, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0b, 0x2e, 0x50, 0x61, 0x63, 0x6b, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x30, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, @@ -614,7 +669,9 @@ var file_konnectivity_client_proto_client_client_proto_rawDesc = []byte{ 0x6c, 0x6f, 0x73, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2a, 0x0a, 0x09, 0x63, 0x6c, 0x6f, 0x73, 0x65, 0x44, 0x69, 0x61, 0x6c, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0a, 0x2e, 0x43, 0x6c, 0x6f, 0x73, 0x65, 0x44, 0x69, 0x61, 0x6c, 0x48, 0x00, 0x52, 0x09, 0x63, - 0x6c, 0x6f, 0x73, 0x65, 0x44, 0x69, 0x61, 0x6c, 0x42, 0x09, 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c, + 0x6c, 0x6f, 0x73, 0x65, 0x44, 0x69, 0x61, 0x6c, 0x12, 0x1e, 0x0a, 0x05, 0x64, 0x72, 0x61, 0x69, + 0x6e, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x06, 0x2e, 0x44, 0x72, 0x61, 0x69, 0x6e, 0x48, + 0x00, 0x52, 0x05, 0x64, 0x72, 0x61, 0x69, 0x6e, 0x42, 0x09, 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x22, 0x5b, 0x0a, 0x0b, 0x44, 0x69, 0x61, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x18, @@ -636,26 +693,27 @@ var file_konnectivity_client_proto_client_client_proto_rawDesc = []byte{ 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x49, 0x44, 0x22, 0x23, 0x0a, 0x09, 0x43, 0x6c, 0x6f, 0x73, 0x65, 0x44, 0x69, 0x61, 0x6c, 0x12, 0x16, 0x0a, 0x06, 0x72, 
0x61, 0x6e, 0x64, 0x6f, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x72, 0x61, - 0x6e, 0x64, 0x6f, 0x6d, 0x22, 0x4e, 0x0a, 0x04, 0x44, 0x61, 0x74, 0x61, 0x12, 0x1c, 0x0a, 0x09, - 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, - 0x09, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x49, 0x44, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, - 0x72, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, - 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, - 0x64, 0x61, 0x74, 0x61, 0x2a, 0x5e, 0x0a, 0x0a, 0x50, 0x61, 0x63, 0x6b, 0x65, 0x74, 0x54, 0x79, - 0x70, 0x65, 0x12, 0x0c, 0x0a, 0x08, 0x44, 0x49, 0x41, 0x4c, 0x5f, 0x52, 0x45, 0x51, 0x10, 0x00, - 0x12, 0x0c, 0x0a, 0x08, 0x44, 0x49, 0x41, 0x4c, 0x5f, 0x52, 0x53, 0x50, 0x10, 0x01, 0x12, 0x0d, - 0x0a, 0x09, 0x43, 0x4c, 0x4f, 0x53, 0x45, 0x5f, 0x52, 0x45, 0x51, 0x10, 0x02, 0x12, 0x0d, 0x0a, - 0x09, 0x43, 0x4c, 0x4f, 0x53, 0x45, 0x5f, 0x52, 0x53, 0x50, 0x10, 0x03, 0x12, 0x08, 0x0a, 0x04, - 0x44, 0x41, 0x54, 0x41, 0x10, 0x04, 0x12, 0x0c, 0x0a, 0x08, 0x44, 0x49, 0x41, 0x4c, 0x5f, 0x43, - 0x4c, 0x53, 0x10, 0x05, 0x32, 0x2f, 0x0a, 0x0c, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x53, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x12, 0x1f, 0x0a, 0x05, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x12, 0x07, 0x2e, - 0x50, 0x61, 0x63, 0x6b, 0x65, 0x74, 0x1a, 0x07, 0x2e, 0x50, 0x61, 0x63, 0x6b, 0x65, 0x74, 0x22, - 0x00, 0x28, 0x01, 0x30, 0x01, 0x42, 0x46, 0x5a, 0x44, 0x73, 0x69, 0x67, 0x73, 0x2e, 0x6b, 0x38, - 0x73, 0x2e, 0x69, 0x6f, 0x2f, 0x61, 0x70, 0x69, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2d, 0x6e, - 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x2d, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x6b, 0x6f, 0x6e, - 0x6e, 0x65, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x2d, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, - 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x62, 0x06, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x6e, 0x64, 0x6f, 0x6d, 0x22, 0x07, 0x0a, 0x05, 0x44, 0x72, 0x61, 0x69, 0x6e, 0x22, 0x4e, 0x0a, + 0x04, 0x44, 0x61, 0x74, 0x61, 0x12, 0x1c, 0x0a, 0x09, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, + 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, + 0x74, 0x49, 0x44, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, + 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x2a, 0x69, 0x0a, + 0x0a, 0x50, 0x61, 0x63, 0x6b, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0c, 0x0a, 0x08, 0x44, + 0x49, 0x41, 0x4c, 0x5f, 0x52, 0x45, 0x51, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x44, 0x49, 0x41, + 0x4c, 0x5f, 0x52, 0x53, 0x50, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x43, 0x4c, 0x4f, 0x53, 0x45, + 0x5f, 0x52, 0x45, 0x51, 0x10, 0x02, 0x12, 0x0d, 0x0a, 0x09, 0x43, 0x4c, 0x4f, 0x53, 0x45, 0x5f, + 0x52, 0x53, 0x50, 0x10, 0x03, 0x12, 0x08, 0x0a, 0x04, 0x44, 0x41, 0x54, 0x41, 0x10, 0x04, 0x12, + 0x0c, 0x0a, 0x08, 0x44, 0x49, 0x41, 0x4c, 0x5f, 0x43, 0x4c, 0x53, 0x10, 0x05, 0x12, 0x09, 0x0a, + 0x05, 0x44, 0x52, 0x41, 0x49, 0x4e, 0x10, 0x06, 0x32, 0x2f, 0x0a, 0x0c, 0x50, 0x72, 0x6f, 0x78, + 0x79, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x1f, 0x0a, 0x05, 0x50, 0x72, 0x6f, 0x78, + 0x79, 0x12, 0x07, 0x2e, 0x50, 0x61, 0x63, 0x6b, 0x65, 0x74, 0x1a, 0x07, 0x2e, 0x50, 0x61, 0x63, + 0x6b, 0x65, 0x74, 0x22, 0x00, 0x28, 0x01, 0x30, 0x01, 0x42, 0x46, 0x5a, 0x44, 0x73, 0x69, 0x67, + 
0x73, 0x2e, 0x6b, 0x38, 0x73, 0x2e, 0x69, 0x6f, 0x2f, 0x61, 0x70, 0x69, 0x73, 0x65, 0x72, 0x76, + 0x65, 0x72, 0x2d, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x2d, 0x70, 0x72, 0x6f, 0x78, 0x79, + 0x2f, 0x6b, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x2d, 0x63, 0x6c, + 0x69, 0x65, 0x6e, 0x74, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x63, 0x6c, 0x69, 0x65, 0x6e, + 0x74, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -671,7 +729,7 @@ func file_konnectivity_client_proto_client_client_proto_rawDescGZIP() []byte { } var file_konnectivity_client_proto_client_client_proto_enumTypes = make([]protoimpl.EnumInfo, 1) -var file_konnectivity_client_proto_client_client_proto_msgTypes = make([]protoimpl.MessageInfo, 7) +var file_konnectivity_client_proto_client_client_proto_msgTypes = make([]protoimpl.MessageInfo, 8) var file_konnectivity_client_proto_client_client_proto_goTypes = []interface{}{ (PacketType)(0), // 0: PacketType (*Packet)(nil), // 1: Packet @@ -680,23 +738,25 @@ var file_konnectivity_client_proto_client_client_proto_goTypes = []interface{}{ (*CloseRequest)(nil), // 4: CloseRequest (*CloseResponse)(nil), // 5: CloseResponse (*CloseDial)(nil), // 6: CloseDial - (*Data)(nil), // 7: Data + (*Drain)(nil), // 7: Drain + (*Data)(nil), // 8: Data } var file_konnectivity_client_proto_client_client_proto_depIdxs = []int32{ 0, // 0: Packet.type:type_name -> PacketType 2, // 1: Packet.dialRequest:type_name -> DialRequest 3, // 2: Packet.dialResponse:type_name -> DialResponse - 7, // 3: Packet.data:type_name -> Data + 8, // 3: Packet.data:type_name -> Data 4, // 4: Packet.closeRequest:type_name -> CloseRequest 5, // 5: Packet.closeResponse:type_name -> CloseResponse 6, // 6: Packet.closeDial:type_name -> CloseDial - 1, // 7: ProxyService.Proxy:input_type -> Packet - 1, // 8: ProxyService.Proxy:output_type -> Packet - 8, // [8:9] is the sub-list for method output_type - 7, // [7:8] is the sub-list for method input_type - 7, // [7:7] is the sub-list for extension type_name - 7, // [7:7] is the sub-list for extension extendee - 0, // [0:7] is the sub-list for field type_name + 7, // 7: Packet.drain:type_name -> Drain + 1, // 8: ProxyService.Proxy:input_type -> Packet + 1, // 9: ProxyService.Proxy:output_type -> Packet + 9, // [9:10] is the sub-list for method output_type + 8, // [8:9] is the sub-list for method input_type + 8, // [8:8] is the sub-list for extension type_name + 8, // [8:8] is the sub-list for extension extendee + 0, // [0:8] is the sub-list for field type_name } func init() { file_konnectivity_client_proto_client_client_proto_init() } @@ -778,6 +838,18 @@ func file_konnectivity_client_proto_client_client_proto_init() { } } file_konnectivity_client_proto_client_client_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Drain); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_konnectivity_client_proto_client_client_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*Data); i { case 0: return &v.state @@ -797,6 +869,7 @@ func file_konnectivity_client_proto_client_client_proto_init() { (*Packet_CloseRequest)(nil), (*Packet_CloseResponse)(nil), (*Packet_CloseDial)(nil), + (*Packet_Drain)(nil), } type x struct{} out := protoimpl.TypeBuilder{ @@ -804,7 +877,7 @@ func file_konnectivity_client_proto_client_client_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: 
file_konnectivity_client_proto_client_client_proto_rawDesc, NumEnums: 1, - NumMessages: 7, + NumMessages: 8, NumExtensions: 0, NumServices: 1, }, diff --git a/vendor/sigs.k8s.io/apiserver-network-proxy/konnectivity-client/proto/client/client.proto b/vendor/sigs.k8s.io/apiserver-network-proxy/konnectivity-client/proto/client/client.proto index 811278e62..006e0ac83 100644 --- a/vendor/sigs.k8s.io/apiserver-network-proxy/konnectivity-client/proto/client/client.proto +++ b/vendor/sigs.k8s.io/apiserver-network-proxy/konnectivity-client/proto/client/client.proto @@ -30,6 +30,7 @@ enum PacketType { CLOSE_RSP = 3; DATA = 4; DIAL_CLS = 5; + DRAIN = 6; } message Packet { @@ -42,6 +43,7 @@ message Packet { CloseRequest closeRequest = 5; CloseResponse closeResponse = 6; CloseDial closeDial = 7; + Drain drain = 8; } } @@ -85,6 +87,11 @@ message CloseDial { int64 random = 1; } +message Drain { + // A hint from an Agent to Server that it is pending termination. + // A Server should prefer non-draining agents for new dials. +} + message Data { // connectID to connect to int64 connectID = 1; diff --git a/vendor/sigs.k8s.io/json/Makefile b/vendor/sigs.k8s.io/json/Makefile index 07b8bfa85..fb6cf040f 100644 --- a/vendor/sigs.k8s.io/json/Makefile +++ b/vendor/sigs.k8s.io/json/Makefile @@ -19,7 +19,7 @@ vet: go vet sigs.k8s.io/json @echo "checking for external dependencies" - @deps=$$(go mod graph); \ + @deps=$$(go list -f '{{ if not (or .Standard .Module.Main) }}{{.ImportPath}}{{ end }}' -deps sigs.k8s.io/json/... || true); \ if [ -n "$${deps}" ]; then \ echo "only stdlib dependencies allowed, found:"; \ echo "$${deps}"; \ diff --git a/vendor/sigs.k8s.io/json/OWNERS b/vendor/sigs.k8s.io/json/OWNERS index 0fadafbdd..a08a434e6 100644 --- a/vendor/sigs.k8s.io/json/OWNERS +++ b/vendor/sigs.k8s.io/json/OWNERS @@ -2,5 +2,5 @@ approvers: - deads2k - - lavalamp + - jpbetz - liggitt diff --git a/vendor/sigs.k8s.io/json/internal/golang/encoding/json/decode.go b/vendor/sigs.k8s.io/json/internal/golang/encoding/json/decode.go index 6a13cf2df..d538ac119 100644 --- a/vendor/sigs.k8s.io/json/internal/golang/encoding/json/decode.go +++ b/vendor/sigs.k8s.io/json/internal/golang/encoding/json/decode.go @@ -21,10 +21,10 @@ import ( // Unmarshal parses the JSON-encoded data and stores the result // in the value pointed to by v. If v is nil or not a pointer, -// Unmarshal returns an InvalidUnmarshalError. +// Unmarshal returns an [InvalidUnmarshalError]. // // Unmarshal uses the inverse of the encodings that -// Marshal uses, allocating maps, slices, and pointers as necessary, +// [Marshal] uses, allocating maps, slices, and pointers as necessary, // with the following additional rules: // // To unmarshal JSON into a pointer, Unmarshal first handles the case of @@ -33,28 +33,28 @@ import ( // the value pointed at by the pointer. If the pointer is nil, Unmarshal // allocates a new value for it to point to. // -// To unmarshal JSON into a value implementing the Unmarshaler interface, -// Unmarshal calls that value's UnmarshalJSON method, including +// To unmarshal JSON into a value implementing [Unmarshaler], +// Unmarshal calls that value's [Unmarshaler.UnmarshalJSON] method, including // when the input is a JSON null. -// Otherwise, if the value implements encoding.TextUnmarshaler -// and the input is a JSON quoted string, Unmarshal calls that value's -// UnmarshalText method with the unquoted form of the string. 
+// Otherwise, if the value implements [encoding.TextUnmarshaler] +// and the input is a JSON quoted string, Unmarshal calls +// [encoding.TextUnmarshaler.UnmarshalText] with the unquoted form of the string. // // To unmarshal JSON into a struct, Unmarshal matches incoming object -// keys to the keys used by Marshal (either the struct field name or its tag), +// keys to the keys used by [Marshal] (either the struct field name or its tag), // preferring an exact match but also accepting a case-insensitive match. By // default, object keys which don't have a corresponding struct field are -// ignored (see Decoder.DisallowUnknownFields for an alternative). +// ignored (see [Decoder.DisallowUnknownFields] for an alternative). // // To unmarshal JSON into an interface value, // Unmarshal stores one of these in the interface value: // -// bool, for JSON booleans -// float64, for JSON numbers -// string, for JSON strings -// []interface{}, for JSON arrays -// map[string]interface{}, for JSON objects -// nil for JSON null +// - bool, for JSON booleans +// - float64, for JSON numbers +// - string, for JSON strings +// - []interface{}, for JSON arrays +// - map[string]interface{}, for JSON objects +// - nil for JSON null // // To unmarshal a JSON array into a slice, Unmarshal resets the slice length // to zero and then appends each element to the slice. @@ -72,16 +72,15 @@ import ( // use. If the map is nil, Unmarshal allocates a new map. Otherwise Unmarshal // reuses the existing map, keeping existing entries. Unmarshal then stores // key-value pairs from the JSON object into the map. The map's key type must -// either be any string type, an integer, implement json.Unmarshaler, or -// implement encoding.TextUnmarshaler. +// either be any string type, an integer, or implement [encoding.TextUnmarshaler]. // -// If the JSON-encoded data contain a syntax error, Unmarshal returns a SyntaxError. +// If the JSON-encoded data contain a syntax error, Unmarshal returns a [SyntaxError]. // // If a JSON value is not appropriate for a given target type, // or if a JSON number overflows the target type, Unmarshal // skips that field and completes the unmarshaling as best it can. // If no more serious errors are encountered, Unmarshal returns -// an UnmarshalTypeError describing the earliest such error. In any +// an [UnmarshalTypeError] describing the earliest such error. In any // case, it's not guaranteed that all the remaining fields following // the problematic one will be unmarshaled into the target object. // @@ -119,7 +118,7 @@ func Unmarshal(data []byte, v any, opts ...UnmarshalOpt) error { // a JSON value. UnmarshalJSON must copy the JSON data // if it wishes to retain the data after returning. // -// By convention, to approximate the behavior of Unmarshal itself, +// By convention, to approximate the behavior of [Unmarshal] itself, // Unmarshalers implement UnmarshalJSON([]byte("null")) as a no-op. type Unmarshaler interface { UnmarshalJSON([]byte) error @@ -157,8 +156,8 @@ func (e *UnmarshalFieldError) Error() string { return "json: cannot unmarshal object key " + strconv.Quote(e.Key) + " into unexported field " + e.Field.Name + " of type " + e.Type.String() } -// An InvalidUnmarshalError describes an invalid argument passed to Unmarshal. -// (The argument to Unmarshal must be a non-nil pointer.) +// An InvalidUnmarshalError describes an invalid argument passed to [Unmarshal]. +// (The argument to [Unmarshal] must be a non-nil pointer.) 
type InvalidUnmarshalError struct { Type reflect.Type } @@ -573,17 +572,10 @@ func (d *decodeState) array(v reflect.Value) error { break } - // Get element of array, growing if necessary. + // Expand slice length, growing the slice if necessary. if v.Kind() == reflect.Slice { - // Grow slice if necessary if i >= v.Cap() { - newcap := v.Cap() + v.Cap()/2 - if newcap < 4 { - newcap = 4 - } - newv := reflect.MakeSlice(v.Type(), v.Len(), newcap) - reflect.Copy(newv, v) - v.Set(newv) + v.Grow(1) } if i >= v.Len() { v.SetLen(i + 1) @@ -620,13 +612,11 @@ func (d *decodeState) array(v reflect.Value) error { if i < v.Len() { if v.Kind() == reflect.Array { - // Array. Zero the rest. - z := reflect.Zero(v.Type().Elem()) for ; i < v.Len(); i++ { - v.Index(i).Set(z) + v.Index(i).SetZero() // zero remainder of array } } else { - v.SetLen(i) + v.SetLen(i) // truncate the slice } } if i == 0 && v.Kind() == reflect.Slice { @@ -636,7 +626,7 @@ func (d *decodeState) array(v reflect.Value) error { } var nullLiteral = []byte("null") -var textUnmarshalerType = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem() +var textUnmarshalerType = reflect.TypeFor[encoding.TextUnmarshaler]() // object consumes an object from d.data[d.off-1:], decoding into v. // The first byte ('{') of the object has been read already. @@ -776,7 +766,7 @@ func (d *decodeState) object(v reflect.Value) error { if !mapElem.IsValid() { mapElem = reflect.New(elemType).Elem() } else { - mapElem.Set(reflect.Zero(elemType)) + mapElem.SetZero() } subv = mapElem if checkDuplicateField != nil { @@ -784,28 +774,14 @@ func (d *decodeState) object(v reflect.Value) error { } d.appendStrictFieldStackKey(string(key)) } else { - var f *field - if i, ok := fields.nameIndex[string(key)]; ok { - // Found an exact name match. - f = &fields.list[i] - if checkDuplicateField != nil { - checkDuplicateField(i, f.name) - } - } else if !d.caseSensitive { - // Fall back to the expensive case-insensitive - // linear search. 
- for i := range fields.list { - ff := &fields.list[i] - if ff.equalFold(ff.nameBytes, key) { - f = ff - if checkDuplicateField != nil { - checkDuplicateField(i, f.name) - } - break - } - } + f := fields.byExactName[string(key)] + if f == nil && !d.caseSensitive { + f = fields.byFoldedName[string(foldName(key))] } if f != nil { + if checkDuplicateField != nil { + checkDuplicateField(f.listIndex, f.name) + } subv = v destring = f.quoted for _, i := range f.index { @@ -874,33 +850,35 @@ func (d *decodeState) object(v reflect.Value) error { if v.Kind() == reflect.Map { kt := t.Key() var kv reflect.Value - switch { - case reflect.PointerTo(kt).Implements(textUnmarshalerType): + if reflect.PointerTo(kt).Implements(textUnmarshalerType) { kv = reflect.New(kt) if err := d.literalStore(item, kv, true); err != nil { return err } kv = kv.Elem() - case kt.Kind() == reflect.String: - kv = reflect.ValueOf(key).Convert(kt) - default: + } else { switch kt.Kind() { + case reflect.String: + kv = reflect.New(kt).Elem() + kv.SetString(string(key)) case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: s := string(key) n, err := strconv.ParseInt(s, 10, 64) - if err != nil || reflect.Zero(kt).OverflowInt(n) { + if err != nil || kt.OverflowInt(n) { d.saveError(&UnmarshalTypeError{Value: "number " + s, Type: kt, Offset: int64(start + 1)}) break } - kv = reflect.ValueOf(n).Convert(kt) + kv = reflect.New(kt).Elem() + kv.SetInt(n) case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: s := string(key) n, err := strconv.ParseUint(s, 10, 64) - if err != nil || reflect.Zero(kt).OverflowUint(n) { + if err != nil || kt.OverflowUint(n) { d.saveError(&UnmarshalTypeError{Value: "number " + s, Type: kt, Offset: int64(start + 1)}) break } - kv = reflect.ValueOf(n).Convert(kt) + kv = reflect.New(kt).Elem() + kv.SetUint(n) default: panic("json: Unexpected key type") // should never occur } @@ -950,12 +928,12 @@ func (d *decodeState) convertNumber(s string) (any, error) { f, err := strconv.ParseFloat(s, 64) if err != nil { - return nil, &UnmarshalTypeError{Value: "number " + s, Type: reflect.TypeOf(0.0), Offset: int64(d.off)} + return nil, &UnmarshalTypeError{Value: "number " + s, Type: reflect.TypeFor[float64](), Offset: int64(d.off)} } return f, nil } -var numberType = reflect.TypeOf(Number("")) +var numberType = reflect.TypeFor[Number]() // literalStore decodes a literal stored in item into v. // @@ -965,7 +943,7 @@ var numberType = reflect.TypeOf(Number("")) func (d *decodeState) literalStore(item []byte, v reflect.Value, fromQuoted bool) error { // Check for unmarshaler. if len(item) == 0 { - //Empty string given + // Empty string given. 
d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) return nil } @@ -1012,7 +990,7 @@ func (d *decodeState) literalStore(item []byte, v reflect.Value, fromQuoted bool } switch v.Kind() { case reflect.Interface, reflect.Pointer, reflect.Map, reflect.Slice: - v.Set(reflect.Zero(v.Type())) + v.SetZero() // otherwise, ignore null for primitives/string } case 't', 'f': // true, false @@ -1064,10 +1042,11 @@ func (d *decodeState) literalStore(item []byte, v reflect.Value, fromQuoted bool } v.SetBytes(b[:n]) case reflect.String: - if v.Type() == numberType && !isValidNumber(string(s)) { + t := string(s) + if v.Type() == numberType && !isValidNumber(t) { return fmt.Errorf("json: invalid number literal, trying to unmarshal %q into Number", item) } - v.SetString(string(s)) + v.SetString(t) case reflect.Interface: if v.NumMethod() == 0 { v.Set(reflect.ValueOf(string(s))) @@ -1083,13 +1062,12 @@ func (d *decodeState) literalStore(item []byte, v reflect.Value, fromQuoted bool } panic(phasePanicMsg) } - s := string(item) switch v.Kind() { default: if v.Kind() == reflect.String && v.Type() == numberType { // s must be a valid number, because it's // already been tokenized. - v.SetString(s) + v.SetString(string(item)) break } if fromQuoted { @@ -1097,7 +1075,7 @@ func (d *decodeState) literalStore(item []byte, v reflect.Value, fromQuoted bool } d.saveError(&UnmarshalTypeError{Value: "number", Type: v.Type(), Offset: int64(d.readIndex())}) case reflect.Interface: - n, err := d.convertNumber(s) + n, err := d.convertNumber(string(item)) if err != nil { d.saveError(err) break @@ -1109,25 +1087,25 @@ func (d *decodeState) literalStore(item []byte, v reflect.Value, fromQuoted bool v.Set(reflect.ValueOf(n)) case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - n, err := strconv.ParseInt(s, 10, 64) + n, err := strconv.ParseInt(string(item), 10, 64) if err != nil || v.OverflowInt(n) { - d.saveError(&UnmarshalTypeError{Value: "number " + s, Type: v.Type(), Offset: int64(d.readIndex())}) + d.saveError(&UnmarshalTypeError{Value: "number " + string(item), Type: v.Type(), Offset: int64(d.readIndex())}) break } v.SetInt(n) case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - n, err := strconv.ParseUint(s, 10, 64) + n, err := strconv.ParseUint(string(item), 10, 64) if err != nil || v.OverflowUint(n) { - d.saveError(&UnmarshalTypeError{Value: "number " + s, Type: v.Type(), Offset: int64(d.readIndex())}) + d.saveError(&UnmarshalTypeError{Value: "number " + string(item), Type: v.Type(), Offset: int64(d.readIndex())}) break } v.SetUint(n) case reflect.Float32, reflect.Float64: - n, err := strconv.ParseFloat(s, v.Type().Bits()) + n, err := strconv.ParseFloat(string(item), v.Type().Bits()) if err != nil || v.OverflowFloat(n) { - d.saveError(&UnmarshalTypeError{Value: "number " + s, Type: v.Type(), Offset: int64(d.readIndex())}) + d.saveError(&UnmarshalTypeError{Value: "number " + string(item), Type: v.Type(), Offset: int64(d.readIndex())}) break } v.SetFloat(n) diff --git a/vendor/sigs.k8s.io/json/internal/golang/encoding/json/encode.go b/vendor/sigs.k8s.io/json/internal/golang/encoding/json/encode.go index 5b67251fb..eb73bff58 100644 --- a/vendor/sigs.k8s.io/json/internal/golang/encoding/json/encode.go +++ b/vendor/sigs.k8s.io/json/internal/golang/encoding/json/encode.go @@ -12,12 +12,13 @@ package json import ( "bytes" + "cmp" "encoding" "encoding/base64" "fmt" "math" "reflect" - "sort" + 
"slices" "strconv" "strings" "sync" @@ -28,29 +29,30 @@ import ( // Marshal returns the JSON encoding of v. // // Marshal traverses the value v recursively. -// If an encountered value implements the Marshaler interface -// and is not a nil pointer, Marshal calls its MarshalJSON method -// to produce JSON. If no MarshalJSON method is present but the -// value implements encoding.TextMarshaler instead, Marshal calls -// its MarshalText method and encodes the result as a JSON string. +// If an encountered value implements [Marshaler] +// and is not a nil pointer, Marshal calls [Marshaler.MarshalJSON] +// to produce JSON. If no [Marshaler.MarshalJSON] method is present but the +// value implements [encoding.TextMarshaler] instead, Marshal calls +// [encoding.TextMarshaler.MarshalText] and encodes the result as a JSON string. // The nil pointer exception is not strictly necessary // but mimics a similar, necessary exception in the behavior of -// UnmarshalJSON. +// [Unmarshaler.UnmarshalJSON]. // // Otherwise, Marshal uses the following type-dependent default encodings: // // Boolean values encode as JSON booleans. // -// Floating point, integer, and Number values encode as JSON numbers. +// Floating point, integer, and [Number] values encode as JSON numbers. +// NaN and +/-Inf values will return an [UnsupportedValueError]. // // String values encode as JSON strings coerced to valid UTF-8, // replacing invalid bytes with the Unicode replacement rune. // So that the JSON will be safe to embed inside HTML